seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27193127483 | from collections import namedtuple
import re
import string
import logging
import pickle
class Files:
dictionary = "dataset/nettalk.data"
top1000words = "dataset/nettalk.list"
continuous = "dataset/data"
Word = namedtuple('Word', ['letters', 'phonemes', 'structure', 'correspondance'])
all_letters = string.ascii_lowercase + ',' + '.' + ' '
all_phoneme_traits = frozenset([
'front1',
'front2',
'central1',
'central2',
'back1',
'back2',
'stop',
'nasal',
'fricative',
'affricative',
'glide',
'liquid',
'voiced', # 'unvoiced' is the default
'tensed',
'high',
'medium',
'low',
'silent',
'elide',
'pause',
'full stop'
])
all_stress_traits = frozenset([
'stress1',
'stress3', # 'stress2' is the default
'syllable boundary'
])
# synonyms for the same phoneme traits
phoneme_trait_synonyms = {
'labial' : 'front1',
'dental' : 'front2',
'alveolar' : 'central1',
'palatal' : 'central2',
'velar' : 'back1',
'glottal' : 'back2'
}
# traits we can ignore because they are the defaults
phoneme_trait_defaults = set([
'unvoiced'
])
phonemes_data = [
('a', ['low', 'tensed', 'central2']),
('b', ['voiced', 'labial', 'stop']),
('c', ['unvoiced', 'velar', 'medium']),
('d', ['voiced', 'alveolar', 'stop']),
('e', ['medium', 'tensed', 'front2']),
('f', ['unvoiced', 'labial', 'fricative']),
('g', ['voiced', 'velar', 'stop']),
('h', ['unvoiced', 'glottal', 'glide']),
('i', ['high', 'tensed', 'front1']),
('k', ['unvoiced', 'velar', 'stop']),
('l', ['voiced', 'dental', 'liquid']),
('m', ['voiced', 'labial', 'nasal']),
('n', ['voiced', 'alveolar', 'nasal']),
('o', ['medium', 'tensed', 'back2']),
('p', ['unvoiced', 'labial', 'stop']),
('r', ['voiced', 'palatal', 'liquid']),
('s', ['unvoiced', 'alveolar', 'fricative']),
('t', ['unvoiced', 'alveolar', 'stop']),
('u', ['high', 'tensed', 'back2']),
('v', ['voiced', 'labial', 'fricative']),
('w', ['voiced', 'labial', 'glide']),
('x', ['medium', 'central2']),
('y', ['voiced', 'palatal', 'glide']),
('z', ['voiced', 'alveolar', 'fricative']),
('A', ['medium', 'tensed', 'front2', 'central1']),
('C', ['unvoiced', 'palatal', 'affricative']),
('D', ['voiced', 'dental', 'fricative']),
('E', ['medium', 'front1', 'front2']),
('G', ['voiced', 'velar', 'nasal']),
('I', ['high', 'front1']),
('J', ['voiced', 'velar', 'nasal']),
('K', ['unvoiced', 'palatal', 'fricative', 'velar', 'affricative']),
('L', ['voiced', 'alveolar', 'liquid']),
('M', ['voiced', 'dental', 'nasal']),
('N', ['voiced', 'palatal', 'nasal']),
('O', ['medium', 'tensed', 'central1', 'central2']),
('Q', ['voiced', 'labial', 'velar', 'affricative', 'stop']),
('R', ['voiced', 'velar', 'liquid']),
('S', ['unvoiced', 'palatal', 'fricative']),
('T', ['unvoiced', 'dental', 'fricative']),
('U', ['high', 'back1']),
('W', ['high', 'medium', 'tensed', 'central2', 'back1']),
('X', ['unvoiced', 'affricative', 'front2', 'central1']),
('Y', ['high', 'tensed', 'front1', 'front2', 'central1']),
('Z', ['voiced', 'palatal', 'fricative']),
('@', ['low', 'front2']),
('!', ['unvoiced', 'labial', 'dental', 'affricative']),
('#', ['voiced', 'palatal', 'velar', 'affricative']),
('*', ['voiced', 'glide', 'front1', 'low', 'central1']),
(':', ['high', 'front1', 'front2']),
('^', ['low', 'central1']),
('-', ['silent', 'elide']),
(' ', ['pause', 'elide']),
('.', ['pause', 'full stop'])
]
for (name, traits) in phonemes_data:
# map synonyms
for (i, trait) in enumerate(traits):
if trait in phoneme_trait_synonyms:
traits[i] = phoneme_trait_synonyms[trait]
# delete defaults
for (i, trait) in enumerate(traits):
if trait in phoneme_trait_defaults:
del traits[i]
# encapsulate mapped traits
phoneme_traits = dict({(name, frozenset(traits)) for name, traits in phonemes_data})
# make sure there are no errors
for traits in phoneme_traits.itervalues():
assert traits.issubset(all_phoneme_traits), 'one is a bad trait: %s' % traits
def loadDictionary():
dictionary = {}
with open(Files.dictionary) as f:
for line in f:
# break line into columns
line = line.strip()
cols = line.split('\t')
# skip lines that don't appear to be dictionary entries
if len(cols) != 4:
logging.debug('skipping line: %s' % line)
continue
else:
word = Word(*cols)
dictionary[word.letters] = word
return dictionary
def loadTop1000Words(dict):
text = file(Files.top1000words).read()
text = re.search(r'\((\w+\b\s*){1000}\)', text).group(0)
text = text.lower()
words = re.findall(r'\w+', text)
return [dict[w] for w in words]
def loadContinuous(dict):
f = open(Files.continuous,'r')
text = pickle.load(f)
ltr = text[0]
letters = ltr
pho = text[1]
phonemes = pho
training_set = [(letters, phonemes)]
return training_set
dictionary = loadDictionary()
top1000words = loadTop1000Words(dictionary)
continuous = loadContinuous(dictionary)
| dtingley/netwhisperer | corpus.py | corpus.py | py | 5,330 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "re.... |
9862393543 | import random
from flask import Flask, render_template, request
import tensorflow as tf
import numpy as np
from io import BytesIO
from PIL import Image
import base64
import os
# initiates flask app
app = Flask(__name__)
tf.get_logger().setLevel('ERROR')
model = None
model = tf.keras.models.load_model("prod_model.h5")
# loads in the weights of the model
model.load_weights("new_model_3000_0.h5")
# defines home route
@app.route("/")
def home():
send = ""
return render_template("index.html", send="")
# defines route to submit user image
@app.route("/guess", methods=["POST"])
def guess():
# gets the data from the image drawn by user
image_data = request.form["image_data"]
# saves the full image data to be used later before it is manipulated
image_data_full = image_data
# splits the data into the values needed to make an array
image_data = image_data.split(",")[1]
# decodes the data to make it usable
decoded_data = base64.b64decode(image_data)
# creates a PIL image
image = Image.open(BytesIO(decoded_data)).convert('L')
# turns image into a numpy array and preprocesses the array in the same way as the training images
image_array = np.reshape(np.array(image).astype(float) / 255, (1,400,400,1))
# defines the parameters of the model
lambda_ = 0.01
dropout_enter = 0
dropout_exit = 0.25
#sets the model to be used to predict what the user drew if the model couldn't be loaded
global model
if model is None:
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(12, (6, 6), strides=(1, 1), padding="valid", activation="relu",
input_shape=(400, 400, 1), kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
tf.keras.layers.Dropout(dropout_enter),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(12, (8, 8), strides=(1, 1), padding="valid", activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
tf.keras.layers.Dropout(dropout_enter),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(12, (10, 10), strides=(1, 1), padding="valid", activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
tf.keras.layers.Dropout(dropout_exit),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(12, (12, 12), strides=(1, 1), padding="valid", activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
tf.keras.layers.Dropout(dropout_exit),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(20, activation="softmax")
])
prediction = model.predict(tf.convert_to_tensor(image_array))
# turns the output of the model into a human-readable response
index = prediction.argmax()
categories = ["umbrella", "house", "sun", "apple", "envelope", "star", "heart",
"lightning bolt", "cloud", "spoon", "balloon", "mug", "mountains",
"fish", "bowtie", "ladder", "ice cream cone", "bow", "moon", "smiley"]
# gets the path need to display an example image on the front end
image_paths = ["umbrella", "house", "sun", "apple", "envelope", "star", "heart",
"lightning", "cloud", "spoon", "balloon", "mug", "mountains",
"fish", "bowtie", "ladder", "icecream", "bow", "moon", "smiley"]
# randomly picks on of 3 images to show
num = random.randint(1, 3)
image_url = "Images/" + image_paths[index] + str(num) + ".png"
send = categories[index]
# renders a template with the guess from the model
return render_template("guess.html", send=send, index=index, image=image_url, imagedata=image_data_full)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| joeschueren/SketchDetect | main.py | main.py | py | 4,108 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_logger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "... |
42528330524 | import requests
from bs4 import BeautifulSoup
import re
import json
import sys
import eventlet
import concurrent.futures
import Constants
class Scraper:
def __init__(self, url_to_check):
self.BASE_URL = url_to_check
self.dictionary = {}
@staticmethod
def get_html(url):
try:
website_html = requests.get(url)
html = BeautifulSoup(website_html.text, 'html.parser')
except requests.exceptions.SSLError:
print(Constants.WEBSITE_NOT_FOUND_ERROR)
sys.exit(0)
except requests.exceptions.ConnectionError:
return None
return html
@staticmethod
def get_attributes(html, base_url, tag_name, attr_name):
links = []
for tag in html.findAll(tag_name):
url = str(tag.get(attr_name))
if re.search("^https?://", url) is None:
if not str(url).startswith("/") and not str(base_url).endswith("/"):
url = base_url + "/" + url
elif str(url).startswith("/") and str(base_url).endswith("/"):
base_url = base_url[:-1]
url = base_url + url
else:
url = base_url + url
links.append(url)
return links
def get_all_urls(self, url):
html = self.get_html(url)
if html:
links = self.get_attributes(html, url, "a", "href")
return links
def check_the_urls(self, link_to_check):
all_urls = self.get_all_urls(link_to_check)
if all_urls:
if link_to_check.endswith("/"):
link_to_check = link_to_check[:-1]
if link_to_check not in self.dictionary.keys():
for_each_broken_links = []
valid_links = []
for url in all_urls:
try:
with eventlet.Timeout(10):
get_link = requests.get(url)
if get_link.status_code >= 400:
for_each_broken_links.append(url)
continue
except requests.exceptions.ConnectionError:
for_each_broken_links.append(url)
continue
if url not in valid_links:
valid_links.append(url)
print("valid url -> ", str(url))
self.dictionary[link_to_check] = for_each_broken_links
return valid_links
def write(self):
with open("file.json", "w") as file:
file.truncate(0)
json.dump(self.dictionary, file)
def main(url, first_base_url):
scraper = Scraper(url)
normal_urls = scraper.check_the_urls(url)
while True:
if normal_urls:
for link in normal_urls:
if (link.split("//")[1]).find(str(first_base_url)) and link not in scraper.dictionary.keys():
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(scraper.check_the_urls, link)
return_value = future.result()
if return_value:
for value in return_value:
if value not in normal_urls:
normal_urls.append(value)
if link in normal_urls:
normal_urls.remove(link)
else:
normal_urls.remove(link)
break
else:
break
scraper.write()
| Hayk1997gh/Broken_Link_Checker | Scraper.py | Scraper.py | py | 3,659 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "Constants.WEB... |
31691135600 | """
Module: libfmp.c8.c8s1_hps
Author: Meinard Müller, Frank Zalkow
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
from collections import OrderedDict
import numpy as np
from scipy import signal
import librosa
import IPython.display as ipd
import pandas as pd
def median_filter_horizontal(x, filter_len):
"""Apply median filter in horizontal direction
Notebook: C8/C8S1_HPS.ipynb
"""
return signal.medfilt(x, [1, filter_len])
def median_filter_vertical(x, filter_len):
"""Apply median filter in vertical direction
Notebook: C8/C8S1_HPS.ipynb
"""
return signal.medfilt(x, [filter_len, 1])
def convert_l_sec_to_frames(L_h_sec, Fs=22050, N=1024, H=512):
"""Convert filter length parameter from seconds to frame indices
Notebook: C8/C8S1_HPS.ipynb
"""
L_h = int(np.ceil(L_h_sec * Fs / H))
return L_h
def convert_l_hertz_to_bins(L_p_Hz, Fs=22050, N=1024, H=512):
"""Convert filter length parameter from Hertz to frequency bins
Notebook: C8/C8S1_HPS.ipynb
"""
L_p = int(np.ceil(L_p_Hz * N / Fs))
return L_p
def make_integer_odd(n):
"""Convert integer into odd integer
Notebook: C8/C8S1_HPS.ipynb
"""
if(n % 2 == 0):
n += 1
return n
def hps(x, Fs, N, H, L_h, L_p, L_unit='physical', mask='binary', eps=0.001, detail=False):
"""Harmonic-percussive separation (HPS) algorithm
Notebook: C8/C8S1_HPS.ipynb
Args:
x: Input signal
Fs: Sampling rate of x
N: Frame length
H: Hopsize
L_h: Horizontal median filter length given in seconds or frames
L_p: Percussive median filter length given in Hertz or bins
L_unit: Adjusts unit, either 'pyhsical' or 'indices'
mask: Either 'binary' or 'soft'
eps: Parameter used in soft maskig
detail (bool): Returns detailed information
Returns:
x_h: Harmonic signal
x_p: Percussive signal
dict: dictionary containing detailed information; returned if "detail=True"
"""
assert L_unit in ['physical', 'indices']
assert mask in ['binary', 'soft']
# stft
X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, window='hann', center=True, pad_mode='constant')
# power spectrogram
Y = np.abs(X) ** 2
# median filtering
if L_unit == 'physical':
L_h = convert_l_sec_to_frames(L_h_sec=L_h, Fs=Fs, N=N, H=H)
L_p = convert_l_hertz_to_bins(L_p_Hz=L_p, Fs=Fs, N=N, H=H)
L_h = make_integer_odd(L_h)
L_p = make_integer_odd(L_p)
Y_h = signal.medfilt(Y, [1, L_h])
Y_p = signal.medfilt(Y, [L_p, 1])
# masking
if mask == 'binary':
M_h = np.int8(Y_h >= Y_p)
M_p = np.int8(Y_h < Y_p)
if mask == 'soft':
eps = 0.00001
M_h = (Y_h + eps / 2) / (Y_h + Y_p + eps)
M_p = (Y_p + eps / 2) / (Y_h + Y_p + eps)
X_h = X * M_h
X_p = X * M_p
# istft
x_h = librosa.istft(X_h, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
x_p = librosa.istft(X_p, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
if detail:
return x_h, x_p, dict(Y_h=Y_h, Y_p=Y_p, M_h=M_h, M_p=M_p, X_h=X_h, X_p=X_p)
else:
return x_h, x_p
def generate_audio_tag_html_list(list_x, Fs, width='150', height='40'):
"""Generates audio tag for html needed to be shown in table
Notebook: C8/C8S1_HPS.ipynb
"""
audio_tag_html_list = []
for i in range(len(list_x)):
audio_tag = ipd.Audio(list_x[i], rate=Fs)
audio_tag_html = audio_tag._repr_html_().replace('\n', '').strip()
audio_tag_html = audio_tag_html.replace('<audio ',
'<audio style="width: '+width+'px; height: '+height+'px;"')
audio_tag_html_list.append(audio_tag_html)
return audio_tag_html_list
def hrps(x, Fs, N, H, L_h, L_p, beta=2, L_unit='physical', detail=False):
"""Harmonic-residual-percussive separation (HRPS) algorithm
Notebook: C8/C8S1_HPS.ipynb
Args:
x: Input signal
Fs: Sampling rate of x
N: Frame length
H: Hopsize
L_h: Horizontal median filter length given in seconds or frames
L_p: Percussive median filter length given in Hertz or bins
beta: Separation factor
L_unit: Adjusts unit, either 'pyhsical' or 'indices'
detail (bool): Returns detailed information
Returns:
x_h: Harmonic signal
x_p: Percussive signal
x_r: Residual signal
dict: dictionary containing detailed information; returned if "detail=True"
"""
assert L_unit in ['physical', 'indices']
# stft
X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, window='hann', center=True, pad_mode='constant')
# power spectrogram
Y = np.abs(X) ** 2
# median filtering
if L_unit == 'physical':
L_h = convert_l_sec_to_frames(L_h_sec=L_h, Fs=Fs, N=N, H=H)
L_p = convert_l_hertz_to_bins(L_p_Hz=L_p, Fs=Fs, N=N, H=H)
L_h = make_integer_odd(L_h)
L_p = make_integer_odd(L_p)
Y_h = signal.medfilt(Y, [1, L_h])
Y_p = signal.medfilt(Y, [L_p, 1])
# masking
M_h = np.int8(Y_h >= beta * Y_p)
M_p = np.int8(Y_p > beta * Y_h)
M_r = 1 - (M_h + M_p)
X_h = X * M_h
X_p = X * M_p
X_r = X * M_r
# istft
x_h = librosa.istft(X_h, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
x_p = librosa.istft(X_p, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
x_r = librosa.istft(X_r, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
if detail:
return x_h, x_p, x_r, dict(Y_h=Y_h, Y_p=Y_p, M_h=M_h, M_r=M_r, M_p=M_p, X_h=X_h, X_r=X_r, X_p=X_p)
else:
return x_h, x_p, x_r
def experiment_hrps_parameter(fn_wav, param_list):
"""Script for running experiment over parameter list [[1024, 256, 0.1, 100], ...
Notebook: C8/C8S1_HRPS.ipynb
"""
Fs = 22050
x, Fs = librosa.load(fn_wav, sr=Fs)
list_x = []
list_x_h = []
list_x_p = []
list_x_r = []
list_N = []
list_H = []
list_L_h_sec = []
list_L_p_Hz = []
list_L_h = []
list_L_p = []
list_beta = []
for param in param_list:
N, H, L_h_sec, L_p_Hz, beta = param
print('N=%4d, H=%4d, L_h_sec=%4.2f, L_p_Hz=%3.1f, beta=%3.1f' % (N, H, L_h_sec, L_p_Hz, beta))
x_h, x_p, x_r = hrps(x, Fs=Fs, N=1024, H=512, L_h=L_h_sec, L_p=L_p_Hz, beta=beta)
L_h = convert_l_sec_to_frames(L_h_sec=L_h_sec, Fs=Fs, N=N, H=H)
L_p = convert_l_hertz_to_bins(L_p_Hz=L_p_Hz, Fs=Fs, N=N, H=H)
list_x.append(x)
list_x_h.append(x_h)
list_x_p.append(x_p)
list_x_r.append(x_r)
list_N.append(N)
list_H.append(H)
list_L_h_sec.append(L_h_sec)
list_L_p_Hz.append(L_p_Hz)
list_L_h.append(L_h)
list_L_p.append(L_p)
list_beta.append(beta)
html_x = generate_audio_tag_html_list(list_x, Fs=Fs)
html_x_h = generate_audio_tag_html_list(list_x_h, Fs=Fs)
html_x_p = generate_audio_tag_html_list(list_x_p, Fs=Fs)
html_x_r = generate_audio_tag_html_list(list_x_r, Fs=Fs)
pd.options.display.float_format = '{:,.1f}'.format
pd.set_option('display.max_colwidth', None)
df = pd.DataFrame(OrderedDict([
('$N$', list_N),
('$H$', list_H),
('$L_h$ (sec)', list_L_h_sec),
('$L_p$ (Hz)', list_L_p_Hz),
('$L_h$', list_L_h),
('$L_p$', list_L_p),
('$\\beta$', list_beta),
('$x$', html_x),
('$x_h$', html_x_h),
('$x_r$', html_x_r),
('$x_p$', html_x_p)]))
df.index = np.arange(1, len(df) + 1)
ipd.display(ipd.HTML(df.to_html(escape=False, index=False)))
| christofw/pitchclass_mctc | libfmp/c8/c8s1_hps.py | c8s1_hps.py | py | 8,127 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "scipy.signal.medfilt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "scipy.signal.medfilt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.signal",... |
423347793 | import numpy as np
import yfinance as yf
import ta
import pandas as pd
from ta.trend import ADXIndicator
import pyxirr
def get_clean_df(ticker):
df = yf.Ticker(ticker).history(
period="10y").reset_index()[["Date", "Close", "Dividends", 'High', "Low"]]
df["Close"] = yf.download(tickers=ticker, period="10y")["Adj Close"].values
df["Returns"] = df["Close"].pct_change()
df["RSI"] = ta.momentum.RSIIndicator(df["Close"], 14).rsi()
adxI = ADXIndicator(df['High'], df['Low'], df['Close'], 14, True)
df["Plus DI"] = adxI.adx_pos()
df['Minus DI'] = adxI.adx_neg()
df['ADX'] = adxI.adx()
return df
def mod_df(conditions, df, reinvest_dividends):
ret_cond = conditions[0]
rsi_cond = conditions[1]*10
adx_cond = conditions[2]*10
for i in range(len(df["Returns"])):
df.at[i, "ADX_tf"] = df.at[i, "ADX"] >= adx_cond and df.at[i,
"Plus DI"] <= df.at[i, "Minus DI"]
df["Portfolio Opt"] = np.zeros(len(df["RSI"].values))
df["Buy Opt"] = np.zeros(len(df["RSI"].values))
df = df.dropna().reset_index(drop=True)
for i in np.arange(len(df["Returns"].values)):
if df["Returns"].values[i] < -ret_cond/100 and df["RSI"].values[i] <= rsi_cond and df["ADX_tf"].values[i] == True:
df.at[i, "Portfolio Opt"] = -100
df.at[i, "Buy Opt"] = 100/df["Close"].values[i]
df = pd.concat([df, df.tail(1)], axis=0).reset_index(drop=True)
df.loc[df.index[-1], "Portfolio Opt"] = 0
df.loc[df.index[-1], "Buy Opt"] = 0
if reinvest_dividends:
df.at[0, "Holdings Opt"] = df.at[0, "Buy Opt"]
for i in np.arange(len(df["Returns"].values)-1):
df.at[i+1, "Holdings Opt"] = df.at[i,
"Holdings Opt"] + df.at[i+1, "Buy Opt"] + (df.at[i, "Holdings Opt"] * df.at[i+1, "Dividends"])/df.at[i+1, "Close"]
df.loc[df.index[-1], "Portfolio Opt"] = df["Close"].values[-1] * \
df.loc[df.index[-1], "Holdings Opt"]
return df
def get_buy_months(df):
df['Month Year'] = df['Date'].dt.to_period('M')
buy_months = [i for i in df[df['Portfolio Opt'] == -100]
["Month Year"].values]
unique_months_pct = len(set(buy_months))/(10*12 + 1)*100
return unique_months_pct
def get_performance(df):
try:
# xirr_value = xirr(df[df['Portfolio Opt'] != 0]
# ["Portfolio Opt"].values, df[df['Portfolio Opt'] != 0]
# ["Date"].values)*100
xirr_value = pyxirr.xirr(
df["Date"].values, df["Portfolio Opt"].values)*100
except:
xirr_value = 0
return xirr_value
def get_irr_all(df, reinvest_dividends):
df["Portfolio All"] = np.zeros(len(df["RSI"].values))
df["Buy All"] = np.zeros(len(df["RSI"].values))
df = df.dropna().reset_index(drop=True)
for i in np.arange(len(df["Returns"].values)):
df.at[i, "Portfolio All"] = -100
df.at[i, "Buy All"] = 100/df["Close"].values[i]
df = pd.concat([df, df.tail(1)], axis=0).reset_index()
df.loc[df.index[-1], "Portfolio All"] = 0
df.loc[df.index[-1], "Buy All"] = 0
if reinvest_dividends:
df.at[0, "Holdings All"] = df.at[0, "Buy All"]
for i in np.arange(len(df["Returns"].values)-1):
df.at[i+1, "Holdings All"] = df.at[i,
"Holdings All"] + df.at[i+1, "Buy All"] + (df.at[i, "Holdings All"] * df.at[i+1, "Dividends"])/df.at[i+1, "Close"]
df.loc[df.index[-1], "Portfolio All"] = df["Close"].values[-1] * \
df.loc[df.index[-1], "Holdings All"]
all_irr = pyxirr.xirr(df["Date"].values, df["Portfolio All"].values)
return all_irr*100
def iterative_function(conditions, df, reinvest_dividends, pct_trading):
temp_df = mod_df(conditions, df, reinvest_dividends)
unique_months_pct_temp = get_buy_months(temp_df)
if unique_months_pct_temp >= pct_trading:
irr = get_performance(temp_df)
else:
irr = 0
return irr
def find_best_sco(ticker):
df = get_clean_df(ticker)
conditions = [0, 0, 0]
irr = 0
for ret in np.linspace(0, 5, 16):
for rsi in np.linspace(0, 7, 15):
for adx in np.linspace(0, 7, 15):
conditions_temp = [ret, rsi, adx]
irr_temp = iterative_function(
conditions_temp, df, True, 33)
if irr_temp > irr:
irr = irr_temp
conditions = conditions_temp
print(irr, conditions)
# bounds = ((0, 5), (0, 10), (0, 10))
# result = sco.minimize(iterative_function, (1, 3.5, 2),
# (df, reinvest_dividends, pct_trading), method="SLSQP", bounds=bounds, options={'eps': 0.01})
all_irr = get_irr_all(df, True)
np.savetxt(f"./optimise_data/{ticker}_optimise.csv",
np.array([all_irr, irr, conditions[0], conditions[1]*10, conditions[2]*10]))
if __name__ == "__main__":
ticker = "SSSS"
find_best_sco(ticker)
# find_best_sco("ALD.PA")
# df = mod_df((1, 70, 1), get_clean_df("TTE"), True)
# print(get_clean_df("AAPL"))
| victormorizon/stable-dividend-stock-trading-strategy | functions.py | functions.py | py | 5,229 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "yfinance.Ticker",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "yfinance.download",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ta.momentum.RSIIndicator",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ta.momentu... |
24669291411 | import requests, json
import pandas as pd
import os
from datetime import date
#from mysql.connector import connect, Error
from flatten_json import flatten
from airflow.models import Variable
'''
Connects to the edamam API and sends a request
Return: The response object from the API query
'''
def airflow_var_test( ti ):
print( Variable.get('EDAMAM_ID') )
def edamam_get(ti):
# Initialize Variables
dag_path = os.getcwd()
host = 'https://api.edamam.com/'
recipe_base = 'api/recipes/v2'
url = host + recipe_base
# Xcom Pulls
query= "chicken"
# Initialize our config for the query
payload = {'type': 'public',
'q': query,
'app_id': Variable.get('EDAMAM_ID'),
'app_key': Variable.get('EDAMAM_KEY')
}
# Send a GET request to Edamam API
with requests.get(url, params=payload) as response:
query_results = response.json()['hits']
# Return the response
write_json(query_results, f"{dag_path}/raw_data/chicken_query.json")
def parse_json_request( ti ):
# Initialize variables
hits_list= ti.xcom_pull( task_ids=['get_edamam_request'][0] )
if not hits_list:
raise ValueError( 'no value currently in XComs.')
# Return our cleaned up search results
return edamam_json_cleanup( hits_list )
#[TODO] This is a redirecting function to other helper functions
# Have the return type be important for picking which filetype to convert to
def edamam_json_cleanup( json_list ):
# Initialization
# Isolate the hits and discard the metadata
hits_data = json_list
# Flatten the data from our hits
# Make the json data relational
return edamam_json_flatten( hits_data )
def edamam_json_flatten( json_list ):
# Init
index = 0
for index in range( len( json_list )):
json_list[index] = flatten( json_list[index] )
return json_list
def edamam_json_rename_cols( jason ):
jason.columns = jason.columns.str.replace('recipe_', '', regex=True)
return jason
def write_json( json_txt, path='new_json.json' ):
# [TODO] Initialize filename with date and time
# push file to XCom
with open( path, 'w' ) as outfile:
json.dump( json_txt, outfile )
''' #########
Submission Function
''' #########
def df_submit_mysql( ti ):
# Initialization
table_name = "testing_1"
########################################################
df= pd.json_normalize( ti.xcom_pull(task_ids=['parse_json_request']) )
# Write CREATE TABLE query using our dataframe
# Create the table query
table_query = df_create_table( table_name, df )
# Insert the information query
insert_queries = df_insert( df, table_name )
# Connect to local mysql
with connect( host='127.0.0.1', user=Variable.get('MYSQL_USER'), password=Variable.get('MYSQL_PW'), database=Variable.get('MYSQL_DB')) \
as connection:
cursor = connection.cursor()
# Submit the CREATE TABLE query to the database
cursor.execute( table_query )
connection.commit()
# Submit our INSERT queries into our newly CREATED TABLE
for query in insert_queries:
cursor.execute( query )
connection.commit()
print( cursor.rowcount, ": worked'" )
# Close our connection
cursor.close()
connection.close()
print( 'successful' )
return True
def df_create_table( table_name, df ):
# Initialization
query = f'CREATE TABLE IF NOT EXISTS {table_name} ( id INT AUTO_INCREMENT PRIMARY KEY, \n'
# Create column types (for this exercise, it'll all be strings)
table_cols = create_table_columns( df )
# Add our table columns to our query string
query += table_cols + ' )'
return query
def create_table_columns( df ):
# Initialization
col_string = ""
index = 0
# Loop through the columns of a dataframe to create a table query
for col in df.columns:
# Skip the first one for this example pipeline
if index==0:
index+=1
continue
col_string += f'{col} VARCHAR(255)'
index += 1
if index > 30:
return col_string
else:
col_string+= ',\n'
return col_string
def df_insert( df, table ):
# Initialization
df_cols = create_table_columns( df ).replace( ' VARCHAR(255)', '')
queries = []
row_limit = 10
row = 0
row_list = df.iloc[0: row_limit]
# Create template query string
insert_query= f'INSERT INTO {table} ({df_cols})\
VALUES ($val)'
# Add df info to the query
for row in row_list:
row_info = row[1:31]
# Convert our list to a string that REPLACE can use
row_values = f'\"{row_info[0]}\" '
for value in row_info[1:]:
row_values += f', \n\"{str(value)[:254]}\"'
queries.append( insert_query.replace('$val', row_values))
# Return the string
return queries | JoshusTenakhongva/Mentorship_Repo | food_at_home/dags/airflow_functions.py | airflow_functions.py | py | 5,053 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "airflow.models.Variable.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "airflow.models.Variable",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "airflow... |
32262924755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0011_response'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='weight',
),
migrations.AlterField(
model_name='survey',
name='evaluator',
field=models.CharField(help_text='Leave this blank for the first save. Enter values such as .5{1}+.5{2} for two equally weighted questions.', blank=True, max_length=200),
),
]
| mikelaughton/harold | polls/migrations/0012_auto_20160804_0005.py | 0012_auto_20160804_0005.py | py | 630 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RemoveField",
"line_number": 14,
"usage_type": "call"
},
... |
35217766012 | from itertools import product
import sys
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import random
sys.path.append('../..')
from lib import excelUtils
from lib import httpUtils
from lib import textUtil
from lib.htmlEleUtils import getNodeText
from lib.htmlEleUtils import getInnerHtml
products = []
header=['link','Category','CAS号','Product Name','price','imageName']
def addHeader(title):
if title not in header and len(title) > 0:
header.append(title)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("window-size=1024,768")
# chrome_options.add_argument("--no-sandbox")
browser = webdriver.Chrome(chrome_options=chrome_options)
def getProductInfo(url, type):
print(str(len(products)) + ":" + url)
browser.delete_all_cookies()
browser.get(url)
sope= BeautifulSoup(browser.page_source, "html.parser")
nav = sope.find("div", attrs={"class":"crumbs matp"})
if nav == None:
browser.delete_all_cookies()
browser.get(url)
sope= BeautifulSoup(browser.page_source, "html.parser")
nav = sope.find("div", attrs={"class":"crumbs matp"})
if nav == None:
browser.delete_all_cookies()
browser.get(url)
sope= BeautifulSoup(browser.page_source, "html.parser")
nav = sope.find("div", attrs={"class":"crumbs matp"})
pInfo = {
"Category": type,
"link": url
}
baseInfos = sope.find_all("li", attrs={"class":"proulllli"})
for baseInfo in baseInfos:
ebs = baseInfo.find_all("b")
for b in ebs:
title = getNodeText(b)
if title == "names:":
pInfo["Product Name"] = getNodeText(baseInfo).replace("names:", "")
else:
titlePart = title.split(":")
if len(titlePart) > 1:
addHeader(titlePart[0])
pInfo[titlePart[0]] = titlePart[1]
spans = baseInfo.find_all("span")
for span in spans:
title = getNodeText(span)
titlePart = title.split(":")
if len(titlePart) == 1:
titlePart = title.split(":")
if len(titlePart)>1:
addHeader(titlePart[0])
pInfo[titlePart[0]] = titlePart[1]
specTbs = sope.find_all("table",attrs={"class":"protwtab"})
specStr = ""
for specTb in specTbs:
trs = specTb.find_all("tr")
if len(trs) > 0:
ths = trs[0].find_all("th")
if len(ths)>2:
title = getNodeText(ths[1])
if title == "规格":
for inx,tr in enumerate(trs):
if inx>0:
tds = tr.find_all("td")
specStr += "("+getNodeText(tds[1])+"/"+getNodeText(tds[4])+");"
pInfo["price"] = specStr
infoTrs = sope.find_all("tr")
for infoTr in infoTrs:
tds = infoTr.find_all("td")
if len(tds) == 2:
title = getNodeText(tds[0])
value = getNodeText(tds[1])
addHeader(title)
pInfo[title] = value
imageName = ""
if "Product Name" in pInfo:
imageName = pInfo["Product Name"]+".png"
if "CAS号" in pInfo:
imageName = pInfo["CAS号"]+".png"
pInfo["imageName"] = imageName
imgArea = sope.find("i", attrs={"id":"D2"})
img = imgArea.find("img")
if img!=None:
httpUtils.urllib_download("http://bio-fount.com"+img["src"], imageName)
products.append(pInfo.copy())
def getProductType(url, type1):
    """Open a category listing page and scrape every product it links to.

    Args:
        url: absolute URL of the category listing page.
        type1: category label forwarded to getProductInfo for each product.
    """
    browser.get(url)
    page = BeautifulSoup(browser.page_source, "html.parser")
    listing = page.find("ul", attrs={"id": "mo"}).find_all("li", attrs={"class": "fl"})
    if not listing:
        # An empty listing usually means a stale session; reset cookies and retry once.
        time.sleep(1)
        browser.delete_all_cookies()
        browser.get(url)
        page = BeautifulSoup(browser.page_source, "html.parser")
        listing = page.find_all("article")
    for entry in listing:
        anchor = entry.find("a")
        getProductInfo("http://bio-fount.com" + anchor["href"], type1)
# getProductType("http://bio-fount.com/cn/goods-list/1375.html",'cDNA Clones')
# getProductInfo("http://bio-fount.com/cn/goods2/61740_1375.html", "a")
# Crawl every paginated category listing; the page ranges are hard-coded to
# the page counts observed on the site when this script was written.
for pageIndex in range(1, 5):
    getProductType("http://bio-fount.com/cn/goods-list/1375__"+str(pageIndex)+".html",'脂肪族含氟砌块')
for pageIndex in range(1, 6):
    getProductType("http://bio-fount.com/cn/goods-list/1374__"+str(pageIndex)+".html",'杂环含氟砌块')
# Single-page category (no pagination suffix needed).
getProductType("http://bio-fount.com/cn/goods-list/1372.html",'氟标记化合物')
for pageIndex in range(1, 22):
    getProductType("http://bio-fount.com/cn/goods-list/1371__"+str(pageIndex)+".html",'芳香族含氟砌块')
excelUtils.generateExcel('bio-fount.xlsx', products, header) | Just-Doing/python-caiji | src/work/20230110/bio-fount.py | bio-fount.py | py | 4,549 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "selenium... |
38097195752 | import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
from os.path import exists
from GaslightEnv import GaslightEnv
from stable_baselines3 import PPO, TD3
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from utils import distance
#Callback class that saves the model after a set interval of steps.
class GaslightCheckpoint(CheckpointCallback):
    """Callback that periodically saves the attack model to disk.

    Every ``save_interval`` environment steps the current model is written
    to the path given by ``rl_model``. A non-positive interval or a ``None``
    path disables saving.
    """

    def __init__(self, save_interval, rl_model):
        super().__init__(save_interval, ".", name_prefix=rl_model)
        self.save_interval = save_interval
        self.rl_model = rl_model

    def _on_step(self) -> bool:
        # Save only on exact multiples of the interval, and only when a
        # destination path was supplied.
        due = self.save_interval > 0 and self.n_calls % self.save_interval == 0
        if due and self.rl_model is not None:
            self.model.save(self.rl_model)
        return True
def gaslightRun(predict, extra, input_shape, input_range, max_delta, target, norm, model_name, framework="PPO", save_interval=0, param_file=None):
    """Train an RL-based adversarial attack against a victim classifier and plot metrics.

    Args:
        predict: callable(input, extra) -> label; the victim classifier.
        extra: opaque data forwarded to ``predict`` on every call.
        input_shape: shape of the inputs the victim accepts.
        input_range: (low, high) bounds for valid input values.
        max_delta: maximum perturbation, forwarded to the environment.
        target: target label for a targeted attack, or None for untargeted.
        norm: distance norm used by the environment's reward.
        model_name: path used to save/load the attack model (None disables).
        framework: RL algorithm to use, "PPO" or "TD3".
        save_interval: steps between checkpoint saves; 0 disables checkpoints.
        param_file: optional pickled Optuna study with tuned hyperparameters.
    """
    if framework == "PPO":
        hyperparams = {}
        net_arch = dict(pi=[256, 256], vf=[256, 256])
        hyperparams['policy_kwargs'] = dict(net_arch=net_arch)
        #Hyperparameters collected from Optuna.py
        if param_file is not None:
            study = pickle.load(open(param_file, 'rb'))
            hyperparams = study.best_params
            # PPO requires batch_size <= n_steps; clamp if Optuna disagrees.
            if hyperparams['batch_size'] > hyperparams['n_steps']:
                hyperparams['batch_size'] = hyperparams['n_steps']
        #Create vectorized environment and model-saving callback.
        env_kwargs = {
            "predict": predict,
            "extra": extra,
            "input_shape": input_shape,
            "input_range": input_range,
            "max_delta": max_delta,
            "target": target,
            "norm": norm
        }
        vec_env = make_vec_env(GaslightEnv, 4, env_kwargs=env_kwargs)
        checkpoint_callback = GaslightCheckpoint(save_interval, model_name)
        #Create or load attack model.
        model_attack = PPO("MlpPolicy", vec_env, **hyperparams)
        if model_name is not None and exists(model_name):
            model_attack.set_parameters(model_name)
    elif framework == "TD3":
        hyperparams = {}
        hyperparams['policy_kwargs'] = dict(net_arch=[256, 256])
        #Hyperparameters collected from Optuna.py
        if param_file is not None:
            study = pickle.load(open(param_file, 'rb'))
            hyperparams = study.best_params
            # Translate Optuna's noise_type/noise_std pair into the
            # action_noise object TD3 actually consumes.
            if hyperparams['noise_type'] == 'normal':
                hyperparams['action_noise'] = NormalActionNoise(
                    mean=np.zeros(input_shape), sigma=hyperparams['noise_std'] * np.ones(input_shape)
                )
            elif hyperparams['noise_type'] == 'ornstein-uhlenbeck':
                hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(
                    mean=np.zeros(input_shape), sigma=hyperparams['noise_std'] * np.ones(input_shape)
                )
            del hyperparams['noise_type']
            del hyperparams['noise_std']
            hyperparams['gradient_steps'] = hyperparams['train_freq']
        #Create environment and model-saving callback.
        env_kwargs = {
            "predict": predict,
            "extra": extra,
            "input_shape": input_shape,
            "input_range": input_range,
            "max_delta": max_delta,
            "target": target,
            "norm": norm
        }
        vec_env = make_vec_env(GaslightEnv, 4, env_kwargs=env_kwargs)
        checkpoint_callback = GaslightCheckpoint(save_interval, model_name)
        #Create or load attack model.
        model_attack = TD3("MlpPolicy", vec_env, **hyperparams)
        if model_name is not None and exists(model_name):
            model_attack.set_parameters(model_name)
    else:
        print(f"Framework {framework} does not exist. Available frameworks are (PPO, TD3)")
        exit()
    #Generate 100 random inputs for testing.
    originals = [np.random.uniform(low=input_range[0], high=input_range[1], size=input_shape) for _ in range(100)]
    #Determine "true" labels from testing inputs.
    true_labels = [predict(x, extra) for x in originals]
    #Metrics used to validate attack model. Includes L2 Norm, L-Inf Norm, and Success Rate.
    timesteps = []
    l2_list = []
    linf_list = []
    success_list = []
    #Create subplots to visualize metrics.
    plt.ion()
    figure, ax = plt.subplots(1, 3, figsize=(18, 6))
    #Each iteration trains the attack model for a certain amount of steps. After each iteration, recalculate the metrics.
    for timestep in range(500):
        #Train the attack model for 1000 steps.
        model_attack.learn(1000, progress_bar=True, callback=checkpoint_callback)
        #Initialize metric averages to 0.
        l2_avg = 0
        linf_avg = 0
        success_count = 0
        #For every testing input, perturb it and calculate metrics.
        for idx in range(len(originals)):
            #Find the optimal distortion/action to modify the input values.
            action, _ = model_attack.predict(originals[idx])
            adv = np.clip(originals[idx] + action, input_range[0], input_range[1])
            #Feed perturbed input into victim classifier and check its label.
            new_label = predict(adv, extra)
            #Calculate distance metrics.
            l2_avg += distance(adv, originals[idx], 2)
            linf_avg += distance(adv, originals[idx], np.inf)
            #Determine if the attack is successful (Either for untargeted or targeted attacks).
            if (target is None and new_label != true_labels[idx]) or (target is not None and new_label == target):
                success_count += 1
        #Average findings across all tests.
        timesteps.append((timestep + 1) * 1000)
        l2_list.append(l2_avg / len(originals))
        linf_list.append(linf_avg / len(originals))
        success_list.append(success_count * 100 / len(originals))
        #Plot the new metrics.
        ax[0].clear()
        ax[1].clear()
        ax[2].clear()
        ax[0].plot(timesteps, l2_list)
        ax[0].set_title("L-2")
        ax[0].set_xlabel("Timesteps")
        ax[1].plot(timesteps, linf_list)
        ax[1].set_title("L-Inf")
        ax[1].set_xlabel("Timesteps")
        ax[2].plot(timesteps, success_list)
        ax[2].set_title("Success Rate")
        ax[2].set_xlabel("Timesteps")
        figure.canvas.draw()
        figure.canvas.flush_events()
        time.sleep(0.1)
        # Overwrites the same file each iteration, keeping only the latest plot.
        plt.savefig(f"Graphs/Graph.png")
| RajatSethi2001/Gaslight | GaslightEngine.py | GaslightEngine.py | py | 6,819 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "stable_baselines3.common.callbacks.CheckpointCallback",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 37,
"usage_type": "call"
},
{
"a... |
30176950739 | from functools import cache
def najcenejsa_pot(mat):
    """Return (cost, path) of the cheapest monotone path through ``mat``.

    The path starts at the top-left cell, moves only down ("↓") or right
    ("→"), ends at the bottom-right cell (marked "o"), and its cost is the
    sum of all visited cells.
    """
    rows, cols = len(mat), len(mat[0])

    @cache
    def best_from(r, c):
        # Goal cell: the cost is just its own value.
        if r == rows - 1 and c == cols - 1:
            return (mat[r][c], "o")
        candidates = []
        if r + 1 < rows:
            cost_down, path_down = best_from(r + 1, c)
            candidates.append((cost_down, "↓" + path_down))
        if c + 1 < cols:
            cost_right, path_right = best_from(r, c + 1)
            candidates.append((cost_right, "→" + path_right))
        # min() breaks cost ties lexicographically on the path string.
        cost, path = min(candidates)
        return (mat[r][c] + cost, path)

    return best_from(0, 0)
# Sample 5x5 cost matrix used to exercise najcenejsa_pot.
mat = [[131, 673, 234, 103, 18],
       [201, 96, 342, 965, 150],
       [630, 803, 746, 422, 111],
       [537, 699, 497, 121, 956],
[805, 732, 524, 37, 331]] | matijapretnar/programiranje-1 | 13-memoizacija-v-pythonu/predavanja/pot.py | pot.py | py | 772 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "functools.cache",
"line_number": 5,
"usage_type": "name"
}
] |
6172988761 | from google.cloud import texttospeech
from pydub import AudioSegment
from pydub.playback import play
import os  # BUGFIX: `os` was used below but never imported in this file

# Path to the Google Cloud service-account JSON key used for TTS auth.
google_credentials_file = "PATH_TO_YOUR_GOOGLE_CREDENTIALS_JSON"
# Set the environment variable for Google Text-to-Speech API
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = google_credentials_file
# Initialize Google Text-to-Speech API client
tts_client = texttospeech.TextToSpeechClient()
# Function to synthesize text to speech using Google Text-to-Speech API
def synthesize_text(text, language_code="zh-CN"):
    """Render ``text`` as LINEAR16 audio bytes via Google Cloud TTS.

    Uses a gender-neutral voice for ``language_code`` (Mandarin Chinese by
    default) and returns the raw audio content of the API response.
    """
    request_input = texttospeech.SynthesisInput(text=text)
    voice_params = texttospeech.VoiceSelectionParams(
        language_code=language_code, ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL
    )
    audio_settings = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.LINEAR16
    )
    response = tts_client.synthesize_speech(
        input=request_input, voice=voice_params, audio_config=audio_settings
    )
    return response.audio_content
# Function to play the synthesized audio
def play_audio(audio_data):
    """Decode WAV bytes and play them on the default audio device.

    Args:
        audio_data: raw LINEAR16/WAV bytes, e.g. from synthesize_text().
    """
    import io  # BUGFIX: `io` is not imported at module level in this file

    audio = AudioSegment.from_file(io.BytesIO(audio_data), format="wav")
    play(audio)
| qiusiyuan/gpt-live-stream | src/bilibiligptlive/tts.py | tts.py | py | 1,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "google.cloud.texttospeech.TextToSpeechClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "google.cloud.texttospeech",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "google.cloud.texttospeech.SynthesisInput",
"line_number": 14,
"usa... |
32846235212 | #Goal of this project is to make a song that we like on youtube go directly to our spotify "liked youtube songs" playlist
""" STEPS
1 - Log into youtube
2 - Grab our playlist
3 - Create a new playlist
4 - Search the song
5 - Add the song to the spotify playlist
"""
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import requests
import youtube_dl
from exceptions import ResponseException
from userData import spotyId,spotyToken
# OAuth scope granting read-only access to the user's YouTube account data.
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
from youtube_title_parse import get_artist_title
#print(spotifyUser.token)
class CreatePlaylist:
    """Copy the songs of a YouTube playlist into a new Spotify playlist.

    Workflow: read the YouTube playlist items, parse artist/title from each
    video title, look each song up on Spotify, create a new playlist and
    add every track that was found.
    """
    def __init__(self):
        #self.youtube_client = self.yt_client()
        # Maps video title -> {youtube_url, song_name, artist, spotify_uri}.
        self.all_song_info = {}
    #1 - Log into youtube
    def yt_client(self):
        """Run the installed-app OAuth flow and return a YouTube API client."""
        # Disable OAuthlib's HTTPS verification when running locally.
        # *DO NOT* leave this option enabled in production.
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        api_service_name = "youtube"
        api_version = "v3"
        client_secrets_file = "client_secret.json"
        # Get credentials and create an API client
        scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)
        credentials = flow.run_console()
        # from the Youtube DATA API
        youtube_client = googleapiclient.discovery.build(
            api_service_name, api_version, credentials=credentials)
        return youtube_client
    #2 - Grab our playlist
    def get_ytplaylist(self):
        """Fetch playlist items from YouTube and collect song info per video."""
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        api_service_name = "youtube"
        api_version = "v3"
        client_secrets_file = "client_secret.json"
        # Get credentials and create an API client
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)
        credentials = flow.run_console()
        youtube = googleapiclient.discovery.build(
            api_service_name, api_version, credentials=credentials)
        request = youtube.playlistItems().list(
            part="snippet,contentDetails",
            maxResults=25,
            playlistId="PLQ_99qrIfCg3Mm7SDtxHfIBMt7aUkIDZD"
        )
        response = request.execute()
        for item in response["items"]:
            video_title = item["snippet"]["title"]
            # BUGFIX: item["id"] is the *playlist-item* id, not the video id;
            # a valid watch URL needs contentDetails.videoId.
            youtube_url = "https://www.youtube.com/watch?v={}".format(
                item["contentDetails"]["videoId"])
            # BUGFIX: get_artist_title returns None when the title cannot be
            # parsed; unpacking that directly raised a TypeError.
            parsed = get_artist_title(video_title)
            if parsed is None:
                continue
            artist, title = parsed
            if title is not None and artist is not None:
                # save all important info and skip any missing song and artist
                self.all_song_info[video_title] = {
                    "youtube_url": youtube_url,
                    "song_name": title,
                    "artist": artist,
                    # add the uri, easy to get song to put into playlist
                    "spotify_uri": self.search_song(title, artist)
                }
    #3 - Create a new playlist
    def new_spotifyplaylist(self):
        """Create the target Spotify playlist and return its id."""
        request_body = json.dumps({
            "name": "Youtube to Spotify playlist",
            "description": "Playlist of a program that I did in python that picks my songs from a youtube playlist, search them and add to this playlist :) ",
            "public": True
        })
        query = f"https://api.spotify.com/v1/users/{spotyId}/playlists"
        response = requests.post(
            url=query,
            data=request_body,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {spotyToken}"
            }
        )
        response_json = response.json()
        # playlist id
        return response_json["id"]
    #4 - Search the song
    def search_song(self, song, artist):
        """Search Spotify for a track; return its URI, or None when not found.

        Passing the query through ``params`` lets requests URL-encode the
        song/artist values (the old hand-built query string broke on spaces
        and other special characters).
        """
        response = requests.get(
            "https://api.spotify.com/v1/search",
            params={
                "query": "track:{} artist:{}".format(song, artist),
                "type": "track",
                "offset": 0,
                "limit": 20
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(spotyToken)
            }
        )
        response_json = response.json()
        songs = response_json["tracks"]["items"]
        if not songs:
            # Nothing matched on Spotify; add_song() skips entries without a URI.
            return None
        # first song only
        return songs[0]["uri"]
    #5 - Add the song to the spotify playlist
    def add_song(self):
        """Create the playlist and add every song Spotify could resolve."""
        # populate dictionary with our liked songs
        self.get_ytplaylist()
        # collect all of the uris, skipping songs not found on Spotify
        uris = [info["spotify_uri"]
                for song, info in self.all_song_info.items()
                if info["spotify_uri"] is not None]
        # create a new playlist
        playlist_id = self.new_spotifyplaylist()
        # add all songs into new playlist
        request_data = json.dumps(uris)
        query = "https://api.spotify.com/v1/playlists/{}/tracks".format(
            playlist_id)
        response = requests.post(
            query,
            data=request_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(spotyToken)
            }
        )
        # check for valid response status
        if response.status_code != 201:
            raise ResponseException(response.status_code)
        response_json = response.json()
        return response_json
if __name__ == '__main__':
    # Build the helper and run the full YouTube -> Spotify sync.
    CreatePlaylist().add_song()
| GiovaniCenta/YoutubetoSpotify | spotyoutube.py | spotyoutube.py | py | 6,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "google_auth_oauthlib.flow.flow.InstalledAppFlow.from_client_secrets_file",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.flow",
"line_number": 47,
... |
16515229074 | import time
from werkzeug.wrappers import Response
import netmanthan
import netmanthan.rate_limiter
from netmanthan.rate_limiter import RateLimiter
from netmanthan.tests.utils import netmanthanTestCase
from netmanthan.utils import cint
class TestRateLimiter(netmanthanTestCase):
	"""Exercise netmanthan.rate_limiter: applying limits, rejection, headers and counters."""
	def test_apply_with_limit(self):
		"""apply() attaches a RateLimiter to netmanthan.local when a limit is configured."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 1}
		netmanthan.rate_limiter.apply()
		self.assertTrue(hasattr(netmanthan.local, "rate_limiter"))
		self.assertIsInstance(netmanthan.local.rate_limiter, RateLimiter)
		# clean up the cache entry and injected attribute so tests stay independent
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_apply_without_limit(self):
		"""apply() is a no-op when no rate limit is configured."""
		netmanthan.conf.rate_limit = None
		netmanthan.rate_limiter.apply()
		self.assertFalse(hasattr(netmanthan.local, "rate_limiter"))
	def test_respond_over_limit(self):
		"""Exceeding the limit produces a 429 Response and Retry-After/X-RateLimit-* headers."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		self.assertRaises(netmanthan.TooManyRequestsError, netmanthan.rate_limiter.apply)
		netmanthan.rate_limiter.update()
		response = netmanthan.rate_limiter.respond()
		self.assertIsInstance(response, Response)
		self.assertEqual(response.status_code, 429)
		headers = netmanthan.local.rate_limiter.headers()
		self.assertIn("Retry-After", headers)
		self.assertNotIn("X-RateLimit-Used", headers)
		self.assertIn("X-RateLimit-Reset", headers)
		self.assertIn("X-RateLimit-Limit", headers)
		self.assertIn("X-RateLimit-Remaining", headers)
		# the 0.01 limit surfaces as 10000 in the headers (unit scaling inside RateLimiter)
		self.assertTrue(int(headers["X-RateLimit-Reset"]) <= 86400)
		self.assertEqual(int(headers["X-RateLimit-Limit"]), 10000)
		self.assertEqual(int(headers["X-RateLimit-Remaining"]), 0)
		netmanthan.cache().delete(limiter.key)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_respond_under_limit(self):
		"""respond() returns None while the limit has not been exceeded."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		netmanthan.rate_limiter.apply()
		netmanthan.rate_limiter.update()
		response = netmanthan.rate_limiter.respond()
		self.assertEqual(response, None)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_headers_under_limit(self):
		"""Under the limit: no Retry-After, and usage/remaining counters are reported."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		netmanthan.rate_limiter.apply()
		netmanthan.rate_limiter.update()
		headers = netmanthan.local.rate_limiter.headers()
		self.assertNotIn("Retry-After", headers)
		self.assertIn("X-RateLimit-Reset", headers)
		self.assertTrue(int(headers["X-RateLimit-Reset"] < 86400))
		self.assertEqual(int(headers["X-RateLimit-Used"]), netmanthan.local.rate_limiter.duration)
		self.assertEqual(int(headers["X-RateLimit-Limit"]), 10000)
		self.assertEqual(int(headers["X-RateLimit-Remaining"]), 10000)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_reject_over_limit(self):
		"""A fresh limiter with the same key rejects once the budget is already spent."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		limiter = RateLimiter(0.01, 86400)
		self.assertRaises(netmanthan.TooManyRequestsError, limiter.apply)
		netmanthan.cache().delete(limiter.key)
	def test_do_not_reject_under_limit(self):
		"""A limiter with a larger budget still accepts after prior usage."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		limiter = RateLimiter(0.02, 86400)
		self.assertEqual(limiter.apply(), None)
		netmanthan.cache().delete(limiter.key)
	def test_update_method(self):
		"""update() persists the measured duration in the cache under the limiter's key."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		self.assertEqual(limiter.duration, cint(netmanthan.cache().get(limiter.key)))
		netmanthan.cache().delete(limiter.key)
| netmanthan/Netmanthan | netmanthan/tests/test_rate_limiter.py | test_rate_limiter.py | py | 3,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "netmanthan.tests.utils.netmanthanTestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "netmanthan.conf",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "netmanthan.rate_limiter.apply",
"line_number": 15,
"usage_type": "call"
... |
73325698663 | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import tree
import pydotplus
# Load the iris dataset and separate features from class labels.
iris = load_iris()
iris_X = iris.data
iris_Y = iris.target
# Hold out 30% of the samples for testing (split is random on each run).
X_train, X_test, y_train, y_test = train_test_split(
    iris_X, iris_Y, test_size=0.3)
# Fit a decision tree classifier on the training split.
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
# Export the fitted tree to Graphviz DOT text and render it as a PDF.
dot_data = tree.export_graphviz(clf, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("iris.pdf")
| beancookie/sklearn | tree.py | tree.py | py | 473 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 12,
"usage_type": ... |
2986533119 |
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import time
from constants import URL
def get_product_properties(browser):
    """Open the first search result and scrape its detail-page properties.

    Clicks the first product in the current search results, then reads the
    spec table, availability badge and price from the detail page.

    Args:
        browser: selenium webdriver currently showing search results.

    Returns:
        dict with the keys found in ``searched_keys`` plus "Lieferbar"
        (bool availability) and "Preis" (price text).
    """
    properties = {}
    search_result = browser.find_element(By.CLASS_NAME, "sooqrSearchResults")
    title = search_result.find_element(By.CLASS_NAME, "productlist-title")
    title.find_element(By.TAG_NAME, "a").click()
    time.sleep(0.1)
    content_place_holder = browser.find_element(By.ID, "pdetailTableSpecs")
    table_body = content_place_holder.find_element(By.TAG_NAME, "tbody")
    # Only these spec-table rows are of interest.
    searched_keys = {"Zusammenstellung", "Nadelstärke"}
    for table_row in table_body.find_elements(By.TAG_NAME, "tr"):
        key, value = table_row.find_elements(By.TAG_NAME, "td")
        if key.text in searched_keys:
            properties[key.text] = value.text
    try:
        # Available iff the green stock badge reads "Lieferbar".
        properties["Lieferbar"] = browser.find_element(By.CLASS_NAME, "stock-green").text == "Lieferbar"
    except NoSuchElementException:
        # No green badge on the page -> not available. (Previously a bare
        # `except:` here swallowed every error, including KeyboardInterrupt.)
        properties["Lieferbar"] = False
    properties["Preis"] = browser.find_element(By.CLASS_NAME, "product-price-amount").text
    return properties
def select_correct_brand(browser, marke):
    """Apply the brand filter in the search sidebar.

    Args:
        browser: selenium webdriver showing the search result page.
        marke: brand name to select in the filter.

    Returns:
        Empty string on success, otherwise a short error message
        ("No search result found" / "No such brand for search term").
    """
    # Locate the brand filter; it is absent when the search returned nothing.
    try:
        brand_filter = browser.find_element(By.ID, "sooqr44898be26662b0dfSearchFilter191640")
        brand_inputs = brand_filter.find_elements(By.TAG_NAME, "input")
    except NoSuchElementException:
        return "No search result found"
    # Click the checkbox whose value matches the requested brand.
    matching = next(
        (inp for inp in brand_inputs if inp.get_attribute("value") == marke),
        None,
    )
    if matching is None:
        return "No such brand for search term"
    matching.click()
    return ""
def search_product(browser, marke, bezeichnung):
    """Scrape all needed information for one product.

    Args:
        browser ([webdriver]): Firefox browser
        marke ([str]): brand name used to disambiguate the search results
        bezeichnung ([str]): product designation typed into the search box

    Returns:
        [dict]: dictionary of properties of the searched product; contains
        an "error" key when the product could not be found.
    """
    # Navigate to the site's home URL before every search because elements
    # occasionally could not be found (although visible) after continuing
    # from the previous product page.
    browser.get(URL)
    search_field = browser.find_element(By.ID, "searchSooqrTop")
    search_field.clear()
    search_field.send_keys(bezeichnung)
    product_properties = {"marke": marke, "bezeichnung": bezeichnung}
    # Check for errors such as no search results or none of the correct brand.
    occured_errors = select_correct_brand(browser, marke)
    if occured_errors != "":
        product_properties["error"] = occured_errors
        return product_properties
    product_properties.update(get_product_properties(browser))
return product_properties | Felix-95/programming_challenge | src/scraper.py | scraper.py | py | 3,238 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 1... |
72516592744 | import time
import datetime
from timeit import default_timer as timer
import settings
from pymongo import MongoClient
from faker import Faker
from bson.decimal128 import Decimal128
import random
fake = Faker()
####
# Start script
####
# Record the wall-clock start (for the banner) and a timer reference
# (for the throughput calculation at the end of the script).
startTs = time.gmtime()
start = timer()
print("================================")
print(" Generating Transactions Data ")
print("================================")
print("\nStarting " + time.strftime("%Y-%m-%d %H:%M:%S", startTs) + "\n")
####
# Main start function
####
def main():
    """Generate NUM_DOCS fake bank transactions and insert them into MongoDB.

    Each document carries fake customer identity fields, a random GeoJSON
    location, and a deposit/withdrawal transaction dated within the last
    year. Documents are inserted one by one into MDB_COLLECTION.
    """
    mongo_client = MongoClient(MDB_CONNECTION)
    db = mongo_client[MDB_DATABASE]
    my_collection = db[MDB_COLLECTION]
    print('Begin generating txns documents.')
    print('Number of documents to generate: ' + str(NUM_DOCS))
    for index in range(int(NUM_DOCS)):
        fake_timestamp = fake.date_between(start_date='-1y', end_date='today')
        txn_types = ['deposit', 'withdrawal']
        txns = random.choice(txn_types)
        my_txn_document = {
            "customerId": fake.ssn(),
            "name": fake.name(),
            "address": fake.street_address(),
            "city": fake.city(),
            "state": fake.state(),
            "postalCode": fake.postcode(),
            "email": fake.email(),
            "lastLocation": {
                "type": "Point",
                "coordinates": [
                    Decimal128(fake.longitude()),
                    Decimal128(fake.latitude())
                ]
            },
            "txnType": txns,
            "txnAmount": random.randint(0, 10000),
            "txnDate": datetime.datetime(fake_timestamp.year, fake_timestamp.month, fake_timestamp.day)
        }
        # Print example doc on first doc creation
        # (BUGFIX: was `index == 1`, which printed the *second* document)
        if index == 0:
            print('Example Document')
            print(my_txn_document)
        # Indicate progress every 100 docs
        if index % 100 == 0:
            print('Docs inserted: ' + str(index))
        my_collection.insert_one(my_txn_document)
####
# Constants loaded from .env file
####
MDB_CONNECTION = settings.MDB_CONNECTION  # MongoDB connection string
MDB_DATABASE = settings.MDB_DATABASE      # target database name
MDB_COLLECTION = settings.MDB_COLLECTION  # target collection name
NUM_DOCS = settings.NUM_DOCS              # number of documents to generate
####
# Main
####
if __name__ == '__main__':
    main()
####
# Indicate end of script
####
end = timer()
endTs = time.gmtime()
total_time = end - start
# Treat sub-second runs as one second so the rate is not wildly inflated.
if total_time < 1:
    docs_inserted_time = int(NUM_DOCS) / 1
else:
    docs_inserted_time = int(NUM_DOCS) / total_time
print("\nEnding " + time.strftime("%Y-%m-%d %H:%M:%S", endTs))
print('===============================')
print('Total Time Elapsed (in seconds): ' + str(total_time))
print('===============================')
print('Number of Docs inserted per second: ' + str(docs_inserted_time))
print('===============================')
| blainemincey/generate_sample_data | generate_transactions_data.py | generate_transactions_data.py | py | 2,781 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "faker.Faker",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"lin... |
35860587241 | from fastapi import FastAPI, Depends
from sqlalchemy.orm import Session
from typing import List, Optional
from database import SessionLocal, engine
import models, schemas, crud
# Create the database tables declared on the ORM models (no-op if present).
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
def get_db():
    """FastAPI dependency: yield one DB session and always close it."""
    session = SessionLocal()
    try:
        yield session
    finally:
        # Release the connection even if the request handler raised.
        session.close()
'''
Organization
Employee
Theme
KwCategory
KeyWord
Meeting
ICS
Schedule
Entry
'''
@app.get("/")
async def index():
    """Health-check endpoint confirming the API is up."""
    payload = {"message": "Success"}
    return payload
@app.get("/organizations", response_model=List[schemas.Organization])
async def read_users(
    limit: int = 100,
    db: Session = Depends(get_db),
    q_name: Optional[str] = None
):
    """List organizations, optionally filtered by name (q_name).

    NOTE(review): despite its name this endpoint returns organizations,
    not users; the function name is kept so existing references still work.
    """
    organizations = crud.get_organizations(db=db, limit=limit, q_name=q_name)
    return organizations
# @app.get("/rooms", response_model=List[schemas.Room])
# async def read_rooms(skip: int =0, limit: int = 100, db: Session = Depends(get_db)):
# rooms = crud.get_rooms(db=db, skip=skip, limit=limit)
# return rooms
# @app.get("/bookings", response_model=List[schemas.Booking])
# async def read_bookings(skip: int =0, limit: int = 100, db: Session = Depends(get_db)):
# bookings = crud.get_bookings(db=db, skip=skip, limit=limit)
# return bookings
@app.post("/organizations", response_model=schemas.Organization)
async def create_organization(data: schemas.OrganizationCreate, db: Session = Depends(get_db)):
    """Create and return an Organization from the posted payload."""
    organization = crud.create_organization(db=db, data=data)
    return organization
@app.post("/employees", response_model=schemas.Employee)
async def create_employee(data: schemas.EmployeeCreate, db: Session = Depends(get_db)):
    """Create and return an Employee from the posted payload."""
    employee = crud.create_employee(db=db, data=data)
    return employee
@app.post("/themes", response_model=schemas.Theme)
async def create_theme(data: schemas.ThemeCreate, db: Session = Depends(get_db)):
    """Create and return a Theme from the posted payload."""
    theme = crud.create_theme(db=db, data=data)
    return theme
@app.post("/kwcategories", response_model=schemas.KwCategory)
async def create_kwcategory(data: schemas.KwCategoryCreate, db: Session = Depends(get_db)):
    """Create and return a keyword category from the posted payload."""
    kwcategory = crud.create_kwcategory(db=db, data=data)
    return kwcategory
@app.post("/keywords", response_model=schemas.KeyWord)
async def create_keyword(data: schemas.KeyWordCreate, db: Session = Depends(get_db)):
    """Create and return a KeyWord from the posted payload."""
    keyword = crud.create_keyword(db=db, data=data)
    return keyword
@app.post("/meetings", response_model=schemas.Meeting)
async def create_meeting(data: schemas.MeetingCreate, db: Session = Depends(get_db)):
    """Create and return a Meeting from the posted payload."""
    meeting = crud.create_meeting(db=db, data=data)
    return meeting
@app.post("/icss", response_model=schemas.ICS)
async def create_ics(data: schemas.ICSCreate, db: Session = Depends(get_db)):
    """Create and return an ICS record from the posted payload."""
    ics = crud.create_ics(db=db, data=data)
    return ics
@app.post("/schedules", response_model=schemas.Schedule)
async def create_schedule(data: schemas.ScheduleCreate, db: Session = Depends(get_db)):
    """Create and return a Schedule from the posted payload."""
    schedule = crud.create_schedule(db=db, data=data)
    return schedule
@app.post("/entries", response_model=schemas.Entry)
async def create_entry(data: schemas.EntryCreate, db: Session = Depends(get_db)):
    """Create and return an Entry from the posted payload."""
    entry = crud.create_entry(db=db, data=data)
    return entry
# @app.post("/users", response_model=schemas.User)
# async def create_users(user: schemas.UserCreate, db: Session = Depends(get_db)):
# user = crud.create_user(db=db, user=user)
# return user
# @app.post("/rooms", response_model=schemas.Room)
# async def create_rooms(room: schemas.RoomCreate, db: Session = Depends(get_db)):
# room = crud.create_room(db=db, room=room)
# return room
# @app.post("/bookings", response_model=schemas.Booking)
# async def create_bookings(booking: schemas.BookingCreate, db: Session = Depends(get_db)):
# booking = crud.create_booking(db=db, booking=booking)
# return booking
| ishi23/fastapi-streamlit | conf_app_test/sql_app/main.py | main.py | py | 3,816 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Base.metadata.create_all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Base",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "database.engine",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "fastapi... |
14029297694 | import asyncio
import datetime
import os
import discord
from new_skyline2 import SKYLINE
# Bot token comes from the environment; never hard-code it.
token = os.environ['token']
# NOTE(review): asyncio.get_event_loop() outside a running loop is
# deprecated since Python 3.10 -- confirm the target runtime version.
loop = asyncio.get_event_loop()
client = SKYLINE(loop=loop, intents=discord.Intents.all())
async def main():
    """Run the Discord client until the next 17:01 UTC, then shut down.

    Starts the client with a timeout that expires at 17:01 UTC (today or
    tomorrow), closes it, then cancels any remaining tasks in batches.
    """
    now = datetime.datetime.utcnow()
    endtime = now.replace(hour=17, minute=1, second=0, microsecond=0)
    if now >= endtime:
        # Already past today's cutoff; aim for tomorrow's.
        endtime += datetime.timedelta(days=1)
    # BUGFIX: asyncio.wait() requires tasks/futures, not bare coroutines
    # (deprecated since 3.8, TypeError on 3.11+), so wrap client.start().
    start_task = asyncio.ensure_future(client.start(token))
    await asyncio.wait([start_task], timeout=(endtime - now).total_seconds())
    await client.close()
    # Cancel everything that is still running except this coroutine's task.
    all_tasks = [t for t in asyncio.all_tasks(loop=loop) if t != main_task]
    while all_tasks:
        done, pending = await asyncio.wait(all_tasks, timeout=5)
        print(pending)
        [t.cancel() for t in pending]
        if not pending:
            break
# Drive main() to completion on the shared loop, then dispose of the loop.
main_task = loop.create_task(main())
loop.run_until_complete(main_task)
loop.close()
| Kesigomon/Skyline_py | run.py | run.py | py | 870 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "new_skyline2.SKYLINE",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.In... |
19082484272 | import cv2
import sys
import numpy as np
class Context:
    """Small OpenCV UI helper: named image buffers, sliders and key toggles.

    Wraps a single cv2 window ("image") and keeps:
      * an ordered list of image buffers browsable with the 'b'/'v' keys,
      * trackbar sliders with change detection,
      * keyboard toggles cycling through a fixed number of states,
      * a generic key/value store and a run-once guard.
    """
    def __init__(self):
        self.sliders = {}          # trackbar name -> {'old_value': int}
        self.toggles = {}          # key code -> {'state', 'has_changed', 'callback'}
        self._redraw = False       # set to trigger a repaint in eventloop()
        self.cur_buf_id = 0        # index of the buffer currently displayed
        self.buffers = []          # buffers in creation order
        self.buffers_by_name = {}  # name -> (buffer, index in self.buffers)
        self._once = []            # keys already seen by once()
        self._store = {}           # generic key/value store
        cv2.namedWindow('image')

    def once(self, key):
        """Return True the first time *key* is seen, False afterwards."""
        if key in self._once:
            return False
        self._once.append(key)
        return True

    def store(self, key, data):
        """Remember *data* under *key* in the generic store."""
        self._store[key] = data

    def load(self, key):
        """Return the value previously stored under *key*."""
        return self._store[key]

    def redraw(self, *_):
        """Request a repaint on the next eventloop iteration (callback-friendly)."""
        self._redraw = True

    def add_buffer(self, name, shape=[], src=None):
        """Register a new named image buffer (ignored if *name* already exists).

        Either pass an existing image via *src*, or a *shape* for a fresh
        zero-filled uint8 buffer. The new buffer becomes the current one.
        """
        if name not in self.buffers_by_name:
            img = src if src is not None else np.zeros(shape, np.uint8)
            self.buffers.append(img)
            self.cur_buf_id = len(self.buffers)-1
            self.buffers_by_name[name] = (self.buffers[-1], self.cur_buf_id)

    def b(self, name):
        """Return the buffer registered under *name*."""
        return self.buffers_by_name[name][0]

    def __setitem__(self, key, value):
        """Assign *value* as the buffer named *key*, creating it if needed."""
        if key in self.buffers_by_name:
            id = self.buffers_by_name[key][1]
            self.buffers_by_name[key] = (value, id)
            self.buffers[id] = value
        else:
            self.add_buffer(key, src=value)

    def __getitem__(self, key):
        """Look up *key* among buffers first, then the generic store, else None."""
        if key in self.buffers_by_name:
            return self.buffers_by_name[key][0]
        if key in self._store:
            # BUGFIX: previously returned self._store['key'] (a string
            # literal), raising KeyError instead of the stored value.
            return self._store[key]
        return None

    def get_toggle(self, key, max_, callback, init=0):
        """Return (state, has_changed) for the keyboard toggle *key*.

        Registers the toggle on first use; the state wraps back to 0 once it
        exceeds *max_*. *callback* is (re)bound here and invoked by got_key().
        """
        key = ord(key)
        if key not in self.toggles:
            self.toggles[key] = {'state': init, 'has_changed': True, 'callback': callback}
        ko = self.toggles[key]
        ko['callback'] = callback
        has_changed = ko['has_changed']
        ko['has_changed'] = False
        if ko['state'] > max_:
            ko['state'] = 0
        return (ko['state'], has_changed)

    def got_key(self, key):
        """Handle one key press: advance toggles and the buffer cursor."""
        # Ensure the buffer-navigation toggles exist before dispatching.
        (_, _) = self.get_toggle('b', 1, None, init=0)
        (_, _) = self.get_toggle('v', 1, None, init=0)
        if key in self.toggles:
            ko = self.toggles[key]
            ko['state'] += 1
            ko['has_changed'] = True
            if ko['callback'] is not None:
                ko['callback'](None)
            sys.stdout.flush()
        # 'b' pages forward and 'v' pages backward through the buffers.
        (_, ffd ) = self.get_toggle('b', 1, None, init=0)
        (_, back) = self.get_toggle('v', 1, None, init=0)
        if back:
            self.cur_buf_id -= 1
            self._redraw = True
        if ffd:
            self.cur_buf_id += 1
            self._redraw = True
        self.cur_buf_id = self.cur_buf_id % len(self.buffers)

    def get_slider(self, name, callback=None, init=0, max_=255):
        """Return (value, has_changed) for trackbar *name*, creating it on first use."""
        created = False
        if name not in self.sliders:
            # BUGFIX: cv2 invokes trackbar callbacks with the new position,
            # so the no-op default must accept that argument.
            def none(*_args):
                pass
            if callback is None:
                callback = none
            self.sliders[name] = {'old_value': init}
            cv2.createTrackbar(name, 'image', init, max_, callback)
            created = True
        val = cv2.getTrackbarPos(name, 'image')
        old_val = self.sliders[name]['old_value']
        self.sliders[name]['old_value'] = val
        return (val, val != old_val or created)

    def eventloop(self):
        """Block, dispatching key presses and repainting, until ESC or 'q'."""
        while(1):
            k = cv2.waitKey(100) & 0xFF
            if k != 255:
                self.got_key(k)
            if k == 27 or k == ord('q'):
                break
            if self._redraw:
                self._redraw = False
                cv2.imshow('image', self.buffers[self.cur_buf_id])
        cv2.destroyAllWindows()

    def save_all_buffers(self):
        """Dump every registered buffer to debug/NN_<name>.png."""
        for (i, (k, b)) in enumerate(self.buffers_by_name.items()):
            fn = "debug/%02d_%s.png" % (i, k)
            cv2.imwrite(fn, b[0])
| Phaiax/sudoku | src/context.py | context.py | py | 3,957 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.namedWindow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"... |
14054446569 | import numpy as np
import cv2
from .kalman import Kalman
#https://github.com/uoip/monoVO-python
def get_R(alpha):
    """Return the 2x2 rotation matrix [[cos, sin], [-sin, cos]] for `alpha` degrees.

    The conversion to radians is computed once instead of four times.
    """
    theta = np.deg2rad(alpha)  # identical to np.pi * alpha / 180
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, s],
                     [-s, c]])
def show_direction(image, t, M):
    """Draw a small heading triangle on `image` (mutated in place).

    t: (cx, cy) pixel position of the triangle's center.
    M: 2x2 rotation matrix (see get_R) applied to the triangle before placing it.
    """
    line_thickness = 1
    cx, cy = t
    # Triangle vertices around the origin, as a 2x3 matrix of columns.
    triangle = np.array([[-9, 9], [9, 9], [0, -11]]).T
    triangle_rot = M @ triangle
    triangle = triangle_rot.T
    # Translate to the requested center.
    triangle[:, 0] += cx
    triangle[:, 1] += cy
    # Connect every pair of vertices (three edges), in red (BGR).
    points = [[0, 1], [0, 2], [1, 2]]
    for point in points:
        cv2.line(image, (int(triangle[point[0]][0]), int(triangle[point[0]][1])),
                 (int(triangle[point[1]][0]), int(triangle[point[1]][1])),
                 (0, 0, 255),
                 thickness=line_thickness
                 )
dt = 0.1  # filter time step [s]

# Process-noise standard deviations used to build Q.
# NOTE(review): the Course comment says 0.2 rad/s but the factor used is 1.7 —
# confirm which value is intended.
GPS = 11.7*8.8*dt**2  # assume 8.8m/s2 as maximum acceleration, forcing the vehicle
Course = 1.7*dt  # assume 0.2rad/s as maximum turn rate for the vehicle
Velocity = 8.8*dt  # assume 8.8m/s2 as maximum acceleration, forcing the vehicle
q = np.diag([GPS**2, GPS**2, Course**2, Velocity**2])  # Q: process-noise covariance
# H: measurement matrix — only the two position components are observed.
h = np.array([[1.0, 0.0, 0.0, 0.0],
              [0.0, 1.0, 0.0, 0.0]])
# R: measurement-noise covariance.
varGPS = 0.5  # Standard Deviation of GPS Measurement
r = np.diag([varGPS**2.0, varGPS**2.0])
# F: state-transition matrix (identity — constant state model).
f = np.eye(4)
def mapping(q_in):
    """Consume (raw_frame, frame, coords, frame_id) tuples from `q_in` forever
    and render the vehicle trajectory into a cv2 window.

    coords is indexed as (x, y, z, heading-degrees) — presumably visual-odometry
    output; confirm against the producer. Never returns.
    """
    # Kalman filter is constructed but its use is currently commented out below.
    kalman = Kalman(f = f, h = h, q = q, r = r)
    kalman.set_state()
    traj = np.zeros((400,400,3), dtype=np.uint8)  # trajectory canvas
    while True:
        #raw_frame, frame, coords, frame_id
        _, _, coords, frame_id = q_in.get()
        alpha = coords[3]
        Rt = get_R(alpha)
        x, y, z = coords[0], coords[1], coords[2]
        # Kalman
        # kalman.predict()
        # kalman.update(np.array([[float(coords[0])],
        #                        [float(coords[2])]]))
        # coords = np.array([[float(kalman.state[0])],
        #                    coords[1],
        #                    [float(kalman.state[1])]])
        # x, y, z = coords[0], coords[1], coords[2]
        draw_x, draw_y = int(x), int(y)
        z_color = int(z*255/300)  # computed but unused while the frame_id coloring is active
        #cv2.circle(traj, (draw_x,draw_y), 1, (z_color,255-z_color,255), 2)
        cv2.circle(traj, (draw_x,draw_y), 1, (frame_id/1000,255-frame_id/1000,255), 2)
        # Black banner for the coordinate readout.
        cv2.rectangle(traj, (10, 20), (600, 60), (0,0,0), -1)
        text = "Coordinates: x={:.2f}m y={:.2f}m z={:.2f}m".format(x,y,z)
        cv2.putText(traj, text, (20,40), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1, 8)
        show_direction(traj, (draw_x, draw_y), Rt)
        cv2.imshow('Trajectory', traj)
        cv2.waitKey(1)
if __name__ == '__main__':
    # NOTE(review): mapping() requires a queue argument (q_in); calling it with
    # no arguments raises TypeError — confirm the intended standalone entry point.
    mapping()
| vvabi-sabi/drone_RK3588 | addons/odometry/odometry.py | odometry.py | py | 2,434 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 9,
... |
39914551744 | from fastapi import APIRouter, HTTPException, Request
from utils.model import *
from services.camera_service import camera_service
from services.server_service import server_service
import requests
import threading
router = APIRouter(prefix="/camera")
@router.get("/{server_name}")
async def get_camera(server_name: str=None):
    """Return one server (looked up by name) with its cameras attached.

    BUG FIX: the old code read server['server_id'] before checking whether the
    lookup returned None, raising TypeError for unknown server names instead of
    responding with the "fail" payload.
    """
    server = server_service.get_by_server_name(server_name)
    if server is not None:
        records = camera_service.get_camera_by_server(server['server_id'])
        server['cameras'] = records if records is not None else []
        del server['_id']  # drop the non-serializable Mongo ObjectId
        return {
            "data": server,
            "msg": "success",
        }
    else:
        return {
            "data": {},
            "msg": "fail"
        }
@router.get("/")
async def get_all_camera():
    """Return every server with its cameras, each camera annotated with the
    motion-detection stream URL served by that server's streaming service."""
    servers = server_service.get_all()
    if servers is not None:
        for server in servers:
            cameras = camera_service.get_camera_by_server(server['server_id'])
            path = "http://{0}:8005/stream-manage/output/motion-detections-{1}"
            for camera in cameras:
                camera['stream_url'] = path.format(server['ip'], camera['camera_id'])
            server['cameras'] = cameras if cameras is not None else []
            # NOTE(review): get_camera() above deletes '_id' (the Mongo id);
            # deleting 'id' here looks inconsistent and may raise KeyError —
            # confirm which key the service layer actually returns.
            del server['id']
        return {
            "data": list(servers),
            "msg": "success",
        }
    else:
        return {
            "data": {},
            "msg": "fail"
        }
@router.post("", response_model=Reponse[CameraResponse])
async def add_camera_api(camera: Camera):
    """Persist a new camera, then notify its streaming server in the background.

    Failures anywhere (persistence or thread setup) are surfaced as HTTP 400.
    """
    try:
        created = camera_service.add_camera(camera)

        def _notify_stream_server():
            # Push the new camera to the streaming service on its host.
            host = server_service.get_by_id(camera.server_id)['server_name']
            requests.post(f'http://{host}:8005/stream-manage/camera', json=camera.dict())

        threading.Thread(target=_notify_stream_server).start()
        return {"data": created}
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.delete("/{camera_id}", response_model=Reponse[object])
async def delete_camera(camera_id: str):
    """Delete a camera, then tell its streaming server to drop it (in a
    background thread so the HTTP response is not delayed)."""
    try:
        # NOTE(review): if camera_id is unknown, `camera` may be None and the
        # background thread will fail on camera['server_id'] — confirm the
        # service contract.
        camera = camera_service.get_by_id(camera_id)
        result = camera_service.delete_camera(camera_id)
        def delete_to_streaming():
            server = server_service.get_by_id(camera['server_id'])
            server_name = server['server_name']
            root_url = f'http://{server_name}:8005/stream-manage/camera/{camera_id}'
            requests.delete(root_url)
        background_thread = threading.Thread(target=delete_to_streaming)
        background_thread.start()
        return {"data": result}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("", response_model=Reponse[CameraResponse])
async def update_camera(camera: Camera):
    """Update a camera record, then ask its streaming server to refresh its
    camera list in the background."""
    try:
        result = camera_service.update_camera(camera)
        def refesh_streaming():
            server = server_service.get_by_id(camera.server_id)
            server_name = server['server_name']
            root_url = f'http://{server_name}:8005/stream-manage/camera/refresh'
            requests.get(root_url)
        background_thread = threading.Thread(target=refesh_streaming)
        background_thread.start()
        return {"data": result}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("/update-date", response_model=Reponse[CameraResponse])
async def update_camera_date(model: RangeDate):
    """Update only a camera's start/end time window.

    BUG FIX: the 400 "Camera ID does not existed" HTTPException raised inside
    the try block used to be caught by the generic `except Exception` and
    re-wrapped with its repr as the detail; it is now re-raised intact.
    Also removed a leftover debug print of the record.
    """
    try:
        result = camera_service.get_by_id(model.camera_id)
        if result is None:
            raise HTTPException(
                status_code=400,
                detail='Camera ID does not existed'
            )
        result['start_time'] = model.start_time
        result['end_time'] = model.end_time
        result = camera_service.update_camera(Camera(**result))
        return {"data": result}
    except HTTPException:
        raise  # already an HTTP error — do not re-wrap
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("/update-status", response_model=Reponse[CameraResponse])
async def update_camera_status(camera: Camera):
    """Update only a camera's status flag, leaving other fields as stored.

    NOTE(review): if camera_id is unknown, get_by_id may return None and the
    item assignment will raise — handled by the generic 400 below.
    """
    try:
        result = camera_service.get_by_id(camera.camera_id)
        result['camera_status'] = camera.camera_status
        result = camera_service.update_camera(Camera(**result))
        return {"data": result}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
| ngocthien2306/be-cctv | src/router/camera_router.py | camera_router.py | py | 4,936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "services.server_service.server_service.get_by_server_name",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "services.server_service.server_service",
"line_number": 12,
"u... |
31524035648 | import pandas as pd
import numpy as np
from typing import List
from loguru import logger
from meche_copilot.utils.num_tokens_from_string import num_tokens_from_string
def combine_dataframe_chunks(dfs: List[pd.DataFrame]) -> pd.DataFrame:
    """Reassemble chunks produced by chunk_dataframe into one DataFrame.

    Chunks that all share a column count are stacked vertically (row chunks,
    index reset); otherwise chunks that all share a row count are joined
    horizontally (column chunks). Row-stacking wins when both hold, matching
    the original precedence.

    Raises:
        ValueError: if `dfs` is empty, or the chunks' shapes are not
            consistent along either axis.
    """
    if not dfs:
        # Explicit guard: previously this fell through to pd.concat([]),
        # which raised a less specific ValueError.
        raise ValueError("Chunks do not have consistent shape for concatenation.")
    first = dfs[0]
    if all(df.shape[1] == first.shape[1] for df in dfs):
        return pd.concat(dfs, axis=0, ignore_index=True)
    if all(df.shape[0] == first.shape[0] for df in dfs):
        return pd.concat(dfs, axis=1)
    raise ValueError("Chunks do not have consistent shape for concatenation.")
def chunk_dataframe(df: pd.DataFrame, axis=1, num_chunks=None, pct_list=None, max_tokens=None, **kwargs) -> List[pd.DataFrame]:
    """Chunk a dataframe into a list of dataframes using number of chunks xor
    pct of data in each chunk xor max_tokens in each chunk.

    axis=0 chunks rows, axis=1 chunks columns. Exactly one of num_chunks,
    pct_list, max_tokens must be given. With max_tokens, chunks are sized so
    the CSV rendering of each stays under the token budget (tokenizer name via
    kwargs["encoding_name"], default "gpt-4").
    """
    if axis not in [0, 1]:
        raise ValueError("axis should be either 0 (rows) or 1 (columns).")
    # NOTE(review): "max_tokes" is a typo in this runtime message (left as-is).
    if sum([num_chunks is not None, pct_list is not None, max_tokens is not None]) != 1:
        raise ValueError(f"Exactly one of num_chunks, pct_list, or max_tokes must be specified. Got {num_chunks}, {pct_list}, {max_tokens}")
    # if using percents, they should not add up to greater than 100
    if pct_list:
        if sum(pct_list) > 100:
            raise ValueError("Sum of pct_list should be 100% or less.")
        # NOTE(review): this mutates the caller's pct_list in place, and always
        # appends a remainder entry even when the percentages already total 100
        # (yielding a trailing 0% chunk) — confirm intended.
        num_chunks = len(pct_list) + 1
        pct_list.append(100 - sum(pct_list))
    # if using num_chunks (or pct_list), shouldnt be greater than items in axis
    if num_chunks:
        if axis == 0 and num_chunks > df.shape[0]:
            raise ValueError("Number of chunks should not be greater than number of rows.")
        if axis == 1 and num_chunks > df.shape[1]:
            raise ValueError("Number of chunks should not be greater than number of columns.")
    chunks = []
    if num_chunks and not pct_list:  # split into num_chunks along axis
        logger.debug(f"Splitting df into {num_chunks} chunks along axis {axis}.")
        split_func = np.array_split
        chunks = split_func(df, num_chunks, axis=axis)
    elif pct_list:  # split into fractions along axis
        logger.debug(f"Splitting df into {len(pct_list)} chunks along axis {axis} with pct_list {pct_list}.")
        fractions = [pct / 100 for pct in pct_list]
        if axis == 0:  # split rows into fractions
            start_idx = 0
            for frac in fractions:
                end_idx = start_idx + int(frac * df.shape[0])
                chunks.append(df.iloc[start_idx:end_idx])
                start_idx = end_idx
        else:  # split columns into fractions
            start_idx = 0
            for frac in fractions:
                end_idx = start_idx + int(frac * df.shape[1])
                chunks.append(df.iloc[:, start_idx:end_idx])
                start_idx = end_idx
    else:  # split using max_tokens
        logger.debug(f"Splitting df along axis {axis} with max_tokens {max_tokens} per chunk.")
        encoding_name = kwargs.get("encoding_name", "gpt-4")
        start_idx = 0
        prev_tokens = None  # To keep track of the previous token size
        # Outer loop restarts the inner scan from the next unconsumed row/col;
        # the for/else pair below appends the final partial chunk and exits.
        while start_idx < df.shape[0] if axis == 0 else start_idx < df.shape[1]:
            for i in range(start_idx, df.shape[0] if axis == 0 else df.shape[1]):  # iterate over rows/cols until max_tokens is reached, then append that chunk
                csv_string = df.iloc[start_idx:i+1].to_csv() if axis == 0 else df.iloc[:, start_idx:i+1].to_csv()
                tokens = num_tokens_from_string(csv_string, encoding_name)
                if tokens > max_tokens:
                    # Print the previous token size, not the updated token size
                    logger.debug(f"Adding chunk with shape {df.iloc[start_idx:i].shape if axis == 0 else df.iloc[:, start_idx:i].shape} and prev num tokens {prev_tokens}.")
                    chunks.append(df.iloc[start_idx:i] if axis == 0 else df.iloc[:, start_idx:i])
                    start_idx = i + 1  # update start_idx
                    break
                prev_tokens = tokens  # Save the previous token size
            else:  # if loop completes without breaking (i.e., all remaining data fits within max_tokens)
                chunks.append(df.iloc[start_idx:] if axis == 0 else df.iloc[:, start_idx:])
                break
    logger.debug(f"Split df into {len(chunks)} chunks")
    return chunks
| fuzzy-tribble/meche-copilot | meche_copilot/utils/chunk_dataframe.py | chunk_dataframe.py | py | 4,373 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"lin... |
23382530066 | # -*- coding: utf-8 -*-
import re
import json
import time
import scrapy
import requests
import itertools
from lxml import etree
from hashlib import md5
from overseaSpider.items import ShopItem, SkuAttributesItem, SkuItem
from overseaSpider.util.scriptdetection import detection_main
from overseaSpider.util.utils import isLinux
website = 'samys'
class SamysSpider(scrapy.Spider):
    """Spider for samys.com: seeds six top-level category pages, paginates
    listing pages in steps of 36 products via the `start` query parameter,
    and scrapes each product detail page into a ShopItem.
    """
    name = website
    # allowed_domains = ['samys.com']
    start_urls = ['https://www.samys.com/']

    @classmethod
    def update_settings(cls, settings):
        """Apply spider settings; off the server, disable the HTTP cache and
        point MongoDB at localhost for local debugging."""
        custom_debug_settings = getattr(cls, 'custom_debug_settings' if getattr(cls, 'is_debug',
                                                                                False) else 'custom_settings', None)
        system = isLinux()
        if not system:
            # Not running on the (Linux) server: adjust config for local runs.
            custom_debug_settings["HTTPCACHE_ENABLED"] = False
            custom_debug_settings["MONGODB_SERVER"] = "127.0.0.1"
        settings.setdict(custom_debug_settings or {}, priority='spider')

    def __init__(self, **kwargs):
        super(SamysSpider, self).__init__(**kwargs)
        self.counts = 0
        setattr(self, 'author', "无穹")

    is_debug = True
    custom_debug_settings = {
        'MONGODB_COLLECTION': website,
        'CONCURRENT_REQUESTS': 4,
        'DOWNLOAD_DELAY': 1,
        'LOG_LEVEL': 'DEBUG',
        'COOKIES_ENABLED': True,
        # 'HTTPCACHE_EXPIRATION_SECS': 14 * 24 * 60 * 60,  # seconds
        'DOWNLOADER_MIDDLEWARES': {
            # 'overseaSpider.middlewares.PhantomjsUpdateCookieMiddleware': 543,
            # 'overseaSpider.middlewares.OverseaspiderProxyMiddleware': 400,
            'overseaSpider.middlewares.OverseaspiderUserAgentMiddleware': 100,
        },
        'ITEM_PIPELINES': {
            'overseaSpider.pipelines.OverseaspiderPipeline': 300,
        },
    }

    def filter_html_label(self, text):
        """Strip HTML comments, <script>/<style> blocks and remaining tags from
        a description string, then squeeze out all whitespace."""
        label_pattern = [r'(<!--[\s\S]*?-->)', r'<script>.*?</script>', r'<style>.*?</style>', r'<[^>]+>']
        for pattern in label_pattern:
            labels = re.findall(pattern, text, re.S)
            for label in labels:
                text = text.replace(label, '')
        text = text.replace('\n', '').replace('\r', '').replace('\t', '').replace(' ', '').strip()
        return text

    def filter_text(self, input_text):
        """Remove a set of exotic Unicode space characters from text."""
        filter_list = [u'\x85', u'\xa0', u'\u1680', u'\u180e', u'\u2000-', u'\u200a',
                       u'\u2028', u'\u2029', u'\u202f', u'\u205f', u'\u3000', u'\xA0', u'\u180E',
                       u'\u200A', u'\u202F', u'\u205F']
        for index in filter_list:
            input_text = input_text.replace(index, "").strip()
        return input_text

    def parse(self, response):
        """Seed requests for the six top-level category pages."""
        category_url = ['https://www.samys.com/c/Photography/1/113.html',
                        'https://www.samys.com/c/Video/1/235.html',
                        'https://www.samys.com/c/Studio--Lighting/1/360.html',
                        'https://www.samys.com/c/Electronics/1/421.html',
                        'https://www.samys.com/c/Smartphone/1/830.html',
                        'https://www.samys.com/c/Pro-Cinema--Audio/2/794.html']
        for i in category_url:
            yield scrapy.Request(
                url=i,
                callback=self.parse_list,
                meta={"flag": 0}
            )

    def parse_list(self, response):
        """Product listing page: yield detail-page requests and paginate; pages
        with no products are treated as category hubs and descended into."""
        detail_url = response.xpath("//div[@itemprop='name']/a/@href").getall()
        if detail_url:
            for i in detail_url:
                yield scrapy.Request(
                    url='https://www.samys.com'+i,
                    callback=self.parse_detail
                )
            if response.meta.get("flag") == 0:
                # First page: remember the clean category URL for later pages.
                next_url = response.url + '?start=37'
                yield scrapy.Request(
                    url=next_url,
                    callback=self.parse_list,
                    meta={"flag": 1, "start": 37, "url": response.url}
                )
            else:
                start = response.meta.get("start") + 36
                # BUG FIX: build the next page from the bare category URL kept
                # in meta["url"]; appending to response.url kept stacking query
                # strings (e.g. ...?start=37?start=73).
                next_url = response.meta.get("url") + '?start=' + str(start)
                yield scrapy.Request(
                    url=next_url,
                    callback=self.parse_list,
                    meta={"flag": 1, "start": start, "url": response.meta.get("url")}
                )
        else:
            category_url = response.xpath("//div[@class='category-container']/div/a/@href").getall()
            for i in category_url:
                yield scrapy.Request(
                    url='https://www.samys.com' + i,
                    callback=self.parse_list,
                    meta={"flag": 0}
                )

    def parse_detail(self, response):
        """Product detail page: build and yield a ShopItem."""
        items = ShopItem()
        items["url"] = response.url
        items["name"] = response.xpath('//meta[@property="og:title"]/@content').get()
        cat_temp = response.xpath("//ul[@class='breadcrumbs floatContainer']//a//text()").getall()
        items["detail_cat"] = '/'.join(cat_temp)
        items["cat"] = cat_temp[-1]
        des_temp = response.xpath('//span[@itemprop="description"]//text()').getall()
        items["description"] = self.filter_text(self.filter_html_label(''.join(des_temp)))
        items["source"] = 'samys.com'
        items["brand"] = response.xpath('//meta[@itemprop="brand"]/@content').get()
        # First slider image plus lazy-loaded ones; fall back to swiper slides.
        image_temp = response.xpath("//ul[@class='slider-detail']/li/a/img/@src").getall()[:1]+response.xpath("//ul[@class='slider-detail']/li/a/img/@data-post-load-image").getall()
        if not image_temp:
            image_temp = response.xpath("//div[@class='swiper-slide false']/img/@src").getall()
        image = []
        for i in image_temp:
            image.append('https://www.samys.com'+i)
        items["images"] = image
        items["current_price"] = response.xpath("//meta[@itemprop='price']/@content").get()
        items["original_price"] = items["current_price"]
        items["measurements"] = ["Weight: None", "Height: None", "Length: None", "Depth: None"]
        items["sku_list"] = []
        # Stable id: hash of url + prices.
        status_list = list()
        status_list.append(items["url"])
        status_list.append(items["original_price"])
        status_list.append(items["current_price"])
        status_list = [i for i in status_list if i]
        status = "-".join(status_list)
        items["id"] = md5(status.encode("utf8")).hexdigest()
        items["lastCrawlTime"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        items["created"] = int(time.time())
        items["updated"] = int(time.time())
        items['is_deleted'] = 0
        # detection_main(items=items, website=website, num=20, skulist=True, skulist_attributes=True)
        # print(items)
        yield items
| husky-happy/templatespider | overseaSpider/spiders/xg/samys.py | samys.py | py | 6,903 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "overseaSpider.util.utils.isLinux",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.S",... |
3784075084 | ###############################################################################
# make park model
###############################################################################
import cantera as ct
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import rmgpy
from rmgpy.data.thermo import ThermoDatabase
from rmgpy.data.kinetics import KineticsDatabase
from rmgpy.molecule import Molecule
from rmgpy.species import Species
from rmgpy.reaction import Reaction
import inspect
import copy
from rmgpy.kinetics.surface import SurfaceArrhenius
from rmgpy.kinetics.surface import StickingCoefficient
from rmgpy.quantity import ScalarQuantity
import rmgpy.chemkin as Chemkin
from cantera import ck2cti
###############################################################################
# useful functions
###############################################################################
def get_thermo(spec_str):
    '''
    Build an rmgpy Species from a SMILES string and attach estimated thermo.

    Uses the module-level `thermo_database`; binding energies are scaled to
    Cu(111). Returns the Species with `.thermo` populated.
    '''
    spec = Species()
    spec.from_smiles(spec_str)
    est_thermo = thermo_database.get_thermo_data(spec, metal_to_scale_to="Cu111")
    spec.thermo = est_thermo
    return spec
def get_gas_phase_precurs(spec):
    '''
    adapted from ThermoDatabase method:
    get_thermo_data_for_surface_species()

    Return a Species object for the gas-phase precursor of a given surface
    species (lowest-H298 desorbed configuration). Library hits are preferred
    over group estimates. Does NOT apply an adsorption correction!
    '''
    dummy_molecules = spec.molecule[0].get_desorbed_molecules()
    for mol in dummy_molecules:
        mol.clear_labeled_atoms()
    if len(dummy_molecules) == 0:
        raise RuntimeError(f"Cannot get thermo for gas-phase molecule")
    # if len(molecule) > 1, it will assume all resonance structures have already been
    # generated when it tries to generate them, so evaluate each configuration separately
    # and pick the lowest energy one by H298 value
    gas_phase_species_from_libraries = []
    gas_phase_species_estimates = []
    for dummy_molecule in dummy_molecules:
        dummy_species = Species()
        dummy_species.molecule = [dummy_molecule]
        dummy_species.generate_resonance_structures()
        dummy_species.thermo = thermo_database.get_thermo_data(dummy_species)
        # A populated thermo label marks a library hit (vs. group estimate).
        if dummy_species.thermo.label:
            gas_phase_species_from_libraries.append(dummy_species)
        else:
            gas_phase_species_estimates.append(dummy_species)

    # define the comparison function to find the lowest energy
    def lowest_energy(species):
        # NOTE(review): the prints below look like leftover debug output.
        if hasattr(species.thermo, 'H298'):
            print(species.thermo.H298.value_si)
            return species.thermo.H298.value_si
        else:
            print(species.thermo.get_enthalpy(298.0))
            return species.thermo.get_enthalpy(298.0)

    if gas_phase_species_from_libraries:
        species = min(gas_phase_species_from_libraries, key=lowest_energy)
    else:
        species = min(gas_phase_species_estimates, key=lowest_energy)
    thermo = species.thermo  # NOTE(review): assigned but unused
    return species
def update_thermo(spec, name, be1, be2):
    '''
    Return a deep copy of `spec` whose H298 is shifted so its binding energy
    matches the reference value.

    spec: Species object to adjust (not mutated)
    name: park species name as string (used only for the debug prints)
    be1:  binding energy implied by the current thermo [eV]
    be2:  the "correct" reference binding energy [eV]

    BUG FIX: the original ignored the be1/be2 parameters and read the global
    be_dict/be_dict_park instead (and left `ev_2_kj` unused); the shift is now
    computed from the arguments, which the call site passes the same values as
    those globals.
    '''
    spec_new = copy.deepcopy(spec)
    ev_2_j_per_mol = 9.6e4  # eV -> J/mol conversion used throughout this script
    be_diff = (be1 - be2) * ev_2_j_per_mol
    new_h298 = spec.thermo.H298.value_si - be_diff
    spec_new.thermo.H298.value_si = new_h298
    # Debug output preserved from the original implementation.
    print(name, id(spec_new.thermo.H298.value_si), id(spec.thermo.H298.value_si))
    print(name, spec_new.thermo.H298.value_si, spec.thermo.H298.value_si, be_diff)
    return spec_new
def make_reaction(reactants, products, rxn_str, A, Ea, stick = False,):
    '''
    Make an rmgpy Reaction object.

    reactants/products: lists of Species objects
    rxn_str: reaction equation string used as the label
    A, Ea:   Arrhenius prefactor and activation energy (value or (value, units))
    stick:   if True, A is interpreted as a sticking coefficient and a
             StickingCoefficient rate is built instead of SurfaceArrhenius.
    '''
    if stick:
        kinetics = StickingCoefficient(
            A=A,
            n=0.0,
            Ea=Ea,
            T0=(1.0, "K"),
            Tmin=None,
            Tmax=None,
            Pmin=None,
            Pmax=None,
            coverage_dependence=None,
            comment=''
        )
    else:
        kinetics = SurfaceArrhenius(
            A=A,
            n=0.0,
            Ea=Ea,
            T0=(1.0, "K"),
            Tmin=None,
            Tmax=None,
            Pmin=None,
            Pmax=None,
            coverage_dependence=None,
            comment=''
        )
    # use the rmgpy reaction object (index=-1: not yet assigned to a mechanism)
    rxn = Reaction(
        index=-1,
        label=rxn_str,
        reactants=reactants,
        products=products,
        specific_collider=None,
        kinetics=kinetics,
        network_kinetics=None,
        reversible=True,
        transition_state=None,
        duplicate=False,
        degeneracy=1,
        pairs=None,
        allow_pdep_route=False,
        elementary_high_p=False,
        allow_max_rate_violation=False,
        rank=None,
        comment='',
        is_forward=None,
    )
    return rxn
def convert_to_nasa(spec):
    """Replace spec.thermo in place with its NASA-polynomial form
    (Tmin=298 K, Tmax=1500 K, Tint=1000 K)."""
    spec.thermo = spec.thermo.to_nasa(298, 1500, 1000)
###############################################################################
# initiialize things
###############################################################################
# quick check that we are using the correct rmgpy and version
print('using rmgpy at: ',inspect.getfile(rmgpy))
print('using rmgpy version: ', rmgpy.__version__)
# save rmgpy and db directory. db is assumed to be in the same
# folder as RMG-Py
rmg_py_path = inspect.getfile(rmgpy).split("rmgpy")[0]
rmg_db_path = rmg_py_path.split("RMG-Py")[0] + "RMG-database/"
# import data
# set absolute location, using './' in jupyter performs differently
# in vscode
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
park_xl_file =os.path.join(__location__,'park_thermo_and_rates.xlsx')
BE_sheet='Binding Energies'
rxn_sheet = 'reactions'
be_df = pd.read_excel(park_xl_file, sheet_name=BE_sheet, engine='openpyxl')
rxn_df = pd.read_excel(park_xl_file, sheet_name=rxn_sheet, engine='openpyxl')
# output files
chemkin_gas_file = os.path.join(__location__, 'park_gas.inp')
chemkin_surface_file = os.path.join(__location__ + '/park_surf.inp') # why do we need a / for surface?
cantera_file = os.path.join(__location__,'park_mech.cti')
###############################################################################
# Constants/values
###############################################################################
site_density_mol_cm = 2.943e-09
site_density_si = site_density_mol_cm * 1e4
site_density_object = ScalarQuantity(site_density_si, 'mol/m^2')
###############################################################################
# get thermo for all species in RMG model. adjust BEs per the sheet values
###############################################################################
db_input_path = rmg_db_path + 'input/'
# load the thermo database
library_path = db_input_path + 'thermo/'
thermo_libraries = [
'surfaceThermoPt111',
]
thermo_database = ThermoDatabase()
thermo_database.load(
library_path,
libraries=thermo_libraries,
depository=False,
surface=True
)
# load the kinetics database
kin_libraries_dir = db_input_path + "kinetics/libraries/Surface/"
kin_fam_dir = db_input_path + "kinetics/families/"
kinetics_libraries = [
'CPOX_Pt/Deutschmann2006_adjusted',
]
kinetics_families = ['surface']
kinetics_database = KineticsDatabase()
kinetics_database.load_recommended_families(kin_fam_dir + 'recommended.py')
kinetics_database.load_families(
path=kin_fam_dir,
families=kinetics_families,
)
kinetics_database.load_libraries(
kin_libraries_dir,
libraries=kinetics_libraries
)
# get binding energies
# need a dictionary translating species names to smiles
# need a dictionary translating species names to smiles
spec_smiles_dict = {
'CO*':'O=C=[*]',
'CO2*':'O=C=O.[*]',
'H*':'[H]*',
'H2O*':'O.[*]',
'CH3OH*':'CO.[*]',
'O*':'O=[*]',
'OH*':'O[*]',
'HCO*':'O=C*',
# 'HCOO**':'O=CO[*][*]', #formate, bidentate
'HCOO**':'O=CO[*].[*]', # formate, bidentate, plus extra X
'H2CO2*':'[*]OCO[*]',
'COOH*':'O=C(O)[*]',
'CH2O*':'C=O.[*]',
'CH3O*':'CO[*]',
'CH3O2*':'OCO[*]',
'*':'[*]',
}
# also need a dict of gas phase species to get be's from
# key is surface species, value is Gas phase precursor
# either from RMGs estimate or if it's explicitly known,
# just the gas phase version (e.g. 'CO2*': 'CO2')
gas_pre_dict = {
'CO*':'[C-]#[O+]',
'CO2*':'O=C=O',
'H*':'[H]',
'H2O*':'O',
'CH3OH*':'CO',
'O*':'[O]',
'OH*':'[OH]',
'HCO*':'[CH]=O',
'HCOO**':'[O]C=O', #formate, bidentate
'H2CO2*':'[O]C[O]',
'COOH*':'O=[C]O',
'CH2O*':'C=O',
'CH3O*':'C[O]',
'CH3O2*':'[O]CO',
'*':'[*]',
}
# all of the gas phase species in the model
gas_smiles_dict = {
'CO':'[C-]#[O+]',
'CO2':'O=C=O',
'H2O':'O',
'CH3OH':'CO',
'CH2O':'C=O',
'H2':'[H][H]',
}
# construct a dictionary of binding energies
be_dict = {}
for label in spec_smiles_dict.keys():
surf_spec = get_thermo(spec_smiles_dict[label])
gas_spec = get_thermo(gas_pre_dict[label])
surf_h298 = surf_spec.thermo.get_enthalpy(298)
gas_h298 = gas_spec.thermo.get_enthalpy(298)
be_dict[label] = (surf_h298 - gas_h298)/9.6e4
species_dict = {}
for spec_name in be_df['Species']:
smiles = spec_smiles_dict[spec_name.strip()]
spec = get_thermo(smiles)
spec.label = spec_name
species_dict[spec_name.strip()] = spec
# # manually add surface site to species_dict
# species_dict['*'] = get_thermo(spec_smiles_dict['*'])
gas_species_dict = {}
for spec_name in gas_smiles_dict.keys():
smiles = gas_smiles_dict[spec_name.strip()]
spec = get_thermo(smiles)
spec.label = spec_name
gas_species_dict[spec_name.strip()] = spec
# make binding energy dictionary from park data
be_dict_park = {}
for i in range(len(be_df)):
species = be_df['Species'][i].strip()
be_park = be_df["BE"][i]
be_dict_park[species] = be_park
# update thermo to be closer to bark BE values
new_thermo_spec_dict = {}
for name, spec in species_dict.items():
spec_new = update_thermo(
spec,
name,
be_dict[name],
be_dict_park[name],
)
new_thermo_spec_dict[name] = spec_new
# combine gas and surface species dicts
combined_species_dict = {**new_thermo_spec_dict, **gas_species_dict}
# now that we've solidified the thermo, convert to nasa so chemkin conversion
# is a little easier
for spec in combined_species_dict.values():
convert_to_nasa(spec)
# Pull the reactants, products and Arrhenius parameters for each spreadsheet
# row and build rmgpy Reaction objects.
rxn_spec_dict = {}
rxn_dict = {}
rxn_dict_coeff = {}
rxn_list = {}
for index, row in rxn_df.iterrows():
    rxn_raw = row['eqtn']
    rxn = rxn_raw.strip()
    reactants, products = rxn.split("<=>")
    reac_spl = reactants.split("+")
    prod_spl = products.split("+")
    # retain the stoichiometric-coefficient form just in case we need it.
    # BUG FIX: take real copies — plain assignment aliased the same lists,
    # so the "coeff" form was silently expanded too.
    reac_spl_coeff = list(reac_spl)
    prod_spl_coeff = list(prod_spl)
    # Expand species with a "2" stoichiometric prefix into two separate
    # entries, e.g. 2OH --> OH, OH.
    # BUG FIX: iterate over a snapshot (mutating a list while iterating it
    # skips elements) and strip only the LEADING "2" (replace("2","") also
    # mangled interior digits, e.g. "2H2O*" -> "HO*").
    for reac in list(reac_spl):
        if reac.startswith("2"):
            reac_dup = reac[1:]
            reac_spl.remove(reac)
            reac_spl.extend([reac_dup] * 2)
    for prod in list(prod_spl):
        if prod.startswith("2"):
            prod_dup = prod[1:]
            prod_spl.remove(prod)
            prod_spl.extend([prod_dup] * 2)
    rxn_dict[rxn] = [reac_spl, prod_spl]
    rxn_dict_coeff[rxn] = [reac_spl_coeff, prod_spl_coeff]
    if row['Af'] == 'N/A' and row['stick']:
        # if no rate info and sticking coefficient
        A = 1.0  # default sticking coefficient (dimensionless)
    elif row['Af'] != 'N/A' and row['stick']:
        # if we supply a sticking coefficient
        A = float(row['Af'])
    else:
        # we are making a concession here. rates that do
        # not have an A-factor or Ea specified are quasi-
        # equilibrated, so I am setting the A-factor to the
        # highest value (1e22 1/s) in the mechanism, and
        # making it barrierless (Ea=0 eV)
        if len(reac_spl) > 1:
            A = (float(row['Af'] / site_density_si), 'm^2/(mol*s)')  # bimolecular surface rate
        else:
            A = (float(row['Af'] / site_density_si), 's^-1')  # unimolecular surface rate
    Ea = (float(row['Ef (eV)'] * 9.6e4), 'J/mol')  # eV -> J/mol
    rxn_spec_dict[rxn] = [
        [combined_species_dict[reac] for reac in reac_spl],
        [combined_species_dict[prod] for prod in prod_spl],
    ]
    rxn_obj = make_reaction(
        rxn_spec_dict[rxn][0],
        rxn_spec_dict[rxn][1],
        rxn,
        A,
        Ea,
        stick=row['stick'],
    )
    rxn_list[rxn] = rxn_obj
# finally, make inputs into lists for chemkin file write
# NOTE(review): this first build (from combined_species_dict) is dead code —
# chemkin_specs/chemkin_rxns are rebuilt immediately below before any use.
chemkin_specs = []
for spec in combined_species_dict.values():
    chemkin_specs.append(spec)
chemkin_rxns = []
for rxn in rxn_list.values():
    chemkin_rxns.append(rxn)

# write chemkin gas-phase file: gas species only, no reactions
chemkin_specs = []
for spec in gas_species_dict.values():
    chemkin_specs.append(spec)
chemkin_rxns = []
Chemkin.save_chemkin_file(
    chemkin_gas_file,
    chemkin_specs,
    chemkin_rxns,
    verbose=True,
    check_for_duplicates=True,
)

# write chemkin surface file: adjusted surface species plus all reactions
chemkin_specs = []
for spec in new_thermo_spec_dict.values():
    chemkin_specs.append(spec)
chemkin_rxns = []
for rxn in rxn_list.values():
    chemkin_rxns.append(rxn)
Chemkin.save_chemkin_surface_file(
    chemkin_surface_file,
    chemkin_specs,
    chemkin_rxns,
    verbose=True,
    check_for_duplicates=True,
    surface_site_density=site_density_object,
)

# convert the chemkin pair to a single cantera .cti mechanism
parser = ck2cti.Parser()
parser.convertMech(
    chemkin_gas_file,
    outName=cantera_file,
    quiet=True,
    permissive=True,
    surfaceFile=chemkin_surface_file
)

# test that model works by attempting to load it
gas = ct.Solution(cantera_file, "gas")
surf = ct.Interface(cantera_file,"surface1", [gas]) | comocheng/meOH-analysis | External_data/park_et_al_model_reconstruction/make_park_model.py | make_park_model.py | py | 14,690 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rmgpy.species.Species",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rmgpy.species.Species",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "rmgpy.kinet... |
23002903726 | import sqlite3
# One-shot schema script: open the parts database, ensure the PECA table
# exists, then close the connection.
connection = sqlite3.connect('databasePeças.db')
c = connection.cursor()


def CREATE():
    """Create the PECA (part) table if it does not already exist, commit,
    and close the module-level connection."""
    # PECA: part code (PK), singular/plural names, gender flag, price.
    c.execute(
        'CREATE TABLE IF NOT EXISTS PECA ('
        '`codigo` VARCHAR(5) NOT NULL,'
        '`nomeSingular` VARCHAR(25) NOT NULL,'
        '`nomePlural` VARCHAR(25) NOT NULL,'
        '`genero` VARCHAR(1) NOT NULL,'
        '`preco` VARCHAR(8),'
        'PRIMARY KEY(`codigo`));'
    )
    connection.commit()
    connection.close()


CREATE()
CREATE() | GilbertoMJ/Projeto-Andaimes | Scripts Banco de Dados/criar_database_Peça.py | criar_database_Peça.py | py | 473 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
7813711056 | """clean up unused tables
Create Date: 2022-05-02 17:19:09.910095
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "20220502_171903"  # this migration's identifier
down_revision = "20220425_225456"  # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Drop tables that are no longer used, then remove the enum values and
    dummy rows that referred to them.

    Tables are dropped first, then dependent rows in aspen.entities, then the
    enum values themselves.  NOTE(review): op.enum_delete is not stock
    Alembic — presumably provided by a project plugin; confirm.
    """
    op.drop_table("region_types", schema="aspen")
    op.drop_table("align_read_workflows", schema="aspen")
    op.drop_table("call_consensus_workflows", schema="aspen")
    op.drop_table("sequencing_reads_collections", schema="aspen")
    op.drop_table("sequencing_instrument_types", schema="aspen")
    op.drop_table("filter_read_workflows", schema="aspen")
    op.drop_table("host_filtered_sequencing_reads_collections", schema="aspen")
    op.drop_table("sequencing_protocol_types", schema="aspen")
    op.drop_table("bams", schema="aspen")
    op.drop_table("called_pathogen_genomes", schema="aspen")
    # Drop dummy data tied to our enums, relevant for dev environments
    op.execute(
        "DELETE FROM aspen.entities WHERE entity_type IN ('SEQUENCING_READS', 'BAM', 'CALLED_PATHOGEN_GENOME', 'HOST_FILTERED_SEQUENCE_READS')"
    )
    # remove the now-unreferenced values from the entity_types enum
    op.enum_delete(
        "entity_types",
        [
            "CALLED_PATHOGEN_GENOME",
            "BAM",
            "SEQUENCING_READS",
            "HOST_FILTERED_SEQUENCE_READS",
        ],
        schema="aspen",
    )
    # and from the workflow_types enum
    op.enum_delete(
        "workflow_types",
        ["CALL_CONSENSUS", "ALIGN_READ", "FILTER_READ"],
        schema="aspen",
    )
def downgrade():
    """Irreversible migration: the dropped tables and enum values cannot be restored."""
    raise NotImplementedError("don't downgrade")
| chanzuckerberg/czgenepi | src/backend/database_migrations/versions/20220502_171903_clean_up_unused_tables.py | 20220502_171903_clean_up_unused_tables.py | py | 1,530 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "alembic.op.drop_table",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "alembic.op",
... |
15826519032 | import json
import logging
logging.basicConfig(level=logging.DEBUG)
import argparse
import uuid
import emission.storage.decorations.user_queries as esdu
import emission.net.ext_service.push.notify_usage as pnu
import emission.net.ext_service.push.query.dispatch as pqd
import emission.core.wrapper.user as ecwu
import emission.core.get_database as edb
def get_uuid_list_for_platform(platform):
    """Return the list of user UUIDs whose devices run the given platform."""
    # Look up the dispatcher's "platform" query and run it for this platform.
    return pqd.get_query_fn("platform")({"platform": platform})
def get_upgrade_push_spec(platform):
    """Build the push-notification payload asking users on `platform` to upgrade.

    Args:
        platform: either "android" or "ios".

    Returns:
        dict push spec whose "spec.url" points at the platform's app store page.

    Raises:
        ValueError: if platform is not "android" or "ios".  (The original
        raised `InvalidArgumentException`, which is not defined or imported
        anywhere in this module and so would itself fail with a NameError.)
    """
    store_urls = {
        "android": "https://play.google.com/store/apps/details?id=gov.nrel.cims.openpath",
        "ios": "https://apps.apple.com/us/app/nrel-openpath/id1628058068",
    }
    try:
        platform_url = store_urls[platform]
    except KeyError:
        raise ValueError("Found unknown platform %s, expected 'android' or 'ios'" % platform)
    push_spec = {
        "alert_type": "website",
        "title": "Your version of the NREL OpenPATH app may have errors",
        "message": "Please upgrade to the most recent version",
        "image": "icon",
        "spec": {
            "url": platform_url
        }
    }
    return push_spec
def needs_version_update(uuid, target_version):
    """Return True when `uuid`'s profile reports a version != target_version.

    A missing profile counts as *not* needing an update.  Note this is a
    plain inequality, so a user already ahead of target_version is also
    reported as needing one — presumably acceptable for upgrade nags.
    Assumes every profile document carries client_app_version — TODO confirm.
    """
    curr_profile = edb.get_profile_db().find_one({"user_id": uuid})
    logging.debug("Read profile %s for user %s" % (curr_profile, uuid))
    if curr_profile is None:
        logging.error("Could not find profile for %s" % uuid)
        return False
    elif curr_profile["client_app_version"] == target_version:
        logging.debug("%s is already at version %s" % (uuid, curr_profile["client_app_version"]))
        return False
    else:
        logging.debug("%s is at version %s, needs update to %s" % (uuid, curr_profile["client_app_version"], target_version))
        return True
def push_upgrade_message_for_platform(platform, cli_args):
    """Push the upgrade notification to every user on the given platform.

    When cli_args.target_version is set, only users whose stored profile
    version differs from it are pushed to; with cli_args.dry_run everything
    except the actual push happens.
    """
    logging.info("About to push to %s" % platform)
    uuid_list = get_uuid_list_for_platform(platform)
    logging.info("UUID list for %s = %s" % (platform, uuid_list))
    if cli_args.target_version:
        # loop variable renamed from `uuid`, which shadowed the imported
        # uuid module inside the comprehension
        filtered_uuid_list = [u for u in uuid_list
                              if needs_version_update(u, cli_args.target_version)]
        logging.info("After filtering for %s, uuid_list is %s" % (cli_args.target_version, filtered_uuid_list))
    else:
        filtered_uuid_list = uuid_list
        logging.info("No target version specified, not filtering list")
    spec = get_upgrade_push_spec(platform)
    if cli_args.dry_run:
        logging.info("dry run, skipping actual push")
    else:
        response = pnu.send_visible_notification_to_users(filtered_uuid_list,
                                                          spec["title"],
                                                          spec["message"],
                                                          spec,
                                                          dev=cli_args.dev)
        pnu.display_response(response)
def runTests():
    """Smoke-test needs_version_update against temporary profile entries.

    Inserts three fake profiles, checks the filter logic, and always cleans
    them up again (finally), so a failing assert cannot leave junk behind.
    """
    try:
        edb.get_profile_db().insert_one({"user_id": "v4", "client_app_version": "1.0.4"})
        edb.get_profile_db().insert_one({"user_id": "v5", "client_app_version": "1.0.5"})
        edb.get_profile_db().insert_one({"user_id": "v6", "client_app_version": "1.0.6"})
        assert needs_version_update("v4", "1.0.6")
        assert needs_version_update("v5", "1.0.6")
        assert not needs_version_update("v6", "1.0.6")
        assert not needs_version_update("unknown", "1.0.6")
    finally:
        logging.debug("About to delete all entries from the profile")
        edb.get_profile_db().delete_many({"user_id": "v4"})
        edb.get_profile_db().delete_many({"user_id": "v5"})
        edb.get_profile_db().delete_many({"user_id": "v6"})
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="prompt_upgrade_to_latest")
    # until we figure out a way to add unit tests for scripts
    # NOTE(review): the --test help text looks copy-pasted from --dry-run
    parser.add_argument("--test", action="store_true", default=False,
                        help="Do everything except actually push the survey")
    parser.add_argument("-n", "--dry-run", action="store_true", default=False,
                        help="Do everything except actually push the survey")
    parser.add_argument("-t", "--target-version",
                        help="Only push to people who have not upgraded to this version")
    parser.add_argument("-d", "--dev", action="store_true", default=False)
    args = parser.parse_args()
    if args.test:
        runTests()
    else:
        # push to both platforms with the same settings
        push_upgrade_message_for_platform("android", args)
        push_upgrade_message_for_platform("ios", args)
| e-mission/e-mission-server | bin/monitor/prompt_upgrade_to_latest.py | prompt_upgrade_to_latest.py | py | 4,640 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "emission.net.ext_service.push.query.dispatch.get_query_fn",
"line_number": 14,
"usage_type": "call"
... |
39157550873 | #!/usr/bin/env python3
import click
import sys
from pathlib import Path
from RecBlast.RecBlast import RecSearch
import RecBlast.WarningsExceptions as RBWE
def deduce_searchtype(query_type, db_type, search_algorithm):
    """Map (query sequence type, database type, engine) to a search program name.

    The original had the same if/elif ladder duplicated once per engine;
    collapsed into a lookup table with identical behaviour, messages and
    exception types.

    Args:
        query_type: "dna" or "prot" (case-insensitive).
        db_type: "dna" or "prot" (case-insensitive).
        search_algorithm: engine name; anything containing "blast" or "blat"
            (checked in that order, case-insensitively).

    Returns:
        The program name, e.g. "blastn", "blastx", "tblastn", "blat", "blatx"...

    Raises:
        Exception: for an unknown query or database type.
        RBWE.SearchEngineNotImplementedError: for an unknown engine.
    """
    # a bit of cleaning
    query_type = query_type.lower()
    db_type = db_type.lower()
    search_algorithm = search_algorithm.lower()
    # (query_type, db_type) -> program name, per engine family
    program_tables = (
        ("blast", {("dna", "dna"): "blastn", ("dna", "prot"): "blastx",
                   ("prot", "prot"): "blastp", ("prot", "dna"): "tblastn"}),
        ("blat", {("dna", "dna"): "blat", ("dna", "prot"): "blatx",
                  ("prot", "prot"): "blatp", ("prot", "dna"): "tblat"}),
    )
    for engine, table in program_tables:
        if engine in search_algorithm:
            # query type is validated before db type, as in the original
            if query_type not in ("dna", "prot"):
                raise Exception("Unknown search sequence type! Allowed options are 'dna' or 'prot'")
            if db_type not in ("dna", "prot"):
                raise Exception("Unknown search database type! Allowed options are 'dna' or 'prot'")
            return table[(query_type, db_type)]
    raise RBWE.SearchEngineNotImplementedError("This search engine hasn't been implemented yet! Only BLAT and BLAST have been implemented!")
@click.command()
@click.option("-q", "--query-file", type=click.Path(exists=True))
@click.option("--query-file-type", type=str, default="fasta")
@click.option("-p", "--max-processes", type=int, default=40)
@click.option("-fp", "--forward-port")
@click.option("-rp", "--reverse-port")
@click.option("-fs", "--forward-species", type=str)
@click.option("-ft", "--forward-twobit", type=click.Path(exists=False))
@click.option("-rs", "--reverse-species", type=str)
@click.option("-rt", "--reverse-twobit", type=click.Path(exists=False))
@click.option("-ps", "--perc-score", type=str, default= "0.1")
@click.option("-pi", "--perc-identity", type=str, default = "0.5")
@click.option("-pq", "--perc-query-span", type=str, default = "0.5")
@click.option("--query_type", type=str, default = "prot")
@click.option("--reverse_type", type=str, default = "dna")
@click.option("--forward_algo", type=str, default = "blat")
@click.option("--reverse_algo", type=str, default = "blat")
@click.option("--reverse_db_type", type=str, default = "dna")
@click.option("--forward_db_type", type=str, default = "dna")
@click.option("--annotation_lookup_tsv", type=str, default = "")
@click.option("--output-root", type=str, default="./output")
@click.option('-v', '--verbose', count=True)
def __main__(query_file, forward_port, forward_species, forward_twobit,
             reverse_port, reverse_species, reverse_twobit,
             query_type, forward_db_type, forward_algo,
             reverse_type, reverse_db_type, reverse_algo,
             perc_score, perc_identity, perc_query_span, query_file_type, max_processes,
             annotation_lookup_tsv, output_root, verbose):
    """CLI entry point: configure and run a reciprocal-best-hit search.

    Percentage thresholds arrive as strings (click options above) and are
    converted here; diagnostics go to stderr.  Output lands under
    <output_root>/<forward_twobit stem>/.
    """
    perc_score = float(perc_score)
    perc_identity = float(perc_identity)
    perc_query_span = float(perc_query_span)
    forward_twobit = Path(forward_twobit)
    reverse_twobit = Path(reverse_twobit)
    print(forward_twobit, reverse_twobit, output_root, perc_identity, perc_score, perc_query_span, query_file, sep="\n", file=sys.stderr)
    output_location = Path(output_root, forward_twobit.stem)
    print(output_location, file=sys.stderr)
    # pick the forward/reverse program names from the type/engine combinations
    f_search_type = deduce_searchtype(query_type, forward_db_type, forward_algo)
    r_search_type = deduce_searchtype(reverse_type, reverse_db_type, reverse_algo)
    recblast = RecSearch(target_species=forward_species, query_species=reverse_species,
                         forward_search_type=f_search_type, reverse_search_type=r_search_type,
                         sequence_source="twobit", verbose=verbose)
    recblast.max_processes = max_processes
    recblast.set_queries(query_file,
                         infile_type=query_file_type)
    # forward search: database name/path/port keyed by the target species
    recblast.forward_search_settings['database_port'] = {forward_species: forward_port}
    recblast.forward_search_settings['database'] = {forward_species: str(forward_twobit.name)}
    recblast.forward_search_settings['database_path'] = str(forward_twobit.parent)
    recblast.forward_search_criteria = dict(perc_score=perc_score,
                                            perc_ident=perc_identity,
                                            perc_query_span=perc_query_span)
    # hit sequences are pulled from the same twobit file as the forward search
    recblast.sequence_source_settings['database'] = {forward_species: str(forward_twobit.name)}
    recblast.sequence_source_settings['database_path'] = str(forward_twobit.parent)
    recblast.memory_saver_level = 1
    # reverse search settings, keyed by the query species
    recblast.reverse_search_settings['database'] = {reverse_species: str(reverse_twobit.name)}
    recblast.reverse_search_settings['database_path'] = str(reverse_twobit.parent)
    recblast.reverse_search_settings['database_port'] = {reverse_species: reverse_port}
    if annotation_lookup_tsv:
        recblast.set_translation_annotation_parameters(method="table", key_value_order=False,
                                                       tsv_location=annotation_lookup_tsv)
    else:
        recblast.set_translation_annotation_parameters(method=False)
    # the run name encodes the query file and every threshold for traceability
    recblast(run_name="{0}-pcScore{1}_pcIdent{2}_pcQuerySpan{3}_reverse-{4}".format(Path(query_file).stem,
                                                                                   perc_score,
                                                                                   perc_identity,
                                                                                   perc_query_span,
                                                                                   reverse_twobit.stem),
             output_type="bed-complete",
             output_location=output_location)
if __name__ == "__main__":
    # click parses sys.argv inside __main__; exit() then ends the interpreter
    __main__()
    exit()
| docmanny/smRecSearch | code/rbb.py | rbb.py | py | 6,528 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "RecBlast.WarningsExceptions.SearchEngineNotImplementedError",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "RecBlast.WarningsExceptions",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 86,
"usage_type": "... |
from codecs import open
from os import path
import re

from setuptools import setup, find_packages

# directory containing this setup.py
dot = path.abspath(path.dirname(__file__))

# get the dependencies and installs
with open(path.join(dot, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')

install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if
                    x.startswith('git+')]

# parse the version file (read inside `with` so the handle is closed
# deterministically; the original left the file open)
with open("cloudy/_version.py", "rt") as ver_file:
    ver_content = ver_file.read()
ver_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", ver_content, re.M)
if ver_match:
    version = ver_match.group(1)
else:
    raise RuntimeError("Unable to find version string")

setup(
    name='cloudy',
    version=version,
    description='opinionated & personal screenshot handler',
    long_description=(
        'Watches a directory for file changes, uploads them to a remote,'
        'generates a link, shortens it and dumps it into the clipboard.'
    ),
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    entry_points={
        'console_scripts': ['cloudy=cloudy.cloudy:cli'],
    },
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    install_requires=install_requires,
    dependency_links=dependency_links,
)
| rarescosma/env.cloudy | setup.py | setup.py | py | 1,466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number":... |
36684803245 | import pandas as pd
import tekore as tk
from config import CLIENT_ID, CLIENT_SECRET
class SpotifyData:
    """Thin wrapper around the Spotify Web API (via tekore) that returns
    audio-feature tables as pandas DataFrames."""

    def get_one_song_data(self, query):
        """Search Spotify (market 'pl') for `query` and return a one-row
        DataFrame with the top hit's audio features.

        Also prints the matched artist/track and the artist's first genre.
        """
        token = tk.request_client_token(CLIENT_ID, CLIENT_SECRET)
        spotify = tk.Spotify(token)
        searched_track = spotify.search(query, types=('track',), market='pl')
        artist_id = searched_track[0].items[0].artists[0].id
        # renamed from `id`, which shadowed the builtin
        track_id = searched_track[0].items[0].id
        af = spotify.track_audio_features(track_id)
        output = [
            [af.danceability,
             af.energy,
             af.loudness,
             af.acousticness,
             af.instrumentalness,
             af.liveness,
             af.speechiness,
             af.valence]
        ]
        print("Znaleziono:", searched_track[0].items[0].artists[0].name, "-", searched_track[0].items[0].name)
        print("Gatunek:", spotify.artist(artist_id).genres[0])
        return pd.DataFrame(output, columns=['danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness',
                                             'liveness', 'speechiness', 'valence'])

    def get_data(self, genres):
        """For each genre name, pick a playlist and collect per-track audio
        features; return them all as one labelled DataFrame.

        Genres whose playlists/tracks cannot be resolved are skipped (the
        except clauses below), matching the original behaviour.
        """
        genres_names = genres
        token = tk.request_client_token(CLIENT_ID, CLIENT_SECRET)
        spotify = tk.Spotify(token)
        output = pd.DataFrame(
            columns=['genre', 'danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness',
                     'liveness', 'speechiness', 'valence'])
        for genre in genres_names:
            print('now: ', genre)
            tracks_id = []
            tracks_af = []
            try:
                searched_playlists = spotify.search(genre, types=('playlist',), market='pl', limit=50, offset=0)
                # Prefer the first playlist with at least 100 tracks; fall back
                # to the first search result.  (The original fallback sat in an
                # `elif i == 100` branch inside `for i in range(100)`, which can
                # never fire, and indexing past the <=50 results raised
                # IndexError, silently dropping the genre.)
                playlist_id = None
                for candidate in searched_playlists[0].items:
                    if candidate.tracks.total >= 100:
                        playlist_id = candidate.id
                        break
                if playlist_id is None:
                    playlist_id = searched_playlists[0].items[0].id
                playlist = spotify.playlist(playlist_id)
                playlist_tracks = playlist.tracks.items
                for i in range(100):
                    tracks_id.append(playlist_tracks[i].track.id)
                afs = spotify.tracks_audio_features(track_ids=tracks_id)
                print(len(afs))
                for af in afs:
                    tracks_af.append(
                        [genre, af.danceability, af.energy, af.loudness, af.acousticness, af.instrumentalness,
                         af.liveness,
                         af.speechiness, af.valence])
                x = pd.DataFrame(tracks_af,
                                 columns=['genre', 'danceability', 'energy', 'loudness', 'acousticness',
                                          'instrumentalness',
                                          'liveness', 'speechiness', 'valence'])
                output = pd.concat([output, x])
            except AttributeError:
                print('tekore attribute error')
                continue
            except IndexError:
                print('playlist index error')
                continue
            except TypeError:
                print('audio features type error')
                continue
        return output
| SINEdowskY/spotify-songs-classification | spotify_data.py | spotify_data.py | py | 3,351 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tekore.request_client_token",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.CLIENT_ID",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "config.CLIENT_SECRET",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name":... |
30568657844 | import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["text.usetex"] = True  # render all figure text with LaTeX

# shared styling constants for every chart in this module
LEGEND_FONTSIZE = 20
TICK_LABEL_FONTSIZE = 20
AXIS_LABEL_FONTSIZE = 20
TITLE_FONTSIZE = 20
CHART_SIZE = [10, 6]  # default figsize in inches (width, height)
LONG_CHART_SIZE = [10, 10]  # taller variant (not used in this chunk)
def do_nothing_Rt_plot(Rt_dict, fname=None, ps=True):
    """Plot R_t against time for each R0 scenario.

    Rt_dict: maps an R0 value to {"t": times, "Rt": R_t values}.
    fname: if given, save the figure to this path.
    ps: if True, display the figure interactively.
    """
    fig, ax = plt.subplots(1, 1, figsize=CHART_SIZE)
    ax.set_xlabel("Time (days)", fontsize=AXIS_LABEL_FONTSIZE)
    ax.set_ylabel(r"$\mathcal{R}_t$", fontsize=AXIS_LABEL_FONTSIZE)
    # one curve per R0 scenario, labelled by its R0 value
    for R0 in Rt_dict:
        t_arr = Rt_dict[R0]["t"]
        Rt_arr = Rt_dict[R0]["Rt"]
        ax.plot(t_arr, Rt_arr, label=f"{R0:.1f}")
    ax.legend(
        loc="best",
        title=r"$\mathcal{R}_0$",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    if not (fname is None):
        fig.savefig(fname)
    if ps:
        plt.show()
def do_nothing_hospital_plot(region_dict, fname=None, ps=True):
    """Draw a 2x2 grid of hospital-occupancy panels, one per R0 scenario.

    region_dict: maps R0 -> per-region data (see hospital_do_nothing_plot);
    exactly four R0 keys are expected.  fname saves the figure, ps shows it.
    """
    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=CHART_SIZE)
    R0s = [k for k in region_dict]
    R0 = R0s[0]
    hospital_do_nothing_plot(ax[0, 0], region_dict[R0], R0, xlabel=False)
    # reset the colour cycle so each panel reuses the same colour per region
    # NOTE(review): this resets pyplot's *current* axes cycle, not necessarily
    # the panel just drawn — confirm it has the intended effect
    plt.gca().set_prop_cycle(None)
    R0 = R0s[1]
    hospital_do_nothing_plot(ax[0, 1], region_dict[R0], R0, xlabel=False)
    plt.gca().set_prop_cycle(None)
    R0 = R0s[2]
    hospital_do_nothing_plot(ax[1, 0], region_dict[R0], R0)
    plt.gca().set_prop_cycle(None)
    R0 = R0s[3]
    hospital_do_nothing_plot(ax[1, 1], region_dict[R0], R0)
    fig.suptitle(r"Beds Occupied $(\%N)$", fontsize=TITLE_FONTSIZE)
    if not (fname is None):
        fig.savefig(fname)
    if ps:
        plt.show()
def hospital_do_nothing_plot(ax, Hdict, R0, xlabel=True):
    """Plot hospital occupancy, scaled by pN, per region onto the given axes.

    Hdict: region -> {"t": times, "H": occupancy, "pN": scaling factor}
    (presumably converting to % of population N — confirm upstream).
    """
    for region in Hdict:
        pN = Hdict[region]["pN"]
        ax.plot(Hdict[region]["t"], Hdict[region]["H"] * pN, label=region)
    if xlabel:
        ax.set_xlabel("Time (days)", fontsize=AXIS_LABEL_FONTSIZE)
    ax.legend(
        loc="best",
        title="Region",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    ax.set_title(rf"$\mathcal{{R}}_0$ = {R0:.1f}", fontsize=TITLE_FONTSIZE)
def do_nothing_deaths_plot(region_dict, region_abm_dict, fname=None, ps=True):
    """Plot deaths vs R0 per region, with agent-based-model error bars.

    region_dict: region -> {"R0": values, "D": deaths} (continuous model).
    region_abm_dict: region -> {"R0", "D_mean", "D_lb", "D_ub"} giving the
        ABM mean and lower/upper bounds used for asymmetric error bars.
    fname saves the figure, ps shows it.
    """
    fig, ax = plt.subplots(1, 1, figsize=CHART_SIZE)
    for region in region_dict:
        (line,) = ax.plot(
            region_dict[region]["R0"],
            region_dict[region]["D"],
            label=region,
        )
        # asymmetric error bars: [mean - lower bound, upper bound - mean]
        yerr = [
            np.array(region_abm_dict[region]["D_mean"])
            - np.array(region_abm_dict[region]["D_lb"]),
            np.array(region_abm_dict[region]["D_ub"])
            - np.array(region_abm_dict[region]["D_mean"]),
        ]
        ax.errorbar(
            region_abm_dict[region]["R0"],
            region_abm_dict[region]["D_mean"],
            yerr,
            label=f"PR: {region}",
            fmt=".",
            c=line.get_color(),
            capsize=5,
            # c="k",
        )
    ax.set_xlabel(r"$\mathcal{R}_0$", fontsize=AXIS_LABEL_FONTSIZE)
    ax.set_ylabel(r"Dead individuals $(\%N)$", fontsize=AXIS_LABEL_FONTSIZE)
    ax.legend(
        loc="best",
        title="Region",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    if not (fname is None):
        fig.savefig(fname)
    if ps:
        plt.show()
| jvanyperen/exploring-interventions-manuscript | plotting_scripts/do_nothing_plots.py | do_nothing_plots.py | py | 3,549 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 15,
"usage_type": "call"
},
{
"api_na... |
2922309209 | import numpy as np
import json
def dump_to_file(arrays, filename):
    """Serialize a dict of values to JSON at `filename`.

    numpy arrays — including arrays one level deep inside dict values — are
    converted to lists; everything else passes through unchanged.  Unlike the
    original, the caller's nested dicts are not mutated in place, and a
    non-array value inside a nested dict no longer aborts conversion of the
    remaining values (the old bare `except: pass` did both).
    """
    arrays_for_dump = {}
    for key, array in arrays.items():
        if isinstance(array, np.ndarray):
            arrays_for_dump[key] = array.tolist()
        elif isinstance(array, dict):
            # build a new dict so the input is left untouched
            arrays_for_dump[key] = {
                k: (v.tolist() if isinstance(v, np.ndarray) else v)
                for k, v in array.items()
            }
        else:
            arrays_for_dump[key] = array
    with open(filename, 'w') as handle:
        json.dump(arrays_for_dump, handle, indent=2)
def load_from_file(filename):
with open(filename, 'r') as handle:
arrays_for_dump = json.load(handle)
arrays = {}
for key, array in arrays_for_dump.items():
if isinstance(array, list):
arrays[key] = np.asarray(array)
elif isinstance(array, dict):
try:
arrays[key] = {int(k):np.asarray(v) for k,v in array.items()}
except:
arrays[key] = array
else:
arrays[key] = array
return arrays | sdemyanov/tensorflow-worklab | classes/utils.py | utils.py | py | 951 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_numbe... |
32259680615 | # pylint: disable=W0613
from flask import request
from injector import inject
from app import app
from app.regali_app.list.application.use_cases import (
get_gift_list,
get_gift_lists,
delete_gift_list,
create_gift_list,
delete_gift_list_element,
create_gift_list_element
)
from app.regali_app.shared.infrastructure.routes.authentication import token_required
@inject
@app.route('/giftlists', methods=['POST'])
@token_required
def post_giftlist(
    current_user,
    use_case: create_gift_list.UseCase,
    request_data_transformer: create_gift_list.RequestDataTransformer
):
    """Create a gift list for the authenticated user from the POSTed payload."""
    # Turn the raw flask request into a use-case request bound to this user,
    # then let the use case produce the response body.
    return use_case.execute(
        request_data_transformer.transform(
            current_user.id,
            request
        )
    )
@inject
@app.route('/giftlists/<reference>', methods=['GET'])
@token_required
def get_giftlist(current_user, use_case: get_gift_list.UseCase, reference):
    """Fetch a single gift list by its reference."""
    return use_case.execute(get_gift_list.Request(reference))
@inject
@app.route('/giftlists', methods=['GET'])
@token_required
def get_giftlists(current_user, use_case: get_gift_lists.UseCase):
    """List every gift list visible to the authenticated user."""
    return use_case.execute()
@inject
@app.route('/giftlists/<reference>', methods=['DELETE'])
@token_required
def delete_giftlists(current_user, use_case: delete_gift_list.UseCase, reference):
    """Delete the gift list identified by `reference`."""
    use_case.execute(delete_gift_list.Request(reference))
    return {'message': 'List Deleted'}
@inject
@app.route('/giftlists/<reference>/elements', methods=['POST'])
@token_required
def post_giftlist_element(current_user, use_case: create_gift_list_element.UseCase, reference):
    """Add an element (by URL) to the gift list identified by `reference`."""
    # NOTE(review): a payload without 'url' raises KeyError -> 500; consider 400
    return use_case.execute(
        create_gift_list_element.Request(reference, request.json['url'])
    )
@inject
@app.route('/giftlists/<list_reference>/elements/<element_reference>', methods=['DELETE'])
@token_required
def delete_giftlist_element(
    current_user,
    use_case: delete_gift_list_element.UseCase,
    list_reference,
    element_reference
):
    """Remove one element from a gift list, addressed by both references."""
    use_case.execute(
        delete_gift_list_element.Request(
            list_reference,
            element_reference
        )
    )

    return {
        'message': 'List Element Deleted'
    }
| MikelDB/regali-app | api/app/regali_app/shared/infrastructure/routes/giftlist.py | giftlist.py | py | 2,209 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.regali_app.list.application.use_cases.create_gift_list.UseCase",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "app.regali_app.list.application.use_cases.create_gift_list",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "app.regali_app... |
30715666439 | import math
from typing import List
import os
from app.resources.logger import logger
def get_block_entropy(block: bytes, block_size: int) -> float:
    """Return the Shannon entropy (bits per byte, 0..8) of `block`.

    `block_size` is kept for interface compatibility but the *actual*
    length of `block` is used: the original divided counts by block_size,
    which under-reported the entropy of a file's final, shorter block.
    An empty block has entropy 0.0.
    """
    length = len(block)
    if length == 0:
        return 0.0
    # count occurrences of each byte value actually present
    counters = {}
    for byte in block:
        counters[byte] = counters.get(byte, 0) + 1
    # H = -sum(p * log2(p)) over observed byte probabilities
    entropy = 0.0
    for count in counters.values():
        probability = count / length
        entropy -= probability * math.log2(probability)
    return entropy
def get_file_entropy(file_name: str, block_size: int) -> List[float]:
    """Return per-block entropies of the file, each rounded to 2 decimals.

    The file is read in chunks of `block_size` bytes (the final chunk may be
    shorter).  Reading happens inside a `with` block so the handle is closed
    even if an error occurs — the original open/close pair leaked on error.
    """
    entropy_detail = []
    with open(file_name, "rb") as f:
        while True:
            block = f.read(block_size)
            if not block:
                break
            entropy = get_block_entropy(block, block_size)
            entropy_detail.append(float(f'{entropy:.2f}'))
    return entropy_detail
def get_entropy_summary(entropy_detail: List[float]) -> dict:
    """Count blocks below the low-entropy (< 2) and above the
    high-entropy (> 7) thresholds."""
    low_count = sum(1 for value in entropy_detail if value < 2)
    high_count = sum(1 for value in entropy_detail if value > 7)
    # NB: key spelling ("entryopy") kept as-is for API compatibility
    return {
        "low_entropy_blocks": low_count,
        "high_entryopy_blocks": high_count,
    }
def delete_saved_file(file_name: str) -> None:
    """Remove `file_name` if it exists; a missing file is silently ignored.

    Uses EAFP instead of the original exists()-then-remove(), which could
    race with a concurrent deletion between the check and the remove.
    """
    try:
        os.remove(file_name)
    except FileNotFoundError:
        pass
def generate_entropy_report(file_name: str, block_size: int) -> dict:
    """Compute the entropy report for an uploaded file, then delete the file.

    Returns {"entropyDetail": [per-block entropies], "summary": {...}}.
    The uploaded file is removed on both the success and the failure path;
    on failure the original exception is logged and re-raised.
    """
    try:
        entropy_detail = get_file_entropy(file_name, block_size)
        entropy_summary = get_entropy_summary(entropy_detail)
        response = {
            "entropyDetail": entropy_detail,
            "summary": entropy_summary
        }
        delete_saved_file(file_name)
        return response
    except Exception as e:
        logger.error(f"Exception raised: {e} , deleting saved file.")
        delete_saved_file(file_name)
        raise e
| jstrah00/entropy-service | app/entropy/entropy.py | entropy.py | py | 1,960 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.log2",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number":... |
71760953383 | """module used to create a matrix containing the distances between the postcodes that specify the locations of the
specified geographies"""
import os
import pandas as pd
from math import radians, sin, cos, sqrt, atan2
import csv
import datetime as dt
def calc_dist(inlat1, inlat2, inlong1, inlong2):
    """Great-circle (haversine) distance in km between two lat/long points.

    Note the argument order: both latitudes first, then both longitudes.
    Uses an Earth radius of 6373 km.
    """
    earth_radius_km = 6373.0
    phi1, phi2 = radians(inlat1), radians(inlat2)
    lam1, lam2 = radians(inlong1), radians(inlong2)
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    # haversine term, then the central angle between the two points
    hav = sin(dphi / 2)**2 + cos(phi1) * cos(phi2) * sin(dlam / 2)**2
    central_angle = 2 * atan2(sqrt(hav), sqrt(1 - hav))
    return earth_radius_km * central_angle
def produce_distance_matrix(input_data, geog, lat, long):
    """Write one CSV per geography code containing its distance to every
    other code in the input.

    Conceptually the joined output is a symmetric distance matrix:

            A  B  C
        A   0  2  4
        B   2  0  5
        C   4  5  0

    but it is written as one file per row (under 'lsoa distances/<code>.csv')
    because the full matrix would be huge for low-level geographies.

    Args:
        input_data: csv file with one row per item (lsoa, la, postcode, ...).
        geog: name of the column holding the geography codes.
        lat: name of the latitude column.
        long: name of the longitude column.

    NOTE(review): the per-file header hard-codes 'lsoa11cd' even though the
    code column is parameterised via `geog` — confirm whether other
    geographies are expected to share that header.
    """
    start_time = dt.datetime.now()
    print('Started at: ', start_time)
    # load lat and long data
    fields = [geog, lat, long]
    info_df = pd.read_csv(input_data, usecols=fields)
    # create lists from dataframe - faster to access than dataframes
    name_list = list(info_df[geog])
    lat_list = list(info_df[lat])
    long_list = list(info_df[long])
    # for each record in the input
    for i in range(0, len(info_df)):
        current = name_list[i]
        current_lat = lat_list[i]
        current_long = long_list[i]
        temp_out = []  # list to store distance results
        # calculate distances to all other records in input
        for j in range(0, len(info_df)):
            temp_name = name_list[j]
            temp_lat = lat_list[j]
            temp_long = long_list[j]
            dist = calc_dist(current_lat, temp_lat, current_long, temp_long)
            # investigate if putting zero distances to NaN makes finding min easier...
            # create lists of names and associated distances
            temp_out.append([temp_name, dist])
        # report on progress based on current records processed
        if i > 0 and i % 1000 == 0:
            # extrapolate remaining time from the fraction of rows done
            time_now = dt.datetime.now()
            time_left = ((time_now - start_time).seconds/(i/len(name_list))) - (time_now - start_time).seconds
            finish_time = time_now + dt.timedelta(seconds=time_left)
            print('Row ', i, 'reached. Projected finish time is: ', finish_time)
        temp_out = [['lsoa11cd', current]] + temp_out
        out_file = os.path.join(os.getcwd(), 'lsoa distances', current + ".csv")
        # write to output file
        # NOTE(review): open() without newline='' can emit blank rows on
        # Windows, and csv.writer is re-created for every row — review.
        with open(out_file, 'w') as myfile:
            for row in temp_out:
                wr = csv.writer(myfile)
                wr.writerow(row)
# location of input data — NB: changes the process-wide cwd, so the
# 'lsoa distances' output folder ends up under raw_inputs/
os.chdir(os.path.join(os.getcwd(), 'raw_inputs'))
# input data to include: LSOA codes with their lat/long columns
produce_distance_matrix('LSOA_L&L.csv', 'LSOA11CD', 'LATITUDE', 'LONGITUDE')
| ONSdigital/FOCUS | matrix_generation.py | matrix_generation.py | py | 3,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.radians",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_numbe... |
15855737914 | import threading
import socket
import json
import termcolor
import time
import numpy as np
import pandas as pd
host = '127.0.0.1'  # server IP (loopback: accepts local clients only)
port = 5555  # server port
# Create a new socket using the given address family, socket type and
# protocol number: AF_INET = IPv4, SOCK_STREAM = TCP (connection-based;
# the conversation lasts until one party disconnects or a network error).
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow the listening address/port to be reused straight after a restart.
server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server.bind((host, port))  # bind the listening socket to host/port
server.listen()
# parallel lists: clients[i] is the socket of the user named aliases[i]
clients = []
aliases = []
def specific_cast(message):
    """Deliver a message to the single client named in its receiver field.

    The receiver alias sits at message[2]; the matching socket is found via
    the parallel aliases/clients lists.
    """
    recipient_alias = message[2]
    recipient_socket = clients[aliases.index(recipient_alias)]
    reliable_send(recipient_socket, message)
# Function to handle clients'connections
def users_send():
    """Return every registered username from the users.csv database."""
    registered = pd.read_csv('users.csv')
    # Series.tolist() is equivalent to to_numpy().tolist() for this column
    return registered['username'].tolist()
def reliable_recv(target):
    """Receive one JSON message from `target`, buffering across recv() calls.

    Keeps reading 1024-byte chunks until the accumulated text parses as
    JSON, so messages larger than one chunk are handled.

    Raises:
        ConnectionResetError: when the peer closes the socket.  recv()
        returning b'' made the original busy-loop forever, repeatedly
        failing json.loads('') on a dead connection; callers
        (handle_client/receive) already treat exceptions as a disconnect.
    """
    data = ''
    while True:
        chunk = target.recv(1024)
        if not chunk:
            raise ConnectionResetError('connection closed while receiving')
        try:
            # rstrip drops trailing whitespace; UnicodeDecodeError is a
            # ValueError subclass, so a bad chunk is dropped and we keep
            # reading, matching the original behaviour
            data = data + chunk.decode().rstrip()
            return json.loads(data)
        except ValueError:
            continue
def reliable_send(target, data):
    """Serialize `data` as JSON text and send it over the `target` socket."""
    # json.dumps turns the python object into a string before transmission
    target.send(json.dumps(data).encode())
def handle_client(client):
    """Per-client receive loop, run on its own thread.

    Dispatches each incoming message on its header: index 0 is the message
    type, index 2 the receiver ('ALL' means broadcast).  Any exception —
    including a dropped connection — unregisters the client from both
    parallel lists and ends the thread.
    """
    while True:
        try:
            message = reliable_recv(client)
            if str(message[0])=='messaging' and str(message[2])=='ALL':
                # receiver 'ALL': fan the message out to every client
                broadcast(message)
            elif str(message[0])=='messaging':
                # otherwise deliver only to the named receiver
                specific_cast(message)
        except:
            # NOTE(review): bare except also swallows programming errors;
            # here it doubles as the disconnect/cleanup path
            index = clients.index(client)
            clients.remove(client)
            client.close()
            alias = aliases[index]
            aliases.remove(alias)
            # client removed from both registries; stop this thread
            break
# Main function to receive the clients connection
def auth(name,password):
    """Return True when (name, password) matches a row in users.csv.

    NOTE(review): row[0][1] assumes the password is the second column of the
    CSV — confirm the users.csv schema (username, password, ...).  Passwords
    appear to be stored/compared in plain text.
    """
    # select the row whose username matches `name`
    df=pd.read_csv('users.csv')
    row=df.loc[df['username']==name]
    isAuth=False
    row=np.array(row)
    if row.size:
        isAuth=row[0][1]==password
    return isAuth
def broadcast(message):
    """Send `message` to every currently connected client."""
    for connected_client in clients:
        reliable_send(connected_client, message)
def receive():
    """
    Main accept loop of the server.

    For every incoming connection:
      1. repeat the ['auth', name, password] handshake until auth()
         accepts the credentials, answering each attempt with
         ['auth_res', <bool>, <username list>];
      2. ask the client for its display alias and register the
         (client, alias) pair in the shared lists;
      3. spawn a handle_client worker thread for the connection.

    Relies on module globals `server`, `clients` and `aliases`
    (the listening socket is created elsewhere in this file).
    """
    while True:
        print(termcolor.colored('[+] Server is Running! Waiting For The Incoming Connections ...', 'green'))
        client, address = server.accept()
        isAuth=False
        while not isAuth:
            data=reliable_recv(client)
            time.sleep(0.05)
            if data[0]=='auth':
                isAuth=auth(data[1],data[2])
            users_list=users_send()
            time.sleep(0.050)
            # Tell the client whether its credentials were accepted,
            # together with the current username list for its UI.
            reliable_send(client,['auth_res',isAuth,users_list])
            time.sleep(0.050)
        time.sleep(2)
        print(termcolor.colored(str(address) + ' has connected!', 'green'))
        reliable_send(client,'alias?')
        alias = reliable_recv(client)
        aliases.append(alias)
        clients.append(client)
        print(f'The name of new client is {alias}')
        reliable_send(client,'you are now connected!')
        # One worker thread per client keeps the accept loop responsive.
        thread = threading.Thread(target=handle_client, args=(client,))
        thread.start()
receive() | adnankarim/python_socket_chat_client_client | server.py | server.py | py | 4,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_... |
from django.contrib.auth.models import User
from django.db import transaction
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views import generic
from django.views.generic import FormView

from atelier.forms import ProfileRegisterForm, ProfileChangeForm
from atelier.models import Profile
from atelier.views.base_view import AtelierFilterObjectsPreMixin, BaseListView, TailorPermissionPreMixin, \
    BaseDetailView, BaseDeleteView, BaseUpdateView
class ProfileDetailView(TailorPermissionPreMixin, AtelierFilterObjectsPreMixin, BaseDetailView):
    # Read-only detail page for one Profile; the mixins restrict access
    # to tailors and to profiles of the requesting user's atelier.
    model = Profile
    fields = '__all__'
class ProfileListView(AtelierFilterObjectsPreMixin, TailorPermissionPreMixin, BaseListView):
    # Paginated list of the atelier's profiles, tailor-only access.
    model = Profile
class ProfileCreateView(TailorPermissionPreMixin, FormView):
    """
    Register a new User together with its Profile in the current
    user's atelier.  Tailor-only (TailorPermissionPreMixin).
    """
    template_name = 'atelier/create_form.html'
    form_class = ProfileRegisterForm

    def get_initial(self):
        """
        Returns the initial data to use for atelier form field.
        """
        initial = super().get_initial()
        initial['atelier'] = self.request.user.profile.atelier
        return initial

    def get_success_url(self):
        return reverse_lazy('atelier:profile_list')

    def form_valid(self, form):
        # The default implementation for form_valid() simply redirects to
        # the success_url.  Create the auth User and its Profile inside a
        # single atomic transaction so that a failure while creating the
        # Profile cannot leave an orphaned User behind.
        with transaction.atomic():
            user = User.objects.create(
                email=form.cleaned_data['email'],
                username=form.cleaned_data['username'],
            )
            # set_password() hashes the raw password before storage.
            user.set_password(form.cleaned_data['password2'])
            user.save()
            Profile.objects.create(
                user=user,
                atelier=self.request.user.profile.atelier,
                is_tailor=form.cleaned_data['is_tailor'],
                created_by=self.request.user,
                last_updated_by=self.request.user,
            )
        return super().form_valid(form)
class ProfileChangeView(AtelierFilterObjectsPreMixin, TailorPermissionPreMixin, BaseUpdateView):
    """
    Edit an existing Profile's tailor flag and its User's e-mail.
    """
    model = Profile
    template_name = 'atelier/create_form.html'
    form_class = ProfileChangeForm

    def get_success_url(self):
        return reverse_lazy('atelier:profile_list')

    def get_profile_object(self):
        # Profile addressed by the URL's pk, fetched fresh from the DB.
        profile_id = self.kwargs.get('pk')
        return Profile.objects.get(id=profile_id)

    def get_initial(self):
        # Fetch the profile once instead of issuing two identical
        # queries (the original called get_profile_object() twice).
        profile = self.get_profile_object()
        return {
            'email': profile.user.email,
            'is_tailor': profile.is_tailor,
        }

    def form_valid(self, form):
        # The default implementation for form_valid() simply redirects to the success_url.
        profile = self.get_profile_object()
        profile.is_tailor = form.cleaned_data['is_tailor']
        profile.user.email = form.cleaned_data['email']
        profile.last_updated_by = self.request.user
        profile.full_clean()  # validate before touching the database
        profile.save()
        profile.user.save()
        return super().form_valid(form)
class ProfileDeleteView(TailorPermissionPreMixin, AtelierFilterObjectsPreMixin, BaseDeleteView):
    # Deleting a "profile" actually deletes the owning User; the Profile
    # row then disappears as well (presumably via an on_delete cascade on
    # Profile.user -- TODO confirm against the Profile model).
    model = Profile
    success_url = reverse_lazy('atelier:profile_list')
    template_name = 'atelier/delete_form.html'

    def get_user_object(self):
        # Resolve the User owning the Profile addressed by the URL pk.
        profile_id = self.kwargs.get('pk')
        profile = Profile.objects.get(pk=profile_id)
        return profile.user

    def delete(self, request, *args, **kwargs):
        """
        Overriding the delete() method to delete User instances, and according Profile instance will be deleted too.
        """
        self.object = self.get_user_object()
        success_url = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(success_url)

    def get_success_url(self):
        return reverse_lazy('atelier:profile_list')
| Vitamal/vokss | atelier/views/profile_view.py | profile_view.py | py | 3,721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "atelier.views.base_view.TailorPermissionPreMixin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "atelier.views.base_view.AtelierFilterObjectsPreMixin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "atelier.views.base_view.BaseDetailView",
... |
8688324279 | import sys
import gammalib
from utils import *
import numpy as np
import matplotlib.pyplot as plt
# first input is XML file name
models = gammalib.GModels(sys.argv[1])

# second and third input are minimum and maximum energy in TeV
emin = float(sys.argv[2])
emax = float(sys.argv[3])

lons, lats, radii, fluxes, names = dist_from_gammalib(models, emin=emin, emax=emax)

# logarithmic binning spanning the full flux range (10 bins per decade)
logs_min = int(np.floor(np.log10(np.min(fluxes))))
logs_max = int(np.ceil(np.log10(np.max(fluxes))))
nbins = 10 * (logs_max - logs_min)
bins_lognlogs = np.logspace(logs_min, logs_max, nbins)

fig1 = plt.figure('LogNLogS')
ax1 = plt.subplot()
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel("Flux {}-{} TeV (Crab units)".format(emin, emax), fontsize=14)
ax1.set_ylabel('Number of sources (> Flux)', fontsize=14)
format_ax(ax1)
# reverse-cumulative histogram = N(>S)
ax1.hist(fluxes, bins=bins_lognlogs, density=False, histtype='step', cumulative=-1)

# Optional fourth argument: report sources brighter than 1 Crab.
# An explicit argv length check replaces the original bare try/except,
# which silently swallowed *every* error raised inside the loop.
if len(sys.argv) > 4 and sys.argv[4] == 'check_flux':
    for s, flux in enumerate(fluxes):
        if flux > 1.:
            msg = "Source {} has flux of {} Crab".format(names[s], flux)
            print(msg)
plt.show() | cta-observatory/cta-gps-simulation-paper | skymodel/scripts/logNlogS_fromgammalib.py | logNlogS_fromgammalib.py | py | 1,141 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gammalib.GModels",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_numbe... |
19075073256 | import matplotlib.pyplot as plt
import math
def pre_processing(filename):
    """
    Read a TSP instance and build its distance matrix.

    File format: first line = number of cities N, then N lines of
    "x y" coordinates.  Shows a scatter plot of the cities as a side
    effect (plt.show() blocks until the window is closed).

    Returns (adj_mat, n) -- note the order: the matrix first, then the
    city count.  adj_mat is (n+1) x (n+1) with 1-based city indices;
    row/column 0 are unused.
    """
    file = open(filename)
    lines = file.readlines()
    # cities[0] is a dummy so that city i lives at cities[i] (1-based).
    cities = [[]]
    n = int(lines[0])
    for l in lines[1:]:
        cities.append(list(map(float, l.split())))
    adj_mat = [[None] * (n+1) for i in range(n+1)]
    x = [float(i[0]) for i in cities[1:]]
    y = [float(i[1]) for i in cities[1:]]
    plt.scatter(x, y)
    plt.show()
    for i in range(1, n+1):
        for j in range(1, n+1):
            if adj_mat[j][i]:
                # Presumably intended to reuse the symmetric entry
                # instead of recomputing -- TODO confirm, as it reads and
                # writes the same cell [j][i].
                adj_mat[j][i] = adj_mat[i][j]
            else:
                c1 = cities[i]
                c2 = cities[j]
                adj_mat[j][i] = math.sqrt((c1[0] - c2[0])**2 + (c1[1] - c2[1])**2)
    return adj_mat, n
def get_bin(n, bits):
    """
    Enumerate every `bits`-bit integer with exactly n bits set.

    Fixes the highest set bit at position `digit` and combines it with
    every (n-1)-bit pattern strictly below that position, preserving the
    original enumeration order.
    """
    if n == 0:
        return [0]
    patterns = []
    for digit in range(n - 1, bits):
        top_bit = 1 << digit
        patterns.extend(top_bit | rest for rest in get_bin(n - 1, digit))
    return patterns
def get_sets(n):
    """
    Group every subset of the n cities by size.

    Returns a dict mapping subproblem size m -> list of bitmask-encoded
    subsets containing exactly m cities.
    """
    return {size: get_bin(size, n) for size in range(n + 1)}
def get_ele(set_hash):
    """
    Decode a subset bitmask into the set of city numbers it contains.

    Bit 0 (city 1) is deliberately skipped: city 1 is the fixed start of
    every tour, so only cities >= 2 are reported.
    """
    members = set()
    city = 2
    bits = set_hash >> 1
    while bits:
        if bits & 1:
            members.add(city)
        city += 1
        bits >>= 1
    return members
def TSP(n,adj_mat):
    """
    Held-Karp dynamic program for the travelling salesman problem.

    A[(S, j)] holds the cost of the cheapest path that starts at city 1,
    visits exactly the cities in bitmask S, and ends at city j.
    Returns the full table A; the caller closes the tour back to city 1.
    """
    sets = get_sets(n)
    A = {}
    #deal with base case
    for i in range(1<<n):
        A[i,1] = float("inf")
    A[1,1] = 0
    for m in range(2, n+1):
        print('m=', m)
        for subset in sets[m]:
            if subset & 1:
                # Only subsets containing city 1 (bit 0) are reachable.
                elements = get_ele(subset)
                for j in elements:
                    min_ = float("inf")
                    for k in range(1, n+1):
                        if k!= j:
                            if (subset ^ 1<<j-1,k) in A:
                                # Best way to end at j: arrive from some
                                # k in S \ {j}.
                                min_ = min(min_, A[subset ^ 1<<j-1, k] + adj_mat[k][j])
                    A[subset,j] = min_
    return A
if __name__ == '__main__':
    # pre_processing returns (adj_mat, n); the original code unpacked
    # the pair in the wrong order, passing the matrix where the city
    # count was expected (and vice versa).
    adj_mat, n = pre_processing('tsp.txt')
    A = TSP(n, adj_mat)
    res = float("inf")
    # Close the tour: cheapest path visiting all cities ending at j,
    # plus the edge from j back to the start city 1.
    for j in range(2, n+1):
        res = min(res, A[(1<<n)-1, j] + adj_mat[j][1])
print(res) | LouisYLWang/Algorithms | Traveling_salesman_problem/tsp.py | tsp.py | py | 2,261 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ma... |
1535928941 | import os
import logging
import shutil
import json
from packratAgent.Collection import Collection
from packratAgent.LocalRepoManager import LocalRepoManager, hashFile
MODULE_OWNER = 'packrat'
class AnsibleGalaxyManager( LocalRepoManager ):
  """
  LocalRepoManager implementation that publishes Ansible Galaxy collections.

  Mirrors the Galaxy v2 REST layout on disk: collection tarballs live under
  downloads/<namespace>/ and the JSON API documents live under
  api/collections/<namespace>/<name>/versions/.
  """
  def __init__( self, *args, **kargs ):
    super().__init__( *args, **kargs )
    # entry_map[ namespace ][ name ][ filename ] = ( version, size, sha256, metadata )
    self.entry_map = {}

  @staticmethod
  def _parseFilename( filename ):
    """
    Split '<namespace>-<name>-<version>.tar.gz' into ( namespace, name, version ).

    The previous code used version.strip( '.tar.gz' ), which strips a
    *character set* rather than the suffix, and split( '-' ) without a
    maxsplit, which raised on versions containing '-' (e.g. '1.0.0-beta1').
    """
    base = filename
    if base.endswith( '.tar.gz' ):
      base = base[ :-len( '.tar.gz' ) ]
    ( namespace, name, version ) = base.split( '-', 2 )
    return ( namespace, name, version )

  def filePaths( self, filename, distro, distro_version, arch ):
    # All paths belonging to one collection artifact: its two version
    # index documents and the tarball itself.
    ( namespace, name, version ) = self._parseFilename( filename )
    name_path = os.path.join( self.repo_dir, 'api', 'collections', namespace, name, 'versions' )
    return [
        os.path.join( name_path, 'index.html' ),
        os.path.join( name_path, version, 'index.html' ),
        os.path.join( self.repo_dir, 'downloads', namespace, filename )
    ]

  def metadataFiles( self ):
    # Only the top-level API discovery document is global metadata.
    return [ os.path.join( self.repo_dir, 'api', 'index.html' ) ]

  def addEntry( self, type, filename, distro, distro_version, arch ):
    """
    Register a previously loadFile()-ed tarball in the in-memory entry map.
    """
    if type != 'galaxy':
      logging.warning( 'ansiblegalaxy: New entry not a ansible, skipping...' )
      return

    logging.debug( 'ansiblegalaxy: Got Entry for package: "%s"', filename )
    ( namespace, name, version ) = self._parseFilename( filename )

    if namespace not in self.entry_map:
      self.entry_map[ namespace ] = {}

    if name not in self.entry_map[ namespace ]:
      self.entry_map[ namespace ][ name ] = {}

    dir_path = os.path.join( self.repo_dir, 'downloads', namespace )
    file_path = os.path.join( dir_path, filename )
    size = os.path.getsize( file_path )
    ( _, sha256, _ ) = hashFile( file_path )
    collection = Collection( file_path )

    self.entry_map[ namespace ][ name ][ filename ] = ( version, size, sha256, collection.metadata )

  def removeEntry( self, filename, distro, distro_version, arch ):
    ( namespace, name, version ) = self._parseFilename( filename )
    try:
      del self.entry_map[ namespace ][ name ][ filename ]
      # Prune now-empty name/namespace levels so writeMetadata() does
      # not emit empty index documents.
      if not self.entry_map[ namespace ][ name ]:
        del self.entry_map[ namespace ][ name ]
      if not self.entry_map[ namespace ]:
        del self.entry_map[ namespace ]
    except KeyError:
      logging.warning( 'ansiblegalaxy: unable to remove entry "%s", ignored.', filename )

  def loadFile( self, filename, temp_file, distro, distro_version, arch ):
    # Move the uploaded temp file into downloads/<namespace>/<filename>.
    ( namespace, name, _ ) = self._parseFilename( filename )
    dir_path = os.path.join( self.repo_dir, 'downloads', namespace )
    if not os.path.exists( dir_path ):
      os.makedirs( dir_path )

    file_path = os.path.join( dir_path, filename )
    shutil.move( temp_file, file_path )

  def writeMetadata( self ):
    """
    Render the whole Galaxy API document tree from entry_map.
    """
    api_path = os.path.join( self.repo_dir, 'api' )
    if not os.path.exists( api_path ):
      os.makedirs( api_path )

    api_metadata_map = {
                         'description': '{0} - {1}'.format( self.mirror_description, self.repo_description ),
                         'available_versions': { 'v2': '' }
                       }

    # 'with' blocks close the files promptly; the old code left the
    # descriptors open until garbage collection.
    with open( os.path.join( api_path, 'index.html' ), 'w' ) as fp:
      fp.write( json.dumps( api_metadata_map ) )

    collections_path = os.path.join( api_path, 'collections' )
    if not os.path.exists( collections_path ):
      os.makedirs( collections_path )

    for namespace in self.entry_map.keys():
      for name in self.entry_map[ namespace ].keys():
        versions_path = os.path.join( collections_path, namespace, name, 'versions' )
        if not os.path.exists( versions_path ):
          os.makedirs( versions_path )

        result_list = []
        for filename, entry in self.entry_map[ namespace ][ name ].items():
          result_list.append( { 'version': entry[0] } )
          entry_map = {
                        'version': entry[0],
                        'namespace': { 'name': namespace },
                        'collection': { 'name': name },
                        'download_url': '{0}/downloads/{1}/{2}'.format( self.repo_url, namespace, filename ),
                        'artifact': { 'size': entry[1], 'sha256': entry[2] },
                        'metadata': entry[3]
                      }
          entry_path = os.path.join( versions_path, entry[0] )
          if not os.path.exists( entry_path ):
            os.makedirs( entry_path )

          with open( os.path.join( entry_path, 'index.html' ), 'w' ) as fp:
            fp.write( json.dumps( entry_map ) )

        version_map = {
                        'count': len( result_list ),
                        'next': None,
                        'previous': None,
                        'results': result_list
                      }

        logging.debug( 'ansiblegalaxy: writing version index for "%s"', namespace )
        with open( os.path.join( versions_path, 'index.html' ), 'w' ) as fp:
          fp.write( json.dumps( version_map ) )
| pnhowe/packrat-agent | packratAgent/AnsibleGalaxyManager.py | AnsibleGalaxyManager.py | py | 4,856 | python | en | code | null | github-code | 36 | [
{
"api_name": "packratAgent.LocalRepoManager.LocalRepoManager",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name... |
2251848233 | import argparse
import pygame as pg
def create_circle(size, circle_color, background_color, filename):
    """Render a filled circle of radius *size*, centred on the canvas, into *filename*."""
    canvas = pg.display.set_mode((2 * size, 2 * size))
    canvas.fill(background_color)
    center = (canvas.get_width() // 2, canvas.get_height() // 2)
    pg.draw.circle(canvas, circle_color, center, size)
    pg.image.save(canvas, filename)
def create_cell(genome):
    """
    Render the cell sprite for a 2-bit *genome* string and save it to
    images/<genome>.png.

    '00' -> a plain 20x20 black square
    '01' -> black square plus a right-pointing head triangle
    '10' -> centre square, left tail triangle and right head triangle
    any other value ('11') -> as '10' plus a lower fin on a 60x40 canvas

    The original rebuilt a mostly unused colour dictionary on every call
    and duplicated the shared body polygons between the '10' and '11'
    branches; both are removed here without changing the output images.
    """
    black = (0, 0, 0)
    white = (255, 255, 255)
    if genome == '00':
        screen = pg.display.set_mode((20, 20))
        screen.fill(black)
    elif genome == '01':
        screen = pg.display.set_mode((40, 20))
        screen.fill(white)
        pg.draw.rect(screen, black, (0, 0, 20, 20))
        pg.draw.polygon(screen, black, [(20, 0), (20, 20), (40, 10)])
    else:
        # '10' and everything else share the same body; non-'10' genomes
        # (the '11' fallback) add a lower fin on a taller canvas.
        tall = genome != '10'
        screen = pg.display.set_mode((60, 40 if tall else 20))
        screen.fill(white)
        pg.draw.polygon(screen, black, [(20, 0), (40, 0), (40, 20), (20, 20)])
        pg.draw.polygon(screen, black, [(0, 10), (20, 0), (20, 20)])
        pg.draw.polygon(screen, black, [(40, 0), (40, 20), (60, 10)])
        if tall:
            pg.draw.polygon(screen, black, [(20, 20), (40, 20), (30, 40)])
    pg.image.save(screen, 'images/' + genome + '.png')
def create_triangle(colors):
    """Draw a 20x20 right-pointing black triangle on white and save it to images/triangle.png."""
    surface = pg.display.set_mode((20, 20))
    surface.fill(colors['white'])
    vertices = [(0, 0), (0, 20), (20, 10)]
    pg.draw.polygon(surface, colors['black'], vertices)
    pg.image.save(surface, 'images/triangle.png')
if __name__ == '__main__':
    # Palette shared by the drawing helpers; only a subset is used here.
    colors = {'red': (255,0,0), 'green': (0,255,0), 'blue': (0,0,255), 'darkBlue': (0,0,128),
              'white': (255,255,255), 'black': (0,0,0), 'pink': (255,200,200)}
    # create_circle(30, colors['black'], colors['white'], 'images/black_circle_30.png')
    # create_triangle(colors)
    create_cell('11')
| thbeucher/Games | life_games/create_image.py | create_image.py | py | 2,507 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.d... |
5750408592 | import nltk
from nltk.tokenize import sent_tokenize
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('gutenberg')
from tqdm import tqdm
import string
from collections import Counter
from flair.models import SequenceTagger
from flair.data import Sentence
book = nltk.corpus.gutenberg.raw('carroll-alice.txt')
chapters = book.split('CHAPTER')
summary = ''
print(f"NER MODEL")

# Load the Flair named-entity-recognition model ONCE.  The original
# loaded it inside the per-chapter loop, re-reading the whole model from
# disk on every iteration.
tagger = SequenceTagger.load('ner')

# For each chapter, run NER
for i in range(1, len(chapters)):
    temp = chapters[i]
    title = temp.split('\n')[0]
    # Flatten the chapter into one whitespace-normalised string.
    chapters[i] = chapters[i].replace('\n', ' ')
    chapters[i] = chapters[i].replace('\r', ' ')
    chapters[i] = chapters[i].replace('\'', ' ')
    sent = sent_tokenize(chapters[i])

    # Collect every person (PER) and location (LOC) entity.
    characters = []
    locations = []
    for line in tqdm(sent):
        sentence = Sentence(line)
        tagger.predict(sentence)
        for entity in sentence.get_spans('ner'):
            if entity.get_label("ner").value == 'PER':
                characters.append(entity.text)
            elif entity.get_label("ner").value == 'LOC':
                locations.append(entity.text)

    # Remove any punctuation within the names.
    names = [name.translate(str.maketrans('', '', string.punctuation)) for name in characters]

    # List characters by mention frequency, manually filtering words that
    # are not character names.  (The original also built an unused
    # expanded 'result' list, dropped here.)
    not_names = ['Well', 'Ive', 'Five', 'Theyre', 'Dont', 'Wow', 'Ill', 'Miss', 'Hush', 'Yes']
    common = []
    main_freq = []
    for n, c in Counter(names).most_common():
        if n not in not_names:
            main_freq.append((n, c))
            common.append(n)

    summary += f"Chapter{title}:\n Character List: {common}\n Locations: {list(set(locations))}\n"
    summary += "---------------------------------------------\n"

with open('charactersLocations.txt', 'w') as f:
    f.write(summary)
| francelow/eBookSummarizer | NER_model.py | NER_model.py | py | 2,245 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.gutenberg.raw",
... |
74050091304 | from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Tuple
from parlai.core.image_featurizers import ImageLoader
from parlai.core.message import Message
from parlai.core.worlds import validate
from parlai.crowdsourcing.tasks.model_chat.utils import Compatibility, get_image_src
from parlai.crowdsourcing.tasks.model_chat.worlds import (
BaseModelChatWorld,
get_bot_worker,
)
class ModelImageChatWorld(BaseModelChatWorld):
    """
    A chat world in which an image is shown to the worker and bot at the beginning.

    The human worker sees a stringified rendering of the image, while the
    bot observes a featurized version produced by the model's ImageLoader.
    """
    def __init__(self, opt, agent, bot, image_idx: int, image_act: Message):
        super().__init__(opt, agent=agent, bot=bot)
        self.image_stack = opt['image_stack']
        self.image_idx = image_idx
        self.image_act = image_act
        # Get a stringified version of the image to show the user
        orig_image = self.image_act['image']
        self.image_src = get_image_src(image=orig_image)
        # Get a featurized version of the image to show the bot
        with NamedTemporaryFile(suffix='.jpg') as f:
            orig_image.save(f)
            image_loader = ImageLoader(self.bot.model_agent.opt)
            self.image_act.force_set('image', image_loader.load(f.name))
    def _run_initial_turn(self) -> None:
        """
        Show the image to the human and bot, and show the bot's response to the human.
        """
        system_id = 'SYSTEM'
        system_agent_idx = None
        # Show the image to the human
        image_act_for_human = {
            'episode_done': False,
            'id': system_id,
            'text': f"""Welcome! You'll now have a conversation with your partner.
<-- FIRST, YOUR PARTNER WILL SAY SOMETHING ABOUT THIS IMAGE TO YOUR LEFT.
Be sure to talk about this image a little bit before discussing other things!
""",
            'task_data': {'image_src': self.image_src},
            'agent_idx': system_agent_idx,
        }
        self.agent.observe(validate(image_act_for_human))
        # Show the image to the bot
        image_act = {
            **self.image_act,
            'episode_done': False,
            'id': system_id,
            'agent_idx': system_agent_idx,
        }
        self.bot.observe(validate(image_act))
        del image_act['image']
        # Don't save the image features to disk
        # Have the bot respond
        bot_first_act_raw = self.bot.act()
        bot_first_act_raw = Message(
            Compatibility.maybe_fix_act(bot_first_act_raw)
        ).json_safe_payload()
        bot_first_act_raw['id'] = self.bot.agent_id
        self.agent.observe(validate(bot_first_act_raw))
        bot_first_act = {
            'episode_done': False,
            'id': bot_first_act_raw['id'],
            'text': bot_first_act_raw['text'],
            'agent_idx': 1,
        }
        # Record lines of dialogue
        self.dialog.append(image_act)
        self.dialog.append(bot_first_act)
    def _postprocess_acts(self, acts: List[dict], agent_idx: int):
        """
        Show the bot the image again on every turn.
        """
        if agent_idx == 0:
            # Add the image to every human act, seen by the bot. Also adds in any other
            # image-related fields needed by the model
            for key, value in self.image_act.items():
                if key not in ['episode_done', 'id', 'text', 'agent_idx']:
                    acts[agent_idx][key] = value
    def get_final_chat_data(self) -> Dict[str, Any]:
        """
        Add image-specific fields to the final chat data.
        """
        data = super().get_final_chat_data()
        data['image_idx'] = self.image_idx
        return data
    def _prepare_acceptability_checking(self) -> Tuple[List[str], List[str]]:
        """
        Apply acceptability checking params specific to image-chat conversation.
        The conversation starts with an image, so the human shouldn't be starting their
        first message with "Hi", etc.
        """
        human_messages, violation_types = super()._prepare_acceptability_checking()
        violation_types.append('penalize_greetings')
        return human_messages, violation_types
    def shutdown(self):
        """
        Release resources; if the worker never finished the HIT, return the
        image slot to the stack so another worker can pick it up.
        """
        if not self.chat_done:
            # If the HIT was not completed, remove this worker from the stack
            worker = self.agent.mephisto_agent.get_worker().db_id
            self.image_stack.remove_worker_from_stack(
                worker=worker, stack_idx=self.image_idx
            )
        self.agent.shutdown()
def make_world(opt, agents):
    """
    Factory used by the crowdsourcing task runner: pair the (single)
    human agent with a bot and an image drawn from the shared stack.
    """
    # We are showing an image to the worker and bot, so grab the image path and other
    # context info
    image_idx, model_name, no_more_work = opt['image_stack'].get_next_image(
        agents[0].mephisto_agent.get_worker().db_id
    )
    full_image_context = opt['image_contexts'][image_idx]
    if no_more_work:
        # There are no more HITs for this worker to do, so give them a qualification
        # (used to block them from receiving further assignments of this task)
        agents[0].mephisto_agent.get_worker().grant_qualification(
            qualification_name=opt['block_qualification'], value=1
        )
    # Get a bot agent
    bot_worker = get_bot_worker(opt=opt, model_name=model_name)
    return ModelImageChatWorld(
        opt=opt,
        agent=agents[0],
        bot=bot_worker,
        image_idx=image_idx,
        image_act=full_image_context['image_act'],
    )
def get_world_params():
    """World configuration consumed by the task runner: one human agent."""
    return {"agent_count": 1}
| facebookresearch/ParlAI | parlai/crowdsourcing/tasks/model_chat/worlds_image_chat.py | worlds_image_chat.py | py | 5,459 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "parlai.crowdsourcing.tasks.model_chat.worlds.BaseModelChatWorld",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "parlai.core.message.Message",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "parlai.crowdsourcing.tasks.model_chat.utils.get_image... |
1428891150 | import argparse
import shutil
import numpy as np
from matplotlib import image,pyplot
import os
import cv2
import json
parser = argparse.ArgumentParser()
parser.add_argument('--MaskedImageFolder', type=str)
parser.add_argument('--FullImageFolder', type=str)
args = parser.parse_args()

# Layout: X/ holds masked images, Y/ holds the masks; outputs mirror it.
pathX = os.path.join(args.MaskedImageFolder, "X/")
pathY = os.path.join(args.MaskedImageFolder, "Y/")
savepathX = os.path.join(args.FullImageFolder, "X/")
savepathY = os.path.join(args.FullImageFolder, "Y/")
flist = os.path.join(args.FullImageFolder, "imagefiles.flist")
jsonfile = os.path.join(args.MaskedImageFolder, "maskdata.json")
shutil.copy(jsonfile, args.FullImageFolder)

filesX = sorted(os.listdir(pathX))

with open(jsonfile, encoding='utf-8') as jfile:
    mask_info = json.loads(jfile.read())

# open(..., 'w') creates the flist when missing, so the old os.mknod()
# call (unportable; fails on macOS) is unnecessary; the context manager
# also guarantees the handle is flushed and closed, which the original
# never did.
with open(flist, "w") as fo:
    for fileX in filesX:
        imgX = cv2.imread(os.path.join(pathX, fileX), -1)  # masked image
        imgY = cv2.imread(os.path.join(pathY, fileX), -1)  # mask
        if imgX.ndim == 3:
            origIm = imgX + imgY
            mask = np.zeros_like(imgX[:, :, 0])
        elif imgX.ndim == 2:
            if imgY.ndim == 3:
                imgY = imgY[:, :, 0]
            origIm = imgX + imgY
            mask = np.zeros_like(imgX)
        else:
            # Same as the original: silently skip unexpected shapes.
            continue
        # Rebuild the binary mask from the recorded square patches.
        for x, y, size in mask_info[fileX]:
            mask[x:x + size, y:y + size] = 1
        # Save the reconstructed image (colour via cv2, grayscale via
        # matplotlib, matching the original per-branch choice) and mask.
        if imgX.ndim == 3:
            cv2.imwrite(savepathX + fileX, origIm)
        else:
            pyplot.imsave(savepathX + fileX, origIm, cmap='gray')
        pyplot.imsave(savepathY + fileX, mask, cmap='gray')
        # Record the reconstructed image path in the flist file.
        fo.write("%s\n" % (savepathX + fileX))
| unlugi/gen-inpainting-eccv | prepare_dataset_2.py | prepare_dataset_2.py | py | 2,188 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"... |
43777238511 | import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import time
MIN_MATCH_COUNT = 10  # minimum "good" matches required to estimate a homography

# Load the reference (sample) image and the query image as grayscale.
img_sample = cv.cvtColor(cv.imread("./img/dataset/9.png",cv.IMREAD_COLOR),cv.COLOR_BGR2GRAY)
img_q = cv.cvtColor(cv.imread("./img/query/3.png",cv.IMREAD_COLOR),cv.COLOR_BGR2GRAY)

# Detect SIFT keypoints and descriptors in both images.
sift = cv.SIFT_create()
keypoints_1, descriptors_1 = sift.detectAndCompute(img_q, None)
keypoints_2, descriptors_2 = sift.detectAndCompute(img_sample, None)
outImage_1 = cv.drawKeypoints(img_q, keypoints_1,None)
outImage_2 = cv.drawKeypoints(img_sample, keypoints_2,None)
print(len(keypoints_1))
print(len(keypoints_2))
#cv.imwrite('image.jpg', outImage_1)
#cv.waitKey(0)
def BFMatcher(descript_1, descript_2):
    """Brute-force kNN matching (k=2) between two descriptor sets."""
    matcher = cv.BFMatcher()
    return matcher.knnMatch(descript_1, descript_2, k=2)
def FLANNMatcher(descript_1, descript_2):
    """Approximate kNN matching (k=2) using FLANN with a KD-tree index."""
    FLANN_INDEX_KDTREE = 1
    matcher = cv.FlannBasedMatcher(
        dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
        dict(checks=50),
    )
    return matcher.knnMatch(descript_1, descript_2, k=2)
start = time.time()
matches = BFMatcher(descriptors_1, descriptors_2)
# matches = FLANNMatcher(descriptors_1, descriptors_2)
end = time.time()
print("time cost: ", end - start)

# Lowe's ratio test: keep a match only when it is clearly better than
# the second-best candidate.
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
print("match pairs: ", len(good))

sample_draw = cv.merge((img_sample.copy(), img_sample.copy(), img_sample.copy()))
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([ keypoints_1[m.queryIdx].pt for m in good ]).reshape(-1, 1, 2)
    dst_pts = np.float32([ keypoints_2[m.trainIdx].pt for m in good ]).reshape(-1, 1, 2)
    # Estimate the homography with RANSAC and project the query image
    # outline into the sample image.
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img_q.shape
    pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
    dst = cv.perspectiveTransform(pts, M)
    img_sample_detected = cv.polylines(sample_draw, [np.int32(dst)], True, (255, 0, 0), 5, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
    # BUG FIX: img_sample_detected was only assigned in the success
    # branch, so cv.drawMatches below crashed with a NameError whenever
    # too few matches were found.  Fall back to the plain sample image.
    img_sample_detected = sample_draw

query_draw = cv.merge((img_q.copy(), img_q.copy(), img_q.copy()))
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv.drawMatches(query_draw, keypoints_1, img_sample_detected, keypoints_2, good, None, **draw_params)
plt.imshow(img3, 'gray'),plt.show()
| Laurie-xzh/AI-Practice | CV/Point_Feature_Match/test.py | test.py | py | 2,845 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"... |
15560983692 | import argparse
import os
import subprocess
import sys
from pathlib import Path
_SWIFT_PATH = Path(__file__).resolve().parents[1]
_KNOWN_SCRIPT_PATHS = [
_SWIFT_PATH / "benchmark/scripts/Benchmark_Driver",
_SWIFT_PATH / "benchmark/scripts/Benchmark_DTrace.in",
_SWIFT_PATH / "benchmark/scripts/Benchmark_GuardMalloc.in",
_SWIFT_PATH / "benchmark/scripts/Benchmark_QuickCheck.in",
_SWIFT_PATH / "benchmark/scripts/Benchmark_RuntimeLeaksRunner.in",
_SWIFT_PATH / "benchmark/scripts/run_smoke_bench",
_SWIFT_PATH / "docs/scripts/ns-html2rst",
_SWIFT_PATH / "test/Driver/Inputs/fake-toolchain/ld",
_SWIFT_PATH / "utils/80+-check",
_SWIFT_PATH / "utils/backtrace-check",
_SWIFT_PATH / "utils/build-script",
_SWIFT_PATH / "utils/check-incremental",
_SWIFT_PATH / "utils/coverage/coverage-build-db",
_SWIFT_PATH / "utils/coverage/coverage-generate-data",
_SWIFT_PATH / "utils/coverage/coverage-query-db",
_SWIFT_PATH / "utils/coverage/coverage-touch-tests",
_SWIFT_PATH / "utils/dev-scripts/blockifyasm",
_SWIFT_PATH / "utils/dev-scripts/split-cmdline",
_SWIFT_PATH / "utils/gyb",
_SWIFT_PATH / "utils/line-directive",
_SWIFT_PATH / "utils/PathSanitizingFileCheck",
_SWIFT_PATH / "utils/recursive-lipo",
_SWIFT_PATH / "utils/round-trip-syntax-test",
_SWIFT_PATH / "utils/rth",
_SWIFT_PATH / "utils/run-test",
_SWIFT_PATH / "utils/scale-test",
_SWIFT_PATH / "utils/submit-benchmark-results",
_SWIFT_PATH / "utils/swift_build_support/tests/mock-distcc",
_SWIFT_PATH / "utils/symbolicate-linux-fatal",
_SWIFT_PATH / "utils/update-checkout",
_SWIFT_PATH / "utils/viewcfg",
]
_INSTALL_BLACK_MESSAGE = """\
The black Python package is required for formatting, but it was not found on
your system.
You can install it using:
python3 -m pip install black
For more help, see https://black.readthedocs.io.
"""
def _get_python_sources():
    """Returns a list of path objects for all known Python sources in the Swift
    project.
    """
    sources = [path for path in _SWIFT_PATH.rglob("*.py")]
    sources.extend(_KNOWN_SCRIPT_PATHS)
    return sources
def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
"-m",
"pip",
"show",
"--quiet",
name,
]
with open(os.devnull, "w") as devnull:
status = subprocess.call(command, stderr=devnull)
return not status
def parse_args():
    """Build and evaluate the command-line options for the formatter."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "paths",
        type=Path,
        metavar="PATH",
        nargs="*",
        help="Source path to format.",
    )
    # All remaining options are boolean flags; declare them table-driven.
    flag_specs = [
        (("--check",), "Don't write the files back, just return the status."),
        (("-v", "--verbose"), "Emit messages to stderr about files that were not changed."),
        (("--diff",), "Don't write the files back, just output a diff for each file on stdout."),
        (("-S", "--skip-string-normalization"), "Don't normalize string quotes or prefixes."),
    ]
    for names, help_text in flag_specs:
        parser.add_argument(*names, action="store_true", help=help_text)
    return parser.parse_args()
def main():
args = parse_args()
if not _is_package_installed("black"):
print(_INSTALL_BLACK_MESSAGE)
return 1
command = [
sys.executable,
"-m",
"black",
"--target-version",
"py38",
]
if args.check:
command.append("--check")
if args.verbose:
command.append("--verbose")
if args.diff:
command.append("--diff")
if args.skip_string_normalization:
command.append("--skip-string-normalization")
requested_paths = [path.resolve() for path in args.paths]
# Narrow down the set of paths to format to only those paths which are either
# included in the set of requested paths or are subpaths of the requested paths.
format_paths = {
known_path
for path in requested_paths
for known_path in _get_python_sources()
if path == known_path or path in known_path.parents
}
# Add requested paths that exists, but aren't included in the format set.
for path in requested_paths:
if path not in format_paths and path.exists():
format_paths.add(path)
command += sorted([str(path) for path in format_paths])
return subprocess.call(command)
if __name__ == "__main__":
sys.exit(main())
| apple/swift | utils/python_format.py | python_format.py | py | 4,666 | python | en | code | 64,554 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.executable",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.devnull",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
... |
42491959106 | """
10:05 시작
"""
import sys
import copy
from collections import deque
def func():
N, L, R = map(int, sys.stdin.readline().split())
land = []
for i in range(N):
land.append(list(map(int, sys.stdin.readline().split())))
if N == 1:
return 0
time = 0
while time < 2000: # 2000일 이상은 주어지지 않음
time += 1
visited = [[False for i in range(N)] for j in range(N)] # BFS로 인구 이동이 일어나는 곳을 체크하기 위함
tmp_lands = copy.deepcopy(land)
lands_cnt = 0 # 인구 이동에 포함된 나라의 수
plus_x = [1, -1, 0, 0]
plus_y = [0, 0, 1, -1]
for i in range(N): # 인구 이동을 할 나라가 있는지 탐색
for j in range(N):
if visited[i][j]: continue
# BFS로 열려 있는 국경선끼리 인구 공유
lands_cnt += 1
people_sum = land[i][j] # 국경선이 인접한 나라의 인구의 합
country_num = 1 # 국경선이 인접한 나라의 개수
countries = [(i, j)]
dq = deque()
dq.append((i, j))
visited[i][j] = True
# BFS를 하며 인접한 국가 탐색
while dq:
tup = dq.popleft()
y = tup[0]
x = tup[1]
for k in range(4):
nx = x + plus_x[k]
ny = y + plus_y[k]
if 0 <= nx < N and 0 <= ny < N and not visited[ny][nx] and L <= abs(land[y][x] - land[ny][nx]) <= R:
visited[ny][nx] = True
dq.append((ny, nx))
people_sum += land[ny][nx]
country_num += 1
countries.append((ny, nx))
if len(countries) == N * N: # 모든 나라를 방문할 수 있는 경우
return time
for country in countries: # 나라 인구의 합의 평균 값을 대입
tmp_lands[country[0]][country[1]] = people_sum // country_num
if lands_cnt == N * N: # 모든 나라가 인접하지 않은 경우
return time - 1
land = copy.deepcopy(tmp_lands)
if time >= 2001:
return 0
print(func())
| Mugamta/Boostcamp_AITech5_CV11 | 3.22/서지훈_백준_16234_인구이동.py | 서지훈_백준_16234_인구이동.py | py | 2,430 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
... |
21159376882 | #!/usr/bin/env python
# coding: utf-8
# In[193]:
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from matplotlib.colors import ListedColormap
import pickle
'''
このプログラムはCSVファイルから読み込んだデータから機械学習を行い、結果を出力する。
分析手法:ロジスティック回帰分析
テストデータ:CreateTestDataによって生成されたCSVファイル
目的変数:在職(1 or 0)
説明変数:勤務時間と年齢
'''
# 定数
TR_CSV_PLACE = "./Data/Train_Data.csv"
RS_CSV_PLACE = "./Data/Test_Data.csv"
SAVE_MODEL = "./Data/LogisticModel.sav"
# In[194]:
# LogisticRegressionクラスのインスタンスを作成
lreg = LogisticRegression()
tr_df = pd.read_csv(TR_CSV_PLACE)
test_df = pd.read_csv(RS_CSV_PLACE)
# 説明変数の読み込み
X_train = tr_df[['年齢', '勤務時間']].values
# 目的変数の読み込み
Y_train = tr_df['在職'].values
# ロジスティック回帰モデルの作成
try:
lr = pickle.load(open(SAVE_MODEL, 'rb'))
except Exception as e:
print('Error!')
lr = LogisticRegression(C=1000, random_state=0)
# 学習させる
lr.fit(X_train, Y_train)
# 学習モデルの保存
pickle.dump(lr, open(SAVE_MODEL, 'wb'))
# In[195]:
from sklearn.metrics import accuracy_score, precision_score, recall_score
# テストデータの読み込み
X_test = test_df[['年齢', '勤務時間']].values
Y_test = test_df['在職'].values
# 作成したモデルを元にした予測の実行
predict = lr.predict(X_test)
# 結果の出力
#print(accuracy_score(Y_test, predict), precision_score(Y_test, predict), recall_score(Y_test, predict))
print("正解率(Accuracy):", '{:.2f}'.format(accuracy_score(Y_test, predict)*100),"%", sep="")
print("適合率(Precsion):", '{:.2f}'.format(precision_score(Y_test, predict)*100),"%", sep="")
print("再現率(Recall):", '{:.2f}'.format(recall_score(Y_test, predict)*100),"%", sep="")
| nagnag0707/hr_ai_solution_test | test_sklearn.py | test_sklearn.py | py | 2,092 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name"... |
16128455391 | import secure
from fastapi import FastAPI
from webhooktesting.routes import diagnostics, core
app = FastAPI()
server = secure.Server().set("Secure")
hsts = secure.StrictTransportSecurity().include_subdomains().preload().max_age(2592000)
cache_value = secure.CacheControl().must_revalidate()
secure_headers = secure.Secure(server=server, hsts=hsts, cache=cache_value)
@app.middleware("http")
async def set_secure_headers(request, call_next):
response = await call_next(request)
secure_headers.framework.fastapi(response)
return response
app.include_router(diagnostics.router)
app.include_router(core.router)
| Brightmd/WebhookTesting | src/webhooktesting/main.py | main.py | py | 627 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "secure.Server",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "secure.StrictTransportSecurity",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "secure.Cach... |
33204018569 | import numpy as np
from numpy.linalg import qr
from scipy.special import erfc
from scipy.linalg import hadamard
from ..config import *
def getMatrix(dim, k, res, rowSpace = 'random', spectrum = 'smooth gap',
returnSVD = False, coherenceScalar = .1, steepness = 1):
#Check for valid inputs
assert type(dim) == int and dim > 0, 'Matrix dimension must be a positive '\
'integer.'
assert type(k) == int and k > 0, 'Target rank must be a positive integer.'
assert type(dim >= k), 'Target rank cannot excced matrix dimension'
assert res >0 and res < 1, 'Target residual must be in the open '\
'interval (0,1).'
#Constructing the column space (left singular subspace) of the matrix
U = qr(np.random.normal(size=(dim,dim)))[0]
#Constructing the row space (right singular subspace) of the matrix
if rowSpace == 'random':
V = qr(np.random.normal(size=(dim,dim)))[0]
elif rowSpace == 'hadamard':
#Scipy hadamard only works for powers of 2. Can manually save other
#Hadamard matrices using saveHadamard.jl and then load them here
try:
V= hadamard(dim)/np.sqrt(dim)
except:
a = os.path.abspath(".").rfind('/')
projectPath = os.path.abspath(".")[:a+1]
V=np.load(projectPath + hadamardMatricesPath + 'hadamard' + str(dim) + '.npy')/np.sqrt(dim)
elif rowSpace == 'incoherent':
try:
V= hadamard(dim)/np.sqrt(dim)
except:
V=np.load(hadamardMatricesPath + 'hadamard' + str(dim) + '.npy')/np.sqrt(dim)
L, _, R = np.linalg.svd(V +coherenceScalar*np.random.normal(size=(dim,dim)))
V=L@R
elif rowSpace == 'permutation':
V=np.eye(dim)[:,np.random.permutation(dim)]
elif rowSpace == 'coherent':
V=np.eye(dim)[:,np.random.permutation(dim)]
L, _, R = np.linalg.svd(V + coherenceScalar*np.random.normal(size=(dim,dim)))
V=L@R
else:
raise Exception ('Not a valid row space.')
#Constructing the singular spectrum
if spectrum == 'smooth gap':
decayLength = int(np.floor(.7*k))
x = np.linspace(0, 1, dim)
x *= steepness*5/(x[k-1] - x[k-1-decayLength])
x += 2.5 - x[k-1]
singularValues = .5*(1+erfc(x))/1.5
beta = np.log(res)/np.log(singularValues[k])
singularValues **= beta
sigma = np.diag(singularValues)
if returnSVD:
return U, sigma, V
else:
return U @ sigma@ V.T
# U,sigma,V = getMatrix(96,48,1e-12,'incoherent',returnSVD = True)
# k = np.arange(0,96,1)
# plt.plot(k,np.diag(sigma))
# plt.show() | alexbuzali2233/RandomLowRank-Alex | src/helpers/getMatrix.py | getMatrix.py | py | 2,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linalg.qr",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.qr... |
43303341114 | import py
import random
from collections import OrderedDict
from hypothesis import settings, given, strategies
from hypothesis.stateful import run_state_machine_as_test
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem import rordereddict, rstr
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.annlowlevel import llstr, hlstr
from rpython.rtyper.test.test_rdict import (
BaseTestRDict, MappingSpace, MappingSM)
from rpython.rlib import objectmodel
rodct = rordereddict
def get_indexes(ll_d):
return ll_d.indexes._obj.container._as_ptr()
def foreach_index(ll_d):
indexes = get_indexes(ll_d)
for i in range(len(indexes)):
yield rffi.cast(lltype.Signed, indexes[i])
def count_items(ll_d, ITEM):
c = 0
for item in foreach_index(ll_d):
if item == ITEM:
c += 1
return c
class TestRDictDirect(object):
dummykeyobj = None
dummyvalueobj = None
def _get_str_dict(self):
# STR -> lltype.Signed
DICT = rordereddict.get_ll_dict(lltype.Ptr(rstr.STR), lltype.Signed,
ll_fasthash_function=rstr.LLHelpers.ll_strhash,
ll_hash_function=rstr.LLHelpers.ll_strhash,
ll_eq_function=rstr.LLHelpers.ll_streq,
dummykeyobj=self.dummykeyobj,
dummyvalueobj=self.dummyvalueobj)
return DICT
def test_dict_creation(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
lls = llstr("abc")
rordereddict.ll_dict_setitem(ll_d, lls, 13)
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
assert rordereddict.ll_dict_getitem(ll_d, llstr("abc")) == 13
assert rordereddict.ll_dict_getitem(ll_d, lls) == 13
rordereddict.ll_dict_setitem(ll_d, lls, 42)
assert rordereddict.ll_dict_getitem(ll_d, lls) == 42
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 43)
assert rordereddict.ll_dict_getitem(ll_d, lls) == 43
def test_dict_creation_2(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
llab = llstr("ab")
llb = llstr("b")
rordereddict.ll_dict_setitem(ll_d, llab, 1)
rordereddict.ll_dict_setitem(ll_d, llb, 2)
assert rordereddict.ll_dict_getitem(ll_d, llb) == 2
def test_dict_store_get(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(20):
for j in range(i):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
assert ll_d.num_live_items == 20
for i in range(20):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_store_get_del(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(20):
for j in range(0, i, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
if i % 2 != 0:
rordereddict.ll_dict_delitem(ll_d, llstr(str(i)))
assert ll_d.num_live_items == 10
for i in range(0, 20, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_del_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("abc"))
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("def"))
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
assert count_items(ll_d, rordereddict.DELETED) == 1
py.test.raises(KeyError, rordereddict.ll_dict_getitem, ll_d, llstr("abc"))
def test_dict_del_not_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
rordereddict.ll_dict_setitem(ll_d, llstr("def"), 15)
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 2
assert count_items(ll_d, rordereddict.DELETED) == 1
def test_dict_resize(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("a"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3)
rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4)
rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6)
rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7)
rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8)
rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10)
assert len(get_indexes(ll_d)) == 16
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13)
assert len(get_indexes(ll_d)) == 64
for item in 'abcdefghijklm':
assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1
def test_dict_grow_cleanup(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
lls = llstr("a")
for i in range(40):
rordereddict.ll_dict_setitem(ll_d, lls, i)
rordereddict.ll_dict_delitem(ll_d, lls)
assert ll_d.num_ever_used_items <= 10
def test_dict_iteration(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
assert [hlstr(entry.key) for entry in self._ll_iter(ll_d)] == ["k", "j"]
def _ll_iter(self, ll_d):
ITER = rordereddict.get_ll_dictiter(lltype.typeOf(ll_d))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
ll_dictnext = rordereddict._ll_dictnext
while True:
try:
num = ll_dictnext(ll_iter)
except StopIteration:
break
yield ll_d.entries[num]
def test_popitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
TUP = lltype.Ptr(lltype.GcStruct('x', ('item0', lltype.Ptr(rstr.STR)),
('item1', lltype.Signed)))
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "j"
assert ll_elem.item1 == 2
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "k"
assert ll_elem.item1 == 1
py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d)
def test_popitem_first(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3)
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
for expected in ["k", "j", "m"]:
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == expected
rordereddict.ll_dict_delitem(ll_d, ll_key)
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter)
def test_popitem_first_bug(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_delitem(ll_d, llstr("k"))
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == "j"
assert ll_d.lookup_function_no == ( # 1 free item found at the start
(1 << rordereddict.FUNC_SHIFT) | rordereddict.FUNC_BYTE)
rordereddict.ll_dict_delitem(ll_d, llstr("j"))
assert ll_d.num_ever_used_items == 0
assert ll_d.lookup_function_no == rordereddict.FUNC_BYTE # reset
def _get_int_dict(self):
def eq(a, b):
return a == b
return rordereddict.get_ll_dict(lltype.Signed, lltype.Signed,
ll_fasthash_function=intmask,
ll_hash_function=intmask,
ll_eq_function=eq)
def test_direct_enter_and_del(self):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
numbers = [i * rordereddict.DICT_INITSIZE + 1 for i in range(8)]
for num in numbers:
rordereddict.ll_dict_setitem(ll_d, num, 1)
rordereddict.ll_dict_delitem(ll_d, num)
for k in foreach_index(ll_d):
assert k < rordereddict.VALID_OFFSET
def test_contains(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_contains(ll_d, llstr("k"))
assert not rordereddict.ll_dict_contains(ll_d, llstr("j"))
def test_clear(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 1)
rordereddict.ll_dict_clear(ll_d)
assert ll_d.num_live_items == 0
def test_get(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_get(ll_d, llstr("k"), 32) == 1
assert rordereddict.ll_dict_get(ll_d, llstr("j"), 32) == 32
def test_setdefault(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_setdefault(ll_d, llstr("j"), 42) == 42
assert rordereddict.ll_dict_getitem(ll_d, llstr("j")) == 42
assert rordereddict.ll_dict_setdefault(ll_d, llstr("k"), 42) == 1
assert rordereddict.ll_dict_getitem(ll_d, llstr("k")) == 1
def test_copy(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
ll_d2 = rordereddict.ll_dict_copy(ll_d)
for ll_d3 in [ll_d, ll_d2]:
assert rordereddict.ll_dict_getitem(ll_d3, llstr("k")) == 1
assert rordereddict.ll_dict_get(ll_d3, llstr("j"), 42) == 2
assert rordereddict.ll_dict_get(ll_d3, llstr("i"), 42) == 42
def test_update(self):
DICT = self._get_str_dict()
ll_d1 = rordereddict.ll_newdict(DICT)
ll_d2 = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d1, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d1, llstr("j"), 6)
rordereddict.ll_dict_setitem(ll_d2, llstr("i"), 7)
rordereddict.ll_dict_setitem(ll_d2, llstr("k"), 8)
rordereddict.ll_dict_update(ll_d1, ll_d2)
for key, value in [("k", 8), ("i", 7), ("j", 6)]:
assert rordereddict.ll_dict_getitem(ll_d1, llstr(key)) == value
def test_pop(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop(ll_d, llstr("k")) == 5
assert rordereddict.ll_dict_pop(ll_d, llstr("j")) == 6
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("k"))
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("j"))
def test_pop_default(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 42) == 5
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 41) == 6
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 40) == 40
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 39) == 39
def test_bug_remove_deleted_items(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(15):
rordereddict.ll_dict_setitem(ll_d, llstr(chr(i)), 5)
for i in range(15):
rordereddict.ll_dict_delitem(ll_d, llstr(chr(i)))
rordereddict.ll_prepare_dict_update(ll_d, 7)
# used to get UninitializedMemoryAccess
def test_bug_resize_counter(self):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 1, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_setitem(ll_d, 2, 0)
rordereddict.ll_dict_delitem(ll_d, 1)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_delitem(ll_d, 2)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_setitem(ll_d, 1, 0)
d = ll_d
idx = d.indexes._obj.container
num_nonfrees = 0
for i in range(idx.getlength()):
got = idx.getitem(i) # 0: unused; 1: deleted
num_nonfrees += (got > 0)
assert d.resize_counter <= idx.getlength() * 2 - num_nonfrees * 3
@given(strategies.lists(strategies.integers(min_value=1, max_value=5)))
def test_direct_move_to_end(self, lst):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, 1, 11)
rordereddict.ll_dict_setitem(ll_d, 2, 22)
def content():
return [(entry.key, entry.value) for entry in self._ll_iter(ll_d)]
for case in lst:
if case == 1:
rordereddict.ll_dict_move_to_end(ll_d, 1, True)
assert content() == [(2, 22), (1, 11)]
elif case == 2:
rordereddict.ll_dict_move_to_end(ll_d, 2, True)
assert content() == [(1, 11), (2, 22)]
elif case == 3:
py.test.raises(KeyError, rordereddict.ll_dict_move_to_end,
ll_d, 3, True)
elif case == 4:
rordereddict.ll_dict_move_to_end(ll_d, 2, False)
assert content() == [(2, 22), (1, 11)]
elif case == 5:
rordereddict.ll_dict_move_to_end(ll_d, 1, False)
assert content() == [(1, 11), (2, 22)]
class TestRDictDirectDummyKey(TestRDictDirect):
class dummykeyobj:
ll_dummy_value = llstr("dupa")
class TestRDictDirectDummyValue(TestRDictDirect):
class dummyvalueobj:
ll_dummy_value = -42
class TestOrderedRDict(BaseTestRDict):
@staticmethod
def newdict():
return OrderedDict()
@staticmethod
def newdict2():
return OrderedDict()
@staticmethod
def new_r_dict(myeq, myhash, force_non_null=False, simple_hash_eq=False):
return objectmodel.r_ordereddict(
myeq, myhash, force_non_null=force_non_null,
simple_hash_eq=simple_hash_eq)
def test_two_dicts_with_different_value_types(self):
def func(i):
d1 = OrderedDict()
d1['hello'] = i + 1
d2 = OrderedDict()
d2['world'] = d1
return d2['world']['hello']
res = self.interpret(func, [5])
assert res == 6
def test_move_to_end(self):
def func():
d1 = OrderedDict()
d1['key1'] = 'value1'
d1['key2'] = 'value2'
for i in range(20):
objectmodel.move_to_end(d1, 'key1')
assert d1.keys() == ['key2', 'key1']
objectmodel.move_to_end(d1, 'key2')
assert d1.keys() == ['key1', 'key2']
for i in range(20):
objectmodel.move_to_end(d1, 'key2', last=False)
assert d1.keys() == ['key2', 'key1']
objectmodel.move_to_end(d1, 'key1', last=False)
assert d1.keys() == ['key1', 'key2']
func()
self.interpret(func, [])
class ODictSpace(MappingSpace):
MappingRepr = rodct.OrderedDictRepr
moved_around = False
ll_getitem = staticmethod(rodct.ll_dict_getitem)
ll_setitem = staticmethod(rodct.ll_dict_setitem)
ll_delitem = staticmethod(rodct.ll_dict_delitem)
ll_len = staticmethod(rodct.ll_dict_len)
ll_contains = staticmethod(rodct.ll_dict_contains)
ll_copy = staticmethod(rodct.ll_dict_copy)
ll_clear = staticmethod(rodct.ll_dict_clear)
ll_popitem = staticmethod(rodct.ll_dict_popitem)
def newdict(self, repr):
return rodct.ll_newdict(repr.DICT)
def get_keys(self):
DICT = lltype.typeOf(self.l_dict).TO
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict)
ll_dictnext = rordereddict._ll_dictnext
keys_ll = []
while True:
try:
num = ll_dictnext(ll_iter)
keys_ll.append(self.l_dict.entries[num].key)
except StopIteration:
break
return keys_ll
def popitem(self):
# overridden to check that we're getting the most recent key,
# not a random one
try:
ll_tuple = self.ll_popitem(self.TUPLE, self.l_dict)
except KeyError:
assert len(self.reference) == 0
else:
ll_key = ll_tuple.item0
ll_value = ll_tuple.item1
key, value = self.reference.popitem()
assert self.ll_key(key) == ll_key
assert self.ll_value(value) == ll_value
self.removed_keys.append(key)
def removeindex(self):
# remove the index, as done during translation for prebuilt dicts
# (but cannot be done if we already removed a key)
if not self.removed_keys and not self.moved_around:
rodct.ll_no_initial_index(self.l_dict)
def move_to_end(self, key, last=True):
ll_key = self.ll_key(key)
rodct.ll_dict_move_to_end(self.l_dict, ll_key, last)
value = self.reference.pop(key)
if last:
self.reference[key] = value
else:
items = self.reference.items()
self.reference.clear()
self.reference[key] = value
self.reference.update(items)
# prevent ll_no_initial_index()
self.moved_around = True
def fullcheck(self):
# overridden to also check key order
assert self.ll_len(self.l_dict) == len(self.reference)
keys_ll = self.get_keys()
assert len(keys_ll) == len(self.reference)
for key, ll_key in zip(self.reference, keys_ll):
assert self.ll_key(key) == ll_key
assert (self.ll_getitem(self.l_dict, self.ll_key(key)) ==
self.ll_value(self.reference[key]))
for key in self.removed_keys:
if key not in self.reference:
try:
self.ll_getitem(self.l_dict, self.ll_key(key))
except KeyError:
pass
else:
raise AssertionError("removed key still shows up")
# check some internal invariants
d = self.l_dict
num_lives = 0
for i in range(d.num_ever_used_items):
if d.entries.valid(i):
num_lives += 1
assert num_lives == d.num_live_items
fun = d.lookup_function_no & rordereddict.FUNC_MASK
if fun == rordereddict.FUNC_MUST_REINDEX:
assert not d.indexes
else:
assert d.indexes
idx = d.indexes._obj.container
num_lives = 0
num_nonfrees = 0
for i in range(idx.getlength()):
got = idx.getitem(i) # 0: unused; 1: deleted
num_nonfrees += (got > 0)
num_lives += (got > 1)
assert num_lives == d.num_live_items
assert 0 < d.resize_counter <= idx.getlength()*2 - num_nonfrees*3
class ODictSM(MappingSM):
Space = ODictSpace
def test_hypothesis():
run_state_machine_as_test(
ODictSM, settings(max_examples=500, stateful_step_count=100))
| mozillazg/pypy | rpython/rtyper/test/test_rordereddict.py | test_rordereddict.py | py | 22,081 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.rtyper.lltypesystem.rordereddict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rpython.rtyper.lltypesystem.rffi.cast",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rpython.rtyper.lltypesystem.rffi",
"line_number": 24,
"usa... |
29351919666 | from urllib.request import urlopen
import datetime
import ast, csv
import pandas as pd
# 获取今日日期(年月日)
today = datetime.date.today()
year = str(today.year)
month = str(today.month)
monthsub = str(today.month-1) # 用于大连商品交易所特殊的月份(比当前月份少一个月)
# 将少于10的日期前加上0
if today.month < 10:
month0 = '0'+month
else:
month0 = month
day = today.day
if day < 10:
day0 = '0'+str(day)
else:
day0 = str(day)
## 读取dat文件并转化成dictionary
def readDat(data):
dict_data = {}
try:
# 将dat文件先转化成str,去掉多余的信息,保留完整的dictionary
strLine = ''.join(data)
start = strLine.find('[')
end = strLine.find(']')
dict_data = strLine[start:end+1]
except:
print("Record: ", data)
raise Exception("Failed while unpacking.")
# 将dict格式的str文件解析成dictionary
dict_data = ast.literal_eval(dict_data)
return dict_data
## 上海能源交易所dat文件解析至csv文件
def ine_csvFile(data):
# 将dat文件转化成dictionary
dict_data = readDat(data)
# 按照dat里的数据标签将dictionary转化成csv文件
csv_columns = ['INSTRUMENTID', 'TRADEFEEUNIT', 'TRADEFEERATIO', 'HEDGSHORTMARGINRATIO', 'SETTLEMENTPRICE', 'COMMODITYDELIVFEEUNIT', 'SPECLONGMARGINRATIO', 'SPECSHORTMARGINRATIO', 'HEDGLONGMARGINRATIO', 'PRODUCTID', 'PRODUCTNAME']
csv_file = "INE_Margin.csv"
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for d in dict_data:
writer.writerow(d)
except IOError:
print("I/O error")
# 处理csv文件,将列表统一成网页手动下载时的格式
csv_data = pd.read_csv(csv_file, low_memory = False)
csv_df = pd.DataFrame(csv_data)
# 将列表的英文换成对应中文
csv_df.columns = ['交割月份', '交易手续费额(元/手)', '交易手续费率(%)', '卖套保交易保证金率', '结算价', '交割手续费', '买投机交易保证金率', '卖投机交易保证金率', '买套保交易保证金率', '商品id', '商品名称']
# 删掉手动下载格式中不存在的列
csv_df.drop(csv_df.columns[[9]], axis=1, inplace=True)
# 重新排列列表的顺序
csv_df = csv_df[['商品名称', '交割月份', '结算价', '交易手续费率(%)', '交易手续费额(元/手)', '交割手续费', '买投机交易保证金率', '买套保交易保证金率', '卖投机交易保证金率', '卖套保交易保证金率']]
csv_df = csv_df.set_index('商品名称')
csv_df.to_csv(csv_file)
## 上海期货交易所dat文件解析至csv文件
def shfe_csvFile(data):
# 将dat文件转化成dictionary
dict_data = readDat(data)
# 按照dat里的数据标签将dictionary转化成csv文件
csv_columns = ['COMMODITYDELIVERYFEERATION', 'COMMODITYDELIVERYFEEUNIT', 'DISCOUNTRATE', 'INSTRUMENTID', 'LONGMARGINRATIO', 'SETTLEMENTPRICE', 'SHORTMARGINRATIO', 'SPEC_LONGMARGINRATIO', 'SPEC_SHORTMARGINRATIO', 'TRADEFEERATION', 'TRADEFEEUNIT', 'TRADINGDAY', 'UPDATE_DATE', 'id']
csv_file = "SHFE_Margin.csv"
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for d in dict_data:
writer.writerow(d)
except IOError:
print("I/O error")
# 处理csv文件,将列表统一成网页手动下载时的格式
csv_data = pd.read_csv(csv_file, low_memory = False)
csv_df = pd.DataFrame(csv_data)
# 将列表的英文换成对应中文
csv_df.columns = ['商品手续费率(%)', '交割手续费', '平今折扣率(%)', '合约代码', '投机买保证金率(%)', '结算价', '投机卖保证金率(%)', '套保买保证金率(%)', '套保卖保证金率(%)', '交易手续费率(%)', '交易手续费额(元/手)', '交易日', '更新日期', 'id']
# 删掉手动下载格式中不存在的列
csv_df.drop(csv_df.columns[[0, 11, 12, 13]], axis=1, inplace=True)
# 重新排列列表的顺序
csv_df = csv_df[['合约代码', '结算价', '交易手续费率(%)', '交易手续费额(元/手)', '交割手续费', '投机买保证金率(%)', '投机卖保证金率(%)', '套保买保证金率(%)', '套保卖保证金率(%)', '平今折扣率(%)']]
csv_df = csv_df.set_index('合约代码')
# 处理列表数据,更换保留位数及记录方式,将百分率转化成100为单位的百分比形式
csv_df['结算价'] = csv_df['结算价'].astype(int)
csv_df['交割手续费'] = csv_df['交割手续费'].astype(int)
csv_df['交易手续费额(元/手)'] = csv_df['交易手续费额(元/手)'].astype(int)
csv_df['交易手续费率(%)'] = csv_df['交易手续费率(%)']*1000
csv_df['投机买保证金率(%)'] = (csv_df['投机买保证金率(%)']*100).astype(int)
csv_df['投机卖保证金率(%)'] = (csv_df['投机卖保证金率(%)']*100).astype(int)
csv_df['套保买保证金率(%)'] = (csv_df['套保买保证金率(%)']*100).astype(int)
csv_df['套保卖保证金率(%)'] = (csv_df['套保卖保证金率(%)']*100).astype(int)
csv_df['平今折扣率(%)'] = (csv_df['平今折扣率(%)']*100).astype(int)
csv_df.to_csv(csv_file)
## 2.1 大连商品交易所
def dce_getForm():
url1 = 'http://www.dce.com.cn/publicweb/businessguidelines/exportFutAndOptSettle.html?variety=all&trade_type=0&year='+year+'&month='+monthsub+'&day='+day0+'&exportFlag=excel'
f1 = urlopen(url1)
data = f1.read()
with open("DCE_Margin.xls", "wb") as code:
code.write(data)
## 2.2 郑州商品交易所
def czce_getForm():
# 示例下载链接2021-6-2: 'http://www.czce.com.cn/cn/DFSStaticFiles/Future/2020/20200602/FutureDataClearParams.xls'
url2 = 'http://www.czce.com.cn/cn/DFSStaticFiles/Future/'+year+'/'+year+month0+day0+'/FutureDataClearParams.xls'
f2 = urlopen(url2)
data = f2.read()
with open("CZCE_Margin.xls", "wb") as code:
code.write(data)
### 2.3 上海期货交易所
def shfe_getForm():
# 示例下载链接2021-6-8: 'http://www.shfe.com.cn/data/instrument/Settlement20210608.dat'
url3 = 'http://www.shfe.com.cn/data/instrument/Settlement'+year+month0+day0+'.dat'
f3 = urlopen(url3)
data = f3.read().decode('utf-8')
shfe_csvFile(data)
## 2.4 上海能源交易中心
def ine_getForm():
# 示例下载链接2021-6-8日:'http://www.ine.cn/data/dailydata/js/js20210608.dat'
url4 = 'http://www.ine.cn/data/dailydata/js/js'+year+month0+day0+'.dat'
f4 = urlopen(url4)
data = f4.read().decode('utf-8')
ine_csvFile(data)
## 2.5 中国金融期货交易所
def cffex_getForm():
    """Download CFFEX's daily settlement-parameter CSV to CFFEX_Margin.csv.

    Example URL (2021-05-28): 'http://www.cffex.com.cn/sj/jscs/202105/28/20210528_1.csv'
    Relies on module-level date strings ``year``, ``month0`` and ``day0``.
    """
    # BUGFIX: build the URL from the current date strings like every other
    # *_getForm() here; previously the 2021-05-28 example URL was hard-coded,
    # so this function always downloaded the same stale file.
    url5 = 'http://www.cffex.com.cn/sj/jscs/'+year+month0+'/'+day0+'/'+year+month0+day0+'_1.csv'
    # Close the HTTP response and the output file deterministically.
    with urlopen(url5) as f5:
        data = f5.read()
    with open("CFFEX_Margin.csv", "wb") as code:
        code.write(data)
### 下载全部5个网页最新的结算参数表格
def getAllForms():
    """Download the latest settlement-parameter tables from all 5 exchanges.

    Failures (network errors, table not published yet, etc.) are reported
    rather than raised, since the files are only available at certain times.
    """
    try:
        dce_getForm()
        czce_getForm()
        shfe_getForm()
        ine_getForm()
        cffex_getForm()
    except Exception:  # narrowed from a bare `except:` so Ctrl-C still works
        print('May not be the right time to download.')
if __name__ == '__main__':
getAllForms() | Katrina0406/My-Projects | autoDownload/lastestPrice/autodownload.py | autodownload.py | py | 7,490 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter"... |
8648522966 | import os, shutil
from pathlib import Path
from tqdm import tqdm
import pickle
import torch
import numpy as np
from IDSL_MINT.utils.MINT_aggregate import MINT_peak_aggregate
from IDSL_MINT.utils.msp_file_utils import MINT_address_check
def FP2MS_DataLoader(pkl_deconvoluted_msp_directory, max_number_ions_per_batch):
    """Batch the FP2MS training set into tensor files on disk.

    Reads ``FP2MS_TrainingSet.pkl`` from the given directory, groups its
    spectra into batches of at most ``max_number_ions_per_batch`` ions, and
    writes one ``<first_index>_training_tensors.pth`` file per batch into a
    freshly (re)created ``FP2MS_training`` subdirectory.

    :param pkl_deconvoluted_msp_directory: directory containing the pickle
    :param max_number_ions_per_batch: ion budget per training batch
    :return: path of the created ``FP2MS_training`` directory
    """
    pkl_deconvoluted_msp_directory = MINT_address_check(pkl_deconvoluted_msp_directory, address_check = True)

    # Recreate the output directory from scratch.
    FP2MS_training = f"{pkl_deconvoluted_msp_directory}/FP2MS_training"
    try:
        if Path(FP2MS_training).is_dir():
            shutil.rmtree(FP2MS_training)
        os.makedirs(FP2MS_training, exist_ok = False)
    # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt etc.);
    # only filesystem errors are expected here. TypeError kept for backward
    # compatibility with existing callers.
    except OSError as e:
        raise TypeError(f"Can't remove/create `{FP2MS_training}`!") from e

    mspTrainingSet_name = Path(f"{pkl_deconvoluted_msp_directory}/FP2MS_TrainingSet.pkl")
    if mspTrainingSet_name.is_file():
        with open(mspTrainingSet_name, "rb") as pkl:
            mspTrainingSet = pickle.load(pkl)
    else:
        raise FileNotFoundError(f"Can't find `{mspTrainingSet_name}`!")

    # Index groups such that each batch stays under the ion budget.
    msp_block_indices = MINT_peak_aggregate(mspTrainingSet, max_number_ions_per_batch)

    for i in tqdm(range(len(msp_block_indices))):
        indices = msp_block_indices[i]
        FingerPrint, tokenized_MZ, FingerPrintPaddingMask = [], [], []
        for j in indices:
            # Entry [2] of each training sample holds the three arrays.
            tokenized_MZ1, FingerPrint1, FingerPrintPaddingMask1 = mspTrainingSet[j][2]
            tokenized_MZ.append(tokenized_MZ1)
            FingerPrint.append(FingerPrint1)
            FingerPrintPaddingMask.append(FingerPrintPaddingMask1)

        tokenized_MZ = torch.tensor(np.stack(tokenized_MZ), dtype = torch.int)
        FingerPrint = torch.tensor(np.stack(FingerPrint), dtype = torch.long)
        FingerPrintPaddingMask = torch.tensor(np.stack(FingerPrintPaddingMask), dtype = torch.bool)

        # A single-spectrum batch must still be 2-D.
        if FingerPrint.dim() == 1:
            FingerPrint = FingerPrint.unsqueeze(dim = 0)

        training_tensors_name = f"{FP2MS_training}/{indices[0]}_training_tensors.pth"
        torch.save({'tokenized_MZ': tokenized_MZ,
                    'FingerPrint': FingerPrint,
                    'FingerPrintPaddingMask': FingerPrintPaddingMask},
                    training_tensors_name)

    return FP2MS_training
{
"api_name": "IDSL_MINT.utils.msp_file_utils.MINT_address_check",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 18,
"usage_type": "call"
},
{
"api_... |
19696374653 | import json
import logging
import uuid
import sys
sys.path.append('..')
import errors
from errors import build_response
from models.user_model import UserModel
from models.task_model import TaskModel
from models.task_list_model import TaskListModel
from pynamodb.exceptions import UpdateError, ScanError, GetError
# logの設定
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def tasks_taskLists(event, context):
  """
  Return the tasks and taskLists that the user belongs to.

  Lambda handler: expects the user id in event['pathParameters']['id'] and
  responds with an API-Gateway-style dict (statusCode/headers/body).
  """
  try:
    logger.info(event)
    if not event['pathParameters']:
      raise errors.BadRequest('Bad request')
    user_id = event['pathParameters']['id']
    # Fetch the user
    try:
      user = UserModel.get(user_id)
    except UserModel.DoesNotExist:
      raise errors.NotFound('The user does not exist')
    # Fetch the tasks the user participates in
    try:
      tasks = user.get_tasks()
    except ScanError as e:
      logger.exception(e)
      raise errors.InternalError('Internal server error')
    # Group the tasks by taskListId
    tasks_group = {}
    for task in tasks:
      if task.taskListId in tasks_group:
        tasks_group[task.taskListId].append(task)
      else:
        tasks_group[task.taskListId] = [task]
    # Fetch the task lists
    task_lists = []
    for task_list_id in tasks_group.keys():
      try:
        task_list = TaskListModel.get(task_list_id)
      except TaskListModel.DoesNotExist as e:
        logger.exception(e)
        continue
      except GetError as e:
        logger.exception(e)
        # BUGFIX: without this `continue`, a failed GET fell through and
        # appended a stale task_list from the previous iteration (or raised
        # NameError on the first one).
        continue
      task_lists.append(task_list)
    # Shape the response
    task_lists = [dict(task_list) for task_list in task_lists]
    for task_list in task_lists:
      task_list['tasks'] = [dict(task) for task in tasks_group[task_list['id']]]
    return {
      'statusCode': 200,
      'headers': {
        'Access-Control-Allow-Origin': '*',
        'Content-Type': 'application/json'
      },
      'body': json.dumps(
        {
          'statusCode': 200,
          'userId': user_id,
          'taskLists': task_lists
        }
      )
    }
  except errors.BadRequest as e:
    logger.exception(e)
    return build_response(e, 400)
  except errors.NotFound as e:
    logger.exception(e)
    return build_response(e, 404)
  except errors.InternalError as e:
    logger.exception(e)
    return build_response(e, 500)
| shimoch-214/todo_api | users/tasks_taskLists.py | tasks_taskLists.py | py | 2,381 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"li... |
15949680682 | import argparse
import os
import pandas as pd
import pathlib
# Parse the run name from the command line.
parser = argparse.ArgumentParser(
    description="Aggregate simulation results",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("name", type=str, help="name of the run")
name = vars(parser.parse_args()).pop("name")

# Collect every per-worker result file that belongs to this run: a file
# matches when its name, minus extension and trailing "_<suffix>", equals
# the run name.
script_dir = os.path.dirname(os.path.abspath(__file__))
results_dir = os.path.join(script_dir, "results")
result_paths_in_results_dir = []
for filename in os.listdir(results_dir):
    run_of_file = filename.rsplit(".", 1)[0].rsplit("_", 1)[0]
    if run_of_file == name:
        result_paths_in_results_dir.append(os.path.join(results_dir, filename))

# Merge the partial results into one sorted frame and store it.
partial_frames = [pd.read_pickle(p) for p in result_paths_in_results_dir]
final_df = pd.concat(partial_frames, axis=1).T.sort_index()
final_df.to_pickle(os.path.join(script_dir, "clean_results", name + ".pkl"))

# Remove the run's setup file, if present.
setup_path = os.path.join(results_dir, name + ".pkl")
pathlib.Path(setup_path).unlink(missing_ok=True)
for path in result_paths_in_results_dir:
pathlib.Path(path).unlink(missing_ok=True) | matthieubulte/MAR | scripts/aggregate_sim_results.py | aggregate_sim_results.py | py | 1,071 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
... |
14114807410 | import sys
from collections import deque
def bfs():
    """Breadth-first search from (0, 0) to (N-1, M-1) over the global maze.

    Prints the length (in cells, start counted) of the shortest path.
    Relies on module globals N, M, maze, move and the helper is_valid().
    """
    visited = [[False] * M for _ in range(N)]
    visited[0][0] = True
    queue = deque([((0, 0), 1)])  # ((x, y), distance so far)
    while queue:
        (x, y), dist = queue.popleft()
        if x == N - 1 and y == M - 1:
            print(dist)
            return
        for dx, dy in move:
            nx, ny = x + dx, y + dy
            if is_valid(nx, ny) and not visited[nx][ny] and maze[nx][ny] == 1:
                visited[nx][ny] = True
                queue.append(((nx, ny), dist + 1))
def is_valid(x, y):
    """True when (x, y) lies inside the global N x M grid."""
    if x < 0 or x >= N:
        return False
    return 0 <= y < M
if __name__ == "__main__":
N, M = map(int, sys.stdin.readline().split())
maze = []
move = [[-1, 0], [1, 0], [0, -1], [0, 1]]
for _ in range(N):
maze.append(list(map(int, sys.stdin.readline().strip("\n"))))
bfs() | nashs789/JGAlgo | Week02/Q2178/Q2178_Inbok.py | Q2178_Inbok.py | py | 1,041 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline... |
4671586615 |
import datetime as dt
from collections import defaultdict
from itertools import chain
import logging
from difflib import get_close_matches, SequenceMatcher
from sqlalchemy import and_
from footie_scores import settings
from footie_scores import utils
from footie_scores.utils.footie import score_from_events
from footie_scores.utils.generic import query_list_of_dicts
from footie_scores.db import queries
from footie_scores.db.schema import Competition, Fixture
logger = logging.getLogger(__name__)
TIME_OVERRIDE = settings.OVERRIDE_TIME or settings.OVERRIDE_DAY
def get_fixture_by_id(session, id_):
    '''Fetch one fixture from the DB and apply the debug time-override filter.'''
    fetched = queries.get_fixture_by_id(session, id_)
    return filter_fixture_with_override_time(fetched)
def get_comp_grouped_fixtures(
        session, start_date, comp_ids=settings.COMPS, end_date=None):
    '''Return fixtures grouped per competition as
    [{'name': <competition name>, 'fixtures': [...]}, ...].'''
    grouped = []
    for comp_id in comp_ids:
        competition = queries.get_competition_by_id(session, comp_id)
        comp_fixtures = filter_fixtures_with_override_time(
            queries.get_fixtures_by_date_and_comp(session, start_date, comp_id, end_date))
        grouped.append({'name': competition.name, 'fixtures': comp_fixtures})
    return grouped
def get_date_grouped_fixtures(
        session, start_date, comp_ids, end_date=None):
    '''Return fixtures grouped by calendar date, dates sorted ascending, as
    [{'name': <formatted date>, 'fixtures': [...]}, ...].'''
    by_date = defaultdict(list)
    all_fixtures = filter_fixtures_with_override_time(
        get_fixtures_by_dates_and_comps(session, start_date, comp_ids, end_date))
    for fixture in all_fixtures:
        by_date[fixture.date].append(fixture)
    grouped = []
    for date in sorted(by_date):
        display_date = utils.time.custom_strftime(settings.WEB_DATEFORMAT, date)
        grouped.append({'name': display_date, 'fixtures': by_date[date]})
    return grouped
def get_fixtures_by_dates_and_comps(
        session, start_date, comp_ids, end_date=None):
    '''Fetch fixtures for every competition id, flattened into one
    override-filtered list.'''
    flat = [fixture
            for comp_id in comp_ids
            for fixture in queries.get_fixtures_by_date_and_comp(
                session, start_date, comp_id, end_date)]
    return filter_fixtures_with_override_time(flat)
def get_competitions_by_id(session, ids):
    '''DB passthrough: fetch several competitions by their ids.'''
    competitions = queries.get_competitions_by_id(session, ids)
    return competitions
def get_competition_by_id(session, id_):
    '''DB passthrough: fetch a single competition by its id.'''
    competition = queries.get_competition_by_id(session, id_)
    return competition
def filter_fixtures_with_override_time(fixtures):
    '''Apply the single-fixture override filter to every fixture in turn.'''
    return list(map(filter_fixture_with_override_time, fixtures))
def filter_fixture_with_override_time(fixture):
    # When running with a debug time override (OVERRIDE_TIME/OVERRIDE_DAY in
    # settings), rewrite the fixture's score/events/status to what they would
    # have looked like at the overridden "now". No-op in production.
    if TIME_OVERRIDE:
        f = fixture
        fixture_ko = dt.datetime.combine(f.date, f.time)
        minutes_elapsed = (utils.time.now() - fixture_ko).total_seconds() / 60
        # Subtract 15 minutes for half time once the first half is over.
        gametime_elapsed = minutes_elapsed - (15 if minutes_elapsed > 45 else 0)
        time_filtered_events = {'home': [], 'away': []}
        if gametime_elapsed < 0:
            # Game has not kicked off yet: show kickoff time, no events.
            f.override_score = f.time.strftime(settings.DB_TIMEFORMAT)
            f.override_events = time_filtered_events
            f.override_status = ' '
        else:
            # Keep only events that have already "happened" by override time.
            for team in ('home', 'away'):
                for event in f.events[team]:
                    if gametime_elapsed >= event['minutes_since_ko']:
                        time_filtered_events[team].append(event)
                    else:
                        logger.info('%s vs %s: %s at %s filtered, override game time: %s',
                                    f.team_home, f.team_away,
                                    event['type'], event['time'], gametime_elapsed)
            # 115 = 90 minutes + stoppage margin; past that the game is FT.
            if time_filtered_events != f.events or gametime_elapsed < 115:
                f.override_events = time_filtered_events
                f.override_score = score_from_events(f.override_events)
                # TODO this is unreliable, e.g. delayed games or games with ET
                f.override_status = int(gametime_elapsed) if gametime_elapsed <= 115 else 'FT'
                logger.info('%s vs %s: override score: %s, status: %s',
                            f.team_home, f.team_away, f.override_score, f.override_status)
        return fixture
    return fixture
def determine_substitutions(lineups, events):
    '''Mark who was subbed on/off in both teams' lineups.

    For each side, fuzzy-matches the 'subst' events against the starting
    lineup (via the event's "assist" fields, i.e. the replaced player) and
    the substitutes bench (via the event's "player" fields), annotating each
    player dict with 'subbed' (None / 'subbed_off' / 'subbed_on') and a
    display string. Returns the mutated lineups object.
    '''
    sides = ('home', 'away')
    for side in sides:
        lineup = getattr(lineups, side)
        subs = getattr(lineups, side+'_subs')
        if not subs:
            subs = []
        sub_events = [e for e in events[side] if e['type'] == 'subst']
        # Starting XI: the replaced player appears in the event's assist fields.
        for player in lineup:
            sub_event = query_list_of_dicts(
                sub_events,
                lookups=[('assist_id', player['id'], None),
                         ('assist', player['name'], None),
                         ('assist', player['name'], lambda x: x.lower().split()[-1]),
                         ('assist', player['name'], lambda x: x.lower().replace('\'', '').split()[-1]),
                ])
            if sub_event is None:
                player['subbed'] = None
            else:
                player['subbed'] = 'subbed_off'
                player['subst_event_string'] = '({}\') \u2935'.format(sub_event['minute'])
        possible_names = [event['player'] for event in sub_events]
        # Bench: the incoming player appears in the event's player fields,
        # falling back to progressively fuzzier name matching.
        for player in subs:
            sub_event = query_list_of_dicts(
                sub_events,
                lookups=[('player_id', player['id'], None),
                         ('player', player['name'], None),
                         ('player', player['name'], lambda x: x.split()[-1]),
                         ('player', player['name'], lambda x: x.lower().replace('\'', '').split()[-1]),
                         ('player', player['name'], lambda x: get_close_matches(x, possible_names, cutoff=0.4)),
                ])
            if sub_event is None:
                player['subbed'] = None
            else:
                player['subbed'] = 'subbed_on'
                player['subst_event_string'] = '({}\') \u2934'.format(sub_event['minute'])
                sub_event_player, player = sub_event['player'], player['name']
                closeness = SequenceMatcher(None, sub_event_player, player).ratio()
                if closeness < 0.5:
                    logger.warning(
                        'Substitute {} from event paired with {} from subs list, closeness: {:.2f} which is a bit low'.format(
                            sub_event_player, player, SequenceMatcher(None, sub_event_player, player).ratio()))
        # Sanity check: every sub event should have been matched on both
        # sides of the swap; log (best-effort) when the counts disagree.
        try:
            assert(len(sub_events) == len([p['subbed'] for p in lineup if p['subbed']]))
            assert(len(sub_events) == len([p['subbed'] for p in subs if p['subbed']]))
        except AssertionError:  # narrowed from a bare `except:`
            logger.error('Number of subs not the same as number of sub events')
    return lineups
| fdav10/football-scores | footie_scores/interfaces/db_to_web.py | db_to_web.py | py | 6,904 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "footie_scores.settings.OVERRIDE_TIME",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "footie_scores.settings",
"line_number": 19,
"usage_type": "name"
},
{
... |
18134982980 | from datetime import datetime as dt
from datetime import timedelta
import time
import numpy as np
import pandas as pd
import pytz
def gps_to_datetime(gps_time):
    """Convert GPS seconds (since 1980-01-06) into fractional calendar years.

    :param gps_time: iterable of seconds since the GPS epoch
    :return: numpy array of decimal years, e.g. 1980.5 for mid-1980
    """
    gps_epoch = dt(1980, 1, 6)

    def _year_fraction(date):
        # seconds since the Unix epoch, in local time (via mktime)
        seconds = lambda d: time.mktime(d.timetuple())
        year_start = dt(year=date.year, month=1, day=1)
        next_year_start = dt(year=date.year + 1, month=1, day=1)
        elapsed = seconds(date) - seconds(year_start)
        duration = seconds(next_year_start) - seconds(year_start)
        return date.year + elapsed / duration

    return np.array([_year_fraction(gps_epoch + timedelta(seconds=s)) for s in gps_time])
def orbit_fraction(df):
    # Per-orbit min-max normalisation of the 'time' column (NaN-safe),
    # returned as orbit number + half the fraction, so values land in
    # [orbit, orbit + 0.5).
    func = lambda x: (x-np.nanmin(x))/(np.nanmax(x)-np.nanmin(x))
    orbit_frac = df.groupby(['orbit'],as_index = False)['time'].apply(func)
    # NOTE(review): the shape of SeriesGroupBy.apply output varies across
    # pandas versions — verify orbit_frac.values aligns row-for-row with df.
    return(df['orbit'].values+orbit_frac.values/2)
def spin_to_shcoarse(spinsec):
    """Offset spin seconds by 2**31 to produce a SHCOARSE timestamp."""
    shcoarse_offset = 2 ** 31
    return spinsec + shcoarse_offset
def shcoarse_to_datetime(timestanp):
    """Convert a SHCOARSE timestamp to a naive local datetime.

    SHCOARSE counts seconds since the IMAP-LO epoch (2010-01-01) plus a
    2**31 offset.  (Parameter name keeps the historical 'timestanp'
    spelling for keyword-argument compatibility.)
    """
    epoch_seconds = dt(2010, 1, 1, 0, 0, 0).timestamp()
    spin_seconds = timestanp - 2 ** 31
    return dt.fromtimestamp(spin_seconds + epoch_seconds)
def localize_to_tz(naive, zone='est'):
    """Attach timezone info to a naive datetime.

    :param naive: naive datetime to localize
    :param zone: 'est' (US/Eastern) or 'bern' (Europe/Zurich)
    :raises Exception: for any other zone name
    """
    tz_by_zone = {'est': 'US/Eastern', 'bern': 'Europe/Zurich'}
    if zone not in tz_by_zone:
        raise Exception("""Please choose a valid time zone : 'est', 'bern'""")
    return pytz.timezone(tz_by_zone[zone]).localize(naive)
def get_file_times(path):
    """Return (created, modified) local datetimes for the file at *path*."""
    import os
    created = dt.fromtimestamp(os.path.getctime(path))
    modified = dt.fromtimestamp(os.path.getmtime(path))
    return (created, modified)
{
"api_name": "time.mktime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"li... |
73518708263 | import os
import json
curr_dir = os.path.dirname(os.path.abspath(__file__))
def write_order_to_json(item, quantity, price, buyer, date):
    """Append one order record to orders.json next to this module.

    Creates the file with an empty order list if it does not exist yet
    (previously this raised FileNotFoundError on first use).
    """
    filename = os.path.join(curr_dir, 'orders.json')
    try:
        with open(filename, 'r', encoding="utf-8") as file:
            data = json.loads(file.read())
    except FileNotFoundError:
        data = {'orders': []}
    data['orders'].append({'item': item, 'quantity': quantity, 'price': price, 'buyer': buyer, 'date': date})
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4, separators=(',', ': '), ensure_ascii=False)
# Demo invocations: append two sample orders whenever the module runs.
write_order_to_json('Тест', '11', '5', 'Кто-то', '17.10.2022')
write_order_to_json('Тест 2', '0', '3', 'Путин', '17.10.2022')
| Kederly84/async_chat_python | HomeWork2/Task2/Task_2.py | Task_2.py | py | 679 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
19407950160 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pyqt_fit.nonparam_regression as smooth
from pyqt_fit import npr_methods
import Path
# Plot smoothed incidence-vs-I curves for the Random Lobster simulations,
# one 2x4 figure (one panel per threshold) for each (p1, p2) pair.
path = Path.GetHomePath()
DataName = "SimulationResults/RData_Lobster.csv"
SaveStarter = "SimulationResults/FOI_Pics/Lobster/Lobster_"
a = pd.read_csv(path + DataName)
# Sorted unique values of the parameter grid.
Thresholds = list(set(a.Threshold.values))
Thresholds.sort()
p1s = list(set(a.p1.values))
p1s.sort()
p2s = list(set(a.p2.values))
p2s.sort()
time = float(10)**(-2)  # time-round label used in the figure title
for p1 in p1s:
    b = a.loc[a.p1 == p1,]
    for p2 in p2s:
        c = b.loc[b.p2 == p2,]
        fig,ax = plt.subplots(2,4,sharex = 'col',sharey = 'row',figsize = [12,8])
        for i in range(8):
            # BUGFIX: floor division throughout — under Python 3 `i/4`
            # yields a float, which cannot index the 2x4 axes array.
            print(i // 2)
            print(i % 4)
            print(">")
            d = c.loc[c.Threshold == Thresholds[i],]
            if len(d) > 10:
                minI = d.I.values.min()
                maxI = d.I.values.max()
                xs = np.arange(minI,maxI,1)
                # Local-polynomial kernel smoother of incidence vs I.
                k = smooth.NonParamRegression(d.I.values,d.Inc.values,method = npr_methods.LocalPolynomialKernel(q=1),bandwidth = 50)
                k.fit()
                ax[i // 4, i % 4].plot(d.I.values,d.Inc.values,'.')
                ax[i // 4, i % 4].plot(xs,k(xs),'-r',linewidth = 2)
                ax[i // 4, i % 4].set_title("Threshold: " + str(np.round(Thresholds[i],4)))
                ylim = ax[0,0].get_ylim()
                ax[i // 4, i % 4].set_ylim(ylim)
                ax[i // 4, i % 4].set_xlim((0,300))
        fig.suptitle("Random Lobster, p1: " + str(np.round(p1,4)) + " p2: " + str(np.round(p2,4)) + ", Time Round: " + str(time))
        fig.text(0.5,0.04,"I",ha = "center")
        fig.text(0.04,0.5,"Incidence",va = 'center',rotation = 'vertical')
        plt.savefig(path + SaveStarter + str(np.round(p1,4)) + "_" + str(np.round(p2,4)) + "_Incidence.png")
        plt.close()
| matthewosborne71/NetworkSimulations | PicCode/MakePicCode/MakeLobsterPics.py | MakeLobsterPics.py | py | 1,907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Path.GetHomePath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
28972280127 | import twitter, datetime #importing the libaries
import urllib2
file = open("keys.txt") #reading in the twitter credentials and splitting them up
creds = file.readline().strip().split(',')
currentSession = open("/Users/tobystimpson/Library/Application Support/Google/Chrome/Default/Current Session") #finding the current session in google chrome and getting all the information
twit = currentSession.read()
lines = twit.splitlines()
webaddress = " "
for line in lines:
if (line.find("//") != -1):
startIndex = line.rfind("//") + 2 #loops throught the lines and finds the web url
endIndex = line.rfind("/")
webaddress = line[startIndex:endIndex]
api = twitter.Api(creds[0], creds[1], creds[2], creds[3])
timestamp = datetime.datetime.utcnow()
#response = api.PostUpdate("Tweeted at " + str(timestamp))
response = api.PostUpdate("I like " + webaddress) #posts the web address to twitter
print("Status update to: " + response.text) | Spaceinvadini/Python | twitter/tweeting.py | tweeting.py | py | 963 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twitter.Api",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "attribute"
}
] |
1128012119 |
import os
from copy import deepcopy
from secrets import token_urlsafe
import numpy as np
import torch
from torch import nn
from tqdm.auto import tqdm
from torch.utils.data import Dataset, DataLoader
class nnModule_with_fit(nn.Module):
    # Mixin-style base class: adds a generic, checkpointing training loop
    # (`fit`) on top of nn.Module.  Subclasses must provide
    # `make_training_arrays(data, **loss_kwargs)` and `loss(*batch, **kwargs)`.
    # NOTE(review): the mutable defaults loss_kwargs={} / dataloader_kwargs={}
    # are only read, never mutated, so they are safe here.
    def fit(self, train, val, iterations=10_000, batch_size=256, loss_kwargs={}, \
            print_freq=100, loss_kwargs_val=None, call_back_validation=None, \
            val_freq=None, optimizer=None, save_freq=None, save_filename=None,\
            scheduler=None, schedule_freq=1, schedular_input_fun=None, dataloader_kwargs={}):
        '''The main fitting function.

        It uses
         - self.make_training_arrays, can either return a tuple of arrays or a torch Dataset
         - self.loss

        Runs `iterations` optimizer steps (not epochs), tracking a running
        training loss and a periodic validation loss, keeping in-memory
        checkpoints of the lowest-train-loss and lowest-val-loss parameters,
        and optionally saving the whole module to disk every `save_freq`
        iterations (pass save_freq=False to disable saving).
        KeyboardInterrupt stops training early; on exit the
        'lowest_val_loss' checkpoint is restored.
        '''
        ### Data preperation ###
        loss_kwargs_val = loss_kwargs if loss_kwargs_val is None else loss_kwargs_val
        val_data = self.make_training_arrays(val, **loss_kwargs_val)
        val_data = dataset_to_arrays(val_data) if isinstance(val_data, Dataset) else val_data #convert val_data to a tuple of arrays if needed
        train_data = self.make_training_arrays(train, **loss_kwargs)
        ndata = len(train_data) if isinstance(train_data, Dataset) else len(train_data)
        batch_size = ndata if ndata < batch_size else batch_size #if the batch_size is larger than number of samples
        print(f'Number of datapoints: {ndata:,} \tBatch size: {batch_size} \tIterations per epoch: {ndata//batch_size}')
        # Dataset -> lazy per-batch construction; tuple of arrays -> batches
        # are sliced from preconstructed tensors.
        if isinstance(train_data, Dataset):
            print(f'Training arrays constructed when the batch is created since preconstruct==False')
            data_iter = LoopDataloader(DataLoader(train_data, batch_size=batch_size, **{'shuffle':True, 'drop_last':True, **dataloader_kwargs}), iterations=iterations)
        else:
            print(f'Training arrays size: {array_byte_size(train_data)} Validation arrays size: {array_byte_size(val_data)}, consider using preconstruct=False if too much memory is used')
            data_iter = Dataloader_iterations(train_data, batch_size=batch_size, iterations=iterations)
        data_iter = enumerate(tqdm(data_iter, initial=1), start=1)
        ### Optimizer and Scheduler ###
        if scheduler is not None:
            if schedular_input_fun is None:
                schedular_input_fun = lambda locs, globs: {}
            assert optimizer is not None, 'If a learning rate scheduler is given you need also need to explictly initialize the optimizer yourself'
        if optimizer is not None:
            self.optimizer = optimizer
        elif not hasattr(self,'optimizer'): #if optimizer is not initialized then create a default Adam optimizer
            self.optimizer = torch.optim.Adam(self.parameters())
        ### Monitoring and checkpoints ###
        # Monitors persist across fit() calls so training can be resumed.
        if not hasattr(self, 'loss_train_monitor'):
            self.loss_train_monitor, self.loss_val_monitor, self.iteration_monitor = [], [], []
            iteration_counter_offset = 0
        else:
            print('*** Restarting training!!! This might result in weird behaviour ***')
            self._check_and_refresh_optimizer_if_needed() #for Adam optimizers you need to referesh them when loading from a file
            iteration_counter_offset = self.iteration_monitor[-1] if len(self.iteration_monitor)>0 else 0
        lowest_train_loss_seen, loss_train_acc, _ = float('inf'), 0, self.checkpoint_save('lowest_train_loss')
        lowest_val_loss_seen, loss_val, _ = float('inf'), float('inf'), self.checkpoint_save('lowest_val_loss')
        val_freq = print_freq if val_freq==None else val_freq
        save_freq = print_freq if save_freq==None else save_freq
        if save_filename is None and save_freq!=False:
            # Random 6-char suffix keeps concurrent runs from clobbering files.
            code = token_urlsafe(4).replace('_','0').replace('-','a')
            save_filename = os.path.join(get_checkpoint_dir(), f'{self.__class__.__name__}-{code}.pth')
        ### main training loop
        try: ### To allow for KeyboardInterrupt
            for iteration, batch in data_iter:
                # closure() allows optimizers such as LBFGS to re-evaluate.
                def closure():
                    loss = self.loss(*batch,**loss_kwargs)
                    self.optimizer.zero_grad()
                    loss.backward()
                    return loss
                loss = self.optimizer.step(closure)
                loss_train_acc += loss.item()
                if iteration%val_freq==0: #Validation
                    with torch.no_grad():
                        loss_val = self.loss(*val_data, **loss_kwargs_val).item() if call_back_validation is None else call_back_validation(locals(), globals())
                    if loss_val<lowest_val_loss_seen:
                        lowest_val_loss_seen = loss_val
                        self.checkpoint_save('lowest_val_loss')
                if scheduler is not None and iteration%schedule_freq==0:
                    scheduler.step(**schedular_input_fun(locals(), globals()))
                if iteration%print_freq==0: #Printing and monitor update
                    loss_train = loss_train_acc/print_freq
                    # '!' marks a new best train loss, '!!' a new best val loss.
                    m = '!' if loss_train<lowest_train_loss_seen else ' '
                    M = '!!' if len(self.loss_val_monitor)==0 or np.min(self.loss_val_monitor)>lowest_val_loss_seen else '  '
                    print(f'it {iteration:7,} loss {loss_train:.3f}{m} loss val {loss_val:.3f}{M}')
                    self.loss_train_monitor.append(loss_train)
                    self.loss_val_monitor.append(loss_val)
                    self.iteration_monitor.append(iteration+iteration_counter_offset)
                    if loss_train<lowest_train_loss_seen:
                        lowest_train_loss_seen = loss_train
                        self.checkpoint_save('lowest_train_loss')
                    loss_train_acc = 0
                if save_freq!=False and (iteration%save_freq==0): #Saving
                    self.save_to_file(save_filename)
        except KeyboardInterrupt:
            print('stopping early, ', end='')
        print('Saving parameters to checkpoint self.checkpoints["last"] and loading self.checkpoints["lowest_val_loss"]')
        self.checkpoint_save('last')
        self.checkpoint_load('lowest_val_loss') #Should this also save the monitors at the point of lowest_val_loss?
        if save_freq!=False:
            self.save_to_file(save_filename)
    def checkpoint_save(self,name): #checkpoints do not use files
        # Deep-copied state dicts (model + optimizer) kept in memory.
        if not hasattr(self, 'checkpoints'):
            self.checkpoints = {}
        self.checkpoints[name] = {'state_dict':deepcopy(self.state_dict()),'optimizer_state_dict':deepcopy(self.optimizer.state_dict())}
    def checkpoint_load(self, name):
        # Restore model and optimizer state saved under `name`.
        self.load_state_dict(self.checkpoints[name]['state_dict'])
        self.optimizer.load_state_dict(self.checkpoints[name]['optimizer_state_dict'])
    def save_to_file(self, file):
        # Pickles the whole module (architecture + weights) via torch.save.
        torch.save(self, file)
    def _check_and_refresh_optimizer_if_needed(self):
        # Optimizers unpickled from a file can be missing runtime attributes;
        # probing this private health check detects that case.
        if hasattr(self.optimizer, '_cuda_graph_capture_health_check'):
            try:
                self.optimizer._cuda_graph_capture_health_check()
            except AttributeError:
                print('*** Refreshing optimizer with _refresh_optimizer (probably due to a restart of training after loading the model from a file)')
                self._refresh_optimizer()
    def _refresh_optimizer(self):
        # Rebuild the optimizer with the same class/defaults and copy its state.
        optimizer = self.optimizer.__class__(self.parameters(), **self.optimizer.defaults)
        optimizer.load_state_dict(self.optimizer.state_dict())
        self.optimizer = optimizer
def get_checkpoint_dir():
    '''A utility function which gets the checkpoint directory for each OS

    It creates a working directory called meta-SS-checkpoints
      in LOCALAPPDATA/meta-SS-checkpoints/ for windows
      in ~/.meta-SS-checkpoints/ for unix like
      in ~/Library/Application Support/meta-SS-checkpoints/ for darwin

    Returns
    -------
    checkpoints_dir
    '''
    from sys import platform
    if platform == "darwin":  # macOS
        checkpoints_dir = os.path.expanduser('~/Library/Application Support/meta-SS-checkpoints/')
    elif platform == "win32":
        checkpoints_dir = os.path.join(os.getenv('LOCALAPPDATA'), 'meta-SS-checkpoints/')
    else:  # unix like, might be problematic for some weird operating systems
        checkpoints_dir = os.path.expanduser('~/.meta-SS-checkpoints/')
    # makedirs(exist_ok=True) avoids the isdir/mkdir race of the previous
    # implementation and also creates missing parent directories.
    os.makedirs(checkpoints_dir, exist_ok=True)
    return checkpoints_dir
class Dataloader_iterations:
    '''Iteration-bounded minibatch loader over a tuple of equal-length arrays.

    The data is converted to float32 torch tensors once, up front (this
    copies it); iterating yields exactly `iterations` shuffled minibatches.
    '''
    def __init__(self, data, batch_size, iterations):
        self.data = [torch.as_tensor(d, dtype=torch.float32) for d in data]
        self.batch_size = batch_size
        self.iterations = iterations

    def __iter__(self):
        return Dataloader_iterationsIterator(self.data, self.batch_size, self.iterations)

    def __len__(self):
        return self.iterations


class Dataloader_iterationsIterator:
    '''Yields `iterations` minibatches, reshuffling whenever the data wraps.'''
    def __init__(self, data, batch_size, iterations, init_shuffle=True):
        self.ids = np.arange(len(data[0]), dtype=int)
        if init_shuffle:
            np.random.shuffle(self.ids)
        self.data = data
        self.L = len(data[0])
        self.batch_size = min(batch_size, self.L)  # clamp to dataset size
        self.data_counter = 0
        self.it_counter = 0
        self.iterations = iterations

    def __iter__(self):
        return self

    def __next__(self):
        self.it_counter += 1
        if self.it_counter > self.iterations:
            raise StopIteration
        self.data_counter += self.batch_size
        batch_ids = self.ids[self.data_counter - self.batch_size:self.data_counter]
        if self.data_counter + self.batch_size > self.L:
            # Not enough samples left for a full batch next time: restart
            # from the front with a fresh shuffle.
            self.data_counter = 0
            np.random.shuffle(self.ids)
        return [d[batch_ids] for d in self.data]
def array_byte_size(arrays):
    '''Human-readable total byte size of a sequence of torch tensors.'''
    total = sum(t.detach().numpy().nbytes for t in arrays)
    if total > 2 ** 30:
        return f'{total / 2 ** 30:.1f} GB'
    if total > 2 ** 20:
        return f'{total / 2 ** 20:.1f} MB'
    return f'{total / 2 ** 10:.1f} kB'
def dataset_to_arrays(dataset):
    '''Stack every sample of a map-style dataset into per-field float32 tensors.'''
    samples = [dataset[k] for k in range(len(dataset))]
    fields = zip(*samples)
    return [torch.as_tensor(np.array(field), dtype=torch.float32) for field in fields]
class Get_sample_fun_to_dataset(Dataset):
    """Adapter turning a sample-generating function into a map-style Dataset.

    The wrapped callable must expose a `.length` attribute giving the
    number of samples; `fun(k)` returns the k-th sample.
    """
    def __init__(self, fun):
        super().__init__()
        self.fun = fun
        assert hasattr(fun, 'length')

    def __getitem__(self, k):
        return self.fun(k)

    def __len__(self):
        return self.fun.length
class LoopDataloader:
    '''Endlessly cycle a DataLoader, capped at a fixed number of iterations.'''
    def __init__(self, dataloader, iterations):
        self.dataloader = dataloader
        self.iterations = iterations

    def __iter__(self):
        return LoopDataloaderIterator(self.dataloader, self.iterations)

    def __len__(self):
        return self.iterations


class LoopDataloaderIterator:
    '''Iterator that restarts the wrapped DataLoader whenever it runs out.'''
    def __init__(self, dataloader, iterations):
        self.dataloader = dataloader
        self.iterations = iterations
        self.dataloader_iter = iter(dataloader)
        self.it_counter = 0

    def __iter__(self):
        return self

    def __next__(self):
        self.it_counter += 1
        if self.it_counter > self.iterations:
            raise StopIteration
        try:
            return next(self.dataloader_iter)
        except StopIteration:
            # Wrapped loader exhausted: restart it and take its first batch.
            self.dataloader_iter = iter(self.dataloader)
            return next(self.dataloader_iter)
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "torch.util... |
42893075767 | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/10.
"""
__author__ = 'Allen7D'
import hashlib
import os
from flask import current_app
from werkzeug.datastructures import FileStorage
from app.libs.error_code import FileTooLargeException, \
FileTooManyException, FileExtensionException, ParameterException
class Uploader(object):
    """Base class for handling uploaded files.

    Parses the incoming file mapping, loads extension/size/count limits
    (caller overrides fall back to the Flask app's ``FILE`` config) and
    validates the files; the actual persistence is delegated to a
    subclass's ``upload()`` implementation.
    """
    def __init__(self, files: list or FileStorage, config=None):
        # FIX: ``config`` previously defaulted to a shared mutable ``{}``;
        # use None and create a fresh dict per instance.
        config = config if config is not None else {}
        self._include = []  # allowed file extensions
        self._exclude = []  # disallowed file extensions
        self._single_limit = 0  # max size (bytes) of a single file
        self._total_limit = 0  # max combined size (bytes) of all files
        self._nums = 0  # max number of files per upload
        self._store_dir = ''  # storage directory
        self._file_storage = self.__parse_files(files)  # flat list of FileStorage objects
        self.__load_config(config)
        self.__verify()
    def upload(self, **kwargs) -> dict:
        '''Abstract upload method; must be implemented by subclasses.'''
        raise NotImplementedError()
    @staticmethod
    def _generate_uuid():
        import uuid
        return str(uuid.uuid1())
    @staticmethod
    def _get_ext(filename: str):
        """
        Return the file extension (lower-cased, with a leading dot).
        :param filename: original file name
        :return: extension string, e.g. '.png'
        """
        return '.' + filename.lower().split('.')[-1]
    @staticmethod
    def _generate_md5(data: bytes):
        # Hex MD5 digest of the raw file bytes.
        md5_obj = hashlib.md5()
        md5_obj.update(data)
        ret = md5_obj.hexdigest()
        return ret
    @staticmethod
    def _get_size(file_obj: FileStorage):
        """
        Return the file size in bytes.
        :param file_obj: file object
        :return: number of bytes
        """
        file_obj.seek(0, os.SEEK_END)
        size = file_obj.tell()
        file_obj.seek(0)  # reset the file pointer for later reads
        return size
    @staticmethod
    def _generate_name(filename: str):
        # Random collision-free name preserving the original extension.
        return Uploader._generate_uuid() + Uploader._get_ext(filename)
    def __load_config(self, custom_config):
        """
        Load the upload configuration; any key missing from
        ``custom_config`` falls back to the app-level defaults.
        :param custom_config: user-supplied configuration overrides
        :return: None
        """
        default_config = current_app.config.get('FILE')
        self._include = custom_config['INCLUDE'] if \
            'INCLUDE' in custom_config else default_config['INCLUDE']
        self._exclude = custom_config['EXCLUDE'] if \
            'EXCLUDE' in custom_config else default_config['EXCLUDE']
        self._single_limit = custom_config['SINGLE_LIMIT'] if \
            'SINGLE_LIMIT' in custom_config else default_config['SINGLE_LIMIT']
        self._total_limit = custom_config['TOTAL_LIMIT'] if \
            'TOTAL_LIMIT' in custom_config else default_config['TOTAL_LIMIT']
        self._nums = custom_config['NUMS'] if 'NUMS' in custom_config else default_config['NUMS']
        self._store_dir = custom_config['STORE_DIR'] if \
            'STORE_DIR' in custom_config else default_config['STORE_DIR']
    @staticmethod
    def __parse_files(files):
        '''Flatten the request's file multi-dict into a flat list.'''
        ret = []
        for key, value in files.items():
            ret += files.getlist(key)
        return ret
    def __verify(self):
        """
        Validate the uploaded files (presence, extension, size limits).
        """
        if not self._file_storage:
            raise ParameterException(msg='未找到符合条件的文件资源')
        self.__allowed_file()
        self.__allowed_file_size()
    def _get_store_path(self, filename: str):
        # Returns (absolute storage path, 'YYYY/MM/DD/<uuid-name>', uuid name).
        uuid_filename = self._generate_name(filename)
        format_day = self.__get_format_day()
        store_dir = self._store_dir
        return os.path.join(store_dir, uuid_filename), format_day + '/' + uuid_filename, uuid_filename
    def mkdir_if_not_exists(self):
        '''
        Extend the storage path with today's date (YYYY/MM/DD) and create
        the directory if it does not exist yet.
        '''
        if not os.path.isabs(self._store_dir):
            self._store_dir = os.path.abspath(self._store_dir)
        # mkdir by YYYY/MM/DD
        self._store_dir += '/' + self.__get_format_day()
        if not os.path.exists(self._store_dir):
            os.makedirs(self._store_dir)
    @staticmethod
    def __get_format_day():
        '''
        Return today's date as 'YYYY/MM/DD' (e.g. 2020/08/08).
        '''
        import time
        return str(time.strftime("%Y/%m/%d"))
    def __allowed_file(self):
        """
        Validate extensions against the include/exclude lists;
        the include list wins whenever both are configured.
        """
        if (self._include and self._exclude) or self._include:
            for single in self._file_storage:
                if '.' not in single.filename or \
                        single.filename.lower().rsplit('.', 1)[1] not in self._include:
                    raise FileExtensionException()
            return True
        elif self._exclude and not self._include:
            for single in self._file_storage:
                if '.' not in single.filename or \
                        single.filename.lower().rsplit('.', 1)[1] in self._exclude:
                    raise FileExtensionException()
            return True
    def __allowed_file_size(self):
        """
        Validate single-file size, total size and file count.
        """
        file_count = len(self._file_storage)
        if file_count > 1:
            if file_count > self._nums:
                raise FileTooManyException()
            total_size = 0
            for single in self._file_storage:
                if self._get_size(single) > self._single_limit:
                    raise FileTooLargeException(
                        single.filename + '大小不能超过' + str(self._single_limit) + '字节'
                    )
                total_size += self._get_size(single)
            if total_size > self._total_limit:
                raise FileTooLargeException()
        else:
            file_size = self._get_size(self._file_storage[0])
            if file_size > self._single_limit:
                raise FileTooLargeException()
| Allen7D/mini-shop-server | app/core/file.py | file.py | py | 5,999 | python | en | code | 663 | github-code | 36 | [
{
"api_name": "werkzeug.datastructures.FileStorage",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "uuid.uuid1",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "werkzeug.d... |
70653037864 | from djoser.serializers import UserSerializer
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Cart, Category, MenuItem, Order, OrderItem
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for menu-item categories."""
    class Meta:
        model = Category
        fields = ['id', 'title', 'slug']
class MenuItemSerializer(serializers.ModelSerializer):
    """Menu item with a nested read-only category; writes take category_id."""
    category = CategorySerializer(read_only=True)
    category_id = serializers.IntegerField(write_only=True)
    class Meta:
        model = MenuItem
        fields = ['id', 'title', 'price', 'featured', 'category', 'category_id']
class CartSerializer(serializers.ModelSerializer):
    """Cart line item; unit_price and price are computed server-side from
    the referenced menu item on create (both read-only to the client)."""
    menuitem = MenuItemSerializer(read_only=True)
    menuitem_id = serializers.IntegerField(write_only=True)
    def validate_menuitem_id(self, value):
        # Reject ids that do not reference an existing menu item.
        item = MenuItem.objects.filter(pk=value).exists()
        if not item:
            raise serializers.ValidationError('Invalid menuitem')
        return value
    class Meta:
        model = Cart
        fields = ['id', 'menuitem', 'menuitem_id', 'quantity', 'unit_price', 'price']
        extra_kwargs = {
            'price': {
                'read_only': True,
            },
            'unit_price': {
                'read_only': True
            }
        }
        depth = 1
    def create(self, validated_data):
        # Derive prices from the menu item instead of trusting the client.
        item = MenuItem.objects.get(pk=validated_data.get('menuitem_id'))
        validated_data['unit_price'] = item.price
        validated_data['price'] = item.price * validated_data.get('quantity')
        return super().create(validated_data)
class OrderItem(serializers.ModelSerializer):
    """Serializer for a single order line item.
    NOTE(review): this class reuses the model name ``OrderItem``. Inside
    ``Meta`` the name still resolves to the imported model (the class
    object is not bound until its body finishes executing), but afterwards
    the module-level ``OrderItem`` refers to this serializer — consider
    renaming to ``OrderItemSerializer``.
    """
    menuitem = MenuItemSerializer()
    class Meta:
        model = OrderItem
        fields = [
            'id',
            'menuitem',
            'quantity',
            'unit_price',
            'price'
        ]
class OrderSerializer(serializers.ModelSerializer):
    """Order with nested delivery-crew user and its order line items."""
    delivery_crew = UserSerializer(read_only=True)
    delivery_crew_id = serializers.IntegerField(write_only=True)
    # `OrderItem` here resolves to the serializer class of that name.
    orderitem_set = OrderItem(many=True)
    class Meta:
        model = Order
        fields = ['id', 'delivery_crew', 'delivery_crew_id', 'status', 'total', 'date', 'orderitem_set']
# class OrderPartialUpdateSerializer(serializers.ModelSerializer):
# delivery_crew_id = serializers.IntegerField()
# class Meta:
# model = Order
# fields = ['id', 'delivery_crew_id', 'status']
class OrderPartialUpdateSerializer(serializers.ModelSerializer):
    """Partial update of an order: assign a delivery crew member and/or
    change the order status."""
    delivery_crew_id = serializers.IntegerField()
    class Meta:
        model = Order
        fields = ['id', 'delivery_crew_id', 'status']
    def validate_delivery_crew_id(self, value):
        # Only users belonging to the 'Delivery crew' group may be assigned.
        if not User.objects.filter(groups__name='Delivery crew').filter(pk=value).exists():
            raise serializers.ValidationError("Incorrect delivery crew")
        return value
| lzytourist/LittleLemon | LittleLemonAPI/serializers.py | serializers.py | py | 2,915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 11,
"usage_type": "name"
},
... |
35941125845 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 16:21:28 2018
@author: bob.lee
"""
import docx
import re
import xlrd
import os
from xlwt import Workbook
# Working directory for intermediate outputs: ../../temp relative to this file.
DATA_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'temp/'))
if not os.path.isdir(DATA_HOME):
    os.mkdir(DATA_HOME)  # NOTE: mkdir (not makedirs) — the parent dirs must already exist
def content_main(file_home, classifier_home, key_word_home, save_home):
    """
    Sub-routine: match third-level indicators against vocabulary
    (corresponds to step2, process1-4).
    :param file_home: directory containing the policy documents (.docx)
    :param classifier_home: path of the measure-word (量词) table
    :param key_word_home: path of the content-keyword frequency table
    :param save_home: directory where the manual-correction table is saved
    NOTE(review): the original docstring also documented a
    ``classifier_sheet`` parameter; it is now derived from the first
    sheet of the workbook instead of being passed in.
    """
    # print(len([name for name in os.listdir(DATA_HOME + '/content') if
    #            os.path.isfile(os.path.join(DATA_HOME + '/content', name))]))
    book = Workbook(encoding='utf-8')
    sheet1 = book.add_sheet('list')
    sheet1.write(0, 0, '关键词+量词')
    sheet1.write(0, 1, '句子')
    ans_line = 0  # number of result rows written so far (row 0 is the header)
    content_liang = []  # measure words, one per table row
    liang_no = []  # per-row exclusion lists ('不包含' column, split on '、')
    no_number = []  # row indices that actually have an exclusion entry
    workbook_liang = xlrd.open_workbook(classifier_home)
    classifier_sheet = workbook_liang.sheet_names()[0]
    sheet_liang = workbook_liang.sheet_by_name(classifier_sheet)
    for j in range(sheet_liang.ncols):
        if sheet_liang.cell(0, j).value == '量词':
            for i in range(1, sheet_liang.nrows):
                content_liang.append(sheet_liang.cell(i, j).value)
        if sheet_liang.cell(0, j).value == '不包含':
            for i in range(1, sheet_liang.nrows):
                temp = sheet_liang.cell(i, j).value
                liang_no.append(temp.split('、'))
                if temp:
                    no_number.append(i - 1)
    # Load the content keywords (first column of sheet '内容分词').
    workbook = xlrd.open_workbook(key_word_home)
    sheet_one_content = workbook.sheet_by_name('内容分词')
    content_word = []
    for i in range(1, sheet_one_content.nrows):
        result = sheet_one_content.cell(i, 0).value
        if result:
            content_word.append(result)
    for file in os.listdir(file_home):
        try:
            print(file)
            if file.find('pdf') == -1:
                content = []
                if file.find('~$') != -1:  # skip Office lock/temp files
                    continue
                file = docx.Document(file_home + file)
                for para in file.paragraphs:
                    content.append(para.text)
                result = ''.join(content)
                # Split the document into clauses on CJK punctuation.
                rr = re.compile(r',|。|!|?|;', re.I)  # re.I: case-insensitive (irrelevant for CJK but harmless)
                match = re.split(rr, result)
                result_key = []
                result_content = []
                # print(len(content_word),len(match),len(content_liang))
                # Record (keyword, measure word) hits where the keyword occurs
                # before the measure word in the same clause, subject to the
                # per-measure-word exclusion lists.
                for i in range(len(content_word)):
                    for j in range(len(match)):
                        for m in range(len(content_liang)):
                            temp_ans = match[j].find(content_word[i])
                            ans_temp = match[j].find(content_liang[m])
                            if temp_ans != -1 and ans_temp != -1:
                                if temp_ans < ans_temp:
                                    if content_word[i].find(content_liang[m]) != -1:
                                        # keyword itself contains the measure word:
                                        # require a second, separate occurrence
                                        if len(re.findall(re.compile(content_liang[m]), match[j])) > 1:
                                            if m in no_number:
                                                # no_number[no_number.index(m)] == m, so this is liang_no[m]
                                                temp = liang_no[no_number[no_number.index(m)]]
                                                if content_word[i] not in temp:
                                                    result_key.append(content_word[i] + ' ' + content_liang[m])
                                                    result_content.append(match[j])
                                            else:
                                                result_key.append(content_word[i] + ' ' + content_liang[m])
                                                result_content.append(match[j])
                                    else:
                                        if m in no_number:
                                            temp = liang_no[no_number[no_number.index(m)]]
                                            if content_word[i] not in temp:
                                                result_key.append(content_word[i] + ' ' + content_liang[m])
                                                result_content.append(match[j])
                                        else:
                                            result_key.append(content_word[i] + ' ' + content_liang[m])
                                            result_content.append(match[j])
                for i, line in enumerate(result_key):
                    sheet1.write(ans_line + i + 1, 0, line)
                for i, line in enumerate(result_content):
                    sheet1.write(ans_line + i + 1, 1, line)
                ans_line = ans_line + len(result_key)
        except Exception as e:
            # NOTE(review): silently skips any document that fails to parse;
            # consider at least logging `e`.
            pass
    book.save(save_home + '/三级指标人工修正表.xls')
# content_main(DATA_HOME + '/1' + '/content/', r'C:\Users\yry\Documents\WeChat Files\wxid_gp6l5oft4qci42\Files\量词库.xls',
# DATA_HOME + '/1' + '/step_one/内容分词结果.xls',
# DATA_HOME + '/1' + '/step_two')
| lidunwei12/biao | src/step_two/content_create.py | content_create.py | py | 5,519 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line... |
15596002112 | #!/usr/bin/python
"""Estimate conditional probability distributions from walk data.
The point of this program is to compute random walk probabilities over a
graph. This is a simplified problem based on `walk_analyzer.py`. This
program finds the conditional probabilities of reaching a node given a
particular start node. The output consists of a conditional probability
distribution for each start node.
We used a system called GraphChi to do very efficient random walks over the
graph - GraphChi can handle a billion individual walks with 10 steps over a
graph with about 2 million edges in about 20 minutes. That is, start at 1
million separate nodes, and do 1000 walks from each node for 10 steps in
about 20 minutes, on a single (somewhat large) machine. This process
produces a set of data files of the form (walk_id, hop_num, node_id). From
this output, we need to create a set of probabilities as shown above.
"""
# Author: Matt Gardner (mg1@cs.cmu.edu)
# (with some help from Andrew McNabb (amcnabb@cs.byu.edu))
from __future__ import division
import itertools
import logging
import mrs
import os
import struct
from collections import defaultdict
from StringIO import StringIO
from subprocess import Popen, PIPE
NUM_PAIR_TASKS = 400  # map splits for the walk-id -> node-pair stage
NUM_COUNT_TASKS = 300  # reduce splits for the pair-counting stage
MAX_INPUT_SIZE = 20000000  # max bytes of walk data handled by one map task
MIN_COUNT = 2  # node pairs seen fewer times than this are dropped
# Use the mrs logger, so we have the same log level
logger = logging.getLogger('mrs')
# Big-endian record: walk_id (u32), hop number (u16), node id (u32).
walk_struct = struct.Struct('>IHI')
walk_struct_size = walk_struct.size
class RandomWalkAnalyzer(mrs.MapReduce):
    """MapReduce pipeline turning raw GraphChi walk files into, for each
    start node, a conditional probability distribution over nodes reached.
    NOTE(review): this is Python 2 code (xrange, dict.iteritems, StringIO).
    """
    def run(self, job):
        # Driver; runs on the master: builds input splits and wires up the
        # map -> reducemap -> reduce pipeline.
        outdir = self.output_dir()
        if not outdir:
            return 1
        # This is the main part of the program, that gets run on the master.
        # This is the initial data (in (key, value) format) that is sent to
        # the map. In our case, we just need to give an index to the map task,
        # and each mapper will look up the document it needs from that index.
        kv_pairs = []
        for filename in self.args[:-1]:
            size = os.stat(filename).st_size
            assert size % walk_struct_size == 0
            total_records = size // walk_struct_size
            chunks = (size - 1) // MAX_INPUT_SIZE + 1
            offset = 0
            for i in xrange(chunks):
                chunk_records = total_records // chunks
                # Spread out the remainder among the first few chunks.
                if i < total_records % chunks:
                    chunk_records += 1
                key = filename
                value = (offset, chunk_records)
                kv_pairs.append((key, value))
                offset += chunk_records
        source = job.local_data(kv_pairs)
        # We pass the initial data into the map tasks
        walk_ids = job.map_data(source, self.walk_file_map,
                                parter=self.mod_partition, splits=NUM_PAIR_TASKS)
        source.close()
        # If the output of a reduce is going straight into a map, we can do a
        # reducemap, which is pretty nice.
        node_pairs = job.reducemap_data(walk_ids, self.walk_id_reduce,
                                        self.node_pair_map, splits=NUM_COUNT_TASKS)
        walk_ids.close()
        # We just output here, which leads to pretty ugly storing of the
        # output in an arbitrary directory structure.  The alternative is to
        # grab it after it's done and do whatever outputting you want in this
        # run() method, but then you have to hope that all of the data fits in
        # memory.  Because we think this output will be rather large, we do
        # our outputting directly from the reduce.
        output = job.reduce_data(node_pairs, self.normalize_reduce,
                                 outdir=outdir, format=mrs.fileformats.TextWriter)
        node_pairs.close()
        ready = []
        while not ready:
            ready = job.wait(output, timeout=10.0)
            logger.info('Walk ids: ' + str(job.progress(walk_ids)))
            logger.info('Node pairs: ' + str(job.progress(node_pairs)))
            logger.info('Output: ' + str(job.progress(output)))
        # If you don't return 0, mrs thinks your job failed
        return 0
    int32_serializer = mrs.make_primitive_serializer('I')
    int32_pair_serializer = mrs.make_struct_serializer('=II')
    @mrs.output_serializers(key=int32_serializer, value=int32_pair_serializer)
    def walk_file_map(self, key, value):
        """Process the input files, emitting one entry for each step."""
        filename = key
        offset, count = value  # offset/count are in records, not bytes
        logger.info('Got walk file %s (offset %s, count %s)' %
                    (filename, offset, count))
        walk_file = open(filename, 'rb')
        walk_file.seek(offset * walk_struct_size)
        for i in xrange(count):
            walk_buf = walk_file.read(walk_struct_size)
            walk_id, hop, node = walk_struct.unpack(walk_buf)
            yield (walk_id, (hop, node))
    def walk_id_reduce(self, key, values):
        """Consolidate each walk into a single list of nodes."""
        value_list = list(itertools.islice(values, 100))
        # GraphChi shouldn't ever let this happen, but sometimes there is a
        # single walk_id with a pathologically long list of hops that really
        # breaks things in map_walk_ids. So we catch that case here.
        if len(value_list) < 100:
            value_list.sort()  # sort by hop number so nodes are in walk order
            nodes = [node for hop, node in value_list]
            yield nodes
    @mrs.output_serializers(key=int32_serializer, value=int32_serializer)
    def node_pair_map(self, key, value):
        """Emit an entry for each pair of nodes in the walks."""
        for i, start_node in enumerate(value):
            for end_node in value[i+1:]:
                yield (start_node, end_node)
    def normalize_reduce(self, key, values):
        """Make a conditional probability distribution given the node `key`."""
        counts = defaultdict(int)
        for v in values:
            counts[v] += 1
        distribution = {}
        total = 0
        for node, count in counts.iteritems():
            if count >= MIN_COUNT:  # drop rarely-seen pairs
                distribution[node] = count
                total += count
        for node in distribution:
            # true division thanks to `from __future__ import division`
            distribution[node] /= total
        if distribution:
            yield distribution
if __name__ == '__main__':
mrs.main(RandomWalkAnalyzer)
# vim: et sw=4 sts=4
| byu-aml-lab/mrs-mapreduce | examples/contrib/walk_analyzer/conditional_prob.py | conditional_prob.py | py | 6,367 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "struct.Struct",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "mrs.MapReduce",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"lin... |
30240198017 | import nltk
import pandas
import os
import json
import multiprocessing
targetPosts = 'processed_data/posts.csv'
posts_output = 'processed_data/posts_tokenized.json'
targetNodes = 'processed_data/nodes.csv'
nodes_output = 'processed_data/nodes_tokenized.json'
def processLine(l):
    """Decode a JSON-encoded string, tokenize it with NLTK, and return the
    token list re-encoded as JSON (one call per line; pool-friendly)."""
    text = json.loads(l)
    tokens = nltk.word_tokenize(text)
    return json.dumps(tokens)
def main():
    """Tokenize post bodies in parallel 100k-row batches, then node bios
    serially, writing one JSON token list per line."""
    with multiprocessing.Pool(processes=8) as pool:
        df_posts = pandas.read_csv(targetPosts, index_col=0)
        print("Loaded")
        with open(posts_output, 'w') as f:
            #rowsIter = df_posts.iterrows()
            targets = []  # pending rows for the next parallel batch
            for i, r in df_posts.iterrows():
                #print(r)
                targets.append(r['body'])
                # NOTE(review): `i` is the DataFrame index label, so this
                # batching assumes a sequential integer index — confirm.
                if i % 100000 == 0 and i > 0:
                    results = pool.map(processLine, targets)
                    targets = []
                    f.write('\n'.join(results))
                    f.write('\n')
                    print("posts: {}\t{:.2f}%\t{}".format(str(i).rjust(12), i/len(df_posts) * 100, str(r['body'])[:20]))
            # flush the final (partial) batch
            results = pool.map(processLine, targets)
            targets = []
            f.write('\n'.join(results))
            f.write('\n')
        df_nodes = pandas.read_csv(targetNodes, index_col=0)
        with open(nodes_output, 'w') as f:
            for i, r in df_nodes.iterrows():
                json.dump(nltk.word_tokenize(json.loads(r['bio'])), f)
                f.write('\n')
                if i % 10000 == 0:
                    print("nodes: {}\t{:.2f}%\t{}".format(str(i).rjust(12), i/len(df_nodes) * 100, str(r['bio'])[:20]))
    print("Done")
if __name__ == '__main__':
main()
| reidmcy/csc-2611-final-project | scripts/tokenize_posts.py | tokenize_posts.py | py | 1,642 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"... |
import tweepy

# Twitter API credentials — placeholders; replace with real keys before running.
consumer_key = "entering the key here"
consumer_secret = "entering the key here"
access_token = "entering the key here"
access_token_secret = "entering the key here"
from tweepy.auth import OAuthHandler
#Creating the Auth Object
auth = OAuthHandler(consumer_key, consumer_secret)
#Setting the access token and secret
auth.set_access_token(access_token, access_token_secret)
#Creating the API object while passing in the auth info
api = tweepy.API(auth)
#The search term we want to find
query = "Climate"
#Language code (follows ISO 639-1 standards)
language = "en"
# Search recent tweets matching the query.
# NOTE(review): API.search exists in tweepy 3.x; tweepy 4.x renamed it to
# search_tweets — confirm the installed tweepy version.
results = api.search(q=query, lang=language)
#Printing all tweets (for each through all tweets pulled)
for tweet in results:
    print(tweet.user.screen_name,"Tweeted:",tweet.text)
| JaySRT/Twitter-Sentiment-Analysis | Keyword_based_search.py | Keyword_based_search.py | py | 862 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tweepy.auth.OAuthHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 14,
"usage_type": "call"
}
] |
11297199192 | import math
import cv2
import numpy as np
import subprocess
import imghdr
import traceback
import os
# finds angle between robot's heading and the perpendicular to the targets
class VisionTargetDetector:
    """Detects retro-reflective vision targets in frames from a camera,
    image or video, and pairs them up for aiming."""
    def __init__(self, input):
        # ``input`` may be a camera port number, or a path to an image/video.
        self.input_path = input
        try:
            # if input is a camera port
            self.input = cv2.VideoCapture(int(input))
            self.set_camera_settings(input)
        except:
            # if input is a path
            self.input = cv2.VideoCapture(input)
        frame = self.get_frame()
        # height of a vision target: a 5.5 x 2 strip tilted 14.5 degrees
        # (units presumably inches — from the FRC field spec; confirm)
        self.TARGET_HEIGHT = 5.5 * math.cos(math.radians(14.5)) + 2 * math.sin(math.radians(14.5))
        # initialize screen width and screen height
        self.SCREEN_HEIGHT, self.SCREEN_WIDTH = frame.shape[:2]
        # initialize angle of field of view in radians
        self.FIELD_OF_VIEW_RAD = 70.42 * math.pi / 180.0
        # calculates focal length based on a right triangle representing the "image" side of a pinhole camera
        # ABC where A is FIELD_OF_VIEW_RAD/2, a is SCREEN_WIDTH/2, and b is the focal length
        self.FOCAL_LENGTH_PIXELS = (self.SCREEN_WIDTH / 2.0) / math.tan(self.FIELD_OF_VIEW_RAD / 2.0)
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        # release the capture device and close any OpenCV windows
        self.input.release()
        cv2.destroyAllWindows()
        print("exited")
    def set_camera_settings(self, camera_port):
        """Set the camera exposure via v4l2 (only works on Linux systems)."""
        camera_path = "/dev/video" + camera_port
        try:
            subprocess.call(["v4l2-ctl", "-d", camera_path, "-c", "exposure_auto=1"])
            subprocess.call(["v4l2-ctl", "-d", camera_path, "-c", "exposure_absolute=1"])
        except:
            print("exposure adjustment not completed")
    def get_frame(self):
        """Return a frame corresponding to the input type."""
        frame = None
        try:
            # if input is an image, use cv2.imread()
            if imghdr.what(self.input_path) is not None:
                frame = cv2.imread(self.input_path)
            # if input is a video, use VideoCapture()
            else:
                _, frame = self.input.read()
        except:
            # if input is a camera port, use VideoCapture()
            _, frame = self.input.read()
        return frame
    def get_closest_pair(self, pairs):
        """Return the target pair closest to the horizontal center of the
        frame, or [] when no pairs were found."""
        if len(pairs) == 0:
            return []
        closest_pair = pairs[int(len(pairs)/2)]
        for pair in pairs:
            # BUG FIX: Pair.get_center() returns a Point, which is not
            # subscriptable — compare .x coordinates instead of [0].
            if abs(self.SCREEN_WIDTH/2 - pair.get_center().x) < abs(self.SCREEN_WIDTH/2 - closest_pair.get_center().x):
                closest_pair = pair
        return closest_pair
    def get_all_pairs(self, rotated_boxes):
        """Pair adjacent rectangles whose tops lean toward each other
        (top corners closer than bottom corners)."""
        pairs = []
        for c in range(0, len(rotated_boxes)-1):
            rect1, rect2 = rotated_boxes[c], rotated_boxes[c+1]
            top_distance = math.hypot(rect1.p2.x - rect2.p2.x, rect1.p2.y - rect2.p2.y)
            bottom_distance = math.hypot(rect1.p4.x - rect2.p4.x, rect1.p4.y - rect2.p4.y)
            if top_distance < bottom_distance:
                pairs.append(Pair(rect1, rect2, self))
        return pairs
    def run_cv(self):
        """Grab one frame, find green vision targets, and display them."""
        frame = self.get_frame()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        low_green = np.array([60,90,50])
        high_green= np.array([87,255,229])
        # isolate the desired shades of green
        mask = cv2.inRange(hsv, low_green, high_green)
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # sort contours by x-coordinate; sorted() (not list.sort) because
        # newer OpenCV versions return the contours as a tuple
        contours = sorted(contours, key = lambda countour: cv2.boundingRect(countour)[0])
        rotated_boxes = []
        # convert contours into rectangles, dropping tiny noise contours
        for c in contours:
            area = cv2.contourArea(c)
            rect = cv2.minAreaRect(c)
            _, _, rot_angle = rect
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            if area > 100:
                rotated_boxes.append(RotatedRectangle(box, area, rot_angle))
        # draw red rectangles around vision targets
        # (FIX: the identical drawContours call was previously duplicated)
        for rect in rotated_boxes:
            cv2.drawContours(frame, [rect.box], 0, (0,0,255), 2)
        # draw blue rectangles around vision target pairs
        for pair in self.get_all_pairs(rotated_boxes):
            cv2.drawContours(frame, [pair.left_rect.box], 0, (255,0,0), 2)
            cv2.drawContours(frame, [pair.right_rect.box], 0, (255,0,0), 2)
        # show windows
        cv2.imshow("contours: " + str(self.input_path), mask)
        cv2.imshow("frame: " + str(self.input_path), frame)
# this class defines the bounding rectangle of a vision target
class RotatedRectangle:
    """Rotated bounding rectangle of a vision target.
    Corner points are sorted by image y-coordinate: p1 is the highest
    corner (smallest y), p4 the lowest.
    """
    def __init__(self, box, area, rot_angle):
        self.box = box  # 4x2 array of corner coordinates (cv2.boxPoints order)
        self.area = area  # contour area
        self.rot_angle = rot_angle  # angle reported by cv2.minAreaRect
        points = []
        for coordinates in box:
            points.append(Point(coordinates[0], coordinates[1]))
        # sorts points based on y value
        points.sort(key = lambda p: p.y)
        # highest = 1, lowest = 4
        self.points = points
        self.p1, self.p2, self.p3, self.p4 = points[0], points[1], points[2], points[3]
    def get_width(self):
        # distance between the two top-most corners
        return math.hypot(self.p1.x - self.p2.x, self.p1.y - self.p2.y)
    def get_height(self):
        # distance from the top corner to the third corner down
        return math.hypot(self.p1.x - self.p3.x, self.p1.y - self.p3.y)
    def get_center(self):
        """Return the centroid of the four corners as a Point."""
        x = sum(point.x for point in self.points)/4
        # BUG FIX: the y coordinate previously averaged point.x values.
        y = sum(point.y for point in self.points)/4
        return Point(x, y)
# this class defines a point
class Point:
    """A simple 2-D point with x and y coordinates."""
    def __init__(self, x, y):
        self.x, self.y = x, y
# this class defines a pair of vision targets
class Pair:
    """A matched pair of vision-target rectangles (left and right), with a
    back-reference to the detector that produced it."""
    def __init__(self, left_rect, right_rect, vtd):
        self.left_rect = left_rect
        self.right_rect = right_rect
        self.vtd = vtd
    def get_center(self):
        """Return the midpoint of the two rectangles' centers as a Point."""
        # (FIX: removed unused locals r1/r2 from the original)
        x = (self.left_rect.get_center().x + self.right_rect.get_center().x)/2
        y = (self.left_rect.get_center().y + self.right_rect.get_center().y)/2
        return Point(x, y)
| rithue/DepthCamera | cv.py | cv.py | py | 5,497 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_n... |
17832877059 | from flask import Blueprint, jsonify, request
from jwt_functions import write_token, validate_token, decode
import bcrypt
from os import getenv
from dotenv import load_dotenv
routes_users = Blueprint('routes_users', __name__)
@routes_users.route('/signup', methods=['POST'])
def signup():
    """Create a new user account.

    Expects a JSON body with name, userName, email and password; hashes
    the password with bcrypt and seeds the default achievement list.
    Rejects duplicate userName values.
    """
    from api import db
    exists = db.users.find_one({'userName': request.json['userName']})
    if exists is not None:
        return jsonify({'message': 'Username already exists!'})
    hashpw = bcrypt.hashpw(
        request.json['password'].encode('UTF-8'), bcrypt.gensalt())
    # Default achievements (title, description); every one starts at
    # progress 0. Strings are kept byte-identical to the original data
    # (including the 'Yo got' typo) for compatibility with stored records.
    achievement_info = [
        ('New Katakana Record', 'You broke your record studying Katakana'),
        ('New Hiragana Record', 'You broke your record studying Hiragana'),
        ('First Time Katakana', 'You practiced Katakana for the first time'),
        ('First Time Hiragana', 'You practiced Hiragana for the first time'),
        ('Random', 'You generated your first random Kanji'),
        ('Kanji Deck', 'You created your first Kanji deck'),
        ('Deck Master', 'You created 10 Kanji sets'),
        ('Dedicated Student', 'You logged in more than 10 times'),
        ('Quiz Master', 'Yo got more than 10 in a quiz'),
    ]
    achievements = [
        {'title': title, 'progress': 0, 'description': description}
        for title, description in achievement_info
    ]
    db.users.insert_one({
        'name': request.json['name'],
        'userName': request.json['userName'],
        'email': request.json['email'],
        'password': hashpw,
        'pp': "//ssl.gstatic.com/accounts/ui/avatar_2x.png",
        'kanji_sets': [],
        'achievements': achievements,
        'katakanaHighScore': 0,
        'hiraganaHighScore': 0,
        'kanjiHighScore': 0
    })
    return jsonify({'message': 'Account created succesfully!'})
@routes_users.route('/login', methods=['POST'])
def login():
    """Authenticate a user and return a signed JWT on success.

    NOTE(review): failures return 404; 401 would be more conventional,
    but the status is kept unchanged for client compatibility.
    """
    from api import db
    user = db.users.find_one({'userName': request.json['userName']})
    if user:
        if bcrypt.checkpw(request.json['password'].encode('UTF-8'), user['password']):
            return write_token(data=request.get_json())
        else:
            # FIX: message typo 'ncorrect' -> 'incorrect'
            response = jsonify({'message': 'Password is incorrect!'})
            response.status_code = 404
            return response
    response = jsonify({'message': 'Username or password are incorrect!'})
    response.status_code = 404
    return response
@routes_users.route('/setpp', methods=['POST'])
def set_pp():
    """Update the authenticated user's profile picture URL."""
    from api import db
    # Strip the "Bearer " prefix from the Authorization header.
    token = request.headers['Authorization'].split(" ")[1]
    validate_token(token, display=False)
    payload = decode(token, key=getenv('SECRET'), algorithms=['HS256'])
    new_pp = request.json['pp']
    db.users.update_one(
        {'userName': payload['userName']},
        {'$set': {'pp': new_pp}}
    )
    return jsonify({'message': 'New profile picture set succesfully!'})
@routes_users.route('/verifytoken', methods=['GET'])
def verify():
    # Validate the bearer token from the Authorization header.
    # display=True — presumably includes the decoded payload in the
    # response; confirm in jwt_functions.validate_token.
    token = request.headers['Authorization'].split(" ")[1]
    return validate_token(token, display=True)
| CesarMtzV/Hajime-Web | server/api/routes/users.py | users.py | py | 4,036 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "api.db.users.find_one",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "api.db.users",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "api.db",
"... |
43328312480 | from django.urls import path, re_path
from .views import MainPageView, ArticlePageView, CreateArticle, redirect_m
urlpatterns = [
    path('news/', MainPageView.as_view()),
    path('', redirect_m),
    # 'news/create/' must stay above the catch-all pattern below,
    # otherwise the slug pattern would swallow it.
    re_path('news/create/', CreateArticle.as_view()),
    # NOTE(review): these regexes are unanchored (no ^/$); Django's
    # re_path matches with search semantics, so consider anchoring
    # explicitly — confirm intended matching behavior.
    re_path('news/(?P<number_of_link>[^/]*)/?', ArticlePageView.as_view()),
]
# (?P<candy_name>[^/]*)/?
# <int:number_of_article>
| maksimkamekspeks/Jet-Brains | HyperNews Portal/task/news/urls.py | urls.py | py | 384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.MainPageView.as_view",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.MainPageView",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.ur... |
17453371949 | import os
import json
from subslide import Cutter
json_path = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_gl/trainval_test_slide.json'
file_dir = '/media/ldy/7E1CA94545711AE6/OSCC/coarse-key/orig_data/'
mask_dir = '/media/ldy/7E1CA94545711AE6/OSCC/coarse-key/seg/filtered_mask/'
anno_dir = '/media/ldy/7E1CA94545711AE6/OSCC/coarse-key/seg/label_mask/'
save_dir = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_seg/subslide/train/'
target_dir = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_seg/subslide/target_train/'
# Map from tissue class name to the integer label used in the masks.
label_map = dict(bgd=0,
                 normal=1,
                 mucosa=2,
                 tumor=3)
# BUG FIX: storage_type is passed to Cutter below but its assignment had
# been commented out, raising a NameError at runtime — restore it.
storage_type = 'png'
with open(json_path, 'r') as f:
    slide_info = json.load(f)
# run_file = os.listdir('/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_seg/subslide/target_train/')
slide_list = slide_info['train']
# slide_list = os.listdir('/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_seg/subslide/train/')
# print(len(slide_list))
# print(len(run_file))
# slide_list = [c for c in slide_list if not c in run_file]
# slide_list = ['_20190719181501', '_20190718200940', '_20190403083921', '_20190403101949']
print(slide_list)
cutter = Cutter(slide_list,
                file_dir,
                mask_dir,
                anno_dir,
                save_dir,
                target_dir,
                label_map,
                storage_type)
# Patch-extraction parameters (presumably pixels at the given WSI level —
# confirm against Cutter.sample_and_store_patches).
patch_size = 14000
level = 0
overlap = 2000
filter_rate = 0.1
rows_per_iter = 1
resize_factor = 2  # NOTE(review): defined but never used in this script
cutter.sample_and_store_patches(patch_size=patch_size,
                                level=level,
                                overlap=overlap,
                                filter_rate=filter_rate,
                                rows_per_iter=rows_per_iter)
| yida2311/OSCC_SF | subslide/test.py | test.py | py | 1,777 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subslide.Cutter",
"line_number": 29,
"usage_type": "call"
}
] |
21009255890 | from collections import OrderedDict
import hydra
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
class Encoder(nn.Module):
    """Pixel encoder: four 3x3 conv layers with ReLU, flattened to a vector.

    Expects (C, H, W) observations with raw pixel values in [0, 255];
    repr_dim (32 * 35 * 35) is the flattened feature size.
    """
    def __init__(self, obs_shape):
        super().__init__()
        assert len(obs_shape) == 3
        self.repr_dim = 32 * 35 * 35
        # First conv downsamples by 2; the remaining three keep stride 1.
        layers = [nn.Conv2d(obs_shape[0], 32, 3, stride=2), nn.ReLU()]
        for _ in range(3):
            layers += [nn.Conv2d(32, 32, 3, stride=1), nn.ReLU()]
        self.convnet = nn.Sequential(*layers)
        self.apply(utils.weight_init)
    def forward(self, obs):
        # Scale raw pixels to roughly [-0.5, 0.5] before convolving.
        scaled = obs / 255.0 - 0.5
        features = self.convnet(scaled)
        return features.view(features.shape[0], -1)
class Sarsa:
    """Tabular Sarsa algorithm extended with a skill (option) dimension.

    The Q-table is indexed as Q_table[state, skill, action]. In 'pixels'
    observation mode a conv encoder plus augmentation is created;
    otherwise identity pass-throughs are used.
    """
    def __init__(self, name,
                 reward_free,
                 obs_type,
                 obs_shape,
                 action_shape,
                 device,
                 lr,
                 feature_dim,
                 hidden_dim,
                 critic_target_tau,
                 num_expl_steps,
                 update_every_steps,
                 stddev_schedule,
                 nstep,
                 batch_size,
                 stddev_clip,
                 init_critic,
                 use_tb,
                 use_wandb,
                 ncol, nrow, epsilon, alpha, gamma, n_action=4, meta_dim=0, **kwargs):
        self.Q_table = np.zeros([nrow * ncol, meta_dim, n_action])  # Q(s, z, a) table
        self.n_action = n_action  # number of discrete actions
        self.alpha = alpha  # learning rate
        self.gamma = gamma  # discount factor
        self.epsilon = epsilon  # epsilon-greedy exploration probability
        self.reward_free = reward_free
        self.obs_type = obs_type
        self.obs_shape = obs_shape
        self.action_dim = action_shape[0]
        self.hidden_dim = hidden_dim
        self.lr = lr
        self.device = device
        self.critic_target_tau = critic_target_tau
        self.update_every_steps = update_every_steps
        self.use_tb = use_tb
        self.use_wandb = use_wandb
        self.num_expl_steps = num_expl_steps
        self.stddev_schedule = stddev_schedule
        self.stddev_clip = stddev_clip
        self.init_critic = init_critic
        self.feature_dim = feature_dim
        self.solved_meta = None
        self.nstep = nstep
        # models
        if obs_type == 'pixels':
            self.aug = utils.RandomShiftsAug(pad=4)
            self.encoder = Encoder(obs_shape).to(device)
            self.obs_dim = self.encoder.repr_dim + meta_dim
        else:
            self.aug = nn.Identity()
            self.encoder = nn.Identity()
            self.obs_dim = obs_shape[0] + meta_dim
        if obs_type == 'pixels':
            self.encoder_opt = torch.optim.Adam(self.encoder.parameters(),
                                                lr=lr)
        else:
            self.encoder_opt = None
    def aug_and_encode(self, obs):
        # Augment (pixels only) then encode the observation.
        obs = self.aug(obs)
        return self.encoder(obs)
    def train(self):
        # intentionally a no-op placeholder
        ...
    def act(self, state, skill):
        """Choose the next action(s) with an epsilon-greedy policy."""
        if torch.is_tensor(skill):
            skill_num = torch.argmax(skill, dim=1).cpu().numpy()
        else:
            skill_num = np.argmax(skill)
        if np.random.random() < self.epsilon:
            # NOTE: a single draw decides exploration for the whole batch
            action = np.random.randint(self.n_action, size=state.shape[0])
        else:
            action = np.argmax(self.Q_table[state, skill_num], axis=-1)
        return action
    def best_action(self, state, skill):
        """Return a 0/1 mask over actions marking the greedy action(s)
        for `state` under the given one-hot skill; used to print the
        learned policy. Ties for the max are all recorded."""
        skill_num = torch.argmax(skill, dim=1).item()
        Q_max = np.max(self.Q_table[state, skill_num])
        a = [0 for _ in range(self.n_action)]
        for i in range(self.n_action):
            # BUG FIX: previously indexed Q_table[state, i], mixing the
            # skill axis with the action axis; index all three axes.
            if self.Q_table[state, skill_num, i] == Q_max:
                a[i] = 1
        return a
    def _update(self, s0, a0, r, s1, a1):
        # Sarsa TD update.
        # NOTE(review): Q_table is 3-D [state, skill, action] but this
        # indexes it with only (state, action), so entire rows along the
        # last axis are updated — presumably a skill index is missing from
        # the signature; confirm intended semantics.
        td_error = r + self.gamma * self.Q_table[s1, a1] - self.Q_table[s0, a0]
        self.Q_table[s0, a0] += self.alpha * td_error
| Rooshy-yang/Four_Room_For_Exploartion | agent/sarsa.py | sarsa.py | py | 4,277 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
27187115333 | import array as arr
import pandas as pd
import numpy as np
import random
from run_dclamp_simulation import run_ind_dclamp
from cell_recording import ExperimentalAPSet
from multiprocessing import Pool
from scipy.stats import lognorm
from algorithms import eaMuCommaLambda
from deap import base
from deap import creator
from deap import tools
# Directory holding the snapshot of a previous EA run to restart from.
indir = '/home/drew/projects/iPSC_EA_Fitting_Sep2021/cell_2/EA_fit_092421/EA_output/run_1_092921/'
# Space-delimited dumps of the final population: parameters, per-gene
# mutation strategies, and fitness values.
filename = indir + 'pop_final_093021_020021_mini.txt'
pop_params = pd.read_csv(filename, delimiter=' ')
filename = indir + 'pop_strategy_093021_020021_mini.txt'
pop_strategy = pd.read_csv(filename, delimiter=' ')
filename = indir + 'pop_fitness_093021_020021_mini.txt'
pop_fitness = pd.read_csv(filename, delimiter=' ')
# Hall-of-fame parameters and fitnesses from the same run.
filename = indir + 'hof_093021_020021_mini.txt'
hof_params = pd.read_csv(filename, delimiter=' ')
filename = indir + 'hof_fitness_093021_020021_mini.txt'
hof_fitness = pd.read_csv(filename, delimiter=' ')
# Bundles consumed by the restart helpers below.
pop_ = (pop_params, pop_strategy, pop_fitness)
hof_ = (hof_params, hof_fitness)
# DEAP class definitions: minimizing fitness; individuals are double arrays
# carrying a per-gene mutation strategy.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", arr.array, typecode="d",
               fitness=creator.FitnessMin, strategy=None)
creator.create("Strategy", arr.array, typecode="d")
def rstrtES(ind_clss, strategy_clss, fit_clss, data):
    """Rebuild a single evolution-strategy individual from saved state.

    ``data`` is a (parameters, strategy, fitness) triple; each element is
    wrapped in the corresponding DEAP class.
    """
    params, strategy, fitness = data
    individual = ind_clss(params)
    individual.strategy = strategy_clss(strategy)
    individual.fitness = fit_clss(fitness)
    return individual
def rstrtHOF(hof, ind_clss, fit_clss, data):
    """Refill a HallOfFame from saved (parameters, fitness) tables.

    ``data[0]`` holds one parameter row per member and ``data[1]`` the
    matching fitness rows; on a row-count mismatch nothing is inserted.
    """
    params, fits = data
    if params.shape[0] != len(fits):
        print('\tHallofFame did not load successfully.')
        return hof
    for row in range(params.shape[0]):
        member = ind_clss(params.iloc[row, :])
        member.fitness = fit_clss(fits.iloc[row])
        hof.insert(member)
    return hof
def initRstrtPop(container, rstInd, pop_data):
    """Rebuild a whole population from saved (params, strategy, fitness) tables.

    Bug fix: each individual is restored from its own row ``i``; the
    original implementation always read row 0, so the restarted population
    consisted of N copies of the first individual.
    """
    pop = []
    n_individuals = pop_data[0].shape[0]
    for i in range(n_individuals):
        ind_data = (list(pop_data[0].iloc[i, :]),
                    list(pop_data[1].iloc[i, :]),
                    tuple(pop_data[2].iloc[i, :]))
        pop.append(rstInd(data=ind_data))
    return container(pop)
def fitness(ind, ExperAPSet):
    """Score an individual: total RMSD of its simulated APs vs. experiment.

    Returns a 1-tuple as required by DEAP fitness objects.
    """
    simulated = run_ind_dclamp(ind, dc_ik1=ExperAPSet.dc_ik1, printIND=False)
    total = sum(ExperAPSet.score(simulated).values())
    return (total,)
def mutateES(ind, indpb=0.3):
    """Log-normal self-adaptive mutation for an ES individual.

    Each gene is mutated with probability ``indpb`` by multiplying both the
    value and its strategy parameter by a log-normal draw.  Gene 0 (Phi) is
    a probability and is resampled whenever it escapes above 1.

    Bug fix: draws are now scalars; the original passed ``size=1`` and so
    stored 1-element numpy arrays back into the individual.
    """
    for i in range(len(ind)):
        if indpb > random.random():
            # Mutate value and strategy with independent log-normal factors.
            ind[i] *= lognorm.rvs(s=ind.strategy[i])
            ind.strategy[i] *= lognorm.rvs(s=ind.strategy[i])
        # Phi (gene 0) must stay a probability; resample if it escaped.
        if ind[0] > 1.0:
            ind[0] = random.random()
            ind.strategy[0] = random.random()
    return ind,
def cxESBlend(ind1, ind2, alpha):
    """Blend crossover for ES individuals, mixing values and strategies.

    For every gene a fresh blending factor ``gamma`` in (1 - alpha, 1] is
    drawn — once for the values and once for the strategies.  Both children
    are modified in place and returned.
    """
    for i in range(len(ind1)):
        x1, x2 = ind1[i], ind2[i]
        s1, s2 = ind1.strategy[i], ind2.strategy[i]
        # Blend the gene values.
        gamma = 1.0 - random.random() * alpha
        ind1[i] = gamma * x1 + (1.0 - gamma) * x2
        ind2[i] = gamma * x2 + (1.0 - gamma) * x1
        # Blend the strategy parameters (note the mirrored weighting).
        gamma = 1.0 - random.random() * alpha
        ind1.strategy[i] = (1. - gamma) * s1 + gamma * s2
        ind2.strategy[i] = gamma * s1 + (1. - gamma) * s2
    return ind1, ind2
# Load in experimental AP set
# Cell 2 recorded 12/24/20 Ishihara dynamic-clamp 1.0 pA/pF
path_to_aps = '/home/drew/projects/iPSC_EA_Fitting_Sep2021/cell_2/AP_set'
cell_2 = ExperimentalAPSet(path=path_to_aps, file_prefix='cell_2_',
                           file_suffix='_SAP.txt', cell_id=2, dc_ik1=1.0)
# Wire the restart helpers into the DEAP toolbox; the population factory
# rebuilds every saved individual from the pop_ tables loaded above.
toolbox = base.Toolbox()
toolbox.register("individual", rstrtES, creator.Individual, creator.Strategy,
                 creator.FitnessMin, data=None)
toolbox.register("population", initRstrtPop, list, toolbox.individual, pop_)
pop = toolbox.population()
# (mu, lambda)-ES settings: mu parents per generation, lambda offspring.
NGEN = 2
MU = len(pop)
LAMBDA = 2 * MU
# Hall-of-fame capacity: 10% of all offspring produced over the run.
NHOF = int((0.1) * LAMBDA * NGEN)
hof = rstrtHOF(tools.HallOfFame(NHOF), creator.Individual, creator.FitnessMin, hof_)
# These functions allow the population to evolve.
toolbox.register("mate", cxESBlend, alpha=0.3)
toolbox.register("mutate", mutateES)
# Selection
toolbox.register("evaluate", fitness, ExperAPSet=cell_2)
toolbox.register("select", tools.selTournament, tournsize=3)
# Register some statistical functions to the toolbox.
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
# To speed things up with multi-threading
p = Pool()
toolbox.register("map", p.map)
print('(mu,lambda): ('+str(MU)+','+str(LAMBDA)+')')
# Run the (mu, lambda) evolution loop; writeGENS persists each generation.
pop, logbook = eaMuCommaLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
                               cxpb=0.6, mutpb=0.3, ngen=NGEN, stats=stats,
                               halloffame=hof, verbose=False, writeGENS=True)
| dtilley/EA_fit_to_AP_set | scratch.py | scratch.py | py | 5,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
72748742823 | import pygame
from src import constants
class Button:
    """Clickable rounded-rectangle button with pre-rendered idle/hover images.

    ``action`` is an opaque callback payload stored for the caller; hit
    testing is pixel-accurate via a mask of the rounded shape.
    """

    def __init__(self, text, size, position, action):
        self.action = action
        self.rect = pygame.Rect((0, 0), size)
        self.rect.center = position
        self.radius = 7

        # Pre-render one surface per visual state (the original duplicated
        # this painting code; it now lives in _paint_panel).
        self.idle_image = pygame.Surface(size, pygame.SRCALPHA)
        self.hover_image = pygame.Surface(size, pygame.SRCALPHA)
        self.image_rect = self.idle_image.get_rect()
        self._paint_panel(self.idle_image, constants.BUTTON_BACKGROUND_COLOR)
        self._paint_panel(self.hover_image,
                          constants.BUTTON_BACKGROUND_COLOR_HOVER)
        self.image = self.idle_image
        # Mask of the rounded shape, used for pixel-accurate hit testing.
        self.mask = pygame.mask.from_surface(self.image)

        # NOTE(review): pygame.freetype normally needs an explicit
        # `import pygame.freetype`; the file only does `import pygame` —
        # confirm the submodule is imported elsewhere.
        self.font = pygame.freetype.Font(
            constants.BUTTON_FONT_PATH,
            constants.BUTTON_FONT_SIZE
        )
        self.font.pad = True
        self.font.fgcolor = constants.BUTTON_FONT_COLOR
        self.text_rect = pygame.Rect(0, 0, 0, 0)
        self.update_text(text)

    def _paint_panel(self, surface, fill_color):
        """Fill *surface* with a rounded rect and draw the shared outline."""
        pygame.draw.rect(
            surface,
            fill_color,
            self.image_rect,
            border_radius=self.radius
        )
        pygame.draw.rect(
            surface,
            constants.BUTTON_OUTLINE_COLOR,
            self.image_rect,
            1,
            border_radius=self.radius
        )

    def update_text(self, text):
        """Render *text* centered on both state images, erasing the old text."""
        # First erase the previous text by repainting its area with the
        # plain background of each state image.
        pygame.draw.rect(
            self.idle_image,
            constants.BUTTON_BACKGROUND_COLOR,
            self.text_rect
        )
        pygame.draw.rect(
            self.hover_image,
            constants.BUTTON_BACKGROUND_COLOR_HOVER,
            self.text_rect
        )
        text_surf, self.text_rect = self.font.render(text)
        self.text_rect.center = self.image_rect.center
        self.idle_image.blit(text_surf, self.text_rect)
        self.hover_image.blit(text_surf, self.text_rect)

    def collidepoint(self, x, y):
        """Hit-test (x, y); switches the displayed image as a side effect."""
        if self.rect.collidepoint(x, y):
            # Only collide if the point is inside the rounded corners:
            if self.mask.get_at((x - self.rect.x, y - self.rect.y)):
                self.image = self.hover_image
                return True
        self.image = self.idle_image
        return False
| Farbfetzen/dimetric | src/button.py | button.py | py | 2,487 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.Rect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
... |
7477122277 | import urllib.request
import time
from lxml import etree
from selenium import webdriver
from utils.Utils import url_request
import chardet
# 资源解析类
# 百度搜索解析
# return: dic_source{}
def baidu_search(word='', pagesNum=3, sleep=2):
    """Search Baidu for *word* and collect result titles -> URLs.

    Walks ``pagesNum`` result pages with Selenium, sleeping ``sleep``
    seconds per page so results can load.  Returns a dict mapping result
    titles to their (redirect) URLs.

    Bug fix: the WebDriver is now always quit in a ``finally`` block; the
    original leaked a browser process on every call.
    """
    baidu_dic_source = {}  # 百度元素资源 (title -> url)
    driver = webdriver.Chrome()
    # driver = webdriver.Firefox()  # Firefox alternative
    try:
        driver.get('http://www.baidu.com')
        # Type the query into the search box and submit the form.
        element_keyword = driver.find_element_by_id('kw')
        element_keyword.send_keys(word)
        element_search_button = driver.find_element_by_id('su')
        element_search_button.submit()
        num = 0
        for j in range(1, pagesNum + 1):
            time.sleep(sleep)
            root = etree.HTML(driver.page_source)
            source_list = root.xpath('//div[@class="result c-container "]/h3')
            for i, item in enumerate(source_list):
                num += 1
                title = item.xpath('string(.)')
                url = item.xpath('./a/@href')[0]
                print("百度数据检索:#{}{}--{}".format(num, title, url))
                baidu_dic_source[title] = url
            print("----------------------------------------------------------")
            # Advance to the next result page ("下一页").
            driver.find_element_by_xpath("//a[contains(text(),'下一页')]").click()
    finally:
        driver.quit()
    print("百度检索数据量:", len(baidu_dic_source))
    return baidu_dic_source
# 下书网解析
# retrun: download_source[]
def xiashu_resolve(dict):
    """Resolve download links from a xiashu.cc book page.

    ``dict`` maps the page URL to its HTML source (single entry; the
    parameter name shadows the builtin but is kept for API compatibility).
    Returns a list of absolute download URLs.

    Bug fix: returns an empty list when no "下载" link exists on the page;
    the original crashed with an IndexError on ``download_id[0]``.
    """
    print(list(dict.keys())[0])
    source = list(dict.values())[0]
    root = etree.HTML(source)
    # Every element whose text mentions "下载" (download) is a candidate.
    source_list = root.xpath('//*[contains(text(),"下载")]')
    print('开始解析资源')
    download_id = ""
    download_url = ""
    for i, item in enumerate(source_list):
        print("匹配元素:{}--{}".format(i, item.xpath("string(.)")))
        download_id = item.xpath('./@href')
        if (len(download_id)):
            print("检索:", download_id)
            download_url = "https://www.xiashu.cc{}{}".format(download_id[0], "down")
            break
    if not download_id:
        # Nothing on the page linked to a download section.
        return []
    page_source = url_request(download_url)
    root = etree.HTML(list(page_source.values())[0])
    source_list = root.xpath("//div[@id='downlist']//*[contains(@href,'" + download_id[0] + "')]")
    download_source = []
    for i, item in enumerate(source_list):
        print("下载元素:{}--{}--{}".format(i, item.xpath("string(.)"), item.xpath("./@href")))
        download_source.append("https://www.xiashu.cc{}".format(item.xpath("./@href")[0]))
    print("下载资源解析结束,量:", len(download_source))
    return download_source
# 通用测试解析
def universal_resolve(dict, url='',
                      xpath='//a[contains(text(),"下载") and @href and not(contains(@href,"game") or contains(@href,"apk") or contains(@href,"app"))]'):
    """Generic "download link" extractor for an arbitrary page.

    ``dict`` maps the page URL to its HTML source (single entry; the
    parameter name shadows the builtin but is kept for API compatibility).
    Each element matched by ``xpath`` contributes one entry mapping its
    text to an absolute URL; relative hrefs are joined onto the page URL.

    Cleanup: removed a large block of commented-out dead code and the
    unused ``download_url`` local.
    """
    print(list(dict.keys())[0])
    universal_result = {}
    source = list(dict.values())[0]
    root = etree.HTML(source)
    source_list = root.xpath(xpath)
    print('开始解析资源-----------------------------------------------------------------------')
    download_id = ""
    for i, item in enumerate(source_list):
        download_id = item.xpath('./@href')[0]
        cent_str = item.xpath("string(.)")
        print("download_id:", download_id)
        if "/" in download_id:
            if 'http' not in download_id and 'www.' not in download_id:
                # Relative link: prefix it with the page URL.
                url = '{}{}'.format(list(dict.keys())[0], download_id)
            else:
                url = download_id
            # Collapse double slashes, then restore the scheme separator.
            url = url.replace("//", "/").replace(":", ":/")
            print("匹配元素:{}--{}--{}".format(i, cent_str, url))
            universal_result[cent_str] = url
    return universal_result
| AnubisASN/IVA_C | utils/Parsing.py | Parsing.py | py | 4,485 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HT... |
17883239165 | """
该模块用于承载数据管理的功能,即工作空间。
``DataManager`` 类,其实例就是工作空间,提供了包括添加变量、删除变量、历史记录回溯、历史记录访问量等内容。
该模块在设计时考虑了历史记录功能,不过该功能是否有必要,还有待商榷。
该模块的操作对象为 ``DataAdapter``。
将数据外面包上一层可以确保其元数据的识别等便利性。
关于 ``DataAdapter`` 的详细内容请参见相关文档。
"""
from collections import OrderedDict
from typing import Dict, Tuple, List, Any
from lib.workspace.data_adapter import BaseAdapter
from lib.workspace.data_adapter import Detector
from .signals import workspace_data_created, workspace_data_deleted, workspace_data_changed
class DataManager(object):
    """Workspace data manager.

    Manages ``DataAdapter`` instances rather than raw values; raw values
    are stored through :meth:`set_raw_data`, which auto-detects the right
    adapter.  Each variable keeps a linear undo/redo history, and
    explicitly deleted variables are parked in a recycle bin from which
    they can be restored.
    """

    # Maximum number of history snapshots kept per variable.
    HISTORY_LIMIT = 15

    def __init__(self):
        # TODO make the history and recycle-bin limits constructor parameters.
        # Maps variable name -> (undo list, redo list).  Histories are
        # linear, like most applications:
        #   current: [1,2,3,4,5,6,7], []
        #   undo:    [1,2,3,4,5,6],   [7]
        #   undo:    [1,2,3,4,5],     [6,7]
        #   redo:    [1,2,3,4,5,6],   [7]
        #   write:   [1,2,3,4,5,6,8], []   (writing clears the redo list)
        self.container: Dict[str, Tuple[List[BaseAdapter], List[BaseAdapter]]] = dict()
        # Holds variables the user explicitly deleted, so they can be
        # restored:
        #   workspace: container={a,b,c}, recycle_bin={}
        #   delete a:  container={b,c},   recycle_bin={a}
        #   delete b:  container={c},     recycle_bin={a,b}
        #   restore a: container={a,c},   recycle_bin={b}
        self.recycle_bin = OrderedDict()
        # Automatic adapter detection for raw values.
        self.detector = Detector()
        self.detector.init_builtin_adapters()

    def __getitem__(self, key: str) -> BaseAdapter:
        """Return the current adapter for *key*.

        Raises:
            KeyError: if the variable is unknown or has no history entries.
        """
        current, future = self.container[key]
        if not current:
            self.__raise_key_error(key)
        return current[-1]

    def __setitem__(self, key: str, value: BaseAdapter):
        """Write an adapter into the workspace, recording history.

        Emits ``workspace_data_created`` for new variables (including ones
        revived from the recycle bin) and ``workspace_data_changed`` for
        updates.  Writing always clears the redo list.
        """
        created = False  # whether this write created (vs. modified) a variable
        assert isinstance(value, BaseAdapter)
        # Make sure a history container exists for this variable.
        if key not in self.container:
            created = True
            if key not in self.recycle_bin:
                self.container[key] = ([], [])
            else:
                # Revive the variable, keeping its previous history.
                self.container[key] = self.recycle_bin[key]
                del self.recycle_bin[key]
        current, future = self.container[key]
        if future:
            future.clear()
        if not current:
            created = True
        current.append(value)
        if len(current) > self.HISTORY_LIMIT:
            current.pop(0)
        if created:
            workspace_data_created.send(self, key=key)
        else:
            workspace_data_changed.send(self, key=key)

    def __delitem__(self, key: str):
        """Move a variable (with its history) into the recycle bin."""
        # TODO (panhaoyu) histories should be merged when the bin already
        # holds this name; for now the binned copy is simply overwritten.
        if key not in self.container:
            self.__raise_key_error(key)
        self.recycle_bin[key] = self.container[key]
        del self.container[key]
        workspace_data_deleted.send(self, key=key)

    def __contains__(self, item: str):
        """Return whether *item* is a live workspace variable."""
        return item in self.container

    def __iter__(self):
        yield from self.container

    def set_raw_data(self, key: str, value: Any):
        """Wrap a raw value in an auto-detected adapter and store it.

        Args:
            key: variable name.
            value: raw (unwrapped) value.
        """
        self[key] = self.detector.detect(value)

    def back(self, key: str) -> bool:
        """Undo: step *key* back one history entry.

        Returns:
            bool: False when there is nothing to undo.
        """
        if key not in self.container:
            self.__raise_key_error(key)
        current, future = self.container[key]
        if len(current) < 2:
            return False
        future.insert(0, current.pop())
        return True

    def forward(self, key: str) -> bool:
        """Redo: step *key* forward one history entry.

        Returns:
            bool: False when there is nothing to redo.
        """
        if key not in self.container:
            self.__raise_key_error(key)
        current, future = self.container[key]
        if not future:
            return False
        current.append(future.pop(0))
        if len(current) > self.HISTORY_LIMIT:
            current.pop(0)
        return True

    def restore_from_recycle_bin(self, key: str):
        """Restore a deleted variable from the recycle bin.

        This overwrites any live variable of the same name, so callers
        should confirm with the user first.  The long method name is
        deliberate, to avoid confusion with history navigation.
        """
        if key not in self.recycle_bin:
            self.__raise_key_error(key, '回收站')
        self.container[key] = self.recycle_bin[key]
        # Bug fix: pass the sender, matching every other signal emission.
        workspace_data_created.send(self, key=key)
        del self.recycle_bin[key]

    def __raise_key_error(self, key: str, position='工作空间'):
        raise KeyError(f'{position}未定义变量:{key}')

    def keys(self) -> List[str]:
        """Return the live variable names as a new list."""
        return list(self.container.keys())

    def values(self) -> List[BaseAdapter]:
        """Return the current adapter of every live variable as a new list."""
        return [current[-1] for current, future in self.container.values()]

    def items(self) -> List[Tuple[str, BaseAdapter]]:
        """Return (name, current adapter) pairs as a new list."""
        return [(key, history[0][-1]) for key, history in self.container.items()]
# Do not use this variable directly!
# Only two known call sites remain: one in workspace_old, one in
# extension_lib.  Module-level singleton kept for those legacy users.
data_manager = DataManager()
| pyminer/pyminer | pyminer/lib/workspace/data_manager.py | data_manager.py | py | 8,124 | python | zh | code | 77 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "lib.workspace.data_adapter.BaseA... |
15441184700 | import numpy as np
import torch
def squash(x,
           squashing_constant=1.,
           dim=-1,
           eps=1e-7,
           safe=True,
           p=2,
           **kwargs):
    """Capsule squashing non-linearity.

    Shrinks each vector along ``dim`` to norm < 1 while preserving its
    direction.  With ``safe=True`` the norm is stabilised by ``eps``;
    otherwise the plain p-norm is used directly.
    """
    if not safe:
        norm = x.norm(dim=dim, keepdim=True, p=p)
        squared = norm * norm
        return (x / norm) * (squared / (squashing_constant + squared))
    squared = torch.sum(torch.square(x), axis=dim, keepdim=True)
    stabilised_norm = torch.sqrt(squared + eps)
    scale = squared / (squashing_constant + squared)
    return scale * (x / stabilised_norm)
def smsquash(x, caps_dim=1, atoms_dim=2, eps=1e-7, **kwargs):
    """Softmax Squash (Poiret, et al., 2021).

    Rescales capsule 2-norms into a probability distribution over the
    capsules (softmax across ``caps_dim``) while preserving each capsule's
    direction.

    Bug fix: the softmax denominator is now reduced with ``keepdim=True``.
    Without it the summed dimension was dropped and the result broadcast
    against the batch dimension — silently wrong for batch size > 1
    (identical for batch size 1).
    """
    squared_norm = torch.sum(torch.square(x), axis=atoms_dim, keepdim=True)
    safe_norm = torch.sqrt(squared_norm + eps)
    exp_norm = torch.exp(safe_norm)
    # Softmax of the capsule norms across the capsule dimension.
    a = exp_norm / torch.sum(exp_norm, axis=caps_dim, keepdim=True)
    # Unit vector of each capsule.
    b = x / safe_norm
    return a * b
def safe_length(x, dim=2, keepdim=False, eps=1e-7):
    """Numerically stable 2-norm of ``x`` along ``dim``."""
    return torch.sqrt(torch.sum(torch.square(x), axis=dim, keepdim=keepdim) + eps)
def calc_same_padding(
    input_,
    kernel=1,
    stride=1,
    dilation=1,
    transposed=False,
):
    """Return ``(padding, output_size)`` for 'same'-style (transposed) convs.

    ``input_`` is the spatial size; the effective kernel accounts for
    dilation.  Note the transposed branch keeps the original float
    division, so its output size is a float.
    """
    effective_kernel = dilation * (kernel - 1) + 1
    if transposed:
        return effective_kernel // 2 - 1, input_ // (1. / stride)
    return effective_kernel // 2, input_ // stride
def get_number_of_learnable_parameters(model):
    """Count the elements of all trainable parameters of *model*."""
    return sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad)
def number_of_features_per_level(init_channel_number, num_levels):
    """Channel counts per level: the initial count doubles at each level."""
    counts = []
    channels = init_channel_number
    for _ in range(num_levels):
        counts.append(channels)
        channels *= 2
    return counts
def expand_as_one_hot(input, C, ignore_index=None):
    """
    Converts NxDxHxW label image to NxCxDxHxW, where each label gets converted to its corresponding one-hot vector
    :param input: 4D input image (NxDxHxW); must be an integer (long) tensor for scatter_
    :param C: number of channels/labels
    :param ignore_index: ignore index to be kept during the expansion
    :return: 5D output image (NxCxDxHxW)
    """
    assert input.dim() == 4
    # expand the input tensor to Nx1xDxHxW before scattering
    input = input.unsqueeze(1)
    # create result tensor shape (NxCxDxHxW)
    shape = list(input.size())
    shape[1] = C
    if ignore_index is not None:
        # create ignore_index mask for the result
        mask = input.expand(shape) == ignore_index
        # clone the src tensor and zero out ignore_index in the input
        # (scatter_ would fail on indices outside [0, C))
        input = input.clone()
        input[input == ignore_index] = 0
        # scatter to get the one-hot tensor
        result = torch.zeros(shape).to(input.device).scatter_(1, input, 1)
        # bring back the ignore_index in the result
        result[mask] = ignore_index
        return result
    else:
        # scatter to get the one-hot tensor
        return torch.zeros(shape).to(input.device).scatter_(1, input, 1)
| clementpoiret/bagginghsf | bagginghsf/models/helpers.py | helpers.py | py | 3,122 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.sum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.square",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 29,
... |
41165140913 | # -*- coding: utf-8 -*-
'''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Apr 30, 2013
@author: Stefan Guna
'''
from datetime import date
from django.db.models.aggregates import Sum
from django.db.models.query_utils import Q
from habitam.downloads.common import signatures, habitam_brand, MARGIN
from habitam.entities.models import ApartmentConsumption, ServiceConsumption
from habitam.financial.models import Quota
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.pagesizes import A4, cm, landscape
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
from reportlab.platypus.flowables import PageBreak, Spacer
from reportlab.platypus.paragraph import Paragraph
import logging
import tempfile
logger = logging.getLogger(__name__)
__HEIGHT__ = A4[0]
__WIDTH__ = A4[1]
__FONT_SIZE__ = 9
def __add_amounts(breakdown, service_info, service, op_docs):
    """Accumulate operation amounts per destination and per service."""
    sname = service.__unicode__()
    info = service_info[sname]
    info['amount'] = 0
    for op_doc in op_docs:
        for op in op_doc.operation_set.all():
            # Per-destination (apartment) share of this service.
            breakdown[op.dest.name][sname]['amount'] += op.amount
            # Running total for the whole service.
            info['amount'] += op.amount
def __add_consumption(breakdown, service_info, service, op_docs):
    """Accumulate declared/invoiced consumption and derive the unit price.

    Only applies to services billed by consumption.  Declared consumption
    is summed per apartment and per service; invoiced consumption comes
    from the service-level records; the loss is invoiced minus declared.

    Fixes: ``is None`` instead of ``== None``; ``consumed_invoiced`` is
    initialised so an empty ``op_docs`` no longer raises KeyError; the
    unit price no longer divides by zero when nothing was declared.
    """
    sname = service.__unicode__()
    if not service.quota_type == 'consumption':
        return
    si = service_info[sname]
    si['consumed_declared'] = 0
    si['consumed_invoiced'] = 0
    for op_doc in op_docs:
        for ac in ApartmentConsumption.objects.filter(doc=op_doc):
            apname = ac.apartment.__unicode__()
            breakdown[apname][sname]['consumed'] += ac.consumed
            si['consumed_declared'] += ac.consumed
        q = ServiceConsumption.objects.filter(doc=op_doc, service=service)
        tmp = q.aggregate(Sum('consumed'))
        # NOTE(review): overwritten on each op_doc — only the last
        # document's invoiced total survives; confirm whether this should
        # accumulate instead.
        si['consumed_invoiced'] = tmp['consumed__sum']
    if si['consumed_invoiced'] is None:
        si['consumed_invoiced'] = 0
    si['consumed_loss'] = si['consumed_invoiced'] - si['consumed_declared']
    if si['amount'] is None:
        si['amount'] = 0
    # Guard against a zero-division crash when nothing was declared.
    if si['consumed_declared']:
        si['price_per_unit'] = si['amount'] / si['consumed_declared']
    else:
        si['price_per_unit'] = 0
def __add_quotas(billed, service):
    """Record each apartment's quota ratio for ratio-based services."""
    ratio_types = ('equally', 'inhabitance', 'area', 'rooms', 'manual')
    if service.quota_type not in ratio_types:
        return
    sname = service.__unicode__()
    for quota in Quota.objects.filter(src=service.account):
        billed[quota.dest.name][sname]['quota'] = quota.ratio
def __list_format(canvas, doc):
    """Page decorator: draws the building header and the Habitam brand.

    The doc object carries the header fields (habitam_building,
    habitam_display, habitam_month) set by __to_pdf.
    """
    canvas.saveState()
    header_style = ParagraphStyle(name='building_title',
                                  fontSize=__FONT_SIZE__)
    header_text = u'%s<br/>Data afișării: %s<br/>Luna: %s' % (
        doc.habitam_building.name, doc.habitam_display, doc.habitam_month)
    header = Paragraph(header_text, header_style)
    header.wrapOn(canvas, 5 * cm, 2 * cm)
    header.drawOn(canvas, .5 * cm, __HEIGHT__ - 1.7 * cm)
    habitam_brand(canvas, __WIDTH__, __HEIGHT__)
    canvas.restoreState()
def download_display_list(building, begin_ts, end_ts):
    """Build the monthly display list PDF for *building* over [begin_ts, end_ts].

    Returns an open NamedTemporaryFile holding the rendered PDF.
    """
    services = building.services()
    # breakdown: apartment -> service -> {'amount', optionally 'consumed'}
    # service_info: service -> aggregate totals (filled by the helpers)
    # balance: apartment -> penalties/balance snapshots at both timestamps
    breakdown = {}
    service_info = {}
    balance = {}
    # Penalty operations are excluded from the plain balance figures.
    penalties_exclude = Q(dest=building.penalties_account)
    for ap in building.apartments():
        apname = ap.__unicode__()
        breakdown[apname] = {}
        balance[apname] = {}
        a = breakdown[apname]
        b = balance[apname]
        b['penalties'] = {}
        b['penalties']['at_begin'] = ap.penalties(begin_ts)
        b['penalties']['at_end'] = ap.penalties(end_ts)
        b['balance'] = {}
        b['balance']['at_begin'] = ap.account.balance(begin_ts, penalties_exclude)
        b['balance']['at_end'] = ap.account.balance(end_ts, penalties_exclude)
        # Zero-initialise each active service's slot for this apartment.
        for service in services:
            if service.archived:
                continue
            sname = service.__unicode__()
            a[sname] = {}
            a[sname]['amount'] = 0
            if service.quota_type == 'consumption':
                a[sname]['consumed'] = 0
    # Fill in amounts, consumption and quota ratios per active service.
    for service in services:
        if service.archived:
            continue
        sname = service.__unicode__()
        service_info[sname] = {}
        op_docs = service.account.operation_list(begin_ts, end_ts)
        __add_amounts(breakdown, service_info, service, op_docs)
        __add_consumption(breakdown, service_info, service, op_docs)
        __add_quotas(breakdown, service)
    # Re-group the per-apartment data by staircase (the building itself is
    # also an apartment group and is skipped).
    staircase_breakdown = {}
    staircase_balance = {}
    for sc in building.apartment_groups():
        if sc == building:
            continue
        scname = sc.__unicode__()
        staircase_breakdown[scname] = {}
        staircase_balance[scname] = {}
        for ap in sc.apartments():
            apname = ap.__unicode__()
            staircase_breakdown[scname][apname] = breakdown[apname]
            staircase_balance[scname][apname] = balance[apname]
    temp = tempfile.NamedTemporaryFile()
    __to_pdf(temp, breakdown, building, begin_ts, end_ts)
    # TODO (Stefan) this file should be persisted and downloaded on subsequent calls
    return temp
def __to_pdf(tempFile, breakdown, building, begin_ts, end_ts):
    """Render one landscape-A4 page per staircase into *tempFile*.

    Each page holds a centered title, the staircase's billing table and the
    signature block; __list_format paints the per-page header/brand.
    """
    doc = SimpleDocTemplate(tempFile, pagesize=landscape(A4), leftMargin=MARGIN,
                            rightMargin=MARGIN, topMargin=MARGIN,
                            bottomMargin=MARGIN,
                            title=u'Lista de întreținere %s' % building.name,
                            author='www.habitam.ro')
    flowables = []
    sc_title_style = ParagraphStyle(name='staircase_title', alignment=TA_CENTER)
    # One table (and page) per staircase; the building group itself is skipped.
    for sc in building.apartment_groups():
        if sc == building:
            continue
        sc_title = Paragraph(u'Lista de întreținere scara %s' %
                             sc.name, sc_title_style)
        data = __list_data(sc, breakdown, building.services())
        table = Table(data, repeatRows=1)
        table.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('VALIGN', (0, 0), (0, -1), 'TOP'),
            ('FONTSIZE', (0, 0), (-1, -1), __FONT_SIZE__),
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
            ('BOX', (0, 0), (-1, -1), 0.25, colors.black)
        ]))
        flowables.extend([Spacer(1, .5 * cm), sc_title,
                          Spacer(1, cm), table,
                          Spacer(1, .5 * cm), signatures(__FONT_SIZE__),
                          PageBreak()])
    # Header fields consumed by the __list_format page callback.
    doc.habitam_building = building
    doc.habitam_month = begin_ts.strftime('%B %Y')
    doc.habitam_display = date.today().strftime('%d %B %Y')
    doc.build(flowables, onFirstPage=__list_format, onLaterPages=__list_format)
def __list_data(ap_group, d_billed, building_services):
    """Build the table rows for one staircase's display list.

    Produces a header row, one row per apartment (cost per service and,
    for consumption-billed services, the declared consumption) with a row
    total, plus a staircase total row.

    Bug fixes vs. the original:
    - a consumption column was appended to every apartment row even for
      non-consumption services, making data rows wider than the header;
    - the staircase total row lacked the final 'Total' column.
    """
    # Header is built up front (the original built it lazily during the
    # first apartment, producing a bare header for empty groups).
    header = ['Apartament', 'Numele', 'Nr Pers']
    for service in building_services:
        sname = service.__unicode__()
        header.append(sname + ' cost')
        if service.quota_type == 'consumption':
            header.append(sname + ' consum')
    header.append('Total')

    data = [header]
    service_totals = {service.__unicode__(): 0 for service in building_services}
    group_total = 0
    for ap in ap_group.apartments():
        apname = ap.__unicode__()
        row = [apname, ap.owner.name, ap.inhabitance]
        total_amount = 0
        for service in building_services:
            sname = service.__unicode__()
            amount = d_billed[apname][sname]['amount']
            logger.info('[toData] - ap=%s service=%s cost=%s' % (apname, sname, amount))
            total_amount += amount
            service_totals[sname] += amount
            row.append(str(amount))
            # Only consumption-billed services have a consumption column.
            if service.quota_type == 'consumption':
                row.append(str(d_billed[apname][sname]['consumed']))
        row.append(total_amount)
        group_total += total_amount
        data.append(row)

    total_row = ['', u'Total Scară', '']
    for service in building_services:
        sname = service.__unicode__()
        total_row.append(service_totals[sname])
        if service.quota_type == 'consumption':
            total_row.append('')
    total_row.append(group_total)
    data.append(total_row)
    return data
| habitam/habitam-core | habitam/downloads/display_list.py | display_list.py | py | 9,863 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 43,
"usage_type": "name"
},
{
"api_name"... |
72177085225 | #coding = utf-8
import requests
def GET_Token_And_C_Header(URL):
    """Prompt for student id/password, log in at *URL*, and return a
    ready-to-use header dict carrying the bearer token.
    """
    student_id = input("请输入学号:")
    password = input("请输入密码:")
    payload = {
        "student_id": student_id,
        "password": password
    }
    # NOTE(review): assumes the response shape {"data": {"token": ...}}
    # and prints success unconditionally — no status-code check.
    body = requests.post(URL, payload).json()
    bearer = "Bearer " + body["data"]["token"]
    print("登录成功!")
    return {"Authorization": bearer}
def C_Game(URL, header, attribute):
    """Create a game room with the given attributes; return its uuid."""
    body = requests.post(URL, headers=header, data=attribute).json()
    print("创建对局成功!")
    return body["data"]["uuid"]
def Join_Game(URL, uuid, header):
    """Join the room *uuid*; return the decoded server response."""
    # NOTE: the success message is printed before the request is made.
    print("加入对局成功")
    return requests.post(URL + "/" + uuid, headers=header).json()
def Player_operation(URL, uuid, header, operation):
    """Submit a player operation (e.g. draw/play a card) for room *uuid*."""
    # NOTE: the success message is printed before the request is made.
    print("玩家操作成功!")
    return requests.put(URL + "/" + uuid, data=operation, headers=header).json()
def Get_Previous_operation(URL, uuid, header):
    """Fetch the last operation performed in room *uuid*."""
    # NOTE: the success message is printed before the request is made.
    print("获取上步操作成功!")
    return requests.get(URL + "/" + uuid + "/last", headers=header).json()
def Get_Match_info(URL, uuid, header):
    """Fetch the current state of room *uuid*."""
    # NOTE: the success message is printed before the request is made.
    print("获取对局信息成功!")
    return requests.get(URL + "/" + uuid, headers=header).json()
def Get_Match_list(URL, header, data):
    """Fetch a page of the match list (*data* carries paging parameters)."""
    # NOTE: the success message is printed before the request is made.
    print("获取对局列表成功!")
    return requests.get(URL + "/index", params=data, headers=header).json()
# '''
# 登录接口
# '''
# url1="http://172.17.173.97:8080/api/user/login"
# header = GET_Token_And_C_Header(url1)
# print(header["Authorization"])
# '''
# 创建对局
# '''
# url2="http://172.17.173.97:9000/api/game"
# attribute={
# "pravite":False
# }
# uuid=C_Game(url2,header,attribute)
# print(uuid)
# print(Join_Game(url2,uuid,header))
# '''
# 获取上步操作
# '''
# res3=Get_Previous_operation(url2,uuid,header)
# print(res3)
# print(len(res3["data"]["last_code"]))
# '''
# 执行玩家操作
# '''
# operation1={
# #摸牌
# "type":0
# }
# operation2={
# #出牌
# "type":1,
# "card":"SQ"
# }
#
# res2=Player_operation(url2,uuid,header,operation1)
# print(res2)
#
# '''
# 获取对局信息
# '''
# res4=Get_Match_info(url2,uuid,header)
# print(res4)
# '''
# 获取对局列表
# '''
# data_ye={
# "page_size":3,
# "page_num":2
#
# }
# res4=Get_Match_list(url2,header,data_ye)
# print(res4)
| czl411/Pair-Programming | 联机游戏/online.py | online.py | py | 2,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_nu... |
40502817176 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from .models import Athlete
from random import shuffle, randrange
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the draw landing page."""
    return render(request, 'draw/index.html')
def submit(request):
    """Register an athlete: validate the name, then assign a random free
    number within the chosen group's 32-number range.

    Groups map to ranges: A=1-32, B=33-64, C=65-96, D=97-128.

    Bug fix: the original compared literals with ``is`` (``name is ''``,
    ``group_id is 1``), which relies on CPython interning/small-int caching
    and is not guaranteed to work; all comparisons now use ``==``.
    """
    name = request.POST.get('name')
    group_id = int(request.POST.get('group'))
    if name == '':
        return render(request, 'draw/info.html', {'text': '姓名不能为空!'})
    if Athlete.objects.filter(name=name).exists():
        return render(request, 'draw/info.html', {'text': '该名字已经存在!'})
    # Upper bound of each group's number range plus its display letter.
    groups = {1: (32, 'A'), 2: (64, 'B'), 3: (96, 'C'), 4: (128, 'D')}
    if group_id not in groups:
        return render(request, 'draw/info.html', {'text': '组别不能为空!'})
    num, group_name = groups[group_id]
    # Unclaimed slots (empty name) within this group's range.
    users = Athlete.objects.filter(name='', number__lte=num, number__gte=num - 31)
    if len(users) == 0:
        return render(request, 'draw/info.html', {'text': group_name + '组已满,请选择其他组别!'})
    # NOTE(review): not race-safe — two concurrent submissions can claim
    # the same slot; consider select_for_update inside a transaction.
    user = users[randrange(0, len(users))]
    user.name = name
    user.group = group_id
    user.save()
    return render(request, 'draw/info.html', {'text': '您的编号是:' + str(user.number) + '\n' + '您的组别是:' + group_name})
def init(request):
array = []
for i in range(1, 129):
array.append(i)
shuffle(array)
for i in array:
user = Athlete(number=i)
user.save()
return HttpResponse('初始化完毕。')
| TJCA/ballot | draw/views.py | views.py | py | 1,763 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Athlete.objects.filter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Athlete.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
... |
21432524861 | import boto3
def main():
# remember to `$ export AWS_PROFILE=a-profile` & `$ export AWS_REGION=a-region`
# to pick up credentials for this exercise, or set env vars appropriately for AK/SK access
ec2_conn = boto3.client('ec2')
if __name__ == "__main__":
main() | SteveL1/InterviewTest | scripting_py_bash/test.py | test.py | py | 283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 7,
"usage_type": "call"
}
] |
13782307429 | from fastapi import APIRouter
from conn import conn
from model.event import Event
event_router = APIRouter(
prefix="/event",
tags=["event"],
)
@event_router.get("/")
async def read_items(attr: list, where: dict):
cursor = conn.cursor()
sql = Event.querySql(attr=attr, where=where)
cursor.execute(sql)
lines = cursor.fetchall()
return {'values': lines}
@event_router.post("/")
async def insert_item(event: Event):
cursor = conn.cursor()
sql = event.insertSql()
cursor.execute(sql)
conn.commit()
return {'added':event}
@event_router.delete("/")
async def delete_item(where: dict):
cursor = conn.cursor()
sql = Event.deleteSql(where=where)
cursor.execute(sql)
conn.commit()
return {'deleted': where}
@event_router.put("/")
async def update_items(attrDict: dict, where: dict):
cursor = conn.cursor()
sql = Event.updateSql(where=where, attrDict=attrDict)
cursor.execute(sql)
conn.commit()
return {'updated': attrDict, 'where': where}
| JulioHey/Banco-de-Dados---EP | server/router/event.py | event.py | py | 1,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "conn.conn.cursor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "conn.conn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "model.event.Event.querySq... |
14480873433 | from django.conf import settings
from django.core.mail import send_mail
from mailing_list.models import Contact
def send_email(contact_info: dict):
return send_mail(
subject=f'Hi, {contact_info.get("username")}. You were subscribe on mailing list',
message='Now every day, we are going to send you lots of spam',
from_email=settings.EMAIL_HOST_USER,
recipient_list=[contact_info.get('email')],
fail_silently=True
)
| kinfi4/django-examples | src/django-celery/project/mailing_list/service.py | service.py | py | 466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.mail.send_mail",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.EMAIL_HOST_USER",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
}
] |
44333686531 | from flask import Blueprint
from flask import jsonify
from flask import request
from ..services import BrandService
from ..models import BrandModel
brand = Blueprint('brand', __name__, url_prefix="/brand")
@brand.route('/<int:id>', methods=["GET"])
def get_by_id(id: int):
try:
if request.method == "GET":
service = BrandService()
model = service.get_by_id(id)
if model is not None:
return jsonify(
success=True,
message="data founded",
data=model,
)
except Exception as ex:
print(ex)
return jsonify(
error="data not found"
)
@brand.route('/', methods=["POST"])
def post():
try:
if request.method == "POST":
service = BrandService()
address = BrandModel(
name=request.json['name'],
number=int(request.json['number']),
city_id=int(request.json['city_id']),
state_id=int(request.json['state_id']),
country_id=int(request.json['country_id'])
)
address_created = service.create_model(model=address)
return jsonify(
success=True,
message="data created",
data=address_created
)
except Exception:
return jsonify(
error="couldn't be created"
)
@brand.route('/<int:id>', methods=["PUT"])
def update(id: int):
try:
if request.method == "PUT":
address = request.json['address']
service = BrandService()
response = service.update_model(
id = id,
model = address)
return jsonify(data=response)
except Exception as ex:
return f"Error: {ex}" | EliezerRamirezRuiz/RestAPI-ecommerce | app/blueprints/brand.py | brand.py | py | 1,942 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "services.Brand... |
20972435328 | import matplotlib.pyplot as plt
import numpy
### load the data for the map intervals in nanoseconds
data1 = numpy.loadtxt("3clients.txt")
data2 = numpy.loadtxt("13clients.txt")
data3 = numpy.loadtxt("103clients.txt")
data4 = numpy.loadtxt("1003clients.txt")
### get the data in milliseconds
milli_data1 = [item / 1000000 for item in data1]
milli_data2 = [item / 1000000 for item in data2]
milli_data3 = [item / 1000000 for item in data3]
milli_data4 = [item / 1000000 for item in data4]
plt.scatter(range(len(milli_data1)), milli_data1, s=2, label="3 clients")
plt.scatter(range(len(milli_data2)), milli_data2, s=2, label="13 clients")
plt.scatter(range(len(milli_data3)), milli_data3, s=2, label="103 clients")
plt.scatter(range(len(milli_data4)), milli_data4, s=2, label="1003 clients")
plt.legend(bbox_to_anchor=(1,1), shadow=True, ncol=1, markerscale=2)
plt.xlabel("Server tick")
plt.ylabel("Map message interval (ms)")
plt.title("Interval between map receipt with varying client load")
plt.savefig("freq_data_4.png", dpi=1200, bbox_inches="tight")
| abarg12/MinotaurGame | client/test_client/data_client/plot_map_scatter.py | plot_map_scatter.py | py | 1,056 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_numb... |
14042648229 | import pytest
data1 = ['linda', 'sevenruby']
@pytest.fixture(params=data1)
def data2(request):
print("数据准备unpack") # 解包
# return request.param
a = request.param
print(a)
return a
# @pytest.mark.parametrize('data2',data1)
def test_data(data2):
print(data2)
if __name__ == '__main__':
pytest.main(
['-s', '/Users/duheng/Documents/project/testframework/frameworkDemo/pytestDemoR/review/test_param_fixture3.py'])
| duheng18/python-study | testframework/frameworkDemo/pytestDemoR/review/test_param_fixture3.py | test_param_fixture3.py | py | 465 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.main",
"line_number": 21,
"usage_type": "call"
}
] |
24196970445 | # -*- coding: utf-8 -*-
import clr
clr.AddReference("RevitAPI")
clr.AddReference("System")
from System.Collections.Generic import List
from Autodesk.Revit.DB import FilteredElementCollector as Fec
from Autodesk.Revit.DB import BuiltInCategory as Bic
from Autodesk.Revit.DB import Transaction, FillPattern, FillPatternElement
from Autodesk.Revit.DB import OverrideGraphicSettings, View, ElementId
import Autodesk
from pyrevit import revit, DB
__doc__ = 'Visualizes the structural property of Walls, Floors,' \
' Structural Columns and Structural Framing.'
# reference the current open revit model to work with:
doc = __revit__.ActiveUIDocument.Document
# class containing information about the elements of includet categories
class StructuralElement:
def __init__(self, id, structural):
self.id = id
self.structural = structural
# this function takes all walls and floors and creates an object of the
# StructuralElement class and appends it to the elem_info list.
def GetElemProps(elem_lst):
for elem in elem_lst:
if not (elem.Name.startswith("AW-FA_") or elem.Name.startswith("IW-FA_")):
try:
id = elem.Id
structural = elem.LookupParameter("Tragwerk").AsInteger()
elem_info.append(StructuralElement(id, structural))
element_ids.append(id)
except:
pass
# this function takes all structural columns and structurals framings
# and creates an object of the
# StructuralElement class and appends it to the elem_info list.
def ElemCnvrt(elem_lst):
for elem in elem_lst:
id = elem.Id
elem_info.append(StructuralElement(id, 1))
element_ids.append(id)
# get all fill patterns
fill_patterns = Fec(doc).OfClass(FillPatternElement).WhereElementIsNotElementType().ToElements()
# get id of solid fill
solid_fill = fill_patterns[0].Id
# set colors
color_true = Autodesk.Revit.DB.Color(78,199,190)
color_true2 = Autodesk.Revit.DB.Color(0,77,71)
color_false = Autodesk.Revit.DB.Color(236,77,0)
color_false2 = Autodesk.Revit.DB.Color(153,51,0)
# create graphical overrides
try:
ogs_true = OverrideGraphicSettings().SetProjectionFillColor(color_true)
ogs_true.SetProjectionFillPatternId(solid_fill)
except:
ogs_true = OverrideGraphicSettings().SetSurfaceForegroundPatternColor(color_true)
ogs_true.SetSurfaceForegroundPatternId(solid_fill)
ogs_true.SetSurfaceTransparency(10)
ogs_true.SetProjectionLineColor(color_true2)
try:
ogs_false = OverrideGraphicSettings().SetProjectionFillColor(color_false)
ogs_false.SetProjectionFillPatternId(solid_fill)
except:
ogs_false = OverrideGraphicSettings().SetSurfaceForegroundPatternColor(color_false)
ogs_false.SetSurfaceForegroundPatternId(solid_fill)
ogs_false.SetSurfaceTransparency(0)
ogs_false.SetProjectionLineColor(color_false2)
# connect to revit model elements via FilteredElementCollector
# collect all the elements of selected elements category
walls = Fec(doc).OfCategory(Bic.OST_Walls).WhereElementIsNotElementType().ToElements()
floors = Fec(doc).OfCategory(Bic.OST_Floors).WhereElementIsNotElementType().ToElements()
columns = Fec(doc).OfCategory(Bic.OST_StructuralColumns).WhereElementIsNotElementType().ToElements()
framing = Fec(doc).OfCategory(Bic.OST_StructuralFraming).WhereElementIsNotElementType().ToElements()
# prepare lists
elem_info = []
element_ids = []
# process elements
GetElemProps(walls)
GetElemProps(floors)
ElemCnvrt(columns)
ElemCnvrt(framing)
# create a collection from all element ids
col1 = List[ElementId](element_ids)
# entering a transaction to modify the revit model database
# start transaction
tx = Transaction(doc, "check structural elements")
tx.Start()
# isolate all elements of category
doc.ActiveView.IsolateElementsTemporary(col1)
# set graphical overrides
for elem in elem_info:
if elem.structural == 1:
doc.ActiveView.SetElementOverrides((elem.id), ogs_true)
if elem.structural == 0:
doc.ActiveView.SetElementOverrides((elem.id), ogs_false)
# commit the changes to the revit model database
# end transaction
tx.Commit()
| karthi1015/Quality-Management | StructuralIntegrity_step1_script.py | StructuralIntegrity_step1_script.py | py | 4,145 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "clr.AddReference",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "clr.AddReference",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "Autodesk.Revit.DB.FillPatternElement",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name... |
70437856743 | import wikipedia
import numpy as np
import re
import nltk
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
import random as rd
import collections
import nltk.data
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
import random
import pandas as pd
import gensim
from gensim import corpora, models
import math
import os
nltk.download('stopwords')
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
from string import punctuation
punctuation = list(punctuation)
stemmer = WordNetLemmatizer()
# Data cleaning function
def clean(ss):
newdoc = []
alllens = []
topn = []
ds = re.sub(r'\W', ' ', str(ss))
ds = re.sub(r'\s+[a-zA-Z]\s+', ' ', ds)
ds = re.sub(r'\^[a-zA-Z]\s+', ' ', ds)
ds = re.sub(r'\s+', ' ', ds, flags=re.I)
ds = re.sub(r'^b\s+', '', ds)
ds = ds.lower()
tokens = ds.split()
tokens = [stemmer.lemmatize(word) for word in tokens]
tokens = [token for token in tokens if token not in stopwords and token not in punctuation]
tokens = [word for word in tokens if len(word) > 4]
st = ' '.join(tokens)
wordcount = collections.Counter(tokens)
ts.append(tokens)
alllens.append(len(tokens))
newdoc.append(st)
if st != '':
topn.append(tn+1)
return ds, wordcount, ts, alllens, newdoc, topn, tokens
# 39 topics
topics = ['Academic disciplines', 'Business', 'Communication', 'Concepts', 'Culture', 'Economy', 'Education', 'Energy',
'Engineering', 'Entertainment', 'Entities', 'Ethics', 'Food and drink', 'Geography', 'Government', 'Health',
'History', 'Human behavior', 'Humanities', 'Information', 'Internet', 'Knowledge', 'Language', 'Law', 'Life',
'Mass media', 'Mathematics', 'Military', 'Nature', 'People', 'Philosophy', 'Politics', 'Religion', 'Science',
'Society', 'Sports', 'Technology', 'Time', 'Universe']
ts = []
tn = 0
cs = []
for l in topics:
tn = tn+1
tops = wikipedia.search(l, results=50) # Take first 50 results
for i in tops:
try:
bti = wikipedia.page(i, auto_suggest=False).content
except wikipedia.DisambiguationError as e:
s = e.options[1]
try:
bti = wikipedia.page(s, auto_suggest=False).content
except wikipedia.DisambiguationError as e2:
s2 = e2.options[1]
#s2 = random.choice(e2.options) # Cause of the differing lengths
bti = wikipedia.page(s2, auto_suggest=False).content
except wikipedia.PageError:
continue
document = bti
test = nltk.sent_tokenize(document)
stemmer = WordNetLemmatizer()
ci = ' '.join(test[0:5]) # Change depending on how many sentences
ci = clean(ci)[4][0]
cs.append(ci)
# Some statistics for the data
cssplit = [d.split() for d in cs]
cslen = [len(i) for i in cssplit]
csmean = np.mean(cslen)
plt.hist(cslen)
np.percentile(cslen,75)
lamml = [min(cslen), max(cslen), np.mean(cslen), np.std(cslen)]
cs = [' '.join(i) for i in cs]
# Creating corpus
os.chdir('/Users/keeganstlegerdenny/Documents/Postgraduate/ResearchReport/Code/CreateCorp2')
with open('LargeCor4.txt', 'a') as f: # Change name if new corpus
for line in cs:
f.write(line)
f.write('\n')
| keeganstlegerdenny/STTM-How-Short-Is-Short | CorporaCreation.py | CorporaCreation.py | py | 3,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "strin... |
18192807224 | import re
import math
import functools
import copy
inFile = open('inputs/11.txt', 'r')
input = inFile.read()
input = map(lambda s: s.split("\n"), input.split("\n\n"))
class Monkey():
def __init__(self, num, items, operation, test, ifTrue, ifFalse):
self.num = num
self.items = items
self.operation = operation
self.test = test
self.ifTrue = ifTrue
self.ifFalse = ifFalse
self.totalInspect = 0
initialMonkeys = []
for monkeyLines in input:
monkeyNum = re.search("\d+", monkeyLines[0]).group()
monkeyNum = int(monkeyNum)
items = re.findall("\d+", monkeyLines[1])
items = list(map(int, items))
operation = monkeyLines[2]
operation = operation.replace('Operation: new = old ', '').strip().split(' ')
test = re.search("(\d+)", monkeyLines[3]).group()
test = int(test)
ifTrue = re.search("\d+", monkeyLines[4]).group()
ifTrue = int(ifTrue)
ifFalse = re.search("\d+", monkeyLines[5]).group()
ifFalse = int(ifFalse)
monkey = Monkey(monkeyNum, items, operation, test, ifTrue, ifFalse)
initialMonkeys.append(monkey)
divisors = []
for monkey in initialMonkeys:
if (monkey.test not in divisors):
divisors.append(monkey.test)
# since they're all prime numbers, we're safe to use their product as smallest common multiple
scm = functools.reduce(lambda x,y: x*y, divisors)
def solve(loop):
monkeys = copy.deepcopy(initialMonkeys)
for i in range(1, loop + 1):
for monkey in monkeys:
for item in monkey.items:
monkey.totalInspect += 1
operation, operand = monkey.operation
operand = item if operand == "old" else int(operand)
if (operation == '*'):
item = item * operand
else:
item = item + operand
if (loop == 20):
item = math.floor(item/3)
elif (item > scm):
times = math.floor(item/scm)
item -= scm*times
throwMonkeyNum = monkey.ifTrue if (item % monkey.test == 0) else monkey.ifFalse
monkeys[throwMonkeyNum].items.append(item)
monkey.items = []
for monkey in monkeys:
print("Monkey %d inspected items %d times." % (monkey.num, monkey.totalInspect))
x = sorted(monkeys, key=lambda monki: monki.totalInspect, reverse=True)[:2]
x = [monki.totalInspect for monki in x]
x = functools.reduce(lambda a, b: a * b, x, 1)
print("Part %d: %d" % (1 if loop == 20 else 2, x))
solve(20)
print("\n")
solve(10000) | keune/aoc-2022 | 11.py | 11.py | py | 2,319 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.search",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 34,
... |
37532715553 | from flask_restful import Resource
from flask import jsonify, abort, request
from backend.app import db, logger, app
from sqlalchemy import exc
from backend.models import Patient, Visit, Lab, Imaging, Appointment
from webargs import fields
from marshmallow import validate
from webargs.flaskparser import parser
from flask_jwt_extended import jwt_required
class ChildResource(Resource):
@jwt_required
def get(self, hn=None, child_type=None, record_id=None):
# Change HN format
if not hn or child_type not in Patient.__children__:
logger.error(
"Either resource type or patient HN is not supplied.".format(
hn, child_type
)
)
abort(404)
else:
hn = hn.replace("^", "/")
# Read from DB
try:
patient = Patient.query.filter_by(hn=hn).first()
if not patient:
logger.error(
"Child: {}, HN: {}, not found.".format(hn, child_type)
)
abort(404)
# Return all children
if hn and not record_id:
page = request.args.get("page", default=1, type=int)
logger.debug(
"Returning HN {} {}; page {}.".format(hn, child_type, page)
)
childrenPaginate = getattr(patient, child_type).paginate(
page=page, per_page=app.config["MAX_PAGINATION"]
)
return jsonify(
{
"items": childrenPaginate.items,
"page": childrenPaginate.page,
"pages": childrenPaginate.pages,
"total": childrenPaginate.total,
"perPage": app.config["MAX_PAGINATION"],
}
)
# Return specific child
elif hn and record_id:
logger.debug(
"Returning a row from: {}, HN: {} on {}.".format(
child_type, hn, record_id
)
)
child = (
getattr(patient, child_type)
.filter_by(id=record_id)
.first()
)
if child:
return jsonify(child)
else:
abort(404)
# How can you reach this point?
else:
logger.error("Unknown error.")
abort(500)
except (IndexError, exc.SQLAlchemyError) as e:
logger.error("Unable to read to DB")
logger.error(e)
abort(500)
@jwt_required
def put(self, hn=None, child_type=None, record_id=None):
if not hn or child_type not in Patient.__children__:
logger.error(
"No valid HN {} or child type {} supplied.".format(
hn, child_type
)
)
abort(404)
else:
hn = hn.replace("^", "/")
if record_id:
try:
record_id = int(record_id)
except ValueError:
record_id = None
try:
# Find HN
patient = Patient.query.filter(Patient.hn == hn).first()
if not patient:
logger.error("HN {} not found .".format(hn))
abort(404)
# Find if there is a record
child_column = getattr(patient, child_type)
if record_id:
record = child_column.filter_by(id=record_id).first()
else:
record = None
if record:
# Update the value
data = {}
# Form data validation
if child_type == "visits":
data = self.visit_form_data()
elif child_type == "labs":
data = self.lab_form_data()
elif child_type == "imaging":
data = self.imaging_form_data()
elif child_type == "appointments":
data = self.appointment_form_data()
else:
logger.error("Undefined child type {}.".format(child_type))
abort(404)
logger.debug(
"Patching a {} record ID {}, patient HN {}.".format(
child_type, record_id, hn
)
)
import sys
print(data["date"], file=sys.stdout)
record.update(**data)
db.session.add(record)
db.session.commit()
return jsonify({"status": "success"})
else:
# Add new value
child_column = getattr(patient, child_type)
data = {}
new_record = None
# Form data validation
if child_type == "visits":
data = self.visit_form_data()
new_record = Visit(**data)
elif child_type == "labs":
data = self.lab_form_data()
new_record = Lab(**data)
elif child_type == "imaging":
data = self.imaging_form_data()
new_record = Imaging(**data)
elif child_type == "appointments":
data = self.appointment_form_data()
new_record = Appointment(**data)
else:
logger.error("Undefined child type {}.".format(child_type))
abort(404)
# Add a new record
logger.debug(
(
"Adding record for {} on {} "
"for patient HN {} in the DB."
).format(child_type, data["date"], hn)
)
child_column.append(new_record)
db.session.add(patient)
db.session.commit()
return jsonify({"status": "success"})
except (IndexError, exc.SQLAlchemyError) as e:
logger.error("Unable to connect to DB")
logger.error(e)
abort(500)
@jwt_required
def delete(self, hn=None, child_type=None, record_id=None):
"""
Delete visit record
"""
logger.debug("Recieved Delete request.")
if not hn or not record_id or child_type not in Patient.__children__:
logger.error(
(
"No HN or Record Date or Child "
"Type was passed along, unable to Delete the record."
)
)
abort(400)
else:
hn = hn.replace("^", "/")
# Check if the patient exists in the db
try:
patient = Patient.query.filter_by(hn=hn).first()
if patient is None:
logger.error(
"No patient with the specified HN existed in the DB."
)
abort(404)
# Check if the record exists in the db
child_column = getattr(patient, child_type)
record = child_column.filter_by(id=record_id).first()
if record is None:
logger.debug(
"Record on {} does not exist in the DB.".format(record_id)
)
abort(400)
db.session.delete(record)
db.session.commit()
return jsonify({"status": "success"})
except (IndexError, exc.SQLAlchemyError) as e:
logger.error("Unable to write to DB")
logger.error(e)
abort(500)
def visit_form_data(self):
"""
Prase JSON from request
"""
# JSON Schema
json_args = {
"date": fields.Date(required=True),
"is_art_adherence": fields.String(
validate=validate.OneOf(["Yes", "No"])
),
"art_delay": fields.Float(),
"art_adherence_scale": fields.Float(),
"art_adherence_problem": fields.String(),
"hx_contact_tb": fields.String(),
"bw": fields.Float(),
"abn_pe": fields.List(fields.String(allow_missing=True)),
"imp": fields.List(
fields.String(), validate=validate.Length(min=1)
),
"arv": fields.List(fields.String(allow_missing=True)),
"why_switched_arv": fields.String(),
"oi_prophylaxis": fields.List(fields.String(allow_missing=True)),
"anti_tb": fields.List(fields.String(allow_missing=True)),
"vaccination": fields.List(fields.String(allow_missing=True)),
}
# Phrase post data
data = parser.parse(json_args, request, locations=["json"])
# Modify list datatype to JSON
data = Visit.convert_to_json(data)
return data
def lab_form_data(self):
"""
Prase JSON from request
"""
# JSON Schema
json_args = {
"date": fields.Date(required=True),
"anti_hiv": fields.String(
validate=validate.OneOf(["+", "-", "+/-"])
),
"cd4": fields.Integer(),
"p_cd4": fields.Float(),
"vl": fields.Integer(),
"hiv_resistence": fields.String(),
"hbsag": fields.String(validate=validate.OneOf(["+", "-", "+/-"])),
"anti_hbs": fields.String(
validate=validate.OneOf(["+", "-", "+/-"])
),
"anti_hcv": fields.String(
validate=validate.OneOf(["+", "-", "+/-"])
),
"afb": fields.String(
validate=validate.OneOf(["3+", "2+", "1+", "Scantly", "-"])
),
"sputum_gs": fields.String(),
"sputum_cs": fields.String(),
"genexpert": fields.String(),
"vdrl": fields.String(validate=validate.OneOf(["+", "-", "+/-"])),
"rpr": fields.String(validate=validate.Regexp(r"^1:\d+$")),
}
# Phrase post data
data = parser.parse(json_args, request, locations=["json"])
# Modify list datatype to JSON
data = Lab.convert_to_json(data)
return data
def imaging_form_data(self):
"""
Prase JSON from request
"""
# JSON Schema
json_args = {
"date": fields.Date(required=True),
"film_type": fields.String(required=True),
"result": fields.String(required=True),
}
# Phrase post data
data = parser.parse(json_args, request, locations=["json"])
# Modify list datatype to JSON
data = Imaging.convert_to_json(data)
return data
def appointment_form_data(self):
"""
Prase JSON from request
"""
# JSON Schema
json_args = {
"date": fields.Date(required=True),
"appointment_for": fields.String(required=True),
}
# Phrase post data
data = parser.parse(json_args, request, locations=["json"])
# Modify list datatype to JSON
data = Appointment.convert_to_json(data)
return data
| LedoKun/hiv-clinic-backend | resources/child_resource.py | child_resource.py | py | 11,450 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "backend.models.Patient.__children__",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Patient",
"line_number": 17,
"usage_type": "name"
},
{... |
15231900014 | import sys
import io
import urllib.request
print('hi')
print('한글')
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
imgUrl = "http://blogfiles.naver.net/20130502_54/dbsgusrl77_136748336507323OOv_JPEG/%BF%B5%C8%AD_%C0%BA%B9%D0%C7%CF%B0%D4_%C0%A7%B4%EB%C7%CF%B0%D4_%B8%DE%C0%CE_%BF%B9%B0%ED%C6%ED_%B5%BF%BF%B5%BB%F3_%281%29.jpg"
savePath = "/Users/yuri/Documents/section2/test1.jpg"
urllib.request.urlretrieve(imgUrl,savePath)
print("다운로드 완료!")
# import sys
# import io
# import urllib.request as dw
#
# sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
# sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
#
# imgUrl ="http://post.phinf.naver.net/20160621_169/1466482468068lmSHj_JPEG/If7GeIbOPZuYwI-GI3xU7ENRrlfI.jpg"
# htmlURL ="http://google.com"
#
# savePath1 ="/library/desktop pictures/qwe.jpg"
# savePath2 ="/library/desktop pictures/1.html"
#
# dw.urlretrieve(imgUrl, savePath1)
# dw.urlretrieve(htmlURL, savePath2)
#
# print("다운로드 완료!")
| leyuri/Crawling | Chapter2/download2-1.py | download2-1.py | py | 1,112 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdout.detach",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"lin... |
72466882663 | """
WSGI config for mintemplate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
import site
prev_sys_path = list(sys.path)
root = os.path.normpath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(root)
site.addsitedir(os.path.join(root, ".env/lib/python%d.%d/site-packages" % sys.version_info[:2]))
site.addsitedir(os.path.join(root, ".env/lib64/python%d.%d/site-packages" % sys.version_info[:2]))
# addsitedir adds its directories at the end, but we want our local stuff
# to take precedence over system-installed packages.
# See http://code.google.com/p/modwsgi/issues/detail?id=112
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
os.environ.setdefault("DJANGO_SETTINGS_MODULE", os.path.basename(os.path.dirname(__file__)) + ".settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| kfarr2/Minimal-Template | mintemplate/wsgi.py | wsgi.py | py | 1,128 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.normpath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_... |
36050060179 | from typing import List, Dict
from aiohttp import ClientSession
from common.decorators import catch_timeout
from web.integrations.email_client.abstract import AbstractEmailClient
from web.integrations.http import BaseHTTPClient
class ClientError(Exception):
pass
class EmailHTTPClient(AbstractEmailClient, BaseHTTPClient):
SETTINGS_ENDPOINT = "api/v1/settings"
EMAIL_ENDPOINT = "api/v1/email"
def __init__(self, session: ClientSession, base_url: str):
self._session = session
self._base_url = base_url
@catch_timeout
async def get_custom_headers_and_email_from(self) -> Dict:
response = await self._session.get(f"{self._base_url}{self.SETTINGS_ENDPOINT}")
if response.status != 200:
await self._raise_for_code(response)
return await response.json()
@catch_timeout
async def update_email_client_settings(self, settings: Dict):
response = await self._session.patch(
f"{self._base_url}{self.SETTINGS_ENDPOINT}", json=settings
)
if response.status != 200:
await self._raise_for_code(response)
@catch_timeout
async def schedule_mailing_jobs(
self, jobs: List[Dict], template: str, subject: str
):
response = await self._session.post(
f"{self._base_url}{self.EMAIL_ENDPOINT}",
json={"jobs": jobs, "template": template, "subject": subject},
)
if response.status != 202:
await self._raise_for_code(response)
| MFrackowiak/sc_r_mailmarketing | web/integrations/email_client/http.py | http.py | py | 1,526 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web.integrations.email_client.abstract.AbstractEmailClient",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "web.integrations.http.BaseHTTPClient",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 18,
... |
2291746376 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: HuHao <huhao1@cmcm.com>
Date: '2018/8/25'
Info:
"""
# 获取系统环境
import os
# 创建app实例和数据库实例
from app import create_app,db
# 获取数据据类模板
from app.models import User,Role,Post,Permission
# 使用 Manage 丰富启动参数支持,和 Shell 环境支持
from flask_script import Manager,Shell
# 获取脚本迁移模块支持
from flask_migrate import Migrate,MigrateCommand,upgrade
import click
# 必须放在 from .. import 之后,app 实例化之前,否则统计不全
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*') # 覆盖率统计扫描包
COV.start()
app = create_app(os.getenv('FLASKY_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app,db)
def make_shell_context():
return dict(app=app,db=db,User=User,Role=Role,Post=Post,Permission=Permission)
# 使用 python manage.py shell 目录启动,自动执行make_shell_context,并将相应 实例字典引入shell环境
manager.add_command('shell',Shell(make_context=make_shell_context))
# 使用 python manage.py db 启动时,pye自动映射 MigrateCommand类
manager.add_command('db',MigrateCommand)
# -------- 单元测试 --------
@manager.command # 通过此注解可将函数名注册为启动参数,如通过: python manage.py test_basic 就可以调度到函数
def test_basic():
import unittest
tests = unittest.TestLoader().discover('tests') # 扫描根路径下 tests 目录下的 unittest.TestCase 子类
# 执行测试
unittest.TextTestRunner(verbosity=2).run(tests) # 输出测试案例的执行结果详细程度 verbosity
# -------- Unit tests with coverage report --------
# python manage.py coverable              run tests without coverage
# python manage.py coverable --coverage   run tests and collect coverage
@manager.command  # registers the function name as a CLI command
def coverable(coverage=False):
    """Run the unit tests."""
    # If --coverage was passed but FLASK_COVERAGE is not yet set, set it and
    # re-exec this script so the module-level coverage bootstrap runs first.
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        # replaces the current process; module top-level runs again with coverage on
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    # run the test suite
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    # If the coverage switch was on, stop the collector and emit the reports.
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')  # HTML report output path
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()  # discard collected data
@manager.command
def profile(length=25, profile_dir=None):
    # Keep at most the `length` (default 25) slowest calls per request; if
    # profile_dir is set the raw profiler data is also written there.
    """Start the application under the code profiler."""
    print(length,profile_dir)
    # NOTE(review): werkzeug.contrib was removed in Werkzeug >= 1.0; this import
    # presumably pins an older Werkzeug — confirm against requirements.
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],profile_dir=profile_dir)
    app.run(debug=False)
@manager.command
def deploy():
    """Run deployment tasks."""
    # migrate database to latest revision
    upgrade()
    # create or update user roles
    Role.insert_roles()
    # ensure all users are following themselves
    User.add_self_follows()
# Entry point: dispatch CLI commands (shell, db, test_basic, coverable, profile, deploy).
if __name__=="__main__":
    manager.run()
| happy-place/flasky | manage.py | manage.py | py | 3,574 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "coverage.coverage",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "app.create_app",
... |
2531159065 | from __future__ import print_function,division
import numpy as np
import six.moves.cPickle as pickle
import theano
import theano.tensor as T
from optimize import rmsprop
from lmlp import LMLP
class Simple_Discriminator(object):
    """Discriminator built from two weight-tied LMLP copies.

    One copy scores the fake batch, the other the real batch; both share the
    same parameter list. Exposes the fake branch's layers, params and gradient
    quantities, plus the mean difference between fake and real outputs.
    """
    def __init__(self, rng, input_fake, input_real, info_layers):
        self.input = input_fake
        self.input_r = input_real
        # primary network: reads the fake batch and owns the parameters
        fake_net = LMLP(rng, self.input, info_layers)
        self.network1 = fake_net
        self.layers = fake_net.layers
        self.params = fake_net.params
        self.output = fake_net.output
        self.gradient_cost = fake_net.gradient_cost
        self.max_gradient = fake_net.max_gradient
        # secondary network: same parameters, applied to the real batch
        real_net = LMLP(rng, self.input_r, info_layers, params=self.params)
        self.network2 = real_net
        self.output_r = real_net.output
        # mean(D(fake)) - mean(D(real)), used by the training objective
        self.mean_difference = (self.output - self.output_r).mean()
def generate_data(length,data_num):
    """Return a theano shared variable of shape (length, 1) with random samples.

    data_num selects the distribution:
      0 -> uniform on [-1, 1)
      1 -> normal(mean=0,  std=1)
      2 -> normal(mean=-1, std=1)
      3 -> normal(mean=0,  std=0.1)

    Raises:
        ValueError: for any other data_num (the original implicitly returned
        None, which only crashed later at the call site).
    """
    if data_num == 0:
        samples = 2*np.random.random(length)-1.0
    elif data_num == 1:
        samples = np.random.normal(0,1,length)
    elif data_num == 2:
        samples = np.random.normal(-1,1,length)
    elif data_num == 3:
        samples = np.random.normal(0,0.1,length)
    else:
        raise ValueError('unknown data_num: %r' % (data_num,))
    # single shared conversion path replaces four duplicated branches
    prearray = np.asarray(samples, dtype=theano.config.floatX)
    # column vector; borrow=True avoids an extra copy into the shared variable
    return theano.shared(np.reshape(prearray,(length,1)),borrow=True)
def example_train(n_epochs=100, batch_size=20,gradient_reg=1.0):
    """Train the discriminator to separate uniform(-1,1) from normal(0,0.1) data.

    Periodically validates, and every `plot_time` epochs pickles the network
    and plots it via example_graph().
    """
    import timeit
    # debug switches
    print_initial_parameters = False
    print_initial_gradient_cost = False
    print_initial_gradient_norms = False
    # FIX: this flag was referenced at the end of the function but never
    # defined, raising NameError after training completed.
    print_end_parameters = False
    plot_time=10  # epochs between checkpoint/plot
    # training and validation sets: distribution 0 (uniform) vs 3 (narrow normal)
    fake_x_data = generate_data(10000,0)
    real_x_data = generate_data(10000,3)
    fake_x_valid = generate_data(1000,0)
    real_x_valid = generate_data(1000,3)
    index = T.lscalar()       # minibatch index
    x_fake = T.matrix('x_f')
    x_real = T.matrix('x_r')
    n_train_batches = fake_x_data.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = fake_x_valid.get_value(borrow=True).shape[0] // batch_size
    print('... building the model')
    rng = np.random.RandomState(1000)
    network = Simple_Discriminator(
        rng=rng,
        input_fake=x_fake,
        input_real=x_real,
        #info_layers=[(5,1,20),(5,20,20),(5,20,20),(5,20,1)]
        info_layers=[(5,1,20),(1,20,1)]
    )
    # maximize mean_difference while penalizing gradient_cost approaching 1
    cost = -network.mean_difference+gradient_reg/(1.0-network.gradient_cost)
    if print_initial_parameters:
        print('printing initial parameters')
        for param in network.params:
            print(param.get_value())
    get_max_gradient = theano.function(
        inputs=[],
        outputs=network.max_gradient,
        givens={
        }
    )
    get_gradient_norms = theano.function(
        inputs=[],
        outputs=[layer.gradient_norms for layer in network.layers],
        givens={
        }
    )
    get_gradient_cost = theano.function(
        inputs=[],
        outputs=network.gradient_cost,
        givens={
        }
    )
    if print_initial_gradient_cost:
        print('initial gradient cost: %f '% get_gradient_cost())
    if print_initial_gradient_norms:
        print('printing gradient norms')
        for matrix in get_gradient_norms():
            print(matrix)
    validate_model = theano.function(
        inputs=[index],
        outputs=network.mean_difference,
        givens={
            x_fake: fake_x_valid[index * batch_size:(index + 1) * batch_size],
            x_real: real_x_valid[index * batch_size:(index + 1) * batch_size]
        }
    )
    updates=rmsprop(cost,network.params)
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x_fake: fake_x_data[index*batch_size:(index+1)*batch_size],
            x_real: real_x_data[index*batch_size:(index+1)*batch_size]
        }
    )
    print('... training')
    validation_frequency = n_train_batches          # validate once per epoch
    plot_frequency = n_train_batches*plot_time      # checkpoint every plot_time epochs
    start_time = timeit.default_timer()
    epoch = 0
    while (epoch < n_epochs):
        for minibatch_index in range(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            iter = epoch * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                # compute the critic's mean difference on the validation set
                validation_losses = [validate_model(i) for i
                                     in range(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                this_gradient_max = get_max_gradient()
                print(
                    'epoch %i, minibatch %i/%i, validation mean square error %f, max_gradient %f' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss,
                        this_gradient_max
                    )
                )
            if (iter + 1) % plot_frequency == 0:
                # checkpoint the model and render the current decision function
                with open('test_discriminator_model.pkl', 'wb') as f:
                    pickle.dump(network, f)
                example_graph()
        epoch+=1
    end_time = timeit.default_timer()
    print(('The code ran for %.2fs' % (end_time - start_time)))
    if print_end_parameters:
        print('printing end parameters')
        for param in network.params:
            print(param.get_value())
def example_graph(length=1000):
    """Plot the saved discriminator's output over [-2, 2] against reference curves."""
    import matplotlib.pyplot as plt
    # context manager closes the pickle file (the original leaked the handle)
    with open('test_discriminator_model.pkl','rb') as f:
        network = pickle.load(f)
    predict_model = theano.function(
        inputs=[network.input],
        outputs=network.output)
    data_input = np.reshape(np.linspace(-2,2,length),(length,1))
    predicted_values = predict_model(data_input)
    # reference densities (normal(0,1) and normal(-1,1) curves were computed
    # but never plotted in the original; removed as dead code)
    uniform_graph = data_input*0.+0.5
    normal_graph_3 = np.exp(-(10*data_input)**2/2.0)/(0.1*np.sqrt(2*np.pi))
    plt.plot(data_input,predicted_values)
    plt.plot(data_input,uniform_graph)
    plt.plot(data_input,normal_graph_3)
    plt.show()
# Script entry point: train the discriminator (checkpoints/plots periodically).
if __name__ == "__main__":
    example_train()
| davikrehalt/WGAN-mod | simple_discriminator.py | simple_discriminator.py | py | 6,543 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lmlp.LMLP",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lmlp.LMLP",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_num... |
28875886668 | from __future__ import absolute_import, unicode_literals
from draftjs_exporter.dom import DOM
from draftjs_exporter.error import ExporterException
from draftjs_exporter.options import Options
class EntityException(ExporterException):
    """Raised when entity commands are unbalanced or an entity key is unknown."""
    pass
class EntityState:
    """Tracks open Draft.js entities while walking a block's style commands.

    Entities are pushed/popped via `apply`; `render_entities` buffers styled
    nodes while an entity is open and wraps them with the entity's decorator
    element once the entity completes.
    """
    def __init__(self, entity_decorators, entity_map):
        self.entity_decorators = entity_decorators  # type -> decorator config
        self.entity_map = entity_map                # key -> {'type', 'data'}
        self.entity_stack = []       # keys of currently-open entities
        self.completed_entity = None # key of the entity that just closed, if any
        self.element_stack = []      # style nodes buffered inside the open entity
    def apply(self, command):
        """Push on 'start_entity', pop (and record completion) on 'stop_entity'."""
        if command.name == 'start_entity':
            self.entity_stack.append(command.data)
        elif command.name == 'stop_entity':
            # stop must match the most recently started entity
            expected_entity = self.entity_stack[-1]
            if command.data != expected_entity:
                raise EntityException('Expected {0}, got {1}'.format(expected_entity, command.data))
            self.completed_entity = self.entity_stack.pop()
    def has_no_entity(self):
        """True when no entity is currently open."""
        return not self.entity_stack
    def get_entity_details(self, entity_key):
        """Look up an entity by key; raise EntityException if it is missing."""
        details = self.entity_map.get(str(entity_key))
        if details is None:
            raise EntityException('Entity "%s" does not exist in the entityMap' % entity_key)
        return details
    def render_entities(self, style_node):
        """Return the element to emit for `style_node`, or None if it was buffered.

        Three cases: an entity just completed (wrap buffered children and the
        buffer is reset), no entity is open (pass the node through), or an
        entity is still open (buffer the node and return None).
        """
        if self.completed_entity is not None:
            entity_details = self.get_entity_details(self.completed_entity)
            opts = Options.for_entity(self.entity_decorators, entity_details['type'])
            # copy so the entity map's data dict is not mutated
            props = entity_details['data'].copy()
            props['entity'] = {
                'type': entity_details['type'],
            }
            # gather every node buffered while the entity was open
            nodes = DOM.create_element()
            for n in self.element_stack:
                DOM.append_child(nodes, n)
            elt = DOM.create_element(opts.element, props, nodes)
            self.completed_entity = None
            self.element_stack = []
        elif self.has_no_entity():
            elt = style_node
        else:
            self.element_stack.append(style_node)
            elt = None
        return elt
| mohit-n-rajput/BT-Real-Estate | venv/lib/python3.6/site-packages/draftjs_exporter/entity_state.py | entity_state.py | py | 2,098 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "draftjs_exporter.error.ExporterException",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "draftjs_exporter.options.Options.for_entity",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "draftjs_exporter.options.Options",
"line_number": 47,
... |
16413307502 | import googlemaps
import json
import random
from django.conf import settings
from datetime import date as todaysDate
from django.utils.timezone import make_aware
from django.db import models
from usuarios.models import MyUser, Cliente, Transportista, Unidades
from django.urls import reverse
from django.utils import timezone
from django.dispatch import receiver
from django.utils.text import slugify
from django.db.models.signals import post_save, post_delete
from django.core.exceptions import ValidationError
ESTADOS = (
('Aguascalientes','Aguascalientes'),
('Baja California','Baja California'),
('Baja California Sur','Baja California Sur'),
('Campeche','Campeche'),
('Coahuila de Zaragoza','Coahuila de Zaragoza'),
('Colima','Colima'),
('Chiapas','Chiapas'),
('Chihuahua','Chihuahua'),
('CDMX','CDMX'),
('Durango','Durango'),
('Guanajuato','Guanajuato'),
('Guerrero','Guerrero'),
('Hidalgo','Hidalgo'),
('Jalisco','Jalisco'),
('México','México'),
('Michoacán de Ocampo','Michoacán de Ocampo'),
('Morelos','Morelos'),
('Nayarit','Nayarit'),
('Nuevo León','Nuevo León'),
('Oaxaca','Oaxaca'),
('Puebla','Puebla'),
('Querétaro','Querétaro'),
('Quintana Roo','Quintana Roo'),
('San Luis Potosí','San Luis Potosí'),
('Sinaloa','Sinaloa'),
('Sonora','Sonora'),
('Tabasco','Tabasco'),
('Tamaulipas','Tamaulipas'),
('Tlaxcala','Tlaxcala'),
('Veracruz de Ignacio de la Llave','Veracruz de Ignacio de la Llave'),
('Yucatán','Yucatán'),
('Zacatecas','Zacatecas'),
)
def validate_name_domicilio(name):
    """Validator: reject a Domicilios `nombre` that already exists."""
    if Domicilios.objects.filter(nombre=name).exists():
        # FIX: the original string lacked the f-prefix, so users literally saw
        # "{name}" instead of the duplicate name (typo "domiclio" also fixed).
        raise ValidationError(f"No se puede agregar un domicilio con el mismo nombre {name}")
class Domicilios(models.Model):
    """A client's saved address, geocoded via the Google Maps API on save."""
    cliente_id = models.ForeignKey(Cliente, on_delete=models.CASCADE)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    nombre = models.CharField(verbose_name="Nombre para identificar domicilio", max_length=200)
    calle = models.CharField(verbose_name="Calle", max_length=200)
    num_ext = models.CharField(verbose_name="Numero exterior", max_length=200)
    num_int = models.CharField(verbose_name="Numero interior", max_length=200, blank=True)
    colonia = models.CharField(verbose_name="Colonia", max_length=200)
    municipio = models.CharField(verbose_name="Municipio o alcadía", max_length=200)
    cp = models.CharField(verbose_name="Código postal",max_length=200,)
    estado = models.CharField(verbose_name="Estado", choices=ESTADOS, max_length=100)
    referencias = models.TextField(verbose_name="Refrencias del domicilio")
    longitud = models.FloatField(verbose_name="Longitud", blank=True)
    latitud = models.FloatField(verbose_name="Latitud", blank=True)
    is_valid = models.BooleanField(verbose_name="Es válido", default=False)
    google_format = models.CharField(verbose_name="Dirección completa", max_length=200, blank=True)
    google_place_id = models.CharField(verbose_name="Google place ID", max_length=250, blank=True)
    slug = models.SlugField(null=True, blank=True, max_length=250)
    class Meta:
        verbose_name_plural = "Domicilios"
    def save(self, *args, **kwargs):
        ''' On save, update timestamps and geocode the address. '''
        if not self.id:
            self.creado = timezone.now()
        gmaps = googlemaps.Client(key=settings.GOOGLE_API_KEY)
        direction = f'{self.calle} {self.num_ext} {self.colonia} {self.estado}'
        geocode_result = gmaps.geocode(direction)
        # FIX: guard BEFORE indexing. The original read geocode_result[0]
        # first, raising IndexError whenever the geocoder returned no matches.
        # A formatted_address shorter than 50 chars is treated as a bad match.
        if not geocode_result or len(geocode_result[0]["formatted_address"]) < 50:
            self.is_valid = False
            self.latitud = 0
            self.longitud = 0
            self.google_place_id = "not valid"
            # original assigned "Invalid" then overwrote with "not valid";
            # keep the final (persisted) value only
            self.google_format = "not valid"
        else:
            self.latitud = geocode_result[0]["geometry"]["location"]["lat"]
            self.longitud = geocode_result[0]["geometry"]["location"]["lng"]
            self.google_place_id = geocode_result[0]["place_id"]
            self.google_format = geocode_result[0]["formatted_address"]
            self.is_valid = True
        self.modificado = timezone.now()
        if self.slug is None:
            self.slug = slugify(f"{self.nombre}-{self.cliente_id}")
        return super(Domicilios, self).save(*args, **kwargs)
    def __str__(self):
        return f'{self.nombre}'
    @property
    def direccion_completa(self):
        """Human-readable full address assembled from the individual fields."""
        return f'{self.calle } {self.num_ext} {self.num_int}, C.P {self.cp} {self.colonia} {self.municipio}, {self.estado}'
    @property
    def get_domiclios_user(self, user):
        # placeholder kept for interface compatibility; never implemented
        pass
ESTADO_SOLICITUD = (
('Guardada','Guardada'),
('Publicada','Publicada'),
('Cotizada','Cotizada'),
('Asignada','Asignada'),
('Pagada','Pagada'),
('Cancelada','Cancelada'),
('Vencida','Vencida'),
)
def validate_date(date):
    """Validator: the service date must be strictly after today (server timezone)."""
    if date.date() <= timezone.now().date():
        raise ValidationError("La fecha tiene que ser mayor a hoy")
class Solicitud(models.Model):
    """A client's shipping request; its lifecycle is tracked in estado_solicitud."""
    cliente_id = models.ForeignKey(Cliente, on_delete=models.CASCADE)
    folio = models.CharField(verbose_name="Folio", max_length=100, editable=False, unique = True)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    descripcion_servicio = models.TextField(verbose_name="Descripción de servicio")
    caracteristicas_carga = models.TextField(verbose_name="Tipo de carga")
    peso_carga = models.FloatField(verbose_name="Peso de la carga(kg)")
    volumen_carga = models.FloatField(verbose_name="Volumen de la carga(mts3)")
    unidades_totales = models.IntegerField(verbose_name="Unidades totales de la carga")
    fecha_servicio = models.DateTimeField(verbose_name="Fecha de servicio", validators=[validate_date])
    hora = models.TimeField(verbose_name="Hora de servicio")
    tiempo_carga = models.IntegerField(verbose_name="Tiempo máximo para la carga(min)")
    domicilio_id = models.ForeignKey(Domicilios, on_delete=models.PROTECT, verbose_name="Origen")
    estado_solicitud = models.CharField(verbose_name="Estado de la solicitud", choices=ESTADO_SOLICITUD, max_length=40, default="Guardada")
    tiempo_total = models.FloatField(verbose_name="Tiempo total del viaje", null=True, blank=True)
    km_total = models.FloatField(verbose_name="Km totales del viaje", null=True, blank=True)
    slug = models.SlugField(null=True, blank=True)
    material_peligroso = models.BooleanField(
        verbose_name="Es material peligroso",
        default=False,)
    estado_origen = models.CharField(verbose_name="Estado", choices=ESTADOS, max_length=40, null=True, blank=True)
    motivo_cancelacion = models.TextField(verbose_name="Motivo de cancelación", null=True, blank=True)
    activo = models.BooleanField(
        verbose_name="Activo",
        default=True,)
    class Meta:
        verbose_name_plural = "Solicitudes"
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        if not self.id:
            self.creado = timezone.now()
        self.modificado = timezone.now()
        # snapshot the origin state so it survives later edits to the address
        if not self.estado_origen:
            self.estado_origen = self.domicilio_id.estado
        return super(Solicitud, self).save(*args, **kwargs)
    def has_destinos(self):
        """'Ruta' when the request has more than one destination, else 'Sencillo'."""
        # count() replaces the original fetch-then-listify round trip
        return "Ruta" if Destino.objects.filter(solicitud_id=self.pk).count() > 1 else "Sencillo"
    def __str__(self):
        return f'{self.folio}'
    def get_absolute_url(self):
        return reverse('detail-solicitud', kwargs={'pk':self.pk})
    def get_domiciliosid_destinos(self):
        """Ids of the Domicilios used as destinations of this request."""
        return [destino.domicilio_id.id
                for destino in Destino.objects.filter(solicitud_id=self.id)]
    def has_cotizaciones(self):
        """True when at least one quote exists for this request."""
        return Cotizacion.objects.filter(solicitud_id=self.pk).exists()
    def has_cotizacion_aceptada(self):
        """True when at least one quote for this request is in state 'Aceptada'."""
        # any() replaces the original loop with its never-updated `flag` variable
        return any(cotizacion.estado_cotizacion == 'Aceptada'
                   for cotizacion in Cotizacion.objects.filter(solicitud_id=self.pk))
    def cotizacionFinal(self):
        """The 'winning' quote (confirmed/paid/pending-payment/accepted), or False."""
        qs = Cotizacion.objects.filter(solicitud_id=self.id)
        cotizacion = (qs.filter(estado_cotizacion='Confirmada')
                      | qs.filter(estado_cotizacion='Pagada')
                      | qs.filter(estado_cotizacion='Pendiente de pago')
                      | qs.filter(estado_cotizacion='Aceptada'))
        return cotizacion[0] if cotizacion else False
    def cotizaciones(self):
        """All quotes submitted for this request."""
        return Cotizacion.objects.filter(solicitud_id=self.id)
class Destino(models.Model):
    """A delivery stop of a Solicitud, with up to five delivery-evidence photos."""
    solicitud_id = models.ForeignKey(Solicitud, on_delete=models.CASCADE)
    domicilio_id = models.ForeignKey(Domicilios, on_delete=models.PROTECT)
    tiempo_descarga = models.IntegerField(verbose_name="Tiempo máximo para la descarga(min)")
    unidades_entregar = models.IntegerField(verbose_name="Unidades a entregar en este destino")
    foto1 = models.ImageField(verbose_name="Foto 1 evidencia de entrega", upload_to='unidades_pics', null=True, blank=True)
    foto2 = models.ImageField(verbose_name="Foto 2 evidencia de entrega", upload_to='unidades_pics', null=True, blank=True)
    foto3 = models.ImageField(verbose_name="Foto 3 evidencia de entrega", upload_to='unidades_pics', null=True, blank=True)
    foto4 = models.ImageField(verbose_name="Foto 4 evidencia de entrega", upload_to='unidades_pics', null=True, blank=True)
    foto5 = models.ImageField(verbose_name="Foto 5 evidencia de entrega", upload_to='unidades_pics', null=True, blank=True)
    # TODO: arrival record
    # TODO: arrival time record
    class Meta:
        verbose_name_plural = "Destinos"
    def __str__(self):
        return f'Destino {self.domicilio_id} de {self.solicitud_id}'
    def hasEvidencias(self):
        # only foto1 counts as "has evidence"; fotos 2-5 are optional extras
        return True if self.foto1 else False
NIVEL_SEGURO = (
('Sin seguro', 'Sin seguro'),
('Nivel 1','Nivel 1'),
('Nivel 2','Nivel 2'),
('Nivel 3','Nivel 3'),
)
class Seguro(models.Model):
    """An insurance tier selectable on a quote: flat cost plus coverage amount."""
    nombre = models.CharField(verbose_name="Seguro", max_length=40, default="")
    costo = models.FloatField(verbose_name="Costo del seguro")
    cobertura = models.FloatField(verbose_name="Cobertura del seguro")
    class Meta:
        verbose_name_plural = "Seguros"
    def __str__(self):
        return f'{self.nombre}'
DEDUCCIONES_NOMBRES = (
('iva','iva'),
('comision_viaje','comision_viaje'),
)
class Deduccion(models.Model):
    """A named percentage deduction ('iva' or 'comision_viaje') applied to quotes."""
    #check optiosn of default
    nombre = models.CharField(verbose_name="Nombre de deducción", choices=DEDUCCIONES_NOMBRES, max_length=40, default="", unique=True)
    porcentaje = models.FloatField (verbose_name="%", help_text = "Ejemplo 0.16, agregar 0 al incio")
    descripcion = models.CharField(verbose_name="Descripcion de la deduccion", max_length=40, default="", blank=True)
    class Meta:
        verbose_name = "Deducciones"
        verbose_name_plural = "Deducciones"
    def __str__(self):
        return f'{self.nombre}'
#Cambiar Rechazada a No exitosa
ESTADO_COTIZACION = (
('Pendiente','Pendiente'),
('Aceptada','Aceptada'),
('Rechazada','Rechazada'),
('Confirmada','Confirmada'),
('Cancelada','Cancelada'),
('Solicitud cancelada','Solicitud cancelada'),
('Pagada','Pagada'),
('Pendiente de pago','Pendiente de pago')
)
class Cotizacion(models.Model):
    """A carrier's quote for a Solicitud; total = (monto [+ insurance]) * (1 + IVA)."""
    transportista_id = models.ForeignKey(
        Transportista,
        verbose_name="Transportista",
        on_delete=models.CASCADE)
    solicitud_id = models.ForeignKey(
        Solicitud,
        verbose_name="Solicitud",
        on_delete=models.CASCADE)
    unidad_id = models.ForeignKey(
        Unidades,
        verbose_name="Unidad",
        on_delete=models.CASCADE)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    monto = models.FloatField(
        verbose_name="Monto")
    folio = models.CharField(verbose_name="Folio", max_length=100, editable=True, unique = True)
    estado_cotizacion = models.CharField(verbose_name="Estado", choices=ESTADO_COTIZACION, max_length=40, default="Pendiente")
    motivo_cancelacion = models.TextField(verbose_name="Motivo de cancelación", null=True, blank=True)
    fecha_servicio = models.DateTimeField(verbose_name="Fecha de servicio de solictud",null=True, blank=True)
    correo_recordatorio = models.IntegerField(verbose_name="Corrreos enviados para recordatorio de confirmación", default=0)
    total = models.FloatField(
        verbose_name="Total", null=True, blank=True)
    iva = models.FloatField(verbose_name="IVA", default=0)
    comision = models.FloatField(verbose_name="Comision de viaje", default=0)
    slug = models.SlugField(null=True, blank=True)
    nivel_seguro = models.ForeignKey(
        Seguro,
        verbose_name="Nivel de Seguro",
        on_delete=models.CASCADE,
        null=True,
        blank=True)
    es_asegurada = models.BooleanField(
        verbose_name="Viaje asegurado",
        default=False,)
    aceptar_tyc = models.BooleanField(
        verbose_name="Aceptación de términos y condiciones de seguro",
        default=False,)
    activo = models.BooleanField(
        verbose_name="Activo",
        default=True,)
    class Meta:
        verbose_name = "Cotizaciones"
        verbose_name_plural = "Cotizaciones"
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # current IVA rate from the Deduccion table
        iva = Deduccion.objects.filter(nombre='iva').values()[0]['porcentaje']
        if self.iva == 0:
            self.iva = iva
        if self.comision == 0:
            self.comision = Deduccion.objects.filter(nombre='comision_viaje').values()[0]['porcentaje']
        if not self.id:
            self.creado = timezone.now()
            self.total = 0
        if self.es_asegurada:
            subtotal = self.monto + self.nivel_seguro.costo
        else:
            subtotal = self.monto
        #print(iva[0]['porcentaje'])
        # NOTE(review): the DB-sourced local `iva` is used here even when
        # self.iva was already non-zero — presumably intentional; confirm.
        # int() truncates the total to whole currency units.
        self.total = int(subtotal + subtotal * iva)
        if self.fecha_servicio == None or self.fecha_servicio == "":
            self.fecha_servicio = self.solicitud_id.fecha_servicio
        self.modificado = timezone.now()
        return super(Cotizacion, self).save(*args, **kwargs)
    def __str__(self):
        return f'{self.folio}'
    @property
    def getClienteId(self):
        # the client who owns the underlying request
        return self.solicitud_id.cliente_id
    @property
    def getSubtotal(self):
        # quoted amount plus insurance cost when insured
        return self.monto + self.nivel_seguro.costo if self.es_asegurada else self.monto
    @property
    def getIva(self):
        return self.getSubtotal * self.iva
    @property
    def getSubTotalComision(self):
        # platform commission, computed on the raw quote only (no insurance)
        return self.monto * self.comision
    @property
    def getIvaComision(self):
        return self.getSubTotalComision * self.iva
    @property
    def getTotalComision(self):
        return self.getSubTotalComision + self.getIvaComision
ESTADO_ORDEN = (
('paid','paid'),
('Pendiente','Pendiente'),
)
class Orden(models.Model):
    """Payment order for an accepted quote; link_* / orden_* mirror the payment provider."""
    cotizacion_id = models.OneToOneField(Cotizacion, on_delete=models.CASCADE)
    link_id = models.CharField(max_length = 500, null=True, blank=True)
    link_url = models.URLField(max_length = 500, null=True, blank=True)
    link_status = models.CharField(max_length = 200, null=True, blank=True)
    orden_id = models.CharField(max_length = 200, null=True, blank=True)
    orden_status = models.CharField(max_length = 200, null=True, blank=True)
    correo_recordatorio = models.IntegerField(verbose_name="Corrreos enviados para recordatorio de pago", default=0)
    class Meta:
        verbose_name = "Ordenes"
        verbose_name_plural = "Ordenes"
    def __str__(self):
        return f'Orden de cotización {self.cotizacion_id}'
    def has_orden(self):
        # True once the payment provider has assigned an order id
        return True if self.orden_id else False
ESTADO_VIAJE = (
('Creado','Creado'),
('Iniciado','Iniciado'),
('Terminado','Terminado'),
('Pendiente de pago','Pendiente de pago'),
('Pendiente de validación','Pendiente de validación'),
('Cerrado','Cerrado'),
('Vencido','Vencido'),
('Disputa','Disputa'),
('Accidente','Accidente'),
('Cancelado por cliente','Cancelado por cliente'),
('Cancelado por transportista','Cancelado por transportista'),
)
class Viaje(models.Model):
    """The executed trip for a paid Orden; lifecycle tracked in estado_viaje."""
    orden_id = models.OneToOneField(Orden, on_delete=models.CASCADE)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    folio = models.CharField(verbose_name="Folio", max_length=100, editable=True, unique = True)
    slug = models.SlugField(null=True, blank=True)
    estado_viaje = models.CharField(verbose_name="Estado", choices=ESTADO_VIAJE, max_length=40, default="Creado")
    hora_inicio = models.TimeField(verbose_name="Hora de inicio", null=True, blank=True)
    hora_llegada = models.TimeField(verbose_name="Hora de llegada", null=True, blank=True)
    nip_checkin = models.IntegerField(verbose_name="NIP de seguridad checkin", null=True, blank=True)
    nip_checkout = models.IntegerField(verbose_name="NIP de seguridad checkout", null=True, blank=True)
    comentarios = models.TextField(null=True, blank=True)
    fecha_servicio = models.DateTimeField(verbose_name="Fecha de servicio de solictud",null=True, blank=True)
    factura_pdf = models.FileField(upload_to='uploads/%Y/%m/%d/', verbose_name="Factura pdf", null=True, blank=True)
    factura_xml = models.FileField(upload_to='uploads/%Y/%m/%d/', verbose_name="Factura xml", null=True, blank=True)
    es_validado = models.BooleanField(verbose_name="Validado por administrador", default=False)
    motivo_cancelacion = models.TextField(verbose_name="Motivo de cancelación", null=True, blank=True)
    activo = models.BooleanField(
        verbose_name="Activo",
        default=True,)
    is_calificado = models.BooleanField(
        verbose_name="Calificado",
        default=False,)
    #facturado_cliente = models.BooleanField(verbose_name="Es válido", default=False)
    #facturado_transportista = models.BooleanField(verbose_name="Es válido", default=False)
    class Meta:
        verbose_name_plural = "Viajes"
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        if not self.creado:
            self.creado = timezone.now()
        self.modificado = timezone.now()
        # 4-digit security PINs; first digit 1-9 so they never start with 0.
        # NOTE(review): generated with `random`, not `secrets` — consider
        # secrets.randbelow for security-sensitive PINs.
        if self.nip_checkin is None:
            number = f'{random.randint(1,9)}{random.randint(0,9)}{random.randint(0,9)}{random.randint(0,9)}'
            self.nip_checkin = int(number)
        if self.nip_checkout is None:
            number = f'{random.randint(1,9)}{random.randint(0,9)}{random.randint(0,9)}{random.randint(0,9)}'
            self.nip_checkout = int(number)
        # folio/slug derived from the client and the quote id
        if self.folio is None or self.folio == "":
            self.folio = f'FS{self.orden_id.cotizacion_id.getClienteId}A{self.orden_id.cotizacion_id.id}'
        if self.slug is None:
            self.slug = f'FS{self.orden_id.cotizacion_id.getClienteId}A{self.orden_id.cotizacion_id.id}'
        if self.fecha_servicio == None or self.fecha_servicio == "":
            self.fecha_servicio = self.orden_id.cotizacion_id.solicitud_id.fecha_servicio
        return super(Viaje, self).save(*args, **kwargs)
    def __str__(self):
        return f'{self.folio}'
    @property
    def getClienteId(self):
        return self.orden_id.cotizacion_id.getClienteId
    @property
    def hasLlegada(self):
        return True if self.hora_llegada else False
    @property
    def hasInicio(self):
        return True if self.hora_inicio else False
    @property
    def getTransportista(self):
        return self.orden_id.cotizacion_id.transportista_id
    @property
    def solicitud(self):
        return self.orden_id.cotizacion_id.solicitud_id
    @property
    def cotizacion(self):
        return self.orden_id.cotizacion_id
    @property
    def hasCalificacion(self):
        # NOTE(review): the reverse one-to-one accessor raises
        # RelatedObjectDoesNotExist when no rating exists (no try/except here,
        # unlike the invoice accessors below) — confirm callers handle that.
        return True if self.calificacionesviajes else False
    @property
    def hasFacturaCliente(self):
        try:
            return True if self.facturascliente else False
        except:
            # reverse accessor raises when no invoice exists yet
            return False
    @property
    def hasFacturaTransportista(self):
        try:
            return True if self.facturastransportista else False
        except:
            # reverse accessor raises when no invoice exists yet
            return False
STARS = (
(1,'1'),
(2,'2'),
(3,'3'),
(4,'4'),
(5,'5'),
)
class CalificacionesViajes(models.Model):
    """One-to-one star rating (1-5) a client leaves for a trip's carrier."""
    viaje_id = models.OneToOneField(Viaje, on_delete=models.CASCADE)
    #foreign key transportista
    transportista_id = models.ForeignKey(
        Transportista,
        verbose_name="Transportista",
        on_delete=models.CASCADE)
    calificacionTransportista = models.IntegerField(choices=STARS, default=5)
    comentarios = models.TextField(verbose_name="Comentarios", null=False, default="")
    slug = models.SlugField(null=True, blank=True)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        if not self.id:
            self.creado = timezone.now()
        self.modificado = timezone.now()
        if self.slug is None:
            # slug derives from trip folio + carrier, unique per rating
            self.slug = f'FS{self.viaje_id}-{self.transportista_id}'
        return super(CalificacionesViajes, self).save(*args, **kwargs)
    def __str__(self):
        return f'{self.slug}'
class FacturasCliente(models.Model):
    """Invoice issued to the client for a trip (folio/slug filled by signal)."""
    viaje_id = models.OneToOneField(Viaje, on_delete=models.CASCADE)
    cliente_id = models.ForeignKey(Cliente, on_delete=models.CASCADE)
    folio = models.CharField(verbose_name="Folio", max_length=100, editable=True, unique = True)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    slug = models.SlugField(null=True, blank=True)
    def __str__(self):
        return f'{self.folio}'
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        if not self.id:
            self.creado = timezone.now()
        self.modificado = timezone.now()
        if self.slug is None:
            self.slug = f'FACL{self.viaje_id}'
        return super(FacturasCliente, self).save(*args, **kwargs)
class FacturasTransportista(models.Model):
    """Invoice issued to the carrier for a trip (folio/slug filled by signal)."""
    viaje_id = models.OneToOneField(Viaje, on_delete=models.CASCADE)
    transportista_id = models.ForeignKey(Transportista, on_delete=models.CASCADE)
    folio = models.CharField(verbose_name="Folio", max_length=100, editable=True, unique = True)
    creado = models.DateTimeField(editable=False)
    modificado = models.DateTimeField()
    slug = models.SlugField(null=True, blank=True)
    def __str__(self):
        return f'{self.folio}'
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        if not self.id:
            self.creado = timezone.now()
        self.modificado = timezone.now()
        if self.slug is None:
            self.slug = f'FATR{self.viaje_id}'
        return super(FacturasTransportista, self).save(*args, **kwargs)
#SIGNALS
#CREATE FOLIO AND SLUG OF SOLICITUD
@receiver(post_save, sender=Solicitud)
def createFolioSolicitud(sender,instance,**kwargs):
    """post_save: derive folio/slug from the row's PK.

    Runs post_save because the PK is needed; uses queryset update() so this
    signal is not re-triggered recursively.
    """
    folio = instance.id
    if instance.folio is None or instance.folio == "":
        Solicitud.objects.filter(
            id=folio
        ).update(
            folio=f'SCO{instance.cliente_id.user.username}A{folio}',
        )
    if instance.slug is None:
        Solicitud.objects.filter(
            id=folio
        ).update(
            slug=slugify(f"SCO{instance.cliente_id.user.username}A{folio}")
        )
#post_save.connect(createFolioSolicitud, sender=Solicitud)
#CREATE FOLIO AND SLUG OF COTIZACION
@receiver(post_save, sender=Cotizacion)
def createFolioCotizacion(sender,instance,**kwargs):
    """post_save: derive the quote's folio/slug from its PK, via non-recursive update()."""
    folio = instance.id
    if instance.folio is None or instance.folio == "":
        Cotizacion.objects.filter(
            id=folio
        ).update(
            folio=f'CO{instance.transportista_id.user.username}A{folio}',
        )
    if instance.slug is None:
        Cotizacion.objects.filter(
            id=folio
        ).update(
            slug=slugify(f"{instance.transportista_id.user.username}A{folio}")
        )
#CREATE FOLIO AND SLUG OF FACTURACIONES CLIENTE
@receiver(post_save, sender=FacturasCliente)
def createFolioFacturacion(sender,instance,**kwargs):
    """post_save: derive the client invoice's folio/slug from its PK.

    NOTE(review): a second function with this same name is defined below for
    FacturasTransportista and shadows this one at module level. Both receivers
    stay connected (registration happens at decoration time), but the second
    should be renamed.
    """
    folio = instance.id
    if instance.folio is None or instance.folio == "":
        FacturasCliente.objects.filter(
            id=folio
        ).update(
            folio=f'FACL{instance.cliente_id.user.username}A{folio}',
        )
    if instance.slug is None:
        FacturasCliente.objects.filter(
            id=folio
        ).update(
            slug=slugify(f"FACL{instance.cliente_id.user.username}A{folio}")
        )
#CREATE FOLIO AND SLUG OF FACTURACIONES TRANSPORTISTA
@receiver(post_save, sender=FacturasTransportista)
def createFolioFacturacion(sender,instance,**kwargs):
    """post_save: derive the carrier invoice's folio/slug from its PK.

    NOTE(review): this def reuses (and shadows) the name of the client-invoice
    receiver above; both stay connected because @receiver registers at
    decoration time, but renaming this one would avoid confusion.
    """
    folio = instance.id
    if instance.folio is None or instance.folio == "":
        FacturasTransportista.objects.filter(
            id=folio
        ).update(
            folio=f'FATR{instance.transportista_id.user.username}A{folio}',
        )
    if instance.slug is None:
        FacturasTransportista.objects.filter(
            id=folio
        ).update(
            slug=slugify(f"FATR{instance.transportista_id.user.username}A{folio}")
        )
#CREATE FacturaCliente if viaje don´t have it
@receiver(post_save, sender=Viaje)
def createFacturacionCliente(sender,instance,**kwargs):
    """post_save: once a trip reaches 'Pendiente de pago', create both invoices
    (client and carrier) if neither exists yet."""
    if not instance.hasFacturaCliente and not instance.hasFacturaTransportista and instance.estado_viaje == 'Pendiente de pago':
        # return values were bound to unused locals in the original; the
        # created rows are only reached later through the reverse accessors
        FacturasCliente.objects.create(viaje_id=instance,cliente_id=instance.getClienteId)
        FacturasTransportista.objects.create(viaje_id=instance,transportista_id=instance.getTransportista)
@receiver(post_save, sender=Cotizacion)
def create_ruta(sender, instance, **kwargs):
solicitud = instance.solicitud_id
if solicitud.estado_solicitud != 'Asignada':
solicitud.estado_solicitud = "Cotizada"
solicitud.save()
@receiver(post_delete, sender=Cotizacion)
def validarEsatdoCotizacion(sender,instance,**kwargs):
solicitud = instance.solicitud_id
if not solicitud.has_cotizaciones():
solicitud.estado_solicitud = "Publicada"
solicitud.save()
@receiver(post_save, sender=Domicilios)
def addLonLat(sender, instance, **kwargs):
gmaps = googlemaps.Client(key=settings.GOOGLE_API_KEY)
direction = f'{instance.calle} {instance.num_ext} {instance.colonia} {instance.estado}'
geocode_result = gmaps.geocode(direction)
direccion_google = geocode_result[0]["formatted_address"]
if len(geocode_result) == 0 or len(direccion_google) < 50:
Domicilios.objects.filter(id=instance.id).update(
is_valid = False,
google_format = "Invalid"
)
else:
Domicilios.objects.filter(id=instance.id).update(
latitud = geocode_result[0]["geometry"]["location"]["lat"],
longitud = geocode_result[0]["geometry"]["location"]["lng"],
google_place_id = geocode_result[0]["place_id"],
google_format = direccion_google,
is_valid = True
)
#CREATE VIAJE WHEN ORDEN IS 'Pagada'
@receiver(post_save, sender=Orden)
def crearViaje(sender, instance, **kwargs):
orden = instance
if orden.orden_status == 'Pagada':
viaje = Viaje.objects.create(orden_id=orden)
#CREATE FACTURA CLIENTE WHEN VIAJE IS 'CERRADO'
# @receiver(post_save, sender=Viaje)
# def crearViaje(sender, instance, **kwargs):
# viaje = instance
# if orden.orden_status == 'Pagada':
# viaje = Viaje.objects.create(orden_id=orden) | Zarate96/userTestFS | fletes/models.py | models.py | py | 28,527 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 56,
"usage_type": "name"
},
{
... |
17233781871 | import argparse
rows = 128 ### sliced spectrogram height
cols = 1024 ### sliced spectrogram width
channels = 2
max_width = 10337 ### maximum raw spectrogram width
split_count = 10 ### number of slices per song
epochs = 100
batch_size = 32
spectrogram_features = ['h', 'p'] ### Percussive & harmonic component spectrogram
CLASSES = ['Cai Luong', 'Cach Mang', 'Dan Ca - Que Huong', 'Dance', 'Khong Loi',
'Thieu Nhi', 'Trinh', 'Tru Tinh', 'Rap Viet', 'Rock Viet']
num_classes = len(CLASSES)
parser = argparse.ArgumentParser()
parser.add_argument("--train_csv", type=str, default="data/train_set.csv", help='path to train set csv')
parser.add_argument("--val_csv", type=str, default="data/val_set.csv", help='path to validation set csv')
parser.add_argument("--spectr_dir", type=str, default="data/spectr/train", help='path to train spectrogram images')
parser.add_argument("--model", type=str, default="resnet18",
choices=["resnet18", "resnet34", "CRNN", "simpleCNN"], help='model type')
parser.add_argument("--checkpoint", type=str, default='music_genre_cnnit .h5', help='path to checkpoint')
parser.add_argument('--evaluate', action='store_true', help='evaluate trained model with validation data')
parser.add_argument('--use_cache', action='store_true', help='use cached .npy data from disk')
parser.add_argument('--resume', action='store_true', help='resume training from latest checkpoint')
args = parser.parse_args()
model_name = args.checkpoint ### Saved model name | taprosoft/music-genre-classification | src/config.py | config.py | py | 1,525 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
}
] |
34198323203 | import pytest
from loguru import logger
from pytest_mock import MockerFixture
from fastapi_cloud_logging.fastapi_cloud_logging_handler import FastAPILoggingHandler
@pytest.fixture
def logging_handler(mocker: MockerFixture) -> FastAPILoggingHandler:
return FastAPILoggingHandler(
mocker.Mock(), transport=mocker.Mock(), structured=True
)
def test_with_logger_message(logging_handler: FastAPILoggingHandler):
logger.add(logging_handler, format="{message}")
logger.info("Hello")
(_, message_payloads), args = logging_handler.transport.send.call_args
assert args["labels"]["python_logger"] == "tests.test_loguru"
assert args["source_location"] is not None
assert message_payloads == {"message": "Hello"}
def divide(a, b):
return a / b
def test_with_logger_exception(logging_handler: FastAPILoggingHandler):
logger.add(logging_handler, format="{message}")
try:
divide(5, 0)
except ZeroDivisionError:
logger.exception("An error has occurred")
(record, message_payloads), args = logging_handler.transport.send.call_args
assert record.exc_info is None
assert args["labels"]["python_logger"] == "tests.test_loguru"
assert args["source_location"] is not None
assert message_payloads["message"].startswith("An error has occurred\n")
assert len(message_payloads["traceback"]) == 2
| quoth/fastapi-cloud-logging | tests/test_loguru.py | test_loguru.py | py | 1,376 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pytest_mock.MockerFixture",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "fastapi_cloud_logging.fastapi_cloud_logging_handler.FastAPILoggingHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 8,
"us... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.