seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42579037186 | import math
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn import preprocessing, model_selection, svm
from sklearn.linear_model import LinearRegression
style.use('ggplot')
#reading from excel converting into data frame
df=pd.read_excel("stock_data.xlsx")
df=df.set_index('Date')
#doing basic operation to get "high- low" percentage change
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
#df.set_index('Date', inplace=True)
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
#defining the label
forecast_col = 'Adj. Close'
df.fillna(value=-99999, inplace=True)
forecast_out = int(math.ceil(0.01 * len(df)))
df['label'] = df[forecast_col].shift(-forecast_out)
#preprocessing of data before applying the algorithm
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
X_lately = X[-forecast_out:]
X = X[:-forecast_out]
df.dropna(inplace=True)
y = np.array(df['label'])
#defining the trainin set and testing set from data.
# 80% is the traning set and 20% is the testing you can also modify this as per your requirement
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
#so we are using linearRegression model
#using all the thread available for processing
clf = LinearRegression(n_jobs=-1)
clf.fit(X_train, y_train)
#this is the score for your algorithm
#you should always go with algorith with the highest score.
confidence = clf.score(X_test, y_test)
print(confidence)
#now using the algorith to predict values
forecast_set = clf.predict(X_lately)
df['Forecast'] = np.nan
#86400 is the number of seconds in one year
#df.set_index('Date', inplace=True)
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += 86400
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]
#ploting the prediction on a graph
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
| rajdeep7dev/Prediction-of-stock-prices | ml_1.py | ml_1.py | py | 2,335 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.ceil",
... |
9193307146 | import os
import copy
import pytorch_lightning as pl
from pytorch_lightning import profiler
import pytorch_lightning.core.lightning as lightning
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import torch.nn as nn
from pytorch_lightning.loggers import WandbLogger
from datetime import datetime
from lightly.embedding._callback import CustomModelCheckpoint
class BaseEmbedding(lightning.LightningModule):
"""All trainable embeddings must inherit from BaseEmbedding.
"""
def __init__(self,
model,
criterion,
optimizer,
dataloader,
scheduler=None):
""" Constructor
Args:
model: (torch.nn.Module)
criterion: (torch.nn.Module)
optimizer: (torch.optim.Optimizer)
dataloader: (torch.utils.data.DataLoader)
"""
super(BaseEmbedding, self).__init__()
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.dataloader = dataloader
self.scheduler = scheduler
self.checkpoint = None
self.cwd = os.getcwd()
self.checkpoint_callback = None
self.init_checkpoint_callback()
self.save_hyperparameters()
def forward(self, x0, x1):
return self.model(x0, x1)
def training_step(self, batch, batch_idx):
# get the two image transformations
(x0, x1), _, _ = batch
# forward pass of the transformations
y0, y1 = self(x0, x1)
# calculate loss
loss = self.criterion(y0, y1)
# log loss and return
self.log('loss', loss)
return loss
def configure_optimizers(self):
if self.scheduler is None:
return self.optimizer
else:
return [self.optimizer], [self.scheduler]
def train_dataloader(self):
return self.dataloader
def train_embedding(self, **kwargs):
""" Train the model on the provided dataset.
Args:
**kwargs: pylightning_trainer arguments, examples include:
min_epochs: (int) Minimum number of epochs to train
max_epochs: (int) Maximum number of epochs to train
gpus: (int) number of gpus to use
weights_summary: (str) how to print a summary of the model and weights (number, size)
Returns:
A trained encoder, ready for embedding datasets.
"""
project_name=datetime.today().strftime('%Y-%m-%d_%H-%M')
wandb_logger = WandbLogger(project=project_name)
trainer = pl.Trainer(**kwargs, callbacks=[self.checkpoint_callback], profiler="pytorch", logger=wandb_logger)
trainer.fit(self)
self.checkpoint = self.checkpoint_callback.best_model_path
self.checkpoint = os.path.join(self.cwd, self.checkpoint)
def embed(self, *args, **kwargs):
"""Must be implemented by classes which inherit from BaseEmbedding.
"""
raise NotImplementedError()
def init_checkpoint_callback(self,
save_last=False,
save_top_k=0,
monitor='loss',
dirpath=None):
"""Initializes the checkpoint callback.
Args:
save_last:
Whether or not to save the checkpoint of the last epoch.
save_top_k:
Save the top_k model checkpoints.
monitor:
Which quantity to monitor.
dirpath:
Where to save the checkpoint.
"""
if pl.__version__[:3] in ['1.0', '1.1', '1.2']:
# initialize custom model checkpoint
self.checkpoint_callback = CustomModelCheckpoint()
self.checkpoint_callback.save_last = save_last
self.checkpoint_callback.save_top_k = save_top_k
self.checkpoint_callback.monitor = monitor
dirpath = self.cwd if dirpath is None else dirpath
self.checkpoint_callback.dirpath = dirpath
else:
self.checkpoint_callback = ModelCheckpoint(
dirpath=self.cwd if dirpath is None else dirpath,
filename='lightly_epoch_{epoch:d}',
save_last=save_last,
save_top_k=save_top_k,
monitor=monitor,
auto_insert_metric_name=False)
| tibe97/thesis-self-supervised-learning | lightly/embedding/_base.py | _base.py | py | 4,499 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pytorch_lightning.core.lightning.LightningModule",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.core.lightning",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 43,
"usage_type": "call... |
2483212505 | '''
Descripttion:
version:
Author: WGQ
Date: 2021-11-11 14:40:28
LastEditors: WGQ
LastEditTime: 2021-11-12 17:58:46
'''
from . import adminApi
import time
from fastapi import Query, Depends, Body, Form,Request
from playhouse.shortcuts import model_to_dict
from model.RModel import *
from common import Func, Utils
from utils import UserAuthUtil
@adminApi.post('/country/save', tags=['Admin-Country'],summary="新增/编辑Country")
async def save(req:Request,countryId:int = Form(0),countryName:str=Form(...),countryCode3:str=Form(...),countryCode2:str=Form(...),countryTimezoneUtc:int=Form(...),signInUser: dict = Depends(UserAuthUtil.verifyToken)):
try:
if countryId > 0:
RCountry.update(c_name = countryName,c_code3 = countryCode3,c_code2 = countryCode2, c_timezone_utc = countryTimezoneUtc ).where(RCountry.c_id == countryId).execute()
else:
cty = RCountry.create(c_name = countryName, c_code3 = countryCode3,c_code2 = countryCode2, c_timezone_utc = countryTimezoneUtc )
countryId = cty.c_id
return Func.jsonResult({"countryId":countryId})
except Exception as e:
return Func.jsonResult({"countryId":countryId},"发生错误,出现冲突",100000500)
@adminApi.get('/country/list', tags=['Admin-Country'],summary="Country列表")
async def countryList(signInUser: dict = Depends(UserAuthUtil.verifyToken)):
countries = RCountry.select().where(RCountry.c_status == 1).order_by(RCountry.c_id.desc()).dicts()
countryList = []
for _country in countries:
countryList.append({
"countryId":_country['c_id'],
"countryName":_country['c_name'],
"countryCode3":_country['c_code3'],
"countryCode2":_country['c_code2'],
"countryTimezoneUtc":_country['c_timezone_utc'],
})
return Func.jsonResult({"countryList":countryList})
@adminApi.delete('/country/remove', tags=['Admin-Country'],summary="删除Country")
async def remove(countryId:int = Query(...,description="CountryID"), signInUser: dict = Depends(UserAuthUtil.verifyToken)):
RCountry.update(c_status = 0).where(RCountry.c_id == countryId).execute()
return Func.jsonResult({"countryId":countryId},"adx removed")
| foreversun52/cgserver | adminapi/Country.py | Country.py | py | 2,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.Request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "fastapi.Form",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.UserAuthUtil.verif... |
39279563732 | import sys
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDialog, QApplication, QPushButton, QVBoxLayout, QShortcut
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
import pandas as pd
from readFitsSlim import Spectra
class Window(QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.initUI()
def initUI(self):
self.setWindowTitle('Spectral Classification')
self.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.CustomizeWindowHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint |
QtCore.Qt.WindowStaysOnTopHint)
self.classification = []
self.spec = Spectra('/data2/cpb405/DR1/*.fits')
self.spec.specList = self.spec.specList[:20]
self.index = 0
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.starButton = QPushButton('Star')
self.starButton.setStyleSheet("background-color: rgb(31, 119, 180);")
self.starButton.clicked.connect(self.STAR)
QShortcut(QtCore.Qt.Key_1, self.starButton, self.starButton.animateClick)
self.galaxyButton = QPushButton('Galaxy')
self.galaxyButton.setStyleSheet("background-color: rgb(31, 119, 180);")
self.galaxyButton.clicked.connect(self.GALAXY)
QShortcut(QtCore.Qt.Key_2, self.galaxyButton, self.galaxyButton.animateClick)
self.unknownButton = QPushButton('Unknown')
self.unknownButton.setStyleSheet("background-color: rgb(31, 119, 180);")
self.unknownButton.clicked.connect(self.UNKNOWN)
QShortcut(QtCore.Qt.Key_3, self.unknownButton, self.unknownButton.animateClick)
# set the layout
layout = QVBoxLayout()
layout.addWidget(self.canvas)
layout.addWidget(self.starButton)
layout.addWidget(self.galaxyButton)
layout.addWidget(self.unknownButton)
self.setLayout(layout)
self.plot()
def plot(self):
self.figure.clear()
ax = self.figure.add_subplot(111)
ax.plot(self.spec.specList[self.index].wavelength,self.spec.specList[self.index].flux)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
ax.set_yscale('log')
if self.index < (len(self.spec.specList)-1):
self.index += 1
else:
print(self.classification)
df = pd.DataFrame(columns=['designation','class'])
for i in range(len(self.classification)):
df.loc[len(df)] = [self.spec.desig[i],self.classification[i]]
df.to_csv('spectralTrainingSet.csv')
self.close()
# refresh canvas
self.canvas.draw()
def STAR(self):
self.classification.append('STAR')
self.plot()
def GALAXY(self):
self.classification.append('GALAXY')
self.plot()
def UNKNOWN(self):
self.classification.append('UNKNOWN')
self.plot()
print(self.classification)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = Window()
main.show()
sys.exit(app.exec_())
| grd349/LearningLAMOST | Chris/Temp_Model/SpectraUI.py | SpectraUI.py | py | 3,429 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCor... |
4430086553 | '''Given a string S, you need to remove all the duplicates.
That means, the output string should contain each character only once.
The respective order of characters should remain same, as in the input string.
Sample Input 1 :
ababacd
Sample Output 1 :
abcd
'''
from collections import OrderedDict
def uniqueChar(s):
# Write your code here
d=OrderedDict()
for char in s:
d[char]=d.get(char,0)+1
uniq=''
for char in d:
uniq=uniq+char
return uniq
# Main
s=input()
print(uniqueChar(s))
| Riyachauhan11/Python-learning-Concepts | dictionaries/Extract Unique characters.py | Extract Unique characters.py | py | 553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 20,
"usage_type": "call"
}
] |
35827193676 | """
This is the core file in the `gradio` package, and defines the Interface class, including methods for constructing the
interface using the input and output types.
"""
import tempfile
import traceback
import webbrowser
import gradio.inputs
import gradio.outputs
from gradio import networking, strings
from distutils.version import StrictVersion
import pkg_resources
import requests
import random
import time
import inspect
from IPython import get_ipython
import sys
import weakref
import analytics
PKG_VERSION_URL = "https://gradio.app/api/pkg-version"
analytics.write_key = "uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy"
analytics_url = 'https://api.gradio.app/'
try:
ip_address = requests.get('https://api.ipify.org').text
except requests.ConnectionError:
ip_address = "No internet connection"
class Interface:
"""
The Interface class represents a general input/output interface for a machine learning model. During construction,
the appropriate inputs and outputs
"""
instances = weakref.WeakSet()
def __init__(self, fn, inputs, outputs, saliency=None, verbose=False, examples=None,
live=False, show_input=True, show_output=True,
capture_session=False, title=None, description=None,
thumbnail=None, server_name=networking.LOCALHOST_NAME):
"""
:param fn: a function that will process the input panel data from the interface and return the output panel data.
:param inputs: a string or `AbstractInput` representing the input interface.
:param outputs: a string or `AbstractOutput` representing the output interface.
"""
def get_input_instance(iface):
if isinstance(iface, str):
return gradio.inputs.shortcuts[iface.lower()]
elif isinstance(iface, gradio.inputs.AbstractInput):
return iface
else:
raise ValueError("Input interface must be of type `str` or "
"`AbstractInput`")
def get_output_instance(iface):
if isinstance(iface, str):
return gradio.outputs.shortcuts[iface.lower()]
elif isinstance(iface, gradio.outputs.AbstractOutput):
return iface
else:
raise ValueError(
"Output interface must be of type `str` or "
"`AbstractOutput`"
)
if isinstance(inputs, list):
self.input_interfaces = [get_input_instance(i) for i in inputs]
else:
self.input_interfaces = [get_input_instance(inputs)]
if isinstance(outputs, list):
self.output_interfaces = [get_output_instance(i) for i in outputs]
else:
self.output_interfaces = [get_output_instance(outputs)]
if not isinstance(fn, list):
fn = [fn]
self.output_interfaces *= len(fn)
self.predict = fn
self.verbose = verbose
self.status = "OFF"
self.saliency = saliency
self.live = live
self.show_input = show_input
self.show_output = show_output
self.flag_hash = random.getrandbits(32)
self.capture_session = capture_session
self.session = None
self.server_name = server_name
self.title = title
self.description = description
self.thumbnail = thumbnail
self.examples = examples
self.server_port = None
self.simple_server = None
Interface.instances.add(self)
data = {'fn': fn,
'inputs': inputs,
'outputs': outputs,
'saliency': saliency,
'live': live,
'capture_session': capture_session,
'ip_address': ip_address
}
if self.capture_session:
try:
import tensorflow as tf
self.session = tf.get_default_graph(), \
tf.keras.backend.get_session()
except (ImportError, AttributeError): # If they are using TF >= 2.0 or don't have TF, just ignore this.
pass
try:
requests.post(analytics_url + 'gradio-initiated-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
def get_config_file(self):
config = {
"input_interfaces": [
(iface.__class__.__name__.lower(), iface.get_template_context())
for iface in self.input_interfaces],
"output_interfaces": [
(iface.__class__.__name__.lower(), iface.get_template_context())
for iface in self.output_interfaces],
"function_count": len(self.predict),
"live": self.live,
"show_input": self.show_input,
"show_output": self.show_output,
"title": self.title,
"description": self.description,
"thumbnail": self.thumbnail
}
try:
param_names = inspect.getfullargspec(self.predict[0])[0]
for iface, param in zip(config["input_interfaces"], param_names):
if not iface[1]["label"]:
iface[1]["label"] = param.replace("_", " ")
for i, iface in enumerate(config["output_interfaces"]):
ret_name = "Output " + str(i + 1) if len(config["output_interfaces"]) > 1 else "Output"
if not iface[1]["label"]:
iface[1]["label"] = ret_name
except ValueError:
pass
return config
def process(self, raw_input):
processed_input = [input_interface.preprocess(
raw_input[i]) for i, input_interface in
enumerate(self.input_interfaces)]
predictions = []
durations = []
for predict_fn in self.predict:
start = time.time()
if self.capture_session and not(self.session is None):
graph, sess = self.session
with graph.as_default():
with sess.as_default():
prediction = predict_fn(*processed_input)
else:
try:
prediction = predict_fn(*processed_input)
except ValueError as exception:
if str(exception).endswith("is not an element of this "
"graph."):
raise ValueError("It looks like you might be using "
"tensorflow < 2.0. Please "
"pass capture_session=True in "
"Interface to avoid the 'Tensor is "
"not an element of this graph.' "
"error.")
else:
raise exception
duration = time.time() - start
if len(self.output_interfaces) == len(self.predict):
prediction = [prediction]
durations.append(duration)
predictions.extend(prediction)
processed_output = [output_interface.postprocess(
predictions[i]) for i, output_interface in enumerate(self.output_interfaces)]
return processed_output, durations
def validate(self):
if self.validate_flag:
if self.verbose:
print("Interface already validated")
return
validation_inputs = self.input_interface.get_validation_inputs()
n = len(validation_inputs)
if n == 0:
self.validate_flag = True
if self.verbose:
print(
"No validation samples for this interface... skipping validation."
)
return
for m, msg in enumerate(validation_inputs):
if self.verbose:
print(
"Validating samples: {}/{} [".format(m+1, n)
+ "=" * (m + 1)
+ "." * (n - m - 1)
+ "]",
end="\r",
)
try:
processed_input = self.input_interface.preprocess(msg)
prediction = self.predict(processed_input)
except Exception as e:
data = {'error': e}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
if self.verbose:
print("\n----------")
print(
"Validation failed, likely due to incompatible pre-processing and model input. See below:\n"
)
print(traceback.format_exc())
break
try:
_ = self.output_interface.postprocess(prediction)
except Exception as e:
data = {'error': e}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
if self.verbose:
print("\n----------")
print(
"Validation failed, likely due to incompatible model output and post-processing."
"See below:\n"
)
print(traceback.format_exc())
break
else: # This means if a break was not explicitly called
self.validate_flag = True
if self.verbose:
print("\n\nValidation passed successfully!")
return
raise RuntimeError("Validation did not pass")
def close(self):
if self.simple_server and not(self.simple_server.fileno() == -1): # checks to see if server is running
print("Closing Gradio server on port {}...".format(self.server_port))
networking.close_server(self.simple_server)
def launch(self, inline=None, inbrowser=None, share=False, validate=True, debug=False):
"""
Standard method shared by interfaces that creates the interface and sets up a websocket to communicate with it.
:param inline: boolean. If True, then a gradio interface is created inline (e.g. in jupyter or colab notebook)
:param inbrowser: boolean. If True, then a new browser window opens with the gradio interface.
:param share: boolean. If True, then a share link is generated using ngrok is displayed to the user.
:param validate: boolean. If True, then the validation is run if the interface has not already been validated.
"""
# if validate and not self.validate_flag:
# self.validate()
output_directory = tempfile.mkdtemp()
# Set up a port to serve the directory containing the static files with interface.
server_port, httpd = networking.start_simple_server(self, output_directory, self.server_name)
path_to_local_server = "http://{}:{}/".format(self.server_name, server_port)
networking.build_template(output_directory)
self.server_port = server_port
self.status = "RUNNING"
self.simple_server = httpd
is_colab = False
try: # Check if running interactively using ipython.
from_ipynb = get_ipython()
if "google.colab" in str(from_ipynb):
is_colab = True
except NameError:
data = {'error': 'NameError in launch method'}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
pass
try:
current_pkg_version = pkg_resources.require("gradio")[0].version
latest_pkg_version = requests.get(url=PKG_VERSION_URL).json()["version"]
if StrictVersion(latest_pkg_version) > StrictVersion(current_pkg_version):
print("IMPORTANT: You are using gradio version {}, "
"however version {} "
"is available, please upgrade.".format(
current_pkg_version, latest_pkg_version))
print('--------')
except: # TODO(abidlabs): don't catch all exceptions
pass
if not is_colab:
print(strings.en["RUNNING_LOCALLY"].format(path_to_local_server))
else:
if debug:
print("Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
"To turn off, set debug=False in launch().")
else:
print("Colab notebook detected. To show errors in colab notebook, set debug=True in launch()")
if share:
try:
share_url = networking.setup_tunnel(server_port)
print("Running on External URL:", share_url)
except RuntimeError:
data = {'error': 'RuntimeError in launch method'}
try:
requests.post(analytics_url + 'gradio-error-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
share_url = None
if self.verbose:
print(strings.en["NGROK_NO_INTERNET"])
else:
if (
is_colab
): # For a colab notebook, create a public link even if share is False.
share_url = networking.setup_tunnel(server_port)
print("Running on External URL:", share_url)
if self.verbose:
print(strings.en["COLAB_NO_LOCAL"])
else: # If it's not a colab notebook and share=False, print a message telling them about the share option.
if self.verbose:
print(strings.en["PUBLIC_SHARE_TRUE"])
share_url = None
if inline is None:
try: # Check if running interactively using ipython.
get_ipython()
inline = True
if inbrowser is None:
inbrowser = False
except NameError:
inline = False
if inbrowser is None:
inbrowser = True
else:
if inbrowser is None:
inbrowser = False
if inbrowser and not is_colab:
webbrowser.open(
path_to_local_server
) # Open a browser tab with the interface.
if inline:
from IPython.display import IFrame, display
if (
is_colab
): # Embed the remote interface page if on google colab;
# otherwise, embed the local page.
print("Interface loading below...")
while not networking.url_ok(share_url):
time.sleep(1)
display(IFrame(share_url, width=1000, height=500))
else:
display(IFrame(path_to_local_server, width=1000, height=500))
config = self.get_config_file()
config["share_url"] = share_url
processed_examples = []
if self.examples is not None:
for example_set in self.examples:
processed_set = []
for iface, example in zip(self.input_interfaces, example_set):
processed_set.append(iface.process_example(example))
processed_examples.append(processed_set)
config["examples"] = processed_examples
networking.set_config(config, output_directory)
if debug:
while True:
sys.stdout.flush()
time.sleep(0.1)
launch_method = 'browser' if inbrowser else 'inline'
data = {'launch_method': launch_method,
'is_google_colab': is_colab,
'is_sharing_on': share,
'share_url': share_url,
'ip_address': ip_address
}
try:
requests.post(analytics_url + 'gradio-launched-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
return httpd, path_to_local_server, share_url
@classmethod
def get_instances(cls):
return list(Interface.instances) #Returns list of all current instances
def reset_all():
for io in Interface.get_instances():
io.close()
| parvez0722/Sugesstion_of_next_word | venv/Lib/site-packages/gradio/interface.py | interface.py | py | 17,457 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "analytics.write_key",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.ConnectionError",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "w... |
5818279983 | from Algorithms.Usefull_elements import Step, intersection, addition, get_edges, invert_Graph, vertex_list_to_str, hsv_to_hex, replace_color
import copy
from collections import defaultdict
def algorithm_depth_first_search(matrix):
mass = list() # массив смежных вершин
vertex_mark = dict() # объявление пустого словаря (соотв. вершин меткам)
vertex = list() # объявление пустого списка (вершины без меток)
stack = list() # объявление пустого списка (стек)
all_vertex = [] # список вершин
steps = [] # список шагов
alg_result = [] # шаг-результат
edges = [] # список рёбер
route = [] # маршрут
loop = False # нет контура
# вложенная функция, реализующая алгоритм
def dfs(prev_ver, cur_ver):
print(f' Текущая вершина: {cur_ver}')
#h_step.node_options[cur_ver] = replace_color(h_step.node_options[cur_ver], "#DC143C") # изменение цвета по маршруту
h_step.node_options[cur_ver] += ', borderWidth: 3, color: {border: "#DC143C", background: "#1E90FF", highlight: { border: "#DC143C" }}'; # изменение цвета границы по маршруту
vertex_mark[cur_ver] = False # вершина просмотрена
while mass[cur_ver]: # пока есть смежные вершины
# h_step.edge_options[(cur_ver, mass[cur_ver][0])] += replace_color(h_step.edge_options[(cur_ver, mass[cur_ver][0])], "#DC143C") # подкрашиваем ребро
if vertex_mark[mass[cur_ver][0]] == None: # МОЖЕТ БЫТЬ ПЕТЛЯ or vertex_mark[mass[cur_ver][0]] == False
h_step.edge_options[(cur_ver, mass[cur_ver][0])] += ', "color": "#DC143C", width: 3' # подкрашиваем ребро
if vertex_mark[mass[cur_ver][0]] == None:
print(f' Переходим к смежной вершине: {mass[cur_ver][0]}')
route.append(cur_ver) # добавляем вершину в маршрут
# переходим к первой смежной вершине
if not dfs(cur_ver, mass[cur_ver][0]): # обнаружен контур
return False
print(f' Возвращаемся к вершине {cur_ver}')
h_step.text = f'<p class="mb-2 text-gray-500 dark:text-gray-400">Возвращаемся к вершине {cur_ver}</p>' + h_step.text
print(f' Текущая вершина: {cur_ver}')
mass[cur_ver].pop(0) # удаляем просмотренную смежную вершину
elif vertex_mark[mass[cur_ver][0]]:
mass[cur_ver].pop(0) # удаляем просмотренную смежную вершину
else:
return False # обнаружен контур
print(f'Смежных непомеченных вершин нет, помещаем в стек вершину {cur_ver}')
vertex_mark[cur_ver] = True # определён порядок вершины
stack.append(cur_ver) # помещаем вершину в стек
vertex.remove(cur_ver) # исключаем вершину для повторного просмотра
for ver in route:
h_step.text += f'{ver}->'
if route:
route.pop()
h_step.text += f'{cur_ver}</p><p class="mb-2 text-gray-500 dark:text-gray-400">Вершина {cur_ver} не имеет смежных вершин, добавляем её в стек {stack}</p>' # последний текст шага
else:
h_step.text = f'<p class="mb-2 text-gray-500 dark:text-gray-400">Возвращаемся к вершине {cur_ver}</p><p class="mb-2 text-gray-500 dark:text-gray-400">Некуда шагать!</p><p class="mb-2 text-gray-500 dark:text-gray-400">Вершина {cur_ver} не имеет смежных вершин, добавляем её в стек {stack}</p>' # последний текст шага
h_step.step_label = f'Добавление вершины x<sub>{cur_ver}</sub> в стек' # название шага
h_step.node_options[cur_ver] += ', borderWidth: 1, "color": "#00FA9A"' # изменение цвета
new_step = copy.deepcopy(h_step)
h_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">Маршрут обхода: ' # текст шага
if prev_ver != cur_ver and (prev_ver, cur_ver) in edges:
h_step.edge_options[(prev_ver, cur_ver)] += ', "color": "#1E90FF", width: 1' # возвращаем цвет ребру
# print(new_step.edge_options)
steps.append(new_step) # добавляем шаг в список
new_step = Step(True, True) # создаём новый шаг
return True
# инициализация
size_of_matrix = len(matrix) # получаем размер матрицы
for i in range(size_of_matrix):
# словарь соответствия исходных вершин меткам
vertex_mark.update({i: None})
# формирование множеста непомеченных вершин
vertex.append(i)
# формирование массива смежных вершин
neighbor = list() # смежные вершины
for j in range(size_of_matrix):
if matrix[i][j] == 1:
neighbor.append(j)
mass.append(neighbor)
edges = get_edges(matrix) # список рёбер
all_vertex = vertex.copy()
print(f'Вершины: {all_vertex}')
# исходный граф
first_step = Step(True, True) # создаём первый шаг (исходный граф)
first_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">Это граф по введённой матрице</p>' # текст шага
first_step.nodes = all_vertex # список вершин
first_step.edges = edges # список ребер
# общие опции для рёбер
for edge in edges.keys():
first_step.edge_options[edge] = 'label: "1"'
first_step.edge_options[edge] += ', "color": "#1E90FF"'
print(f'рёбра: {first_step.edge_options}')
for i in all_vertex: # метки для вершин
first_step.node_options[i] = f'label: "x{i}"'
first_step.node_options[i] += ', shape: "circle"'
first_step.node_options[i] += ', "color": "#1E90FF"'
# выбор начальной вершины обхода
h_step = copy.deepcopy(first_step) # создаём вспомогательный объект (шаг)
print(vertex)
while vertex:
new_step = copy.deepcopy(first_step) # создаём первый шаг
h_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">Маршрут обхода: ' # текст шага
if not dfs(0, vertex[0]): # запуск алгоритма
loop = True
print('Выполнение алгоритма прервано из-за наличия контура')
break
print(f'Вершины в стеке:', list(map(lambda el: el, stack)))
if not loop:
print('Алгоритм успешно завершен')
result_step = copy.deepcopy(first_step)
result_step.text = f'<p class="mb-2 text-gray-500 dark:text-gray-400">Стек - {stack} ({stack[-1]} - вершина стека)</p>'
result_step.text += '<p class="mb-2 text-gray-500 dark:text-gray-400">Это граф, разбитый на уровни</p>' # текст шага
stack.reverse() # переворачиваем список для следования вершин по уровням
for ver in stack: # установка уровней для вершин
result_step.node_options[ver] = f'label: "x{ver}"'
result_step.node_options[ver] += ', shape: "circle"'
result_step.node_options[ver] += ', "color": "#1E90FF"'
result_step.node_options[ver] += f', level: {stack.index(ver)}'
neighbor_ver = [] # пары вершин соседних уровней
for i in range(len(stack)-1):
neighbor_ver.append(tuple([stack[i], stack[i+1]]))
print(f'Пары смежных вершин: {neighbor_ver}')
result_step.general_options += ', layout: { hierarchical: { direction: "LR", levelSeparation: 100} }'
flag = True
for edge in edges.keys():
# result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCCW", "forceDirection": "none" }, width: 1'
if edge in neighbor_ver:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "dynamic", roundness: 0 }, width: 1'
elif flag:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCW", roundness: 0.5 }, width: 1'
flag = False
else:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCCW", roundness: 0.5 }, width: 1'
flag = True
alg_result.append(result_step)
else:
print('ОШИБКА')
result_step = Step(True, True)
result_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400"">АЛГОРИТМ ПРЕРВАН ИЗ-ЗА НАЛИЧИЯ КОНТУРА В ГРАФЕ!</p>' # текст шага
alg_result.append(result_step)
# добавление таблицы в исходные данные
alg_input = Step(True, True, True)
alg_input.text = copy.deepcopy(first_step.text)
alg_input.nodes = copy.deepcopy(first_step.nodes)
alg_input.edges = copy.deepcopy(first_step.edges)
alg_input.edge_options = copy.deepcopy(first_step.edge_options)
alg_input.node_options = copy.deepcopy(first_step.node_options)
first_line = []
first_line.append('')
for i in range(size_of_matrix):
first_line.append(f'x<sub>{i}</sub>')
alg_input.matrix.append(list(first_line))
for i in range(size_of_matrix):
next_line = []
next_line.append(f'x<sub>{i}</sub>')
next_line += (list(matrix[i]))
alg_input.matrix.append(list(next_line))
for i in range(1, size_of_matrix+1):
alg_input.matrix[i][i] = -1
return [ alg_input, steps, alg_result ]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
# топологическая соритровка, алгоритм Демукрона
# диалог с пользователем
def demukron(matrix):
vertex_level = dict() # объявление пустого словаря (соотв. вершин уровням)
vertex = set() # объявление пустого множества (вершины без уровня)
all_vertex = [] # список вершин
edges = [] # список рёбер
steps = [] # список шагов
alg_result = [] # шаг-результат
# реализация алгоритма
def dm(vertex):
step = Step(False, True, True) # создание первого шага
# формирование исходной матрицы
first_line = []
first_line.append('')
for i in range(size_of_matrix):
first_line.append(f'x<sub>{i}</sub>')
step.matrix.append(list(first_line))
for i in range(size_of_matrix):
next_line = []
next_line.append(f'x<sub>{i}</sub>')
next_line += (list(matrix[i]))
step.matrix.append(list(next_line))
for i in range(1, size_of_matrix+1):
step.matrix[i][i] = -1
# формирование уровня
level = 0
while vertex:
step = copy.deepcopy(step)
step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">'
flag = False # уровень отсутствует
level_v = set() # вершины формируемого уровня
for i in vertex: # просмотр столбца матрицы
sum = 0
# просмотр входящих вершин
for j in range(len(matrix)):
sum += matrix[j][i]
if sum == 0:
level_v.add(i) # добавление вершины в уровень
vertex_level[i] = level # обновление уровня вершины
flag = True # уровень найден
if flag:
print(f'Вершины {level} уровня: ', set(map(lambda el: el, level_v)))
else:
return False # уровень не сформирован
for i in level_v:
matrix[i] = list(map(lambda el: 0, matrix[i])) # удаление(зануление) строки
# удаление строки
for ver in level_v:
for i in range(1, size_of_matrix+1):
step.matrix[ver+1][i] = -1
step.text += f'Вершина x<sub>{ver}</sub> не имеет входящих рёбер<br/>'
step.text += f'Формируем уровень N<sub>{level}</sub> = ' + '{ '
for ver in level_v:
step.text += f'x<sub>{ver}</sub> '
step.text += '}<br/>'
for ver in level_v:
step.text += f'Порядковая функция O(x<sub>{ver}</sub>) = {level}<br/>'
step.text += '</p>'
step.step_label = f'Формирование уровня N <sub>{level}</sub>'
steps.append(step)
print(f'матрица {matrix}')
vertex -= level_v # исключение вершин с определённым уровнем
level += 1
return True
# инициализация
for i in range(len(matrix)):
# словарь соответствия исходных вершин уровням
vertex_level.update({i: None})
# формирование множеста вершин без уровня
vertex.add(i)
edges = get_edges(matrix) # список рёбер
all_vertex = vertex.copy() # список вершин
# исходный граф
alg_input = Step(True, True, True) # создаём первый шаг (исходный граф)
alg_input.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">Это граф по введённой матрице</p>' # текст шага
alg_input.nodes = all_vertex # список вершин
alg_input.edges = edges # список ребер
# общие опции для рёбер
for edge in edges.keys():
alg_input.edge_options[edge] = 'label: "1"'
alg_input.edge_options[edge] += ', "color": "#1E90FF"'
print(f'рёбра: {alg_input.edge_options}')
for i in all_vertex: # метки для вершин
alg_input.node_options[i] = f'label: "x{i}"'
alg_input.node_options[i] += ', shape: "circle"'
alg_input.node_options[i] += ', "color": "#1E90FF"'
# добавление таблицы в исходные данные
size_of_matrix = len(matrix)
first_line = []
first_line.append('')
for i in range(size_of_matrix):
first_line.append(f'x<sub>{i}</sub>')
alg_input.matrix.append(list(first_line))
for i in range(size_of_matrix):
next_line = []
next_line.append(f'x<sub>{i}</sub>')
next_line += (list(matrix[i]))
alg_input.matrix.append(list(next_line))
for i in range(1, size_of_matrix+1):
alg_input.matrix[i][i] = -1
res = dm(vertex) # запуск алгоритма
if res:
print('Алгоритм успешно завершен')
print(f'Вершины по уровням: {vertex_level}')
result_step = copy.deepcopy(alg_input)
result_step.matrix = []
result_step.text = f'<p class="mb-2 text-gray-500 dark:text-gray-400">Разделение вершин по уровням - {vertex_level})</p>'
result_step.text += '<p class="mb-2 text-gray-500 dark:text-gray-400">Это граф, разбитый на уровни</p>' # текст шага
for ver, level in vertex_level.items(): # установка уровней для вершин
result_step.node_options[ver] = f'label: "x{ver}"'
result_step.node_options[ver] += ', shape: "circle"'
result_step.node_options[ver] += ', "color": "#1E90FF"'
result_step.node_options[ver] += f', level: {level}'
neighbor_ver = [] # пары вершин соседних уровней
sorted_levels = sorted(set(vertex_level.values())) # Получение уникальных значений уровней и их сортировка
for level in sorted_levels[:-1]: # Проход по уровням, исключая последний
current_level_vertices = [vertex for vertex, vertex_level in vertex_level.items() if vertex_level == level] # Вершины текущего уровня
next_level_vertices = [vertex for vertex, vertex_level in vertex_level.items() if vertex_level == level + 1] # Вершины следующего уровня
neighbor_pairs = [(v1, v2) for v1 in current_level_vertices for v2 in next_level_vertices] # Пары соседних вершин
neighbor_ver.extend(neighbor_pairs) # Добавление пар в список
result_step.general_options += ', layout: { hierarchical: { direction: "LR", levelSeparation: 100, nodeSpacing: 150} }'
print(edges)
print(neighbor_ver)
flag = True
for edge in edges.keys():
# result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCCW", "forceDirection": "none" }, width: 1'
if edge in neighbor_ver:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "dynamic", roundness: 0 }, width: 1'
elif flag:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCW", roundness: 0.5 }, width: 1'
flag = False
else:
result_step.edge_options[edge] = 'smooth: { "enabled": true, "type": "curvedCCW", roundness: 0.5 }, width: 1'
flag = True
sorted_dict = defaultdict(list)
for vertex, level in vertex_level.items():
sorted_dict[level].append(vertex)
sorted_dict = dict(sorted(sorted_dict.items()))
result_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400">'
for level, ver in sorted_dict.items():
result_step.text += f'Уровень N<sub>{level}</sub> = ' + '{ '
for x in (ver):
result_step.text += f'x<sub>{x}</sub> '
result_step.text += '}<br/>'
result_step.text += '</p>'
alg_result.append(result_step)
else:
print('Выполнение алгоритма прервано из-за наличия контура')
result_step = Step()
result_step.text = '<p class="mb-2 text-gray-500 dark:text-gray-400"">АЛГОРИТМ ПРЕРВАН ИЗ-ЗА НАЛИЧИЯ КОНТУРА В ГРАФЕ!</p>' # текст шага
alg_result.append(result_step)
return [ alg_input, steps, alg_result ] | VelandMerl/graph_bauman_centuary_presents | Algorithms/Topological_Sort.py | Topological_Sort.py | py | 21,006 | python | ru | code | 1 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "Algorithms.Usefull_elements.Step",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "Algorithms.Usefull_elements.get_edges",
"line_number": 79,
"usage_type": "call"
},
{
... |
12560528102 | from flask import Flask, jsonify, request, redirect, Response, render_template
import requests
from config import api_key, cam_names, rover_det
app = Flask(__name__)
@app.route('/')
def home():
return render_template("index.html")
@app.route('/rover', methods = ['POST'])
def rover():
rov_name = request.form['optrover']
return redirect(f'/{rov_name}.html?emsg=OK')
@app.route('/<rov_name>.html')
def pic_criteria(rov_name):
err_msg = request.args.get('emsg')
if err_msg == None:
err_msg = ""
rov_det = rover_det[rov_name]
rov_pic = rov_det["rov_pic"]
st_date = rov_det["landing_date"]
end_date = rov_det["max_date"]
cameras = rov_det["cameras"]
cam_list = {}
for cam in cameras:
cam_list.update({cam:cam_names[cam]})
return render_template('pic_criteria.html', rname=rov_name, rpic=rov_pic, sdat=st_date, edat=end_date, clist=cam_list, emsg=err_msg)
@app.route('/img_criteria', methods = ['POST'])
def imgcrit():
rov_name = request.args.get('rov_name')
form_date = request.form['date']
try:
form_cam = request.form['optcam']
except:
form_cam = ""
return redirect(f'/list.html?rov_name={rov_name}&img_date={form_date}&sel_cam={form_cam}')
@app.route('/list.html')
def img_list():
opts=""
rov_name = request.args.get('rov_name')
img_date = request.args.get('img_date')
sel_cam = request.args.get('sel_cam')
opts = "earth_date=" + img_date
if sel_cam != "":
opts += "&camera=" + sel_cam
opts += "&api_key=" + api_key
emsg = ""
api_list = requests.get(f'https://api.nasa.gov/mars-photos/api/v1/rovers/{rov_name}/photos?{opts}')
if api_list.text == "":
emsg = 'No images for that camera and date. Please try again.'
return redirect(f'/{rov_name}.html?emsg="No images for that camera and date. Please try again."')
else:
api_list = eval(api_list.text)
img_list = api_list["photos"]
max_rows = len(img_list)
rend_list = {}
for i in range(1, max_rows+1):
row_cam = img_list[i-1]["camera"]["full_name"]
row_url = img_list[i-1]["img_src"]
row_date = img_list[i-1]["earth_date"]
dictval = {
"camera":row_cam,
"img_url":row_url,
"earth_date":row_date,
}
rend_list.update({i:dictval})
return render_template('list.html', rname=rov_name, ilist=rend_list, alist=api_list)
if __name__ == '__main__':
app.run(debug=True) | brianr0922/mars_rover | main.py | main.py | py | 2,308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
72393817384 | import sys
import time
import random
import pygame as pg
pg.init()
WIDTH, HEIGHT = 800, 600
FPS = 60
window = pg.display.set_mode((WIDTH, HEIGHT))
clock = pg.time.Clock()
"""Добавление иконки и названия игры"""
pg.display.set_caption('Flappy bird')
pg.display.set_icon(pg.image.load(r'images/icon.png'))
"""Загрузка изображений"""
img_bg = pg.image.load(r'images/background.png')
img_bird = pg.image.load(r'images/bird.png')
img_pipe_top = pg.image.load(r'images/pipe_top.png')
img_pipe_bottom = pg.image.load(r'images/pipe_bottom.png')
"""Загрузка звука"""
pg.mixer.music.load(r'sounds/music.mp3') # Музыка загружена, но не воспроизводится
pg.mixer.music.set_volume(0.1) # Громкость музыки
pg.mixer.music.play(-1) # Запуск звука -1 для зацикленности музыки
sound_fall = pg.mixer.Sound(r'sounds/fall.wav')
"""Механика персонажа"""
player_position_y, player_speed_y, player_acceleration_y = HEIGHT // 2, 0, 0
player = pg.Rect(WIDTH // 3, player_position_y, 34, 24)
frame = 0
state = 'start'
"""Загрузка шрифта"""
min_font = pg.font.Font(None, 35)
max_font = pg.font.Font(None, 80)
pipes = list()
backgrounds = list()
lives = 3
scores = 0
pipes_scores = list()
"""Скорость движения труб"""
pipe_speed = 3
"""Добавление первого фона перед циклом"""
backgrounds.append(pg.Rect(0, 0, 288, 600))
play = True
while play:
for event in pg.event.get():
if event.type == pg.QUIT:
play = False
screen = window.get_rect()
"""Изменение номера кадра"""
frame = (frame + 0.2) % 4
"""Перемещение труб"""
for pipe in reversed(pipes):
pipe.x -= pipe_speed # Вместо 3 отнимаем значение pipe_speed
"""Уничтожение игры если труба вышла за экран"""
if pipe.right < screen.left:
pipes.remove(pipe)
"""Перемещение фона"""
for bg in reversed(backgrounds):
bg.x -= pipe_speed // 2 # Для перемещения фона обязательно целочисленное деление
"""Уничтожение игры если труба вышла за экран"""
if bg.right < screen.left:
backgrounds.remove(bg)
if backgrounds[-1].right <= screen.right:
backgrounds.append(pg.Rect(backgrounds[-1].right, 0, 288, 600))
"""Обработка нажатия на левую кнопку мыши"""
press = pg.mouse.get_pressed()
keys = pg.key.get_pressed()
click = press[0] or keys[pg.K_SPACE]
if click:
player_acceleration_y = -2
else:
player_acceleration_y = 0
"""Работа с состояниями игры"""
if state == 'start':
if click:
state = 'play'
"""Обновление положения, скорости и ускорения"""
player_position_y += (
HEIGHT // 2 - player_position_y)
player.y = player_position_y
player_speed_y = 0
player_acceleration_y = 0
elif state == 'play':
"""Механика падения"""
player_position_y += player_speed_y
player_speed_y = (player_speed_y + player_acceleration_y + 1) * 0.98
player.y = player_position_y
"""Проверка списка труб"""
if len(pipes) == 0 or pipes[-1].x < screen.width - 200:
correction = random.randint(-60, 60)
pipes.append(pg.Rect(screen.width, screen.top, 52, 200 + correction))
pipes.append(pg.Rect(screen.width, screen.bottom - 200 + correction, 52, 200))
"""Отслеживание падения птички вверх, либо вниз"""
if player.top <= screen.top or player.bottom >= screen.bottom:
sound_fall.play() # Проигрывание звука падения один раз
state = 'fall'
time.sleep(1)
"""Столкновение птички с трубами"""
for pipe in pipes:
if player.colliderect(pipe):
sound_fall.play() # Проигрывание звука падения один раз
state = 'fall'
pipes_scores.clear()
scores = 0
time.sleep(1)
"""Отслеживание перелета через трубу"""
if pipe.right <= player.left and pipe not in pipes_scores:
pipes_scores.append(pipe)
scores += 5
pipe_speed = 3 + scores // 100 # Каждые 100 очков к скорости будет прибавляться 1
elif state == 'fall':
pipes.clear()
"""Вычитание жизней"""
lives -= 1
if lives > 0:
state = 'start'
else:
state = 'game over'
else: # Game Over
play = False
"""Отрисовка"""
# window.fill(pg.Color('black')) # Нет необходимости закрашивать экран
for bg in backgrounds:
window.blit(img_bg, bg)
"""Отрисовка труб (обязательно перед игроком для того, чтобы при столкновении птица была на переднем фоне"""
for pipe in pipes:
"""Отображение труб в виде картинки"""
if pipe.y == 0:
rect = img_pipe_top.get_rect(bottomleft=pipe.bottomleft)
window.blit(img_pipe_top, rect)
else:
rect = img_pipe_bottom.get_rect(topleft=pipe.topleft)
window.blit(img_pipe_bottom, rect)
image = img_bird.subsurface(34 * int(frame), 0, 34, 24)
"""Наклон птички вверх и вниз"""
image = pg.transform.rotate(image, -player_speed_y * 2)
window.blit(image, player)
"""Отрисовка очков и жизней"""
score_text = min_font.render(f'Очки: {scores}', True, pg.Color('black'))
window.blit(score_text, (screen.left + 10, screen.top + 10))
lives_text = min_font.render(f'Жизни: {lives}', True, pg.Color('black'))
window.blit(lives_text, (screen.left + score_text.get_rect().width + 30, screen.top + 10))
pg.display.update()
clock.tick(FPS)
pg.quit()
| ArtemTroshkin/FlappyBird | main.py | main.py | py | 6,760 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Cl... |
43810341653 | import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
distance = 40
def create_poles(poles):
y = np.zeros(distance)
for p in poles:
y[p] = 1
x = range(distance)
plt.stem(x, y, use_line_collection=True)
def plot_robot_measurement(poles, pos, gs):
plt.subplot(gs[2:3, 0])
plt.yticks([])
plt.xticks([])
plt.xlim([pos - 1.5, pos + 3.5])
plt.ylim([-0.1, 1.1])
plt.plot([pos + 0.2], [0.6], 'g<', markersize=40)
plt.plot([pos], [0.4], 'bo', markersize=40)
create_poles(poles)
def plot_simple(particles, poles, pos=None, j=None):
gs = gridspec.GridSpec(3, 1)
# Plot Main Display
plt.subplot(gs[0:2, 0])
if j is not None:
plt.title(str(j))
plt.yticks([])
plt.xlim([-0.9, distance + 0.9])
for particle in particles:
if particle.belief == 0:
continue
plt.plot([particle.pos], [0.5], '*', color=particle.color)
create_poles(poles)
# Plot Robot Measurement
if pos is not None:
plot_robot_measurement(poles, pos, gs)
plt.show(block=True)
def plot(
particles,
poles,
pos,
resampled_particles=None,
j=None,
autorun=False):
gs = gridspec.GridSpec(3, 1)
# Plot Main Display
plt.subplot(gs[0:2, 0])
if j is not None:
plt.title(str(j))
plt.yticks([])
plt.xlim([-0.9, distance + 0.9])
for particle in particles:
plt.plot([particle.pos], [0.5], 'b*', label="Particles")
if resampled_particles is not None:
for particle in resampled_particles:
plt.plot([particle.pos], [0.25], 'g*', label="Resampled")
plt.plot([pos], [0.65], 'r*', label="Robot")
# Remove duplicates in legend (because of way I plotted one at a time.
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc='upper right')
create_poles(poles)
# Plot Robot Measurement
if pos is not None:
plot_robot_measurement(poles, pos, gs)
if autorun:
if j == 0:
# Not sure why this is needed but it is.
plt.pause(1)
plt.show(block=False)
plt.pause(1)
plt.close()
else:
plt.show()
def print_particle_error(robot, particles):
weights = []
for particle in particles:
weights += [particle.weight]
best_particle = weights.index(max(weights))
print("Error: " +
str(round(abs(particles[best_particle].pos - robot.pos), 2)))
print("Weight Sum: " + str(sum(weights)))
print()
| WuStangDan/localization | assignment3/sim/plot.py | plot.py | py | 2,643 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.stem",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
73744164585 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
import ielex.lexicon.models as models
def forwards_func(apps, schema_editor):
print('Updating clades for all languages..')
Language = apps.get_model('lexicon', 'Language')
for l in Language.objects.all():
models.Language.updateClades(l)
def reverse_func(apps, schema_editor):
LanguageClade = apps.get_model('lexicon', 'LanguageClade')
print('Deleting all LanguageClade entries..')
LanguageClade.objects.delete()
class Migration(migrations.Migration):
dependencies = [('lexicon', '0048_auto_20160502_1338')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| lingdb/CoBL-public | ielex/lexicon/migrations/0049_update_languageClade.py | 0049_update_languageClade.py | py | 748 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "ielex.lexicon.models.Language.updateClades",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ielex.lexicon.models.Language",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ielex.lexicon.models",
"line_number": 11,
"usage_type": "na... |
74481651624 | from copy import deepcopy
from config.irl_config import IRLConfig
from config.rl_config import RLConfig
from env_design.envs import ENV_MAKERS
class ConfigBuilder(dict):
def __init__(
self,
num_gpus=0,
num_workers=0,
rl_algo=None,
irl_algo=None,
env=None,
# additional overriding args
**kwargs
):
super(ConfigBuilder, self).__init__()
self.rl_algo = rl_algo
self.irl_algo = irl_algo
self.rl_config = RLConfig(env, rl_algo, irl_algo)
self.irl_config = IRLConfig(env, irl_algo)
self.update(
num_gpus=num_gpus,
num_workers=num_workers,
env=env,
)
self.cli_args = kwargs
def build_base_rl(
self,
env_params,
**kwargs,
):
base = self.rl_config.rl_config.pre_build()
base.update(
**self
)
if env_params is not None:
base.update(env_config=env_params.get())
base.update(**kwargs)
for cli_arg in self.cli_args:
if cli_arg in base:
base[cli_arg] = self.cli_args[cli_arg]
return base
def build_base_irl(
self
):
base = self.irl_config.pre_build()
base.update(
**self
)
for cli_arg in self.cli_args:
if cli_arg in base:
base[cli_arg] = self.cli_args[cli_arg]
base.postprocess()
return base
def build(
self,
env_params=None, # Mandatory, to ensure proper initialization
*args,
**kwargs
):
new = deepcopy(self)
rl = self.rl_config.pre_build()
if env_params is not None:
rl.update(
env_config=env_params.get()
)
irl = self.build_base_irl()
new.update(**rl)
new.update(**irl)
return dict(new)
| Ojig/Environment-Design-for-IRL | ed_airl/config/builder.py | builder.py | py | 2,019 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.rl_config.RLConfig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "config.irl_config.IRLConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 80,
"usage_type": "call"
}
] |
15744675717 | import functools
import hashlib
import os
import sys
import time
from typing import NamedTuple
from git_command import git_require
from git_command import GitCommand
from git_config import RepoConfig
from git_refs import GitRefs
_SUPERPROJECT_GIT_NAME = "superproject.git"
_SUPERPROJECT_MANIFEST_NAME = "superproject_override.xml"
class SyncResult(NamedTuple):
"""Return the status of sync and whether caller should exit."""
# Whether the superproject sync was successful.
success: bool
# Whether the caller should exit.
fatal: bool
class CommitIdsResult(NamedTuple):
"""Return the commit ids and whether caller should exit."""
# A dictionary with the projects/commit ids on success, otherwise None.
commit_ids: dict
# Whether the caller should exit.
fatal: bool
class UpdateProjectsResult(NamedTuple):
"""Return the overriding manifest file and whether caller should exit."""
# Path name of the overriding manifest file if successful, otherwise None.
manifest_path: str
# Whether the caller should exit.
fatal: bool
class Superproject:
"""Get commit ids from superproject.
Initializes a local copy of a superproject for the manifest. This allows
lookup of commit ids for all projects. It contains _project_commit_ids which
is a dictionary with project/commit id entries.
"""
def __init__(
self,
manifest,
name,
remote,
revision,
superproject_dir="exp-superproject",
):
"""Initializes superproject.
Args:
manifest: A Manifest object that is to be written to a file.
name: The unique name of the superproject
remote: The RemoteSpec for the remote.
revision: The name of the git branch to track.
superproject_dir: Relative path under |manifest.subdir| to checkout
superproject.
"""
self._project_commit_ids = None
self._manifest = manifest
self.name = name
self.remote = remote
self.revision = self._branch = revision
self._repodir = manifest.repodir
self._superproject_dir = superproject_dir
self._superproject_path = manifest.SubmanifestInfoDir(
manifest.path_prefix, superproject_dir
)
self._manifest_path = os.path.join(
self._superproject_path, _SUPERPROJECT_MANIFEST_NAME
)
git_name = hashlib.md5(remote.name.encode("utf8")).hexdigest() + "-"
self._remote_url = remote.url
self._work_git_name = git_name + _SUPERPROJECT_GIT_NAME
self._work_git = os.path.join(
self._superproject_path, self._work_git_name
)
# The following are command arguemnts, rather than superproject
# attributes, and were included here originally. They should eventually
# become arguments that are passed down from the public methods, instead
# of being treated as attributes.
self._git_event_log = None
self._quiet = False
self._print_messages = False
def SetQuiet(self, value):
"""Set the _quiet attribute."""
self._quiet = value
def SetPrintMessages(self, value):
"""Set the _print_messages attribute."""
self._print_messages = value
@property
def project_commit_ids(self):
"""Returns a dictionary of projects and their commit ids."""
return self._project_commit_ids
@property
def manifest_path(self):
"""Returns the manifest path if the path exists or None."""
return (
self._manifest_path if os.path.exists(self._manifest_path) else None
)
def _LogMessage(self, fmt, *inputs):
"""Logs message to stderr and _git_event_log."""
message = f"{self._LogMessagePrefix()} {fmt.format(*inputs)}"
if self._print_messages:
print(message, file=sys.stderr)
self._git_event_log.ErrorEvent(message, fmt)
def _LogMessagePrefix(self):
"""Returns the prefix string to be logged in each log message"""
return (
f"repo superproject branch: {self._branch} url: {self._remote_url}"
)
def _LogError(self, fmt, *inputs):
"""Logs error message to stderr and _git_event_log."""
self._LogMessage(f"error: {fmt}", *inputs)
def _LogWarning(self, fmt, *inputs):
"""Logs warning message to stderr and _git_event_log."""
self._LogMessage(f"warning: {fmt}", *inputs)
def _Init(self):
"""Sets up a local Git repository to get a copy of a superproject.
Returns:
True if initialization is successful, or False.
"""
if not os.path.exists(self._superproject_path):
os.mkdir(self._superproject_path)
if not self._quiet and not os.path.exists(self._work_git):
print(
"%s: Performing initial setup for superproject; this might "
"take several minutes." % self._work_git
)
cmd = ["init", "--bare", self._work_git_name]
p = GitCommand(
None,
cmd,
cwd=self._superproject_path,
capture_stdout=True,
capture_stderr=True,
)
retval = p.Wait()
if retval:
self._LogWarning(
"git init call failed, command: git {}, "
"return code: {}, stderr: {}",
cmd,
retval,
p.stderr,
)
return False
return True
def _Fetch(self):
"""Fetches a superproject for the manifest based on |_remote_url|.
This runs git fetch which stores a local copy the superproject.
Returns:
True if fetch is successful, or False.
"""
if not os.path.exists(self._work_git):
self._LogWarning("git fetch missing directory: {}", self._work_git)
return False
if not git_require((2, 28, 0)):
self._LogWarning(
"superproject requires a git version 2.28 or later"
)
return False
cmd = [
"fetch",
self._remote_url,
"--depth",
"1",
"--force",
"--no-tags",
"--filter",
"blob:none",
]
# Check if there is a local ref that we can pass to --negotiation-tip.
# If this is the first fetch, it does not exist yet.
# We use --negotiation-tip to speed up the fetch. Superproject branches
# do not share commits. So this lets git know it only needs to send
# commits reachable from the specified local refs.
rev_commit = GitRefs(self._work_git).get(f"refs/heads/{self.revision}")
if rev_commit:
cmd.extend(["--negotiation-tip", rev_commit])
if self._branch:
cmd += [self._branch + ":" + self._branch]
p = GitCommand(
None,
cmd,
cwd=self._work_git,
capture_stdout=True,
capture_stderr=True,
)
retval = p.Wait()
if retval:
self._LogWarning(
"git fetch call failed, command: git {}, "
"return code: {}, stderr: {}",
cmd,
retval,
p.stderr,
)
return False
return True
def _LsTree(self):
"""Gets the commit ids for all projects.
Works only in git repositories.
Returns:
data: data returned from 'git ls-tree ...' instead of None.
"""
if not os.path.exists(self._work_git):
self._LogWarning(
"git ls-tree missing directory: {}", self._work_git
)
return None
data = None
branch = "HEAD" if not self._branch else self._branch
cmd = ["ls-tree", "-z", "-r", branch]
p = GitCommand(
None,
cmd,
cwd=self._work_git,
capture_stdout=True,
capture_stderr=True,
)
retval = p.Wait()
if retval == 0:
data = p.stdout
else:
self._LogWarning(
"git ls-tree call failed, command: git {}, "
"return code: {}, stderr: {}",
cmd,
retval,
p.stderr,
)
return data
def Sync(self, git_event_log):
"""Gets a local copy of a superproject for the manifest.
Args:
git_event_log: an EventLog, for git tracing.
Returns:
SyncResult
"""
self._git_event_log = git_event_log
if not self._manifest.superproject:
self._LogWarning(
"superproject tag is not defined in manifest: {}",
self._manifest.manifestFile,
)
return SyncResult(False, False)
_PrintBetaNotice()
should_exit = True
if not self._remote_url:
self._LogWarning(
"superproject URL is not defined in manifest: {}",
self._manifest.manifestFile,
)
return SyncResult(False, should_exit)
if not self._Init():
return SyncResult(False, should_exit)
if not self._Fetch():
return SyncResult(False, should_exit)
if not self._quiet:
print(
"%s: Initial setup for superproject completed." % self._work_git
)
return SyncResult(True, False)
def _GetAllProjectsCommitIds(self):
"""Get commit ids for all projects from superproject and save them.
Commit ids are saved in _project_commit_ids.
Returns:
CommitIdsResult
"""
sync_result = self.Sync(self._git_event_log)
if not sync_result.success:
return CommitIdsResult(None, sync_result.fatal)
data = self._LsTree()
if not data:
self._LogWarning(
"git ls-tree failed to return data for manifest: {}",
self._manifest.manifestFile,
)
return CommitIdsResult(None, True)
# Parse lines like the following to select lines starting with '160000'
# and build a dictionary with project path (last element) and its commit
# id (3rd element).
#
# 160000 commit 2c2724cb36cd5a9cec6c852c681efc3b7c6b86ea\tart\x00
# 120000 blob acc2cbdf438f9d2141f0ae424cec1d8fc4b5d97f\tbootstrap.bash\x00 # noqa: E501
commit_ids = {}
for line in data.split("\x00"):
ls_data = line.split(None, 3)
if not ls_data:
break
if ls_data[0] == "160000":
commit_ids[ls_data[3]] = ls_data[2]
self._project_commit_ids = commit_ids
return CommitIdsResult(commit_ids, False)
def _WriteManifestFile(self):
"""Writes manifest to a file.
Returns:
manifest_path: Path name of the file into which manifest is written
instead of None.
"""
if not os.path.exists(self._superproject_path):
self._LogWarning(
"missing superproject directory: {}", self._superproject_path
)
return None
manifest_str = self._manifest.ToXml(
groups=self._manifest.GetGroupsStr(), omit_local=True
).toxml()
manifest_path = self._manifest_path
try:
with open(manifest_path, "w", encoding="utf-8") as fp:
fp.write(manifest_str)
except OSError as e:
self._LogError("cannot write manifest to : {} {}", manifest_path, e)
return None
return manifest_path
def _SkipUpdatingProjectRevisionId(self, project):
"""Checks if a project's revision id needs to be updated or not.
Revision id for projects from local manifest will not be updated.
Args:
project: project whose revision id is being updated.
Returns:
True if a project's revision id should not be updated, or False,
"""
path = project.relpath
if not path:
return True
# Skip the project with revisionId.
if project.revisionId:
return True
# Skip the project if it comes from the local manifest.
return project.manifest.IsFromLocalManifest(project)
def UpdateProjectsRevisionId(self, projects, git_event_log):
"""Update revisionId of every project in projects with the commit id.
Args:
projects: a list of projects whose revisionId needs to be updated.
git_event_log: an EventLog, for git tracing.
Returns:
UpdateProjectsResult
"""
self._git_event_log = git_event_log
commit_ids_result = self._GetAllProjectsCommitIds()
commit_ids = commit_ids_result.commit_ids
if not commit_ids:
return UpdateProjectsResult(None, commit_ids_result.fatal)
projects_missing_commit_ids = []
for project in projects:
if self._SkipUpdatingProjectRevisionId(project):
continue
path = project.relpath
commit_id = commit_ids.get(path)
if not commit_id:
projects_missing_commit_ids.append(path)
# If superproject doesn't have a commit id for a project, then report an
# error event and continue as if do not use superproject is specified.
if projects_missing_commit_ids:
self._LogWarning(
"please file a bug using {} to report missing "
"commit_ids for: {}",
self._manifest.contactinfo.bugurl,
projects_missing_commit_ids,
)
return UpdateProjectsResult(None, False)
for project in projects:
if not self._SkipUpdatingProjectRevisionId(project):
project.SetRevisionId(commit_ids.get(project.relpath))
manifest_path = self._WriteManifestFile()
return UpdateProjectsResult(manifest_path, False)
@functools.lru_cache(maxsize=10)
def _PrintBetaNotice():
"""Print the notice of beta status."""
print(
"NOTICE: --use-superproject is in beta; report any issues to the "
"address described in `repo version`",
file=sys.stderr,
)
@functools.lru_cache(maxsize=None)
def _UseSuperprojectFromConfiguration():
"""Returns the user choice of whether to use superproject."""
user_cfg = RepoConfig.ForUser()
time_now = int(time.time())
user_value = user_cfg.GetBoolean("repo.superprojectChoice")
if user_value is not None:
user_expiration = user_cfg.GetInt("repo.superprojectChoiceExpire")
if (
user_expiration is None
or user_expiration <= 0
or user_expiration >= time_now
):
# TODO(b/190688390) - Remove prompt when we are comfortable with the
# new default value.
if user_value:
print(
(
"You are currently enrolled in Git submodules "
"experiment (go/android-submodules-quickstart). Use "
"--no-use-superproject to override.\n"
),
file=sys.stderr,
)
else:
print(
(
"You are not currently enrolled in Git submodules "
"experiment (go/android-submodules-quickstart). Use "
"--use-superproject to override.\n"
),
file=sys.stderr,
)
return user_value
# We don't have an unexpired choice, ask for one.
system_cfg = RepoConfig.ForSystem()
system_value = system_cfg.GetBoolean("repo.superprojectChoice")
if system_value:
# The system configuration is proposing that we should enable the
# use of superproject. Treat the user as enrolled for two weeks.
#
# TODO(b/190688390) - Remove prompt when we are comfortable with the new
# default value.
userchoice = True
time_choiceexpire = time_now + (86400 * 14)
user_cfg.SetString(
"repo.superprojectChoiceExpire", str(time_choiceexpire)
)
user_cfg.SetBoolean("repo.superprojectChoice", userchoice)
print(
"You are automatically enrolled in Git submodules experiment "
"(go/android-submodules-quickstart) for another two weeks.\n",
file=sys.stderr,
)
return True
# For all other cases, we would not use superproject by default.
return False
def PrintMessages(use_superproject, manifest):
    """Return True when error/warning messages should be printed.

    Args:
        use_superproject: option value from optparse.
        manifest: manifest to use.
    """
    if use_superproject is not None:
        # The user expressed an explicit preference; always report.
        return True
    return bool(manifest.superproject)
def UseSuperproject(use_superproject, manifest):
    """Return whether the superproject should be used.

    Args:
        use_superproject: option value from optparse.
        manifest: manifest to use.
    Returns:
        Whether the superproject should be used.
    """
    if not manifest.superproject:
        # This (sub) manifest does not have a superproject definition.
        return False
    if use_superproject is not None:
        # An explicit command-line choice always wins.
        return use_superproject
    client_value = manifest.manifestProject.use_superproject
    if client_value is not None:
        return client_value
    # No explicit choice anywhere; fall back to the stored/system config
    # (the manifest is known to define a superproject at this point).
    return _UseSuperprojectFromConfiguration()
| GerritCodeReview/git-repo | git_superproject.py | git_superproject.py | py | 17,995 | python | en | code | 267 | github-code | 36 | [
{
"api_name": "typing.NamedTuple",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.join",
... |
42778925613 | import json
import logging
from io import BytesIO
from typing import Optional
import pandas as pd
import requests
from pydantic import Field, SecretStr
from toucan_connectors.common import ConnectorStatus
from toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource
class NetExplorerDataSource(ToucanDataSource):
    # Slash-separated path of the file on the NetExplorer drive,
    # e.g. "folder/sub/report.xlsx".
    file: str
    # Sheet to read for Excel files; defaults to the first sheet (index 0).
    # NOTE(review): the default is an int, so the Optional[str] annotation is
    # loose -- pandas accepts either a sheet name or an index here.
    sheet: Optional[str] = 0
class NetExplorerConnector(ToucanConnector):
    """Connector that downloads a spreadsheet or CSV file from a NetExplorer
    instance and loads it into a pandas DataFrame."""
    data_source_model: NetExplorerDataSource
    # NOTE(review): `Title` (capitalised) is not a standard pydantic Field
    # keyword, so it is kept as extra metadata; `title` was probably intended.
    instance_url: str = Field(
        None,
        Title='Instance URL',
        placeholder='exemple.netexplorer.pro',
    )
    user: str
    password: SecretStr
    def _retrieve_token(self):
        """Log in to the NetExplorer API and return a bearer token."""
        login_url = f'https://{self.instance_url}/api/auth'
        data = json.dumps({'user': self.user, 'password': self.password.get_secret_value()})
        headers = {'Content-Type': 'application/json'}
        resp = requests.post(login_url, data=data, headers=headers)
        return resp.json()['token']
    def _retrieve_folders(self, token):
        """Fetch the folder tree; depth=-1 requests unlimited recursion."""
        folders_url = f'https://{self.instance_url}/api/folders?depth=-1'
        headers = {'Authorization': f'Bearer {token}'}
        resp = requests.get(folders_url, data={}, headers=headers)
        return resp.json()
    def _retrieve_file_id(self, folders, data_source):
        """Walk the folder tree along data_source.file and return the file id.

        Raises:
            ValueError: if any path component (or the file itself) is missing.
        """
        basedir = data_source.file.split('/')[0]
        path = data_source.file.split('/')[1:]
        _id = None
        def _search(iterate_on, compare_to, for_id=False):
            # Linear scan by name.  Implicitly returns None when nothing
            # matches; the assertions below turn that into a ValueError.
            for element in iterate_on:
                if element['name'] == compare_to:
                    return element['id'] if for_id else element['content']
        try:
            # Search among base directories
            folders = _search(folders, basedir)
            # Search among paths
            for elem in path:
                if elem.endswith(('xlsx', 'xls', 'csv')):
                    _id = _search(folders['files'], elem, True)
                    assert _id
                else:
                    folders = _search(folders['folders'], elem)
                    assert folders
        except AssertionError:
            raise ValueError('Unable to find the file')
        return _id
    def _retrieve_file(self, token, _id):
        """Download the file's raw bytes and wrap them in a BytesIO buffer."""
        download_url = f'https://{self.instance_url}/api/file/{_id}/download'
        headers = {'Authorization': f'Bearer {token}'}
        resp = requests.get(download_url, data={}, headers=headers)
        return BytesIO(resp.content)
    def _retrieve_data(self, data_source: NetExplorerDataSource) -> pd.DataFrame:
        """Authenticate, locate the requested file and parse it with pandas."""
        logging.getLogger(__name__).debug('_retrieve_data')
        # Normalise the instance URL and file path; the scheme is re-added
        # by the helper methods above.
        self.instance_url = self.instance_url.replace('https://', '').strip('/')
        data_source.file = data_source.file.strip('/')
        token = self._retrieve_token()
        folders = self._retrieve_folders(token)
        _id = self._retrieve_file_id(folders, data_source)
        data = self._retrieve_file(token, _id)
        df = pd.DataFrame()
        if data_source.file.endswith('csv'):
            df = pd.read_csv(data)
        else:
            df = pd.read_excel(data, sheet_name=data_source.sheet)
        return df
    def get_status(self) -> ConnectorStatus:
        """
        Test the Net Explorer's connexion.
        :return: a ConnectorStatus with the current status
        """
        try:
            self._retrieve_token()
            return ConnectorStatus(status=True)
        except Exception:
            return ConnectorStatus(status=False, error='Unable to connect')
| ToucanToco/toucan-connectors | toucan_connectors/net_explorer/net_explorer_connector.py | net_explorer_connector.py | py | 3,536 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "toucan_connectors.toucan_connector.ToucanDataSource",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "toucan_connectors.toucan_connector.ToucanConnector",
"line_number": 19,
... |
25966299187 |
import Tkinter as tk
import ScrolledText
import numpy as np
import matplotlib as mpl
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import sklearn.gaussian_process as skgp
import evaluatorGUI as eg
import matplotlib.pyplot as plt
import scipy.optimize
import time
def draw_figure(canvas, figure, loc=(0, 0)):
    """ Draw a matplotlib figure onto a Tk canvas
    from https://matplotlib.org/gallery/user_interfaces/embedding_in_tk_canvas_sgskip.html
    loc: location of top-left corner of figure on canvas in pixels.
    Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py
    Returns the PhotoImage backing the drawing; the CALLER must keep a
    reference to it, or Tk garbage-collects the image and it disappears.
    """
    figure_canvas_agg = FigureCanvasAgg(figure)
    figure_canvas_agg.draw()
    figure_x, figure_y, figure_w, figure_h = figure.bbox.bounds
    figure_w, figure_h = int(figure_w), int(figure_h)
    photo = tk.PhotoImage(master=canvas, width=figure_w, height=figure_h)
    # Position: convert from top-left anchor to center anchor
    canvas.create_image(loc[0] + figure_w/2, loc[1] + figure_h/2, image=photo)
    # Unfortunately, there's no accessor for the pointer to the native renderer
    # NOTE(review): tkagg.blit and the private _renderer attribute were removed
    # from modern matplotlib; this targets an old matplotlib / Python 2 stack.
    tkagg.blit(photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
    # Return a handle which contains a reference to the photo object
    # which must be kept live or else the picture disappears
    return photo
class PointSelector(tk.Frame):
    """Canvas that records mouse clicks as points in [-1, 1] x [-1, 1]."""
    def __init__(self,master):
        tk.Frame.__init__(self,master)
        # Callback invoked with the full (x, y) point lists after each click.
        self.changepoints=lambda x,y:None
        self.inputcanvas=tk.Canvas(self,width=410,height=410,borderwidth=1,relief=tk.RAISED,background="white")
        self.inputcanvas.bind("<Button-1>", self.__inputcanvasmouseclick)
        self.xlist=[]
        self.ylist=[]
        self.inputcanvas.pack(side=tk.TOP)
    def __inputcanvasmouseclick(self, event):
        """Record one click: clamp to the canvas, convert, draw, notify."""
        # Clamp the click into the drawable 5..405 pixel region.
        px = min(max(event.x, 5), 405)
        py = min(max(event.y, 5), 405)
        # Map pixel coordinates to logical coordinates in [-1, 1]
        # (y axis flipped so "up" is positive).
        xc = (px - 205) / 200.0
        yc = (205 - py) / 200.0
        self.xlist.append(xc)
        self.ylist.append(yc)
        self.lastx = xc
        self.lasty = yc
        # Mark the click with a tiny oval.
        self.inputcanvas.create_oval(px - 1, py - 1, px + 1, py + 1)
        self.changepoints(self.xlist, self.ylist)
class GPdisplay(tk.Frame):
    """Panel that fits a GP to the clicked points and plots mean +/- std."""
    def __init__(self,master):
        tk.Frame.__init__(self,master)
        self.dispcanvas=tk.Canvas(self,width=410,height=410,borderwidth=1, relief=tk.RAISED, background="white")
        self.dispcanvas.pack(side=tk.TOP)
        # Training data; replaced by updatePoints().
        self.x=[]
        self.y=[]
        # Current GaussianProcessRegressor; replaced by updateGP().
        self.gp=None
        self.log = ScrolledText.ScrolledText(self, width=50, height=15)
        self.log.pack(side=tk.TOP)
    def updatePoints(self,x,y):
        """Replace the training data and redraw."""
        self.x=x
        self.y=y
        self.updateDisplay()
    def updateGP(self,gp):
        """Replace the regressor and redraw."""
        self.gp=gp
        self.updateDisplay()
    def updateDisplay(self):
        """Refit the GP on the current points and render the posterior plot
        plus a fit summary (log marginal likelihood, params, wall time)."""
        self.log.delete(1.0,tk.END)
        # Only draw when there is both data and a regressor to fit.
        if len(self.x)>0 and self.gp is not None:
            self.dispcanvas.delete("all")
            start=time.time()
            self.gp.fit(np.array(self.x).reshape(-1,1),np.array(self.y).reshape(-1,1))
            stop=time.time()
            self.log.insert(tk.END,"log marginal likelihood: "+str(self.gp.log_marginal_likelihood())+"\nparams: \n  "+"\n  ".join([param+" : "+str(val) for param,val in self.gp.get_params(True).items()]))
            self.log.insert(tk.END,"\ntime: "+str(stop-start))
            self.log.insert(tk.END,"\nfinal params:"+"\n  ".join([param+" : "+str(val) for param,val in self.gp.kernel_.get_params(True).items()]))
            # Posterior mean/std over a dense grid on [-1, 1).
            mean, std = self.gp.predict(np.arange(-1, 1, .01).reshape(-1, 1), return_std=True)
            fig=mpl.figure.Figure(figsize=(4, 3))
            ax=fig.add_axes([0, 0, 1, 1])
            ax.plot(np.arange(-1, 1, .01), mean)
            ax.fill_between(np.arange(-1, 1, .01), np.squeeze(mean) - std, np.squeeze(mean) + std, alpha=.1)
            ax.scatter(self.x, self.y, c="red", s=50)
            ax.set_xlim([-1,1])
            ax.set_ylim([-1,1])
            fig_x, fig_y = 0, 0
            # Keep the PhotoImage on self -- draw_figure's image vanishes if
            # the reference is dropped.
            self.fig_photo = draw_figure(self.dispcanvas, fig, loc=(fig_x, fig_y))
            fig_w, fig_h = self.fig_photo.width(), self.fig_photo.height()
    def dispGP(self):
        """Show the current fit in a standalone matplotlib window."""
        self.gp.fit(np.array(self.x).reshape(-1, 1), np.array(self.y).reshape(-1, 1))
        mean,std=self.gp.predict(np.arange(-1,1,.01).reshape(-1,1),return_std=True)
        plt.figure(figsize=(16,9))
        plt.plot(np.arange(-1,1,.01),mean)
        plt.fill_between(np.arange(-1,1,.01),np.squeeze(mean)-std,np.squeeze(mean)+std,alpha=.1)
        plt.scatter(self.x,self.y,c="red",s=50)
        plt.xlim(-1,1)
        plt.ylim(-2,2)
        plt.show()
class GPselector(tk.Frame):
    """Control panel for building a GaussianProcessRegressor from UI values."""
    def __init__(self,master):
        tk.Frame.__init__(self,master)
        # Callback fired with the freshly-built regressor on "Update".
        self.changeGP=lambda x:None
        buttonpanel = tk.Frame(self)
        buttonpanel.pack(side=tk.LEFT)
        updateButton=tk.Button(buttonpanel,text="Update",command=self.updateGP)
        updateButton.pack(side=tk.TOP)
        # Regressor-level settings shared by all kernel types.
        self.generalparamselect=eg.ParameterPanel(self,[("alpha: ",tk.DoubleVar,.0000001),("restarts: ",tk.IntVar,25),("optimize: ",tk.BooleanVar,True)])
        self.generalparamselect.pack(side=tk.LEFT)
        buttonpanel=tk.Frame(self)
        buttonpanel.pack(side=tk.LEFT)
        tk.Button(buttonpanel, text="Matern", command=self.setMatern).pack(side=tk.TOP)
        tk.Button(buttonpanel, text="RBF", command=self.setRBF).pack(side=tk.TOP)
        tk.Button(buttonpanel, text="RBFnoise", command=self.setRBFnoise).pack(side=tk.TOP)
        # Kernel-specific settings; swapped out by the set* methods below.
        self.paramselect=eg.ParameterPanel(self,[("nu: ",tk.DoubleVar,1.5),("length_scale: ",tk.DoubleVar,1.0),("length_scale_min",tk.DoubleVar,1e-5),("length_scale_max",tk.DoubleVar,1e5)])
        self.paramselect.pack(side=tk.LEFT)
        self.kerneltype="Matern"
    def updateGP(self):
        """Build a regressor from the current panel values and publish it."""
        generalparams=self.generalparamselect.getparameters()
        params=self.paramselect.getparameters()
        # The params indices follow the panel layout installed by the
        # matching set* method for the active kernel type.
        if self.kerneltype=="Matern":
            kernel=skgp.kernels.Matern(nu=params[0],length_scale=params[1],length_scale_bounds=(params[2],params[3]))
        elif self.kerneltype=="RBF":
            kernel=skgp.kernels.RBF(length_scale=params[0],length_scale_bounds=(params[1],params[2]))
        elif self.kerneltype=="RBFnoise":
            kernel=skgp.kernels.RBF(length_scale=params[0],length_scale_bounds=(params[3],params[4]))+params[2]*skgp.kernels.WhiteKernel(noise_level=params[1])
        else:
            raise ValueError("Unrecognized kernel type: "+str(self.kerneltype))
        gp=skgp.GaussianProcessRegressor(kernel=kernel,n_restarts_optimizer=generalparams[1],alpha=generalparams[0],optimizer="fmin_l_bfgs_b" if generalparams[2] else None)
        self.changeGP(gp)
    def setMatern(self):
        """Switch the kernel panel to Matern parameters."""
        self.paramselect.pack_forget()
        self.paramselect=eg.ParameterPanel(self,[("nu: ",tk.DoubleVar,1.5),("length_scale: ",tk.DoubleVar,1.0),("length_scale_min",tk.DoubleVar,1e-5),("length_scale_max",tk.DoubleVar,1e5)])
        self.paramselect.pack(side=tk.LEFT)
        self.kerneltype="Matern"
    def setRBF(self):
        """Switch the kernel panel to RBF parameters."""
        self.paramselect.pack_forget()
        self.paramselect=eg.ParameterPanel(self,[("length_scale: ",tk.DoubleVar,1.0),("length_scale_min",tk.DoubleVar,1e-5),("length_scale_max",tk.DoubleVar,1e5)])
        self.paramselect.pack(side=tk.LEFT)
        self.kerneltype="RBF"
    def setRBFnoise(self):
        """Switch the kernel panel to RBF + white-noise parameters."""
        self.paramselect.pack_forget()
        self.paramselect=eg.ParameterPanel(self,[("length_scale: ",tk.DoubleVar,1.5),("noise_level: ",tk.DoubleVar,1.0),("noise weight",tk.DoubleVar,1.0),("length_scale_min",tk.DoubleVar,1e-5),("length_scale_max",tk.DoubleVar,1e5)])
        self.paramselect.pack(side=tk.LEFT)
        self.kerneltype="RBFnoise"
class GPvisualizer(tk.Frame):
    """Top-level frame wiring point entry, GP display and kernel selection."""
    def __init__(self,master):
        tk.Frame.__init__(self,master)
        selector = PointSelector(self)
        selector.pack(side=tk.LEFT)
        display = GPdisplay(self)
        display.pack(side=tk.LEFT)
        chooser = GPselector(self)
        chooser.pack(side=tk.LEFT)
        # Route point edits and kernel changes into the display panel.
        selector.changepoints = display.updatePoints
        chooser.changeGP = display.updateGP
if __name__=="__main__":
    # Launch the stand-alone GP visualizer window.
    master = tk.Tk()
    GPvisualizer(master).pack(side=tk.TOP)
    tk.mainloop()
{
"api_name": "matplotlib.backends.backend_agg.FigureCanvasAgg",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Tkinter.PhotoImage",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.tkagg.blit",
"line_number": 31,
"usage_type": "cal... |
12685763897 | from moduleBaseClass import ModuleBaseClass
from StringIO import StringIO
from PIL import Image
class Module(ModuleBaseClass):
    """Detector for BMP images: header signature plus a PIL decode check."""
    def __init__(self):
        # BMP files start with the two magic bytes 'BM' (0x42 0x4d).
        # Bug fix: the original value 'x42\x4d' was missing the first
        # backslash, so it matched the literal text 'x42M' and never a
        # real BMP header.
        self.header = '\x42\x4d'
        self.name = 'bmp'
    def final_check(self, raw):
        """Return True iff *raw* actually decodes as an image via PIL."""
        try:
            Image.open(StringIO(raw))
            return True
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed mid-scan.
            return False
| tengwar/xorstuff | modules/bmp.py | bmp.py | py | 360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "moduleBaseClass.ModuleBaseClass",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "StringIO.Stri... |
38192330286 | import os
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from xhtml2pdf import pisa
from ..models import *
from django.contrib.auth.models import User
from django.contrib.staticfiles import finders
def link_callback(uri, rel):
    """
    Convert HTML URIs to absolute system paths so xhtml2pdf can access those
    resources
    """
    found = finders.find(uri)
    if found:
        # finders may return a single path or a collection of matches;
        # normalise and use the first hit.
        candidates = found if isinstance(found, (list, tuple)) else [found]
        return os.path.realpath(candidates[0])
    sUrl = settings.STATIC_URL  # Typically /static/
    sRoot = settings.STATIC_ROOT  # Typically /home/userX/project_static/
    mUrl = settings.MEDIA_URL  # Typically /media/
    mRoot = settings.MEDIA_ROOT  # Typically /home/userX/project_static/media/
    if uri.startswith(mUrl):
        path = os.path.join(mRoot, uri.replace(mUrl, ""))
    elif uri.startswith(sUrl):
        path = os.path.join(sRoot, uri.replace(sUrl, ""))
    else:
        # Unknown prefix: hand the URI back unchanged.
        return uri
    # make sure that file exists
    if not os.path.isfile(path):
        raise Exception(
            'media URI must start with %s or %s' % (sUrl, mUrl)
        )
    return path
def admission_letter(request):
    """Render all Registration entries into an admission-letter PDF response."""
    # HttpResponse("workng")
    # get_student = get_object_or_404(Student, user=request.user)
    # user = User.objects.get(id=request.user.id)
    # NOTE(review): despite the name, `signature` is the first CollegeSettings
    # row and is exposed to the template under the key 'logo'.
    signature = CollegeSettings.objects.first()
    entry = Registration.objects.all()
    template_path = 'KCHS/registration/admission_letter.html'
    context = {'logo': signature,
               'registration': entry}
    # Create a Django response object, and specify content_type as pdf
    response = HttpResponse(content_type='application/pdf')
    # if the file is dowloaded
    # response['Content-Disposition'] = 'attachment; filename="fieldApplicationForm.pdf"'
    # if display
    response['Content-Disposition'] = 'filename="FieldApplicationLetter.pdf"'
    # find the template and render it.
    template = get_template(template_path)
    html = template.render(context)
    # create a pdf
    pisa_status = pisa.CreatePDF(
        html, dest=response)
    # if error then show some funny view
    if pisa_status.err:
        return HttpResponse('We had some errors <pre>' + html + '</pre>')
    return response
| luggiestar/kahama | KCHS/views/download_pdf_files_views.py | download_pdf_files_views.py | py | 2,550 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.staticfiles.finders.find",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.staticfiles.finders",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.realpath",
"line_number": 21,
"usage_type": "call"
}... |
37373287317 | import math
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttentionLayer(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into per-head queries/keys/values, computes
    softmax(QK^T / sqrt(d)) V per head, and merges the heads back into a
    ``hidden_size``-dimensional output.
    """
    def __init__(self, hidden_size, num_attention_heads, dropout_prob):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = hidden_size // num_attention_heads
        assert self.hidden_size % self.num_attention_heads == 0
        projected = self.attention_head_size * self.num_attention_heads
        self.query = nn.Linear(self.hidden_size, projected)
        self.key = nn.Linear(self.hidden_size, projected)
        self.value = nn.Linear(self.hidden_size, projected)
        # self.dropout = nn.Dropout(dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape (B, L, H*D) -> (B, H, L, D) for per-head attention."""
        headed = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*headed).permute(0, 2, 1, 3)
    def compute_qkv(self, hidden_states):
        """Apply the three linear projections to the input."""
        return (
            self.query(hidden_states),
            self.key(hidden_states),
            self.value(hidden_states),
        )
    def forward(self, hidden_states, attention_mask=None):
        q, k, v = self.compute_qkv(hidden_states)
        # (B, L, H*D) -> (B, H, L, D); scale queries by 1/sqrt(head dim).
        queries = self.transpose_for_scores(q) / math.sqrt(self.attention_head_size)
        keys = self.transpose_for_scores(k)
        values = self.transpose_for_scores(v)
        # [B, H, L, L] attention logits.
        scores = torch.matmul(queries, keys.transpose(-1, -2))
        if attention_mask is not None:
            # Push padded positions to -1e8 so softmax gives them ~0 weight.
            pad = (1 - attention_mask.unsqueeze(1).unsqueeze(1)).to(torch.bool)
            scores = scores.float().masked_fill_(pad, float(-1e8))
        probs = F.softmax(scores, dim=-1, dtype=torch.float32).type_as(values)
        # probs = self.dropout(probs)
        context = torch.matmul(probs, values)
        context = context.permute(0, 2, 1, 3).contiguous()
        merged = context.size()[:-2] + (self.hidden_size,)
        return context.view(*merged)
class FFNIntermediate(nn.Module):
    """Expansion half of the feed-forward block: linear projection + GELU."""
    def __init__(self, hidden_size, intermediate_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)
        self.intermediate_act_fn = nn.GELU()
    def forward(self, hidden_states):
        # Project up to the intermediate width, then apply the activation.
        return self.intermediate_act_fn(self.dense(hidden_states))
class FFNOutput(nn.Module):
    """Contraction half of the feed-forward block with residual + LayerNorm."""
    def __init__(self, intermediate_size, hidden_size, dropout_prob):
        super().__init__()
        self.dense = nn.Linear(intermediate_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
        # self.dropout = nn.Dropout(dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        # projected = self.dropout(projected)
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(projected + input_tensor)
class FFNLayer(nn.Module):
    """Position-wise feed-forward block: expand, activate, contract, residual."""
    def __init__(self, hidden_size, intermediate_size, dropout_prob):
        super().__init__()
        self.intermediate_layer = FFNIntermediate(hidden_size, intermediate_size)
        self.output_layer = FFNOutput(intermediate_size, hidden_size, dropout_prob)
    def forward(self, hidden_states):
        expanded = self.intermediate_layer(hidden_states)
        # The original input doubles as the residual branch.
        return self.output_layer(expanded, hidden_states)
class TransformerLayer(nn.Module):
    """One encoder layer: self-attention followed by a feed-forward block."""
    def __init__(self, hidden_size, num_attention_heads, intermediate_size, dropout_prob):
        super().__init__()
        self.sa_layer = SelfAttentionLayer(hidden_size, num_attention_heads, dropout_prob)
        self.ffn_layer = FFNLayer(hidden_size, intermediate_size, dropout_prob)
    def forward(self, hidden_states, attention_mask=None):
        attended = self.sa_layer(hidden_states, attention_mask)
        return self.ffn_layer(attended)
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
44850642568 | #!/usr/bin/env python3
#
# Add metadata from Apple Podcasts to cached mp3s
# so they sync to Garmin Watches with appropriate
# metadata
# ---------------
# Michael Oliver, 2022, MIT License
#
# Standing on the shoulders of giants:
# Modified prior art and inspiration by Douglas Watson
# https://douglas-watson.github.io/post/2020-05_export_podcasts/
#
# Intended for use as a cron job or to be run before Garmin Express
#
# Queries the Apple Podcasts database for episodes that have been
# downloaded, then updates the metadata embeded in those files
# so that the mp3's have the correct metadata
#
# https://mcoliver.com
import os
import urllib.parse
import sqlite3
# Episode/podcast join over the Apple Podcasts library schema; one row per
# episode that has a downloaded media file (ZASSETURL not null).
SQL = """
SELECT p.ZAUTHOR, p.ZTITLE, e.ZTITLE, e.ZASSETURL, e.ZPUBDATE
from ZMTEPISODE e
join ZMTPODCAST p
on e.ZPODCASTUUID = p.ZUUID
where ZASSETURL NOTNULL;
"""
def check_imports():
    ''' Prompts for password to install dependencies, if needed '''
    try:
        # Probe only -- the names actually used are imported in __main__.
        import mutagen
    except ImportError:
        # Ask macOS for admin rights and install the missing package.
        install_cmd = """osascript -e 'do shell script "/usr/bin/pip3 install mutagen" with administrator privileges'"""
        os.system(install_cmd)
def get_downloaded_episodes(db_path):
    """Return (author, podcast, episode title, asset URL, pub date) rows.

    Queries the Apple Podcasts library at *db_path* for every episode that
    has a local media file (ZASSETURL not null).  The connection is closed
    deterministically instead of being leaked.
    """
    query = """
    SELECT p.ZAUTHOR, p.ZTITLE, e.ZTITLE, e.ZASSETURL, e.ZPUBDATE
    from ZMTEPISODE e
    join ZMTPODCAST p
    on e.ZPODCASTUUID = p.ZUUID
    where ZASSETURL NOTNULL;
    """
    conn = sqlite3.connect(db_path)
    try:
        return conn.execute(query).fetchall()
    finally:
        conn.close()
def main(db_path):
    '''Iterate through the database and rewrite ID3 tags on the cached mp3s.

    NOTE(review): MP3, EasyID3 and HeaderNotFoundError are imported in the
    __main__ guard below, after check_imports() runs -- calling main() from
    another module without those imports would raise NameError.
    '''
    for author, podcast, title, path, zpubdate \
            in get_downloaded_episodes(db_path):
        # Asset URLs are file:// URIs; strip the scheme and URL-decoding.
        src_path = urllib.parse.unquote(path[len('file://'):])
        print(f"Updating: {src_path}")
        if os.path.exists(src_path):
            try:
                mp3 = MP3(src_path, ID3=EasyID3)
                if mp3.tags is None:
                    mp3.add_tags()
                mp3.tags['artist'] = author
                mp3.tags['album'] = podcast
                mp3.tags['title'] = title
                mp3.save()
            except HeaderNotFoundError:
                print(f"Corrupted file: {podcast} - {title}")
                continue
            except IsADirectoryError:
                print(
                    f"Failed to export {podcast} - {title}, media file is a movie")
                continue
            except FileNotFoundError:
                print("File does not exist. skipping")
                continue
        else:
            print (f"File does not Exist {src_path}")
if __name__ == "__main__":
    # Path to the Apple Podcasts library database (macOS group container).
    db_path = os.path.expanduser(
        "~/Library/Group Containers/243LU875E5.groups.com.apple.podcasts/Documents/MTLibrary.sqlite")
    check_imports()
    # Imported here (not at module top) so check_imports() gets a chance to
    # install mutagen first on machines where it is missing.
    from mutagen.mp3 import MP3, HeaderNotFoundError
    from mutagen.easyid3 import EasyID3
    main(db_path)
| mcoliver/fixPodcastMetadata | fixPodcastMetadata.py | fixPodcastMetadata.py | py | 2,652 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.unquote",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "urllib.parse.par... |
41510671413 | #!/usr/bin/env python3
"""
Session Authentication Module
"""
from api.v1.auth.auth import Auth
from api.v1.views.users import User
import uuid
from typing import TypeVar
class SessionAuth(Auth):
    """
    Responsible for session Authentication
    Inherits From auth class
    """
    # Maps session_id -> user_id for every session created in this process.
    user_id_by_session_id = {}

    def create_session(self, user_id: str = None) -> str:
        """
        creates a session ID for a user_id
        args:
            user_id: str - id of user
        return:
            Session id (or None for invalid input)
        """
        if user_id is None or not isinstance(user_id, str):
            return None
        session_id: str = str(uuid.uuid4())
        self.user_id_by_session_id[session_id] = user_id
        return session_id

    def user_id_for_session_id(self, session_id: str = None) -> str:
        """
        Return User id based on the session id
        Args:
            session_id: str : session id
        Return:
            user_id: str : user id, or None if the session is unknown/invalid
        """
        if session_id is None or not isinstance(session_id, str):
            # Bug fix: the original fell through to `return user_id` with
            # `user_id` never assigned, raising UnboundLocalError for
            # invalid session ids instead of returning None.
            return None
        return self.user_id_by_session_id.get(session_id)

    def current_user(self, request=None):
        """
        Returns the User instance for the request's session cookie
        Args:
            request
        Return:
            user object (User.get result)
        """
        session_cookie = self.session_cookie(request)
        user_id = self.user_id_for_session_id(session_cookie)
        return User.get(user_id)

    def destroy_session(self, request=None):
        """
        deletes the user session / logout
        Return:
            True when a valid session was removed, False otherwise
        """
        if request is None:
            return False
        session_token = self.session_cookie(request)
        if not session_token:
            return False
        user_id = self.user_id_for_session_id(session_token)
        if not user_id:
            return False
        del self.user_id_by_session_id[session_token]
        return True
| tommyokoyo/alx-backend-user-data | 0x02-Session_authentication/api/v1/auth/session_auth.py | session_auth.py | py | 2,157 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.v1.auth.auth.Auth",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "api.v1.views.users.User.get",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "api.v1.v... |
26599759147 | from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QHeaderView
from db.models import *
from gui.widgets.custom_widgets import DialogWithDisablingOptions
class MainWidget(QtWidgets.QWidget):
    """Main screen: a league table on the left, action buttons on the right."""
    def __init__(self, parent, model):
        super().__init__(parent)
        self.layout = QtWidgets.QHBoxLayout(self)
        # League table backed by the supplied Qt item model.
        self.league_list = QtWidgets.QTableView(self)
        self.league_list.setModel(model)
        self.league_list.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.league_list.clearSelection()
        self.league_list.resizeColumnsToContents()
        self.league_list.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.layout.addWidget(self.league_list, 2)
        # Vertical column of action buttons.
        self.button_layout = QtWidgets.QVBoxLayout(self)
        self.new_league_button = QtWidgets.QPushButton("New league", self)
        self.button_layout.addWidget(self.new_league_button)
        self.new_league_button.clicked.connect(self.on_new_league)
        self.new_round_button = QtWidgets.QPushButton("New round", self)
        self.button_layout.addWidget(self.new_round_button)
        self.new_round_button.clicked.connect(self.on_new_round)
        self.del_button = QtWidgets.QPushButton("Delete league", self)
        self.button_layout.addWidget(self.del_button)
        self.del_button.clicked.connect(self.on_delete)
        self.results_overview_button = QtWidgets.QPushButton("Results overview", self)
        self.button_layout.addWidget(self.results_overview_button)
        self.results_overview_button.clicked.connect(self.on_load)
        self.quit_button = QtWidgets.QPushButton("Quit", self)
        self.button_layout.addWidget(self.quit_button)
        self.quit_button.clicked.connect(self.on_quit)
        self.button_layout.addStretch()
        # Branding label pinned to the bottom of the button column.
        self.jsolutions_label = QtWidgets.QLabel("JSolutions")
        self.jsolutions_label.setAlignment(QtCore.Qt.AlignCenter)
        self.button_layout.addWidget(self.jsolutions_label)
        self.button_layout.setAlignment(QtCore.Qt.AlignTop)
        self.button_layout.setAlignment(self.jsolutions_label, QtCore.Qt.AlignBottom)
        self.layout.addLayout(self.button_layout, 1)
        self.setLayout(self.layout)
    def get_selected_league(self):
        """Return the league of the single selected row, or None if the
        selection is empty or spans multiple rows."""
        selected_indexes = self.league_list.selectedIndexes()
        if len(selected_indexes) != 2 or (selected_indexes[0].row() != selected_indexes[1].row()):
            return None
        else:
            return self.league_list.model().get_league(selected_indexes[0])
    @pyqtSlot()
    def on_quit(self):
        """Quit the whole application."""
        QtCore.QCoreApplication.instance().quit()
    @pyqtSlot()
    def on_load(self):
        """Open the results-overview window for the selected league."""
        league = self.get_selected_league()
        if league is None:
            return
        if league.max_round == 0 or league.max_round is None:
            QtWidgets.QMessageBox.warning(self, "Zero rounds error", "Zero rounds have been played in this league. "
                                                                     "Unable to show the results overview.")
        else:
            # Imported lazily to avoid a circular import with gui.windows.
            from gui.windows import LeagueOverviewWindow
            win = LeagueOverviewWindow(league, parent=self)
            win.show()
    @pyqtSlot()
    def on_delete(self):
        """Delete the selected league (and its results) after confirmation."""
        league = self.get_selected_league()
        if league is None:
            return
        league = League.get_by_name(league.name)
        reply = QtWidgets.QMessageBox.question(self, "Message", f"Are you sure you want to delete {league.name} league?",
                                               QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            Result.delete_all_from_league(league)
            league.delete_instance()
            self.league_list.model().refresh()
        else:
            return
    @pyqtSlot()
    def on_new_league(self):
        """Prompt for a new, unique league name and create the league."""
        league_names = list(map(lambda x: x.name, League.get_all()))
        dialog = DialogWithDisablingOptions("New league", "Please enter valid league name:", league_names)
        if dialog.exec_():
            league = League.create(name=dialog.ret_str)
            league.save()
            self.league_list.model().refresh()
    @pyqtSlot()
    def on_new_round(self):
        """Open the round-entry window for the selected league."""
        league = self.get_selected_league()
        if league is None:
            return
        # Imported lazily to avoid a circular import with gui.windows.
        from gui.windows import InputWindow
        new_round_win = InputWindow(self, league)
        new_round_win.show()
    def refresh_leagues_overview(self):
        """Force the league table model to reload its data."""
        self.league_list.model().refresh()
| jsaric/quiz-manager | gui/widgets/main_widget.py | main_widget.py | py | 4,612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 12,
"usage_type": "call"
},
{
"api_name":... |
3581561120 | # Import necessary libraries
import openai
import subprocess
import sys
import json
import html
import re
import ssl
import os
import math
import glob
import pprint
import nltk
import pdb
import requests
import time
import random
from PIL import Image, ImageDraw, ImageFont
from PIL import UnidentifiedImageError
# Ensure the NLTK 'punkt' tokenizer data is available before tokenizing.
# Bug fix: nltk.data.find() raises LookupError when the resource is missing
# (it does not return a falsy value), so the original
# `if not nltk.data.find(...)` crashed on machines without punkt instead of
# triggering the download.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt', quiet=True)
# Mapping of dispensary subdomains to their numeric site ids.
sitelist = [
    { "subdomain": "alamo", "site_id": 29 },
    { "subdomain": "burlingame", "site_id": 30 },
    { "subdomain": "campbell", "site_id": 7 },
    { "subdomain": "castrovalley", "site_id": 25 },
    { "subdomain": "concord", "site_id": 31 },
    { "subdomain": "danville", "site_id": 9 },
    { "subdomain": "dublin", "site_id": 8 },
    { "subdomain": "hillsborough", "site_id": 12 },
    { "subdomain": "lafayette", "site_id": 13 },
    { "subdomain": "livermore", "site_id": 14 },
    { "subdomain": "orinda", "site_id": 34 },
    { "subdomain": "pittsburg", "site_id": 28 },
    { "subdomain": "pleasanthill", "site_id": 35 },
    { "subdomain": "sanramon", "site_id": 33 },
    { "subdomain": "walnutcreek", "site_id": 32 }
]

def get_site_id(subdomain):
    """Return the site_id registered for *subdomain*, or None if unknown."""
    return next(
        (entry["site_id"] for entry in sitelist if entry["subdomain"] == subdomain),
        None,
    )
# CLI usage: script.py <location_subdomain> <sku>
location = sys.argv[1]
sku = sys.argv[2]
# Initialize an empty dictionary for credentials
# NOTE(review): `credentials` is not populated in this part of the file --
# presumably it is filled from creds2.txt further down; confirm.
credentials = {}
# Define the file path to the credentials file
creds_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),  # Get the directory of the current file
    "../creds2.txt"  # Append the relative path to the credentials file
)
# Remove any stale product.json left over from a previous run.
if os.path.exists('product.json'):
    os.remove('product.json')
class Location:
    """Plain record describing one dispensary site and its API credentials."""
    def __init__(self, website, user, city, phone, consumer_key, consumer_secret, api_key):
        """Store every field verbatim on the instance."""
        self.website, self.user, self.city = website, user, city
        self.phone = phone
        self.consumer_key, self.consumer_secret = consumer_key, consumer_secret
        self.api_key = api_key
def scp_file_to_remote(local_file, remote_file):
    """Kick off a background scp of *local_file* to *remote_file*."""
    try:
        # Popen returns immediately; the copy proceeds in the background.
        subprocess.Popen(["scp", local_file, remote_file])
        print("File transfer initiated.")
    except Exception as err:
        print("Error while copying the file:", err)
def download_image(url, filename):
    """Stream the file at *url* down to *filename* in 8 KiB chunks."""
    try:
        response = requests.get(url, stream=True)
        # Turn HTTP error statuses into exceptions handled below.
        response.raise_for_status()
        with open(filename, "wb") as out_file:
            for block in response.iter_content(chunk_size=8192):
                out_file.write(block)
        print(f"Image downloaded successfully: (unknown)")
    except requests.exceptions.RequestException as e:
        print(f"Error downloading image: {str(e)}")
def add_watermark_and_save(image_path, watermark_text, output_path):
    """Overlay *watermark_text* in the lower-right corner of an image.

    Opens *image_path*, draws the text semi-transparently onto an RGBA
    overlay, composites it over the image, and saves the result to
    *output_path*.  Errors are reported to stdout rather than raised.
    """
    try:
        # Open the image
        image = Image.open(image_path).convert("RGBA")
        # Define the watermark text and font style
        font = ImageFont.truetype("font.ttf", 40)
        # Create a transparent overlay and draw the watermark text
        overlay = Image.new("RGBA", image.size, (0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)
        # Bug fix: textbbox() returns (left, top, right, bottom); the
        # original sliced [:2], which yielded (left, top) -- essentially
        # (0, 0) -- so the text was placed off the image's right/bottom edge.
        left, top, right, bottom = draw.textbbox((0, 0), watermark_text, font=font)
        text_width = right - left
        text_height = bottom - top
        # Position the watermark 10 px in from the lower right corner.
        position = (image.width - text_width - 10, image.height - text_height - 10)
        draw.text(position, watermark_text, font=font, fill=(128, 128, 128, 128))
        # Composite the image and watermark overlay
        watermarked = Image.alpha_composite(image, overlay)
        # Save the watermarked image with the specified output path
        watermarked.save(output_path)
        print(f"Watermarked image saved as {output_path}")
    except Exception as e:
        print(f"Error: {str(e)}")
def makeunique(new_unique_product_name):
    """Ask the chat model for a slightly different, SEO-friendly product name.

    Returns the model's suggestion with HTML tags/entities stripped.
    (The original version computed the response and then discarded it —
    it had no return statement at all.)
    """
    ai_response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful budtender who knows all about the cannabis industry.",
            },
            {
                "role": "user",
                "content": f"Use this product name '{new_unique_product_name}'. Use this phrase to come up with a slightly different name that means the same thing."
                f"Come up with a new name that is max 70 chars long and will rank well with regard to SEO. If there is a mention of price. Change it to some other descriptive language instead."
            },
        ]
    )
    # Mirror the post-processing used by the sibling generators.
    new_name = ai_response['choices'][0]['message']['content'].strip()
    return html.unescape(re.sub('<.*?>', '', new_name))
def generate_new_product_name(sku):
    """Ask the chat model for a new SEO-friendly product title.

    NOTE(review): ``sku`` is accepted but never used — the prompt reads the
    module-global ``product['slug']`` instead, so this only works after the
    source product has been fetched at module scope. Confirm whether the
    slug should be derived from ``sku``.

    Returns the model's suggestion with HTML tags and entities stripped.
    """
    ai_response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful budtender who knows all about the cannabis industry.",
            },
            {
                "role": "user",
                "content": f"Use this product slug '{product['slug']}' to rewrite the product title. The slug contains words separated by a -."
                f"Use them to come up with a new name that is max 70 chars long and will rank well with regard to SEO. If there is a mention of price. Change it to some other descriptive language. Dont put spaces in the names. Use underscores to separate words."
            },
        ]
    )
    new_product_name = ai_response['choices'][0]['message']['content'].strip()
    # Strip any HTML tags the model may have added, then unescape entities.
    new_product_name = html.unescape(re.sub('<.*?>', '', new_product_name))
    return new_product_name
def generate_new_image_name(image_name):
    """Ask the chat model to propose a fresh, date-free image name.

    Returns the suggestion with HTML tags and entities stripped.
    """
    system_msg = {
        "role": "system",
        "content": "You are a creative AI assistant and California Budtender for a delivery service.",
    }
    user_msg = {
        "role": "user",
        "content": f"I have an image with the name '{image_name}'. Please suggest a new name for the image that does not use dates or times in the name. Limit the name to 70 characters. Dont put spaces in the names. Use underscores to separate words."
    }
    ai_response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[system_msg, user_msg],
    )
    suggestion = ai_response['choices'][0]['message']['content'].strip()
    return html.unescape(re.sub('<.*?>', '', suggestion))
def remove_keys(images_data, max_detailed=4):
    """Strip volatile/site-specific fields from WooCommerce image dicts.

    The first ``max_detailed`` images keep all fields except the blacklist
    below; any further images are blanked to ``{}`` entirely. The limit was
    previously hard-coded to 4; it is now a backward-compatible parameter.

    Args:
        images_data: list of image dicts from the WooCommerce API.
        max_detailed: how many leading images retain their (filtered) data.

    Returns:
        A new list of new dicts; the input is not mutated.
    """
    keys_to_remove = frozenset((
        'date_created',
        'date_created_gmt',
        'date_modified',
        'date_modified_gmt',
        'id',
        'alt',
    ))
    return [
        {k: v for k, v in image.items() if k not in keys_to_remove}
        if index < max_detailed else {}
        for index, image in enumerate(images_data)
    ]
def generate(new_pics_prompt):
    """Generate one 256x256 image for ``new_pics_prompt`` and return its URL."""
    result = openai.Image.create(prompt=new_pics_prompt, n=1, size="256x256")
    return result["data"][0]["url"]
locations = []
# Open the credentials file. Format is INI-like: a "[website]" header line
# starts each section, followed by "key = value" lines.
with open(creds_file_path) as f:
    # Initialize variables for parsing the file
    website = None
    user = None
    city = None
    phone = None
    consumer_key = None
    consumer_secret = None
    # NOTE(review): openai.api_key (a module-global on the openai package)
    # is reused as a parsing variable here; the last section parsed wins.
    openai.api_key = None
    for line in f:
        line = line.strip() # Remove trailing and leading whitespace
        if line.startswith("[") and line.endswith("]"):
            # New section header: flush the previous section if complete.
            if website and user and city and phone and consumer_key and consumer_secret and openai.api_key:
                locations.append(Location(website, user, city, phone, consumer_key, consumer_secret, openai.api_key))
            website = line[1:-1].lstrip() # Remove the brackets and any leading whitespace
            user = None
            city = None
            phone = None
            consumer_key = None
            consumer_secret = None
            openai.api_key = None
        elif website and " = " in line:
            key, value = line.split(" = ")
            if key == "user":
                user = value
            elif key == "city":
                city = value
            elif key == "phone":
                phone = value
            elif key.lower().endswith("_consumer_key"):
                consumer_key = value
            elif key.lower().endswith("_consumer_secret"):
                consumer_secret = value
            elif key == "openai.api_key":
                openai.api_key = value
                aikey = value
            elif key == "website":
                website = value
# Flush the final section (the loop above only appends when a new header
# is seen). NOTE(review): unlike the in-loop append, this does not check
# completeness, so a trailing partial section yields a Location with Nones.
locations.append(
    Location(website, user, city, phone, consumer_key,
             consumer_secret, openai.api_key)
)
#fetches the first product dataset to be edited and pushed to the other sites.
# NOTE(review): `sku` is defined earlier in the file (not shown here) —
# confirm it is set before this point.
for locationa in locations[:1]:
    base_url = "https://" + locationa.website + "/wp-json/wc/v3/products"
    consumer_key = locationa.website + "_consumer_key:" + locationa.consumer_key
    consumer_secret = locationa.website + "_consumer_secret:" + locationa.consumer_secret
    city = locationa.city
    phone = locationa.phone
    website = locationa.website
    aikey = openai.api_key
    # WooCommerce REST basic auth (key, secret) pair.
    auth = (
        locationa.consumer_key,
        locationa.consumer_secret,
    )
    response = requests.get(f'{base_url}', auth=auth, params={'sku': sku})
    response.raise_for_status()
    # Take the first product matching the SKU on the source site.
    product = response.json()[0]
    source_product = product
    source_product['images'] = remove_keys(source_product['images'])
    source_images = source_product['images'][:4]
    imagecounter = 0
    for item in source_images:
        imagecounter = imagecounter + 1
        print("Image:",imagecounter)
        #source_product_name = product['name'].strip()
        # Rewrite image URLs to point at the destination site's upload dir.
        item['src'] = item['src'].replace("/29/","/30/")
        item['src'] = item['src'].replace("alamo","burlingame")
        #imgcnt = 0
        #pprint.pprint(source_images)
        #source_image_url = item['src']
# for item in source_images:
#     source_product_name = product['name'].strip()
#     print("Source Product\n",source_product_name)
#     print(website, aikey)
#     print("Source Images")
#     imgcnt = 0
#     pprint.pprint(source_images)
#     source_image_url = item['src']
#     new_product_name = generate_new_product_name(sku)
#     print("New name suggestion:", new_product_name)
seq = 0
#fetches all but the first product and applies the updated first site product details.
print("Destination Products\n")
for locationb in locations[1:]:
    seq = seq + 1
    base_url = "https://" + locationb.website + "/wp-json/wc/v3/products"
    consumer_key = locationb.website + "_consumer_key:" + locationb.consumer_key
    consumer_secret = locationb.website + "_consumer_secret:" + locationb.consumer_secret
    city = locationb.city
    city = city.replace('"', '')
    phone = locationb.phone
    # Normalise the phone to bare digits for the watermark text.
    phone = phone.replace(' ', '').replace('-', '').replace('"', '').replace('(', '').replace(')', '')
    website = locationb.website
    aikey = openai.api_key
    auth = (
        locationb.consumer_key,
        locationb.consumer_secret,
    )
    response = requests.get(f'{base_url}', auth=auth, params={'sku': sku})
    response.raise_for_status()
    product = response.json()[0]
    #source_product = product
    # Re-filter the source images and copy them onto the destination product.
    source_product['images'] = remove_keys(source_product['images'])
    product['images'] = source_product['images']
    msgg = "#" + str(seq) + " " + str(sku)
    print(msgg)
    subdomain = website.split('.')[0]
    print("Domain: ", subdomain)
    site_id = get_site_id(subdomain)
    print("Site ID:", site_id)
    print(city, "Doap")
    print(city, " Ca ", phone)
    print("Sku: ", sku)
    # First AI call: generate new product name
    product['name'] = generate_new_product_name(sku).replace('"','').replace('”','').replace("'","").replace(" ","_").replace("(","").replace(")","").replace(",","").replace("$","")
    print("New dest product name: ", product['name'])
    print("New Images")
    imgcnt = 0
    for item in source_images:
        imgcnt = imgcnt + 1
        itemname = item['name'].replace('-',' ').capitalize()
        print("Image #", imgcnt)
        itemname = item['name'].replace('-',' ').capitalize()
        # print("Image #", imgcnt)
        # Second AI call: new image name, sanitised for use as a file name.
        new_unique_product_name = generate_new_image_name(product['name']).replace('"','').replace('”','').replace("'","").replace("!","").replace("(","").replace(")","").replace(",","").replace("→","")
        new_unique_file_name = new_unique_product_name
        item['name'] = new_unique_product_name
        # print(item['name'], " : ", item['src'])
        source_image_url = item['src']
        source_image_filename = os.path.basename(source_image_url)
        new_unique_file_name = new_unique_file_name + ".png"
        download_image(source_image_url, source_image_filename)
        print("Source image url: ", source_image_url)
        # Map the public URL to the server's document-root path.
        replaced_url = source_image_url.replace("https://alamo.", "/var/www/")
        stripped_path = "/".join(replaced_url.split("/")[:-1])
        print("Orig file path: ", stripped_path)
        new_path = stripped_path.split("/")
        # NOTE(review): assumes path component 8 is the multisite upload id —
        # TODO confirm against the server layout.
        new_path[7] = str(site_id)
        new_path = "/".join(new_path)
        print("New remote file path: ", new_path)
        #item['src'] = "https://" + subdomain + ".doap.com/" + stripped_path + "/" + new_unique_file_name
        item['src'] = "https://" + subdomain + ".doap.com/" + stripped_path + "/" + new_unique_file_name
        item['src'] = item['src'].replace("/var/www/doap.com/","")
        watermark_text = city + " Doap " + phone
        add_watermark_and_save(source_image_filename, watermark_text, new_unique_file_name)
        local_file = '/Users/dmenache/Nextcloud/Projects/doap-api/ai_product_updater/' + new_unique_file_name
        remote_server = 'dmenache@debian.doap.com'
        testpath = stripped_path.replace("https://burlingame.","/var/www/")
        remote_file = f'{remote_server}:{testpath}/{new_unique_file_name}'
        scp_file_to_remote(local_file, remote_file)
        # NOTE(review): debugger breakpoint left in — remove before unattended runs.
        pdb.set_trace()
        #pprint.pprint(item)
    #pprint.pprint(source_images)
    product['images'] = source_images
    #pprint.pprint(product)
    # pprint.pprint(product)
    # Collapse doubled hosts produced by the URL rewriting above.
    for image in product['images']:
        image['src'] = image['src'].replace('https://burlingame.doap.com/https://burlingame.doap.com/', 'https://burlingame.doap.com/')
    print("product[images]",product['images'])
    print("source_images",source_images)
    print("product[images]",product['images'])
    # NOTE(review): only the FIRST destination site is processed — the loop
    # exits unconditionally here.
    break
pprint.pprint(product)
# NOTE(review): debugger breakpoint left in — remove before unattended runs.
pdb.set_trace()
# Push the rewritten product back to the destination site.
update_url = f'{base_url}/{product["id"]}'
update_response = requests.put(update_url, json=product, auth=auth)
update_response.raise_for_status()
| menached/ai_product_updater | t1.py | t1.py | py | 14,935 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.data.find",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "nltk.download",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numb... |
15859509196 | # -*- coding: utf-8 -*-
#importando as bibliotecas
from matplotlib.pyplot import text
import yfinance as yf
import pandas as pd
import numpy as np
import os.path
import telegram
pd.options.mode.chained_assignment = None
#escolher uma ação
wege = yf.Ticker('WEGE3.SA')
#escolher inteervalo de dados
wege_dia = wege.history(period='id', interval='5m')
#pegar preço de fechamento
wege_dia = wege_dia.Close
#transformando em dataframe
df_wege_dia = pd.DataFrame(wege_dia)
#reset index
df_wege_dia.reset_index(inplace=True)
#pegar o ultimo valor negociado
wege_dia_ultimo_preco = df_wege_dia.tail(1)
#renomear as colunas
wege_dia_ultimo_preco.rename(columns={'Datetime':'data_pregao', 'Close':'preco_fechamento'}, inplace=True)
#Ajustar a data
wege_dia_ultimo_preco['data_pregao']=pd.to_datetime(wege_dia_ultimo_preco['data_pregao'], format='%Y-%m-%d')
#Usar o data frame historico e pegar apenas o preço de fechamento e data pregão
if os.path.isfile('wege.csv'):
df_wege = pd.read_csv('wege.csv', delimiter=';')
else:
df = pd.read_csv('all_bovesta.csv', delimiter=';') #colocar aqui o seu arquivo do bovespa
df_wege = df[df['silga_acao']=='WEGE3']
df_wege = df_wege[['data_pregao', 'preco_fechamento']]
#Ajustar a data
df_wege['data_pregao']=pd.to_datetime(df_wege['data_pregao'], format='%Y-%m-%d')
#Retirar a ultima data que queremos calcular
df_remove = df_wege.loc[(df_wege['data_pregao'] == pd.to_datetime('today').normalize())]
df_wege = df_wege.drop(df_wege.index)
#append data atual
df_wege_total = df_wege.append(wege_dia_ultimo_preco)
#Ajuste data atual
df_wege_total['data_pregao']=pd.to_datetime(df_wege_total['data_pregao'], utc=True).dt.date
df_wege_total.to_csv('wege.csv', sep=';', index=False)
#Calcular MACD
rapidaMME=df_wege_total.preco_fechamento.ewm(span=12).mean()
lentaMME = df_wege_total.preco_fechamento.ewm(span=26).mean()
MACD= rapidaMME - lentaMME
sinal=MACD.ewm(span=9).mean()
df_wege_total['MACD'] = MACD
df_wege_total['sinal'] = sinal
#Ajuste de indx e retirar o campo data pregão
df_wege_total = df_wege_total.set_index(pd.DatetimeIndex(df_wege_total['data_pregao'].values))
df_wege_total = df_wege_total.drop('data_pregao',1)
# Criar codigo para verificar a compra ou a venda
df_wege_total['flag']=''
df_wege_total['preco_compra']=np.nan
df_wege_total['preco_venda']=np.nan
for i in range(1, len(df_wege_total.sinal)):
if df_wege_total['MACD'][i] > df_wege_total['sinal'][i]:
if df_wege_total['flag'][i-1] == 'c':
df_wege_total['flag'][i]='C'
else:
df_wege_total['flag'][i]='C'
df_wege_total['preco_compra'][i] = df_wege_total['preco_fechamento'][i]
elif df_wege_total['MACD'][i] < df_wege_total['sinal'][i]:
if df_wege_total['flag'][i-1] =='V':
df_wege_total['flag'][i]='V'
else:
df_wege_total['flag'][i]='V'
df_wege_total['preco_venda'][i] = df_wege_total['preco_fechamento'][i]
#Verifica os 2 ultimos dias
hoje = df_wege_total.flag[-1]
ontem = df_wege_total.flag[-2]
flag= hoje
preco_fechamento = round(df_wege_total.preco_fechamento.tail(1)[-1],2)
print(flag, preco_fechamento)
my_token = '1840232813:AAHxoVmcDWHK3jAxiTWsMqTsiw9vTHaICpY'
chat_id = '-476980685'
def envia_mensagem(msg, chat_id, token=my_token):
    """Send ``msg`` to ``chat_id`` through the Telegram bot API.

    BUGFIX: python-telegram-bot's ``Bot`` exposes ``send_message``
    (snake_case); the original called a nonexistent ``SendMessage`` and
    raised AttributeError on every alert.
    """
    bot = telegram.Bot(token=token)
    bot.send_message(chat_id=chat_id, text=msg)
# Build the alert text for Telegram.
msg = f'WEGE3 (WEGE), {flag} preço de fechamento: {preco_fechamento}'
# Only alert when today's flag matches yesterday's, i.e. the signal held
# across consecutive days.
if ontem == hoje:
    envia_mensagem(msg, chat_id, my_token)
| bertuci/compra_e_venda_acoes | bot_MACD/macd_bot.py | macd_bot.py | py | 3,561 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "yfinance.Ticker",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.to_dateti... |
41737860574 | from functools import cmp_to_key
def custom_split(s):
    """Split *s* on commas that sit at bracket depth zero.

    ``"1,[2,3],4"`` -> ``["1", "[2,3]", "4"]``; an empty string yields ``[]``.
    Commas inside ``[...]`` are never split points.
    """
    if not s:
        return []
    pieces = []
    depth = 0
    start = 0
    for pos, ch in enumerate(s):
        if ch == '[':
            depth += 1
        elif ch == ']':
            depth -= 1
        elif ch == ',' and depth == 0:
            pieces.append(s[start:pos])
            start = pos + 1
    pieces.append(s[start:])
    return pieces
def compare(l1, l2):
    """Compare two packets given as lists of string tokens.

    Tokens are either integer literals or bracketed sub-lists, as produced
    by custom_split. Returns 1 when l1 should come before l2 (correct
    order), -1 when it should come after, and 0 when equal so far.
    """
    idx = 0
    while idx < len(l1) and idx < len(l2):
        left = l1[idx]
        right = l2[idx]
        if left[0] != '[' and right[0] != '[':
            # Two plain integers: the smaller one comes first.
            if int(left) < int(right):
                return 1
            if int(left) > int(right):
                return -1
        else:
            # At least one side is a list: wrap any bare integer in a
            # one-element list and recurse on the sub-lists.
            left_list = custom_split(left[1:-1]) if left[0] == '[' else [left]
            right_list = custom_split(right[1:-1]) if right[0] == '[' else [right]
            verdict = compare(left_list, right_list)
            if verdict != 0:
                return verdict
        idx += 1
    # One side ran out of items first: the shorter list comes first.
    if idx >= len(l1) and idx < len(l2):
        return 1
    if idx < len(l1) and idx >= len(l2):
        return -1
    return 0
def custom_sort(a, b):
    """cmp-style comparator wrapping compare().

    BUGFIX: a comparator used with functools.cmp_to_key must return a
    negative/zero/positive int. The original returned True/False, which
    collapse to 1/0 and can never signal "comes after" (-1), so any sort
    using it would order packets incorrectly.
    """
    return compare(a, b)
# Advent of Code 2022, day 13 part 2: sort every packet together with the
# two divider packets and multiply the dividers' 1-based positions.
packets = []
DIVIDER_PACKET_1 = ["[[2]]"]
DIVIDER_PACKET_2 = ["[[6]]"]
with open('input.txt') as f:
    lines = [line.rstrip('\n') for line in f]
cnt = 1
sum = 0  # NOTE(review): unused, and shadows the builtin sum()
# Packets come in pairs separated by a blank line (groups of 3 lines).
while 3*(cnt-1) < len(lines):
    packet_one = lines[3*(cnt-1)]
    packet_two = lines[3*(cnt-1)+1]
    # Strip the outer brackets and tokenise at the top level.
    list_one = custom_split(packet_one[1:-1])
    list_two = custom_split(packet_two[1:-1])
    packets.append(list_one)
    packets.append(list_two)
    cnt += 1
packets.append(DIVIDER_PACKET_1)
packets.append(DIVIDER_PACKET_2)
# compare() returns 1 for "correct order", so reverse=True produces the
# ascending packet order the puzzle asks for.
packets = sorted(packets, key=cmp_to_key(compare), reverse=True)
cnt = 1
decoder_key = 1
for packet in packets:
    print(packet)
    # The dividers were appended un-tokenised, so identify them by equality
    # with the original literals.
    if packet == DIVIDER_PACKET_1: decoder_key *= cnt
    if packet == DIVIDER_PACKET_2: decoder_key *= cnt
    cnt += 1
print("decoder_key: ", decoder_key)
{
"api_name": "functools.cmp_to_key",
"line_number": 74,
"usage_type": "call"
}
] |
6433676498 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import yaml
import pickle
import pprint
import os
import logging
import sys
import data_loaders
import nets
import losses
import utils
import setup
# Run on the GPU when available; all tensors are moved to this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fix RNG seeds for reproducibility.
torch.manual_seed(787)
torch.cuda.manual_seed(787)
'''
#SECTION VAE Helpers
'''
def sample_n_frames(init_frames, ts, dt, ae, mu, sigma, n_generate=64):
    """Roll the learned latent SDE forward and decode the resulting frames.

    Encodes ``init_frames`` with ``ae``, takes the last latent state, then
    integrates the latent SDE (drift ``mu``, diffusion ``sigma``) for
    ``n_generate`` steps of size ``dt``. Each latent sample is decoded
    conditioned on the first input frame. Also writes a plot of the latent
    trajectory to the module-global ``savepath``.

    Returns:
        (z_samples, frames): the sampled latent states and decoded frames.
    """
    # function to generate the next n frames conditioned on the input
    with torch.no_grad():
        # Get the latent variables
        q_mu, q_sigma, det = ae.encode(init_frames)
        _, z, _ = ae.get_increments(q_mu, q_sigma)
        z_samples = torch.zeros((n_generate, z.shape[1]))
        # Start the rollout from the last encoded latent state.
        z = z[-1,:].unsqueeze(0)
        # sample in z according to the learned SDE
        for i in range(n_generate):
            z_n = ae.get_next_z(z, ts[-1].unsqueeze(0) + i*dt, dt, mu, sigma)
            z_samples[i,:] = z_n.clone()
            z = z_n
        global savepath
        plots_list = [z_samples.detach().cpu().numpy()]
        plot_titles = ['Latent Traj']
        utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'latent_traj.png'), plot_type='plot', axis=True)
        # Condition every decoded frame on the first input frame.
        conditional_frame = init_frames[0].unsqueeze(0).repeat(z_samples.size(0),1,1,1)
        if det is not None:
            # Append the deterministic code from the encoder to each latent.
            in_z = torch.cat((z_samples.to(device), det[-1].repeat(z_samples.shape[0],1)), dim = 1)
        else:
            in_z = z_samples.to(device)
        frames = ae.decode(in_z, x=conditional_frame)
        return z_samples, frames
def plot_images(ae, mu, sigma, frames, frames_hat, dt, ts, l2_small):
    """Write diagnostic image grids and sample movies to ``savepath``.

    Samples two rollouts from the first two frames (one with the learned
    ``sigma``, one with ``2*sigma``), builds comparison grids of originals
    vs reconstructions vs rollouts, and saves PNGs plus GIFs. ``l2_small``
    selects whether the 'best' filenames are used.
    """
    # plot latent trajectories
    # plot next frame reconstructions
    z_samples, sampled_frames = sample_n_frames(frames[:2],
                                                ts[:2],
                                                dt,
                                                ae.eval(),
                                                mu,
                                                sigma)
    # A second rollout with doubled diffusion, for qualitative comparison.
    _, sampled_frames2 = sample_n_frames(frames[:2],
                                         ts[:2],
                                         dt,
                                         ae.eval(),
                                         mu,
                                         sigma*2)
    # create the image grids
    im_grid_hat_single = torchvision.utils.make_grid(frames_hat[:64].detach().cpu(), pad_value=1, normalize=True)
    im_grid_hat = torchvision.utils.make_grid(sampled_frames[:64].detach().cpu(), pad_value=1, normalize=True)
    im_grid = torchvision.utils.make_grid(frames[:64].detach().cpu(), pad_value=1, normalize=True)
    # Interleave rows of originals and samples (8 images per row, 4 pairs).
    odd_rows = []
    for row in range(4):
        odd_rows.append(frames[row*8:(row+1)*8])
        odd_rows.append(sampled_frames[row*8:(row+1)*8])
    comp_grid = torchvision.utils.make_grid(torch.cat(odd_rows), pad_value=1, normalize=True)
    plots_list = [comp_grid.cpu().numpy().transpose((1,2,0))]
    plot_titles = ['Comparison']
    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'train_comparison.png'))
    plots_list = [im_grid_hat.numpy().transpose((1,2,0))]
    plot_titles = ['Sampled (trajectory)']
    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'train_sample_traj.png'))
    # save the images
    plots_list = [im_grid.numpy().transpose((1,2,0)),im_grid_hat_single.numpy().transpose((1,2,0))]
    plot_titles = ['Original','Sampled (single)']
    if l2_small:
        utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'train_sample_best.png'))
    else:
        utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'train_sample.png'))
    # save the movies
    utils.save_gif(sampled_frames.detach().cpu(), os.path.join(savepath, 'movies/train_sample_traj.gif'))
    utils.save_gif(sampled_frames2.detach().cpu(), os.path.join(savepath, 'movies/train_sample_traj2.gif'))
def save_nets(ae, mu, sigma, suffix):
    '''
    Routine to save the current state of our networks under savepath.
    ae : autoencoder network (state_dict is always saved)
    mu : latent drift network; saved as a state_dict when it is one of the
         known net classes, otherwise pickled whole
    sigma : latent diffusion network; state_dict if an MLP, else pickled
    suffix : str appended to the saved file names (e.g. 'latest', 'best_val')
    '''
    # save all the networks
    torch.save(ae.state_dict(), os.path.join(savepath,'saved_nets/ae_{}.pth'.format(suffix)))
    # isinstance replaces the exact type() == comparisons: identical for the
    # listed classes, and additionally correct for their subclasses.
    if isinstance(mu, (nets.MLP, nets.Lin, nets.Well)):
        torch.save(mu.state_dict(), os.path.join(savepath,'saved_nets/mu_{}.pth'.format(suffix)))
    else:
        with open(os.path.join(savepath,'mu_{}.pkl'.format(suffix)),'wb') as f:
            pickle.dump(mu, f)
    if isinstance(sigma, nets.MLP):
        torch.save(sigma.state_dict(), os.path.join(savepath,'saved_nets/sigma_{}.pth'.format(suffix)))
    else:
        with open(os.path.join(savepath,'sigma_{}.pkl'.format(suffix)),'wb') as f:
            pickle.dump(sigma, f)
def train(ae,
          mu,
          sigma,
          dt,
          train_data,
          val_data,
          optimizer,
          scheduler,
          n_epochs,
          data_params,
          **kwargs):
    '''
    The main training routine:
    ae : neural network (torch.Module subclass) that represents our autoencoder
    mu : network or parameter that describes the latent drift
    sigma : network or parameter that describes the latent diffusion
    dt : time step
    train_data : dataloader with the training data
    val_data : dataloader with validation data
    optimizer : optimization algorithm torch.optim
    scheduler : lr decay schedule
    n_epochs : number of epochs to run
    data_params : parameters associated with the dataset
    returns statistics with respect to training
    '''
    global savepath
    global loss_type
    # The DataLoaders wrap Subsets; unwrap twice to reach the raw dataset.
    train_dataset = train_data.dataset.dataset
    val_dataset = val_data.dataset.dataset
    try:
        inner_num = data_params['inner_iter']
    except:
        inner_num = 1
    # Epoch after which "best validation" checkpointing may kick in.
    if n_epochs > 1000:
        reserve_epoch = 499
    else:
        reserve_epoch = 49
    # plotting parameters
    l2_small = True
    l2_small_valid = True
    losses_train = []
    losses_valid = []
    try:
        plot_freq = data_params['plot_freq']
    except KeyError:
        plot_freq = 50
    try:
        plot_train = data_params['plot_train']
    except KeyError:
        plot_train = True
    # setup the stats dict
    stats = {'kl': np.Inf,
             'l2' : np.Inf,
             'l2_valid': np.Inf,
             'kl_valid': np.Inf,
             'mu_mse': 0,
             'mu_mse_valid': 0,
             'mu_rel': 0,
             'mu_rel_valid': 0,
             'sde_mse': 0,
             'sde_mse_valid': 0,
             'sde_rel': 0,
             'sde_rel_valid': 0,
             'val_cond_met': False}
    for epoch in range(n_epochs):
        ae.train()
        mu.train()
        #sigma.train()
        for idx, (frames, ts) in enumerate(train_data):
            # save a gif of the data
            if len(frames.shape) > 2:
                if idx == 0 and epoch == 0:
                    utils.save_gif(frames.detach().cpu(), os.path.join(savepath, 'orig_data.gif'))
            # transfer the data to the device
            # the rest is boilerplate
            frames = frames.float().to(device)
            ts = ts.float().to(device)
            for _ in range(inner_num):
                optimizer.zero_grad()
                kl_loss, l2_loss,\
                frames_hat, mu_hat, q_mu, sigma_hat_full, q_sigma_full, inc, z = ae.step(frames, ts, dt, mu, sigma)
                # Second step with plus_one=True; both losses enter the objective.
                kl_loss1, l2_loss1,\
                _, _, _, _, _, _, _ = ae.step(frames, ts, dt, mu, sigma, plus_one=True)
                # Renormalise sigma in place so its l2 norm matches that of an
                # all-ones vector of the latent dimension.
                sigma.data = sigma / sigma.norm(2) * torch.ones(z.shape[1]).norm(2)
                # l1 penalty on sigma encourages sparse diffusion components.
                loss = kl_loss + kl_loss1 + l2_loss + l2_loss1 + 20*sigma.norm(1)
                losses_train.append((kl_loss.item(), l2_loss.item()))
                loss.backward()
                optimizer.step()
            # And that's the end of the train routine
            '''
            PLOT SECTION
            This is still quite messy and needs to be refactored,
            but this is all visualization calls
            '''
            if kl_loss < stats['kl']:
                stats['kl'] = kl_loss.item()
                stats['mu'] = mu_hat.mean().item()
            if plot_train and (epoch % plot_freq) == 0 and idx == 0:
                if l2_loss < stats['l2']:
                    l2_small = True
                    stats['l2'] = l2_loss.item()
                else:
                    l2_small = False
                if len(frames.shape) > 2:
                    plot_images(ae, mu, sigma, frames, frames_hat, dt, ts, l2_small)
                    # plot mu hat
                    mu_hat_samples, hat_domain = utils.plot_mu_hat(mu, sigma, q_mu, ts, os.path.join(savepath, 'mu_hat_plot.png'))
                if len(frames.shape) < 3:
                    # 1-D (non-image) data: plot trajectories instead of grids.
                    plots = [frames.cpu(), frames_hat.detach().cpu()]
                    names = ['Original', 'Sampled']
                    utils.plot_subplots(plots, names, os.path.join(savepath, 'train_recon.png'), plot_type='plot', axis=True)
                    _, sampled_frames = sample_n_frames(frames[:2], ts[:2], dt, ae, mu, sigma, n_generate=1000)
                    plots = [frames.cpu(), sampled_frames.detach().cpu()]
                    names = ['Original', 'Sampled']
                    utils.plot_subplots(plots, names, os.path.join(savepath, 'train_sampled.png'), plot_type='plot', axis=True)
                    if frames.shape[1] == 1:
                        # Scalar data: compare the learned encoder to log(x).
                        with torch.no_grad():
                            inx = torch.linspace(frames.min().item(), frames.max().item()).unsqueeze(1)
                            oned_enc = ae.encode(inx.cuda(0))[0].detach().data.clone().cpu()
                            enc_scale = ( inx.log() / oned_enc ).mean()
                            enc_shift = (inx.log() - enc_scale * oned_enc).mean()
                            plt.plot(inx.detach().cpu(), enc_scale * oned_enc.cpu(), label='encoder')
                            plt.plot(inx.detach().cpu(), inx.log().detach().cpu(),label='log')
                            plt.legend()
                            plt.savefig(os.path.join(savepath, 'encoder_plot.pdf'))
                            plt.close('all')
                '''
                AFFINE TRANSFORM SECTION
                '''
                # calculate the affine map between xt and z
                current_run = train_dataset.xt_orig[idx*z.shape[0]:(idx+1)*z.shape[0]]
                scale = (train_dataset.xt_orig.max() - train_dataset.xt_orig.min())
                q_mu = q_mu[:, :train_dataset.xt_orig.shape[1]]
                z = z[:, :train_dataset.xt_orig.shape[1]]
                if not 'stocks' in savepath:
                    # if this is the stocks dataset, don't compute the scaling since there is none
                    if data_params['affine']:
                        transformed_xt, Q, b, sde_mse, sde_rel = utils.calc_affine(
                            current_run,
                            z.detach().cpu().numpy(),
                            savepath,
                            affine=data_params['affine'])
                        if z.shape[1] == mu_hat.shape[1]:
                            mu_residuals, mu_rel, mu_crlb = utils.compare_mu2(
                                mu,
                                q_mu,
                                ts,
                                Q,
                                b,
                                dt,
                                train_dataset,
                                os.path.join(savepath,'mu_comp_scaled.png'),
                                affine=data_params['affine'],
                                loss_type=loss_type)
                        else:
                            mu_residuals = torch.Tensor([np.NaN]).numpy()
                            mu_crlb = torch.Tensor([np.NaN]).numpy()
                            mu_rel = torch.Tensor([np.NaN]).numpy()
                    else:
                        # No affine map: rescale q_mu to the data range instead.
                        q_max = q_mu.max()
                        q_min = q_mu.min()
                        if loss_type == 'exact':
                            q_scaled = ((q_mu - q_min ) / (q_max - q_min) * (scale) ).detach().cpu().numpy()
                            #q_scaled = q_mu.detach().cpu().numpy() / np.sqrt(scale)
                        else:
                            q_scaled = q_mu.detach().cpu().numpy() / scale
                        transformed_xt, Q, b, sde_mse, sde_rel = utils.calc_affine(
                            current_run,
                            q_scaled,
                            #z.detach().cpu().numpy() / scale,
                            savepath,
                            affine=data_params['affine'])
                        if z.shape[1] == mu_hat.shape[1]:
                            mu_residuals, mu_rel, mu_crlb = utils.compare_mu2(
                                mu,
                                q_mu,
                                ts,
                                Q,
                                b,
                                dt,
                                train_dataset,
                                os.path.join(savepath,'mu_comp_scaled.png'),
                                affine=data_params['affine'],
                                loss_type=loss_type)
                        else:
                            mu_residuals = torch.Tensor([np.NaN]).numpy()
                            mu_crlb = torch.Tensor([np.NaN]).numpy()
                            mu_rel = torch.Tensor([np.NaN]).numpy()
                    stats['sde_mse'] = sde_mse.copy()
                    stats['sde_rel'] = sde_rel.copy()
                    # compare the estimated mu to the true mu with the affine map q
                    stats['mu_mse'] = mu_residuals.copy()
                    stats['mu_rel'] = mu_rel.copy()
                    stats['mu_crlb'] = mu_crlb.copy()
                else:
                    # Stocks: no ground truth, so all comparisons are NaN.
                    mu_residuals = torch.Tensor([np.NaN]).numpy()
                    mu_crlb = torch.Tensor([np.NaN]).numpy()
                    mu_rel = torch.Tensor([np.NaN]).numpy()
                    stats['sde_mse'] = torch.Tensor([np.NaN]).numpy()
                    stats['sde_rel'] = torch.Tensor([np.NaN]).numpy()
                    # compare the estimated mu to the true mu with the affine map Q
                    stats['mu_mse'] = torch.Tensor([np.NaN]).numpy()
                    stats['mu_rel'] = torch.Tensor([np.NaN]).numpy()
                    stats['mu_crlb'] = torch.Tensor([np.NaN]).numpy()
                # plot and print
                print('Epoch {} iter {}'.format(epoch, idx))
                print('L2 loss {}'.format(l2_loss.item()))
                print('KL loss {}'.format(kl_loss.item()))
                plots_list = [(q_mu[1:]-q_mu[:-1]).detach().cpu().numpy(), mu_hat.detach().cpu().numpy()]
                plot_titles = ['q_mu', 'mu_hat']
                utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'mu_comp.png'), plot_type='plot', axis=True)
            if scheduler:
                # ReduceLROnPlateau needs the metric; other schedulers don't.
                if type(scheduler) == torch.optim.lr_scheduler.ReduceLROnPlateau:
                    scheduler.step(l2_loss)
                else:
                    scheduler.step()
        if (epoch % plot_freq) == 0:
            # save all the networks
            #if len(frames.shape) < 3:
            #    utils.plot_mu_hat(mu, None, z, ts, os.path.join(savepath, 'mu_hat_est.pdf'))
            save_nets(ae, mu, sigma, 'latest')
            # NOTE(review): transformed_xt only exists if the plot branch
            # above has run at least once — confirm plot_train is always set
            # when this path executes.
            with open(os.path.join(savepath, 'latent.pkl'), 'wb') as f:
                #lat_d = {'q_mu' : q_mu.detach().cpu().numpy(), 'ts' : ts, 'xt_orig' : dataset.xt_orig}
                lat_d = {'q_mu' : transformed_xt, 'ts' : ts, 'xt_orig' : train_dataset.xt_orig}
                pickle.dump(lat_d, f)
            if type(sigma) == nn.Parameter:
                print('Update sigma_hat')
                print(sigma)
                stats['sigma_hat'] = (sigma.sort(descending=True)[0]).detach().cpu().numpy()
        if (epoch % plot_freq) == 0 and plot_train:
            '''
            EVAL
            '''
            # with our validataion data, see how well we're predicting
            with torch.no_grad():
                ae.eval()
                # first, compute how well we predict the next step on the validation data
                for idxt, (frames_test, ts_test) in enumerate(val_data):
                    frames_test = frames_test.float().to(device)
                    ts_test = ts_test.float().to(device)
                    kl_loss_test, l2_loss_test,\
                    frames_hat_test, mu_hat_test, q_mu_test, sigma_hat_full, q_sigma_full, \
                    inc_test, z_test = ae.step(frames_test,
                                               ts_test,
                                               dt,
                                               mu,
                                               sigma)
                    losses_valid.append((kl_loss_test.item(), l2_loss_test.item()))
                    q_mu_test = q_mu_test[:,:train_dataset.xt_orig.shape[1]]
                    z_test = z_test[:,:train_dataset.xt_orig.shape[1]]
                    if len(frames_hat_test.shape) < 3 and l2_loss_test < stats['l2_valid']:
                        stats['l2_valid'] = l2_loss_test.item()
                        stats['kl_valid'] = kl_loss_test.item()
                        l2_small_valid = True
                        stats['val_cond_met'] = True
                        save_nets(ae, mu, sigma, 'best_val')
                        plots = [frames_test.cpu(), frames_hat_test.detach().cpu()]
                        names = ['Original', 'Sampled']
                        utils.plot_subplots(plots, names, os.path.join(savepath, 'valid_recon.png'), plot_type='plot')
                    # if the l2 and kl are sufficiently small, save these as our current best networks
                    if ((l2_loss_test < stats['l2_valid'] and epoch > reserve_epoch) or ('dna' in savepath)) and ('z={}'.format(train_dataset.xt_orig.shape[1]) in savepath):
                        stats['val_cond_met'] = True
                        #stats['l2_valid'] = kl_loss_test.item()*l2_loss_test.item()
                        stats['l2_valid'] = l2_loss_test.item()
                        stats['kl_valid'] = kl_loss_test.item()
                        l2_small_valid = True
                        save_nets(ae, mu, sigma, 'best_val')
                        # Compute the mapping over the training data since we want to see the fit within the whole time series
                        frames = torch.Tensor(train_dataset.frames).float().to(device)[:z.shape[0]]
                        ts = torch.Tensor(train_dataset.ts).float().to(device)[:z.shape[0]]
                        kl_loss, l2_loss,\
                        frames_hat, mu_hat, q_mu, sigma_hat_full, q_sigma_full, inc, z = ae.step(frames,
                                                                                                ts,
                                                                                                dt,
                                                                                                mu,
                                                                                                sigma)
                        if 'gbm' in savepath:
                            gbm = True
                        else:
                            gbm = False
                        # compare the estimated mu to the true mu with the affine map Q
                        current_run = train_dataset.xt_orig[:z.shape[0]]
                        scale = train_dataset.xt_orig.max() - train_dataset.xt_orig.min()
                        if gbm:
                            # Geometric Brownian motion: scale in log space.
                            scale = (np.log(train_dataset.xt_orig[:]).max() - np.log(train_dataset.xt_orig[:]).min())
                        if len(frames.shape) < 3:
                            plots = [frames.cpu(), frames_hat.detach().cpu()]
                            names = ['Original', 'Sampled']
                            utils.plot_subplots(plots, names, os.path.join(savepath, 'valid_recon.png'), plot_type='plot')
                            continue
                        if data_params['affine']:
                            transformed_xt, Q, b, sde_mse, sde_rel = utils.calc_affine(current_run,
                                                                                      z.detach().cpu().numpy(),
                                                                                      savepath,
                                                                                      affine=data_params['affine'])
                            mu_mse, mu_rel, mu_crlb = utils.compare_mu2(mu,
                                                                        q_mu,
                                                                        ts,
                                                                        Q,
                                                                        b,
                                                                        dt,
                                                                        train_dataset,
                                                                        os.path.join(savepath,'mu_comp_best_val.png'),
                                                                        affine=data_params['affine'],
                                                                        loss_type=loss_type)
                        else:
                            q_max = q_mu.max()
                            q_min = q_mu.min()
                            if loss_type == 'exact':
                                q_scaled = ((q_mu - q_min ) / (q_max - q_min) * (scale) ).detach().cpu().numpy()
                            else:
                                q_scaled = q_mu.detach().cpu().numpy() / scale
                            transformed_xt, Q, b, sde_mse, sde_rel = utils.calc_affine(
                                current_run,
                                q_scaled,
                                #z.detach().cpu().numpy() / scale,
                                savepath,
                                affine=data_params['affine'],
                                gbm=gbm)
                            mu_mse, mu_rel, mu_crlb = utils.compare_mu2(mu,
                                                                        q_mu,
                                                                        ts,
                                                                        Q,
                                                                        b,
                                                                        dt,
                                                                        train_dataset,
                                                                        os.path.join(savepath,'mu_comp_best_val.png'),
                                                                        affine=data_params['affine'],
                                                                        loss_type=loss_type)
                        stats['mu_mse_val'] = mu_mse.copy()
                        stats['mu_rel_val'] = mu_rel.copy()
                        stats['mu_crlb_val'] = mu_crlb.copy()
                        stats['sde_mse_valid'] = sde_mse.copy()
                        stats['sde_rel_valid'] = sde_rel.copy()
                    else:
                        l2_small_valid = False
                # Plot the sorted diffusion components.
                plt.plot(torch.arange(sigma.shape[0]).detach().cpu().numpy(), (sigma.sort(descending=True)[0]).detach().cpu().numpy())
                plt.savefig(os.path.join(savepath, 'sigma_hat.pdf'))
                plt.close('all')
                if 'dna' in savepath or 'balls' in savepath:
                    stats['val_cond_met'] = True
                    save_nets(ae, mu, sigma, 'best_val')
                # Image grids from the last validation batch.
                im_grid_test = torchvision.utils.make_grid(frames_test[:64].detach().cpu(), pad_value=1, normalize=True)
                im_grid_hat_single_test = torchvision.utils.make_grid(frames_hat_test[:64].detach().cpu(), pad_value=1, normalize=True)
                # sample the frames for the next n images
                _, sampled_frames_test = sample_n_frames(frames_test[:2], ts_test[:2], dt, ae.eval(), mu, sigma)
                _, sampled_frames_test2 = sample_n_frames(frames_test[:2], ts_test[:2], dt, ae.eval(), mu, sigma*2)
                im_grid_hat_test = torchvision.utils.make_grid(sampled_frames_test[:64].detach().cpu(), pad_value=1, normalize=True)
                odd_rows = []
                for row in range(4):
                    odd_rows.append(frames_test[row*8:(row+1)*8])
                    odd_rows.append(sampled_frames_test[row*8:(row+1)*8])
                comp_grid = torchvision.utils.make_grid(torch.cat(odd_rows), pad_value=1, normalize=True)
                plots_list = [comp_grid.cpu().numpy().transpose((1,2,0))]
                plot_titles = ['Comparison']
                utils.plot_subplots(plots_list,
                                    plot_titles,
                                    os.path.join(savepath, 'valid_comparison.png'))
                if val_dataset.xt.shape[1] < 10:
                    utils.calc_affine(
                        val_dataset.xt[:z_test.shape[0]],
                        np.sqrt(dt)*z_test.detach().cpu().numpy(),
                        savepath, suffix='test')
                plots_list = [im_grid_test.numpy().transpose((1,2,0)), im_grid_hat_single_test.numpy().transpose((1,2,0))]
                plot_titles = ['Original','Sampled (single)']
                if l2_small_valid:
                    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'valid_sample_best.png'))
                else:
                    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'valid_sample.png'))
                plots_list = [im_grid_hat_test.numpy().transpose((1,2,0))]
                plot_titles = ['Sampled (trajectory)']
                if l2_small_valid:
                    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'valid_sample_traj_best.png'))
                else:
                    utils.plot_subplots(plots_list, plot_titles, os.path.join(savepath, 'valid_sample_traj.png'))
                if len(sampled_frames_test.shape) > 2:
                    utils.save_gif(sampled_frames_test.detach().cpu(), os.path.join(savepath, 'movies/valid_sample_traj.gif'))
                    utils.save_gif(sampled_frames_test2.detach().cpu(), os.path.join(savepath, 'movies/valid_sample_traj_2.gif'))
        # Loss curves, refreshed every epoch.
        plt.figure(figsize=(10,5))
        plt.subplot(1,2,1)
        plt.title('NLL')
        plt.plot([kp[0] for kp in losses_train])
        plt.subplot(1,2,2)
        plt.title('l2')
        plt.yscale('log')
        plt.plot([kp[1] for kp in losses_train])
        plt.savefig(os.path.join(savepath, 'losses_train.png'))
        plt.close('all')
        plt.figure(figsize=(10,5))
        plt.subplot(1,2,1)
        plt.title('NLL')
        plt.plot([kp[0] for kp in losses_valid])
        plt.subplot(1,2,2)
        plt.title('l2')
        plt.yscale('log')
        plt.plot([kp[1] for kp in losses_valid])
        plt.savefig(os.path.join(savepath, 'losses_valid.png'))
        plt.close('all')
    return stats
def get_parser():
    """Build the command-line parser for this training script.

    The only option is ``-f``/``--file`` (required), the path to the YAML
    experiment definition consumed by ``__main__``.
    """
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    arg_parser = ArgumentParser(description=__doc__,
                                formatter_class=ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("-f", "--file",
                            dest="filename",
                            metavar="FILE",
                            required=True,
                            help="experiment definition file")
    return arg_parser
if __name__ == '__main__':
    import shutil
    args = get_parser().parse_args()
    yaml_filepath = args.filename
    # Load the experiment configuration (safe loader: no arbitrary objects).
    with open(yaml_filepath, 'r') as f:
        cfg = yaml.load(f, yaml.SafeLoader)
    # NOTE(review): `global` at module level is a no-op; savepath/loss_type are
    # plain module globals read elsewhere (e.g. by train()).
    global savepath
    all_stats = {'config':cfg, 'runs':[]}
    # Optional config keys with defaults: 5 runs, 1 try per run.
    try:
        n_runs = cfg['n_runs']
    except KeyError:
        n_runs = 5
    try:
        n_tries = cfg['n_tries']
    except KeyError:
        n_tries = 1
    print(n_tries)
    for run in range(n_runs):
        # Encode the main hyperparameters into the results directory name.
        savepath = 'results/{}_d={}w={}z={}det={}lat={}loss={}sigma={}/run{}'.format(
            cfg['head'],
            cfg['dataset']['name'],
            cfg['ae']['net']['width'],
            cfg['ae']['net']['latent_dim'],
            cfg['ae']['net']['add_det'],
            cfg['sde']['type'],
            cfg['ae']['net']['loss'],
            cfg['ae']['net']['sigma_type'],
            run)
        global loss_type
        loss_type = cfg['ae']['net']['loss']
        #if os.path.isfile(os.path.join(savepath, 'data.pkl')):
        #    os.remove(os.path.join(savepath, 'data.pkl'))
        # Create the output tree: results dir, rendered movies, checkpoints.
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        if not os.path.exists(os.path.join(savepath,'movies')):
            os.makedirs(os.path.join(savepath,'movies'))
        if not os.path.exists(os.path.join(savepath,'saved_nets')):
            os.makedirs(os.path.join(savepath,'saved_nets'))
        # Log both to stdout and to <savepath>/log.txt.
        log_format = "%(asctime)s %(message)s"
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format=log_format,
            datefmt="%m/%d %I:%M:%S %p",
        )
        fh = logging.FileHandler(os.path.join(savepath, "log.txt"))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
        logging.info("config = %s", cfg)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(cfg)
        best_loss = np.Inf
        for t_num in range(n_tries):
            # Re-initialize and retrain until the validation condition is met;
            # 'dna'/'stocks' experiments are accepted unconditionally.
            while True:
                initialized = setup.setup(cfg, savepath)
                stats = train(**initialized)
                val_cond_met = stats['val_cond_met']
                if val_cond_met or 'dna' in cfg['head'] or 'stocks' in cfg['head']:
                    break
            # Snapshot this try's best-validation checkpoints under a _<t_num> suffix.
            src_ae = os.path.join(savepath,'saved_nets/ae_best_val.pth')
            dst_ae = os.path.join(savepath,'saved_nets/ae_best_val_{}.pth'.format(t_num))
            src_mu = os.path.join(savepath,'saved_nets/mu_best_val.pth')
            dst_mu = os.path.join(savepath,'saved_nets/mu_best_val_{}.pth'.format(t_num))
            shutil.copyfile(src_ae, dst_ae)
            shutil.copyfile(src_mu, dst_mu)
            print('=========== End of Training ===========')
            print('Printing results for try {}'.format(t_num))
            print('STAT: L2 on Train: {}'.format(stats['l2']))
            print('STAT: KL on Train: {}'.format(stats['kl']))
            print('STAT: L2 on Validation: {}'.format(stats['l2_valid']))
            print('STAT: KL on Validation: {}'.format(stats['kl_valid']))
            print('STAT: mu mse on Validation: {}'.format(stats['mu_mse']))
            print('STAT: SDE mse on Validation: {}'.format(stats['sde_mse']))
            print('========== End of Results ============')
            # Track the best try (lowest validation KL + L2) under a _bt suffix.
            if stats['kl_valid'] + stats['l2_valid'] < best_loss:
                best_loss = stats['kl_valid'] + stats['l2_valid']
                src_ae = os.path.join(savepath,'saved_nets/ae_best_val.pth')
                dst_ae = os.path.join(savepath,'saved_nets/ae_best_val_bt.pth')
                src_mu = os.path.join(savepath,'saved_nets/mu_best_val.pth')
                dst_mu = os.path.join(savepath,'saved_nets/mu_best_val_bt.pth')
                shutil.copyfile(src_ae, dst_ae)
                shutil.copyfile(src_mu, dst_mu)
            all_stats['runs'].append(stats)
            print(stats)
            # Persist cumulative stats after every try so crashes lose nothing.
            with open(os.path.join(savepath,'saved_stats.pkl'), 'wb') as f:
                pickle.dump(all_stats, f)
| alluly/ident-latent-sde | train.py | train.py | py | 31,019 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"... |
71738073384 | import unittest
from selenium import webdriver
from data.constants import Constants
from helpers.keywords import Helpers
from pom.pages.login import Login
from pom.pages.project import Project
from pom.locators.base_loc import BaseLoc
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from pom.pages.home import Home
from pom.locators.sidebar_loc import SideBarLoc
class ProjectTest(unittest.TestCase):
    """End-to-end UI tests covering project creation and deletion."""

    def setUp(self):
        """Start a headless Chrome session, sign in, and open the inbox."""
        chrome_options = Options()
        chrome_options.headless = True
        print("\n========== PROJECTS TESTS ==========")
        self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
        self.driver.maximize_window()
        self.driver.get(Constants.url["prod"])
        self.driver.find_element(*BaseLoc.sign_in_lnk).click()
        real_user = Constants.credentials["users"]["real"]
        Login.login_form(self, real_user["user"], real_user["pass"])
        Helpers.click_visible_element(self, SideBarLoc.inbox_li_btn)

    def test_create_project(self):
        """A single non-favourite project with the board ("panel") view."""
        data = Constants.project_data
        Project.create_projects(self, 1, data["name"], False, data["view"]["panel"])
        Home.validate_project(self)

    def test_create_project_fav(self):
        """A single favourite project using the list view."""
        data = Constants.project_data
        Project.create_projects(self, 1, data["name"], True, data["view"]["list"])
        Home.validate_project(self)

    def test_create_projects(self):
        """Three favourite projects created in a row, all using the list view."""
        data = Constants.project_data
        Project.create_projects(self, 3, data["name"], True, data["view"]["list"])
        Home.validate_project(self)

    def test_delete_all_projects(self):
        """Every existing project is removed."""
        Project.delete_all_projects(self)

    def tearDown(self):
        """Pause briefly so the last action settles, then close the browser."""
        Helpers.wait_seconds(self, 3)
        self.driver.quit()
# Allow running this test module directly with `python test_projects.py`.
if __name__ == "__main__":
    unittest.main()
| jaime-contreras-98/todoist-python-selenium | tests/e2e/test/test_projects.py | test_projects.py | py | 1,846 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 20,
"usage_type": "call"
},... |
3650801558 | from flask import Blueprint, jsonify, g, request
from wrappers.auth_required import auth_required, rate_limited
from models.jobs import TOPJob
from utils.json_helper import jsonify_payload
bp = Blueprint("management", __name__, url_prefix="/management")
@bp.route("/jobs", methods=["GET"])
@auth_required
def get_jobs():
    """Return all jobs belonging to the authenticated user as JSON.

    Fixes: the original debug-printed the user id and a full serialization of
    every job (noisy, leaks data) and therefore serialized each job twice.
    """
    jobs = TOPJob.find_by_user_id(g.user_id)
    serialized_jobs = [job.serialize() for job in jobs]
    return jsonify_payload({'jobs': serialized_jobs})
@bp.route("/jobs", methods=["POST"])
@auth_required
@rate_limited
def create_job():
    """Create a job, or update an existing one when 'job_id' is supplied.

    Expects a JSON body with 'job_name', 'job_description' and an optional
    'job_id'. Returns the serialized job, or 404 for an unknown job_id.
    (Debug prints of the raw payload and subscription data were removed.)
    """
    payload = request.get_json()
    job_name = payload.get('job_name')
    job_description = payload.get('job_description')
    job_id = payload.get('job_id')
    if not job_id:
        # No id supplied: create and persist a new job for this user.
        job = TOPJob(job_name, job_description, g.user_id)
        job.save()
    else:
        job = TOPJob.find_by_id(job_id)
        if job is None:
            # Previously this fell through to job.update(...) and crashed with
            # an AttributeError (HTTP 500) for unknown ids — assuming
            # find_by_id returns None when nothing matches (TODO confirm).
            return jsonify({'error': 'job not found'}), 404
        job.update(job_name, job_description)
    return jsonify_payload({'job': job.serialize()})
return jsonify_payload({'job': job.serialize()}) | matthewlouisbrockman/the_one_plugin | backend/management/management_routes.py | management_routes.py | py | 1,181 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.g.user_id",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.jobs.TOPJob.find_b... |
496529237 | import datetime
import sys
import uuid
import pandas as pd
import pytest
from dagster_gcp import (
bigquery_resource,
bq_create_dataset,
bq_delete_dataset,
bq_solid_for_queries,
import_df_to_bq,
import_gcs_paths_to_bq,
)
from dagster_pandas import DataFrame
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
from dagster import (
DagsterExecutionStepExecutionError,
InputDefinition,
List,
ModeDefinition,
Nothing,
OutputDefinition,
Path,
execute_pipeline,
pipeline,
solid,
)
from dagster.config.validate import validate_config
from dagster.core.definitions import create_environment_type
from dagster.seven import mock
def dataset_exists(name):
    """Return True iff the BigQuery dataset *name* exists.

    Used by the tests to verify that cleanup ran and no datasets leaked.
    """
    client = bigquery.Client()
    try:
        client.get_dataset(client.dataset(name))
    except NotFound:
        return False
    return True
def get_dataset():
    """Return a unique dataset name of the form test_ds_<uuid with underscores>."""
    suffix = str(uuid.uuid4()).replace('-', '_')
    return 'test_ds_{}'.format(suffix)
def bq_modes():
    """Mode definitions wiring the 'bigquery' resource into the test pipelines."""
    return [ModeDefinition(resource_defs={'bigquery': bigquery_resource})]
def test_simple_queries():
    """Run two read-only queries through a one-solid pipeline and check results."""
    @pipeline(mode_defs=bq_modes())
    def bq_pipeline():
        bq_solid_for_queries(
            [
                # Toy example query
                'SELECT 1 AS field1, 2 AS field2;',
                # Test access of public BQ historical dataset (only processes ~2MB here)
                # pylint: disable=line-too-long
                '''SELECT *
                FROM `weathersource-com.pub_weather_data_samples.sample_weather_history_anomaly_us_zipcode_daily`
                ORDER BY postal_code ASC, date_valid_std ASC
                LIMIT 1''',
            ]
        ).alias('bq_query_solid')()
    res = execute_pipeline(bq_pipeline).result_for_solid('bq_query_solid')
    assert res.success
    # The solid returns one DataFrame per query, in query order.
    values = res.output_value()
    for df in values:
        assert isinstance(df, pd.DataFrame)
    assert values[0].to_dict('list') == {'field1': [1], 'field2': [2]}
    # Exact expected first row of the public weather sample, pinned by ORDER BY.
    assert values[1].to_dict('list') == {
        'postal_code': ['02101'],
        'country': ['US'],
        'date_valid_std': [datetime.date(2014, 1, 1)],
        'doy_std': [1],
        'avg_temperature_air_2m_f': [25.05],
        'avg_temperature_anomaly_air_2m_f': [-7.81],
        'tot_precipitation_in': [0.0],
        'tot_precipitation_anomaly_in': [-0.28],
        'tot_snowfall_in': [0.0],
        'tot_snowfall_anomaly_in': [-1.36],
        'avg_wind_speed_10m_mph': [7.91],
        'avg_wind_speed_10m_anomaly_mph': [-1.85],
    }
# pylint: disable=line-too-long
def test_bad_config():
    """Each invalid query_job_config fragment must yield its exact validation error."""
    # Pairs of (bad config fragment, expected first validation error message).
    configs_and_expected_errors = [
        (
            # Create disposition must match enum values
            {'create_disposition': 'this is not a valid create disposition'},
            'Value not in enum type BQCreateDisposition',
        ),
        (
            # Dataset must be of form project_name.dataset_name
            {'default_dataset': 'this is not a valid dataset'},
            'Value at path root:solids:test:config:query_job_config:default_dataset is not valid. Expected "_Dataset"',
        ),
        (
            # Table must be of form project_name.dataset_name.table_name
            {'destination': 'this is not a valid table'},
            'Value at path root:solids:test:config:query_job_config:destination is not valid. Expected "_Table"',
        ),
        (
            # Priority must match enum values
            {'priority': 'this is not a valid priority'},
            'Value not in enum type BQPriority',
        ),
        (
            # Schema update options must be a list
            {'schema_update_options': 'this is not valid schema update options'},
            'Value at path root:solids:test:config:query_job_config:schema_update_options must be list. Expected: [BQSchemaUpdateOption]',
        ),
        (
            {'schema_update_options': ['this is not valid schema update options']},
            'Value not in enum type BQSchemaUpdateOption',
        ),
        (
            {'write_disposition': 'this is not a valid write disposition'},
            'Value not in enum type BQWriteDisposition',
        ),
    ]

    @pipeline(mode_defs=bq_modes())
    def test_config_pipeline():
        bq_solid_for_queries(['SELECT 1']).alias('test')()

    # Validate each fragment against the pipeline's environment type without running it.
    env_type = create_environment_type(test_config_pipeline)
    for config_fragment, error_message in configs_and_expected_errors:
        config = {'solids': {'test': {'config': {'query_job_config': config_fragment}}}}
        result = validate_config(env_type, config)
        assert result.errors[0].message == error_message
def test_create_delete_dataset():
    """Exercise bq_create_dataset / bq_delete_dataset including the failure paths."""
    dataset = get_dataset()

    @pipeline(mode_defs=bq_modes())
    def create_pipeline():
        bq_create_dataset.alias('create_solid')()

    # Creating with exists_ok=True succeeds even on repeat.
    config = {'solids': {'create_solid': {'config': {'dataset': dataset, 'exists_ok': True}}}}
    assert execute_pipeline(create_pipeline, config).result_for_solid('create_solid').success

    # Creating again with exists_ok=False must fail with the exact message.
    config = {'solids': {'create_solid': {'config': {'dataset': dataset, 'exists_ok': False}}}}
    with pytest.raises(DagsterExecutionStepExecutionError) as exc_info:
        execute_pipeline(create_pipeline, config)
    assert 'Dataset "%s" already exists and exists_ok is false' % dataset in str(
        exc_info.value.user_exception
    )

    @pipeline(mode_defs=bq_modes())
    def delete_pipeline():
        bq_delete_dataset.alias('delete_solid')()

    # Delete should succeed
    config = {'solids': {'delete_solid': {'config': {'dataset': dataset}}}}
    assert execute_pipeline(delete_pipeline, config).result_for_solid('delete_solid').success

    # Delete non-existent with "not_found_ok" should succeed
    config = {'solids': {'delete_solid': {'config': {'dataset': dataset, 'not_found_ok': True}}}}
    assert execute_pipeline(delete_pipeline, config).result_for_solid('delete_solid').success

    # Delete non-existent with "not_found_ok" False should fail
    config = {'solids': {'delete_solid': {'config': {'dataset': dataset, 'not_found_ok': False}}}}
    with pytest.raises(DagsterExecutionStepExecutionError) as exc_info:
        execute_pipeline(delete_pipeline, config)
    assert 'Dataset "%s" does not exist and not_found_ok is false' % dataset in str(
        exc_info.value.user_exception
    )

    # Ensure the test left no dataset behind.
    assert not dataset_exists(dataset)
# See: https://github.com/dagster-io/dagster/issues/1711
@pytest.mark.skip
def test_pd_df_load():
    """Round-trip a pandas DataFrame through BigQuery: create, load, query, delete."""
    dataset = get_dataset()
    table = '%s.%s' % (dataset, 'df')
    test_df = pd.DataFrame({'num1': [1, 3], 'num2': [2, 4]})

    create_solid = bq_create_dataset.alias('create_solid')
    load_solid = import_df_to_bq.alias('load_solid')
    query_solid = bq_solid_for_queries(['SELECT num1, num2 FROM %s' % table]).alias('query_solid')
    delete_solid = bq_delete_dataset.alias('delete_solid')

    # Feed the DataFrame into the pipeline as a solid output.
    @solid(
        input_defs=[InputDefinition('success', Nothing)], output_defs=[OutputDefinition(DataFrame)]
    )
    def return_df(_context):  # pylint: disable=unused-argument
        return test_df

    config = {
        'solids': {
            'create_solid': {'config': {'dataset': dataset, 'exists_ok': True}},
            'load_solid': {'config': {'destination': table}},
            'delete_solid': {'config': {'dataset': dataset, 'delete_contents': True}},
        }
    }

    @pipeline(mode_defs=bq_modes())
    def bq_pipeline():
        delete_solid(query_solid(load_solid(return_df(create_solid()))))

    result = execute_pipeline(bq_pipeline, config)
    assert result.success

    # The queried table must contain exactly the loaded DataFrame.
    values = result.result_for_solid('query_solid').output_value()
    assert values[0].to_dict() == test_df.to_dict()

    # BQ loads should throw an exception if pyarrow and fastparquet aren't available
    with mock.patch.dict(sys.modules, {'pyarrow': None, 'fastparquet': None}):
        with pytest.raises(DagsterExecutionStepExecutionError) as exc_info:
            result = execute_pipeline(bq_pipeline, config)
        assert (
            'loading data to BigQuery from pandas DataFrames requires either pyarrow or fastparquet'
            ' to be installed' in str(exc_info.value.user_exception)
        )

    # The failed run above may have left the dataset behind; clean it up.
    cleanup_config = {
        'solids': {'delete_solid': {'config': {'dataset': dataset, 'delete_contents': True}}}
    }

    @pipeline(mode_defs=bq_modes())
    def cleanup():
        delete_solid()

    assert execute_pipeline(cleanup, cleanup_config).success
    assert not dataset_exists(dataset)
# See: https://github.com/dagster-io/dagster/issues/1711
@pytest.mark.skip
def test_gcs_load():
    """Load a public GCS CSV into BigQuery, query one row, then delete the dataset."""
    dataset = get_dataset()
    table = '%s.%s' % (dataset, 'df')

    create_solid = bq_create_dataset.alias('create_solid')
    query_solid = bq_solid_for_queries(
        [
            'SELECT string_field_0, string_field_1 FROM %s ORDER BY string_field_0 ASC LIMIT 1'
            % table
        ]
    ).alias('query_solid')
    delete_solid = bq_delete_dataset.alias('delete_solid')

    # Supplies the GCS path(s) to import as a solid output.
    @solid(
        input_defs=[InputDefinition('success', Nothing)], output_defs=[OutputDefinition(List[Path])]
    )
    def return_gcs_uri(_context):  # pylint: disable=unused-argument
        return ["gs://cloud-samples-data/bigquery/us-states/us-states.csv"]

    config = {
        'solids': {
            'create_solid': {'config': {'dataset': dataset, 'exists_ok': True}},
            'import_gcs_paths_to_bq': {
                'config': {
                    'destination': table,
                    'load_job_config': {
                        'autodetect': True,
                        'skip_leading_rows': 1,
                        'source_format': 'CSV',
                        'write_disposition': 'WRITE_TRUNCATE',
                    },
                }
            },
            'delete_solid': {'config': {'dataset': dataset, 'delete_contents': True}},
        }
    }

    @pipeline(mode_defs=bq_modes())
    def bq_pipeline():
        delete_solid(query_solid(import_gcs_paths_to_bq(return_gcs_uri(create_solid()))))

    result = execute_pipeline(bq_pipeline, config)
    assert result.success

    # First state alphabetically in the sample CSV.
    values = result.result_for_solid('query_solid').output_value()
    assert values[0].to_dict() == {'string_field_0': {0: 'Alabama'}, 'string_field_1': {0: 'AL'}}

    assert not dataset_exists(dataset)
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-gcp/dagster_gcp_tests/bigquery_tests/test_solids.py | test_solids.py | py | 10,520 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "google.cloud.exceptions.NotFound",
"line_number": 45,
"usage_type": "name"
},
{
... |
27370619161 | import matplotlib.pyplot as plt
from random_walk import RandomWalk
# cd Documents/python_work/data_visualization
# Keep generating and showing new random walks until the user declines.
while True:
    # Create instance of RandomWalk.
    rw = RandomWalk(5000)
    rw.fill_walk()
    # Set the size of the interactive window.
    # NOTE(review): figures are created each iteration but never closed
    # (plt.close); memory grows with every walk — consider closing after show().
    plt.figure(dpi=128, figsize=(10, 5))
    # Plot random walk with gradient.
    # NOTE(review): point_numbers is only used by the commented-out gradient
    # scatter below; it is unused while plt.plot is the active renderer.
    point_numbers = list(range(rw.num_points))
    # plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues,
    # s=1)
    plt.plot(rw.x_values, rw.y_values, c='blue')
    # Start and end points (green and red, respectively).
    plt.scatter(0, 0, c='green', s=100)
    plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', s=100)
    # Remove axes.
    # plt.axes().get_xaxis().set_visible(False)
    # plt.axes().get_yaxis().set_visible(False)
    plt.show()
    keep_running = input("Make another walk? (y/n): ")
    if keep_running == 'n':
        break
| nazeern/python_crash_course | data_visualization/rw_visual.py | rw_visual.py | py | 923 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random_walk.RandomWalk",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matp... |
9369243974 | import imaplib
import email
from time import sleep
from random import randint
import importlib
from src.Analyser import mark_email
from src.Email import Email
import numpy as np
from goto import with_goto
from src.save import save
ai = importlib.import_module("Neural_Network", package=None)
def efface_old_mail():
    """Delete every leftover emailX.txt file, X starting at 1.

    New files will be created later, with numbering restarting at 1.
    """
    import os
    index = 1
    while os.path.exists("email" + str(index) + ".txt"):
        os.remove("email" + str(index) + ".txt")
        index += 1
    print("Fichiers supprimés")
# Module-level counters (run() shadows these with locals of the same name).
old_mes_nb = -1
x = 1
# Remove emailN.txt leftovers from a previous session at import time.
efface_old_mail()
@with_goto
def run():
    """Monitor a Gmail inbox over IMAP and triage each new message.

    Each new email is dumped to emailX.txt, scored by the neural network, and
    moved to the 'sur' (safe), 'moyen' (uncertain) or 'danger' folder.
    Control flow uses the `goto` library: label.connexion re-prompts for
    credentials, label.start refreshes the IMAP connection after a timeout,
    label.end logs out on Ctrl-C.
    """
    print('start')
    label.connexion
    ### Prompt for the mail address to monitor and its password ###
    connexion_pos = False
    while not connexion_pos:
        adresse = input("Adresse mail: ")
        mdp = input("Mot de passe: ")
        if("@gmail.com" in adresse):
            connexion_pos = True
        else:
            print("Adresse mail non valide\n")
            continue
    label.start
    try:
        old_mes_nb = -1
        x = 1
        ### Connect to the mailbox ###
        try :
            mail = imaplib.IMAP4_SSL('imap.gmail.com')
            # (A hard-coded test credential that was committed here was removed.)
            mail.login(adresse, mdp)
        except Exception:
            ### Connection failed: jump back to the credential prompt ###
            print("Echec connexion\n")
            goto.connexion
        while True:
            mail.list()
            mail.select('inbox')
            result, data = mail.uid('search', None, "ALL")
            i = len(data[0].split())
            new_mes_nb = i
            if (old_mes_nb == -1):
                old_mes_nb = new_mes_nb
            ### A new message arrived in the mailbox ###
            if (new_mes_nb > old_mes_nb):
                print("\n---NOUVEAU MESSAGE : %i---" % x)
                latest_email_uid = data[0].split()[new_mes_nb - 1]
                result, email_data = mail.uid('fetch', latest_email_uid, '(RFC822)')
                raw_email = email_data[0][1]
                raw_email_string = raw_email.decode('utf-8')
                email_message = email.message_from_string(raw_email_string)
                ### Dump every MIME part to emailX.txt, then build an Email
                ### object that reads its metadata back from that file ###
                for part in email_message.walk():
                    save_string = r"email" + str(x) + ".txt"
                    myfile = open(save_string, 'a')
                    myfile.write(str(part))
                    mailo = Email(save_string)
                    myfile.close()
                ### Move the email into the 'traitement' (processing) folder ###
                cible_dossier = 'traitement'
                result_move, err_mes = mail.uid('move', latest_email_uid, cible_dossier)
                if (result_move == 'OK'):
                    print("Mail déplacé avec succès")
                else:
                    print(err_mes)
                mail.select(cible_dossier)
                result, data = mail.uid('search', None, "ALL")
                latest_email_uid = data[0].split()[- 1]
                ### Analyse the message and score its danger level ###
                mark = mark_email(mailo)
                marks = np.array([mark])
                sortie_traitement = ai.analyse_mail(marks)[0][0]
                save(mailo, marks=mark, grade=sortie_traitement.item())
                print("Résultat traitement :", sortie_traitement)
                if (sortie_traitement >= 0.6): # score >= 0.6: judged safe ('sur')
                    result_move, err_mes = mail.uid('move', latest_email_uid, "sur")
                    if (result_move == 'OK'):
                        print("Mail déplacé dans sur")
                    else:
                        print(err_mes)
                elif (sortie_traitement >= 0.4 and sortie_traitement < 0.6): # uncertain band
                    result_move, err_mes = mail.uid('move', latest_email_uid, "moyen")
                    if (result_move == 'OK'):
                        print("Mail déplacé dans moyen")
                    else:
                        print(err_mes)
                else: # score < 0.4: dangerous message
                    result_move, err_mes = mail.uid('move', latest_email_uid, "danger")
                    if (result_move == 'OK'):
                        print("Mail déplacé dans danger")
                    else:
                        print(err_mes)
                x += 1
                old_mes_nb = new_mes_nb
                print("Analyse effectuée")
            elif (new_mes_nb < old_mes_nb): # messages were deleted from the inbox
                old_mes_nb = new_mes_nb
    except TimeoutError: # IMAP timeout reached: jump to label start to refresh the connection
        goto.start
    except KeyboardInterrupt:
        goto.end
    label.end
    mail.logout()
    print("Good bye")
run()
| PtspluS/Phising-Analising | src/Recevoir_email_complet.py | Recevoir_email_complet.py | py | 5,361 | python | fr | code | 1 | github-code | 36 | [
{
"api_name": "importlib.import_module",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"... |
28986176573 | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.358
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.bulk_user_delete_details import BulkUserDeleteDetails # noqa: E501
from yapily.rest import ApiException
class TestBulkUserDeleteDetails(unittest.TestCase):
    """BulkUserDeleteDetails unit test stubs (OpenAPI-generated)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a BulkUserDeleteDetails fixture.

        include_optional: when False only required params are set; when True
        both required and optional params are populated with sample values.
        """
        # model = yapily.models.bulk_user_delete_details.BulkUserDeleteDetails()  # noqa: E501
        if include_optional :
            return BulkUserDeleteDetails(
                id = '0',
                invalid_application_user_ids = [
                    '0'
                ],
                invalid_user_uuids = [
                    '0'
                ],
                status = 'IN_PROGRESS',
                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                users = [
                    yapily.models.user_delete_response.UserDeleteResponse(
                        id = '0',
                        delete_status = 'SUCCESS',
                        creation_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        user_consents = [
                            yapily.models.consent_delete_response.ConsentDeleteResponse(
                                id = '0',
                                delete_status = 'SUCCESS',
                                institution_id = '0',
                                institution_consent_id = '0',
                                creation_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
                            ], )
                    ],
                links = {
                    'key' : '0'
                    }
            )
        else :
            return BulkUserDeleteDetails(
        )

    def testBulkUserDeleteDetails(self):
        """Smoke test: both fixture variants construct without raising."""
        # The instances are intentionally unused; construction is the test.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| alexdicodi/yapily-sdk-python | sdk/test/test_bulk_user_delete_details.py | test_bulk_user_delete_details.py | py | 2,749 | python | en | code | null | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "yapily.models.bulk_user_delete_details.BulkUserDeleteDetails",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 47,
"usag... |
70781957544 | from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from devup.views import UpList, UpDetail, UpCreate, UpUpdate
# URL namespace: reverse these as 'devup:<name>'.
app_name = 'devup'
urlpatterns = [
    url(r'^up_list$', UpList.as_view(), name='up_list'),
    url(r'^up_create$', UpCreate.as_view(), name='up_create'),
    # Both 'up_detail' and the catch-all 'detail' below route to UpDetail.
    url(r'^up_detail/(?P<pk>[-\w]+)/$', UpDetail.as_view(), name='up_detail'),
    url(r'^(?P<pk>[-\w]+)/$', UpDetail.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/update$', UpUpdate.as_view(), name='update'),
]
| maherrub/aot | devup/urls.py | urls.py | py | 539 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "devup.views.UpList.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "devup.views.UpList",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "djang... |
71148994983 | from utils import connector
async def declare_queue(queue_name, durable=False):
    """Declare *queue_name* on a freshly obtained channel.

    durable: when True the queue survives broker restarts.
    """
    channel = await connector.Connector().get_channel()
    await channel.queue_declare(
        queue=queue_name,
        durable=durable,
    )
async def bind_queue(queue_name, exchange_name, routing_key):
    """Bind *queue_name* to *exchange_name* under *routing_key*."""
    channel = await connector.Connector().get_channel()
    await channel.queue_bind(
        queue=queue_name,
        exchange=exchange_name,
        routing_key=routing_key,
    )
async def declare_exchange(
    exchange_name,
    exchange_type="direct",
    durable=False,
):
    """Declare *exchange_name* of the given type on a fresh channel.

    passive=False: actually create the exchange. (passive=True would instead
    raise if the exchange does not already exist.)
    internal=False: publishers may use the exchange directly. Internal
    exchanges only receive messages via bindings from other exchanges —
    hint: that mode could serve as a "dead-letter-exchange" for queues.
    """
    channel = await connector.Connector().get_channel()
    await channel.exchange_declare(
        exchange=exchange_name,
        exchange_type=exchange_type,
        durable=durable,
        passive=False,
        internal=False,
    )
| Yuriy-Leonov/python-rabbitmq-example | utils/funcs.py | funcs.py | py | 1,256 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.connector.Connector",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "utils.connector",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "utils.connector.Connector",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "uti... |
24304374080 | from argparse import ArgumentParser
from gitrello import Gitrello
import github
import trello
import settings
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--pr_id', required=True)
    parser.add_argument('--repo', required=True)
    args = parser.parse_args()
    # Authenticated GitHub user and Trello board, both configured in settings.
    g = github.Github(settings.GITHUB_TOKEN).get_user()
    client = trello.TrelloClient(api_key=settings.API_KEY, token=settings.API_TOKEN)
    board = client.get_board(settings.BOARD_ID)
    # NOTE(review): raises IndexError when the user has no repo named --repo.
    repo = [x for x in g.get_repos() if x.name == args.repo][0]
    pull = repo.get_pull(int(args.pr_id))
    # Create a Trello card on the board for this pull request.
    gitrello = Gitrello(pull, board)
    card = gitrello.create_card()
| jakobpederson/gitrello | convert_pr.py | convert_pr.py | py | 667 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "github.Github",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "settings.GITHUB_TOKEN",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "trel... |
13231147951 | import json
import os
import subprocess
import sys
from pathlib import Path
import youtube_dl
# youtube-dl options used for all audio downloads: grab best audio and
# post-process to 192 kbps MP3.
ydl_opts_download = {
    "format": "bestaudio/best",
    "cachedir": False,
    # BUG FIX: the template was "%(id)s%(ext)s", which glued the extension
    # straight onto the video id (e.g. "abc123webm"); the dot was missing.
    "outtmpl": "%(id)s.%(ext)s",
    "postprocessors": [
        {
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
            "preferredquality": "192",
        }
    ],
}
def download_single_yt(url_list):
    """Download each YouTube URL in *url_list* as audio, retrying failures.

    Returns 0 once every URL downloaded. Fixes over the original:
    - `except Exception` instead of a bare `except:` (which also swallowed
      KeyboardInterrupt/SystemExit);
    - the recursive retry's result is returned instead of discarded;
    - if a pass makes no progress (every URL failed again), give up instead
      of recursing forever; the abandoned URLs are returned.
    """
    skipped = []
    for url in url_list:
        try:
            with youtube_dl.YoutubeDL(ydl_opts_download) as ydl:
                ydl.download([url])
        except Exception:
            skipped.append(url)
    if not skipped:
        return 0
    if len(skipped) == len(url_list):
        # No URL succeeded this pass; retrying the same list would never end.
        print("giving up on:", skipped)
        return skipped
    return download_single_yt(skipped)
def ytdownload(link):
    """Resolve *link* with youtube-dl and download every entry of a playlist.

    NOTE(review): a plain single-video link (no "entries" key in the info
    dict) is only probed, never downloaded — confirm that is intended.
    NOTE(review): this outtmpl also lacks the '.' separator
    ("%(id)s.%(ext)s"), matching the bug in ydl_opts_download.
    """
    with youtube_dl.YoutubeDL(
        {
            "outtmpl": "%(id)s%(ext)s",
            "quiet": True,
        }
    ) as ydl:
        # Only fetch metadata here; actual downloads go through download_single_yt.
        result = ydl.extract_info(link, download=False)
    if "entries" in result:
        # Can be a playlist or a list of videos
        video = result["entries"]
        playlist_urls = [
            result["entries"][i]["webpage_url"] for i, item in enumerate(video)
        ]
        download_single_yt(playlist_urls)
    print("-" * 15)
def download(title, link, out_folder, i):
    """Download track *i* titled *title* from *link* into out_folder/NN - title/.

    Creates and enters a per-track directory, runs the appropriate downloader
    (spotdl for Spotify links, youtube-dl for YouTube links), then restores
    the working directory — now via try/finally, so an exception no longer
    leaves the process stranded inside the track directory.
    """
    print("downloading ", title, " OTS")
    os.chdir("./" + out_folder)
    try:
        # Zero-pad single-digit indices so directories sort correctly.
        fname = ("0" + str(i) if i < 10 else str(i)) + " - " + title
        os.mkdir(fname)
        os.chdir("./" + fname)
        try:
            if "spotify" in link.lower():
                # BUG FIX: the flag and its value must be separate argv items;
                # the original passed the single string "--output-format wav",
                # which spotdl rejects as an unknown option.
                subprocess.check_call(["spotdl", link, "--output-format", "wav"])
            elif "youtube" in link.lower():
                ytdownload(link)
        finally:
            os.chdir("..")
    finally:
        os.chdir("..")
def download_all(json_source, out_folder):
    """Download the soundtrack of every movie listed in *json_source*.

    json_source: path to a JSON list of {"title": ..., "link": ...} objects.
    out_folder: destination directory, created if missing.
    """
    print("open file...")
    # BUG FIX: the file handle was opened and never closed; use a context manager.
    with open(json_source) as file:
        movies = json.load(file)
    print("creating main folder...")
    ost = Path(out_folder)
    if not ost.exists():
        ost.mkdir()
    for i, movie in enumerate(movies):
        # Replace spaces (presumably to keep folder names/CLI args simple — TODO confirm).
        link = movie["link"].replace(" ", "_")
        title = movie["title"].replace(" ", "_")
        download(title, link, out_folder, i + 1)
    print("--- DONE ---")
# download_all("ESC.json", "ESC")
| RiccardoPeron/competitions-music-analysis | Functions/downloader.py | downloader.py | py | 2,304 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "youtube_dl.YoutubeDL",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "youtube_dl.YoutubeDL",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"li... |
12673510789 | from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import torchvision.transforms.functional as TF
from PIL import Image
from src.draw_utils import save_img_with_kps
from src.readers.image_reader import ImageReader
from typing import Dict
from torch import Tensor
from typing import Tuple
class Phase0PointsDataset(Dataset):
    """Dataset of phase-0 images with corner keypoints as regression targets."""

    # Per-channel normalization constants applied by TRANSFORMS.
    MEAN = [0.5, 0.5, 0.5]
    STD = [0.2, 0.2, 0.2]
    # Target size passed to transforms.Resize.
    IMG_SIZE = 768
    # Shared PIL-image -> normalized float tensor pipeline.
    TRANSFORMS = transforms.Compose(
        [
            transforms.Resize(IMG_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(mean=MEAN, std=STD),
        ]
    )
def __init__(self, reader: ImageReader, augment: bool = False):
assert isinstance(reader, ImageReader)
self.reader = reader
self.augment = augment
    def __len__(self):
        """Number of samples, delegated to the underlying reader."""
        return len(self.reader)
    def __getitem__(self, idx: int) -> Dict[str, Tensor]:
        """Return {'img': normalized image tensor, 'kps': flattened keypoints}.

        Keypoints are divided by the original (width, height) and flattened,
        so they come back as relative coordinates in [0, 1] (presumably 8
        values for 4 corner points — confirm against the reader's data).
        """
        sample = self.reader[idx]
        img = sample.phase_0_image
        kps = torch.tensor(sample.phase_0_points).to(dtype=torch.float32)
        if self.augment:
            # Each augmentation fires independently with probability 0.5 and
            # transforms image and keypoints together.
            if np.random.rand() < 0.5:
                img, kps = Phase0PointsDataset.color_augment(img, kps)
            if np.random.rand() < 0.5:
                img, kps = Phase0PointsDataset.rotate(img, kps)
            if np.random.rand() < 0.5:
                img, kps = Phase0PointsDataset.perspective_augment(img, kps)
            if np.random.rand() < 0.5:
                img, kps = Phase0PointsDataset.crop_augment(img, kps)
        # Normalize pixel coordinates by the (possibly augmented) image size.
        kps = kps / torch.tensor([img.width, img.height])
        kps = kps.flatten()
        img_tensor = Phase0PointsDataset.TRANSFORMS(img)
        sample_t = {
            "img": img_tensor,
            "kps": kps,
        }
        return sample_t
    @staticmethod
    def color_augment(img: Image.Image, kps: Tensor) -> Tuple[Image.Image, Tensor]:
        """Randomly jitter brightness/contrast/gamma/hue/saturation.

        Purely photometric: keypoints are returned unchanged.
        """
        img = TF.adjust_brightness(img, 0.7 + np.random.rand() * 1.5)
        img = TF.adjust_contrast(img, 0.5 + np.random.rand() * 1.5)
        img = TF.adjust_gamma(img, gamma=0.5 + np.random.rand(), gain = 0.5 + np.random.rand())
        img = TF.adjust_hue(img, -0.5 + np.random.rand())
        img = TF.adjust_saturation(img, np.random.rand() * 1.5)
        return img, kps
@staticmethod
def rotate(img: Image.Image, kps: Tensor) -> Tuple[Image.Image, Tensor]:
rotation_angle_deg = np.random.rand() * 30 - 15
rotation_angle_rad = np.deg2rad(rotation_angle_deg)
rotation_matrix = np.array(
[
[np.cos(rotation_angle_rad), -np.sin(rotation_angle_rad)],
[np.sin(rotation_angle_rad), np.cos(rotation_angle_rad)],
]
)
rot_torch = torch.from_numpy(rotation_matrix.astype(np.float32))
img = TF.rotate(img, np.rad2deg(rotation_angle_rad))
center = torch.tensor([img.width, img.height]) / 2
kps = kps - center
kps = torch.matmul(kps, rot_torch)
kps = kps + center
return img, kps
@staticmethod
def perspective_augment(img: Image.Image, kps: Tensor) -> Tuple[Image.Image, Tensor]:
topleft = kps[0]
topright = kps[1]
bottomleft = kps[2]
bottomright = kps[3]
startpoints = [
topleft.to(dtype=torch.int32).tolist(),
topright.to(dtype=torch.int32).tolist(),
bottomright.to(dtype=torch.int32).tolist(),
bottomleft.to(dtype=torch.int32).tolist(),
]
a = min([torch.linalg.norm(topleft - topright) * 0.1, torch.linalg.norm(topleft - bottomleft) * 0.1])
new_topleft = topleft + (-a + np.random.rand() * 2*a)
new_topleft = torch.clip(new_topleft, torch.tensor([0, 0]), torch.tensor([img.width, img.height]))
new_topright = topright + (-a + np.random.rand() * 2*a)
new_topright = torch.clip(new_topright, torch.tensor([0, 0]), torch.tensor([img.width, img.height]))
new_bottomleft = bottomleft + (-a + np.random.rand() * 2*a)
new_bottomleft = torch.clip(new_bottomleft, torch.tensor([0, 0]), torch.tensor([img.width, img.height]))
new_bottomright = bottomright + (-a + np.random.rand() * 2*a)
new_bottomright = torch.clip(new_bottomright, torch.tensor([0, 0]), torch.tensor([img.width, img.height]))
endpoints = [
new_topleft.to(dtype=torch.int32).tolist(),
new_topright.to(dtype=torch.int32).tolist(),
new_bottomright.to(dtype=torch.int32).tolist(),
new_bottomleft.to(dtype=torch.int32).tolist(),
]
img = transforms.functional.perspective(img, startpoints, endpoints)
kps = torch.stack([new_topleft, new_topright, new_bottomleft, new_bottomright])
return img, kps
@staticmethod
def crop_augment(img: Image.Image, kps: Tensor) -> Tuple[Image.Image, Tensor]:
kps_x0 = kps[:, 0].min().item()
kps_x1 = kps[:, 0].max().item()
kps_y0 = kps[:, 1].min().item()
kps_y1 = kps[:, 1].max().item()
crop_x0 = int(kps_x0 * np.random.rand())
crop_x1 = int(kps_x1 + np.random.rand() * (img.width - kps_x1))
crop_y0 = int(kps_y0 * np.random.rand())
crop_y1 = int(kps_y1 + np.random.rand() * (img.height - kps_y1))
# make square
crop_1 = max(crop_x1 - crop_x0, crop_y1 - crop_y0)
crop_y1 = crop_y0 + crop_1
crop_x1 = crop_x0 + crop_1
img = img.crop((crop_x0, crop_y0, crop_x1, crop_y1))
kps = kps - torch.tensor([crop_x0, crop_y0])
return img, kps
@staticmethod
def img_from_tensor(img_tensor: Tensor) -> Image.Image:
img: np.ndarray = img_tensor.permute(1, 2, 0).numpy()
img = (
img * np.array(Phase0PointsDataset.STD) + np.array(Phase0PointsDataset.MEAN)
) * 255
img = img.astype(np.uint8)
img = Image.fromarray(img)
return img
def show(self, idx: int, out_folder: Path, repeat_idx=0, verbose: bool = False):
sample_t = self[idx]
img_tensor = sample_t["img"]
kps_tensor = sample_t["kps"]
img = Phase0PointsDataset.img_from_tensor(img_tensor)
kps = kps_tensor.reshape(-1, 2).numpy() * Phase0PointsDataset.IMG_SIZE
filename = out_folder / f"sample_{idx}_{repeat_idx}.jpg"
save_img_with_kps(img, kps, filename, circle_radius=10, verbose=verbose)
| AvanDavad/receipt_extractor | src/datasets/phase0points_dataset.py | phase0points_dataset.py | py | 6,430 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 19,
"usage_type": "name"
},
{
"ap... |
6797727811 | # django imports
import datetime
import itertools

import dateutil
# Explicitly import the parser submodule: `import dateutil` alone does not
# make `dateutil.parser` available.
import dateutil.parser
import pytz

from django import template
register = template.Library()
@register.filter
def group_by_date(dates, timezone):
    """Template filter: bucket date strings into consecutive same-day groups.

    Args:
        dates: iterable of date/datetime strings (parsed with dateutil).
        timezone: name of the target timezone, e.g. "Europe/Madrid".

    Returns:
        A list of ``(day, group)`` tuples, one per consecutive run of input
        dates that fall on the same calendar day once converted to the given
        timezone. ``day`` is the bound ``date`` method of the first datetime
        in the group; Django templates invoke callables automatically when
        rendering.
    """
    tz = pytz.timezone(timezone)
    parsed_dates = []
    for raw_day in dates:
        try:
            # Treat naive timestamps as UTC and make them timezone-aware.
            parsed_dates.append(pytz.utc.localize(dateutil.parser.parse(raw_day)))
        except ValueError:
            # localize() raises ValueError for already-aware datetimes; keep
            # the parsed value as-is in that case.
            parsed_dates.append(dateutil.parser.parse(raw_day))
    # NOTE(review): replace(tzinfo=pytz.utc) discards any non-UTC offset on
    # already-aware inputs before converting -- confirm inputs are UTC.
    local_days = [
        tz.normalize(day.replace(tzinfo=pytz.utc)) for day in parsed_dates
    ]
    # groupby only merges *consecutive* equal days, so `dates` is expected to
    # arrive sorted chronologically.
    grouped = [
        list(group) for _, group in itertools.groupby(
            local_days, key=datetime.datetime.toordinal,
        )
    ]
    return [(group[0].date, group) for group in grouped]
| tomasgarzon/exo-services | service-exo-mail/mail/templatetags/group_by.py | group_by.py | py | 747 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pytz.timezone",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytz.utc.localize... |
34682609282 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 16:18:01 2019
@author: cacquist
"""
# coding: utf-8
# In[1]:
# ------------------------------------------------------------------------
# date : 12.04.2018
# author : Claudia Acquistapace
# goal : routine to read 1D meteogram for a given date and site ( Joyce ) and extract data for the site and also
# level2 variables for the site Store them in a ncdf file to be copied on ostro for comparison 1to1 with observations from the ground
# DAYS WITH BOUNDARY LAYER CLOUDS OF INTEREST:
# - 20130502 (folder 20130502-default )
# - 20130505 (folder 20130505-default-redone_v1)
# - 20130511 (folder 20130511-default )
# - 20160603 (folder 20160603-default-redone_v2 )
# ------------------------------------------------------------------------
# In[1]:
# ---- importing libraries
import numpy as np
import matplotlib
import scipy
import numpy.ma as ma
import pandas as pd
import netCDF4 as nc4
import glob
from netCDF4 import Dataset
import matplotlib.dates as mdates
import xarray as xr
from myFunctions import f_closest
import matplotlib.pyplot as plt
from myFunctions import f_calcPblHeightRN
from myFunctions import f_calcWvariance
from myFunctions import f_runningMeanSkewnessVarianceStd_W
from myFunctions import f_PBLClass
from myFunctions import f_calcCloudBaseTopPBLcloudsV2
from myFunctions import f_calcCloudBaseTopPBLclouds
from myFunctions import f_calcPblHeightTW
from myFunctions import f_cloudmask
from myFunctions import f_calcWindSpeed_Dir
from myFunctions import f_calculateCloudBaseTopThickness
def f_processModelOutput(path_icon,
                         iconFilename,
                         modelInputParameters,
                         date,
                         humanInfo,
                         debuggingFlag,
                         verboseFlag,
                         pathDebugFig,
                         pathOut,
                         domSel):
    """Read one day of ICON-LEM meteogram output and derive PBL properties.

    Reads the meteogram netCDF file ``path_icon + iconFilename`` and derives:
    thermodynamic quantities (theta, theta_e, theta_liquid, mixing ratio),
    cloud macro-properties (cloud mask, PBL cloud base/top), boundary-layer
    heights (Richardson-number and sigma-w threshold methods), turbulence
    statistics (variance/std/skewness of w), wind speed/direction/shear,
    surface stability and turbulence-coupling flags, LCL, LTS, and the PBL
    classification. Everything is written to
    ``pathOut + 'icon_lem_derivedproperties' + date + '.nc'``.

    Parameters
    ----------
    path_icon : str
        Directory containing the meteogram file.
    iconFilename : str
        Name of the meteogram netCDF file.
    modelInputParameters : dict
        Thresholds/settings: 'QcThresholdVar', 'QiThresholdVar',
        'timeWindowSkVar', 'runningWindowVar', 'SigmaWThresStd'.
    date : str
        'YYYYMMDD' string used for labels and the output filename.
    humanInfo
        Passed through to f_calculateCloudBaseTopThickness.
    debuggingFlag, verboseFlag : int
        1 enables debug output / progress printouts.
    pathDebugFig, pathOut : str
        Output directories for debug figures and the derived-quantities file.
    domSel : str
        Domain label stored in the output file (truncated to 5 characters).

    Returns
    -------
    None. The result is written to disk as a netCDF4 file.
    """
    print('processing meteograms for the '+date)
    # ---- reading datafile selected
    data = Dataset(path_icon+iconFilename, mode='r')
    time = data.variables['time'][:].copy()
    datetime_ICON = nc4.num2date(data.variables['time'][:],data.variables['time'].units)
    Qi = data.variables['QI'][:].copy()
    Qc = data.variables['QC'][:].copy()
    T = data.variables['T'][:].copy() # in [K]
    zonalWind = data.variables['U'][:].copy()
    merWind = data.variables['V'][:].copy()
    vertWind = data.variables['W'][:].copy()
    LWP = data.variables['TQC'][:].copy()
    IWV = data.variables['TQV'][:].copy()
    thetaV = data.variables['THETAV'][:].copy()
    height = data.variables['height'][:].copy()
    P = data.variables['P'][:].copy() # [Pa]
    RH = data.variables['REL_HUM'][:].copy()
    q = data.variables['QV_DIA'][:].copy() # [kg/kg]
    Hsurf = float(data.station.split('_hsurf=')[-1].split('\n')[0])
    height2 = data.variables['height_2'][:].copy()
    rho = data.variables['RHO'][:].copy()
    SWSurfFlux = data.variables['SOBS'][:].copy() # shortwave net flux at surface
    LWSurfFlux = data.variables['THBS'][:].copy() # longwave net flux at surface
    LHFL = data.variables['LHFL'][:].copy() # latent heat flux (surface)
    SHFL = data.variables['SHFL'][:].copy() # sensible heat flux (surface)
    TempSurf = data.variables['T_S'][:]
    # subtracting from model height arrays the height of the ground level at
    # JOYCE to make them comparable with the observations
    height2 = height2 - np.repeat(Hsurf, len(height2))
    height = height - np.repeat(Hsurf, len(height))
    # --- reading dimension of height and time arrays
    dimTime = len(datetime_ICON)
    dimHeight = len(height2)
    if verboseFlag == 1:
        print('variable extracted from the data')
        print('data loaded for '+date)
        print('dimension for height_2 :', dimHeight)
        print('dimension for time :', dimTime)
    if debuggingFlag == 1:
        # Debug surface-flux plots were removed (dead commented-out code);
        # re-add plotting here if needed.
        if verboseFlag == 1:
            print('no plots')
    if verboseFlag == 1:
        print('end of plotting graphs for debugging in debugging mode')
    # ------------------------------------------------------------------
    # defining constants needed for calculations
    # ------------------------------------------------------------------
    Rl = 287.       # gas constant of dry air [J kg-1 K-1]
    P_0 = 100*1000. # reference pressure [Pa]
    const = 0.286   # R/Cp
    # FIX(review): was 2260 with a "[J/kg]" comment, i.e. a factor 1000 too
    # small for the latent heat of vaporization (~2.26e6 J/kg); the lv used
    # for theta_e below is consistently ~2.5e6 J/kg.
    Lv = 2260.e3    # latent heat of vaporization [J/kg]
    Cp = 1005.7     # specific heat at constant pressure [J kg-1 K-1]
    # ------------------------------------------------------------------
    # derivation of water vapor mixing ratio r = q / (1 - q)
    # ------------------------------------------------------------------
    r = np.zeros((dimTime, dimHeight))
    for itempo in range(dimTime):
        for ih in range(dimHeight):
            r[itempo,ih] = q[itempo,ih]/(1. - q[itempo,ih] )
    if verboseFlag == 1:
        print('water vapor mixing ratio calculated')
    # ------------------------------------------------------------------
    # --- calculating cloud mask for ice and liquid clouds using thresholds on Qi, Qc
    # ------------------------------------------------------------------
    QcThreshold = modelInputParameters['QcThresholdVar']
    QiThreshold = modelInputParameters['QiThresholdVar']
    cloudMask = f_cloudmask(time,height2,Qc,Qi,QiThreshold,QcThreshold)
    # calculating cloud base, top and thickness for all clouds and PBL clouds
    clouds, PBLclouds = f_calculateCloudBaseTopThickness(cloudMask, datetime_ICON, height2, humanInfo)
    # deriving lowest cloud base and corresponding cloud top for PBL clouds
    CBarr = np.zeros(dimTime)
    CBarr.fill(np.nan)
    CTarr = np.zeros(dimTime)
    CTarr.fill(np.nan)
    iPBL = 0
    for itime in range(dimTime):
        if iPBL < len(PBLclouds.time.values):
            if clouds.time.values[itime] == PBLclouds.time.values[iPBL]:
                CBarray = PBLclouds.cloudBase.values[iPBL, :]
                # only record a base/top if at least one non-NaN base exists
                if CBarray.size - np.count_nonzero(np.isnan(CBarray)) != 0:
                    minCB = np.nanmin(PBLclouds.cloudBase.values[iPBL, :])
                    CBarr[itime] = minCB
                    indexLevelMin = np.nanargmin(PBLclouds.cloudBase.values[iPBL, :])
                    CTarr[itime] = PBLclouds.cloudTop[iPBL, indexLevelMin]
                iPBL = iPBL + 1
    print('cloud base and cloud top for ICON-LEM calculated ')
    # ------------------------------------------------------------------
    # ---- calculating potential temperature and equivalent potential temperature
    # ------------------------------------------------------------------
    theta = np.zeros((dimTime, dimHeight))
    theta_e = np.zeros((dimTime, dimHeight))
    theta.fill(np.nan)
    theta_e.fill(np.nan)
    for iTime in range(dimTime):
        for iHeight in range(dimHeight):
            # NOTE(review): height was already shifted by -Hsurf above, so this
            # compares shifted heights against the unshifted station altitude;
            # confirm the intended below-ground test.
            if height[iHeight] < Hsurf:
                theta[iTime, iHeight] = 0.
            else:
                theta[iTime, iHeight] = T[iTime, iHeight] * (float(P_0)/float(P[iTime, iHeight]))**(const)
    if verboseFlag == 1:
        print('potential temperature calculated')
    for iTime in range(dimTime):
        for iHeight in range(dimHeight):
            lv = (2500.-2.42*(T[iTime, iHeight]-273.15))*1000. # latent heat of vaporization in J/kg
            theta_e[iTime, iHeight] = theta[iTime, iHeight]+(lv*r[iTime, iHeight]/Cp)* (np.power(100000./P[iTime, iHeight], Rl/Cp)) # equivalent potential temperature in K
    if verboseFlag == 1:
        print('equivalent potential temperature calculated')
    # ------------------------------------------------------------------
    # --- Boundary layer height via Richardson number (Seidel et al., 2010)
    # ------------------------------------------------------------------
    device = 'mod'
    PBLHeightArrRN = f_calcPblHeightRN(thetaV,zonalWind,merWind,height2,time, device)
    if verboseFlag == 1:
        print('height of the PBL (RN) calculated')
    # ------------------------------------------------------------------
    # --- variance, std, skewness of vertical velocity (running mean window)
    # ------------------------------------------------------------------
    timeWindowSk = modelInputParameters['timeWindowSkVar']
    runningWindow = modelInputParameters['runningWindowVar']
    resultDyn = f_runningMeanSkewnessVarianceStd_W(time, timeWindowSk, runningWindow, height2, vertWind)
    # output of the function : varianceW, stdWmatrix, SKmatrix
    varianceWmatrix = resultDyn[0]
    stdWmatrix = resultDyn[1]
    SKmatrix = resultDyn[2]
    if verboseFlag == 1:
        print('variance, std and skewness of w calculated')
        print('std max = '+str(np.nanmax(stdWmatrix)))
    # ------------------------------------------------------------------
    # --- Boundary layer height via threshold on std of w
    # ------------------------------------------------------------------
    sigmaW = stdWmatrix
    sigmaThreshold = modelInputParameters['SigmaWThresStd'] # m/s, threshold for std of w from Schween et al., 2014, AMT
    PBLHeightArrTW = f_calcPblHeightTW(sigmaW,sigmaThreshold,height2,time, device)
    if verboseFlag == 1:
        print('height of the PBL (TW) calculated')
    # ------------------------------------------------------------------
    # --- connection of the turbulence to the surface: turbulence counts as
    # surface-connected when the w-variance at 200 m exceeds 0.03.
    # NOTE(review): the 0.03 threshold is applied to the *variance* matrix
    # although the surrounding naming speaks of sigma (std) -- confirm.
    # ------------------------------------------------------------------
    deltaSigma = np.subtract(varianceWmatrix, 0.03)[:,f_closest(height,200.)]
    connection2Surface = [] # 0 = not connected, 1 = connected (turbulent)
    for itime in range(dimTime):
        if deltaSigma[itime] < 0.:
            connection2Surface.append(0)
        else:
            connection2Surface.append(1)
    if verboseFlag == 1:
        print('connection of turbulence with the surface calculated')
    # ------------------------------------------------------------------
    # ---- stability array: temperature difference between 150 m and the
    # level closest to the surface (stable if deltaT < 0.3 K)
    # FIX(review): the original subtracted a fixed *time* slice
    # (T[f_closest(height,Hsurf), :]) although T is dimensioned
    # (time, height) everywhere else in this function; use the surface
    # *height* column instead.
    # ------------------------------------------------------------------
    deltaT = T[:, f_closest(height,150.)] - T[:, f_closest(height,Hsurf)]
    stabilityArr = []
    for itime in range(dimTime):
        if deltaT[itime] < 0.3:
            stabilityArr.append(1)
        else:
            stabilityArr.append(0)
    if verboseFlag == 1:
        print('stability at the surface calculated')
    # ------------------------------------------------------------------
    # --- horizontal wind speed/direction and wind shear: running mean
    # (200 samples) of u and v, then sqrt(dU^2 + dV^2)/dH over +-2 gates
    # ------------------------------------------------------------------
    windData = f_calcWindSpeed_Dir(datetime_ICON, height2, zonalWind, merWind)
    windSpeed = windData['windSpeed']
    windDirection = windData['windDirection']
    u_rm = np.zeros((len(datetime_ICON), len(height2)))
    v_rm = np.zeros((len(datetime_ICON), len(height2)))
    for indH in range(0,len(height2)):
        zonal = pd.Series(zonalWind[:,indH])
        mer = pd.Series(merWind[:,indH])
        u_rm[:,indH] = zonal.rolling(200).mean()
        v_rm[:,indH] = mer.rolling(200).mean()
    shear_ICON = np.zeros((len(datetime_ICON), len(height2)))
    for indT in range(0,len(datetime_ICON)):
        for indH in range(0,len(height2)):
            # no shear at the outermost two gates (centered difference needs +-2)
            if (indH < 2.) or (indH > len(height2)-3):
                shear_ICON[indT, indH] = 0.
            else:
                deltaV = (np.absolute(v_rm[indT, indH+2] - v_rm[indT, indH-2]))**2
                deltaU = (np.absolute(u_rm[indT, indH+2] - u_rm[indT, indH-2]))**2
                deltaH = np.absolute(height[indH+2] - height[indH-2])
                shear_ICON[indT, indH] = (np.sqrt(deltaU + deltaV))/deltaH
    if verboseFlag == 1:
        print('horizontal wind speed, direction and shear calculated')
    # ------------------------------------------------------------------
    # ---- boundary layer classification (version from submitted paper)
    # ------------------------------------------------------------------
    ylim = np.repeat(3000, dimTime) # heights up to which PBL classification is calculated
    gradWindThr = 0.01
    SigmaWThres = 0.2
    outputClass = f_PBLClass(datetime_ICON,
                             height2,
                             gradWindThr,
                             SigmaWThres,
                             ylim,
                             cloudMask,
                             varianceWmatrix,
                             SKmatrix,
                             stabilityArr,
                             connection2Surface,
                             shear_ICON,
                             CBarr)
    PBLclass = outputClass[0]
    if verboseFlag == 1:
        print('PBL classification calculated')
    if debuggingFlag == 1:
        print('dimensions of PBL class')
        print(np.shape(PBLclass))
    # ------------------------------------------------------------------
    # --- calculation of the LCL (lifting condensation level)
    # ------------------------------------------------------------------
    Psurf = data.variables['P_SFC'][:].copy()
    Tsurf = data.variables['T2M'][:].copy()
    # NOTE(review): hard-coded near-surface level index -- TODO derive it
    # from the height array instead.
    RHsurf = RH[:,149]
    LCLarray = []
    from myFunctions import lcl
    for iTime in range(dimTime):
        LCLarray.append(lcl(Psurf[iTime],Tsurf[iTime],RHsurf[iTime]/100.))
    if verboseFlag == 1:
        print('LCL calculated')
    # ------------------------------------------------------------------
    # LTS index for lower tropospheric stability (Wood and Bretherton, 2006)
    # ------------------------------------------------------------------
    LTS = np.zeros(dimTime)
    H700 = np.zeros(dimTime)
    Pthr = 700 * 100. # reference pressure level of 700 hPa [Pa]
    # NOTE(review): hard-coded surface level index (f_closest(height,Hsurf)
    # was commented out in the original) -- TODO confirm.
    indSurf = 146# f_closest(height,Hsurf)
    for iTime in range(dimTime):
        indP700 = f_closest(P[iTime,:],Pthr)
        LTS[iTime] = theta[iTime, indP700] - theta[iTime, indSurf]
        H700[iTime] = height[indP700]
    if verboseFlag == 1:
        print('LTS calculated')
    # ------------------------------------------------------------------
    # ---- calculating liquid water potential temperature
    # ------------------------------------------------------------------
    theta_liquid = np.zeros((dimTime, dimHeight))
    theta_liquid.fill(np.nan)
    for iTime in range(dimTime):
        for iHeight in range(dimHeight):
            if height[iHeight] < Hsurf:
                theta_liquid[iTime, iHeight] = 0.
            else:
                theta_liquid[iTime, iHeight] = theta[iTime, iHeight] - (Lv/Cp)* Qc[iTime, iHeight]
    if verboseFlag == 1:
        print('liquid potential temperature calculated')
    # ------------------------------------------------------------------
    # ------- saving derived outputs as ncdf file
    # ------------------------------------------------------------------
    f = nc4.Dataset(pathOut+'icon_lem_derivedproperties'+date+'.nc','w', format='NETCDF4') # creates a netCDF file for writing
    tempgrp = f.createGroup('Temp_data') # a netCDF group is a directory/folder within the netCDF dataset
    # specify dimensions of the data (each dimension needs a name and a length)
    tempgrp.createDimension('dimH', len(height2)) # dimension for height
    tempgrp.createDimension('dimHlong', len(height)) # dimension for height
    tempgrp.createDimension('dimHsurf', 1) # dimension for scalar values
    tempgrp.createDimension('dimT', len(datetime_ICON)) # dimension for time
    tempgrp.createDimension('NclassesPBL', 8) # dimension for the number of cloud layers found
    tempgrp.createDimension('dimHlarger', len(height)) # dimension for height
    tempgrp.createDimension('nchar', 5)
    # preallocating netCDF variables for data storage
    varHeight2 = tempgrp.createVariable('height2', 'f4', 'dimH')
    varHeight = tempgrp.createVariable('height', 'f4', 'dimHlong')
    vardomain = tempgrp.createVariable('domain', 'S1', 'nchar')
    vartime = tempgrp.createVariable('datetime_ICON', 'f4', 'dimT')
    varLTS = tempgrp.createVariable('LTS', 'f4', 'dimT')
    varPBLheight = tempgrp.createVariable('PBLHeightArrRN', 'f4', 'dimT')
    varPBLheight2 = tempgrp.createVariable('PBLHeightArrTW', 'f4', 'dimT')
    # NOTE(review): 'NcloudLayers' is created but never written below.
    varCloudLayers = tempgrp.createVariable('NcloudLayers', 'f4', 'dimT')
    varHsurf = tempgrp.createVariable('HeightSurface', 'f4', 'dimHsurf')
    varLCL = tempgrp.createVariable('LCLarray', 'f4', 'dimT')
    varLWP = tempgrp.createVariable('LWP', 'f4', 'dimT')
    varIWV = tempgrp.createVariable('IWV', 'f4', 'dimT')
    varLHFL = tempgrp.createVariable('LHFL', 'f4', 'dimT')
    varSHFL = tempgrp.createVariable('SHFL', 'f4', 'dimT')
    varLWSF = tempgrp.createVariable('LWSurfFlux', 'f4', 'dimT')
    varSWSF = tempgrp.createVariable('SWSurfFlux', 'f4', 'dimT')
    # PBL class and connected flags, LTS clouds, SW clouds, PBL height, CB height
    varPBL_class = tempgrp.createVariable('PBLclass', 'f4', ('dimT','dimH'))
    varflagCloud = tempgrp.createVariable('flagCloud', 'f4', ('dimT','dimH'))
    varQc = tempgrp.createVariable('Qc', 'f4', ('dimT','dimH'))
    varQi = tempgrp.createVariable('Qi', 'f4', ('dimT','dimH'))
    varflagTurb = tempgrp.createVariable('flagTurb', 'f4', ('dimT','dimH'))
    varflagcloudDriven = tempgrp.createVariable('flagcloudDriven', 'f4', ('dimT','dimH'))
    varflagInstability = tempgrp.createVariable('flagInstability', 'f4',('dimT','dimH'))
    varflagWindShear = tempgrp.createVariable('flagWindShear', 'f4', ('dimT','dimH'))
    varflagSurfDriven = tempgrp.createVariable('flagSurfaceDriven', 'f4', ('dimT','dimH'))
    varvarianceW = tempgrp.createVariable('varianceW', 'f4', ('dimT','dimH'))
    varHwind = tempgrp.createVariable('windSpeed', 'f4', ('dimT','dimH'))
    varWindDirection = tempgrp.createVariable('windDirection', 'f4', ('dimT','dimH'))
    varShearHwind = tempgrp.createVariable('shearHwind', 'f4', ('dimT','dimH'))
    varcloudMask = tempgrp.createVariable('cloudMask', 'f4', ('dimT','dimH'))
    varthetaPot = tempgrp.createVariable('theta', 'f4', ('dimT','dimH'))
    varskewnessW = tempgrp.createVariable('skewnessW', 'f4', ('dimT','dimH'))
    varstdWmatrix = tempgrp.createVariable('stdWmatrix', 'f4', ('dimT','dimH'))
    varMixingRatio = tempgrp.createVariable('r', 'f4', ('dimT','dimH'))
    varthetaL = tempgrp.createVariable('theta_liquid', 'f4', ('dimT','dimH'))
    varthetaPot_e = tempgrp.createVariable('theta_e', 'f4', ('dimT','dimH'))
    varw = tempgrp.createVariable('vertWind', 'f4', ('dimT','dimHlarger'))
    varP = tempgrp.createVariable('P', 'f4', ('dimT','dimH'))
    varRH = tempgrp.createVariable('RH', 'f4', ('dimT','dimH'))
    varQ = tempgrp.createVariable('q', 'f4', ('dimT','dimH'))
    varT = tempgrp.createVariable('T', 'f4', ('dimT','dimH'))
    varMerWind = tempgrp.createVariable('merWind', 'f4', ('dimT','dimH'))
    varZonWind = tempgrp.createVariable('zonalWind', 'f4', ('dimT','dimH'))
    varRho = tempgrp.createVariable('rho', 'f4', ('dimT','dimH'))
    varT_surf = tempgrp.createVariable('TempSurf', 'f4', 'dimT')
    # passing data into the variables
    varHeight2[:] = height2
    varHeight[:] = height
    # FIX(review): the original rebound the Python names (vardomain = domSel,
    # varHsurf = Hsurf) instead of writing into the netCDF variables, so
    # 'domain' and 'HeightSurface' were stored empty in the output file.
    vardomain[:] = nc4.stringtochar(np.array([domSel], dtype='S5'))[0]
    varHsurf[:] = Hsurf
    vartime[:] = time
    varLTS[:] = LTS
    varPBLheight[:] = PBLHeightArrRN
    varPBLheight2[:] = PBLHeightArrTW
    varLCL[:] = LCLarray
    varLWP[:] = LWP
    varIWV[:] = IWV
    varLHFL[:] = LHFL
    varSHFL[:] = SHFL
    varLWSF[:] = LWSurfFlux
    varSWSF[:] = SWSurfFlux
    varPBL_class[:,:] = PBLclass
    varflagCloud[:] = outputClass[1]
    varflagTurb[:] = outputClass[2]
    varflagcloudDriven[:] = outputClass[3]
    varflagInstability[:] = outputClass[4]
    varflagWindShear[:] = outputClass[5]
    varflagSurfDriven[:] = outputClass[6]
    varvarianceW[:,:] = varianceWmatrix
    varHwind[:,:] = windSpeed
    varWindDirection[:,:] = windDirection
    varShearHwind[:,:] = shear_ICON
    varcloudMask[:,:] = cloudMask
    varthetaPot[:,:] = theta
    varskewnessW[:,:] = SKmatrix
    varstdWmatrix[:,:] = stdWmatrix
    varMixingRatio[:,:] = r
    varthetaL[:,:] = theta_liquid
    varthetaPot_e[:,:] = theta_e
    varw[:,:] = vertWind
    varP[:,:] = P
    varRH[:,:] = RH
    varQ[:,:] = q
    varT[:,:] = T
    varMerWind[:,:] = merWind
    varZonWind[:,:] = zonalWind
    varRho[:,:] = rho
    varQc[:,:] = Qc
    varQi[:,:] = Qi
    varT_surf[:] = TempSurf
    #Add global attributes
    f.description = "icon lem model derived physical quantities and PBL classification"
    f.history = "Created by Claudia Acquistapace cacquist@meteo.uni-koeln.de - University of Cologne"
    #Add local attributes to variable instances
    varPBL_class.units = '1=in cloud, 2=non turb, 3=cloud driven, 4=convective, 5=intermittent, 6=wind shear'
    vartime.units = 'seconds since '+date[0:4]+'-'+date[4:6]+'-'+date[6:8]+' 00:00:00'
    # closing ncdf file
    f.close()
print('File Saved ') | ClauClouds/PBL_paper_repo | f_processModelOutput.py | f_processModelOutput.py | py | 29,461 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "netCDF4.num2date",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"li... |
6783941465 | """Change column distance_bin to distance_cat
Revision ID: 2524785502b4
Revises: c137e7385dd7
Create Date: 2020-03-20 16:47:15.648707
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2524785502b4'
down_revision = 'c137e7385dd7'
branch_labels = None
depends_on = None
def upgrade():
    """Rename race.distance_bin to distance_cat and add the runner FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('race', sa.Column('distance_cat', sa.String(), nullable=True))
    # FIX(review): name the FK explicitly (matching PostgreSQL's default
    # naming convention) so downgrade() can drop it by name;
    # create_foreign_key(None, ...) leaves an autogenerated name that
    # drop_constraint(None, ...) cannot reference.
    op.create_foreign_key(
        'race_runner_contact_id_fkey',
        'race', 'runner_contact',
        ['runner_contact_id'], ['id'],
    )
    op.drop_column('race', 'distance_bin')
    # ### end Alembic commands ###
def downgrade():
    """Restore race.distance_bin and drop the runner FK added in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('race', sa.Column('distance_bin', sa.VARCHAR(), autoincrement=False, nullable=True))
    # FIX(review): drop_constraint(None, ...) raises ValueError at runtime
    # (the constraint must be named); drop the FK by its PostgreSQL-default
    # name, which is also what an unnamed create_foreign_key produced.
    op.drop_constraint('race_runner_contact_id_fkey', 'race', type_='foreignkey')
    op.drop_column('race', 'distance_cat')
    # ### end Alembic commands ###
| dcjohnson24/gugs_db | migrations/versions/2524785502b4_change_column_distance_bin_to_distance_.py | 2524785502b4_change_column_distance_bin_to_distance_.py | py | 980 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
73743833704 | import re
import logging
import ROOT
import plottingConfig as cfg
class Config(cfg.PlottingConfig):
def __init__ (self, options):
self.options = options
super(Config, self).__init__()
sigma = 1 # at mu=1 (arbitrary for AZh)
sigma_units = 'fb'
# self.force_mu = (True, 0.16) # 700 GeV
self.force_mu = (True, 10) # 600 GeV
# for child classes to use
# self.loggingLvl = logging.INFO
self.loggingLvl = logging.DEBUG
self.verbose = False
self.formats = [ 'eps', 'pdf', 'png', 'root', 'C' ]
self.blind = True
self.thresh_drop_legend = 0.01
self.restrict_to = []
self.excludes = []
self.additionalPlots = []
self.add_sig_to_ratio_plot = True
self.use_exp_sig = True
# self.transferResults_fitName = "HiggsNorm"
# self.get_binning_hist_removal = ["_meas2l2q2v2q"]
self.bkg_substr_name = "Diboson"
self.bkg_substr_list = ["diboson", "Diboson", "WZ", "ZZ", "VZ"]
self.file_tags = ["Y", "L", "J", "T", "TType", "Flv", "Sgn", "isMVA", "dist", "Spc", "D", "nAddTag", "BMax", "BMin", "Fat", "incFat", "incJet", "incAddTag"]
self.weight_tags = ["Higgsweighted", "Dibosonweighted"]
self.sig_names = ["VH"]
self.signal = ["A#rightarrow Zh (best fit)", self._STACK, ROOT.kRed + 1, 1] # last = mult factor
self.expected_signal = ["VHbb", self._STACK, ROOT.kRed +1, self.force_mu[1]] # last = expected mu
#self.expected_signal = ["A#rightarrow Zh (#sigma={0} {1})".format(int(sigma*self.force_mu[1]), sigma_units), self._STACK, ROOT.kRed +1, self.force_mu[1]] # last = expected mu
# self.additional_signal = ["A#rightarrow Zh", self._OVERPRINT, ROOT.kRed +1, 1.]
self.bkg_tuple = {'ttbar': ("t#bar{t}", 42, ROOT.kOrange, []),
'stopt': ("t, s+t chan", 41, ROOT.kOrange - 1, ["stops"]),
'stops': ("t, s+t chan", 41, ROOT.kOrange - 1, ["stopt"]),
'stopWt': ("Wt", 40, ROOT.kYellow - 7, []),
'stop': ("Single top", 40, ROOT.kOrange - 1, []),
'Zbb': ("Z+bb", 25, ROOT.kAzure + 3, []),
'Zbc': ("Z+bc", 24, ROOT.kAzure + 2, []),
'Zclbl': ("Z+(bl,cl)", 23, ROOT.kAzure + 1, []),
'Zbl': ("Z+bl", 23, ROOT.kAzure + 1, []),
'Zcl': ("Z+cl", 21, ROOT.kAzure - 8, []),
'Zcc': ("Z+cc", 22, ROOT.kAzure - 4, []),
'Zhf': ("Z+(bb,bc,cc)", 22, ROOT.kAzure + 2, []),
'Zl': ("Z+l", 20, ROOT.kAzure - 9, []),
'Wbl': ("W+bl", 33, ROOT.kGreen + 2, []),
'Wbb': ("W+bb", 35, ROOT.kGreen + 4, []),
'Wbc': ("W+bc", 34, ROOT.kGreen + 3, []),
'Wcc': ("W+cc", 32, ROOT.kGreen + 1, []),
'Whf': ("W+(bb,bc,cc,bl)", 32, ROOT.kGreen + 3, []),
'Wcl': ("W+cl", 31, ROOT.kGreen - 6, []),
'Wl': ("W+l", 30, ROOT.kGreen - 9, []),
'WZ': ("WZ", 53, ROOT.kGray + 1, ["ZZ"]),
'ZZ': ("ZZ", 52, ROOT.kGray + 1, ["WZ"]),
'VZ': ("VZ", 51, ROOT.kGray + 1, []),
'diboson': ("Diboson", 51, ROOT.kGray + 1, []),
'WW': ("WW", 50, ROOT.kGray + 3, []),
'Diboson': ("Diboson", 50, ROOT.kGray + 1, []),
#'VH125': ("Vh", 49, ROOT.kRed - 6, []),
'multijet': ("Multijet", 39, ROOT.kViolet-9, ["multijetMu", "multijetEl"]),
'multijetEl': ("Multijet", 39, ROOT.kViolet-9, ["multijetMu", "multijet"]),
'multijetMu': ("Multijet", 39, ROOT.kViolet-9, ["multijetEl", "multijet"])}
# self.ATLAS_suffix = "Internal"
# self.ATLAS_suffix = "Simulation"
self.ATLAS_suffix = "Preliminary"
# self.ATLAS_suffix = ""
# for yields
self.make_slides = False
self.window = None
self.priorities = {
"data" : 80,
"S/sqrt(S+B)" : 73,
"S/B" : 72,
"Bkg" : 60,
"MC" : 75,
"SignalExpected" : 71,
"Signal" : 70,
"VH125" : 57,
"ZvvH125" : 67,
"ggZvvH125" : 67,
"qqZvvH125" : 67,
"WlvH125" : 68,
"ZllH125" : 69,
"ggZllH125" : 69,
"qqZllH125" : 69,
"ZvvH150" : 67,
"ggZvvH150" : 67,
"qqZvvH150" : 67,
"WlvH150" : 68,
"ZllH150" : 69,
"AZhllbb1200" : 70,
"AZhvvbb1200" : 70,
"AZhllbb1000" : 70,
"AZhvvbb1000" : 70,
"AZhllbb400" : 70,
"AZhvvbb400" : 70,
"AZhllbb300" : 70,
"AZhvvbb300" : 70,
"AZhllbb600" : 70,
"AZhvvbb600" : 70,
"bbAZhllbb600" : 70,
"bbAZhvvbb600" : 70,
"ggZllH150" : 69,
"qqZllH150" : 69,
"ttbar" : 45,
"stops" : 43,
"stopt" : 42,
"stopst" : 41,
"stopWt" : 40,
"stop" : 40,
"Zhf" : 27,
"Zb" : 24,
"Zbl" : 25,
"Zbb" : 27,
"Zbc" : 26,
"Zc" : 21,
"Zcl" : 100,
"Zclbl" : 22,
"Zcc" : 23,
"Zl" : 20,
"Whf" : 37,
"Wb" : 34,
"Wbl" : 35,
"Wbb" : 37,
"Wbc" : 36,
"Wcc" : 33,
"Wc" : 31,
"Wcl" : 32,
"Wl" : 30,
"WZ" : 53,
"ZZ" : 52,
"VZ" : 51,
"WW" : 50,
"Diboson" : 50,
"diboson" : 50,
"multijet" : 45,
"multijetEl" : 45,
"multijetMu" : 45,
"MJ0lep" : 45,
"MJ1lep" : 45,
"MJ2lep" : 45,
"MJ2lepEl" : 45,
"MJ1lepEl" : 45,
"MJ1lepMu" : 45,
}
# for reduced diag plots only
self.exclude_str = 'HiggsNorm'
self.cov_classification = {
"BTag": [False, ["SysFT_EFF_Eigen", "SysFT_EFF_extrapolation"], []],
"Top": [False, ["SysWt", "SysTop", "SysTtbar", "SysMVH"], []],
"ModelBoson": [False, ["SysVV", "SysWM","SysZM","SysWD","SysZD","SysWP","SysZP","SysVj"], []],
"Norm": [False, ["Norm","Ratio"], []],
"norm": [False, ["norm"], []],
"Lepton": [False, ["SysMUON","SysEL","SysEG"], []],
"Jet": [False, ["SysJET","FATJET"], []],
"MET": [False, ["SysMET"], []],
"LUMI": [False, ["LUMI"], []],
"Shifted": [True, [], ["blablabla"]]
}
self.cov_special = {
"noMCStat": [[], ["gamma"]],
"JES": [["SigX", "norm_", "Jet"], []],
"BTag": [["SigX", "norm_", "BTag"], []],
"Mbb": [["SigX", "norm_", "Mbb"], []],
"Modelling": [["SigX", "norm_", "Norm", "Ratio", "PtBi"], []],
"SF": [["SigX", "norm_"], []],
"Norm": [["3JNorm", "norm_", "Norm", "Ratio"], []]
}
self.syst_to_study = ["JetEResol", "Mbb_Whf", "V_Whf", "METScale", "TChanP",
"ttbarHigh", "BJetReso", "ZblZbb", "BTagB1", "norm_Wbb", "WblWbbRatio"]
self.suspicious_syst = ["norm_"]
# for yield ratios only
self.category_condenser = {
# "_HistSyst": ["_Exp", False],
# "_dist(mva|mjj)": ["_dist", False],
# "_distMV1cBTag": ["_dist", False],
"_distmV": ["_dist", False],
# "_isMVA[01]": ["_isMVA", False],
# "_B[0-5]_": ["_B9_", False],
"_B(Max500_BMin0|BMin500)_": ["_Bresolvedmerged_", False],
# "_TType(ll|mm|tt|xx)": ["_TType", False],
"_T[012]": ["_Tx", False],
"_(incJet1_J|incFat1_Fat|J)[1235]": ["_Jx", False],
# "_Spc[0-9a-z]*top[a-z]*cr": ["_TType", False],
# "(multijet)(.*_L)([0123])(.*)": [r'MJ\3lep\2\3\4', False],
"_L[012]": ["_Lx", False],
"_D(SR|topemucr)": ["_DallRegions", False],
# "_W(bb|bl|bc|cc)_": ["_Whf_", True],
# "_Z(bb|bl|bc|cc)_": ["_Zhf_", True]
}
logging.basicConfig(format='%(levelname)s in %(module)s: %(message)s', level=self.loggingLvl)
def do_rebinning (self, prop):
# NOTE: JWH - ED board requests
if prop["dist"] == "mVH":
if "mBBcr" in prop["D"] or "topemucr" in prop["D"]:
if prop["L"] == "2" or prop["L"] == "0":
if prop.get("incFat", "-1") == "1" or prop.get("incJet", "-1") == "1":
return False
if "SR" in prop["D"]:
if prop["L"] == "2" or prop["L"] == "0":
if prop.get("incFat", "-1") == "1":
return False
if prop["L"] == "0":
return False
return True
def is_signal(self, compname):
""" Check if a component is Higgs. If yes, return mass """
# Spyros: Add ggA to list of signal names - has to be first in list otherwise we get problems
signames = self.sig_names
has_mass = False
mass = ""
# Spyros: if sg in compname matches also mVH so doesn't work for resonance analyses
# remove mVH from compname
compname = re.sub('mVH', '', compname)
for sg in signames:
if sg in compname:
has_mass = True
pos = compname.find(sg) + len(sg)
mass = int(re.sub("[^0-9]", "", compname[pos:pos + compname[pos:].find('_')]))
break
return has_mass, mass
def blind_data (self, setup):
def _do_blinding (title):
#return False, []
return "T2" in title, [110, 140]
do_blinding, blind_range = _do_blinding(setup.title)
if do_blinding:
# blind entire range
if blind_range[0] == 0 and blind_range[1] == 0:
blind_range[0] = setup.data.h.GetXaxis().GetXmin()
blind_range[1] = setup.data.h.GetXaxis().GetXmax()
setup.data.blind(blind_range[0], blind_range[1])
#else:
# # Add general blinding at 2% S/B
# for i in range(1, setup.hsum.GetNbinsX()+1):
# if setup.hsum.GetBinContent(i) > 0:
# sob = setup.exp_sig.h.GetBinContent(i) / ( setup.hsum.GetBinContent(i) )
# if sob > 0.02:
# setup.data.blind(setup.hsum.GetBinLowEdge(i), setup.hsum.GetBinLowEdge(i+1))
# elif setup.exp_sig.h.GetBinContent(i) > 0:
# setup.data.blind(setup.hsum.GetBinLowEdge(i), setup.hsum.GetBinLowEdge(i+1))
def preprocess_main_content_histogram (self, hist, setupMaker):
return hist
# def change_MeV_GeV(hist):
# if isinstance(hist, ROOT.TH1):
# new_hist = hist.Clone()
# bins = new_hist.GetXaxis().GetXbins()
# for i in range(bins.GetSize()):
# bins[i] /= 1000.
# new_hist.SetBins(bins.GetSize()-1, bins.GetArray())
# for i in range(new_hist.GetNbinsX()+2):
# new_hist.SetBinContent(i, hist.GetBinContent(i))
# new_hist.SetBinError(i, hist.GetBinError(i))
# elif isinstance(hist, ROOT.TGraph):
# new_hist = hist
# xbins = new_hist.GetX()
# for i in range(new_hist.GetN()):
# xbins[i] /= 1000.
# if isinstance(hist, ROOT.TGraphAsymmErrors):
# xbinsup = new_hist.GetEXhigh()
# xbinsdo = new_hist.GetEXlow()
# for i in range(new_hist.GetN()):
# xbinsup[i] /= 1000.
# xbinsdo[i] /= 1000.
# return new_hist
#
# new_hist = hist
# props = sm.setup.properties
# if props:
# # Changes for MeV/GeV
# affected_dists = ["MEff", "MEff3", "MET", "mLL", "mTW", "pTB1", "pTB2", "pTJ3", "pTV", "mBB", "mBBJ"]
# if props["L"] == "1" and props["dist"] in affected_dists:
# new_hist = change_MeV_GeV(hist)
#
# return new_hist
def make_sum_plots (self, func):
#add MET for 0 lepton merged+resolved signal region
#add mBB for 0 mbbcr+SR
for tag_i in ["1", "2"] :
func("Region_BMax500_BMin0_incJet1_J2_T"+tag_i+"_L2_Y2015_distmBB_Dtopemucr",
rt=["_L2", "_T"+tag_i, "_distmBB", "_Dtopemucr"], ea=[])
func("Region_BMax500_BMin0_incJet1_J2_T"+tag_i+"_L2_Y2015_distmBB",
rt=["_L2", "_T"+tag_i, "_distmBB"], ea=["_Dtopemucr"])
func("Region_BMax500_BMin150_incJet1_J2_T"+tag_i+"_L0_Y2015_distmBB",
rt=["_L0", "_T"+tag_i, "_distmBB"], ea=[])
func("Region_BMin150_T"+tag_i+"_L0_Y2015_distMET_DSR",
rt=["_L0","_T"+tag_i, "_distMET","_DSR"], ea=["_L2","_DmBBcr","_Dtopemucr"])
func("Region_BMin0_T"+tag_i+"_L2_Y2015_distpTV_DSR",
rt=["_L2","_T"+tag_i, "_distpTV","_DSR"], ea=["_DmBBcr","_Dtopemucr"])
def get_run_info (self):
lumi = {}
if self._year == "4023":
lumi["2011"] = ["4.7", 7]
lumi["2012"] = ["20.3", 8]
if self._year == "2011":
lumi["2011"] = ["4.7", 7]
if self._year == "2012":
lumi["2012"] = ["20.3", 8]
if self._year == "2015":
lumi["2015"] = ["3.2", 13]
return lumi
def get_title_height (self):
return 3.5 if self._year == "4023" else 2
def draw_category_ids (self, props, l, pos, nf):
merged = False
plural_jets = False
nf += 0.25*nf # a bit more vertical spacing
nleps = props.get("L", "-100")
if nleps == '3':
nleps = "0+1+2"
njets = props.get("J", "-1")
nincjets = props.get("incJet", "-1")
if njets == "23":
plural_jets = True
njets = "2+3"
elif nincjets == '1':
plural_jets = True
# njets += '+'
njets = '#geq {}'.format(njets)
elif int(njets) > 1:
plural_jets = True
nfatjets = props.get("Fat", "-1")
nincfatjets = props.get("incFat", "-1")
if int(nfatjets) > 0 and nincfatjets == '1':
plural_jets = True
merged = True
# nfatjets += '+'
nfatjets = '#geq {}'.format(nfatjets)
# nfatjets += ' #leq'
elif int(nfatjets) > 1:
plural_jets = True
ntags = props.get("T", "-100")
region = ""
if not nleps == '-100':
if len(region) > 0:
region += ', '
region += "{} lep.".format(nleps)
if not njets == '-1' or not nfatjets == '-1':
if len(region) > 0:
region += ', '
region += "{} {}jet{}".format(nfatjets if merged else njets,
"large-R " if merged else "",
"s" if plural_jets else "")
if not ntags == '-100':
if len(region) > 0:
region += ', '
region += "{} tag{}".format(ntags,
"s" if not int(ntags) == 1 else "")
pTVBin = ""
pTVmin = props.get("BMin", "-999")
pTVmax = props.get("BMax", "-999")
if not pTVmin == "-999" and pTVmax == "-999" and not pTVmin == "0":
pTVBin = "{0} GeV #leq p_{{T}}^{{V}}".format(pTVmin)
elif (pTVmin == "0" or pTVmin == "-999") and not pTVmax == "-999":
pTVBin = "p_{{T}}^{{V}} < {0} GeV".format(pTVmax)
elif not pTVmin == "-999" and not pTVmax == "-999":
pTVBin = "{0} GeV #leq p_{{T}}^{{V}} < {1} GeV".format(pTVmin, pTVmax)
signalControl = props.get("D", "")
if not signalControl == "":
def add_strings (base, addition):
if base == "":
return addition
else:
return base + ", " + addition
temp = signalControl
signalControl = ""
reduce_SR_CR_mBB = props["dist"] == "pTV" or props["dist"] == "MET"
if temp.find('SR') == 0:
if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} SR"
elif merged: signalControl = add_strings(signalControl, "75 GeV #leq m_{b#bar{b}} < 145 GeV")
else: signalControl = add_strings(signalControl, "110 GeV #leq m_{b#bar{b}} < 140 GeV")
temp = temp[2:]
if "highmBBcr" in temp:
if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} upper CR"
elif merged: signalControl = add_strings(signalControl, "145 GeV #leq m_{b#bar{b}}")
else: signalControl = add_strings(signalControl, "140 GeV #leq m_{b#bar{b}}")
temp = temp.replace("highmBBcr", "")
if "lowmBBcr" in temp:
if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} lower CR"
elif merged: signalControl = add_strings(signalControl, "m_{b#bar{b}} < 75 GeV")
else: signalControl = add_strings(signalControl, "m_{b#bar{b}} < 110 GeV")
temp = temp.replace("lowmBBcr", "")
if "mBBcr" in temp:
if reduce_SR_CR_mBB: signalControl = "m_{b#bar{b}} CR"
elif merged: signalControl = add_strings(signalControl, "m_{b#bar{b}} #leq 75 GeV, 145 GeV < m_{b#bar{b}}")
else: signalControl = add_strings(signalControl, "m_{b#bar{b}} #leq 110 GeV, 140 GeV < m_{b#bar{b}}")
temp = temp.replace("mBBcr", "")
if "topemucr" in temp:
signalControl = add_strings(signalControl, "e#mu")
temp = temp.replace("topemucr", "")
if "topaddbjetcr" in temp:
signalControl = add_strings(signalControl, "+1 b-jet")
temp = temp.replace("topaddbjetcr", "")
pos_next = pos[1] - 0.1*nf # a bit more spacing
l.DrawLatex(pos[0], pos_next, region)
if not pTVBin == "":
pos_next -= nf
l.DrawLatex(pos[0], pos_next, pTVBin)
if not signalControl == "":
pos_next -= nf
l.DrawLatex(pos[0], pos_next, signalControl)
pos_next -= nf
return (pos[0], pos_next)
    def force_mu_value (self):
        """Accessor for the ``(force?, mu)`` pair set in ``__init__``: the
        bool says whether the signal strength is fixed to the given value."""
        return self.force_mu
def get_year_str (self):
return self._year if int(self._year) < 2015 else ""
def get_xbound_from_properties (self, prop):
return (40, 400) if prop["dist"] == "pTB1" else None
def get_legend_pos_from_properties (self, prop):
result = None
if prop["L"] == '0' and prop["dist"] == "VpT":
result = [0.155, 0.13, 0.375, 0.65]
if prop["dist"] == "dPhiVBB":
result = [0.16, 0.16, 0.38, 0.68]
return result
    def get_yscale_factor_from_properties (self, prop, logy):
        """Per-distribution y-axis scale factor; currently always 1.0.

        The commented entries below record the hand-tuned factors used in
        earlier plot iterations — kept for reference.
        """
        # if prop["dist"] == "MV1cB1" or prop["dist"] == "MV1cB2" or prop["dist"] == "MV1cBTag":
        # if not logy: return 1.5
        # if prop["dist"] == "dPhiVBB" :
        # if logy: return 5
        # else : return 0.7
        # if prop["dist"] == "dPhiLBmin" :
        # if not logy: return 1.3
        # if prop["dist"] == "mjj" :
        # if not logy: return 1.1
        # if prop["dist"] == "dRBB" :
        # if logy: return 500
        # if prop["dist"] == "MV1cBTag" :
        # if not logy: return 0.75
        # if prop["L"] == "0" :
        # if prop["dist"] == "MV1cB1" or prop["dist"] == "MV1cB2" or prop["dist"] == "mjj" :
        # if not logy: return 1.1
        # if prop["dist"] == "MET" :
        # if not logy: return 1.0/1.15
        return 1.0
    def postprocess_main_content_histogram (self, prop, hist):
        """Hook called after the main-pad histogram is drawn.

        For MET/pTV, draws a dashed vertical line at (the bin edge below)
        500 GeV denoting the merged/resolved transition; the histogram is
        returned unchanged.
        """
        # draw line denoting the transition of merged and resolved
        if prop["dist"] == "MET" or prop["dist"] == "pTV":
            max_value = hist.GetMaximum()
            min_value = 0#hist.GetYaxis().GetXmin()
            # snap the line to the low edge of the bin containing 500
            x_value = hist.GetXaxis().GetBinLowEdge(hist.GetXaxis().FindBin(500))
            l = ROOT.TLine(x_value, min_value, x_value, max_value)
            l.SetLineStyle(2)
            l.SetLineWidth(4)
            l.SetNDC(False)
            # NOTE(review): TLine.DrawLine draws a separate line object
            # carrying this line's attributes; `l` itself goes out of scope
            # here — presumably intentional, confirm no ownership issues.
            l.DrawLine(x_value, min_value, x_value, max_value)
            logging.debug("drawing line with endpoint coordinates ({},{}) and ({},{})".format(x_value, min_value, x_value, max_value))
        return hist
def get_xTitle (self, prop, data_hist):
""" get title of X-axis from properties """
if not prop:
return ""
varname = prop["dist"]
result = varname
labels = {
# new
"MV1cB1": "MV1c(b_{1}) OP",
"MV1cB2": "MV1c(b_{2}) OP",
"MV1cBTag": "MV1c(b) OP",
"dEtaBB": "#Delta#eta(b_{1},b_{2})",
"dEtaVBB": "#Delta#eta(V,bb)",
"dPhiLBmin": "#Delta#phi(lep,b)_{min}",
"dPhiVBB": "#Delta#phi(V,bb)",
"dRBB": "#DeltaR(b_{1},b_{2})",
#"MEff": "M_{eff} [GeV]",
#"MEff3": "M_{eff3} [GeV]",
"MEff": "H_{T} [GeV]",
"MEff3": "H_{T} [GeV]",
"MET": "E_{T}^{miss} [GeV]",
"mLL": "M_{ll} [GeV]",
"mTW": "m_{T}(W) [GeV]",
"mva": "BDT_{VH}",
"mvaVZ": "BDT_{VZ}",
"pTB1": "p_{T}(b_{1}) [GeV]",
"pTB2": "p_{T}(b_{2}) [GeV]",
"pTJ3": "p_{T}(j_{3}) [GeV]",
"pTV": "p_{T}^{V} [GeV]",
"VpT": "p_{T}^{V} [GeV]",
"mVH": "m_{T}(Vh) [GeV]"
}
if "mjj" in varname:
# nominal
tmp_extra = ""
tmp_extra2 = " [GeV]"
# hack for mjj trafo D
#tmp_extra = "Transformed "
#tmp_extra2 = ""
#
if prop["T"] == "2":
result = tmp_extra+"m_{bb}"+tmp_extra2
elif prop["T"] == "1":
result = tmp_extra+"m_{bj}"+tmp_extra2
else:
result = tmp_extra+"m_{jj}"+tmp_extra2
elif "mBBJ" in varname:
if prop["T"] == "2":
result = "m_{bbj} [GeV]"
elif prop["T"] == "1":
result = "m_{bjj} [GeV]"
else:
result = "m_{jjj} [GeV]"
elif "mBB" in varname:
if prop["T"] == "2":
result = "m_{bb} [GeV]"
elif prop["T"] == "1":
result = "m_{bj} [GeV]"
else:
result = "m_{jj} [GeV]"
elif "mVH" in varname:
if prop["L"] == "1" or prop["L"] == "0":
result = "m_{T}(Vh) [GeV]"
else:
result = "m(Vh) [GeV]"
elif varname in labels:
result = labels[varname]
#for k in labels:
#if k in varname:
#return labels[k]
return result
def get_yTitle_tag (self, prop, data_hist):
extra_unit = ""
if prop["dist"] == "MEff" : extra_unit = " GeV"
if prop["dist"] == "MEff3" : extra_unit = " GeV"
if prop["dist"] == "MET" : extra_unit = " GeV"
if prop["dist"] == "mLL" : extra_unit = " GeV"
if prop["dist"] == "mTW" : extra_unit = " GeV"
if prop["dist"] == "pTB1" : extra_unit = " GeV"
if prop["dist"] == "pTB2" : extra_unit = " GeV"
if prop["dist"] == "pTJ3" : extra_unit = " GeV"
if prop["dist"] == "pTV" : extra_unit = " GeV"
#if prop["dist"] == "VpT" : extra_unit = " GeV" # new
if prop["dist"] == "mjj" : extra_unit = " GeV" # hack -> comment when trafoD
if prop["dist"] == "mBB" : extra_unit = " GeV"
if prop["dist"] == "mBBJ" : extra_unit = " GeV"
if prop["dist"] == "mVH" : extra_unit = " GeV"
# NOTE: JWH - ED board requests
if not self.do_rebinning(prop):
# if not (prop["dist"] == "mVH" and prop.get("incFat", "-1") == "-1" and
# prop.get("D", "") == "SR" and prop.get("L", "0") == "2") :
extra_number = str(data_hist.GetBinWidth(1))
if not extra_number.find('.') == -1: extra_number = extra_number[:extra_number.find('.')]
extra_unit = " " + extra_number + extra_unit
y_ratio = round(data_hist.GetBinWidth(1), 2)
if (y_ratio*10) % 10 == 0 and (y_ratio*100) % 100 == 0: y_ratio = int(y_ratio)
if prop["dist"] == "VpT": extra_str = " / bin" # new
elif prop["dist"] == "mVH": extra_str = " /" + extra_unit
else: extra_str = " / " + str(y_ratio) + extra_unit # new
if prop["dist"] == "MV1cB1": extra_str = ""
if prop["dist"] == "MV1cB2": extra_str = ""
if prop["dist"] == "MV1cBTag": extra_str = ""
return extra_str
def set_y_range (self, hist, nlegend_items, miny, maxy, log_scale, prop):
# if log_scale and prop["dist"] == "mVH":
# hist.SetMaximum(maxy * 100)
# hist.SetMinimum(0.001)
# return
bottom_padding = 1.0/16.0
content_faction = 4.0/7.0 if nlegend_items <= 8 else 3.0/7.0
if prop["dist"] == "mVH":
# figures 2)a-d in conf note
if (prop["L"] == "0" or prop["L"] == "2") and log_scale:
if prop["T"] == "1" or prop["T"] == "2":
if prop["D"] == "mBBcr":
if prop.get("BMax", "-999") == "500":
content_faction *= 1.25
# figures 3)a,b in conf note
if prop["D"] == "topemucr" and log_scale:
if prop["T"] == "1":
content_faction *= 1.15
if prop["T"] == "2":
content_faction *= 1.25
if "SR" in prop["D"]:
# figures 6)a-d in conf note
if prop.get("BMax", "-999") == "500" and log_scale:
if prop["L"] == "0":
if prop["T"] == "1":
content_faction *= 1.15
if prop["T"] == "2":
content_faction *= 1.25
if prop["L"] == "2":
content_faction *= 1.25
# figures 7)a,c,d in conf note
if prop.get("BMin", "-999") == "500" and not log_scale:
if prop["L"] == "0":
if prop["T"] == "1":
content_faction *= 1.5
if prop["L"] == "2":
if prop["T"] == "1":
content_faction *= 2.15
if prop["T"] == "2":
content_faction *= 1.15
# figures 4)a-d in conf note
if prop["dist"] == "mBB" and not log_scale:
if prop.get("BMax", "-999") == "500" and not (prop.get("D", "") == "topemucr"):
# if prop["L"] == "0":
# if prop["T"] == "1":
content_faction *= 1.5
if prop.get("BMax", "-999") == "500" and prop.get("D", "") == "topemucr":
content_faction *= 1.15
# figures 10)a-d in conf note
if (prop["dist"] == "MET" or prop["dist"] == "pTV") and log_scale:
content_faction *= 1.25
if not log_scale:
if miny < 1e-6: miny = 0
plot_scale = (maxy - miny)
bottom = miny - bottom_padding*plot_scale
top = bottom + plot_scale/content_faction
# hist.SetMinimum(bottom)
# hist.SetMaximum(top)
hist.GetYaxis().SetLimits(bottom, top)
# hist.GetHistogram().GetYaxis().SetRangeUser(bottom, top)
logging.debug("setting plot y-range to ({0}, {1})".format(hist.GetHistogram().GetYaxis().GetXmin(), hist.GetHistogram().GetYaxis().GetXmax()))
return
else:
log_miny = ROOT.TMath.Log10(miny)
log_maxy = ROOT.TMath.Log10(maxy)
plot_scale = (log_maxy - log_miny)
# 0.25 is just fine tuning
# bottom = log_miny - 0.25*bottom_padding*plot_scale
bottom = log_miny
top = bottom + plot_scale/content_faction
# hist.SetMinimum(ROOT.TMath.Power(10, bottom))
# hist.SetMaximum(ROOT.TMath.Power(10, top))
hist.GetYaxis().SetLimits(ROOT.TMath.Power(10, bottom), ROOT.TMath.Power(10, top))
# hist.GetHistogram().GetYaxis().SetRangeUser(ROOT.TMath.Power(10, bottom), ROOT.TMath.Power(10, top))
logging.debug("setting log scale plot y-range to ({0}, {1})".format(hist.GetHistogram().GetYaxis().GetXmin(), hist.GetHistogram().GetYaxis().GetXmax()))
return
# if not log_scale and miny > 0:
# miny = 0
# if log_scale and miny <= 1:
# miny = 0.25
# mini = miny
#
# if mini < 0:
# hist.SetMinimum(mini*1.25)
# else:
# mini = 0
# # fix 0 cut in the Y axis
# #hist.SetMinimum(0.01)
# if log_scale:
# hist.SetMaximum(maxy * 100)
# hist.SetMinimum(miny / 2.5)
# else:
# hist.SetMaximum(mini + (maxy - mini) * 1.5)
def auto_compute_ratio_yscale_from_properties (self, prop):
return (prop["dist"] == "mva" or prop["dist"] == "mvaVZ")
def scale_all_yvals(self, prop):
return prop["dist"] == "mva", 0.05
    def postprocess_dataMC_ratio_histogram (self, prop, hist):
        """Hook called on the data/MC ratio histogram; identity by default."""
        return hist
def determine_year_from_title (self, title):
if "2015" in title:
return "2015"
elif "2012" in title:
return "2012"
elif "2011" in title:
return "2011"
elif "both" in title:
return "4023"
def add_additional_signal_info_to_legend (self, legend, signal):
if signal.mode == self._STACK:
legend.AddEntry(ROOT.NULL, "m_{H}=" + str(signal.mass) + " GeV", "")
else:
legend.AddEntry(ROOT.NULL, "m_{H}=" + str(signal.mass) + " GeV", "")
| btannenw/physics-dihiggs | statCode/scripts/VHbbRun2/analysisPlottingConfig.py | analysisPlottingConfig.py | py | 30,133 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "plottingConfig.PlottingConfig",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "ROOT.kRed",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "R... |
72640402983 | import os, argparse, traceback, glob, random, itertools, time, torch, threading, queue
import numpy as np
import torch.optim as optim
from models.tacotron import post_CBHG
from torch.nn import L1Loss
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from util.hparams import *
# Location of the preprocessed features produced by the preprocessing step.
data_dir = './data'
# Sorted so that mel/spec files at the same list position belong together.
mel_list = sorted(glob.glob(os.path.join(data_dir + '/mel', '*.npy')))
spec_list = sorted(glob.glob(os.path.join(data_dir + '/spec', '*.npy')))
# mel_len rows are indexed below as [i][1] -> file index; presumably column 0
# holds the mel length used for length ordering — confirm against the
# preprocessing script.
mel_len = np.load(os.path.join(data_dir + '/mel_len.npy'))
def DataGenerator():
    """Endless batch generator for post-net training.

    Yields ``[mel, spec]`` tensor pairs, zero-padded to the longest item
    in each batch.
    """
    while True:
        # Sample batch_group positions without replacement and keep them
        # sorted: mel_len appears to be length-ordered, so consecutive
        # positions form length-homogeneous buckets (assumption — confirm
        # against the preprocessing step).  The buckets are then chunked
        # into batches and the batch order shuffled.
        idx_list = np.random.choice(len(mel_list), batch_group, replace=False)
        idx_list = sorted(idx_list)
        idx_list = [idx_list[i : i + batch_size] for i in range(0, len(idx_list), batch_size)]
        random.shuffle(idx_list)
        for idx in idx_list:
            random.shuffle(idx)
            # mel_len[i][1] maps a sampled position to the actual file index.
            mel = [torch.from_numpy(np.load(mel_list[mel_len[i][1]])) for i in idx]
            spec = [torch.from_numpy(np.load(spec_list[mel_len[i][1]])) for i in idx]
            # pad every sequence in the batch to the longest one (batch-first)
            mel = pad_sequence(mel, batch_first=True)
            spec = pad_sequence(spec, batch_first=True)
            yield [mel, spec]
class Generator(threading.Thread):
    """Background thread that prefetches items from a generator.

    Items are pushed into a bounded queue (so the producer cannot run
    arbitrarily far ahead); ``None`` is the end-of-stream sentinel.
    """
    def __init__(self, generator):
        threading.Thread.__init__(self)
        self.queue = queue.Queue(8)
        self.generator = generator
        # BUG FIX: make the prefetch thread a daemon — with an infinite
        # source (DataGenerator) a non-daemon thread kept the process
        # alive forever after the main thread finished.
        self.daemon = True
        self.start()

    def run(self):
        for item in self.generator:
            self.queue.put(item)
        # end-of-stream sentinel (only reached for finite generators)
        self.queue.put(None)

    def next(self):
        """Return the next prefetched item; raise StopIteration once the
        underlying generator is exhausted."""
        next_item = self.queue.get()
        if next_item is None:
            # BUG FIX: re-arm the sentinel so repeated calls keep raising
            # instead of blocking forever on the now-empty queue.
            self.queue.put(None)
            raise StopIteration
        return next_item
def train(args):
    """Train the post-CBHG network, optionally resuming from a checkpoint.

    Checkpoints (model + optimizer state, step and epoch counters) are
    written to ``./ckpt/<name>/2`` every ``checkpoint_step`` steps.
    """
    train_loader = Generator(DataGenerator())
    model = post_CBHG(K=8, conv_dim=[256, mel_dim]).cuda()
    optimizer = optim.Adam(model.parameters())
    step, epochs = 0, 0
    if args.checkpoint is not None:
        ckpt = torch.load(args.checkpoint)
        model.load_state_dict(ckpt['model'])
        optimizer.load_state_dict(ckpt['optimizer'])
        # BUG FIX: the step used to be loaded as a 1-tuple (stray trailing
        # comma) and the loaded epoch was stored in a variable the training
        # loop never read, so resuming restarted the epoch count at 0.
        step = ckpt['step']
        epochs = ckpt['epoch']
        print('Load Status: Epoch %d, Step %d' % (epochs, step))
    torch.backends.cudnn.benchmark = True
    # make sure the checkpoint directory exists even when train() is
    # called without going through the __main__ block
    save_dir = os.path.join('./ckpt', args.name, '2')
    os.makedirs(save_dir, exist_ok=True)
    try:
        for epoch in itertools.count(epochs):
            for _ in range(batch_group):
                start = time.time()
                mel, target = train_loader.next()
                mel = mel.float().cuda()
                target = target.float().cuda()
                pred = model(mel)
                loss = L1Loss()(pred, target)
                model.zero_grad()
                loss.backward()
                optimizer.step()
                step += 1
                print('step: {}, loss: {:.5f}, {:.3f} sec/step'.format(step, loss, time.time() - start))
                if step % checkpoint_step == 0:
                    torch.save({
                        'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'step': step,
                        'epoch': epoch
                    }, os.path.join(save_dir, 'ckpt-{}.pt'.format(step)))
    except Exception:
        # keep the stack trace but do not crash hard (original behavior)
        traceback.print_exc()
if __name__ == "__main__":
    # CLI: -n/--name names the run (mandatory); -c/--checkpoint optionally
    # points at a checkpoint file to resume from.
    cli = argparse.ArgumentParser()
    cli.add_argument('--checkpoint', '-c', default=None)
    cli.add_argument('--name', '-n', required=True)
    cli_args = cli.parse_args()
    # create the stage-'2' checkpoint directory up front
    ckpt_dir = os.path.join('./ckpt/' + cli_args.name, '2')
    os.makedirs(ckpt_dir, exist_ok=True)
    train(cli_args)
| chldkato/Tacotron-pytorch | train2.py | train2.py | py | 3,633 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 13... |
73701609385 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck unit: BN-ReLU-Conv1x1 followed by
    BN-ReLU-Conv3x3, concatenating ``growthRate`` new feature maps onto
    the input along the channel dimension."""

    def __init__(self, nChannels, growthRate, dropout_rate):
        super(Bottleneck, self).__init__()
        self.dropout_rate = dropout_rate
        # the 1x1 bottleneck widens to 4x the growth rate before the 3x3
        bottleneck_width = 4 * growthRate
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, bottleneck_width, kernel_size=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(bottleneck_width)
        self.conv2 = nn.Conv2d(bottleneck_width, growthRate, kernel_size=3,
                               padding=1, bias=False)

    def forward(self, x):
        features = self.conv1(F.relu(self.bn1(x)))
        features = self.conv2(F.relu(self.bn2(features)))
        if self.dropout_rate > 0:
            features = F.dropout(features, p=self.dropout_rate,
                                 training=self.training)
        return torch.cat((x, features), 1)
class SingleLayer(nn.Module):
    """Plain DenseNet unit: BN-ReLU-Conv3x3 producing ``growthRate`` new
    feature maps that are concatenated onto the input channels."""

    def __init__(self, nChannels, growthRate, dropout_rate):
        super(SingleLayer, self).__init__()
        self.dropout_rate = dropout_rate
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3,
                               padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        if self.dropout_rate > 0:
            new_features = F.dropout(new_features, p=self.dropout_rate,
                                     training=self.training)
        return torch.cat((x, new_features), 1)
class Transition(nn.Module):
    """DenseNet transition layer: BN-ReLU-Conv1x1 channel reduction
    followed by 2x2 average pooling (halves the spatial resolution)."""

    def __init__(self, nChannels, nOutChannels):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1,
                               bias=False)

    def forward(self, x):
        squeezed = self.conv1(F.relu(self.bn1(x)))
        return F.avg_pool2d(squeezed, 2)
class Model(nn.Module):
    """DenseNet-style image classifier with ``n_layers`` dense blocks,
    each (except the last) followed by a Transition layer; ends in global
    average pooling and a log-softmax over ``nClasses``.

    Expects 3-channel input; ``depth`` controls the total number of dense
    units, split evenly across the blocks.
    """

    def __init__(self, nClasses=256, growthRate=12, depth=40, bottleneck=False,
                 dropout_rate=0.0, n_layers=3, **kwargs):
        super().__init__()
        self.n_layers = n_layers
        # dense units per block; bottleneck units count double (1x1 + 3x3)
        nDenseBlocks = (depth - 4) // n_layers
        if bottleneck:
            nDenseBlocks //= 2
            # DenseNet-C channel compression between blocks
            reduction = 0.5
        else:
            reduction = 1.0
        # initial convolution
        nChannels = 2 * growthRate
        self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1,
                               bias=False)
        for layer_n in range(1, n_layers + 1):
            dense_layer = self._make_dense(
                nChannels, growthRate, nDenseBlocks, bottleneck, dropout_rate)
            setattr(self, f'dense{layer_n}', dense_layer)
            nChannels += nDenseBlocks * growthRate
            if layer_n < n_layers:
                nOutChannels = int(math.floor(nChannels * reduction))
                # (was misspelled 'trainsition_layer')
                transition_layer = Transition(nChannels, nOutChannels)
                setattr(self, f'trans{layer_n}', transition_layer)
                nChannels = nOutChannels
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.fc = nn.Linear(nChannels, nClasses)
        self._init_weights()

    def _init_weights(self):
        """He-style init for convs, unit BN scale, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck, dropout_rate):
        """Stack ``nDenseBlocks`` dense units into one sequential block."""
        layers = []
        for _ in range(int(nDenseBlocks)):
            unit = Bottleneck if bottleneck else SingleLayer
            layers.append(unit(nChannels, growthRate, dropout_rate))
            nChannels += growthRate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        for i in range(1, self.n_layers):
            dense_layer = getattr(self, f'dense{i}')
            trans_layer = getattr(self, f'trans{i}')
            out = trans_layer(dense_layer(out))
        out = getattr(self, f'dense{self.n_layers}')(out)
        # global average pooling over the remaining spatial extent
        out = F.avg_pool2d(F.relu(self.bn1(out)), out.size()[-1])
        out = torch.squeeze(torch.squeeze(out, 2), 2)
        # BUG FIX: log_softmax previously relied on the deprecated implicit
        # dim (warns, and its meaning changed across torch versions) —
        # normalize explicitly over the class dimension.
        return F.log_softmax(self.fc(out), dim=1)

    def save(self, path):
        """Serialize only the weights (state_dict) to ``path``."""
        torch.save(self.state_dict(), path)

    def load(self, path):
        """Load weights previously written by :meth:`save`."""
        state_dict = torch.load(path)
        self.load_state_dict(state_dict)
| ikhlestov/caltech-ml-courses | models/model_dense.py | model_dense.py | py | 4,801 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
23251362882 | import os
from zipfile import ZipFile, ZIP_DEFLATED
class Zip():
    """Zip up all the contents of a directory into the output file."""

    def __init__(self, input_directory, output_file):
        self.input_directory = input_directory
        self.output_file = output_file

    def zip(self):
        """Recursively archive ``input_directory`` into ``output_file``.

        Files are stored with paths relative to the input directory;
        empty directories are preserved as directory entries.

        BUG FIXES vs. the original:
          * ``len(dirs) is 0`` compared ints by identity (a CPython
            interning accident, and a SyntaxWarning) -> truthiness;
          * the empty-directory arcname chopped off its last character
            (``dir_prefix[:-1]``);
          * ``finally: zip_file.close()`` raised NameError when opening
            the archive itself failed -> context manager.
        """
        with ZipFile(self.output_file, 'w', ZIP_DEFLATED) as zip_file:
            for root, dirs, files in os.walk(self.input_directory, topdown=True):
                rel_root = os.path.relpath(root, self.input_directory)
                if rel_root == os.curdir:
                    # top level: entries are stored without a prefix
                    rel_root = ""
                if not dirs and not files:
                    # keep empty directories in the archive
                    zip_file.write(root, rel_root)
                else:
                    for name in files:
                        zip_file.write(os.path.join(root, name),
                                       os.path.join(rel_root, name))
| thewtex/odt-respace | source/odt_respace/zip.py | zip.py | py | 809 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "zipfile.ZIP_DEFLATED",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "os.walk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.commonprefix... |
3407130666 | #!/usr/bin/env python
"""A script to normalized interview transcripts.
It outputs a single text file with cleaned lines
one sentence per line"""
import argparse
import re
import string
import spacy
# Filler expressions (hesitation sounds / backchannels) to strip from the
# transcripts.  NOTE(review): "am" appears twice — harmless for membership
# tests but presumably unintentional.
fillers = [
    "eh",
    "m",
    "mm",
    "mmm",
    "ah",
    "ahm",
    "ehm",
    "yy",
    "y",
    "aha",
    "a-ha",
    "aa",
    "e",
    "ee",
    "łyy",
    "ym",
    "yym",
    "ymm",
    "yyym",
    "oh",
    "am",
    "oo",
    "hm",
    "em",
    "emm",
    "eem",
    "yyo",
    "ii",
    "nnn",
    "nn",
    "no",
    "mhm",
    "am",
    "amm",
    "aam",
    "eey",
    "eeyy",
    "mmyy",
    "yhm",
    "ymhm",
    "mmy",
    "yynn",
    "li",
    "cc",
]
# Polish spaCy pipeline, loaded once at import time (requires the
# pl_core_news_lg model to be installed).
nlp = spacy.load("pl_core_news_lg")
# ASCII punctuation extended with typographic characters found in the data.
punctuation = string.punctuation + '…' +'–' + '’' + "‘"
def get_data(path: str) -> list:
    """Read a .txt file into a list of stripped, non-empty lines.

    BUG FIX: the original guard ``if line == False`` can never be true
    for a string, so blank lines ended up in the output as empty strings;
    they are now skipped via truthiness after stripping.
    """
    list_of_lines = []
    with open(path, "r") as source:
        for line in source:
            stripped = line.strip()
            if stripped:
                list_of_lines.append(stripped)
    return list_of_lines
def write_data(data: list, path: str):
    """Write each item of *data* to *path*, one per line."""
    with open(path, "w") as sink:
        for item in data:
            sink.write(f"{item}\n")
def remove_fillers(line: str) -> str:
    """Remove all filler expressions (module-level ``fillers`` list) from
    a whitespace-delimited line.

    BUG FIX: the original removed items from the token list while
    iterating over it, so the second of two consecutive fillers was
    skipped by the iterator and survived; filtering into a new list
    removes every occurrence.
    """
    kept = [word for word in line.split() if word not in fillers]
    return " ".join(kept)
def pre_tokenization(data: list) -> list:
    """Normalization applied before sentence tokenization.

    Per line, in order: map ';'/'%' to 'ł' (transcription artifacts),
    blank out ellipses (single-char and two-period), drop hyphenated
    stutters, digits/numbers, bracketed and parenthesized content, and
    collapse immediately repeated words to their first occurrence.
    Lines that end up empty are dropped.
    """
    substitutions = [
        (r"[;%]", "ł"),                  # ';' and '%' stand in for 'ł'
        (r"[…]", " "),                   # ellipsis character
        (r"\.\.", " "),                  # two-period ellipsis
        (r"\b[a-zA-ZżźćńółęąśŻŹĆĄŚĘŁÓŃ]+-+\W", ""),          # hyphenated stutters
        (r"(?:[+-]|\()?\$?\d+(?:,\d+)*(?:\.\d+)?\)?", ""),   # digits and numbers
        (r"\[.*?\]", ""),                # bracketed content
        (r"\(.*?\)", ""),                # parenthesized content
        (r"\b(\w+)(?:\W+\1\b)+", r"\1"), # keep first of duplicated words
    ]
    cleaned = []
    for line in data:
        for pattern, replacement in substitutions:
            line = re.sub(pattern, replacement, line)
        if line:
            cleaned.append(line)
    return cleaned
def make_continuous(data: list) -> str:
    """Join a list of strings into one space-separated string.

    Note: the original return annotation was ``string`` (the imported
    module object), which is not a type; the correct annotation is ``str``.
    """
    one_line = " ".join(data)
    return one_line
def post_tokenization(data: list) -> list:
    """Data normalization performed after sentence tokenization."""
    # Translation table that strips the module-level punctuation set.
    strip_punct = str.maketrans("", "", punctuation)
    cleaned = []
    for sentence in data:
        # casefold and strip surrounding whitespace
        text = sentence.lower().strip()
        # standardize quotation marks
        text = re.sub(r"[„”“]", '"', text)
        # remove punctuation
        text = text.translate(strip_punct)
        # replace hyphens with spaces
        text = re.sub(r"-", " ", text)
        # remove filler expressions
        text = remove_fillers(text)
        # collapse duplicates left over after filler removal
        text = re.sub(r"\b(\w+)(?:\W+\1\b)+", r"\1", text)
        # squeeze runs of whitespace
        text = re.sub(" +", ' ', text)
        if text:
            cleaned.append(text)
    return cleaned
def main(args: argparse.Namespace) -> None:
    """Normalize the input transcript and write the result to the output path."""
    raw_lines = get_data(args.input)
    pre_cleaned = pre_tokenization(raw_lines)
    # Sentence-tokenize the whole document with spaCy, then clean again.
    document = nlp(make_continuous(pre_cleaned))
    sentences = [sentence.text for sentence in document.sents]
    write_data(post_tokenization(sentences), args.output)
if __name__ == "__main__":
    # Command-line entry point: clean the --input transcript into --output.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--input", help="path to source file")
    parser.add_argument("--output", help="path to output")
    main(parser.parse_args())
| zoobereq/He-write-age | data cleaning/normalize.py | normalize.py | py | 4,556 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number":... |
43734774169 | """
@创建日期 :2022/4/25
@修改日期 :2022/4/26
@作者 :jzj
@功能 :模型库,输出统一以字典格式
dqn 输出 value
a2c 输出 policy value
fixme: 可能会抽象为参数构建的模式,不确定
"""
from typing import List
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
def make_model(id, args):
    """Instantiate a model by registry id, forwarding *args* as kwargs.

    Raises NotImplementedError for unknown ids.
    """
    registry = {
        "cartpole_dqn": CartPoleDQN,
        "cartpole_a2c": CartPoleA2C,
        "flappybirdsimple_dqn": FlappyBirdSimpleDqn,
        "flappybirdsimple_a2c": FlappyBirdSimpleA2C,
    }
    if id not in registry:
        raise NotImplementedError
    return registry[id](**args)
class ModelWrapper(models.Model):
    """Thin wrapper adding a single-sample ``inference`` helper.

    (Original note: "fixme: dev ing" — still under development.)
    """
    def __init__(self, model):
        super(ModelWrapper, self).__init__()
        # Wrapped Keras model; all forward passes are delegated to it.
        self.model = model
    def call(self, inputs):
        return self.model(inputs)
    def inference(self, x):
        # Delegate if the wrapped model implements its own inference().
        if hasattr(self.model, "inference"):
            return self.model.inference(x)
        # Otherwise prepend a batch dimension and run a normal forward pass.
        x = tf.expand_dims(x, 0)
        outputs = self.call(x)
        return outputs
# CartPole DQN
class CartPoleDQN(models.Model):
    """DQN value network for CartPole: tanh MLP over 4-dim observations.

    ``call`` returns ``{"value": q_values}`` of shape (batch, action_dim).
    """

    def __init__(self, action_dim, hidden_dims: List):
        super(CartPoleDQN, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(4,))
        self.hidden_layers = [
            layers.Dense(dim, activation="tanh") for dim in hidden_dims
        ]
        self.output_layer = layers.Dense(action_dim)

    def call(self, inputs):
        hidden = self.input_layers(inputs)
        for dense in self.hidden_layers:
            hidden = dense(hidden)
        return {"value": self.output_layer(hidden)}
# CartPole A2C
class CartPoleA2C(tf.keras.Model):
    """Actor-critic network for CartPole with a shared hidden layer.

    ``call`` returns ``{"policy": logits, "value": state_value}``.
    """

    def __init__(self, num_action=2, num_hidden_units=128):
        super(CartPoleA2C, self).__init__()
        # Shared trunk: linear layer followed by an explicit ReLU.
        self.common = layers.Dense(num_hidden_units, activation=None)
        self.activation = layers.ReLU()
        self.actor = layers.Dense(num_action)
        self.critic = layers.Dense(1)

    def call(self, inputs: tf.Tensor):
        hidden = self.activation(self.common(inputs))
        return {"policy": self.actor(hidden), "value": self.critic(hidden)}
# FlappyBirdRGB A2C
class ConvBlock(layers.Layer):
    """Conv2D -> BatchNorm -> ReLU with 'same' padding."""

    def __init__(self, filter, kernel_size, stride=1):
        super(ConvBlock, self).__init__()
        self.conv = layers.Conv2D(filter, kernel_size, stride, padding="same")
        self.bn = layers.BatchNormalization()
        self.activation = layers.ReLU()

    def call(self, inputs):
        x = self.conv(inputs)
        x = self.bn(x)
        return self.activation(x)
class ResidualBlock(layers.Layer):
    """Two-conv residual block with a 1x1 ConvBlock projection shortcut.

    The first conv squeezes the channel count by ``squeeze_factor``; the
    second restores it. The ``se`` flag is accepted but not used yet.
    """
    def __init__(self, filter, kernel_size, stride, squeeze_factor, se=False):
        """fixme: add SE (squeeze-and-excitation) support"""
        super(ResidualBlock, self).__init__()
        self.conv_block1 = ConvBlock(filter//squeeze_factor, kernel_size, stride)
        self.conv_block2 = ConvBlock(filter, kernel_size, stride)
        # 1x1 projection so the shortcut matches the output channel count.
        self.short_cut = ConvBlock(filter, 1)
        self.output_bn = layers.BatchNormalization()
        self.output_ac = layers.ReLU()
    def call(self, inputs):
        x = self.conv_block1(inputs)
        x = self.conv_block2(x)
        # Residual add; note the shortcut path is conv+BN+ReLU, not identity.
        x = x + self.short_cut(inputs)
        x = self.output_ac(self.output_bn(x))
        return x
class PolicyHead(layers.Layer):
    """Collapse feature maps to a single channel and project to policy logits."""

    def __init__(self, policy_dim):
        super(PolicyHead, self).__init__()
        self.conv = layers.Conv2D(1, kernel_size=3, strides=1, padding="same")
        self.bn = layers.BatchNormalization()
        self.dense = layers.Dense(policy_dim)

    def call(self, inputs):
        # NOTE(review): the reshape needs statically known H/W — confirm
        # inputs always carry a fully defined shape.
        _, height, width, _ = inputs.shape
        features = self.bn(self.conv(inputs))
        flattened = tf.reshape(features, (-1, height * width))
        return self.dense(flattened)
class ValueHead(layers.Layer):
    """Collapse feature maps to one channel and project to a value estimate.

    Structurally identical to PolicyHead; only the output size differs.
    """
    def __init__(self, value_dim):
        super(ValueHead, self).__init__()
        self.conv = layers.Conv2D(1, kernel_size=3, strides=1, padding="same")
        self.bn = layers.BatchNormalization()
        self.dense = layers.Dense(value_dim)
    def call(self, inputs):
        # NOTE(review): h and w must be statically known for the reshape
        # below — confirm inputs always carry a fully defined shape.
        b, h, w, c = inputs.shape
        x = self.bn(self.conv(inputs))
        x = tf.reshape(x, (-1, h*w))
        x = self.dense(x)
        return x
class FlappyBirdA2C(models.Model):
    """
    Simple residual CNN actor-critic; note the policy output is raw
    logits, not probabilities. (Docstring translated from Chinese.)
    """
    def __init__(self, filters=[32, 64, 128], blocks=[2, 2, 4]):
        # NOTE(review): mutable list defaults — harmless here since they are
        # only iterated, but tuples would be safer.
        super(FlappyBirdA2C, self).__init__()
        self.conv1 = layers.Conv2D(32, 5, 2, padding="same")
        self.bn1 = layers.BatchNormalization()
        self.ac1 = layers.ReLU()
        self.pool1 = layers.MaxPooling2D(pool_size=3, strides=2, padding="same")
        self.middle_layers = []
        # Each stage: `block` residual blocks followed by a stride-2 pool.
        for filter, block in zip(filters, blocks):
            for n in range(block):
                self.middle_layers.append(ResidualBlock(filter, 3, 1, 4))
            self.middle_layers.append(layers.MaxPooling2D(pool_size=3, strides=2, padding="same"))
        self.policy_head = PolicyHead(policy_dim=2)
        self.value_head = ValueHead(value_dim=1)
    def call(self, inputs):
        x = self.pool1(self.ac1(self.bn1(self.conv1(inputs))))
        # NOTE(review): [:-1] skips the final MaxPooling layer — looks
        # deliberate (keeps the last feature map unpooled); confirm.
        for layer in self.middle_layers[:-1]:
            x = layer(x)
        policy = self.policy_head(x)
        value = self.value_head(x)
        return {"policy": policy, "value": value}
# FlappyBirdSimple A2C
class FlappyBirdSimpleA2C(models.Model):
    """Actor-critic tanh MLP for the simple (2-feature) FlappyBird state.

    ``call`` returns ``{"policy": logits, "value": state_value}``.
    """

    def __init__(self, policy_dim=2, value_dim=1, hidden_dims=(32, 64)):
        """Build the network.

        Args:
            policy_dim: number of discrete actions (policy logits).
            value_dim: dimensionality of the value output (normally 1).
            hidden_dims: sizes of the tanh hidden layers. The default is
                now an immutable tuple; the original used a mutable list
                default (the classic Python shared-default pitfall).
        """
        super(FlappyBirdSimpleA2C, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(2,))
        self.hidden_layers = []
        for hidden_dim in hidden_dims:
            self.hidden_layers.append(layers.Dense(hidden_dim, activation="tanh",))
        self.policy_head = layers.Dense(policy_dim)
        self.value_head = layers.Dense(value_dim)

    def call(self, inputs):
        """Forward pass; returns policy logits and value estimate."""
        x = self.input_layers(inputs)
        for layer in self.hidden_layers:
            x = layer(x)
        policy = self.policy_head(x)
        value = self.value_head(x)
        return {"policy": policy, "value": value}
# FlappyBirdSimple DQN
class FlappyBirdSimpleDqn(models.Model):
    """DQN value network for the simple (2-feature) FlappyBird state.

    ``call`` returns ``{"value": q_values}`` of shape (batch, value_dim).
    """

    def __init__(self, value_dim=2, hidden_dims=(256, 256)):
        """Build the network.

        Args:
            value_dim: number of action values to output.
            hidden_dims: sizes of the tanh hidden layers. The default is
                now an immutable tuple; the original used a mutable list
                default (the classic Python shared-default pitfall).
        """
        super(FlappyBirdSimpleDqn, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(2,))
        self.hidden_layers = []
        for hidden_dim in hidden_dims:
            self.hidden_layers.append(layers.Dense(hidden_dim, activation="tanh"))
        self.value_head = layers.Dense(value_dim)

    def call(self, inputs):
        """Forward pass; returns the action-value estimates."""
        x = self.input_layers(inputs)
        for layer in self.hidden_layers:
            x = layer(x)
        value = self.value_head(x)
        return {"value": value}
| baichii/inspire | rookie/models.py | models.py | py | 6,872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.models.Model",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 43,
"usage_type": "call"
},
{
... |
70677287784 | """
Filename: plot_zonal_mean.py
Author: Damien Irving, irving.damien@gmail.com
Description:
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import matplotlib.pyplot as plt
from matplotlib import gridspec
import iris
import iris.plot as iplt
from iris.experimental.equalise_cubes import equalise_attributes
import seaborn
# Import my modules
# Walk up from the current working directory until the repo root
# ('ocean-analysis') is found, then put its modules/ directory on the
# import path so the project helpers below resolve from anywhere in the repo.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
    import timeseries
    import grids
    import convenient_universal as uconv
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
# Line colour used for each experiment in every panel.
experiment_colors = {}
experiment_colors['historical'] = 'green'
experiment_colors['piControl'] = 'black'
experiment_colors['historicalAA'] = 'blue'
experiment_colors['historicalGHG'] = 'red'
experiment_colors['historicalnoAA'] = 'orange'
# Integer basin identifiers as encoded in the CMIP basin files.
basins = {'atlantic': 2,
          'pacific': 3,
          'indian': 5}
def scale_data(cube, var, reverse_sign=False):
    """Scale the cube's data in place and work out the display units.

    Precipitation-minus-evaporation fluxes are converted to mm/day
    (86400 seconds per day); every other variable keeps its native units.
    Optionally flips the sign (some models store wind stress reversed).

    Returns:
        (cube, units) tuple; the cube is the same, mutated, object.
    """
    is_pme = var == 'precipitation_minus_evaporation_flux'
    if is_pme:
        cube.data = cube.data * 86400
    units = 'mm/day' if is_pme else cube.units
    if reverse_sign:
        cube.data = cube.data * -1
    return cube, units
def set_plot_grid(tas_trend=False):
    """Set the grid of plots.

    Args:
        tas_trend (bool): include a fourth panel for the tas-scaled trend.

    Returns:
        gridspec.GridSpec: single column; climatology panel is double height.
    """
    heights = [2, 1, 1, 1] if tas_trend else [2, 1, 1]
    return gridspec.GridSpec(len(heights), 1, height_ratios=heights)
def calculate_climatology(cube, time_bounds, experiment):
    """Calculate the annual mean climatology.

    piControl uses its full (already overlap-aligned) record; every other
    experiment is first restricted to *time_bounds*.
    """
    if not experiment == 'piControl':
        time_constraint = gio.get_time_constraint(time_bounds)
        cube = cube.extract(time_constraint)
    cube = cube.collapsed('time', iris.analysis.MEAN)
    cube.remove_coord('time')
    return cube
def calc_linear_trend(data, xaxis):
    """Calculate the linear trend (slope) of *data* against *xaxis*.

    polyfit returns [a, b] corresponding to y = a + bx; the slope b is
    returned. A series whose first value is masked returns the fill
    value so the caller can re-mask it.
    """
    if not data.mask[0]:
        return numpy.polynomial.polynomial.polyfit(xaxis, data, 1)[-1]
    return data.fill_value
def get_trend_cube(cube, xaxis='time'):
    """Get the trend along the time axis.

    Args:
      cube (iris.cube.Cube): data to take the trend of; time must be
        the first dimension.
      xaxis: 'time' for a per-year trend, or a cube/coordinate whose
        data provides the regression abscissa.

    Returns:
      iris.cube.Cube: time-collapsed cube of trends; the units string
      gains a ' yr-1' (or per-xaxis-unit) suffix.
    """
    coord_names = [coord.name() for coord in cube.dim_coords]
    assert coord_names[0] == 'time'
    if xaxis == 'time':
        trend_data = timeseries.calc_trend(cube, per_yr=True)
        trend_unit = ' yr-1'
    else:
        # Column-wise regression against xaxis; cells whose first timestep
        # is masked come back as the fill value and are re-masked here.
        trend_data = numpy.ma.apply_along_axis(calc_linear_trend, 0, cube.data, xaxis.data)
        trend_data = numpy.ma.masked_values(trend_data, cube.data.fill_value)
        trend_unit = ' '+str(xaxis.units)+'-1'
    # Use the first timestep as a 2D template for the output cube.
    trend_cube = cube[0, ::].copy()
    trend_cube.data = trend_data
    trend_cube.remove_coord('time')
    trend_cube.units = str(cube.units) + trend_unit
    return trend_cube
def get_scale_factor(tas_cube):
    """Calculate the scale factor (total linear warming over the record).

    The per-year tas trend is multiplied by the number of (annual)
    timesteps to give the total warming in the cube's units.
    """
    yearly_trend = get_trend_cube(tas_cube)
    return yearly_trend.data * tas_cube.shape[0]
def plot_climatology(climatology_dict, var, units, legloc, aggregation='Zonal mean'):
    """Plot the zonal climatology for each available experiment.

    Args:
        climatology_dict: experiment name -> zonal climatology cube (or None).
        var: variable name used in the y-axis label.
        units: data units used in the y-axis label.
        legloc: matplotlib legend location code.
        aggregation: label prefix ('Zonal mean' or 'Zonally integrated').
    """
    for experiment in ['historical', 'historicalGHG', 'historicalAA', 'historicalnoAA', 'piControl']:
        if climatology_dict[experiment]:
            color = experiment_colors[experiment]
            iplt.plot(climatology_dict[experiment], color=color, alpha=0.8, label=experiment)
    plt.legend(loc=legloc)
    plt.ylabel('%s %s (%s)' %(aggregation, var.replace('_', ' '), units) )
def check_lats(climatology_dict, experiment):
    """Align an experiment's latitude axis with the piControl axis.

    Sometimes the latitude axes are not exactly equal after regridding;
    tiny offsets (< 0.0001 deg) are overwritten with the control values,
    larger ones trigger an assertion failure.
    """
    experiment_lats = climatology_dict[experiment].coord('latitude')
    control_lats = climatology_dict['piControl'].coord('latitude')
    if not control_lats == experiment_lats:
        diffs = experiment_lats.points - control_lats.points
        assert numpy.abs(diffs).max() < 0.0001, "%s and control have very different latitude axes" %(experiment)
        climatology_dict[experiment].coord('latitude').points = control_lats.points
        climatology_dict[experiment].coord('latitude').bounds = control_lats.bounds
    assert climatology_dict[experiment].coord('latitude') == climatology_dict['piControl'].coord('latitude'), \
        "Problem with %s latitude axis" %(experiment)
    return climatology_dict[experiment]
def plot_difference(climatology_dict):
    """Plot the difference between each experiment and the control climatology.

    Mutates climatology_dict in place (latitude axes are aligned via
    check_lats before subtracting).
    """
    assert climatology_dict['piControl'], 'must have control data for difference plot'
    for experiment in ['historical', 'historicalGHG', 'historicalAA', 'historicalnoAA']:
        if climatology_dict[experiment]:
            climatology_dict[experiment] = check_lats(climatology_dict, experiment)
            diff_cube = climatology_dict[experiment] - climatology_dict['piControl']
            iplt.plot(diff_cube, color=experiment_colors[experiment], alpha=0.8)
    plt.ylabel('Experiment - piControl')
def plot_trend(trend_dict, units, scaled=False):
    """Plot the zonal trend for each available experiment.

    Args:
        trend_dict: experiment name -> zonal trend cube (or None).
        units: data units used in the y-axis label.
        scaled (bool): whether the trends were scaled by total warming.
    """
    for experiment in ['historical', 'historicalGHG', 'historicalAA', 'historicalnoAA', 'piControl']:
        if trend_dict[experiment]:
            iplt.plot(trend_dict[experiment], color=experiment_colors[experiment], alpha=0.8)
    # Raw strings so the LaTeX command (\enspace) is not interpreted as an
    # (invalid) Python escape sequence, which raises a SyntaxWarning on
    # modern Python; the rendered text is unchanged.
    if not scaled:
        plt.ylabel(r'Trend ($%s \enspace yr^{-1}$)' %(units) )
    else:
        plt.ylabel(r'Trend ($%s \enspace yr^{-1}$) scaled by warming' %(units) )
def read_data(inargs):
    """Collect the per-experiment command-line arguments into dictionaries.

    Returns:
        (file_dict, tas_dict, area_dict, basin_dict), each mapping an
        experiment name to the corresponding argument value; piControl
        has no tas file (None).
    """
    experiments = ['historical', 'historicalGHG', 'historicalAA', 'historicalnoAA', 'piControl']
    arg_prefixes = ['historical', 'historicalghg', 'historicalaa', 'historicalnoaa', 'picontrol']

    file_dict = {exp: getattr(inargs, prefix + '_files')
                 for exp, prefix in zip(experiments, arg_prefixes)}
    tas_dict = {exp: getattr(inargs, prefix + '_tas_file')
                for exp, prefix in zip(experiments[:-1], arg_prefixes[:-1])}
    tas_dict['piControl'] = None
    area_dict = {exp: getattr(inargs, prefix + '_area_file')
                 for exp, prefix in zip(experiments, arg_prefixes)}
    basin_dict = {exp: getattr(inargs, prefix + '_basin_file')
                  for exp, prefix in zip(experiments, arg_prefixes)}

    return file_dict, tas_dict, area_dict, basin_dict
def get_areacello_data(cube):
    """Generate a cell-area array from the cube's lat/lon coordinates.

    Bounds are guessed where missing; the result carries the cube's own
    data mask so masked cells contribute no area.
    """
    dim_coord_names = [coord.name() for coord in cube.dim_coords]
    assert 'latitude' in dim_coord_names
    assert 'longitude' in dim_coord_names
    if not cube.coord('latitude').has_bounds():
        cube.coord('latitude').guess_bounds()
    if not cube.coord('longitude').has_bounds():
        cube.coord('longitude').guess_bounds()
    area_data = iris.analysis.cartography.area_weights(cube)
    area_data = numpy.ma.masked_where(numpy.ma.getmask(cube.data), area_data)
    return area_data
def area_ajustment(data_cube, area_file, metadata_dict):
    """Multiply a data cube by its cell area.

    NOTE(review): the misspelt name ('ajustment') is kept because main()
    calls it by this name.

    Args:
        data_cube: cube to scale (time, lat, lon assumed by the broadcast).
        area_file: optional single-element list with an areacello file;
            if absent, areas are derived from the cube's own coordinates.
        metadata_dict: provenance dict, updated with the area file history.

    Returns:
        (data_cube, units, metadata_dict); the 'm-2' suffix is removed
        from the units, or ' m2' appended when there was none.
    """
    if area_file:
        area_cube = iris.load_cube(area_file[0])
        area_data = uconv.broadcast_array(area_cube.data, [1, 2], data_cube.shape)
        metadata_dict[area_file[0]] = area_cube.attributes['history']
    else:
        area_data = get_areacello_data(data_cube)
    data_cube.data = data_cube.data * area_data
    if 'm-2' in str(data_cube.units):
        units = str(data_cube.units).replace('m-2', "")
    else:
        units = str(data_cube.units) + ' m2'
    return data_cube, units, metadata_dict
def main(inargs):
    """Run the program.

    For each experiment with input files: load and concatenate the cubes,
    align piControl with the historical branch time, smooth to annual
    means, optionally mask/select a basin, take the zonal statistic, and
    compute climatology plus trends. Finally draw the multi-panel figure.
    """
    file_dict, tas_dict, area_dict, basin_dict = read_data(inargs)
    metadata_dict = {}
    climatology_dict = {}
    time_trend_dict = {}
    tas_scaled_trend_dict = {}
    branch_dict = {}
    for experiment in ['historical', 'historicalGHG', 'historicalAA', 'historicalnoAA', 'piControl']:
        filenames = file_dict[experiment]
        if not filenames:
            climatology_dict[experiment] = None
            time_trend_dict[experiment] = None
            tas_scaled_trend_dict[experiment] = None
        else:
            print(experiment)
            # Fall back to no time restriction when --total_time is absent.
            try:
                time_constraint = gio.get_time_constraint(inargs.total_time)
            except (AttributeError, TypeError):
                time_constraint = iris.Constraint()
            with iris.FUTURE.context(cell_datetime_objects=True):
                cube = iris.load(filenames, gio.check_iris_var(inargs.var))
                # Merge cubes
                metadata_dict[filenames[0]] = cube[0].attributes['history']
                equalise_attributes(cube)
                iris.util.unify_time_units(cube)
                cube = cube.concatenate_cube()
                cube = gio.check_time_units(cube)
                # Time extraction and branch time info.
                # NOTE(review): piControl relies on branch_dict['historical']
                # having been set, i.e. historical files must be supplied.
                coord_names = [coord.name() for coord in cube.dim_coords]
                assert coord_names[0] == 'time'
                if 'historical' in experiment:
                    original_time_length = cube.shape[0]
                    cube = cube.extract(time_constraint)
                    new_time_length = cube.shape[0]
                    branch_time_index_offset = original_time_length - new_time_length
                    branch_time = cube.attributes['branch_time']
                    time_length = cube.shape[0]
                    branch_dict[experiment] = (branch_time, time_length, branch_time_index_offset)
                elif experiment == 'piControl':
                    # Select the control segment that overlaps the historical run.
                    branch_time, time_length, branch_time_index_offset = branch_dict['historical']
                    start_index, error = uconv.find_nearest(cube.coord('time').points, float(branch_time) + 15.5, index=True)
                    if abs(error) > 15:
                        print("WARNING: Large error of %f in locating branch time" %(error))
                        start_index = 0
                    start_index = start_index + branch_time_index_offset
                    cube = cube[start_index:start_index+time_length, ::]
                # Temporal smoothing
                cube = timeseries.convert_to_annual(cube, full_months=True)
                # Mask marginal seas
                if basin_dict[experiment]:
                    basin_cube = iris.load_cube(basin_dict[experiment])
                    cube = uconv.mask_marginal_seas(cube, basin_cube)
                # Regrid and select basin
                cube, coord_names, regrid_status = grids.curvilinear_to_rectilinear(cube)
                if not inargs.basin == 'globe':
                    if basin_dict[experiment] and not regrid_status:
                        ndim = cube.ndim
                        basin_array = uconv.broadcast_array(basin_cube.data, [ndim - 2, ndim - 1], cube.shape)
                    else:
                        basin_array = uconv.create_basin_array(cube)
                    cube.data.mask = numpy.where((cube.data.mask == False) & (basin_array == basins[inargs.basin]), False, True)
                # Scale
                cube, units = scale_data(cube, inargs.var, reverse_sign=inargs.reverse_sign)
                # Zonal statistic
                if inargs.area_adjust:
                    if regrid_status:
                        area_dict[experiment] = None
                    cube, units, metadata_dict = area_ajustment(cube, area_dict[experiment], metadata_dict)
                    zonal_cube = cube.collapsed('longitude', iris.analysis.SUM)
                    aggregation = 'Zonally integrated'
                else:
                    zonal_cube = cube.collapsed('longitude', iris.analysis.MEAN)
                    aggregation = 'Zonal mean'
                zonal_cube.remove_coord('longitude')
                # Climatology and trends
                climatology_dict[experiment] = calculate_climatology(zonal_cube, inargs.climatology_time, experiment)
                time_trend_dict[experiment] = get_trend_cube(zonal_cube)
                if tas_dict[experiment]:
                    # NOTE(review): the str & constraint combination relies on
                    # iris constraint operator overloading — confirm.
                    tas_cube = iris.load_cube(tas_dict[experiment], 'air_temperature' & time_constraint)
                    scale_factor = get_scale_factor(tas_cube)
                    print(experiment, 'warming:', scale_factor)
                    tas_scaled_trend_dict[experiment] = time_trend_dict[experiment] * (1. / abs(scale_factor))
                    metadata_dict[tas_dict[experiment][0]] = tas_cube.attributes['history']
                else:
                    tas_scaled_trend_dict[experiment] = None
    # Create the plots
    # NOTE(review): `units` and `aggregation` are only bound if at least one
    # experiment had input files — confirm callers always supply some.
    tas_scaled_trend_flag = tas_scaled_trend_dict['historicalGHG'] and tas_scaled_trend_dict['historicalAA']
    fig = plt.figure(figsize=[15, 20])
    gs = set_plot_grid(tas_trend=tas_scaled_trend_flag)
    ax_main = plt.subplot(gs[0])
    plt.sca(ax_main)
    plot_climatology(climatology_dict, inargs.var, units, inargs.legloc, aggregation)
    plt.title('%s (%s), %s' %(inargs.model, inargs.run, inargs.basin) )
    ax_diff = plt.subplot(gs[1])
    plt.sca(ax_diff)
    plot_difference(climatology_dict)
    ax_time_trend = plt.subplot(gs[2])
    plt.sca(ax_time_trend)
    plot_trend(time_trend_dict, units)
    if tas_scaled_trend_flag:
        # Optional fourth panel: trends scaled by each run's total warming.
        ax_tas_trend = plt.subplot(gs[3])
        plt.sca(ax_tas_trend)
        plot_trend(tas_scaled_trend_dict, units, scaled=True)
    plt.xlabel('latitude')
    plt.savefig(inargs.outfile, bbox_inches='tight')
    gio.write_metadata(inargs.outfile, file_info=metadata_dict)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
note:
"""
description=''
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("var", type=str, help="Variable standard_name")
parser.add_argument("model", type=str, help="Model name")
parser.add_argument("run", type=str, help="Run (e.g. r1)")
parser.add_argument("basin", type=str, choices=('atlantic', 'pacific', 'indian', 'globe'), help="Ocean basin")
parser.add_argument("--historical_files", type=str, default=None, nargs='*',
help="Input files for the historical experiment")
parser.add_argument("--historicalghg_files", type=str, default=None, nargs='*',
help="Input files for the historicalGHG experiment")
parser.add_argument("--historicalaa_files", type=str, default=None, nargs='*',
help="Input files for the historicalAA experiment")
parser.add_argument("--historicalnoaa_files", type=str, default=None, nargs='*',
help="Input files for the historicalnoAA experiment")
parser.add_argument("--picontrol_files", type=str, default=None, nargs='*',
help="Input files for the piControl experiment")
parser.add_argument("--historical_tas_file", type=str, default=None, nargs='*',
help="Global mean surface temperature file for historical experiment")
parser.add_argument("--historicalghg_tas_file", type=str, default=None, nargs='*',
help="Global mean surface temperature file for historicalGHG experiment")
parser.add_argument("--historicalaa_tas_file", type=str, default=None, nargs='*',
help="Global mean surface temperature file for historicalAA experiment")
parser.add_argument("--historicalnoaa_tas_file", type=str, default=None, nargs='*',
help="Global mean surface temperature file for historicalnoAA experiment")
parser.add_argument("--historical_area_file", type=str, default=None, nargs='*',
help="Cell area file for historical experiment")
parser.add_argument("--historicalghg_area_file", type=str, default=None, nargs='*',
help="Cell area file for historicalGHG experiment")
parser.add_argument("--historicalaa_area_file", type=str, default=None, nargs='*',
help="Cell area file for historicalAA experiment")
parser.add_argument("--historicalnoaa_area_file", type=str, default=None, nargs='*',
help="Cell area file for historicalnoAA experiment")
parser.add_argument("--picontrol_area_file", type=str, default=None, nargs='*',
help="Cell area file for piControl experiment")
parser.add_argument("--historical_basin_file", type=str, default=None, nargs='*',
help="Cell basin file for historical experiment")
parser.add_argument("--historicalghg_basin_file", type=str, default=None, nargs='*',
help="Cell basin file for historicalGHG experiment")
parser.add_argument("--historicalaa_basin_file", type=str, default=None, nargs='*',
help="Cell basin file for historicalAA experiment")
parser.add_argument("--historicalnoaa_basin_file", type=str, default=None, nargs='*',
help="Cell basin file for historicalnoAA experiment")
parser.add_argument("--picontrol_basin_file", type=str, default=None, nargs='*',
help="Cell basin file for piControl experiment")
parser.add_argument("--area_adjust", action="store_true", default=False,
help="Adjust plots for area [default=False]")
parser.add_argument("--reverse_sign", action="store_true", default=False,
help="Multiple the data by -1 (CCSM4 has wrong sign for wind stress) [default=False]")
parser.add_argument("--climatology_time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
default=('1986-01-01', '2005-12-31'), help="Time period for climatology [default = entire]")
parser.add_argument("--total_time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
default=None, help="Time period for entire analysis. Must go right to end of experiment for control overlap period to be calculated correctly. [default = entire]")
parser.add_argument("--legloc", type=int, default=8,
help="Legend location")
args = parser.parse_args()
main(args)
| DamienIrving/ocean-analysis | visualisation/plot_zonal_mean.py | plot_zonal_mean.py | py | 19,621 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
16385745917 | from torch.utils.data import Dataset
import torch
from PIL import Image
from pathlib import Path
import numpy as np
from dataclasses import dataclass
import random
@dataclass
class imageset:
    """Paths for one change-detection scene: two timestamps and ground truth."""
    t1: Path  # image at time 1
    t2: Path  # image at time 2
    cm: Path  # change map / ground truth
@dataclass
class patch:
    """A rectangular crop of one imageset."""
    imset: imageset  # source scene
    x: tuple  # (start, stop) bounds along the first spatial axis
    y: tuple  # (start, stop) bounds along the second spatial axis
class CDDataset(Dataset):
    """Base dataset of fixed-size patches cut from change-detection scenes.

    Subclasses set ``imagesets`` and ``patchsize`` and implement
    ``_loadrgb``/``_loadcm``. ``__getitem__`` yields (im1, im2, cm) crops.
    """
    imagesets = None   # list[imageset], set by subclass before __init__
    patchsize = None   # side length of square patches, set by subclass
    nx = 0             # mean per-scene patch-grid extent (see note in __init__)
    ny = 0
    patches = []       # re-bound per instance in __init__
    normalize = True   # per-channel standardization of RGB images
    # NOTE(review): class-level dict shared by ALL instances and subclasses;
    # images stay cached for the process lifetime — confirm this is intended.
    cache = {}
    def loadrgb(self, image):
        # Load, float-convert and (optionally) standardize an RGB image, caching by path.
        if image not in self.cache:
            img = self._loadrgb(image).astype(np.float32)
            if self.normalize:
                # Per-channel zero-mean / unit-std over the spatial axes.
                img = (img - img.mean(axis=(-1, -2))[:, None, None]) / img.std(
                    axis=(-1, -2)
                )[:, None, None]
            self.cache[image] = img
        return self.cache[image]
    def loadcm(self, image):
        # Load a change map as int64, caching by path.
        if image not in self.cache:
            self.cache[image] = self._loadcm(image).astype(np.int64)
        return self.cache[image]
    def __init__(self):
        if self.imagesets is None or self.patchsize is None:
            raise NotImplementedError
        # NOTE(review): m and v are never updated — _m/_s below are always
        # zeros; looks like leftover of removed statistics code.
        m, v = np.zeros(3), np.zeros(3)
        self.patches = []
        for imset in self.imagesets:
            im1 = self.loadrgb(imset.t1)
            im2 = self.loadrgb(imset.t2)
            cm = self.loadcm(imset.cm)
            assert im1.shape[1:] == im2.shape[1:] == cm.shape
            assert im1.shape[0] == im2.shape[0] == 3
            # Tile the scene into non-overlapping patchsize x patchsize crops.
            for ix in range(im1.shape[1] // self.patchsize):
                for iy in range(im1.shape[2] // self.patchsize):
                    self.patches.append(
                        patch(
                            imset,
                            (self.patchsize * ix, self.patchsize * (ix + 1)),
                            (self.patchsize * iy, self.patchsize * (iy + 1)),
                        )
                    )
            # NOTE(review): uses the final loop indices (ix, iy), i.e. the
            # last grid coordinate, not the count — confirm intent.
            self.nx += ix / len(self.imagesets)
            self.ny += iy / len(self.imagesets)
        self._m = m
        self._s = np.sqrt(v)
    def __getitem__(self, idx):
        # NOTE: local name shadows the module-level `patch` dataclass.
        patch = self.patches[idx]
        im1 = self.loadrgb(patch.imset.t1)
        im2 = self.loadrgb(patch.imset.t2)
        cm = self.loadcm(patch.imset.cm)
        im1 = im1[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        im2 = im2[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        # if self.normalize:
        #     im1=(im1-im1.mean(axis=(-1,-2))[:,None,None])/im1.std(axis=(-1,-2))[:,None,None]
        #     im2=(im2-im2.mean(axis=(-1,-2))[:,None,None])/im2.std(axis=(-1,-2))[:,None,None]
        cm = cm[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        return (im1, im2, cm)
    def __len__(self):
        return len(self.patches)
class WV_S1(CDDataset):
    """Single-scene dataset: t1.bmp / t2.bmp / gt.bmp inside *path*."""
    def __init__(self, path: Path, patchsize: int):
        self.imagesets = [imageset(*(path / f for f in ["t1.bmp", "t2.bmp", "gt.bmp"]))]
        self.patchsize = patchsize
        super(WV_S1, self).__init__()
    def _loadrgb(self, image):
        # HWC uint8 -> CHW floats scaled to [0, 1].
        return np.array(Image.open(image)).transpose(2, 0, 1) / 255
    def _loadcm(self, image):
        # Dark pixels (< 128) are treated as change — TODO confirm polarity.
        return np.array(Image.open(image)) < 128
class OSCD(CDDataset):
    """Onera Satellite Change Detection layout: paired rectified Sentinel-2 dirs."""
    def __init__(self, path: Path, patchsize: int):
        # Pair scenes by sorted directory order across images/ and labels/.
        self.imagesets = [
            imageset(im1, im2, cm)
            for im1, im2, cm in zip(
                sorted((path / "images").rglob("imgs_1_rect")),
                sorted((path / "images").rglob("imgs_2_rect")),
                sorted((path / "labels").rglob("cm")),
            )
        ]
        self.patchsize = patchsize
        super(OSCD, self).__init__()
    def _loadrgb(self, image):
        # Stacks Sentinel-2 bands B02/B03/B04 (blue, green, red) — note the
        # resulting channel order is B, G, R, not R, G, B.
        return np.stack(
            [np.array(Image.open(image / b)) for b in ("B02.tif", "B03.tif", "B04.tif")]
        )
    def _loadcm(self, image):
        # OSCD change maps encode change as value 2; > 1 yields a bool mask.
        return np.array(Image.open(next(image.glob("*-cm.tif")))) > 1
from typing import Tuple
from torch.utils.data import Subset
def split(
    ds: Dataset, validation_ratio: float, test_ratio: float, runsize=16, seed=0
) -> Tuple[Dataset, Dataset, Dataset]:
    """
    splits dataset by ratio (0..1) of validation and test in validation, test and train (remainder)
    while ensuring somewhat equal distribution between different parts
    of the Dataset by randomly choosing out of partitions of size runsize

    Fix: the RNG is now seeded with the ``seed`` argument; previously the
    parameter was accepted but silently ignored (RandomState(0) was
    hard-coded, so every seed produced the same split).
    """
    rng = np.random.RandomState(seed)
    val = list()
    test = list()
    train = list()
    chunks = np.array_split(np.arange(len(ds)), len(ds) / runsize)
    for chunk in chunks:
        # How many validation samples are needed to keep the running ratio.
        nv = int(
            validation_ratio * (len(val) + len(test) + len(train) + len(chunk)) - len(val)
        )
        chosen = rng.choice(chunk, nv, replace=False)
        chunk = np.setdiff1d(chunk, chosen)
        val += chosen.tolist()
        nt = int(test_ratio * (len(val) + len(test) + len(train) + len(chunk)) - len(test))
        chosen = rng.choice(chunk, nt, replace=False)
        chunk = np.setdiff1d(chunk, chosen)
        test += chosen.tolist()
        train += chunk.tolist()
    return CDSubset(ds, train), CDSubset(ds, val), CDSubset(ds, test)
class CDSubset(Subset):
    """
    Subset of a CDDataset at specified indices with optional augmentation.

    With ``augment`` enabled, every array of an item gets the same random
    transpose plus a random multiple-of-90-degree rotation.
    """
    augment = False
    def __getitem__(self, idx):
        items = super().__getitem__(idx)
        if not self.augment:
            return items
        # Randomly mirror by swapping the two spatial axes.
        if random.randint(0, 1):
            items = [np.swapaxes(arr, -1, -2) for arr in items]
        # Rotate 0-3 quarter turns; np.copy makes the result contiguous.
        quarter_turns = random.randint(0, 3)
        return [np.copy(np.rot90(arr, quarter_turns, (-1, -2))) for arr in items]
class CDCat(Dataset):
    """
    Concats the two images along first dimension

    Wraps an existing dataset whose items are (im1, im2, cm) and yields
    (concat(im1, im2), cm) instead.
    """
    def __init__(self, baseObject):
        # NOTE(review): dynamic class-hijacking — the instance's class is
        # replaced by a new type inheriting from both CDCat and the wrapped
        # object's class, and the instance dict is shared with the wrapped
        # object. Fragile (breaks with __slots__/pickling); confirm needed.
        self.__class__ = type(baseObject.__class__.__name__,
                              (self.__class__, baseObject.__class__),
                              {})
        self.__dict__ = baseObject.__dict__
        self.baseObject=baseObject
    def __getitem__(self, idx):
        im1, im2, cm = self.baseObject[idx]
        # Stack the two timestamps along the channel axis.
        return np.concatenate((im1,im2),0),cm
| fzimmermann89/ml4rs | cd/ds.py | ds.py | py | 6,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"l... |
75174790505 | from vtk import *
# input data, every row is for a different item
positions = [[0, 0, 0],[1.5, 0, 0]]
orientations = [[1.0, 0.0, 0.0],[0.0, 1.0, 1.0]]
colors = [[255, 0, 0],
          [0, 255, 255]]
# NOTE(review): heights is defined but never used below — the glyph height
# is fixed at 0.8; confirm whether per-item heights were intended.
heights = [1,
           2]
# rendering of those two defined cylinders
# Build the point cloud that anchors one glyph per item.
points = vtkPoints()
points.InsertNextPoint(*positions[0])
points.InsertNextPoint(*positions[1])
polydata = vtkPolyData()
polydata.SetPoints(points)
# Per-point RGB colours, consumed via SetColorModeToColorByScalar below.
color_def = vtkUnsignedCharArray()
color_def.SetNumberOfComponents(3)
color_def.SetNumberOfTuples(polydata.GetNumberOfPoints())
color_def.InsertTuple3(0, *colors[0])
color_def.InsertTuple3(1, *colors[1])
polydata.GetPointData().SetScalars(color_def)
# Per-point normals drive each glyph's orientation (SetVectorModeToUseNormal).
pointNormalsArray = vtkDoubleArray()
pointNormalsArray.SetNumberOfComponents(3)
pointNormalsArray.SetNumberOfTuples(polydata.GetNumberOfPoints())
pointNormalsArray.SetTuple(0, orientations[0])
pointNormalsArray.SetTuple(1, orientations[1])
polydata.GetPointData().SetNormals(pointNormalsArray)
# The cylinder all glyphs are copies of.
cyl_source = vtkCylinderSource()
cyl_source.SetResolution(10)
cyl_source.SetHeight(0.8)
cyl_source.SetRadius(0.1)
cyl_source.Update()
glyph = vtkGlyph3D()
glyph.SetInputData(polydata)
glyph.SetSourceConnection(cyl_source.GetOutputPort())
glyph.SetColorModeToColorByScalar()
glyph.SetVectorModeToUseNormal()
glyph.ScalingOff()
# Standard VTK pipeline: mapper -> actor -> renderer -> window -> interactor.
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(glyph.GetOutputPort())
actor = vtkActor()
actor.SetMapper(mapper)
ren = vtkRenderer()
ren.AddActor(actor)
# NOTE(review): relies on the name `vtk` being available despite the star
# import at the top — confirm this runs; plain vtkRenderWindow() would match
# the rest of the script.
renwin = vtk.vtkRenderWindow()
renwin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renwin)
renwin.Render()
iren.Initialize()
iren.Start()
| squeakus/bitsandbytes | vtk/glyphpos.py | glyphpos.py | py | 1,632 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "vtk.vtkRenderWindow",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderWindowInteractor",
"line_number": 58,
"usage_type": "call"
}
] |
39056791379 | from obspy import read
from numpy import r_,ones,zeros
path=u'/Users/dmelgar/Slip_inv/Chiapas_hernandez_new/data/waveforms/before_delta_t/'
outpath='/Users/dmelgar/Slip_inv/Chiapas_hernandez_new/data/waveforms/'
def delay_st(st, delta):
    """Shift the first trace of Stream-like *st* by *delta* seconds.

    Negative *delta* advances the signal (the tail is padded with the last
    sample); positive *delta* delays it (the head is padded with the first
    sample).  The output always has the same number of samples as the input.

    Parameters
    ----------
    st : obspy Stream-like object; ``st[0].data`` is a 1-D array and
        ``st[0].stats.delta`` is the sample spacing in seconds.
    delta : float
        Time shift in seconds.

    Returns
    -------
    numpy array with the shifted samples.
    """
    d = st[0].data
    npts = int(abs(delta) / st[0].stats.delta)
    if npts == 0:
        # BUG FIX: for shifts smaller than one sample, the positive branch
        # previously evaluated d[0:-0] (an empty slice) and returned an
        # empty array.  A sub-sample shift is a no-op here.
        return d.copy()
    if delta < 0:
        # advance: drop the leading npts samples, pad the tail with d[-1]
        d = r_[d[npts:-1], d[-1] * ones(npts + 1)]
    else:
        # delay: pad the head with d[0], truncate the tail
        d = r_[ones(npts) * d[0], d[0:-npts]]
    return d
# Apply the per-station clock corrections and write the shifted traces.
# The three copy-pasted read/shift/write stanzas are collapsed into one
# data-driven loop; offsets are in seconds (negative = advance).
STATION_OFFSETS = [
    ('43413', -3 * 60.),
    ('huat', -2 * 60.),
    ('ptan', -2 * 60.),
]
for sta, delta in STATION_OFFSETS:
    e = read(path + sta + '.sac')
    e[0].data = delay_st(e, delta)
    e.write(outpath + sta + '.sac', format='SAC')
| Ogweno/mylife | chiapas2017/delay_waveforms_tsunami.py | delay_waveforms_tsunami.py | py | 781 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.r_",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 14,
... |
33154135207 | #!/usr/bin/python3
#encoding: UTF-8
import lxml.etree as ET
import markdown as MD
import lib as LIB
#-------------------------------------------------------------------------------
def xpath_list(from_node, xpath):
    """Return every node matching *xpath* under *from_node* as a list.

    Non-element inputs (e.g. None) yield an empty list instead of raising.
    """
    if not isinstance(from_node, ET._Element):
        return []
    return from_node.xpath(xpath)
#-------------------------------------------------------------------------------
def xpath_num_sorted(from_node, xpath, xp_key):
    """Return nodes matching *xpath*, numerically sorted by the value of
    *xp_key* (an xpath evaluated relative to each matched node).
    """
    matches = xpath_list(from_node, xpath)
    return sorted(matches, key=lambda node: xpath_int(node, xp_key))
#-------------------------------------------------------------------------------
def xpath_alpha_sorted(from_node, xpath, xp_key):
    """Return nodes matching *xpath*, alphabetically sorted by the text of
    *xp_key* (an xpath evaluated relative to each matched node).
    """
    matches = xpath_list(from_node, xpath)
    return sorted(matches, key=lambda node: xpath_plain(node, xp_key))
#-------------------------------------------------------------------------------
def xpath_node(from_node, xpath):
    """Return the first node matching *xpath* under *from_node* (or None)."""
    matches = xpath_list(from_node, xpath)
    return LIB.first(matches)
#-------------------------------------------------------------------------------
def xpath_plain(from_node, xpath):
    """Return the first node matching *xpath* as plain text."""
    matches = xpath_list(from_node, xpath)
    return LIB.first_str(matches)
#-------------------------------------------------------------------------------
def xpath_int(from_node, xpath):
    """Return the first node matching *xpath* converted to an integer."""
    text = LIB.first_str(xpath_list(from_node, xpath))
    return int(text)
#-------------------------------------------------------------------------------
def xpath_md(from_node, xpath):
    """Return the first matching node rendered from Markdown to HTML.

    Only suitable for simple paragraphs (no images, no external refs).
    """
    text = LIB.first_str(xpath_list(from_node, xpath))
    return MD.markdown(text)
#-------------------------------------------------------------------------------
def add_error(meta, fct, errlevel, errno, errmsg, line, text):
    """Append an <error> element to *meta* and return it.

    Returns None when *meta* is None (no document to attach to).
    """
    if meta is None:
        return None
    attributes = {
        "fct": fct,
        "errlevel": errlevel,
        "errno": errno,
        "errmsg": errmsg,
        "line": str(line),
    }
    err_node = ET.SubElement(meta, "error", attributes)
    err_node.text = str(text)
    return err_node
#-------------------------------------------------------------------------------
def add_external_ref(meta, ext_ref, from_ref):
    """Append an <external> cross-reference element to *meta*.

    Returns the new element, or None when *meta* is None.
    """
    if meta is None:
        return None
    return ET.SubElement(meta, "external",
                         {"to_ref": ext_ref, "from_ref": from_ref})
#-------------------------------------------------------------------------------
def add_attachment(meta, to_path, ext, file):
    """Append an <attachment> element to *meta* unless one with the same
    to_path already exists.  Returns the new element or None.
    """
    if meta is None:
        return None
    existing = xpath_list(meta, "attachment[@to_path='" + to_path + "']")
    if existing:
        return None
    return ET.SubElement(meta, "attachment",
                         {"to_path": to_path, "ext": ext, "file": file})
#-------------------------------------------------------------------------------
def extend_to_connected(all_docs, ref_set):
    """Return *ref_set* closed under outgoing 'external' reference links.

    Follows from_ref -> to_ref edges transitively (iterative fixpoint
    instead of the original recursion) until no new reference appears.
    """
    closure = set(ref_set)
    frontier = set(ref_set)
    while frontier:
        discovered = set()
        for ref in frontier:
            to_refs = xpath_list(
                all_docs, ".//external[@from_ref='" + ref + "']/@to_ref")
            discovered |= set(to_refs)
        frontier = discovered - closure
        closure |= discovered
    return closure
#-------------------------------------------------------------------------------
def add_dict_as_xml(parent, a_dict):
    """Mirror the tree structure of *a_dict* as child elements of *parent*.

    Keys become lower-cased tag names; string values become element text,
    lists produce one element per item, dicts recurse, and any other value
    yields an empty element.  Returns *parent* unchanged.
    """
    if parent is None:
        return parent
    for raw_name, value in a_dict.items():
        tag = raw_name.lower()
        if isinstance(value, str):
            ET.SubElement(parent, tag).text = value
        elif isinstance(value, list):
            for item in value:
                ET.SubElement(parent, tag).text = item
        elif isinstance(value, dict):
            add_dict_as_xml(ET.SubElement(parent, tag), value)
        else:
            ET.SubElement(parent, tag)
    return parent
| echopen/PRJ-medtec_kit | doc/doc_builder/src/xml_helper.py | xml_helper.py | py | 4,938 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "lxml.etree._Element",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "lxml.etree",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "lib.first",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "lib.first_str",
"li... |
# Find the day of the week for a given 2016 date
# 2016-01-01 was a Friday
# SUN,MON,TUE,WED,THU,FRI,SAT
from datetime import datetime
def solution(a, b):
    """Return the three-letter weekday name for month *a*, day *b* of 2016."""
    weekday_names = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')
    target = datetime(2016, a, b)
    # datetime.weekday(): Monday == 0 ... Sunday == 6
    return weekday_names[target.weekday()]
# 1년 중 첫 시작 요일을 알기 때문에
# 이전 달까지 모두 더하고, 일 수를 더한 후 7로 나눠주면 요일을 알 수 있다.
# 조건이 1년 내이고, 첫번째 날의 요일을 알기 때문에 이런 연산 방법이 더 적합할 것 같다.
def solution1(a, b):
    """Same problem without datetime: 2016-01-01 is a Friday, so the answer
    is the day offset from Jan 1 taken modulo 7 (2016 is a leap year)."""
    month_lengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    week = ['FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU']
    offset = sum(month_lengths[:a - 1]) + b - 1
    return week[offset % 7]
print(solution(5,24))  # sample check: 2016-05-24 is a Tuesday -> "TUE"
| hi-rev/TIL | Programmers/level_1/date.py | date.py | py | 899 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
}
] |
69905738984 | from django.shortcuts import render, redirect
from django.contrib import messages
from .forms import UserRegisterForm
def register(request):
    """Display and process the user sign-up form.

    GET renders an empty form; a valid POST creates the user, flashes a
    success message and redirects to the goods list.  An invalid POST falls
    through and re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'User {username} has been successfully created')
            return redirect('all-goods')
    else:
        form = UserRegisterForm()
    context = {'title': 'Registration page', 'form': form}
    return render(request, 'users/registration.html', context)
{
"api_name": "forms.UserRegisterForm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 13,
"usage_type": "name"
},
{
"api... |
70077372585 | import pandas as pd
import lxml.html
import requests
import shelve
import os, sys
import logging
# Module-level state: console logging, a ./database work dir, the source
# spreadsheet loaded into a DataFrame, and a shelve db of scraped Books.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
if not os.path.exists('database'):
    os.mkdir('database')
elif not os.path.isdir('database'):
    # a plain file named 'database' is replaced by a directory
    os.remove('database')
    os.mkdir('database')
xlsx = 'database/Free+English+textbooks.xlsx'
xfile = pd.ExcelFile(xlsx)
df = xfile.parse()
# NOTE(review): opened without writeback=True — in-place mutation of shelved
# values (e.g. books[key].append(...)) is NOT persisted; only assignment is.
books = shelve.open('database/serial')
class Book:
    """One row of the Springer spreadsheet: normalises metadata, registers
    itself in the module-level ``books`` shelf, ensures a per-subject HTML
    template exists, and scrapes download links plus a cover image."""
    # base URL used to absolutise the relative pdf/epub hrefs
    PARENT = 'https://link.springer.com'
    def __init__(self, idx, title, edition, subject, url):
        """Store normalised metadata; *idx* is the spreadsheet row index and
        is the identity compared by __eq__."""
        self.title = title.replace('/', '_')  # '/' would break file paths
        self.idx = idx
        self.name = f'{self.title}, {edition}'
        self.subject = self._process(subject)
        self._image_url = None
        self._url = url
        self.pdf = None
        self.epub = None
    def __repr__(self):
        return f'{self.idx}: {self.name}'
    def __eq__(self, other):
        # equality is by spreadsheet row index only.  NOTE(review): defining
        # __eq__ without __hash__ makes instances unhashable.
        return self.idx == other.idx
    def _process(self, subject):
        """Register this book under its primary subject (text before ';')
        in the shelf and ensure the subject's template exists; returns the
        primary subject string."""
        subject = subject.split(';')[0]
        book = self
        try:
            # NOTE(review): the shelf is opened without writeback=True, so
            # this append mutates a temporary copy and is never persisted;
            # load_data() later re-assigns books[subject] explicitly.
            books[subject].append(book)
        except (KeyError, AttributeError):
            books[subject] = []
            books[subject].append(book)
        self._make_template(book, subject)
        return subject
    def _make_template(self, book, subject):
        """Write templates/<subject>.html (a Jinja listing page) if absent."""
        path = os.path.join('templates', subject)
        # NOTE(review): the existence check omits the '.html' suffix that is
        # appended when writing, so the template is rewritten on every run.
        if os.path.exists(path):
            return
        else:
            html = '''{% extends "base.html" %}
{% block content %}
<hr>
<a href="{{ url_for('index') }}"><< BACK TO INDEX</a>
<hr>
<h1><center>{{ subject }} Books</center></h1>
{% for book in books[subject] %}
<hr>
<h3>{{ book.name }}</h3>
<img src="static/images/{{ book.image}}" />
<p><u>DOWNLOAD</u>:
{% if book.pdf %}
<a href="{{ book.pdf }}">PDF</a>
{% endif %}
{% if book.epub %}
<a href="{{ book.epub }}">EPUB</a>
{% endif %}
{% if not book.pdf and not book.epub %}
<em>unavailable.</em>
{% endif %}
</p>
{% endfor %}
{% endblock %}'''
            with open(path + '.html', 'w') as fhand:
                fhand.write(html)
    def _scrape(self):
        """Fetch (or load from static/cache) the book page, extract the
        pdf/epub anchors, and download the cover image to static/images.
        Returns False when the expected download markup is missing."""
        name = self.name.replace(' ', '_') + '.html'
        path = os.path.join('static', 'cache', name)
        if os.path.exists(path):
            # cached copy of the book page from a previous run
            with open(path, 'rb') as fhand:
                html = fhand.read()
            html = lxml.html.fromstring(html)
        else:
            response = requests.get(self._url)
            content = response.content
            with open(path, 'wb') as fhand:
                fhand.write(content)
            html = lxml.html.fromstring(content)
        try:
            xpath, epub = self.__get_xpaths(html)
        except IndexError:
            # the download anchors were not found on the page
            print(f'Error: {self.idx} {self.name}'
                    ' server access point missing')
            return False
        else:
            self.__make_links(xpath, epub)
        finally:
            self.image = self.name.replace(' ', '_').replace('\\', '_') + '.jpeg'
            path = os.path.join('static', 'images', self.image)
            # NOTE(review): this tests the bare filename, not ``path``, so
            # the image is (re)downloaded unless the file sits in the CWD.
            if not os.path.exists(self.image):
                self.__set_image_url(html)
                try:
                    image = requests.get(self._image_url).content
                except:
                    # network/URL failure: write an empty placeholder
                    image = ""
                finally:
                    with open(path, 'wb') as fhand:
                        fhand.write(image)
    def __get_xpaths(self, html):
        """Locate the pdf (and optionally epub) download anchors.
        Raises IndexError when neither page-layout variant matches."""
        epub = None
        xpath = html.xpath('//*[@id="main-content"]/article[1]/'
                'div/div/div[2]/div/div/a')
        if not bool(xpath):
            # alternate page layout: separate pdf and epub containers
            xpath = html.xpath(
                '//*[@id="main-content"]/article[1]/div/div/div[2]/div[1]/a'
                )
            epub = html.xpath(
                '//*[@id="main-content"]/article[1]/div/div/div[2]/div[2]/a'
                )
            epub = epub[0]
        xpath = xpath[0]
        return xpath, epub
    def __make_links(self, xpath, epub):
        """Turn the relative hrefs into absolute download URLs."""
        stub = xpath.get('href')
        self.pdf = __class__.PARENT + stub
        if epub is not None:
            stub = epub.get('href')
            self.epub = __class__.PARENT + stub
    def __set_image_url(self, html):
        """Extract the cover-image URL (only when a download link exists)."""
        if self.pdf or self.epub:
            img_xpath = html.xpath(
                '//*[@id="main-content"]/article[1]/div/aside[1]/'
                'div/div/div/img'
                )
            # NOTE(review): when no <img> matches, img_xpath becomes None and
            # the .get('src') below raises AttributeError.
            img_xpath = None if not len(img_xpath) else img_xpath[0]
            self._image_url = img_xpath.get('src')
        else:
            self._image_url = ""
def load_data():
    """Walk every spreadsheet row, scraping and shelving books not yet seen.

    A book counts as already processed when an equal entry (same idx) is
    present under its subject key in the shelf; the shelf is closed once
    the whole sheet has been traversed.
    """
    for idx, _row in df.iterrows():
        book = Book(idx,
                    df['Book Title'].iloc[idx],
                    df['Edition'].iloc[idx],
                    df['Subject Classification'].iloc[idx],
                    df['OpenURL'].iloc[idx])
        try:
            assert book in books[book.subject]
        except (KeyError, AssertionError):
            # unseen book: scrape it and persist via explicit re-assignment
            # (shelve without writeback ignores in-place mutation)
            shelf_entry = books[book.subject]
            book._scrape()
            shelf_entry.append(book)
            books[book.subject] = shelf_entry
            logger.info(f' SERIALIZED : {book.name}')
        else:
            logger.info(f' SKIPPING : {book.name}')
    books.close()
| chris-hamberg/springer_books_web | scraper.py | scraper.py | py | 5,483 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists"... |
70809120103 | #!/usr/bin/python3
"""
Start a Flask web application.
The web app listens on 0.0.0.0, port 5000.
Declares @app.teardown_appcontext to call storage.close().
The /cities_by_states route displays an HTML page of states and cities.
Each route is defined with strict_slashes=False.
"""
from flask import Flask, render_template
from models import storage
from models.state import State
from operator import getitem
app = Flask(__name__)  # WSGI application object for this module
@app.route('/states_list', strict_slashes=False)
def list_states():
    """Render a page listing every State object from storage."""
    all_states = storage.all(State).values()
    return render_template('7-states_list.html', states=all_states)
@app.route('/cities_by_states', strict_slashes=False)
def list_states_cities():
    """Render all states together with the cities that belong to them."""
    state_objects = storage.all(State).values()
    return render_template('8-cities_by_states.html', states=state_objects)
@app.teardown_appcontext
def close_db(db):
    """Close/remove the current storage session after each request."""
    storage.close()
if __name__ == '__main__':
    # listen on all interfaces so the app is reachable from outside
    app.run(host='0.0.0.0', port=5000)
| Realyoung1/AirBnB_clone_v2 | web_flask/8-cities_by_states.py | 8-cities_by_states.py | py | 1,054 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.storage.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.state.State",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "models.storage"... |
70677233704 | """
Filename: calc_volcello.py
Author: Damien Irving, irving.damien@gmail.com
Description: Calculate the CMIP5 volcello variable
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
# Import my modules
# Locate the 'ocean-analysis' repo root by walking up the current working
# directory, then make its modules/ directory importable.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
    import convenient_universal as uconv
    import spatial_weights
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
def construct_volume_cube(volume_data, data_cube, global_atts):
    """Build the volcello (ocean grid-cell volume) cube.

    Copies *data_cube* as a template, installs *volume_data*, and applies
    the CMIP volcello naming metadata.  *global_atts*, when truthy,
    replaces the cube's attribute dictionary.
    """
    volume_cube = data_cube.copy()
    volume_cube.data = volume_data
    metadata = {
        'standard_name': 'ocean_volume',
        'long_name': 'Ocean Grid-Cell Volume',
        'var_name': 'volcello',
        'units': 'm3',
        'cell_methods': (),
    }
    for attr_name, attr_value in metadata.items():
        setattr(volume_cube, attr_name, attr_value)
    if global_atts:
        volume_cube.attributes = global_atts
    return volume_cube
def main(inargs):
    """Run the program.

    Builds the volcello (cell volume) cube as depth * area, optionally
    weighted by the sea-area fraction, masked like the dummy variable,
    and saves it to ``inargs.outfile``.
    """
    # Depth data
    data_cube = iris.load_cube(inargs.dummy_file, inargs.dummy_var)
    dim_coord_names = [coord.name() for coord in data_cube.dim_coords]
    aux_coord_names = [coord.name() for coord in data_cube.aux_coords]
    assert dim_coord_names[0] == 'time'
    depth_name = dim_coord_names[1]
    # only one time slice is needed for the (time-invariant) grid geometry
    data_cube = data_cube[0, ::]
    data_cube.remove_coord('time')
    depth_data = spatial_weights.get_depth_array(data_cube, depth_name)
    # Area data
    if inargs.area_file:
        area_cube = iris.load_cube(inargs.area_file, 'cell_area')
        gio.check_global_ocean_area(area_cube.data.sum())
        # broadcast the 2-D (lat, lon) areas across the depth axis
        area_data = uconv.broadcast_array(area_cube.data, [1, 2], depth_data.shape)
    else:
        area_data = spatial_weights.area_array(data_cube)
    volume_data = depth_data * area_data
    if inargs.sftof_file:
        # scale by the sea-area fraction (sftof is in percent, max 100)
        sftof_cube = iris.load_cube(inargs.sftof_file)
        assert sftof_cube.data.max() == 100
        sftof_data = uconv.broadcast_array(sftof_cube.data, [1, 2], depth_data.shape)
        volume_data = volume_data * (sftof_data / 100.0)
    # mask volume wherever the dummy variable is masked/invalid (land cells)
    volume_data = numpy.ma.asarray(volume_data)
    data = numpy.ma.masked_invalid(data_cube.data)
    volume_data.mask = data.mask
    global_atts = area_cube.attributes if inargs.area_file else None
    volume_cube = construct_volume_cube(volume_data, data_cube, global_atts)
    volume_cube.attributes['history'] = gio.write_metadata()
    gio.check_global_ocean_volume(volume_cube.data.sum())
    iris.save(volume_cube, inargs.outfile)
if __name__ == '__main__':
    # command-line entry point: parse arguments and hand them to main()
    extra_info ="""
author:
    Damien Irving, irving.damien@gmail.com

"""
    description='Calculate the CMIP volcello variable'
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("dummy_file", type=str, help="Dummy file (for depth information)")
    parser.add_argument("dummy_var", type=str, help="Dummy variable")
    parser.add_argument("outfile", type=str, help="Output file name")
    parser.add_argument("--sftof_file", type=str, default=None,
                        help="Sea area fraction file name")
    parser.add_argument("--area_file", type=str, default=None,
                        help="Area file name (required for curvilinear grids, optional otherwise)")
    args = parser.parse_args()
    main(args)
| DamienIrving/ocean-analysis | data_processing/calc_volcello.py | calc_volcello.py | py | 3,841 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
25951961583 | import sys
import base64, time, datetime
callbacks = {
'array': lambda x: [v.text for v in x],
'dict': lambda x:
dict((x[i].text, x[i+1].text) for i in range(0, len(x), 2)),
'key': lambda x: x.text or "",
'string': lambda x: x.text or "",
'data': lambda x: base64.b64decode(x.text),
'date': lambda x:
datetime.datetime(
*(time.strptime(x.text, "%Y-%m-%dT%H:%M:%SZ")[0:6])),
'real': lambda x: float(x.text),
'integer': lambda x: int(x.text),
'true': lambda x: True,
'false': lambda x: False,
}
def _xml_plist_parse(xml_input, _iterparse):
parser = _iterparse(xml_input)
for action, element in parser:
callback = callbacks.get(element.tag)
if callback:
data = callback(element)
element.clear()
element.text = data
elif element.tag != 'plist':
raise IOError("unknown plist tag: %s" % element.tag)
return parser.root[0].text
def parse_using_etree(xml_input):
    """Parse *xml_input* with the pure-Python ElementTree ``iterparse``.

    Returns the parsed top-level Python value.
    """
    from xml.etree.ElementTree import iterparse as py_iterparse
    # BUG FIX: the result was previously discarded, so callers (including
    # the ``assert parse_using_...`` smoke test in __main__) always got None.
    return _xml_plist_parse(xml_input, py_iterparse)
def parse_using_cetree(xml_input):
    """Parse *xml_input* with the C-accelerated ElementTree ``iterparse``.

    Returns the parsed top-level Python value.
    """
    try:
        from xml.etree.cElementTree import iterparse as c_iterparse
    except ImportError:
        # cElementTree was deprecated in 3.3 and removed in Python 3.9;
        # plain ElementTree uses the C accelerator automatically since 3.3.
        from xml.etree.ElementTree import iterparse as c_iterparse
    # BUG FIX: return the result instead of discarding it.
    return _xml_plist_parse(xml_input, c_iterparse)
if __name__ == '__main__':
    # smoke test: parse the plist file given on the command line
    xmlin = open(sys.argv[1])
    try:
        # NOTE(review): this assert only passes when parse_using_cetree
        # returns a truthy parsed value.
        assert parse_using_cetree(xmlin)
    finally:
        xmlin.close()
| ishikawa/python-plist-parser | tools/performance/etree_parser.py | etree_parser.py | py | 1,444 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "base64.b64decode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTre... |
32519507649 | from fastapi import FastAPI, Request, HTTPException, status, Depends ,File, UploadFile
from fastapi.templating import Jinja2Templates
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.staticfiles import StaticFiles
from starlette.responses import HTMLResponse
from tortoise.contrib.fastapi import register_tortoise
from tortoise.signals import post_save
from tortoise import BaseDBAsyncClient
from typing import Optional, Type, List
from PIL import Image
import secrets
from datetime import datetime
from emails import *
from db_models import User, Business, Product
from pydentic_models import (
user_pydenticIn,
user_pydentic,
business_pydentic,
business_pydenticIn,
product_pydenticIn,
product_pydentic
)
from authentication import get_hashed_password, verify_token, token_generator
app = FastAPI()
# Jinja2 templates for the e-mail verification landing page
templates = Jinja2Templates(directory='templates')
# tokenUrl must match the /token route defined below
oauth2_scheme = OAuth2PasswordBearer(tokenUrl='token')
app.mount('/static', StaticFiles(directory='static'), name='static')
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """Dependency: resolve the authenticated User from a JWT bearer token.

    Raises HTTP 401 when the token cannot be decoded or no matching user
    exists.  ``jwt`` and ``config_credentials`` come from the star import
    of ``emails`` — TODO confirm.
    """
    try:
        payload = jwt.decode(token, config_credentials['SECRET'], algorithms=['HS256'])
        user = await User.get(id=payload.get('id'))
    except Exception:  # narrowed from a bare except; jwt/ORM raise several types
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Invalid token',
            headers={'WWW-Authenticate': 'Bearer'}
        )
    # BUG FIX: the original ended with ``return await user`` — a model
    # instance is not awaitable, so every successful lookup raised TypeError.
    return user
@app.post('/products')
async def get_products(product: product_pydenticIn, user: user_pydentic=Depends(get_current_user)):
    """Create a new product for the authenticated user.

    NOTE(review): despite the name this is the product *creation* endpoint;
    the response is serialised with the input schema (product_pydenticIn)
    rather than product_pydentic, and ``business=user`` passes the user
    object where the Product's business relation is expected — verify both
    against db_models.
    """
    product = product.dict(exclude_unset=True)
    # products must have a positive price; discount is derived from the prices
    if product['original_price'] > 0:
        product['percentage_discount'] = ((product['original_price'] - product['new_price']) / product['original_price']) * 100
        product_obj = await Product.create(**product, business=user)
        product_obj = await product_pydenticIn.from_tortoise_orm(product_obj)
        return {
            'status': 'OK',
            'data': product_obj
        }
    else:
        return {
            'status': 'ERROR'
        }
@app.get('/products')
async def get_product():
    """Return every product in the catalogue."""
    products = await product_pydentic.from_queryset(Product.all())
    return {'status': 'OK', 'data': products}
@app.get('/products/{id}')
async def get_product(id: int):
    """Return one product plus details of the business selling it.

    NOTE(review): this redefines the ``get_product`` name used by the list
    route above; FastAPI keeps both routes registered regardless.
    """
    product = await Product.get(id=id)
    business = await product.business
    owner = await business.owner
    # dropped: an unused duplicate query via product_pydentic.from_queryset_single
    return {
        'status': 'OK',
        'data': {
            'product_details': product,
            'business_details': {
                'name': business.name,
                'city': business.city,
                'region': business.region,
                'description': business.description,
                'logo': business.logo,
                'owner_id': owner.id,
                'email': owner.email,
                # BUG FIX: was ``strtime`` (AttributeError); datetime has strftime
                'join_date': owner.join_date.strftime('%b %d %Y'),
            },
        }
    }
@app.delete('/products/{id}')
async def delete_product(id: int, user: user_pydentic=Depends(get_current_user)):
    """Delete a product; only the owner of its business may do so."""
    product = await Product.get(id=id)
    business = await product.business
    owner = await business.owner
    if user == owner:
        # BUG FIX: the delete coroutine was never awaited, so nothing was
        # actually removed from the database.
        await product.delete()
        return {
            'status': 'OK'
        }
    # BUG FIX: the exception was *returned* (HTTP 200 with its repr in the
    # body) instead of raised.
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail='Invalid token',
        headers={'WWW-Authenticate': 'Bearer'}
    )
@app.post('/user/me')
async def user_login(user: user_pydenticIn=Depends(get_current_user)):
    """Return the authenticated user's profile and business logo URL."""
    business = await Business.get(owner=user)
    logo = business.logo
    logo_path = 'localhost:8000/static/images/' + logo
    return {
        'status': 'OK',
        'data': {
            'username': user.username,
            'email': user.email,
            'verified': user.is_verified,
            # BUG FIX: was ``strtime`` (AttributeError); datetime has strftime
            'join_date': user.join_date.strftime('%b %d %Y'),
            'logo': logo_path
        }
    }
@app.post('/token')
async def generate_token(request_form: OAuth2PasswordRequestForm = Depends()):
    """Exchange username/password form credentials for a bearer token."""
    access_token = await token_generator(request_form.username, request_form.password)
    return {'access_token': access_token, 'token_type': 'bearer'}
# BUG FIX: the route path was 'verification/' — Starlette route paths must
# start with '/', so route registration failed at import time.
@app.get('/verification/', response_class=HTMLResponse)
async def email_verification(request: Request, token: str):
    """Confirm a user's e-mail from the token in the verification link.

    Renders the confirmation page on success; raises 401 for a bad,
    expired, or already-used token.
    """
    user = await verify_token(token)
    if user and not user.is_verified:
        user.is_verified = True
        await user.save()
        return templates.TemplateResponse(
            'verification.html',
            {'request': request,
             'username': user.username},
        )
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail='Invalid token',
        headers={'WWW-Authenticate': 'Bearer'}
    )
@post_save(User)
async def create_business(sender: 'Type[User]', instance: User, created:bool,
        using_db: 'Optional[BaseDBAsyncClient]', updated_fields: List[str]) -> None :
    """Tortoise post-save signal: when a User row is first created, attach
    a Business named after the username and send the verification e-mail
    (``send_email`` comes from the star import of ``emails`` — TODO confirm)."""
    if created:
        business_obj = await Business.create(
            name=instance.username,
            owner = instance,
        )
        await business_pydentic.from_tortoise_orm(business_obj)
        await send_email([instance.email], instance)
@app.post('/registration')
async def user_registration(user: user_pydenticIn):
    """Register a new account: hash the password, persist the user, and
    return a greeting (the post-save signal above creates the business
    and sends the verification e-mail)."""
    payload = user.dict(exclude_unset=True)
    payload['password'] = get_hashed_password(payload['password'])
    created = await User.create(**payload)
    serialized = await user_pydentic.from_tortoise_orm(created)
    return {
        'status': 'OK',
        'data': f'Hello, {serialized.username}, thanks for your registration, check your email'
    }
@app.get('/')
def index():
    """Simple landing/health-check endpoint."""
    return {'Message': 'Hello World!'}
@app.post('/uploadfile/profile')
async def create_upload_file(file: UploadFile=File(...), user: user_pydentic=Depends(get_current_user)):
    """Upload the authenticated user's business logo.

    The image is stored under static/images with a random name, resized to
    200x200, and recorded on the user's business.  Only jpg/png is accepted.
    """
    FILEPATH = './static/images/'
    filename = file.filename
    # BUG FIX: split('.')[1] picked the wrong segment for names containing
    # several dots (e.g. 'photo.2020.png'); the extension is the last part.
    extension = filename.split('.')[-1]
    if extension not in ['jpg', 'png']:
        return {
            'status': 'ERROR',
            'detail': 'File extension not allowed'
        }
    token_name = f'{secrets.token_hex(10)}.{extension}'
    generated_name = FILEPATH + token_name
    file_content = await file.read()
    # use a distinct handle name: the original rebound ``file`` (the upload)
    # to the output handle and then called close() on the already-closed file
    with open(generated_name, 'wb') as out_file:
        out_file.write(file_content)
    # normalise the stored logo to 200x200
    img = Image.open(generated_name)
    img = img.resize(size=(200, 200))
    img.save(generated_name)
    business = await Business.get(owner=user)
    owner = await business.owner
    if owner == user:
        business.logo = token_name
        await business.save()
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Not Authenticated to perform this action',
            headers={'WWW-Authenticate': 'Bearer'}
        )
    file_url = 'localhost:8000' + generated_name[1:]
    return {
        'status': 'OK',
        'filename': file_url
    }
@app.post('/uploadfile/product/{id}')
async def create_upload_file(id: int, file: UploadFile=File(...),
                                user: user_pydentic=Depends(get_current_user)):
    """Upload an image for a product; only the owning business's user may.

    NOTE(review): this redefines the ``create_upload_file`` name used by
    the profile-upload route above; FastAPI keeps both routes registered.
    """
    FILEPATH = './static/images/'
    filename = file.filename
    # the extension is the last dot-separated segment (same fix as the
    # profile upload: split('.')[1] broke on names with several dots)
    extension = filename.split('.')[-1]
    if extension not in ['jpg', 'png']:
        return {
            'status': 'ERROR',
            'detail': 'File extension not allowed'
        }
    token_name = f'{secrets.token_hex(10)}.{extension}'
    generated_name = FILEPATH + token_name
    file_content = await file.read()
    with open(generated_name, 'wb') as out_file:
        out_file.write(file_content)
    img = Image.open(generated_name)
    img = img.resize(size=(200, 200))
    img.save(generated_name)
    product = await Product.get(id=id)
    # BUG FIX: was ``await Product.business`` (the class attribute); the
    # related business belongs to the fetched *instance*.
    business = await product.business
    owner = await business.owner
    if owner == user:
        product.product_image = token_name
        await product.save()
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Not Authenticated to perform this action',
            headers={'WWW-Authenticate': 'Bearer'}
        )
    file_url = 'localhost:8000' + generated_name[1:]
    return {
        'status': 'OK',
        'filename': file_url
    }
@app.put('/product/{id}')
async def update_product(id: int, update_info: product_pydenticIn, user: user_pydentic=Depends(get_current_user)):
    """Update a product (owner only), recomputing the discount percentage."""
    product = await Product.get(id=id)
    business = await product.business
    owner = await business.owner
    update_info = update_info.dict(exclude_unset=True)
    update_info['date_published'] = datetime.utcnow()
    # BUG FIX: the guard was ``>= 0``, which divided by zero for a free
    # product; ``> 0`` also matches the create route's validation.
    if owner == user and update_info['original_price'] > 0:
        update_info['percentage_discount'] = ((update_info['original_price'] - update_info['new_price']) / update_info['original_price']) * 100
        product = await product.update_from_dict(update_info)
        await product.save()
        response = await product_pydentic.from_tortoise_orm(product)
        return {
            'status': 'OK',
            'data': response
        }
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail='Not Authenticated to perform this action',
        headers={'WWW-Authenticate': 'Bearer'}
    )
@app.put('/business/{id}')
async def update_business(id: int, update_business: business_pydenticIn, user: user_pydentic=Depends(get_current_user)):
    """Update business details; permitted only for the business owner."""
    changes = update_business.dict()
    business = await Business.get(id=id)
    owner = await business.owner
    if user != owner:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Not Authenticated to perform this action',
            headers={'WWW-Authenticate': 'Bearer'}
        )
    await business.update_from_dict(changes)
    await business.save()
    response = await business_pydentic.from_tortoise_orm(business)
    return {
        'status': 'OK',
        'data': response
    }
# Bind Tortoise ORM to the app: SQLite backend, auto-create missing tables,
# and convert ORM errors into HTTP error responses.
register_tortoise(
    app,
    db_url='sqlite://database.sqlite3',
    modules={'models': ['db_models']},
    generate_schemas=True,
    add_exception_handlers=True,
)
) | AlexBabilya/E-Commerce | main.py | main.py | py | 10,643 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fastapi.templating.Jinja2Templates",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "fastapi.security.OAuth2PasswordBearer",
"line_number": 35,
"usage_type": "call"
},
... |
43189726204 | import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from .. import default_config
import numpy
class CustomViewBox(pg.ViewBox):
    def __init__(self, *args, **kwds):
        """ViewBox specialised for overlaying STORM and confocal data.

        Adds registration-marker bookkeeping, a confocal display offset,
        freehand-ROI state, and a PanMode/ClickMode pair controlling how
        mouse events are interpreted.
        """
        pg.ViewBox.__init__(self, *args, **kwds)
        # NOTE(review): 'Strom' looks like a typo of 'Storm'; the methods
        # below read self.StormDisplay, which must be set elsewhere.
        self.StromDisplay=None
        self.ChannelNum=0
        self.ScaleBar = []
        self.ScaleSize = 0
        self.ScaleText = ''
        self.Window = []
        # points collected while drawing a freehand ROI
        self.FreehandRoi = []
        self.DrawnRoi = []
        # -1 means "no channel selected" for registration correlation
        self.StormRegistrationChannel = -1
        self.ConfRegistrationChannel = -1
        # NOTE(review): DrawnRoi is initialised twice (also above)
        self.DrawnRoi = []
        # [y, x] translation applied to the displayed confocal channels
        self.ConfocalOffset = [0, 0]
        self.StormMarkerRois = []
        self.ConfMarkerRois = []
        self.PanMode = default_config.viewer_input_mode
        self.ClickMode ='Norm'
        self.AffineTransform = []
## reimplement right-click to zoom out
    def mouseClickEvent(self, ev):
        """In 'Reg' mode, clicks drop registration markers (up to three for
        the STORM set, then up to three for the confocal set); any other
        mode defers to the default ViewBox click handling."""
        if self.ClickMode == 'Reg':
            Current = self.mapToView(ev.pos())
            Marker= pg.ROI([0, 0])
            if len(self.StormMarkerRois)<3:
                self.StormMarkerRois.append(Marker)
                Marker.addFreeHandle([Current.x(),Current.y()])
                Handle=Marker.getHandles()[0]
                # reshape the handle into a square to distinguish STORM
                # markers visually from the (round) confocal ones
                Handle.sides=4
                Handle.startAng=0
                Handle.buildPath()
                Handle.generateShape()
                self.StormDisplay.plot_widget.addItem(Marker)
            else:
                if len(self.ConfMarkerRois)<3:
                    self.ConfMarkerRois.append(Marker)
                    Marker.addFreeHandle([Current.x(),Current.y()])
                    self.StormDisplay.plot_widget.addItem(Marker)
                else:
                    # all markers placed: leave registration mode
                    self.ClickMode='Norm'
        else:
            pg.ViewBox.mouseClickEvent(self, ev)
    def SetRegistrationChannelStorm(self,StormChannelNum):
        """Select which STORM channel is used for registration correlation."""
        self.StormRegistrationChannel=StormChannelNum
    def SetRegistrationChannelConf(self,ConfChannelNum):
        """Select which confocal channel is used for registration correlation."""
        self.ConfRegistrationChannel=ConfChannelNum
def mouseDragEvent(self, ev):
if self.PanMode == 'Pan':
pg.ViewBox.mouseDragEvent(self, ev)
elif self.PanMode == 'Conf':
cursorOffset = ev.screenPos() - ev.lastScreenPos()
# scale to pixel coordinates
XTrans = cursorOffset[0] * self.viewPixelSize()[0] / 8
YTrans = cursorOffset[1] * self.viewPixelSize()[1] / 8
self.ConfocalOffset = [self.ConfocalOffset[0] + YTrans, self.ConfocalOffset[1] + XTrans]
for CN in range(4):
if self.StormDisplay.DisplayedConfocalChannel[CN] != 0:
self.StormDisplay.DisplayedConfocalChannel[CN].translate(YTrans, XTrans)
#move the registration markers if there are any:
Scale=1000.0/self.StormDisplay.ConfocalSizeMultiplier
for RoiInd in range(len(self.main_window.viewer.display.Viewbox.ConfMarkerRois)):
Marker= pg.ROI([0, 0])
OldPoints=self.ConfMarkerRois[RoiInd].getLocalHandlePositions()[0][1]
self.StormDisplay.plot_widget.removeItem(self.ConfMarkerRois[RoiInd])
self.ConfMarkerRois[RoiInd]=Marker
Marker.addFreeHandle([OldPoints.x()+XTrans*Scale * self.StormDisplay.ConfocalMetaData['SizeX'],OldPoints.y()+YTrans*Scale * self.StormDisplay.ConfocalMetaData['SizeY']])
self.StormDisplay.plot_widget.addItem(Marker)
#calcualte correlation between confocal and storm channel
#if event is finished display registration correlation
if ev.isFinish():
#if the displayed channels exist:
if self.ConfRegistrationChannel!=-1 and self.StormRegistrationChannel!=-1:
#if the channels are displayed:
if self.StormDisplay.DisplayedConfocalChannel[self.ConfRegistrationChannel]!=0 and self.StormDisplay.DisplayedStormChannel[self.StormRegistrationChannel]!=0:
#maybe rescale the images if really slow;Or precalculate an image and just index from it
Im1=self.StormDisplay.DisplayedConfocalChannel[self.ConfRegistrationChannel]
Im2=self.StormDisplay.DisplayedStormChannel[self.StormRegistrationChannel]
Scale=1000.0/self.StormDisplay.ConfocalSizeMultiplier
Correlation=0
for ind in range(len(Im2.getData()[0])):
IndX=(int(Im2.getData()[0][ind])/(Scale * self.StormDisplay.ConfocalMetaData['SizeX']))-self.ConfocalOffset[1]
IndY=(int(Im2.getData()[1][ind])/(Scale * self.StormDisplay.ConfocalMetaData['SizeY']))-self.ConfocalOffset[0]
if IndX>-1 and IndX<Im1.image.shape[1] and IndY>-1 and IndY<Im1.image.shape[0]:
Correlation+=Im1.image[IndY,IndX]
Msg=self.main_window.status_bar.currentMessage()
Msg=str.split(str(Msg),' Correlation:')[0]
#find a possible norm of correlation
#mean might be a more representative value for normalization:numpy.mean(Im1.image)
MaxCorr=len(Im2.getData()[0])*Im1.image.max()
self.main_window.status_bar.showMessage(Msg+' Correlation: '+ str(float(Correlation)/float(MaxCorr)) )
else:
Msg=self.main_window.status_bar.currentMessage()
Msg=str.split(str(Msg),' Correlation:')[0]
self.main_window.status_bar.showMessage(Msg+' Correlation: The selected channels are not displayed' )
#print signal.correlate2d(Im1,Im2)
if self.main_window.viewer.display.ConfocalSizeMultiplier==1:
Scale=1000*self.main_window.viewer.display.ConfocalSizeMultiplier
else:
Scale=10*self.main_window.viewer.display.ConfocalSizeMultiplier
self.main_window.doubleSpinBox_confocal_display_offset_x.setValue(
int(self.ConfocalOffset[1] * Scale * self.main_window.viewer.display.ConfocalMetaData['SizeX']))
self.main_window.doubleSpinBox_confocal_display_offset_y.setValue(
int(self.ConfocalOffset[0] * Scale * self.main_window.viewer.display.ConfocalMetaData['SizeX']))
ev.accept()
pos = ev.pos()
modifiers = QtGui.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier and ev.button() == QtCore.Qt.LeftButton:
if ev.isFinish():
# self.traj_widget.update_selection_infos()
self.rbScaleBox.hide()
else:
rect_box = QtCore.QRectF(pg.Point(ev.buttonDownPos(ev.button())), pg.Point(pos))
rect_box = self.childGroup.mapRectFromParent(rect_box)
self.update_selection(rect_box)
self.traj_widget.update_selection_infos()
self.updateScaleBox(ev.buttonDownPos(), ev.pos())
elif self.PanMode == 'Roi':
Current = self.mapToView(ev.pos())
Prev = self.mapToView(ev.lastPos())
r1 = pg.QtGui.QGraphicsLineItem(Prev.x(), Prev.y(), Current.x(), Current.y())
r1.setPen(pg.mkPen('w'))
self.DrawnRoi.append(r1)
self.addItem(r1)
self.FreehandRoi.append(Current)
# closing curve on finish
if ev.isFinish():
Current = self.mapToView(ev.buttonDownPos())
Prev = self.mapToView(ev.pos())
r1 = pg.QtGui.QGraphicsLineItem(Prev.x(), Prev.y(), Current.x(), Current.y())
r1.setPen(pg.mkPen('w'))
self.DrawnRoi.append(r1)
self.addItem(r1)
self.FreehandRoi.append(Current)
ev.accept()
pos = ev.pos()
modifiers = QtGui.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier and ev.button() == QtCore.Qt.LeftButton:
if ev.isFinish():
# self.traj_widget.update_selection_infos()
self.rbScaleBox.hide()
else:
rect_box = QtCore.QRectF(pg.Point(ev.buttonDownPos(ev.button())), pg.Point(pos))
rect_box = self.childGroup.mapRectFromParent(rect_box)
self.update_selection(rect_box)
self.traj_widget.update_selection_infos()
self.updateScaleBox(ev.buttonDownPos(), ev.pos())
def deleteFreehandROI(self, roi):
for r in self.DrawnRoi:
self.removeItem(r)
self.FreehandRoi = []
self.DrawnRoi = []
roi = None
def deleteActiveContourROI(self, DrawnElements):
for r in DrawnElements:
self.removeItem(r)
def deleteActiveContourROI3d(self, DrawnElements):
for r in DrawnElements:
self.removeItem(r)
    def deleteEllipseROI(self, roi):
        """Remove a single ellipse ROI item from this view box."""
        self.removeItem(roi)
def updateMatrix(self, changed=None):
# keep scale bar at same position
if self.ScaleBar != []:
ViewRange = self.viewRange()
XLength = (ViewRange[0][1] - ViewRange[0][0]) * 0.05
YLength = (ViewRange[1][1] - ViewRange[1][0]) * 0.05
Xpos = ViewRange[0][0] + XLength
Ypos = ViewRange[1][0] + YLength
self.ScaleBar.clear()
self.Window.removeItem(self.ScaleText)
self.ScaleBar = self.Window.plot(x=[Xpos, Xpos + self.ScaleSize], y=[Ypos, Ypos], symbol='o')
PosX = Xpos
PosY = Ypos + YLength * 0.1
self.ScaleText = pg.TextItem(text=str(self.ScaleSize) + ' nm', color=(200, 200, 200))
self.Window.addItem(self.ScaleText)
self.ScaleText.setPos(PosX, PosY)
pg.ViewBox.updateMatrix(self, changed=None)
def setScaleBar(self, ScaleBar, Window, Size, Text):
self.ScaleBar = ScaleBar
self.Window = Window
self.ScaleSize = Size
self.ScaleText = Text
def deleteScaleBar(self):
if self.ScaleBar != []:
self.ScaleBar.clear()
self.Window.removeItem(self.ScaleText)
self.ScaleBar = []
self.ScaleSize = 0
self.ScaleText = ''
    def setWindow(self, Window):
        """Store the plot window that owns this view box's scale bar."""
        self.Window = Window
def deleteConfocalImage(self):
self.StromDisplay = None
def setConfocalImage(self, StormDisplay, ChannelNum):
self.StormDisplay = StormDisplay
self.ChannelNum = ChannelNum
| KatonaLab/vividstorm | controllers/viewer/CustomViewBox.py | CustomViewBox.py | py | 10,817 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyqtgraph.ViewBox",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyqtgraph.ViewBox.__init__",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.ViewBox",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": ... |
33008270053 | # load the example image and convert it to grayscale
# Standalone OCR script: load an example image, preprocess it (Otsu threshold
# or median blur), run Tesseract on the grayscale result, print the extracted
# text, and display both the input and processed images.
import os
import cv2
import pytesseract
image = "example_01.jpg"  # input path; the name is rebound to the pixel array below
preprocess = "thresh"  # preprocessing mode: "thresh" or "blur"
image = cv2.imread(image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we should apply thresholding to preprocess the
# image
if preprocess == "thresh":
    gray = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
    gray = cv2.medianBlur(gray, 3)
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
# NOTE(review): image_to_string is called on the in-memory array, so the
# temporary PNG written above is never actually read back — confirm whether
# the file round-trip is still needed.
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'
text = pytesseract.image_to_string(gray)
os.remove(filename)
print(text)
# show the output images
cv2.imshow("Image", image)
cv2.imshow("Output", gray)
cv2.waitKey(0)
| Marius-Juston/SonnetGeneratorCombination | ocr.py | ocr.py | py | 1,062 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"... |
29412113956 | import cv2
import numpy as np
import argparse
def main():
    """Zero out everything outside a fixed rectangle in each frame of a video
    and write the masked frames to the output file, previewing before/after.

    CLI: ``-v/--video`` input path, ``-o/--output`` output path.
    NOTE(review): ``--output`` defaults to ``None``, which ``cv2.VideoWriter``
    cannot open — confirm an output path is always supplied.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v','--video', type=str)
    parser.add_argument('-o','--output', type=str, default=None)
    args = parser.parse_args()
    vid = cv2.VideoCapture(args.video)
    # mirror the input's geometry/frame-rate in the output writer
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc('a','v','c','1')
    out = cv2.VideoWriter(args.output, codec, fps, (width, height))
    while vid.isOpened():
        success, image = vid.read()
        if not success:
            print("ignoring empty video")
            break
        cv2.imshow("before", image)
        max_x, max_y, z = image.shape
        # print('image dimensions: x:', max_x, "by y:", max_y)
        # points are (y, x), thickness -1 for solid
        start_point = (154, 170)
        end_point = (500, 1200)
        # setting for 720x1280 (portrait) handstand video at ../add_noise/sleeves.mp4
        # draw red rectangle around area
        # line_color= (0, 0, 255)
        # line_thickness = 3
        # cv2.rectangle(image, start_point, end_point, line_color, line_thickness)
        # exclude area outside rectangle: keep only pixels where mask == 255
        start_y, start_x = start_point
        end_y, end_x = end_point
        mask = np.zeros(image.shape[:2],np.uint8)
        mask[start_x:end_x,start_y:end_y] = 255
        image = cv2.bitwise_and(image,image,mask = mask)
        out.write(image)
        cv2.imshow("after", image)
        # ESC key (27) stops processing early
        if cv2.waitKey(1) & 0xFF == 27:
            break
    # NOTE(review): out.release() is never called, so the writer may not
    # flush its container headers — confirm the output file plays back.
    vid.release()
# Script entry point (keeps the module importable without side effects).
if __name__ == "__main__":
    main()
# https://stackoverflow.com/questions/11492214/opencv-via-python-is-there-a-fast-way-to-zero-pixels-outside-a-set-of-rectangle
# img = cv2.imread('testimg.jpeg')
# start_x = 30
# start_y = 30
# end_x = 200
# end_y = 100
# mask = np.zeros(img.shape[:2],np.uint8)
# mask[start_y:start_y+end_y,start_x:start_x+end_x] = 255
# result = cv2.bitwise_and(img,img,mask = mask)
# cv2.imshow("result", result) | flexinai/flexin-ipod-ad | exclusion.py | exclusion.py | py | 2,097 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": ... |
8574459364 | from django.shortcuts import render, redirect, get_object_or_404
from users.models import Profile
from .models import *
from addproject.models import *
from datetime import datetime
from django.shortcuts import render, redirect
from addproject.models import *
import json
import datetime
from django.http import JsonResponse
from addproject.models import *
from users import *
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from addproject import models
from django.core.paginator import Paginator
def showmain(request):
    """Render the main page: today's deadlines, followed projects and posts."""
    calendar = Calendar.objects.filter(writer=request.user, endday__contains=datetime.date.today(
    )).order_by('endday')  # only this user's calendar entries ending today, nearest deadline first
    projects = Project.objects.filter(followers=request.user)
    profile = Profile.objects.all()  # NOTE(review): unused — not passed to the template; confirm before removing
    posts = Post.objects.all().order_by('-day')
    return render(request, 'mateapp/mainpage.html', {'calendar': calendar, 'projects':projects,'posts':posts, })
def showevent(request):
    """AJAX endpoint: return up to two of the user's calendar events that end
    on the requested day of the current month.

    Expects a POST whose body is a JSON-encoded day number.  The JSON
    response carries a ``status`` of ``"exist1"``/``"exist2"``/``"null"``
    plus the ``title1``..``color2`` fields of the first two matching events
    (``None`` for every field of an empty slot) — the exact payload the
    original duplicated branches produced.

    Non-POST requests fall through and return ``None`` (unchanged from the
    original behaviour; the front-end only ever POSTs here).
    """
    def _slot(entry, n):
        # Flatten one Calendar row (or None for an empty slot) into the
        # numbered keys the front-end expects, e.g. title1, startday1, ...
        fields = ('title', 'startday', 'endday', 'starttime', 'endtime',
                  'place', 'body', 'color')
        return {f + str(n): (getattr(entry, f) if entry is not None else None)
                for f in fields}

    if request.method == 'POST':
        date_num = json.loads(request.body)
        today = datetime.date.today()
        # events of the requesting user ending on that day of this month
        calendar = Calendar.objects.filter(
            writer=request.user,
            endday__contains=datetime.date(today.year, today.month, int(date_num)),
        ).order_by('endday')
        count = calendar.count()
        if count == 0:
            context = {"status": "null"}
        else:
            context = {"status": "exist1" if count == 1 else "exist2"}
            context.update(_slot(calendar[0], 1))
            context.update(_slot(calendar[1] if count >= 2 else None, 2))
        return JsonResponse(context)
def login(request):
    """Show the main page to authenticated users, otherwise the login page."""
    if request.user.is_authenticated:
        projects = Project.objects.all()
        return render(request, 'mateapp/mainpage.html', {'projects':projects})
    else:
        return render(request, 'account/login.html')
def create_schedule(request):
    """GET: show the schedule-creation form; POST: save a new Calendar entry
    for the requesting user and redirect to the calendar page."""
    projecttitles = Project.objects.filter(writer=request.user)
    if request.method == 'POST':
        new_schedule = Calendar()
        new_schedule.title = request.POST['title']
        new_schedule.writer = request.user
        new_schedule.body = request.POST['body']
        new_schedule.startday = request.POST.get('startday')
        new_schedule.endday = request.POST.get('endday')
        new_schedule.starttime = request.POST.get('starttime')
        new_schedule.endtime = request.POST.get('endtime')
        new_schedule.place = request.POST['place']
        new_schedule.save()
        return redirect('mateapp:calendar')
    else :
        # NOTE(review): this rebinds new_schedule to the full queryset for the
        # form template — confirm the template really needs every schedule.
        new_schedule = Calendar.objects.all()
        return render(request, 'mateapp/create_schedule.html',{'new_schedule':new_schedule, 'projecttitles':projecttitles})
def calendar(request):
    """Render the calendar page with this user's schedules plus the
    queryset lists consumed by the Gantt-chart view."""
    calendar = Calendar.objects.filter(writer=request.user)  # only entries written by this user
    calendars = Calendar.objects.filter(writer=request.user)
    schedules_list = []
    schedules = Calendar.objects.filter(writer=request.user)
    schedules_list.append(schedules)
    # Gantt chart
    projects = Project.objects.all()  # load every project
    todos_list = []  # collect querysets in a list, a common pattern before rendering
    todos = Calendar.objects.filter(writer=request.user)
    todos_list.append(todos)  # append this user's registered to-do entries
    # Whatever needs sending is wrapped in a list like this before rendering;
    # a frequent Django/templating idiom worth remembering.
    return render(request, 'mateapp/calendar.html', {'projects':projects, 'todos_list':todos_list,'calendar':calendar, 'schedules_list':schedules_list, 'calendars':calendars})
    # the list itself is what gets rendered
def timetable(request):
    """POST: store an uploaded timetable image on the user's Profile; always
    redirect back to the main page."""
    if request.method == "POST":
        # Load only the requesting user's Profile row
        profile = Profile.objects.get(user=request.user)
        profile.timetable = request.FILES.get('timetable')
        profile.save(update_fields=['timetable'])
    return redirect('mateapp:showmain')  # redirect is preferable to render here
def project_detail(request, project_id):
    """Show one project's page with its posts, paginated four per page."""
    projects = Project.objects.filter(followers=request.user)
    project = Project.objects.get(pk=project_id)
    posts = Post.objects.all()
    post = Post.objects.filter(project=project)
    comment = Comment.objects.filter()  # NOTE(review): unused and unfiltered — confirm before removing
    page = int(request.GET.get('p',1))
    paginator = Paginator(post,4)
    boards = paginator.get_page(page)
    return render(request, 'mateapp/project.html', {'boards':boards, 'projects':projects,'project':project,'posts':posts, 'post':post})
# Filter for the post with the highest number and compare it against today's date for display
# Post CRUD
def create_post(request, project_id):
    """Create a new Post under the given project from POST data, then
    redirect back to the project's detail page.

    Robustness fix: the original only returned a response inside the POST
    branch, so any other method made the view return ``None`` (which Django
    rejects with ``ValueError``); now every request path redirects to the
    project detail view.  Unused locals (``projects``, ``posts``, ``day``)
    were removed.

    :param project_id: primary key of the parent ``Project`` (every post
        needs this foreign key).
    """
    project = Project.objects.get(pk=project_id)
    if request.method == "POST":
        Post.objects.create(
            title=request.POST['title'],
            user=request.user,
            project=project,
            body=request.POST['body'],
        )
    return redirect('mateapp:project_detail', project_id)
def create_comment(request, project_id, post_id):
    """Create a comment on a post from POST data and redirect to the post's
    detail page.

    NOTE(review): non-POST requests fall through and return ``None``, which
    Django rejects — confirm the front-end only POSTs here.
    """
    project = Project.objects.get(pk=project_id)
    post = Post.objects.get(pk=post_id)
    if request.method == "POST":
        post = get_object_or_404(Post,pk=post_id) # load the Post if it exists, otherwise return 404
        content = request.POST['content']
        file = request.FILES.get('file')  # NOTE(review): fetched but never stored on the Comment — confirm intent
        Comment.objects.create(content=content, post=post, user=request.user) # model fields = view values
        return redirect('mateapp:post_detail', project_id, post_id)
# id is the identifier, so the field is guaranteed to exist
def post_detail(request, project_id, post_id):
    """Show one post with its comments, paginated four per page."""
    project = Project.objects.get(pk = project_id)
    post = Post.objects.get(pk = post_id)
    comments = Comment.objects.filter(post = post)
    page = int(request.GET.get('p',1))
    paginator = Paginator(comments,4)
    boards = paginator.get_page(page)
    return render(request, 'mateapp/project_post.html', {'boards':boards,'project':project, 'post':post, 'comments':comments})
| SeongJoon-K/Runningmate | runningmate/mateapp/views.py | views.py | py | 9,157 | python | en | code | null | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
33452924265 | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
fcff_df = pd.read_excel('FCFF_analysis_filtered.xlsx', index_col=[0])
sgx_df = pd.read_csv('myData.csv', index_col=[1])
class StonksApp:
    """Tkinter GUI for browsing per-stock FCFF valuation results.

    Reads the module-level ``fcff_df`` (valuation spreadsheet) and ``sgx_df``
    (SGX listing data), shows a revenue/operating-income chart plus key
    stats for the current stock, and maintains a text-file watchlist at
    ``Cache/watchlist.txt`` (one full ticker per line).
    """
    def __init__(self, master):
        """Build the main window around the first stock in ``fcff_df``."""
        self.master = master
        master.title("StonkApp")
        # Initialise app variables
        self.idx = 0
        self.current_stock = fcff_df.index[self.idx]
        # Set up frame with charts and stats
        self.update_main_frame(self.generate_info_dict())
        # Set up frame for app buttons
        self.update_buttons_frame()
    def plot_chart(self, row: int, column: int, *args: pd.DataFrame, columnspan: int=2, title: str="", xlabel: str="", ylabel: str=""):
        """ Function to plot graphs on same chart from dataframes passed into the function as arguments
        :param row, column, and columnspan: variables for Tkinter grid styling
        :param title, xlabel, ylabel: variables for matplotlib chart
        :param *args: dataframes to be plotted onto chart
        """
        # Setting up of chart
        figure = plt.Figure(figsize=(6,5), dpi=70)
        ax = figure.add_subplot(111)
        line_graph = FigureCanvasTkAgg(figure, self.main_frame)
        line_graph.get_tk_widget().grid(row=row, column=column, columnspan=columnspan)
        # Plotting graphs
        for df in args:
            df.plot(kind='line', legend=True, ax=ax)
        # Chart styling
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
    def generate_info_dict(self) -> dict:
        """ Function to generate a dictionary of info name and info value pairs to be displayed in app
        :return: info_dict
        """
        info_dict = {}
        # Show name of stock (drop the ".SI" suffix to index the SGX table)
        trading_code = self.current_stock.replace(".SI", "")
        self.trading_name = sgx_df.loc[trading_code, "Trading Name"]
        info_dict["Name"] = self.trading_name
        # Show sector of stock
        self.sector = sgx_df.loc[trading_code, "Sector"]
        info_dict["Sector"] = self.sector
        # Show wacc of stock
        self.wacc = fcff_df.loc[self.current_stock, "WACC"]
        info_dict["WACC"] = self.wacc
        # Show fcf of stock (firm free cash flow divided by shares outstanding)
        self.fcff = fcff_df.loc[self.current_stock, "FCFF"]
        self.shares_out = fcff_df.loc[self.current_stock, "Shares outstanding"]
        self.fcf = self.fcff/self.shares_out
        info_dict["FCF"] = self.fcf
        # Show fair value stat
        self.fair_value = fcff_df.loc[self.current_stock, "Fair value"]
        info_dict["Fair value"] = self.fair_value
        # Show percentage undervalued stat
        self.percentage_undervalued = fcff_df.loc[self.current_stock, "Percentage undervalued"]
        info_dict["Percentage undervalued"] = self.percentage_undervalued
        return info_dict
    def update_main_frame(self, info_dict: dict):
        """ Function to populate main frame with the chart and the key stats """
        self.main_frame = Frame(self.master)
        self.main_frame.grid(row=0, column=0)
        # Update variables: load the stock's financial statements from disk
        self.IS_df = pd.read_csv(f"Database/{self.current_stock}/IS.csv", index_col=[0])
        self.BS_df = pd.read_csv(f"Database/{self.current_stock}/BS.csv", index_col=[0])
        self.CF_df = pd.read_csv(f"Database/{self.current_stock}/CF.csv", index_col=[0])
        # Graphs to be plotted
        self.revenue_df = self.IS_df.loc["Revenue"]
        self.revenue_df = self.revenue_df.astype(float)
        self.operating_income_df = self.IS_df.loc["Operating Income"]
        self.operating_income_df=self.operating_income_df.astype(float)
        # Plot graph of revenue and operating income
        self.plot_chart(0, 0, self.revenue_df, self.operating_income_df, title="", xlabel="Year", ylabel="")
        # Display useful information in a two-column grid under the chart
        for i, key in enumerate(info_dict):
            Label(self.main_frame, text= f"{key}: \n{info_dict[key]}", font='Helvetica 10').grid(row=(i//2)+1, column=i%2)
    def update_buttons_frame(self):
        """ Function to populate button frame with back, next, like, and
        watchlist buttons, and arrange their layout """
        self.button_frame = Frame(self.master)
        # Back button
        # NOTE(review): at idx 0 this requests index -1, which pandas wraps
        # to the LAST stock; Next past the end raises IndexError — confirm
        # whether wrap-around is intended.
        self.back_button = Button(self.button_frame, text="Back", command=lambda: self.next(self.idx - 1))
        self.back_button.grid(row=0, column=0, pady="10", padx="10")
        # Next button
        self.next_button = Button(self.button_frame, text="Next", command=lambda: self.next(self.idx + 1))
        self.next_button.grid(row=0, column=1)
        # Like button
        self.like_button = Button(self.button_frame, text="Like", command=self.like)
        self.like_button.grid(row=1, column=0, pady="5", padx="10")
        # Toggle like button if stock is in watchlist
        self.toggle_like_button()
        # Watchlist button
        self.watchlist_button = Button(self.button_frame, text="Watchlist", command=self.watchlist)
        self.watchlist_button.grid(row=1, column=1, pady="5", padx="10")
        # Frame placement
        self.button_frame.grid(row=1, column=0)
    def toggle_like_button(self):
        """ Toggle like button based on whether self.current_stock is in watchlist """
        with open("Cache/watchlist.txt", "r") as watchlist:
            lines = watchlist.readlines()
            # the file stores one full ticker per line, newline-terminated
            if str(self.current_stock + '\n') in lines:
                self.like_button.config(relief="sunken")
            else:
                self.like_button.config(relief="raised")
    """ Functions to make buttons interactable """
    def next(self, idx):
        """ Function for next button to show next or previous stock """
        # Update variables
        self.idx = idx
        self.current_stock = fcff_df.index[self.idx]
        self.update_main_frame(self.generate_info_dict())
        # Toggle like button based on whether stock is in watchlist
        self.toggle_like_button()
    def like(self):
        """ Function for like button to add or remove the current stock from
        the watchlist file (sunken relief == currently liked) """
        if self.like_button.config('relief')[-1] == 'sunken':
            self.like_button.config(relief="raised")
            # rewrite the file without the current stock's line
            with open("Cache/watchlist.txt", "r") as f:
                lines = f.readlines()
            with open("Cache/watchlist.txt", "w") as f:
                for line in lines:
                    if line.strip("\n") != self.current_stock:
                        f.write(line)
        else:
            with open("Cache/watchlist.txt", "a") as myfile:
                myfile.write(f"{self.current_stock}\n")
            self.like_button.config(relief="sunken")
    def watchlist(self):
        """ Function to see stocks in watchlist in a separate scrollable window """
        def view_watchlist_stock(stock):
            """ Function for view button to look at selected stock """
            watchlist_window.destroy()
            self.master.deiconify()
            # NOTE(review): the main frame is rebuilt BEFORE current_stock is
            # reassigned below, so the previously shown stock is rendered —
            # confirm whether the assignment should come first.
            self.update_main_frame(self.generate_info_dict())
            self.current_stock = stock
            self.update_buttons_frame()
            #update self.idx to that of stock
            self.idx = list(fcff_df.index).index(stock)
        def delete_watchlist_stock(stock):
            """ Function for delete button to delete selected stock """
            with open("Cache/watchlist.txt", "r") as f:
                lines = f.readlines()
            with open("Cache/watchlist.txt", "w") as f:
                for line in lines:
                    if line.strip("\n") != stock:
                        f.write(line)
            # also remove the corresponding row of widgets from the window
            idx = Lines.index(stock+'\n')
            labels[idx].destroy()
            view_buttons[idx].destroy()
            delete_buttons[idx].destroy()
            if len(lines) == 1:
                Label(second_frame, text='Watchlist is currently empty', font='Helvetica 10').grid(column=0)
            #untoggle like button on main window if stock on that window is removed from watchlist
            if stock == self.current_stock:
                self.update_buttons_frame()
        def search():
            """ Function for search button to search for a specified stock by its full ticker """
            search_ticker = search_entry.get()
            if search_ticker in fcff_df.index:
                view_watchlist_stock(search_ticker)
            else:
                messagebox.showerror("Error","Sorry the ticker you entered was not found within this spreadsheet")
                return
        def on_closing():
            """ Function to make main window reappear on closing of watchlist window """
            watchlist_window.destroy()
            self.master.deiconify()
        def back_to_main_button_command():
            """ Function to get back to main app when button is clicked"""
            watchlist_window.destroy()
            self.master.deiconify()
        # Create new window over current window
        self.master.withdraw() # hide main window
        watchlist_window = Toplevel(self.master)
        watchlist_window.protocol("WM_DELETE_WINDOW", on_closing) # make main window reappear on closing
        watchlist_window.title("Watchlist")
        watchlist_window.geometry("400x500")
        # Create search bar
        search_frame = Frame(watchlist_window)
        search_frame.pack()
        search_entry = Entry(search_frame)
        search_entry.pack(side=LEFT)
        search_button = Button(search_frame, text='Search', command=search)
        search_button.pack(side=LEFT)
        # Create a button to get back to main app
        back_to_main_button = Button(watchlist_window, text="Back to main app", command=back_to_main_button_command)
        back_to_main_button.pack(pady=5)
        ##### scroll button #####
        # Create A Main Frame
        main_frame = Frame(watchlist_window)
        main_frame.pack(fill=BOTH, expand=1)
        # Create A Canvas
        my_canvas = Canvas(main_frame)
        my_canvas.pack(side=LEFT, fill=BOTH, expand=1)
        # Add A Scrollbar To The Canvas
        my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)
        my_scrollbar.pack(side=RIGHT, fill=Y)
        # Configure The Canvas
        my_canvas.configure(yscrollcommand=my_scrollbar.set)
        my_canvas.bind('<Configure>', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox("all")))
        def _on_mouse_wheel(event):
            # Windows-style wheel delta (multiples of 120)
            my_canvas.yview_scroll(-1 * int((event.delta / 120)), "units")
        my_canvas.bind_all("<MouseWheel>", _on_mouse_wheel)
        # Create ANOTHER Frame INSIDE the Canvas
        second_frame = Frame(my_canvas)
        # Add that New frame To a Window In The Canvas
        my_canvas.create_window((0,0), window=second_frame, anchor="nw")
        ##### end of scroll bar #####
        # Get list of stocks in watchlist
        # NOTE(review): file1 is never closed — consider a with-block.
        file1 = open('Cache/watchlist.txt', 'r')
        Lines = file1.readlines()
        if len(Lines) == 0:
            Label(second_frame, text='Watchlist is currently empty', font='Helvetica 10').grid(column=0)
        labels = [] # Create empty lists to reference which ones to delete later on
        view_buttons = []
        delete_buttons = []
        # Display stocks in watchlist, with buttons to view or delete stock
        for i in range(len(Lines)):
            watchlist_stock_label = Label(second_frame, text=Lines[i], font='Helvetica 10')
            watchlist_stock_label.grid(row=i, column=0)
            watchlist_stock_button = Button(second_frame, text='View', command=lambda i=i: view_watchlist_stock(Lines[i].strip()))
            watchlist_stock_button.grid(row=i, column=1)
            delete_watchlist_stock_button = Button(second_frame, text='Remove', command=lambda i=i:delete_watchlist_stock(Lines[i].strip()))
            delete_watchlist_stock_button.grid(row=i, column=2)
            labels.append(watchlist_stock_label)
            view_buttons.append(watchlist_stock_button)
            delete_buttons.append(delete_watchlist_stock_button)
    def settings(self):
        # Placeholder: settings UI not implemented yet.
        pass
# Launch the Tk main loop when run as a script.
if __name__ == "__main__":
    root = Tk()
    StonksApp(root)
    root.mainloop()
| yuliangod/StonksApp | 03_FCFFapp.py | 03_FCFFapp.py | py | 12,301 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pypl... |
18590413266 | import pytest
from sqlalchemy import create_engine
from rebrickable.data.database import Session
from rebrickable.data.models import *
models = [Color, Inventory, InventorySet,
InventoryPart, Part, PartCategory, Set, Theme]
@pytest.fixture(scope='module')
def session():
    """Module-scoped SQLAlchemy session bound to an in-memory SQLite DB.

    Creates all tables before yielding and closes every session afterwards.
    NOTE(review): ``Session.close_all()`` is deprecated in modern SQLAlchemy
    — consider ``session.close()`` / ``close_all_sessions()``.
    """
    engine = create_engine('sqlite:///:memory:', echo=True)
    Session.configure(bind=engine)
    # You probably need to create some tables and
    # load some test data, do so here.
    # To create tables, you typically do:
    Base.metadata.create_all(engine)
    yield Session()
    Session.close_all()
@pytest.fixture
def objects():
    """One consistent sample row per model (FKs reference each other)."""
    return [
        Color(id=1, name='black', rgb='123456', is_trans=True),
        Inventory(id=1, version=42, set_num='7189-1'),
        InventoryPart(inventory_id=1, part_num='3001', color_id=1, quantity=12),
        InventorySet(inventory_id=1, set_num='7189-1', quantity=1),
        Part(part_num='3001', name='Brick 2X4', part_cat_id=1),
        PartCategory(id=1, name='bricks'),
        Set(set_num='7189-1', name='Dumy Test', year=2015, theme_id=42, num_parts=12),
        Theme(id=42, name='Town', parent_id=None),
        Theme(id=43, name='Police', parent_id=42)
    ]
def test_models(session, objects):
    """Smoke test: persist one row per model, then read each back from the DB."""
    session.add_all(objects)
    session.commit()
    for obj in objects:
        # refresh re-loads the row, proving it round-tripped through SQLite
        session.refresh(obj)
        print(obj)
| rienafairefr/pyrebrickable | tests/data/test_data.py | test_data.py | py | 1,369 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rebrickable.data.database.Session.configure",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rebrickable.data.database.Session",
"line_number": 14,
"usage_type":... |
70797390505 | import copy
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from matplotlib import cm
from util.util import info, crop_center, error, print_timestamped
different_colors = ["#FF0000", "#008000", "#0000FF", "#FFD700", # Red, green, blue, gold
"#00BFFF", "#DDA0DD", "#808080", "#800000", # Light blue, magenta, gray, maroon
"#808000", "#00FF00", "#FFFF00", "#800080", # Olive, lime, yellow, purple
"#008080", "#000080"] # Teal, navy
class PlotHandler:
    def __init__(self, args, options, complementary=False):
        """Resolve and create the output folders for this run's plots.

        :param args: experiment arguments (mapping channels, clustering
            settings, slicing options, experiment/test-set names).
        :param options: run options (output folder, phase, prefix).
        :param complementary: use the "_complementary" folder suffix instead
            of the method/cluster-derived one.
        """
        self.output_data_folder = options.output_data_folder
        self.phase = options.phase # 0 search, 1 train, 2 test
        self.prefix = options.prefix
        self.mapping_source = args.mapping_source
        self.mapping_target = args.mapping_target
        # maps visual keys ('source'/'target'/...) to file-name fragments
        self.plot_names_dict = {}
        self.plot_names_dict['source'] = self.mapping_source
        self.plot_names_dict['target'] = self.mapping_target
        self.sliced = args.sliced
        self.chosen_slice = args.chosen_slice
        self.plot_only_results = args.plot_only_results
        self.nifti_image_extension = ".nii.gz"
        # fixed 2D crop size (pixels) used by crop_center everywhere below
        self.target_shape = (210, 180)
        if self.sliced:
            self.image_extension = ".png"
        else:
            self.image_extension = self.nifti_image_extension
        # Set plot folder
        base = self.output_data_folder / args.experiment_name
        if complementary:
            specific_name = "_complementary"
        else:
            specific_name = "_" + args.method + "_main" + str(args.main_clusters) + "_sub" + str(args.sub_clusters)
        if self.phase != 1:
            specific_name += "_" + str(args.postprocess)
        if self.phase == 0 or (self.phase == 2 and args.model_phase == "search"):
            self.plot_folder = base / (args.test_set + "_search" + specific_name)
        elif self.phase == 1:
            self.plot_folder = base / (self.prefix + specific_name)
        else:
            self.plot_folder = base / (args.test_set + specific_name)
        if self.phase != 1 or not self.plot_only_results:
            self.plot_folder.mkdir(parents=True, exist_ok=True)
        self.train_folder = None
        self.labels_folder = None
        info("The plots for the current run will be saved in " + str(base) + ".")
        # training/label sub-folders only exist outside the test phase
        if not args.plot_only_results and self.phase != 2:
            self.train_folder = self.plot_folder / "train"
            self.labels_folder = self.plot_folder / "labels"
            self.train_folder.mkdir(parents=True, exist_ok=True)
            self.labels_folder.mkdir(parents=True, exist_ok=True)
            print(
                "The plots of the training images for the current run will be saved in " + str(self.train_folder) + ".")
            print("The plots of the labels for the current run will be saved in " + str(self.labels_folder) + ".")
    def plot_reference(self, reference_mri, model_folder, mris_shape, affine, method, main, sub):
        """Save the reference MRI as a NIfTI labelled with the clustering
        method and main/sub cluster counts, into the model folder."""
        plot_nifti(reference_mri,
                   model_folder / (
                           "lab_" + method + "_main" + str(main) + "_sub" + str(sub) + self.nifti_image_extension),
                   mris_shape,
                   affine)
    def plot_train(self, visuals, patient_name, mris_shape, affine):
        """Save every training visual (source/target image) for a patient.

        In sliced mode the flat image is reshaped and cropped to 2D; in
        volume mode a PNG of ``chosen_slice`` is saved alongside.  A NIfTI
        of the full (reshaped) image is written in both modes.  No-op when
        ``plot_only_results`` is set.
        """
        if not self.plot_only_results:
            for label, image in visuals.items():
                filename = self.train_folder / (patient_name + "_" + self.plot_names_dict[label] + self.image_extension)
                if self.sliced:
                    reshaped = image.reshape(mris_shape)
                    cropped = crop_center(reshaped, self.target_shape)
                    plot_image(cropped,
                               filename,
                               colormap=cm.get_cmap('gray'),
                               mris_shape=mris_shape,
                               plotbar=False)
                else:
                    # take the configured axial slice from the 3D volume
                    reshaped = image.reshape(mris_shape)
                    cropped = crop_center(reshaped[:, :, self.chosen_slice], self.target_shape)
                    plot_image(cropped,
                               str(filename).split(".")[0] + ".png",
                               colormap=cm.get_cmap('gray'),
                               plotbar=False)
                plot_nifti(image,
                           filename,
                           mris_shape,
                           affine=affine)
    def plot_results(self, visuals, patient_name, smoothing, mris_shape, affine):
        """Save result visuals (learned target, smoothed variant, ...) for a
        patient into a per-patient sub-folder.

        Visuals whose key contains ``'truth'`` are skipped here.  Handling of
        sliced vs. volume mode mirrors :meth:`plot_train`.
        :param smoothing: name of the smoothing applied, used in file names.
        """
        # register file-name fragments for the learned outputs
        self.plot_names_dict['learned_target'] = self.mapping_target + "_learned"
        self.plot_names_dict['learned_target_smoothed'] = self.mapping_target + "_learned_" + smoothing
        for label, image in visuals.items():
            if 'truth' not in label:
                folder = self.plot_folder / patient_name
                folder.mkdir(parents=True, exist_ok=True)
                filename = folder / (patient_name + "_" + self.plot_names_dict[label] + self.image_extension)
                if self.sliced:
                    reshaped = image.reshape(mris_shape)
                    cropped = crop_center(reshaped, self.target_shape)
                    plot_image(cropped,
                               filename,
                               colormap=cm.get_cmap('gray'),
                               mris_shape=mris_shape,
                               plotbar=False)
                else:
                    reshaped = image.reshape(mris_shape)
                    cropped = crop_center(reshaped[:, :, self.chosen_slice], self.target_shape)
                    plot_image(cropped,
                               str(filename).split(".")[0] + ".png",
                               colormap=cm.get_cmap('gray'),
                               plotbar=False)
                plot_nifti(image,
                           filename,
                           mris_shape,
                           affine=affine)
    def plot_shaded_labels(self, patient_name, labels1, labels2, method, main_clusters, mris_shape, affine):
        """Plot the source and target cluster-label maps with the shaded fusion colormap.

        *labels1* are the source-mapping labels, *labels2* the target-mapping
        labels; both are saved into the labels folder.
        """
        m1_filename = self.labels_folder / (
                patient_name + "_" + self.mapping_source + "_labels_" + method + self.image_extension)
        m2_filename = self.labels_folder / (
                patient_name + "_" + self.mapping_target + "_labels_" + method + self.image_extension)
        if self.sliced:
            # Sliced mode: both label maps are 2D; crop and plot each.
            reshaped1 = labels1.reshape(mris_shape)
            cropped1 = crop_center(reshaped1, self.target_shape)
            plot_image(cropped1,
                       m1_filename,
                       shaded_labels=1.0,
                       colormap=colormap_fusion(main_clusters),
                       mris_shape=mris_shape,
                       plotbar=False)
            reshaped2 = labels2.reshape(mris_shape)
            cropped2 = crop_center(reshaped2, self.target_shape)
            plot_image(cropped2,
                       m2_filename,
                       shaded_labels=1.0,
                       colormap=colormap_fusion(main_clusters),
                       mris_shape=mris_shape,
                       plotbar=False)
        else:
            # Volume mode: PNG of the chosen slice for each map, plus full NIfTI volumes.
            reshaped1 = labels1.reshape(mris_shape)
            cropped1 = crop_center(reshaped1[:, :, self.chosen_slice], self.target_shape)
            plot_image(cropped1,
                       str(m1_filename).split(".")[0] + ".png",
                       shaded_labels=1.0,
                       colormap=colormap_fusion(main_clusters),
                       plotbar=False)
            reshaped2 = labels2.reshape(mris_shape)
            cropped2 = crop_center(reshaped2[:, :, self.chosen_slice], self.target_shape)
            plot_image(cropped2,
                       str(m2_filename).split(".")[0] + ".png",
                       shaded_labels=1.0,
                       colormap=colormap_fusion(main_clusters),
                       plotbar=False)
            plot_nifti(labels1,
                       m1_filename,
                       mris_shape,
                       affine=affine)
            plot_nifti(labels2,
                       m2_filename,
                       mris_shape,
                       affine=affine)
    def print_tumour(self, tumour, patient_name, mris_shape, affine):
        """Plot the ground-truth tumour mask for *patient_name*.

        Skipped when only final results should be plotted.
        """
        if not self.plot_only_results:
            folder = self.plot_folder / patient_name
            folder.mkdir(parents=True, exist_ok=True)
            filename = folder / (patient_name + "_truth_tumour" + self.image_extension)
            if self.sliced:
                # Sliced mode: plot the tumour slice as-is (no crop here, unlike the
                # other plotting methods - presumably already in target shape).
                plot_image(tumour,
                           filename,
                           mris_shape=mris_shape,
                           plotbar=False)
            else:
                # Volume mode: PNG of the chosen slice plus the full NIfTI volume.
                reshaped = tumour.reshape(mris_shape)
                cropped = crop_center(reshaped[:, :, self.chosen_slice], self.target_shape)
                plot_image(cropped,
                           str(filename).split(".")[0] + ".png",
                           plotbar=False)
                plot_nifti(tumour,
                           filename,
                           mris_shape,
                           affine=affine)
def plot_image(image,
               filename,
               colormap=None,
               mris_shape=None,
               shaded_labels=None,
               one_int_bounds=False,
               plotbar=True,
               white_bg=False,
               verbose=True):
    """Render a 2D image to *filename* with matplotlib.

    Parameters
    ----------
    image : 1D or 2D array; a 1D array is reshaped with *mris_shape*.
    colormap : matplotlib colormap; defaults to a fresh copy of viridis.
    shaded_labels : if not None, used as vmax and triggers discrete colorbar bounds.
    one_int_bounds : use integer tick bounds when the max value is small.
    plotbar : draw a colorbar next to the image.
    white_bg : save with a white (opaque) background instead of transparent.
    """
    if colormap is None:
        # BUGFIX: the old default `colormap=copy.copy(cm.get_cmap('viridis'))` was
        # evaluated once at import time, so the single shared copy was mutated by
        # set_under('w') below on every call (mutable default argument). Create a
        # fresh copy per call instead; behaviour for explicit callers is unchanged.
        colormap = copy.copy(cm.get_cmap('viridis'))
    if plotbar:
        res_size1 = 6
        res_size2 = 5
    else:
        res_size1 = res_size2 = 5
    fig = plt.figure(figsize=(res_size1, res_size2), dpi=300)
    # ax = plt.gca()
    if len(image.shape) == 1:
        if mris_shape is not None:
            image = image.reshape(mris_shape)
        else:
            error("The image cannot be reshaped and showed with imshow")
    elif len(image.shape) > 2:
        error("The image has a shape greater than 2. You might have forgotten to slice it.")
    # Rotate/flip so the anatomical orientation matches the expected display.
    image = np.rot90(image, k=-1)
    image = np.flip(image, axis=1)
    if shaded_labels is None:
        max_lin = np.max(image)
        bounds = None
    else:
        # Fusion colormaps stack 256 shades per cluster; recover the cluster count.
        n_clusters = int(colormap.N / 256)
        max_lin = shaded_labels
        bounds = np.linspace(0, max_lin, n_clusters + 1)
    if one_int_bounds and max_lin < 15:
        bounds = range(int(max_lin) + 1)
    # Tiny epsilon so exact zeros fall below vmin and hit the 'under' colour.
    min_val = np.min(image) + 0.1e-10
    if min_val > max_lin:
        min_val = max_lin
    if colormap != cm.get_cmap('gray'):
        colormap.set_under('w')
    plt.axis('off')
    plt.xticks([])
    plt.yticks([])
    sc = plt.imshow(image,
                    cmap=colormap,
                    vmin=min_val,
                    vmax=max_lin)
    if plotbar:
        # divider = make_axes_locatable(ax)
        # cax = divider.append_axes("right", size="5%", pad=0.05)
        cax = None
        plt.colorbar(sc, cax=cax, ticks=bounds)
    plt.savefig(filename, bbox_inches='tight', transparent=not white_bg)
    if verbose:
        print_timestamped("Saved in " + str(filename))
    plt.close(fig)
def plot_nifti(image, filename, mris_shape=None, affine=None, verbose=True):
    """Save *image* as a NIfTI file at *filename*.

    A 1D *image* is reshaped with *mris_shape*; when *affine* is omitted a
    default affine is used.
    """
    if affine is None:
        # Default affine - presumably matches the dataset's scanner orientation;
        # TODO confirm against the acquisition headers.
        affine = np.array([[-1., 0., 0., -0.],
                           [0., -1., 0., 239.],
                           [0., 0., 1., 0.],
                           [0., 0., 0., 1.]])
    if len(image.shape) == 1:
        if mris_shape is not None:
            image = image.reshape(mris_shape)
        else:
            error("The image cannot be reshaped, please add the shape.")
    new_nifti = nib.Nifti1Image(image, affine=affine)
    nib.save(new_nifti, filename)
    if verbose:
        print_timestamped("Saved in " + str(filename))
def colormap_fusion(n_clusters):
    """Build one colormap stacking a white-to-colour gradient (256 shades) per cluster."""
    if n_clusters > len(different_colors):
        error("The number of clusters is greater than the available size of colours.")
    ramps = [
        shaded_color_map(different_colors[k])(np.linspace(0.20, 1, 256))
        for k in range(n_clusters)
    ]
    return colors.ListedColormap(np.vstack(ramps))
def full_colormap_fusion(n_clusters):
    """Build a colormap with one flat colour per cluster (no shading)."""
    if n_clusters > len(different_colors):
        error("The number of clusters is greater than the available size of colours.")
    palette = different_colors[:n_clusters]
    return colors.ListedColormap(palette)
def shaded_color_map(rgb_color):
    """Return a linear colormap fading from white to *rgb_color*."""
    return colors.LinearSegmentedColormap.from_list("", ["white", rgb_color])
| giuliabaldini/brainclustering | util/plot_handler.py | plot_handler.py | py | 12,669 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.util.info",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "util.util.crop_center",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotli... |
29099062357 | from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
from config.errCode import *
from config.kiwoomType import RealType
from config.slack import Slack
from PyQt5.QtTest import *
import os
class Kiwoom(QAxWidget):
    def __init__(self):
        """Connect to the Kiwoom OpenAPI OCX, log in, and load account/portfolio state."""
        super().__init__()  # == QAxWidget.__init__()
        print('class: GetAccountInfo -- api[kiwoom] connected')
        self.slack = Slack()          # Slack notifier for trade/analysis messages
        self.realType = RealType()    # FID lookup table for real-time data types
        #_____event loop______#
        # Blocking event loops: the API is asynchronous, so each request waits
        # on its loop until the matching slot calls exit().
        self.login_event_loop = QEventLoop()  # event loop start - for login
        self.detail_account_info_event_loop = QEventLoop()
        self.calculator_event_loop = QEventLoop()
        #_____account_rel_____#
        self.account_stock_dict = {}      # holdings, keyed by stock code
        self.not_signed_stock_dict = {}   # unfilled orders, keyed by order number
        self.portfolio_stock_dict = {}    # watch-list / candidate stocks, keyed by code
        self.account_num = None
        self.deposit = 0                  # deposit (예수금)
        self.use_money = 0
        self.use_money_percent = 0.5      # fraction of the deposit allowed for trading
        self.output_deposit = 0
        self.total_profit_loss_money = 0  # total valuation profit/loss (총평가손익금액)
        self.total_profit_loss_rate = 0.0  # total return rate in % (총수익률(%))
        #___for_calculate_stock__#
        self.calcul_data = []             # daily-candle rows accumulated during analysis
        #_____screen num______#
        self.screen_my_info = "2000"
        self.screen_calculate_stock = '4000'
        self.screen_real_stock = '5000'   # screen number allocated per stock (real-time)
        self.screen_order_stock = '6000'  # screen number allocated per stock (orders)
        self.screen_start_stop_real = '1000'
        #_____initial setting___#
        self.get_ocx_instance()           # 1. take control of the API OCX
        self.event_slots()                # 2. register TR/login event slots
        self.real_event_slots()           # 2+. register real-time event slots
        self.signal_login_commConnect()   # 3. request login (blocks until done)
        self.get_account_info()           # 4. fetch the account number
        self.detail_account_info()        # 5. request deposit details
        self.detail_account_mystock()     # 6. request account holdings
        self.not_concluded_account()      # 7. request unfilled orders
        ##self.calculator_fnc()           # stock screening (disabled by default)
        self.read_code()                  # 8. load previously screened stocks
        self.screen_number_setting()      # 9. allocate screen numbers
        # Subscribe to market open/close state; '0' = initial registration
        # (subsequent real-time registrations use '1').
        self.dynamicCall('SetRealReg(QString, QString, QString, QString)',
                         self.screen_start_stop_real,
                         '',
                         self.realType.REALTYPE['장시작시간']['장운영구분'],
                         '0')
        # Subscribe every portfolio stock to real-time tick data.
        for code in self.portfolio_stock_dict.keys():
            screen_num = self.portfolio_stock_dict[code]['스크린번호']
            fids = self.realType.REALTYPE['주식체결']['체결시간']
            self.dynamicCall('SetRealReg(QString, QString, QString, QString)',
                             screen_num,
                             code,
                             fids,
                             '1')  # real-time per-stock subscription - always '1'
            print('CODE : {}, SCREEN : {}, FID : {}'.format(code, screen_num, fids))
def get_code_list_by_market(self, market_code):
'''
전체 종목 코드 반환
'''
code_list = self.dynamicCall('GetCodeListByMarket(QString)', market_code)
code_list = code_list.split(';')[:-1]
return code_list
    def calculator_fnc(self):
        """Screen all KOSDAQ stocks by requesting daily-candle data for each code.

        Note: the slice below skips the first 100 codes - presumably to resume a
        previous partial run; verify before relying on full coverage.
        """
        code_list = self.get_code_list_by_market('10')  # '10' = KOSDAQ market
        code_list = code_list[100:]
        print('코스닥 종목 수 : {}'.format(len(code_list)))
        for idx, code in enumerate(code_list):
            # Drop any previous real-time link on the screening screen before reuse.
            self.dynamicCall('DisconnectRealData(QString)', self.screen_calculate_stock)
            print('{} / {} : KOSDAQ Stock Code : {} is updating.. '.format(idx + 1, len(code_list), code))
            self.day_kiwoom_db(code=code)
    def day_kiwoom_db(self, code=None, date=None, sPrevNext='0'):
        """Request the daily candle chart (TR opt10081) for *code* and block until handled.

        *sPrevNext* '2' means "fetch the next page" of a paged result.
        """
        QTest.qWait(3600)  # throttle requests (keeps the Qt event loop alive, unlike sleep)
        self.dynamicCall('SetInputValue(QString, QString)', '종목코드', code)
        self.dynamicCall('SetInputValue(QString, QString)', '수정주가구분', '1')
        if date != None:
            self.dynamicCall('SetInputValue(QString, QString)', '기준일자', date)
        self.dynamicCall('CommRqData(QString, QString, int, QString)',\
                         '주식일봉차트조회', 'opt10081', sPrevNext, self.screen_calculate_stock)
        # Block until trdata_slot processes the response and exits this loop.
        self.calculator_event_loop.exec_()
    ##___api controller____##
    def get_ocx_instance(self):
        """Attach this widget to the Kiwoom OpenAPI OCX control (registered as .ocx)."""
        self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
    ##___group of event slots__##
    def event_slots(self):
        """Wire API events to slots (the slot receives the data when an event fires)."""
        self.OnEventConnect.connect(self.login_slot)   # login response
        self.OnReceiveTrData.connect(self.trdata_slot)  # TR (transaction) responses
    ###___slots__###
    def login_slot(self, err_code):
        """Handle the login response: print the decoded error and unblock the login loop."""
        print(errors(err_code)[1])
        self.login_event_loop.exit()
    def real_event_slots(self):
        """Wire real-time market-data events to their slot."""
        self.OnReceiveRealData.connect(self.realdata_slot)
def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext):
if sRQName == '예수금상세현황요청':
deposit = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '예수금')
self.deposit = int(deposit)
use_money = float(self.deposit)*self.use_money_percent
self.use_money = int(use_money)
self.use_money = self.use_money/4 # 4종목 이상 매수를 위함
# 예수금
output_deposit = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '예수금')
self.output_deposit = int(output_deposit)
print('예수금 : {}'.format(self.output_deposit))
# 출금가능금액
can_exit = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '출금가능금액')
self.can_exit = int(can_exit)
print('출금가능금액 : {}'.format(self.can_exit))
self.detail_account_info_event_loop.exit()
elif sRQName == '계좌평가잔고내역요청':
total_buy_money = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '총매입금액')
self.total_buy_money = int(total_buy_money)
total_profit_loss_money = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '총평가손익금액')
self.total_profit_loss_money = int(total_profit_loss_money)
total_profit_loss_rate = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, 0, '총수익률(%)')
self.total_profit_loss_rate = float(total_profit_loss_rate)
print('[계좌평가잔고내역요청(싱글)]\n총매입액: {}\n총평가손익:{}\n총수익률(%):{}'.format(\
self.total_buy_money, self.total_profit_loss_money, self.total_profit_loss_rate ))
# 보유종목 수 가져오기
rows = self.dynamicCall('GetRepeatCnt(QString, QString)',sTrCode, sRQName) # 최대 20개
for i in range(rows):
code = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '종목번호') # 보유 종목의 종목코드를 순서대로 불러온다
code = code.strip()[1:]
code_name = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '종목명')
code_name = code_name.strip()
count_stock = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '보유수량')
count_stock = int(count_stock)
buy_price = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '매입가')
buy_price = int(buy_price)
profit_rate = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '수익률(%)')
profit_rate = float(profit_rate)
current_price = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '현재가')
current_price = int(current_price)
total_buy_price = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '매입금액')
total_buy_price = int(total_buy_price)
count_can_sell_stock = self.dynamicCall('GetCommData(QString, QString, int, QString)',
sTrCode, sRQName, i, '매매가능수량')
count_can_sell_stock = int(count_can_sell_stock)
mystockMonit = '[보유종목정보(멀티)]\n종목번호: {} | 종목명: {} | 보유수량: {} | 매입가: {} | 수익률(%): {} | 현재가: {} | 매입금액: {} | 매매가능수량: {}'.\
format(code, code_name, count_stock, buy_price, profit_rate, current_price, total_buy_price, count_can_sell_stock)
print(mystockMonit)
# self.slack.notification(
# text=mystockMonit)
self.account_stock_dict[code]={}
self.account_stock_dict[code].update({
'name':code_name,
'count':count_stock,
'buy_price':buy_price,
'profit_rate':profit_rate,
'current_price':current_price,
'total_buy_price':total_buy_price,
'count_sell':count_can_sell_stock
})
print('보유 종목 : {} - {}'.format(code_name,code))
if sPrevNext == '2':
print('현재 조회한 종목 수 : 20')
print('다음 페이지를 조회합니다')
self.detail_account_mystock(sPrevNext='2')
else:
print('현재 조회한 종목 수 : {}'.format(rows))
print('최종페이지입니다.')
self.detail_account_info_event_loop.exit()
elif sRQName == '실시간미체결요청':
rows = self.dynamicCall('GetRepeatCnt(QString, QString)', sTrCode, sRQName)
for i in range(rows):
code = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '종목코드')
code_name = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '종목명')
order_no = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '주문번호')
order_status = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '주문상태')
order_quantity = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '주문수량')
order_price = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '주문가격')
order_sector = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '주문구분')
not_signed_quantity = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '미체결수량')
ok_quantity = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '체결량')
code = code.strip()
code_name = code_name.strip()
order_no = order_no.strip()
order_status = order_status.strip()
order_quantity = int(order_quantity.strip())
order_price = int(order_price.strip())
order_sector = order_sector.strip().lstrip('+').lstrip('-')
not_signed_quantity = int(not_signed_quantity.strip())
ok_quantity = int(ok_quantity.strip())
if order_no in self.not_signed_stock_dict:
pass
else:
self.not_signed_stock_dict[order_no]={}
self.not_signed_stock_dict[order_no].update({
'code':code,
'code_name':code_name,
'order_status':order_status,
'order_quantity':order_quantity,
'order_price':order_price,
'order_sector':order_sector,
'not_signed_quantity':not_signed_quantity,
'ok_quantity':ok_quantity
})
not_signed = '미체결 종목 : {}(주문번호:{})'.format(code_name, order_no)
print(not_signed)
# self.slack.notification(text=not_signed)
elif '주식일봉차트조회' == sRQName:
print('일봉 데이터 요청중..')
code = self.dynamicCall('GetCommData(QString, QString, int, QString)',\
sTrCode, sRQName, 0, '종목코드')
code = code.strip()
rows = self.dynamicCall('GetRepeatCnt(QString, QString)', sTrCode, sRQName)
print('데이터 >> {} , {}개'.format(code, rows))
# data = self.dynamicCall('GetCommDataEx(QString, QString)', sTrCode, sRQName)
# [['', '현재가', '거래량', '거래대금', '날짜', '시가', '고가',' 저가],
# ['', '현재가', '거래량', '거래대금', '날짜', '시가', '고가',' 저가],
# ['', '현재가', '거래량', '거래대금', '날짜', '시가', '고가',' 저가],
# ...]
# 이하 동일 코드(for-loop 사용)
# self.slack.notification(text="['', '현재가', '거래량', '거래대금', '날짜', '시가', '고가',' 저가]")
for i in range(rows):
data = []
current_price = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '현재가')
trade_count = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '거래량')
trade_amount = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '거래대금')
date = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '일자')
start_price = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '시가')
high_price = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '고가')
low_price = self.dynamicCall('GetCommData(QString, QString, int, QString)', sTrCode, sRQName, i, '저가')
data.append("")
data.append(current_price.strip())
data.append(trade_count.strip())
data.append(trade_amount.strip())
data.append(date.strip())
data.append(start_price.strip())
data.append(high_price.strip())
data.append(low_price.strip())
data.append("")
self.calcul_data.append(data.copy())
if sPrevNext == '2':
self.day_kiwoom_db(code=code, sPrevNext=sPrevNext)
else:
## 120일 이평선 조건 | 예시
print('상장 기간(총 일수) : {}'.format(len(self.calcul_data)))
pass_success = False # 반복 조건
# 이평선 그리기 위한 데이터가 충분한지 확인
if self.calcul_data == None or len(self.calcul_data) < 120:
pass_success = False
else:
# 데이터가 충분하다면(120일 이상)
total_price = 0
for value in self.calcul_data[:120]: # 리스트에는 최근일자부터 순서대로 들어가있음(최근 120일 순회)
total_price += int(value[1]) # 현재가(종가) 누적 더하기
moving_avg_price = total_price / 120
bottom_stock_price = False
check_price = None
if int(self.calcul_data[0][7]) <= moving_avg_price and moving_avg_price <= int(self.calcul_data[0][6]): # 가장최근일(오늘) 저가
code_nm = self.dynamicCall('GetMasterCodeName(QString)', code)
msg = '[매수신호] 오늘 {} ({}) 주가 - 120 이평선에 걸쳐있음'.format(code_nm, code)
print(msg)
self.slack.notification(text=msg)
bottom_stock_price = True
check_price = int(self.calcul_data[0][6]) #고가
past_price = None
# 과거 일봉 조회 (120일 이평선보다 밑에 있는지 확인)
if bottom_stock_price == True:
moving_avg_price_past = 0
price_top_moving = False
idx = 1
while True:
if len(self.calcul_data[idx:]) < 120: # 데이터 충분한지(120일) 계속 확인
print('데이터 부족함(120일치 데이터 필요)')
break
else:
total_price = 0
for value in self.calcul_data[idx:idx+120]:
total_price += int(value[1]) # 과거 종가 누적 더하기
moving_avg_price_past = total_price / 120
if moving_avg_price_past <= int(self.calcul_data[idx][6]) and idx <= 20:
price_top_moving = False
break
elif int(self.calcul_data[idx][7]) > moving_avg_price_past and idx > 20:
print('120일 이평선 위에 있는 일봉 확인')
price_top_moving = True
past_price = int(self.calcul_data[idx][7])
break
idx += 1
if price_top_moving == True:
if moving_avg_price > moving_avg_price_past and check_price > past_price:
print('매수신호 포착')
pass_success = true
if pass_success == True:
print('포착된 종목 저장..')
code_nm = self.dynamicCall('GetMasterCodeName(QString)', code)
msg = '{},{},{}\n'.format(code, code_nm, str(self.calcul_data[0][1]))
f = open('files/condition_stock.txt', 'a', encoding='utf8')
f.write('%s,%s,%s\n' % (code, code_nm, str(self.calcul_data[0][1])))
f.close()
self.slack.notification(text=msg)
elif pass_success == False:
code_nm = self.dynamicCall('GetMasterCodeName(QString)', code)
msg = '{} -{} | 조회 | 매수신호 포착되지 않음'.format(code, code_nm)
print(msg)
self.slack.notification(text=msg)
self.calcul_data.clear()
self.calculator_event_loop.exit()
self.calculator_event_loop.exit()
self.stop_screen_cancel(self.screen_my_info)
self.detail_account_info_event_loop.exit()
    ##_____request_login_____##
    def signal_login_commConnect(self):
        """Request login and block until login_slot exits the login event loop."""
        self.dynamicCall("CommConnect()")  # login request signal
        self.login_event_loop.exec()       # wait for the response
    ##_____request_account____##
    def get_account_info(self):
        """Fetch the login's account list and store the chosen account number."""
        account_list = self.dynamicCall("GetLoginInfo(QString)", "ACCNO")  # request account number signal
        # NOTE(review): index [1] picks the SECOND ';'-separated entry although the
        # original comment said "first account" - presumably the paper-trading
        # (모의투자) account sits at that position; confirm for the target login.
        account_num = account_list.split(';')[1]
        self.account_num = account_num
        print("account : {}".format(account_num))
    def detail_account_info(self, sPrevNext='0'):
        """Request deposit details (TR opw00001) and block until handled.

        First request uses sPrevNext='0'.
        """
        print('예수금 요청중..')
        self.dynamicCall('SetInputValue(QString, QString)', '계좌번호', self.account_num)
        self.dynamicCall('SetInputValue(QString, QString)', '비밀번호', '0000')
        self.dynamicCall('SetInputValue(QString, QString)', '비밀번호입력매체구분', '0000')
        self.dynamicCall('SetInputValue(QString, QString)', '조회구분', '1')
        self.dynamicCall('CommRqData(QString, QString, int, QString)',\
                         '예수금상세현황요청', 'opw00001', sPrevNext, self.screen_my_info)
        # A fresh loop is created here, replacing the one from __init__.
        self.detail_account_info_event_loop = QEventLoop()
        self.detail_account_info_event_loop.exec_()
    def stop_screen_cancel(self, sScrNo=None):
        """Disconnect all real-time data registered on screen *sScrNo*."""
        self.dynamicCall('DisconnectRealData(QString)', sScrNo)
    def detail_account_mystock(self, sPrevNext='0'):
        """Request account holdings (TR opw00018, single data) and block until handled."""
        self.dynamicCall('SetInputValue(QString, QString)', '계좌번호', self.account_num)
        self.dynamicCall('SetInputValue(QString, QString)', '비밀번호', '0000')  # paper-trading common password
        self.dynamicCall('SetInputValue(QString, QString)', '비밀번호입력매체구분', '00')
        self.dynamicCall('SetInputValue(QString, QString)', '조회구분', '1')  # 1: aggregated, 2: per-item
        self.dynamicCall('CommRqData(QString, QString, int, QString)', '계좌평가잔고내역요청', 'opw00018', sPrevNext, self.screen_my_info)
        self.detail_account_info_event_loop.exec_()
    def not_concluded_account(self, sPrevNext='0'):
        """Request unfilled orders (TR opt10075) and block until handled."""
        print('미체결 종목 요청중..')
        self.dynamicCall('SetInputValue(QString, QString)', '계좌번호', self.account_num)
        self.dynamicCall('SetInputValue(QString, QString)', '체결구분', '1')
        self.dynamicCall('SetInputValue(QString, QString)', '매매구분', '0')
        self.dynamicCall('CommRqData(QString, QString, int, QString)',
                         '실시간미체결요청', 'opt10075', sPrevNext, self.screen_my_info)
        self.detail_account_info_event_loop.exec_()
def read_code(self):
file_path = 'files/condition_stock.txt'
if os.path.exists(file_path):
f = open(file_path, 'r', encoding='utf8')
lines = f.readlines()
for line in lines:
if line != '':
ls = line.split(',')
stock_code = ls[0]
stock_name = ls[1]
stock_price = abs(int(ls[2].split('\n')[0]))
self.portfolio_stock_dict.update({
stock_code: {'종목명':stock_name,'현재가':stock_price}
})
f.close()
print(self.portfolio_stock_dict)
def screen_number_setting(self):
screen_overwrite = []
# 계좌평가잔고내역에 있는 종목들
for code in self.account_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
# 미체결에 있는 종목들
for order_number in self.not_signed_stock_dict.keys():
code = self.not_signed_stock_dict[order_number]['종목코드']
if code not in screen_overwrite:
screen_overwrite.append(code)
# 포트폴리오에 담겨있는 종목들
for code in self.portfolio_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
# 스크린번호 할당
cnt = 0 # 스크린번호 하나에 최대 100개 요청가능
for code in screen_overwrite:
real_screen = int(self.screen_real_stock)
order_screen = int(self.screen_order_stock)
if (cnt % 50) == 0: # 스크린번호 하나에 종목코드 최대 50개만 할당함
real_screen += 1 # 5000 => 5001
self.screen_real_stock = str(real_screen)
if (cnt % 50) == 0:
order_screen += 1 # 6000 -> 6001
self.screen_order_stock = str(order_screen)
if code in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict[code].update({
'스크린번호':str(self.screen_real_stock),
'주문용스크린번호':str(self.screen_order_stock)
})
elif code not in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict.update({
code: {'스크린번호': str(self.screen_real_stock),
'주문용스크린번호': str(self.screen_order_stock)}
})
cnt += 1
print(self.portfolio_stock_dict)
    def realdata_slot(self, sCode, sRealType, sRealData):
        """Handle real-time events: market open/close state and per-stock tick data."""
        if sRealType == '장시작시간':
            # market operating state: 0 pre-open, 3 open, 2 closing auction, 4 closed
            fid = self.realType.REALTYPE[sRealType]['장운영구분']
            chk = self.dynamicCall('GetCommRealData(QString, int)',
                                   sCode, fid)
            if chk == '0':
                print('장 시작 전')
            elif chk == '3':
                print('장 시작')
            elif chk == '2':
                print('장 종료, 동시호가 전환')
            elif chk == '4':
                print('장 종료 (3:30)')
        elif sRealType == '주식체결':
            # tick data: read each field by FID; prices come signed, store magnitudes
            currtime = self.dynamicCall('GetCommRealData(QString,int)',
                                        sCode, self.realType.REALTYPE[sRealType]['체결시간'])
            currprice = self.dynamicCall('GetCommRealData(QString,int)',
                                         sCode, self.realType.REALTYPE[sRealType]['현재가'])
            currprice = abs(int(currprice))
            addprice = self.dynamicCall('GetCommRealData(QString,int)',
                                        sCode, self.realType.REALTYPE[sRealType]['전일대비'])
            addprice = abs(int(addprice))
            perprice = self.dynamicCall('GetCommRealData(QString,int)',
                                        sCode, self.realType.REALTYPE[sRealType]['등락율'])
            perprice = float(perprice)
            bestsellprice = self.dynamicCall('GetCommRealData(QString,int)',
                                             sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가'])
            bestsellprice = abs(int(bestsellprice))
            bestbuyprice = self.dynamicCall('GetCommRealData(QString,int)',
                                            sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가'])
            bestbuyprice = abs(int(bestbuyprice))
            amount = self.dynamicCall('GetCommRealData(QString,int)',
                                      sCode, self.realType.REALTYPE[sRealType]['거래량'])
            amount = abs(int(amount))
            # cache the latest tick snapshot in the portfolio dict
            if sCode not in self.portfolio_stock_dict:
                self.portfolio_stock_dict.update({sCode: {}})
            self.portfolio_stock_dict[sCode].update({
                '체결시간': currtime,
                '현재가': currprice,
                '전일대비': addprice,
                '등락율': perprice,
                '(최우선)매도호가': bestsellprice,
                '(최우선)매수호가': bestbuyprice,
                '거래량': amount
            })
            print(self.portfolio_stock_dict[sCode])
            self.slack.notification(text = self.portfolio_stock_dict[sCode])
{
"api_name": "config.slack.Slack",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.kiwoomType.RealType",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "os.path... |
14231652942 | '''
File name: Isonet_star_app.py
Author: Hui Wang (EICN)
Date created: 4/21/2021
Date last modified: 06/01/2021
Python Version: 3.6.5
'''
import sys,os
import logging
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QTableWidgetItem,QMessageBox
from PyQt5.QtCore import QProcess
#Isonet packages
from IsoNet.gui.isonet_gui import Ui_MainWindow ##need to change in the package
from IsoNet.gui.model_star import Model, setTableWidget #need to change in the package
from IsoNet.util.metadata import MetaData,Label,Item
class MainWindowUIClass( Ui_MainWindow ):
    def __init__( self ):
        '''Initialize the super class
        '''
        super().__init__()
        self.model = Model()  # star-file backed data model for the tomogram table
        #reset process as None
        self.p = None  # currently running background QProcess (None = idle)
        self.previous_log_line = ""  # last log line, used to overwrite progress-bar lines
        self.setting_file = ".isonet.setting"
        # check for pid in last running
        #if os.path.isfile(self.model.pid_file):
        #    os.remove(self.model.pid_file)
    def setupUi( self, MW ):
        ''' Setup the UI of the super class, and add here code
        that relates to the way we want our UI to operate.
        '''
        super().setupUi( MW )
        #load default content in tomograms.star
        setTableWidget(self.tableWidget, self.model.md)
        #set up functions when cells be clicked
        #self.tableWidget.cellPressed[int, int].connect(self.browseSlotTable)
        self.tableWidget.cellDoubleClicked[int, int].connect(self.browseSlotTable)
        self.tableWidget.cellChanged[int,int].connect(self.updateMDItem)
        #self.tableWidget.horizontalHeaderItem(1).setToolTip("Header 0");
        #for i,lab in enumerate(self.model.header):
        #    self.tableWidget.horizontalHeaderItem(i-1).setToolTip(self.get_toolTip(lab))
        # Log both to stdout; timestamps match the on-disk log file format.
        logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
        datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        ########################
        # connect function to buttons
        ########################
        '''
        self.pushButton_insert.setStyleSheet("background-color : lightblue")
        self.pushButton_delete.setStyleSheet("background-color : lightblue")
        self.pushButton_open_star.setStyleSheet("background-color : lightblue")
        self.pushButton_3dmod.setStyleSheet("background-color : lightblue")
        self.button_deconov_dir.setStyleSheet("background-color : lightblue")
        self.button_mask_dir.setStyleSheet("background-color : lightblue")
        self.button_subtomo_dir.setStyleSheet("background-color : lightblue")
        self.button_result_dir_refine.setStyleSheet("background-color : lightblue")
        self.button_result_dir_predict.setStyleSheet("background-color : lightblue")
        self.button_subtomo_star_refine.setStyleSheet("background-color : lightblue")
        self.button_pretrain_model_refine.setStyleSheet("background-color : lightblue")
        self.button_tomo_star_predict.setStyleSheet("background-color : lightblue")
        self.button_pretrain_model_predict.setStyleSheet("background-color : lightblue")
        self.button_continue_iter.setStyleSheet("background-color : lightblue")
        self.pushButton_deconv.setStyleSheet("background-color : lightblue")
        self.pushButton_generate_mask.setStyleSheet("background-color : lightblue")
        self.pushButton_extract.setStyleSheet("background-color : lightblue")
        self.pushButton_refine.setStyleSheet("background-color : lightblue")
        self.pushButton_predict.setStyleSheet("background-color : lightblue")
        self.pushButton_predict_3dmod.setStyleSheet("background-color : lightblue")
        '''
        self.pushButton_insert.clicked.connect(self.copyRow)
        self.pushButton_delete.clicked.connect(self.removeRow)
        self.pushButton_open_star.clicked.connect(self.open_star)
        self.pushButton_3dmod.clicked.connect(self.view_3dmod)
        self.button_deconov_dir.clicked.connect(lambda: self.browseFolderSlot("deconv_dir"))
        self.button_mask_dir.clicked.connect(lambda: self.browseFolderSlot("mask_dir"))
        self.button_subtomo_dir.clicked.connect(lambda: self.browseFolderSlot("subtomo_dir"))
        self.button_result_dir_refine.clicked.connect(lambda: self.browseFolderSlot("result_dir_refine"))
        self.button_result_dir_predict.clicked.connect(lambda: self.browseFolderSlot("result_dir_predict"))
        self.button_subtomo_star_refine.clicked.connect(lambda: self.browseSlot("subtomo_star_refine"))
        self.button_pretrain_model_refine.clicked.connect(lambda: self.browseSlot("pretrain_model_refine"))
        self.button_tomo_star_predict.clicked.connect(lambda: self.browseSlot("tomo_star_predict"))
        self.button_pretrain_model_predict.clicked.connect(lambda: self.browseSlot("pretrain_model_predict"))
        self.button_continue_iter.clicked.connect(lambda: self.browseSlot("continue_from"))
        self.pushButton_deconv.clicked.connect(self.deconvolve)
        self.pushButton_generate_mask.clicked.connect(self.make_mask)
        self.pushButton_extract.clicked.connect(self.extract_subtomo)
        self.pushButton_refine.clicked.connect(self.refine)
        self.pushButton_predict.clicked.connect(self.predict)
        self.pushButton_predict_3dmod.clicked.connect(self.view_predict_3dmod)
        self.actionGithub.triggered.connect(self.openGithub)
        #########################
        #set icon location
        #########################
        #get the root path for isonet
        isonet_path = os.popen("which isonet.py").read()
        tmp = isonet_path.split("bin/isonet.py")
        root_path = tmp[0]
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(root_path+"gui/icons/icon_folder.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.button_deconov_dir.setIcon(icon)
        self.button_mask_dir.setIcon(icon)
        self.button_subtomo_star_refine.setIcon(icon)
        self.button_subtomo_dir.setIcon(icon)
        self.button_pretrain_model_refine.setIcon(icon)
        self.button_result_dir_refine.setIcon(icon)
        self.button_tomo_star_predict.setIcon(icon)
        self.button_pretrain_model_predict.setIcon(icon)
        self.button_result_dir_predict.setIcon(icon)
        self.button_continue_iter.setIcon(icon)
        self.read_setting()
        ###Set up log file monitor###
        import datetime
        now = datetime.datetime.now()
        #create a empty log file
        if not self.model.isValid(self.model.log_file):
            os.system("echo {} > {}".format(now.strftime("%Y-%m-%d %H:%M:%S"), self.model.log_file))
        self.textBrowser_log.setText(self.model.getLogContent(self.model.log_file))
        self.textBrowser_log.moveCursor(QtGui.QTextCursor.End)
        #self.horizontalLayout_48.hide()
        #for widgets in self.horizontalLayout_44.children():
        #print(widgets.widget())
        #for widget in widgets.children():
        #print(widget)
        #    widget.hide()
        ####################
        #self.log_watcher = QtCore.QFileSystemWatcher([self.model.log_file])
        #self.log_watcher.fileChanged.connect(self.update_log)
    #connect to all the main function button to run the process in the background
    #cmd is the command need to be excuted, and btn pass the button object
    def start_process(self, cmd, btn):
        """Run *cmd* as a background QProcess, turning *btn* into a Stop button.

        Only one process may run at a time; pressing Stop kills it.
        NOTE(review): this uses ``self.mw.p`` while __init__ sets ``self.p`` -
        presumably ``self.mw`` is attached elsewhere; confirm against the
        application bootstrap code.
        """
        if self.mw.p is None:  # No process running.
            self.mw.p = QProcess()
            #change the status of the current botton
            if btn.text() in ["Deconvolve","Generate Mask","Extract","Refine","Predict"]:
                # Remember the original label so it can be restored on finish.
                self.model.btn_pressed_text = btn.text()
                btn.setText("Stop")
                btn.setStyleSheet('QPushButton {color: red;}')
            else:
                btn.setEnabled(False)
            self.mw.p.readyReadStandardOutput.connect(self.dataReady)
            self.mw.p.finished.connect(lambda: self.process_finished(btn))
            self.mw.p.start(cmd)
        elif btn.text() =="Stop":
            # Same button pressed while running: kill the process.
            if self.mw.p:
                self.mw.p.kill()
            else:
                if self.model.btn_pressed_text:
                    btn.setText(self.model.btn_pressed_text)
        else:
            self.warn_window("Already runing another job, please wait until it finished!")
    def process_finished(self, btn):
        """Restore *btn* after the background process ends and refresh the table."""
        if btn.text() == "Stop":
            # Restore the original label saved in start_process.
            if self.model.btn_pressed_text:
                btn.setText(self.model.btn_pressed_text)
                #btn.setText("Refine")
            self.model.btn_pressed_text = None
            btn.setStyleSheet("QPushButton {color: black;}")
        else:
            btn.setEnabled(True)
        # Reload the star file since the finished job may have updated it.
        self.model.read_star()
        setTableWidget(self.tableWidget, self.model.md)
        self.p = None  # NOTE(review): start_process tracks the process on self.mw.p - confirm which attribute is authoritative
    #link to log window to display output of stdout
    def dataReady(self):
        """Stream the running process's stdout into the log browser.

        Non-printable characters are filtered out (the full-block char
        u2588 used by text progress bars is kept).  When both the previous
        chunk and the current chunk contain '[', the last line is treated
        as an updating progress bar and overwritten in place instead of
        appended.  Output is also appended to the log file, and the view
        auto-scrolls only when it is already near the bottom.
        """
        cursor = self.textBrowser_log.textCursor()
        #cursor.movePosition(cursor.End)
        # have transfer byte string to unicode string
        import string
        printable = set(string.printable)
        printable.add(u'\u2588')  # keep the block character used by progress bars
        txt = str(self.mw.p.readAll(),'utf-8')
        #txt += self.mw.p.errorString()
        printable_txt = "".join(list(filter(lambda x: x in printable, txt)))
        if '[' in self.previous_log_line and '[' in printable_txt:
            # Looks like consecutive progress-bar updates: replace the
            # previous line rather than appending a new one.
            cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
            cursor.movePosition(cursor.End, cursor.KeepAnchor)
            cursor.removeSelectedText()
            cursor.deletePreviousChar()
        cursor.insertText(printable_txt)
        f = open(self.model.log_file, 'a+')
        f.write(printable_txt)
        f.close()
        self.previous_log_line = printable_txt
        #self.textBrowser_log.ensureCursorVisible()
        verScrollBar = self.textBrowser_log.verticalScrollBar()
        scrollIsAtEnd = verScrollBar.maximum() - verScrollBar.value()
        if scrollIsAtEnd <=100:
            verScrollBar.setValue(verScrollBar.maximum()) # Scrolls to the bottom
        #self.textBrowser_log.moveCursor(QtGui.QTextCursor.End)
def removeRow(self):
#print(self.tableWidget.selectionModel().selectedIndexes()[0].row())
#print(self.tableWidget.selectionModel().selectedIndexes()[0].column())
indices = self.tableWidget.selectionModel().selectedRows()
if indices:
for index in sorted(indices,reverse=True):
self.tableWidget.removeRow(index.row())
self.updateMD()
    def copyRow(self):
        """Duplicate table rows.

        Behaviour depends on the table state:
        - empty table: insert one row filled with per-column defaults;
        - rows selected: append a copy of each selected row;
        - nothing selected: append a copy of the last row.
        Result columns (deconv/mask/corrected tomo names, mask boundary)
        are reset to "None" rather than copied, since they describe the
        source row's outputs.  The star file is rewritten at the end.
        """
        rowCount = self.tableWidget.rowCount()
        columnCount = self.tableWidget.columnCount()
        if rowCount <=0 :
            # Empty table: create the first row from default values.
            self.tableWidget.insertRow(self.tableWidget.rowCount())
            for j in range(columnCount):
                #self.model.md._setItemValue(it,Label(self.model.header[j+1]),self.tableWidget.item(i, j).text())
                #print(self.default_value(self.model.header[j+1]))
                self.tableWidget.setItem(0, j, QTableWidgetItem(self.default_value(self.model.header[j+1])))
                #print(self.tableWidget.item(0, j).text())
        else:
            indices = self.tableWidget.selectionModel().selectedRows()
            if indices:
                # Copy every selected row, appending at the bottom.
                for index in sorted(indices):
                    self.tableWidget.insertRow(self.tableWidget.rowCount())
                    rowCount = self.tableWidget.rowCount()
                    for j in range(columnCount):
                        if self.model.header[j+1] in ["rlnDeconvTomoName","rlnMaskName","rlnCorrectedTomoName","rlnMaskBoundary"]:
                            self.tableWidget.setItem(rowCount-1, j, QTableWidgetItem("None"))
                            #self.tableWidget.cellChanged[rowCount-1, j].connect(self.updateMD)
                        else:
                            self.tableWidget.setItem(rowCount-1, j, QTableWidgetItem(self.tableWidget.item(index.row(), j).text()))
            else:
                # No selection: duplicate the last row.
                self.tableWidget.insertRow(self.tableWidget.rowCount())
                rowCount = self.tableWidget.rowCount()
                for j in range(columnCount):
                    if self.model.header[j+1] in ["rlnDeconvTomoName","rlnMaskName","rlnCorrectedTomoName","rlnMaskBoundary"]:
                        self.tableWidget.setItem(rowCount-1, j, QTableWidgetItem("None"))
                    elif not self.tableWidget.item(rowCount-2, j) is None:
                        self.tableWidget.setItem(rowCount-1, j, QTableWidgetItem(self.tableWidget.item(rowCount-2, j).text()))
        self.updateMD()
def default_value(self, label):
switcher = {
"rlnMicrographName": "None",
"rlnPixelSize": "1",
"rlnDefocus": "0",
"rlnNumberSubtomo":"100",
"rlnSnrFalloff":"1",
"rlnDeconvStrength": "1",
"rlnDeconvTomoName":"None",
"rlnMaskBoundary":"None",
"rlnMaskDensityPercentage": "50",
"rlnMaskStdPercentage": "50",
"rlnMaskName": "None"
}
return switcher.get(label, "None")
def switch_btn(self, btn):
switcher = {
"mask_dir": self.lineEdit_mask_dir,
"deconv_dir": self.lineEdit_deconv_dir,
"subtomo_dir": self.lineEdit_subtomo_dir,
"result_dir_refine": self.lineEdit_result_dir_refine,
"result_dir_predict": self.lineEdit_result_dir_predict,
"subtomo_star_refine":self.lineEdit_subtomo_star_refine,
"pretrain_model_refine":self.lineEdit_pretrain_model_refine,
"tomo_star_predict": self.lineEdit_tomo_star_predict,
"pretrain_model_predict":self.lineEdit_pretrain_model_predict,
"continue_from": self.lineEdit_continue_iter
}
return switcher.get(btn, "Invaid btn name")
def file_types(self, item):
switcher = {
"rlnMicrographName":"mrc or rec file (*.mrc *.rec) ;; All Files (*)",
"rlnDeconvTomoName":"mrc or rec file (*.mrc *.rec) ;; All Files (*)",
"rlnMaskName":"mrc or rec file (*.mrc *.rec) ;; All Files (*)",
"rlnMaskBoundary": "mod file (*.mod) ;; All Files (*)"
}
return switcher.get(item, "Invaid file types")
def get_toolTip(self,label):
switcher = {
"rlnMicrographName": "Your tomogram filenames",
"rlnPixelSize": "pixel size of your input tomograms",
"rlnDefocus": "estimated defocus value around 0 degree",
"rlnNumberSubtomo":"number of subtomograms to be extraced",
"rlnSnrFalloff":"SNR fall rate with the frequency",
"rlnDeconvStrength": "(1.0) Strength of the deconvolution",
"rlnDeconvTomoName":"automaticly saved deconved tomogram filename",
"rlnMaskBoundary":"model file that define your mask boundary(optional)",
"rlnMaskDensityPercentage": "The approximate percentage of pixels to keep based on their local pixel density",
"rlnMaskStdPercentage": "The approximate percentage of pixels to keep based on their local standard deviation",
"rlnMaskName": "automaticly saved mask tomogram filename"
}
return switcher.get(label, "None")
    def updateMD ( self ):
        """Rebuild self.model.md from the current table contents and write
        it back to the tomogram star file.

        Empty cells, and cells whose value fails to set, are refilled from
        the previous metadata snapshot; rlnMaskBoundary alone falls back
        to "None" (it is optional and may not exist in the snapshot).
        """
        star_file = self.model.tomogram_star
        rowCount = self.tableWidget.rowCount()
        columnCount = self.tableWidget.columnCount()
        data = self.model.md._data  # snapshot of the rows before the rebuild
        self.model.md = MetaData()
        self.model.md.addLabels('rlnIndex')
        for j in range(columnCount):
            self.model.md.addLabels(self.model.header[j+1])
            #self.model.md.addLabels(self.tableWidget.horizontalHeaderItem(j).text())
        for i in range(rowCount):
            #TODO check the folder contains only tomograms.
            it = Item()
            self.model.md.addItem(it)
            self.model.md._setItemValue(it,Label('rlnIndex'),str(i+1))
            for j in range(columnCount):
                try:
                    #print("update:",Label(self.model.header[j+1]),self.tableWidget.item(i, j).text())
                    if len(self.tableWidget.item(i, j).text()) <1:
                        # Empty cell: restore the previous value (or "None"
                        # for the optional mask-boundary column).
                        if self.model.header[j+1] != "rlnMaskBoundary":
                            previous_value = getattr(data[i],self.model.header[j+1])
                        else:
                            previous_value = "None"
                        self.model.md._setItemValue(it,Label(self.model.header[j+1]),previous_value)
                        self.tableWidget.setItem(i, j, QTableWidgetItem(str(previous_value)))
                    else:
                        self.model.md._setItemValue(it,Label(self.model.header[j+1]),self.tableWidget.item(i, j).text())
                        #self.model.md._setItemValue(it,Label(self.tableWidget.horizontalHeaderItem(j).text()),self.tableWidget.item(i, j).text())
                except:
                    # Any failure (e.g. missing cell or invalid value):
                    # silently fall back to the previous value.
                    previous_value = getattr(data[i],self.model.header[j+1])
                    self.model.md._setItemValue(it,Label(self.model.header[j+1]),previous_value)
                    self.tableWidget.setItem(i, j, QTableWidgetItem(str(previous_value)))
                    #print("error in seeting values for {}! set it to previous value automatically.".format(self.tableWidget.horizontalHeaderItem(j).text()))
        self.model.md.write(star_file)
def updateMDItem ( self, i, j ):
try:
current_value = self.tableWidget.item(i, j).text()
#self.model.md._setItemValue(self.mnodel.md._data[i],Label(self.model.header[j+1]),current_value)
#for row,it in enumerate(self.model.md):
# print(i,j)
# if row == i:
# self.model.md._setItemValue(it,Label(self.tableWidget.horizontalHeaderItem(j).text()),self.tableWidget.item(i, j).text())
self.updateMD()
except:
pass
    def browseSlot( self , btn ):
        ''' Called when the user presses a Browse button.

        Opens a file dialog whose name filter depends on *btn* (json for
        continue-from, star for star-file fields, h5 for model fields),
        then writes the chosen path -- simplified relative to the current
        working directory -- into the matching QLineEdit.
        '''
        lineEdit = self.switch_btn(btn)
        pwd = os.getcwd().replace("\\","/")
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        flt = "All Files (*)"
        if btn == "continue_from":
            flt = "json file (*.json);;All Files (*)"
        if btn == "subtomo_star_refine" or btn == "tomo_star_predict":
            flt = "star file (*.star);;All Files (*)"
        if btn == "pretrain_model_refine" or btn == "pretrain_model_predict":
            flt = "model file (*.h5);;All Files (*)"
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
                        None,
                        "Choose File",
                        "",
                        flt,
                        options=options)
        if fileName:
            #self.model.setFileName( fileName )
            #######
            #cmd = "echo choose file: {} >> log.txt ".format(fileName)
            #os.system(cmd)
            #self.logWindow.append("choose file: {}".format(fileName) )
            simple_name = self.model.sim_path(pwd,fileName)
            lineEdit.setText( simple_name )
            #self.logWindow.moveCursor(QtGui.QTextCursor.End)
            #######
            #self.refreshAll()
            #self.debugPrint( "Browse button pressed" )
    def browseFolderSlot( self , btn):
        '''
        Called when the user presses a Browse-folder button: open a
        directory dialog and write the chosen path (simplified relative
        to the current working directory) into the QLineEdit for *btn*.
        Errors (e.g. dialog cancelled) are silently ignored.
        TODO: add file name filter
        '''
        lineEdit = self.switch_btn(btn)
        try:
            pwd = os.getcwd().replace("\\","/")
            dir_path=QtWidgets.QFileDialog.getExistingDirectory(None,"Choose Directory",pwd)
            #self.model.setFolderName( dir_path )
            #cmd = "echo choose folder: {} >> log.txt ".format(dir_path)
            #os.system(cmd)
            #self.logWindow.append("choose folder: {}".format(dir_path) )
            #pwd = os.getcwd().replace("\\","/")
            simple_path = self.model.sim_path(pwd,dir_path)
            lineEdit.setText( simple_path )
            #self.logWindow.moveCursor(QtGui.QTextCursor.End)
            #self.refreshAll()
        except:
            ##TODO: record to log.
            pass
    def browseSlotTable( self , i, j):
        ''' Open a file dialog for a file-type table cell (i, j) and store
        the chosen path, simplified relative to the working directory, in
        that cell.  Only the four file-name columns react; cancelling the
        dialog keeps the cell's current text.
        '''
        if self.model.header[j+1] in ["rlnMicrographName", "rlnMaskBoundary","rlnDeconvTomoName","rlnMaskName"]:
            try:
                options = QtWidgets.QFileDialog.Options()
                options |= QtWidgets.QFileDialog.DontUseNativeDialog
                fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
                                None,
                                "Choose File",
                                "",
                                self.file_types(self.model.header[j+1]),
                                options=options)
                if not fileName:
                    # Dialog cancelled: keep the existing cell value.
                    fileName = self.tableWidget.item(i, j).text()
                pwd = os.getcwd().replace("\\","/")
                simple_path = self.model.sim_path(pwd,fileName)
                self.tableWidget.setItem(i, j, QTableWidgetItem(simple_path))
            except:
                ##TODO: record to log.
                pass
        else:
            pass
    def deconvolve( self ):
        """Build the `isonet.py deconv` command line from the GUI fields
        and run it in the background.

        Each non-empty field is appended as the matching CLI option; the
        settings file is saved first.  With "only print command" checked
        (and no job running) the command is printed instead of executed.
        """
        tomogram_star = self.model.tomogram_star
        cmd = "isonet.py deconv {} ".format(tomogram_star)
        if self.lineEdit_deconv_dir.text():
            cmd = "{} --deconv_folder {}".format(cmd, self.lineEdit_deconv_dir.text())
        if self.lineEdit_tomo_index_deconv.text():
            cmd = "{} --tomo_idx {}".format(cmd, self.lineEdit_tomo_index_deconv.text())
        if self.lineEdit_ncpu.text():
            cmd = "{} --ncpu {}".format(cmd, self.lineEdit_ncpu.text())
        if self.lineEdit_highpassnyquist.text():
            cmd = "{} --highpassnyquist {}".format(cmd, self.lineEdit_highpassnyquist.text())
        if self.lineEdit_chunk_size.text():
            cmd = "{} --chunk_size {}".format(cmd, self.lineEdit_chunk_size.text())
        if self.lineEdit_overlap.text():
            cmd = "{} --overlap {}".format(cmd, self.lineEdit_overlap.text())
        self.save_setting()
        if self.checkBox_only_print_command_prepare.isChecked() and self.pushButton_deconv.text() == 'Deconvolve':
            print(cmd)
            #logging.info(cmd)
        else:
            self.start_process(cmd,self.pushButton_deconv)
    def make_mask( self ):
        """Build the `isonet.py make_mask` command line from the GUI
        fields and run it in the background.

        Non-empty fields become CLI options; unchecking "use deconv"
        explicitly passes --use_deconv_tomo False.  Settings are saved
        first; "only print command" prints instead of running.
        """
        #print("#####making mask############")
        tomogram_star = self.model.tomogram_star
        cmd = "isonet.py make_mask {} ".format(tomogram_star)
        if self.lineEdit_mask_dir.text():
            cmd = "{} --mask_folder {}".format(cmd, self.lineEdit_mask_dir.text())
        if self.lineEdit_patch_size.text():
            cmd = "{} --patch_size {}".format(cmd, self.lineEdit_patch_size.text())
        if not self.checkBox_use_deconv_mask.isChecked():
            cmd = "{} --use_deconv_tomo {}".format(cmd, False)
        if self.lineEdit_tomo_index_mask.text():
            cmd = "{} --tomo_idx {}".format(cmd, self.lineEdit_tomo_index_mask.text())
        if self.lineEdit_z_crop.text():
            cmd = "{} --z_crop {}".format(cmd, self.lineEdit_z_crop.text())
        self.save_setting()
        if self.checkBox_only_print_command_prepare.isChecked() and self.pushButton_generate_mask.text() == 'Generate Mask':
            print(cmd)
        else:
            self.start_process(cmd,self.pushButton_generate_mask)
    def extract_subtomo( self ):
        """Build the `isonet.py extract` command line from the GUI fields
        and run it in the background.

        Non-empty fields become CLI options; unchecking "use deconv"
        explicitly passes --use_deconv_tomo False.  Settings are saved
        first; "only print command" prints instead of running.
        """
        tomogram_star = self.model.tomogram_star
        cmd = "isonet.py extract {} ".format(tomogram_star)
        if self.lineEdit_subtomo_dir.text():
            cmd = "{} --subtomo_folder {}".format(cmd, self.lineEdit_subtomo_dir.text())
        if self.lineEdit_subtomo_star_extract.text():
            cmd = "{} --subtomo_star {}".format(cmd, self.lineEdit_subtomo_star_extract.text())
        if self.lineEdit_cube_size_extract.text():
            cmd = "{} --cube_size {}".format(cmd, self.lineEdit_cube_size_extract.text())
        if not self.checkBox_use_deconv_extract.isChecked():
            cmd = "{} --use_deconv_tomo {}".format(cmd, False)
        if self.lineEdit_tomo_index_extract.text():
            cmd = "{} --tomo_idx {}".format(cmd, self.lineEdit_tomo_index_extract.text())
        self.save_setting()
        if self.checkBox_only_print_command_prepare.isChecked() and self.pushButton_extract.text() == 'Extract':
            print(cmd)
        else:
            self.start_process(cmd,self.pushButton_extract)
    def refine( self ):
        """Build the `isonet.py refine` command line from the GUI fields
        and run it in the background.

        The subtomogram star file defaults to "subtomo.star" when the
        field is empty.  Non-empty fields become CLI options; checkbox
        states only emit an option when they differ from the isonet
        defaults.  Settings are saved first; "only print command" prints
        instead of running.
        """
        subtomo_star = self.lineEdit_subtomo_star_refine.text() if self.lineEdit_subtomo_star_refine.text() else "subtomo.star"
        cmd = "isonet.py refine {} ".format(subtomo_star)
        if self.lineEdit_gpuID_refine.text():
            cmd = "{} --gpuID {}".format(cmd, self.lineEdit_gpuID_refine.text())
        if self.lineEdit_pretrain_model_refine.text():
            cmd = "{} --pretrained_model {}".format(cmd, self.lineEdit_pretrain_model_refine.text())
        if self.lineEdit_continue_iter.text():
            cmd = "{} --continue_from {}".format(cmd, self.lineEdit_continue_iter.text())
        if self.lineEdit_result_dir_refine.text():
            cmd = "{} --result_dir {}".format(cmd, self.lineEdit_result_dir_refine.text())
        if self.lineEdit_preprocessing_ncpus.text():
            cmd = "{} --preprocessing_ncpus {}".format(cmd, self.lineEdit_preprocessing_ncpus.text())
        if self.lineEdit_iteration.text():
            cmd = "{} --iterations {}".format(cmd, self.lineEdit_iteration.text())
        if self.lineEdit_batch_size.text():
            cmd = "{} --batch_size {}".format(cmd, self.lineEdit_batch_size.text())
        if self.lineEdit_epoch.text():
            cmd = "{} --epochs {}".format(cmd, self.lineEdit_epoch.text())
        if self.lineEdit_steps_per_epoch.text():
            cmd = "{} --steps_per_epoch {}".format(cmd, self.lineEdit_steps_per_epoch.text())
        if self.lineEdit_lr.text():
            cmd = "{} --learning_rate {}".format(cmd, self.lineEdit_lr.text())
        if self.lineEdit_noise_level.text():
            cmd = "{} --noise_level {}".format(cmd, self.lineEdit_noise_level.text())
        if self.lineEdit_noise_start_iter.text():
            cmd = "{} --noise_start_iter {}".format(cmd, self.lineEdit_noise_start_iter.text())
        if not self.comboBox_noise_mode.currentText() == "noFilter":
            cmd = "{} --noise_mode {}".format(cmd, self.comboBox_noise_mode.currentText())
        if self.lineEdit_drop_out.text():
            cmd = "{} --drop_out {}".format(cmd, self.lineEdit_drop_out.text())
        if self.lineEdit_network_depth.text():
            cmd = "{} --unet_depth {}".format(cmd, self.lineEdit_network_depth.text())
        if self.lineEdit_convs_per_depth.text():
            cmd = "{} --convs_per_depth {}".format(cmd, self.lineEdit_convs_per_depth.text())
        if self.lineEdit_kernel.text():
            cmd = "{} --kernel {}".format(cmd, self.lineEdit_kernel.text())
        if self.lineEdit_filter_base.text():
            cmd = "{} --filter_base {}".format(cmd, self.lineEdit_filter_base.text())
        if self.checkBox_pool.isChecked():
            cmd = "{} --pool {}".format(cmd, True)
        if not self.checkBox_batch_normalization.isChecked():
            cmd = "{} --batch_normalization {}".format(cmd, False)
        if not self.checkBox_normalization_percentile.isChecked():
            cmd = "{} --normalize_percentile {}".format(cmd, False)
        self.save_setting()
        if self.checkBox_only_print_command_refine.isChecked() and self.pushButton_refine.text() == 'Refine':
            print(cmd)
        else:
            self.start_process(cmd,self.pushButton_refine)
    def predict( self ):
        """Build the `isonet.py predict` command line from the GUI fields
        and run it in the background.

        The tomogram star defaults to "tomograms.star" and the GPU list
        to '0,1,2,3' when those fields are empty.  Aborts with a warning
        dialog if no valid trained model file is given.  Settings are
        saved first; "only print command" prints instead of running.
        """
        tomo_star = self.lineEdit_tomo_star_predict.text() if self.lineEdit_tomo_star_predict.text() else "tomograms.star"
        gpuID = self.lineEdit_gpuID_predict.text() if self.lineEdit_gpuID_predict.text() else '0,1,2,3'
        cmd = "isonet.py predict {}".format(tomo_star)
        if self.lineEdit_pretrain_model_predict.text() and self.model.isValid(self.lineEdit_pretrain_model_predict.text()):
            cmd = "{} {}".format(cmd, self.lineEdit_pretrain_model_predict.text())
        else:
            # A trained model is mandatory for prediction.
            self.warn_window("no trained model detected")
            return
        # if self.lineEdit_gpuID_predict.text():
        #     cmd = "{} --gpuID {}".format(cmd, self.lineEdit_gpuID_predict.text())
        cmd = "{} --gpuID {}".format(cmd,gpuID)
        if self.lineEdit_tomo_index_predict.text():
            cmd = "{} --tomo_idx {}".format(cmd, self.lineEdit_tomo_index_predict.text())
        if self.lineEdit_result_dir_predict.text():
            cmd = "{} --output_dir {}".format(cmd, self.lineEdit_result_dir_predict.text())
        if self.lineEdit_cube_size_predict.text():
            cmd = "{} --cube_size {}".format(cmd, self.lineEdit_cube_size_predict.text())
        if self.lineEdit_crop_size_predict.text():
            cmd = "{} --crop_size {}".format(cmd, self.lineEdit_crop_size_predict.text())
        if not self.checkBox_use_deconv_predict.isChecked():
            cmd = "{} --use_deconv_tomo {}".format(cmd, False)
        self.save_setting()
        if self.checkBox_only_print_command_predict.isChecked() and self.pushButton_predict.text() == "Predict":
            print(cmd)
        else:
            self.start_process(cmd,self.pushButton_predict)
    def view_3dmod(self):
        """Open the selected .mrc/.rec table cells in 3dmod.

        Cells from the same row are grouped into one 3dmod invocation;
        a new "; 3dmod" command is started whenever the row changes.  A
        selected rlnMaskBoundary value (other than "None") is appended as
        the model file of its row's invocation.  Warns if no selected
        cell names an mrc/rec file.
        """
        slected_items = self.tableWidget.selectedItems()
        if len(slected_items) > 0:
            cmd = "3dmod"
            model_file=""
            previous_i = -1
            for item in slected_items:
                i = item.row()
                j = item.column()
                if previous_i != -1 and i != previous_i:
                    # Row changed: close the previous row's command and
                    # chain a fresh 3dmod invocation.
                    cmd = "{} {} {}".format(cmd,model_file,"; 3dmod")
                    model_file=""
                item_text = self.tableWidget.item(i, j).text()
                if item_text[-4:] == '.mrc' or item_text[-4:] == '.rec':
                    cmd = "{} {}".format(cmd,item_text)
                if self.model.header[j+1]=="rlnMaskBoundary" and item_text != "None":
                    model_file = "{}".format(item_text)
                previous_i = i
            cmd = "{} {}".format(cmd,model_file)
            #print(cmd)
            if cmd != "3dmod":
                os.system(cmd)
            else:
                self.warn_window("selected items are not mrc or rec file(s)")
    def view_predict_3dmod(self):
        """Open every .mrc/.rec file in the prediction results folder
        (default 'corrected_tomos') in a single 3dmod session; warn when
        the folder contains none.  Errors (e.g. missing folder) are
        swallowed with a console message.
        """
        try:
            result_dir_predict = self.lineEdit_result_dir_predict.text()
            if len(result_dir_predict) < 1:
                result_dir_predict = 'corrected_tomos'
            list_file = os.listdir(result_dir_predict)
            cmd = "3dmod"
            for f in list_file:
                if f[-4:] == ".mrc" or f[-4:] == ".rec":
                    cmd = "{} {}/{}".format(cmd,result_dir_predict,f)
            if cmd != "3dmod":
                os.system(cmd)
            else:
                self.warn_window("no mrc or rec file(s) detected in results folder: {}!".format(result_dir_predict))
        except Exception:
            print('pass')
    def open_star( self ):
        """Let the user pick a .star file, load it through the model and
        refresh the table; warn if the file fails validation (model
        returns 1).
        """
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
                        None,
                        "Choose File",
                        "",
                        "Star file (*.star)",
                        options=options)
        if fileName:
            try:
                tomo_file = self.model.sim_path(self.model.pwd, fileName)
                read_result = self.model.read_star_gui(tomo_file)
                if read_result == 1:
                    self.warn_window("The input star file is not legid!")
                else:
                    setTableWidget(self.tableWidget, self.model.md)
            except:
                print("warning")
                pass
    def read_setting(self):
        """Restore all GUI field values from the plain-text settings file.

        The file holds one "key:value" pair per line (written by
        save_setting); checkbox values are stored as the strings 'True'/
        'False'.  Missing file is a no-op; any parse/lookup error aborts
        the restore with a console message.
        """
        if os.path.exists(self.setting_file):
            data = {}
            try:
                with open(self.setting_file) as f:
                    for line in f:
                        (k, v) = line.split(":")
                        data[k] = v.strip()
                self.lineEdit_deconv_dir.setText(data['deconv_dir'])
                self.lineEdit_tomo_index_deconv.setText(data['tomo_index_deconv'])
                self.lineEdit_preprocessing_ncpus.setText(data['preprocessing_ncpus'])
                self.lineEdit_chunk_size.setText(data['chunk_size'])
                self.lineEdit_highpassnyquist.setText(data['highpassnyquist'])
                self.lineEdit_overlap.setText(data['overlap'])
                self.lineEdit_mask_dir.setText(data['mask_dir'])
                self.lineEdit_tomo_index_mask.setText(data['tomo_index_mask'])
                self.checkBox_use_deconv_mask.setChecked(data['use_deconv_mask'] == 'True')
                #self.checkBox_use_deconv_mask.setChecked(data['use_deconv_mask'])
                self.lineEdit_patch_size.setText(data['patch_size'])
                self.lineEdit_z_crop.setText(data['z_crop'])
                self.lineEdit_subtomo_dir.setText(data['subtomo_dir'])
                self.lineEdit_subtomo_star_extract.setText(data['subtomo_star_extract'])
                self.checkBox_use_deconv_extract.setChecked(data['use_deconv_extract'] == 'True')
                self.lineEdit_cube_size_extract.setText(data['cube_size_extract'])
                self.lineEdit_tomo_index_extract.setText(data['tomo_index_extract'])
                self.lineEdit_subtomo_star_refine.setText(data['subtomo_star_refine'])
                self.lineEdit_gpuID_refine.setText(data['gpuID_refine'])
                self.lineEdit_pretrain_model_refine.setText(data['pretrain_model_refine'])
                self.lineEdit_continue_iter.setText(data['continue_iter'])
                self.lineEdit_result_dir_refine.setText(data['result_dir_refine'])
                self.lineEdit_ncpu.setText(data['ncpu'])
                self.lineEdit_epoch.setText(data['epoch'])
                self.lineEdit_iteration.setText(data['iteration'])
                self.lineEdit_lr.setText(data['lr'])
                self.lineEdit_steps_per_epoch.setText(data['steps_per_epoch'])
                self.lineEdit_batch_size.setText(data['batch_size'])
                self.lineEdit_noise_level.setText(data['noise_level'])
                self.lineEdit_noise_start_iter.setText(data['noise_start_iter'])
                self.comboBox_noise_mode.setCurrentText(data['noise_mode'])
                self.lineEdit_drop_out.setText(data['drop_out'])
                self.lineEdit_network_depth.setText(data['network_depth'])
                self.lineEdit_convs_per_depth.setText(data['convs_per_depth'])
                self.lineEdit_kernel.setText(data['kernel'])
                self.lineEdit_filter_base.setText(data['filter_base'])
                self.checkBox_pool.setChecked(data['pool'] == 'True')
                self.checkBox_batch_normalization.setChecked(data['batch_normalization'] == 'True')
                self.checkBox_normalization_percentile.setChecked(data['normalization_percentile'] == 'True')
                self.lineEdit_tomo_star_predict.setText(data['tomo_star_predict'])
                self.lineEdit_gpuID_predict.setText(data['gpuID_predict'])
                self.lineEdit_tomo_index_predict.setText(data['tomo_index_predict'])
                self.lineEdit_pretrain_model_predict.setText(data['pretrain_model_predict'])
                self.lineEdit_cube_size_predict.setText(data['cube_size_predict'])
                self.lineEdit_result_dir_predict.setText(data['result_dir_predict'])
                self.lineEdit_crop_size_predict.setText(data['crop_size_predict'])
                self.checkBox_use_deconv_predict.setChecked(data['use_deconv_predict'] == 'True')
            except:
                print("error reading {}!".format(self.setting_file))
    def save_setting(self):
        """Persist the current GUI field values to the settings file as
        one "key:value" line per field (the inverse of read_setting).
        Booleans are written via str(), i.e. as 'True'/'False'.  Write
        failures only print a console message.
        """
        param = {}
        param['deconv_dir'] = self.lineEdit_deconv_dir.text()
        param['tomo_index_deconv'] = self.lineEdit_tomo_index_deconv.text()
        param['preprocessing_ncpus'] = self.lineEdit_preprocessing_ncpus.text()
        param['chunk_size'] = self.lineEdit_chunk_size.text()
        param['highpassnyquist'] = self.lineEdit_highpassnyquist.text()
        param['overlap'] = self.lineEdit_overlap.text()
        param['mask_dir'] = self.lineEdit_mask_dir.text()
        param['tomo_index_mask'] = self.lineEdit_tomo_index_mask.text()
        param['use_deconv_mask'] = self.checkBox_use_deconv_mask.isChecked()
        param['patch_size'] = self.lineEdit_patch_size.text()
        param['z_crop'] = self.lineEdit_z_crop.text()
        param['subtomo_dir'] = self.lineEdit_subtomo_dir.text()
        param['subtomo_star_extract'] = self.lineEdit_subtomo_star_extract.text()
        param['use_deconv_extract'] = self.checkBox_use_deconv_extract.isChecked()
        param['cube_size_extract'] = self.lineEdit_cube_size_extract.text()
        param['tomo_index_extract'] = self.lineEdit_tomo_index_extract.text()
        param['subtomo_star_refine'] = self.lineEdit_subtomo_star_refine.text()
        param['gpuID_refine'] = self.lineEdit_gpuID_refine.text()
        param['pretrain_model_refine'] = self.lineEdit_pretrain_model_refine.text()
        param['continue_iter'] = self.lineEdit_continue_iter.text()
        param['result_dir_refine'] = self.lineEdit_result_dir_refine.text()
        param['ncpu'] = self.lineEdit_ncpu.text()
        param['epoch'] = self.lineEdit_epoch.text()
        param['iteration'] = self.lineEdit_iteration.text()
        param['lr'] = self.lineEdit_lr.text()
        param['steps_per_epoch'] = self.lineEdit_steps_per_epoch.text()
        param['batch_size'] = self.lineEdit_batch_size.text()
        param['noise_level'] = self.lineEdit_noise_level.text()
        param['noise_start_iter'] = self.lineEdit_noise_start_iter.text()
        param['noise_mode'] = self.comboBox_noise_mode.currentText()
        param['drop_out'] = self.lineEdit_drop_out.text()
        param['network_depth'] = self.lineEdit_network_depth.text()
        param['convs_per_depth'] = self.lineEdit_convs_per_depth.text()
        param['kernel'] = self.lineEdit_kernel.text()
        param['filter_base'] = self.lineEdit_filter_base.text()
        param['pool'] = self.checkBox_pool.isChecked()
        param['batch_normalization'] = self.checkBox_batch_normalization.isChecked()
        param['normalization_percentile'] = self.checkBox_normalization_percentile.isChecked()
        param['tomo_star_predict'] = self.lineEdit_tomo_star_predict.text()
        param['gpuID_predict'] = self.lineEdit_gpuID_predict.text()
        param['tomo_index_predict'] = self.lineEdit_tomo_index_predict.text()
        param['pretrain_model_predict'] = self.lineEdit_pretrain_model_predict.text()
        param['cube_size_predict'] = self.lineEdit_cube_size_predict.text()
        param['result_dir_predict'] = self.lineEdit_result_dir_predict.text()
        param['crop_size_predict'] = self.lineEdit_crop_size_predict.text()
        param['use_deconv_predict'] = self.checkBox_use_deconv_predict.isChecked()
        try:
            with open(self.setting_file, 'w') as f:
                for key, value in param.items():
                    f.write("{}:{}\n".format(key,value))
        except:
            print("error writing {}!".format(self.setting_file))
def openGithub(self):
import webbrowser
webbrowser.open(self.model.github_addr)
def warn_window(self,text):
msg = QMessageBox()
msg.setWindowTitle("Warning!")
msg.setText(text)
msg.setStandardButtons(QMessageBox.Ok)
msg.setIcon(QMessageBox.Warning)
msg.exec_()
class MyWindow(QtWidgets.QMainWindow):
    """Main application window; adds a close-confirmation dialog that can
    keep or kill a running background job."""
    def __init__(self):
        super().__init__()
        # Handle of the currently running QProcess (None when idle).
        self.p = None
    def closeEvent(self, event):
        """Confirm before closing.

        With a job running: Yes detaches (the job continues in the
        background), No kills it first, Cancel aborts closing.  With no
        job: a plain Yes/No exit confirmation.
        """
        if self.p:
            result = QtWidgets.QMessageBox.question(self,
                        "Confirm Exit...",
                        "Do you want to continue the existing job in the background?",
                        QtWidgets.QMessageBox.Yes| QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
            event.ignore()
            if result == QtWidgets.QMessageBox.Yes:
                event.accept()
            if result == QtWidgets.QMessageBox.No:
                self.p.kill()
                event.accept()
                #kill the old process
        else:
            result = QtWidgets.QMessageBox.question(self,
                        "Confirm Exit...",
                        "Do you want to exit? ",
                        QtWidgets.QMessageBox.Yes| QtWidgets.QMessageBox.No )
            event.ignore()
            if result == QtWidgets.QMessageBox.Yes:
                event.accept()
            if result == QtWidgets.QMessageBox.No:
                pass
            #kill the old process
#kill the old process
stylesheet = """
QWidget #tab, #tab_2, #tab_3{
background-color: rgb(253,247,226)
}
QTabWidget{
background: rgb(144,160,187)
}
QPushButton {
background: rgb(239,221,241)
}
"""
def main():
    """
    MAIN ENTRY POINT of the application: create the QApplication, apply
    the global stylesheet, build the main window with the pyuic5-generated
    UI class, show it and run the Qt event loop until exit.
    """
    app = QtWidgets.QApplication(sys.argv)
    app.setStyleSheet(stylesheet)
    MainWindow = MyWindow()
    #MainWindow = QtWidgets.QMainWindow()
    ui = MainWindowUIClass()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
# The GUI launches as soon as this module runs.
main()
| IsoNet-cryoET/IsoNet | gui/Isonet_star_app.py | Isonet_star_app.py | py | 43,739 | python | en | code | 49 | github-code | 36 | [
{
"api_name": "IsoNet.gui.isonet_gui.Ui_MainWindow",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "IsoNet.gui.model_star.Model",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "IsoNet.gui.model_star.setTableWidget",
"line_number": 44,
"usage_type": "... |
34287760383 |
import random
from xml.dom.minidom import parseString

# Read the GA configuration; the XML provides <problemSize> (chromosome
# length) and <populationSize> (number of chromosomes).  'with' guarantees
# the handle is closed even if parsing fails.
with open('/home/med/Desktop/bioInfo.xml', 'r') as config_file:
    data = config_file.read()
dom = parseString(data)

PS = dom.getElementsByTagName('problemSize')[0].toxml()
PopS = dom.getElementsByTagName('populationSize')[0].toxml()
# Strip the surrounding tags to recover the raw integer text.
ProblemSize = PS.replace('<problemSize>', '').replace('</problemSize>', '')
PopulationSize = PopS.replace('<populationSize>', '').replace('</populationSize>', '')
psint = int(ProblemSize) + 1
popsint = int(PopulationSize) + 1

# Write one random bit-string chromosome per line (the seed population).
# The original code never closed this handle, so buffered output could be
# lost; the context manager flushes and closes it deterministically.
with open('/home/med/Desktop/seedpopulation.txt', 'w') as f:
    for i in range(1, popsint):
        for l in range(1, psint):
            f.write(str(random.randrange(0, 2)))
        f.write('\n')
| dogatuncay/GA_Twister_Hadoop | docs/seedpopulation.py | seedpopulation.py | py | 671 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 23,
"usage_type": "call"
}
] |
44034009675 | import sys
from collections import deque
# BOJ 13913 "Hide and Seek 4": BFS from position n to k on the integer
# line [0, 100000] using moves x-1, x+1 and 2*x; prints the minimum number
# of moves and one shortest path.
n, k = map(int, sys.stdin.readline().split())
m = 100001
visited = [-1] * m  # visited[i]: BFS distance from n to i, -1 if unreached
check = [0] * m     # check[i]: predecessor of i on the BFS tree
q = deque()
visited[n] = 0
q.append(n)
def path(x):
    # Walk predecessors back from x to n, then print the path in order.
    move = []
    temp = x
    for _ in range(visited[x] + 1):
        move.append(temp)
        temp = check[temp]
    print(*move[::-1])
while q:
    x = q.popleft()
    if x == k:
        print(visited[x])
        path(x)
        break
    else:
        for i in [2*x, x-1, x+1]:
            if (0 <= i <= (m-1)) and visited[i] == -1:
                visited[i] = visited[x] + 1
                q.append(i)
                check[i] = x # record the predecessor for path reconstruction
| GluteusStrength/Algorithm | 백준/Gold/13913. 숨바꼭질 4/숨바꼭질 4.py | 숨바꼭질 4.py | py | 675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
40056650925 | from decouple import config
import os
class HTML_file:
    """Builds and saves a static HTML report embedding the assumption-check
    PNGs (distro plots and network-measure plots) produced for one
    group/measure pair.  Paths are rooted at the 'root' setting from the
    environment (python-decouple)."""
    def __init__(self, group_name: str, measure: str) -> None:
        self.group_name = group_name
        self.measure = measure
        # Directory where the plotting step saved its PNG figures.
        self.png_dir = os.path.join(config('root'), 'work/visual_graphs')
    def save_directory(self) -> str:
        """Directory the HTML report is written into."""
        return os.path.join(config('root'), 'results/assumptions')
    def html_markup(self) -> str:
        """Return the complete HTML document as a single string."""
        html_head_css = """
        <!DOCTYPE html>
        <html>
        <head>
        <style type="text/css" media="screen">
        body{background-color: azure; font-family: "Arial", Arial, Sans-serif;}
        </style>
        """
        html_body = f"""
        <title>Assumption graphs for {self.group_name}</title>
        </head>
        <body>
        <h1>Distro plots for average clustering, average shortest path length, assortativity, modularity and efficieny</h1>
        <centre>
        <img src="{self.png_dir}/distro_plots_for_{self.group_name}_for_{self.measure}.png">
        </centre>
        <h1>Network measure plots</h1>
        <center>
        <img src="{self.png_dir}/network_measures_plot_for_{self.group_name}_for_{self.measure}.png">
        </center>
        </body>
        </html>
        """
        return html_head_css + html_body
    def save_to_file(self) -> None:
        """Write the report to <save_directory>/<group>_assumptions_for_<measure>.html."""
        directory = self.save_directory() + f'/{self.group_name}_assumptions_for_{self.measure}.html'
        html = self.html_markup()
        with open(directory, 'w') as file:
            file.write(html)
class Group_differences_HTML_file:
    """Builds and saves a static HTML report of group-difference graphs:
    one global-measure plot plus a cluster plot per group (two or three
    groups supported)."""
    def __init__(self, groups: dict, measure: str) -> None:
        # Directory where the plotting step saved its PNG figures.
        self.png_dir = os.path.join(config('root'), 'work/visual_graphs')
        self.groups = [key for key in groups]
        self.measure = measure
    def save_directory(self) -> str:
        """Directory the HTML report is written into."""
        return os.path.join(config('root'), 'results/group_differences')
    def img_src(self):
        """Return the <h2>/<img> markup for each group's cluster plot
        (a third group's section is appended when present)."""
        img_src = f"""
        <h2> Cluster plot for {self.groups[0]} </h2>
        <img src="{self.png_dir}/cluster_plots_for_{self.groups[0]}_for_{self.measure}.png">
        <h2> Cluster plot for {self.groups[1]} </h2>
        <img src="{self.png_dir}/cluster_plots_for_{self.groups[1]}_for_{self.measure}.png">
        """
        if len(self.groups) == 3:
            img_src = img_src + f"""
            <h2> Cluster plot for {self.groups[2]} </h2>
            <img src="{self.png_dir}/cluster_plots_for_{self.groups[2]}_for_{self.measure}.png">
            """
        return img_src
    def html_markup(self) -> str:
        """Return the complete HTML document as a single string."""
        img = self.img_src()
        html_head_css = """
        <!DOCTYPE html>
        <html>
        <head>
        <style type="text/css" media="screen">
        body{background-color: azure; font-family: "Arial", Arial, Sans-serif;}
        </style>
        """
        html_body = f"""
        <title>Group difference graphs</title>
        </head>
        <body>
        <h1>Global measure plots for each group</h1>
        <centre>
        <img src = "{self.png_dir}/global_measure_plots_for_{self.measure}.png"
        </centre>
        <h1>Cluster plots</h1>
        <centre>
        {img}
        </centre>
        </body>
        </html>
        """
        return html_head_css + html_body
    def save_to_file(self) -> None:
        """Write the report to <save_directory>/group_differences_for_<measure>.html."""
        directory = self.save_directory() + f'/group_differences_for_{self.measure}.html'
        html = self.html_markup()
        with open(directory, 'w') as file:
            file.write(html)
| WMDA/SCN | SCN/visualization/create_html_view.py | create_html_view.py | py | 3,593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "decouple.config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
73952795302 | from scipy.special import comb
"""
This file contains a set of functions to practice your
probabilities skills.
It needs to be completed with "vanilla" Python, without
help from any library -- except for the bin_dist function.
"""
def head_tails(p, n):
    """Probability of seeing n heads in a row with a coin of head-probability p.

    Independent tosses multiply, so the answer is simply p raised to n.

    :param p: probability of a head on a single toss
    :param n: number of consecutive heads required (int)
    :return: probability of n heads in a row
    :rtype: float
    """
    streak_probability = p ** n
    return streak_probability
head_tails(0.5,1)
def bin_dist(n, p, x):
    """Binomial pmf: probability of exactly x successes in n trials.

    :param n: number of trials (int)
    :param p: probability of success on each trial
    :param x: number of successes (int)
    :return: P(X = x) for X ~ Binomial(n, p)
    :rtype: float
    :raise ValueError: if x > n
    """
    if x > n:
        # BUG FIX: the original *returned* the ValueError class instead of
        # raising it, even though the docstring (and the commented-out line)
        # specify that a ValueError must be raised.
        raise ValueError('x cannot be higher than n')
    return comb(n, x, exact=True) * (p ** x) * ((1 - p) ** (n - x))
# Smoke-test calls; tolerate the ValueError that bin_dist raises for x > n
# (per its documented contract) so importing this module never crashes.
for _bd_args in ((10, .5, 6), (3, .7, 4)):
    try:
        bin_dist(*_bd_args)
    except ValueError:
        pass
def fact(x):
    """Return x! as a float.

    :param x: a non-negative integer
    :return: the factorial of x
    :rtype: float
    :raise ValueError: if x is negative
    """
    if x < 0:
        raise ValueError('x is negative')
    # Multiply up the factors 2..x; 0! and 1! fall out as 1 naturally.
    result = 1
    factor = 2
    while factor <= x:
        result *= factor
        factor += 1
    return float(result)
def bin_cdf(n, p, x):
    """Binomial CDF: probability of at most x successes in n trials.

    BUG FIX: the original returned (n choose x) / p**n, which is not a CDF
    (and returned 0 for p == 0, where the correct CDF is 1 for any x >= 0).
    It also returned the ValueError class instead of raising it.

    :param n: number of trials (int)
    :param p: probability of success on each trial
    :param x: number of successes (int)
    :return: P(X <= x) for X ~ Binomial(n, p)
    :rtype: float
    :raise ValueError: if x > n
    """
    if x > n:
        raise ValueError('x cannot be higher than n')
    # Sum the pmf over 0..x.  Note 0 ** 0 == 1 in Python, so the degenerate
    # p == 0 and p == 1 cases need no special handling.
    total = 0.0
    for k in range(x + 1):
        total += comb(n, k, exact=True) * (p ** k) * ((1 - p) ** (n - k))
    return total
# Smoke-test calls; tolerate the ValueError that bin_cdf raises for x > n
# (per its documented contract) so importing this module never crashes.
for _cdf_args in ((3, 1, 1), (3, 0, 1), (3, 0.7, 2), (3, 0.7, 4), (4, 0.2, 3),
                  (4, 0.4, 2), (4, 0.8, 3), (5, 0.2, 2), (5, 0.2, 3),
                  (5, 0.4, 2), (5, 0.4, 3), (5, 0.8, 3), (5, 0.2, 2),
                  (6, 0.2, 3), (6, 0.4, 2), (6, 0.4, 3), (6, 0.8, 3)):
    try:
        bin_cdf(*_cdf_args)
    except ValueError:
        pass
| ashokpanigrahi88/ashokpython | Exercises/Pre-Maths/probabilities.py | probabilities.py | py | 2,880 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.special.comb",
"line_number": 52,
"usage_type": "call"
}
] |
74647027303 | import logging
from igraph import Graph as iGraph
from parvusdb import GraphDatabase
from parvusdb.utils.code_container import DummyCodeContainer
from parvusdb.utils.match import Match, MatchException
from .node_matcher import VectorNodeMatcher
_logger = logging.getLogger()
class GraphMatcher:
    """Tests whether a small pattern graph occurs inside a given igraph graph.

    Matching is delegated to a parvusdb MATCH query using a vector-based
    node matcher built from the supplied metrics.
    """
    def __init__(self, small_graph, metric, relations_metric):
        # small_graph: pattern whose str() form is spliced into the MATCH query.
        self.small_graph = small_graph
        self.metric = metric
        self.relations_metric = relations_metric

    def apply(self, g):
        """Return True if the pattern matches anywhere in `g` (an igraph.Graph)."""
        if not isinstance(g, iGraph):
            raise TypeError("GraphRule.apply_to_graph() needs an igraph.Graph as an argument")
        db = GraphDatabase(g, node_matcher=VectorNodeMatcher(self.metric, self.relations_metric))
        rule = 'MATCH ' + str(self.small_graph) + ' RETURN __RESULT__;'
        lst = db.query(rule)
        # A non-empty first result means at least one match was found.
        if lst and lst[0]:
            return True
        return False
class GraphWeightedMatch:
    """Scores how well a graph matches a stored big graph.

    The score is the sum of dot products between the vectors of each pair of
    matched vertices; 0 is returned when no match exists.
    """
    def __init__(self, big_graph, metric, relations_metric):
        # big_graph: wrapper object whose underlying igraph graph is at ._g.
        self.big_graph = big_graph
        self.metric = metric
        self.relations_metric = relations_metric

    def apply(self, g):
        """Return the summed vector dot-product weight of the match of `g` against the big graph, or 0."""
        if not isinstance(g, iGraph):
            raise TypeError("GraphRule.apply_to_graph() needs an igraph.Graph as an argument")
        match = Match(matching_code_container=DummyCodeContainer(),
                      node_matcher=VectorNodeMatcher(self.metric, self.relations_metric, gradient=False))
        # NOTE(review): reaches into the private ._g attribute of the wrapper.
        big_graph = self.big_graph._g
        try:
            matching_variables = match.get_variables_substitution_dictionaries(g, big_graph)
            w = 0
            for k, v in matching_variables[0].items():
                # NOTE(review): 'name' == v evaluates to a bool, so vs['name' == v]
                # indexes vertex 0 (or 1) rather than looking the vertex up by
                # name — presumably vs.find(name=v) / vs.find(name=k) was
                # intended; confirm against parvusdb's variable mapping.
                rindex = big_graph.vs['name' == v]['vector']
                lindex = g.vs['name' == k]['vector']
                w += self.metric.indices_dot_product(lindex, rindex)
            return w
        except MatchException as e:
            _logger.warning('Cannot find matching variables %s', str(e))
            return 0
| fractalego/dgt | dgt/graph/graph_matcher.py | graph_matcher.py | py | 1,990 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "igraph.Graph",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "parvusdb.GraphDatabase",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "node_matche... |
10916034260 | import abc
import sys
from importlib import import_module
from typing import TypeVar
import pytest
from sphinx.ext.autodoc.mock import _MockModule, _MockObject, mock
def test_MockModule():
    """Arbitrary attribute chains on a _MockModule resolve to _MockObjects whose reprs are the dotted paths."""
    mock = _MockModule('mocked_module')
    assert isinstance(mock.some_attr, _MockObject)
    assert isinstance(mock.some_method, _MockObject)
    assert isinstance(mock.attr1.attr2, _MockObject)
    assert isinstance(mock.attr1.attr2.meth(), _MockObject)
    assert repr(mock.some_attr) == 'mocked_module.some_attr'
    assert repr(mock.some_method) == 'mocked_module.some_method'
    assert repr(mock.attr1.attr2) == 'mocked_module.attr1.attr2'
    assert repr(mock.attr1.attr2.meth) == 'mocked_module.attr1.attr2.meth'
    assert repr(mock) == 'mocked_module'
def test_MockObject():
    """_MockObject supports attribute chains, subclassing, and parametrized (generic) subclassing."""
    mock = _MockObject()
    assert isinstance(mock.some_attr, _MockObject)
    assert isinstance(mock.some_method, _MockObject)
    assert isinstance(mock.attr1.attr2, _MockObject)
    assert isinstance(mock.attr1.attr2.meth(), _MockObject)
    # subclassing: a mock attribute can serve as a base class, and the
    # subclass keeps its own docstring and real methods
    class SubClass(mock.SomeClass):
        """docstring of SubClass"""
        def method(self):
            return "string"
    obj = SubClass()
    assert SubClass.__doc__ == "docstring of SubClass"
    assert isinstance(obj, SubClass)
    assert obj.method() == "string"
    assert isinstance(obj.other_method(), SubClass)
    # parametrized type: subscripting a mock attribute (generic base) also works
    T = TypeVar('T')
    class SubClass2(mock.SomeClass[T]):
        """docstring of SubClass"""
    obj2 = SubClass2()
    assert SubClass2.__doc__ == "docstring of SubClass"
    assert isinstance(obj2, SubClass2)
def test_mock():
    """mock() makes unknown modules (and their submodules) importable inside the context only."""
    modname = 'sphinx.unknown'
    submodule = modname + '.submodule'
    assert modname not in sys.modules
    with pytest.raises(ImportError):
        import_module(modname)
    with mock([modname]):
        import_module(modname)
        assert modname in sys.modules
        assert isinstance(sys.modules[modname], _MockModule)
        # submodules are also mocked
        import_module(submodule)
        assert submodule in sys.modules
        assert isinstance(sys.modules[submodule], _MockModule)
    # leaving the context removes the mock again
    assert modname not in sys.modules
    with pytest.raises(ImportError):
        import_module(modname)
def test_mock_does_not_follow_upper_modules():
    """Mocking a deep module path must not implicitly make its parent packages importable."""
    with mock(['sphinx.unknown.module']):
        with pytest.raises(ImportError):
            import_module('sphinx.unknown')
@pytest.mark.skipif(sys.version_info < (3, 7), reason='Only for py37 or above')
def test_abc_MockObject():
    """A mock attribute can be mixed into an abc-based hierarchy without tripping abstractmethod checks."""
    mock = _MockObject()
    class Base:
        @abc.abstractmethod
        def __init__(self):
            pass
    class Derived(Base, mock.SubClass):
        pass
    obj = Derived()
    assert isinstance(obj, Base)
    assert isinstance(obj, _MockObject)
    assert isinstance(obj.some_method(), Derived)
def test_mock_decorator():
    """Mock attributes used as decorators must return the decorated object unchanged (docstrings intact)."""
    mock = _MockObject()
    @mock.function_deco
    def func():
        """docstring"""
    class Foo:
        @mock.method_deco
        def meth(self):
            """docstring"""
    @mock.class_deco
    class Bar:
        """docstring"""
    assert func.__doc__ == "docstring"
    assert Foo.meth.__doc__ == "docstring"
    assert Bar.__doc__ == "docstring"
| borntocodeRaj/sphinx_configuration | tests/test_ext_autodoc_mock.py | test_ext_autodoc_mock.py | py | 3,242 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sphinx.ext.autodoc.mock.mock",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sphinx.ext.autodoc.mock._MockModule",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sphinx.ext.autodoc.mock._MockObject",
"line_number": 13,
"usage_type": "... |
37225738128 | import nilearn
from nilearn.plotting import plot_carpet, plot_glass_brain, plot_anat, plot_stat_map, plot_design_matrix, plot_epi, plot_contrast_matrix
from nilearn import image, masking, input_data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from nilearn.glm.first_level import make_first_level_design_matrix, FirstLevelModel
from nilearn.glm import threshold_stats_img
from nilearn.reporting import get_clusters_table, make_glm_report
from nilearn.input_data import NiftiLabelsMasker, NiftiMasker, NiftiSpheresMasker
from nilearn import datasets
from nilearn.regions import RegionExtractor
from nilearn import plotting
from nilearn import surface
from nilearn.decoding import Decoder
def get_events_file(events_home_dir, subject_id, run):
    """Load the events table for one subject/run as a DataFrame.

    Reads <events_home_dir>sub-<subject_id>/run-<NN>/events.csv and strips the
    'Unnamed: 0' index column the CSV export left behind.

    :param events_home_dir: directory prefix containing the sub-* folders
    :param subject_id: subject identifier (str)
    :param run: run number (int); zero-padded to two digits in the path
    :return: pandas.DataFrame of events without the 'Unnamed: 0' column
    """
    events_file = events_home_dir + 'sub-' + subject_id + '/run-' + str(run).zfill(2) + '/events.csv'
    events = pd.read_csv(events_file)
    # BUG FIX: drop('Unnamed: 0', 1) passed `axis` positionally, which was
    # deprecated in pandas 1.x and removed in pandas 2.0 (TypeError).
    events = events.drop(columns='Unnamed: 0')
    return events
def fit_glm(subject_id, run):
    """Fit a first-level GLM (SPM HRF, AR(1) noise, cubic polynomial drift) for one run.

    NOTE(review): get_events_file() is declared with three parameters
    (events_home_dir, subject_id, run) but is called here with only two —
    as written this raises TypeError; confirm where the events directory
    should come from.  Also relies on module-level globals `fmri_image`
    and `mask_image` (lists indexed by run-1) not defined in this file.
    """
    events = get_events_file(subject_id, run)
    tr = 1.25  # repetition time in seconds
    n_scans = image.load_img(fmri_image[run-1]).shape[-1]
    frame_times = np.arange(n_scans) * tr
    # NOTE(review): these "motion" regressors are random noise, not real
    # realignment parameters — presumably a placeholder; verify before use.
    motion = np.cumsum(np.random.randn(n_scans, 6), 0)
    add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    design_matrix = make_first_level_design_matrix(frame_times, events,
                                          drift_model='polynomial', drift_order=3,
                                          add_regs=motion, add_reg_names=add_reg_names,
                                          hrf_model='spm')
    fmri_glm_model = FirstLevelModel(t_r=1.25, minimize_memory=False, noise_model='ar1', mask_img=mask_image[run-1])
    fmri_glm_model.fit(fmri_image[run-1], design_matrices=design_matrix)
    print("run done: ", run)
    return fmri_glm_model, design_matrix
def compute_no_diff_contrasts(glm, run):
    """Compute one z-map per event condition for a run (no between-condition differencing).

    NOTE(review): reads the module-level global `subject_id` (not derived from
    `glm`) and calls get_events_file() with two arguments although it is
    declared with three — confirm.  `glm` is expected to be a list of fitted
    FirstLevelModel objects indexed by run-1.

    :return: (z_maps, condition labels, session labels) — parallel lists
    """
    z_maps = list()
    conditions_label = list()
    sessions_label = list()
    events = get_events_file(subject_id, run)
    conditions = events.trial_type.unique()
    for condition_ in conditions:
        # One contrast per unique trial_type value in the events table.
        z_maps.append(glm[run-1].compute_contrast(condition_))
        conditions_label.append(condition_)
        sessions_label.append(str(run))
    return z_maps, conditions_label, sessions_label
def get_movement_minus_wait_contrasts(design_matrices, glms):
    """Per run, contrast the four movement regressors against the wait condition.

    Hard-coded to runs 1..10 and to this experiment's movement_*/wait column
    names; raises KeyError if a design matrix lacks any of them.

    :return: (z-maps, labels) — one entry per run
    """
    z_map_movement_minus_wait = list()
    movement_minus_wait_labels = list()
    for run in range(1, 11):
        # One-hot contrast vector per design-matrix column.
        contrast_matrix = np.eye(design_matrices[run-1].shape[1])
        basic_contrasts = dict([(column, contrast_matrix[i])
                            for i, column in enumerate(design_matrices[run-1].columns)])
        movement_contrasts = basic_contrasts['movement_153'] + basic_contrasts['movement_207'] + basic_contrasts['movement_45'] + basic_contrasts['movement_99'] - basic_contrasts['wait']
        z_map_movement_minus_wait.append(glms[run-1].compute_contrast(movement_contrasts))
        movement_minus_wait_labels.append('Movement minus wait, run_' + str(run).zfill(2))
    return z_map_movement_minus_wait, movement_minus_wait_labels
def get_prep_minus_wait_contrasts(design_matrices, glms):
    """Per run, contrast the eight go/nogo preparation regressors against the wait condition.

    Hard-coded to runs 1..10 and to this experiment's *_prep/wait column
    names; raises KeyError if a design matrix lacks any of them.

    :return: (z-maps, labels) — one entry per run
    """
    z_map_prep_minus_wait = list()
    prep_minus_wait_labels = list()
    for run in range(1, 11):
        # One-hot contrast vector per design-matrix column.
        contrast_matrix = np.eye(design_matrices[run-1].shape[1])
        basic_contrasts = dict([(column, contrast_matrix[i])
                            for i, column in enumerate(design_matrices[run-1].columns)])
        movement_contrasts = basic_contrasts['go_153_prep'] + basic_contrasts['go_207_prep'] + basic_contrasts['go_45_prep'] + basic_contrasts['go_99_prep'] + basic_contrasts['nogo_153_prep'] + basic_contrasts['nogo_207_prep'] + basic_contrasts['nogo_45_prep'] + basic_contrasts['nogo_99_prep'] - basic_contrasts['wait']
        z_map_prep_minus_wait.append(glms[run-1].compute_contrast(movement_contrasts))
        prep_minus_wait_labels.append('Prep minus wait, run_' + str(run).zfill(2))
    return z_map_prep_minus_wait, prep_minus_wait_labels
def plot_contrast_maps(z_maps, z_map_no, condition_label, display_mode = 'ortho', correction = 'bonferroni', alpha = 0.05):
    """Threshold one z-map with the requested multiple-comparison correction and plot it.

    BUG FIX: the printed message and the plot title hard-coded
    "Bonferroni"/"p<0.05" regardless of the `correction` and `alpha`
    arguments actually applied; both now report the real settings.

    NOTE(review): relies on a module-level global `masker` which is fit to
    the selected map as a side effect and returned alongside the plot.

    :param z_maps: list of z-map images
    :param z_map_no: index of the map to plot
    :param condition_label: list of titles, parallel to z_maps
    :param display_mode: nilearn display mode for plot_stat_map
    :param correction: height_control method for threshold_stats_img
    :param alpha: significance level
    :return: (plot display object, the fitted global masker)
    """
    _, threshold = threshold_stats_img(
        z_maps[z_map_no], alpha= alpha, height_control=correction)
    print('%s-corrected, p<%s threshold: %.3f' % (correction, alpha, threshold))
    plot_map = plot_stat_map(z_maps[z_map_no], threshold = threshold,
                black_bg=True, display_mode=display_mode, draw_cross=False,
                title = condition_label[z_map_no] + ' '+ correction + ' corrected, p<' + str(alpha))
    masker.fit(z_maps[z_map_no])
    plotting.show()
    return plot_map, masker
| tejas-savalia/fmri_project | util.py | util.py | py | 4,890 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nilearn.image.load_img",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "nilearn.image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
... |
18915198493 | import pytest
from src.maximum_twin_sum_of_a_linked_list import Solution
from src.utils.linked_list import to_linked_list
@pytest.mark.parametrize(
    "in_list,expected",
    (
        ([5, 4, 2, 1], 6),
        ([4, 2, 2, 3], 7),
        ([1, 100_000], 100_001),
    ),
)
def test_solution(in_list, expected):
    """Twin sum pairs node i with node n-1-i; expected is the maximum pair sum."""
    head = to_linked_list(in_list)
    assert Solution().pairSum(head) == expected
| lancelote/leetcode | tests/test_maximum_twin_sum_of_a_linked_list.py | test_maximum_twin_sum_of_a_linked_list.py | py | 398 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "src.utils.linked_list.to_linked_list",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "src.maximum_twin_sum_of_a_linked_list.Solution",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 7,
"usage_ty... |
26030329346 | import os
import sys
#モジュール探索パス追加
p = ['../','../../']
for e in p: sys.path.append(os.path.join(os.path.dirname(__file__),e))
import discord
from discord.ext import commands
from discord import app_commands
from cmmod.json_module import open_json
from cmmod.time_module import get_currenttime
from cmmod.discord_module import CustomEmbed
from usermenu.cmfunc.userfunc import UserDBFunc
from usermenu.cmfunc.teamfunc import TeamDBFunc
from usermenu.error.usermenu_error import UserMenuError
#app_commandsで使うデータ
cmddata = open_json(r'menu/usermenu/data/apply_team.json')
cmdname = cmddata["name"]
cmddesp = cmddata["description"]
cmddesb = cmddata["describe"]
cmdcho = cmddata["choices"]
cmdcho_apt = [app_commands.Choice(name=c["name"],value=c["value"]) for c in cmdcho["apptype"]]
cmdcho_lgid = [app_commands.Choice(name=c["name"],value=c["value"]) for c in cmdcho["league"]]
cmddix = cmddata["dataindex"]
class ApplyTeam(commands.Cog):
    """Slash-command cog that registers or updates a team record in the database."""
    def __init__(self, client):
        self.client = client
        self.userdbfunc = UserDBFunc()
        self.teamdbfunc = TeamDBFunc()
        self.custembed = CustomEmbed()
    @app_commands.command(name=cmdname, description=cmddesp)
    @app_commands.describe(apptype=cmddesb["apptype"],teamname=cmddesb["teamname"],league=cmddesb["league"],
leader=cmddesb["leader"],member1=cmddesb["member1"],member2=cmddesb["member2"],member3=cmddesb["member3"],member4=cmddesb["member4"])
    @app_commands.choices(apptype=cmdcho_apt,league=cmdcho_lgid)
    @app_commands.guild_only()
    async def apply_team_command(self, interaction:discord.Interaction, apptype:app_commands.Choice[int], teamname:str, league:app_commands.Choice[int],
leader:discord.User, member1:discord.User, member2:discord.User, member3:discord.User, member4:discord.User=None) -> None:
        author = interaction.user # command invoker
        try:
            # [Defer] show the "thinking" state while we hit the database
            await interaction.response.defer(thinking=True)
            # [Team lookup] fetch any existing team led by the invoker
            raw_teamdata = await self.teamdbfunc.get_teamdata(leaderid=author.id)
            teamdata = raw_teamdata[0]
            # [ERROR] application type is "register" but team data already exists
            if apptype.value == 0 and teamdata:
                error = "既にいずれかのチームのリーダーとなっています。新たにチーム登録する場合は、情報更新でリーダーを変更後行ってください"
                raise UserMenuError(error)
            # [ERROR] application type is "update" but no team data exists
            elif apptype.value == 1 and not teamdata:
                error = "いずれかのチームのリーダーであることが確認できませんでした。チームリーダーであるにもかかわらず、このエラーメッセージが送信された場合は運営まで連絡してください"
                raise UserMenuError(error)
            # [User verification] every listed member must already be registered
            members = [leader, member1, member2, member3, member4]
            for member in members:
                if member != None:
                    raw_userdata = await self.userdbfunc.get_userdata(member.id)
                    userdata = raw_userdata[0]
                    # [ERROR] the specified user is not registered in the database
                    if not userdata:
                        error = f"指定ユーザ{member.mention}の情報がデータベースに登録されていません。ユーザ情報を登録行ってからチーム情報登録・更新を行ってください"
                        raise UserMenuError(error)
            # [Member 4] optional fifth member; stored as empty string when absent
            if member4 == None: MEMBER4 = ''
            else: MEMBER4 = str(member4.id)
            # [Timestamp] keep the original registration date on updates
            currenttime = get_currenttime()
            if apptype.value == 0: REGISTRATION_DATE = currenttime
            else: REGISTRATION_DATE = teamdata[9]
            # [Build record] assemble the team row to be stored
            postdata = {"チーム名":teamname, "リーグ":league.name, "リーダー":str(leader.id), "メンバー1":str(member1.id), "メンバー2":str(member2.id),
                        "メンバー3":str(member3.id), "メンバー4":MEMBER4, "登録日時":REGISTRATION_DATE, "最終更新日時":currenttime}
            # [POST] persist the team record and write an audit-log entry
            await self.teamdbfunc.post_teamdata(leaderid=leader.id, postdata=postdata, apptype=apptype.value)
            await self.teamdbfunc.log_teamdata(author=author, postdata=postdata, currenttime=currenttime, apptype=apptype.value)
        except UserMenuError as e:
            await interaction.followup.send(content=author.mention, embed=self.custembed.error(description=str(e)))
        except Exception as e:
            error = "コマンド実行中に予期せぬエラーが発生しました。このエラーが発生した場合は運営まで連絡をお願いします。\nエラー内容:"+str(e)
            print(error)
            await interaction.followup.send(content=author.mention,embed=self.custembed.error(description=error))
        else:
            # [Ack] tell the invoker the request was accepted
            success = f"{author.mention}からリーダー:{leader.mention}のチーム情報{apptype.name}を受け付けました。データベースからの完了通知をお待ちください。通知が無かった場合は運営まで連絡をお願いします"
            await interaction.followup.send(content=author.mention, embed=self.custembed.success(description=success))
async def setup(client: commands.Bot):
    """Extension entry point: register the ApplyTeam cog with the bot."""
    await client.add_cog(ApplyTeam(client))
| rich-bread/bmdb_bot | menu/usermenu/apply_team.py | apply_team.py | py | 5,802 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
4014271672 | # 문제 출처 : https://programmers.co.kr/learn/courses/30/lessons/12973
from collections import deque
def solution(s):
    """Repeatedly remove adjacent equal pairs; return 1 if everything cancels, else 0."""
    # Classic stack scan: a character cancels the matching character sitting
    # on top of the stack, otherwise it is pushed and waits for a partner.
    leftover = []
    for ch in s:
        if leftover and leftover[-1] == ch:
            leftover.pop()
        else:
            leftover.append(ch)
    return 1 if not leftover else 0
| ThreeFive85/Algorithm | Programmers/level2/removePair/remove_pair.py | remove_pair.py | py | 442 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
23425927839 | import os
from discord.ext import commands, tasks
import motor.motor_asyncio
import util.util
from util.help import HelpCommand
from util.setup import load_text, load_data, mod_data, get_files
import discord
import itertools
bot = commands.Bot(
command_prefix="!", # Change to desired prefix
case_insensitive=True, # Commands aren't case-sensitive
intents=discord.Intents.all()
)
bot.help_command = HelpCommand(bot)
STATUS = itertools.cycle(["a", "b"])
mongo_client = motor.motor_asyncio.AsyncIOMotorClient("") # need to create a database, i used mongo atlas
bot.db = mongo_client.bhv
bot.author_id = 656373241144934420 # Change to your discord id!!!
@bot.event
async def on_ready(): # When the bot is ready
    # Load every cog module discovered under cogs/ before serving events.
    for extension in files:
        print(extension)
        await bot.load_extension(extension) # Loads every extension.
    # Cache frequently-used handles on the bot object for other cogs.
    bot.hdb = bot.get_cog("Database")
    bot.util = util.util.setup(bot)
    bot.embed = discord.Embed(color=discord.Colour.from_str("#f77394"))
    mod_data(bot)
    # Start the rotating-status background task.
    change_status.start()
    print("I'm in")
    print(bot.user) # Prints the bot's username and identifier
@tasks.loop(seconds=10)
async def change_status():
    # Rotate the bot's "playing" status through the STATUS cycle every 10 s.
    await bot.change_presence(activity=discord.Game(next(STATUS)))
files = [file.replace("/", ".")[:-3] for file in get_files("cogs", [])]
bot.t = load_text()
bot.d = load_data()
token = "" # your own token
bot.run(token) # Starts the bot
| gritor111/bhv-bot | bot.py | bot.py | py | 1,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.Intents.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dis... |
6367637162 | # Look for #IMPLEMENT tags in this file.
'''
All models need to return a CSP object, and a list of lists of Variable objects
representing the board. The returned list of lists is used to access the
solution.
For example, after these three lines of code
csp, var_array = caged_csp_model(board)
solver = BT(csp)
solver.bt_search(prop_FC, var_ord)
var_array[0][0].get_assigned_value() should be the correct value in the top left
cell of the FunPuzz puzzle.
The grid-only models do not need to encode the cage constraints.
1. binary_ne_grid (worth 10/100 marks)
- A model of a FunPuzz grid (without cage constraints) built using only
binary not-equal constraints for both the row and column constraints.
2. nary_ad_grid (worth 10/100 marks)
- A model of a FunPuzz grid (without cage constraints) built using only n-ary
all-different constraints for both the row and column constraints.
3. caged_csp_model (worth 25/100 marks)
- A model built using your choice of (1) binary binary not-equal, or (2)
n-ary all-different constraints for the grid.
- Together with FunPuzz cage constraints.
'''
from cspbase import *
import itertools
def binary_ne_grid(fpuzz_grid):
    """FunPuzz grid model using binary not-equal constraints on rows and columns.

    BUG FIX: the original built the row constraints but appended them only to a
    dead `constraint_values` list, never to the list added to the CSP, so the
    returned model contained column constraints only.

    :param fpuzz_grid: board spec; fpuzz_grid[0][0] is the board size
    :return: (csp, variables) where variables is the size x size Variable grid
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    size_of_board = initial_variables["size"]
    cell_values = initial_variables["cell_values"]
    variables = initial_variables["variables"]
    # Every ordered pair of distinct values satisfies a binary not-equal constraint.
    satisfying_tuples = [(v1, v2) for v1 in cell_values for v2 in cell_values if v1 != v2]
    constraints = []
    for i in range(size_of_board):
        # Each unordered pair (j, k) within line i yields one row and one column constraint.
        for j in range(size_of_board):
            for k in range(j + 1, size_of_board):
                row_constraint = Constraint(cell_name(i, j) + ", " + cell_name(i, k),
                                            [variables[i][j], variables[i][k]])
                row_constraint.add_satisfying_tuples(satisfying_tuples)
                constraints.append(row_constraint)
                col_constraint = Constraint(cell_name(j, i) + ", " + cell_name(k, i),
                                            [variables[j][i], variables[k][i]])
                col_constraint.add_satisfying_tuples(satisfying_tuples)
                constraints.append(col_constraint)
    csp = CSP('binary_ne', [variable for rows in variables for variable in rows])
    for constraint in constraints:
        csp.add_constraint(constraint)
    return (csp, variables)
def nary_ad_grid(fpuzz_grid):
    """FunPuzz grid model with one n-ary all-different constraint per row and per column.

    BUG FIX: the original indexed `variables` with 1-based cell values (an
    IndexError on the last row/column) and let the column scope accumulate
    across iterations, so the constraints did not model the grid.

    :param fpuzz_grid: board spec; fpuzz_grid[0][0] is the board size
    :return: (csp, variables) where variables is the size x size Variable grid
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    size_of_board = initial_variables["size"]
    cell_values = initial_variables["cell_values"]
    variables = initial_variables["variables"]
    # The permutations of 1..n are exactly the all-different assignments of a line.
    satisfying_tuples = list(itertools.permutations(cell_values))
    constraints = []
    for i in range(size_of_board):
        row_constraint = Constraint("row_" + str(i), variables[i])
        row_constraint.add_satisfying_tuples(satisfying_tuples)
        constraints.append(row_constraint)
        col_constraint = Constraint("col_" + str(i),
                                    [variables[r][i] for r in range(size_of_board)])
        col_constraint.add_satisfying_tuples(satisfying_tuples)
        constraints.append(col_constraint)
    csp = CSP('nary_ad', [variable for rows in variables for variable in rows])
    for constraint in constraints:
        csp.add_constraint(constraint)
    return (csp, variables)
def caged_csp_model(fpuzz_grid):
    """FunPuzz model: binary not-equal grid constraints plus one constraint per cage.

    Each cage row of fpuzz_grid is [cell, ..., target, operation], where a cell
    is the 1-based two-digit code row*10 + col and operation is 0 '+', 1 '-',
    2 '/', 3 '*' (see check_operation).

    BUG FIX: the original iterated cages with range(1, board_size), silently
    dropping cages whenever the puzzle has more than board_size - 1 of them;
    it now iterates every cage row.  Duplicate satisfying tuples produced by
    the per-ordering subtraction/division scans are no longer emitted.

    :return: (csp, variables) where variables is the size x size Variable grid
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    cell_values = initial_variables["cell_values"]
    # Start from the binary not-equal grid model and add the cage constraints.
    csp, variables = binary_ne_grid(fpuzz_grid)
    for cage_index in range(1, len(fpuzz_grid)):
        cage = list(fpuzz_grid[cage_index])
        operation, target, cell_codes = cage[-1], cage[-2], cage[:-2]
        # Decode the two-digit 1-based cell codes into Variable objects.
        scope = [variables[(code // 10) - 1][(code % 10) - 1] for code in cell_codes]
        constraint = Constraint("Operation: " + str(operation) + "Target:" + str(target), scope)
        op = check_operation(operation)
        satisfying = []
        for assignment in itertools.product(tuple(cell_values), repeat=len(scope)):
            if op['addition']:
                ok = sum(assignment) == target
            elif op['subtraction']:
                # Any ordering of the cells may realise the difference.
                ok = any(assignment[i] - sum(assignment[:i] + assignment[i + 1:]) == target
                         for i in range(len(assignment)))
            elif op['multiplication']:
                product = 1.0
                for value in assignment:
                    product *= value
                ok = product == target
            elif op['division']:
                # Any ordering of the cells may realise the quotient.
                ok = False
                for i in range(len(assignment)):
                    quotient = float(assignment[i])
                    for value in assignment[:i] + assignment[i + 1:]:
                        quotient = quotient / value
                    if quotient == target:
                        ok = True
                        break
            else:
                # Unknown operation code: no tuple can satisfy this cage.
                ok = False
            if ok:
                satisfying.append(assignment)
        constraint.add_satisfying_tuples(satisfying)
        csp.add_constraint(constraint)
    return (csp, variables)
def check_operation(operation):
    """Map a FunPuzz cage operation code to a dictionary of boolean flags.

    Codes: 0 -> addition, 1 -> subtraction, 2 -> division, 3 -> multiplication.
    Unknown codes leave every flag False.
    """
    code_to_name = {0: 'addition', 1: 'subtraction', 2: 'division', 3: 'multiplication'}
    flags = {name: False for name in ('addition', 'subtraction', 'multiplication', 'division')}
    if operation in code_to_name:
        flags[code_to_name[operation]] = True
    return flags
def cell_name(row, column):
    """Canonical variable/constraint name for the board cell at (row, column)."""
    return f"Row: {row} Col: {column}"
def get_initial_variables(fpuzz_grid):
    """Build the board metadata and the Variable grid for a FunPuzz board.

    Returns a dict with:
      "size"        - board dimension (fpuzz_grid[0][0])
      "cell_values" - the legal cell values 1..size
      "variables"   - size x size grid of Variable objects, each with a
                      fresh copy of the full 1..size domain
    """
    size_of_board = fpuzz_grid[0][0]
    cell_values = list(range(1, size_of_board + 1))
    variables = [[Variable(cell_name(r, c), list(range(1, size_of_board + 1)))
                  for c in range(size_of_board)]
                 for r in range(size_of_board)]
    return {"size": size_of_board, "cell_values": cell_values, "variables": variables}
| eliasvolonakis/CSC384CourseWork | Constraint Satisfaction Assignment/puzzle_csp.py | puzzle_csp.py | py | 8,422 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.product",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "itert... |
40727926981 | import requests
STEAMDB_SALE_URL = "https://steamdb.info/sales/?merged=true&cc=cn"
class SaleRequester:
    """Fetches the SteamDB merged-sales page HTML."""

    def __init__(self):
        # Browser-like headers: a bare python-requests User-Agent is likely to
        # be rejected by the site.
        self.fake_header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'UTF-8,*;q=0.5',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'Accept-Language': 'en-US,en;q=0.8',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
        }

    def get_sale_page(self):
        """Return the sale page HTML, or None when the request times out.

        BUG FIX: `content` was unbound when the except branch ran, so a
        timeout raised UnboundLocalError instead of being swallowed.  A
        finite timeout is now set so a stalled connection cannot hang
        forever, and requests' own Timeout exception is handled (requests
        raises requests.exceptions.Timeout, not the builtin TimeoutError).
        """
        content = None
        try:
            content = requests.get(STEAMDB_SALE_URL, headers=self.fake_header,
                                   timeout=30).text
        except (TimeoutError, requests.exceptions.Timeout):
            # Best-effort fetch: the caller receives None on timeout.
            pass
        return content
if __name__ == "__main__":
s = SaleRequester()
print(s.get_sale_page()) | KIDJourney/sbeamhub | crawler/requester.py | requester.py | py | 779 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
}
] |
19499350817 | from sqlite3 import *
from typing import Union
class DB:
    """Thin async-style wrapper around the bot's SQLite `users` table.

    A single connection/cursor pair is opened against app.db for the
    object's lifetime and closed when the wrapper is garbage collected.
    """

    def __init__(self):
        self.db = connect("app.db")
        self.cr = self.db.cursor()
        # Create the backing table on first use.
        self.cr.execute("create table if not exists `users`(user_id INTEGER, username TEXT, chat_name TEXT, "
                        "chat_username TEXT, chat_id INTEGER)")

    def __del__(self):
        # Best-effort close on garbage collection.
        self.db.close()

    async def get_user(
        self, user_id=None, username=None
    ) -> list:
        """Return every row matching user_id (preferred) or, failing that, username."""
        if user_id:
            self.cr.execute("select * from `users` where user_id = ?", (user_id,))
        else:
            self.cr.execute("select * from `users` where username = ?", (username,))
        return self.cr.fetchall()

    async def get_all(
        self
    ) -> list:
        """Return every row in the users table."""
        self.cr.execute("select * from users")
        return self.cr.fetchall()

    async def insert_user(
        self,
        user_id: Union[str, int],
        username: str,
        chat_id: Union[str, int],
        chat_name: str,
        chat_username: str
    ):
        """Insert a (user, chat) membership row unless that exact pair already exists."""
        self.cr.execute("select * from `users` where user_id = ? and chat_id = ?", (user_id, chat_id))
        if self.cr.fetchall():
            # Already recorded for this chat — keep the table duplicate-free.
            return
        self.cr.execute(
            "insert into `users`(user_id, username, chat_id, chat_name, chat_username) values(?, ?, ?, ?, ?)",
            (user_id, username, chat_id, chat_name, chat_username)
        )
        self.db.commit()
| cytoo/TgGroupScanner | bot/mods/sql.py | sql.py | py | 1,485 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 35,
"usage_type": "name"
}
] |
36407109164 | """
This script is used for 'writing' songs in musical notation form, with recording of key downs and ups being used to define the time durations and delays of notes. Notes are shown line by line and a single key on your keyboard can be used to set the timing for each note in a song - of course, you'll need to know the song by heart.
All that is needed is the pynput module installed and a file containing your music notes. You will need to pass the music notes file as an argument. To start, call this script from python in the terminal - e.g.:
$ python3 musical_timing_recorder.py <path to your music notes file>
Once all the music notes have been 'played' by you, an out file gets written to in the same directory as your music notes file. This text file will contain 3 separate sections - the notes, the note durations, and the note gaps.
"""
from pynput import keyboard
import time
import os,sys
DEBUG = False
KILL = False
# Keyboard key handling
key_held = False
def key_down(key):
global key_held,KILL
key_held = True
if key == keyboard.Key.esc: KILL = True
def key_up(key):
global key_held
key_held = False
kl = keyboard.Listener(on_press=key_down,on_release=key_up,suppress=True)
kl.start()
# Execute
if __name__ == "__main__":
# Init
if not DEBUG:
args = sys.argv
if os.path.isfile(args[1]): notes_file = args[1]
else: raise FileNotFoundError("Error: missing positional argument - music notes file")
else:
notes_file = "<INSERT MUSIC NOTES FILE HERE>" # Ignore this
with open(notes_file,"r") as f:
x = f.readlines()
lines = [i.strip() for i in x]
# Clear screen and show greeter
os.system("cls" if os.name == "nt" else "clear")
print(f"Musical Timing Recorder ({os.path.basename(notes_file)})\nPress 'Esc' to quit\n\n")
print("When you are ready, Maestro...\n")
# Recording loop system
line_count = len(lines)
durations,gaps = [],[]
t,time_d,time_g = 0,0,0
recording_started = False
for n,line in enumerate(lines):
# Print out the current sheet line for the user
print(f"\nLine: {n+1}\n \t{line}\n> \t",end="")
if not recording_started:
# Wait for user to press key for first time
while not key_held: continue
if KILL: exit()
recording_started = True
# Notes per line recording loop start
notes = line.split()
note_count = len(notes)
for i,note in enumerate(notes):
while not key_held: continue # Failsafe
if key_held:
print(note+" ",end="",flush=True)
t = time.time()
while key_held: time_d = (time.time() - t)
if KILL: exit()
durations.append(time_d)
if not key_held:
# Abruptly stop and don't record gap for last note
if n+1 >= line_count and i+1 >= note_count:
gaps.append(0)
break
t = time.time()
while not key_held: time_g = (time.time() - t)
if KILL: exit()
gaps.append(time_g)
durations.append("\n")
gaps.append("\n")
print("\n")
# Finished recording - cleanup and write data to output file
out_file = os.path.basename(notes_file).split(".")[0]+"_output.txt"
melody = []
for l in lines:
clean = l.split()
x = [f"\"{n}\"," for n in clean]
x.append("\n")
melody.extend(x)
for i in range(len(gaps)):
if gaps[i] == "\n":continue
durations[i] = f"{durations[i]:.3F},"
gaps[i] = f"{gaps[i]:.3F},"
with open(f"{out_file}","w") as f:
f.write("Melody:\n")
f.writelines(melody)
f.write("Durations:\n")
f.writelines(durations)
f.write("Gaps:\n")
f.writelines(gaps)
print(f"Finished - Data written to ./{out_file}") | cwylycode/dumptruck | python/musical_timing_recorder.py | musical_timing_recorder.py | py | 4,076 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pynput.keyboard.Key",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.Listener",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyn... |
22703377702 | from __future__ import print_function
import sys
import xml.etree.ElementTree as ET
import os
# Make the bundled pycparser importable regardless of the working directory
sys.path.extend(['.', '..', './pycparser/'])
from pycparser import c_parser, c_ast
filehandle = open('dummy3.c', 'r')
#filehandle = open('reverse_noinclude.c', 'r')
#filehandle = open('reverse.c', 'r')
text = ''.join(filehandle.readlines())
#print(text)
# create a pycparser
parser = c_parser.CParser()
ast = parser.parse(text, filename='<none>')
# generate the XML tree (dumped to disk, then re-parsed with ElementTree)
ast.show()
codeAstXml = open('code_ast.xml','w')
ast.showXml(codeAstXml)
codeAstXml.close()
tree = ET.parse('code_ast.xml')
root = tree.getroot()
# For each kernel name: the list of its variables and their C types
kernelsVars=[]
kernelsTyps=[]
kernelNames=['__ungenerated_kernel_function_region__0']
for kn in kernelNames:
    # go through all functions in the code (C/C++ code)
    # find the function which the kernel is called there
    # then find the type of all variables
    for func in root.findall(".//FuncDef"):
        kernelFound=0
        kernelVars=[]
        kernelTyps=[]
        print('we have found '+str(len(func.findall(".//FuncCall/ID")))+' function calls')
        for fcall in func.findall(".//FuncCall/ID"):
            if str(fcall.get('uid')).strip()==kn.strip():
                kernelFound=1
                #print(fcall.get('uid'))
        if kernelFound==1:
            print('<'+kn+'> is found in <'+func.find('Decl').get('uid')+'>')
            # go through all declarations and find the variables
            funcBody=func.find('Compound')
            for var in funcBody.findall(".//Decl"):
                # single variable Decl; type string gets one '*' per pointer level
                kernelVars.append(var.get('uid'))
                kernelTyps.append(var.find('.//IdentifierType').get('uid')+((len(var.findall(".//PtrDecl")))*'*'))
                # print('< '+var.get('uid')+' > is defined as <'+var.find('.//IdentifierType').get('uid')+((len(var.findall(".//PtrDecl")))*'*')+'>')
            kernelsVars.append(kernelVars)
            kernelsTyps.append(kernelTyps)
            break
# Report variable -> type for every kernel found
for i in range(0,len(kernelsVars)):
    var=kernelsVars[i]
    typ=kernelsTyps[i]
    print('=======> kernel #'+str(i)+':')
    for g in range(0,len(var)):
        print(var[g]+'->'+typ[g])
os.remove('code_ast.xml')
| lashgar/ipmacc | src/auxilaries/generate_oacc_ast.py | generate_oacc_ast.py | py | 2,204 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "sys.path.extend",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pycparser.c_parser.CParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pycparser.c_pa... |
40843782656 | from apivk.function_vk import vkinder
from datetime import date
from database.script_bd import check_users_vk, check_search_results, save_users_vk, save_search_results
from botvk.function_botvk import write_msg, send_photo
# определение статуса отношений
def find_relation(search_user_id):
    """Return the VK 'relation' status code from the user's profile.

    Returns None when the field is absent or (fix) when the API response
    is empty -- previously an empty response raised NameError.
    """
    relation = None  # fix: defined even when the response contains no items
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'relation' not in item:
            relation = None
        elif 'relation' in item:
            relation = item['relation']
    return relation
# определение пола
def find_sex(search_user_id):
    """Return the *opposite* VK sex code for use as a search filter.

    VK encodes sex as 1=female, 2=male, 0=unspecified; the result is flipped
    (2 -> 1, 1 -> 2, else 0) so the search looks for the opposite sex.
    Returns None when the field is absent or (fix) the response is empty --
    previously an empty response raised NameError.
    """
    any_sex = None  # fix: defined even when the response contains no items
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'sex' not in item:
            any_sex = None
        elif 'sex' in item:
            sex = item['sex']
            if sex == 2:
                any_sex = 1
            elif sex == 1:
                any_sex = 2
            else:
                any_sex = 0
    return any_sex
# определение города
def find_city(search_user_id):
    """Return the VK city id from the user's profile, or None when absent.

    Fix: previously an empty API response left `city` unbound and raised
    NameError; it now defaults to None.
    """
    city = None  # fix: defined even when the response contains no items
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'city' not in item:
            city = None
        elif 'city' in item:
            city = item['city']['id']
    return city
# определение возраста
def find_age(search_user_id):
    """Compute the user's age in full years from the VK 'bdate' field.

    VK returns 'D.M' when the birth year is hidden and 'D.M.YYYY' otherwise;
    only the full form (len >= 8) is usable.  Returns None when the date is
    missing/partial, or (fix) when the response is empty -- previously an
    empty response raised NameError.
    """
    age = None  # fix: defined even when the response contains no items
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'bdate' not in item:
            age = None
        elif 'bdate' in item:
            bdate = item['bdate']
            if len(bdate) >= 8:
                day, mon, year = bdate.split('.')
                day = int(day)
                mon = int(mon)
                year = int(year)
                today = date.today()
                # Subtract one when the birthday has not happened yet this year
                age = today.year - year - ((today.month, today.day) < (mon, day))
            else:
                age = None
    return age
# отправить ссылку на человека и топ-3 фото
def choose_photo(age_from, age_to, relation, sex, city, search_user_id, user_id):
    """Search VK for candidates and send the first new match (link + top photos).

    Skips closed profiles and profiles from other cities, records every shown
    candidate in the DB so the same person is never suggested twice, and stops
    after sending one candidate.  On incomplete search parameters (TypeError
    from the API layer) the user is asked for more data.
    """
    try:
        search = vkinder.users_search(age_from, age_to, relation, sex, city)
        people_id = search['response']['items']
        for people in people_id:
            try:
                id_people = int(people['id'])
                first_name = people['first_name']
                last_name = people['last_name']
                status = people['is_closed']
                city_ = people['city']['id']
            except KeyError:
                # fix: skip incomplete profiles instead of falling through
                # with stale values from the previous iteration (or an
                # outright NameError on the very first one)
                continue
            # Only open profiles from the searcher's own city qualify
            # (the redundant duplicate `city_ == city` check was removed)
            if status is False and city_ == city:
                if check_users_vk(id_people) is None:
                    save_users_vk(id_people, first_name, last_name)
                if check_search_results(search_user_id, id_people) is None:
                    save_search_results(search_user_id, id_people)
                    write_msg(user_id, f'Зацени {first_name} {last_name} https://vk.com/id{id_people}')
                    for i in vkinder.photo_user(id_people):
                        owner_id = i[1][2]
                        photo_id = i[1][3]
                        photo = f'photo{owner_id}_{photo_id}'
                        send_photo(user_id, photo)
                    write_msg(user_id, 'Для продолжения поиска повторно введите команду "поиск"')
                    break
    except TypeError:
        write_msg(user_id, 'Не хватает данных для поиска')
# проверка полноты информации о пользователе
def check(age, city, sex, relation, user_id):
    """Verify the user's profile data is complete before allowing a search.

    When all four fields (age, city, sex, relation) are known, prompt the
    user to start searching; otherwise do nothing.
    """
    filled = sum(1 for field in (age, city, sex, relation) if field is not None)
    if filled == 4:
        write_msg(user_id, 'Отлично! Для начала поиска введите команду "поиск"')
| beloglazovpl/VKinder | function_find/func.py | func.py | py | 4,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "apivk.function_vk.vkinder.about_user",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apivk.function_vk.vkinder",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "apivk.function_vk.vkinder.about_user",
"line_number": 20,
"usage_type": "cal... |
17729946892 | import argparse
import glob
import logging
import os
import random
import timeit
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
#BertConfig,
#BertForQuestionAnswering,
#BertForSequenceClassification,
#BertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
get_linear_schedule_with_warmup,
openqa_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
)
from transformers.data.processors.openqa import OpenQAResult, OpenQAV1Processor, OpenQAV2Processor
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## added by Jong-Hoon Oh
import torchtext
import cnn_utils
import train_utils
# Limit intra-op threading and pin thread affinity before numpy/torch spin up
# their thread pools (the KMP_* settings apply when running with Intel OpenMP).
NUM_PARALLEL_EXEC_UNITS = 4
os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
os.environ['KMP_WARNINGS'] = 'off'

logger = logging.getLogger(__name__)

# Every pretrained shortcut name known for the supported model families.
ALL_MODELS = sum(
    (tuple(conf.pretrained_config_archive_map.keys()) for conf in (AlbertConfig, RobertaConfig,)),
    (),
)

# Maps --model_type to its (config, QA-model, tokenizer) classes.
MODEL_CLASSES = {
    "albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
    "roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),
}
## added by Jong-Hoon Oh
class TTDataset(torchtext.data.Dataset):
    '''Dummy single-example Dataset so torchtext's build_vocab can be run
    over a plain list of words.'''
    def __init__(self, words, fields):
        # Only the 'text' field matters for vocabulary construction.
        data_fields = [('text', fields['text'])]
        ex = (words,)
        examples = [torchtext.data.Example.fromlist(ex, data_fields)]
        super(TTDataset, self).__init__(examples, data_fields)
## added by Jong-Hoon Oh
def load_cnn_model_and_vocab(args, cnn_file, words):
    """Load the pretrained CNN checkpoint and return (model, token->index map).

    `words` is the token list the vocabulary is built from; `cnn_file` is the
    checkpoint path.  Requires --emb_file and --min_freq to be set.
    """
    assert args.emb_file and args.min_freq
    fields = cnn_utils.get_fields()
    # Build the vocabulary over the given word list (wrapped in a dummy dataset)
    train_utils.build_vocab(args, fields, TTDataset(words, fields), [])
    vocab = fields['text'].vocab
    model, pre_fields = train_utils.load_cnn_model(args, cnn_file, fields)
    # NOTE: the stoi returned is the *loaded* model's field vocabulary
    # (pre_fields), not the freshly built one above
    return model, pre_fields['text'].vocab.stoi
def set_seed(args):
    """Seed every RNG (python's random, numpy, torch) from args.seed.

    When GPUs are in use (args.n_gpu > 0) the CUDA generators are seeded too.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
    """Detach *tensor* from the autograd graph, move it to CPU, and return
    its contents as a (possibly nested) python list."""
    detached = tensor.detach()
    return detached.cpu().tolist()
# modified by Jong-Hoon Oh
# DATA PROCESSING PART
# - Converting input examples to cached examples
# - cnn_stoi: vocab.stoi for the cnn model
def load_and_cache_examples(args, filename, tokenizer, cnn_stoi, evaluate=False, output_examples=False):
    """Convert an OpenQA input file into model features and cache them on disk.

    The cache is split across four directories under args.feat_dir
    (dset/pdset/feat/exset).  Nothing is returned; later stages reload the
    cached tensors.  `output_examples` is accepted for API symmetry with the
    training script but not used here.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()

    bert_token_str = "ot0"
    input_dir = args.feat_dir if args.feat_dir else "."
    # Cache-file name encodes: input stem, transformer model, cnn stem/model,
    # tokenisation tag and max sequence length
    fstem = list(filter(None,filename.split("/"))).pop()
    fstem = fstem.split(".")[0]
    fstem = fstem
    cached_file = "cached_{}_{}_{}_{}_{}_{}".format(
        fstem,
        list(filter(None, args.model_name_or_path.split("/"))).pop(),
        args.cnn_stem,
        list(filter(None, args.cnn_model.split("_"))).pop(),
        bert_token_str,
        str(args.max_seq_length),
    )
    # split the input data into data, positive_data, feature, and example
    dset_dir = input_dir + '/dset'
    pdset_dir = input_dir + '/pdset'
    feat_dir = input_dir + '/feat'
    exset_dir = input_dir + '/exset'
    cached_dset_file = os.path.join(dset_dir,cached_file)
    cached_feat_file = os.path.join(feat_dir,cached_file)
    cached_pdset_file = os.path.join(pdset_dir,cached_file)
    cached_exset_file = os.path.join(exset_dir,cached_file)
    if evaluate:
        logger.info("Specified cached file %s for dev or predict files", cached_dset_file)
    else:
        logger.info("Specified cached file %s for train files", cached_dset_file)

    # Init features and dataset from cache if it exists
    if os.path.exists(cached_dset_file) and not args.overwrite_cache:
        logger.info("Feature files already exist: %s", cached_dset_file)
    else:
        logger.info("Creating features from dataset file at %s", input_dir) # input_dir="." by defaults
        # if no predict file for evaluation or no train file for training
        if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):
            try:
                import tensorflow_datasets as tfds
            except ImportError:
                raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
            if args.version_2_with_negative:
                logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
            tfds_examples = tfds.load("openqa")
            examples = OpenQAV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
        else:
            # The main part of data processing in our OpenQA experiments
            processor = OpenQAV1Processor()
            if evaluate:
                # initializer
                examples = processor.get_dev_examples(args.data_dir, filename=filename)
            else:
                # initializer
                examples = processor.get_train_examples(args.data_dir, filename=filename)
        features, dataset, possible_dataset = openqa_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            cnn_stoi=cnn_stoi,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
            return_dataset="pt", # "pt" represents 'pytorch dataset'
            threads=args.threads,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_dset_file)
            if evaluate:
                # Evaluation caches dataset + features + raw examples (needed for scoring)
                logger.info("dataset:{}".format(len(dataset)))
                torch.save({"dataset": dataset}, cached_dset_file)
                logger.info("features")
                torch.save({"features": features}, cached_feat_file)
                logger.info("examples")
                torch.save({"examples": examples}, cached_exset_file)
            else:
                # Training caches the full dataset plus the answer-possible subset
                logger.info("dataset:{}".format(len(dataset)))
                torch.save({"dataset": dataset}, cached_dset_file)
                logger.info("possible_dataset:{}".format(len(possible_dataset)))
                torch.save({"possible_dataset": possible_dataset}, cached_pdset_file)

    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
def main():
    """CLI entry point: parse arguments, load the CNN and transformer models,
    then pre-process (feature-extract and cache) the train/dev/predict files.

    No training happens here -- the script only builds the cached datasets via
    load_and_cache_examples().
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--prep_vocab_file",
        default=None,
        type=str,
        help="preprocessed_vocab_file with the train/dev/predict file. see make_openqa_cnn_vocab.py",
    )
    parser.add_argument(
        "--emb_file",
        default=None,
        type=str,
        help="The embedding vector file used for cnn",
    )
    parser.add_argument(
        "--cnn_model",
        default=None,
        type=str,
        help="The cnn model file name",
    )
    parser.add_argument(
        "--cnn_stem",
        default="enwiki",
        type=str,
        help="stem for cnn models for caching (different vocab.stoi for each model)",
    )
    parser.add_argument(
        "--min_freq",
        default=5,
        type=int,
        help="min freq. for unknown words",
    )
    parser.add_argument(
        "--emb_dim",
        default=300,
        type=int,
        help="dim for representation of fastText",
    )
    # Other parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the .json files for the task."
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--train_file",
        default=None,
        type=str,
        help="The input training file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--predict_file",
        default=None,
        type=str,
        help="The input evaluation file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--dev_file",
        default=None,
        type=str,
        help="The input development file. If a data dir is specified, will look for the file there"
        + "If no data dir or devel files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--feat_dir",
        default="",
        type=str,
        help="Where do you want to store the processed data whose features were extracted from the input data",
    )
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded.",
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help="When splitting up a long document into chunks, how much stride to take between chunks.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help="The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help="If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
    args = parser.parse_args()

    # The CNN side of the model is mandatory for this pre-processing step
    assert args.prep_vocab_file is not None
    assert args.cnn_model is not None
    assert args.cnn_stem is not None
    assert args.emb_dim is not None
    assert args.emb_file is not None
    if (not os.path.exists(args.prep_vocab_file)):
        raise ValueError(
            "prep_vocab_file ({}) does not exist. Check the --prep_vocab_file option.".format( args.prep_vocab_file) )
    if (not os.path.exists(args.cnn_model)):
        raise ValueError(
            "cnn_model ({}) does not exist. Check the --cnn_model option.".format( args.cnn_model) )
    if (not os.path.exists(args.emb_file)):
        raise ValueError(
            "emb_file ({}) does not exist. Check the --emb_file option.".format( args.emb_file) )
    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        # The barrier starts
        torch.distributed.barrier()

    # added by Jong-Hoon Oh
    # - Load cnn model and pre-processed vocab.
    # - prep_vocab_file: see vocab/
    prep_tokens = torch.load(args.prep_vocab_file)
    all_tokens = prep_tokens['tokens']
    cnn_model, cnn_stoi = load_cnn_model_and_vocab(args, args.cnn_model, all_tokens)
    cnn_dim = len(cnn_model.args.filter_widths) * cnn_model.args.filter_size
    args.cnn_dim = cnn_dim

    args.model_type = args.model_type.lower()
    # "albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # Extra fields consumed by the customised QA model (CNN + transformer fusion)
    config.num_of_TIERs = 3
    config.cnn_dim = args.cnn_dim
    config.emb_dim = args.emb_dim
    config.cnn_model = args.cnn_model
    config.cnn_stem = args.cnn_stem
    # tokenizer_class: AlbertTokenizer
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # model_class: AlbertForQuestionAnswering
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),  # ckpt: tensorflow file, pt: pytorch file
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    ###########
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        # The barrier ends
        torch.distributed.barrier()

    model.to(args.device)
    cnn_model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex

            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")

    # Pre-process every supplied split; results are cached on disk by
    # load_and_cache_examples()
    if args.train_file is not None:
        load_and_cache_examples(args, args.train_file, tokenizer, cnn_stoi, evaluate=False, output_examples=False)
    if args.predict_file is not None:
        load_and_cache_examples(args, args.predict_file, tokenizer, cnn_stoi, evaluate=True, output_examples=True)
    if args.dev_file is not None:
        load_and_cache_examples(args, args.dev_file, tokenizer, cnn_stoi, evaluate=True, output_examples=True)


if __name__ == "__main__":
    main()
| nict-wisdom/bertac | src/examples.openqa/run_openqa_preprocess.py | run_openqa_preprocess.py | py | 18,692 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
... |
30397052082 | from os.path import join
from typing import Optional
from dagger.dag_creator.airflow.operator_creator import OperatorCreator
from dagger.dag_creator.airflow.operators.redshift_sql_operator import (
RedshiftSQLOperator,
)
class RedshiftLoadCreator(OperatorCreator):
    """Creates an Airflow RedshiftSQLOperator that COPYs S3 data into Redshift.

    The assembled SQL runs in order: drop leftover tmp table, create the
    tmp/target table, adjust column types and sort keys, delete/truncate old
    rows, COPY from S3, and finally swap the tmp table in atomically.
    """

    ref_name = "redshift_load"

    def __init__(self, task, dag):
        super().__init__(task, dag)
        # join(..., "") guarantees the S3 input prefix ends with a slash
        self._input_path = join(self._task.inputs[0].rendered_name, "")
        self._input_s3_bucket = self._task.inputs[0].bucket
        self._input_s3_prefix = self._task.inputs[0].path
        self._output_schema = self._task.outputs[0].schema
        self._output_table = self._task.outputs[0].table
        self._output_schema_quoted = f'"{self._output_schema}"'
        self._output_table_quoted = f'"{self._output_table}"'
        # When a tmp-table prefix is configured the load is staged and then swapped in
        self._tmp_table = (
            f"{self._task.tmp_table_prefix}_{self._output_table}"
            if self._task.tmp_table_prefix
            else None
        )
        self._tmp_table_quoted = f'"{self._tmp_table}"' if self._tmp_table else None
        self._copy_ddl_from = self._task.copy_ddl_from
        self._alter_columns = self._task.alter_columns
        self._sort_keys = self._task.sort_keys

    @staticmethod
    def _read_sql(directory, file_path):
        """Read an SQL template file relative to *directory* and return its text."""
        full_path = join(directory, file_path)
        with open(full_path, "r") as f:
            sql_string = f.read()
        return sql_string

    def _get_create_table_cmd(self) -> Optional[str]:
        """Return the CREATE TABLE statement for the tmp or target table, if any.

        Precedence: explicit DDL template > LIKE copy_ddl_from > LIKE target.
        """
        if self._tmp_table and self._task.create_table_ddl:
            ddl = self._read_sql(
                self._task.pipeline.directory, self._task.create_table_ddl
            )
            return ddl.format(
                schema_name=self._output_schema_quoted,
                table_name=self._tmp_table_quoted,
            )
        if self._tmp_table and self._copy_ddl_from:
            return (
                f"CREATE TABLE {self._output_schema_quoted}.{self._tmp_table_quoted}"
                f"(LIKE {self._copy_ddl_from})"
            )
        elif self._tmp_table:
            return (
                f"CREATE TABLE {self._output_schema_quoted}.{self._tmp_table_quoted}"
                f"(LIKE {self._output_schema_quoted}.{self._output_table_quoted})"
            )
        elif self._task.create_table_ddl:
            ddl = self._read_sql(
                self._task.pipeline.directory, self._task.create_table_ddl
            )
            return ddl.format(
                schema_name=self._output_schema_quoted,
                table_name=self._output_table_quoted,
            )
        elif self._copy_ddl_from:
            # fix: use the quoted table name, consistent with every other branch
            return (
                f"CREATE TABLE IF NOT EXISTS {self._output_schema_quoted}.{self._output_table_quoted}"
                f"(LIKE {self._copy_ddl_from})"
            )
        return None

    def _get_sort_key_cmd(self) -> Optional[str]:
        """Return an ALTER ... SORTKEY statement for the tmp table, if configured."""
        sort_key_cmd = None
        if self._sort_keys:
            sort_key_cmd = (
                f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
                f"ALTER COMPOUND SORTKEY({self._sort_keys})"
            )
        return sort_key_cmd

    def _get_delete_cmd(self) -> Optional[str]:
        """Return the statement clearing old rows before the COPY.

        Incremental loads delete rows matching delete_condition; full reloads
        without a tmp table truncate the target instead.
        """
        if self._task.incremental:
            # fix: the two fragments used to concatenate without a separator,
            # yielding `..."table"WHERE <cond>` -- invalid SQL
            return (
                f"DELETE FROM {self._output_schema_quoted}.{self._output_table_quoted}\n"
                f"WHERE {self._task.delete_condition}"
            )
        if not self._task.incremental and self._tmp_table is None:
            return f"TRUNCATE TABLE {self._output_schema_quoted}.{self._output_table_quoted}"
        return None

    def _get_load_cmd(self) -> Optional[str]:
        """Return the COPY command that loads the S3 data into the tmp or target table."""
        table_name = self._tmp_table_quoted or self._output_table_quoted
        columns = "({})".format(self._task.columns) if self._task.columns else ""
        extra_parameters = "\n".join(
            [
                "{} {}".format(key, value)
                for key, value in self._task.extra_parameters.items()
            ]
        )
        return (
            f"copy {self._output_schema_quoted}.{table_name}{columns}\n"
            f"from '{self._input_path}'\n"
            f"iam_role '{self._task.iam_role}'\n"
            f"{extra_parameters}"
        )

    def _get_replace_table_cmd(self) -> Optional[str]:
        """Return the transaction that atomically swaps the tmp table in."""
        if self._tmp_table is None:
            return None
        return (
            f"BEGIN TRANSACTION;\n"
            f"DROP TABLE IF EXISTS {self._output_schema_quoted}.{self._output_table_quoted};\n"
            f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
            f"RENAME TO {self._output_table_quoted};\n"
            f"END"
        )

    def _get_alter_columns_cmd(self) -> Optional[str]:
        """Return ALTER COLUMN statements from the "name:type,..." spec, if any."""
        if self._alter_columns is None:
            return None
        alter_column_commands = []
        alter_columns = self._alter_columns.split(",")
        for alter_column in alter_columns:
            [column_name, column_type] = alter_column.split(":")
            alter_column_commands.append(
                f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
                f"ALTER COLUMN {column_name} TYPE {column_type}"
            )
        return ";\n".join(alter_column_commands)

    def _get_drop_tmp_table_cmd(self) -> Optional[str]:
        """Return a DROP for leftovers of a previously failed staged load."""
        if self._tmp_table is None:
            return None
        return f"DROP TABLE IF EXISTS {self._output_schema_quoted}.{self._tmp_table_quoted}"

    def _get_cmd(self) -> str:
        """Assemble all non-empty sub-commands into one semicolon-joined script."""
        raw_load_cmd = [
            self._get_drop_tmp_table_cmd(),
            self._get_create_table_cmd(),
            self._get_alter_columns_cmd(),
            self._get_sort_key_cmd(),
            self._get_delete_cmd(),
            self._get_load_cmd(),
            self._get_replace_table_cmd(),
        ]
        load_cmd = [cmd for cmd in raw_load_cmd if cmd]
        return ";\n".join(load_cmd)

    def _create_operator(self, **kwargs):
        """Build the RedshiftSQLOperator that executes the assembled load script."""
        load_cmd = self._get_cmd()
        redshift_op = RedshiftSQLOperator(
            dag=self._dag,
            task_id=self._task.name,
            sql=load_cmd,
            redshift_conn_id=self._task.postgres_conn_id,
            autocommit=True,
            **kwargs,
        )
        return redshift_op
| siklosid/dagger | dagger/dag_creator/airflow/operator_creators/redshift_load_creator.py | redshift_load_creator.py | py | 6,239 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "dagger.dag_creator.airflow.operator_creator.OperatorCreator",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
... |
9294959555 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--------------------------------------------------------------------
GENETIC ALGORITHMS EXPERIMENTS
Started on the 2018/01/03
theo.alves.da.costa@gmail.com
https://github.com/theolvs
------------------------------------------------------------------------
"""
from scipy import stats
import seaborn as sns
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
import time
from tqdm import tqdm
import itertools
#=============================================================================================================================
# DISTRIBUTIONS
#=============================================================================================================================
class Dist(object):
    """A 1-D normal distribution serving as an individual in the genetic algorithm."""

    def __init__(self, mu=None, std=None, label=None):
        # Random initialisation when parameters are omitted:
        # mu drawn from [-10, 10), std from [0, 10)
        self.mu = np.random.rand() * 20 - 10 if mu is None else mu
        self.std = np.random.rand() * 10 if std is None else std
        self.label = " - " + label if label else ""
        # CDF callable, used as the reference distribution for the KS test
        self.func = lambda value: stats.norm.cdf(value, loc=self.mu, scale=self.std)

    def __repr__(self, markdown=False):
        dollar = "$" if markdown else ""
        prefix = "$\\" if markdown else ""
        return "Norm {}mu={}{}, {}std={}{}{}".format(
            prefix, round(self.mu, 2), dollar,
            dollar, round(self.std, 2), dollar, self.label,
        )

    def plot(self, fill=True):
        # Draw the pdf over a fixed window; optionally shade under the curve
        xs = np.linspace(-20, 20, 100)
        density = stats.norm.pdf(xs, loc=self.mu, scale=self.std)
        plt.plot(xs, density, label=self.__repr__(markdown=True))
        if fill:
            plt.fill_between(xs, 0, density, alpha=0.4)

    def __add__(self, other):
        # Crossover: the offspring averages its parents' parameters
        return Dist(np.mean([self.mu, other.mu]), np.mean([self.std, other.std]))

    def mutate(self, alpha=1):
        # Gaussian perturbation whose magnitude shrinks as alpha (generation) grows;
        # std is floored at 0.5 to keep the distribution well-defined
        scale = 1 / (1 + np.log(1 + alpha))
        self.mu = self.mu + scale * np.random.randn()
        self.std = max(self.std + scale * np.random.randn(), 0.5)

    def fitness(self, x):
        # Kolmogorov-Smirnov goodness of fit: 1 - distance, higher is better
        return 1 - stats.kstest(x, self.func).statistic
class Population(object):
    """A population of Dist individuals evolved via selection/crossover/mutation."""

    def __init__(self, distributions=None, n=100):
        # Either wrap an existing list of Dist or create n random individuals
        if distributions is not None:
            self.distributions = distributions
        else:
            self.distributions = [Dist() for i in range(n)]

    def __getitem__(self, key):
        # Fancy indexing: a tuple/list of indices returns a list of individuals
        if type(key) == tuple or type(key) == list:
            d = []
            for i in key:
                d.append(self.distributions[i])
            return d
        else:
            return self.distributions[key]

    def __iter__(self):
        return iter(self.distributions)

    def __len__(self):
        return len(self.distributions)

    def plot(self, title="Normal distributions", figsize=None):
        # Plot every individual; shade the curves only when the population is
        # small enough to stay readable
        if figsize:
            plt.figure(figsize=figsize)
        plt.title(title)
        fill = len(self) < 5
        for d in self:
            d.plot(fill=fill)
        plt.legend()
        plt.xlabel("x")
        plt.show()

    def evaluate(self, x):
        # Rank individuals by KS fitness against sample x, best first.
        # Returns (indices, fitnesses) sorted together, descending.
        # NOTE: the lambda's x shadows the sample argument x (harmless here).
        fitnesses = [(i, dist.fitness(x)) for i, dist in enumerate(self)]
        indices, fitnesses = zip(*sorted(fitnesses, key=lambda x: x[1], reverse=True))
        return indices, fitnesses

    def selection(self, x, top=0.1):
        # Truncation selection: keep the top fraction of the population
        indices, fitnesses = self.evaluate(x)
        n = int(top * len(fitnesses))
        return indices[:n]

    def crossover(self, indices):
        # Breed every pair of selected parents (order shuffled),
        # capped at the current population size
        combinations = list(itertools.combinations(indices, 2))
        np.random.shuffle(combinations)
        combinations = combinations[:len(self)]
        new_population = []
        for i, j in combinations:
            new_population.append(self[i] + self[j])
        self.distributions = new_population

    def mutate(self, generation=1):
        # Mutation strength decays with the generation number (see Dist.mutate)
        for d in self:
            d.mutate(generation)

    def evolve(self, x, top=0.25, n_generations=20, last_selection=True):
        """Run the GA for n_generations against sample x and return survivors."""
        all_fitnesses = [self.evaluate(x)[1]]
        for generation in tqdm(range(n_generations)):
            indices = self.selection(x, top)
            self.crossover(indices)
            self.mutate(generation)
            indices, fitnesses = self.evaluate(x)
            all_fitnesses.append(fitnesses)
        self._plot_fitnesses(all_fitnesses)
        if last_selection:
            indices = self.selection(x, top)
        # NOTE(review): when last_selection is False this returns the population
        # ordered by the final evaluate() ranking -- confirm that is intended
        return Population(self[indices])

    def _plot_fitnesses(self, fitnesses):
        # Plot mean fitness per generation with a min/max envelope
        sups = []
        infs = []
        means = []
        for step in fitnesses:
            sups.append(np.max(step))
            infs.append(np.min(step))
            means.append(np.mean(step))
        plt.figure(figsize=(10, 6))
        plt.plot(means)
        plt.fill_between(range(len(means)), sups, infs, alpha=0.2)
        plt.xlabel('# Generation')
        plt.ylabel('Fitness')
        plt.legend()
        plt.show()
#=============================================================================================================================
# LOGREG
#=============================================================================================================================
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class LogReg(torch.nn.Module):
    """Bias-free logistic regression whose weights evolve genetically.

    `alpha` scales the gaussian noise added by mutate(); __add__ implements
    crossover by averaging the parents' weight matrices.
    """

    def __init__(self, n_feature, n_output=1, alpha=10e-1):
        # fix: initialise the nn.Module machinery *before* assigning attributes
        super(LogReg, self).__init__()
        self.alpha = alpha
        self.args = n_feature, n_output
        self.out = torch.nn.Linear(n_feature, n_output, bias=False)  # output layer

    def forward(self, x):
        """Sigmoid of the linear layer; accepts any array-like of floats."""
        # modernised: torch.as_tensor replaces the deprecated Variable wrapper,
        # torch.sigmoid replaces the deprecated F.sigmoid
        x = torch.as_tensor(x, dtype=torch.float32)
        return torch.sigmoid(self.out(x))

    def __add__(self, other):
        # Crossover: offspring weights are the element-wise mean of the parents'.
        # fix: propagate alpha so offspring keep the parents' mutation scale
        # (previously it silently reset to the default).
        new = LogReg(*self.args, alpha=self.alpha)
        new.out.weight.data = torch.FloatTensor(0.5 * (self.out.weight.data.numpy() + other.out.weight.data.numpy()))
        return new

    def mutate(self, generation):
        # Add gaussian noise (scaled by alpha) to every weight;
        # `generation` is accepted for API symmetry but currently unused
        out = self.out.weight.data.numpy()
        noise_out = self.alpha * np.random.randn(*out.shape)
        self.out.weight.data = torch.FloatTensor(self.out.weight.data.numpy() + noise_out)

    def evaluate(self, x, y):
        """Return the (unnormalised) log-likelihood of binary labels y under the model."""
        pred = self.forward(x).data.numpy()
        # 10e-9 guards against log(0)
        loss_1 = np.sum(np.log(pred + 10e-9) * y.reshape(-1, 1))
        loss_0 = np.sum(np.log(1 - pred + 10e-9) * (1 - y).reshape(-1, 1))
        return loss_1 + loss_0

    def plot_coefs(self):
        # Visualise the learned weight vector (assumes n_output == 1)
        plt.figure(figsize=(15, 4))
        plt.title("Coefficients")
        plt.axhline(0, c="black")
        plt.plot(self.out.weight.data.numpy()[0])
        plt.xlabel("# Pixel")
        plt.show()
class PopulationLogReg(object):
    """Population of LogReg individuals evolved with a genetic algorithm:
    selection by log-likelihood, crossover by weight averaging, Gaussian
    mutation, benchmarked against sklearn's LogisticRegression."""

    def __init__(self, x, y, regs=None, n=20, top=0.25, **kwargs):
        """Store the dataset and build (or adopt) the population.

        x, y : training inputs / binary labels shared by all members.
        regs : optional pre-built members; when None, ``n`` fresh
               ``LogReg(**kwargs)`` instances are created.
        kwargs are kept so crossover() can top up with fresh members.
        NOTE(review): ``top`` is accepted but never stored here -- the
        fraction is passed to evolve()/selection() instead; confirm intended.
        """
        self.x = x
        self.y = y
        self.kwargs = kwargs
        if regs is None:
            self.regs = [LogReg(**kwargs) for _ in range(n)]
        else:
            self.regs = regs

    def __getitem__(self, key):
        """Single member for an int key; list of members for a tuple/list."""
        if isinstance(key, (tuple, list)):
            return [self.regs[i] for i in key]
        return self.regs[key]

    def __iter__(self):
        return iter(self.regs)

    def __len__(self):
        return len(self.regs)

    def evaluate(self):
        """Score every member on (x, y); return (indices, fitnesses) sorted
        best-first."""
        fitnesses = [(i, element.evaluate(self.x, self.y))
                     for i, element in enumerate(self)]
        indices, fitnesses = zip(*sorted(fitnesses, key=lambda x: x[1],
                                         reverse=True))
        return indices, fitnesses

    def selection(self, top=0.5):
        """Indices of the fittest ``top`` fraction of the population."""
        indices, fitnesses = self.evaluate()
        n = int(top * len(fitnesses))
        return indices[:n]

    def crossover(self, indices):
        """Replace the population with offspring of random pairs of the
        selected members, topping up with fresh LogReg members when there
        are fewer pairs than population slots."""
        combinations = list(itertools.combinations(indices, 2))
        np.random.shuffle(combinations)
        combinations = combinations[:len(self)]
        new_population = [self[i] + self[j] for i, j in combinations]
        if len(new_population) < len(self):
            new_population.extend(
                [LogReg(**self.kwargs)
                 for _ in range(len(self) - len(new_population))])
        self.regs = new_population

    def mutate(self, generation):
        """Mutate every member; ``generation`` is forwarded to each member."""
        for d in self:
            d.mutate(generation)

    def evolve(self, top=0.25, n_generations=20, last_selection=True):
        """Run the genetic loop; returns a new population of the survivors.

        NOTE(review): the constructor kwargs are not forwarded to the
        returned population -- confirm whether that is intended.
        """
        n_fittest = int(top * len(self))
        offsprings = len(list(itertools.combinations(range(n_fittest), 2)))
        # Bug fix: this line previously mislabelled the population size as
        # "Generations".
        print("- Population size : {}".format(len(self)))
        print("- Fittest : {}".format(n_fittest))
        print("- Offsprings : {}".format(offsprings))
        all_fitnesses = [self.evaluate()[1]]
        for generation in tqdm(range(n_generations)):
            indices = self.selection(top)
            self.crossover(indices)
            self.mutate(generation)
            indices, fitnesses = self.evaluate()
            all_fitnesses.append(fitnesses)
        self._plot_fitnesses(all_fitnesses)
        if last_selection:
            indices = self.selection(top)
        return PopulationLogReg(self.x, self.y, regs=self[indices])

    def _plot_fitnesses(self, fitnesses):
        """Plot fitness statistics per generation against a sklearn benchmark.

        Bug fix: ``predict_proba`` returns an (n, 2) array of class
        probabilities; the benchmark log-likelihood now uses only the
        positive-class column, matching the members' evaluate().  Labels are
        attached so ``plt.legend()`` has artists to show.
        """
        from sklearn.linear_model import LogisticRegression
        lr = LogisticRegression()
        lr.fit(self.x, self.y)
        pred_bench = lr.predict_proba(self.x)[:, 1].reshape(-1, 1)
        loss_bench = (np.sum(np.log(pred_bench + 10e-9) * self.y.reshape(-1, 1))
                      + np.sum(np.log(1 - pred_bench + 10e-9) * (1 - self.y).reshape(-1, 1)))
        sups = []
        infs = []
        means = []
        for step in fitnesses:
            sups.append(np.max(step))
            infs.append(np.min(step))
            means.append(np.mean(step))
        plt.figure(figsize=(10, 6))
        plt.plot(means, label='mean fitness')
        plt.fill_between(range(len(means)), sups, infs, alpha=0.2,
                         label='min-max range')
        plt.axhline(loss_bench, label='sklearn benchmark')
        plt.xlabel('# Generation')
        plt.ylabel('Fitness')
        plt.legend()
        plt.show()
| TheoLvs/reinforcement-learning | 4. Chrome Dino/experiments.py | experiments.py | py | 10,024 | python | en | code | 94 | github-code | 36 | [
{
"api_name": "numpy.random.rand",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
27688294553 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Repoint ``image.uploaded_by`` and ``website.owner`` at the project's
    configured user model (``settings.AUTH_USER_MODEL``) and drop the
    now-unused ``SiteOwner`` model.

    NOTE(review): the FKs presumably pointed at SiteOwner before this
    migration -- confirm against main/0001_initial.
    """

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        # Switch both foreign keys to the swappable user model.
        migrations.AlterField(
            model_name='image',
            name='uploaded_by',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='website',
            name='owner',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        # Delete last, after nothing references the model any more.
        migrations.DeleteModel(
            name='SiteOwner',
        ),
    ]
| seanlinxs/content-console | main/migrations/0002_auto_20151201_1309.py | 0002_auto_20151201_1309.py | py | 681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 15,
"usage_type": "call"
},
{... |
1327922070 | #!/usr/bin/env python
#
"""
Name: Jesus Hernandez Partner: Zechariah Neak
Email: jherna83@ucsc.edu Email: zneak@ucsc.edu
ID: 1420330
Course: CMPM146 Game AI
Professor: Daniel G Shapiro
\\\\\\\ Program 4 ///////
Description:
This is a bot that is designed to win at Planet Wars against 5 other bots using
a behavior tree. The root acts as a Selector composite parent that checks through
each Sequence composite child top to bottom, and performs the action for whatever
Sequence child returns true. Each Sequence child only returns true if all its
checks and actions come out as successful.
"""
"""
// There is already a basic strategy in place here. You can use it as a
// starting point, or you can throw it out entirely and replace it with your
// own.
"""
import logging, traceback, sys, os, inspect
# Log to a file named after this script (bt_bot.log), overwritten on each run.
logging.basicConfig(filename=__file__[:-3] +'.log', filemode='w', level=logging.DEBUG)
# Put the parent directory on sys.path so the behavior_tree_bot package and
# planet_wars module resolve when this script is run directly from its folder.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from behavior_tree_bot.behaviors import *
from behavior_tree_bot.checks import *
from behavior_tree_bot.bt_nodes import Selector, Sequence, Action, Check
from planet_wars import PlanetWars, finish_turn
# You have to improve this tree or create an entire new one that is capable
# of winning against all the 5 opponent bots
def setup_behavior_tree():
    """Build and return the bot's behavior tree.

    The root is a Selector that tries each strategy Sequence in order and
    runs the first one whose checks and actions all succeed; a bare invade
    action is the final fallback.
    """
    root = Selector(name='High Level Ordering of Strategies')

    # Leaf actions shared by the strategies below.
    colonize_action = Action(take_defenseless_territory)
    invade_action = Action(attack_with_no_mercy)
    reinforce_action = Action(reinforce_with_vengeance)
    retaliate_action = Action(retaliate_with_fury)

    # Expansion: grab undefended planets, then press the attack.
    expansion = Sequence(name='Expansion Strategy: Manifest Destiny')
    expansion.child_nodes = [colonize_action, invade_action]

    # Defense: when under attack, reinforce and strike back.
    defense = Sequence(name='Security Strategy: Cereberus')
    defense.child_nodes = [Check(if_under_attack), reinforce_action, retaliate_action]

    # Aggression: with the largest fleet, commit to all-out invasion.
    aggression = Sequence(name='Aggressive Strategy: Crush All Remaining Resistance')
    aggression.child_nodes = [Check(have_largest_fleet), invade_action]

    # Strategy order matters: expand first, then aggress, then defend,
    # falling back to a plain invasion if everything else fails.
    root.child_nodes = [expansion, aggression, defense, invade_action.copy()]

    logging.info('\n' + root.tree_to_string())
    return root
# You don't need to change this function
def do_turn(state):
    """Execute one turn of the behavior tree against the given game state.

    Bug fix: the original ignored its ``state`` parameter and read the
    module-level ``planet_wars`` global instead; the argument is now used so
    the function acts on whatever state the caller passes.  It still relies
    on the module-level ``behavior_tree`` built in ``__main__``.
    """
    behavior_tree.execute(state)
if __name__ == '__main__':
    logging.basicConfig(filename=__file__[:-3] + '.log', filemode='w', level=logging.DEBUG)
    behavior_tree = setup_behavior_tree()
    try:
        # Accumulate map lines from stdin; a line starting with "go" marks the
        # end of a turn's state, at which point we parse the map, act, and
        # reset the buffer for the next turn.
        map_data = ''
        while True:
            current_line = input()
            if len(current_line) >= 2 and current_line.startswith("go"):
                planet_wars = PlanetWars(map_data)
                do_turn(planet_wars)
                finish_turn()
                map_data = ''
            else:
                map_data += current_line + '\n'
    except KeyboardInterrupt:
        print('ctrl-c, leaving ...')
    except Exception:
        # Dump the traceback both to stdout (for the game engine) and the log.
        traceback.print_exc(file=sys.stdout)
        logging.exception("Error in bot.")
| JjayaitchH/BehaviorTrees | behavior_tree_bot/bt_bot.py | bt_bot.py | py | 3,634 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
39497612599 | """
Module to handle a local InfoKinds with unique name.
NOTE: this is taken from python-common in nomad-lab-base.
It is copied here to remove the dependency from nomad-lab-base.
For more info on python-common visit:
https://gitlab.mpcdf.mpg.de/nomad-lab/python-common
The author of this code is: Dr. Fawzi Roberto Mohamed
E-mail: mohamed@fhi-berlin.mpg.de
"""
from past.builtins import cmp
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import logging
from ai4materials.external import compact_sha
import json
import os, re
from ai4materials.external.json_support import jsonCompactS, jsonCompactD, jsonIndentD
from io import open
class InfoKindEl(object):
    """Info kind (typically from a file, without shas but with locally unique names)"""
    __slots__ = ["name","description","kindStr","units","superNames","dtypeStr", "repeats", "shape", "extra_args"]

    # Policies for unexpected keyword arguments passed to __init__.
    IGNORE_EXTRA_ARGS = 1
    ADD_EXTRA_ARGS = 2
    RAISE_IF_EXTRA_ARGS = 3

    def __init__(self, name, description, kindStr = "type_document_content", units = None, superNames = None,
            dtypeStr = None, shape = None, extraArgsHandling = ADD_EXTRA_ARGS, repeats = None, **extra_args):
        """Create a meta-info kind.

        name/description identify and document the kind; superNames are the
        names of its parents (the first one is significant, the rest are an
        unordered set); dtypeStr/shape/units/repeats describe stored values.
        """
        if superNames is None:
            superNames = []
        self.name = name
        self.description = description
        self.kindStr = kindStr
        self.superNames = superNames
        self.units = units
        self.dtypeStr = dtypeStr
        # Normalize serialized "null" markers to a real None.
        if dtypeStr in ["None", "null"]:
            self.dtypeStr = None
        self.shape = shape
        self.repeats = repeats
        if extraArgsHandling == self.ADD_EXTRA_ARGS:
            self.extra_args = extra_args
        elif extraArgsHandling == self.IGNORE_EXTRA_ARGS:
            self.extra_args = {}
        else:
            raise Exception("extra arguments to InfoKindEl:" + str(extra_args))

    def __eq__(o1, o2):
        """Structural equality; superNames beyond the first entry compare
        order-insensitively.

        Cleanup: the original wrapped everything in a try/except that only
        re-raised, followed by unreachable code.
        """
        if not (o1.name == o2.name and o1.description == o2.description and o1.kindStr == o2.kindStr and
                o1.units == o2.units and o1.shape == o2.shape):
            return False
        if o1.dtypeStr != o2.dtypeStr:
            return False
        if o1.repeats != o2.repeats:
            return False
        if o1.extra_args != o2.extra_args:
            return False
        if o1.superNames == o2.superNames:
            return True
        if len(o1.superNames) != len(o2.superNames):
            return False
        if o1.superNames[0] != o2.superNames[0]:
            return False
        # First superName is significant; the rest compare as a set.
        return sorted(o1.superNames[1:]) == sorted(o2.superNames[1:])

    def __cmp__(k1, k2):
        """Python-2-style total ordering over all fields (uses past.builtins.cmp)."""
        c = cmp(k1.name, k2.name)
        if c != 0: return c
        c = cmp(k1.kindStr, k2.kindStr)
        if c != 0: return c
        c = cmp(k1.description, k2.description)
        if c != 0: return c
        if len(k1.superNames) > 0:
            if len(k2.superNames) > 0:
                c = cmp(k1.superNames[0], k2.superNames[0])
                if c != 0: return c
                s1 = k1.superNames[1:]
                s2 = k2.superNames[1:]
                c = cmp(s1, s2)
                if c != 0: return c
            else:
                return 1
        elif len(k2.superNames) > 0:
            return -1
        if c != 0: return c
        c = cmp(k1.units, k2.units)
        if c != 0: return c
        c = cmp(k1.dtypeStr, k2.dtypeStr)
        if c != 0: return c
        c = cmp(k1.repeats, k2.repeats)
        if c != 0: return c
        c = cmp(k1.shape, k2.shape)
        if c != 0: return c
        if k1.extra_args == k2.extra_args:
            return 0
        if k1.extra_args is None:
            return 1
        if k2.extra_args is None:
            return -1
        extraK1 = list(k1.extra_args.keys())
        extraK1.sort()
        extraK2 = list(k2.extra_args.keys())
        extraK2.sort()
        i = 0
        while (i < len(extraK1) and i < len(extraK2)):
            kk1 = extraK1[i]
            kk2 = extraK2[i]
            c = cmp(kk1, kk2)
            if c != 0: return c  # use -c ?
            c = cmp(k1.extra_args[kk1], k2.extra_args[kk2])
            if c != 0: return c
            # Bug fix: the original never incremented i, looping forever when
            # the first extra_args key and value compared equal.
            i += 1
        c = cmp(len(extraK1), len(extraK2))
        return c

    def __ne__(o1, o2):
        return not o1.__eq__(o2)

    def prepare(self, env):
        """Canonicalize superNames (all but the significant first entry) by
        their gid so gid computation is order-independent.

        Bug fix: the old ``list.sort(cmpFunc)`` call is Python-2-only; a
        ``key=`` sort on the gid gives the same ordering.
        """
        if len(self.superNames) > 1:
            a = self.superNames[1:]
            a.sort(key = lambda x: env.gidOf(x, precalculatedGid = True))
            self.superNames[1:] = a

    def evalGid(self, env):
        """Compute this kind's gid: 'p' + truncated sha over its serialization."""
        self.prepare(env)
        sha = env.newSha()
        self.serialize(env, sha.update, precalculatedGid = True, selfGid = False)
        return 'p' + sha.b64digest()[:28]

    def serialize(self, env, writeOut, subGids = True, addExtraArgs = True, precalculatedGid = False, selfGid = True):
        """Write the compact JSON form of toDict() through *writeOut*."""
        d = self.toDict(env, subGids = subGids, addExtraArgs = addExtraArgs, precalculatedGid = precalculatedGid, selfGid = selfGid)
        jsonCompactD(d, writeOut)

    def toDict(self, env = None, addExtraArgs = True, inlineExtraArgs = True, selfGid = False, subGids = False, precalculatedGid = False):
        """Dictionary representation; gids require an *env* to be supplied.

        Optional fields (units, dtypeStr, repeats, shape, kindStr) are only
        emitted when set to a non-default value.
        """
        res = {
            "description": self.description,
            "name": self.name,
            "superNames": self.superNames,
        }
        try:
            if self.kindStr != "type_document_content":
                if self.kindStr is None or self.kindStr == "":
                    res["kindStr"] = "MetaType"
                else:
                    res["kindStr"] = self.kindStr
            if env:
                if selfGid:
                    res["gid"] = env.gidOf(self.name, precalculatedGid = precalculatedGid)
                if subGids:
                    res["superGids"] = [ env.gidOf(sName, precalculatedGid = precalculatedGid) for sName in self.superNames ]
            elif subGids or selfGid:
                raise Exception("env required in toDict for subGids or selfGid")
            if self.units is not None:
                res["units"] = self.units
            if self.dtypeStr is not None:
                res["dtypeStr"] = self.dtypeStr
            if self.repeats is not None:
                res["repeats"] = self.repeats
            if self.shape is not None:
                res["shape"] = self.shape
            if addExtraArgs:
                if inlineExtraArgs:
                    res.update(self.extra_args)
                else:
                    res["extraArgs"] = self.extra_args
        except:
            logging.exception("error in InfoKindEl.toDict, partial dict is %s", res)
        return res

    def __unicode__(self):
        """Compact JSON representation (environment-free, so no gids).

        Bug fix: the original referenced the unimported ``StringIO`` module,
        called ``serialize`` without its required ``env`` argument and read a
        nonexistent ``.string`` attribute.
        """
        from io import StringIO
        s = StringIO()
        jsonCompactD(self.toDict(), s.write)
        return s.getvalue()
class RelativeDependencySolver(object):
    """Dependency loader that resolves ``relativePath`` entries against the
    directory of the referencing file, caching one InfoKindEnv per resolved
    absolute path so each dependency is loaded at most once."""

    def __init__(self):
        # Cache mapping resolved absolute path -> loaded InfoKindEnv.
        self.deps = {}

    def __call__(self, infoKindEnv, source, dep):
        """Return the (possibly cached) InfoKindEnv for dependency *dep*."""
        if "relativePath" not in dep:
            raise Exception('Invalid dependency for relativeDependencySolver there must be a relativePath')
        base_path = source.get('path')
        base_dir = os.path.dirname(os.path.abspath(base_path)) if base_path else os.getcwd()
        resolved = os.path.realpath(os.path.join(base_dir, dep['relativePath']))
        cached = self.deps.get(resolved)
        if cached is not None:
            return cached
        # Register the (still empty) env before loading so recursive
        # references to the same path resolve to this instance.
        dep_env = InfoKindEnv(path = resolved, dependencyLoader = infoKindEnv.dependencyLoader)
        self.deps[resolved] = dep_env
        parsed = None
        with open(resolved, encoding="utf-8") as f:
            try:
                parsed = json.load(f)
            except:
                logging.exception("Error while loading dependency %s" % f)
                raise
        if parsed:
            dep_env.fromJsonList(parsed, name = os.path.basename(resolved), source = { 'path': resolved }, dependencyLoad = False)
        return dep_env
class InfoKindEnv(object):
"""An environment keeping locally unique InfoKinds and their gids"""
def __init__(self, infoKinds = None, name = None, description = None, newSha = compact_sha.sha512, gids = None,
dependencyLoader = None, path = None, uri = None, deps = None):
self.newSha = newSha
self.clear()
self.name = name
self.description = description
self.dependencyLoader = dependencyLoader
if dependencyLoader is None:
self.dependencyLoader = RelativeDependencySolver()
self.path = path
self.uri = uri
if not infoKinds is None:
for ik in infoKinds:
self.addInfoKindEl(ik)
if not gids is None:
self.gids = gids
if deps:
self.deps = deps
def __str__(self):
if self.path:
return "InfoKindEnv loaded from {}".format(self.path)
def clear(self):
self.gids = {}
self.infoKinds = {}
self.deps = []
def depNames(self):
res = set()
for dep in self.deps:
for name in dep.infoKinds.keys():
res.add(name)
return res
def noDepNames(self):
return set(self.infoKinds.keys()).difference(self.depNames())
def embedDeps(self):
hidden = []
duplicate = set()
for dep in self.deps:
for name, ikEl in dep.infoKinds.items():
oldVal=self.infoKinds.get(name, None)
if oldVal is None:
self.infoKinds[name] = ikEl
elif ikEl != oldVal:
hidden.append(ikEl)
else:
duplicate.add(name)
return { "hidden": hidden, "duplicate": duplicate }
def addInfoKindEl(self, infoKind):
if infoKind.name in self.infoKinds and infoKind != self.infoKinds[infoKind.name]:
raise Exception('InfoKindEnv has collision for name {0}: {1} vs {2}'
.format(infoKind.name, infoKind, self.infoKinds[infoKind.name]))
self.infoKinds[infoKind.name] = infoKind
def addDependenciesFrom(self, infoKindEnv):
toAdd = set(self.infoKinds.keys())
missing = set()
while len(toAdd):
ikName = toAdd.pop()
ik = self.infoKinds.get(ikName,None)
if ik is None:
depInfoKindEl = infoKindEnv.infoKinds.get(ikName, None)
if depInfoKindEl:
self.infoKinds[ikName] = depInfoKindEl
toAdd.add(ikName)
else:
missing.add(ikName)
else:
for dep in ik.superNames:
if not dep in self.infoKinds:
toAdd.add(dep)
return missing
def gidOf(self, name, precalculatedGid=False):
res = self.gids.get(name,None)
if res is None:
if precalculatedGid:
raise Exception("non precalculated gid for %s" % name)
res = self.calcGid(name)
return res
def calcGid(self, name):
inProgress = []
toDo = [name]
hasPending = False
for i in range(2):
while len(toDo) > 0:
if not hasPending and inProgress:
now = inProgress.pop()
else:
now = toDo.pop()
if now in self.gids and now in inProgress:
inProgress.remove(now)
hasPending = False
nowVal = self.infoKinds.get(now, None)
if nowVal is None:
raise Exception("while calculating gid of %r found unknown key %r" % (name, now))
for subName in nowVal.superNames:
if subName in self.gids:
continue
hasPending = True
if subName in toDo:
toDo.remove(subName)
if subName in inProgress:
raise Exception('found loop to %s evaluating %s, currently in progress: %s' % (subName, now, inProgress))
toDo.append(subName)
if not hasPending:
self.gids[now] = nowVal.evalGid(self)
if now in inProgress:
inProgress.remove(now)
else:
if now in inProgress:
raise Exception('found loop to %s, currently in progress: %s' % (now, inProgress))
inProgress.append(now)
toDo = list(inProgress)
return self.gids[name]
def keyDependsOnKey(self, k1Name, k2Name):
"""partial ordering given by the dependencies
1: k1Name depends on k2Name
0: k1Name == k2Name
-1: k2Name depends on k1Name
None: no dependency"""
if k1Name == k2Name: return 0
k1 = self.infoKinds[k1Name]
k2 = self.infoKinds[k2Name]
if k1.superNames != k2.superNames:
allSuperK1 = set()
toDoK1 = list(k1.superNames)
allSuperK2 = set()
toDoK2 = list(k2.superNames)
while (len(toDoK1) > 0 or len(toDoK2) > 0):
if len(toDoK1) > 0:
el1Name = toDoK1.pop()
if k2Name == el1Name:
return 1
el1 = self.infoKinds[el1Name]
if el1.kindStr in self and not el1.kindStr in allSuperK1:
toDoK1.append(el1.kindStr)
for subEl in el1.superNames:
if not subEl in allSuperK1:
toDoK1.append(subEl)
allSuperK1.update(el1.superNames)
if len(toDoK2) > 0:
el2Name = toDoK2.pop()
if k1Name == el2Name:
return -1
el2 = self.infoKinds[el2Name]
if el2.kindStr in self and not el2.kindStr in allSuperK2:
toDoK2.append(el2.kindStr)
for subEl in el2.superNames:
if not subEl in allSuperK2:
toDoK2.append(subEl)
allSuperK2.update(el2.superNames)
return None
def __contains__(self, name):
"if an item with the given name is contained in this environment"
return name in self.infoKinds
def __len__(self):
"""returns the number of InfoKindEl stored in this environment"""
return len(self.infoKinds)
def __getitem__(self, name):
"""returns a dictionary representing the entry with the given name, or None if it does not exist"""
ikEl = self.infoKinds.get(name, None)
if ikEl:
return ikEl.toDict(self)
return None
def infoKindEls(self):
return list(self.infoKinds.values())
def infoKindEl(self, name):
"""returns the InfoKindEl with the given name, or None if it does not exist"""
return self.infoKinds.get(name, None)
def calcGids(self):
for k in self.infoKinds.keys():
if not k in self.gids:
self.gids[k]=self.calcGid(k)
def serialize(self, writeOut, subGids = True, selfGid = True):
infoKinds = self.sortedIKs()
writeOut("""{
"type": "nomad_meta_info_1_0",
"description": """)
if self.description:
jsonIndentD(self.description, writeOut, extraIndent = 4)
else:
writeOut('""')
writeOut(',\n')
if not self.path:
baseDir = os.getcwd()
else:
baseDir = os.path.normpath(os.path.dirname(os.path.abspath(self.path)))
depKeys = set()
if self.deps:
writeOut(' "dependencies": [ ')
depColon = False
for d in self.deps:
path = d.path
uri = d.uri
depKeys.update(d.infoKinds.keys())
if path:
path = os.path.normpath(os.path.abspath(path))
if path.startswith(baseDir) or not uri:
if depColon:
writeOut(", ")
else:
depColon = True
jsonIndentD({"relativePath": os.path.relpath(path, baseDir)}, writeOut, extraIndent = 4)
continue
if uri:
if depColon:
writeOut(", ")
else:
depColon = True
jsonIndentD({"uri": uri}, writeOut, extraIndent = 4)
continue
raise Exception("Dependency on serializable %s" % d)
writeOut('],\n')
addColon = False
writeOut(' "metaInfos": [ ')
for ik in infoKinds:
if ik.name in depKeys:
continue
if addColon:
writeOut(", ")
else:
addColon = True
jsonIndentD(ik.toDict(env = self, subGids = subGids, selfGid = selfGid), writeOut, extraIndent = 4, check_circular = True)
writeOut("]\n}\n")
def sortedIKs(self):
infoKinds = list(self.infoKinds.values())
infoKinds.sort(lambda x, y: cmp(x.name.lower()+x.name, y.name.lower()+y.name))
return infoKinds # self.sortAndComplete(infoKinds, ignoreMissing = True)
def toJsonList(self, withGids):
infoKinds = list(self.infoKinds.keys())
infoKinds.sort(lambda x, y: self.compareKeys(x.name, y.name))
return [self.infoKinds[x].toDict(self,
self if withGids else None) for x in infoKinds]
def verifyGids(self, preserveAbsent=False):
changes = {}
oldGids = self.gids
self.gids = {}
self.calcGids()
for k,v in oldGids.items():
newVal = self.gids.get(k, None)
if newVal is None:
if preserveAbsent:
self.gids[k] = v
else:
changes[k] = (v, None)
elif v != newVal:
changes[k] = (v, newVal)
return changes
def fromJsonList(self, jsonDict, name, source, extraArgsHandling = InfoKindEl.ADD_EXTRA_ARGS, dependencyLoad=False):
typeStr = jsonDict.get("type","nomad_meta_info_1_0")
typeRe = re.compile(r"nomad_meta_info_(?P<major>[0-9]+)_(?P<minor>[0-9]+)$")
self.name = name
m = typeRe.match(typeStr)
if not m:
raise Exception("unexpected type '%s', expected nomad_meta_info_1_0" % typeStr)
if int(m.group("major")) != 1:
raise Exception("Unsupported major version %s, expeced 1")
dependencies = jsonDict.get("dependencies",[])
jsonList = jsonDict.get("metaInfos",[])
self.description = jsonDict.get("description","")
overwritten = []
gidToCheck = {}
deps = []
for d in dependencies:
if self.dependencyLoader is None:
raise Exception("no dependencyLoader while loading local_in")
dep = self.dependencyLoader(self, source, d)
if dep:
self.deps.append(dep)
index = -1
for ii in jsonList:
index += 1
val = dict(ii)
if not "name" in ii:
raise Exception("InfoKind at %d is without name: %s" % (index, ii) )
oldVal=self.infoKinds.get(ii['name'],None)
gid=None
if "gid" in ii:
gid = ii['gid']
del val['gid']
if "superGids" in ii:
if not "superNames" in ii:
raise Exception("superGids without superNames in fromJsonList")
superNames = ii["superNames"]
superGids = ii["superGids"]
if len(superNames) != len(superGids):
raise Exception("superGids incompatible with superNames in fromJsonList: %s vs %s" % (ii["superGids"], ii["superNames"]))
toCheck = {}
for i in range(len(superNames)):
assert not superNames[i] in toCheck.keys(), "duplicate superName %r in %r" % (superNames[i], ii["name"])
toCheck[superNames[i]] = superGids[i]
gidToCheck[ii["name"]] = toCheck
del val['superGids']
val['extraArgsHandling'] = extraArgsHandling
ikEl = InfoKindEl(**val)
if not oldVal is None and ikEl != oldVal:
overwritten.append((oldVal, ikEl))
if gid:
self.gids[ii['name']] = gid
self.infoKinds[ikEl.name] = ikEl
res = { "overwritten": overwritten }
if not dependencyLoad:
res.update(self.embedDeps())
for childName, gids in gidToCheck.items():
for name, gid in gids.items():
if self.gidOf(name) != gid:
raise Exception("incompatible superGid for superName %s of %s (%s vs %s)" % (name, ii["name"], gid, self.gidOf(name)))
if res.get("overwritten", False) or res.get("duplicate", False) or res.get("hidden", False):
res["hasWarnings"] = True
else:
res["hasWarnings"] = res.get("hasWarnings", False)
return res
def sortAndComplete(self, propsToSort, ignoreMissing = False):
"""builds a list of properties in propsToSort, so that all the dependecies of each
property are present before them"""
toDo = list(propsToSort)
done = set()
deps = []
res = []
while len(toDo)>0:
pAtt = toDo.pop()
nameAtt = pAtt.name
if nameAtt in done:
continue
deps = [nameAtt]
while len(deps)>0:
nameAtt = deps[-1]
pAtt = self.infoKinds.get(nameAtt, None)
if pAtt is None:
if ignoreMissing:
deps.pop()
done.add(nameAtt)
continue
else:
raise Exception("missing dependent InfoKindEl {0} following chain {1}".format(nameAtt, pAtt))
hasDepsToDo = False
kindStr = pAtt.kindStr
kindType = self.infoKindEl(kindStr)
for superName in pAtt.superNames:
if not superName in done:
if superName in deps:
raise Exception("circular dependency {0}, {1}".format(deps,superName))
deps.append(superName)
hasDepsToDo = True
if kindType and not kindStr in done:
if kindStr in deps:
raise Exception("circular dependency in kindStr {0}, {1}".format(deps,kindStr))
deps.append(kindStr)
hasDepsToDo = True
if not hasDepsToDo:
deps.pop()
res.append(pAtt)
done.add(nameAtt)
return res
def metaInfoNameWithAllSuper(self, name):
"""returns the meta info names of name and all its dependencies"""
toAdd = set([name])
res = set([name])
while toAdd:
e = toAdd.pop()
for superName in self.infoKinds[e].superNames:
if not superName in res:
res.add(superName)
toAdd.add(superName)
return res
def firstAncestorsByType(self, name):
"""Returns the first acestors of each type separated in roots and children.
(scala conversion, could be improved a bit)"""
metaInfoNames = self.metaInfoNameWithAllSuper(name)
metaInfoNames.remove(name)
mInfo = list(metaInfoNames)
edges = {}
for i, metaName in enumerate(mInfo):
metaInfo = self.infoKinds[metaName]
edges[i] = [mInfo.index(x) for x in metaInfo.superNames]
typeGroups = {}
for mIdx, metaName in enumerate(mInfo):
kindStr = self.infoKinds[metaName].kindStr
tNow = typeGroups.get(kindStr, None)
if tNow is None:
typeGroups[kindStr] = [mIdx]
else:
tNow.append(mIdx)
childsByType = {}
toDo = set(range(len(mInfo)))
while (toDo):
now = toDo.pop()
kindNow = self.infoKinds[mInfo[now]].kindStr
toDo2 = set(edges[now])
known2 = set(edges[now])
while (toDo2):
now2 = toDo2.pop()
if (self.infoKinds[mInfo[now2]].kindStr == kindNow):
childs = childsByType.get(kindNow, None)
if childs:
childs.add(now2)
else:
childsByType[kindNow] = set([now2])
if now2 in toDo:
toDo.remove(now2)
for el in edges[now2]:
if not el in known2:
toDo2.add(el)
known2.add(el)
res = {}
for typeName, allChilds in typeGroups.items():
childs = childsByType.get(typeName, set())
allForKind = set(allChilds)
rootNames = [mInfo[x] for x in (allForKind - childs)]
childNames = [mInfo[x] for x in childs]
res[typeName] = (rootNames, childNames)
return res
def loadJsonFile(filePath, dependencyLoader = None, extraArgsHandling = InfoKindEl.ADD_EXTRA_ARGS, uri = None):
    """Load an InfoKindEnv from the JSON meta-info file at *filePath*.

    Returns (env, warnings) where *warnings* is the dict produced by
    InfoKindEnv.fromJsonList.  Errors are logged and re-raised.

    Bug fix: the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are not logged as load errors.
    """
    env = InfoKindEnv(dependencyLoader = dependencyLoader, path = filePath, uri = uri)
    try:
        with open(filePath, encoding="utf-8") as f:
            o = json.load(f)
        warnings = env.fromJsonList(o, name = os.path.basename(filePath), source = {'path': filePath}, extraArgsHandling = extraArgsHandling)
    except Exception:
        logging.exception("Error while loading file %s" % filePath)
        raise
    return env, warnings
def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):
    """Loads a metainfo environment for a filename. The filename should not
    contain the full path, as the full path is resolved here and not by the
    caller.

    Args:
        filename: filename as a string.

    Returns:
        Tuple containing the metainfo environment, and any possible warnings
        that were encountered in the loading.
    """
    # Resolve the filename against the nomad-meta-info checkout that lives
    # four levels above this module.
    here = os.path.dirname(os.path.abspath(__file__))
    relative = "../../../../nomad-meta-info/meta_info/nomad_meta_info/{}".format(filename)
    metainfo_path = os.path.normpath(os.path.join(here, relative))
    return loadJsonFile(metainfo_path, dependencyLoader, extraArgsHandling, uri)
def loadJsonStream(fileStream, name = None, dependencyLoader = None, extraArgsHandling = InfoKindEl.ADD_EXTRA_ARGS, filePath = None, uri = None):
    """Load an InfoKindEnv from an already-open JSON stream.

    *filePath*/*name* default to the stream's ``name`` attribute (when it
    has one).  Returns (env, warnings); errors are logged and re-raised.

    Bug fixes: the two bare ``except:`` clauses are replaced -- ``getattr``
    for the optional stream name, ``except Exception`` for the load path --
    so KeyboardInterrupt/SystemExit are no longer swallowed or logged.
    """
    if filePath is None:
        filePath = getattr(fileStream, "name", None)
    if name is None and filePath is not None:
        name = os.path.basename(filePath)
    env = InfoKindEnv(dependencyLoader = dependencyLoader, name = name, path = filePath, uri = uri)
    try:
        o = json.load(fileStream)
        warnings = env.fromJsonList(o, source = {'path': filePath}, extraArgsHandling = extraArgsHandling)
    except Exception:
        logging.exception("Error while loading file %s" % filePath)
        raise
    return env, warnings
| angeloziletti/ai4materials | ai4materials/external/local_meta_info.py | local_meta_info.py | py | 27,845 | python | en | code | 36 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "builtins.object",
"line_number": 28,
"usage_type": "name"
},
{
"a... |
29858374038 | '''
Created on 9 Apr 2019
@author: qubix
'''
from typing import Tuple
import numpy as np
from sklearn.base import BaseEstimator
from modAL.utils.data import modALinput
from math import floor
from asreview.query_strategies.max_sampling import max_sampling
from asreview.query_strategies.random_sampling import random_sampling
def rand_max_sampling(classifier: BaseEstimator,
                      X: modALinput,
                      pool_idx=None,
                      n_instances: int = 1,
                      query_kwargs=None,
                      **kwargs
                      ) -> Tuple[np.ndarray, modALinput]:
    """
    Combination of random and maximum sampling.

    By default samples the 95% of the instances with max sampling,
    and 5% of the samples with random sampling.

    Parameters
    ----------
    classifier: BaseEstimator
        The classifier for which the labels are to be queried.
    X: modALinput
        The whole input matrix.
    pool_idx: np.array
        Indices of samples that are in the pool.
    n_instances: int
        Total number of samples to be queried.
    query_kwargs: dict
        Dictionary to pass through settings (such as the max/rand ratio),
        as well as the indices that were obtained using max & random
        sampling.  A fresh dict is created when None.
    **kwargs:
        Keyword arguments to be passed on to random/max sampling.

    Returns
    -------
    np.ndarray, modALinput
        The indices of the instances from X chosen to be labelled;
        the instances from X chosen to be labelled.
    """
    # Bug fix: the old mutable default ({}) was shared across calls; create a
    # fresh dict per call instead (callers passing their own are unaffected).
    if query_kwargs is None:
        query_kwargs = {}
    n_samples = X.shape[0]
    if pool_idx is None:
        pool_idx = np.arange(n_samples)

    # Set the fraction of maximum sampling. Defaults to 95% max, 5% rand.
    rand_max_frac = query_kwargs.get('rand_max_frac', 0.05)
    max_frac = 1 - rand_max_frac

    # Discrete split of n_instances; the fractional remainder goes to max
    # sampling probabilistically so the expected ratio stays exact.
    n_instance_max = floor(n_instances * max_frac)
    if np.random.random_sample() < n_instances * max_frac - n_instance_max:
        n_instance_max += 1
    n_instance_rand = n_instances - n_instance_max

    # Do max sampling on the current pool.
    max_idx, _ = max_sampling(classifier, X, pool_idx=pool_idx,
                              n_instances=n_instance_max,
                              query_kwargs=query_kwargs,
                              **kwargs)

    # Remove indices found with max sampling from the pool.
    query_idx = np.delete(np.arange(n_samples), pool_idx, axis=0)
    query_idx = np.append(query_idx, max_idx)
    new_pool_idx = np.delete(np.arange(n_samples), query_idx, axis=0)

    # Random sampling on the reduced pool.
    rand_idx, _ = random_sampling(classifier, X, pool_idx=new_pool_idx,
                                  n_instances=n_instance_rand,
                                  query_kwargs=query_kwargs,
                                  **kwargs)

    # Record which strategy produced which indices (deduplicated helper).
    _append_src_idx(query_kwargs, "max", max_idx)
    _append_src_idx(query_kwargs, "random", rand_idx)

    query_kwargs['rand_max_frac'] = rand_max_frac
    query_kwargs['last_bounds'] = [
        ("max", 0, n_instance_max),
        ("random", n_instance_max, n_instances),
    ]
    query_idx = np.append(max_idx, rand_idx)
    return query_idx, X[query_idx]


def _append_src_idx(query_kwargs, key, idx):
    """Append *idx* to query_kwargs['src_query_idx'][key], creating the
    sub-dict and entry when absent.

    Bug fixes vs the inlined original: ``np.int`` (removed in NumPy 1.24)
    is replaced with the builtin ``int`` dtype, and a missing
    'src_query_idx' entry no longer raises KeyError.
    """
    src = query_kwargs.setdefault('src_query_idx', {})
    if key not in src:
        src[key] = np.array(idx, dtype=int)
    else:
        src[key] = np.append(src[key], idx)
| syuanuvt/automated-systematic-review | asreview/query_strategies/rand_max.py | rand_max.py | py | 3,622 | python | en | code | null | github-code | 36 | [
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "modAL.utils.data.modALinput",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "m... |
17134892000 |
# Packages
import pandas as pd
import os
import json
from gensim.utils import simple_preprocess
from gensim.summarization.textcleaner import split_sentences
from functools import reduce
from fuzzywuzzy import fuzz
## Functions
## Returns marked html from iucn notes
def find_country(text, country):
    '''Insert HTML <mark> tags around fuzzy matches of *country* in *text*.

    The text is scanned with a sliding window the same length (in words)
    as the country name; every window whose fuzzy match against the
    country name scores token_set_ratio >= 90 has all of its words
    flagged for highlighting.

    Parameters
    ----------
    text : str
        IUCN notes text to search.
    country : str
        Country name to highlight.

    Returns
    -------
    str
        The text with matched words wrapped in <mark>...</mark>; runs of
        consecutive highlighted words are merged into a single tag pair.
    '''
    # Split on single spaces (not all whitespace) so the original spacing
    # survives the re-join below.
    words = text.split(" ")
    n_words = len(country.split(" "))  # window size = words in country name
    # Flag every word that falls inside a fuzzy-matching window.
    highlight = [0] * len(words)
    for i in range(len(words) - n_words + 1):
        window = " ".join(words[i:i + n_words])
        if fuzz.token_set_ratio(window, country) >= 90:
            highlight[i:i + n_words] = [1] * n_words
    # Wrap each flagged word in mark tags.
    marked = [
        "<mark>" + w + "</mark>" if flag else w
        for w, flag in zip(words, highlight)
    ]
    recomb_html = " ".join(marked)
    # Collapse adjacent highlighted words into one <mark> span.
    recomb_html = recomb_html.replace("</mark> <mark>", " ")
    return recomb_html
# Extracts data from dictionary level given a list of indices to that level
def get_from_dict(dataDict, pathlist):
    """Walk a nested dictionary along the keys in *pathlist* and return the value found there."""
    node = dataDict
    for key in pathlist:
        # Unbound dict.get keeps the original's behavior: a missing key
        # yields None, and a non-dict intermediate raises TypeError.
        node = dict.get(node, key)
    return node
## Constants
# Key paths (fed to get_from_dict) locating each free-text note field in
# the nested IUCN dictionary; the dict key doubles as an output column name.
# NOTE(review): "use_trate_notes" looks like a typo for "use_trade_notes",
# but it is used consistently as a column name, so renaming it would change
# the output CSV schema — confirm before fixing.
notes_paths = {"taxonomic_notes" : ["taxonomy", "taxonomic_notes", "value"],
               "red_list_notes" : ["iucn_status", "red_list_notes", "value"],
               "range_notes" : ["habitat", "range_notes", "value"],
               "population_notes" : ["population", "population_notes", "value"],
               "use_trate_notes" : ["trade", "use_trade_notes", "value"],
               "conservation_notes" : ["conservation", "conservation_notes", "value"],
               "threats_notes" : ["threats", "threats_notes", "value"]}
## Main code
# Load cites df for relevant countries
cites_df = pd.read_csv("../Data/CitesParrots.csv")
# Union of importer and exporter country codes appearing in the CITES data.
cites_country_code = list(set(list(cites_df["Importer"])+(list(cites_df["Exporter"]))))
# Load country list data data
country_df = pd.read_csv("../Data/countries.csv")
# Subset to countries of interest
country_df = country_df.loc[country_df["Code"].isin(cites_country_code)]
# Create a simpler, single word country name: drop any "(...)" qualifier
# and anything after the first comma.
country_df["Basic"] = [country.split("(")[0].split(",")[0] for country in country_df["Name"]]
# List all json files
dat_dir = "../Data/parrot_data/"
f_ls = os.listdir(dat_dir)
# Calc no of rows need in output df: one row per (species, country) pair,
# pre-filled with the "NA" placeholder.
n_row = len(f_ls) * country_df.shape[0]
out_df = pd.DataFrame({"SpeciesID" : ["NA"]*n_row,
                       "Country" : ["NA"]*n_row,
                       "taxonomic_notes" : ["NA"]*n_row,
                       "red_list_notes" : ["NA"]*n_row,
                       "range_notes" : ["NA"]*n_row,
                       "population_notes" : ["NA"]*n_row,
                       "use_trate_notes" : ["NA"]*n_row,
                       "conservation_notes" : ["NA"]*n_row,
                       "threats_notes" : ["NA"]*n_row})
# Walk every parrot JSON file, highlight country mentions in each IUCN
# note field, and write the accumulated rows out as three CSV chunks.
row_count = 0
for f in f_ls:
    # Load the per-species JSON record.
    with open(dat_dir + f) as json_file:
        parrot_dat = json.load(json_file)
    parrot = f.split(".")[0]  # species id = file name without extension
    # Skip species with no IUCN data at all.
    if len(parrot_dat["iucn"]) > 0:
        iucn_dat = parrot_dat["iucn"]
        # One output row per (species, country) pair.
        for country in country_df["Basic"]:
            for key, path in notes_paths.items():
                tmp_dat = get_from_dict(iucn_dat, path)
                # Ignore placeholder values ("NA" / "value").
                if tmp_dat != "NA" and tmp_dat != "value":
                    # Use .loc with (row, column) labels: the previous
                    # chained form out_df.iloc[row][key] = ... assigns to
                    # a temporary copy and is not guaranteed to update
                    # out_df (pandas SettingWithCopy pitfall).
                    out_df.loc[row_count, key] = find_country(tmp_dat, country)
            out_df.loc[row_count, "SpeciesID"] = parrot
            out_df.loc[row_count, "Country"] = country
            row_count += 1
# Drop the unused pre-allocated rows (.loc slicing is end-inclusive).
out_df = out_df.loc[0:row_count - 1]
# Split into three roughly equal chunks for writing.
idx = int((row_count - 1) / 3.0)
out_df1 = out_df.loc[0:idx]
out_df2 = out_df.loc[idx + 1:2 * idx]
out_df3 = out_df.loc[2 * idx + 1:row_count - 1]
out_df1.to_csv("../Data/parrot_csv/marked_text1.csv")
out_df2.to_csv("../Data/parrot_csv/marked_text2.csv")
out_df3.to_csv("../Data/parrot_csv/marked_text3.csv")
| ConMine/ConMine | Development/Code/sentence_tagging.py | sentence_tagging.py | py | 4,876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fuzzywuzzy.fuzz.token_set_ratio",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.fuzz",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pand... |
73627488425 | '''
Dependencies: gettext, playsound
installing
$ pip install gTTS pyttsx3 playsound soundfile transformers datasets sentencepiece
$ pip install playsound (may need to use "$ pip install --upgrade wheel" if install fails)
'''
import gtts
from playsound import playsound
# Read the entire text file that should be spoken aloud.
with open("sample.ini") as fileDescriptor:
    data = fileDescriptor.read()
# Synthesize the text with Google TTS, save it as an MP3, then play it.
tts = gtts.gTTS(data)
tts.save("audioReader.mp3")
playsound("audioReader.mp3")
| vvMaxwell/U5L2 | audio.py | audio.py | py | 428 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gtts.gTTS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "playsound.playsound",
"line_number": 15,
"usage_type": "call"
}
] |
26771069266 | from pyowm import OWM
from pyowm.utils import config
from pyowm.utils import timestamps
from config import owm_key
owm = OWM(owm_key)
mgr = owm.weather_manager()
# info on looking up cities.
#To make it more precise put the city's name, comma, 2-letter country code (ISO3166). You will get all proper cities in chosen country.
#The order is important - the first is city name then comma then country. Example - London, GB or New York, US.
city = 'Leesburg,US'
short_city = city.split(",", 1)[0]
# Creating an empty "database" dictionary; used here as a learning
# exercise in building and reading nested dictionaries.
database = {}
# Look up the current weather observation for `city` on OpenWeatherMap.
location = mgr.weather_at_place(city)
w = location.weather
# Store the wind and temperature sub-dictionaries under their own keys.
database['wind'] = w.wind()
database['temp'] = w.temperature('fahrenheit')
# Print the raw dictionary for inspection.
print(database)
# Wind speed lives under database['wind']['speed'].
print(f"Wind Speed for {city} : {database['wind']['speed']}")
# Current temperature lives under database['temp']['temp'].
print(f"Temp for {short_city} = {database['temp']['temp']}")
print(f"The high for today in {short_city} is, {database['temp']['temp_max']}") | shelmus/owm_weather | weather_dictionary.py | weather_dictionary.py | py | 1,164 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyowm.OWM",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.owm_key",
"line_number": 6,
"usage_type": "argument"
}
] |
40597432388 | """General purpose tools get fenced code blocks from Markdown."""
from dataclasses import dataclass
from operator import attrgetter
from pathlib import Path
from typing import List, Optional
import phmutest.direct
import phmutest.reader
import phmutest.select
from phmutest.direct import Marker
class FCBChooser:
    """Look up Markdown fenced code blocks that satisfy search criteria."""

    def __init__(self, markdown_filename: str):
        """Collect every fenced code block found in the Markdown file.

        Args:
            markdown_filename:
                Path to the Markdown file as a string.
        """
        self.all_blocks = phmutest.select.configure_block_roles(
            skips=[], markdown_file=Path(markdown_filename)
        )

    def select(
        self, *, label: str = "", info_string: Optional[str] = None, contains: str = ""
    ) -> List[str]:
        """Return the contents of every FCB that satisfies all criteria.

        Args:
            label
                Keep FCBs carrying a phmutest label directive equal to
                'label'. Empty string (default) keeps all FCBs.
            info_string
                Keep FCBs whose info string equals 'info_string'. Empty
                string matches FCBs that have no info string. None
                (default) keeps all FCBs.
            contains
                Keep FCBs whose contents contain the substring
                'contains'. Empty string (default) keeps all FCBs.

        Returns:
            Contents of the selected FCBs as strings, in file order.
            Empty list when nothing matches. Fenced code block strings
            typically end with a newline.
        """

        def carries_label(block) -> bool:
            # True when any directive on the block is a matching label.
            return any(
                d.type == Marker.LABEL and d.value == label
                for d in block.directives
            )

        # Start with every block and intersect away the non-matching ones.
        matching = set(self.all_blocks)
        if label:
            matching &= {b for b in self.all_blocks if carries_label(b)}
        if info_string is not None:
            matching &= {b for b in self.all_blocks if b.info_string == info_string}
        if contains:
            matching &= {b for b in self.all_blocks if contains in b.contents}
        # Restore file order before extracting contents.
        ordered = sorted(matching, key=attrgetter("line"))
        return [b.contents for b in ordered]

    def contents(self, label: str = "") -> str:
        """Return contents of the fenced code block labeled 'label'.

        This works the same as phmdoctest.tool.FCBChooser.contents().

        Args:
            label
                FCB has phmutest label directive 'label'.

        Returns:
            Contents of the labeled fenced code block as a string, or an
            empty string when the label is not found. Fenced code block
            strings typically end with a newline.
        """
        found = self.select(label=label)
        if not found:
            return ""
        return found[0]
@dataclass
class LabeledFCB:
    """Information about a fenced code block that has a label directive."""

    # NOTE: the docstring was previously placed after the fields, where it
    # is just a stray expression statement and never becomes __doc__.
    label: str  # the label directive's value
    line: str  # Markdown file line number of block contents
    contents: str  # fenced code block contents
def labeled_fenced_code_blocks(markdown_filename: str) -> List[LabeledFCB]:
    """Return Markdown fenced code blocks that have label directives.

    A label directive is the HTML comment ``<!--phmutest-label VALUE-->``
    placed immediately before a fenced code block in the Markdown source
    file. VALUE is a string with no embedded whitespace, and the space
    before VALUE must be present. The directive can precede any fenced
    code block. When a block carries more than one label directive, the
    one occurring earliest in the file is used.

    Args:
        markdown_filename
            Path to the Markdown file.

    Returns:
        List of LabeledFCB objects, where
        - label is the value of the label directive,
        - line is the line number recorded for the block's label
          directive (immediately before the block),
        - contents is the fenced code block contents as a string.
    """
    found: List[LabeledFCB] = []
    for fcb in phmutest.reader.fcb_nodes(markdown_filename):
        directives = phmutest.direct.get_directives(fcb)
        # Only the first LABEL directive on the block counts.
        first_label = next(
            (d for d in directives if d.type == Marker.LABEL), None
        )
        if first_label is not None:
            found.append(
                LabeledFCB(
                    label=first_label.value,
                    line=str(first_label.line),
                    contents=fcb.payload,
                )
            )
    return found
def fenced_code_blocks(markdown_filename: str) -> List[str]:
    """Return Markdown fenced code block contents as a list of strings.

    Args:
        markdown_filename
            Path to the Markdown file.

    Returns:
        One string per Markdown fenced code block, in file order.
    """
    return [node.payload for node in phmutest.reader.fcb_nodes(markdown_filename)]
| tmarktaylor/phmutest | src/phmutest/tool.py | tool.py | py | 5,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "phmutest.direct.select.configure_block_roles",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "phmutest.direct.select",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "phmutest.direct",
"line_number": 23,
"usage_type": "name"
},
... |
2698417886 | from django.shortcuts import render
from markdown import markdown
from .models import *
from django.http import HttpResponseRedirect
def forbid_zhihu(request):
    """Render the forbidden_zhihu.html template (access-denied page)."""
    return render(request, 'forbidden_zhihu.html')
def index_redirect(request):
    """Redirect the bare index URL to the external blog home page."""
    return HttpResponseRedirect('http://blog.alphamj.cn/')
def index(request):
    """Home page: preview every article with the classification sidebar."""
    articles = Article.objects.all()
    classifications = Classifications.objects.all()
    return render(request, 'article_preview.html',
                  {'navigation': 'nav_classification.html', 'articles': articles,
                   'nav_classifications': classifications})
def show_article(request, article_id):
    """Render one article, converting its Markdown body to HTML.

    Fix: the keyword was previously misspelled "extentions", which
    markdown() silently ignored, so none of the listed extensions
    (extra, codehilite, toc) were ever applied.
    """
    article = Article.objects.get(id=article_id)
    article.content = markdown(article.content, extensions=['markdown.extensions.extra',
                                                            'markdown.extensions.codehilite',
                                                            'markdown.extensions.toc'])
    classifications = Classifications.objects.all()
    return render(request, 'article.html', {'navigation': 'nav_classification.html',
                                            'article': article, 'nav_classifications': classifications,
                                            'classification_name': '文章分类'})
def show_article_as_classification(request, name):
    """List article previews belonging to the classification called *name*."""
    classification = Classifications.objects.get(name=name)
    # Reverse FK lookup: all articles attached to this classification.
    articles = classification.article_set.all()
    return render(request, 'article_preview.html',
                  {'navigation': 'nav_articles.html', 'articles': articles, 'nav_articles': articles,
                   'classification_name': '全部文章'})
def post(request):
    """Render the article-submission form (GET) or accept a submission (POST)."""
    if request.method == 'GET':
        classifications = Classifications.objects.all()
        return render(request, 'post.html',
                      {'navigation': 'nav_classification.html', 'classifications': classifications})
    elif request.method == 'POST':
        title = request.POST.get('title')
        context = request.POST.get('context')
        cls = request.POST.getlist('cls')
        if len(title) > 0 and len(context) > 0:
            # NOTE(review): `cls` is a list (getlist), but .get(name=...)
            # expects a single name; the fetched object `clss` is never
            # used and no Article is saved — this branch looks unfinished.
            # Confirm the intended behavior before relying on it.
            clss = Classifications.objects.get(name=cls)
        return index(request)
| w-mj/cloud-server | blog/views.py | views.py | py | 2,222 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 18,
"usage_type": "call"
},
{
"a... |
25983391444 | #!/usr/bin/env python3
"""Setup script."""
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
    """Setup the py.test test runner."""
    def finalize_options(self):
        """Set options for the command line."""
        TestCommand.finalize_options(self)
        # Run the whole suite with no extra pytest arguments.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        """Execute the test runner command."""
        # Import here, because outside the required eggs aren't loaded yet
        import pytest
        # Propagate pytest's exit status to the `setup.py test` command.
        sys.exit(pytest.main(self.test_args))
# Add installation instructions as well.
setup(
name='spaced-repetition',
tests_require=['pytest'],
cmdclass={
'test': PyTest
}
) | schedutron/spaced-repetition | setup.py | setup.py | py | 762 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "setuptools.command.test.test",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "setuptools.command.test.test.finalize_options",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "setuptools.command.test.test",
"line_number": 13,
"usage_type":... |
42926082156 | import tempfile
import unittest
import numpy as np
import pandas as pd
import pysam
from hmnfusion import mmej_deletion
from tests.main_test import Main_test
class TestMmejDeletionMain(Main_test):
@classmethod
def load_records(cls, path: str):
vcf_in = pysam.VariantFile(path)
return [x for x in vcf_in.fetch()]
def setUp(self):
# Value
self.value_0 = mmej_deletion.Value()
self.value_1 = mmej_deletion.Value(
id="86ad494080bc9c322a639d3de922e958",
contig="chr1",
start=5,
deletion="TGAGGC",
)
self.value_2 = mmej_deletion.Value(
id="927f1d86b6d899d163efdb245b9aca67",
contig="chr19",
start=5,
deletion="TGA",
)
self.value_1_df = pd.DataFrame(
{
"contig": "chr1",
"start": 5,
"deletion": "TGAGGC",
"sequence": "TGAGGC",
"conclusion": "alignment ambiguous",
},
index=["86ad494080bc9c322a639d3de922e958"],
)
self.value_2_df = pd.DataFrame(
{
"contig": "chr19",
"start": 5,
"deletion": "TGA",
"sequence": "",
"conclusion": "no clear signature",
},
index=["927f1d86b6d899d163efdb245b9aca67"],
)
self.values_unit_one = TestMmejDeletionMain.load_records(path=self.u1_vcf)
# MmejDeletion
self.mmej_deletion_u0 = mmej_deletion.MmejDeletion(name="sample0", values=[])
self.mmej_deletion_u1 = mmej_deletion.MmejDeletion(
name="sample1",
values=[self.value_1],
)
self.mmej_deletion_u2_s1 = mmej_deletion.MmejDeletion(
name="sample1",
values=[self.value_1, self.value_2],
)
self.mmej_deletion_u2_s2 = mmej_deletion.MmejDeletion(
name="sample2",
values=[self.value_1],
)
self.mmej_deletion_u2_df = pd.concat([self.value_1_df, self.value_2_df])
self.mmej_deletion_u2_df["sample1"] = ["o", "o"]
self.mmej_deletion_u2_df["sample2"] = ["o", pd.NA]
self.mmej_deletion_u2_df_xlsx = self.mmej_deletion_u2_df.replace(
{pd.NA: np.nan, "": np.nan}
)
self.mmej_deletion_u2_df_xlsx.reset_index(inplace=True, drop=True)
self.mmej_deletion_empty_df = pd.DataFrame(
columns=["contig", "start", "deletion", "sequence", "conclusion", "N1"]
)
self.mmej_deletion_empty_df_xlsx = pd.DataFrame(
{
"Unnamed: 0": "no deletion found",
"contig": np.nan,
"start": np.nan,
"deletion": np.nan,
"sequence": np.nan,
"conclusion": np.nan,
"N1": np.nan,
},
index=[0],
)
class TestConclude(Main_test):
    """Test Conclude object"""
    def test_attribute(self):
        """Test attribute number"""
        # Guard against members being added to or removed from Conclude
        # without the tests being updated: count the public (non-dunder)
        # attributes and expect exactly four.
        attrs = [x for x in dir(mmej_deletion.Conclude) if not x.startswith("__")]
        self.assertEqual(len(attrs), 4)
class TestValue(TestMmejDeletionMain):
"""Test Value object"""
def test_getters(self):
"""Test getters attributes"""
self.assertEqual(self.value_1.id, "86ad494080bc9c322a639d3de922e958")
self.assertEqual(self.value_1.contig, "chr1")
self.assertEqual(self.value_1.start, 5)
self.assertEqual(self.value_1.deletion, "TGAGGC")
def test_setters(self):
"""Test setters attributes"""
self.value_0.id = self.value_1.id
self.value_0.contig = self.value_1.contig
self.value_0.start = self.value_1.start
self.value_0.deletion = self.value_1.deletion
self.value_0.sequence = self.value_1.sequence
self.assertEqual(self.value_0.id, "86ad494080bc9c322a639d3de922e958")
self.assertEqual(self.value_0.contig, "chr1")
self.assertEqual(self.value_0.start, 5)
self.assertEqual(self.value_0.deletion, "TGAGGC")
def test_get_conclusion(self):
"""Test get_conclusion()"""
self.value_1.sequence = "ATCG"
self.value_1.deletion = "ATCG"
self.assertEqual(
self.value_1.get_conclusion(),
mmej_deletion.Conclude.AMBIGUOUS,
)
self.value_1.sequence = "ATC"
self.assertEqual(
self.value_1.get_conclusion(),
mmej_deletion.Conclude.UNCLEAR,
)
self.value_1.sequence = "ATCGGC"
self.assertEqual(
self.value_1.get_conclusion(),
mmej_deletion.Conclude.VALID,
)
self.value_1.deletion = "A"
self.assertEqual(
self.value_1.get_conclusion(),
mmej_deletion.Conclude.UNINITIALIZED,
)
def test_set_sequence(self):
"""Test set_sequence()"""
self.value_1.set_sequence(path=self.ref_mmej)
self.assertEqual(self.value_1.sequence, "TGAGGC")
def test_from_record(self):
"""Test from_record()"""
rec = mmej_deletion.Value.from_record(self.values_unit_one[0])
self.assertEqual(rec, self.value_1)
def test_to_dataframe(self):
"""Test to_dataframe()"""
self.value_1.set_sequence(path=self.ref_mmej)
self.assertTrue(self.value_1.to_dataframe().equals(self.value_1_df))
def test_to_region(self):
"""Test to_region()"""
self.assertEqual(self.value_1.to_region(), "chr1:5-17")
class TestMmejDeletion(TestMmejDeletionMain):
"""Test MmmejDeletion object"""
def test_getters(self):
"""Test getters attributes"""
self.assertEqual(self.mmej_deletion_u1.name, "sample1")
self.assertEqual(self.mmej_deletion_u1.values, [self.value_1])
def test_setters(self):
"""Test setters attributes"""
self.assertEqual(self.mmej_deletion_u0.name, "sample0")
self.assertEqual(self.mmej_deletion_u0.values, [])
self.mmej_deletion_u0.name = self.mmej_deletion_u1.name
self.mmej_deletion_u0.values = self.mmej_deletion_u1.values
self.assertEqual(self.mmej_deletion_u1.name, "sample1")
self.assertEqual(self.mmej_deletion_u1.values, [self.value_1])
def test_empty(self):
"""Test empty property"""
self.assertTrue(self.mmej_deletion_u0.empty)
self.assertFalse(self.mmej_deletion_u1.empty)
def test_build_empty_dataframe(self):
"""Test build_empty_dataframe"""
self.assertTrue(
mmej_deletion.MmejDeletion.build_empty_dataframe(name="test").equals(
pd.DataFrame(
columns=[
"contig",
"start",
"deletion",
"sequence",
"conclusion",
"test",
]
)
)
)
def test_get_value_ids(self):
"""Test get_value_ids()"""
self.assertEqual(self.mmej_deletion_u0.get_value_ids(), [])
self.assertEqual(
self.mmej_deletion_u2_s1.get_value_ids(),
["86ad494080bc9c322a639d3de922e958", "927f1d86b6d899d163efdb245b9aca67"],
)
def test_set_value_sequence(self):
"""Test set_value_sequence()"""
self.mmej_deletion_u0.set_value_sequence(path=self.ref_mmej)
self.assertEqual(self.mmej_deletion_u0.values, [])
self.assertEqual(self.mmej_deletion_u1.values[0].sequence, "")
self.mmej_deletion_u1.set_value_sequence(path=self.ref_mmej)
self.assertEqual(self.mmej_deletion_u1.values[0].sequence, "TGAGGC")
def test_from_vcf(self):
"""Test from_vcf()"""
dels = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
self.assertEqual(dels, [mmej_deletion.MmejDeletion(name="N1", values=[])])
dels = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
self.assertEqual(
dels,
[self.mmej_deletion_u2_s1, self.mmej_deletion_u2_s2],
)
def test_to_dataframe(self):
"""Test to_dataframe()"""
# Empty
mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
for m in mmej_deletions:
m.set_value_sequence(path=self.ref_mmej)
df = mmej_deletion.MmejDeletion.to_dataframe(mmej_deletions=mmej_deletions)
self.assertTrue(self.mmej_deletion_empty_df.equals(df))
# Filled
mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
for m in mmej_deletions:
m.set_value_sequence(path=self.ref_mmej)
df = mmej_deletion.MmejDeletion.to_dataframe(mmej_deletions=mmej_deletions)
self.assertTrue(self.mmej_deletion_u2_df.equals(df))
def test_to_excel(self):
"""Test to_excel()"""
# Empty
mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
for m in mmej_deletions:
m.set_value_sequence(path=self.ref_mmej)
with tempfile.NamedTemporaryFile(suffix=".xlsx") as fod:
mmej_deletion.MmejDeletion.to_excel(
path=fod.name, mmej_deletions=mmej_deletions
)
df = pd.read_excel(fod.name)
self.assertTrue(self.mmej_deletion_empty_df_xlsx.equals(df))
# Filled
mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
for m in mmej_deletions:
m.set_value_sequence(path=self.ref_mmej)
with tempfile.NamedTemporaryFile(suffix=".xlsx") as fod:
mmej_deletion.MmejDeletion.to_excel(
path=fod.name, mmej_deletions=mmej_deletions
)
df = pd.read_excel(fod.name)
self.assertTrue(self.mmej_deletion_u2_df_xlsx.equals(df))
if __name__ == "__main__":
unittest.main()
| guillaume-gricourt/HmnFusion | tests/unit/test_mmej_deletion.py | test_mmej_deletion.py | py | 9,961 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tests.main_test.Main_test",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pysam.VariantFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "hmnfusion.mmej_deletion.Value",
"line_number": 19,
"usage_type": "call"
},
{
"api_nam... |
6241790730 | """
PRL 115, 114801 (2015)
Please keep the Python style guide of PEP8: pep8.org.
"""
# %%
import numpy as np
from scipy.special import jv
# %%
# Constants
C = 299792458
EV = 1.60217662e-19
# Machine parameters, to be checked from logbook
C1 = 1
C2 = 0.87
lambdaFEL = 50.52e-9 + 0.07e-9
# Other parameters
E0 = 1.16867e9 * EV # electron beam nominal energy (J)
sigmaE = 150e3 * EV # electron beam energy spread (J)
R56 = 50e-6 # dispersive strength
ebeamlinchirp = 0.19e6 * EV / 1e-12 # electron beam cubic chirp
ebeamquadchirp = 5.42e6 * EV / 1e-12 ** 2 # electron beam quadratic chirp
n = 5 # harmonic number
lambdaseed = lambdaFEL * n # seed laser wavelength
k1 = 2 * np.pi / lambdaseed # seed laser wave number
tau10 = 130e-15 # first seed transform-limited pulse duration
GDD1 = 0 # first seed linear frequency (quadratic phase) chirp
tau1 = (1 + (4*np.log(2)*GDD1/tau10**2) ** 2) ** 0.5 * tau10
tau20 = tau10 # second seed transform-limited pulse duration
GDD2 = 0 # second seed linear frequency (quadratic phase) chirp
tau2 = (1 + (4*np.log(2)*GDD2/tau20**2) ** 2) ** 0.5 * tau20
deltat = 150e-15 # separation between the seeds
def output(t: "float | np.ndarray") -> "float | np.ndarray":
    """Return the beam bunching factor (proportional to the FEL electric
    field) at time(s) *t*, per the two-seed model of PRL 115, 114801 (2015).

    The two seed pulses modulate the electron beam energy; the dispersive
    section (R56) converts that into density bunching at harmonic n. All
    machine parameters are module-level constants.
    """
    # Quadratic (chirp) phase of each seed pulse.
    # NOTE(review): with GDD1 == GDD2 == 0 the denominator divides by zero;
    # because np.log returns np.float64 this produces inf (with a
    # RuntimeWarning) rather than raising, and Psi1/Psi2 collapse to 0.
    # Confirm this reliance on NumPy inf semantics is intended.
    Psi1 = 1 / (2*GDD1 + tau10**4/(8*np.log(2)**2*GDD1)) * t ** 2
    Psi2 = 1 / (2*GDD2 + tau20**4/(8*np.log(2)**2*GDD2)) * (t - deltat) ** 2
    deltaphi = 3.146894088480846  # relative phase between the two seeds (rad)
    ebeamtiming = 1.966066329749903e-12  # electron beam arrival time (s)
    seedfield = (
        C1 * np.exp(-2*np.log(2)*t**2/tau1**2) * np.exp(1j*Psi1)
        + C2 * np.exp(-2*np.log(2)*(t-deltat)**2/tau2**2) * np.exp(1j*Psi2) * np.exp(1j*deltaphi))  # seed electric field; first seed centered at time=0 fs
    seedenvelope = np.abs(seedfield) ** 2  # seed envelope
    seedphase = np.unwrap(np.angle(seedfield))  # seed phase
    A0 = 3  # amplitude of the energy modulation of the electron beam induced by the seeds
    A = A0 * seedenvelope ** 0.5
    B = R56 * k1 * sigmaE / E0  # normalized dispersive strength
    ebeamenergyprofile = (
        E0
        + ebeamlinchirp * (t - ebeamtiming)
        + (1/2) * ebeamquadchirp * (t - ebeamtiming) ** 2
    )
    # electron beam energy profile induces a phase onto the FEL pulse
    ebeamphase = B / sigmaE * ebeamenergyprofile
    # bunching (proportional to the FEL electric field) in the time domain
    return (np.exp(-(n*B)**2/2)
            * jv(n, -n*B*A)
            * np.exp(1j*n*seedphase)
            * np.exp(1j*n*ebeamphase))
# %%
t = np.linspace(-5.125e-12, 5.275e-12, 2 ** 12, endpoint=False)
wave = output(t)
freq = C * n / lambdaseed + np.fft.fftshift(np.fft.fftfreq(t.shape[0], t[1] - t[0]))
x = C / freq * 1e9
y = np.abs(np.fft.fftshift(np.fft.fft(np.fft.ifftshift(wave)))) ** 2
# %%
import matplotlib.pyplot as plt
plt.plot(x, y)
plt.xlim(50.5, 50.8)
plt.grid(True)
plt.show()
| DaehyunPY/FERMI_20149100 | Scripts/phase_locked.py | phase_locked.py | py | 2,821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": ... |
70123923304 | #! /usr/bin/env python
from sortrobot.neural import Classifier, OrientationClassifier
from PIL import Image
import sys, os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir", default=None,
help="Directory to write sorted files. Default: same directory as file.")
parser.add_option("-c", "--classifier", dest="classifier", default='orient',
help="Classifier from sortrobot.neural to use.")
opts, args = parser.parse_args(sys.argv[1:])
# Instantiate the requested classifier: the dict maps the --classifier
# option value to its class, and the trailing () constructs an instance.
classifier = {
    'orient': OrientationClassifier,
    'color': Classifier,
}[opts.classifier]()
# Classify each image given on the command line and move it into a
# subdirectory named after the predicted label.
for i,filename in enumerate(args):
    print('{}: Reading {}'.format(i, filename))
    im = Image.open(filename)  # NOTE(review): image handle is never closed
    label = classifier.classify(im)
    print(' classified as', label)
    # Destination defaults to the file's own directory unless -o was given.
    outdir, basename = os.path.split(filename)
    if opts.outdir is not None:
        outdir = opts.outdir
    newdir = os.path.join(outdir, label)
    if not os.path.exists(newdir):
        # NOTE(review): exists+mkdir is racy; os.makedirs(newdir,
        # exist_ok=True) would be safer — confirm before changing.
        os.mkdir(newdir)
    print(' moving to', newdir)
    os.rename(filename, os.path.join(newdir, basename))
| AaronParsons/sortrobot | scripts/sr_sort_files.py | sr_sort_files.py | py | 1,118 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "optparse.OptionParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sortrobot.neural.OrientationClassifier",
"line_number": 16,
"usage_type": "name"
},
{
"api_nam... |
483095221 | import requests
from time import sleep
#听写单词扣词验证PRE环境
header={"Authorization":"Bearer eyJhbGciOiJIUzUxMiJ9.eyJqdGkiOiIxNTYyNjI5MDYwMDc1MTAyMjA5Iiwic3ViIjoie1wiaWRcIjoxNTYyNjI5MDYwMDc1MTAyMjA5LFwibW9iaWxlXCI6XCIrODYxODM4NDI1MzUwNlwifSIsImV4cCI6MTcwMTY3NzU1M30.ByAdhAfbxwS5tTbkbSJIPJXN6bIrzoOjeWMwn6JA8pimm2v1fMTXVJfdXloqInXPY_FsTlc7ZPDwxlCGtFqQ5Q",
"User-Uid":"1562629060075102209",
"Kid-Uid":"1562629060075102209"}
DataGetTcheBox={"uid":1562629060075102209}
#获取教材版本接口
GetTeacherBoxUrl="https://hear-pre.abctime.com/v1/dictation/textbook"
#获取年级信息
GetTcheBoxInfo=requests.post(url=GetTeacherBoxUrl,json=DataGetTcheBox,headers=header)
# print(GetTcheBoxInfo.json()["data"]['grade_list'][0])
# print(GetTcheBoxInfo.json()["data"]['grade_list'][0]['textbook_list'])
bookErrorList=[]
worderror=[]
for grade_id in range(19):
# print(textbook_id)
sleep(1)
for textbook_id in range(1,len(GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['textbook_list'])+1):
grade_name = GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['grade_name']
# print(grade_name)
JX = GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['textbook_list'][textbook_id-1]['textbook_name']
sleep(1)
# 获取每本教材的单元
GetRescourseUrl = "https://hear-pre.abctime.com/v1/dictation/rescourse"
DataGetRescourse = {"grade_id": grade_id+1, "publisher_id": textbook_id, "uid": 1562629060075102209}
GetDataGetRescourse = requests.post(headers=header, json=DataGetRescourse, url=GetRescourseUrl)
# print("年级教材版本:",GetDataGetRescourse.json()['data'])
try:
for i in range(len(GetDataGetRescourse.json()['data']['resource_list'])):
# print(GetDataGetRescourse.json()['data']['resource_list'][i])
book_id=GetDataGetRescourse.json()['data']['resource_list'][i]['unit_id']
DY=book_id
publisher_idd=GetDataGetRescourse.json()['data']['resource_list'][i]['unit_id']
sleep(1)
# print('book_id',book_id)
# 选择单词
selectUrl = "https://hear-pre.abctime.com/v1/dictation/select"
selectData = {"book_id": book_id, "type": 1, "uid": 1562629060075102209}
selctreq = requests.post(json=selectData, url=selectUrl, headers=header)
sleep(1)
# print("选择单词:",selctreq.json()['data']['words_list'])
#遍历保存出单词和单词ids
wordss=[]
wordIdss =[]
for words in range(len(selctreq.json()['data']['words_list'])):
wordsEnd=selctreq.json()['data']['words_list'][words]['word']
# print("单词:",wordsEnd)
wordss.append(wordsEnd)
wordidEnd = selctreq.json()['data']['words_list'][words]['word_id']
# print("单词id:", wordidEnd)
wordIdss.append(wordidEnd)
sleep(1)
# 扣词接口
value=len(wordss)
deductionUrl = 'https://hear-pre.abctime.com/v1/dictation/deduction'
dataDeduction = {"pictureBookIds": [book_id], "value": value,
"word": wordss,
"wordIds": wordIdss, "uid": 1562629060075102209}
deductionReq=requests.post(url=deductionUrl,json=dataDeduction,headers=header)
# print("扣词请求:",deductionReq.json())
# Errorlist=[]
if deductionReq.json()['code']=="200" :
print('年级:',grade_name, '教材:',JX, '单元:',book_id, '正常!')
# print(deductionReq.json())
else:
print('年级:',grade_name, '教材:',JX, '单元:',book_id, '扣词异常!')
print("选择单词",selctreq.json())
worderror.append([grade_name,JX,book_id,[selctreq.json()]])
except:
print("异常请求",GetDataGetRescourse.json())
print('年级:', grade_name, '教材:', JX)
print("请求参数:",DataGetRescourse)
bookErrorList.append([grade_name,JX])
continue
print('教材无单词数据',bookErrorList)
print("扣词异常",worderror)
| wengyuanpei/pandaInterfaceTest | testCase/TingXieWordsCheck.py | TingXieWordsCheck.py | py | 4,624 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.