seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
import sqlite3

# Open (creating if necessary) the students database file.
con = sqlite3.connect('d_students.db')
cur = con.cursor()
# Create table (one-time setup, kept commented for reference)
#cur.execute('''CREATE TABLE s_information
#(first_name text, last_name text, course text, age real)''')
# Insert a row of data (one-time setup, kept commented for reference)
#cur.execute("INSERT INTO s_information VALUES ('Ade','Ola','product_design', 29)")
# Save (commit) the changes
con.commit()
print("successful")
s_data = [
    ('Ajayi', 'Bayowa', 'software development', 30,),
    ('Ademide', 'Benson', 'data science', 23,),
    ('Olawale', 'Saheed', 'UI/UX', 18,),
]
# cur.executemany('INSERT INTO s_information VALUES(?, ?, ?, ?)', s_data)
# print("execution successful")
for row in cur.execute('SELECT * FROM s_information'):
    print(row)
# BUGFIX: the previous `print(cur.fetchall())` here always printed [] because
# the loop above had already consumed every row from the cursor; removed.
# alter table statement
#cur.execute("alter table s_info rename to s_information")
#con.commit()
# add a new column
# cur.execute("alter table s_information add column email")
# con.commit()
# update column (assumes the `email` column above has already been added)
cur.execute(""" update s_information set email = 'kehindeorolade@gmail.com' """)
con.commit()
# Close the connection now that all work is committed (resource cleanup).
con.close()
| kehindeorolade/Module_4_lesson_3 | Module_4_less_3.py | Module_4_less_3.py | py | 1,043 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
24512005131 | import datetime as dt
import warnings
import numpy as np
import pandas as pd
from asgiref.sync import async_to_sync
from dateutil.parser import parse
from django.core.cache import cache
from django.utils import timezone
from apps.integration.tasks.sockets.get_kws_object import get_kws_object
warnings.filterwarnings("ignore")
def get_instrument(underlying: str) -> pd.DataFrame:
    """Return the cached option instruments for a single underlying.

    Reads the "OPTION_INSTRUMENTS" frame from the Django cache, keeps only
    rows whose ``underlying`` column matches, and re-indexes from zero.
    """
    all_instruments = cache.get("OPTION_INSTRUMENTS")
    mask = all_instruments["underlying"] == underlying
    return all_instruments[mask].reset_index(drop=True)
def set_initial_fields_for_instruments(instrument, instruments):
    """Initialise live-tick fields and expiry timestamps, then cache the frame.

    :param instrument: underlying name, used only as the cache key prefix.
    :param instruments: DataFrame of option instruments (mutated in place).
    """
    tz = timezone.get_current_timezone()
    # Tick-derived fields start unset; they get filled in by on_ticks().
    instruments["last_price"] = np.nan
    instruments["exchange_timestamp"] = np.nan
    instruments["last_trade_time"] = np.nan
    instruments["oi"] = np.nan
    # Expiry is taken as 15:30 on the expiry date.
    # NOTE(review): `.replace(tzinfo=tz)` attaches the zone without offset
    # conversion; confirm this is correct for the configured timezone object.
    instruments["expiry"] = instruments["expiry"].apply(
        lambda x: parse(f"{x} 15:30:00").replace(tzinfo=tz)
    )
    # Human-readable expiry, e.g. "28-MAR-2024".
    instruments["str_expiry"] = instruments["expiry"].apply(
        lambda y: y.strftime("%d-%b-%Y").upper()
    )
    cache.set(f"{instrument}_OPTION_INSTRUMENTS", instruments)
def set_instrument_cache(df, instruments, instrument):
    """Merge a batch of ticks into the cached instruments frame.

    :param df: DataFrame of raw ticks (one row per instrument token).
    :param instruments: previously cached instruments DataFrame.
    :param instrument: underlying name, used as the cache key prefix.
    """
    # Keep only the tick fields that are tracked in the cache.
    df = df[
        [
            "instrument_token",
            "last_price",
            "exchange_timestamp",
            "last_trade_time",
            "oi",
        ]
    ].copy()
    df.rename(columns={"instrument_token": "kite_instrument_token"}, inplace=True)
    # Left-merge produces _x (previously cached) / _y (fresh tick) pairs.
    instruments = instruments.merge(df, how="left", on="kite_instrument_token")
    # Prefer the fresh tick value; fall back to the previously cached one.
    instruments["last_price"] = instruments["last_price_y"].fillna(
        instruments["last_price_x"]
    )
    instruments["exchange_timestamp"] = instruments["exchange_timestamp_y"].fillna(
        instruments["exchange_timestamp_x"]
    )
    instruments["last_trade_time"] = instruments["last_trade_time_y"].fillna(
        instruments["last_trade_time_x"]
    )
    instruments["oi"] = instruments["oi_y"].fillna(instruments["oi_x"])
    # Drop the merge-suffix columns now that the canonical ones are rebuilt.
    instruments.drop(
        columns=[
            "last_price_x",
            "last_price_y",
            "exchange_timestamp_x",
            "exchange_timestamp_y",
            "last_trade_time_x",
            "last_trade_time_y",
            "oi_x",
            "oi_y",
        ],
        inplace=True,
    )
    cache.set(f"{instrument}_OPTION_INSTRUMENTS", instruments)
    # Also cache a per-websocket slice, sorted for a stable display order.
    for websocket_id, instruments_buffer in instruments.groupby("websocket_id"):
        cache.set(
            f"{instrument}_{websocket_id}_OPTION_INSTRUMENTS",
            instruments_buffer.sort_values(
                ["strike", "option_type"], ignore_index=True
            ),
        )
def on_connect(ws, response):
    """Websocket on-connect callback: subscribe to all tokens in full mode."""
    tokens = ws.instrument_tokens
    ws.subscribe(tokens)
    ws.set_mode(ws.MODE_FULL, tokens)
def on_ticks(ws, ticks):
    """Websocket tick callback: merge incoming ticks into the instrument cache.

    :param ws: the ticker object; carries `instrument` and `instrument_tokens`
        attached by run_option_websocket().
    :param ticks: list of tick dicts from the feed.
    """
    instruments = cache.get(f"{ws.instrument}_OPTION_INSTRUMENTS")
    df = pd.DataFrame(ticks)
    if not df.empty:
        set_instrument_cache(df, instruments, ws.instrument)
    # After 15:30 local time, stop streaming and close the socket.
    if timezone.localtime().time() > dt.time(15, 30):
        ws.unsubscribe(ws.instrument_tokens)
        ws.close()
def on_close(ws, code, reason):
    """Websocket close callback: stop the ticker only on a clean close.

    A close carrying either a code or a reason is left alone so the client
    can attempt its own reconnection; only a silent close stops the loop.
    """
    if code or reason:
        return
    ws.stop()
def run_option_websocket(instrument: str) -> None:
    """Start a Kite websocket streaming option ticks for `instrument`.

    Does nothing when no instruments are cached for the underlying.  The
    websocket runs threaded; the callbacks update the Django cache.
    """
    instruments = get_instrument(instrument)
    if instruments.empty:
        return
    set_initial_fields_for_instruments(instrument, instruments)
    kws = get_kws_object()
    # Attach state the callbacks read back off the ws object.
    kws.instrument = instrument
    kws.instrument_tokens = instruments["kite_instrument_token"].to_list()
    kws.on_ticks = on_ticks
    kws.on_connect = on_connect
    kws.on_close = on_close
    kws.connect(threaded=True)
| finbyz/trading_child | apps/integration/tasks/sockets/option_websocket.py | option_websocket.py | py | 3,575 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 17,
"usage_type": "name"
},
{
"api_n... |
21888795134 | import time
from pyvisauto import Region
import api.api_core as api
import fleet.fleet_core as flt
import config.config_core as cfg
import nav.nav as nav
import stats.stats_core as sts
import util.kca as kca_u
from kca_enums.kcsapi_paths import KCSAPIEnum
from util.logger import Log
class FactoryCore(object):
    """Automates the in-game factory (equipment development / ship build).

    All screen interaction goes through ``kca_u.kca`` regions; resource
    amounts are entered by clicking the +1/+10/+100 buttons the required
    number of times.
    """

    # Whether the factory module is active; switched off when the port or
    # build slots make further orders impossible.
    enabled = False
    # Epoch timestamp of the last disable, used for the cooldown below.
    disable_timer = 0

    # Click regions for the +1/+10/+100 buttons of each resource counter.
    order_oil_region = {1: "order_oil_region_1",
                        10: "order_oil_region_10",
                        100: "order_oil_region_100"}
    order_ammo_region = {1: "order_ammo_region_1",
                         10: "order_ammo_region_10",
                         100: "order_ammo_region_100"}
    order_steel_region = {1: "order_steel_region_1",
                          10: "order_steel_region_10",
                          100: "order_steel_region_100"}
    order_bauxite_region = {1: "order_bauxite_region_1",
                            10: "order_bauxite_region_10",
                            100: "order_bauxite_region_100"}
    # Indexed by recipe position: oil, ammo, steel, bauxite.
    order_resource_region = [order_oil_region,
                             order_ammo_region,
                             order_steel_region,
                             order_bauxite_region]

    def __init__(self):
        self.enabled = cfg.config.factory.enabled

    def set_timer(self):
        """Record the moment the module was disabled."""
        self.disable_timer = time.time()

    def disable_time_up(self):
        """Return True once 15 minutes have elapsed since set_timer()."""
        return time.time() > self.disable_timer + (15 * 60)

    def develop_logic(self, count):
        """Navigate to the factory and place `count` development orders."""
        self.goto()
        oil, ammo, steel, bauxite = self.read_config_develop()
        return self.develop(oil, ammo, steel, bauxite, count)

    def build_logic(self, count):
        """Navigate to the factory and place `count` construction orders."""
        self.goto()
        oil, ammo, steel, bauxite = self.read_config_build()
        return self.build(oil, ammo, steel, bauxite, count)

    def goto(self):
        """Navigate to the development screen."""
        nav.navigate.to('development')

    def develop(self, oil, ammo, steel, bauxite, count):
        """Place `count` develop orders using the given resource recipe.

        Assumes the factory page is currently displayed.  Returns False and
        disables the module when the develop menu cannot be opened
        (typically a full port); True when all orders were placed.
        """
        while count > 0:
            # Open the develop menu, retrying a few times.
            retry = 0
            while not kca_u.kca.exists(
                    'lower', "factory|develop_menu.png") and retry < 5:
                kca_u.kca.r["develop_region"].click()
                kca_u.kca.sleep(1)
                retry += 1
            if retry == 5:
                Log.log_error("Cannot open develop menu, probably because the port is full")
                Log.log_error("Disable factory module")
                self.enabled = False
                return False
            resource_list = [oil, ammo, steel, bauxite]
            for i in range(4):
                # Each counter starts at 10, so only the excess is clicked in.
                resource = resource_list[i]
                resource -= 10
                # BUGFIX: every `kca_u.kca.sleep` below was a bare attribute
                # access (missing parentheses), so no delay ever happened.
                while resource >= 100:
                    kca_u.kca.r[self.order_resource_region[i][100]].click()
                    kca_u.kca.sleep()
                    resource -= 100
                while resource >= 10:
                    kca_u.kca.r[self.order_resource_region[i][10]].click()
                    kca_u.kca.sleep()
                    resource -= 10
                while resource >= 1:
                    kca_u.kca.r[self.order_resource_region[i][1]].click()
                    kca_u.kca.sleep()
                    resource -= 1
            if count >= 3:
                # Use a development material to run a triple develop.
                kca_u.kca.r["use_item_region"].click()
                kca_u.kca.sleep()
                count -= 3
            else:
                count -= 1
            kca_u.kca.r["order_confirm_region"].click()
            kca_u.kca.wait('lower_right_corner', 'global|next_alt.png', 20)
            # Click through the result screens until none remain.
            while kca_u.kca.exists('lower_right_corner', 'global|next_alt.png'):
                kca_u.kca.sleep()
                kca_u.kca.r['shipgirl'].click()
                kca_u.kca.r['top'].hover()
                kca_u.kca.sleep()
        return True

    def build(self, oil, ammo, steel, bauxite, count):
        """Place `count` build orders using the given resource recipe.

        Assumes the factory page is currently displayed.  Returns False when
        both build slots are busy, or when a finished ship cannot be
        received (typically a full port); True when all builds were started.
        """
        while count > 0:
            kca_u.kca.sleep(1)
            # Bail out if both slots are occupied.
            if kca_u.kca.exists("build_slot_1_stat_region",
                                "factory|build_progressing.png")\
                    and \
                    kca_u.kca.exists("build_slot_2_stat_region",
                                     "factory|build_progressing.png"):
                return False
            build_slot_stat = {1: "build_slot_1_stat_region",
                               2: "build_slot_2_stat_region"}
            build_slot = {1: "build_slot_1_region",
                          2: "build_slot_2_region"}
            # Receive any finished build first.
            for i in range(1, 3):
                if kca_u.kca.exists(build_slot_stat[i],
                                    "factory|build_finish.png"):
                    kca_u.kca.r[build_slot[i]].click()
                    kca_u.kca.sleep(1)
                    retry = 0
                    while not kca_u.kca.exists(
                            build_slot_stat[i], "factory|build_idle.png")\
                            and retry < 10:
                        kca_u.kca.r[build_slot_stat[i]].click()
                        kca_u.kca.sleep(3)
                        retry += 1
                    if retry == 10:
                        Log.log_error("Cannot receive ship, probably because the port is full")
                        Log.log_error("Disable factory module")
                        self.enabled = False
                        return False
                    # Click through the new-ship screens until none remain.
                    while kca_u.kca.exists('lower_right_corner', 'global|next_alt.png'):
                        kca_u.kca.sleep()
                        kca_u.kca.r['shipgirl'].click()
                        kca_u.kca.r['top'].hover()
                        kca_u.kca.sleep()
                    kca_u.kca.wait('lower', 'factory|factory_init.png', 20)
            # Place the order in an idle slot.
            for j in range(1, 3):
                if kca_u.kca.exists(build_slot_stat[j],
                                    "factory|build_idle.png"):
                    # Open the order menu, retrying a few times.
                    retry = 0
                    while not kca_u.kca.exists(
                            'lower', "factory|develop_menu.png") and retry < 5:
                        kca_u.kca.r[build_slot[j]].click()
                        kca_u.kca.sleep(1)
                        retry += 1
                    if retry == 5:
                        Log.log_error("Cannot open develop menu, probably because the port is full")
                        Log.log_error("Disable factory module")
                        self.enabled = False
                        return False
                    resource_list = [oil, ammo, steel, bauxite]
                    for i in range(4):
                        # Each counter starts at 30, so only the excess is
                        # clicked in.  BUGFIX: the sleeps below were bare
                        # attribute accesses (missing parentheses).
                        resource = resource_list[i]
                        resource -= 30
                        while resource >= 100:
                            kca_u.kca.r[self.order_resource_region[i][100]].click()
                            kca_u.kca.sleep()
                            resource -= 100
                        while resource >= 10:
                            kca_u.kca.r[self.order_resource_region[i][10]].click()
                            kca_u.kca.sleep()
                            resource -= 10
                        while resource >= 1:
                            kca_u.kca.r[self.order_resource_region[i][1]].click()
                            kca_u.kca.sleep()
                            resource -= 1
                    kca_u.kca.r["order_confirm_region"].click()
                    kca_u.kca.wait('lower', 'factory|factory_init.png', 20)
                    count -= 1
                    if count <= 0:
                        break
        # All requested builds were successfully started.
        return True

    def read_config_develop(self):
        """Return the (oil, ammo, steel, bauxite) develop recipe from config."""
        oil = cfg.config.factory.develop["recipe"][0]
        ammo = cfg.config.factory.develop["recipe"][1]
        steel = cfg.config.factory.develop["recipe"][2]
        bauxite = cfg.config.factory.develop["recipe"][3]
        return oil, ammo, steel, bauxite

    def read_config_build(self):
        """Return the (oil, ammo, steel, bauxite) build recipe from config."""
        oil = cfg.config.factory.build["recipe"][0]
        ammo = cfg.config.factory.build["recipe"][1]
        steel = cfg.config.factory.build["recipe"][2]
        bauxite = cfg.config.factory.build["recipe"][3]
        return oil, ammo, steel, bauxite


factory = FactoryCore()
| XVs32/kcauto_custom | kcauto/factory/factory_core.py | factory_core.py | py | 8,911 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "config.config_core.config",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "config.config_core",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "time.time... |
3642629983 | from utils.transformers_utils import SiameseRobertaModel,TrainerLogger,get_preds,compute_metrics,update_metrics
import numpy as np
import pandas as pd
from transformers import RobertaTokenizerFast,RobertaConfig,TrainingArguments
from datasets import Dataset,DatasetDict #!pip install datasets
import evaluate #!pip install evaluate
from sklearn.metrics import roc_auc_score,recall_score,f1_score,precision_score,accuracy_score
DATASETS_PATH = '../data/datasets'
TOKENIZER_PATH = f'../data/tokenizers/tokenizer_opcode_bpe'
MODEL_PATH = f'../data/models/roberta_lm'
RESULTADOS_PATH = f'../data/resultados'
def get_dataset(target, tokenizer=None, remove_columns=None):
    """Load the balanced CSV for one vulnerability and wrap it as a DatasetDict.

    :param target: vulnerability column name (e.g. "arithmetic").
    :param tokenizer: optional tokenizer; when given, rows are tokenized with
        truncation and max-length padding.
    :param remove_columns: columns to drop during tokenization.
    :return: DatasetDict with "train" and "valid" splits.
    """
    dataset = pd.read_csv(f'{DATASETS_PATH}/{target}_balanced.csv')[['opcode_nocds', target, 'is_valid']]
    # Invert the label: rows flagged 1 in the CSV become class 0 and vice versa.
    dataset[target] = np.where(dataset[target] == 1, 0, 1)
    dataset.columns = ['text', 'labels', 'is_valid']
    ds = DatasetDict()
    ds['train'] = Dataset.from_pandas(dataset[~dataset.is_valid].drop('is_valid', axis=1), preserve_index=False)
    ds['valid'] = Dataset.from_pandas(dataset[dataset.is_valid].drop('is_valid', axis=1), preserve_index=False)
    # Idiom fix: compare against None with `is not`, not `!=`.
    if tokenizer is not None:
        ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding='max_length'),
                    batched=True, remove_columns=remove_columns)
    return ds
def get_trainer(ds, model):
    """Build a TrainerLogger over the train/valid splits of `ds`.

    :param ds: DatasetDict with "train" and "valid" splits.
    :param model: model to fine-tune.
    :return: configured TrainerLogger instance (not yet trained).
    """
    training_args = TrainingArguments("test_trainer",
                                      num_train_epochs=6,
                                      no_cuda=False,
                                      evaluation_strategy="epoch",  # steps
                                      # logging_strategy
                                      learning_rate=5e-05,
                                      lr_scheduler_type='linear',
                                      # 'linear',#cosine_with_restarts
                                      fp16=True
                                      )
    train_dataset = ds['train']
    eval_dataset = ds['valid']
    return TrainerLogger(
        model=model, args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# ---- Experiment 1: Siamese RoBERTa trained from scratch (no LM pretraining) ----
max_size_sentence = 4048
metric = evaluate.load("accuracy")
tokenizer = RobertaTokenizerFast.from_pretrained(TOKENIZER_PATH, max_len=max_size_sentence)
# NOTE(review): max_position_embeddings is 514 while the tokenizer allows up to
# 4048 tokens; presumably SiameseRobertaModel splits long inputs across its
# n_childs=8 sub-encoders -- confirm before changing either value.
config = RobertaConfig(
    vocab_size=1000,
    max_position_embeddings=512 + 2,  # 514
    hidden_size=216,
    num_attention_heads=6,
    num_hidden_layers=4,
    type_vocab_size=1
    # id2label={0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
)
# Results table: first column names the metric rows filled per vulnerability.
resultados = {
    'fold': ['f1', 'auc-roc', 'auc-roc-th', 'precision', 'recall', 'accuracy', 'size'],
}
vulnerabilities = ['access_control', 'arithmetic', 'denial_service',
                   'front_running', 'reentrancy', 'time_manipulation',
                   'unchecked_low_calls']
# Train/evaluate one classifier per vulnerability and accumulate metrics.
for target in vulnerabilities:
    print(target)
    ds = get_dataset(target, tokenizer, ['text'])
    model = SiameseRobertaModel(config, n_childs=8, pretrained='')
    trainer = get_trainer(ds, model)
    trainer.train()
    y_real_train, y_pred_train = get_preds(model, ds['train'])
    y_real, y_pred = get_preds(model, ds['valid'])
    update_metrics(target, resultados, (y_real_train, y_pred_train, y_real, y_pred))
pd.DataFrame(resultados).to_csv(f'{RESULTADOS_PATH}/transfomers_no_lm.csv', index=False)
# ---- Experiment 2: same setup, but initialised from the pretrained LM ----
max_size_sentence = 4048
metric = evaluate.load("accuracy")
tokenizer = RobertaTokenizerFast.from_pretrained(TOKENIZER_PATH, max_len=max_size_sentence)
config = RobertaConfig(
    vocab_size=1000,
    max_position_embeddings=512 + 2,  # 514
    hidden_size=216,
    num_attention_heads=6,
    num_hidden_layers=4,
    type_vocab_size=1
    # id2label={0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
)
# Fresh results table for this run.
resultados = {
    'fold': ['f1', 'auc-roc', 'auc-roc-th', 'precision', 'recall', 'accuracy', 'size'],
}
vulnerabilities = ['access_control', 'arithmetic', 'denial_service',
                   'front_running', 'reentrancy', 'time_manipulation',
                   'unchecked_low_calls']
# Identical loop to experiment 1, except the model loads LM weights from
# MODEL_PATH, so the two CSVs compare with/without language-model pretraining.
for target in vulnerabilities:
    print(target)
    ds = get_dataset(target, tokenizer, ['text'])
    model = SiameseRobertaModel(config, n_childs=8, pretrained=MODEL_PATH)
    trainer = get_trainer(ds, model)
    trainer.train()
    y_real_train, y_pred_train = get_preds(model, ds['train'])
    y_real, y_pred = get_preds(model, ds['valid'])
    update_metrics(target, resultados, (y_real_train, y_pred_train, y_real, y_pred))
pd.DataFrame(resultados).to_csv(f'{RESULTADOS_PATH}/transfomers_yes_lm.csv', index=False)
| matisyo/vulnerability_detection | Notebooks/8. Transformers Classifier.py | 8. Transformers Classifier.py | py | 4,686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datasets.DatasetDict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datasets.Dataset.fro... |
39932475902 | import argparse
import requests
from tabulate import tabulate
def make_api_request(query, filters, page, pagesize):
    """POST a log search to the local service and pretty-print the results.

    :param query: free-text search string.
    :param filters: dict of field -> value filters.
    :param page: 1-based page number.
    :param pagesize: number of results per page.
    """
    url = 'http://localhost:3000/log/search'
    data = {
        'query': query,
        'filters': filters,
        'page': page,
        'pageSize': pagesize
    }
    # A timeout keeps the CLI from hanging indefinitely if the service is down.
    response = requests.post(url, json=data, timeout=30)
    if response.status_code == 200:
        result = response.json()
        if result['success']:
            pagination = result['pagination']
            page = pagination['page']
            total_hits = pagination['totalHits']
            total_pages = pagination['totalPages']
            print(
                f"Search Results -> Current Page: {page} ")
            if result['data']:
                # The decoded JSON rows are already plain dicts, so the old
                # per-item `{k: v for ...}` copy was a no-op; pass them直 directly.
                print(tabulate(result['data'], headers="keys", tablefmt="pretty"))
            else:
                print("No results found.")
            print(f"total {total_hits} hits across {total_pages} pages")
        else:
            print(f"Error: {result.get('error', 'Unknown error')}")
    else:
        print(f"Error: {response.status_code}")
        print(response.text)
def main():
    """Parse CLI arguments and run one search against the local service."""
    parser = argparse.ArgumentParser(
        description='Make API request to http://localhost:3000/log/search')
    parser.add_argument('--query', type=str, default="", help='Search query')
    parser.add_argument('--filters', nargs='+', default=[],
                        help='Additional filters (key=value)')
    parser.add_argument('--page', type=int, default=1, help='Current Page')
    parser.add_argument('--pagesize', type=int, default=10, help='Page Size')
    args = parser.parse_args()
    query = args.query
    page = args.page
    pagesize = args.pagesize
    filters_dict = {}
    for filter_arg in args.filters:
        # BUGFIX: split only on the first '=' so filter values may themselves
        # contain '=' (e.g. --filters msg=a=b previously raised ValueError).
        key, value = filter_arg.split('=', 1)
        filters_dict[key] = value
    make_api_request(query, filters_dict, page, pagesize)


if __name__ == "__main__":
    main()
| harikrishnanum/LogQube | cli/search.py | search.py | py | 2,041 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 42,
"usage_type": "call"
}
] |
42842609252 | # This is a replacement for test/integration/inflate_tokens.sh.
# The original script had a lot of problems as described in https://app.zenhub.com/workspaces/current-sprint---engineering-615a2e9fe2abd5001befc7f9/issues/sifchain/issues/719.
# See https://www.notion.so/sifchain/TEST-TOKEN-DISTRIBUTION-PROCESS-41ad0861560c4be58918838dbd292497
import json
import re
from typing import Any, Mapping, Iterable, Sequence
from siftool import eth, test_utils, cosmos
from siftool.common import *
log = siftool_logger(__name__)
TokenDict = Mapping[str, Any]
class InflateTokens:
    """Deploys, whitelists, mints and distributes test tokens to sif accounts.

    Replacement for test/integration/inflate_tokens.sh.  All Ethereum access
    goes through ``self.ctx`` (web3) and all Sifchain access through
    ``self.ctx.sifnode``.
    """

    def __init__(self, ctx: test_utils.EnvCtx):
        self.ctx = ctx
        # Seconds to wait for a sif account balance change before giving up.
        self.wait_for_account_change_timeout = 120
        self.excluded_token_symbols = ["erowan"]  # TODO peggy1 only
        # Only transfer this tokens in a batch for Peggy1. See #2397. You would need to adjust this if
        # test_inflate_tokens_short is passing, but test_inflate_tokens_long is timing out. It only applies to Peggy 1.
        # The value of 3 is experimental; if tokens are still not getting across the bridge reliably, reduce the value
        # down to 1 (minimum). The lower the value the more time the transfers will take as there will be more
        # sequential transfers instead of parallel.
        self.max_ethereum_batch_size = 0

    def get_whitelisted_tokens(self) -> List[TokenDict]:
        """Reconstruct the token whitelist from past BridgeBank events.

        :return: list of token dicts (address, symbol, name, decimals,
            whitelist status, sif denom); IBC tokens also carry an "ibc" hash.
        """
        whitelist = self.ctx.get_whitelisted_tokens_from_bridge_bank_past_events()
        ibc_pattern = re.compile("^ibc/([0-9a-fA-F]{64})$")
        result = []
        seen_symbols = set()
        for token_addr, value in whitelist.items():
            token_data = self.ctx.get_generic_erc20_token_data(token_addr)
            token_symbol = token_data["symbol"]
            token = {
                "address": token_addr,
                "symbol": token_symbol,
                "name": token_data["name"],
                "decimals": token_data["decimals"],
                "is_whitelisted": value,
                "sif_denom": self.ctx.eth_symbol_to_sif_symbol(token_symbol),
            }
            m = ibc_pattern.match(token_symbol)
            if m:
                token["ibc"] = m[1].lower()
            log.debug("Found whitelisted entry: {}".format(repr(token_data)))
            # BUGFIX: the original asserted `token_symbol not in result`, but
            # `result` holds dicts, so the uniqueness check could never fire;
            # track the symbols in a separate set to actually enforce it.
            assert token_symbol not in seen_symbols, f"Symbol {token_symbol} is being used by more than one whitelisted token"
            seen_symbols.add(token_symbol)
            result.append(token)
        erowan_token = [t for t in result if t["symbol"] == "erowan"]
        # These assertions are broken in Tempnet, possibly indicating missing/incomplete chain init, see README.md
        # for comparison of steps
        assert len(erowan_token) == 1, "erowan is not whitelisted, probably bad/incomplete deployment"
        assert erowan_token[0]["is_whitelisted"], "erowan is un-whitelisted"
        return result

    def wait_for_all(self, pending_txs):
        """Wait for every transaction hash in `pending_txs`; return the receipts."""
        result = []
        for txhash in pending_txs:
            txrcpt = self.ctx.eth.wait_for_transaction_receipt(txhash)
            result.append(txrcpt)
        return result

    def build_list_of_tokens_to_create(self, existing_tokens: Iterable[TokenDict], requested_tokens: Iterable[TokenDict]
    ) -> Sequence[Mapping[str, Any]]:
        """
        This part deploys SifchainTestoken for every requested token that has not yet been deployed.
        The list of requested tokens is (historically) read from assets.json, but in practice it can be
        a subset of tokens that are whitelisted in production.
        The list of existing tokens is reconstructed from past LogWhiteListUpdate events of the BridgeBank
        smart contract (since there is no way to "dump" the contents of a mapping in Solidity).
        Deployed tokens are whitelisted with BridgeBank, minted to owner's account and approved to BridgeBank.
        This part only touches EVM chain through web3.
        """
        # Strictly speaking we could also skip tokens that were un-whitelisted (value == False) since the fact that
        # their addresses appear in BridgeBank's past events implies that the corresponding ERC20 smart contracts have
        # been deployed, hence there is no need to deploy them.
        tokens_to_create = []
        for token in requested_tokens:
            token_symbol = token["symbol"]
            if token_symbol in self.excluded_token_symbols:
                assert False, f"Token {token_symbol} cannot be used by this procedure, please remove it from list of requested assets"
            existing_token = zero_or_one(find_by_value(existing_tokens, "symbol", token_symbol))
            if existing_token is None:
                tokens_to_create.append(token)
            else:
                if not all(existing_token[f] == token[f] for f in ["name", "decimals"]):
                    assert False, "Existing token's name/decimals does not match requested for token: " \
                        "requested={}, existing={}".format(repr(token), repr(existing_token))
                if existing_token["is_whitelisted"]:
                    # (typo "smmart" fixed in the log message below)
                    log.info(f"Skipping deployment of smart contract for token {token_symbol} as it should already exist")
                else:
                    log.warning(f"Skipping token {token_symbol} as it is currently un-whitelisted")
        return tokens_to_create

    def create_new_tokens(self, tokens_to_create: Iterable[TokenDict]) -> Sequence[TokenDict]:
        """Deploy an ERC20 contract per requested token; whitelist on peggy1.

        :return: token dicts describing the newly deployed contracts.
        """
        pending_txs = []
        for token in tokens_to_create:
            token_name = token["name"]
            token_symbol = token["symbol"]
            token_decimals = token["decimals"]
            log.info(f"Deploying generic ERC20 smart contract for token {token_symbol}...")
            txhash = self.ctx.tx_deploy_new_generic_erc20_token(self.ctx.operator, token_name, token_symbol, token_decimals)
            pending_txs.append(txhash)
        token_contracts = [self.ctx.get_generic_erc20_sc(txrcpt.contractAddress) for txrcpt in self.wait_for_all(pending_txs)]
        new_tokens = []
        pending_txs = []
        for token_to_create, token_sc in [[tokens_to_create[i], c] for i, c in enumerate(token_contracts)]:
            token_symbol = token_to_create["symbol"]
            token_name = token_to_create["name"]
            token_decimals = token_to_create["decimals"]
            # Sanity-check the freshly deployed contract against the request.
            assert token_sc.functions.totalSupply().call() == 0
            assert token_sc.functions.name().call() == token_name
            assert token_sc.functions.symbol().call() == token_symbol
            assert token_sc.functions.decimals().call() == token_decimals
            new_tokens.append({
                "address": token_sc.address,
                "symbol": token_symbol,
                "name": token_name,
                "decimals": token_decimals,
                "is_whitelisted": True,
                "sif_denom": self.ctx.eth_symbol_to_sif_symbol(token_symbol),
            })
            if not on_peggy2_branch:
                txhash = self.ctx.tx_update_bridge_bank_whitelist(token_sc.address, True)
                pending_txs.append(txhash)
        self.wait_for_all(pending_txs)
        return new_tokens

    def mint(self, list_of_tokens_addrs, amount_in_tokens, mint_recipient):
        """Mint `amount_in_tokens` whole tokens of each listed token to `mint_recipient`."""
        pending_txs = []
        for token_addr in list_of_tokens_addrs:
            token_sc = self.ctx.get_generic_erc20_sc(token_addr)
            decimals = token_sc.functions.decimals().call()
            amount = amount_in_tokens * 10**decimals
            txhash = self.ctx.tx_testing_token_mint(token_sc, self.ctx.operator, amount, mint_recipient)
            pending_txs.append(txhash)
        self.wait_for_all(pending_txs)

    def transfer_from_eth_to_sifnode(self, from_eth_addr, to_sif_addr, tokens_to_transfer, amount_in_tokens, amount_eth_gwei):
        """Lock tokens (and optionally ETH) on Ethereum and wait for them to arrive on Sifchain."""
        sif_balances_before = self.ctx.get_sifchain_balance(to_sif_addr)
        sent_amounts = []
        pending_txs = []
        for token in tokens_to_transfer:
            token_addr = token["address"]
            decimals = token["decimals"]
            token_sc = self.ctx.get_generic_erc20_sc(token_addr)
            amount = amount_in_tokens * 10**decimals
            pending_txs.extend(self.ctx.tx_approve_and_lock(token_sc, from_eth_addr, to_sif_addr, amount))
            sent_amounts.append([amount, token["sif_denom"]])
        if amount_eth_gwei > 0:
            amount = amount_eth_gwei * eth.GWEI
            pending_txs.append(self.ctx.tx_bridge_bank_lock_eth(from_eth_addr, to_sif_addr, amount))
            sent_amounts.append([amount, self.ctx.ceth_symbol])
        self.wait_for_all(pending_txs)
        log.info("{} Ethereum transactions commited: {}".format(len(pending_txs), repr(sent_amounts)))
        # Wait for to_sif_addr to receive all funds across the bridge.
        previous_block = self.ctx.eth.w3_conn.eth.block_number
        self.ctx.advance_blocks()
        log.info("Ethereum blocks advanced by {}".format(self.ctx.eth.w3_conn.eth.block_number - previous_block))
        self.ctx.sifnode.wait_for_balance_change(to_sif_addr, sif_balances_before, min_changes=sent_amounts,
            polling_time=5, timeout=0, change_timeout=self.wait_for_account_change_timeout)

    def distribute_tokens_to_wallets(self, from_sif_account, tokens_to_transfer, amount_in_tokens, target_sif_accounts, amount_eth_gwei):
        """Distribute funds from the intermediate sif account to each individual account.

        Transfers are chunked to respect sifnode's max send batch size, waiting
        for each batch to land before sending the next.
        """
        send_amounts = [[amount_in_tokens * 10**t["decimals"], t["sif_denom"]] for t in tokens_to_transfer]
        if amount_eth_gwei > 0:
            send_amounts.append([amount_eth_gwei * eth.GWEI, self.ctx.ceth_symbol])
        progress_total = len(target_sif_accounts) * len(send_amounts)
        progress_current = 0
        for sif_acct in target_sif_accounts:
            remaining = send_amounts
            while remaining:
                batch_size = len(remaining)
                if (self.ctx.sifnode.max_send_batch_size > 0) and (batch_size > self.ctx.sifnode.max_send_batch_size):
                    batch_size = self.ctx.sifnode.max_send_batch_size
                batch = remaining[:batch_size]
                remaining = remaining[batch_size:]
                sif_balance_before = self.ctx.get_sifchain_balance(sif_acct)
                self.ctx.send_from_sifchain_to_sifchain(from_sif_account, sif_acct, batch)
                self.ctx.sifnode.wait_for_balance_change(sif_acct, sif_balance_before, min_changes=batch,
                    polling_time=2, timeout=0, change_timeout=self.wait_for_account_change_timeout)
                progress_current += batch_size
                log.debug("Distributing tokens to wallets: {:0.0f}% done".format((progress_current/progress_total) * 100))

    def export(self):
        """Return a JSON-ready list of non-IBC, non-excluded whitelisted tokens."""
        return [{
            "symbol": token["symbol"],
            "name": token["name"],
            "decimals": token["decimals"]
        } for token in self.get_whitelisted_tokens() if ("ibc" not in token) and (token["symbol"] not in self.excluded_token_symbols)]

    def transfer(self, requested_tokens: Sequence[TokenDict], token_amount: int,
        target_sif_accounts: Sequence[cosmos.Address], eth_amount_gwei: int
    ):
        """
        It goes like this:
        1. Starting with assets.json of your choice, It will first compare the list of tokens to existing whitelist and deploy any new tokens (ones that have not yet been whitelisted)
        2. For each token in assets.json It will mint the given amount of all listed tokens to OPERATOR account
        3. It will do a single transaction across the bridge to move all tokens from OPERATOR to sif_broker_account
        4. It will distribute tokens from sif_broker_account to each of given target accounts
        The sif_broker_account and OPERATOR can be any Sifchain and Ethereum accounts, we might want to use something
        familiar so that any tokens that would get stuck in the case of interrupting the script can be recovered.
        """
        # TODO Add support for "rowan"
        n_accounts = len(target_sif_accounts)
        total_token_amount = token_amount * n_accounts
        total_eth_amount_gwei = eth_amount_gwei * n_accounts
        # Calculate how much rowan we need to fund intermediate account with. This is only an estimation at this point.
        # We need to take into account that we might need to break transfers in batches. The number of tokens is the
        # number of ERC20 tokens plus one for ETH, rounded up. 5 is a safety factor
        number_of_batches = 1 if self.ctx.sifnode.max_send_batch_size == 0 else (len(requested_tokens) + 1) // self.ctx.sifnode.max_send_batch_size + 1
        fund_rowan = [5 * test_utils.sifnode_funds_for_transfer_peggy1 * n_accounts * number_of_batches, "rowan"]
        log.debug("Estimated number of batches needed to transfer tokens from intermediate sif account to target sif wallet: {}".format(number_of_batches))
        log.debug("Estimated rowan funding needed for intermediate account: {}".format(fund_rowan))
        ether_faucet_account = self.ctx.operator
        sif_broker_account = self.ctx.create_sifchain_addr(fund_amounts=[fund_rowan])
        eth_broker_account = self.ctx.operator
        if (total_eth_amount_gwei > 0) and (ether_faucet_account != eth_broker_account):
            # NOTE(review): other call sites multiply gwei amounts by eth.GWEI
            # before sending; confirm send_eth() expects gwei here.
            self.ctx.eth.send_eth(ether_faucet_account, eth_broker_account, total_eth_amount_gwei)
        log.info("Using eth_broker_account {}".format(eth_broker_account))
        log.info("Using sif_broker_account {}".format(sif_broker_account))
        # Check first that we have the key for ROWAN_SOURCE since the script uses it as an intermediate address
        keys = self.ctx.sifnode.keys_list()
        rowan_source_key = zero_or_one([k for k in keys if k["address"] == sif_broker_account])
        assert rowan_source_key is not None, "Need private key of broker account {} in sifnoded test keyring".format(sif_broker_account)
        existing_tokens = self.get_whitelisted_tokens()
        tokens_to_create = self.build_list_of_tokens_to_create(existing_tokens, requested_tokens)
        log.info("Existing tokens: {}".format(len(existing_tokens)))
        log.info("Requested tokens: {}".format(len(requested_tokens)))
        log.info("Tokens to create: {}".format(len(tokens_to_create)))
        new_tokens = self.create_new_tokens(tokens_to_create)
        existing_tokens.extend(new_tokens)
        # At this point, all tokens that we want to transfer should exist both on Ethereum blockchain as well as in
        # existing_tokens.
        tokens_to_transfer = [exactly_one(find_by_value(existing_tokens, "symbol", t["symbol"]))
            for t in requested_tokens]
        self.mint([t["address"] for t in tokens_to_transfer], total_token_amount, eth_broker_account)
        if (self.max_ethereum_batch_size > 0) and (len(tokens_to_transfer) > self.max_ethereum_batch_size):
            # (typos "sifndde"/"Thansfering" fixed in the log messages below)
            log.debug(f"Transferring {len(tokens_to_transfer)} tokens from ethereum to sifnode in batches of {self.max_ethereum_batch_size}...")
            remaining = tokens_to_transfer
            while remaining:
                batch = remaining[:self.max_ethereum_batch_size]
                remaining = remaining[self.max_ethereum_batch_size:]
                self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, batch, total_token_amount, 0)
                log.debug(f"Batch completed, {len(remaining)} tokens remaining")
            # Transfer ETH separately
            log.debug("Transferring ETH from ethereum to sifnode...")
            self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, [], 0, total_eth_amount_gwei)
        else:
            log.debug(f"Transferring {len(tokens_to_transfer)} tokens from ethereum to sifnode in single batch...")
            self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, tokens_to_transfer, total_token_amount, total_eth_amount_gwei)
        self.distribute_tokens_to_wallets(sif_broker_account, tokens_to_transfer, token_amount, target_sif_accounts, eth_amount_gwei)
        log.info("Done.")
        log.info("To see newly minted tokens in UI, you need to edit 'scripts/ibc/tokenregistry/generate-erc20-jsons.sh' "
            "and add any tokens that are not already there. Then cd into the directory and run './generate-erc20-jsons.sh devnet' "\
            "and commit the results in the sifchain-devnet-1 folder. @tim will pick up the PR and register it on "
            "devnet by running './register-one.sh' with the registry key. In the future this might be open for anybody "
            "to do on their own for devnet and testnet.")

    def transfer_eth(self, from_eth_addr: eth.Address, amount_gwei: int, target_sif_accounts: Iterable[cosmos.Address]):
        """Lock `amount_gwei` of ETH to each target sif account and wait for the txs."""
        pending_txs = []
        for sif_acct in target_sif_accounts:
            txrcpt = self.ctx.tx_bridge_bank_lock_eth(from_eth_addr, sif_acct, amount_gwei * eth.GWEI)
            pending_txs.append(txrcpt)
        self.wait_for_all(pending_txs)
def run(*args):
    """Command-line entry point: `export <file>` or `transfer <assets> <amt> <accounts> <eth_gwei>`.

    This script should be run with SIFTOOL_ENV_FILE set to a file containing
    definitions for OPERATOR_ADDRESS, ROWAN_SOURCE etc. Depending on if you're
    running it on Peggy1 or Peggy2 the format might be different.
    See get_env_ctx() for details.
    """
    assert not on_peggy2_branch, "Not supported yet on peggy2.0 branch"
    # Guard against missing command: without this, args[0] below raises a bare
    # IndexError instead of a meaningful usage error.
    if not args:
        raise Exception("Invalid usage")
    ctx = test_utils.get_env_ctx()
    script = InflateTokens(ctx)
    # For Ropsten we need to wait for 50 blocks i.e. ~20 min = 1200 s
    script.wait_for_account_change_timeout = 1800
    cmd = args[0]
    args = args[1:]
    if cmd == "export":
        # Usage: inflate_tokens.py export assets.json
        ctx.cmd.write_text_file(args[0], json.dumps(script.export(), indent=4))
    elif cmd == "transfer":
        # Usage: inflate_tokens.py transfer assets.json token_amount accounts.json amount_eth_gwei
        assets_json_file, token_amount, accounts_json_file, amount_eth_gwei = args
        tokens = json.loads(ctx.cmd.read_text_file(assets_json_file))
        accounts = json.loads(ctx.cmd.read_text_file(accounts_json_file))
        script.transfer(tokens, int(token_amount), accounts, int(amount_eth_gwei))
    else:
        raise Exception("Invalid usage")
if __name__ == "__main__":
    import sys
    basic_logging_setup()
    # Forward the command-line arguments (minus the program name) to run().
    run(*sys.argv[1:])
| Sifchain/sifnode | test/integration/framework/src/siftool/inflate_tokens.py | inflate_tokens.py | py | 18,307 | python | en | code | 106 | github-code | 6 | [
{
"api_name": "typing.Mapping",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "siftool.test_utils.EnvCtx",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "siftool.test... |
14132002645 | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime, Float, UniqueConstraint, Index
from sqlalchemy.orm import relationship
from src.models import Base
class GpsRecord(Base):
    """A single GPS fix reported by a device/app for a given user."""
    __tablename__ = "gps_record"

    id = Column(Integer, primary_key=True, index=True)
    # Timestamp of the fix; stored timezone-aware. (Shadows the stdlib name
    # `datetime` inside this class body only.)
    datetime = Column(DateTime(timezone=True))
    # WGS84 coordinates of the fix.
    latitude = Column(Float())
    longitude = Column(Float())
    # Optional altitude and horizontal/vertical accuracy as reported by the device.
    altitude = Column(Float(), nullable=True)
    accuracy = Column(Float(), nullable=True)
    vertical_accuracy = Column(Float(), nullable=True)
    # Free-form description attached to the fix.
    description = Column(String, nullable=True)
    # Reporting device and application identifiers.
    device = Column(String(length=128))
    app = Column(String(length=128))
    user = Column(String(length=32), default='castel')
    # Distance value stored with the record; unit/meaning not shown here —
    # presumably distance from the previous fix; TODO confirm against writer.
    distance = Column(Float())
# Secondary indexes on gps_record.
# 'device_records' also enforces uniqueness of (datetime, device, app, user),
# i.e. one fix per source per timestamp.
Index('device_records', GpsRecord.datetime, GpsRecord.device, GpsRecord.app, GpsRecord.user, unique=True)
# Descending-date indexes support "latest fix" queries per app / per device / overall.
Index('desc_date_per_app', GpsRecord.datetime.desc(), GpsRecord.device, GpsRecord.app)
Index('per_app', GpsRecord.device, GpsRecord.app)
Index('desc_date_per_device', GpsRecord.datetime.desc(), GpsRecord.device)
Index('desc_date', GpsRecord.datetime.desc())
{
"api_name": "src.models.Base",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Co... |
31192442591 | """
Created on Fri Mar 4 19:28:46 2022
@author: Miguel
"""
from _legacy.exe_isotopeChain_taurus import DataTaurus
class Template:
    """Keys used to fill the HFBAxial input-file templates (TEMPLATE / temp_noGrad)."""
    com = 'com'            # center-of-mass correction flags
    z = 'z'                # proton number
    a = 'a'                # mass number
    seed = 'seed'          # seed wave-function mode (0/1=from unit 10, 2=new)
    b20 = 'b20'            # quadrupole constraint line(s)
    varN2 = 'varN2'        # extra constraint lines (DN**2, DJX**2, <R**2>, ...)
    iterartions = 'iters'  # (sic) max gradient iterations
    hamil = 'interaction'  # Gogny force selector (0=D1S 1=D1 2=D1' ...)
    # NOTE(review): mainLinuxSecurePES references Template.eta_grad and
    # Template.iterations, neither of which is defined here (only
    # 'iterartions') — that retry path will raise AttributeError.
# Input-file template for a normal constrained HFBAxial run (gradient enabled).
# The {placeholders} are filled from a dict keyed by the Template class above.
TEMPLATE = """NUCLEUS {a:03} XX Z= {z:03} >>> HFB OPT <<< {com} COULECH 2
EPSG 0.000001 MAXITER {iters:05} >>> OUTPUT <<< 0 **** TSTG 0
ETMAX 0.7501 ETMIN 0.0351 DMAX 0.90 DMIN 0.70 TSHH 399.0
GOGNY FORCE {interaction} *** 0=D1S 1=D1 2=D1' 3(t3=0)
INPUT W.F. {seed} *** 0,1=WF FROM UNIT 10 (1 kicks), 2=NEW Function
OSCILLATOR LENGHT 0 *** 0 BP 1.7510000 BZ 1.7510000
>>>>>>>>>> C O N S T R A I N T S <<<<<<<<<<<
C.O.M. 1 1 0.00000000D+00
{b20}{varN2} >>>>>>>>>> E N D <<<<<<<<<<<<<<<<<<<<<<<<<<< """
# BP 1.7719772 BZ 1.7719772
# BP 1.7185258 BZ 1.7185258 (A=25)

# Variant with the gradient effectively disabled (tiny eta/TSHH values);
# used for "void" steps that just re-evaluate the functional.
temp_noGrad = """NUCLEUS {a:03} He Z= {z:03} >>> HFB OPT <<< {com} COULECH 2
EPSG 0.000001 MAXITER {iters:05} >>> OUTPUT <<< 0 **** TSTG 0
ETMAX 0.0001 ETMIN 0.0001 DMAX 0.01 DMIN 0.01 TSHH 000.1
GOGNY FORCE {interaction} *** 0=D1S 1=D1 2=D1' 3(t3=0)
INPUT W.F. {seed} *** 0,1=WF FROM UNIT 10 (1 kicks), 2=NEW Function
OSCILLATOR LENGHT 0 *** 0 BP 2.0402454 BZ 2.0402454
>>>>>>>>>> C O N S T R A I N T S <<<<<<<<<<<
C.O.M. 1 1 0.00000000D+00
{b20}{varN2} >>>>>>>>>> E N D <<<<<<<<<<<<<<<<<<<<<<<<<<< """

# One-line constraint templates: (proton_flag, neutron_flag, value).
q10_constr_template = "QL 1 {:1} {:1} {:10.8f}D+00\n"
q20_constr_template = "QL 2 {:1} {:1} {:10.8f}D+00\n"
b20_constr_template = "BL 2 {:1} {:1} {:10.8f}D+00\n"
b30_constr_template = "BL 3 {:1} {:1} {:10.8f}D+00\n"
DN2_constr_template = "DN**2 {:1} {:1} {:10.8f}D+00\n"
DJX2_constr_template= "DJX**2 {:1} {:1} {:10.8f}D+00\n"
MSR2_constr_template= "<R**2> {:1} {:1} {:10.8f}D+00\n"
# Center-of-mass correction flags (1-body, 2-body).
com_template = "CM1 {} CM2 {}"
from collections import OrderedDict
import os
import shutil
import subprocess
from _legacy.exe_isotopeChain_axial import DataAxial
import math as mth
import matplotlib.pyplot as plt
import numpy as np
# Name of the HFBAxial executable invoked via the shell in _executeProgram.
HAMIL_AXIAL_PROGRAM = 'HFBaxialMZ3'

# (Z, N) pairs to run; entries are toggled by (un)commenting.
nucleus = [
    #  Z  N
    # (2, 2),
    # (2, 4),
    # (4, 4),
    # (4, 6),
    # # (6, 6),
    # (6, 8),
    # (8, 4),
    # (8, 6),
    # (8, 8),
    # (8, 10),
    # (8, 12),
    #
    (10, 6),
    (10, 8),
    (10, 10),
    (10, 12),
    (10, 14),
    (10, 16)
    #
    # (12, 8),
    # (12, 10),
    # (12, 12),
    # (12, 14),
    # (12, 16),
    # (14, 8),
    # (14, 10),
    # (14, 12),
    # (14, 14),
    # (14, 16),
    #
    # (16, 12),
    # (16, 14),
    # (16, 16),
    # (16, 18),
    # (16, 20),
    # (36, 34),
    # (34, 36),
    # (38, 40),
    # (40, 38),
]
#nucleus = [(8, n) for n in range(6, 15, 2)]

## put here value in axial (divide by 3/5 to fix with taurus q20)
# Currently empty: all entries are commented out.
repeat = {
    # # (2, 2),
    # (2, 4) : 0.1 / 0.6,
    # (4, 4) : 0.4 / 0.6,
    # # (4, 6) : -0.4 / 0.6,
    # # (6, 6) : -0.4 / 0.6,
    # # (6, 8) : -0.1 / 0.6,
    # # (8, 8),
    # (10, 8) : 0.0,
    # # (10, 10): +0.2 / 0.6,
    # (12, 10): +0.3 / 0.6,
    # (14, 12) : 0.23 / 0.6,
    # #(6, 6) : -0.4 / 0.6,
}
def _executeProgram(params, output_filename, q20_const,
                    print_result=True, save_final_wf=True, force_converg=False,
                    noGradient=False):
    """Write the HFBAxial input file, run the external program and parse its output.

    In NOT save_final_wf, the initial wf previous the calculation is restored.

    :param params: dict keyed by Template.* used to fill TEMPLATE/temp_noGrad.
    :param output_filename: file the program's stdout is redirected to.
    :param q20_const: deformation value, used only for naming backup files / logs.
    :param print_result: print a one-line summary of the converged result.
    :param save_final_wf: copy fort.11 (final wf) over fort.10 (initial wf).
    :param force_converg: only save the final wf if the run properly finished.
    :param noGradient: use the gradient-disabled template (void step).
    :return: DataAxial result, or None on any exception.

    NOTE(review): uses the module-level globals `z` and `n` (not params) when
    constructing DataAxial and the backup file names — confirm they are in sync
    with params[Template.z] at every call site.
    """
    res = None
    if params[Template.seed] == 1: print("[WARNING] seed 1 in Axial kicks wf!")
    try:
        status_fin = ''
        text = TEMPLATE.format(**params)
        if noGradient:
            text = temp_noGrad.format(**params)
            #print("\n no Grad\n{}".format(text),'\n')
        with open(DataAxial.INPUT_FILENAME, 'w+') as f:
            f.write(text)
        #_e = subprocess.call('cp fort.10 initial_fort.10', shell=True)
        # Run the Fortran program, feeding the input file and capturing stdout.
        _e = subprocess.call('./{} < {} > {}' # noReaHFBMZ2
                             .format(HAMIL_AXIAL_PROGRAM,
                                     DataAxial.INPUT_FILENAME,
                                     output_filename),
                             shell=True)
        res = DataAxial(z, n, output_filename)
        # Move the output and seed wf to the backup folder; encode q20 in the name
        # (minus sign replaced by '_' to keep file names shell-friendly).
        str_q20 = str(int(1000*q20_const)).replace('-','_')
        folder_dest = os.getcwd()+'/'+DataAxial.BU_folder+'/'
        _e = subprocess.call('mv {} {}'.format(output_filename,
                             folder_dest+output_filename
                             +'_Z{}N{}'.format(z,n)
                             +'_{}'.format(str_q20)),
                             shell=True)
        _e = subprocess.call('cp fort.11 '+folder_dest+
                             'seed_q{}_'.format(str_q20)+
                             '_Z{}N{}'.format(z,n)+'.11',
                             shell=True)
        #_e = subprocess.call('cp fort.11 final_fort.11', shell=True)
        # Clean up scratch fortran unit files.
        _e = subprocess.call('rm fort.38 fort.4* fort.5* fort.6*', shell=True)
        # refresh the initial function to the new deformation
        if save_final_wf and (res.properly_finished or (not force_converg)):
            _e = subprocess.call('rm fort.10', shell=True)
            _e = subprocess.call('cp fort.11 fort.10', shell=True)
            print(" *** exec. [OK] copied the final wf to the initial wf!")
        # else:
        #     _e = subprocess.call('cp initial_fort.10 fort.10', shell=True)
        status_fin = 'X' if not res.properly_finished else '.'
        if print_result:
            print(" {:2} {:2} ( {}) {:9.4f} {:9.4f} {:7.4f} {:5.4f}={:6.2f}"
                  .format(z, n, status_fin, res.E_HFB, res.kin, res.pair,
                          res.beta_isoscalar, res.q20_isoscalar))
    except Exception as e:
        # Broad catch by design: any failure in run/parse/backup yields None so
        # the PES scan can continue with the next deformation.
        print(" >> EXCEP >>>>>>>>>> ")
        print(" >> current b20 =", q20_const)
        print(" > [",e.__class__.__name__,"]:", e, "<")
        if res and res.E_HFB == None and not res.properly_finished:
            print(" > the result is NULL (final_wf wasn't copied to initial_wf)")
        print("> RESULT <DataAxial>:\n",res,"\n END RESULT <")
        print(" << EXCEP <<<<<<<<<< ")
        return None
    return res
def _energyDiffRejectionCriteria(curr_energ, old_energ, old_e_diff,
tol_factor=2.0):
new_e_diff = curr_energ - old_energ
# change in direction of the derivative, reject if difference is > 25%
if new_e_diff * old_e_diff < 0:
return abs(new_e_diff) > 1.5 * abs(old_e_diff)
# reject if new difference is tol_factor greater than the last one.
return abs(new_e_diff) > tol_factor * abs(old_e_diff)
def _set_deform_for_PES(res_0, b_min=-0.3, b_max=0.3, N = 20):
"""
Set an evenly spaced grid, dividing in "oblate" for points to the left
of a b_20 minumum and "prolate" to the right.
In case the seed minumum is outside the range, the old range is shifted
and centered to the new b20.
"""
N = 2 * (N // 2) # only even number N/2
b_range = b_max - b_min
assert b_min < b_max, \
"b_max[{}] needs to be extricly greater than b_min[{}]!".format(b_max, b_min)
dq = b_range / N
dq_decimals = int(mth.ceil(abs(np.log10(dq)))) + 1 # 2 significative decimals
dq = round(dq, dq_decimals)
b = getattr(res_0, 'b20_isoscalar', 0.0) # default 0.0
if b > b_max or b < b_min:
b_max = b + (b_range / 2) # * abs(b_max) / abs(b_max))
b_min = b - (b_range / 2) #* abs(b_min) /abs(b_min))
print("Min/Max :: ", b_min, b_max, b_max - b_min)
# b = round(b_min + (dq * ((b - b_min) // dq)), dq_decimals)
# print("b1=", b1," to ",b)
total_def = np.linspace(b_min, b_max, num=N, endpoint=True)
deform_prolate = list(filter(lambda x: x > b, total_def))
deform_oblate = list(filter(lambda x: x <= b, total_def))
deform_oblate.append(b)
deform_oblate.reverse()
Npro = len(deform_prolate)
Nobl = N - Npro
return deform_oblate, deform_prolate
def mainLinuxEvenlyDeform(z, n, b_min=-0.1, b_max=0.1, N=30, voidDD_path=None):
    """
    Old process that sets an even single-evaluated step over b range.
    voidDD_path is the equivalent of the DataTaurus.export_list_results for the
    output of the final calculation (a second, gradient-free "void" pass).
    """
    #
    #%% Executing the process, run the list of isotopes
    #
    output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
    A = n + z
    HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the buck up folder
    DataAxial.setUpFolderBackUp()
    if os.path.exists(DataAxial.export_list_results):
        os.remove(DataAxial.export_list_results)
    if voidDD_path and os.path.exists(voidDD_path):
        os.remove(voidDD_path)

    results = []
    results_voidStep = []
    print(HEAD)
    constr_N2, constr_DJ2, constr_MSR = '', '', ''
    constr = ''
    # create a spherical seed to proceed
    ## NOTE: spherical constraint fits better with the following constrained
    ## process, avoid (if possible) the first seed to be the a deformed minimum
    # constr = q20_constr_template.format(1,1, 0.0000)
    # constr = b20_constr_template.format(1,0, 0.0000)
    constr += b20_constr_template.format(1,1, b_max-0.01)
    kwargs = {
        Template.com : com_template.format(1,1),
        Template.z : z,
        Template.a : A,
        Template.seed : 2,
        Template.iterartions : 2000,
        Template.b20 : constr, #"",
        Template.hamil : 0,
        Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
    }
    print(" * first convergence (seed2)")
    # Run twice: the second run starts from the wf left by the first.
    _ = _executeProgram(kwargs, output_filename, 0.0)
    res_0 = _executeProgram(kwargs, output_filename, 0.0)
    # Keep a copy of the converged seed so each branch starts from it.
    _e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
    print(" ... done.")
    # ###
    deform_oblate, deform_prolate = _set_deform_for_PES(res_0, b_min,b_max, N)

    for i_deform, deform in enumerate((deform_oblate, deform_prolate)):
        # copy it: restart each branch from the spherical-ish seed.
        _e = subprocess.call('cp initial_Spheric.11 fort.10', shell=True)
        ## ----- execution ----
        for b20_const in deform:
            # create a spherical seed to proceed
            #q20_const *= 2 * np.pi / (np.sqrt(5 * np.pi))
            constr = b20_constr_template.format(1,1, b20_const)
            kwargs = {
                Template.com : com_template.format(1,1),
                Template.z : z,
                Template.a : A,
                Template.seed : 0,
                Template.iterartions : 2000,
                Template.b20 : constr,
                Template.hamil : 0,
                Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
            }
            res = _executeProgram(kwargs, output_filename, b20_const,
                                  print_result=False)
            if res == None:
                continue # dont save empty result
            # Oblate branch grows leftwards so results stay sorted by b20.
            if i_deform == 0:
                results.insert(0, res)
            else:
                results.append(res)

            ## SECOND PROCESS --------------------------------
            if voidDD_path == None:
                continue
            # do a void step to activate DD with no rearrangement
            kwargs = {
                Template.com : com_template.format(1,1),
                Template.z : z,
                Template.a : A,
                Template.seed : 0,
                Template.iterartions : 0,
                Template.b20 : constr,
                Template.hamil : 0,
                Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
            }
            res2 = _executeProgram(kwargs, output_filename+'_VS_', b20_const,
                                   save_final_wf=False, noGradient=True)
            if res2 == None:
                continue # dont save empty result
            if i_deform == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
                results_voidStep.insert(0, res2)
            else:
                results_voidStep.append(res2)
            # intermediate print (export after every point so partial runs survive)
            _exportResult(results, DataAxial.export_list_results)
            if voidDD_path != None:
                _exportResult(results_voidStep, voidDD_path)
    # ## ------ end exec. -----
    _exportResult(results, DataAxial.export_list_results)
    print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
    if results_voidStep:
        _exportResult(results_voidStep, voidDD_path)
        print(" ** generate File VoidStep in:", voidDD_path)
def mainLinuxSweepingPES(z, n, b_min=-0.1, b_max=0.1, N_max=30,
                         invert=False, voidDD_path=None):
    """
    Process that starts from the limit of the PES, advances until the end
    and returns from the limit point; if the surface fell along the way, the
    backward pass only overwrites a point when the new energy is lower (E' < E).
    """
    #
    #%% Executing the process, run the list of isotopes
    #
    output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
    A = n + z
    HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the buck up folder
    DataAxial.setUpFolderBackUp()
    if os.path.exists(DataAxial.export_list_results):
        os.remove(DataAxial.export_list_results)
    if voidDD_path and os.path.exists(voidDD_path):
        os.remove(voidDD_path)

    N_max += 1
    # Sweep direction: start from b_min unless invert is set.
    b20_base = b_min if not invert else b_max
    b20_lim = b_max if not invert else b_min
    results = [None] * N_max
    results_voidStep = [None] * N_max
    print(HEAD)
    constr_N2, constr_DJ2, constr_MSR = '', '', ''
    # create a spherical seed to proceed
    ## NOTE: spherical constraint fits better with the following constrained
    ## process, avoid (if possible) the first seed to be the a deformed minimum
    # constr = q10_constr_template.format(1,0, 0.0)
    # constr += q10_constr_template.format(0,1, 0.0)
    constr = b20_constr_template.format(1,1, b20_base)
    kwargs = {
        Template.com : com_template.format(1,1),
        Template.z : z,
        Template.a : A,
        Template.seed : 2,
        Template.iterartions : 2000,
        Template.b20 : constr, #"",
        Template.hamil : 8,
        Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
    }
    print(" * first convergence (seed2)")
    # Run twice: the second run starts from the wf left by the first.
    _ = _executeProgram(kwargs, output_filename, 0.0)
    res_0 = _executeProgram(kwargs, output_filename, 0.0)
    _e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
    print(" ... done.")
    # ###
    deform_array = list(np.linspace(b20_base, b20_lim, num=N_max, endpoint=True))

    for reverse in (0, 1):
        print('\n==== REVERSE READING [', bool(reverse), '] ==================\n')
        for i in range(N_max):
            # Forward pass indexes 0..N-1; backward pass -1..-N.
            i2 = i
            if reverse:
                i2 = - i - 1
            b20_const = deform_array[i2]
            constr = b20_constr_template.format(1,1, b20_const)
            kwargs = {
                Template.com : com_template.format(1,1),
                Template.z : z,
                Template.a : A,
                Template.seed : 0,
                Template.iterartions : 2000,
                Template.b20 : constr,
                Template.hamil : 8,
                Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
            }
            res = _executeProgram(kwargs, output_filename, b20_const,
                                  print_result=True)
            if res == None:
                continue # dont save empty result
            if reverse:
                if results[i2] != None:
                    if results[i2].E_HFB < res.E_HFB:
                        continue # don't save, new energy is bigger
            results[i2] = res
            # includes direct result, reverse valid over None, and E' < E
            # intermediate print
            _exportResult(results, DataAxial.export_list_results)

            ## SECOND PROCESS --------------------------------
            if voidDD_path != None:
                ## do a void step to activate DD with no rearrangement
                kwargs = {
                    Template.com : com_template.format(1,1),
                    Template.z : z,
                    Template.a : A,
                    Template.seed : 0,
                    Template.iterartions : 0,
                    Template.b20 : constr,
                    Template.hamil : 0,
                    Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
                }
                res2 = _executeProgram(kwargs, output_filename+'_VS_', b20_const,
                                       save_final_wf=False, noGradient=True)
                if res2 == None:
                    continue # dont save empty result
                if reverse:
                    if results_voidStep[i2] != None:
                        if results_voidStep[i2].E_HFB < res2.E_HFB:
                            continue # don't save, new energy is bigger
                results_voidStep[i2] = res2
                # intermediate print
                if voidDD_path != None:
                    _exportResult(results_voidStep, voidDD_path)
    # ## ------ end exec. -----
    _exportResult(results, DataAxial.export_list_results)
    print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
    if voidDD_path != None:
        _exportResult(results_voidStep, voidDD_path)
        print(" ** generate File VoidStep in:", voidDD_path)
def mainLinuxSecurePES(z, n, b_min=-0.1, b_max=0.1, N_base=50, b20_base=None,
                       voidDD_path=None):
    """
    Process that evaluates the deformation limits fitting the q20 to not
    phase breaking; q20 is reduced up to 2^6 of the evenly spaced step.
    The criteria to continue the iteration is the HFB/Kin energy jump
    for the new step (pair omitted since pair=0.0 is common).

    !! Note the increment of N_base will just increase the precision,
    dq_base will be progressively smaller (if it's stuck in a point you will
    need to increase the factor of the N_MAX limit).
    """
    #
    #%% Executing the process, run the list of isotopes
    #
    output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
    A = n + z
    HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the buck up folder
    DataAxial.setUpFolderBackUp()
    if os.path.exists(DataAxial.export_list_results):
        os.remove(DataAxial.export_list_results)
    if voidDD_path and os.path.exists(voidDD_path):
        os.remove(voidDD_path)

    results = []
    results_voidStep = []
    ## definitions for the iteration
    dq_base = (b_max - b_min) / N_base
    b20_base = 0.0000 if not b20_base else b20_base
    ener_base = None
    N_MAX = 70 * N_base # 7 * N_base
    dqDivisionMax = 6
    print(HEAD)
    constr_N2, constr_DJ2, constr_MSR = '', '', ''
    # create a spherical seed to proceed
    ## NOTE: spherical constraint fits better with the following constrained
    ## process, avoid (if possible) the first seed to be the a deformed minimum
    # constr = b20_constr_template.format(1,1, 0.0000)
    constr = b20_constr_template.format(1,1, b20_base)
    # constr_N2 = DN2_constr_template.format(1,0,2.6925926)
    # constr_N2+= DN2_constr_template.format(0,1,2.7390982)
    kwargs = {
        Template.com : com_template.format(1,0),
        Template.z : z, Template.a : A,
        Template.seed : 2,
        Template.iterartions : 2000,
        Template.b20 : constr, #"", #
        Template.hamil : 8,
        Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
    }
    print(" * first convergence (seed2)")
    for iter_ in range(1, 4):
        res_0 = _executeProgram(kwargs, output_filename, 0.0)
        if res_0.properly_finished:
            break
        else:
            if iter_ == 3:
                print("[ERROR], after 4 tries the calculation STOP for", z, n)
                return
            # NOTE(review): Template defines neither 'eta_grad' nor
            # 'iterations' (only 'iterartions'); this retry branch will raise
            # AttributeError if it is ever reached. It looks copied from the
            # Taurus variant of this script — confirm and fix.
            kwargs[Template.eta_grad] -= 0.007 * iter_
            kwargs[Template.eta_grad] = max(kwargs[Template.eta_grad], 0.001)
            kwargs[Template.iterations] += 150 * iter_
            print(" [WARNING] 1st step non converged, next eta:", iter_, kwargs[Template.eta_grad])
    # First convergence done
    ener_base = float(res_0.E_HFB)
    print("[Ener Base] =", ener_base)
    _e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
    print(" ... done.")
    results.append(res_0)

    ## WARNING! compromising constraint
    b20_base = float(res_0.beta_isoscalar)
    print(" WARNING! compromising start point b20=", b20_base)
    # ###
    for prolate, b_lim in enumerate((b_min, b_max)): #prolate = 1
        # copy the first function.
        _e = subprocess.call('cp initial_Spheric.11 fort.10', shell=True)
        b20_i = b20_base
        energ = ener_base
        curr_energ = ener_base
        e_diff = 10.0 #
        i = 0
        div = 0
        print("runing deform[",prolate,"] up to:", b_lim, N_MAX)

        # Adaptive walk: step dq_base/2^div towards b_lim, shrinking the step
        # (div up to dqDivisionMax) whenever a step is rejected.
        while (abs(b20_i) < abs(b_lim)) and i < N_MAX:
            b20 = b20_i - (((-1)**(prolate))*(dq_base / (2**div)))
            # execute but do not save the final function
            constr = b20_constr_template.format(1,1, b20)
            kwargs = {
                Template.com : com_template.format(1,0),
                Template.z : z,
                Template.a : A,
                Template.seed : 0,
                Template.iterartions : 2000,
                Template.b20 : constr,
                Template.hamil : 8, # 0,#
                Template.varN2: ""
            }
            res = _executeProgram(kwargs, output_filename, b20,
                                  print_result=True, save_final_wf=False,
                                  force_converg=True)
            ## Case 1: the program broke and the result is NULL
            if res == None:
                i += 1
                if div < dqDivisionMax:
                    # reject(increase division)
                    div += 1
                    print(" * reducing b20 increment(1): [{}] Ei{:9.2f} - Eim1{:9.2f} ={:8.5f} > {:8.5f}"
                          .format(div, curr_energ, energ, curr_energ - energ, e_diff))
                    continue
                else:
                    # accept and continue (DONT copy final function)
                    # increase the step for valid or deformation precision overflow
                    div = max(0, div - 1) ## smoothly recover the dq
                    e_diff = curr_energ - energ
                    energ = curr_energ
                    b20_i = b20
                    print(" * Failed but continue: DIV{} DIFF{:10.4f} ENER{:10.4f} B{:5.3f}"
                          .format(div, e_diff, energ, b20_i))
                continue # cannot evaluate next Step or save results

            ## Case 2: the program did'nt broke and the result has values
            # take the E_HFB energy and compare the previous (acceptance criteria)
            curr_energ = float(res.E_HFB)
            i += 1
            if ((div < dqDivisionMax)
                and (_energyDiffRejectionCriteria(curr_energ, energ, e_diff,
                                                  tol_factor= 2.0)
                     or (not res.properly_finished))):
                # reject(increase division)
                div += 1
                print(" * reducing b20 increment(2) [i{}]: [{}] Ei{:9.2f} - Eim1{:9.2f} ={:8.5f} > ({:8.5f}, {:8.5f})"
                      .format(i, div, curr_energ, energ, curr_energ - energ,
                              3.0*e_diff, 1.5*e_diff))
                continue
            else:
                print(" * [OK] step accepted DIV:{} CE{:10.4} C.DIFF:{:10.4}"
                      .format(div, curr_energ, curr_energ - energ))
                # accept and continue (copy final function)
                _e = subprocess.call('cp fort.11 fort.10', shell=True)
                # increase the step for valid or deformation precision overflow
                div = max(0, div - 2) ## smoothly recover the dq
                e_diff = curr_energ - energ
                energ = curr_energ
                b20_i = b20
                print(" * [OK] WF directly copied [i{}]: DIV:{} DIFF{:10.4f} ENER{:10.4f} B{:5.3f}"
                      .format(i,div, e_diff, energ, b20_i))
                if prolate == 0:
                    results.insert(0, res)
                else:
                    results.append(res)

            ## SECOND PROCESS --------------------------------
            if voidDD_path != None:
                # do a void step to activate DD with no rearrangement
                kwargs = {
                    Template.com : com_template.format(1,1),
                    Template.z : z,
                    Template.a : A,
                    Template.seed : 0,
                    Template.iterartions : 0,
                    Template.b20 : constr,
                    Template.hamil : 0,
                    Template.varN2: ""
                }
                res2 = _executeProgram(kwargs, output_filename+'_VS', b20,
                                       save_final_wf=False, noGradient=True)
                if res2 == None:
                    continue # dont save empty result
                if prolate == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
                    results_voidStep.insert(0, res2)
                else:
                    results_voidStep.append(res2)
            print("-------------------------------------------------------------------------------")
            print()
            # intermediate print
            _exportResult(results, DataAxial.export_list_results)
            if voidDD_path != None:
                _exportResult(results_voidStep, voidDD_path)
    # ## ------ end exec. -----
    _exportResult(results, DataAxial.export_list_results)
    print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
    if results_voidStep:
        _exportResult(results_voidStep, voidDD_path)
        print(" ** generate File VoidStep in:", voidDD_path)
def _exportResult(results, path_):
data = []
for res in results:
if res:
line = res.getAttributesDictLike
data.append(line+'\n')
with open(path_, 'w+') as f:
f.writelines(data)
def mainLinux(z, n):
    """Run a q20-constrained PES for (z, n): oblate and prolate branches,
    each point followed by a short second run, appending to the export file."""
    #
    #%% Executing the process, run the list of isotopes
    #
    output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
    A = n + z
    HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the buck up folder
    DataAxial.setUpFolderBackUp()
    if os.path.exists(DataAxial.export_list_results):
        os.remove(DataAxial.export_list_results)

    results = []
    print(HEAD)
    constr_N2, constr_DJ2, constr_MSR = '', '', ''
    # q20 grids (in fm^2), one branch per sign.
    deform_prolate = np.linspace(0.0, 40.0, num=45, endpoint=True)
    deform_oblate = np.linspace(0.0,-40.0, num=45, endpoint=True) #18,
    for i_deform, deform in enumerate((deform_oblate, deform_prolate)):
        # create a spherical seed to proceed
        constr = q20_constr_template.format(1,1, 0.000)
        # constr += b20_constr_template.format(0,1, b20_const/2)
        kwargs = {
            Template.com : com_template.format(1,1),
            Template.z : z,
            Template.a : A,
            Template.seed : 2,
            Template.iterartions : 2000,
            Template.b20 : constr,
            Template.hamil : 0,
            Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
        }
        print(" * first convergence (seed2)")
        _ = _executeProgram(kwargs, output_filename, 0.0)
        print(" ... done.")
        ## ----- execution ----
        for q20_const in deform:
            # create a spherical seed to proceed
            #q20_const *= 2 * np.pi / (np.sqrt(5 * np.pi))
            constr = q20_constr_template.format(1,1, q20_const)
            kwargs = {
                Template.com : com_template.format(1,1),
                Template.z : z,
                Template.a : A,
                Template.seed : 1,
                Template.iterartions : 2000,
                Template.b20 : constr,
                Template.hamil : 0,
                Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
            }
            res = _executeProgram(kwargs, output_filename, q20_const,
                                  print_result=False)
            if res == None:
                continue # dont save empty result
            # do a void step to activate DD with no rearrangement
            kwargs = {
                Template.com : com_template.format(1,1),
                Template.z : z,
                Template.a : A,
                Template.seed : 1,
                Template.iterartions : 500,
                Template.b20 : constr,
                Template.hamil : 0,
                Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
            }
            # NOTE(review): only this second run's result is stored.
            res = _executeProgram(kwargs, output_filename, q20_const)
            if i_deform == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
                results.insert(0, res)
            else:
                results.append(res)
    # ## ------ end exec. -----
    data = []
    for res in results:
        # line = ', '.join([k+' : '+str(v) for k,v in res.__dict__.items()])
        line = res.getAttributesDictLike
        data.append(line+'\n')
    # for i, r in enumerate(results):
    #     print("{} : {},".format(i, r.r_isoscalar))
    with open(DataAxial.export_list_results, 'a+') as f:
        f.writelines(data)
#%% main
# Default isotope and file-name suffix used to build the export paths.
z = 12
n = 12
output_filename = 'aux_output'

tail = ''
# tail = 'B1'
# tail = '1GCOM0'
# tail = 'B1COM0'
# tail = 'D1SnoR'
# tail = 'D1S_voidDD'
tail = 'D1S'

# Build the (z, n) run list; overwrites the module-level `nucleus` above.
nucleus = []
for z in range(10,15, 2):
    for n in range(max(6, z-2), 17, 2):
        nucleus.append((z, n))

DataAxial.export_list_results = "export_PESz{}n{}Axial{}.txt".format(z,n,tail)

if __name__ == '__main__':
    output_filename = DataAxial.output_filename_DEFAULT
    # Heuristic platform switch: non-Windows paths run the calculations,
    # Windows ('C:' cwd) only plots previously exported results.
    if not os.getcwd().startswith('C:'):
        print()
        print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
        print('  Running PES with HFBAxial:', HAMIL_AXIAL_PROGRAM)
        print('  !!! CHECK, CHECK MZ:', HAMIL_AXIAL_PROGRAM.upper())
        print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
        print()

        for z, n in nucleus:
            print("PES for Z={} N={}".format(z,n))
            output_filename = 'aux_output'
            DataAxial.export_list_results = "export_PESz{}n{}Axial{}.txt".format(z,n, tail)
            voidDD_path = "export_PESz{}n{}Axial{}_voidDD.txt".format(z,n, tail)

            # mainLinux(z, n)
            # mainLinuxEvenlyDeform(z, n, -0.28, 0.28, 100, voidDD_path)
            # mainLinuxSecurePES(z, n, -0.30, 0.32, 100,
            #                    voidDD_path=voidDD_path, b20_base= 0.3093)
            # mainLinuxSecurePES(z, n, -0.30, 0.30, 100, b20_base=-0.29)
            mainLinuxSweepingPES(z, n, -0.6, 0.6, 300, False, None)
    else:
        #%% process in windows: read an exported PES and plot observables.
        results_axial = []
        import_file_Axi = 'BU_results_old/export_PESz{}n{}Axial{}.txt'.format(z, n, tail)
        with open(import_file_Axi, 'r') as f:
            data = f.readlines()
            for line in data:
                res = DataAxial(None, None, None, True)
                res.setDataFromCSVLine(line)
                results_axial.append(res)

        for attr_ in (
            'E_HFB',
            'kin',
            'var_n', 'pair',#, 'properly_finished'
            # 'Jx_var',
            # 'Jz',
            # 'r_isoscalar',
        ):
            ## plot energies
            x_tau, y_tau = [], []
            x_ax, y_ax = [], []
            for r in results_axial:
                x_ax.append(r.q20_isoscalar)
                if attr_ == 'r_isoscalar':
                    y_ax.append(getattr(r, attr_, 0.0))#/(r.n + r.z))
                else:
                    y_ax.append(getattr(r, attr_, 0.0))#/(r.n + r.z))
            # NOTE(review): misspelled 'properly_finshed' never matches attr_,
            # so this bool-to-int conversion is dead code — confirm intent.
            if attr_ == 'properly_finshed':
                y_ax = [1 if p == 'True' else 0 for p in y_ax]
            plt.figure()
            plt.xlabel(r"$Q_{20} [fm^2]$")
            plt.plot(x_ax, y_ax, 'o-b', label="HFB axial")
            plt.title(attr_+" [Z:{} N:{}] ".format(z, n)+" B1 no LS")
            plt.legend()
            plt.tight_layout()
            plt.show()
| migueldelafuente1/taurus_tools | _legacy/exe_q20pes_axial.py | exe_q20pes_axial.py | py | 33,646 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "_legacy.exe_isotopeChain_axial.DataAxial.INPUT_FILENAME",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "_legacy.exe_isotopeChain_axial.DataAxial",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "subprocess.call",
"line_number": 150,... |
2277882687 | import random
import time
import discord
from discord.ext import commands
import utils
class Misc(commands.Cog):
    """Miscellaneous user-facing commands: profile, ping, hello, math, invite."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def profile(self, ctx, user: discord.Member = None):
        """ Get information about a Discord user.

        Argument 'user', if specified, is the user to get information about.
        'user' can be submitted using the users name, nickname, id or a mention.
        If 'user' isn't entered it will display your profile.

        Included information:
            Nickname
            Name
            ID
            Account creation date
            Date of joining the server
            Current status (online, offline, away, etc)
            The top role in the server
        """
        user = ctx.author if user is None else user
        # Create the embed object for the message
        em = utils.embed(title=f"{user.display_name} #{user.discriminator}", thumbnail=user.avatar_url,
                         colour=user.colour)
        # Add fields containing all the information
        em.add_field(name="Name", value=user.name)
        em.add_field(name="Id", value=user.id)
        em.add_field(name="Created", value=utils.format_time(user.created_at))
        em.add_field(name="Joined", value=utils.format_time(user.joined_at))
        em.add_field(name="Status", value=user.status)
        em.add_field(name="Top role", value=user.top_role)

        # Adding user activity information
        if user.activity is not None:
            activity = user.activity.type.name.title()
            activity_name = user.activity.name
            # Formatting for if the activity is listening to make grammar correct
            activity = activity + ' to' if activity == 'Listening' else activity

            # Add support for Spotify by displaying the song title and the artist
            if activity_name == 'Spotify':
                activity_name += f': {user.activity.title} by {user.activity.artist}'
            em.add_field(name=activity, value=activity_name)

        await ctx.send(embed=em)

    @commands.command()
    async def ping(self, ctx):
        """ Test the latency to the bot and see how fast it responds """
        # Create embed for message
        em = utils.embed(title=f"Ping", description="Pinging")
        # Time the round-trip of sending the message.
        start = time.perf_counter()
        message = await ctx.send(embed=em)
        end = time.perf_counter()
        # Work out Time difference and convert to milliseconds
        duration = (end - start) * 1000
        em.description = f'Pong! {round(duration, 2)}ms'
        await message.edit(embed=em)

    @commands.command(aliases=["hi", "sup", "hey", "yo", "howdy"])
    async def hello(self, ctx):
        """ Say hello and get a random greeting from the bot."""
        # Pick a random greeting to reply with from the aliases
        greeting = random.choice([x.aliases for x in self.bot.commands if x.name == 'hello'][0]).title()
        # Send greeting message
        await ctx.send(embed=utils.embed(title="Hello", description=f"{greeting} {ctx.author.mention}!",
                                         thumbnail='https://static.tumblr.com/gwp7jk3/QXAma9845/k-on_wave.gif'))

    @commands.command(aliases=["calc"])
    async def math(self, ctx, equation: str):
        """ Get the result to basic arithmetic. """
        # SECURITY NOTE(review): eval() executes arbitrary Python supplied by a
        # Discord user — this is remote code execution, not just arithmetic.
        # Replace with ast.literal_eval or a dedicated expression parser.
        try:
            # Send result
            await ctx.send(embed=utils.embed(title="Math", description=f"{equation.strip()} = {eval(equation)}"))
        except SyntaxError:
            # If a syntax error occured print the result as "SyntaxError"
            await ctx.send(embed=utils.embed(title="Math", description=f"{equation.strip()} = SyntaxError"))

    @commands.command(aliases=["inv"])
    async def invite(self, ctx):
        """ Get an invite link for the server.

        If a server doesn't have an invite link then a new one will be generated.
        Otherwise an existing one will be displayed.
        """
        # Check for invite links; create one on the first channel if none exist.
        # NOTE(review): assumes channels[0] supports create_invite — confirm for
        # guilds whose first channel is a category/voice channel.
        if len(await ctx.guild.invites()) < 1:
            await ctx.guild.channels[0].create_invite()
        # Send invite link
        await ctx.send((await ctx.guild.invites())[0].url)
def setup(bot):
bot.add_cog(Misc(bot))
| SkippyTheSnake/Discord-bot | cogs/misc.py | misc.py | py | 3,962 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name":... |
25231410593 | #! /usr/bin/env python
# encoding: utf-8
# vim: ai ts=4 sts=4 et sw=4
##
##
## @author Nadia
## nadia@gmail.com/joel@gmail.com
##
import MySQLdb
from datetime import datetime
from mako.template import Template
from creche import settings
class SysEventService:
"""Class that will be delegated with creating events in the database, after these events have been detected in other places in the system"""
def __init__(self):
self.connection = MySQLdb.connect (host=settings.DATABASES['default']['HOST'],
user=settings.DATABASES['default']['USER'],
passwd=settings.DATABASES['default']['PASSWORD'],
db=settings.DATABASES['default']['NAME'])
self.connection.set_character_set('utf8')#important because the data we are dealing with is unicode
def createSysEvent(self, params, expressions=None):
"""Utility method that will be utilized for making an entry about an event"""
cursor = self.connection.cursor()
params['date_generated'] = datetime.now()
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
params['processed'] = False
#in the params dict we expect the name to have been specified
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO event (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
eventId = cursor.lastrowid
result = {}
result['eventId'] = eventId
cursor.close()
if expressions:
expressions['event_type_id'] = params['event_type_id']
result = self.__scheduleGenericNotification(eventId, expressions)
self.connection.commit()
return result
def scheduleEmailRecipient(self, params, language):
"""Utility method that schedules email recipients"""
cursor = self.connection.cursor()
event = None
subject_suffix = ''
#for i in range(1, 10000):
#we must get the from email and the proposed subject
cursor.execute("""SELECT et.from_email, et.email_subject
FROM event_type et
WHERE et.id = %d""" % (params['event_type_id']))
event_type = cursor.fetchone()
from_email = event_type[0]
subject = event_type[1]
if params['event_type_id'] != settings.APPLICATION_SETTINGS['TEST_EMAIL']:
cursor.execute("""SELECT e.id
FROM event e
WHERE e.entity_reference_id = %d AND e.event_type_id = %d""" % (params['entity_reference_id'], params['event_type_id']))
event = cursor.fetchone()
if event:
eventId = event[0]
else:
params['date_generated'] = datetime.now()
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
params['processed'] = True#it ensure that we know that this is an email not an event
#in the params dict we expect the name to have been specified
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO event (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
cursor.execute("""SELECT from_email
FROM event_type
WHERE id = %d""" % (params['event_type_id']))
event_type = cursor.fetchone()
eventId = cursor.lastrowid
from_email = event_type[0]
self.connection.commit()#lets commit this asap so that any other preparation request doesn't start queueing guys again.
if params['event_type_id'] == settings.APPLICATION_SETTINGS['TEST_EMAIL']:
#lets add the No. of tests to the subject of the test email
cursor.execute("""SELECT COUNT(e.id)
FROM event_type et INNER JOIN event e ON e.event_type_id = et.id
WHERE e.event_type_id = %d AND e.entity_reference_id = %d""" % (params['event_type_id'], params['entity_reference_id']))
test_event = cursor.fetchone()
subject_suffix = ' [Test No. ' + str(test_event[0]) + ']'
#schedule the emails
expressions = {}
expressions['from_email'] = from_email
expressions['message_body'] = ' '
expressions['subject'] = subject + ' ' + str(params['entity_reference_id']) + subject_suffix
expressions['scheduled_for_relay'] = False
expressions['event_id'] = eventId
expressions['last_updated'] = datetime.now()
expressions['date_created'] = datetime.now()
cursor.execute("""
SELECT wu.mail
FROM web_users wu
INNER JOIN system_user_has_event_type suhet ON suhet.system_user_id = wu.uid
INNER JOIN event_type et ON suhet.event_type_id = et.id
WHERE et.id = %d""" % params['event_type_id'])
subscribers = cursor.fetchall()
for user in subscribers:
#check to see if we already have queued this recipient/subscriber
em = user[0].replace("'", "\\'")
cursor.execute("SELECT id FROM email_schedule WHERE event_id = %d AND to_email = '%s'" % (eventId, em))
recipient = cursor.fetchone()
if not recipient:
expressions['to_email'] = user[0]
fields = ', '.join(expressions.keys())
values = ', '.join(['%%(%s)s' % x for x in expressions])
query = 'INSERT INTO email_schedule (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, expressions)
self.connection.commit()#lets commit this asap so that any other preparation request doesn't start queueing guys again.
#get the total number of subscribers
cursor.execute("SELECT count(*) FROM email_schedule WHERE event_id = %d " % (eventId))
subscriber_count = cursor.fetchone()
#get the total number of queued subscribers
cursor.execute("SELECT count(*) FROM email_schedule WHERE event_id = %d AND delivery_date IS NULL" % (eventId))
queued_subscribers = cursor.fetchone()
cursor.close()
self.connection.commit()
self.connection.close()
return [str(subscriber_count[0]), str(queued_subscribers[0]), eventId]
def __scheduleGenericNotification(self, eventId, expressions):
"""Schedule email for any user who would wish to be notified of a notification."""
#first pick the template path that we are to use for building the email body
cursor = self.connection.cursor()
cursor.execute("SELECT template_path, from_email FROM event_type et INNER JOIN event e ON e.event_type_id = et.id WHERE e.id = %d" % eventId)
record = cursor.fetchone()
template = Template(filename=settings.APPLICATION_SETTINGS['COREAPP_HOME'] + record[0], input_encoding='utf-8')
template.output_encoding = 'utf-8'
params = {}
params['from_email'] = record[1]
params['subject'] = 'Creche Parentale Notification'
params['scheduled_for_relay'] = True
params['event_id'] = eventId
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
cursor.execute("""
SELECT wu.mail, wud.full_name
FROM web_users wu
INNER JOIN system_user_has_event_type suhet ON suhet.system_user_id = wu.uid
INNER JOIN web_user_detail wud ON wud.user_id = wu.uid
INNER JOIN event_type et ON suhet.event_type_id = et.id
WHERE et.id = %d""" % expressions['event_type_id'])
subscribers = cursor.fetchall()
for user in subscribers:
recipient = 'User'
if user[1]:
recipient = user[1]
expressions['recipient'] = recipient
params['message_body'] = template.render(params=expressions)#message to be relayed to the subscribers
params['to_email'] = user[0]
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO email_schedule (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
cursor.close()
return params
| projet2019/Creche_Parentale | creche/coreapp/service/sys_event_service.py | sys_event_service.py | py | 8,574 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "MySQLdb.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "creche.settings.DATABASES",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "creche.settings",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "creche... |
10819501559 | import yaml
import librosa
import numpy as np
import os
sr = 22050
namesong = 'LizNelson_Rainfall'
def merge_stems(namesong):
# Merge all instrumental stems into 1 mix and all vocal stems into 1 mix
stream = open("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_METADATA.yaml", "r")
docs = yaml.load_all(stream)
list_vocal = []
list_instru = []
for doc in docs:
for k, v in doc.items():
if k == 'stems':
for cle, valeur in v.items():
for items in valeur.items():
if items[0] == 'instrument':
if "singer" in items[1]:
y, sr = librosa.load("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_STEMS/" + namesong + "_STEM_" + cle[1:3] + ".wav")
if max(abs(y)) != 0:
y = y / max(abs(y))
list_vocal.append(y)
else:
y, sr = librosa.load("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_STEMS/" + namesong + "_STEM_" + cle[1:3] + ".wav")
if max(abs(y)) != 0:
y = y / max(abs(y))
list_instru.append(y)
vocal_sum = np.zeros(len(y))
instru_sum = np.zeros(len(y))
for i in range(len(list_vocal)):
vocal_sum += list_vocal[i]
for j in range(len(list_instru)):
instru_sum += list_instru[j]
if max(abs(vocal_sum)) != 0:
vocal_sum = vocal_sum / max(abs(vocal_sum))
if max(abs(instru_sum)) != 0:
instru_sum = instru_sum / max(abs(instru_sum))
mix_sum = np.zeros(len(y))
for k in range(len(y)):
mix_sum[k] = vocal_sum[k] + instru_sum[k]
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_VOCALMIX.wav", vocal_sum, sr)
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_INSTRUMIX.wav", instru_sum, sr)
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_MIX_SUM.wav", mix_sum, sr)
return 0
def show_intrumental():
a = 0
for namesong in os.listdir("./MedleyDB_sample/Audio/"):
stream = open("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_METADATA.yaml", "r")
docs = yaml.load_all(stream)
for doc in docs:
for k, v in doc.items():
if k == "instrumental":
if v == "yes":
print(namesong)
#if k == "genre":
#if v == "Singer/Songwriter":
#print(namesong, v)
#show_intrumental() | moulinleo/Voice-Isolation | merge_stems.py | merge_stems.py | py | 2,850 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "yaml.load_all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
30881965405 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
version = '0.9.4'
long_description = (
open('README.rst').read()
+ '\n' +
open(os.path.join('docs', 'HISTORY.rst')).read()
+ '\n')
setup(name='plone.jsonapi.routes',
version=version,
description="Plone JSON API -- Routes",
long_description=long_description,
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Framework :: Plone",
"Framework :: Plone :: 4.3",
"Framework :: Plone :: 5.0",
"Framework :: Plone :: 5.1",
"Framework :: Zope2",
],
keywords='',
author='Ramon Bartl',
author_email='rb@ridingbytes.com',
url='https://github.com/collective/plone.jsonapi.routes',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['plone', 'plone.jsonapi'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.api',
'plone.jsonapi.core>=0.6',
# -*- Extra requirements: -*-
],
extras_require={
'test': [
'plone.app.testing',
'unittest2',
'robotsuite',
'robotframework-selenium2library',
'plone.app.robotframework',
'robotframework-debuglibrary',
]
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| collective/plone.jsonapi.routes | setup.py | setup.py | py | 1,641 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",... |
12816084120 | import requests
import os
from dotenv import load_dotenv
class YaUploader:
BASE_URL = 'https://cloud-api.yandex.net/v1/disk/resources'
GET_FILES = '/files'
UPLOAD_LINK = '/upload'
def __init__(self, token) -> None:
self.token = token
def get_headers(self):
headers = {
"Authorization": self.token,
"Content-type": "application/json"
}
return headers
def get_files(self):
url = self.BASE_URL + self.GET_FILES
response = requests.get(url, headers=self.get_headers())
response.raise_for_status()
return response.json()
def _get_upload_link(self, params):
url = self.BASE_URL + self.UPLOAD_LINK
response = requests.get(url, headers=self.get_headers(), params=params)
response.raise_for_status()
response_body = response.json()
href = response_body.get('href', '')
return href
def upload(self, filename, path_to_file):
params = {
"path": path_to_file,
"overwrite": "true"
}
url = self._get_upload_link(params)
response = requests.put(
url,
headers=self.get_headers(),
params=params,
data=open(filename, 'rb')
)
response.raise_for_status()
if response.status_code == 201:
print(f'{params["path"]} successfully created!')
if __name__ == '__main__':
load_dotenv()
TOKEN = os.getenv('YA_TOKEN')
file_name = 'test.txt'
path_to_file = r'netology/' + file_name
ya = YaUploader(TOKEN)
ya.upload(file_name, path_to_file)
| SergeyMMedvedev/8_api_requests | task_2.py | task_2.py | py | 1,652 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line... |
33126934610 | import pandas as pd
from tools.readFile import read_excl
# 读取excel中指定列整列元素,返回一个集合
def readExcelData(filePath, column):
df = pd.read_excel(filePath, usecols=[column - 1]) # 指定读取的列
df_list = df.values.tolist()
backList = []
for i in df_list:
backList.append(i[0])
if len(backList) == 0:
data = read_excl(filePath) # 文件位置
feature1 = data[:, column - 1:column]
m = 0
for i in feature1:
tmpKey = str(feature1[m][0])
backList.append(tmpKey)
m += 1
else:
return backList
| linhe-demo/sync_dataTable | tools/readExcel.py | readExcel.py | py | 632 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "pandas.read_excel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tools.readFile.read_excl",
"line_number": 15,
"usage_type": "call"
}
] |
17573160892 | from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import connectToMySQL
import json
app = Flask(__name__)
@app.route("/")
def index():
mysql = connectToMySQL("leads_and_clients_db")
query = "SELECT concat(clients.first_name, ' ', clients.last_name) as name, count(leads.client_id) as leads FROM clients JOIN leads ON leads.client_id = clients.id GROUP BY leads.client_id"
all_leads = mysql.query_db(query)
print(all_leads)
total_leads = 0
for each in all_leads:
total_leads += int(each['leads'])
print("*************************total number of leads**********************",total_leads)
percentage = int(all_leads[0]["leads"])/total_leads
print(round(percentage, 3))
return render_template("index.html", template_all_leads = all_leads, template_total_leads = total_leads, json_leads = map(json.dumps, all_leads))
if __name__ == "__main__":
app.run(debug = True) | aaronfennig/pythonDjango | flask/flask_mysql/leads_and_clients/server.py | server.py | py | 963 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mysqlconnection.connectToMySQL",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.d... |
72782974907 | import _pickle as pickle
import numpy as np
from mtqt_source import MTQTSource
from pathlib import Path
from pyrocko import orthodrome
import tensorflow_probability as tfp
import waveform_processing as wp
tfd = tfp.distributions
pi = np.pi
def find_closest_grid_point(lat_ev, lon_ev, depth_ev, path_models=None,
gf_store_id=None, min_dist=7000.,
min_dist_depth=10000.):
pathlist = Path(path_models).glob('model_%s_*' % gf_store_id)
k = 0
for path in sorted(pathlist):
path = str(path)
model_coordinates = path.split("_")
lat = float(model_coordinates[3])
lon = float(model_coordinates[4])
depth = float(model_coordinates[5])
dist = orthodrome.distance_accurate50m(lat_ev, lon_ev, lat, lon)
if dist < min_dist:
min_dist = dist
dist_depth = abs(depth-depth_ev)
if dist_depth < min_dist_depth:
min_dist_depth = dist_depth
best_model = path
k = k+1
return best_model
def grid_points_in_error_ellipses(lat_ev, lon_ev, depth_ev, error_h, error_z,
path_models=None, gf_store_id=None):
pathlist = Path(path_models).glob('model_%s_*' % gf_store_id)
region = orthodrome.radius_to_region(lat_ev, lon_ev, error_h)
grid_points = []
for path in sorted(pathlist):
path = str(path)
model_coordinates = path.split("_")
lat = float(model_coordinates[3])
lon = float(model_coordinates[4])
depth = float(model_coordinates[5])
dists = orthodrome.distance_accurate50m_numpy(lat_ev, lon_ev, lat, lon)
if dists < error_h:
if depth_ev-error_z < depth and depth_ev+error_z > depth:
grid_points.append(path)
return grid_points
def find_event(path_events, time):
pathlist = Path(path_events).glob('ev_*')
for path in sorted(pathlist):
path = str(path)+"/"
event = model.load_events(path+"event.txt")[0]
if time-10 < event.time and time+10 > event.time:
return event, path
def loss_function_negative_log_likelihood():
neg_log_likelihood = lambda x, rv_x: -rv_x.log_prob(x)
return neg_log_likelihood
def posterior_mean_field(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
c = np.log(np.expm1(1.))
return tf.keras.Sequential([
tfp.layers.VariableLayer(2 * n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t[..., :n],
scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
reinterpreted_batch_ndims=1)),
])
# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.
def prior_trainable(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
return tf.keras.Sequential([
tfp.layers.VariableLayer(n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t, scale=1),
reinterpreted_batch_ndims=1)),
])
def lambda_dist(scale=1e-3):
return lambda t: tfd.Normal(loc=t, scale=scale)
def getitem__all_values(filenames, idx, batch_size=72):
batch_x = filenames[idx]
data = []
labels = []
for i in range(len(filenames)):
batch_x = filenames[i]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples,\
events = pickle.load(f)
f.close()
for d, l in zip(data_events, labels_events):
labels.append(l[0])
d = d[0]
d = np.asarray(d)
d = d.reshape(d.shape+(1,))
data.append(d)
return np.array(data), np.array(labels), events
def waveform_2dGenerator_from_files(filenames, batchsize=72):
batchsize = batchsize
while 1:
data = []
labels = []
for i in range(len(filenames)):
batch_x = filenames[i]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples,\
events = pickle.load(f)
f.close()
for d, l in zip(data_events, labels_events):
labels.append(l[0])
d = d[0]
d = np.asarray(d)
d = d.reshape(d.shape+(1,))
data.append(d)
if len(labels) == batchsize:
yield np.array(data), np.array(labels)
data = []
labels = []
def convert_norm2real(values):
true_mts = []
true_values = []
for p in values:
p = p[0]
v, w, kappa, sigma, h = p[3], p[4], p[0], p[1], p[2]
v = (1/3)-(((1/3)*2)*v)
w = ((3/8)*pi)-((((3/8)*pi)*2)*w)
kappa = kappa*2.*pi
sigma = (pi/2)-(2*(pi/2)*sigma)
h = h
if h > 1.:
h = 1.
if v > 1.:
v = 1.
mtqt_source = MTQTSource(v=v, w=w, kappa=kappa, sigma=sigma,
h=h)
mt = mtqt_source.pyrocko_moment_tensor()
M = mtqt_source.m6
true_mts.append(mt)
true_values.append(M)
return true_mts, true_values
def convert_waveforms_to_input(waveforms):
waveforms_events = [waveforms[:]]
data_events, nsamples = wp.prepare_waveforms(waveforms_events)
data_events = np.asarray(data_events)
data_events = data_events.reshape((data_events.shape[0],)+data_events.shape[1:]+(1,))
data_events = np.float32(data_events)
return data_events
def convert_data_events_to_input(data_events):
data_events = np.asarray(data_events)
data_events = data_events.reshape((data_events.shape[0],)+data_events.shape[1:]+(1,))
data_events = np.float32(data_events)
return data_events
def getitem_values(filenames, batch_size, idx):
batch_x = filenames[idx]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples, events = pickle.load(f)
f.close()
return np.array(data_events), np.array(labels_events), events
| braunfuss/BNN-MT | cnn_util.py | cnn_util.py | py | 6,023 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "tensorflow_probability.distributions",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "p... |
38793898315 | # import tensorflow libraries
import tensorflow as tf
import numpy as np
# import opencv and find webcam
import cv2
cap = cv2.VideoCapture(0)
if not(cap.isOpened()):
print("Can't find webcam, shutting down...")
quit()
# set resolution of camera capture
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960.0)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 960.0)
# import system libraries
import os
import sys
sys.path.append("..")
# import tf and custom tf libraries
from utils import label_map_util
import custom_utils
# load prediction model
print("\nLoading Saved Model...")
PATH_TO_MODEL = "new_model" # point to folder containing 'model' folder
detect_fn = tf.saved_model.load(os.path.join(PATH_TO_MODEL, "saved_model"))
# load category index for prediction model
print("\nLoading Category Index...")
category_index = label_map_util.create_category_index_from_labelmap(os.path.join(PATH_TO_MODEL, "mscoco_label_map.pbtxt"), use_display_name=True)
# begin main loop
print("\nBegin Live Image Predicting")
while True:
# capture image from webcam
ret, image_np = cap.read()
if ret == False:
print("Error Reading Frame, skipping...")
continue
# convert image to tensor
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]
# perform prediction/detection on image tensor
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}
detections['num_detections'] = num_detections
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
# draw detection boxes on image using modified visualization function
custom_utils.visualize_boxes_and_labels_on_image_array(
image_np,
detections['detection_boxes'],
detections['detection_classes'],
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=200,
min_score_thresh=0.65,
agnostic_mode=False,
line_thickness=8)
# display captured image with detection boxes
cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
# exit program when 'q' key is pressed
if cv2.waitKey(25) == ord('q'):
cv2.destroyAllWindows()
break | OSUrobotics/object_detection | mainfile.py | mainfile.py | py | 2,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_... |
28989994372 | if True:
from PyQt5.QtCore import pyqtSlot, QSettings
from PyQt5.QtWidgets import QApplication, QDialog, QDialogButtonBox, QTableWidgetItem
from PyQt5.QtXml import QDomDocument
else:
from PyQt4.QtCore import pyqtSlot, QSettings
from PyQt4.QtGui import QApplication, QDialog, QDialogButtonBox, QTableWidgetItem
from PyQt4.QtXml import QDomDocument
# ------------------------------------------------------------------------------------------------------------
# Imports (Custom Stuff)
import ui_catarina
import ui_catarina_addgroup
import ui_catarina_removegroup
import ui_catarina_renamegroup
import ui_catarina_addport
import ui_catarina_removeport
import ui_catarina_renameport
import ui_catarina_connectports
import ui_catarina_disconnectports
from shared_canvasjack import *
from shared_settings import *
# ------------------------------------------------------------------------------------------------------------
# Try Import OpenGL
try:
from PyQt5.QtOpenGL import QGLWidget
hasGL = True
except:
hasGL = False
# ------------------------------------------------------------------------------------------------------------
# Static Variables
iGroupId = 0
iGroupName = 1
iGroupSplit = 2
iGroupIcon = 3
iGroupPosId = 0
iGroupPosX_o = 1
iGroupPosY_o = 2
iGroupPosX_i = 3
iGroupPosY_i = 4
iPortGroup = 0
iPortId = 1
iPortName = 2
iPortMode = 3
iPortType = 4
iConnId = 0
iConnOutput = 1
iConnInput = 2
# ------------------------------------------------------------------------------------------------------------
# Add Group Dialog
class CatarinaAddGroupW(QDialog, ui_catarina_addgroup.Ui_CatarinaAddGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list_names = []
for group in group_list:
self.m_group_list_names.append(group[iGroupName])
self.accepted.connect(self.slot_setReturn)
self.le_group_name.textChanged.connect(self.slot_checkText)
self.ret_group_name = ""
self.ret_group_split = False
@pyqtSlot(str)
def slot_checkText(self, text):
check = bool(text and text not in self.m_group_list_names)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
self.ret_group_name = self.le_group_name.text()
self.ret_group_split = self.cb_split.isChecked()
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Remove Group Dialog
class CatarinaRemoveGroupW(QDialog, ui_catarina_removegroup.Ui_CatarinaRemoveGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
index = 0
for group in group_list:
twi_group_id = QTableWidgetItem(str(group[iGroupId]))
twi_group_name = QTableWidgetItem(group[iGroupName])
twi_group_split = QTableWidgetItem("Yes" if (group[iGroupSplit]) else "No")
self.tw_group_list.insertRow(index)
self.tw_group_list.setItem(index, 0, twi_group_id)
self.tw_group_list.setItem(index, 1, twi_group_name)
self.tw_group_list.setItem(index, 2, twi_group_split)
index += 1
self.accepted.connect(self.slot_setReturn)
self.tw_group_list.cellDoubleClicked.connect(self.accept)
self.tw_group_list.currentCellChanged.connect(self.slot_checkCell)
self.ret_group_id = -1
@pyqtSlot(int)
def slot_checkCell(self, row):
check = bool(row >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.tw_group_list.rowCount() >= 0:
self.ret_group_id = int(self.tw_group_list.item(self.tw_group_list.currentRow(), 0).text())
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Rename Group Dialog
class CatarinaRenameGroupW(QDialog, ui_catarina_renamegroup.Ui_CatarinaRenameGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list_names = []
for group in group_list:
self.cb_group_to_rename.addItem("%i - %s" % (group[iGroupId], group[iGroupName]))
self.m_group_list_names.append(group[iGroupName])
self.accepted.connect(self.slot_setReturn)
self.cb_group_to_rename.currentIndexChanged[int].connect(self.slot_checkItem)
self.le_new_group_name.textChanged.connect(self.slot_checkText)
self.ret_group_id = -1
self.ret_new_group_name = ""
@pyqtSlot(int)
def slot_checkItem(self, ignored):
self.slot_checkText(self.le_new_group_name.text())
@pyqtSlot(str)
def slot_checkText(self, text):
if self.cb_group_to_rename.count() > 0:
group_name = self.cb_group_to_rename.currentText().split(" - ", 1)[1]
check = bool(text and text != group_name and text not in self.m_group_list_names)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
self.ret_group_id = int(self.cb_group_to_rename.currentText().split(" - ", 1)[0])
self.ret_new_group_name = self.le_new_group_name.text()
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Add Port Dialog
class CatarinaAddPortW(QDialog, ui_catarina_addport.Ui_CatarinaAddPortW):
def __init__(self, parent, group_list, port_id):
QDialog.__init__(self, parent)
self.setupUi(self)
self.sb_port_id.setValue(port_id)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
for group in group_list:
self.cb_group.addItem("%i - %s" % (group[iGroupId], group[iGroupName]))
self.accepted.connect(self.slot_setReturn)
self.le_port_name.textChanged.connect(self.slot_checkText)
self.ret_group_id = -1
self.ret_port_name = ""
self.ret_port_mode = patchcanvas.PORT_MODE_NULL
self.ret_port_type = patchcanvas.PORT_TYPE_NULL
@pyqtSlot(str)
def slot_checkText(self, text):
check = bool(text)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.cb_group.count() > 0:
self.ret_group_id = int(self.cb_group.currentText().split(" ", 1)[0])
self.ret_port_name = self.le_port_name.text()
self.ret_port_mode = patchcanvas.PORT_MODE_INPUT if self.rb_flags_input.isChecked() else patchcanvas.PORT_MODE_OUTPUT
self.ret_port_type = self.cb_port_type.currentIndex() + 1 # 1, 2, 3 or 4 for patchcanvas types
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Remove Port Dialog
class CatarinaRemovePortW(QDialog, ui_catarina_removeport.Ui_CatarinaRemovePortW):
    """Modal dialog for picking a single port to remove from the patchbay.

    After the dialog is accepted, the chosen port id is available in
    ``ret_port_id`` (-1 when nothing was selected).
    """
    def __init__(self, parent, group_list, port_list):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.tw_port_list.setColumnWidth(0, 25)
        # OK stays disabled until a row is selected (see slot_checkCell).
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        # Shared state owned by the caller; entries are indexed with the
        # module-level iGroup*/iPort* constants.
        self.m_group_list = group_list
        self.m_port_list = port_list
        self.accepted.connect(self.slot_setReturn)
        self.tw_port_list.cellDoubleClicked.connect(self.accept)
        self.tw_port_list.currentCellChanged.connect(self.slot_checkCell)
        # Toggling any mode/type radio button refreshes the port table.
        self.rb_input.clicked.connect(self.slot_reAddPorts)
        self.rb_output.clicked.connect(self.slot_reAddPorts)
        self.rb_audio_jack.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_jack.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_a2j.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_alsa.clicked.connect(self.slot_reAddPorts)
        self.ret_port_id = -1
        self.reAddPorts()
    def reAddPorts(self):
        """Repopulate the table with ports matching the selected mode and type."""
        self.tw_port_list.clearContents()
        # Rows shift up as they are removed, so always delete row 0.
        for x in range(self.tw_port_list.rowCount()):
            self.tw_port_list.removeRow(0)
        port_mode = patchcanvas.PORT_MODE_INPUT if (self.rb_input.isChecked()) else patchcanvas.PORT_MODE_OUTPUT
        if self.rb_audio_jack.isChecked():
            port_type = patchcanvas.PORT_TYPE_AUDIO_JACK
        elif self.rb_midi_jack.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_JACK
        elif self.rb_midi_a2j.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_A2J
        elif self.rb_midi_alsa.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_ALSA
        else:
            print("CatarinaRemovePortW::reAddPorts() - Invalid port type")
            return
        index = 0
        for port in self.m_port_list:
            if port[iPortMode] == port_mode and port[iPortType] == port_type:
                port_name = port[iPortName]
                group_name = self.findPortGroupName(port[iPortGroup])
                # Column 0 holds the numeric port id, column 1 "group:port".
                tw_port_id = QTableWidgetItem(str(port[iPortId]))
                tw_port_name = QTableWidgetItem("%s:%s" % (group_name, port_name))
                self.tw_port_list.insertRow(index)
                self.tw_port_list.setItem(index, 0, tw_port_id)
                self.tw_port_list.setItem(index, 1, tw_port_name)
                index += 1
    def findPortGroupName(self, group_id):
        """Return the name of the group with *group_id*, or "" if unknown."""
        for group in self.m_group_list:
            if group[iGroupId] == group_id:
                return group[iGroupName]
        return ""
    @pyqtSlot()
    def slot_reAddPorts(self):
        self.reAddPorts()
    @pyqtSlot(int)
    def slot_checkCell(self, row):
        # Enable OK only while a valid row is current.
        check = bool(row >= 0)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
    @pyqtSlot()
    def slot_setReturn(self):
        """Store the selected port id (column 0) when the dialog is accepted."""
        if self.tw_port_list.rowCount() > 0:
            self.ret_port_id = int(self.tw_port_list.item(self.tw_port_list.currentRow(), 0).text())
    def done(self, r):
        QDialog.done(self, r)
        self.close()
# ------------------------------------------------------------------------------------------------------------
# Rename Port Dialog
class CatarinaRenamePortW(QDialog, ui_catarina_renameport.Ui_CatarinaRenamePortW):
    """Modal dialog for renaming an existing port.

    After acceptance, ``ret_port_id`` holds the selected port id (-1 if
    none) and ``ret_new_port_name`` the new name typed by the user.
    """
    def __init__(self, parent, group_list, port_list):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.tw_port_list.setColumnWidth(0, 25)
        # OK stays disabled until both a row is selected and a new name
        # has been typed (see slot_checkText).
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        # Shared state owned by the caller; entries are indexed with the
        # module-level iGroup*/iPort* constants.
        self.m_group_list = group_list
        self.m_port_list = port_list
        self.accepted.connect(self.slot_setReturn)
        self.tw_port_list.currentCellChanged.connect(self.slot_checkCell)
        self.le_new_name.textChanged.connect(self.slot_checkText)
        # Toggling any mode/type radio button refreshes the port table.
        self.rb_input.clicked.connect(self.slot_reAddPorts)
        self.rb_output.clicked.connect(self.slot_reAddPorts)
        self.rb_audio_jack.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_jack.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_a2j.clicked.connect(self.slot_reAddPorts)
        self.rb_midi_alsa.clicked.connect(self.slot_reAddPorts)
        self.ret_port_id = -1
        self.ret_new_port_name = ""
        self.reAddPorts()
    def reAddPorts(self):
        """Repopulate the table with ports matching the selected mode and type."""
        self.tw_port_list.clearContents()
        # Rows shift up as they are removed, so always delete row 0.
        for x in range(self.tw_port_list.rowCount()):
            self.tw_port_list.removeRow(0)
        port_mode = patchcanvas.PORT_MODE_INPUT if (self.rb_input.isChecked()) else patchcanvas.PORT_MODE_OUTPUT
        if self.rb_audio_jack.isChecked():
            port_type = patchcanvas.PORT_TYPE_AUDIO_JACK
        elif self.rb_midi_jack.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_JACK
        elif self.rb_midi_a2j.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_A2J
        elif self.rb_midi_alsa.isChecked():
            port_type = patchcanvas.PORT_TYPE_MIDI_ALSA
        else:
            print("CatarinaRenamePortW::reAddPorts() - Invalid port type")
            return
        index = 0
        for port in self.m_port_list:
            if port[iPortMode] == port_mode and port[iPortType] == port_type:
                port_name = port[iPortName]
                group_name = self.findPortGroupName(port[iPortGroup])
                # Column 0 holds the numeric port id, column 1 "group:port".
                tw_port_id = QTableWidgetItem(str(port[iPortId]))
                tw_port_name = QTableWidgetItem("%s:%s" % (group_name, port_name))
                self.tw_port_list.insertRow(index)
                self.tw_port_list.setItem(index, 0, tw_port_id)
                self.tw_port_list.setItem(index, 1, tw_port_name)
                index += 1
        # Pre-select the first port so typing a name is enough to proceed.
        self.tw_port_list.setCurrentCell(0, 0)
    def findPortGroupName(self, group_id):
        """Return the name of the group with *group_id*, or "" if unknown."""
        for group in self.m_group_list:
            if group[iGroupId] == group_id:
                return group[iGroupName]
        return ""
    @pyqtSlot()
    def slot_reAddPorts(self):
        self.reAddPorts()
    @pyqtSlot()
    def slot_checkCell(self):
        # Re-validate with the current text whenever the selection moves.
        self.slot_checkText(self.le_new_name.text())
    @pyqtSlot(str)
    def slot_checkText(self, text):
        # Enable OK only when a name was typed AND a row is selected.
        check = bool(text and self.tw_port_list.currentRow() >= 0)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
    @pyqtSlot()
    def slot_setReturn(self):
        """Store the selected port id and new name when the dialog is accepted."""
        if self.tw_port_list.rowCount() > 0:
            self.ret_port_id = int(self.tw_port_list.item(self.tw_port_list.currentRow(), 0).text())
            self.ret_new_port_name = self.le_new_name.text()
    def done(self, r):
        QDialog.done(self, r)
        self.close()
# ------------------------------------------------------------------------------------------------------------
# Connect Ports Dialog
class CatarinaConnectPortsW(QDialog, ui_catarina_connectports.Ui_CatarinaConnectPortsW):
    """Modal dialog for connecting one output port to one input port.

    After acceptance, ``ret_port_out_id`` / ``ret_port_in_id`` hold the
    chosen port ids (-1 when nothing was selected).
    """
    def __init__(self, parent, group_list, port_list):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        # OK stays disabled until both an output and an input are selected.
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        # Shared state owned by the caller; entries are indexed with the
        # module-level iGroup*/iPort* constants.
        self.m_group_list = group_list
        self.m_port_list = port_list
        # Pre-bucket the ports by type once, so switching the radio
        # buttons only swaps lists instead of re-filtering.
        self.m_ports_audio_jack = []
        self.m_ports_midi_jack = []
        self.m_ports_midi_a2j = []
        self.m_ports_midi_alsa = []
        for port in self.m_port_list:
            if port[iPortType] == patchcanvas.PORT_TYPE_AUDIO_JACK:
                self.m_ports_audio_jack.append(port)
            elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_JACK:
                self.m_ports_midi_jack.append(port)
            elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_A2J:
                self.m_ports_midi_a2j.append(port)
            elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_ALSA:
                self.m_ports_midi_alsa.append(port)
        self.accepted.connect(self.slot_setReturn)
        self.rb_audio_jack.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_jack.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_a2j.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_alsa.clicked.connect(self.slot_portTypeChanged)
        self.lw_outputs.currentRowChanged.connect(self.slot_checkOutSelection)
        self.lw_inputs.currentRowChanged.connect(self.slot_checkInSelection)
        self.ret_port_out_id = -1
        self.ret_port_in_id = -1
        self.slot_portTypeChanged()
    def showPorts(self, ports):
        """Fill the output/input list widgets with *ports*, split by mode.

        Each entry is rendered as "<id> - <group>:<port>"; slot_setReturn
        parses the id back out of that prefix.
        """
        self.lw_outputs.clear()
        self.lw_inputs.clear()
        for port in ports:
            if port[iPortMode] == patchcanvas.PORT_MODE_INPUT:
                self.lw_inputs.addItem("%i - %s:%s" % (port[iPortId], self.findPortGroupName(port[iPortGroup]), port[iPortName]))
            elif port[iPortMode] == patchcanvas.PORT_MODE_OUTPUT:
                self.lw_outputs.addItem("%i - %s:%s" % (port[iPortId], self.findPortGroupName(port[iPortGroup]), port[iPortName]))
    def findPortGroupName(self, group_id):
        """Return the name of the group with *group_id*, or "" if unknown."""
        for group in self.m_group_list:
            if group[iGroupId] == group_id:
                return group[iGroupName]
        return ""
    def checkSelection(self, out_row, in_row):
        # Enable OK only when one output AND one input are selected.
        check = bool(out_row >= 0 and in_row >= 0)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
    @pyqtSlot()
    def slot_portTypeChanged(self):
        if self.rb_audio_jack.isChecked():
            ports = self.m_ports_audio_jack
        elif self.rb_midi_jack.isChecked():
            ports = self.m_ports_midi_jack
        elif self.rb_midi_a2j.isChecked():
            ports = self.m_ports_midi_a2j
        elif self.rb_midi_alsa.isChecked():
            ports = self.m_ports_midi_alsa
        else:
            print("CatarinaConnectPortstW::portTypeChanged() - Invalid port type")
            return
        self.showPorts(ports)
    @pyqtSlot(int)
    def slot_checkOutSelection(self, row):
        self.checkSelection(row, self.lw_inputs.currentRow())
    @pyqtSlot(int)
    def slot_checkInSelection(self, row):
        self.checkSelection(self.lw_outputs.currentRow(), row)
    @pyqtSlot()
    def slot_setReturn(self):
        """Parse the selected ids out of the "<id> - name" list entries."""
        if self.lw_outputs.currentRow() >= 0 and self.lw_inputs.currentRow() >= 0:
            self.ret_port_out_id = int(self.lw_outputs.currentItem().text().split(" - ", 1)[0])
            self.ret_port_in_id = int(self.lw_inputs.currentItem().text().split(" - ", 1)[0])
    def done(self, r):
        QDialog.done(self, r)
        self.close()
# ------------------------------------------------------------------------------------------------------------
# Disconnect Ports Dialog
class CatarinaDisconnectPortsW(QDialog, ui_catarina_disconnectports.Ui_CatarinaDisconnectPortsW):
    """Modal dialog for picking an existing connection to break.

    After acceptance, ``ret_port_out_id`` / ``ret_port_in_id`` hold the
    endpoint port ids of the chosen connection (-1 when none selected).
    """
    def __init__(self, parent, group_list, port_list, connection_list):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.tw_connections.setColumnWidth(0, 225)
        # OK stays disabled until a connection row is selected.
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        # Shared state owned by the caller; entries are indexed with the
        # module-level iGroup*/iPort*/iConn* constants.
        self.m_group_list = group_list
        self.m_port_list = port_list
        self.m_connection_list = connection_list
        self.accepted.connect(self.slot_setReturn)
        self.tw_connections.cellDoubleClicked.connect(self.accept)
        self.tw_connections.currentCellChanged.connect(self.slot_checkSelection)
        self.rb_audio_jack.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_jack.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_a2j.clicked.connect(self.slot_portTypeChanged)
        self.rb_midi_alsa.clicked.connect(self.slot_portTypeChanged)
        self.ret_port_out_id = -1
        self.ret_port_in_id = -1
        self.slot_portTypeChanged()
    def showPorts(self, ptype):
        """Repopulate the table with connections whose output port has *ptype*.

        Cells are rendered as "<port id> - <group>:<port>"; slot_setReturn
        parses the ids back out of those prefixes.
        """
        self.tw_connections.clearContents()
        # Rows shift up as they are removed, so always delete row 0.
        for x in range(self.tw_connections.rowCount()):
            self.tw_connections.removeRow(0)
        index = 0
        for connection in self.m_connection_list:
            # A connection's two endpoints share a type, so checking the
            # output side is enough to filter by port type.
            if self.findPortType(connection[iConnOutput]) == ptype:
                port_out_id = connection[iConnOutput]
                port_out_name = self.findPortName(port_out_id)
                port_in_id = connection[iConnInput]
                port_in_name = self.findPortName(port_in_id)
                tw_port_out = QTableWidgetItem("%i - %s" % (port_out_id, port_out_name))
                tw_port_in = QTableWidgetItem("%i - %s" % (port_in_id, port_in_name))
                self.tw_connections.insertRow(index)
                self.tw_connections.setItem(index, 0, tw_port_out)
                self.tw_connections.setItem(index, 1, tw_port_in)
                index += 1
    def findPortName(self, port_id):
        """Return "group:port" for *port_id*, or "" if unknown."""
        for port in self.m_port_list:
            if port[iPortId] == port_id:
                return "%s:%s" % (self.findPortGroupName(port[iPortGroup]), port[iPortName])
        return ""
    def findPortType(self, port_id):
        """Return the PORT_TYPE_* of *port_id*, or PORT_TYPE_NULL if unknown."""
        for port in self.m_port_list:
            if port[iPortId] == port_id:
                return port[iPortType]
        return patchcanvas.PORT_TYPE_NULL
    def findPortGroupName(self, group_id):
        """Return the name of the group with *group_id*, or "" if unknown."""
        for group in self.m_group_list:
            if group[iGroupId] == group_id:
                return group[iGroupName]
        return ""
    @pyqtSlot()
    def slot_portTypeChanged(self):
        if self.rb_audio_jack.isChecked():
            ptype = patchcanvas.PORT_TYPE_AUDIO_JACK
        elif self.rb_midi_jack.isChecked():
            ptype = patchcanvas.PORT_TYPE_MIDI_JACK
        elif self.rb_midi_a2j.isChecked():
            ptype = patchcanvas.PORT_TYPE_MIDI_A2J
        elif self.rb_midi_alsa.isChecked():
            ptype = patchcanvas.PORT_TYPE_MIDI_ALSA
        else:
            print("CatarinaDisconnectPortstW::portTypeChanged() - Invalid port type")
            return
        self.showPorts(ptype)
    @pyqtSlot(int)
    def slot_checkSelection(self, row):
        # Enable OK only while a valid row is current.
        check = bool(row >= 0)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
    @pyqtSlot()
    def slot_setReturn(self):
        """Parse the endpoint port ids out of the selected row's cells."""
        if self.tw_connections.currentRow() >= 0:
            self.ret_port_out_id = int(self.tw_connections.item(self.tw_connections.currentRow(), 0).text().split(" - ", 1)[0])
            self.ret_port_in_id = int(self.tw_connections.item(self.tw_connections.currentRow(), 1).text().split(" - ", 1)[0])
    def done(self, r):
        QDialog.done(self, r)
        self.close()
# ------------------------------------------------------------------------------------------------------------
# Main Window
class CatarinaMainW(AbstractCanvasJackClass):
    """Catarina main window: a virtual patchbay for exercising PatchCanvas.

    Groups, ports and connections exist only as in-memory lists
    (m_group_list, m_port_list, m_connection_list, each entry indexed
    with the module-level iGroup*/iPort*/iConn* constants) and can be
    saved to / restored from a small XML document. Nothing here talks
    to a real sound server.
    """
    def __init__(self, parent=None):
        AbstractCanvasJackClass.__init__(self, "Catarina", ui_catarina.Ui_CatarinaMainW, parent)
        self.loadSettings(True)
        # -------------------------------------------------------------
        # Set-up GUI
        setIcons(self, ("canvas",))
        self.ui.act_project_new.setIcon(getIcon("document-new"))
        self.ui.act_project_open.setIcon(getIcon("document-open"))
        self.ui.act_project_save.setIcon(getIcon("document-save"))
        self.ui.act_project_save_as.setIcon(getIcon("document-save-as"))
        self.ui.b_project_new.setIcon(getIcon("document-new"))
        self.ui.b_project_open.setIcon(getIcon("document-open"))
        self.ui.b_project_save.setIcon(getIcon("document-save"))
        self.ui.b_project_save_as.setIcon(getIcon("document-save-as"))
        self.ui.act_patchbay_add_group.setIcon(getIcon("list-add"))
        self.ui.act_patchbay_remove_group.setIcon(getIcon("edit-delete"))
        self.ui.act_patchbay_rename_group.setIcon(getIcon("edit-rename"))
        self.ui.act_patchbay_add_port.setIcon(getIcon("list-add"))
        self.ui.act_patchbay_remove_port.setIcon(getIcon("list-remove"))
        self.ui.act_patchbay_rename_port.setIcon(getIcon("edit-rename"))
        self.ui.act_patchbay_connect_ports.setIcon(getIcon("network-connect"))
        self.ui.act_patchbay_disconnect_ports.setIcon(getIcon("network-disconnect"))
        self.ui.b_group_add.setIcon(getIcon("list-add"))
        self.ui.b_group_remove.setIcon(getIcon("edit-delete"))
        self.ui.b_group_rename.setIcon(getIcon("edit-rename"))
        self.ui.b_port_add.setIcon(getIcon("list-add"))
        self.ui.b_port_remove.setIcon(getIcon("list-remove"))
        self.ui.b_port_rename.setIcon(getIcon("edit-rename"))
        self.ui.b_ports_connect.setIcon(getIcon("network-connect"))
        self.ui.b_ports_disconnect.setIcon(getIcon("network-disconnect"))
        # Canvas scene + render settings read from the saved configuration.
        self.scene = patchcanvas.PatchScene(self, self.ui.graphicsView)
        self.ui.graphicsView.setScene(self.scene)
        self.ui.graphicsView.setRenderHint(QPainter.Antialiasing, bool(self.fSavedSettings["Canvas/Antialiasing"] == patchcanvas.ANTIALIASING_FULL))
        self.ui.graphicsView.setRenderHint(QPainter.TextAntialiasing, self.fSavedSettings["Canvas/TextAntialiasing"])
        if self.fSavedSettings["Canvas/UseOpenGL"] and hasGL:
            self.ui.graphicsView.setViewport(QGLWidget(self.ui.graphicsView))
            self.ui.graphicsView.setRenderHint(QPainter.HighQualityAntialiasing, self.fSavedSettings["Canvas/HighQualityAntialiasing"])
        # Configure PatchCanvas before init(); canvasCallback receives
        # all user actions triggered from the canvas itself.
        p_options = patchcanvas.options_t()
        p_options.theme_name = self.fSavedSettings["Canvas/Theme"]
        p_options.auto_hide_groups = self.fSavedSettings["Canvas/AutoHideGroups"]
        p_options.use_bezier_lines = self.fSavedSettings["Canvas/UseBezierLines"]
        p_options.antialiasing = self.fSavedSettings["Canvas/Antialiasing"]
        p_options.eyecandy = self.fSavedSettings["Canvas/EyeCandy"]
        p_features = patchcanvas.features_t()
        p_features.group_info = False
        p_features.group_rename = True
        p_features.port_info = True
        p_features.port_rename = True
        p_features.handle_group_pos = True
        patchcanvas.setOptions(p_options)
        patchcanvas.setFeatures(p_features)
        patchcanvas.init("Catarina", self.scene, self.canvasCallback, DEBUG)
        # Menu actions and toolbar buttons share the same slots.
        self.ui.act_project_new.triggered.connect(self.slot_projectNew)
        self.ui.act_project_open.triggered.connect(self.slot_projectOpen)
        self.ui.act_project_save.triggered.connect(self.slot_projectSave)
        self.ui.act_project_save_as.triggered.connect(self.slot_projectSaveAs)
        self.ui.b_project_new.clicked.connect(self.slot_projectNew)
        self.ui.b_project_open.clicked.connect(self.slot_projectOpen)
        self.ui.b_project_save.clicked.connect(self.slot_projectSave)
        self.ui.b_project_save_as.clicked.connect(self.slot_projectSaveAs)
        self.ui.act_patchbay_add_group.triggered.connect(self.slot_groupAdd)
        self.ui.act_patchbay_remove_group.triggered.connect(self.slot_groupRemove)
        self.ui.act_patchbay_rename_group.triggered.connect(self.slot_groupRename)
        self.ui.act_patchbay_add_port.triggered.connect(self.slot_portAdd)
        self.ui.act_patchbay_remove_port.triggered.connect(self.slot_portRemove)
        self.ui.act_patchbay_rename_port.triggered.connect(self.slot_portRename)
        self.ui.act_patchbay_connect_ports.triggered.connect(self.slot_connectPorts)
        self.ui.act_patchbay_disconnect_ports.triggered.connect(self.slot_disconnectPorts)
        self.ui.b_group_add.clicked.connect(self.slot_groupAdd)
        self.ui.b_group_remove.clicked.connect(self.slot_groupRemove)
        self.ui.b_group_rename.clicked.connect(self.slot_groupRename)
        self.ui.b_port_add.clicked.connect(self.slot_portAdd)
        self.ui.b_port_remove.clicked.connect(self.slot_portRemove)
        self.ui.b_port_rename.clicked.connect(self.slot_portRename)
        self.ui.b_ports_connect.clicked.connect(self.slot_connectPorts)
        self.ui.b_ports_disconnect.clicked.connect(self.slot_disconnectPorts)
        self.setCanvasConnections()
        self.ui.act_settings_configure.triggered.connect(self.slot_configureCatarina)
        self.ui.act_help_about.triggered.connect(self.slot_aboutCatarina)
        self.ui.act_help_about_qt.triggered.connect(app.aboutQt)
        # SIGUSR1 triggers a save (signal plumbing set up by setUpSignals).
        self.SIGUSR1.connect(self.slot_projectSave)
        # Dummy timer to keep events active
        self.fUpdateTimer = self.startTimer(1000)
        # Start Empty Project
        self.slot_projectNew()
    def canvasCallback(self, action, value1, value2, value_str):
        """Handle PatchCanvas action callbacks.

        *action* is one of the patchcanvas.ACTION_* codes; value1/value2
        carry the numeric arguments and value_str the textual one. The
        canvas has already been (or will be, via the patchcanvas calls
        below) updated; this keeps the Python-side lists in sync.
        """
        if action == patchcanvas.ACTION_GROUP_INFO:
            # group_info feature is disabled in __init__, so nothing to do.
            pass
        elif action == patchcanvas.ACTION_GROUP_RENAME:
            group_id = value1
            new_group_name = value_str
            # Group names double as identifiers in the XML file, so reject duplicates.
            for group in self.m_group_list:
                if group[iGroupName] == new_group_name:
                    QMessageBox.warning(self, self.tr("Warning"), self.tr("There is already a group with this name"))
                    return
            patchcanvas.renameGroup(group_id, new_group_name)
            for group in self.m_group_list:
                if group[iGroupId] == group_id:
                    group[iGroupName] = new_group_name
                    break
        elif action == patchcanvas.ACTION_GROUP_SPLIT:
            group_id = value1
            patchcanvas.splitGroup(group_id)
            for group in self.m_group_list:
                if group[iGroupId] == group_id:
                    group[iGroupSplit] = True
                    break
        elif action == patchcanvas.ACTION_GROUP_JOIN:
            group_id = value1
            patchcanvas.joinGroup(group_id)
            for group in self.m_group_list:
                if group[iGroupId] == group_id:
                    group[iGroupSplit] = False
                    break
        elif action == patchcanvas.ACTION_PORT_INFO:
            # Look up the port and its group, then show a summary box.
            port_id = value1
            group_id = 0
            group_name = ""
            port_name = ""
            port_mode = patchcanvas.PORT_MODE_NULL
            port_type = patchcanvas.PORT_TYPE_NULL
            for port in self.m_port_list:
                if port[iPortId] == port_id:
                    group_id = port[iPortGroup]
                    port_name = port[iPortName]
                    port_mode = port[iPortMode]
                    port_type = port[iPortType]
                    break
            for group in self.m_group_list:
                if group[iGroupId] == group_id:
                    group_name = group[iGroupName]
                    break
            if port_mode == patchcanvas.PORT_MODE_INPUT:
                mode_text = self.tr("Input")
            elif port_mode == patchcanvas.PORT_MODE_OUTPUT:
                mode_text = self.tr("Output")
            else:
                mode_text = self.tr("Unknown")
            if port_type == patchcanvas.PORT_TYPE_AUDIO_JACK:
                type_text = self.tr("JACK Audio")
            elif port_type == patchcanvas.PORT_TYPE_MIDI_JACK:
                type_text = self.tr("JACK MIDI")
            elif port_type == patchcanvas.PORT_TYPE_MIDI_A2J:
                type_text = self.tr("A2J MIDI")
            elif port_type == patchcanvas.PORT_TYPE_MIDI_ALSA:
                type_text = self.tr("ALSA MIDI")
            else:
                type_text = self.tr("Unknown")
            port_full_name = group_name + ":" + port_name
            # %1..%7 are Qt translation placeholders filled by .arg() below.
            info = self.tr(""
                           "<table>"
                           "<tr><td align='right'><b>Group Name:</b></td><td>&nbsp;%1</td></tr>"
                           "<tr><td align='right'><b>Group ID:</b></td><td>&nbsp;%2</td></tr>"
                           "<tr><td align='right'><b>Port Name:</b></td><td>&nbsp;%3</td></tr>"
                           "<tr><td align='right'><b>Port ID:</b></td><td>&nbsp;%4</i></td></tr>"
                           "<tr><td align='right'><b>Full Port Name:</b></td><td>&nbsp;%5</td></tr>"
                           "<tr><td colspan='2'>&nbsp;</td></tr>"
                           "<tr><td align='right'><b>Port Mode:</b></td><td>&nbsp;%6</td></tr>"
                           "<tr><td align='right'><b>Port Type:</b></td><td>&nbsp;%7</td></tr>"
                           "</table>"
                           ).arg(group_name).arg(group_id).arg(port_name).arg(port_id).arg(port_full_name).arg(mode_text).arg(type_text)
            QMessageBox.information(self, self.tr("Port Information"), info)
        elif action == patchcanvas.ACTION_PORT_RENAME:
            port_id = value1
            new_port_name = value_str
            patchcanvas.renamePort(port_id, new_port_name)
            for port in self.m_port_list:
                if port[iPortId] == port_id:
                    port[iPortName] = new_port_name
                    break
        elif action == patchcanvas.ACTION_PORTS_CONNECT:
            connection_id = self.m_last_connection_id
            port_out_id = value1
            port_in_id = value2
            patchcanvas.connectPorts(connection_id, port_out_id, port_in_id)
            conn_obj = [None, None, None]
            conn_obj[iConnId] = connection_id
            conn_obj[iConnOutput] = port_out_id
            conn_obj[iConnInput] = port_in_id
            self.m_connection_list.append(conn_obj)
            self.m_last_connection_id += 1
        elif action == patchcanvas.ACTION_PORTS_DISCONNECT:
            connection_id = value1
            patchcanvas.disconnectPorts(connection_id)
            for connection in self.m_connection_list:
                if connection[iConnId] == connection_id:
                    self.m_connection_list.remove(connection)
                    break
    def initPorts(self):
        """Push the in-memory groups/ports/connections onto a fresh canvas.

        m_group_list_pos is a load-time-only list of saved group positions;
        it is consumed here and cleared.
        """
        for group in self.m_group_list:
            patchcanvas.addGroup(group[iGroupId], group[iGroupName], patchcanvas.SPLIT_YES if (group[iGroupSplit]) else patchcanvas.SPLIT_NO, group[iGroupIcon])
        for group_pos in self.m_group_list_pos:
            patchcanvas.setGroupPosFull(group_pos[iGroupPosId], group_pos[iGroupPosX_o], group_pos[iGroupPosY_o], group_pos[iGroupPosX_i], group_pos[iGroupPosY_i])
        for port in self.m_port_list:
            patchcanvas.addPort(port[iPortGroup], port[iPortId], port[iPortName], port[iPortMode], port[iPortType])
        for connection in self.m_connection_list:
            patchcanvas.connectPorts(connection[iConnId], connection[iConnOutput], connection[iConnInput])
        self.m_group_list_pos = []
        patchcanvas.updateZValues()
    def saveFile(self, path):
        """Serialize the current patchbay state to *path* as Catarina XML.

        Group entries are "<id>:<split>:<icon>:<x_o>:<y_o>:<x_i>:<y_i>",
        ports "<group>:<id>:<mode>:<type>" and connections
        "<id>:<output>:<input>" - the inverse of loadFile's parsing.
        """
        content = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                   "<!DOCTYPE CATARINA>\n"
                   "<CATARINA VERSION='%s'>\n" % VERSION)
        content += " <Groups>\n"
        for i in range(len(self.m_group_list)):
            group = self.m_group_list[i]
            group_id = group[iGroupId]
            group_name = group[iGroupName]
            group_split = group[iGroupSplit]
            group_icon = group[iGroupIcon]
            # Positions come live from the canvas, not from m_group_list_pos.
            group_pos_i = patchcanvas.getGroupPos(group_id, patchcanvas.PORT_MODE_INPUT)
            group_pos_o = patchcanvas.getGroupPos(group_id, patchcanvas.PORT_MODE_OUTPUT)
            content += "  <g%i> <name>%s</name> <data>%i:%i:%i:%f:%f:%f:%f</data> </g%i>\n" % (i, group_name, group_id, group_split, group_icon, group_pos_o.x(), group_pos_o.y(), group_pos_i.x(), group_pos_i.y(), i)
        content += " </Groups>\n"
        content += " <Ports>\n"
        for i in range(len(self.m_port_list)):
            port = self.m_port_list[i]
            content += "  <p%i> <name>%s</name> <data>%i:%i:%i:%i</data> </p%i>\n" % (i, port[iPortName], port[iPortGroup], port[iPortId], port[iPortMode], port[iPortType], i)
        content += " </Ports>\n"
        content += " <Connections>\n"
        for i in range(len(self.m_connection_list)):
            connection = self.m_connection_list[i]
            content += "  <c%i>%i:%i:%i</c%i>\n" % (i, connection[iConnId], connection[iConnOutput], connection[iConnInput], i)
        content += " </Connections>\n"
        content += "</CATARINA>\n"
        try:
            fd = uopen(path, "w")
            fd.write(content)
            fd.close()
        except:
            QMessageBox.critical(self, self.tr("Error"), self.tr("Failed to save file"))
    def loadFile(self, path):
        """Load a Catarina XML document from *path*, replacing current state.

        On any error, state is left reset and m_save_path cleared so a
        later plain Save does not overwrite an unrelated file.
        """
        if not os.path.exists(path):
            QMessageBox.critical(self, self.tr("Error"), self.tr("The file '%s' does not exist" % path))
            self.m_save_path = None
            return
        try:
            fd = uopen(path, "r")
            readState = fd.read()
            fd.close()
        except:
            QMessageBox.critical(self, self.tr("Error"), self.tr("Failed to load file"))
            self.m_save_path = None
            return
        self.m_save_path = path
        self.m_group_list = []
        self.m_group_list_pos = []
        self.m_port_list = []
        self.m_connection_list = []
        self.m_last_group_id = 1
        self.m_last_port_id = 1
        self.m_last_connection_id = 1
        xml = QDomDocument()
        xml.setContent(readState.encode("utf-8"))
        content = xml.documentElement()
        if content.tagName() != "CATARINA":
            QMessageBox.critical(self, self.tr("Error"), self.tr("Not a valid Catarina file"))
            return
        # Get values from XML - the big code
        node = content.firstChild()
        while not node.isNull():
            if node.toElement().tagName() == "Groups":
                group_name = ""
                groups = node.toElement().firstChild()
                while not groups.isNull():
                    group = groups.toElement().firstChild()
                    while not group.isNull():
                        tag = group.toElement().tagName()
                        text = group.toElement().text()
                        # <name> always precedes <data>, so group_name is
                        # already set when the data entry is parsed.
                        if tag == "name":
                            group_name = text
                        elif tag == "data":
                            # data = id:split:icon:x_out:y_out:x_in:y_in
                            group_data = text.split(":")
                            if len(group_data) == 7 and group_data[0].isdigit() and group_data[1].isdigit() and group_data[2].isdigit() and isNumber(group_data[3]) and isNumber(group_data[4]) and isNumber(group_data[5]) and isNumber(group_data[6]):
                                group_obj = [None, None, None, None]
                                group_obj[iGroupId] = int(group_data[0])
                                group_obj[iGroupName] = group_name
                                group_obj[iGroupSplit] = int(group_data[1])
                                group_obj[iGroupIcon] = int(group_data[2])
                                group_pos_obj = [None, None, None, None, None]
                                group_pos_obj[iGroupPosId] = int(group_data[0])
                                group_pos_obj[iGroupPosX_o] = float(group_data[3])
                                group_pos_obj[iGroupPosY_o] = float(group_data[4])
                                group_pos_obj[iGroupPosX_i] = float(group_data[5])
                                group_pos_obj[iGroupPosY_i] = float(group_data[6])
                                self.m_group_list.append(group_obj)
                                self.m_group_list_pos.append(group_pos_obj)
                                group_id = group_obj[iGroupId]
                                if group_id > self.m_last_group_id:
                                    self.m_last_group_id = group_id + 1
                        group = group.nextSibling()
                    groups = groups.nextSibling()
            elif node.toElement().tagName() == "Ports":
                port_name = ""
                ports = node.toElement().firstChild()
                while not ports.isNull():
                    port = ports.toElement().firstChild()
                    while not port.isNull():
                        tag = port.toElement().tagName()
                        text = port.toElement().text()
                        if tag == "name":
                            port_name = text
                        elif tag == "data":
                            # data = group_id:port_id:mode:type
                            port_data = text.split(":")
                            if len(port_data) == 4 and port_data[0].isdigit() and port_data[1].isdigit() and port_data[2].isdigit() and port_data[3].isdigit():
                                new_port = [None, None, None, None, None]
                                new_port[iPortGroup] = int(port_data[0])
                                new_port[iPortId] = int(port_data[1])
                                new_port[iPortName] = port_name
                                new_port[iPortMode] = int(port_data[2])
                                new_port[iPortType] = int(port_data[3])
                                self.m_port_list.append(new_port)
                                if new_port[iPortId] > self.m_last_port_id:
                                    self.m_last_port_id = new_port[iPortId] + 1
                        port = port.nextSibling()
                    ports = ports.nextSibling()
            elif node.toElement().tagName() == "Connections":
                conns = node.toElement().firstChild()
                while not conns.isNull():
                    # element text = conn_id:output_port:input_port
                    conn_data = conns.toElement().text().split(":")
                    if len(conn_data) == 3 and conn_data[0].isdigit() and conn_data[1].isdigit() and conn_data[2].isdigit():
                        conn_obj = [None, None, None]
                        conn_obj[iConnId] = int(conn_data[0])
                        conn_obj[iConnOutput] = int(conn_data[1])
                        conn_obj[iConnInput] = int(conn_data[2])
                        connection_id = conn_obj[iConnId]
                        self.m_connection_list.append(conn_obj)
                        # NOTE(review): uses >= here while groups/ports use >
                        # above - looks inconsistent but only wastes ids.
                        if connection_id >= self.m_last_connection_id:
                            self.m_last_connection_id = connection_id + 1
                    conns = conns.nextSibling()
            node = node.nextSibling()
        # Bump past everything loaded so new items get fresh ids.
        self.m_last_group_id += 1
        self.m_last_port_id += 1
        self.m_last_connection_id += 1
        patchcanvas.clear()
        self.initPorts()
        self.scene.zoom_fit()
        self.scene.zoom_reset()
    @pyqtSlot()
    def slot_projectNew(self):
        """Reset to an empty project (also used as initial state)."""
        self.m_group_list = []
        self.m_group_list_pos = []
        self.m_port_list = []
        self.m_connection_list = []
        self.m_last_group_id = 1
        self.m_last_port_id = 1
        self.m_last_connection_id = 1
        self.m_save_path = None
        patchcanvas.clear()
    @pyqtSlot()
    def slot_projectOpen(self):
        path, _ = QFileDialog.getOpenFileName(self, self.tr("Load State"), filter=self.tr("Catarina XML Document (*.xml)"))
        if path:
            self.loadFile(path)
    @pyqtSlot()
    def slot_projectSave(self):
        # Fall back to Save-As when the project has never been saved.
        if self.m_save_path:
            self.saveFile(self.m_save_path)
        else:
            self.slot_projectSaveAs()
    @pyqtSlot()
    def slot_projectSaveAs(self):
        path, _ = QFileDialog.getSaveFileName(self, self.tr("Save State"), filter=self.tr("Catarina XML Document (*.xml)"))
        if path:
            self.m_save_path = path
            self.saveFile(path)
    @pyqtSlot()
    def slot_groupAdd(self):
        """Ask for a new group via dialog and add it to canvas + state."""
        dialog = CatarinaAddGroupW(self, self.m_group_list)
        if dialog.exec_():
            group_id = self.m_last_group_id
            group_name = dialog.ret_group_name
            group_split = dialog.ret_group_split
            group_splitR = patchcanvas.SPLIT_YES if group_split else patchcanvas.SPLIT_NO
            # Split groups get the hardware icon, joined ones the app icon.
            group_icon = patchcanvas.ICON_HARDWARE if group_split else patchcanvas.ICON_APPLICATION
            patchcanvas.addGroup(group_id, group_name, group_splitR, group_icon)
            group_obj = [None, None, None, None]
            group_obj[iGroupId] = group_id
            group_obj[iGroupName] = group_name
            group_obj[iGroupSplit] = group_split
            group_obj[iGroupIcon] = group_icon
            self.m_group_list.append(group_obj)
            self.m_last_group_id += 1
    @pyqtSlot()
    def slot_groupRemove(self):
        """Remove a group chosen via dialog, cascading to its ports/connections."""
        if len(self.m_group_list) > 0:
            dialog = CatarinaRemoveGroupW(self, self.m_group_list)
            if dialog.exec_():
                group_id = dialog.ret_group_id
                # Remove port connections first
                for port in self.m_port_list:
                    if port[iPortGroup] == group_id:
                        port_id = port[iPortId]
                        # h offsets the index for items already popped,
                        # so the loop can delete while iterating by index.
                        h = 0
                        for i in range(len(self.m_connection_list)):
                            connection = self.m_connection_list[i-h]
                            if connection[iConnOutput] == port_id or connection[iConnInput] == port_id:
                                patchcanvas.disconnectPorts(connection[iConnId])
                                self.m_connection_list.pop(i-h)
                                h += 1
                # Remove ports
                h = 0
                for i in range(len(self.m_port_list)):
                    port = self.m_port_list[i-h]
                    if port[iPortGroup] == group_id:
                        port_id = port[iPortId]
                        patchcanvas.removePort(port[iPortId])
                        self.m_port_list.pop(i-h)
                        h += 1
                # Now remove group
                patchcanvas.removeGroup(group_id)
                for group in self.m_group_list:
                    if group[iGroupId] == group_id:
                        self.m_group_list.remove(group)
                        break
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
    @pyqtSlot()
    def slot_groupRename(self):
        """Rename a group chosen via dialog, updating canvas + state."""
        if len(self.m_group_list) > 0:
            dialog = CatarinaRenameGroupW(self, self.m_group_list)
            if dialog.exec_():
                group_id = dialog.ret_group_id
                new_group_name = dialog.ret_new_group_name
                patchcanvas.renameGroup(group_id, new_group_name)
                for group in self.m_group_list:
                    if group[iGroupId] == group_id:
                        group[iGroupName] = new_group_name
                        break
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
    @pyqtSlot()
    def slot_portAdd(self):
        """Add a new port (dialog-driven) to canvas + state."""
        if len(self.m_group_list) > 0:
            dialog = CatarinaAddPortW(self, self.m_group_list, self.m_last_port_id)
            if dialog.exec_():
                group_id = dialog.ret_group_id
                port_name = dialog.ret_port_name
                port_mode = dialog.ret_port_mode
                port_type = dialog.ret_port_type
                patchcanvas.addPort(group_id, self.m_last_port_id, port_name, port_mode, port_type)
                new_port = [None, None, None, None, None]
                new_port[iPortGroup] = group_id
                new_port[iPortId] = self.m_last_port_id
                new_port[iPortName] = port_name
                new_port[iPortMode] = port_mode
                new_port[iPortType] = port_type
                self.m_port_list.append(new_port)
                self.m_last_port_id += 1
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
    @pyqtSlot()
    def slot_portRemove(self):
        """Remove a port (dialog-driven), cascading to its connections."""
        if len(self.m_port_list) > 0:
            dialog = CatarinaRemovePortW(self, self.m_group_list, self.m_port_list)
            if dialog.exec_():
                port_id = dialog.ret_port_id
                # h offsets the index for items already popped (see
                # slot_groupRemove for the same pattern).
                h = 0
                for i in range(len(self.m_connection_list)):
                    connection = self.m_connection_list[i-h]
                    if connection[iConnOutput] == port_id or connection[iConnInput] == port_id:
                        patchcanvas.disconnectPorts(connection[iConnId])
                        self.m_connection_list.pop(i-h)
                        h += 1
                patchcanvas.removePort(port_id)
                for port in self.m_port_list:
                    if port[iPortId] == port_id:
                        self.m_port_list.remove(port)
                        break
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Port first!"))
    @pyqtSlot()
    def slot_portRename(self):
        """Rename a port (dialog-driven), updating canvas + state."""
        if len(self.m_port_list) > 0:
            dialog = CatarinaRenamePortW(self, self.m_group_list, self.m_port_list)
            if dialog.exec_():
                port_id = dialog.ret_port_id
                new_port_name = dialog.ret_new_port_name
                patchcanvas.renamePort(port_id, new_port_name)
                for port in self.m_port_list:
                    if port[iPortId] == port_id:
                        port[iPortName] = new_port_name
                        break
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Port first!"))
    @pyqtSlot()
    def slot_connectPorts(self):
        """Connect an output to an input port (dialog-driven)."""
        if len(self.m_port_list) > 0:
            dialog = CatarinaConnectPortsW(self, self.m_group_list, self.m_port_list)
            if dialog.exec_():
                connection_id = self.m_last_connection_id
                port_out_id = dialog.ret_port_out_id
                port_in_id = dialog.ret_port_in_id
                # Refuse duplicate connections between the same two ports.
                for connection in self.m_connection_list:
                    if connection[iConnOutput] == port_out_id and connection[iConnInput] == port_in_id:
                        QMessageBox.warning(self, self.tr("Warning"), self.tr("Ports already connected!"))
                        return
                patchcanvas.connectPorts(connection_id, port_out_id, port_in_id)
                conn_obj = [None, None, None]
                conn_obj[iConnId] = connection_id
                conn_obj[iConnOutput] = port_out_id
                conn_obj[iConnInput] = port_in_id
                self.m_connection_list.append(conn_obj)
                self.m_last_connection_id += 1
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add some Ports first!"))
    @pyqtSlot()
    def slot_disconnectPorts(self):
        """Break an existing connection (dialog-driven)."""
        if len(self.m_connection_list) > 0:
            dialog = CatarinaDisconnectPortsW(self, self.m_group_list, self.m_port_list, self.m_connection_list)
            if dialog.exec_():
                connection_id = -1
                port_out_id = dialog.ret_port_out_id
                port_in_id = dialog.ret_port_in_id
                # Map the endpoint pair back to its connection id.
                for connection in self.m_connection_list:
                    if connection[iConnOutput] == port_out_id and connection[iConnInput] == port_in_id:
                        connection_id = connection[iConnId]
                        self.m_connection_list.remove(connection)
                        break
                patchcanvas.disconnectPorts(connection_id)
        else:
            QMessageBox.warning(self, self.tr("Warning"), self.tr("Please make some Connections first!"))
    @pyqtSlot()
    def slot_configureCatarina(self):
        """Open the settings dialog and, if accepted, rebuild the canvas."""
        dialog = SettingsW(self, "catarina", hasGL)
        if dialog.exec_():
            self.loadSettings(False)
            patchcanvas.clear()
            p_options = patchcanvas.options_t()
            p_options.theme_name = self.fSavedSettings["Canvas/Theme"]
            p_options.auto_hide_groups = self.fSavedSettings["Canvas/AutoHideGroups"]
            p_options.use_bezier_lines = self.fSavedSettings["Canvas/UseBezierLines"]
            p_options.antialiasing = self.fSavedSettings["Canvas/Antialiasing"]
            p_options.eyecandy = self.fSavedSettings["Canvas/EyeCandy"]
            patchcanvas.setOptions(p_options)
            patchcanvas.init("Catarina", self.scene, self.canvasCallback, DEBUG)
            self.initPorts()
    @pyqtSlot()
    def slot_aboutCatarina(self):
        QMessageBox.about(self, self.tr("About Catarina"), self.tr("<h3>Catarina</h3>"
                                                                   "<br>Version %s"
                                                                   "<br>Catarina is a testing ground for the 'PatchCanvas' module.<br>"
                                                                   "<br>Copyright (C) 2010-2022 falkTX") % VERSION)
    def saveSettings(self):
        """Persist window geometry and toolbar visibility via QSettings."""
        settings = QSettings()
        settings.setValue("Geometry", self.saveGeometry())
        settings.setValue("ShowToolbar", self.ui.frame_toolbar.isVisible())
    def loadSettings(self, geometry):
        """Load settings; restore geometry/toolbar only when *geometry* is True.

        Always refreshes self.fSavedSettings, which the canvas set-up reads.
        """
        settings = QSettings()
        if geometry:
            self.restoreGeometry(settings.value("Geometry", b""))
            showToolbar = settings.value("ShowToolbar", True, type=bool)
            self.ui.act_settings_show_toolbar.setChecked(showToolbar)
            self.ui.frame_toolbar.setVisible(showToolbar)
        self.fSavedSettings = {
            "Canvas/Theme": settings.value("Canvas/Theme", patchcanvas.getDefaultThemeName(), type=str),
            "Canvas/AutoHideGroups": settings.value("Canvas/AutoHideGroups", False, type=bool),
            "Canvas/UseBezierLines": settings.value("Canvas/UseBezierLines", True, type=bool),
            "Canvas/EyeCandy": settings.value("Canvas/EyeCandy", patchcanvas.EYECANDY_SMALL, type=int),
            "Canvas/UseOpenGL": settings.value("Canvas/UseOpenGL", False, type=bool),
            "Canvas/Antialiasing": settings.value("Canvas/Antialiasing", patchcanvas.ANTIALIASING_SMALL, type=int),
            "Canvas/TextAntialiasing": settings.value("Canvas/TextAntialiasing", True, type=bool),
            "Canvas/HighQualityAntialiasing": settings.value("Canvas/HighQualityAntialiasing", False, type=bool)
        }
    def timerEvent(self, event):
        # The dummy 1s timer forces periodic repaints (see __init__).
        if event.timerId() == self.fUpdateTimer:
            self.update()
        QMainWindow.timerEvent(self, event)
    def closeEvent(self, event):
        self.saveSettings()
        patchcanvas.clear()
        QMainWindow.closeEvent(self, event)
#--------------- main ------------------
if __name__ == '__main__':
# App initialization
app = QApplication(sys.argv)
app.setApplicationName("Catarina")
app.setApplicationVersion(VERSION)
app.setOrganizationName("Cadence")
app.setWindowIcon(QIcon(":/scalable/catarina.svg"))
# Show GUI
gui = CatarinaMainW()
# Set-up custom signal handling
setUpSignals(gui)
gui.show()
if len(app.arguments()) > 1:
if not app.arguments()[0].endswith("Python.exe"):
gui.loadFile(app.arguments()[1])
elif len(app.arguments()) > 2:
gui.loadFile(app.arguments()[2])
# App-Loop
sys.exit(app.exec_())
| falkTX/Cadence | src/catarina.py | catarina.py | py | 54,241 | python | en | code | 361 | github-code | 6 | [
{
"api_name": "PyQt4.QtGui.QDialog",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ui_catarina_addgroup.Ui_CatarinaAddGroupW",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui.QDialog.__init__",
"line_number": 63,
"usage_type": "call"... |
18164621881 | import json
# Read the network.json file
with open("network.json", "r") as f:
network = json.load(f)
# Create a set to store all pairs (a, b) such that a follows b but b doesn't follow back a
pairs = set()
# Iterate over all users in the network
for user, data in network.items():
# Get the list of users that the current user follows
following = data.get("Connections", [])
# Iterate over all users that the current user follows
for follower in following:
# Check if the follower doesn't follow back the current user
if user not in network.get(str(follower), {}).get("Connections", []):
pairs.add((user, str(follower)))
# Remove mutual follows from the set
mutual_follows = {(b, a) for (a, b) in pairs if (b, a) in pairs}
pairs -= mutual_follows
# Print all pairs (a, b) such that a follows b but b doesn't follow back a
if len(pairs) > 0:
for pair in pairs:
print(pair)
# Print the number of such pairs
print(f"Number of pairs where 'a' follows 'b' but 'b' doesn't follow back 'a': {len(pairs)}")
else:
# If there are no such pairs, display that the relationship is completely bidirectional
print("The relationship is completely bidirectional.")
| GOVINDFROMINDIA/Twitter-Scam-Victims | GraphEvaluation.py | GraphEvaluation.py | py | 1,265 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
12300846904 | """
This example illustrates how to display the tree of a single TreeGrower for
debugging purpose.
"""
from sklearn.datasets import make_classification
import numpy as np
from pygbm.binning import BinMapper
from pygbm.grower import TreeGrower
from pygbm import plotting
rng = np.random.RandomState(0)
n_samples = int(1e7)
n_leaf_nodes = 5
X, y = make_classification(n_samples=n_samples, n_classes=2, n_features=5,
n_informative=3, n_redundant=0, random_state=rng)
bin_mapper_ = BinMapper(random_state=rng)
X_binned = bin_mapper_.fit_transform(X)
gradients = np.asarray(y, dtype=np.float32).copy()
hessians = np.ones(1, dtype=np.float32)
# First run to trigger the compilation of numba jit methods to avoid recording
# the compiler overhead in the profile report.
TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=n_leaf_nodes).grow()
# New run with to collect timing statistics that will be included in the plot.
grower = TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=n_leaf_nodes)
grower.grow()
plotting.plot_tree(grower)
| ogrisel/pygbm | examples/plot_performance_profile_single_small_tree.py | plot_performance_profile_single_small_tree.py | py | 1,076 | python | en | code | 175 | github-code | 6 | [
{
"api_name": "numpy.random.RandomState",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 17,
"usage_type": "call"
},
{
"a... |
72067619709 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 9 13:31:37 2022
@author: basile
"""
import pandas as pd
df = pd.DataFrame(columns=['index', 'prenom', 'nom', 'email', 'groupe', 'mystere'])
df.to_csv('priants.csv', index=False)
import streamlit as st
input = st.text_input("text", key="text")
but = st.button("clear text input")
if but:
st.session_state["text"] = ""
st.write(input) | BasileR29/chapelet_tournant | test.py | test.py | py | 422 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.sess... |
24494832097 | import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
def get_spy():
url = 'https://www.slickcharts.com/sp500'
request = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
soup = bs(request.text, "lxml")
stats = soup.find('table', class_='table table-hover table-borderless table-sm')
df = pd.read_html(str(stats))[0]
df['% Chg'] = df['% Chg'].str.strip('()-%')
df['% Chg'] = pd.to_numeric(df['% Chg'])
df['Chg'] = pd.to_numeric(df['Chg'])
return df
| reesecake/td_api | util/IndexInfo.py | IndexInfo.py | py | 520 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
... |
36152977435 | import os, sys, logging
from flask import Blueprint, current_app
from flask import request, jsonify
ml_model_bp = Blueprint('ml_model_bp', __name__) # create a Blueprint object
# create 'index' view for testing purposes
@ml_model_bp.route('/', methods=["GET", "POST"])
def index():
return "ML model service is running!"
# helper method for predict/ endpoint
def get_pred(data):
"""Predict from in-memory data on the fly.
"""
try:
nn_model = current_app.model
pred = nn_model.predict(data)
pred = pred.tolist()
except Exception as e:
print(e)
pred = []
return pred
# create route for prediction
@ml_model_bp.route("/predict", methods=["GET", "POST"])
def predict():
"""Performs an inference
"""
if request.method == "POST":
data = request.get_json() # sentences come in through JSON
current_app.logger.debug(f"Input to \"predict\" endpoint: {data['sentences']}")
pred = get_pred( data=data["sentences"])
current_app.logger.debug(f"Sentiment predictions = {pred}")
return jsonify({"input": data, "pred": pred})
if request.method == "GET":
msg = "Please compose your request in POST type with data."
current_app.logger.error(f"Wrong request type {request}.")
return jsonify({"msg": msg})
| bhavenp/docker_sentiment_analysis | ml_service/ml_model_api/ml_model_blueprint.py | ml_model_blueprint.py | py | 1,355 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.current_app.model",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.r... |
20232075182 | from tradingview.client import TradingViewWebSocketClient, fetch_japan_symbols
from datetime import datetime
client = TradingViewWebSocketClient()
symbols = fetch_japan_symbols()
client.add_symbols(symbols[:100])
# client.add_symbols(['TSE:4689'])
for x in client.fetch_ohlc(past_bar=302):
print(datetime.fromtimestamp(x.bar_time), x) | otomarukanta/tradingview | example_ohlc.py | example_ohlc.py | py | 341 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tradingview.client.TradingViewWebSocketClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tradingview.client.fetch_japan_symbols",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 11,
"... |
13913894709 | from __future__ import annotations as _annotations
from typing import TYPE_CHECKING
import pytest
from watchfiles import watch
from watchfiles.main import _default_force_polling
if TYPE_CHECKING:
from .conftest import SetEnv
class MockRustNotify:
@staticmethod
def watch(*args):
return 'stop'
def __enter__(self):
return self
def __exit__(self, *args):
pass
def test_watch_polling_not_env(mocker):
m = mocker.patch('watchfiles.main.RustNotify', return_value=MockRustNotify())
for _ in watch('.'):
pass
m.assert_called_once_with(['.'], False, False, 300, True, False)
def test_watch_polling_env(mocker, env: SetEnv):
env('WATCHFILES_FORCE_POLLING', '1')
m = mocker.patch('watchfiles.main.RustNotify', return_value=MockRustNotify())
for _ in watch('.'):
pass
m.assert_called_once_with(['.'], False, True, 300, True, False)
@pytest.mark.parametrize(
'env_var,arg,expected',
[
(None, True, True),
(None, False, False),
(None, None, False),
('', True, True),
('', False, False),
('', None, False),
('1', True, True),
('1', False, False),
('1', None, True),
('disable', True, True),
('disable', False, False),
('disable', None, False),
],
)
def test_default_force_polling(mocker, env: SetEnv, env_var, arg, expected):
uname = type('Uname', (), {'system': 'Linux', 'release': '1'})
mocker.patch('platform.uname', return_value=uname())
if env_var is not None:
env('WATCHFILES_FORCE_POLLING', env_var)
assert _default_force_polling(arg) == expected
@pytest.mark.parametrize(
'env_var,arg,expected,call_count',
[
(None, True, True, 0),
(None, False, False, 0),
(None, None, True, 1),
('', True, True, 0),
('', False, False, 0),
('', None, True, 1),
('1', True, True, 0),
('1', False, False, 0),
('1', None, True, 0),
('disable', True, True, 0),
('disable', False, False, 0),
('disable', None, False, 0),
],
)
def test_default_force_polling_wsl(mocker, env: SetEnv, env_var, arg, expected, call_count):
uname = type('Uname', (), {'system': 'Linux', 'release': 'Microsoft-Standard'})
m = mocker.patch('platform.uname', return_value=uname())
if env_var is not None:
env('WATCHFILES_FORCE_POLLING', env_var)
assert _default_force_polling(arg) == expected
assert m.call_count == call_count
| samuelcolvin/watchfiles | tests/test_force_polling.py | test_force_polling.py | py | 2,551 | python | en | code | 1,336 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "watchfiles.watch",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "conftest.SetEnv",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "watchfiles.watc... |
11464862154 | import speech_recognition as sr
from requests import get
from bs4 import BeautifulSoup
from gtts import gTTS
from paho.mqtt import publish
import os
##### CONFIGURAÇÕES #####
with open('arquivoConfiguraGoogleSpeech.json') as credenciais_google:
credenciais_google = credenciais_google.read()
executaAcao = False
serverMQTT = 'iot.eclipse.org'
portaMQTT = 1883
topicoLuz = 'iluminacao/status'
hotword = 'verônica'
hotwordNoticias = 'notícias'
hotwordTemperatura = 'temperatura'
hotwordLigarLuz = 'ligar a luz'
hotwordDesligarLuz = 'desativar a luz'
def monitorarAudio():
microfone = sr.Recognizer()
with sr.Microphone() as source:
print("Aguardando o Comando: ")
audio = microfone.listen(source)
try:
trigger = microfone.recognize_google_cloud(audio, credentials_json=credenciais_google, language='pt-BR')
trigger = trigger.lower()
if hotword in trigger and not getStatusTrigger():
print('Comando reconhecido!')
respoder('feedback')
setStatusTrigger(True)
elif getStatusTrigger():
setStatusTrigger(False)
return trigger
except sr.UnknownValueError:
print("Google not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Cloud Speech service; {0}".format(e))
return None
def setStatusTrigger(status):
global executaAcao
executaAcao = status
def getStatusTrigger():
return executaAcao
def analisarAcao(comando):
if hotwordNoticias in comando:
retornarUltimasNoticias()
elif hotwordTemperatura in comando:
retornarPrevisaoTempo()
elif hotwordLigarLuz in comando:
publicarNoTopico(topicoLuz, 1)
retornarIluminacao(1)
elif hotwordDesligarLuz in comando:
publicarNoTopico(topicoLuz, 0)
retornarIluminacao(0)
else:
criarAudio(comando.strip(hotword), 'comando')
respoder('comando')
respoder('notfound')
def retornarUltimasNoticias():
site = get('https://news.google.com/news/rss?ned=pt_br&gl=BR&hl=pt')
noticias = BeautifulSoup(site.text, 'html.parser')
for item in noticias.findAll('item')[:2]:
noticia = item.title.text
criarAudio(noticia, 'noticia')
respoder('noticia')
respoder('thau')
def retornarPrevisaoTempo():
site = get('http://api.openweathermap.org/data/2.5/weather?id=3462377&q=goiania,br&APPID=1d20fd1ca254ea2797f60e64520675a8&units=metric&lang=pt')
clima = site.json()
temperatura = clima['main']['temp']
#minima = clima['main']['temp_min']
#maxima = clima['main']['temp_max']
descricao = clima['weather'][0]['description']
mensagem = f'No momento a temperatura é de {temperatura} graus com {descricao}'
criarAudio(mensagem, 'clima')
respoder('clima')
respoder('thau')
def retornarIluminacao(status):
if status == 1:
mensagem = 'A luz foi ligada'
else:
mensagem = 'A luz foi desligada'
criarAudio(mensagem, 'iluminacao')
respoder('iluminacao')
respoder('thau')
def publicarNoTopico(topico, payload):
publish.single(topico, payload=payload, qos=1, retain=True, hostname=serverMQTT,
port=portaMQTT, client_id="veronica")
def criarAudio(texto, nome_arquivo):
tts = gTTS(texto, lang='pt-br')
path = 'audios/' + nome_arquivo + '.mp3'
with open(path, 'wb') as file:
tts.write_to_fp(file)
def respoder(nome_arquivo):
path = 'audios/' + nome_arquivo + '.mp3'
os.system('mpg321 ' + path)
def __main__():
while True:
comando = monitorarAudio()
if comando is not None:
analisarAcao(comando)
__main__()
| cicerojmm/assistentePessoalIoT | veronica/veronica.py | veronica.py | py | 3,705 | python | pt | code | 2 | github-code | 6 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "speech_recognition.UnknownValueError",
"line_number": 38,
"usage_type": "attr... |
3549042262 | from django import forms
from catalog.models import Category, Product
class ProductAdminForm(forms.ModelForm):
class Meta:
model = Product
fields = ['name', 'slug','brand','sku','price','old_price',\
'is_active','is_bestseller','is_featured','quantity',\
'description','meta_keywords','meta_description', \
'categories','image','thumbnail','image_caption']
def clean_price(self):
if self.cleaned_data['price'] <= 0:
raise forms.ValidationError('Price must be greater than zero.')
return self.cleaned_data['price'] | Hamfri/shopping | catalog/forms.py | forms.py | py | 626 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "catalog.models.Product",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.f... |
29125164138 | from django.conf.urls import include, patterns, url
view_location = 'customProfile.views'
urlpatterns = patterns(view_location,
# Views
url(r'^(?P<username>[-\w]+)/', include(patterns(view_location,
url(r'^$', 'profileRedirect', {'redirect_url': 'artist:about'}, name='home'),
url(r'^news/$', 'artist_news', name='news'),
url(r'^shows/$','shows', name='shows'),
url(r'^shows/(?P<year>\d{4})/$','shows',name='shows'),
url(r'^photos/$', 'artist_photos', name='photos'),
))),
)
view_location = 'social_links.views'
urlpatterns += patterns(view_location,
# Forms
url(r'^about/links/edit/$', 'artist_social_links', name='socialLinksForm'),
url(r'^photos/links/edit/$', 'artist_photo_links', name='photoLinksForm'),
url(r'^music/links/edit/$', 'music_links', name='musicLinksForm'),
url(r'^videos/links/edit/$', 'artist_video_links', name='videoLinksForm'),
)
view_location = 'artist.views'
urlpatterns += patterns(view_location,
# Views
url(r'^(?P<username>[-\w]+)/', include(patterns(view_location,
url(r'^about/$', 'about', name='about'),
url(r'^music/$', 'music', name='music'),
url(r'^videos/$', 'videos', name='videos'),
))),
#Forms
url(r'^about/', include(patterns(view_location,
url(r'^biography/edit/$', 'biography', name='biographyForm'),
url(r'^contact/edit/$', 'contact_info', name='userContactForm'),
url(r'^member/add/$', 'members', name='memberForm'),
url(r'^member/edit/(?P<member_id>[\.\w-]+)$', 'members', name='memberForm'),
url(r'^(?P<contact_type>[\.\w-]+)/edit/$', 'contacts', name='contactForm'),
))),
url(r'^music/', include(patterns(view_location,
url(r'^album/add/$', 'AddEditAlbum', name='albumForm'),
url(r'^album/(?P<album_id>[\.\w-]+)/edit/$', 'AddEditAlbum', name='editAlbumForm'),
url(r'^album/(?P<album_id>[\.\w-]+)/tracks/add/$', 'AddEditTracks', name='tracksForm'),
#url(r'^interview/add/$', 'interview_form', name='interview_form'),
#url(r'^interview/add/(?P<trackID>[\.\w-]+)$', 'interview_form', name='interview_form'),
))),
url(r'^videos/', include(patterns(view_location,
url(r'^add/$', 'video_form', name='video_form'),
url(r'^album/add/$', 'AddEditAlbum', {'success_url': 'artist:video_tracks_form'}, name='video_album_form'),
url(r'^album/(?P<album_id>[\.\w-]+)/edit/$', 'AddEditAlbum', {'success_url': 'artist:video_tracks_form'}, name='video_edit_album'),
url(r'^album/(?P<album_id>[\.\w-]+)/tracks/add/$', 'add_video_to_album', name='video_tracks_form'),
#url(r'^interview-video/add/$', 'interview_video_form', name='interview_video_form'),
#url(r'^interview-video/add/(?P<trackID>[\.\w-]+)$', 'interview_video_form', name='interview_video_form'),
))),
)
| TimBest/ComposersCouch | artist/urls.py | urls.py | py | 2,864 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "... |
8845375190 | from fastapi import APIRouter, Depends
from fastapi_pagination import Page, Params
from src.admins.dependencies import get_groups_service, get_valid_group
from src.admins.models import Group
from src.admins.schemas import CreateGroupSchema, GroupOut
from src.admins.services import GroupService
router = APIRouter()
@router.get("/")
async def get_groups(
params: Params = Depends(),
groups_service: GroupService = Depends(get_groups_service)
) -> Page[GroupOut]:
return await groups_service.get_all(params=params)
@router.post("/")
async def create_group(
data: CreateGroupSchema,
groups_service: GroupService = Depends(get_groups_service)
) -> GroupOut:
return await groups_service.create(**data.dict())
@router.get("/{group_id}")
async def get_group(
group: Group = Depends(get_valid_group)
) -> GroupOut:
return group
@router.delete("/{group_id}")
async def delete_group(
group: Group = Depends(get_valid_group)
) -> GroupOut:
await group.delete()
return group | Qwizi/fastapi-sourcemod | sourcemod_api/src/admins/views/groups.py | groups.py | py | 1,015 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "fastapi_pagination.Params",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "src.admins.services.GroupService",
"line_number": 15,
"usage_type": "name"
},
{
"api_... |
30357262081 | from threading import Thread
from time import sleep
from traits.api import HasTraits, Int, Button
from traitsui.api import View, Item, VGroup
class ThreadDemo(HasTraits):
# The thread specific counters:
thread_0 = Int()
thread_1 = Int()
thread_2 = Int()
# The button used to start the threads running:
start = Button('Start Threads')
# The count of how many threads ae currently running:
running = Int()
view = View(
VGroup(
Item('thread_0', style='readonly'),
Item('thread_1', style='readonly'),
Item('thread_2', style='readonly'),
),
'_',
Item('start', show_label=False, enabled_when='running == 0'),
resizable=True,
width=250,
title='Monitoring threads',
)
def _start_changed(self):
for i in range(3):
Thread(
target=self.counter,
args=('thread_%d' % i, (i * 10 + 10) / 1000.0),
).start()
def counter(self, name, interval):
self.running += 1
count = 0
for i in range(200):
setattr(self, name, count)
count += 1
sleep(interval)
self.running -= 1
# Create the demo:
demo = ThreadDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
| enthought/traitsui | traitsui/examples/demo/Advanced/Multi_thread_demo.py | Multi_thread_demo.py | py | 1,376 | python | en | code | 290 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "traits.api.Int",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "traits.api.Int",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "traits.api.Int",
... |
30353743561 | # Author: Varun Hiremath <varun@debian.org>
# Enthought library imports.
from traits.api import Instance, Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.filters.filter_base import FilterBase
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `ExtractVectorComponents` class.
######################################################################
class ExtractVectorComponents(FilterBase):
""" This wraps the TVTK ExtractVectorComponents filter and allows
one to select any of the three components of an input vector data
attribute."""
# The version of this class. Used for persistence.
__version__ = 0
# The actual TVTK filter that this class manages.
filter = Instance(tvtk.ExtractVectorComponents, args=(), allow_none=False)
# The Vector Component to be extracted
component = Enum('x-component', 'y-component', 'z-component',
desc='component of the vector to be extracted')
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['vectors'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
view = View(Group(Item(name='component')),
resizable=True
)
######################################################################
# `Filter` interface.
######################################################################
def update_pipeline(self):
# Do nothing if there is no input.
inputs = self.inputs
if len(inputs) == 0 or len(inputs[0].outputs) == 0:
return
fil = self.filter
self.configure_connection(fil, inputs[0])
fil.update()
self._component_changed(self.component)
######################################################################
# Non-public interface.
######################################################################
def _component_changed(self, value):
# Obtain output from the TVTK ExtractVectorComponents filter
# corresponding to the selected vector component
if len(self.inputs) == 0 or len(self.inputs[0].outputs) == 0:
return
if value == 'x-component':
self._set_outputs([self.filter.vx_component])
elif value == 'y-component':
self._set_outputs([self.filter.vy_component])
elif value == 'z-component':
self._set_outputs([self.filter.vz_component])
self.render()
| enthought/mayavi | mayavi/filters/extract_vector_components.py | extract_vector_components.py | py | 2,728 | python | en | code | 1,177 | github-code | 6 | [
{
"api_name": "mayavi.filters.filter_base.FilterBase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tvtk.api.tvtk.ExtractVectorComponents",
"line_number": 25,
"usage_type": "attri... |
28726581501 | import logging
import feedparser
import requests
from .. import read_list
log = logging.getLogger(__name__)
class VideoFeed:
def __init__(self, known_path, url):
self.url = url
self.read_list = read_list.ReadList(known_path, url)
def is_new(self, item):
return self.read_list.is_new(item.id)
def fetch(self):
try:
response = requests.get(self.url)
response.raise_for_status()
feed = feedparser.parse(response.content)
items = feed.entries
if len(items) == 0:
raise Exception("Empty feed. Is site down?")
new_items = list(filter(self.is_new, items))
log.info("Feed contains %d items, %d are new." % (len(items), len(new_items)))
# iterate first, then save new read list
for i in new_items:
yield i
self.read_list.save()
except Exception:
log.exception("Unexpected error with %s", self.url, exc_info=True)
def fetch_video_codes(self):
raise NotImplementedError
def append_to_queue(self, queue_path):
codes = self.fetch_video_codes()
links = ["\nhttps://www.youtube.com/watch?v=" + v for v in codes]
f = open(queue_path, "a")
f.writelines(links)
f.close()
| EliseAv/tubeforme | video_feeds/_base.py | _base.py | py | 1,334 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 23,
"usage_type": "call"
}
] |
9790495928 | from django.core.files.storage import FileSystemStorage
from django.http import FileResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.http import JsonResponse
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from journeys.errors import ConflictNotResolvedError
from journeys.errors import JourneysError
from journeys.modifier.conflict.plugins import load_plugins
from ..validators.checks_for_cli import default_checks
from . import forms
from . import logic
from . import models
from . import serializers
def get_supported_features(request):
plugins = load_plugins()
return JsonResponse({"items": [plugin.ID for plugin in plugins]})
def get_supported_validators(request):
return JsonResponse(
{
"validators": {
check.name: {
"require_source": check.require_source,
"require_root": check.require_root,
"description": check.description,
}
for check in default_checks.values()
}
}
)
class SessionsViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
def get_serializer_class(self):
if self.action == "retrieve":
return serializers.SessionDetailsSerializer
return serializers.SessionSerializer
queryset = models.Session.objects.all() # pylint: disable=E1101
def perform_create(self, serializer):
session = serializer.save()
logic.get_controller(session=session, allow_empty=True)
@action(detail=True, methods=["post"])
def source(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
system_credentials = None
form = forms.SourceForm(request.POST, request.FILES)
form.is_valid()
if form.errors:
# TODO: Fill response content
return HttpResponseBadRequest()
fs = FileSystemStorage(location=session.working_directory)
as3_file = (
fs.save(form.cleaned_data["as3_file"].name, form.cleaned_data["as3_file"])
if "as3_file" in form.cleaned_data and form.cleaned_data["as3_file"]
else None
)
if "ucs_file" in form.cleaned_data and form.cleaned_data["ucs_file"]:
ucs_file = fs.save(
form.cleaned_data["ucs_file"].name, form.cleaned_data["ucs_file"]
)
ucs_passphrase = form.cleaned_data.get("ucs_passphrase", None)
else:
system_credentials = models.SystemCredentials(
username=form.cleaned_data["username"],
password=form.cleaned_data["password"],
host=form.cleaned_data["host"],
)
try:
ucs_file, ucs_passphrase = logic.download_ucs(
session=session, system_credentials=system_credentials
)
system_credentials.save()
except JourneysError:
# TODO: Fill response content
return HttpResponseBadRequest()
clear = True # request.GET.get("clear", False)
logic.initialize(
session=session,
ucs_file=ucs_file,
ucs_passphrase=ucs_passphrase,
as3_file=as3_file,
clear=clear,
credentials=system_credentials,
)
return Response()
@action(detail=True, methods=["post"])
def current_conflict(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
conflict_id = request.data["conflict_id"]
logic.set_current_conflict(session=session, conflict_id=conflict_id)
return Response()
@current_conflict.mapping.delete
def delete_current_conflict(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
logic.reset_current_conflict(session=session)
return Response()
class SessionFilesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
lookup_url_kwarg = "file_path"
def retrieve(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
fs = FileSystemStorage(location=controller.repo_path)
try:
f = fs.open(file_path)
except FileNotFoundError:
return HttpResponseNotFound()
return FileResponse(f, content_type="application/octet-stream")
def update(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
form = forms.FileUploadFrom(data=request.POST, files=request.FILES)
form.full_clean()
if form.errors:
# TODO: Fill response content
return HttpResponseBadRequest()
fs = FileSystemStorage(location=controller.repo_path)
if fs.exists(file_path):
fs.delete(file_path)
fs.save(file_path, form.cleaned_data["file"])
return Response(status=202)
def delete(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
fs = FileSystemStorage(location=controller.repo_path)
try:
fs.delete(file_path)
except FileNotFoundError:
return HttpResponseNotFound()
return Response()
class SessionBranchesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
class SessionBranchesFilesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
lookup_url_kwarg = "file_path"
def retrieve(
self, request, session_pk, session_branch_pk, file_path, *args, **kwargs
):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
git = controller.repo.git
content = git.show(f"{session_branch_pk}:{file_path}")
return FileResponse(content, content_type="application/octet-stream")
class SessionConflictsViewSet(
    mixins.ListModelMixin, viewsets.GenericViewSet,
):
    """List and retrieve the merge conflicts recorded for a session."""

    def get_queryset(self):
        session_id = self.kwargs["session_pk"]
        return models.Conflict.objects.filter(session=session_id)  # pylint: disable=E1101

    def get_serializer_class(self):
        # The detail view exposes the full conflict payload.
        if self.action == "retrieve":
            return serializers.ConflictDetailsSerializer
        return serializers.ConflictSerializer
class SessionChangesViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet,
):
    """CRUD over the changes recorded for a session; create == commit."""

    def get_queryset(self):
        session_id = self.kwargs["session_pk"]
        queryset = models.Change.objects.filter(session=session_id)  # pylint: disable=E1101
        return queryset.order_by("id")

    def get_serializer_class(self):
        if self.action == "retrieve":
            return serializers.ChangeDetailsSerializer
        return serializers.ChangeSerializer

    def create(self, request, session_pk, *args, **kwargs):
        """Commit the session's pending changes; 400 if conflicts remain."""
        session = models.Session.objects.get(pk=session_pk)  # pylint: disable=E1101
        commit_message = request.data.get("message", None)
        try:
            logic.process(session=session, commit_name=commit_message)
        except ConflictNotResolvedError:
            # TODO: Fill response content
            return HttpResponseBadRequest()
        return Response()
class SessionDeploymentsViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    viewsets.GenericViewSet,
):
    """Create and inspect deployments belonging to a session."""

    def get_queryset(self):
        session_id = self.kwargs["session_pk"]
        return models.Deployment.objects.filter(session=session_id)  # pylint: disable=E1101

    def get_serializer_class(self):
        if self.action == "retrieve":
            return serializers.DeploymentDetailsSerializer
        return serializers.DeploymentSerializer

    @action(detail=True, methods=["get"])
    def log(self, request, session_pk=None, pk=None):
        # TODO: implement
        return Response({})

    @action(detail=True, methods=["get"])
    def report(self, request, session_pk=None, pk=None):
        # TODO: implement
        return Response({})
| wiksla/f5-bigip-journeys-app | journeys/backend/views.py | views.py | py | 8,720 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "journeys.modifier.conflict.plugins.load_plugins",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 28,
"usage_type": "cal... |
14098850969 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Few more things we might wanna work out here.
Our lack of the module :mod:`inspect` is pretty surprising.
Refer to either `IPython.core.oinspect` or `xonsh.inspectors`
for some good uses of the std lib module.
"""
from pprint import pformat
from pygments import highlight
from pygments.formatters.terminal256 import TerminalTrueColorFormatter
try:
    from gruvbox.gruvbox import GruvboxStyle
except ImportError:
    # Bug fix: code further down checks ``GruvboxStyle is not None``.  When
    # the import fails the name must still exist, otherwise that check
    # raises NameError; bind None as the "not installed" sentinel.
    GruvboxStyle = None
    from pygments.styles.inkpot import InkPotStyle

    style = InkPotStyle()
else:
    style = GruvboxStyle()
from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.lib.lexers import IPyLexer
from IPython.lib.pretty import pprint
@magics_class
class PrettyColorfulInspector(Magics):
    """Magics that pretty-print and syntax-highlight a Python object.

    ``%inspect_obj <expr>`` evaluates ``<expr>`` in the user namespace and
    prints a highlighted, pretty-formatted dump of its ``__dict__``.
    """

    # Use Pygments to do syntax highlighting.
    lexer = IPyLexer()
    # Prefer the optional gruvbox style.  ``globals().get`` keeps this safe
    # whether the module-level import bound the name or not (bug fix: the
    # original ``if GruvboxStyle is not None`` raised NameError when the
    # gruvbox package was missing).
    if globals().get("GruvboxStyle") is not None:
        style = GruvboxStyle
    else:
        style = InkPotStyle
    formatter = TerminalTrueColorFormatter(style=style)

    def shell(self):
        """Return the running IPython shell (None outside IPython)."""
        return get_ipython()

    def __repr__(self):
        return f"<{self.__class__.__name__}>:"

    @line_magic
    def ins(self, line=None):
        """Alias for the `%inspect_obj` magic defined here."""
        # Bug fix: previously called ``self.inspect``, which does not exist
        # on this class.
        self.inspect_obj(line=line)

    @line_magic
    def inspect_obj(self, line=None):
        """Evaluate *line* and print a highlighted dump of the result.

        Parameters
        ----------
        line : str
            Expression evaluated via the IPython shell's ``ev()`` method.
        """
        if not line:
            return

        # Evaluate the line to get a python object.
        # Bug fix: ``shell`` is a method and must be called; the original
        # ``self.shell.ev`` looked up ``ev`` on the bound-method object.
        python_object = self.shell().ev(line)

        # Pretty-print with highlighting.  Print instead of returning so the
        # caller isn't handed a wall of color-coded text.
        formatted_dict = pformat(python_object.__dict__)
        # Bug fix: ``lexer``/``formatter`` are class attributes and must be
        # reached through ``self`` inside a method body.
        print(highlight(formatted_dict, self.lexer, self.formatter).strip())
        pprint(python_object)
def load_ipython_extension(shell=None):
    """Register this module's magics with the active IPython shell."""
    active_shell = shell if shell is not None else get_ipython()
    if active_shell is None:
        # Not running inside IPython; nothing to register.
        return
    active_shell.register_magics(PrettyColorfulInspector)
    active_shell.register_magic_function(PrettyColorfulInspector.inspect_obj)
    active_shell.register_magic_function(PrettyColorfulInspector.ins)
if __name__ == "__main__":
    # Allow running the extension module directly for a quick smoke test.
    load_ipython_extension(get_ipython())
| farisachugthai/dynamic_ipython | default_profile/extensions/extension_inspect.py | extension_inspect.py | py | 2,946 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "pygments.styles.inkpot.InkPotStyle",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gruvbox.gruvbox.GruvboxStyle",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "IPython.core.magic.Magics",
"line_number": 32,
"usage_type": "name"
},
... |
19342659029 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
import torchvision.models as models
from torch.autograd import Variable, grad
from copy import deepcopy
from tqdm import tqdm
import torch.nn.init as init
from .DevNet import SemiADNet
import sys
sys.path.append("..")
from data import ALDataset
class waal_clf(nn.Module):
    """Three-layer MLP classifier for WAAL that can expose its embedding.

    ``forward`` returns logits, or ``(logits, embedding)`` when asked, where
    the embedding is the output of the first hidden layer.
    """

    def __init__(self, input_dim, embSize=320, num_classes=1):
        super(waal_clf, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(input_dim, embSize),
            nn.ReLU(),
            nn.Dropout(p=0.01),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(embSize, 50),
            nn.ReLU(),
            nn.Dropout(p=0.01),
        )
        self.layer3 = nn.Linear(50, num_classes)
        self.embSize = embSize

    def forward(self, X, return_embedding=False):
        embedding = self.layer1(X)
        logits = self.layer3(self.layer2(embedding))
        if return_embedding:
            return logits, embedding
        return logits

    def get_embedding_dim(self):
        """Dimensionality of the embedding produced by ``layer1``."""
        return self.embSize
class Net_WAAL:
    """Wasserstein Adversarial Active Learning (WAAL) model wrapper.

    Trains a classifier jointly with a discriminator: the classifier
    minimises prediction loss plus a Wasserstein term between labeled and
    unlabeled embeddings; the discriminator estimates that distance under a
    gradient penalty (Lipschitz constraint).
    """

    def __init__(self, model_name='WAAL', config=None, net_clf=None, net_dis=None):
        self.config = config
        self.batch_size = config['model_batch_size']
        self.nb_batch = 20
        self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
        # NOTE(review): a *class* passed here fails the isinstance check and
        # silently falls back to the defaults -- confirm callers pass
        # nn.Module instances when overriding.
        self.net_clf = net_clf if isinstance(net_clf, nn.Module) else waal_clf
        self.net_dis = net_dis if isinstance(net_dis, nn.Module) else Discriminator
        self.use_imbalence_train = False

    def fit(self, X_train, y_train, X_unlabeled=None, ratio=None, X_valid=None, y_valid=None, alpha=1e-3):
        """Train classifier + discriminator on labeled and unlabeled data.

        ``ratio``, ``X_valid`` and ``y_valid`` are accepted for interface
        compatibility but unused here.
        """
        n_epoch = self.config['n_epoch']
        dim = X_train.shape[1]
        outlier_indices = np.where(y_train == 1)[0]
        inlier_indices = np.where(y_train == 0)[0]
        self.clf = self.net_clf(input_dim=dim, num_classes=2).to(self.device)
        self.dis = self.net_dis(dim=self.clf.get_embedding_dim()).to(self.device)

        # One optimizer per network.
        self.opt_clf = optim.Adam(self.clf.parameters(), lr=0.001, weight_decay=1e-5)
        self.opt_dis = optim.Adam(self.dis.parameters(), lr=0.001, weight_decay=1e-5)

        # Unbalancing ratio between labeled and unlabeled pools, in [0, 1].
        gamma_ratio = X_train.shape[0] / X_unlabeled.shape[0]
        print(gamma_ratio)

        self.clf.train()
        self.dis.train()

        if not self.use_imbalence_train:
            data = ALDataset(X_train, y_train)
            loader = DataLoader(data, self.batch_size, shuffle=True)
        for epoch in range(n_epoch):
            if self.use_imbalence_train:
                # Balanced half-inlier/half-outlier batches.
                for i in range(self.nb_batch):
                    label_x, label_y = self.input_batch_generation_sup(X_train, outlier_indices, inlier_indices, self.batch_size)
                    label_x, label_y = label_x.to(self.device), label_y.to(self.device)
                    unlabel_x = self.sample_unlabeled(X_unlabeled, self.batch_size).to(self.device)
                    self.train(label_x, label_y, unlabel_x, gamma_ratio=gamma_ratio, alpha=alpha)
            else:
                for batch_idx, (label_x, label_y) in enumerate(loader):
                    label_y = label_y.unsqueeze(1)
                    label_x, label_y = label_x.to(self.device), label_y.to(self.device)
                    unlabel_x = self.sample_unlabeled(X_unlabeled, len(label_x)).to(self.device)
                    self.train(label_x, label_y, unlabel_x, gamma_ratio=gamma_ratio, alpha=alpha)

    def train(self, label_x, label_y, unlabel_x, gamma_ratio=None, alpha=1e-3):
        """One adversarial step: update classifier, then discriminator."""
        # Step 1: train feature extractor and predictor.
        self.set_requires_grad(self.clf, requires_grad=True)
        self.set_requires_grad(self.dis, requires_grad=False)

        self.opt_clf.zero_grad()
        lb_out, lb_z = self.clf(label_x, return_embedding=True)
        _, unlb_z = self.clf(unlabel_x, return_embedding=True)

        # Prediction loss; targets are (1 - p, p) soft labels.
        zero_prob = 1 - label_y
        gt_probs = torch.cat((zero_prob, label_y), 1)
        pred_loss = torch.mean(F.cross_entropy(lb_out, gt_probs))

        # Wasserstein loss (unbalanced loss, using the redundant trick).
        wassertein_distance = self.dis(unlb_z).mean() - gamma_ratio * self.dis(lb_z).mean()

        with torch.no_grad():
            _, lb_z = self.clf(label_x, return_embedding=True)
            _, unlb_z = self.clf(unlabel_x, return_embedding=True)
        gp = self.gradient_penalty(self.dis, unlb_z, lb_z)

        # Gradient-penalty weight 5 (e.g. CIFAR10); 2 is used for SVHN.
        loss = pred_loss + alpha * wassertein_distance + alpha * gp * 5
        loss.backward()
        self.opt_clf.step()

        # Step 2: train the discriminator.
        self.set_requires_grad(self.clf, requires_grad=False)
        self.set_requires_grad(self.dis, requires_grad=True)
        with torch.no_grad():
            _, lb_z = self.clf(label_x, return_embedding=True)
            _, unlb_z = self.clf(unlabel_x, return_embedding=True)

        for _ in range(1):
            # Gradient ascent repeated like in GAN training.
            gp = self.gradient_penalty(self.dis, unlb_z, lb_z)
            wassertein_distance = self.dis(unlb_z).mean() - gamma_ratio * self.dis(lb_z).mean()
            dis_loss = -1 * alpha * wassertein_distance - alpha * gp * 2
            self.opt_dis.zero_grad()
            dis_loss.backward()
            self.opt_dis.step()

    def input_batch_generation_sup(self, X_train, outlier_indices, inlier_indices, batch_size):
        '''
        Batches of samples for csv data, alternating positive/negative pairs:
        half the batch is sampled from inliers, half from outliers.
        '''
        n_inliers = len(inlier_indices)
        n_outliers = len(outlier_indices)
        sample_num = batch_size // 2
        inlier_idx = np.random.choice([i for i in range(n_inliers)], sample_num, replace=True)
        outlier_idx = np.random.choice([i for i in range(n_outliers)], sample_num, replace=True)
        sampled_X = np.concatenate((X_train[inlier_indices[inlier_idx]], X_train[outlier_indices[outlier_idx]]), axis=0)
        sampled_y = np.concatenate((np.expand_dims(np.zeros(sample_num), axis=1), np.expand_dims(np.ones(sample_num), axis=1)), axis=0)
        return torch.from_numpy(sampled_X).float(), torch.from_numpy(sampled_y).float()

    def sample_unlabeled(self, X_unlabeled, batch_size):
        """Sample (with replacement) a batch from the unlabeled pool."""
        is_replace = True
        idx = np.random.choice([i for i in range(len(X_unlabeled))], batch_size, replace=is_replace)
        sampled = X_unlabeled[idx]
        return torch.from_numpy(sampled).float()

    def deviation_loss(self, y_true, y_pred):
        '''
        z-score-based deviation loss against a standard-normal reference.
        '''
        confidence_margin = 5.
        # Bug fix: use the configured device instead of a hard-coded
        # ``.cuda()`` call, which crashed on CPU-only configurations.
        self.ref = torch.normal(mean=0., std=torch.full([5000], 1.)).to(self.device)
        dev = (y_pred - torch.mean(self.ref)) / torch.std(self.ref)
        inlier_loss = torch.abs(dev)
        outlier_loss = torch.abs((confidence_margin - dev).clamp_(min=0.))
        return torch.mean((1 - y_true) * inlier_loss + y_true * outlier_loss)

    def predict_prob(self, X, y=None, method="linear", threshold_method="quantile", num=0.95):
        """Return softmax class probabilities for *X* (numpy array)."""
        self.clf.eval()
        with torch.no_grad():
            X = torch.from_numpy(X).to(self.device)
            out = self.clf(X.float())
            prob = F.softmax(out, dim=1).cpu().detach()
        return prob

    def predict_score(self, X, y=None, return_threshold=False, quantile_num=0.95):
        """Anomaly score = P(class 1); optionally also a quantile threshold."""
        prob = self.predict_prob(X).numpy()
        score = prob[:, 1]
        if return_threshold:
            print('quantile:')
            print(np.quantile(score, [i / 10 for i in range(0, 11)]))
            threshold = np.quantile(score, quantile_num)
            return score, threshold
        else:
            return score

    def predict(self, X, y=None, threshold=0.5):
        """Hard label prediction via argmax over class probabilities."""
        prob = self.predict_prob(X)
        label = prob.max(1)[1]
        return label

    def single_worst(self, probas):
        """
        The single worst will return the max_{k} -log(proba[k]) for each sample.
        :param probas:
        :return: # unlabeled x 1 (tensor float)
        """
        value, _ = torch.max(-1 * torch.log(probas), 1)
        return value

    def set_requires_grad(self, model, requires_grad=True):
        """
        Toggle gradient tracking for every parameter of *model*.
        Used in the alternating adversarial training steps.
        """
        for param in model.parameters():
            param.requires_grad = requires_grad

    def gradient_penalty(self, critic, h_s, h_t):
        '''Gradient penalty keeping the critic (approximately) 1-Lipschitz.'''
        alpha = torch.rand(h_s.size(0), 1).to(self.device)
        differences = h_t - h_s
        interpolates = h_s + (alpha * differences)
        interpolates = torch.cat([interpolates, h_s, h_t]).requires_grad_()
        preds = critic(interpolates)
        gradients = grad(preds, interpolates,
                         grad_outputs=torch.ones_like(preds),
                         retain_graph=True, create_graph=True)[0]
        gradient_norm = gradients.norm(2, dim=1)
        gradient_penalty = ((gradient_norm - 1) ** 2).mean()
        return gradient_penalty

    def get_model(self):
        """Return the trained classifier network."""
        return self.clf

    def get_embeddings(self, data):
        """Return the classifier embedding for every item in *data*."""
        self.clf.eval()
        embeddings = torch.zeros([len(data), self.clf.get_embedding_dim()])
        # Bug fix: ``self.params`` was never defined anywhere in this class
        # (only ``self.config``); use the configured batch size directly.
        loader = DataLoader(data, shuffle=False, batch_size=self.batch_size)
        with torch.no_grad():
            for x, y, idxs in loader:
                x, y = x.to(self.device), y.to(self.device)
                # Bug fix: forward() returns a single tensor unless
                # return_embedding=True; the original two-value unpack of
                # ``self.clf(x)`` raised at runtime.
                out, e1 = self.clf(x, return_embedding=True)
                embeddings[idxs] = e1.cpu()
        return embeddings
class Discriminator(nn.Module):
    """Adversary architecture(Discriminator) for WAE-GAN."""

    def __init__(self, dim=20):
        super(Discriminator, self).__init__()
        self.dim = np.prod(dim)
        self.net = nn.Sequential(
            nn.Linear(self.dim, 512),
            nn.ReLU(True),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, 1),
            nn.Sigmoid(),
        )
        self.weight_init()

    def weight_init(self):
        # Kaiming-initialise every layer of every registered sub-module.
        for name in self._modules:
            for layer in self._modules[name]:
                kaiming_init(layer)

    def forward(self, z):
        # Flatten to a 1-D tensor of per-sample scores.
        return self.net(z).reshape(-1)
def kaiming_init(m):
    """He-initialise conv/linear layers; reset batch-norm to identity."""
    linear_like = (nn.Linear, nn.Conv2d)
    norm_like = (nn.BatchNorm1d, nn.BatchNorm2d)
    if isinstance(m, linear_like):
        init.kaiming_normal_(m.weight)
    elif isinstance(m, norm_like):
        m.weight.data.fill_(1)
    else:
        return
    # Both branches zero the bias when one is present.
    if m.bias is not None:
        m.bias.data.fill_(0)
def normal_init(m, mean, std):
    """Initialise conv/linear weights from N(mean, std); batch-norm to identity.

    Biases, when present, are zeroed in both cases.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        # Bug fix: test the parameter itself -- ``m.bias.data`` raises
        # AttributeError when the layer was built with bias=False.
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
| clarenceluo78/ActiveAD | models/nets_waal.py | nets_waal.py | py | 10,208 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
39911776352 | import os
import re
import json
import pickle
import kss
import pandas as pd
from tqdm import tqdm
from elasticsearch import Elasticsearch
from torch.utils.data import DataLoader, TensorDataset
from datasets import load_metric, load_from_disk, load_dataset, Features, Value, Sequence, DatasetDict, Dataset
from sentence_transformers import SentenceTransformer, util
from data_processing import *
from mask import mask_to_tokens
def save_pickle(save_path, data_set):
    """Serialise *data_set* to *save_path* with pickle.

    Uses a context manager so the file handle is closed even when
    ``pickle.dump`` raises (the original leaked the handle on error).
    """
    with open(save_path, "wb") as file:
        pickle.dump(data_set, file)
    return None
def get_pickle(pickle_path):
    """Load and return the pickled object stored at *pickle_path*.

    Uses a context manager so the handle is closed even if unpickling raises.
    """
    with open(pickle_path, "rb") as f:
        dataset = pickle.load(f)
    return dataset
def save_data(data_path, new_wiki):
    """Write *new_wiki* to *data_path* as tab-indented, UTF-8 JSON."""
    payload = json.dumps(new_wiki, indent="\t", ensure_ascii=False)
    with open(data_path, 'w', encoding='utf-8') as make_file:
        make_file.write(payload)
def passage_split_400(text):
    """Split *text* into sentence-aligned chunks of roughly 400 characters.

    Returns ``(chunks, len(chunks))``.  The target chunk count is
    ``len(text) // 400``; sentences are appended to the current chunk while
    that keeps its length closer to 400, otherwise a new chunk is started.
    """
    # Target number of chunks; once `count` reaches it, all remaining
    # sentences are appended to the final chunk.
    num = len(text) // 400
    count = 1
    split_datas = kss.split_sentences(text)
    data_list = []
    data = ""
    for split_data in split_datas:
        # Grow the current chunk while that moves its length closer to the
        # 400-character target (and more chunks are still needed).
        if abs(len(data) - 400) > abs(len(data) + len(split_data) - 400) and count < num:
            if len(data) == 0:
                data += split_data
            else:
                data += (" " + split_data)
        elif count < num:
            # Adding the sentence would overshoot: close this chunk and
            # start the next one with the current sentence.
            data_list.append(data)
            count += 1
            data = ""
            data += split_data
        else:
            # Target count reached: accumulate the remainder here.
            # (Note: no space separator in this branch, unlike above.)
            data += split_data
    data_list.append(data)
    return data_list, len(data_list)
def passage_split(text):
    """Split *text* into two halves along sentence boundaries.

    Sentences are greedily assigned to the first half while that keeps its
    length closer to ``len(text) // 2``; the rest accumulate in the second.
    """
    target = len(text) // 2
    first, second = "", ""
    for sentence in kss.split_sentences(text):
        grows_closer = abs(len(first) - target) > abs(len(first) + len(sentence) - target)
        if grows_closer:
            first = sentence if not first else first + " " + sentence
        else:
            second = sentence if not second else second + " " + sentence
    return first, second
def preprocess(text):
    """Normalise whitespace and strip characters outside the allowed set.

    Applies, in order: newline -> space, literal "\\n" -> space, whitespace
    collapse, '#' -> space, then removal of characters not in the
    Latin/Korean/Japanese/CJK/punctuation whitelist.
    """
    substitutions = (
        (r'\n', ' '),
        (r"\\n", " "),
        (r"\s+", " "),
        (r'#', ' '),
        (r"[^a-zA-Z0-9가-힣ㄱ-ㅎㅏ-ㅣぁ-ゔァ-ヴー々〆〤一-龥<>()\s\.\?!》《≪≫\'<>〈〉:‘’%,『』「」<>・\"-“”∧]", ""),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
def run_preprocess(data_dict):
    """Clean an example's context while keeping the answer span aligned.

    The context is cleaned in two pieces, split at the answer start, so the
    number of characters removed *before* the answer can be subtracted from
    ``answer_start``.
    """
    answer_start = data_dict["answers"]["answer_start"][0]
    before = data_dict["context"][:answer_start]
    after = data_dict["context"][answer_start:]
    clean_before = preprocess(before)
    clean_after = preprocess(after)
    data_dict["context"] = clean_before + clean_after
    # Characters removed ahead of the answer shift its start index left.
    shift = len(before) - len(clean_before)
    data_dict["answers"]["answer_start"][0] = answer_start - shift
    return data_dict
def run_preprocess_to_wiki(data_dict):
    """Clean a wiki document's ``text`` field in place and return the dict."""
    data_dict["text"] = preprocess(data_dict["text"])
    return data_dict
def search_es(es_obj, index_name, question_text, n_results):
    """Run a full-text match query and return the raw Elasticsearch response.

    Matches *question_text* against the ``document_text`` field of
    *index_name*, returning at most *n_results* hits.
    """
    match_query = {"query": {"match": {"document_text": question_text}}}
    return es_obj.search(index=index_name, body=match_query, size=n_results)
def make_custom_dataset(dataset_path):
    """Build (and cache to disk) the dataset variant named by *dataset_path*.

    The path is used as a switch:
      * 'preprocess'  -- clean the raw MRC train/validation splits.
      * 'squad'       -- append KorQuAD v1 examples to the train split.
      * 'concat'      -- concatenate top-k Elasticsearch contexts per question.
      * 'split_wiki_400' / 'split_wiki_800' / 'split_wiki_1000'
                      -- shard long wiki passages into smaller JSON files.

    Returns the built DatasetDict for the dataset branches; the wiki
    branches only write JSON files under ../data.
    """
    if not (os.path.isdir("../data/train_dataset") or
            os.path.isdir("../data/wikipedia_documents.json")):
        raise Exception("Set the original data path to '../data'")

    # Schema shared by every DatasetDict built below.
    train_f = Features({'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
                        'context': Value(dtype='string', id=None),
                        'id': Value(dtype='string', id=None),
                        'question': Value(dtype='string', id=None)})

    if not os.path.isfile("../data/preprocess_wiki.json"):
        with open("../data/wikipedia_documents.json", "r") as f:
            wiki = json.load(f)
        new_wiki = dict()
        for ids in range(len(wiki)):
            # Bug fix: the helper is named `run_preprocess_to_wiki`;
            # `run_preprocess_to` does not exist (NameError).
            new_wiki[str(ids)] = run_preprocess_to_wiki(wiki[str(ids)])
        with open('../data/preprocess_wiki.json', 'w', encoding='utf-8') as make_file:
            json.dump(new_wiki, make_file, indent="\t", ensure_ascii=False)

    if not os.path.isfile("/opt/ml/input/data/preprocess_train.pkl"):
        train_dataset = load_from_disk("../data/train_dataset")['train']
        val_dataset = load_from_disk("../data/train_dataset")['validation']

        new_train_data, new_val_data = [], []
        for data in train_dataset:
            new_data = run_preprocess(data)
            new_train_data.append(new_data)
        for data in val_dataset:
            new_data = run_preprocess(data)
            new_val_data.append(new_data)

        train_df = pd.DataFrame(new_train_data)
        val_df = pd.DataFrame(new_val_data)
        dataset = DatasetDict({'train': Dataset.from_pandas(train_df, features=train_f),
                               'validation': Dataset.from_pandas(val_df, features=train_f)})
        save_pickle(dataset_path, dataset)

        if 'preprocess' in dataset_path:
            return dataset
    elif 'preprocess' in dataset_path:
        # Bug fix: `dataset` was unbound here when the cached pickle already
        # existed; return the cached copy instead.
        return get_pickle("/opt/ml/input/data/preprocess_train.pkl")

    if 'squad' in dataset_path:
        train_data = get_pickle("../data/preprocess_train.pkl")["train"]
        val_data = get_pickle("../data/preprocess_train.pkl")["validation"]
        korquad_data = load_dataset("squad_kor_v1")["train"]

        df_train_data = pd.DataFrame(train_data)
        df_val_data = pd.DataFrame(val_data)
        df_korquad_data = pd.DataFrame(korquad_data, columns=['answers', 'context', 'id', 'question'])

        df_total_train = pd.concat([df_train_data, df_korquad_data])
        dataset = DatasetDict({'train': Dataset.from_pandas(df_total_train, features=train_f),
                               'validation': Dataset.from_pandas(df_val_data, features=train_f)})
        save_pickle("../data/korquad_train.pkl", dataset)
        # Bug fix: `train_dataset` is undefined in this branch; return the
        # DatasetDict that was just built.
        return dataset

    if 'concat' in dataset_path:
        base_dataset = get_pickle("../data/preprocess_train.pkl")
        train_dataset, val_dataset = base_dataset["train"], base_dataset["validation"]
        train_data = [{"id": train_dataset[i]["id"], "question": train_dataset[i]["question"],
                       "answers": train_dataset[i]["answers"], "context": train_dataset[i]["context"]}
                      for i in range(len(train_dataset))]
        val_data = [{"id": val_dataset[i]["id"], "question": val_dataset[i]["question"],
                     "answers": val_dataset[i]["answers"], "context": val_dataset[i]["context"]}
                    for i in range(len(val_dataset))]

        config = {'host': 'localhost', 'port': 9200}
        es = Elasticsearch([config])

        k = 5  # k : how many contexts to concatenate
        for idx, train in enumerate(train_data):
            # Bug fix: `question` was undefined; the query text comes from
            # the current training example.
            res = search_es(es, "wiki-index", train["question"], k)
            context_list = [(hit['_source']['document_text'], hit['_score']) for hit in res['hits']['hits']]
            contexts = train["context"]
            count = 0
            for context in context_list:
                # if same context already exists, don't concatenate
                if train["context"] == context[0]:
                    continue
                contexts += " " + context[0]
                count += 1
                if count == (k - 1):
                    break
            train_data[idx]["context"] = contexts
        for idx, val in enumerate(val_data):
            # Bug fix: same undefined `question` issue as the train loop.
            res = search_es(es, "wiki-index", val["question"], k)
            context_list = [(hit['_source']['document_text'], hit['_score']) for hit in res['hits']['hits']]
            contexts = val["context"]
            count = 0
            for context in context_list:
                if val["context"] == context[0]:
                    continue
                contexts += " " + context[0]
                count += 1
                if count == (k - 1):
                    break
            val_data[idx]["context"] = contexts

        train_df = pd.DataFrame(train_data)
        val_df = pd.DataFrame(val_data)
        dataset = DatasetDict({'train': Dataset.from_pandas(train_df, features=train_f),
                               'validation': Dataset.from_pandas(val_df, features=train_f)})
        save_pickle(dataset_path, dataset)
        return dataset

    if "split_wiki_400" in dataset_path:
        with open("/opt/ml/input/data/preprocess_wiki.json", "r") as f:
            wiki = json.load(f)

        new_wiki = dict()
        for i in tqdm(range(len(wiki))):
            if len(wiki[str(i)]["text"]) < 800:
                new_wiki[str(i)] = wiki[str(i)]
                continue
            data_list, count = passage_split_400(wiki[str(i)]["text"])
            for j in range(count):
                new_wiki[str(i) + f"_{j}"] = {"text": data_list[j], "corpus_source": wiki[str(i)]["corpus_source"],
                                              "url": wiki[str(i)]["url"], "domain": wiki[str(i)]["domain"],
                                              "title": wiki[str(i)]["title"], "author": wiki[str(i)]["author"],
                                              "html": wiki[str(i)]["html"], "document_id": wiki[str(i)]["document_id"]}

        save_data("../data/wiki-index-split-400.json", new_wiki)

    if "split_wiki" in dataset_path and dataset_path != "split_wiki_400":
        with open("/opt/ml/input/data/preprocess_wiki.json", "r") as f:
            wiki = json.load(f)

        limit = 0
        if "800" in dataset_path:
            limit = 800
        if "1000" in dataset_path:
            limit = 1000

        new_wiki = dict()
        for i in tqdm(range(len(wiki))):
            if len(wiki[str(i)]["text"]) < limit:
                new_wiki[str(i)] = wiki[str(i)]
                continue
            data_1, data_2 = passage_split(wiki[str(i)]["text"])
            new_wiki[str(i) + f"_1"] = {"text": data_1, "corpus_source": wiki[str(i)]["corpus_source"], "url": wiki[str(i)]["url"],
                                        "domain": wiki[str(i)]["domain"], "title": wiki[str(i)]["title"], "author": wiki[str(i)]["author"],
                                        "html": wiki[str(i)]["html"], "document_id": wiki[str(i)]["document_id"]}
            new_wiki[str(i) + f"_2"] = {"text": data_2, "corpus_source": wiki[str(i)]["corpus_source"], "url": wiki[str(i)]["url"],
                                        "domain": wiki[str(i)]["domain"], "title": wiki[str(i)]["title"],
                                        "author": wiki[str(i)]["author"], "html": wiki[str(i)]["html"], "document_id": wiki[str(i)]["document_id"]}

        # Bug fix: save_data() requires the data argument; the original call
        # passed only the path (TypeError).
        save_data(f"../data/split_wiki_{limit}.json", new_wiki)
def make_mask_dataset(dataset_path, k, tokenizer):
    """Build a masked-token training dataset and cache it to *dataset_path*.

    Loads the cached 'default' or 'concat' dataset, tokenizes both splits,
    masks the top-k tokens of the train split with a sentence-transformer
    model, and pickles the resulting DatasetDict.
    """
    base_dataset = None
    if 'default' in dataset_path:
        base_dataset = get_pickle("../data/preprocess_train.pkl")
    if 'concat' in dataset_path:
        base_dataset = get_pickle("../data/concat_train.pkl")
        # k is encoded as the first digit in the path for concat datasets.
        k = int(re.findall(r"\d", dataset_path)[0])

    data_processor = DataProcessor(tokenizer)
    # Bug fix: the DatasetDicts built in this module are keyed
    # 'train'/'validation' -- 'val' raised KeyError.
    train_dataset, val_dataset = base_dataset['train'], base_dataset['validation']
    column_names = train_dataset.column_names
    train_dataset = data_processor.train_tokenizer(train_dataset, column_names)
    val_dataset = data_processor.val_tokenizer(val_dataset, column_names)

    model = SentenceTransformer('sentence-transformers/xlm-r-100langs-bert-base-nli-stsb-mean-tokens')
    mask_dataset = mask_to_tokens(train_dataset, tokenizer, k, model)

    dataset = DatasetDict({'train': mask_dataset,
                           'validation': val_dataset})
    save_pickle(dataset_path, dataset)
    return dataset
| TEAM-IKYO/Open-Domain-Question-Answering | code/prepare_dataset.py | prepare_dataset.py | py | 12,138 | python | en | code | 24 | github-code | 6 | [
{
"api_name": "pickle.dump",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "kss.split_sentences",
"line_num... |
21940354299 | r"""Format notebooks using the TensorFlow docs style.
Install the tensorflow-docs package:
$ python3 -m pip install -U [--user] git+https://github.com/tensorflow/docs
Usage:
$ python3 -m tensorflow_docs.tools.nbfmt [options] notebook.ipynb [...]
See the TensorFlow notebook template:
https://github.com/tensorflow/docs/blob/master/tools/templates/notebook.ipynb
And the TensorFlow docs contributor guide:
https://www.tensorflow.org/community/contribute/docs
"""
import enum
import json
import os
import pathlib
import re
import sys
import textwrap
from typing import Any, Dict, List
from absl import app
from absl import flags
from tensorflow_docs.tools.nbfmt import notebook_utils
# Default serialization mode; may be overridden at runtime via --oss in main().
OSS = True

# Command-line flags controlling formatting behavior.
flags.DEFINE_integer(
    "indent", 2, "Indention level for pretty-printed JSON.", lower_bound=0)
flags.DEFINE_bool("oss", None, "Use OSS formatting.")
flags.DEFINE_bool("remove_outputs", False,
                  "Remove output cells from the notebook")
flags.DEFINE_bool("test", False,
                  "Test if the notebook is formatted (useful for CI).")

# Parsed flag values, populated by absl when app.run() executes.
FLAGS = flags.FLAGS
def clean_notebook(data: Dict[str, Any], nb_source: str, filepath: pathlib.Path,
                   remove_outputs: bool, indent: int) -> bytes:
    """Format a parsed notebook and return its serialized JSON bytes.

    Args:
      data: object representing a parsed JSON notebook.
      nb_source: JSON string of entire notebook contents.
      filepath: String of notebook filepath passed to the command-line.
      remove_outputs: Boolean to clear cell output.
      indent: Integer indicating the number of spaces to indent the JSON.

    Returns:
      A byte string for the JSON formatted notebook.
    """
    clean_root(data, filepath)  # Top-level notebook fields.
    clean_cells(data, nb_source, remove_outputs)
    update_license_cells(data)

    serialized = json.dumps(data, sort_keys=True, ensure_ascii=False, indent=indent)
    if not OSS:
        # Serialization differences in environments.
        for raw, escaped in {"<": r"\u003c", ">": r"\u003e", "&": r"\u0026"}.items():
            serialized = serialized.replace(raw, escaped)
    return (serialized + "\n").encode("utf-8")
def clean_root(data: Dict[str, Any], filepath: pathlib.Path) -> None:
    """Prune top-level notebook fields and metadata down to the supported set.

    Jupyter format spec:
      https://nbformat.readthedocs.io/en/latest/format_description.html

    Args:
      data: object representing a parsed JSON notebook.
      filepath: String of notebook filepath passed to the command-line.
    """
    # Only these top-level fields are required by the spec.
    notebook_utils.del_entries_except(
        data, keep=["cells", "metadata", "nbformat_minor", "nbformat"])
    # Metadata is entirely optional per spec, but a few entries are used.
    notebook_utils.del_entries_except(
        data["metadata"], keep=["accelerator", "colab", "kernelspec"])

    metadata = data.get("metadata", {})
    colab = metadata.get("colab", {})

    # Top-level notebook defaults.
    data["nbformat"] = 4
    data["nbformat_minor"] = 0

    # Colab metadata.
    notebook_utils.del_entries_except(
        colab, keep=["collapsed_sections", "name", "toc_visible"])
    colab["name"] = os.path.basename(filepath)
    colab["toc_visible"] = True
    metadata["colab"] = colab

    # Kernelspec metadata: default to Python 3 (same as Colab) unless the
    # notebook explicitly names a supported alternative.
    kernelspec = metadata.get("kernelspec", {})
    notebook_utils.del_entries_except(kernelspec, keep=["display_name", "name"])
    supported_kernels = {"python3": "Python 3", "swift": "Swift"}
    kernel_name = kernelspec.get("name")
    if kernel_name not in supported_kernels:
        kernel_name = "python3"
    kernelspec["name"] = kernel_name
    kernelspec["display_name"] = supported_kernels[kernel_name]
    metadata["kernelspec"] = kernelspec

    data["metadata"] = metadata
def _clean_code_cell(cell_data: Dict[str, Any], remove_outputs: bool) -> None:
    """Normalise a single code cell, optionally dropping its outputs.

    Args:
      cell_data: object representing a parsed JSON cell.
      remove_outputs: Boolean to clear cell output.
    """
    if remove_outputs:
        cell_data.update(outputs=[], execution_count=None)

    # The nbformat spec requires an outputs list even when empty.
    if cell_data.get("outputs") is None:
        cell_data["outputs"] = []

    # Execution count is null or an int; null is the Colab default, and 0 is
    # normalised to null.
    if cell_data.get("execution_count") == 0:
        cell_data["execution_count"] = None
def _clean_metadata_colab(cell_metadata: Dict[str, Any],
                          remove_outputs: bool) -> None:
    """Prune ``metadata.colab``, keeping only embedded resources (if any).

    Colab resources embed data within the notebook and are treated like
    output cells: kept unless outputs are being removed.

    Args:
      cell_metadata: object representing the parsed JSON metadata from a cell.
      remove_outputs: Boolean to clear cell output.
    """
    colab = cell_metadata.pop("colab", {})
    if remove_outputs:
        # Resources behave like outputs, so they go too; nothing to restore.
        return
    # Re-attach only when `resources` is non-empty; otherwise the whole
    # `metadata.colab` entry stays removed.
    if colab.get("resources"):
        notebook_utils.del_entries_except(colab, keep=["resources"])
        cell_metadata["colab"] = colab
def clean_cells(data: Dict[str, Any], nb_source: str,
                remove_outputs: bool) -> None:
    """Drop empty cells, scrub cell metadata, and clean code cells.

    Args:
      data: Object representing a parsed JSON notebook.
      nb_source: JSON string of entire notebook contents.
      remove_outputs: Boolean True to remove code cell outputs, False to keep.
    """
    # Trim leading and trailing blank lines from every cell's source.
    for cell in data["cells"]:
        src = cell["source"]
        while src and src[0] == "\n":
            src.pop(0)
        while src and src[-1] == "\n":
            src.pop()
        cell["source"] = src

    # Cells whose source is entirely empty are discarded.
    data["cells"] = [cell for cell in data["cells"] if any(cell["source"])]

    # Scrub per-cell metadata, generating stable ids where missing.
    for position, cell in enumerate(data["cells"], start=1):
        meta = cell.get("metadata", {})
        if "id" not in meta:
            meta["id"] = notebook_utils.generate_cell_id(cell["source"], position)
        notebook_utils.del_entries_except(meta, keep=["id", "cellView", "colab"])
        _clean_metadata_colab(meta, remove_outputs)
        cell["metadata"] = meta

    # The presence of this field indicates that outputs are already saved.
    has_outputs = '"output_type"' in nb_source
    for cell in data["cells"]:
        if cell["cell_type"] == "code":
            _clean_code_cell(cell, remove_outputs)
            if has_outputs and remove_outputs:
                notebook_utils.warn("Removed the existing output cells.")
def update_license_cells(data: Dict[str, Any]) -> None:
    """Collapse license-boilerplate cells into Colab "form" view.

    Args:
      data: object representing a parsed JSON notebook.
    """
    # This pattern appears in both Apache and MIT license boilerplate.
    boilerplate = re.compile(r"#@title.*License")
    for index, cell in enumerate(data["cells"]):
        joined_source = "".join(cell["source"])
        if not boilerplate.search(joined_source):
            continue
        # Hide the code pane so only the form title shows.
        meta = cell.get("metadata", {})
        meta["cellView"] = "form"
        data["cells"][index]["metadata"] = meta
class Status(enum.Enum):
    """Outcome of `format_nb`: PASS maps to exit code 0, FAIL to 1."""
    PASS = 0
    FAIL = 1
def format_nb(
    *,
    notebooks: List[str],
    remove_outputs: bool = False,
    indent: int = 2,
    test: bool = False,
) -> Status:
  """Formats a notebook.

  Args:
    notebooks: Paths of notebooks (or directories) to format.
    remove_outputs: True to strip saved code-cell outputs.
    indent: Number of spaces of indentation in the emitted JSON.
    test: True to only check formatting; no files are rewritten.

  Returns:
    Status.PASS when every notebook is (or was made) well-formatted,
    Status.FAIL otherwise.
  """
  found_error = False  # Track errors for final return code.
  test_fail_notebooks = []
  paths, err_paths = notebook_utils.collect_notebook_paths(notebooks)
  if err_paths:
    found_error = True
    test_fail_notebooks.extend(err_paths)
  for path in paths:
    print(f"Format notebook: {path}", file=sys.stderr)
    data, source = notebook_utils.load_notebook(path)
    if not data:
      # Unparseable notebook: record the failure and keep going.
      found_error = True
      test_fail_notebooks.append(path)
      continue
    # Returns formatted JSON byte string.
    expected_output = clean_notebook(data, source, path, remove_outputs, indent)
    if test:
      # Compare formatted contents with original file contents.
      src_bytes = path.read_bytes()
      if expected_output != src_bytes:
        test_fail_notebooks.append(path)
    else:
      path.write_bytes(expected_output)
  if test:
    if test_fail_notebooks:
      error_template = textwrap.dedent("""
      [test] The following notebooks are not formatted:
      {notebooks}
      Please install `nbfmt` and format:
      $ python3 -m pip install -U --user git+https://github.com/tensorflow/docs
      $ python3 -m tensorflow_docs.tools.nbfmt notebook.ipynb
      """)
      notebooks = "\n".join([f"- {str(fp)}" for fp in test_fail_notebooks])
      print(error_template.format(notebooks=notebooks), file=sys.stderr)
      return Status.FAIL
    else:
      print("[test] Notebooks are formatted", file=sys.stderr)
      return Status.PASS
  if found_error:
    return Status.FAIL
  return Status.PASS
def main(argv):
  """CLI entry point: formats the notebooks given as positional arguments."""
  if len(argv) <= 1:
    raise app.UsageError("Missing arguments.")
  if FLAGS.oss is not None:
    # NOTE(review): OSS appears to be a module-level switch defined earlier
    # in this file -- confirm its consumers.
    global OSS
    OSS = FLAGS.oss
  exit_code = format_nb(
      notebooks=argv[1:],
      remove_outputs=FLAGS.remove_outputs,
      indent=FLAGS.indent,
      test=FLAGS.test)
  # Map Status to a process exit code for CI use.
  if exit_code == Status.FAIL:
    sys.exit(1)
  else:
    sys.exit(0)
if __name__ == "__main__":
app.run(main)
| tensorflow/docs | tools/tensorflow_docs/tools/nbfmt/__main__.py | __main__.py | py | 9,543 | python | en | code | 5,917 | github-code | 6 | [
{
"api_name": "absl.flags.DEFINE_integer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_bool",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "absl.flag... |
21215598425 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.utils.translation import gettext as _
from django.db.models.manager import BaseManager
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def weight_change(
    actual_weights: BaseManager, percentile_weights: BaseManager, birthday: datetime
):
    """
    Create a graph showing weight over time.
    :param actual_weights: a QuerySet of Weight instances.
    :param percentile_weights: a QuerySet of Weight Percentile instances.
    :param birthday: a datetime of the child's birthday
    :returns: a tuple of the graph's html and javascript.
    """
    actual_weights = actual_weights.order_by("-date")
    weighing_dates: list[datetime] = list(actual_weights.values_list("date", flat=True))
    measured_weights = list(actual_weights.values_list("weight", flat=True))
    actual_weights_trace = go.Scatter(
        name=_("Weight"),
        x=weighing_dates,
        y=measured_weights,
        fill="tozeroy",
        mode="lines+markers",
    )
    if percentile_weights:
        # Convert each row's age offset into a calendar date.
        # NOTE: the lambda parameter shadows the imported `timedelta` inside
        # its own scope only.
        dates = list(
            map(
                lambda timedelta: birthday + timedelta,
                percentile_weights.values_list("age_in_days", flat=True),
            )
        )
        # reduce percentile data xrange to end 1 day after last weigh in for formatting purposes
        # https://github.com/babybuddy/babybuddy/pull/708#discussion_r1332335789
        last_date_for_percentiles = max(weighing_dates) + timedelta(days=2)
        dates = dates[: dates.index(last_date_for_percentiles)]
        percentile_weight_3_trace = go.Scatter(
            name=_("P3"),
            x=dates,
            y=list(percentile_weights.values_list("p3_weight", flat=True)),
            line={"color": "red"},
        )
        percentile_weight_15_trace = go.Scatter(
            name=_("P15"),
            x=dates,
            y=list(percentile_weights.values_list("p15_weight", flat=True)),
            line={"color": "orange"},
        )
        percentile_weight_50_trace = go.Scatter(
            name=_("P50"),
            x=dates,
            y=list(percentile_weights.values_list("p50_weight", flat=True)),
            line={"color": "green"},
        )
        percentile_weight_85_trace = go.Scatter(
            name=_("P85"),
            x=dates,
            y=list(percentile_weights.values_list("p85_weight", flat=True)),
            line={"color": "orange"},
        )
        percentile_weight_97_trace = go.Scatter(
            name=_("P97"),
            x=dates,
            y=list(percentile_weights.values_list("p97_weight", flat=True)),
            line={"color": "red"},
        )
    data = [
        actual_weights_trace,
    ]
    layout_args = utils.default_graph_layout_options()
    layout_args["barmode"] = "stack"
    layout_args["title"] = _("<b>Weight</b>")
    layout_args["xaxis"]["title"] = _("Date")
    layout_args["xaxis"]["rangeselector"] = utils.rangeselector_date()
    layout_args["yaxis"]["title"] = _("Weight")
    if percentile_weights:
        # zoom in on the relevant dates
        layout_args["xaxis"]["range"] = [
            birthday,
            max(weighing_dates) + timedelta(days=1),
        ]
        layout_args["yaxis"]["range"] = [0, max(measured_weights) * 1.5]
        # Percentile bands are added outermost (P97) to innermost (P3).
        data.extend(
            [
                percentile_weight_97_trace,
                percentile_weight_85_trace,
                percentile_weight_50_trace,
                percentile_weight_15_trace,
                percentile_weight_3_trace,
            ]
        )
    fig = go.Figure({"data": data, "layout": go.Layout(**layout_args)})
    output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
    return utils.split_graph_output(output)
| babybuddy/babybuddy | reports/graphs/weight_change.py | weight_change.py | py | 3,806 | python | en | code | 1,766 | github-code | 6 | [
{
"api_name": "django.db.models.manager.BaseManager",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name... |
17178723426 | import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import plotly.express as px
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pickle
# Configure the page (title/icon/layout) before rendering anything.
st.set_page_config(page_title="Air Quality", page_icon="💨", layout='wide', initial_sidebar_state='auto')
@st.cache_data
def load_data():
    """Load the sensor frame, raw air-quality data and the trained model.

    Cached by Streamlit so the files are read only once per session.

    Returns:
        (df, airquality, model): cleaned sensor frame, raw air-quality
        readings, and the unpickled XGBoost regressor.
    """
    df = pd.read_csv("data/model_input.csv")
    df.drop(['location'],axis=1,inplace=True)
    # Drop rows with a zero lceq_avg (presumably invalid readings -- confirm).
    df = df[df['lceq_avg'] != 0]
    airquality = pd.read_csv("data/Air_Quality.csv", delimiter=",")
    # NOTE(review): unpickling trusts this file's origin -- confirm it is
    # produced in-repo and never user-supplied.
    with open('data/xgb_airquality.pkl', 'rb') as f:
        model = pickle.load(f)
    return df, airquality, model
df, airquality, model = load_data()
# Derive calendar features from the timestamps so the two frames can be
# joined on time.
airquality['time_stamp'] = pd.to_datetime(airquality['time_stamp'])
airquality['month'] = airquality['time_stamp'].dt.month
airquality['day_month'] = airquality['time_stamp'].dt.day
# Remap pandas dayofweek (Mon=0..Sun=6) to Mon=1..Sun=7.
airquality['day_week'] = airquality['time_stamp'].dt.dayofweek.apply(lambda x: 7 if x == 6 else x + 1)
airquality['hour'] = airquality['time_stamp'].dt.hour
airquality['minute'] = airquality['time_stamp'].dt.minute
merged_df = pd.merge(df, airquality, how='left', on=['month', 'day_month', 'day_week', 'hour', 'minute'])
new_df = merged_df.drop(['lcpeak_avg', 'lceq_avg', 'v85', 'Telraam data', 'avg_pedestrians', 'avg_bikes', 'avg_cars', 'avg_trucks' ], axis=1)
st.title("Air Quality analysis 💨")
st.markdown("""In this section, we will analyse the air quality data found in the PurpleAir API.
We will start by looking at the data and then we will try to find some correlations between the different variables.""")
# Group the data by month and calculate the mean of '2.5_um_count'
grouped_df = new_df.groupby('month')['2.5_um_count'].mean().reset_index()
expander_corr = st.expander("Correlation heatmap explanation")
expander_corr.markdown("We will start by looking at the correlation heatmap of the different variables. This will give us a first idea of the variables that are somewhat correlated with the count of 2.5um particles.")
columns_of_interest = ['LC_TEMP', 'LC_DAILYRAIN', 'LC_RAD', 'LC_WINDDIR', 'month', '2.5_um_count']
corr_matrix = new_df[columns_of_interest].corr()
# Create the correlation heatmap using Plotly
fig = go.Figure(data=go.Heatmap(
    z=corr_matrix.values,
    x=corr_matrix.columns,
    y=corr_matrix.columns,
    colorscale='RdBu',
    zmin=-1,
    zmax=1,
    colorbar=dict(title="Correlation")
))
# Add custom annotations for the correlation values inside each square
annotations = []
for i, row in enumerate(corr_matrix.values):
    for j, value in enumerate(row):
        annotations.append(
            dict(
                x=corr_matrix.columns[j],
                y=corr_matrix.columns[i],
                text=str(round(value, 2)),
                font=dict(color='white' if abs(value) > 0.5 else 'black'),
                showarrow=False
            )
        )
fig.update_layout(
    title='Correlation Heatmap',
    xaxis_title='Variables',
    yaxis_title='Variables',
    width=800,
    height=600,
    annotations=annotations
)
expander_corr.plotly_chart(fig)
monthly_avg = new_df.groupby('month')['2.5_um_count'].mean().reset_index()
expander_mon = st.expander("Average PM2.5 particles count per Month")
expander_mon.markdown("We will now look at the average PM2.5 particles count per Month. We can see that there is a negative correlation between the 2.5_um_count and the month. This shows that the air quality is better during the summer months.")
fig = px.line(monthly_avg, x='month', y='2.5_um_count', color_discrete_sequence=['#3366cc'])
fig.update_layout(title='Average 2.5_um_count per Month',
                    xaxis_title='Month', yaxis_title='Average 2.5_um_count')
expander_mon.plotly_chart(fig)
expander_temp = st.expander("Average PM2.5 particles count per Temperature")
expander_temp.markdown("We will now look at the average PM2.5 particles count per Temperature. We can see that there is a negative correlation between the 2.5_um_count and the LC_TEMP. This means that when the temperature is higher, the air quality is better.")
fig = px.scatter(new_df, x="LC_TEMP", y="2.5_um_count", trendline="ols",
                 animation_frame="month", animation_group="day_month", color="day_month",
                 hover_name="day_month", range_x=[-5, 25], range_y=[0, 40])
fig.update_layout(title='2.5_um_count by LC_TEMP', xaxis_title='LC_TEMP', yaxis_title='2.5_um_count')
expander_temp.plotly_chart(fig)
# Smooth the target with a forward-fill plus 10-sample rolling mean before
# the feature-importance view.
merged_df['2.5_um_count'] = merged_df['2.5_um_count'].fillna(method='ffill').rolling(window=10, min_periods=1).mean()
merged_df = merged_df.drop(['time_stamp'], axis=1)
x = merged_df.drop(['2.5_um_count'], axis=1)
y = merged_df['2.5_um_count']
xgb = model
expander_imp = st.expander("Feature importance")
expander_imp.markdown("We will now look at the feature importance of the different variables. The used model is a XGBoost model, with the target variable being the 2.5_um_count. By looking at the feature importance, we can see which variables are the most important in predicting the 2.5_um_count. We can see that the most important variables are the temporal data and weather conditions.")
# Sort (importance, column) pairs descending for the horizontal bar chart.
importance_sorted = sorted(zip(xgb.feature_importances_, x.columns), reverse=True)
importance_values_sorted = [imp for imp, _ in importance_sorted]
variable_names_sorted = [var for _, var in importance_sorted]
fig = px.bar(x=importance_values_sorted, y=variable_names_sorted, orientation='h')
fig.update_layout(
    title='Feature importance',
    xaxis_title='Importance',
    yaxis_title='Variables',
    yaxis=dict(
        tickmode='array',
        ticktext=variable_names_sorted,
        tickvals=variable_names_sorted,
        showticklabels=True,
        automargin=True
    )
)
expander_imp.plotly_chart(fig)
| vavald/MDA_streamlit | pages/6_💨_AirQuality.py | 6_💨_AirQuality.py | py | 5,860 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.load... |
18790221797 | import torch
import decord
from decord import cpu, gpu
from tqdm import tqdm
import json
import os
import random
import numpy as np
import pickle
def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
    """Pick `num_frames` frame indices spread across a video of length `vlen`.

    The [0, vlen) range is split into (at most) `num_frames` roughly equal
    intervals and one index is taken from each, so samples cover the clip.

    Args:
        num_frames: Number of frames to sample (capped at `vlen`).
        vlen: Total number of frames in the video.
        sample: 'rand' draws a random index per interval; 'uniform' takes
            each interval's midpoint.
        fix_start: When given (and sample != 'rand'), each interval start
            plus this fixed offset is used instead.

    Returns:
        List of frame indices, one per interval.

    Raises:
        NotImplementedError: unknown `sample` mode without `fix_start`.
    """
    acc_samples = min(num_frames, vlen)
    intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
    ranges = []
    for idx, interv in enumerate(intervals[:-1]):
        ranges.append((interv, intervals[idx + 1] - 1))
    if sample == 'rand':
        # Bug fix: when an interval is empty (e.g. num_frames >= vlen),
        # range(start, end) is empty and random.choice raised IndexError.
        # Fall back to the interval start in that case.
        frame_idxs = [
            random.choice(range(x[0], x[1])) if x[1] > x[0] else int(x[0])
            for x in ranges
        ]
    elif fix_start is not None:
        frame_idxs = [x[0] + fix_start for x in ranges]
    elif sample == 'uniform':
        frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
    else:
        raise NotImplementedError
    return frame_idxs
# Video-id lists for each split.
with open("video_train.json") as f:
    video_list_train = json.load(f)
with open("video_val.json") as f:
    video_list_val = json.load(f)
with open("video_test.json") as f:
    video_list_test = json.load(f)

# Bug fix: `frame_ids` was referenced below without ever being initialized,
# which raised a NameError on the first training video.
frame_ids = {}

for video_id in tqdm(video_list_train):
    if os.path.exists(f"videos/{video_id}.mp4"):
        video_path = f"videos/{video_id}.mp4"
    else:
        video_path = f"videos/{video_id}.mkv"
    video_reader = decord.VideoReader(video_path, width=512, height=512)
    decord.bridge.set_bridge('torch')
    vlen = len(video_reader)
    # Nine independent random 3-frame samples per training video.
    for i in range(1, 10):
        frame_idxs = sample_frames(3, vlen, sample="rand")
        frames = video_reader.get_batch(frame_idxs).byte()
        frames = frames.permute(0, 3, 1, 2)  # NHWC -> NCHW
        frame_ids[f"{video_id}_{i}"] = frame_idxs
        with open(f"allinone_data/images/rand_new/{video_id}_{i}", "wb") as out:
            pickle.dump(frames, out)

# Validation/test videos get one deterministic (uniform) 3-frame sample.
for video_id in tqdm(video_list_val + video_list_test):
    if os.path.exists(f"videos/{video_id}.mp4"):
        video_path = f"videos/{video_id}.mp4"
    else:
        video_path = f"videos/{video_id}.mkv"
    video_reader = decord.VideoReader(video_path, width=512, height=512, num_threads=1)
    decord.bridge.set_bridge('torch')
    vlen = len(video_reader)
    frame_idxs = sample_frames(3, vlen, sample="uniform")
    frames = video_reader.get_batch(frame_idxs).byte()
    frames = frames.permute(0, 3, 1, 2)  # NHWC -> NCHW
    with open(f"clipbert/images/uniform/{video_id}", "wb") as out:
        pickle.dump(frames, out)
| MILVLG/anetqa-code | clipbert/sample_imgs_clipbert.py | sample_imgs_clipbert.py | py | 2,196 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number":... |
36994780654 | # -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import os
import sys
import shutil
import time
from utils import get_logger
import network
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
sys.path.append('../..')
from data_helper import to_categorical
from evaluator import cail_evaluator
flags = tf.flags
flags.DEFINE_bool('is_retrain', False, 'if is_retrain is true, not rebuild the summary')
flags.DEFINE_integer('max_epoch', 1, 'update the embedding after max_epoch, default: 1')
flags.DEFINE_integer('max_max_epoch', 1000, 'all training epoches, default: 1000')
flags.DEFINE_float('lr', 1e-3, 'initial learning rate, default: 1e-3')
flags.DEFINE_float('decay_rate', 0.6, 'decay rate, default: 0.65')
flags.DEFINE_float('keep_prob', 0.5, 'keep_prob for training, default: 0.5')
flags.DEFINE_string("log_file_train", "train.log", "File for log")
flags.DEFINE_integer('decay_step', 5000, 'decay_step, default: 5000')
flags.DEFINE_integer('valid_step', 2500, 'valid_step, default: 2500')
flags.DEFINE_float('last_score12', 0.0, 'if valid_score12 > last_score12, save new model. default: 0.0')
FLAGS = flags.FLAGS
lr = FLAGS.lr
last_score12 = FLAGS.last_score12
settings = network.Settings()
summary_path = settings.summary_path
ckpt_path = settings.ckpt_path
model_path = ckpt_path + 'model.ckpt'
log_path = settings.log_path
embedding_path = '../../data/word_embedding_256.npy'
data_train_path = '../../data/wd_pdQS200/train/'
data_valid_path = '../../data/wd_pdQS200/valid/'
tr_batches = os.listdir(data_train_path)
va_batches = os.listdir(data_valid_path)
n_tr_batches = len(tr_batches)
n_va_batches = len(va_batches)
def get_batch(data_path, batch_id):
    """Load one pre-batched .npz file and return its features and labels.

    Args:
        data_path: Directory prefix holding the numbered batch files.
        batch_id: Index of the batch file to load.

    Returns:
        A two-element list [X, y] holding the stored 'X' and 'y' arrays.
    """
    archive = np.load('{}{}.npz'.format(data_path, batch_id))
    return [archive['X'], archive['y']]
def valid_epoch(data_path, sess, model):
    """Run the model over every validation batch and score the predictions.

    Args:
        data_path: Directory of numbered .npz validation batches.
        sess: Active tf.Session.
        model: Network exposing loss/y_pred tensors and input placeholders.

    Returns:
        (f1_micro, f1_macro, score12) as computed by cail_evaluator.
    """
    va_batches = os.listdir(data_path)
    n_va_batches = len(va_batches)
    _costs = 0.0
    predict_labels_list = list()
    marked_labels_list = list()
    for i in range(n_va_batches):
        [X_batch, y_batch] = get_batch(data_path, i)
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        fetches = [model.loss, model.y_pred]
        # Inference mode: tst=True, no dropout.
        feed_dict = {model.X_inputs: X_batch,
                     model.y_inputs: y_batch, model.batch_size: _batch_size,
                     model.tst: True, model.keep_prob: 1.0}
        _cost, predict_labels = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels_list.extend(predict_labels)
    f1_micro, f1_macro, score12 = cail_evaluator(predict_labels_list, marked_labels_list)
    return f1_micro, f1_macro, score12
def train_epoch(data_path, sess, model, train_fetches,
                valid_fetches, train_writer, test_writer, logger):
    """Train for one pass over the shuffled training batches.

    Periodically (every FLAGS.valid_step steps) runs full validation and
    checkpoints the model whenever score12 improves; every 500 steps logs
    summaries for one random validation batch.
    """
    global last_score12
    global lr
    time0 = time.time()
    batch_indexs = np.random.permutation(n_tr_batches)  # shuffle batch order
    for batch in tqdm(range(n_tr_batches)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
            print('Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' % (
                global_step, f1_micro, f1_macro, score12, time.time() - time0))
            logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.
                        format(sess.run(model.global_step), f1_micro, f1_macro, score12))
            time0 = time.time()
            # Only keep checkpoints that improve the best score so far.
            if score12 > last_score12:
                last_score12 = score12
                saving_path = model.saver.save(sess, model_path, global_step+1)
                print('saved new model to %s ' % saving_path)
        # training
        batch_id = batch_indexs[batch]
        [X_batch, y_batch] = get_batch(data_path, batch_id)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        feed_dict = {model.X_inputs: X_batch,
                     model.y_inputs: y_batch, model.batch_size: _batch_size,
                     model.tst: False, model.keep_prob: FLAGS.keep_prob}
        summary, _cost, _, _ = sess.run(train_fetches, feed_dict) # the cost is the mean cost of one batch
        # valid per 500 steps
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # randomly pick one validation batch
            [X_batch, y_batch] = get_batch(data_valid_path, batch_id)
            y_batch = to_categorical(y_batch)
            _batch_size = len(y_batch)
            feed_dict = {model.X_inputs: X_batch,
                         model.y_inputs: y_batch, model.batch_size: _batch_size,
                         model.tst: True, model.keep_prob: 1.0}
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
def main(_):
    """Build the graph, restore or initialize variables, and train."""
    global ckpt_path
    global last_score12
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    elif not FLAGS.is_retrain:
        # Fresh run: discard stale summaries.
        shutil.rmtree(summary_path)
        os.makedirs(summary_path)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    print('1.Loading data...')
    W_embedding = np.load(embedding_path)
    print('training sample_num = %d' % n_tr_batches)
    print('valid sample_num = %d' % n_va_batches)
    logger = get_logger(log_path + FLAGS.log_file_train)
    print('2.Building model...')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = network.Atten_TextCNN(W_embedding, settings)
        with tf.variable_scope('training_ops') as vs:
            learning_rate = tf.train.exponential_decay(FLAGS.lr, model.global_step,
                                                       FLAGS.decay_step,
                                                       FLAGS.decay_rate, staircase=True)
            # Optimizer1 updates all trainable variables (incl. embeddings).
            with tf.variable_scope('Optimizer1'):
                tvars1 = tf.trainable_variables()
                grads1 = tf.gradients(model.loss, tvars1)
                optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
                train_op1 = optimizer1.apply_gradients(zip(grads1, tvars1),
                                                       global_step=model.global_step)
            # Optimizer2 excludes embedding variables (frozen embeddings).
            with tf.variable_scope('Optimizer2'):
                tvars2 = [tvar for tvar in tvars1 if 'embedding' not in tvar.name]
                grads2 = tf.gradients(model.loss, tvars2)
                optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)
                train_op2 = optimizer2.apply_gradients(zip(grads2, tvars2),
                                                       global_step=model.global_step)
            update_op = tf.group(*model.update_emas)
        merged = tf.summary.merge_all() # summary
        train_writer = tf.summary.FileWriter(summary_path + 'train', sess.graph)
        test_writer = tf.summary.FileWriter(summary_path + 'test')
        training_ops = [v for v in tf.global_variables() if v.name.startswith(vs.name+'/')]
        if os.path.exists(ckpt_path + "checkpoint"):
            print("Restoring Variables from Checkpoint...")
            model.saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
            f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
            print('f1_micro=%g, f1_macro=%g, score12=%g' % (f1_micro, f1_macro, score12))
            # Re-initialize only optimizer slots; train everything after restore.
            sess.run(tf.variables_initializer(training_ops))
            train_op2 = train_op1
        else:
            print('Initializing Variables...')
            sess.run(tf.global_variables_initializer())
        print('3.Begin training...')
        print('max_epoch=%d, max_max_epoch=%d' % (FLAGS.max_epoch, FLAGS.max_max_epoch))
        logger.info('max_epoch={}, max_max_epoch={}'.format(FLAGS.max_epoch, FLAGS.max_max_epoch))
        # Embeddings stay frozen until max_epoch, then train_op1 kicks in.
        train_op = train_op2
        for epoch in range(FLAGS.max_max_epoch):
            print('\nepoch: ', epoch)
            logger.info('epoch:{}'.format(epoch))
            global_step = sess.run(model.global_step)
            print('Global step %d, lr=%g' % (global_step, sess.run(learning_rate)))
            if epoch == FLAGS.max_epoch:
                train_op = train_op1
            train_fetches = [merged, model.loss, train_op, update_op]
            valid_fetches = [merged, model.loss]
            train_epoch(data_train_path, sess, model, train_fetches,
                        valid_fetches, train_writer, test_writer, logger)
        # Run one final validation pass.
        f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
        print('END:Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g' % (
            sess.run(model.global_step), f1_micro, f1_macro, score12))
        logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.
                    format(sess.run(model.global_step), f1_micro, f1_macro, score12))
        if score12 > last_score12:
            saving_path = model.saver.save(sess, model_path, sess.run(model.global_step)+1)
            print('saved new model to %s ' % saving_path)
            logger.info('saved new model to {}'.format(saving_path))
# tf.app.run() parses flags and then calls main().
if __name__ == '__main__':
    tf.app.run()
| shelleyHLX/cail | models/Attention_Textcnn/train.py | train.py | py | 9,638 | python | en | code | 77 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.flags",
... |
70986505148 | #!/usr/bin/env python3
"""
Created on Thu Mar 13 9:31:11 2020
@author: Hanrui Wu
"""
import pysam
import sys
import gzip
def read_cell_names1(pathseq_bam_file, write_bac):
    """Extract reads carrying a YP tag from a PathSeq BAM.

    Writes one tab-separated line per tagged read to `write_bac`:
    read name, YP tag value, and AS (alignment score) tag value.
    Also prints simple read-count statistics.
    """
    bam = pysam.AlignmentFile(pathseq_bam_file, "rb", threads=36)
    out_handle = open(write_bac, 'w')
    n_total = 0
    n_with_yp = 0
    for read in bam:
        n_total += 1
        if not read.has_tag('YP'):
            continue
        n_with_yp += 1
        # 022422: the AS (alignment score) tag is recorded instead of
        # mapping quality.
        fields = (read.query_name, read.get_tag('YP'), str(read.get_tag('AS')))
        out_handle.write('\t'.join(fields) + '\n')
    print('Total reads in pathseq bam = ', n_total)
    print('Total reads in pathseq bam with YP tag = ', n_with_yp)
    return
def read_readnames(readname_file):
    """Parse the read-name TSV written by `read_cell_names1`.

    Each input line is: read_name <TAB> pathogen_ids <TAB> score.

    Returns:
        (names, info): `names` is the set of read names; `info` maps each
        read name to {"pathogen": ids_string, "mapping_quality": score_str}.
    """
    names = set()
    info = {}
    with open(readname_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.rstrip('\n').split('\t')
            name = fields[0]
            names.add(name)
            info[name] = {"pathogen": fields[1], "mapping_quality": fields[2]}
    return names, info
def read_pathseq_report_and_create_dict(pathseq_report_csv):
    """Map PathSeq taxon ids to human-readable (roughly genus-level) names.

    Parses the tab-separated PathSeq score file; for rows whose taxonomy
    string contains '|', scans the pipe-separated segments right-to-left
    and uses the first underscore-free (or 'unclassified*') segment.

    Returns:
        dict: taxon id (str) -> extracted name (str).
    """
    pathseq_report = open(pathseq_report_csv,'r')
    #three dicts: 1. cell-> cell_UMI 2. UMI-> id_sting 3. id -> genus
    #id -> genus:
    dict_for_genus = {}
    set_for_genera = set()
    #print('lens of pathseq report: ',len(pathseq_report))
    for each_line in pathseq_report:
        each_line = each_line.rstrip('\n')
        each_line_list = each_line.split('\t')
        level = each_line_list[2]
        tax = each_line_list[3]
        if level == 'genus':
            set_for_genera.add(tax)
        if '|' in each_line_list[1]:
            name_string_list = each_line_list[1].split('|')
            # Scan right-to-left for the first plain segment.
            for n in range(len(name_string_list)):
                pointer = -n-1
                if not '_' in name_string_list[pointer]:
                    name = name_string_list[pointer]
                    break
                if 'unclassified' in name_string_list[pointer]:
                    name = name_string_list[pointer]
                    break
            # NOTE(review): if no segment qualifies, `name` silently keeps
            # the value from a previous row (NameError on the very first
            # such row) -- confirm the report format guarantees a match.
            id = each_line_list[0]
            dict_for_genus[id] = name
    print ("len(dict_for_genus) = ",len(dict_for_genus))
    # 070421:added a veriation function to make sure only annotate genera
    #exclude_set = set()
    #for each_key in dict_for_genus:
    #    if not dict_for_genus[each_key] in set_for_genera:
    #        exclude_set.add(each_key)
    #for each_key in list(dict_for_genus):
    #    if each_key in exclude_set:
    #        del dict_for_genus[each_key]
    #    #print("deleting: ", each_key)
    #print ("len(dict_for_genus) 2 = ",len(dict_for_genus))
    return dict_for_genus
def read_cell_names2(set_of_readnames, dict_name, dict_for_genus,original_bam_file,unmap_cbub_bam_file,unmap_cbub_fasta_file, out_cell_list,out_readname_cell_path):
    """Cross-reference the cellranger BAM against PathSeq hits.

    For every unmapped read carrying CB+UB tags, writes the read to a FASTA
    and a BAM (downstream PathSeq/Kraken2 inputs). Reads that also appear
    in `set_of_readnames` (PathSeq YP hits) additionally produce one TSV
    line (read, barcode, UMI, pathogen ids, score, sorted genus names) in
    `out_readname_cell_path`, and their cell barcodes are collected into
    `out_cell_list`.

    NOTE(review): output file handles are never explicitly closed here.
    """
    seqbam = pysam.AlignmentFile(original_bam_file, "rb",threads=36)
    readname_cell_path = open(out_readname_cell_path,'w')
    # Also output fasta and bam for pathseq/kk2
    unmap_cbub_fasta = open(unmap_cbub_fasta_file,'w')
    unmap_cbub_bam = pysam.AlignmentFile(unmap_cbub_bam_file, "wb", seqbam)
    set_for_infect_cells=set()
    total_cellranger_bam_reads = 0
    total_cellranger_reads_UB_CB_tags = 0
    total_cellranger_reads_UB_CB_unmap = 0
    total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads = 0
    total_potential_UMI_including_ambigious_reads = set()
    #added 102621: output a bam file with only UBCB unmap reads
    for each_line in seqbam:
        total_cellranger_bam_reads+=1
        if each_line.has_tag('CB') and each_line.has_tag('UB'):
            total_cellranger_reads_UB_CB_tags+=1
        if each_line.has_tag('CB') and each_line.has_tag('UB') and each_line.is_unmapped:#updated 0520: only extract unmapped reads from cellranger
            total_cellranger_reads_UB_CB_unmap+=1
            # added 102721: output a fasta file for kraken
            query_name_in_cellranger_bam = each_line.query_name
            seq_in_cellranger_bam = each_line.query_sequence
            unmap_cbub_fasta.write('>')
            unmap_cbub_fasta.write(query_name_in_cellranger_bam)
            unmap_cbub_fasta.write('\n')
            unmap_cbub_fasta.write(seq_in_cellranger_bam)
            unmap_cbub_fasta.write('\n')
            unmap_cbub_bam.write(each_line)
            if each_line.query_name in set_of_readnames:
                set_for_infect_cells.add(each_line.get_tag('CB'))
                readname = each_line.query_name
                cellname = each_line.get_tag('CB')
                umi = each_line.get_tag('UB')
                path = dict_name[readname]["pathogen"]
                #print(readname)
                #print(path)
                #translate id to genus
                id_string_list = path.split(',')
                genus_list = []
                for each_id in id_string_list:
                    #070421: add function to ignore unfound genus
                    if each_id in dict_for_genus:
                        genus = dict_for_genus[each_id]
                        genus_list.append(genus)
                    else:
                        print(each_id," not found!")
                genus_list = list(set(genus_list))
                genus_list.sort()
                genus_list_string = ','.join(genus_list)
                #barcode_UMI_dict[barcode_UMI]["genus_string"] = genus_list
                mapping_quality = dict_name[readname]["mapping_quality"]
                outline = readname+'\t'+cellname+'\t'+umi+'\t'+path+'\t'+mapping_quality+'\t'+genus_list_string+'\n'
                readname_cell_path.write(outline)
                total_potential_UMI_including_ambigious_reads.add(umi)
                total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads+=1
    print('total cellranger bam reads = ',total_cellranger_bam_reads)
    print('total cellranger bam reads with UB CB tags = ',total_cellranger_reads_UB_CB_tags)
    print('total UNMAPPED cellranger bam reads with UB CB tags = ',total_cellranger_reads_UB_CB_unmap)
    print('total cellranger reads with UB_CB_unmap Aligned to Pathseq reads with YP tags = ',total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads)
    print('total potential UMI including ambigious reads = ',len(total_potential_UMI_including_ambigious_reads))
    cell_list = open(out_cell_list,'w')
    for each_cell in set_for_infect_cells:
        cell_list.write(each_cell)
        cell_list.write('\n')
    return
def generate_barcode_UMI_dict(out_readname_cell_path):
    """Collapse per-read records into one best record per barcode+UMI.

    Reads the TSV produced by `read_cell_names2` (read name, cell barcode,
    UMI, pathogen id string, score, genus string) and keeps, for every
    "<barcode>+<UMI>" key, the record with the strictly highest score.

    Returns:
        dict mapping "<barcode>+<UMI>" ->
        {"id_string": [ids...], "mapping_quality": int, "genus_string": str}.
    """
    umi_best = {}
    with open(out_readname_cell_path, 'r') as records:
        for record in records:
            fields = record.rstrip('\n').split('\t')
            key = fields[1] + '+' + fields[2]
            score = int(fields[4])
            # First record for a UMI wins; later ones replace it only with
            # a strictly higher score (ties keep the earlier record).
            if key not in umi_best or score > umi_best[key]["mapping_quality"]:
                umi_best[key] = {
                    "id_string": fields[3].split(','),
                    "mapping_quality": score,
                    "genus_string": fields[5],
                }
    print('Total UMI in barcode_UMI_dict = ', len(umi_best))
    return umi_best
def output_cells_genus_list(barcode_UMI_dict,dict_for_genus):
    """Build per-cell metadata from unambiguous (single-genus) UMIs.

    Args:
        barcode_UMI_dict: "<barcode>+<UMI>" -> record with "id_string"
            (list of taxon ids) and "genus_string".
        dict_for_genus: taxon id -> genus name.

    Returns:
        barcode -> {"genus": [genus, ...], "barcode_UMI": {key: genus},
        "pathogen_count": {genus: count}}.
    """
    # Keep only UMIs whose pre-computed genus annotation names a single
    # genus (no ',' in the genus string).
    candidate_ids = {
        key: rec["id_string"]
        for key, rec in barcode_UMI_dict.items()
        if ',' not in rec["genus_string"]
    }
    # Re-translate ids -> genera (skipping unknown ids) and keep UMIs that
    # resolve to exactly one genus.
    unambiguous = {}
    for key, id_list in candidate_ids.items():
        genera = set()
        for taxon_id in id_list:
            if taxon_id in dict_for_genus:
                genera.add(dict_for_genus[taxon_id])
            else:
                print(taxon_id," not found!")
        if len(genera) == 1:
            unambiguous[key] = next(iter(genera))
    print('Total unambigious UMI = ', len(unambiguous))
    # Aggregate the surviving UMIs per cell barcode.
    cell_metadata = {}
    for key, genus in unambiguous.items():
        barcode = key.split('+')[0]
        if barcode not in cell_metadata:
            # NOTE(review): the first UMI seen for a barcode initializes
            # 'pathogen_count' WITHOUT counting its own genus -- behavior
            # preserved from the original implementation.
            cell_metadata[barcode] = {
                'genus': [genus],
                'barcode_UMI': {key: genus},
                'pathogen_count': {},
            }
        else:
            entry = cell_metadata[barcode]
            entry['genus'].append(genus)
            entry['barcode_UMI'][key] = genus
            counts = entry['pathogen_count']
            counts[genus] = counts.get(genus, 0) + 1
    return cell_metadata
def output_cell_metadata(cell_metadata_dict,out_genus_file,sample_ident,barcode_whitelist_file):
    """Write the per-cell pathogen call table (CSV) for whitelisted cell barcodes.

    Each retained barcode gets one row: cell_name,pathogen,UMI_count,pathogen_count.
    Assignment strategy (per the original 0601 notes, priority-pathogen override muted 061821):
      1. a single genus with the highest per-genus UMI count -> that genus (with its count)
      2. several genera tied for the highest count -> "MULTI" (with the summed count)

    Args:
        cell_metadata_dict: barcode -> {'genus': [...],
            'barcode_UMI': {"<barcode>+<UMI>": genus},
            'pathogen_count': {genus: UMI count}} (as built by output_cells_genus_list).
        out_genus_file: path of the CSV file to write.
        sample_ident: optional sample prefix for cell names ('' for none).
        barcode_whitelist_file: gzip-compressed barcode whitelist, one barcode per line.
    """
    print('total pathogen-associated gems = ', len(cell_metadata_dict))
    # Keep only barcodes present in the (gzipped) whitelist.
    white_list_set = set()
    with gzip.open(barcode_whitelist_file, 'rt') as white_list:
        for each_line in white_list:
            white_list_set.add(each_line.rstrip('\n'))
    cell_metadata_dict = {
        barcode: meta
        for barcode, meta in cell_metadata_dict.items()
        if barcode in white_list_set
    }
    print("total filtered pathogen-associated cells = ", len(cell_metadata_dict))
    with open(out_genus_file, 'w') as genus_file:
        genus_file.write('cell_name,pathogen,UMI_count,pathogen_count\n')
        for barcode in cell_metadata_dict:
            if sample_ident != '':
                cell_name = sample_ident + '_' + barcode
            else:
                cell_name = barcode
            # All genera observed for this cell, de-duplicated, sorted, '+'-joined.
            genus_list = [
                cell_metadata_dict[barcode]['barcode_UMI'][barcode_UMI]
                for barcode_UMI in cell_metadata_dict[barcode]['barcode_UMI']
            ]
            sorted_genus_list = sorted(set(genus_list))
            genus = '+'.join(sorted_genus_list)
            UMI_count = len(cell_metadata_dict[barcode]['barcode_UMI'])
            # Per-genus UMI counts rendered as "genus:count", sorted and ';'-joined.
            pathogen_count_mini_dict = cell_metadata_dict[barcode]['pathogen_count']
            pathogen_count_list = sorted(
                each_pathogen + ':' + str(count)
                for each_pathogen, count in pathogen_count_mini_dict.items()
            )
            pathogen_count_str = ';'.join(pathogen_count_list)
            # Re-assign genus/UMI_count from the per-genus counts.
            # NOTE: a cell whose only UMI was the first one seen upstream has an empty
            # 'pathogen_count' dict (output_cells_genus_list does not count the first UMI),
            # and the original code crashed on max() of an empty sequence here. Fall back
            # to the pre-computed genus/UMI_count in that case instead of crashing.
            if pathogen_count_mini_dict:
                UMI_count_sum = sum(pathogen_count_mini_dict.values())
                max_count = max(pathogen_count_mini_dict.values())
                temp_max_list = [
                    key for key, value in pathogen_count_mini_dict.items()
                    if value == max_count
                ]
                if len(set(temp_max_list)) > 1:
                    # Several genera tie for the top count: ambiguous cell.
                    genus = 'MULTI'
                    UMI_count = UMI_count_sum
                else:
                    genus = temp_max_list[0]
                    UMI_count = max_count
            # 1. cell_name 2. genus 3. UMI_count 4. pathogen_count_str
            output_line = ','.join([cell_name, genus, str(UMI_count), pathogen_count_str]) + '\n'
            if UMI_count >= 1:
                genus_file.write(output_line)
    return
def UMI_table_output(cell_metadata_dict,barcode_whitelist_file,sample_ident,output_UMI_table_csv,output_UMI_validate_table_csv):
    """Write the cell x genus UMI-count matrix and a UMI-level validation table.

    Only barcodes present in the gzipped whitelist are kept.

    Args:
        cell_metadata_dict: barcode -> {'barcode_UMI': {"<barcode>+<UMI>": genus},
            'pathogen_count': {genus: UMI count}, ...}.
        barcode_whitelist_file: gzip-compressed barcode whitelist, one barcode per line.
        sample_ident: optional sample prefix for cell names ('' for none).
        output_UMI_table_csv: path of the cell x genus count matrix to write
            (header: barcode,<genus1>,<genus2>,...).
        output_UMI_validate_table_csv: path of the per-UMI table to write
            (one "<barcode>+<UMI>,<genus>" row per UMI, for cross validation).
    """
    white_list_set = set()
    with gzip.open(barcode_whitelist_file, 'rt') as white_list:
        for each_line in white_list:
            white_list_set.add(each_line.rstrip('\n'))
    print("total number of cells = ", len(white_list_set))
    cell_metadata_dict = {
        barcode: meta
        for barcode, meta in cell_metadata_dict.items()
        if barcode in white_list_set
    }
    # UMI-level cross-validation table (added 101521): one "cell+UMI,pathogen" row per UMI.
    with open(output_UMI_validate_table_csv, 'w') as output_UMI_validate_table:
        for each_cell in cell_metadata_dict:
            for UMI, pathogen in cell_metadata_dict[each_cell]['barcode_UMI'].items():
                output_UMI_validate_table.write(UMI + ',' + pathogen + '\n')
    # Cell x genus UMI-count matrix. First collect the complete genus list for this sample
    # so every row has the same columns.
    genera_list_set = set()
    for barcode in cell_metadata_dict:
        genera_list_set.update(cell_metadata_dict[barcode]['pathogen_count'])
    genera_list = sorted(genera_list_set)
    with open(output_UMI_table_csv, 'w') as output_UMI_table:
        output_UMI_table.write(','.join(['barcode'] + genera_list))
        output_UMI_table.write('\n')
        for barcode in cell_metadata_dict:
            if sample_ident != '':
                cell_name = sample_ident + '_' + barcode
            else:
                cell_name = barcode
            counts = cell_metadata_dict[barcode]['pathogen_count']
            # Missing genera get an explicit 0 so the matrix is dense.
            genera_count_list = [str(counts.get(each_genus, 0)) for each_genus in genera_list]
            output_UMI_table.write(','.join([cell_name] + genera_count_list))
            output_UMI_table.write('\n')
    return
if __name__ == "__main__":
    # Command-line entry point. Thirteen positional arguments, in order:
    #   inputs:  cellranger_bam_file, sample_ident, barcode_whitelist_file,
    #            pathseq_bam_file, pathseq_report_csv
    #   outputs: read_name_pathseq, unmap_cbub_bam_file, unmap_cbub_fasta_file,
    #            out_cell_list, out_readname_cell_path, out_genus_file,
    #            output_UMI_table_csv, output_UMI_validate_table_csv
    cellranger_bam_file,sample_ident,barcode_whitelist_file,pathseq_bam_file,pathseq_report_csv,read_name_pathseq,unmap_cbub_bam_file,unmap_cbub_fasta_file,out_cell_list,out_readname_cell_path,out_genus_file,output_UMI_table_csv,output_UMI_validate_table_csv=sys.argv[1:]
    # Map reference ids -> genus names, parsed from the PathSeq report.
    dict_for_genus = read_pathseq_report_and_create_dict(pathseq_report_csv)
    # Extract read names from the PathSeq BAM (written to read_name_pathseq).
    step1 = read_cell_names1(pathseq_bam_file, read_name_pathseq)
    step2 = read_readnames(read_name_pathseq)
    # Cross-reference the Cell Ranger BAM to recover barcode/UMI info for the PathSeq reads.
    step3 = read_cell_names2(step2[0], step2[1], dict_for_genus,cellranger_bam_file,unmap_cbub_bam_file,unmap_cbub_fasta_file, out_cell_list,out_readname_cell_path)
    # Group reads by barcode+UMI, then assign an unambiguous genus per UMI.
    step4 = generate_barcode_UMI_dict(out_readname_cell_path)
    step5 = output_cells_genus_list(step4,dict_for_genus)
    # Write the per-cell genus calls and the cell x genus UMI matrices.
    output_cell_metadata(step5,out_genus_file,sample_ident,barcode_whitelist_file)
    cell_metadata_dict = step5
    UMI_table_output(cell_metadata_dict,barcode_whitelist_file,sample_ident,output_UMI_table_csv,output_UMI_validate_table_csv)
    #0714:added output_UMI_table_csv
    # cellranger_bam_file,
    # sample_ident,
    # barcode_whitelist_file,
    # pathseq_bam_file,
    # pathseq_report_csv,
    # read_name_pathseq,
    # unmap_cbub_bam_file,
    # unmap_cbub_fasta_file,
    # out_cell_list,
    # out_readname_cell_path,
    # out_genus_file,
    # output_UMI_table_csv,
    # output_UMI_validate_table_csv=sys.argv[1:]
| FredHutch/invadeseq | bin/UMI_matrix.py | UMI_matrix.py | py | 20,709 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pysam.AlignmentFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pysam.AlignmentFile",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pysam.AlignmentFile",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "gzip.open... |
20946824389 | import datetime
from pysolar import solar
# Calculate the altitude and azimuth of the sun given the location and the time
def sun_pos(payload):
    """Return the sun's altitude and azimuth (both in degrees) for a place and time.

    `payload` must carry 'lat' and 'lon' in degrees plus 'epoch' as a Unix
    timestamp in seconds; the position is computed with Pysolar in UTC.
    """
    when = datetime.datetime.fromtimestamp(payload["epoch"], tz=datetime.timezone.utc)
    latitude, longitude = payload["lat"], payload["lon"]
    return {
        "altitude": solar.get_altitude(latitude, longitude, when),
        "azimuth": solar.get_azimuth(latitude, longitude, when),
    }
| bsamadi/metadata-processor | app/sun_pos.py | sun_pos.py | py | 727 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 13,
"usage_type": "attribute"
},
{
"api... |
import cv2
import imutils
import numpy as np

# Pre-processing demo: load an image, shrink it to a fixed width, apply a
# Gaussian blur and show the result until a key is pressed.
cv2.namedWindow("MyImage")

img = cv2.imread("img.jpg")
if img is None:
    # cv2.imread returns None (instead of raising) when the file is missing or unreadable.
    raise FileNotFoundError("could not read 'img.jpg'")

# translated = imutils.translate(img, 25, -75)
# rotated = imutils.rotate(img, 45)

# Resize while preserving the aspect ratio.
img = imutils.resize(img, width=600)

# url_images = imutils.url_to_image(
#     "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_92x30dp.png")

# 5x5 Gaussian blur with sigma = 1.
blur = cv2.GaussianBlur(img, (5, 5), 1)

cv2.imshow("MyImage", blur)
cv2.waitKey(0)  # block until any key press
cv2.destroyAllWindows()
| osoulim/Computer-Vision | Python/Week3/pre_proccess.py | pre_proccess.py | py | 532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.namedWindow",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "imutils.resize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number... |
26039871256 | from __future__ import annotations
import dataclasses
import hashlib
import os.path
from collections import deque
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable, Mapping
from pants.backend.go.util_rules import cgo, coverage
from pants.backend.go.util_rules.assembly import (
AssembleGoAssemblyFilesRequest,
FallibleAssembleGoAssemblyFilesResult,
FallibleGenerateAssemblySymabisResult,
GenerateAssemblySymabisRequest,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.cgo import CGoCompileRequest, CGoCompileResult, CGoCompilerFlags
from pants.backend.go.util_rules.coverage import (
ApplyCodeCoverageRequest,
ApplyCodeCoverageResult,
BuiltGoPackageCodeCoverageMetadata,
FileCodeCoverageMetadata,
)
from pants.backend.go.util_rules.embedcfg import EmbedConfig
from pants.backend.go.util_rules.goroot import GoRoot
from pants.backend.go.util_rules.import_config import ImportConfig, ImportConfigRequest
from pants.backend.go.util_rules.sdk import GoSdkProcess, GoSdkToolIDRequest, GoSdkToolIDResult
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestEntries,
DigestSubset,
FileContent,
FileEntry,
MergeDigests,
PathGlobs,
)
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.resources import read_resource
from pants.util.strutil import path_safe
class BuildGoPackageRequest(EngineAwareParameter):
    """Request to build one Go package (plus its dependency DAG) as `__pkg__.a` archives."""

    def __init__(
        self,
        *,
        import_path: str,
        pkg_name: str,
        digest: Digest,
        dir_path: str,
        build_opts: GoBuildOptions,
        go_files: tuple[str, ...],
        s_files: tuple[str, ...],
        direct_dependencies: tuple[BuildGoPackageRequest, ...],
        import_map: Mapping[str, str] | None = None,
        minimum_go_version: str | None,
        for_tests: bool = False,
        embed_config: EmbedConfig | None = None,
        with_coverage: bool = False,
        cgo_files: tuple[str, ...] = (),
        cgo_flags: CGoCompilerFlags | None = None,
        c_files: tuple[str, ...] = (),
        header_files: tuple[str, ...] = (),
        cxx_files: tuple[str, ...] = (),
        objc_files: tuple[str, ...] = (),
        fortran_files: tuple[str, ...] = (),
        prebuilt_object_files: tuple[str, ...] = (),
        pkg_specific_compiler_flags: tuple[str, ...] = (),
        pkg_specific_assembler_flags: tuple[str, ...] = (),
        is_stdlib: bool = False,
    ) -> None:
        """Build a package and its dependencies as `__pkg__.a` files.
        Instances of this class form a structure-shared DAG, and so a hashcode is pre-computed for
        the recursive portion.
        """
        # Coverage needs a coverage config to have been computed up front.
        if with_coverage and build_opts.coverage_config is None:
            raise ValueError(
                "BuildGoPackageRequest.with_coverage is set but BuildGoPackageRequest.build_opts.coverage_config is None!"
            )
        self.import_path = import_path
        self.pkg_name = pkg_name
        self.digest = digest
        self.dir_path = dir_path
        self.build_opts = build_opts
        self.go_files = go_files
        self.s_files = s_files
        self.direct_dependencies = direct_dependencies
        # Stored as a FrozenDict so the request remains hashable.
        self.import_map = FrozenDict(import_map or {})
        self.minimum_go_version = minimum_go_version
        self.for_tests = for_tests
        self.embed_config = embed_config
        self.with_coverage = with_coverage
        self.cgo_files = cgo_files
        self.cgo_flags = cgo_flags
        self.c_files = c_files
        self.header_files = header_files
        self.cxx_files = cxx_files
        self.objc_files = objc_files
        self.fortran_files = fortran_files
        self.prebuilt_object_files = prebuilt_object_files
        self.pkg_specific_compiler_flags = pkg_specific_compiler_flags
        self.pkg_specific_assembler_flags = pkg_specific_assembler_flags
        self.is_stdlib = is_stdlib
        # Pre-compute the hash once (including `direct_dependencies`, whose own hashes were
        # pre-computed the same way) so hashing a deep DAG stays cheap per node.
        self._hashcode = hash(
            (
                self.import_path,
                self.pkg_name,
                self.digest,
                self.dir_path,
                self.build_opts,
                self.go_files,
                self.s_files,
                self.direct_dependencies,
                self.import_map,
                self.minimum_go_version,
                self.for_tests,
                self.embed_config,
                self.with_coverage,
                self.cgo_files,
                self.cgo_flags,
                self.c_files,
                self.header_files,
                self.cxx_files,
                self.objc_files,
                self.fortran_files,
                self.prebuilt_object_files,
                self.pkg_specific_compiler_flags,
                self.pkg_specific_assembler_flags,
                self.is_stdlib,
            )
        )
    def __repr__(self) -> str:
        # NB: We must override the default `__repr__` so that `direct_dependencies` does not
        # traverse into transitive dependencies, which was pathologically slow.
        return (
            f"{self.__class__}("
            f"import_path={repr(self.import_path)}, "
            f"pkg_name={self.pkg_name}, "
            f"digest={self.digest}, "
            f"dir_path={self.dir_path}, "
            f"build_opts={self.build_opts}, "
            f"go_files={self.go_files}, "
            f"s_files={self.s_files}, "
            f"direct_dependencies={[dep.import_path for dep in self.direct_dependencies]}, "
            f"import_map={self.import_map}, "
            f"minimum_go_version={self.minimum_go_version}, "
            f"for_tests={self.for_tests}, "
            f"embed_config={self.embed_config}, "
            f"with_coverage={self.with_coverage}, "
            f"cgo_files={self.cgo_files}, "
            f"cgo_flags={self.cgo_flags}, "
            f"c_files={self.c_files}, "
            f"header_files={self.header_files}, "
            f"cxx_files={self.cxx_files}, "
            f"objc_files={self.objc_files}, "
            f"fortran_files={self.fortran_files}, "
            f"prebuilt_object_files={self.prebuilt_object_files}, "
            f"pkg_specific_compiler_flags={self.pkg_specific_compiler_flags}, "
            f"pkg_specific_assembler_flags={self.pkg_specific_assembler_flags}, "
            f"is_stdlib={self.is_stdlib}"
            ")"
        )
    def __hash__(self) -> int:
        # Return the hash pre-computed in __init__.
        return self._hashcode
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Cheap fields (hashcode first) are compared before the potentially deep
        # `direct_dependencies` comparison so unequal requests fail fast.
        return (
            self._hashcode == other._hashcode
            and self.import_path == other.import_path
            and self.pkg_name == other.pkg_name
            and self.digest == other.digest
            and self.dir_path == other.dir_path
            and self.build_opts == other.build_opts
            and self.import_map == other.import_map
            and self.go_files == other.go_files
            and self.s_files == other.s_files
            and self.minimum_go_version == other.minimum_go_version
            and self.for_tests == other.for_tests
            and self.embed_config == other.embed_config
            and self.with_coverage == other.with_coverage
            and self.cgo_files == other.cgo_files
            and self.cgo_flags == other.cgo_flags
            and self.c_files == other.c_files
            and self.header_files == other.header_files
            and self.cxx_files == other.cxx_files
            and self.objc_files == other.objc_files
            and self.fortran_files == other.fortran_files
            and self.prebuilt_object_files == other.prebuilt_object_files
            and self.pkg_specific_compiler_flags == other.pkg_specific_compiler_flags
            and self.pkg_specific_assembler_flags == other.pkg_specific_assembler_flags
            and self.is_stdlib == other.is_stdlib
            # TODO: Use a recursive memoized __eq__ if this ever shows up in profiles.
            and self.direct_dependencies == other.direct_dependencies
        )
    def debug_hint(self) -> str | None:
        # Human-readable identifier surfaced by the engine for this request.
        return self.import_path
@dataclass(frozen=True)
class FallibleBuildGoPackageRequest(EngineAwareParameter, EngineAwareReturnType):
    """Request to build a package, but fallible if determining the request metadata failed.

    When creating "synthetic" packages, use `GoPackageRequest` directly. This type is only
    intended for determining the package metadata of user code, which may fail to be analyzed.
    """

    request: BuildGoPackageRequest | None
    import_path: str
    exit_code: int = 0
    stderr: str | None = None
    dependency_failed: bool = False

    def level(self) -> LogLevel:
        # Only failures of this package itself (not cascaded dependency failures) are errors.
        failed_here = self.exit_code != 0 and not self.dependency_failed
        return LogLevel.ERROR if failed_here else LogLevel.DEBUG

    def message(self) -> str:
        if self.exit_code == 0:
            pieces = [self.import_path, " succeeded."]
        else:
            pieces = [self.import_path, f" failed (exit code {self.exit_code})."]
        if self.stderr:
            pieces.append(f"\n{self.stderr}")
        return "".join(pieces)

    def cacheable(self) -> bool:
        # Failed compile outputs should be re-rendered in every run.
        return self.exit_code == 0
@dataclass(frozen=True)
class FallibleBuiltGoPackage(EngineAwareReturnType):
    """Fallible version of `BuiltGoPackage` with error details."""

    output: BuiltGoPackage | None
    import_path: str
    exit_code: int = 0
    stdout: str | None = None
    stderr: str | None = None
    dependency_failed: bool = False

    def level(self) -> LogLevel:
        # A cascaded dependency failure is logged quietly; a failure of this package is an error.
        if self.exit_code != 0 and not self.dependency_failed:
            return LogLevel.ERROR
        return LogLevel.DEBUG

    def message(self) -> str:
        outcome = (
            " succeeded." if self.exit_code == 0 else f" failed (exit code {self.exit_code})."
        )
        text = self.import_path + outcome
        for stream in (self.stdout, self.stderr):
            if stream:
                text += f"\n{stream}"
        return text

    def cacheable(self) -> bool:
        # Failed compile outputs should be re-rendered in every run.
        return self.exit_code == 0
@dataclass(frozen=True)
class BuiltGoPackage:
    """A package and its dependencies compiled as `__pkg__.a` files.
    The packages are arranged into `__pkgs__/{path_safe(import_path)}/__pkg__.a`.
    """
    # Digest containing every compiled `__pkg__.a` under the layout described above.
    digest: Digest
    # Maps each import path to the path of its `__pkg__.a` within `digest`.
    import_paths_to_pkg_a_files: FrozenDict[str, str]
    # Set only when the package was compiled with code coverage enabled.
    coverage_metadata: BuiltGoPackageCodeCoverageMetadata | None = None
@dataclass(frozen=True)
class RenderEmbedConfigRequest:
    """Request to render an embed config file for the compiler (None yields an empty digest)."""
    embed_config: EmbedConfig | None
@dataclass(frozen=True)
class RenderedEmbedConfig:
    """Digest holding the rendered embed config (empty when no config was supplied)."""
    digest: Digest
    # Well-known path of the rendered config file inside `digest`.
    PATH = "./embedcfg"
@dataclass(frozen=True)
class GoCompileActionIdRequest:
    """Request to compute the compile action id (build id) for one package build request."""
    build_request: BuildGoPackageRequest
@dataclass(frozen=True)
class GoCompileActionIdResult:
    """The computed action id, passed to `go tool compile -buildid`."""
    action_id: str
# TODO(#16831): Merge this rule helper and the AssemblyPostCompilationRequest.
async def _add_objects_to_archive(
    input_digest: Digest,
    pkg_archive_path: str,
    obj_file_paths: Iterable[str],
) -> ProcessResult:
    """Append loose object files to a package archive via `go tool pack r`.

    Args:
        input_digest: digest containing both the archive and the object files.
        pkg_archive_path: path of the `__pkg__.a` archive within `input_digest`.
        obj_file_paths: paths of the object files to append.

    Returns the `ProcessResult` whose output digest captures the updated archive.
    """
    # Use `go tool asm` tool ID since `go tool pack` does not have a version argument.
    asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
    pack_result = await Get(
        ProcessResult,
        GoSdkProcess(
            input_digest=input_digest,
            command=(
                "tool",
                "pack",
                "r",
                pkg_archive_path,
                *obj_file_paths,
            ),
            env={
                # NOTE(review): presumably keys process caching on the toolchain version — confirm.
                "__PANTS_GO_ASM_TOOL_ID": asm_tool_id.tool_id,
            },
            description="Link objects to Go package archive",
            output_files=(pkg_archive_path,),
        ),
    )
    return pack_result
@dataclass(frozen=True)
class SetupAsmCheckBinary:
    """The compiled helper binary used to sniff whether `.s` files use Go assembly syntax."""
    # Digest containing the compiled helper binary.
    digest: Digest
    # Path of the binary within `digest` (e.g. "./__go_asm_check__").
    path: str
# Due to the bootstrap problem, the asm check binary cannot use the `LoadedGoBinaryRequest` rules since
# those rules call back into this `build_pkg` package. Instead, just invoke `go build` directly which is fine
# since the asm check binary only uses the standard library.
@rule
async def setup_golang_asm_check_binary() -> SetupAsmCheckBinary:
    """Compile the bundled `asm_check.go` helper into a standalone binary.

    Invokes `go build` directly (rather than the usual binary-building rules) to avoid
    a rule-graph bootstrap cycle; the helper only uses the Go standard library.
    """
    src_file = "asm_check.go"
    # The helper source ships as a package resource.
    content = read_resource("pants.backend.go.go_sources.asm_check", src_file)
    if not content:
        raise AssertionError(f"Unable to find resource for `{src_file}`.")
    sources_digest = await Get(Digest, CreateDigest([FileContent(src_file, content)]))
    binary_name = "__go_asm_check__"
    compile_result = await Get(
        ProcessResult,
        GoSdkProcess(
            command=("build", "-o", binary_name, src_file),
            input_digest=sources_digest,
            output_files=(binary_name,),
            # Pure-Go build: the check binary itself must not require cgo.
            env={"CGO_ENABLED": "0"},
            description="Build Go assembly check binary",
        ),
    )
    return SetupAsmCheckBinary(compile_result.output_digest, f"./{binary_name}")
# Check whether the given files looks like they could be Golang-format assembly language files.
@dataclass(frozen=True)
class CheckForGolangAssemblyRequest:
    """Request to heuristically check whether assembly files use Go assembly syntax."""
    # Digest containing the assembly sources.
    digest: Digest
    # Directory within `digest` that holds the files.
    dir_path: str
    # Names (relative to `dir_path`) of the `.s` files to inspect.
    s_files: tuple[str, ...]
@dataclass(frozen=True)
class CheckForGolangAssemblyResult:
    """True if any inspected file looks like Go-format assembly (heuristic)."""
    maybe_golang_assembly: bool
@rule
async def check_for_golang_assembly(
    request: CheckForGolangAssemblyRequest,
    asm_check_setup: SetupAsmCheckBinary,
) -> CheckForGolangAssemblyResult:
    """Return true if any of the given `s_files` look like it could be a Golang-format assembly
    language file.
    This is used by the cgo rules as a heuristic to determine if the user is passing Golang assembly
    format instead of gcc assembly format.
    """
    # Run the helper binary over the assembly sources.
    input_digest = await Get(Digest, MergeDigests([request.digest, asm_check_setup.digest]))
    result = await Get(
        ProcessResult,
        Process(
            argv=(
                asm_check_setup.path,
                *(os.path.join(request.dir_path, s_file) for s_file in request.s_files),
            ),
            input_digest=input_digest,
            level=LogLevel.DEBUG,
            description="Check whether assembly language sources are in Go format",
        ),
    )
    # Any output on stdout signals a suspected Go-format file.
    # NOTE(review): assumes the helper prints only for matches — confirm in asm_check.go.
    return CheckForGolangAssemblyResult(len(result.stdout) > 0)
# Copy header files to names which use platform independent names. For example, defs_linux_amd64.h
# becomes defs_GOOS_GOARCH.h.
#
# See https://github.com/golang/go/blob/1c05968c9a5d6432fc6f30196528f8f37287dd3d/src/cmd/go/internal/work/exec.go#L867-L892
# for particulars.
async def _maybe_copy_headers_to_platform_independent_names(
    input_digest: Digest,
    dir_path: str,
    header_files: tuple[str, ...],
    goroot: GoRoot,
) -> Digest | None:
    """Copy platform-suffixed headers to platform-independent names.

    For example, `defs_linux_amd64.h` is copied to `defs_GOOS_GOARCH.h`, mirroring
    https://github.com/golang/go/blob/1c05968c9a5d6432fc6f30196528f8f37287dd3d/src/cmd/go/internal/work/exec.go#L867-L892.
    Returns a digest containing just the renamed copies, or None when no header
    matched any platform suffix.
    """
    goos_goarch = f"_{goroot.goos}_{goroot.goarch}"
    goos = f"_{goroot.goos}"
    goarch = f"_{goroot.goarch}"
    digest_entries = await Get(DigestEntries, Digest, input_digest)
    digest_entries_by_path: dict[str, FileEntry] = {
        entry.path: entry for entry in digest_entries if isinstance(entry, FileEntry)
    }
    new_digest_entries: list[FileEntry] = []
    for header_file in header_files:
        header_file_path = PurePath(dir_path, header_file)
        entry = digest_entries_by_path.get(str(header_file_path))
        if not entry:
            continue
        stem = header_file_path.stem
        new_stem: str | None = None
        # Check the most specific suffix first so `_linux_amd64` is not matched as `_amd64` alone.
        if stem.endswith(goos_goarch):
            new_stem = stem[0 : -len(goos_goarch)] + "_GOOS_GOARCH"
        elif stem.endswith(goos):
            new_stem = stem[0 : -len(goos)] + "_GOOS"
        elif stem.endswith(goarch):
            new_stem = stem[0 : -len(goarch)] + "_GOARCH"
        if new_stem:
            new_header_file_path = PurePath(dir_path, f"{new_stem}{header_file_path.suffix}")
            # Same file content, new platform-independent name.
            new_digest_entries.append(dataclasses.replace(entry, path=str(new_header_file_path)))
    if new_digest_entries:
        digest = await Get(Digest, CreateDigest(new_digest_entries))
        return digest
    else:
        return None
# Gather transitive prebuilt object files for Cgo. Traverse the provided dependencies and lifts `.syso`
# object files into a single `Digest`.
async def _gather_transitive_prebuilt_object_files(
    build_request: BuildGoPackageRequest,
) -> tuple[Digest, frozenset[str]]:
    """Collect prebuilt (`.syso`) object files from `build_request` and all transitive deps.

    Returns the merged digests of the packages that own prebuilt objects, plus the set of
    object file paths found across the dependency graph.
    """
    prebuilt_objects: list[tuple[Digest, list[str]]] = []
    # Breadth-first walk over the dependency DAG.
    # NOTE(review): there is no visited set, so shared dependencies may be enqueued more
    # than once; duplicates are de-duplicated by MergeDigests/set below, but this could be
    # slow on large graphs — confirm whether that matters in practice.
    queue: deque[BuildGoPackageRequest] = deque([build_request])
    while queue:
        pkg = queue.popleft()
        queue.extend(pkg.direct_dependencies)
        if pkg.prebuilt_object_files:
            prebuilt_objects.append(
                (
                    pkg.digest,
                    [
                        os.path.join(pkg.dir_path, obj_file)
                        for obj_file in pkg.prebuilt_object_files
                    ],
                )
            )
    object_digest = await Get(Digest, MergeDigests([digest for digest, _ in prebuilt_objects]))
    object_files = set()
    for _, files in prebuilt_objects:
        object_files.update(files)
    return object_digest, frozenset(object_files)
# NB: We must have a description for the streaming of this rule to work properly
# (triggered by `FallibleBuiltGoPackage` subclassing `EngineAwareReturnType`).
@rule(desc="Compile with Go", level=LogLevel.DEBUG)
async def build_go_package(
request: BuildGoPackageRequest, go_root: GoRoot
) -> FallibleBuiltGoPackage:
maybe_built_deps = await MultiGet(
Get(FallibleBuiltGoPackage, BuildGoPackageRequest, build_request)
for build_request in request.direct_dependencies
)
import_paths_to_pkg_a_files: dict[str, str] = {}
dep_digests = []
for maybe_dep in maybe_built_deps:
if maybe_dep.output is None:
return dataclasses.replace(
maybe_dep, import_path=request.import_path, dependency_failed=True
)
dep = maybe_dep.output
for dep_import_path, pkg_archive_path in dep.import_paths_to_pkg_a_files.items():
if dep_import_path not in import_paths_to_pkg_a_files:
import_paths_to_pkg_a_files[dep_import_path] = pkg_archive_path
dep_digests.append(dep.digest)
merged_deps_digest, import_config, embedcfg, action_id_result = await MultiGet(
Get(Digest, MergeDigests(dep_digests)),
Get(
ImportConfig,
ImportConfigRequest(
FrozenDict(import_paths_to_pkg_a_files),
build_opts=request.build_opts,
import_map=request.import_map,
),
),
Get(RenderedEmbedConfig, RenderEmbedConfigRequest(request.embed_config)),
Get(GoCompileActionIdResult, GoCompileActionIdRequest(request)),
)
unmerged_input_digests = [
merged_deps_digest,
import_config.digest,
embedcfg.digest,
request.digest,
]
# If coverage is enabled for this package, then replace the Go source files with versions modified to
# contain coverage code.
go_files = request.go_files
cgo_files = request.cgo_files
s_files = list(request.s_files)
go_files_digest = request.digest
cover_file_metadatas: tuple[FileCodeCoverageMetadata, ...] | None = None
if request.with_coverage:
coverage_config = request.build_opts.coverage_config
assert coverage_config is not None, "with_coverage=True but coverage_config is None!"
coverage_result = await Get(
ApplyCodeCoverageResult,
ApplyCodeCoverageRequest(
digest=request.digest,
dir_path=request.dir_path,
go_files=go_files,
cgo_files=cgo_files,
cover_mode=coverage_config.cover_mode,
import_path=request.import_path,
),
)
go_files_digest = coverage_result.digest
unmerged_input_digests.append(go_files_digest)
go_files = coverage_result.go_files
cgo_files = coverage_result.cgo_files
cover_file_metadatas = coverage_result.cover_file_metadatas
# Track loose object files to link into final package archive. These can come from Cgo outputs, regular
# assembly files, or regular C files.
objects: list[tuple[str, Digest]] = []
# Add any prebuilt object files (".syso" extension) to the list of objects to link into the package.
if request.prebuilt_object_files:
objects.extend(
(os.path.join(request.dir_path, prebuilt_object_file), request.digest)
for prebuilt_object_file in request.prebuilt_object_files
)
# Process any Cgo files.
cgo_compile_result: CGoCompileResult | None = None
if cgo_files:
# Check if any assembly files contain gcc assembly, and not Go assembly. Raise an exception if any are
# likely in Go format since in cgo packages, assembly files are passed to gcc and must be in gcc format.
#
# Exception: When building runtime/cgo itself, only send `gcc_*.s` assembly files to GCC as
# runtime/cgo has both types of files.
if request.is_stdlib and request.import_path == "runtime/cgo":
gcc_s_files = []
new_s_files = []
for s_file in s_files:
if s_file.startswith("gcc_"):
gcc_s_files.append(s_file)
else:
new_s_files.append(s_file)
s_files = new_s_files
else:
asm_check_result = await Get(
CheckForGolangAssemblyResult,
CheckForGolangAssemblyRequest(
digest=request.digest,
dir_path=request.dir_path,
s_files=tuple(s_files),
),
)
if asm_check_result.maybe_golang_assembly:
raise ValueError(
f"Package {request.import_path} is a cgo package but contains Go assembly files."
)
gcc_s_files = s_files
s_files = [] # Clear s_files since assembly has already been handled in cgo rules.
# Gather all prebuilt object files transitively and pass them to the Cgo rule for linking into the
# Cgo object output. This is necessary to avoid linking errors.
# See https://github.com/golang/go/blob/6ad27161f8d1b9c5e03fb3415977e1d3c3b11323/src/cmd/go/internal/work/exec.go#L3291-L3311.
transitive_prebuilt_object_files = await _gather_transitive_prebuilt_object_files(request)
assert request.cgo_flags is not None
cgo_compile_result = await Get(
CGoCompileResult,
CGoCompileRequest(
import_path=request.import_path,
pkg_name=request.pkg_name,
digest=go_files_digest,
build_opts=request.build_opts,
dir_path=request.dir_path,
cgo_files=cgo_files,
cgo_flags=request.cgo_flags,
c_files=request.c_files,
s_files=tuple(gcc_s_files),
cxx_files=request.cxx_files,
objc_files=request.objc_files,
fortran_files=request.fortran_files,
is_stdlib=request.is_stdlib,
transitive_prebuilt_object_files=transitive_prebuilt_object_files,
),
)
assert cgo_compile_result is not None
unmerged_input_digests.append(cgo_compile_result.digest)
objects.extend(
[
(obj_file, cgo_compile_result.digest)
for obj_file in cgo_compile_result.output_obj_files
]
)
# Copy header files with platform-specific values in their name to platform independent names.
# For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
copied_headers_digest = await _maybe_copy_headers_to_platform_independent_names(
input_digest=request.digest,
dir_path=request.dir_path,
header_files=request.header_files,
goroot=go_root,
)
if copied_headers_digest:
unmerged_input_digests.append(copied_headers_digest)
# Merge all of the input digests together.
input_digest = await Get(
Digest,
MergeDigests(unmerged_input_digests),
)
# If any assembly files are present, generate a "symabis" file containing API metadata about those files.
# The "symabis" file is passed to the Go compiler when building Go code so that the compiler is aware of
# any API exported by the assembly.
#
# Note: The assembly files cannot be assembled at this point because a similar process happens from Go to
# assembly: The Go compiler generates a `go_asm.h` header file with metadata about the Go code in the package.
symabis_path: str | None = None
extra_assembler_flags = tuple(
*request.build_opts.assembler_flags, *request.pkg_specific_assembler_flags
)
if s_files:
symabis_fallible_result = await Get(
FallibleGenerateAssemblySymabisResult,
GenerateAssemblySymabisRequest(
compilation_input=input_digest,
s_files=tuple(s_files),
import_path=request.import_path,
dir_path=request.dir_path,
extra_assembler_flags=extra_assembler_flags,
),
)
symabis_result = symabis_fallible_result.result
if symabis_result is None:
return FallibleBuiltGoPackage(
None,
request.import_path,
symabis_fallible_result.exit_code,
stdout=symabis_fallible_result.stdout,
stderr=symabis_fallible_result.stderr,
)
input_digest = await Get(
Digest, MergeDigests([input_digest, symabis_result.symabis_digest])
)
symabis_path = symabis_result.symabis_path
# Build the arguments for compiling the Go coe in this package.
compile_args = [
"tool",
"compile",
"-buildid",
action_id_result.action_id,
"-o",
"__pkg__.a",
"-pack",
"-p",
request.import_path,
"-importcfg",
import_config.CONFIG_PATH,
]
# See https://github.com/golang/go/blob/f229e7031a6efb2f23241b5da000c3b3203081d6/src/cmd/go/internal/work/gc.go#L79-L100
# for where this logic comes from.
go_version = request.minimum_go_version or "1.16"
if go_root.is_compatible_version(go_version):
compile_args.extend(["-lang", f"go{go_version}"])
if request.is_stdlib:
compile_args.append("-std")
compiling_runtime = request.is_stdlib and request.import_path in (
"internal/abi",
"internal/bytealg",
"internal/coverage/rtcov",
"internal/cpu",
"internal/goarch",
"internal/goos",
"runtime",
"runtime/internal/atomic",
"runtime/internal/math",
"runtime/internal/sys",
"runtime/internal/syscall",
)
# From Go sources:
# runtime compiles with a special gc flag to check for
# memory allocations that are invalid in the runtime package,
# and to implement some special compiler pragmas.
#
# See https://github.com/golang/go/blob/245e95dfabd77f337373bf2d6bb47cd353ad8d74/src/cmd/go/internal/work/gc.go#L107-L112
if compiling_runtime:
compile_args.append("-+")
if symabis_path:
compile_args.extend(["-symabis", symabis_path])
# If any assembly files are present, request the compiler write an "assembly header" with API metadata
# about the Go code that can be used by assembly files.
asm_header_path: str | None = None
if s_files:
if os.path.isabs(request.dir_path):
asm_header_path = "go_asm.h"
else:
asm_header_path = os.path.join(request.dir_path, "go_asm.h")
compile_args.extend(["-asmhdr", asm_header_path])
if embedcfg.digest != EMPTY_DIGEST:
compile_args.extend(["-embedcfg", RenderedEmbedConfig.PATH])
if request.build_opts.with_race_detector:
compile_args.append("-race")
if request.build_opts.with_msan:
compile_args.append("-msan")
if request.build_opts.with_asan:
compile_args.append("-asan")
# If there are no loose object files to add to the package archive later or assembly files to assemble,
# then pass -complete flag which tells the compiler that the provided Go files constitute the entire package.
if not objects and not s_files:
# Exceptions: a few standard packages have forward declarations for
# pieces supplied behind-the-scenes by package runtime.
if request.import_path not in (
"bytes",
"internal/poll",
"net",
"os",
"runtime/metrics",
"runtime/pprof",
"runtime/trace",
"sync",
"syscall",
"time",
):
compile_args.append("-complete")
# Add any extra compiler flags after the ones added automatically by this rule.
if request.build_opts.compiler_flags:
compile_args.extend(request.build_opts.compiler_flags)
if request.pkg_specific_compiler_flags:
compile_args.extend(request.pkg_specific_compiler_flags)
# Remove -N if compiling runtime:
# It is not possible to build the runtime with no optimizations,
# because the compiler cannot eliminate enough write barriers.
if compiling_runtime:
compile_args = [arg for arg in compile_args if arg != "-N"]
go_file_paths = (
str(PurePath(request.dir_path, go_file)) if request.dir_path else f"./{go_file}"
for go_file in go_files
)
generated_cgo_file_paths = cgo_compile_result.output_go_files if cgo_compile_result else ()
# Put the source file paths into a file and pass that to `go tool compile` via a config file using the
# `@CONFIG_FILE` syntax. This is necessary to avoid command-line argument limits on macOS. The arguments
# may end up to exceed those limits when compiling standard library packages where we append a very long GOROOT
# path to each file name or in packages with large numbers of files.
go_source_file_paths_config = "\n".join([*go_file_paths, *generated_cgo_file_paths])
go_sources_file_paths_digest = await Get(
Digest, CreateDigest([FileContent("__sources__.txt", go_source_file_paths_config.encode())])
)
input_digest = await Get(Digest, MergeDigests([input_digest, go_sources_file_paths_digest]))
compile_args.append("@__sources__.txt")
compile_result = await Get(
FallibleProcessResult,
GoSdkProcess(
input_digest=input_digest,
command=tuple(compile_args),
description=f"Compile Go package: {request.import_path}",
output_files=("__pkg__.a", *([asm_header_path] if asm_header_path else [])),
env={"__PANTS_GO_COMPILE_ACTION_ID": action_id_result.action_id},
),
)
if compile_result.exit_code != 0:
return FallibleBuiltGoPackage(
None,
request.import_path,
compile_result.exit_code,
stdout=compile_result.stdout.decode("utf-8"),
stderr=compile_result.stderr.decode("utf-8"),
)
compilation_digest = compile_result.output_digest
# TODO: Compile any C files if this package does not use Cgo.
# If any assembly files are present, then assemble them. The `compilation_digest` will contain the
# assembly header `go_asm.h` in the object directory.
if s_files:
# Extract the `go_asm.h` header from the compilation output and merge into the original compilation input.
assert asm_header_path is not None
asm_header_digest = await Get(
Digest,
DigestSubset(
compilation_digest,
PathGlobs(
[asm_header_path],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin="the `build_go_package` rule",
),
),
)
assembly_input_digest = await Get(Digest, MergeDigests([input_digest, asm_header_digest]))
assembly_fallible_result = await Get(
FallibleAssembleGoAssemblyFilesResult,
AssembleGoAssemblyFilesRequest(
input_digest=assembly_input_digest,
s_files=tuple(sorted(s_files)),
dir_path=request.dir_path,
import_path=request.import_path,
extra_assembler_flags=extra_assembler_flags,
),
)
assembly_result = assembly_fallible_result.result
if assembly_result is None:
return FallibleBuiltGoPackage(
None,
request.import_path,
assembly_fallible_result.exit_code,
stdout=assembly_fallible_result.stdout,
stderr=assembly_fallible_result.stderr,
)
objects.extend(assembly_result.assembly_outputs)
# If there are any loose object files, link them into the package archive.
if objects:
assembly_link_input_digest = await Get(
Digest,
MergeDigests(
[
compilation_digest,
*(digest for obj_file, digest in objects),
]
),
)
assembly_link_result = await _add_objects_to_archive(
input_digest=assembly_link_input_digest,
pkg_archive_path="__pkg__.a",
obj_file_paths=sorted(obj_file for obj_file, digest in objects),
)
compilation_digest = assembly_link_result.output_digest
path_prefix = os.path.join("__pkgs__", path_safe(request.import_path))
import_paths_to_pkg_a_files[request.import_path] = os.path.join(path_prefix, "__pkg__.a")
output_digest = await Get(Digest, AddPrefix(compilation_digest, path_prefix))
merged_result_digest = await Get(Digest, MergeDigests([*dep_digests, output_digest]))
# Include the modules sources in the output `Digest` alongside the package archive if the Cgo rules
# detected a potential attempt to link against a static archive (or other reference to `${SRCDIR}` in
# options) which necessitates the linker needing access to module sources.
if cgo_compile_result and cgo_compile_result.include_module_sources_with_output:
merged_result_digest = await Get(
Digest, MergeDigests([merged_result_digest, request.digest])
)
coverage_metadata = (
BuiltGoPackageCodeCoverageMetadata(
import_path=request.import_path,
cover_file_metadatas=cover_file_metadatas,
sources_digest=request.digest,
sources_dir_path=request.dir_path,
)
if cover_file_metadatas
else None
)
output = BuiltGoPackage(
digest=merged_result_digest,
import_paths_to_pkg_a_files=FrozenDict(import_paths_to_pkg_a_files),
coverage_metadata=coverage_metadata,
)
return FallibleBuiltGoPackage(output, request.import_path)
@rule
def required_built_go_package(fallible_result: FallibleBuiltGoPackage) -> BuiltGoPackage:
if fallible_result.output is not None:
return fallible_result.output
raise Exception(
f"Failed to compile {fallible_result.import_path}:\n"
f"{fallible_result.stdout}\n{fallible_result.stderr}"
)
@rule
async def render_embed_config(request: RenderEmbedConfigRequest) -> RenderedEmbedConfig:
digest = EMPTY_DIGEST
if request.embed_config:
digest = await Get(
Digest,
CreateDigest(
[FileContent(RenderedEmbedConfig.PATH, request.embed_config.to_embedcfg())]
),
)
return RenderedEmbedConfig(digest)
# Compute a cache key for the compile action. This computation is intended to capture similar values to the
# action ID computed by the `go` tool for its own cache.
# For details, see https://github.com/golang/go/blob/21998413ad82655fef1f31316db31e23e0684b21/src/cmd/go/internal/work/exec.go#L216-L403
@rule
async def compute_compile_action_id(
request: GoCompileActionIdRequest, goroot: GoRoot
) -> GoCompileActionIdResult:
bq = request.build_request
h = hashlib.sha256()
# All Go action IDs have the full version (as returned by `runtime.Version()` in the key.
# See https://github.com/golang/go/blob/master/src/cmd/go/internal/cache/hash.go#L32-L46
h.update(goroot.full_version.encode())
h.update("compile\n".encode())
if bq.minimum_go_version:
h.update(f"go {bq.minimum_go_version}\n".encode())
h.update(f"goos {goroot.goos} goarch {goroot.goarch}\n".encode())
h.update(f"import {bq.import_path}\n".encode())
# TODO: Consider what to do with this information from Go tool:
# fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
# TODO: Inject cgo-related values here.
# TODO: Inject cover mode values here.
# TODO: Inject fuzz instrumentation values here.
compile_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("compile"))
h.update(f"compile {compile_tool_id.tool_id}\n".encode())
# TODO: Add compiler flags as per `go`'s algorithm. Need to figure out
if bq.s_files:
asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
h.update(f"asm {asm_tool_id.tool_id}\n".encode())
# TODO: Add asm flags as per `go`'s algorithm.
# TODO: Add micro-architecture into cache key (e.g., GOAMD64 setting).
if "GOEXPERIMENT" in goroot._raw_metadata:
h.update(f"GOEXPERIMENT={goroot._raw_metadata['GOEXPERIMENT']}".encode())
# TODO: Maybe handle go "magic" env vars: "GOCLOBBERDEADHASH", "GOSSAFUNC", "GOSSADIR", "GOSSAHASH" ?
# TODO: Handle GSHS_LOGFILE compiler debug option by breaking cache?
# Note: Input files are already part of cache key. Thus, this algorithm omits incorporating their
# content hashes into the action ID.
return GoCompileActionIdResult(h.hexdigest())
def rules():
return (
*collect_rules(),
*cgo.rules(),
*coverage.rules(),
)
| pantsbuild/pants | src/python/pants/backend/go/util_rules/build_pkg.py | build_pkg.py | py | 38,872 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.engine.engine_aware.EngineAwareParameter",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pants.backend.go.util_rules.build_opts.GoBuildOptions",
"line_number": ... |
74884399227 | # -*- coding: utf-8 -*-
import re
import sqlite3
from collections import defaultdict
import requests
import gnupg
class OTCDB(object):
gpg_file = 'GPG.db'
rating_file = 'RatingSystem.db'
def __init__(self, path):
self.path = path
self.trusted = {}
def open_db(self):
gpg_path = '{0}/{1}'.format(self.path, self.gpg_file)
self.gdb = sqlite3.connect(gpg_path, check_same_thread=False)
self.gdb.row_factory = sqlite3.Row
rating_path = '{0}/{1}'.format(self.path, self.rating_file)
self.rdb = sqlite3.connect(rating_path, check_same_thread=False)
self.rdb.row_factory = sqlite3.Row
def close_db(self):
self.gdb.close()
self.rdb.close()
def update_db(self):
g = self.update_rating_db()
r = self.update_gpg_db()
def update_rating_db(self):
filename = '{0}/{1}'.format(self.path, self.rating_file)
url = 'http://bitcoin-otc.com/otc/RatingSystem.db'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r.iter_content(10*1024):
f.write(chunk)
return True
else:
return False
def update_gpg_db(self):
filename = '{0}/{1}'.format(self.path, self.gpg_file)
url = 'http://bitcoin-otc.com/otc/GPG.db'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r.iter_content(10*1024):
f.write(chunk)
return True
else:
return False
def update_trust(self):
self.update_db()
self.open_db()
new_trust = self.assbot_trust()
self.close_db()
return new_trust
def trust_diff(self, old, new):
new_keys = set(new.keys())
old_keys = set(old.keys())
added_keys = list(new_keys - old_keys)
removed_keys = list(old_keys - new_keys)
# add metadata in removed list before we throw it away
removed_keys = [(rk, old[rk][0]) for rk in removed_keys]
return (added_keys, removed_keys)
def assbot_trust(self):
assbot_ratings = defaultdict(int)
trusted = {}
sel = """SELECT nick, rated_user_id, rater_user_id, rating FROM ratings
JOIN users ON ratings.rated_user_id = users.id
WHERE rater_user_id = 5506 OR rater_user_id IN (SELECT rated_user_id FROM ratings WHERE rater_user_id = 5506)
"""
cursor = self.rdb.cursor()
cursor.execute(sel)
results = cursor.fetchall()
for row in results:
add = 1 if row['rating'] > 0 else -1
assbot_ratings[ row['nick'] ] += add
selkey = 'SELECT fingerprint FROM users WHERE lower(nick) = ? AND fingerprint IS NOT NULL'
gcursor = self.gdb.cursor()
for nick in assbot_ratings:
if assbot_ratings[nick] > 0:
row = gcursor.execute(selkey, (nick.lower(),)).fetchone()
if row:
trusted[ row['fingerprint'] ] = (nick, assbot_ratings[nick])
return trusted
## make gpg pubring
class GPGManager(object):
def __init__(self, gpghome, keyserver=None):
self.gpghome = gpghome
self.keyserver = keyserver if keyserver else 'hkp://pool.sks-keyservers.net'
self.notfound = re.compile('key ([^ ]+) not found on keyserver')
self.gpg = gnupg.GPG(homedir=gpghome)
self.gpg.keyserver = self.keyserver
def recv_keys(self, fingerprints, batch=10):
not_found = []
imported = 0
for chunk in chunks(fingerprints, batch):
r = self.gpg.recv_keys(*chunk)
missing = self.notfound.findall(r.data)
for k in missing:
if k in chunk: not_found.append(k)
imported += r.counts['imported']
return (imported, not_found)
def verify(self, data):
return self.gpg.verify(data)
def delete_keys(self, fingerprints):
return self.gpg.delete_keys(fingerprints)
def chunks(l, n):
""" Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
| extempore/deedbundler | deedbundler/otc.py | otc.py | py | 3,668 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"l... |
3602691667 |
"""
Description:
使用 Bokeh,基于各国家创建一个 CPI 和童工数据的散点图。
拓展:
Bokeh(http://bokeh.pydata.org/)是一个 Python 绘图库,能够用相当简单的命令来绘制更
复杂的图表类型。如果想要创建一个条形图、散点图或时间序列图,尝试Bokeh,看看是
否合适。使用 Bokeh,基于各国家创建一个 CPI 和童工数据的散点图。
"""
from bokeh.plotting import figure, show, output_file
# NOTE: You'll need to have 'africa_cpi_cl' table from Chapter 9 to use this
# code.
def scatter_point(chart, x, y, marker_type): # 定义一个函数,scatter_point,接受一个图表、x 轴和 y 轴值、标记的类型(圆形、正方形、矩形),并且添加这些点到图表中。
chart.scatter(x, y, marker=marker_type, line_color="#6666ee",
fill_color="#ee6666", fill_alpha=0.7, size=10) # 图表的 scatter 方法需要两个必需的参数(x 轴和 y 轴)和一些不同的关键参数,为这些点添加样式(包括颜色、透明度、大小)。这行代码传递了边缘颜色和填充颜色以及大小和透明度到函数中。
chart = figure(title="Perceived Corruption and Child Labor in Africa") # 使用函数 figure创建图表,同时传入一个标题。
output_file("scatter_plot.html") # 使用函数output_file 定义输出的文件。这会在你运行代码的文件夹下创建文件scatter_plot.html。
for row in africa_cpi_cl.rows:
scatter_point(chart, float(row['CPI 2013 Score']),
float(row['Total (%)']), 'circle') # 对于每一行数据,使用CPI得分作为x 轴,童工雇用率作为 y 轴,添加一个数据点。
show(chart) # 在浏览器窗口中展示这张图表。
| lafitehhq/PythonBook | Python-03数据处理/Reference/code/chp10-presentation-数据展示/chart_bokeh_使用Bokeh绘图1.0.py | chart_bokeh_使用Bokeh绘图1.0.py | py | 1,776 | python | zh | code | 2 | github-code | 6 | [
{
"api_name": "bokeh.plotting.figure",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.output_file",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.show",
"line_number": 27,
"usage_type": "call"
}
] |
11704116245 | import platform
from tkinter import filedialog
from web3 import Web3
from web3.contract import Contract
from web3.providers.rpc import HTTPProvider
from solcx import install_solc
install_solc(version='latest')
from solcx import compile_source
import subprocess
import os
import tkinter as tk
from PIL import Image, ImageTk
import threading
import json
from dotenv import load_dotenv
load_dotenv()
PRIVATE_KEY = os.getenv('PRIVATE_KEY')
onChainSmartContract = None
web3_1 = None
web3_2 = None
web3_3 = None
web3_4 = None
def get_sc(filename):
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, filename), 'r') as file:
text = file.read()
return text
def update_storage(map):
current_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(current_dir, 'onchaindata.json')
try:
with open(filename, 'r') as file:
maps = json.load(file)
except FileNotFoundError:
maps = []
maps.append(map)
with open(filename, 'w') as file:
json.dump(maps, file)
def read_storage(name: str):
current_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(current_dir, 'onchaindata.json')
with open(filename, 'r') as file:
maps = json.load(file)
for map_data in maps:
if map_data['name'] == name:
return map_data
return None
def deploy(w3: Web3, contract: Contract, name: str):
sm_transaction = {
"from": w3.eth.accounts[0],
"maxFeePerGas": w3.to_hex(1000000000000),
"maxPriorityFeePerGas": w3.to_hex(0),
"gas": w3.to_hex(1000000000000),
"nonce": w3.eth.get_transaction_count(w3.eth.accounts[0]),
"data": contract.bytecode,
'chainId': w3.eth.chain_id
}
signedTransaction = w3.eth.account.sign_transaction(sm_transaction, PRIVATE_KEY)
transaction_hash = w3.eth.send_raw_transaction(signedTransaction.rawTransaction)
tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)
contract = w3.eth.contract(address=tx_receipt.contractAddress, abi=contract.abi, bytecode=contract.bytecode)
new_map = {
'name': name,
'address': str(contract.address),
'abi': str(contract.abi)
}
update_storage(new_map)
return contract
def read(contract: Contract, function_name: str, args: list):
if len(args) == 0:
# result = contract.functions.askForDeploySmartContract().call()
result = contract.functions[function_name]().call()
elif len(args) == 1:
result = contract.functions[function_name](args[0]).call()
elif len(args) == 2:
result = contract.functions[function_name](args[0], args[1]).call()
else:
result = contract.functions[function_name](args[0], args[1], args[2]).call()
return result
def write(w3: Web3, contract: Contract, function_name: str, args: any):
new_transaction = {
"from": w3.eth.accounts[0],
"to": contract.address,
"data": contract.encodeABI(fn_name=function_name, args=args),
"gas": w3.to_hex(1000000000000),
# "gasPrice": w3.to_wei('0', 'gwei'),
"maxFeePerGas": w3.to_hex(1000000000000),
"maxPriorityFeePerGas": w3.to_wei(0, 'gwei'),
"nonce": w3.eth.get_transaction_count(w3.eth.accounts[0]),
'chainId': w3.eth.chain_id
}
#"gas": w3.to_hex(6721975),
signedTransaction = w3.eth.account.sign_transaction(new_transaction, PRIVATE_KEY)
transaction_hash = w3.eth.send_raw_transaction(signedTransaction.rawTransaction)
receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)
return receipt
def init_web3():
global web3_1
global web3_2
global web3_3
global web3_4
web3_1 = Web3(HTTPProvider("http://127.0.0.1:8545"))
if web3_1.is_connected():
print("Connected to http://127.0.0.1:8545")
else:
print("Not connected to http://127.0.0.1:8545")
web3_2 = Web3(HTTPProvider("http://127.0.0.1:8546"))
if web3_2.is_connected():
print("Connected to http://127.0.0.1:8546")
else:
print("Not connected to http://127.0.0.1:8546")
web3_3 = Web3(HTTPProvider("http://127.0.0.1:8547"))
if web3_3.is_connected():
print("Connected to http://127.0.0.1:8547")
else:
print("Not connected to http://127.0.0.1:8547")
web3_4 = Web3(HTTPProvider("http://127.0.0.1:8548"))
if web3_4.is_connected():
print("Connected to http://127.0.0.1:8548")
else:
print("Not connected to http://127.0.0.1:8548")
def loadOnChainManager():
compiledSmartContract = compile_source(get_sc("onchainmanager.sol"), output_values=['abi', 'bin'])
_, smartContractInterface = compiledSmartContract.popitem()
smartContractBytecode = smartContractInterface['bin']
smartContractAbi = smartContractInterface['abi']
global onChainSmartContract
onChainSmartContract = web3_1.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
count = web3_1.eth.get_transaction_count(web3_1.eth.accounts[0])
sc = read_storage("onchainsc")
if sc is None:
onChainSmartContract = deploy(web3_1, onChainSmartContract, 'onchainsc')
# my_contract = web3_1.eth.contract(address=onChainSmartContract.address, abi=onChainSmartContract.abi)
else:
onChainSmartContract = web3_1.eth.contract(address=sc["address"], abi=smartContractAbi, bytecode=smartContractBytecode)
write(web3_1, onChainSmartContract, 'setAddress1', ["http://127.0.0.1:8546"])
write(web3_1, onChainSmartContract, 'setAddress2', ["http://127.0.0.1:8547"])
write(web3_1, onChainSmartContract, 'setAddress3', ["http://127.0.0.1:8548"])
class Loader(tk.Frame):
def __init__(self, parent):
super().__init__(parent)
parent.title("Progetto Software Security & Blockchain")
self.result_text = tk.Text(self)
tk.Button(self, text="Avvia le Blockchain", command=self.run_script_threaded).grid(row=1, column=0)
tk.Button(self, text="Avvia il Programma", command=self.start_app).grid(row=1, column=1)
self.result_text.grid(row=2, column=0, columnspan=2)
def run_script_threaded(self):
threading.Thread(target=self.run_script).start()
# threading.Thread(target=self.init_web3_thread).start()
def start_app(self):
# self.result_text.insert(tk.END, "Tutte le Blockchain sono state inizializzate correttamente!")
# self.result_text.insert(tk.END, "Caricamento del programma... (impiega circa 15 secondi)")
homepage = HomePage(self.master)
init_web3()
loadOnChainManager()
def run_script(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
system = platform.system()
try:
if system == 'Windows':
dir = os.path.join(current_dir, "init.sh")
process = subprocess.Popen([dir], stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, universal_newlines=True)
elif system == 'Linux':
command = ['bash', os.path.join(current_dir, "init.sh")]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
elif system == 'Darwin':
command = ['bash', os.path.join(current_dir, "init.sh")]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
else:
print(f"Error: Unsupported system '{system}'")
except Exception as e:
print("Errore", "File non trovato al percorso " + e)
while True:
output = process.stdout.readline()
if not output and process.poll() is not None:
break
self.result_text.insert(tk.END, output)
self.result_text.see(tk.END)
class HomePage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Home Page")
current_dir = os.path.dirname(os.path.abspath(__file__))
button1_image = Image.open(os.path.join(current_dir, "receipt_long.png"))
button2_image = Image.open(os.path.join(current_dir, "assignment.png"))
button3_image = Image.open(os.path.join(current_dir, "tty.png"))
button4_image = Image.open(os.path.join(current_dir, "delete.png"))
button1_photo = ImageTk.PhotoImage(button1_image)
button2_photo = ImageTk.PhotoImage(button2_image)
button3_photo = ImageTk.PhotoImage(button3_image)
button4_photo = ImageTk.PhotoImage(button4_image)
title_label = tk.Label(self, text="Scegli come deployare il tuo smart contract, oppure se richiamare un metodo di uno smart contract esistente.", font=("Arial", 13))
title_label.grid(row=0, column=0, columnspan=3, pady=20)
button1 = tk.Button(self, image=button1_photo, text="Deploy il file .sol", compound=tk.TOP, font=("Arial", 12), command=self.button1_clicked)
button1.image = button1_photo
button1.grid(row=1, column=0, padx=0, pady=10)
frame1 = tk.Frame(self, height=100, width=100)
frame1.pack_propagate(0)
frame1.grid(row=2, column=0, padx=0, pady=10)
label1 = tk.Label(frame1, text="Carica il tuo\nfile in solidity\nin cui è scritto\nlo Smart Contract", font=("Arial", 13))
label1.pack(fill=tk.BOTH, expand=1)
button2 = tk.Button(self, image=button2_photo, text="Deploy da ABI e Bytecode", compound=tk.TOP, font=("Arial", 12), command=self.button2_clicked)
button2.image = button2_photo
button2.grid(row=1, column=1, padx=0, pady=10)
frame2 = tk.Frame(self, height=100, width=100)
frame2.pack_propagate(0)
frame2.grid(row=2, column=1, padx=0, pady=10)
label2 = tk.Label(frame2, text="Carica il tuo\nSmart Contract\nscrivendo l'ABI\ne il Bytecode", font=("Arial", 13))
label2.pack(fill=tk.BOTH, expand=1)
button3 = tk.Button(self, image=button3_photo, text="Chiama metodo", compound=tk.TOP, font=("Arial", 12), command=self.button3_clicked)
button3.image = button3_photo
button3.grid(row=1, column=2, padx=0, pady=10)
frame3 = tk.Frame(self, height=100, width=100)
frame3.pack_propagate(0)
frame3.grid(row=2, column=2, padx=0, pady=10)
label3 = tk.Label(frame3, text="Chiama un\nmetodo di uno\nSmart Contract\nesistente", font=("Arial", 13))
label3.pack(fill=tk.BOTH, expand=1)
button4 = tk.Button(self, image=button4_photo, text="Elimina Smart Contract", compound=tk.TOP, font=("Arial", 12), command=self.button4_clicked)
button4.image = button4_photo
button4.grid(row=1, column=3, padx=0, pady=10)
frame4 = tk.Frame(self, height=100, width=100)
frame4.pack_propagate(0)
frame4.grid(row=2, column=3, padx=0, pady=10)
label4 = tk.Label(frame4, text="Elimina uno\nSmart Contract", font=("Arial", 13))
label4.pack(fill=tk.BOTH, expand=1)
def get_folder_path():
while True:
folder_path = input("Please enter the path of the folder: ")
if os.path.isdir(folder_path):
return folder_path
else:
print("Invalid folder path. Please try again.")
def button1_clicked(self):
soliditypage = SolidityPage(self.master)
def button2_clicked(self):
abibytecodepage = ABIBytecodePage(self.master)
def button3_clicked(self):
callmethodpage = MethodsPage(self.master)
def button4_clicked(self):
deletepage = DeletePage(self.master)
class SolidityPage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Deploy file .sol")
self.name_label = tk.Label(self, text="Nome:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
self.file_label = tk.Label(self, text="File:")
self.file_label.pack()
self.file_entry = tk.Entry(self, state="readonly")
self.file_entry.pack()
self.browse_button = tk.Button(self, text="Cerca", command=self.browse_file)
self.browse_button.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="Deploy", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def browse_file(self):
filetypes = (("Solidity files", "*.sol"), ("All files", "*.*"))
filename = filedialog.askopenfilename(filetypes=filetypes)
if filename:
self.file_entry.config(state="normal")
self.file_entry.delete(0, tk.END)
self.file_entry.insert(0, filename)
self.file_entry.config(state="readonly")
def ok_button_click(self):
name = self.name_entry.get()
filename = self.file_entry.get()
if not filename.endswith(".sol"):
tk.messagebox.showerror("Error", "Invalid file format. Please select a .sol file.")
else:
compiledSmartContract = compile_source(get_sc(os.path.basename(filename).split('/')[-1]), output_values=['abi', 'bin'])
_, smartContractInterface = compiledSmartContract.popitem()
smartContractBytecode = smartContractInterface['bin']
smartContractAbi = smartContractInterface['abi']
receipt = write(web3_1, onChainSmartContract, "getNextAddress", [])
logs = onChainSmartContract.events.NextAddressReturned().process_receipt(receipt)
nextAddress = logs[0]['args']['nextAddress']
web3_c = Web3(HTTPProvider(nextAddress))
if web3_c.is_connected():
print("Connected to " + nextAddress)
customSmartContract = web3_c.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
customSmartContract = deploy(web3_c, customSmartContract, name)
write(web3_1, onChainSmartContract, 'addContract', [name, str(nextAddress), str(customSmartContract.address), str(smartContractAbi)])
result = read(onChainSmartContract, 'getContract', [name])
print("Result: " + str(result))
show_toast('Lo Smart Contract è stato deployato con successo', '')
else:
print("Not connected to " + nextAddress)
self.destroy()
def cancel_button_click(self):
self.destroy()
class ABIBytecodePage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Deploy da ABI e Bytecode")
self.name_label = tk.Label(self, text="Nome:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
self.abi_label = tk.Label(self, text="ABI:")
self.abi_label.pack()
self.abi_label = tk.Entry(self)
self.abi_label.pack()
self.bytecode_label = tk.Label(self, text="Bytecode:")
self.bytecode_label.pack()
self.bytecode_label = tk.Entry(self)
self.bytecode_label.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="Deploy", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def ok_button_click(self):
name = self.name_entry.get()
smartContractAbi = self.abi_label.get()
smartContractBytecode = self.bytecode_label.get()
receipt = write(web3_1, onChainSmartContract, "getNextAddress", [])
logs = onChainSmartContract.events.NextAddressReturned().process_receipt(receipt)
nextAddress = logs[0]['args']['nextAddress']
web3_c = Web3(HTTPProvider(nextAddress))
if web3_c.is_connected():
print("Connected to " + nextAddress)
customSmartContract = web3_c.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
customSmartContract = deploy(web3_c, customSmartContract, name)
write(web3_1, onChainSmartContract, 'addContract', [name, str(nextAddress), str(customSmartContract.address), str(smartContractAbi)])
result = read(onChainSmartContract, 'getContract', [name])
print("Result: " + str(result))
show_toast('Lo Smart Contract è stato deployato con successo', '')
else:
print("Not connected to " + nextAddress)
self.destroy()
def cancel_button_click(self):
self.destroy()
class MethodsPage(tk.Toplevel):
    """Dialog that invokes a read or write method on a deployed smart contract.

    The contract is looked up by name in the on-chain registry; up to three
    string arguments can be passed to the target function.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.title("Chiama metodo")
        self.name_label = tk.Label(self, text="Nome dello Smart Contract:")
        self.name_label.pack()
        self.name_entry = tk.Entry(self)
        self.name_entry.pack()
        # BUG FIX: the Label widgets were being overwritten by the Entry
        # widgets bound to the same attribute; keep them in distinct
        # attributes so both stay reachable.
        self.function_label = tk.Label(self, text="Nome della Funzione:")
        self.function_label.pack()
        self.function_entry = tk.Entry(self)
        self.function_entry.pack()
        # 0 = read-only call, 1 = state-changing transaction.
        self.selected_option = tk.IntVar()
        self.selected_option.set(0)
        self.option1 = tk.Radiobutton(self, text='Lettura', variable=self.selected_option, value=0)
        self.option1.pack(padx=10)
        self.option2 = tk.Radiobutton(self, text='Scrittura', variable=self.selected_option, value=1)
        self.option2.pack(padx=10)
        # Up to three positional arguments for the target function.
        self.arg_entries = []
        for index in range(1, 4):
            arg_label = tk.Label(self, text="Arg %d:" % index)
            arg_label.pack()
            arg_entry = tk.Entry(self)
            arg_entry.pack()
            self.arg_entries.append(arg_entry)
        frame = tk.Frame(self)
        self.ok_button = tk.Button(frame, text="OK", command=self.ok_button_click)
        self.ok_button.pack(side=tk.LEFT)
        self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
        self.cancel_button.pack(side=tk.LEFT)
        frame.pack()

    def ok_button_click(self):
        """Resolve the contract from the registry and run the chosen method."""
        selected_option = self.selected_option.get()
        name = self.name_entry.get()
        function = self.function_entry.get()
        # Collect the leading non-empty arguments, stopping at the first
        # empty field (same semantics as the original arity-per-branch code).
        args = []
        for entry in self.arg_entries:
            value = entry.get()
            if not value:
                break
            args.append(value)
        # Registry record: [chain endpoint, contract address, ABI].
        data = read(onChainSmartContract, "getContract", [name])
        blockChainAddress = data[0]
        address = data[1]
        # The ABI was stored with Python repr quoting; normalise it to JSON.
        abi = data[2].replace("'", '"').replace('False', 'false').replace('True', 'true')
        web3_c = Web3(HTTPProvider(blockChainAddress))
        if web3_c.is_connected():
            print("Connected to " + blockChainAddress)
            customSmartContract = web3_c.eth.contract(address=address, abi=abi)
            if selected_option == 0:
                result = read(customSmartContract, function, args)
                show_toast('Dati ottenuti correttamente', str(result))
            else:
                write(web3_c, customSmartContract, function, args)
                show_toast('Dati scritti correttamente', '')
            self.destroy()
        else:
            show_toast('Lo Smart Contract non esiste', 'Deployalo prima di eseguire un suo metodo')
            print("Not connected to " + blockChainAddress)

    def cancel_button_click(self):
        # Close the dialog without calling anything.
        self.destroy()
def show_toast(title, description):
    """Show a borderless, always-on-top toast for ~3 seconds.

    :param title: bold headline text
    :param description: smaller grey detail line (may be empty)
    """
    window = tk.Toplevel()
    window.overrideredirect(True)
    window.attributes("-topmost", True)
    # create a frame for the toast message
    frame = tk.Frame(window, bg='white', bd=1, relief=tk.RAISED)
    frame.pack(side=tk.BOTTOM, padx=10, pady=10)
    # create a label for the title and add it to the frame
    title_label = tk.Label(frame, text=title, font=('Arial', 14, 'bold'), fg='black', bg='white')
    title_label.pack(padx=10, pady=5)
    # create a label for the description and add it to the frame
    desc_label = tk.Label(frame, text=description, font=('Arial', 12), fg='gray', bg='white')
    desc_label.pack(padx=10, pady=5)
    # BUG FIX: the old code scheduled a callback after 3 s that itself
    # scheduled the destroy another 3 s later, so the toast lingered for
    # 6 seconds; schedule the destroy directly.
    window.after(3000, window.destroy)
class DeletePage(tk.Toplevel):
    """Modal dialog that removes a registered smart contract by name."""

    def __init__(self, parent):
        super().__init__(parent)
        self.title("Elimina Smart Contract")
        # Single input: the registry name of the contract to delete.
        self.name_label = tk.Label(self, text="Nome:")
        self.name_label.pack()
        self.name_entry = tk.Entry(self)
        self.name_entry.pack()
        # Confirm / cancel buttons side by side on one row.
        button_row = tk.Frame(self)
        self.ok_button = tk.Button(button_row, text="Elimina", command=self.ok_button_click)
        self.cancel_button = tk.Button(button_row, text="Annulla", command=self.cancel_button_click)
        self.ok_button.pack(side=tk.LEFT)
        self.cancel_button.pack(side=tk.LEFT)
        button_row.pack()

    def ok_button_click(self):
        """Delete the named contract from the on-chain registry and close."""
        contract_name = self.name_entry.get()
        write(web3_1, onChainSmartContract, "deleteContract", [contract_name])
        show_toast('Smart Contract eliminato correttamente', '')
        self.destroy()

    def cancel_button_click(self):
        """Close without deleting anything."""
        self.destroy()
# Application entry point: build the main window and start the Tk event loop.
root = tk.Tk()
app = Loader(root)
app.pack()
root.mainloop()
{
"api_name": "solcx.install_solc",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
... |
34953476775 | from django.shortcuts import render_to_response
from read_num.models import get_seven_read_data, get_today_hot_data, get_yesterday_hot_data
from django.contrib.contenttypes.models import ContentType
from blog.models import Blog
from django.utils import timezone
from django.db.models import Sum
from django.core.cache import cache
import datetime
def home(request):
    """Render the landing page with read statistics and hot-blog lists."""
    blog_ct = ContentType.objects.get_for_model(Blog)
    dates, read_nums = get_seven_read_data(blog_ct)
    # The seven-day hot list is expensive to compute, so it is cached
    # for one hour.
    seven_hot_data = cache.get('seven_hot_data')
    if seven_hot_data is None:
        seven_hot_data = get_seven_hot_data()
        cache.set('seven_hot_data', seven_hot_data, 3600)
    context = {
        'dates': dates,
        'read_nums': read_nums,
        'today_hot_data': get_today_hot_data(blog_ct),
        'yesterday_hot_data': get_yesterday_hot_data(blog_ct),
        'seven_hot_data': seven_hot_data,
    }
    return render_to_response('home.html', context)
def get_seven_hot_data():
    # Top blogs ranked by total read count over the trailing window,
    # truncated to at most 7 entries.
    today = timezone.now().date()
    date = today - datetime.timedelta(days=6)
    # NOTE(review): date__gt=(today - 6 days) combined with date__lte=today
    # covers 6 calendar days, not 7 — confirm whether days=6 is intentional
    # or should be days=7 / __gte.
    blogs = Blog.objects \
        .filter(read_details__date__lte=today, read_details__date__gt=date) \
        .values('id', 'title') \
        .annotate(read_num_sum=Sum('read_details__read_num')) \
        .order_by('-read_num_sum')
    return blogs[:7]
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "blog.models.Blog",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects"... |
15067404413 | from django.shortcuts import render, redirect
from .forms import Registration, Login
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from .models import *
from .custom_manager import CustomUserManager
from django.contrib import messages
import uuid
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.auth.decorators import login_required
# Create your views here.
def registration(request):
    """Create a new account and e-mail a one-time verification link.

    Already-authenticated users are bounced to the home page; a taken
    e-mail address redirects back to the registration form.
    """
    if request.user.is_authenticated:
        return redirect('home')
    form = Registration()
    if request.method == "POST":
        email = request.POST.get('email')
        if CustomUser.objects.filter(email=email).first():
            # NOTE(review): flashed with the "success" level even though it
            # is an error — confirm whether the template relies on that.
            messages.success(request, 'Email is taken.')
            return redirect('registration')
        try:
            auth_token = str(uuid.uuid4())
            form = Registration(request.POST)
            if form.is_valid():
                subject = 'Your accounts need to be verified'
                message = f'Welcome to Online Tiffin Service. Thanks for registering on our website. Follow this link to verify your account http://localhost:8000/user/verify/{auth_token}'
                email_from = settings.EMAIL_HOST_USER
                recipient_list = [email]
                # send_mail's return value (messages sent) was bound to an
                # unused local; drop it.
                send_mail(subject, message, email_from, recipient_list)
                new_form = form.save(commit=False)
                new_form.auth_token = auth_token
                new_form.save()
                return redirect('token_send')
        except Exception as e:
            # Best-effort: log the failure and fall through to re-render.
            print(e)
    return render(request, 'customuser/registration.html', {'form': form})
def login_view(request):
    # Already-authenticated users go straight home.
    if request.user.is_authenticated:
        return redirect('home')
    if request.method == "POST":
        form = Login(request.POST)
        # Both fields must be non-empty before we even hit the database.
        if request.POST['email'] and request.POST['password']:
            email = request.POST['email']
            try:
                verify_user = CustomUser.objects.get(email=email)
            except:
                # NOTE(review): bare except also hides non-lookup errors.
                messages.error(request, "User with this email address doesn't exits.")
                return redirect('login')
            # Block login until the e-mailed verification link was used.
            if not verify_user.is_verified:
                messages.error(
                    request, 'Profile is not verified check your mail.')
                return redirect('token_send')
            if form.is_valid():
                password = request.POST['password']
                user = authenticate(email=email, password=password)
                if user:
                    login(request, user)
                    messages.success(request, 'Login Successfully.')
                    print("Login Successfully.")
                    return redirect('home')
                else:
                    messages.error(request, 'Invalid username and password.')
                    return redirect('login')
        else:
            messages.error(request, 'Fill both field')
            return render(request, 'customuser/login.html', {'form': form})
    else:
        form = Login()
    # Fallback: GET request, or a POST whose form failed validation.
    return render(request, 'customuser/login.html', {'form': form})
def userlogout(request):
    """Log the current user out; anonymous visitors are sent to login."""
    if not request.user.is_authenticated:
        return redirect('login')
    logout(request)
    messages.success(request, 'Logout Successfully.')
    return redirect('home')
def success(request):
    # Static "registration succeeded" page.
    return render(request, 'customuser/success.html')
def token_send(request):
    # Static "verification e-mail sent, check your inbox" page.
    return render(request, 'customuser/token_send.html')
def verify(request, auth_token):
    # Activate the account matching the one-time token from the e-mail link.
    try:
        profile_obj = CustomUser.objects.filter(auth_token=auth_token).first()
        if profile_obj:
            if profile_obj.is_verified:
                messages.success(request, 'Your account is already verified.')
                return redirect('login')
            profile_obj.is_verified = True
            profile_obj.save()
            messages.success(request, 'Your account has been verified.')
            return redirect('login')
        else:
            # Unknown token: show the error page.
            return redirect('/error')
    except Exception as e:
        # NOTE(review): broad catch — any DB error silently redirects home.
        print(e)
        return redirect('/')
def error_page(request):
    # Generic error page (e.g. unknown verification token).
    return render(request, 'error.html')
| leenabadgujar/Online_Tiffin_Service | CustomUser/views.py | views.py | py | 4,317 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "forms.Registration",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 22,
"usage_type": "call"
},
{
"api_... |
86572452967 | #!/usr/bin/env python3
"""
Check for and replace aliases with their new names from vk.xml
"""
import argparse
import pathlib
import subprocess
import sys
import xml.etree.ElementTree as et
# Paths are resolved relative to this script so it works from any CWD.
THIS_FILE = pathlib.Path(__file__)
CWD = pathlib.Path.cwd()
VK_XML = THIS_FILE.parent / 'vk.xml'

# Paths (files or trailing-slash directories) that must never be rewritten.
EXCLUDE_PATHS = [
    VK_XML.relative_to(CWD).as_posix(),

    # These files come from other repos, there's no point checking and
    # fixing them here as that would be overwritten in the next sync.
    'src/amd/vulkan/radix_sort/',
    'src/virtio/venus-protocol/',
]
def get_aliases(xml_file: pathlib.Path):
    """Yield ``(old_name, alias_for)`` pairs for every alias in vk.xml."""
    tree = et.parse(xml_file)
    aliased_nodes = (tree.findall('.//enum[@alias]')
                     + tree.findall('.//type[@alias]')
                     + tree.findall('.//command[@alias]'))
    for node in aliased_nodes:
        # Some renames only apply to some APIs
        apis = node.get('api')
        if apis is not None and 'vulkan' not in apis.split(','):
            continue
        yield node.attrib['name'], node.attrib['alias']
def remove_prefix(string: str, prefix: str):
    """Strip *prefix* from the front of *string* if present; otherwise
    return *string* unchanged."""
    return string[len(prefix):] if string.startswith(prefix) else string
# Function from https://stackoverflow.com/a/312464
def chunks(lst: list, n: int):
    """Yield successive n-sized chunks from lst (last one may be shorter)."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def main(paths: list[str]):
    """
    Entrypoint; perform the search for all the aliases and replace them.
    """
    def prepare_identifier(identifier: str) -> str:
        # Strip macro/prefix decorations so the bare identifier matches the
        # form it takes in source files.
        prefixes_seen = []
        for prefix in [
            # Various macros prepend these, so they will not appear in the code using them.
            # List generated using this command:
            # $ prefixes=$(git grep -woiE 'VK_\w+_' -- src/ ':!src/vulkan/registry/' | cut -d: -f2 | sort -u)
            # $ for prefix in $prefixes; do grep -q $prefix src/vulkan/registry/vk.xml && echo "'$prefix',"; done
            # (the second part eliminates prefixes used only in mesa code and not upstream)
            'VK_BLEND_FACTOR_',
            'VK_BLEND_OP_',
            'VK_BORDER_COLOR_',
            'VK_COMMAND_BUFFER_RESET_',
            'VK_COMMAND_POOL_RESET_',
            'VK_COMPARE_OP_',
            'VK_COMPONENT_SWIZZLE_',
            'VK_DESCRIPTOR_TYPE_',
            'VK_DRIVER_ID_',
            'VK_DYNAMIC_STATE_',
            'VK_FORMAT_',
            'VK_IMAGE_ASPECT_MEMORY_PLANE_',
            'VK_IMAGE_ASPECT_PLANE_',
            'VK_IMAGE_USAGE_',
            'VK_NV_',
            'VK_PERFORMANCE_COUNTER_UNIT_',
            'VK_PIPELINE_BIND_POINT_',
            'VK_SAMPLER_ADDRESS_MODE_',
            'VK_SHADER_STAGE_TESSELLATION_',
            'VK_SHADER_STAGE_',
            'VK_STENCIL_OP_',
            'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_',
            'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_',
            'VK_STRUCTURE_TYPE_',
            'VK_USE_PLATFORM_',
            'VK_VERSION_',
            # Many places use the identifier without the `vk` prefix
            # (eg. with the driver name as a prefix instead)
            'VK_',
            'Vk',
            'vk',
        ]:
            # The order matters! A shorter substring will match before a longer
            # one, hiding its matches.
            for prefix_seen in prefixes_seen:
                assert not prefix.startswith(prefix_seen), f'{prefix_seen} must come before {prefix}'
            prefixes_seen.append(prefix)
            identifier = remove_prefix(identifier, prefix)
        return identifier

    aliases = {}
    for old_name, alias_for in get_aliases(VK_XML):
        old_name = prepare_identifier(old_name)
        alias_for = prepare_identifier(alias_for)
        aliases[old_name] = alias_for

    print(f'Found {len(aliases)} aliases in {VK_XML.name}')

    # Some aliases have aliases
    recursion_needs_checking = True
    while recursion_needs_checking:
        recursion_needs_checking = False
        for old, new in aliases.items():
            if new in aliases:
                aliases[old] = aliases[new]
                recursion_needs_checking = True

    # Doing the whole search in a single command breaks grep, so only
    # look for 500 aliases at a time. Searching them one at a time would
    # be extremely slow.
    files_with_aliases = set()
    for aliases_chunk in chunks([*aliases], 500):
        grep_cmd = [
            'git',
            'grep',
            '-rlP',
            '|'.join(aliases_chunk),
        ] + paths
        # check=False: grep exits non-zero when nothing matches, which is
        # a normal outcome here.
        search_output = subprocess.run(
            grep_cmd,
            check=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        ).stdout.decode()
        files_with_aliases.update(search_output.splitlines())

    def file_matches_path(file: str, path: str) -> bool:
        # if path is a folder; match any file within
        if path.endswith('/') and file.startswith(path):
            return True
        return file == path

    # Drop files owned by other repos (see EXCLUDE_PATHS).
    for excluded_path in EXCLUDE_PATHS:
        files_with_aliases = {
            file for file in files_with_aliases
            if not file_matches_path(file, excluded_path)
        }

    if not files_with_aliases:
        print('No alias found in any file.')
        sys.exit(0)

    print(f'{len(files_with_aliases)} files contain aliases:')
    print('\n'.join(f'- {file}' for file in sorted(files_with_aliases)))

    # One in-place sed over all affected files, applying every rename.
    command = [
        'sed',
        '-i',
        ";".join([f's/{old}/{new}/g' for old, new in aliases.items()]),
    ]
    command += files_with_aliases
    subprocess.check_call(command, stderr=subprocess.DEVNULL)
    print('All aliases have been replaced')
if __name__ == '__main__':
    # CLI: optional list of paths to restrict the search (defaults to src/).
    parser = argparse.ArgumentParser()
    parser.add_argument('paths',
                        nargs=argparse.ZERO_OR_MORE,
                        default=['src/'],
                        help='Limit script to these paths (default: `src/`)')
    args = parser.parse_args()
    main(**vars(args))
| sailfishos-mirror/mesa | src/vulkan/registry/update-aliases.py | update-aliases.py | py | 6,150 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"l... |
31684540429 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class DouluoSpider(CrawlSpider):
    # Crawls a web-novel chapter index and follows "next chapter" links,
    # yielding the chapter title and body text for the pipeline.
    name = 'douluo'
    allowed_domains = ['tycqxs.com']
    start_urls = ['http://www.tycqxs.com/54_54196/']
    # Route items through DoupoPipeline for this spider only.
    custom_settings = {'ITEM_PIPELINES': {'shop.pipelines.DoupoPipeline': 300}}

    rules = (
        # Entry point: the 10th <dd> link in the chapter index page.
        Rule(LinkExtractor(restrict_xpaths=r'//div[@id="list"]//dd[10]/a'), callback='parse_item', follow=True),
        # "Next chapter" button at the bottom of each chapter page.
        Rule(LinkExtractor(restrict_xpaths=r'//div[@class="bottem1"]/a[4]'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # zhangjie = chapter title, neirong = body paragraphs.
        zhangjie = response.xpath('//h1/text()').extract_first()
        neirong = response.xpath('//div[@id="content"]/text()').extract()
        next_url = response.xpath('//div[@class="bottem1"]/a[4]/@href').extract_first()
        print(zhangjie)
        yield {
            'zhangjie': zhangjie,
            'neirong': neirong,
            'next_url': response.urljoin(next_url)
        }
| zhangcq1/MyCrawler | 爬虫基础梳理/shop/shop/spiders/douluo.py | douluo.py | py | 1,034 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 13,
"usage_type": "call"
},
{
... |
74200612027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : train.py
# @Author: stoneye
# @Date : 2023/09/01
# @Contact : stoneyezhenxu@gmail.com
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils
from models import ModelUtil
from models import NextvladModel
from models import TextExactor
from models import TrnModel
from tensorflow.python.client import device_lib
class VideoModel():
    """Multi-modal (video RGB + audio + title text) classification model.

    Builds a TF1 graph that fuses frame, audio and title features, gates the
    concatenation with an SE module, and trains cate / tag / joint heads.
    Adversarial perturbations of all three modalities are added as a
    regularizer (total loss = clean loss + perturbed loss).
    """

    def __init__(self, args):
        """
        :param args: config params
        """
        # init the config
        self.init_config()
        model_util_obj = ModelUtil()
        if self.model_type == 'nextvlad':
            model_obj = NextvladModel()  # nextvlad model
        else:
            model_obj = TrnModel()  # trn model
        title_obj = TextExactor()  # text model
        # Load the pre-trained word embeddings.
        word_embeddings = model_util_obj._init_vocab_and_emb(word_vocab=args.word_vocab,
                                                             pre_train_emb_path=args.pre_Train_path)
        self.word_embeddings = tf.Variable(word_embeddings,
                                           name='word_embeddings',
                                           dtype=tf.float32)
        self.init_placeholder()
        # build_graph, supports single gpu or multi gpus
        self.total_loss, self.tag_total_prob, self.cate_total_prob, self.train_op = \
            self.multi_gpu_bulid_graph(num_gpu=self.num_gpu,
                                       lr=self.lr,
                                       ad_strength=self.ad_strength,
                                       word_embeddings=self.word_embeddings,
                                       tag_gt_label=self.tag_gt_label,
                                       cate_gt_label=self.cate_gt_label,
                                       tag_nums=self.tag_nums,
                                       cate_nums=self.cate_nums,
                                       rgb_fea_input=self.input_video_rgb_feature,
                                       rgb_fea_true_frame=self.rgb_fea_true_frame,
                                       audio_fea_input=self.input_video_audio_feature,
                                       audio_fea_true_frame=self.audio_fea_true_frame,
                                       max_frames_rgb=self.max_frames_rgb,
                                       max_frames_audio=self.max_frames_audio,
                                       title_fea_input=self.title_id_int_list,
                                       word_sequence_length=self.word_sequence_length,
                                       model_obj=model_obj,
                                       title_obj=title_obj,
                                       is_training=self.is_training,
                                       dropout_keep_prob=self.dropout_keep_prob,
                                       model_util_obj=model_util_obj,
                                       task_type=self.task_type)

    def init_config(self):
        # NOTE(review): this reads the module-level `args`, not the __init__
        # parameter — confirm the module defines a global `args` before
        # VideoModel is constructed.
        # task name:["cate","tag","cate_and_tag"]
        # 1)"cate": only cate task; 2)"tag":only tag task; 3)"cate_and_tag": multi-task, cate and tag
        self.task_type = args.task_type
        # nums of gpu
        self.num_gpu = args.num_gpu
        # learning rate
        self.lr = args.lr
        # ratio of adversarial perturbations
        self.ad_strength = args.ad_strength
        # the num of tag
        self.tag_nums = args.tag_nums
        # the num of cate
        self.cate_nums = args.cate_nums
        # the num of video frames
        self.max_frames_rgb = args.rgb_frames
        # the num of audio frames
        self.max_frames_audio = args.audio_frames
        # the max length word(word id) of title
        self.title_max_len = args.title_max_len
        # main aggregate model : light-weight: trn ; heavy-weight: nextvlad
        self.model_type = args.model_type
        # the feature size of img frames.
        self.rgb_fea_size = args.rgb_fea_size
        # the feature size of audio frames.
        self.audio_fea_size = args.audio_fea_size

    def init_placeholder(self):
        """
        :return:
        """
        # title:[batch,max_len]
        self.title_id_int_list = tf.placeholder(tf.int32,
                                                shape=[None, self.title_max_len])
        word_sequence_length = tf.reduce_sum(tf.sign(
            self.title_id_int_list), axis=1)  # [batch,]
        self.word_sequence_length = tf.cast(word_sequence_length, tf.int32)  # [batch,]
        # cate ground truth
        # BUG FIX: this placeholder was declared with shape [None, tag_nums];
        # the cate head consumes [batch, cate_nums], which crashed whenever
        # tag_nums != cate_nums.
        self.cate_gt_label = tf.placeholder(tf.float32,
                                            shape=[None, self.cate_nums],
                                            name="cate_gt_label")
        # tag ground truth
        self.tag_gt_label = tf.placeholder(tf.float32,
                                           shape=[None, self.tag_nums],
                                           name="tag_gt_label")
        # rgb fea
        self.input_video_rgb_feature = tf.placeholder(tf.float32,
                                                      shape=[None, self.max_frames_rgb,
                                                             self.rgb_fea_size])
        # the num of rgb frames
        self.rgb_fea_true_frame = tf.placeholder(tf.int32,
                                                 shape=[None, ])
        # the num of audio frames
        self.input_video_audio_feature = tf.placeholder(tf.float32,
                                                        shape=[None,
                                                               self.max_frames_audio,
                                                               self.audio_fea_size])
        # audio frames
        self.audio_fea_true_frame = tf.placeholder(tf.int32,
                                                   shape=[None, ])
        # keep dropout
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name="dropout_keep_prob")
        # is train stage or not
        self.is_training = tf.placeholder(tf.bool,
                                          name="is_training")

    def cal_loss(self, rgb_fea, rgb_fea_true_frame,
                 audio_fea, audio_fea_true_frame,
                 title_emb_fea, word_sequence_length,
                 tag_gt_label, cate_gt_label,
                 tag_nums, cate_nums,
                 max_frames_rgb, max_frames_audio,
                 is_training, dropout_keep_prob, model_obj,
                 title_obj, model_util_obj, reuse,
                 task_type,
                 hidden_size=256, embedding_size=200,
                 num_filters=100, num_outputs=1024,
                 ):
        """Fuse the three modalities and compute (loss, tag_prob, cate_prob)."""
        with tf.variable_scope("cl_loss_from_emb", reuse=reuse):
            # rgb dense vector
            rgb_cluster_fea = model_obj.forward(is_training=is_training,
                                                fea_input=rgb_fea,
                                                dropout_keep_prob=dropout_keep_prob,
                                                fea_type='rgb',
                                                max_frames=max_frames_rgb,
                                                true_frames=rgb_fea_true_frame,
                                                name_scope='rgb_cluster_fea')
            # audio dense vector
            audio_cluster_fea = model_obj.forward(is_training=is_training,
                                                  fea_input=audio_fea,
                                                  dropout_keep_prob=dropout_keep_prob,
                                                  fea_type='audio',
                                                  max_frames=max_frames_audio,
                                                  true_frames=audio_fea_true_frame,
                                                  name_scope='audio_cluster_fea')
            # title dense vector based on bilstm model
            bilstm_title_feature = title_obj._bilstm_feature(
                embedding_descript=title_emb_fea,
                hidden_size=hidden_size,
                des_sequence_length=word_sequence_length,
                dtype=tf.float32,
                reuse=None)
            # title dense vector based on textcnn model
            textcnn_title_feature = title_obj._text_cnn_feature(
                embedding_descript=title_emb_fea,
                embedding_size=embedding_size,
                filter_sizes=list(map(int, "2,3,4,5".split(","))),
                num_filters=num_filters,
                reuse=None
            )
            title_fea = tf.concat([bilstm_title_feature, textcnn_title_feature], axis=1)
            title_fea_drop = slim.dropout(title_fea,
                                          keep_prob=dropout_keep_prob,
                                          is_training=is_training,
                                          scope="title_fea_drop")
            title_fea_dense = slim.fully_connected(inputs=title_fea_drop,
                                                   num_outputs=num_outputs,
                                                   activation_fn=None,
                                                   scope="title_fea_dense")
            # batch normalization
            title_fea_dense_bn = slim.batch_norm(
                title_fea_dense,
                center=True,
                scale=True,
                is_training=is_training,
                scope="title_fea_dense_bn",
                fused=False)
            with slim.arg_scope([slim.fully_connected],
                                normalizer_fn=slim.batch_norm,
                                normalizer_params={'is_training': is_training, 'center': True,
                                                   'scale': True}):
                # multi-modal
                total_fea = tf.concat([rgb_cluster_fea, audio_cluster_fea, title_fea_dense_bn], 1)
                # se gate
                concate_features_se = model_util_obj._se_module(is_training=is_training,
                                                                activation=total_fea,
                                                                name_scope="concat_se")
                concate_features_se_drop = tf.nn.dropout(concate_features_se, dropout_keep_prob)
                if task_type == 'cate':
                    cate_total_loss, cate_total_prob = ModelUtil.cate_hmc_layer(
                        fea_vector=concate_features_se_drop,
                        dropout_keep_prob=dropout_keep_prob,
                        cate_nums=cate_nums,
                        ml_tag=cate_gt_label,
                        name_scope='cate1_total_loss')
                    # No tag head in this mode; emit zeros to keep the
                    # return arity identical across task types.
                    tag_total_prob = tf.zeros_like(tag_gt_label)
                    return cate_total_loss, tag_total_prob, cate_total_prob
                elif task_type == 'tag':
                    tag_total_loss, \
                    tag_total_prob = ModelUtil.tag_hmc_layer(fea_vector=concate_features_se_drop,
                                                             dropout_keep_prob=dropout_keep_prob,
                                                             tag_nums=tag_nums,
                                                             ml_tag=tag_gt_label,
                                                             name_scope='tag_total_loss')
                    cate_total_prob = tf.zeros_like(cate_gt_label)
                    return tag_total_loss, tag_total_prob, cate_total_prob
                elif task_type == 'cate_and_tag':
                    # Multi-task: sum both heads' losses.
                    cate_total_loss, cate_total_prob = ModelUtil.cate_hmc_layer(
                        fea_vector=concate_features_se_drop,
                        dropout_keep_prob=dropout_keep_prob,
                        cate_nums=cate_nums,
                        ml_tag=cate_gt_label,
                        name_scope='cate1_total_loss')
                    tag_total_loss, \
                    tag_total_prob = ModelUtil.tag_hmc_layer(fea_vector=concate_features_se_drop,
                                                             dropout_keep_prob=dropout_keep_prob,
                                                             tag_nums=tag_nums,
                                                             ml_tag=tag_gt_label,
                                                             name_scope='tag_total_loss')
                    return cate_total_loss + tag_total_loss, tag_total_prob, cate_total_prob
                else:
                    raise Exception('task_type:{} not in [cate,tag,cate_and_tag]')

    def multi_gpu_bulid_graph(self, num_gpu, lr, ad_strength, word_embeddings,
                              tag_gt_label, cate_gt_label, tag_nums, cate_nums,
                              title_fea_input, word_sequence_length,
                              rgb_fea_input, rgb_fea_true_frame,
                              audio_fea_input, audio_fea_true_frame,
                              max_frames_rgb, max_frames_audio,
                              model_obj, title_obj, is_training,
                              dropout_keep_prob, model_util_obj, task_type):
        """
        :param num_gpu: # the nums of gpu
        :param lr: #learning rate
        :param ad_strength: # adversarial perturbation
        :param word_embeddings: #word embedding [batch,emb_size]
        :param tag_gt_label: # tag gt label [batch,tag_nums]
        :param cate_gt_label: #cate gt label [batch,cate_nums]
        :param tag_nums: # the nums of tag
        :param cate_nums: # the nums of cate
        :param title_fea_input: # title fea [batch,seq_len]
        :param word_sequence_length: # the truth length of title
        :param rgb_fea_input: #rgb fea [batch,frame,fea_size]
        :param rgb_fea_true_frame: #the truth frames of rgb fea
        :param audio_fea_input: #audio fea [batch,frame,fea_size]
        :param audio_fea_true_frame: #the truth frames of audio fea
        :param max_frames_rgb: #the max frames of rgb
        :param max_frames_audio: #the max frames of audio
        :param model_obj: #aggregate model: nextvlad or trn
        :param title_obj: #textcnn or Bi-LSTM
        :param is_training: # True or False
        :param dropout_keep_prob: # float
        :param model_util_obj: #
        :param task_type: #the type of task:cate, tag, cate_and_tag:multi-task,cate & tag
        :return: (total_loss, total_tag_prob, total_cate_prob, train_op)
        """
        local_device_protos = device_lib.list_local_devices()
        gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
        gpus = gpus[:num_gpu]
        num_gpus = len(gpus)
        if num_gpus > 0:
            print("Using the following GPUs to train: {}".format(gpus))
            num_towers = num_gpus
            device_string = '/gpu:%d'
        else:
            print("No GPUs found. Training on CPU.")
            num_towers = 1
            device_string = '/cpu:%d'
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        optimizer = tf.train.AdamOptimizer(lr)
        # Shard every input tensor across the towers along the batch axis.
        tower_rgb_fea_input = tf.split(rgb_fea_input, num_towers)
        tower_rgb_fea_true_frame = tf.split(rgb_fea_true_frame, num_towers)
        tower_audio_fea_input = tf.split(audio_fea_input, num_towers)
        tower_audio_fea_true_frame = tf.split(audio_fea_true_frame, num_towers)
        tower_title_fea_input = tf.split(title_fea_input, num_towers)
        tower_word_sequence_length = tf.split(word_sequence_length, num_towers)
        tower_tag_gt_label = tf.split(tag_gt_label, num_towers)
        tower_cate_gt_label = tf.split(cate_gt_label, num_towers)
        tower_gradients = []
        tower_predict_tag_probs = []
        tower_predict_cate_probs = []
        tower_total_losses = []
        for i in range(num_towers):
            with tf.device(device_string % i):
                # Variables are shared: reuse for every tower after the first.
                with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
                    with (slim.arg_scope([slim.model_variable, slim.variable],
                                         device="/cpu:0" if num_gpu != 1 else "/gpu:0")):
                        result = self.build_graph(
                            word_embeddings=word_embeddings,
                            rgb_fea_input=tower_rgb_fea_input[i],
                            rgb_fea_true_frame=tower_rgb_fea_true_frame[i],
                            audio_fea_input=tower_audio_fea_input[i],
                            audio_fea_true_frame=tower_audio_fea_true_frame[i],
                            max_frames_rgb=max_frames_rgb,
                            max_frames_audio=max_frames_audio,
                            title_fea_input=tower_title_fea_input[i],
                            word_sequence_length=tower_word_sequence_length[i],
                            tag_gt_label=tower_tag_gt_label[i],
                            cate_gt_label=tower_cate_gt_label[i],
                            is_training=is_training,
                            ad_strength=ad_strength,
                            tag_nums=tag_nums,
                            cate_nums=cate_nums,
                            model_obj=model_obj,
                            title_obj=title_obj,
                            dropout_keep_prob=dropout_keep_prob,
                            model_util_obj=model_util_obj,
                            task_type=task_type
                        )
                        cl_tag_prob = result["tag_prob"]
                        tower_predict_tag_probs.append(cl_tag_prob)
                        cl_cate_prob = result["cate_prob"]
                        tower_predict_cate_probs.append(cl_cate_prob)
                        loss = result["loss"]
                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        with tf.control_dependencies(update_ops):
                            tower_total_losses.append(loss)
                            gradients = \
                                optimizer.compute_gradients(loss, colocate_gradients_with_ops=False)
                            tower_gradients.append(gradients)
        # Aggregate the per-tower outputs.
        total_loss = tf.reduce_mean(tf.stack(tower_total_losses))
        total_tag_prob = tf.concat(tower_predict_tag_probs, 0)
        total_cate_prob = tf.concat(tower_predict_cate_probs, 0)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            merged_gradients = utils.combine_gradients(tower_gradients)
            train_op = optimizer.apply_gradients(merged_gradients, global_step=self.global_step)
        return total_loss, total_tag_prob, total_cate_prob, train_op

    def build_graph(self, word_embeddings, tag_gt_label,
                    cate_gt_label, tag_nums, cate_nums, ad_strength,
                    rgb_fea_input, rgb_fea_true_frame,
                    audio_fea_input, audio_fea_true_frame,
                    title_fea_input, word_sequence_length,
                    max_frames_rgb, max_frames_audio,
                    is_training, model_obj, title_obj,
                    dropout_keep_prob, model_util_obj, task_type):
        """Adversarial-training objective: clean loss plus perturbed loss."""
        # [batch,25,emb_size]
        embedded_title = tf.nn.embedding_lookup(word_embeddings,
                                                title_fea_input)
        # sigmoid cross entropy loss
        cl_loss, cl_tag_prob, cl_cate_prob = self.cal_loss(rgb_fea=rgb_fea_input,
                                                           rgb_fea_true_frame=rgb_fea_true_frame,
                                                           max_frames_rgb=max_frames_rgb,
                                                           max_frames_audio=max_frames_audio,
                                                           audio_fea=audio_fea_input,
                                                           audio_fea_true_frame=audio_fea_true_frame,
                                                           title_emb_fea=embedded_title,
                                                           word_sequence_length=word_sequence_length,
                                                           is_training=is_training,
                                                           tag_gt_label=tag_gt_label,
                                                           cate_gt_label=cate_gt_label,
                                                           tag_nums=tag_nums,
                                                           cate_nums=cate_nums,
                                                           dropout_keep_prob=dropout_keep_prob,
                                                           model_obj=model_obj,
                                                           title_obj=title_obj,
                                                           model_util_obj=model_util_obj,
                                                           task_type=task_type,
                                                           reuse=None)
        # add the perturbation on rgb fea
        rgb_fea_perturbated = model_util_obj.add_perturbation(rgb_fea_input, cl_loss,
                                                              norm_length=ad_strength)
        # add the perturbation on audio fea
        audio_fea_perturbated = model_util_obj.add_perturbation(audio_fea_input, cl_loss,
                                                                norm_length=ad_strength)
        # add the perturbation on text(title) fea
        title_emb_fea_perturbated = model_util_obj.add_perturbation(embedded_title, cl_loss,
                                                                    norm_length=ad_strength)
        # sigmoid cross entropy loss of perturbation (reuse=True shares all
        # weights with the clean pass).
        ad_loss, _, _ = self.cal_loss(rgb_fea=rgb_fea_perturbated,
                                      rgb_fea_true_frame=rgb_fea_true_frame,
                                      max_frames_rgb=max_frames_rgb,
                                      max_frames_audio=max_frames_audio,
                                      audio_fea=audio_fea_perturbated,
                                      audio_fea_true_frame=audio_fea_true_frame,
                                      title_emb_fea=title_emb_fea_perturbated,
                                      word_sequence_length=word_sequence_length,
                                      is_training=is_training,
                                      tag_gt_label=tag_gt_label,
                                      cate_gt_label=cate_gt_label,
                                      tag_nums=tag_nums,
                                      cate_nums=cate_nums,
                                      dropout_keep_prob=dropout_keep_prob,
                                      model_obj=model_obj,
                                      title_obj=title_obj,
                                      model_util_obj=model_util_obj,
                                      task_type=task_type,
                                      reuse=True)
        return {'loss': cl_loss + ad_loss, 'tag_prob': cl_tag_prob, 'cate_prob': cl_cate_prob}
| stoneyezhenxu/Multimodal_Video_Classification | src/video_model.py | video_model.py | py | 23,282 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.ModelUtil",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.NextvladModel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.TrnModel",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.TextExac... |
4135669430 | #!/usr/bin/env python3
# Covariance Calculation from doc2vec model
import numpy as np
import gensim.models
import gensim
import sys
import pickle
from helpers import get_name
def compute_covariance_matrix(model_name, to_json=True):
    """Compute pairwise Pearson correlations between document vectors.

    Despite the historical name, ``np.corrcoef`` yields the correlation
    matrix, not the covariance matrix.

    :param model_name: path to a saved gensim Doc2Vec model
    :param to_json: if True, return a dict keyed by (name_i, name_j);
                    otherwise return the raw NumPy matrix
    :return: (doctags, correlations)
    """
    model = gensim.models.Doc2Vec.load(model_name)
    doctags = list(model.docvecs.doctags)
    # Stack all document vectors into one (n_docs, dim) matrix.
    # (The unused `N = len(doctags)` local was removed.)
    X = np.array([model.docvecs[tag] for tag in doctags])
    # R[i, j] = R[j, i] = dot(vi, vj) / (norm(vi) * norm(vj))
    R = np.corrcoef(X)
    if not to_json:
        return doctags, R
    RR = {}
    for x, dx in enumerate(doctags):
        for y, dy in enumerate(doctags):
            RR[get_name(dx), get_name(dy)] = R[x, y]
    return doctags, RR
if __name__ == '__main__':
    # CLI: python corrcoef.py <model_path>; dumps the pairwise matrix.
    model_name = sys.argv[1]
    doctags, R = compute_covariance_matrix(model_name)
    # Use a context manager so the handle is flushed and closed (the old
    # bare open() leaked the file object).
    with open('corrcoef.pickle', 'wb') as out:
        pickle.dump(R, out)
| papachristoumarios/sade | sade/corrcoef.py | corrcoef.py | py | 930 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "gensim.models.Doc2Vec.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.corrc... |
43956557150 | from flask import flash
from db import db
# Feedback function for registered users
def feedback(user_id, message):
    """Store a feedback message tied to a registered user's account."""
    query = "INSERT INTO messages (user_id, message) VALUES (:user_id, :message)"
    db.session.execute(query, {"user_id": user_id, "message": message})
    db.session.commit()
    flash("Kiitos palautteestasi")
# Feedback function for non-registered users
def anonymous_feedback(email, message):
    """Store feedback from a non-registered user, keyed by email address."""
    # Same shape as feedback(), but records an email instead of a user id.
    sql = "INSERT INTO messages (email, message) VALUES (:email, :message)"
    db.session.execute(sql, {"email":email, "message":message})
    db.session.commit()
    # User-facing Finnish text: "Thank you for your feedback".
    flash("Kiitos palautteestasi")
# returns all messages (feedback)
def get_messages():
    """Return all feedback rows as (username, message, email) tuples.

    Missing usernames/emails are rendered as '-' via COALESCE; the LEFT JOIN
    keeps anonymous feedback (no matching user) in the result.
    """
    sql = "SELECT COALESCE(U.username, '-'), M.message, COALESCE(M.email, '-') FROM messages M LEFT JOIN users U ON M.user_id=U.id"
    result = db.session.execute(sql)
    messages = result.fetchall()
    # NOTE(review): committing after a plain SELECT looks unnecessary —
    # presumably it just releases the transaction; confirm before removing.
    db.session.commit()
    return messages
| asianomainen/tsoha-s2020-tuntivarausjarjestelma | messages.py | messages.py | py | 909 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "db.db.session.execute",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "db.db.session",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "db.db.session.commit",
... |
29017166061 | from django.forms import model_to_dict
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from api.models.facility import Facility
from api.serializers.facility_serializer import FacilitySerializer
@api_view(['GET'])
def facility(request, facility_id):
    """Return a single Facility as JSON, or a 404 payload if absent.

    Bug fix: ``Facility.objects.get`` raises ``DoesNotExist`` on a miss
    instead of returning ``None``, so the original 404 branch was
    unreachable and an unknown id produced an unhandled 500. Using
    ``filter(...).first()`` yields ``None`` on a miss as intended.
    """
    found = Facility.objects.filter(id=facility_id).first()
    if found is not None:
        return JsonResponse(model_to_dict(found, fields=[field.name for field in found._meta.fields]), status=200)
    return JsonResponse({
        "message": "failure"
    }, status=404)
@api_view(['POST'])
def reset_tests(request):
    """Test helper: delete every Facility row, then report success."""
    # Destructive: wipes the whole table; intended for test-environment resets.
    Facility.objects.all().delete()
    return JsonResponse({
        "message": "success"
    }, status=200)
@api_view(['POST', 'GET'])
def facilitys(request, *args, **kwargs):
    """Create (POST) or query (GET) Facility records.

    POST: create a facility unless one with the same name already exists
    (idempotent create — the existing record is echoed back with 200).
    GET: look up a single facility by ``name`` or a list by ``org_id``.

    Bug fixes: a GET name miss previously fell through to the org_id
    branch and could return the misleading "require name or org_id" 400;
    it now returns 404. The dead ``founds is not None`` check was removed
    (``filter()`` always returns a queryset, never ``None``).
    """
    if request.method == 'POST':
        new_facility = JSONParser().parse(request)
        already = Facility.objects.filter(name=new_facility['name']).first()
        if already is not None:
            # Idempotent create: report the pre-existing record.
            return JsonResponse({
                "message": "previously created",
                "created": model_to_dict(already, fields=[field.name for field in already._meta.fields])
            }, status=200)
        serializer = FacilitySerializer(data=new_facility)
        if not serializer.is_valid():
            return JsonResponse({
                "message": "failure"
            }, status=400)
        created = Facility.objects.create(**serializer.validated_data)
        return JsonResponse({
            "message": "success",
            "created": model_to_dict(created, fields=[field.name for field in created._meta.fields])
        }, status=201)
    if request.method == 'GET':
        name = request.query_params.get('name')
        org_id = request.query_params.get('org_id')
        if name is not None:
            found = Facility.objects.filter(name=name).first()
            if found is not None:
                return JsonResponse({
                    "message": "success",
                    "matched": model_to_dict(found, fields=[field.name for field in found._meta.fields])
                }, status=200)
            return JsonResponse({
                "message": "not found"
            }, status=404)
        if org_id is not None:
            founds = Facility.objects.filter(org_id=org_id)
            return JsonResponse({
                "message": "success",
                "matched": [model_to_dict(found, fields=[field.name for field in found._meta.fields]) for found in founds]
            }, status=200)
        return JsonResponse({
            "message": "require name or org_id for facility query"
        }, status=400)
{
"api_name": "api.models.facility.Facility.objects.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "api.models.facility.Facility.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "api.models.facility.Facility",
"line_number": 11,
"usa... |
5469207228 | #ライブラリ、モジュールをインポート
import pandas as pd
import openpyxl as px
from openpyxl.formatting.rule import CellIsRule
from openpyxl.styles import Color, PatternFill
# Prompt for the input workbook names (test data and editor data).
tdname=input('testdataName?')
edname=input('editordataName?')
# Load the first sheet of each workbook; data starts under a two-row header.
td=pd.read_excel(tdname,header=1,sheet_name=0)
ed=pd.read_excel(edname,header=1,sheet_name=0)
# Convert the test-data column labels to a plain Python list.
tdlabel=td.columns.values
tdlabel=tdlabel.tolist()
# Number of labels; used as the loop bound below.
l=len(tdlabel)
# Pre-declare the DataFrames (the originals noted this may be unnecessary).
add=pd.DataFrame()
result=pd.DataFrame()
# Reshape the editor data: for each test-data label, select the editor
# columns whose names match it (str.match is a regex prefix match).
for i in range(l):
    add = ed.loc[:,ed.columns.str.match(tdlabel[i])]
    result = pd.concat([result,add],axis=1)
# Element-wise True/False comparison between the two aligned tables.
tf=pd.DataFrame()
tf=td==result
# Prompt for the output workbook name.
outname=input('outputName?')
# Write all three tables to one workbook, one sheet each.
with pd.ExcelWriter(outname) as writer:
    tf.to_excel(writer,sheet_name='TrueFalse')
    td.to_excel(writer,sheet_name='TestData')
    result.to_excel(writer,sheet_name='EditorData')
# Conditional formatting: highlight FALSE cells in red on the TrueFalse sheet.
wb=px.load_workbook(outname)
ws=wb['TrueFalse']
ws.conditional_formatting.add('A1:AZ100',CellIsRule(operator='equal',formula=['FALSE'],
                                        fill=PatternFill(start_color='FF0000', end_color='FF0000',
                                                         fill_type='solid')))
# Fill the header corner cell, and blank out empty cells, in black.
white=px.styles.PatternFill(patternType='solid',
                        fgColor='000000', bgColor='000000')
ws['A1'].fill=white
ws.conditional_formatting.add('A1:AZ100',CellIsRule(operator='equal',formula=[''],
                                        fill=PatternFill(start_color='000000', end_color='000000',
                                                         fill_type='solid')))
wb.save(outname)
| kobayu0902art/work_snippets | reshape/reshape_v1.4_1.py | reshape_v1.4_1.py | py | 2,175 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_excel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame... |
15764585120 | """
Anisha Kadri 2017
ak4114@ic.ac.uk
A Module containing methods to create networks from different models.
1) For pure preferential attachement:-
pref_att(N, m)
2) For random attachment:-
rand_att(N,m)
3) For a mixture of the two, attachment via random walk:-
walk_att(N,m,L)
References
----------
[1] A. L. Barabási and R. Albert "Emergence of scaling in
random networks", Science 286, pp 509-512, 1999.
"""
import networkx as nx
import random
import math
def pref_att(N, m, seed=None):
    """Returns a graph grown with the Barabasi-Albert model: N nodes in
    total, each new node attached with m edges by pure preferential
    attachment.

    Parameters
    ----------
    N = total number of nodes
    m = number of edges attached to each new node, or degree of new node
        (value must be < N)
    seed = optional argument, initialises the random number generator to a
        starting state for reproducible results.

    Returns
    -------
    A Barabasi Albert Graph, with pure preferential attachment.
    """
    # This ensures that the maximum degree is always less than the number of nodes.
    if m >= N:
        raise Exception("m-value must be less than N")
    if m < 1:
        raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
    # Initialises the pseudo-random number generator, allowing result replication.
    random.seed(seed)
    # Creates a new complete graph of m nodes (all of equal degree).
    nodes = list(range(m))
    G = nx.complete_graph(m)
    G.name = "Graph with N = %s, m = %s"%(N,m)
    # Target nodes for new edges.
    attach_list = nodes
    # Maintains a list of nodes for random sampling —
    # a concatenated edge list —
    # thus the number of instances of each node in the list is proportional
    # to its degree (i.e. the list has k_i instances of node i), which is
    # what makes uniform choice from it preferential.
    node_list=[]
    for i in nodes:
        node_list.extend([i]*m)
    N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
    while N_tot < N:
        new_stubs = [N_tot]*m #create new stubs
        new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
        G.add_edges_from(new_edges)
        # Add the new edge endpoints to the sampling list.
        node_list.extend(attach_list)
        node_list.extend(new_stubs)
        # m nodes are chosen from the edge list to form new targets.
        attach_list = set() # a set ensures the edges added are all unique (not a multigraph)
        while len(attach_list)< m:
            random_node =random.choice(node_list)
            attach_list.add(random_node)
        N_tot += 1
        attach_list = list(attach_list)
    return G
def rand_att(N,m, seed=None):
    """Returns a graph of N nodes grown by uniform random attachment:
    each new node connects m edges to targets chosen uniformly at random.

    Parameters mirror pref_att: N total nodes, m edges per new node
    (must satisfy 1 <= m < N), optional RNG seed.
    """
    if m >= N:
        raise Exception("m-value must be less than N")
    if m < 1:
        raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
    # Intialises the pseudo-random number generator, allowing result replication.
    random.seed(seed)
    # Creates new graph of m nodes, and no edges.
    G = nx.generators.classic.empty_graph(m)
    G.name = "Graph with N = %s, m = %s"%(N,m)
    # Target nodes for new edges.
    # NOTE(review): nx.nodes(G) returns a NodeView, not a list; passing it
    # to random.sample below relies on sampling from a set-like object,
    # which newer Python versions reject — confirm on the project's
    # Python/networkx versions.
    attach_list = nx.nodes(G)
    N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
    while N_tot < N:
        new_stubs = [N_tot]*m #create new stubs
        new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
        G.add_edges_from(new_edges)
        node_list = nx.nodes(G)
        # m nodes are chosen uniformly at random from node_list as new targets.
        attach_list =random.sample(node_list, m)
        N_tot += 1
    return G
def random_walk(N,m, L, seed = None):
    """Grow a graph where each new node attaches to the end points of
    random walks of length L (mixing random and preferential attachment).

    Parameters
    ----------
    N = total number of nodes
    m = number of edges attached to each new node (must satisfy 1 <= m < N)
    L = length of the random walk used to pick each attachment target
    seed = optional seed for the pseudo-random number generator

    Returns
    -------
    A networkx Graph grown by random-walk attachment.
    """
    if m >= N:
        raise Exception("m-value must be less than N")
    if m < 1:
        raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
    # Initialise the pseudo-random number generator for reproducibility.
    random.seed(seed)
    # Start from a complete graph on m nodes; they are the first targets.
    G = nx.complete_graph(m)
    attach_list = list(range(m))
    N_tot = m  # nodes currently in the network; also the id of the next node
    while N_tot < N:
        # Wire the new node N_tot to the current attachment targets.
        G.add_edges_from(zip([N_tot] * m, attach_list))
        node_list = list(G.nodes)
        # Uniformly choose m distinct starting points for the walks.
        start_nodes = set()
        while len(start_nodes) < m:
            start_nodes.add(random.choice(node_list))
        # Bug fix: N_tot was previously incremented inside the sampling loop
        # above (up to m times per added node); it must advance exactly once.
        N_tot += 1
        attach_list = set()  # a set keeps the chosen targets unique
        for start in start_nodes:
            node = start
            # Bug fix: the original used "if steps <= L", so at most one
            # step was ever taken regardless of L; walk exactly L steps.
            for _ in range(L):
                # Bug fix: Graph.neighbours does not exist (US spelling is
                # neighbors) and it returns an iterator, which random.choice
                # cannot index — materialize it first.
                node = random.choice(list(G.neighbors(node)))
            attach_list.add(node)
        attach_list = list(attach_list)
    return G
{
"api_name": "random.seed",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "networkx.complete_graph",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "random.seed",
"... |
654986927 | #!/usr/bin/env python
import argparse
from .. import view_container
def tobool(inp):
    """Interpret a command-line string as a boolean flag.

    'y', '1', 'ok' and 't' (case-insensitive) mean True; anything else
    is False.
    """
    truthy_tokens = ('y', '1', 'ok', 't')
    return inp.lower() in truthy_tokens
# Command-line interface: positional container path plus viewing options.
# --load_into_memory is parsed with tobool, so any string other than
# y/1/ok/t (case-insensitive) means False.
parser = argparse.ArgumentParser(description='Display datasets in h5 or n5/zarr container.')
parser.add_argument('path', type=str, help='path to container')
parser.add_argument('--ndim', type=int, default=3,
                    help='expected number of dimensions')
parser.add_argument('--exclude_names', type=str, nargs='+', default=None,
                    help='names of datasets that will not be loaded')
parser.add_argument('--include_names', type=str, nargs='+', default=None,
                    help='names of datasets that will ONLY be loaded')
parser.add_argument('--load_into_memory', type=tobool, default='n',
                    help='whether to load all data into memory')
parser.add_argument('--n_threads', type=int, default=1,
                    help='number of threads used by z5py')
def main():
    """Parse CLI arguments and hand them to the package's view_container."""
    args = parser.parse_args()
    view_container(args.path, args.ndim,
                   args.exclude_names, args.include_names,
                   args.load_into_memory, args.n_threads)


if __name__ == '__main__':
    main()
| constantinpape/heimdall | heimdall/scripts/view_container.py | view_container.py | py | 1,184 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
}
] |
71245917627 | import time
import numpy as np
from testing import Ptot
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Scale factor: the absolute counts below are divided by this.
    div = int(1e4)
    # number of tests
    Q = int(8e6)//div
    # population size
    N = int(40e6)//div
    R = 0
    # Sweep over infected-population sizes.
    Ip_arr = np.arange(500, 3000)
    # FNR variations
    Ptot_arr_save = []
    Ptot_arr_save.append(Ip_arr)
    FNR = 0.2
    start_time = time.time()
    # NOTE(review): Q is passed both positionally and as the keyword Q=Q;
    # if Ptot's first parameter is itself named Q this raises TypeError —
    # confirm Ptot's signature.
    Ptot_arr = [Ptot(Q, I = Ip, S = N - Ip - R, R = R, Q = Q, \
                     b = 2, FNR = FNR, FPR = 0.05, replacement = True)
                for Ip in Ip_arr]
    Ptot_arr_save.append(Ptot_arr)
    # Save two columns: infected counts and the corresponding Ptot values.
    np.savetxt("testing_data_replacement_true_CA.dat", np.c_[Ptot_arr_save].T)
    print("--- %s seconds ---" % (time.time() - start_time))
    print(Ptot_arr_save)
    plt.figure()
    plt.plot(Ptot_arr_save[0], Ptot_arr_save[1])
plt.show() | lubo93/disease-testing | sampling/testing_analytical_replacement_true_CA.py | testing_analytical_replacement_true_CA.py | py | 881 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "testing.Ptot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number"... |
74147618108 | # coding:utf-8
"""
Django administration setup
@author: Sébastien Renard <Sebastien.Renard@digitalfox.org>
@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)
"""
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from billing.models import ClientBill, SupplierBill
from billing.forms import ClientBillForm, SupplierBillForm
from core.admin import ReturnToAppAdmin
class BillAdmin(ReturnToAppAdmin):
    """Shared admin configuration for client and supplier bills."""
    list_display = ["id", "bill_id", "lead", "state", "amount", "creation_date", "due_date", "payment_date", "comment"]
    ordering = ("-creation_date",)
    # Disable bulk actions (e.g. mass delete) on bills.
    actions = None
    list_filter = ["state", "creation_date", "due_date", "payment_date"]
    # Search spans the lead, its client organisation/contact and the paying authority.
    search_fields = ["lead__name", "lead__client__organisation__name", "comment",
                     "lead__paying_authority__contact__name", "lead__paying_authority__company__name",
                     "lead__client__contact__name", "lead__client__organisation__company__name"]
class ClientBillAdmin(BillAdmin):
    """Admin for bills sent to clients; groups fields into themed sections."""
    fieldsets = [
        (_("Description"), {"fields": ["lead", "bill_id", "bill_file"]}),
        (_("Amounts"), {"fields": ["amount", "vat", "amount_with_vat", ]}),
        (_("Dates"), {"fields": ["creation_date", "due_date", "payment_date", ]}),
        (_("State"), {"fields": ["state", "comment", ]}),
        (_("Link with expenses"), {"fields": ["expenses", "expenses_with_vat", ]}),
    ]
class SupplierBillAdmin(BillAdmin):
    """Admin for bills received from suppliers; adds supplier to search and display."""
    search_fields = BillAdmin.search_fields + ["supplier__contact__name", "supplier__company__name"]
    list_display = list(BillAdmin.list_display) # Copy list before changing it
    list_display.insert(2, "supplier")
    fieldsets = [
        (_("Description"), {"fields": ["supplier", "lead", "bill_id", "supplier_bill_id", "bill_file"]}),
        (_("Amounts"), {"fields": ["amount", "vat", "amount_with_vat", ]}),
        (_("Dates"), {"fields": ["creation_date", "due_date", "payment_date", ]}),
        (_("State"), {"fields": ["state", "comment", ]}),
        (_("Link with expenses"), {"fields": ["expenses", "expenses_with_vat", ]}),
    ]
# Register both bill models with the default admin site.
admin.site.register(ClientBill, ClientBillAdmin)
admin.site.register(SupplierBill, SupplierBillAdmin)
| digitalfox/pydici | billing/admin.py | admin.py | py | 2,334 | python | en | code | 122 | github-code | 6 | [
{
"api_name": "core.admin.ReturnToAppAdmin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 29,
"usage_type"... |
28514991368 | """
This module transforms the corpus into the format require by each benchmarked tool
"""
import json
def liwc(senders, data):
    """Write the LIWC input CSV: one row per sender with their merged emails."""
    # Keep only senders that actually have mail; join each sender's emails
    # into one sentence-separated document. (Membership test replaces the
    # original try/except KeyError — same result for a dict.)
    ds = {
        hashed_addr: '. '.join(data[hashed_addr])
        for hashed_addr in senders
        if hashed_addr in data
    }
    with open(file="dataset/LIWC/data/dataset.csv", mode='w') as csv_file:
        # Each field is wrapped in triple double-quotes, as LIWC expects.
        for key in ds:
            csv_file.write("\"\"\"{}\"\"\",\"\"\"{}\"\"\"\n".format(key, ds[key]))
def personality_recognizer(senders, data):
    """Write one text file per sender for PersonalityRecognizer input."""
    for hashed_addr in senders:
        try:
            # Merge the sender's emails into one sentence-separated document.
            emails = '. '.join(data[hashed_addr])
            with open("dataset/PersonalityRecognizer/data/{}.txt".format(hashed_addr), 'w') as f:
                f.write("%s\n" % emails)
        except KeyError:
            # Sender has no emails in the corpus: skip silently.
            continue
def twitpersonality(senders, data):
    """Write one text file per sender for twitpersonality input."""
    for hashed_addr in senders:
        try:
            # Merge the sender's emails into one sentence-separated document.
            emails = '. '.join(data[hashed_addr])
            with open("dataset/twitpersonality/Data/{}.txt".format(hashed_addr), 'w') as f:
                f.write("%s\n" % emails)
        except KeyError:
            # Sender has no emails in the corpus: skip silently.
            continue
if __name__ == '__main__':
    """
    The file mailcorpus-sha.json contains the emails written by the developers.
    """
    # Maps hashed sender address -> list of email bodies.
    with open("dataset/goldstandard/mailcorpus-sha.json", mode="r", encoding="utf-8") as f:
        email_corpus = json.load(f)
    """
    Here we retrieve the list of developers to perform the emails merging.
    """
    with open(file="dataset/goldstandard/address_list_sha.txt", mode="r") as f:
        hashed_senders = [line.strip() for line in f.readlines()]
    # Emit the corpus in each benchmarked tool's expected input format.
    liwc(hashed_senders, email_corpus)
    personality_recognizer(hashed_senders, email_corpus)
    twitpersonality(hashed_senders, email_corpus)
| collab-uniba/tosem2021-personality-rep-package | src/data_preparation.py | data_preparation.py | py | 1,782 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 46,
"usage_type": "call"
}
] |
19707336879 | # # # -*- coding: utf-8 -*-
# # # @Time :2021/3/22 20:56
# # # @Author :huangzg28153
# # # @File :test.py
# # # @Software :PyCharm
# # import numpy as np
# # import pandas as pd
# # # type = [0,1,1,1,2,0,1,0,1,2,2,0]
# # # ser = [0,1,2,3,4,5,6,0,1,2,3,4]
# # # layer = [0,0,0,0,0,1,1,0,0,0,0,1]
# # # sample = [0,0,0,0,0,0,0,1,1,1,1,1]
# # #
# # # df = pd.DataFrame({"type":type,"ser":ser,"layer":layer,"sample":sample})
# # #
# # #
# # # df.sort_values(by=["ser",'type',"sample","layer"],axis=0)
# # # df.sort_values(by=["layer","sample","type","ser"],axis=0)
# # # df.sort_values(by=["type","layer","sample","ser"],axis=0)
# # # df['order'] = [0,2,4,5,6,9,11,1,3,7,8,10]
# # # df = df.sort_values(by=['order'],axis=0)
# # # df.sort_values(by=['layer','ser','type','sample'],axis=0)
# # # df.sort_values(by=["sample","type",'ser',"layer"],axis=0)
# # #
# # # ########################################################
# # # df.sort_values(by=['layer',"type","sample","ser"],axis=0).reset_index().index
# # # #######################################################
# # # from multiprocess import Process,Manager
# # # from pyHGT.data import Graph, renamed_load
# # # from pyHGT.data import renamed_load
# # # import os
# # # import ray
# # # Manager().register("Graph", Graph)
# # # dir(Manager())
# # # ABSULUTE_DIR = '/data1/huangzg/research/pyHGT_OAG'
# # # graph = renamed_load(open(os.path.join(ABSULUTE_DIR + '/data/oag_output', 'graph_CS.pk'), 'rb'))
# # # func = lambda graph,inp: print(graph.__dir__())
# # #
# # # # graph = Manager().Graph(graph)
# # # ray_graph = ray.put(graph)
# # ###########################
# #
# # import oaglog
# # from pyHGT.data import renamed_load
# # from pyHGT.model import *
# # from pyHGT.SubgraphToTorch import SubgraphToTorch
# # from warnings import filterwarnings
# # filterwarnings("ignore")
# # import ray
# # import os
# # import numpy as np
# # import dill
# # from collections import defaultdict
# # import sys
# # import argparse
# # oaglog.logger.info("流程开始。。。")
# # parser = argparse.ArgumentParser(description='Training GNN on Paper-Venue (Journal) classification task')
# #
# # '''
# # Dataset arguments
# # '''
# # parser.add_argument('--data_dir', type=str, default='/data/oag_output/',
# # help='The address of preprocessed graph.')
# # parser.add_argument('--subgraphs_dir',type=str,default='/data/sampled_subgraphs/',
# # help='The adress of sampled subgraph.')
# # parser.add_argument('--model_dir', type=str, default='/model_save/',
# # help='The address for storing the models and optimization results.')
# # parser.add_argument('--task_name', type=str, default='PV',
# # help='The name of the stored models and optimization results.')
# # parser.add_argument('--cuda', type=int, default=0,
# # help='Avaiable GPU ID')
# # parser.add_argument('--domain', type=str, default='_CS',
# # help='CS, Medicion or All: _CS or _Med or (empty)')
# # '''
# # Model arguments
# # '''
# # parser.add_argument('--conv_name', type=str, default='hgt',
# # choices=['hgt', 'gcn', 'gat', 'rgcn', 'han', 'hetgnn'],
# # help='The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)')
# # parser.add_argument('--n_hid', type=int, default=400,
# # help='Number of hidden dimension')
# # parser.add_argument('--n_heads', type=int, default=8,
# # help='Number of attention head')
# # parser.add_argument('--n_layers', type=int, default=4,
# # help='Number of GNN layers')
# # parser.add_argument('--dropout', type=float, default=0.2,
# # help='Dropout ratio')
# # parser.add_argument('--sample_depth', type=int, default=6,
# # help='How many numbers to sample the graph')
# # parser.add_argument('--sample_width', type=int, default=128,
# # help='How many nodes to be sampled per layer per type')
# # parser.add_argument('--feature_flags', type=tuple, default=('rw','sp'),
# # help='which kind of distance feature to use,"random walk","shortest path" or both')
# # parser.add_argument('--max_sprw', type=tuple, default=(4, 4),
# # help='parameters of distance feature')
# # parser.add_argument('--if_sample_mp',type=bool, default=True,
# # help="whether sample subgraph with multiprocessing or not")
# # parser.add_argument('--sample_n_pool',type=int,default=16,
# # help="how many pools to sample subgraph")
# # '''
# # Optimization arguments
# # '''
# # parser.add_argument('--optimizer', type=str, default='adamw',
# # choices=['adamw', 'adam', 'sgd', 'adagrad'],
# # help='optimizer to use.')
# # parser.add_argument('--data_percentage', type=float, default=1.0,
# # help='Percentage of training and validation data to use')
# # parser.add_argument('--n_epoch', type=int, default=200,
# # help='Number of epoch to run')
# # parser.add_argument('--n_pool', type=int, default=4,
# # help='Number of process to sample subgraph')
# # parser.add_argument('--n_batch', type=int, default=32,
# # help='Number of batch (sampled graphs) for each epoch')
# # parser.add_argument('--repeat', type=int, default=2,
# # help='How many time to train over a singe batch (reuse data)')
# # parser.add_argument('--batch_size', type=int, default=256,
# # help='Number of output nodes for training')
# # parser.add_argument('--clip', type=float, default=0.25,
# # help='Gradient Norm Clipping')
# #
# # args = parser.parse_args()
# #
# # if args.cuda != -1:
# # device = torch.device("cuda:" + str(args.cuda))
# # else:
# # device = torch.device("cpu")
# #
# # ABSULUTE_DIR = '/data1/huangzg/research/pyHGT_OAG'
# #
# # ###############################################data_preparing#########################################################
# # # oaglog.logger.info("读取数据开始。。。")
# # # graph = renamed_load(open(os.path.join(ABSULUTE_DIR + args.data_dir, 'graph%s.pk' % args.domain), 'rb'))
# # # oaglog.logger.info("读取数据完毕。")
# # #
# # # from ReadData import read_data, graph_to_dict
# # #
# # # dict_graph = graph_to_dict(graph)
# #
# # from multiprocess import Manager, Pool, SharedMemoryManager
# # # manager = Manager()
# # # graph_temp = manager.dict(dict_graph)
# #
# # graph = [np.ones(10**8) for i in range(20)]
# #
# # def mp_test(graph):
# # print(id(graph))
# # return 1
# #
# # p = Pool(6)
# #
# # result = p.apply_async(mp_test,graph_temp)
# # # @ray.remote
# # # class Counter(object):
# # # def __init__(self,a):
# # # self.n = 0
# # # self.a = a
# # # def increment(self):
# # # self.n += 1
# # #
# # # def read(self,b,m_graph):
# # # print("a")
# # # self.increment()
# # # print(id(m_graph))
# # # del m_graph
# # # return self.n * b
# # #
# # # counters = [Counter.remote(a=0) for i in range(8)]
# # # futures = [c.read.remote(2, ray_graph) for c in counters]
# # #
# # # print('******************************')
# # # print(ray.get(futures))
# #
# # ray.init()
# # @ray.remote
# # def func(array, param):
# # # print(array.job_id)
# # # print(array.task_id)
# # # print(array.size)
# # # print(type(array))
# # print(id(array))
# # return 1
# #
# # # array = np.ones(10**6)
# # # Store the array in the shared memory object store once
# # # so it is not copied multiple times.
# # # graph = {i: np.ones(10**8) for i in range(20)}
# # graph = [np.ones(10**8) for i in range(20)]
# # array_id = ray.put(graph)
# #
# # result_ids = [func.remote(array_id, i) for i in range(40)]
# # output = ray.get(result_ids)
# # #################################################################
# # #
# # # ray.get(ray_graph)
# # # import ray
# # # import asyncio
# # # ray.init()
# # #
# # # import asyncio
# # #
# # # @ray.remote
# # # class AsyncActor:
# # # async def run_task(self):
# # # print("started")
# # # await asyncio.sleep(1) # Network, I/O task here
# # # print("ended")
# # #
# # # actor = AsyncActor.remote()
# # # # All 50 tasks should start at once. After 1 second they should all finish.
# # # # they should finish at the same time
# # # ray.get([actor.run_task.remote() for _ in range(50)])
# # ###################################################################
# # # import ray
# # # import asyncio
# # # ray.init()
# # #
# # # @ray.remote(num_cpus=40)
# # # class AsyncActor:
# # # # multiple invocation of this method can be running in
# # # # the event loop at the same time
# # # async def run_concurrent(self):
# # # print("started")
# # # await asyncio.sleep(2) # concurrent workload here
# # # print("finished")
# # #
# # # actor = AsyncActor.remote()
# # #
# # # # regular ray.get
# # # ray.get([actor.run_concurrent.remote() for _ in range(80)])
# #
# # # # async ray.get
# # # await actor.run_concurrent.remote()
# #
# # ########################################################################
# from multiprocessing import Pool,Manager,shared_memory
# from multiprocessing.managers import SharedMemoryManager
# import numpy as np
#
#
# a = np.array([np.ones(10**8) for i in range(20)])
#
# shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
# b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
# b[:] = a[:]
#
# def mp_test(graph):
# print(id(graph))
# return 1
#
# p = Pool(6)
# results = []
# for i in range(3):
# result = p.apply_async(mp_test, args=(b,))
# results.append(result)
#
# re = [job.get() for job in results]
# ############################################################################
from multiprocessing import Pool
import multiprocessing as mp
from collections import defaultdict
import pandas as pd
import os
class NewClass(object):
    """Scratch container used to inspect object identity across processes."""

    def __init__(self, a):
        """Store *a* together with several derived views of it."""
        self.a = a
        self.b = {"a": a}
        self.c = pd.DataFrame(self.b)
        self.d = {"c": self.c, "b": self.b, "a": a}

    def my_method(self, e):
        """Print the ids of every held object (and *e*), then return 1."""
        for obj in (self.a, self.b, self.c, self.d, e):
            print(id(obj))
        # Throwaway defaultdict, kept only to mirror the original probe.
        defaultdict(lambda: [])
        return 1
# Module-level object shared (by inheritance) with worker processes.
graph = NewClass([1,3,6])
# NOTE(review): a module-level `global` statement is a no-op; presumably
# left over from an earlier refactor.
global graph


def my_fun(param,graph):
    """Print the worker's pid and the id of the graph it sees; return 1."""
    print(os.getpid(), id(graph))
    return 1


def my_mp(param):
    """Pool entry point: forwards to my_fun with the module-level graph.

    NOTE(review): my_fun's return value is discarded, so callers of
    my_mp receive None.
    """
    my_fun(param, graph)
if __name__ == '__main__':
    # Fan the task out over a small worker pool.
    p = Pool(5)
    jobs = []
    for i in range(mp.cpu_count()-1):
        job = p.apply_async(my_mp, args=(['a','b'],))
        jobs.append(job)
    # NOTE(review): my_mp returns None, so `result` is a list of Nones;
    # the get() calls mainly serve to wait for completion.
    result = [job.get() for job in jobs]
    print(result)
| hzg0601/cn-annotation | HGT_OAG_cn-annotation/codes/test.py | test.py | py | 10,935 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "multiprocessing... |
1544319758 |
import cloudinary
import cloudinary.uploader
from fastapi import FastAPI, UploadFile, Form, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    # SECURITY(review): wildcard origins combined with allow_credentials=True
    # effectively disables CORS protection — confirm this is intentional.
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Configure Cloudinary credentials
# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables and rotate the exposed secret.
cloudinary.config(
    cloud_name='moibit',
    api_key='368643671927417',
    api_secret='q2l37dcCA701JQlidDMoJaFtOY4'
)
@app.post('/upload')
async def upload_file(file: UploadFile = File(...), upload_name: str = Form(...)):
    """Upload *file* to Cloudinary under the public id *upload_name*.

    Returns a success string; raises HTTP 500 on upload failure.

    Bug fix: the original returned a Flask-style ``(message, 500)`` tuple,
    which FastAPI serializes as a JSON array with status 200 — the client
    never saw the error status. Raise HTTPException instead (requires
    HTTPException in the fastapi import at the top of the file).
    """
    try:
        result = cloudinary.uploader.upload(file.file, public_id=upload_name)
        return f"File uploaded successfully. Public ID: {result['public_id']}"
    except Exception as e:
        # Broad catch kept: this is the HTTP boundary for third-party errors.
        raise HTTPException(status_code=500, detail=f"Error uploading file: {str(e)}") from e
if __name__ == '__main__':
    # Run a development server when executed directly.
    import uvicorn
    uvicorn.run(app, host='0.0.0.0', port=8000)
| naman360/courtledger | scripts/main.py | main.py | py | 961 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "cloudinary.config",
"line_number": 17,
"usage_type": "call"
},
{
"api_n... |
15206133090 | """Tests for utils."""
import unittest
import torch
from chemicalx.utils import segment_softmax
class TestPipeline(unittest.TestCase):
    """Tests for chemicalx.utils.segment_softmax."""

    def test_segment_softmax(self):
        """Softmax over two segments of two logits each matches known values.

        Fixes: the docstring was a copy-paste ("Set up the test case...")
        and the bare ``assert`` (stripped under ``python -O``) is replaced
        with a unittest assertion that reports values on failure.
        """
        logit = torch.FloatTensor([-0.5, -2.5, 0.5, 1.5])
        number_of_segments = torch.LongTensor([2])
        segmentation_index = torch.LongTensor([0, 0, 1, 1])
        index = torch.LongTensor([0, 1, 2, 3])
        temperature = torch.LongTensor([2, 2, 2, 2])
        # Hand-computed softmax(logit / 2) within each segment, to 4 d.p.
        truth = torch.FloatTensor([0.7311, 0.2689, 0.3775, 0.6225])
        segment_s = segment_softmax(logit, number_of_segments, segmentation_index, index, temperature)
        difference = torch.sum(torch.abs(truth - segment_s))
        self.assertLess(float(difference), 0.001)
| AstraZeneca/chemicalx | tests/unit/test_utils.py | test_utils.py | py | 789 | python | en | code | 672 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.LongT... |
1976029717 | from django.db import models
from equipas.models import Equipa
# Create your models here.
class Campeonato(models.Model):
    """Championship row mapped onto the existing 'campeonato' table."""
    campeonato_id = models.AutoField(primary_key=True)
    modalidade = models.ForeignKey('Modalidade', models.DO_NOTHING)
    nome = models.CharField(max_length=100)
    epoca = models.CharField(max_length=20)
    datainicio = models.DateField(blank=True, null=True)
    datafim = models.DateField(blank=True, null=True)

    class Meta:
        # Unmanaged: the table exists already; Django does not migrate it.
        managed = False
        db_table = 'campeonato'
class Modalidade(models.Model):
    """Sport/discipline row mapped onto the existing 'modalidade' table."""
    modalidade_id = models.AutoField(primary_key=True)
    designacao = models.CharField(max_length=100)

    class Meta:
        # Unmanaged: the table exists already; Django does not migrate it.
        managed = False
        db_table = 'modalidade'
class Participante(models.Model):
    """Participation record: which team is entered in which championship."""
    equipa = models.OneToOneField(Equipa, models.DO_NOTHING, primary_key=True)
    campeonato = models.ForeignKey(Campeonato, models.DO_NOTHING)

    class Meta:
        # Unmanaged: the table exists already; Django does not migrate it.
        managed = False
        db_table = 'participante'
unique_together = (('equipa', 'campeonato'),) | OliveiraRP/django-webapp | src/webapp/campeonatos/models.py | models.py | py | 1,039 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
75131855228 |
from datetime import datetime
import pymysql
import json as j
import flask as f
from flask import Flask, redirect
from flask import request
from flask import send_from_directory
from flaskext.mysql import MySQL
app=Flask(__name__, static_url_path="")
# Database connection parameters.
# SECURITY(review): credentials are hard-coded (root/root); move them to
# environment variables or a config file outside version control.
mysql=MySQL(cursorclass=pymysql.cursors.DictCursor)
app.config["MYSQL_DATABASE_USER"] = "root"
app.config["MYSQL_DATABASE_PASSWORD"] = "root"
app.config["MYSQL_DATABASE_DB"] = "racunarske_komponente_shop"
app.config["MYSQL_DATABASE_HOST"] = "localhost"
mysql.init_app(app)
#Korisnici
#Provera autenticnosti korisnika
@app.route("/korisnici", methods=["POST"])
def dobaviKorisnike():
    """Authenticate a user from posted JSON credentials.

    Returns the full user row as JSON on success, the string "greska"
    ("error") otherwise.
    """
    prijava = request.get_json()
    cursor = mysql.get_db().cursor()
    # NOTE(review): this loads every user and compares credentials in
    # Python, and passwords appear to be stored in plaintext — consider a
    # parameterized WHERE lookup plus password hashing.
    cursor.execute("SELECT * FROM korisnici")
    korisnici = cursor.fetchall()
    for korisnik in korisnici:
        if korisnik["korisnicko_ime"] == prijava["korisnicko_ime"]:
            if korisnik["lozinka"] == prijava["lozinka"]:
                return f.jsonify(korisnik)
    return "greska"
#Registracija korisnika
@app.route("/dodajKorisnika", methods=["POST"])
def dodajKorisnika():
data = request.get_json()
db = mysql.get_db()
cursor = db.cursor()
cursor.execute("SELECT * FROM korisnici")
korisnici = cursor.fetchall()
for korisnik in korisnici:
if korisnik["korisnicko_ime"] == data["korisnicko_ime"]:
return "greska"
upit = '''INSERT INTO
korisnici(korisnicko_ime, lozinka, ime, prezime, adresa, email)
VALUES(%s, %s, %s, %s, %s, %s)'''
cursor.execute(upit, (data["korisnicko_ime"], data["lozinka"], data["ime"], data["prezime"],
data["adresa"], data["email"]))
db.commit()
return "uspesno"
#Izmena korisnika
@app.route("/izmeniKorisnika/<int:id>", methods=["POST"])
def izmeniKorisnika(id):
data = request.json
print(data)
db = mysql.get_db()
cursor = db.cursor()
upit = '''UPDATE korisnici SET korisnicko_ime=%s, lozinka=%s, ime=%s, prezime=%s, adresa=%s, email=%s WHERE id=%s'''
cursor.execute(upit, (data["korisnicko_ime"], data["lozinka"], data["ime"], data["prezime"], data["adresa"], data["email"], id))
db.commit()
return "uspesno"
#Komponente
#Dobavi komponente
@app.route("/komponente", methods=["GET"])
def dobaviKomponente():
cursor = mysql.get_db().cursor()
cursor.execute("SELECT * FROM komponente")
rows = cursor.fetchall()
return f.jsonify(rows)
#Racuni
#Dodaj racun
@app.route("/dodavanjeKupovine", methods=["POST"])
def dodajKupovinu():
data = request.get_json()
if(data["kolicinaKomponente"] == 0):
return "nemanastanju"
db = mysql.get_db()
cursor = db.cursor()
upit = '''UPDATE komponente SET kolicina = kolicina - 1 WHERE id=%s and kolicina > 0'''
cursor.execute(upit, (data["id_komponente"]))
db.commit()
upit = '''INSERT INTO kupovine(datum_vreme, id_korisnika, naziv_proizvoda, cena)
VALUES(%s, %s, %s, %s)'''
now = datetime.now()
formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')
data["datum_vreme"] = formatted_date
cursor.execute(upit, (data["datum_vreme"], data["id_korisnika"], data["naziv_proizvoda"], data["cena"]))
db.commit()
return "uspesno"
#Pokretanje aplikacije
app.run("192.168.0.13",port=5000, debug=True) | haribate98/Android | FlaskZaProjekat/main.py | main.py | py | 3,497 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flaskext.mysql.MySQL",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request.g... |
14852199031 | # coding: utf-8
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion, label_smoothed_nll_loss
def root_mean_squared_difference(v1, v2, clip_max=0.3):
diff = v1 - v2
# rmsd = diff
mean_squared = torch.mean(torch.mul(diff, diff), -1)
rmsd = torch.sqrt(mean_squared + 1e-9) # To avoid NaN caused by sqrt(0).
if clip_max > 0:
rmsd = torch.clamp(rmsd, 0, clip_max)
return rmsd
@register_criterion('tcvae_spacefusion_loss')
class TCVAESpaceFusionCriterion(LabelSmoothedCrossEntropyCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.eps = args.label_smoothing
self.bow_loss_weight = args.bow_loss_weight
self.interp_loss_weight = args.interp_loss_weight
self.fuse_loss_weight = args.fuse_loss_weight
self.euclidean_distance_clip = args.euclidean_distance_clip
if args.bow_loss_weight > 0:
tgt_dict = task.tgt_dict
class_weight = torch.ones(len(tgt_dict))
class_weight[tgt_dict.pad_index] = 0
class_weight[tgt_dict.eos_index] = 0
self.bow_loss_fc = torch.nn.CrossEntropyLoss(
weight=class_weight, ignore_index=tgt_dict.pad_index)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--interp-loss-weight',
default=1, type=float, help='alpha')
parser.add_argument('--fuse-loss-weight',
default=30, type=float, help='beta')
parser.add_argument('--euclidean-distance-clip',
default=0.3, type=float)
def compute_ce_loss(self, model, net_output, sample, reduce=True):
nto = net_output[0]
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = target.reshape(-1, 1)
loss, nll_loss, = label_smoothed_nll_loss(
lprobs, target, self.eps, ignore_index=self.padding_idx,
reduce=reduce
)
return loss, nll_loss
def compute_fuse_loss(self, encoder_out, reduce=True):
'''
*NOTE*
The fuse_loss is not divided by the batch size to make the scale equal to other losses.
The reduction method used in Fairseq is summation over examples in a batch and the averaged fuse_loss over batch is computed only in aggregate_logging_outputs().
'''
prior_out = encoder_out['prior_out']
post_out = encoder_out['post_out']
batch_size = prior_out.shape[0]
# Make z_s2s[i] and z_AE[i] close.
fuse1 = root_mean_squared_difference(
prior_out, post_out,
clip_max=self.euclidean_distance_clip
)
# Make z_s2s[i] and z_s2s[j] distant.
fuse2 = torch.sum(root_mean_squared_difference(
prior_out.unsqueeze(1),
prior_out.unsqueeze(0),
clip_max=self.euclidean_distance_clip
), -1) / (batch_size - 1)
# Make z_AE[i] and z_AE[j] distant.
fuse3 = torch.sum(root_mean_squared_difference(
post_out.unsqueeze(1),
post_out.unsqueeze(0),
clip_max=self.euclidean_distance_clip
), -1) / (batch_size - 1)
fuse_loss = fuse1 - (fuse2 + fuse3)
if reduce is True:
fuse_loss = fuse_loss.sum()
return fuse_loss
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
encoder_out = model.encoder(**sample['net_input'])
prev_output_tokens = sample['net_input']['prev_output_tokens']
# z_x -> y
prior_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='prior_out')
# z_y -> y
post_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='post_out')
# u*z_x + (1-u)*z_y -> y
interp_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='interp_out')
prior_loss, prior_nll_loss = self.compute_ce_loss(model, prior_decoder_out, sample, reduce=reduce)
post_loss, post_nll_loss = self.compute_ce_loss(model, post_decoder_out, sample, reduce=reduce)
interp_loss, interp_nll_loss = self.compute_ce_loss(model, interp_decoder_out, sample, reduce=reduce)
# d(x_i, y_i) - d(x_i, x_j) - d(y_i, y_j)
fuse_loss = self.compute_fuse_loss(encoder_out, reduce=reduce)
# As T-CVAE optimizes cross-entropy and KLD, cross-entropy loss should be computed at sentence level but not at token level to make the scale of the losses compatible.
assert self.args.sentence_avg == True
if self.args.sentence_avg:
# When args.sentence_avg == True, all losses directly used for optimization are the sum of losses computed at sentence level. This is for a case where other loss is added to the cross-entropy.
sample_size = sample['target'].size(0)
ntokens_per_sent = sample['ntokens'] / sample['target'].size(0)
# The losses are divided by the avg. length of the outputs to make the scales of NLL_loss and other losses equal. They are computed at sentence level.
prior_loss /= ntokens_per_sent
prior_nll_loss /= ntokens_per_sent
post_loss /= ntokens_per_sent
post_nll_loss /= ntokens_per_sent
interp_loss /= ntokens_per_sent
interp_nll_loss /= ntokens_per_sent
else:
sample_size = sample['ntokens']
loss = prior_loss + post_loss + self.interp_loss_weight * interp_loss + self.fuse_loss_weight * fuse_loss
# sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
prior_mean_norm = torch.sum(torch.norm(encoder_out['prior_mean'], dim=-1))
prior_std_norm = torch.sum(torch.norm(encoder_out['prior_std'], dim=-1))
post_mean_norm = torch.sum(torch.norm(encoder_out['post_mean'], dim=-1))
post_std_norm = torch.sum(torch.norm(encoder_out['post_std'], dim=-1))
loss_log = utils.item(loss.data) if reduce else loss.data
logging_output = {
'loss': loss_log,
'nll_loss': utils.item(post_nll_loss.data) if reduce else post_nll_loss.data,
'prior_nll_loss': utils.item(prior_nll_loss.data) if reduce else prior_nll_loss.data,
'post_nll_loss': utils.item(post_nll_loss.data) if reduce else post_nll_loss.data,
'interp_nll_loss': utils.item(interp_nll_loss.data) if reduce else interp_nll_loss.data,
'fuse_loss': utils.item(fuse_loss.data) if reduce else fuse_loss,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
logging_output_latent = {
'prior_mean_norm': utils.item(prior_mean_norm.data) if reduce else prior_mean_norm.data,
'prior_std_norm': utils.item(prior_std_norm.data) if reduce else prior_std_norm.data,
'post_mean_norm': utils.item(post_mean_norm.data) if reduce else post_mean_norm.data,
'post_std_norm': utils.item(post_std_norm.data) if reduce else post_std_norm.data,
}
logging_output.update(logging_output_latent)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
aggregated = {
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'prior_nll_loss': sum(log.get('prior_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'post_nll_loss': sum(log.get('post_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'interp_nll_loss': sum(log.get('interp_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'fuse_loss': sum(log.get('fuse_loss', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'prior_mu': sum(log.get('prior_mean_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'post_mu': sum(log.get('post_mean_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'prior_std': sum(log.get('prior_std_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'post_std': sum(log.get('post_std_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
}
return aggregated
| jack-and-rozz/speculative_sampling | fairseq/extensions/criterions/spacefusion_loss.py | spacefusion_loss.py | py | 10,265 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.mean",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.mul",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 17,
... |
18132737377 | # In this project, I'm going to desgin a user infterface for translating difference languages in real time
# Library Used: google trans, textblob
import googletrans
import textblob
import customtkinter
from tkinter import END
# Adding languages
language = googletrans.LANGUAGES
translator = googletrans.Translator()
lang_value = list(language.values())
lang_short = language.keys()
def Translate():
# Get the language to translate
to_language = to_language_menu.get()
# Perform the translation
from_text = from_language_input_box.get(1.0,END)
for idx, val in language.items():
if val == to_language:
lan_ = idx
words = translator.translate(from_text, dest=lan_)
# Show the translation
to_language_input_box.delete(0.0, END)
to_language_input_box.insert(0.0,words.text)
# 使用者介面
# 設定系統相關參數
customtkinter.set_appearance_mode("System")
customtkinter.set_default_color_theme("green")
# APP 整體框架
app = customtkinter.CTk()
app.title("Jay's Translator")
app.geometry("750x500")
# From-language selector
from_language_menu = customtkinter.CTkLabel(master=app,text="Please Enter any language:")
from_language_menu.grid(row=0, column=0,
padx=50, pady=20,
)
# To-language selector
to_language_menu = customtkinter.CTkOptionMenu(master=app,values=lang_value)
to_language_menu.grid(row=0, column=1,
padx=50, pady=20,
)
# to-language input box(Inputbox)
to_language_input_box = customtkinter.CTkTextbox(app, width=150, height=150)
to_language_input_box.grid(row=1, column=1,
padx=50, pady=20,
)
# from-language input box(Inputbox)
from_language_input_box = customtkinter.CTkTextbox(app, width=150, height=150)
from_language_input_box.grid(row=1, columns=1,
padx=50, pady=20,
)
# translate button
translate_button = customtkinter.CTkButton(app, text='Translate',command=Translate)
translate_button.grid(row=2, column=0,
padx=(180,0), pady=20, sticky='w',columnspan=3)
app.mainloop()
| JayChen1060920909/Projects | Real Time Translation.py | Real Time Translation.py | py | 2,191 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "googletrans.LANGUAGES",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "googletrans.Translator",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tkinter.END",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "tki... |
39614840706 | #!/usr/bin/env python3
import sys
import os
import time
from datetime import datetime
import textwrap
import argparse
import platform
try:
import influxdb
except ImportError:
print("Trying to Install required module: influxdb\n")
os.system('python3 -m pip install influxdb')
time.sleep(5)
def fioinput(ip, port, database, hostname):
client = influxdb.InfluxDBClient(host=ip, port=8086)
try:
client.ping()
client.create_database(database)
client.switch_database(database)
except:
print("!!Was unable to connect to the Influxdb!!\
\nPlease check that the IP address and port information is correct.\
\nKilling the fio session as well.\
\n")
os.system('pkill fio')
quit()
# minimal format found here: https://www.andypeace.com/fio_minimal.html
for line in sys.stdin:
fullfio_data = line.split(",")
fullfio_data = fullfio_data[0].split(";")
# Run info
terseversion = fullfio_data[0]
fioversion = fullfio_data[1]
jobname = fullfio_data[2]
# Read IO info
readtotalio = (int(fullfio_data[5]) / 1024)
readbandwidthio = (int(fullfio_data[6]) / 1024)
readiopsio = fullfio_data[7]
readpercent = float(fullfio_data[43].strip('%'))
# Read Submission Latency info
rdsubmissionmin = int(fullfio_data[9])
rdsubmissionmax = int(fullfio_data[10])
rdsubmissionmean = int(float(fullfio_data[11]))
rdsubmissiondeviation = int(float(fullfio_data[12]))
# Read Completion Latency info
rdcompletionmin = int(fullfio_data[13])
rdcompletionmax = int(fullfio_data[14])
rdcompletionmean = int(float(fullfio_data[15]))
rdcompletiondeviation = int(float(fullfio_data[16]))
# Read Total Latency info
rdtotalmin = int(fullfio_data[37])
rdtotalmax = int(fullfio_data[38])
rdtotalmean = int(float(fullfio_data[39]))
rdtotaldeviation = int(float(fullfio_data[40]))
# Write IO info
writetotalio = (int(fullfio_data[46]) / 1024)
writebandwidthio = (int(fullfio_data[47]) / 1024)
writeiopsio = fullfio_data[48]
writepercent = float(fullfio_data[84].strip('%'))
# Write Submission Latency info
wrsubmissionmin = int(fullfio_data[50])
wrsubmissionmax = int(fullfio_data[51])
wrsubmissionmean = int(float(fullfio_data[52]))
wrsubmissiondeviation = int(float(fullfio_data[53]))
# Write Completion Latency info
wrcompletionmin = int(fullfio_data[54])
wrcompletionmax = int(fullfio_data[55])
wrcompletionmean = int(float(fullfio_data[56]))
wrcompletiondeviation = int(float(fullfio_data[57]))
# Write Total Latency info
wrtotalmin = int(fullfio_data[78])
wrtotalmax = int(fullfio_data[79])
wrtotalmean = int(float(fullfio_data[80]))
wrtotaldeviation = int(float(fullfio_data[81]))
# IO depth distribution
iodepth01 = float(fullfio_data[92].strip('%'))
iodepth02 = float(fullfio_data[93].strip('%'))
iodepth04 = float(fullfio_data[94].strip('%'))
iodepth08 = float(fullfio_data[95].strip('%'))
iodepth16 = float(fullfio_data[96].strip('%'))
iodepth32 = float(fullfio_data[97].strip('%'))
iodepth64 = float(fullfio_data[98].strip('%'))
# Block size
# Bandwidth / IOPS
if readiopsio == "0":
readblocksize = float(0)
else:
readblocksize = round((int(readbandwidthio) / int(readiopsio)) * 1024, 1)
if writeiopsio == "0":
writeblocksize = float(0)
else:
writeblocksize = round((int(writebandwidthio) / int(writeiopsio)) * 1024, 1)
# Calculate percentage of read vs write IOPS
totaliops = int(readiopsio) + int(writeiopsio)
readiopspercentage = int(readiopsio) / int(totaliops)
writeiopspercentage = int(writeiopsio) / int(totaliops)
# CPU Usage
cpuuser = float(fullfio_data[87].strip('%'))
cpusystem = float(fullfio_data[88].strip('%'))
# print("Read IOPS % : "+str(readiopspercentage))
# print("Write IOPS % : "+str(writeiopspercentage))
current_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
print(current_time+" | Job Name: "+jobname+" | Read IOPS: "+readiopsio+" | Write IOPS: "+writeiopsio+" | Block(read/write): "+str(readblocksize)+" / "+str(writeblocksize), end='\r')
sys.stdout.flush()
json_body = [
{
"measurement": "FIO",
"tags": {
"runId": jobname,
"hostname": hostname
},
"time": current_time,
"fields": {
"Read_IOPS": int(readiopsio),
"Read_Percentage": readpercent,
"Read_Total_I/O_(MB)": readtotalio,
"Read_bandwidth_(MB/s)": readbandwidthio,
"Read_Latency_Submission_min": rdsubmissionmin,
"Read_Latency_Submission_max": rdsubmissionmax,
"Read_Latency_Submission_mean": rdsubmissionmean,
"Read_Latency_Submission_deviation": rdsubmissiondeviation,
"Read_Latency_Completion_min": rdcompletionmin,
"Read_Latency_Completion_max": rdcompletionmax,
"Read_Latency_Completion_mean": rdcompletionmean,
"Read_Latency_Completion_deviation": rdcompletiondeviation,
"Read_Latency_Total_min": rdtotalmin,
"Read_Latency_Total_max": rdtotalmax,
"Read_Latency_Total_mean": rdtotalmean,
"Read_Latency_Total_deviation": rdtotaldeviation,
"Write_IOPS": int(writeiopsio),
"Write_Percentage": writepercent,
"Write_Latency_Submission_min": wrsubmissionmin,
"Write_Latency_Submission_max": wrsubmissionmax,
"Write_Latency_Submission_mean": wrsubmissionmean,
"Write_Latency_Submission_deviation": wrsubmissiondeviation,
"Write_Latency_Completion_min": wrcompletionmin,
"Write_Latency_Completion_max": wrcompletionmax,
"Write_Latency_Completion_mean": wrcompletionmean,
"Write_Latency_Completion_deviation": wrcompletiondeviation,
"Write_Latency_Total_min": wrtotalmin,
"Write_Latency_Total_max": wrtotalmax,
"Write_Latency_Total_mean": wrtotalmean,
"Write_Latency_Total_deviation": wrtotaldeviation,
"Write_Total_I/O_(MB)": writetotalio,
"Write_bandwidth_(MB/s)": writebandwidthio,
"Read Block Size (KB)": readblocksize,
"Write Block Size (KB)": writeblocksize,
"CPU User": cpuuser,
"CPU System": cpusystem,
"IOdepthdist01": iodepth01,
"IOdepthdist02": iodepth02,
"IOdepthdist04": iodepth04,
"IOdepthdist08": iodepth08,
"IOdepthdist16": iodepth16,
"IOdepthdist32": iodepth32,
"IOdepthdist64": iodepth64,
"Read_IOPS_Percentage": readiopspercentage,
"Write_IOPS_Percentage": writeiopspercentage
}
}
]
client.write_points(json_body)
def main():
parser = argparse.ArgumentParser(
prog='fio_to_influxdb',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
The following options must be added to the fio command for this script to function
--status-interval=1
--minimal
Example usage:
fio instructionfile.fio --status-interval=1 --minimal | fio_to_influxdb.py
--
'''))
parser.add_argument("-ip", default='localhost',help="IP or DNS name of host running influxdb. Default is localhost", type=str)
parser.add_argument("-port", default='8086',help="Port used to connect to influxdb. Default is 8086", type=int)
parser.add_argument("-database", default='fio',help="Name of database created in influxdb. Default is fio", type=str)
parser.parse_args()
args = parser.parse_args()
print(\
"\nConnecting to influx database with the following parameters\n\
\tIP/DNS: "+args.ip+"\n\
\tPort: "+str(args.port)+"\n\
\tDatabase: "+args.database+"\n\
"
)
# Get OS host name
hostname = platform.uname()[1]
fioinput(args.ip, args.port, args.database, hostname)
print("\n\nJob complete\n")
main()
| philcanman/fio-to-influxdb | fio_to_influxdb.py | fio_to_influxdb.py | py | 9,043 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_nu... |
31131850381 | import jwt
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import requests
import time
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
# GLOBALS
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
last_token_expiration = 0
# send request to apple for token revoke
# MODEL
revokeURL = "https://appleid.apple.com/auth/revoke"
teamID = "5NMJ2A479W"
filename = "AuthKey_73YATAJ963.p8"
keyID = "73YATAJ963"
tokenType = "access_token"
class RevokeTokenModel(BaseModel):
clientID: str
token: str
tokenTypeGint: str
def generate_tokenv2(bundleID):
with open(filename, "r") as f:
private_key = f.read()
team_id = teamID
client_id = bundleID
key_id = keyID
validity_minutes = 20
timestamp_now = int(time.time())
timestamp_exp = timestamp_now + (60 * validity_minutes)
# Assuming `last_token_expiration` is a class variable defined somewhere else
# cls.last_token_expiration = timestamp_exp
data = {
"iss": team_id,
"iat": timestamp_now,
"exp": timestamp_exp,
"aud": "https://appleid.apple.com",
"sub": client_id
}
token = jwt.encode(
payload=data,
key=private_key.encode('utf-8'),
algorithm="ES256",
headers={"kid": key_id}
)
return token
def generate_token(bundleID):
with open(filename, "r") as f:
private_key = f.read()
team_id = teamID
client_id = bundleID
key_id = keyID
validity_minutes = 20
timestamp_now = int(time.time())
timestamp_exp = timestamp_now + (60 * validity_minutes)
data = {
"iss": team_id,
"iat": timestamp_now,
"exp": timestamp_exp,
"aud": "https://appleid.apple.com",
"sub": client_id
}
token = jwt.encode(payload=data, key=private_key, algorithm="ES256", headers={"kid": key_id})
return token
def revoke_token_request(client_secret: str, clientID: str, tokenTypeGint: str, token: str):
data = {
"client_id": clientID,
"client_secret": client_secret,
"token": token,
"token_type_hint": tokenTypeGint
}
response = requests.post(revokeURL, data=data)
print(response)
if response.status_code == 200:
return True
else:
# You can raise an HTTPException here if you want to handle the error differently
print(f"\n\nRESPONSE -> {response.text}\n\n")
with open("logs.txt", "w+") as f:
f.write(response.text)
print("ERROR")
raise HTTPException(status_code=response.status_code, detail=response.text)
@app.post("/revoke")
def revokeToken(token: str, clientID: str):
client_secret = generate_token(bundleID=bundleID)
with open("log1.txt", "w+") as f:
f.write(client_secret)
revoked = revoke_token_request(token=token, clientID=clientID, tokenTypeGint=tokenType, client_secret=client_secret)
return {"token_revoked": revoked}
apple_token_revocation_url = 'https://appleid.apple.com/auth/token'
@app.post('/revoke-token')
async def revoke_token(token: str):
if not token:
raise HTTPException(status_code=400, detail='Token is missing.')
try:
response = requests.post(
apple_token_revocation_url,
data={
'token': token,
'client_id': 'your_client_id',
'client_secret': 'your_client_secret',
}
)
print(f"STATUS CODE: {response.status_code}")
if response.status_code == 200:
return {'success': True}
else:
raise HTTPException(status_code=400, detail='Token revocation failed.')
except requests.RequestException as e:
raise HTTPException(status_code=500, detail='Internal server error.')
# if __name__ == "__main__":
# uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
# token = "eyJraWQiOiJZdXlYb1kiLCJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL2FwcGxlaWQuYXBwbGUuY29tIiwiYXVkIjoiY29tLmZvb3RiYWxsdGFya293LmNvbSIsImV4cCI6MTY5MTI2MTI0OCwiaWF0IjoxNjkxMTc0ODQ4LCJzdWIiOiIwMDE3NTguNmQ4ODZlMzQwNDkyNDA1ZThmODU0ZDkxZDRjZGMwNTguMTUyNyIsImNfaGFzaCI6InI2NjNWbTYxbTR0VkJfckxyRkZhSnciLCJlbWFpbCI6IjhmMnBza2t2dGhAcHJpdmF0ZXJlbGF5LmFwcGxlaWQuY29tIiwiZW1haWxfdmVyaWZpZWQiOiJ0cnVlIiwiaXNfcHJpdmF0ZV9lbWFpbCI6InRydWUiLCJhdXRoX3RpbWUiOjE2OTExNzQ4NDgsIm5vbmNlX3N1cHBvcnRlZCI6dHJ1ZX0.TkZtkIgljXOhCc1bp4YAx77yfZOBYz6iHDE3fxIi_l4oSjwOjM1xxUr9rkKEnOriJgHBChTop-CmGlM3zvh4taXwP_ZTb-xzQL9UeQtElM53f9l4w2j-PWfGrjxiX8Dyuyor-vbcxlKtUIcsVFIcDikHWQsI1iLYU40mn7x-399MdSFqyKqIarfk1P6TuBK3Fwf9EBYvPWrizXfFV1v5Kc-7p1mEbV3OChrMXEgLAvmhUWcFg95GKzhglbnHg2NOSWijeDfDFTuZC8EEPDplEhV86RzLi47jrksGReGQteVl8-LobLusceFrvRB-xAIWstEDl6al9SJ4dIAanGnBVA"
# bundleID = "com.footballtarkow.com"
# keyID = "73YATAJ963"
# client_secret = generate_tokenv2(bundleID)
# revoke_token_request(client_secret,bundleID,tokenType, token) | BoogieManN8/FootballTrainerHelper | main.py | main.py | py | 5,139 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 36,
"usage_type": "name"
},
{
"api... |
70929712188 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 17:20:25 2023
@author: Gilberto
"""
""
# streamlit_app.py
import io
import base64
import streamlit as st
import pandas as pd
from datetime import datetime
from mortgagestyle_v2 import MortgageStyle
from straightline_v2 import StraightLineAmortization
from SOFRDataExtractor import SOFRDataExtractor # Assuming the previous code is saved in this file
import streamlit as st
def homepage():
st.title("Amortization Calculator Home")
# Background Information
st.header("Background Information")
st.markdown("""
This application helps to generate an amortization schedule, which is a table detailing each periodic payment on an
amortizing loan.
**Types of Amortization:**
- **Mortgage Style:**
- **Hybrid Style:**
- **Straight Line:**
""")
# Application Features
st.header("Application Features")
st.markdown("""
- Calculate Mortgage Style, Hybrid Style, and Straight Line Amortizations.
- Supports both fixed and floating interest rates.
- Downloadable amortization schedule in Excel format.
""")
# How to Use
st.header("How to Use")
st.markdown("""
1. Enter the required details such as Settlement Date, Maturity Date, Notional Amount, etc.
2. Choose the type of Amortization: Mortgage Style, Hybrid Style, or Straight Line.
3. For floating rate, upload the SOFR data file and select the reset frequency.
4. Click on the "Generate Amortization" button to view the amortization table.
5. You can also download the table in Excel format using the provided link.
""")
# Details about SOFR_Data file
st.header("SOFR Data Formatting and Source")
st.markdown("""
**Formatting Requirements for the SOFR Data File:**
- The file should be in `.xls` or `.xlsx` format.
- Ensure the file contains columns labeled 'Date' and 'Rate'.
- Data should be sorted chronologically.
- Rates should be in decimal form (e.g., 0.03 for 3%).
-A sample format for the data can be found in github
If you don't have the SOFR data file, you can obtain the required data from:
[Pensford Resources - Forward Curve](https://www.pensford.com/resources/forward-curve)
""")
# Details about contacting
st.header("Contact info")
st.markdown("""
**If you find any errors or have any question please feel free to reach out via LinkedIn:**
https://www.linkedin.com/in/gil-de-la-cruz-vazquez-62049b125/""")
def apply_floating_rate(df, f, spread):
for i in range(len(df)):
if i == 0:
continue # No change for the first period
df.at[i, 'Period Interest'] = round(df.at[i, 'Outstanding Balance'] * (f(i) + spread) / 12,2) # Applying spread to the SOFR rate
df.at[i, 'Period Payment'] = round(df.at[i, 'Period Interest'] + df.at[i, 'Principal Payment'],2)
df.at[i, 'Outstanding Balance'] = df.at[i-1, 'Outstanding Balance'] - df.at[i, 'Principal Payment']
return df
def main():
st.title("Amortization Calculator")
# Input parameters
settlement_date = st.date_input("Settlement Date", datetime(2022, 8, 1))
maturity_date = st.date_input("Maturity Date", datetime(2032, 8, 1))
first_payment_date = st.date_input("First Payment Date", datetime(2022, 9, 1))
notional_amount = st.number_input("Notional Amount", value=600000.0, step=5000.0)
rate = st.number_input("Rate (%)", value=7.03, step=0.01)
basis_numerator = st.selectbox("Basis Numerator", ["ACT", "30"])
basis_denominator = st.selectbox("Basis Denominator", [360, 365])
payment_frequency = st.selectbox("Frequency", ["1M", "3M", "6M"])
amortization_years = st.number_input("Amortization Years", value=25, step=1)
# Output format selection
output_format = st.selectbox("Output Format", ["Simple Amortization", "P+I"])
# Choose amortization type
amortization_type = st.selectbox("Choose Amortization Type", ["Mortgage Style", "Hybrid Style", "Straight Line"])
rate_type = st.selectbox("Rate Type", ["Fixed", "Floating"])
if rate_type == "Floating":
sofr_file = st.file_uploader("Upload SOFR Data File", type=['xls', 'xlsx'])
spread = st.number_input("Enter Spread (%)", min_value=0.0, max_value=10.0, value=0.0, step=0.1) / 100.0 # Spread in percentage
if sofr_file:
data_extractor = SOFRDataExtractor(sofr_file)
months_duration = st.selectbox("Reset Frequency", ["1M", "3M", "6M"])
if months_duration == "1M":
f = data_extractor.interpolate_curve(data_extractor.data_1m)
elif months_duration == "3M":
f = data_extractor.interpolate_curve(data_extractor.data_3m)
else: # For 6M, using 3M data for simplicity. Ideally, you'd have 6M data
f = data_extractor.interpolate_curve(data_extractor.data_3m)
if st.button("Generate Amortization"):
if amortization_type == "Mortgage Style":
mortgage = MortgageStyle(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = mortgage.create_mortgage_style_amort()
elif amortization_type == "Hybrid Style":
mortgage = MortgageStyle(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = mortgage.create_hybrid_style_amort()
else:
sla = StraightLineAmortization(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = sla.generate_schedule()
if rate_type == "Floating":
df = apply_floating_rate(df, f, spread)
df['Interest Rate (%)'] = (df['Period Interest'] / df['Outstanding Balance'].shift(1)) * 12 * 100
df['Interest Rate (%)'] = df['Interest Rate (%)'].round(2)
# Calculate additional columns for P+I
if 'Period Payment' in df.columns and 'Outstanding Balance' in df.columns:
df['Remaining Notional Balance'] = df['Outstanding Balance'] - df['Principal Payment']
if 'Period Payment' in df.columns and 'Principal Payment' in df.columns:
df['Period Interest'] = df['Period Payment'] - df['Principal Payment']
# Customize output format
if output_format == "Simple Amortization":
columns = ['Period Start Date', 'Period End Date', 'Outstanding Balance']
else: # "P+I"
columns = ['Payment Number', 'Period Start Date', 'Period End Date', 'Outstanding Balance',
'Period Payment', 'Principal Payment', 'Period Interest', 'Remaining Notional Balance']
if rate_type == "Floating":
df = apply_floating_rate(df, f, spread)
df['Interest Rate (%)'] = (df['Period Interest'] / df['Outstanding Balance'].shift(1)) * 12 * 100
df['Interest Rate (%)'] = df['Interest Rate (%)'].round(2)
# Set the first period's interest rate to the SOFR rate for the first period plus spread
first_period_rate = f(0) + spread
df.at[0, 'Interest Rate (%)'] = round(first_period_rate*100,2) # Convert to annual rate in percentage
columns.append('Interest Rate (%)') # Only add this column if rate_type is Floating
df = df[columns]
# Display the dataframe
st.write(df)
# Download link for Excel
towrite = io.BytesIO()
downloaded_file = df.to_excel(towrite, encoding='utf-8', index=False, engine='openpyxl')
towrite.seek(0)
b64 = base64.b64encode(towrite.read()).decode()
st.markdown(f'<a href="data:application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;base64,{b64}" download="amortization.xlsx">Download Excel File</a>', unsafe_allow_html=True)
if __name__ == "__main__":
page = st.sidebar.radio("Select Page", ["Home", "Amortization Calculator"])
if page == "Home":
homepage()
else:
main()
| gdelacruzv/Amortization_calculator | Amortization_app_V4.py | Amortization_app_V4.py | py | 8,584 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.header"... |
3844969159 | #! /usr/bin/python2
import pefile
import os
import array
import math
import pickle
import time
# from sklearn.externals import joblib
import joblib
import sys
from .ModulePredict import data_extraction
from .XKendworld import pure_import
import pymongo
import hashlib
myclient = pymongo.MongoClient('DATABASE_URL')
mydb = myclient["DATABASE"]
mycol = mydb["COLLECTION"]
def checkpre(filepath):
    """Classify the PE file at *filepath* as 'Malicious' or 'Legitimate'.

    Loads the pickled binary classifier and its feature-name list, extracts
    the same features from the sample, and maps the 0/1 prediction to a
    label (0 -> 'Malicious', 1 -> 'Legitimate').
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    clf = joblib.load(os.path.join(base_dir, 'classifier/classifier.pkl'))
    # BUG FIX: features.pkl was opened relative to the current working
    # directory while classifier.pkl was resolved relative to this module,
    # so the function broke when run from any other directory.  Resolve
    # both against the module directory.
    with open(os.path.join(base_dir, 'classifier/features.pkl'), 'rb') as f:
        features = pickle.load(f)
    data = data_extraction(repathfile(filepath))
    pe_features = [data[name] for name in features]
    res = clf.predict([pe_features])[0]
    return ['Malicious', 'Legitimate'][res]
def hashcheck(filepath):
    """Return the MD5 hex digest of the file at *filepath*.

    BUG FIX: the original also parsed the file with pefile (result unused)
    and never closed the open file handle; both are removed here.
    """
    with open(repathfile(filepath), 'rb') as fp:
        data = fp.read()
    return hashlib.md5(data).hexdigest()
def procedureXK001(filepath):
    """Predict the malware family of the PE file at *filepath*.

    Returns one of the nine family labels the family classifier was
    trained on.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    clf = joblib.load(os.path.join(base_dir, 'classifierxk/classifierxk.pkl'))
    # BUG FIX: featuresxk.pkl was opened relative to the CWD while the
    # classifier was module-relative; resolve both against the module dir.
    with open(os.path.join(base_dir, 'classifierxk/featuresxk.pkl'), 'rb') as f:
        features = pickle.load(f)
    data = pure_import(repathfile(filepath))
    pe_features = [data[name] for name in features]
    res = clf.predict([pe_features])[0]
    return ['Adware', 'Backdoor', 'Keylogger', 'Ransomware', 'Rootkit',
            'Spyware', 'Trojan', 'Virus', 'Worm'][res]
def repathfile(filepath):
    """Resolve *filepath* (an absolute-style '/...' suffix) against this module's directory."""
    # BUG FIX (idiom): the original wrapped the concatenation in a
    # single-argument os.path.join(), which is a no-op; the actual behavior
    # is plain string concatenation and is kept.
    return os.path.dirname(os.path.realpath(__file__)) + filepath
def savestorage(filepath):
    """Delete the stored sample at *filepath* (resolved against the module dir).

    os.remove returns None, so this always returns None; the return is kept
    for interface compatibility.
    """
    target = repathfile(filepath)
    return os.remove(target)
def sample_extraction(filepath):
    """Collect display metadata for the PE sample at *filepath*.

    Returns a list of: size in bytes, architecture string (omitted when the
    machine type is neither i386 nor x86-64 -- NOTE(review): that shifts
    the positions of the following entries; confirm callers don't index by
    position), MD5/SHA1/SHA256 hex digests, and the PE header timestamp
    rendered as GMT text.
    """
    pe = pefile.PE(repathfile(filepath))
    # BUG FIX: the file handle was previously leaked; use a context manager.
    with open(repathfile(filepath), 'rb') as fp:
        data = fp.read()
    y = [len(data)]
    if pe.FILE_HEADER.Machine == 0x14C:       # IMAGE_FILE_MACHINE_I386
        y.append("Architecture : 32 Bits Binary")
    elif pe.FILE_HEADER.Machine == 0x8664:    # IMAGE_FILE_MACHINE_AMD64
        y.append("Architecture : 64 Bits Binary")
    y.append(hashlib.md5(data).hexdigest())
    y.append(hashlib.sha1(data).hexdigest())
    y.append(hashlib.sha256(data).hexdigest())
    val = pe.FILE_HEADER.TimeDateStamp
    y.append(time.asctime(time.gmtime(val)))
    return y
def db_saving(filepath):
    """Extract metadata and classifier verdicts for the sample at *filepath*,
    insert the record into the MongoDB collection, and return the document.
    """
    pe = pefile.PE(repathfile(filepath))
    # NOTE(review): this handle is never closed -- consider a `with` block.
    fp = open(repathfile(filepath),'rb')
    data = fp.read()
    dbstr= {}
    dbstr["dataSize"] = len(data)
    # Architecture from the COFF machine type; key absent for other machines.
    if pe.FILE_HEADER.Machine == 0x14C:
        dbstr["arch"] = "32 Bits Binary"
    elif pe.FILE_HEADER.Machine == 0x8664:
        dbstr["arch"] = "64 Bits Binary"
    dbstr["md5"] = hashlib.md5(data).hexdigest()
    dbstr["sha1"] = hashlib.sha1(data).hexdigest()
    dbstr["sha256"] = hashlib.sha256(data).hexdigest()
    val = pe.FILE_HEADER.TimeDateStamp
    dbstr["timestamp"] =time.asctime(time.gmtime(val))
    # Binary verdict from the legitimacy classifier.
    if checkpre(filepath) == "Legitimate":
        dbstr['status'] = "Legitimate"
    else :
        dbstr['status'] = "Malicious"
    # Family label is computed even for legitimate samples.
    dbstr['type'] = procedureXK001(filepath)
    x = mycol.insert_one(dbstr)
    return dbstr
| fadzniaidil/imawa | malwr/CheckingFile.py | CheckingFile.py | py | 3,102 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
15912701941 |
"""
Class to deal with the pooling problem (differing amounts of tweets for various days)
"""
import torch
from torch import nn, tensor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
# will we have a device setting here? to ensure that the data is being processed? (why is this bad practice)
class pooling(nn.Module):
    """Adaptive max-pooling over variable-sized per-day tweet sets.

    Each day may carry a different number of tweet embeddings; pooling each
    day down to a single (1, dim) vector yields a fixed (lag, dim) tensor
    per sample, so downstream layers see uniform shapes.
    """

    def __init__(self, dim, lag):
        super(pooling, self).__init__()
        self.dim = dim  # embedding dimension preserved by the pool
        self.lag = lag  # number of days per sample
        self.adaptive_pooling = nn.AdaptiveMaxPool2d((1, dim))

    def forward(self, input):
        # input[0]: iterable of samples; each sample is an iterable of
        # per-day tensors of shape (num_tweets, dim) -- TODO confirm caller.
        #
        # BUG FIX / idiom: the original tested tensors with `== None`
        # (which is an elementwise comparison on tensors, fragile across
        # torch versions) and called torch.cat inside the loop (quadratic);
        # accumulate in Python lists and concatenate once instead.
        pooled_samples = []
        for x_val in input[0]:
            day_vectors = [
                self.adaptive_pooling(day.view(1, day.shape[0], day.shape[1]))
                for day in x_val
            ]
            pooled_samples.append(
                torch.cat(day_vectors, 1).view(1, self.lag, self.dim)
            )
        return torch.cat(pooled_samples, 0).to(device)
{
"api_name": "torch.device",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module"... |
8224649664 | import json, requests, io
import pandas as pd
import numpy as np
import sys, argparse
#Managing input
# Command-line interface: -t/--type selects which sample cohort to download.
parser = argparse.ArgumentParser(description='Script to download data of lung cancer from TCGA')
parser.add_argument('-t', '--type',
                    help='Sample type. Ej: NAD',
                    # BUG FIX: `required` was the *string* 'True', which only
                    # worked because any non-empty string is truthy; pass the bool.
                    required=True,
                    choices=['NAD', 'TAD', 'NSC', 'TSC'],
                    default='NAD')
results = parser.parse_args(sys.argv[1:])
# The chosen sample type also serves as the file-name prefix below.
filename = results.type
# Derive the output folder layout from the sample type: AD types live under
# 'Adeno', SC types under 'Squamous'; the subfolder repeats the type itself.
_histology = {
    "NAD": "Adeno",
    "TAD": "Adeno",
    "NSC": "Squamous",
    "TSC": "Squamous",
}
dirname = _histology[filename]
dirname2 = filename
# Main program: join each TCGA case with its miRNA file metadata from GDC.
cases = pd.read_csv("Data/" + dirname + "/" + dirname2 + "/" + filename + "-cases.tsv", sep='\t')
mirna_fid = []
mirna_fname = []
mirna_count = []
for index, row in cases.iterrows():
    print(row['case'])
    # Load the JSON query template and substitute the current case id.
    with open("json/qbyMIRNA.json", 'r') as f:
        filters = json.load(f)
    filters['content'][0]['content']['value'] = row['case']
    cases_endpt = "https://api.gdc.cancer.gov/files"
    params = {
        "filters": json.dumps(filters),
        "fields": "file_name,data_format,file_id",
        "format": "TSV",
        "size": "10000" #HACK: modificar si los casos superan los hints
    }
    response = requests.get(cases_endpt, params = params)
    try:
        # Record the first matching file; the count keeps the total hits.
        df = pd.read_csv(io.StringIO(response.text), sep='\t', header=0)
        mirna_count.append(df.shape[0])
        mirna_fid.append(df.loc[0, "file_id"])
        mirna_fname.append(df.loc[0, 'file_name'])
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to Exception (or pandas parse errors) would be safer.
        df = np.nan
        mirna_count.append(0)
        mirna_fid.append(df)
        mirna_fname.append(df)
cases['mirna_count'] = mirna_count
cases['mirna_fname'] = mirna_fname
cases['mirna_fid'] = mirna_fid
cases.rename(columns={'fid':'rnaseq_fid'}, inplace = True)
cases.to_csv("Data/" + dirname + "/" + dirname2 + "/" + filename + "-mirna.tsv", sep="\t", index = False)
| josemaz/lung-mirnas | py/casemirna.py | casemirna.py | py | 1,900 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.load",
... |
25070333505 | from django.db import models
from django.contrib import admin
from django import forms
import purplship.server.providers.models as carriers
def model_admin(model):
    """Build a ModelAdmin subclass for *model*, restricted to system-owned rows."""
    class _Admin(admin.ModelAdmin):
        list_display = ("__str__", "test", "active")
        exclude = ["active_users", "metadata"]
        # Render CharFields read-only until focused, to defeat browser
        # autofill and password managers (LastPass honors data-lpignore).
        formfield_overrides = {
            models.CharField: {
                "widget": forms.TextInput(
                    attrs={
                        "type": "text",
                        "readonly": "true",
                        "class": "vTextField",
                        "data - lpignore": "true",
                        "autocomplete": "keep-off",
                        "onfocus": "this.removeAttribute('readonly');",
                    }
                )
            }
        }
        def get_queryset(self, request):
            # Only show system-level connections (rows with no owner).
            query = super().get_queryset(request)
            return query.filter(created_by=None)
    # NOTE(review): model.__class__.__name__ names the model's *metaclass*,
    # not the model itself; confirm the generated class name is intended.
    return type(f"{model.__class__.__name__}Admin", (_Admin,), {})

# Register one generated admin class per carrier settings model.
for name, model in carriers.MODELS.items():
    admin.site.register(model, model_admin(model))
| danh91/purplship | server/modules/core/purplship/server/providers/admin.py | admin.py | py | 1,114 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 13,
"usage_type": "attribute"
},
... |
28900645911 |
from keras.models import *
from keras.layers import *
import keras
from dlblocks.keras_utils import allow_growth , showKerasModel
allow_growth()
from dlblocks.pyutils import env_arg
import tensorflow as tf
from Utils import Trainer
class GiretTwoCell(keras.layers.Layer):
    """RNN cell that soft-mixes two parent LSTM cells via per-step gates.

    The first two features of each input step are the gate values; the rest
    is the actual input fed to both parent cells.  The emitted state is a
    convex combination of cell 1, cell 2, and the previous state.
    """
    def __init__(self, cell_1 , cell_2 , nHidden , **kwargs):
        # cell_1 / cell_2: pre-built LSTM cells shared with the parent models.
        self.cell_1 = cell_1
        self.cell_2 = cell_2
        self.nHidden = nHidden
        self.state_size = [nHidden,nHidden]
        super(GiretTwoCell, self).__init__(**kwargs)
    def build(self, input_shape):
        nHidden = self.nHidden
        # Input shape minus the two leading gate columns.
        input_shape_n = ( input_shape[0] , input_shape[1]- 2 )
        # print "pp", input_shape_n
        # self.cell_1.build(input_shape_n)
        # self.cell_2.build(input_shape_n)
        # Adopt the parents' weight lists so this layer trains them too.
        self._trainable_weights += ( self.cell_1.trainable_weights )
        self._trainable_weights += ( self.cell_2.trainable_weights )
        self._non_trainable_weights += ( self.cell_1.non_trainable_weights )
        self._non_trainable_weights += ( self.cell_2.non_trainable_weights )
        self.built = True
    def call(self, inputs, states):
        nHidden = self.nHidden
        # Columns 0 and 1 carry the two mixing gates for this timestep.
        gate_val_1 = inputs[ : , 0:1]
        gate_val_2 = inputs[ : , 1:2]
        inputs = inputs[ : , 2: ]
        gate_val_1 = K.repeat_elements(gate_val_1 , nHidden , -1 ) # shape # bs , hidden
        gate_val_2 = K.repeat_elements(gate_val_2 , nHidden , -1 ) # shape # bs , hidden
        _ , [h1 , c1 ] = self.cell_1.call( inputs , states )
        _ , [h2 , c2 ] = self.cell_2.call( inputs , states )
        # Convex mix; the residual weight keeps the previous state unchanged.
        h = gate_val_1*h1 + gate_val_2*h2 + (1 - gate_val_1 - gate_val_2 )*states[0]
        c = gate_val_1*c1 + gate_val_2*c2 + (1 - gate_val_1 - gate_val_2 )*states[1]
        return h, [h , c ]
class GIRNet_SeqLab(Trainer):
    """GIRNet sequence labeler: monolingual Hindi/English taggers plus a
    gated code-mixed branch that interpolates their LSTM cells."""
    def build_model(self):
        """Build the three-input / three-output Keras model into self.model."""
        config = self.config
        # One embedding table shared by all three inputs.
        embed = Embedding( self.config['vocab_size'] , self.config['embed_dim'] , mask_zero=True)
        rnn_hi = LSTM( self.config['nHidden'] , return_sequences=True )
        rnn_en = LSTM( self.config['nHidden'] , return_sequences=True )
        # en
        inp_en = Input(( self.config['sent_len'] , ))
        x = embed(inp_en)
        x = rnn_en( x )
        out_en = TimeDistributed(Dense( config['n_class_en'] , activation='softmax'))(x)
        # hi
        inp_hi = Input(( self.config['sent_len'] , ))
        x = embed(inp_hi)
        x = rnn_hi( x )
        out_hi = TimeDistributed(Dense( config['n_class_hi'] , activation='softmax'))(x)
        # Code-mixed branch: a gate cell interpolating the two trained cells.
        cell_combined = GiretTwoCell( rnn_hi.cell , rnn_en.cell , self.config['nHidden'] )
        inp_enhi = Input(( self.config['sent_len'] , ))
        x = embed(inp_enhi )
        x_att = x
        # BiLSTM produces per-step gate logits (3-way softmax).
        x_att = Bidirectional(LSTM(32 , return_sequences=True))( x )
        bider_h = x_att
        x_att = TimeDistributed( Dense(3, activation='softmax') )(x_att)
        # Keep only the two cell gates; the dropped prob is the keep-state weight.
        x_att = Lambda(lambda x : x[... , 1: ])(x_att)
        x = Concatenate(-1)([x_att , x ])
        x = RNN(cell_combined , return_sequences=True )( x )
        out_enhi = TimeDistributed(Dense( self.config['n_class_enhi'] , activation='softmax'))(x)
        self.model = Model( [inp_hi , inp_en , inp_enhi ] , [ out_hi , out_en , out_enhi ] )
        Trainer.build_model( self )
# jjj
"""
config = {}
config['epochs'] = 4
config['dataset'] = "/tmp/postag_prepped.h5"
config['exp_name'] = 'pos_girnet_1l'
config['embed_dim'] = 50
config['vocab_size'] = 30003
config['nHidden'] = 100
config['sent_len'] = 150
config['n_class_en'] = 45
config['n_class_hi'] = 25
config['n_class_enhi'] = 19
model = GIRNet_SeqLab( exp_location="./ttt" , config_args = config )
model.train()
""" | divamgupta/mtl_girnet | sequence_labeling/girnet.py | girnet.py | py | 3,916 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "dlblocks.keras_utils.allow_growth",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "Utils.Trainer",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "Util... |
39005103669 | #!/usr/local/bin/python
# The previous line (which must be the first one to work) makes the script self-executing,
# assuming that the system has the Python interpreter at path /usr/local/bin/python.
# This wants to be run in Python 3.
# Reference Pre-Processor
# Given: A string reference
# An integer horizon, which tells us how far to look ahead if we do look ahead
# Return: A suffix array
#
# by Moyaccercchi, 19th of Apr 2015
#
# version 2:
# allowing two-SNIPs for non-nested graphs as long as they are not so close to each other
# that several two-SNIPs appear in the same virtual read
import collections
# Job description from in.txt: line 1 = reference path, line 2 = horizon.
fo = open('in.txt', 'r')
referencepath = fo.readline().strip()
horizon = int(fo.readline().strip())
fo.close()
def loadTextFromFile(filepath):
    """Read a reference string from *filepath*.

    For '.fasta' files the first line is a comment and is skipped; the
    remaining lines are stripped and concatenated.  For any other file,
    only the first line (stripped) is returned.
    """
    # Idiom fix: a context manager guarantees the handle closes even if a
    # read raises (the original closed manually on the success path only).
    with open(filepath, 'r') as fhandle:
        lastread = fhandle.readline().strip()
        if filepath[-6:].lower() == '.fasta':
            # The first read consumed the FASTA comment; accumulate the rest.
            res = ''
            while lastread != "":
                lastread = fhandle.readline().strip()
                res += lastread
        else:
            res = lastread
    return res
def generate_suffix_array(referencepath, horizon):
    """Build a k-mer -> comma-separated start positions map over the reference.

    FASTA and plain STPU references are indexed with fixed-length windows of
    *horizon* characters.  STPU references containing a '(X|Y)' two-SNIP
    group are expanded into both variants around each group; the window is
    temporarily widened by 4 to cover the '(X|Y)' syntax, and only one
    two-SNIP per window is supported (see the worked example below).
    """
    # defaultdict('') lets us append "pos," without an existence check.
    ret = collections.defaultdict(lambda: '')
    reference = loadTextFromFile(referencepath)
    if (referencepath[-6:].lower() == '.fasta'):
        # string reference in FASTA
        for i in range(0, len(reference)):
            ret[reference[i:i+horizon]] += str(i) + ","
    else:
        if '(' in reference:
            # (two-SNIP, non-nested) graph reference in STPU
            i = 0;
            lr = len(reference);
            while i < lr:
                # ATTENTION!
                # So far, this assumes that there only ever is one two-SNIP per read;
                # having multiple two-SNIPs in the same read requires an a bit more
                # elaborate approach. =)

                # imagine the following reads as [    ] and the following reference:
                #
                #    ...AGAGA(T|C)AGAGA...
                #  [    ]                    <- read AGAGA without graph
                #
                #    ...AGAGA(T|C)AGAGA...
                #    [    ]                  <- read GAGA, find '(' in last position, expand by 4
                #    [        ]              <- read GAGAT and GAGAC
                #
                #    ...AGAGA(T|C)AGAGA...
                #      [        ]            <- read AGATA and AGACA
                #
                #    ...AGAGA(T|C)AGAGA...
                #        [        ]          <- read GATAG and GACAG
                #
                #    ...AGAGA(T|C)AGAGA...
                #          [        ]        <- read ATAGA and ACAGA
                #
                #    ...AGAGA(T|C)AGAGA...
                #            [        ]      <- read TAGAG and CAGAG
                #
                # instead of i+1, do i+5 (or i+4 and then i+1 due to the loop)
                # also, unexpand
                #
                #    ...AGAGA(T|C)AGAGA...
                #                [    ]      <- read AGAGA, unexpanded
                #
                rf = reference[i:i+horizon]

                if rf[len(rf)-1] == '(':
                    horizon += 4
                    rf = reference[i:i+horizon]

                if '(' in rf:
                    rfs = [];
                    grStart = rf.find("(", 0, len(rf));
                    # Emit both SNIP variants: '(X|Y)' -> X-variant and Y-variant.
                    rfs.append(rf[0:grStart] + rf[grStart+1] + rf[grStart+5:len(rf)]);
                    rfs.append(rf[0:grStart] + rf[grStart+3] + rf[grStart+5:len(rf)]);
                    for rfline in rfs:
                        ret[rfline] += str(i) + ","
                else:
                    ret[rf] += str(i) + ","

                if rf[0] == '(':
                    # Window starts on the group: shrink back and jump past it.
                    horizon -= 4
                    i += 4

                i += 1
        else:
            # string reference in STPU
            for i in range(0, len(reference)):
                ret[reference[i:i+horizon]] += str(i) + ","

    return ret
def dicttoadjacency(ourdict):
    """Render a suffix map as sorted 'kmer -> positions' adjacency lines."""
    # Each value carries a trailing comma from accumulation; strip it here.
    edges = sorted(f"{src} -> {dst[:-1]}" for src, dst in ourdict.items())
    return '\n'.join(edges)
# Build the suffix map, format it as adjacency lines, and write it out.
res = dicttoadjacency(generate_suffix_array(referencepath, horizon))

fo = open('out.txt', 'w')
fo.write(res)
fo.close()
| Moyaccercchi/bio-info-graph | python/2_reference_preprocessor/e2.py | e2.py | py | 4,348 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 51,
"usage_type": "call"
}
] |
22895124073 | import serial
import getch
# Open the Pi's primary UART at the rover's expected baud rate.
serialport = serial.Serial("/dev/ttyS0")
serialport.baudrate = 115200

# Table-driven key handling replaces the original duplicated if/elif chain,
# which mixed two comparison styles ('"W" == x.upper()' vs 'x == "A" or
# x == "a"').  Command format: "<left speed><right speed>15+00".
COMMANDS = {
    "W": "+100+10015+00",  # forwards
    "S": "-250-25015+00",  # backwards
    "A": "-150+15015+00",  # left
    "D": "+150-15015+00",  # right
    "H": "+000+00015+00",  # stop
}

while True:
    key = getch.getch().upper()
    if key not in COMMANDS:
        # Any other key ends the control loop without sending a command.
        break
    serialport.write(COMMANDS[key].encode())
| SinaRabiee/Digital_LAB_SSH | ssh-raspberry.py | ssh-raspberry.py | py | 597 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "getch.getch",
"line_number": 8,
"usage_type": "call"
}
] |
5556252937 | from data.legacy_datagen import eddy_forcing,spatial_filter_dataset
from data.high_res_dataset import HighResCm2p6
from constants.paths import FINE_CM2P6_PATH,TEMPORARY_DATA
from utils.xarray_oper import plot_ds,fromtorchdict
from data.load import load_grid,load_xr_dataset
import xarray as xr
import os
import numpy as np
def just_filtering(u_v_dataset, grid_data, scale_filter):
    # Thin wrapper: apply only the spatial filter, skipping forcing computation.
    return spatial_filter_dataset(u_v_dataset, grid_data, scale_filter)
def main():
    """Compare coarse-grained forcings from the legacy pipeline (eddy_forcing)
    against HighResCm2p6, and plot both plus their log10 absolute error."""
    path = FINE_CM2P6_PATH(True,False)
    # ds = xr.open_zarr(path).isel(time = [0,])
    sigma = 4
    # ds = ds.drop('surface_temp').drop('xt_ocean yt_ocean'.split())
    # grid_data = load_grid(ds.copy(),spacing = "asdf")
    # Restrict to a 1000x1000 window on both u- and t-grids.
    isel_dict = {
        key + v :slice(1500,2500) for key in 'u t'.split() for v in 'lon lat'.split()
    }
    # ds = ds#.isel(**isel_dict)
    # grid_data = grid_data#.isel(**isel_dict)
    ds,_ = load_xr_dataset('--spacing long_flat --mode data'.split())
    ds = ds.isel(**isel_dict)
    # Rename to the legacy pipeline's expected coordinate/variable names.
    grid_data = ds.rename(
        {'ulat':'yu_ocean','ulon':'xu_ocean','u':'usurf','v':'vsurf'}
    ).isel(depth = 0,time = [0]).drop(['temp','dxt','dyt']).drop(['tlat','tlon'])
    ds1 = grid_data.drop('dxu dyu'.split())
    forces = eddy_forcing(ds1,grid_data,sigma)
    rename = {'yu_ocean':'lat','xu_ocean':'lon',\
        'usurf':'u','vsurf':'v','S_x':'Su','S_y':'Sv'}
    # rename1 = {'yu_ocean':'ulat','xu_ocean':'ulon',\
    #     'yt_ocean':'tlat','xt_ocean':'tlon',\
    #     'usurf':'u','vsurf':'v','surface_temp':'temp'}
    forces = forces.rename(
        rename
    ).isel(time = 0)
    # path1 = os.path.join(TEMPORARY_DATA,'arthur_data.nc')
    # forces.to_netcdf(path1)
    # ds = grid_data.rename(rename1)
    # ds['depth'] = [0]
    # New pipeline: gaussian-filtered coarse-graining of the same window.
    hrcm = HighResCm2p6(ds,sigma,filtering = 'gaussian')
    data_vars,coords = hrcm[0]
    x = xr.Dataset(data_vars = data_vars,coords = coords)
    x = x.isel(time = 0,depth = 0).drop('time depth'.split())
    # print(x)
    # return
    plot_ds(np.log10(np.abs(x)),'cem_forces.png',ncols = 3,)
    x = x.drop('Stemp temp'.split())
    # Prefix new-pipeline variables so both can be merged for side-by-side plots.
    x1 = x.rename(
        {key:'cem'+key for key in x.data_vars.keys()}
    )
    f = xr.merge([x1,forces])
    plot_ds(f,'arthur_forces.png',ncols = 3,)
    err = np.log10(np.abs(x - forces))
    plot_ds(err,'arthur_forces_err.png',ncols = 3,)

if __name__ == '__main__':
    main()
{
"api_name": "data.legacy_datagen.spatial_filter_dataset",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "constants.paths.FINE_CM2P6_PATH",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "data.load.load_xr_dataset",
"line_number": 26,
"usage_type": "... |
11899942747 | import requests
import json
import datetime
# Load the list of days for which we already have data.
with open("days.txt", "r") as days:
    completed_days = days.read()

# Today's date formatted as YYYY-MM-DD.
today = str(datetime.datetime.now()).split(" ")[0]

# Only query the API if today's data has not been fetched yet.
if today not in completed_days:
    auth_key = input("Enter your own API key: ")
    # Request wind-speed data for Villers-sur-Mer from the Stormglass API.
    response = requests.get(
        "https://api.stormglass.io/v2/weather/point",
        params={
            # Villers-sur-Mer : 49.32195479823806, -0.011785196637717673
            "lat": 49.322,
            "lng": -0.012,
            "params": "windSpeed",
        },
        headers={
            "Authorization": auth_key
        }
    )
    # Persist the JSON payload to a dated file.
    json_data = response.json()
    filename = "wind_villers_"+str(datetime.datetime.now())[0:10]+".json"
    # BUG FIX: the explicit close() calls inside the `with` blocks were
    # redundant (the context manager already closes) and have been removed;
    # writelines(...) on a single string is replaced by plain write().
    with open(filename, "w") as data:
        json.dump(json_data, data, indent=4)
        data.write("\n")
    # Record today's date so the next run skips the API call.
    with open("days.txt", "a") as days:
        days.write(today + "\n")
| Aarrn33/auto-wind-importer | get_today.py | get_today.py | py | 1,312 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
71455781947 | from flask import Flask,flash, render_template, url_for, request, redirect
import googleapiclient.discovery
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from gtts import gTTS
import heapq
import nltk
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk import pos_tag
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = "AIzaSyDp9zqixqm846mM_kH9LyNsUp95IMNMfiM"
import numpy as np
import pandas as pd
import re
import os
import tensorflow as tf
import tensorflow_hub as hub
import re
from numpy import array
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.models import load_model
from keras.models import Sequential
import demoji
import re
from textblob import TextBlob
lemmatizer=WordNetLemmatizer()
word_to_id = imdb.get_word_index()
def videoidfetcher(link):
    """Extract the 'v' query parameter (video id) from a YouTube watch URL.

    Raises KeyError when the URL carries no 'v' parameter.
    """
    # BUG FIX: the original referenced `urlparse` without importing it
    # (NameError on any call); import the Python 3 module locally under
    # the name the body already uses.
    from urllib import parse as urlparse
    url_data = urlparse.urlparse(link)
    query = urlparse.parse_qs(url_data.query)
    video = query["v"][0]
    return video
def init():
    """Load the pre-trained sentiment model and TF graph into module globals."""
    global model,graph
    # load the pre-trained Keras model
    model = load_model('sentiment_analysis.h5')
    graph = tf.compat.v1.get_default_graph()
# YouTube Data API client used to fetch video comment threads.
youtube = googleapiclient.discovery.build(api_service_name, api_version, developerKey = DEVELOPER_KEY)
# NLTK stopword list extended with punctuation for comment cleaning.
stop=stopwords.words("english")
punc=[pun for pun in string.punctuation]
stop+=punc
print(stop)
import warnings
warnings.filterwarnings("ignore")
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/')
def hello():
    """Render the landing page."""
    return render_template('index.html')
def videoidfetcher(link):
    """Extract the 11-character YouTube video id from *link*; '' when absent."""
    match = re.search("(?:\/|%3D|v=|vi=)([0-9A-z-_]{11})(?:[%#?&]|$)", link)
    return match.group(1) if match else ""
def sent_anly_prediction(comment):
    """Classify *comment* with the LSTM sentiment model: 'Positive'/'Negative'.

    Words are mapped to imdb vocabulary ids (ids above 5000 and unknown
    words become 0), padded to length 500, and scored with the model and
    graph globals loaded by init().
    """
    words = comment.split()
    x_test = [[word_to_id[word] if (word in word_to_id and word_to_id[word]<=5000) else 0 for word in words]]
    x_test = sequence.pad_sequences(x_test, maxlen=500)
    vector = np.array([x_test.flatten()])
    with graph.as_default():
        probability = model.predict(array([vector][0]))[0][0]
        print(probability)
        class1 = model.predict_classes(array([vector][0]))[0][0]
    if class1 == 0:
        return "Negative"
    else:
        return "Positive"
@app.route('/text', methods = ['POST'])
def predict_text():
    """Score a free-text snippet with TextBlob polarity and render the result.

    Polarity in [-1, 1] is rescaled to a 0-100 'positive' percentage.
    """
    if request.method == 'POST':
        text = request.form['text']
        strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
        comment = text.lower().replace("<br />", " ")
        comment=re.sub(strip_special_chars, "", comment.lower())
        g=TextBlob(comment).sentiment.polarity
        # Map polarity [-1, 1] -> [0, 100].
        g=int((g+1)*50)
        result_dic = {
            'positive':g,
            'negative':100-g,
            'text':text,
        }
        print(g)
        return render_template('index.html',prediction=result_dic)
@app.route('/', methods = ['POST'])
def upload_file():
    """Analyze the comments of a submitted YouTube URL.

    Fetches up to 100 relevant comment threads, counts positive/negative
    comments via TextBlob polarity, keeps the five most-liked comments with
    a bounded min-heap, and renders the aggregated result.
    """
    if request.method == 'POST':
        # Bootstrap badge colors for the top-5 comment list (ranks 1-5).
        my_colors={}
        my_colors[1]="primary"
        my_colors[2]="secondary"
        my_colors[3]="success"
        my_colors[4]="danger"
        my_colors[5]="warning"
        text = request.form['youtube_video_url']
        video_id= videoidfetcher(text)
        if(video_id==""):
            flash('Looks like you have entered invalid youtube link!!!')
            return render_template('index.html')
        print(video_id)
        heap_of_good_likes=[]
        most_liked_comments=[]
        query_results =youtube.commentThreads().list(part="snippet",maxResults=100,textFormat="plainText",order="relevance",videoId=video_id).execute()
        negative=0
        positive=0
        for x in (query_results['items']):
            comment=x['snippet']['topLevelComment']['snippet']['textDisplay']
            # Normalize: lowercase, strip <br /> and non-alphanumerics.
            strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
            comment = comment.lower().replace("<br />", " ")
            comment=re.sub(strip_special_chars, "", comment.lower())
            cleaned_comment=comment
            # Polarity < 0 counts as negative, everything else as positive.
            if(TextBlob(cleaned_comment).sentiment.polarity<0):
                print(cleaned_comment)
                print(TextBlob(cleaned_comment).sentiment.polarity)
                negative=negative+1
            else:
                print(cleaned_comment)
                print(TextBlob(cleaned_comment).sentiment.polarity)
                positive=positive+1
            # Bounded min-heap keeps the 5 comments with the most likes.
            get_like_count=x['snippet']['topLevelComment']['snippet']['likeCount']
            if len(heap_of_good_likes)<5:
                heapq.heappush(heap_of_good_likes,(get_like_count,comment));
            else:
                top=heapq.heappop(heap_of_good_likes)
                if(top[0]<get_like_count):
                    heapq.heappush(heap_of_good_likes,(get_like_count,comment));
                else:
                    heapq.heappush(heap_of_good_likes,top)
        # Drain the heap (ascending), then reverse for most-liked-first order.
        while heap_of_good_likes:
            most_liked_comments.append(heapq.heappop(heap_of_good_likes))
        most_liked_comments.reverse()
        # NOTE(review): ZeroDivisionError if the video has no comments at all.
        my_positive=int((positive/(positive+negative))*100)
        my_negative=100-my_positive
        result_dic = {
            'positive':my_positive,
            'negative':my_negative,
            'youtube_video':video_id,
            'most_liked_comments':most_liked_comments,
            'mycolors':my_colors
        }
        return render_template('index.html',results=result_dic)
def get_simple_POS(tag):
    """Map a Penn Treebank tag prefix to the matching WordNet POS constant."""
    # Only the first letter of the Treebank tag matters; unknown prefixes
    # default to NOUN, matching WordNetLemmatizer's own default.
    prefix_to_pos = {
        'J': wordnet.ADJ,
        'N': wordnet.NOUN,
        'V': wordnet.VERB,
        'R': wordnet.ADV,
    }
    return prefix_to_pos.get(tag[:1], wordnet.NOUN)
def deEmojify(text):
    """Remove common emoji code points (emoticons, symbols & pictographs,
    transport & map symbols, iOS flags) from *text*."""
    emoji_ranges = ("["
                    u"\U0001F600-\U0001F64F"
                    u"\U0001F300-\U0001F5FF"
                    u"\U0001F680-\U0001F6FF"
                    u"\U0001F1E0-\U0001F1FF"
                    "]+")
    pattern = re.compile(emoji_ranges, flags=re.UNICODE)
    return pattern.sub('', text)
def cleanwords(sentence):
    """Lemmatize *sentence*: strip emojis, drop stopwords/punctuation/digits,
    and return the space-joined lowercase lemmas."""
    # Collect emoji descriptions -- NOTE(review): sentence_emogis_short is
    # built but never used; presumably meant to be appended to the result.
    sentence_emogis=demoji.findall(sentence)
    sentence_emogis_short=" "
    for value in sentence_emogis.values():
        sentence_emogis_short=sentence_emogis_short+(str(value)+" ")
    sentence=deEmojify(sentence)
    words=word_tokenize(sentence)
    # Lemmatize each surviving token with a POS hint from its Treebank tag.
    words=[lemmatizer.lemmatize(word,pos=get_simple_POS(pos_tag(word)[0][1])).lower() for word in words if not word.lower() in stop and not word.isdigit()]
    return " ".join(words)
if __name__ == '__main__':
    # Load the sentiment model/graph before serving requests.
    init()
    app.config['TEMPLATES_AUTO_RELOAD']=True
    app.run(debug = False)
| Anmol567/Youtube_Comment_Reviewer | my_new_flask_app.py | my_new_flask_app.py | py | 6,600 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.datasets.imdb.get_word_index",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.datasets.imdb",
"line_number": 34,
"usage_type": "name"
},
{
... |
11454255593 | import torch
import os
import chartmodel
from torch.utils.data import Dataset
import albumentations
from albumentations.pytorch import ToTensorV2 as AT
from charttype import dataset
# Inference batch settings.
batch_size = 32
num_workers = 4
if __name__ == '__main__':
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    img_size = 256
    # Deterministic preprocessing only: resize + normalize + to-tensor.
    data_transforms_test = albumentations.Compose([
        albumentations.Resize(img_size, img_size),
        albumentations.Normalize(),
        AT()
    ])
    test_list = list()
    # NOTE(review): there is no '/' between the script dir and '../', so this
    # resolves to '<dir>../data/...'; confirm the path is actually correct.
    test_path = os.path.dirname(os.path.abspath(__file__)) + "../data/chart_type/test/images/"
    for pic in os.listdir(test_path):
        test_list.append(test_path + pic)
    testset = dataset.ChartsDataset('/', test_list, transform=data_transforms_test, mode="test")
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, num_workers=num_workers)
    model = chartmodel.get_model()
    model.load_state_dict(torch.load('../data/chart_type/model.pt', map_location=device))
    model.eval()
    for img, img_filename in testloader:
        with torch.no_grad():
            img = img.to(device)
            output = model(img)
            # Class index with the highest logit per image.
            pred = torch.argmax(output, dim=1).cpu().numpy()
            types = dataset.ChartsDataset.get_class_names(pred)
            for i in range(len(img_filename)):
                print(f'filename: {os.path.basename(img_filename[i])}; type: {types[i]}; label: {pred[i]}')
| ksvyatov/chart-recognizer | charttype/test.py | test.py | py | 1,473 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "albumentations.C... |
12712406140 | import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.hierarchy as scp
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
text_model = hub.load(module_url)
def embed_compare(sentence):
    """Embed the given texts with the Universal Sentence Encoder and return
    their pairwise inner-product similarity matrix (cosine-like, since USE
    embeddings are approximately unit norm -- TODO confirm)."""
    embeddings = text_model(sentence)
    return np.inner(embeddings, embeddings)
def plot_sim(sim_mat, labels):
    """Render *sim_mat* as an annotated heatmap with *labels* on both axes."""
    im = plt.imshow(sim_mat)
    plt.colorbar(im,fraction=0.046, pad=0.04)
    plt.xticks(np.arange(len(labels)), labels, rotation=90)
    plt.yticks(np.arange(len(labels)), labels)
    plt.title('Semantic similarity')
    # Overlay each cell with its rounded similarity value.
    for i in range(len(labels)):
        for j in range(len(labels)):
            plt.text(i, j, np.round(sim_mat[i, j], 2), color='black', ha='center', va='center', fontsize=8)
# Compare determiner words vs determiner+noun phrases by USE similarity.
words = ['An','All','My','Your','This','That','These','Those']
sim_word = embed_compare(words)
phrases = ['An apple','All apples','My apples','Your apples','This apple','That apple','These apples','Those apples']
sim_phrase = embed_compare(phrases)
f = plt.figure(figsize=(10,8))
plt.suptitle('Universal Sentence Encoder')
plt.subplot(221)
plot_sim(sim_word, words)
plt.subplot(222)
plot_sim(sim_phrase, phrases)
plt.subplot(223)
# Hierarchical clustering with (1 - similarity) as the distance measure.
Zw = scp.linkage(1-sim_word)
dendrow = scp.dendrogram(Zw, labels=words,leaf_font_size=8, leaf_rotation=90)
plt.subplot(224)
Zp = scp.linkage(1-sim_phrase)
dendrop = scp.dendrogram(Zp, labels=phrases,leaf_font_size=8, leaf_rotation=90)
plt.tight_layout()
plt.show()
f.savefig('../Fig/USE_analysis.png')
f.savefig('../Fig/USE_analysis.svg')
# Same analysis on singular/plural nouns alone.
nouns = ['apple','apples','onion','onions','carrot','carrots','orange','oranges']
sim_nouns = embed_compare(nouns)
Zn = scp.linkage(1-sim_nouns)
f2 = plt.figure()
plt.suptitle('Universal Sentence Encoder')
plt.subplot(121)
plot_sim(sim_nouns, nouns)
plt.subplot(122)
dendron = scp.dendrogram(Zn, labels=nouns,leaf_font_size=8, leaf_rotation=90)
plt.tight_layout()
plt.show()
f2.savefig('../Fig/USE_noun_analysis.png')
f2.savefig('../Fig/USE_noun_analysis.svg')
{
"api_name": "tensorflow_hub.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.inner",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.py... |
12010103629 | import time
from appium import webdriver
# Desired capabilities: device-connection prerequisites.
descried_caps = dict()
descried_caps["platformName"] = "android"
descried_caps["platformVersion"] = "5.1.1"
descried_caps["deviceName"] = "emulator-5554"
descried_caps["appPackage"] = "com.bjcsxq.chat.carfriend"
descried_caps["appActivity"] = ".MainActivity"
# Instantiate the driver session object (note: the hub URL must be exact).
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', descried_caps)
time.sleep(3)
# Close the current app (sends it away from the foreground).
driver.close_app()
time.sleep(3)
# Launch another app by (package name, activity name).
driver.start_activity("com.ss.android.ugc.aweme", ".main.MainActivity")
print(driver.current_package)
time.sleep(3)
# Quit the driver object (tears down the connection/session).
driver.quit()
| 1chott/appAutoStudy | code_D_04/code_03_常见api启动关闭app.py | code_03_常见api启动关闭app.py | py | 747 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "appium.webdriver.Remote",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
36065549678 | from dotenv import load_dotenv
import os
import requests
from pprint import pprint
from datetime import datetime, timedelta
from flight_data import FlightData
load_dotenv()
API_KEY = os.getenv("flight_search_api")
KIWI_ENDPOINT = "https://api.tequila.kiwi.com"
class FlightSearch:
#This class is responsible for talking to the Flight Search API.
    def __init__(self):
        # Every Tequila endpoint authenticates via this apikey header.
        self.header = {
            "apikey":API_KEY,
        }
def iata_code(self, city_name):
params = {
'term': city_name,
'location_types': 'city',
'limit': 1,
}
response = requests.get(url=f'{KIWI_ENDPOINT}/locations/query', headers=self.header, params=params)
print(response.status_code)
# Check response status code
if response.status_code == 200:
# Parse JSON response
data = response.json()
# Extract city code from the response
if 'locations' in data and len(data['locations']) > 0:
city_code = data['locations'][0]['code']
if city_code == None and len(data['locations'][0]['alternative_departure_points'])>0:
distance = 200
for nearby in data['locations'][0]['alternative_departure_points']:
if nearby['distance'] < distance:
distance = nearby['distance']
nearby_citycode = nearby['id']
return nearby_citycode
elif city_code == None:
return "City not found"
return city_code
else:
return "City not found"
else:
return f"Error occurd: {response.raise_for_status}"
def find_flights(self, origin_city_code,destination_city_code):
presentday = datetime.now()
tomorrow_date = presentday + timedelta(1)
tomorrow_date = (tomorrow_date).strftime('%d/%m/%Y')
six_months_date = presentday + timedelta(180)
six_months_date = (six_months_date).strftime('%d/%m/%Y')
params={
"fly_from": origin_city_code,
"fly_to": destination_city_code,
"date_from": tomorrow_date,
"date_to": six_months_date,
"nights_in_dst_from": 7,
"nights_in_dst_to": 28,
"flight_type": "round",
"one_for_city": 1,
"max_stopovers": 0,
"curr": "INR"
}
response = requests.get(url=f'{KIWI_ENDPOINT}/v2/search', headers=self.header,params=params)
try:
data = response.json()["data"][0]
except IndexError:
print(f"No flights found for {destination_city_code}.")
return None
flight_data = FlightData(
price=data["price"],
origin_city=data["route"][0]["cityFrom"], #return city name
origin_airport=data["route"][0]["flyFrom"], # return iata code of airport
destination_city=data["route"][0]["cityTo"], #return city name
destination_airport=data["route"][0]["flyTo"], # return iata code of airport
out_date=data["route"][0]["local_departure"].split("T")[0],
return_date=data["route"][1]["local_departure"].split("T")[0]
)
print(f"{flight_data.destination_city}: ₹{flight_data.price}")
return flight_data
# print(f"{arrival_city}: {}")
| Shivam29k/Python_Projects | flight_deals_alert/flight_search.py | flight_search.py | py | 3,453 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"... |
14699326224 | # -*- coding: utf-8 -*-
import sys
from os import listdir
from os.path import isfile, join
import io
def extract_files(path):
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles
def get_text(onlyfiles,path):
text = ''
for file in onlyfiles:
f = io.open(join(path,file), mode="r", encoding="utf-8")
text = text + f.read() + '\n'
return text
def main(argv):
path = argv[0]
output_file = argv[1]
onlyfiles= extract_files(path)
text= get_text(onlyfiles,path)
f = io.open(output_file, mode="w", encoding="utf-8")
f.write(text)
if __name__ == '__main__':
main(sys.argv[1:])
| SerPablo/redib_extractor | src/text_extractor.py | text_extractor.py | py | 683 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 2... |
39912096462 | import jwt
from django.contrib.auth import authenticate
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import GenericAPIView
from rest_framework import status
from smtplib import SMTPException
from .serializers import SignupSerializer, VerifyAccountSerializer, LogininSerializer
from .emails import send_otp_via_email
from .models import YourPoolUser
class SignupView(GenericAPIView):
serializer_class = SignupSerializer
def post(self, request):
try:
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
send_otp_via_email(serializer.data["email"])
return Response(
{
"status": 201,
"message": "registration successfully check email",
"data": serializer.data,
}
)
return Response(
{
"status": 400,
"message": "something went wrong",
"data": serializer.errors,
}
)
except SMTPException as e:
return Response(
{
"status": e.smtp_code,
"message": e.smtp_error,
"data": serializer.errors,
}
)
class VerifyOTP(APIView):
def post(self, request):
try:
data = request.data
serializer = VerifyAccountSerializer(data=data)
if serializer.is_valid():
email = serializer.data["email"]
otp = serializer.data["otp"]
user = YourPoolUser.objects.filter(email=email)
if not user.exists():
return Response(
{
"status": 400,
"message": "something went wrong",
"data": "invalid email",
}
)
if user[0].otp != otp:
return Response(
{
"status": 400,
"message": "something went wrong",
"data": "wrong otp",
}
)
user = user.first()
user.is_email_verified = True
user.save()
return Response(
{
"status": 200,
"message": "account verified",
"data": {},
}
)
return Response(
{
"status": 400,
"message": "something went wrong",
"data": serializer.errors,
}
)
except Exception as e:
print(e)
class LoginView(GenericAPIView):
serializer_class = LogininSerializer
def post(self, request):
email = request.data.get("email", None)
password = request.data.get("password", None)
user = authenticate(username=email, password=password)
if user and user.is_email_verified:
serializer = self.serializer_class(user)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(
{"message": "Invalid credentials, try again"},
status=status.HTTP_401_UNAUTHORIZED,
)
| TEAM-ILSAN/yourpool-backend | users/views.py | views.py | py | 3,630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.generics.GenericAPIView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "serializers.SignupSerializer",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "emails.send_otp_via_email",
"line_number": 25,
"usage_type": "call"
... |
41207274862 | """Environment using Gymnasium API for Franka robot.
The code is inspired by the D4RL repository hosted on GitHub (https://github.com/Farama-Foundation/D4RL), published in the paper
'D4RL: Datasets for Deep Data-Driven Reinforcement Learning' by Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, Sergey Levine.
This code was also implemented over the repository relay-policy-learning on GitHub (https://github.com/google-research/relay-policy-learning),
published in Relay Policy Learning: Solving Long-Horizon Tasks via Imitation and Reinforcement Learning, by
Abhishek Gupta, Vikash Kumar, Corey Lynch, Sergey Levine, Karol Hausman
Original Author of the code: Abhishek Gupta & Justin Fu
The modifications made involve separatin the Kitchen environment from the Franka environment and addint support for compatibility with
the Gymnasium and Multi-goal API's
This project is covered by the Apache 2.0 License.
"""
from os import path
import mujoco
import numpy as np
from gymnasium import spaces
from gymnasium.envs.mujoco.mujoco_env import MujocoEnv
from gymnasium_robotics.envs.franka_kitchen.ik_controller import IKController
from gymnasium_robotics.utils.mujoco_utils import MujocoModelNames, robot_get_obs
from gymnasium_robotics.utils.rotations import euler2quat
MAX_CARTESIAN_DISPLACEMENT = 0.2
MAX_ROTATION_DISPLACEMENT = 0.5
DEFAULT_CAMERA_CONFIG = {
"distance": 2.2,
"azimuth": 70.0,
"elevation": -35.0,
"lookat": np.array([-0.2, 0.5, 2.0]),
}
class FrankaRobot(MujocoEnv):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 10,
}
def __init__(
self,
model_path="../assets/kitchen_franka/franka_assets/franka_panda.xml",
frame_skip=50,
ik_controller: bool = True,
control_steps=5,
robot_noise_ratio: float = 0.01,
default_camera_config: dict = DEFAULT_CAMERA_CONFIG,
**kwargs,
):
xml_file_path = path.join(
path.dirname(path.realpath(__file__)),
model_path,
)
self.control_steps = control_steps
self.robot_noise_ratio = robot_noise_ratio
observation_space = (
spaces.Box(low=-np.inf, high=np.inf, shape=(9,), dtype=np.float32),
)
super().__init__(
xml_file_path,
frame_skip,
observation_space,
default_camera_config=default_camera_config,
**kwargs,
)
self.init_ctrl = np.array([0, 0, 0, -1.57079, 0, 1.57079, 0, 255])
if ik_controller:
self.controller = IKController(self.model, self.data)
action_size = 7 # 3 translation + 3 rotation + 1 gripper
else:
self.controller = None
action_size = 8 # 7 joint positions + 1 gripper
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=(action_size,)
)
# Actuator ranges
ctrlrange = self.model.actuator_ctrlrange
self.actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.0
self.actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.0
self.model_names = MujocoModelNames(self.model)
def step(self, action):
action = np.clip(action, -1.0, 1.0)
if self.controller is not None:
current_eef_pose = self.data.site_xpos[
self.model_names.site_name2id["EEF"]
].copy()
target_eef_pose = current_eef_pose + action[:3] * MAX_CARTESIAN_DISPLACEMENT
quat_rot = euler2quat(action[3:6] * MAX_ROTATION_DISPLACEMENT)
current_eef_quat = np.empty(
4
) # current orientation of the end effector site in quaternions
target_orientation = np.empty(
4
) # desired end effector orientation in quaternions
mujoco.mju_mat2Quat(
current_eef_quat,
self.data.site_xmat[self.model_names.site_name2id["EEF"]].copy(),
)
mujoco.mju_mulQuat(target_orientation, quat_rot, current_eef_quat)
ctrl_action = np.zeros(8)
# Denormalize gripper action
ctrl_action[-1] = (
self.actuation_center[-1] + action[-1] * self.actuation_range[-1]
)
for _ in range(self.control_steps):
delta_qpos = self.controller.compute_qpos_delta(
target_eef_pose, target_orientation
)
ctrl_action[:7] = self.data.ctrl.copy()[:7] + delta_qpos[:7]
# Do not use `do_simulation`` method from MujocoEnv: value error due to discrepancy between
# the action space and the simulation control input when using IK controller.
# TODO: eliminate error check in MujocoEnv (action space can be different from simulaton control input).
self.data.ctrl[:] = ctrl_action
mujoco.mj_step(self.model, self.data, nstep=self.frame_skip)
if self.render_mode == "human":
self.render()
else:
# Denormalize the input action from [-1, 1] range to the each actuators control range
action = self.actuation_center + action * self.actuation_range
self.do_simulation(action, self.frame_skip)
if self.render_mode == "human":
self.render()
obs = self._get_obs()
return obs, 0.0, False, False, {}
def _get_obs(self):
# Gather simulated observation
robot_qpos, robot_qvel = robot_get_obs(
self.model, self.data, self.model_names.joint_names
)
# Simulate observation noise
robot_qpos += self.robot_noise_ratio * self.np_random.uniform(
low=-1.0, high=1.0, size=robot_qpos.shape
)
robot_qvel += self.robot_noise_ratio * self.np_random.uniform(
low=-1.0, high=1.0, size=robot_qvel.shape
)
return np.concatenate((robot_qpos.copy(), robot_qvel.copy()))
def reset_model(self):
qpos = self.init_qpos
qvel = self.init_qvel
self.data.ctrl[:] = self.init_ctrl
self.set_state(qpos, qvel)
obs = self._get_obs()
return obs
| Skeli9989/Gymnasium-Robotics | gymnasium_robotics/envs/franka_kitchen/franka_env.py | franka_env.py | py | 6,370 | python | en | code | null | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "gymnasium.envs.mujoco.mujoco_env.MujocoEnv",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "o... |
16502029718 | #! /usr/bin/python3
import logging
import os
from urllib3 import make_headers
from telegram import (InlineKeyboardButton, InlineKeyboardMarkup, InputTextMessageContent,
ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, CallbackQueryHandler,
Filters, RegexHandler, ConversationHandler)
from selects import *
from bot.states import *
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def add_location(bot, update):
user = update.message.from_user
add_user_message(update)
logger.info("user %s. adding new location to db", user.first_name)
update.message.reply_text(
'Okay, let\'s see... Tell me the name of workspace in which new location is .. hm.. located!')
return LOCATION
def add_location_name(bot, update):
user = update.message.from_user
add_user_message(update)
workspace_name = update.message.text.lower().strip()
workspace = get_workspace(workspace_name)
if workspace is not None:
logger.info("user %s. adding location for workspace %s",
user.first_name, update.message.text)
update.message.reply_text('Great! Now tell me the name of your location!')
return LOCATION_NAME
else:
logger.info("user %s. adding location for non-existing workspace %s",
user.first_name, update.message.text)
update.message.reply_text('Sorry, mate. I don\'t know this workspace.\
Please, create one in the main menu and try again.')
reply_keyboard = [['Check meetings', 'Add meeting'],
['Add workspace', 'Add location'],
['Cancel meeting']]
reply_markup = ReplyKeyboardMarkup(reply_keyboard)
update.message.reply_text('Please choose:', reply_markup=reply_markup)
return ACTION
def added_location(bot, update):
user = update.message.from_user
workspace_name = last_message(user.id).text
add_user_message(update)
workspace = get_workspace(workspace_name)
add_location_to_workspace(update.message.text.lower().strip(), workspace.id)
logger.info("user %s. location %s added.", user.first_name, update.message.text)
update.message.reply_text(
'Great! Now you can hold meetings at %s in workspace %s' % (
update.message.text, workspace_name
))
reply_keyboard = [['Check meetings', 'Add meeting'],
['Add workspace', 'Add location'],
['Cancel meeting']]
reply_markup = ReplyKeyboardMarkup(reply_keyboard)
update.message.reply_text('Please choose:', reply_markup=reply_markup)
return ACTION
location_states = {
LOCATION: [MessageHandler(Filters.text, add_location_name)],
LOCATION_NAME: [MessageHandler(Filters.text, added_location)]
}
| oleges1/meet_bot | bot/add_location.py | add_location.py | py | 2,998 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "telegram.Repl... |
41685965854 | from utils import Position
import copy
import pickle
def load_cache(file: str) -> dict:
try:
with open(file, 'rb') as f:
cache = pickle.load(f)
except FileNotFoundError:
cache = {}
return cache
def save_cache(cache, file: str):
with open(file, 'wb') as f:
pickle.dump(cache, f)
def minimax(position: Position, depth=0, alpha=float('-inf'), beta=float('inf'),
maximizing_player: int = 1, cache=None) -> int:
if cache is None:
cache = {}
if position.key in cache:
return cache[position.key]
outcome = position.result
if type(outcome) == int:
return outcome * (10 - depth)
if maximizing_player == 1:
maxEval = float('-inf')
for child in position.get_children():
eval_ = minimax(position=child, depth=depth + 1, alpha=alpha, beta=beta, maximizing_player=-1, cache=cache)
maxEval = max(maxEval, eval_)
alpha = max(alpha, eval_)
if alpha >= beta:
break
cache[position.key] = maxEval
return maxEval
elif maximizing_player == -1:
minEval = float('inf')
for child in position.get_children():
eval_ = minimax(position=child, depth=depth + 1, alpha=alpha, beta=beta, maximizing_player=1, cache=cache)
minEval = min(minEval, eval_)
beta = min(beta, eval_)
if alpha >= beta:
break
cache[position.key] = minEval
return minEval
def get_best_move(position: Position, maximizing_player: int = 1, cache: dict = None):
best_value = float('-inf') if maximizing_player == 1 else float('inf')
best_move = (None, None)
for i in range(3):
for j in range(3):
if position.position[i][j] == 0:
new_position = copy.deepcopy(position)
new_position.make_move((i, j))
value = minimax(position=new_position, depth=0, maximizing_player=-maximizing_player, cache=cache)
if maximizing_player == 1 and value > best_value:
best_value = value
best_move = (i, j)
elif maximizing_player == -1 and value < best_value:
best_value = value
best_move = (i, j)
return best_move
| Epico-Coder/TicTacToe | ai.py | ai.py | py | 2,357 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utils.Position",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "utils.Position",
"line_numb... |
22124073233 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#todo: stop words and symbols are mucking things up
import os
import os.path
import nltk
import operator
from nltk import word_tokenize
import collections
import math
import sklearn
import sklearn.cluster
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
def ppmitopwords():
d = os.getcwd()
stupid_symbols = ["(", ")", ".", ",", "'s", "``", "''", "'", "n't", ": ", ";", "?"]
common_words_to_ignore = ["walt", "whitman", "mr", "and", "own", "thy", "thee"]
#import docs into program, placing each in dictionary with content as the key
docs_dict = {}
the_big_corpus = ""
path = 'pages/scripts/ppmi/source_text/'
path = os.path.join(d, path)
for filename in os.listdir(path):
with open(os.path.join(path, filename)) as currentfile:
current_file = currentfile.read()
current_file = current_file.replace('xml', '')
for word in stupid_symbols:
current_file = current_file.replace(word, '')
for word in common_words_to_ignore:
current_file = current_file.replace(word, '')
#current_file = current_file.decode('utf-8')
the_big_corpus = the_big_corpus + current_file
docs_dict[current_file] = filename
#change to numbers so I can print in order of years, otherwise it comes out out of order and I'm picky.
for now in docs_dict.keys():
filename = docs_dict[now]
filename = filename.replace('.txt', '')
file_number = int(filename)
docs_dict[now] = file_number
#ppmi
print("------------------PMI RESULTS, TOP 10 WORDS PER YEAR-----------------------\n")
raw_matrix_words = []
raw_matrix_counts = []
token_to_index = {}
#raw counts of words into matrix, put tokens in raw_matrix_words to create matrix of words
for key in docs_dict.keys():
tokens_dict = {}
content = key
tokens = word_tokenize(content)
raw_matrix_words.append(tokens)
#get raw token count
for token_set in raw_matrix_words:
counter_dict = {}
for token in token_set:
counter_dict[token] = 0
for token in token_set:
counter_dict[token] = counter_dict[token] + 1
list_for_tokens_tups = []
for word in counter_dict.keys():
word_tup = (word, counter_dict[word])
list_for_tokens_tups.append(word_tup)
raw_matrix_counts.append(list_for_tokens_tups)
#now the raw_matrix_counts contains an entry for each list of tuples, for alignment
#idea, don't make a matrix, for each doc find entire sum, find sum of all matching words in lists, work from there...
total = 0 #sum of full 'matrix' starts here
for a_list in raw_matrix_counts:
for a_tup in a_list:
total = total + a_tup[1]
#now get each column (word)
word_dict = {} #represent sum of columns
the_big_tokens = word_tokenize(the_big_corpus)
for a_list in raw_matrix_counts:
for a_tup in a_list:
word = a_tup[0]
word_dict[word] = 0
for a_list in raw_matrix_counts:
for a_tup in a_list:
word = a_tup[0]
word_dict[word] = word_dict[word] + a_tup[1]
#col_dict stores the sum of the column divided by the total
col_dict = {}
for word in word_dict:
value = float(word_dict[word])
value = float(value/total)
col_dict[word] = value
#doc dict will contain sum of all words in a document
docu_dict = {}
list_of_years = list(docs_dict.values())
year_index = 0
for a_list in raw_matrix_counts:
total_in_doc = 0
for a_tup in a_list:
total_in_doc = total_in_doc + a_tup[1]
docu_dict[list_of_years[year_index]] = total_in_doc
year_index = year_index + 1
#so now we have the sum of the rows in docu_dict, with the key being the year the document is associated with
#we also have the sum of the columns, with the word being the key for the raw count, the col_dict contains the sum divided by the scalar value
row_dict = docu_dict
for key in row_dict.keys():
value = row_dict[key]
value = float(value)
value = float(value/total)
row_dict[key] = value
#row_dict = sum/value of docs // col_dict = sum/value of words
col_dict_len = len(col_dict)
row_dict_len = len(row_dict)
#going to do the scalar product now... difficult! (actually, coming back, not scalar, misnamed it, oh well)
scalar_dict = {}
for key_row, value_row in row_dict.items():
scalar_dict_value = {}
for key_col, value_col in col_dict.items():
value = float(col_dict[key_col]*row_dict[key_row])
scalar_dict_value[key_col] = value
scalar_dict[key_row] = scalar_dict_value #keeps in order of year and word for later extraction
#next, we get the "real" values, observed values, all above are "predictive values"... how much we EXPECT to see a word in each doc.
real_count_dict = {}
for key_doc, value_filename in docs_dict.items():
filename = value_filename
content = key_doc
tokens = word_tokenize(content)
tokens_dict = {}
for token in tokens: #initalize all to 0 before raw count
tokens_dict[token] = 0
for token in tokens:
tokens_dict[token] = tokens_dict[token] + 1 #raw counts for THIS DOC should be aquired
#now store doc
for token in tokens:
value = float(tokens_dict[token])
tokens_dict[token] = float(value/total)
real_count_dict[filename] = tokens_dict
#now get the ratio of the observed/predicted
for key in real_count_dict.keys():
for key2 in real_count_dict[key].keys():
real_count_dict[key][key2] = float(real_count_dict[key][key2] / scalar_dict[key][key2])
#now take the log of the new matrix (real_count_dict), according to online that implies taking the log of each value... lets hope this works.
for key in real_count_dict.keys():
for key2 in real_count_dict[key].keys():
if real_count_dict[key][key2] > 0.0:
real_count_dict[key][key2] = float(math.log(real_count_dict[key][key2]))
else:
real_count_dict[key][key2] = 0.0
for key in real_count_dict.keys():
dict_to_sort = real_count_dict[key]
sorted_dict = OrderedDict(sorted(dict_to_sort.items(), key=operator.itemgetter(1)))
real_count_dict[key] = sorted_dict
for key in real_count_dict.keys():
print(key) #key is year
print("-------------------------")
for key2 in real_count_dict[key].keys()[:10]: #key2 is word
#print only top 10
word = key2
value = real_count_dict[key][key2]
print_string = " {} : {} "
print(word, value)
#myprintout.format(unicode(word).encode("iso-8859-2", "replace"), value)
print("\n")
return real_count_dict
#cooccurrence by year
#keyword search
| mcwatera/WWTBHT | wwtbht/pages/scripts/ppmi/mcwatera_fp.py | mcwatera_fp.py | py | 6,495 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 3... |
71239238267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import numpy
import json
import sys
import os.path
def readXYZ(filename):
# read molecular coordinates from .xyz file
# return list of symbols and list of coordinate
geom = []
with open(filename, "r") as f:
for line in f:
tmp=line.split()
if len(tmp)==4:
atom = OrderedDict()
atom["atom"] = tmp[0]
atom["xyz"] = list(map(float,tmp[1:]))
geom.append(atom)
return geom
if len(sys.argv) < 4:
print( "Usage: %s template.json molecule.xyz molecule.json" % os.path.basename(sys.argv[0]) )
print( " " )
print( " create JSON input file for BAGEL" )
print( " All sections are copied from 'template.json' except for the molecule" )
print( " section, which is taken from 'molecule.xyz'." )
print( " " )
exit(-1)
args = sys.argv[1:]
# load input from template
with open(args[0], "r") as f:
input_sec = json.load(f)
# find molecule section
for sec in input_sec["bagel"]:
if sec["title"] == "molecule":
molecule_sec = sec
break
else:
raise RuntimeError("Molecule section not found in JSON template!")
# The geometry in the 'molecule' section is replaced with the one read from the xyz-file.
geom = readXYZ(args[1])
molecule_sec["angstrom"] = True
molecule_sec["geometry"] = geom
# The modified JSON is written to the new input file
input_filename = args[2]
def to_json(o, level=0, nflag=0):
"""
serialize an object in the JSON format
"""
INDENT = 2
SPACE = " "
NEWLINE = "\n"
ret = ""
if isinstance(o, dict):
if len(o) == 2 and "atom" in o:
ret += NEWLINE + SPACE * INDENT * (level+1) + "{"
else:
ret += "{" + NEWLINE
comma = ""
for k,v in o.items():
ret += comma
if k == "atom":
comma = ","
else:
comma = ",\n"
if k != "xyz" and k != "atom":
ret += SPACE * INDENT * (level+1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1, nflag=nflag)
if k == "xyz":
ret += " }"
else:
nflag = 0
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list):
ret += "[" + ",".join([to_json(e, level+1) for e in o]) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%12.8f' % o
elif isinstance(o, numpy.ndarray) and numpy.issubdtype(o.dtype, numpy.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, numpy.ndarray) and numpy.issubdtype(o.dtype, numpy.inexact):
ret += "[" + ','.join(map(lambda x: '%.7g' % x, o.flatten().tolist())) + "]"
else:
raise TypeError("Unknown type '%s' for json serialization" % str(type(o)))
return ret
#print to_json(input_sec)
with open(input_filename, "w") as f:
f.writelines(to_json(input_sec))
| humeniuka/chem-queue | scripts/bagel_template.py | bagel_template.py | py | 3,245 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.OrderedDict",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.basename",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.p... |
16786521432 | from flask import render_template, flash, redirect, url_for, request, jsonify, current_app, g, send_from_directory
from flask_login import login_required, login_user, logout_user, current_user
from app import db
from app.helper import clean_list, normalize, convertArrayToString, convertStringToArray, prepFullAddressSearch, Marker, Error, tryconvert, allowed_file
from app.project import bp
from app.models import User, Post, Project, ProjectImage, Address, Link, Tag, UserRole
from app.main.forms import PostForm, SearchForm, EditAddressForm, TaggingForm, DemoForm, ReviewForm, HiddenDataForm
from app.project.forms import ProjectCreateInitialForm, EditProjectForm, ProjectFilterForm, PhotoForm
from app.service import GoogleMaps_api, AWS_api
from datetime import datetime
from guess_language import guess_language
from flask_googlemaps import GoogleMaps, Map
import geocoder
import os
import flask_s3
import boto3
@bp.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
g.search_form = SearchForm()
def save_tags(asset, func):
# form = ProjectCreateInitialForm(request.form)
form = func(request.form)
tags_data_array = list(map(lambda v: tryconvert(v, v, int), convertStringToArray(form.tags.data)))
for element in tags_data_array:
exists = db.session.query(db.exists().where(Tag.id == element)).scalar()
if not exists:
# TODO: Defaulting category_id to 0; Either create a logic that can self categorize itself or create a process so that tags are created are automatically in a "bucket" category.
# BUG: Newly created tags' name property is not visible from admin panel. Name is however viewable from app view.
tag = Tag(category_id=0, name=element)
db.session.add(tag)
else:
tag = Tag.query.get(element)
asset.add_tag(tag)
db.session.commit()
@bp.route('/explore', methods=['GET', 'POST'])
@login_required
def explore():
geo = geocoder.ip('me')
page = request.args.get('page', 1, type=int)
projects = Project.query.order_by(Project.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)
filtered = list(filter(lambda x: x.site.lat != None, projects.items))
# avgLat = sum(project.site.lat for project in filtered)/len(filtered)
avgLat = geo.latlng[0]
# avgLng = sum(project.site.lng for project in filtered)/len(filtered)
avgLng = geo.latlng[1]
#TODO: Preset center and markers according to filter
mymap = Map(
identifier="view-map",
lat=avgLat,
lng=avgLng,
markers=[(project.site.lat, project.site.lng) for project in filtered],
style="height:400px;width:100%;margin:0;"
)
next_url = url_for('project.explore', page=projects.next_num) \
if projects.has_next else None
prev_url = url_for('project.explore', page=projects.prev_num) \
if projects.has_prev else None
return render_template('index.html', title='Explore', projects=projects.items, next_url=next_url, prev_url=prev_url, mymap=mymap)
@bp.route('/create', methods=['GET', 'POST'])
@login_required
def create():
if not current_user.is_authenticated:
flash('You need to be a registered user to create projects.')
return redirect(url_for('main.index'))
form = ProjectCreateInitialForm()
if form.validate_on_submit():
g = GoogleMaps_api()
citystate = form.city.data + ' ' + form.state.data
full_address = prepFullAddressSearch(form.address1.data, form.address2.data, citystate, form.zipcode.data)
exists = db.session.query(db.exists().where(Address.full_address == full_address)).scalar()
if not exists:
print('form address1: {}, form city: {}, form state: {}'.format(form.address1.data, form.city.data, form.state.data))
geocode = g.getGeocode(form.address1.data, form.city.data, form.state.data)
address = Address(address1=form.address1.data, address2=form.address2.data, city=form.city.data, state = form.state.data, zipcode=form.zipcode.data, country=form.country.data, full_address=full_address, lat=geocode['lat'], lng=geocode['lng'])
db.session.add(address)
else:
address = Address.query.filter_by(full_address=full_address).first()
project = Project(name=form.name.data, creator=current_user, site=address)
db.session.add(project)
save_tags(project, ProjectCreateInitialForm)
db.session.commit()
flash('Congratulations, you just created a project and address!')
return redirect(url_for('project.upload', project_id=project.id))
return render_template('project/create.html', title='Create', form=form)
@bp.route('/upload', methods=['GET', 'POST'])
def upload(*args, **kwargs):
    """Two-phase project image upload.

    Phase 1 (dropzone POSTs with 'file*' keys): stage each uploaded file in
    the local UPLOADED_PATH directory, prefixed with the project id.
    Phase 2 (form submit): push every staged file in app/static/uploads/ to
    S3 with a public-read ACL and record a ProjectImage row per file.

    The project id arrives either as a query-string arg or a positional arg
    and is round-tripped through a hidden form field.
    """
    form = HiddenDataForm()
    form.data.data = request.args.get('project_id') or args
    if request.method == 'POST':
        for key, f in request.files.items():
            if key.startswith('file'):
                f.save(os.path.join(current_app.config['UPLOADED_PATH'], 'project{}-{}'.format(form.data.data, f.filename)))
    #TODO: Give user opportunity to add more image related data here
    if form.validate_on_submit():
        s3 = boto3.client(
            "s3",
            aws_access_key_id=current_app.config['S3_ACCESS_KEY'],
            aws_secret_access_key=current_app.config['S3_SECRET_ACCESS_KEY']
        )
        project = Project.query.filter_by(id=form.data.data).first()
        uploadFileNames = []
        sourceDir = os.path.join(current_app.config['APP_ROOT'], 'app/static/uploads/')
        # Only the top level of the uploads dir is scanned (break after the
        # first os.walk tuple).  NOTE(review): despite the names, `dirname`
        # and `filename` here are *lists* from os.walk.
        for (sourceDir, dirname, filename) in os.walk(sourceDir):
            uploadFileNames.extend(filename)
            break
        for filename in uploadFileNames:
            sourcepath = sourceDir + filename
            # NOTE(review): leftover debug print -- consider replacing with logging.
            print('########### SOURCEPATH: {}'.format(sourcepath))
            with open(sourcepath, 'rb') as data:
                s3.upload_fileobj(
                    data,
                    current_app.config['S3_BUCKET_NAME'],
                    filename,
                    ExtraArgs={
                        # NOTE(review): ContentType is set to the bare file
                        # extension (e.g. 'jpg'), not a MIME type -- verify
                        # this is what the bucket consumers expect.
                        "ACL": 'public-read',
                        "ContentType": filename.rsplit('.', 1)[1].lower()
                    }
                )
            object_url = "https://s3-us-west-2.amazonaws.com/{}/{}".format(current_app.config['S3_BUCKET_NAME'], filename)
            project_image = ProjectImage(description='this is a static description placeholder... will need to refactor', image_url=object_url, image_project=project, photo_uploader=current_user)
            db.session.add(project_image)
        db.session.commit()
        return redirect(url_for('project.project', project_id=form.data.data))
    return render_template('upload.html', form=form)
# @bp.route('/upload-old', methods=['GET', 'POST'])
# def upload(project_id):
# if form.validate_on_submit():
# f = form.photo.data
# filename = secure_filename(f.filename)
# f.save(os.path.join(
# app.instance_path, 'photos', filename
# ))
# return redirect(url_for('project.project', project_id=project_id))
# return render_template('upload.html', form=form)
@bp.route('/timeline/<address_id>')
def view_timeline(address_id):
    """Show every project recorded at one address, with a map of the site."""
    site = Address.query.filter_by(id=address_id).first()
    site_projects = Project.query.filter_by(address_id=address_id)
    # Map centred on the address, with a single marker at the site itself.
    site_map = Map(
        identifier="view-map",
        lat=site.lat,
        lng=site.lng,
        markers=[(site.lat, site.lng)],
        style="height:400px;width:100%;margin:0;"
    )
    ##TODO: Add functionality that allows user to start the process of creating a review
    return render_template('project/timeline.html', title='Timeline', mymap=site_map, projects=site_projects)
@bp.route('/<project_id>', methods=['GET', 'POST'])
def project(project_id):
    """Project detail page: comments, map, images, paginated posts.

    POST handles a new comment (with language auto-detection for the
    translate feature) and redirects back via POST/redirect/GET; GET renders
    the project with its site map, images and one page of posts.
    """
    form = PostForm()
    if form.validate_on_submit():
        # Detect the comment's language; blank it when detection fails or
        # returns an implausibly long code.
        language = guess_language(form.body.data)
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        project = Project.query.filter_by(id=project_id).first_or_404()
        post = Post(body=form.body.data, author=current_user, commented_project=project, language=language)
        post.save()
        flash('Your post is now live!')
        return redirect(url_for('project.project', project_id=project_id))
    page = request.args.get('page', 1, type=int)
    project = Project.query.filter_by(id=project_id).first_or_404()
    user = User.query.filter_by(username=project.creator.username).first()
    mymap = Map(
        identifier="view-map",
        lat=project.site.lat,
        lng=project.site.lng,
        markers=[(project.site.lat, project.site.lng)],
        style="height:400px;width:100%;margin:0;",
        fit_markers_to_bounds = True
    )
    # Newest posts first, one page at a time (no 404 on out-of-range pages).
    posts = project.posts.order_by(Post.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)
    images = project.images.all()
    next_url = url_for('project.project', username=user.username, page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('project.project', username=user.username, page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('project/project.html', user=user, project=project, form=form, posts=posts.items, next_url=next_url, prev_url=prev_url, mymap=mymap, images=images)
@bp.route('/image/<id>', methods=['GET', 'POST'])
def viewProjectImage(id):
    """Return the stored S3 URL for a single project image."""
    image = ProjectImage.query.filter_by(id=id).first()
    return image.image_url
@bp.route('/photo-form', methods=['POST'])
@login_required
def review_form():
    """Render the comment/photo partial pre-bound to the posted project id."""
    photo_form = PhotoForm()
    photo_form.project_id.data = request.form['project_id']
    return render_template('_comment.html', form=photo_form)
#TODO: Include Photo submission and refactor this. There has to be a better way to do this.
@bp.route('/edit_project/<project_id>', methods=['GET', 'POST'])
@login_required
def edit_project(project_id):
    """Edit a project's details and its site address.

    GET pre-populates the form from the stored project; a valid POST writes
    the form values back onto the project and its address, commits, and
    redirects to the project page.
    """
    project = Project.query.filter_by(id=project_id).first_or_404()
    form = EditProjectForm()
    if form.validate_on_submit():
        project.name = form.name.data
        project.headline = form.headline.data
        project.description = form.description.data
        project.completion_date = form.completion_date.data
        address = Address.query.get(project.address_id)
        address.address1 = form.address1.data
        address.address2 = form.address2.data
        address.city = form.city.data
        address.state = form.state.data
        address.zipcode = form.zipcode.data
        address.country = form.country.data
        save_tags(project, ProjectCreateInitialForm)
        # BUG FIX: the edits were never committed -- sibling handlers (e.g.
        # create) commit explicitly after save_tags, so do the same here or
        # the flashed "saved" message lies.
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('project.project', project_id=project.id))
    elif request.method == 'GET':
        form.name.data = project.name
        form.headline.data = project.headline
        form.description.data = project.description
        form.completion_date.data = project.completion_date
        form.address1.data = project.site.address1
        form.address2.data = project.site.address2
        form.city.data = project.site.city
        form.state.data = project.site.state
        form.zipcode.data = project.site.zipcode
        form.country.data = project.site.country
        #BUG: tags not populating
        form.tags.data = convertArrayToString(project.tags.all())
    return render_template('project/edit_project.html', title='Edit Project',
                           form=form)
@bp.route('/favorite/<project_id>')
@login_required
def favorite(project_id):
    """Mark the given project as a favorite of the current user."""
    target = Project.query.filter_by(id=project_id).first()
    # Guard chain: every branch flashes a message, then we redirect once.
    if target is None:
        flash('Project not found.')
    elif current_user.is_favorited(target):
        flash('You already favorited this project.')
    else:
        current_user.favorite(target)
        db.session.commit()
        flash('You favorited this project!')
    return redirect(url_for('project.project', project_id=project_id))
@bp.route('/unfavorite/<project_id>')
@login_required
def unfavorite(project_id):
    """Remove the given project from the current user's favorites."""
    target = Project.query.filter_by(id=project_id).first()
    # Guard chain: every branch flashes a message, then we redirect once.
    if target is None:
        flash('Project not found.')
    elif not current_user.is_favorited(target):
        flash('You already unfavorited this project.')
    else:
        current_user.unfavorite(target)
        db.session.commit()
        flash('You unfavorited this project!')
    return redirect(url_for('project.project', project_id=project_id))
##TODO: The contribution feature will need to be refactored; Feature will need the following: 1) contribution request form will need to allow users to indicate which project they are trying to contribute to and attach proof of contribution, 2) send email to platform support for verification, 3) support to send email back to approve or decline contribution request, 4) verified contributors will be identified as verified
@bp.route('/contribute/<project_id>')
@login_required
def contribute(project_id):
    """Record the current user as a contributor to the given project.

    Always redirects back to the project page, flashing a message that
    describes what happened.
    """
    project = Project.query.filter_by(id=project_id).first()
    if project is None:
        flash('Project not found.')
        return redirect(url_for('project.project', project_id=project_id))
    if current_user.has_contributed(project):
        # BUG FIX: message previously said "unfavorited" -- copy/paste slip
        # from the favorite/unfavorite handlers.
        flash('You already contributed to this project.')
        return redirect(url_for('project.project', project_id=project_id))
    current_user.contribute(project)
    db.session.commit()
    flash('You contributed to this project!')
    return redirect(url_for('project.project', project_id=project_id))
@bp.route('/uncontribute/<project_id>')
@login_required
def uncontribute(project_id):
    """Remove the current user from the given project's contributors."""
    target = Project.query.filter_by(id=project_id).first()
    # Guard chain: every branch flashes a message, then we redirect once.
    if target is None:
        flash('Project not found.')
    elif not current_user.has_contributed(target):
        flash('You already uncontributed this project.')
    else:
        current_user.uncontribute(target)
        db.session.commit()
        flash('You uncontributed to this project!')
    return redirect(url_for('project.project', project_id=project_id))
| iamjasonkuo/househunt | app/project/routes.py | routes.py | py | 14,693 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.last_seen",
"line_number": 21,
"usage_ty... |
9136295998 | #1.创建一个文件夹
import os
from multiprocessing import Pool,Manager
def copyFileTask(name, oldFolderName, newFolderName, queue):
    """Copy one file from oldFolderName to newFolderName, then report done.

    Worker task for the process pool: after the copy, the file name is put
    on *queue* so the parent process can track progress.

    BUG FIX: files are now opened in binary mode.  The original text-mode
    open crashed on (or corrupted) any non-text file and depended on the
    platform's default encoding.  Context managers also guarantee the
    handles are closed even if the copy raises.
    """
    src = os.path.join(oldFolderName, name)
    dst = os.path.join(newFolderName, name)
    with open(src, 'rb') as fr, open(dst, 'wb') as fw:
        fw.write(fr.read())
    queue.put(name)
def main():
    """Copy every file of a user-named folder into "<name>-复件" in parallel.

    Prompts for the folder name, fans the per-file copies out over a pool of
    5 worker processes, and prints a live progress percentage driven by a
    Manager queue that the workers report into.
    """
    # 0. ask which folder to copy (prompt string kept as-is, in Chinese)
    oldFolderName = input("请输入文件夹的名字:")
    newFolderName = oldFolderName + "-复件"
    os.mkdir(newFolderName)
    # 2. list the files to copy (top level only)
    fileName = os.listdir(oldFolderName)
    # 3. copy with a pool of worker processes; a Manager queue is picklable
    # and lets workers signal completion back to this process
    pool = Pool(5)
    queue = Manager().Queue()
    for name in fileName:
        pool.apply_async(copyFileTask, args=(name, oldFolderName, newFolderName, queue))
    # BUG FIX: close the pool once all tasks are submitted (and join at the
    # end) so worker processes are reaped instead of being left running.
    pool.close()
    num = 0
    allnum = len(fileName)
    while num != allnum:
        queue.get()
        num += 1
        copyRate = num / allnum
        print('\r copy的进度是:%.2f%%' % (copyRate * 100), end='')
    pool.join()
    print("\n 已完成copy")


if __name__ == "__main__":
    main()
{
"api_name": "os.mkdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Manager",
... |
6836039340 | from fastapi import APIRouter, Depends, Response
from queries.games import GamesQueries
from typing import Union
router = APIRouter()
@router.get("/api/games/{game_id}")
def get_game(
game_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_game_by_id(game_id)
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games/{game_id}/screenshots")
def get_game(
game_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_screenshots_by_id(game_id)
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games")
def get_games(
response: Response,
queries: GamesQueries = Depends(),
search: Union[str, None] = None,
):
if search is not None:
data = queries.get_games_by_search(search)
if data is None:
response.status_code = 404
else:
return data
else:
data = queries.get_all_games()
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/genres")
def get_genres(
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_all_genres()
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games/genres/{genre_id}")
def get_games_by_genre(
genre_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_games_by_genre(genre_id)
if data is None:
response.status_code = 404
else:
return data
| tinatran079/netstix | games/routers/games.py | games.py | py | 1,682 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.Response",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "queries.games.GamesQueries",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "fastapi.... |
38760601495 | import sys
import netCDF4
import math
import itertools
from functools import reduce
def array_pieces(ndarray, max_bytes=None, overlap=0):
    '''
    Generator to return a series of numpy arrays less than max_bytes in size
    and the offset of each within the complete data from a NetCDF variable.

    Parameters:
        ndarray: Numpy array or NetCDF array variable
        overlap: number of pixels to add to each edge of each piece
        max_bytes: Maximum number of bytes to retrieve. Defaults to 500,000,000 for NCI's OPeNDAP

    Yields:
        piece_array: array subset less than max_bytes in size
        array_offset: start indices of subset in whole array
    '''
    max_bytes = max_bytes or 500000000  # Defaults to 500MB for NCI's OPeNDAP
    array_shape = ndarray.shape
    array_dimensions = len(array_shape)
    # Overall array size in bytes
    array_bytes = ndarray.dtype.itemsize * \
        reduce(lambda x, y: x * y, array_shape)
    if array_bytes > max_bytes:  # Multiple pieces required
        # Number of divisions along each axis required to keep pieces under
        # max_bytes in size
        axis_divisions = int(math.ceil(
            math.pow(math.ceil(array_bytes / float(max_bytes)), 1.0 / array_dimensions)))
        # Chunk size for pieces, or default (1, 1) when no chunking is set
        try:
            chunking = ndarray.chunking() or (1, 1)
        except:  # Numpy arrays don't have chunking
            chunking = (1, 1)
        # Disregard chunking if it's too big to be useful
        chunking = [chunking[index] if chunking[index] < array_shape[index] // axis_divisions else 1
                    for index in range(array_dimensions)]
        # Piece shape rounded down to a whole number of chunks
        piece_shape = [array_shape[index] // axis_divisions // chunking[index]
                       * chunking[index] for index in range(array_dimensions)]
        # Total number of pieces along each axis.
        # BUG FIX: use true division here -- `float(a) // b` floors before
        # math.ceil, which dropped the remainder pieces at the array edges.
        axis_pieces = [int(math.ceil(float(array_shape[index]) / piece_shape[index]))
                       for index in range(array_dimensions)]
        # Iterate over every piece of the array
        for piece_indices in itertools.product(*[range(axis_pieces[dimension_index])
                                                 for dimension_index in range(array_dimensions)]):
            # Base start indices with no overlap
            start_indices = [piece_indices[dimension_index] * piece_shape[dimension_index]
                             for dimension_index in range(array_dimensions)]
            # End indices plus overlap, clipped to the array bounds
            end_indices = [min(start_indices[dimension_index] + piece_shape[dimension_index] + overlap,
                               array_shape[dimension_index])
                           for dimension_index in range(array_dimensions)]
            # Subtract overlap from base start indices, clipped to zero
            start_indices = [max(0, start_indices[dimension_index] - overlap)
                             for dimension_index in range(array_dimensions)]
            # BUG FIX: index with a *tuple* of slices -- indexing a numpy
            # array with a list of slices is an error in modern numpy.
            array_slices = tuple(slice(start_indices[dimension_index],
                                       end_indices[dimension_index])
                                 for dimension_index in range(array_dimensions))
            piece_array = ndarray[array_slices]
            yield piece_array, tuple(start_indices)
    else:  # Only one piece required
        yield ndarray[...], (0, 0)
def main():
    '''
    Main function for testing: iterate over the pieces of the first
    grid-mapped variable of a NetCDF file named on the command line,
    printing each piece's shape, offset and size.
    '''
    netcdf_path = sys.argv[1]
    netcdf_dataset = netCDF4.Dataset(netcdf_path)
    # Find variable with "grid_mapping" attribute - assumed to be 2D data
    # variable
    try:
        data_variable = [variable for variable in netcdf_dataset.variables.values(
        ) if hasattr(variable, 'grid_mapping')][0]
    except:
        raise Exception(
            'Unable to determine data variable (must have "grid_mapping" attribute')
    piece_count = 0
    for piece_array, array_offset in array_pieces(data_variable, overlap=0):
        piece_count += 1
        piece_bytes = data_variable.dtype.itemsize * \
            reduce(lambda x, y: x * y, piece_array.shape)
        # BUG FIX: the originals combined '%s' placeholders with
        # str.format(), which printed the template unchanged; use {}.
        print('piece_array.shape = {}, array_offset = {}, piece_bytes = {}'.format(
            piece_array.shape, array_offset, piece_bytes))
    print('piece_count = {}'.format(piece_count))


if __name__ == '__main__':
    main()
| GeoscienceAustralia/geophys_utils | geophys_utils/_array_pieces.py | _array_pieces.py | py | 4,493 | python | en | code | 22 | github-code | 6 | [
{
"api_name": "functools.reduce",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 32... |
30366634041 | """ Simple polygon plot.
The UI allows you to change some of the attributes of the plot.
"""
import numpy as np
from traits.api import HasTraits, Instance, Range
from traitsui.api import View, UItem, Item, Group, HGroup, VGroup, spring
from chaco.api import Plot, ArrayPlotData, PolygonPlot
from enable.api import ComponentEditor, LineStyle
class PolygonPlotDemo(HasTraits):
    """Traits UI demo: one PolygonPlot whose styling is editable live.

    The Range/LineStyle traits below are mirrored onto the renderer by the
    ``_<trait>_changed`` handlers (Traits static-notification naming
    convention), so moving a slider restyles the plot immediately.
    """

    # The main plot container.
    plot = Instance(Plot)

    # Data holder for `plot`.
    apd = Instance(ArrayPlotData)

    # The polygon plot renderer.
    polygon_plot = Instance(PolygonPlot)

    # Assorted styles that will be set on `polygon_plot`.
    edge_style = LineStyle
    edge_width = Range(value=1, low=0, high=8)
    edge_alpha = Range(value=1.0, low=0.0, high=1.0)
    face_alpha = Range(value=0.4, low=0.0, high=1.0)
    alpha = Range(value=1.0, low=0.0, high=1.0)

    # Layout: the plot on top, style controls beneath it.
    traits_view = View(
        VGroup(
            Group(
                UItem("plot", editor=ComponentEditor(), style="custom"),
            ),
            VGroup(
                HGroup(
                    Item("edge_style"),
                    spring,
                ),
                Item("edge_width"),
                Item("edge_alpha"),
                Item("face_alpha"),
                Item("alpha"),
            ),
        ),
        resizable=True,
    )

    # ----------------------------------------------------------------------
    # Default values
    # ----------------------------------------------------------------------

    def _apd_default(self):
        """Build the ArrayPlotData holding the polygon's vertices."""
        # Create the data to plot.
        px = np.array([0.5, 1.0, 2.0, 2.5, 2.0, 1.5, 0.5, 0.0])
        py = np.array([0.0, 0.8, 0.5, 3.0, 3.5, 2.0, 3.0, 0.5])

        # Create the ArrayPlotData container used by the Plot.
        apd = ArrayPlotData(px=px, py=py)
        return apd

    def _plot_default(self):
        """Create the Plot container bound to the vertex data."""
        plot = Plot(self.apd, title="PolygonPlot Demo")
        return plot

    def _polygon_plot_default(self):
        """Add the polygon renderer to the plot, seeded from the style traits."""
        p = self.plot.plot(
            ("px", "py"),
            type="polygon",
            # RGB plus the alpha trait makes the RGBA colors.
            face_color=(0, 0.8, 1) + (self.face_alpha,),
            edge_color=(0, 0, 0) + (self.edge_alpha,),
            edge_style=self.edge_style,
            alpha=self.alpha,
        )
        # plot() returns a list of renderers; keep the single polygon.
        return p[0]

    # ----------------------------------------------------------------------
    # Trait change handlers
    # ----------------------------------------------------------------------

    def _edge_style_changed(self):
        """Push the new dash style onto the renderer."""
        self.polygon_plot.edge_style = self.edge_style

    def _edge_width_changed(self):
        """Push the new edge width onto the renderer."""
        self.polygon_plot.edge_width = self.edge_width

    def _edge_alpha_changed(self):
        """Rebuild the edge RGBA color with the new alpha component."""
        self.polygon_plot.edge_color = self.polygon_plot.edge_color[:3] + (
            self.edge_alpha,
        )

    def _face_alpha_changed(self):
        """Rebuild the face RGBA color with the new alpha component."""
        self.polygon_plot.face_color = self.polygon_plot.face_color[:3] + (
            self.face_alpha,
        )

    def _alpha_changed(self):
        """Push the new overall alpha onto the renderer."""
        self.polygon_plot.alpha = self.alpha
# Module-level instance -- presumably so demo launchers can import `demo`
# directly (TODO confirm against the demo framework's conventions).
demo = PolygonPlotDemo()
# Hack to force initial rendering of the plot.
demo.face_alpha = 0.5

if __name__ == "__main__":
    demo.configure_traits()
| enthought/chaco | chaco/examples/demo/basic/polygon_plot_demo.py | polygon_plot_demo.py | py | 3,167 | python | en | code | 286 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "chaco.api.Plot",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "traits.ap... |
70097864829 | import pygame as pg
import sys
from pygame.sprite import Group
from constants import *
from settings import Settings
from ship import Ship
import functions as funcs
from stats import Stats
from button import Button
from score import Score
from sound import Sound
def main():
    """Run the Alien Invasion game: set up pygame, then loop over
    input handling, state updates and rendering, capped at 60 FPS."""
    sound = Sound()
    pg.init()
    clock = pg.time.Clock()
    game_settings = Settings()  # create an object from a class
    game_stats = Stats(game_settings)
    screen = pg.display.set_mode((game_settings.screen_width, game_settings.screen_height))
    pg.display.set_caption("Alien Invasion")
    spaceShip = Ship(screen, game_settings)
    bullets = Group()
    aliens = Group()
    funcs.create_fleet(screen, game_settings, aliens, spaceShip)
    play_btn = Button(game_settings, screen, "Play!")
    score = Score(game_settings, screen, game_stats)
    pg.mouse.set_visible(False)

    # sound.bgm.play(loops=-1)
    # pg.mixer.Channel(0).play(sound.bgm, loops=-1)

    font = pg.font.Font(None, 40)
    font_img = font.render("WELCOME", True, (50, 50, 50), (200, 200, 200))

    # main game loop
    while True:
        funcs.check_input_events(spaceShip, game_settings, screen, bullets, aliens, game_stats, play_btn, score, sound)

        if game_stats.game_state == GAME_STATE_MENU:
            # Simple static menu screen.
            screen.fill((100, 100, 100))
            screen.blit(font_img, (200, 200))
            pg.display.flip()
        elif game_stats.game_state == GAME_STATE_PLAY:
            # Only advance the simulation while the game is still running
            # (idiomatic truth test -- was `game_over == False`); the screen
            # is redrawn either way so the game-over UI stays visible.
            if not game_stats.game_over:
                spaceShip.update()
                funcs.update_bullets(bullets, aliens, game_settings, screen, spaceShip, game_stats, score, sound)
                funcs.update_fleet(game_settings, screen, game_stats, aliens, spaceShip, bullets, score)
            funcs.update_screen(screen, game_settings, game_stats, spaceShip, bullets, aliens, play_btn, score)

        clock.tick(60)  # cap the loop at 60 frames per second


if __name__ == '__main__':
    main()
| hoangdesu/Alien-Invasion-Pygame | main.py | main.py | py | 2,009 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sound.Sound",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_num... |
8522443313 | import requests
from io import BytesIO
import time
from PIL import UnidentifiedImageError
import warnings
class PlateClient:
    """Thin HTTP client for the plate-recognition service."""

    def __init__(self, url: str, timeout: float = 30.0):
        # Base URL of the service, e.g. 'http://127.0.0.1:8080/'.
        self.url = url
        # BUG FIX (robustness): per-request timeout in seconds.  Without an
        # explicit timeout, `requests` calls can block forever on an
        # unresponsive server.  Default keeps existing callers working.
        self.timeout = timeout

    def readNumber(self, im) -> str:
        """POST raw image bytes and return the recognised plate string."""
        res = requests.post(
            f'{self.url}/readNumber',
            headers={'Content-Type': 'application/x-www-form-urlencoded'},
            data=im,
            timeout=self.timeout)
        return res.json()['name']

    def getNumber(self, id) -> str:
        """Fetch the plate value for a single image id."""
        res = requests.get(f'{self.url}/getNumber?id={id}', timeout=self.timeout)
        return res.json()

    def getNumbers(self, ids) -> str:
        """Fetch plate values for a dash-separated list of image ids."""
        res = requests.get(f'{self.url}/getNumbers?ids={ids}', timeout=self.timeout)
        return res.json()
if __name__ == '__main__':
    # Manual smoke test against a locally running service: fetch two plates
    # by id and print the JSON response.
    client = PlateClient('http://127.0.0.1:8080/')
    res = client.getNumbers('10022-9965')
    print(res)
# if __name__ == '__main__':
# client = PlateClient('http://127.0.0.1:8080/')
# res = client.getNumber('10022')
# print(res)
# if __name__ == '__main__':
# client = PlateClient('http://127.0.0.1:8080/')
# with open('images/10022.jpg', 'rb') as im:
# res = client.getNumber(im)
# print(res) | alexej-anosov/aaa_backend_hw | src/plate_client.py | plate_client.py | py | 1,142 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.