seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37616127622 | import time
import os
from richxerox import *
from tinydb import TinyDB, where
HOME_DIR = 'static/db'
# Create the storage directory if it doesn't exist. os.makedirs with
# exist_ok=True replaces the old `os.system("mkdir ...")`, which spawned a
# shell and printed an error on every run after the first.
os.makedirs(HOME_DIR, exist_ok=True)
db = TinyDB('%s/db.json' % HOME_DIR)

# Poll the clipboard; whenever its text content changes (e.g. the user hits
# CMD+C), archive the full clipboard contents plus a screenshot.
currently_found_in_clipboard = paste(format='text')
while True:
    time.sleep(0.1)  # one tenth of a second between polls
    if paste(format='text') != currently_found_in_clipboard:
        currently_found_in_clipboard = paste(format='text')
        created_at = time.time()
        entry = {'content': pasteall(), 'created_at': int(created_at)}
        # The float timestamp keeps screenshot names unique.
        entry['screenshot'] = '%s/screen%s.png' % (HOME_DIR, created_at)
        # `screencapture` is macOS-only; this script will not work elsewhere.
        os.system("screencapture %s" % entry['screenshot'])
        db.insert(entry)
| pantacuzino/personalkb | script.py | script.py | py | 807 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 17,
... |
30609616360 | import numpy as np
import os
import Tools.FilesTool as FilesTool
import imblearn.over_sampling as over_sampling
class DataSetTool:
    """Utilities for defect-prediction datasets: metric compensation between
    a source and a target project, and loading/normalizing raw txt data."""

    # 2008-style metric compensation:
    # Mij in Target = (Mij in Target * Mean(Mj in Source)) / Mean(Mj in Target)
    @staticmethod
    def metric_compensation(source, target):
        """Rescale every target column by the source/target column-mean ratio.

        Mutates `target` in place and returns it (same contract as before).
        """
        for j in range(target.shape[1]):
            metric_mean_source = np.mean(source[:, j])
            metric_mean_target = np.mean(target[:, j])
            # Vectorized over rows; replaces the former per-sample loop with
            # an identical elementwise computation.
            target[:, j] = (target[:, j] * metric_mean_source) / metric_mean_target
        return target

    # 2017-adjusted metric compensation:
    # Mij in Source = (Mij in Source * Mean(Mj in Target)) / Mean(Mj in Source)
    @staticmethod
    def metric_compensation_adopt(source, target):
        """Rescale every source column by the target/source column-mean ratio.

        Mutates `source` in place and returns it.
        """
        for j in range(source.shape[1]):
            metric_mean_source = np.mean(source[:, j])
            metric_mean_target = np.mean(target[:, j])
            source[:, j] = (source[:, j] * metric_mean_target) / metric_mean_source
        return source

    # Read every file under a folder and return the prepared datasets.
    @staticmethod
    def init_data(folder_path, metrics_num, is_sample=True, is_normalized=True):
        """Load all .txt datasets under `folder_path`.

        metrics_num -- number of metric columns (label column excluded).
        is_sample -- rebalance each dataset with SMOTE when True.
        is_normalized -- apply global min-max scaling when True.
        Returns (data_list, label_list), one entry per file.
        """
        files = os.listdir(folder_path)
        data_list, label_list = [], []
        for file in files:
            # os.path.join tolerates a missing trailing separator; the old
            # string concatenation required callers to pass 'dir/'.
            file_path = os.path.join(folder_path, file)
            # Accept any capitalization of the txt extension.
            if FilesTool.file_type(file).lower() == 'txt':
                data_file = np.loadtxt(file_path, dtype=float, delimiter=',',
                                       usecols=range(0, metrics_num + 1))
                label_file = np.loadtxt(file_path, dtype=float, delimiter=',',
                                        usecols=metrics_num + 1)
                if is_normalized:
                    # Global (not per-column) min-max scaling. Note the max is
                    # taken after the min shift, matching the original code.
                    data_file -= data_file.min()
                    data_file /= data_file.max()
                    label_file -= label_file.min()
                    label_file /= label_file.max()
                data_list.append(data_file)
                label_list.append(label_file)
        # Resampling with SMOTE.
        # NOTE(review): SMOTE(kind=...) / fit_sample are the legacy imblearn
        # API (removed in newer releases, which use fit_resample) — confirm
        # the pinned imblearn version before upgrading.
        if is_sample:
            for index in range(len(data_list)):
                data_list[index], label_list[index] = over_sampling.SMOTE(kind='regular').fit_sample(
                    data_list[index], label_list[index])
        return data_list, label_list
| ylxieyu/HYDRA | DataSetTool.py | DataSetTool.py | py | 3,100 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "numpy.mean",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 30,
... |
71520783227 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from detectron2.detectron2.layers import FrozenBatchNorm2d, ShapeSpec, get_norm
_NORM = 'BN'
class Conv2d_BN(nn.Module):
    """Convolution followed by a norm layer and an optional activation."""

    def __init__(
        self,
        in_ch,
        out_ch,
        kernel_size=1,
        stride=1,
        pad=0,
        dilation=1,
        groups=1,
        bn_weight_init=1,
        act_layer=None,
    ):
        super().__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        # Bias is omitted on purpose: the norm layer supplies the shift.
        self.conv = torch.nn.Conv2d(
            in_ch, out_ch, kernel_size, stride, pad, dilation, groups, bias=False
        )
        self.bn = get_norm(_NORM, out_ch)
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)
        # He-style (fan-out) init for every conv weight owned by this module.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.out_channels * module.kernel_size[0] * module.kernel_size[1]
                module.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
        self.act_layer = nn.Identity() if act_layer is None else act_layer()

    def forward(self, x):
        """Apply convolution, normalization, then the activation."""
        return self.act_layer(self.bn(self.conv(x)))
class Dila_PRM(nn.Module):
    """Dilated multi-branch reduction module.

    Runs several parallel dilated convolutions over the input and fuses
    their outputs by channel concatenation followed by a 1x1 Conv2d_BN
    projection.

    Args:
        in_embed: input channel count.
        out_embed: output channel count after fusion.
        kernel_size: kernel size shared by all branches.
        downsample_ratio: stride of each branch (spatial reduction factor).
        dilations: dilation rate of each parallel branch.
        fusion: fusion strategy; only 'cat' is implemented.
    """
    # Fix: the default for `dilations` was a mutable list [2, 4, 6]; a tuple
    # avoids the shared-mutable-default pitfall and behaves identically here.
    def __init__(self, in_embed, out_embed, kernel_size=4, downsample_ratio=1,
                 dilations=(2, 4, 6), fusion='cat'):
        super().__init__()
        self.dilations = dilations
        self.in_embed = in_embed
        self.out_embed = out_embed
        self.fusion = fusion
        self.kernel_size = kernel_size
        self.stride = downsample_ratio
        # One conv per dilation rate; padding grows with the dilation so the
        # branch outputs keep matching spatial sizes and can be concatenated.
        self.convs = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(
                        in_channels=self.in_embed,
                        out_channels=self.in_embed,
                        kernel_size=self.kernel_size,
                        stride=self.stride,
                        padding=math.ceil(((self.kernel_size - 1) * self.dilations[idx]) / 2),
                        dilation=self.dilations[idx]),
                    nn.GELU()
                ) for idx in range(len(self.dilations))
            ]
        )
        if self.fusion == 'cat':
            self.outchans = self.in_embed * len(self.dilations)
            # Project the concatenated branches down to out_embed channels.
            self.aggerate = Conv2d_BN(self.in_embed * len(self.dilations), self.out_embed,
                                      act_layer=nn.Hardswish)
    def forward(self, x):
        """Run every dilated branch on x and fuse by channel concatenation."""
        B, C, H, W = x.shape
        # Stack branch outputs along a new trailing axis.
        out = self.convs[0](x).unsqueeze(dim=-1)
        for i in range(1, len(self.dilations)):
            cur_out = self.convs[i](x).unsqueeze(dim=-1)
            out = torch.cat((cur_out, out), dim=-1)
        # NOTE(review): names kept from the original; the middle two dims are
        # spatial in Conv2d's (H, W) order — the W/H labels look swapped but
        # the reshape below is self-consistent either way.
        B, C, W, H, N = out.shape
        if self.fusion == 'cat':
            # Move the branch axis next to channels and merge: (B, N*C, W, H).
            out = out.permute(0, 4, 1, 2, 3).reshape(B, N * C, W, H)
            out = self.aggerate(out)
return out | LiaoYun0x0/BiFormer | models/dila_prm.py | dila_prm.py | py | 4,008 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
71087023548 | """
Tests for the server.
Before running them, the server database should be restarted.
Run as: python server/tests/test_all.py (don't use py.test as it does not pass env variables easily)
"""
import os
import shutil
from pathlib import Path
from typing import List
import pytest
from requests.exceptions import HTTPError
import numpy as np
from src.al_loop import LeadCompound
from server.app import WORKSHOP_ORACLES
from solutions.task1.random_loop import RandomLoop
from src.server_wrapper import FlaskAppClient
from rich.console import Console
console = Console()
# Server port, overridable via the PORT env var (defaults to 5000).
PORT = int(os.environ.get("PORT", "5000"))
BASE_URL = "http://127.0.0.1:" + str(PORT)
# NOTE(review): this reassignment makes the localhost/PORT URL above dead
# code; presumably intentional to point tests at the workshop tunnel —
# confirm before removing either line.
BASE_URL = "http://mlinpl23.ngrok.io"
TEST_TOKEN_PREFIX = 'test-' # test-0, test-1, ...
def test_submitting_compounds_to_workshop_oracles():
    """Submits three simple molecules to the server using token test-0, to all workshop oracles."""
    client = FlaskAppClient(base_url=BASE_URL)
    token = TEST_TOKEN_PREFIX + '0'
    # Score the same three compounds against every workshop oracle.
    compounds = ["CCCCCCCCC", "CCCCCCCC", "CCCCCC=O"]
    for oracle in WORKSHOP_ORACLES:
        response = client.score_compounds_and_update_leaderboard(compounds, oracle, token)
        print(response)
        # Each response must expose metrics plus per-compound scores.
        for key in ("metrics", "compound_scores", "compound_sas_scores"):
            assert key in response
def _run_random_exploration(protein, token="test-1", steps=10):
    """Simple random exploration of ZINC. Should get above >0.5 score on each oracle."""
    base_dir = Path("tmp")
    shutil.rmtree(base_dir, ignore_errors=True)
    client = FlaskAppClient(base_url=BASE_URL)
    loop = RandomLoop(base_dir=base_dir, user_token=token, target=protein)
    budget_per_step = 100
    collected: List[LeadCompound] = []
    for step in range(steps):
        console.print(f"[red]Step {step}[/red]")
        proposals = loop.propose_candidates(budget_per_step)
        loop.test_in_lab_and_save(proposals, client=client)
        collected.extend(loop.load(iteration_id=step))
    # Rank all tested compounds by activity and report top-10 averages.
    ranked = sorted(collected, key=lambda c: c.activity, reverse=True)
    top10 = ranked[:10]
    metrics = {
        "top10": np.mean([c.activity for c in top10]),
        "top10_synth": np.mean([c.synth_score for c in top10]),
    }
    console.log(metrics)
    return metrics
def test_random_exploration_gets_reasonable_score():
    """Random search on every oracle should clear a minimal top-10 score."""
    for target in ('GSK3β', 'DRD2_server', 'JNK3'):
        console.log("Testing: " + target)
        result = _run_random_exploration(protein=target)
        assert result['top10'] > 0.1, "Random search should identify reasonable compounds"
def test_leaderboard_ordering_and_user_names():
    """Checks leaderboard user names and descending ordering by summed top-10 score."""
    # Two quick single-step runs so both tokens appear on the leaderboard.
    _run_random_exploration('DRD2_server', 'test-2', steps=1)
    _run_random_exploration('DRD2_server', 'test-3', steps=1)
    client = FlaskAppClient(base_url=BASE_URL)
    all_results = client.all_results()
    users = [r['user'] for r in all_results]
    print(users)
    assert 'user-2' in users
    assert 'user-3' in users
    all_proteins = ['DRD2', 'JNK3', 'GSK3β']
    # Bug fix: the comprehension previously summed all_results[0]'s metrics
    # for every entry (the loop variable `r` was unused), so all elements of
    # `sums` were identical and the ordering assertion below was vacuous.
    sums = [sum(r['metrics'][p + "_top_10"] for p in all_proteins) for r in all_results]
    assert sums[0] == max(sums), "First result in the leaderboard should be the maximum sum of top10 scores"
def test_call_limits():
    """Exceeding the per-token scoring budget must raise RuntimeError."""
    token = 'test-10'
    workdir = Path("tmp")
    shutil.rmtree(workdir, ignore_errors=True)
    loop = RandomLoop(base_dir=workdir, user_token=token, target='DRD2')
    client = FlaskAppClient(base_url=BASE_URL)
    # Exhaust the allowed budget.
    loop.test_in_lab_and_save(loop.propose_candidates(1000), client=client)
    # One more batch must be rejected by the server.
    extra = loop.propose_candidates(100)
    smiles = [c.smiles for c in extra]
    with pytest.raises(RuntimeError):
        client.score_compounds_and_update_leaderboard(smiles, user_token=token, oracle_name='DRD2')
def test_get_all_scores():
    """all_scores must report one score and one SAS score per tested compound."""
    workdir = Path("tmp")
    token = 'test-40'
    shutil.rmtree(workdir, ignore_errors=True)
    loop = RandomLoop(base_dir=workdir, user_token=token, target='GSK3β_server')
    client = FlaskAppClient(base_url=BASE_URL)
    # Submit one batch of random candidates.
    candidates = loop.test_in_lab_and_save(loop.propose_candidates(100), client=client)
    console.log(client.all_scores(token))
    assert len(client.all_scores(token)['compound_sas_scores']['GSK3β']) == len(candidates)
    assert len(client.all_scores(token)['compound_scores']['GSK3β']) == len(candidates)
if __name__ == "__main__":
    # Manual runner: per the module docstring, these are run directly (not
    # via py.test) so env variables like PORT are easy to pass.
    test_submitting_compounds_to_workshop_oracles()
    test_random_exploration_gets_reasonable_score()
    test_leaderboard_ordering_and_user_names()
    test_call_limits()
    test_get_all_scores()
console.log("[green] Tests passed [/green]") | molecule-one/mlinpl-23-workshops | server/tests/test_all.py | test_all.py | py | 4,995 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rich.console.Console",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "src.server_wrappe... |
30137635 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
url = "http://localhost:6332"
headers = {'content-type': 'application/json'}
def get_result(payload):
    """POST a JSON-RPC payload to the wallet daemon; return the reply as a JSON string."""
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    return json.dumps(response.json())
def get_all_address():
    """Return a JSON list of wallet addresses (third '-'-separated field of each entry)."""
    payload = {
        "method": "getalladdress",
        "params": {},
        "jsonrpc": "2.0",
        "id": 1,
    }
    content = json.loads(get_result(payload))["result"]
    # Each entry looks like "x-y-ADDRESS"; keep only the address part.
    address_arr = [entry.split("-")[2] for entry in content]
    return json.dumps(address_arr)
def get_balance():
    """Return total/pending/stable balances (converted from base units) as JSON."""
    payload = {
        "method": "getbalance",
        "params": {},
        "jsonrpc": "2.0",
        "id": 1,
    }
    base = json.loads(get_result(payload))["result"]["base"]
    # Daemon reports amounts in millionths of a token.
    pending = base["pending"] / 1000000
    stable = base["stable"] / 1000000
    return json.dumps({"balance": pending + stable, "pending": pending, "stable": stable})
def check_address(address):
    """Ask the daemon whether `address` is valid; return the raw JSON reply."""
    request = {
        "method": "checkAddress",
        "params": [address],
        "jsonrpc": "2.0",
        "id": 1,
    }
    return get_result(request)
def pay(address, amount, msg):
    """Send `amount` tokens to `address`, optionally attaching a message.

    Amounts are converted to the daemon's base unit (millionths).
    """
    if msg:
        method = "sendtoaddresswithmessage"
        params = [address, amount * 1000000, msg]
    else:
        method = "sendtoaddress"
        params = [address, amount * 1000000]
    payload = {
        "method": method,
        "params": params,
        "jsonrpc": "2.0",
        "id": 1,
    }
    return get_result(payload)
| taozywu/token_light | rpc.py | rpc.py | py | 1,701 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2... |
29456781892 | from six.moves.urllib import parse
import tarfile
from lxml import etree
from atrope import exception
SPECS = {
'http://www.vmware.com/interfaces/specifications/vmdk.html': 'vmdk',
'https://people.gnome.org/~markmc/qcow-image-format.html': 'qcow',
}
def _get_tarfile(ova):
if not tarfile.is_tarfile(ova):
raise exception.CannotOpenFile(reason="not a valid 'tar' file")
return tarfile.open(ova)
def extract_file(ova, filename):
    """Return a file object for *filename* stored inside the OVA archive."""
    archive = _get_tarfile(ova)
    return archive.extractfile(filename)
def get_disk_name(ovf):
    """Get the disk format and file name from a OVF descriptor.

    Returns (format, filename) where format is one of the SPECS values,
    or (None, None) when the descriptor has no DiskSection.
    """
    root = etree.fromstring(ovf)
    # Build fully-qualified attribute names using the document's OVF namespace.
    ovf_ns = root.nsmap['ovf']
    id_attr = '{%s}id' % ovf_ns
    href_attr = '{%s}href' % ovf_ns
    # Map file id -> file name from the References section.
    files = {f.get(id_attr): f.get(href_attr) for f in
             root.findall('ovf:References/ovf:File', root.nsmap)}
    # we do not care about more than one disk
    disk = root.find('ovf:DiskSection/ovf:Disk', root.nsmap)
    if disk is not None:
        format_attr = '{%s}format' % ovf_ns
        fileref_attr = '{%s}fileRef' % ovf_ns
        ovf_format = disk.get(format_attr)
        if not ovf_format:
            raise Exception("Expecting some format!")
        # The format attribute is a URL, possibly with a fragment; strip the
        # fragment and map the base URL onto a known disk format via SPECS.
        (format_url, _) = parse.urldefrag(ovf_format)
        try:
            disk_format = SPECS[format_url]
        except KeyError:
            raise Exception("Unknown format!")
        # Resolve the disk's file reference against the References map.
        try:
            disk_file = files[disk.get(fileref_attr)]
        except KeyError:
            raise Exception("Unknown disk!")
        return (disk_format, disk_file)
    return None, None
def get_ovf(ova):
    """Return an OVF descriptor as stored in an OVA file, if any.

    Raises InvalidOVAFile when the archive holds no .ovf member.
    """
    tf = _get_tarfile(ova)
    # The descriptor is the first member whose name ends in ".ovf".
    for name in tf.getnames():
        if name.endswith(".ovf"):
            return tf.extractfile(name).read()
    raise exception.InvalidOVAFile(reason="cannot find a .ovf descriptor")
return ovf
| alvarolopez/atrope | atrope/ovf.py | ovf.py | py | 1,986 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tarfile.is_tarfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "atrope.exception.CannotOpenFile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "atrope.exception",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "t... |
37188371889 | import os
import csv
import math
import numpy as np
import nltk
from nltk.corpus import stopwords
import collections
import string
import re
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import time
cachedStopWords = stopwords.words("english")
if __name__ == '__main__':
    print("Starting Main")
    startTime = time.time()

    def load_annotations(path, texts, labels, fraud, radical, violence):
        """Append annotated rows from `path` (header skipped) to the lists.

        Each valid row is [id, text, fraud, radical, violence]; label cells
        are binarized: '0' -> 0, anything else -> 1.
        Replaces two duplicated inline loops; also drops the old trailing
        `csvFile.close` (a bare attribute access, never called — the `with`
        block already closes the file).
        """
        with open(path, 'r', encoding="utf8") as csv_file:
            reader = csv.reader(csv_file)
            for row_idx, row in enumerate(reader):
                if len(row) == 5 and row_idx != 0:
                    texts.append(row[1])
                    binarized = [0 if cell == '0' else 1 for cell in row[2:5]]
                    fraud.append(binarized[0])
                    radical.append(binarized[1])
                    violence.append(binarized[2])
                    labels.append(binarized)

    def clean_text(text):
        """Strip punctuation, collapse whitespace, lowercase, drop stopwords."""
        text = re.sub(r'[^\w\s]', ' ', text)
        text = text.lower()
        return ' '.join(word for word in text.split() if word not in cachedStopWords)

    def build_lstm(vocab_size, input_length):
        """Embedding -> LSTM(100) -> sigmoid binary classifier (was built
        three times inline with identical hyperparameters)."""
        model = Sequential()
        model.add(Embedding(vocab_size, 100, input_length=input_length))
        model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model

    def train_and_score(X_train, X_test, y_train, y_test):
        """Train a fresh LSTM on one label and return its test accuracy."""
        model = build_lstm(vocabSize, max_review_length)
        model.fit(X_train, y_train, epochs=10, batch_size=100)
        return model.evaluate(X_test, y_test, verbose=100)[1]

    x, y = [], []
    fraud, radical, violence = [], [], []
    load_annotations("annotPart1.csv", x, y, fraud, radical, violence)
    load_annotations("annot_part2.csv", x, y, fraud, radical, violence)
    print("Size of x:", len(x), " Size of y:", len(y))

    X = [clean_text(t) for t in x]
    print("Type of X:", type(X))

    Features = X
    Fraud, Radical, Violence = fraud, radical, violence

    kf = KFold(n_splits=10)
    iteration = 0
    gFraudAccu = gRadicalAccu = gViolenceAccu = gTotalAccu = 0
    vocabSize = 50000
    max_review_length = 180  # hoisted: was re-assigned identically every fold
    tokenizer = Tokenizer(num_words=vocabSize)
    # fit_on_texts mutates the tokenizer and returns None; the old code
    # uselessly captured that None in a variable.
    tokenizer.fit_on_texts(X)

    for train_index, test_index in kf.split(Features):
        iteration += 1
        print("\n\n\n\nMaking nueral Network for iteration:", iteration)
        iterStart = time.time()
        # Split features and every label set for this fold.
        X_Train = [Features[i] for i in train_index]
        X_Test = [Features[i] for i in test_index]
        splits = {}
        for name, label_values in (("Fraud", Fraud), ("Radical", Radical), ("Violence", Violence)):
            splits[name] = ([label_values[i] for i in train_index],
                            [label_values[i] for i in test_index])
        X_Train = sequence.pad_sequences(tokenizer.texts_to_sequences(X_Train),
                                         maxlen=max_review_length, padding='post')
        X_Test = sequence.pad_sequences(tokenizer.texts_to_sequences(X_Test),
                                        maxlen=max_review_length, padding='post')

        # Train one identical model per label (previously three copy-pasted
        # blocks) and report per-label timing and class balance.
        accuracies = {}
        prev = iterStart
        for name in ("Fraud", "Radical", "Violence"):
            y_train, y_test = splits[name]
            accuracies[name] = train_and_score(X_Train, X_Test, y_train, y_test)
            now = time.time()
            print("\n%s Training Done for Iteration" % name, iteration, "\nTime:", now - prev)
            positives = [v for v in y_test if v == 1]
            print("Number of positive Examples : ", len(positives),
                  " ratio : ", (len(positives) / len(y_test)))
            prev = now

        accuFraudLstm = accuracies["Fraud"]
        accuRadicalLstm = accuracies["Radical"]
        accuViolenceLstm = accuracies["Violence"]
        totalAccu = (accuViolenceLstm + accuRadicalLstm + accuFraudLstm) / 3
        gFraudAccu += accuFraudLstm
        gViolenceAccu += accuViolenceLstm
        gRadicalAccu += accuRadicalLstm
        gTotalAccu += totalAccu
        iterEndTime = time.time()
        print("\n\nAccuracyScores for LSTM Iteration:", iteration, "\nFraud: ", accuFraudLstm,
              "\nRadical: ", accuRadicalLstm, "\nViolence: ", accuViolenceLstm,
              "\nTotal Accuracy:", totalAccu, "\nTotal Time:", iterEndTime - iterStart)

    # Average the per-fold accuracies over the 10 folds.
    gFraudAccu /= 10
    gViolenceAccu /= 10
    gRadicalAccu /= 10
    gTotalAccu /= 10
    endTime = time.time()
    print("\n\n\n\nOverall AccuracyScores for LSTM :", "\nFraud: ", gFraudAccu,
          "\nRadical: ", gRadicalAccu, "\nViolence: ", gViolenceAccu,
          "\nTotal Accuracy:", gTotalAccu, "\nTime:", endTime - startTime)
| arinjayakhare1/Real-Time-Tweet-Classifier-using-RLAN | test/old codes/testWithThreads/initTrainer/old Programs/initTrainer.py | initTrainer.py | py | 6,270 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.reade... |
73730902266 | import tensorflow as tf
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import pandas as pd
from NS_model_tf import Sampler, Navier_Stokes2D
if __name__ == '__main__':
def U_gamma_1(x):
num = x.shape[0]
return np.tile(np.array([1.0, 0.0]), (num, 1))
def U_gamma_2(x):
num = x.shape[0]
return np.zeros((num, 2))
def f(x):
num = x.shape[0]
return np.zeros((num, 2))
    def operator(psi, p, x, y, Re, sigma_x=1.0, sigma_y=1.0):
        """Steady 2-D Navier-Stokes momentum residuals in stream-function form.

        psi: stream function; p: pressure; x, y: coordinates; Re: Reynolds
        number; sigma_x/sigma_y: input-scaling factors divided out of every
        derivative by the chain rule. Returns the (u, v) momentum residuals.
        """
        # Velocities from the stream function (u = dpsi/dy, v = -dpsi/dx),
        # which satisfies continuity by construction.
        u = tf.gradients(psi, y)[0] / sigma_y
        v = - tf.gradients(psi, x)[0] / sigma_x
        # First derivatives of velocity and pressure.
        u_x = tf.gradients(u, x)[0] / sigma_x
        u_y = tf.gradients(u, y)[0] / sigma_y
        v_x = tf.gradients(v, x)[0] / sigma_x
        v_y = tf.gradients(v, y)[0] / sigma_y
        p_x = tf.gradients(p, x)[0] / sigma_x
        p_y = tf.gradients(p, y)[0] / sigma_y
        # Second derivatives for the viscous terms.
        u_xx = tf.gradients(u_x, x)[0] / sigma_x
        u_yy = tf.gradients(u_y, y)[0] / sigma_y
        v_xx = tf.gradients(v_x, x)[0] / sigma_x
        v_yy = tf.gradients(v_y, y)[0] / sigma_y
        # Residuals: convection + pressure gradient - diffusion / Re.
        Ru_momentum = u * u_x + v * u_y + p_x - (u_xx + u_yy) / Re
        Rv_momentum = u * v_x + v * v_y + p_y - (v_xx + v_yy) / Re
        return Ru_momentum, Rv_momentum
# Parameters of equations
Re = 100.0
# Domain boundaries
bc1_coords = np.array([[0.0, 1.0],
[1.0, 1.0]])
bc2_coords = np.array([[0.0, 0.0],
[0.0, 1.0]])
bc3_coords = np.array([[1.0, 0.0],
[1.0, 1.0]])
bc4_coords = np.array([[0.0, 0.0],
[1.0, 0.0]])
dom_coords = np.array([[0.0, 0.0],
[1.0, 1.0]])
# Create boundary conditions samplers
bc1 = Sampler(2, bc1_coords, lambda x: U_gamma_1(x), name='Dirichlet BC1')
bc2 = Sampler(2, bc2_coords, lambda x: U_gamma_2(x), name='Dirichlet BC2')
bc3 = Sampler(2, bc3_coords, lambda x: U_gamma_2(x), name='Dirichlet BC3')
bc4 = Sampler(2, bc4_coords, lambda x: U_gamma_2(x), name='Dirichlet BC4')
bcs_sampler = [bc1, bc2, bc3, bc4]
# Create residual sampler
res_sampler = Sampler(2, dom_coords, lambda x: f(x), name='Forcing')
# Define model
mode = 'M1'
layers = [2, 50, 50, 50, 2]
model = Navier_Stokes2D(layers, operator, bcs_sampler, res_sampler, Re, mode)
# Train model
model.train(nIter=40001, batch_size=128)
# Test Data
nx = 100
ny = 100 # change to 100
x = np.linspace(0.0, 1.0, nx)
y = np.linspace(0.0, 1.0, ny)
X, Y = np.meshgrid(x, y)
X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None]))
# Predictions
psi_pred, p_pred = model.predict_psi_p(X_star)
u_pred, v_pred = model.predict_uv(X_star)
psi_star = griddata(X_star, psi_pred.flatten(), (X, Y), method='cubic')
p_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic')
u_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic')
v_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic')
velocity = np.sqrt(u_pred**2 + v_pred**2)
velocity_star = griddata(X_star, velocity.flatten(), (X, Y), method='cubic')
# Reference
u_ref= np.genfromtxt("reference_u.csv", delimiter=',')
v_ref= np.genfromtxt("reference_v.csv", delimiter=',')
velocity_ref = np.sqrt(u_ref**2 + v_ref**2)
# Relative error
error = np.linalg.norm(velocity_star - velocity_ref.T, 2) / np.linalg.norm(velocity_ref, 2)
print('l2 error: {:.2e}'.format(error))
### Plot ###
###########
# Reference solution & Prediceted solution
fig_1 = plt.figure(1, figsize=(18, 5))
fig_1.add_subplot(1, 3, 1)
plt.pcolor(X.T, Y.T, velocity_ref, cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Reference Velocity')
fig_1.add_subplot(1, 3, 2)
plt.pcolor(x, Y, velocity_star, cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Predicted Velocity')
plt.tight_layout()
fig_1.add_subplot(1, 3, 3)
plt.pcolor(X, Y, np.abs(velocity_star - velocity_ref.T), cmap='jet')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Absolute Error')
plt.show()
    ## Loss ##
    # Loss histories recorded by the model during training.
    loss_res = model.loss_res_log
    loss_bcs = model.loss_bcs_log
    fig_2 = plt.figure(2)
    ax = fig_2.add_subplot(1, 1, 1)
    ax.plot(loss_res, label='$\mathcal{L}_{r}$')
    ax.plot(loss_bcs, label='$\mathcal{L}_{u_b}$')
    ax.set_yscale('log')
    ax.set_xlabel('iterations')
    ax.set_ylabel('Loss')
    plt.legend()
    plt.tight_layout()
    plt.show()
    ## Adaptive Constant
    # History of the adaptive weighting constant applied to the boundary loss.
    adaptive_constant = model.adpative_constant_bcs_log
    fig_3 = plt.figure(3)
    ax = fig_3.add_subplot(1, 1, 1)
    ax.plot(adaptive_constant, label='$\lambda_{u_b}$')
    ax.set_xlabel('iterations')
    plt.legend()
    plt.tight_layout()
    plt.show()
    ## Gradients #
    # Per-layer gradient distributions of each loss term (last recorded step).
    data_gradients_res = model.dict_gradients_res_layers
    data_gradients_bcs = model.dict_gradients_bcs_layers
    num_hidden_layers = len(layers) -1
    cnt = 1
    fig_4 = plt.figure(4, figsize=(13, 4))
    for j in range(num_hidden_layers):
        ax = plt.subplot(1, 4, cnt)
        ax.set_title('Layer {}'.format(j + 1))
        ax.set_yscale('symlog')
        gradients_res = data_gradients_res['layer_' + str(j + 1)][-1]
        gradients_bcs = data_gradients_bcs['layer_' + str(j + 1)][-1]
        # NOTE(review): seaborn.distplot is deprecated in recent seaborn
        # releases (histplot/kdeplot replace it) — confirm the pinned version.
        sns.distplot(gradients_res, hist=False,
                     kde_kws={"shade": False},
                     norm_hist=True, label=r'$\nabla_\theta \mathcal{L}_r$')
        sns.distplot(gradients_bcs, hist=False,
                     kde_kws={"shade": False},
                     norm_hist=True, label=r'$\nabla_\theta \mathcal{L}_{u_b}$')
        ax.get_legend().remove()
        ax.set_xlim([-1.0, 1.0])
        ax.set_ylim([0, 100])
        cnt += 1
    # Shared legend for all subplots, placed below the figure.
    handles, labels = ax.get_legend_handles_labels()
    fig_4.legend(handles, labels, loc="upper left", bbox_to_anchor=(0.35, -0.01),
                 borderaxespad=0, bbox_transform=fig_4.transFigure, ncol=2)
    plt.tight_layout()
    plt.show()
| PredictiveIntelligenceLab/GradientPathologiesPINNs | Lid-driven Cavity/NS.py | NS.py | py | 6,568 | python | en | code | 134 | github-code | 6 | [
{
"api_name": "numpy.tile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 2... |
859362304 | from __future__ import division
from vistrails.core.modules.vistrails_module import Module
from ..common import get_numpy
from ..read.read_numpy import NumPyArray
class WriteNumPy(Module):
    """Writes a list as a Numpy file.

    NumPy can use one of two schemes: either 'plain' binary arrays, i.e. just
    the binary representation of the data format (in this case you must specify
    the exact format to get the original data back), or the NPY format, i.e.
    .npy files that know what the actual structure of the array is.
    """
    _input_ports = [
            ('array', '(org.vistrails.vistrails.basic:List)'),
            ('datatype', '(org.vistrails.vistrails.basic:String)',
             {'entry_types': "['enum']",
              'values': "[%r]" % NumPyArray.FORMATS})]
    _output_ports = [('file', '(org.vistrails.vistrails.basic:File)')]

    def compute(self):
        """Coerce the input to an ndarray and write it in the requested format."""
        numpy = get_numpy()
        values = self.get_input('array')
        if not isinstance(values, numpy.ndarray):
            values = numpy.array(values)
        fmt = NumPyArray.get_format(self.get_input('datatype'))
        if fmt is NumPyArray.NPY_FMT:
            # Self-describing ".npy" container.
            fileobj = self.interpreter.filePool.create_file(suffix='.npy')
            numpy.save(fileobj.name, values)
        else:
            # Raw binary dump; the reader must know the exact dtype.
            fileobj = self.interpreter.filePool.create_file(suffix='.dat')
            values.astype(fmt).tofile(fileobj.name)
        self.set_output('file', fileobj)
_modules = [WriteNumPy]
###############################################################################
import unittest
class WriteNumpyTestCase(unittest.TestCase):
    def test_raw_numpy(self):
        """Uses WriteNumPy to write an array in raw format.
        """
        import array
        from vistrails.tests.utils import execute, intercept_result
        from ..identifiers import identifier
        with intercept_result(WriteNumPy, 'file') as results:
            self.assertFalse(execute([
                    ('write|WriteNumPy', identifier, [
                        ('array', [('List', '[0, 1, 258, 6758]')]),
                        ('datatype', [('String', 'uint32')]),
                    ]),
                ]))
        self.assertEqual(len(results), 1)
        # Little-endian uint32 encoding of [0, 1, 258, 6758], byte by byte.
        expected_bytes = [0, 0, 0, 0,
                          1, 0, 0, 0,
                          2, 1, 0, 0,
                          102, 26, 0, 0]
        with open(results[0].name, 'rb') as fp:
            # NOTE(review): array.tostring() was removed in Python 3.9
            # (tobytes() is the Python 3 replacement), but tostring() is the
            # only spelling that also exists on Python 2 — confirm the
            # supported interpreter range before changing it.
            self.assertEqual(fp.read(),
                             array.array('B', expected_bytes).tostring())
    def test_npy_numpy(self):
        """Uses WriteNumPy to write an array in .NPY format.
        """
        import numpy
        from vistrails.tests.utils import execute, intercept_result
        from ..identifiers import identifier
        with intercept_result(WriteNumPy, 'file') as results:
            self.assertFalse(execute([
                    ('write|WriteNumPy', identifier, [
                        ('array', [('List', '[0, 1, 258, 6758]')]),
                        ('datatype', [('String', 'npy')]),
                    ]),
                ]))
        self.assertEqual(len(results), 1)
        # .npy files are self-describing, so numpy.load restores the values.
        self.assertEqual(list(numpy.load(results[0].name)), [0, 1, 258, 6758])
    def test_write_read(self):
        """Uses WriteNumPy and NumPyArray to write then read an array.
        """
        from vistrails.tests.utils import execute, intercept_result
        from ..identifiers import identifier
        # Round-trip through both supported schemes; the raw scheme relies on
        # the reader being given the same dtype string.
        for dtype in ('npy', 'uint32'):
            with intercept_result(NumPyArray, 'value') as results:
                self.assertFalse(execute([
                        ('write|WriteNumPy', identifier, [
                            ('array', [('List', '[0, 1, 258, 6758]')]),
                            ('datatype', [('String', dtype)]),
                        ]),
                        ('read|NumPyArray', identifier, [
                            ('datatype', [('String', dtype)]),
                        ]),
                    ], [
                        (0, 'file', 1, 'file'),
                    ]))
            self.assertEqual(len(results), 1)
            self.assertEqual(list(results[0]), [0, 1, 258, 6758])
| VisTrails/VisTrails | vistrails/packages/tabledata/write/write_numpy.py | write_numpy.py | py | 4,325 | python | en | code | 100 | github-code | 6 | [
{
"api_name": "vistrails.core.modules.vistrails_module.Module",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "read.read_numpy.NumPyArray.FORMATS",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "read.read_numpy.NumPyArray",
"line_number": 21,
"us... |
73928012028 | import collections
import random
import unittest
import mock
from cardboard import card as c, events, zone as z
from cardboard.tests.util import GameTestCase
from cardboard.util import ANY
ENTER, LEAVE = events.ENTERED_ZONE, events.LEFT_ZONE
class ZoneTest(GameTestCase):
    # NOTE(review): class-level mock shared by every test method and
    # subclass — state set on it in one test leaks into the next; confirm
    # that is intended.
    card = mock.Mock(spec=c.Card)
    def setUp(self):
        super(ZoneTest, self).setUp()
        # Both zones start from the same library contents; `u` ignores
        # ordering, `o` preserves insertion order.
        self.u = z.UnorderedZone(
            name="Emerald Hill", game=self.game, contents=self.library,
        )
        self.o = z.OrderedZone(
            name="Casino Night", game=self.game, contents=self.library,
        )
class TestZones(ZoneTest):
    def test_name(self):
        # Names are taken verbatim from the constructor.
        self.assertEqual(self.u.name, "Emerald Hill")
        self.assertEqual(self.o.name, "Casino Night")
    def test_ordered(self):
        # Only OrderedZone reports itself as ordered.
        self.assertFalse(self.u.ordered)
        self.assertTrue(self.o.ordered)
    def test_str_repr(self):
        # str() is the bare name; repr() wraps it in "<Zone: ...>".
        self.assertEqual(str(self.u), "Emerald Hill")
        self.assertEqual(str(self.o), "Casino Night")
        self.assertEqual(repr(self.u), "<Zone: Emerald Hill>")
        self.assertEqual(repr(self.o), "<Zone: Casino Night>")
    def test_contains(self):
        # Every library card is in both zones; a fresh object is in neither.
        for i in self.library:
            self.assertIn(i, self.u)
            self.assertIn(i, self.o)
        self.assertNotIn(object(), self.u)
        self.assertNotIn(object(), self.o)
    def test_iter(self):
        # Unordered zone iterates as a set; ordered zone preserves order.
        self.assertEqual(set(self.u), set(self.library))
        self.assertEqual(list(self.o), self.library)
    def test_len(self):
        self.assertEqual(len(self.u), len(self.library))
        self.assertEqual(len(self.o), len(self.library))
    def test_add(self):
        # Adding triggers an ENTERED_ZONE event and stores the card.
        with self.assertTriggers(event=ENTER, card=30, zone=self.u):
            self.u.add(30)
        with self.assertTriggers(event=ENTER, card=30, zone=self.o):
            self.o.add(30)
        self.assertEqual(set(self.u), set(self.library) | {30})
        self.assertEqual(list(self.o), self.library + [30])
    def test_add_already_contains(self):
        # Re-adding a contained card raises ValueError; the message depends
        # on whether the zone has an owner.
        NO_OWNER, OWNER = "on the {}", "in {}'s {}"
        u, o = self.u.name, self.o.name
        n = mock.Mock()
        self.u.add(n)
        self.o.add(n)
        self.resetEvents()
        with self.assertRaisesRegexp(ValueError, NO_OWNER.format(u)):
            self.u.add(n)
        with self.assertRaisesRegexp(ValueError, NO_OWNER.format(o)):
            self.o.add(n)
        with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, u)):
            self.u.owner = n.owner
            self.u.add(n)
        with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, o)):
            self.o.owner = n.owner
            self.o.add(n)
        # wasn't added twice nor removed
        self.assertIn(self.library[0], self.u)
        self.assertEqual(self.o.count(self.library[0]), 1)
        self.assertFalse(self.events.trigger.called)
def test_add_owner_redirection(self):
"""
Adding a card with a different owner than the zone's redirects.
"""
card = mock.Mock()
self.u.name, self.o.name = "foo", "bar"
self.u.owner, self.o.owner = mock.Mock(), mock.Mock()
self.u.add(card)
self.o.add(card)
card.owner.foo.add.assert_called_once_with(card)
card.owner.bar.add.assert_called_once_with(card)
def test_move(self):
self.o.add(self.card)
self.card.zone = self.o # on actual cards this is a property
with self.assertTriggers(event=ENTER, card=self.card, zone=self.u):
self.u.move(self.card)
self.card.zone = self.u
self.assertIn(self.card, self.u)
with self.assertTriggers(event=ENTER, card=self.card, zone=self.o):
self.o.move(self.card)
self.assertIn(self.card, self.o)
def test_move_to_self(self):
self.resetEvents()
# shouldn't even be checking library[0].zone
with self.assertRaises(ValueError):
self.u.move(self.library[0])
with self.assertRaises(ValueError):
self.o.move(self.library[0])
# wasn't added twice nor removed
self.assertIn(self.library[0], self.u)
self.assertEqual(self.o.count(self.library[0]), 1)
self.assertFalse(self.events.trigger.called)
def test_pop(self):
self.resetEvents()
e = self.u.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=e, zone=self.u)])
self.resetEvents()
f = self.o.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=f, zone=self.o)])
self.assertEqual(set(self.u), set(self.library) - {e})
self.assertEqual(list(self.o), self.library[:-1])
def test_remove(self):
e = self.library[-7]
self.library.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.u):
self.u.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.o):
self.o.remove(e)
self.assertEqual(set(self.u), set(self.library))
self.assertEqual(list(self.o), self.library)
self.assertRaises(ValueError, self.u.remove, object())
self.assertRaises(ValueError, self.o.remove, object())
def test_update(self):
self.u.update(range(4))
for i in range(4):
self.assertIn(i, self.u)
self.assertEqual(len(self.u), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.u) for i in range(4)]
self.assertLastEventsWere(evs)
self.resetEvents()
self.o.update(range(4))
self.assertEqual(self.o[-4:], range(4))
self.assertEqual(len(self.o), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.o) for i in range(4)]
self.assertLastEventsWere(evs)
def test_silent(self):
self.o.add(self.card)
self.card.zone = self.o
self.resetEvents()
self.u.add(20, silent=True)
self.o.add(20, silent=True)
self.u.remove(self.library[0], silent=True)
self.o.remove(self.library[0], silent=True)
self.u.pop(silent=True)
self.o.pop(silent=True)
self.u.move(self.card, silent=True)
self.card.zone = self.u
self.o.move(self.card, silent=True)
self.u.update(range(10), silent=True)
self.o.update(range(10), silent=True)
self.assertFalse(self.events.trigger.called)
def test_iterable(self):
i = range(10)
# TODO: This is incomplete, all the methods don't take iterables
o = z.OrderedZone(game=None, name="Emerald Hill", contents=i)
u = z.UnorderedZone(game=None, name="Emerald Hill", contents=i)
i.pop()
self.assertEqual(list(o), range(10))
self.assertEqual(list(u), range(10))
class TestOrderedZone(ZoneTest):
    """Sequence-protocol behaviour specific to OrderedZone."""

    def test_reversed(self):
        self.assertEqual(list(reversed(self.o)), list(reversed(self.library)))

    def test_getitem(self):
        # Supports both integer indexing and extended slicing.
        for i, e in enumerate(self.library):
            self.assertEqual(self.o[i], e)
        self.assertEqual(self.o[2:7:2], self.library[2:7:2])

    def test_set_del_item(self):
        # Zones are not writable by index.
        self.assertRaises(AttributeError, getattr, self.o, "__setitem__")
        self.assertRaises(AttributeError, getattr, self.o, "__delitem__")

    def test_count(self):
        o = z.OrderedZone(game=None, name="Emerald Hill",
                          contents=[1, 1, 1, 2, 2, 3])
        for i, e in enumerate(range(3, 0, -1), 1):
            self.assertEqual(o.count(e), i)

    def test_index(self):
        e = self.library[4]
        self.assertEqual(self.o.index(e), 4)

    def test_pop_index(self):
        # pop(i) removes by position and still triggers LEFT_ZONE events.
        e1 = self.o.pop(0)
        e2 = self.o.pop(4)
        self.library.pop(0)
        self.library.pop(4)
        self.assertEqual(list(self.o), self.library)
        self.assertLastEventsWere([
            {"event" : LEAVE, "card" : e1, "zone" : self.o},
            {"event" : LEAVE, "card" : e2, "zone" : self.o},
        ])

    def test_reverse(self):
        self.o.reverse()
        self.assertEqual(list(self.o), list(reversed(self.library)))

    def test_shuffle(self):
        # shuffle must delegate to random.shuffle on the internal order list.
        with mock.patch("cardboard.zone.random.shuffle") as shuffle:
            self.o.shuffle()
            shuffle.assert_called_once_with(self.o._order)
class TestZone(unittest.TestCase):
    """Checks the z.zone factory registry maps each zone name to the
    correct zone class and seeds it with the given contents."""

    def test_zone(self):
        card = mock.Mock()
        for name in ["battlefield", "exile", "hand"]:
            created = z.zone[name](game=None, contents=[card])
            self.assertIsInstance(created, z.UnorderedZone)
            self.assertEquals(created.name, name)
            self.assertIn(card, created)
        for name in ["graveyard", "library", "stack"]:
            created = z.zone[name](game=None, contents=[card])
            self.assertIsInstance(created, z.OrderedZone)
            self.assertEquals(created.name, name)
            self.assertIn(card, created)
| Julian/cardboard | cardboard/tests/test_zone.py | test_zone.py | py | 8,874 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "cardboard.events.ENTERED_ZONE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cardboard.events",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cardboard.events.LEFT_ZONE",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
5118772924 | from flask import current_app, Blueprint,request, jsonify
from vpnresolve import VPNResolve
import json
import logging
logger = logging.getLogger( "ucn_logger" )
ios_api = Blueprint('ios_api', __name__)
@ios_api.route("/viz/ios/log", methods=['POST'])
def log():
    """Store process and network stats POSTed by an iOS client.

    The reporting host is identified by resolving the request's VPN address;
    the JSON payload may contain 'processes' and/or 'network' keys.
    Returns JSON {"success": "True"|"False"}.
    """
    # Resolve which monitored host this request came from via its VPN IP.
    vpnres = VPNResolve(current_app.config["CIDR"], {"db":current_app.config["MONGODB"],"collection":current_app.config["VPNLOGSCOLLECTION"],"host":current_app.config["MONGOHOST"], "port":current_app.config["MONGOPORT"]})
    host = vpnres.clientip(request)
    if host is None:
        # Unknown client — refuse rather than store unattributed data.
        return jsonify(success="False")
    logger.debug("saving ios data for host %s", host)
    data = request.get_json(force=False)
    logger.debug("received data for host %s" % host)
    #shove the processes into the table in bulk!
    success= True
    if 'processes' in data:
        logger.debug("saving ios process data for host %s", host)
        success = current_app.config["datadb"].bulk_insert_processes(host,data['processes'])
        if success:
            logger.debug("sucessfully saved ios process data for host %s", host)
        else:
            logger.error("failed to save ios process data")
    if 'network' in data:
        logger.debug("saving ios network for host %s", host)
        # NOTE(review): short-circuit — if the process insert above failed,
        # the network insert is skipped entirely; confirm this is intended.
        success = success and current_app.config["datadb"].insert_network_data(host, data['network'])
        if success:
            logger.debug("sucessfully saved ios network data for host %s", host)
        else:
            logger.error("failed to save ios network data")
            logger.error(data['network'])
    return jsonify(success= "True" if success else "False")
| ucn-eu/ucnviz | ucnserver/ios.py | ios.py | py | 1,522 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vpnresolve.VPNResolve",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.current_a... |
22919129693 | from io import TextIOWrapper
import os
import argparse
files = [
'Accurect-Pointer.txt',
'Endonasal-RII.txt',
'HeadBand-Reference.txt',
'Navigation-Pointer.txt',
'Registration-Pointer.txt'
]
def readFromOriginalFormat(file: TextIOWrapper):
    """Parse a tool-definition file in the original format.

    Reads the 'Num Markers:' count from the header, then the marker block
    that starts two lines after the 'Marker Positions' heading.

    Returns a tuple (values, marker_count) where values is a flat list of
    floats, three ordinates (x y z) per marker.
    """
    content = file.readlines()
    for index, line in enumerate(content):
        if line.startswith('Num Markers:'):
            marker_count = int(line.split(':')[1].strip())
        if line.startswith('Marker Positions'):
            break
    # One header line sits between the heading and the coordinate rows.
    rows = content[index + 2:index + marker_count + 2]
    values = [float(token) for row in rows for token in row.split()]
    return values, marker_count
def writeToNewFormat(data: list, numMarkers: int, file: TextIOWrapper):
    """Write the marker count, then one 'x y z' line per marker, then a
    trailing blank line — the converted file format."""
    file.write('{}\n'.format(numMarkers))
    for base in range(0, numMarkers * 3, 3):
        x, y, z = data[base], data[base + 1], data[base + 2]
        file.write('{} {} {}\n'.format(x, y, z))
    file.write('\n')
if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-d', '--directory', help='root directory of the files', required=True)
    args = argparser.parse_args()

    # BUG FIX: the converted/ output directory must be created when *it* is
    # missing.  The old code only attempted the mkdir when the root
    # directory did not exist — in which case creating its subdirectory
    # (and reading the input files) would fail anyway.
    out_dir = os.path.join(args.directory, 'converted')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # Convert each known tool-definition file into the new format.
    for file in files:
        with open(f'{args.directory}/{file}', 'r') as f:
            data, numMarkers = readFromOriginalFormat(f)
        with open(f'{out_dir}/{file}', 'w') as f:
            writeToNewFormat(data, numMarkers, f)
        print(f'{file} converted')
| odeaxcsh/ParsissCamera | Scripts/CovertToolPatternFilesFormat.py | CovertToolPatternFilesFormat.py | py | 1,486 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "io.TextIOWrapper",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.exi... |
33387732761 | import numpy as np
from sklearn.tree import DecisionTreeClassifier
from .sampler import FeatureSampler, ObjectSampler
class Bagger:
    """Generic bagging ensemble built over a user-supplied estimator class."""

    def __init__(self, base_estimator, object_sampler, feature_sampler, n_estimators=10, **params):
        """
        base_estimator : class
            estimator class exposing fit(), predict() and predict_proba()
        object_sampler : instance of ObjectSampler
            draws the (rows, answers) used to train each base estimator
        feature_sampler : instance of FeatureSampler
            draws the column indices visible to each base estimator
        n_estimators : int
            number of base estimators in the ensemble
        params : kwargs
            forwarded to every base_estimator constructor
        """
        self.n_estimators = n_estimators
        self.base_estimator = base_estimator
        self.feature_sampler = feature_sampler
        self.object_sampler = object_sampler
        self.estimators = []
        self.indices = []
        self.params = params

    def fit(self, X, y):
        """Train n_estimators fresh base estimators, each on its own random
        subset of rows and columns; returns self."""
        self.estimators = []
        self.indices = []
        for _ in range(self.n_estimators):
            rows, answers = self.object_sampler.sample(X, y)
            columns = self.feature_sampler.sample_indices(X.shape[1])
            # A brand-new estimator per round — base_estimator is a class.
            model = self.base_estimator(**self.params)
            model.fit(rows[:, columns], answers)
            self.indices.append(columns)
            self.estimators.append(model)
        return self

    def predict_proba(self, X):
        """Mean of the per-estimator class probabilities.

        Returns
        -------
        probas : numpy ndarray of shape (n_objects, n_classes)

        Raises RuntimeError if the ensemble has not been fitted.
        """
        if not (0 < len(self.estimators) == len(self.indices)):
            raise RuntimeError('Bagger is not fitted', (len(self.estimators), len(self.indices)))
        # Each estimator only sees the columns it was trained on.
        all_probas = [model.predict_proba(X[:, columns])
                      for model, columns in zip(self.estimators, self.indices)]
        return np.mean(np.array(all_probas), axis=0)

    def predict(self, X):
        """Class labels as argmax over the averaged probabilities,
        shape (n_objects,)."""
        return np.argmax(self.predict_proba(X), axis=1)
class RandomForestClassifier(Bagger):
    """Bagger specialised to decision trees with row and feature
    subsampling — i.e. a random forest."""

    def __init__(self, n_estimators=30, max_objects_samples=0.9, max_features_samples=0.8,
                 max_depth=None, min_samples_leaf=1, random_state=None, **params):
        # Delegate everything to Bagger: trees as base estimators, the two
        # samplers sharing the same random_state, tree kwargs passed through.
        super().__init__(
            base_estimator=DecisionTreeClassifier,
            object_sampler=ObjectSampler(max_samples=max_objects_samples, random_state=random_state),
            feature_sampler=FeatureSampler(max_samples=max_features_samples, random_state=random_state),
            n_estimators=n_estimators,
            max_depth=max_depth,
            min_samples_leaf=min_samples_leaf,
            **params,
        )
| TotalChest/MLprograms | RandomForest/random_forest.py | random_forest.py | py | 3,665 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.mean",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifi... |
35544157808 | import calendar
from datetime import datetime
class Util:
    """Date helpers shared by the calendar views."""

    # ISO-style format used for event-dictionary keys.
    DATE_FORMAT = '%Y-%m-%d'

    # BUG FIX: both helpers were defined without @staticmethod, so calling
    # them on a Util *instance* passed the instance as the datetime argument
    # and broke; the parameter was also named `datetime`, shadowing the
    # imported class.  Class-level calls (Util.get_month_start_date(dt))
    # behave exactly as before.
    @staticmethod
    def get_month_start_date(dt):
        """Return the date of the first day of dt's month."""
        return dt.date().replace(day=1)

    @staticmethod
    def get_month_end_date(dt):
        """Return the date of the last day of dt's month (leap-year aware)."""
        last_day = calendar.monthrange(dt.year, dt.month)[1]
        return dt.date().replace(day=last_day)
class CalendarEvent:
    """Simple value object pairing a date with its event text."""

    def __init__(self, date, event):
        # Stored verbatim; rendering is handled by HTMLEventCalendar.
        self.event = event
        self.date = date
class HTMLEventCalendar(calendar.HTMLCalendar):
    """HTML month calendar that renders an event string inside each day cell.

    `events` maps date strings in Util.DATE_FORMAT ('%Y-%m-%d') to the text
    shown in that day's cell.

    NOTE(review): this module appears to be named calendar.py — depending on
    sys.path, `import calendar` may shadow the stdlib module; confirm.
    """

    # CSS classes for the day <td>s
    cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
    # CSS classes for the day <th>s
    cssclasses_weekday_head = cssclasses
    # CSS class for the days before and after current month
    cssclass_noday = "noday"
    # CSS class for the month's head
    cssclass_month_head = "month"
    # CSS class for the month
    cssclass_month = "month"
    # CSS class for the year's table head
    cssclass_year_head = "year"
    # CSS class for the whole year table
    cssclass_year = "year"
    cssclass_event = "calendar-event"
    cssclass_day_number = "day-number"

    def __init__(self, firstweekday=calendar.MONDAY, events=None):
        """
        firstweekday: weekday index the rendered weeks start on.
        events: dict mapping '%Y-%m-%d' strings to event text.

        BUG FIX: the previous signature used a mutable default (events={}),
        a shared module-level dict — replaced with the None sentinel.  A
        caller-supplied dict is still stored by reference, as before.
        """
        super().__init__(firstweekday)
        self.events = events if events is not None else {}

    def get_event(self, day, month, year):
        """Return the event text for the given day, or '' if none."""
        date = datetime.strptime("{}-{}-{}".format(year, month, day),
                                 Util.DATE_FORMAT)
        # Round-trip through strftime so unpadded day/month values match
        # the zero-padded keys in self.events.
        date_string = date.strftime(Util.DATE_FORMAT)
        return self.events.get(date_string, '')

    def formatday(self, day, weekday, themonth=None, theyear=None):
        """
        Return a day as a table cell.
        """
        if day == 0:
            # day outside month
            return '<td class="%s"> </td>' % self.cssclass_noday
        else:
            event = self.get_event(day, themonth, theyear)
            html = """
            <td class="%s">
            <div class="%s">%s</div>
            <div class="%s">%d</div>
            </td>""" % (self.cssclasses[weekday],
                        self.cssclass_event, event,
                        self.cssclass_day_number, day)
            return html

    def formatweek(self, theweek, themonth=None, theyear=None):
        """
        Return a complete week as a table row.
        """
        s = ''.join(self.formatday(d, wd, themonth, theyear)
                    for (d, wd) in theweek)
        return '<tr>%s</tr>' % s

    def formatmonth(self, theyear, themonth, withyear=True):
        """
        Return a formatted month as a table.
        """
        v = []
        a = v.append
        a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % (
            self.cssclass_month))
        a('\n')
        a(self.formatmonthname(theyear, themonth, withyear=withyear))
        a('\n')
        a(self.formatweekheader())
        a('\n')
        for week in self.monthdays2calendar(theyear, themonth):
            a(self.formatweek(week, theyear=theyear, themonth=themonth))
            a('\n')
        a('</table>')
        a('\n')
        return ''.join(v)
| bluepostit/di-python-2019 | daily-exercises/week9/visitors/calendar.py | calendar.py | py | 3,141 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.date",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.year",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "da... |
38760633775 | import netCDF4
import numpy as np
import numexpr as ne
import math
import os
import sys
import re
import tempfile
from collections import OrderedDict
from pprint import pformat
from scipy.interpolate import griddata
from geophys_utils._crs_utils import transform_coords, get_utm_wkt, get_reprojected_bounds, get_spatial_ref_from_wkt
from geophys_utils._transect_utils import utm_coords, coords2distance
from geophys_utils._netcdf_utils import NetCDFUtils, METADATA_CRS
from geophys_utils._polygon_utils import points2convex_hull
from geophys_utils._concave_hull import concaveHull
from shapely.geometry import shape
from scipy.spatial.ckdtree import cKDTree
from shapely.geometry import Polygon, MultiPoint, MultiPolygon
from shapely.geometry.polygon import asPolygon
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform
import shapely.wkt
import logging
# Setup logging handlers if required
logger = logging.getLogger(__name__) # Get logger
logger.setLevel(logging.INFO) # Initial logging level for this module
try:
import memcache
except ImportError:
logger.debug('Unable to import memcache. AWS-specific functionality will not be enabled')
memcache = None
# Default number of points to read per chunk when retrieving data
DEFAULT_READ_CHUNK_SIZE = 8192
# Set this to a number other than zero for testing
POINT_LIMIT = 0
# Metadata shape generation parameters
SHAPE_BUFFER_DISTANCE = 0.02 # Distance to buffer (kerf) shape out then in again (in degrees)
SHAPE_OFFSET = 0.0005 # Distance to buffer (kerf) final shape outwards (in degrees)
SHAPE_SIMPLIFY_TOLERANCE = 0.0005 # Length of shortest line in shape (in degrees)
SHAPE_MAX_POLYGONS=5
SHAPE_MAX_VERTICES=1000
SHAPE_ORDINATE_DECIMAL_PLACES = 6 # Number of decimal places for shape vertex ordinates
class NetCDFPointUtils(NetCDFUtils):
'''
NetCDFPointUtils class to do various fiddly things with NetCDF geophysics point data files.
'''
CACHE_VARIABLE_PARAMETERS = {'complevel': 4,
'zlib': True,
'fletcher32': True,
'shuffle': True,
'endian': 'little',
}
    def __init__(self,
                 netcdf_dataset,
                 memcached_connection=None,
                 enable_disk_cache=None,
                 enable_memory_cache=True,
                 cache_path=None,
                 s3_bucket=None,
                 debug=False):
        '''
        NetCDFPointUtils Constructor
        @parameter netcdf_dataset: netCDF4.Dataset object containing a point dataset
        @parameter memcached_connection: optional memcached connection; only kept when the memcache module imported successfully
        @parameter enable_disk_cache: Boolean parameter indicating whether local cache file should be used, or None for default
        @parameter enable_memory_cache: Boolean parameter indicating whether values should be cached in memory or not.
        @parameter cache_path: optional override for the on-disk cache file location
        @parameter s3_bucket: optional S3 bucket name (stored on the instance; use not shown in this module section)
        @parameter debug: Boolean parameter indicating whether debug output should be turned on or not
        '''
        # Start of init function - Call inherited constructor first
        super().__init__(netcdf_dataset=netcdf_dataset,
                         debug=debug
                         )
        logger.debug('Running NetCDFPointUtils constructor')

        # Only honour the memcached connection when the optional dependency imported.
        if memcache is not None:
            self.memcached_connection = memcached_connection
        else:
            self.memcached_connection = None
        self.s3_bucket = s3_bucket
        # Default cache file: <tmpdir>/NetCDFPointUtils/<sanitised nc path>_cache.nc
        self.cache_path = cache_path or os.path.join(os.path.join(tempfile.gettempdir(), 'NetCDFPointUtils'),
                                                     re.sub('\W', '_', os.path.splitext(self.nc_path)[0])) + '_cache.nc'
        self.cache_basename = os.path.join(self.cache_path,
                                           re.sub('\W', '_', os.path.splitext(self.nc_path)[0]))
        #logger.debug('self.cache_path')
        #logger.debug(self.cache_path)
        #logger.debug('self.cache_path: {}'.format(self.cache_path))

        self.enable_memory_cache = enable_memory_cache
        # If caching is not explicitly specified, enable it for OPeNDAP access
        if enable_disk_cache is None:
            self.enable_disk_cache = self.opendap
        else:
            self.enable_disk_cache = enable_disk_cache

        # Initialise private property variables to None until set by property getter methods
        self._xycoords = None
        self._point_variables = None
        self._data_variable_list = None
        self._kdtree = None

        # Determine exact spatial bounds
        xycoords = self.xycoords
        xmin = np.nanmin(xycoords[:,0])
        xmax = np.nanmax(xycoords[:,0])
        ymin = np.nanmin(xycoords[:,1])
        ymax = np.nanmax(xycoords[:,1])

        # Create nested list of bounding box corner coordinates
        self.native_bbox = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]

        # Define bounds
        self.bounds = [xmin, ymin, xmax, ymax]

        self.point_count = self.netcdf_dataset.dimensions['point'].size
#===========================================================================
# def __del__(self):
# '''
# NetCDFPointUtils Destructor
# '''
# if self.enable_disk_cache:
# try:
# cache_file_path = self._nc_cache_dataset.filepath()
# self._nc_cache_dataset.close()
# os.remove(cache_file_path)
# except:
# pass
#===========================================================================
def fetch_array(self, source_variable, dest_array=None):
'''
Helper function to retrieve entire 1D array in pieces < self.max_bytes in size
@param source_variable: netCDF variable from which to retrieve data
'''
source_len = source_variable.shape[0]
pieces_required = int(math.ceil((source_variable[0].itemsize * source_len) / self.max_bytes))
max_elements = source_len // pieces_required
# Reduce max_elements to fit within chunk boundaries if possible
if pieces_required > 1 and hasattr(source_variable, '_ChunkSizes'):
chunk_size = (source_variable._ChunkSizes
if type(source_variable._ChunkSizes) in [int, np.int32]
else source_variable._ChunkSizes[0]
)
chunk_count = max(max_elements // chunk_size,
1)
max_elements = min(chunk_count * chunk_size,
max_elements)
pieces_required = int(math.ceil(source_len / max_elements))
logger.debug('Fetching {} pieces containing up to {} {} array elements.'.format(pieces_required, max_elements, source_variable.name))
if dest_array is None:
dest_array = np.zeros((source_len,), dtype=source_variable.dtype)
# Copy array in pieces
start_index = 0
while start_index < source_len:
end_index = min(start_index + max_elements, source_len)
logger.debug('Retrieving {} array elements {}:{}'.format(source_variable.name, start_index, end_index))
array_slice = slice(start_index, end_index)
dest_array[array_slice] = source_variable[array_slice]
start_index += max_elements
return dest_array
def get_polygon(self):
'''
Returns GML representation of convex hull polygon for dataset
'''
return 'POLYGON((' + ', '.join([' '.join(
['%.4f' % ordinate for ordinate in coordinates])
for coordinates in self.get_convex_hull()]) + '))'
def get_spatial_mask(self, bounds, bounds_wkt=None):
'''
Return boolean mask of dimension 'point' for all coordinates within specified bounds and CRS
@parameter bounds: Either an iterable containing [<xmin>, <ymin>, <xmax>, <ymax>] or a shapely (multi)polygon
@parameter bounds_wkt: WKT for bounds CRS. Defaults to dataset native CRS
:return mask: Boolean array of size n
'''
#TODO: Deal with this in a more high-level way
POINT_CHUNK_SIZE = 1048576 # Number of points to check at any one time to keep memory usage down
def get_intersection_mask(points, geometry):
"""
Determine if points lie inside (multi)polygon
N.B: points and geometry must be in the same CRS
:param points: 2 x n array of input coordinates
:param geometry: (multi)polygon
:return mask: Boolean array of size n
"""
mask = np.zeros(shape=(points.shape[0]), dtype=np.bool)
chunk_start_index = 0
while chunk_start_index < len(points):
chunk_end_index = min(chunk_start_index + POINT_CHUNK_SIZE, len(points))
logger.debug('Checking spatial containment for points {} to {} of {}'.format(chunk_start_index, chunk_end_index-1, len(points)))
intersection_points = np.array(MultiPoint(points[slice(chunk_start_index, chunk_end_index)]).intersection(geometry))
#TODO: Find out if there's a better way of getting the mask from the intersection points
# Note that this method would have some issues with duplicated coordinates, but there shouldn't be any
logger.debug('Computing partial mask from {} intersection points'.format(len(intersection_points)))
_x_values, x_indices, _x_intersection_indices = np.intersect1d(points.flatten()[0::2], intersection_points.flatten()[0::2], return_indices=True)
_y_values, y_indices, _y_intersection_indices = np.intersect1d(points.flatten()[1::2], intersection_points.flatten()[1::2], return_indices=True)
intersection_indices = np.intersect1d(x_indices, y_indices, return_indices=False)
mask[intersection_indices] = True
chunk_start_index = chunk_end_index
return mask
coordinates = self.xycoords # Don't transform these - do all spatial operations in native CRS
#logger.debug('coordinates = {}'.format(coordinates))
if isinstance(bounds, BaseGeometry): # Process shapely (multi)polygon bounds
if bounds_wkt is None:
native_crs_bounds = bounds
else:
logger.debug('Original bounds = {}'.format(bounds))
native_crs_bounds = transform((lambda x, y: transform_coords([x, y], bounds_wkt, self.wkt)),
bounds)
logger.debug('native_crs_bounds = {}'.format(native_crs_bounds))
# Shortcut the whole process if the extents are within the bounds geometry
if asPolygon(self.native_bbox).within(native_crs_bounds):
logger.debug('Dataset is completely contained within bounds')
return np.ones(shape=(len(coordinates),), dtype=np.bool)
bounds_half_size = abs(np.array([native_crs_bounds.bounds[2] - native_crs_bounds.bounds[0],
native_crs_bounds.bounds[3] - native_crs_bounds.bounds[1]])) / 2.0
bounds_centroid = np.array(native_crs_bounds.centroid.coords[0])
#logger.debug('bounds_half_size = {}, bounds_centroid = {}'.format(bounds_half_size, bounds_centroid))
# Limit the points checked to those within the same rectangular extent (for speed)
# Set mask element to true for each point which is <= bounds_half_size distance from bounds_centroid
mask = np.all(ne.evaluate("abs(coordinates - bounds_centroid) <= bounds_half_size"), axis=1)
logger.debug('{}/{} points found in initial bounding box intersection'.format(np.count_nonzero(mask), len(coordinates)))
# Apply sub-mask for all points within bounds geometry
(mask[mask])[~get_intersection_mask(coordinates[mask], native_crs_bounds)] = False
#logger.debug('Final shape mask = {}'.format(mask))
else: # Process four-element bounds iterable if possible
assert len(bounds) == 4, 'Invalid bounds iterable: {}. Must be of form [<xmin>, <ymin>, <xmax>, <ymax>]'.format(bounds)
native_crs_bounds = transform_coords(np.array(bounds).reshape((2,2)), bounds_wkt, self.wkt).reshape((4, 1)) # Transform as [xmin, ymin], [xmax, ymax]]
if (self.bounds[0] >= native_crs_bounds[0]
and self.bounds[1] >= native_crs_bounds[1]
and self.bounds[2] <= native_crs_bounds[2]
and self.bounds[3] <= native_crs_bounds[3]
):
logger.debug('Dataset is completely contained within bounds')
return np.ones(shape=(len(coordinates),), dtype=np.bool)
bounds_half_size = abs(np.array([native_crs_bounds[2] - native_crs_bounds[0], native_crs_bounds[3] - native_crs_bounds[1]])) / 2.0
bounds_centroid = np.array([native_crs_bounds[0], native_crs_bounds[1]]) + bounds_half_size
# Return true for each point which is <= bounds_half_size distance from bounds_centroid
mask = np.all(ne.evaluate("abs(coordinates - bounds_centroid) <= bounds_half_size"), axis=1)
logger.debug('{}/{} points found in final mask'.format(np.count_nonzero(mask), len(coordinates)))
return mask
    def grid_points(self, grid_resolution,
                    variables=None,
                    native_grid_bounds=None,
                    reprojected_grid_bounds=None,
                    resampling_method='linear',
                    grid_wkt=None,
                    point_step=1):
        '''
        Function to grid points in a specified bounding rectangle to a regular grid of the specified resolution and crs
        @parameter grid_resolution: cell size of regular grid in grid CRS units
        @parameter variables: Single variable name string or list of multiple variable name strings. Defaults to all point variables
        @parameter native_grid_bounds: Spatial bounding box of area to grid in native coordinates
        @parameter reprojected_grid_bounds: Spatial bounding box of area to grid in grid coordinates
        @parameter resampling_method: Resampling method for gridding. 'linear' (default), 'nearest' or 'cubic'.
            See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
        @parameter grid_wkt: WKT for grid coordinate reference system. Defaults to native CRS
        @parameter point_step: Sampling spacing for points. 1 (default) means every point, 2 means every second point, etc.
        @return grids: dict of grid arrays keyed by variable name if parameter 'variables' value was a list, or
            a single grid array if 'variable' parameter value was a string
        @return wkt: WKT for grid coordinate reference system.
        @return geotransform: GDAL GeoTransform for grid
        '''
        assert not (native_grid_bounds and reprojected_grid_bounds), 'Either native_grid_bounds or reprojected_grid_bounds can be provided, but not both'

        # Grid all data variables if not specified
        variables = variables or self.point_variables

        # Allow single variable to be given as a string
        single_var = (type(variables) == str)
        if single_var:
            variables = [variables]

        # Derive whichever of the two bounds forms was not supplied.
        if native_grid_bounds:
            reprojected_grid_bounds = get_reprojected_bounds(native_grid_bounds, self.wkt, grid_wkt)
        elif reprojected_grid_bounds:
            native_grid_bounds = get_reprojected_bounds(reprojected_grid_bounds, grid_wkt, self.wkt)
        else: # No reprojection required
            native_grid_bounds = self.bounds
            reprojected_grid_bounds = self.bounds

        # Determine spatial grid bounds rounded out to nearest GRID_RESOLUTION multiple
        pixel_centre_bounds = (round(math.floor(reprojected_grid_bounds[0] / grid_resolution) * grid_resolution, 6),
                               round(math.floor(reprojected_grid_bounds[1] / grid_resolution) * grid_resolution, 6),
                               round(math.floor(reprojected_grid_bounds[2] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6),
                               round(math.floor(reprojected_grid_bounds[3] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6)
                               )

        grid_size = [pixel_centre_bounds[dim_index+2] - pixel_centre_bounds[dim_index] for dim_index in range(2)]

        # Extend area for points an arbitrary 4% out beyond grid extents for nice interpolation at edges
        # NOTE(review): grid_size[0] is used for both x-min and y-min and
        # grid_size[1] for both maxima — looks like an x/y mix-up; confirm.
        expanded_grid_bounds = [pixel_centre_bounds[0]-grid_size[0]/50.0,
                                pixel_centre_bounds[1]-grid_size[0]/50.0,
                                pixel_centre_bounds[2]+grid_size[1]/50.0,
                                pixel_centre_bounds[3]+grid_size[1]/50.0
                                ]

        spatial_subset_mask = self.get_spatial_mask(get_reprojected_bounds(expanded_grid_bounds, grid_wkt, self.wkt))

        # Create grids of Y and X values. Note YX ordering and inverted Y
        # Note GRID_RESOLUTION/2.0 fudge to avoid truncation due to rounding error
        grid_y, grid_x = np.mgrid[pixel_centre_bounds[3]:pixel_centre_bounds[1]-grid_resolution/2.0:-grid_resolution,
                                  pixel_centre_bounds[0]:pixel_centre_bounds[2]+grid_resolution/2.0:grid_resolution]

        # Skip points to reduce memory requirements
        #TODO: Implement function which grids spatial subsets.
        # NOTE(review): the 0:-1 slice always drops the final point even
        # when point_step == 1 — confirm whether that is intended.
        point_subset_mask = np.zeros(shape=(self.netcdf_dataset.dimensions['point'].size,), dtype=bool)
        point_subset_mask[0:-1:point_step] = True
        point_subset_mask = np.logical_and(spatial_subset_mask, point_subset_mask)

        coordinates = self.xycoords[point_subset_mask]
        # Reproject coordinates if required
        if grid_wkt is not None:
            # N.B: Be careful about XY vs YX coordinate order
            coordinates = np.array(transform_coords(coordinates[:], self.wkt, grid_wkt))

        # Interpolate required values to the grid - Note YX ordering for image
        grids = {}
        for variable in [self.netcdf_dataset.variables[var_name] for var_name in variables]:
            grids[variable.name] = griddata(coordinates[:,::-1],
                                            variable[:][point_subset_mask], #TODO: Check why this is faster than direct indexing
                                            (grid_y, grid_x),
                                            method=resampling_method)

        if single_var:
            grids = list(grids.values())[0]

        # crs:GeoTransform = "109.1002342895272 0.00833333 0 -9.354948067227777 0 -0.00833333 "
        # GDAL GeoTransform references the outer pixel edge, hence the
        # half-resolution shift from the pixel-centre bounds.
        geotransform = [pixel_centre_bounds[0]-grid_resolution/2.0,
                        grid_resolution,
                        0,
                        pixel_centre_bounds[3]+grid_resolution/2.0,
                        0,
                        -grid_resolution
                        ]

        return grids, (grid_wkt or self.wkt), geotransform
def utm_grid_points(self, utm_grid_resolution, variables=None, native_grid_bounds=None, resampling_method='linear', point_step=1):
'''
Function to grid points in a specified native bounding rectangle to a regular grid of the specified resolution in its local UTM CRS
@parameter grid_resolution: cell size of regular grid in metres (UTM units)
@parameter variables: Single variable name string or list of multiple variable name strings. Defaults to all point variables
@parameter native_grid_bounds: Spatial bounding box of area to grid in native coordinates
@parameter resampling_method: Resampling method for gridding. 'linear' (default), 'nearest' or 'cubic'.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
@parameter grid_wkt: WKT for grid coordinate reference system. Defaults to native CRS
@parameter point_step: Sampling spacing for points. 1 (default) means every point, 2 means every second point, etc.
@return grids: dict of grid arrays keyed by variable name if parameter 'variables' value was a list, or
a single grid array if 'variable' parameter value was a string
@return wkt: WKT for grid coordinate reference system (i.e. local UTM zone)
@return geotransform: GDAL GeoTransform for grid
'''
native_grid_bounds = native_grid_bounds or self.bounds
native_centre_coords = [(native_grid_bounds[dim_index] + native_grid_bounds[dim_index+2]) / 2.0 for dim_index in range(2)]
utm_wkt = get_utm_wkt(native_centre_coords, self.wkt)
return self.grid_points(grid_resolution=utm_grid_resolution,
variables=variables,
native_grid_bounds=native_grid_bounds,
resampling_method=resampling_method,
grid_wkt=utm_wkt,
point_step=point_step
)
def utm_coords(self, coordinate_array, wkt=None):
'''
Function to convert coordinates to the appropriate UTM CRS
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
@param wkt: WKT for source CRS - default to native
@return wkt: WKT for UTM CRS - default to native
@return coordinate_array: Array of shape (n, 2) containing UTM coordinate pairs
'''
wkt = wkt or self.wkt
return utm_coords(coordinate_array, wkt)
def coords2metres(self, coordinate_array, wkt=None):
'''
Function to calculate cumulative distance in metres from coordinates in specified CRS
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
@param wkt: WKT for coordinate CRS - default to native
@return distance_array: Array of shape (n) containing cumulative distances from first coord
'''
wkt = wkt or self.wkt # Default to native CRS for coordinates
_utm_wkt, utm_coord_array = utm_coords(coordinate_array, wkt)
return coords2distance(utm_coord_array)
    def get_convex_hull(self, to_wkt=None):
        '''
        Function to return vertex coordinates of a convex hull polygon around all points
        Implements abstract base function in NetCDFUtils
        @param to_wkt: CRS WKT for shape
        @return: vertex coordinates of the convex hull, in the to_wkt CRS
        '''
        # Reproject all point coordinates into the target CRS before computing the hull
        return points2convex_hull(transform_coords(self.xycoords, self.wkt, to_wkt))
    def get_concave_hull(self, to_wkt=None, smoothness=None, clockwise_polygon_orient=False):
        """\
        Returns the concave hull (as a shapely polygon) of all points.
        Implements abstract base function in NetCDFUtils
        @param to_wkt: CRS WKT for shape
        @param smoothness: distance to buffer (kerf) initial shape outwards then inwards to simplify it
        @param clockwise_polygon_orient: if True, orient the exterior ring clockwise; otherwise counter-clockwise
        """
        # Compute hull vertex array from reprojected point coordinates, then wrap as a shapely Polygon
        hull = concaveHull(transform_coords(self.xycoords, self.wkt, to_wkt))
        shapely_polygon = shape({'type': 'Polygon', 'coordinates': [hull.tolist()]})
        # from shapely docs:
        # A sign of 1.0 means that the coordinates of the product’s exterior ring will be oriented
        # counter-clockwise and the interior rings (holes) will be oriented clockwise.
        #
        # There should not be polygons with interior ring holes and so -1 will be treated as clockwise, and 1 as
        # counter-clockwise
        if clockwise_polygon_orient:
            pol = shapely.geometry.polygon.orient(Polygon(shapely_polygon), -1.0)
        else: # reverse polygon coordinates - anti-clockwise
            pol = shapely.geometry.polygon.orient(Polygon(shapely_polygon), 1.0)
        if smoothness is None:
            return pol
        # Buffer outwards then back inwards by the same distance to smooth/simplify the outline
        return Polygon(pol.buffer(smoothness).exterior).buffer(-smoothness)
def nearest_neighbours(self, coordinates,
wkt=None,
points_required=1,
max_distance=None,
secondary_mask=None):
'''
Function to determine nearest neighbours using cKDTree
N.B: All distances are expressed in the native dataset CRS
@param coordinates: two-element XY coordinate tuple, list or array
@param wkt: Well-known text of coordinate CRS - defaults to native dataset CRS
@param points_required: Number of points to retrieve. Default=1
@param max_distance: Maximum distance to search from target coordinate -
STRONGLY ADVISED TO SPECIFY SENSIBLE VALUE OF max_distance TO LIMIT SEARCH AREA
@param secondary_mask: Boolean array of same shape as point array used to filter points. None = no filter.
@return distances: distances from the target coordinate for each of the points_required nearest points
@return indices: point indices for each of the points_required nearest points
'''
if wkt:
reprojected_coords = transform_coords(coordinates, wkt, self.wkt)
else:
reprojected_coords = coordinates
if secondary_mask is None:
secondary_mask = np.ones(shape=(self.point_count,), dtype=bool)
else:
assert secondary_mask.shape == (self.point_count,)
if max_distance: # max_distance has been specified
logger.debug('Computing spatial subset mask...')
spatial_mask = self.get_spatial_mask([reprojected_coords[0] - max_distance,
reprojected_coords[1] - max_distance,
reprojected_coords[0] + max_distance,
reprojected_coords[1] + max_distance
]
)
point_indices = np.where(np.logical_and(spatial_mask,
secondary_mask
)
)[0]
if not len(point_indices):
logger.debug('No points within distance {} of {}'.format(max_distance, reprojected_coords))
return [], []
# Set up KDTree for nearest neighbour queries
logger.debug('Indexing spatial subset with {} points into KDTree...'.format(np.count_nonzero(spatial_mask)))
kdtree = cKDTree(data=self.xycoords[point_indices])
logger.debug('Finished indexing spatial subset into KDTree.')
else: # Consider ALL points
max_distance = np.inf
kdtree = self.kdtree
distances, indices = kdtree.query(x=np.array(reprojected_coords),
k=points_required,
distance_upper_bound=max_distance)
if max_distance == np.inf:
return distances, indices
else: # Return indices of complete coordinate array, not the spatial subset
return distances, np.where(spatial_mask)[0][indices]
def get_lookup_mask(self,
lookup_value_list,
lookup_variable_name='line',
indexing_variable_name=None,
indexing_dimension='point'
):
'''
Function to return mask array based on lookup variable
'''
if lookup_variable_name:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
if (lookup_variable.shape == ()
or ((len(lookup_variable.shape) == 1) and (lookup_variable.dtype == '|S1'))): # Scalar or string array
dimension = self.netcdf_dataset.get(indexing_dimension)
assert dimension, 'Invalid indexing_dimension {} specified'.format(indexing_dimension)
# Repeat boolean value across dimension size
return np.array([lookup_variable[:] in lookup_value_list] * dimension.size)
indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
try:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
except:
raise BaseException('indexing_variable_name not supplied and cannot be inferred')
elif indexing_variable_name:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
if hasattr(indexing_variable, 'lookup'):
# Get lookup variable name from variable attribute
lookup_variable_name = indexing_variable.lookup
elif indexing_variable_name.endswith('_index'):
# Infer lookup variable name from indexing variable name
lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
else:
raise BaseException('lookup_variable_name not supplied and cannot be inferred')
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
else:
raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
# Handle special case for string arrays via OPeNDAP
if self.opendap and (lookup_variable.dtype == 'S1') and (len(lookup_variable.shape) == 2):
# Convert 2D byte array into 1D array of unicode strings - needed for OPeNDAP
lookup_array = np.array([bytestring[bytestring != b''].tostring().decode('UTF8') for bytestring in lookup_variable[:]])
# OPeNDAP will truncate strings to 64 characters - truncate search strings to match
lookup_indices = np.arange(lookup_array.shape[0])[np.in1d(lookup_array, np.array([lookup_value[0:64]
for lookup_value in lookup_value_list]))]
else:
lookup_indices = np.arange(lookup_variable.shape[0])[np.in1d(lookup_variable[:], np.array(lookup_value_list))]
logger.debug('lookup_indices: {}'.format(lookup_indices))
lookup_mask = np.in1d(indexing_variable, lookup_indices)
logger.debug('lookup_mask: {}'.format(lookup_mask))
return lookup_mask
#===============================================================================
# def lookup_mask_generator(self,
# lookup_value_list,
# lookup_variable_name='line',
# indexing_variable_name=None
# ):
# '''
# Generator to yield mask array based on lookup variable for each of a list of lookup values
# '''
# if lookup_variable_name:
# indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
#
# try:
# indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
# except:
# raise BaseException('indexing_variable_name not supplied and cannot be inferred')
#
# elif indexing_variable_name:
# indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
#
# if hasattr(indexing_variable, 'lookup'):
# # Get lookup variable name from variable attribute
# lookup_variable_name = indexing_variable.lookup
# elif indexing_variable_name.endswith('_index'):
# # Infer lookup variable name from indexing variable name
# lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
# else:
# raise BaseException('lookup_variable_name not supplied and cannot be inferred')
#
# else:
# raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
#
# lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
#
# for lookup_value in lookup_value_list:
# lookup_indices = np.where(lookup_variable[:] == lookup_value)[0]
# logger.debug('lookup_indices: {}'.format(lookup_indices))
#
# lookup_mask = np.in1d(indexing_variable, lookup_indices)
# logger.debug('lookup_mask: {}'.format(lookup_mask))
# yield lookup_mask
#
#===============================================================================
def get_index_mask(self,
lookup_value_list,
lookup_variable_name='line',
start_index_variable_name=None,
count_variable_name=None,
point_count=None
):
'''
Function to return mask array based on index variable
'''
try:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
except:
raise BaseException('Invalid lookup_variable_name')
start_index_variable_name = start_index_variable_name or lookup_variable_name + '_start_index'
try:
start_index_variable = self.netcdf_dataset.variables[start_index_variable_name]
except:
raise BaseException('start_index_variable_name not supplied and cannot be inferred')
count_variable_name = count_variable_name or lookup_variable_name + '_count'
try:
count_variable = self.netcdf_dataset.variables[count_variable_name]
except:
raise BaseException('count_variable_name not supplied and cannot be inferred')
point_count = point_count or self.netcdf_dataset.dimensions['point'].size
lookup_indices = np.arange(lookup_variable.shape[0])[np.in1d(lookup_variable[:], lookup_value_list)]
logger.debug('lookup_indices: {}'.format(lookup_indices))
start_indices = start_index_variable[lookup_indices]
logger.debug('start_indices: {}'.format(start_indices))
counts = count_variable[lookup_indices]
logger.debug('counts: {}'.format(counts))
# Build mask
index_mask = np.zeros(shape=(point_count,), dtype='bool')
for lookup_index in range(len(lookup_indices)):
index_mask[start_indices[lookup_index]:start_indices[lookup_index]+counts[lookup_index]] = True
return index_mask
def expand_lookup_variable(self,
lookup_variable_name='line',
indexing_variable_name=None,
start_index=0,
end_index=0,
mask=None,
indexing_dimension='point'):
'''
Function to expand lookup variables and return an array of the required size
'''
if lookup_variable_name:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
if lookup_variable.shape == (): # Scalar
dimension = self.netcdf_dataset.dimensions.get(indexing_dimension)
assert dimension, 'Invalid indexing_dimension {} specified'.format(indexing_dimension)
# Repeat boolean value across dimension size
return np.array([lookup_variable[:]] * dimension.size)
indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
try:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
except:
raise BaseException('indexing_variable_name not supplied and cannot be inferred')
elif indexing_variable_name:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
if hasattr(indexing_variable, 'lookup'):
# Get lookup variable name from variable attribute
lookup_variable_name = indexing_variable.lookup
elif indexing_variable_name.endswith('_index'):
# Infer lookup variable name from indexing variable name
lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
else:
raise BaseException('lookup_variable_name not supplied and cannot be inferred')
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
else:
raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
end_index = end_index or indexing_variable.shape[0] # Usually this will be the point count
index_range = end_index - start_index
if mask is None: # No mask defined - take all points in range
subset_mask = np.ones(shape=(index_range,), dtype='bool')
else:
subset_mask = mask[start_index:end_index]
lookup_variable.set_auto_mask(False)
indexing_variable.set_auto_mask(False)
result_array = lookup_variable[:][indexing_variable[start_index:end_index][subset_mask]] # Need to index numpy array, not netCDF variable
# Convert 2D byte array into 1D array of unicode strings - needed for OPeNDAP
if result_array.dtype == 'S1':
result_array = np.array([bytestring[bytestring != b''].tostring().decode('UTF8') for bytestring in result_array])
return result_array
    def chunk_point_data_generator(self,
                                   start_index=0,
                                   end_index=0,
                                   field_list=None,
                                   mask=None,
                                   yield_variable_attributes_first=False):
        '''
        Generator to optionally yield variable attributes followed by all point data for the specified point index range
        Used to retrieve data as chunks for outputting as point-wise lists of lists
        @param start_index: start point index of range to read
        @param end_index: end point index of range to read. Defaults to number of points
        @param field_list: Optional list of field names to read. Default is None for all variables
        @param mask: Optional Boolean mask array to subset points
        @param yield_variable_attributes_first: Boolean flag to determine whether variable attribute dict is yielded first. Defaults to False
        @yield variable_attributes: dict of netCDF variable attributes. Optionally the first item yielded if yield_variable_attributes_first is True
        @yield point_value_list: List of single values for 1D variables or sub-lists for 2D variables for a single point
        '''
        # Start of point_data_generator function
        end_index = end_index or self.point_count
        index_range = end_index - start_index
        if mask is None: # No mask defined - take all points in range
            subset_mask = np.ones(shape=(index_range,), dtype='bool')
        else:
            subset_mask = mask[start_index:end_index]
            # Number of points actually selected within this chunk
            index_range = np.count_nonzero(subset_mask)
        # If no points to retrieve, don't read anything
        if not index_range:
            logger.debug('No points to retrieve for point indices {}-{}: All masked out'.format(start_index, end_index-1))
            return
        # Generate full field list if None provided
        # Includes scalar variables, point-dimensioned variables, and lookup variables whose
        # index variable is point-dimensioned; excludes index/CRS/metadata variables
        if not field_list:
            field_list = [variable.name
                          for variable in self.netcdf_dataset.variables.values()
                          if (not len(variable.dimensions) # Scalar variable
                              or variable.dimensions[0] == 'point' # Variable is of point dimension
                              or (variable.dimensions[0] + '_index' in self.netcdf_dataset.variables.keys() # Variable has an index variable
                                  and len(self.netcdf_dataset.variables[variable.dimensions[0] + '_index'].dimensions) # index variable is not a scalar
                                  and self.netcdf_dataset.variables[variable.dimensions[0] + '_index'].dimensions[0] == 'point' # index variable is of point dimension
                                  )
                              )
                          and not variable.name.endswith('_index')
                          and not hasattr(variable, 'lookup') # Variable is not an index variable
                          and not variable.name in NetCDFUtils.CRS_VARIABLE_NAMES
                          and not re.match('ga_.+_metadata', variable.name) # Not an excluded variable
                          ]
        logger.debug('field_list: {}'.format(field_list))
        # memory_cache holds one in-memory array per field, already subset to the selected points
        variable_attributes = OrderedDict()
        memory_cache = OrderedDict()
        for variable_name in field_list:
            variable = self.netcdf_dataset.variables.get(variable_name)
            if variable is None:
                logger.warning('Variable {} does not exist. Skipping.'.format(variable_name))
                continue
            #logger.debug('variable_name: {}'.format(variable_name))
            # Scalar variable
            if len(variable.shape) == 0:
                # Skip CRS variable
                if variable_name in NetCDFUtils.CRS_VARIABLE_NAMES or re.match('ga_.+_metadata', variable_name):
                    continue
                # Repeat scalar value for each point
                data_array = variable[:]
                memory_cache[variable_name] = np.array([data_array] * index_range)
            else: # nD array variable
                if (variable.dimensions[0] != 'point'): # Variable is NOT of point dimension - must be lookup
                    memory_cache[variable_name] = self.expand_lookup_variable(lookup_variable_name=variable_name,
                                                                              start_index=start_index,
                                                                              end_index=end_index,
                                                                              mask=mask)
                else: # 'point' is in variable.dimensions - "normal" variable
                    data_array = variable[start_index:end_index]
                    # Include fill_values if array is masked
                    if type(data_array) == np.ma.core.MaskedArray:
                        data_array = data_array.data
                    memory_cache[variable_name] = data_array[subset_mask]
            if yield_variable_attributes_first:
                variable_attributes[variable_name] = dict(variable.__dict__)
        logger.debug('variable_attributes: {}'.format(pformat(variable_attributes)))
        logger.debug('memory_cache: {}'.format(pformat(memory_cache)))
        if yield_variable_attributes_first:
            yield variable_attributes
        # Emit one list of per-field values per selected point
        for index in range(index_range):
            point_value_list = []
            for variable_name, variable in iter(memory_cache.items()):
                data_array = variable[index]
                # Convert array to string if required
                if type(data_array) == np.ndarray and data_array.dtype == object:
                    data_array = str(data_array)
                point_value_list.append(data_array)
            yield point_value_list
        logger.debug('{} points read for point indices {}-{}'.format(index_range, start_index, end_index-1))
    def all_point_data_generator(self,
                                 field_list=None,
                                 mask=None,
                                 read_chunk_size=None,
                                 yield_variable_attributes_first=True):
        '''
        Generator to yield variable attributes followed by lists of values for all points
        @param field_list: Optional list of field names to read. Default is None for all variables
        @param mask: Optional Boolean mask array to subset points
        @param read_chunk_size: Number of points to read from the netCDF per chunk (for greater efficiency than single point reads)
        @param yield_variable_attributes_first: Boolean flag to determine whether variable attribute dict is yielded first. Defaults to True
        @yield variable_attributes: dict of netCDF variable attributes. Optionally the first item yielded if yield_variable_attributes_first is True
        @yield point_value_list: List of single values for 1D variables or sub-lists for 2D variables for a single point
        '''
        read_chunk_size = read_chunk_size or DEFAULT_READ_CHUNK_SIZE
        # Process all chunks
        point_count = 0
        # +1 so the final partial chunk is included; end_index is clamped to point_count below
        for chunk_index in range(self.point_count // read_chunk_size + 1):
            for line in self.chunk_point_data_generator(field_list=field_list,
                                                        start_index=chunk_index*read_chunk_size,
                                                        end_index=min((chunk_index+1)*read_chunk_size,
                                                                      self.point_count
                                                                      ),
                                                        mask=mask,
                                                        yield_variable_attributes_first=yield_variable_attributes_first
                                                        ):
                # Do not count the attribute dict (first yielded item) as a point
                if not yield_variable_attributes_first:
                    point_count += 1
                yield_variable_attributes_first = False # Only yield variable attributes from the first chunk
                #logger.debug('line: {}'.format(line))
                yield line
                if POINT_LIMIT and (point_count >= POINT_LIMIT):
                    break
            # Propagate the POINT_LIMIT break out of the chunk loop as well
            if POINT_LIMIT and (point_count >= POINT_LIMIT):
                break
        logger.debug('{} points read from netCDF file {}'.format(point_count, self.nc_path))
    def get_xy_coord_values(self):
        '''
        Function to return a full in-memory coordinate array from source dataset
        @return xycoord_values: array of shape (point_count, 2) with X in column 0 and Y in column 1
        '''
        logger.debug('Reading xy coordinates from source dataset')
        xycoord_values = np.zeros(shape=(len(self.x_variable), 2), dtype=self.x_variable.dtype)
        self.fetch_array(self.x_variable, xycoord_values[:,0])
        self.fetch_array(self.y_variable, xycoord_values[:,1])
        # Deal with netCDF4 Datasets that have had set_auto_mask(False) called
        # NOTE(review): assigning np.nan assumes a float coordinate dtype - confirm behaviour for integer coordinate variables
        if hasattr(self.x_variable, '_FillValue'):
            xycoord_values[:,0][xycoord_values[:,0] == self.x_variable._FillValue] = np.nan
        if hasattr(self.y_variable, '_FillValue'):
            xycoord_values[:,1][xycoord_values[:,1] == self.y_variable._FillValue] = np.nan
        return xycoord_values
    @property
    def xycoords(self):
        '''
        Property getter function to return pointwise array of XY coordinates
        The order of priority for retrieval is memory, memcached, disk cache then dataset.
        '''
        xycoords = None
        # assert np.allclose(arr, arr_down)
        if self.enable_memory_cache and self._xycoords is not None:
            # Fastest path: coordinates already held in memory
            #logger.debug('Returning memory cached coordinates')
            return self._xycoords
        elif self.memcached_connection is not None:
            # Second priority: memcached - key is derived from the cache basename
            coord_cache_key = self.cache_basename + '_xycoords'
            logger.debug("hit xycoords propery code")
            logger.debug(self.memcached_connection)
            xycoords = self.memcached_connection.get(coord_cache_key)
            if xycoords is not None:
                # self.memcached_connection.get(self.cache_path) is True:
                logger.debug('memcached key found at {}'.format(coord_cache_key))
                #logger.debug('xycoords: {}'.format(xycoords))
            else:
                # Cache miss: read from source and populate memcached for next time
                xycoords = self.get_xy_coord_values()
                logger.debug("key not found at {}. adding key and value".format(coord_cache_key))
                self.memcached_connection.add(coord_cache_key, xycoords)
        elif self.enable_disk_cache:
            # Third priority: netCDF cache file on disk
            if os.path.isfile(self.cache_path):
                # Cached coordinate file exists - read it
                cache_dataset = netCDF4.Dataset(self.cache_path, 'r')
                #assert cache_dataset.source == self.nc_path, 'Source mismatch: cache {} vs. dataset {}'.format(cache_dataset.source, self.nc_path)
                if 'xycoords' in cache_dataset.variables.keys():
                    xycoords = cache_dataset.variables['xycoords'][:]
                    logger.debug('Read {} coordinates from cache file {}'.format(xycoords.shape[0], self.cache_path))
                else:
                    logger.debug('Unable to read xycoords variable from netCDF cache file {}'.format(self.cache_path))
                cache_dataset.close()
            else:
                logger.debug('NetCDF cache file {} does not exist'.format(self.cache_path))
            if xycoords is None:
                # Cache file missing or unreadable - read from source then (re)write the cache
                xycoords = self.get_xy_coord_values() # read coords from source file
                os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)
                if os.path.isfile(self.cache_path):
                    cache_dataset = netCDF4.Dataset(self.cache_path, 'r+')
                else:
                    cache_dataset = netCDF4.Dataset(self.cache_path, 'w')
                if not hasattr(cache_dataset, 'source'):
                    cache_dataset.source = self.nc_path
                #assert cache_dataset.source == self.nc_path, 'Source mismatch: cache {} vs. dataset {}'.format(cache_dataset.source, self.nc_path)
                # Create dimensions/variable only if they don't already exist in the cache file
                if 'point' not in cache_dataset.dimensions.keys():
                    cache_dataset.createDimension(dimname='point', size=xycoords.shape[0])
                if 'xy' not in cache_dataset.dimensions.keys():
                    cache_dataset.createDimension(dimname='xy', size=xycoords.shape[1])
                if 'xycoords' not in cache_dataset.variables.keys():
                    cache_dataset.createVariable('xycoords',
                                                 xycoords.dtype,
                                                 dimensions=['point', 'xy'],
                                                 **self.CACHE_VARIABLE_PARAMETERS
                                                 )
                cache_dataset.variables['xycoords'][:] = xycoords # Write coords to cache file
                cache_dataset.close()
                logger.debug('Saved {} coordinates to cache file {}'.format(xycoords.shape[0], self.cache_path))
        else: # No caching - read coords from source file
            xycoords = self.get_xy_coord_values()
        # Retain in memory for subsequent accesses if memory caching is enabled
        if self.enable_memory_cache:
            self._xycoords = xycoords
        return xycoords
@property
def point_variables(self):
'''
Property getter function to return point_variables as required
'''
if not self._point_variables:
logger.debug('Setting point_variables property')
self._point_variables = list([var_name for var_name in self.netcdf_dataset.variables.keys()
if 'point' in self.netcdf_dataset.variables[var_name].dimensions
and var_name not in ['latitude', 'longitude', 'easting', 'northing', 'point', 'fiducial', 'flag_linetype']
])
return self._point_variables
@property
def data_variable_list(self):
'''
Property getter function to return data_variable_list as required
'''
if not self._data_variable_list:
logger.debug('Setting data_variable_list property')
self._data_variable_list = [key for key, value in self.netcdf_dataset.variables.items()
if 'point' in value.dimensions]
return self._data_variable_list
@property
def kdtree(self):
'''
Property getter function to return data_variable_list as required
'''
if not self._kdtree:
logger.debug('Indexing full dataset with {} points into KDTree...'.format(self.xycoords.shape[0]))
self._kdtree = cKDTree(data=self.xycoords, balanced_tree=False)
logger.debug('Finished indexing full dataset into KDTree.')
return self._kdtree
def copy(self,
nc_out_path,
datatype_map_dict={},
variable_options_dict={},
dim_range_dict={},
dim_mask_dict={},
nc_format=None,
limit_dim_size=False,
var_list=[],
empty_var_list=[],
to_crs=None
):
'''
Function to copy a netCDF dataset to another one with potential changes to size, format,
variable creation options and datatypes.
@param nc_out_path: path to netCDF output file
@param to_crs: WKT of destination CRS
'''
if var_list:
expanded_var_list = list(set(
var_list +
NetCDFUtils.X_DIM_VARIABLE_NAMES +
NetCDFUtils.Y_DIM_VARIABLE_NAMES +
NetCDFUtils.CRS_VARIABLE_NAMES +
['line', 'line_index'] # Always include line numbers (This really should be in an overridden function in NetCDFLineUtils)
))
else:
expanded_var_list = var_list
# Call inherited NetCDFUtils method
super().copy(
nc_out_path,
datatype_map_dict=datatype_map_dict,
variable_options_dict=variable_options_dict,
dim_range_dict=dim_range_dict,
dim_mask_dict=dim_mask_dict,
nc_format=nc_format,
limit_dim_size=limit_dim_size,
var_list=expanded_var_list,
empty_var_list=empty_var_list,
)
# Finish up if no reprojection required
dest_srs = get_spatial_ref_from_wkt(to_crs)
if not to_crs or dest_srs.IsSame(get_spatial_ref_from_wkt(self.wkt)):
logger.debug('No reprojection required for dataset {}'.format(nc_out_path))
return
try:
logger.debug('Re-opening new dataset {}'.format(nc_out_path))
new_dataset = netCDF4.Dataset(nc_out_path, 'r+')
new_ncpu = NetCDFPointUtils(new_dataset, debug=self.debug)
logger.debug('Reprojecting {} coordinates in new dataset'.format(len(new_ncpu.x_variable)))
#TODO: Check coordinate variable data type if changing between degrees & metres
new_ncpu._xycoords = transform_coords(new_ncpu.xycoords, self.wkt, to_crs)
new_ncpu.x_variable[:] = new_ncpu._xycoords[:,0]
new_ncpu.y_variable[:] = new_ncpu._xycoords[:,1]
crs_variable_name, crs_variable_attributes = self.get_crs_attributes(to_crs)
logger.debug('Setting {} variable attributes'.format(crs_variable_name))
# Delete existing crs variable attributes
for key in new_ncpu.crs_variable.__dict__.keys():
if not key.startswith('_'):
delattr(new_ncpu.crs_variable, key)
try:
delattr(new_ncpu.x_variable, key)
delattr(new_ncpu.y_variable, key)
except:
pass
# Set new crs variable attributes
new_ncpu.crs_variable.setncatts(crs_variable_attributes)
new_ncpu.x_variable.setncatts(crs_variable_attributes)
new_ncpu.y_variable.setncatts(crs_variable_attributes)
# Rename variables if switching between projected & unprojected
if crs_variable_name != new_ncpu.crs_variable.name:
logger.debug('Renaming {} variable to {}'.format(new_ncpu.crs_variable.name, crs_variable_name))
new_dataset.renameVariable(new_ncpu.crs_variable.name, crs_variable_name)
if crs_variable_name == 'crs': # Geodetic
xy_varnames = ('longitude', 'latitude')
units = dest_srs.GetAngularUnitsName() + 's' # degrees
elif crs_variable_name in ['transverse_mercator', "albers_conical_equal_area"]: # Projected
xy_varnames = ('x', 'y')
units = units = dest_srs.GetLinearUnitsName() + 's' # metres
else:
raise BaseException('Unhandled crs variable name "{}"'.format(crs_variable_name))
logger.debug('Renaming {} & {} variables to {} & {}'.format(new_ncpu.x_variable.name,
new_ncpu.y_variable.name,
*xy_varnames
))
new_dataset.renameVariable(new_ncpu.x_variable.name, xy_varnames[0])
new_ncpu.x_variable.units = units
new_ncpu.x_variable.long_name = xy_varnames[0]
new_dataset.renameVariable(new_ncpu.y_variable.name, xy_varnames[1])
new_ncpu.y_variable.units = units
new_ncpu.y_variable.long_name = xy_varnames[1]
finally:
new_dataset.close()
def set_global_attributes(self, compute_shape=False, clockwise_polygon_orient=False):
'''\
Function to set global geometric metadata attributes in netCDF file
N.B: This will fail if dataset is not writable
'''
try:
metadata_srs = get_spatial_ref_from_wkt(METADATA_CRS)
assert metadata_srs.IsGeographic(), 'Unable to set geodetic parameters for this dataset'
#===================================================================
# # Reopen as writable dataset
# filepath = self.netcdf_dataset.filepath()
# self.netcdf_dataset.close()
# self.netcdf_dataset = netCDF4.Dataset(filepath, 'r+')
#===================================================================
logger.debug('Setting global geometric metadata attributes in netCDF point dataset with {} points'.format(self.netcdf_dataset.dimensions['point'].size))
attribute_dict = dict(zip(['geospatial_lon_min', 'geospatial_lat_min', 'geospatial_lon_max', 'geospatial_lat_max'],
get_reprojected_bounds(self.bounds, self.wkt, METADATA_CRS)
)
)
attribute_dict['geospatial_lon_units'] = 'degree_east'
attribute_dict['geospatial_lat_units'] = 'degree_north'
attribute_dict['geospatial_bounds_crs'] = metadata_srs.ExportToPrettyWkt()
if compute_shape:
try:
logger.debug('Computing concave hull')
attribute_dict['geospatial_bounds'] = shapely.wkt.dumps(
self.get_concave_hull(
to_wkt=METADATA_CRS,
clockwise_polygon_orient=clockwise_polygon_orient
),
rounding_precision=SHAPE_ORDINATE_DECIMAL_PLACES)
except Exception as e:
logger.warning('Unable to compute concave hull shape: {}'.format(e))
try:
self.netcdf_dataset.geospatial_bounds = shapely.wkt.dumps(asPolygon([
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_min']],
[attribute_dict['geospatial_lon_max'], attribute_dict['geospatial_lat_min']],
[attribute_dict['geospatial_lon_max'], attribute_dict['geospatial_lat_max']],
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_max']],
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_min']],
]))
except:
pass
logger.debug('attribute_dict = {}'.format(pformat(attribute_dict)))
logger.debug('Writing global attributes to netCDF file'.format(self.netcdf_dataset.filepath()))
for key, value in attribute_dict.items():
setattr(self.netcdf_dataset, key, value)
logger.debug('Finished setting global geometric metadata attributes in netCDF point dataset')
except:
logger.error('Unable to set geometric metadata attributes in netCDF point dataset')
raise
    def set_variable_actual_range_attribute(self):
        '''\
        Function to set ACDD actual_range attribute in all non-index point-dimensioned variables
        N.B: Will fail if dataset is not writable
        '''
        # Re-enable auto-masking so fill values are excluded from the min/max computation
        self.netcdf_dataset.set_auto_mask(True)
        try:
            for variable_name, variable in self.netcdf_dataset.variables.items():
                # Skip all variables not of point dimensionality
                if 'point' not in variable.dimensions:
                    continue
                # Skip index variables
                if re.search('_index$', variable_name):
                    continue
                try:
                    variable.actual_range = np.array(
                        [np.nanmin(variable[:]), np.nanmax(variable[:])], dtype=variable.dtype)
                    logger.debug('{}.actual_range = {}'.format(variable_name, variable.actual_range))
                except:
                    # Deliberate best-effort: e.g. non-numeric variables cannot yield nanmin/nanmax
                    logger.warning('Unable to compute actual_range value for point variable {}'.format(variable_name))
        except:
            logger.error('Unable to set variable actual_range metadata attributes in netCDF point dataset')
            raise
def main(debug=True):
    '''
    Main function for quick and dirty testing.

    Reads a netCDF file path from sys.argv[1], wraps it in a
    NetCDFPointUtils instance and logs the attributes and data of the
    last ten points.

    @param debug: passed through to NetCDFPointUtils to enable debug output
    '''
    netcdf_path = sys.argv[1]
    netcdf_dataset = netCDF4.Dataset(netcdf_path, 'r')
    ncpu = NetCDFPointUtils(netcdf_dataset, debug=debug) # Enable debug output here
    # Create mask for last ten points
    mask = np.zeros(shape=(ncpu.point_count,), dtype='bool')
    mask[-10:] = True
    # Set list of fields to read; None means "all fields"
    field_list = None
    #field_list = ['latitude', 'longitude', 'obsno', 'reliab']
    point_data_generator = ncpu.all_point_data_generator(field_list, mask)
    # Retrieve point variable attributes first - the generator yields the
    # attribute dict before any point data
    variable_attributes = next(point_data_generator)
    logger.info('variable_attributes: {}'.format(variable_attributes))
    # Use long names instead of variable names where they exist
    field_names = [variable_attributes[variable_name].get('long_name') or variable_name
                   for variable_name in variable_attributes.keys()]
    logger.info('field_names: {}'.format(field_names))
    # Remaining yields are per-point value tuples aligned with field_names
    for point_data in point_data_generator:
        #logger.debug('point_data: {}'.format(pformat(point_data)))
        result_dict = dict(zip(field_names, point_data))
        logger.info('result_dict: {}'.format(result_dict))
if __name__ == '__main__':
    # Setup logging handlers if required
    console_handler = logging.StreamHandler(sys.stdout)
    #console_handler.setLevel(logging.INFO)
    console_handler.setLevel(logging.DEBUG)
    console_formatter = logging.Formatter('%(message)s')
    console_handler.setFormatter(console_formatter)
    # Only attach the handler once to avoid duplicated log lines
    if not logger.handlers:
        # Set handler for root logger to standard output
        logger.addHandler(console_handler)
        logger.debug('Logging handlers set up for logger {}'.format(logger.name))
    # Mirror the same handler onto the sibling module's logger
    ncu_logger = logging.getLogger('geophys_utils._netcdf_utils')
    if not ncu_logger.handlers:
        ncu_logger.addHandler(console_handler)
        logger.debug('Logging handlers set up for {}'.format(ncu_logger.name))
    main()
| GeoscienceAustralia/geophys_utils | geophys_utils/_netcdf_point_utils.py | _netcdf_point_utils.py | py | 66,225 | python | en | code | 22 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "geophys_utils._netcdf_utils.NetCDFUtils",
"line_number": 52,
"usage_type": "name"
},
{
"api_n... |
21112708622 | from .plugin import Plugin
import js2py
# Plugin display name
name = '测试插件'
# Plugin description
description = """
仅供测试
"""
# Plugin author
author = 'kksanyu'
# Whether this plugin is enabled
enable = True
# Demo JavaScript source, evaluated at runtime via js2py
jsAddFunc = """
function add(a, b) {
return a + b;
}
"""
class Demo(Plugin):
    # Minimal example plugin: prints the caller's telephone and evaluates
    # an embedded JavaScript function via js2py.

    def run(self, options):
        """Execute the demo: log the telephone from *options*, then run the
        embedded JS ``add`` function and print its result."""
        print('运行Demo::run', options.telephone)
        # raise RuntimeError('test exception')  # kept for manual failure testing
        # Compile the JS snippet into a Python-callable function.
        add = js2py.eval_js(jsAddFunc)
        a = 1
        b = 2
        c = add(a, b)
        print('计算结果', c)
def instance():
    """Create and return a Demo plugin built from the module-level metadata."""
    return Demo(name, description, author, enable)
| superdashu/who_are_you | plugins/demo.py | demo.py | py | 623 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "plugin.Plugin",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "js2py.eval_js",
"line_number": 28,
"usage_type": "call"
}
] |
26221879462 | from pyarrow._fs import ( # noqa
FileSelector,
FileType,
FileInfo,
FileSystem,
LocalFileSystem,
SubTreeFileSystem,
_MockFileSystem,
_normalize_path,
FileSystemHandler,
PyFileSystem,
)
# For backward compatibility.
FileStats = FileInfo
_not_imported = []
try:
from pyarrow._hdfs import HadoopFileSystem # noqa
except ImportError:
_not_imported.append("HadoopFileSystem")
try:
from pyarrow._s3fs import ( # noqa
S3FileSystem, S3LogLevel, initialize_s3, finalize_s3)
except ImportError:
_not_imported.append("S3FileSystem")
else:
initialize_s3()
def __getattr__(name):
    """Module-level attribute hook.

    Names recorded in ``_not_imported`` correspond to filesystem classes
    whose optional backend is not built into this pyarrow installation;
    accessing them raises ImportError. Anything else is a genuine missing
    attribute.
    """
    if name not in _not_imported:
        raise AttributeError(
            "module 'pyarrow.fs' has no attribute '{0}'".format(name)
        )
    raise ImportError(
        "The pyarrow installation is not built with support for "
        "'{0}'".format(name)
    )
def _ensure_filesystem(filesystem, use_mmap=False):
    """Coerce *filesystem* into a new-style pyarrow FileSystem.

    Accepts, in order of preference: a native pyarrow FileSystem (returned
    as-is), an fsspec-compatible filesystem (wrapped in PyFileSystem via
    FSSpecHandler, or mapped to the native LocalFileSystem when it is a
    plain local one), or a legacy pyarrow LocalFileSystem. Raises TypeError
    for anything else.
    """
    if isinstance(filesystem, FileSystem):
        return filesystem

    # handle fsspec-compatible filesystems
    try:
        import fsspec
    except ImportError:
        # fsspec is optional; fall through to the legacy mapping below
        pass
    else:
        if isinstance(filesystem, fsspec.AbstractFileSystem):
            if type(filesystem).__name__ == 'LocalFileSystem':
                # In case its a simple LocalFileSystem, use native arrow one
                return LocalFileSystem(use_mmap=use_mmap)
            return PyFileSystem(FSSpecHandler(filesystem))

    # map old filesystems to new ones
    from pyarrow.filesystem import LocalFileSystem as LegacyLocalFileSystem

    if isinstance(filesystem, LegacyLocalFileSystem):
        return LocalFileSystem(use_mmap=use_mmap)
    # TODO handle HDFS?

    raise TypeError("Unrecognized filesystem: {}".format(type(filesystem)))
class FSSpecHandler(FileSystemHandler):
    """
    Handler for fsspec-based Python filesystems.

    Adapts any fsspec AbstractFileSystem to the pyarrow FileSystemHandler
    interface so it can be used through PyFileSystem.

    https://filesystem-spec.readthedocs.io/en/latest/index.html

    >>> PyFileSystem(FSSpecHandler(fsspec_fs))
    """

    def __init__(self, fs):
        self.fs = fs

    def __eq__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs == other.fs
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, FSSpecHandler):
            return self.fs != other.fs
        return NotImplemented

    def get_type_name(self):
        # fsspec filesystems may declare a list of protocols; report the first
        protocol = self.fs.protocol
        if isinstance(protocol, list):
            protocol = protocol[0]
        return "fsspec+{0}".format(protocol)

    @staticmethod
    def _create_file_info(path, info):
        """Translate an fsspec ``info`` dict into a pyarrow FileInfo."""
        size = info["size"]
        if info["type"] == "file":
            ftype = FileType.File
        elif info["type"] == "directory":
            ftype = FileType.Directory
            # some fsspec filesystems include a file size for directories
            size = None
        else:
            ftype = FileType.Unknown
        return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None))

    def get_file_info(self, paths):
        """Return a FileInfo per path; missing paths become FileType.NotFound."""
        infos = []
        for path in paths:
            try:
                info = self.fs.info(path)
            except FileNotFoundError:
                infos.append(FileInfo(path, FileType.NotFound))
            else:
                infos.append(self._create_file_info(path, info))
        return infos

    def get_file_info_selector(self, selector):
        """List entries under ``selector.base_dir`` honoring recursion flags."""
        if not self.fs.isdir(selector.base_dir):
            if self.fs.exists(selector.base_dir):
                raise NotADirectoryError(selector.base_dir)
            else:
                if selector.allow_not_found:
                    return []
                else:
                    raise FileNotFoundError(selector.base_dir)

        if selector.recursive:
            maxdepth = None
        else:
            maxdepth = 1

        infos = []
        selected_files = self.fs.find(
            selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True
        )
        for path, info in selected_files.items():
            infos.append(self._create_file_info(path, info))

        return infos

    def create_dir(self, path, recursive):
        # mkdir also raises FileNotFoundError when base directory is not found
        self.fs.mkdir(path, create_parents=recursive)

    def delete_dir(self, path):
        self.fs.rm(path, recursive=True)

    def _delete_dir_contents(self, path):
        # Remove every child of ``path`` without removing ``path`` itself.
        for subpath in self.fs.listdir(path, detail=False):
            if self.fs.isdir(subpath):
                self.fs.rm(subpath, recursive=True)
            elif self.fs.isfile(subpath):
                self.fs.rm(subpath)

    def delete_dir_contents(self, path):
        if path.strip("/") == "":
            # BUGFIX: three separate arguments were previously passed to
            # ValueError, producing a tuple-style message; build one string.
            raise ValueError(
                "delete_dir_contents called on path '{}'".format(path))
        self._delete_dir_contents(path)

    def delete_root_dir_contents(self):
        self._delete_dir_contents("/")

    def delete_file(self, path):
        # fs.rm correctly raises IsADirectoryError when `path` is a directory
        # instead of a file and `recursive` is not set to True
        if not self.fs.exists(path):
            raise FileNotFoundError(path)
        self.fs.rm(path)

    def move(self, src, dest):
        self.fs.mv(src, dest, recursive=True)

    def copy_file(self, src, dest):
        # fs.copy correctly raises IsADirectoryError when `src` is a directory
        # instead of a file
        self.fs.copy(src, dest)

    def open_input_stream(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_input_file(self, path):
        from pyarrow import PythonFile

        if not self.fs.isfile(path):
            raise FileNotFoundError(path)

        return PythonFile(self.fs.open(path, mode="rb"), mode="r")

    def open_output_stream(self, path):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="wb"), mode="w")

    def open_append_stream(self, path):
        from pyarrow import PythonFile

        return PythonFile(self.fs.open(path, mode="ab"), mode="w")
| ejnunn/PPE-Object-Detection | env/lib/python3.7/site-packages/pyarrow/fs.py | fs.py | py | 6,213 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "pyarrow._fs.FileInfo",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pyarrow._s3fs.initialize_s3",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyarrow._fs.FileSystem",
"line_number": 46,
"usage_type": "argument"
},
{
"api_n... |
21835433544 | """Labels app urls"""
from django.urls import path
from task_manager.labels.views import (
LabelsView,
LabelUpdateView,
LabelCreateView,
LabelDeleteView,
)
# Namespace used when reversing these URLs (e.g. 'labels:index')
app_name = 'labels'

urlpatterns = [
    # Full list of labels
    path('', LabelsView.as_view(), name='index'),
    # Edit an existing label identified by primary key
    path('<int:pk>/update/', LabelUpdateView.as_view(), name='upd_label'),
    # Delete an existing label identified by primary key
    path('<int:pk>/delete/', LabelDeleteView.as_view(), name='del_label'),
    # Create a new label
    path('create/', LabelCreateView.as_view(), name='create'),
]
| GunGalla/python-project-52 | task_manager/labels/urls.py | urls.py | py | 476 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "task_manager.labels.views.LabelsView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "task_manager.labels.views.LabelsView",
"line_number": 14,
"usage_type": "na... |
75336194426 | import requests
import collections
import secrets
import json
import sqlite3
import scrape
from bs4 import BeautifulSoup
API_KEY = secrets.API_KEY
headers = {
"Authorization": "Bearer %s" % API_KEY
}
BASEURL = 'https://api.yelp.com/v3/businesses/search'
CACHE_DICT = {}
CACHE_FILENAME = 'search_cache.json'
DB_NAME = 'yelp.sqlite'
class Filter:
    """Collects unique city names and search terms for Yelp queries."""

    def __init__(self):
        self.cities = []
        self.terms = []

    def add_city(self, city):
        """Track *city*, ignoring duplicates."""
        if city in self.cities:
            return
        self.cities.append(city)

    def add_term(self, t):
        """Track search term *t*, ignoring duplicates."""
        if t in self.terms:
            return
        self.terms.append(t)

    def show_city_list(self):
        """Return the tracked cities in insertion order."""
        return self.cities

    def show_term_list(self):
        """Return the tracked search terms in insertion order."""
        return self.terms
class Business:
    # Wraps one Yelp API business record, normalizing missing/null fields
    # and scraping a picture and review from the business page.

    def __init__(self, business):
        # .get(key, default) only applies the default when the key is ABSENT;
        # a key present with a null JSON value still yields None, hence the
        # explicit `== None` normalization below.
        self.business_name = business.get('name', '')
        self.yelp_id = business.get('id', '')
        self.city = business.get('location', {}).get('city', '')
        self.phone_number = business.get('phone', '')
        self.review_count = business.get('review_count', -1)
        self.rating = business.get('rating', -1)
        # Price level is the number of '$' signs in the price string.
        # NOTE(review): if 'price' is present but null, .count raises
        # AttributeError before the None-guard below can run — confirm
        # whether the API ever returns an explicit null here.
        self.price = business.get('price', '').count('$')
        self.url = business.get('url', '')
        self.address = business.get('location', {}).get('address1', '')
        if self.business_name == None: self.business_name = ''
        if self.yelp_id == None: self.yelp_id = ''
        if self.city == None: self.city = ''
        if self.phone_number == None: self.phone_number = ''
        if self.review_count == None: self.review_count = -1
        if self.rating == None: self.rating = -1
        if self.price == None: self.price = 0
        if self.url == None: self.url = ''
        if self.address == None: self.address = ''
        # Up to three category titles, padded with the literal 'NULL'
        # placeholder expected by the category_info table.
        self.category = ['NULL'] * 3
        if 'categories' in business:
            for i in range(min(3, len(business['categories']))):
                self.category[i] = business['categories'][i]['title']
        # Scrape picture URL and a review snippet from the business page.
        self.pic, self.review = scrape.get_info_from_url(self.url)

    def get_business_info(self):
        # Row for the business_info table (9 columns).
        return [self.yelp_id, self.business_name, self.city, self.phone_number,
                self.review_count, self.rating, self.price, self.address, self.url]

    def get_category_info(self):
        # Row for the category_info table: yelp_id plus three category slots.
        return [self.yelp_id] + self.category
def open_cache():
    ''' Opens the cache file if it exists and loads the JSON into
    the CACHE_DICT dictionary.
    if the cache file doesn't exist, creates a new cache dictionary

    Args:
        None
    Returns:
        cache_dict (dict): The opened cache.
    '''
    try:
        # 'with' guarantees the file handle is closed even if parsing fails
        # (the original leaked the handle on a JSON error).
        with open(CACHE_FILENAME, 'r') as cache_file:
            cache_dict = json.load(cache_file)
    except (OSError, ValueError):
        # Missing/unreadable file or corrupt JSON (JSONDecodeError is a
        # ValueError subclass): start with an empty cache. Narrowed from a
        # bare `except:` so real bugs are no longer silently swallowed.
        cache_dict = {}
    return cache_dict
def save_cache(cache_dict):
    ''' Saves the current state of the cache to disk

    Args:
        cache_dict (dict): The dictionary to save.
    Returns:
        None
    '''
    # 'with' closes the file even if serialization or the write fails.
    with open(CACHE_FILENAME, "w") as fw:
        fw.write(json.dumps(cache_dict))
def construct_unique_key(baseurl, params):
    ''' constructs a key that is guaranteed to uniquely and
    repeatably identify an API request by its baseurl and params

    Args:
        baseurl (str): The URL for the API endpoint.
        params (dict): A dictionary of param:value pairs.
    Returns:
        unique_key (str): The unique key as a string.
    '''
    connector = "_"
    # Sort the "key_value" fragments so the key is order-independent.
    fragments = sorted(
        "{}_{}".format(key, value) for key, value in params.items()
    )
    return baseurl + connector + connector.join(fragments)
def make_request(baseurl, params):
    '''Make a request to the Web API using the baseurl and params

    Args;
        baseurl (str): The URL for the API endpoint.
        params (dict): A dictionary of param:value pairs.
    Returns:
        results (dict): The JSON response from the request.
    '''
    # Module-level `headers` carries the Yelp API bearer token.
    response = requests.get(baseurl, params=params, headers=headers)
    results = response.json()
    return results
def make_request_with_cache(baseurl, term='', location='', count=50):
    ''' Check the cache for a saved result for this baseurl+params:values
    combo. If the result is found, return it. Otherwise send a new
    request, save it, then return it.

    Args:
        baseurl (str): The URL for the API endpoint
        term (str): The search term passes to the API.
        location (str): The search location passes to the API.
        count (int): The number of business results to return.
    Return:
        results (dict): The JSON response from the request.
    '''
    # Normalize term/location so equivalent queries share one cache entry.
    params = {
        'term': term.lower().replace(" ", "+"),
        'location': location.lower().replace(" ", "+"),
        'limit': count
    }
    request_key = construct_unique_key(baseurl=baseurl, params=params)
    if request_key in CACHE_DICT:
        # The data has been fetched before and stored in the cache
        return CACHE_DICT[request_key]
    else:
        # Cache miss: hit the API, then persist the updated cache to disk.
        results = make_request(baseurl=baseurl, params=params)
        CACHE_DICT[request_key] = results
        save_cache(cache_dict=CACHE_DICT)
        return results
def write_to_business_single(info):
    ''' Write a row into business_info table

    Args:
        info (list): A list of business information (9 values matching
            the business_info columns).
    Returns:
        None
    '''
    insert_instructors = '''
        INSERT OR REPLACE INTO business_info
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    '''
    connection = sqlite3.connect(DB_NAME)
    try:
        cursor = connection.cursor()
        cursor.execute(insert_instructors, info)
        connection.commit()
    finally:
        # Always release the connection, even if the INSERT fails
        # (the original leaked it on any exception).
        connection.close()
def write_to_category_single(info):
    ''' Write a row into category_info table.

    Args:
        info (list): A list of category info of one business (yelp_id
            plus three category titles).
    Returns:
        None
    '''
    insert_instructors = '''
        INSERT OR REPLACE INTO category_info
        VALUES (?, ?, ?, ?)
    '''
    connection = sqlite3.connect(DB_NAME)
    try:
        cursor = connection.cursor()
        cursor.execute(insert_instructors, info)
        connection.commit()
    finally:
        # Always release the connection, even if the INSERT fails
        # (the original leaked it on any exception).
        connection.close()
def write_to_business(list):
    ''' Write multiple rows into business_info table.

    Args:
        list (list): A list of business objects.
    Returns:
        None
    '''
    # NOTE(review): the parameter shadows the builtin `list`; renaming it
    # would be cleaner but would break any keyword-argument callers.
    for busi_obj in list:
        write_to_business_single(busi_obj.get_business_info())
def write_to_category(list):
    ''' Write multiple rows into category_info table.

    Args:
        list (list): A list of business objects.
    Returns:
        None
    '''
    # NOTE(review): the parameter shadows the builtin `list`; renaming it
    # would be cleaner but would break any keyword-argument callers.
    for busi_obj in list:
        write_to_category_single(busi_obj.get_category_info())
def get_business_list(term='', location='', count=50):
    ''' Fetch the data throught API and process the JSON
    response into two lists.

    Args:
        term (str): The search term passed into API.
        location (str): The search location passed into API.
        count (int): The number of business results to return.
    Returns:
        business_list (list): A list of business objects.
    '''
    results = make_request_with_cache(baseurl=BASEURL,
                                      term=term, location=location, count=count)
    # The Yelp search response stores matches under the 'businesses' key.
    business_info = results['businesses']
    business_list = []
    for business in business_info:
        # Business() normalizes fields and scrapes the page for extras.
        business_list.append(
            Business(business)
        )
    return business_list
if __name__ == '__main__':
    # term = 'barbecue'
    # city = 'New York'
    # busi_list = get_business_list(term=term, location=city, count=50)
    # write_to_business_single(busi_list[31].get_business_info())

    # Seed the filter from data tables instead of one add_* call per value;
    # order is preserved so the crawl visits the same (term, city) pairs.
    search_terms = [
        'Chinese restaurants', 'Japanese restaurants', 'Indian restaurants',
        'Mediterranean restaurants', 'breakfast', 'barbecue', 'coffee',
        'noodles', 'food', 'hamburger', 'sandwich', 'bubble tea', 'taco',
        'dumplings', 'Korean', 'sushi', 'ramen', 'curry', 'cocktail', 'bar',
        'seafood', 'hot pot', 'steak', 'Vegetarian',
    ]
    search_cities = [
        'San Francisco', 'Seattle', 'New York', 'Ann Arbor', 'San Jose',
        'Boston', 'Los Angeles', 'Las Vegas', 'Chicago', 'Washington',
        'Detroit',
    ]

    f = Filter()
    for search_term in search_terms:
        f.add_term(search_term)
    for search_city in search_cities:
        f.add_city(search_city)

    # Fetch up to 50 businesses per (term, city) pair and persist both the
    # business rows and their category rows.
    for term in f.show_term_list():
        for city in f.show_city_list():
            print(term, city)
            busi_list = get_business_list(term=term, location=city, count=50)
            write_to_business(busi_list)
            write_to_category(busi_list)
| kedongh/507_final_proj | yelp.py | yelp.py | py | 8,122 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "secrets.API_KEY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "scrape.get_info_from_url",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "json.dumps",... |
32727491090 | import sqlite3
# Seed records inserted into a freshly rebuilt posts table.
posts = [
    {
        'author': 'Dova Kin',
        'title': 'First Post',
        'content': 'First post.',
        'date_posted': '20200301'
    },
    {
        'author': 'Angi\'s Cabin',
        'title': 'Second Post',
        'content': 'Second post.',
        'date_posted': '20200302'
    },
    {
        'author': 'Lydia Doorblocker',
        'title': 'Third Post',
        'content': 'I am sworn to carry your burdens.',
        'date_posted': '20200302'
    }
]

# SQL statements: rebuild the table, then bulk-insert via named parameters.
deletedb = """drop table if exists posts"""

createdb = """create table if not exists posts (
author TEXT NOT NULL,
title TEXT NOT NULL,
content TEXT NOT NULL,
date_posted TEXT NOT NULL
)
"""

insertdb = """
insert into posts ( author, title, content, date_posted) values ( :author, :title, :content, :date_posted )
"""

with sqlite3.connect("posts.db") as conn:
    # with sqlite3.connect(":memory:") as conn:
    cursor = conn.cursor()
    for statement in (deletedb, createdb):
        cursor.execute(statement)
    conn.commit()
    cursor.executemany(insertdb, posts)
    conn.commit()
    cursor.execute("select * from posts")
    print(cursor.fetchall())
| majorgear/flask_blog | utils/populate_db.py | populate_db.py | py | 1,163 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 38,
"usage_type": "call"
}
] |
28669919345 | import json
import unittest
from ..base import AsyncAPITestCase
from yiyun.models import (User, Team, TeamMember, TeamMemberGroup,
Sport, Activity,
ActivityMember, TeamOrder)
from yiyun.service.order import OrderService
class UserOrderTestCase(AsyncAPITestCase):
    # Integration tests for the "/users/self/orders" REST endpoints.
    # Drop test data after the run instead of retaining it.
    RETAIN_DATA = False
    # Send requests with a JSON content-type header.
    json_header = True
    # Models whose tables must exist for these tests.
    REQUIRED_MODELS = [Sport, User, Team, TeamMember, TeamMemberGroup,
                       Activity, ActivityMember, TeamOrder]

    LIST_PATH = "api/2/users/self/orders"
    ORDER_DETAIL = LIST_PATH + "/{order_no}"

    def setUp(self):
        super(UserOrderTestCase, self).setUp()
        self.initial_data()

    def initial_data(self):
        # Build one team (with an owner), one activity and one paid order
        # belonging to self.user, so list/detail endpoints have data.
        self.team_owner = User.create(name='test_activity')
        self.team = Team.create(name='club_test_activity',
                                owner_id=self.team_owner.id)

        self.user = self.creator = User.create(name='activity_creator')
        self.activity = Activity.create(team=self.team,
                                        creator=self.creator,
                                        price='10', vip_price='8',
                                        leader=self.creator,
                                        title='just a test',
                                        description='description',
                                        start_time='3000-01-01 00:00:01',
                                        end_time='3000-12-31 23:59:59')

        self.order = OrderService.new_order(10, self.team, self.user,
                                            TeamOrder.OrderType.ACTIVITY,
                                            TeamOrder.OrderPaymentMethod.WXPAY,
                                            self.activity.id,
                                            title="UserOrderTest"
                                            )
        # Register the user on the activity with the order marked as paid.
        self.activity.add_member(self.user.id,
                                 users_count=1,
                                 price=10,
                                 free_times=0,
                                 total_fee=10,
                                 order_id=self.order.id,
                                 order_no=self.order.order_no,
                                 payment_method=TeamOrder.OrderPaymentMethod.WXPAY,
                                 payment_state=TeamOrder.OrderState.TRADE_BUYER_PAID,
                                 state=TeamMember.TeamMemberState.normal)

    def test_list_all_orders(self):
        # The authenticated user sees their orders, without internal ids.
        self.auth_user = self.user
        response = self.fetch(self.LIST_PATH)
        self.assertEqual(200, response.code, response.body.decode())

        result = json.loads(response.body.decode())
        self.assertIn("orders", result, result)
        self.assertNotIn("id", result["orders"][0], result)

    def test_order_detail(self):
        url = self.ORDER_DETAIL.format(order_no=self.order.order_no)
        # 404, not my order
        self.auth_user = self.team_owner
        response = self.fetch(url)
        self.assertEqual(404, response.code, response.body.decode())

        # 200
        self.auth_user = self.user
        response = self.fetch(url)
        self.assertEqual(200, response.code, response.body.decode())

        result = json.loads(response.body.decode())
        self.assertEqual(self.user.id, result["user"]["id"], result)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| haoweiking/image_tesseract_private | PaiDuiGuanJia/yiyun/tests/rest/order.py | order.py | py | 3,424 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base.AsyncAPITestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "yiyun.models.Sport",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "yiyun.models.User",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "yiyun.mode... |
23135790413 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 21:16:51 2019
@author: eikivi
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy import or_
engine = create_engine('sqlite:///sales.db', echo = False)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Customers(Base):
    # ORM mapping for the 'customers' table in sales.db.
    __tablename__ = 'customers'

    id = Column(Integer, primary_key = True)
    name = Column(String)
    address = Column(String)
    email = Column(String)

from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind = engine)
session = Session()

try:
    # Earlier filter experiments kept for reference:
    # result = session.query(Customers).filter(or_(Customers.id>2, Customers.name.like('Ei%')))
    #result = session.query(Customers).filter(Customers.id<2, Customers.name.like('Ja%'))
    # Select every customer except the one with id 2.
    result = session.query(Customers).filter(Customers.id != 2)
    #result = session.query(Customers).one()
except:
    session.rollback()
    raise
finally:
    session.close()
print("")
#print(result)
# NOTE(review): the query result is iterated after session.close(); this
# presumably relies on the query executing eagerly — confirm it still
# works if lazy loading is involved.
for row in result:
    print("")
    print ("ID:", row.id, "Name: ",row.name, "Address:",row.address, "Email:",row.email)
{
"api_name": "sqlalchemy.create_engine",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{... |
73696099389 | import os
import subprocess
from datetime import timedelta
from . import dispersion_file_utils as dfu
from .dispersiongrid import BSDispersionGrid, BSDispersionPlot, create_color_plot
class PolygonGenerator(object):
    """Generates polygon kmls from a NETCDF file representing smoke dispersion
    time series.

    Public Instance Attributes:
        output_dir - output directory containing generated polygon kmls,
            legend, xsl file, and cutpoints file
        legend_filename - legend's file name
        kml_files - list of tuples of the form (<kml file name>, <prediction
            timestamp>)
    """

    # HACK: It would be more elegant to generate xml using an XML package, like minidom.
    # We're using raw strings for speed of implementation.
    XSL_FIRST_PART = r"""<?xml version="1.0"?>
<!-- This is an xsl stylesheet to add styles to an OGR generated KML file -->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:kml="http://www.opengis.net/kml/2.2" version="1.0">
<xsl:output method="xml" indent="yes" omit-xml-declaration="no" encoding="utf-8"/>
<!-- In general, just pass through all elements and attributes -->
<xsl:template match="*">
<xsl:copy>
<xsl:copy-of select="@*" />
<xsl:apply-templates />
</xsl:copy>
</xsl:template>
<!-- We want to eliminate any embedded style because we don't want to hide the external styles -->
<xsl:template match="kml:Style" />
<!-- Eliminate Schema and ExtendedData -->
<xsl:template match="kml:Schema" />
<xsl:template match="kml:ExtendedData" />
<xsl:template match="kml:Document">
<xsl:copy>
<xsl:copy-of select="@*" />
"""

    XSL_STYLE_ELEMENT = """<Style id=\"%s\">
<PolyStyle>
<color>%s</color>
<fill>%s</fill>
<outline>0</outline>
</PolyStyle>
</Style>
"""

    XSL_LAST_PART = r"""<xsl:apply-templates />
</xsl:copy>
</xsl:template>
<xsl:template match="kml:Placemark">
<xsl:copy>
<xsl:copy-of select="@*" />
<styleUrl><xsl:value-of select="./kml:ExtendedData/kml:SchemaData/kml:SimpleData[@name='Category']" /></styleUrl>
<xsl:apply-templates />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""

    POLYGONS_CONFIG_SECTION = 'PolygonsKML'

    # TODO: pass in individual values from config rather than config itself.
    def __init__(self, config, parameter):
        self._config = config
        self._parameter = parameter
        # TODO: support multiple color schemes
        self._color_bar_section = self._config.get(self.POLYGONS_CONFIG_SECTION, 'POLYGON_COLORS').split(',')[0]
        self._create_output_dir()
        self._import_grid()
        self._generate_custom_cutpoints_file()
        self._generate_custom_xsl_files()
        self._generate_kmls()
        self._generate_legend()

    def _create_output_dir(self):
        """Create the parameter-specific output directory if needed."""
        self.output_dir = self._config.get(self.POLYGONS_CONFIG_SECTION,
            'POLYGONS_OUTPUT_DIR').rstrip('/') + '-' + self._parameter.lower()
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def _import_grid(self):
        """Load the dispersion grid for the configured input file/parameter."""
        self._infile = self._config.get('DispersionGridInput', "FILENAME")
        self._makepolygons_infile = "NETCDF:%s:%s" % (self._infile, self._parameter)
        self._grid = BSDispersionGrid(self._infile, param=self._parameter)  # dispersion grid instance

    def _generate_custom_cutpoints_file(self):
        """Write the Name,Threshold CSV consumed by the MAKEPOLYGONS binary."""
        self._custom_cutpoints_filename = os.path.join(self.output_dir, 'CutpointsGateway.csv')
        levels = [s for s in self._config.get(self._color_bar_section, "DATA_LEVELS").split()]
        # 'with' ensures the handle is closed even if a write fails
        # (the original used open()/close() and leaked on error).
        with open(self._custom_cutpoints_filename, 'w') as newfile:
            newfile.write("Name,Threshold\n")
            for i in range(len(levels)):
                newfile.write("Cat%d,%s\n" % (i, levels[i]))

    def _generate_custom_xsl_files(self):
        """Write the XSL stylesheet mapping each category to a polygon style."""
        self._custom_xsl_filename = os.path.join(self.output_dir, 'KMLPolygonStyleGateway.xsl')
        hex_colors = self._parse_colors()
        with open(self._custom_xsl_filename, 'w') as newfile:
            newfile.write(self.XSL_FIRST_PART)
            for i in range(len(hex_colors)):
                if hex_colors[i] == '000000':
                    # Pure black is treated as "no fill" / fully transparent.
                    color_str = '00000000'
                    fill_str = '0'
                else:
                    # '99' alpha (~60% opacity) prepended to the bbggrr value.
                    color_str = '99%s' % (hex_colors[i])
                    fill_str = '1'
                newfile.write(self.XSL_STYLE_ELEMENT % ("Cat%d" % (i), color_str, fill_str))
            newfile.write(self.XSL_LAST_PART)

    def _parse_colors(self):
        """Return the configured colors as kml-ordered hex strings (bbggrr)."""
        if self._config.getboolean(self._color_bar_section, "DEFINE_RGB"):
            r = [int(s) for s in self._config.get(self._color_bar_section, "RED").split()]
            g = [int(s) for s in self._config.get(self._color_bar_section, "GREEN").split()]
            b = [int(s) for s in self._config.get(self._color_bar_section, "BLUE").split()]
            if not len(r) == len(g) == len(b):
                raise Exception("Configuration ERROR... RED, GREEN, BLUE must specify same number of values.")
            # kml colors are specified as 'aabbggrr' (where 'aa' is the alpha value)
            return ['%02x%02x%02x' % (b[i], g[i], r[i]) for i in range(len(r))]
        elif self._config.getboolean(self._color_bar_section, "DEFINE_HEX"):
            return [s.strip('#') for s in self._config.get(self._color_bar_section, "HEX_COLORS").split()]
        else:
            raise Exception("Configuration ERROR... DEFINE_RGB or HEX_COLORS must be true.")

    def _generate_kmls(self):
        """Generate one polygon kml per time step, stopping at first failure."""
        self._kml_file_basename, ext = os.path.splitext(os.path.basename(self._infile))
        dfu.create_polygon_kmls_dir(self._config, self._parameter)
        self.kml_files = []
        # def my_output_handler(logger, output, is_stderr):
        #     logger.log(OUTPUT, output)
        for i in range(self._grid.num_times):
            try:
                self._generate_kml(i)
            except Exception:
                # Narrowed from bare `except:` so Ctrl-C still propagates;
                # any per-band failure ends kml generation (best effort).
                break

    def _generate_kml(self, i):
        """Run the MAKEPOLYGONS binary for time index *i* (band i + 1).

        Raises RuntimeError when the external command exits non-zero.
        """
        # NOTE(review): timestamps are shifted back one hour — presumably to
        # label the start of the prediction interval; confirm upstream.
        dt = self._grid.datetimes[i] - timedelta(hours=1)
        band = i + 1
        #self.log.debug("Processing %s band %2d: %s...", name, band, dt.strftime("%Y-%m-%d %HZ"))
        #kmlfile = dt.strftime(name + "_%Y%m%d%H.kml")
        kmlfile = self._kml_file_basename + str(band) + ".kml"
        poly_file = os.path.join(self.output_dir, kmlfile)
        #self.log.debug("Opened poly_file %s", poly_file)
        args = [
            self._config.get(self.POLYGONS_CONFIG_SECTION, "MAKEPOLYGONS_BINARY"),
            "-in=" + self._makepolygons_infile,
            "-band=" + str(band),
            "-cutpoints=" + os.path.abspath(self._custom_cutpoints_filename),
            "-format=KML",
            "-kmlStyle=" + self._custom_xsl_filename,
            "-out=" + poly_file
        ]
        if subprocess.call(' '.join(args), shell=True) != 0:
            msg = "Failure while trying to create %s" % (poly_file)
            #self.log.error(msg)
            raise RuntimeError(msg)
        self.kml_files.append((kmlfile, dt))

    LEGEND_FILENAME_ROOT = 'colorbar_polygons'

    def _generate_legend(self):
        """Render the colorbar legend image and record its file name."""
        plot = create_color_plot(self._config, self._parameter, self._grid,
                                 self._color_bar_section)
        root_w_parameter = "{}_{}".format(self._parameter.lower(),
                                          self.LEGEND_FILENAME_ROOT)
        plot.make_colorbar(os.path.join(self.output_dir, root_w_parameter))
        self.legend_filename = "%s.%s" % (root_w_parameter, plot.export_format)
| pnwairfire/blueskykml | blueskykml/polygon_generator.py | polygon_generator.py | py | 7,736 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "dispersiongrid.BSDispersionGr... |
13202825418 | import datetime
menu = """
[d] Depositar
[s] Sacar
[e] Extrato
[q] Sair
=> """
saldo = 0
limite = 500
extrato = []
numero_saques = 0
total_saque_diario = 0
LIMITE_SAQUES = 3
while True:
opcao = input(menu)
if opcao == 'd':
valor = input('Valor do depósito (número inteiro e positivo): ')
if valor.isdigit() and int(valor) > 0:
valor = int(valor)
saldo += valor
data_hora = datetime.datetime.now()
extrato.append(('d', valor, data_hora))
print('Depósito realizado com sucesso.')
else:
print(
'Valor de depósito inválido. O valor deve ser um número inteiro positivo.')
elif opcao == 's':
if numero_saques >= LIMITE_SAQUES:
print('Quantidade de saques diários atingido.')
else:
try:
valor = int(input('Valor do saque: '))
if valor > 0 and total_saque_diario + valor <= limite:
if valor > saldo + limite:
print('Saldo insuficiente.')
else:
saldo -= valor
data_hora = datetime.datetime.now()
extrato.append(('s', valor, data_hora))
numero_saques += 1
total_saque_diario = total_saque_diario + valor
print('Saque realizado com sucesso.')
else:
print('Valor limite de saque diário atingido.')
except ValueError:
print('O valor do saque deve ser um número inteiro positivo.')
elif opcao == 'e':
print('\nExtrato Bancário')
print('#####################################################\n')
print('| OP | Data/Hora | Valor')
print('-----------------------------------------------------')
for operacao, valor, data_hora in extrato:
if operacao == 'd':
print(
f'| D | {data_hora.strftime("%d-%m-%Y %H:%M:%S")} | R${valor}')
elif operacao == 's':
print(
f'| S | {data_hora.strftime("%d-%m-%Y %H:%M:%S")} |-R${valor}')
print('-----------------------------------------------------')
print('#####################################################')
print(
f'| Saldo em {data_hora.strftime("%d-%m-%Y %H:%M:%S")} -> R$ {saldo}')
print('#####################################################\n')
elif opcao == 'q':
print('Você saiu do sistema...')
break
else:
print('Opção inválida.')
| ElPablitoBR/btc-c-d-desafio1 | desafio.py | desafio.py | py | 2,754 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "da... |
32660332596 | """
Implements the baseclasses for all Component types in Lumen.
The base classes implement the core validation logic and the ability
to dynamically resolve Source and Variable references.
"""
from __future__ import annotations
import warnings
from functools import partial
from typing import (
Any, ClassVar, Dict, List, Tuple, Type,
)
import pandas as pd
import panel as pn
import param # type: ignore
from panel.io.cache import _container_hash, _hash_funcs
from panel.util import classproperty
from typing_extensions import Literal
from .state import state
from .util import (
VARIABLE_RE, cleanup_expr, is_ref, resolve_module_reference,
)
from .validation import (
ValidationError, match_suggestion_message, reverse_match_suggestion,
validate_parameters,
)
class Component(param.Parameterized):
    """
    Baseclass for all Lumen component types including Source, Filter,
    Transform, Variable and View types. Components must implement
    serialization and deserialization into a specification dictionary
    via the `from_spec` and `to_spec` protocol. Additionally they
    should implement validation.
    """
    __abstract = True
    # Whether the component allows references
    _allows_refs: ClassVar[bool] = True
    # Parameters that are computed internally and are not part of the
    # component specification
    _internal_params: ClassVar[List[str]] = ['name']
    # Deprecated parameters that are still allowed as spec keys
    _legacy_params: ClassVar[List[str]] = []
    # Keys that must be declared as a list of strings or
    # tuples of strings if one of multiple must be defined.
    _required_keys: ClassVar[List[str | Tuple[str, ...]]] = []
    # Keys that are valid to define
    _valid_keys: ClassVar[List[str] | Literal['params'] | None] = None
    # Whether to enforce parameter validation in specification
    _validate_params: ClassVar[bool] = False
    def __init__(self, **params):
        # Reference values (widgets, Variables, '$...' strings) are split
        # out of the constructor arguments and tracked in self._refs.
        self._refs = params.pop('refs', {})
        expected = list(self.param)
        validate_parameters(params, expected, type(self).name)
        if self._allows_refs:
            params = self._extract_refs(params, self._refs)
        super().__init__(**params)
        # Watch the global Variable store so parameters that reference
        # variables are updated when those variables change.
        for p, ref in self._refs.items():
            if isinstance(state.variables, dict):
                # Variable store not initialized (plain dict); nothing to watch.
                continue
            elif isinstance(ref, str) and '$variables' in ref:
                ref_vars = VARIABLE_RE.findall(ref)
                state.variables.param.watch(partial(self._update_ref, p, ref), ref_vars)
                if '.' not in p and p not in params:
                    # Resolve the initial value for top-level parameters
                    # that were not explicitly supplied.
                    self._update_ref(p, ref)
    def _extract_refs(self, params: Dict[str, Any], refs: Dict[str, Any]):
        """
        Split reference values out of *params*: '$' references are moved
        into *refs*; widgets, Parameters and Variables are registered on
        the global Variable store; nested dict references are recorded
        under dotted keys (e.g. 'param.subkey').
        """
        from .variables import Variable
        processed = {}
        for pname, pval in params.items():
            if is_ref(pval):
                refs[pname] = pval
            elif isinstance(pval, (pn.widgets.Widget, param.Parameter, Variable)):
                var = state.variables.add_variable(pval)
                processed[pname] = var.value
                refs[pname] = f'$variables.{var.name}'
                continue
            elif isinstance(pval, dict):
                subrefs = {}
                processed[pname] = self._extract_refs(pval, subrefs)
                for subkey, subref in subrefs.items():
                    refs[f'{pname}.{subkey}'] = subref
                continue
            else:
                processed[pname] = pval
        return processed
    def _update_ref(self, pname: str, ref: str, *events: param.parameterized.Event):
        """
        Component should implement appropriate downstream events
        following a change in a variable.
        """
        # Evaluate the reference expression against the current variable
        # values; pd.eval allows simple expressions over variables.
        expr = cleanup_expr(ref)
        new_value = pd.eval(expr, local_dict=dict(state.variables), engine='python')
        if '.' in pname:
            # Dotted name: update a nested key inside a dict parameter,
            # copying the container so the parameter change is detected.
            pname, *keys = pname.split('.')
            old = getattr(self, pname)
            current = new = old.copy()
            for k in keys[:-1]:
                current = current[k]
            current[keys[-1]] = new_value
        else:
            new = new_value
        self.param.update({pname: new})
    def _sync_refs(self, trigger: bool = True):
        # Re-resolve every variable reference with events suppressed,
        # then optionally emit a single batched trigger for all of them.
        updates = []
        for p, ref in self._refs.items():
            if isinstance(state.variables, dict):
                continue
            elif isinstance(ref, str) and '$variables' in ref:
                with param.discard_events(self):
                    self._update_ref(p, ref)
                pname, *_ = p.split('.')
                updates.append(pname)
        if trigger:
            self.param.trigger(*updates)
    ##################################################################
    # Validation API
    ##################################################################
    @classproperty
    def _valid_keys_(cls) -> List[str] | None:
        # Resolve _valid_keys: the sentinel 'params' means "all public
        # parameters"; legacy (deprecated) keys are always accepted.
        if cls._valid_keys == 'params':
            valid = [p for p in cls.param if p not in cls._internal_params]
        else:
            valid = cls._valid_keys
        return valid if valid is None else valid + cls._legacy_params
    @classmethod
    def _validate_keys_(cls, spec: Dict[str, Any]):
        """Raise a ValidationError for any unknown key in *spec*."""
        valid_keys = cls._valid_keys_
        for key in spec:
            if valid_keys is None or key in valid_keys:
                continue
            msg = f'{cls.__name__} component specification contained unknown key {key!r}.'
            msg = match_suggestion_message(key, cls._valid_keys_ or [], msg)
            raise ValidationError(msg, spec, key)
    @classmethod
    def _validate_required_(
        cls, spec: Dict[str, Any], required: List[str | Tuple[str, ...]] | None = None
    ):
        """
        Ensure required keys are present in *spec*; a tuple entry means
        at least one of its members must be defined.
        """
        if required is None:
            required_keys = cls._required_keys
        else:
            required_keys = required
        for key in required_keys:
            if isinstance(key, str):
                if key in spec:
                    continue
                msg = f'The {cls.__name__} component requires {key!r} parameter to be defined.'
                msg, attr = reverse_match_suggestion(key, list(spec), msg)
                raise ValidationError(msg, spec, attr)
            elif isinstance(key, tuple):
                if any(f in spec for f in key):
                    continue
                skey = sorted(key)
                key_str = "', '".join(skey[:-1]) + f"' or '{skey[-1]}"
                msg = f"{cls.__name__} component requires one of '{key_str}' to be defined."
                for f in key:
                    msg, attr = reverse_match_suggestion(f, list(spec), msg)
                    if attr:
                        break
                raise ValidationError(msg, spec, attr)
    @classmethod
    def _validate_list_subtypes(
        cls, key: str, subtype: Type[Component], subtype_specs: List[Dict[str, Any] | str],
        spec: Dict[str, Any], context: Dict[str, Any], subcontext: List[Dict[str, Any] | str] | None = None
    ):
        """Validate a list of subcomponent specs, optionally collecting them in *subcontext*."""
        if not isinstance(subtype_specs, list):
            raise ValidationError(
                f'{cls.__name__} component {key!r} key expected list type but got {type(subtype_specs).__name__}. '
                "This could be because of a missing dash in the yaml file.",
                spec, key
            )
        subtypes = []
        for subtype_spec in subtype_specs:
            subtype_spec = subtype.validate(subtype_spec, context)
            subtypes.append(subtype_spec)
            if subcontext is not None:
                subcontext.append(subtype_spec)
        return subtypes
    @classmethod
    def _validate_dict_subtypes(
        cls, key: str, subtype: Type[Component], subtype_specs: Dict[str, Dict[str, Any] | str],
        spec: Dict[str, Any], context: Dict[str, Any], subcontext: Dict[str, Any] | None = None
    ):
        """Validate a dict of named subcomponent specs, optionally collecting them in *subcontext*."""
        if not isinstance(subtype_specs, dict):
            raise ValidationError(
                f'{cls.__name__} component {key!r} key expected dict type but got {type(subtype_specs).__name__}.',
                spec, key
            )
        subtypes = {}
        for subtype_name, subtype_spec in subtype_specs.items():
            subtypes[subtype_name] = subtype.validate(subtype_spec, context)
            if subcontext is not None:
                subcontext[subtype_name] = subtypes[subtype_name]
        return subtypes
    @classmethod
    def _validate_str_or_spec(
        cls, key: str, subtype: Type[Component], subtype_spec: Dict[str, Any] | str,
        spec: Dict[str, Any], context: Dict[str, Any]
    ):
        """Validate a value that is either a named reference or an inline spec."""
        if isinstance(subtype_spec, str):
            if subtype_spec not in context[f'{key}s']:
                msg = f'{cls.__name__} component specified non-existent {key} {subtype_spec!r}.'
                # NOTE(review): membership is checked against context[f'{key}s']
                # but suggestions use context[key] — possible inconsistency; confirm.
                msg = match_suggestion_message(subtype_spec, list(context[key]), msg)
                raise ValidationError(msg, spec, subtype_spec)
            return subtype_spec
        return subtype.validate(subtype_spec, context)
    @classmethod
    def _validate_dict_or_list_subtypes(
        cls, key: str, subtype: Type[Component], subtype_specs: Dict[str, Dict[str, Any] | str] | List[Dict[str, Any] | str],
        spec: Dict[str, Any], context: Dict[str, Any], subcontext: Dict[str, Any] | List[Dict[str, Any] | str] | None = None
    ):
        """Dispatch to the list or dict subtype validator depending on the spec shape."""
        if isinstance(subtype_specs, list):
            assert subcontext is None or isinstance(subcontext, list)
            return cls._validate_list_subtypes(key, subtype, subtype_specs, spec, context, subcontext)
        else:
            assert subcontext is None or isinstance(subcontext, dict)
            return cls._validate_dict_subtypes(key, subtype, subtype_specs, spec, context, subcontext)
    @classmethod
    def _deprecation(
        cls, msg: str, key: str, spec: Dict[str, Any], update: Dict[str, Any]
    ):
        """Warn about a deprecated spec layout and merge *update* under *key*."""
        warnings.warn(msg, DeprecationWarning)
        if key not in spec:
            spec[key] = {}
        spec[key].update(update)
    @classmethod
    def _validate_ref(
        cls, key: str, value: Any, spec: Dict[str, Any], context: Dict[str, Any]
    ):
        """Check that a '$'-reference points at a declared variable or source."""
        refs = value[1:].split('.')
        if refs[0] == 'variables':
            if refs[1] not in context.get('variables', {}):
                msg = f'{cls.__name__} component {key!r} references undeclared variable {value!r}.'
                msg = match_suggestion_message(refs[1], list(context.get('variables', {})), msg)
                raise ValidationError(msg, spec, refs[1])
        elif refs[0] not in context.get('sources', {}):
            msg = f'{cls.__name__} component {key!r} references undeclared source {value!r}.'
            # NOTE(review): the source name is refs[0], yet refs[1] is used for
            # the suggestion (and would raise IndexError for dot-less refs) — confirm.
            msg = match_suggestion_message(refs[1], list(context.get('sources', {})), msg)
            raise ValidationError(msg, spec, refs[1])
    @classmethod
    def _validate_param(cls, key: str, value: Any, spec: Dict[str, Any]):
        """Validate *value* against the declared param; Selector names pass as-is."""
        pobj = cls.param[key]
        try:
            if isinstance(pobj, param.Selector) and pobj.names and value in pobj.names:
                return
            pobj._validate(value)
        except Exception as e:
            msg = f"{cls.__name__} component {key!r} value failed validation: {str(e)}"
            raise ValidationError(msg, spec, key)
    @classmethod
    def _is_component_key(cls, key: str) -> bool:
        """Whether *key* is a ClassSelector parameter holding a Component subtype."""
        if key not in cls.param:
            return False
        pobj = cls.param[key]
        return (
            isinstance(pobj, param.ClassSelector) and
            isinstance(pobj.class_, type) and
            issubclass(pobj.class_, Component)
        )
    @classmethod
    def _is_list_component_key(cls, key: str) -> bool:
        """Whether *key* is a List parameter whose items are Component subtypes."""
        if key not in cls.param:
            return False
        pobj = cls.param[key]
        return (
            isinstance(pobj, param.List) and
            isinstance(pobj.item_type, type) and
            issubclass(pobj.item_type, Component)
        )
    @classmethod
    def _validate_spec_(
        cls, spec: Dict[str, Any], context: Dict[str, Any] | None = None
    ) -> Dict[str, Any]:
        """
        Validate each key of *spec*, dispatching to per-key `_validate_<key>`
        hooks, nested component validators, reference checks or plain
        parameter validation as appropriate.
        """
        validated: Dict[str, Any] = {}
        if context is None:
            context = validated
        for key in (cls._valid_keys_ or list(spec)):
            if key not in spec:
                continue
            val = spec[key]
            if is_ref(val) and not cls._allows_refs:
                raise ValidationError(
                    f'{cls.__name__} component does not allow references but {key} '
                    f'value ({val!r}) is a reference.', spec, val
                )
            if hasattr(cls, f'_validate_{key}'):
                val = getattr(cls, f'_validate_{key}')(val, spec, context)
            elif cls._is_component_key(key):
                val = cls.param[key].class_.validate(val, context)
            elif cls._is_list_component_key(key):
                val = cls._validate_list_subtypes(
                    key, cls.param[key].item_type, val, spec, context
                )
            elif is_ref(val):
                cls._validate_ref(key, val, spec, context)
            elif key in cls.param:
                # '@'-prefixed strings look like callables/expressions and
                # are skipped by plain parameter validation.
                if isinstance(val, str) and val.startswith('@'):
                    continue
                cls._validate_param(key, val, spec)
            validated[key] = val
        return validated
    ##################################################################
    # Public API
    ##################################################################
    @property
    def refs(self) -> List[str]:
        # Only references into the global Variable store.
        return [v for k, v in self._refs.items() if v.startswith('$variables.')]
    @classmethod
    def from_spec(cls, spec: Dict[str, Any] | str) -> 'Component':
        """
        Creates a Component instance from a specification.
        Parameters
        ----------
        spec : dict or str
            Specification declared as a dictionary of parameter values
            or a string referencing a source in the sources dictionary.
        Returns
        -------
        Resolved and instantiated Component object
        """
        if isinstance(spec, str):
            raise ValueError(
                "Component cannot be materialized by reference. Please pass "
                "full specification for the component."
            )
        return cls(**spec)
    def to_spec(self, context: Dict[str, Any] | None = None) -> Dict[str, Any]:
        """
        Exports the full specification to reconstruct this component.
        Parameters
        ----------
        context: Dict[str, Any]
            Context contains the specification of all previously serialized components,
            e.g. to allow resolving of references.
        Returns
        -------
        Declarative specification of this component.
        """
        spec = {}
        for p, value in self.param.values().items():
            # Internal and default-valued parameters are omitted.
            if p in self._internal_params or value == self.param[p].default:
                continue
            elif self._is_component_key(p):
                pspec = value.to_spec(context=context)
                if not pspec:
                    continue
                value = pspec
            elif self._is_list_component_key(p):
                value = [
                    None if v is None else v.to_spec(context=context)
                    for v in value
                ]
            spec[p] = value
        if context is not None:
            # Re-insert unresolved references so the spec round-trips.
            spec.update(self._refs)
        return spec
    @classmethod
    def validate(
        cls, spec: Dict[str, Any] | str, context: Dict[str, Any] | None = None
    ) -> Dict[str, Any] | str:
        """
        Validates the component specification given the validation context.
        Arguments
        -----------
        spec: dict | str
            The specification for the component being validated (or a reference to the component)
        context: dict
            Validation context contains the specification of all previously validated components,
            e.g. to allow resolving of references.
        Returns
        --------
        Validated specification.
        """
        if isinstance(spec, str):
            return spec
        context = {} if context is None else context
        cls._validate_keys_(spec)
        cls._validate_required_(spec)
        return cls._validate_spec_(spec, context)
class MultiTypeComponent(Component):
    """
    MultiTypeComponent is the baseclass for extensible Lumen components.
    A `MultiTypeComponent` can be resolved using the `_get_type` method
    either by the name declared in the `<component>_type` attribute,
    where the name of the component represent the immediate subclasses
    of MultiTypeComponent. For example `class View(MultiTypeComponent)`
    should define `view_type` for all its descendants to override.
    Just as any other Component, `MultiTypeComponent` implements
    methods to construct an instance from a specification, export the
    specification of a component and the ability to validate a
    component.
    """
    __abstract = True
    # Every spec for a multi-type component must declare its 'type'.
    _required_keys: ClassVar[List[str | Tuple[str, ...]]] = ['type']
    @classproperty
    def _valid_keys_(cls) -> List[str | Tuple[str, ...]] | None:
        # Same resolution as Component._valid_keys_ but always allowing
        # the mandatory 'type' key.
        if cls._valid_keys is None:
            valid = None
        elif cls._valid_keys == 'params':
            valid = list(cls.param)
        elif 'params' in cls._valid_keys:
            valid = cls._valid_keys.copy()
            valid.extend(list(cls.param))
        else:
            valid = cls._valid_keys.copy()
        if valid is not None and 'type' not in valid:
            valid.append('type')
        return valid if valid is None else valid + cls._legacy_params
    @classproperty
    def _base_type(cls):
        # The immediate MultiTypeComponent subclass in this class's MRO
        # (e.g. View or Source), i.e. the extensible component family.
        if cls is MultiTypeComponent:
            return
        return cls.__mro__[cls.__mro__.index(MultiTypeComponent)-1]
    @classproperty
    def _component_type(cls) -> str:
        # The declared `<base>_type` name, falling back to the fully
        # qualified module path of the class.
        component_type = getattr(cls, f'{cls._base_type.__name__.lower()}_type')
        if component_type is not None:
            return component_type
        return f'{cls.__module__}.{cls.__name__}'
    @classmethod
    def _import_module(cls, component_type: str):
        """
        Lazily import `lumen.<base>s.<component_type>`; re-raise with an
        informative message only when a third-party dependency of that
        module is missing (not when the module itself does not exist).
        """
        base_type = cls._base_type
        try:
            import_name = f'lumen.{base_type.__name__.lower()}s.{component_type}'
            __import__(import_name)
        except ImportError as e:
            if e.name != import_name:
                msg = (
                    f"In order to use the {base_type.__name__.lower()} "
                    f"component '{component_type}', the '{e.name}' package "
                    "must be installed."
                )
                raise ImportError(msg)
    @classmethod
    def _get_type(
        cls, component_type: str, spec: Dict[str, Any] | None = None
    ) -> Type['MultiTypeComponent']:
        """
        Resolve *component_type* to a concrete subclass, either via a
        dotted module reference or by matching the `<base>_type` of the
        concrete descendants of this class.
        """
        base_type = cls._base_type
        if component_type is None:
            raise ValidationError(
                f"No 'type' was provided during instantiation of {base_type.__name__} component.",
                spec
            )
        if '.' in component_type:
            return resolve_module_reference(component_type, base_type)
        cls._import_module(component_type)
        subcls_types = set()
        for subcls in param.concrete_descendents(cls).values():
            subcls_type = subcls._component_type
            if subcls_type is None:
                continue
            subcls_types.add(subcls_type)
            if subcls_type == component_type:
                return subcls
        msg = f"{base_type.__name__} component specification declared unknown type '{component_type}'."
        msg = match_suggestion_message(component_type, list(subcls_types), msg)
        raise ValidationError(msg, spec, component_type)
    ##################################################################
    # Public API
    ##################################################################
    @classmethod
    def from_spec(cls, spec: Dict[str, Any] | str) -> 'MultiTypeComponent':
        """Instantiate the concrete subclass declared by the spec's 'type' key."""
        if isinstance(spec, str):
            raise ValueError(
                "MultiTypeComponent cannot be materialized by reference. Please pass "
                "full specification for the MultiTypeComponent."
            )
        spec = dict(spec)
        component_cls = cls._get_type(spec.pop('type'), spec)
        return component_cls(**spec)
    def to_spec(self, context: Dict[str, Any] | None = None) -> Dict[str, Any]:
        """
        Exports the full specification to reconstruct this component.
        Returns
        -------
        Declarative specification of this component, including its 'type'.
        """
        spec = super().to_spec(context=context)
        spec['type'] = self._component_type
        return spec
    @classmethod
    def validate(
        cls, spec: Dict[str, Any] | str, context: Dict[str, Any] | None = None
    ) -> Dict[str, Any] | str:
        """
        Validates the component specification given the validation context and the path.
        Arguments
        -----------
        spec: dict | str
            The specification for the component being validated or a reference to the component.
        context: dict
            Validation context contains the specification of all previously validated components,
            e.g. to allow resolving of references.
        Returns
        --------
        Validated specification.
        """
        if isinstance(spec, str):
            return spec
        context = {} if context is None else context
        if 'type' not in spec:
            msg = f'{cls.__name__} component specification did not declare a type.'
            msg, attr = reverse_match_suggestion('type', list(spec), msg)
            raise ValidationError(msg, spec, attr)
        # Delegate key/required/spec validation to the concrete subclass.
        component_cls = cls._get_type(spec['type'], spec)
        component_cls._validate_keys_(spec)
        component_cls._validate_required_(spec)
        return component_cls._validate_spec_(spec, context)
# Register a hash function with Panel's caching machinery so that any
# Component can be hashed via its declarative specification.
_hash_funcs[Component] = lambda component: _container_hash(component.to_spec())
| holoviz/lumen | lumen/base.py | base.py | py | 22,041 | python | en | code | 149 | github-code | 6 | [
{
"api_name": "param.Parameterized",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "typing.ClassVar",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.List"... |
73000139069 | import configparser
from wireguard_keys import *
PUB_KEY = '...'  # the server's WireGuard public key goes here

def build_client_config(privkey, pubkey, sharkey, cur_ip, server_pubkey=PUB_KEY):
    """Build the ConfigParser for one WireGuard client config.

    Parameters
    ----------
    privkey, pubkey, sharkey : str
        Client private/public keys and the preshared key.
    cur_ip : int
        Last octet of the client address inside 172.26.1.0/24.
    server_pubkey : str
        Public key of the WireGuard server peer.
    """
    config = configparser.ConfigParser()
    # Bug fix: ConfigParser lowercases option names by default
    # (optionxform == str.lower), so keys were written out as
    # 'privatekey', 'dns', etc.  Preserve the documented WireGuard
    # CamelCase keys instead.
    config.optionxform = str
    config['Interface'] = {
        'PrivateKey': privkey,
        'ListenPort': '51820',
        'Address': f'172.26.1.{cur_ip}/24',
        'DNS': '192.9.200.124, 192.9.200.132',
        '#pubkey': f'{pubkey}'}  # written as a comment line in the .conf
    config['Peer'] = {
        'PublicKey': f'{server_pubkey}',
        'PresharedKey': f'{sharkey}',
        'AllowedIPs': '172.26.1.0/24, 192.9.200.0/24',
        'Endpoint': '...:...',  # external address and port go here
        'PersistentKeepalive': 5
    }
    return config

if __name__ == "__main__":
    try:
        with open('curr_ip.txt', 'r') as f:
            IP_N = int(f.readline())
    except FileNotFoundError:
        IP_N = int(input('не найден последний IP, введите его вручную: '))
    # number of client configs to generate
    N = int(input('введите количество генерируемых конфигов: '))
    for i in range(1, N+1):
        cur_ip = IP_N + i  # increment IP-address
        (privkey, pubkey, sharkey) = generate_wireguard_keys()
        config = build_client_config(privkey, pubkey, sharkey, cur_ip)
        name_config = input('введите дескрипшн конфига: ')
        with open(f'wg_lan_{cur_ip}_{name_config}.conf', 'w') as f:
            config.write(f)
        print('-------------------------------------')
        print(f'ip: 172.26.1.{cur_ip}')
        print(f'имя конфига: {name_config}')
        print(f'pubkey: {pubkey}')
        print(f'sharkey: {sharkey}')
        print('-------------------------------------')
        print()
        # persist the last assigned IP for the next run
        with open('curr_ip.txt', 'w') as f:
            f.write(str(cur_ip))
| if13/utils | wireguard config generator/wireguard_export_lan.py | wireguard_export_lan.py | py | 1,671 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 22,
"usage_type": "call"
}
] |
73019294587 | #!/bin/python3
import sys
import csv
from pysam import VariantFile
import subprocess
# Merge multi-allelic variant records (e.g. from Pisces) into a primary VCF.
# Usage: multiallelicAdd.py <input.vcf> <multiallelic.vcf> <output.vcf>
vcf_in = VariantFile(sys.argv[1])
multiVcf = VariantFile(sys.argv[2])
# Reuse the primary VCF's header for the merged output.
new_header = vcf_in.header
# new_header.generic.add("Multi allelic variants added from Pisces.")
vcf_out = VariantFile(sys.argv[3], 'w', header=new_header)
for record in vcf_in.fetch():
    vcf_out.write(record)
    # NOTE(review): multiVcf.fetch() is restarted for every input record,
    # rescanning the whole multi-allelic file each time (O(n*m)); this also
    # assumes pysam can re-iterate the file — confirm for unindexed VCFs.
    for mRecord in multiVcf.fetch():
        if record.contig == mRecord.contig and record.pos == mRecord.pos:
            # import pdb; pdb.set_trace()
            # Only the first ALT allele is compared; a matching-position
            # record with a different first ALT is appended after the
            # primary record. Presumably intended — TODO confirm.
            if record.alts[0] != mRecord.alts[0]:
                vcf_out.write(mRecord)
| clinical-genomics-uppsala/pomfrey | src/variantCalling/multiallelicAdd.py | multiallelicAdd.py | py | 612 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pysam.VariantFile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pysam.VariantFile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_n... |
35694932356 | # pylint: disable=E1111
from faker import Faker
from src.infra.entities import Pet as PetModel
from src.infra.config.db_config import DBConnectionHandler
from src.infra.entities.pet import AnimalTypes
from .pet_repository import PetRepository
# Shared module-level fixtures: a fake-data generator, the repository under
# test, and a DB connection handler used to run raw verification SQL.
faker = Faker()
pet_repository = PetRepository()
db_connection_handle = DBConnectionHandler()
def test_insert_pet():
    """Inserting a pet should persist a matching row in the pets table."""
    # Arrange: random pet attributes.
    pet_name = faker.name()
    pet_species = "dog"
    pet_age = faker.random_number(digits=2)
    owner_id = faker.random_number()
    engine = db_connection_handle.get_engine()
    # Act: insert through the repository, then read the row back via raw SQL.
    inserted = pet_repository.insert_pet(pet_name, pet_species, pet_age, owner_id)
    fetched = engine.execute(f"SELECT * FROM pets WHERE id='{inserted.id}'").fetchone()
    # Remove the row before asserting so a failed assert doesn't leak data.
    engine.execute(f"DELETE FROM pets WHERE id='{inserted.id}'")
    # Assert: every persisted column matches what the repository returned.
    for column in ("id", "name", "species", "age", "user_id"):
        assert getattr(inserted, column) == getattr(fetched, column)
def test_select_pet():
    """Selecting by pet id, by user id, or by both should return the seeded pet."""
    # Arrange: build the expected entity and seed the same row with raw SQL.
    pet_id = faker.random_number(digits=5)
    pet_name = faker.name()
    pet_age = faker.random_number(digits=1)
    owner_id = faker.random_number()
    expected = PetModel(
        id=pet_id,
        name=pet_name,
        species=AnimalTypes("fish"),
        age=pet_age,
        user_id=owner_id,
    )
    engine = db_connection_handle.get_engine()
    engine.execute(
        "INSERT INTO pets (id, name, species, age, user_id) "
        f"VALUES ('{pet_id}', '{pet_name}', 'fish', '{pet_age}', '{owner_id}')"
    )
    # Act + Assert: all three lookup signatures must include the seeded pet.
    assert expected in pet_repository.select_pet(pet_id=pet_id)
    assert expected in pet_repository.select_pet(user_id=owner_id)
    assert expected in pet_repository.select_pet(pet_id=pet_id, user_id=owner_id)
    # Clean up the seeded row.
    engine.execute(f"DELETE FROM pets WHERE id='{pet_id}'")
| YuryTinos/backend-python | src/infra/repo/pet_repository_test.py | pet_repository_test.py | py | 1,976 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "faker.Faker",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pet_repository.PetRepository",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "src.infra.config.db_config.DBConnectionHandler",
"line_number": 12,
"usage_type": "call"
},
... |
41244789670 | from datetime import date
# Military-enlistment helper: compares the user's age in the current year
# against the enlistment age of 18 and reports the difference.
ano_corrente = date.today().year
ano_nascimento = int(input('Digite seu ano de nascimento: '))
idade_atual = ano_corrente - ano_nascimento
# Positive: past the enlistment age; negative: still too young.
diferenca = idade_atual - 18
if diferenca == 0:
    print('Se alistar')
elif diferenca < 0:
    print('ainda faltam {} anos(s) para se alistar'.format(-diferenca))
    print('Seu alistamento será em {}'.format(ano_corrente - diferenca))
else:
    print('Já devia ter se alistado a {} ano'.format(diferenca))
    print('Seu alistamento deveria ter sido em {}'.format(ano_corrente - diferenca))
{
"api_name": "datetime.date.today",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 3,
"usage_type": "name"
}
] |
41373317914 | from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model that authenticates with the email address."""
    username = models.CharField(max_length=100)
    email = models.EmailField(unique=True)
    # Birth date stored as a free-form string (max 10 chars) — presumably
    # 'dd/mm/yyyy'; TODO confirm against the frontend format.
    fecha_nacimiento = models.CharField(max_length=10, blank=True, null=True)
    nacional = models.BooleanField(default=True)
    # Log in with the email field; username is still collected at signup.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def profile(self):
        # Bug fix: the looked-up profile was assigned to a local variable
        # and discarded, so this method always returned None.
        return Profile.objects.get(user=self)
class Profile(models.Model):
    # One profile per user; deleted together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    full_name = models.CharField(max_length=1000)
    bio = models.CharField(max_length=100)
    image = models.ImageField(upload_to="user_images", default="default.jpg")
    # Whether the profile has been verified (e.g. by staff).
    verified = models.BooleanField(default=False)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: create an empty Profile for each new User."""
    if created:
        Profile.objects.create(user=instance)
def save_user_profile(sender, instance, **kwargs):
    """post_save handler: persist the related Profile whenever the User is saved."""
    instance.profile.save()
# Wire both handlers to the User post_save signal.
post_save.connect(create_user_profile, sender=User)
post_save.connect(save_user_profile, sender=User)
class TipoEvento(models.Model):
    # Category of cultural event (name + free-text description).
    nombre = models.CharField(max_length=100)
    descripcion = models.TextField()
class ActividadTipo(models.Model):
    # Join model linking an event type to its many activities.
    tipoevento = models.ForeignKey(TipoEvento, on_delete=models.CASCADE)
    idactividades = models.ManyToManyField('Actividad')
class Actividad(models.Model):
    # A geolocated activity with a date, description and optional images.
    nombre = models.CharField(max_length=100)
    longitud = models.DecimalField(max_digits=10, decimal_places=6)
    latitud = models.DecimalField(max_digits=10, decimal_places=6)
    fecha = models.DateField()
    descripcion = models.TextField()
    # Images stored as text — presumably URLs or base64; TODO confirm.
    img1 = models.TextField(blank=True, null=True)
    img2 = models.TextField(blank=True, null=True)
class UsuarioActividad(models.Model):
    # Records a user's interest in an activity on a given date.
    idusuario = models.ForeignKey(User, on_delete=models.CASCADE)
    idactividad = models.ForeignKey(Actividad, on_delete=models.CASCADE)
    fecha_de_interes = models.DateField()
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"... |
8665123714 | # -*- coding: utf-8 -*-
import os
import boto3
import settings
from jsonschema import validate, ValidationError
from cognito_trigger_base import CognitoTriggerBase
from user_util import UserUtil
from private_chain_util import PrivateChainUtil
class CustomMessage(CognitoTriggerBase):
    # Cognito "Custom message" Lambda trigger: validates the request and
    # builds the SMS / e-mail bodies for verification and password-reset flows.
    def get_schema(self):
        # JSON schema used to validate the phone_number attribute.
        return {
            'type': 'object',
            'properties': {
                'phone_number': settings.parameters['phone_number']
            }
        }
    def validate_params(self):
        """Reject external-provider users and duplicate/unsafe phone numbers."""
        params = self.event['request']['userAttributes']
        # Users registered via LINE/Twitter/Yahoo/Facebook must not run this flow.
        if UserUtil.check_try_to_register_as_line_user(self.event['userName']) or \
                UserUtil.check_try_to_register_as_twitter_user(self.event['userName']) or \
                UserUtil.check_try_to_register_as_yahoo_user(self.event['userName']) or \
                UserUtil.check_try_to_register_as_facebook_user(self.event['userName']):
            raise ValidationError("external provider's user can not execute")
        # Validate an unverified phone number and ensure no other verified
        # account already uses it (skipped for the forgot-password flow).
        if params.get('phone_number', '') != '' and \
                params.get('phone_number_verified', '') != 'true' and \
                self.event['triggerSource'] != 'CustomMessage_ForgotPassword':
            validate(params, self.get_schema())
            client = boto3.client('cognito-idp')
            response = client.list_users(
                UserPoolId=self.event['userPoolId'],
                Filter='phone_number = "%s"' % params['phone_number'],
            )
            for user in response['Users']:
                for attribute in user['Attributes']:
                    if attribute['Name'] == 'phone_number_verified' and attribute['Value'] == 'true':
                        raise ValidationError('This phone_number is already exists')
        # Security: do not allow the phone number to be changed here. Even if
        # XSS occurs, operations requiring phone verification cannot be bypassed.
        if self.event['triggerSource'] == 'CustomMessage_VerifyUserAttribute':
            # When phone_number_verified is 'true' the phone number was not
            # changed, so this check is unnecessary.
            if params.get('phone_number_verified', '') != 'true':
                self.__validate_has_not_token(params)
        # Users registered via a third-party provider may not reset a password.
        if self.event['triggerSource'] == 'CustomMessage_ForgotPassword':
            # Check whether the user registered through an external provider.
            if UserUtil.is_external_provider_user(self.dynamodb, self.event['userName']):
                raise ValidationError("external provider's user can not execute")
    def exec_main_proc(self):
        """Fill in the SMS/e-mail message bodies (user-facing Japanese copy)."""
        if self.event['triggerSource'] == 'CustomMessage_ForgotPassword':
            self.event['response']['smsMessage'] = '{user}さんのパスワード再設定コードは {code} です。'.format(
                user=self.event['userName'], code=self.event['request']['codeParameter'])
            self.event['response']['emailSubject'] = '【ALIS】パスワードの変更:再設定コードの送付'
            self.event['response']['emailMessage'] = "{user}さんのパスワード再設定コードは {code} です".format(
                code=self.event['request']['codeParameter'],
                user=self.event['userName'])
        else:
            self.event['response']['smsMessage'] = 'ALISです。\n{user}さんの認証コードは {code} です。'.format(
                user=self.event['userName'], code=self.event['request']['codeParameter'])
            self.event['response']['emailSubject'] = '【ALIS】登録のご案内:メールアドレスの確認'
            self.event['response']['emailMessage'] = """\
{user}様
ALISをご利用いただきありがとうございます。
仮登録が完了しました。
下記URLにアクセスし、ログインをして登録手続きを完了してください。
https://{domain}/confirm?code={code}&user={user}
※注意事項
・24時間以内に手続きを完了しない場合、上記URLは無効になります。最初から手続きをやり直してください。
・上記URLをクリックしてもページが開かない場合は、URLをコピーし、ブラウザのアドレス欄に貼り付けてください。
・このメールにお心当たりの無い場合は、恐れ入りますが、下記までお問合せください。
お問合せ(https://{domain}/help)
・このメールアドレスは配信専用となっております。本メールに返信していただきましても、お問合せにはお答えできませんのでご了承ください。
ALIS:https://alismedia.jp
""".format(
                domain=os.environ['DOMAIN'],
                code=self.event['request']['codeParameter'],
                user=self.event['userName']
            ).replace("\n", "<br />")
        return self.event
    # Raise if the user's wallet already holds ALIS tokens.
    def __validate_has_not_token(self, params):
        address = params.get('custom:private_eth_address')
        if address is not None:
            url = 'https://' + os.environ['PRIVATE_CHAIN_EXECUTE_API_HOST'] + '/production/wallet/balance'
            payload = {'private_eth_address': address[2:]}
            token = PrivateChainUtil.send_transaction(request_url=url, payload_dict=payload)
            if token is not None and token != '0x0000000000000000000000000000000000000000000000000000000000000000':
                raise ValidationError("Do not allow phone number updates")
| AlisProject/serverless-application | src/handlers/cognito_trigger/custommessage/custom_message.py | custom_message.py | py | 5,666 | python | ja | code | 54 | github-code | 6 | [
{
"api_name": "cognito_trigger_base.CognitoTriggerBase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "settings.parameters",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "user_util.UserUtil.check_try_to_register_as_line_user",
"line_number": 22,
... |
37617555944 | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import HttpResponse
from .models import Product
from math import ceil
# Create your views here.
def index(request):
    """Render the shop home page with one 4-product carousel per category.

    The template receives ``allProds``: a list of
    ``[queryset, range(1, n_slides), n_slides]`` triples, one per category.
    """
    all_prods = []
    cat_rows = Product.objects.values('cat', 'Product_id')
    categories = {item['cat'] for item in cat_rows}
    for category in categories:
        prods = Product.objects.filter(cat=category)
        n = len(prods)
        # Slides of 4 products, rounded up; equivalent to the previous
        # convoluted `n//4 + ceil(n/4 - n//4)` expression.
        n_slides = ceil(n / 4)
        all_prods.append([prods, range(1, n_slides), n_slides])
    return render(request, 'shop/template/index.html', {'allProds': all_prods})
#{% for products, range(1, len(products)), nSlides in allProds %}
def productview(request, myid):
    """Render the detail page for the product whose Product_id equals *myid*.

    Bug fix: removed a stray debug ``print(product)`` left in the view.
    """
    product = Product.objects.filter(Product_id=myid)
    # NOTE(review): raises IndexError when no product matches `myid`;
    # switch to get_object_or_404 if a 404 response is the desired behaviour.
    return render(request, 'shop/template/prodview.html', {'product': product[0]})
| a22616/Django-project-2 | shopcart/shop/views.py | views.py | py | 1,244 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Product.objects.values",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 12,
"usage_type": "name"
},
{
"api_name... |
37516858475 | import os
import tarfile
import time
import shutil
from scipy.io import loadmat
import csv
# File names of the official ILSVRC2012 distribution archives expected
# in the current working directory.
DEVKIT_FILE_NAME = "ILSVRC2012_devkit_t12.tar.gz"
TRAIN_FILE_NAME = "ILSVRC2012_img_train.tar"
VAL_FILE_NAME = "ILSVRC2012_img_val.tar"
TEST_FILE_NAME = "ILSVRC2012_img_test_v10102019.tar"
def untar(file, target_dir="", is_show_detail=False):
    """Extract a ``.tar`` or ``.tar.gz`` archive into ``target_dir``.

    Parameters
    ----------
    file : str
        Path of the archive to extract.
    target_dir : str
        Destination directory (created if missing). Defaults to the
        archive name up to its first '.'.
    is_show_detail : bool
        When True, print per-member progress messages.
    """
    file_name = file.split('.')[0]
    # 'r:gz' for gzip-compressed tarballs, plain 'r' otherwise.
    mode = 'r:gz' if file.split('.')[-1] == 'gz' else 'r'
    if is_show_detail:
        print("read the file" + file)
    if is_show_detail:
        print("check or create directory")
    if target_dir == "":
        target_dir = file_name
    # makedirs (vs. mkdir) also creates missing parents and tolerates an
    # existing directory.
    os.makedirs(target_dir, exist_ok=True)
    # Bug fix: the archive handle was leaked if an extraction raised; the
    # context manager guarantees it is closed.
    with tarfile.open(file, mode) as tar_file:
        files = tar_file.getnames()
        total_files = len(files)
        if is_show_detail:
            print("start to extract files")
        for current_file_index, f in enumerate(files, start=1):
            if is_show_detail:
                print("[" + str(current_file_index) + "/" + str(total_files) + "] extracting: " + f)
            tar_file.extract(f, target_dir)
            if is_show_detail:
                print("[" + str(current_file_index) + "/" + str(total_files) + "] successfully extracted: " + f)
def clear_folder(folder):
    """Recursively delete ``folder`` and everything inside it.

    A no-op when ``folder`` does not exist.  (The original version ran
    ``os.rmdir(folder)`` unconditionally and therefore raised
    FileNotFoundError for a missing folder -- which crashed the pipeline
    on the first run, before the train/val/test folders existed.)
    """
    if not os.path.exists(folder):
        return
    # Walk bottom-up so every directory is empty by the time we rmdir it;
    # this replaces the original's recursion-inside-walk scheme.
    for root, dirs, files in os.walk(folder, topdown=False):
        for file in files:
            path = os.path.join(root, file)
            os.remove(path)
            print("remove " + path)
        for directory in dirs:
            os.rmdir(os.path.join(root, directory))
    os.rmdir(folder)
if __name__ == '__main__':
#unzip dev kit
print("{1/4} extract development kit ")
DEVKIT_NAME = DEVKIT_FILE_NAME.split('.')[0]
untar(DEVKIT_FILE_NAME, "devkit")
print("{1/4} parse the validation ground truth")
val_index_label_pairs = {}
path_devkit_data = os.path.join("devkit",DEVKIT_NAME)
path_devkit_data = os.path.join(path_devkit_data,"data")
path_val_ground_truth = os.path.join(path_devkit_data,"ILSVRC2012_validation_ground_truth.txt")
file_val_ground_truth = open(path_val_ground_truth, "r")
lines = file_val_ground_truth.readlines()
line_index = 1
for line in lines:
val_index_label_pairs[line_index]=line.strip('\n')
line_index += 1
print("{1/4} validation ground truth cached")
print("{1/4} create the wnid-label-category-explanation form")
headers = ['wnid', 'label', 'category', 'explanation']
rows = []
path_train_labels = os.path.join(path_devkit_data,"meta.mat")
train_labels = loadmat(path_train_labels)
train_labels = train_labels['synsets']
for i in range(len(train_labels)):
row = {'wnid': train_labels[i][0][1][0], 'label': train_labels[i][0][0][0][0], 'category':train_labels[i][0][2][0], 'explanation': train_labels[i][0][3][0]}
rows.append(row)
with open('train_labels.csv', 'w') as f:
f_csv = csv.DictWriter(f, headers)
f_csv.writeheader()
f_csv.writerows(rows)
print("{1/4} wnid-label-category-explanation form created")
print("{1/4} development kit successfully extracted")
#unzip the training data
print("{2/4} extract training data")
print("{2/4} clean the train folder")
clear_folder("train")
print("{2/4} unzip the training dataset, may take a longer time")
untar(TRAIN_FILE_NAME, "train", is_show_detail=True)
print("{2/4} unzip the subfolders of training dataset, may take a longer time")
train_tar_files = os.listdir("train")
total_train_tar_files = len(train_tar_files)
train_tar_file_counter = 0
for train_tar_file in train_tar_files:
untar("train/"+train_tar_file, is_show_detail=False)
os.remove("train/"+train_tar_file)
train_tar_file_counter += 1
print("[" + str(train_tar_file_counter) + "/" + str(total_train_tar_files) + "] extracted: " + train_tar_file)
print("{2/4} trainning data successfully extracted")
#unzip the validation data
print("{3/4} extract validation data")
print("{3/4} clean the validation folder")
clear_folder("val")
print("{3/4} unzip the validation dataset, may take a longer time")
untar(VAL_FILE_NAME, "val", is_show_detail=True)
val_images = os.listdir('val')
num_val_images = len(val_images)
val_image_counter = 0
for image in val_images:
image_path = os.path.join("val", image)
image_index = int(image.split('.')[0].split('_')[-1])
image_target_dir = os.path.join("val", val_index_label_pairs[image_index])
if not os.path.exists(image_target_dir):
os.mkdir(image_target_dir)
shutil.move(image_path, image_target_dir)
val_image_counter += 1
print("[" + str(val_image_counter) + "/" + str(num_val_images) + "] moved: " + image)
print("{3/4} validation data successfully extracted")
#unzip the test data
print("{4/4} extract testing data")
print("{4/4} clean the test folder")
clear_folder("test")
print("{4/4} unzip the test dataset, may take a longer time")
untar(TEST_FILE_NAME, "test", is_show_detail=True)
print("{4/4} testing data successfully extracted")
print("Finished!")
| lizhouyu/ImageNet-Parser | imagenet.py | imagenet.py | py | 5,306 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tarfile.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number"... |
15917640785 | from django.urls import path
from . import views
app_name = 'main'  # URL namespace: reverse with 'main:<name>'
urlpatterns = [
    # category management
    path('category_list/', views.category_list, name='category_list'),
    path('delete_category/<int:category_id>/', views.delete_category, name='delete_category'),
    path('update_category/<int:category_id>/', views.update_category, name='update_category'),
    # product management -- note products are addressed by <code> for
    # deletion but by <pk> for updates
    path('product_list/', views.product_list, name='product_list'),
    path('delete_product/<int:code>/', views.delete_product, name='delete_product'),
    path('update_products/<int:pk>/', views.update_products, name='update_products'),
    # report export / import endpoints
    path('export_pdf/', views.export_pdf, name='export_pdf'),
    path('export_excel/', views.export_excel, name='export_excel'),
    path('import_excel/', views.import_excel, name='import_excel'),
    path('export_import/', views.export_import, name='export_import'),
    #path('add_product/', views.add_product, name='add_product'),
    #path('update_product/<int:product_id>/', views.update_product, name='update_product'),
    #path('delete_product/<int:product_id>/', views.delete_product, name='delete_product'),
    #path('index/', views.index, name='index'),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
8384182801 | from __future__ import absolute_import
import sys
from optparse import OptionParser
import sumolib # noqa
from functools import reduce
def parse_args():
    """Parse the command line: one positional <netfile> plus options."""
    usage = "Usage: " + sys.argv[0] + " <netfile> [options]"
    parser = OptionParser()
    parser.add_option("-o", "--outfile", help="name of output file")
    parser.add_option("-r", "--radius", type=float, default=10., help="maximum air distance around the edge")
    parser.add_option("-t", "--travel-distance", type=float, help="maximum travel distance in the graph")
    parser.add_option("--symmetrical", action="store_true", default=False,
                      help="extend the bidi-relationship to be symmetrical")
    options, positional = parser.parse_args()
    # exactly one positional argument (the net file) is accepted
    if len(positional) != 1:
        sys.exit(usage)
    options.net = positional[0]
    # default output name is derived from the net file
    if options.outfile is None:
        options.outfile = options.net + ".taz.xml"
    return options
def getCandidates(edge, net, radius):
    """Return, for each shape point of ``edge``, the set of edges whose
    geometry lies within ``r`` metres of that point.

    ``r`` is capped at half of the edge's polyline length so short edges
    do not collect candidates far beyond their own extent.
    """
    candidates = []
    r = min(radius, sumolib.geomhelper.polyLength(edge.getShape()) / 2)
    for x, y in edge.getShape():
        nearby = set()
        for edge2, dist in net.getNeighboringEdges(x, y, r):
            nearby.add(edge2)
        candidates.append(nearby)
    return candidates
ASYM_BIDI_CACHE = {} # edge : opposites
def computeBidiTazAsymByRadius(edge, net, radius):
    """Return the (memoised) set of edges considered opposite to ``edge``.

    An edge counts as opposite when it is near *every* shape point of
    ``edge`` (intersection over the per-point candidate sets) or when it
    directly connects the same pair of junctions in the reverse
    direction.  Results are cached in the module-level ASYM_BIDI_CACHE.
    """
    if edge not in ASYM_BIDI_CACHE:
        candidates = getCandidates(edge, net, radius)
        opposites = reduce(lambda a, b: a.intersection(b), candidates)
        opposites.update(set(edge.getToNode().getOutgoing()).intersection(
            set(edge.getFromNode().getIncoming())))
        ASYM_BIDI_CACHE[edge] = opposites
    return ASYM_BIDI_CACHE[edge]
def computeAllBidiTaz(net, radius, travelDist, symmetrical):
    """Yield (edge, opposites) pairs for every edge in ``net``.

    Opposites are gathered by graph traversal up to ``travelDist``
    metres (when given) and/or by the air-distance ``radius`` criterion;
    with ``symmetrical`` the radius-based relation is extended so that
    edges which consider this edge opposite are included as well.
    """
    for edge in net.getEdges():
        travelOpposites = set()
        if travelDist is not None:
            # traversal via list-pop() -- effectively depth-first despite
            # the name 'queue'; -1 marks the seed edge so its own length
            # is not counted towards the travel distance
            queue = [(edge, -1.)]
            while not len(queue) == 0:
                edge2, dist = queue.pop()
                if edge2 not in travelOpposites and dist < travelDist:
                    travelOpposites.add(edge2)
                    if dist == -1.:
                        dist = 0.
                    else:
                        dist += edge2.getLength()
                    toN = edge2.getToNode()
                    fromN = edge2.getFromNode()
                    for e in toN.getOutgoing() + toN.getIncoming() + fromN.getOutgoing() + fromN.getIncoming():
                        queue.append((e, dist))
        if radius is not None and radius > 0.:
            opposites = computeBidiTazAsymByRadius(edge, net, radius)
            if symmetrical:
                # union of all per-point candidates: any of them might
                # consider this edge opposite even if the reverse fails
                candidates = reduce(
                    lambda a, b: a.union(b), getCandidates(edge, net, radius))
                for cand in candidates:
                    if edge in computeBidiTazAsymByRadius(cand, net, radius):
                        opposites.add(cand)
            travelOpposites.update(opposites)
        yield edge, travelOpposites
def main(netFile, outFile, radius, travelDist, symmetrical):
    """Read the network, compute bidi TAZ for every edge and write them
    to ``outFile`` as a <tazs> XML document.  Returns the parsed net."""
    net = sumolib.net.readNet(netFile, withConnections=False, withFoes=False)
    with open(outFile, 'w') as outf:
        sumolib.writeXMLHeader(
            outf, "$Id$")  # noqa
        outf.write('<tazs>\n')
        for taz, edges in computeAllBidiTaz(net, radius, travelDist, symmetrical):
            # edge ids are sorted for deterministic, diff-friendly output
            outf.write('    <taz id="%s" edges="%s"/>\n' % (
                taz.getID(), ' '.join(sorted([e.getID() for e in edges]))))
        outf.write('</tazs>\n')
    return net
if __name__ == "__main__":
options = parse_args()
main(options.net, options.outfile, options.radius,
options.travel_distance, options.symmetrical)
| ngctnnnn/DRL_Traffic-Signal-Control | sumo-rl/sumo/tools/generateBidiDistricts.py | generateBidiDistricts.py | py | 3,730 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "optparse.OptionParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sumolib.geomhelper.polyL... |
7748783174 | import cv2
from cvzone.HandTrackingModule import HandDetector
import numpy as np
import pyfirmata
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
if not cap.isOpened():
print("Camera couldn't access")
exit()
detector = HandDetector(detectionCon=0.7)
port = "COM7"
board = pyfirmata.Arduino(port)
servo_pinX = board.get_pin('d:5:s') #pin 5 Arduino
servo_pinY = board.get_pin('d:6:s') #pin 6 Arduino
x, y = 150, 230
w, h = 200, 200
col = (255, 0, 255)
while cap.isOpened():
success, img = cap.read()
img = detector.findHands(img)
lmList, bboxInfo = detector.findPosition(img)
servoX = np.interp(x, [0, 1280], [0, 180])
servoY = np.interp(y, [0, 720], [0, 180])
if lmList:
dist,_,_ = detector.findDistance(8, 12, img, draw = False)
#print(dist)
fingers = detector.fingersUp()
if fingers[1] == 1 and fingers[2] == 1:
cursor = lmList[8]
if dist < 50:
if x-w // 2 < cursor[0] < x+w-120 // 2 and y-h // 2 < cursor[1] < y+h-120 // 2:
col = (255, 255, 0)
x, y = cursor
cv2.circle(img, cursor, 50, (255, 255, 0), cv2.FILLED)
cv2.putText(img, "HOLD", (cursor[0]-40, cursor[1]), cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255), 2)
else:
col = (255, 0, 255)
cv2.rectangle(img, (x-w // 2, y-h // 2), (x+w // 2, y+h // 2), col, cv2.FILLED)
cv2.putText(img, f'({str(x)}, {str(y)})', (x-90, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.rectangle(img, (40,20), (350,110), (0,255,255), cv2.FILLED)
cv2.putText(img, f'Servo X: {int(servoX)} deg', (50, 50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
cv2.putText(img, f'Servo Y: {int(servoY)} deg', (50, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
servo_pinX.write(servoX)
servo_pinY.write(servoY)
cv2.imshow("Image", img)
cv2.waitKey(1)
| rizkydermawan1992/virtualdragdrop | drag and drop.py | drag and drop.py | py | 1,936 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cvzone.HandTrackingModule.HandDetector",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyfirmata.Arduino",
"line_number": 18,
"usage_type": "call"
},
{
"api_name... |
2795680906 | #PCA => Principal componet analysis using HSI
import math
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
class princiapalComponentAnalysis:
    """PCA / kernel-PCA dimensionality reduction for hyperspectral images.

    Images are handled as arrays shaped (bands, rows, cols); the reduced
    image keeps the spatial dimensions and replaces the band axis by
    principal components.
    """

    def __init__(self):
        pass

    def __str__(self):
        # Bug fix: the original body was `pass`, so str(instance) raised
        # TypeError("__str__ returned non-string (type NoneType)").
        return self.__class__.__name__

    def pca_calculate(self, imagen_in, varianza=None, componentes=None):
        """Reduce ``imagen_in`` with linear PCA.

        One selection criterion must be supplied:
          varianza    -- float in (0, 1): keep enough components to explain
                         this fraction of the variance.
          componentes -- int: keep a fixed number of components.

        Returns the reduced image shaped (components, rows, cols).
        Raises ValueError when neither criterion is given (the original
        crashed with UnboundLocalError in that case).
        """
        if varianza is None and componentes is None:
            raise ValueError("either 'varianza' or 'componentes' must be provided")
        dataImagen = imagen_in.copy()
        if varianza is not None:
            # (bands, rows*cols) -> samples on rows, as sklearn expects
            imageTemp = dataImagen.reshape((dataImagen.shape[0], dataImagen.shape[1] * dataImagen.shape[2])).T
            pca = PCA()
            pca.fit(imageTemp)
            imageTemp = pca.transform(imageTemp)
            # count how many components are needed to reach the requested
            # explained-variance ratio
            var = 0
            num_componentes = 0
            for i in range(pca.explained_variance_ratio_.shape[0]):
                var += pca.explained_variance_ratio_[i]
                if var > varianza:
                    break
                else:
                    num_componentes += 1
            imageTemp = imageTemp.reshape((dataImagen.shape[1], dataImagen.shape[2], dataImagen.shape[0]))
            imagePCA = np.zeros((num_componentes, dataImagen.shape[1], dataImagen.shape[2]))
            for i in range(imagePCA.shape[0]):
                imagePCA[i] = imageTemp[:, :, i]
        if componentes is not None:
            imageTemp = dataImagen.reshape((dataImagen.shape[0], dataImagen.shape[1] * dataImagen.shape[2])).T
            c_pca = PCA(n_components=componentes)
            c_pca.fit(imageTemp)
            imageTemp = c_pca.transform(imageTemp)
            imageTemp = imageTemp.reshape((dataImagen.shape[1], dataImagen.shape[2], imageTemp.shape[1]))
            imagePCA = np.zeros((componentes, dataImagen.shape[1], dataImagen.shape[2]))
            for i in range(imagePCA.shape[0]):
                imagePCA[i] = imageTemp[:, :, i]
        return imagePCA

    def kpca_calculate(self, imagenInput, componentes=None):
        """Tile-wise kernel PCA (RBF kernel) over the image.

        The image is processed in W x W spatial tiles to keep the kernel
        matrix tractable; the number of components is either fixed
        (``componentes``) or derived from 95% of the kernel eigenvalue
        mass on the first tile, clamped around ``fx_pc``.
        """
        imagen_in = imagenInput.copy()
        # take W x W portions of the image
        i = 0           # x index into the image
        j = 0           # y index into the image
        W = 50          # tile size (50 pixels per side)
        fx_pc = 10      # fixed fallback number of components
        n_componentes = 0  # number of principal components (0 = not yet decided)
        for i in range(imagen_in.shape[1]):  # traverse x
            i_l = i * W
            i_h = (i + 1) * W
            if i_l >= imagen_in.shape[1]:
                break
            if i_h > imagen_in.shape[1]:
                i_h = imagen_in.shape[1]
            for j in range(imagen_in.shape[2]):  # traverse y
                j_l = j * W
                j_h = (j + 1) * W
                if j_l >= imagen_in.shape[2]:
                    break
                if j_h > imagen_in.shape[2]:
                    j_h = imagen_in.shape[2]
                dataImagen = imagen_in[:, i_l:i_h, j_l:j_h]
                # reorganise the tile so KPCA sees samples on rows
                imageTemp = dataImagen.reshape((dataImagen.shape[0], dataImagen.shape[1] * dataImagen.shape[2])).T
                # apply KPCA over all spectral dimensions
                kpca = KernelPCA(kernel='rbf')  # n_components=None, gamma=0.01
                X_transformed = kpca.fit_transform(imageTemp)
                # decide the number of components on the first tile only
                if componentes is not None:
                    if n_componentes == 0:
                        n_componentes = componentes
                        ImagenOut = np.zeros((n_componentes, imagen_in.shape[1], imagen_in.shape[2]))
                else:
                    if n_componentes == 0:
                        sum_varianza = 0
                        # NOTE(review): `lambdas_` was renamed `eigenvalues_` in
                        # newer scikit-learn -- kept as-is for the pinned version.
                        varianza = kpca.lambdas_ / np.sum(kpca.lambdas_)
                        for v in range(varianza.shape[0]):
                            sum_varianza = sum_varianza + varianza[v]
                            if sum_varianza > 0.95:
                                break
                            else:
                                n_componentes += 1
                        if n_componentes < fx_pc:
                            print('pc find:' + str(n_componentes))
                            n_componentes = fx_pc
                            print('msn 1: fix number of PC used')
                        if n_componentes > imagen_in.shape[0] / 2:
                            print('pc find:' + str(n_componentes))
                            n_componentes = fx_pc
                            print('msn 2: fix number of PC used')
                        ImagenOut = np.zeros((n_componentes, imagen_in.shape[1], imagen_in.shape[2]))
                # keep only the required number of components
                imageTemp = X_transformed[:, 0:n_componentes].reshape((dataImagen.shape[1], dataImagen.shape[2], n_componentes))
                imageKPCA = np.zeros((n_componentes, dataImagen.shape[1], dataImagen.shape[2]))
                # rebuild the (components, rows, cols) layout of the tile;
                # loop variable renamed from `i` to avoid shadowing the
                # outer tile index
                for k in range(imageKPCA.shape[0]):
                    imageKPCA[k] = imageTemp[:, :, k]
                ImagenOut[:, i_l:i_h, j_l:j_h] = imageKPCA
        return ImagenOut

    def kpca2_calculate(self, imagen_in, componentes):
        """Whole-image kernel PCA (RBF, gamma=0.3) keeping ``componentes`` components."""
        dataImagen = imagen_in.copy()
        imageTemp = dataImagen.reshape((dataImagen.shape[0], dataImagen.shape[1] * dataImagen.shape[2])).T
        print(imageTemp.shape)
        kpca = KernelPCA(n_components=componentes, kernel='rbf', gamma=0.3)
        X_transformed = kpca.fit_transform(imageTemp)
        print(X_transformed.shape)
        imageTemp = X_transformed.reshape((dataImagen.shape[1], dataImagen.shape[2], X_transformed.shape[1]))
        imageKPCA = np.zeros((componentes, dataImagen.shape[1], dataImagen.shape[2]))
        for i in range(imageKPCA.shape[0]):
            imageKPCA[i] = imageTemp[:, :, i]
        return imageKPCA

    def graficarPCA(self, imagePCA, channel):
        """Display component ``channel`` of the reduced image with a colour bar."""
        plt.figure(1)
        plt.imshow(imagePCA[channel])
        plt.colorbar()
        plt.show()
| davidruizhidalgo/unsupervisedRemoteSensing | package/PCA.py | PCA.py | py | 6,139 | python | es | code | 13 | github-code | 6 | [
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy... |
24128542933 | import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy as np
import pickle
with open("/home/ekin/Desktop/workspace/RotatetObjectDetectionReview/test_data/gt_area.pickle", 'rb') as handle:
gt_area = pickle.load(handle)
np.sort(gt_area)
'''
plt.hist(gt_area, bins='auto', edgecolor='black')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title('Histogram of Data')
plt.grid(True)
plt.show()
'''
# Reshape the data to have a single feature dimension
data_reshaped = np.array(gt_area).reshape(-1, 1)
# Number of clusters
num_clusters = 6
# Perform K-means clustering
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(data_reshaped)
# Get the cluster labels
labels = kmeans.labels_
cluster_centers = kmeans.cluster_centers_
print(np.sort(cluster_centers,axis = 0))
# Plot the scatter plot
plt.scatter(range(len(gt_area)), gt_area, c=labels, cmap='viridis')
plt.xlabel('Data Point')
plt.ylabel('Value')
plt.title('K-means Clustering')
plt.savefig("/home/ekin/Desktop/workspace/RotatetObjectDetectionReview/figures/area.png")
| ikoc/RotatetObjectDetectionReview | src/kMeansOfArea.py | kMeansOfArea.py | py | 1,058 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_n... |
19797979191 | import functools
from typing import Callable, Union
from aiohttp import web
from .exceptions import AuthRequiredException, ForbiddenException, AuthException
def login_required(func):
    """
    Deny access to a handler for unauthenticated users.

    If a request without an authenticated ``user`` attribute reaches a
    `login_required` end-point, an UNAUTHORIZED response is returned
    instead of calling the wrapped handler.
    """
    # functools.wraps preserves the handler's name/docstring, and the
    # async wrapper matches the sibling `permissions` decorator: the
    # original sync wrapper returned a bare (un-awaited) coroutine.
    @functools.wraps(func)
    async def wrapper(request):
        if not isinstance(request, web.Request):
            raise TypeError(f"Invalid Type '{type(request)}'")
        if not getattr(request, "user", None):
            return AuthRequiredException.make_response(request)
        return await func(request)

    return wrapper
def permissions(
    *required_scopes: Union[set, tuple], algorithm="any"
) -> web.json_response:
    """
    Open the end-point for any user who has the permission to access.

    ``algorithm`` controls how required vs. provided scopes are matched
    (forwarded to the application authenticator's check_permissions).
    """
    # NOTE(review): the return annotation `web.json_response` is a function,
    # not a type -- the decorator actually returns a decorator; left
    # untouched to keep the code byte-identical.
    assert required_scopes, "Cannot be used without any permission!"

    def request_handler(view: Callable) -> Callable:
        @functools.wraps(view)
        async def wrapper(request: web.Request):
            if not isinstance(request, web.Request):
                raise TypeError(f"Invalid Type '{type(request)}'")
            # the application-level authenticator decides whether the
            # scopes carried by the request satisfy the required ones
            authenticator = request.app["authenticator"]
            try:
                provided_scopes = await authenticator.get_permissions(request)
                has_permission = await authenticator.check_permissions(
                    provided_scopes, required_scopes, algorithm=algorithm
                )
                if not has_permission:
                    raise ForbiddenException()
                return await view(request)
            except AuthException as e:
                # auth failures are converted into HTTP error responses
                return e.make_response(request)

        return wrapper

    return request_handler
| mgurdal/aegis | aegis/decorators.py | decorators.py | py | 1,714 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "aiohttp.web.Request",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.web",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "exceptions.AuthRequiredException.make_response",
"line_number": 20,
"usage_type": "call"
},
{
... |
3814572161 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import numpy as np
df = pd.read_csv('mail_data.csv')
# Data Preprocessing
df['Category'] = df['Category'].map({'spam': 0, 'ham': 1})
X = df['Message']
Y = df['Category']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=3)
# Feature Extraction
feature_extraction = TfidfVectorizer(min_df=1, stop_words='english', lowercase=True)
X_train_features = feature_extraction.fit_transform(X_train)
X_test_features = feature_extraction.transform(X_test)
Y_train = Y_train.astype('int')
Y_test = Y_test.astype('int')
# Model Training
model = LogisticRegression()
model.fit(X_train_features, Y_train)
# Model Evaluation
prediction_on_training_data = model.predict(X_train_features)
accuracy_on_training_data = accuracy_score(Y_train, prediction_on_training_data)
print(f'Accuracy on Training Data: {accuracy_on_training_data}')
prediction_on_test_data = model.predict(X_test_features)
accuracy_on_test = accuracy_score(Y_test, prediction_on_test_data)
print(f'Accuracy on Test Data: {accuracy_on_test}')
# Input Mail Prediction
input_your_mail = ["Congratulations! You won 3000$ Walmart gift card. Go to http://bit.ly/123456 tp claim now."]
input_data_features = feature_extraction.transform(input_your_mail)
prediction = model.predict(input_data_features)
if prediction[0] == 1:
print('Ham')
else:
print('Spam')
print(prediction)
| bhar1gitr/ML_Spam-Ham_Detector | pandassss.py | pandassss.py | py | 1,653 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 18,
"usage_type"... |
30114979232 | import itertools
import pandas as pd
import math
from pathlib import Path
def composite_SD(means, SDs, ncounts):
    '''Calculate combined standard deviation via ANOVA (ANalysis Of VAriance)
       See: http://www.burtonsys.com/climate/composite_standard_deviations.html
       Inputs are:
         means, the array of group means
         SDs, the array of group standard deviations
         ncounts, the array of number of samples in each group
       Result is the overall standard deviation.

       Raises ValueError (a subclass of the bare Exception previously
       raised) when the three inputs do not have the same length.
    '''
    num_groups = len(means)
    if num_groups != len(SDs) or num_groups != len(ncounts):
        # ValueError is more precise than bare Exception and is still
        # caught by any caller handling Exception.
        raise ValueError('inconsistent list lengths')

    # total number of samples, N, and grand mean, GM
    N = sum(ncounts)
    if N == 1:
        return SDs[0]  # a single sample overall: its SD is the answer
    GM = sum(mean * n for mean, n in zip(means, ncounts)) / N

    # Error Sum of Squares (within-group variability)
    ESS = sum((sd ** 2) * (n - 1) for sd, n in zip(SDs, ncounts))

    # Total Group Sum of Squares (between-group variability)
    TGSS = sum(((mean - GM) ** 2) * n for mean, n in zip(means, ncounts))

    # the overall standard deviation is the square root of the grand variance
    return math.sqrt((ESS + TGSS) / (N - 1))
def create_transunion_csv():
    """
    This python script is used to merge all the parquet data files into one
    single csv file. TransUnion data needs to be partitioned into 10 different
    csv files due to the memory limitation.
    """
    num_partition = 10
    data_dir = Path("data/transunion/")
    # parquet files per partition; the last partition may receive fewer
    num_files = math.ceil(len(list(data_dir.glob("*.parquet"))) / num_partition)
    for i in range(num_partition):
        # concatenate the i-th slice of parquet files into one frame
        # NOTE(review): Path.glob ordering is filesystem-dependent, so
        # partition contents are not guaranteed stable across runs.
        df = pd.concat(
            pd.read_parquet(parquet_file, engine="pyarrow")
            for parquet_file in itertools.islice(data_dir.glob("*.parquet"), i *
                                                 num_files, (i + 1) * num_files))
        df.to_csv("data/transunion_{}.csv".format(i))
def expand_df(df, columns):
    """
    Flatten a Series of 2d arrays into a DataFrame.

    Parameters:
    ----------
    df: pd.Series
        Each cell holds a 2d array.
    columns: list
        Column names for the expanded DataFrame.

    Return:
    -------
    A expanded DataFrame.
    """
    # one row per inner array ...
    exploded = df.explode()
    # ... then one column per inner-array element
    expanded = exploded.apply(pd.Series)
    # map the positional column index to the requested name
    return expanded.rename(columns=lambda idx: columns[idx])
| superyang713/Synthetic_Data_Generation | utils.py | utils.py | py | 2,363 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 5... |
27099720616 | import xlwt
import numpy as np
import os
import os.path
import colour
from ..configuration.base_configuration import Filter
from ..configuration.base_configuration import TimeOfDayFilter
from ..core.status import Status
from plots import MatplotlibPlotter
from power_deviation_matrix import PowerDeviationMatrixSheet
import version as ver
def get_valid_excel_sheet_name(sheet_name, if_too_long_replace=None):
    """Sanitise a string for use as an Excel worksheet name.

    Excel forbids the characters :|\\/*?[] and limits names to 31
    characters.  When the cleaned name is still too long, each key of
    ``if_too_long_replace`` is substituted by its (shorter) value before
    the final hard truncation to 31 characters.
    """
    max_chars = 31
    invalid_chars = ':|\\/*?[]'
    # str.translate removes all invalid characters in a single pass
    sheet_name = sheet_name.translate({ord(c): None for c in invalid_chars})
    # None sentinel instead of the original mutable {} default argument
    if if_too_long_replace is None:
        if_too_long_replace = {}
    if len(sheet_name) > max_chars:
        for sub_str in if_too_long_replace:
            sheet_name = sheet_name.replace(sub_str, if_too_long_replace[sub_str])
    return sheet_name[:max_chars]
def chckMake(path):
    """Make a folder (including missing parents) if it doesn't exist."""
    # exist_ok avoids the race between the original exists() check and
    # the directory creation when two processes write reports at once.
    os.makedirs(path, exist_ok=True)
class PNGPlotter:
    """Render the standard set of analysis plots as PNG files via
    MatplotlibPlotter."""

    def plot(self, analysis, path):
        """Write all applicable plots for ``analysis`` into ``path``
        (created when missing).  Which plots are produced depends on the
        analysis flags (actual power, turbulence renorm, inflow angle, ...)."""
        chckMake(path)
        plotter = MatplotlibPlotter(path, analysis)
        if analysis.hasActualPower:
            plotter.plotPowerCurve(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve, specified_title = 'Warranted', mean_title = 'Measured Mean', gridLines = True)
            plotter.plotPowerCurve(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve, show_scatter = False, fname = "PowerCurve - Warranted vs Measured Mean", specified_title = 'Warranted', mean_title = 'Measured Mean', mean_pc_color = 'blue', gridLines = True)
            if analysis.turbRenormActive:
                plotter.plotTurbCorrectedPowerCurve(analysis.baseline.wind_speed_column, analysis.measuredTurbulencePower, analysis.allMeasuredTurbCorrectedPowerCurve)
            if analysis.hasAllPowers:
                plotter.plotPowerLimits(specified_title = 'Warranted', gridLines = True)
        # scatter plots of the main measured quantities
        plotter.plotBy(analysis.windDirection, analysis.hubWindSpeed, analysis.dataFrame, gridLines = True)
        plotter.plotBy(analysis.windDirection, analysis.shearExponent, analysis.dataFrame, gridLines = True)
        plotter.plotBy(analysis.windDirection, analysis.hubTurbulence, analysis.dataFrame, gridLines = True)
        plotter.plotBy(analysis.hubWindSpeed, analysis.hubTurbulence, analysis.dataFrame, gridLines = True)
        if analysis.hasActualPower:
            plotter.plotBy(analysis.hubWindSpeed, analysis.powerCoeff, analysis.dataFrame, gridLines = True)
            plotter.plotBy('Input Hub Wind Speed', analysis.powerCoeff, analysis.allMeasuredPowerCurve, gridLines = True)
        if analysis.inflowAngle in analysis.dataFrame.columns:
            # wrap angles > 180 into the [-180, 180] range
            # NOTE(review): this mutates analysis.dataFrame in place
            analysis.dataFrame.loc[analysis.dataFrame[analysis.inflowAngle]>180,analysis.inflowAngle] -= 360
            plotter.plotBy(analysis.windDirection,analysis.inflowAngle,analysis.dataFrame, gridLines = True)
        plotter.plotCalibrationSectors()
        if analysis.hasActualPower:
            if analysis.multiple_datasets:
                plotter.plot_multiple(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve)
class TimeSeriesExporter:
    """Dump analysis time series (and optionally per-dataset raw and
    calibration frames) to delimited text files."""

    def export(self, analysis, time_series_path, clean=True, full=True, calibration=True,
               full_df_output_dir="TimeSeriesData"):
        """Write the analysis frame to ``time_series_path`` (tab-separated)
        when ``clean``; when ``full``, also write each dataset's full
        (and, when ``calibration``, calibration) frame into
        ``full_df_output_dir`` next to the main file."""
        data_frame = analysis.dataFrame
        dataset_configs = analysis.datasetConfigs

        if clean:
            data_frame.to_csv(time_series_path, sep='\t')

        if full:
            root_path = os.path.join(os.path.dirname(time_series_path), full_df_output_dir)
            chckMake(root_path)

            for ds in dataset_configs:
                ds.data.fullDataFrame.to_csv(root_path + os.sep + "FilteredDataSet_AllColumns_{0}.dat".format(ds.name),
                                             sep='\t')
                # calibration frame is optional on the dataset object
                if calibration and hasattr(ds.data,"filteredCalibrationDataframe"):
                    ds.data.filteredCalibrationDataframe.to_csv(
                        root_path + os.sep + "CalibrationDataSet_{0}.dat".format(ds.name), sep=',')
class Report:
bold_style = xlwt.easyxf('font: bold 1')
no_dp_style = xlwt.easyxf(num_format_str='0')
one_dp_style = xlwt.easyxf(num_format_str='0.0')
two_dp_style = xlwt.easyxf(num_format_str='0.00')
three_dp_style = xlwt.easyxf(num_format_str='0.000')
four_dp_style = xlwt.easyxf(num_format_str='0.0000')
percent_style = xlwt.easyxf(num_format_str='0.00%')
percent_no_dp_style = xlwt.easyxf(num_format_str='0%')
    def __init__(self, windSpeedBins, calculated_power_deviation_matrix_dimensions):
        """Capture the bin definitions and tool version used when writing the workbook."""
        self.version = ver.version  # tool version stamped into the report
        self.windSpeedBins = windSpeedBins
        self.calculated_power_deviation_matrix_dimensions = calculated_power_deviation_matrix_dimensions
def report(self, path, analysis):
report_power_curve = analysis.hasActualPower
book = xlwt.Workbook()
plotsDir = os.path.dirname(path)
plotter = PNGPlotter()
plotter.plot(analysis, plotsDir)
gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
if report_power_curve:
sh = book.add_sheet("PowerCurves", cell_overwrite_ok=True)
settingsSheet = book.add_sheet("Settings", cell_overwrite_ok=True)
self.reportSettings(settingsSheet, analysis)
if report_power_curve:
rowsAfterCurves = []
#rowsAfterCurves.append(self.reportPowerCurve(sh, 0, 0, 'uniqueAnalysisId', analysis.specified_power_curve, analysis)) #needs fixing + move to settings sheet
if analysis.specified_power_curve is not None:
if len(analysis.specified_power_curve.data_frame) != 0:
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 0, 'Specified', analysis.specified_power_curve, analysis))
if analysis.hasActualPower:
#for name in analysis.residualWindSpeedMatrices:
# residualMatrix = analysis.residualWindSpeedMatrices[name]
#
# if residualMatrix != None:
# self.reportPowerDeviations(book, "ResidualWindSpeed-%s" % name, residualMatrix, gradient)
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 5, 'Inner', analysis.innerMeasuredPowerCurve, analysis) )
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 10, 'Outer', analysis.outerMeasuredPowerCurve, analysis) )
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 15, 'All', analysis.allMeasuredPowerCurve, analysis) )
if analysis.turbRenormActive:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 20, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, analysis) )
if analysis.specified_power_curve is not None:
rowAfterCurves = max(rowsAfterCurves) + 5
sh.write(rowAfterCurves-2, 0, "Power Curves Interpolated to Specified Bins:", self.bold_style)
specifiedLevels = analysis.specified_power_curve.data_frame.index
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 5, 'Inner', analysis.innerMeasuredPowerCurve, specifiedLevels)
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 10, 'Outer', analysis.outerMeasuredPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 15, 'All', analysis.allMeasuredPowerCurve, specifiedLevels)
if analysis.turbRenormActive:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 20, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (25 if analysis.turbRenormActive else 20), 'DayTime', analysis.dayTimePowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (30 if analysis.turbRenormActive else 25), 'NightTime', analysis.nightTimePowerCurve, specifiedLevels)
self.reportPowerDeviations(book, "Baseline Power Deviations", analysis.baseline_power_deviations, gradient)
#if analysis.rewsActive:
# self.reportPowerDeviations(book, "REWS Deviation", analysis.rewsMatrix, gradient)
for correction_name in analysis.corrections:
correction = analysis.corrections[correction_name]
deviations = analysis.corrected_deviations[correction.correction_name]
sheet_name = get_valid_excel_sheet_name("{0} Power Deviations".format(
correction.short_correction_name), if_too_long_replace={'Power Deviations': 'PowDevs'})
self.reportPowerDeviations(book, sheet_name, deviations, gradient)
if analysis.nominal_wind_speed_distribution.absolute_path is not None:
sh = book.add_sheet("EnergyAnalysis", cell_overwrite_ok=True)
self.report_aep(sh, analysis)
if len(analysis.calibrations) == 1:
calSheet = book.add_sheet("Calibration", cell_overwrite_ok=True)
self.reportCalibration(calSheet,analysis.calibrations[0],timeStepInSeconds = analysis.timeStepInSeconds)
elif len(analysis.calibrations) > 1:
i = 0
for cal in analysis.calibrations:
i += 1
calSheet = book.add_sheet("Calibration_%03d" % i, cell_overwrite_ok=True)
self.reportCalibration(calSheet,cal,timeStepInSeconds = analysis.timeStepInSeconds)
book.save(path)
def reportCalibration(self, sh, calibration, timeStepInSeconds=600.):
    """Write one dataset's calibration table to the given xlwt worksheet.

    sh                -- xlwt worksheet to write into
    calibration       -- (datasetConfig, calibrationResult) pair; the result
                         carries calibrationSectorDataframe plus validity helpers
    timeStepInSeconds -- sample period used to convert counts into hours
    """
    conf, calib = calibration
    sh.write(0, 0, "Dataset Name", self.bold_style)
    sh.write(1, 0, conf.name)
    startRow = 3
    col = -14
    # True when the calibration frame carries separate below/above-8m/s counts.
    belowAbove = 'belowAbove' in calib.calibrationSectorDataframe.columns
    col += 16
    row = startRow
    sh.write(row, col, conf.name, self.bold_style)
    sh.write(row, col + 1, "Method:" + conf.calibrationMethod, self.bold_style)
    row += 1
    # Header row: count/validity columns only exist for computed calibrations.
    sh.write(row, col, "Bin", self.bold_style)
    sh.write(row, col + 1, "Slope", self.bold_style)
    sh.write(row, col + 2, "Offset", self.bold_style)
    if conf.calibrationMethod != 'Specified':
        sh.write(row, col + 3, "Count", self.bold_style)
        sh.write(row, col + 4, "Hours", self.bold_style)
        if belowAbove:
            sh.write(row, col + 5, "Count <= 8m/s", self.bold_style)
            sh.write(row, col + 6, "Hours <= 8m/s", self.bold_style)
            sh.write(row, col + 7, "Count > 8m/s", self.bold_style)
            sh.write(row, col + 8, "Hours > 8m/s", self.bold_style)
        sh.write(row, col + 9, "Speedup at 10m/s", self.bold_style)
        sh.write(row, col + 10, "% Speedup at 10m/s", self.bold_style)
        sh.write(row, col + 11, "Filter (Total Hours > 24)", self.bold_style)
        sh.write(row, col + 12, "Filter (Hours Below/Above 8m/s > 6)", self.bold_style)
        sh.write(row, col + 13, "Filter (Speedup Change < 2%)", self.bold_style)
        sh.write(row, col + 14, "Valid Sector", self.bold_style)
    row += 1
    # One row per direction sector (dataframe index), in ascending order.
    for key in sorted(calib.calibrationSectorDataframe.index):
        sh.write(row, col, float(key), self.bold_style)
        sh.write(row, col + 1, calib.calibrationSectorDataframe['Slope'][key], self.four_dp_style)
        sh.write(row, col + 2, calib.calibrationSectorDataframe['Offset'][key], self.four_dp_style)
        if conf.calibrationMethod != 'Specified':
            if 'Count' in calib.calibrationSectorDataframe.columns:
                sh.write(row, col + 3, calib.calibrationSectorDataframe['Count'][key], self.no_dp_style)
                sh.write(row, col + 4, calib.calibrationSectorDataframe['Count'][key] * (timeStepInSeconds / 3600.0), self.one_dp_style)
            if belowAbove:
                # ba is a (countBelow8, countAbove8) pair — TODO confirm against producer.
                ba = calib.calibrationSectorDataframe.loc[key, 'belowAbove']
                sh.write(row, col + 5, ba[0], self.no_dp_style)
                sh.write(row, col + 6, ba[0] * (timeStepInSeconds / 3600.0), self.one_dp_style)
                sh.write(row, col + 7, ba[1], self.no_dp_style)
                sh.write(row, col + 8, ba[1] * (timeStepInSeconds / 3600.0), self.one_dp_style)
            sh.write(row, col + 9, calib.calibrationSectorDataframe['SpeedUpAt10'][key], self.four_dp_style)
            sh.write(row, col + 10, (calib.calibrationSectorDataframe['SpeedUpAt10'][key] - 1.0), self.percent_style)
            totalHoursValid = calib.getTotalHoursValidity(key, timeStepInSeconds)
            sh.write(row, col + 11, "TRUE" if totalHoursValid else "FALSE")
            if belowAbove:
                belowAboveValid = calib.getBelowAboveValidity(key, timeStepInSeconds)
                sh.write(row, col + 12, "TRUE" if belowAboveValid else "FALSE")
            speedUpChangeValid = calib.getSpeedUpChangeValidity(key)
            sh.write(row, col + 13, "TRUE" if speedUpChangeValid else "FALSE")
            sectorValid = calib.getSectorValidity(key, timeStepInSeconds)
            sh.write(row, col + 14, "TRUE" if sectorValid else "FALSE", self.bold_style)
        row += 1
    if len(conf.calibrationFilters) > 0:
        row += 2
        sh.write(row, col, "Calibration Filters", self.bold_style)
        row += 1
        sh.write(row, col, "Data Column", self.bold_style)
        sh.write(row, col + 1, "Filter Type", self.bold_style)
        sh.write(row, col + 2, "Inclusive", self.bold_style)
        sh.write(row, col + 3, "Filter Value", self.bold_style)
        sh.write(row, col + 4, "Active", self.bold_style)
        row += 1
        for filt in conf.calibrationFilters:
            # BUG FIX: was isinstance(Filter, TimeOfDayFilter) — testing the class
            # name instead of the loop variable, so this branch could never run.
            if isinstance(filt, TimeOfDayFilter):
                sh.write(row, col, "Time Of Day Filter")
                sh.write(row, col + 1, str(filt.startTime))
                sh.write(row, col + 2, str(filt.endTime))
                sh.write(row, col + 3, str(filt.daysOfTheWeek))
                sh.write(row, col + 4, str(filt.months))
            else:
                sh.write(row, col, filt.column)
                sh.write(row, col + 1, filt.filterType)
                sh.write(row, col + 2, filt.inclusive)
                sh.write(row, col + 3, str(filt))
                sh.write(row, col + 4, filt.active)  # always true if in list...
            row += 1
def reportSettings(self, sh, analysis):
    """Write the settings sheet: tool version, correction switches, general
    analysis settings, the inner range definition and the full configuration
    of every dataset (measurements, shear, profile levels and filters).

    sh       -- xlwt worksheet to write into
    analysis -- analysis object (also used directly as the configuration source)
    """
    config = analysis
    sh.write(0, 1, "PCWG Tool Version Number:")
    sh.write(0, 2, self.version)
    sh.write(0, 3, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
    row = 3
    labelColumn = 1
    dataColumn = 2
    sh.col(labelColumn).width = 256 * 30
    sh.col(dataColumn).width = 256 * 50
    sh.col(dataColumn + 1).width = 256 * 50
    # Corrections
    sh.write(row, labelColumn, "Density Correction Active", self.bold_style)
    sh.write(row, dataColumn, config.densityCorrectionActive)
    row += 1
    sh.write(row, labelColumn, "REWS Correction Active", self.bold_style)
    sh.write(row, dataColumn, config.rewsActive)
    row += 1
    sh.write(row, labelColumn, "Turbulence Correction Active", self.bold_style)
    sh.write(row, dataColumn, config.turbRenormActive)
    row += 1
    # General settings
    row += 1
    sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
    sh.write(row, dataColumn, analysis.timeStepInSeconds)
    row += 1
    sh.write(row, labelColumn, "Power Curve Minimum Count", self.bold_style)
    sh.write(row, dataColumn, config.powerCurveMinimumCount)
    row += 1
    sh.write(row, labelColumn, "Power Curve Mode", self.bold_style)
    sh.write(row, dataColumn, config.powerCurveMode)
    row += 1
    # Inner range: one lower/upper limit pair per dimension.
    row += 1
    sh.write(row, labelColumn, "Inner Range", self.bold_style)
    row += 1
    for dimension in config.inner_range_dimensions:
        sh.write(row, labelColumn, "Lower {0}".format(dimension.parameter), self.bold_style)
        sh.write(row, dataColumn, dimension.lower_limit)
        row += 1
        sh.write(row, labelColumn, "Upper {0}".format(dimension.parameter), self.bold_style)
        sh.write(row, dataColumn, dimension.upper_limit)
        row += 1
    # Datasets: the full configuration of each dataset in turn.
    row += 1
    sh.write(row, labelColumn, "Datasets", self.bold_style)
    row += 2
    for datasetConfig in analysis.datasetConfigs:
        sh.write(row, labelColumn, "Name", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.name)
        row += 1
        sh.write(row, labelColumn, "Path", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.path)
        row += 1
        sh.write(row, labelColumn, "Rated Power", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.ratedPower)
        row += 1
        sh.write(row, labelColumn, "HubHeight", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.hubHeight)
        row += 1
        sh.write(row, labelColumn, "Diameter", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.diameter)
        row += 1
        sh.write(row, labelColumn, "Cut In Wind Speed", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.cutInWindSpeed)
        row += 1
        sh.write(row, labelColumn, "Cut Out Wind Speed", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.cutOutWindSpeed)
        row += 1
        sh.write(row, labelColumn, "Start Date", self.bold_style)
        sh.write(row, dataColumn, str(datasetConfig.startDate))
        row += 1
        sh.write(row, labelColumn, "End Date", self.bold_style)
        sh.write(row, dataColumn, str(datasetConfig.endDate))
        row += 1
        sh.write(row, labelColumn, "Hub Wind Speed Mode", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.hubWindSpeedMode)
        row += 1
        sh.write(row, labelColumn, "Density Mode", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.densityMode)
        row += 2
        sh.write(row, labelColumn, "REWS Defined", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.rewsDefined)
        row += 1
        sh.write(row, labelColumn, "Rotor Mode", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.rotorMode)
        row += 1
        sh.write(row, labelColumn, "Hub Mode", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.hubMode)
        row += 1
        sh.write(row, labelColumn, "Number of Rotor Levels", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.numberOfRotorLevels)
        row += 2
        # Measurement column mappings for the input time series.
        sh.write(row, labelColumn, "Measurements", self.bold_style)
        row += 1
        sh.write(row, labelColumn, "Input Time Series Path", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.input_time_series.absolute_path)
        row += 1
        sh.write(row, labelColumn, "Date Format", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.dateFormat)
        row += 1
        sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.timeStepInSeconds)
        row += 1
        sh.write(row, labelColumn, "Time Stamp", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.timeStamp)
        row += 1
        sh.write(row, labelColumn, "Bad Data Value", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.badData)
        row += 1
        sh.write(row, labelColumn, "Header Rows", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.headerRows)
        row += 1
        sh.write(row, labelColumn, "Turbine Location Wind Speed", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.turbineLocationWindSpeed)
        row += 1
        sh.write(row, labelColumn, "Hub Wind Speed", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.hubWindSpeed)
        row += 1
        sh.write(row, labelColumn, "Hub Turbulence", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.hubTurbulence)
        row += 1
        sh.write(row, labelColumn, "Reference Wind Speed", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.referenceWindSpeed)
        row += 1
        sh.write(row, labelColumn, "Reference Wind Speed Std Dev", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.referenceWindSpeedStdDev)
        row += 1
        sh.write(row, labelColumn, "Reference Wind Direction", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.referenceWindDirection)
        row += 1
        sh.write(row, labelColumn, "Reference Wind Direction Offset", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.referenceWindDirectionOffset)
        row += 1
        sh.write(row, labelColumn, "Density", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.density)
        row += 1
        sh.write(row, labelColumn, "Temperature", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.temperature)
        row += 1
        sh.write(row, labelColumn, "Pressure", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.pressure)
        row += 1
        # Shear: reference-location only, or reference + turbine-location pairs.
        if len(datasetConfig.turbineShearMeasurements) > 0:
            row = self.writeShear(sh, labelColumn, dataColumn, row, datasetConfig.referenceShearMeasurements, 'Reference Location ')
            row = self.writeShear(sh, labelColumn, dataColumn, row, datasetConfig.turbineShearMeasurements, 'Turbine Location ')
        else:
            row = self.writeShear(sh, labelColumn, dataColumn, row, datasetConfig.referenceShearMeasurements)
        sh.write(row, labelColumn, "Power", self.bold_style)
        sh.write(row, dataColumn, datasetConfig.power)
        row += 2
        if datasetConfig.rewsDefined:
            sh.write(row, labelColumn, "Profile Levels", self.bold_style)
            row += 1
            sh.write(row, labelColumn, "Height", self.bold_style)
            sh.write(row, dataColumn, "Speed", self.bold_style)
            sh.write(row, dataColumn + 1, "Direction", self.bold_style)
            row += 1
            for height in sorted(datasetConfig.data.windSpeedLevels):
                sh.write(row, labelColumn, height)
                sh.write(row, dataColumn, datasetConfig.data.windSpeedLevels[height])
                if hasattr(datasetConfig.data, 'windDirectionLevels'):  # we are not using this in REWS yet
                    if height in datasetConfig.data.windDirectionLevels:
                        sh.write(row, dataColumn + 1, datasetConfig.data.windDirectionLevels[height])
                row += 1
        sh.write(row, labelColumn, "Filters", self.bold_style)
        row += 1
        sh.write(row, labelColumn, "Data Column", self.bold_style)
        sh.write(row, dataColumn, "Filter Type", self.bold_style)
        sh.write(row, dataColumn + 1, "Inclusive", self.bold_style)
        sh.write(row, dataColumn + 2, "Filter Value", self.bold_style)
        sh.write(row, dataColumn + 3, "Active", self.bold_style)
        row += 1
        # Loop variable renamed from 'filter' (shadowed the builtin).
        for filt in datasetConfig.filters:
            # BUG FIX: was isinstance(Filter, TimeOfDayFilter) — testing the class
            # name instead of the loop variable, so this branch could never run.
            if isinstance(filt, TimeOfDayFilter):
                sh.write(row, labelColumn, "Time Of Day Filter")
                sh.write(row, dataColumn, str(filt.startTime))
                sh.write(row, dataColumn + 1, str(filt.endTime))
                sh.write(row, dataColumn + 2, str(filt.daysOfTheWeek))
                sh.write(row, dataColumn + 3, str(filt.months))
            else:
                sh.write(row, labelColumn, filt.column)
                sh.write(row, dataColumn, filt.filterType)
                sh.write(row, dataColumn + 1, filt.inclusive)
                sh.write(row, dataColumn + 2, str(filt))
                sh.write(row, dataColumn + 3, "True")  # always true if in list...
            row += 1
def writeShear(self, sh, labelColumn, dataColumn, row, shearList, prefix=""):
    """Write a name row and a height row for each shear measurement.

    Returns the next free row after the written block.
    """
    for number, measurement in enumerate(shearList, start=1):
        sh.write(row, labelColumn, "{0}Shear Measurement {1}".format(prefix, number), self.bold_style)
        sh.write(row, dataColumn, measurement.wind_speed_column)
        sh.write(row + 1, labelColumn, "{0}Shear Measurement {1} Height ".format(prefix, number), self.bold_style)
        sh.write(row + 1, dataColumn, measurement.height)
        row += 2
    return row
def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, analysis):
    """Write a power curve table (wind speed, power, turbulence, count) and,
    when available, its zero-turbulence power curve below it.

    Returns the number of rows consumed below rowOffset (countRow).
    """
    powerCurveLevels = powerCurve.data_frame.copy()
    if powerCurve.wind_speed_column is None:
        # Specified curves are indexed by wind speed; surface the index as a column.
        powerCurveLevels['Specified Wind Speed'] = powerCurveLevels.index
        wind_speed_col = 'Specified Wind Speed'
    else:
        wind_speed_col = powerCurve.wind_speed_column
    # BUG FIX: DataFrame.sort() was removed in pandas 0.20 — sort_values() is the
    # drop-in replacement (available since pandas 0.17).
    powerCurveLevels = powerCurveLevels.sort_values(wind_speed_col)
    sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
    sh.col(columnOffset + 1).width = 256 * 15
    sh.col(columnOffset + 2).width = 256 * 15
    sh.col(columnOffset + 3).width = 256 * 15
    if powerCurve.wind_speed_column is None:
        sh.col(columnOffset + 5).width = 256 * 5
    else:
        sh.col(columnOffset + 4).width = 256 * 15
        sh.col(columnOffset + 5).width = 256 * 5
    # Output column position (1-4) and cell style for each reportable column name.
    rowOrders = {'Data Count': 4, analysis.actualPower: 2, analysis.hubTurbulence: 3,
                 analysis.baseline.wind_speed_column: 1, 'Specified Power': 2, 'Specified Turbulence': 3,
                 'Specified Wind Speed': 1, analysis.measuredTurbulencePower: 2, wind_speed_col: 1}
    styles = {'Data Count': self.no_dp_style, analysis.baseline.wind_speed_column: self.two_dp_style,
              analysis.actualPower: self.no_dp_style, analysis.hubTurbulence: self.percent_no_dp_style,
              'Specified Power': self.no_dp_style, 'Specified Turbulence': self.percent_no_dp_style,
              'Specified Wind Speed': self.two_dp_style, analysis.measuredTurbulencePower: self.no_dp_style,
              wind_speed_col: self.two_dp_style}
    for colname in powerCurveLevels.columns:
        if colname in styles:
            sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
    countRow = 1
    for windSpeed in powerCurveLevels.index:
        for colname in powerCurveLevels.columns:
            if colname in styles:
                val = powerCurveLevels[colname][windSpeed]
                if type(val) is np.int64:
                    # xlwt needs numbers to be recognisable as integers or floats;
                    # isinstance(np.int64(1), int) returns False. Other numpy types
                    # (int32, float64, etc.) are recognised as int and float appropriately.
                    val = int(val)
                sh.write(rowOffset + countRow + 1, columnOffset + rowOrders[colname], val, styles[colname])
        countRow += 1
    if hasattr(powerCurve, 'zeroTurbulencePowerCurve'):
        countRow += 3
        try:
            pc = powerCurve.zeroTurbulencePowerCurve.dfPowerLevels
            sh.write(rowOffset + countRow, columnOffset + 2, name + ' Zero TI Power Curve', self.bold_style)
            countRow += 1
            sh.write(rowOffset + countRow, columnOffset + 1, 'Wind Speed', self.bold_style)
            sh.write(rowOffset + countRow, columnOffset + 2, 'Power', self.bold_style)
            for ws in pc.index:
                sh.write(rowOffset + countRow + 1, columnOffset + 1, ws, styles['Specified Wind Speed'])
                sh.write(rowOffset + countRow + 1, columnOffset + 2, pc.loc[ws, 'Power'], styles['Specified Wind Speed'])
                countRow += 1
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed SystemExit and
            # KeyboardInterrupt; a failed zero-TI calculation is reported in-sheet.
            sh.write(rowOffset + countRow, columnOffset + 2, 'Zero TI Power Curve not calculated successfully for %s power curve.' % name)
            countRow += 1
    else:
        countRow += 3
        Status.add("Not reporting zero TI power curve for %s as it is not defined." % (name), verbosity=2)
        sh.write(rowOffset + countRow, columnOffset + 2, "Not reporting zero TI power curve for %s as it is not defined." % (name))
        countRow += 1
    return countRow
def reportInterpolatedPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, levels):
    """Tabulate the interpolated power and turbulence of a curve at the given
    wind-speed levels, one row per level in ascending order."""
    sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
    for caption, offset in (("Wind Speed", 1), ("Power", 2), ("Turbulence", 3)):
        sh.write(rowOffset + 1, columnOffset + offset, caption, self.bold_style)
    for lineRow, windSpeed in enumerate(sorted(levels), start=rowOffset + 2):
        sh.write(lineRow, columnOffset + 1, windSpeed, self.two_dp_style)
        sh.write(lineRow, columnOffset + 2, float(powerCurve.power_function(windSpeed)), self.no_dp_style)
        sh.write(lineRow, columnOffset + 3, float(powerCurve.turbulence_function(windSpeed)), self.percent_no_dp_style)
def reportPowerDeviations(self, book, sheetName, powerDeviations, gradient):
    """Render one power deviation matrix onto its own sheet via the dedicated
    PowerDeviationMatrixSheet writer."""
    matrix_sheet = PowerDeviationMatrixSheet(self.calculated_power_deviation_matrix_dimensions)
    matrix_sheet.report(book, sheetName, powerDeviations, gradient)
def report_aep(self, sh, analysis):
    """Populate the 'EnergyAnalysis' sheet: the EY acceptance-test summary row,
    the measured power curve up to the Last Complete Bin (LCB), the acceptance
    checks, optional turbulence-corrected AEP figures, and the AEP energy
    distribution table.

    sh       -- xlwt worksheet ('EnergyAnalysis')
    analysis -- analysis object carrying aepCalc / aepCalcLCB results
    """
    sh # NOTE(review): no-op expression statement ("get tables in PP report form" in the original) — candidate for removal
    # Summary of EY acceptance test results:
    # Factor converting a sample count into hours (e.g. 600 s steps -> 1/6 h each).
    hrsMultiplier = (analysis.timeStepInSeconds/3600.0)
    row = 2
    tall_style = xlwt.easyxf('font:height 360;') # 18pt
    first_row = sh.row(row)
    first_row.set_style(tall_style)
    # Acceptance-test summary header.
    sh.write(row,2, "Reference Turbine", self.bold_style)
    sh.write(row,3, "Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
    sh.write(row,4, "Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
    sh.write(row,5, "Last Complete Bin (LCB)", self.bold_style)
    sh.write(row,6, "Direction Sectors Analysed (degrees)", self.bold_style)
    sh.write(row,7, "Measured Hours", self.bold_style)
    #sh.write(row,8, "Annual Energy Yield Uncertainty as a percentage of the Warranted Annual Yield (%)", self.bold_style)
    row += 1
    # Summary values; AEP fractions are written as percentages.
    sh.write(row,2, analysis.Name)
    sh.write(row,3, analysis.aepCalcLCB.AEP*100, self.two_dp_style)
    sh.write(row,4, analysis.aepCalc.AEP*100, self.two_dp_style)
    sh.write(row,5, analysis.aepCalcLCB.lcb, self.two_dp_style)
    sh.write(row,6, "{mi} - {ma}".format(mi=analysis.dataFrame[analysis.windDirection].min(),ma=analysis.dataFrame[analysis.windDirection].max()))
    # Total measured time covered by the 'all' measured power curve, in hours.
    timeCovered = analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount].sum() * hrsMultiplier
    sh.write(row,7, timeCovered, self.two_dp_style)
    #sh.write(row,8, "NOT YET CALCULATED")
    row += 3
    # Measured power curve block; header shows the reference density when defined.
    if hasattr(analysis.specified_power_curve,"referenceDensity"):
        sh.write_merge(row,row,2,6, "Measured Power Curve\n Reference Air Density = {ref} kg/m^3".format(ref=analysis.specified_power_curve.referenceDensity), self.bold_style)
    #sh.write(row,7, "Category A Uncertainty", self.bold_style)
    #sh.write(row,8, "Category B Uncertainty", self.bold_style)
    #sh.write(row,9, "Category C Uncertainty", self.bold_style)
    row += 1
    sh.write(row,2, "Bin No", self.bold_style)
    sh.write(row,3, "Bin Centre Wind Speed", self.bold_style)
    sh.write(row,4, "Hub Height Wind Speed", self.bold_style)
    sh.write(row,5, "Power Output", self.bold_style)
    sh.write(row,6, "Cp", self.bold_style)
    sh.write(row,7, "Qty 10-Min Data", self.bold_style)
    sh.write(row,8, "Standard Deviation", self.bold_style)
    #sh.write(row,7, "Standard Uncertainty", self.bold_style)
    #sh.write(row,8, "Standard Uncertainty", self.bold_style)
    #sh.write(row,9, "Standard Uncertainty", self.bold_style)
    row += 1
    # Symbol row (IEC-style notation).
    sh.write(row,2, "I", self.bold_style)
    sh.write(row,3, "Vi_centre", self.bold_style)
    sh.write(row,4, "Vi", self.bold_style)
    sh.write(row,5, "Pi", self.bold_style)
    sh.write(row,7, "Ni", self.bold_style)
    sh.write(row,8, "StDev i", self.bold_style)
    #sh.write(row,7, "si", self.bold_style)
    #sh.write(row,8, "ui", self.bold_style)
    #sh.write(row,9, "uc,I", self.bold_style)
    row += 1
    # Units row.
    sh.write(row,3, "[m/s]", self.bold_style)
    sh.write(row,4, "[kW]", self.bold_style)
    sh.write(row,8, "[kW]", self.bold_style)
    #sh.write(row,7, "[kW]", self.bold_style)
    #sh.write(row,8, "[kW]", self.bold_style)
    #sh.write(row,9, "[kW]", self.bold_style)
    # One row per populated wind-speed bin up to the last complete bin.
    for binNo,ws in enumerate(analysis.allMeasuredPowerCurve.data_frame.index):
        if ws <= analysis.aepCalcLCB.lcb and analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount][ws] > 0:
            row+=1
            sh.write(row,2, binNo+1, self.no_dp_style)
            sh.write(row,3, ws, self.one_dp_style)
            sh.write(row,4, analysis.allMeasuredPowerCurve.data_frame[analysis.baseline.wind_speed_column][ws], self.two_dp_style)
            sh.write(row,5, analysis.allMeasuredPowerCurve.data_frame[analysis.actualPower][ws], self.two_dp_style)
            # Cp and standard deviation are optional columns; write "-" when absent.
            if analysis.powerCoeff in analysis.allMeasuredPowerCurve.data_frame.columns:
                sh.write(row,6, analysis.allMeasuredPowerCurve.data_frame[analysis.powerCoeff][ws], self.two_dp_style)
            else:
                sh.write(row,6, "-", self.no_dp_style)
            datCount = analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount][ws]
            sh.write(row,7, datCount, self.no_dp_style)
            if analysis.powerStandDev in analysis.allMeasuredPowerCurve.data_frame.columns:
                sh.write(row,8, analysis.allMeasuredPowerCurve.data_frame[analysis.powerStandDev][ws])
            else:
                sh.write(row,8, "-", self.no_dp_style)
            #sh.write(row,7, "-", self.no_dp_style)
            #sh.write(row,8, "~", self.no_dp_style)
            #sh.write(row,9, "-", self.no_dp_style)
    row+=2
    # Acceptance check 1: at least 180 hours of measured data.
    sh.write_merge(row,row,2,5, "More than 180 hours of data:", self.bold_style)
    sh.write(row,6, "TRUE" if timeCovered > 180 else "FALSE")
    sh.write(row,7, "({0} Hours)".format(round(timeCovered,2)) , self.two_dp_style)
    row+=1
    # Acceptance check 2: LCB exceeds 1.5 x (wind speed at 85% of rated power).
    if hasattr(analysis,"windSpeedAt85pctX1pnt5"):
        sh.write_merge(row,row,2,5, "Largest WindSpeed > {0}:".format(round(analysis.windSpeedAt85pctX1pnt5,2)), self.bold_style)
        sh.write(row,6, "TRUE" if analysis.aepCalcLCB.lcb > analysis.windSpeedAt85pctX1pnt5 else "FALSE")
        sh.write(row,7, "Threshold is 1.5*(WindSpeed@0.85*RatedPower)")
    row+=1
    # Acceptance check 3: extrapolated AEP within 1% of LCB AEP; raw values are
    # written alongside when the check fails.
    sh.write_merge(row,row,2,5, "AEP Extrap. within 1% of AEP LCB:",self.bold_style)
    ans = abs(1-(analysis.aepCalc.AEP/analysis.aepCalcLCB.AEP)) < 0.01
    sh.write(row,6, "TRUE" if ans else "FALSE")
    if not ans:
        sh.write(row,8, analysis.aepCalc.AEP)
        sh.write(row,9, analysis.aepCalcLCB.AEP)
    # Turbulence-corrected AEP figures, only when the renormalisation ran.
    if analysis.turbRenormActive:
        row += 2
        sh.write(row,3, "Turbulence Corrected Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
        sh.write(row,4, "Turbulence Corrected Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
        sh.write(row+1,3, analysis.turbCorrectedAepCalcLCB.AEP*100, self.two_dp_style)
        sh.write(row+1,4, analysis.turbCorrectedAepCalc.AEP*100, self.two_dp_style)
    # AEP distribution table: reference vs measured frequency/power/energy per bin.
    row+=2
    sh.write_merge(row,row,3,10,"AEP Distribution",self.bold_style)
    row+=1
    sh.write_merge(row,row,3,6, "Reference", self.bold_style)
    sh.write_merge(row,row,7,10, "Measured", self.bold_style)
    row+=1
    sh.write(row,2,"Wind Speed",self.bold_style)
    sh.write(row,3,'Reference Freq',self.bold_style)
    sh.write(row,4,'Reference Power',self.bold_style)
    sh.write(row,5,'Reference Power (Resampled)',self.bold_style)
    sh.write(row,6,"Reference Energy",self.bold_style)
    sh.write(row,7,'Measured Freq',self.bold_style)
    sh.write(row,8,'Measured Power',self.bold_style)
    sh.write(row,9,'Measured Power (Resampled)',self.bold_style)
    sh.write(row,10,"Measured Energy",self.bold_style)
    # NOTE(review): column 4 ('Reference Power') is filled from 'Reference_Upper'
    # and column 5 ('... Resampled') from 'Reference_Power' (same for Measured) —
    # verify this header/value mapping is intentional.
    for binNum in analysis.aepCalc.energy_distribution.index:
        row+=1
        sh.write(row,2,binNum,self.two_dp_style)
        sh.write(row,3,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Freq"] ,self.four_dp_style)
        sh.write(row,4,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Upper"] ,self.four_dp_style)
        sh.write(row,5,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Power"] ,self.four_dp_style)
        sh.write(row,6,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Energy"] ,self.four_dp_style)
        sh.write(row,7,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Freq"] ,self.four_dp_style)
        sh.write(row,8,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Upper"] ,self.four_dp_style)
        sh.write(row,9,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Power"] ,self.four_dp_style)
        sh.write(row,10,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Energy"] ,self.four_dp_style)
    row+=3
def write_power_curves(self):
    """Log the specified / inner / outer / all power curves as a tab-separated
    table (one row per wind-speed bin) at debug verbosity.

    DRY refactor: the four copy-pasted if/else blocks are replaced by a loop
    over the curves in the same column order; the output text is unchanged.
    """
    Status.add("Wind Speed\tSpecified\tInner\tOuter\tAll", verbosity=2)
    curves = (self.specified_power_curve, self.innerMeasuredPowerCurve,
              self.outerMeasuredPowerCurve, self.allMeasuredPowerCurve)
    for i in range(self.windSpeedBins.numberOfBins):
        windSpeed = self.windSpeedBins.binCenterByIndex(i)
        text = "%0.4f\t" % windSpeed
        for curve in curves:
            # NOTE(review): membership is tested against the DataFrame itself
            # (i.e. its columns), preserved from the original — verify it should
            # not be 'in curve.data_frame.index'.
            if windSpeed in curve.data_frame:
                text += "%0.4f\t" % curve.data_frame[windSpeed]
            else:
                text += "\t"
        Status.add(text, verbosity=2)
def write_power_deviation_matrix(self):
    """Log the power deviation matrix (turbulence rows, highest first, by
    wind-speed columns) at debug verbosity, followed by a wind-speed footer row."""
    deviations = self.powerDeviations
    windSpeedCount = self.windSpeedBins.numberOfBins
    for turbIndex in reversed(range(self.turbulenceBins.numberOfBins)):
        turbulence = self.turbulenceBins.binCenterByIndex(turbIndex)
        cells = ["%f\t" % turbulence]
        for speedIndex in range(windSpeedCount):
            windSpeed = self.windSpeedBins.binCenterByIndex(speedIndex)
            if windSpeed in deviations and turbulence in deviations[windSpeed]:
                cells.append("%f\t" % deviations[windSpeed][turbulence])
            else:
                # Empty cell when this (wind speed, turbulence) bin has no deviation.
                cells.append("\t")
        Status.add("".join(cells), verbosity=2)
    footer = ["\t"]
    for speedIndex in range(windSpeedCount):
        footer.append("%f\t" % self.windSpeedBins.binCenterByIndex(speedIndex))
    Status.add("".join(footer), verbosity=2)
def report_scatter_metric(self, sh, analysis, row, turbRenormActive):
    """Write the pre- (and, when renormalisation ran, post-) TI-renormalisation
    scatter metrics; return the next row to write after the block."""
    headerRow = row + 5
    valueRow = headerRow + 1
    sh.write(headerRow, 1, "Scatter Metric Before TI Renormalisation:", self.bold_style)
    sh.write(valueRow, 1, analysis.powerCurveScatterMetric, self.percent_style)
    if turbRenormActive:
        sh.write(headerRow, 2, "Scatter Metric After TI Renormalisation:", self.bold_style)
        sh.write(valueRow, 2, analysis.powerCurveScatterMetricAfterTiRenorm, self.percent_style)
    return headerRow + 3
class AnonReport(Report):
    """Anonymous report: writes a normalised power curve and power deviation
    matrices (no absolute wind speeds or powers) to a single xlwt sheet."""

    def __init__(self, targetPowerCurve, wind_bins, turbulence_bins, version="unknown"):
        self.version = version
        self.targetPowerCurve = targetPowerCurve
        self.turbulenceBins = turbulence_bins
        self.normalisedWindSpeedBins = wind_bins

    def report(self, path, analysis, powerDeviationMatrix=True, scatterMetric=True):
        """Build the workbook and save it to 'path'."""
        self.analysis = analysis
        book = xlwt.Workbook()
        sh = book.add_sheet("Anonymous Report", cell_overwrite_ok=True)
        sh.write(0, 0, "PCWG Tool Version Number:")
        sh.write(0, 1, self.version)
        sh.write(0, 2, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
        row = 1
        if powerDeviationMatrix:
            row = self.report_power_deviation_matrix(sh, analysis, book)
        if scatterMetric:
            # report_scatter_metric is inherited from Report.
            row = self.report_scatter_metric(sh, analysis, row, analysis.turbRenormActive)
        book.save(path)

    def report_power_deviation_matrix(self, sh, analysis, book):
        """Write the target power curve and the hub (and, if present, turbulence
        corrected) deviation matrices; return the largest row used."""
        gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
        pcStart = 2
        pcEnd = pcStart + self.normalisedWindSpeedBins.numberOfBins + 5
        deviationMatrixStart = pcEnd + 5
        row = []
        row.append(self.reportPowerCurve(sh, pcStart, 0, self.targetPowerCurve.name + ' Power Curve', self.targetPowerCurve))
        row.append(self.reportPowerDeviations(sh, deviationMatrixStart, analysis.normalisedHubPowerDeviations, gradient, "Hub Power"))
        # BUG FIX (idiom): was '!= None' — identity comparison with None must use 'is not'.
        if analysis.normalisedTurbPowerDeviations is not None:
            deviationMatrixStart += (self.turbulenceBins.numberOfBins + 5) * 2
            row.append(self.reportPowerDeviations(sh, deviationMatrixStart, analysis.normalisedTurbPowerDeviations, gradient, "Turb Corrected Power"))
        return max(row)

    def reportPowerDeviations(self, sh, startRow, powerDeviations, gradient, name):
        """Write a deviation matrix and, below it, the matching data count matrix.

        Returns the row index just past the count matrix.
        """
        countShift = self.turbulenceBins.numberOfBins + 5
        sh.write(startRow, 1, "Deviations Matrix (%s)" % name, self.bold_style)
        sh.write(startRow + countShift, 1, "Data Count Matrix (%s)" % name, self.bold_style)
        for j in range(self.turbulenceBins.numberOfBins):
            turbulence = self.turbulenceBins.binCenterByIndex(j)
            # Turbulence rows are laid out bottom-up (highest turbulence at the top).
            row = startRow + self.turbulenceBins.numberOfBins - j
            countRow = row + countShift
            sh.write(row, 0, turbulence, self.percent_no_dp_style)
            sh.write(countRow, 0, turbulence, self.percent_no_dp_style)
            for i in range(self.normalisedWindSpeedBins.numberOfBins):
                windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
                col = i + 1
                if j == 0:
                    # Wind-speed axis labels, written once under each matrix.
                    sh.write(row + 1, col, windSpeed, self.two_dp_style)
                    sh.write(countRow + 1, col, windSpeed, self.two_dp_style)
                if windSpeed in powerDeviations.matrix:
                    if turbulence in powerDeviations.matrix[windSpeed]:
                        deviation = powerDeviations.matrix[windSpeed][turbulence]
                        count = int(powerDeviations.count[windSpeed][turbulence])
                        # NaN deviations (and their counts) are left blank, as before.
                        if not np.isnan(deviation):
                            sh.write(row, col, deviation, gradient.getStyle(deviation))
                            sh.write(countRow, col, count, self.no_dp_style)
        return startRow + self.turbulenceBins.numberOfBins + countShift

    def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve):
        """Write the normalised power curve (speed, power, turbulence, count);
        return the number of rows consumed."""
        sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
        rowOrders = {'Data Count': 4, 'Normalised Wind Speed': 1, 'Normalised Power': 2, 'Turbulence': 3}
        # Idiom: iterate the dict directly rather than .keys().
        for colname in rowOrders:
            sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
        countRow = 1
        for i in range(self.normalisedWindSpeedBins.numberOfBins):
            windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
            mask = self.analysis.dataFrame['Normalised WS Bin'] == windSpeed
            dataCount = self.analysis.dataFrame[mask]['Normalised WS Bin'].count()
            absoluteWindSpeed = windSpeed * self.analysis.observedRatedWindSpeed
            sh.write(rowOffset + countRow + 1, columnOffset + 1, windSpeed, self.two_dp_style)
            sh.write(rowOffset + countRow + 1, columnOffset + 4,
                     dataCount, self.no_dp_style)
            if dataCount > 0:
                # Power is normalised by the observed rated power.
                sh.write(rowOffset + countRow + 1, columnOffset + 2,
                         float(powerCurve.powerFunction(absoluteWindSpeed)) / self.analysis.observedRatedPower, self.two_dp_style)
                sh.write(rowOffset + countRow + 1, columnOffset + 3,
                         float(powerCurve.turbulenceFunction(absoluteWindSpeed)), self.percent_no_dp_style)
            countRow += 1
        return countRow
| PCWG/PCWG | pcwg/reporting/reporting.py | reporting.py | py | 47,780 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "plots.MatplotlibPlotter",
... |
14706890571 | #!/usr/bin/env python
# coding: utf-8
import requests
import pymongo
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
# #### Open chrome driver
# open chrome driver browser
def init_browser():
    """Return a visible (non-headless) Chrome Browser driven by the local
    'chromedriver' executable."""
    return Browser('chrome', executable_path='chromedriver', headless=False)
# ## NASA Mars News - Collect Latest News Title and Paragraph Text
def scrape():
browser = init_browser()
# define url
mars_news_url = "https://mars.nasa.gov/news/"
time.sleep(3)
browser.visit(mars_news_url)
#putting a sleep function here seems to make the flask application run
time.sleep(3)
# create beautiful soup object
html = browser.html
mars_news_soup = BeautifulSoup(html, 'html.parser')
# I added a few time.sleep(3) functions to allow the browser time to scrape the data. Hopefully that works.
# find the first news title
news_title = mars_news_soup.body.find("div", class_="content_title").text
# find the paragraph associated with the first title
news_p = mars_news_soup.body.find("div", class_="article_teaser_body").text
time.sleep(3)
mars_image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(mars_image_url)
# create the soup item
html_image = browser.html
mars_imaging = BeautifulSoup(html_image, 'html.parser')
# the large image is within the figue element with class = lede
image = mars_imaging.body.find("figure", class_="lede")
#obtaining the url for the photo
feat_img_url = image.find('figure', class_='lede').a['href']
featured_image_url = f'https://www.jpl.nasa.gov{feat_img_url}'
featured_image_url
# ## Mars Weather
# open url in browser
#needs time to load
time.sleep(3)
# create a soup item
# ## Mars Facts
time.sleep(3)
# define url
mars_facts_url = "https://space-facts.com/mars/"
# read html into pandas
table = pd.read_html(mars_facts_url)
# returns the value from an html table
df = table[2]
df.columns = ["Description", "Value"]
# converting data to html table
mars_facts_html=df.to_html()
mars_facts_html
# ## Mars Hemispheres
# define url and open in browser
time.sleep(3)
mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemispheres_url)
# #### Cerberus hemisphere
# click on the link for the Cerberus hemisphere
browser.click_link_by_partial_text('Cerberus')
# click on the open button to get to enhanced picture
browser.click_link_by_partial_text('Open')
# create a soup item
hemispheres_html = browser.html
cerberus_soup = BeautifulSoup(hemispheres_html, 'html.parser')
cerberus = cerberus_soup.body.find('img', class_ = 'wide-image')
cerberus_img = cerberus['src']
hem_base_url = 'https://astrogeology.usgs.gov'
#will store url later
cerberus_url = hem_base_url + cerberus_img
# #### Schiaperelli hemisphere
# define url and open in browser
time.sleep(3)
mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemispheres_url)
# click on the link for the Cerberus hemisphere
browser.click_link_by_partial_text('Schiaparelli')
# click on the open button to get to enhanced picture
browser.click_link_by_partial_text('Open')
#schiap html page
# create a soup item
schiap_html = browser.html
schiap_soup = BeautifulSoup(schiap_html, 'html.parser')
#obtaining the image of the schiaparelli
schiap = schiap_soup.body.find('img', class_ = 'wide-image')
schiap_img = schiap['src']
hem_base_url = 'https://astrogeology.usgs.gov'
schiap_url = hem_base_url + schiap_img
# print(schiap_url)
# #### Syrtis hemisphere
# define url and open in browser
time.sleep(3)
mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemispheres_url)
# click on the link for the Cerberus hemisphere
browser.click_link_by_partial_text('Syrtis')
# click on the link for the Cerberus hemisphere
browser.click_link_by_partial_text('Open')
# create a soup item
syrtis_html = browser.html
syrtis_soup = BeautifulSoup(syrtis_html, 'html.parser')
syrtis = syrtis_soup.body.find('img', class_ = 'wide-image')
syrtis_img = syrtis['src']
hem_base_url = 'https://astrogeology.usgs.gov'
syrtis_url = hem_base_url + syrtis_img
# print(syrtis_url)
# #### Valles hemisphere
# define url and open in browser
time.sleep(3)
mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemispheres_url)
# click on the link for the Valles hemisphere
browser.click_link_by_partial_text('Valles')
# click on the link for the Valles hemisphere
browser.click_link_by_partial_text('Open')
# create a soup item
valles_html = browser.html
valles_soup = BeautifulSoup(valles_html, 'html.parser')
valles = valles_soup.body.find('img', class_ = 'wide-image')
valles_img = valles['src']
hem_base_url = 'https://astrogeology.usgs.gov'
valles_url = hem_base_url + valles_img
# print(valles_url)
# #### Define list of dictionaries that include each hemisphere
hemispheres_image_urls = [
{"title": "Valles Marineris Hemisphere", "img_url": valles_url},
{"title": "Cerberus Hemisphere", "img_url": cerberus_url},
{"title": "Schiaparelli Marineris Hemisphere", "img_url": schiap_url},
{"title": "Syrtis Major Hemisphere", "img_url": syrtis_url}
]
# dictionary should be returned
mars_dict = {
'headline': news_title,
'paragraph': news_p,
'featuredimage': featured_image_url,
# 'currentweather': mars_weather,
'factstable': mars_facts_html,
"va_title": "Valles Marineris Hemisphere", "va_img_url": valles_url,
"ce_title": "Cerberus Hemisphere", "ce_img_url": cerberus_url,
"sc_title": "Schiaparelli Marineris Hemisphere", "sc_img_url": schiap_url,
"sy_title": "Syrtis Major Hemisphere", "sy_img_url": syrtis_url}
# print(mars_dictionary)
browser.quit()
return mars_dict
| lisaweinst/web-scraping-challenge | scrape_mars.py | scrape_mars.py | py | 6,535 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "splinter.Browser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_... |
19795243462 | import csv
from collections import defaultdict, OrderedDict
import itertools
import json
class clicknode:
    """One click-log record turned into a graph node.

    Class-level counters hand out a unique ``id`` per node and a stable
    integer group id per REGION_VIEW_ID, shared across all instances.
    """
    node_count = itertools.count()   # monotonically increasing node ids
    group_count = itertools.count()  # next free group id
    group_map = {}                   # REGION_VIEW_ID -> group id

    def __init__(self, **nodedict):
        # Register this node's region view in the shared group map.
        group = nodedict['REGION_VIEW_ID']
        if group not in clicknode.group_map:
            clicknode.group_map[group] = next(clicknode.group_count)
        # use dictionary to populate object's fields
        self.__dict__.update(nodedict)
        self.id = next(clicknode.node_count)
        # count == 1: each fresh node is a single entity (used in merging group of nodes)
        self.count = 1

    def to_JSON(self):
        # Serialize via __dict__ so every CSV-derived field is included.
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def __str__(self):
        return str(self.id)+" "+self.REGION_VIEW_ID+" "+self.CLIENT_ID
class clicklink:
    """Directed edge between two click nodes, carrying a weight object."""

    def __init__(self, nodea, nodeb, edge):
        # Public attribute names are part of the interface: callers read
        # .source, .dest and .linkwt directly.
        self.source = nodea
        self.dest = nodeb
        self.linkwt = edge

    def __str__(self):
        parts = (self.source, self.dest, self.linkwt)
        return ";".join(str(part) for part in parts)
class linkwt:
    # Base class for edge weights.  Subclasses set ``name`` to the attribute
    # of the destination node that seeds the weight; instantiating the base
    # class directly would fail (getattr with name=None raises TypeError).
    name = None

    def __init__(self, src, dest):
        self.count = 1                       # number of raw edges merged into this weight
        self.val = getattr(dest, self.name)  # initial weight taken from the destination node
        self.length = 80                     # presumably display length for the front end -- TODO confirm
class countwt(linkwt):
    """Frequency weight: merging simply accumulates counts and values."""
    name = "count"

    def merge(self, linkwt):
        # Both the value and the number of merged raw edges add up.
        self.val = self.val + linkwt.val
        self.count = self.count + linkwt.count
class responsetimewt(linkwt):
    # Response-time weight: merging keeps a count-weighted running average.
    name = "RESPONSE_TIME"

    def merge(self, linkwt):
        # Weighted mean of the two averages.  NOTE(review): with integer
        # inputs on Python 2 this is floor division -- confirm RESPONSE_TIME
        # values are floats.
        self.val = (self.count*self.val + linkwt.val*linkwt.count)/(self.count+linkwt.count)
        self.count += linkwt.count
# make nodes from a click logs of one user
def make_nodes(click_session):
    """Build clicknode objects from one user's click log.

    Sorts the raw rows chronologically (by DATE, then STARTTIME), skips
    notification-page events, and wraps each remaining row in a clicknode.
    The unused ``last``/``links``/``link_map`` locals of the original were
    dead code and have been removed.

    :param click_session: list of dict rows from the click log (sorted in place)
    :return: dict with a single key "nodes" -> list of clicknode objects
    """
    click_session.sort(key=lambda row: [row['DATE'], row['STARTTIME']])
    nodes = []
    for row in click_session:
        # Notification popups are navigation noise -- drop them.
        if row['REGION_VIEW_ID'] == '/AtkNotificationFlowTF/AtkNotificationPage':
            continue
        nodes.append(clicknode(**row))
    return {"nodes": nodes}
# make links from the sequence of clicks based on the node field and link type given. such as response time links between all component types or count(frequency) links between all client ids
# returns list of nodes (id, group), list of edges (src, dest, linkwt)
def make_links(nodes, field, link_type):
    """Collapse a node sequence into a graph keyed by (field value, RVID).

    Consecutive clicks become directed links; repeated (src, dest) pairs are
    merged through the link weight's ``merge`` method.

    :param nodes: ordered clicknode list; ``None`` entries mark session breaks
    :param field: node attribute combined with REGION_VIEW_ID as grouping key
    :param link_type: linkwt subclass used to weight each edge
    :return: (node_map, link_map), or None when there is nothing to link
    """
    # Ordered so that the index of a key is stable as the dict grows; link
    # endpoints are stored as positional indexes into this dict.
    node_map = OrderedDict()
    if len(nodes) <= 1:
        return None
    last = nodes[0]
    link_map = {}
    # get the grouping field of an object dynamically
    node_map[(getattr(last, field), last.REGION_VIEW_ID)] = nodes[0]
    for node in nodes[1:]:
        # None nodes are breaks representing change of sessions
        if node is None:
            continue
        key = (getattr(node, field), node.REGION_VIEW_ID)
        # NOTE(review): the original guard compared the node *object* against
        # the tuple keys and was therefore always true; the net effect was an
        # unconditional assignment keeping the latest node per key, which this
        # preserves.
        node_map[key] = node
        # list(...) keeps this working on Python 3 (dict.keys() has no
        # .index() there); on Python 2 it is equivalent to keys().index().
        dest = list(node_map).index(key)
        src = list(node_map).index((getattr(last, field), last.REGION_VIEW_ID))
        edge = link_type(last, node)
        if (src, dest) not in link_map:
            link_map[(src, dest)] = clicklink(src, dest, edge)
        else:
            link_map[(src, dest)].linkwt.merge(edge)
        last = node
    return (node_map, link_map)
# to put all elements of the same RVID together, create extra links of 0 weight between all nodes of the same RVID
def converge_rvid_nodes(response):
    """Add zero-weight links between every pair of nodes sharing a group.

    The extra edges pull nodes with the same RVID together in the force
    layout without affecting the real link weights.  ``response`` is
    mutated in place.
    """
    nodes = response["nodes"]
    links = response["links"]
    total = len(nodes)
    for left in range(total):
        for right in range(left + 1, total):
            if nodes[left]["group"] != nodes[right]["group"]:
                continue
            links.append({
                "source": left,
                "target": right,
                "value": 0,
                "len": 40,
            })
# input from make_links()
# outputs json string format to be send as response
def jsonify_data(node_data, link_data):
    """Convert make_links() output into a d3-style nodes/links dict.

    :param node_data: OrderedDict (field, group) -> node object
    :param link_data: dict (src, dest) -> clicklink
    :return: {"nodes": [...], "links": [...]} ready to serialize as JSON
    """
    nodes = [
        {"name": field, "group": group, "prop": node.__dict__}
        for (field, group), node in node_data.items()
    ]
    links = [
        {"source": link.source, "target": link.dest,
         "value": 1, "len": link.linkwt.length}
        for link in link_data.values()
    ]
    return {"nodes": nodes, "links": links}
def parse(lines):
    """Group click rows by user (DSID).

    The original also built per-RVID and per-component-type dicts that were
    never returned or used; that dead work has been removed.

    :param lines: iterable of dict rows from the click log
    :return: defaultdict mapping DSID -> list of that user's rows
    """
    users = defaultdict(list)
    for line in lines:
        users[line['DSID']].append(line)
    return users
# pick the session numbered "num" from click history data
def session_fetch(user_data, num, envfilters=()):
    """Pick the ``num``-th user session, skipping filtered environments.

    Fixes two issues in the original: the mutable default argument, and the
    Python-3 incompatibility of calling len()/indexing on a ``filter`` object
    (on Python 2 ``filter`` returned a list, so behaviour is unchanged there).

    :param user_data: dict DSID -> list of click rows (as built by parse())
    :param num: session index; wrapped with modulo so any int is valid
    :param envfilters: ENVIRONMENT values to exclude
    :return: make_nodes() result for the chosen session
    """
    users = [u for u in user_data
             if user_data[u][0]['ENVIRONMENT'] not in envfilters]
    num = num % len(users)
    return make_nodes(user_data[users[num]])
def longest_session(users):
    """Return make_nodes() for the user with the most recorded clicks."""
    # Ties break on the larger user key, matching max() over [len, key] pairs.
    sized = ([len(session), user_id] for user_id, session in users.items())
    busiest = max(sized)[1]
    return make_nodes(users[busiest])
def all_sessions(users):
    """Concatenate every user's nodes, separated by None session breaks."""
    combined = []
    for session in users.values():
        combined.extend(make_nodes(session)["nodes"])
        # None acts as a sentinel between sessions for make_links().
        combined.append(None)
    return {"nodes": combined}
| arbazkhan002/Clix | clickparser.py | clickparser.py | py | 5,102 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.count",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
... |
72165174909 | # -*- coding:utf-8 -*-
# ! usr/bin/env python3
"""
Created on 28/12/2020 9:16
@Author: XINZHI YAO
"""
import os
import argparse
def pubtator_split(pubtator_file: str, num_per_file: int,
                   save_path: str):
    """Split a PubTator-format file into chunks of roughly ``num_per_file`` documents.

    A new document is recognized by its title line (``PMID|t|...``); after
    every ``num_per_file`` titles the current output file is closed and a
    new one named ``<prefix>.<index>.txt`` is opened under ``save_path``.

    NOTE(review): the rollover fires *before* the triggering title is
    written, so the first chunk holds num_per_file - 1 documents -- confirm
    whether that off-by-one is acceptable.
    """
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    split_file_idx = 0   # suffix of the output chunk currently being written
    file_save_num = 0    # number of title lines (= documents) seen so far
    base_prefix = os.path.basename(pubtator_file).split('.')[0]
    save_file = f'{save_path}/{base_prefix}.{split_file_idx}.txt'
    wf = open(save_file, 'w')
    with open(pubtator_file) as f:
        for line in f:
            l = line.strip().split('|')
            if l == ['']:
                # Blank separator line: no special handling here; it falls
                # through to the final else below and is written stripped.
                pass
                # wf.write('\n')
            if len(l) > 2:
                if l[1] == 't':
                    # Title line => new document; maybe roll over to a new file.
                    file_save_num += 1
                    if file_save_num % num_per_file == 0:
                        print(f'{base_prefix}.{split_file_idx}.txt save done.')
                        wf.close()
                        split_file_idx += 1
                        save_file = f'{save_path}/{base_prefix}.{split_file_idx}.txt'
                        wf = open(save_file, 'w')
                    wf.write(f'{line.strip()}\n')
                elif l[1] == 'a':
                    # Abstract line stays with the current document.
                    wf.write(f'{line.strip()}\n')
            else:
                # Annotation rows (tab-separated, no '|') and blank separators.
                wf.write(f'{line.strip()}\n')
    print(f'{base_prefix}.{split_file_idx}.txt save done.')
    wf.close()
if __name__ == '__main__':
    # CLI entry point: -pf input PubTator file, -pn documents per chunk,
    # -sp output directory for the split files.
    parser = argparse.ArgumentParser(description='PubTator Split.')
    parser.add_argument('-pf', dest='pubtator_file', type=str, required=True)
    parser.add_argument('-pn', dest='pubtator_num_per_file', type=int,
                        default=2000, help='default: 2000')
    parser.add_argument('-sp', dest='split_path', type=str, required=True)
    args = parser.parse_args()
    pubtator_split(args.pubtator_file, args.pubtator_num_per_file, args.split_path)
| YaoXinZhi/BioNLP-Toolkit | Split_PubTator_File.py | Split_PubTator_File.py | py | 1,971 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_num... |
3940897296 | import numpy as np
import torch
from torchvision import models
import torch.nn as nn
# from resnet import resnet34
# import resnet
from torch.nn import functional as F
class ConvBnRelu(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU.

    NOTE(review): the ``norm_layer`` argument is accepted but ignored --
    nn.BatchNorm2d is hard-wired below; confirm whether custom norm layers
    were intended to be supported.
    """
    def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
                 groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
                 has_relu=True, inplace=True, has_bias=False):
        super(ConvBnRelu, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
                              stride=stride, padding=pad,
                              dilation=dilation, groups=groups, bias=has_bias)
        self.has_bn = has_bn
        if self.has_bn:
            self.bn = nn.BatchNorm2d(out_planes)
        self.has_relu = has_relu
        if self.has_relu:
            self.relu = nn.ReLU(inplace=inplace)

    def forward(self, x):
        # conv -> (bn) -> (relu); each post-stage is optional via has_* flags.
        x = self.conv(x)
        if self.has_bn:
            x = self.bn(x)
        if self.has_relu:
            x = self.relu(x)
        return x
class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2 with a residual shortcut.

    When input and output channel counts differ, the shortcut is projected
    through a 1x1 conv + BN before the addition.

    NOTE(review): ``reduction`` is unused -- presumably a leftover from a
    squeeze-excite block (see the commented-out self.se call); confirm.
    '''
    def __init__(self, in_ch, out_ch, reduction=16):
        super(double_conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU()
        )
        # 1x1 projection used when the residual's channels need matching.
        self.channel_conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

    def forward(self, x):
        residual = x
        x = self.conv(x)
        # x = self.se(x)
        if residual.shape[1] != x.shape[1]:
            residual = self.channel_conv(residual)
        x += residual
        return x
class up_edge(nn.Module):
    """Decoder upsampling block that also concatenates an edge feature map.

    Upsamples the decoder tensor, zero-pads it to the encoder tensor's size,
    then runs double_conv on the concatenation [edge, encoder, decoder].
    """
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up_edge, self).__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
        # NOTE(review): sigmoid and change_ch are built but never used in
        # forward() -- confirm they are dead.
        self.sigmoid = nn.Sigmoid()
        self.change_ch = nn.Conv2d(int(in_ch), int(in_ch/2), kernel_size=1)

    def forward(self, x1, x2, edge):
        # x1: decoder features, x2: encoder skip features, edge: edge map
        x1 = self.up(x1)
        # Zero-pad so spatial sizes match before concatenation.
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))
        x = torch.cat([edge, x2, x1], dim=1)
        x = self.conv(x)
        return x
class up(nn.Module):
    """Standard decoder upsampling block: upsample, pad, concat skip, conv."""
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
        self.sigmoid = nn.Sigmoid()  # NOTE(review): unused in forward()
        # 1x1 conv halving decoder channels when they do not match the skip.
        self.change_ch = nn.Conv2d(int(in_ch), int(in_ch/2), kernel_size=1)

    def forward(self, x1, x2):
        # x1: decoder features, x2: encoder skip features
        x1 = self.up(x1)
        # Zero-pad so spatial sizes match before concatenation.
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))
        if x2.shape[1] != x1.shape[1]:
            x1 = self.change_ch(x1)
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
class outconv(nn.Module):
    """Final 1x1 projection head, with optional 2-D dropout in front."""

    def __init__(self, in_ch, out_ch, dropout=False, rate=0.1):
        super(outconv, self).__init__()
        self.dropout = dropout
        if dropout:
            # Announce the dropout rate once at construction time.
            print('dropout', rate)
            self.dp = nn.Dropout2d(rate)
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        out = self.dp(x) if self.dropout else x
        return self.conv(out)
def autopad(k, p=None):
    """Return 'same'-style padding for kernel size k unless p is given."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    # Per-dimension kernel sizes get per-dimension padding.
    return [size // 2 for size in k]
class dual_down(nn.Module):
    """Fuse a higher-resolution map (strided down 2x) with a lower one.

    x1 is downsampled by a stride-2 3x3 conv, concatenated with x2 along the
    channel axis, then reduced to ``out_ch`` channels with a 1x1 conv.
    """
    def __init__(self, in_ch, out_ch):
        super(dual_down, self).__init__()
        # Stride-2 conv halves the spatial size of x1 before fusion.
        self.conv1 = nn.Sequential(nn.Conv2d(in_ch, in_ch, 3, 2, autopad(3, 1), groups=1), nn.ReLU(), nn.Dropout2d())
        self.conv2 = nn.Sequential(nn.Conv2d(2*in_ch, out_ch, 1), nn.ReLU(), nn.Dropout2d())

    def forward(self, x1, x2):
        # x1: larger-resolution features; x2: features already at target size
        x1 = self.conv1(x1)
        x = torch.cat([x1, x2], dim=1)
        x = self.conv2(x)
        return x
class atten_down(nn.Module):
    """Modulate a mask feature map with an attention map derived from edges."""
    def __init__(self, in_ch):
        super(atten_down, self).__init__()
        # 3x3 conv + sigmoid producing an attention map in (0, 1).
        self.edge_atten = nn.Sequential(nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1),
                                        nn.Sigmoid())
        # NOTE(review): self.conv is constructed but never used in forward();
        # the second edge_atten call below may have been meant to be self.conv.
        self.conv = nn.Conv2d(in_ch, in_ch, kernel_size=3, bias=False)
        self.bn = nn.BatchNorm2d(in_ch, eps=0.001, momentum=0.03)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, mask, edge):
        e_atten = self.edge_atten(edge)
        # edge_atten is applied to the mask branch too (shared weights) --
        # see the NOTE in __init__.
        mask = self.act(self.bn(self.edge_atten(mask)))
        mask = mask*e_atten
        return mask
| Winterspringkle/EIANet | models/master.py | master.py | py | 5,598 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
... |
11194307443 | from typing import Tuple, Optional
import albumentations as A
import cv2
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
import os
from PIL import Image
from tqdm import tqdm
import pandas as pd
import pywt
import logging
from utils.image_utils import random_crop_with_transforms, load_image, split_by_wavelets
from utils.tensor_utils import preprocess_image
class WaveletSuperSamplingDataset(Dataset):
    """Random-crop dataset producing wavelet targets for super-sampling.

    Each sample is a triple: (YCrCb LL input, 9-channel high-frequency
    wavelet ground truth, full-resolution YCrCb crop).
    """
    def __init__(self, folder_path, window_size: int = 224, dataset_size: int = 1000):
        # ``dataset_size`` is virtual: every access draws a random crop from
        # a random image, so the epoch length is arbitrary.
        images_names_list = os.listdir(folder_path)
        images_names_list.sort()
        self.images_paths = [
            os.path.join(folder_path, image_name)
            for image_name in images_names_list
        ]
        self.window_size = window_size
        self.dataset_size = dataset_size
        self.images_count = len(self.images_paths)
        # Candidate downscale interpolations; None means "use the wavelet LL".
        self.interpolations = [
            cv2.INTER_AREA,
            cv2.INTER_LANCZOS4,
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_CUBIC,
            None
        ]

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # ``idx`` is ignored: every call draws a fresh random image and crop.
        image_idx = np.random.randint(0, self.images_count)
        image = load_image(self.images_paths[image_idx])
        if min(image.shape[:2]) < self.window_size:
            logging.info('Image {} so small, resizing!'.format(self.images_paths[image_idx]))
            image = cv2.resize(image, (self.window_size + 5, self.window_size + 5), interpolation=cv2.INTER_AREA)
        crop = random_crop_with_transforms(
            image1=image,
            window_size=self.window_size
        )
        selected_inter_method: Optional[int] = self.interpolations[np.random.randint(0, len(self.interpolations))]
        # TODO: Add transform which changed OpenCV image to LL wavelet representation
        # NOTE(review): the next line forces the wavelet-LL path, making the
        # random interpolation choice above dead code -- confirm intentional.
        selected_inter_method = None
        ycrcb_ll_crop: Optional[np.ndarray] = None
        if selected_inter_method is not None:
            # Interpolation path: downscale the crop and use it as the input.
            lr_crop = cv2.resize(
                crop,
                (self.window_size // 2, self.window_size // 2),
                interpolation=selected_inter_method
            )
            ycrcb_ll_crop = cv2.cvtColor(lr_crop, cv2.COLOR_RGB2YCrCb)
            ycrcb_ll_crop = ycrcb_ll_crop.astype(np.float32) / 255.0 * self.window_size * 2
        ycrcb_crop = cv2.cvtColor(crop, cv2.COLOR_RGB2YCrCb)
        y, cr, cb = cv2.split(ycrcb_crop)
        # LL, LH, HL, HH <- C  (one wavelet decomposition per channel)
        y_ll, y_lh, y_hl, y_hh = split_by_wavelets(y)
        cr_ll, cr_lh, cr_hl, cr_hh = split_by_wavelets(cr)
        cb_ll, cb_lh, cb_hl, cb_hh = split_by_wavelets(cb)
        if selected_inter_method is None:
            # Wavelet path: input is the merged LL bands of the three channels.
            ycrcb_ll_crop = cv2.merge((y_ll, cr_ll, cb_ll))
        # 9 channels of high-frequency wavelet coefficients (ground truth)
        gt_wavelets = cv2.merge((y_lh, y_hl, y_hh, cr_lh, cr_hl, cr_hh, cb_lh, cb_hl, cb_hh))
        return preprocess_image(ycrcb_ll_crop), preprocess_image(gt_wavelets, 0, 1), preprocess_image(ycrcb_crop)
class SuperSamplingDataset(WaveletSuperSamplingDataset):
    """Plain (low-res, high-res) pair dataset without wavelet targets."""
    def __init__(self, folder_path, window_size: int = 224, dataset_size: int = 1000):
        super().__init__(folder_path, window_size, dataset_size)
        # Override the parent's list: no None entry, so a real interpolation
        # method is always selected.
        self.interpolations = [
            cv2.INTER_AREA,
            cv2.INTER_LANCZOS4,
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_CUBIC
        ]

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        # ``idx`` is ignored: random image + random crop each call.
        image_idx = np.random.randint(0, self.images_count)
        image = load_image(self.images_paths[image_idx])
        if min(image.shape[:2]) < self.window_size:
            logging.info('Image {} so small, resizing!'.format(self.images_paths[image_idx]))
            image = cv2.resize(image, (self.window_size + 5, self.window_size + 5), interpolation=cv2.INTER_AREA)
        crop = random_crop_with_transforms(
            image1=image,
            window_size=self.window_size
        )
        selected_inter_method: int = self.interpolations[np.random.randint(0, len(self.interpolations))]
        # Low-res input is the crop downscaled by 2 with a random method.
        low_res_crop = cv2.resize(
            crop,
            (self.window_size // 2, self.window_size // 2),
            interpolation=selected_inter_method
        )
        return preprocess_image(low_res_crop, 0, 1), preprocess_image(crop, 0, 1)
| AlexeySrus/WPNet | research_pipelines/supersampling_with_wavelets/dataloader.py | dataloader.py | py | 4,411 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
1447221561 | from django.shortcuts import render
from .forms import ProductCreationForm
from .models import Product
from django.contrib import messages
import random
# Create your views here.
def create(request):
    """Create a new product, assigning a unique random 6-digit product number.

    GET renders an empty form; POST validates and saves, reporting success
    or failure through Django messages.
    """
    if request.method == 'POST':
        form = ProductCreationForm(request.POST, request.FILES)
        if form.is_valid():
            product = form.save(commit=False)
            # Draw random numbers until one is free.
            # BUG FIX: the uniqueness check used to query ``orderNo`` (a
            # non-matching field) instead of ``productNo``, so every lookup
            # errored and the first draw was always accepted unchecked.
            while True:
                productNo = random.randint(100000, 999999)
                if not Product.objects.filter(productNo=productNo).exists():
                    break
            product.productNo = productNo
            try:
                product.save()
            except Exception:
                messages.warning(request, 'Could not create product')
            else:
                messages.success(request, 'Product Created')
    else:
        form = ProductCreationForm()
    context = {
        "title": "Products",
        "form": form
    }
    return render(request, 'products/create.html.django', context)
def product(request, productId):
    """Render the detail page for a single product looked up by primary key."""
    # NOTE(review): Product.objects.get raises DoesNotExist for a bad id,
    # which surfaces as a 500 -- consider get_object_or_404.
    product = Product.objects.get(id=productId)
    context = {
        "title": "Product - "+product.productName,
        "product": product
    }
return render(request, 'products/product.html.django', context) | Thorium0/IntelRobotics-webserver | products/views.py | views.py | py | 1,284 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "forms.ProductCreationForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Product.objects.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "m... |
71971270909 | import tempfile
import os
import posixpath
import stat
import logging
import collections
from kubeflow.fairing import utils as fairing_utils
from kubeflow.fairing.preprocessors.base import BasePreProcessor
from kubeflow.fairing.builders.append.append import AppendBuilder
from kubeflow.fairing.deployers.job.job import Job
from kubeflow.fairing.deployers.tfjob.tfjob import TfJob
from kubeflow.fairing.constants import constants
from kubeflow.fairing.kubernetes import utils as k8s_utils
from kubeflow.fairing.cloud import storage
from kubeflow.fairing.cloud import gcp
from kubeflow.fairing.frameworks import lightgbm_dist_training_init
from kubeflow.fairing.frameworks import utils
logger = logging.getLogger(__name__)
TRAIN_DATA_FIELDS = ["data", "train", "train_data",
"train_data_file", "data_filename"]
TEST_DATA_FIELDS = ["valid", "test", "valid_data", "valid_data_file", "test_data",
"test_data_file", "valid_filenames"]
NUM_MACHINES_FILEDS = ["num_machines", "num_machine"]
PORT_FIELDS = ["local_listen_port", "local_port"]
MLIST_FIELDS = ["machine_list_filename",
"machine_list_file", "machine_list", "mlist"]
OUTPUT_MODEL_FIELDS = ["output_model", "model_output", "model_out"]
INPUT_MODEL_FIELDS = ["input_model", "model_input", "model_in"]
OUTPUT_RESULT_FIELDS = ["output_result", "predict_result", "prediction_result",
"predict_name", "prediction_name", "pred_name", "name_pred"]
MACHINE_FIELDS = ["machines", "workers", "nodes"]
TREE_LEARNER_FIELDS = ["tree_learner",
"tree", "tree_type", "tree_learner_type"]
ENTRYPOINT = posixpath.join(constants.DEFAULT_DEST_PREFIX, "entrypoint.sh")
LIGHTGBM_EXECUTABLE = "lightgbm"
CONFIG_FILE_NAME = "config.conf"
MLIST_FILE_NAME = "mlist.txt"
BLACKLISTED_FIELDS = PORT_FIELDS + MLIST_FIELDS + MACHINE_FIELDS
WEIGHT_FILE_EXT = ".weight"
DATA_PARALLEL_MODES = ["data", "voting"]
def _modify_paths_in_config(config, field_names, dst_base_dir):
    """Rewrite a comma-separated path field so it points under dst_base_dir.

    :param config: config entries (mutated in place)
    :param field_names: list of aliases for the field to rewrite
    :param dst_base_dir: path to destination directory
    :return: (original paths, rewritten paths); both empty if field is absent
    """
    field_name, field_value = utils.get_config_value(config, field_names)
    if field_value is None:
        return [], []
    src_paths = field_value.split(",")
    dst_paths = [
        posixpath.join(dst_base_dir, os.path.split(path)[-1])
        for path in src_paths
    ]
    config[field_name] = ",".join(dst_paths)
    return src_paths, dst_paths
def _update_maps(output_map, copy_files, src_paths, dst_paths):
"""update maps
:param output_map: output map entries
:param copy_files: files to be copied
:param src_paths: source paths
:param dst_paths: destination paths
"""
for src_path, dst_path in zip(src_paths, dst_paths):
if os.path.exists(src_path):
output_map[src_path] = dst_path
else:
copy_files[src_path] = dst_path
def _get_commands_for_file_ransfer(files_map):
    """Build shell commands that fetch each remote file to its target path.

    :param files_map: mapping of remote source path -> in-container destination
    :raises RuntimeError: when a remote source does not exist
    """
    commands = []
    for remote_path, local_path in files_map.items():
        store = storage.get_storage_class(remote_path)()
        if not store.exists(remote_path):
            raise RuntimeError("Remote file {} does't exist".format(remote_path))
        commands.append(store.copy_cmd(remote_path, local_path))
    return commands
def _generate_entrypoint(copy_files_before, copy_files_after, config_file,
                         init_cmds=None, copy_patitioned_files=None):
    """Write an entrypoint shell script and return its temp-file path.

    The script, in order: runs init commands, downloads this rank's data
    partition (distributed mode), downloads shared input files, runs the
    LightGBM executable, then uploads output files back to remote storage.

    :param copy_files_before: files to fetch before training
    :param copy_files_after: files to upload after training
    :param config_file: path of the LightGBM config inside the container
    :param init_cmds: commands run first, e.g. rank discovery (Default value = None)
    :param copy_patitioned_files: rank -> {src: dst} partitions (Default value = None)
    """
    buf = ["#!/bin/sh",
           "set -e"]
    if init_cmds:
        buf.extend(init_cmds)
    # In data prallel mode, copying files based on RANK of the worker in the cluster.
    # The data is partitioned (#partitions=#workers) and each worker gets one partition of the data.
    if copy_patitioned_files and len(copy_patitioned_files) > 0: #pylint:disable=len-as-condition
        buf.append("case $RANK in")
        for rank, files in copy_patitioned_files.items():
            buf.append("\t{})".format(rank))
            buf.extend(
                ["\t\t" + cmd for cmd in _get_commands_for_file_ransfer(files)])
            buf.append("\t\t;;")
        buf.append("esac")
    # copying files that are common to all workers
    buf.extend(_get_commands_for_file_ransfer(copy_files_before))
    buf.append("echo 'All files are copied!'")
    buf.append("{} config={}".format(LIGHTGBM_EXECUTABLE, config_file))
    for k, v in copy_files_after.items():
        # Reverse direction: the local result v is pushed back to remote k.
        storage_obj = storage.get_storage_class(k)()
        buf.append(storage_obj.copy_cmd(v, k))
    _, file_name = tempfile.mkstemp()
    with open(file_name, 'w') as fh:
        content = "\n".join(buf)
        fh.write(content)
        fh.write("\n")
    # Mark the script executable so the container can run it directly.
    st = os.stat(file_name)
    os.chmod(file_name, st.st_mode | stat.S_IEXEC)
    return file_name
def _add_train_weight_file(config, dst_base_dir):
    """Find optional LightGBM ``.weight`` companions of the training files.

    For every training data path, checks whether ``<path>.weight`` exists --
    first on the local filesystem, then via the matching cloud storage class.

    :param config: config entries
    :param dst_base_dir: destination directory inside the container
    :return: (existing weight paths, corresponding destination paths)
    """
    _, field_value = utils.get_config_value(config, TRAIN_DATA_FIELDS)
    if field_value is None:
        return [], []
    else:
        src_paths = field_value.split(",")
        weight_paths = [x+WEIGHT_FILE_EXT for x in src_paths]
        weight_paths_found = []
        weight_paths_dst = []
        for path in weight_paths:
            found = os.path.exists(path)
            if not found:
                # in case the path is not local, fall back to remote storage
                storage_class = storage.lookup_storage_class(path)
                if storage_class:
                    found = storage_class().exists(path)
            if found:
                weight_paths_found.append(path)
                file_name = os.path.split(path)[-1]
                weight_paths_dst.append(
                    posixpath.join(dst_base_dir, file_name))
        return weight_paths_found, weight_paths_dst
def generate_context_files(config, config_file_name, num_machines):
    """Assemble the docker build context for a LightGBM run.

    Produces the mapping of local files to in-image destinations: the
    rewritten config, the generated entrypoint script, helper modules, plus
    (indirectly, via the entrypoint) the remote files to copy at run time.

    :param config: config entries
    :param config_file_name: path of the original config file on disk
    :param num_machines: number of machines
    :return: OrderedDict local path -> path inside the image
    """
    # Using ordered dict to have consistent behaviour around order in which
    # files are copied in the worker nodes.
    output_map = collections.OrderedDict()             # baked into the image
    copy_files_before = collections.OrderedDict()      # fetched before training
    copy_files_after = collections.OrderedDict()       # uploaded after training
    copy_patitioned_files = collections.OrderedDict()  # per-rank data partitions
    # config will be modified inplace in this function so taking a copy
    config = config.copy()  # shallow copy is good enough
    _, tree_learner = utils.get_config_value(config, TREE_LEARNER_FIELDS)
    parition_data = tree_learner and tree_learner.lower() in DATA_PARALLEL_MODES
    remote_files = [(copy_files_before,
                     [TEST_DATA_FIELDS, INPUT_MODEL_FIELDS]),
                    (copy_files_after,
                     [OUTPUT_MODEL_FIELDS, OUTPUT_RESULT_FIELDS])]
    if parition_data:
        # Data-parallel: one training file per worker, each rank downloads
        # only its own partition.
        train_data_field, train_data_value = utils.get_config_value(
            config, TRAIN_DATA_FIELDS)
        train_files = train_data_value.split(",")
        if len(train_files) != num_machines:
            raise RuntimeError("#Training files listed in the {}={} field in the config should be "
                               "equal to the num_machines={} config value."\
                               .format(train_data_field, train_data_value, num_machines))
        weight_src_paths, weight_dst_paths = _add_train_weight_file(config,
                                                                    constants.DEFAULT_DEST_PREFIX)
        dst = posixpath.join(constants.DEFAULT_DEST_PREFIX, "train_data")
        config[train_data_field] = dst
        for i, f in enumerate(train_files):
            copy_patitioned_files[i] = collections.OrderedDict()
            copy_patitioned_files[i][f] = dst
            # Ship the matching .weight file with its partition when present.
            if f+WEIGHT_FILE_EXT in weight_src_paths:
                copy_patitioned_files[i][f +
                                         WEIGHT_FILE_EXT] = dst+WEIGHT_FILE_EXT
    else:
        train_data_field, train_data_value = utils.get_config_value(
            config, TRAIN_DATA_FIELDS)
        if len(train_data_value.split(",")) > 1:
            raise RuntimeError("{} has more than one file specified but tree-learner is set to {} "
                               "which can't handle multiple files. For distributing data across "
                               "multiple workers, please use one of {} as a tree-learner method. "
                               "For more information please refer the LightGBM parallel guide"
                               " https://github.com/microsoft/LightGBM/blob/master/docs/"
                               "Parallel-Learning-Guide.rst".format(
                                   train_data_field, tree_learner, DATA_PARALLEL_MODES))
        # Single training file: treat it like the other remote input fields.
        remote_files[0][1].insert(0, TRAIN_DATA_FIELDS)
        weight_src_paths, weight_dst_paths = _add_train_weight_file(config,
                                                                    constants.DEFAULT_DEST_PREFIX)
        _update_maps(output_map, copy_files_before, weight_src_paths, weight_dst_paths)
    # Rewrite every remote path field and route files into the right map.
    for copy_files, field_names_list in remote_files:
        for field_names in field_names_list:
            src_paths, dst_paths = _modify_paths_in_config(
                config, field_names, constants.DEFAULT_DEST_PREFIX)
            _update_maps(output_map, copy_files, src_paths, dst_paths)
    if len(output_map) + len(copy_files_before) + len(copy_patitioned_files) == 0:
        raise RuntimeError("Both train and test data is missing in the config")
    modified_config_file_name = utils.save_properties_config_file(config)
    config_in_docker = posixpath.join(
        constants.DEFAULT_DEST_PREFIX, CONFIG_FILE_NAME)
    output_map[modified_config_file_name] = config_in_docker
    # Keep the user's original config alongside for debugging.
    output_map[config_file_name] = config_in_docker + ".original"
    init_cmds = None
    if num_machines > 1:
        # Distributed mode: ship the rank-discovery init script as well.
        init_file = lightgbm_dist_training_init.__file__
        init_file_name = os.path.split(init_file)[1]
        output_map[init_file] = os.path.join(
            constants.DEFAULT_DEST_PREFIX, init_file_name)
        init_cmds = ["RANK=`python {} {} {}`".format(init_file_name,
                                                     CONFIG_FILE_NAME,
                                                     MLIST_FILE_NAME)]
    entrypoint_file_name = _generate_entrypoint(
        copy_files_before, copy_files_after, config_in_docker, init_cmds, copy_patitioned_files)
    output_map[entrypoint_file_name] = ENTRYPOINT
    output_map[utils.__file__] = os.path.join(
        constants.DEFAULT_DEST_PREFIX, "utils.py")
    return output_map
def execute(config,
            docker_registry,
            base_image="gcr.io/kubeflow-fairing/lightgbm:latest",
            namespace=None,
            stream_log=True,
            cores_per_worker=None,
            memory_per_worker=None,
            pod_spec_mutators=None):
    """Runs the LightGBM CLI in a single pod in user's Kubeflow cluster.

    Users can configure it to be a train, predict, and other supported tasks
    by using the right config.
    Please refere https://github.com/microsoft/LightGBM/blob/master/docs/Parameters.rst
    for more information on config options.

    :param config: config entries as a dict, or the path to a config file
    :param docker_registry: docker registry name
    :param base_image: base image (Default value = "gcr.io/kubeflow-fairing/lightgbm:latest")
    :param namespace: k8s namespace (Default value = None)
    :param stream_log: should that stream log? (Default value = True)
    :param cores_per_worker: number of cores per worker (Default value = None)
    :param memory_per_worker: memory value per worker (Default value = None)
    :param pod_spec_mutators: pod spec mutators (Default value = None)
    :return: the deployer that launched the workload (Job or TfJob)
    :raises RuntimeError: when ``config`` is neither dict nor str
    :raises ValueError: when num_machines is not an int >= 1
    """
    if not namespace and not fairing_utils.is_running_in_k8s():
        namespace = "kubeflow"
    namespace = namespace or fairing_utils.get_default_target_namespace()
    config_file_name = None
    if isinstance(config, str):
        config_file_name = config
        config = utils.load_properties_config_file(config)
    elif isinstance(config, dict):
        config_file_name = utils.save_properties_config_file(config)
    else:
        # BUG FIX: the message used to print type(dict) -- always
        # "<class 'type'>" -- instead of the actual argument's type.
        raise RuntimeError("config should be of type dict or string(filepath) "
                           "but got {}".format(type(config)))
    # Drop fields that fairing manages itself (ports, machine lists, ...).
    utils.scrub_fields(config, BLACKLISTED_FIELDS)
    _, num_machines = utils.get_config_value(config, NUM_MACHINES_FILEDS)
    num_machines = num_machines or 1
    if num_machines:
        try:
            num_machines = int(num_machines)
        except ValueError:
            raise ValueError("num_machines value in config should be an int >= 1 "
                             "but got {}".format(config.get('num_machines')))
        if num_machines < 1:
            raise ValueError(
                "num_machines value in config should >= 1 but got {}".format(num_machines))
    if num_machines > 1:
        # Workers discover each other through this file at run time.
        config['machine_list_file'] = "mlist.txt"
    output_map = generate_context_files(
        config, config_file_name, num_machines)
    preprocessor = BasePreProcessor(
        command=[ENTRYPOINT], output_map=output_map)
    builder = AppendBuilder(registry=docker_registry,
                            base_image=base_image, preprocessor=preprocessor)
    builder.build()
    pod_spec = builder.generate_pod_spec()
    pod_spec_mutators = pod_spec_mutators or []
    pod_spec_mutators.append(gcp.add_gcp_credentials_if_exists)
    pod_spec_mutators.append(k8s_utils.get_resource_mutator(
        cores_per_worker, memory_per_worker))
    if num_machines == 1:
        # non-distributed mode
        deployer = Job(namespace=namespace,
                       pod_spec_mutators=pod_spec_mutators,
                       stream_log=stream_log)
    else:
        # distributed mode
        deployer = TfJob(namespace=namespace,
                         pod_spec_mutators=pod_spec_mutators,
                         chief_count=1,
                         worker_count=num_machines-1,
                         stream_log=stream_log)
    deployer.deploy(pod_spec)
    return deployer
| kubeflow/fairing | kubeflow/fairing/frameworks/lightgbm.py | lightgbm.py | py | 14,637 | python | en | code | 336 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "posixpath.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "kubeflow.fairing.constants.constants.DEFAULT_DEST_PREFIX",
"line_number": 37,
"usage_type": "attribute"
... |
15191647327 | import matplotlib.pyplot as plot
from pymongo import MongoClient
import numpy as np
from sys import argv
import random
from constants import CONNECTION_STRING, DATABASE_NAME, CLUSTER_COLLECTION_NAME, GENRE_K_DICT
from q2 import get_k_g, main as q2_main, client as client2
from q3 import main as q3_main, client as client3
client = MongoClient(CONNECTION_STRING)
db = client.get_database(DATABASE_NAME)
def get_clusters(g: str) -> list:
    """Return one document per cluster for genre ``g``, each carrying the
    cluster id (``_id``) and all of its normalized ``kmeansNorm`` points."""
    pipeline = [
        {'$match': {'genres': g}},
        {'$group': {
            '_id': '$cluster',
            'points': {'$push': '$kmeansNorm'},
        }},
    ]
    collection = db.get_collection(CLUSTER_COLLECTION_NAME)
    return list(collection.aggregate(pipeline))
def get_random_color(palette=None):
    """Return a random two-character matplotlib style string (color + marker)
    that is not already present in ``palette``.

    :param palette: optional collection of style strings already in use;
        the result is guaranteed not to collide with them.  (The original
        used a mutable default argument ``[]``; ``None`` avoids that trap.)
    :returns: e.g. ``'bo'`` -- blue circles.
    """
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    shapes = ['o', 's', 'p', 'd', 'D', '*', '+']
    taken = palette if palette is not None else []
    # Re-draw until we find an unused combination (duplicated draw logic
    # in the original collapsed into a single helper expression).
    result = random.choice(colors) + random.choice(shapes)
    while result in taken:
        result = random.choice(colors) + random.choice(shapes)
    return result
def plot_points(clusters: list, g: str):
    """Scatter-plot every cluster's normalized (startYear, avgRating) points
    for genre ``g``, one random color/marker per cluster, and save the figure
    to ``./img/q5/<g>.jpg``.

    :param clusters: documents as returned by :func:`get_clusters`, each with
        a ``points`` list of ``[x, y]`` pairs.
    :param g: genre name, used as plot title and output file name.
    """
    plot.title(g)
    plot.xlabel('Normalized startYear')
    plot.ylabel('Normalized avgRating')
    plot.xticks(np.arange(0, 1.2, 0.1))
    plot.yticks(np.arange(0, 1.2, 0.1))
    # BUG FIX: the used-style palette must persist across clusters; the
    # original re-created it inside the loop, so two clusters could end up
    # with the same color/marker combination.
    cluster_colors = []
    for cluster in clusters:
        cluster_color = get_random_color(cluster_colors)
        cluster_colors.append(cluster_color)
        for point in cluster['points']:
            plot.plot(point[0], point[1], cluster_color, markersize=5)
    plot.savefig(f'./img/q5/{g}.jpg', format='jpg')
    plot.clf()
def main():
    """Run clustering (q2), labeling (q3) and plotting either for every
    genre in GENRE_K_DICT (no CLI args) or for the single k/genre pair
    taken from the command line."""
    if len(argv) == 1:
        jobs = [(GENRE_K_DICT[genre], genre) for genre in GENRE_K_DICT]
    else:
        jobs = [get_k_g()]
    for k, genre in jobs:
        q2_main(k, genre)
        q3_main(genre)
        plot_points(get_clusters(genre), genre)
    client2.close()
    client3.close()
if __name__ == "__main__":
main()
| GautamGadipudi/bd-assignment-8 | q5.py | q5.py | py | 2,250 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "constants.CONNECTION_STRING",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "constants.DATABASE_NAME",
"line_number": 12,
"usage_type": "argument"
},
{
"a... |
11314663313 | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Group(models.Model):
    """A named group that posts can optionally be assigned to.

    Admin-facing labels (verbose names, help texts) are in Russian.
    """
    title = models.CharField('название группы', max_length=200)
    slug = models.SlugField('слаг', unique=True)  # unique URL identifier
    description = models.TextField('описание')
    class Meta:
        verbose_name = 'группа'
        verbose_name_plural = 'группы'
    def __str__(self):
        return self.title
class Post(models.Model):
    """A user-authored text post with optional group, image and comments."""
    text = models.TextField(
        'текст', help_text='Перед публикацией заполните поле.')
    # Set once on creation; newest posts come first (see Meta.ordering).
    pub_date = models.DateTimeField(
        'дата публикации', auto_now_add=True)
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='posts', verbose_name='автор')
    # Optional: the post survives deletion of its group (SET_NULL).
    group = models.ForeignKey(
        Group, models.SET_NULL, blank=True,
        null=True, related_name='posts', verbose_name='группа',
        help_text='Выберите группу для публикации поста.')
    image = models.ImageField(
        'картинка', upload_to='posts/', blank=True, null=True,
        help_text='Выберите картинку для публикации поста.')
    class Meta:
        verbose_name = 'пост'
        verbose_name_plural = 'посты'
        ordering = ['-pub_date']
    def __str__(self):
        # Truncated text keeps admin listings compact.
        return self.text[:15]
class Comment(models.Model):
    """A user's comment on a post; deleted together with its post/author."""
    post = models.ForeignKey(
        Post, on_delete=models.CASCADE,
        related_name='comments', verbose_name='пост')
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='comments', verbose_name='автор')
    text = models.TextField(
        'текст комментария', help_text='Перед публикацией заполните поле.')
    # Set once on creation; newest comments come first (see Meta.ordering).
    created = models.DateTimeField(
        'дата публикации', auto_now_add=True)
    class Meta:
        verbose_name = 'комментарий'
        verbose_name_plural = 'комментарии'
        ordering = ['-created']
    def __str__(self):
        # Truncated text keeps admin listings compact.
        return self.text[:15]
class Follow(models.Model):
    """A subscription: ``user`` follows ``author``.

    The unique constraint below guarantees at most one Follow row per
    (user, author) pair.
    """
    user = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='follower', verbose_name='подписчик')
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='following', verbose_name='автор')
    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'author'],
                name='following_unique',
            ),
        ]
| zzstop/hw05_final | posts/models.py | models.py | py | 2,692 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_... |
35846798880 | import sys
import cv2 as cv
__doc__ = """Wrapper to create new classifiers from OpenCV or other libraries.
"""
class NormalBayes(object):
    """Normal (Gaussian) Bayes classifier backed by OpenCV.

    More info: http://docs.opencv.org/modules/ml/doc/normal_bayes_classifier.html
    """
    def __init__(self):
        self.model = cv.NormalBayesClassifier()
    def train(self, dataset, responses):
        """Train on a 2D np.float32 dataset with a 1D np.float32 response row.
        """
        self.model.train(dataset, responses)
    def predict(self, samples):
        """Return the predicted label for each row of ``samples``
        (a 2D numpy array of type np.float32).
        """
        predictions = self.model.predict(samples)[1]
        return [int(row[0]) for row in predictions]
class KNN(object):
    """k-nearest-neighbours classifier backed by OpenCV's ``cv.KNearest``.

    More info: http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
    """
    def __init__(self):
        self.model = cv.KNearest()
        self.max_K = 32
    def train(self, dataset, responses, params):
        """Train on a 2D np.float32 dataset with a 1D np.float32 response row.

        ``params`` may carry ``"nmax"``, the largest neighbour count usable
        at prediction time (defaults to 32).
        """
        self.max_K = params.get("nmax", 32)
        self.model.train(dataset, responses, maxK=self.max_K)
    def predict(self, samples, params):
        """Classify ``samples`` (2D np.float32) and return a label list.

        ``params`` may carry ``"nclass"``, the number of neighbours K to use
        (defaults to 7).  K must not exceed the maxK given while training;
        otherwise an empty list is returned.
        Refer here: http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
        """
        K = params.get("nclass", 7)
        if K > self.max_K:
            print("Bad argument: K")
            return []
        predictions = self.model.find_nearest(samples, K)[1]
        return [int(row[0]) for row in predictions]
class RandomTrees(object):
    """Random-forest classifier backed by OpenCV's ``cv.RTrees``.

    More info: http://docs.opencv.org/modules/ml/doc/random_trees.html
    """
    def __init__(self):
        self.model = cv.RTrees()
    def train(self, dataset, responses, params):
        """Train on a 2D np.float32 dataset with a 1D np.float32 response row.

        Recognised ``params`` keys (all optional):
          * ``maxdepth`` -- maximum tree depth (default 4)
          * ``criteria`` -- termination criteria; CV_TERMCRIT_ITER stops at
            the maximum forest size, CV_TERMCRIT_EPS at the target forest
            accuracy, and their sum (the default) uses both.
          * ``maxerror`` -- target forest accuracy (default 0.1)
          * ``maxtrees`` -- maximum number of trees (default 10)
        Refer here: http://docs.opencv.org/modules/ml/doc/random_trees.html
        """
        max_depth = params.get("maxdepth", 4)
        criteria = params.get(
            "criteria", cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS)
        max_error = params.get("maxerror", 0.1)
        max_num_trees = params.get("maxtrees", 10)
        parameters = dict(
            max_depth=max_depth, min_sample_count=1, use_surrogates=False,
            calc_var_importance=True, max_categories=10, nactive_vars=0,
            term_crit=(criteria, max_num_trees, max_error))
        self.model.train(dataset, cv.CV_ROW_SAMPLE, responses, params=parameters)
    def predict(self, samples):
        """Return the predicted label for each row of ``samples``
        (a 2D numpy array of type np.float32).
        """
        return [int(self.model.predict(sample)) for sample in samples]
| mmikulic/ProjektRasUzo | src/classifier.py | classifier.py | py | 4,064 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.NormalBayesClassifier",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.KNearest",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.RTrees",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_M... |
21354510025 | #
# @lc app=leetcode.cn id=337 lang=python3
#
# [337] 打家劫舍 III
#
from util import TreeNode
# @lc code=start
from functools import lru_cache
class Solution:
    def rob(self, root: "TreeNode") -> int:
        """Return the maximum total value obtainable from the tree of houses
        when no two directly-connected houses may both be robbed.

        Memoized DFS over (node, may_rob_this_node) states.  The annotations
        are quoted so the method does not require ``TreeNode`` to be imported
        at definition time; the original also kept an unused ``nums`` local,
        removed here.
        """
        @lru_cache(None)
        def dfs(node: "TreeNode", can_rob: bool) -> int:
            if node is None:
                return 0
            # Best when skipping this house: both children stay robbable.
            best = dfs(node.left, True) + dfs(node.right, True)
            if can_rob:
                # Rob this house; its children then become unavailable.
                best = max(best,
                           node.val + dfs(node.left, False) + dfs(node.right, False))
            return best
        return dfs(root, True)
# @lc code=end | Alex-Beng/ojs | FuckLeetcode/337.打家劫舍-iii.py | 337.打家劫舍-iii.py | py | 811 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "util.TreeNode",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "util.TreeNode",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "functools.lru_cache",
"line_number": 12,
"usage_type": "call"
}
] |
43702400504 | from django.conf.urls import include, url
from . import views
from rest_framework.urlpatterns import format_suffix_patterns
# URL routes for the search app.  REST endpoints additionally accept format
# suffixes (e.g. ``.json``) via format_suffix_patterns below.
# NOTE(review): `django.conf.urls.url` is deprecated in modern Django
# (removed in 4.0); migrating to `django.urls.re_path` would be a drop-in
# change -- confirm the project's Django version first.
urlpatterns = [
    # HTML views
    url(r'^$', views.index, name='index'),
    url(r'^yolog/$', views.yolo_index, name='yolo_index'),
    url(r'^result/$', views.result, name='result'),
    url(r'^list/$', views.RestaurantListView.as_view(), name="rlistview"),
    url(r'^restaurants/$', views.RestaurantAllListView.as_view(), name="rallview"),
    url(r'^restaurant/(?P<venue_id>[\w-]+)/$', views.restaurantwithid,name='rwithid'),
    url(r'^restaurants/map/$', views.RestaurantAllMapListView.as_view(), name="rlistmapview"),
    # REST API (v1)
    url(r'^api/v1/$',views.RestaurantList.as_view()),
    url(r'^api/v1/pizzalist/$',views.PizzaList.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| hassanabidpk/searchrestaurant | django/searchrestaurant/search/urls.py | urls.py | py | 781 | python | en | code | 129 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
36060788445 | from utils.parse_json import parse_json
from utils.save_json import save_json
import logging
def put_command(sala: str, nivel: int, chave: str):
    """Persist status ``chave`` for output ``nivel`` of room ``sala``
    in src/json/comandos.json."""
    commands = parse_json('src/json/comandos.json')
    commands[sala][0]['outputs'][nivel]['status'] = chave
    save_json('src/json/comandos.json', commands)
def get_command(sala: str, nivel: int):
    """Return the stored status of output ``nivel`` for room ``sala``."""
    commands = parse_json('src/json/comandos.json')
    return commands[sala][0]['outputs'][nivel]['status']
def swap_command(escolha_input: int, sala: str):
    """Toggle one output of room ``sala`` chosen by ``escolha_input``
    (1/2 = lamps, 3 = projector, 4 = air conditioner), or switch every
    output off when ``escolha_input`` is 5.

    The original repeated the same get/put/log sequence four times; this
    version is table-driven.  Log messages are kept byte-identical.
    """
    # escolha -> (output index, log message when turned on, when turned off)
    outputs = {
        1: (0, 'Lamapada 01 Ligada', 'Lamapada 01 Desligada'),
        2: (1, 'Lamapada 02 Ligada', 'Lamapada 02 Desligada'),
        3: (2, 'Projetor Ligada', 'Projetor Desligado'),
        4: (3, 'Ar-condicionado Ligado', 'Ar-condicionado Desligado'),
    }
    if escolha_input in outputs:
        nivel, on_msg, off_msg = outputs[escolha_input]
        if get_command(sala, nivel) == 'ON':
            put_command(sala, nivel, 'OFF')
            logging.info(off_msg)
        else:
            put_command(sala, nivel, 'ON')
            logging.info(on_msg)
    elif escolha_input == 5:
        # "All off": iterate in the declared order (lamps, projector, AC).
        for nivel, _, off_msg in outputs.values():
            put_command(sala, nivel, 'OFF')
            logging.info(off_msg)
{
"api_name": "utils.parse_json.parse_json",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.save_json.save_json",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.parse_json.parse_json",
"line_number": 13,
"usage_type": "call"
},
{
"a... |
30011949474 | from flask import Blueprint, request, abort
from epmanage.lib.auth import AuthController, AuthException
auth_component = Blueprint('auth_component', __name__)
@auth_component.route('/', methods=['POST'])
def auth_do():
"""Perform authentication"""
try:
return AuthController.get_token_agent(request.json)
except AuthException:
abort(503)
except:
abort(503)
@auth_component.route('/enroll', methods=['POST'])
def enroll_do():
    """Perform enrollment.

    Reads the JSON request body and enrolls the agent via AuthController;
    any failure is reported to the client as HTTP 503.
    """
    try:
        return AuthController.enroll_agent(request.json)
    except AuthException:
        abort(503)
    # NOTE(review): bare `except` also hides programming errors; consider
    # narrowing it and logging before aborting.
    except:
        abort(503)
| PokeSec/EPManage | epmanage/auth/auth.py | auth.py | py | 642 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "epmanage.lib.auth.AuthController.get_token_agent",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "epmanage.lib.auth.AuthController",
"line_number": 12,
"usage_type": "name... |
9928969059 | import numpy as np
import matplotlib.pyplot as plt
import cPickle
def plot_statistics(statistics, legends, title="", ylabel="", xlim=None, ylim=None, writeto="default.jpeg"):
    """Plot each curve in ``statistics`` against epoch number and save the
    figure to ``./<writeto>``.

    statistics -- list of sequences, one curve per entry
    legends    -- legend labels matching ``statistics`` one-to-one
    xlim/ylim  -- optional axis ranges forwarded to matplotlib
    """
    plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')
    plt.xlabel("Number of epochs")
    plt.ylabel(ylabel)
    plt.title(title)
    for stat in statistics:
        plt.plot(stat, linestyle="solid", marker=".")
    plt.grid()
    plt.legend(legends, loc='upper right')
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.savefig("./" + writeto)
def extract_records(path):
    """Load the pickled training-statistics channels stored at ``path``."""
    # BUG FIX: the original never closed the file it opened; a context
    # manager releases the handle deterministically.
    with open(path, "rb") as pkl_file:
        return cPickle.load(pkl_file)
def compare_records(ls_files, writeto, xlim, ylim, dataset, measure):
    """Plot one curve per (file, dataset) pair for a single measure.

    ls_files is a list of (path, description) tuples.
    dataset can be a list, measure can't be a list.
    """
    measure_lgd = {"loss": "Negative Log Likelihodd", "err": "Error rate"}
    datasets = dataset if isinstance(dataset, list) else [dataset]
    records = []
    legends = []
    for path, descript in ls_files:
        channels = extract_records(path)
        for ds in datasets:
            records.append(channels[ds][measure])
            legends.append(descript + " (" + measure + "_" + ds + ")")
    plot_statistics(records, legends=legends,
                    ylabel=measure_lgd[measure],
                    xlim=xlim, ylim=ylim,
                    writeto=writeto + "_" + measure + ".jpeg")
if __name__ == '__main__':
    name = "multi_view/comp_pretrain_valid"
    # Earlier (lasagne) experiment selections, kept for reference:
    # ls_files = [
    #     # ("./results/lasagne/mod_7_1/", ""),
    #     # ("./results/lasagne/mod_7_smaller1/", "smaller"),
    #     # ("./results/lasagne/mod_7_bigger1/", "bigger"),
    #     ("./results/lasagne/mod_7_smaller21/", "smaller with 3x3"),
    #     # ("./results/lasagne/mod_7_smaller31/", "3x3 and less neurons"),
    #     ("./results/lasagne/mod_7_smaller2_nomaxpool1/", "no maxpool at the end"),
    #     ("./results/lasagne/mod_7_smaller2_nomaxpool_3every1/", "only 3x3"),
    #     ("./results/lasagne/mod_7_top1/", "only 3x3 top")]
    # Current comparison: single-view vs. multi-view (mean / pretrained).
    ls_files = [
        ("./multi_view/c_1view.pkl", "1 view"),
        ("./multi_view/c_5views_mean.pkl", "5 views mean"),
        # ("./multi_view/c_5views_dropout_branches.pkl", "5 views mean "
        #                                                "dropout "
        #                                                "branches"),
        # ("./multi_view/c_5views_max.pkl", "5 views max"),
        # ("./multi_view/c_5views_l2.pkl", "5 views l2"),
        ("./multi_view/c_5views_pretrained.pkl", "5 views mean "
                                                 "pretrained")
    ]
    compare_records(ls_files, name, xlim=(0,200),
                    ylim=(0.06,0.15),
                    dataset=["valid"],
                    measure="err",)
| adbrebs/dogs_vs_cats | results/utilities.py | utilities.py | py | 3,026 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matp... |
8772037717 | import requests,re,threading,os, sys,random,copy,random,json,httpx,hashlib
from loguru import logger
from wmi import WMI
from urllib.request import urlopen
from time import sleep
from colorama import init, Fore, Style
from urllib.parse import urlencode
from typing import Union, List
__version__ = "2-5"
HWID = WMI().Win32_ComputerSystemProduct()[0].UUID
CLIENTS = {
"MWEB": {
'context': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211109.01.00'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"ANDROID": {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"ANDROID_EMBED": {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20',
'clientScreen': 'EMBED'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"TV_EMBED": {
"context": {
"client": {
"clientName": "TVHTML5_SIMPLY_EMBEDDED_PLAYER",
"clientVersion": "2.0"
},
"thirdParty": {
"embedUrl": "https://www.youtube.com/",
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
}
}
requestPayload = {
"context": {
"client": {
"clientName": "WEB",
"clientVersion": "2.20210224.06.00",
"newVisitorCookie": True,
},
"user": {
"lockedSafetyMode": False,
}
}
}
userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
searchKey = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
videoElementKey = 'videoRenderer'
channelElementKey = 'channelRenderer'
playlistElementKey = 'playlistRenderer'
shelfElementKey = 'shelfRenderer'
itemSectionKey = 'itemSectionRenderer'
continuationItemKey = 'continuationItemRenderer'
richItemKey = 'richItemRenderer'
hashtagVideosPath = ['contents', 'twoColumnBrowseResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'richGridRenderer', 'contents']
hashtagContinuationVideosPath = ['onResponseReceivedActions', 0, 'appendContinuationItemsAction', 'continuationItems']
contentPath = ['contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents']
fallbackContentPath = ['contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'richGridRenderer', 'contents']
continuationContentPath = ['onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems']
continuationKeyPath = ['continuationItemRenderer', 'continuationEndpoint', 'continuationCommand', 'token']
def getValue(source: dict, path: List[str]) -> Union[str, int, dict, None]:
    """Walk ``path`` (a mix of dict keys and list indices) through ``source``
    and return the value found, or ``None`` when a key is absent or a list
    is empty along the way.  Steps that are neither str nor int are skipped.
    """
    node = source
    for step in path:
        if type(step) is str:
            if step not in node.keys():
                return None
            node = node[step]
        elif type(step) is int:
            if len(node) == 0:
                return None
            node = node[step]
    return node
def getVideoId(videoLink: str) -> str:
    """Extract the video id from a youtu.be or youtube.com URL; anything
    else is assumed to already be a bare id and is returned unchanged."""
    if 'youtu.be' in videoLink:
        parts = videoLink.split('/')
        return parts[-2] if videoLink.endswith('/') else parts[-1]
    if 'youtube.com' in videoLink:
        start = videoLink.index('v=') + 2
        if '&' in videoLink:
            return videoLink[start:videoLink.index('&')]
        return videoLink[start:]
    return videoLink
class ComponentHandler:
    """Turns raw YouTube innertube JSON renderer objects into flat dicts.

    Every method extracts fields by walking hard-coded key paths with
    ``_getValue``; missing fields come back as None rather than raising.
    """
    def _getVideoComponent(self, element: dict, shelfTitle: str = None) -> dict:
        """Flatten a ``videoRenderer`` element into a result dict."""
        video = element[videoElementKey]
        component = {
            'type': 'video',
            'id': self._getValue(video, ['videoId']),
            'title': self._getValue(video, ['title', 'runs', 0, 'text']),
            'publishedTime': self._getValue(video, ['publishedTimeText', 'simpleText']),
            'duration': self._getValue(video, ['lengthText', 'simpleText']),
            'viewCount': {
                'text': self._getValue(video, ['viewCountText', 'simpleText']),
                'short': self._getValue(video, ['shortViewCountText', 'simpleText']),
            },
            'thumbnails': self._getValue(video, ['thumbnail', 'thumbnails']),
            'richThumbnail': self._getValue(video, ['richThumbnail', 'movingThumbnailRenderer', 'movingThumbnailDetails', 'thumbnails', 0]),
            'descriptionSnippet': self._getValue(video, ['detailedMetadataSnippets', 0, 'snippetText', 'runs']),
            'channel': {
                'name': self._getValue(video, ['ownerText', 'runs', 0, 'text']),
                'id': self._getValue(video, ['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
                'thumbnails': self._getValue(video, ['channelThumbnailSupportedRenderers', 'channelThumbnailWithLinkRenderer', 'thumbnail', 'thumbnails']),
            },
            'accessibility': {
                'title': self._getValue(video, ['title', 'accessibility', 'accessibilityData', 'label']),
                'duration': self._getValue(video, ['lengthText', 'accessibility', 'accessibilityData', 'label']),
            },
        }
        component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
        component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
        component['shelfTitle'] = shelfTitle
        return component
    def _getChannelComponent(self, element: dict) -> dict:
        """Flatten a ``channelRenderer`` element into a result dict."""
        channel = element[channelElementKey]
        component = {
            'type': 'channel',
            'id': self._getValue(channel, ['channelId']),
            'title': self._getValue(channel, ['title', 'simpleText']),
            'thumbnails': self._getValue(channel, ['thumbnail', 'thumbnails']),
            'videoCount': self._getValue(channel, ['videoCountText', 'runs', 0, 'text']),
            'descriptionSnippet': self._getValue(channel, ['descriptionSnippet', 'runs']),
            'subscribers': self._getValue(channel, ['subscriberCountText', 'simpleText']),
        }
        component['link'] = 'https://www.youtube.com/channel/' + component['id']
        return component
    def _getVideoFromChannelSearch(self, elements: list) -> list:
        """Flatten ``childVideoRenderer`` items (videos inside a playlist
        hit of a channel search) into result dicts."""
        channelsearch = []
        for element in elements:
            element = self._getValue(element, ["childVideoRenderer"])
            # NOTE: local name `json` shadows the stdlib module within this method.
            json = {
                "id": self._getValue(element, ["videoId"]),
                "title": self._getValue(element, ["title", "simpleText"]),
                "uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
                "duration": {
                    "simpleText": self._getValue(element, ["lengthText", "simpleText"]),
                    "text": self._getValue(element, ["lengthText", "accessibility", "accessibilityData", "label"])
                }
            }
            channelsearch.append(json)
        return channelsearch
    def _getChannelSearchComponent(self, elements: list) -> list:
        """Flatten channel-search result elements (videos, playlists and
        grid playlists) into result dicts; unknown shapes raise."""
        channelsearch = []
        for element in elements:
            responsetype = None
            if 'gridPlaylistRenderer' in element:
                element = element['gridPlaylistRenderer']
                responsetype = 'gridplaylist'
            elif 'itemSectionRenderer' in element:
                first_content = element["itemSectionRenderer"]["contents"][0]
                if 'videoRenderer' in first_content:
                    element = first_content['videoRenderer']
                    responsetype = "video"
                elif 'playlistRenderer' in first_content:
                    element = first_content["playlistRenderer"]
                    responsetype = "playlist"
                else:
                    raise Exception(f'Unexpected first_content {first_content}')
            elif 'continuationItemRenderer' in element:
                # for endless scrolling, not needed here
                # TODO: Implement endless scrolling
                continue
            else:
                raise Exception(f'Unexpected element {element}')
            # NOTE: local name `json` shadows the stdlib module within this method.
            if responsetype == "video":
                json = {
                    "id": self._getValue(element, ["videoId"]),
                    "thumbnails": {
                        "normal": self._getValue(element, ["thumbnail", "thumbnails"]),
                        "rich": self._getValue(element, ["richThumbnail", "movingThumbnailRenderer", "movingThumbnailDetails", "thumbnails"])
                    },
                    "title": self._getValue(element, ["title", "runs", 0, "text"]),
                    "descriptionSnippet": self._getValue(element, ["descriptionSnippet", "runs", 0, "text"]),
                    "uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
                    "views": {
                        "precise": self._getValue(element, ["viewCountText", "simpleText"]),
                        "simple": self._getValue(element, ["shortViewCountText", "simpleText"]),
                        "approximate": self._getValue(element, ["shortViewCountText", "accessibility", "accessibilityData", "label"])
                    },
                    "duration": {
                        "simpleText": self._getValue(element, ["lengthText", "simpleText"]),
                        "text": self._getValue(element, ["lengthText", "accessibility", "accessibilityData", "label"])
                    },
                    "published": self._getValue(element, ["publishedTimeText", "simpleText"]),
                    "channel": {
                        "name": self._getValue(element, ["ownerText", "runs", 0, "text"]),
                        "thumbnails": self._getValue(element, ["channelThumbnailSupportedRenderers", "channelThumbnailWithLinkRenderer", "thumbnail", "thumbnails"])
                    },
                    "type": responsetype
                }
            elif responsetype == 'playlist':
                json = {
                    "id": self._getValue(element, ["playlistId"]),
                    "videos": self._getVideoFromChannelSearch(self._getValue(element, ["videos"])),
                    "thumbnails": {
                        "normal": self._getValue(element, ["thumbnails"]),
                    },
                    "title": self._getValue(element, ["title", "simpleText"]),
                    "uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
                    "channel": {
                        "name": self._getValue(element, ["longBylineText", "runs", 0, "text"]),
                    },
                    "type": responsetype
                }
            else:
                json = {
                    "id": self._getValue(element, ["playlistId"]),
                    "thumbnails": {
                        "normal": self._getValue(element, ["thumbnail", "thumbnails", 0]),
                    },
                    "title": self._getValue(element, ["title", "runs", 0, "text"]),
                    "uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
                    "type": 'playlist'
                }
            channelsearch.append(json)
        return channelsearch
    def _getShelfComponent(self, element: dict) -> dict:
        """Flatten a ``shelfRenderer`` into its title and child elements."""
        shelf = element[shelfElementKey]
        return {
            'title': self._getValue(shelf, ['title', 'simpleText']),
            'elements': self._getValue(shelf, ['content', 'verticalListRenderer', 'items']),
        }
    def _getValue(self, source: dict, path: List[str]) -> Union[str, int, dict, None]:
        """Walk ``path`` (dict keys / list indices) through ``source``;
        return None on a missing key or empty list (method-level duplicate
        of the module-level ``getValue``)."""
        value = source
        for key in path:
            if type(key) is str:
                if key in value.keys():
                    value = value[key]
                else:
                    value = None
                    break
            elif type(key) is int:
                if len(value) != 0:
                    value = value[key]
                else:
                    value = None
                    break
        return value
class RequestHandler(ComponentHandler):
    """Builds, sends and parses innertube search requests via urllib."""
    def _makeRequest(self) -> None:
        ''' Fixes #47 '''
        # BUG FIX: `Request` was referenced below without ever being imported
        # (the module only imports `urlopen`), so every call died with a
        # NameError.  Imported locally to keep the fix self-contained.
        from urllib.request import Request
        requestBody = copy.deepcopy(requestPayload)
        requestBody['query'] = self.query
        requestBody['client'] = {
            'hl': self.language,
            'gl': self.region,
        }
        if self.searchPreferences:
            requestBody['params'] = self.searchPreferences
        if self.continuationKey:
            requestBody['continuation'] = self.continuationKey
        requestBodyBytes = json.dumps(requestBody).encode('utf_8')
        request = Request(
            'https://www.youtube.com/youtubei/v1/search' + '?' + urlencode({
                'key': searchKey,
            }),
            data=requestBodyBytes,
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'Content-Length': len(requestBodyBytes),
                'User-Agent': userAgent,
            }
        )
        try:
            self.response = urlopen(request, timeout=self.timeout).read().decode('utf_8')
        except (Exception,):
            # NOTE(review): unconditional retry with no back-off or attempt
            # limit -- a persistent network failure recurses until the
            # recursion limit is hit.  Kept as in the original.
            return self._makeRequest()
    def _parseSource(self) -> None:
        """Extract result items and the continuation token from self.response."""
        try:
            if not self.continuationKey:
                responseContent = self._getValue(json.loads(self.response), contentPath)
            else:
                responseContent = self._getValue(json.loads(self.response), continuationContentPath)
            if responseContent:
                for element in responseContent:
                    if itemSectionKey in element.keys():
                        self.responseSource = self._getValue(element, [itemSectionKey, 'contents'])
                    if continuationItemKey in element.keys():
                        self.continuationKey = self._getValue(element, continuationKeyPath)
            else:
                # Fall back to the rich-grid layout used by some responses.
                self.responseSource = self._getValue(json.loads(self.response), fallbackContentPath)
                self.continuationKey = self._getValue(self.responseSource[-1], continuationKeyPath)
        except:
            raise Exception('ERROR: Could not parse YouTube response.')
class RequestCore:
    """Shared HTTP layer: holds url/data/timeout and a proxy pool loaded
    from ``proxy.txt`` (one proxy per line as ``ip:port``,
    ``ip:port:login:password`` or ``login:password@ip:port``).

    All four request helpers retry indefinitely on failure with a randomly
    chosen proxy per attempt.
    """
    def __init__(self):
        self.url = None
        self.data = None
        self.timeout = 2
        self.proxy = []
        # BUG FIX: the original leaked the proxy.txt file handle; a context
        # manager closes it deterministically.
        with open("proxy.txt", "r") as proxy_file:
            lines = proxy_file.read().splitlines()
        for p in lines:
            p_split = p.split(':')
            if len(p_split) == 2:  # ip:port
                self.proxy.append({"http://": "http://" + p})
            elif len(p_split) == 4:  # ip:port:login:password
                self.proxy.append({"http://": f"http://{p_split[2]}:{p_split[3]}@{p_split[0]}:{p_split[1]}"})
            elif '@' in p:  # login:password@ip:port
                self.proxy.append({"http://": "http://" + p})
    def syncPostRequest(self) -> httpx.Response:
        """POST self.data to self.url, retrying until a 200 is received."""
        try:
            r = httpx.post(
                self.url,
                headers={"User-Agent": userAgent},
                json=self.data,
                timeout=self.timeout,
                proxies=random.choice(self.proxy)
            )
            if r.status_code == 200:
                return r
            else:
                return self.syncPostRequest()
        except (Exception,):
            return self.syncPostRequest()
    async def asyncPostRequest(self) -> httpx.Response:
        """Async POST of self.data to self.url, retrying until a 200."""
        try:
            async with httpx.AsyncClient(proxies=random.choice(self.proxy)) as client:
                r = await client.post(self.url, headers={"User-Agent": userAgent}, json=self.data, timeout=self.timeout)
                if r.status_code == 200:
                    return r
                else:
                    # BUG FIX: the original returned the bare coroutine here
                    # (missing `await`), so non-200 responses handed callers
                    # a coroutine object instead of an httpx.Response.
                    return await self.asyncPostRequest()
        except (Exception,):
            return await self.asyncPostRequest()
    def syncGetRequest(self) -> httpx.Response:
        """GET self.url (with consent cookie), retrying until a 200."""
        try:
            r = httpx.get(self.url, headers={"User-Agent": userAgent}, timeout=self.timeout,
                          cookies={'CONSENT': 'YES+1'}, proxies=random.choice(self.proxy))
            if r.status_code == 200:
                return r
            else:
                return self.syncGetRequest()
        except (Exception,):
            return self.syncGetRequest()
    async def asyncGetRequest(self) -> httpx.Response:
        """Async GET of self.url (with consent cookie), retrying until a 200."""
        try:
            async with httpx.AsyncClient(proxies=random.choice(self.proxy)) as client:
                r = await client.get(self.url, headers={"User-Agent": userAgent}, timeout=self.timeout,
                                     cookies={'CONSENT': 'YES+1'})
                if r.status_code == 200:
                    return r
                else:
                    return await self.asyncGetRequest()
        except (Exception,):
            return await self.asyncGetRequest()
class VideoCore(RequestCore):
    """Fetches a single video's metadata and/or stream formats from the
    YouTube InnerTube ``/player`` endpoint.

    ``componentMode`` selects which sections to build ('getInfo',
    'getFormats', or None for both). When ``enableHTML`` is True a second
    request is made with the MWEB client, whose response carries
    publish/upload dates the default client response omits.
    """

    def __init__(self, videoLink: str, componentMode: str, resultMode: int, timeout: int, enableHTML: bool, overridedClient: str = "ANDROID"):
        super().__init__()
        self.timeout = timeout
        self.resultMode = resultMode
        self.componentMode = componentMode
        self.videoLink = videoLink
        self.enableHTML = enableHTML
        # InnerTube client identity to impersonate (a key into CLIENTS).
        self.overridedClient = overridedClient

    # We call this when we use only HTML
    def post_request_only_html_processing(self):
        self.__getVideoComponent(self.componentMode)
        self.result = self.__videoComponent

    def post_request_processing(self):
        # Parse the raw JSON body, then assemble the requested components.
        self.__parseSource()
        self.__getVideoComponent(self.componentMode)
        self.result = self.__videoComponent

    def prepare_innertube_request(self):
        """Set ``self.url``/``self.data`` for the /player InnerTube call."""
        self.url = 'https://www.youtube.com/youtubei/v1/player' + "?" + urlencode({
            'key': searchKey,
            'contentCheckOk': True,
            'racyCheckOk': True,
            "videoId": getVideoId(self.videoLink)
        })
        # Deep copy so per-request mutation never leaks into the shared
        # CLIENTS template.
        self.data = copy.deepcopy(CLIENTS[self.overridedClient])

    async def async_create(self):
        """Async variant of sync_create: request, then build the result."""
        self.prepare_innertube_request()
        response = await self.asyncPostRequest()
        self.response = response.text
        if response.status_code == 200:
            self.post_request_processing()
        else:
            raise Exception('ERROR: Invalid status code.')

    def sync_create(self):
        """Perform the /player request and build ``self.result``."""
        self.prepare_innertube_request()
        response = self.syncPostRequest()
        self.response = response.text
        if response.status_code == 200:
            self.post_request_processing()
        else:
            raise Exception('ERROR: Invalid status code.')

    def prepare_html_request(self):
        # Same endpoint as prepare_innertube_request, but forced to the
        # MWEB client (see enableHTML in the class docstring).
        self.url = 'https://www.youtube.com/youtubei/v1/player' + "?" + urlencode({
            'key': searchKey,
            'contentCheckOk': True,
            'racyCheckOk': True,
            "videoId": getVideoId(self.videoLink)
        })
        self.data = CLIENTS["MWEB"]

    def sync_html_create(self):
        """Fetch the MWEB response and keep its parsed JSON."""
        self.prepare_html_request()
        response = self.syncPostRequest()
        self.HTMLresponseSource = response.json()

    async def async_html_create(self):
        """Async variant of sync_html_create."""
        self.prepare_html_request()
        response = await self.asyncPostRequest()
        self.HTMLresponseSource = response.json()

    def __parseSource(self) -> None:
        try:
            self.responseSource = json.loads(self.response)
        except Exception as e:
            # NOTE(review): the caught exception is discarded; consider
            # chaining it (`raise ... from e`) for easier debugging.
            raise Exception('ERROR: Could not parse YouTube response.')

    def __result(self, mode: int) -> Union[dict, str]:
        # NOTE(review): appears unused within this class — confirm before
        # relying on it.
        if mode == ResultMode.dict:
            return self.__videoComponent
        elif mode == ResultMode.json:
            return json.dumps(self.__videoComponent, indent=4)

    def __getVideoComponent(self, mode: str) -> None:
        """Assemble the result dict for the requested ``mode``."""
        videoComponent = {}
        if mode in ['getInfo', None]:
            try:
                responseSource = self.responseSource
            except:
                # sync/async_create was never run; may be filled from HTML below.
                responseSource = None
            if self.enableHTML:
                responseSource = self.HTMLresponseSource
            component = {
                'id': getValue(responseSource, ['videoDetails', 'videoId']),
                'title': getValue(responseSource, ['videoDetails', 'title']),
                'duration': {
                    'secondsText': getValue(responseSource, ['videoDetails', 'lengthSeconds']),
                },
                'viewCount': {
                    'text': getValue(responseSource, ['videoDetails', 'viewCount'])
                },
                'thumbnails': getValue(responseSource, ['videoDetails', 'thumbnail', 'thumbnails']),
                'description': getValue(responseSource, ['videoDetails', 'shortDescription']),
                'channel': {
                    'name': getValue(responseSource, ['videoDetails', 'author']),
                    'id': getValue(responseSource, ['videoDetails', 'channelId']),
                },
                'allowRatings': getValue(responseSource, ['videoDetails', 'allowRatings']),
                'averageRating': getValue(responseSource, ['videoDetails', 'averageRating']),
                'keywords': getValue(responseSource, ['videoDetails', 'keywords']),
                'isLiveContent': getValue(responseSource, ['videoDetails', 'isLiveContent']),
                'publishDate': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'publishDate']),
                'uploadDate': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'uploadDate']),
                'isFamilySafe': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'isFamilySafe']),
                'category': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'category']),
            }
            # Live content with a zero duration is treated as live right now.
            component['isLiveNow'] = component['isLiveContent'] and component['duration']['secondsText'] == "0"
            component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
            component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
            videoComponent.update(component)
        if mode in ['getFormats', None]:
            videoComponent.update(
                {
                    "streamingData": getValue(self.responseSource, ["streamingData"])
                }
            )
        if self.enableHTML:
            # The MWEB response carries dates the default client omits.
            videoComponent["publishDate"] = getValue(self.HTMLresponseSource, ['microformat', 'playerMicroformatRenderer', 'publishDate'])
            videoComponent["uploadDate"] = getValue(self.HTMLresponseSource, ['microformat', 'playerMicroformatRenderer', 'uploadDate'])
        self.__videoComponent = videoComponent
class ResultMode:
    # Output-format selectors used by the *Search/Video result() methods.
    # NOTE(review): attribute names shadow the builtins json/dict — values
    # are what matters here.
    json = 0  # serialise results with json.dumps
    dict = 1  # return plain Python dicts
class SearchMode:
    # URL-encoded InnerTube 'params' filters restricting a search to one
    # result type; passed as searchPreferences to SearchCore.
    videos = 'EgIQAQ%3D%3D'
    channels = 'EgIQAg%3D%3D'
    playlists = 'EgIQAw%3D%3D'
    livestreams = 'EgJAAQ%3D%3D'
class Video:
    """Static facade over VideoCore for one-shot video lookups."""

    @staticmethod
    def get(videoLink: str, mode: int = ResultMode.dict, timeout: int = None, get_upload_date: bool = False) -> Union[
        dict, str, None]:
        """Fetch metadata (and formats) for *videoLink*.

        When *get_upload_date* is True an extra MWEB request is issued
        first so publish/upload dates are available in the result.
        """
        vc = VideoCore(videoLink, None, mode, timeout, get_upload_date)
        if get_upload_date:
            vc.sync_html_create()
        vc.sync_create()
        return vc.result
class ChannelSearchCore(RequestCore, ComponentHandler):
    """Searches inside a single channel via the InnerTube /browse endpoint."""

    # NOTE(review): mutable class-level defaults are shared across instances
    # until shadowed by assignment.
    response = None
    responseSource = None
    resultComponents = []

    def __init__(self, query: str, language: str, region: str, searchPreferences: str, browseId: str, timeout: int):
        super().__init__()
        self.query = query
        self.language = language
        self.region = region
        self.browseId = browseId  # id of the channel being searched
        self.searchPreferences = searchPreferences
        self.continuationKey = None
        self.timeout = timeout

    def sync_create(self):
        """Run the search synchronously and store the parsed components."""
        self._syncRequest()
        self._parseChannelSearchSource()
        self.response = self._getChannelSearchComponent(self.response)

    async def next(self):
        """Fetch the next page asynchronously and return its components."""
        await self._asyncRequest()
        self._parseChannelSearchSource()
        self.response = self._getChannelSearchComponent(self.response)
        return self.response

    def _parseChannelSearchSource(self) -> None:
        """Extract the section list from the last tab of the browse response."""
        try:
            last_tab = self.response["contents"]["twoColumnBrowseResultsRenderer"]["tabs"][-1]
            if 'expandableTabRenderer' in last_tab:
                self.response = last_tab["expandableTabRenderer"]["content"]["sectionListRenderer"]["contents"]
            else:
                tab_renderer = last_tab["tabRenderer"]
                if 'content' in tab_renderer:
                    self.response = tab_renderer["content"]["sectionListRenderer"]["contents"]
                else:
                    # Tab without content: nothing to search.
                    self.response = []
        except:
            raise Exception('ERROR: Could not parse YouTube response.')

    def _getRequestBody(self):
        ''' Fixes #47 '''
        # Deep copy so the shared payload template is never mutated.
        requestBody = copy.deepcopy(requestPayload)
        requestBody['query'] = self.query
        requestBody['client'] = {
            'hl': self.language,
            'gl': self.region,
        }
        requestBody['params'] = self.searchPreferences
        requestBody['browseId'] = self.browseId
        self.url = 'https://www.youtube.com/youtubei/v1/browse' + '?' + urlencode({
            'key': searchKey,
        })
        self.data = requestBody

    def _syncRequest(self) -> None:
        ''' Fixes #47 '''
        self._getRequestBody()
        request = self.syncPostRequest()
        try:
            self.response = request.json()
        except:
            raise Exception('ERROR: Could not make request.')

    async def _asyncRequest(self) -> None:
        ''' Fixes #47 '''
        self._getRequestBody()
        request = await self.asyncPostRequest()
        try:
            self.response = request.json()
        except:
            raise Exception('ERROR: Could not make request.')

    def result(self, mode: int = ResultMode.dict) -> Union[str, dict]:
        '''Returns the search result.
        Args:
            mode (int, optional): Sets the type of result. Defaults to ResultMode.dict.
        Returns:
            Union[str, dict]: Returns JSON or dictionary.
        '''
        if mode == ResultMode.json:
            return json.dumps({'result': self.response}, indent=4)
        elif mode == ResultMode.dict:
            return {'result': self.response}
class SearchCore(RequestCore, RequestHandler, ComponentHandler):
    """Base class for site-wide searches via the InnerTube /search endpoint.

    Subclasses set ``self.searchMode`` — a (videos, channels, playlists)
    tuple of toggles — before calling ``sync_create`` / ``_getComponents``.
    """

    # NOTE(review): mutable class-level defaults are shared across instances
    # until shadowed by assignment.
    response = None
    responseSource = None
    resultComponents = []

    def __init__(self, query: str, limit: int, language: str, region: str, searchPreferences: str, timeout: int):
        super().__init__()
        self.query = query
        self.limit = limit  # max components collected per page
        self.language = language
        self.region = region
        self.searchPreferences = searchPreferences  # InnerTube 'params' filter
        self.timeout = timeout
        self.continuationKey = None  # pagination token from the last page

    def sync_create(self):
        """Issue the first search request and parse the raw response."""
        self._makeRequest()
        self._parseSource()

    def _getRequestBody(self):
        ''' Fixes #47 '''
        # Deep copy so the shared payload template is never mutated.
        requestBody = copy.deepcopy(requestPayload)
        requestBody['query'] = self.query
        requestBody['client'] = {
            'hl': self.language,
            'gl': self.region,
        }
        if self.searchPreferences:
            requestBody['params'] = self.searchPreferences
        if self.continuationKey:
            # A continuation token makes InnerTube return the next page.
            requestBody['continuation'] = self.continuationKey
        self.url = 'https://www.youtube.com/youtubei/v1/search' + '?' + urlencode({
            'key': searchKey,
        })
        self.data = requestBody

    def _makeRequest(self) -> None:
        self._getRequestBody()
        request = self.syncPostRequest()
        try:
            self.response = request.text
        except:
            raise Exception('ERROR: Could not make request.')

    async def _makeAsyncRequest(self) -> None:
        self._getRequestBody()
        request = await self.asyncPostRequest()
        try:
            self.response = request.text
        except:
            raise Exception('ERROR: Could not make request.')

    def result(self, mode: int = ResultMode.dict) -> Union[str, dict]:
        """Return the collected components as a dict or a JSON string."""
        if mode == ResultMode.json:
            return json.dumps({'result': self.resultComponents}, indent=4)
        elif mode == ResultMode.dict:
            return {'result': self.resultComponents}

    def _next(self) -> bool:
        """Fetch the next page; False when there is no continuation token."""
        if self.continuationKey:
            self.response = None
            self.responseSource = None
            self.resultComponents = []
            self._makeRequest()
            self._parseSource()
            self._getComponents(*self.searchMode)
            return True
        else:
            return False

    async def _nextAsync(self) -> dict:
        # NOTE(review): unlike _next(), this does not check continuationKey
        # first — confirm callers guard against a missing token.
        self.response = None
        self.responseSource = None
        self.resultComponents = []
        await self._makeAsyncRequest()
        self._parseSource()
        self._getComponents(*self.searchMode)
        return {
            'result': self.resultComponents,
        }

    def _getComponents(self, findVideos: bool, findChannels: bool, findPlaylists: bool) -> None:
        """Convert raw response elements into result components, honouring
        the per-type toggles and stopping once ``self.limit`` is reached.

        NOTE(review): ``findPlaylists`` is accepted but never used below.
        """
        self.resultComponents = []
        for element in self.responseSource:
            if videoElementKey in element.keys() and findVideos:
                self.resultComponents.append(self._getVideoComponent(element))
            if channelElementKey in element.keys() and findChannels:
                self.resultComponents.append(self._getChannelComponent(element))
            if shelfElementKey in element.keys() and findVideos:
                # Shelves group several videos under a shared title.
                for shelfElement in self._getShelfComponent(element)['elements']:
                    self.resultComponents.append(
                        self._getVideoComponent(shelfElement, shelfTitle=self._getShelfComponent(element)['title']))
            if richItemKey in element.keys() and findVideos:
                richItemElement = self._getValue(element, [richItemKey, 'content'])
                if videoElementKey in richItemElement.keys():
                    videoComponent = self._getVideoComponent(richItemElement)
                    self.resultComponents.append(videoComponent)
            if len(self.resultComponents) >= self.limit:
                break
class Search(SearchCore):
    """Search videos, channels and playlists with a single query."""

    def __init__(self, query: str, limit: int = 20, language: str = 'en', region: str = 'US', timeout: int = None):
        # (videos, channels, playlists) component toggles — all enabled.
        self.searchMode = (True, True, True)
        super().__init__(query, limit, language, region, None, timeout)
        self.sync_create()
        self._getComponents(*self.searchMode)

    def next(self) -> bool:
        """Fetch the next result page; False when results are exhausted."""
        return self._next()
class VideosSearch(SearchCore):
    """Search restricted to videos only (see SearchMode.videos filter)."""

    def __init__(self, query: str, limit: int, language: str = 'en', region: str = 'US', timeout: int = None):
        # Only the videos toggle is enabled.
        self.searchMode = (True, False, False)
        super().__init__(query, limit, language, region, SearchMode.videos, timeout)
        self.sync_create()
        self._getComponents(*self.searchMode)

    def next(self) -> bool:
        """Fetch the next result page; False when results are exhausted."""
        return self._next()
class ChannelSearch(ChannelSearchCore):
    """Search within one channel (identified by *browseId*)."""

    def __init__(self, query: str, browseId: str, language: str = 'en', region: str = 'US', searchPreferences: str = "EgZzZWFyY2g%3D", timeout: int = None):
        super().__init__(query, language, region, searchPreferences, browseId, timeout)
        self.sync_create()
class CustomSearch(SearchCore):
    """Search with a caller-supplied InnerTube filter (see SearchMode)."""

    def __init__(self, query: str, searchPreferences: str, limit: int = 20, language: str = 'en', region: str = 'US', timeout: int = None):
        self.searchMode = (True, True, True)
        super().__init__(query, limit, language, region, searchPreferences, timeout)
        self.sync_create()
        self._getComponents(*self.searchMode)

    def next(self) -> bool:
        """Fetch the next result page; False when results are exhausted.

        Fixed to return ``_next()``'s success flag like the sibling
        ``Search``/``VideosSearch`` classes; the original dropped it and
        always returned None.
        """
        return self._next()
# Initialise colorama so ANSI colours work on Windows consoles.
init()
logger.remove()  # remove loguru's default stderr handler
logger.add(sink=sys.stdout,
           format='<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <level>{message}</level>')
# Regexes: channel publication date, leading year, and a near-RFC-5322
# e-mail matcher (including quoted local parts and IP-literal domains).
re_year = '"publishedAt": "(.*)",'
re_onlyyear= r'^(\d+)-'
re_email = '(?:[A-Za-z0-9!#$%&\'*+\\/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&\'*+\\/=?^_`{|}~-]+)*|\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])'
class Engine():
    """Interactive YouTube e-mail scraper.

    On construction it (1) validates this machine's HWID against a
    licensing server, (2) self-updates when the server advertises a newer
    build, (3) prompts for filter settings, then (4) spawns worker threads
    that search YouTube and harvest e-mail addresses from video and
    channel data into emails.txt.
    """

    def __init__(self):
        # --- licence / HWID gate ---------------------------------------
        # The server echoes back a salted md5 of our HWID on success.
        check = requests.post('https://api.ytmailer.pro/index.php',data={'hwid':HWID})
        if check.status_code == 200:
            check = json.loads(check.text)
            if check['status'] == True:
                hwid_new = check['hwid']
                salt = 'ytsoft139392924992491dds'
                # Exit silently if the server's hash does not match ours.
                if (hashlib.md5((HWID+salt).encode())).hexdigest() != hwid_new:
                    sys.exit()
                else:
                    # Ask the server whether a newer build exists.
                    check_v = requests.post('https://api.ytmailer.pro/index.php',data={'hwid':HWID,'version':__version__}).json()
                    if check_v['status']:
                        logger.success(f'Найдена новая версия.. Обновляемся ({check_v["version"]})')
                        # Download the new executable, launch it, then quit.
                        with open(f'YTparser-{check_v["version"]}.exe','wb') as file:
                            file.write(requests.get(check_v['url']).content)
                        os.system(f'YTparser-{check_v["version"]}.exe')
                        sys.exit()
            else:
                # No subscription for this HWID.
                logger.info(f'Ваш HWID: {HWID}')
                logger.error('У вас нет подписки! Отправьте ваш HWID продавцу')
                input()
                sys.exit()
        else:
            # Licensing server unreachable / under maintenance.
            logger.error('Сервер на тех. Работах. Нажмите любую кнопку!')
            input()
            sys.exit()
        # --- inputs: API keys, search keywords, country blacklist, proxies.
        self.apis = self.read_file("API.txt")
        self.keys = self.read_file("keywords.txt")
        self.blackwords = self.read_file("blackwords.txt")
        self.proxys = self.read_file('proxy.txt')
        self.emails = 0  # e-mails written so far (shared across threads)
        os.system('title "@wxkssy | Tg bot: @qualityshop24_bot"')
        num_threads = int(input(Fore.GREEN + '> Enter number of threads: ' + Style.RESET_ALL))
        self.videocount = 0  # videos processed so far (shared across threads)
        # --- filter prompts: newest allowed channel registration year,
        # minimum view count, inclusive subscriber range, country blacklist.
        while (True):
            self.year = input(Fore.GREEN + '> Enter max channel reg-year: ' + Style.RESET_ALL)
            if self.year.isdigit():
                self.year = int(self.year)
                if (self.year > 2000):
                    break
        while (True):
            self.views = input(Fore.GREEN + '> Enter min channel views: ' + Style.RESET_ALL)
            if self.views.isdigit():
                self.views = int(self.views)
                break
        while True:
            # Expected format: "min-max".
            self.subs = input(Fore.GREEN + '> Enter min & max subs: ' + Style.RESET_ALL)
            if not '-' in self.subs:
                self.subs = input(Fore.GREEN + '> Enter min & max subs: ' + Style.RESET_ALL)
            else:
                self.subs = [int(self.subs.split('-')[0]), int(self.subs.split('-')[1])]
                if (self.subs[0] < self.subs[1]):
                    break
        self.blacklist = input(Fore.GREEN + '> Enter blacklist (y/n): ' + Style.RESET_ALL)
        if self.blacklist.lower() != 'y':
            # Disable the country blacklist loaded above.
            self.blackwords = ''
        logger.info(f'Max Year: {self.year} | Min Views: {self.views} | Subs: {self.subs[0]}-{self.subs[1]}')
        sleep(1)
        # --- worker threads; they share self.apis/self.keys (list mutation
        # relies on CPython's GIL for safety).
        threads = []
        for i in range(num_threads):
            t = threading.Thread(target=self.process_data)
            threads.append(t)
        for t in threads:
            t.start()
        # Separate status thread refreshes the console every few seconds.
        threading.Thread(target=self.console_log).start()
        for t in threads:
            t.join()
        logger.info('Данные закончились, завершение...')
        input("Нажми ENTER, чтобы завершить")

    def read_file(self,filename):
        """Return the file's contents split into lines."""
        with open(filename, 'r',encoding='utf-8',errors='ignore') as f:
            return f.read().split('\n')

    def process_data(self):
        """Worker loop: pick a random keyword, page through its video
        results and write any e-mails passing the filters to emails.txt.

        Runs until API keys or keywords are exhausted. Proxy/HTTP errors
        rotate to a new random proxy; dead API keys are removed.
        """
        proxies = {
            'http': f'http://{random.choice(self.proxys)}',
        }
        while True:
            try:
                if self.apis == [] or self.keys == []:
                    break
                api = random.choice(self.apis)
                key = random.choice(self.keys)
                search = VideosSearch(key, limit=50)
                try:
                    # Consume the keyword so no other thread repeats it.
                    self.keys.remove(str(key))
                except:
                    pass
                videoIds = search.result()
                while True:
                    try:
                        for videoID in videoIds['result']:
                            # E-mails from the search snippet description.
                            description = ''
                            if videoID['descriptionSnippet'] != None:
                                for _ in videoID['descriptionSnippet']:
                                    description += _['text'] + ' '
                            email = re.findall(re_email, description)
                            channelId = videoID['channel']['id']
                            # Channel statistics via the Data API; rotate
                            # proxies/keys until a 200 arrives.
                            while True:
                                try:
                                    api = random.choice(self.apis)
                                    resp = requests.get(f'https://www.googleapis.com/youtube/v3/channels?part=statistics%2Csnippet&maxResults=50&id={channelId}&key={str(api)}',proxies=proxies)
                                    if resp.status_code == 200:
                                        resp_rez = resp.json()["items"][0]
                                        break
                                    else:
                                        # Key rejected: drop it.
                                        try:
                                            self.apis.remove(api)
                                        except:
                                            pass
                                        if self.apis == []:
                                            break
                                except:
                                    proxies = {
                                        'http': f'http://{random.choice(self.proxys)}',
                                    }
                            if self.apis == []:
                                return
                            # Full video resource: scan its raw JSON for e-mails.
                            while True:
                                try:
                                    vid = videoID['id']
                                except:
                                    res3 = []
                                    break
                                try:
                                    api = random.choice(self.apis)
                                    resp = requests.get(f"https://youtube.googleapis.com/youtube/v3/videos?part=snippet&part=contentDetails&part=statistics&id={vid}&key={api}",proxies=proxies)
                                    if resp.status_code == 200:
                                        res3 = re.findall(re_email, resp.text.replace(r"\n", ""))
                                        break
                                    else:
                                        try:
                                            self.apis.remove(api)
                                        except:
                                            pass
                                        if self.apis == []:
                                            break
                                except:
                                    proxies = {
                                        'http': f'http://{random.choice(self.proxys)}',
                                    }
                            if self.apis == []:
                                return
                            # Channel registration year from publishedAt.
                            yearid = int(resp_rez['snippet']['publishedAt'][:4])
                            # Subscriber count
                            try:
                                subscount = resp_rez["statistics"]["subscriberCount"]
                            except Exception:
                                subscount = 0
                            try:
                                viewscount = resp_rez["statistics"]["viewCount"]
                            except:
                                viewscount = 0
                            try:
                                countryId = resp_rez["snippet"]["country"]
                            except Exception:
                                countryId = 'Not'
                            if countryId in self.blackwords:
                                # Blacklisted country: skip filter checks.
                                pass
                            else:
                                # Same year/subs/views filter applied to each
                                # e-mail source independently.
                                if res3 != []:
                                    if self.year >= int(yearid):
                                        if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
                                            if self.views <= int(viewscount):
                                                for mail in res3:
                                                    self.write_mail(f"emails.txt", mail)
                                if email != []:
                                    if self.year >= int(yearid):
                                        if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
                                            if self.views <= int(viewscount):
                                                for mail in email:
                                                    self.write_mail(f"emails.txt", mail)
                            # Channel description
                            try:
                                descriptionCN = resp_rez["snippet"]["description"]
                            except Exception:
                                descriptionCN = ''
                            emailDesc = re.findall(re_email, descriptionCN)
                            if emailDesc != []:
                                if self.year >= int(yearid):
                                    if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
                                        if self.views <= int(viewscount):
                                            for mail in emailDesc:
                                                self.write_mail(f"emails.txt", mail)
                            self.videocount += 1
                        # Advance to the next search page, stop when empty.
                        try:
                            search.next()
                            videoIds = search.result()
                        except:
                            break
                        nextpage = len(videoIds['result'])
                        if nextpage == 0:
                            break
                    except:
                        pass
            except:
                pass

    def write_mail(self,filename, data):
        """Append *data* to *filename* unless already present; bump the counter."""
        # Re-reads the whole file on every call — O(n) per write, but keeps
        # output de-duplicated without extra in-memory state.
        x = self.read_file(filename)
        with open(filename, 'a+',encoding='utf-8') as f:
            if data not in x:
                f.write(str(data) + '\n')
                self.emails += 1

    def console_log(self):
        """Status thread: clear the console and print counters every 5 s."""
        while True:
            os.system('cls' if os.name == 'nt' else 'clear')
            logger.info(f'ApiKeys: {len(self.apis)} | KeyWords: {len(self.keys)} | Emails: {self.emails} | Video_seen: {self.videocount}')
            sleep(5)
Engine() | basautomaticaly/work | main2-5.py | main2-5.py | py | 46,695 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wmi.WMI",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 267... |
17953957335 | import numpy as np
from collections import Counter
def euclideanDistance(x, y):
    """Return the Euclidean (L2) distance between vectors *x* and *y*."""
    diff = x - y
    return np.sqrt(np.sum(diff * diff))
class KNN:
    """k-nearest-neighbours classifier using majority voting."""

    def __init__(self, k=3):
        self.k = k

    def fit(self, X_train, y_train):
        # Lazy learner: training just memorises the data.
        self.X_train = X_train
        self.y_train = y_train

    def predict(self, X):
        """Predict a label for every row of *X*."""
        return [self.singlePredict(sample) for sample in X]

    def singlePredict(self, x):
        """Majority label among the k training points closest to *x*."""
        all_dists = [euclideanDistance(x, stored) for stored in self.X_train]
        nearest_idx = np.argsort(all_dists)[:self.k]
        votes = [self.y_train[i] for i in nearest_idx]
        # e.g. Counter([9, 4, 4, 4]).most_common(1) -> [(4, 3)]
        winner, _count = Counter(votes).most_common(1)[0]
        return winner
if __name__ == "__main__":
# Imports
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.model_selection import train_test_split
cmap = ListedColormap(["#FF0000", "#00FF00", "#0000FF"])
def accuracy(y_true, y_pred):
accuracy = np.sum(y_true == y_pred) / len(y_true)
return accuracy
iris = datasets.load_iris()
X, y = iris.data, iris.target
print(y.max())
print(X[100:105])
print(y[100:105])
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=1234
)
k = 3
clf = KNN(k=k)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print("KNN classification accuracy", accuracy(y_test, predictions))
| Helyousfi/Machine-learning | KNN.py | KNN.py | py | 1,573 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_numb... |
29799264733 | # -*- coding: utf-8 -*-
# https://blog.csdn.net/Tifficial/article/details/78116862
import os
import time
import tkinter.messagebox
from tkinter import *
from tkinter.filedialog import *
from PIL import Image, ImageTk
import pygame
class create_UI():
    """Tkinter front-end for the 'write you a poem' demo.

    Builds a window sized to a background image with four vertical-text
    buttons: pick picture, write poem, show result, quit.
    """

    def __init__(self):
        pass

    def create_button(self, app):
        """Create the four action buttons in grid row 0, columns 3-6."""
        button_functions = [
            self.picSelect, self.writePoet, self.showPoet, quit
        ]
        # One Chinese character per line gives vertical button captions.
        button_texts = ['选\n择\n图\n片', '为\n你\n写\n诗', '查\n看', '退\n出']
        column_index = 3
        button_num = len(button_functions)
        for index in range(button_num):
            button = Button(
                app,
                text=button_texts[index],
                font=('方正舒体', 25),
                bd=0,
                bg='white',
                command=button_functions[index])
            button.grid(row=0, column=column_index, sticky='n')
            column_index += 1

    def ui(self):
        """Build the main window and enter the Tk event loop."""
        app = Tk()
        app.title("为你写诗")
        app.resizable(0, 0)  # forbid resizing the window
        image = Image.open(r'9668839.jpeg')
        background_image = ImageTk.PhotoImage(image)
        w = background_image.width()
        h = background_image.height()
        # Size the window to the background image, anchored at top-left.
        app.geometry('%dx%d+0+0' % (w, h))
        background_label = Label(app, image=background_image)
        background_label.place(x=0, y=0, relwidth=1, relheight=1)
        self.create_button(app)
        app.mainloop()

    def set_button_sound(self):
        """Play the button-press music track.

        NOTE(review): time.sleep(200.5) blocks the Tk event loop for over
        three minutes on every button press — confirm this is intended.
        """
        water_drop_pwd = r"SarahBrightman-ScarboroughFair.mp3"
        pygame.mixer.init()
        pygame.mixer.music.load(water_drop_pwd)
        pygame.mixer.music.play()
        time.sleep(200.5)
        pygame.mixer.music.stop()

    def picSelect(self):
        """Open a file dialog so the user can pick image file(s)."""
        self.set_button_sound()
        default_dir = r"C:\Users\lenovon\Desktop"  # default open directory
        fns = askopenfilename(
            filetypes=[("all", "*.*"), ("text file", "*.txt")],
            title=u"选择图片",
            initialdir=(os.path.expanduser(default_dir)))
        fns_list = list(fns)
        print("fns list:", fns_list)

    def writePoet(self):
        # Placeholder: poem generation is not implemented yet.
        self.set_button_sound()
        tkinter.messagebox.showinfo('Message', '查看')

    def showPoet(self):
        # Placeholder: shows a message box instead of real results.
        self.set_button_sound()
        tkinter.messagebox.showinfo('Message', '展示结果')
if __name__ == "__main__":
demo = create_UI()
demo.ui()
| anna160278/tkinter-examples | examples/aaa/tst.py | tst.py | py | 2,443 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
20649229622 | import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
from pyswarms.utils.plotters.plotters import plot_contour, plot_surface
from pyswarms.utils.plotters.formatters import Mesher, Designer
# Run the optimizer on the 2-D sphere function.
options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}
optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=2, options=options)

# GlobalBestPSO.optimize returns (best_cost, best_pos); the original bound
# that pair to a variable misleadingly named ``pos_history`` and never used
# it — the actual position history lives on ``optimizer.pos_history``.
best_cost, best_pos = optimizer.optimize(fx.sphere, iters=50)

# Plot the sphere function's mesh for better plots
m = Mesher(func=fx.sphere,
           limits=[(-1, 1), (-1, 1)])
# Adjust figure limits
d = Designer(limits=[(-1, 1), (-1, 1), (-0.1, 1)],
             label=['x-axis', 'y-axis', 'z-axis'])

# Project the recorded 2-D particle history onto the cost surface.
pos_history_3d = m.compute_history_3d(optimizer.pos_history)  # preprocessing
animation3d = plot_surface(pos_history=pos_history_3d,
                           mesher=m, designer=d,
                           mark=(0, 0, 0))  # mark the global minimum at the origin
animation3d.save('3d.gif', writer='imagemagick', fps=10)
| igorpustovoy/inteligencja_obliczeniowa | lab04/zad3/3.py | 3.py | py | 960 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyswarms.single.GlobalBestPSO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyswarms.single",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyswarms.utils.functions.single_obj.sphere",
"line_number": 10,
"usage_type": "attribute... |
12680443626 | import os
import warnings
import pandas as pd
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from utils.timefeatures import time_features
warnings.filterwarnings('ignore')
class MyDataset(Dataset):
    """Sliding-window time-series dataset for the ETT / custom benchmarks.

    Splits the raw CSV into train/val/test row ranges, standardises the
    features with statistics fitted on the train split only, and yields
    (seq_x, seq_y, seq_x_mark, seq_y_mark) windows for forecasting.
    """

    def __init__(self, root_path, data_path, data, flag, seq_len, label_len, pred_len, features, target, timeenc, freq,
                 percent):
        self.seq_len = seq_len      # encoder input length
        self.label_len = label_len  # decoder warm-up overlap
        self.pred_len = pred_len    # forecast horizon
        # Map the split name to an index into the border arrays below.
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]
        self.features = features  # 'M' multivariate, 'S' univariate, 'MS' multi-in/uni-out
        self.target = target      # target column name (used when features == 'S')
        self.timeenc = timeenc    # 0: calendar fields, 1: frequency-encoded features
        self.freq = freq
        self.percent = percent    # percentage of the train split actually used
        self.root_path = root_path
        self.data_path = data_path
        self.data = data          # dataset name: ETTh1/ETTh2/ETTm1/ETTm2/custom
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
        # border1s/border2s hold [start, end) row ranges for the
        # train/val/test splits; val/test starts are pulled back by
        # seq_len so each window has enough history.
        if self.data == 'ETTh1' or self.data == 'ETTh2':
            # Hourly ETT: 12 months train, 4 val, 4 test.
            border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]
            border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]
        elif self.data == 'ETTm1' or self.data == 'ETTm2':
            # 15-minute ETT: same months, 4 samples per hour.
            border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]
            border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]
        elif self.data == 'custom':
            # Generic CSV: 70% / 10% / 20% split.
            num_train = int(len(df_raw) * 0.7)
            num_test = int(len(df_raw) * 0.2)
            num_vali = len(df_raw) - num_train - num_test
            border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
            border2s = [num_train, num_train + num_vali, len(df_raw)]
        else:
            # Unknown dataset name: indexing below will raise.
            border1s = None
            border2s = None
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        if self.set_type == 0:
            # Optionally keep only `percent`% of the training windows.
            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len
        if self.features == 'M' or self.features == 'MS':
            # All columns except the leading date column.
            df_data = df_raw.iloc[:, 1:]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]
        else:
            df_data = None
        # Fit the scaler on the train split only (no leakage into val/test).
        train_data = df_data[border1s[0]:border2s[0]]
        self.scaler.fit(train_data.values)
        data = pd.DataFrame(self.scaler.transform(df_data.values)).fillna(0).values
        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            # Plain calendar features per timestamp.
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
            # NOTE(review): positional `axis` argument to drop() is
            # deprecated in recent pandas — drop(columns=['date']).
            data_stamp = df_stamp.drop(['date'], 1).values
        elif self.timeenc == 1:
            # Continuous time features; transpose to (len, n_features).
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)
        else:
            data_stamp = None
        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        # Encoder window [s_begin, s_end); decoder window overlaps the
        # last label_len steps and extends pred_len into the future.
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        # Number of complete (history + horizon) windows in this split.
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        """Map standardised values back to the original scale."""
        return self.scaler.inverse_transform(data)
| ForestsKing/TSF-Library | data_provider/data_loader.py | data_loader.py | py | 4,041 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 35,
"usage_type": "call"
},
{
... |
2872612166 | from flask import Flask, render_template, redirect, url_for, request
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, FloatField
from wtforms.validators import DataRequired
import requests
# Flask app + SQLite database + Bootstrap styling.
db = SQLAlchemy()
app = Flask(__name__)
# NOTE(review): placeholder secret — supply via configuration in production.
app.config['SECRET_KEY'] = 'your_secret_key'
Bootstrap(app)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///movie-collection.db"
db.init_app(app)

# TMDb search endpoint and parameters shared by every search request.
movie_url = 'https://api.themoviedb.org/3/search/movie'
# NOTE(review): placeholder API key — supply a real TMDb key.
api_key = 'your_api_key'
parameters = {
    'api_key': api_key,
    'language': 'en-US'
}
class Movies(db.Model):
    """A movie in the user's personal ranked list."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), unique=True, nullable=False)  # unique movie title
    year = db.Column(db.Integer, nullable=False)                    # release year
    description = db.Column(db.String(1500), nullable=False)        # TMDb overview text
    rating = db.Column(db.Float, nullable=True)                     # user's score out of 10
    ranking = db.Column(db.Integer, nullable=True)                  # recomputed on each home() render
    review = db.Column(db.String(500), nullable=True)               # user's free-text review
    img_url = db.Column(db.String(1500), nullable=False)            # poster image URL
class MovieForm(FlaskForm):
    """Edit form: rating and review for an existing movie.

    NOTE(review): no SubmitField here, unlike AddForm — confirm the edit
    template renders its own submit button.
    """
    rating = FloatField('Your rating out of 10', validators=[DataRequired()])
    review = StringField('Your review', validators=[DataRequired()])
class AddForm(FlaskForm):
    """Search form: a movie title to look up on TMDb."""
    title = StringField('Movie Title', validators=[DataRequired()])
    submit = SubmitField('Submit')
# Create the tables once at import time (Flask-SQLAlchemy 3.x requires an
# application context for database operations).
with app.app_context():
    db.create_all()
@app.route("/")
def home():
all_movies = db.session.query(Movies).order_by(Movies.rating).all()
for i in range(len(all_movies)):
all_movies[i].ranking = len(all_movies) - i
db.session.commit()
return render_template("index.html", all_movies=all_movies)
@app.route('/edit', methods=['GET', 'POST'])
def edit():
    """Show (GET) or process (POST) the rating/review form for one movie.

    GET reads ``id`` from the query string; POST expects ``id`` plus
    ``rating`` and ``review`` as form fields.
    """
    id = request.args.get('id')
    movie = Movies.query.get(id)
    form = MovieForm(movie_id=id)
    if request.method == 'POST':
        # NOTE(review): re-reads the id from the posted form, not the query
        # string — confirm the template renders a hidden 'id' field.
        id = request.form.get('id')
        movie = Movies.query.get(id)
        movie.rating = request.form.get('rating')
        movie.review = request.form.get('review')
        db.session.commit()
        return redirect(url_for('home'))
    return render_template('edit.html', movie=movie, form=form)
@app.route('/delete')
def delete():
    """Remove the movie given by the ``id`` query parameter, then go home."""
    doomed = Movies.query.get(request.args.get('id'))
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('home'))
@app.route('/add', methods=['POST', 'GET'])
def add():
    """Search TMDb for a title and let the user pick the matching movie."""
    form = AddForm()
    if request.method == 'POST':
        parameters['query'] = form.title.data
        response = requests.get(url=movie_url, params=parameters).json()
        # Keep only the fields the selection template needs
        # (comprehension replaces the original append loop).
        data = [
            {
                'title': movie['title'],
                'id': movie['id'],
                'year': movie['release_date'].split('-')[0],
            }
            for movie in response['results']
        ]
        return render_template('select.html', movies=data)
    return render_template('add.html', form=form)
@app.route('/add_movie')
def add_movie():
    """Fetch full details for the chosen TMDb id, store it, then edit it."""
    url = f'https://api.themoviedb.org/3/movie/{request.args.get("id")}'
    params = {
        'api_key': api_key,
        'language': 'en-US',
    }
    response = requests.get(url=url, params=params).json()
    new_movie = Movies(title=response['title'],
                       year=int(response['release_date'].split('-')[0]),
                       description=response['overview'],
                       rating=response['vote_average'],
                       img_url=f'https://image.tmdb.org/t/p/w500{response["poster_path"]}')
    db.session.add(new_movie)
    db.session.commit()
    # SQLAlchemy populates the primary key on commit, so the original's
    # extra query-by-title round-trip was redundant (and fragile if two
    # inserts ever raced on the unique title).
    return redirect(url_for('edit', id=new_movie.id))
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(debug=True)
| mgardner1011/UdemyProjects | movie_ranking_site/main.py | main.py | py | 3,878 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_bootstrap.Bootstrap",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flas... |
33480868557 | from django.shortcuts import render
from .models import Hardware, Software, Employees
from rest_framework import generics
from .serializers import HardwareSerializer, SoftwareSerializer, EmployeesSerializer
from django.db.models.query import Q
# Create your views here.
class CreateHardware(generics.CreateAPIView):
    """Create a new Hardware record.

    Fixed: DRF expects a lowercase ``queryset`` attribute; the original
    set a dead ``QuerySet`` name whose trailing comma also made it a
    1-tuple instead of a queryset.
    """
    queryset = Hardware.objects.all()
    serializer_class = HardwareSerializer
class UpdateHardware(generics.RetrieveUpdateAPIView):
    """Retrieve or update one Hardware record.

    Fixed: ``QuerySet = ...,`` (mis-cased and a 1-tuple) replaced with the
    ``queryset`` attribute DRF generic views actually read.
    """
    queryset = Hardware.objects.all()
    serializer_class = HardwareSerializer
class DeleteHardware(generics.RetrieveDestroyAPIView):
    """Retrieve or delete one Hardware record.

    Fixed: ``QuerySet = ...,`` (mis-cased and a 1-tuple) replaced with the
    ``queryset`` attribute DRF generic views actually read.
    """
    queryset = Hardware.objects.all()
    serializer_class = HardwareSerializer
class ListHardware(generics.ListAPIView):
    """List Hardware records, hiding pk 5 and rows with an empty name."""
    serializer_class = HardwareSerializer

    def get_queryset(self):
        # The original passed the STRING '5' to ``pk__in`` (Django iterates
        # it character by character) and carried commented-out experiments;
        # exclude() states both filters directly and equivalently.
        return Hardware.objects.exclude(pk=5).exclude(name='')
class DetailHardware(generics.RetrieveAPIView):
    """GET endpoint that retrieves a single Hardware record."""
    # Bug fix: DRF reads the lowercase ``queryset`` attribute, and the
    # original trailing comma wrapped the queryset in a 1-tuple.
    queryset = Hardware.objects.all()
    serializer_class = HardwareSerializer
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Hardware.objects.all",
"line_number": 9,
"usage_type": "call"
}... |
18846943134 | import logging
from concurrent import futures
from threading import Timer
from functools import partial
import cloud.blockstore.public.sdk.python.protos as protos
from .error_codes import EResult
from .error import ClientError, _handle_errors, client_error_from_response
from .grpc_client import GrpcClient
from .http_client import HttpClient
from .durable import DurableClient
from .base_client import dispatch_nbs_client_methods
from .safe_client import _SafeClient
from .future import unit, bind
DEFAULT_HARD_TIMEOUT = 8*60 # 8 min
DEFAULT_DISCOVERY_LIMIT = 3
class _Executor(object):
    """Drives one NBS request against dynamically discovered instances.

    An optional already-known endpoint is tried first; a hard deadline timer
    and an optional soft ("hedged") timer are armed.  When the soft timer
    fires before a response arrives, the request is re-shot at the next
    discovered instance.  The first successful response resolves the future
    returned by run() with an (impl, addr, response) triple.
    """

    def __init__(self, method, balancer, factory, limit, secure, log):
        self.__response = futures.Future()
        self.__method = method        # callable(impl) -> future of the request
        self.__balancer = balancer    # client used for instance discovery
        self.__factory = factory      # callable(host, port) -> client impl
        self.__limit = limit          # max number of instances to discover
        self.__secure = secure        # discover only secure endpoints
        self.__visited = None         # endpoint handed to run(), if any
        self.__log = log
        self.__pending = {}           # (host, port) -> in-flight request future
        self.__done = False
        self.__instances = None       # discovered instances (fetched lazily)
        self.__instances_future = None
        self.__idx = 0                # index of the next candidate instance
        self.__main_timer = None
        # Fix: __hedged_timer was initialized twice in the original.
        self.__hedged_timer = None

    def run(self, impl, addr, timeout, soft_timeout, create_timer):
        """Start execution; returns a future of (impl, addr, response)."""
        self.__main_timer = create_timer(timeout, self._on_main_timer)
        self.__main_timer.start()
        if soft_timeout:
            self.__hedged_timer = create_timer(soft_timeout, self._on_hedged_timer)
            self.__hedged_timer.start()
        self.__visited = addr
        if impl is not None:
            self._shoot(impl, addr)
        else:
            self._try_shoot()
        return self.__response

    def _cancel_all(self):
        # Stop timers and abandon every in-flight request.
        self.__done = True
        self.__main_timer.cancel()
        if self.__hedged_timer is not None:
            self.__hedged_timer.cancel()
        for f in self.__pending.values():
            f.cancel()

    def _set_result(self, result, impl, addr):
        if self.__done:
            return
        self._cancel_all()
        self.__response.set_result((impl, addr, result))

    def _set_exception(self, e):
        if self.__done:
            return
        self._cancel_all()
        self.__response.set_exception(e)

    def _on_main_timer(self):
        # Hard deadline exceeded: fail the whole request.
        if self.__done:
            return
        self._set_exception(
            ClientError(EResult.E_TIMEOUT.value, "deadline exceeded"))

    def _on_hedged_timer(self):
        # Soft deadline hit: hedge by shooting at another instance.
        if self.__done:
            return
        self._try_shoot()

    def _shoot(self, impl, addr):
        r = self.__method(impl)
        self.__pending[(addr.Host, addr.Port)] = r

        def cb(f):
            self._handle_response(f, impl, addr)
        r.add_done_callback(cb)

    def _on_discover_instances(self, f):
        if self.__done:
            return
        e = f.exception()
        if e is None:
            self.__instances = f.result().Instances
            self.__log.debug("success discovery: {}".format(
                map(lambda x: x.Host + ":" + str(x.Port), self.__instances)))
            self._try_shoot()
            return
        self.__log.error("error on discovery: {}".format(e))
        if len(self.__pending) == 0:
            # Bug fix: this used to call the non-existent self.set_exception,
            # which raised AttributeError instead of failing the request.
            self._set_exception(e)

    def _try_shoot(self):
        if self.__instances is None:
            # Discovery has not completed yet; start it once and re-enter
            # from its callback.
            if self.__instances_future is None:
                request = protos.TDiscoverInstancesRequest()
                request.Limit = self.__limit
                if self.__secure:
                    request.InstanceFilter = protos.EDiscoveryPortFilter.Value(
                        "DISCOVERY_SECURE_PORT")
                self.__instances_future = self.__balancer.discover_instances_async(request)
                self.__instances_future.add_done_callback(self._on_discover_instances)
            return
        while self.__idx < len(self.__instances):
            inst = self.__instances[self.__idx]
            self.__idx += 1
            # Skip the endpoint that was already tried via run().
            if self.__visited and \
                    inst.Host == self.__visited.Host and \
                    inst.Port == self.__visited.Port:
                continue
            try:
                impl = self.__factory(inst.Host, inst.Port)
            except Exception as e:
                self.__log.warning("error on create client: {}".format(e))
                continue
            if impl is None:
                continue
            self._shoot(impl, inst)
            return
        if len(self.__pending) == 0:
            self._set_exception(
                ClientError(EResult.E_FAIL.value, "can't create client"))

    def _handle_response(self, f, impl, addr):
        if f.cancelled():
            return
        self.__log.debug("handle response from {}:{}".format(
            addr.Host,
            addr.Port))
        del self.__pending[(addr.Host, addr.Port)]
        if self.__done:
            return
        is_retriable = False
        error = None
        try:
            response = f.result()
            e = client_error_from_response(response)
            if not e.succeeded:
                raise e
        except ClientError as e:
            error = e
            is_retriable = e.is_retriable
        except Exception as e:
            error = e
        if not error:
            self._set_result(response, impl, addr)
            return
        self.__log.error("{}:{} request error: {}".format(addr.Host, addr.Port, error))
        if not is_retriable:
            self._set_exception(error)
            return
        if len(self.__pending) == 0:
            # Retriable failure and nothing else in flight: try the next one.
            self._try_shoot()
@dispatch_nbs_client_methods
class _DiscoveryClient(object):
    """NBS client that (re)discovers a working endpoint per request.

    Every dispatched request runs through an _Executor: it is sent to the
    last known-good endpoint (if any) and, on retriable failures or soft
    timeout, re-sent to other discovered instances.  The endpoint that
    answered successfully is cached for subsequent requests.
    """

    def __init__(
            self,
            balancer,
            factory,
            discovery_limit=None,
            hard_timeout=None,
            soft_timeout=None,
            log=None,
            secure=False):
        self.__impl = None   # client for the last known-good endpoint
        self.__addr = None   # address of that endpoint
        self.__balancer = balancer
        self.__factory = factory
        self.__secure = secure
        self.__limit = DEFAULT_DISCOVERY_LIMIT
        if discovery_limit is not None:
            self.__limit = discovery_limit
        self.__timeout = DEFAULT_HARD_TIMEOUT
        if hard_timeout is not None:
            self.__timeout = hard_timeout
        self.__soft_timeout = soft_timeout
        if log is not None:
            self.log = log
        else:
            self.log = logging.getLogger("discovery_client")
        self.__create_timer = Timer

    def close(self):
        """Close the cached endpoint client and the resolved balancer."""
        if self.__impl is not None:
            self.__impl.close()
        if self.__balancer.done() and not self.__balancer.cancelled():
            self.__balancer.result().close()

    def set_timer_factory(self, create_timer):
        # Test hook: replace threading.Timer with a custom factory.
        self.__create_timer = create_timer

    @property
    def timeout(self):
        return self.__timeout

    @property
    def soft_timeout(self):
        return self.__soft_timeout

    @property
    def limit(self):
        return self.__limit

    @_handle_errors
    def _execute_request_async(
            self,
            method_name,
            request,
            idempotence_id,
            timestamp,
            trace_id,
            request_timeout):
        # Generic dispatcher used by dispatch_nbs_client_methods for every
        # NBS method: runs the request through an _Executor and caches the
        # endpoint that answered.

        def method(impl):
            m = getattr(impl, method_name + '_async')
            return m(
                request,
                idempotence_id,
                timestamp,
                trace_id,
                request_timeout)

        def run(client):
            e = _Executor(
                method,
                client,
                self.__factory,
                self.__limit,
                self.__secure,
                self.log)
            return e.run(
                self.__impl,
                self.__addr,
                self.__timeout,
                self.__soft_timeout,
                self.__create_timer)

        def update(client):
            # Remember the endpoint that served the request.
            self.__impl, self.__addr, r = client
            return unit(r)

        return bind(bind(self.__balancer, run), update)

    def ping_async(
            self,
            request,
            idempotence_id=None,
            timestamp=None,
            trace_id=None,
            request_timeout=None):
        # Pings go straight through the balancer, bypassing discovery.
        def cb(client):
            return client.ping_async(
                request,
                idempotence_id,
                timestamp,
                trace_id,
                request_timeout)
        return bind(self.__balancer, cb)

    def ping(
            self,
            request,
            idempotence_id=None,
            timestamp=None,
            trace_id=None,
            request_timeout=None):
        """Blocking variant of ping_async."""
        return self.ping_async(
            request,
            idempotence_id,
            timestamp,
            trace_id,
            request_timeout).result()

    def discover_instances_async(
            self,
            request,
            idempotence_id=None,
            timestamp=None,
            trace_id=None,
            request_timeout=None):
        def cb(client):
            return client.discover_instances_async(
                request,
                idempotence_id,
                timestamp,
                trace_id,
                request_timeout)
        return bind(self.__balancer, cb)

    def discover_instances(
            self,
            request,
            idempotence_id=None,
            timestamp=None,
            trace_id=None,
            request_timeout=None):
        """Blocking variant of discover_instances_async."""
        return self.discover_instances_async(
            request,
            idempotence_id,
            timestamp,
            trace_id,
            request_timeout).result()

    def discover_instance_async(self):
        """Return a future of a ready-to-use client for some live instance.

        Discovers up to ``limit`` instances, then pings candidates one by one
        and resolves with the first client whose ping succeeds.
        """
        future = futures.Future()

        def ping_cb(f, impl, instances, i):
            try:
                f.result()
                future.set_result(impl)
            except Exception:
                # This candidate is dead; move on to the next one.
                loop(instances, i)

        def loop(instances, i):
            while i < len(instances):
                inst = instances[i]
                i += 1
                try:
                    impl = self.__factory(inst.Host, inst.Port)
                except Exception as e:
                    # Bug fix: this used self.__log, an attribute that was
                    # never set (the logger lives in self.log), which raised
                    # AttributeError on the error path.
                    self.log.warning("error on create client: {}".format(e))
                    continue
                if impl is None:
                    continue
                f = impl.ping_async(protos.TPingRequest())

                def cb(f):
                    ping_cb(f, impl, instances, i)
                f.add_done_callback(cb)
                return
            future.set_exception(
                ClientError(EResult.E_FAIL.value, "can't create client"))

        def discover_instances_cb(f):
            try:
                instances = f.result().Instances
                loop(instances, 0)
            except Exception as e:
                future.set_exception(e)

        request = protos.TDiscoverInstancesRequest()
        request.Limit = self.__limit
        if self.__secure:
            request.InstanceFilter = protos.EDiscoveryPortFilter.Value(
                "DISCOVERY_SECURE_PORT")
        f = self.discover_instances_async(request)
        f.add_done_callback(discover_instances_cb)
        return future
class DiscoveryClient(_SafeClient):
    """Public facade over the discovery-aware NBS client implementation."""

    def __init__(self, impl):
        super().__init__(impl)

    def discover_instance_async(self):
        """Kick off discovery; returns a future of a ready-to-use client."""
        return self._impl.discover_instance_async()

    def discover_instance(self):
        """Blocking variant of discover_instance_async."""
        return self.discover_instance_async().result()
def find_closest(clients, request_timeout=None):
    """Ping all *clients* concurrently; resolve with the first responder.

    Every other client is cancelled and closed once a winner is found; if
    all pings fail, the last failure becomes the result's exception.
    """
    result = futures.Future()
    requests = dict()

    def done(c, f):
        # Completion callback for a single client's ping.
        if result.done():
            return
        del requests[c]
        if f.exception():
            if not requests:
                # Last outstanding ping also failed -> propagate the error.
                result.set_exception(f.exception())
            c.close()
        else:
            result.set_result(c)
            # Cancel and close all the losers.
            while requests:
                x, f = requests.popitem()
                f.cancel()
                x.close()

    requests = {c: c.ping_async(
        protos.TPingRequest(),
        request_timeout=request_timeout) for c in clients}

    for c, f in requests.copy().items():
        f.add_done_callback(partial(done, c))
    return result
def CreateDiscoveryClient(
        endpoints,
        credentials=None,
        request_timeout=None,
        retry_timeout=None,
        retry_timeout_increment=None,
        log=None,
        executor=None,
        hard_timeout=None,
        soft_timeout=None,
        discovery_limit=None):
    """Build a DiscoveryClient balanced over the closest of *endpoints*.

    Each endpoint gets a durable (retrying) client; HTTP endpoints are
    detected by URL scheme, everything else speaks gRPC.
    """

    def _durable_client(endpoint):
        if endpoint.startswith('https://') or endpoint.startswith('http://'):
            backend = HttpClient(
                endpoint,
                credentials,
                request_timeout,
                log,
                executor)
        else:
            backend = GrpcClient(
                endpoint,
                credentials,
                request_timeout,
                log)
        return DurableClient(
            backend,
            retry_timeout,
            retry_timeout_increment,
            log)

    def _client_factory(host, port):
        # Used by discovery to connect to newly found instances.
        return _durable_client(host + ':' + str(port))

    endpoint_list = endpoints if isinstance(endpoints, list) else [endpoints]
    balancer = find_closest(map(_durable_client, endpoint_list))
    impl = _DiscoveryClient(
        balancer,
        _client_factory,
        discovery_limit,
        hard_timeout,
        soft_timeout,
        log,
        credentials is not None)
    return DiscoveryClient(impl)
| ydb-platform/nbs | cloud/blockstore/public/sdk/python/client/discovery.py | discovery.py | py | 13,730 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "concurrent.futures.Future",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "concurrent.futures",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "error.ClientError",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "error_... |
41345912194 | import json
from functools import wraps
import requests
from service_now_api_sdk.settings import (
SERVICENOW_API_PASSWORD,
SERVICENOW_API_TOKEN,
SERVICENOW_API_USER,
SERVICENOW_URL,
)
def headers_replace(f):
    """Decorator injecting default JSON/auth headers into the ``headers`` kwarg.

    Caller-supplied headers take precedence over the defaults; when a bearer
    token is configured it is added as the Authorization header.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        if SERVICENOW_API_TOKEN:
            # Bug fix: a trailing comma used to make this value a 1-tuple,
            # producing an invalid Authorization header.
            headers["Authorization"] = f"Bearer {SERVICENOW_API_TOKEN}"
        if kwargs.get("headers"):
            # Bug fix: ``kwargs.get["headers"]`` subscripted the bound method
            # itself (TypeError at runtime); call it instead.
            headers = {**headers, **kwargs.get("headers")}
        kwargs["headers"] = headers
        return f(*args, **kwargs)

    return decorated_function
class Client:
    """Small HTTP client for the ServiceNow REST API.

    All verbs funnel through one private request helper; default JSON/auth
    headers are injected by the ``headers_replace`` decorator.  Credentials
    come from module settings: a bearer token when ``SERVICENOW_API_TOKEN``
    is set, otherwise basic auth with the configured user/password pair.
    """

    base_url = SERVICENOW_URL
    default_path = ""

    @headers_replace
    def __http_request(
        self,
        method: str,
        path: str,
        headers: dict = None,
        data=None,
        params: dict = None,
        timeout: int = None
    ):
        # Normalise optional arguments, then build the shared request kwargs.
        body = {} if data is None else data
        query = {} if params is None else params
        request_kwargs = dict(
            method=method,
            url=f"{self.base_url}/{path}",
            headers=headers,
            data=json.dumps(body),
            params=query,
            timeout=timeout,
        )
        if SERVICENOW_API_TOKEN:
            return requests.request(**request_kwargs)
        if SERVICENOW_API_USER and SERVICENOW_API_PASSWORD:
            return requests.request(
                auth=(SERVICENOW_API_USER, SERVICENOW_API_PASSWORD),
                **request_kwargs,
            )

    def post(
        self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
    ):
        """Issue a POST request with ``data`` serialised as JSON."""
        return self.__http_request(
            "POST", path, headers=headers, data=data, params=params, timeout=timeout)

    def get(self, path: str, headers: dict = None, params: dict = None, timeout: int = None):
        """Issue a GET request."""
        return self.__http_request(
            "GET", path, headers=headers, params=params, timeout=timeout)

    def put(
        self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
    ):
        """Issue a PUT request with ``data`` serialised as JSON."""
        return self.__http_request(
            "PUT", path, headers=headers, data=data, params=params, timeout=timeout)

    def patch(
        self, path: str, headers: dict = None, data: dict = None, params: dict = None, timeout: int = None
    ):
        """Issue a PATCH request with ``data`` serialised as JSON."""
        return self.__http_request(
            "PATCH", path, headers=headers, data=data, params=params, timeout=timeout)

    def delete(self, path: str, headers: dict = None, data: dict = None, timeout: int = None):
        """Issue a DELETE request (optionally with a JSON body)."""
        return self.__http_request(
            "DELETE", path, headers=headers, data=data, timeout=timeout)
| people-analytics-tech/service-now-api-sdk | service_now_api_sdk/sdk/servicenow/helpers/client.py | client.py | py | 3,159 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "service_now_api_sdk.settings.SERVICENOW_API_TOKEN",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "service_now_api_sdk.settings.SERVICENOW_API_TOKEN",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 15,
... |
41061708200 | from PySide6.QtWidgets import (
QWidget,
QToolBar,
QLabel,
QLineEdit,
QTextEdit,
QVBoxLayout,
QHBoxLayout,
)
import core.terminal_commands as tc
class WidgetGitUtils(QWidget):
    """
    A custom QWidget that provides a user interface for Git utilities.

    This widget contains a toolbar with actions for generating local and
    global Git configurations, as well as resetting the form.  It has input
    fields for a username and email, and a read-only output text field.
    """

    def __init__(self):
        """
        Initializes the WidgetGitUtils instance.

        Creates the toolbar, the username/email input rows and the read-only
        output area, and adds them all to the main vertical layout.
        """
        super().__init__()
        self._git_utils_toolbar = QToolBar()
        self._git_utils_toolbar.addAction(
            "Generate Local Config", self.generate_local_config
        )
        self._git_utils_toolbar.addAction(
            "Generate Global Config", self.generate_global_config
        )
        self._git_utils_toolbar.addAction("Reset", self.reset)

        self._username_label = QLabel("Username")
        self._email_label = QLabel("Email")
        self._username_line_edit = QLineEdit()
        self._email_line_edit = QLineEdit()

        # Each label/field pair is wrapped in a horizontal row widget.
        self._username_pair = QHBoxLayout()
        self._username_pair.addWidget(self._username_label)
        self._username_pair.addWidget(self._username_line_edit)
        self._username_widget = QWidget()
        self._username_widget.setLayout(self._username_pair)

        self._email_pair = QHBoxLayout()
        self._email_pair.addWidget(self._email_label)
        self._email_pair.addWidget(self._email_line_edit)
        self._email_widget = QWidget()
        self._email_widget.setLayout(self._email_pair)

        self._text_edit = QTextEdit()
        self._text_edit.setReadOnly(True)

        self._main_layout = QVBoxLayout()
        self._main_layout.addWidget(self._git_utils_toolbar)
        self._main_layout.addWidget(self._username_widget)
        self._main_layout.addWidget(self._email_widget)
        self._main_layout.addWidget(self._text_edit)
        self.setLayout(self._main_layout)

    def _generate_config(self, is_global: bool):
        """
        Shared worker for both toolbar actions.

        Reads the username and email fields and, when both are non-empty,
        shows the generated git config commands in the output area.
        (Deduplicates the two previously copy-pasted action handlers.)
        """
        username: str = self._username_line_edit.text().strip()
        email: str = self._email_line_edit.text().strip()
        if username and email:
            result: str = tc.generate_git_config_commands(
                username, email, is_global=is_global
            )
            self._text_edit.setPlainText(result)

    def generate_local_config(self):
        """
        Generates local (repository-level) Git configuration commands and
        displays them in the output area.
        """
        self._generate_config(is_global=False)

    def generate_global_config(self):
        """
        Generates global Git configuration commands and displays them in the
        output area.
        """
        self._generate_config(is_global=True)

    def reset(self):
        """
        Clears the username and email inputs as well as the output area.
        """
        self._username_line_edit.setText("")
        self._email_line_edit.setText("")
        self._text_edit.setPlainText("")
| sanyokkua/dev_common_tools_py | ui/widgets/widget_git_utils.py | widget_git_utils.py | py | 4,017 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "PySide6.QtWidgets.QWidget",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QToolBar",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QLabel",
"line_number": 40,
"usage_type": "call"
},
{
"api... |
457933717 | '''
rest_framework reverse 补丁
'''
from rest_framework import relations
original_reverse = relations.reverse
def hack_reverse(alias, **kwargs):
    """Reverse *alias* inside the current request's URL namespace, if any."""
    namespace = kwargs['request'].resolver_match.namespace
    qualified = "%s:%s" % (namespace, alias) if namespace else alias
    return original_reverse(qualified, **kwargs)
relations.reverse = hack_reverse
original_resolve = relations.resolve
def hack_resolve(path, urlconf=None):
    """Resolve *path*, stripping the app-namespace prefix from view_name."""
    match = original_resolve(path, urlconf=urlconf)
    app_name = match.app_name
    if app_name:
        prefix = app_name + ':'
        if match.view_name.startswith(prefix):
            match.view_name = match.view_name[len(prefix):]
    return match
relations.resolve = hack_resolve | dowhilefalse/Donation-Platform | api/__init__.py | __init__.py | py | 771 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "rest_framework.relations.reverse",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.relations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "rest_framework.relations.reverse",
"line_number": 15,
"usage_type": "attribu... |
3836899158 | from benchmark_task_manager import *
import itertools
# Alternate between TaskManager0 and TaskManager2 forever, timing each
# scheduling pass and dumping "elapsed,groupid,iteration" for consumers.
iteration = 1
TM = [0, 2]
toggle = itertools.cycle(TM)
while True:
    t1 = time.time()
    z = next(toggle)
    # Fix: replaced the string eval() with an explicit lookup of the class
    # that the star-import above brings into this module's namespace.
    globals()['TaskManager{0}'.format(z)]()._schedule()
    groupid = z
    elapsed = time.time() - t1
    with open("tm_dump", "w") as fid:
        fid.write("{0},{1},{2}".format(elapsed, groupid, iteration))
    iteration += 1
    time.sleep(1)
| fosterseth/awx-junk-drawer | serve_TM_data.py | serve_TM_data.py | py | 405 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.cycle",
"line_number": 5,
"usage_type": "call"
}
] |
36201897714 | from PIL import Image
from picamera.array import PiRGBArray
from picamera import PiCamera
from botocore.exceptions import ClientError
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from time import sleep, time
import sys
from uuid import uuid4
import os
import RPi.GPIO as GPIO
import json
import boto3
import io
################## GENERAL ##################
# Bin types this unit actually services; the full set is kept for reference.
#SUPPORTED_BINS = ['trash', 'plastic', 'paper', 'metal', 'glass']
SUPPORTED_BINS = ['trash', 'paper']
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
# Per-bin wiring: (trigger, echo) pins of the ultrasonic sensor plus the
# servo control pin.  (0,0)/0 entries are placeholders for unwired bins.
bins = {'trash': {'ultrasound_pins': (24,23), 'servo_pin': 19},
        'paper': {'ultrasound_pins': (21,20), 'servo_pin': 26},
        'plastic': {'ultrasound_pins': (0,0), 'servo_pin': 0},
        'metal': {'ultrasound_pins': (0,0), 'servo_pin': 0},
        'glass': {'ultrasound_pins': (0,0), 'servo_pin': 0},
        'cardboard': {'ultrasound_pins': (0,0), 'servo_pin': 0},
        }
# Drop wiring entries for bin types this unit does not support.
for bin_type in bins.copy():
    if bin_type not in SUPPORTED_BINS:
        del bins[bin_type]
bin_id_file = 'bin_id.txt'
bin_height = 20 #estimate bin height is 20cm
################## Button ##################
BIN_BUTTON_PIN = 27
GPIO.setup(BIN_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
################## Servo ##################
# Servo PWM duty cycles corresponding to lid angles.
DEGREES_0 = 2.5
DEGREES_90 = 7.5
DEGREES_180 = 12.5
for bin_type, bin in bins.items():
    servo_pin = bin['servo_pin']
    GPIO.setup(servo_pin, GPIO.OUT)
################## ULTRASOUND ##################
def ultrasound_distance(GPIO_TRIGGER, GPIO_ECHO):
    """Measure distance (cm) with an HC-SR04-style ultrasonic sensor.

    Pulses the trigger pin, then times how long the echo pin stays high.
    NOTE(review): both wait loops busy-wait with no timeout — if the sensor
    never toggles the echo pin this call hangs; confirm that is acceptable.
    """
    #set GPIO direction (IN / OUT)
    GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
    GPIO.setup(GPIO_ECHO, GPIO.IN)
    # set Trigger to HIGH
    GPIO.output(GPIO_TRIGGER, True)
    # set Trigger after 0.01ms to LOW
    sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    StartTime = time()
    StopTime = time()
    # save StartTime
    while GPIO.input(GPIO_ECHO) == 0:
        StartTime = time()
    # save time of arrival
    while GPIO.input(GPIO_ECHO) == 1:
        StopTime = time()
    # time difference between start and arrival
    TimeElapsed = StopTime - StartTime
    # multiply with the sonic speed (34300 cm/s)
    # and divide by 2, because there and back
    distance = (TimeElapsed * 34300) / 2
    return distance
################## REKOGNITION ##################
def start_model(project_arn, model_arn, version_name, min_inference_units):
    """Start a Rekognition Custom Labels model version and wait until running.

    Blocks until the project version reaches the running state, then prints
    its status; errors are printed rather than raised (best-effort startup).
    """
    client=boto3.client('rekognition')

    try:
        # Start the model
        print('Starting model: ' + model_arn)
        response=client.start_project_version(ProjectVersionArn=model_arn,MinInferenceUnits=min_inference_units)
        # Wait for the model to be in the running state
        project_version_running_waiter = client.get_waiter('project_version_running')
        project_version_running_waiter.wait(ProjectArn=project_arn,VersionNames=[version_name])

        #Get the running status
        describe_response=client.describe_project_versions(ProjectArn=project_arn,VersionNames=[version_name])
        for model in describe_response['ProjectVersionDescriptions']:
            print("Status: " + model['Status'])
            print("Message: " + model['StatusMessage'])
    except Exception as e:
        print(e)
def show_custom_labels(model,bucket,photo, min_confidence):
    """Run Rekognition custom-label detection on an S3-hosted photo.

    :param model: ARN of the Rekognition Custom Labels model version.
    :param bucket: S3 bucket that contains the photo.
    :param photo: object key of the photo inside the bucket.
    :param min_confidence: minimum confidence (0-100) for returned labels.
    :return: lower-cased name of the highest-confidence detected label, or
        None when nothing was detected above the threshold.
    """
    client=boto3.client('rekognition')

    # Fix: the original also downloaded the image into PIL via a second S3
    # round-trip, but never used it — DetectCustomLabels reads the object
    # from S3 directly, so the dead download was removed.

    #Call DetectCustomLabels
    response = client.detect_custom_labels(Image={'S3Object': {'Bucket': bucket,'Name': photo}},MinConfidence=min_confidence,ProjectVersionArn=model)
    highest_detected_label = None
    highest_detected_confidence = 0
    print('Detecting labels...')
    for customLabel in response['CustomLabels']:
        print('Label ' + str(customLabel['Name']))
        print('Confidence ' + str(customLabel['Confidence']))
        if customLabel['Confidence'] > highest_detected_confidence:
            highest_detected_label = customLabel['Name'].lower()
            highest_detected_confidence = customLabel['Confidence']
    print('Done detection')
    return highest_detected_label
################## S3 ##################
def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name

    # Upload the file
    s3_client = boto3.client('s3')
    try:
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        # Fix: the failure used to be swallowed silently; report it so a
        # False return can be diagnosed.
        print("Upload failed: {}".format(e))
        return False
    print("Successfully Uploaded!")
    return True
################## MAIN ##################
# Custom MQTT message callback
def customCallback(client, userdata, message):
    """MQTT callback: open or close every supported bin lid on command.

    The payload is expected to be the plain string 'open' or 'close';
    anything else is ignored.
    """
    action = message.payload.decode()
    if action not in ('open', 'close'):
        return
    # Bug fix: the 'close' branch used to print "Opening all bins...".
    print('Opening all bins...' if action == 'open' else 'Closing all bins...')
    duty_cycle = DEGREES_180 if action == 'open' else DEGREES_0
    # Deduplicated: both branches drove the servos identically except for
    # the target duty cycle.
    for trash_type, bin in bins.items():
        servo = GPIO.PWM(bin['servo_pin'], 50)
        servo.start(7.5)
        sleep(0.1)
        servo.ChangeDutyCycle(duty_cycle)
        sleep(1)
        servo.stop()
#check if bin_id exists from a previous run; reuse it so the bin keeps its identity
if os.path.isfile(bin_id_file):
    with open(bin_id_file, 'r') as f:
        bin_id = f.read()
#if doesnt exist, mint a fresh unique id (persisted after first connect below)
else:
    bin_id = 'smartbin-{}'.format(uuid4())

# AWS IoT Core MQTT endpoint and TLS credential paths.
host="****************.us-east-1.amazonaws.com"
rootCAPath = os.path.join("certs", "rootca.pem")
certificatePath = os.path.join("certs", "certificate.pem.crt")
privateKeyPath = os.path.join("certs", "private.pem.key")

smartbin = AWSIoTMQTTClient(bin_id)
smartbin.configureEndpoint(host, 8883)
smartbin.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
smartbin.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing
smartbin.configureDrainingFrequency(2)  # Draining: 2 Hz
smartbin.configureConnectDisconnectTimeout(10)  # 10 sec
smartbin.configureMQTTOperationTimeout(5)  # 5 sec

# Connect and subscribe to AWS IoT
smartbin.connect()
if not os.path.isfile(bin_id_file):
    # First run: announce the new bin and persist its id for the next boot.
    smartbin.publish("bin/{}/add".format(bin_id), '{{"bin_id": "{}" }}'.format(bin_id), 1)
    print('Published newly generated bin endpoint client ID: {}'.format(bin_id))
    with open(bin_id_file, 'w') as f:
        f.write(bin_id)
smartbin.subscribe("bin/{}/action".format(bin_id), 1, customCallback)

while True:
    #If button is pushed take picture, analyze using rekognition and open the corresponding bin hole
    if GPIO.input(BIN_BUTTON_PIN) == GPIO.HIGH:
        print("Button was pushed!")
        sleep(2)
        # Take image from picamera and write to file
        filename = str(uuid4())+".jpg"
        write_image_file = open(filename, 'wb')
        camera = PiCamera()
        camera.resolution = (1024, 768)
        camera.start_preview()
        sleep(2)
        camera.capture(write_image_file)
        write_image_file.close()
        camera.close()
        print('Picture saved')
        # Uploads image file to specified s3 bucket
        bucket = "mysmartbin-image-bin"
        upload_file(filename, bucket, object_name=None)
        # Start rekognition model if is is not
        project_arn='arn:aws:rekognition:us-east-1:****************'
        model_arn='arn:aws:rekognition:us-east-1:****************'
        min_inference_units=1
        version_name='MySmartBin-Custom-Label-Training.2020-02-22T01.18.22'
        start_model(project_arn, model_arn, version_name, min_inference_units)
        # Analyse image based on the model above
        min_confidence = 50
        trash_type_detected = show_custom_labels(model_arn,bucket, filename, min_confidence)
        os.remove(filename)
        # Fall back to the generic trash bin when nothing was recognised.
        if trash_type_detected is None:
            trash_type_detected = 'trash'
        if trash_type_detected in SUPPORTED_BINS:
            print('SUPPORTED TRASH TYPE!')
            bin = bins[trash_type_detected]
            servo = GPIO.PWM(bin['servo_pin'], 50)
            servo.start(7.5)
            sleep(0.1)
            print('Opening bin...')
            servo.ChangeDutyCycle(DEGREES_180) #open bin
            sleep(5) #open for x number of seconds
            print('Closing bin...')
            servo.ChangeDutyCycle(DEGREES_0) #close bin
            sleep(2)
            servo.stop()
            # Measure the fill level and report it over MQTT.
            ultrasound_pins = bin['ultrasound_pins']
            ultrasound_value = ultrasound_distance(ultrasound_pins[0], ultrasound_pins[1]) #gets ultrasonic sensor value
            percentage = round(((bin_height - ultrasound_value)/bin_height)*100, 2)
            mqtt_message = '{{"bin_id": "{}", "trash_type": "{}", "percentage": {} }}'.format(bin_id, trash_type_detected, percentage)
            print(mqtt_message)
            smartbin.publish("bin/{}/fullness".format(bin_id), mqtt_message, 1)
| scriptkiddyisme/mysmartbin | Raspberry Pi/smartbin.py | smartbin.py | py | 8,967 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "RPi.GPIO.setmode",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BCM",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"lin... |
import sqlite3 as lite
import sys
from contextlib import closing
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class CrawlerPipeline(object):
    """Scrapy pipeline that persists scraped results into a local SQLite DB."""

    # Single source of truth for the database location.
    DB_PATH = 'crawler.db'

    def __init__(self):
        # Ensure the results table exists up front.  Fix: ``with con`` only
        # commits the transaction; the connection itself was never closed,
        # so wrap it in contextlib.closing as well.
        with closing(lite.connect(self.DB_PATH)) as con, con:
            con.execute(
                "CREATE TABLE IF NOT EXISTS Results(Id INTEGER PRIMARY KEY AUTOINCREMENT, "
                "Keyword TEXT, Title TEXT, Link TEXT, Description TEXT, BestContent TEXT, BestVote INTEGER, BestView INTEGER)")

    def process_item(self, item, spider):
        """Insert one scraped *item*; returns it unchanged for the next pipeline."""
        with closing(lite.connect(self.DB_PATH)) as con, con:
            con.execute(
                "INSERT INTO Results (Keyword, Title, Link, Description, BestContent, BestVote, BestView) "
                "VALUES (?,?,?,?,?,?,?)",
                (item['keyword'], item['title'], item['link'], item['desc'],
                 item['bestContent'], item['bestVote'], item['bestView']))
        return item
| yaoxiuh/WebCrawler | crawler/pipelines.py | pipelines.py | py | 1,056 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 25,
"usage_type": "call"
}
] |
14716216800 | import torch
from torch import nn
import torch.nn.functional as F
from models.Segformer import mit_b0,mit_b1,mit_b2#,mit_b3,mit_b4,mit_b5
class SK(nn.Module):
    """Selective-kernel style fusion block (cf. https://github.com/syt2/SKNet).

    conv1 projects the input to ``mid_channel``; when ``fuse`` is set, the
    projected features are blended with a second feature map ``y`` via a
    learned, softmax-normalised attention over the two branches; conv2
    produces the output.
    NOTE(review): the ``len`` parameter shadows the builtin of the same name.
    """
    def __init__(self, in_channel, mid_channel, out_channel, fuse, len=32, reduce=16):
        super(SK, self).__init__()
        # Attention bottleneck width: at least mid_channel // reduce.
        len = max(mid_channel // reduce, len)
        self.fuse = fuse
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, mid_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channel),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(mid_channel, out_channel,kernel_size=3,stride=1,padding=1,bias=False),
            nn.BatchNorm2d(out_channel),
        )
        if fuse:
            #https://github.com/syt2/SKNet
            self.gap = nn.AdaptiveAvgPool2d((1, 1))
            # fc is used for batches > 1; fc1 (no BatchNorm) for batch == 1,
            # presumably because BatchNorm cannot normalise a single sample
            # in training mode -- see the b == 1 branch in forward().
            self.fc = nn.Sequential(
                nn.Conv2d(mid_channel, len, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(len),
                nn.ReLU(inplace=True)
            )
            self.fc1 = nn.Sequential(
                nn.Conv2d(mid_channel, len, kernel_size=1, stride=1, bias=False),
                nn.ReLU(inplace=True)
            )
            # One attention head per branch (x and the resized y).
            self.fcs = nn.ModuleList([])
            for i in range(2):
                self.fcs.append(
                    nn.Conv2d(len, mid_channel, kernel_size=1, stride=1)
                )
            self.softmax = nn.Softmax(dim=1)
        nn.init.kaiming_uniform_(self.conv1[0].weight, a=1)
        nn.init.kaiming_uniform_(self.conv2[0].weight, a=1)

    def forward(self, x, y=None, shape=None):
        """Returns (conv2 output, pre-conv2 fused features).

        ``y`` is the feature map propagated from the deeper level and is only
        used when the block was built with fuse=True.  ``shape`` is accepted
        but effectively unused: it is overwritten from ``x`` when fusing.
        """
        x = self.conv1(x)
        if self.fuse:
            shape = x.shape[-2:]
            b = x.shape[0]
            # Bring y to x's spatial size, then attend over the two branches.
            y = F.interpolate(y, shape, mode="nearest")
            feas_U = [x,y]
            feas_U = torch.stack(feas_U,dim=1)
            attention = torch.sum(feas_U, dim=1)
            attention = self.gap(attention)
            if b ==1:
                attention = self.fc1(attention)
            else:
                attention = self.fc(attention)
            attention = [fc(attention) for fc in self.fcs]
            attention = torch.stack(attention, dim=1)
            attention = self.softmax(attention)
            x = torch.sum(feas_U * attention, dim=1)
        # output
        y = self.conv2(x)
        return y, x
class SKF(nn.Module):
    """Student wrapper that fuses multi-scale features with SK blocks.

    ``embed`` selects which student embeddings are linearly projected to the
    teacher dimensions: 0 = none, 1-4 = a single stage, 5 = all four stages.
    """

    def __init__(
        self, student, in_channels, out_channels, mid_channel, embed
    ):
        super(SKF, self).__init__()
        self.student = student
        skfs = nn.ModuleList()
        # The last (deepest) block gets fuse=False; all shallower blocks fuse
        # with the feature propagated up from the deeper level.
        for idx, in_channel in enumerate(in_channels):
            skfs.append(SK(in_channel, mid_channel, out_channels[idx], idx < len(in_channels) - 1))
        self.skfs = skfs[::-1]
        self.embed = embed
        if self.embed == 5:
            self.embed1_linearproject = nn.Linear(in_channels[0], out_channels[0])
            self.embed2_linearproject = nn.Linear(in_channels[1], out_channels[1])
            self.embed3_linearproject = nn.Linear(in_channels[2], out_channels[2])
            self.embed4_linearproject = nn.Linear(in_channels[3], out_channels[3])
        elif self.embed in (1, 2, 3, 4):
            # A single projection for the selected stage (stage = embed - 1).
            stage = self.embed - 1
            self.embed1_linearproject = nn.Linear(in_channels[stage], out_channels[stage])

    def forward(self, x):
        student_features = self.student(x, is_feat=True)
        embed = student_features[2]
        logit = student_features[1]
        x = student_features[0][::-1]
        results = []
        # Deepest level first; each shallower level fuses with the residual
        # features from the level below.
        out_features, res_features = self.skfs[0](x[0])
        results.append(out_features)
        for features, skf in zip(x[1:], self.skfs[1:]):
            out_features, res_features = skf(features, res_features)
            results.insert(0, out_features)
        if self.embed == 0:
            return results, logit
        if self.embed == 5:
            embedproj = [
                self.embed1_linearproject(embed[0]),
                self.embed2_linearproject(embed[1]),
                self.embed3_linearproject(embed[2]),
                self.embed4_linearproject(embed[3]),
            ]
        elif self.embed in (1, 2, 3, 4):
            embedproj = [self.embed1_linearproject(embed[self.embed - 1])]
        else:
            # Bug fix: the original ended with ``assert 'message'`` which is
            # always truthy, so invalid options silently returned None.
            raise ValueError('unsupported embed option: {}'.format(self.embed))
        return results, logit, embedproj
def build_kd_trans(model, embed, in_channels=None, out_channels=None):
    """Wrap a student model in an SKF distillation head.

    Args:
        model: the student backbone to wrap.
        embed: embedding-projection mode forwarded to ``SKF`` (0-5).
        in_channels: per-stage student channel widths; defaults to
            [32, 64, 160, 256].
        out_channels: per-stage teacher channel widths; defaults to
            [64, 128, 320, 512].

    Returns:
        The ``SKF``-wrapped model.
    """
    # None-sentinel instead of mutable list defaults in the signature.
    if in_channels is None:
        in_channels = [32, 64, 160, 256]
    if out_channels is None:
        out_channels = [64, 128, 320, 512]
    mid_channel = 64
    return SKF(model, in_channels, out_channels, mid_channel, embed)
def hcl(fstudent, fteacher):
    """Hierarchical context loss (multi-scale MSE) between feature lists.

    For each student/teacher feature pair, computes the full-resolution MSE
    plus MSE at 4x4, 2x2 and 1x1 adaptive-average-pooled scales, with each
    coarser scale contributing half the weight of the previous one, then
    normalizes by the total weight.

    Args:
        fstudent: list of student feature maps, each (N, C, H, W).
        fteacher: list of teacher feature maps, matching shapes.

    Returns:
        Scalar tensor: sum of per-pair multi-scale losses.
    """
    loss_all = 0.0
    for fs, ft in zip(fstudent, fteacher):
        h = fs.shape[2]  # spatial height; only used to skip too-large pool sizes
        loss = F.mse_loss(fs, ft, reduction='mean')
        cnt = 1.0
        tot = 1.0
        for l in [4, 2, 1]:
            if l >= h:
                continue
            tmpfs = F.adaptive_avg_pool2d(fs, (l, l))
            tmpft = F.adaptive_avg_pool2d(ft, (l, l))
            cnt /= 2.0  # each coarser scale counts half as much
            loss += F.mse_loss(tmpfs, tmpft, reduction='mean') * cnt
            tot += cnt
        loss_all = loss_all + loss / tot
    return loss_all
class ChannelNorm(nn.Module):
    """Normalizes each channel's spatial map into a probability distribution.

    Flattens the (H, W) dims of a 4-D feature map and applies a softmax over
    the spatial positions of every channel; output shape is (N, C, H*W).
    """

    def __init__(self):
        super(ChannelNorm, self).__init__()

    def forward(self, featmap):
        batch, channels, height, width = featmap.shape  # requires 4-D input
        flattened = featmap.reshape((batch, channels, -1))
        return flattened.softmax(dim=-1)
class CriterionCWD(nn.Module):
    """Channel-wise distillation loss between 4-D student/teacher feature maps.

    Normalizes both maps (per ``norm_type``), measures their divergence
    (MSE or KL), and scales by temperature squared.
    """
    def __init__(self,norm_type='none',divergence='mse',temperature=1.0):
        super(CriterionCWD, self).__init__()
        # define normalize function
        if norm_type == 'channel':
            # softmax over spatial positions within each channel
            self.normalize = ChannelNorm()
        elif norm_type =='spatial':
            # softmax across channels at each spatial position
            self.normalize = nn.Softmax(dim=1)
        elif norm_type == 'channel_mean':
            # mean over spatial positions -> (N, C) channel descriptor
            self.normalize = lambda x:x.view(x.size(0),x.size(1),-1).mean(-1)
        else:
            self.normalize = None
        self.norm_type = norm_type
        self.temperature = 1.0
        # define loss function
        if divergence == 'mse':
            self.criterion = nn.MSELoss(reduction='sum')
        elif divergence == 'kl':
            self.criterion = nn.KLDivLoss(reduction='sum')
            # NOTE(review): temperature is only applied in the 'kl' branch;
            # for 'mse' it stays at the 1.0 set above — confirm intentional.
            self.temperature = temperature
        self.divergence = divergence
    def forward(self,preds_S, preds_T):
        """Return the scalar CWD loss between student and teacher maps.

        Args:
            preds_S: student features, shape (n, c, h, w).
            preds_T: teacher features, same shape; detached (no teacher grads).
        """
        n,c,h,w = preds_S.shape
        if self.normalize is not None:
            norm_s = self.normalize(preds_S/self.temperature)
            norm_t = self.normalize(preds_T.detach()/self.temperature)
        else:
            # NOTE(review): with no normalizer, only the first batch element
            # is compared — confirm this is the intended behavior.
            norm_s = preds_S[0]
            norm_t = preds_T[0].detach()
        if self.divergence == 'kl':
            # KLDivLoss expects log-probabilities as its first argument.
            norm_s = norm_s.log()
        loss = self.criterion(norm_s,norm_t)
        # Average the summed loss: per (batch, channel) for channel-style
        # norms, per (batch, pixel) otherwise.
        if self.norm_type == 'channel' or self.norm_type == 'channel_mean':
            loss /= n * c
        else:
            loss /= n * h * w
        # Standard distillation scaling to keep gradient magnitudes comparable.
        return loss * (self.temperature**2)
######################################################################################################################
class EmbedChannelNorm(nn.Module):
    """Softmax over the last dim of a 3-D embedding tensor (N, C, L)."""

    def __init__(self):
        super(EmbedChannelNorm, self).__init__()

    def forward(self, embed):
        batch, channels, _ = embed.shape  # enforce 3-D input
        return embed.softmax(dim=-1)
class CriterionEmbedCWD(nn.Module):
    """Channel-wise distillation loss for 3-D embedding tensors.

    Same scheme as ``CriterionCWD`` but operates on (N, L, C) embeddings,
    which are transposed to (N, C, L) before normalization.
    """
    def __init__(self,norm_type='none',divergence='mse',temperature=1.0):
        super(CriterionEmbedCWD, self).__init__()
        # define normalize function
        if norm_type == 'channel':
            # softmax over the token/position dim of each channel
            self.normalize = EmbedChannelNorm()
        elif norm_type =='spatial':
            self.normalize = nn.Softmax(dim=1)
        elif norm_type == 'channel_mean':
            self.normalize = lambda x:x.view(x.size(0),x.size(1),-1).mean(-1)
        else:
            self.normalize = None
        self.norm_type = norm_type
        self.temperature = 1.0
        # define loss function
        if divergence == 'mse':
            self.criterion = nn.MSELoss(reduction='sum')
        elif divergence == 'kl':
            self.criterion = nn.KLDivLoss(reduction='sum')
            # NOTE(review): as in CriterionCWD, temperature only takes effect
            # for the 'kl' divergence — confirm intentional.
            self.temperature = temperature
        self.divergence = divergence
    def forward(self,embed_S, embed_T):
        """Return the scalar loss between student and teacher embeddings.

        Args:
            embed_S: student embeddings, shape (n, l, c); transposed internally.
            embed_T: teacher embeddings, same shape; detached (no teacher grads).
        """
        # (N, L, C) -> (N, C, L) so channels are the second dim.
        embed_S = embed_S.transpose(1, 2).contiguous()
        embed_T = embed_T.transpose(1, 2).contiguous()
        n,c,_ = embed_S.shape
        if self.normalize is not None:
            norm_s = self.normalize(embed_S/self.temperature)
            norm_t = self.normalize(embed_T.detach()/self.temperature)
        else:
            # NOTE(review): with no normalizer, only the first batch element
            # is compared — confirm this is the intended behavior.
            norm_s = embed_S[0]
            norm_t = embed_T[0].detach()
        if self.divergence == 'kl':
            # KLDivLoss expects log-probabilities as its first argument.
            norm_s = norm_s.log()
        loss = self.criterion(norm_s,norm_t)
        # NOTE(review): unlike CriterionCWD, there is no `else` normalization
        # branch here — loss is unscaled for non-channel norm types; confirm.
        if self.norm_type == 'channel' or self.norm_type == 'channel_mean':
            loss /= n * c
        return loss * (self.temperature**2)
def hcl_feaw(fstudent, fteacher, fea_weights=None):
    """Feature-weighted hierarchical context loss.

    Same multi-scale MSE scheme as ``hcl``, but each feature pair's loss is
    scaled by a per-stage weight before summation.

    Args:
        fstudent: list of student feature maps, each (N, C, H, W).
        fteacher: list of teacher feature maps, matching shapes.
        fea_weights: optional per-stage weights; defaults to
            [0.1, 0.1, 0.5, 1] (deeper stages weighted more).

    Returns:
        Scalar tensor: weighted sum of per-pair multi-scale losses.
    """
    if fea_weights is None:
        # Previously hard-coded; now overridable while keeping the old default.
        fea_weights = [0.1, 0.1, 0.5, 1]
    loss_all = 0.0
    for fs, ft, fea_w in zip(fstudent, fteacher, fea_weights):
        h = fs.shape[2]  # spatial height; only used to skip too-large pool sizes
        loss = F.mse_loss(fs, ft, reduction='mean')
        cnt = 1.0
        tot = 1.0
        for l in [4, 2, 1]:
            if l >= h:
                continue
            tmpfs = F.adaptive_avg_pool2d(fs, (l, l))
            tmpft = F.adaptive_avg_pool2d(ft, (l, l))
            cnt /= 2.0  # each coarser scale counts half as much
            loss += F.mse_loss(tmpfs, tmpft, reduction='mean') * cnt
            tot += cnt
        loss_all = loss_all + fea_w * (loss / tot)
    return loss_all
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
37366659638 | #!/usr/bin/env python3
from pylab import *
from numpy import *
import matplotlib.cm as cm
from common import *
idx_vec = range(1, num_k+1)
if with_FVD_solution == True :
if num_k > 1 :
fig, ax = plt.subplots(2, num_k, figsize=(9, 5.5))
else :
fig, ax = plt.subplots(1, 2, figsize=(9, 5.5))
fig.suptitle('Eigenfunctions, %s' % task_name)
else :
fig, ax = plt.subplots(1, num_k, figsize=(9, 5.5))
fig.suptitle('Eigenfunctions, %s' % task_name)
tot_min = -0.3
tot_max = 0.3
if with_FVD_solution :
for i in range(len(idx_vec)) :
if conjugated_eigvec_flag == 1 :
data_file = open('../%s/data/%s_FVD_%d_conjugated.txt' % (working_dir_name, eig_file_name_prefix, idx_vec[i]), 'r')
else :
data_file = open('../%s/data/%s_FVD_%d.txt' % (working_dir_name, eig_file_name_prefix, idx_vec[i]), 'r')
xmin, xmax, nx = [ float (x) for x in data_file.readline().split() ]
ymin, ymax, ny = [ float (x) for x in data_file.readline().split() ]
Z = np.loadtxt(data_file)
x = np.linspace(xmin, xmax, int(nx))
y = np.linspace(ymin, ymax, int (ny))
if num_k > 1 :
fvd_ax = ax[0, i]
else :
fvd_ax = ax[i]
im = fvd_ax.imshow( Z , cmap=cm.jet, extent = [xmin, xmax, ymin, ymax], vmin=tot_min , vmax=tot_max , origin='lower', interpolation='none' )
fvd_ax.set_title('FVD, %dth' % (idx_vec[i]))
if i == 0:
yticks(np.linspace(xmin, xmax, 5))
else :
plt.setp(fvd_ax.get_yticklabels(), visible=False)
sign_list = [1 for i in range(num_k)]
sign_list[0] = 1
if num_k > 1 :
sign_list[1] = -1
if num_k > 2 :
sign_list[2] = -1
for i in range(len(idx_vec)) :
base_name = '../%s/data/%s' % (working_dir_name, eig_file_name_prefix)
if conjugated_eigvec_flag == 1 :
data_file = open('%s_%d_conjugated.txt' % (base_name, idx_vec[i]), 'r')
else :
data_file = open('%s_%d.txt' % (base_name, idx_vec[i]), 'r')
xmin, xmax, nx = [ float (x) for x in data_file.readline().split() ]
ymin, ymax, ny = [ float (x) for x in data_file.readline().split() ]
Z = np.loadtxt(data_file, skiprows=0)
x = np.linspace(xmin, xmax, int (nx))
y = np.linspace(ymin, ymax, int (ny))
X, Y = np.meshgrid(x,y)
# tot_min = Z.min()
# tot_max = Z.max()
# print (tot_min, tot_max)
if with_FVD_solution :
if num_k > 1 :
nn_ax = ax[1, i]
else :
nn_ax = ax[num_k+i]
else :
if num_k > 1 :
nn_ax = ax[i]
else :
nn_ax = ax
im = nn_ax.imshow( sign_list[i] * Z , cmap=cm.jet, extent = [xmin, xmax, ymin, ymax], vmin=tot_min , vmax=tot_max , origin='lower', interpolation='none' )
nn_ax.set_title('NN, %dth' % (idx_vec[i]) )
if i == 0:
yticks(np.linspace(xmin, xmax, 5))
else :
plt.setp(nn_ax.get_yticklabels(), visible=False)
cax = fig.add_axes([0.92, 0.12, .04, 0.79])
#fig.colorbar(im, cax=cax, orientation='horizontal',cmap=cm.jet)
fig.colorbar(im, cax=cax, cmap=cm.jet)
#cax.tick_params(labelsize=10)
base_name = '../%s/fig/eigvec_nn_and_FVD' % (working_dir_name)
if conjugated_eigvec_flag == 1 :
fig_name = '%s_%d_conjugated.eps' % (base_name, num_k)
else :
fig_name = '%s_%d.eps' % (base_name, num_k)
savefig(fig_name)
print ("output figure: %s" % fig_name)
| zwpku/EigenPDE-NN | plot_scripts/plot_2d_evs_nn_and_FVD.py | plot_2d_evs_nn_and_FVD.py | py | 3,318 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "matplotlib.cm.jet",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.jet",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "matplotli... |
9988555176 | import traceback, re, json, logging
from ..file_utilities.filepath import Filepath
from ..entitlements.entitlement_manager import Entitlement_Manager
from .file_manager import File_Manager
from ..client_config import COLLECTIONS_WITH_BAD_LEVEL_IMAGES, UNLOCK_ALL_BUDDIES
from .. import shared
logger_errors = logging.getLogger('VIM_errors')
logger = logging.getLogger('VIM_main')
logger_inv = logging.getLogger('VIM_inventory')
class Buddy_Manager:
@staticmethod
def generate_blank_buddy_database():
if shared is not None:
client = shared.client
weapon_data = client.all_weapon_data
payload = {}
File_Manager.update_individual_inventory(payload, "buddies")
@staticmethod
async def update_inventory(**kwargs):
payload = json.loads(kwargs.get("payload"))
buddy_uuid = payload["buddyUuid"]
new_data = payload["newData"]
inventory = File_Manager.fetch_individual_inventory()["buddies"]
for uuid,buddy in inventory.items():
if uuid == buddy_uuid:
inventory[uuid] = new_data
break
File_Manager.update_individual_inventory(inventory, "buddies")
await shared.client.broadcast_loadout()
return inventory
@staticmethod
async def favorite_all(**kwargs):
payload = json.loads(kwargs.get("payload"))
favorite = payload["favorite"]
inventory = File_Manager.fetch_individual_inventory()["buddies"]
for uuid,buddy in inventory.items():
for instance_uuid,instance in buddy["instances"].items():
if not instance["locked"]:
instance["favorite"] = favorite
File_Manager.update_individual_inventory(inventory, "buddies")
await shared.client.broadcast_loadout()
return inventory
@staticmethod
def refresh_buddy_inventory():
valclient = shared.client.client
client = shared.client
old_data = None
try:
old_data = File_Manager.fetch_individual_inventory()["buddies"]
except KeyError:
old_data = None
except Exception as e:
logger_errors.error(traceback.format_exc())
logger.debug("making fresh buddy database")
Buddy_Manager.generate_blank_skin_database()
buddy_entitlements = Entitlement_Manager.fetch_entitlements(valclient, "buddy")["Entitlements"]
sanitized_buddy_entitlements = {}
for entitlement in buddy_entitlements:
if not entitlement["ItemID"] in sanitized_buddy_entitlements.keys():
sanitized_buddy_entitlements[entitlement["ItemID"]] = []
sanitized_buddy_entitlements[entitlement["ItemID"]].append(entitlement["InstanceID"])
inventory = {}
# iterate through each buddy
for buddy in client.all_buddy_data:
buddy_owned = False
owned_level_id = ""
levels = [level["uuid"] for level in buddy["levels"]]
if UNLOCK_ALL_BUDDIES:
buddy_owned = True
for level in levels:
if level in sanitized_buddy_entitlements.keys():
buddy_owned = True
owned_level_id = level
break
if buddy_owned:
buddy_payload = {}
existing_buddy_data = None
if old_data is not None:
try:
existing_buddy_data = old_data[buddy["uuid"]]
except:
pass
buddy_payload["display_name"] = buddy["displayName"]
buddy_payload["uuid"] = buddy["uuid"]
buddy_payload["display_icon"] = buddy["displayIcon"]
buddy_payload["level_uuid"] = owned_level_id
buddy_payload["instance_count"] = len(sanitized_buddy_entitlements[owned_level_id])
buddy_payload["instances"] = {}
for instance in sanitized_buddy_entitlements[owned_level_id]:
try:
buddy_payload["instances"][instance] = {
"uuid": instance,
"favorite": existing_buddy_data["instances"][instance]["favorite"] if existing_buddy_data is not None else False,
"super_favorite": existing_buddy_data["instances"][instance]["super_favorite"] if existing_buddy_data is not None else False,
"locked": existing_buddy_data["instances"][instance]["locked"] if existing_buddy_data is not None else False,
"locked_weapon_uuid": existing_buddy_data["instances"][instance]["locked_weapon_uuid"] if existing_buddy_data is not None else "",
"locked_weapon_display_name": existing_buddy_data["instances"][instance]["locked_weapon_display_name"] if existing_buddy_data is not None else "",
}
# remove me later
except:
buddy_payload["instances"][instance] = {
"uuid": instance,
"favorite": False,
"super_favorite": False,
"locked": False,
"locked_weapon_uuid": "",
"locked_weapon_display_name": "",
}
# check for invalid favorite/lock combinations
for instance in buddy_payload["instances"].values():
if instance["locked"]:
instance["favorite"] = False
if instance["locked_weapon_uuid"] == "" or instance["locked_weapon_display_name"] == "":
instance["locked"] = False
instance["locked_weapon_uuid"] = ""
instance["locked_weapon_display_name"] = ""
inventory[buddy["uuid"]] = buddy_payload
sort = sorted(inventory.items(), key=lambda x: x[1]["display_name"].lower())
inventory = {k: v for k, v in sort}
logger_inv.debug(f"buddy inventory:\n{json.dumps(inventory)}")
File_Manager.update_individual_inventory(inventory,"buddies")
return True | colinhartigan/valorant-inventory-manager | server/src/inventory_management/buddy_manager.py | buddy_manager.py | py | 6,398 | python | en | code | 150 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "file_manager.Fi... |
71449750907 | from multiprocessing import Process, Lock, Queue, Semaphore
import time
from random import random
buffer = Queue(10)
empty = Semaphore(2) # 缓存空余数
full = Semaphore(0) # 缓存占用数
lock = Lock()
class Consumer(Process):
def run(self):
global empty, buffer, full, lock
while True:
full.acquire()
lock.acquire() # 占用空间先acquire
num = buffer.get()
time.sleep(1)
print(f"Consumer remove an element..{num}")
lock.release()
empty.release()
class Producer(Process):
def run(self):
global empty, full, buffer, lock
while True:
empty.acquire()
lock.acquire()
num = random()
buffer.put(num)
time.sleep(1)
print("Producer append an element... {}".format(num))
lock.release()
full.release()
if __name__ == "__main__":
consumer = Consumer()
producer = Producer()
producer.daemon = consumer.daemon = True
producer.start()
consumer.start()
producer.join()
consumer.join()
print("Main process ended!!!") | haidongsong/spider_learn | zhang_xiaobo_spider_practice/producer_custom.py | producer_custom.py | py | 1,177 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "multiprocessing.Queue",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Semaphore",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Semaphore",
"line_number": 7,
"usage_type": "call"
},
{
"api_name":... |
36540773216 | # Import libraries
from requests import get
from json import dumps
# Your own local host's url
URL = "http://127.0.0.1:5000/"
# Names of active pages
mine_block = "mine_block"
get_chain = "get_chain"
is_valid = "is_valid"
# Define function for to check if API works and use the API.
def check_request_and_get_result(url, target_page_name, checked=False, needed_json_dumps=True):
target_url = url + target_page_name
request = get(target_url)
response = request.status_code
if checked:
return dumps(request.json(), sort_keys=True, indent=4) if needed_json_dumps else request.json()
else:
return "Congratulation, API works!" if response == 200 else "Something went wrong."
print(check_request_and_get_result(URL, get_chain, True))
| mrn01/Blockchain_Project | blockchain_davidcoin/Module 1 - Create a Blockchain/use_your_own_API.py | use_your_own_API.py | py | 795 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 20,
"usage_type": "call"
}
] |
19160774674 | import sys, os
from turtle import home
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import time
import pytest
import allure
from allure_commons.types import AttachmentType
from Tests.test_Base import BaseTest
from Locators.Locators import Locators
from Config.config import TestData
from Pages.LoginPage import LoginPage
from Locators.EnumsPackage.Enums import Sort_Productss
class Test_Home(BaseTest):
@pytest.mark.order()
def test_verify_home_page_title(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
title = homePage.get_title()
assert title == TestData.HOME_PAGE_TITLE
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_home_page_header(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
header = homePage.get_header_value()
allure.attach(self.driver.get_screenshot_as_png(), attachment_type=AttachmentType.JPG)
assert header == TestData.HOME_PAGE_HEADER
@pytest.mark.order()
def test_verify_cart_icon_visible(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
notification = homePage.is_cart_icon_exist()
assert notification
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.JPG)
@pytest.mark.order()
def test_verify_product_sort_container(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.product_sort_container()
for getValue in Sort_Productss:
sortingNames = self.driver.find_element_by_xpath(
"//*[@class='product_sort_container']//option[contains(text(),'%s')]" % str(getValue.value))
assert sortingNames.text == getValue.value
@pytest.mark.order()
def test_verify_shopping(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.do_shopping()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_sorting_Zto_A(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.product_sort_container()
homePage.sort_product_High_to_Low()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)
@pytest.mark.order()
def test_verify_logout_into_app(self):
self.loginPage = LoginPage(self.driver)
homePage = self.loginPage.do_login()
homePage.do_logout()
allure.attach(self.driver.get_screenshot_as_png(),attachment_type=AttachmentType.PNG) | sawrav-sharma/py_new_dd | Tests/test_HomePage.py | test_HomePage.py | py | 2,879 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line... |
44966506931 | # import cv2
#
# filename="imgmirror.jpg"
# img= cv2.imread('image.jpg')
# res= img.copy()
# for i in range(img.shape[0]):
# for j in range(img.shape[1]):
# res[i][img.shape[1]-j-1]= img[i][j]
#
# cv2.imshow('image', res)
# cv2.imwrite(filename,res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# import cv2
#
# img = cv2.imread("no entry.png")
#
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# cv2.imshow("image ori", img)
# cv2.imshow("image gray", gray)
# filename="noentrygray.jpg"
# cv2.imwrite(filename,gray)
# cv2.waitKey(0)
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
import numpy as np
import cv2
#############################################
#frameWidth = 640 # CAMERA RESOLUTION
#frameHeight = 480
#brightness = 180
#threshold = 0.75 # PROBABLITY THRESHOLD
font = cv2.FONT_HERSHEY_SIMPLEX
##############################################
# SETUP THE VIDEO CAMERA
cap = cv2.VideoCapture(0)
#cap.set(3, frameWidth)
#cap.set(4, frameHeight)
#cap.set(10, brightness)
imageDimesions = (32, 32, 3)
noOfClasses = 3
sampleNum=0
no_Of_Filters = 60
size_of_Filter = (5, 5) # THIS IS THE KERNEL THAT MOVE AROUND THE IMAGE TO GET THE FEATURES.
# THIS WOULD REMOVE 2 PIXELS FROM EACH BORDER WHEN USING 32 32 IMAGE
size_of_Filter2 = (3, 3)
size_of_pool = (2, 2) # SCALE DOWN ALL FEATURE MAP TO GERNALIZE MORE, TO REDUCE OVERFITTING
no_Of_Nodes = 500 # NO. OF NODES IN HIDDEN LAYERS
model = Sequential()
model.add((Conv2D(no_Of_Filters, size_of_Filter, input_shape=(imageDimesions[0], imageDimesions[1], 1),
activation='relu'))) # ADDING MORE CONVOLUTION LAYERS = LESS FEATURES BUT CAN CAUSE ACCURACY TO INCREASE
model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool)) # DOES NOT EFFECT THE DEPTH/NO OF FILTERS
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(no_Of_Nodes, activation='relu'))
model.add(Dropout(0.5)) # INPUTS NODES TO DROP WITH EACH UPDATE 1 ALL 0 NONE
model.add(Dense(noOfClasses, activation='softmax')) # OUTPUT LAYER
# COMPILE MODEL
model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights('91model.h5')
def grayscale(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
def equalize(img):
img = cv2.equalizeHist(img)
return img
def preprocessing(img):
img = grayscale(img)
img = equalize(img)
img = img / 255
return img
# def getCalssName(classNo):
# if classNo == 0:
# return 'No Entry'
# elif classNo == 1:
# return 'Turn Right'
# elif classNo == 2:
# return 'Turn Left'
# elif classNo == 3:
# return 'Go Ahead'
# cascLeft = "all.xml"
# cascRight = "all.xml"
# cascStop = "all.xml"
cascLeft = "turnLeft_ahead.xml"
cascRight = "turnRight_ahead.xml"
cascStop = "stopsign_classifier.xml"
#speedLimit = "lbpCascade.xml"
leftCascade = cv2.CascadeClassifier(cascLeft)
rightCascade = cv2.CascadeClassifier(cascRight)
stopCascade = cv2.CascadeClassifier(cascStop)
#speedCascade = cv2.CascadeClassifier(speedLimit)
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
left = leftCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
right = rightCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
stop = stopCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
# speed = speedCascade.detectMultiScale(
# gray,
# scaleFactor=1.1,
# minNeighbors=5,
# minSize=(30, 30)
# )
# Draw a rectangle around the faces
for (x, y, w, h) in left:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
# if probabilityValue > threshold:
# cv2.putText(frame, str(tessss) + "%", (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 1,
# (0, 255, 0), 2, cv2.LINE_AA)
for (x, y, w, h) in right:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
#probabilityValue = np.amax(prediction)
# if probabilityValue > threshold:
# cv2.putText(frame, str(round(probabilityValue * 100, 2)) + "%", (x, y+h), cv2.FONT_HERSHEY_SIMPLEX, 1,
# (0, 255, 0), 2, cv2.LINE_AA)
for (x, y, w, h) in stop:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
prediction = model.predict(cropped_img)
#sampleNum = sampleNum + 1
rambu = ('Stop', 'Turn Right', 'Turn Left')
maxindex = rambu[int(np.argmax(prediction))]
cv2.putText(frame, maxindex, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
#cv2.imwrite("TrainingImage\ " + str(sampleNum) + ".jpg", frame)
# for (x ,y, w, h) in speed:
# cv2.rectangle(frame, (x ,y), (x+w, y+h), (0, 255, 0), 2)
# roi_gray = gray[y:y + h, x:x + w]
# cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (32, 32)), -1), 0)
# prediction = model.predict(cropped_img)
#
# rambu = ('Stop', 'Turn Right', 'Turn Left', 'Max Speed 50')
# maxindex = rambu[int(np.argmax(prediction))]
#
# cv2.putText(frame, maxindex, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| nicolafeby/Self-driving-car-robot-cnn | testcamex.py | testcamex.py | py | 7,221 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 62,
"usage_type": "call"
},
{
"api_name":... |
23873826885 | import cv2
import time
import numpy as np
import supervision as sv#this is a Roboflow open source libray
from ultralytics import YOLO
from tqdm import tqdm #this is a tool for visualising progress bars in console. Remove for production code as might slow things down
COLORS = sv.ColorPalette.default()
#Define entry and exit areas on image (got the cordinates by drawing zones using https://blog.roboflow.com/polygonzone/)
#Zone_in is garden bottom half and front of house bottom half - red colour
ZONE_IN_POLYGONS = [
np.array([[640, 154],[0, 242],[0, 360],[640, 360]]),
np.array([[650, 162],[986, 158],[990, 360],[646, 360]]),
]
#Zone_out is garden top half and front of house top half - green colour
ZONE_OUT_POLYGONS = [
np.array([[642, 0],[978, 0],[982, 142],[654, 146]]),
np.array([[0, 0],[634, 0],[638, 146],[2, 222]]),
]
def initiate_poylgon_zones(polygons:list[np.ndarray],frame_resolution_wh:tuple[int,int],triggering_position:sv.Position=sv.Position.CENTER)->list[sv.PolygonZone]:
return[sv.PolygonZone(polygon,frame_resolution_wh,triggering_position)for polygon in polygons]
class DetectionsManager:
def __init__(self) -> None:
self.tracker_id_to_zone_id: Dict[int, str] = {}
self.total_count: int = 5
#update function takes the list of detections triggered by a zone and maps the tracker ID to either in or out
def update(self,detections: sv.detection, detections_zone_in: list[sv.detection], detections_zone_out: list[sv.detection]) -> sv.detection:
for detection in detections_zone_in:
#print('Zone in detection ', detection)
if np.any(detection.tracker_id):#this tests if there are any tracker id's. If not the for loop below crashes
for tracker_id in detection.tracker_id:
if tracker_id in self.tracker_id_to_zone_id:
#print(self.tracker_id_to_zone_id[tracker_id])
if self.tracker_id_to_zone_id[tracker_id] == 'out':#if current value is out then this detection has crossed zones
self.total_count += 1 #add one to the count as an 'out' has become an 'in'
self.tracker_id_to_zone_id[tracker_id] = 'in' # and update zone in dictionary to reflect this
else:
self.tracker_id_to_zone_id[tracker_id] = 'in' #this means tracker ID is new so add to the dictionary
for detection in detections_zone_out:
#print('Zone out detections ', detection)
if np.any(detection.tracker_id): #this tests if there are any tracker id's. If not the for loop below crashes
for tracker_id in detection.tracker_id:
if tracker_id in self.tracker_id_to_zone_id:
#print(self.tracker_id_to_zone_id[tracker_id])
if self.tracker_id_to_zone_id[tracker_id] == 'in':#if current value is in then this detection has crossed zones
self.total_count -= 1 #minus one to the count as an 'in' has become an 'out'
self.tracker_id_to_zone_id[tracker_id] = 'out' # and update zone in dictionary to reflect this
else:
self.tracker_id_to_zone_id[tracker_id] = 'out' #this means tracker ID is new so add to the dictionary
#Need new statement which filters the detections so it only shows those from within a zone - although not sure that matters for this use case as zones cover whole field of view
#detections.class_id = np.vectorize(lambda x: self.tracker_id_to_zone_id.get(x, -1))(detections.tracker_id)#i don't understand what this is doing so need to come back to it
return self.total_count
class VideoProcessor:
def __init__(self, source_weights_path: str, source_video_path: str, target_video_path: str = None,
confidence_threshold: float = 0.1, iou_threshold: float = 0.7,) -> None:
self.source_weights_path = source_weights_path
self.conf_threshold = confidence_threshold
self.iou_threshold = iou_threshold
self.source_video_path = source_video_path
self.target_video_path = target_video_path
self.model = YOLO(self.source_weights_path)
self.tracker = sv.ByteTrack()
self.box_annotator = sv.BoxAnnotator(color=COLORS)
self.trace_annotator = sv.TraceAnnotator(color=COLORS, position=sv.Position.CENTER, trace_length=100, thickness=2)
self.video_info = sv.VideoInfo.from_video_path(source_video_path)
self.video_info.fps = 25 # setting the frames per second for writing the video to 25 instead of 30 as original cameras are at 25fps
print(self.video_info)
self.zone_in = initiate_poylgon_zones(ZONE_IN_POLYGONS,self.video_info.resolution_wh,sv.Position.CENTER)
self.zone_out = initiate_poylgon_zones(ZONE_OUT_POLYGONS,self.video_info.resolution_wh,sv.Position.CENTER)
self.detections_manager = DetectionsManager()
def process_video(self):
frame_generator = sv.get_video_frames_generator(self.source_video_path)
if self.target_video_path:
with sv.VideoSink(self.target_video_path, self.video_info) as f:
for frame in tqdm(frame_generator, total=self.video_info.total_frames):
t1 = cv2.getTickCount()
processed_frame = self.process_frame(frame)
t2 = cv2.getTickCount()
ticks_taken = (t2 - t1) / cv2.getTickFrequency()
FPS = 1 / ticks_taken
cv2.putText(processed_frame, 'FPS: {0:.2f}'.format(FPS), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 2, cv2.LINE_AA)
f.write_frame(processed_frame)
else:
for frame in frame_generator:
t1 = cv2.getTickCount()
processed_frame = self.process_frame(frame)
t2 = cv2.getTickCount()
ticks_taken = (t2 - t1) / cv2.getTickFrequency()
FPS = 1 / ticks_taken
cv2.putText(processed_frame,'FPS: {0:.2f}'.format(FPS), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 2, cv2.LINE_AA)
cv2.imshow("Count of Customers Indoors", processed_frame)
if cv2.waitKey(1) & 0xFF ==ord("q"):
break
cv2.destroyAllWindows()
    def process_frame(self,frame: np.ndarray)-> np.ndarray:
        """Detect, track and zone-filter people in one frame, then annotate it.

        Pipeline: YOLO inference -> keep class 0 (person) -> ByteTrack IDs ->
        test each detection against the in/out polygon zones -> update the
        DetectionsManager to get the running in-store count.
        """
        #consider resizing the frame to 180x640 for both training and inference to see if this speeds things up
        result = self.model(frame, verbose = False, conf=self.conf_threshold,iou=self.iou_threshold)[0]#add the device parameter to run this on the Mac's GPU which significantly speeds up inference
        detections = sv.Detections.from_ultralytics(result)#pass the YOLO8 inference results through supervision to use their detections object which is easier to process
        detections = detections[detections.class_id == 0]#filter the list of detections so it only shows category '0' which is people
        detections = self.tracker.update_with_detections(detections)#pass the detections through the tracker to add tracker ID as additional field to detections object
        #filter out detections not triggered within a zone and add the detections to lists for zone in and zone out
        detections_zone_in = []
        detections_zone_out = []
        for zone_in, zone_out in zip(self.zone_in,self.zone_out):
            detection_zone_in = detections[zone_in.trigger(detections)]#this is a Supervision function to test if a detection occurred within a zone
            detections_zone_in.append(detection_zone_in)
            detection_zone_out = detections[zone_out.trigger(detections)]#this is a Supervision function to test if a detection occurred within a zone
            detections_zone_out.append(detection_zone_out)
        total_count = self.detections_manager.update(detections,detections_zone_in,detections_zone_out)#call to the detections manager class 'rules engine' for working out which zone a detection was triggered in
        return self.annotate_frame(frame,detections,total_count)
    def annotate_frame(self,frame: np.ndarray, detections: sv.Detections,total_count:int)-> np.ndarray:
        """Draw zones, detection boxes, traces and the running count on a copy of the frame."""
        annotated_frame = frame.copy()
        for i,(zone_in,zone_out) in enumerate(zip(self.zone_in,self.zone_out)):#use enumerate so you get the index [i] automatically
            annotated_frame = sv.draw_polygon(annotated_frame,zone_in.polygon,COLORS.colors[0])#draw zone in polygons
            annotated_frame = sv.draw_polygon(annotated_frame,zone_out.polygon,COLORS.colors[1])#draw zone out polygons
        if detections:#need to check some detections are found before adding annotations, otherwise list comprehension below breaks
            labels = [f"#{tracker_id}" for tracker_id in detections.tracker_id]#list comprehension to return list of tracker_ID's to use in label
            annotated_frame = self.box_annotator.annotate(annotated_frame,detections,skip_label=True)#add in labels = labels if want tracker ID annotated on frame
            annotated_frame = self.trace_annotator.annotate(annotated_frame,detections)
        # Count banner (fixed pixel coordinates; assumes the camera resolution
        # used during development -- TODO confirm for other inputs).
        annotated_frame = sv.draw_text(scene=annotated_frame, text="Count of People Currently In", text_anchor=sv.Point(x=1130, y=150), text_scale=0.6, text_thickness=1,background_color=COLORS.colors[0])
        annotated_frame = sv.draw_text(scene=annotated_frame,text=str(total_count),text_anchor=sv.Point(x=1118, y=226),text_scale=2,text_thickness=5,background_color=COLORS.colors[0],text_padding=40)
        return annotated_frame
# Script entry point: run the people counter over the demo clip.
# target_video_path is commented out, so process_video() opens a live
# cv2 window instead of writing an annotated output file.
processor = VideoProcessor(
    source_weights_path='yolov8nPeopleCounterV2.pt',
    source_video_path='/Users/tobieabel/Desktop/video_frames/Youtube/v3_a demo.mp4',
    #target_video_path='/Users/tobieabel/Desktop/video_frames/Youtube/v3_b demo_annotated.mp4',
)
processor.process_video()
| tobieabel/demo-v3-People-Counter | Demo v3.py | Demo v3.py | py | 10,021 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "supervision.ColorPalette.default",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "supervision.ColorPalette",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name... |
1965038380 | # -*- coding: utf-8 -*-
import json
import requests
import os
import time
import log21
from kafka import KafkaConsumer
access_token = os.environ.get("ACCESS_TOKEN")
kafka_host = os.environ.get("KAFKA_HOST")
kafka_port = os.environ.get("KAFKA_PORT", "9092")
kafka_topic = os.environ.get("KAFKA_TOPIC")
def dingtalk_robot(text):
    """POST a markdown alert message to the DingTalk robot webhook.

    Uses the module-level ``access_token``; prints the API reply for
    debugging.
    """
    url = "https://oapi.dingtalk.com/robot/send?access_token=" + access_token
    headers = {'Content-Type': 'application/json'}
    data_dict = {
        "msgtype": "markdown",
        "markdown": {
            "title": "日志告警",
            "text": text
        }
    }
    json_data = json.dumps(data_dict)
    response = requests.post(url, data=json_data, headers=headers)
    print(response.text)  # expected on success: {"errcode":0,"errmsg":"ok"}
def test_to_json(message):
    """Parse a Kafka message body and return its text.content field.

    Assumes the payload looks like {"text": {"content": ...}} -- raises
    AttributeError if the "text" key is missing (get() returns None).
    """
    data = json.loads(message, strict=False)
    return data.get('text').get('content')
def kafka_to_dingtalk():
    """Consume messages from the configured Kafka topic and forward each to DingTalk.

    Runs forever; sleeps 4s between forwards -- presumably to stay under the
    DingTalk robot rate limit (confirm).
    """
    # Fall back to the default Kafka port when KAFKA_PORT is empty.
    if kafka_port == '':
        bootstrap_server = '{}:{}'.format(kafka_host,'9092')
    else:
        bootstrap_server = '{}:{}'.format(kafka_host, kafka_port)
    consumer = KafkaConsumer(
        kafka_topic,
        bootstrap_servers=bootstrap_server,
        auto_offset_reset='latest',
        api_version=(0, 10, 2)
    )
    log21.print(type(consumer))
    for msg in consumer:
        dingtalk_massage = test_to_json(msg.value.decode())
        time.sleep(4)
        dingtalk_robot(dingtalk_massage)
if __name__ == '__main__':
    # Warn about missing configuration before starting the consumer.
    # Uses `not x` so both '' and None (env var unset) are caught.
    if not access_token:
        log21.print(log21.get_color('#FF0000') + '未提供钉钉机器人ACCESS_TOKEN' )
    if not kafka_host:
        log21.print(log21.get_color('#FF0000') + '未配置Kafka的环境变量KAFKA_HOST' )
    # BUG FIX: this branch previously re-tested kafka_host instead of
    # kafka_topic, so a missing KAFKA_TOPIC was never reported.
    if not kafka_topic:
        log21.print(log21.get_color('#FF0000') + '未配置Kafka的环境变量KAFKA_TOPIC' )
    kafka_to_dingtalk()
| zxzmcode/oTools | python/Alnot/Dingtalk/kafka_to_Dingtalk/dingtalk.py | dingtalk.py | py | 1,832 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_nu... |
6196779715 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022/9/12 2:08 下午
# @Author : LiangJun
# @Filename : test_demo2.py
import unittest
from ddt import ddt, data
# Parametrized inputs: ddt's @data(*test_datas) below generates one test
# case per dict in this list.
test_datas = [
    {'id': 1, 'title': '测试用例1'},
    {'id': 2, 'title': '测试用例2'},
    {'id': 3, 'title': '测试用例3'}
]
@ddt
class TestDemo(unittest.TestCase):
    """Demonstrates ddt data-driven testing: one run of test_demo1 per entry."""
    @data(*test_datas)
    def test_demo1(self, i):
        # `i` is the whole dict for the current data row.
        print(i)
| lj5092/py14_Test_Open | py14_04day/dome/test_demo2.py | test_demo2.py | py | 427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "ddt.data",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ddt.ddt",
"line_number": 18,
"usage_type": "name"
}
] |
9003224390 | import json
from django.http import HttpResponse
__author__ = 'diraven'
class HttpResponseJson(HttpResponse):
    """HTTP response that wraps a payload in a standard JSON envelope.

    The body always has the shape
    ``{"data": ..., "message": ..., "success": ...}`` and is served with
    ``application/json``.
    """

    def __init__(self, data=None, is_success=False, message=''):
        envelope = json.dumps({
            'data': data,
            'message': message,
            'success': is_success
        })
        super(HttpResponseJson, self).__init__(envelope, content_type="application/json")
{
"api_name": "django.http.HttpResponse",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
}
] |
39463845510 |
import time
import picamera
import sqlite3
import signal
import os
import shutil
# Register this process in the shared PID database so other components can
# find and signal it (SIGUSR1 handler is installed below).
pidDB = sqlite3.connect('/home/pi/System/PID.db')
pidCursor = pidDB.cursor()
actualPID = os.getpid()
print("I'm PID " + str(actualPID))
# Assumes a PID table with (name, value) rows already exists -- TODO confirm.
pidCursor.execute("""UPDATE PID SET value = ? WHERE name = ?""", (actualPID, "camera"))
pidDB.commit()
"""Function to take timelapse"""
def CameraFootage(signum, stack):
    """Signal handler: capture a 20-shot timelapse when SIGUSR1 (10) arrives.

    Wipes and recreates the media directory, then saves sequentially
    numbered JPEGs one second apart.
    """
    print("Received:" + str(signum))
    if signum == 10:
        print("Beginning timelapse")
        with picamera.PiCamera() as camera:
            camera.start_preview()
            # Stamp the capture time on the frames.
            camera.annotate_text = time.strftime('%Y-%m-%d %H:%M:%S')
            time.sleep(1)
            # Clear out the previous timelapse before writing new images.
            shutil.rmtree('/home/dev/www/public/media/')
            os.mkdir('/home/dev/www/public/media')
            i = 0
            for filename in camera.capture_continuous('/home/dev/www/public/media/img{counter:03d}.jpg'):
                if i < 20:
                    print("Captured %s" %filename)
                    time.sleep(1)
                    i = i +1
                else:
                    # 20 frames captured: stop this timelapse run.
                    i = 0
                    break
# Trigger a timelapse whenever another process sends SIGUSR1.
signal.signal(signal.SIGUSR1, CameraFootage)
# Idle loop: keep the process alive so it can receive signals.
while True:
    time.sleep(3)
| jeremyalbrecht/Alarm-RPI | camera.py | camera.py | py | 1,001 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "picamera.PiCamera",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_... |
26986909966 | # -*- coding: utf-8 -*-
import pytest
from nameko.testing.utils import get_extension
from nameko.testing.waiting import wait_for_call
from nameko_grpc.client import Client
from nameko_grpc.entrypoint import GrpcServer
class TestCloseSocketOnClientExit:
    """Verifies the gRPC client closes its underlying socket on stop()."""
    @pytest.fixture(params=["server=nameko"])
    def server_type(self, request):
        # Strip the "server=" prefix, leaving the server implementation name.
        return request.param[7:]
    def test_close_socket(self, server, load_stubs, spec_dir, grpc_port, protobufs):
        """Regression test for https://github.com/nameko/nameko-grpc/issues/39"""
        stubs = load_stubs("example")
        client = Client(
            "//localhost:{}".format(grpc_port),
            stubs.exampleStub,
            "none",
            "high",
            False,
        )
        proxy = client.start()
        container = server
        # Dig out the live socket connection held by the server's channel pool.
        grpc_server = get_extension(container, GrpcServer)
        connection_ref = grpc_server.channel.conn_pool.connections.queue[0]
        connection = connection_ref()
        # Sanity check: a round-trip works before shutdown.
        response = proxy.unary_unary(protobufs.ExampleRequest(value="A"))
        assert response.message == "A"
        # The actual regression check: stopping the client must close the socket.
        with wait_for_call(connection.sock, "close"):
            client.stop()
| nameko/nameko-grpc | test/test_connection.py | test_connection.py | py | 1,178 | python | en | code | 57 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.client.Client",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nameko.testing.utils.get_extension",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
72946561467 | #! -*- coding=utf-8 -*-
import os
import sys
filepath = os.path.abspath(__file__)
sys.path.append(os.path.dirname(os.path.dirname(filepath)))
import threading
import time
from datetime import datetime
from multiprocessing import Process
from machines.machineVPN import MachineVPN
# from machines.machineWujiVPN import MachineVPN
from machines.machineXposeHook import MachineXHook as Machine008
from appium4droid import webdriver
from bootstrap import setup_boostrap
from TotalMachine import WorkMachine
from appium4droid.support.ui import WebDriverWait
from machines.StateMachine import Machine
import random
import requests
import re
class TotalMachine(WorkMachine):
    """Automation loop for a device: rotate VPN, run Xposed-hook tasks,
    relaunch the target app and record the external IP on each cycle."""
    def load_task_info(self):
        # This machine carries no externally supplied task list.
        return []
    def setup_machine(self):
        dr = self.driver
        self.machine008 = Machine008(dr)
        self.machine008.task_schedule = ["record_file", "clear_data", "modify_data_suiji"] # 007 task list
        # Display name of the app to launch from the home screen.
        self.appname = "testsdk"
    def main_loop(self):
        """Run the VPN/hook/app cycle forever; errors are logged and the loop retries."""
        dr = self.driver
        m008 = self.machine008
        while True:
            try:
                # Go to the home screen (pressed twice to be safe).
                dr.press_keycode(3)
                time.sleep(1)
                dr.press_keycode(3)
                time.sleep(1)
                # clear background apps (disabled)
                # dr.press_keycode(82)
                # time.sleep(1)
                # WebDriverWait(dr, 10).until(lambda d: d.find_element_by_id("com.android.systemui:id/clearButton")).click()
                # time.sleep(1)
                # Rotate to a fresh VPN endpoint, then run the hook task machine.
                MachineVPN(dr).run()
                m008.run()
                # dr.press_keycode(3)
                # time.sleep(1)
                # dr.press_keycode(3)
                # time.sleep(1)
                # WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
                # time.sleep(5)
                # start speed-up (disabled)
                # dr.press_keycode(3)
                # time.sleep(1)
                # WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name("GMD Speed Time")).click()
                # time.sleep(1)
                # WebDriverWait(dr, 30).until(lambda d: d.find_element_by_id("com.gmd.speedtime:id/buttonStart")).click()
                # time.sleep(2)
                dr.press_keycode(3)
                time.sleep(1)
                # Launch the target app from the home screen by its display name.
                WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
                time.sleep(15)
                # record the current external IP
                self.log_ip()
                dr.press_keycode(3)
                time.sleep(5)
                WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name(self.appname)).click()
                time.sleep(1)
                # stop speed-up (disabled)
                # dr.press_keycode(3)
                # time.sleep(1)
                # WebDriverWait(dr, 30).until(lambda d: d.find_element_by_name("嘀嗒拼车")).click()
                # time.sleep(5)
                # WebDriverWait(dr, 30).until(lambda d: d.find_element_by_id("com.gmd.speedtime:id/buttonStop")).click()
                # time.sleep(1)
                # dr.press_keycode(3)
                # time.sleep(1)
            except Exception as e:
                print("somting wrong")
                print(e)
            finally:
                pass
            print("Again\n")
        # NOTE(review): unreachable while the loop above never breaks.
        return self.exit
    def log_ip(self):
        """Fetch the external IP via chinaz.com and append it to /sdcard/1/ip.log."""
        WEB_URL = 'http://ip.chinaz.com/getip.aspx'
        r = requests.get(WEB_URL)
        print(r.text)
        match = re.search(r'ip:\'(.+)\'\,address:\'(.+)\'', r.text)
        if match:
            print(match.group(1))
            print(match.group(2))
            ip = match.group(1)
            addr = match.group(2)
            with open('/sdcard/1/ip.log', 'a') as f:
                f.write('\n%s %s' % (ip, addr))
# Script entry point: build the machine and start its state loop.
if __name__ == "__main__":
    TM = TotalMachine()
    TM.run()
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number... |
31534303974 | ## LESSON 6 Q1: AUDITING - ITERATIVE PARSING/SAX PARSE using ITERPARSE
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Your task is to use the iterative parsing to process the map file and
find out not only what tags are there, but also how many, to get the
feeling on how much of which data you can expect to have in the map.
The output should be a dictionary with the tag name as the key
and number of times this tag can be encountered in the map as value.
Note that your code will be tested with a different data file than the 'example.osm'
"""
import xml.etree.ElementTree as ET
import pprint
def count_tags(filename):
    """Iteratively parse an OSM XML file and count occurrences of each tag name.

    Uses ET.iterparse so the whole document is never held in memory; each
    element is cleared once counted.

    Args:
        filename: path or file-like object accepted by ET.iterparse.

    Returns:
        dict mapping tag name -> number of occurrences.
    """
    # BUG FIX: the original wrapped this in `except 'NoneType':`, which is
    # invalid in Python 3 (catching a str raises TypeError) and was dead
    # code anyway -- removed.
    tagdict = {}
    for event, elem in ET.iterparse(filename):
        # dict.get collapses the membership-test-then-increment idiom.
        tagdict[elem.tag] = tagdict.get(elem.tag, 0) + 1
        # Free the element to keep memory bounded on large files.
        elem.clear()
    return tagdict
def test():
    """Self-check against a known fixture file.

    NOTE(review): the header comment mentions 'example.osm' but this reads
    'examples.osm' -- confirm which filename is intended.
    """
    tags = count_tags('examples.osm')
    pprint.pprint(tags)
    # Expected tag histogram for the fixture map extract.
    assert tags == {'bounds': 1,
                     'member': 3,
                     'nd': 4,
                     'node': 20,
                     'osm': 1,
                     'relation': 1,
                     'tag': 7,
                     'way': 1}
if __name__ == "__main__":
    test()
| rjshanahan/Data_Wrangling_with_MongoDB | Lesson 1_Udacity_MongoDB_CSV+JSON.py | Lesson 1_Udacity_MongoDB_CSV+JSON.py | py | 1,349 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "xml.etree.ElementTree.iterparse",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pprint.pprint",
"line_number": 37,
"usage_type": "call"
}
] |
# Scrape headline links from the Dantri homepage.
url = "http://dantri.com.vn/"
output_file_name = "news.xlsx"
#Step 1: Download information on the Dantri website
from urllib.request import urlopen
from bs4 import BeautifulSoup
#1.1: Open a connection
conn = urlopen(url)
#1.2: read
raw_data = conn.read() #byte
#1.3: Decode
html_content = raw_data.decode('utf-8')
# Faster way
# from urllib.request import urlopen
# html_content = urlopen(url).read().decode('utf-8')
# print(html_content)
# print(html_content)
#How to save html_content as a file (in case internet is weak)
# html_file = open("dantri.html","wb") #write: byte
# html_file.write(raw_data)
# html_file.close()
#Step 2: Extract ROI (Region of interest)
#Create a soup
soup = BeautifulSoup(html_content, "html.parser")
# print(soup.prettify)
ul = soup.find("ul", "ul1 ulnew")
# find() returns only the first match
# print(ul.prettify())
li_list = ul.find_all("li")
# find_all() returns every match
# for li in li_list:
#     print(li)
#     print("***" * 10)
#Step 3: Extract News
news_list = []
for li in li_list:
    # li = li_list[0]
    # h4 = li.h4 #h4 = li.find("h4")
    # a = h4.a
    #better way:
    # a = li.h4.a (or li.a)
    a = li.h4.a
    # Links on the page are relative, so prefix the site URL.
    href = url + a["href"]
    title = a.string
    news = {
        "title": title,
        "link": href
    }
    news_list.append(news)
print(news_list)
| taanh99ams/taanh-lab-c4e15 | Lab 2/dan_tri_extract.py | dan_tri_extract.py | py | 1,322 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 33,
"usage_type": "call"
}
] |
18523322737 | import xlrd
import xlwt
from featureComp import *
from createmat import *
def findRank(path2):
    """Append each team's league position after every matchday to the global team_rank.

    Reads Match1..Match38 workbooks under `path2`; rows 1-20 of the first
    sheet are teams (column 2 = name, column 0 = rank) -- presumably a
    20-team league; confirm against the workbook layout.
    """
    for i in range(1,39):
        path3=path2+str(i)+'.xlsx'
        matchday=xlrd.open_workbook(path3)
        sheet1=matchday.sheet_by_index(0)
        #print path3,'\n'
        for j in range(1,21):
            team_rank[sheet1.cell(j,2).value.strip()].append(sheet1.cell(j,0).value)
def resetMatches(matches_played):
    """Reset every team's played-match counter to zero, in place.

    Args:
        matches_played: dict mapping team name -> matches played this season.
    """
    # Iterate the dict directly instead of .keys() (same semantics, idiomatic).
    for team in matches_played:
        matches_played[team] = 0
# Per-team accumulators keyed by stripped team name.
teams={}
teamprofile={}
matches_played={}
team_rank={}
train_book=xlwt.Workbook()
sheet1=train_book.add_sheet("sheet 1")
book = xlrd.open_workbook("Season_table.xlsx")
first_sheet = book.sheet_by_index(0)
# (win_weight, loss_weight, points) tiers used by FORMmat -- confirm semantics.
form_table=([0.75,0.15,20],[0.6,0.25,16],[0.4,0.4,12],[0.15,0.6,10])
# Initialise one entry per team listed in the season table (rows 1-36).
for i in range(1,37):
    teams[first_sheet.cell(i,0).value.strip()]=[]
    teamprofile[first_sheet.cell(i,0).value.strip()]=[]
    matches_played[first_sheet.cell(i, 0).value.strip()]=0
    team_rank[first_sheet.cell(i,0).value.strip()]=[]
num=2005
match=1
featureobj=Feature()
# Process ten seasons of fixtures and matchday tables (2005..2014).
for j in range(10):
    path='Fixtures/'+str(num)+'.xlsx'
    path2='Match Days/'+str(num)+'/Match'
    fbook=xlrd.open_workbook(path)
    first_sheet = fbook.sheet_by_index(0)
    findRank(path2)
    AQDQmat(first_sheet,teams)
    FORMmat(first_sheet, team_rank, teams, matches_played, form_table)
    resetMatches(matches_played)
    featureobj.featureCompute(first_sheet,sheet1,teams,matches_played,teamprofile)
    num+=1
train_book.save("training.xls")
# NOTE(review): saved as "training.xls" above but reopened as
# "training.xlsx" here -- confirm which file is intended.
rtrain_book=xlrd.open_workbook('training.xlsx')
svmdatasheet=rtrain_book.sheet_by_index(0)
with open('svmdataformat', 'w') as f:
    featureobj.SVMformat(svmdatasheet,f)
f.closed
'''
for k,v in teams.iteritems():
    print k
    print '------------------'
    print v
'''
# Teams flagged (value 1) in columns 9 or 10 get a per-team profile workbook.
teamslist=[]
for i in range(1,37):
    for j in (9,10):
        if int(book.sheet_by_index(0).cell(i,j).value)==1:
            teamslist.append(book.sheet_by_index(0).cell(i,0).value.strip())
for names in teamslist:
    train_book=xlwt.Workbook()
    sheet1=train_book.add_sheet("sheet 1")
    for i in range(len(teamprofile[names])):
        for j in range(4):
            sheet1.row(i).write(j,teamprofile[names][i][j])
    train_book.save(str(names)+".xlsx")
| kushg18/football-match-winner-prediction | main.py | main.py | py | 2,107 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workboo... |
75126804667 | from mock import Mock
import pytest
import json
from datetime import date, datetime
from src.smallquery.functions.run_query import app
@pytest.fixture()
def run_query_event():
    """
    Generates a Run Query lambda event: a SQL query plus a row limit.
    """
    return {
        'query': 'select * from unit/test.parquet',
        'limit': 10,
    }
def test_results_serializer():
    """results_serializer must render dates/datetimes as ISO-8601 strings
    while leaving JSON-native types untouched."""
    a_date = date(year=2020, month=11, day=10)
    a_date_time = datetime(year=2021, month=6, day=24,
                           hour=13, minute=3, second=12, microsecond=2323)
    a_str = 'ksjdf'
    a_int = 78
    data = {
        'some_date': a_date,
        'some_date_time': a_date_time,
        'some_string': a_str,
        'some_int': a_int,
    }
    expected_json = '{"some_date": "2020-11-10", "some_date_time": "2021-06-24T13:03:12.002323", "some_string": "ksjdf", "some_int": 78}'
    actual_json = json.dumps(data, default=app.results_serializer)
    assert expected_json == actual_json
def test_handler(run_query_event):
    """lambda_handler must connect to the DB and run the query exactly once."""
    ensure_db_connected_mock = Mock()
    run_query_mock = Mock()
    # Patch the module-level collaborators so no real DB work happens.
    app.ensure_db_connected = ensure_db_connected_mock
    app.run_query = run_query_mock
    app.lambda_handler(run_query_event, None)
    assert ensure_db_connected_mock.call_count == 1
    assert run_query_mock.call_count == 1
| nxn128/serverless-query | test/test_run_query.py | test_run_query.py | py | 1,290 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line... |
36618145736 | #!/usr/bin/env python3
import copy
import json
import logging
import os
import psutil
import shutil
import sys
import tempfile
from datetime import datetime
# import pysqlite3
from joblib import Parallel, delayed, parallel_backend
from tabulate import tabulate
from . import utils
from .config import Config
class PipelineWise(object):
"""..."""
def __init_logger(self, logger_name, log_file=None, level=logging.INFO):
self.logger = logging.getLogger(logger_name)
# Default log level is less verbose
level = logging.INFO
# Increase log level if debug mode needed
if self.args.debug:
level = logging.DEBUG
# Set the log level
self.logger.setLevel(level)
# Set log formatter and add file and line number in case of DEBUG level
if level == logging.DEBUG:
str_format = (
"%(asctime)s %(processName)s %(levelname)s %(filename)s (%(lineno)s): %(message)s"
)
else:
str_format = "%(asctime)s %(levelname)s: %(message)s"
formatter = logging.Formatter(str_format, "%Y-%m-%d %H:%M:%S")
# Create console handler
fh = logging.StreamHandler(sys.stdout)
fh.setLevel(level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# Create log file handler if required
if log_file and log_file != "*":
fh = logging.FileHandler(log_file)
fh.setLevel(level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
    def __init__(self, args, config_dir, venv_dir):
        """Wire up paths, logging and (when specific ids are given) the
        active tap/target connectors.

        Args:
            args: parsed CLI arguments; must provide .log, .tap, .target, .debug.
            config_dir: root directory of the PipelineWise configuration.
            venv_dir: root directory of the per-connector virtualenvs.
        """
        self.args = args
        self.__init_logger("Pipelinewise CLI", log_file=args.log)
        self.config_dir = config_dir
        self.venv_dir = venv_dir
        self.pipelinewise_bin = os.path.join(self.venv_dir, "cli", "bin", "pipelinewise")
        self.config_path = os.path.join(self.config_dir, "config.json")
        self.load_config()
        # "*" means "all taps/targets": skip resolving a single connector.
        if args.tap != "*":
            self.tap = self.get_tap(args.target, args.tap)
            self.tap_bin = self.get_connector_bin(self.tap["type"])
        if args.target != "*":
            self.target = self.get_target(args.target)
            self.target_bin = self.get_connector_bin(self.target["type"])
        # NOTE(review): attribute name keeps the historical "tranform" typo;
        # renaming would break external callers.
        self.tranform_field_bin = self.get_connector_bin("transform-field")
def create_consumable_target_config(self, target_config, tap_inheritable_config):
try:
dictA = utils.load_json(target_config)
dictB = utils.load_json(tap_inheritable_config)
# Copy everything from dictB into dictA - Not a real merge
dictA.update(dictB)
# Save the new dict as JSON into a temp file
tempfile_path = tempfile.mkstemp()[1]
utils.save_json(dictA, tempfile_path)
return tempfile_path
except Exception as exc:
raise Exception("Cannot merge JSON files {} {} - {}".format(dictA, dictB, exc))
def create_filtered_tap_properties(
self, target_type, tap_type, tap_properties, tap_state, filters, create_fallback=False
):
"""
Create a filtered version of tap properties file based on specific filter conditions.
Return values:
1) A temporary JSON file where only those tables are selected to
sync which meet the filter criterias
2) List of tap_stream_ids where filter criterias matched
3) OPTIONAL when create_fallback is True:
Temporary JSON file with table that don't meet the
filter criterias
4) OPTIONAL when create_fallback is True:
List of tap_stream_ids where filter criteries don't match
"""
# Get filer conditions with default values from input dictionary
# Nothing selected by default
f_selected = filters.get("selected", None)
f_target_type = filters.get("target_type", None)
f_tap_type = filters.get("tap_type", None)
f_replication_method = filters.get("replication_method", None)
f_initial_sync_required = filters.get("initial_sync_required", None)
# Lists of tables that meet and don't meet the filter criterias
filtered_tap_stream_ids = []
fallback_filtered_tap_stream_ids = []
self.logger.debug("Filtering properties JSON by conditions: {}".format(filters))
try:
# Load JSON files
properties = utils.load_json(tap_properties)
state = utils.load_json(tap_state)
# Create a dictionary for tables that don't meet filter criterias
fallback_properties = copy.deepcopy(properties) if create_fallback else None
# Foreach every stream (table) in the original properties
self.logger.info(tap_properties)
for stream_idx, stream in enumerate(properties.get("streams", tap_properties)):
selected = False
replication_method = None
initial_sync_required = False
# Collect required properties from the properties file
tap_stream_id = stream.get("tap_stream_id")
table_name = stream.get("table_name")
metadata = stream.get("metadata", [])
# Collect further properties from the properties file under the metadata key
table_meta = {}
for meta_idx, meta in enumerate(metadata):
if type(meta) == dict and len(meta.get("breadcrumb", [])) == 0:
table_meta = meta.get("metadata")
break
# table_meta = next((i for i in metadata if type(i) == dict and len(i.get("breadcrumb", [])) == 0), {}).get("metadata")
selected = table_meta.get("selected")
replication_method = table_meta.get("replication-method")
# Detect if initial sync is required. Look into the state file, get the bookmark
# for the current stream (table) and if valid bookmark doesn't exist then
# initial sync is required
bookmarks = state.get("bookmarks", {}) if type(state) == dict else {}
stream_bookmark = bookmarks.get(tap_stream_id, {})
if (
# Initial sync is required for INCREMENTAL and LOG_BASED tables
# where the state file has no valid bookmark.
#
# Valid bookmark keys:
# 'replication_key_value' key created for INCREMENTAL tables
# 'log_pos' key created by MySQL LOG_BASED tables
# 'lsn' key created by PostgreSQL LOG_BASED tables
#
# FULL_TABLE replication method is taken as initial sync required
replication_method == "FULL_TABLE"
or (
(replication_method in ["INCREMENTAL", "LOG_BASED"])
and (
not (
"replication_key_value" in stream_bookmark
or "log_pos" in stream_bookmark
or "lsn" in stream_bookmark
)
)
)
):
initial_sync_required = True
# Compare actual values to the filter conditions.
# Set the "selected" key to True if actual values meet the filter criterias
# Set the "selected" key to False if the actual values don't meet the filter criterias
if (
(f_selected == None or selected == f_selected)
and (f_target_type == None or target_type in f_target_type)
and (f_tap_type == None or tap_type in f_tap_type)
and (f_replication_method == None or replication_method in f_replication_method)
and (
f_initial_sync_required == None
or initial_sync_required == f_initial_sync_required
)
):
self.logger.debug(
"""Filter condition(s) matched:
Table : {}
Tap Stream ID : {}
Selected : {}
Replication Method : {}
Init Sync Required : {}
""".format(
table_name,
tap_stream_id,
selected,
replication_method,
initial_sync_required,
)
)
# Filter condition matched: mark table as selected to sync
properties["streams"][stream_idx]["metadata"][meta_idx]["metadata"][
"selected"
] = True
filtered_tap_stream_ids.append(tap_stream_id)
# Filter ocndition matched: mark table as not selected to sync in the fallback properties
if create_fallback:
fallback_properties["streams"][stream_idx]["metadata"][meta_idx][
"metadata"
]["selected"] = False
else:
# Filter condition didn't match: mark table as not selected to sync
properties["streams"][stream_idx]["metadata"][meta_idx]["metadata"][
"selected"
] = False
# Filter condition didn't match: mark table as selected to sync in the fallback properties
# Fallback only if the table is selected in the original properties
if create_fallback and selected == True:
fallback_properties["streams"][stream_idx]["metadata"][meta_idx][
"metadata"
]["selected"] = True
fallback_filtered_tap_stream_ids.append(tap_stream_id)
# Save the generated properties file(s) and return
# Fallback required: Save filtered and fallback properties JSON
if create_fallback:
# Save to files: filtered and fallback properties
temp_properties_path = tempfile.mkstemp()[1]
utils.save_json(properties, temp_properties_path)
temp_fallback_properties_path = tempfile.mkstemp()[1]
utils.save_json(fallback_properties, temp_fallback_properties_path)
return (
temp_properties_path,
filtered_tap_stream_ids,
temp_fallback_properties_path,
fallback_filtered_tap_stream_ids,
)
# Fallback not required: Save only the filtered properties JSON
else:
# Save eed to save
temp_properties_path = tempfile.mkstemp()[1]
utils.save_json(properties, temp_properties_path)
return temp_properties_path, filtered_tap_stream_ids
except Exception as exc:
raise Exception("Cannot create JSON file - {}".format(exc))
def load_config(self):
self.logger.debug("Loading config at {}".format(self.config_path))
config = utils.load_json(self.config_path)
if config:
self.config = config
else:
self.config = {}
    def get_tap_dir(self, target_id, tap_id):
        """Return the config directory of a tap inside its target."""
        return os.path.join(self.config_dir, target_id, tap_id)
    def get_tap_log_dir(self, target_id, tap_id):
        """Return the log directory of a tap."""
        return os.path.join(self.get_tap_dir(target_id, tap_id), "log")
    def get_target_dir(self, target_id):
        """Return the config directory of a target."""
        return os.path.join(self.config_dir, target_id)
    def get_connector_bin(self, connector_type):
        """Return the path of a connector executable inside its virtualenv."""
        return os.path.join(self.venv_dir, connector_type, "bin", connector_type)
    def get_connector_files(self, connector_dir):
        """Map the well-known connector file roles to their paths under connector_dir."""
        return {
            "config": os.path.join(connector_dir, "config.json"),
            "inheritable_config": os.path.join(connector_dir, "inheritable_config.json"),
            "properties": os.path.join(connector_dir, "properties.json"),
            "state": os.path.join(connector_dir, "state.json"),
            "transformation": os.path.join(connector_dir, "transformation.json"),
            "selection": os.path.join(connector_dir, "selection.json"),
        }
def get_targets(self):
self.logger.debug("Getting targets from {}".format(self.config_path))
self.load_config()
try:
targets = self.config.get("targets", [])
except Exception as exc:
raise Exception("Targets not defined")
return targets
    def get_target(self, target_id):
        """Return one target dict by id, with its connector file paths attached.

        Raises Exception when the id is unknown or its directory is missing.
        """
        self.logger.debug("Getting {} target".format(target_id))
        targets = self.get_targets()
        target = False
        # First target whose id matches, or False when none does.
        target = next((item for item in targets if item["id"] == target_id), False)
        if target == False:
            raise Exception("Cannot find {} target".format(target_id))
        target_dir = self.get_target_dir(target_id)
        if os.path.isdir(target_dir):
            target["files"] = self.get_connector_files(target_dir)
        else:
            raise Exception("Cannot find target at {}".format(target_dir))
        return target
def get_taps(self, target_id):
self.logger.debug("Getting taps from {} target".format(target_id))
target = self.get_target(target_id)
try:
taps = target["taps"]
# Add tap status
for tap_idx, tap in enumerate(taps):
taps[tap_idx]["status"] = self.detect_tap_status(target_id, tap["id"])
except Exception as exc:
raise Exception("No taps defined for {} target".format(target_id))
return taps
    def get_tap(self, target_id, tap_id):
        """Return one tap dict, enriched with files, its target and its status.

        Raises Exception when the tap id is unknown or its directory is missing.
        """
        self.logger.debug("Getting {} tap from target {}".format(tap_id, target_id))
        taps = self.get_taps(target_id)
        tap = False
        # First tap whose id matches, or False when none does.
        tap = next((item for item in taps if item["id"] == tap_id), False)
        if tap == False:
            raise Exception("Cannot find {} tap in {} target".format(tap_id, target_id))
        tap_dir = self.get_tap_dir(target_id, tap_id)
        if os.path.isdir(tap_dir):
            tap["files"] = self.get_connector_files(tap_dir)
        else:
            raise Exception("Cannot find tap at {}".format(tap_dir))
        # Add target and status details
        tap["target"] = self.get_target(target_id)
        tap["status"] = self.detect_tap_status(target_id, tap_id)
        return tap
    def merge_schemas(self, old_schema, new_schema):
        """Merge a freshly discovered singer catalog with the previously saved one.

        Streams and fields present in both catalogs keep their previous
        selection and replication metadata; streams/fields missing from the
        old catalog are flagged ``is-new`` and fields whose type changed are
        flagged ``is-modified``. Returns the annotated new catalog.

        NOTE(review): the bare ``False`` statements inside the ``except``
        blocks are no-op placeholders (equivalent to ``pass``) that silently
        ignore missing metadata keys.
        """
        schema_with_diff = new_schema
        if not old_schema:
            schema_with_diff = new_schema
        else:
            new_streams = new_schema["streams"]
            old_streams = old_schema["streams"]
            for new_stream_idx, new_stream in enumerate(new_streams):
                new_tap_stream_id = new_stream["tap_stream_id"]
                old_stream = False
                old_stream = next(
                    (item for item in old_streams if item["tap_stream_id"] == new_tap_stream_id),
                    False,
                )
                # Is this a new stream?
                if not old_stream:
                    new_schema["streams"][new_stream_idx]["is-new"] = True
                # Copy stream selection from the old properties
                else:
                    # Find table specific metadata entries (breadcrumb == [])
                    # in the old and new streams
                    new_stream_table_mdata_idx = 0
                    old_stream_table_mdata_idx = 0
                    try:
                        new_stream_table_mdata_idx = [
                            i
                            for i, md in enumerate(new_stream["metadata"])
                            if md["breadcrumb"] == []
                        ][0]
                        old_stream_table_mdata_idx = [
                            i
                            for i, md in enumerate(old_stream["metadata"])
                            if md["breadcrumb"] == []
                        ][0]
                    except Exception:
                        False
                    # Copy is-new flag from the old stream
                    try:
                        new_schema["streams"][new_stream_idx]["is-new"] = old_stream["is-new"]
                    except Exception:
                        False
                    # Copy selected from the old stream
                    try:
                        new_schema["streams"][new_stream_idx]["metadata"][
                            new_stream_table_mdata_idx
                        ]["metadata"]["selected"] = old_stream["metadata"][
                            old_stream_table_mdata_idx
                        ][
                            "metadata"
                        ][
                            "selected"
                        ]
                    except Exception:
                        False
                    # Copy replication method from the old stream
                    try:
                        new_schema["streams"][new_stream_idx]["metadata"][
                            new_stream_table_mdata_idx
                        ]["metadata"]["replication-method"] = old_stream["metadata"][
                            old_stream_table_mdata_idx
                        ][
                            "metadata"
                        ][
                            "replication-method"
                        ]
                    except Exception:
                        False
                    # Copy replication key from the old stream
                    try:
                        new_schema["streams"][new_stream_idx]["metadata"][
                            new_stream_table_mdata_idx
                        ]["metadata"]["replication-key"] = old_stream["metadata"][
                            old_stream_table_mdata_idx
                        ][
                            "metadata"
                        ][
                            "replication-key"
                        ]
                    except Exception:
                        False
                    # Is this new or modified field?
                    new_fields = new_schema["streams"][new_stream_idx]["schema"]["properties"]
                    old_fields = old_stream["schema"]["properties"]
                    for new_field_key in new_fields:
                        new_field = new_fields[new_field_key]
                        new_field_mdata_idx = -1
                        # Find new field metadata index
                        for i, mdata in enumerate(
                            new_schema["streams"][new_stream_idx]["metadata"]
                        ):
                            if (
                                len(mdata["breadcrumb"]) == 2
                                and mdata["breadcrumb"][0] == "properties"
                                and mdata["breadcrumb"][1] == new_field_key
                            ):
                                new_field_mdata_idx = i
                        # Field exists
                        if new_field_key in old_fields.keys():
                            old_field = old_fields[new_field_key]
                            old_field_mdata_idx = -1
                            # Find old field metadata index
                            for i, mdata in enumerate(old_stream["metadata"]):
                                if (
                                    len(mdata["breadcrumb"]) == 2
                                    and mdata["breadcrumb"][0] == "properties"
                                    and mdata["breadcrumb"][1] == new_field_key
                                ):
                                    old_field_mdata_idx = i
                            new_mdata = new_schema["streams"][new_stream_idx]["metadata"][
                                new_field_mdata_idx
                            ]["metadata"]
                            old_mdata = old_stream["metadata"][old_field_mdata_idx]["metadata"]
                            # Copy is-new flag from the old properties
                            try:
                                new_mdata["is-new"] = old_mdata["is-new"]
                            except Exception:
                                False
                            # Copy is-modified flag from the old properties
                            try:
                                new_mdata["is-modified"] = old_mdata["is-modified"]
                            except Exception:
                                False
                            # Copy field selection from the old properties
                            try:
                                new_mdata["selected"] = old_mdata["selected"]
                            except Exception:
                                False
                            # Field exists and type is the same - Do nothing more in the schema
                            if new_field == old_field:
                                self.logger.debug(
                                    "Field exists in {} stream with the same type: {} : {}".format(
                                        new_tap_stream_id, new_field_key, new_field
                                    )
                                )
                            # Field exists but types are different - Mark the field as modified in the metadata
                            else:
                                self.logger.debug(
                                    "Field exists in {} stream but types are different: {} : {}".format(
                                        new_tap_stream_id, new_field_key, new_field
                                    )
                                )
                                try:
                                    new_schema["streams"][new_stream_idx]["metadata"][
                                        new_field_mdata_idx
                                    ]["metadata"]["is-modified"] = True
                                    new_schema["streams"][new_stream_idx]["metadata"][
                                        new_field_mdata_idx
                                    ]["metadata"]["is-new"] = False
                                except Exception:
                                    False
                        # New field - Mark the field as new in the metadata
                        else:
                            self.logger.debug(
                                "New field in stream {}: {} : {}".format(
                                    new_tap_stream_id, new_field_key, new_field
                                )
                            )
                            try:
                                new_schema["streams"][new_stream_idx]["metadata"][
                                    new_field_mdata_idx
                                ]["metadata"]["is-new"] = True
                            except Exception:
                                False
        schema_with_diff = new_schema
        return schema_with_diff
    def make_default_selection(self, schema, selection_file):
        """Apply the stream selection defined in *selection_file* to a catalog.

        For every stream listed in the selection JSON, mark it selected and
        copy its replication method/key into the table-level metadata; every
        other stream is explicitly marked unselected. If the selection file
        does not exist the catalog is returned unchanged.
        """
        if os.path.isfile(selection_file):
            self.logger.info("Loading pre defined selection from {}".format(selection_file))
            tap_selection = utils.load_json(selection_file)
            selection = tap_selection["selection"]
            not_selected = []
            streams = schema["streams"]
            for stream_idx, stream in enumerate(streams):
                tap_stream_id = stream.get("tap_stream_id")
                tap_stream_sel = False
                for sel in selection:
                    if "tap_stream_id" in sel and tap_stream_id == sel["tap_stream_id"]:
                        tap_stream_sel = sel
                # Find table specific metadata entries in the old and new streams
                # NOTE(review): if this lookup fails, stream_table_mdata_idx
                # keeps its value from the previous iteration (or is unbound on
                # the first one) — confirm every stream has a breadcrumb == []
                # metadata entry.
                try:
                    stream_table_mdata_idx = [
                        i for i, md in enumerate(stream["metadata"]) if md["breadcrumb"] == []
                    ][0]
                except Exception:
                    False
                if tap_stream_sel:
                    self.logger.info(
                        "Mark {} tap_stream_id as selected with properties {}".format(
                            tap_stream_id, tap_stream_sel
                        )
                    )
                    schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx]["metadata"][
                        "selected"
                    ] = True
                    if "replication_method" in tap_stream_sel:
                        schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx][
                            "metadata"
                        ]["replication-method"] = tap_stream_sel["replication_method"]
                    if "replication_key" in tap_stream_sel:
                        schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx][
                            "metadata"
                        ]["replication-key"] = tap_stream_sel["replication_key"]
                else:
                    # self.logger.info("Mark {} tap_stream_id as not selected".format(tap_stream_id))
                    not_selected.append(tap_stream_id)
                    schema["streams"][stream_idx]["metadata"][stream_table_mdata_idx]["metadata"][
                        "selected"
                    ] = False
            if not_selected:
                self.logger.info("The following were not selected: {}".format(", ".join(not_selected)))
        return schema
    def init(self):
        """Create a new project directory named after ``self.args.name`` and
        populate it with the sample YAML config files.

        Exits the process with status 1 if the directory already exists.
        """
        self.logger.info("Initialising new project {}...".format(self.args.name))
        project_dir = os.path.join(os.getcwd(), self.args.name)
        # Create project dir if not exists
        if os.path.exists(project_dir):
            self.logger.error(
                "Directory exists and cannot create new project: {}".format(self.args.name)
            )
            sys.exit(1)
        else:
            os.mkdir(project_dir)
        # Copy every bundled sample YAML into the fresh project directory
        for yaml in sorted(utils.get_sample_file_paths()):
            yaml_basename = os.path.basename(yaml)
            dst = os.path.join(project_dir, yaml_basename)
            self.logger.info(" - Creating {}...".format(yaml_basename))
            shutil.copyfile(yaml, dst)
    def test_tap_connection(self):
        """Check connectivity of the currently selected tap by running it in
        singer ``--discover`` mode and validating that it emits JSON.

        Exits the process with status 1 on a non-zero return code or when the
        discovered schema is not valid JSON.
        """
        tap_id = self.tap["id"]
        tap_type = self.tap["type"]
        target_id = self.target["id"]
        target_type = self.target["type"]
        self.logger.info(
            "Testing {} ({}) tap connection in {} ({}) target".format(
                tap_id, tap_type, target_id, target_type
            )
        )
        # Generate and run the command to run the tap directly
        # We will use the discover option to test connection
        tap_config = self.tap["files"]["config"]
        command = "{} --config {} --discover".format(self.tap_bin, tap_config)
        result = utils.run_command(command)
        # Get output and errors from tap
        rc, new_schema, tap_output = result
        if rc != 0:
            self.logger.error("Testing tap connection ({} - {}) FAILED".format(target_id, tap_id))
            sys.exit(1)
        # If the connection success then the response needs to be a valid JSON string
        if not utils.is_json(new_schema):
            self.logger.error(
                "Schema discovered by {} ({}) is not a valid JSON.".format(tap_id, tap_type)
            )
            sys.exit(1)
        else:
            self.logger.info("Testing tap connection ({} - {}) PASSED".format(target_id, tap_id))
def discover_tap(self, tap=None, target=None):
# Define tap props
if tap is None:
tap_id = self.tap.get("id")
tap_type = self.tap.get("type")
tap_config_file = self.tap.get("files", {}).get("config")
tap_properties_file = self.tap.get("files", {}).get("properties")
tap_selection_file = self.tap.get("files", {}).get("selection")
tap_bin = self.tap_bin
else:
tap_id = tap.get("id")
tap_type = tap.get("type")
tap_config_file = tap.get("files", {}).get("config")
tap_properties_file = tap.get("files", {}).get("properties")
tap_selection_file = tap.get("files", {}).get("selection")
tap_bin = self.get_connector_bin(tap_type)
# Define target props
if target is None:
target_id = self.target.get("id")
target_type = self.target.get("type")
else:
target_id = target.get("id")
target_type = target.get("type")
self.logger.info(
"Discovering {} ({}) tap in {} ({}) target...".format(
tap_id, tap_type, target_id, target_type
)
)
# Generate and run the command to run the tap directly
command = "{} --config {} --discover".format(tap_bin, tap_config_file)
result = utils.run_command(command)
# Get output and errors from tap
rc, new_schema, output = result
if rc != 0:
return "{} - {}".format(target_id, tap_id)
# Convert JSON string to object
try:
new_schema = json.loads(new_schema)
except Exception as exc:
return "Schema discovered by {} ({}) is not a valid JSON.".format(tap_id, tap_type)
# Merge the old and new schemas and diff changes
old_schema = utils.load_json(tap_properties_file)
if old_schema:
schema_with_diff = self.merge_schemas(old_schema, new_schema)
else:
schema_with_diff = new_schema
# Make selection from selectection.json if exists
try:
schema_with_diff = self.make_default_selection(schema_with_diff, tap_selection_file)
schema_with_diff = utils.delete_keys_from_dict(
self.make_default_selection(schema_with_diff, tap_selection_file),
# Removing multipleOf json schema validations from properties.json,
# that's causing run time issues
["multipleOf"],
)
except Exception as exc:
return "Cannot load selection JSON at {}. {}".format(tap_selection_file, str(exc))
# Save the new catalog into the tap
try:
self.logger.info(
"Writing new properties file with changes into {}".format(tap_properties_file)
)
utils.save_json(schema_with_diff, tap_properties_file)
except Exception as exc:
return "Cannot save file. {}".format(str(exc))
def detect_tap_status(self, target_id, tap_id, set_pid=False):
self.logger.debug("Detecting {} tap status in {} target".format(tap_id, target_id))
tap_dir = self.get_tap_dir(target_id, tap_id)
log_dir = self.get_tap_log_dir(target_id, tap_id)
connector_files = self.get_connector_files(tap_dir)
current_pid = os.getpid()
pid_path = os.path.join(tap_dir, "pid")
status = {
"currentStatus": "unknown",
"lastStatus": "unknown",
"lastTimestamp": None,
"pid": current_pid,
}
if os.path.exists(pid_path):
try:
executed_pid = int(open(pid_path, "r").readlines()[0])
if executed_pid in psutil.pids():
status["currentStatus"] = "running"
return status
except:
pass
if set_pid:
if os.path.exists(pid_path):
os.remove(pid_path)
open(pid_path, "w").write(str(current_pid))
# Tap exists but configuration not completed
if not os.path.isfile(connector_files["config"]):
status["currentStatus"] = "not-configured"
# Configured and not running
else:
status["currentStatus"] = "ready"
# Get last run instance
if os.path.isdir(log_dir):
log_files = utils.search_files(
log_dir, patterns=["*.log.success", "*.log.failed"], sort=True
)
if len(log_files) > 0:
last_log_file = log_files[0]
log_attr = utils.extract_log_attributes(last_log_file)
status["lastStatus"] = log_attr["status"]
status["lastTimestamp"] = log_attr["timestamp"]
return status
    def status(self):
        """Print a tabulated status report of every tap in every target,
        grouped into succeeding / failing / unknown by the last sync result.
        """
        targets = self.get_targets()
        tab_headers = [
            "Tap ID",
            "Tap Type",
            "Target ID",
            "Target Type",
            "Enabled",
            "Status",
            "Last Sync",
            "Last Sync Result",
        ]
        successful_taps = []
        unsuccessful_taps = []
        unknown_taps = []
        # Bucket each tap row by its last run status
        for target in targets:
            taps = self.get_taps(target["id"])
            for tap in taps:
                current_status = tap.get("status", {}).get("lastStatus", "<Unknown>")
                tap_status = [
                    tap.get("id", "<Unknown>"),
                    tap.get("type", "<Unknown>"),
                    target.get("id", "<Unknown>"),
                    target.get("type", "<Unknown>"),
                    tap.get("enabled", "<Unknown>"),
                    tap.get("status", {}).get("currentStatus", "<Unknown>"),
                    tap.get("status", {}).get("lastTimestamp", "<Unknown>"),
                    tap.get("status", {}).get("lastStatus", "<Unknown>"),
                ]
                if current_status == "success":
                    successful_taps.append(tap_status)
                elif current_status == "failed":
                    unsuccessful_taps.append(tap_status)
                else:
                    unknown_taps.append(tap_status)
        # Print each non-empty bucket sorted by tap id
        if successful_taps:
            print(f"{len(successful_taps)} currently succeeding\n")
            print(
                tabulate(
                    sorted(successful_taps, key=lambda x: x[0]),
                    headers=tab_headers,
                    tablefmt="simple",
                )
            )
            print("\n")
        if unsuccessful_taps:
            print(f"{len(unsuccessful_taps)} currently failing\n")
            print(
                tabulate(
                    sorted(unsuccessful_taps, key=lambda x: x[0]),
                    headers=tab_headers,
                    tablefmt="simple",
                )
            )
            print("\n")
        if unknown_taps:
            print(f"{len(unknown_taps)} currently in an unknown state\n")
            print(
                tabulate(
                    sorted(unknown_taps, key=lambda x: x[0]), headers=tab_headers, tablefmt="simple"
                )
            )
    def reset_tap(self):
        """Clear the 'running' state of the selected tap by deleting its
        ``*.log.running`` file.

        Exits with status 0 when the tap is disabled or not running.
        """
        tap_id = self.tap["id"]
        tap_type = self.tap["type"]
        target_id = self.target["id"]
        target_type = self.target["type"]
        log_dir = self.get_tap_log_dir(target_id, tap_id)
        self.logger.info("Resetting {} tap in {} target".format(tap_id, target_id))
        # Run only if tap enabled
        if not self.tap.get("enabled", False):
            self.logger.info(
                "Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
            )
            sys.exit(0)
        # Run only if not running
        tap_status = self.detect_tap_status(target_id, tap_id)
        if tap_status["currentStatus"] != "running":
            self.logger.info("Tap is not currently running, nothing to reset")
            sys.exit(0)
        # NOTE(review): raises IndexError if no *.log.running file exists even
        # though the status says running — confirm the two can't diverge.
        os.remove(utils.search_files(log_dir, patterns=["*.log.running"])[0])
        self.logger.info("Tap log successfully removed")
def clean_logs(self, to_keep=2):
"""
Removes all but the most recent logs, cleaning space but preserving last run success/failure
"""
targets = self.get_targets()
for target in targets:
taps = self.get_taps(target["id"])
for tap in taps:
self.logger.info("Cleaning {}".format(tap["id"]))
log_dir = self.get_tap_log_dir(target["id"], tap["id"])
log_files = utils.search_files(
log_dir, patterns=["*.log.success", "*.log.failed"], sort=True
)
if len(log_files) < to_keep:
self.logger.info("No logs to clean")
for file in log_files[to_keep:]:
os.remove(os.path.join(log_dir, file))
self.logger.info("{} files removed".format(len(log_files[1:])))
    def run_tap_singer(
        self,
        tap_type,
        tap_config,
        tap_properties,
        tap_state,
        tap_transformation,
        target_config,
        log_file,
    ):
        """
        Generate and run the piped shell command that syncs tables using
        singer taps and targets: ``tap | [transform |] target > new_state``.

        The tap's state file is created/updated afterwards, and when
        ``self.args.start_date`` is set the tap config's ``start_date`` is
        temporarily overridden (and restored at the end).
        """
        new_tap_state = tempfile.mkstemp()[1]
        # Following the singer spec the catalog JSON file needs to be passed by the --catalog argument
        # However some taps (i.e. tap-mysql and tap-postgres) require it as --properties
        # This is probably for historical reasons and needs clarifying on Singer slack channels
        tap_catalog_argument = utils.get_tap_property_by_tap_type(tap_type, "tap_catalog_argument")
        # Add state argument if exists to extract data incrementally
        if not os.path.isfile(tap_state):
            open(tap_state, "w").write("{}")
        tap_state_arg = "--state {}".format(tap_state)
        # Remove the state and rewrite the config if necessary
        if self.args.start_date:
            self.original_start = None
            config = json.load(open(tap_config))
            if "start_date" in config.keys():
                self.original_start = config["start_date"]
                config["start_date"] = datetime.strptime(self.args.start_date, "%Y-%m-%d").strftime(
                    "%Y-%m-%dT00:00:00Z"
                )
                open(tap_config, "w").write(json.dumps(config))
                os.remove(tap_state)
                open(tap_state, "w").write("{}")
            else:
                self.logger.warning(
                    "Tried to start from {} but this tap doesn't use start date".format(
                        self.args.start_date
                    )
                )
        # Detect if transformation is needed
        has_transformation = False
        if os.path.isfile(tap_transformation):
            tr = utils.load_json(tap_transformation)
            if "transformations" in tr and len(tr["transformations"]) > 0:
                has_transformation = True
        # Run without transformation in the middle
        if not has_transformation:
            command = " ".join(
                (
                    " {} --config {} {} {} {}".format(
                        self.tap_bin,
                        tap_config,
                        tap_catalog_argument,
                        tap_properties,
                        tap_state_arg,
                    ),
                    "| {} --config {}".format(self.target_bin, target_config),
                    "> {}".format(new_tap_state),
                )
            )
            self.logger.info(command)
        # Run with transformation in the middle
        else:
            command = " ".join(
                (
                    " {} --config {} {} {} {}".format(
                        self.tap_bin,
                        tap_config,
                        tap_catalog_argument,
                        tap_properties,
                        tap_state_arg,
                    ),
                    "| {} --config {}".format(self.tranform_field_bin, tap_transformation),
                    "| {} --config {}".format(self.target_bin, target_config),
                    "> {}".format(new_tap_state),
                )
            )
        # Do not run if another instance is already running
        # NOTE(review): log_dir is computed but never used here — confirm
        # whether an "already running" check was meant to follow.
        log_dir = os.path.dirname(log_file)
        # Run command
        result = utils.run_command(command, log_file)
        # Save the new state file if created correctly
        if utils.is_json_file(new_tap_state):
            self.logger.info("Writing new state file")
            self.logger.info(open(new_tap_state, "r").readlines())
            shutil.copyfile(new_tap_state, tap_state)
            os.remove(new_tap_state)
        else:
            self.logger.warning("Not a valid state record")
        # Reset the config back (config is only defined in the start_date branch above)
        if self.args.start_date:
            if self.original_start:
                config["start_date"] = self.original_start
                os.remove(tap_config)
                open(tap_config, "w").write(json.dumps(config))
    def run_tap_fastsync(
        self,
        tap_type,
        target_type,
        tap_config,
        tap_properties,
        tap_state,
        tap_transformation,
        target_config,
        log_file,
    ):
        """
        Generate and run the shell command that syncs tables using the native
        fastsync component for the given tap/target type pair.

        Output is redirected to *log_file*; ``self.args.tables`` optionally
        restricts the sync to a subset of tables.
        """
        fastsync_bin = utils.get_fastsync_bin(self.venv_dir, tap_type, target_type)
        # Add transform argument only when a transformation file exists
        tap_transform_arg = ""
        if os.path.isfile(tap_transformation):
            tap_transform_arg = "--transform {}".format(tap_transformation)
        command = " ".join(
            (
                "  {} ".format(fastsync_bin),
                "--tap {}".format(tap_config),
                "--properties {}".format(tap_properties),
                "--state {}".format(tap_state),
                "--target {}".format(target_config),
                "{}".format(tap_transform_arg),
                "{}".format("--tables {}".format(self.args.tables) if self.args.tables else ""),
            )
        )
        # Do not run if another instance is already running
        # NOTE(review): log_dir and result are computed but never used —
        # confirm whether a running-check / error handling was intended here.
        log_dir = os.path.dirname(log_file)
        # Run command
        result = utils.run_command(command, log_file)
    def run_tap(self):
        """
        Generate and run the command(s) that sync data from source to target.
        The generated commands can use one or multiple of:
          1. Fastsync:
                Native and optimised component to sync tables from a
                specific type of tap into a specific type of target.
                Used automatically when FULL_TABLE replication is selected
                or when an initial sync is required.
          2. Singer Taps and Targets:
                Dynamic components following the singer specification to
                sync tables from multiple sources to multiple targets.
                Used automatically for INCREMENTAL and LOG_BASED replication.
                FULL_TABLE replication does not use the singer components
                because they are too slow to sync large tables.
        Exits with 0 when the tap is disabled or already running; exits with 1
        when the sync command fails.
        """
        tap_id = self.tap["id"]
        tap_type = self.tap["type"]
        target_id = self.target["id"]
        target_type = self.target["type"]
        self.logger.info("Running {} tap in {} target".format(tap_id, target_id))
        # Run only if tap enabled
        if not self.tap.get("enabled", False):
            self.logger.info(
                "Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
            )
            sys.exit(0)
        # Run only if not running
        tap_status = self.detect_tap_status(target_id, tap_id, set_pid=True)
        self.logger.info(tap_status)
        if tap_status["currentStatus"] == "running":
            self.logger.info(
                "Tap {} is currently running. Do nothing and exit normally.".format(
                    self.tap["name"]
                )
            )
            sys.exit(0)
        # Generate and run the command to run the tap directly
        tap_config = self.tap["files"]["config"]
        tap_inheritable_config = self.tap["files"]["inheritable_config"]
        tap_properties = self.tap["files"]["properties"]
        tap_state = self.tap["files"]["state"]
        tap_transformation = self.tap["files"]["transformation"]
        target_config = self.target["files"]["config"]
        # Some target attributes can be passed and overridden by tap (aka. inheritable config)
        # We merge the two configs and use that with the target
        cons_target_config = self.create_consumable_target_config(
            target_config, tap_inheritable_config
        )
        # Output will be redirected into target and tap specific log directory
        log_dir = self.get_tap_log_dir(target_id, tap_id)
        current_time = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        # Create fastsync and singer specific filtered tap properties that contain only
        # the tables that need to be synced by the specific command
        (
            tap_properties_fastsync,
            fastsync_stream_ids,
            tap_properties_singer,
            singer_stream_ids,
        ) = self.create_filtered_tap_properties(
            target_type,
            tap_type,
            tap_properties,
            tap_state,
            {
                "selected": True,
                "target_type": ["target-snowflake", "target-redshift"],
                "tap_type": ["tap-mysql", "tap-postgres"],
                "initial_sync_required": True,
            },
            create_fallback=True,
        )
        log_file_fastsync = os.path.join(
            log_dir, "{}-{}-{}.fastsync.log".format(target_id, tap_id, current_time)
        )
        log_file_singer = os.path.join(
            log_dir, "{}-{}-{}.singer.log".format(target_id, tap_id, current_time)
        )
        try:
            # Run fastsync for FULL_TABLE replication method
            if len(fastsync_stream_ids) > 0:
                self.logger.info(
                    "Table(s) selected to sync by fastsync: {}".format(fastsync_stream_ids)
                )
                self.run_tap_fastsync(
                    tap_type,
                    target_type,
                    tap_config,
                    tap_properties_fastsync,
                    tap_state,
                    tap_transformation,
                    cons_target_config,
                    log_file_fastsync,
                )
            else:
                self.logger.info("No table available that needs to be sync by fastsync")
            # Run singer tap for INCREMENTAL and LOG_BASED replication methods
            if len(singer_stream_ids) > 0:
                self.logger.info(
                    "Table(s) selected to sync by singer: {}".format(singer_stream_ids)
                )
                self.run_tap_singer(
                    tap_type,
                    tap_config,
                    tap_properties_singer,
                    tap_state,
                    tap_transformation,
                    cons_target_config,
                    log_file_singer,
                )
            else:
                self.logger.info("No table available that needs to be sync by singer")
        # Delete temp files if there is any
        except utils.RunCommandException as exc:
            self.logger.error(exc)
            utils.silentremove(cons_target_config)
            utils.silentremove(tap_properties_fastsync)
            utils.silentremove(tap_properties_singer)
            sys.exit(1)
        except Exception as exc:
            utils.silentremove(cons_target_config)
            utils.silentremove(tap_properties_fastsync)
            utils.silentremove(tap_properties_singer)
            raise exc
        utils.silentremove(cons_target_config)
        utils.silentremove(tap_properties_fastsync)
        utils.silentremove(tap_properties_singer)
    def sync_tables(self):
        """
        Sync every (or a selected list of) tables from a specific tap.
        The function uses the fastsync components, hence it's only available
        for tap/target pairs where the native, optimised fastsync component
        is implemented.

        Exits with 0 when the tap is disabled; exits with 1 when the tap is
        already running, when fastsync is not implemented for this pair, or
        when the sync command fails.
        """
        tap_id = self.tap["id"]
        tap_type = self.tap["type"]
        target_id = self.target["id"]
        target_type = self.target["type"]
        fastsync_bin = utils.get_fastsync_bin(self.venv_dir, tap_type, target_type)
        self.logger.info(
            "Syncing tables from {} ({}) to {} ({})...".format(
                tap_id, tap_type, target_id, target_type
            )
        )
        # Run only if tap enabled
        if not self.tap.get("enabled", False):
            self.logger.info(
                "Tap {} is not enabled. Do nothing and exit normally.".format(self.tap["name"])
            )
            sys.exit(0)
        # Run only if tap not running
        tap_status = self.detect_tap_status(target_id, tap_id)
        if tap_status["currentStatus"] == "running":
            self.logger.info(
                "Tap {} is currently running and cannot sync. Stop the tap and try again.".format(
                    self.tap["name"]
                )
            )
            sys.exit(1)
        # No fastsync binary means this tap/target pair is unsupported
        if not os.path.isfile(fastsync_bin):
            self.logger.error(
                "Table sync function is not implemented from {} datasources to {} type of targets".format(
                    tap_type, target_type
                )
            )
            sys.exit(1)
        # Generate and run the command to run the tap directly
        tap_config = self.tap["files"]["config"]
        tap_inheritable_config = self.tap["files"]["inheritable_config"]
        tap_properties = self.tap["files"]["properties"]
        tap_state = self.tap["files"]["state"]
        tap_transformation = self.tap["files"]["transformation"]
        target_config = self.target["files"]["config"]
        # Some target attributes can be passed and overridden by tap (aka. inheritable config)
        # We merge the two configs and use that with the target
        cons_target_config = self.create_consumable_target_config(
            target_config, tap_inheritable_config
        )
        # Output will be redirected into target and tap specific log directory
        log_dir = self.get_tap_log_dir(target_id, tap_id)
        current_time = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        log_file = os.path.join(
            log_dir, "{}-{}-{}.fastsync.log".format(target_id, tap_id, current_time)
        )
        # sync_tables command always using fastsync
        try:
            self.run_tap_fastsync(
                tap_type,
                target_type,
                tap_config,
                tap_properties,
                tap_state,
                tap_transformation,
                cons_target_config,
                log_file,
            )
        # Delete temp file if there is any
        except utils.RunCommandException as exc:
            self.logger.error(exc)
            utils.silentremove(cons_target_config)
            sys.exit(1)
        except Exception as exc:
            utils.silentremove(cons_target_config)
            raise exc
        utils.silentremove(cons_target_config)
    def import_project(self):
        """
        Take a list of YAML files from a directory and use it as the source to
        build singer compatible json files and organise them into the pipeline
        directory structure; then run every tap's discovery in parallel to
        activate stream selections.

        Exits with 1 if any tap failed to discover.
        """
        # Read the YAML config files and transform/save into singer compatible
        # JSON files in a common directory structure
        config = Config.from_yamls(self.config_dir, self.args.dir, self.args.secret)
        config.save()
        # Activating tap stream selections
        #
        # Run every tap in discovery mode to generate the singer specific
        # properties.json files for the taps. The properties file is then
        # updated to replicate only the tables that are defined in the YAML
        # files and to use the required replication methods
        #
        # The tap Discovery mode needs to connect to each source database and
        # doing that sequentially is slow. For a better performance we do it
        # in parallel.
        self.logger.info("ACTIVATING TAP STREAM SELECTIONS...")
        total_targets = 0
        total_taps = 0
        discover_excs = []
        # Import every tap from every target
        start_time = datetime.now()
        for tk in config.targets.keys():
            target = config.targets.get(tk)
            total_targets += 1
            total_taps += len(target.get("taps"))
            with parallel_backend("threading", n_jobs=-1):
                # Discover taps in parallel and return the list
                # of exceptions of the failed ones
                discover_excs.extend(
                    list(
                        filter(
                            None,
                            Parallel(verbose=100)(
                                delayed(self.discover_tap)(tap=tap, target=target)
                                for (tap) in target.get("taps")
                            ),
                        )
                    )
                )
        # Log summary
        # NOTE(review): the "Taps failed to import" line is formatted with the
        # exception list itself (str(discover_excs)), not a count — confirm
        # whether that is intentional.
        end_time = datetime.now()
        self.logger.info(
            """
            -------------------------------------------------------
            IMPORTING YAML CONFIGS FINISHED
            -------------------------------------------------------
                Total targets to import        : {}
                Total taps to import           : {}
                Taps imported successfully     : {}
                Taps failed to import          : {}
                Runtime                        : {}
            -------------------------------------------------------
            """.format(
                total_targets,
                total_taps,
                total_taps - len(discover_excs),
                str(discover_excs),
                end_time - start_time,
            )
        )
        if len(discover_excs) > 0:
            sys.exit(1)
def encrypt_string(self):
"""
Encrypt the supplied string using the provided vault secret
"""
b_ciphertext = utils.vault_encrypt(self.args.string, self.args.secret)
yaml_text = utils.vault_format_ciphertext_yaml(b_ciphertext)
print(yaml_text)
print("Encryption successful")
| beherap/pipelinewise | pipelinewise/cli/pipelinewise.py | pipelinewise.py | py | 55,124 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.INFO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",... |
43599317125 | from __future__ import division
import h5py
import numpy as np
'''
PARAMETERS
'''
#savefig()
# Input HDF5 read by main(); expected to contain 'data', 'interpo' and 'ids'.
outFile='all_data.hdf5'
def main():
    """Load the cohort HDF5 file and build the missing-data (NaN) cost matrix."""
    f=h5py.File(outFile,'r')
    # NOTE(review): ds is loaded but never used below — confirm whether the
    # first six time points of 'data' were meant to replace 'interpo' here.
    ds = f['data'][:,0:6,:]
    data = f['interpo']
    import_features=['Weight_Index', 'Waist(CM)', 'Hip(CM)', 'Waist_Hip_Ratio','systolic_pressure', 'diastolic_pressure', 'Hb', 'Cr', 'Ch', 'TG', 'HDL', 'LDL', 'FBG', 'PBG', 'INS0', 'CP0', 'Ch', 'TG', 'HDL', 'LDL', 'FBG', 'PBG', 'HbA1c', 'INS0','HOMAIR', 'HOMAB', 'CP0', 'CRP', 'FFA', 'visceral_fat', 'subcutaneous_fat','FT3', 'FT4', 'TSH']
    all_features=['Weight(KG)','Weight_Index','Waist(CM)','Hip(CM)','Waist_Hip_Ratio','Heart_rate','systolic_pressure','diastolic_pressure','WBC','Hb','ALT','AST','rGT','ALP','prealbumin','bile_acid','total_bilirubin','direct_bilirubin','BUN','Cr','uric_acid','RBP','CysC','K','Na','Mg','Ca','P','Ch','TG','HDL','LDL','FBG','PBG','HbA1c','GA','INS0','INS30','INS120','HOMAIR','HOMAB','CP0','CP30','CP120','HOMAcp','ALB1','ALB2','ALB3','Average_uric_ALB','GFR','ACR','CRP','folic_acid','VitB12','PTH','OH25D','Serum_Fe','serum_Fe_protein','CA199','FFA','visceral_fat','subcutaneous_fat','FT3','FT4','TSH','Reversed_T3','BG30','AAINS0','AAINS2','AAINS4','AAINS6','AAINS_index','AACP0','AACP2','AACP4','AACP6','AACP_index','urinary_uric_acid','Urine_creatinine']
    # Drop the last two ids — presumably trailing non-patient entries; verify
    # against how 'ids' is written in the preprocessing step.
    all_ids = f['ids'][0:-2]
    build_cm(data,import_features,all_features,all_ids)
    return
def is_important(feature,important):
    """Return True when *feature* is one of the *important* feature names.

    The membership test expresses the intent directly; the original
    if/else returning literal booleans was redundant.
    """
    return feature in important
def build_cm(data,import_features,all_features,all_ids):
    """Greedily remove the most-missing patient or feature one at a time and
    record the NaN-count "cost matrix" along the way.

    Writes path.txt (removal path coordinates), order.txt (sorted orders per
    step), removal_order.txt (what was dropped each step) and cm.txt (the
    final cost matrix).
    """
    pt_idx = data.shape[0] - 1
    ft_idx = data.shape[2] - 1
    pt_list = all_ids
    ft_list = all_features
    temp_mat = data[:,:,:]
    path_f = open('path.txt','w')
    order_f = open('order.txt','w')
    rem_f = open('removal_order.txt','w')
    cm = np.zeros([data.shape[0],data.shape[2]])
    f_cm = open('cm.txt','w')
    while pt_idx != 0 and ft_idx != 0:
        path_f.write('(%d,%d)\n' % (pt_idx,ft_idx))
        p_order, f_order, p_max, f_max = sort_by_nan(temp_mat,pt_list,ft_list,import_features)
        # Reorder the working matrix and the id/name lists most-complete first
        temp_mat = temp_mat[p_order,:,:]
        temp_mat = temp_mat[:,:,f_order]
        pt_list = [pt_list[p_order[x]] for x in range(len(p_order))]
        ft_list = [ft_list[f_order[x]] for x in range(len(f_order))]
        # Cumulative NaN counts along the current row/column of the cost matrix
        for i in range(pt_idx,-1,-1):
            cm[i,ft_idx] = np.count_nonzero(np.isnan(temp_mat[0:i+1,:,0:ft_idx+1]))#/(data.shape[-1]*data.shape[1])
        for i in range(ft_idx,-1,-1):
            cm[pt_idx,i] = np.count_nonzero(np.isnan(temp_mat[0:pt_idx+1,:,0:i+1]))#/(data.shape[0]*data.shape[1])
        # NOTE(review): pt_list/ft_list were already reordered above, so
        # indexing them with p_order/f_order again applies the permutation
        # twice — confirm whether pt_list[i] was intended here.
        order_f.write('%s' % (pt_list[p_order[0]]))
        for i in range(1,len(p_order)):
            order_f.write(', %s' % (pt_list[p_order[i]]))
        order_f.write('\n')
        order_f.write('%s' % (ft_list[f_order[0]]))
        for i in range(1,len(f_order)):
            order_f.write(', %s' % (ft_list[f_order[i]]))
        order_f.write('\n')
        order_f.write('\n')
        # Drop whichever dimension currently has the worse (larger) max NaN fraction
        if p_max > f_max or ft_idx == 0:
            rem_f.write('%s\n' % (pt_list[-1]))
            temp_mat = temp_mat[0:pt_idx,:,:]
            pt_idx -= 1
            pt_list = pt_list[0:-1]
        else:
            rem_f.write('%s\n' % (ft_list[-1]))
            temp_mat = temp_mat[:,:,0:ft_idx]
            ft_idx -= 1
            ft_list = ft_list[0:-1]
    # Dump the cost matrix as comma separated rows
    for i in range(cm.shape[0]):
        f_cm.write('%f' % (cm[i,0]))
        for j in range(1,cm.shape[1]):
            f_cm.write(', %f' % (cm[i,j]))
        f_cm.write('\n')
    f_cm.close()
def sort_by_nan(data,patients,features,important):
    """Rank patients and features by their fraction of missing (NaN) values.

    Parameters
    ----------
    data : ndarray of shape (n_patients, n_timepoints, n_features)
    patients : sequence of patient ids (only its length is used)
    features : sequence of feature names (only its length is used)
    important : unused; kept for interface compatibility (a commented-out
        reordering of important features used it — that dead code has been
        removed).

    Returns
    -------
    p_order, f_order : index arrays sorting patients/features by ascending
        missing fraction (most complete first)
    p_max, f_max : largest missing fraction among patients / features
    """
    pt_pcts = np.zeros(len(patients))
    ft_pcts = np.zeros(len(features))
    n_pts = data.shape[0]
    n_feats = data.shape[2]
    n_tpts = data.shape[1]
    # Fraction (# empty) / (total #) for each patient
    for i in range(n_pts):
        pt_pcts[i] = float(np.count_nonzero(np.isnan(data[i,:,:])))/(n_feats*n_tpts)
    # Fraction (# empty) / (total #) for each feature
    for i in range(n_feats):
        ft_pcts[i] = float(np.count_nonzero(np.isnan(data[:,:,i])))/(n_pts*n_tpts)
    p_order = np.argsort(pt_pcts)
    f_order = np.argsort(ft_pcts)
    p_max = np.nanmax(pt_pcts)
    f_max = np.nanmax(ft_pcts)
    return p_order, f_order, p_max, f_max
main()
| taylorsmith-UKY/diabetes | get_path.py | get_path.py | py | 4,368 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "h5py.File",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_num... |
7663203620 | import numpy as np
import sounddevice as sd
from scipy.io.wavfile import write

from tools import get_device_number

# Recording settings
fs = 48000        # sampling rate [Hz]
duration = 5      # recording length in seconds
channels = 7      # Azure Kinect has a 7-channel microphone array
device_num = get_device_number("Azure Kinect")  # microphone-array device index

# Record from the microphone array
print("録音開始...")
audio_data = sd.rec(int(duration * fs), samplerate=fs, device=device_num,
                    channels=channels, dtype='float64')
sd.wait()  # block until the recording finishes
print("録音終了")

# Save each microphone channel to its own file
for i in range(audio_data.shape[1]):
    filename = f"mic_{i+1}.wav"
    write(filename, fs, audio_data[:, i])
    # BUG FIX: the message had no placeholder; report the actual file name.
    print(f"{filename}に保存しました")

# Save the full multi-channel recording to one file
filename = "mic_array.wav"
write(filename, fs, audio_data)
print(f"{filename}に保存しました")
{
"api_name": "tools.get_device_number",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sounddevice.rec",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sounddevice.wait",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.io.wav... |
70204805949 | import requests
from fake_useragent import UserAgent
import re
import base64
import sys
from fontTools.ttLib import TTFont
from lxml import etree
import pymysql
# Unicode => ASCII => hex
from unicode_to_hex import get_hex_back
# 继承重写TTFont,直接使用字节串数据,避免在动态字体加密中重复打开关闭woff文件
class MyTTFont(TTFont):
    """TTFont subclass that reads font data directly from a bytes object.

    Purpose: avoid repeatedly writing and re-opening a temporary .woff file
    while the site serves a dynamically changing obfuscation font.
    fontTools-version: 4.22.0
    """
    def __init__(self, file_content, checkChecksums=0, fontNumber=-1, _tableCache=None):
        from fontTools.ttLib.sfnt import SFNTReader
        from io import BytesIO
        # Wrap the raw bytes in BytesIO instead of the original open() call.
        file = BytesIO(file_content)
        super().__init__()
        # The no-arg super().__init__() does not set these attributes, so the
        # relevant tail of TTFont.__init__ is replicated here by hand.
        self._tableCache = _tableCache
        self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
        self.sfntVersion = self.reader.sfntVersion
        self.flavor = self.reader.flavor
        self.flavorData = self.reader.flavorData
class TongchengSpider:
    """Scrapes 58.com used-car listings and decodes the font-obfuscated prices."""

    def __init__(self):
        # Regex that extracts the base64-encoded font file embedded in the CSS.
        self.regex = r"charset=utf-8;base64,(.*?)'\) format\('truetype'\)"
        self.pattern = re.compile(self.regex, re.S)
        # MySQL connection used to persist the scraped rows.
        self.db = pymysql.connect(
            host='192.168.31.63',
            port=3306,
            user='root',
            password='123456',
            database='ershouche'
        )
        self.cursor = self.db.cursor()
        self.ins = 'insert into carinfo(brand,detail,price) values(%s,%s,%s)'

    def get_requests_data(self, url):
        """GET the page with a random Chrome User-Agent and return the HTML text."""
        ua = UserAgent().chrome
        headers = {'User-Agent': ua}
        html = requests.get(url=url, headers=headers).text
        # print(html)
        return html

    # Extract the base64-encoded font data embedded in the page
    def parse_font(self, html):
        """Extract the font file's base64 payload from the HTML and decode it."""
        font_base64 = self.pattern.findall(html)
        if font_base64:
            font_base64 = font_base64[0].encode()
            # Return the raw font bytes after base64-decoding
            return base64.b64decode(font_base64)
        else:
            sys.exit('没有匹配到字体数据')

    # Build the mapping from obfuscated glyph codes to displayed digits
    def create_font_dict(self, font):
        """
        Build a lookup dict from a font object; assumes the font contains only
        the digits 0-9 in glyph order.
        :param font: TTFont object
        :return: mapping from hex code to the digit the glyph renders as
        """
        font_names = font.getGlyphOrder()
        font_dict = {}
        number = 0
        # The dynamic obfuscation is simple: the served font file changes, but
        # the glyph order always corresponds to the digits 0-9 in sequence.
        for font_name in font_names[1:]:
            font_name = font_name[3:]
            font_dict[font_name] = str(number)
            number += 1
        return font_dict

    # Parse brand / model / price from a listing page and restore real digits
    def parse_ershouche_data(self, html, font_dict):
        p = etree.HTML(html)
        info_title = p.xpath('//li[@class="info"]/div/a')
        result_list = []
        for msg in info_title:
            car_brand = msg.xpath('.//span[@class="info_link"]/font/text()')[0]
            car_info = msg.xpath('.//span[@class="info_link"]/text()')[0].strip()
            car_price_obj = msg.xpath('.//div[@class="info--price"]/b/text()')[0]
            price_info = get_hex_back(car_price_obj)
            # '万元' = "10,000 yuan", the unit the site displays prices in
            price_info = self.decode_real_price(price_info, font_dict) + '万元'
            result_list.append((car_brand, car_info, price_info))
        return result_list

    # Assemble the price string the page actually displays
    def decode_real_price(self, price_info_dict, font_dict):
        """
        Convert the hex codes found in the page source into real digits.
        :param price_info_dict: integer and fractional parts,
            e.g. {'int_part': ['2f'], 'decimal_part': ['2d']}
        :param font_dict: hex -> digit lookup, e.g. {'8D77': 0, '5143': 1, ...}
        :return: the joined price string without a unit (unit is 万元)
        """
        # Hex codes for the integer and fractional parts
        int_part_list = price_info_dict['int_part']
        decimal_part_list = price_info_dict['decimal_part']
        # Translate the integer part
        int_part = self.query_hex_codes(int_part_list, font_dict)
        # A falsy first element means there is no fractional part; note that a
        # true price of 0 would still carry a code mapping to the glyph '0'.
        if not decimal_part_list[0]:
            return int_part
        else:
            # Translate the fractional part
            decimal_part = self.query_hex_codes(decimal_part_list, font_dict)
            return int_part + '.' + decimal_part

    # Join a run of price hex codes into a digit string (no decimal point)
    def query_hex_codes(self, hex_list, font_dict):
        """
        Look up the font digit for every hex code in the list.
        :param hex_list: price hex codes taken from the page source
        :param font_dict: hex -> font digit mapping
        :return: concatenated digit string
        """
        price_str = ''
        for item in hex_list:
            price_slices = font_dict.get(item)
            price_str += price_slices
        return price_str

    def save_mysql(self, result_list):
        # Bulk-insert the scraped rows and commit the transaction.
        self.cursor.executemany(self.ins, result_list)
        self.db.commit()

    def run(self):
        # Scrape the first 5 result pages of the target site as an example.
        for i in range(5):
            url = 'https://cd.58.com/ershouche/pn%s/' % (i+1)
            html = self.get_requests_data(url)
            # Build the font lookup dict:
            font_content = self.parse_font(html)
            font = MyTTFont(font_content)
            # The subclass still supports dumping the font to XML:
            # font.saveXML('1.xml')
            font_dict = self.create_font_dict(font)
            # print(font_dict)
            font.close()
            result_list = self.parse_ershouche_data(html, font_dict)
            print(result_list)
            self.save_mysql(result_list)
        self.cursor.close()
        self.db.close()
if __name__ == '__main__':
    # Script entry point: scrape the listings and persist them to MySQL.
    TongchengSpider().run()
| xiaohao-a/58_ershouche_font | 58ershouche.py | 58ershouche.py | py | 6,376 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "fontTools.ttLib.TTFont",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "fontTools.ttLib.sfnt.SFNTReader",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.... |
71315302907 | import requests,sys
import requests
from requests.structures import CaseInsensitiveDict
# Make the project root importable so config.py can be found.
sys.path.insert(1, ".")
from config import TODIST_API_KEY
# Todoist REST API rate limit: 1 request per 2 seconds.
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
# NOTE(review): the constant is spelled TODIST_API_KEY (missing an "O");
# presumably intentional to match config.py — confirm before renaming.
headers["Authorization"] = "Bearer " + str(TODIST_API_KEY)
# This function gets all the tasks in the "Tasks" project, which is the
# project to be used.
def getTasks():
    """Return the open tasks of the Todoist "Tasks" project (id 2208003845).

    Each returned dict carries the keys "Task", "Description", "Priority"
    and "Due Date". Tasks without a due date raise KeyError, as before.
    """
    # Fetch all tasks in the project as a list of dicts.
    tasks = requests.get(
        'https://api.todoist.com/rest/v1/tasks?project_id=2208003845',
        headers=headers,
    ).json()
    tasksPretty = []
    # FIX: the loop variable used to shadow the list it iterated over
    # ("for tasks in tasks") — same result, but fragile and unreadable.
    for task in tasks:
        # Keep only the fields the caller cares about.
        temp = {}
        temp["Task"] = task["content"]
        temp["Description"] = task["description"]
        temp["Priority"] = task["priority"]
        temp["Due Date"] = task["due"]["date"]
        tasksPretty.append(temp)
    return tasksPretty
{
"api_name": "sys.path.insert",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "requests.structures.CaseInsensitiveDict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "co... |
32119109971 | from rest_framework.routers import DefaultRouter
from apps.home.views import FeatureView, HomeView, I18nViewSet, SitemapView
# Root API router for the home app: all of its viewsets hang off this router.
router = DefaultRouter()
router.register("", HomeView)
router.register("sitemap", SitemapView, basename="sitemap")
router.register("i18n", I18nViewSet, basename="i18n")
router.register("features", FeatureView, basename="feature")

# Expose the generated URL patterns to Django's URL resolver.
urlpatterns = router.urls
| OVINC-CN/iWikiAPI | apps/home/urls.py | urls.py | py | 384 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "apps.home.views.HomeView",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "apps.home.views.SitemapView",
"line_number": 7,
"usage_type": "argument"
... |
73815074426 | from user.models import User
from rest_framework import exceptions
def get_user(username):
    """Look up (or lazily create) a User by e-mail, phone number or username.

    Dispatch on the shape of *username*:
    - contains "@"  -> treated as an e-mail; the account is created if missing
    - all digits    -> treated as a phone number; created if missing
    - anything else -> a plain username; raises NotFound if missing
    """
    if "@" in username:
        # E-mail login: auto-provision the account on first sight.
        try:
            return User.objects.get(email=username)
        except User.DoesNotExist:
            return User.objects.create(
                username=username,
                email=username
            )

    if username.isdigit():
        # Phone login: auto-provision the account on first sight.
        try:
            return User.objects.get(phone_no=username)
        except User.DoesNotExist:
            return User.objects.create(
                username=username,
                phone_no=username
            )

    # Plain username: never auto-created.
    try:
        return User.objects.get(username=username)
    except User.DoesNotExist:
        raise exceptions.NotFound("User matching username was not found!")
| Python-Crew/base_drf | auth_user/services.py | services.py | py | 815 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "user.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "user.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "user.models.User.objects.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "user.models.User.... |
31440178142 | from django.contrib import admin
from django.urls import path,include
from . import views
# URL routes for the authentication app.
urlpatterns = [
    # Login form (entry page).
    path('', views.login, name='loginform'),
    # path('OTP/',views.otp,name="otp"),
    path('verifyotp/', views.verifyotp, name='verifyotp'),
    path('multiform/', views.multiform, name='multiform'),
    # Payment flow: landing page, order creation, and gateway callback.
    path("payment_index/", views.payment_home, name="payment_home"),
    path("payment/", views.order_payment, name="payment"),
    path("callback/", views.callback, name="callback"),
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
14752733594 | from flask import Flask,render_template, request, session, redirect, url_for
from threading import Thread
def createApp():
    """Build the Flask application configured with the project folders."""
    return Flask(
        __name__,
        template_folder="templates",
        static_folder="static",
    )
app = createApp()
@app.route("/")
def home():
    # Serve the landing page template.
    return render_template("./index.html")
def run():
    # Serve on all interfaces, port 8080; blocks, so it is run on a thread.
    app.run(host='0.0.0.0', port=8080)
def keep_alive():
    """Start the web server on a background thread so the caller keeps running."""
    server_thread = Thread(target=run)
    server_thread.start()
if __name__ == '__main__':
    # Launch the background web server when run directly.
    keep_alive()
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 25,
"usage_type": "call"
}
] |
30884940545 | from zope.event import notify
from zope.component import adapts, queryUtility
from zope.interface import implements, alsoProvides
from getpaid.core.interfaces import ILineItemFactory, IShoppingCart
from getpaid.core.item import PayableLineItem, RecurringLineItem
from pfg.donationform.interfaces import IDonationFieldSet, DonationCreatedEvent, IDonationCart
from Products.CMFPlone.utils import safe_unicode
try:
from zope.intid.interfaces import IIntIds
IIntIds
except ImportError:
IIntIds = None
try:
from Products.PloneGetPaid import sessions
sessions
except ImportError:
sessions = None
class DonationFieldLineItemFactory(object):
    """Creates a GetPaid line item from a donation form field submission.

    Reads the donation amount / recurrence options out of the field's REQUEST
    form and replaces the cart contents with a single donation line item.
    """
    implements(ILineItemFactory)
    adapts(IShoppingCart, IDonationFieldSet)

    def __init__(self, cart, field):
        self.cart = cart
        self.field = field
        form = field.REQUEST.form
        fname = self.field.getId()
        # A preset giving level wins over a free-form amount.
        self.amount = form.get(fname + '_level')
        if not self.amount:
            self.amount = form.get(fname + '_amount', '0')
        # Tolerate a leading currency symbol typed by the user.
        self.amount = self.amount.lstrip('$')
        self.is_recurring = form.get(fname + '_recurring', False)
        self.occurrences = form.get(fname + '_occurrences', 9999)

    def create(self):
        """Build the line item, empty the cart, add the item, fire the event."""
        pfg = self.field.aq_parent
        if self.is_recurring:
            item = RecurringLineItem()
            item.interval = 1
            item.unit = 'months'
            item.total_occurrences = self.occurrences
        else:
            item = PayableLineItem()
        item.item_id = self.field.UID()
        if IIntIds:
            intid_utility = queryUtility(IIntIds)
            if intid_utility:
                item.uid = intid_utility.register(self.field)
        item.name = safe_unicode(pfg.Title())
        item.cost = float(self.amount)
        item.quantity = 1
        # Clear the cart before adding the donation.
        # We don't want to surprise users by charging them for something
        # they didn't realize they were buying!
        # FIX: snapshot the keys first — deleting while iterating a live
        # keys() view raises RuntimeError on Python 3.
        for key in list(self.cart.keys()):
            del self.cart[key]
        self.cart[item.item_id] = item
        alsoProvides(self.cart, IDonationCart)
        notify(DonationCreatedEvent(self.cart))
        # Best effort: remember where the user came from, only when the
        # optional PloneGetPaid sessions module imported successfully.
        if sessions is not None:
            try:
                sessions.set_came_from_url(pfg)
            except Exception:
                # FIX: narrowed from a bare except; still deliberately
                # swallowed — failing to record the referrer must not
                # abort checkout.
                pass
        return item
| collective/pfg.donationform | pfg/donationform/cart.py | cart.py | py | 2,366 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "zope.intid.interfaces.IIntIds",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "zope.intid.interfaces.IIntIds",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "Products.PloneGetPaid.sessions",
"line_number": 17,
"usage_type": "name"
},... |
71550269628 | from pathlib import Path
import typer
from meteor import IdentityStage
from meteor import Language
from meteor import StemmingStage
from meteor import meteor_macro_avg
def cli(
    hypotheses_file: Path = typer.Option(
        ...,
        "-h",
        "--hypotheses",
        help="utf-8 encoded file with system output, one sentence per line",
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        resolve_path=True,
    ),
    references_file: Path = typer.Option(
        ...,
        "-r",
        "--references",
        help="utf-8 encoded file with translation references, one sentence per line",  # noqa
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        resolve_path=True,
    ),
    language: Language = typer.Option(
        Language.german,
        "-l",
        "--language",
        help="The language to run meteor for. Controls tokenization and stemming.",  # noqa
        show_default=True,
        case_sensitive=False,
    ),
):
    """
    Computes the METEOR score for the given sentence pairs
    and returns the macro average.
    Input files must be of same length and contain one sentence per line.
    Assumes UTF-8 encoding.
    """
    # Read both files, skipping blank lines.
    with hypotheses_file.open(encoding="utf-8") as infile:
        hypotheses = [line.strip() for line in infile if line.strip()]
    with references_file.open(encoding="utf-8") as infile:
        references = [line.strip() for line in infile if line.strip()]
    if len(hypotheses) != len(references):
        typer.echo("Error: Input files must be of same length.")
        # FIX: use typer's exit mechanism instead of the interactive builtin
        # exit(), which comes from the site module and is not guaranteed to
        # exist in every interpreter environment.
        raise typer.Exit(code=1)
    # Exact-match stage scores fully; stem matches are discounted to 0.6.
    stages = [
        IdentityStage(1.0),
        StemmingStage(0.6, language),
    ]
    macro_avg = meteor_macro_avg(hypotheses, references, stages, language)
    typer.echo(f"METEOR macro average: {round(macro_avg, 3)}")
def main():
    # Console-script entry point; typer builds the CLI from cli()'s signature.
    typer.run(cli)
| wbwseeker/meteor | meteor/cli.py | cli.py | py | 1,909 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "meteor.Language",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typer.Option",
"line_nu... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.