seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
10058551191 | import numpy as np
from scipy.integrate import solve_ivp
class VanDerPolOscillator:
    """Van der Pol oscillator x'' - epsilon*(1 - x^2)*x' + x = 0, written as the
    first-order system x1' = x2, x2' = -x1 - epsilon*(x1^2 - 1)*x2."""

    def __init__(self, epsilon):
        # Damping/nonlinearity parameter; epsilon == 0 reduces to a harmonic oscillator.
        self.epsilon = epsilon

    def coupledEquation(self, t, x):
        """Right-hand side f(t, x) of the first-order system; x = [x1, x2]."""
        x1 = x[0]
        x2 = x[1]
        fx1 = x2
        fx2 = -x1 - (self.epsilon * ((x1 ** 2) - 1) * x2)
        return np.array([fx1, fx2], float)

    def methodRK45(self, initialState, t0=0, tEnd=None):
        """Integrate the system with SciPy's adaptive RK45.

        Parameters
        ----------
        initialState : sequence of 2 floats, [x1(t0), x2(t0)].
        t0 : start time (default 0).
        tEnd : end time; defaults to 8*pi, the previously hard-coded horizon,
            so existing callers behave identically.

        Returns
        -------
        (t, x1, x2) arrays of sample times and the two state components.
        """
        if tEnd is None:
            tEnd = 8 * np.pi
        # max_step == first_step keeps the output on a near-uniform 0.01 grid.
        solution = solve_ivp(
            self.coupledEquation,
            [t0, tEnd],
            initialState,
            method='RK45',
            first_step=0.01,
            max_step=0.01,
        )
        # Attribute access on the OdeResult object (clearer than dict-style lookup).
        return solution.t, solution.y[0], solution.y[1]
| MFournierQC/PhysiqueNumerique | TP3/VanDerPolOscillator.py | VanDerPolOscillator.py | py | 685 | python | en | code | 0 | github-code | 36 |
1900770945 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 21:43:03 2017
@author: ly
"""
import numpy as np
import pandas as pd
import os
import seaborn as sns # data visualization library
import matplotlib.pyplot as plt
import xgboost as xgb
import math
from sklearn import metrics
from sklearn.model_selection import KFold
from xgboost.sklearn import XGBClassifier
from sklearn.linear_model import Lasso
from sklearn.metrics import confusion_matrix
def prepare_data(filepath):
    """Load the CSV at ``filepath`` and binarise its 'Diagnosis' column.

    Returns (dataframe copy with Diagnosis mapped DLB -> 1, anything else -> 0,
    number of DLB rows, number of non-DLB rows).
    """
    # BUG FIX: the argument used to be overwritten by a hard-coded path, so
    # callers could never load a different file.
    dataset = pd.read_csv(filepath, index_col=None)
    temp = dataset.copy()
    DLB_count = 0
    nonDLB_count = 0
    for i in range(len(temp)):
        if temp.loc[i, 'Diagnosis'] == 'DLB':
            temp.loc[i, 'Diagnosis'] = 1
            DLB_count += 1
        else:
            temp.loc[i, 'Diagnosis'] = 0
            nonDLB_count += 1
    return temp, DLB_count, nonDLB_count
#filepath =r"E:\workspace\Dementia\Q_DLB_nonDLB_after_removing_education_normalizion_0_to_1.csv"
#temp,DLB_count,nonDLB_count = prepare_data(filepath)
#raw_target = list(temp.loc[:,'Diagnosis'] )
#Label_Array = temp.columns[:-1]
#import itertools
#combination = list(itertools.combinations(Label_Array,2))
def statistic(true, predict):
    """Confusion-matrix counts for binary (0/1) labels.

    Returns [TP, FP, TN, FN].
    BUG FIX: FP and FN were swapped — a missed positive (true 1, predicted 0)
    was counted as FP and a false alarm (true 0, predicted 1) as FN, which
    corrupted Sn/Sp/Mcc computed downstream by ``assess``.
    """
    TP = 0  # true 1, predicted 1
    FP = 0  # true 0, predicted 1 (false alarm)
    TN = 0  # true 0, predicted 0
    FN = 0  # true 1, predicted 0 (miss)
    for i in range(len(true)):
        if true[i] == 1:
            if predict[i] == 1:
                TP += 1
            else:
                FN += 1
        elif predict[i] == 1:
            FP += 1
        else:
            TN += 1
    return [TP, FP, TN, FN]
#统计准确率衡量的5个指标:Sn,Sp,Avc,Acc,Mcc
def assess(TP, FP, TN, FN):
    """Derive five classifier metrics from confusion-matrix counts.

    Returns [Sn, Sp, Acc, Avc, Mcc]: sensitivity, specificity, overall
    accuracy, class-balanced average accuracy, and Matthews correlation.
    Each guarded division falls back to 0 when its denominator is zero.
    """
    Sn = Sp = Acc = Avc = Mcc = 0
    positives = TP + FN
    negatives = TN + FP
    if positives != 0:
        Sn = TP * 1.0 / positives          # recall on the positive class
    if negatives != 0:
        Sp = TN * 1.0 / negatives          # recall on the negative class
    Avc = (Sn + Sp) * 1.0 / 2              # balanced accuracy
    Acc = (TP + TN) * 1.0 / (TP + FP + TN + FN)
    mcc_denominator = (TP + FN) * (TP + FP) * (TN + FP) * (TN + FN)
    if mcc_denominator != 0:
        Mcc = (TP * TN - FP * FN) * 1.0 / math.sqrt(mcc_denominator)
    return [Sn, Sp, Acc, Avc, Mcc]
def kFoldTest(clf, raw_data, raw_target):
    """10-fold cross-validation: fit ``clf`` on each training split and
    collect its predictions for the corresponding test split.

    raw_data: 2-D numpy array of samples; raw_target: list of labels.
    Returns a flat list of predicted labels, one per sample, in fold order
    (KFold without shuffle yields the original sample order).
    """
    predict = []
    kf = KFold(n_splits=10)
    for train_index, test_index in kf.split(raw_data):
        # Plain fancy indexing; the original double-bracket form
        # raw_data[[train_index]] relied on deprecated NumPy behaviour and
        # yields a wrongly-shaped (1, k, d) array on modern NumPy.
        X_train, X_test = raw_data[train_index], raw_data[test_index]
        # Index the label list by the actual train indices instead of slicing
        # around the test fold, which assumed contiguous test indices.
        Y_train = [raw_target[i] for i in train_index]
        clf.fit(X_train, Y_train)
        predict.append(clf.predict(X_test))
    # Flatten the per-fold prediction arrays into one list.
    return [label for fold in predict for label in fold]
def common_classier(raw_data, raw_target):
    """Evaluate several off-the-shelf classifiers with 10-fold CV.

    Returns a DataFrame (rows Sn/Sp/ACC/AVC/MCC, one column per classifier)
    suitable for writing to Excel.
    """
    from sklearn import neighbors
    from sklearn.svm import SVC
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.ensemble import AdaBoostClassifier
    # (column label, estimator) pairs, evaluated in this order.
    # MultinomialNB is deliberately absent: it cannot consume PCA output
    # (negative feature values).
    classifiers = [
        ('SVM', SVC(kernel='linear', C=1)),
        ('KNN', neighbors.KNeighborsClassifier(n_neighbors=3)),
        ('Dtree', DecisionTreeClassifier(random_state=0)),
        ('RF', RandomForestClassifier(n_estimators=30, max_depth=13, min_samples_split=110,
                                      min_samples_leaf=20, max_features='sqrt', oob_score=True,
                                      random_state=10)),
        ('adaboost', AdaBoostClassifier(n_estimators=100)),
    ]
    scores = pd.DataFrame(index=['Sn', 'Sp', 'ACC', 'AVC', 'MCC'])
    for name, clf in classifiers:
        counts = statistic(raw_target, kFoldTest(clf, raw_data, raw_target))
        scores[name] = assess(counts[0], counts[1], counts[2], counts[3])
    return scores
'''
filepath = r"E:\workspace\Dementia\Q_DLB_nonDLB_after_removing_education_normalizion_0_to_1.csv"
temp,DLB_count,nonDLB_count = prepare_data(filepath)
raw_data = temp.drop('Diagnosis',1).as_matrix(columns=None)
raw_label = list(temp.loc[:,'Diagnosis'])
df = common_classier(raw_data,raw_label)
temp_acc = max(list(df.loc['ACC',:] ))
Label_Array = temp.columns[:-1]
import itertools
combination = list(itertools.combinations(Label_Array,2))
writer = pd.ExcelWriter(r'E:\workspace\Dementia\acc.xlsx')
i = 0
max_acc = 0
for label_index in combination:
sheet = "combination" + str(i)
i += 1
raw_data = temp.loc[:,label_index].as_matrix(columns=None)
temp_df = common_classier(raw_data, raw_label)
temp_acc = max(list(temp_df.loc['ACC',:] ))
if temp_acc >= max_acc :
temp_df.to_excel(writer,sheet_name=sheet,index=True)
max_acc = temp_acc
writer.save()
'''
def Combination(temp, DLB_count, nonDLB_count, num):
    """For every ``num``-feature combination of ``temp``'s feature columns,
    run the common classifiers and write the metric table of each new
    best-accuracy combination to a sheet of an Excel workbook.

    temp: labelled DataFrame whose last column is 'Diagnosis'.
    DLB_count / nonDLB_count are currently unused but kept for interface
    compatibility with existing callers.
    """
    raw_target = list(temp.loc[:, 'Diagnosis'])
    Label_Array = temp.columns[:-1]
    import itertools
    combination = list(itertools.combinations(Label_Array, num))
    writer = pd.ExcelWriter(r'E:\workspace\Dementia\acc.xlsx')
    i = 0
    max_acc = 0
    for label_index in combination:
        sheet = "combination" + str(i)
        i += 1
        # .values replaces DataFrame.as_matrix(), which was removed in
        # pandas >= 1.0.
        raw_data = temp.loc[:, label_index].values
        temp_df = common_classier(raw_data, raw_target)
        temp_acc = max(list(temp_df.loc['ACC', :]))
        # Only persist combinations that match or beat the best so far.
        if temp_acc >= max_acc:
            temp_df.to_excel(writer, sheet_name=sheet, index=True)
            max_acc = temp_acc
    # close() flushes and saves; ExcelWriter.save() is deprecated/removed in
    # recent pandas.
    writer.close()
if __name__ == '__main__':
    filepath = r"E:\workspace\Dementia\Q_DLB_nonDLB_after_removing_education_normalizion_0_to_1.csv"
    temp, DLB_count, nonDLB_count = prepare_data(filepath)
    # BUG FIX: Combination expects the labelled DataFrame (it reads the
    # 'Diagnosis' column and the feature names from temp.columns); the bare
    # numpy matrix previously passed raised AttributeError immediately.
    Combination(temp, DLB_count, nonDLB_count, 2)
| LiuyangJLU/Dementia | 1205test.py | 1205test.py | py | 7,023 | python | en | code | 0 | github-code | 36 |
2894349509 | from typing import Dict
from src.property.PropertyFactory import PropertyFactory
from src.storage.common.entity.Entity import Entity
from src.template.entity.EntityTemplate import EntityTemplate
class EntityFactory:
    """Builds ``Entity`` objects from an ``EntityTemplate`` plus raw property values."""

    def __init__(self, entity_template: EntityTemplate):
        self.entity_template = entity_template

    def create(self, key: str, props_values: Dict[str, str]) -> Entity:
        """Create the entity ``key``, resolving ``props_values`` against the template's
        property templates; the message factory describes any missing property."""
        missing_message = lambda prop_template_id: 'Property ' + prop_template_id + ' not found for entity ' + key
        properties = PropertyFactory.create_from_template_and_dict(
            self.entity_template.properties_templates,
            props_values,
            missing_message,
        )
        return Entity(key, properties)
| andreyzaytsev21/MasterDAPv2 | src/storage/common/entity/EntityFactory.py | EntityFactory.py | py | 714 | python | en | code | 0 | github-code | 36 |
24212631697 | tc = int(input())
for _ in range(tc):
# 금광 행렬 정보 입력 받음
n, m = map(int, input().split())
# 매장된 금의 개수 정보 입력 받음
data = list(map(int, input().split()))
# matrix, arr 초기화 => tc for문 2번째 돌 때 초기화 상태여야함
dp = []
arr = []
# 매장된 금의 개수 정보를 matrix로 표현
for i in range(n*m):
arr.append(data[i])
if (i+1) % m == 0:
dp.append(arr)
arr = []
# 행의 개수 = n
# 이동방법 RU, R, RD 총 세 개
# 단, 범위를 벗어나지 못하므로 맨위, 맨아래에선 오른쪽 이동만 가능
# 모든 방향의 이동에 대해서 최종 금광 개수를 저장
# 모든 저장 값들 중 최댓값 산출
# 세 방향에서 왔을 때 최댓값 담을 2차원 배열
find_max = []
for j in range(1, m):
for i in range(n):
if i == 0:
dp[i][j] += max(dp[i][j-1], dp[i+1][j-1])
if i == n-1:
dp[i][j] += max(dp[i][j-1], dp[i-1][j-1])
else:
dp[i][j] += max(dp[i][j-1], dp[i-1][j-1], dp[i+1][j-1])
if j == m-1:
for k in range(n):
find_max.append(dp[k][j])
result = max(find_max)
print(result)
| 031wnstjd/Algorithm | 이것이 취업을 위한 코딩 테스트다 with 파이썬/다이나믹프로그래밍/금광 문제.py | 금광 문제.py | py | 1,382 | python | ko | code | 0 | github-code | 36 |
73037033705 | import pathlib
import sys
import typing
import flash
import flash.image
import pytorch_lightning
import torch
import torchmetrics
import torchvision
import enpheeph
import enpheeph.injections.plugins.indexing.indexingplugin
CURRENT_DIR = pathlib.Path(__file__).absolute().parent
RESULTS_DIRECTORY = CURRENT_DIR / "results" / "alexnet-cifar10"
WEIGHTS_FILE = RESULTS_DIRECTORY / "weights" / "alexnet-cifar10.pt"
LOG_DIRECTORY = RESULTS_DIRECTORY / "injection_results"
WEIGHTS_FILE.parent.mkdir(parents=True, exist_ok=True)
LOG_DIRECTORY.mkdir(parents=True, exist_ok=True)
CIFAR_DIRECTORY = pathlib.Path("/shared/ml/datasets/vision/") / "CIFAR10"
class AlexNetLightningModule(pytorch_lightning.LightningModule):
    """LightningModule wrapping torchvision's AlexNet for classification.

    When ``pretrained`` is True it warm-starts from the torchvision ImageNet
    checkpoint, keeping only tensors whose shapes match the current model
    (so a head built for a different ``num_classes`` keeps its fresh init).
    """

    def __init__(self, pretrained: bool = True, num_classes: int = 1000) -> None:
        super().__init__()
        self.num_classes = num_classes
        self.pretrained = pretrained
        self.model = torchvision.models.AlexNet(num_classes=num_classes)
        if self.pretrained:
            # must be accessed with sys.modules otherwise it uses the function
            # which is imported from the sub-module
            # we use type: ignore as mypy cannot check torchvision typings
            # we have to split it otherwise black creates problems
            mod = sys.modules["torchvision.models.alexnet"]
            state_dict = torch.hub.load_state_dict_from_url(
                mod.model_urls["alexnet"],  # type: ignore[attr-defined]
                progress=True,
            )
            # we must filter the mismatching keys in the state dict
            # we generate the current model state dict
            # NOTE(review): the zip assumes both state dicts list parameters in
            # the same order — true for identical AlexNet architectures.
            model_state_dict = self.model.state_dict()
            filtered_state_dict = {
                k: v_new
                # we select the new value if the dimension is the same as with the old
                # one
                if v_new.size() == v_old.size()
                # otherwise we use the initialized one from the model
                else v_old
                for (k, v_old), v_new in zip(
                    model_state_dict.items(),
                    state_dict.values(),
                )
            }
            self.model.load_state_dict(filtered_state_dict, strict=False)
        # softmax over the class dimension, applied only for the accuracy metric
        self.normalizer_fn = torch.nn.Softmax(dim=-1)
        self.accuracy_fn = torchmetrics.Accuracy()
        # CrossEntropyLoss consumes raw logits, hence no softmax in the loss path
        self.loss_fn = torch.nn.CrossEntropyLoss()
        self.save_hyperparameters()
        # we initialize the weights
        self.init_weights()

    def init_weights(self) -> None:
        """Re-initialise conv/BN/linear layers in place.

        Runs after the (optional) pretrained load; layers that did receive
        checkpoint weights are therefore overwritten by this init.
        """
        # this initialization is similar to the ResNet one
        # taken from https://github.com/Lornatang/AlexNet-PyTorch/
        # @ alexnet_pytorch/model.py#L63
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, mode="fan_out", nonlinearity="relu"
                )
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)

    def forward(self, inpt: torch.Tensor) -> torch.Tensor:
        """Forward pass: returns raw class logits from the wrapped AlexNet."""
        return self.model(inpt)

    def configure_optimizers(self) -> torch.optim.Optimizer:
        """Plain SGD, lr 1e-2, over all module parameters."""
        optimizer = torch.optim.SGD(self.parameters(), lr=1e-2)
        return optimizer

    def inference(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> typing.Dict[str, torch.Tensor]:
        """Shared train/val/test step body: unpack the batch, run the model,
        return ``{"loss": ..., "accuracy": ...}``."""
        # we need to check for the batch to be a flash batch or to be a standard tuple
        # as otherwise it may not be compatible
        if isinstance(batch, dict):
            x = batch.get(flash.core.data.data_source.DefaultDataKeys.INPUT, None)
            y = batch.get(flash.core.data.data_source.DefaultDataKeys.TARGET, None)
            if x is None or y is None:
                raise ValueError("Incompatible input for the batch")
        else:
            x, y = batch
        output = self.forward(x)
        return {
            # loss from raw logits, accuracy from softmax-normalised scores
            "loss": self.loss_fn(output, y),
            "accuracy": self.accuracy_fn(self.normalizer_fn(output), y),
        }

    def training_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> torch.Tensor:
        """Lightning training step: logs train loss/accuracy, returns the loss."""
        res = self.inference(batch, batch_idx)
        self.log_dict(
            {"train_loss": res["loss"], "train_accuracy": res["accuracy"]},
            prog_bar=True,
            on_step=True,
            on_epoch=True,
            logger=True,
        )
        return res["loss"]

    def validation_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> None:
        """Lightning validation step: logs val loss/accuracy only."""
        res = self.inference(batch, batch_idx)
        self.log_dict(
            {"val_loss": res["loss"], "val_accuracy": res["accuracy"]},
            prog_bar=True,
            on_step=True,
            on_epoch=True,
            logger=True,
        )

    def test_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> None:
        """Lightning test step: logs test loss/accuracy only."""
        res = self.inference(batch, batch_idx)
        self.log_dict(
            {"test_loss": res["loss"], "test_accuracy": res["accuracy"]},
            prog_bar=True,
            on_step=True,
            on_epoch=True,
            logger=True,
        )
# ---------------------------------------------------------------------------
# Experiment driver: train/load AlexNet on CIFAR-10, then run fault-injection
# campaigns with enpheeph, comparing test accuracy with faults off and on.
# ---------------------------------------------------------------------------
pytorch_lightning.seed_everything(seed=41, workers=True)

# SQLite backend where monitors write their captured statistics.
storage_plugin = enpheeph.injections.plugins.storage.SQLiteStoragePlugin(
    db_url="sqlite:///" + str(LOG_DIRECTORY / "database.sqlite")
)
pytorch_mask_plugin = enpheeph.injections.plugins.NumPyPyTorchMaskPlugin()
pytorch_handler_plugin = enpheeph.handlers.plugins.PyTorchHandlerPlugin()

# Monitor on the first conv layer's activations, captured BEFORE fault_1
# (injection order in the handler list determines before/after).
monitor_1 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: ...,
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# Stuck-at-1 fault on bits 10/16/31 of one slice of the first conv layer's
# weight tensor.
fault_1 = enpheeph.injections.OutputPyTorchFault(
    location=enpheeph.utils.data_classes.FaultLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Weight,
        parameter_name="weight",
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (
                ...,
                0,
                0,
            ),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=[10, 16, 31],
        bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
    ),
    low_level_torch_plugin=pytorch_mask_plugin,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# Same monitor location as monitor_1, placed after fault_1 in the list.
monitor_2 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: ...,
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# Monitor on a slice of the first classifier layer's activations (pre-fault_2).
monitor_3 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# Stuck-at-1 fault on ALL bits (bit_index=...) of the same activation slice.
fault_2 = enpheeph.injections.OutputPyTorchFault(
    location=enpheeph.utils.data_classes.FaultLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=...,
        bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
    ),
    low_level_torch_plugin=pytorch_mask_plugin,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# Same location as monitor_3, placed after fault_2 in the list.
monitor_4 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)

# Handler owning every injection; individual injections are toggled below.
injection_handler = enpheeph.handlers.InjectionHandler(
    injections=[monitor_1, fault_1, monitor_2, monitor_3, fault_2, monitor_4],
    library_handler_plugin=pytorch_handler_plugin,
)
callback = enpheeph.integrations.pytorchlightning.InjectionCallback(
    injection_handler=injection_handler,
    storage_plugin=storage_plugin,
)
trainer = pytorch_lightning.Trainer(
    callbacks=[callback],
    deterministic=True,
    enable_checkpointing=False,
    max_epochs=10,
    # one can use gpu but some functions will not be deterministic, so deterministic
    # must be set to False
    accelerator="cpu",
    devices=1,
    # if one uses spawn or dp it will fail as sqlite connector is not picklable
    # strategy="ddp",
)
model = AlexNetLightningModule(num_classes=10, pretrained=False)

# transform = torchvision.transforms.Compose(
#     [
#         #torchvision.transforms.ToTensor(),
#         torchvision.transforms.Normalize(
#             (0.5, 0.5, 0.5),
#             (0.5, 0.5, 0.5),
#         ),
#         torchvision.transforms.RandomHorizontalFlip(),
#     ]
# )
cifar_train = torchvision.datasets.CIFAR10(
    str(CIFAR_DIRECTORY),
    train=True,
    download=True,
)
cifar_test = torchvision.datasets.CIFAR10(
    str(CIFAR_DIRECTORY),
    train=False,
    download=True,
)
# Flash datamodule carves a 20% validation split out of the training set.
datamodule = flash.image.ImageClassificationData.from_datasets(
    train_dataset=cifar_train,
    test_dataset=cifar_test,
    val_split=0.2,
    num_workers=64,
    batch_size=32,
)

# Train only once; afterwards reuse the saved checkpoint.
if not WEIGHTS_FILE.exists():
    trainer.fit(
        model,
        train_dataloaders=datamodule.train_dataloader(),
        val_dataloaders=datamodule.val_dataloader(),
    )
    trainer.save_checkpoint(str(WEIGHTS_FILE))
model = model.load_from_checkpoint(str(WEIGHTS_FILE))

# no injections/monitors
print("\n\nBaseline, no injection or monitors\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we enable only the monitors
# we use this as baseline, no injections
callback.injection_handler.activate([monitor_1, monitor_2, monitor_3, monitor_4])
print("\n\nBaseline, no injection, only monitors\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we enable the faults
callback.injection_handler.activate([fault_1, fault_2])
print("\n\nWeight + activation injection\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we disable the faults
callback.injection_handler.deactivate([fault_1, fault_2])
print("\n\nBaseline again, no injection, only monitors\n")
# we test again to reach same results as before injection
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
| Alexei95/enpheeph | papers/iros2022/comparisons/tensorfi2/alexnet-cifar10.py | alexnet-cifar10.py | py | 13,471 | python | en | code | 1 | github-code | 36 |
27884844846 | import sys, os, string, random, psycopg2, sqlite3
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Float, Boolean, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker, backref, scoped_session
from sqlalchemy import create_engine
from sqlalchemy.sql import func
from sqlalchemy.sql.sqltypes import TIMESTAMP
Base = declarative_base()
class Users(Base):
    """Account record; ``serialize`` deliberately omits the password."""
    # NOTE(review): passwords appear to be stored as plain strings here —
    # confirm hashing happens before insert elsewhere in the app.
    __tablename__ = 'users'

    id = Column(Integer, primary_key = True)
    displayname = Column(String(80), nullable = False)
    username = Column(String(80), nullable = False)
    password = Column(String(80), nullable = False)
    bio = Column(String(250), default = '')
    icon_link = Column(String, default = '')
    icon_id = Column(String, default = '')
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())
    last_loggedin = Column(TIMESTAMP(timezone = True), server_default = func.now())
    last_read_notifications = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row (timestamps stringified).
        return {
            'id': self.id,
            'displayname': self.displayname,
            'username': self.username,
            'bio': self.bio,
            'icon_link': self.icon_link,
            'icon_id': self.icon_id,
            'date_created': str(self.date_created),
            'last_loggedin': str(self.last_loggedin),
            'last_read_notifications': str(self.last_read_notifications),
        }
class Follows(Base):
    """Directed follow edge: ``user_id`` follows ``follows_id``."""
    __tablename__ = 'follows'

    id = Column(Integer, primary_key = True)
    user_id = Column(Integer, ForeignKey('users.id'))
    user_rel = relationship('Users', foreign_keys=[user_id])
    follows_id = Column(Integer, ForeignKey('users.id'))
    follows_rel = relationship('Users', foreign_keys=[follows_id])
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # Returns Data Object In Proper Format
        return {
            'id': self.id,
            'user': self.user_rel.serialize if self.user_rel else None,
            'follows': self.follows_rel.serialize if self.follows_rel else None,
            'date_created': str(self.date_created),
        }
class Posts(Base):
    """User post; ``hashtags`` is stored as a comma-separated string."""
    __tablename__ = 'posts'

    id = Column(Integer, nullable = False, primary_key = True)
    owner_id = Column(Integer, ForeignKey('users.id'))
    owner_rel = relationship('Users')
    title = Column(String, nullable = False)
    body = Column(Text, nullable = False)
    hashtags = Column(String, default = '')
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())
    # refreshed automatically on every UPDATE
    last_updated = Column(TIMESTAMP(timezone = True), server_default = func.now(), onupdate = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view; exposes hashtags both raw and split into a list.
        return {
            'id': self.id,
            'owner': self.owner_rel.serialize if self.owner_rel else None,
            'title': self.title,
            'body': self.body,
            'hashtags': self.hashtags,
            'hashtags_list': self.hashtags.split(',') if self.hashtags != '' else [],
            'date_created': str(self.date_created),
            'last_updated': str(self.last_updated),
        }
class PostLikes(Base):
    """Like of a post by a user."""
    __tablename__ = 'post_likes'

    id = Column(Integer, nullable = False, primary_key = True)
    owner_id = Column(Integer, ForeignKey('users.id'))
    owner_rel = relationship('Users')
    post_id = Column(Integer, ForeignKey('posts.id'))
    post_rel = relationship('Posts')
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'owner': self.owner_rel.serialize if self.owner_rel else None,
            'post_id': self.post_id,
            'date_created': str(self.date_created),
        }
class Comments(Base):
    """Comment on a post; hashtags stored comma-separated like ``Posts``."""
    __tablename__ = 'comments'

    id = Column(Integer, nullable = False, primary_key = True)
    owner_id = Column(Integer, ForeignKey('users.id'))
    owner_rel = relationship('Users')
    post_id = Column(Integer, ForeignKey('posts.id'))
    post_rel = relationship('Posts')
    body = Column(Text, nullable = False)
    hashtags = Column(String(80), default = '')
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())
    # refreshed automatically on every UPDATE
    last_updated = Column(TIMESTAMP(timezone = True), server_default = func.now(), onupdate = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view; exposes hashtags both raw and as a list.
        return {
            'id': self.id,
            'owner': self.owner_rel.serialize if self.owner_rel else None,
            'post_id': self.post_id,
            'body': self.body,
            'hashtags': self.hashtags,
            'hashtags_list': self.hashtags.split(',') if self.hashtags != '' else [],
            'date_created': str(self.date_created),
            'last_updated': str(self.last_updated),
        }
class CommentLikes(Base):
    """Like of a comment by a user."""
    __tablename__ = 'comment_likes'

    id = Column(Integer, nullable = False, primary_key = True)
    owner_id = Column(Integer, ForeignKey('users.id'))
    owner_rel = relationship('Users')
    comment_id = Column(Integer, ForeignKey('comments.id'))
    comment_rel = relationship('Comments')
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'owner': self.owner_rel.serialize if self.owner_rel else None,
            'comment_id': self.comment_id,
            'date_created': str(self.date_created),
        }
class Messagings(Base):
    """Conversation thread between ``user_id`` and ``sender_id``."""
    __tablename__ = 'messagings'

    id = Column(Integer, nullable = False, primary_key = True)
    user_id = Column(Integer, ForeignKey('users.id'))
    user_rel = relationship('Users', foreign_keys=[user_id])
    sender_id = Column(Integer, ForeignKey('users.id'))
    sender_rel = relationship('Users', foreign_keys=[sender_id])
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())
    # refreshed automatically on every UPDATE (e.g. new message activity)
    last_updated = Column(TIMESTAMP(timezone = True), server_default = func.now(), onupdate = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'user': self.user_rel.serialize if self.user_rel else None,
            'sender': self.sender_rel.serialize if self.sender_rel else None,
            'date_created': str(self.date_created),
            'last_updated': str(self.last_updated),
        }
class MessagingUserLastOpens(Base):
    """Tracks when a user last opened a given conversation (for unread state)."""
    __tablename__ = 'messaging_user_last_opens'

    id = Column(Integer, nullable = False, primary_key = True)
    messaging_id = Column(Integer, ForeignKey('messagings.id'))
    messaging_rel = relationship('Messagings', foreign_keys=[messaging_id])
    user_id = Column(Integer, ForeignKey('users.id'))
    user_rel = relationship('Users', foreign_keys=[user_id])
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())
    user_last_opened = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'messaging': self.messaging_rel.serialize if self.messaging_rel else None,
            'user_id': self.user_id,
            'date_created': str(self.date_created),
            'user_last_opened': str(self.user_last_opened),
        }
class Messages(Base):
    """Direct message from ``from_id`` to ``to_id``."""
    __tablename__ = 'messages'

    id = Column(Integer, nullable = False, primary_key = True)
    from_id = Column(Integer, ForeignKey('users.id'))
    from_rel = relationship('Users', foreign_keys=[from_id])
    to_id = Column(Integer, ForeignKey('users.id'))
    to_rel = relationship('Users', foreign_keys=[to_id])
    body = Column(Text, nullable = False)
    read = Column(Boolean, default = False)
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'body': self.body,
            "read": self.read,
            'from': self.from_rel.serialize if self.from_rel else None,
            'to': self.to_rel.serialize if self.to_rel else None,
            'date_created': str(self.date_created),
        }
class Notifications(Base):
    """Event notification; target is referenced generically by type + id."""
    __tablename__ = 'notifications'

    id = Column(Integer, nullable = False, primary_key = True)
    from_id = Column(Integer, ForeignKey('users.id'))
    from_rel = relationship('Users', foreign_keys=[from_id])
    to_id = Column(Integer, ForeignKey('users.id'))
    to_rel = relationship('Users', foreign_keys=[to_id])
    event = Column(String, nullable = False)
    # polymorphic reference: the kind of object the notification points at,
    # plus its id stored as a string
    target_type = Column(String, nullable = False)
    target_id = Column(String, nullable = False)
    read = Column(Boolean, default = False)
    date_created = Column(TIMESTAMP(timezone = True), server_default = func.now())

    @property
    def serialize(self):
        # JSON-safe dict view of the row.
        return {
            'id': self.id,
            'from': self.from_rel.serialize if self.from_rel else None,
            'to': self.to_rel.serialize if self.to_rel else None,
            'event': self.event,
            'target_type': self.target_type,
            'target_id': self.target_id,
            'read': self.read,
            'date_created': str(self.date_created),
        }
# --- Create Database Session --- #
# Use DATABASE_URL when provided (production), else a local SQLite file.
sqlite_file = "sqlite:///database.db?check_same_thread=False"
db_string = os.environ.get('DATABASE_URL', sqlite_file)

# COMPAT FIX: SQLAlchemy >= 1.4 rejects the legacy "postgres://" scheme that
# some providers (e.g. Heroku) still put in DATABASE_URL; rewrite it to the
# supported "postgresql://" (a no-op when the URL is already correct).
if db_string.startswith('postgres://'):
    db_string = db_string.replace('postgres://', 'postgresql://', 1)

app_state = ''
# A postgres-backed URL implies the deployed environment.
if db_string[:8] == 'postgres':
    app_state = 'production'
    print('--- production ---')
else:
    app_state = 'development'
    print('--- development ---')

# echo=True logs every emitted SQL statement (useful in development, noisy in prod).
engine = create_engine(db_string, echo=True)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# scoped_session gives each thread its own session registry.
Scoped_Session = scoped_session(DBSession)
db_session = Scoped_Session()
| ryanwaite28/cmsc-495-project-backend | models.py | models.py | py | 10,173 | python | en | code | 0 | github-code | 36 |
import xarray as xr  # BUG FIX: ``xr`` was used throughout but never imported

t = xr.open_dataset('C:/Users/rober/Downloads/hawaii_soest_794e_6df2_6381_6464_6c66_07de.nc')  # global netcdf file
# split up around -180
dsEast = t.sel(longitude=slice(-180, -150))
dsWest = t.sel(longitude=slice(150, 180))
# revise longitude labels: shift the western chunk below -180 so the merged
# grid is monotonically increasing across the dateline
dsWest['longitude2'] = dsWest.longitude - 360
dsWest = dsWest.swap_dims({'longitude': 'longitude2'})
dsWest = dsWest.drop('longitude')
dsEast['longitude2'] = dsEast.longitude
dsEast = dsEast.swap_dims({'longitude': 'longitude2'})
dsEast = dsEast.drop('longitude')
# merge the datasets, removing the redundant data at -180
dsAll = dsWest.merge(dsEast.where(dsEast.longitude2 > -180, drop=True))
# revert to original coordinate labels and save
dsAll = dsAll.rename({'longitude2': 'longitude'})
dsAll.to_netcdf('C:/Users/rober/Downloads/PAR.nc')
5798683173 | import pygame
from SupportFuncs import load_image
class URadioButtons(pygame.sprite.Sprite):
    """Horizontal row of mutually-exclusive labelled radio buttons.

    Each entry is 50px of icon plus 5px gap; exactly one button
    (``checked_button``) is drawn as checked, and clicking a button runs its
    registered callback.
    """

    def __init__(self, screen, coords, group):
        super(URadioButtons, self).__init__(group)
        self.coords = coords
        self.buttons = []          # list of [label, callback] pairs, in display order
        self.checked_button = 0    # index of the currently selected button
        self.font = pygame.font.Font('font/arial.ttf', 15)
        self.screen = screen
        self.draw()

    def draw(self):
        """Re-render the whole row onto self.image and blit it to the screen."""
        self.image = pygame.Surface((65 * len(self.buttons), 50), pygame.SRCALPHA)
        self.rect = self.image.get_rect()
        self.rect.x = self.coords[0]
        self.rect.y = self.coords[1]
        for i in range(len(self.buttons)):
            # checked button gets the red label and the "checked" icon
            color = (0, 0, 0)
            image_name = 'ui_images/RadioButtonDefault.png'
            if i == self.checked_button:
                color = (255, 0, 0)
                image_name = 'ui_images/RadioButtonChecked.png'
            text_pg = self.font.render(self.buttons[i][0], True, color)
            btn_img = pygame.transform.scale(load_image(image_name, colorkey=-1),
                                             (50, 50))
            # 50px icon + 5px spacing per slot; label bottom-aligned at y=40
            self.image.blit(btn_img, (50 * i + 5 * (i + 1), 0))
            self.image.blit(text_pg, (50 * i + 10 + 5 * (i + 1), 40 - text_pg.get_height()))
        self.screen.blit(self.image, (self.coords[0], self.coords[1]))

    def click_check(self, pos):
        """If the click sprite ``pos`` hits a button slot, select it and fire
        its callback.  Slot index is derived from the sprite's pixel position."""
        if pygame.sprite.collide_rect(pos, self):
            # NOTE(review): the -10 offsets presumably compensate for the
            # cursor sprite's hotspot — confirm against the caller.
            cell_x = (pos.rect.x - 10) // 50 - self.coords[0] // 50
            cell_y = (pos.rect.y - 10) // 50
            if cell_x < 0 or cell_x >= len(self.buttons) or cell_y != 0:
                return
            self.checked_button = cell_x
            self.buttons[cell_x][1]()
            self.draw()

    def hover_check(self, pos):
        # Radio buttons have no hover behaviour.
        pass

    def add_button(self, text, func):
        """Append a button with label ``text`` invoking ``func`` when selected."""
        self.buttons.append([text, func])
class ULineEdit(pygame.sprite.Sprite):
    """Single-line text input that maps EN keyboard keys to Russian letters.

    Keyboard events are consumed only while the cursor sprite hovers over the
    widget (see hover_check); the field accepts Russian letters, digits and
    spaces, with backspace deleting the last character.
    """

    def __init__(self, screen, coords, group):
        super(ULineEdit, self).__init__(group)
        self.font = pygame.font.Font('font/arial.ttf', 15)
        self.screen = screen
        self.coords = coords
        self.text = ''
        # EN (QWERTY) key name -> Russian (ЙЦУКЕН) character mapping
        self.en_to_ru = {'A': 'ф', 'B': 'и', 'C': 'с',
                         'D': 'в', 'E': 'у', 'F': 'а',
                         'G': 'п', 'H': 'р', 'I': 'ш',
                         'J': 'о', 'K': 'л', 'L': 'д',
                         'M': 'ь', 'N': 'т', 'O': 'щ',
                         'P': 'з', 'Q': 'й', 'R': 'к',
                         'S': 'ы', 'T': 'е', 'U': 'г',
                         'V': 'м', 'W': 'ц', 'X': 'ч',
                         'Y': 'н', 'Z': 'я', ',': 'б',
                         '.': 'ю', ';': 'ж', '\'': 'э',
                         '[': 'х', ']': 'ъ', '/': ','}
        self.draw()

    def draw(self):
        """Render the 200x50 field background plus the current text."""
        self.image = pygame.Surface((200, 50), pygame.SRCALPHA)
        self.rect = self.image.get_rect()
        self.rect.x = self.coords[0]
        self.rect.y = self.coords[1]
        self.image.blit(pygame.transform.scale(load_image('ui_images/LineEdit.png', colorkey=-1), (200, 50)), (0, 0))
        text_pg = self.font.render(self.text, True, (0, 0, 0))
        # text bottom-aligned at y=40, 10px left padding
        self.image.blit(text_pg, (10, 40 - text_pg.get_height()))
        self.screen.blit(self.image, (self.coords[0], self.coords[1]))

    def click_check(self, pos):
        # Clicking has no effect; focus is hover-based (see hover_check).
        pass

    def hover_check(self, pos, event):
        """Consume a KEYDOWN ``event`` while the cursor sprite ``pos`` overlaps
        the widget, editing ``self.text`` accordingly."""
        if pygame.sprite.collide_rect(pos, self):
            if event.type == pygame.KEYDOWN:
                key = pygame.key.name(event.key)
                if key == 'backspace':
                    if len(self.text) >= 1:
                        self.text = self.text[:-1]
                elif key in ['б', 'ю', 'ж', 'э', 'х', 'ъ']:
                    # key name already reported as a Russian character
                    self.text += key
                elif key.upper() in self.en_to_ru:
                    # translate EN key name to the Russian layout character
                    self.text += self.en_to_ru[key.upper()]
                elif key.isdigit():
                    self.text += key
                elif key == 'space':
                    self.text += ' '

    def get_text(self):
        """Return the current field contents."""
        return self.text

    def set_text(self, text):
        """Replace the field contents (no redraw is triggered here)."""
        self.text = text
class UButton(pygame.sprite.Sprite):
    """Clickable push button with a text label and an image skin."""
    def __init__(self, screen, coords, group, text, func, image_name='ui_images/ButtonBlue.png'):
        super(UButton, self).__init__(group)
        self.font = pygame.font.Font('font/arial.ttf', 15)
        self.screen = screen
        self.coords = coords
        self.text = text
        self.func = func  # zero-argument callback fired on click
        self.image_name = image_name
        self.draw()
    def draw(self):
        """Render the 70x50 button skin plus its label onto the screen."""
        self.image = pygame.Surface((70, 50), pygame.SRCALPHA)
        self.rect = self.image.get_rect()
        self.rect.x = self.coords[0]
        self.rect.y = self.coords[1]
        self.image.blit(pygame.transform.scale(load_image(self.image_name, colorkey=-1), (70, 50)), (0, 0))
        text_pg = self.font.render(self.text, True, (0, 0, 0))
        self.image.blit(text_pg, (10, 40 - text_pg.get_height()))
        self.screen.blit(self.image, (self.coords[0], self.coords[1]))
    def hover_check(self, pos):
        """Hovering has no effect on this widget (interface stub)."""
        pass
    def click_check(self, pos):
        """Fire the callback when the click sprite *pos* overlaps the button."""
        if pygame.sprite.collide_rect(pos, self):
            self.func()
class ULabel(pygame.sprite.Sprite):
    """Static text label drawn over a stretched background image.

    Width is estimated from the text length (0.55 * font_size per char);
    visibility can be toggled with off_on().
    """
    def __init__(self, screen, coords, group, text, height=40, font_size=10):
        super(ULabel, self).__init__(group)
        self.font_size = font_size
        self.font = pygame.font.Font('font/arial.ttf', self.font_size)
        self.screen = screen
        self.coords = coords
        self.text = text
        self.height = height
        self.on_flag = True  # when False, draw() is a no-op (label hidden)
        self.draw()
    def draw(self):
        """Render the label if visible; otherwise do nothing."""
        if self.on_flag:
            # NOTE(review): the computed width is a float; pygame.Surface
            # normally expects integer sizes -- confirm this works on the
            # pygame version in use.
            self.image = pygame.Surface((len(self.text) * self.font_size * 0.55, self.height), pygame.SRCALPHA)
            self.rect = self.image.get_rect()
            self.rect.x = self.coords[0]
            self.rect.y = self.coords[1]
            self.image.blit(pygame.transform.scale(load_image('ui_images/Label.png', colorkey=-1),
                                                   (len(self.text) * self.font_size * 0.55, self.height)), (0, 0))
            text_pg = self.font.render(self.text, True, (0, 0, 0))
            self.image.blit(text_pg, (10, self.height - text_pg.get_height()))
            self.screen.blit(self.image, (self.coords[0], self.coords[1]))
    def set_text(self, text):
        """Replace the label text (takes effect on the next draw())."""
        self.text = text
    def off_on(self):
        """Toggle label visibility."""
        self.on_flag = not self.on_flag
| musaewullubiy/BigTaskMapAPI | UTINGAME.py | UTINGAME.py | py | 6,461 | python | en | code | 0 | github-code | 36 |
7573547861 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import math
MSS = 1440  # maximum segment size, bytes
INIT_CWND = 10  # initial congestion window, in segments of MSS bytes
QSIZE = 20 #In packets
DEF_RTT = 20 #In ms
DEF_RATE = 12 #Mbps
def get_log_fct(rtt, rate, flow_sz):
    """Estimate TCP flow-completion time (FCT) with a log2 slow-start model.

    rtt: round-trip time (FCT is returned in the same time unit;
        NOTE(review): rate_bytes below is bytes/second, so for consistent
        units rtt should be in seconds even though DEF_RTT is commented as
        ms -- confirm).
    rate: link rate in Mbps.
    flow_sz: iterable of flow sizes, in packets of MSS bytes.
    Returns a nested dict: {rtt: {rate: {size_bytes: {'fct': fct}}}}.
    """
    dict_fct = {}
    initcwnd = INIT_CWND*MSS
    dict_fct[rtt] = {}
    dict_fct[rtt][rate] = {}
    rate_bytes = (rate*1000000)/float(8)
    bdp = rate_bytes*rtt
    # NOTE(review): everything this first loop computes is recomputed and
    # overwritten by the loop below -- it looks like dead debugging code.
    for pkts in flow_sz:
        sz = pkts*MSS
        dict_fct[rtt][rate][sz] = {}
        tdelay = (MSS/float(rate_bytes))*pkts
        # print('pkts: {}, TD: {}'.format(pkts,TD))
    for pkts in flow_sz:
        sz = pkts*MSS
        dict_fct[rtt][rate][sz] = {}
        tdelay = (MSS/float(rate_bytes))*pkts
        # Three regimes: fits in the initial window; slow-start limited
        # (log2 growth of cwnd); or large enough to be BDP limited.
        if sz <= initcwnd:
            fct = (2*rtt) + tdelay
        elif sz > initcwnd and sz < bdp:
            fct = ((2+math.ceil(math.log(sz/float(initcwnd),2)))*rtt) + tdelay
        else:
            fct = ((1+(math.ceil(math.log((bdp+(initcwnd-MSS))/float(initcwnd),2))))*rtt) + tdelay
        dict_fct[rtt][rate][sz]['fct'] = fct
    return dict_fct | eweyulu/tcp-fct | log.py | log.py | py | 1,123 | python | en | code | 1 | github-code | 36 |
74574329062 | # -*- coding: utf-8 -*-
#
# Author: Ingelrest François (Francois.Ingelrest@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk, modules, os.path
from tools import consts, loadGladeFile, prefs
from gettext import gettext as _
MOD_INFO = ('Status Icon', _('Status Icon'), _('Add an icon to the notification area'), [], False, False)
class StatusIcon(modules.Module):
    """Module that puts an application icon in the notification area.

    Reacts to player events (new track / pause / stop ...) by updating the
    icon and tooltip, toggles the main window on activation, and shows a
    playback popup menu on right-click.
    """
    def __init__(self):
        """ Constructor """
        modules.Module.__init__(self, (consts.MSG_EVT_MOD_LOADED, consts.MSG_EVT_MOD_UNLOADED, consts.MSG_EVT_APP_STARTED,
                                       consts.MSG_EVT_NEW_TRACK, consts.MSG_EVT_PAUSED, consts.MSG_EVT_UNPAUSED,
                                       consts.MSG_EVT_STOPPED, consts.MSG_EVT_NEW_TRACKLIST, consts.MSG_EVT_TRACK_MOVED))
    def install(self):
        """ Install the Status icon """
        self.tooltip = consts.appName
        self.isPaused = False
        self.popupMenu = None
        self.isPlaying = False
        self.icoNormal = None
        self.mainWindow = prefs.getWidgetsTree().get_widget('win-main')
        self.trackHasNext = False
        self.trackHasPrev = False
        self.emptyTracklist = True
        self.isMainWinVisible = True
        # The status icon does not support RGBA, so make sure to use the RGB color map when creating it
        # NOTE(review): the local `colormap` below is never used.
        colormap = self.mainWindow.get_screen().get_rgb_colormap()
        gtk.widget_push_colormap(self.mainWindow.get_screen().get_rgb_colormap())
        self.statusIcon = gtk.StatusIcon()
        gtk.widget_pop_colormap()
        # GTK+ handlers
        self.statusIcon.connect('activate', self.toggleWinVisibility)
        self.statusIcon.connect('popup-menu', self.onPopupMenu)
        self.statusIcon.connect('size-changed', self.renderIcons)
        # Install everything
        self.statusIcon.set_tooltip(consts.appName)
        self.onNewTrack(None)
        self.statusIcon.set_visible(True)
    def uninstall(self):
        """ Uninstall the Status icon """
        self.statusIcon.set_visible(False)
        self.statusIcon = None
        # Never leave the app without any visible window.
        if not self.isMainWinVisible:
            self.mainWindow.show()
            self.isMainWinVisible = True
    def renderIcons(self, statusIcon, availableSize):
        """ (Re) Create icons based the available tray size """
        # Normal icon: pick the largest pre-rendered size that fits.
        if availableSize >= 48+2: self.icoNormal = gtk.gdk.pixbuf_new_from_file(consts.fileImgIcon48)
        elif availableSize >= 32+2: self.icoNormal = gtk.gdk.pixbuf_new_from_file(consts.fileImgIcon32)
        elif availableSize >= 24+2: self.icoNormal = gtk.gdk.pixbuf_new_from_file(consts.fileImgIcon24)
        else: self.icoNormal = gtk.gdk.pixbuf_new_from_file(consts.fileImgIcon16)
        # Paused icon: normal icon with a stock pause glyph composited on top.
        self.icoPause = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, self.icoNormal.get_width(), self.icoNormal.get_height())
        self.icoPause.fill(0x00000000)
        self.icoNormal.composite(self.icoPause, 0, 0, self.icoNormal.get_width(), self.icoNormal.get_height(), 0, 0, 1, 1, gtk.gdk.INTERP_HYPER, 100)
        if self.icoNormal.get_width() == 16: pauseStock = self.mainWindow.render_icon(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_MENU)
        else: pauseStock = self.mainWindow.render_icon(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_BUTTON)
        # Center the pause glyph on the icon.
        diffX = self.icoPause.get_width() - pauseStock.get_width()
        diffY = self.icoPause.get_height() - pauseStock.get_height()
        pauseStock.composite(self.icoPause, 0, 0, pauseStock.get_width(), pauseStock.get_height(), diffX/2, diffY/2, 1, 1, gtk.gdk.INTERP_HYPER, 255)
        # Use the correct icon
        if self.isPaused: statusIcon.set_from_pixbuf(self.icoPause)
        else: statusIcon.set_from_pixbuf(self.icoNormal)
    def onNewTrack(self, track):
        """ A new track is being played, None if none """
        if track is None: self.tooltip = consts.appName
        else: self.tooltip = '%s - %s' % (track.getArtist(), track.getTitle())
        self.isPaused = False
        self.isPlaying = track is not None
        self.statusIcon.set_from_pixbuf(self.icoNormal)
        self.statusIcon.set_tooltip(self.tooltip)
    def onPause(self):
        """ The current track has been paused """
        self.isPaused = True
        self.statusIcon.set_from_pixbuf(self.icoPause)
        self.statusIcon.set_tooltip(_('%(tooltip)s [paused]') % {'tooltip': self.tooltip})
    def onUnpause(self):
        """ The current track has been unpaused """
        self.isPaused = False
        self.statusIcon.set_from_pixbuf(self.icoNormal)
        self.statusIcon.set_tooltip(self.tooltip)
    def toggleWinVisibility(self, statusIcon):
        """ Show/hide the main window """
        if not self.isMainWinVisible:
            self.mainWindow.show()
            self.isMainWinVisible = True
        elif self.mainWindow.has_toplevel_focus():
            self.mainWindow.hide()
            self.isMainWinVisible = False
        else:
            # Visible but unfocused: bounce it to the foreground.
            self.mainWindow.hide()
            self.mainWindow.show()
    # --== Message handler ==--
    def handleMsg(self, msg, params):
        """ Handle messages sent to this module """
        if msg == consts.MSG_EVT_PAUSED: self.onPause()
        elif msg == consts.MSG_EVT_STOPPED: self.onNewTrack(None)
        elif msg == consts.MSG_EVT_UNPAUSED: self.onUnpause()
        elif msg == consts.MSG_EVT_NEW_TRACK: self.onNewTrack(params['track'])
        elif msg == consts.MSG_EVT_MOD_LOADED: self.install()
        elif msg == consts.MSG_EVT_TRACK_MOVED: self.trackHasNext, self.trackHasPrev = params['hasNext'], params['hasPrevious']
        elif msg == consts.MSG_EVT_APP_STARTED: self.install()
        elif msg == consts.MSG_EVT_MOD_UNLOADED: self.uninstall()
        elif msg == consts.MSG_EVT_NEW_TRACKLIST: self.emptyTracklist = (len(params['tracks']) == 0)
    # --== GTK handlers ==--
    def onPopupMenu(self, statusIcon, button, time):
        """ The user asks for the popup menu """
        # Build the menu lazily on first use, then reuse it.
        if self.popupMenu is None:
            wTree = loadGladeFile('StatusIconMenu.glade')
            self.menuPlay = wTree.get_widget('item-play')
            self.menuStop = wTree.get_widget('item-stop')
            self.menuNext = wTree.get_widget('item-next')
            self.popupMenu = wTree.get_widget('menu-popup')
            self.menuPause = wTree.get_widget('item-pause')
            self.menuPrevious = wTree.get_widget('item-previous')
            self.menuSeparator = wTree.get_widget('item-separator')
            # Connect handlers
            wTree.get_widget('item-quit').connect('activate', lambda btn: modules.postQuitMsg())
            wTree.get_widget('item-preferences').connect('activate', lambda btn: modules.showPreferences())
            self.menuPlay.connect('activate', lambda btn: modules.postMsg(consts.MSG_CMD_TOGGLE_PAUSE))
            self.menuStop.connect('activate', lambda btn: modules.postMsg(consts.MSG_CMD_STOP))
            self.menuNext.connect('activate', lambda btn: modules.postMsg(consts.MSG_CMD_NEXT))
            self.menuPrevious.connect('activate', lambda btn: modules.postMsg(consts.MSG_CMD_PREVIOUS))
            self.menuPause.connect('activate', lambda btn: modules.postMsg(consts.MSG_CMD_TOGGLE_PAUSE))
            self.popupMenu.show_all()
        # Enable only relevant menu entries
        self.menuStop.set_sensitive(self.isPlaying)
        self.menuNext.set_sensitive(self.isPlaying and self.trackHasNext)
        self.menuPause.set_sensitive(self.isPlaying and not self.isPaused)
        self.menuPrevious.set_sensitive(self.isPlaying and self.trackHasPrev)
        self.menuPlay.set_sensitive((not (self.isPlaying or self.emptyTracklist)) or self.isPaused)
        self.popupMenu.popup(None, None, gtk.status_icon_position_menu, button, time, statusIcon)
| gabrielmcf/biel-audio-player | src/modules/StatusIcon.py | StatusIcon.py | py | 8,708 | python | en | code | 0 | github-code | 36 |
1065889096 | import numpy as np
import matplotlib.pyplot as plt
import sys
def h(X, theta):
    """Logistic (sigmoid) hypothesis: 1 / (1 + exp(-X @ theta.T)).

    X: (m, n) design matrix; theta: (1, n) parameter row vector.
    Returns an (m, 1) array of probabilities in (0, 1).
    """
    # np.exp is the idiomatic, numerically standard form of the original
    # np.e ** -z power expression.
    return 1 / (1 + np.exp(-(X.dot(theta.T))))
def J(X, y, theta):
    """Mean logistic cross-entropy cost over all samples in X."""
    n_samples = X.shape[0]
    predictions = h(X, theta)
    # Per-sample cross-entropy, summed over the sample axis.
    per_sample = -(y * np.log(predictions) + (1 - y) * np.log(1 - predictions))
    return per_sample.sum(0) / n_samples
def GD(X, y, theta, alpha, niters):
    """Batch gradient descent for logistic regression.

    Prints progress to stdout and returns (cost, theta) where cost is the
    (niters, 1) history of J per iteration.
    NOTE: theta is updated in place (`-=`), so the caller's array is mutated.
    """
    m = X.shape[0]
    cost = np.zeros((niters,1))
    print('iteração:')
    for k in range(0, niters):
        print(' ',k,end='')
        y_hat = h(X, theta)
        erro = ((y_hat-y) *X).sum(0)/m
        theta -= (alpha * (erro))
        cost[k] = J(X, y, theta)
    # Carriage returns intended to rewind the progress output on a terminal.
    print('\r\r\r\r\r\r',end='')
    return (cost, theta)
def featureScaling(X):
    """Min-max scale each column of X into [0, 1] (constant columns divide by zero,
    matching the original behaviour)."""
    # Shift every column so its minimum sits at zero, then divide by the
    # range of the shifted data (min of the shifted columns is exactly 0).
    shifted = X - np.min(X, 0)
    denominator = np.max(shifted, 0) - np.min(shifted, 0)
    return shifted / denominator
# ---- command-line driver -------------------------------------------------
# Usage: <dataset> <# of iterations> <alpha> <delimiter>
if len(sys.argv) < 5:
    print('Usage %s <dataset> <# of iterations> <alpha> <delimiter>'%sys.argv[0])
    # Bug fix: the original fell through after printing the usage message and
    # then crashed with an IndexError on sys.argv[1]; abort explicitly.
    sys.exit(1)
f=sys.argv[1]
niters=int(sys.argv[2])
alpha=float(sys.argv[3])
delim=sys.argv[4]
# Load the dataset; the last column is the label, the rest are features.
data=np.genfromtxt(f,delimiter=delim)
X=data[:,:-1]
X=np.array(X,dtype=float)
# Scale features to [0, 1] and prepend the bias column of ones.
X=featureScaling(X)
X=np.insert(X,0,1,axis=1)
y=data[:,-1]
y=np.reshape(np.array(y,dtype=int),(len(y),1))
Theta=np.zeros((1,X.shape[1]))
# 70/30 train/test split (no shuffling, as before).
nsize=int(X.shape[0]*.7)
Xtr=X[:nsize,:]
Xte=X[nsize:,:]
ytr=y[:nsize]
yte=y[nsize:]
c,t=GD(Xtr,ytr,Theta,alpha,niters)
y_hat=np.round(h(Xtr,t))
print('Home made learner:')
print(' Taxa de acerto (treino):', np.mean(y_hat==ytr))
y_hat=np.round(h(Xte,t))
print(' Taxa de acerto (teste):', np.mean(y_hat==yte))
plt.plot(c)
plt.show()
#------------ scikit-learn baseline ------------
from sklearn import linear_model
r=linear_model.LogisticRegression()
ytr=np.ravel(ytr)
r.fit(Xtr,ytr)
yte=np.ravel(yte)
y_hat=r.predict(Xtr)
# Bug fix: this section previously reported the sklearn results under the
# 'Home made learner:' heading; label it correctly.
print('Sklearn learner:')
print(' Taxa de acerto (treino):', np.mean(y_hat==ytr))
y_hat=r.predict(Xte)
print(' Taxa de acerto (teste):', np.mean(y_hat==yte))
| brunoprograma/machine_learning | aula_03/LRegression.py | LRegression.py | py | 1,934 | python | en | code | 0 | github-code | 36 |
788517980 | import os, gtts, PIL, praw, PIL.Image, PIL.ImageDraw, PIL.ImageFont, moviepy.editor, shutil
class program: #the main class
    """Namespace class grouping the bot's components: controlled output,
    text-to-speech, reddit scraping, presets, utilities and image generation."""
    class output: #the class for controlled stdout within the program
        outputEnabled = True #controls whether or not to print controlled output lines
        def print(string) -> None: #will only print if <program.output.outputEnabled == True>.
            if (program.output.outputEnabled):
                print (string)
    class tts: #the class for text to speech stuff
        def makeTTSFile(ttsText, language = 'en') -> str: #this outputs a .mp3 file and returns the path
            # Persist a monotonically increasing file counter on disk so
            # generated file names never collide across runs.
            try:
                currentNumberCount = int(str(open('./tts-file-count-number.txt').read()))
            except:
                currentNumberCount = 1
            file = open('./tts-file-count-number.txt', 'w')
            file.write(str(currentNumberCount + 1))
            file.close()
            if ('tmp' not in os.listdir('.')):
                os.mkdir('tmp')
            filePath = './tmp/{}.mp3'.format(str(currentNumberCount))
            textToSpeech = gtts.gTTS(text = str(ttsText), lang = language)
            textToSpeech.save(filePath)
            return filePath
    class reddit: #the class that has the functions and data that has to do with reddit
        reddit = praw.Reddit('bot1', user_agent = 'bot1 user agent')
        def getRepliesFromTopPost() -> dict: #returns a list of the post's replies sorted by their score
            comments = {}
            sbmsn = None
            for submission in program.reddit.reddit.subreddit('askreddit').hot(limit = 1):
                sbmsn = submission
                for comment in submission.comments:
                    try:
                        isAscii = True
                        normalChars = [ #I dont know any better way to do this so I had to hardcode it
                            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
                            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '`', '~', '!',
                            '@', '#', '$', '%', '^', '&', '*', '(', ')', '[', ']', '{', '}',
                            '|', '\\', '"', "'", ';', ':', ',', '<', '>', '.', '/', '?', ' ',
                            '-', '_', '+', '=', '\n'
                        ]
                        for each in str(comment.body): #nested for loop paradise...
                            if (each.lower() in normalChars):
                                pass
                            else:
                                isAscii = False
                        if (isAscii):
                            comments[int(comment.score)] = str(comment.body)
                    except:
                        pass
            return [comments, sbmsn]
    class presets: #the class for the configuration variables
        numberOfAskredditCommentsToShow = 10 #will show the top x amount of comments from the post.
    class utils:
        def asciitize(string):
            """Replace every character outside the hardcoded safe set with '?'."""
            normalChars = [ #I dont know any better way to do this so I had to hardcode it
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
                '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '`', '~', '!',
                '@', '#', '$', '%', '^', '&', '*', '(', ')', '[', ']', '{', '}',
                '|', '\\', '"', "'", ';', ':', ',', '<', '>', '.', '/', '?', ' ',
                '-', '_', '+', '=', '\n'
            ]
            newString = ''
            for each in string:
                if (each.lower() in normalChars):
                    pass
                else:
                    each = '?'
                newString += each
            return newString
    class images: #the class that has the functions for generating images
        def generateImageWithTextOnIt(text, dimensions = [1920, 1080], bgcolor = 'white', fgcolor = 'black'): #generates an image that can later be stitched into the video
            image = PIL.Image.new('RGB', (dimensions[0], dimensions[1]), bgcolor)
            text = text.replace('\n', '')
            newText = []
            tmpText = ''
            last = 1
            lineWidthInChars = 50 #make the lines 50 characters long each
            for each in text: #split the string into 50 characer long segments
                last += 1
                tmpText = str(tmpText) + str(each)
                if (last >= lineWidthInChars):
                    last = 1
                    tmpText += '-'
                    newText.append(tmpText)
                    tmpText = ''
            if (tmpText != ''):
                newText.append(tmpText)
            if ('tmp' not in os.listdir('.')):
                os.mkdir('tmp')
            # Same persistent counter scheme as the TTS files, separate file.
            try:
                currentNumberCount = int(str(open('./image-file-count-number.txt').read()))
            except:
                currentNumberCount = 1
            file = open('./image-file-count-number.txt', 'w')
            file.write(str(currentNumberCount + 1))
            file.close()
            filePath = './tmp/{}.png'.format(str(currentNumberCount))
            textTopYCoordinate = 0 #int(image.size[1] / 4) #there will be no text above this y coordinate
            textHeight = int(image.size[1] - textTopYCoordinate)
            textHeight /= len(newText)
            if (textHeight > (image.size[0] / 30)):
                textHeight = int(image.size[0] / 30)
            font = PIL.ImageFont.truetype('./utils/default-font.ttf', int(textHeight))
            draw = PIL.ImageDraw.Draw(image)
            lastYCoord = textTopYCoordinate
            # First pass: measure total text height (rounded to an even pixel
            # count, as required by the video encoder).
            for textLine in newText:
                textSize = draw.textsize(textLine, font = font)
                textCoords = [0, 0]
                textCoords[0] = int((image.size[0] - textSize[0]) / 2)
                textCoords[1] = int(lastYCoord)
                lastYCoord += textSize[1]
                if (lastYCoord % 2 == 0):
                    pass
                else:
                    lastYCoord += 1
            image = image.resize((dimensions[0], lastYCoord), PIL.Image.ANTIALIAS)
            lastYCoord = textTopYCoordinate
            font = PIL.ImageFont.truetype('./utils/default-font.ttf', int(textHeight))
            draw = PIL.ImageDraw.Draw(image)
            # Second pass: actually draw each centered line of text.
            for textLine in newText:
                textSize = draw.textsize(textLine, font = font)
                textCoords = [0, 0]
                textCoords[0] = int((image.size[0] - textSize[0]) / 2)
                textCoords[1] = int(lastYCoord)
                draw.text(textCoords, textLine, fgcolor, font = font)
                lastYCoord += textSize[1]
            newImage = PIL.Image.new('RGB', (dimensions[0], dimensions[1]), bgcolor)
            '''if (image.size[1] > newImage.size[1]):
                aspectRatio = image.size[0] / image.size[1]
                newImageSize = [0, 0]
                newImageSize[0] = int(newImage.size[0] * aspectRatio)
                newImageSize[1] = int(newImage.size[1] / aspectRatio)
                image = image.resize((*newImageSize), PIL.Image.ANTIALIAS)'''#fix the resizing method so that the image doesnt overflow on the y axis
            newImageCoords = [int((newImage.size[0] - image.size[0]) / 2), int((newImage.size[1] - image.size[1]) / 2)]
            newImage.paste(image, newImageCoords)
            newImage.save(filePath)
            return filePath
# ---- main script: scrape, synthesize, render and stitch the video ----
program.output.print('Program started.')
program.output.print('Getting the comments from the top askreddit post.')
askRedditCommentList = program.reddit.getRepliesFromTopPost()
commentUpvotesSorted = sorted(askRedditCommentList[0])
commentUpvotesSorted.reverse()
program.output.print(program.utils.asciitize('Found a post titled "{}" with {} upvotes that is in hot.'.format(askRedditCommentList[1].title, askRedditCommentList[1].score)))
if (program.presets.numberOfAskredditCommentsToShow > len(commentUpvotesSorted)):
    program.output.print('The number of comments you chose to display was larger than the amount of available comments - <program.presets.numberOfAskredditCommentsToShow> was changed to the max amount of comments and nothing more.')
    program.presets.numberOfAskredditCommentsToShow = len(commentUpvotesSorted)
# Pick the top-N comments by score.
topComments = {}
topCommentsUpvotesSorted = []
iterationStage = 0
while (iterationStage < program.presets.numberOfAskredditCommentsToShow):
    topComments[commentUpvotesSorted[iterationStage]] = askRedditCommentList[0][commentUpvotesSorted[iterationStage]]
    topCommentsUpvotesSorted.append(commentUpvotesSorted[iterationStage])
    iterationStage += 1
# One TTS clip per comment, plus one leading clip for the post title.
ttsFilePathsInOrderOfTopCommentsSortedByUpvotes = [program.tts.makeTTSFile(askRedditCommentList[1].title)] #10/10 file naming :)
iterationStage = 0
for comment in topComments:
    iterationStage += 1
    program.output.print('Making TTS file {}/{}.'.format(str(iterationStage), str(len(topComments))))
    commentText = topComments[comment]
    ttsPath = program.tts.makeTTSFile(commentText)
    ttsFilePathsInOrderOfTopCommentsSortedByUpvotes.append(ttsPath)
# Matching still image for each clip.
imageFilePathsInOrderOfTopCommentsSortedByUpvotes = [program.images.generateImageWithTextOnIt(askRedditCommentList[1].title, fgcolor = '#9494FF')] #sorry :)
iterationStage = 0
for comment in topComments:
    iterationStage += 1
    program.output.print('Making image file {}/{}.'.format(str(iterationStage), str(len(topComments))))
    imagePath = program.images.generateImageWithTextOnIt(topComments[comment], fgcolor = '#ff4301')
    imageFilePathsInOrderOfTopCommentsSortedByUpvotes.append(imagePath)
# Mux each image/audio pair into a short mp4 via ffmpeg.
outputMp4List = []
for each in range(len(imageFilePathsInOrderOfTopCommentsSortedByUpvotes)):
    program.output.print('Stitching together audio and video files ({}/{}).'.format(str(each + 1), str(len(imageFilePathsInOrderOfTopCommentsSortedByUpvotes))))
    imagePath = imageFilePathsInOrderOfTopCommentsSortedByUpvotes[each]
    audioPath = ttsFilePathsInOrderOfTopCommentsSortedByUpvotes[each]
    os.system('ffmpeg.exe -loop 1 -i {} -i {} -c:v libx264 -tune stillimage -c:a aac -b:a 192k -pix_fmt yuv420p -shortest ./tmp/out{}.mp4'.format(imagePath, audioPath, str(each)))
    outputMp4List.append('./tmp/out{}.mp4'.format(str(each)))
# Concatenate all the clips and clean up the temp directory.
program.output.print('Stitching together the videos.')
videoFileList = []
for each in outputMp4List:
    videoFileList.append(moviepy.editor.VideoFileClip(each))
finalVideo = moviepy.editor.concatenate_videoclips(videoFileList)
finalVideo.write_videofile('output.mp4')
program.output.print('Done!')
shutil.rmtree('tmp') | renamedquery/automatic-askreddit-video-maker | video-maker.py | video-maker.py | py | 10,898 | python | en | code | 1 | github-code | 36 |
3647124333 | import commands
import sys
sys.path.append('../../')
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import django
django.setup()
path = os.getcwd()  # working directory at import time
# Path to the pangu status dump, relative to the CWD.
# NOTE(review): built with "/".join, so POSIX-only; os.path.join would be portable.
pangu_info = "/".join([path,"mysite/config/ecs_pangu.txt"])
def parse_srv_status():
    """Parse server states out of the pangu status dump file.

    Greps lines whose status is 'normal' or 'disconnected' and returns a
    list of dicts with keys 'status', 'ip' and 'hostname'.
    NOTE(review): relies on the Python 2-only `commands` module and on a
    fixed whitespace-separated column layout of the dump file.
    """
    oss_srv_stat = {}
    ret = []
    pangu_srv_status = commands.getoutput("cat %s |grep -Ei \"normal|disconnected\""%pangu_info)
    split_data = pangu_srv_status.split('\n')
    for srv in split_data:
        oss_srv_stat["status"] = srv.split()[1]
        # Column 5 looks like 'scheme//ip'; keep the part after '//'.
        oss_srv_stat['ip'] = srv.split()[5].split('//')[1]
        oss_srv_stat['hostname'] = srv.split()[6]
        # Copy the dict so each appended entry is independent.
        ret.append(dict(oss_srv_stat))
    return ret
| luvensin/privateCloudMonitor | mysite/mysite/config/parse_data_ecs_pangu.py | parse_data_ecs_pangu.py | py | 689 | python | en | code | 0 | github-code | 36 |
39156539433 | import re
import sys
from collections import namedtuple, Counter, OrderedDict
from operator import itemgetter
from math import log
from Bio import SeqIO
from RecBlast import print, merge_ranges
from RecBlast.Search import id_search
from itertools import chain, islice
import mygene
from pathlib import Path
from RecBlast.RBC import RecBlastContainer
from io import StringIO
import pandas as pd
import sqlite3
def cleanup_fasta_input(handle, filetype='fasta', write=True):
    """Drop unavailable sequences and disambiguate duplicate record names.

    Records whose sequence contains 'Sequenceunavailable' are discarded;
    surviving records get the third '|'-separated description field appended
    to their name. Optionally writes the result to '<handle>.clean'.
    Returns the cleaned list of SeqRecords.
    """
    oldlist = [i for i in SeqIO.parse(handle, filetype)]
    names = set([i.name for i in oldlist])
    newlist = list()
    for name in names:
        x = [i for i in oldlist if i.name == str(name) and 'Sequenceunavailable' not in i.seq]
        for j in x:
            j.name += '_' + str(j.description).split('|')[2]
        newlist += x
    if write:
        with open(handle + '.clean', 'w') as outf:
            SeqIO.write(newlist, outf, filetype)
    return newlist
def massively_translate_fasta(SeqIter):
    """Rename record IDs from RefSeq accessions to gene symbols via mygene.info.

    Queries the service in chunks of 1000 IDs (human only), mutates the
    records' .id in place where a symbol was found, and returns all records
    as one list.
    """
    mg = mygene.MyGeneInfo()
    all_genes = []
    def chunks(iterable, size=1000):
        # Lazily split *iterable* into lists of at most *size* items.
        iterator = iter(iterable)
        for first in iterator:
            yield list(chain([first], islice(iterator, size - 1)))
    for x in chunks(SeqIter):
        out = mg.querymany([a.id for a in x], scopes='refseq', fields='symbol', species='Homo sapiens', returnall=True)
        tdict = {}
        for a in out['out']:
            try:
                tdict[a['query']] = a['symbol']
            except KeyError:
                # Hit without a symbol field: leave the original ID in place.
                continue
        for i in x:
            try:
                i.id = tdict[i.id]
            except KeyError:
                continue
        all_genes += x
    return all_genes
def translate_ids(id_list, orig='refseq', to='symbol', species='human'):
    """Convert identifiers from one namespace to another using mygene.info.

    :param id_list list: identifiers to translate.
    :param orig str: scope/namespace of the input identifiers.
    :param to str: target field to translate into.
    :param species str: species filter passed to mygene.
    :return list: translated identifiers in input order; identifiers with no
        hit are passed through unchanged.
    """
    client = mygene.MyGeneInfo()
    hits = client.querymany(id_list, scopes=orig, fields=to, species=species)
    mapping = {}
    for hit in hits:
        if to in hit:
            mapping[hit['query']] = hit[to]
    # Fall back to the original identifier when no translation was found.
    return [mapping.get(identifier, identifier) for identifier in id_list]
def nr_by_longest(handle, filetype='fasta', write=True):
    """Collapse records sharing an ID, keeping only the longest sequence per ID.

    IDs are truncated at the first '|' (or first space when there is no '|');
    records with 'Sequenceunavailable' bodies are skipped. Optionally writes
    the result to 'nr_<basename>' in the current directory and returns the
    retained records as a list.
    """
    oldlist = SeqIO.parse(handle, filetype)
    seqdict = {}
    for seq in oldlist:
        if seq.seq == 'Sequenceunavailable':
            print('Seq Unavailable:\t', seq.name)
            continue
        try:
            seq.id, seq.description = seq.id.split('|')[0], seq.id.split('|')[1]
        except IndexError:
            seq.id, seq.description = seq.id.split(' ')[0], ''.join(seq.id.split('|')[1:len(seq.id.split('|'))])
        # Bug fix: the original `a != 'gi' or a != 'emb' or a != 'acc'` chain
        # was always True and never fired; reject bare database prefixes.
        assert seq.id not in ('gi', 'emb', 'acc'), seq.id
        # Keep only the longest record seen for each ID.
        if seq.id in seqdict:
            if len(seq) > len(seqdict[seq.id]):
                seqdict[seq.id] = seq
        else:
            seqdict[seq.id] = seq
    # Bug fix: the original generator was exhausted by SeqIO.write below, so
    # the caller received an empty iterator when write=True; use a list.
    newlist = [seq for _, seq in seqdict.items()]
    if write:
        outhandle = 'nr_' + str(Path(handle).name)
        with Path(outhandle).open('w') as outf:
            SeqIO.write(newlist, outf, filetype)
    return newlist
def cull_reciprocal_best_hit(recblast_out):
    """
    returns a recblast_out container that only has the reciprocal best hits.

    Records whose first bracketed annotation does not contain the query name
    are dropped; the container is modified in place and also returned.
    Lists of containers are processed recursively.
    :param recblast_out: RecBlastContainer (or list of them).
    :return: the same container/list with non-RBH records removed.
    """
    pat = re.compile('\|\[(.*?)\]\|') # regex for items in annotation
    if isinstance(recblast_out, list):
        rc_out_list = []
        for index, rc in enumerate(recblast_out):
            rc_out_list.append(cull_reciprocal_best_hit(rc))
        return rc_out_list
    else:
        # assert isinstance(recblast_out, RecBlastContainer), "Items must be RecBlastContainer Objects!"
        for species, rc_spec_rec in recblast_out.items():
            # print('Species:\t', species, indent=0)
            for query, rc_rec in rc_spec_rec.items():
                # print('Query:\t', query, indent=1)
                try:
                    rc_out = rc_rec['recblast_results']
                except KeyError:
                    print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                    continue
                tmprecord = []
                for record in rc_out:
                    try:
                        # print(record.description, indent=3)
                        target_id, annotations = record.description.split('|-|')
                        # print('Target ID:\t', target_id, indent=4)
                        # print('Annotations:', annotations.lstrip('\t'), indent=4)
                    except ValueError:
                        print(record.description, indent=2)
                        print('Could not unpack annotations!', indent=2)
                        continue
                    id_lst = pat.findall(annotations)
                    # print('id_list:\t', id_lst, indent=4)
                    if id_lst:
                        # Keep the record only if the top annotation matches the query.
                        if query in id_lst[0]:
                            tmprecord.append(record)
                        else:
                            print("For query {0}, target {1} was not a reciprocal best hit!".format(query,
                                                                                                   target_id))
                            continue
                    else:
                        print('No annotations found for record {0} in species {1}, query {2}'.format(record.name,
                                                                                                     species,
                                                                                                     query))
                        continue
                recblast_out[species][query]['recblast_results'] = tmprecord
    return recblast_out
def simple_struct(recblast_out, verbose=True):
    """Returns a nice diagram of queries, targets, and annotations.

    Builds {species: {query: {target_id: [annotations]}}} from one container
    or merges several containers when given a list. With verbose=True the
    structure is also pretty-printed and each annotation list is normalised
    through id_search.
    """
    master_dict = {}
    pat = re.compile('\|\[(.*?)\]\|')  # regex for items in annotation
    if isinstance(recblast_out, list):
        # Prepare a list of dictionaries of length recblast_out, along with a list of respective species
        master_count = [dict] * len(recblast_out)
        for index, rc in enumerate(recblast_out):
            try:
                master_count[index] = simple_struct(rc)
            except AttributeError:
                master_count[index] = rc
        # Merge the per-container dicts into one master_dict.
        for subdict in master_count:
            for species, species_dict in subdict.items():
                if isinstance(species_dict, Exception):
                    continue
                try:
                    comb_spec_dict = master_dict[species]
                except KeyError:
                    master_dict[species] = dict()
                    comb_spec_dict = master_dict[species]
                for query, query_dict in species_dict.items():
                    try:
                        comb_query_dict = comb_spec_dict[query]
                    except KeyError:
                        comb_spec_dict[query] = dict()
                        comb_query_dict = comb_spec_dict[query]
                    for target_id, annotation_list in query_dict.items():
                        try:
                            comb_anno_list = comb_query_dict[target_id]
                        except KeyError:
                            comb_query_dict[target_id] = list()
                            comb_anno_list = comb_query_dict[target_id]
                        comb_anno_list += annotation_list if isinstance(annotation_list, list) else [annotation_list]
        return master_dict
    else:
        """
        Structure:
            master_dict:
                Species| species_dict:
                    Query| query_dict:
                        target_id| annotations_list
        """
        # assert isinstance(recblast_out, RecBlastContainer), 'Item in recblast_out was not a RecBlastContainer object!'
        try:
            recblast_out.__delitem__('__dict__')
        except KeyError:
            pass
        for species, rc_spec_rec in recblast_out.items():
            # print('Species:\t', species, indent=0)
            try:
                species_dict = master_dict[species]
            except KeyError:
                master_dict[species] = dict()
                species_dict = master_dict[species]
            for query, rc_rec in rc_spec_rec.items():
                # print('Query:\t', query, indent=1)
                try:
                    query_dict = species_dict[query]
                except KeyError:
                    species_dict[query] = dict()
                    query_dict = species_dict[query]
                try:
                    rc_out = rc_rec['recblast_results']
                except KeyError:
                    print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                    continue
                for record in rc_out:
                    try:
                        # print(record.description, indent=3)
                        target_id, annotations = record.description.split('|-|')
                        # print('Target ID:\t', target_id, indent=4)
                        # print('Annotations:', annotations.lstrip('\t'), indent=4)
                    except ValueError:
                        print(record.description, indent=2)
                        # print('Could not unpack annotations!', indent=2)
                        continue
                    try:
                        target_list = query_dict[target_id]
                    except KeyError:
                        query_dict[target_id] = list()
                        target_list = query_dict[target_id]
                    id_lst = pat.findall(annotations)
                    # print('id_list:\t', id_lst, indent=4)
                    if id_lst:
                        target_list += id_lst
                    else:
                        print('No annotations found for record {0} in species {1}, query {2}'.format(record.name,
                                                                                                     species,
                                                                                                     query))
    if verbose:
        print('*******************************************')
        for species, species_dict in master_dict.items():
            print(species, indent=0)
            for query, query_dict in species_dict.items():
                print(query, indent=1)
                for target_id, annotation_list in query_dict.items():
                    print(target_id, indent=2)
                    tmp = []
                    for annotation in annotation_list:
                        p, item, seq_range, id_type = id_search(annotation, id_type='brute', verbose=0)
                        if id_type == 'symbol':
                            tmp.append(item)
                        else:
                            tmp.append(item)
                    query_dict[target_id] = tmp
                    for annotation in query_dict[target_id]:
                        print(annotation, indent=3)
        print('*******************************************')
    return master_dict
def rc_out_stats(rc_out):
    """Compute reciprocal-hit statistics for RecBlast output.

    NOTE(review): this function looks unfinished (see its own TODO and the
    inline notes below); the list branch returns None and the counters are
    never reported.
    """
    # Todo: use 'from Collections import Counter' to rapidly count duplicates
    if isinstance(rc_out, list):
        holder = []
        for rc in rc_out:
            holder.append(rc_out_stats(rc))
        # NOTE(review): zip(holder) zips a single iterable; zip(*holder) was
        # likely intended to unpack the (hit, multihit) pairs.
        c_hit_list, c_multihit_list = zip(holder)
        hit_perc = sum(c_hit_list) / len(c_hit_list)
        multihit_perc = sum(c_multihit_list) / len(c_multihit_list)
        # Percentage of searches with reciprocal hits, regardless of number:
        # Percentage of searches with more than one hit:
    elif isinstance(rc_out, RecBlastContainer):
        c_hit = 0
        c_multihit = 0
        for species, queries_dict in rc_out.items():
            # NOTE(review): iterates rc_out again -- queries_dict.items() was
            # probably intended here.
            for query, results in rc_out.items():
                try:
                    record_list = results['recblast_results']
                except KeyError:
                    return (0, 0)
                # NOTE(review): has_run is reset to 0 and never set to 1, so
                # the multihit distinction below never engages.
                has_run = 0
                for record in record_list:
                    if not has_run:
                        c_hit += 1
                    has_run = 0
                    c_multihit += 1
    else:
        return None
def count_dups(recblast_out):
    """ Inverts target-annotation dictionary to find out, for every best-hit annotation, how many targets there are.

    Returns (species_anno_target_dict, species_anno_count_dict):
    the first maps each top-hit annotation to its list of target IDs per
    species, the second maps each annotation to that list's length.
    """
    species_anno_target_dict = {}
    species_anno_count_dict = {}
    master_dict = simple_struct(recblast_out, verbose=False)
    for species, species_dict in master_dict.items():
        try:
            anno_target_dict = species_anno_target_dict[species]
        except KeyError:
            species_anno_target_dict[species] = {}
            anno_target_dict = species_anno_target_dict[species]
        print(species_dict, indent=0)
        for query, query_dict in species_dict.items():
            # ignoring query
            print(query_dict, indent=1)
            for target_id, annotation_list in query_dict.items():
                print(annotation_list, indent=2)
                # The first annotation is treated as the best hit.
                tophit = annotation_list[0]
                print(tophit, indent=2)
                try:
                    anno_target_dict[tophit] += [target_id]
                except KeyError:
                    anno_target_dict[tophit] = list()
                    anno_target_dict[tophit].append(target_id)
                print(anno_target_dict[tophit], indent=3)
    for species, anno_dict in species_anno_target_dict.items():
        print(species, indent=0)
        try:
            anno_count_dict = species_anno_count_dict[species]
        except KeyError:
            species_anno_count_dict[species] = {}
            anno_count_dict = species_anno_count_dict[species]
        for annotation, target_list in anno_dict.items():
            print(annotation, '\t\t\t', len(target_list))
            anno_count_dict[annotation] = len(target_list)
    return species_anno_target_dict, species_anno_count_dict
class FilterRBHs(object):
    """Convenience class for use with RecBlastContainer.result_filter(). Removes non-Reciprocal Best Hits from RBC.

    A hit is kept only when the gene symbol parsed from its top annotation
    matches the original query record's name (supplied via the summary statistic).
    """
    def __init__(self, **kwargs):
        # These are consumed by result_filter(); any of them can be overridden
        # through keyword arguments.
        self._recblast_object = 'query_record'
        self.args = {'func': self.fun, 'summary_statistic': self._stat, 'recblast_object': self._recblast_object}
        for k, v in kwargs.items():
            self.args[k] = v
    def _stat(self, query_record):
        """ Summary Statistic function for filter_RBH. Requires setting recblast_object='query_record'

        :param query_record: query SeqRecord; its name identifies the original query.
        :return: the query record's name.
        """
        return query_record.name
    def fun(self, hit, stat, verbose=False):
        """Return True when the hit's top annotation resolves to the query symbol."""
        # BUGFIX: raw string for the regex — '\|' and '\[' in a plain string are
        # invalid escape sequences (DeprecationWarning; error in future Pythons).
        pat = re.compile(r'\|\[(.*?):.*\]\|')  # regex for items in annotation
        try:
            hit_split = hit.description.split('|-|')
            top_anno = hit_split[1]
        except ValueError:
            print(hit.description, indent=2)
            print('Could not unpack annotations!', indent=2)
            return False
        except IndexError:
            print(hit.description, indent=2)
            print('Could not unpack annotations!', indent=2)
            return False
        # NOTE(review): findall(...)[0] raises IndexError when no '|[...]|' item
        # is present — confirm upstream guarantees at least one.
        id_lst = pat.findall(top_anno)[0].strip()
        if id_lst:
            _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=verbose)
            if stat == hit_symbol:
                return True
            else:
                return False
def map_ranges(hit):
    """ Convenience function for RBC.results_map(). Replaces results with a tup of result descriptions and loci."""
    _, hit_id, hit_range, _ = id_search(hit.description, verbose=False)
    start, end, strand = hit_range[0], hit_range[1], hit_range[2]
    return (hit.description, hit_id, start, end, strand)
def RBC_drop_many_to_one_hits(RBC):
    """Prune overlapping hits from a RecBlastContainer in place.

    Builds a locus table for every hit via map_ranges, removes overlapping loci
    with drop_overlaps_bed, then keeps only the surviving hit indexes in each
    query's 'recblast_results' list. Mutates RBC; returns None.
    """
    loci_dict_RBC = {}
    for species, query, rec in RBC.result_map(map_ranges):
        r = rec['recblast_results']
        for index, hit in enumerate(r):
            # key: (seq id, start, end, unique query+index tag) -> hit location in RBC
            loci_dict_RBC[(hit[1], hit[2], hit[3], ''.join((query, str(index))))] = (species, query, index)
    filtered_loci_dict_RBC = drop_overlaps_bed(loci_dict_RBC)
    # Collect, per (species, query), the hit indexes that survived the filter.
    filter_dict = {}
    for species, query, index in filtered_loci_dict_RBC.values():
        filter_dict.setdefault((species, query), []).append(index)
    # BUGFIX: the original deleted items from 'recblast_results' while iterating
    # over it, shifting indexes and silently skipping elements. Rebuild instead.
    for (species, query), indexes in filter_dict.items():
        keep = set(indexes)
        RBC[species][query]['recblast_results'] = [
            hit for hit_index, hit in enumerate(RBC[species][query]['recblast_results'])
            if hit_index in keep
        ]
def count_reciprocal_best_hits(recblast_out):
    """Count, per species, how many hits are reciprocal best hits.

    A hit counts when the gene symbol parsed from its annotation equals the
    query name. Returns {species: Counter({query: n_reciprocal_hits})}.
    """
    # BUGFIX: raw string — '\|' / '\[' in a plain string are invalid escapes.
    pat = re.compile(r'\|\[(.*?)\]\|')  # regex for items in annotation
    species_counters = {}
    for species, species_dict in recblast_out.items():
        species_counters[species] = Counter()
        for query, query_dict in species_dict.items():
            try:
                rc_out = query_dict['recblast_results']
            except KeyError:
                print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                continue
            for hit in rc_out:
                try:
                    # description format: '<target id>|-|<annotations>'
                    annotations = hit.description.split('|-|')[1]
                except ValueError:
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                except IndexError:
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                id_lst = ''.join(pat.findall(annotations))
                if id_lst:
                    _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=0)
                else:
                    print('No annotations found for record {0} in species {1}, query {2}'.format(hit.name,
                                                                                                 species,
                                                                                                 query))
                    continue
                if query == hit_symbol:
                    species_counters[species].update({query: 1})
    return species_counters
def export_count_as_csv(rec_hit_counter_dict, filename='RecBlastCount'):
    """Write per-species reciprocal-hit counts to '<filename>.tsv'.

    rec_hit_counter_dict maps species -> Counter(gene -> count); genes absent
    from a species count as 0. (Note: despite the name, output is tab-separated.)
    """
    # Ordered union of every gene name, first-seen order preserved.
    # (The original membership-tested a growing list: accidental O(n^2).)
    allgenes = []
    seen = set()
    for species_counter in rec_hit_counter_dict.values():
        for gene in species_counter:
            if gene not in seen:
                seen.add(gene)
                allgenes.append(gene)
    # For each gene, a tuple of counts in species order.
    genedict = {gene: tuple(rec_hit_counter_dict[species][gene]
                            for species in rec_hit_counter_dict.keys())
                for gene in allgenes}
    all_lines = ['Gene\t' + '\t'.join([species for species in rec_hit_counter_dict.keys()]) + '\n']
    all_lines += ['{Gene}\t{counts_str}\n'.format(Gene=key, counts_str='\t'.join([str(i) for i in value]))
                  for key, value in genedict.items()]
    with open(filename + '.tsv', 'w') as outf:
        outf.writelines(all_lines)
def count_reciprocal_best_hits_from_pandas(pandas_df):
    """Count reciprocal best hits per target species from a RecBlast DataFrame.

    Hit columns (positions 5..-2 of each row) hold FASTA-style descriptions;
    a hit counts when its parsed symbol equals the query name.
    Returns {species: Counter({query: n_reciprocal_hits})}.
    """
    # BUGFIX: raw string — '\|' / '\[' in a plain string are invalid escapes.
    pat = re.compile(r'\|\[(.*?)\]\|')  # regex for items in annotation
    spec_list = list(pandas_df.target_species.unique())
    species_counters = {}
    for species in spec_list:
        species_counters[species] = Counter()
        species_results = pandas_df.loc[pandas_df['target_species'] == species]
        query_list = list(species_results.query_name.unique())
        for query in query_list:
            print(query)
            # BUGFIX: '.ix' was removed from pandas (deprecated 0.20, gone in 1.0);
            # the integer slice here is positional, so use .iloc.
            query_results = species_results.loc[species_results['query_name'] == query].iloc[:, 5:-1]
            rc_out = []
            for i, d in query_results.iterrows():
                rc_out += d.tolist()
            # Annoying shunt: rebuild a FASTA stream so SeqIO can parse descriptions
            rc_out_asfasta = '\n'.join(['>' + i for i in rc_out if i is not None])
            tmp = StringIO(rc_out_asfasta)
            rc_out = SeqIO.parse(tmp, 'fasta')
            for hit in rc_out:
                try:
                    hit_split = hit.description.split('|-|')
                    id_lst = ''.join(pat.findall(hit_split[1]))
                except (ValueError, IndexError):
                    # IndexError added for consistency with count_reciprocal_best_hits
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                if id_lst:
                    _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=0)
                else:
                    print('No annotations found for record {0} in species {1}, query {2}'.format(hit.name,
                                                                                                 species,
                                                                                                 query))
                    continue
                if query == hit_symbol:
                    species_counters[species].update({query: 1})
    return species_counters
def sqlite_to_pandas(sql_file, table_name):
    """Load an entire SQLite table into a pandas DataFrame.

    The connection is always closed on exit (the original leaked it).
    """
    conn = sqlite3.connect(sql_file)
    try:
        return pd.read_sql_query("select * from {0};".format(table_name), conn)
    finally:
        conn.close()
def filter_hits_pandas(pandas_df):
    """Row-wise filter: blank out hits shorter than 25% of the query length.

    Each row holds metadata columns followed by FASTA-style hit descriptions;
    hits whose parsed sequence range spans less than a quarter of the query
    record are replaced with None. Returns the transformed DataFrame rows.
    """
    def filter_func(row):
        # Re-parse the stored query record to measure its length.
        qrec = row.query_record
        qrec = SeqIO.read(StringIO(qrec), 'fasta')
        # Minimum acceptable hit span: 25% of the query length.
        min_len = 0.25 * len(qrec)
        # NOTE(review): intro takes columns 0..5 while hits start at column 5 —
        # position 5 appears in both slices; confirm the intended split point.
        intro = row.iloc[0:6].tolist()
        hits = row.iloc[5:-1].tolist()
        new_hits = []
        for hit in hits:
            if hit == 'NA':
                new_hits.append(None)
                continue
            elif hit is not None:
                # prepend '>' so SeqIO can parse the description as FASTA
                tmp = '>' + hit
            else:
                new_hits.append(None)
                continue
            hit = SeqIO.read(StringIO(tmp), 'fasta')
            id_lst = hit.id
            _, hit_symbol, seq_range, _ = id_search(id_lst, id_type='brute', verbose=0)
            try:
                seq_range = seq_range[hit_symbol]
            except KeyError:
                # no range parsed for this symbol -> drop the hit
                new_hits.append(None)
                continue
            seq_len = abs(int(seq_range[1]) - int(seq_range[0]))
            # keep the hit only when it spans at least min_len bases
            new_hits.append(hit.description if seq_len >= min_len else None)
        full = intro + new_hits
        return full
    return pandas_df.apply(filter_func, axis=1)
class DataIntegratorParser(object):
    """Parser for UCSC Data Integrator output files.

    File layout: line 0 is a '# '-prefixed header; '# region=...' lines start a
    new genomic region; a '#'-prefixed line (no space after '#') names the
    tab-separated columns; '# No data' marks an empty region; every other line
    is a tab-separated data row. Each region becomes an instance attribute
    holding a list of namedtuples, one per data row.
    """
    def __init__(self, file):
        # Punctuation is mapped to '_' so region/column names are valid identifiers.
        transtab = str.maketrans('!@#$%^&*();:.,\'\"/\\?<>|[]{}-=+', '_____________________________')
        if isinstance(file, str):
            self.file = Path(file)
            assert self.file.exists(), file + ' is an invalid file path or does not exist!'
            assert self.file.is_file(), file + ' is not a valid file!'
        elif isinstance(file, Path):
            # BUGFIX: self.file was never assigned in this branch, so the asserts
            # below raised AttributeError for Path arguments.
            self.file = file
            assert self.file.exists(), str(file) + ' is an invalid file path or does not exist!'
            assert self.file.is_file(), str(file) + ' is not a valid file!'
        else:
            raise TypeError('File must be either a str or Path object!')
        self.regions = []
        with self.file.open() as f:
            for index, line in enumerate(f):
                line = line.strip()
                if index == 0:
                    self.header = line.lstrip('# ')
                    continue
                elif line.startswith('# region='):
                    # BUGFIX: lstrip('# region=') strips a *character set* and
                    # mangles region names beginning with any of those letters;
                    # remove the literal prefix instead.
                    region = line[len('# region='):].translate(transtab)
                    if getattr(self, region, None) is None:
                        self.regions.append(region)
                        setattr(self, region, [])
                    continue
                elif line.startswith('#') and not line.startswith('# '):
                    cnames = line.lstrip('#').translate(transtab)
                    ColNames = namedtuple('ColNames', cnames.split('\t'))
                    self.colnames = ColNames._fields
                    continue
                elif line.startswith('# No data'):
                    # Placeholder row of Nones so empty regions stay iterable.
                    newitem = getattr(self, region, []) + [ColNames(*[None] * len(self.colnames))]
                    setattr(self, region, newitem)
                    continue
                else:
                    try:
                        newitem = getattr(self, region, []) + [ColNames(*line.split('\t'))]
                        setattr(self, region, newitem)
                    except NameError as err:
                        raise NameError(str(err) + '\nParser encountered a line of data before either the column names '
                                                   'or the genomic region was declared in the file!')
                    except TypeError:
                        print(line, file=sys.stderr)
                        raise
                    continue
    def rename_regions_via_bedfile(self, bedfile):
        """Rename region attributes using column 4 of a BED file whose
        (chrom, start+1, end) coordinates match the original region names."""
        transtab = str.maketrans('!@#$%^&*();:.,\'\"/\\?<>|[]{}-=+', '_____________________________')
        if isinstance(bedfile, str):
            self.bedfile = Path(bedfile)
            assert self.bedfile.exists(), bedfile + ' is an invalid file path or does not exist!'
            assert self.bedfile.is_file(), bedfile + ' is not a valid file!'
        elif isinstance(bedfile, Path):
            # BUGFIX: same missing assignment as in __init__.
            self.bedfile = bedfile
            assert self.bedfile.exists(), str(bedfile) + ' is an invalid file path or does not exist!'
            assert self.bedfile.is_file(), str(bedfile) + ' is not a valid file!'
        else:
            raise TypeError('File must be either a str or Path object!')
        bed_trans = {}
        with self.bedfile.open() as f:
            for line in f:
                line = line.strip().split('\t')
                # BED is 0-based half-open; region names use 1-based starts.
                bed_trans['{0}_{1}_{2}'.format(line[0], str(int(line[1]) + 1), line[2])] = line[3].translate(transtab)
        self.regions = []
        for oldid in bed_trans:
            self.regions.append(bed_trans[oldid])
            setattr(self, bed_trans[oldid], getattr(self, oldid, []))
            delattr(self, oldid)
    def count_stats_per_record(self, attr_name):
        """Return {region: Counter of the given column's values}, regions sorted by name."""
        counts = OrderedDict()
        for region in sorted(self.regions):
            rec = getattr(self, region)
            c = Counter([getattr(r, attr_name) for r in rec])
            counts[region] = c
        return counts
    def __iter__(self):
        # Yields each region's list of rows, in file order.
        for region in self.regions:
            yield getattr(self, region)
    def __str__(self):
        # One summary line per region: first id ... last id (row count).
        string = ''
        for region in self.regions:
            content = getattr(self, region)
            string += "{0}:\t {1} ... {2} ({3})\n".format(region,
                                                          content[0][0],
                                                          content[-1][0],
                                                          len(content))
        return string
def read_bed(bedfile, key_col=3):
    """Returns a dict using the given 0-indexed key_column"""
    path = Path(bedfile)
    assert path.exists(), "Given bedfile path does not exist!"
    assert path.is_file(), "Given bedfile path was not a file! Did you provide a directory?"

    def _coerce(token):
        # Prefer int, then float, otherwise keep the raw string.
        for cast in (int, float):
            try:
                return cast(token)
            except ValueError:
                pass
        return token

    records = {}
    with path.open() as handle:
        for raw in handle:
            if raw.startswith("#"):
                continue
            fields = [_coerce(tok) for tok in raw.strip().split('\t')]
            if isinstance(key_col, slice):
                key = tuple(fields[key_col])
            else:
                key = fields[key_col]
            if key in records:
                raise KeyError('Duplicate keys in dictionary!')
            records[key] = fields
    return records
def drop_overlaps_bed(bedfile):
    """Drop every interval that overlaps another interval on the same sequence.

    Accepts either a path to a BED file or a dict keyed by
    (seq, start, end, id) tuples; returns the surviving subset of that dict.
    All members of an overlapping cluster are removed (not merged): only
    intervals overlapping nothing else survive.
    """
    d = bedfile if isinstance(bedfile, dict) else read_bed(bedfile, key_col=slice(0, 3))
    # NOTE(review): read_bed(..., key_col=slice(0, 3)) yields 3-tuple keys, but
    # loc[3] below expects 4-tuples — the file-path branch looks broken; confirm.
    d_new = []
    dlocs = {}
    # group [start, end, id] triples by sequence name
    for loc in d.keys():
        if loc[0] in dlocs.keys():
            dlocs[loc[0]].append([int(loc[1]), int(loc[2]), loc[3]])
        else:
            dlocs[loc[0]] = [[int(loc[1]), int(loc[2]), loc[3]]]
    for k, v in dlocs.items():
        if len(v) > 1:
            # normalize each interval so start <= end
            v = [sorted(i[0:2]) + [i[2]] for i in v]
            # comparison matrix: t[i][j] is True when intervals i and j overlap
            t = [[max(v[i][0], j[0]) <= min(v[i][1], j[1]) for j in v] for i in range(0, len(v))]
            # set diagonal identities to False
            for index in range(0, len(t)):
                t[index][index] = False
            # sum per column of matrix
            t_sums = [sum(i) for i in zip(*t)]
            # Select only items which have a zero in the t_sums index
            filtered_v = [v[i] for i in range(0, len(t_sums)) if t_sums[i] == 0]
            d_new += [(k, i[0], i[1], i[2]) for i in filtered_v]
        else:
            try:
                # single interval on this sequence: it trivially survives
                v = v[0]
                d_new.append((k, v[0], v[1], v[2]))
            except Exception:
                print(k, v)
                raise
    filtered_d = {}
    for item in d_new:
        if item in d.keys():
            filtered_d[item] = d[item]
        elif (item[0], item[2], item[1]) in d.keys():
            # NOTE(review): this swapped-coordinate fallback builds a 3-tuple
            # (dropping the id field) while d's keys are 4-tuples — confirm.
            filtered_d[(item[0], item[2], item[1])] = d[(item[0], item[2], item[1])]
        else:
            print(item)
            raise Exception
    return filtered_d
def calc_effective_copy_number_by_coverage(query_record):
    """Estimate effective copy number as mean hit depth over the covered span.

    Collects the query-coverage intervals of every hit, merges them into
    non-overlapping coverage regions, and returns
    (total hit-nucleotides inside the merged coverage) / (merged coverage length),
    rounded to 2 decimals. Returns None when there are no hits and 0 when the
    merged coverage has zero length.
    """
    # get list of ranges
    if len(query_record['recblast_results']) == 0:
        return None
    raw_ranges = (hit.features[0].qualifiers['query_coverage']
                  for hit in query_record['recblast_results'])
    ranges = []
    for r in raw_ranges:
        try:
            rng = (int(r[0]), int(r[1]))
            ranges.append(sorted(rng))
        except IndexError:
            # hit without a usable coverage annotation — skip it
            continue
    coverage = list(merge_ranges(ranges))
    sum_coverage = sum(i[1] - i[0] for i in coverage)
    if sum_coverage == 0:
        return 0
    # Total nucleotides every hit contributes inside the merged coverage.
    # Equivalent to the original double range-membership count, but computed
    # as interval overlaps: O(|coverage| * |ranges|) instead of O(span^2).
    sum_nuc = sum(max(0, min(r[1], i[1]) - max(r[0], i[0]))
                  for r in coverage for i in ranges)
    return round(sum_nuc / sum_coverage, 2)
def bed_get_flanking_regions(bedfile, left_range, right_range, genome_file=None):
    """Returns two new bedfiles with ranges left-and-right of each item of the original file, respectively.

    Output files are written next to the input as
    '<stem>_left_Offset{o}_Size{s}<suffix>' and '<stem>_right_Offset{o}_Size{s}<suffix>'.
    Left flanks are clamped at position 0; right flanks are clamped at the
    chromosome size when genome_file is given.

    :param str bedfile:
    :param left_range: Either a single positive integer indicating the left-most number of bases in range;
    or a tuple of two integers indicating the left-and-right bound of the range.
    :param right_range: Either a single positive integer indicating the right-most number of bases in range;
    or a tuple of two integers indicating the right-and-left bound of the range.
    :param genome_file: optional tab-separated '<chrom>\t<size>' file used to clamp right flanks.
    :return:
    """
    # Normalize integer shorthand into (far, near) / (near, far) tuples.
    if isinstance(left_range, int):
        left_range = (left_range, 0)
    if isinstance(right_range, int):
        right_range = (0, right_range)
    assert isinstance(left_range, tuple), "Parameter 'left_range' must either be an integer or a tuple!"
    assert len(left_range) == 2, "Parameter 'left_range' must be a tuple of length 2!"
    assert left_range[0] > left_range[1] or left_range == (0, 0), ("The left-side range modifier of left_range must be "
                                                                   "less than the right-side!")
    assert isinstance(right_range, tuple), "Parameter 'right_range' must either be an integer or a tuple!"
    assert len(right_range) == 2, "Parameter 'right_range' must be a tuple of length 2!"
    assert right_range[0] < right_range[1] or right_range == (0, 0), ("The right-side range modifier of left_range must"
                                                                     " be greater than the left-side!")
    bedfile = Path(bedfile)
    assert bedfile.exists(), "Given bedfile path does not exist!"
    assert bedfile.is_file(), "Given bedfile path was not a file! Did you provide a directory?"
    leftbed = bedfile.with_name(bedfile.stem +
                                "_left_Offset{0}_Size{1}".format(left_range[1],
                                                                 left_range[0] - left_range[1]) +
                                bedfile.suffix)
    rightbed = bedfile.with_name(bedfile.stem +
                                 "_right_Offset{0}_Size{1}".format(right_range[1],
                                                                   right_range[0] - right_range[1]) +
                                 bedfile.suffix)
    # chromosome sizes used to clamp right-hand flanks (None when not supplied)
    granges = {chrm: int(size) for chrm, size
               in [line.strip().split("\t") for line in open(genome_file)]} if genome_file else None
    with bedfile.open() as bf, leftbed.open("w") as lbf, rightbed.open("w") as rbf:
        # NOTE: 'chr' and 'id' shadow builtins; harmless within this loop.
        records = (line.strip().split('\t')[0:4] for line in bf)
        for (chr, s, e, id) in records:
            if left_range != (0, 0):
                left = [chr,
                        int(s) - left_range[0],
                        int(s) - left_range[1],
                        id + "_left"]
                ldiff = 0
                if left[2] > left[1] > 0:
                    # flank fits entirely inside the chromosome — no clamping
                    left[3] += "_offset-{0}_size-{1}".format(left_range[1],
                                                             left[2] - left[1])
                else:
                    if left[1] < 0:
                        ldiff = -left[1] # note its '-' because left[1] is negative
                        left[2] += ldiff
                        left[2] = left[2] if left[2] <= int(s) else int(s)
                        left[1] = 0
                    if left[1] == left[2]:
                        # zero-length flank after clamping: widen by one base
                        left[2] += 1
                        ldiff -= 1
                        left[3] += "_offset-{0}_size-{1}".format(left_range[1] - ldiff,
                                                                 left[2] - left[1])
                    else:
                        # NOTE(review): this branch ignores ldiff in the offset
                        # (unlike the branch above) — confirm that is intended.
                        left[3] += "_offset-{0}_size-{1}".format(left_range[1],
                                                                 left[2] - left[1])
                left = (str(i) for i in left)
                lbf.write('\t'.join(left) + "\n")
            if right_range != (0, 0):
                right = [chr,
                         int(e) + right_range[0],
                         int(e) + right_range[1],
                         id + "_right"]
                if granges:
                    if granges[chr] <= right[2] or granges[chr] <= right[1]:
                        # flank runs past the chromosome end: pull it back
                        rdiff = granges[chr] - right[2]
                        right[2] = granges[chr]
                        right[1] += rdiff
                        right[1] = right[1] if right[1] >= int(e) else int(e)
                        if right[2] == right[1]:
                            right[1] -= 1
                            rdiff -= 1
                            # NOTE(review): the suffix below is only appended in
                            # the zero-length case; clamped non-zero flanks get
                            # no offset/size suffix — confirm that is intended.
                            right[3] += "_offset-{0}_size-{1}".format(right_range[0] + rdiff,
                                                                      right[2] - right[1])
                    else:
                        right[3] += "_offset-{0}_size-{1}".format(right_range[0],
                                                                  right[2] - right[1])
                else:
                    right[3] += "_offset-{0}_size-{1}".format(right_range[0],
                                                              right[2] - right[1])
                right = (str(i) for i in right)
                rbf.write('\t'.join(right) + "\n")
    return
def bed_extract_duplicates(bedfile, outfile="", verbose=False):
    """Write BED entries whose base name occurs more than once (e.g.
    'gene_left'/'gene_right' pairs) to outfile or '<bedfile>.bed.dups'.

    The output file is only created when at least one duplicate exists.
    """
    bedfile = Path(bedfile)
    assert bedfile.exists(), "Given bedfile path does not exist!"
    assert bedfile.is_file(), "Given bedfile path was not a file! Did you provide a directory?"
    bed_dict = read_bed(bedfile)
    hits = sorted(bed_dict.keys())
    # NOTE(review): counts are keyed on ''.join(parts[:-1]) while the filter
    # below keys on split("_")[0] — these disagree for names containing more
    # than one underscore; confirm the intended grouping key.
    counts = Counter((''.join(hit.split("_")[:-1]) for hit in hits))
    duphits = (hit for hit in hits if counts[hit.split("_")[0]] > 1)
    outfile = Path(outfile) if outfile else bedfile.with_suffix(".bed.dups")
    try:
        # Pull the first duplicate eagerly so the output file is only created
        # when there is something to write (StopIteration means "none found").
        first = next(duphits)
        if verbose:
            print(first, "\t", counts[first.split("_")[0]])
        with outfile.open("w") as of:
            of.write("\t".join((str(i) for i in bed_dict[first])) + "\n")
            for hit in duphits:
                if verbose:
                    print(hit, "\t", counts[hit.split("_")[0]])
                of.write("\t".join((str(i) for i in bed_dict[hit])) + "\n")
    except StopIteration:
        if verbose:
            print("No duplicates found in file!")
def merge_ids(fasta):
    """Concatenate the '_left'/'_right' halves of each record in a FASTA file.

    Records whose ids differ only by a '_left'/'_right' suffix are merged into
    one record (sequences concatenated, descriptions joined by tabs); the
    result is written next to the input as '<name>_joined'.
    """
    source = Path(fasta)
    with source.with_name(source.name + "_joined").open('w') as joined_handle:
        from Bio import SeqIO
        merged = {}
        for record in SeqIO.parse(fasta, "fasta"):
            record.id = record.id.split("_left")[0].split("_right")[0]
            if record.id in merged:
                merged[record.id].seq += record.seq
                merged[record.id].description += "\t" + record.description
            else:
                merged[record.id] = record
        SeqIO.write((v for v in merged.values()), joined_handle, "fasta")
class BLASTSearchParameters(object):
    """Parameter bundle for a BLAST-based reciprocal search.

    Collects thresholds and database locations consumed by the search code.
    Arbitrary extra keyword arguments are attached to the instance as attributes.
    """
    def __init__(self, blast_type, blastdb_path, blast_db="auto", expect=10, perc_score=0.009, perc_span=0.1,
                 ncbi_search=False, perc_ident=0.69, perc_length=0.001, megablast=True, blastdb_version='auto',
                 email='', **kwargs):
        self.search_type = blast_type
        self.search_local = not ncbi_search  # remote NCBI searches are the non-local case
        self.email = email
        self.expect = expect
        self.perc_score = perc_score
        self.perc_ident = perc_ident
        self.perc_span = perc_span
        self.perc_length = perc_length
        self.megablast = megablast
        self.id_db_version = blastdb_version
        self.id_db_path = blastdb_path
        # 'auto' unless an explicit mapping or database name was supplied
        self.search_db = blast_db if isinstance(blast_db, dict) or isinstance(blast_db, str) else "auto"
        # BUGFIX: iterating a dict yields keys only; the original
        # 'for k, v in kwargs:' raised ValueError whenever any extra keyword
        # argument was passed.
        for k, v in kwargs.items():
            setattr(self, k, v)
        if ncbi_search:
            assert "@" in self.email, "If using NCBI for remote BLAST searching, a valid email must be set!"
class BLATSearchParameters(object):
    """Parameter bundle for a BLAT-based reciprocal search (gfServer / 2bit files)."""
    def __init__(self, blat_type, twobit_path, twobit_port_dict, gfserver_host="localhost",
                 expect=10, perc_score=0.009, perc_span=0.1, perc_ident=0.69,
                 perc_length=0.001, twobit_file_dict="auto", twobit_version='auto'):
        self.search_type = blat_type
        self.search_local = gfserver_host
        self.search_db = twobit_port_dict
        self.expect = expect
        self.perc_score = perc_score
        self.perc_ident = perc_ident
        self.perc_span = perc_span
        self.perc_length = perc_length
        self.id_source = "twobit"
        self.id_db_path = twobit_path
        self.id_db_version = twobit_version
        # 'auto' unless an explicit mapping or file name was supplied
        self.id_db = twobit_file_dict if isinstance(twobit_file_dict, (dict, str)) else "auto"
class SQLServerParameters(object):
    """Connection parameters for a BioSQL-style sequence database."""
    def __init__(self, host='localhost', id_db='bioseqdb', user='postgres', driver='psycopg2',
                 password='', id_db_version='auto'):
        self.id_source = 'sql'
        self.driver = driver
        self.host = host
        self.user = user
        self.password = password
        # (the original assigned id_db twice; once is enough)
        self.id_db = id_db
        self.id_db_version = id_db_version
| docmanny/RecSearch | RecBlast/Auxilliary.py | Auxilliary.py | py | 40,401 | python | en | code | 4 | github-code | 36 |
3585805775 | '''
Created on Dec 26, 2016
@author: Anuj
'''
import scramblingModule
import sys
# Prompt for a file and scramble its words, reporting the output file path.
ip_file_name = input("Enter File name(Specify full path of file if it is not in current directory) : ")
try:
    print("Output File : ", scramblingModule.WordScrambling().scrambleFile(ip_file_name))
except Exception as exc:
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit still
    # propagate; printing the exception matches the old sys.exc_info()[1] output.
    print(exc)
| anujpatel2809/Word-Scrambling | wordscrambling/wordscrambling.py | wordscrambling.py | py | 340 | python | en | code | 0 | github-code | 36 |
def cekPalindrom(mystr):
    """Return (reversed string, True if mystr reads the same backwards)."""
    reversed_text = mystr[::-1]
    return reversed_text, reversed_text == mystr
def main():
    """Prompt for a word, then report its reverse and palindrome status."""
    word = input('Masukkan kata: ')
    reversed_word, is_palindrome = cekPalindrom(word)
    print('\ninput:', word, '\noutput:', reversed_word, '\nPalindrom:', is_palindrome, '\n')


if __name__ == '__main__':
    main()
| adnanhf/Basic-Programming-Algorithm | Modul-5-Sequence Data Type/Number-4.py | Number-4.py | py | 330 | python | en | code | 1 | github-code | 36 |
21477469223 | num = input()
length = len(num)
ans = 0
if num[:2] == '0x':
for i in range(2, length):
digit = num[i]
if digit.isalpha():
digit = ord(num[i]) - 87
ans += int(digit) * (16 ** (length-i-1))
print(ans)
elif num[0] == '0':
for i in range(1, length):
ans += int(num[i]) * (8 ** (length-i-1))
print(ans)
else:
print(int(num))
| Minsoo-Shin/jungle | ps_after/boj_11816.py | boj_11816.py | py | 388 | python | en | code | 0 | github-code | 36 |
14918571499 | from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from cassandra.cluster import Cluster
import signal
# if (contents.length > 0 && !contents[0].equalsIgnoreCase("year") && !contents[18].equalsIgnoreCase("1")) {
# String origin = contents[15];
# int delay = (int) (Float.parseFloat(contents[14]));
# String destinationDelay = contents[8] + "_" + delay;
# context.write(new Text(origin), new Text(destinationDelay));
# }
top = []
top_airports_table = "TopAirlinesByAirport"
def get_airport_carrier_delay(content):
    """Map a (key, csv-line) Kafka record to [('ORIGIN_CARRIER', (delay, 1))].

    Skips the header row ('year' in column 0), cancelled flights (column 18
    flag), and malformed/short rows. Always returns a list — the original
    implicitly returned None on non-matching rows, which crashes flatMap.
    """
    data = content[1].split(',')
    try:
        if len(data) > 0 and not data[0] == 'year' and not data[18] == '1\n':
            origin_carrier = data[15] + "_" + data[8]
            destination_delay = float(data[14])
            return [(origin_carrier, (destination_delay, 1))]
    except (IndexError, ValueError):
        # short row or unparsable delay field -> drop the record
        # (narrowed from a bare 'except:')
        return []
    return []
def init_cassandra():
    """Open a session to the local Cassandra cluster's 'tp' keyspace."""
    return Cluster(['127.0.0.1']).connect('tp')
def top_complex_average(rdd):
    """foreachRDD action: fold this batch's (key, (delay_sum, count)) pairs into
    the global running totals and rewrite the Cassandra summary table."""
    global top
    chandle = init_cassandra()
    # iterate locally on driver (master) host
    curr = rdd.toLocalIterator()
    # concat top and curr values
    top_dict = dict(top)
    total = 0
    for el in curr:
        total += 1
        # NOTE(review): upstream builds keys as 'ORIGIN_CARRIER' (underscore),
        # but this splits on '-' — confirm the intended key separator.
        key = el[0].split('-')[0]
        subkey = el[0].split('-')[1]
        if key in top_dict:
            if subkey in top_dict[key]:
                # accumulate (delay_sum, count) for an existing airport/carrier
                top_dict[key][subkey] = (top_dict[key][subkey][0] + el[1][0], top_dict[key][subkey][1] + el[1][1])
            else:
                top_dict[key][subkey] = el[1]
        else:
            top_dict[key] = {subkey: el[1]}
    top = top_dict
    # NOTE(review): the CQL below names 2 columns but has 3 placeholders and is
    # executed with 2 values — this statement cannot prepare/run as written.
    prepared_stmt = chandle.prepare(
        'INSERT INTO {} (airport_name,airline_name) values (?, ?, ?)'.format(top_airports_table))
    for origin in top:
        # NOTE(review): '[10]' indexes the 11th formatted string, and
        # ' '.join then joins its *characters*; '[:10]' (top ten entries)
        # was probably intended — confirm.
        carriers = ' '.join(["%s=%0.2f" % (el[0], el[1][0] / el[1][1]) for el in
                             sorted(top[origin].items(), key=lambda el: el[1][0] / el[1][1])][10])
        chandle.execute(prepared_stmt, (origin, carriers))
    chandle.shutdown()
def stop_streaming(signum=None, frame=None):
    """Gracefully stop the streaming context; usable as a signal handler.

    BUGFIX: signal.signal() invokes its handler as handler(signum, frame); the
    original zero-argument version raised TypeError on SIGINT. The parameters
    default to None so existing direct calls stop_streaming() keep working.
    """
    global ssc
    ssc.stop(stopSparkContext=True, stopGraceFully=True)
def stream_kafka():
    """Wire the Kafka '2008' topic into the streaming pipeline and run it.

    Per-record delays are reduced to (delay_sum, count) pairs per key, and each
    batch is folded into Cassandra by top_complex_average.
    """
    global ssc
    kstream = KafkaUtils.createDirectStream(ssc, topics=['2008'], kafkaParams={
        "metadata.broker.list": 'ip-172-31-12-78.us-west-1.compute.internal:6667'})
    contents = kstream.flatMap(get_airport_carrier_delay).reduceByKey(
        lambda a, b: (a[0] + b[0], a[1] + b[1])).foreachRDD(top_complex_average)
    ssc.start()
    # NOTE(review): the timeout unit here is seconds (~4 hours) — confirm intended.
    ssc.awaitTerminationOrTimeout(15000)
    ssc.stop(stopSparkContext=True, stopGraceFully=True)
def main():
    """Configure Spark, build a 1-second StreamingContext, and start the Kafka job."""
    global ssc
    spark_conf = SparkConf()
    spark_conf.setAppName("TopAirports")
    spark_conf.set("spark.streaming.kafka.maxRatePerPartition", "0")
    spark_conf.set('spark.streaming.stopGracefullyOnShutdown', True)
    spark_context = SparkContext(conf=spark_conf)
    ssc = StreamingContext(spark_context, 1)  # 1-second batch interval
    ssc.checkpoint("/tmp/checkpoint")
    signal.signal(signal.SIGINT, stop_streaming)
    stream_kafka()


if __name__ == "__main__":
    main()
| karthikBG/AviationAnalytics | SparkStreaming/2.1.TopAirlinesByAirport.py | 2.1.TopAirlinesByAirport.py | py | 3,139 | python | en | code | 0 | github-code | 36 |
73583266345 | import phunspell
import inspect
import unittest
class TestSqAL(unittest.TestCase):
    """Spell-lookup tests against the Albanian (sq_AL) hunspell dictionary."""
    # Loaded once at class-definition time and shared by all test methods.
    pspell = phunspell.Phunspell('sq_AL')
    def test_word_found(self):
        """A valid Albanian word is found in the dictionary."""
        self.assertTrue(self.pspell.lookup("katërpalëshe"))
    def test_word_not_found(self):
        """The library's own name is not an Albanian word."""
        self.assertFalse(self.pspell.lookup("phunspell"))
    def test_lookup_list_return_not_found(self):
        """lookup_list returns exactly the words missing from the dictionary."""
        words = "adaptoheshin koalicione antiimperialistë diskriminova katërpalëshe borken"
        self.assertListEqual(
            self.pspell.lookup_list(words.split(" ")), ["borken"]
        )
if __name__ == "__main__":
    unittest.main()
| dvwright/phunspell | phunspell/tests/test__sq_AL.py | test__sq_AL.py | py | 615 | python | en | code | 4 | github-code | 36 |
5075129662 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "wu zhi bin"
# Email: wuzhibin05@163.com
# Date: 2021/8/26
"""
3.用map来处理字符串列表,把列表中所有人都变成sb,比方alex_sb
name=['alex','wupeiqi','yuanhao','nezha']
"""
# 方法一
# name = ['alex', 'wupeiqi', 'yuanhao', 'nezha']
#
# def add_str(x):
# return x + "_sb"
# ret = map(add_str, name)
# print(list(ret))
# 方法二
# ret2 = map((lambda x: x + "_sb"), name)
# print(list(ret2))
"""
4.用filter函数处理数字列表,将列表中所有的偶数筛选出来
num = [1,3,5,6,7,8]
"""
num = [1, 3, 5, 6, 7, 8]
# 方法一
# def is_even(x):
# if x % 2 == 0:
# return True
# ret = filter(is_even, num)
# print(list(ret))
# 方法二
# lambda x:x if x % 2 == 0
# ret = filter((lambda x: True if x % 2 == 0 else False), num)
# print(list(ret))
"""
5.随意写一个20行以上的文件
运行程序,先将内容读到内存中,用列表存储。
接收用户输入页码,每页5条,仅输出当页的内容
"""
def get_page(n):
    """Return the n-th page (5 lines per page, 1-based) of the file 'text'.

    Returns the page's list of lines, or a (Chinese) error string when the
    requested page is out of range.
    """
    with open('text', encoding='utf-8') as f:
        file_list = f.readlines()
    # BUGFIX: the original used '<=', so a page starting exactly at
    # len(file_list) returned an empty list instead of the error message.
    if (n - 1) * 5 < len(file_list):
        return file_list[(n - 1) * 5:n * 5]
    else:
        return "没有当前页,请重新输入\n"
if __name__ == '__main__':
    # Interactive pager; guarded so importing this module does not block on input().
    while True:
        page_str = input("请输入你要看的页码(输入q退出):")
        if page_str.isdigit():
            page_num = int(page_str)
            ret = get_page(page_num)
            print(("当前显示的为第%d页" % page_num).center(80, "="))
            for line in ret:
                print(line, end='')
        elif page_str.lower() == 'q':
            # BUGFIX: the original compared the *bool* isupper() to the string
            # "Q", which is never equal — 'q' could not exit the loop.
            break
        else:
            print("输入错误,请重新输入")
"""
6.如下,每个小字典的name对应股票名字,shares对应多少股,price对应股票的价格
portfolio = [
{'name': 'IBM', 'shares': 100, 'price': 91.1},
{'name': 'AAPL', 'shares': 50, 'price': 543.22},
{'name': 'FB', 'shares': 200, 'price': 21.09},
{'name': 'HPQ', 'shares': 35, 'price': 31.75},
{'name': 'YHOO', 'shares': 45, 'price': 16.35},
{'name': 'ACME', 'shares': 75, 'price': 115.65}
]
"""
"""
# 6.1.计算购买每支股票的总价
"""
"""
# 6.2.用filter过滤出,单价大于100的股票有哪些
"""
| Wuzhibin05/python-course | Course/Section-1/day16/code/pratice.py | pratice.py | py | 2,270 | python | zh | code | 0 | github-code | 36 |
19154199672 | import os
import unittest
import numpy as np
from total_scattering.file_handling.load import load
from total_scattering.file_handling.save import save_banks, save_file
from tests import EXAMPLE_DIR, TEST_DATA_DIR
from mantid.simpleapi import mtd, \
LoadNexusProcessed, LoadAscii, ConvertToHistogram
class TestSave(unittest.TestCase):
    """Tests for save_banks/save_file round-tripping a cropped POLARIS workspace."""
    def setUp(self):
        # Alignment/focus arguments consumed by file_handling.load().
        align_and_focus_args = {
            'CalFilename': os.path.join(EXAMPLE_DIR, 'isis', 'polaris_grouping.cal'),
            'ResampleX': -6000,
            'DSpacing': False,
            'PreserveEvents': False,
            'MaxChunkSize': 8,
            'ReductionProperties': '__powderreduction'
        }
        # Highly cropped version of the workspace to improve run time
        ws_name = 'test-sample'
        sample_file_path = os.path.join(TEST_DATA_DIR, 'POLARIS00097947-min.nxs')
        wksp = load(ws_name, sample_file_path, **align_and_focus_args)
        self.wksp = mtd[wksp]
        # NOTE(review): out_ascii is assigned the same '.nxs' name as out_nxs —
        # looks like a copy-paste slip; confirm the intended ASCII extension.
        self.out_nxs = '%s.nxs' % ws_name
        self.out_ascii = '%s.nxs' % ws_name
    def tearDown(self):
        # Clear Mantid's workspace dictionary and remove any files the tests wrote.
        mtd.clear()
        if os.path.isfile(self.out_nxs):
            os.remove(self.out_nxs)
        if os.path.isfile(self.out_ascii):
            os.remove(self.out_ascii)
    def test_save_banks_exists(self):
        """save_banks writes the NeXus file to the current directory."""
        save_banks(self.wksp, self.out_nxs, 'wksp', '.')
        self.assertTrue(os.path.isfile(self.out_nxs))
        mtd.clear()
    def test_save_banks_relative_path(self):
        """save_banks honours a relative output directory."""
        save_banks(self.wksp, self.out_nxs, 'wksp', './output')
        self.assertTrue(os.path.isfile(os.path.join('./output', self.out_nxs)))
        mtd.clear()
    def test_save_banks_check_contents(self):
        """A saved-then-reloaded workspace matches the original's shape and axis."""
        save_banks(self.wksp, self.out_nxs, 'wksp', '.')
        out_wksp = LoadNexusProcessed(self.out_nxs)
        self.assertEqual(out_wksp.blocksize(),
                         self.wksp.blocksize())
        self.assertEqual(out_wksp.getNumberHistograms(),
                         self.wksp.getNumberHistograms())
        self.assertTrue(np.array_equal(out_wksp.getAxis(0).extractValues(),
                                       self.wksp.getAxis(0).extractValues())
                        )
    def test_save_banks_binning(self):
        """The Binning parameter rebins the saved workspace (here to 100 bins)."""
        save_banks(self.wksp, self.out_nxs, 'wksp', '.', Binning='0,100,10000')
        out_wksp = LoadNexusProcessed(self.out_nxs)
        self.assertNotEqual(out_wksp.blocksize(),
                            self.wksp.blocksize())
        self.assertEqual(out_wksp.blocksize(), 100)
    def test_save_banks_grouping(self):
        # TODO: Will have to implement when we have event test data.
        '''
        Below does not work since this POLARIS test data is Histogrammed
        and does not contain counts
        will need to re-add CreateGroupingWorkspace to imported algorithms
        # Create grouping workspace.
        grp_ws, nspectra, grp_count = CreateGroupingWorkspace(InstrumentName='POLARIS',
                                                              GroupDetectorsBy="All")
        save_banks(self.wksp, self.out_nxs, 'wksp_title', '.', GroupingWorkspace=grp_ws)
        out_wksp = LoadNexusProcessed(self.out_nxs)
        self.assertEqual(grp_ws.blocksize(), 1)
        self.assertEqual(nspectra, 3008)
        self.assertEqual(grp_count, 1)
        self.assertNotEqual(out_wksp.getNumberHistograms(),
                            self.wksp.getNumberHistograms())
        self.assertEqual(out_wksp.getNumberHistograms(), 1)
        '''
    def test_save_file_exists(self):
        """save_file writes the ASCII file to the current directory."""
        save_file(self.wksp, self.out_ascii)
        self.assertTrue(os.path.isfile(self.out_ascii))
    def test_save_file_check_contents(self):
        """An ASCII round-trip preserves shape and (approximately) the x axis."""
        save_file(self.wksp, self.out_ascii)
        out_wksp = LoadAscii(self.out_ascii, Separator='Space')
        out_wksp = ConvertToHistogram(out_wksp)
        self.assertEqual(out_wksp.blocksize(),
                         self.wksp.blocksize())
        self.assertEqual(out_wksp.getNumberHistograms(),
                         self.wksp.getNumberHistograms())
        self.assertTrue(np.allclose(out_wksp.getAxis(0).extractValues(),
                                    self.wksp.getAxis(0).extractValues())
                        )
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
| ckendrick/mantid_total_scattering | tests/file_handling/test_save.py | test_save.py | py | 4,281 | python | en | code | null | github-code | 36 |
14355032478 | import re
# Deobfuscation pass: inline the n0..n255 numeric aliases and extract the
# 'EXPR <name> ... END' macro blocks from payload3.js.
# NOTE: file handles are left open and 'vars' shadows the builtin — fine for a
# run-once script.
f= open('payload3.js').read()
g = open('payload4.js', 'w')
v = open('payload4_vars.js', 'w')
# maps extracted macro name -> its body text
vars = {}
def cb(m):
    # re.sub callback: record the macro body and replace it with a '_NAME_' token.
    name=m.group(1).strip()
    x = m.group(2).strip()
    vars[name] = x
    return '_'+name+'_'
# Replace every alias n<k> with its literal value k; the \b boundaries keep
# n1 from matching inside n10, etc.
for x in range(0x100):
    reg = r'\bn%d\b'%x
    f = re.sub(reg, str(x), f)
f = re.sub(r'EXPR ([^ ]+)((.|\n)*?)END', cb, f)
# these placeholder tokens are not needed in the output
f = f.replace('_NUMS_','')
f = f.replace('_NUMVARS_','')
# Emit the extracted array literals as named globals for the next stage.
v.write('_ARY1_=%s;\n'%(vars['ARY1']))
v.write('_ARY2_=%s;\n'%(vars['ARY2']))
v.write('_ARY3_=%s;\n'%(vars['ARY3']))
v.write('_ARY4_=%s;\n'%(vars['ARY4']))
v.write('_ARY5_=%s;\n'%(vars['ARY5']))
v.write('_ARY6_=%s;\n'%(vars['ARY6']))
v.write('_ARY7_=%s;\n'%(vars['ARY7']))
g.write(f)
| niklasb/34c3ctf-sols | fuckbox/solve2.py | solve2.py | py | 693 | python | en | code | 22 | github-code | 36 |
11294428652 | import tkinter as tk
from tkinter import ttk
# def createTreeView(frame, columns, height=15):
# tree = ttk.Treeview(frame, columns=columns, show='headings', height=height)
# tree.tag_configure('odd', background='gainsboro')
# tree.heading('#1', text='武将名')
# tree.heading('#2', text='部队属性')
# tree.heading('#3', text='Lv', command=lambda:self.treeview_sort_column(tree, '#3', False))
def treeview_sort_column(tv, col, reverse):
    """Sort a ttk.Treeview by the given column; clicking the heading again
    toggles the sort order."""
    rows = [(tv.set(item, col), item) for item in tv.get_children('')]
    # rearrange items in sorted positions
    for position, (_, item) in enumerate(sorted(rows, reverse=reverse)):
        tv.move(item, '', position)
    # reverse sort next time
    tv.heading(col, command=lambda: treeview_sort_column(tv, col, not reverse))
42242964730 | import numpy as np
import bead_util as bu
import matplotlib.pyplot as plt
import os
import scipy.signal as sig
import scipy
import glob
from scipy.optimize import curve_fit
import cant_util as cu
data_dir1 = "/data/20170831/image_calibration2/align_profs"
data_dir2 = "/data/20170831/image_calibration2/align_profs"
out_dir = "/calibrations/image_alignments"
date = '20170831'
def get_stage_column(attribs, stage_cols=(17, 18, 19), attrib_inds=(3, 6, 9), ortho_columns=(18, 17, 19)):
    '''Return (data column of the first driven stage axis, column of its orthogonal axis).

    attribs['stage_settings'] is expected to be fancy-indexable (i.e. a numpy
    array — TODO confirm); attrib_inds picks the per-axis "driven" flags.
    Defaults are tuples (not lists) to avoid the mutable-default pitfall, and
    attrib_inds is converted to a list so numpy treats it as fancy indexing
    rather than a multidimensional index.
    '''
    stage_settings = attribs['stage_settings']
    driven = np.array(list(map(bool, stage_settings[list(attrib_inds)])))
    return (np.array(stage_cols)[driven])[0], (np.array(ortho_columns)[driven])[0]
def gauss_beam(r, mu, w, A):
    '''Gaussian beam intensity profile: amplitude A, center mu, 1/e^2 radius w.'''
    offset = r - mu
    return A * np.exp(-2. * offset ** 2 / w ** 2)
def line(x, m, b):
    '''Straight line y = m*x + b, used for fitting.'''
    return m * x + b


def line_intersection(popt0, popt1):
    '''Intersection (x, y) of two lines, each given as popt = [slope, intercept].'''
    x_cross = (popt1[1] - popt0[1]) / (popt0[0] - popt1[0])
    return x_cross, line(x_cross, *popt0)
def profile(fname, ends = 100, stage_cal = 8., data_column = 5, make_plot = False, p0 = [30, 30, .001], ortho_column = [18, 17, 19]):
    '''Build a knife-edge beam profile from one raw data file and fit it.

    The driven stage axis is detected from the file attributes, the photodiode
    signal is binned separately for positive and negative stage velocity, and
    the absolute gradient of each binned trace is fit to a Gaussian beam shape.

    INPUTS: fname       - path to the raw .h5 file
            ends        - number of samples trimmed from each end of the data
            stage_cal   - stage calibration factor applied to stage columns
                          (presumably V -> um; TODO confirm units)
            data_column - column of the photodiode/profiling signal
            make_plot   - if True, show semilog plots of profiles and fits
            p0          - initial guess [mu, w, A] for the Gaussian fit
            ortho_column - unused default; immediately overwritten by
                           get_stage_column() below

    OUTPUTS: (beam center averaged over the two sweep directions,
              mean position on the orthogonal stage axis)
    '''
    dat, attribs, f = bu.getdata(fname)
    # trim transients at both ends of the record
    dat = dat[ends:-ends, :]
    stage_column, ortho_column = get_stage_column(attribs)
    dat[:,stage_column]*=stage_cal
    dat[:, ortho_column]*=stage_cal
    f.close()
    # bin by stage position, split by sweep direction (vel_mult = +/-1)
    bp, yp, ep = cu.sbin_pn(dat[:, stage_column], dat[:, data_column], bin_size = .1, vel_mult = 1.)
    bn, yn, en = cu.sbin_pn(dat[:, stage_column], dat[:, data_column], bin_size = .1, vel_mult = -1.)
    # derivative of the knife-edge trace gives the beam profile
    profp = np.abs(np.gradient(yp, bp))
    profn = np.abs(np.gradient(yn, bn))
    # drop 10 bins at each edge where the gradient is unreliable
    poptp, pcovp = curve_fit(gauss_beam, bp[10:-10], profp[10:-10], p0 = p0)
    poptn, pcovn = curve_fit(gauss_beam, bn[10:-10], profn[10:-10], p0 = p0)
    if make_plot:
        plt.semilogy(bp, profp, 'o')
        plt.semilogy(bp, gauss_beam(bp, *poptp), 'r')
        plt.semilogy(bn, profn, 'o')
        plt.semilogy(bn, gauss_beam(bn, *poptn), 'k')
        plt.show()
    return np.mean([poptn[0], poptp[0]]), np.mean(dat[:, ortho_column])
def find_edge(xsweep_dir, ysweep_dir, over_plot = 10.):
    """Locate the stage edge as the intersection of the x- and y-sweep lines.

    Profiles every .h5 file in each sweep directory, fits a straight line to
    (beam center vs orthogonal position) for each sweep direction, plots both
    fits, and returns the intersection as np.array([x, y]).

    over_plot -- extra range (same units as the stage data) plotted beyond
                 the measured points on each side.
    """
    xfs = glob.glob(xsweep_dir + '/*.h5')
    yfs = glob.glob(ysweep_dir + '/*.h5')
    # each row: (beam center, mean orthogonal position)
    xdata = np.array(list(map(profile, xfs)))
    ydata = np.array(list(map(profile, yfs)))
    plt.plot(xdata[:, 0], xdata[:, 1], 'x')
    plt.plot(ydata[:, 1], ydata[:, 0], 'x')
    # NOTE(review): these slope guesses look like they are missing parentheses
    # around the numerator (a - b/(c - d) instead of (a - b)/(c - d)); they are
    # only initial guesses for curve_fit, so the fit usually still converges.
    p0x = [xdata[-1, 0]-xdata[0, 0]/(xdata[-1, 1]-xdata[0, 1]), 0]
    p0y = [ydata[-1, 0]-ydata[0, 0]/(ydata[-1, 1]-ydata[0, 1]), 0]
    poptx, pcovx = curve_fit(line, xdata[:, 0], xdata[:, 1], p0 = p0x)
    popty, pcovy = curve_fit(line, ydata[:, 1], ydata[:, 0], p0 = p0y)
    xplt = np.linspace(np.min(xdata[:, 0])-over_plot, np.max(xdata[:, 0])+over_plot, 1000)
    yplt = np.linspace(np.min(ydata[:, 1])-over_plot, np.max(ydata[:, 1])+over_plot, 1000)
    plt.plot(xplt, line(xplt, *poptx))
    plt.plot(yplt, line(yplt, *popty))
    xint, yint = line_intersection(poptx, popty)
    plt.plot([xint], [yint], 'o')
    plt.show()
    return np.array([xint, yint])
def save_cal(p_arr, path, date):
    '''Save a stage-position calibration array to disk.

    INPUTS: p_arr - array-like calibration data to store
            path  - output directory (created if it does not exist)
            date  - date string appended to the file name

    The array is written to <path>/stage_position_<date>.npy
    (np.save appends the .npy extension automatically).
    '''
    # exist_ok replaces the original check-then-create pair
    # (os.path.exists + os.makedirs), which was racy.
    os.makedirs(path, exist_ok=True)
    outfile = os.path.join(path, 'stage_position_' + date)
    np.save(outfile, p_arr)
# Module-level driver: locate the stage edge from the two alignment sweep
# directories and persist the resulting (x, y) intersection for this date.
p_arr = find_edge(data_dir1, data_dir2)
save_cal(p_arr, out_dir, date)
| charlesblakemore/opt_lev_analysis | scripts/camera_analysis/align_image.py | align_image.py | py | 3,689 | python | en | code | 1 | github-code | 36 |
28224798976 | """
Given a list of numbers, calculate another list in which
i_th element is the product of all numbers in the list except
the original i_th element.
"""
from functools import reduce
from typing import List
def solution_1(input_nums: List[int]) -> List[int]:
    """Division-based solution.

    For each index i, return the product of all elements except
    ``input_nums[i]``.

    Fixes over the original:
    * uses ``input_nums`` instead of the module-level global ``nums`` (the
      original silently computed the product of the wrong list);
    * correct zero handling: with exactly one zero, only that position gets
      the product of the remaining elements and every other position is 0
      (the original fell back to the full product, giving a wrong value);
    * exact integer division (``//``) instead of ``int(prod / num)``, which
      loses precision for large products.
    """
    zero_count = input_nums.count(0)
    if zero_count > 1:
        # every "product of the others" still contains at least one zero
        return [0] * len(input_nums)
    # product of the non-zero elements (== full product when there is no zero)
    nonzero_prod: int = reduce(lambda x, y: x * y, (n for n in input_nums if n != 0), 1)
    if zero_count == 1:
        return [nonzero_prod if n == 0 else 0 for n in input_nums]
    # division is always exact here, so // matches the intended result
    return [nonzero_prod // n for n in input_nums]
def solution_2(input_nums: List[int]) -> List[int]:
    """Division-free solution.

    Each output element is (product of everything to its left) times
    (product of everything to its right), built in two linear sweeps.
    """
    size = len(input_nums)
    result: List[int] = [1] * size
    running = 1
    # forward sweep: multiply in the prefix products
    for idx in range(size):
        result[idx] *= running
        running *= input_nums[idx]
    running = 1
    # backward sweep: multiply in the suffix products
    for idx in reversed(range(size)):
        result[idx] *= running
        running *= input_nums[idx]
    return result
# Demo: run both solutions on two sample inputs.
# NOTE(review): solution_1 reads the module-level ``nums`` internally, so the
# two solutions only agree here because the same global is passed in.
nums: List[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(solution_1(nums))
print(solution_2(nums))
nums: List[int] = [2, 3, 4]
print(solution_1(nums))
print(solution_2(nums))
| HomayoonAlimohammadi/Training | DailyProblem/19_6_2022.py | 19_6_2022.py | py | 1,176 | python | en | code | 2 | github-code | 36 |
11238933952 | # automatic Nudged Elastic Band Method (NEB) for vacancy migration energy barrier with LAMMPS
# Stefano Segantin
# Politecnico di Torino
# ---------- DISCUSSION ------------------------------------------------------------------------------------------------
# this routine relies on the assumption that the position in the box of all the atoms rests the same in the intial
# simulation and in the NEB simulation. It is not always like that. In order to do that you need to use the same number
# of parallelized core in both the simulations
########################################################################################################################
# import section
import os
import numpy as np
import NEB_stats as neb
import NEB_readout as nebro
import pandas as pd
# --- run configuration -------------------------------------------------------
myDir = os.getcwd()
os.chdir(myDir)
initialfile = r'data.***.0'
myneb = neb.NEBstats(initialfile) # opens/parses the initial LAMMPS data file
mynebro = nebro.NEBreadout()
microstructure = "BCC"
ao = 3.31642067489331 # lattice constant
N_rnd_atoms = 1 # number of random atoms to pick
finalfile = r"final.***"
lmpInFile = r"***_neb_py.in"
nebOutFile = r"***_neb_py.xlsx"
# accumulators filled by the sampling loop below
vacancies = []
neighbors = []
MainInfo = []
# geometry pulled once from the initial data file
atom_coords = myneb.atomCoords()
box_size = myneb.boxSize()
num_atoms = myneb.numAtoms()
MI = []
counter = 0
# Main loop: for each sampled vacancy, run one NEB calculation per nearest
# neighbor (vacancy hop direction) and record the resulting energy barrier.
for sample in range(N_rnd_atoms):
    print("NUMBER OF VACANCY SAMPLED = %s" % (sample))
    # pick a random atom to remove; rndvacancy[0] is its atom ID
    rndvacancy = myneb.randAtomID(num_atoms, atom_coords, box_size, ao, 1)
    vacancyID = int(rndvacancy[0])
    neighbors = myneb.atomNN(atom_coords, microstructure, ao, vacancyID)
    vacancies.append(rndvacancy)
    NNs = []
    for NN in neighbors:
        # write the NEB final-replica coordinates for this hop
        file = myneb.writeFinalFile(finalfile, rndvacancy, NN)
        NNs.append(NN)
        # LAMMPS input script; %s placeholders: lattice constant, initial
        # data file, vacancy atom ID, final-replica file
        txtin = (r""" # Nudged elastic band (NEB) analysis
#Politecnico di Torino
# ---------- INITIALIZATION ----------------------------------------------------
clear
timer full
units metal # A ps K bar eV
dimension 3
boundary p p p
atom_style atomic
atom_modify map array
variable ao equal %s
# ---------- GEOMETRY DEFINITION -----------------------------------------------
lattice bcc ${ao}
read_data %s
group
# ---------- create vacancy ----------------------------------------------------
group vacancy id %s
delete_atoms group vacancy compress no
# ---------- FORCE FIELD -------------------------------------------------------
pair_style
pair_coeff * *
neigh_modify every 1 delay 0 check yes
# ---------- dumb run ----------------------------------------------------------
run 0
# ---------- SETTINGS ----------------------------------------------------------
# COMPUTES
compute csym all centro/atom bcc
compute eng all pe/atom
compute eatoms all reduce sum c_eng
# VARIABLES
variable N equal count(all)
variable Eavg equal c_eatoms/$N
# ---------- DYNAMICS ----------------------------------------------------------
# ---------- energy minimization -----------------------------------------------
# PRINT-SCREEN
thermo
thermo_style
displace_atoms all random
minimize
# --------- NEB analysis -------------------------------------------------------
variable u uloop
reset_timestep 0
fix 1 all neb
timestep
min_style quickmin
neb final %s
""" % (str(ao), str(initialfile), str(vacancyID), str(finalfile)))
        with open(lmpInFile, 'w') as input_file:
            input_file.write(txtin)
        # 10 partitions = 10 NEB replicas (see header note about core count)
        os.system('mpiexec -np 10 lmp_mpi -partition 10x1 -in %s' % lmpInFile)
        # get the info about the climbing replica
        Creplica_data, Creplica_num = mynebro.climbingReplica()
        Ebarrier = mynebro.energyBarrier(Creplica_data[-1])
        MI.append([rndvacancy[0], rndvacancy[1], NN[0], NN[1], Ebarrier])
    # NOTE(review): `neighbors` was reassigned by atomNN above, so this
    # appends NNs onto that per-sample list -- confirm this is intended.
    neighbors.append(NNs)
    MainInfo.append(MI)
# flatten results and export one row per (vacancy, neighbor, barrier)
MI = np.array(MI)
df = pd.DataFrame(MI)
df.to_excel(excel_writer=nebOutFile) | shortlab/2022-PEL-Heterogeneity-VCr-TaW | MEB/NEB_Vacancy.py | NEB_Vacancy.py | py | 3,924 | python | en | code | 0 | github-code | 36
31012134032 | from django.db import connection
def ingredient_name_and_amount_query(receipt_id):
    """Return the ingredients of a recipe as a list of dicts with keys
    ingredient_name / amount / amount_type."""
    with connection.cursor() as cursor:
        cursor.execute(f"SELECT ingredient_calories.ingredient_name, receipt_ingredient.amount, receipt_ingredient.amount_type \
                        FROM receipt_ingredient \
                        Inner Join ingredient_calories on \
                        receipt_ingredient.ingredient_name_frk_id = ingredient_calories.id \
                        where receipt_ingredient.recipe_frk_id = %s", (receipt_id,))
        # one dict per joined row, preserving result-set order
        return [
            {"ingredient_name": name, "amount": amount, "amount_type": amount_type}
            for name, amount, amount_type in cursor.fetchall()
        ]
# total_receipt_cal_per_100gr:
def total_receipt_cal_per_100gr(receipt_id):
    """Return the recipe's calories per 100 g/ml, computed in SQL as
    (sum of per-100 ingredient calories * 100) / (sum of ingredient amounts).

    NOTE(review): fetchone() returns None when the recipe has no
    ingredients, so the [0] subscript would raise TypeError; a zero total
    amount also makes the SQL division fail -- confirm callers guarantee
    at least one ingredient with a non-zero amount.
    """
    with connection.cursor() as cursor:
        cursor.execute(f"select (COALESCE(SUM (ingredient_calories.ingredient_calories_per_100_gr_or_ml), 0) * 100)/COALESCE(SUM(receipt_ingredient.amount),0) \
                       from receipt_ingredient, ingredient_calories \
                       WHERE ingredient_calories.id = receipt_ingredient.ingredient_name_frk_id and \
                       receipt_ingredient.recipe_frk_id = %s \
                       group by receipt_ingredient.recipe_frk_id", (receipt_id,))
        total_cal = cursor.fetchone()[0]
        return total_cal
# search_recipe_by_category:
def search_recipe_by_category(category):
    """Return every recipe in the given category as a list of
    {recipe_id, recipe_name, recipe_url} dicts (also echoed to stdout)."""
    with connection.cursor() as cursor:
        cursor.execute(f"SELECT recipe.id, recipe.recipe_name, recipe.pic_url \
                         FROM recipe \
                         where recipe.recipe_category = %s", (category,))
        matches = [
            {"recipe_id": recipe_id, "recipe_name": recipe_name, "recipe_url": pic_url}
            for recipe_id, recipe_name, pic_url in cursor.fetchall()
        ]
        # debug echo kept from the original implementation
        print(matches)
        return matches
| ravityeho/recipes | recipes_and_more_app/custom_queries.py | custom_queries.py | py | 2,179 | python | en | code | 0 | github-code | 36 |
18045957332 | import numpy as np
from random import randrange
from backend.rl_base_classes.mp_base_classes import FPATrimsAndTurns, MovingTargetFPATrimsAndTurns
from backend.rl_environments import DiscreteEnv
from backend.base_aircraft_classes.target_classes import MovingTarget
def run_actions(_initial_state, _env, _actions, plot=False):
    """Reset the environment to _initial_state, replay _actions until the
    episode terminates or the actions run out, and optionally plot the
    agent's 3-D state history afterwards."""
    _env.reset(initial_state=_initial_state)
    for action in _actions:
        _, _, done, _ = _env.step(action)
        if done:
            # episode finished before exhausting the action list
            break
    if plot:
        _env.agent.plot_state_history(style='3d')
# Script body: pick an agent type, build its environment, then stress-test it
# with 500 episodes of 100 random actions each.
agent = 'MovingTargetFPATrimsAndTurns'
# state layout assumed (altitude, FPA, heading, speed, ..., heading rate?) --
# TODO confirm against the agent class definitions
initial_state = np.array((19000, np.deg2rad(35.1), np.deg2rad(-179.6), 6000, 0, -np.pi/2))
target_state = np.array((00000, np.deg2rad(40.4), np.deg2rad(-86.9), 100, 0, 0))
if agent == 'FPATrimsAndTurns':
    env = DiscreteEnv(FPATrimsAndTurns(initial_state, target_state[:3]))
elif agent == 'MovingTargetFPATrimsAndTurns':
    target = MovingTarget(target_state)
    env = DiscreteEnv(MovingTargetFPATrimsAndTurns(initial_state, target))
for trial in range(500):
    # 100 uniformly random discrete actions per trial
    actions = [randrange(0, env.agent.n_actions) for __ in range(100)]
    run_actions(initial_state, env, actions, plot=False)
    print(f'Finished trial {trial}\n')
| hmdmia/HighSpeedRL | model_testing/test_mp_base_classes.py | test_mp_base_classes.py | py | 1,291 | python | en | code | 0 | github-code | 36 |
70786900903 | import copy, re
from django.core import validators
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
__all__ = ['EmptyValidator', 'KeysValidator', 'MD5ChecksumValidator']
class EmptyValidator(validators.RegexValidator):
    r"""Validator that rejects blank / whitespace-only values.

    Requires at least one non-whitespace character (``\S+``) and reports the
    failure with Django's standard ``blank`` error code.
    """
    regex = r'\S+'
    message = _('This field cannot be blank.')
    code = 'blank'
@deconstructible
class KeysValidator(object):
    """
    A validator designed for HStore to require, even restrict keys.
    Code mostly borrowed from:
    https://github.com/django/django/blob/master/django/contrib/postgres/validators.py
    """
    messages = {
        'missing_keys': _('Some keys were missing: %(keys)s'),
        'extra_keys': _('Some unknown keys were provided: %(keys)s'),
    }
    strict = False
    def __init__(self, required_keys=None, optional_keys=None, strict=False, messages=None):
        """Configure the validator.

        required_keys -- keys that must be present in the validated mapping
        optional_keys -- keys additionally allowed when strict is True
        strict        -- if True, reject keys outside required + optional
        messages      -- optional overrides for the error messages

        Raises ImproperlyConfigured when neither key set is given.
        """
        self.required_keys = set(required_keys or [])
        self.optional_keys = set(optional_keys or [])
        if not self.required_keys and not self.optional_keys:
            raise ImproperlyConfigured('You must set at least `required_keys` or `optional_keys`')
        self.strict = strict
        if messages is not None:
            # copy first so the class-level messages dict is never mutated
            self.messages = copy.copy(self.messages)
            self.messages.update(messages)
    def __call__(self, value):
        """Validate a mapping; raise ValidationError on missing/extra keys."""
        keys = set(value.keys())
        if self.required_keys:
            missing_keys = self.required_keys - keys
            if missing_keys:
                raise ValidationError(
                    self.messages['missing_keys'],
                    code='missing_keys',
                    params={'keys': ', '.join(missing_keys)})
        if self.strict:
            extra_keys = keys - self.required_keys - self.optional_keys
            if extra_keys:
                raise ValidationError(
                    self.messages['extra_keys'],
                    code='extra_keys',
                    params={'keys': ', '.join(extra_keys)})
    def __eq__(self, other):
        # Equality over full configuration; needed so Django migrations can
        # detect unchanged validators (hence @deconstructible above).
        return (
            isinstance(other, self.__class__)
            and self.required_keys == other.required_keys
            and self.optional_keys == other.optional_keys
            and self.messages == other.messages
            and self.strict == other.strict
        )
    def __ne__(self, other):
        # explicit __ne__ kept for Python 2 compatibility of the original
        return not self == other
class MD5ChecksumValidator(validators.RegexValidator):
    r"""Validator for lowercase hexadecimal MD5 checksums.

    The pattern is anchored so the whole value must be exactly 32 hex digits.
    RegexValidator matches with ``regex.search``, so the original unanchored
    ``[0-9a-f]{32}`` also accepted any longer string that merely contained a
    32-hex-digit run.
    """
    regex = re.compile(r'\A[0-9a-f]{32}\Z')
| davidfischer-ch/pytoolbox | pytoolbox/django/core/validators.py | validators.py | py | 2,541 | python | en | code | 38 | github-code | 36 |
75006697064 | from .models import TodoModel
from django import forms
class TodoForm(forms.ModelForm):
    """ModelForm for TodoModel exposing every field, with blank labels and
    Bootstrap-styled widgets for subject and details.

    NOTE(review): the placeholder text 'Enter Subjecct' contains a typo, and
    both widgets share the HTML id 'id_content' (ids must be unique) --
    user-facing strings left untouched here; confirm and fix upstream.
    """
    class Meta:
        model = TodoModel
        fields = '__all__'
        labels ={
            'subject':'',
            'details':'',
        }
        widgets = {
            'subject': forms.TextInput(attrs={'class': 'form-control bg-info rounded-5 p-3','rows': 2,'cols': 1,'placeholder': 'Enter Subjecct','id': 'id_content' }),
            'details': forms.Textarea(attrs={'class': 'form-control bg-info border-0 p-3','rows': 3,'cols': 1,'placeholder': 'Write Details','id': 'id_content' })
        } | SalmanMirSharin/Django-ToDo-App | todo/forms.py | forms.py | py | 611 | python | en | code | 0 | github-code | 36
27971059344 | from bs4 import BeautifulSoup
from urllib.request import urlopen,Request,urlretrieve,build_opener,install_opener
import os
import random
from main.models import Post,Images
from django.core.files import File
from django.contrib.auth.models import User
from main.categories import user_agent_list
class ScrapeFunction:
    """Small helper around urllib + BeautifulSoup for fetching pages.

    A random User-Agent is chosen once, at class-definition time, and shared
    by all instances.
    """
    # Single assignment: the original `user_agent=user_agent = ...` chained
    # assignment bound the same name twice.
    user_agent = random.choice(user_agent_list)
    headers = {'User-Agent': user_agent}

    def __init__(self):
        super().__init__()

    def getHTML(self, link):
        """Fetch ``link`` and return it parsed as a BeautifulSoup tree.

        First tries a plain request; if that fails for any reason (commonly
        a 403 from bot detection), retries once with the spoofed User-Agent
        header and lets any error from the retry propagate.
        """
        try:
            html = urlopen(link).read()
        except Exception:
            req = Request(link, headers=self.headers)
            html = urlopen(req).read()
        return BeautifulSoup(html, "html.parser")
class PamakeoPress:
    """Scraper for pamakiopress.rw that imports articles into local Post
    and Images Django models.

    NOTE(review): getThumbnail() stores parsed page content on
    ``self.contents`` as a side effect; getTitle/getCategory/getContents all
    depend on it having been called first for the same article.
    """
    def __init__(self):
        super().__init__()
    def news_items(self,link='http://pamakiopress.rw/'):
        """Scrape the front page and create a Post per new article.

        Returns the number of posts created (also used as an image-name
        suffix).  Errors are swallowed so a partial scrape still returns
        the count accumulated so far.
        """
        try:
            get_html=ScrapeFunction().getHTML(link)
            #get_html=ScrapeFunction().getHTML('file:///home/hacker/Desktop/Video%20Toutorial/Amakuru%20yizewe%20yuzuye%20kandi%20kugihe%20-%20Pamakio%20Press.html')
            #articles=get_html.find_all('article',class_='mh-loop-item')
        except Exception as e:
            pass
        result=list()
        # front-page section name -> HTML element id holding its articles
        from_={
            'Amakuru':'mh_magazine_lite_posts_focus-3',
            'Imikino':'mh_magazine_lite_posts_focus-4',
            'Politiki':'mh_custom_posts-12',
            'Imyidagaduro':'mh_custom_posts-9',
            'Ubuzima':'mh_custom_posts-8'
            #'utunu n\'utundi'
        }
        i=0
        try:
            for category in from_:
                #print(category)
                #print('--'*15)
                articles=get_html.find(id=from_[category])
                user=User.objects.all().first()
                for li in articles.find_all('figure'):
                    im=Images()
                    news_link=li.find('a')['href']
                    # side effect: getThumbnail populates self.contents,
                    # which getTitle/getCategory/getContents read below
                    thumbnail=self.getThumbnail(news_link)
                    title=self.getTitle()
                    result = urlretrieve(thumbnail)
                    im.image_from=title
                    im.image.save(os.path.basename(im.image_from+str(i) +'.png'), File(open(result[0], 'rb')))
                    post=Post.objects.filter(title=title).exists()
                    #check if post not exist
                    if post:
                        continue
                    post_item=Post()
                    category=self.getCategory()
                    post_from='PamakeoPress'
                    body=self.getContents()
                    #insert section
                    post_item.title=title
                    post_item.thumbnail=im.image
                    post_item.post_from=post_from
                    post_item.body=body
                    post_item.post_category=category
                    post_item.status='published'
                    post_item.author=user
                    post_item.save()
                    #print(user.username)
                    i=i+1
                    #continue
        except Exception as e:
            #scrape item section problem
            #print("error "+str(e))
            pass
        return i
    def getArticleLink(self,inThis):
        """Return the href of the first anchor inside the given element."""
        arlink=inThis.find('a')['href'].strip()
        return arlink
    def getThumbnail(self,l):
        """Fetch article page ``l``, cache its content area on
        self.contents, and return an absolute thumbnail URL."""
        other=ScrapeFunction().getHTML(l)
        self.contents=other.find(class_='mh-content')
        entry_thumb=self.contents.find(class_='entry-thumbnail')
        thumbnail=entry_thumb.find_all('img')[0]['src']
        if "https:" not in thumbnail:
            thumbnail="http://pamakiopress.rw/{}".format(thumbnail)
            #img_urls="file:///home/hacker/Downloads/{}".format(img_urls)
        return thumbnail
    def getContents(self):
        """Return the article body HTML with inline images downloaded,
        saved as Images rows, rewritten to local /media/ paths, and ad
        markup stripped."""
        contents=self.contents.find(class_='entry-content mh-clearfix')
        htmlContent=contents
        i=0
        for img in contents.find_all('img'):
            i=i+1
            img_urls = img['src']
            if "https:" not in img_urls:
                img_urls="http://pamakiopress.rw/{}".format(img_urls)
                #img_urls="file:///home/hacker/Downloads/{}".format(img_urls)
            if i==1:
                #thumb=im.image
                htmlContent=contents
            im=Images()
            # spoof a browser UA for urlretrieve via a global opener
            opener = build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            install_opener(opener)
            title=self.getTitle()
            im.image_from=title
            result=urlretrieve(img_urls)
            im.image.save(os.path.basename(im.image_from+str(i)+'.png'),File(open(result[0], 'rb')))
            im.save()
            htmlContent=str(htmlContent).replace(img['src'],'/media/'+str(im.image))
        htmlContent=str(htmlContent).replace("(adsbygoogle = window.adsbygoogle || []).push(\{\});",'')
        htmlContent=str(htmlContent).replace('<!-- 720x90 adsense -->','')
        htmlContent=str(htmlContent).replace('<!--CusAds1118-->','')
        return htmlContent
    def getCategory(self):
        """Return the article's category text, capitalized (requires a prior
        getThumbnail call)."""
        category=self.contents.find(class_='entry-meta-categories')
        category=category.text.strip()
        #print(category)
        return category.capitalize()
    def getTitle(self):
        """Return the article's title text (requires a prior getThumbnail
        call)."""
        title=self.contents.find(class_='entry-title')
        title=title.text.strip()
        #print(title)
        return title
#v=PamakeoPress()
#v.getThumbnail()
#v.getContents()
#v.getCategory()
#v.getTitle()
#v.news_items()
'''
for article in articles:
articlelink=self.getArticleLink(article.find('h3',class_='entry-title mh-loop-title'))
get_thumbnail=self.getThumbnail(l=articlelink)
title=self.getTitle()
body=self.getContents()
category=self.getCategory()
result.append({
'title':title,
'body':body,
'category':category,
'thumbnail':get_thumbnail,
'post_from':'PamakeoPress',
})
#print(get_thumbnail)
return result
#return get_html.getHTML(link)
result.append({
'thumbnail':thumbnail,
'title':title,
'category':self.getCategory(),
'post_from':'PamakeoPress',
'body':self.getContents(),
})
''' | itfidele/Bhano-Blog | operations/scrape.py | scrape.py | py | 6,352 | python | en | code | 1 | github-code | 36 |
74140233385 | s=input()
# Pad with a sentinel so s[1] pairs with s[0] safely (queries are 1-indexed).
s=' '+s
n=int(input())
# For each query (i, j) count positions t in (i, j] with s[t] == s[t-1],
# i.e. adjacent equal characters inside the substring.
# NOTE(review): this is O(|s|) per query, O(n*|s|) overall; a prefix-sum of
# adjacent-equal counts would answer each query in O(1).
for k in range(n):
    query=list(map(int,input().split()))
    i=query[0]
    j=query[1]
    count=0
    for t in range(i+1,j+1):
        if s[t]==s[t-1]:
            count+=1
    print(count) | fahadnayyar/Codeforces | cf/313btry.py | 313btry.py | py | 195 | python | en | code | 0 | github-code | 36
16159941777 | import asyncio
import atexit
import logging
import os
import signal
import subprocess
import time
import supriya.exceptions
logger = logging.getLogger("supriya.server")
class ProcessProtocol:
    """Base class for managing an scsynth server subprocess.

    Subclasses implement boot() and quit(); this base tracks running state
    and registers quit() with atexit so the server is shut down when the
    interpreter exits.  NOTE(review): atexit.register(self.quit) keeps every
    instance alive for the life of the process -- confirm that is acceptable.
    """
    def __init__(self):
        self.is_running = False
        atexit.register(self.quit)
    def boot(self, options, scsynth_path, port):
        # abstract: start the server process
        ...
    def quit(self):
        # abstract: stop the server process
        ...
class SyncProcessProtocol(ProcessProtocol):
    """Blocking (synchronous) scsynth process manager.

    boot() spawns scsynth and blocks, scanning its stdout line by line until
    the server reports ready, reports an error, or a 10 second timeout
    elapses.
    """
    ### PUBLIC METHODS ###
    def boot(self, options, scsynth_path, port):
        """Start scsynth with the given options on the given port.

        Raises supriya.exceptions.ServerCannotBoot on startup error, port
        collision, or timeout; on failure the spawned process group is
        interrupted and reaped before re-raising.
        """
        if self.is_running:
            return
        options_string = options.as_options_string(port)
        command = "{} {}".format(scsynth_path, options_string)
        logger.info("Boot: {}".format(command))
        # start_new_session so the whole process group can be signalled later
        self.process = subprocess.Popen(
            command,
            shell=True,
            stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE,
            start_new_session=True,
        )
        try:
            start_time = time.time()
            timeout = 10
            while True:
                line = self.process.stdout.readline().decode().rstrip()
                if line:
                    logger.info("Boot: {}".format(line))
                if line.startswith("SuperCollider 3 server ready"):
                    break
                elif line.startswith("ERROR:"):
                    raise supriya.exceptions.ServerCannotBoot(line)
                elif line.startswith(
                    "Exception in World_OpenUDP: bind: Address already in use"
                ):
                    raise supriya.exceptions.ServerCannotBoot(line)
                elif (time.time() - start_time) > timeout:
                    raise supriya.exceptions.ServerCannotBoot(line)
            self.is_running = True
        except supriya.exceptions.ServerCannotBoot:
            # best-effort teardown of the half-started server
            try:
                process_group = os.getpgid(self.process.pid)
                os.killpg(process_group, signal.SIGINT)
                self.process.terminate()
                self.process.wait()
            except ProcessLookupError:
                pass
            raise
    def quit(self):
        """Interrupt the scsynth process group and wait for it to exit."""
        if not self.is_running:
            return
        process_group = os.getpgid(self.process.pid)
        os.killpg(process_group, signal.SIGINT)
        self.process.terminate()
        self.process.wait()
        self.is_running = False
class AsyncProcessProtocol(asyncio.SubprocessProtocol, ProcessProtocol):
    """Asyncio-based scsynth process manager.

    boot() spawns the server via the event loop; ``boot_future`` resolves to
    True/False when startup succeeds/fails and ``exit_future`` resolves to
    None when the process exits.
    """

    ### INITIALIZER ###

    def __init__(self):
        ProcessProtocol.__init__(self)
        asyncio.SubprocessProtocol.__init__(self)
        self.boot_future = None
        self.exit_future = None

    ### PUBLIC METHODS ###

    async def boot(self, options, scsynth_path, port):
        """Launch scsynth on the running event loop (no-op if running)."""
        if self.is_running:
            return
        self.is_running = False
        options_string = options.as_options_string(port)
        command = "{} {}".format(scsynth_path, options_string)
        logger.info(command)
        loop = asyncio.get_running_loop()
        self.boot_future = loop.create_future()
        self.exit_future = loop.create_future()
        _, _ = await loop.subprocess_exec(
            lambda: self, *command.split(), stdin=None, stderr=None
        )

    def connection_made(self, transport):
        """asyncio callback: the subprocess pipes are connected."""
        self.is_running = True
        self.transport = transport

    def pipe_data_received(self, fd, data):
        """asyncio callback: scan server output for boot success/failure.

        Guards against a second matching line: set_result() on an
        already-done future raises InvalidStateError (the original had no
        guard here).
        """
        for line in data.splitlines():
            logger.info(line.decode())
            if self.boot_future.done():
                continue
            if line.strip().startswith(b"Exception"):
                self.boot_future.set_result(False)
            elif line.strip().startswith(b"SuperCollider 3 server ready"):
                self.boot_future.set_result(True)

    def process_exited(self):
        """asyncio callback: the server process has exited."""
        self.is_running = False
        self.exit_future.set_result(None)
        if not self.boot_future.done():
            self.boot_future.set_result(False)

    def quit(self):
        """Resolve any pending futures and close the transport."""
        if not self.is_running:
            return
        if not self.boot_future.done():
            self.boot_future.set_result(False)
        if not self.exit_future.done():
            # BUG FIX: the original read ``self.exit_future.set_result``
            # without calling it, so the future was never resolved here.
            self.exit_future.set_result(None)
        if not self.transport._loop.is_closed() and not self.transport.is_closing():
            self.transport.close()
        self.is_running = False
| MusicAsCode/supriya | supriya/realtime/protocols.py | protocols.py | py | 4,257 | python | en | code | null | github-code | 36 |
43418734139 | import findspark
findspark.init()
from operator import add
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType
from pyspark.sql import *
if __name__ == "__main__":
    # Build a Spark session and compute the 10 highest-rated businesses
    # (average review stars), writing them out tab-separated.
    spark = SparkSession \
        .builder \
        .appName("q4") \
        .getOrCreate()
    sc = SparkContext.getOrCreate()
    # colon-delimited dumps; tempColN columns are the literal ':' separators
    business = spark.read.format("csv").option("delimiter", ":").load("C:/Users/psait/Desktop/bda/business.csv").toDF("business_id", "tempCol1", "full_address", "tempCol3", "categories").drop("tempCol1", "tempCol3")
    review = spark.read.format("csv").option("delimiter", ":").load("C:/Users/psait/Desktop/bda/review.csv").toDF("review_id", "tempCol1", "user_id", "tempCol3", "business_id", "tempCol5", "stars").drop("tempCol1", "tempCol3", "tempCol5", "review_id")
    review = review.withColumn("stars", review["stars"].cast(IntegerType()))
    # join reviews onto businesses and average the star ratings per business
    jf = business.join(review, "business_id").select("business_id", "full_address", "categories", "stars").groupBy("business_id",
                                                                                                                  "full_address",
                                                                                                                  "categories").avg(
        "stars");
    # top 10 by average rating, descending
    opframe = jf.toDF("business_id", "full_address", "categories", "avg_rating").sort("avg_rating",ascending = False).take(10)
    op = sc.parallelize(list(opframe)).toDF()
    final = op.rdd.map(lambda x :str(x[0]) + "\t" + str(x[1]) + "\t" + str(x[2]) + "\t" + str(x[3]))
    final.repartition(1).saveAsTextFile("C:/Users/psait/Desktop/bda/q4.txt")
| saitejapeddi/pyspark | q4.py | q4.py | py | 1,671 | python | en | code | 0 | github-code | 36 |
27218739066 | num = int(input("임의의 자연수를 입력하시오. "))
def check_prime(n):
    """Return True if n is a prime number (n < 2 is never prime)."""
    if n < 2:
        return False
    for divisor in range(2, n):
        if n % divisor == 0:
            # early exit; the original kept looping after the first divisor
            return False
    return True


# Part 1: classify the number read above.
# Fixes over the original: 1 (and any n < 2) is no longer reported as prime,
# and a non-natural input no longer also prints a bogus primality verdict.
if num > 0:
    if check_prime(num):
        print("소수입니다.")
    else:
        print("소수가 아닙니다.")
else:
    print("자연수가 아닙니다.")

# Part 2: list every prime up to the requested bound.
# The original loop started at 4 and therefore never printed 2 or 3.
num = int(input("어디까지 소수를 출력할까요? "))
for candidate in range(2, num + 1):
    if check_prime(candidate):
        print(candidate)
| pithecuse527/python-practice | Ch.4/is_prime.py | is_prime.py | py | 561 | python | ko | code | 0 | github-code | 36 |
2153719879 | #!/usr/bin/env python
# coding: utf-8
"""
https://leetcode.com/submissions/detail/86181181/
"""
def longest_common_prefix(strs):
    """Return the longest common prefix of a list of strings ('' if none).

    :type strs: List[str]
    :rtype: str

    NOTE(review): Python 2 code (uses xrange); on Python 3 replace xrange
    with range.  The stdlib os.path.commonprefix implements the same idea.
    """
    if len(strs) == 0:
        return ''
    if len(strs) == 1:
        return strs[0]
    # p holds the common prefix of the strings seen so far
    p = strs[0][:]
    for i in xrange(1, len(strs)):
        s = strs[i]
        t = ''
        # shrink the candidate prefix against the next string
        for j in xrange(min(len(p), len(s))):
            if p[j] == s[j]:
                t += p[j]
            else:
                break
        if t == '':
            # no common prefix remains; short-circuit
            return ''
        else:
            p = t
    return p | lizzz0523/algorithm | python/longest_common_prefix/main.py | main.py | py | 598 | python | en | code | 4 | github-code | 36
27452373548 | import json
import os
import re
import sys
class Request(object):
    """Cog command invocation parsed from environment variables and stdin.

    Attributes populated on construction:
      options     -- dict of COG_OPT_* values (None when COG_OPTS is unset)
      args        -- list of COG_ARGV_* values (None when COG_ARGC is 0)
      input       -- JSON body read from stdin when stdin is not a TTY,
                     otherwise {}
      service_key -- COG_SERVICE_TOKEN
      step        -- COG_INVOCATION_STEP (None when unset)
    """
    def __init__(self, getenv=os.getenv):
        # getenv is injectable for testing
        self.getenv_ = getenv
        self.populate_options_()
        self.populate_args_()
        if sys.stdin.isatty() == False:
            self.input = json.load(sys.stdin)
        else:
            self.input = {}
        self.service_key = self.getenv_("COG_SERVICE_TOKEN")
        self.step = self.getenv_("COG_INVOCATION_STEP", None)
    def populate_options_(self):
        """Build self.options from COG_OPTS and COG_OPT_* variables."""
        names = self.getenv_("COG_OPTS")
        if names is None:
            self.options = None
            return
        # strip one pair of surrounding double quotes, if present
        names = re.sub(r'(^"|"$)', r'', names)
        names = names.split(",")
        self.options = {}
        for name in names:
            name = name.upper()
            # Options that have a list value will have a
            # COG_OPT_<NAME>_COUNT environment variable set,
            # indicating how many values there are. Scalar values will
            # have no such environment variable
            count = self.getenv_("COG_OPT_%s_COUNT" % name)
            if count is None:
                self.options[name] = self.getenv_("COG_OPT_%s" % name)
            else:
                count = int(count)
                values = []
                for i in range(count):
                    values.append(self.getenv_("COG_OPT_%s_%d" % (name, i)))
                self.options[name] = values
    def populate_args_(self):
        """Build self.args from COG_ARGC / COG_ARGV_<i> variables."""
        arg_count = int(self.getenv_("COG_ARGC", "0"))
        if arg_count == 0:
            self.args = None
            return
        self.args = []
        for i in range(arg_count):
            self.args.append(self.getenv_("COG_ARGV_%d" % i))
    def get_optional_option(self, name):
        """Return the option value for ``name`` (exact, upper-cased key) or
        None.  NOTE(review): raises AttributeError when COG_OPTS was unset
        and self.options is None -- confirm callers guard against that.
        """
        if name in self.options.keys():
            return self.options[name]
        return None
| operable/pycog3 | cog/request.py | request.py | py | 1,830 | python | en | code | 3 | github-code | 36 |
1098849667 | import logging
import traceback
import psycopg2
from django.db import IntegrityError
from apps.fyle_expense.models import Expense, ExpenseGroup
from apps.task_log.exceptions import MissingMappingsError
from apps.task_log.models import TaskLog
from apps.xero_workspace.models import EmployeeMapping, CategoryMapping, ProjectMapping, Invoice, InvoiceLineItem, \
FyleCredential, XeroCredential
from apps.xero_workspace.utils import connect_to_fyle, connect_to_xero
from fyle_jobs import FyleJobsSDK
from fyle_xero_integration_web_app import settings
LOGGER = logging.getLogger(__name__)
def schedule_expense_group_creation(workspace_id, user):
    """
    Schedule Expense Group creation via the Fyle jobs service.

    Creates a TaskLog in IN_PROGRESS, then registers a trigger-now job whose
    callback hits this app's /expense_group/trigger/ endpoint with the task
    log id.  On missing Fyle credentials the task log is marked
    'FYLE CONNECTION ERROR' instead of raising.

    :param workspace_id: id of the workspace to fetch expenses for
    :param user: user name/email recorded in the job description
    :return: None
    """
    task_log = TaskLog.objects.create(
        workspace_id=workspace_id,
        type="FETCHING EXPENSES",
        status="IN_PROGRESS"
    )
    try:
        fyle_sdk_connection = connect_to_fyle(workspace_id)
        jobs = FyleJobsSDK(settings.FYLE_JOBS_URL, fyle_sdk_connection)
        created_job = jobs.trigger_now(
            callback_url='{0}{1}'.format(
                settings.API_BASE_URL,
                '/workspace_jobs/{0}/expense_group/trigger/'.format(
                    workspace_id
                )
            ),
            callback_method='POST',
            object_id=task_log.id,
            payload={
                'task_log_id': task_log.id
            },
            job_description=f'Fetch expenses: Workspace id - {workspace_id}, user - {user}'
        )
        # remember the remote job id so the task can be tracked later
        task_log.task_id = created_job['id']
        task_log.save()
    except FyleCredential.DoesNotExist:
        LOGGER.error('Error: Fyle Credentials not found for this workspace.')
        task_log.detail = {
            'error': 'Please connect your Source (Fyle) Account'
        }
        task_log.status = 'FYLE CONNECTION ERROR'
        task_log.save()
def schedule_invoice_creation(workspace_id, expense_group_ids, user):
    """
    Schedule invoice creation for each selected expense group.

    For every matching ExpenseGroup, creates a 'CREATING INVOICE' TaskLog
    and registers a trigger-now job whose callback hits this app's
    /invoice/trigger/ endpoint for that group.

    :param workspace_id: id of the workspace owning the groups
    :param expense_group_ids: ids of the expense groups to invoice
    :param user: user name/email recorded in the job description
    :return: None
    """
    expense_groups = ExpenseGroup.objects.filter(
        workspace_id=workspace_id, id__in=expense_group_ids).all()
    fyle_sdk_connection = connect_to_fyle(workspace_id)
    jobs = FyleJobsSDK(settings.FYLE_JOBS_URL, fyle_sdk_connection)
    for expense_group in expense_groups:
        task_log = TaskLog.objects.create(
            workspace_id=expense_group.workspace.id,
            expense_group=expense_group,
            type='CREATING INVOICE',
            status='IN_PROGRESS'
        )
        created_job = jobs.trigger_now(
            callback_url='{0}{1}'.format(
                settings.API_BASE_URL,
                '/workspace_jobs/{0}/expense_group/{1}/invoice/trigger/'.format(
                    workspace_id,
                    expense_group.id
                )
            ),
            callback_method='POST',
            object_id=task_log.id,
            payload={
                'task_log_id': task_log.id
            },
            job_description=f'Create invoice: Workspace id - {workspace_id}, \
                user - {user}, expense group id - {expense_group.id}'
        )
        # remember the remote job id so the task can be tracked later
        task_log.task_id = created_job['id']
        task_log.save()
def fetch_expenses_and_create_groups(workspace_id, task_log, user):
    """
    Fetch paid expenses, group them by report id, and schedule invoicing.

    Only expenses updated since the last COMPLETE 'FETCHING EXPENSES' run
    are fetched (incremental sync).  On success the task log is marked
    COMPLETE and invoice creation is scheduled for the new groups; on
    missing Fyle credentials it is marked 'FYLE CONNECTION ERROR'; any
    other failure is logged and marked FATAL.

    :param workspace_id: id of the workspace to sync
    :param task_log: TaskLog row to record progress/errors on
    :param user: user name/email forwarded to invoice scheduling
    :return: list of ids of the expense groups created (empty on failure)
    """
    expense_group_ids = []
    try:
        updated_at = None
        task_logs = TaskLog.objects.filter(workspace__id=workspace_id, type='FETCHING EXPENSES',
                                           status='COMPLETE')
        if task_logs:
            # incremental: only pull expenses changed since the last sync
            updated_at = task_logs.latest().created_at
        expenses = Expense.fetch_paid_expenses(workspace_id, updated_at)
        expense_objects = Expense.create_expense_objects(expenses)
        connection = connect_to_fyle(workspace_id)
        expense_groups = ExpenseGroup.group_expense_by_report_id(expense_objects, workspace_id, connection)
        expense_group_objects = ExpenseGroup.create_expense_groups(expense_groups)
        for expense_group in expense_group_objects:
            expense_group_ids.append(expense_group.id)
        task_log.status = 'COMPLETE'
        task_log.detail = 'Expense groups created successfully!'
        task_log.save()
        schedule_invoice_creation(workspace_id, expense_group_ids, user)
    except FyleCredential.DoesNotExist:
        LOGGER.error('Error: Fyle Credentials not found for this workspace.')
        task_log.detail = {
            'error': 'Please connect your Source (Fyle) Account'
        }
        task_log.status = 'FYLE CONNECTION ERROR'
        task_log.save()
    except Exception:
        error = traceback.format_exc()
        LOGGER.exception(f'Error: Workspace id - {workspace_id}\n{error}')
        task_log.detail = {
            'error': 'Please contact system administrator.'
        }
        task_log.status = 'FATAL'
        task_log.save()
    return expense_group_ids
def check_mappings(expense_group):
    """Verify employee/category/project mappings exist for an expense group.

    For every missing mapping, an ``invalid=True`` placeholder row is created
    (duplicate-row races are swallowed) and a line is appended to the error
    text.  Raises MissingMappingsError listing everything missing; returns
    None when all mappings are present.
    """
    mappings_error = ""
    employee_email = expense_group.description.get("employee_email")
    if not EmployeeMapping.objects.filter(workspace=expense_group.workspace,
                                          employee_email=employee_email).exists():
        mappings_error += f"Employee mapping missing for employee_email: {employee_email} \n"
        try:
            EmployeeMapping.objects.create(workspace=expense_group.workspace,
                                           employee_email=employee_email, invalid=True)
        except (psycopg2.errors.UniqueViolation, IntegrityError):
            # another worker created the placeholder concurrently
            pass
    for expense in expense_group.expenses.all():
        if not CategoryMapping.objects.filter(workspace=expense_group.workspace,
                                              category=expense.category).exists():
            mappings_error += f"Category mapping missing for category name: {expense.category} \n"
            try:
                CategoryMapping.objects.create(workspace=expense_group.workspace, category=expense.category,
                                               sub_category=expense.sub_category,
                                               invalid=True)
            except (psycopg2.errors.UniqueViolation, IntegrityError):
                pass
        if expense.project is not None:
            if not ProjectMapping.objects.filter(workspace=expense_group.workspace,
                                                 project_name=expense.project).exists():
                mappings_error += f"Project mapping missing for project_name: {expense.project}"
                try:
                    ProjectMapping.objects.create(workspace=expense_group.workspace,
                                                  project_name=expense.project, invalid=True)
                except (psycopg2.errors.UniqueViolation, IntegrityError):
                    pass
    if mappings_error:
        raise MissingMappingsError(message=mappings_error)
def create_invoice_and_post_to_xero(expense_group, task_log):
    """
    Create a local Invoice (plus line items) for an expense group and post it to Xero.

    On success the Xero InvoiceID is stored on the local invoice, the expense
    group is marked 'Complete', and the task log records success. Each failure
    mode sets a distinct ``task_log.status`` so the UI can react accordingly.

    :param expense_group: ExpenseGroup to invoice
    :param task_log: TaskLog updated in place with the outcome
    """
    try:
        # Fail fast (MissingMappingsError) before creating any local records.
        check_mappings(expense_group)
        invoice_id = Invoice.create_invoice(expense_group)
        InvoiceLineItem.create_invoice_line_item(invoice_id, expense_group)
        xero_sdk_connection = connect_to_xero(expense_group.workspace.id)
        invoice_obj = Invoice.objects.get(id=invoice_id)
        invoice_data = generate_invoice_request_data(invoice_obj)
        response = post_invoice(invoice_data, xero_sdk_connection)
        # Persist the Xero-side id(s) returned by the API.
        for invoice in response["Invoices"]:
            invoice_obj.invoice_id = invoice["InvoiceID"]
            invoice_obj.save()
        expense_group.status = 'Complete'
        expense_group.save()
        task_log.invoice = invoice_obj
        task_log.detail = 'Invoice created successfully!'
        task_log.status = 'COMPLETE'
        task_log.save()
    except XeroCredential.DoesNotExist:
        # Destination account not connected for this workspace.
        LOGGER.error('Error: Xero Credentials not found for this workspace.')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': 'Please connect your Destination (Xero) Account'
        }
        task_log.status = 'XERO CONNECTION ERROR'
        task_log.save()
    except MissingMappingsError as error:
        # Raised by check_mappings(); message already lists every missing mapping.
        LOGGER.error(f'Error: {error.message}')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': error.message
        }
        task_log.status = 'MISSING MAPPINGS'
        task_log.save()
    except Exception:
        # Catch-all boundary: log the traceback and mark the task FATAL.
        error = traceback.format_exc()
        LOGGER.exception(f'Error: Workspace id - {task_log.workspace.id}\n{error}')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': 'Please contact system administrator.'
        }
        task_log.status = 'FATAL'
        task_log.save()
def generate_invoice_request_data(invoice):
    """Build the Xero ACCPAY invoice payload for *invoice*.

    :param invoice: invoice model instance with related ``invoice_line_items``
    :return: dict shaped as Xero's Invoices API expects
    """
    line_items = [
        {
            "Description": item.description,
            "Quantity": "1",
            "UnitAmount": str(item.amount),
            "AccountCode": item.account_code,
            "Tracking": [{
                "Name": item.tracking_category_name,
                "Option": item.tracking_category_option,
            }],
        }
        for item in invoice.invoice_line_items.all()
    ]
    return {
        "Type": "ACCPAY",
        "Contact": {
            "Name": invoice.contact_name,
        },
        "DateString": str(invoice.date),
        "InvoiceNumber": invoice.invoice_number,
        "LineAmountTypes": "Exclusive",
        "LineItems": line_items,
    }
def post_invoice(data, xero):
    """ Makes an API call to create invoices in Xero
    :param data: Request data for the invoice API (as built by generate_invoice_request_data)
    :param xero: Xero connection object (SDK client)
    :return response: response data from Xero API, a dict containing "Invoices"
    """
    response = xero.invoices.post(data)
    return response
| akshay-codemonk/fyle-xero | apps/task_log/tasks.py | tasks.py | py | 10,343 | python | en | code | 0 | github-code | 36 |
30503083715 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import shutil
import argparse
"""
for i in `find . -maxdepth 1 | awk -F '/' '{ print $2 }' | grep -v "\ "`; do
echo "-----------------------------------------$i---------------------------------";
python gen_dao.py --dir=$i ; done
可自动生成dao文件,并移动到public目录下[cdb_mtnc/public/db_access/xxx]:
1.通过sql文件生成model及mappers,并生成dao文件 【较少用】
python gen_dao.py --dir=test --sql=yes
2.根据models及mappers生成dao文件 【常用】
python gen_dao.py --dir=test
"""
# Running step counter shared by red_print(); incremented on every call.
step = 0
def red_print(msg):
    """Print *msg* prefixed with an auto-incremented step number, in yellow-on-black ANSI colors."""
    global step
    step = step + 1
    text = "\033[0;33;40m\t{}{}\033[0m".format(step, msg)
    print(text)
def yellow_print(msg):
    """Print *msg* as an "Error:" line in ANSI colors (escape 31 is actually red; the name is historical)."""
    print("\033[0;31;40m\tError: {}\033[0m".format(msg))
def copy_files(src_dir, dest_dir):
    """Recursively copy files from *src_dir* into *dest_dir*, skipping "daos.h".

    A file is rewritten only when it does not exist at the destination or its
    size differs from the source (cheap change detection). Destination
    directories are created on demand.
    """
    for name in os.listdir(src_dir):
        if name == "daos.h":
            continue
        src_file = os.path.join(src_dir, name)
        dest_file = os.path.join(dest_dir, name)
        if os.path.isfile(src_file):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            if not os.path.exists(dest_file) or os.path.getsize(dest_file) != os.path.getsize(src_file):
                # Use context managers so both handles are closed deterministically
                # (the original leaked two open file objects per copy).
                with open(src_file, "rb") as src, open(dest_file, "wb") as dst:
                    dst.write(src.read())
        if os.path.isdir(src_file):
            # Bug fix: the original recursed via the undefined name `copyFiles`,
            # raising NameError on the first subdirectory encountered.
            copy_files(src_file, dest_file)
def gen_dao():
    """Generate DAO sources inside ``args.dir`` via the ``cowboy`` tool, then move them.

    Changes the working directory to args.dir, creates a symlink to the shared
    ``cowboy`` binary if needed, runs it (optionally regenerating models/mappers
    from SQL when --sql was given), and on success calls move_dao_files().
    Exits with -1 on failure.
    """
    path = os.getcwd() + "/" + args.dir
    os.chdir(path)
    flag = False
    if not os.path.exists('cowboy'):
        red_print(".create cowboy link.")
        os.symlink("../../cowboy", "cowboy")
    if args.sql:
        red_print(".update models and mappers by sql ")
        # os.system returns the exit status: 0 (falsy) means success.
        flag = os.system("./cowboy -u -o -x -m")
    else:
        red_print(".gen dao.cpp from mappers and models ")
        flag = os.system("./cowboy -o -x -m")
    if not flag:
        red_print(".gen dao files done")
        move_dao_files()
    else:
        yellow_print("gen dao files err ")
        exit(-1)
def move_dao_files():
    """Move the generated dao/model/mapper files into public/db_access/<dir>.

    The destination is derived from the current path by splitting on "tools",
    so this assumes the script runs from somewhere under a .../tools/... tree.
    The destination directory is wiped and recreated before copying.
    """
    cur_path = os.getcwd()
    #yellow_print(path)
    path = cur_path.split("tools")[0]
    #yellow_print(path)
    # dest_dir is global so the __main__ block can list the new files afterwards.
    global dest_dir
    dest_dir = path + "public/db_access/" + args.dir
    red_print(".move dao files to: " + dest_dir )
    os.popen("rm -rf " + dest_dir)
    os.system("mkdir " + dest_dir)
    copy_files("dao", dest_dir)
    copy_files("model", dest_dir)
    copy_files("mapper", dest_dir)
def parse_options():
    """Parse command-line options into the module-global ``args``.

    --dir (required): subdirectory to generate DAO files from; must exist.
    --sql / -sql: when truthy, regenerate models and mappers from SQL first.
    Exits immediately if --dir does not exist.
    """
    global args
    info = """1.python gen_dao.py --dir=xxxx --sql=yes
              2.python gen_dao.py --dir=xxxx   """
    parser = argparse.ArgumentParser(usage=info, description="")
    parser.add_argument("--dir", required=True, help="\tgen dao.cpp from which dir.", dest="dir")
    parser.add_argument("-sql", "--sql", default=False, dest="sql", help="\tupdate models and mappers by sql.")
    args = parser.parse_args()
    if not os.path.exists(args.dir):
        yellow_print("dir " + args.dir + " not exits!")
        exit()
# Entry point: parse options, generate+move DAO files, then list the results.
if __name__ == '__main__':
    parse_options()
    gen_dao()
    red_print(".all done ")
    print("new files:__________________________")
    # dest_dir is set as a global by move_dao_files().
    os.system("ls -l --full-time " + dest_dir)
| feng1o/python_1 | tx_add/gen_dao.py | gen_dao.py | py | 3,263 | python | en | code | 1 | github-code | 36 |
6347327566 | import numpy as np
import torch
from homan.utils.nmr_renderer import OrthographicRenderer, PerspectiveRenderer
import neural_renderer as nr
def visualize_perspective(image, predictions, K=None):
    """Render predicted mesh(es) over *image* with a perspective camera.

    :param image: HxWxC image array; assumed float in [0, 1] — TODO confirm
    :param predictions: dict with "verts" and "faces" arrays
    :param K: camera intrinsics matrix
    :return: uint8 composited image
    """
    perspect_renderer = PerspectiveRenderer(image_size=max(image.shape))
    new_image = image.copy()
    # 2 * factor to be investigated !
    verts = 2 * torch.Tensor(predictions["verts"]).cuda().unsqueeze(0)
    faces = torch.Tensor(predictions["faces"]).cuda().unsqueeze(0)
    K = torch.Tensor(K).cuda().unsqueeze(0)
    # Zero translation: verts are presumably already in camera coordinates — verify.
    trans = torch.Tensor([0, 0, 0]).cuda().unsqueeze(0)
    # Composite each mesh in turn onto the running image.
    for i in range(len(verts)):
        v = verts[i:i + 1]
        new_image = perspect_renderer(vertices=v,
                                      faces=faces,
                                      color_name="blue",
                                      image=new_image,
                                      translation=trans,
                                      K=K)
    return (new_image * 255).astype(np.uint8)
def visualize_orthographic(image, predictions):
    """Render predicted mesh(es) over *image* with an orthographic camera.

    :param image: HxWxC image array; assumed float in [0, 1] — TODO confirm
    :param predictions: dict with "verts", "faces" and per-mesh "cams" arrays
    :return: uint8 composited image
    """
    ortho_renderer = OrthographicRenderer(image_size=max(image.shape))
    new_image = image.copy()
    verts = torch.Tensor(predictions["verts"]).cuda().unsqueeze(0)
    faces = torch.Tensor(predictions["faces"]).cuda().unsqueeze(0)
    cams = torch.Tensor(predictions["cams"]).cuda().unsqueeze(0)
    # Composite each mesh with its own camera onto the running image.
    for i in range(len(verts)):
        v = verts[i:i + 1]
        cam = cams[i:i + 1]
        new_image = ortho_renderer(vertices=v,
                                   faces=faces,
                                   cam=cam,
                                   color_name="blue",
                                   image=new_image)
    return (new_image * 255).astype(np.uint8)
def visualize_hand_object(model,
                          images,
                          verts_hand_gt=None,
                          verts_object_gt=None,
                          dist=3,
                          viz_len=7,
                          init=False,
                          gt_only=False,
                          image_size=640,
                          max_in_batch=2):
    """Render a hand+object model both over the input frames and from a top-down view.

    :param model: fitted model exposing render(), render_gt(), render_with_gt(),
        get_verts_object() and a .renderer with intrinsics K
    :param images: iterable of HxWx3 frames (uint8 0-255 or float 0-1)
    :param verts_hand_gt/verts_object_gt: optional ground-truth vertices; when
        given, predictions and GT are rendered together
    :param dist: camera distance offset for the top-down view
    :param gt_only: render only the ground truth
    :return: (stacked frontal composites as uint8, top-down renders as uint8)
    """
    # Pick the rendering mode: GT only, predictions only, or both overlaid.
    if gt_only:
        rends, masks = model.render_gt(
            model.renderer,
            verts_hand_gt=verts_hand_gt,
            verts_object_gt=verts_object_gt,
            viz_len=viz_len,
            max_in_batch=max_in_batch,
        )
    elif verts_hand_gt is None:
        rends, masks = model.render(model.renderer,
                                    viz_len=viz_len,
                                    max_in_batch=max_in_batch)
    else:
        rends, masks = model.render_with_gt(model.renderer,
                                            verts_hand_gt=verts_hand_gt,
                                            verts_object_gt=verts_object_gt,
                                            viz_len=viz_len,
                                            init=init,
                                            max_in_batch=max_in_batch)
    # NOTE(review): bs is computed but never used.
    bs = rends.shape[0]
    # Rendered frontal image
    new_images = []
    for image, rend, mask in zip(images, rends, masks):
        # Normalize uint8 images to [0, 1] before compositing.
        if image.max() > 1:
            image = image / 255.0
        h, w, c = image.shape
        # Pad to a square canvas matching the renderer output, then paste the
        # rendered pixels wherever the mask is set and crop back to h x w.
        L = max(h, w)
        new_image = np.pad(image.copy(), ((0, L - h), (0, L - w), (0, 0)))
        new_image[mask] = rend[mask]
        new_image = (new_image[:h, :w] * 255).astype(np.uint8)
        new_images.append(new_image)
    # Rendered top-down image: rotate by theta around the x-axis and place the
    # camera above the object's centroid.
    theta = 1.3
    x, y = np.cos(theta), np.sin(theta)
    obj_verts, _ = model.get_verts_object()
    mx, my, mz = obj_verts.mean(dim=(0, 1)).detach().cpu().numpy()
    K = model.renderer.K
    R2 = torch.cuda.FloatTensor([[[1, 0, 0], [0, x, -y], [0, y, x]]])
    t2 = torch.cuda.FloatTensor([mx, my + dist, mz])
    top_renderer = nr.renderer.Renderer(image_size=image_size,
                                        K=K,
                                        R=R2,
                                        t=t2,
                                        orig_size=1)
    top_renderer.background_color = [1, 1, 1]
    top_renderer.light_direction = [1, 0.5, 1]
    top_renderer.light_intensity_direction = 0.3
    top_renderer.light_intensity_ambient = 0.5
    # NOTE(review): background_color is assigned twice; the second is redundant.
    top_renderer.background_color = [1, 1, 1]
    # Same mode selection as above, but with rotate=True for the top-down view.
    # NOTE(review): model.renderer is passed here rather than top_renderer —
    # confirm whether the configured top_renderer was meant to be used.
    if verts_hand_gt is None:
        top_down, _ = model.render(model.renderer,
                                   rotate=True,
                                   viz_len=viz_len,
                                   max_in_batch=max_in_batch)
    elif gt_only:
        top_down, _ = model.render_gt(
            model.renderer,
            verts_hand_gt=verts_hand_gt,
            verts_object_gt=verts_object_gt,
            viz_len=viz_len,
            rotate=True,
            max_in_batch=max_in_batch,
        )
    else:
        top_down, _ = model.render_with_gt(model.renderer,
                                           verts_hand_gt=verts_hand_gt,
                                           verts_object_gt=verts_object_gt,
                                           rotate=True,
                                           viz_len=viz_len,
                                           init=init,
                                           max_in_batch=max_in_batch)
    top_down = (top_down * 255).astype(np.uint8)
    return np.stack(new_images), top_down
| hassony2/homan | homan/visualize.py | visualize.py | py | 5,329 | python | en | code | 85 | github-code | 36 |
9322300717 | # fit a second degree polynomial to the economic data
from numpy import arange,sin,log,tan
from pandas import read_csv
from scipy.optimize import curve_fit
from matplotlib import pyplot
# define the true objective function
def objective(x):
    """Fitted model: a small sinusoidal ripple superimposed on a linear trend."""
    amp = 0.01006304431397636
    freq = 0.009997006528342673
    phase = 0.010000006129223197
    slope = 0.3065914809778943
    intercept = 0.01033913912969194
    return amp * sin(freq * x + phase) + slope * x + intercept
# load the dataset
url = 'output.csv'
dataframe = read_csv(url, header=None)
data = dataframe.values
# choose the input and output variables
x, y = data[1:, 0], data[1:, -1]
# plot input vs output
pyplot.scatter(x, y)
#convert string to float
x=[float(i) for i in x]
# define a sequence of inputs between the smallest and largest known inputs
x_line = arange(min(x),max(x),1)
# calculate the output for the range
y_line = objective(x_line)
# create a line plot for the mapping function
pyplot.plot(x_line, y_line, color='red')
pyplot.show() | atul1503/curve-fitting | Custom_Function_Graph_Plotter_without_curve_fit.py | Custom_Function_Graph_Plotter_without_curve_fit.py | py | 907 | python | en | code | 0 | github-code | 36 |
41260980135 | from geometry_msgs.msg import Twist
import pyzbar.pyzbar as pyzbar
from datetime import datetime
import pyrealsense2 as rs
import numpy as np
import schedule
import rospy
import time
import cv2
# Region of interest (lower half of the 640x480 frame) used for line detection.
frame_crop_x1 = 0
frame_crop_y1 = 120
frame_crop_x2 = 639
frame_crop_y2 = 479
# HoughLinesP tuning parameters.
minLineLength = 30
maxLineGap = 15
# Robot command state published on /cmd_vel.
speed = 0
angle = 0
avr_x = 0
turn = -0.5
code_start = "start"
barcode_data_line_QR = []
text_0 = ""
text_1 = ""
## When the same QR code is recognized again: stop (speed 0) or keep moving.
view_same_QR = 0
view_start_QR_and_no_product = 0
obstacle_view = 0
# Camera 0: line/floor camera; camera 1: product camera (reduced resolution).
cap_0 = cv2.VideoCapture(2)
cap_1 = cv2.VideoCapture(4)
cap_1.set(cv2.CAP_PROP_FRAME_HEIGHT,180)
cap_1.set(cv2.CAP_PROP_FRAME_WIDTH,320)
def cam_0_read():
    """Grab one frame from the line camera and publish it via globals.

    Sets frame_0/original (BGR) and two grayscale copies: gray_line_0 for
    line detection and gray_line_1 for QR decoding.
    """
    global retval_0, frame_0, original, gray_line_0, gray_line_1
    retval_0, frame_0 = cap_0.read()
    original = frame_0
    gray_line_0 = cv2.cvtColor(frame_0, cv2.COLOR_BGR2GRAY)
    gray_line_1 = cv2.cvtColor(frame_0, cv2.COLOR_BGR2GRAY)
def cam_1_read():
    """Grab one frame from the product camera; sets frame_1 and its grayscale copy."""
    global retval_1, frame_1, gray_product_0
    retval_1, frame_1 = cap_1.read()
    gray_product_0 = cv2.cvtColor(frame_1, cv2.COLOR_BGR2GRAY)
def cam_0_use_line():
    """Detect the floor line in the cropped ROI and compute the steering offset.

    Pipeline: crop -> box blur -> binary threshold -> Canny -> HoughLinesP.
    Picks the detected segment whose midpoint is closest to the image center,
    draws debug overlays on `original`, and sets global `theta` to the
    horizontal offset as a percentage of frame width (-50 means "no line").
    """
    global retval_0, frame_0, original, theta
    blurred = gray_line_0[frame_crop_y1:frame_crop_y2,frame_crop_x1:frame_crop_x2]
    blurred = cv2.boxFilter(blurred, ddepth=-1, ksize=(31,31))
    retval2 ,blurred = cv2.threshold(blurred, 100, 255, cv2.THRESH_BINARY)
    edged = cv2.Canny(blurred, 85, 85)
    lines = cv2.HoughLinesP(edged,1,np.pi/180,10,minLineLength,maxLineGap)
    max_diff = 1000
    final_x = 0
    # NOTE(review): the nested duplicate `if lines is not None` is redundant.
    if ( lines is not None ):
        if ( lines is not None ):
            add_line = 0
            for line in lines:
                x1, y1, x2, y2 = line[0]
                # Draw each candidate segment back in full-frame coordinates.
                cv2.line(original,(x1+frame_crop_x1,y1+frame_crop_y1),(x2+frame_crop_x1,y2+frame_crop_y1),(0,255,0),3)
                mid_point = ( x1 + x2 ) / 2
                diff = abs((640/2) - mid_point)
                # Track the segment midpoint closest to the image center.
                if ( max_diff > diff ) :
                    max_diff = diff
                    final_x = mid_point
                add_line = add_line + final_x
            average_x = add_line / len(lines)
            if ( int(average_x) != 0 ) :
                # Red dot at the chosen line position.
                original = cv2.circle(original,(int(average_x),int((frame_crop_y1+frame_crop_y2)/2)),5,(0,0,255),-1)
        # Draw the ROI rectangle for debugging.
        original = cv2.rectangle(original,(int(frame_crop_x1),int(frame_crop_y1)),(int(frame_crop_x2),int(frame_crop_y2)),(0,0,255),1)
        frame_0 = original
        # Offset from the 320px center, scaled to roughly [-50, 50].
        theta = int(( int(average_x) - 320.0 ) / 640.0 * 100)
    if ( lines is None ):
        # Sentinel: no line detected this frame.
        theta = -50
def cam_0_use_qrcode():
    """Decode QR codes on the floor camera frame; sets barcode_data_line_QR.

    When no code is visible, barcode_data_line_QR is the sentinel "QR_X".
    Draws a box and label for each detected code on frame_0.
    """
    global barcode_data_line_QR, barcode_type_line_QR
    decoded_line_QR = pyzbar.decode(gray_line_1)
    for _ in decoded_line_QR:
        x, y, w, h = _.rect
        barcode_data_line_QR = _.data.decode("utf-8")
        barcode_type_line_QR = _.type
        cv2.rectangle(frame_0, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text_0 = '%s (%s)' % (barcode_data_line_QR, barcode_type_line_QR)
        cv2.putText(frame_0, text_0, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)
    if decoded_line_QR == [] :
        barcode_data_line_QR = "QR_X"
def cam_1_use_qrcode():
    """Decode QR codes on the product camera frame; sets barcode_data_product_QR.

    When no code is visible, barcode_data_product_QR is the sentinel "QR_X".
    Draws a box and label for each detected code on frame_1.
    """
    global barcode_data_product_QR, barcode_type_product_QR
    decoded_product_QR = pyzbar.decode(gray_product_0)
    for _ in decoded_product_QR:
        a, b, c, d = _.rect
        barcode_data_product_QR = _.data.decode("utf-8")
        barcode_type_product_QR = _.type
        cv2.rectangle(frame_1, (a, b), (a + c, b + d), (0, 0, 255), 2)
        text_1 = '%s (%s)' % (barcode_data_product_QR, barcode_type_product_QR)
        cv2.putText(frame_1, text_1, (a, b), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)
    if decoded_product_QR == [] :
        barcode_data_product_QR = "QR_X"
def cam_lidar_read():
    """Start the RealSense depth stream (320x240, z16) and expose the pipeline globally."""
    global pipeline
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 320, 240, rs.format.z16, )
    pipeline.start(config)
def cam_lidar_use():
    """Read one depth frame and build a 32-column ASCII occupancy map.

    Pixels closer than 1 m are binned into 32 horizontal buckets (10 px wide);
    every 20 rows the bucket counts are rendered as digits 1-8 (space = empty)
    and appended to ``image_all`` (12 rows total). Also accumulates globals:
    ``add_num`` (total obstacle weight, columns 1+), ``add_edge_num`` (left
    edge, column 1 only) and ``add_edge_remain_num`` (columns 3+).
    """
    global add_num, add_edge_num, add_edge_remain_num, image_all, left_view
    frames = pipeline.wait_for_frames()
    depth = frames.get_depth_frame()
    coverage = [0]*32
    image_all = []
    add_num = 0
    add_edge_num = 0
    add_edge_remain_num = 0
    for y in range(240):
        for x in range(320):
            dist = depth.get_distance(x, y)
            # Count pixels with a valid reading under one meter.
            if 0 < dist < 1:
                coverage[x//10] += 1
        # Bug fix: the original used `y%20 is 19`, comparing ints by identity;
        # that is implementation-dependent and a SyntaxWarning since 3.8.
        if y % 20 == 19:
            line = ""
            for c in coverage:
                # Max count per bucket is 10*20 = 200, so c//25 is at most 8.
                line += " 12345678"[c//25]
            coverage = [0]*32
            image_all.append(line)
            for a in range(1, 32):
                if line[a] != " ":
                    add_num += int(line[a])
            # Column 1 only (range(1, 2)): the left edge.
            for a in range(1, 2):
                if line[a] != " ":
                    add_edge_num += int(line[a])
            for a in range(3, 32):
                if line[a] != " ":
                    add_edge_remain_num += int(line[a])
def speed_and_angle_make():
    """Convert the line offset (global theta) into steering and forward speed.

    Steering is proportional to -theta; forward speed slows down as the
    steering magnitude grows (0.3 max, sharper turns are driven slower).
    """
    global angle, speed
    angle = round((-theta) * (0.012), 2)
    speed = 0.3 - abs(angle * 0.2)
def speed_and_angle_turn():
    """Rotate in place: zero forward speed, turn direction from global `turn`."""
    global angle, speed
    speed = 0
    angle = turn
def speed_and_angle_main():
    """Main drive state machine: line following, obstacle avoidance, QR stops.

    States (globals): obstacle_view (avoiding an obstacle), view_same_QR
    (stopped because floor QR matches the carried product QR),
    view_start_QR_and_no_product (stopped at "start" with no product).
    add_num > 10 from the depth map means an obstacle is ahead.
    """
    global angle, speed, barcode_data_product_QR, barcode_data_line_QR, turn, obstacle_view, view_same_QR, view_start_QR_and_no_product
    # Floor QR codes can switch the in-place turn direction.
    if barcode_data_line_QR == "right_turn" : turn = -0.5
    if barcode_data_line_QR == "left_turn" : turn = 0.5
    if view_same_QR == 0 and view_start_QR_and_no_product == 0 :
        if obstacle_view == 0 :
            # theta == -50 is the "no line detected" sentinel.
            if theta != -50:
                if add_num <= 10 : speed_and_angle_make()
                # Stop when the floor QR matches the carried product QR.
                if add_num <= 10 and barcode_data_product_QR != "QR_X" and barcode_data_product_QR == barcode_data_line_QR : view_same_QR = 1
                # Stop at "start" when carrying no product.
                if add_num <= 10 and barcode_data_product_QR == "QR_X" and barcode_data_line_QR == "start" : view_start_QR_and_no_product = 1
                # Obstacle detected: halt and enter avoidance mode.
                if add_num > 10 :
                    speed = 0
                    obstacle_view = 1
            if theta == -50 : speed_and_angle_turn()
        if obstacle_view == 1 :
            # Steer around the obstacle: turn right while anything is visible,
            # creep forward; turn left again once the view clears.
            if add_num != 0 :
                if add_edge_num > 0 and add_edge_remain_num == 0 : angle = -0.4
                if add_edge_remain_num > 0 and add_edge_num == 0 : angle = -0.4
                if add_edge_remain_num > 0 and add_edge_num > 0 : angle = -0.4
                speed = 0.2
            if add_num == 0 : angle = 0.4
            # Back on the line with a clear view: avoidance done.
            if theta != -50 and add_num == 0 : obstacle_view = 0
    if view_same_QR == 1 or view_start_QR_and_no_product == 1:
        # Stopped state: stay put until the product situation changes.
        speed = 0
        angle = 0
        if view_same_QR == 1 and barcode_data_product_QR == "QR_X" : view_same_QR = 0
        if view_start_QR_and_no_product == 1 and barcode_data_product_QR != "QR_X" : view_start_QR_and_no_product = 0
def talker():
    """ROS node main loop: publish /cmd_vel and run the full sensor pipeline.

    Each iteration publishes the current (speed, angle), reads both cameras
    and the depth sensor, updates the drive state machine, prints the depth
    ASCII map plus debug state, and shows both camera windows. ESC exits.
    """
    global speed, angle
    rospy.init_node("line_qr_sensor")
    pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
    msg = Twist()
    cam_lidar_read()
    while not rospy.is_shutdown():
        msg.linear.x = speed
        msg.angular.z = angle
        pub.publish(msg)
        cam_0_read()
        cam_0_use_line()
        cam_0_use_qrcode()
        cam_1_read()
        cam_1_use_qrcode()
        cam_lidar_use()
        speed_and_angle_main()
        # Print the 12-row depth occupancy map.
        for y in range(12):
            print(image_all[y])
        print(add_num)
        # Debug labels (Korean): obstacle flag, theta value, turn value.
        print("장애물 : ", obstacle_view)
        print("세타값 : ", theta)
        print("턴값 : ", turn)
        cv2.imshow('frame_0', frame_0)
        cv2.imshow('frame_1', frame_1)
        key = cv2.waitKey(25)
        if key == 27: #ESC
            break
if __name__ == "__main__":
try:
talker()
except rospy.ROSInterruptException:
pass | LEEJUNHO95/ROS_project | line_detect.py | line_detect.py | py | 8,101 | python | en | code | 5 | github-code | 36 |
70153811305 | # -*- coding: utf-8 -*-
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from employee.models import Employee
from employee.serializers import employee_serializer
# Create your views here.
@csrf_exempt
def employee_list(request):
    """
    GET: return all employees from the database as a JSON array.
    POST: validate the request body and save a new employee
    (201 on success, 400 with serializer errors otherwise).
    """
    if request.method == 'GET':
        # Get all objects from the database
        employee = Employee.objects.all()
        # Serialize the whole queryset
        serializer = employee_serializer(employee, many=True)
        # safe=False allows a top-level JSON list (not just a dict)
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        # Parse the posted JSON body
        data = JSONParser().parse(request)
        # Serialize/validate it
        serializer = employee_serializer(data=data)
        if serializer.is_valid():
            # if it is valid, save to the database
            serializer.save()
            return JsonResponse(serializer.data, status=201)
        # otherwise return a 400 Bad Request with the validation errors
        return JsonResponse(serializer.errors, status=400)
@csrf_exempt
def employee_detail(request, pk):
    """
    GET / PUT / DELETE a specific employee identified by primary key *pk*.
    Returns 404 if the employee does not exist.
    """
    try:
        # Look up the employee by primary key
        employee = Employee.objects.get(pk=pk)
    except Employee.DoesNotExist:
        # Unknown id
        return HttpResponse(status=404)
    # if the request is GET
    if request.method == 'GET':
        # return the employee as JSON
        serializer = employee_serializer(employee)
        return JsonResponse(serializer.data)
    # if the request is PUT
    elif request.method == 'PUT':
        # parse the request body
        data = JSONParser().parse(request)
        # bind the new data to the existing employee
        serializer = employee_serializer(employee, data=data)
        if serializer.is_valid():
            # save if valid
            serializer.save()
            return JsonResponse(serializer.data)
        # otherwise return 400 - Bad Request
        return JsonResponse(serializer.errors, status=400)
    # if the request is DELETE
    elif request.method == 'DELETE':
        # delete the employee row
        employee.delete()
        # return 204 No Content
        return HttpResponse(status=204)
41237480224 | from fastapi import FastAPI, APIRouter, HTTPException, status
from pydantic import BaseModel,json
from api.settings import base_url
import pandas as pd
import requests
import json
from typing import List, Optional
from routers.users import user_login
import datetime
vehicle_router = APIRouter(tags=["Vehicle"])
@vehicle_router.get("/labels/{labelId}")
def get_color(labelId):
    """Proxy a label lookup to the upstream Joomla-style labels API.

    Authenticates via user_login() and forwards the bearer token.
    Returns the upstream JSON body unchanged.
    """
    url = f'{base_url}/dev/index.php/v1/labels/{labelId}'
    access_token = user_login()
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json"}
    response = requests.request("GET",url, headers=headers)
    return response.json()
@vehicle_router.get("/filter/hu")
def filter():
    """Return (and persist to filter_hu.json) all vehicles that have an HU date.

    Reads the cached vehicle list written by post_data() to response.json.
    NOTE: the function name shadows the builtin ``filter``; kept for
    backward compatibility with the router's generated operation id.
    """
    with open("response.json", "r", encoding="utf-8") as file:
        data = json.load(file)
    # Idiom fix: compare against None with `is not`, not `!=`.
    filtered_data = [x for x in data if x["hu"] is not None]
    with open("filter_hu.json", "w") as write_file:
        json.dump(filtered_data, write_file, indent=4)
    return filtered_data
@vehicle_router.get("/hu")
def check_hu(datas):
    """Annotate each record with a colorCode based on the HU date's age in months.

    Green (#007500) when under 3 months, orange (#FFA500) under 12, red
    (#b30000) otherwise. Records whose "hu" or "colored" is falsy are left
    untouched. Mutates *datas* in place and returns it.
    """
    today = datetime.datetime.strptime(
        datetime.datetime.today().strftime('%Y-%m-%d'), '%Y-%m-%d')
    for entry in datas:
        if not (entry["hu"] and entry["colored"]):
            continue
        hu_date = datetime.datetime.strptime(entry["hu"], "%Y-%m-%d")
        # Whole-month difference, ignoring the day of month.
        months = (today.year - hu_date.year) * 12 + (today.month - hu_date.month)
        if months < 3:
            entry["colorCode"] = "#007500"
        elif months < 12:
            entry["colorCode"] = "#FFA500"
        else:
            entry["colorCode"] = "#b30000"
    return datas
@vehicle_router.post("/upload")
def upload(kys:List[str],url, colored:Optional[bool] = True):
    """Import a semicolon-separated CSV of vehicles, color-code it and cache it.

    NOTE(review): this endpoint is broken as written. ``df.to_json`` returns a
    JSON *string*, yet the code then calls ``check_hu(datas)`` (which indexes
    dict records), ``datas.to_excel(...)`` and ``datas["data"]`` on that string
    — all of which fail at runtime. It presumably should operate on the parsed
    records (e.g. ``json.loads(datas)["data"]``); left unchanged pending intent.
    """
    csvDataFrame = pd.read_csv(url, encoding='utf8',sep=';',header=None,names=kys)
    df = pd.DataFrame(csvDataFrame)
    df["colored"] = colored
    # Returns a JSON string, not a DataFrame/records — see NOTE above.
    datas = df.to_json(orient="table",indent=4)
    check_hu(datas)
    # isoformat(sep, timespec): e.g. "2020-01-01-12".
    current_date = datetime.datetime.now().isoformat('-',"hours")
    with pd.ExcelWriter(f"vehicles_{current_date}.xlsx") as writer:
        datas.to_excel(writer)
    dataframe = pd.read_excel(f"vehicles_{current_date}.xlsx")
    dataframe.to_json('vehicle.json', index=False, orient="table", indent=4)
    new_data = datas["data"]
    return new_data
@vehicle_router.post("/vehicle")
def post_data():
    """Fetch active vehicles from the upstream API and cache them locally.

    Writes two files: sorted_vehicle.json (the cached vehicle.json "data"
    sorted by gruppe, None last) and response.json (the raw upstream body).
    Returns the upstream JSON unchanged.
    """
    url = f"{base_url}/dev/index.php/v1/vehicles/select/active"
    access_token = user_login()
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json"}
    response = requests.request("GET",url,headers= headers)
    data = json.load(open('vehicle.json'))
    new_data = data["data"]
    # Sort key (is-None, value) places records without a gruppe at the end.
    with open("sorted_vehicle.json", "w", encoding="utf-8") as file1:
        json.dump(sorted(new_data, key=lambda x: (x["gruppe"] is None, x["gruppe"])), file1 ,indent=4)
    with open("response.json", "w",encoding="utf-8") as file:
        json.dump(response.json(), file, indent=4)
    return response.json()
@vehicle_router.get("/search")
def search_field(key):
    """Return kurzname/info plus the requested field for each cached vehicle.

    Raises 404 as soon as a record lacks a truthy value for *key* (behavior
    kept from the original, which bailed out on the first missing value).
    """
    search_data = []
    # Bug fix: the original opened the misspelled "reesponse.json";
    # post_data() writes "response.json".
    with open("response.json", "r", encoding="utf-8") as file:
        data = json.load(file)
    for src in data:
        if src[key]:
            # Bug fix: create a fresh dict per record. The original reused a
            # single dict, so every appended entry aliased the last record.
            s_data = {
                key: src[key],
                "kurzname": src["kurzname"],
                "info": src["info"],
            }
            search_data.append(s_data)
        else:
            # Bug fix: raise (not return) so FastAPI actually sends a 404.
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Data does not exist.")
    return search_data
@vehicle_router.post("/merge")
def merge_data(url1, url2)->dict:
    """Concatenate two semicolon-separated CSVs, de-duplicate, cache and return.

    NOTE(review): the return annotation says dict but a DataFrame is returned;
    FastAPI will attempt to serialize it — confirm the intended shape.
    NOTE(review): error_bad_lines was removed in pandas 2.x (use on_bad_lines).
    NOTE(review): the local name merge_data shadows this function itself.
    """
    csvData1 = pd.read_csv(url1, encoding='utf8',sep=';',error_bad_lines=False)
    csvData2 = pd.read_csv(url2, encoding='utf8',sep=';',error_bad_lines=False)
    df1 = pd.DataFrame(csvData1)
    df2 = pd.DataFrame(csvData2)
    merge_data = pd.concat([df1,df2]).drop_duplicates().reset_index(drop=True)
    merge_data.to_json("merged_data.json", indent=4)
    return merge_data
| deryacortuk/FastAPI-Pandas | routers/vehicles.py | vehicles.py | py | 4,489 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        Merge sorted nums2 into sorted nums1 in place (LeetCode 88).

        :type nums1: List[int]  (length m + n; trailing n slots are filler)
        :type m: int  (count of valid elements in nums1)
        :type nums2: List[int]
        :type n: int  (count of elements in nums2)
        :rtype: List[int]  (nums1, returned for convenience)
        """
        # Bug fix: the original returned nums2 when n == 0, handing back the
        # wrong (empty) list instead of nums1.
        # Fill nums1 from the back so no unread element is overwritten.
        write = len(nums1) - 1
        i, j = m - 1, n - 1
        while j >= 0:
            # Take the larger tail element; once nums1 is exhausted (i < 0),
            # drain the remainder of nums2. This folds the original's
            # post-loop slice copy into the main loop.
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[write] = nums1[i]
                i -= 1
            else:
                nums1[write] = nums2[j]
                j -= 1
            write -= 1
        return nums1
if __name__ == '__main__':
    # NOTE(review): each set of assignments overwrites the previous one, so
    # only the final test case actually runs.
    nums1 = [1, 2, 3, 0, 0, 0]
    m = 3
    nums2 = [2, 5, 6]
    n = 3
    nums1 = [2,0]
    m = 1
    nums2 = [1]
    n = 1
    # NOTE(review): this last case violates the usual len(nums1) == m + n
    # precondition (1 + 1 > 1) — presumably a typo; confirm intent.
    nums1 = [0]
    m = 1
    nums2 = [1]
    n = 1
    print( Solution().merge(nums1, m, nums2, n) )
| pi408637535/Algorithm | com/study/algorithm/other/Merge Sorted Array.py | Merge Sorted Array.py | py | 1,039 | python | en | code | 1 | github-code | 36 |
72663792744 | # mypy: ignore-errors
import streamlit as st
from bokeh.models import CustomJS
from bokeh.models.widgets import Button
from streamlit_bokeh_events import streamlit_bokeh_events
REC_GIF = "ai_talks/assets/icons/rec_on.gif"
def get_js_code(lang: str) -> str:
    """Build the in-browser Web Speech API snippet for the given language code.

    The returned JavaScript starts webkitSpeechRecognition and reports progress
    back to streamlit-bokeh-events via three CustomEvents: GET_ONREC
    (start/running/stop), GET_TEXT (final transcript + a random session id)
    and GET_INTRM (interim transcript).
    """
    # The recognition language is spliced in between two literal JS chunks.
    return """
    var value = "";
    var rand = 0;
    var recognition = new webkitSpeechRecognition();
    recognition.continuous = false;
    recognition.interimResults = true;
    """ + f"recognition.lang = '{lang}';" + """
    document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'start'}));
    recognition.onspeechstart = function () {
        document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'running'}));
    }
    recognition.onsoundend = function () {
        document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'stop'}));
    }
    recognition.onresult = function (e) {
        var value2 = "";
        for (var i = e.resultIndex; i < e.results.length; ++i) {
            if (e.results[i].isFinal) {
                value += e.results[i][0].transcript;
                rand = Math.random();
            } else {
                value2 += e.results[i][0].transcript;
            }
        }
        document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: {t:value, s:rand}}));
        document.dispatchEvent(new CustomEvent("GET_INTRM", {detail: value2}));
    }
    recognition.onerror = function(e) {
        document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'stop'}));
    }
    recognition.start();
    """
def show_speak_btn() -> Button:
    """Create the Bokeh "speak" button wired to the speech-recognition JS snippet."""
    stt_button = Button(label=st.session_state.locale.speak_btn, button_type="success", width=100)
    stt_button.js_on_event("button_click", CustomJS(code=get_js_code(st.session_state.locale.lang_code)))
    return stt_button
def get_bokeh_result() -> dict:
    """Render the speak button and collect GET_TEXT/GET_ONREC/GET_INTRM events.

    Returns whatever event payloads streamlit_bokeh_events captured this rerun
    (may be None/empty before any interaction).
    """
    stt_button = show_speak_btn()
    return streamlit_bokeh_events(
        bokeh_plot=stt_button,
        events="GET_TEXT,GET_ONREC,GET_INTRM",
        key="listen",
        refresh_on_update=False,
        override_height=75,
        debounce_time=0,
    )
def show_voice_input() -> None:
    """Streamlit widget: voice input with live transcript display.

    Keeps the accumulated transcript in st.session_state.input, where
    "session" is a random id from the JS side used to de-duplicate repeated
    GET_TEXT events across Streamlit reruns. On recording stop, the final
    transcript is promoted to st.session_state.user_text.
    """
    if "input" not in st.session_state:
        st.session_state.input = {"text": "", "session": 0}
    result = get_bokeh_result()
    tr = st.empty()
    tr.code(st.session_state.input["text"])
    if result:
        # Accept a final transcript only once per JS session id.
        if "GET_TEXT" in result and (
                result.get("GET_TEXT")["t"] != "" and result.get("GET_TEXT")["s"] != st.session_state.input["session"]):
            st.session_state.input["text"] = result.get("GET_TEXT")["t"]  # type: ignore
            tr.code(st.session_state.input["text"])
            st.session_state.input["session"] = result.get("GET_TEXT")["s"]
        # Show interim (not yet final) words appended to the stable text.
        if "GET_INTRM" in result and result.get("GET_INTRM") != "":
            tr.code(st.session_state.input["text"] + " " + result.get("GET_INTRM"))
        if "GET_ONREC" in result:
            # Recording indicator GIF while the recognizer is active.
            if result.get("GET_ONREC") == "start":
                st.image(REC_GIF)
                st.session_state.input["text"] = ""
            elif result.get("GET_ONREC") == "running":
                st.image(REC_GIF)
            elif result.get("GET_ONREC") == "stop" and st.session_state.input["text"] != "":
                st.session_state.user_text = st.session_state.input["text"]
                st.session_state.input["text"] = ""
| dKosarevsky/AI-Talks | ai_talks/src/utils/stt.py | stt.py | py | 3,457 | python | en | code | 243 | github-code | 36 |
20157817247 | from database import *
class invitados(object):
    """Active-record style model for rows of the ``invitados`` (guests) table.

    NOTE(review): every query is built with ``%`` string interpolation, which is
    vulnerable to SQL injection. Database().run appears to accept only a raw
    SQL string, so switching to parameterized queries needs a Database API
    change; flagged here rather than silently restructured.
    """
    idInvitado = None           # primary key
    nombre_invitado = None      # first name
    apellido_invitado = None    # last name
    descripcion = None          # description/bio
    url_imagen = None           # image URL

    @staticmethod
    def cargar(id):
        """Load and return the guest with the given id from the database."""
        info = Database().run("Select * FROM invitados WHERE idInvitado = '%s'" % (id))
        invitado = invitados()
        for item in info:
            invitado.idInvitado = item["idInvitado"]
            invitado.nombre_invitado = item["nombre_invitado"]
            invitado.apellido_invitado = item["apellido_invitado"]
            invitado.descripcion = item["descripcion"]
            invitado.url_imagen = item["url_imagen"]
        return invitado

    def alta(self):
        """Insert this guest as a new row (id auto-generated by the DB)."""
        # Bug fix: the first placeholder was unquoted (%s instead of '%s'),
        # producing invalid SQL for any non-numeric name.
        Database().run("INSERT INTO invitados Values (NULL, '%s', '%s', '%s', '%s')" % (self.nombre_invitado,
                                                                                        self.apellido_invitado,
                                                                                        self.descripcion,
                                                                                        self.url_imagen))

    def baja(self):
        """Delete this guest and their related event rows."""
        # Bug fix: the column was misspelled "idInivitado"; cargar() shows the
        # real column is "idInvitado". (Assumes the eventos table uses the same
        # column name — TODO confirm against the schema.)
        Database().run("DELETE FROM eventos WHERE idInvitado = '%s'" % (self.idInvitado))
        Database().run("DELETE FROM invitados WHERE idInvitado = '%s'" % (self.idInvitado))

    def modificacion(self):
        """Persist in-memory changes for this guest back to its row."""
        # Bug fix: the original UPDATE had no WHERE clause and therefore
        # overwrote every row in the table.
        Database().run("UPDATE invitados SET nombre_invitado = '%s', apellido_invitado = '%s', descripcion = '%s',"
                       "url_imagen = '%s' WHERE idInvitado = '%s'" % (self.nombre_invitado, self.apellido_invitado,
                                                                      self.descripcion, self.url_imagen,
                                                                      self.idInvitado))
1806212132 | from __future__ import annotations
import asyncio
import concurrent.futures
import dataclasses
import functools
import logging
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import jinja2
from .. import scraper
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class MailConfig(object):
    """SMTP connection settings used by _do_send_email()."""
    from_addr: str   # envelope/From address
    host: str        # SMTP server hostname
    port: str        # SMTP server port (typed str here; smtplib accepts it — TODO confirm intent)
    tls: bool        # whether to upgrade the connection with STARTTLS
    user: str = None    # optional login user; when None no authentication is attempted
    passwd: str = None  # password for `user`
def _do_send_email(cfg: MailConfig, to_addr: str, subject: str, text: str):
    """Synchronously send a plain-text email via SMTP.

    Runs in a worker process (see Mailer._send_email). Optionally upgrades the
    connection with STARTTLS and logs in when credentials are configured.
    """
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = cfg.from_addr
    msg['To'] = to_addr
    msg['Date'] = formatdate(localtime=True)
    msg['Message-ID'] = make_msgid('punkow')
    txt = MIMEText(text)
    msg.attach(txt)
    smtp = smtplib.SMTP(host=cfg.host, port=cfg.port)
    if cfg.tls:
        smtp.starttls()
    if cfg.user is not None:
        smtp.login(cfg.user, cfg.passwd)
    try:
        smtp.sendmail(cfg.from_addr, [to_addr], msg.as_string())
    finally:
        # Always close the SMTP session, even if sendmail raises.
        smtp.quit()
    logger.info("Sent an email")
class Mailer(object):
    """Async facade for sending templated notification emails.

    Renders Jinja2 templates from the local email_templates directory and
    dispatches the blocking SMTP send to a small process pool so the event
    loop never blocks on network I/O.
    """
    def __init__(self, loop: asyncio.AbstractEventLoop, config: MailConfig, base_url: str):
        self._loop = loop
        self._config = config
        # Base URL embedded in confirmation/cancel links.
        self._base_url = base_url
        self._tpl = jinja2.Environment(
            loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
                                                        'email_templates')),
            autoescape=jinja2.select_autoescape(['html', 'xml'])
        )
        # Process pool: _do_send_email is CPU/network-blocking and must not
        # run on the event loop.
        self._executor = concurrent.futures.ProcessPoolExecutor(max_workers=2)
    async def _send_email(self, to_addr, subject, text):
        """Run the blocking SMTP send in the process pool."""
        await self._loop.run_in_executor(self._executor, _do_send_email, self._config, to_addr, subject, text)
    async def send_success_email(self, email, booking: scraper.BookingResult):
        """Notify the user that their appointment was booked, with manage link."""
        tpl = self._tpl.get_template("success.txt")
        text = tpl.render(meta=booking.metadata, change_url=scraper.BASE_URL + scraper.MANAGE_URL,
                          process_id=booking.process_id, auth_code=booking.auth_key)
        await self._send_email(email, "Your appointment was booked", text)
    async def send_confirmation_email(self, email, req_key):
        """Notify the user that their booking request was registered."""
        tpl = self._tpl.get_template("confirmation.txt")
        text = tpl.render(base_url=self._base_url, req_key=req_key)
        await self._send_email(email, "Your booking request was registered", text)
    async def send_cancel_email(self, email, req_key):
        """Notify the user that their booking request was canceled."""
        tpl = self._tpl.get_template("cancel.txt")
        text = tpl.render(base_url=self._base_url, req_key=req_key)
        await self._send_email(email, "Your booking request was canceled", text)
    def start_queue(self) -> AsyncMailQueue:
        """Create a fire-and-forget mail queue bound to this mailer."""
        return AsyncMailQueue(self._loop, self)
class AsyncMailQueue(object):
    """Async context manager that fires mail coroutines and waits on exit.

    Each send_* call schedules the corresponding Mailer coroutine on the loop
    thread-safely; __aexit__ blocks (off-loop) until all scheduled sends have
    completed.
    """
    def __init__(self, loop: asyncio.AbstractEventLoop, mailer: Mailer):
        self._loop = loop
        self._mailer = mailer
        # Pending concurrent.futures.Future objects for scheduled sends.
        self._queue = []
    async def __aenter__(self) -> AsyncMailQueue:
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if len(self._queue) != 0:
            try:
                # concurrent.futures.wait blocks, so run it in the default
                # executor rather than on the event loop.
                await self._loop.run_in_executor(None, functools.partial(
                    concurrent.futures.wait, self._queue))
            # Bug fix: a bare `except:` also swallowed BaseException such as
            # asyncio.CancelledError, preventing proper task cancellation.
            except Exception:
                logger.exception("Exception in waiting for mail sending")
    def _append_task(self, coro):
        """Schedule *coro* on the loop from any thread and remember its future."""
        self._queue.append(asyncio.run_coroutine_threadsafe(coro, self._loop))
    def send_success_email(self, email, booking: scraper.BookingResult):
        self._append_task(self._mailer.send_success_email(email, booking))
    def send_confirmation_email(self, email, req_key):
        self._append_task(self._mailer.send_confirmation_email(email, req_key))
    # NOTE(review): unlike its siblings this is declared `async def` although it
    # never awaits; callers must await it for the task to be scheduled. Kept
    # async to avoid breaking existing `await queue.send_cancel_email(...)` call sites.
    async def send_cancel_email(self, email, req_key):
        self._append_task(self._mailer.send_cancel_email(email, req_key))
| janLo/punkow | punkow/service/mailer.py | mailer.py | py | 4,021 | python | en | code | 0 | github-code | 36 |
21367017821 | '''
Link: https://www.lintcode.com/problem/722
'''
# Slightly modified from the solution from jiuzhang.com. Uses trie. Has O(n) time complexity, where n is the length of the array.
# It makes use of trie data structure, which makes string retrieval and comparison efficient. Otherwise it would be O(n^2).
class TrieNode:
    """One node of a binary trie; sons[b] is the child for bit value b."""
    def __init__(self):
        self.sons = [None, None]


class Trie:
    """Binary trie over 31-bit values, supporting maximum-XOR queries."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, number):
        """Store *number* bit by bit, most significant bit (bit 30) first."""
        cur = self.root
        for shift in reversed(range(31)):
            bit = (number >> shift) & 1
            nxt = cur.sons[bit]
            if nxt is None:
                nxt = TrieNode()
                cur.sons[bit] = nxt
            cur = nxt

    def get_largest_xor(self, number):
        """Return max(number ^ v) over every value v previously inserted.

        Greedy walk from the top bit: at each level prefer the child whose bit
        differs from number's, since that sets the corresponding XOR bit. When
        no such child exists, follow the matching bit instead.
        """
        cur = self.root
        best = 0
        for shift in reversed(range(31)):
            bit = (number >> shift) & 1
            opposite = bit ^ 1
            if cur.sons[opposite]:
                best |= 1 << shift
                cur = cur.sons[opposite]
            else:
                cur = cur.sons[bit]
        return best
class Solution:
    """
    @param nums: the array
    @return: the max xor sum of the subarray in a given array
    """
    def maxXorSubarray(self, nums):
        # XOR of nums[i..j] equals prefix[j] ^ prefix[i-1], so the best
        # subarray XOR is the best pairing of the running prefix XOR with
        # some earlier prefix stored in the trie (0 seeds the empty prefix).
        trie = Trie()
        trie.insert(0)
        running = 0
        best = float('-inf')
        for value in nums:
            running ^= value
            trie.insert(running)
            best = max(best, trie.get_largest_xor(running))
        return best
| simonfqy/SimonfqyGitHub | lintcode/super/722_maximum_subarray_vi.py | 722_maximum_subarray_vi.py | py | 1,936 | python | en | code | 2 | github-code | 36 |
9055011719 | from mysql.connector.errors import DatabaseError, ProgrammingError
from addData.add_data import add_user_data
from create_connection import get_cursor
from viewStatsClient.ask_batter_stats import ask_batter
from viewStatsClient.ask_game_queries import ask_game
from viewStatsClient.ask_pitcher_stats import ask_pitcher
from viewStatsClient.ask_team_stats import ask_team
from viewStatsClient.ask_umpire_queries import ask_umpire
from viewStatsClient.ask_venue_queries import ask_venue
def main():
    """Interactive CLI loop: connect to the baseball DB, then repeatedly offer
    the view-stats / add-data / exit menu until the user exits.

    Returns early (after printing a hint) when the DB connection fails.
    """
    try:
        mydb, cursor = get_cursor()
    except ProgrammingError:
        print('Invalid credentials! Please fix and try again')
        return
    except DatabaseError:
        print('Connection timed out. IP address may be incorrect. Please fix and try again')
        return
    # Menu number -> handler. Entry 7 ("Back") is a two-argument no-op so it
    # can be invoked with (mydb, cursor) exactly like the real handlers.
    stats_dict = {
        1: ask_batter,
        2: ask_game,
        3: ask_pitcher,
        4: ask_umpire,
        5: ask_venue,
        6: ask_team,
        7: lambda var1, var2: None
    }
    # Labels printed next to the same menu numbers.
    stats_print_dict = {
        1: ". View Batter stats",
        2: ". View Game stats",
        3: ". View Pitcher stats",
        4: ". View Umpire stats",
        5: ". View Venue stats",
        6: ". View Team stats",
        7: ". Back"
    }
    while True:
        print('What would you like to do?')
        print('1. View Stats')
        print('2. Add Data')
        print('3. Exit')
        option = input('Enter your choice: ')
        try:
            option = int(option)
        except ValueError:
            print('Error: You must enter an integer between 1 and 3')
            continue
        if option < 1 or option > 3:
            print('Invalid option, please choose 1, 2, or 3')
            continue
        if option == 1:
            print('What Stats would you like to view')
            for x in stats_print_dict:
                print(str(x) + stats_print_dict[x])
            # Inner loop: re-prompt until a valid stats choice is entered.
            while True:
                option = input('Enter your choice: ')
                try:
                    option = int(option)
                except ValueError:
                    print('Error: You must enter an integer between 1 and 7')
                    continue
                if option not in stats_dict.keys():
                    print('Error: You must enter an integer between 1 and 7')
                    continue
                # "\033c" is the ANSI full-reset escape: clears the terminal.
                print("\033c", end="")
                stats_dict[option](mydb, cursor)
                break
        elif option == 2:
            add_user_data(mydb, cursor)
            continue
        elif option == 3:
            return
# Run the interactive CLI only when executed as a script.
if __name__ == '__main__':
    main()
| SidhaantAnand/MLB-Analysis | MLB.py | MLB.py | py | 2,609 | python | en | code | 0 | github-code | 36 |
3201757332 | from flask import *
from Graph import *
from AStar import *
app = Flask(__name__)
'''
Routers
'''
# Route to index
@app.route('/')
def index():
    # Serve the single-page map UI.
    return render_template('index.html')
@app.route('/compute', methods=['POST'])
def compute():
    """Build a graph from the posted JSON and return the A* shortest path.

    Expected request body (JSON):
      - size:  number of nodes (nodes are numbered 0 .. size-1)
      - node:  list of {latitude, longitude} dicts, one per node
      - adj:   adjacency list; adj[i] lists the neighbours of node i
      - start: index of the start node
      - goal:  index of the goal node

    Response body (JSON): {"path": [node indices from start to goal],
    "cost": total path cost}.
    """
    data = request.data
    dataDict = json.loads(data)
    print(dataDict)  # request logging kept from the original implementation

    nodeList = []
    edgeList = []
    G = Graph(nodeList, edgeList)

    # Create one LocationNode per posted coordinate pair.
    # (The original also incremented the loop variable by hand, which a
    # ``for`` loop ignores; that dead statement is removed.)
    for i in range(len(dataDict['node'])):
        N = LocationNode(i, dataDict['node'][i]['longitude'], dataDict['node'][i]['latitude'])
        G.nodeList.append(N)

    # Create each undirected edge exactly once (only neighbour indices > i),
    # weighted by the haversine distance between its endpoints.
    for i in range(len(dataDict['adj'])):
        firstNode = G.nodeList[i]
        for j in range(len(dataDict['adj'][i])):
            if dataDict['adj'][i][j] > i:
                secondNode = G.nodeList[dataDict['adj'][i][j]]
                E = WeightedEdge(firstNode, secondNode, HarversineDistance(firstNode, secondNode))
                G.edgeList.append(E)

    # Configure the solver: edge set plus the haversine heuristic for A*.
    SetEdgeList(G.edgeList)
    SetHeuristicDistanceFunc(HarversineDistance)

    path, cost = GetShortestPath(G.nodeList[dataDict['start']], G.nodeList[dataDict['goal']])

    # Return only node indices, not the node objects themselves.
    pathNodeIdx = [node.value for node in path]

    solutionDict = {
        'path': pathNodeIdx,
        'cost': cost
    }
    solutionJson = json.dumps(solutionDict)
    print(solutionJson)  # response logging kept from the original implementation
    return solutionJson
if __name__ == '__main__':
app.run(debug= True) | wildansupernova/AStar-Algorithm-with-Google-Maps-API | src/app.py | app.py | py | 4,062 | python | id | code | 0 | github-code | 36 |
6771451793 | # 1072
import sys
import decimal
input = sys.stdin.readline
def get_victory_percent(x, y):
    """Return the integer victory percentage for y wins out of x games."""
    # return int(y/x * 100)
    return y * 100 // x
# Only the floor-division form above is accepted by the judge: int(y/x * 100)
# can be off because of floating-point error. Understand the difference
# between int(a / b) and a // b -- be careful when handling Python floats!
# 1. x games played, y games won, z = current integer win rate
x, y = map(int, input().split())
z = get_victory_percent(x, y)
answer = float('inf')
start = 1
end = 1000000000 # X can be up to one billion, so initialise end to 1e9
# 2. Binary search over the number of additional (all-won) games
while start <= end:
    mid = (start + end) // 2
    # x and y grow together => mid is the count of extra games played.
    # If the win rate after mid more games still does not exceed the
    # original, search again with start above mid.
    if get_victory_percent(x+mid, y+mid) <= z:
        start = mid + 1
    else:
        # The rate exceeds the original after mid more games: record mid as
        # the candidate answer and keep searching below it for a smaller one.
        answer = mid
        end = mid - 1
if answer == float('inf'): # the win rate can never change from the initial one
    print(-1)
else:
    print(answer)
# 다량의 데이터 검색을 하는 문제는 이진 탐색을 적용해볼 수 있음 | chajuhui123/algorithm-solving | BOJ/이진탐색/230113_게임.py | 230113_게임.py | py | 1,691 | python | ko | code | 0 | github-code | 36 |
3310605398 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Sequential):
    """Three stacked 3x3 conv -> ReLU -> BatchNorm stages with dense
    additive skips: stage k receives the sum of the block input and every
    earlier stage output.

    Because the stage outputs are summed with the input, the channel count
    must stay constant; ``in_planes`` and ``out_planes`` are therefore
    expected to be equal (every call site passes 64, 64). Previously both
    parameters were silently ignored and 64 was hard-coded; they are now
    honoured, which is backward compatible for all existing callers.
    """

    def __init__(self, in_planes, out_planes, args):
        super(Block, self).__init__()
        self.x5_block = nn.Sequential(
            nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(out_planes),
            nn.Dropout(args.dropout),
        )
        self.x6_block = nn.Sequential(
            nn.Conv2d(in_channels=out_planes, out_channels=out_planes, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(out_planes),
            nn.Dropout(args.dropout),
        )
        self.x7_block = nn.Sequential(
            nn.Conv2d(in_channels=out_planes, out_channels=out_planes, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(out_planes),
            # intentionally no dropout on the last stage (as in the original)
        )

    def forward(self, x4):
        x5 = self.x5_block(x4)
        x6 = self.x6_block(x4 + x5)
        x7 = self.x7_block(x4 + x5 + x6)
        return x5, x6, x7
class DNN(nn.Module):
    """Encoder with two symmetric dense-residual decoder heads.

    ``forward(x)`` takes a 6-channel input and returns a tuple
    ``(outm, outd)`` of two single-channel maps at the input resolution,
    produced by the "m" and "d" decoder branches.
    """

    def __init__(self, args):
        super(DNN, self).__init__()
        # --- encoder: three conv stages with additive skips, then pooling ---
        self.x1_block = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(args.dropout),
        )
        self.x2_block = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(args.dropout),
        )
        self.x3_block = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            # no dropout on the last encoder stage (matches x7_block in Block)
        )
        self.x4_pool = nn.MaxPool2d(2, 2)
        self.x5_6_7_block = Block(64, 64, args)
        self.x8_pool = nn.MaxPool2d(2, 2)
        self.x9_10_11_block = Block(64, 64, args)
        # --- decoder head "m": 1x1 conv + 2x upsample twice, then 1-ch conv ---
        self.idm1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xm2_block = Block(64, 64, args)
        self.idm2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xm4_block = Block(64, 64, args)
        self.idm4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.finm = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False)
        # --- decoder head "d": same structure as head "m" ---
        self.idd1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xd2_block = Block(64, 64, args)
        self.idd2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xd4_block = Block(64, 64, args)
        self.idd4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.find = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x):
        # encoder
        x1 = self.x1_block(x)
        x2 = self.x2_block(x1)
        x3 = self.x3_block(x1 + x2)
        x4 = self.x4_pool(x1 + x2 + x3)
        x5, x6, x7 = self.x5_6_7_block(x4)
        x8 = self.x8_pool(x5 + x6 + x7)
        x9, x10, x11 = self.x9_10_11_block(x8)
        # head "m": upsample back to input resolution, fusing encoder skips
        outm = self.idm1(x11)
        outm = F.interpolate(outm, scale_factor=2, mode='bilinear')
        _, _, outm = self.xm2_block(outm + x7)
        outm = self.idm2(outm)
        outm = F.interpolate(outm, scale_factor=2, mode='bilinear')
        _, _, outm = self.xm4_block(outm + x3)
        outm = self.idm4(outm)
        outm = self.finm(outm)
        # head "d": same path through the d-branch modules
        outd = self.idd1(x11)
        outd = F.interpolate(outd, scale_factor=2, mode='bilinear')
        _, _, outd = self.xd2_block(outd + x7)
        outd = self.idd2(outd)
        outd = F.interpolate(outd, scale_factor=2, mode='bilinear')
        _, _, outd = self.xd4_block(outd + x3)
        outd = self.idd4(outd)
        # BUG FIX: the d-head previously reused self.finm here, leaving
        # self.find defined but never used; apply the d-head's own final conv.
        outd = self.find(outd)
        return outm, outd
# Smoke test: push one random batch through the network when run directly.
if __name__ == '__main__':
    class DNNArg:
        dropout = 0.0
    net = DNN(DNNArg())
    # Fixes two crashes in the original: the first conv expects 6 input
    # channels (not 3), and forward returns an (outm, outd) tuple, so
    # calling .size() on the tuple raised AttributeError.
    outm, outd = net(torch.randn(1, 6, 224, 224))
    print(outm.size(), outd.size())
| uday96/EVA4-TSAI | S15/models/quiz_dense.py | quiz_dense.py | py | 4,240 | python | en | code | 1 | github-code | 36 |
6911964039 | from os.path import dirname, realpath, join
import time
from datetime import datetime
from rich import box
from rich.table import Table
from rich.console import Console
SCRIPT_PATH = dirname(realpath(__file__))
class Results:
    """Accumulate per-dataset benchmark rows in a rich table and write the
    rendered table to performance_results/<results_name>.md."""

    # NOTE(review): appears unused -- results_path below is built from
    # SCRIPT_PATH instead. TODO confirm before removing.
    RESULTS_PATH = "results"
    def __init__(self, results_name: str):
        self.method = ""
        self.start_time = None  # must be set via set_start_time() before timing
        self.results_name = results_name
        self.results_path = join(SCRIPT_PATH, "..", "..", "performance_results", f"{results_name}.md")
        self.table = self.initiate_table()
        self.accuracies = list()  # one accuracy per saved row, averaged at the end
    @staticmethod
    def initiate_table():
        # Build the empty results table, one column per reported metric.
        table = Table(title="", box=box.MINIMAL_DOUBLE_HEAD)
        table.add_column("Dataset")
        table.add_column("Method")
        table.add_column("Train \n size", justify="right")
        table.add_column("Test \n size", justify="right")
        table.add_column("Time", justify="right")
        table.add_column("Acc.", justify="right")
        return table
    def set_start_time(self):
        # Record the wall-clock start of the current run.
        print("start time")
        self.start_time = time.time()
    def get_total_time(self):
        # Seconds elapsed since set_start_time(), formatted as e.g. "42s".
        # Raises TypeError if set_start_time() was never called.
        total_time = round(time.time() - self.start_time)
        print(f"{datetime.now():%Y_%m_%d_%H_%M}", f"finished in {total_time} seconds")
        return str(total_time) + "s"
    @staticmethod
    def format_dataset_name(name):
        # Strip the .tsv extension and turn underscores into spaces.
        return name.replace(".tsv", "").replace("_", " ")
    def save_result(self, dataset: str, method: str, accuracy: float, train_length: int, test_length: int):
        # Append one result row; also stops the current timing via get_total_time().
        self.accuracies.append(accuracy)
        self.table.add_row(
            self.format_dataset_name(dataset),
            method,
            str(train_length),
            str(test_length),
            self.get_total_time(),
            str(round(accuracy)) + "%",
        )
    def write_results(self):
        # Append an average row, print the table, and save it as text.
        # NOTE(review): raises ZeroDivisionError if no result was ever saved.
        accuracies_average = round(sum(self.accuracies) / len(self.accuracies))
        self.table.add_row("", "", "", "", "", "")
        self.table.add_row("average", "", "", "", "", str(accuracies_average) + "%")
        console = Console(record=True)
        console.print(self.table)
        console.save_text(self.results_path)
| huridocs/pdf_metadata_extraction | src/performance/Results.py | Results.py | py | 2,170 | python | en | code | 2 | github-code | 36 |
70606645225 | import os
import sys
# 在linux会识别不了包 所以要加临时搜索目录
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import execjs
import time
from datetime import datetime
import pandas as pd
import requests
import json
import akshare as ak
def get_var():
    '''
    Run the bundled ths.js with execjs and return the computed anti-bot
    "v" cookie token.
    :return: the token string produced by the script's v() function
    '''
    # NOTE(review): hard-coded absolute path; breaks outside this host.
    js = '/opt/code/pythonstudy_space/05_quantitative_trading_hive/util/ths.js'
    with open(js) as f:
        comm = f.read()
    comms = execjs.compile(comm)
    result = comms.call('v')
    return result
def get_headers(cookie='Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1=1672230413; historystock=688255%7C*%7C003816%7C*%7C002933%7C*%7C600706%7C*%7C688687; Hm_lvt_da7579fd91e2c6fa5aeb9d1620a9b333=1673161546; log=; user=MDq080MxOjpOb25lOjUwMDo1MTMxNDQ1NjI6NywxMTExMTExMTExMSw0MDs0NCwxMSw0MDs2LDEsNDA7NSwxLDQwOzEsMTAxLDQwOzIsMSw0MDszLDEsNDA7NSwxLDQwOzgsMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDEsNDA7MTAyLDEsNDA6MjQ6Ojo1MDMxNDQ1NjI6MTY3MzY1OTkxNTo6OjE1NzQ1NTQ5ODA6MjY3ODQwMDowOjFkZjgzN2I5YThiZTRiNzBhZTIyZTE2MzViYWFiYjlhODpkZWZhdWx0XzQ6MQ%3D%3D; userid=503144562; u_name=%B4%F3C1; escapename=%25u5927C1; ticket=90f706428300af2c9ad5b9bc8faf3498; user_status=0; utk=bd0610c31e8fad6a9f67c1c47f83cb90; Hm_lpvt_da7579fd91e2c6fa5aeb9d1620a9b333=1673661956; Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1=1673661957; v=A5hM9kejT7kEnWM9jZ0-eQlTac0vgfoIXu7QjdKD5lmEWjbzepHMm671oDEh'):
    '''
    Build 10jqka request headers, replacing the "v=" anti-bot token in the
    given cookie string with a freshly computed one. Configure your own
    logged-in cookie via this default parameter.
    :param cookie: a 10jqka cookie string containing a "v=" field
    :return: dict with Cookie and User-Agent headers
    '''
    v = get_var()
    # Assumes "v=" appears only once, at the end of the cookie -- everything
    # after the split point is discarded. TODO confirm for other accounts.
    cookie = cookie.split('v=')
    cookie = cookie[0] + 'v=' + v
    headers={
        'Cookie':cookie ,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
    }
    return headers
def get_all_stock():
    '''
    Fetch all stocks on the user's 10jqka watchlist ("self-selected stocks").
    :return: DataFrame built from the "result" field of the JSONP response
    '''
    headers=get_headers()
    url='https://t.10jqka.com.cn/newcircle/group/getSelfStockWithMarket/?'
    params={
        'callback': 'selfStock',
        '_': '1673617915586'
    }
    res=requests.get(url=url,params=params,headers=headers)
    # Strip the JSONP wrapper: 'selfStock(' is 10 chars, plus the trailing ');'.
    text=res.text[10:len(res.text)-2]
    json_text=json.loads(text)
    df = pd.DataFrame(json_text['result'])
    return df
def add_stock_to_account(stock='600111'):
    '''
    Add one stock to the 10jqka watchlist and print the outcome.
    :param stock: stock code to add
    :return: None
    '''
    url='https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?'
    headers=get_headers()
    params={
        'callback':'modifyStock',
        'op': 'add',
        'stockcode': stock,
        '_': '1673620068115',
    }
    res = requests.get(url=url, params=params, headers=headers)
    # Strip the JSONP wrapper: 'modifyStock(' is 12 chars, plus trailing ');'.
    text = res.text[12:len(res.text) - 2]
    json_text = json.loads(text)
    err=json_text['errorMsg']
    # The server reports success with the literal message 修改成功.
    if err=='修改成功':
        print('{}加入自选股成功'.format(stock))
    else:
        print('{}{}'.format(stock,err))
def del_stock_from_account(stock='600111'):
    '''
    Remove one stock from the 10jqka watchlist and print the outcome.
    :param stock: stock code to remove
    :return: None
    '''
    url = 'https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?'
    headers = get_headers()
    df=get_all_stock()
    try:
        marker=df[df['code']==stock]['marketid'].tolist()[0]
        # NOTE(review): stockcode is built but never sent -- params uses the
        # bare stock code instead. Looks like dead code; TODO confirm.
        stockcode='{}_{}'.format(stock,marker)
        params={
            'op':'del',
            'stockcode':stock
        }
        res = requests.get(url=url, params=params, headers=headers)
        text = res.text
        json_text = json.loads(text)
        err = json_text['errorMsg']
        if err == '修改成功':
            print('{}删除自选股成功'.format(stock))
        else:
            print('{}{}'.format(stock, err))
    # NOTE(review): bare except treats *any* failure (network error, JSON
    # parse error, ...) as "stock not on the watchlist" -- overly broad.
    except:
        print('{}没有在自选股'.format(stock))
def all_zt_stock_add_account(date='20230113'):
    '''
    Add every limit-up ("涨停") stock for the given date to the watchlist.
    :param date: trading date in YYYYMMDD form
    :return: None
    '''
    df=ak.stock_zt_pool_em(date=date)
    # '代码' is the stock-code column returned by akshare.
    for stock in df['代码'].tolist():
        add_stock_to_account(stock=stock)
def all_del_add_stocks(codes):
    '''
    Replace the whole watchlist: delete every current stock, then add the
    given codes, printing how many deletions/additions succeeded.
    :param codes: iterable of stock codes to add
    :return: None
    '''
    del_df = get_all_stock()
    d_n = 0  # successful deletions
    a_n = 0  # successful additions
    url = 'https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?'
    headers = get_headers()
    for stock in del_df['code'].tolist():
        try:
            marker = del_df[del_df['code'] == stock]['marketid'].tolist()[0]
            # NOTE(review): stockcode is built but never sent (params uses the
            # bare code), same as in del_stock_from_account. TODO confirm.
            stockcode = '{}_{}'.format(stock, marker)
            params = {
                'op': 'del',
                'stockcode': stock
            }
            res = requests.get(url=url, params=params, headers=headers)
            text = res.text
            json_text = json.loads(text)
            err = json_text['errorMsg']
            if err == '修改成功':
                d_n = d_n+1
            else:
                print('{}{}'.format(stock, err))
        # NOTE(review): bare except hides network/parse failures as
        # "not on the watchlist".
        except:
            print('{}没有在自选股'.format(stock))
    for stock in codes:
        params = {
            'callback': 'modifyStock',
            'op': 'add',
            'stockcode': stock,
            '_': '1673620068115',
        }
        res = requests.get(url=url, params=params, headers=headers)
        # Strip the JSONP wrapper: 'modifyStock(' is 12 chars plus ');'.
        text = res.text[12:len(res.text) - 2]
        json_text = json.loads(text)
        err = json_text['errorMsg']
        if err == '修改成功':
            a_n = a_n+1
        else:
            print('{}{}'.format(stock, err))
    print('删除自选股成功,删除了{}个;加入自选股成功,加入了{}个'.format(d_n,a_n))
# Usage (run directly to replace the watchlist with the hard-coded codes):
# python /opt/code/pythonstudy_space/05_quantitative_trading_hive/util/同花顺自选股.py
if __name__=='__main__':
    start_time = time.time()
    codes = ['002689','002094','002651','002264','002808','002888','003040','002762','002238','002766','003028']
    all_del_add_stocks(codes)
    end_time = time.time()
    # Prints the script name and elapsed runtime in seconds and minutes.
    print('{}:程序运行时间:{}s,{}分钟'.format(os.path.basename(__file__),end_time - start_time, (end_time - start_time) / 60))
| cgyPension/pythonstudy_space | 05_quantitative_trading_hive/util/同花顺自选股.py | 同花顺自选股.py | py | 5,988 | python | en | code | 7 | github-code | 36 |
25717509151 | from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_default_locale(
query: MutableMapping[str, Any], context: PipelineContext
) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> T:
pass
@DataSource.dispatch
def get_many(
self,
type: Type[T],
query: MutableMapping[str, Any],
context: PipelineContext = None,
) -> Iterable[T]:
pass
#######################
# Verification String #
#######################
_validate_get_verification_string_query = (
Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
)
@get.register(VerificationStringDto)
@validate_query(_validate_get_verification_string_query, convert_region_to_platform)
def get_verification_string(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> VerificationStringDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
summonerId=query["summoner.id"]
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except (ValueError, APINotFoundError) as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = query["platform"].region.value
data["summonerId"] = query["summoner.id"]
return VerificationStringDto(data)
_validate_get_many_verification_string_query = (
Query.has("platforms").as_(Iterable).also.has("summoner.ids").as_(Iterable)
)
@get_many.register(VerificationStringDto)
@validate_query(
_validate_get_many_verification_string_query, convert_region_to_platform
)
def get_many_verification_string(
self, query: MutableMapping[str, Any], context: PipelineContext = None
) -> Generator[VerificationStringDto, None, None]:
def generator():
parameters = {"platform": query["platform"].value}
for platform, summoner_id in zip(query["platforms"], query["summoner.ids"]):
platform = Platform(platform.upper())
endpoint = (
"lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
summonerId=summoner_id
)
)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = platform.region.value
data["summonerId"] = summoner_id
yield VerificationStringDto(data)
return generator()
| meraki-analytics/cassiopeia | cassiopeia/datastores/kernel/thirdpartycode.py | thirdpartycode.py | py | 3,374 | python | en | code | 522 | github-code | 36 |
3849164917 |
# bottom-up solution
def coin_change_bottomup(coins, value):
    """Return the minimum number of coins from *coins* summing to *value*.

    Classic bottom-up DP: best[v] holds the fewest coins making amount v,
    with best[0] = 0. Returns float('inf') when the amount cannot be formed.

    Bug fixed: the original table had only `value` entries, filled v = 1 ..
    value-1, and returned best[value-1] -- i.e. it answered for value-1, and
    the answer for `value` itself was never computed. The table now has
    value+1 entries covering amounts 0 .. value.
    """
    best = [0] + [float('inf')] * value
    for v in range(1, value + 1):
        for coin in coins:
            if coin <= v and best[v - coin] + 1 < best[v]:
                best[v] = best[v - coin] + 1
    return best[value]
#print(coin_change_bottomup([9, 6, 5, 1], 12))
# top-down solution
import math
def coin_change_topdown(coins, value):
    """Return the minimum number of coins from *coins* summing to *value*.

    Top-down memoised DP; returns math.inf when the amount cannot be formed.
    memo[v] caches the answer for amount v, seeded with memo[0] = 0.
    """
    memo = [0] + [None] * value
    return coin_change_topdown_aux(coins, value, memo)


def coin_change_topdown_aux(coins, value, memo):
    # Fixes two defects in the original: the memo list was re-created at the
    # top of every call (defeating memoisation entirely, giving exponential
    # time), and the coin loop started at index 1, silently skipping coins[0].
    if memo[value] is None:
        min_coins = math.inf
        for coin in coins:
            if coin <= value:
                min_coins = min(min_coins, 1 + coin_change_topdown_aux(coins, value - coin, memo))
        memo[value] = min_coins
    return memo[value]
#print(coin_change_topdown([9, 6, 5, 1], 12)) | theRealAndyYang/FIT2004-Algorithm-and-Data-Structure | Week 5/tute5code/problem1.py | problem1.py | py | 1,244 | python | en | code | 5 | github-code | 36 |
16182812319 | import boto3
class S3:
    """Thin convenience wrapper around boto3 S3 client/resource operations."""

    def __init__(self, aws_access_key_id, aws_secret_access_key):
        credentials = {
            'aws_access_key_id': aws_access_key_id,
            'aws_secret_access_key': aws_secret_access_key,
        }
        self.s3_client = boto3.client('s3', **credentials)
        self.s3 = boto3.resource('s3', **credentials)

    def upload_file(self, file_name, bucket):
        """
        Function to upload a file to an S3 bucket
        """
        # The object key is the local file's base name.
        object_name = file_name.split('/')[-1]
        return self.s3_client.upload_file(file_name, bucket, object_name)

    def download_file(self, file_name, bucket):
        """
        Function to download a given file from an S3 bucket
        """
        output = f"downloads/{file_name}"
        self.s3.Bucket(bucket).download_file(file_name, output)
        return output

    def delete_file(self, file_name, bucket):
        '''
        Function to delete a file from an S3 bucket
        '''
        self.s3.Object(bucket, file_name).delete()

    def list_files(self, bucket):
        """
        Function to list files in a given S3 bucket
        """
        contents = []
        try:
            # 'Contents' is absent for an empty bucket, raising KeyError.
            contents.extend(self.s3_client.list_objects(Bucket=bucket)['Contents'])
        except KeyError:
            print("No contents available")
            contents.append("No items in the bucket... Add some...")
        return contents
| satishvis/s3test | s3_demo.py | s3_demo.py | py | 1,544 | python | en | code | 0 | github-code | 36 |
17991705417 | import pandas as pd
import numpy as np
from prediction2 import create_model, evaluate_model
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.grid_search import GridSearchCV
def get_data():
    """Load data/clean_data.csv and derive boolean age-band columns plus a
    boolean `target` column (y == "yes"); the original `y` column is dropped."""
    df = pd.read_csv("data/clean_data.csv")
    df["young_cust"] = df.age<26
    df["mature_cust"] = df.age>60
    df["target"]= df.y=="yes"
    df.drop("y", axis=1, inplace=True)
    return df
def transform_model(df):
    """One-hot encode every categorical column of *df* and return the result."""
    return pd.get_dummies(df)
def test_model(df):
    """Baseline-evaluate a GradientBoostingClassifier on *df*, then grid-search
    its hyper-parameters, evaluate the best estimator, and return it.

    Bug fixed: the original referenced X_train/X_test/y_train/y_test, which
    were locals of test_data and never defined in this scope, so the grid
    search raised NameError. The train/test split is now performed here.
    """
    gbc = GradientBoostingClassifier()
    test_data(df, gbc)
    # Split for the grid search (same features/target layout as test_data).
    target = df.target
    features = df.drop("target", axis=1)
    X_train, X_test, y_train, y_test = train_test_split(features, target)
    gbc_params = {
        'min_samples_leaf': [1, 5, 10, 20],
        'n_estimators': [100,200,300,400],
        'learning_rate': [0.01, 0.025, 0.05, 0.1],
    }
    search = GridSearchCV(gbc, gbc_params, n_jobs=-1, scoring = "f1")
    search.fit(X_train, y_train)
    preds_b = search.best_estimator_.predict(X_test)
    evaluate_model(preds_b, y_test)
    return search.best_estimator_
def test_data(df,model, fit=False):
    """Split *df* into train/test, obtain predictions from *model*, and print
    evaluation metrics via evaluate_model.

    :param fit: when True, *model* is used to predict directly (it is assumed
        to be fitted already); when False, create_model trains it first.
    """
    target = df.target
    features = df.drop("target", axis=1)
    X_train, X_test, y_train, y_test = train_test_split(features,target)
    if fit:
        # NOTE(review): predicts without fitting on this split -- the model
        # must already be trained by the caller or this raises. TODO confirm.
        preds =model.predict(X_test)
    else:
        preds,_=create_model(model,X_train, X_test, y_train)
    evaluate_model(preds,y_test)
# Get Data
df = get_data()
# Split customers into four persona subsets by their cluster label.
persona0= df[df.cluster==0]
persona1= df[df.cluster==1]
persona2= df[df.cluster==2]
persona3= df[df.cluster==3]
#Build subsets
# For each persona: one-hot encode, then keep only its hand-picked columns.
P0=transform_model(persona0)
persona0_columns = ["poutcome_failure", "poutcome_nonexistent","poutcome_success","month_oct","month_sep",
                    "month_mar","month_may","month_dec","month_apr", "education_university.degree","education_basic.4y",
                    "job_blue-collar", "job_admin.", "duration","campaign", "pdays","emp.var.rate", "cons.price.idx",
                    "euribor3m","nr.employed","cell_phone","target"]
P0=P0[persona0_columns]
P1 = transform_model(persona1)
persona1_columns = ["age","duration","campaign", "pdays","emp.var.rate", "cons.price.idx", "cons.conf.idx",
                    "euribor3m","nr.employed","cell_phone", "clust_dist", "young_cust", "job_student", "marital_divorced",
                    "marital_single", "marital_married", "education_basic.9y","education_unknown","month_apr","month_dec",
                    "month_jul", "month_may","month_mar","month_oct","month_sep","poutcome_success", "poutcome_nonexistent","target"]
P1=P1[persona1_columns]
P2 = transform_model(persona2)
persona2_columns = ["duration","campaign", "pdays","emp.var.rate", "cons.price.idx", "cons.conf.idx",
                    "euribor3m","nr.employed","cell_phone", "job_admin.","job_blue-collar",
                    "education_university.degree","month_dec",
                    "month_may","month_mar","month_oct","month_sep","poutcome_success", "poutcome_nonexistent","target"]
P2=P2[persona2_columns]
P3 = transform_model(persona3)
persona3_columns = ["age", "duration","campaign", "pdays","emp.var.rate", "cons.price.idx", "cons.conf.idx",
                    "euribor3m","nr.employed","cell_phone","clust_dist","mature_cust", "job_blue-collar",
                    "education_basic.4y","month_dec", "job_retired", "job_self-employed", "job_services",
                    "job_technician", "education_basic.6y", "month_apr","month_jul","month_jun","poutcome_failure",
                    "month_may","month_mar","month_oct","month_sep","poutcome_success", "poutcome_nonexistent","target"]
P3=P3[persona3_columns]
| dbluiett/more_than_gut | project/customer_preds.py | customer_preds.py | py | 3,594 | python | en | code | 4 | github-code | 36 |
26350089653 | import libtcodpy as libtcod
import math
import globals as g
class Item:
    """Component: makes its owner Object pick-up-able, usable, and droppable.

    Messages, the inventory list, and colors come from the shared globals
    module ``g`` and libtcod.
    """
    def __init__(self, use_function=None):
        # use_function: zero-arg callable invoked by use(); returning the
        # string 'cancelled' keeps the item in the inventory.
        self.use_function = use_function
    # an item that can be picked up and used.
    def pick_up(self, objects):
        # add to the player's inventory and remove from the map
        # (26 slots -- presumably one per menu letter a-z; confirm in the UI)
        if len(g.inventory) >= 26:
            g.message('Your inventory is full, cannot pick up ' + self.owner.name + '.', libtcod.red)
        else:
            g.inventory.append(self.owner)
            objects.remove(self.owner)
            g.message('You picked up a ' + self.owner.name + '!', libtcod.green)
            # special case: automatically equip, if the corresponding equipment slot is unused
            equipment = self.owner.equipment
            if equipment and get_equipped_in_slot(equipment.slot, g.inventory) is None:
                equipment.equip()
    def use(self):
        # special case: if the object has the Equipment component, the "use" action is to equip/dequip
        if self.owner.equipment:
            self.owner.equipment.toggle_equip()
            return
        # just call the "use_function" if it is defined
        if self.use_function is None:
            g.message('The ' + self.owner.name + ' cannot be used.')
        else:
            if self.use_function() != 'cancelled':
                g.inventory.remove(self.owner)  # destroy after use, unless it was cancelled for some reason
    def drop(self, player, objects):
        # add to the map and remove from the player's inventory. also, place it at the player's coordinates
        objects.append(self.owner)
        g.inventory.remove(self.owner)
        self.owner.x = player.x
        self.owner.y = player.y
        # special case: if the object has the Equipment component, dequip it before dropping
        if self.owner.equipment:
            self.owner.equipment.dequip()
        g.message('You dropped a ' + self.owner.name + '.', libtcod.yellow)
class Fighter:
    # combat-related properties and methods (monster, player, NPC).
    # power/defense/max_hp take the player explicitly (instead of the
    # commented-out @property form) because get_all_equipped -- defined
    # elsewhere in this module -- needs the player to resolve equipment.
    def __init__(self, hp, defense, power, xp, death_function=None):
        # base_* are the intrinsic stats; effective stats add equipment bonuses.
        self.base_power = power
        self.base_max_hp = hp
        self.base_defense = defense
        self.xp = xp  # experience granted to the player on death
        self.death_function = death_function  # called as f(owner, objects)
        self.hp = hp
    # @property
    def power(self, player):
        # Effective attack power: base plus all equipped power bonuses.
        bonus = sum(equipment.power_bonus for equipment in get_all_equipped(self.owner, player))
        return self.base_power + bonus
    # @property
    def defense(self, player): # return actual defense, by summing up the bonuses from all equipped items
        bonus = sum(equipment.defense_bonus for equipment in get_all_equipped(self.owner, player))
        return self.base_defense + bonus
    # @property
    def max_hp(self, player): # return actual max_hp, by summing up the bonuses from all equipped items
        bonus = sum(equipment.max_hp_bonus for equipment in get_all_equipped(self.owner, player))
        return self.base_max_hp + bonus
    def take_damage(self, damage, objects, player):
        # apply damage if possible
        if damage > 0:
            self.hp -= damage
            # check for death. if there's a death function, call it
            if self.hp <= 0:
                function = self.death_function
                if function is not None:
                    function(self.owner, objects)
                if self.owner != player: # yield experience to the player
                    player.fighter.xp += self.xp
    def heal(self, amount, player):
        # heal by the given amount, without going over the maximum
        self.hp += amount
        if self.hp > self.max_hp(player):
            self.hp = self.max_hp(player)
    def attack(self, target, objects, player):
        # a simple formula for attack damage
        damage = self.power(player) - target.fighter.defense(player)
        if damage > 0:
            # make the target take some damage
            g.message(self.owner.name.capitalize() + ' attacks ' + target.name + ' for ' + str(damage) + ' hit points.')
            target.fighter.take_damage(damage, objects, player)
        else:
            g.message(self.owner.name.capitalize() + ' attacks ' + target.name + ' but it has no effect!')
class BasicMonster:
    """AI component: chase the player on sight, attack when adjacent."""

    def __init__(self):
        # The original printed a leftover debug string ("whatever") every
        # time a monster was created; a basic monster needs no per-instance
        # state, so the constructor is now a no-op.
        pass

    # AI for a basic monster.
    def take_turn(self, fov_map, player, map, objects):
        # a basic monster takes its turn. If you can see it, it can see you
        monster = self.owner
        if libtcod.map_is_in_fov(fov_map, monster.x, monster.y):
            # move towards player if far away
            if monster.distance_to(player) >= 2:
                monster.move_towards(player.x, player.y, map, objects)
            # close enough, attack! (if the player is still alive.)
            elif player.fighter.hp > 0:
                monster.fighter.attack(player, objects, player)
class ConfusedMonster:
    # AI for a temporarily confused monster (reverts to previous AI after a while).
    def __init__(self, old_ai, num_turns=g.CONFUSE_NUM_TURNS):
        # NOTE: the default num_turns is bound once at import time, as with
        # any Python default argument.
        self.old_ai = old_ai  # AI to restore once the confusion wears off
        self.num_turns = num_turns
    def take_turn(self, fov_map, player, map, objects):
        if self.num_turns > 0: # still confused...
            # move in a random direction, and decrease the number of turns confused
            self.owner.move(libtcod.random_get_int(0, -1, 1), libtcod.random_get_int(0, -1, 1), map, objects)
            self.num_turns -= 1
        else: # restore the previous AI (this one will be deleted because it's not referenced anymore)
            self.owner.ai = self.old_ai
            g.message('The ' + self.owner.name + ' is no longer confused!', libtcod.red)
class Equipment:
    # an object that can be equipped, yielding bonuses. automatically adds the Item component.
    def __init__(self, slot, power_bonus=0, defense_bonus=0, max_hp_bonus=0):
        # Stat bonuses applied while equipped (see Fighter.power/defense/max_hp).
        self.power_bonus = power_bonus
        self.defense_bonus = defense_bonus
        self.max_hp_bonus = max_hp_bonus
        self.slot = slot  # body slot name, e.g. shown in equip messages
        self.is_equipped = False
    def toggle_equip(self): # toggle equip/dequip status
        if self.is_equipped:
            self.dequip()
        else:
            self.equip()
    def equip(self):
        # if the slot is already being used, dequip whatever is there first
        old_equipment = get_equipped_in_slot(self.slot, g.inventory)
        if old_equipment is not None:
            old_equipment.dequip()
        # equip object and show a message about it
        self.is_equipped = True
        g.message('Equipped ' + self.owner.name + ' on ' + self.slot + '.', libtcod.light_green)
    def dequip(self):
        # dequip object and show a message about it
        if not self.is_equipped: return
        self.is_equipped = False
        g.message('Dequipped ' + self.owner.name + ' from ' + self.slot + '.', libtcod.light_yellow)
class Object:
def __init__(self, x, y, char, name, color, blocks=False, always_visible=False, fighter=None, ai=None, item=None,
level=1, equipment=None):
self.always_visible = always_visible
self.x = x
self.y = y
self.char = char
self.color = color
self.name = name
self.blocks = blocks
self.fighter = fighter
if self.fighter: # let the fighter component know who owns it
self.fighter.owner = self
self.ai = ai
if self.ai: # let the AI component know who owns it
self.ai.owner = self
self.item = item
if self.item: # let the Item component know who owns it
self.item.owner = self
self.equipment = equipment
if self.equipment:
self.equipment.owner = self
self.item = Item()
self.item.owner = self
self.level = level
def distance(self, x, y):
# return the distance to some coordinates
return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)
def send_to_back(self, objects):
# make this object be drawn first, so all others appear above it if they're in the same tile.
objects.remove(self)
objects.insert(0, self)
def move_towards(self, target_x, target_y, map, objects):
# vector from this object to the target, and distance
dx = target_x - self.x
dy = target_y - self.y
distance = math.sqrt(dx ** 2 + dy ** 2)
# normalize it to length 1 (preserving direction), then round it and
# convert to integer so the movement is restricted to the map grid
dx = int(round(dx / distance))
dy = int(round(dy / distance))
self.move(dx, dy, map, objects)
def move(self, dx, dy, map, objects):
# move by the given amount, if the destination is not blocked
if not is_blocked(self.x + dx, self.y + dy, map, objects):
self.x += dx
self.y += dy
def draw(self, fov_map, tile_map, con):
if (libtcod.map_is_in_fov(fov_map, self.x, self.y) or
(self.always_visible and tile_map[self.x][self.y].explored)):
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)
def clear(self, con):
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
def distance_to(self, other):
# return the distance to another object
dx = other.x - self.x
dy = other.y - self.y
return math.sqrt(dx ** 2 + dy ** 2)
def get_equipped_in_slot(slot, inventory):
# returns the equipment in a slot, or None if it's empty
for obj in inventory:
if obj.equipment and obj.equipment.slot == slot and obj.equipment.is_equipped:
return obj.equipment
return None
def get_all_equipped(obj, player): # returns a list of equipped items
if obj == player:
equipped_list = []
for item in g.inventory:
if item.equipment and item.equipment.is_equipped:
equipped_list.append(item.equipment)
return equipped_list
else:
return [] # other objects have no equipment
def is_blocked(x, y, map, objects):
# first test the map tile
if map[x][y].blocked:
return True
# now check for any blocking objects
for object in objects:
if object.blocks and object.x == x and object.y == y:
return True
return False
def monster_death(monster, objects):
# transform it into a nasty corpse! it doesn't block, can't be
# attacked and doesn't move
g.message('The ' + monster.name + ' is dead! You gain ' + str(monster.fighter.xp) + ' experience points.',
libtcod.orange)
g.message(monster.name.capitalize() + ' is dead!')
monster.char = '%'
monster.color = libtcod.dark_red
monster.blocks = False
monster.fighter = None
monster.ai = None
monster.name = 'remains of ' + monster.name
monster.send_to_back(objects)
def player_death(player, objects):
# the game ended!
g.message('You died!', libtcod.red)
g.game_state = 'dead'
# for added effect, transform the player into a corpse!
player.char = '%'
player.color = libtcod.dark_red
def create_orc(x, y):
orc_fighter_component = Fighter(hp=10, defense=0, power=3, death_function=monster_death, xp=35)
ai_component = BasicMonster()
monster = Object(x, y, 'o', 'orc', libtcod.desaturated_green,
blocks=True, fighter=orc_fighter_component, ai=ai_component)
return monster
def create_troll(x, y):
troll_fighter_component = Fighter(hp=16, defense=1, power=4, death_function=monster_death, xp=100)
ai_component = BasicMonster()
monster = Object(x, y, 'T', 'troll', libtcod.darker_green,
blocks=True, fighter=troll_fighter_component, ai=ai_component)
return monster
def create_heal_potion(x, y, use_function):
item_component = Item(use_function=use_function)
item = Object(x, y, '!', 'healing potion', libtcod.violet, item=item_component)
return item
def create_fireball_scroll(x, y, use_function):
item_component = Item(use_function=use_function)
item = Object(x, y, '#', 'scroll of fireball', libtcod.light_yellow, item=item_component)
return item
def create_lightning_scroll(x, y, use_function):
item_component = Item(use_function=use_function)
item = Object(x, y, '#', 'scroll of lightning bolt', libtcod.light_yellow, item=item_component)
return item
def create_confuse_scroll(x, y, use_function):
item_component = Item(use_function=use_function)
item = Object(x, y, '#', 'scroll of confusion', libtcod.light_yellow, item=item_component)
return item
def place_sword(x, y, use_function=None):
# create a sword
equipment_component = Equipment(slot='right hand')
item = Object(x, y, '/', 'sword', libtcod.sky, equipment=equipment_component)
return item
def place_shield(x, y, use_function=None):
# create a shield
equipment_component = Equipment(slot='left hand', defense_bonus=1)
item = Object(x, y, '[', 'shield', libtcod.darker_orange, equipment=equipment_component)
return item
| DenSev/snakes-in-dungeon | objects.py | objects.py | py | 13,268 | python | en | code | 0 | github-code | 36 |
74965781222 | #!/usr/bin/env python3
class Class:
def __init__(self,classId,name,teacherId):
if id is None:
self.id = 0
else:
self.id = id
self.classId = classId
self.name = name
self.teacherId = teacherId
@staticmethod
def createClass(obj):
list = []
for i in obj:
list.append(Class(i[0],i[1],i[2]))
return list
| isibol98/Python---MySQL | school-app/class1.py | class1.py | py | 417 | python | en | code | 0 | github-code | 36 |
3156914931 | import math
vezes = int(input())
cont = 0
while cont < vezes:
entrada = int(input())
contaDivisores = 0
raiz = int(math.sqrt(entrada)+1)
for i in range(1, raiz):
if entrada % i == 0:
contaDivisores += 1
if contaDivisores > 1:
print("Not Prime")
else:
print("Prime")
cont += 1 | MarceloBritoWD/URI-online-judge-responses | Matemática/1221.py | 1221.py | py | 300 | python | pt | code | 2 | github-code | 36 |
36515561991 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 10:11:41 2019
@author: Mohammed
"""
from sklearn import datasets
from sklearn import metrics
from sklearn import linear_model
from sklearn import svm
from sklearn import model_selection
import matplotlib.pyplot as plt
def main():
digits = datasets.load_digits()
print(digits.DESCR)
print()
plt.imshow(digits.data[0,:].reshape(8,8))
kf = model_selection.KFold(n_splits=2, shuffle=True)
for train_index,test_index in kf.split(digits.data):
clf1 = linear_model.Perceptron()
clf2 = svm.SVC(kernel="rbf", gamma=1e-3)
clf3 = svm.SVC(kernel="sigmoid", gamma=1e-4)
clf1.fit(digits.data[train_index], digits.target[train_index ])
prediction1 = clf1.predict(digits.data[test_index])
clf2.fit(digits.data[train_index], digits.target[train_index])
prediction2 = clf2.predict(digits.data[test_index])
clf3.fit(digits.data[train_index], digits.target[train_index])
prediction3 = clf3.predict(digits.data[test_index])
score1 = metrics.accuracy_score(digits.target[test_index], prediction1)
score2 = metrics.accuracy_score(digits.target[test_index], prediction2)
score3 = metrics.accuracy_score(digits.target[test_index], prediction3)
print("Perceptron accuracy score: ", score1)
print("SVM with RBF kernel accuracy score: ", score2)
print("SVM with Sigmoid kernel accuracy score: ", score3)
print()
main()
| mjachowdhury/MachineLearning-4thYear-CIT | Lab6/lab6.py | lab6.py | py | 1,564 | python | en | code | 0 | github-code | 36 |
7989155548 | import numpy as np
import cv2
import time
import math
from visual import *
import visual as vs # for 3D panel
import wx # for widgets
capture = cv2.VideoCapture(1)
def nothing(x):
pass
####### TRACKBAR #########
#cv2.namedWindow('bar')
#cv2.createTrackbar('R','bar',0,255,nothing)
#cv2.createTrackbar('G','bar',0,255,nothing)
#cv2.createTrackbar('B','bar',0,255,nothing)
#cv2.createTrackbar('R1','bar',0,255,nothing)
#cv2.createTrackbar('G1','bar',0,255,nothing)
#cv2.createTrackbar('B1','bar',0,255,nothing)
def rescale_frame(capturing, wpercent=50, hpercent=50):
width = int(capturing.shape[1] * wpercent / 100)
height = int(capturing.shape[0] * hpercent / 100)
return cv2.resize(capturing, (width, height), interpolation=cv2.INTER_AREA)
def roi_seg(img,hsv):
#r = cv2.getTrackbarPos('R','bar')
#g = cv2.getTrackbarPos('G','bar')
#b = cv2.getTrackbarPos('B','bar')
#r1 = cv2.getTrackbarPos('R1','bar')
#g1 = cv2.getTrackbarPos('G1','bar')
#b1 = cv2.getTrackbarPos('B1','bar')
r = 247
g = 145
b = 99
r1 = 255
g1 = 255
b1 = 131
low_limit = np.array([b,g,r]) # color (99,145,247)
upper_limit = np.array([b1,g1,r1]) # color (131,255,255)
# filtro anti-ruido
mask2 = cv2.inRange(hsv,low_limit,upper_limit)
res = cv2.bitwise_and(img,img,mask=mask2)
kernel = np.ones((20,20),np.uint8) # destruindo os ruidos
res1 = cv2.morphologyEx(res,cv2.MORPH_OPEN,kernel)
#cv2.imshow('Segmentando_cor',res1)
return res1
def filtragem(frame):
blurred = cv2.GaussianBlur(frame,(11,11),0)
errosion = cv2.erode(blurred,(11,11),1)
#cv2.imshow('filter',errosion)
hsv = cv2.cvtColor(errosion,cv2.COLOR_BGR2HSV)
roi = roi_seg(frame,hsv)
return roi
def contorno(white_img,frame):
ret1,thr = cv2.threshold(white_img, 127, 255, cv2.THRESH_BINARY)
#cv2.imshow('thr',thr) use issoo aki <----------------
canny = cv2.Canny(white_img, 50, 255)
#cv2.imshow('canny',canny)
# depois tente aplicar contorno no canny
#ret1,thr = cv2.threshold(white_img, 127, 255, cv2.THRESH_BINARY)
result = cv2.findContours(canny,cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
cont,hierarchy = result if len(result) == 2 else result[1:3]
#cv2.imshow('Canny',canny)
if len(cont) > 0:
areas = [cv2.contourArea(c) for c in cont]
max_index = np.argmax(areas)
cont_max = cont[max_index]
M = cv2.moments(cont[max_index])
area = cv2.contourArea(cont[max_index])
if (M['m00'] != 0):
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.circle(frame,(cx,cy),8,(0,255,105),3)
return (cx,cy,area)
return (0,0,0)
ball = sphere (color = color.green, radius = 0.4)
ball.mass = 1.0
ball.pos = (0,0,0)
ball_1 = sphere (color = color.blue, radius = 0.4)
dt = 0.5
t=0.0
def axes( frame, colour, sz, posn ): # Make axes visible (of world or frame).
# Use None for world.
directions = [vs.vector(sz,0,0), vs.vector(0,sz,0), vs.vector(0,0,sz)]
texts = ["X","Y","Z"]
posn = vs.vector(posn)
for i in range (3): # EACH DIRECTION
vs.curve( frame = frame, color = colour, pos= [ posn, posn+directions[i]])
vs.label( frame = frame,color = colour, text = texts[i], pos = posn+ directions[i],
opacity = 0, box = False )
axes( None, color.white, 3, (-11,6,0))
while True:
rate(100)
_,img = capture.read()
pressed_key = cv2.waitKey(1) & 0xFF
frame = rescale_frame(img)
height,width = frame.shape[:2]
roi = filtragem(frame)
### draw contorno e pegar o centroide:
cv2.imshow('Segmentando_cor',roi)
(x1,y1,area) = contorno(roi,frame)
r = math.sqrt(area/math.pi)
#cv2.imshow('frame',frame)
# Convertendo para o Mundo virtual
t = t + dt
print(x1,y1)
ball_1.pos = (x1/100,y1/100,0)
ball_1.radius = r/100
if pressed_key == ord("z"):
break
cv2.destroyAllWindows()
capture.release()
| samuelamico/PingPongOpenCV | ball_detect.py | ball_detect.py | py | 4,564 | python | en | code | 0 | github-code | 36 |
70173864105 | import os
import re
from typing import Any, Iterable, List
from flask import Flask, request
from werkzeug.exceptions import HTTPException
app = Flask(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
class CustomBadRequest(HTTPException):
status_code = 400
description = 'Bad request'
def parse_query(file: Iterable[str], query: str) -> List[str]:
res = map(lambda v: v.strip(), file)
for q in query.split("|"):
q_split = q.split(":")
cmd = q_split[0]
if cmd == "filter":
arg = q_split[1]
res = filter(lambda v, txt=arg: txt in v, res)
if cmd == "map":
arg = int(q_split[1])
res = map(lambda v, idx=arg: v.split(" ")[idx], res)
if cmd == "unique":
res = set(res)
if cmd == "sort":
arg = q_split[1]
reverse = arg == "desc"
res = sorted(res, reverse=reverse)
if cmd == "limit":
arg = int(q_split[1])
res = list(res)[:arg]
if cmd == "regex":
arg = q_split[1]
res = filter(lambda v, pattern=arg: re.search(pattern, v), res)
return res
@app.route("/perform_query")
def perform_query() -> Any:
try:
query = request.args['query']
file_name = request.args['file_name']
except KeyError:
raise CustomBadRequest(description=f"Neded uery was not found")
file_path = os.path.join(DATA_DIR, file_name)
if not os.path.exists(file_path):
return CustomBadRequest(description=f"{file_name} was not found")
with open(file_path) as file:
res = parse_query(file, query)
data = '\n'.join(res)
return app.response_class(data, content_type="text/plain")
| IgorVolokho99/LESSON_24_HomeWork | app.py | app.py | py | 1,798 | python | en | code | 0 | github-code | 36 |
11230963388 | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# 加载数据集
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
# 定义模型,加载预训练参数
model = models.resnet18(pretrained=True)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, 10)
# 定义损失函数和优化器
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# 训练模型
def train(model, train_loader, criterion, optimizer):
model.train()
for inputs, labels in train_loader:
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# 测试模型
def test(model, test_loader, criterion):
model.eval()
with torch.no_grad():
total_loss = 0.0
total_corrects = 0
for inputs, labels in test_loader:
outputs = model(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
total_loss += loss.item() * inputs.size(0)
total_corrects += torch.sum(preds == labels.data)
avg_loss = total_loss / len(test_loader.dataset)
accuracy = float(total_corrects) / len(test_loader.dataset)
return avg_loss, accuracy
# 训练和测试模型
num_epochs = 10
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch+1, num_epochs))
train(model, train_loader, criterion, optimizer)
test_loss, test_acc = test(model, test_loader, criterion)
print('Test loss: {:.4f}, Test accuracy: {:.4f}'.format(test_loss, test_acc))
# 保存模型
torch.save(model.state_dict(), 'cifar10_resnet18.pth') | rainy2k/deep-learning | transfer_learning.py | transfer_learning.py | py | 2,427 | python | en | code | 0 | github-code | 36 |
1429692832 | import sys
import math
def is_prime(n):
if n == 1: return False
for k in range(2, int(math.sqrt(n)) + 1):
if n % k == 0:
return False
return True
x = int(input())
if x == 2:
print(x)
sys.exit()
for i in range(x, 10**5+4):
if i % 2 == 1 and is_prime(i):
print(i)
sys.exit() | nawta/atcoder_archive | atcoder.jp/abc149/abc149_c/Main.py | Main.py | py | 338 | python | en | code | 0 | github-code | 36 |
1963001410 | from django.conf.urls import include, url
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext
from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks
from . import urls
from .richtext import (
ContentstateSnippetLinkConversionRule, ContentstateSnippetEmbedConversionRule,
SnippetLinkHandler, SnippetEmbedHandler,
)
@hooks.register("register_rich_text_features")
def register_snippet_link_feature(features):
feature_name = "snippet-link"
type_ = "SNIPPET"
features.register_link_type(SnippetLinkHandler)
features.register_editor_plugin(
"draftail",
feature_name,
draftail_features.EntityFeature(
{"type": type_, "icon": "snippet", "description": ugettext("Snippet Link")},
js=[
"wagtailsnippets/js/snippet-chooser-modal.js",
"wagtail_draftail_snippet/js/snippet-model-chooser-modal.js",
"wagtail_draftail_snippet/js/wagtail-draftail-snippet.js",
],
),
)
features.register_converter_rule(
"contentstate", feature_name, ContentstateSnippetLinkConversionRule
)
@hooks.register("register_rich_text_features")
def register_snippet_embed_feature(features):
feature_name = "snippet-embed"
type_ = "SNIPPET-EMBED"
features.register_embed_type(SnippetEmbedHandler)
features.register_editor_plugin(
"draftail",
feature_name,
draftail_features.EntityFeature(
{"type": type_, "icon": "code", "description": ugettext("Snippet Embed")},
js=[
"wagtailsnippets/js/snippet-chooser-modal.js",
"wagtail_draftail_snippet/js/snippet-model-chooser-modal.js",
"wagtail_draftail_snippet/js/wagtail-draftail-snippet.js",
],
),
)
features.register_converter_rule(
"contentstate", feature_name, ContentstateSnippetEmbedConversionRule
)
@hooks.register("insert_editor_js")
def editor_js():
# window.chooserUrls.snippetChooser = '{0}';
return format_html(
"""
<script>
window.chooserUrls.snippetLinkModelChooser = '{0}';
window.chooserUrls.snippetEmbedModelChooser = '{1}';
</script>
""",
# reverse('wagtailsnippets:list'),
reverse("wagtaildraftailsnippet:choose-snippet-link-model"),
reverse("wagtaildraftailsnippet:choose-snippet-embed-model"),
)
@hooks.register("register_admin_urls")
def register_admin_urls():
return [url(r"^snippets/", include(urls, namespace="wagtaildraftailsnippet"))]
| cividi/wagtail-draftail-snippet | wagtail_draftail_snippet/wagtail_hooks.py | wagtail_hooks.py | py | 2,730 | python | en | code | null | github-code | 36 |
27193939959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import gzip
import logging
import argparse
from collections import OrderedDict
LOG = logging.getLogger(__name__)
__version__ = "1.0.1"
__author__ = ("Xingguo Zhang",)
__email__ = "invicoun@foxmail.com"
__all__ = []
def read_tsv(file, sep="\t"):
if file.endswith(".gz"):
fh = gzip.open(file)
else:
fh = open(file)
for line in fh:
if isinstance(line, bytes):
line = line.decode('utf-8')
line = line.strip()
if not line or line.startswith("#"):
continue
yield line.split(sep)
fh.close()
def split_tax(tax):
r = OrderedDict()
for i in tax.split("|"):
level, value = i.split("__", 1)
if (level == "k") and (level in r):
continue
r[level] = value
return r
def stat_mpa_tax(file):
data = {}
for taxs, reads in read_tsv(file, sep="\t"):
taxs = split_tax(taxs)
index = list(taxs)
level = index[-1]
if level not in data:
data[level] = 0
data[level] += int(reads)
if level != "s":
continue
if "." not in taxs[level]:
continue
level = "sub"
if level not in data:
data[level] = 0
data[level] += int(reads)
print("#Kingdom\tPhylum\tClass\tOrder\tFamil\tGenus\tSpecies\tSub Species")
temp = []
for i in ["k", "p", "c", "o", "f", "g", "s", "sub"]:
reads = 0
if i in data:
reads = data[i]
temp.append(format(reads, ","))
print("\t".join(temp))
return 0
def add_hlep_args(parser):
parser.add_argument("input", metavar="FILE", type=str,
help="Input the abundance statistics result file of each sample, kreport2mpa.report")
return parser
def main():
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
format="[%(levelname)s] %(message)s"
)
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
For exmple:
stat_mpa_tax.py kreport2mpa.report >stat_tax.tsv
version: %s
contact: %s <%s>\
''' % (__version__, " ".join(__author__), __email__))
args = add_hlep_args(parser).parse_args()
stat_mpa_tax(args.input)
if __name__ == "__main__":
main()
| zxgsy520/metavirus | scripts/stat_mpa_tax.py | stat_mpa_tax.py | py | 2,401 | python | en | code | 1 | github-code | 36 |
71054390185 | import data_pipeline as dp
import glob
import numpy as np
import pandas as pd
import os
import shutil
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
## Global Parameters
IMG_WIDTH=300
IMG_HEIGHT=300
IMG_DIM = (IMG_WIDTH, IMG_HEIGHT)
def process_img(img):
img = conv_img(img)
img = scale_img(img)
return img
def conv_img(img):
return img_to_array(load_img(img, target_size=IMG_DIM))
def scale_img(img):
img_scaled = img.astype("float32")
img_scaled /= 255
return img_scaled
def load_files(classes):
class_string = '_'.join(classes)
files = dp.build_dataset(class_string)
X_train = []
y_train = []
X_val = []
y_val = []
for c in classes:
train_files = files[c]["train"]
val_files = files[c]["val"]
train_imgs = [conv_img(img) for img in train_files]
val_imgs = [conv_img(img) for img in val_files]
i = 0
while i < len(train_imgs):
X_train.append(train_imgs[i])
y_train.append(c)
i = i+1
i = 0
while i < len(val_imgs):
X_val.append(val_imgs[i])
y_val.append(c)
i = i+1
X_train = np.array(X_train)
X_val = np.array(X_val)
# visualize a sample image
array_to_img(train_imgs[0])
return X_train, y_train, X_val, y_val
def scale_imgs(X):
imgs_scaled = X.astype("float32")
imgs_scaled /= 255
return imgs_scaled
def encode_labels(y_train, y_val):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_val_enc = le.transform(y_val)
print(y_train[0:5], y_train_enc[0:5])
# y_train_enc = np.asarray(y_train_enc).astype('float32').reshape((-1,1))
# y_val_enc = np.asarray(y_val_enc).astype('float32').reshape((-1,1))
print(y_train_enc.shape)
return y_train_enc, y_val_enc, le
def gen_augmented_data(X_train, y_train, X_val, y_val):
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
horizontal_flip=True, fill_mode="nearest")
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(X_train, y_train,batch_size=30)
val_generator = val_datagen.flow(X_val, y_val, batch_size=30)
return train_generator, val_generator
def test_datagen(datagen, X,y):
generator = datagen.flow(X, y, batch_size=1)
img = [next(generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(16, 6))
print("Labels:", [item[1][0] for item in img])
l = [ax[i].imshow(img[i][0][0]) for i in range(0,5)]
def load_generators(classes):
X_train, y_train, X_val, y_val = load_files(classes)
X_train = scale_imgs(X_train)
X_val = scale_imgs(X_val)
y_train, y_val, _ = encode_labels(y_train, y_val)
train_gen, val_gen = gen_augmented_data(X_train, y_train, X_val, y_val)
return train_gen, val_gen
def get_test(classes):
class_string = '_'.join(classes)
files = dp.build_dataset(class_string)
_, y_train, _, y_val = load_files(classes)
X_test = []
y_test = []
for c in classes:
test = files[c]["test"]
test_imgs = [conv_img(img) for img in test]
i = 0
while i < len(test_imgs):
X_test.append(test_imgs[i])
y_test.append(c)
i = i+1
_, _, le = encode_labels(y_train, y_val)
X_test = np.array(X_test)
# visualize a sample image
array_to_img(test_imgs[0])
return X_test, y_test, le
| luke-truitt/learn-together-model | data_preprocessing.py | data_preprocessing.py | py | 3,735 | python | en | code | 0 | github-code | 36 |
18699363715 | #encoding=utf-8
from __future__ import unicode_literals
import sys
sys.path.append("../")
import Terry_toolkit as tkit
# data=tkit.Json(file_path="/mnt/data/dev/tdata/知识提取/chinese/test.json").auto_load()
# for it in data:
# print(it)
import json
# json.load()函数的使用,将读取json信息
file = open('/mnt/data/dev/tdata/知识提取/chinese/train.json','r',encoding='utf-8')
info = json.load(file)
# print(info)
relation={}
relation_full={}
n=0
for it in info:
if it['relation']!="NA":
print(it['head']['word'])
print(it['relation'].split('/')[-1])
relation[it['relation'].split('/')[-1]]=0
relation_full[it['relation']]=0
print(it['tail']['word'])
# print(it)
n=n+1
print("*"*40)
print(relation)
print(relation_full)
print(n)
print(len(relation)) | napoler/Terry-toolkit | test/ttjson.py | ttjson.py | py | 851 | python | en | code | 0 | github-code | 36 |
41681015503 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 21:06:54 2020
@author: ASTRA
"""
f = open("address.txt","w")
n = 10000
n_2 = 0
n_1 = 1
current = 1
for x in range(2, n+1):
current = n_2 + n_1
n_2 = n_1
n_1 = current
print(str(id(current)),file=f) | sysu18364125/os-assignment2 | trace.py | trace.py | py | 275 | python | en | code | 0 | github-code | 36 |
14919631267 | #!/usr/bin/env python
# This file is part of fdsgeogen.
#
# fdsgeogen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fdsgeogen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fdsgeogen. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess as sp
import os.path
import argparse
# check for command line options
parser = argparse.ArgumentParser()
parser.add_argument("--force",
help="submit FDS job even if job was already submitted", action="store_true")
parser.add_argument("--number_of_jobs",
help="maximal number of jobs in queue (default: 5)", default=5, type=int)
parser.add_argument("--status",
help="shows status of jobs (no action / submitted / running / finished)", action="store_true")
cmdl_args = parser.parse_args()
fn_subdirlist = 'fgg.subdirlist'
submit_cmd = 'sbatch'
subdirs = []
inputs = []
chids = []
# read in all sub directories, FDS input files, and CHIDs
if not os.path.isfile(fn_subdirlist):
print(" -- file %s could not be opened -> EXIT"%fn_subdirlist)
print()
sys.exit(1)
subdirs_file = open(fn_subdirlist, 'r')
for line in subdirs_file:
if line[0] == '#': continue
lc = line.rstrip().split(';')
subdirs.append(lc[0])
inputs.append(lc[1])
chids.append(lc[2])
subdirs_file.close()
print("processing subdirectories: ")
if cmdl_args.status:
cnt_finished = 0
cnt_running = 0
cnt_queued = 0
cnt_noaction = 0
for cd_ind in range(len(subdirs)):
subdir = subdirs[cd_ind]
chid = chids[cd_ind]
inputfile = inputs[cd_ind]
if os.path.isfile(os.path.join(subdir, "fgg.jureca.finished")):
print(subdir + ": simulation finished")
cnt_finished +=1
continue
if os.path.isfile(os.path.join(subdir, "fgg.jureca.running")):
print(subdir + ": simulation running")
cnt_running += 1
continue
if os.path.isfile(os.path.join(subdir, "fgg.jureca.submitted")):
print(subdir + ": simulation queued")
cnt_queued += 1
continue
print(subdir + ": no action so far")
cnt_noaction += 1
print("SUMMARY")
print("finished: ", cnt_finished)
print("running : ", cnt_running)
print("queued : ", cnt_queued)
print("noaction: ", cnt_noaction)
else:
submitted_number = 0
for cd_ind in range(len(subdirs)):
subdir = subdirs[cd_ind]
chid = chids[cd_ind]
inputfile = inputs[cd_ind]
print(" -", subdir)
if os.path.isfile(os.path.join(subdir, "fgg.jureca.finished")):
print(" ... skipping, is already finished")
continue
if os.path.isfile(os.path.join(subdir, "fgg.jureca.submitted")) and not cmdl_args.force:
print(" ... was already submitted")
else:
stdoutf = open(os.path.join(subdir, 'fgg.jureca.stdout'), 'w')
sp.Popen([submit_cmd, 'fgg.jureca.job'], stdout=stdoutf, stderr=sp.STDOUT, cwd=subdir).communicate()
stdoutf.close()
sf = open(os.path.join(subdir, 'fgg.jureca.submitted'), 'w')
sf.close()
print(" ... submitted to job queue")
submitted_number += 1
if submitted_number >= cmdl_args.number_of_jobs:
print(" maximal number of submitted jobs reached, stopping ")
break
| FireDynamics/fdsgeogen | scripts/fgg_run_jureca.py | fgg_run_jureca.py | py | 3,900 | python | en | code | 11 | github-code | 36 |
#!/usr/bin/python3
# import TAlight, where to collect the functions written once and for all as a common asset shared by all problems.
import sys
import yaml
import argparse
from colorama import init
# Initialize colorama so ANSI color codes work cross-platform (needed on Windows).
init()
#from termcolor import cprint

# Command-line interface: the checker evaluates one submission (read from stdin
# by default) for a specific (goal, subtask) pair of this problem.
parser = argparse.ArgumentParser(description="evaluate one single submission file (the stream received from stdin as default) for one specific (goal,subtask) pair", epilog="Enjoy the program! :)", fromfile_prefix_chars='@')

# Positional arguments: which goal (1-2) and which subtask (1-3) the submission targets.
parser.add_argument("goal", metavar='goal', type=int, choices=[1, 2],
                    help="goal=1,2 per specificare il goal per il quale la submission è intesa")
parser.add_argument("subtask", metavar='subtask', type=int, choices=[1, 2, 3],
                    help="subtask=1,2,3 per specificare il subtask del goal per il quale la submission è intesa")

# --quiet and --verbosity are mutually exclusive ways to control the feedback level.
group = parser.add_mutually_exclusive_group()
group.add_argument("-q", "--quiet", action="store_true",
                    help="impone che non venga scritto nè su stdout nè su stderr")
group.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2], default=2, metavar='LEVEL',
                    help="to set a partial feedback-level")

# --local distinguishes a local run from an on-server run; --test asserts the
# outcome code that the submitted instance is expected to produce (for self-testing).
parser.add_argument("-l", "--local", action="store_true",
                    help="local versus on server")
parser.add_argument("-t", "--test", type=str, default=None, metavar='outcome_code',
                    help="when testing, to assert the outcome code that the submitted instance should deliver")

args = parser.parse_args()
def internal_error(error_nickname, message):  # candidate for a problem-independent library
    """Report an internal (platform-side) error and terminate with exit status 2.

    Internal errors are never the fault of the problem solver and are
    detectable with local testing; ``error_nickname`` is a short code for the
    error and ``message`` is the detailed explanation.
    """
    #cprint(f"({error_nickname}) Internal error (never fault of the problem solver, detectable with local testing):", 'red', 'on_cyan', attrs=['bold'])
    #cprint(message, 'on_cyan')
    header = f"({error_nickname}) Internal error (never fault of the problem solver, detectable with local testing):"
    print(header)
    print(message)
    sys.exit(2)
def format_error(feedback_nickname, goal, subtask, message = None):  # candidate for a problem-independent library
    """Report a format error in the submitted file, then exit with status 0.

    A format error is the problem solver's fault (usually only a lack of care
    in the formatting of the submission, checkable in local).  An optional
    ``message`` adds more precise guidance before terminating.
    """
    #cprint(f"({feedback_nickname}) Format error.", 'red', 'on_cyan', attrs=['bold'], end=" ")
    #cprint(f"You should review the format of the file you have submitted for [problem={codeproblem}, goal={goal}, subtask={subtask}]. (You can do it in local, also taking profit of the format checking script made available to you.\n", 'on_cyan')
    header = f"({feedback_nickname}) Format error."
    advice = f"You should review the format of the file you have submitted for [problem={codeproblem}, goal={goal}, subtask={subtask}]. (You can do it in local, also taking profit of the format checking script made available to you.\n"
    print(header, end=" ")
    print(advice)
    if message is not None:
        #cprint("More precisely, pay attention to this:", 'on_cyan')
        print("More precisely, pay attention to this:")
        print(message)
    sys.exit(0)
def solution_error(feedback_nickname, goal, subtask, message = None):
    """Report a genuine error in the submitted solution, then exit 0."""
    print(f"({feedback_nickname}) Error found in the solution you submitted for [problem={codeproblem}, goal={goal}, subtask={subtask}].\n")
    if message is not None:
        print("More precisely, pay attention to this:")
        print(message)
    sys.exit(0)
def solution_OK(feedback_nickname, goal, subtask, message = None):
    """Acknowledge a feasible (not necessarily perfect) solution, then exit 0."""
    print(f"({feedback_nickname}) OK. Your solution to [problem={codeproblem}, goal={goal}, subtask={subtask}] is a feasible one.")
    if message is not None:
        print(message)
    sys.exit(0)
def solution_perfect(feedback_nickname, goal, subtask, lesson = None, next_challenge = None):
    """Acknowledge a perfect solution, optionally printing a lesson learned
    and a follow-up challenge, then exit 0."""
    print(f"({feedback_nickname}) OK. Your solution to [problem={codeproblem}, goal={goal}, subtask={subtask}] is perfect!")
    if lesson is not None:
        print("What have we learned:")
        print(lesson)
    if next_challenge is not None:
        print("What next:")
        print(next_challenge)
    sys.exit(0)
# PROBLEM SPECIFIC PART:
codeproblem = "tiling_mxn-boards_by_1x2-boards"
# Maximum grid dimensions handled by this checker.
M=20
N=20
# Load the localized feedback messages (f-string templates keyed by
# error nickname) used by check_decision / check_tiling below.
with open("eval_submission.it.yaml", 'r') as stream:
    try:
        api = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)
def is_tilable(m, n):
    """Return 1 if an m x n board admits a perfect tiling by 1x2 dominoes,
    else 0. A tiling exists exactly when the area m*n is even, i.e. when
    the two sides are not both odd."""
    both_odd = (m % 2) * (n % 2)
    return 1 - both_odd
def check_decision(goal, subtask):
    """Check the decision-table answers (goal 1) read from stdin.

    If subtask=1 only the first row of answers is checked (grids with m=1,
    i.e. a single row).
    If subtask=2 only the first two rows are checked (m=1 and m=2).
    If subtask=3 all MxN answers are checked.
    """
    def fstr(template):
        # NOTE(review): defined but never called; the eval(f"f'...'")
        # idiom below is used inline instead.
        return eval(f"f'{template}'")
    global M
    if subtask <= 2:
        M = subtask
    for i in range(1,M+1):
        try:
            risp_line_full = sys.stdin.readline()
            risp_line = risp_line_full.rstrip()
        except EOFError:
            tmpstr1=api["too-few-lines"]
            format_error("too-few-lines", goal, subtask, eval(f"f'{tmpstr1}'"))
        # Each answer row must contain exactly N characters.
        if len(risp_line) != N:
            if len(risp_line_full)-len(risp_line) == 1:
                tmpstr1=api["wrong-line-length-single-newline-char"]
                format_error("wrong-line-length-single-newline-char", goal, subtask, eval(f"f'{tmpstr1}'"))
            else:
                tmpstr1=api["wrong-line-length-more-newline-chars"]
                format_error("wrong-line-length-more-newline-chars", goal, subtask, eval(f"f'{tmpstr1}'"))
        for j in range(1,N+1):
            if risp_line[j-1] not in {"0","1"}:
                tmpstr1=api["wrong-char-bool"]
                format_error("wrong-char-bool", goal, subtask, eval(f"f'{tmpstr1}'"))
            # Compare the submitted bit against the known answer for (i, j).
            if int(risp_line[j-1]) != is_tilable(i, j):
                if is_tilable(i, j):
                    tmpstr1=api["wrong0-answ"]
                    solution_error("wrong0-answ", goal, subtask, eval(f"f'{tmpstr1}'"))
                else:
                    tmpstr1=api["wrong1-answ"]
                    solution_error("wrong1-answ", goal, subtask, eval(f"f'{tmpstr1}'"))
    if M==1:
        solution_perfect("perfect1-1-challenge", goal, subtask, api["perfect1-1-lesson"], api["perfect1-1-challenge"])
    elif M==2:
        solution_perfect("perfect1-2-challenge", goal, subtask, api["perfect1-2-lesson"], api["perfect1-2-challenge"])
    else:
        solution_perfect("perfect1-3-challenge", goal, subtask, api["perfect1-3-lesson"], api["perfect1-3-challenge"])
def check_tiling(goal, subtask):
    """Check a submitted domino tiling (goal 2), read from stdin.

    The tiling is evaluated even if it is for a grid larger than the one
    the chosen subtask intended. Encoding: each cell carries the cardinal
    direction of its partner half -- "W" immediately left of "E" forms a
    horizontal domino, "N" immediately above "S" a vertical one.
    """
    def fstr(template):
        return eval(f"f'{template}'")
    m, n = map(int, sys.stdin.readline().split())
    if not ( (0 <= m <= 20) and (0 <= n <= 20)):
        tmpstr1=api["out-of-range-m-n"]
        format_error("out-of-range-m-n", goal, subtask, eval(f"f'{tmpstr1}'"))
    # booked[j] is True when column j of the *current* row must hold the
    # "S" half completing a vertical domino started in the row above.
    booked = [False] * 20
    for i in range(1,m+1):
        try:
            risp_line_full = sys.stdin.readline()
            risp_line = risp_line_full.rstrip()
        except EOFError:
            tmpstr1=api["too-few-lines"]
            format_error("too-few-lines", goal, subtask, eval(f"f'{tmpstr1}'"))
        if len(risp_line) != n:
            if len(risp_line_full)-len(risp_line) == 1:
                tmpstr1=api["wrong-line-length-single-newline-char"]
                format_error("wrong-line-length-single-newline-char", goal, subtask, eval(f"f'{tmpstr1}'"))
            else:
                tmpstr1=api["wrong-line-length-more-newline-chars"]
                format_error("wrong-line-length-more-newline-chars", goal, subtask, eval(f"f'{tmpstr1}'"))
        for j in range(1,n+1):
            if booked[j-1] and risp_line[j-1] != "S":
                i = i-1  # report the row where the dangling "N" was placed
                tmpstr1=api["wrong-N"]
                format_error("wrong-N", goal, subtask, eval(f"f'{tmpstr1}'"))
            if risp_line[j-1] not in {"N","S","W","E"}:
                tmpstr1=api["wrong-char-card-point"]
                format_error("wrong-char-card-point", goal, subtask, eval(f"f'{tmpstr1}'"))
            if risp_line[j-1] == "S":
                if booked[j-1] == False:
                    tmpstr1=api["wrong-S"]
                    format_error("wrong-S", goal, subtask, eval(f"f'{tmpstr1}'"))
                # BUGFIX: consume the booking so the cell below this one is
                # free again (the original never cleared it).
                booked[j-1] = False
            # BUGFIX: an "E" must be preceded, in the same row, by its "W"
            # half; the original compared the *current* char against "W".
            if risp_line[j-1] == "E" and (j==1 or risp_line[j-2] != "W"):
                tmpstr1=api["wrong-E"]
                format_error("wrong-E", goal, subtask, eval(f"f'{tmpstr1}'"))
            # BUGFIX: the char following column j sits at 0-based index j,
            # not j+1 as in the original.
            if risp_line[j-1] == "W" and (j==n or risp_line[j] != "E"):
                tmpstr1=api["wrong-W"]
                format_error("wrong-W", goal, subtask, eval(f"f'{tmpstr1}'"))
            if risp_line[j-1] == "N":
                if i==m:
                    tmpstr1=api["wrong-N"]
                    format_error("wrong-N", goal, subtask, eval(f"f'{tmpstr1}'"))
                # BUGFIX: book the cell below (the original used `==`, a
                # no-op comparison instead of an assignment).
                booked[j-1] = True
    solution_perfect("perfect2-challenge", goal, subtask)
#tmpstr1=api["goal2-task3"]
#if args.goal==2 and args.subtask > 2:
#    internal_error("goal2-task3", eval(f"f'{tmpstr1}'"))
# Dispatch on the requested goal: 1 = decision table, otherwise tiling.
if args.goal==1:
    check_decision(args.goal, args.subtask)
else:
    check_tiling(args.goal, args.subtask)
| romeorizzi/TALight | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/eval_submission.py | eval_submission.py | py | 10,376 | python | en | code | 11 | github-code | 36 |
31114009708 | import os
import fnmatch
def coroutine(func):
    """Decorator: instantiate the generator and advance it to its first
    `yield` so it is immediately ready to accept `.send()` calls."""
    def start(*args,**kwargs):
        g = func(*args,**kwargs)
        # BUGFIX: `g.next()` is Python 2 only; the builtin next() works on
        # both Python 2.6+ and Python 3.
        next(g)
        return g
    return start
@coroutine
def find_files(target):
    """Coroutine: receive (topdir, pattern) pairs and send every file path
    under topdir whose basename matches the fnmatch pattern to `target`."""
    while True:
        topdir,pattern = (yield)
        for path,dirname,filelist in os.walk(topdir):
            for name in filelist:
                # BUGFIX: fnmatch takes (name, pattern); the original called
                # fnmatch.fnmatch(name.pattern), an AttributeError at runtime.
                if fnmatch.fnmatch(name, pattern):
                    target.send(os.path.join(path,name))
@coroutine
def opener(target):
    """Coroutine: receive filenames, open each one (transparently handling
    .gz and .bz2 compression by extension) and send the open file object
    to `target`.

    NOTE(review): relies on `gzip` and `bz2` being imported at module
    level, which is not visible in this chunk -- confirm.
    """
    while True:
        name = (yield)
        if name.endswith(".gz"):
            open_fn = gzip.open
        elif name.endswith(".bz2"):
            open_fn = bz2.open
        else:
            open_fn = open
        target.send(open_fn(name))
@coroutine
def cat(target):
    """Coroutine: receive open file objects and forward their lines, one
    at a time, to `target`."""
    while True:
        source = (yield)
        for text_line in source:
            target.send(text_line)
@coroutine
def grep(pattern,target):
    """Coroutine: forward to `target` only those received lines that
    contain `pattern` as a substring."""
    while True:
        line = (yield)
        if pattern not in line:
            continue
        target.send(line)
@coroutine
def printer():
    """Coroutine sink: write every received line to standard output.

    NOTE(review): uses `sys`, which is not imported in the visible part of
    this module -- confirm a module-level `import sys` exists.
    """
    while True:
        received = (yield)
        sys.stdout.write(received)
# just opposite to pipe using generator
# Wire the pipeline sink-first: coroutines *push* data forward (the
# opposite of a pull-based generator pipe), then feed it one request.
finder = find_files(opener(cat(grep("python",printer()))))
finder.send(("www","access-log"))
2410926756 | from django.contrib import admin
from db_file_storage.form_widgets import DBAdminClearableFileInput
from django import forms
from .models import Kid, Photo, PhotoFile
# Spanish for "Site administration"; used as both the admin page header
# and the browser tab title.
admin.site.site_header = "Administración del sitio"
admin.site.site_title = admin.site.site_header
class PhotoForm(forms.ModelForm):
    """ModelForm for Photo that renders the `image` field with the
    db_file_storage admin widget (files are stored in the database)."""
    class Meta:
        model = Photo
        exclude = []
        widgets = {
            'image': DBAdminClearableFileInput
        }
class PhotoAdmin(admin.ModelAdmin):
    """Admin options for Photo that plug in PhotoForm.

    NOTE(review): this class is never passed to admin.site.register, so
    the custom form is currently unused -- confirm whether
    `admin.site.register(Photo, PhotoAdmin)` was intended.
    """
    form = PhotoForm
admin.site.register(Kid)
# NOTE(review): registered without PhotoAdmin, so the db_file_storage
# widget defined above is not applied -- confirm intent.
admin.site.register(Photo)
| kiddybigmoments/kiddybigmoments-server | webapp/admin.py | admin.py | py | 553 | python | en | code | 0 | github-code | 36 |
74712490663 | #!/usr/bin/env python3
from aoc2021.util import print_solutions, import_strs
import timeit
# Sentinel written into a bingo-board cell once its number has been called.
Mark = "M"
def part_1(inputs):
    """Play bingo with `inputs` = (called numbers, boards); return the
    first winning board's unmarked-cell score times the winning number,
    or None if no board ever wins. Boards are marked in place."""
    numbers, all_boards = inputs
    for drawn in numbers:
        for candidate in all_boards:
            if not mark_board(candidate, drawn):
                continue
            if check_board(candidate):
                return score_board(candidate) * drawn
    return None
def score_board(board):
    """Return the sum of every unmarked cell on `board`.

    Marked cells hold the module-level sentinel `Mark` and are skipped.
    """
    return sum(cell for row in board for cell in row if cell != Mark)
def mark_board(board, target):
    """Replace every occurrence of `target` on `board` with the `Mark`
    sentinel, in place. Return True if at least one cell was marked."""
    hits = 0
    for row in board:
        for col, value in enumerate(row):
            if value == target:
                row[col] = Mark
                hits += 1
    return hits > 0
def check_board(board):
    """Return True if any complete row or column of `board` is fully
    marked (every cell equals the `Mark` sentinel)."""
    columns = [list(col) for col in zip(*board)]
    for line in board + columns:
        if all(cell == Mark for cell in line):
            return True
    return False
def read_bingo(path):
    """Parse a bingo input file at `path`.

    The first line is the comma-separated list of called numbers; the
    remaining lines are blank-line-separated 2-D number grids.
    Returns (called, boards).
    """
    with open(path) as handle:
        first, *rest = handle.readlines()
    called = [int(token) for token in first.strip().split(",")]
    boards = []
    current = []
    for raw in rest:
        stripped = raw.strip()
        if stripped:
            current.append([int(token) for token in stripped.split(" ") if token])
        elif current:
            boards.append(current)
            current = []
    # Flush the last board if the file does not end with a blank line.
    if current:
        boards.append(current)
    return (called, boards)
def part_2(inputs):
    """Return score*number for the LAST board to win (let-the-squid-win
    strategy). `inputs` = (called numbers, boards); boards are mutated
    in place: winners are tombstoned with None and compacted out at the
    start of each round. Returns None if no board ever wins."""
    called, boards = inputs
    last_score = None
    for target in called:
        # Drop boards that won in a previous round.
        boards[:] = [b for b in boards if b]
        if not boards:
            break
        for idx, board in enumerate(boards):
            # BUGFIX: removed leftover debug code that printed any board
            # whose score happened to equal 148.
            if mark_board(board, target) and check_board(board):
                last_score = score_board(board) * target
                boards[idx] = None  # tombstone; removed next round
    return last_score
def main():
    """Entry point: run part_2 over the sample and real puzzle inputs."""
    print_solutions(
        ["resources/day4-test.txt", "resources/day4.txt"],
        #["resources/day4-test.txt"],
        read_bingo,
        part_2
    )
# Standard script guard.
if __name__ == "__main__":
    main()
| chao-mu/aoc2021 | src/day4.py | day4.py | py | 2,206 | python | en | code | 0 | github-code | 36 |
1922678376 | from problem_000 import *
from sequences import triangle_number, triangle_number_inverse, is_triangle_number, is_pentagonal_number, is_hexagonal_number
class Problem_045(Problem):
    """Project Euler problem 45: find the next number after the input
    (default 40755) that is simultaneously triangular, pentagonal, and
    hexagonal."""
    def __init__(self):
        self.problem_nr = 45
        # Input format: a single integer, no upper bound.
        self.input_format = (InputType.NUMBER_INT, 1, None)
        self.default_input = 40755
        self.description_str = '''Triangle, pentagonal, and hexagonal numbers are generated by the following formulae:
Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ...
Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ...
It can be verified that T285 = P165 = H143 = ''' + dye_input_var(40755) + '''.
Find the next triangle number that is also pentagonal and hexagonal.
'''
    def calculate(self, N):
        # Start from the first triangle index strictly beyond N, then walk
        # forward until a triangle number is also pentagonal and hexagonal.
        n = int(triangle_number_inverse(N)) + 1
        t = triangle_number(n)
        while not (is_pentagonal_number(t) and is_hexagonal_number(t)):
            n += 1
            t = triangle_number(n)
        self.last_result = t
        self.last_result_details = n
    def details(self):
        # Report the triangle index n of the number that was found.
        n = self.last_result_details
        return "n = " + dye_highlight(n)
# Import-time side effect: register this problem with the global registry.
register_problem(Problem_045())
| Kwasniok/ProjectEuler-Solver | src/problem_045.py | problem_045.py | py | 1,216 | python | en | code | 1 | github-code | 36 |
27863085436 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import os
# Automation script: types the displayed words into the 10fastfingers
# French typing test. Requires a local chromedriver on PATH.
driver = webdriver.Chrome()
driver.get("https://10fastfingers.com/typing-test/french")
# Windows-only pause ('pause' shell builtin) so the user can dismiss
# banners / log in before the bot starts.
os.system('pause')
# Wait (up to 60 s each) for the word containers to be rendered.
WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.ID, 'words')))
WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.ID, 'row1')))
div_word_list = driver.find_element_by_id("row1")
spans_word_list = div_word_list.find_elements_by_xpath("./span")
len_spans_word_list = len(spans_word_list)
# Poll until the word spans are actually populated.
while len_spans_word_list == 0:
    sleep(0.5)
    div_word_list = driver.find_element_by_id("row1")
    spans_word_list = div_word_list.find_elements_by_xpath("./span")
    len_spans_word_list = len(spans_word_list)
word_list = []
for element in spans_word_list:
    word_list.append(element.get_attribute('innerText'))
input_field = driver.find_element_by_id('inputfield')
# Type each word followed by a space; the delay keeps a human-looking pace.
for word in word_list :
    input_field.send_keys(str(word))
    input_field.send_keys(Keys.SPACE)
    sleep(0.27)
8365190714 | # abstract types
from .expr import Expr
# basic types
from .leafexpr import LeafExpr
from .addr import Addr
from .bytes import Bytes
from .int import Int, EnumInt
from .methodsig import MethodSignature
# properties
from .arg import Arg
from .txn import TxnType, TxnField, TxnExpr, TxnaExpr, TxnArray, TxnObject, Txn
from .gtxn import GtxnExpr, GtxnaExpr, TxnGroup, Gtxn
from .gaid import GeneratedID
from .gitxn import Gitxn, GitxnExpr, GitxnaExpr, InnerTxnGroup
from .gload import ImportScratchValue
from .global_ import Global, GlobalField
from .app import App, AppField, OnComplete, AppParam
from .asset import AssetHolding, AssetParam
from .acct import AccountParam
# inner txns
from .itxn import InnerTxnBuilder, InnerTxn, InnerTxnAction
# meta
from .array import Array
from .tmpl import Tmpl
from .nonce import Nonce
# unary ops
from .unaryexpr import (
UnaryExpr,
Btoi,
Itob,
Len,
BitLen,
Sha256,
Sha512_256,
Keccak256,
Not,
BitwiseNot,
Sqrt,
Pop,
Balance,
MinBalance,
BytesNot,
BytesSqrt,
BytesZero,
Log,
)
# binary ops
from .binaryexpr import (
BinaryExpr,
Add,
Minus,
Mul,
Div,
Mod,
Exp,
BitwiseAnd,
BitwiseOr,
BitwiseXor,
ShiftLeft,
ShiftRight,
Eq,
Neq,
Lt,
Le,
Gt,
Ge,
GetBit,
GetByte,
BytesAdd,
BytesMinus,
BytesDiv,
BytesMul,
BytesMod,
BytesAnd,
BytesOr,
BytesXor,
BytesEq,
BytesNeq,
BytesLt,
BytesLe,
BytesGt,
BytesGe,
ExtractUint16,
ExtractUint32,
ExtractUint64,
)
# ternary ops
from .ternaryexpr import Divw, Ed25519Verify, SetBit, SetByte
from .substring import Substring, Extract, Suffix
# more ops
from .naryexpr import NaryExpr, And, Or, Concat
from .widemath import WideRatio
# control flow
from .if_ import If
from .cond import Cond
from .seq import Seq
from .assert_ import Assert
from .err import Err
from .return_ import Return, Approve, Reject
from .subroutine import (
Subroutine,
SubroutineDefinition,
SubroutineDeclaration,
SubroutineCall,
SubroutineFnWrapper,
)
from .while_ import While
from .for_ import For
from .break_ import Break
from .continue_ import Continue
# misc
from .scratch import ScratchSlot, ScratchLoad, ScratchStore, ScratchStackStore
from .scratchvar import ScratchVar
from .maybe import MaybeValue
from .multi import MultiValue
__all__ = [
"Expr",
"LeafExpr",
"Addr",
"Bytes",
"Int",
"EnumInt",
"MethodSignature",
"Arg",
"TxnType",
"TxnField",
"TxnExpr",
"TxnaExpr",
"TxnArray",
"TxnObject",
"Txn",
"GtxnExpr",
"GtxnaExpr",
"TxnGroup",
"Gtxn",
"GeneratedID",
"ImportScratchValue",
"Global",
"GlobalField",
"App",
"AppField",
"OnComplete",
"AppParam",
"AssetHolding",
"AssetParam",
"AccountParam",
"InnerTxnBuilder",
"InnerTxn",
"InnerTxnAction",
"Gitxn",
"GitxnExpr",
"GitxnaExpr",
"InnerTxnGroup",
"Array",
"Tmpl",
"Nonce",
"UnaryExpr",
"Btoi",
"Itob",
"Len",
"BitLen",
"Sha256",
"Sha512_256",
"Keccak256",
"Not",
"BitwiseNot",
"Sqrt",
"Pop",
"Balance",
"MinBalance",
"BinaryExpr",
"Add",
"Minus",
"Mul",
"Div",
"Mod",
"Exp",
"Divw",
"BitwiseAnd",
"BitwiseOr",
"BitwiseXor",
"ShiftLeft",
"ShiftRight",
"Eq",
"Neq",
"Lt",
"Le",
"Gt",
"Ge",
"GetBit",
"GetByte",
"Ed25519Verify",
"Substring",
"Extract",
"Suffix",
"SetBit",
"SetByte",
"NaryExpr",
"And",
"Or",
"Concat",
"WideRatio",
"If",
"Cond",
"Seq",
"Assert",
"Err",
"Return",
"Approve",
"Reject",
"Subroutine",
"SubroutineDefinition",
"SubroutineDeclaration",
"SubroutineCall",
"SubroutineFnWrapper",
"ScratchSlot",
"ScratchLoad",
"ScratchStore",
"ScratchStackStore",
"ScratchVar",
"MaybeValue",
"MultiValue",
"BytesAdd",
"BytesMinus",
"BytesDiv",
"BytesMul",
"BytesMod",
"BytesAnd",
"BytesOr",
"BytesXor",
"BytesEq",
"BytesNeq",
"BytesLt",
"BytesLe",
"BytesGt",
"BytesGe",
"BytesNot",
"BytesSqrt",
"BytesZero",
"ExtractUint16",
"ExtractUint32",
"ExtractUint64",
"Log",
"While",
"For",
"Break",
"Continue",
]
| gconnect/voting-dapp-pyteal-react | venv/lib/python3.8/site-packages/pyteal/ast/__init__.py | __init__.py | py | 4,490 | python | en | code | 6 | github-code | 36 |
70521717223 | import datetime
import json
import os
import time
import random
import requests
from Crypto.Cipher import AES
from django.db.models import Q
from django.http import JsonResponse,HttpResponseRedirect
from django.views.decorators.cache import cache_page
from activety.models import Usercoupon
from news.views import to_dict
from shopnew.models import Topimg
from shopping.fengqiao import *
from shopping.models import *
from shopping.pay import *
# WeChat Pay "unified order" endpoint.
url = "https://api.mch.weixin.qq.com/pay/unifiedorder"
appid = 'wx16360426dc864b7d'
mch_id = '1537642871'
trade_type = 'JSAPI'
# SECURITY(review): merchant API key and courier credentials are hardcoded
# in source; they should live in settings/environment variables.
key = '1234567890QWERTYUIOPASDFGHJKLZXC'
clientCode = 'LLYLKJSZ'
checkWord = 'STGuVhBlDznxZbvyFFSxP5fdsyH8geFq'
# The bare string below documents the per-order variable parameters:
# body = category, out_trade_no = merchant order number, total_fee in
# fen (cents), spbill_create_ip = client IP, notify_url = async callback.
"""
可变参数
body = 'test' #类目
out_trade_no = '20191210' #商户订单号
total_fee = 88 #支付金额,单位分
spbill_create_ip = '14.23.150.211' #终端ip
notify_url = 'https://www.jianshu.com/p/40c7bd9388a6' #通知回调url
"""
def get_params(body,out_trade_no,total_fee,spbill_create_ip,openid,notify_url):
    """Assemble the parameter dict for WeChat Pay's unified-order call.

    Fixed merchant fields come from the module-level constants; a fresh
    random `nonce_str` is generated via randnum().
    """
    return {
        'appid': appid,
        'mch_id': mch_id,
        'body': body,
        'out_trade_no': out_trade_no,
        'total_fee': total_fee,
        'spbill_create_ip': spbill_create_ip,
        'trade_type': trade_type,
        'notify_url': notify_url,
        'nonce_str': randnum(),
        'openid': openid,
    }
# Sign the parameter dict and serialize it to the XML payload WeChat expects.
def get_xml_params(data_params,key):
    """Compute the signature for `data_params` with `key`, add it in place
    under 'sign', and return the XML-serialized parameters."""
    data_params['sign'] = get_sign(data_params, key)
    return trans_dict_to_xml(data_params)
# Call the WeChat Pay unified-order API.
def pay_wx(xml_params):
    """POST the signed XML to the unified-order endpoint and return the
    response parsed into a dict."""
    resp = requests.post(url, data=xml_params)
    return trans_xml_to_dict(resp.text)
# Query payment status; on success, create the courier waybill and persist the order.
def query_pay(obj):
    """Check the WeChat payment state of the order described by `obj`.

    On trade_state == 'SUCCESS', creates the logistics order and saves a
    ZhouBianorders row with type=2 (paid). Returns
    {'status': 1, 'data': 'SUCCESS'} on success, else {'status': 0, 'data': state}.
    """
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': obj['order_num'],
        'nonce_str': randnum(),
    }
    sign = get_sign(params, key)
    params['sign'] = sign
    xml_params = trans_dict_to_xml(params)
    # Query the order (note: local `url` shadows the module-level constant).
    url = 'https://api.mch.weixin.qq.com/pay/orderquery'
    res = requests.post(url,data=xml_params)
    get_dict = trans_xml_to_dict(res.text)
    state = get_dict['trade_state']
    if state == 'SUCCESS':
        # Create the logistics (courier) order.
        xml = compose_addorderxml(obj)
        response = addorder(xml)
        try:
            mailno = response['mailno']
        except:
            # Query the logistics status as a fallback.
            # NOTE(review): bare except, and this branch re-reads
            # response['mailno'] (which just failed) instead of using
            # `res` from getstatu -- presumably a bug; confirm.
            res = getstatu(obj['order_num'])
            mailno = response['mailno']
        # Persist the paid order.
        order = ZhouBianorders()
        order.order_location = obj['order_location']
        order.order_phone = obj['order_phone']
        order.getman = obj['getman']
        order.order_num = obj['order_num']
        order.order_user = obj['order_user']
        order.order_start_time = obj['order_start_time']
        order.order_true_pay =obj['order_true_pay']
        order.goodnum =obj['goodnum']
        order.type = 2
        order.couponid = 0 if obj['couponid'] == '' else obj['couponid']
        order.zhoubianid = obj['zhoubianid']
        order.goodname = obj['goodname']
        order.goodimg = obj['goodimg']
        order.goodprice = obj['goodprice']
        order.waybill_id = response['mailno']
        order.save()
        return {'status':1,'data':'SUCCESS'}
    else:
        return {'status':0,'data':state}
# Close an order on WeChat Pay and cancel its logistics waybill.
def close_pay(order_num):
    """Close WeChat order `order_num` and delete the matching courier
    order. Returns WeChat's closeorder response parsed into a dict."""
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': order_num,
        'nonce_str': randnum(),
    }
    sign = get_sign(params, key)
    params['sign'] = sign
    xml_params = trans_dict_to_xml(params)
    # Close the order (note: local `url` shadows the module-level constant).
    url = 'https://api.mch.weixin.qq.com/pay/closeorder'
    res = requests.post(url,data=xml_params)
    get_dict = trans_xml_to_dict(res.text)
    # Cancel the logistics order.
    xml = compose_delorderxml(order_num)
    fengqiaodelorder(xml)
    return get_dict
def closeorder(request):
    """View: close the order named in POST['order_num'].

    Responds {'status': 1, 'code': 'SUCCESS'} on success, otherwise
    {'status': 0, 'code': 'FAIL'}.
    """
    order_num = request.POST.get('order_num')
    result = close_pay(order_num)
    if result['result_code'] == 'SUCCESS':
        return JsonResponse({'status': 1, 'code': 'SUCCESS'})
    return JsonResponse({'status': 0, 'code': 'FAIL'})
@cache_page(60*60,cache='longtime')
def goods_type(request):
    """View (cached 1 h): list all goods categories with a default color."""
    data = [
        {
            'type_id': category.id,
            'type_name': category.type_name,
            'type_icon': category.type_icon,
            'color': '#6e6d6d',
        }
        for category in Good_types.objects.all()
    ]
    return JsonResponse({'status': 1, 'data': data})
# def goods(request):
# goods = Goods.objects.all()
# types = Good_types.objects.all()
# typecontent = []
# for type in types:
# obj = {}
# obj['type_id'] = type.id
# obj['type_name'] = type.type_name
# obj['type_icon'] = type.type_icon
# obj['color'] = '#6e6d6d'
# typecontent.append(obj)
# data = {}
# for good in goods:
# type = good.type
# if type in data:
# goodlist = data[type]
# obj_in = {}
# obj_in['good_id'] = good.id
# obj_in['good_name'] = good.goods_name
# obj_in['goods_price'] = float(good.goods_price)
# obj_in['store_num'] = good.store_num
# obj_in['description'] = good.description
# obj_in['picture'] = good.picture
# obj_in['num'] = 0
# obj_in['type'] = type
# goodlist.append(obj_in)
# else:
# goodlist = []
# obj_in = {}
# obj_in['good_id'] = good.id
# obj_in['good_name'] = good.goods_name
# obj_in['goods_price'] = float(good.goods_price)
# obj_in['store_num'] = good.store_num
# obj_in['description'] = good.description
# obj_in['picture'] = good.picture
# obj_in['num'] = 0
# obj_in['type'] = type
# goodlist.append(obj_in)
# data[type] = goodlist
# datalist = [{'type':k,'data':v} for k,v in data.items()]
# return JsonResponse({'status': 1, 'data': datalist,'typecontent':typecontent})
# Merchandise listing.
@cache_page(60*60,cache='longtime')
def showzhoubian(request):
    """View (cached 1 h): list all merchandise ("zhoubian") items."""
    data = [
        {
            'img': item.img,
            'name': item.name,
            'price': item.price,
            'log': item.log,
            'id': item.id,
        }
        for item in Zhoubian.objects.all()
    ]
    return JsonResponse({'status': 1, 'data': data})
# Merchandise detail page.
@cache_page(60*60,cache='longtime')
def thezhoubian(request):
    """View (cached 1 h): one merchandise item, identified by
    GET['zhoubianid'], with its non-empty detail images."""
    item = Zhoubian.objects.get(id=int(request.GET.get('zhoubianid')))
    details = [item.detail1, item.detail2, item.detail3, item.detail4]
    obj = {
        'img': item.img,
        'name': item.name,
        'price': item.price,
        'log': item.log,
        'id': item.id,
        'detail': [d for d in details if d != ''],
    }
    return JsonResponse({'status': 1, 'data': obj})
# If the order is still unpaid ~15 minutes later, delete it.
def delorder(order_num):
    """Delete order `order_num` if it is still in state 1 (unpaid) after
    910 seconds.

    NOTE(review): time.sleep blocks the calling thread for ~15 minutes;
    this belongs in a scheduled/async task, not an inline sleep.
    """
    time.sleep(910)
    order = ZhouBianorders.objects.filter(order_num=order_num)
    if order.exists() and order[0].type == 1:
        order.delete()
# Submit a merchandise order: consume the coupon, create a WeChat prepay,
# and return the signed client payload plus the (not yet persisted) order.
def post_zhoubianorder(request):
    """View: create a WeChat JSAPI prepay for one merchandise item.

    Returns {'status': 1, 'wx_data': <signed prepay params>,
    'order_data': <order dict>}; the order itself is only persisted later
    by query_pay once payment succeeds (type=1 means unpaid).
    """
    order_user = request.POST.get('userid')
    order_location = request.POST.get('location')
    order_phone = request.POST.get('phone')
    order_couponid = request.POST.get('couponid')
    order_true_pay = request.POST.get('true_pay')
    getman = request.POST.get('getman')
    goodnum = request.POST.get('goodnum')
    zhoubianid = request.POST.get('zhoubianid')
    # Determine the client IP (forwarded header first, if behind a proxy).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0] # real client IP when behind a load balancer/proxy
    else:
        ip = request.META.get('REMOTE_ADDR') # direct (or proxy) IP
    zhoubian = Zhoubian.objects.get(id=int(zhoubianid))
    order_num = randnum()
    # Consume the coupon, if one was applied.
    if order_couponid != '':
        usercou = Usercoupon.objects.filter(Q(userid=order_user)&Q(coupon_id =int(order_couponid)))
        if usercou.exists():
            usercou[0].delete()
    # WeChat unified-order parameters.
    body = 'test' # category
    out_trade_no = order_num # merchant order number
    total_fee = int(float(order_true_pay)*100) # amount in fen (cents)
    spbill_create_ip = ip # terminal IP
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30' # async payment callback URL
    # NOTE(review): order_user[:-3] is passed as the openid -- presumably the
    # user id embeds a 3-char suffix; confirm.
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_user[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    # {'return_code': 'SUCCESS', 'trade_type': 'JSAPI', 'prepay_id': 'wx18102325542417b42cdbe9ef1001807600',
    # 'mch_id': '1537642871', 'sign': '36DEB26F5187D2DB8ABE839373EC09F1', 'return_msg': 'OK',
    # 'appid': 'wx16360426dc864b7d', 'result_code': 'SUCCESS', 'nonce_str': 'vVqn4SuQts0v18iE'}
    # Build the payload the mini-program passes to wx.requestPayment.
    timestamp = str(int(time.time()))
    send_data = {}
    send_data['timeStamp']= timestamp
    send_data['appId']= response_dict['appid']
    send_data['signType']= 'MD5'
    send_data['nonceStr']= response_dict['nonce_str'].upper()
    send_data['package']= 'prepay_id='+ response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = order_num
    # Order data echoed back to the client (persisted only after payment).
    obj = {}
    obj['order_location'] = order_location
    obj['order_phone'] = order_phone
    obj['getman'] = getman
    obj['order_num'] = order_num
    obj['order_user'] = order_user
    now = datetime.datetime.now()
    end = now + datetime.timedelta(minutes=10)
    obj['order_start_time'] = str(now)[:-7]
    obj['order_end_time'] = str(end)[:-7]
    obj['order_true_pay'] = order_true_pay
    obj['goodnum'] = goodnum
    obj['type'] = 1
    obj['couponid'] = order_couponid
    obj['zhoubianid'] = zhoubian.id
    obj['goodname'] = zhoubian.name
    obj['goodimg'] = zhoubian.img
    obj['goodprice'] = zhoubian.price
    return JsonResponse({'status': 1,'wx_data':send_data,'order_data':obj})
# WeChat asynchronous payment-result notification endpoint.
def get_wxnotice_pay(request):
    """Payment callback from WeChat Pay.

    SECURITY(review): the verification/update logic is commented out, so
    this endpoint acknowledges every callback without validating the
    notification or updating the order -- confirm this is intentional.
    """
    # data = request.body.decode()
    # data_dict = trans_xml_to_dict(data)
    # if data_dict['return_code'] == 'SUCCESS':
    #     order_num = data_dict['out_trade_no']
    #     order_true_pay = data_dict['total_fee']
    #     order = ZhouBianorders.objects.filter(Q(order_num=order_num)&Q(order_true_pay=order_true_pay))[0]
    #     order.type = 2
    #     order.save()
    return JsonResponse({'status': 1})
# Re-initiate payment for an order that is still awaiting payment.
def ready_pay(request):
    """View: close the stale WeChat order and create a fresh prepay for
    the same order number.

    Returns {'status': 1, 'wx_data': <signed prepay params>,
    'order_data': <order dict>} or {'status': 0, ...} if the old order
    could not be closed.
    """
    # BUGFIX: POST values are strings; the payload must be parsed before
    # being indexed like a dict (query_pay_state does the same).
    order = json.loads(request.POST.get('order_data'))
    # order = ZhouBianorders.objects.get(order_num=order_num)
    # now = time.time()
    # distance = now - int(order.timestamp)
    # if distance > (60*60*1.9):
    # Close the previous (unpaid) order first.
    old_order_num = order['order_num']
    data = close_pay(old_order_num)
    if data['result_code'] == 'FAIL':
        return JsonResponse({'status':0,'wx_data':'关闭订单失败'})
    # Re-initiate payment; determine the client IP first.
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0] # real client IP when behind a load balancer/proxy
    else:
        ip = request.META.get('REMOTE_ADDR') # direct (or proxy) IP
    # new_order_num = randnum()
    order_true_pay = order['order_true_pay']
    order_user = order['order_user']
    # WeChat unified-order parameters (re-using the same order number).
    body = 'test' # category
    out_trade_no = old_order_num # merchant order number
    total_fee = int(float(order_true_pay) * 100) # amount in fen (cents)
    spbill_create_ip = ip # terminal IP
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30' # async payment callback URL
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_user[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    # {'return_code': 'SUCCESS', 'trade_type': 'JSAPI', 'prepay_id': 'wx18102325542417b42cdbe9ef1001807600',
    # 'mch_id': '1537642871', 'sign': '36DEB26F5187D2DB8ABE839373EC09F1', 'return_msg': 'OK',
    # 'appid': 'wx16360426dc864b7d', 'result_code': 'SUCCESS', 'nonce_str': 'vVqn4SuQts0v18iE'}
    # Payload for wx.requestPayment on the client.
    timestamp = str(int(time.time()))
    send_data = {}
    send_data['timeStamp'] = timestamp
    send_data['appId'] = response_dict['appid']
    send_data['signType'] = 'MD5'
    send_data['nonceStr'] = response_dict['nonce_str'].upper()
    send_data['package'] = 'prepay_id=' + response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = old_order_num
    # Re-persisting the order is intentionally disabled:
    # order.order_num = new_order_num
    # order.timestamp = timestamp
    # order.save()
    # delorder(order_num)
    return JsonResponse({'status': 1, 'wx_data': send_data, 'order_data': order})
# Refund (admin action): submit a full refund to WeChat Pay.
def refundment(request):
    """View: refund the full amount of an order in state 41 (refund
    requested), record a Refund row, and redirect to the admin changelist.

    NOTE(review): if the order is in state 41 but WeChat answers with a
    non-SUCCESS result_code, the function falls through and returns None
    -- confirm this is handled upstream.
    """
    order_num = request.GET.get('order_num')
    order = ZhouBianorders.objects.get(order_num=order_num)
    if order.type == 41:
        # Amount in fen (cents); refund equals the original payment.
        order_true_pay = int(order.order_true_pay*100)
        notify_url = 'http://101.132.47.14/shop/get_wxnotice_refund/'
        url = 'https://api.mch.weixin.qq.com/secapi/pay/refund'
        params = {
            'appid': appid,
            'mch_id': mch_id,
            'nonce_str': randnum(),
            'out_trade_no': order_num,
            'out_refund_no':order_num,
            'total_fee': order_true_pay,
            'refund_fee': order_true_pay,
            'notify_url': notify_url,
        }
        xml_params = get_xml_params(params, key)
        headers = {'Content-Type': 'application/xml'}
        # The refund API requires the merchant client certificate.
        # NOTE(review): absolute cert paths are hardcoded -- move to settings.
        ssh_keys_path = '/home/zhou/project/xiaochengxu/cert'
        weixinapiclient_cert = os.path.join(ssh_keys_path, "apiclient_cert.pem")
        weixinapiclient_key = os.path.join(ssh_keys_path, "apiclient_key.pem")
        res = requests.post(url, data=xml_params, headers=headers,
                            cert=(weixinapiclient_cert, weixinapiclient_key), verify=True)
        get_dict = trans_xml_to_dict(res.text)
        if get_dict['result_code'] == 'SUCCESS':
            # Refund request accepted; record it (status updated by the
            # async callback get_wxnotice_refund).
            refund = Refund()
            refund.order_num = order_num
            refund.refund_num = order.order_true_pay
            refund.save()
            time.sleep(3)
            return HttpResponseRedirect('/admin/shopping/zhoubianorders/')
    else:
        return JsonResponse({'status': 1,'code':'FAIL'})
# WeChat asynchronous refund-result notification endpoint.
def get_wxnotice_refund(request):
    """Refund callback from WeChat Pay.

    Decrypts `req_info` (AES-256-ECB, key = lowercase MD5 of the merchant
    API key, per WeChat's refund-notification spec), then marks the
    Refund row as completed and the order as type 43 (refunded).
    Always acknowledges with {'status': 1}.
    """
    data = request.body.decode()
    data_dict = trans_xml_to_dict(data)
    if data_dict['return_code'] == 'SUCCESS':
        req_info = data_dict['req_info']
        md5 = hashlib.md5() # MD5 digest of the merchant key...
        md5.update(key.encode('utf-8')) # ...fed with the key bytes
        tokey = md5.hexdigest().lower()
        code = base64.b64decode(req_info)
        cipher = AES.new(tokey, AES.MODE_ECB).decrypt(code).decode()
        res_data = trans_xml_to_dict(cipher,'root')
        order_true_pay = float(res_data['total_fee'])/100
        order_num = res_data['out_trade_no']
        refund = Refund.objects.get(order_num=order_num)
        # Idempotence guard: only transition once.
        if refund.refund_status ==0:
            refund.refund_status = 1
            refund.save()
            order = ZhouBianorders.objects.filter(order_num=order_num)[0]
            order.type = 43
            order.save()
        return JsonResponse({'status': 1})
    else:
        return JsonResponse({'status': 1})
# Payment-state polling endpoint.
def query_pay_state(request):
    """View: parse POST['order_data'] (a JSON-encoded order dict) and
    return query_pay's verdict as JSON."""
    order_obj = json.loads(request.POST.get('order_data'))
    return JsonResponse(query_pay(order_obj))
# List the current user's orders, filtered by lifecycle stage.
def myorder(request):
    """View: list a user's merchandise orders by requested stage.

    POST['type']: 2 = paid/in transit (courier status is refreshed and
    orders auto-transition to 31/32 on delivery); 3 = received
    (31 = past the 7-day window, 32 = within it); anything else = refund
    states (41 requested, 42 refused, 43 refunded).
    """
    userid = request.POST.get('userid')
    type = int(request.POST.get('type'))
    if type == 2:
        orders = ZhouBianorders.objects.filter(Q(order_user=userid) & Q(type=2)).order_by('-id')
    elif type == 3:
        orders = ZhouBianorders.objects.filter( (Q(order_user=userid)&Q(type=31))|(Q(order_user=userid)&Q(type=32)) ).order_by('-id')
    else:
        orders = ZhouBianorders.objects.filter( (Q(order_user=userid)&Q(type=41))|(Q(order_user=userid)&Q(type=42))|(Q(order_user=userid)&Q(type=43)) ).order_by('-id')
    order_data = []
    if orders.exists():
        for order in orders:
            obj = {}
            if type == 2:
                # Logistics tracking query; opcode '80' means delivered.
                xml = query_xml(order.order_num)
                route_list = queryorder(xml)
                if 'remark' in route_list[-1]:
                    obj['trans']= route_list[-1]['remark']
                    opcode = route_list[-1]['opcode']
                    if opcode == '80':
                        receivetime = route_list[-1]['accept_time']
                        now = time.time()
                        receive = datetime.datetime.strptime(receivetime, '%Y-%m-%d %H:%M:%S').timestamp()
                        # 604800 s = 7 days: past the return window -> 31,
                        # otherwise -> 32.
                        if now - receive > 604800:
                            order.type = 31
                            order.receivetime = receivetime
                            order.save()
                        else:
                            order.type = 32
                            order.receivetime = receivetime
                            order.save()
                else:
                    obj['trans'] = '待揽件'
            elif type == 3:
                # Re-check whether the 7-day return window has elapsed.
                if order.type == 32:
                    now = time.time()
                    receive = datetime.datetime.strptime(order.receivetime, '%Y-%m-%d %H:%M:%S').timestamp()
                    if now - receive > 604800:
                        order.type = 31
                        order.save()
            obj['order_num'] = order.order_num
            obj['order_id'] = order.id
            obj['order_start_time'] = order.order_start_time
            obj['order_true_pay'] = order.order_true_pay
            obj['goodnum'] = order.goodnum
            obj['type'] = order.type
            obj['goodname'] = order.goodname
            obj['goodimg'] = order.goodimg
            obj['goodprice'] = order.goodprice
            order_data.append(obj)
    return JsonResponse({'status': 1,'order_list':order_data,'type':type})
# Request a return/refund for a received order.
def return_goods(request):
    """View: move the order named in POST['order_num'] from state 32
    (received, within the return window) to 41 (refund requested)."""
    order = ZhouBianorders.objects.filter(
        order_num=request.POST.get('order_num'))[0]
    if order.type != 32:
        # Message: "this state does not support a refund".
        return JsonResponse({'status': 0,'code':'该状态不支持退款'})
    order.type = 41
    order.save()
    # NOTE(review): 'succese' [sic] kept byte-for-byte for client compatibility.
    return JsonResponse({'status': 1,'code':'succese'})
# Cancel a refund request.
def cancel_return(request):
    """View: revert an order from state 41 (refund requested) back to
    31 (received > 7 days ago) or 32 (received within 7 days).

    NOTE(review): the 7-day check uses order_start_time here, while
    myorder uses receivetime for the same transition -- confirm which is
    intended.
    """
    order_num = request.POST.get('order_num')
    order = ZhouBianorders.objects.filter(order_num=order_num)[0]
    order_start_time = order.order_start_time
    if order.type == 41:
        now = time.time()
        start = datetime.datetime.strptime(order_start_time, '%Y-%m-%d %H:%M:%S').timestamp()
        # 604800 s = 7 days.
        if now - start > 604800:
            order.type = 31
        else:
            order.type = 32
        order.save()
        return JsonResponse({'status': 1})
    else:
        # Message: "this state does not support cancelling the refund".
        return JsonResponse({'status': 0,'code':'该状态不支持取消退款'})
# Refuse a refund request (admin action).
def refuse_return(request):
    """View: move an order from state 41 (refund requested) to 42
    (refund refused), then redirect to the admin changelist.

    Returns a JSON error if the order is not in state 41.
    """
    order_num = request.GET.get('order_num')
    # BUGFIX: removed leftover debug print of the order number.
    order = ZhouBianorders.objects.filter(order_num=order_num)[0]
    if order.type == 41:
        order.type = 42
        order.save()
        return HttpResponseRedirect('/admin/shopping/zhoubianorders/')
    else:
        # Message: "this state does not support refusing the refund".
        return JsonResponse({'status': 0,'code':'该状态不支持拒绝退款'})
#现场服务
#@cache_page(60*60,cache='longtime')
def newgoods(request):
    """Return the on-site ("现场") goods catalogue grouped by good type.

    Response shape:
      data:        {type_name: {good_id: good_dict}}
      typecontent: descriptors for the type tab bar
      topimg:      banner image configured for the 现场 page

    Fixes over the original: the first-good and subsequent-good branches were
    duplicated and disagreed — `picture_list` was only filtered for empty
    slots on the FIRST good of each type, so later goods leaked '' entries.
    The loop is deduplicated and always filters.
    """
    goods = Goods.objects.all()
    types = Good_types.objects.all()
    topimg = Topimg.objects.get(type='xianchang').img
    typecontent = []
    for gtype in types:
        typecontent.append({
            'type_id': gtype.id,
            'type_name': gtype.type_name,
            'type_icon': gtype.type_icon,
            'color': '#6e6d6d',
        })
    data = {}
    for good in goods:
        gooddict = data.setdefault(good.type, {})
        pictures = [good.picture1, good.picture2, good.picture3,
                    good.picture4, good.picture5]
        gooddict[good.id] = {
            'good_id': good.id,
            'good_name': good.goods_name,
            'goods_price': float(good.goods_price),
            'store_num': good.store_num,
            'description': good.description,
            'picture': good.picture1,
            # Drop empty picture slots for every good (bug fix, see docstring).
            'picture_list': [p for p in pictures if p != ''],
            'num': 0,
            'type': good.type,
        }
    return JsonResponse({'status': 1, 'data': data, 'typecontent': typecontent, 'topimg': topimg})
def post_xianchangorder(request):
    """Create an on-site (现场) order with its line items, consume an optional
    coupon, and start a WeChat Pay "unified order", returning the signed
    payload the mini-program needs for wx.requestPayment.
    """
    order_userid = request.POST.get('userid')
    location_site = request.POST.get('location_site')
    location_seat = request.POST.get('location_seat')
    phone = request.POST.get('phone')
    couponid = request.POST.get('couponid')
    order_true_pay = request.POST.get('true_pay')
    order_getman = request.POST.get('getman')
    goodbag = request.POST.get('goodbag')
    goodbag = json.loads(goodbag)
    # Resolve the client IP (WeChat Pay requires the terminal IP).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]  # real client IP when behind a proxy / load balancer
    else:
        ip = request.META.get('REMOTE_ADDR')  # direct peer address
    order = Xianchangorder()
    now = datetime.datetime.now()
    ordernum = randnum()
    order.order_num = ordernum
    order.order_start_time = str(now)[:-7]
    order.order_userid = order_userid
    order.order_getman = order_getman
    order.location_site = location_site
    order.location_seat = location_seat
    order.phone = phone
    order.couponid = 0 if couponid == '' else int(couponid)
    order.order_true_pay = order_true_pay
    order.save()
    # One detail row per item in the shopping bag.
    for goodid in goodbag:
        detail = Xianchangorder_detail()
        detail.goodid = goodbag[goodid]['good_id']
        detail.goodnum = goodbag[goodid]['num']
        detail.goodname = goodbag[goodid]['good_name']
        detail.goodprice = goodbag[goodid]['goods_price']
        detail.ordernum = ordernum
        detail.orderForeignKey = order
        detail.save()
    # Consume the user's coupon, if one was supplied.
    if couponid != '':
        usercou = Usercoupon.objects.filter(Q(userid=order_userid)&Q(coupon_id =int(couponid)))
        if usercou.exists():
            usercou[0].delete()
    # WeChat Pay "unified order" call.
    body = 'test'  # product category / description
    out_trade_no = ordernum  # merchant order number
    total_fee = int(float(order_true_pay)*100)  # amount in fen (1/100 yuan)
    spbill_create_ip = ip  # terminal IP
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30'  # payment-result callback URL
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_userid[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    timestamp = str(int(time.time()))
    # Payload signed for the mini-program payment API.
    send_data = {}
    send_data['timeStamp']= timestamp
    send_data['appId']= response_dict['appid']
    send_data['signType']= 'MD5'
    send_data['nonceStr']= response_dict['nonce_str'].upper()
    send_data['package']= 'prepay_id='+ response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = ordernum
    return JsonResponse({'status': 1, 'wx_data': send_data})
def qureypay_forxianchang(request):
    """Poll WeChat Pay for an on-site order's state; mark it paid on success.

    Returns {'status': 1, 'code': 'SUCCESS'} only when WeChat reports the
    trade as paid; any other outcome yields {'status': 0, 'code': 'FAIL'}.
    """
    order_num = request.POST.get('order_num')
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': order_num,
        'nonce_str': randnum(),
    }
    sign = get_sign(params, key)
    params['sign'] = sign
    xml_params = trans_dict_to_xml(params)
    # Order-query endpoint of the WeChat Pay API.
    url = 'https://api.mch.weixin.qq.com/pay/orderquery'
    res = requests.post(url, data=xml_params)
    get_dict = trans_xml_to_dict(res.text)
    if get_dict['result_code'] == 'SUCCESS':
        state = get_dict['trade_state']
        if state == 'SUCCESS':
            # Persist the paid flag so the order shows up in the user's list.
            theorder = Xianchangorder.objects.get(order_num = order_num)
            theorder.ispay = 1
            theorder.save()
            return JsonResponse({'status': 1, 'code': 'SUCCESS'})
        else:
            return JsonResponse({'status': 0, 'code': 'FAIL'})
    else:
        return JsonResponse({'status': 0, 'code': 'FAIL'})
def showorder_forxianchang(request):
    """List a user's paid on-site orders, split into waiting (isget == 0) and collected.

    Fix: the original guarded the loop with `if orders.exists:` — the method
    was never called, so the condition was always truthy. Iterating the
    queryset directly is equivalent and removes the dead check.
    """
    userid = request.POST.get('userid')
    orders = Xianchangorder.objects.filter(order_userid=userid, ispay=1).order_by('-id')
    wait = []
    get = []
    for order in orders:
        obj = to_dict(order)
        details = Xianchangorder_detail.objects.filter(ordernum=obj['order_num'])
        detail_list = []
        for detail in details:
            detail_list.append({
                'goodname': detail.goodname,
                'goodnum': detail.goodnum,
                'goodprice': detail.goodprice,
            })
        obj['detail_list'] = detail_list
        # Gross total of the line items; the coupon value is the difference
        # between that total and what was actually paid.
        total = 0
        for item in detail_list:
            total += item['goodnum'] * item['goodprice']
        obj['coupon'] = total - obj['order_true_pay']
        obj['sum'] = total
        if obj['isget'] == 0:
            wait.append(obj)
        else:
            get.append(obj)
    return JsonResponse({'waitorder': wait, 'got': get})
def loactionforxianchang(request):
    """Return the names of every configured on-site delivery location."""
    names = [row.location for row in Locationforxianchang.objects.all()]
    return JsonResponse({'status': 1, 'location': names})
| zhoujialefanjiayuan/liu-lian | xiaochengxu/shopping/views.py | views.py | py | 27,704 | python | en | code | 0 | github-code | 36 |
28821282358 | #!/usr/bin/env python3
import sys
import rwpy.code as code
from rwpy.errors import IniSyntaxError
def log(message: str):
    """Print *message* prefixed with the tool name."""
    print('rwcheck:' + message)
def isensured(text: str):
return text.isspace() or text == '' or text.startswith('#')
if __name__ == '__main__':
errors = []
if len(sys.argv) == 2:
ini = None
with open(sys.argv[1],'r') as file:
try:
ini = code.create_ini(file.read())
except IniSyntaxError as e:
log(str(e))
exit(-2)
if ini is None:
log('文件初始化失败')
else:
atbs = filter(lambda x: isinstance(x,code.Attribute),ini.elements)
for ele in ini.elements:
if isinstance(ele,code.Attribute):
if not atb.value.strip().startswith('@global'):
errors.append('行号:{0}|错误的ini头部属性->{1}'.format(atb.linenum,str(atb)))
else:
if not isensured(str(ele)):
errors.append('行号:{0}|ini头部非正确的元素->{1}'.format(ele.linenum,str(ele)))
read_sections = []
for sec in ini.sections:
if sec.name in map(lambda x: x.name,read_sections):
errors.append('行号:{0}|重复的段落名->{1}'.format(sec.linenum,sec.name))
read_sections.append(sec)
read_attributes = []
for ele in sec.elements:
if isinstance(ele,code.Attribute):
if ele.key in map(lambda x: x.key,read_attributes):
errors.append('行号:{0}|在段落{1}中重复的属性->{2}'.format(ele.linenum,sec.name,str(ele)))
read_attributes.append(ele)
else:
if not isensured(str(ele)):
errors.append('行号:{0}|在段落{1}非合法元素->{2}'.format(ele.linenum,sec.name,str(ele)))
else:
log('参数错误')
exit(-1)
for e in errors:
print(e) | zerodegress/rwtools | rwcheck.py | rwcheck.py | py | 2,120 | python | en | code | 2 | github-code | 36 |
34742768602 | from random import randint, choice
class Moves:
def __init__(self, nom="Missing Move",
typ=choice(["Water", "Flying", "Normal", "Fire", "Electric", "Ghost", "Poison", "Dragon", "Bug", "Ice",
"Psychic"]),
category=choice(["Physical", "Special"]),
pp=randint(1, 7) * 5,
power=randint(50, 150),
accuracy=randint(50, 100)):
self.nom = nom
self.type = typ
self.category = category
self.pp = pp
self.power = power
self.accuracy = accuracy
def __str__(self):
return self.nom
def type_advantage(self, target):
"""
* paramètres : l'attaque et la cible
* valeur retournée : le coefficient multiplicateur par rapport au type_table
"""
if target.type[1] is not None:
type_multiplicator = type_table[type_number[self.type]][type_number[target.type[0]]] * \
type_table[type_number[self.type]][type_number[target.type[1]]]
else:
type_multiplicator = type_table[type_number[self.type]][type_number[target.type[0]]]
if type_multiplicator < 1:
print("Ceci n'est pas très efficace.")
elif type_multiplicator > 1:
print("Ceci est très efficace.")
return type_multiplicator
def damage(self, attacker, target):
"""
* paramètres : l'attaque, le pokemon qui attaque et la cible
* valeur retournée : Le nombre de pv retiré à la cible
"""
if self.category == "Physical":
dmg = ((((2 * 50) / 5 + 2) * self.power * (attacker.atk / target.defense)) / 50 + 2) * self.type_advantage(
target)
elif self.category == "Special":
dmg = ((((2 * 50) / 5 + 2) * self.power * (
attacker.sp_attaque / target.sp_defense)) / 50 + 2) * self.type_advantage(target)
else:
dmg = 0
return dmg
# Maps each type name to its row/column index in type_table below.
type_number = {"Normal": 0, "Fire": 1, "Water": 2, "Plant": 3, "Electric": 4, "Ice": 5, "Fighting": 6, "Poison": 7,
               "Ground": 8, "Flying": 9, "Psychic": 10, "Bug": 11, "Rock": 12, "Ghost": 13, "Dragon": 14,
               "Dark": 15}  # indices into type_table for each type name
# Type-effectiveness multipliers: type_table[attacker_type][defender_type].
type_table = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 1, 1],
              [1, 0.5, 0.5, 2, 1, 2, 1, 1, 1, 1, 1, 2, 0.5, 1, 0.5, 0.5],
              [1, 2, 0.5, 0.5, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 0.5, 0.5],
              [1, 0.5, 2, 0.5, 1, 1, 1, 0.5, 2, 0.5, 1, 0.5, 2, 1, 0.5, 1],
              [1, 1, 2, 0.5, 0.5, 1, 1, 1, 0, 2, 1, 1, 1, 1, 0.5, 1],
              [1, 1, 0.5, 2, 1, 0.5, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1],
              [2, 1, 1, 1, 1, 2, 1, 0.5, 1, 0.5, 0.5, 0.5, 2, 0, 1, 2],
              [1, 1, 1, 2, 1, 1, 1, 0.5, 0.5, 1, 1, 2, 0.5, 0.5, 1, 1],
              [1, 2, 1, 0.5, 2, 1, 1, 2, 1, 0, 1, 0.5, 2, 1, 1, 1],
              [1, 1, 1, 2, 0.5, 1, 2, 1, 1, 1, 1, 2, 0.5, 1, 1, 1],
              [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 0.5, 1, 1, 1, 1, 0],
              [1, 0.5, 1, 2, 1, 1, 0.5, 2, 1, 0.5, 2, 1, 1, 0.5, 1, 2],
              [1, 2, 1, 1, 1, 2, 0.5, 1, 0.5, 2, 1, 2, 1, 1, 1, 1],
              [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 2, 1, 0.5],
              [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1],
              [1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 2, 1, 1, 2, 1, 0.5]]  # type-advantage table
| Redrock18/pokemon | MoveClass.py | MoveClass.py | py | 3,491 | python | en | code | 1 | github-code | 36 |
28587679911 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
import pika
import json
import time
class rabbitmq(models.Model):
    # Odoo model extending res.partner with RabbitMQ-based contact processing:
    # a producer publishes one message per contact, a consumer drains the queue.
    _name = 'res.partner'
    _description = 'Processing of Contact Records'
    _inherit = "res.partner"
    def sequential_contacts(self):
        # Baseline path: process every contact synchronously in this process.
        records = self.env['res.partner'].search([])
        for record in records:
            self.process_contact(record)
    def process_contacts(self):
        # Producer: publish one JSON message per contact to the
        # 'process_contact' queue instead of processing in-process.
        connection, channel = self.get_connection()
        records = self.env['res.partner'].search([])
        for record in records:
            rec = {'id':record.id, 'name': record.name}
            # Publish it to RabbitMQ
            channel.basic_publish(exchange='',
                                  routing_key='process_contact',
                                  body=json.dumps(rec))
            # REFACTOR/MOVE TO CALLBACK METHOD
            print(" [x] Sent '"+ record.name+ " '")
        connection.close()
    # Simulated slow work (placeholder for the real per-contact task).
    def process_contact(self,rec):
        # The sleep stands in for a slow API call / heavy computation.
        print(rec)
        time.sleep(1)
    def process_queue(self):
        # Consumer: drain the 'process_contact' queue, acking each message
        # only after it has been processed.  Blocks forever (start_consuming).
        connection, channel = self.get_connection()
        # Callback invoked by pika for every delivered message.
        def callback(ch, method, properties, body):
            #print(body)
            if body:
                try:
                    rec = json.loads(body)
                    self.process_contact(rec)
                    print(" [x] Received %r" % rec)
                    ch.basic_ack(delivery_tag=method.delivery_tag)
                except:
                    # NOTE(review): bare except swallows every error and
                    # leaves the message unacked — consider narrowing to
                    # json.JSONDecodeError and logging the traceback.
                    print("error loading json")
        # prefetch_count=1: hand this worker one message at a time.
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume('process_contact', callback)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()
    def get_connection(self):
        # NOTE(review): broker host and credentials are hard-coded — move to
        # configuration before production use.
        credentials = pika.PlainCredentials(username='mojo', password='mojo')
        connection = pika.BlockingConnection(pika.ConnectionParameters(host="168.235.109.177",port=5672,credentials=credentials))
        channel = connection.channel()
        channel.queue_declare(queue='process_contact', durable=True)
        return connection,channel
#name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
| FirstClassComputerConsulting/odoo_insurance_app | rabbitmq/models/models.py | models.py | py | 2,625 | python | en | code | 0 | github-code | 36 |
26054870224 | import sys
import socket
header_size = 8
class MessageHeader:
    """Parsed packet header: 32-bit message type and 32-bit body size."""

    def __init__(self):
        self.type = 0  # message type field
        self.size = 0  # body length in bytes
def send_packet(conn, textdata):
    """Frame *textdata* as [type:4][size:4][payload] (little-endian) and send it."""
    payload = bytearray(ord(ch) for ch in textdata)
    msg_type = 0
    size = len(payload)
    print("send type:", msg_type, " size:", size, " data:", textdata)
    frame = bytearray()
    frame.extend(msg_type.to_bytes(4, byteorder="little"))
    frame.extend(size.to_bytes(4, byteorder="little"))
    frame.extend(payload)
    conn.send(frame)
def recv_packet(conn):
    """Read one framed message from *conn* and return its body bytes.

    Frame layout matches send_packet: 4-byte little-endian type, 4-byte
    little-endian size, then *size* body bytes.  Returns None on EOF or an
    empty body.

    Fixes over the original:
      * short reads are handled — socket.recv may return fewer bytes than
        requested, so header and body are read with a fill loop;
      * the header is decoded as little-endian to match send_packet, instead
        of sys.byteorder (which would mis-decode on a big-endian host).
    """
    def _recv_exact(n):
        # Keep calling recv until exactly n bytes arrive; None means EOF.
        buf = bytearray()
        while len(buf) < n:
            chunk = conn.recv(n - len(buf))
            if not chunk:
                return None
            buf.extend(chunk)
        return bytes(buf)

    data = _recv_exact(8)
    if data is None:
        print("not Data")
        return None
    msg_type = int.from_bytes(data[0:4], "little")
    size = int.from_bytes(data[4:8], "little")
    print("recv header type:", msg_type, " size:", size)
    body = _recv_exact(size)
    print("recv body:", body)
    if not body:
        return None
    return body
def server_program():
    """Run a blocking demo server on 127.0.0.1:11000.

    Accepts one client at a time, reads framed packets with recv_packet and
    answers each with a fixed reply until the client disconnects.

    Fixes over the original:
      * the except block printed ``socket.error`` (the exception *class*)
        instead of the caught error, and referenced ``conn`` which is
        unbound when accept() itself fails (NameError);
      * the connection was only closed on the error path — it is now always
        closed in a ``finally`` clause.
    """
    host = "127.0.0.1"
    port = 11000

    server_socket = socket.socket()
    server_socket.bind((host, port))

    while True:
        conn = None
        try:
            server_socket.listen(2)
            conn, address = server_socket.accept()
            print("Connection from:", str(address))
            while True:
                body = recv_packet(conn)
                if body is None:
                    break
                print("from connected user:", str(body))
                send_packet(conn, "this is data from server.")
            print("close connection")
        except socket.error as err:
            print(err)
        finally:
            if conn is not None:
                conn.close()
if __name__ == '__main__':
server_program() | insooneelife/PythonExamples | server_example.py | server_example.py | py | 1,705 | python | en | code | 0 | github-code | 36 |
30148735325 | from modules.symboltable import Symbol_table
import socket
import struct
import datetime
try:
import yara
except:
pass
symbol_table = Symbol_table()
class Node:
    """Base AST node: a value plus a list of child nodes.

    Subclasses override evaluate() to return a (value, type_name) pair;
    the base implementation is an int-typed no-op.
    """

    def __init__(self, value, children: list) -> None:
        self.value = value
        self.children: list = children

    def evaluate(self):
        return None, "int"
class Bin_op(Node):
    """Binary operator node: evaluates children[0] OP children[1].

    Integers double as booleans for the logical operators; "." concatenates
    operands into a string.  evaluate() returns a (value, type_name) pair
    and raises on unsupported operand/operator combinations.
    """

    def __init__(self, value, children: list):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        ch0 = self.children[0].evaluate()
        ch1 = self.children[1].evaluate()
        if type(self.value) == str:
            if ch0[1] == "string" or ch1[1] == "string":
                if self.value == ".":
                    # BUG FIX: the original computed str(ch0[0][0]) here,
                    # which concatenated only the FIRST character of a string
                    # left operand and raised TypeError for an int operand.
                    return str(ch0[0]) + str(ch1[0]), "string"
                if ch0[1] == "string" and ch1[1] == "string":
                    if self.value == "==":
                        return int(ch0[0] == ch1[0]), "int"
                    if self.value == ">":
                        return int(ch0[0] > ch1[0]), "int"
                    if self.value == "<":
                        return int(ch0[0] < ch1[0]), "int"
                else:
                    raise Exception("Error")
            if ch0[1] == "int" and ch1[1] == "int":
                if self.value == "+":
                    return ch0[0] + ch1[0], "int"
                if self.value == "-":
                    return ch0[0] - ch1[0], "int"
                if self.value == "/":
                    return ch0[0] // ch1[0], "int"
                if self.value == "*":
                    return ch0[0] * ch1[0], "int"
                if self.value == "&&":
                    return int(ch0[0] and ch1[0]), "int"
                if self.value == "||":
                    return int(ch0[0] or ch1[0]), "int"
                if self.value == "==":
                    return int(ch0[0] == ch1[0]), "int"
                if self.value == ">":
                    return int(ch0[0] > ch1[0]), "int"
                if self.value == "<":
                    return int(ch0[0] < ch1[0]), "int"
                if self.value == ".":
                    return str(ch0[0]) + str(ch1[0]), "string"
            else:
                raise Exception("Error")
        else:
            raise Exception("Error")
class Un_op(Node):
    """Unary operator node: applies +, - or ! to its single child."""

    def __init__(self, value, children: list):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        if type(self.value) != str:
            raise Exception("Error")
        if self.value == "+":
            return +self.children[0].evaluate()[0], "int"
        if self.value == "-":
            return -self.children[0].evaluate()[0], "int"
        if self.value == "!":
            return int(not self.children[0].evaluate()[0]), "int"
class Int_val(Node):
    """Literal integer leaf node."""

    def __init__(self, value, children: list = None):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        return self.value, "int"
class Str_val(Node):
    """Literal string leaf node."""

    def __init__(self, value, children: list = None):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        return self.value, "string"
class Rule_val(Node):
    """Literal rule leaf node (YARA rule payload)."""

    def __init__(self, value, children: list = None):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        return self.value, "rule"
class No_op(Node):
    """Placeholder node carrying only a declared type name."""

    def __init__(self, value=None, typedef="int", children: list = None):
        super().__init__(value, children)
        self.typedef = typedef

    def evaluate(self) -> Node:
        return None, self.typedef
class Block(Node):
    """Statement sequence: evaluates each child in order, discarding results."""

    def __init__(self, value=None, children: list = None):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        for statement in self.children:
            statement.evaluate()
class Print(Node):
    # Print statement: evaluates its single child (a (value, type) pair)
    # and prints the value.
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        print(self.children[0].evaluate()[0])
class Scanhost(Node):
    # TCP port probe: children = (ip_node, port_node).  Evaluates to the
    # string "open" or "closed" based on a 1-second connect() attempt.
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)  # fail fast on filtered/unreachable ports
        ip, port = self.children
        result = "open"
        try:
            sock.connect((ip.evaluate()[0], port.evaluate()[0]))
        except socket.error:
            result = "closed"
        finally:
            sock.close()
        return result, "string"
class Traffic(Node):
    # Packet sniffer: captures ONE frame from an AF_PACKET raw socket and
    # prints a summary.  AF_PACKET is Linux-specific and requires root /
    # CAP_NET_RAW; evaluate() returns None implicitly.
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        def parse_ethernet_frame(packet_data):
            # 14-byte Ethernet header: dst MAC (6), src MAC (6), EtherType (2).
            ethernet_header = struct.unpack('!6s6sH', packet_data[:14])
            destination_address = ':'.join(f'{byte:02x}' for byte in ethernet_header[0])
            source_address = ':'.join(f'{byte:02x}' for byte in ethernet_header[1])
            ether_type = ethernet_header[2]
            return destination_address, source_address, ether_type
        def parse_ip_packet(packet_data):
            # Fixed 20-byte IPv4 header right after the Ethernet header.
            # NOTE(review): assumes no IP options (IHL may be > 20 bytes).
            ip_header = struct.unpack('!BBHHHBBH4s4s', packet_data[14:34])
            version_ihl = ip_header[0]
            version = version_ihl >> 4
            ihl = (version_ihl & 0x0F) * 4
            ttl = ip_header[5]
            protocol = ip_header[6]
            source_ip = socket.inet_ntoa(ip_header[8])
            dest_ip = socket.inet_ntoa(ip_header[9])
            return version, ihl, ttl, protocol, source_ip, dest_ip
        # ETH_P_ALL (0x0003): receive every protocol on every interface.
        raw_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
        packet_data, _ = raw_socket.recvfrom(65536)
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        dest_address, src_address, ether_type = parse_ethernet_frame(packet_data)
        print(f"\nTimestamp: {timestamp}")
        print(f"Packet Length: {len(packet_data)} bytes")
        print(f"Source MAC: {src_address}")
        print(f"Destination MAC: {dest_address}")
        print(f"Ethernet Type: {hex(ether_type)}")
        if ether_type == 0x0800:  # IPv4 EtherType
            version, ihl, ttl, protocol, source_ip, dest_ip = parse_ip_packet(packet_data)
            print("IPv4 Header:")
            print(f"Version: {version}")
            print(f"IHL: {ihl} bytes")
            print(f"TTL: {ttl}")
            print(f"Protocol: {protocol}")
            print(f"Source IP: {source_ip}")
            print(f"Destination IP: {dest_ip}")
class Match(Node):
    # YARA matcher: children = (filepath_node, rule_node).  Builds a YARA
    # rule from the rule node's string table, scans the file, and evaluates
    # to (1, "int") on a match, (0, "int") otherwise.
    # NOTE(review): relies on the optional `yara` import at module top and on
    # children[1].evaluate() returning a mapping of name -> string nodes —
    # confirm against the rule-node implementation.
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        yara_rule = f"rule {self.children[1].value}" + "{\n\tstrings:"
        strings = self.children[1].evaluate()
        for string in strings:
            yara_rule+= f'\n\t\t${string} = "{strings[string].evaluate()[0]}"'
        yara_rule+="\n\tcondition:\n\t\tall of them\n}"
        compiled_rule = yara.compile(source=yara_rule)
        file_path = self.children[0].evaluate()[0]
        matches = compiled_rule.match(file_path)
        if matches:
            return int(True),"int"
        else:
            return int(False),"int"
class Input(Node):
    # Reads one line from stdin; evaluates to (line, "string").
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        return input(),"string"
class Identifier(Node):
    # Variable reference: value is the variable name; evaluation looks it up
    # in the module-level symbol_table and returns its (value, type) entry.
    def __init__(self, value = None, children:list=[]):
        super().__init__(value, children)
    def evaluate(self)->Node:
        return symbol_table.getter(self.value)
class Assignment(Node):
    # Assignment: children = (identifier_node, expression_node).  Stores the
    # evaluated expression under the identifier's name in symbol_table.
    def __init__(self, value, children:list):
        super().__init__(value, children)
    def evaluate(self)->Node:
        symbol_table.setter(self.children[0].value,
                            self.children[1].evaluate())
class If(Node):
    # Conditional: children = (condition, then_block[, else_block]).
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        # Then-branch: executed for side effects; its value is discarded.
        if (self.children[0].evaluate()[0]):
            self.children[1].evaluate()
        # Else-branch, only present when len(children) > 2.
        # NOTE(review): only this branch's value is returned — the
        # then-branch yields None; confirm the interpreter intends that.
        elif len(self.children) > 2:
            return self.children[2].evaluate()
class While(Node):
    """While-loop node: children = (condition, body)."""

    def __init__(self, value=None, children: list = []):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        condition, body = self.children
        while condition.evaluate()[0]:
            body.evaluate()
class Foreach(Node):
    # Counted loop: children = (init_node, final_node, body_block).
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        init, final, block = self.children
        # Both bound nodes are evaluated first for their side effects...
        init.evaluate()
        final.evaluate()
        # ...then the numeric bounds are re-read from each node's first child.
        # NOTE(review): assumes init/final are assignment-like nodes whose
        # children[0] evaluates to an int — confirm against the parser.
        start = init.children[0].evaluate()[0]
        end = final.children[0].evaluate()[0]
        for i in range(start,end):
            block.evaluate()
class VarDec(Node):
    # Variable declaration.  When value is a dict it is stored directly as
    # the symbol-table entry; otherwise children = (identifier, initial_expr)
    # and the evaluated expression is stored.
    def __init__(self, value = None, children:list = []):
        super().__init__(value, children)
    def evaluate(self)->Node:
        if type(self.value) == dict:
            symbol_table.create(self.children[0].value,
            self.value)
        else:
            symbol_table.create(self.children[0].value,
            self.children[1].evaluate())
| matheus-1618/GuardScript | Interpreted/modules/node.py | node.py | py | 9,480 | python | en | code | 0 | github-code | 36 |
27275984958 | """Glue all the CLIs together into one interface."""
# First Party Library
from wepy.orchestration.cli import cli as orch_cli
cli = orch_cli
# SNIPPET: I was intending to aggregate multiple command lines other
# than the orchestration, but this never materialized or was
# needed. In the future though this can be the place for that.
# @click.group()
# def cli():
# """ """
# pass
# # add in the sub-clis
# cli.add_command(orch_cli)
# # the orchestrator stuff we keep in the top-level still though
# for subgroup in orch_subgroups:
# cli.add_command(subgroup)
# Entry point for `python -m wepy`: delegate to the orchestration CLI.
if __name__ == "__main__":
    cli()
| ADicksonLab/wepy | src/wepy/__main__.py | __main__.py | py | 617 | python | en | code | 44 | github-code | 36 |
34338413182 | from p154 import Solution
# (rotated sorted array with duplicates, expected minimum) pairs for p154.
test_cases = [
    ([1,3,5], 1),
    ([2,2,2,0,1], 0),
]
def test_findMin():
    """Check Solution.findMin against every (array, expected-min) case."""
    for nums, expected in test_cases:
        assert Solution().findMin(nums) == expected, (nums, expected)
| 0x0400/LeetCode | p154_test.py | p154_test.py | py | 207 | python | en | code | 0 | github-code | 36 |
43906506457 | """
Methods for pyspectrumscale.Api that deal with filesets
"""
from typing import Union
import json
def get_fileset(
        self,
        filesystem: Union[str, None],
        fileset: Union[str, None]=None,
        allfields: Union[bool, None]=None
):
    """
    @brief List all filesets of a filesystem, or fetch one specific fileset
    @param self The object
    @param filesystem The filesystem name
    @param fileset Optional fileset name; when None, all filesets are listed
    @param allfields When not None, request every field (fields=':all:')
    @return The request response as a requests.Response object
    """
    params = {}
    # NOTE(review): ANY non-None value — including False — turns on ':all:';
    # confirm whether allfields=False should disable it instead.
    if allfields is not None:
        params['fields'] = ':all:'
    # With a fileset name we hit the single-fileset endpoint, otherwise the
    # collection endpoint for the filesystem.
    if fileset is not None:
        commandurl = "%s/filesystems/%s/filesets/%s" % (
            self._baseurl,
            filesystem,
            fileset
        )
    else:
        commandurl = "%s/filesystems/%s/filesets" % (
            self._baseurl,
            filesystem
        )
    return self._get(
        commandurl,
        params=params
    )
def fileset(
        self,
        filesystem: str,
        fileset: Union[str, None]=None,
        allfields: Union[bool, None]=None,
        acl: bool=False,
        quota: bool=False,
        owner: bool=False,
        everything: bool=False
):
    """
    @brief Return fileset data for a filesystem as plain JSON (HTTP response stripped)
    @param self The object
    @param filesystem The filesystem
    @param fileset Optional fileset name; None fetches all filesets
    @param allfields Request every field from the API
    @param acl Also fetch and attach each fileset's ACL under config['acl']
    @param quota / owner NOTE(review): currently only gate the enrichment
           loop below — no quota/owner data is actually added; confirm
           whether this is unimplemented functionality.
    @param everything Shorthand that enables acl, quota, owner and allfields
    @return A fileset dict, a list of fileset dicts, or None on HTTP failure
    """
    # everything overrides all the other arguments
    if everything:
        acl = True
        quota = True
        owner = True
        allfields = True
    response = None
    fsresponse = self.get_fileset(
        filesystem=filesystem,
        fileset=fileset,
        allfields=allfields
    )
    if fsresponse.ok:
        response = fsresponse.json()['filesets']
        if acl or quota or owner:
            updatedfs = []
            for fs in response:
                if acl:
                    fsacl = self.acl(
                        filesystem=fs['filesystemName'],
                        path=fs['config']['path'],
                        allfields=allfields
                    )
                    # NOTE(review): when acl is False (quota/owner only),
                    # nothing is appended and the response collapses to [].
                    if fsacl:
                        fs['config']['acl'] = fsacl
                    updatedfs.append(fs)
            response = updatedfs
    # If it's a single element list, just return the element
    # NOTE(review): raises TypeError when the HTTP call failed (response is
    # still None here).
    if len(response) == 1:
        response = response[0]
    return response
def filesets(
        self,
        filesystems: Union[str, list, None]=None,
        filesets: Union[str, list, None]=None,
        allfields: Union[bool, None]=None,
        acl: bool=False,
        quota: bool=False,
        owner: bool=False,
        everything: bool=False
):
    """
    @brief Return matching filesets across one or many filesystems as plain
           JSON, recursing over list arguments and flattening the results.
    @param filesystems A name, list of names, or None for every filesystem
    @param filesets A name, list of names, or None for every fileset
    @return A single fileset dict, a list of them, or None when nothing matched
    """
    response = []
    if filesystems is None:
        # NOTE(review): this branch does not forward acl/quota/owner/
        # everything to the recursive call — they are silently dropped when
        # no filesystem is named; confirm whether that is intended.
        response = self.filesets(
            filesystems=self.list_filesystems(),
            filesets=filesets,
            allfields=allfields
        )
    elif isinstance(filesystems, list):
        # Fan out over each named filesystem and flatten the results.
        for fs in filesystems:
            fsresponse = self.filesets(
                filesystems=fs,
                filesets=filesets,
                allfields=allfields,
                acl=acl,
                owner=owner,
                quota=quota,
                everything=everything
            )
            if isinstance(fsresponse, list):
                response += fsresponse
            else:
                if fsresponse is not None:
                    response.append(fsresponse)
    else:
        if isinstance(filesets, list):
            # Single filesystem, several named filesets.
            for fs in filesets:
                fsresponse = self.fileset(
                    filesystem=filesystems,
                    fileset=fs,
                    allfields=allfields,
                    acl=acl,
                    owner=owner,
                    quota=quota,
                    everything=everything
                )
                if isinstance(fsresponse, list):
                    response += fsresponse
                else:
                    if fsresponse is not None:
                        response.append(fsresponse)
        else:
            # Single filesystem, single (or all) fileset(s).
            fsresponse = self.fileset(
                filesystem=filesystems,
                fileset=filesets,
                allfields=allfields,
                acl=acl,
                owner=owner,
                quota=quota,
                everything=everything
            )
            if isinstance(fsresponse, list):
                response += fsresponse
            else:
                if fsresponse is not None:
                    response.append(fsresponse)
    # Collapse a one-element list to the element; an empty list becomes None.
    if isinstance(response, list):
        if len(response) == 1:
            response = response[0]
        if not response:
            response = None
    return response
def list_filesets(
        self,
        filesystem: Union[str, None],
        fileset: Union[str, None]=None
):
    """
    @brief Return the names of the matching filesets of a filesystem
    @param self The object
    @param filesystem The filesystem
    @param fileset Optional fileset name to narrow the query
    @return A list of fileset name strings
    """
    response = self.fileset(
        filesystem,
        fileset
    )
    filesets = []
    # fileset() returns either a list of dicts or a single dict.
    # NOTE(review): when fileset() returns None (HTTP failure / no match)
    # the else branch raises TypeError — confirm desired behaviour.
    if isinstance(response, list):
        for fileset in response:
            filesets.append(fileset['filesetName'])
    else:
        filesets.append(response['filesetName'])
    return filesets
## WRITABLE METHODS
## The following methods can create changes in the Spectrum Scale Filesystem
## Make sure that in all cases that they respect the dry-run flag
# Create a prepared request to create a fileset
def preppost_fileset(
        self,
        filesystem: str,
        fileset: str,
        path: str,
        owner: str,
        group: str,
        permissions: str,
        permissionchangemode: str='chmodAndUpdateAcl',
        parent: str=None,
        comment: str=None
):
    """
    @brief Build (but do not send) a prepared POST request that creates a
           fileset.  Send it via self.send() or the session to execute.
    @param self The object
    @param filesystem The filesystem to create the fileset in
    @param fileset The new fileset's name
    @param path The junction path
    @param owner The owning user
    @param group The owning group (combined with owner as "owner:group")
    @param permissions The permissions
    @param permissionchangemode The permissionChangeMode (default keeps
           the original 'chmodAndUpdateAcl')
    @param parent The parent inode space (optional)
    @param comment A free-text comment (optional)
    @return a requests.PreparedRequest object

    Fixes over the original: every parameter was declared as
    ``name: type=str`` — i.e. annotated as the builtin ``type`` with the
    *class* ``str`` as its default value — clearly a typo for ``name: str``.
    The annotations are corrected and the unused ``prepresponse = None``
    initialisation removed.  ``parent``/``comment`` default to None so the
    API receives null rather than the ``str`` class when omitted.
    """
    commandurl = (
        "%s/filesystems/%s/filesets" % (
            self._baseurl,
            filesystem
        )
    )
    data = {
        'filesetName': fileset,
        'path': path,
        'owner': ("%s:%s" % (owner, group)),
        'permissions': permissions,
        'permissionChangeMode': permissionchangemode,
        'inodeSpace': parent,
        'comment': comment
    }
    return self._preppost(
        commandurl=commandurl,
        data=data
    )
| Aethylred/pyspectrumscale | pyspectrumscale/Api/_fileset.py | _fileset.py | py | 7,753 | python | en | code | 0 | github-code | 36 |
7628041943 | from django.test import TestCase
from api import models
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from exercise.serializers import TagSerializer
TAGS_URL = reverse('exercise:tag-list')
#help funcs
# def create_user(email='user@example.com', password='testpass123'):
# """Create and return a user."""
# return get_user_model().objects.create_user(email=email, password=password)
def tag_url(tag_id):
    """Build the detail URL for a single tag."""
    return reverse('exercise:tag-detail', args=[tag_id])
class TagModelAPITest(TestCase):
    # API tests for the tag endpoints (no authentication exercised here).
    def setUp(self):
        # self.user = create_user()
        self.client = APIClient()

    def test_retrive_tags(self):
        # Listing returns every tag, ordered by name descending.
        models.Tag.objects.create(name='chest')
        models.Tag.objects.create(name='back')

        result = self.client.get(TAGS_URL)

        tags = models.Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        self.assertEqual(result.data, serializer.data)

    def test_tags_limited_to_user(self):
        # NOTE(review): despite the name, no per-user filtering is exercised —
        # two identical tags are created and only the first item's name is
        # checked; confirm intended coverage.
        models.Tag.objects.create(name='shoulders')
        tag = models.Tag.objects.create(name='shoulders')

        result = self.client.get(TAGS_URL)

        self.assertEqual(result.status_code, status.HTTP_200_OK)
        self.assertEqual(result.data[0]['name'], tag.name)

    def test_updating_tag(self):
        # PATCHing the detail URL renames the tag.
        tag = models.Tag.objects.create(name='chest')

        credentails = {'name': 'chest updated'}
        url = tag_url(tag.id)
        result = self.client.patch(url, data=credentails)

        self.assertEqual(result.status_code, status.HTTP_200_OK)
        tag.refresh_from_db()
        self.assertEqual(tag.name, credentails['name'])

    def test_deleting_tag(self):
        # DELETE removes the tag entirely.
        tag = models.Tag.objects.create(name='chest')

        url = tag_url(tag.id)
        result = self.client.delete(url)

        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT)
        self.assertFalse(models.Tag.objects.filter().exists())
| Mgalazyn/gym_api-drf | app/tests/test_tag_api.py | test_tag_api.py | py | 2,264 | python | en | code | 0 | github-code | 36 |
73947669542 | '''
Desarrollado por: Ferney Vanegas Hernández
Misión TIC 2022
Versión : 1.0.2
Título: Reto 4
'''
import modules.rows as r
import modules.columns as c
import modules.widhts as w
import modules.longs as l
import modules.walls as wall
def main():
    """Prompt for board size and wall count, then build and print the map."""
    dim = int(input('Ingresa un número para dimensionar el tablero (cuadrado Dim x Dim). Ej:2 (Para crear un tablero de 2x2\n'))
    pos = int(input('Ingresa por favor las posiciones (ó cantidad de muros) que deseas implementar(Ej: 4)\n'))

    # Base lists — dim - 1 because the helper modules draw randoms in [0, dim-1].
    rows = r.get_rows(dim - 1, pos)
    columns = c.get_columns(dim - 1, pos)
    widths = w.get_widths(dim - 1, pos)
    longs = l.get_longs(dim - 1, pos)

    # Wall coordinates derived from the base lists.
    coord_walls = wall.get_coord_walls(rows, columns, widths, longs, dim - 1)

    # Map construction.
    print(
        f'--------------------------------------------\n'
        f'+++ COORDENADAS DE CONSTRUCCIÓN +++\n'
        f'--------------------------------------------\n'
        f'{coord_walls}\n'
        f'--------------------------------------------\n'
        f'+++ MAPA +++\n'
        f'--------------------------------------------\n'
    )
    wall.construct_walls(coord_walls, dim)
main()
| ferneyvanegas/WorldCraft-ASCII-Listas | main.py | main.py | py | 1,468 | python | es | code | 1 | github-code | 36 |
11467704901 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn.functional import pad
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class ConvBNLayer(nn.Module):
    """Conv2d + BatchNorm2d (+ optional ReLU).

    When ``stride == (1, 1)`` (tuple) the layer switches to a 2x2-kernel
    dilated convolution (dilation=2); otherwise it is a plain
    ``kernel`` x ``kernel`` conv with 'same'-style padding.  ``act='None'``
    skips the activation.
    """

    def __init__(self, in_channels, out_channels, kernel, stride=1, act='ReLU'):
        super().__init__()
        self.act_flag = act
        dilated = stride == (1, 1)
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=2 if dilated else kernel,
            stride=stride,
            padding=(kernel - 1) // 2,
            dilation=2 if dilated else 1,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU(True)

    def forward(self, x):
        out = self.bn(self.conv(x))
        if self.act_flag != 'None':
            out = self.act(out)
        return out
class Shortcut(nn.Module):
    """Residual shortcut: identity when shapes already match, otherwise a
    1x1 ConvBNLayer projection.

    A projection is used when channel counts differ, the stride is not 1,
    or ``is_first`` is set. For the special ``stride == (1, 1)`` case the
    projection conv uses stride 1.
    """
    def __init__(self, in_channels, out_channels, stride, is_first=False):
        super(Shortcut, self).__init__()
        needs_projection = in_channels != out_channels or stride != 1 or is_first
        self.use_conv = needs_projection
        if needs_projection:
            conv_stride = 1 if stride == (1, 1) else stride
            self.conv = ConvBNLayer(in_channels, out_channels, 1, conv_stride)

    def forward(self, x):
        """Project x when required, otherwise pass it through unchanged."""
        return self.conv(x) if self.use_conv else x
class BottleneckBlock(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    The final 1x1 conv is created without activation so the ReLU is applied
    only after the shortcut addition.
    """
    def __init__(self, in_channels, out_channels, stride):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(in_channels, out_channels, kernel=1)
        self.conv1 = ConvBNLayer(out_channels, out_channels, kernel=3, stride=stride)
        self.conv2 = ConvBNLayer(out_channels, out_channels * 4, kernel=1, act='None')
        self.short = Shortcut(in_channels, out_channels * 4, stride=stride)
        self.out_channels = out_channels * 4  # channels produced by this block
        self.relu = nn.ReLU(True)

    def forward(self, x):
        """Residual forward: bottleneck branch plus (possibly projected) shortcut."""
        branch = self.conv2(self.conv1(self.conv0(x)))
        return self.relu(branch + self.short(x))
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus a shortcut.

    The second conv has no activation so ReLU is applied after the residual
    addition.

    Fix: the output-channel attribute was only available under the
    misspelled name ``out_chanels``; the correctly spelled ``out_channels``
    is added as an alias. The misspelled attribute is kept because
    ResNet_FPN (and possibly other callers) still reads it.
    """
    def __init__(self, in_channels, out_channels, stride, is_first):
        super(BasicBlock, self).__init__()
        self.conv0 = ConvBNLayer(in_channels, out_channels, kernel=3, stride=stride)
        self.conv1 = ConvBNLayer(out_channels, out_channels, kernel=3, act='None')
        self.short = Shortcut(in_channels, out_channels, stride, is_first)
        # Backward-compatible misspelled attribute + correctly spelled alias.
        self.out_chanels = out_channels
        self.out_channels = out_channels
        self.relu = nn.ReLU(True)

    def forward(self, x):
        """Residual forward pass: conv0 -> conv1, plus the shortcut branch."""
        y = self.conv0(x)
        y = self.conv1(y)
        y = y + self.short(x)
        y = self.relu(y)
        return y
class ResNet_FPN(nn.Module):
    """ResNet backbone with a small FPN-style top-down merge head.

    Stage outputs are collected during forward and merged (concat + 1x1 +
    3x3 convs) from the deepest stage downward, ending with a 1x1 conv to
    512 channels.
    """
    def __init__(self,in_channels=1,layers=50,**kwargs):
        super(ResNet_FPN,self).__init__()
        # depth = blocks per stage for each supported ResNet variant.
        # NOTE(review): 'block_class' entries are never read; the branch below
        # selects the block type from `layers` directly.
        supported_layers = {
            18:{
                'depth':[2,2,2,2],
                'block_class': BasicBlock
            },
            34:{
                'depth':[3,4,6,3],
                'block_class': BasicBlock
            },
            50:{
                'depth':[3,4,6,3],
                'block_class': BottleneckBlock
            },
            101:{
                'depth':[3,4,23,3],
                'block_class': BottleneckBlock
            },
            152:{
                'depth':[3,8,36,3],
                'block_class': BottleneckBlock
            }
        }
        # Per-stage strides: stages 3 and 4 keep spatial size (stride (1,1)),
        # which also triggers ConvBNLayer's dilated-conv mode.
        stride_list = [(2,2),(2,2,),(1,1),(1,1)]
        num_filters = [64,128,256,512]
        self.depth = supported_layers[layers]['depth']
        self.F = []
        self.conv = ConvBNLayer(in_channels=in_channels,out_channels=64,kernel=7,stride=2) # stem: e.g. 64x256 -> 32x128
        self.block_list = nn.ModuleList()
        in_ch = 64
        if layers>=50:
            # Deep variants use bottleneck blocks (4x channel expansion).
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    self.block_list.append(BottleneckBlock(in_channels=in_ch,out_channels=num_filters[block],stride = stride_list[block] if i==0 else 1))
                    in_ch = num_filters[block]*4
        else:
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    # NOTE(review): this `stride` variable is computed but never
                    # used — the BasicBlock below takes stride from stride_list.
                    if i==0 and block!=0:
                        stride = (2,1)
                    else:
                        stride = (1,1)
                    basic_block = BasicBlock(in_channels=in_ch,out_channels=num_filters[block],stride=stride_list[block] if i==0 else 1, is_first= block==i==0)
                    in_ch = basic_block.out_chanels
                    self.block_list.append(basic_block)
        # Channel counts of the last three stage outputs (shallow -> deep).
        out_ch_list = [in_ch // 4 ,in_ch // 2, in_ch]
        self.base_block = nn.ModuleList()
        # NOTE(review): conv_trans/bn_block are plain (unregistered) lists and
        # are never populated — the forward branch that indexes them would
        # raise IndexError if feature-map sizes ever mismatched. With the
        # stride_list above the sizes always match, so that path is dead code.
        self.conv_trans = []
        self.bn_block = []
        for i in [-2,-3]:
            in_channels = out_ch_list[i+1] + out_ch_list[i]
            self.base_block.append(nn.Conv2d(in_channels,out_ch_list[i],kernel_size=1)) # channel projection after concat
            self.base_block.append(nn.Conv2d(out_ch_list[i],out_ch_list[i],kernel_size=3,padding=1)) # fuse merged features
            self.base_block.append(nn.Sequential(nn.BatchNorm2d(out_ch_list[i]),nn.ReLU(True)))
        self.base_block.append(nn.Conv2d(out_ch_list[i],512,kernel_size=1))
        self.out_channels = 512
    def forward(self,x):
        """Run the backbone, collect per-stage features F, then merge top-down."""
        x = self.conv(x)
        fpn_list = []
        F = [ ]
        # Cumulative block counts mark the last block index of each stage.
        for i in range(len(self.depth)):
            fpn_list.append(np.sum(self.depth[:i+1]))
        for i,block in enumerate(self.block_list):
            x = block(x)
            for number in fpn_list:
                if i+1==number:
                    F.append(x)
        base = F[-1]
        j = 0
        for i,block in enumerate(self.base_block):
            # Every third module starts a new merge step with the next-shallower stage.
            if i%3 ==0 and i<6:
                j = j+1
                b,c,w,h = F[-j-1].size()
                if [w,h] == list(base.size()[2:]):
                    base = base
                else:
                    # NOTE(review): dead/broken path — conv_trans and bn_block
                    # are empty lists (see __init__), so this would crash.
                    base = self.conv_trans[j-1](base)
                    base = self.bn_block[j-1](base)
                base = torch.cat([base,F[-j-1]],dim=1)
            base = block(base)
        return base
if __name__=='__main__':
    # Smoke test: build a ResNet50-FPN backbone and run one forward pass.
    res_fpn = ResNet_FPN(3,50)
    res_fpn = res_fpn.to(device)
    print(res_fpn)
    # NOTE(review): batch of 140 images at 64x256 needs a lot of memory;
    # shrink the batch for CPU-only runs.
    x = torch.randn([140,3,64,256]).to(device)
    output = res_fpn(x)
| milely/SRN.Pytorch | backbone/resnet_fpn.py | resnet_fpn.py | py | 6,634 | python | en | code | 27 | github-code | 36 |
19956688482 | """
Given a list of different students' scores, write a function that returns the average of each student's top five scores. You should return the averages in ascending order of the students' id numbers.
Each entry (scores[i]) has the student's id number (scores[i][0]) and the student's score (scores[i][1]). The averages should be calculated using integer division.
Example 1:
Input: [[1,91],[1,92],[2,93],[2,97],[1,60],[2,77],[1,65],[1,87],[1,100],[2,100],[2,76]]
Output: [[1,87],[2,88]]
Explanation:
The average of student `1` is `87`.
The average of student `2` is `88.6`, but with integer division is `88`.
Notes:
The score of the students is between 1 and 100.
[execution time limit] 4 seconds (py3)
[input] array.array.integer scores
[output] array.array.integer
"""
def solution(scores):
    """Return [[student_id, average]] pairs, averaging each student's top
    five scores with integer (floor) division.

    Results are sorted by ascending student id, as the problem statement
    requires. (The original returned results in the order ids first appear
    in the input — dict insertion order — which is wrong when ids arrive
    out of order.) Students with fewer than five scores are averaged over
    however many scores they have.
    """
    per_student = {}
    for student_id, score in scores:
        per_student.setdefault(student_id, []).append(score)
    averages = []
    for student_id in sorted(per_student):
        top_five = sorted(per_student[student_id], reverse=True)[:5]
        averages.append([student_id, sum(top_five) // len(top_five)])
    return averages
18913786240 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 2018/6/12 20:38
@File : run.py
@contact : mmmaaaggg@163.com
@desc :
"""
import time
import logging
from ibats_bitmex_feeder.backend.orm import init
from ibats_bitmex_feeder.feeder import start_feeder
logger = logging.getLogger()  # root logger; handlers are configured elsewhere
if __name__ == "__main__":
    # Initialise backend state, start the feeder thread, then poll it until
    # it stops on its own or the user interrupts with Ctrl-C.
    init(True)
    # while True:
    supplier = start_feeder(init_symbols=True, do_fill_history=True)
    try:
        # start_feeder runs in the background; just wait while it is alive.
        while supplier.is_working:
            time.sleep(5)
    except KeyboardInterrupt:
        logger.warning('Feeder 终止...')
    finally:
        # Always shut the worker down and wait for its thread to exit.
        supplier.is_working = False
        supplier.stop()
        supplier.join()
        logger.warning('子线程已经结束')
| IBATS/IBATS_BitMexFeeder | run.py | run.py | py | 731 | python | en | code | 5 | github-code | 36 |
12772839204 | import os
import base64
import json
import logging
from aws_kinesis_agg.deaggregator import deaggregate_records
from src.consumers.mysql_consumer import MySQLConsumer
from src.utils import get_secret
logger = logging.getLogger()  # Lambda-wide root logger
logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))  # level overridable via env var
def handle_event(event, context):
    """AWS Lambda handler: de-aggregate Kinesis records (KPL format), decode
    each Maxwell payload, and replay it into MySQL.

    Fixes over the original:
    - ``mysql.close()`` ran unconditionally and raised AttributeError when
      the batch was empty (or when the secret lookup returned early before
      any record was processed); the close is now guarded and moved into a
      ``finally`` so the connection is also released if process_row raises.
    - ``logger.warn`` (deprecated alias) replaced with ``logger.warning``.
    """
    raw_records = event["Records"]
    records = deaggregate_records(raw_records)
    mysql = None
    secret_string = None
    processed = 0
    try:
        for record in records:
            payload = json.loads(base64.b64decode(record["kinesis"]["data"]).decode())  # noqa
            if secret_string is None:
                try:
                    secret_string = get_secret(f"/maxwell/{os.environ.get('CLUSTER_NAME')}")  # noqa
                except Exception:
                    logger.warning(f"No secret found for table, ignoring. Cluster: /maxwell/{os.environ.get('CLUSTER_NAME')}")
                    return
            if mysql is None:
                # NOTE(review): the connection is created once, from the FIRST
                # record's database — confirm all records in a batch target
                # the same database.
                mysql = MySQLConsumer(payload["database"], secret_string)
            logger.info("Processing records for: {}".format(payload["database"]))  # noqa
            mysql.process_row(payload)
            processed += 1
        logger.info("Number of records processed: {} ".format(processed))
    finally:
        if mysql is not None:
            mysql.close()
| troybESM/maxwell-kinesis-consumer | src/handlers/maxwell_kinesis_mysql.py | maxwell_kinesis_mysql.py | py | 1,216 | python | en | code | 0 | github-code | 36 |
32560269380 | # coding: utf-8
import sys
import os
from Public.Decorator import *
import uiautomator2 as u2
from Public.common import common
from tenacity import *
cm = common()
# Resolve the resource-id mapping: prefer the Google Play mapping file when it
# exists on disk, otherwise fall back to the vid mapping.
condition = os.path.exists(cm.mapping_gp_path)
mapping_path = (cm.mapping_vid_path,cm.mapping_gp_path)[condition]
res = cm.parse_mapping_file(mapping_path, condition)
class appBase(common):
    """Base page object for app UI automation: device setup plus reusable flows."""
    def __init__(self):
        self.d = u2.connect()  # uiautomator2 device handle
        self.apk_path = cm.apk_rel_path
        self.pkg_name = cm.pkg_name
        self.d.screen_on()  # wake the screen
        self.d.unlock()  # unlock the screen
        self.d.press('home')  # return to the launcher
    @exception_decoration
    def rate_skip(self):
        """Dismiss the rate-us dialog if it appears."""
        action.exist_click(text='Exit')
    @exception_decoration
    def case_restart_check(self,text=None,resourceId=None,restart=False):
        """Check whether the test case restarted; click the given anchor or raise."""
        if text is not None:
            self.d(text=text).click(timeout=2)
        elif resourceId is not None:
            # NOTE(review): passes `text` as the resourceId here — this looks
            # like a bug; should probably be `resourceId=resourceId`. Confirm.
            self.d(resourceId=text).click(timeout=2)
        elif restart is True:
            raise Exception('restart')
    @exception_decoration
    def vip_check(self):
        """Verify VIP status; if not VIP yet, walk the free-trial purchase flow."""
        self.d(text="ME").click(timeout=2)
        if not self.d(text='You are already a VIP').exists(timeout=5):  # VIP marker
            self.d(text='3-Day Free Trial').click(timeout=2)
            self.d(text="CONTINUE").click(timeout=5)
            self.d(text="订阅").click(timeout=5)
            action.exist_click(text='以后再说')
            action.exist_click(text='不用了')
            action.exist_click(text='确定')
            action.exist_click(text='Done')
            self.d(text="xxx").click(timeout=5)
        print('vip检查通过')
    @exception_pass
    def startpage_handle(self):
        """Dismiss the first-launch permission prompt and onboarding screens."""
        self.d(resourceId="com.android.permissioncontroller:id/permission_allow_button").click(timeout=5)
        self.d(text="Skip").click(timeout=2)
        self.d(text="Got It").click(timeout=2)
        print('启动开屏页检查通过')
    @exception_pass
    def clear_home_xxx(self):
        """Delete videos from the home tab until only the seed test videos remain."""
        self.d(text="xxx").click(timeout=2)
        test_xxx_name = self.d(resourceId=res['com.app.xxxxxx:id/tvxxxName']).get_text(timeout=10)
        while test_xxx_name not in ['xxx_xxx.mp4','app_xxx.mp4','xxx_xxx.mp4']:
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click()
            self.d(scrollable=True).scroll.toEnd()  # scroll to the bottom of the sheet
            self.d(text="Delete").click(timeout=2)
            self.d(text="OK").click(timeout=2)
            test_xxx_name = self.d(resourceId=res['com.app.xxxxxx:id/tvxxxName']).get_text(timeout=10)
        print('清理测试视频检查通过')
    @exception_pass
    def clear_downloaded_xxx(self):
        """Delete every entry in the download manager until its empty state shows."""
        while not self.d(text='Go to download').exists(timeout=2):
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click(timeout=2)
            self.d(text="Delete").click(timeout=2)
            self.d(text="Confirm").click(timeout=2)
        print('清理下载管理器记录通过')
    @exception_pass
    def clear_music(self):
        """Delete downloaded music entries until the seed track is on top."""
        self.d(text="MUSIC").click(timeout=2)
        music_title = self.d(resourceId=res['com.app.xxxxxx:id/tvSongName']).get_text(timeout=2)
        while 'test_music1' not in music_title:
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click(timeout=2)
            self.d(scrollable=True).scroll.toEnd()  # scroll to the bottom of the sheet
            self.d(text="Delete").click(timeout=2)
            self.d(text="OK").click(timeout=2)
            music_title = self.d(resourceId=res['com.app.xxxxxx:id/tvSongName']).get_text(timeout=2)
        self.d(text="xxx").click(timeout=2)
        print('清除音乐文件通过')
    @exception_pass
    def music_permission(self):
        """Accept the music-related permission prompts."""
        self.d(text="Ok").click(timeout=3)
        self.d(text="允许").click(timeout=3)
    @retry(stop=stop_after_attempt(2))
    def download_xxx(self):
        """Download a known video via URL search and wait for it to finish."""
        # Start a fresh download.
        self.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=2)
        # Clear any previous download records first.
        self.d(resourceId=res['com.app.xxxxxx:id/ivDownload']).click()
        self.clear_downloaded_xxx()
        self.d.press('back')
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=2)
        self.d.send_keys("https://www.ted.com/talks/armand_d_angour_the_ancient_origins_of_the_olympics/up-next")
        self.d.press('enter')
        self.d(resourceId=res['com.app.xxxxxx:id/button_analytics']).click(timeout=10)
        time.sleep(10)
        # Wait until the file size is resolved (contains 'MB') before downloading.
        if not str(self.d(resourceId=res['com.app.xxxxxx:id/text_size']).get_text(timeout=10)).__contains__('MB'):
            time.sleep(10)
        self.d(text="Download").click(timeout=5)
        self.d(text="view >").click(timeout=5)
        # NOTE(review): `raise ('...')` raises a string, which is a TypeError
        # in Python 3 — should be `raise Exception('...')`. Confirm intent.
        if not self.d(resourceId=res['com.app.xxxxxx:id/flCover']).exists(timeout=2): raise ('下载管理器没有视频')
        # A completed entry shows today's date in its "downloaded" label.
        check_text = time.strftime("%Y-%m-%d", time.localtime())
        suc_text = self.d(resourceId=res['com.app.xxxxxx:id/tvDownloaded']).get_text(timeout=240)
        if check_text not in suc_text:
            raise ('测试视频下载超时未完成')
        self.d(resourceId=res['com.app.xxxxxx:id/ivLeft']).click(timeout=2)
        self.d.press('back')
        self.d.press('back')
        self.d(text="xxx").click(timeout=1)
        print('下载视频通过')
    @exception_pass
    def clear_notification(self):
        """Open the notification shade and clear all notifications."""
        self.d.open_notification()
        self.d(text='全部清除').click(timeout=2)
    def home_start(self,text=None,resourceId=None):
        """Press home, relaunch the app, and optionally click an anchor element."""
        self.d.press('home')
        self.d.app_start(self.pkg_name)
        if text is not None:
            self.d(text=text).click(timeout=5)
        elif resourceId is not None:
            self.d(resourceId=resourceId).click(timeout=5)
    def xxx_xxx_check(self,xxxlink):
        """Open a trending link, play it, and verify playback time advances."""
        self.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=2)
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=2)
        self.d.send_keys(xxxlink)
        self.d.xpath('//*[@resource-id="app"]/android.view.View[1]/android.view.View[1]/android.view.View[1]/android.view.View[4]/android.view.View[1]').click(timeout=20)
        self.d(scrollable=True).scroll.toEnd()
        self.d.click(0.596, 0.808)
        self.d(resourceId=res['com.app.xxxxxx:id/iv_close']).click(timeout=20)
        self.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_pre = self.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        time.sleep(10)
        self.d.click(0.431, 0.232)  # tap to reveal the player controls again
        playtime_next = self.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        if playtime_pre == playtime_next: raise Exception('播放时间没有跑动')
        self.d.screenshot()
class VdieoPlay(appBase):
    """Video-player helpers. (Class name misspelling kept: external callers
    may reference `VdieoPlay`.)"""
    d = u2.connect()  # class-level device handle shared by the classmethods
    @classmethod
    @exception_pass
    def play_xxx_skip(cls):
        """Dismiss the playback tutorial overlays."""
        cls.d(resourceId=res['com.app.xxxxxx:id/svgOrientation']).click(timeout=5)
        cls.d(text="skip").click(timeout=2)  # skip playback guide (lowercase variant)
        cls.d(text="Skip").click(timeout=2)  # skip playback guide (capitalised variant)
    @classmethod
    def xxx_error_feedback(cls):
        """If a playback error dialog shows, submit feedback and fail the case."""
        if cls.d(text='Error!').exists(timeout=10):
            cls.d.screenshot()
            cls.d(text='Feedback').click(timeout=2)
            cls.d(text='Submit').click(timeout=2)
            raise Exception('视频异常')
    @classmethod
    def xxx_play_time_check(cls):
        """Sample the playback timestamp twice, 10s apart; return both values."""
        # Verify the play-time counter is actually advancing.
        cls.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_pre = cls.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        time.sleep(10)
        cls.d.click(0.431, 0.232)  # tap to reveal the player controls again
        playtime_next = cls.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        cls.d.screenshot()
        return playtime_pre, playtime_next
class action:
    """Small stateless UI helpers shared by the page objects."""
    d = u2.connect()

    @classmethod
    def exist_click(cls, text=None, resourceId=None):
        """Click the element selected by text (preferred) or resourceId,
        but only if it appears within 3 seconds; otherwise do nothing."""
        target = None
        if text is not None and cls.d(text=text).exists(timeout=3):
            target = cls.d(text=text)
        elif resourceId is not None and cls.d(resourceId=resourceId).exists(timeout=3):
            target = cls.d(resourceId=resourceId)
        if target is not None:
            target.click()

    @classmethod
    def screenshot_name(cls, name):
        """Save a screenshot named '<name>-<timestamp>.PNG' under the report
        directory and return the file name."""
        stamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
        screenshot = name + '-' + stamp + '.PNG'
        # ReportPath is expected to come from the star import at module top
        # (from Public.Decorator import *) — TODO confirm.
        path = os.path.join(ReportPath().get_path(), screenshot)
        cls.d.screenshot(path)
        return screenshot
if __name__ == '__main__':
    # Ad-hoc smoke check: confirm the mapping file resolved this resource id.
    print(res['com.app.xxxxxx:id/tvGotIt'])
| taylortaurus/android-ui-runner | Public/appBase.py | appBase.py | py | 9,249 | python | en | code | 0 | github-code | 36 |
27756843887 | import socket
import json
def main():
    """Run a minimal JSON-over-TCP server on localhost:8000 (blocks forever).

    Accepts one client at a time, reads until the client closes its side,
    parses the accumulated bytes as JSON, and prints the 'message' field.

    Fixes over the original:
    - sockets returned by accept() are blocking regardless of the listener's
      timeout, so the recv() socket.timeout handler was dead code — the
      client socket now gets its own timeout;
    - valid JSON without a 'message' key raised KeyError;
    - non-UTF-8 payloads raised UnicodeDecodeError (now replaced).
    """
    TYPE_OF_NETWORK_ADRESS = socket.AF_INET
    THE_PROTOCOL = socket.SOCK_STREAM  # TCP
    THE_LEVEL = socket.SOL_SOCKET
    THE_OPTION = socket.SO_REUSEADDR  # allow quick restart on the same port
    THE_VALUE = 1
    with socket.socket(TYPE_OF_NETWORK_ADRESS, THE_PROTOCOL) as the_socket:
        the_socket.setsockopt(THE_LEVEL, THE_OPTION, THE_VALUE)
        the_socket.bind(("localhost", 8000))
        the_socket.listen()
        the_socket.settimeout(1)
        while True:
            try:
                client_socket, address = the_socket.accept()
            except socket.timeout:
                continue
            print(f"Connected from: {address[0]}")
            with client_socket:
                # accept() returns a blocking socket even though the listener
                # has a timeout; set one explicitly so the handler below works.
                client_socket.settimeout(1)
                the_messages = []
                while True:
                    try:
                        the_data = client_socket.recv(4096)
                    except socket.timeout:
                        continue
                    if not the_data:
                        break  # client closed its side
                    the_messages.append(the_data)
                # Decode the accumulated bytes and parse the JSON payload.
                message_str = b''.join(the_messages).decode("utf-8", errors="replace")
                try:
                    message_dict = json.loads(message_str)
                except json.JSONDecodeError:
                    continue
                # Guard against valid JSON that is not a dict or lacks the key.
                if isinstance(message_dict, dict) and "message" in message_dict:
                    print(f"The message: {message_dict['message']}")
if __name__ == "__main__":
main()
| ibrahimhalilbayat/data_engineering_diary | Sockets/tcp_server.py | tcp_server.py | py | 1,496 | python | en | code | 0 | github-code | 36 |
14983002282 | #!/usr/bin/python
# encoding=utf8
from flask import Flask, render_template, request, flash, redirect, session, abort
from tinydb import TinyDB,Query
import os
import json
import cPickle as cp
import sys
reload(sys)  # Python 2 only: re-expose setdefaultencoding below
sys.setdefaultencoding('utf8')  # NOTE(review): global py2 encoding hack; removed in Python 3
app = Flask(__name__)
app.secret_key = os.urandom(12)  # random per-process session key (sessions reset on restart)
usersAnnot={"admin":"pwd"}  # annotator credentials — NOTE(review): plain-text password
userAdmin={}  # unused here — presumably for admin accounts; verify
# Mutable module-level session state (single-annotator assumption):
db=""         # TinyDB handle of the active annotator ('' when closed)
init = 0      # 1 once db has been opened
tI = 0        # id of the quote currently being annotated
userN = ""    # name of the logged-in user
def loadPickle(nameF):
    """Load and return the pickled object stored in file *nameF*.

    Uses a context manager so the file handle is closed deterministically;
    the original opened the file inline and leaked the handle (relying on
    garbage collection to close it).
    """
    with open(nameF, "rb") as fh:
        return cp.load(fh)
rand150 = loadPickle("rand5.cp")
quotes = loadPickle("quotes.cp")
@app.route('/')
def home(user = ""):
    """Render the annotation page.

    With a logged-in user: lazily open that user's TinyDB file, compute the
    quote ids still unannotated, then either flag completion or select the
    next quote to display.
    """
    global db, init, tI, userN
    complete = 0
    msg = []
    if user:
        if not init:
            # One database file per annotator, opened on first visit.
            db = TinyDB(user+".json")
            init = 1
        done = []
        for i in db:
            done.append(i["id"])
        # Remaining work = preselected sample minus already-annotated ids.
        toDo = list(set(rand150).difference(done))
        if not toDo:
            complete = 1
        else:
            tI = toDo[0]
            msg = quotes[tI]
    return render_template("index.html", user = user, complete = complete, msg = msg, lenMsg = len(msg))
@app.route('/', methods=['POST'])
def doStuff():
    """Persist one submitted annotation, then re-render the page.

    Form fields named 'other<key>' hold free-text fallbacks for the
    corresponding '<key>' field when that field was left empty.
    """
    global db,tI
    tP = {}
    ky = request.form.keys()
    for i in ky:
        if not "other" in i:
            if not request.form[i]:
                # Empty structured field: use the matching free-text value.
                tP[i] = request.form['other'+str(i)]
            else:
                tP[i] = request.form[i]
    db.insert({"id":tI,"em":tP})
    return home(user = userN)
@app.route('/login', methods=['POST'])
def do_admin_login():
    """Validate the submitted credentials against usersAnnot; on success set
    the session flag, on failure flash a message and stay logged out."""
    global userN
    userN = request.form['username']
    password = request.form['password']
    if userN in usersAnnot:
        if usersAnnot[userN] == password:
            session['logged_in'] = True
        else:
            userN = ""
            flash("Wrong Password Entered.!")
    else:
        flash('User Not Authorized.!')
        userN = ""
    return home(user = userN)
@app.route('/login')
def something():
    """GET on /login simply shows the (logged-out) home page."""
    return home()
@app.route("/logout")
def logout():
global userN, db, init, tI
session['logged_in'] = False
if db:
db.close()
db = ""
init = 0
tI = 0
userN = ""
return home(user = "")
@app.errorhandler(404)
def page_not_found(e):
    """Unknown URLs fall back to the home page instead of a 404 response."""
    return home()
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run() | ankitvad/AnnotationSite | hello.py | hello.py | py | 2,037 | python | en | code | 0 | github-code | 36 |
29392684092 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findTarget(self, root: 'Optional[TreeNode]', k: int) -> bool:
        """Return True iff the BST rooted at *root* contains two distinct
        nodes whose values sum to *k* (LeetCode 653, Two Sum IV).

        Walks the tree once, remembering seen values in a set, and stops as
        soon as a complement is found. Fixes over the original: always
        returns a bool (the recursive version could return None when no pair
        existed) and exits early instead of continuing to traverse after a
        match. Annotations are quoted so the class also loads where
        TreeNode/Optional are not importable.
        """
        seen = set()
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            # A previously seen value completing the pair means success.
            if k - node.val in seen:
                return True
            seen.add(node.val)
            stack.append(node.left)
            stack.append(node.right)
        return False
6298326622 |
import pyttsx3
import speech_recognition as sr
import PyPDF2
from gtts import gTTS
from googletrans import Translator
from playsound import playsound
import os
assistant=pyttsx3.init("sapi5") #creation object for speak
voices=assistant.getProperty('voices') #check voices
assistant.setProperty('voice', voices[0].id) # 1 for female 0 for male
assistant.setProperty('rate',170)
def speaking(audio):
    """Speak *audio* aloud through the configured TTS engine (blocking)."""
    assistant.say(audio)  # queue the utterance
    print("")
    assistant.runAndWait()  # block until queued speech has finished
    print("")
def command():
    """Capture one phrase from the microphone and return it lower-cased.

    Returns the string "None" (a sentinel, not the None object) when
    recognition fails for any reason — callers test membership against
    dictionaries, so the sentinel simply never matches.
    """
    # NOTE(review): this local shadows the function name; harmless here but
    # worth renaming.
    command=sr.Recognizer()
    # Record from the default microphone.
    with sr.Microphone() as source:
        print("Listening.....!")
        command.pause_threshold=1  # min seconds of silence that ends a phrase
        audio=command.listen(source)
    try:
        print("Recognizing....!")
        # Google Web Speech API, Indian-English model.
        query=command.recognize_google(audio,language='en-in')
    except Exception as Error:
        return "None"
    return query.lower()
def reader(query):
    """Read one page of a chosen PDF book aloud, optionally translated.

    Flow: ask for a book name, look it up in a hard-coded directory, open it
    with PyPDF2, extract the requested page's text, then either synthesize it
    via googletrans + gTTS in the requested language or speak it directly.
    """
    speaking("Tell me name of the book")
    name=input("enter book name :")
    # NOTE(review): hard-coded, user-specific Windows path — should be configurable.
    Book_dir='C:\\Users\\KABIR\Desktop\\Book'
    Books=os.listdir(Book_dir)  # list the available books
    name=name+'.pdf'
    if name in Books:
        path='C:\\Users\\KABIR\Desktop\\Book\\'+name
        os.startfile(path)  # also open the PDF in the default viewer (Windows only)
        book=open(path,'rb')
        pdfreader=PyPDF2.PdfFileReader(book)
        pages=pdfreader.getNumPages()
        speaking(f"Number of pages in this book are {pages}")
        speaking("Enter from which page i have to start reading")
        numpage=int(input("Enter start page : "))
        page=pdfreader.getPage(numpage)
        text=page.extractText()
        speaking("Tell me which language, i have to read")
        lang=command()
        # Spoken language name -> googletrans language code.
        # NOTE(review): some entries look wrong ('sw' is Swahili, not Swedish;
        # 'tamli' is a typo for 'tamil'; 'bh' support is doubtful) — verify.
        dict_lang={'marathi':'mr','bihari':'bh','italian':'it','korean':'ko','swedish':'sw','malayalam':'ml','latin':'la','urdu':'ur','armenian':'hy','english':'en','hindi':'hi','bengali':'bn','arabic':'ar','tamli':'ta','spanish':'es','french':'fr','chinese':'zh-cn'}
        if lang in dict_lang:
            transl=Translator()
            language=dict_lang[lang]
            # Translate the page text, then synthesize it to book.mp3.
            # NOTE(review): gTTS is called without a lang argument, so the
            # voice defaults to English even for translated text — confirm.
            textlang=transl.translate(text,dest=language).text
            textm=textlang
            speech=gTTS(text=textm)
            # playsound can fail (e.g. file locked from a previous run); the
            # fallback replays whatever book.mp3 already exists.
            try:
                speech.save("book.mp3")
                playsound("book.mp3")
            except:
                playsound("book.mp3")
        else:
            speaking(text)
    else:
        speaking("No book found in your directory")
| Kabir2099/Desktop-Assistant | Book_Reader.py | Book_Reader.py | py | 2,948 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.