seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
data_root = 'data/Ver3/output/' # Root path of data
# Path of train annotation file
train_ann_file = 'train.json'
train_data_prefix = 'train/' # Prefix of train image path
# Path of val annotation file
val_ann_file = 'val.json'
val_data_prefix = 'val/' # Prefix of val image path
class_name = ('ebike', ) # the name of classes
num_classes = len(class_name) # the number of classes
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])
train_batch_size_per_gpu = 32
max_epochs = 400 # Maximum training epochs
# ========================modified parameters======================
deepen_factor = 0.67
widen_factor = 0.75
lr_factor = 0.1
affine_scale = 0.9
loss_cls_weight = 0.3
loss_obj_weight = 0.7
mixup_prob = 0.1
load_from = "/mnt/c/Users/ShengRen/wise_transportation/mmyolo/work_dirs/MMYolo/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth"
# =======================Unmodified in most cases==================
num_det_layers = _base_.num_det_layers
img_scale = _base_.img_scale
model = dict(
backbone=dict(
frozen_stages=4,
deepen_factor=deepen_factor,
widen_factor=widen_factor,
),
neck=dict(
deepen_factor=deepen_factor,
widen_factor=widen_factor,
),
bbox_head=dict(
head_module=dict(widen_factor=widen_factor,
num_classes=num_classes),
loss_cls=dict(loss_weight=loss_cls_weight *
(num_classes / 80 * 3 / num_det_layers)),
loss_obj=dict(loss_weight=loss_obj_weight *
((img_scale[0] / 640)**2 * 3 / num_det_layers))))
pre_transform = _base_.pre_transform
albu_train_transforms = _base_.albu_train_transforms
mosaic_affine_pipeline = [
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
pre_transform=pre_transform),
dict(
type='YOLOv5RandomAffine',
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
# img_scale is (width, height)
border=(-img_scale[0] // 2, -img_scale[1] // 2),
border_val=(114, 114, 114))
]
# enable mixup
train_pipeline = [
*pre_transform, *mosaic_affine_pipeline,
dict(
type='YOLOv5MixUp',
prob=mixup_prob,
pre_transform=[*pre_transform, *mosaic_affine_pipeline]),
dict(
type='mmdet.Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
}),
dict(type='YOLOv5HSVRandomAug'),
dict(type='mmdet.RandomFlip', prob=0.5),
dict(
type='mmdet.PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
'flip_direction'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline,
data_root=data_root,
metainfo=metainfo,
ann_file=train_ann_file,
data_prefix=dict(img=train_data_prefix),),
batch_size=train_batch_size_per_gpu)
val_dataloader = dict(dataset=dict(data_root=data_root,
metainfo=metainfo,
data_prefix=dict(img=val_data_prefix),
ann_file=val_ann_file,))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + val_ann_file)
test_evaluator = val_evaluator
default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor,
max_epochs=max_epochs))
train_cfg = dict(max_epochs=max_epochs)
visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='TensorboardVisBackend')])
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 13:22:19 2016
@author: Zhaoyi.Shen
"""
import numpy as np
def swobs_col(filename):
npz = np.load(filename)
swup_toa = npz['swup_toa']
swdn_toa = npz['swdn_toa']
swup_sfc = npz['swup_sfc']
swdn_sfc = npz['swdn_sfc']
return swdn_toa-swdn_sfc+swup_sfc-swup_toa
def swobs_col_clr(filename):
npz = np.load(filename)
swup_toa = npz['swup_toa_clr']
swdn_toa = npz['swdn_toa_clr']
swup_sfc = npz['swup_sfc_clr']
swdn_sfc = npz['swdn_sfc_clr']
return swdn_toa-swdn_sfc+swup_sfc-swup_toa
def lwobs_col(filename):
npz = np.load(filename)
olr = npz['olr']
lwup_sfc = npz['lwup_sfc']
lwdn_sfc = npz['lwdn_sfc']
return -lwdn_sfc+lwup_sfc-olr
def lwobs_col_clr(filename):
npz = np.load(filename)
olr = npz['olr_clr']
lwup_sfc = npz['lwup_sfc_clr']
lwdn_sfc = npz['lwdn_sfc_clr']
return -lwdn_sfc+lwup_sfc-olr
| szy21/py | lib/calc.py | calc.py | py | 955 | python | en | code | 1 | github-code | 36 |
import pandas as pd
import streamlit as st
import numpy as np
from common import session_manager
ssm = session_manager.st_session()
# ssm.write_session_info()
st.title("表を描画する")
# データフレームを元に表を表示する
df = pd.DataFrame({
'first column': [1, 2, 3, 4],
'second column': [10, 20, 30, 40]
})
st.write("write関数")
st.write(df)
st.write("table関数")
st.table(df)
st.write("dataframe関数、writeとほとんど同じ?")
st.dataframe(df)
#
df2 = np.random.randn(10, 20)
st.dataframe(df2)
#
dataframe = pd.DataFrame(
np.random.randn(10, 20),
columns=('col %d' % i for i in range(20))) # col名を設定
# highlight_max(最大値にハイライトする)、axis=0(インデックス(列?)の対して評価する、1とすると行になる)
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.io.formats.style.Styler.highlight_max.html#pandas.io.formats.style.Styler.highlight_max
st.dataframe(dataframe.style.highlight_max(axis=1,color="red"))
#
st.table(dataframe.style.highlight_max(axis=1,color="red"))
| nishimu555/streamlit-lab | lab2/app/pages/02_write_and_table.py | 02_write_and_table.py | py | 1,107 | python | ja | code | 0 | github-code | 36 |
import cv2
import numpy
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
import os
import random
from tqdm import tqdm
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry, SamPredictor
import argparse
from utils.utils import *
import time
def get_embedding(img, predictor):
predictor.set_image(img)
img_emb = predictor.get_image_embedding()
return img_emb
def train(args, predictor):
data_path = args.data_path
assert os.path.exists(data_path), 'data path does not exist!'
num_image = args.k
fnames = os.listdir(os.path.join(data_path, 'images'))
# get 20 random indices from fnames
random.shuffle(fnames)
fnames = fnames[:num_image]
image_embeddings = []
labels = []
# get the image embeddings
print('Start training...')
t1 = time.time()
i = 0
for fname in tqdm(fnames):
# read data
image = cv2.imread(os.path.join(data_path, 'images', fname))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(os.path.join(data_path, 'masks', fname))
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY) # threshold the mask to 0 and 1
downsampled_mask = cv2.resize(mask, dsize=(64, 64), interpolation=cv2.INTER_NEAREST)
img_emb = get_embedding(image, predictor)
img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
image_embeddings.append(img_emb)
labels.append(downsampled_mask.flatten())
i += 1
if i > num_image: break
t2 = time.time()
print("Time used: {}m {}s".format((t2 - t1) // 60, (t2 - t1) % 60))
image_embeddings_cat = np.concatenate(image_embeddings)
labels = np.concatenate(labels)
# Create a linear regression model and fit it to the training data
model = LogisticRegression(max_iter=1000)
model.fit(image_embeddings_cat, labels)
return model
def test_visualize(args, model, predictor):
data_path = args.data_path
num_image = args.k
fnames = os.listdir(os.path.join(data_path, 'images'))
random.shuffle(fnames)
fnames = fnames[num_image:]
num_visualize = args.visualize_num
dice_linear = []
dice1 = []
dice2 = []
dice3 = []
i = 0
for fname in tqdm(fnames[:num_visualize]):
# read data
image = cv2.imread(os.path.join(data_path, 'images', fname))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(os.path.join(data_path, 'masks', fname))
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY)
H, W, _ = image.shape
# get the image embedding and flatten it
img_emb = get_embedding(image, predictor)
img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
# get the mask predicted by the linear classifier
y_pred = model.predict(img_emb)
y_pred = y_pred.reshape((64, 64))
# mask predicted by the linear classifier
mask_pred_l = cv2.resize(y_pred, (mask.shape[1], mask.shape[0]), interpolation=cv2.INTER_NEAREST)
# use distance transform to find a point inside the mask
fg_point = get_max_dist_point(mask_pred_l)
# Define the kernel for dilation
kernel = np.ones((5, 5), np.uint8)
eroded_mask = cv2.erode(mask_pred_l, kernel, iterations=3)
mask_pred_l = cv2.dilate(eroded_mask, kernel, iterations=5)
# set the image to sam
predictor.set_image(image)
# prompt the sam with the point
input_point = np.array([[fg_point[0], fg_point[1]]])
input_label = np.array([1])
masks_pred_sam_prompted1, _, _ = predictor.predict(
point_coords=input_point,
point_labels=input_label,
box=None,
multimask_output=False,
)
# prompt the sam with the bounding box
y_indices, x_indices = np.where(mask_pred_l > 0)
if np.all(mask_pred_l == 0):
bbox = np.array([0, 0, H, W])
else:
x_min, x_max = np.min(x_indices), np.max(x_indices)
y_min, y_max = np.min(y_indices), np.max(y_indices)
H, W = mask_pred_l.shape
x_min = max(0, x_min - np.random.randint(0, 20))
x_max = min(W, x_max + np.random.randint(0, 20))
y_min = max(0, y_min - np.random.randint(0, 20))
y_max = min(H, y_max + np.random.randint(0, 20))
bbox = np.array([x_min, y_min, x_max, y_max])
masks_pred_sam_prompted2, _, _ = predictor.predict(
point_coords=None,
point_labels=None,
box=bbox[None, :],
multimask_output=False,)
# prompt the sam with both the point and bounding box
masks_pred_sam_prompted3, _, _ = predictor.predict(
point_coords=input_point,
point_labels=input_label,
box=bbox[None, :],
multimask_output=False,)
dice_l = dice_coef(mask, mask_pred_l)
dice_p = dice_coef(mask, masks_pred_sam_prompted1[0])
dice_b = dice_coef(mask, masks_pred_sam_prompted2[0])
dice_i = dice_coef(mask, masks_pred_sam_prompted3[0])
dice_linear.append(dice_l)
dice1.append(dice_p)
dice2.append(dice_b)
dice3.append(dice_i)
# plot the results
fig, ax = plt.subplots(1, 5, figsize=(15, 10))
ax[0].set_title('Ground Truth')
ax[0].imshow(mask)
ax[1].set_title('Linear + e&d')
ax[1].plot(fg_point[0], fg_point[1], 'r.')
ax[1].imshow(mask_pred_l)
ax[2].set_title('Point')
ax[2].plot(fg_point[0], fg_point[1], 'r.')
ax[2].imshow(masks_pred_sam_prompted1[0])
ax[3].set_title('Box')
show_box(bbox, ax[3])
ax[3].imshow(masks_pred_sam_prompted2[0])
ax[4].set_title('Point + Box')
ax[4].plot(fg_point[0], fg_point[1], 'r.')
show_box(bbox, ax[4])
ax[4].imshow(masks_pred_sam_prompted3[0])
[axi.set_axis_off() for axi in ax.ravel()]
if os.path.exists(args.save_path) == False:
os.mkdir(args.save_path)
plt.savefig(os.path.join(args.save_path, fname.split('.')[0]+str(i)))
mdice0 = round(sum(dice_linear)/float(len(dice_linear)), 5)
mdice1 = round(sum(dice1)/float(len(dice1)), 5)
mdice2 = round(sum(dice2)/float(len(dice2)), 5)
mdice3 = round(sum(dice3)/float(len(dice3)), 5)
print('For the first {} images: '.format(num_visualize))
print('mdice(linear classifier: )', mdice0)
print('mDice(point prompts): ', mdice1)
print('mDice(bbox prompts): ', mdice2)
print('mDice(points and boxes): ', mdice3)
def test(args, predictor):
    """Run 5-fold cross-validated evaluation of the few-shot SAM pipeline.

    For each fold: fit a logistic-regression head on embeddings of ``args.k``
    images sampled from the fold's training split, then report mean Dice on
    the test split for (a) the linear mask, (b) SAM prompted with a point,
    (c) SAM prompted with a box, and (d) SAM prompted with both.

    Args:
        args: parsed CLI namespace; uses ``data_path`` and ``k``.
        predictor: a ``SamPredictor`` for embeddings and prompted predictions.
    """
    data_path = args.data_path
    images = []
    masks = []
    fnames = os.listdir(os.path.join(data_path, 'images'))
    print(f'loading images from {data_path}...')
    for fname in tqdm(fnames):
        # read data: image (BGR -> RGB) and binary 0/1 mask
        image = cv2.imread(os.path.join(data_path, 'images', fname))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(os.path.join(data_path, 'masks', fname))
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY)
        images.append(image)
        masks.append(mask)
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    for train_index, text_index in kf.split(images):
        train_images = [images[i] for i in train_index]
        train_masks = [masks[i] for i in train_index]
        test_images = [images[i] for i in text_index]
        test_masks = [masks[i] for i in text_index]
        # train the linear classifier on k randomly sampled training images
        k = args.k
        random_indices = random.sample(range(len(train_images)), k)
        image_embeddings = []
        labels = []
        for idx in random_indices:
            image = train_images[idx]
            mask = train_masks[idx]
            # Match the 64x64 spatial resolution of the SAM embedding grid.
            downsampled_mask = cv2.resize(mask, dsize=(64, 64), interpolation=cv2.INTER_NEAREST)
            # BUG FIX: get_embedding requires the predictor; it was called as
            # get_embedding(image), which raised TypeError at runtime.
            img_emb = get_embedding(image, predictor)
            img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
            image_embeddings.append(img_emb)
            labels.append(downsampled_mask.flatten())
        image_embeddings_cat = np.concatenate(image_embeddings)
        labels = np.concatenate(labels)
        model = LogisticRegression(max_iter=1000)  # how to set parameters?? C, max_iter, verbose, solver
        model.fit(image_embeddings_cat, labels)
        # test on the fold's held-out images
        dice_linear = []
        dice1 = []
        dice2 = []
        dice3 = []
        for idx in range(len(test_images)):
            image = test_images[idx]
            mask = test_masks[idx]
            H, W, _ = image.shape
            # BUG FIX: same missing-predictor call as above.
            img_emb = get_embedding(image, predictor)
            img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
            # get the mask predicted by the linear classifier
            y_pred = model.predict(img_emb)
            y_pred = y_pred.reshape((64, 64))
            mask_pred_l = cv2.resize(y_pred, (mask.shape[1], mask.shape[0]), interpolation=cv2.INTER_NEAREST)
            # use distance transform to find a point inside the mask
            fg_point = get_max_dist_point(mask_pred_l)
            # Erode then dilate (morphological clean-up) before deriving prompts.
            kernel = np.ones((5, 5), np.uint8)
            eroded_mask = cv2.erode(mask_pred_l, kernel, iterations=3)
            mask_pred_l = cv2.dilate(eroded_mask, kernel, iterations=5)
            # set the image to sam
            predictor.set_image(image)
            # prompt sam with the point
            input_point = np.array([[fg_point[0], fg_point[1]]])
            input_label = np.array([1])
            masks_pred_sam_prompted1, _, logits = predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                box=None,
                multimask_output=False,)
            # prompt sam with the bbox (jittered by up to 20 px per side)
            y_indices, x_indices = np.where(mask_pred_l > 0)
            if np.all(mask_pred_l == 0):
                # Empty linear mask: fall back to the whole image as the box.
                bbox = np.array([0, 0, H, W])
            else:
                x_min, x_max = np.min(x_indices), np.max(x_indices)
                y_min, y_max = np.min(y_indices), np.max(y_indices)
                H, W = mask_pred_l.shape
                x_min = max(0, x_min - np.random.randint(0, 20))
                x_max = min(W, x_max + np.random.randint(0, 20))
                y_min = max(0, y_min - np.random.randint(0, 20))
                y_max = min(H, y_max + np.random.randint(0, 20))
                bbox = np.array([x_min, y_min, x_max, y_max])
            masks_pred_sam_prompted2, _, _ = predictor.predict(
                point_coords=None,
                point_labels=None,
                box=bbox[None, :],
                multimask_output=False,)
            # prompt sam with both the point and the bbox
            masks_pred_sam_prompted3, _, _ = predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                box=bbox[None, :],
                multimask_output=False,)
            dice_l = dice_coef(mask, mask_pred_l)
            dice_p = dice_coef(mask, masks_pred_sam_prompted1[0])
            dice_b = dice_coef(mask, masks_pred_sam_prompted2[0])
            dice_c = dice_coef(mask, masks_pred_sam_prompted3[0])
            dice_linear.append(dice_l)
            dice1.append(dice_p)
            dice2.append(dice_b)
            dice3.append(dice_c)
        mdice0 = round(sum(dice_linear)/float(len(dice_linear)), 5)
        mdice1 = round(sum(dice1)/float(len(dice1)), 5)
        mdice2 = round(sum(dice2)/float(len(dice2)), 5)
        mdice3 = round(sum(dice3)/float(len(dice3)), 5)
        print('mdice(linear classifier: )', mdice0)
        print('mDice(point prompts): ', mdice1)
        print('mDice(bbox prompts): ', mdice2)
        print('mDice(points and boxes): ', mdice3)
        print('\n')
def main():
    """CLI entry point: parse args, load SAM, then train+visualize or cross-validate."""
    def _str2bool(value):
        # BUG FIX: argparse's `type=bool` treats ANY non-empty string (even
        # "False") as True, so `--visualize False` still visualized. Parse the
        # common spellings explicitly; the default (True) is unchanged.
        return str(value).lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cuda:0', help='device')
    parser.add_argument('--k', type=int, default=10, help='number of pics')
    parser.add_argument('--data_path', type=str, default='./data/Kvasir-SEG', help='path to train data')
    parser.add_argument('--model_type', type=str, default='vit_b', help='SAM model type')
    parser.add_argument('--checkpoint', type=str, default='./checkpoints/sam_vit_b_01ec64.pth', help='SAM checkpoint')
    parser.add_argument('--visualize', type=_str2bool, default=True, help='visualize the results')
    parser.add_argument('--save_path', type=str, default='./results', help='path to save the results')
    parser.add_argument('--visualize_num', type=int, default=30, help='number of pics to visualize')
    args = parser.parse_args()
    # set random seed (shared by train/test shuffling and sampling)
    random.seed(42)
    # register the SAM model and build the predictor
    # (the former `global predictor` was removed: every callee receives the
    # predictor as an explicit argument, so the global was never read)
    sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint).to(args.device)
    predictor = SamPredictor(sam)
    print('SAM model loaded!', '\n')
    if args.visualize:
        model = train(args, predictor)
        test_visualize(args, model, predictor)
    else:
        test(args, predictor)
if __name__ == '__main__':
    main()
from pyspark.sql import Window
import pyspark.sql.functions as f
from app import columns
class QueryManager:
    """Collection of analytic queries over NYC taxi trip data.

    Wraps two PySpark DataFrames — ``trip_fare_df`` (fares, tips, payment
    types) and ``trip_data_df`` (trip metadata: times, distances, passengers)
    — and exposes one method per report. All methods return DataFrames
    lazily; nothing is executed until an action is called by the caller.

    NOTE(review): several methods order or rank on the output of
    ``f.format_number(...)``, which is a STRING column — comparisons are
    lexicographic (e.g. '9.50' > '10.00'), so rankings over formatted values
    may be wrong. Confirm whether formatting should happen after ordering.
    """

    def __init__(self, spark, trip_fare_df, trip_data_df):
        # spark: active SparkSession (kept for potential SQL use by callers).
        self.spark = spark
        self.trip_fare_df = trip_fare_df
        self.trip_data_df = trip_data_df

    def trips_count(self, date_column):
        """
        Args:
            date_column: desired date column in dataframe
        Returns:
            dataframe which has three columns
            1. Vendor_ID
            2. Day of Week
            3. Count (count of trips)
        """
        trip_df = self.trip_data_df.withColumn(
            'dayofweek', f.date_format(self.trip_data_df[date_column], 'EEEE'))
        # Keep, per vendor, only the weekday(s) with the maximal trip count.
        trips_by_week = (trip_df.filter(f.col(columns.vendor_id).isNotNull())
                         .groupBy(columns.vendor_id, 'dayofweek')
                         .count()
                         .orderBy(f.desc(columns.vendor_id), f.desc('count'))
                         .withColumn('max_trip_count',
                                     f.max('count').over(Window.partitionBy('vendor_id')))
                         .filter(f.col('count') == f.col('max_trip_count'))
                         .drop('max_trip_count'))
        return trips_by_week

    def total_revenue(self):
        """ Calculates the total revenue of each vendor
        Returns:
            DataFrame: A DataFrame containing the total revenue for each vendor.
        """
        dataframe = (self.trip_fare_df.filter(f.col(columns.vendor_id).isNotNull())
                     .groupBy(columns.vendor_id)
                     .agg(f.format_number(f.sum(columns.total_amount), 2).alias('total revenue')))
        return dataframe

    def avg_trip_distance(self):
        """
        Calculates the average trip distance for different numbers of passengers.
        Returns:
            DataFrame: A DataFrame containing the average trip distance for each combination of vendor
            and passenger count.
        """
        dataframe = (self.trip_data_df.filter(f.col(columns.passenger_count).isNotNull())
                     .groupBy(columns.vendor_id, columns.passenger_count)
                     .agg(f.avg(columns.trip_distance))
                     .orderBy(f.desc(columns.passenger_count)))
        return dataframe

    def simultaneous_trips(self):
        """
        Calculates the maximum number of simultaneous trips that happened on the same day.
        Returns:
            DataFrame: A DataFrame containing the maximum number of simultaneous trips for the top 5 days.
        """
        # Encode pickups as +1 and dropoffs as -1 events on a common timeline.
        pickup_dataframe = (self.trip_data_df.filter(f.col(columns.pickup_datetime).isNotNull())
                            .select(f.col(columns.pickup_datetime).alias('event_time'),
                                    f.lit(1).alias('event_count')))
        dropoff_dateframe = (self.trip_data_df.filter(f.col(columns.dropoff_datetime).isNotNull())
                             .select(f.col(columns.dropoff_datetime).alias('event_time'),
                                     f.lit(-1).alias('event_count')))
        event_dateframe = pickup_dataframe.union(dropoff_dateframe)
        # NOTE(review): the window is partitioned BY event_time, so 'sum' is the
        # net event count at each identical timestamp, not a running total of
        # concurrent trips — confirm whether an unpartitioned cumulative window
        # was intended.
        dataframe = event_dateframe.withColumn('sum', f.sum('event_count').over(
            Window.partitionBy('event_time').orderBy(f.asc('event_time'))))
        dataframe = dataframe.groupBy(f.date_format('event_time', 'yyyy-MM-dd').alias('day')
                                      ).agg(f.max('sum').alias('simultaneous_trips')).orderBy(
            f.desc(f.col('simultaneous_trips'))).limit(5)
        return dataframe

    def most_expensive_trips(self):
        """
        Calculates the most expensive trips for each vendor.
        Returns:
            DataFrame: A DataFrame containing the most expensive trips for each vendor.
        """
        dataframe = (self.trip_fare_df.filter(f.col(columns.vendor_id).isNotNull())
                     .groupBy(columns.vendor_id)
                     .agg(f.max(columns.total_amount).alias(columns.total_amount)))
        return dataframe

    def avg_amount_rate_code(self):
        """
        Calculates the count of trips with a tip above the average tip amount for trips with different rate codes.
        Returns:
            DataFrame: A DataFrame containing the count of such trips for each rate code.
        """
        dataframe = self.trip_fare_df.join(self.trip_data_df, ['medallion', 'hack_license', 'vendor_id',
                                                               'pickup_datetime'], 'inner')
        average_tip_amounts = dataframe.groupBy(columns.rate_code).agg(f.avg(columns.tip_amount)
                                                                       .alias('avg_tip_amount'))
        joined_data = dataframe.join(average_tip_amounts, [columns.rate_code], 'inner')
        dataframe = joined_data.withColumn('tip_above_avg', f.col('tip_amount') > f.col('avg_tip_amount'))
        # NOTE(review): 'tip_above_avg' is computed but never applied as a
        # filter, so this counts ALL trips per rate code, not only those above
        # the average — confirm against the documented intent above.
        dataframe = (dataframe.groupBy(columns.rate_code).count()
                     .withColumnRenamed('count', 'trip_count')
                     .orderBy(f.desc('trip_count')))
        return dataframe

    def tips_count(self):
        """ Identifies the specific day of the week when each vendor tends to receive the highest amount of tips.
        Returns:
            DataFrame: A DataFrame containing the day of the week and the corresponding highest amount of tips received
            for each vendor.
        """
        # NOTE(review): 'total_tips' is a format_number string; the desc()
        # ordering below is therefore lexicographic (see class docstring).
        window_spec = Window.partitionBy(columns.vendor_id).orderBy(f.col("total_tips").desc())
        dataframe = (self.trip_fare_df.withColumn("day_of_week", f.date_format(columns.pickup_datetime, 'EEEE'))
                     .groupBy(columns.vendor_id, "day_of_week")
                     .agg(f.format_number(f.sum(columns.tip_amount), 2).alias("total_tips"))
                     .withColumn("rank", f.row_number().over(window_spec))
                     .filter(f.col("rank") == 1)
                     .select(columns.vendor_id, "day_of_week", "total_tips"))
        return dataframe

    def avg_fare_amount_payment(self):
        """ Calculates the average fare amount for each payment type.
        Returns:
            DataFrame: A DataFrame containing the average fare amount for each payment type.
        """
        dataframe = (self.trip_fare_df.groupBy(columns.payment_type)
                     .agg(f.format_number(f.avg(columns.fare_amount), 2).alias("average_fare_amount"))
                     .orderBy(f.desc("average_fare_amount")))
        return dataframe

    def top_vendor_drivers(self):
        """ Identifies the top 10 drivers for each vendor based on average trip distance and total tip amount.
        Returns:
            DataFrame: A DataFrame containing the vendor ID, unique driver license, average mileage covered, total tip
            amount received and the corresponding rank.
        """
        # trip_data's vendor_id is renamed so the join does not collide with
        # trip_fare's vendor_id column.
        joined_df = (self.trip_data_df.withColumnRenamed(columns.vendor_id, "vendor")
                     .join(self.trip_fare_df, [columns.hack_license, columns.pickup_datetime],
                           'inner'))
        # NOTE(review): ranking is over format_number strings (lexicographic).
        window_spec = Window.partitionBy("vendor").orderBy(f.desc("average mileage"), f.desc("total tip amount"))
        dataframe = (joined_df.groupBy(["vendor", columns.hack_license])
                     .agg(f.format_number(f.avg(columns.trip_distance), 2).alias('average mileage'),
                          f.format_number(f.sum(columns.tip_amount), 2).alias('total tip amount'))
                     .withColumn("rank", f.rank().over(window_spec))
                     .filter(f.col("rank") <= 10))
        return dataframe

    def percentage_long_trips(self):
        """ Calculates the percentage of trips with a duration greater than 30 minutes for each vendor.
        Returns:
            DataFrame: A DataFrame containing the vendor ID, total trips executed for each vendor, amount of trips whose
            duration greater than 30 minutes and percentage of these trips.
        """
        dataframe = (self.trip_data_df.filter(f.col(columns.vendor_id) != 'None')
                     .groupBy(columns.vendor_id)
                     .agg(f.count("*").alias("total_trips"),
                          # count() counts non-null values, so when() without
                          # otherwise() yields only trips > 1800 s.
                          f.count(f.when(f.col(columns.trip_time_in_secs) > 1800, True))
                          .alias("long_trips"))
                     .withColumn("percentage_long_trips",
                                 f.format_number((f.col("long_trips") /
                                                  f.col("total_trips")) * 100, 2)))
        return dataframe

    def top_tips_in_cash(self):
        """ Calculates top 5 biggest tips for each vendor if the user paid in cash.
        Returns:
            DataFrame: A DataFrame containing the vendor ID and top 5 largest tips paid in cash for each vendor.
        """
        # dense_rank keeps ties, so more than 5 rows per vendor are possible.
        window_spec = Window.partitionBy(columns.vendor_id).orderBy(f.desc(columns.tip_amount))
        dataframe = (self.trip_fare_df.filter(f.col(columns.payment_type) == "CSH")
                     .withColumn("rank", f.dense_rank().over(window_spec))
                     .filter(f.col("rank") <= 5)
                     .select(columns.vendor_id, columns.tip_amount, "rank"))
        return dataframe

    def trips_weekdays_weekend(self):
        """ Calculates the number of trips occurred on weekend and weekdays for each vendor.
        Returns:
            DataFrame: A DataFrame containing the number of trips executed on weekdays and weekends for each vendor.
        """
        # dayofweek(): 1 = Sunday ... 7 = Saturday, so 2-6 are Monday-Friday.
        weekdays = [2, 3, 4, 5, 6]
        dataframe = self.trip_fare_df.withColumn("day_of_week", f.dayofweek(f.col(columns.pickup_datetime)))
        dataframe = (dataframe.withColumn("day_type", f.when(f.col("day_of_week")
                                                             .isin(weekdays), "weekday").otherwise("weekend"))
                     .groupBy(columns.vendor_id, "day_type")
                     .count()
                     .orderBy(columns.vendor_id, "day_type"))
        return dataframe

    def trips_with_tip_mount_greater_than_fare_amount(self):
        """ Data of trips with tips amount greater than the fare amount.
        Returns:
            dataframe with columns:
            medallion, hack_license, vendor_id, pickup_datetime, payment_type, fare_amount, tip_amount.
        """
        result_columns_names = [columns.medallion, columns.hack_license, columns.vendor_id, columns.pickup_datetime,
                                columns.payment_type, columns.fare_amount, columns.tip_amount]
        trips_with_tip_mount_greater_than_fare_amount = (
            self.trip_fare_df.filter(f.col(columns.fare_amount) < f.col(columns.tip_amount))
            .select(*result_columns_names)
        )
        return trips_with_tip_mount_greater_than_fare_amount

    def total_earnings_of_each_vendor_for_first_seven_days_of_january(self):
        """ Sum of earning of each vendor for trips that started on each of the first seven days of January 2013.
        Returns:
            dataframe with columns:
            vendor_id, date(in format yyyy-MM-dd), total_earnings.
        """
        column_date = 'date'
        column_total_earnings = 'total_earnings'
        # NOTE(review): 'date' is a yyyy-MM-dd string, while the bounds carry a
        # time component; string between() still yields 2013-01-01..2013-01-07
        # here, but the asymmetric bounds look accidental — confirm.
        start_date_string = '2012-12-31 23:59:59.59'
        end_date_string = '2013-01-07 23:59:59.59'
        total_earnings_of_each_vendor_for_first_seven_days_of_january = (
            self.trip_fare_df
            .withColumn(column_date, f.date_format(self.trip_fare_df[columns.pickup_datetime], 'yyyy-MM-dd'))
            .filter(f.col(column_date).between(start_date_string, end_date_string))
            .orderBy(columns.vendor_id, column_date)
            .groupBy(columns.vendor_id, column_date)
            .agg(f.sum(columns.total_amount).alias(column_total_earnings))
        )
        return total_earnings_of_each_vendor_for_first_seven_days_of_january

    def driver_of_each_day(self):
        """ Driver who received the biggest amount of tips for each day
        (tips are considered received when the trip is over).
        Returns:
            dataframe with columns:
            date, hack_licence, vendor_id, tips_sum.
        """
        column_date = 'date'
        column_tips_sum = 'tips_sum'
        column_max_tips_sum = 'max_tips_sum'
        join_column_names = [columns.vendor_id, columns.medallion, columns.hack_license, columns.pickup_datetime]
        joined_df = self.trip_fare_df.join(self.trip_data_df, join_column_names, 'inner')
        # Day is keyed on the DROPOFF time, per the "received when the trip is
        # over" rule in the docstring.
        drivers = (
            joined_df.withColumn('date', f.date_format(joined_df[columns.dropoff_datetime], 'yyyy-MM-dd'))
            .groupBy(columns.vendor_id, columns.hack_license, column_date)
            .agg(f.sum(columns.tip_amount).alias(column_tips_sum))
            .orderBy(column_date, f.desc(column_tips_sum))
            .withColumn(column_max_tips_sum, f.max(f.col(column_tips_sum))
                        .over(Window.partitionBy(column_date)).alias(column_max_tips_sum))
            .filter(f.col(column_max_tips_sum) == f.col(column_tips_sum))
            .select(column_date, columns.hack_license, columns.vendor_id, column_tips_sum)
        )
        return drivers

    def price_per_second_of_drive_for_each_vendor(self):
        """ Average price per second of drive for each vendor.
        Returns:
            dataframe with columns:
            vendor_id, average_fare_per_second
        """
        column_sum_fare_amount = 'sum_fare_amount'
        column_sum_trip_time_in_secs = 'sum_trip_time_in_secs'
        column_average_fare_per_second = 'average_fare_per_second'
        join_column_names = [columns.vendor_id, columns.medallion, columns.hack_license, columns.pickup_datetime]
        joined_df = self.trip_fare_df.join(self.trip_data_df, join_column_names, 'inner')
        # Ratio of totals (not mean of per-trip ratios): total fare / total seconds.
        price_per_second_of_drive_for_each_vendor = (
            joined_df.groupBy('vendor_id')
            .agg(f.sum(columns.fare_amount).alias(column_sum_fare_amount),
                 f.sum(columns.trip_time_in_secs).alias(column_sum_trip_time_in_secs))
            .withColumn(column_average_fare_per_second,
                        f.col(column_sum_fare_amount) / f.col(column_sum_trip_time_in_secs))
            .select(columns.vendor_id, column_average_fare_per_second)
        )
        return price_per_second_of_drive_for_each_vendor

    def top_vendor_for_each_payment_type(self):
        """ Vendor who received the biggest amount of money for each payment type.
        Returns:
            dataframe with columns:
            payment_type, vendor_id, sum_total_amount.
        """
        column_sum_total_amount = 'sum_total_amount'
        column_max_for_payment_type = 'max_for_payment_type'
        top_vendor_for_each_payment_type = (
            self.trip_fare_df.groupBy(columns.vendor_id, columns.payment_type)
            .agg(f.sum(columns.total_amount).alias(column_sum_total_amount))
            .orderBy(columns.payment_type, f.desc(column_sum_total_amount))
            .withColumn(column_max_for_payment_type,
                        f.max(f.col(column_sum_total_amount))
                        .over(Window.partitionBy(columns.payment_type)))
            .filter(f.col(column_sum_total_amount) == f.col(column_max_for_payment_type))
            .select(columns.payment_type, columns.vendor_id, column_sum_total_amount)
        )
        return top_vendor_for_each_payment_type

    def top_five_drivers_with_greatest_sum_of_time_in_trip(self):
        """ Top 5 drivers with greatest sum of time spent in trips.
        Returns:
            dataframe with columns:
            vendor_id, hack_license, sum_trip_time_in_secs
        """
        column_sum_trip_time_in_secs = 'sum_trip_time_in_secs'
        top_five_drivers_with_greatest_sum_of_time_in_trip = (
            self.trip_data_df.groupBy(columns.vendor_id, columns.hack_license)
            .agg(f.sum(f.col(columns.trip_time_in_secs)).alias(column_sum_trip_time_in_secs))
            .orderBy(f.desc(column_sum_trip_time_in_secs))
        ).limit(5)
        return top_five_drivers_with_greatest_sum_of_time_in_trip

    def most_popular_payment_type(self):
        """
        Calculates the most popular payment type.
        Returns:
            DataFrame: A DataFrame containing only one row with the most popular payment type.
        """
        dataframe = (
            self.trip_fare_df.groupBy(columns.payment_type)
            .count()
            .orderBy('count', ascending=False)
            .limit(1)
        )
        return dataframe

    def highest_fare_amount(self):
        """
        Calculates the highest fare when vendor is VTS.
        Returns:
            DataFrame: A DataFrame containing only one row with the highest fare amount for VTS.
        """
        dataframe = (
            self.trip_fare_df.filter(f.col(columns.vendor_id) == 'VTS')
            .orderBy(columns.fare_amount, ascending=False)
            .limit(1)
        )
        return dataframe

    def top_total_amount(self):
        """
        Calculates the top 10 total_amount values for drivers when passengers count > 5.
        Returns:
            DataFrame: A DataFrame containing 10 rows with biggest total_amount values for drivers
            when passengers count > 5.
        """
        dataframe = (
            self.trip_fare_df.join(self.trip_data_df, [columns.medallion, columns.hack_license,
                                                       columns.pickup_datetime], 'inner')
            .filter(f.col(columns.passenger_count) > 5)
            .groupBy(columns.medallion, columns.hack_license, columns.passenger_count)
            .agg(f.max(columns.total_amount))
            # Spark names the aggregate column 'max(total_amount)' by default.
            .orderBy(f.col(f'max({columns.total_amount})'), ascending=False)
            .limit(10)
        )
        return dataframe

    def total_revenue_per_day(self):
        """
        Calculates the total revenue for each day of the week, categorized by payment type.
        Returns:
            DataFrame: A DataFrame with columns: 'pickup_datetime', 'payment_type', 'total_amount',
            and 'total_revenue_per_day'.
        """
        dataframe = self.trip_fare_df.withColumn('day_num', f.dayofweek(columns.pickup_datetime))
        window_spec = (
            Window.partitionBy(
                f.col('day_num'),
                f.col(columns.payment_type)
            ).orderBy(f.col('day_num'))
        )
        # Every row in a (day_num, payment_type) partition carries the same sum.
        dataframe = dataframe.withColumn('total_revenue_per_day', f.sum(f.col(columns.total_amount)).over(window_spec))
        return dataframe

    def tip_percentage(self):
        """
        Calculates percentage of tip to total_amount if payment type not cash.
        Returns:
            DataFrame: A DataFrame with new column tips_percentages and only rides which were paid not in cash.
        """
        window_spec = Window.partitionBy(columns.medallion, columns.hack_license, columns.pickup_datetime)
        dataframe = self.trip_fare_df.filter(f.col(columns.payment_type) != 'CSH')
        # NOTE(review): output column name 'tips_percetages' is misspelled
        # ('percentages'); renaming would change the public schema — confirm
        # with consumers before fixing.
        dataframe = dataframe.withColumn('tips_percetages',
                                         (f.sum(columns.tip_amount).over(window_spec) /
                                          f.sum(columns.total_amount).over(window_spec)) * 100)
        return dataframe

    def avg_trip_duration(self):
        """
        Calculates the average trip duration for different rate codes.
        Returns:
            DataFrame: A DataFrame grouped by rate codes and found avg trip duration time for them
        """
        dataframe = (
            self.trip_data_df
            .filter(f.col(columns.rate_code).isNotNull())
            .groupBy(columns.rate_code)
            .agg(
                f.avg(columns.trip_time_in_secs)
                .alias('avg_trip_duration')
            ).orderBy(f.asc(columns.rate_code))
        )
        return dataframe
| andriisydor/big_data_2023 | app/QueryManager.py | QueryManager.py | py | 20,166 | python | en | code | 0 | github-code | 36 |
71244931944 | import django_filters
from teachers.models import Teacher
class TeacherFilter(django_filters.FilterSet):
    """Declarative queryset filter for Teacher list endpoints.

    Exposes query parameters such as ``age__gte``/``age__lte``/``age`` and
    case-insensitive substring lookups on the name/occupation fields.
    """

    class Meta:
        # Model the filter set is generated against.
        model = Teacher
        # Field name -> allowed lookup expressions (django-filter dict syntax).
        fields = {
            'age': ['gte', 'lte', 'exact'],
            'first_name': ['icontains'],
            'last_name': ['icontains'],
            'occupation': ['icontains']
        }
| ApolloNick/lms | api/v1/filters.py | filters.py | py | 341 | python | en | code | 0 | github-code | 36 |
35941174358 | import requests
import json
from PIL import Image, ImageTk
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import time
import os
from bs4 import BeautifulSoup
import tkinter as tk
import io
# Custom Exceptions Start
class invalidInputInfo(Exception):
    """Raised when user-supplied input fails validation (bad UUID, bad resource id, ...)."""
    pass
class clearList(Exception):
    """Raised as control flow when the user types 'clear' to empty the queued resource list."""
    pass
class checkFaild(Exception):
    """Raised when an expected page element cannot be located. (Name is a typo of 'checkFailed'.)"""
    pass
class siteUnreachable(Exception):
    """Raised when the headless browser cannot open the target site."""
    pass
class screenshotSelectedElementError(Exception):
    """Raised when screenshotting the selected page element fails."""
    pass
class imageCropError(Exception):
    """Raised when cropping/saving the captured screenshot fails."""
    pass
class displayError(Exception):
    """Raised when the result image cannot be rendered into the tkinter frame."""
    pass
# Custom Exceptions End
# Player API Start
def playerUUIDgui():
    """Screen that resolves a player UUID to its name history and a skin render.

    Builds its widgets into the shared root window 'mapiot' after hiding the
    home menu; 'Back to home' restores the menu.
    """
    homeUnpack()
    # Frame init
    mapiot.geometry('1000x600')
    canvasFrame = tk.Frame(mapiot)
    infoFrame = tk.Frame(mapiot)
    # one time search
    def startThisFunc():
        # Clear previous canvas in frame
        try:
            for skinC in canvasFrame.winfo_children():
                skinC.destroy()
        except:
            pass
        # get info from gui
        uI = usrInput.get()
        # processing
        try:
            # Info processing: playerAPI returns (summary text, current name).
            getInfo = playerAPI(uI)
            outBlock.set(getInfo[0])
            # image processing: fetch a full-body skin render for that name.
            url = str("https://minecraftskinstealer.com/api/v1/skin/render/fullbody/" + getInfo[1] + "/700")
            skinImage = ImageTk.PhotoImage(Image.open(io.BytesIO(requests.get(url).content)))
            skinCanvas = tk.Label(canvasFrame, image=skinImage, bg="white")
            # Keep a reference on the widget so the PhotoImage is not
            # garbage-collected while displayed (standard tkinter idiom).
            skinCanvas.image = skinImage
            skinCanvas.pack()
        except invalidInputInfo:
            outBlock.set("Invalid Info")
        except Exception:
            # Any network/parse/render failure ends up here.
            outBlock.set("Something went wrong")
    # dynamic info init
    outBlock = tk.StringVar()
    # Default Image init -- note this fetches over the network while the
    # screen is being built.
    defaultImageUrl = "https://upload.wikimedia.org/wikipedia/en/5/51/Minecraft_cover.png"
    skinImage = ImageTk.PhotoImage(Image.open(io.BytesIO(requests.get(defaultImageUrl).content)))
    skinCanvas = tk.Label(canvasFrame, image=skinImage, bg="white")
    skinCanvas.image = skinImage
    skinCanvas.pack()
    canvasFrame.pack()
    # button init
    outLable = tk.Label(infoFrame, textvariable=outBlock, font=('Arial', 14))
    outLable.pack()
    usrInput = tk.Entry(infoFrame, show=None, font=('Arial', 14))
    usrInput.pack()
    startIt = tk.Button(infoFrame, text = 'Search', command=startThisFunc)
    startIt.pack()
    infoFrame.pack()
    # exit init
    def fucExit():
        homePack()
        try:
            infoFrame.pack_forget()
            canvasFrame.destroy()
        except:
            outBlock.set("Something went wrong")
    buttonExit = tk.Button(infoFrame, text = 'Back to home', command=fucExit)
    buttonExit.pack()
def formatUUID(uuid):
    """Return *uuid* with every dash removed (dashed -> compact Mojang form)."""
    return uuid.replace("-", "")
def testUUID(uuid):
    """Check whether *uuid* resolves to a real player profile via api.minetools.eu.

    Args:
        uuid: player UUID (dashed or compact form).
    Returns:
        bool: True if the API returned a decodable profile, False otherwise.
    """
    fullURL = "https://api.minetools.eu/profile/" + uuid
    # Bounded timeout so the GUI cannot hang forever on a dead network
    # (a timeout propagates and is reported by the caller's error handler).
    content = requests.get(url=fullURL, timeout=10)
    result = json.loads(content.text)
    try:
        # For unknown/invalid UUIDs the 'decoded' field stringifies to 'None'.
        return str(result["decoded"]) != "None"
    except (KeyError, TypeError):
        # Missing key or a non-dict payload means "not a valid profile".
        # (Was a bare except that also masked programming errors.)
        return False
def playerAPI(infoIn):
    """Look up a player's current and historical names from a UUID.

    Args:
        infoIn: player UUID (dashed or compact form).
    Returns:
        tuple[str, str]: (formatted multi-line summary, current player name).
    Raises:
        invalidInputInfo: if the UUID does not resolve to a profile.
    """
    toolDict = {
        "MoJangAPI": "https://api.mojang.com/user/profiles/",
        # "MineToolsEU": "https://api.minetools.eu/profile/"
    }
    if testUUID(infoIn) is False:
        raise invalidInputInfo()
    for tool in toolDict.keys():
        if tool == "MoJangAPI":
            infoNeeded = formatUUID(infoIn)
            FullURL = toolDict[tool] + infoNeeded + "/names"
            content = requests.get(url=FullURL)
            nameLst = json.loads(content.text)
            # The last entry of the name history is the current name.
            infoA = nameLst[-1]["name"]
            returnLst = []
            returnLst.append(str("-=" * 15))
            returnLst.append(str("Current ID: " + infoA))
            if len(nameLst) > 1:
                # Bug fix: 'infoB' used to be appended unconditionally, which
                # raised NameError for players with a single-name history.
                previousName = [name["name"] for name in nameLst[:-1]]
                returnLst.append("Used IDs: " + "; ".join(previousName))
            returnLst.append(str("-=" * 15))
            return "\n".join(returnLst), infoA
# Player API End
# Server API Start
def serverAPIgui():
    """Screen that queries a Minecraft server's status by IP and port."""
    homeUnpack()
    def startThisFunc():
        # Read both entries; '0' in the port box means "use the default port".
        uI = usrInputIP.get()
        uI2 = usrInputPort.get()
        try:
            outBlock.set(serverAPI(uI, uI2))
        except invalidInputInfo:
            outBlock.set("Invalid Info")
    outBlock = tk.StringVar()
    outBlock.set("Ip in upper box \nport in lower box \ntype 0 indicate default port")
    outLable = tk.Label(mapiot, textvariable=outBlock, font=('Arial', 14))
    outLable.pack()
    usrInputIP = tk.Entry(mapiot, show=None, font=('Arial', 14))
    usrInputIP.pack()
    usrInputPort = tk.Entry(mapiot, show=None, font=('Arial', 14))
    usrInputPort.pack()
    startIt = tk.Button(mapiot, text = 'Search', command=startThisFunc)
    startIt.pack()
    def fucExit():
        # Tear down this screen's widgets and restore the home menu.
        homePack()
        buttonExit.pack_forget()
        usrInputIP.pack_forget()
        usrInputPort.pack_forget()
        startIt.pack_forget()
        outLable.pack_forget()
    buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
    buttonExit.pack()
buttonExit.pack()
def minecraftColorcodeTranslate(letter):
    """Translate Minecraft section-sign formatting codes into '&' codes.

    Args:
        letter: text possibly containing '\u00A7x' color/format codes.
    Returns:
        str: the same text with every '\u00A7x' code rewritten as '&x'.
    """
    mcFontDict = {
        "DARK_RED": ["\u00A74", "&4"],
        "RED": ["\u00A7c", "&c"],
        "GOLD": ["\u00A76", "&6"],
        "YELLOW": ["\u00A7e", "&e"],
        "DARK_GREEN": ["\u00A72", "&2"],
        "GREEN": ["\u00A7a", "&a"],
        "AQUA": ["\u00A7b", "&b"],
        "DARK_AQUA": ["\u00A73", "&3"],
        "DARK_BLUE": ["\u00A71", "&1"],
        "BLUE": ["\u00A79", "&9"],
        "LIGHT_PURPLE": ["\u00A7d", "&d"],
        "DARK_PURPLE": ["\u00A75", "&5"],
        "WHITE": ["\u00A7f", "&f"],
        "GRAY": ["\u00A77", "&7"],
        "DARK_GRAY": ["\u00A78", "&8"],
        "BLACK": ["\u00A70", "&0"],
        "FONT_RESET": ["\u00A7r", "&r"],
        "FONT_BOLD": ["\u00A7l", "&l"],
        "FONT_ITALIC": ["\u00A7o", "&o"],
        "FONT_UNDERLINE": ["\u00A7n", "&n"],
        "FONT_STRIKE": ["\u00A7m", "&m"]
    }
    # Each value is a [section-sign form, ampersand form] pair.
    for sectionCode, ampCode in mcFontDict.values():
        letter = letter.replace(sectionCode, ampCode)
    # Removed dead code: the old version ended with the no-op
    # letter.replace(">>>", ">>>"), which replaced a string with itself.
    return letter
def serverAPI(infoIn, gamePort):
    """Query two public status APIs for a Minecraft server and format a summary.

    Args:
        infoIn: server hostname or IP.
        gamePort: port as a string; "0" means "query without an explicit port".
    Returns:
        str: multi-line, human-readable status block.
    Raises:
        invalidInputInfo: if gamePort is not an int or either API call fails.
    """
    toolDict = {
        "mcsrvstat": "https://api.mcsrvstat.us/2/",
        "mcapi": "https://mcapi.us/server/status?ip=",
    }
    dumpLst = []
    outLst = []
    def getConent(fullURL):
        # Closes over the loop variable 'tool' below to tag each response.
        content = requests.get(url=fullURL)
        formated = json.loads(content.text)
        dumpLst.append([tool, formated])
    try:
        if int(gamePort) == 0:
            for tool in toolDict.keys():
                fullURL = toolDict[tool] + infoIn
                getConent(fullURL)
        else:
            # NOTE(review): '&port=' is appended to both endpoints, but
            # mcsrvstat takes the port in the URL path (ip:port) -- confirm
            # the query-string form actually works there.
            for tool in toolDict.keys():
                fullURL = toolDict[tool] + infoIn + "&port=" + gamePort
                getConent(fullURL)
    except:
        # Non-numeric port or any request/JSON failure is reported as bad input.
        raise invalidInputInfo
    # dumpLst[0] is mcsrvstat, dumpLst[1] is mcapi (dict iteration order).
    if dumpLst[0][1]["online"] == True:
        outLst.append(str("-=" * 15))
        outLst.append("Stat: Serving")
        # 'duration' divided by 1e6 -- presumably nanoseconds to ms; verify.
        outLst.append(f"Ping: {int(dumpLst[1][1]['duration']) / 1000000:.2f} ms")
        outLst.append(f"IP:{dumpLst[0][1]['hostname']} ({dumpLst[0][1]['ip']})")
        outLst.append(f'Port: {dumpLst[0][1]["port"]}')
        # MOTD lines are optional in the payload; fall back to 'NoInfo'.
        try:
            outLst.append(f'Motd Line A: {minecraftColorcodeTranslate(dumpLst[0][1]["motd"]["clean"][0]).strip()}')
        except:
            outLst.append(f'Motd Line A: NoInfo')
        try:
            outLst.append(f'Motd Line B: {minecraftColorcodeTranslate(dumpLst[0][1]["motd"]["clean"][1]).strip()}')
        except:
            outLst.append(f'Motd Line B: NoInfo')
        outLst.append(f"Players: {dumpLst[0][1]['players']['online']} / {dumpLst[0][1]['players']['max']}")
        outLst.append(str("-=" * 15))
    else:
        outLst.append(str("-=" * 15))
        outLst.append(f"IP:{dumpLst[0][1]['hostname']} ({dumpLst[0][1]['ip']})")
        outLst.append("Stat: Down")
        outLst.append(str("-=" * 15))
    return "\n".join(outLst)
# Server API End
# Slime Chunck Finder Start
def slimeCFgui():
    """Screen that renders a slime-chunk map for a seed around an X/Y position."""
    homeUnpack()
    mapiot.geometry('1000x600')
    slimeImgFrame = tk.Frame(mapiot)
    slimeImgFrame.pack()
    infoFrame = tk.Frame(mapiot)
    infoFrame.pack()
    def startSearch():
        try:
            # Clear the previously rendered map, if any.
            try:
                for slimeImg in slimeImgFrame.winfo_children():
                    slimeImg.destroy()
            except:
                pass
            try:
                slimeFilePath = slimeChunckFinder(seedInputEntry.get(), xLocateEntry.get(), yLocateEntry.get())
                slimeImageCall = tk.PhotoImage(file=slimeFilePath)
                slimeImageDisplay = tk.Label(slimeImgFrame, image=slimeImageCall)
                # Keep a reference so the PhotoImage is not garbage-collected.
                slimeImageDisplay.image = slimeImageCall
                slimeImageDisplay.pack()
            except (checkFaild, siteUnreachable, screenshotSelectedElementError, imageCropError):
                # Bug fix: a bare 'except: raise displayError' used to swallow
                # these domain errors, so the specific handlers below could
                # never fire. Re-raise them so each gets its own message.
                raise
            except Exception:
                raise displayError
        except checkFaild:
            errorTextVar.set("checkFaild")
        except siteUnreachable:
            errorTextVar.set("siteUnreachable")
        except screenshotSelectedElementError:
            errorTextVar.set("screenshotSelectedElementError")
        except imageCropError:
            errorTextVar.set("imageCropError")
        except displayError:
            errorTextVar.set("displayError")
    errorTextVar = tk.StringVar()
    errorTextVar.set("First Line: Minecraft Seed \nSecond Line: X Location \nThird Line: Y Location")
    errorNoticeBlock = tk.Label(infoFrame, textvariable=errorTextVar, font=('Arial', 14))
    errorNoticeBlock.pack()
    seedInputEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
    seedInputEntry.pack()
    xLocateEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
    xLocateEntry.pack()
    yLocateEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
    yLocateEntry.pack()
    searchStartButton = tk.Button(infoFrame, text="Search 5x5 Chunks", command=startSearch)
    searchStartButton.pack()
    def exitSearch():
        infoFrame.pack_forget()
        slimeImgFrame.pack_forget()
        homePack()
    exitButton = tk.Button(infoFrame, text = 'Back to home', command=exitSearch)
    exitButton.pack()
def slimeChunckFinder(seedInput, locationX, locationY):
    """Render and crop a slime-chunk map for *seedInput* via mineatlas.com.

    Args:
        seedInput, locationX, locationY: user-entered strings from the GUI.
    Returns:
        str: path of the saved 120x120 PNG crop.
    Raises:
        siteUnreachable, checkFaild, screenshotSelectedElementError, imageCropError
    """
    baseURL = "http://mineatlas.com/?levelName=Random&seed="
    uselessArg = [
        "&mapZoom=18",
        "&pos=",
        "&Player=true",
        "&Spawn=true",
        "&Likely+Villages=false",
        "&Ocean+Monuments=false",
        "&Jungle+Temples=false",
        "&Desert+Temples=false",
        "&Witch+Huts=false",
        "&Slime+Chunks=true"
    ]
    otherAttri = ''.join(uselessArg)
    # NOTE(review): locationX/locationY are concatenated directly onto the
    # seed (not into '&pos='), so they become part of the seed value --
    # confirm the intended URL format against mineatlas.
    try:
        driver = visitSite(baseURL + seedInput + locationX + locationY + otherAttri)
    except Exception:
        raise siteUnreachable
    webXPATH = '/html/body/div/div[2]/div[1]/div[2]'
    try:
        slimeCanvas = driver.find_element(By.XPATH, webXPATH)
    except Exception:
        raise checkFaild
    try:
        slimeFilePath = os.path.expandvars('$HOME') + "/Downloads/mapiot"
        if not os.path.exists(slimeFilePath):
            os.makedirs(slimeFilePath)
        slimeFile = slimeFilePath + "/slimeChunks.png"
        slimeCanvas.screenshot(slimeFile)
    except Exception:
        raise screenshotSelectedElementError
    driver.quit()
    try:
        slimeCanvasScreenShot = Image.open(slimeFile)
        originalWidth, originalHeight = slimeCanvasScreenShot.size
        # Bug fix: 'top' was computed from originalWidth and 'right' from
        # originalHeight, producing a mis-centered (or invalid) crop box on
        # non-square screenshots. Crop a 120x120 box centered on the canvas.
        left = originalWidth / 2 - 60
        top = originalHeight / 2 - 60
        right = originalWidth / 2 + 60
        bottom = originalHeight / 2 + 60
        slimeResult = slimeCanvasScreenShot.crop((left, top, right, bottom))
        slimeResult.save(slimeFile)
        return slimeFile
    except Exception:
        raise imageCropError
# Slime Chunck Finder End
# Major Bug Checker Start
def majorBugGUI():
    """Screen that lists Mojang bug-tracker issues currently 'In Progress'."""
    # Show a loading hint first: checkMajorBug() drives a headless browser
    # and blocks the UI thread for several seconds.
    textBlockA = tk.Label(mapiot, text = 'This may take seconds to load, pls wait', font=('Arial', 14))
    textBlockA.pack()
    homeUnpack()
    # Uses the module-level scrollbar 'scrollB' created in the __main__ block.
    textBlockB = tk.Listbox(mapiot, yscrollcommand = scrollB.set, font=('Arial', 14), height=10, width=50)
    for eachEr in checkMajorBug():
        textBlockB.insert("end", eachEr + "\n")
    textBlockB.pack()
    # Finish loading
    textBlockA.pack_forget()
    def fucExit():
        homePack()
        buttonExit.pack_forget()
        textBlockB.pack_forget()
    buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
    buttonExit.pack()
def checkMajorBug():
    """Scrape bugs.mojang.com for 'In Progress' Minecraft issues.

    Returns:
        list[str]: alternating separator lines and "[MC-xxxx] title" entries
        ready for display in a Listbox.
    """
    mojangBugURL = "https://bugs.mojang.com/issues/"
    # URL-decoded JQL: project = MC AND status = "In Progress"
    #                  ORDER BY votes DESC, updated DESC
    jqlArg = "?jql=project%20%3D%20MC%20AND%20status%20%3D%20%22In%20Progress%22%20ORDER%20BY%20votes%20DESC%2C%20updated%20DESC"
    mojangBugReportURL = mojangBugURL + jqlArg
    # NOTE(review): absolute XPATH into JIRA's markup -- brittle; breaks on
    # any site redesign.
    siteXPATH = '//*[@id="main"]/div/div[2]/div/div/div/div/div/div[1]/div[1]/div/div[1]/div[2]/div/ol'
    driver = visitSite(mojangBugReportURL)
    inProgressBugLst = driver.find_element(By.XPATH,siteXPATH)
    lstHTML = inProgressBugLst.get_attribute('innerHTML')
    # Requires the third-party 'lxml' parser to be installed.
    bfObject = BeautifulSoup(str(lstHTML), features="lxml")
    preBugLst = bfObject.find_all('li')
    guiDisplay = []
    for preBug in preBugLst:
        guiDisplay.append(str("━" * 70))
        guiDisplay.append(f"\t[{preBug.get('data-key')}] \t{preBug.get('title')}")
    driver.quit()
    return guiDisplay
# Major Bug Checker End
# Spigot Resource Checker Start
def spigotCheckerGUI():
    """Screen for checking whether Spigot resources are up to date.

    The user queues '<resourceId>-<version>' entries, then runs a batch check
    against the Spigot legacy update API. Typing 'clear' empties the queue.
    """
    homeUnpack()
    processLst = []
    def inCheck(usrIn):
        # 'clear' is a command word, not a resource id.
        if usrIn == "clear":
            raise clearList
        # Bug fix: the old code wrapped str.find() in try/except, but find()
        # never raises, so ids without a dash slipped through unvalidated.
        # Enforce the documented '<spigotID>-<version>' format here instead.
        if "-" not in usrIn or len(usrIn) < 3:
            raise invalidInputInfo
        return usrIn
    def addToProcessLst():
        # Validate and queue the entry; report problems in the status label.
        try:
            processLst.append(inCheck(usrInputId.get()))
            outBlock.set("\n".join(processLst))
        except invalidInputInfo:
            outBlock.set("Invalid Resource Info")
        except clearList:
            # Replaces the old pop(0)-in-a-loop idiom.
            processLst.clear()
            outBlock.set("Cleared List")
    def startThisFunc():
        try:
            outBlock.set(spigotResourceChecker(processLst))
        except invalidInputInfo:
            outBlock.set("Invalid Info")
    def seeList():
        outBlock.set("\n".join(processLst))
    # Display
    outBlock = tk.StringVar()
    outBlock.set("type in the format of <spigotID>[dash]<version>, click add")
    outLable = tk.Label(mapiot, textvariable=outBlock, font=('Arial', 14))
    outLable.pack()
    usrInputId = tk.Entry(mapiot, show=None, font=('Arial', 14))
    usrInputId.pack()
    addTrigger = tk.Button(mapiot, text = 'Add to List', command=addToProcessLst)
    addTrigger.pack()
    curLst = tk.Button(mapiot, text = 'Current List', command=seeList)
    curLst.pack()
    startIt = tk.Button(mapiot, text = 'Check', command=startThisFunc)
    startIt.pack()
    # Exit Button
    def fucExit():
        homePack()
        buttonExit.pack_forget()
        usrInputId.pack_forget()
        addTrigger.pack_forget()
        startIt.pack_forget()
        outLable.pack_forget()
        curLst.pack_forget()
    buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
    buttonExit.pack()
def spigotResourceChecker(resDetail):
    """Compare queued '<resourceId>-<version>' entries against the Spigot API.

    Args:
        resDetail: list of '<resourceId>-<version>' strings.
    Returns:
        str: one formatted line per resource ('' for an empty list), or the
        literal "empty list" if anything raises along the way.
    """
    returnLst = []
    try:
        for spigotPlugin in resDetail:
            # Split on the first dash: id before, version after (versions may
            # themselves contain dashes).
            versionPosition = spigotPlugin.find("-")
            versionId = spigotPlugin[versionPosition+1:]
            resId = spigotPlugin[:versionPosition]
            fullURL = "https://api.spigotmc.org/legacy/update.php?resource=" + resId
            spigotAPI = requests.get(url=fullURL)
            # The legacy endpoint returns the newest version as plain text.
            if str(spigotAPI.text) != versionId:
                yesOrNoUTD = "X"
            else:
                yesOrNoUTD = "√"
            returnLst.append(str("-" * 70))
            returnLst.append(f"Resource ID: {resId} | Your Version: {versionId} | Newest: {str(spigotAPI.text)} | Uptodate: {yesOrNoUTD}")
        return "\n".join(returnLst)
    except:
        # NOTE(review): this catch-all labels *any* failure (network down,
        # bad response, ...) as "empty list", which is misleading; an actually
        # empty input returns '' above without ever reaching this handler.
        return "empty list"
# Spigot Resource Checker Stop
# Environment Start
def chromeSetting():
    """Build the ChromeOptions shared by every headless scrape session."""
    opts = webdriver.ChromeOptions()
    flags = (
        '--no-sandbox',
        '--disable-gpu',
        'window-size=1920x1080',
        '--hide-scrollbars',
        '--headless',
    )
    for flag in flags:
        opts.add_argument(flag)
    opts.add_experimental_option("excludeSwitches", ["ignore-certificate-errors", "enable-automation"])
    return opts
def visitSite(FullURL):
    """Open *FullURL* in a fresh headless Chrome and return the live driver.

    Relies on the module-level 'options' created in the __main__ block.
    The caller is responsible for calling driver.quit().
    """
    driver = webdriver.Chrome(options=options, service=Service(ChromeDriverManager().install()))
    driver.get(FullURL)
    # Fixed wait to let client-side rendering settle before scraping.
    time.sleep(2)
    return driver
def excecutePath():
    """Ensure the app's working directory exists and return it with a trailing slash."""
    if os.name == 'nt':
        workDir = "C:/Program Files/mapiot"
    else:
        workDir = os.environ['HOME'] + "/Downloads/mapiot"
    if not os.path.exists(workDir):
        os.makedirs(workDir)
    return workDir + "/"
# Environment End
# GUI Start
def homeUnpack():
    """Hide the home menu before a tool screen takes over the window."""
    # Frame Unpack
    homeMenu.pack_forget()
def homePack():
    """Restore the home menu and the default window size after a tool exits."""
    # Frame Pack, init window size
    mapiot.geometry('500x300')
    homeMenu.pack()
# GUI End
# Script Start
if __name__ == '__main__':
    # Headless Browser Init -- 'options' is read as a module global by visitSite().
    options = chromeSetting()
    # GUI Init
    mapiot = tk.Tk()
    mapiot.title("Mapiot v1.0.0")
    mapiot.geometry('500x300')
    # Shared scrollbar; majorBugGUI wires its Listbox to it.
    scrollB= tk.Scrollbar(mapiot)
    scrollB.pack(side="right", fill="y")
    # Buttons -- one per tool screen.
    homeMenu = tk.Frame(mapiot)
    nameDisplay = tk.Label(homeMenu, text = 'Thank you for using Mapiot.', font=('Arial', 20), width=30, height=2)
    buttonUUID = tk.Button(homeMenu, text = 'Player UUID Checker', command=playerUUIDgui)
    buttonMajorBugGUI = tk.Button(homeMenu, text = 'Mojang Bugs Checker', command=majorBugGUI)
    buttonServerAPI = tk.Button(homeMenu, text = 'Server Stats Checker', command=serverAPIgui)
    buttonSpigotChecker = tk.Button(homeMenu, text = 'Spigot Resources Checker', command=spigotCheckerGUI)
    slimeChecker = tk.Button(homeMenu, text = 'Slime Chunk Finder', command=slimeCFgui)
    # NOTE(review): 'quit' here is the interactive-site builtin, not
    # mapiot.quit -- confirm intended behavior.
    buttonQuit = tk.Button(homeMenu, text = 'Quit', command=quit)
    # Button Install
    nameDisplay.pack()
    buttonMajorBugGUI.pack()
    buttonUUID.pack()
    buttonServerAPI.pack()
    buttonSpigotChecker.pack()
    slimeChecker.pack()
    buttonQuit.pack()
    # Frame Install
    homePack()
    # GUI Loop
    mapiot.mainloop()
| akaTiger/Mapiot | old.py | old.py | py | 18,576 | python | en | code | 0 | github-code | 36 |
22277300373 | #!/usr/bin/env python
"""
https://www.codewars.com/kata/520b9d2ad5c005041100000f/python
"""
import ipdb
import pytest
"""
pig_it('Pig latin is cool') # igPay atinlay siay oolcay
pig_it('Hello world !') # elloHay orldway !
"""
# from codewars solution
def pig_it(text):
    """Pig-latinize every alphabetic word; leave punctuation tokens untouched."""
    def latinize(word):
        # Move the first letter to the end and append 'ay'.
        return word[1:] + word[0] + 'ay' if word.isalpha() else word
    return ' '.join(latinize(word) for word in text.split())
@pytest.mark.parametrize("input_, expected",
                         [('Pig latin is cool', 'igPay atinlay siay oolcay'),
                          ('Hello world !', 'elloHay orldway !')])
def test_solution(input_, expected):
    # Exercises the local implementation against the kata's published examples.
    assert move_first_letter_of_each_word_to_the_end(input_) == expected
def move_first_letter_of_each_word_to_the_end(text: str) -> str:
    """Rotate each word's first character to its end and append 'ay'.

    Bare punctuation tokens ('!', ',', '?') pass through unchanged.
    """
    # Removed a block of commented-out scratch code that duplicated this logic.
    return ' '.join([f"{i[1:]}{i[0]}ay" if i not in ['!', ',', '?'] else i for i in text.split()])
if __name__ == '__main__':
    # Manual smoke test: prints the kata's two example outputs.
    print(move_first_letter_of_each_word_to_the_end('Pig latin is cool'))
    print(move_first_letter_of_each_word_to_the_end('Hello world !'))
17310575825 | # MIT License
#
# Copyright (c) 2023 Andrey Zhdanov (rivitna)
# https://github.com/rivitna
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import io
import struct
import zlib
MARKER = b'\xFE\x09\x00\x00\x8D'
def decompress_data(data):
    """Inflate raw-deflate *data* (no zlib header, negative wbits) to bytes."""
    inflater = zlib.decompressobj(-zlib.MAX_WBITS)
    return inflater.decompress(data) + inflater.flush()
#
# Main
#
if len(sys.argv) != 2:
    print('Usage: '+ sys.argv[0] + ' filename')
    sys.exit(0)
filename = sys.argv[1]
with io.open(filename, 'rb') as f:
    file_data = f.read()
pos = 0
# Find configuration data
# Scan for MARKER, then verify the expected CIL opcode pattern after it:
# a stsfld (0x80) storing a field token, immediately followed by a ldsfld
# (0x7E) loading the same token -- that token identifies the config array.
while True:
    pos = file_data.find(MARKER, pos)
    if pos < 0:
        break
    pos += len(MARKER)
    # stsfld
    if file_data[pos + 4] != 0x80:
        continue
    cfg_data_token, = struct.unpack_from('<L', file_data, pos + 5)
    # ldsfld
    if file_data[pos + 9] != 0x7E:
        continue
    token, = struct.unpack_from('<L', file_data, pos + 10)
    if token == cfg_data_token:
        # Leave pos at the ldsfld opcode; the element-store parser starts here.
        pos += 9
        break
if pos < 0:
    print('Error: Configuration data not found.')
    sys.exit(1)
print('cfg data position: %08X' % pos)
print('cfg data token: 0x%08X' % cfg_data_token)
cfg_data_dict = {}
# Parse IL code
# Each array-element store is the byte sequence:
#   ldsfld <token> / ldc.i4 <index> / ldc.i4 <value> / stelem.i1 [/ nop]
# Stop at the first instruction that breaks the pattern.
while pos + 16 <= len(file_data):
    # ldsfld
    if file_data[pos] != 0x7E:
        break
    pos += 1
    token, = struct.unpack_from('<L', file_data, pos)
    if token != cfg_data_token:
        break
    pos += 4
    # ldc.i4
    if file_data[pos] != 0x20:
        break
    pos += 1
    idx, = struct.unpack_from('<L', file_data, pos)
    # A repeated index means we've walked past the initializer.
    if cfg_data_dict.get(idx) is not None:
        break
    pos += 4
    # ldc.i4, stelem.i1
    if (file_data[pos] != 0x20) or (file_data[pos + 5] != 0x9C):
        break
    pos += 1
    val, = struct.unpack_from('<L', file_data, pos)
    # stelem.i1 stores a single byte; anything larger is not our pattern.
    if val > 255:
        break
    pos += 5
    cfg_data_dict[idx] = val
    # skip nop
    if file_data[pos] == 0:
        pos += 1
pack_cfg_data_size = max(cfg_data_dict.keys()) + 1
print('compressed cfg data size: %d' % pack_cfg_data_size)
# Reassemble the byte array in index order; stop at the first gap.
# NOTE(review): repeated bytes concatenation is quadratic -- fine for small
# configs, but a bytearray would scale better.
pack_cfg_data = b''
for i in range(pack_cfg_data_size):
    val = cfg_data_dict.get(i)
    if val is None:
        break
    pack_cfg_data += bytes([val])
cfg_data = decompress_data(pack_cfg_data)
print('cfg data size: %d' % len(cfg_data))
cfg_filename = filename + '.cfg'
with io.open(cfg_filename, 'wb') as f:
    f.write(cfg_data)
| rivitna/Malware | HsHarada/hsharada_extract_cfg.py | hsharada_extract_cfg.py | py | 3,594 | python | en | code | 218 | github-code | 36 |
class Solution:
    def leastInterval(self, tasks: List[str], n: int) -> int:
        """Minimum number of CPU intervals (idles included) to run all tasks
        with a cooldown of *n* intervals between identical tasks.

        Greedy round-robin: each cooling window runs up to n+1 distinct
        most-frequent tasks; every window except the last is padded to
        exactly n+1 intervals with idles.
        """
        from collections import Counter  # local import: snippet has no file header
        # Counter replaces the hand-rolled frequency dict; negate counts to
        # simulate a max-heap with heapq's min-heap.
        max_heap = [-count for count in Counter(tasks).values()]
        heapq.heapify(max_heap)
        cycles = 0
        while max_heap:
            # Pull at most n+1 different tasks for one cooldown window.
            window = []
            for _ in range(n + 1):
                if max_heap:
                    window.append(-heapq.heappop(max_heap))
            # Re-queue tasks that still have runs left.
            for remaining in window:
                if remaining - 1 > 0:
                    heapq.heappush(max_heap, -(remaining - 1))
            if max_heap:
                cycles += n + 1           # full window, idles included
            else:
                cycles += len(window)     # last window: no trailing idles
        return cycles
# https://cloud.google.com/pubsub/docs/create-topic#create_a_topic
# https://cloud.google.com/python/docs/reference/pubsub/latest
# %%
# Notebook-style cells (# %%): each cell below is meant to be run on its own.
from google.cloud import pubsub_v1
# TODO(developer)
# NOTE: topic_id is still the placeholder value; the calls below fail until
# it is replaced with a real topic id.
project_id = "podact-topic-extractor"
topic_id = "your-topic-id"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
topic = publisher.create_topic(request={"name": topic_path})
print(f"Created topic: {topic.name}")
# %%
# When you delete a topic, its subscriptions are not deleted.
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
publisher.delete_topic(request={"topic": topic_path})
print(f"Topic deleted: {topic_path}")
# %%
# List every topic in the project.
from google.cloud import pubsub_v1
publisher = pubsub_v1.PublisherClient()
project_path = f"projects/{project_id}"
for topic in publisher.list_topics(request={"project": project_path}):
    print(topic)
# %%
"""Publishes multiple messages to a Pub/Sub topic with an error handler."""
from concurrent import futures
from typing import Callable
from google.cloud import pubsub_v1
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
publish_futures = []
def get_callback(
    publish_future: pubsub_v1.publisher.futures.Future, data: str
) -> Callable[[pubsub_v1.publisher.futures.Future], None]:
    """Build a done-callback that prints the message id (or a timeout notice).

    The inner parameter intentionally shadows the outer 'publish_future':
    add_done_callback passes the resolved future back in.
    """
    def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None:
        try:
            # Wait 60 seconds for the publish call to succeed.
            print(publish_future.result(timeout=60))
        except futures.TimeoutError:
            print(f"Publishing {data} timed out.")
    return callback
for i in range(10):
    data = str(i)
    # When you publish a message, the client returns a future.
    publish_future = publisher.publish(topic_path, data.encode("utf-8"))
    # Non-blocking. Publish failures are handled in the callback function.
    publish_future.add_done_callback(get_callback(publish_future, data))
    publish_futures.append(publish_future)
# Wait for all the publish futures to resolve before exiting.
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
print(f"Published messages with error handler to {topic_path}.")
# %%
from google.cloud import pubsub_v1
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
for n in range(1, 10):
    data_str = f"Message number {n}"
    # Data must be a bytestring
    data = data_str.encode("utf-8")
    # Add two attributes, origin and username, to the message
    future = publisher.publish(
        topic_path, data, origin="python-sample", username="gcp"
    )
    # Blocking on result() per message serializes publishing (fine for a demo).
    print(future.result())
print(f"Published messages with custom attributes to {topic_path}.")
# %%
from google.cloud import pubsub_v1
# TODO(developer): Choose an existing topic.
# project_id = "your-project-id"
# topic_id = "your-topic-id"
publisher_options = pubsub_v1.types.PublisherOptions(enable_message_ordering=True)
# Sending messages to the same region ensures they are received in order
# even when multiple publishers are used.
client_options = {"api_endpoint": "us-east1-pubsub.googleapis.com:443"}
publisher = pubsub_v1.PublisherClient(
    publisher_options=publisher_options, client_options=client_options
)
# The `topic_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/topics/{topic_id}`
topic_path = publisher.topic_path(project_id, topic_id)
# Messages sharing an ordering key are delivered in publish order.
for message in [
    ("message1", "key1"),
    ("message2", "key2"),
    ("message3", "key1"),
    ("message4", "key2"),
]:
    # Data must be a bytestring
    data = message[0].encode("utf-8")
    ordering_key = message[1]
    # When you publish a message, the client returns a future.
    future = publisher.publish(topic_path, data=data, ordering_key=ordering_key)
    print(future.result())
print(f"Published messages with ordering keys to {topic_path}.")
# %%
from concurrent import futures
from google.cloud import pubsub_v1
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
# Configure the batch to publish as soon as there are 10 messages
# or 1 KiB of data, or 1 second has passed.
batch_settings = pubsub_v1.types.BatchSettings(
    max_messages=10,  # default 100
    max_bytes=1024,  # default 1 MB
    max_latency=1,  # default 10 ms
)
publisher = pubsub_v1.PublisherClient(batch_settings)
topic_path = publisher.topic_path(project_id, topic_id)
publish_futures = []
# Resolve the publish future in a separate thread.
def callback(future: pubsub_v1.publisher.futures.Future) -> None:
    # Prints the server-assigned message id once the publish resolves.
    message_id = future.result()
    print(message_id)
for n in range(1, 10):
    data_str = f"Message number {n}"
    # Data must be a bytestring
    data = data_str.encode("utf-8")
    publish_future = publisher.publish(topic_path, data)
    # Non-blocking. Allow the publisher client to batch multiple messages.
    publish_future.add_done_callback(callback)
    publish_futures.append(publish_future)
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
print(f"Published messages with batch settings to {topic_path}.")
# %%
from google import api_core
from google.cloud import pubsub_v1
# %%
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
# Configure the retry settings. Defaults shown in comments are values applied
# by the library by default, instead of default values in the Retry object.
custom_retry = api_core.retry.Retry(
    initial=0.250,  # seconds (default: 0.1)
    maximum=90.0,  # seconds (default: 60.0)
    multiplier=1.45,  # default: 1.3
    deadline=300.0,  # seconds (default: 60.0)
    # Only these exception types trigger a retry.
    predicate=api_core.retry.if_exception_type(
        api_core.exceptions.Aborted,
        api_core.exceptions.DeadlineExceeded,
        api_core.exceptions.InternalServerError,
        api_core.exceptions.ResourceExhausted,
        api_core.exceptions.ServiceUnavailable,
        api_core.exceptions.Unknown,
        api_core.exceptions.Cancelled,
    ),
)
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
for n in range(1, 10):
    data_str = f"Message number {n}"
    # Data must be a bytestring
    data = data_str.encode("utf-8")
    future = publisher.publish(topic=topic_path, data=data, retry=custom_retry)
    print(future.result())
print(f"Published messages with retry settings to {topic_path}.")
'''
Concept: a format for storing data.
Purpose: JSON can be saved to local .json files or sent over the wire, which
is why it is usually described as a lightweight data-interchange format.
Structure of a JSON document:
    {}  object (maps to a Python dict)
    []  list
    :   separates a key from its value
    ,   separates two parts
'''
import json
jsonStr = '''{
    "rate": "8.0",
    "cover_x": 1400,
    "title": "我是余欢水",
    "url": "https:\/\/movie.douban.com\/subject\/33442331\/",
    "playable": true,
    "cover": "https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2574916002.jpg",
    "id": "33442331",
    "cover_y": 2139,
    "is_new": false
}
'''
# json.loads: convert a JSON-formatted string into a Python object
# (here a dict, which makes the individual values easy to access).
jsonData = json.loads(jsonStr)
print(jsonData)
print(type(jsonData))
print(type(jsonStr))
jsonData2 = {
    "rate": "8.0",
    "cover_x": 1400,
    "title": "我是余欢水",
    "url": "https:\/\/movie.douban.com\/subject\/33442331\/",
    "playable": True,
    "cover": "https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2574916002.jpg",
    "id": "33442331",
    "cover_y": 2139,
    "is_new": False
}
# json.dumps: convert a Python object into a JSON-formatted string.
jsonStr2 = json.dumps(jsonData2)
print(jsonStr2)
print(type(jsonData2))
print(type(jsonStr2))
8325766076 | import json
import math
import random
# Cell-type codes used in the board grid.
boardTypes = {'Empty': 0, 'Wall': 1, 'Snake_Body': 2, 'Snake_Head': 3, 'Food': 4}
def distanceBetweenTwoPoints(point1, point2):
    """Manhattan distance between two {'x': int, 'y': int} points."""
    return (abs((point2['x'] - point1['x'])) + abs((point2['y'] - point1['y'])))
def createBoardObject(data, snakes):
    """Build a 2-D grid of boardTypes codes from the game-state payload.

    Args:
        data: decoded request dict; reads data['board']['height'|'width'|'food'].
        snakes: snake objects exposing .coordinates, a list of {'x','y'} dicts
            with the head at index 0.
    Returns:
        list[list[int]]: grid indexed as Board[x][y].
    """
    boardHeight = data["board"]["height"]
    boardWidth = data["board"]["width"]
    # Fill every cell with Empty up front. (The old code zero-filled and then
    # ran an off-by-one re-fill loop over range(height-1)/range(width-1) that
    # skipped the last row and column; it only worked because Empty == 0.)
    Board = [[boardTypes['Empty'] for _ in range(boardHeight)] for _ in range(boardWidth)]
    # Mark snakes: head first, then body; never overwrite a head marker if a
    # body segment reports the same cell.
    for snake in snakes:
        for index, point in enumerate(snake.coordinates, start=0):
            if index == 0:
                Board[point['x']][point['y']] = boardTypes['Snake_Head']
            elif Board[point['x']][point['y']] != boardTypes['Snake_Head']:
                Board[point['x']][point['y']] = boardTypes['Snake_Body']
    # Mark food.
    for leaf in data['board']['food']:
        Board[leaf['x']][leaf['y']] = boardTypes['Food']
    return Board
23234426744 | from motor import Motor
import keyboard, time, sys
from threading import Thread
import tkinter as tk
'''
#PINOS DE COMUNICAÇÃO EM BINÁRIO
#[0,1,2,3, 4, 5, 6, 7] - BITS DA PLACA
#[1,2,4,8,16,32,64,128] - SINAL DE COMUNICAÇÃO CORRESPONDENTE
'''
# X axis motor on communication bits 4 and 8, Y axis motor on bits 16 and 32
# (see the pin table in the header comment).
mx = Motor(4, 8)
my = Motor(16, 32)
# Step counts: full X travel per pass, Y increment between passes.
dx, dy = 2700, 270
# Serpentine sweep until Y reaches 2700: X forward, step Y, X back, step Y.
# ('andar' = "move" in Portuguese; direction argument 1/0.)
while(my.pos <= 2700):
    mx.andar(dx, 1)
    # Each motor exposes an 'exit' flag; abort the whole sweep if it is set.
    if(mx.exit == True):
        break
    my.andar(dy, 1)
    if(my.exit == True):
        break
    mx.andar(dx, 0)
    if(mx.exit == True):
        break
    my.andar(dy, 1)
    if(my.exit == True):
        break
    print(mx.pos, my.pos)
print('cabou')
12717941520 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
# ---
# title: hello,hikyson
# tags: [Default]
# category: [Default]
# comments: true
# date: 2014-04-20 22:18:43
# ---
#
# hello,hikyson
#
# <!-- more -->
#
# |Version|Codename|API|Distribution|
# |---|---|---|---|
# |111|222|333|444|
import os
from ScrapyForAndroidDashboard.git_pusher import post_title, local_time_str, post_name, push, post_file_dir
class ScrapyforandroiddashboardPipeline(object):
    """Scrapy item pipeline that renders the scraped Android version
    distribution into a Hexo-style markdown post and pushes it to git.
    """

    def process_item(self, item, spider):
        """Render item['version_data'] (a JSON string) to a markdown post,
        replace any previous post with the same title, and push the repo.
        """
        # generate md file
        divider = "---"
        line_feed = "\r\n"
        title = post_title
        tags = "[android,spider,scrapy]"
        category = "[scrapy]"
        comments = "true"
        date = local_time_str
        more = "<!-- more -->"
        # Hexo front matter block (see the template in the header comment).
        head = "".join(
            [divider, line_feed, "title: ", title, line_feed, "tags: ", tags, line_feed, "category: ", category,
             line_feed, "comments: ", comments, line_feed, "date: ", date, line_feed, divider, line_feed])
        summary = "This is a post generate by a spider , grab from url: [developer.android.google.cn](developer.android.google.cn)"
        updatetime = "Update time: %s" % local_time_str
        version_data_dict = json.loads(item["version_data"])
        version_chart_url = "https:" + version_data_dict["chart"] + ".png"
        # version text: chart image followed by a markdown table.
        # NOTE(review): rows are written as |api|name|perc| under the header
        # |Codename|API|Distribution| -- column order looks swapped; confirm.
        text_version = "".join(
            ["" % version_chart_url, line_feed, line_feed, "|Codename|API|Distribution|",
             line_feed, "|---|---|---|", line_feed])
        version_items = version_data_dict["data"]
        for version_item in version_items:
            api = version_item["api"]
            name = version_item["name"]
            perc = version_item["perc"]
            text_version = text_version + "|" + str(api) + "|" + name + "|" + str(perc) + "|" + line_feed
        post = "".join(
            [head, line_feed, line_feed, summary, line_feed, updatetime, line_feed, line_feed, more, line_feed,
             line_feed, text_version])
        # Remove any earlier post whose filename contains the title.
        for file_name in os.listdir(post_file_dir):
            if file_name.find(post_title) >= 0:
                os.remove(os.path.join(post_file_dir, file_name))
        file_name = os.path.join(post_file_dir, post_name)
        # NOTE(review): 'post' is a str written to a file opened in 'wb';
        # this only works on Python 2 -- confirm the target interpreter.
        with open(file_name, 'wb') as f:
            f.write(post)
        push()
        return item
| Kyson/ScrapyForAndroidDashboard | ScrapyForAndroidDashboard/ScrapyForAndroidDashboard/pipelines.py | pipelines.py | py | 2,522 | python | en | code | 1 | github-code | 36 |
2835263565 | import pandas as pd
import plotly.express as px
from ..sequence_info.sequences import group_2_coins
from ..utils import google_form_question_to_coin_sequence
DATA_FILENAME = "C:/Users/Crystal Wang/Downloads/9.660/9.660-final-project/data/data.csv"
def get_df(filename=None):
    """Load and pre-clean the survey responses.

    Args:
        filename: CSV path; defaults to the module-level DATA_FILENAME
            (resolved lazily so the constant is only read when needed).

    Returns:
        DataFrame indexed by respondent name, with the timestamp column and
        the trailing 70 music-question columns dropped, and the textual
        Likert anchors replaced by their numeric values.
    """
    if filename is None:
        filename = DATA_FILENAME
    df = pd.read_csv(filename)
    df = df.drop("Timestamp", axis=1)
    # The last 70 columns belong to the music half of the survey.
    df = df.drop(df.columns[-70:], axis=1)
    # The rating endpoints come back as labelled strings; map them to ints.
    df = df.replace("1 (least representative)", 1)
    df = df.replace("7 (most representative)", 7)
    df = df.set_index("Name")
    return df
def split_groups(df):
    """Split the cleaned survey frame into group-1 and group-2 responses.

    Each group keeps only its own question columns (the other group's
    block of 37 questions is dropped by position) and loses the
    group-assignment column.
    """
    group_col = "Who asked you to do this survey?"

    g1 = df[df[group_col] == "Crystal (Group 1)"]
    g1 = g1.drop(columns=g1.columns[-37:]).drop(columns=[group_col])

    g2 = df[df[group_col] == "Julia (Group 2)"]
    g2 = g2.drop(columns=g2.columns[-73:-36]).drop(columns=[group_col])

    return g1, g2
def get_control_data(g_df):
    """Return the first five (control-sequence) rating columns as ints,
    relabelled 1..5."""
    control = g_df.iloc[:, :5].astype(int)
    control.columns = [1, 2, 3, 4, 5]
    return control
def plot_line_data(df):
    """Plot each respondent's ratings as one line per Name.

    Bug fix: the figure used to be built and silently discarded; it is now
    returned so callers can ``.show()`` or save it (existing callers that
    ignore the return value are unaffected).
    """
    long_df = df.stack().reset_index()
    long_df = long_df.rename(columns={"level_1": "x", 0: "y"})
    return px.line(long_df, x="x", y="y", color="Name")
def significance_t_test(df1, df2):
    """Column-wise two-sample test at the 5% level.

    Uses population variances (ddof=0) scaled by each sample size and
    compares |t| against the 1.96 normal critical value.

    Returns:
        Boolean Series, True where the column means differ significantly.
    """
    mean_diff = df1.mean(axis=0) - df2.mean(axis=0)
    sq_err_1 = (df1.std(axis=0, ddof=0) ** 2) / len(df1.index)
    sq_err_2 = (df2.std(axis=0, ddof=0) ** 2) / len(df2.index)
    t_stat = mean_diff / (sq_err_1 + sq_err_2) ** 0.5
    return t_stat.abs() > 1.96
def verify_control_significance(g1_df, g2_df, plot=False):
    """Sanity-check that the two groups agree on the shared control sequences.

    Raises if any control sequence shows a significant difference; otherwise
    prints a confirmation. Optionally plots both groups' control ratings.
    """
    ctrl_1 = get_control_data(g1_df)
    ctrl_2 = get_control_data(g2_df)
    if plot:
        for ctrl in (ctrl_1, ctrl_2):
            plot_line_data(ctrl)
    # Any significant control-sequence difference invalidates the comparison.
    if significance_t_test(ctrl_1, ctrl_2).any():
        raise Exception("The samples are significantly different in control responses!")
    print("No significant difference between the control responses.\n")
def remove_control_data(g_df):
    """Drop the five leading control columns and coerce the rest to int."""
    experiment_only = g_df.drop(columns=g_df.columns[:5])
    return experiment_only.astype(int)
def sort_columns(df):
    """Return *df* with its columns in ascending label order."""
    return df[sorted(df.columns)]
def clean_group_1(g1_df):
    """Relabel group-1 question columns as coin sequences, then sort them.

    Note: the column relabelling mutates the passed frame in place, matching
    the original behaviour.
    """
    g1_df.columns = [google_form_question_to_coin_sequence(question)
                     for question in g1_df.columns]
    return sort_columns(g1_df)
def clean_group_2(g2_df):
    """Relabel group-2 question columns as coin sequences, then sort them.

    Group 2's form omits the "[Coin X: ]" suffix, so it is re-attached from
    the known group_2_coins order before parsing. Mutates the frame's
    columns in place, matching the original behaviour.
    """
    renamed = []
    for question, coin in zip(g2_df.columns, group_2_coins):
        renamed.append(google_form_question_to_coin_sequence(f"{question} [Coin {coin}: ]"))
    g2_df.columns = renamed
    return sort_columns(g2_df)
def test_experiment_significance(g1_df, g2_df):
    """Report which experiment questions differ significantly between groups."""
    print("---- GROUP 1 vs. GROUP 2 EXPERIMENT ----")
    significant = significance_t_test(g1_df, g2_df)
    if not significant.any():
        print("There is NO significant difference between group 1 and group 2\n")
        return
    significant_cols = list(significant.loc[significant].index)
    print(f"There is a significant difference between group 1 and group 2: {significant_cols}\n")
def get_groups():
df = get_df()
g1_df, g2_df = split_groups(df)
verify_control_significance(g1_df, g2_df)
g1_df = remove_control_data(g1_df)
g2_df = remove_control_data(g2_df)
g1_df = clean_group_1(g1_df)
g2_df = clean_group_2(g2_df)
test_experiment_significance(g1_df, g2_df)
return g1_df, g2_df | cwcrystal8/9.660-final-project | coins/data_cleaning/groups.py | groups.py | py | 3,884 | python | en | code | 0 | github-code | 36 |
36748158121 | import hashlib
import os
from string import hexdigits
class FudgeException(Exception):
    """Base exception for fudge-specific errors."""
    pass
def read_file(path, mode='rb'):
    """Read and return the full contents of *path* (binary by default)."""
    with open(path, mode) as handle:
        return handle.read()
def write_file(path, data, mode='wb'):
    """Write *data* to *path*, truncating existing content (binary by default)."""
    with open(path, mode) as handle:
        handle.write(data)
def makedirs(path):
    """Create *path* (and any missing parents); no-op if it already exists.

    Uses exist_ok=True instead of an exists() pre-check, closing the race
    where the directory appears between the check and the creation.
    """
    os.makedirs(path, exist_ok=True)
def stat(path):
    """Return git-index-style metadata for *path*.

    Timestamps are split at the decimal point of their float repr into
    (seconds, fractional) integer pairs; numeric ids and the size are
    truncated to 32 bits to match the on-disk index format.
    """
    status = os.stat(path)
    mask = 0xffffffff

    def split_time(value):
        return [int(part) for part in str(value).split('.')]

    ctime_s, ctime_n = split_time(status.st_ctime)
    mtime_s, mtime_n = split_time(status.st_mtime)
    return {
        'ctime_s': ctime_s,
        'ctime_n': ctime_n,
        'mtime_s': mtime_s,
        'mtime_n': mtime_n,
        'dev': status.st_dev & mask,
        'ino': status.st_ino & mask,
        'perms': status.st_mode,
        'uid': status.st_uid & mask,
        'gid': status.st_gid & mask,
        'size': status.st_size & mask,
    }
def get_hash(data):
    """Return the SHA-1 hex digest of *data*; str input is UTF-8 encoded."""
    payload = data.encode('utf-8') if isinstance(data, str) else data
    return hashlib.sha1(payload).hexdigest()
def ishex(string):
    """True if every char of *string* is a hex digit (vacuously true for '')."""
    hex_chars = frozenset(hexdigits)
    return all(char in hex_chars for char in string)
def issafe(string):
    """True when *string* contains no forbidden characters anywhere and does
    not start or end with punctuation/whitespace that is unsafe at the edges."""
    forbidden = {'\0', '\n', '<', '>'}
    edge_forbidden = (' ', '.', ',', ':', ';', '"', "'")
    if any(char in forbidden for char in string):
        return False
    if string.startswith(edge_forbidden) or string.endswith(edge_forbidden):
        return False
    return True
| QuantamKawts/fudge | fudge/utils.py | utils.py | py | 1,568 | python | en | code | 0 | github-code | 36 |
39303555840 | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: admin
@file: MultiHeadedAttention.py
@time: 2021/09/02
@desc:
"""
import copy
import torch
import math
from torch import nn
import torch.nn.functional as F
def clones(module, N):
    """Return an nn.ModuleList of N independent deep copies of *module*.

    The copies share no parameters, so each clone is trained separately.
    """
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention (eq. 4 of "Attention Is All You Need").

    Args:
        query, key, value: tensors of shape (..., seq_len, d_k).
        mask: optional broadcastable mask; positions where mask == 0 are
            suppressed before the softmax.
        dropout: optional nn.Dropout module applied to the attention weights.

    Returns:
        Tuple of (attention-weighted values, attention weight matrix).
    """
    d_k = query.size(-1)
    # Query/key similarity, scaled by sqrt(d_k) to keep the softmax
    # in a well-conditioned range.
    scores = query @ key.transpose(-2, -1) / math.sqrt(d_k)
    if mask is not None:
        # A large negative fill becomes ~zero weight after the softmax.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = scores.softmax(dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights @ value, weights
class MultiHeadedAttention(nn.Module):
"""
Multi-Head Attention(编码器(2))
"""
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadedAttention, self).__init__()
"""
`h`:注意力头的数量
`d_model`:词向量维数
"""
# 确保整除
assert d_model % h == 0
# q、k、v向量维数
self.d_k = d_model // h
# 头的数量
self.h = h
# WQ、WK、WV矩阵及多头注意力拼接变换矩阵WO
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
# 批次大小
nbatches = query.size(0)
# WQ、WK、WV分别对词向量线性变换,并将结果拆成h块
query, key, value = [
l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 注意力加权
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 多头注意力加权拼接
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
# 对多头注意力加权拼接结果线性变换
return self.linears[-1](x) | coinyue/Transformer | model/MultiHeadedAttention.py | MultiHeadedAttention.py | py | 2,463 | python | en | code | 0 | github-code | 36 |
19944553952 | from time import sleep
from threading import Thread
from consumer import QueryConsumer
from querier import QueryMongo
class Pipe(QueryConsumer):
    def __init__(self):
        """Wire the consumer to a Mongo querier; polling starts switched off."""
        super().__init__()
        self._mongo = QueryMongo()
        self._switch = False   # loop flag polled by the background thread
        self._response = None  # result of the most recent Mongo query
        self._queue = []       # history of all query responses
    def _trigger(self):
        """Background loop: poll for a message once per second while switched on.

        Each polled message is fed to the ``_query`` setter, which runs the
        Mongo query and records the response.
        """
        while self._switch:
            self._query = self._pollMsg()  # _pollMsg inherited from QueryConsumer
            sleep(1)
    @property
    def response(self):
        """Most recent Mongo query response (None before any query)."""
        return self._response
    @property
    def queue(self):
        """List of all query responses accumulated so far."""
        return self._queue
    @property
    def on(self):
        """Start the polling thread and return self (fluent ``pipe.on`` toggle).

        NOTE(review): a property with side effects (thread start) is unusual;
        confirm callers rely on the attribute-style spelling before changing it.
        """
        self._switch = True
        self.thread = Thread(target = self._trigger)
        self.thread.start()
        return self
    @property
    def off(self):
        """Stop the polling loop, wait briefly, and join the thread."""
        self._switch = False
        sleep(5)  # grace period for an in-flight poll/query -- TODO confirm 5s suffices
        self.thread.join()
        return self
    @property
    def _query(self):
        """Last raw message handed to the setter.

        NOTE(review): raises AttributeError if read before the first poll,
        since ``_msg`` is only assigned by the setter.
        """
        return self._msg
@_query.setter
def _query(self, msg):
self._msg = msg
if self._msg is not None:
self._response = self._mongo.query(self._msg.value())
self._queue.append(self._response) | MatheusGaignoux/Kafka-MongoDB-query-parameters-integration | src/pipe.py | pipe.py | py | 1,181 | python | en | code | 0 | github-code | 36 |
29501639753 | import sys, math
input = sys.stdin.readline
A, B = map(int, input().split())
point = int(math.sqrt(B))
# Sieve only up to sqrt(B): every "almost prime" p^k (k >= 2) in [A, B]
# has a prime base p <= sqrt(B).
prime = [True] * (point + 1)
# Assume every number is prime until the sieve crosses it off.
prime[1] = False
# Sieve of Eratosthenes over [2, point].
for i in range(2, point + 1):
    if prime[i]:
        if i*i > point:
            break
        for j in range(int(math.pow(i, 2)), point + 1, i):
            prime[j] = False
# Count "almost primes": prime powers p^k (k >= 2) that fall inside [A, B].
cnt = 0
for i in range(1, len(prime)):
    if prime[i]:
        res = int(math.pow(i, 2))
        while True:
            if res < A:
                res *= i
                continue
            if res > B:
                break
            res *= i
            cnt += 1
print(cnt) | harii-in/BAEKJOON | 1456.py | 1456.py | py | 738 | python | ko | code | 0 | github-code | 36 |
31859196035 | from pygame import *
class UserControl(object):
"""docstring for UserControl."""
def __init__(self):
print("User controller init!")
def decide(self, keys,cells=None, prev_pos=(0,0)):
dx,dy = prev_pos
if keys[K_w] and dy != 1:
dx, dy = 0,-1
if keys[K_s] and dy != -1:
dx, dy = 0, 1
if keys[K_a] and dx != 1:
dx, dy =-1, 0
if keys[K_d] and dx != -1:
dx, dy = 1, 0
return (dx,dy)
| CymerR/School_snake_ai | UserControl.py | UserControl.py | py | 500 | python | en | code | 1 | github-code | 36 |
34212093305 | # https://www.acmicpc.net/problem/16236
# sol
# 상어객체를 구현하여 bfs로 최단거리의 가능한 먹이를 탐색한다
# 1) bfs하며 최단거리이면서 (여럿일 경우 위쪽/왼쪽순) 먹을 수 있는(상어보다 사이즈 작은) 먹이 탐색
# 1-1) 이때 가능한 먹이가 여럿일 수 있기에 bfs 큐에 상어 이동거리를 포함하는 변형이 들어간다
# 1-2) 가능한 먹이가 없으면 그때까지 상어 이동거리를 return하고 끝낸다
# 2) 결정된 먹이를 먹고 상어의 상태와 space를 업데이트 한다
# 3) 가능한 먹이 없을때까지 bfs를 반복한다
import sys
from collections import deque
from queue import PriorityQueue
MAX = 987654321
class Shark:
    """Mutable state of the baby shark (BOJ 16236): size, eaten-fish count,
    grid position, and total travel time.

    The class-level defaults document the initial state: size 2, nothing
    eaten yet, no position, zero elapsed time.
    """

    size = 2
    food_cnt = 0
    r = None
    c = None
    total_time = 0

    def __init__(self, r=None, c=None):
        self.r, self.c = r, c

    def _size_up(self):
        """Grow one size level."""
        self.size += 1

    def food_cnt_up(self):
        """Register one eaten fish; grow when the count reaches the size."""
        self.food_cnt += 1
        if self.food_cnt == self.size:
            self._size_up()
            self.food_cnt = 0

    def set_loc(self, r, c):
        """Place the shark on grid cell (r, c)."""
        self.r, self.c = r, c

    def plus_total(self, time):
        """Accumulate travel time."""
        self.total_time += time
def bfs_find_fish(shark)->tuple:
    """Search (via a distance-ordered priority queue) for the nearest edible fish.

    Queue entries are (distance, row, col), so ties on distance resolve to the
    upper-most, then left-most fish, as the problem statement requires.

    Uses the module globals ``space`` (grid), ``n`` (grid size) and ``MAX``.

    Returns:
        (distance, (row, col)) of the chosen fish, or (MAX, (None, None))
        when no edible fish is reachable.
    """
    global space
    visited = [[0 for _ in range(n)] for _ in range(n)]
    bfs_q = PriorityQueue()
    bfs_q.put( (0, shark.r,shark.c) )
    visited[shark.r][shark.c] = 1
    min_dist = MAX
    target_loc = tuple()
    while not bfs_q.empty():
        # Pop the closest cell first (ties: smaller row, then smaller col).
        d,r,c = bfs_q.get()
        # Everything still queued is at least this far away; once past the
        # best distance found, no better target can appear.
        if d >= min_dist:
            break
        move = [(-1,0), (0,-1), (1,0), (0,1)]
        for dr,dc in move:
            n_d, n_r, n_c = (d+1, r+dr, c+dc)
            if 0 <= n_r < n and 0 <= n_c < n:
                # Strictly smaller fish at a new best distance: take it.
                if space[n_r][n_c]!=0 and space[n_r][n_c] < shark.size and n_d < min_dist:
                    visited[n_r][n_c] = 1
                    target_loc = (n_r, n_c)
                    min_dist = n_d
                # Same distance as the current best: prefer upper, then left.
                elif space[n_r][n_c]!=0 and space[n_r][n_c] < shark.size and n_d == min_dist:
                    visited[n_r][n_c] = 1
                    if n_r < target_loc[0]:
                        target_loc = (n_r, n_c)
                        min_dist = n_d
                    elif n_r == target_loc[0] and n_c < target_loc[1]:
                        target_loc = (n_r, n_c)
                        min_dist = n_d
                    else:
                        continue
                # Passable cell (empty, or a fish of equal size): keep exploring.
                elif (space[n_r][n_c]==0 or space[n_r][n_c] == shark.size) and visited[n_r][n_c] == 0:
                    visited[n_r][n_c] = 1
                    bfs_q.put((n_d,n_r,n_c))
    if target_loc == tuple():
        return MAX, (None, None)
    return min_dist, target_loc
if __name__ == "__main__":
    # Read the n x n board; 9 marks the shark's starting cell.
    n = int(sys.stdin.readline())
    space = []
    shark = Shark()
    for r in range(n):
        row = list(map(int, sys.stdin.readline().rstrip('\n').split()))
        space.append(row)
        for c, val in enumerate(row):
            if val == 9:
                shark.set_loc(r, c)
    while True:
        # Locate the nearest edible fish; MAX means none is reachable.
        min_dist, target_loc = bfs_find_fish(shark)
        if min_dist == MAX:
            break
        # Move the shark onto the fish, eat it, and clear both cells.
        space[shark.r][shark.c] = 0
        shark.set_loc(target_loc[0],target_loc[1])
        shark.food_cnt_up()
        space[target_loc[0]][target_loc[1]] = 0
        shark.plus_total(min_dist)
    print(shark.total_time)
| chankoo/problem-solving | graph/boj16236.py | boj16236.py | py | 3,914 | python | ko | code | 1 | github-code | 36 |
74605435944 | from model.contact import Contact
from datetime import datetime
import re
import csv
class Phonebook:
"""
The Phonebook class allows users to create, update, delete, search, and perform various operations on contacts.
Attributes:
contacts (list): A list of Contact objects representing the phonebook's contacts.
Methods:
search_contacts(): Searches for contacts based on user-defined criteria.
create_contact(): Creates a new contact and adds it to the phonebook.
validate_phone_number(phone_number): Validates a phone number's format.
validate_email_address(email_address): Validates an email address's format.
update_contact(): Updates an existing contact's information.
delete_contact(): Deletes an existing contact from the phonebook.
print_all_contacts(): Prints details of all contacts in the phonebook.
print_contact_history(): Prints the contact history for a specific contact.
sort_contacts(): Sorts the contacts in the phonebook.
group_contacts(): Groups contacts by the initial letter of their last names.
"""
def __init__(self):
self.contacts = [Contact]*0
print("Starting phonebook application...")
def search_contacts(self):
"""
Searches for contacts in the contact list based on user-defined criteria.
The method allows the user to choose between two search options:
- 0: Search by name or phone number. The user can enter characters and view matching results.
- 1: Search for contacts added within a specific time frame. The user enters start and end dates.
Depending on the user's choice, the method displays matching results or contacts
added within the specified time frame.
Returns:
None
"""
choice = input("Options: \n 0. Search with name or phone number \n " +
"1. Search for contacts added within specific time frame \n How do you want to search for the contact: ")
if choice == "0":
user_input = input("To search for contacts, start entering characters below and press enter to see results: \n")
counter = 0
print("Below is a list of matching results: \n")
for contact in self.contacts:
if (user_input in contact.get_first_name()
or user_input in contact.get_last_name()
or user_input in contact.get_phone_number()
or user_input in contact.get_email_address()
or user_input in contact.get_address()):
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
elif choice == "1":
start_date = input("Please enter start date in yyyy/MM/dd format: ")
end_date = input("Please enter end date in yyyy/MM/dd format: ")
while True:
try:
start_time=datetime(*[int(i) for i in start_date.split('/')])
end_time=datetime(*[int(i) for i in end_date.split('/')]).replace(hour=23,minute=59,second=59)
break
except:
print("Please enter a valid date")
print("Start time: ", start_time)
print("End Time: ", end_time)
filtered_contacts = [filtered_contact for filtered_contact in self.contacts if start_time <= filtered_contact.create_time <= end_time]
print("\nBelow is a list of matching results: \n")
counter = 0
for contact in filtered_contacts:
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
else:
print("Please enter a valid option")
def create_contact(self):
"""
Creates a new contact and adds it to the contact list.
This method provides two options to create a contact:
- Option 0: Manually enter individual contact details
- Option 1: Load contacts in batch from a CSV file.
Depending on the user's choice, the method either guides the user to enter
individual contact details or loads contacts from a CSV file.
It validates the phone number and email address format, checks for duplicate
contacts, and adds the new contacts to the contact list.
Returns:
None
"""
print("Creating contact...")
print("Options: \n 0. Enter individual contact manually \n 1. Load contacts in batch from csv file")
batch_load = input("How do you want to add contact: ")
if batch_load=="0":
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
while True:
phone_number = input("Enter phone number in (XXX) XXX-XXXX format : ")
if self.validate_phone_number(phone_number)==False:
print("Please enter a valid phone number. Make sure format is (XXX) XXX-XXXX")
continue
else:
break
while True:
email_address = input("Enter email address, press enter to skip: ")
if email_address=="": email_address=None
if self.validate_email_address(email_address)==False:
print("Please enter a valid email address.")
continue
else:
break
address = input("Enter address, press enter to skip: ")
if address=="": address=None
contact_exists=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
contact_exists=True
if contact_exists==True:
print("Contact already exists! Please check the contact details and delete or update it as per your need.")
else:
new_contact = Contact(first_name,last_name,phone_number,email_address,address)
self.contacts.append(new_contact)
print("Contact added successfully!")
self.print_all_contacts()
elif batch_load=="1":
print("\n We have sample_contacts.csv file already present in data folder. \n You can copy your required csv file to that path first.")
file_name = input("Now enter the file name you want to load from the data folder:")
csv_file_path = "data/"+file_name
try:
with open(csv_file_path, mode='r', newline='') as file:
csv_reader = csv.reader(file)
for contact in csv_reader:
first_name = contact[0]
last_name = contact[1]
phone_number = contact[2]
if self.validate_phone_number(phone_number)==False:
print("Phone number: ", phone_number, " is not valid format (XXX) XXX-XXXX, exiting csv file. Please try again after fixing the value in csv file.")
return
email_address = contact[3]
if email_address!="" and self.validate_email_address(email_address)==False:
print("Email address: ", email_address, " is not valid format, exiting csv file. Please try again after fixing the value in csv file.")
return
address = contact[4]
contact_exists=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
contact_exists=True
if contact_exists==True:
print("Contact with first name: ", first_name, " and last name: ", last_name +
" already exists! Please check the contact details and delete or update it as per your need.")
else:
new_contact = Contact(first_name,last_name,phone_number,email_address,address)
self.contacts.append(new_contact)
print("Contacts added successfully from csv file in batch")
self.print_all_contacts()
except:
print("Error opening the file, please check the file name.")
else:
print("Please enter a valid option!")
def validate_phone_number(self, phone_number):
"""
Validates a phone number to ensure it matches the format '(###) ###-####'.
Args:
phone_number (str): The phone number to be validated.
Returns:
bool: True if the phone number is in the correct format, False otherwise.
"""
pattern = r'^\(\d{3}\) \d{3}-\d{4}$'
if re.match(pattern,phone_number):
return True
else:
return False
def validate_email_address(self, email_address):
"""
Validates an email address to ensure it matches a standard email format.
Args:
email_address (str): The email address to be validated.
Returns:
bool: True if the email address is in a valid format, False otherwise.
"""
pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
if re.match(pattern, email_address):
return True
else:
return False
    def update_contact(self):
        """
        Update one field of an existing contact, found by exact first/last name.

        Prompts for the name, then for the field to change (0-4) and its new
        value; prints the full list afterwards, or a not-found message.

        NOTE(review): unlike create_contact, the new phone number / e-mail is
        not validated here -- confirm whether that is intended.
        """
        first_name = input("Enter first name of contact to be updated: ")
        last_name = input("Enter last name of contact to be updated: ")
        found_contact=False
        # No break: every contact matching the name is offered for update in turn.
        for contact in self.contacts:
            if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
                found_contact=True
                print("Fields: \n 0. First Name \n 1. Last Name \n 2. Phone Number \n 3. Email Address \n 4. Address")
                user_input = input("Enter which field you want to update: ")
                if user_input=="0":
                    updated_first_name=input("Enter the new first name: ")
                    contact.update_first_name(updated_first_name)
                elif user_input=="1":
                    updated_last_name=input("Enter the new last name: ")
                    contact.update_last_name(updated_last_name)
                elif user_input=="2":
                    updated_phone_number=input("Enter the new phone number: ")
                    contact.update_phone_number(updated_phone_number)
                elif user_input=="3":
                    updated_email_address=input("Enter the new email address: ")
                    contact.update_email_address(updated_email_address)
                elif user_input=="4":
                    updated_address=input("Enter the new address: ")
                    contact.update_address(updated_address)
                else:
                    print("Please enter a valid option!")
                self.print_all_contacts()
        if found_contact==False:
            print("Contact does not exist, please check the first and last name you entered.")
def delete_contact(self):
"""
Deletes an existing contact from the contact list.
This method prompts the user to enter the first name and last name of the contact to be deleted.
If the contact is found in the list, it is removed from the list, and a confirmation message
is displayed indicating that the contact has been deleted.
If the specified contact does not exist in the list, a message is displayed indicating that
the contact was not found.
Returns:
None
"""
first_name = input("Enter first name of contact to be deleted: ")
last_name = input("Enter last name of contact to be deleted: ")
found_contact=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
found_contact=True
self.contacts.remove(contact)
print("Contact deleted successfully!")
if found_contact==False:
print("Contact does not exist, please check the first and last name you entered.")
def print_all_contacts(self):
"""
Prints the details of all contacts in the contact list.
This method displays the details of each contact in the contact list using a counter
to keep track of contact ids displayed.
If the contact list is empty, it notifies the user to add new contacts.
Returns:
None
"""
counter = 0
if(self.contacts.count==0):
print("Contact list is empty, please add new contacts.")
else:
print("\nFull Contact List: ")
for contact in self.contacts:
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
    def print_contact_history(self):
        """
        Print the stored history for every contact matching the entered
        first and last name, or a not-found message when none matches.
        """
        first_name = input("Enter first name of contact: ")
        last_name = input("Enter last name of contact: ")
        found_contact=False
        # No break: if several contacts share the name, each history is printed.
        for contact in self.contacts:
            if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
                found_contact=True
                print("Contact History: ", contact.get_contact_history())
        if found_contact==False:
            print("Contact does not exist, please check the first and last name you entered.")
    def sort_contacts(self):
        """
        Sort the contact list in place by first name.

        The user chooses 0 for ascending or 1 for descending order; the
        comparison is plain (case-sensitive) lexicographic ordering.
        """
        choice=input("\n\nOptions: \n0. Ascending order \n1. Descending order \n\nHow do you want to sort: ")
        if choice=="0":
            self.contacts.sort(key=lambda contact: contact.get_first_name())
            print("Contacts sorted in ascending order. Press 4 to view all contacts.")
        elif choice=="1":
            self.contacts.sort(key=lambda contact: contact.get_first_name(), reverse=True)
            print("Contacts sorted in descending order. Press 4 to view all contacts.")
        else:
            print("Please enter a valid option")
def group_contacts(self):
"""
This method sorts the contacts based on the initial letter of their last names,
effectively grouping them alphabetically.
Returns:
None
"""
print("Grouping contacts by initial letter of last name")
self.contacts.sort(key=lambda contact:contact.get_last_name()[0] )
print("Contacts successfully grouped. Press 4 to view all contacts.")
| Kartik-Nair/PhoneBook | phonebook.py | phonebook.py | py | 16,960 | python | en | code | 0 | github-code | 36 |
17164702508 | from os import DirEntry
from geradorDeSql import GeradorDeSql
#logstash_data={"host":"192.168.0.116","port":5000,"username":"elastic","password":"changeme"}
#logstash_data={"host":"192.168.0.116","port":5000}
class Gerar_bd_teste:
def __init__(self,local_sqlite:DirEntry="scripts/teste_db.db",total_threads=0,logstash_data={}):
self.total_threads=total_threads
self.logstash_data=logstash_data
self.gerador=GeradorDeSql(sqlite_db=local_sqlite,sql_file_pattern="scripts/sqlitePattern.sql", log_file="scripts/geradorSQL.log",level=40,logging_pattern='%(asctime)s - %(name)s - %(levelname)s - %(message)s',logstash_data=logstash_data)
def executar(self,quantidade_elementos_iniciais_insercao=100,quantidade_elementos_totais=10000):
if quantidade_elementos_iniciais_insercao>0:
self.gerador.gerar_todos_dados_por_json_paralel(threads=self.total_threads,select_country="pt_br",tipo=[1],quantidade_final=quantidade_elementos_iniciais_insercao)
print("gerados dados de inserção")
if quantidade_elementos_totais>0:
self.gerador.gerar_todos_dados_por_json_paralel(threads=self.total_threads,select_country="pt_br",quantidade_final=quantidade_elementos_totais)
print("gerados dados randomicos")
if __name__ == "__main__":
Gerar_bd_teste(local_sqlite="scripts/initial_db.db").executar()
| mzramna/algoritimo-de-testes-de-benchmark-de-bancos-de-dados | scripts/geração_bd_testes.py | geração_bd_testes.py | py | 1,385 | python | pt | code | 0 | github-code | 36 |
39353557558 | import math
def area(r):
"""Area of a circle with radius 'r'"""
return math.pi * (r**2)
radii = [2, 5, 7.1, 0.3, 10]
# Method 1: Direct method
areas = []
for r in radii:
a = area(r)
areas.append(a)
print(areas)
# Method 2: Use 'map' functions
print(list(map(area, radii)))
print("===========")
temps = [("Berlin", 29), ("Cairo", 36), ("Buenos Aires", 19),
("Los Angeles", 26), ("Tokyo", 27),
("New York", 28), ("London", 22), ("Beiking", 32)]
c_to_f = lambda data: (data[0], (9/5)*data[1] + 32)
print(list(map(c_to_f, temps)))
print("===========")
import statistics
data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]
avg = statistics.mean(data)
print(avg)
print(list(filter(lambda x: x > avg, data)))
print("===========")
countries = ["", "Argentina", "Brazil", "Chile",
"", "Colombia", "", "Ecuador", "", "",
"Venezuela"]
print(list(filter(None, countries)))
print("===========")
from functools import reduce
# Multiply all numbers in a list
data = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
multi = lambda x, y: x * y
print(reduce(multi, data))
| Vaijyant/PythonPlayground | 23_map_filter_redunce.py | 23_map_filter_redunce.py | py | 1,113 | python | en | code | 0 | github-code | 36 |
2808401161 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.1.2"
__author__ = "Abien Fred Agarap"
import argparse
from models.svm.svm import Svm
# Hyper-parameters
BATCH_SIZE = 256
LEARNING_RATE = 1e-5
N_CLASSES = 2
SEQUENCE_LENGTH = 21
def parse_args():
parser = argparse.ArgumentParser(description="SVM for Intrusion Detection")
group = parser.add_argument_group("Arguments")
group.add_argument(
"-o",
"--operation",
required=True,
type=str,
help='the operation to perform: "train" or "test"',
)
group.add_argument(
"-t",
"--train_dataset",
required=False,
type=str,
help="the NumPy array training dataset (*.npy) to be used",
)
group.add_argument(
"-v",
"--validation_dataset",
required=True,
type=str,
help="the NumPy array validation dataset (*.npy) to be used",
)
group.add_argument(
"-c",
"--checkpoint_path",
required=True,
type=str,
help="path where to save the trained model",
)
group.add_argument(
"-l",
"--log_path",
required=False,
type=str,
help="path where to save the TensorBoard logs",
)
group.add_argument(
"-m",
"--model_name",
required=False,
type=str,
help="filename for the trained model",
)
group.add_argument(
"-r",
"--result_path",
required=True,
type=str,
help="path where to save the actual and predicted labels",
)
arguments = parser.parse_args()
return arguments
def main(arguments):
if arguments.operation == "train":
train_features, train_labels = data.load_data(dataset=arguments.train_dataset)
validation_features, validation_labels = data.load_data(
dataset=arguments.validation_dataset
)
train_size = train_features.shape[0]
validation_size = validation_features.shape[0]
model = Svm(
alpha=LEARNING_RATE,
batch_size=BATCH_SIZE,
svm_c=arguments.svm_c,
num_classes=N_CLASSES,
num_features=SEQUENCE_LENGTH,
)
model.train(
checkpoint_path=arguments.checkpoint_path,
log_path=arguments.log_path,
model_name=arguments.model_name,
epochs=arguments.num_epochs,
result_path=arguments.result_path,
train_data=[train_features, train_labels],
train_size=train_size,
validation_data=[validation_features, validation_labels],
validation_size=validation_size,
)
elif arguments.operation == "test":
test_features, test_labels = data.load_data(
dataset=arguments.validation_dataset
)
test_size = test_features.shape[0]
test_features = test_features[: test_size - (test_size % BATCH_SIZE)]
test_labels = test_labels[: test_size - (test_size % BATCH_SIZE)]
test_size = test_features.shape[0]
Svm.predict(
batch_size=BATCH_SIZE,
num_classes=N_CLASSES,
test_data=[test_features, test_labels],
test_size=test_size,
checkpoint_path=arguments.checkpoint_path,
result_path=arguments.result_path,
)
if __name__ == "__main__":
args = parse_args()
main(args)
| AFAgarap/gru-svm | svm_main.py | svm_main.py | py | 3,519 | python | en | code | 136 | github-code | 36 |
70938863144 | from math import fabs
class Graphics:
    """Fixed-size character frame buffer for terminal drawing.

    Cells are addressed in character coordinates; the x axis is stretched by
    RATIO columns per logical unit so shapes look less squashed in a terminal.
    """
    RATIO = 2
    #rectSymbol = "#"
    rectSymbol = "█"
    underSymbol = "="
    def __init__(self, w, h, ratio=2):
        # w/h are logical width/height; the buffer is w*ratio characters wide.
        self.HEIGHT = h
        self.WIDTH = w
        self.RATIO = ratio
        self.RATIO_WIDTH = w * ratio
        self.lines = []
        for i in range(self.HEIGHT):
            line = []
            for j in range(self.RATIO_WIDTH):
                line.append(" ")
            self.lines.append(line)
    def render(s):
        # Print the whole buffer row by row to stdout.
        for i in range(s.HEIGHT):
            for j in range(s.RATIO_WIDTH):
                print(s.lines[i][j], end='')
            print()
    def drawBounds(self):
        # Outline the full drawing area.
        self.drawRectangle(0, 0, self.WIDTH, self.HEIGHT)
    def clear(s):
        # Reset every cell to a blank space.
        for i in range(s.HEIGHT):
            for j in range(s.RATIO_WIDTH):
                s.lines[i][j] = " "
    def drawRectangle(s, x, y, w, h,clearWithin=True):
        # Outline a w*h rectangle at logical (x, y); optionally blank the interior first.
        if clearWithin:
            s.drawFullRectangle(x,y,w,h," ")
        x = int(s.RATIO * x)
        w = int(s.RATIO * w)
        for xx in range(w):
            s.pC(x + xx, y, s.rectSymbol)
            s.pC(x + xx, y + h - 1, s.rectSymbol)
        for yy in range(h):
            s.pC(x, y + yy, s.rectSymbol)
            s.pC(x + w - 1, y + yy, s.rectSymbol)
    def drawFullRectangle(s, x, y, w, h,char):
        # Fill a w*h rectangle at logical (x, y) with char.
        x = int(s.RATIO * x)
        w = int(s.RATIO * w)
        for xx in range(w):
            xxx=x+xx
            for yy in range(h):
                yyy = y + yy
                s.pC(xxx,yyy, char)
    def pC(self, x, y, char):
        # "Put char": plot one cell, silently ignoring out-of-bounds coordinates.
        if 0 <= x < self.RATIO_WIDTH and 0 <= y < self.HEIGHT:
            self.lines[y][x] = char
    def drawBitmap(s,x,y,array):
        # Blit rows of characters starting at logical (x, y).
        # NOTE(review): the last character of each row is skipped
        # (range(len(line)-1)) — presumably to drop a trailing newline; confirm.
        x = int(s.RATIO * x)
        for yy in range(len(array)):
            line = array[yy]
            for xx in range(len(line)-1):
                s.pC(x + xx, y+yy, line[xx])
    def drawText(s, x, y, text, centered=0, underline=False): # centered: 0->left, 1->center, 2->right
        # Draw text anchored at logical x; optional '=' underline one row below.
        if underline:
            string = len(text) * Graphics.underSymbol
            s.drawText(x, y + 1, string, centered)
        x = int(s.RATIO * x)
        if centered == 0:
            for xx in range(len(text)):
                s.pC(x + xx, y, text[xx])
        elif centered == 2:
            for xx in range(len(text)):
                s.pC(x - len(text) + xx, y, text[xx])
        else:
            padding = int(len(text) / 2)
            for xx in range(len(text)):
                s.pC(x - padding + xx, y, text[xx])
    def drawLine(s, x1, y1, x2, y2):
        # Straight line between logical endpoints; x coordinates are scaled by RATIO.
        x1 = int(s.RATIO * x1)
        x2 = int(s.RATIO * x2)
        if x1 > x2:
            # Normalise so we always step left-to-right.
            x1, x2 = x2, x1
            y1, y2 = y2, y1
        switchY = y1 > y2
        width = x2 - x1
        height = fabs(y2 - y1)
        for x in range(width):
            mX = int(x / width * width)
            mY = int(((1 - x / width) if switchY else (x / width)) * height)
            s.pC(mX + x1, mY + min(y1, y2), Graphics.rectSymbol)
class Renderer:
    """Keeps two draw lists (background first, then foreground) and paints
    them onto a Graphics surface on each render pass."""

    def __init__(self, g):
        self.g = g
        self.listBack = []
        self.list = []

    def render(self):
        """Wipe the surface, draw background then foreground, and flush."""
        self.g.clear()
        for layer in (self.listBack, self.list):
            for drawable in layer:
                drawable.draw(self.g)
        self.g.render()

    def add(self, drawable):
        """Append to the foreground layer, de-duplicating first."""
        self.remove(drawable)
        self.list.append(drawable)

    def addBack(self, drawable):
        """Append to the background layer, de-duplicating first."""
        self.remove(drawable)
        self.listBack.append(drawable)

    def remove(self, drawable):
        """Drop the drawable from whichever layer holds it; no-op otherwise.

        The foreground list is tried first, then the background list —
        matching the original try/except ValueError cascade.
        """
        if drawable in self.list:
            self.list.remove(drawable)
        elif drawable in self.listBack:
            self.listBack.remove(drawable)

    def clear(self):
        """Forget every registered drawable in both layers."""
        self.list.clear()
        self.listBack.clear()
| Cooble/BirthdayPie | graphics.py | graphics.py | py | 3,818 | python | en | code | 0 | github-code | 36 |
26376325124 | #!/usr/bin/env python
# coding: utf-8
# In[12]:
# Question 1 c)
# Author: Ilyas Sharif
import numpy as np
import matplotlib.pyplot as plt
# Defining the parameters that didn't change (same as code for before)
# Model constants: friction velocity v_f, natural frequency omega_0, damping
# time tau, friction strength gamma, and integration window [a, b] in N steps.
v_f = 0.1
omega_0 = 1
tau = 1
gamma = 0.5
a = 0.0
b = 100.0
N = 10000
h = (b-a)/N
tpoints = np.arange(a, b,h)
# Initial conditions: position x_0 and velocity y_0.
x_0 = 0
y_0 = 0
# Defining the xpoints and ypoints array.
# Reference pull speed; the runs below sweep multiples of it.
vp = v_f * np.log((gamma*tau)/v_f)
v_p = [0.1*vp,0.25*vp, 0.5*vp, 0.75*vp, 1*vp, 1.25*vp, 1.5*vp]
indexing = [0.1,0.25,0.5,0.75,1,1.25,1.5]
for i in range(len(v_p)):
    # NOTE(review): C is only used by the commented-out constant-velocity plot below.
    C = v_p[i]
    xpoints = []
    r = np.array([x_0, y_0], float)
    # Creating f(r,t) where r = (x, y = dx/dt)
    def f(r,t):
        x = r[0]
        y = r[1]
        fx = y
        # Restoring force toward the moving support v_p*t, linear damping,
        # and an exponential velocity-dependent friction term.
        fy = -(omega_0**2)*((x) - v_p[i] * t) - (y)/tau - gamma*np.exp(-np.abs(y)/v_f)
        return np.array([fx, fy], float)
    # Creating r array and computing RK4 method (copied from Newman odesim.py)
    for t in tpoints:
        xpoints.append(r[0])
        k1 = h*f(r,t)
        k2 = h*f(r+0.5*k1,t+0.5*h)
        k3 = h*f(r+0.5*k2,t+0.5*h)
        k4 = h*f(r+k3,t+h)
        r += (k1+2*k2+2*k3+k4)/6
    plt.plot(tpoints, xpoints, label = '$v_p$ = ' + str(indexing[i]) + '$v_p$')
    # I'm going to comment this out, but if you want to see the constant velocity
    # solutions that each of them oscillate around, feel free to comment out the
    # 2 lines below :)
    #x0 = -(1/omega_0**2)*(C/tau + gamma*np.exp(-C/v_f)) + (v_p[i]*tpoints)
    #plt.plot(tpoints,x0, linestyle = ":", color = 'k')
plt.title("Comparison of Different Choices for $v_p$")
plt.xlabel(" time (seconds) ")
plt.ylabel(" position (meters) ")
plt.legend()
plt.xlim(0,100)
plt.show()
# In[ ]:
| SpencerKi/Computational-Methods | Differentiation and Differential Equations/Lab06_Q1_c.py | Lab06_Q1_c.py | py | 1,726 | python | en | code | 0 | github-code | 36 |
33660433557 | import os
from flask import Flask, Response, request, current_app, url_for, send_from_directory
from fishapiv2.database.models import *
from flask_restful import Resource
from werkzeug.utils import secure_filename
from fishapiv2.resources.helper import *
from fishapiv2.resources.controller.authentication import *
import datetime
import json
from mongoengine import ObjectIdField
from flask_jwt_extended import jwt_required
from flask_jwt_extended import get_jwt_identity
from bson.objectid import ObjectId
class PondsApi(Resource):
    """Collection endpoint: list the authenticated farm's ponds or create one."""

    @jwt_required()
    # @token_req
    def get(self):
        """Return every pond of the caller's farm as a JSON list.

        An aggregation pipeline enriches each pond with its activation
        history, per-fish-type survivor counts, computed area/volume, an
        image link, and a display status (Aktif / Tidak Aktif / Panen).
        """
        try:
            # Base URL of the image endpoint; the pond id is appended below.
            url = url_for('pondimageapidummy', _external=True)
            current_user = get_jwt_identity()
            farm = str(current_user['farm_id'])
            farm_id = ObjectId(farm)
            # farm = farm_id.objectId
            pipeline = [
                {"$match": {"farm_id": farm_id}},
                {"$sort": {"status": 1,"alias": 1}},
                # Join pond activations, newest first, each with its fish_log
                # totals grouped by fish type.
                {'$lookup': {
                    'from': 'pond_activation',
                    'let': {"pondid": "$_id"},
                    'pipeline': [
                        {'$match': {'$expr': {'$and': [
                            {'$eq': ['$pond_id', '$$pondid']},
                        ]}}},
                        {"$sort": {"activated_at": -1}},
                        {'$lookup': {
                            'from': 'fish_log',
                            'let': {"pond_activation_id": "$_id"},
                            'pipeline': [
                                {'$match': {
                                    '$expr': {'$and': [
                                        {'$eq': ['$pond_activation_id',
                                                 '$$pond_activation_id']},
                                    ]}
                                }},
                                {"$project": {
                                    "created_at": 0,
                                    "updated_at": 0,
                                }},
                                # Sum fish counts per fish type for this activation.
                                {"$group": {
                                    "_id": "$fish_type",
                                    "fish_type": {"$first": "$fish_type"},
                                    "fish_amount": {"$sum": "$fish_amount"}
                                }},
                                {"$sort": {"fish_type": -1}},
                                {"$project": {
                                    "_id": 0,
                                }},
                            ],
                            'as': 'fish_alive'
                        }},
                        {"$addFields": {
                            "activated_at": {'$dateToString': {
                                'format': "%d-%m-%Y", 'date': "$activated_at"}},
                            "deactivated_at": {'$dateToString': {
                                'format': "%d-%m-%Y", 'date': "$deactivated_at"}},
                            "total_fish_alive": {"$sum": "$fish_alive.fish_amount"}
                        }},
                        {"$project": {
                            "pond_id": 0,
                            "feed_type_id": 0,
                            "created_at": 0,
                            "updated_at": 0,
                        }},
                    ],
                    'as': 'pond_activation_list'
                }},
                # Surface area: length*width for rectangular ("persegi") ponds,
                # otherwise 22*d*d/28 ≈ (22/7)*d^2/4, i.e. a circle of diameter d.
                {"$addFields": {
                    "area": {"$cond": {
                        "if": {"$eq": ["$shape", "persegi"]},
                        "then": {"$multiply": ["$length", "$width"]},
                        "else": {"$divide": [
                            {"$multiply": [float(22), "$diameter", "$diameter"]},
                            28
                        ]},
                    }},
                    "image_link":{"$concat": [url, "/", {"$toString": "$_id"}]}
                }},
                # Status: Aktif when flagged active; Tidak Aktif when inactive with
                # no activation history; otherwise Panen (harvested).
                {"$addFields": {
                    "volume": {"$multiply": ["$area", "$height"]},
                    "last_activation": {"$first": "$pond_activation_list"},
                    "status": {
                        "$switch":
                        {
                            "branches": [
                                {
                                    "case": {"$eq": ["$isActive", True]},
                                    "then": "Aktif"
                                },
                                {
                                    "case": {"$and": [
                                        {"$eq": ["$isActive", False]},
                                        {"$lt": [
                                            {"$size": "$pond_activation_list"}, 1]}
                                    ]},
                                    "then": "Tidak Aktif"
                                }
                            ],
                            "default": "Panen"
                        }
                    },
                }},
                {"$addFields": {
                    "activation_date": "$last_activation.activated_at",
                    "fish_alive": "$last_activation.total_fish_alive",
                }},
                {"$project": {
                    "pond_id": 0,
                    "feed_type_id": 0,
                    "created_at": 0,
                    "updated_at": 0,
                    "pond_activation_list": 0,
                    "last_activation": 0,
                }}
            ]
            ponds = Pond.objects.aggregate(pipeline)
            # token = request.headers['Authorization']
            # token = str.replace(str(token), 'Bearer ', '')
            # tokens = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=["HS256"])
            # user = _ruleUserObj.getRuleUser(tokens["sub"]["username"])
            # token = request.form.get('token')
            # current_user = get_jwt_identity()
            # user = json.dumps(current_user, default=str)
            # usernow = jsonify(user)
            # pondlist = Pond.objects.get(farm_id=current_user['farm_id'])
            list_ponds = list(ponds)
            # farm_id = list_ponds.alias
            response = json.dumps(list_ponds, default=str)
            # response = response[0].alias
            return Response(response, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)

    @jwt_required()
    def post(self):
        """Create a pond for the caller's farm from form data.

        Round ("bundar") ponds record a diameter; every other shape records
        length and width. New ponds always start as 'Tidak Aktif'.
        """
        try:
            current_user = get_jwt_identity()
            farm = str(current_user['farm_id'])
            shape = request.form.get("shape", None)
            if shape == "bundar":
                body = {
                    "farm_id": farm,
                    "alias": request.form.get("alias", None),
                    "location": request.form.get("location", None),
                    "shape": request.form.get("shape", None),
                    "material": request.form.get("material", None),
                    "status": 'Tidak Aktif',
                    "diameter": request.form.get("diameter", None),
                    "height": request.form.get("height", None),
                    "build_at": request.form.get("build_at", None),
                }
            else :
                body = {
                    "farm_id": farm,
                    "alias": request.form.get("alias", None),
                    "location": request.form.get("location", None),
                    "shape": request.form.get("shape", None),
                    "material": request.form.get("material", None),
                    "length": request.form.get("length", None),
                    "width": request.form.get("width", None),
                    "status": 'Tidak Aktif',
                    "height": request.form.get("height", None),
                    "build_at": request.form.get("build_at", None),
                }
            pond = Pond(**body).save()
            id = pond.id
            response = {"message": "success add pond", "id": id}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)
class PondApi(Resource):
    """CRUD operations on a single pond identified by its ObjectId."""

    def put(self, id):
        """Update arbitrary fields of a pond from the submitted form data.

        Returns 200 with the pond id on success, 400 with the error message
        otherwise. (Removed an unreachable bare ``return`` that followed the
        try/except — both branches already return.)
        """
        try:
            body = request.form.to_dict(flat=True)
            Pond.objects.get(id=id).update(**body)
            response = {"message": "success change data pond", "id": id}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)

    def delete(self, id):
        """Delete a pond; 400 with the error message if it does not exist."""
        try:
            Pond.objects.get(id=id).delete()
            response = {"message": "success delete pond"}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)

    def get(self, id):
        """Return a single pond document serialized as JSON."""
        try:
            pond = Pond.objects.get(id=id).to_mongo()
            response_dump = json.dumps(pond, default=str)
            return Response(response_dump, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)
class PondImageApiDummy(Resource):
    # Placeholder endpoint: registered so url_for('pondimageapidummy') can build
    # the base image URL used by PondsApi.get; it serves no content itself.
    def get(self):
        pass
class PondImageApi(Resource):
    """Serve and replace the image attached to a pond."""

    def get(self, id):
        """Stream the pond's image file from the upload directory."""
        pond = Pond.objects.get(id=id).to_mongo()
        path = os.path.join(current_app.instance_path,
                            current_app.config['UPLOAD_DIR'])
        return send_from_directory(path, pond['image_name'])

    def put(self, id):
        """Replace the pond's image with an uploaded file.

        Validates the upload, stores it under a timestamped name, deletes the
        previous image (unless it is the shared default), and records the new
        filename on the pond document.
        """
        try:
            file = request.files['image']
            if not file:
                response = {"message": "no file selected"}
                response = json.dumps(response, default=str)
                return Response(response, mimetype="application/json", status=400)
            if not allowed_file(file.filename):
                response = {"message": "file type not allowed"}
                response = json.dumps(response, default=str)
                return Response(response, mimetype="application/json", status=400)
            filename = secure_filename(file.filename)
            filename = pad_timestamp(filename)
            path = os.path.join(current_app.instance_path,
                                current_app.config['UPLOAD_DIR'])
            # exist_ok replaces the old try/except OSError: pass, which also
            # swallowed genuine failures such as permission errors.
            os.makedirs(path, exist_ok=True)
            filepath = os.path.join(path, filename)
            file.save(filepath)
            # database
            objects = Pond.objects.get(id=id)
            pond = objects.to_mongo()
            old_image_name = pond["image_name"]
            if old_image_name != "default.jpg":
                # Best-effort cleanup: a missing old file must not abort the update.
                try:
                    os.remove(os.path.join(path, old_image_name))
                except OSError:
                    pass
            data = {
                "image_name": filename
            }
            objects.update(**data)
            id = objects.id
            response = {"message": "success change image", "id": id}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=200)
        except Exception as e:
            response = {"message": str(e)}
            response = json.dumps(response, default=str)
            return Response(response, mimetype="application/json", status=400)
| MauL08/AquaBreedingAPI-V2 | fishapiv2/resources/controller/pond.py | pond.py | py | 12,401 | python | en | code | 0 | github-code | 36 |
12803276455 | import os, sys, io, math
class SequenceReader:
    """Read the first sequence and its '>' descriptor from a FASTA-like file."""

    def __init__(self, file_path):
        self.file_path = file_path

    def set_file_path(self, file_path):
        self.file_path = file_path

    def get_file_path(self):
        return self.file_path

    def read_sequence(self):
        """Return (descriptor, sequence) for the first sequence in the file.

        The descriptor is the last '>' header seen before the first data line;
        '#' comment lines are skipped. Fix: both results are initialized to
        None so a file without a '>' header no longer raises
        UnboundLocalError.
        """
        sequence_descriptor = None
        sequence = None
        with open(self.file_path) as f:
            lines = f.read().strip().splitlines()
        for line in lines:
            if line.startswith(">"):
                sequence_descriptor = line
            elif not line.startswith("#"):
                sequence = line
                break
        return sequence_descriptor, sequence
class Utils:
    """Stateless helpers: argmax, interval parsing and confusion-matrix counts."""

    @staticmethod
    def find_max_index(l):
        """Return the index of the first maximal element of *l*."""
        return max(range(len(l)), key=l.__getitem__)

    @staticmethod
    def content_to_dict(content):
        """Parse 'LABEL start end ...' lines into {(start-1, end-1): state}.

        The state is the line's leading character with 'S' folded into 'E';
        positions are converted from 1-based to 0-based.
        """
        mapping = {}
        for raw in content.splitlines():
            line = raw.strip()
            if not line:
                continue
            fields = line.split()
            span = (int(fields[1]) - 1, int(fields[2]) - 1)
            mapping[span] = line[0].replace("S", "E")
        return mapping

    @staticmethod
    def count_for_confusion_matrix(truth_dict, prediction_dict, truth_key, prediction_key):
        """Count positions labeled *truth_key* in truth and *prediction_key* in prediction."""
        lo, hi = min(truth_dict), max(truth_dict)
        return sum(
            1
            for pos in range(lo, hi + 1)
            if prediction_dict[pos] == prediction_key and truth_dict[pos] == truth_key
        )

    @staticmethod
    def count_individual_confusion_statistics(truth_dict, prediction_dict, key):
        """Return (TP, TN, FP, FN) for the one-vs-rest class *key*."""
        tallies = [0, 0, 0, 0]  # TP, TN, FP, FN
        for pos in range(min(truth_dict), max(truth_dict) + 1):
            in_truth = truth_dict[pos] == key
            in_pred = prediction_dict[pos] == key
            if in_truth and in_pred:
                tallies[0] += 1
            elif not in_truth and not in_pred:
                tallies[1] += 1
            elif in_pred:
                tallies[2] += 1
            else:
                tallies[3] += 1
        return tuple(tallies)

    @staticmethod
    def path_to_position_dict(path):
        """Map each index of *path* to the character at that index."""
        return dict(enumerate(path))

    @staticmethod
    def generate_position_dict(d, length):
        """Expand interval->state dict *d* into a per-position dict of *length* entries.

        Positions not covered by any interval are labeled 'N'.
        """
        positions = {}
        cursor = 0
        for start, end in sorted(d):
            for gap in range(cursor, start):
                positions[gap] = 'N'
            for covered in range(start, end + 1):
                positions[covered] = d[(start, end)]
            cursor = end + 1
        for tail in range(cursor, length):
            positions[tail] = 'N'
        return positions
class ViterbiAlgorithm:
    """Viterbi decoding of the most likely state path for *sequence* under *hmm*.

    All arithmetic is in log2 space, so probability products become sums
    and underflow is avoided.
    """
    def __init__(self, hmm, sequence):
        self.hmm = hmm
        self.sequence = sequence
        self.column_count = len(self.sequence)
        self.states_list = self.hmm.get_states()
        # matrix[i][j]: best log-probability of any path ending in state i at position j.
        self.matrix = [[0 for j in range(len(sequence))] for i in range(len(self.states_list))]
        # arrow_map[(i, j)]: row index in column j-1 the best path came from (backpointer).
        self.arrow_map = {}
        self.fill_in_the_matrix()
    def fill_in_the_matrix(self):
        """Populate the DP matrix column by column (forward pass)."""
        # First column: transition from START plus emission of the first symbol.
        j = 0
        for i in range(len(self.states_list)):
            state = self.states_list[i]
            self.matrix[i][j] = self.hmm.tlp('START', state) + self.hmm.elp(state, self.sequence[j])
        for j in range(1, self.column_count):
            aa = self.sequence[j] # aa stands for amino_acid
            for i in range(len(self.states_list)):
                state = self.states_list[i]
                self.matrix[i][j] = self.hmm.elp(state, aa)
                # Best predecessor: max over previous column plus transition cost.
                list_to_look_for_max = []
                for k in range(len(self.states_list)):
                    inner_state = self.states_list[k]
                    list_to_look_for_max.append(self.matrix[k][j - 1] + self.hmm.tlp(inner_state, state))
                max_index = Utils.find_max_index(list_to_look_for_max)
                self.arrow_map[(i, j)] = max_index
                self.matrix[i][j] += list_to_look_for_max[max_index]
                if j == self.column_count - 1: # if we are in the last column, take into account the end state probability
                    self.matrix[i][j] += self.hmm.tlp(state, 'END')
    def construct_path(self):
        """Backtrack through arrow_map; return (state_path, log2_probability)."""
        self.path = ""
        list_to_look_for_max = []
        for i in range(len(self.states_list)):
            list_to_look_for_max.append(self.matrix[i][self.column_count - 1])
        max_index = Utils.find_max_index(list_to_look_for_max)
        j = self.column_count - 1
        i = max_index
        log_probability = list_to_look_for_max[max_index]
        # Walk backpointers from the last column, prepending states as we go.
        while j > 0:
            to_go = self.arrow_map[(i, j)]
            self.path = self.states_list[i] + self.path
            i = to_go
            j -= 1
        self.path = self.states_list[i] + self.path
        return self.path, log_probability
class HMM:
    """3-state (H/E/T) hidden Markov model over amino-acid sequences.

    Trained by counting transitions and emissions from labeled examples;
    all probabilities are stored as log2 values.
    """
    def __init__(self, training_set_path):
        self.load_training_set(training_set_path)
        self.preprocess_training_set()
        # X and the lowercase letters are for the letters found in the training set
        self.amino_acid_alphabet = "ACDEFGHIKLMNPQRSTVWYXabcdegfhijklmnopqrutvw"
        # Per-state emission counts; converted to log2 probabilities in train().
        self.states = {'H': {key: 0 for key in self.amino_acid_alphabet},
                       'E': {key: 0 for key in self.amino_acid_alphabet},
                       'T': {key: 0 for key in self.amino_acid_alphabet}}
        # Transition counts keyed by (from_state, to_state), including START/END.
        self.transitions = {}
        for state_i in "HET":
            for state_j in "HET":
                self.transitions[(state_i, state_j)] = 0
        for state in "HET":
            self.transitions[("START", state)] = 0
        for state in "HET":
            self.transitions[(state, "END")] = 0
        self.train()
    def get_states(self):
        """Return the tuple of hidden state names ('H', 'E', 'T')."""
        return tuple("HET")
    def tlp(self, from_state, to_state):
        # tlp stands for transition_log_probability
        return self.transitions[(from_state, to_state)]
    def elp(self, state, amino_acid):
        # elp stands for emission_log_probability
        return self.states[state][amino_acid]
    def load_training_set(self, training_set_path):
        """Read training samples: each '>' header line is followed by a
        sequence line and a structure line."""
        with open(training_set_path) as file:
            training_set = file.read().strip().splitlines()
        self.training_sequences = {}
        index_list = [i for i in range(len(training_set)) if training_set[i].startswith(">")]
        for index in index_list:
            self.training_sequences[training_set[index].strip()] = (training_set[index + 1].strip(), training_set[index + 2].strip())
        print(f"Loaded {len(self.training_sequences)} training samples.")
    def preprocess_training_set(self):
        """Collapse detailed structure labels (G/H/I -> H, B/E -> E, T/S/L -> T)
        and drop positions labeled '_' from both sequence and structure."""
        print("Preprocessing training data...", end = ' ')
        sys.stdout.flush()
        for key, sequence_structure_tuple in self.training_sequences.items():
            sequence, structure = sequence_structure_tuple
            preprocessed_sequence_io = io.StringIO()
            preprocessed_structure_io = io.StringIO()
            for i in range(len(sequence)):
                structure_char = structure[i]
                sequence_char = sequence[i]
                if structure_char != "_":
                    preprocessed_sequence_io.write(sequence_char)
                    if structure_char in ('G', 'H', 'I'):
                        preprocessed_structure_io.write('H')
                    elif structure_char in ('B', 'E'):
                        preprocessed_structure_io.write('E')
                    elif structure_char in ('T', 'S', 'L'):
                        preprocessed_structure_io.write('T')
            self.training_sequences[key] = (preprocessed_sequence_io.getvalue(), preprocessed_structure_io.getvalue())
        print("Done!")
    def train(self):
        """Count transitions/emissions, then convert counts to log2 probabilities.

        Emissions use add-one (Laplace) smoothing over the full alphabet;
        transitions are plain maximum-likelihood estimates.
        """
        print ("Training...", end = ' ')
        sys.stdout.flush()
        inner_transition_counts = {'H': 0, 'E': 0, 'T': 0}
        start_transition_count = 0
        for key, sequence_structure_tuple in self.training_sequences.items():
            sequence, structure = sequence_structure_tuple
            for index in range(len(sequence)):
                sequence_char = sequence[index]
                structure_char = structure[index]
                if index == 0:
                    start_transition_count += 1
                    self.transitions[('START', structure_char)] += 1
                else:
                    inner_transition_counts[structure[index - 1]] += 1
                    self.transitions[(structure[index - 1], structure_char)] += 1
                if index == len(sequence) - 1:
                    inner_transition_counts[structure_char] += 1
                    self.transitions[(structure_char, 'END')] += 1
                self.states[structure_char][sequence_char] += 1
        for state, emissions in self.states.items():
            summation = sum(emissions.values())
            for amino_acid, count in emissions.items():
                # Add-one smoothing keeps unseen symbols at a finite log-probability.
                self.states[state][amino_acid] = math.log2((count + 1) / (summation + len(self.amino_acid_alphabet)))
        for state_i in "HET":
            for state_j in "HET":
                self.transitions[(state_i, state_j)] = math.log2(self.transitions[(state_i, state_j)] / inner_transition_counts[state_i])
        for state in "HET":
            self.transitions[("START", state)] = math.log2(self.transitions[("START", state)] / start_transition_count)
        for state in "HET":
            self.transitions[(state, "END")] = math.log2(self.transitions[(state, "END")] / inner_transition_counts[state])
        print("Done!")
class Main:
    """Command-line driver: train the HMM, decode one sequence, report stats."""
    def __init__(self):
        try:
            training_set_path = sys.argv[1]
            sequence_path = sys.argv[2]
        except IndexError:
            self.print_usage()
            sys.exit()
        # Optional third argument: ground-truth secondary structure for evaluation.
        truth_interval_dict = None
        if len(sys.argv) > 3:
            secondary_structure_path = sys.argv[3]
            with open(secondary_structure_path) as f:
                truth_interval_dict = Utils.content_to_dict(f.read().strip())
        sequence_reader = SequenceReader(sequence_path)
        header, sequence = sequence_reader.read_sequence()
        self.hmm = HMM(training_set_path)
        self.viterbi_algorithm = ViterbiAlgorithm(self.hmm, sequence)
        path, log_probability = self.viterbi_algorithm.construct_path()
        print("\nInput protein sequence:\n" + "-"*30 + "\n" + header + "\n" + sequence)
        print("\nThe path predicted by HMM:\n" + "-"*30 + "\n" + path)
        print("\nLog2 probability of this path:\n" + "-"*30 + "\n" + str(log_probability))
        # When a truth file was supplied, print the 3x3 and per-class confusion counts.
        if truth_interval_dict:
            truth_dict = Utils.generate_position_dict(truth_interval_dict, len(sequence))
            prediction_dict = Utils.path_to_position_dict(path)
            print("\n3x3 confusion matrix computations:")
            print("True".ljust(10), "Predicted".ljust(10), "Count".ljust(10))
            for key_i in "HET":
                for key_j in "HET":
                    print (key_i.ljust(10), key_j.ljust(10), str(Utils.count_for_confusion_matrix(truth_dict, prediction_dict, key_i, key_j)).ljust(10))
            print("Individual confusion matrix computations:")
            for key in "HET":
                print(f"Individual confusion matrix computations for {key}:")
                print("TP".ljust(10), "TN".ljust(10), "FP".ljust(10), "FN".ljust(10))
                tp, tn, fp, fn = Utils.count_individual_confusion_statistics(truth_dict, prediction_dict, key)
                print(str(tp).ljust(10), str(tn).ljust(10), str(fp).ljust(10), str(fn).ljust(10))
    def print_usage(self):
        # Shown when the two mandatory positional arguments are missing.
        print(f"Usage: python3 {os.path.split(sys.argv[0])[-1]} <training_set_path> <sequence_path> <secondary_structure_path>")
if __name__ == "__main__":
    # Entry point: constructing Main runs the whole pipeline (train, decode, report).
    main = Main()
| ender-s/HMM-Based-Secondary-Structure-Prediction | hmm_based_predictor.py | hmm_based_predictor.py | py | 12,161 | python | en | code | 0 | github-code | 36 |
26540686747 | from random import randint
import xlrd
from datetime import datetime
import matplotlib.pyplot as plt
# this should be done with a database, so I should not put too much effort into making this program easy to use
PATH = "/home/yannick/git-repos/MyPython/math programs/investi.xls" # .xls only
DATA_RANGE = (15, 559) # tuple of (start_num, end_num) !starting from 0, subtract 1 from line number
COLUMNS = {"year": "B", "id": "C", "A_or_R": "E", "m_score": "F", "e_score": "G"} # dict with "data name": "column charachter (A-ZZ)"
MODULO_NUMBER = 10
# For blank values, I just set a score of 0
def make_data_list(sheet, data_range, columns):
    """Read the configured cell columns for each row in *data_range*.

    Returns one list per row, holding the cell values in the insertion
    order of the keys in *columns* (key -> column index).
    """
    return [
        [sheet.cell_value(rowx=row, colx=columns[key]) for key in columns]
        for row in range(data_range[0], data_range[1])
    ]
def column_to_number(col):
    """Convert a spreadsheet column name ('A'..'ZZ', any case) to a 0-based index.

    Returns -1 for names that are not one or two letters long.
    """
    name = col.upper()
    if len(name) == 1:
        return ord(name) - ord("A")
    if len(name) == 2:
        # The first letter contributes whole blocks of 26 columns ('A' -> +26).
        return 26 * (ord(name[0]) - ord("A") + 1) + (ord(name[1]) - ord("A"))
    return -1
def convert_columns(columns):
    """Translate every column letter in *columns* to its 0-based index, in place.

    The same (mutated) mapping is returned for convenience.
    """
    for key, letters in list(columns.items()):
        columns[key] = column_to_number(letters)
    return columns
def replace_something(list, replace, replaceWith):
    """Replace every cell equal to *replace* in the 2-D list with *replaceWith*.

    The list is modified in place and also returned.

    Fix: cells are now compared with ``==`` instead of ``is`` — identity
    comparison only happened to work for interned values such as short
    strings and small ints, and silently missed equal floats or
    non-interned strings.
    """
    for row in list:
        for index, cell in enumerate(row):
            if cell == replace:
                row[index] = replaceWith
    return list
def make_columns_for_list(columns):
    """Map each key of *columns* to its positional index (insertion order)."""
    return {key: position for position, key in enumerate(columns)}
def get_average_scores(list, english): # returns a tuple of (A scores, R scores)
    """Average the English or Math score per admission group ('A' / 'R').

    Relies on the module-level ``columns_for_list`` mapping to locate cells.
    Returns None when either group is empty.
    """
    key = "e_score" if english else "m_score"
    totals = {"A": 0, "R": 0}
    counts = {"A": 0, "R": 0}
    for student in list:
        group = student[columns_for_list["A_or_R"]]
        if group in totals:
            totals[group] += student[columns_for_list[key]]
            counts[group] += 1
        else:
            print("wrong a or r")
    if counts["R"] == 0 or counts["A"] == 0:
        return None
    return (totals["A"] / counts["A"], totals["R"] / counts["R"])
def get_error(real_value, value):
    """Return the signed percentage deviation of *value* from *real_value*."""
    deviation = value - real_value
    return (deviation / real_value) * 100
# NOTE(review): stray debug print of a random int — left in place to preserve output.
print(randint(0,6))
start_time = datetime.now()
book = xlrd.open_workbook(PATH)
sh = book.sheet_by_index(0)
# Resolve column letters to indices once; COLUMNS is mutated in place.
columns = convert_columns(COLUMNS)
columns_for_list = make_columns_for_list(columns)
list_of_students = make_data_list(sh, DATA_RANGE, columns)
list_of_students = replace_something(list_of_students, '_', 0) # what should a bad entry be replaced with?? or just discard the entire data point?
# Systematic sample: keep every MODULO_NUMBER-th student.
new_list = []
for i in range(len(list_of_students)):
    if i % MODULO_NUMBER == 0:
        new_list.append(list_of_students[i])
average_real_scores = get_average_scores(list_of_students, True)
average_sample_scores = get_average_scores(new_list, True)
print("This took a time of {} ".format(datetime.now()-start_time))
print(len(list_of_students))
print(len(new_list))
# NOTE(review): the English averages below were already computed above and are
# recomputed unchanged; only the Math sample average is new here.
average_sample_scores = get_average_scores(new_list, True)
average_sample_scores_M = get_average_scores(new_list, False)
average_real_scores = get_average_scores(list_of_students, True)
error_A = get_error(average_real_scores[0], average_sample_scores[0])
error_R = get_error(average_real_scores[1], average_sample_scores[1])
print("The average sample English scores are {:.4} for A-students and {:.4} for R-students".format(average_sample_scores[0], average_sample_scores[1]))
print("The average sample Math scores are {:.4} for A-students and {:.4} for R-students".format(average_sample_scores_M[0], average_sample_scores_M[1]))
print("The sampled and real value differ by {:.2} percent for A-students and {:.2} percent for R-students".format(error_A, error_R))
| su595/MyPython | math programs/statistics.py | statistics.py | py | 4,798 | python | en | code | 2 | github-code | 36 |
9924072616 | import os
import re
import sys
from lib.instruction import Instruction, AInstruction, CInstruction, LInstruction
from typing import Generator, Tuple
class Parser:
    """
    Parse the Xxx.asm into stream of instructions.
    - read source file
    - understand the format of input file
    - break each into different components
        - C: dest, comp, jump
        - A: value
    """

    def __init__(self, path: str):
        self._path = path

    def _get_clean_line(self, line):
        """Strip surrounding whitespace and comments from one raw source line."""
        return self.strip_comments(line.strip())

    def _parse_c_instruction(self, line):
        """Split a C-instruction into its dest/comp/jump components.

        Accepted shapes: ``dest=comp;jump``, ``comp;jump``, ``dest=comp``
        and a bare ``comp``.

        Raises:
            ValueError: if the line contains more than one '=' or ';'.
        """
        if line.count(";") > 1:
            raise ValueError('line format error, should not have more than one ";" ')
        if line.count("=") > 1:
            raise ValueError('line format error, should not have more than one "="')
        if "=" in line and ";" in line:
            # D=M; JMP
            dest, tail = line.split("=")
            comp, jump = tail.split(";")
            return CInstruction(dest=dest.strip(), comp=comp.strip(), jump=jump.strip())
        if ";" in line:
            # M; JMP
            comp, jump = line.split(";")
            return CInstruction(dest=None, comp=comp.strip(), jump=jump.strip())
        if "=" in line:
            # M=D
            dest, comp = line.split("=")
            return CInstruction(dest=dest.strip(), comp=comp.strip(), jump=None)
        # Bare computation, e.g. "D". (Removed an unreachable `raise ValueError`
        # that followed this exhaustive if/elif/else — every branch returned.)
        return CInstruction(dest=None, comp=line.strip(), jump=None)

    def _parse(self, line) -> Instruction:
        """Classify one clean line as an L-, A- or C-instruction."""
        if line.startswith("("):
            return LInstruction(name=line[1:-1].strip())
        if line.startswith("@"):
            return AInstruction(value=line[1:])
        return self._parse_c_instruction(line)

    def get_instruction(self) -> Generator[Tuple[int, Instruction], None, None]:
        """Yield one parsed Instruction per meaningful source line.

        NOTE(review): the annotation advertises ``Tuple[int, Instruction]``
        but only the instruction is yielded — kept as-is for compatibility,
        though the annotation looks stale.
        """
        with open(self._path, "r") as f:
            for line in f:
                line = self._get_clean_line(line)
                if not line:
                    continue
                yield self._parse(line)

    def strip_comments(self, text):
        """Remove //-style and /* */ comments from *text*.

        The pattern also consumes quoted strings — harmless for Hack
        assembly, which has no string literals.
        """
        return re.sub(
            "//.*?$|/\*.*?\*/|'(?:\\.|[^\\'])*'|\"(?:\\.|[^\\\"])*\"",
            "",
            text,
            flags=re.S,
        ).strip()
| mtx2d/nand2tetris | projects/06/src/lib/parser.py | parser.py | py | 2,398 | python | en | code | 0 | github-code | 36 |
26767660941 | import prodigy
from prodigy.components.loaders import JSONL
from prodigy.components.db import connect
import random
from datetime import datetime
# Load the custom front-end script once at import time; it is injected into the
# Prodigy UI through the recipe's "javascript" config key below.
with open('prodigy_recipe/template.js', 'r') as template:
    javascript_template = template.read()
@prodigy.recipe("classify-trees")
def classify_trees(dataset, source):
    """Prodigy recipe: manual bounding-box annotation of trees in images."""
    # Materialise the whole input stream up front — equivalent to the original
    # generator-plus-list round trip.
    stream = list(prodigy.get_stream(source))
    config = {
        "javascript": javascript_template,
        "force_stream_order": True,
        "feed_overlap": True,
        "batch_size": "5",
        "answer_batch_size": "1",
        "label": "Tree",
        "labels": ["Tree"],
        "image_manual_modes": ["rect"],
        "image_manual_show_labels": False,
    }
    return {
        "dataset": dataset,
        "stream": stream,
        "view_id": "image_manual",
        "exclude": [],
        "config": config,
    }
| aloui-mathias/campagne_prodigy | prodigy/prodigy_recipe/classify_tree_patches.py | classify_tree_patches.py | py | 977 | python | en | code | 0 | github-code | 36 |
21131295808 | """Nautobot Golden Config plugin application level metrics ."""
from django.conf import settings
from django.db.models import Count, F, Q
from nautobot.dcim.models import Device
from prometheus_client.core import GaugeMetricFamily
from nautobot_golden_config.models import ComplianceFeature, ComplianceRule, ConfigCompliance, GoldenConfig
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG.get("nautobot_golden_config", {})
def metric_gc_functions():
    """Calculate the successful vs the failed GC jobs for backups, intended & compliance.

    A job counts as successful when its last attempt timestamp equals its
    last success timestamp; failures are the remaining attempted jobs.

    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    backup_gauges = GaugeMetricFamily("nautobot_gc_backup_total", "Nautobot Golden Config Backups", labels=["status"])
    successful_backups = GoldenConfig.objects.filter(backup_last_attempt_date=F("backup_last_success_date")).count()
    attempted_backups = GoldenConfig.objects.filter(backup_last_attempt_date__isnull=False).count()
    backup_gauges.add_metric(labels=["success"], value=successful_backups)
    backup_gauges.add_metric(labels=["failure"], value=attempted_backups - successful_backups)
    yield backup_gauges
    intended_gauges = GaugeMetricFamily(
        "nautobot_gc_intended_total", "Nautobot Golden Config Intended", labels=["status"]
    )
    successful_intended = GoldenConfig.objects.filter(
        intended_last_attempt_date=F("intended_last_success_date")
    ).count()
    attempted_intended = GoldenConfig.objects.filter(intended_last_attempt_date__isnull=False).count()
    intended_gauges.add_metric(labels=["success"], value=successful_intended)
    intended_gauges.add_metric(labels=["failure"], value=attempted_intended - successful_intended)
    yield intended_gauges
    compliance_gauges = GaugeMetricFamily(
        "nautobot_gc_compliance_total", "Nautobot Golden Config Compliance", labels=["status"]
    )
    successful_compliance = GoldenConfig.objects.filter(
        compliance_last_attempt_date=F("compliance_last_success_date")
    ).count()
    attempted_compliance = GoldenConfig.objects.filter(compliance_last_attempt_date__isnull=False).count()
    compliance_gauges.add_metric(labels=["success"], value=successful_compliance)
    compliance_gauges.add_metric(labels=["failure"], value=attempted_compliance - successful_compliance)
    yield compliance_gauges
def metric_devices_per_feature():
    """Report how many devices are covered by each GC compliance feature.

    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    devices_gauge = GaugeMetricFamily(
        "nautobot_gc_devices_per_feature", "Nautobot Golden Config Devices per feature", labels=["device"]
    )
    for feature in ComplianceFeature.objects.all():
        feature_rules = ComplianceRule.objects.filter(feature=feature)
        if not feature_rules:
            # Feature without rules covers no devices.
            devices_gauge.add_metric(labels=[feature.name], value=0)
        else:
            # Devices are counted via the platform of the feature's first rule.
            device_count = Device.objects.filter(platform=feature_rules.first().platform).count()
            devices_gauge.add_metric(labels=[feature.name], value=device_count)
    yield devices_gauge
def metric_compliance_devices():
    """Report compliant vs non-compliant rule totals per compliance feature.

    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    compliance_gauge = GaugeMetricFamily(
        "nautobot_gc_compliant_devices_by_feature_total",
        "Nautobot Golden Config Compliance",
        labels=["feature", "compliant"],
    )
    queryset = ConfigCompliance.objects.values("rule__feature__slug").annotate(
        compliant=Count("rule__feature__slug", filter=Q(compliance=True)),
        non_compliant=Count("rule__feature__slug", filter=~Q(compliance=True)),
    )
    # Accumulate both counters per feature slug, preserving queryset order.
    counters = {}
    for row in queryset:
        slug = row["rule__feature__slug"]
        tally = counters.setdefault(slug, {"compliant": 0, "non_compliant": 0})
        tally["compliant"] += row["compliant"]
        tally["non_compliant"] += row["non_compliant"]
    for slug, tally in counters.items():
        compliance_gauge.add_metric(labels=[slug, "true"], value=tally["compliant"])
        compliance_gauge.add_metric(labels=[slug, "false"], value=tally["non_compliant"])
    yield compliance_gauge
metrics = [metric_gc_functions, metric_devices_per_feature, metric_compliance_devices]
| nautobot/nautobot-plugin-golden-config | nautobot_golden_config/metrics.py | metrics.py | py | 4,420 | python | en | code | 91 | github-code | 36 |
25606581626 | # Exercício Python 058: Melhore o jogo do DESAFIO 028 onde o computador vai "pensar" em um número entre 0 e 10. Só que
# agora o jogador vai tentar adivinhar até acertar, mostrando no final quantos palpites foram necessários para vencer
# Exercise 058: the computer "thinks" of a number between 0 and 10 and the
# player keeps guessing, with higher/lower hints, until correct; the total
# number of guesses is reported at the end.
from random import randint
from time import sleep
bot = randint(0, 10)  # the secret number
tentativas = 1  # guess counter; the first prompt below already counts as one
pensa = 'PENSANDO...'
print('O computador esta pensando em um numero entre 0 e 10')
# Print the "thinking" banner one character at a time for a typewriter effect.
for x in pensa:
    print(x, end='')
    sleep(0.3)
palpite = int(input('\nQual seu palpite?\n>'))
# Keep prompting with a lower ("Menos") / higher ("Mais") hint until correct.
while palpite != bot:
    if palpite > bot:
        palpite = int(input('Menos... tente novamente\n>'))
    else:
        palpite = int(input('Mais... tente novamente\n>'))
    tentativas += 1
print('Voce precisou de {} tentativas pra acertar. PARABENS!!!'.format(tentativas))
| MarcosSx/CursoGuanabara | Exercicios/mundo2/aula014_estruturaDeRepericaoWhile/ex058-JogoDaAdvinhacaoV2.py | ex058-JogoDaAdvinhacaoV2.py | py | 790 | python | pt | code | 0 | github-code | 36 |
38164787541 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add FOIRequest.title, relabel foi_text."""
    dependencies = [
        ('foi_requests', '0001_initial'),
    ]
    operations = [
        # New optional title column (blank allowed, so no default is needed).
        migrations.AddField(
            model_name='foirequest',
            name='title',
            field=models.CharField(max_length=255, blank=True),
        ),
        # Only the human-readable verbose_name changes; no schema impact.
        migrations.AlterField(
            model_name='foirequest',
            name='foi_text',
            field=models.TextField(verbose_name='Your FOI Request'),
        ),
    ]
| foilaundering/foilaundering | foilaundering/apps/foi_requests/migrations/0002_auto_20151122_1253.py | 0002_auto_20151122_1253.py | py | 587 | python | en | code | 0 | github-code | 36 |
11458787486 | import time
from decimal import getcontext,Decimal
# Start of the timing window.
start=time.time()
# Precision is total significant digits: 101 keeps the result accurate to
# 100 places after the decimal point.
getcontext().prec=101
# Exact Decimal arguments for Machin's formula:
# pi/4 = 4*arctan(1/5) - arctan(1/239).
par1=Decimal(1)/Decimal(5)
par2=Decimal(1)/Decimal(239)
# Helpers: one arctan series term, and one term of the overall sum.
def arctanSer(num,index):
    """Return term *index* (1-based) of the arctan Taylor series at num."""
    if index%2==0: # alternate the sign of successive series terms
        flag=-1
    else:
        flag=1
    return flag*num**(2*index-1)/(2*index-1)
def getBase(index):
    """Return term *index* of the Machin-formula sum for pi/4."""
    global par1,par2
    base=4*arctanSer(par1,index)-arctanSer(par2,index)
    return base
# Accumulate terms until they can no longer affect the first 100 decimals.
index=1
numSum=Decimal(0)
base=getBase(index)
while abs(base)>10**(-100): # remaining terms fall below the 100th decimal
    print(base)
    numSum+=base
    index+=1
    base=getBase(index)
pi=4*numSum
print(pi)
# End of the timing window.
end=time.time()
print("Total time cost is",end-start)
| A-LOST-WAPITI/Computational_Physics | HW_2/Problem_2.py | Problem_2.py | py | 909 | python | zh | code | 0 | github-code | 36 |
22283584367 | #!/Users/tnt/Documents/虚拟环境/Py4E/bin/python3
# -*- encoding: utf-8 -*-
# Time : 2021/07/25 22:41:43
# Theme : 寻找数组中第二个最大元素
def find_second_maximum_1(lst):
    """Two-pass scan: locate the maximum, then the largest value below it.

    Returns -inf when no second maximum exists (empty or all-equal input).
    """
    top = float('-inf')
    for value in lst:
        if value > top:
            top = value
    runner_up = float('-inf')
    for value in lst:
        if value != top and value > runner_up:
            runner_up = value
    return runner_up
def find_second_maximum(lst):
    """Single-pass scan tracking the two largest distinct values.

    Returns None for inputs shorter than two elements or when every element
    equals the maximum (no distinct runner-up exists).
    """
    if len(lst) < 2:
        return None
    largest = second_largest = float('-inf')
    for value in lst:
        if value > largest:
            # New maximum: the old maximum becomes the runner-up.
            largest, second_largest = value, largest
        elif largest > value > second_largest:
            second_largest = value
    if second_largest == float('-inf'):
        return None
    return second_largest
print(find_second_maximum([9, 2, 3, 6]))
print(find_second_maximum([9, 2, 3, 6]))
| Createitv/BeatyPython | 05-PythonAlgorithm/BasicDataStructure/array/second_largest_num.py | second_largest_num.py | py | 1,616 | python | en | code | 1 | github-code | 36 |
74998248105 | from __future__ import print_function
import numpy as np
import cv2
import subprocess
import itertools
from multiprocessing import Pool
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# Collect the FDDB-style annotation files (every *ellipseList.txt) in the cwd.
# NOTE(review): parses `ls` output via subprocess; os.listdir would avoid the
# external process -- left unchanged here.
f = subprocess.check_output(["ls"]).split()
files = []
#make list of files that contain ellipse data
for i in f:
    if "ellipseList.txt" in i:
        files.append(i)
print(files)
class Image:
    """One annotated grayscale image: per-face ellipse masks plus generated
    fixed-size training windows (jittered positive crops and random negative
    crops), each paired with an intersection-over-union style score.

    NOTE(review): Python 2 code (xrange, integer division in window_size/2);
    it will not run unmodified on Python 3.
    """
    def __init__(self, filename, window_size):
        # Load as grayscale (cv2.imread flag 0).
        self.im = cv2.imread(filename,0)
        #self.im = cv2.resize(self.im,(0,0),fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
        # Per-face annotation state, appended to by ellipse().
        self.mask = []
        self.mask_small = []
        self.windows = []
        self.windows_small = []
        self.scores = []
        self.scores_small = []
        self.cx = []
        self.cy = []
        self.decimation_factor = []
        # Count of generated windows; get_windows() doubles it with negatives.
        self.imno = 0
        #self.slide = [-6,-4,-2,0,2,4,6]
        # Pixel offsets used to jitter each crop around a face centre.
        self.slide = [-3,-2,-1,0,1,2,3]
        self.window_size = window_size
    def ellipse(self, ellipse_info):
        """Register one face from a space-separated ellipse annotation line:
        "major_axis minor_axis angle center_x center_y ...".

        Builds a boolean interior mask for the rotated ellipse and records a
        decimation factor so the face roughly fits the crop window.
        """
        ellipse_info = ellipse_info.split(" ")
        axes = [float(ellipse_info[0]),float(ellipse_info[1])]
        # Downscale factor so the (2*axis)-wide face fits in window_size.
        decim_fac = int(max(max(axes[0]*2/self.window_size,axes[1]*2/self.window_size),1))
        self.decimation_factor.append(decim_fac)
        #print "best decimation is %.2f and %.2f"%(axes[0]*2/32,axes[1]*2/32)
        theta = float(ellipse_info[2])
        self.cx.append(float(ellipse_info[3]))
        self.cy.append(float(ellipse_info[4]))
        #print "diameter is %0.2f"%(2*max(axes[0],axes[1]))
        # Rotated-ellipse interior test evaluated over the whole image grid.
        y,x = np.ogrid[0:self.im.shape[0],0:self.im.shape[1]]
        mask = np.power(((x-self.cx[-1])*np.cos(theta) + (y-self.cy[-1])*np.sin(theta))/axes[0],2) + np.power(((x-self.cx[-1])*np.sin(theta) - (y-self.cy[-1])*np.cos(theta))/axes[1],2) <= 1
        self.mask.append(mask)
        #self.mask.append(mask[::2,::2])
        #self.cx[-1] /= 2
        #self.cy[-1] /= 2
    def ellipse_decim(self, ellipse_info):
        """Variant of ellipse() with axes and centre pre-halved.

        NOTE(review): not called anywhere in this chunk, and it appends no
        decimation factor while still rasterizing against the full-resolution
        grid -- confirm before using.
        """
        ellipse_info = ellipse_info.split(" ")
        axes = [float(ellipse_info[0])/2,float(ellipse_info[1])/2]
        print("best decimation is %.2f and %.2f"%(axes[0]*2/32,axes[1]*2/32))
        theta = float(ellipse_info[2])
        self.cx.append(float(ellipse_info[3])/2)
        self.cy.append(float(ellipse_info[4])/2)
        #print "diameter is %0.2f"%(2*max(axes[0],axes[1]))
        y,x = np.ogrid[0:self.im.shape[0],0:self.im.shape[1]]
        mask = np.power(((x-self.cx[-1])*np.cos(theta) + (y-self.cy[-1])*np.sin(theta))/axes[0],2) + np.power(((x-self.cx[-1])*np.sin(theta) - (y-self.cy[-1])*np.cos(theta))/axes[1],2) <= 1
        self.mask.append(mask)
    def get_score(self,mask,cx,cy,x,i,ellipse_size):
        """Score one shifted crop against a face mask.

        x is a (dy, dx) offset pair from self.slide.  Returns -1. when the
        crop would stick out of the image; otherwise an IoU-style score
        between the crop square and the ellipse mask.  Increments self.imno.

        NOTE(review): the bounds checks pair cx with x[0] and cy with x[1],
        while the slice pairs cy with x[0] / cx with x[1] -- looks transposed;
        harmless only because the offsets are small and symmetric. Verify.
        """
        s = self.window_size/2
        flag = False
        flag = flag or cy+x[0]-s < 0
        flag = flag or cx+x[0]-s < 0
        flag = flag or cy+x[1]+s+1 > mask.shape[0]
        flag = flag or cx+x[1]+s+1 > mask.shape[1]
        if flag == True:
            return -1.
        #intersect = np.sum(self.mask[i][cy+x[0]-16:cy+x[0]+17,cx+x[1]-16:cx+x[1]+17]).astype(float)
        #union = ellipse_size - intersect + (32*32)
        intersect = np.sum(mask[cy+x[0]-s:cy+x[0]+s+1,cx+x[1]-s:cx+x[1]+s+1]).astype(float)
        union = ellipse_size - intersect + (4*s*s)
        self.imno += 1
        #CHOOSE THE SCORE YOU WANT
        return np.float32(intersect/union)
        #return intersect/ellipse_size
    def get_random_window(self,image,mask,center):
        """Return a window-sized crop at *center* if it is (nearly) face-free,
        else None.  NOTE(review): appears unused; get_windows() inlines its
        own negative sampling.
        """
        s = self.window_size/2
        rand_mask = mask[center[0]-s:center[0]+s+1,center[1]-s:center[1]+s+1]
        if rand_mask.size < (self.window_size**2) or np.sum(rand_mask) > 5:
            return None
        return image[center[0]-s:center[0]+s+1,center[1]-s:center[1]+s+1].astype(np.float32)
    def get_windows(self):
        """Generate the training crops.

        For every annotated face: decimate image+mask by that face's factor,
        then slide the crop window over the 7x7 offset grid recording crops
        and scores.  Afterwards rejection-sample an equal number of random
        face-free negative crops with score 0.
        """
        s = self.window_size/2
        self.image_slides = []
        self.score_slides = []
        for i in xrange(len(self.mask)):
            image = cv2.resize(self.im,(0,0),fx=1./self.decimation_factor[i],fy=1./self.decimation_factor[i],interpolation=cv2.INTER_AREA)
            mask = cv2.resize(self.mask[i].astype(np.uint8),(0,0),fx=1./self.decimation_factor[i],fy=1./self.decimation_factor[i],interpolation=cv2.INTER_AREA).astype(bool)
            mask_size = np.sum(mask)
            cx = int(round(self.cx[i]/self.decimation_factor[i]))
            cy = int(round(self.cy[i]/self.decimation_factor[i]))
            self.score_slides.append(map(lambda x: self.get_score(mask,cx,cy,x,i,mask_size), itertools.product(self.slide,self.slide)))
            self.image_slides.append(map(lambda x: image[cy+x[0]-s:cy+x[0]+s+1,cx+x[1]-s:cx+x[1]+s+1].astype(np.float32), itertools.product(self.slide,self.slide)))
        #generate random images
        # Union of all face masks, used to keep negatives face-free.
        self.random_slides = []
        self.random_scores = []
        mask = np.zeros(self.im.shape)
        for i in xrange(len(self.mask)):
            mask = np.maximum(mask, self.mask[i].astype(int))
        mask = mask.astype(bool)
        # Candidate random centres; refreshed below when exhausted.
        rand = np.random.rand(self.imno,2)
        rand[:,0] *= self.im.shape[0]
        rand[:,1] *= self.im.shape[1]
        rand = rand.astype(int)
        iterate = 0
        goal = 2*self.imno
        while(self.imno < goal):
            try:
                randy = rand[iterate,0]
                randx = rand[iterate,1]
            except IndexError:
                # Ran out of candidates -- draw a fresh batch and restart.
                rand = np.random.rand(self.imno,2)
                rand[:,0] *= self.im.shape[0]
                rand[:,1] *= self.im.shape[1]
                rand = rand.astype(int)
                iterate=0
                continue
            try:
                small = mask[randy-s:randy+s+1,randx-s:randx+s+1]
                #print "shape is %d %d"%(small.shape[0],small.shape[1])
                #print "val is %d"%np.sum(small)
            except IndexError:
                iterate+=1
                continue
            iterate+=1
            # Skip crops clipped by the image border or overlapping a face.
            if small.size - (self.window_size**2) < 10:
                continue
            elif np.sum(small) > 10:
                continue
            self.random_slides.append(self.im[randy-s:randy+s+1,randx-s:randx+s+1].astype(np.float32))
            self.random_scores.append(np.float32(0))
            self.imno += 1
            #print "Adding random image"
            #print "%d left to go"%(goal-self.imno)
    def get_data(self):
        """Return (windows, scores): flattened positive crops + negatives."""
        flatten = lambda l: [item for sublist in l for item in sublist]
        return flatten(self.image_slides)+self.random_slides, flatten(self.score_slides)+self.random_scores
def info(filename):
    """Parse one ellipse-list annotation file and build its training set.

    Each record is: an image path line, a face-count line, then one ellipse
    line per face.  Returns (windows, scores) as numpy arrays, or None when
    parsing aborts.

    NOTE(review): the ValueError handler silently returns None, discarding
    everything parsed so far from that file.
    """
    with open(filename,"r") as f:
        slides = []
        scores = []
        while(True):
            try:
                imgpath = f.readline().split("\n")[0]+".jpg"
                if imgpath == ".jpg":
                    # Blank read means end of file: return what we gathered.
                    return np.array(slides), np.array(scores)
                #print imgpath
                e = Image(imgpath,32)
                numfaces = f.readline().strip()
                #print numfaces
                print(numfaces)
                for i in xrange(int(numfaces)):
                    ellipse_info = f.readline().split("\n")[0]
                    #print ellipse_info
                    e.ellipse(ellipse_info)
                #plt.imshow(e.im,cmap="gray",alpha=0.5)
                #plt.imshow(e.ellipse(ellipse_info),alpha=0.1,cmap="gray")
                #plt.show()
                e.get_windows()
                ims, im_scores = e.get_data()
                for i in xrange(len(ims)):
                    slides.append(ims[i])
                    scores.append(im_scores[i])
                #print
                #e.get_windows()
            except ValueError as a:
                #pass
                # print e
                return
            #return
#info(files[0])
#exit()
# Parse the first two annotation files in parallel; each worker returns a
# (windows, scores) pair which is concatenated into one flat dataset.
pool = Pool(4)
a = np.array(pool.map(info,files[:2]))
images = np.concatenate(a[:,0]).tolist()
scores = np.concatenate(a[:,1]).tolist()
# Drop any crop that is not exactly 33x33 (border-clipped slides).
i=0
while(True):
    if i==len(images):
        break
    elif images[i].shape != (33,33):
        del images[i]
        del scores[i]
    else:
        i+=1
images = np.array(images)
scores = np.array(scores)
# images_flat = []
# scores_flat = []
# for i in xrange(len(images)):
#     assert len(images[i]) == len(scores[i])
#     for j in xrange(len(images[i])):
#         print type(scores[i][j])
#         images_flat.append(images[i][j])
#         scores_flat.append(scores[i][j])
# images = np.array(images_flat)
# scores = np.array(scores_flat)
# Keep only in-bounds crops (score -1 marks out-of-image slides).
images = images[np.where(scores >= 0)]
scores = scores[np.where(scores >= 0)]
#scores_second = np.add(-1,scores)
#scores = np.concatenate((scores[:,np.newaxis],scores_second[:,np.newaxis]),axis=1)
#data = np.stack((images,scores[:,np.newaxis]),axis=1)
#np.random.shuffle(data)
#print(data.shape)
# plt.hist(scores,bins=50)
# plt.show()
# rand_range = (np.random.rand(10)*1000).astype(int)
# for i in xrange(10):
#     print images[rand_range[i]].shape
#     plt.imshow(images[rand_range[i]],cmap="gray",interpolation="nearest")
#     print scores[rand_range[i]]
#     plt.show()
print(scores.shape)
print(np.amin(scores))
def build_cnn(input_var=None):
    """Build the 33x33 single-channel window-scoring CNN.

    Two conv(32 filters, 5x5) + 2x2 max-pool stages, a 256-unit dense layer
    with 50% input dropout, and one sigmoid output unit.  Returns the final
    lasagne layer.
    """
    # As a third model, we'll create a CNN of two convolution + pooling stages
    # and a fully-connected hidden layer in front of the output layer.
    # Input layer, as usual:
    network = lasagne.layers.InputLayer(shape=(None, 1, 33, 33),
                                        input_var=input_var)
    # This time we do not apply input dropout, as it tends to work less well
    # for convolutional layers.
    # Convolutional layer with 32 kernels of size 5x5. Strided and padded
    # convolutions are supported as well; see the docstring.
    network = lasagne.layers.Conv2DLayer(
            network, num_filters=32, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform())
    # Expert note: Lasagne provides alternative convolutional layers that
    # override Theano's choice of which implementation to use; for details
    # please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
    # Max-pooling layer of factor 2 in both dimensions:
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # Another convolution with 32 5x5 kernels, and another 2x2 pooling:
    network = lasagne.layers.Conv2DLayer(
            network, num_filters=32, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    network = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(network, p=.5),
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the single-unit sigmoid output layer (no dropout):
    network = lasagne.layers.DenseLayer(
            network,
            num_units=1,
            nonlinearity=lasagne.nonlinearities.sigmoid)
    return network
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) mini-batches of size *batchsize*.

    A trailing partial batch is dropped.  With shuffle=True, samples are
    drawn in a random order (one shuffle per full pass).
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    for start in range(0, len(inputs) - batchsize + 1, batchsize):
        if order is None:
            batch_idx = slice(start, start + batchsize)
        else:
            batch_idx = order[start:start + batchsize]
        yield inputs[batch_idx], targets[batch_idx]
def main(data,model='cnn', num_epochs=500):
    """Train and evaluate the window-scoring CNN.

    data: [images, scores] as assembled above (33x33 float crops, IoU scores).
    model: unused -- kept for call compatibility.
    num_epochs: number of full passes over the training slice.

    NOTE(review): train/val/test slice sizes are hard-coded; the validation
    slice X[-4000:] can overlap the test slice for small datasets -- verify.
    """
    # Load the dataset
    print("Loading data...")
    X = data[0].reshape(-1, 1, 33, 33)
    X /= np.float32(255)
    # Binarize the IoU scores into 0/1 labels.
    Y = np.round_(data[1]).astype(np.float32)
    #X = X.astype(np.float32)
    #Y = Y.astype(np.float32)
    # X_train = X[0:300000]
    # y_train = Y[0:300000]
    # X_val = X[-20000:]
    # y_val = Y[-20000:]
    # X_test = X[300000:400000]
    # y_test = Y[300000:400000]
    X_train = X[0:50000]
    y_train = Y[0:50000]
    X_val = X[-4000:]
    y_val = Y[-4000:]
    X_test = X[50000:80000]
    y_test = Y[50000:80000]
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.fvector('targets')
    # Create neural network model (depending on first command line parameter)
    network = build_cnn(input_var)
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.binary_hinge_loss(prediction, target_var, log_odds=False)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.01, momentum=0.9)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_hinge_loss(test_prediction,
            target_var, log_odds=False)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    # NOTE(review): T.eq compares the continuous sigmoid output exactly with
    # the 0/1 target, so accuracy will read ~0; a 0.5 threshold is probably
    # intended -- confirm before trusting this metric.
    test_acc = T.mean(T.eq(test_prediction, target_var),
                      dtype=theano.config.floatX)
    #test_acc = T.mean(lasagne.objectives.binary_hinge_loss(prediction, target_var, log_odds=False),
    #                  dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    # lasagne.layers.set_all_param_values(network, param_values)
main([images,scores])
| arvigj/cv_hw3 | new_eval.py | new_eval.py | py | 15,057 | python | en | code | 0 | github-code | 36 |
71257318823 | import math
from typing import List
import numpy as np
import torch
import torch.jit as jit
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from language_models.language_base_model import LanguageBaselightning
class RNNCell(jit.ScriptModule):
    """Vanilla (Elman) tanh RNN cell compiled as a TorchScript module."""
    def __init__(self, input_size, hidden_size):
        super(RNNCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Initialize the weights with random numbers.
        self.weight_ih = Parameter(torch.randn(hidden_size, input_size))
        self.weight_hh = Parameter(torch.randn(hidden_size, hidden_size))
        self.bias_ih = Parameter(torch.randn(hidden_size)) # input to hidden
        self.bias_hh = Parameter(torch.randn(hidden_size)) # hidden to hidden
        # Overwrites the randn init above with the uniform init below.
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize every parameter uniformly in [-1/sqrt(H), 1/sqrt(H)]."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    @jit.script_method
    def forward(self, input: Tensor, state: Tensor):
        """One step: h' = tanh(x W_ih^T + b_ih + h W_hh^T + b_hh)."""
        # input is the input at the current timestep
        # state is the hidden state from the previous timestep
        hx = state
        hidden = (
            torch.mm(input, self.weight_ih.t())
            + self.bias_ih
            + torch.mm(hx, self.weight_hh.t())
            + self.bias_hh
        )
        hy = torch.tanh(hidden)
        return hy
class RNNLayer(jit.ScriptModule):
    """Unroll a recurrent cell over the time dimension (dim 1) of the input."""
    def __init__(self, cell, *cell_args):
        super(RNNLayer, self).__init__()
        # Instantiate the cell class with the given constructor arguments.
        self.cell = cell(*cell_args)
    @jit.script_method
    def forward(self, input: Tensor, state: Tensor):
        """Run the cell over each timestep (input unbound along dim 1).

        Returns (per-step states stacked along dim 1, final state).
        """
        inputs = input.unbind(1)
        outputs = torch.jit.annotate(List[Tensor], [])
        for i in range(len(inputs)):
            state = self.cell(inputs[i], state)
            outputs += [state]
        return torch.stack(outputs, 1), state
class JitRNN_language_model(LanguageBaselightning):
    """RNN language model using the scripted cell, with the output projection
    weight tied to the input embedding matrix."""
    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        hidden_size: int,
        padding_idx: int,
        learning_rate: float = 0.001,
    ):
        super(JitRNN_language_model, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # self.padding_idx = torch.tensor(padding_idx).to(self.device)
        self.padding_idx = torch.tensor(padding_idx)
        self.learning_rate = learning_rate
        self.embedding = nn.Embedding(
            vocab_size, embedding_size, padding_idx=self.padding_idx
        )
        # Projects RNN hidden states back to embedding space so the tied
        # output layer (embedding_size -> vocab_size) can be applied.
        self.dense = nn.Linear(hidden_size, embedding_size)
        self.rnn = RNNLayer(RNNCell, embedding_size, hidden_size)
        self.output_layer = nn.Linear(embedding_size, vocab_size)
        self.hidden = None
        # tie the weights of the output embeddings with the input embeddings
        self.output_layer.weight = self.embedding.weight
        self.loss_func = nn.CrossEntropyLoss()
    def forward(self, x, seq_length):
        """Return per-position vocabulary logits of shape (batch, seq, vocab).

        NOTE(review): self.hidden is reset to zeros on every call, so state
        does not actually persist across calls despite
        detach_intermediate_vars() -- confirm intended.
        """
        batch_size, seq_length = x.size()
        # get embedding encoder
        x = self.embedding(x)
        # get output of rnn
        self.hidden = torch.zeros(batch_size, self.hidden_size).type_as(x)
        output, self.hidden = self.rnn(x, self.hidden)
        out = self.dense(output)
        out = self.output_layer(out)
        return out.view(
            batch_size, seq_length, self.vocab_size
        )  # Dimensions -> Batch x Sequence x Vocab
    def reset_intermediate_vars(self):
        """Drop the cached hidden state."""
        self.hidden = None
    def detach_intermediate_vars(self):
        """Detach the cached hidden state from the autograd graph."""
        self.hidden = self.hidden.detach()
# class RNN(nn.Module):
# # you can also accept arguments in your model constructor
# # we don't use the output in this implemention
# def __init__(
# self,
# embed_size,
# hidden_size,
# ):
# super(RNN, self).__init__()
# self.hidden_size = hidden_size
# # input_size = embed_size + hidden_size
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # self.i2h = nn.Linear(input_size, hidden_size)
# self.Wih = nn.Linear(embed_size, hidden_size)
# self.Whh = nn.Linear(hidden_size, hidden_size)
# # self.h2o = nn.Linear(input_size, output_size)
# def forward(self, data, last_hidden):
# wi = self.Wih(data)
# wh = self.Whh(last_hidden)
# hidden = torch.relu(wi + wh)
# # output = self.h2o(input)
# return hidden
# def initHidden(self, batch_size):
# # return torch.zeros(batch_size,self.hidden_size).to(self.device)
# return nn.init.kaiming_uniform_(torch.empty(batch_size, self.hidden_size)).to(
# self.device
# )
# class RNN_language_model(nn.Module):
# def __init__(
# self,
# vocab_size: int,
# embed_size: int,
# hidden_size: int,
# padding_idx: int,
# ):
# super(RNN_language_model, self).__init__()
# self.vocab_size = vocab_size
# self.hidden_size = hidden_size
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.padding_idx = torch.tensor(padding_idx).to(self.device)
# self.embedding = nn.Embedding(
# vocab_size, embed_size, padding_idx=self.padding_idx
# )
# self.dense = nn.Linear(hidden_size, embed_size)
# # note that output_size = vocab_size
# self.rnn_cell = RNN(
# embed_size,
# hidden_size,
# )
# self.output_layer = nn.Linear(embed_size, vocab_size)
# # tie the weights of the output embeddings with the input embeddings
# # self.output_layer.weight = self.embedding.weight
# self.loss_func = nn.CrossEntropyLoss()
# def forward(self, x, seq_length):
# batch_size, seq_length = x.size()
# # get embedding encoder
# x = self.embedding(x)
# # get output of rnn
# self.hidden = self.rnn_cell.initHidden(batch_size)
# hiddens = []
# # recurrent rnn
# for i in range(seq_length):
# hidden_next = self.rnn_cell(x[:, i, :], self.hidden)
# hiddens.append(hidden_next.unsqueeze(1))
# self.hidden = hidden_next
# hidden_tensor = torch.cat(hiddens, 1)
# out = hidden_tensor.contiguous().view(-1, self.hidden_size)
# out = self.dense(out)
# out = self.output_layer(out)
# return (
# out.view(batch_size, seq_length, self.vocab_size),
# self.hidden,
# ) # Dimensions -> Batch x Sequence x Vocab
# def loss(self, predictions, y, mask):
# predictions = predictions.view(-1, predictions.size(2))
# predictions *= torch.stack([mask] * predictions.size(1)).transpose(0, 1).float()
# return self.loss_func(predictions, y)
| shuishen112/TensorLanguageModel | language_models/lightRNN.py | lightRNN.py | py | 7,044 | python | en | code | 0 | github-code | 36 |
class Graph:
    """Undirected weighted graph with a Kruskal MST built via union-find."""
    def __init__(self,Vertices):
        # Number of vertices; edges are stored as [u, v, weight] triples.
        self.V = Vertices
        self.graph = []
    def addEdge(self,u,v,w):
        """Record an edge between u and v with weight w."""
        self.graph.append([u, v, w])
    def find(self,parent,i):
        """Return the representative (root) of the set containing i."""
        while parent[i] != i:
            i = parent[i]
        return i
    def print_g(self):
        """Dump the raw edge list."""
        print(self.graph)
    def union(self,parent,rank,x,y):
        """Merge the sets containing x and y using union by rank."""
        root_x = self.find(parent, x)
        root_y = self.find(parent, y)
        if rank[root_x] < rank[root_y]:
            # Attach the shorter tree under the taller one.
            parent[root_x] = root_y
        else:
            parent[root_y] = root_x
            if rank[root_x] == rank[root_y]:
                # Equal heights: the merged tree grows by one level.
                rank[root_x] += 1
    def print_kruskal(self,result):
        """Print the MST edges followed by the total cost."""
        cost = 0
        print("Edges in MST:")
        for u, v, w in result:
            cost += w
            print(f"{u}--{v}--->{w}")
        print(f"Cost-->{cost}")
    def kruskal(self):
        """Build and print a minimum spanning tree (Kruskal's algorithm)."""
        mst = []
        edge_index = 0  # cursor into the sorted edge list
        # Sort edges by ascending weight (also reorders self.graph in place,
        # matching the original behaviour observable via print_g()).
        self.graph = sorted(self.graph, key=lambda edge: edge[2])
        parent = list(range(self.V))  # each vertex starts as its own root
        rank = [0] * self.V
        # Greedily take the lightest edge that joins two different components.
        while len(mst) < self.V - 1:
            u, v, w = self.graph[edge_index]
            edge_index += 1
            root_u = self.find(parent, u)
            root_v = self.find(parent, v)
            if root_u != root_v:
                mst.append([u, v, w])
                self.union(parent, rank, root_u, root_v)
        self.print_kruskal(mst)
# Build the 6-vertex demo graph and print its minimum spanning tree.
g = Graph(6)
g.addEdge(0,1,12)
g.addEdge(1, 2,15)
g.addEdge(1, 3,12)
g.addEdge(2, 4,13)
g.addEdge(2, 5,5)
g.addEdge(3, 2,6)
g.addEdge(3,4,6)
g.kruskal()
| Bishtman12/DSA---Python | Graph/Minimum Spanning Tree(kRUSKALS).py | Minimum Spanning Tree(kRUSKALS).py | py | 2,011 | python | en | code | 0 | github-code | 36 |
# Read the pool volume, two pipe flow rates (litres/hour) and the fill time,
# then report fill percentages, or the overflow amount when the pipes deliver
# more water than the pool holds.
volume = int(input())
pipe1 = int(input())
pipe2 = int(input())
hours = float(input())
total_volume = (pipe1 + pipe2) * hours
if total_volume <= volume:
    # Share of the pool filled, and each pipe's share of the delivered water.
    pool_percent = (total_volume / volume) * 100
    pipe1_percent = ((pipe1 * hours) / total_volume) * 100
    pipe2_percent = ((pipe2 * hours) / total_volume) * 100
    print(f"The pool is {pool_percent:.2f}% full. Pipe 1: {pipe1_percent:.2f}%. Pipe 2: {pipe2_percent:.2f}%")
else:
    over_volume = total_volume - volume
    print(f"For {hours} hours the pool overflow with {over_volume:.2f} liters")
| ivoivanov0830006/1.1.Python_BASIC | 2.Conditional_statements/**01.Pool_pipes.py | **01.Pool_pipes.py | py | 557 | python | en | code | 1 | github-code | 36 |
32882450678 | #!/usr/bin/python3
import os, os.path
import json
import subprocess
from flask import Flask, request, redirect, abort
from time import sleep
app = Flask(__name__)
# Root directory of the checked-out service repositories on this host.
GITROOT = '/home/ubuntu/service/'
@app.route('/')
def index():
    """Redirect browsers hitting the root URL to the project page on GitHub."""
    return redirect('https://github.com/TauWu/spider_monitor_api')
@app.route('/', methods=['POST'])
def commit():
    """Webhook endpoint: redeploy the service by running its start script.

    NOTE(review): the incoming request body is ignored -- the payload is
    hard-coded, so every POST redeploys spider_monitor_api regardless of
    which repository triggered the hook.  Confirm this is intended.
    """
    payload = {"repository":{"name":"spider_monitor_api"}}
    reponame = payload['repository']['name']
    # Scripts live in the repo's bash/ subdirectory.
    reponame = "%s/bash"%reponame
    repodir = os.path.join(GITROOT, reponame)
    os.chdir(repodir)
    os.system("./start_service.sh")
    # Give the restart a moment to settle before acknowledging.
    sleep(10)
    return 'success.'
application = app # For WSGI
if __name__ == '__main__':
    # Development server only; production should use the WSGI entry point.
    app.run('0.0.0.0',port=9000,debug=True)
| TauWu/spider_monitor_api | extra/hook.py | hook.py | py | 732 | python | en | code | 0 | github-code | 36 |
34609621508 | """
650. 2 Keys Keyboard
There is only one character 'A' on the screen of a notepad. You can perform one
of two operations on this notepad for each step:
1. Copy All: You can copy all the characters present on the screen.
2. Paste: You can paste the characters which are copied last time.
Given an integer n, return the minimum number of operations to get the
character 'A' exactly n times on the screen.
"""
def min_steps_naive(n):
    """Naive recursion: try every divisor d of n, reach d 'A's first, then
    copy-paste n//d times.  Returns the minimum total operation count."""
    # Base case: the notepad starts with a single 'A', costing 0 operations.
    if n == 1:
        return 0
    candidates = (
        min_steps_naive(d) + n // d
        for d in range(1, n)
        if n % d == 0
    )
    return min(candidates, default=float('inf'))
def min_steps_rec(n, dp):
    """Top-down dynamic programming: dp memoizes results, with float('inf')
    marking entries that have not been computed yet."""
    if n == 1:
        return 0
    # Only compute dp[n] on the first visit; later calls hit the memo.
    if dp[n] == float('inf'):
        for d in range(1, n):
            if n % d == 0:
                dp[n] = min(dp[n], min_steps_rec(d, dp) + n // d)
    return dp[n]
def min_steps(n):
    """Bottom-up dynamic programming solution.

    dp[i] is the minimum number of operations to show exactly i 'A's.
    Only divisors of n are filled in: every intermediate count on an optimal
    path to n must divide n, so the rest is never needed.
    """
    INF = float('inf')
    dp = [INF] * (n + 1)
    dp[1] = 0  # the screen starts with one 'A' for free
    for count in range(2, n + 1):
        if n % count != 0:
            continue
        # Reach `piece` first, then copy it and paste count//piece - 1 times.
        for piece in range(1, count):
            if count % piece == 0:
                dp[count] = min(dp[count], dp[piece] + count // piece)
    return dp[n]
| wuihee/data-structures-and-algorithms | programming-paradigm/dynamic_programming/min_max_path/keyboard.py | keyboard.py | py | 1,811 | python | en | code | 0 | github-code | 36 |
7044056393 | # !/usr/bin/env python
import rospy
import websocket
import json
# from msg.ObjectArray import ObjectArray
from detection.msg._ObjectArray import ObjectArray
# Class labels emitted by the detector, indexed by ObjectArray label ids.
LABELS = ["human", "unknown", "animals"]
try:
    import thread  # Python 2
except ImportError:
    import _thread as thread  # Python 3 fallback
import time
def on_message(ws, message):
    """Websocket message callback; server replies are ignored."""
    pass
    # print(message)
def on_error(ws, error):
    """Websocket error callback: print the error."""
    print(error)
def on_close(ws):
    """Websocket close callback."""
    print("### closed ###")
def on_open(ws):
    """Websocket open callback: start a background thread that streams
    per-frame detection counts plus mock drone telemetry to the server."""
    def run(*args):
        # Mock telemetry; latitude drifts a little with each message.
        drone_id = 1
        latitude = 55.751574
        longtitude = 37.573856
        while True:
            # Busy-wait for the next ObjectArray stashed by the ROS callback.
            if on_object_array.last_object_array is None:
                continue
            object_array = on_object_array.last_object_array
            on_object_array.last_object_array = None
            # Tally detections per class label.
            cnt = {l: 0 for l in LABELS}
            for object in object_array.objects:
                cnt[LABELS[object.label]] += 1
            data = {
                'drone_id': drone_id,
                'latitude': latitude,
                'longtitude': longtitude,
                'timestamp': time.time(),
                'humans': cnt['human'],
                'animals': cnt["animals"],
                'unknown': cnt["unknown"]
            }
            time.sleep(1)
            latitude += 0.001
            ws.send(json.dumps(data))
        # NOTE(review): unreachable -- the while True above never breaks.
        ws.close()
    thread.start_new_thread(run, ())
def on_object_array(msg):
    """ROS callback: stash the newest ObjectArray for the sender thread."""
    on_object_array.last_object_array = msg
# Function attribute used as a one-slot mailbox between threads.
on_object_array.last_object_array = None
if __name__ == "__main__":
    rospy.init_node("sender")
    sub_objects = rospy.Subscriber("/detection/yolo/objects", ObjectArray, on_object_array)
    websocket.enableTrace(True)
    # NOTE(review): hard-coded server address -- consider a ROS parameter.
    ws = websocket.WebSocketApp("ws://192.168.86.248:8080/drone/", on_message=on_message, on_error=on_error,
                                on_close=on_close)
    ws.on_open = on_open
    # Blocks forever servicing the websocket connection.
    ws.run_forever()
| cds-mipt/animal_ir_detection | sender/scripts/sender.py | sender.py | py | 1,890 | python | en | code | 0 | github-code | 36 |
3837799598 | import numpy as np
from numpy import array, trace, random, linalg
from numpy.linalg import norm
def projectorOnto(vector):
    """Build the rank-1 projector |v><v| onto the given state vector.

    The outer product of the vector with its complex conjugate
    (equivalent to np.tensordot(v, v.conj(), 0) for any input shape).
    """
    return np.multiply.outer(vector, vector.conj())
def randomPureState(dim):
    """Draw a Haar-random pure state and return it as a normalized
    dim x dim density matrix (|psi><psi| / Tr)."""
    psi = random.randn(dim) + 1j * random.randn(dim)
    rho = projectorOnto(psi)
    return rho / trace(rho)
def randomMixedState(dim):
    """Draw a Hilbert-Schmidt distributed random mixed state of dimension dim.

    G G^dagger for a complex Ginibre matrix G, normalized to unit trace.
    """
    g = random.randn(dim, dim) + 1j * random.randn(dim, dim)
    rho = g @ g.T.conj()
    return rho / trace(rho)
def isMeasurementValid(measurement, eps = 1e-12):
    """Check that `measurement` is a valid rank-1 projective measurement:
    unit trace (within eps), Hermitian (exact entrywise), and rank 1."""
    unit_trace = abs(trace(measurement) - 1.) <= eps
    hermitian = (measurement == measurement.T.conj()).all()
    rank_one = linalg.matrix_rank(measurement) == 1
    return bool(unit_trace and hermitian and rank_one)
def isDensityMatrix(rho, eps = 1e-12):
    """ Checks whether a given matrix rho is a density matrix
        (i.e. Hermitian, positive semidefinite with unit trace)
        rho - the density matrix to check
        eps - internal test error
    """
    # Reject a wrong trace, or any Hermiticity violation.  The original code
    # used (rho != rho.T.conj()).all(), which only rejected matrices whose
    # *every* entry differed from the conjugate transpose; .any() rejects as
    # soon as a single entry breaks Hermiticity.
    # NOTE(review): Hermiticity is still an exact entrywise comparison (no
    # eps tolerance) — confirm that is intended for float inputs.
    if ((abs(trace(rho) - 1.) > eps) or
        (rho != rho.T.conj()).any()):
        return False
    # eigvalsh is valid here because rho is Hermitian at this point.
    return (linalg.eigvalsh(rho) >= -eps).all()
def bornRule(trueState, measurement):
    """Probability of obtaining `measurement` on `trueState`: Re Tr(rho M)."""
    return trace(trueState @ measurement).real
def measure(trials, trueState, measurement, checkValidity = False):
    """Simulate `trials` binary measurements of `trueState` with the rank-1
    projector `measurement`.

    trials - number of repetitions of the measurement
    trueState - density matrix to measure
    measurement - normalized rank-1 projector
    checkValidity - when True, validate both inputs first
    Returns array([hits, trials - hits]).
    """
    if checkValidity:
        # Short-circuits exactly like the original: the state is only
        # validated when the measurement already passed.
        if not (isMeasurementValid(measurement) and isDensityMatrix(trueState)):
            raise ValueError('Invalid true state and/or measurement were given')
    probability = bornRule(trueState, measurement)
    hits = random.binomial(trials, np.clip(probability, 0, 1))
    return array([hits, trials - hits])
# Sample qubit true states
# (each entry is a 2x2 density matrix; H/D/R follow the usual polarization
# naming, the remaining entries are fixed random draws kept for reproducibility)
trueStates = array([
    [[1., 0.], [0., 0.]], # H state
    [[0.5, 0.5], [0.5, 0.5]], # D state
    [[0.5, -0.5j], [0.5j, 0.5]], # R state
    [[0.7, 0.], [0., 0.3]], # 30% mixed state
    [[0.39759026+0.j, -0.48358514+0.07521739j], [-0.48358514-0.07521739j, 0.60240974+0.j]], # Random pure state #1
    [[0.37719362+0.j, 0.18480147+0.44807032j], [0.18480147-0.44807032j, 0.62280638+0.j]], # Random pure state #2
    [[0.62829064+0.j, 0.13397942-0.30318748j], [0.13397942+0.30318748j, 0.37170936+0.j]], # Random mixed state #1
    [[0.75525869+0.j, 0.27476800-0.2559911j], [0.27476800+0.2559911j, 0.24474131+0.j]] # Random mixed state #2
])
| kanhaiya-gupta/quantum-learning | lib/simulator.py | simulator.py | py | 3,127 | python | en | code | null | github-code | 36 |
16283819127 | from django.conf.urls import url
from .views import(
AddQuestionCreateAPIView,
QuestionListAPIView,
QuestionRUDAPIView,
QuestionImageRUDAPIView,
UserQuestionListAPIView,
TopicCreateAPIView,
TopicRUDAPIView,
SubTopicCreateAPIView,
SubTopicRUDAPIView,
QuestionOptionCreateAPIView,
QuestionOptionRUDAPIView,
QuestionSolutionCreateAPIView,
QuestionSolutionRUDAPIView,
QuestionDiscussionCreateAPIView,
QuestionDiscussionRUDAPIView,
)
# URL table for the questions API.
# NOTE: every pattern is now anchored with ^...$ — the previous unanchored
# patterns (e.g. r'user', r'create') matched any path that merely *started*
# with that text, so e.g. 'create-anything' resolved to the create view.
# An optional trailing slash (/?) keeps existing slash-terminated URLs working.
urlpatterns = [
    url(r'^$' ,QuestionListAPIView.as_view() ,name="questions"),
    url(r'^user/?$' ,UserQuestionListAPIView.as_view() ,name="user_questions"),
    url(r'^create/?$' ,AddQuestionCreateAPIView.as_view() ,name="question_create"),
    url(r'^edit/(?P<pk>\d+)/?$' ,QuestionRUDAPIView.as_view() ,name="question_edit"),
    url(r'^edit-image/(?P<pk>\d+)/?$',QuestionImageRUDAPIView.as_view() ,name="question_edit_image"),
    url(r'^option-create/?$' ,QuestionOptionCreateAPIView.as_view() ,name="question_answer_create"),
    url(r'^option-edit/(?P<pk>\d+)/?$' ,QuestionOptionRUDAPIView.as_view() ,name="question_answer_edit"),
    ###################
    url(r'^solution-create/?$' ,QuestionSolutionCreateAPIView.as_view() ,name="question_solution_create"),
    url(r'^solution-edit/(?P<pk>\d+)/?$' ,QuestionSolutionRUDAPIView.as_view() ,name="question_solution_edit"),
    url(r'^discussion-create/?$' ,QuestionDiscussionCreateAPIView.as_view(),name="discussion_create"),
    url(r'^discussion-edit/(?P<pk>\d+)/?$' ,QuestionDiscussionRUDAPIView.as_view() ,name="discussion_edit"),
    ###################
    url(r'^topic-create/?$' ,TopicCreateAPIView.as_view() ,name="topic_create"),
    url(r'^topic-edit/(?P<pk>\d+)/?$' ,TopicRUDAPIView.as_view() ,name="topic_edit"),
    url(r'^subTopic-create/?$' ,SubTopicCreateAPIView.as_view() ,name="subTopic_create"),
    url(r'^subTopic-edit/(?P<pk>\d+)/?$' ,SubTopicRUDAPIView.as_view() ,name="subTopic_edit"),
]
| ashukesri/100Percentile | questions/urls.py | urls.py | py | 2,307 | python | en | code | 0 | github-code | 36 |
18915553573 | import pytest
from src.same_tree import Solution
from src.utils.binary_tree import list_to_tree
@pytest.mark.parametrize(
    "list_p,list_q,equal",
    [
        ([1, 2, 3], [1, 2, 3], True),
        ([1, 2], [1, None, 2], False),
        ([], [], True),
        ([1, 2, 1], [1, 1, 2], False),
    ],
)
def test_solution(list_p, list_q, equal):
    """Build both trees from level-order lists (None = missing node) and
    check isSameTree against the expected verdict; `is` works because the
    solution is expected to return a real bool."""
    p = list_to_tree(list_p)
    q = list_to_tree(list_q)
    assert Solution().isSameTree(p, q) is equal
| lancelote/leetcode | tests/test_same_tree.py | test_same_tree.py | py | 456 | python | en | code | 3 | github-code | 36 |
38830842298 | from rest_framework import status
def jwt_response_payload_handler(token, user=None, request=None):
    """Shape the JWT login response: wrap the freshly issued token plus
    basic user info in the project's {code, message, result} envelope.

    Invoked by django-rest-framework-jwt after a successful login, so
    `user` is always an authenticated user instance there.
    """
    result = {
        'token': token,
        'user_id': user.id,
        'username': user.username,
    }
    return {'code': status.HTTP_200_OK, 'message': '', 'result': result}
| helloming86/DjangoJWTDemo | users/utils.py | utils.py | py | 308 | python | en | code | 0 | github-code | 36 |
35555355731 | from datetime import date
from fastapi import APIRouter, Depends, Query
from sqlalchemy.ext.asyncio import AsyncSession
from api.deps import get_db
from crud.analytics import get_analytics_by_range_of_dates, get_analytics_by_student_id
from schemas.analytics import AnalyticsByRangeOfDates
router = APIRouter()
@router.get("/", response_model=list[AnalyticsByRangeOfDates | None])
async def get_analytics_by_dates(
    date_start: date = Query(...), date_end: date = Query(...), db: AsyncSession = Depends(get_db)
):
    """Get analytics by all students by range of dates.

    Args:
        date_start: Range start date.
        date_end: Range end date incl.
        db: SQLAlchemy local session.

    Returns:
        List of AnalyticsByRangeOfDates each containing emotion, emotion's count and date.
    """
    # Thin delegation: all filtering/aggregation happens in the CRUD layer.
    return await get_analytics_by_range_of_dates(
        db=db, date_start=date_start, date_end=date_end
    )
@router.get("/{student_track_id}", response_model=list[AnalyticsByRangeOfDates | None])
async def get_analytics_by_student(
    student_track_id: int,
    date_start: date = Query(...),
    date_end: date = Query(...),
    db: AsyncSession = Depends(get_db),
):
    """Get analytics by student's track id and range of dates.

    Args:
        student_track_id: Student's track ID.
        date_start: Range date start.
        date_end: Range date end.
        db: SQLAlchemy local session,

    Returns:
        List of AnalyticsByRangeOfDates each containing emotion, emotion's count and date.
    """
    # NOTE(review): this CRUD helper takes start_date/end_date while the one
    # above takes date_start/date_end — worth unifying the keyword names.
    return await get_analytics_by_student_id(
        db=db, student_track_id=student_track_id, start_date=date_start, end_date=date_end
    )
| starminalush/mfdp-2023-mvp | backend/api/endpoints/analytics.py | analytics.py | py | 1,680 | python | en | code | 0 | github-code | 36 |
34086502752 | from batch import create_udb
from projectMetrics import projectMetric
from subprocess import call
import git
import sys
import datetime
import os
import shutil
import time
def main():
    """Walk every commit on the master branch of the repo given as argv[1];
    for each commit, check it out, build an Understand DB, record the
    project metrics, and delete the DB again."""
    git_repo = sys.argv[1]  # relative path to the git working copy
    all_sha1 = []
    sha_dtime = []
    repo = git.Repo(git_repo)
    for commit in repo.iter_commits('master'):
        sha = commit.hexsha
        get_sha = repo.git.rev_parse(sha)
        all_sha1.append(get_sha)
        sha_dtime.append(datetime.datetime.fromtimestamp(commit.committed_date))
    start_time = time.time()
    # (debug `print(len(all_sha1)); exit()` removed — it made the entire
    # processing loop below unreachable)
    g = git.Git(git_repo)
    for sha, d_time in zip(all_sha1, sha_dtime):
        g.checkout(sha)
        db_name = create_udb(git_repo)
        projectMetric(db_name, sha, d_time)
        # Remove the temporary Understand DB.  The previous
        # call('rm -f ' + db_name) could not work without shell=True:
        # subprocess treats the whole string as the executable name.
        if os.path.exists(db_name):
            os.remove(db_name)
    print("--- %s minutes ---" % round((time.time() - start_time) / 60, 5))

if __name__ == '__main__':
    main()
39808955729 | import tensorflow as tf
def unpool(value, name='unpool'):
    """From: https://github.com/tensorflow/tensorflow/issues/2169

    N-dimensional version of the unpooling operation from
    https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf

    :param value: A Tensor of shape [b, d0, d1, ..., dn, ch]
    :param name: A string for scope name.
    :return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]
    """
    with tf.name_scope(name) as scope:
        sh = value.get_shape().as_list()
        dim = len(sh[1:-1])  # number of spatial dimensions
        # Collapse the leading axes, then double each spatial axis by
        # concatenating zeros along it and reshaping, so the output is the
        # input interleaved with zero blocks (zero-fill unpooling).
        out = (tf.reshape(value, [-1] + sh[-dim:]))
        for i in range(dim, 0, -1):
            out = tf.concat([out, tf.zeros_like(out)], i)
        # Restore [batch, 2*d0, ..., 2*dn, ch]; batch stays -1 (dynamic).
        out_size = [-1] + [s*2 for s in sh[1:-1]] + [sh[-1]]
        out = tf.reshape(out, out_size, name=scope)
    return out
class ConvolutionalAutoencoder(tf.keras.Model):
    """Convolutional autoencoder for small images:
    conv -> pool -> conv -> pool -> dense bottleneck -> dense ->
    unpool -> deconv -> unpool -> deconv.

    NOTE(review): uses tf.contrib, so this targets TensorFlow 1.x with
    eager execution enabled (enforced in __init__).
    """
    def __init__(self, num_neuron=256, kernal1=32, kernal2=16, shape=(32, 32, 3)):
        # num_neuron: width of the dense bottleneck
        # kernal1/kernal2: filter counts of the two conv stages
        # ("kernal" spelling kept — it is part of the public signature)
        # shape: (H, W, C) of the input images; H and W are assumed
        # divisible by 4 (two 2x2 poolings) — TODO confirm with callers.
        assert type(num_neuron) == int
        assert type(kernal1) == int
        assert type(kernal2) == int
        assert len(shape) == 3
        super().__init__()
        if not tf.executing_eagerly():
            raise NotImplementedError('Eager execution is needed but it return as : {}'.format(tf.executing_eagerly()))
        else:
            init = tf.contrib.layers.xavier_initializer()
            # Spatial size after the two 2x2 max-poolings.
            pooled_shape = (shape[0]/4, shape[1]/4, shape[2])
            # ---- encoder ----
            self.conv1 = tf.keras.layers.Conv2D(
                filters=kernal1,
                kernel_size=5,
                padding='SAME',
                activation=tf.nn.relu,
                kernel_initializer=init)
            self.max1 = tf.keras.layers.MaxPool2D(
                pool_size=(2, 2),
                strides=(2, 2),
                padding='SAME')
            self.conv2 = tf.keras.layers.Conv2D(
                filters=kernal2,
                kernel_size=5,
                padding='SAME',
                activation=tf.nn.relu,
                kernel_initializer=init)
            self.max2 = tf.keras.layers.MaxPool2D(
                pool_size=(2, 2),
                strides=(2, 2),
                padding='SAME')
            self.flatten1 = tf.keras.layers.Flatten()
            self.dense1 = tf.keras.layers.Dense(
                units=num_neuron,
                activation=tf.nn.relu,
                kernel_initializer=init)
            # ---- decoder ----
            self.dense2 = tf.keras.layers.Dense(
                units=pooled_shape[0]*pooled_shape[1]*kernal2,
                activation=tf.nn.relu,
                kernel_initializer=init)
            # pooled_shape entries are floats (from /4), hence the int() casts.
            self.deflatten1 = tf.keras.layers.Reshape(
                target_shape=(int(pooled_shape[0]), int(pooled_shape[1]), kernal2))
            self.deconv1 = tf.keras.layers.Conv2DTranspose(
                filters=kernal1,
                kernel_size=5,
                padding='SAME',
                activation=tf.nn.relu,
                kernel_initializer=init)
            # sigmoid output keeps reconstructed pixels in [0, 1]
            self.deconv2 = tf.keras.layers.Conv2DTranspose(
                filters=shape[2],
                kernel_size=5,
                padding='SAME',
                activation=tf.nn.sigmoid,
                kernel_initializer=init)
    def call(self, img):
        """Pass the batch of images to forward propagate into the networks.

        :param img: Input images with shape as [batch_size, image_size, image_size, image_channel].
        :return: Reconstruction images with shape as same as img.
        """
        x = self.conv1(img)
        x = self.max1(x)
        x = self.conv2(x)
        x = self.max2(x)
        x = self.flatten1(x)
        x = self.dense1(x)
        x = self.dense2(x)
        x = self.deflatten1(x)
        x = unpool(x)
        x = self.deconv1(x)
        x = unpool(x)
        x = self.deconv2(x)
        return x
| ninfueng/convolutional-autoencoder-for-anomaly-detection | model.py | model.py | py | 3,847 | python | en | code | 1 | github-code | 36 |
42244011138 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo import SUPERUSER_ID
import io
import csv
import base64
import ftplib
from odoo.tools import pycompat
import logging
_logger = logging.getLogger(__name__)
from odoo.exceptions import UserError, AccessError
from odoo.addons.website_mail.models.mail_message import MailMessage
from datetime import datetime, timedelta
from odoo.http import request
from odoo.exceptions import ValidationError
from odoo.addons.website_sale.models.sale_order import SaleOrder
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
    """ Method monkey patched to handle multiple UoM from website.

    Mirrors the upstream website_sale SaleOrder._cart_update, with one
    addition: when kwargs carries 'product_uom_id', the created/updated
    order line is switched to that unit of measure and
    product_uom_change() is re-run so the price follows the unit.
    """
    self.ensure_one()
    # (leftover debug print of kwargs removed)
    product_context = dict(self.env.context)
    product_context.setdefault('lang', self.sudo().partner_id.lang)
    SaleOrderLineSudo = self.env['sale.order.line'].sudo().with_context(product_context)
    # change lang to get correct name of attributes/values
    product_with_context = self.env['product.product'].with_context(product_context)
    product = product_with_context.browse(int(product_id))

    try:
        if add_qty:
            add_qty = float(add_qty)
    except ValueError:
        add_qty = 1
    try:
        if set_qty:
            set_qty = float(set_qty)
    except ValueError:
        set_qty = 0
    quantity = 0
    order_line = False
    if self.state != 'draft':
        request.session['sale_order_id'] = None
        raise UserError(_('It is forbidden to modify a sales order which is not in draft status.'))
    if line_id is not False:
        order_line = self._cart_find_product_line(product_id, line_id, **kwargs)[:1]

    # Create line if no line with product_id can be located
    if not order_line:
        if not product:
            raise UserError(_("The given product does not exist therefore it cannot be added to cart."))
        no_variant_attribute_values = kwargs.get('no_variant_attribute_values') or []
        received_no_variant_values = product.env['product.template.attribute.value'].browse([int(ptav['value']) for ptav in no_variant_attribute_values])
        received_combination = product.product_template_attribute_value_ids | received_no_variant_values
        product_template = product.product_tmpl_id
        # handle all cases where incorrect or incomplete data are received
        combination = product_template._get_closest_possible_combination(received_combination)
        # get or create (if dynamic) the correct variant
        product = product_template._create_product_variant(combination)
        if not product:
            raise UserError(_("The given combination does not exist therefore it cannot be added to cart."))
        product_id = product.id
        values = self._website_product_id_change(self.id, product_id, qty=1)
        # add no_variant attributes that were not received
        for ptav in combination.filtered(lambda ptav: ptav.attribute_id.create_variant == 'no_variant' and ptav not in received_no_variant_values):
            no_variant_attribute_values.append({
                'value': ptav.id,
            })
        # save no_variant attributes values
        if no_variant_attribute_values:
            values['product_no_variant_attribute_value_ids'] = [
                (6, 0, [int(attribute['value']) for attribute in no_variant_attribute_values])
            ]
        # add is_custom attribute values that were not received
        custom_values = kwargs.get('product_custom_attribute_values') or []
        received_custom_values = product.env['product.template.attribute.value'].browse([int(ptav['custom_product_template_attribute_value_id']) for ptav in custom_values])
        for ptav in combination.filtered(lambda ptav: ptav.is_custom and ptav not in received_custom_values):
            custom_values.append({
                'custom_product_template_attribute_value_id': ptav.id,
                'custom_value': '',
            })
        # save is_custom attributes values
        if custom_values:
            values['product_custom_attribute_value_ids'] = [(0, 0, {
                'custom_product_template_attribute_value_id': custom_value['custom_product_template_attribute_value_id'],
                'custom_value': custom_value['custom_value']
            }) for custom_value in custom_values]
        # create the line
        order_line = SaleOrderLineSudo.create(values)
        # Website sent an explicit unit of measure: apply it and re-price.
        if 'product_uom_id' in kwargs:
            order_line.product_uom = int(kwargs['product_uom_id'])
            order_line.product_uom_change()
        try:
            order_line._compute_tax_id()
        except ValidationError as e:
            # The validation may occur in backend (eg: taxcloud) but should fail silently in frontend
            _logger.debug("ValidationError occurs during tax compute. %s" % (e))
        if add_qty:
            add_qty -= 1
    # compute new quantity
    if set_qty:
        quantity = set_qty
    elif add_qty is not None:
        quantity = order_line.product_uom_qty + (add_qty or 0)
    # Remove zero of negative lines
    if quantity <= 0:
        linked_line = order_line.linked_line_id
        order_line.unlink()
        if linked_line:
            # update description of the parent
            linked_product = product_with_context.browse(linked_line.product_id.id)
            linked_line.name = linked_line.get_sale_order_line_multiline_description_sale(linked_product)
    else:
        # update line
        no_variant_attributes_price_extra = [ptav.price_extra for ptav in order_line.product_no_variant_attribute_value_ids]
        values = self.with_context(no_variant_attributes_price_extra=tuple(no_variant_attributes_price_extra))._website_product_id_change(self.id, product_id, qty=quantity)
        if self.pricelist_id.discount_policy == 'with_discount' and not self.env.context.get('fixed_price'):
            order = self.sudo().browse(self.id)
            product_context.update({
                'partner': order.partner_id,
                'quantity': quantity,
                'date': order.date_order,
                'pricelist': order.pricelist_id.id,
                'force_company': order.company_id.id,
            })
            product_with_context = self.env['product.product'].with_context(product_context)
            product = product_with_context.browse(product_id)
            values['price_unit'] = self.env['account.tax']._fix_tax_included_price_company(
                order_line._get_display_price(product),
                order_line.product_id.taxes_id,
                order_line.tax_id,
                self.company_id
            )
        if 'product_uom_id' in kwargs:
            # Keep the UoM the website selected for this line.
            values.update({'product_uom': int(kwargs['product_uom_id'])})
        else:
            # Do not reset the line's current UoM to the product default.
            del values['product_uom']
        order_line.write(values)
        order_line.product_uom_change()
        # link a product to the sales order
        if kwargs.get('linked_line_id'):
            linked_line = SaleOrderLineSudo.browse(kwargs['linked_line_id'])
            order_line.write({
                'linked_line_id': linked_line.id,
            })
            linked_product = product_with_context.browse(linked_line.product_id.id)
            linked_line.name = linked_line.get_sale_order_line_multiline_description_sale(linked_product)
    # Generate the description with everything. This is done after
    # creating because the following related fields have to be set:
    # - product_no_variant_attribute_value_ids
    # - product_custom_attribute_value_ids
    # - linked_line_id
    order_line.name = order_line.get_sale_order_line_multiline_description_sale(product)
    option_lines = self.order_line.filtered(lambda l: l.linked_line_id.id == order_line.id)
    return {'line_id': order_line.id, 'quantity': quantity, 'option_ids': list(set(option_lines.ids))}

# Replace the upstream implementation with the UoM-aware one.
SaleOrder._cart_update = _cart_update
class ProductBrand(models.Model):
    # Simple lookup model holding product brand names; linked from
    # product.template via a many2many (see `product.brand_id` below).
    _name = "product.brand"
    name = fields.Char("Brand")
class product(models.Model):
    """Extend product.template with a brand link and extra units of
    measure that the website can offer alongside the base unit."""
    _inherit = 'product.template'
    brand_id = fields.Many2many("product.brand", string="Brand")
    extra_units = fields.Many2many('uom.uom', 'product_id', 'uom_id', 'prod_uom_rel', string="Extra Units")

    def units_web(self):
        """Return the UoMs to show on the website: the template's base
        unit first, followed by every configured extra unit."""
        template = self.env['product.template'].sudo().browse(self.id)
        return [template.uom_id, *template.extra_units]
| eqilibruim-solutions/Theme-1 | clarico_ext/models/product_template.py | product_template.py | py | 7,913 | python | en | code | 0 | github-code | 36 |
5972086108 | import random
import pygame
import copy
# 48x48 Game-of-Life board: `l` starts from a random alive(1)/dead(0)
# configuration, `k` is the scratch board for the next generation.
l = [[random.choice([0, 1]) for i in range(48)] for i in range(48)]
k = [[0 for i in range(48)] for i in range(48)]
pygame.init()
# 480x480 window -> each cell is drawn as a 10x10 px square.
s = pygame.display.set_mode((480, 480), 0, 32)
o = True
def z(x, y):
    """Count the live neighbours of cell (x, y) on the global board `l`,
    treating everything outside the 48x48 grid as dead."""
    alive = 0
    for nx in (x - 1, x, x + 1):
        for ny in (y - 1, y, y + 1):
            if (nx, ny) == (x, y):
                continue
            if 0 <= nx <= 47 and 0 <= ny <= 47 and l[nx][ny] == 1:
                alive += 1
    return alive
while o:
    s.fill((255, 255, 255))
    # Closing the window ends the simulation.
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            o = False
    # Conway's rules: 3 neighbours -> alive, 2 -> keep state, else dead.
    for x in range(48):
        for y in range(48):
            a = z(x, y)
            if a == 2:
                k[x][y] = l[x][y]
            elif a == 3:
                k[x][y] = 1
            else:
                k[x][y] = 0
    # Draw the next generation (blue cells) plus the black grid lines.
    for x in range(48):
        for y in range(48):
            if k[x][y] == 1:
                s.fill((0, 0, 255), (y * 10, x * 10, 10, 10))
            pygame.draw.rect(s, (0, 0, 0), (y * 10, x * 10, 10, 10), 1)
    # Promote the scratch board and throttle to ~10 generations/second.
    l = copy.deepcopy(k)
    pygame.display.update()
    pygame.time.wait(100)
| Lil-Shawn/game-of-life | main.py | main.py | py | 1,240 | python | en | code | 0 | github-code | 36 |
32253555423 | #Day 14 - 30 days of code, scope
class Difference:
    """HackerRank Day 14: largest difference between the absolute values
    of the stored elements."""

    def __init__(self, a):
        self.__elements = a

    def computeDifference(self):
        """Store max(|x|) - min(|x|) in self.maximumDifference."""
        magnitudes = [abs(value) for value in self.__elements]
        self.maximumDifference = max(magnitudes) - min(magnitudes)
# Demo: |-3|, |2|, |3| -> max 3, min 2 -> prints 1.
mi_lista = Difference([-3,2,3])
mi_lista.computeDifference()
print(mi_lista.maximumDifference)
33567308542 | import numpy as np
import ROOT
ROOT.gROOT.SetStyle("ATLAS")
# Compare ACTS vs Athena seed-finding timings parsed from the two log files
# output_<software>_<sample>.txt; every kept line has its |TIMER ...| prefix
# stripped so only the timing payload remains.
software = [
    'acts',
    'athena',
]
sample = 'ttbar'
#sample = 'singleMu_100GeV'
event_name = "ttbar 14 TeV"
#event_name = "single Mu pT = 100 GeV"
string = '===>>> done processing event'
log_lines_grid = [line.replace('|TIMER ACTS| ','') for line in open('output_{}.txt'.format(software[0]+'_'+sample)) if string in line or '|TIMER ACTS|' in line]
acts_values = list(filter(None,[lines[:].strip() for lines in log_lines_grid]))
log_lines_grid = [line.replace('|TIMER ATHENA| ','') for line in open('output_{}.txt'.format(software[1]+'_'+sample)) if string in line or '|TIMER ATHENA|' in line]
athena_values = list(filter(None,[lines[:].strip() for lines in log_lines_grid]))
# Per-step timing histograms (times filled in ms after the /1000 below).
# NOTE(review): several histograms are booked with xmax < xmin (e.g. 0.0 to
# -500) — presumably relying on ROOT choosing the axis limits; confirm intent.
h_acts_grid_time_PPP = ROOT.TH1F("h_acts_grid_time_PPP", "", 25, 0.0, 60)
h_athena_grid_time_PPP = ROOT.TH1F("h_athena_grid_time_PPP", "", 25, 0.0, 60)
h_acts_grid_time_SSS = ROOT.TH1F("h_acts_grid_time_SSS", "", 50, 0.0, 50)
h_athena_grid_time_SSS = ROOT.TH1F("h_athena_grid_time_SSS", "", 50, 0.0, 50)
h_acts_finder_time_PPP = ROOT.TH1F("h_acts_finder_time_PPP", "", 25, 0.0, 17)
h_athena_finder_time_PPP = ROOT.TH1F("h_athena_finder_time_PPP", "", 25, 0.0, 17)
h_acts_finder_time_SSS = ROOT.TH1F("h_acts_finder_time_SSS", "", 100, 0.0, -500)
h_athena_finder_time_SSS = ROOT.TH1F("h_athena_finder_time_SSS", "", 100, 0.0, -500)
# NOTE(review): the filter2/tri histograms below reuse the ROOT object names
# "h_*_finder_time_SSS" already taken above — ROOT will warn about replaced
# objects; give them unique names.
h_acts_filter2_time_PPP = ROOT.TH1F("h_acts_finder_time_SSS", "", 25, 0.0, -100)
h_athena_filter2_time_PPP = ROOT.TH1F("h_athena_finder_time_SSS", "", 25, 0.0, -100)
h_acts_filter2_time_SSS = ROOT.TH1F("h_acts_finder_time_SSS", "", 50, 0.0, 17)
h_athena_filter2_time_SSS = ROOT.TH1F("h_athena_finder_time_SSS", "", 50, 0.0, 17)
h_acts_tri_time_PPP = ROOT.TH1F("h_acts_finder_time_SSS", "", 25, 0.0, 2300)
h_athena_tri_time_PPP = ROOT.TH1F("h_athena_finder_time_SSS", "", 25, 0.0, 2300)
h_acts_tri_time_SSS = ROOT.TH1F("h_acts_finder_time_SSS", "", 25, 0.0, -500)
h_athena_tri_time_SSS = ROOT.TH1F("h_athena_finder_time_SSS", "", 25, 0.0, -500)
# Parse both log streams: track which detector section (PPP/SSS) we are in,
# extract the per-step timing after each "<step>:" token, convert us -> ms
# (/1000) and fill the matching histogram.  Only the first 101 events are
# processed.
for file in [acts_values, athena_values]:
    detector = ''
    a = 0
    b = 0
    event = 0
    for line in file:
        if event == 101:
            break
        if 'events processed so far' in line:
            print(event)
            event += 1
        #print(line)
        # Section markers switch the detector context for following lines.
        if 'This is PPP' in line:
            detector = 'PPP'
            #h_acts_finder_time_PPP.Fill(a/1000)
            #print(a/1000)
            a = 0
        if 'This is SSS' in line:
            detector = 'SSS'
            #h_acts_finder_time_SSS.Fill(b/1000)
            #print(b/1000)
            b = 0
        # Duplet-formation timing.
        if 'duplets' in line:
            if file == acts_values:
                if detector == 'PPP':
                    #print("h_acts_grid_time_PPP")
                    h_acts_grid_time_PPP.Fill(float(line[line.find('duplets:'):].replace('duplets:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_acts_grid_time_SSS")
                    h_acts_grid_time_SSS.Fill(float(line[line.find('duplets:'):].replace('duplets:', '').strip().split(" ")[0])/1000)
            elif file == athena_values:
                if detector == 'PPP':
                    #print("h_athena_grid_time_PPP")
                    h_athena_grid_time_PPP.Fill(float(line[line.find('duplets:'):].replace('duplets:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_athena_grid_time_SSS")
                    h_athena_grid_time_SSS.Fill(float(line[line.find('duplets:'):].replace('duplets:', '').strip().split(" ")[0])/1000)
        # Stand-alone "filter 1" timing (excludes the combined triplet line).
        elif 'filter 1' in line and 'triplets' not in line:
            if file == acts_values:
                if detector == 'PPP':
                    #print("h_acts_finder_time_PPP")
                    #print(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                    h_acts_finder_time_PPP.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_acts_finder_time_SSS")
                    h_acts_finder_time_SSS.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
            elif file == athena_values:
                if detector == 'PPP':
                    #print("h_athena_finder_time_PPP")
                    #print(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                    h_athena_finder_time_PPP.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_athena_finder_time_SSS")
                    h_athena_finder_time_SSS.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
        # Combined "triplets + filter 1" timing.
        elif 'triplets + filter 1' in line:
            if file == acts_values:
                if detector == 'PPP':
                    #print("h_acts_finder_time_PPP")
                    #print(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                    h_acts_tri_time_PPP.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_acts_finder_time_SSS")
                    h_acts_tri_time_SSS.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
            elif file == athena_values:
                if detector == 'PPP':
                    #print("h_athena_finder_time_PPP")
                    #print(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                    h_athena_tri_time_PPP.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_athena_finder_time_SSS")
                    h_athena_tri_time_SSS.Fill(float(line[line.find('filter 1:'):].replace('filter 1:', '').strip().split(" ")[0])/1000)
        # "filter 2" timing.
        elif 'filter 2' in line:
            if file == acts_values:
                if detector == 'PPP':
                    #print("h_acts_finder_time_PPP")
                    #print(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
                    h_acts_filter2_time_PPP.Fill(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_acts_finder_time_SSS")
                    h_acts_filter2_time_SSS.Fill(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
            elif file == athena_values:
                if detector == 'PPP':
                    #print("h_athena_finder_time_PPP")
                    #print(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
                    h_athena_filter2_time_PPP.Fill(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
                elif detector == 'SSS':
                    #print("h_athena_finder_time_SSS")
                    h_athena_filter2_time_SSS.Fill(float(line[line.find('filter 2:'):].replace('filter 2:', '').strip().split(" ")[0])/1000)
def setLegend(acts, athena, filter, dec_name):
    """Draw the ACTS/Athena legend plus sample labels on the current pad.

    acts/athena - the two histograms to label
    filter - name of the timed algorithm step shown under the sample name
    dec_name - seed type label, e.g. "PPP" or "SSS"
    Returns the TLegend so the caller keeps a reference alive.
    NOTE(review): the parameter `filter` shadows the builtin.
    """
    legend = ROOT.TLegend(0.6,0.57,0.99,0.70)
    legend.AddEntry(acts ,"ACTS")
    legend.AddEntry(athena ,"Athena")
    legend.SetLineWidth(0)
    legend.SetFillStyle(0)
    legend.Draw("same")
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextSize(0.035)
    # DrawText places labels in normalized pad coordinates (NDC).
    latex.DrawText(0.61, 0.78, "{} space points".format(dec_name))
    latex.DrawText(0.61, 0.83, event_name)
    latex.DrawText(0.61, 0.73, filter)
    return legend
def setLegend2(h1, h2, name1, name2, filter):
    """Variant of setLegend with caller-chosen entry names and no seed-type
    line.  (Not referenced in the plotting code visible below.)"""
    legend2 = ROOT.TLegend(0.6,0.57,0.99,0.70)
    legend2.AddEntry(h1 , name1)
    legend2.AddEntry(h2 , name2)
    legend2.SetLineWidth(0)
    legend2.SetFillStyle(0)
    legend2.Draw("same")
    latex2 = ROOT.TLatex()
    latex2.SetNDC()
    latex2.SetTextSize(0.035)
    latex2.DrawText(0.61, 0.83, event_name)
    latex2.DrawText(0.61, 0.73, filter)
    return legend2
def plotOptions(h1, h2, y_label):
    """Apply the shared drawing style to the histogram pair: h1 (ACTS) in
    ROOT color index 95, h2 (Athena) in 60, with axis fonts/sizes tuned
    for the two-pad ratio layout.  Returns (h1, h2)."""
    h1.GetYaxis().SetTitle(y_label)
    # Label size 0 hides the upper pad's y labels (redrawn smaller below).
    h1.GetYaxis().SetLabelSize(0.);
    h1.SetLineColor(95)
    h2.SetLineColor(60)
    h1.SetLineWidth(2)
    h1.GetYaxis().SetTitleSize(20)
    h1.GetYaxis().SetTitleFont(43)
    h1.GetYaxis().SetTitleOffset(3)
    h1.GetYaxis().SetLabelFont(43)
    h1.GetYaxis().SetLabelSize(15)
    h1.GetXaxis().SetLabelFont(43)
    h1.GetXaxis().SetLabelSize(15)
    h1.SetStats(0)
    h2.SetLineWidth(2)
    return h1, h2
def createPad1():
    """Create, draw and cd into the upper (log-y) pad of the ratio layout."""
    pad1 = ROOT.TPad("pad1", "pad1", 0, 0.3, 1, 1.0);
    pad1.SetBottomMargin(0.05)
    # pad1.SetGridx()i
    pad1.SetLogy(1)
    pad1.Draw()
    pad1.cd()
    return pad1
def createPad2():
    """Create, draw and cd into the lower (ratio) pad of the layout."""
    pad2 = ROOT.TPad("pad2", "pad2", 0, 0.08, 1, 0.3)
    pad2.SetTopMargin(0)
    pad2.SetBottomMargin(0.3)
    # pad2.SetGridx()
    pad2.Draw()
    pad2.cd()
    return pad2
def createCanvasPads():
    """Build the standard canvas with the upper histogram pad and the
    lower ratio pad; returns (canvas, pad1, pad2)."""
    c = ROOT.TCanvas("c", "canvas", 800, 800)
    # Upper histogram plot is pad1
    pad1 = createPad1()
    # Lower ratio plot is pad2
    c.cd()
    pad2 = createPad2()
    return c, pad1, pad2
def plotRatio(h1, h2, x_label):
    """Clone h1, divide by h2 and style the result for the lower ratio pad
    (y range fixed to [0.8, 1.35]).  Returns the ratio histogram."""
    # Define the ratio plot
    h3 = h1.Clone("h3")
    h3.SetTitle("")
    h3.SetLineColor(1)
    h3.SetMinimum(0.8)
    h3.SetMaximum(1.35)
    h3.SetMarkerStyle(21)
    h3.Sumw2()
    h3.SetStats(0)
    h3.Divide(h2)
    # Y axis h1 plot settings
    h1.GetYaxis().SetTitleSize(20)
    h1.GetYaxis().SetTitleFont(43)
    h1.GetYaxis().SetTitleOffset(1.6)
    # Y axis ratio plot settings
    h3.GetYaxis().SetTitle("")
    h3.GetYaxis().SetNdivisions(505)
    h3.GetYaxis().SetTitleSize(20)
    h3.GetYaxis().SetTitleFont(43)
    h3.GetYaxis().SetTitleOffset(1.6)
    h3.GetYaxis().SetLabelFont(43)
    h3.GetYaxis().SetLabelSize(15)
    # X axis ratio plot settings
    h3.GetXaxis().SetTitle(x_label)
    h3.GetXaxis().SetTitleSize(20)
    h3.GetXaxis().SetTitleFont(43)
    h3.GetXaxis().SetTitleOffset(4)
    h3.GetXaxis().SetLabelFont(43)
    h3.GetXaxis().SetLabelSize(15)
    return h3
# Produce one ratio plot per timed step into a multipage PDF; the "[" / "]"
# suffixes open and close the multipage file.  Each plot is also written to
# its own .root file.
canvas = ROOT.TCanvas()
canvas.Print("time_plots.pdf[")
canvas.Clear()
# Duplet timing, PPP.
h1, h2 = plotOptions(h_acts_grid_time_PPP, h_athena_grid_time_PPP, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "duplets", "PPP")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_duplets.root")
canvas.Clear()
'''
# GRID SSS
h1, h2 = plotOptions(h_acts_grid_time_SSS, h_athena_grid_time_SSS, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "duplets", "SSS")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_grid_SSS.png")
canvas.Clear()
'''
# FINDER PPP
h1, h2 = plotOptions(h_acts_finder_time_PPP, h_athena_finder_time_PPP, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "newOneSeedWithCurvaturesComparisonPPP", "PPP")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_filter1.root")
canvas.Clear()
'''
# FINDER SSS
h1, h2 = plotOptions(h_acts_finder_time_SSS, h_athena_finder_time_SSS, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "newOneSeedWithCurvaturesComparisonPPP", "SSS")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_finder_SSS.png")
canvas.Clear()
'''
# Triplets + filter 1, PPP.
h1, h2 = plotOptions(h_acts_tri_time_PPP, h_athena_tri_time_PPP, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "triplets + newOneSeedWithCurvaturesComparisonPPP", "PPP")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_triplets_filter1.root")
canvas.Clear()
# Filter 2 (fillSeeds), PPP.
h1, h2 = plotOptions(h_acts_filter2_time_PPP, h_athena_filter2_time_PPP, "Events")
h3 = plotRatio(h1, h2, "Time (\mu s)")
canvas, pad1, pad2 = createCanvasPads()
pad1.cd()
h1.Draw("he")
h2.Draw("he same")
legend = setLegend(h1, h2, "fillSeeds", "PPP")
pad2.cd()
h3.Draw("he")
canvas.Print("time_plots.pdf")
canvas.Print("time_filter2.root")
canvas.Clear()
# Close the multipage PDF.
canvas.Print("time_plots.pdf]")
39929411303 | import sys
# BOJ 14499 input: board size n x m, dice start position (r, c), k moves.
n, m, r, c, k = map(int, sys.stdin.readline().split())
board = [list(map(int, sys.stdin.readline().split())) for r in range(n)]
move_list = list(map(int, sys.stdin.readline().split()))
# Six dice faces, all initially 0; index 2 is used as the top face and
# index 5 as the bottom face by roll() below.
dice = [0, 0, 0, 0, 0, 0]
def roll(dice, direction):
if direction == 1:
temp = dice[5]
dice[5] = dice[3]
dice[2:4] = dice[1:3]
dice[1] = temp
elif direction == 2:
temp = dice[5]
dice[5] = dice[1]
dice[1:3] = dice[2:4]
dice[3] = temp
elif direction == 3:
temp = dice[0]
dice[0] = dice[2]
dice[2] = dice[4]
dice[4] = dice[5]
dice[5] = temp
elif direction == 4:
temp = dice[5]
dice[5] = dice[4]
dice[4] = dice[2]
dice[2] = dice[0]
dice[0] = temp
# return top and bottom of the dice
return dice[2], dice[5]
directions = ((), (0, 1), (0, -1), (-1, 0), (1, 0))
cur = (r, c)
for move in move_list:
next_r = cur[0] + directions[move][0]
next_c = cur[1] + directions[move][1]
# skip this movement
if next_r < 0 or next_r >= n or next_c < 0 or next_c >= m:
continue
# roll the dice
roll(dice, move)
if board[next_r][next_c] == 0:
board[next_r][next_c] = dice[5]
else:
dice[5] = board[next_r][next_c]
board[next_r][next_c] = 0
print(dice[2])
cur = (next_r, next_c)
| Choi-Sung-Hoon/Algorithm_with_Python | BOJ/14499.py | 14499.py | py | 1,219 | python | en | code | 1 | github-code | 36 |
15991499425 | import os
import argparse
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import numpy as np
import cv2
from seg_metric import SegmentationMetric
import random
import shutil
import setproctitle
import time
import logging
from dataset import potsdam
from custom_transforms import Mixup, edge_contour
from loss import CrossEntropyLoss, Edge_loss, Edge_weak_loss
class FullModel(nn.Module):
    """Bundle a segmentation network with its training losses.

    In training mode ``forward`` returns the accumulated loss (cross-entropy
    over every output head, plus an edge loss on the last head when enabled,
    optionally computed through the Mixup helper). In eval mode it returns
    the primary prediction only.
    """
    def __init__(self, model, args2):
        super(FullModel, self).__init__()
        self.model = model
        self.use_mixup = args2.use_mixup
        self.use_edge = args2.use_edge
        # self.ce_loss = Edge_weak_loss()
        self.ce_loss = CrossEntropyLoss()
        self.edge_loss = Edge_loss()
        if self.use_mixup:
            self.mixup = Mixup(use_edge=args2.use_edge)
    def forward(self, input, label=None, train=True):
        # Mixup path: the Mixup helper runs the model and the losses itself.
        if train and self.use_mixup and label is not None:
            if self.use_edge:
                loss = self.mixup(input, label, [self.ce_loss, self.edge_loss], self.model)
            else:
                loss = self.mixup(input, label, self.ce_loss, self.model)
            return loss
        output = self.model(input)
        if train:
            losses = 0
            if isinstance(output, (list, tuple)):
                if self.use_edge:
                    # Last element is the edge head; the rest are seg heads.
                    for i in range(len(output) - 1):
                        loss = self.ce_loss(output[i], label)
                        losses += loss
                    losses += self.edge_loss(output[-1], edge_contour(label).long())
                else:
                    for i in range(len(output)):
                        loss = self.ce_loss(output[i], label)
                        losses += loss
            else:
                losses = self.ce_loss(output, label)
            return losses
        else:
            if isinstance(output, (list, tuple)):
                # Inference: only the primary head's prediction is returned.
                return output[0]
            else:
                return output
def get_world_size():
    """Number of distributed processes, or 1 when DDP is not initialized."""
    dist = torch.distributed
    return dist.get_world_size() if dist.is_initialized() else 1
def get_rank():
    """Rank of this process in the group, or 0 when DDP is not initialized."""
    dist = torch.distributed
    return dist.get_rank() if dist.is_initialized() else 0
class params():
    """Dataset and HRNet-backbone hyper-parameters derived from CLI args.

    For the supported remote-sensing datasets the class count is fixed at 6;
    for the HRNet model variants the stage configurations are generated from
    a single per-branch channel table instead of being written out twice.
    """

    # Per-branch channel widths for each supported HRNet variant.
    _HRNET_CHANNELS = {
        'HRNet_32': [32, 64, 128, 256],
        'HRNet_48': [48, 96, 192, 384],
    }

    def __init__(self, args2):
        if args2.dataset in ['potsdam', 'vaihingen']:
            self.number_of_classes = 6
        channels = self._HRNET_CHANNELS.get(args2.models)
        if channels is not None:
            # Stages 2-4 share BLOCK/FUSE settings and only differ in depth,
            # branch count and the channel-width prefix they use.
            self.STAGE2 = {'NUM_MODULES': 1,
                           'NUM_BRANCHES': 2,
                           'NUM_BLOCKS': [4, 4],
                           'NUM_CHANNELS': channels[:2],
                           'BLOCK': 'BASIC',
                           'FUSE_METHOD': 'SUM'}
            self.STAGE3 = {'NUM_MODULES': 4,
                           'NUM_BRANCHES': 3,
                           'NUM_BLOCKS': [4, 4, 4],
                           'NUM_CHANNELS': channels[:3],
                           'BLOCK': 'BASIC',
                           'FUSE_METHOD': 'SUM'}
            self.STAGE4 = {'NUM_MODULES': 3,
                           'NUM_BRANCHES': 4,
                           'NUM_BLOCKS': [4, 4, 4, 4],
                           'NUM_CHANNELS': channels[:4],
                           'BLOCK': 'BASIC',
                           'FUSE_METHOD': 'SUM'}
def get_model(args2, device, models='DANet'):
    """Build the requested segmentation network, wrap it with FullModel,
    convert its BatchNorms to SyncBatchNorm and return it as a
    DistributedDataParallel module on ``device``.
    """
    if models in ['swinT', 'resT']:
        print(models, args2.head)
    else:
        print(models)
    if args2.dataset in ['potsdam', 'vaihingen']:
        nclass = 6
    assert models in ['danet', 'bisenetv2', 'pspnet', 'segbase', 'swinT',
                      'deeplabv3', 'fcn', 'fpn', 'unet', 'resT']
    # Imports are done lazily so only the selected backbone is loaded.
    if models == 'danet':
        from models.danet import DANet
        model = DANet(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'bisenetv2':
        from models.bisenetv2 import BiSeNetV2
        model = BiSeNetV2(nclass=nclass)
    if models == 'pspnet':
        from models.pspnet import PSPNet
        model = PSPNet(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'segbase':
        from models.segbase import SegBase
        model = SegBase(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'swinT':
        from models.swinT import swin_tiny as swinT
        model = swinT(nclass=nclass, pretrained=True, aux=True, head=args2.head, edge_aux=args2.use_edge)
    if models == 'resT':
        from models.resT import rest_tiny as resT
        model = resT(nclass=nclass, pretrained=True, aux=True, head=args2.head, edge_aux=args2.use_edge)
    if models == 'deeplabv3':
        from models.deeplabv3 import DeepLabV3
        model = DeepLabV3(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'fcn':
        from models.fcn import FCN16s
        model = FCN16s(nclass=nclass)
    if models == 'fpn':
        from models.fpn import FPN
        model = FPN(nclass=nclass)
    if models == 'unet':
        from models.unet import UNet
        model = UNet(nclass=nclass)
    model = FullModel(model, args2)
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = model.to(device)
    model = nn.parallel.DistributedDataParallel(
        model, device_ids=[args2.local_rank], output_device=args2.local_rank, find_unused_parameters=True)
    return model
def reduce_tensor(inp):
    """
    Reduce the loss from all processes so that
    process with rank 0 has the averaged results.

    When running single-process the tensor is returned unchanged.
    """
    if get_world_size() < 2:
        return inp
    with torch.no_grad():
        reduced = inp
        torch.distributed.reduce(reduced, dst=0)
    return reduced
class AverageMeter(object):
    """Tracks the most recent value and a running weighted average."""

    def __init__(self):
        self.initialized = False
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None

    def initialize(self, val, weight):
        # First observation seeds the statistics directly.
        self.val = val
        self.avg = val
        self.sum = val * weight
        self.count = weight
        self.initialized = True

    def update(self, val, weight=1):
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)

    def add(self, val, weight):
        # Fold a new observation into the running sums.
        self.val = val
        self.sum += val * weight
        self.count += weight
        self.avg = self.sum / self.count

    def value(self):
        """Most recently observed value."""
        return self.val

    def average(self):
        """Weighted running average of all observations."""
        return self.avg
def parse_args():
    """Parse command-line options for training.

    Returns an argparse.Namespace; trailing unparsed tokens are collected
    into ``opts`` so config overrides can be forwarded verbatim.
    """
    cli = argparse.ArgumentParser(description='Train segmentation network')
    # Data / schedule options.
    cli.add_argument("--dataset", type=str, default='vaihingen', choices=['potsdam', 'vaihingen'])
    cli.add_argument("--end_epoch", type=int, default=200)
    cli.add_argument("--warm_epochs", type=int, default=5)
    cli.add_argument("--lr", type=float, default=0.01)
    cli.add_argument("--train_batchsize", type=int, default=1)
    cli.add_argument("--val_batchsize", type=int, default=1)
    cli.add_argument("--crop_size", type=int, nargs='+', default=[512, 512], help='H, W')
    cli.add_argument("--information", type=str, default='RS')
    # Model selection.
    cli.add_argument("--models", type=str, default='danet',
                     choices=['danet', 'bisenetv2', 'pspnet', 'segbase', 'resT',
                              'swinT', 'deeplabv3', 'fcn', 'fpn', 'unet'])
    cli.add_argument("--head", type=str, default='seghead')
    # Misc / distributed options.
    cli.add_argument("--seed", type=int, default=6)
    cli.add_argument("--save_dir", type=str, default='./work_dir')
    cli.add_argument("--use_edge", type=int, default=0)
    cli.add_argument("--use_mixup", type=int, default=0)
    cli.add_argument("--local_rank", type=int, default=0)
    cli.add_argument('opts',
                     help="Modify config options using the command-line",
                     default=None,
                     nargs=argparse.REMAINDER)
    return cli.parse_args()
def save_model_file(save_dir, save_name):
    """Create the run directory, snapshot the current sources into it and
    configure file logging.

    Copies every top-level file plus the ``models`` package so the exact code
    used for a run is archived next to its weights and outputs.
    """
    save_dir = os.path.join(save_dir, save_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir + '/weights/')
        os.makedirs(save_dir + '/outputs/')
    for file in os.listdir('.'):
        if os.path.isfile(file):
            shutil.copy(file, save_dir)
    if not os.path.exists(os.path.join(save_dir, 'models')):
        shutil.copytree('./models', os.path.join(save_dir, 'models'))
    # All subsequent logging.info calls land in the run's train.log.
    logging.basicConfig(filename=save_dir + '/train.log', level=logging.INFO)
def train():
    """Distributed training entry point.

    Initializes the NCCL process group, seeds all RNGs, builds the model,
    data loaders and AdamW optimizer, optionally resumes from a checkpoint,
    then runs the epoch loop. Validation and checkpointing only happen for
    the last few epochs (``epoch > end_epoch - 3``).
    """
    distributed = True
    args2 = parse_args()
    if distributed:
        torch.cuda.set_device(args2.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://",
        )
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args2.seed)
    torch.cuda.manual_seed(args2.seed)
    random.seed(args2.seed)
    np.random.seed(args2.seed)
    save_name = "{}_lr{}_epoch{}_batchsize{}_{}".format(args2.models, args2.lr, args2.end_epoch,
                                                        args2.train_batchsize * get_world_size(), args2.information)
    save_dir = args2.save_dir
    # Only rank 0 snapshots sources and sets up file logging.
    if args2.local_rank == 0:
        save_model_file(save_dir=save_dir, save_name=save_name)
    device = torch.device(('cuda:{}').format(args2.local_rank))
    model = get_model(args2, device, models=args2.models)
    potsdam_train = potsdam(train=True, dataset=args2.dataset, crop_szie=args2.crop_size)
    if distributed:
        train_sampler = DistributedSampler(potsdam_train)
    else:
        train_sampler = None
    dataloader_train = DataLoader(
        potsdam_train,
        batch_size=args2.train_batchsize,
        shuffle=True and train_sampler is None,
        num_workers=4,
        pin_memory=True,
        drop_last=True,
        sampler=train_sampler)
    potsdam_val = potsdam(train=False, dataset=args2.dataset, crop_szie=args2.crop_size)
    if distributed:
        val_sampler = DistributedSampler(potsdam_val)
    else:
        val_sampler = None
    dataloader_val = DataLoader(
        potsdam_val,
        batch_size=args2.val_batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        sampler=val_sampler)
    # optimizer = torch.optim.SGD([{'params':
    #                             filter(lambda p: p.requires_grad,
    #                                    model.parameters()),
    #                             'lr': args2.lr}],
    #                             lr=args2.lr,
    #                             momentum=0.9,
    #                             weight_decay=0.0005,
    #                             nesterov=False,
    #                             )
    optimizer = torch.optim.AdamW([{'params':
                                  filter(lambda p: p.requires_grad,
                                         model.parameters()),
                                  'lr': args2.lr}],
                                  lr=args2.lr,
                                  betas=(0.9, 0.999),
                                  weight_decay=0.01,
                                  )
    start = time.time()
    miou = 0
    acc = 0
    f1 = 0
    precision = 0
    recall = 0
    best_miou = 0
    best_acc = 0
    best_f1 = 0
    last_epoch = 0
    # Validation/checkpointing only runs for epochs strictly after this.
    test_epoch = args2.end_epoch - 3
    ave_loss = AverageMeter()
    world_size = get_world_size()
    weight_save_dir = os.path.join(save_dir, save_name + '/weights')
    model_state_file = weight_save_dir + "/{}_lr{}_epoch{}_batchsize{}_{}.pkl.tar" \
        .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information)
    # Resume from a previous run if its checkpoint tarball exists.
    if os.path.isfile(model_state_file):
        print('loaded successfully')
        logging.info("=> loading checkpoint '{}'".format(model_state_file))
        checkpoint = torch.load(model_state_file, map_location=lambda storage, loc: storage)
        checkpoint = {k: v for k, v in checkpoint.items() if not 'loss' in k}
        best_miou = checkpoint['best_miou']
        best_acc = checkpoint['best_acc']
        best_f1 = checkpoint['best_f1']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logging.info("=> loaded checkpoint '{}' (epoch {})".format(
            model_state_file, checkpoint['epoch']))
    for epoch in range(last_epoch, args2.end_epoch):
        if distributed:
            # Re-shuffle shards per epoch so every rank sees new data order.
            train_sampler.set_epoch(epoch)
        model.train()
        setproctitle.setproctitle("xzy:" + str(epoch) + "/" + "{}".format(args2.end_epoch))
        for i, sample in enumerate(dataloader_train):
            image, label = sample['image'], sample['label']
            image, label = image.to(device), label.to(device)
            label = label.long().squeeze(1)
            losses = model(image, label)
            loss = losses.mean()
            ave_loss.update(loss.item())
            lenth_iter = len(dataloader_train)
            # Per-iteration poly schedule with warmup (see adjust_learning_rate).
            lr = adjust_learning_rate(optimizer,
                                      args2.lr,
                                      args2.end_epoch * lenth_iter,
                                      i + epoch * lenth_iter,
                                      args2.warm_epochs * lenth_iter
                                      )
            if i % 50 == 0:
                reduced_loss = ave_loss.average()
                print_loss = reduce_tensor(torch.from_numpy(np.array(reduced_loss)).to(device)).cpu() / world_size
                print_loss = print_loss.item()
                if args2.local_rank == 0:
                    time_cost = time.time() - start
                    start = time.time()
                    print("epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, "
                          "best_miou:{:.4f}, miou:{:.4f}, acc:{:.4f}, f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                          format(epoch,args2.end_epoch,i,len(dataloader_train),print_loss,time_cost,lr,
                                 best_miou,miou, acc, f1, precision, recall))
                    logging.info(
                        "epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, "
                        "best_miou:{:.4f}, miou:{:.4f}, acc:{:.4f}, f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                        format(epoch, args2.end_epoch, i, len(dataloader_train), print_loss, time_cost, lr,
                               best_miou, miou, acc, f1, precision, recall))
            model.zero_grad()
            loss.backward()
            optimizer.step()
        if epoch > test_epoch:
            # Validate and average the metrics across all ranks.
            miou, acc, f1, precision, recall = validate(dataloader_val, device, model, args2)
            miou = (reduce_tensor(miou).cpu() / world_size).item()
            acc = (reduce_tensor(acc).cpu() / world_size).item()
            f1 = (reduce_tensor(f1).cpu() / world_size).item()
            precision = (reduce_tensor(precision).cpu() / world_size).item()
            recall = (reduce_tensor(recall).cpu() / world_size).item()
        if args2.local_rank == 0:
            if epoch > test_epoch and epoch != 0:
                print('miou:{}, acc:{}, f1:{}, precision:{}, recall:{}'.format(miou, acc, f1, precision, recall))
                torch.save(model.state_dict(),
                           weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_xzy_{}.pkl'
                           .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, epoch))
                if miou >= best_miou and miou != 0:
                    best_miou = miou
                    best_acc, best_f1 = acc, f1
                    best_weight_name = weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_best_epoch_{}.pkl'.format(
                        args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, epoch)
                    torch.save(model.state_dict(), best_weight_name)
                    torch.save(model.state_dict(), weight_save_dir + '/best_weight.pkl')
            # Rolling resume checkpoint (model + optimizer + best metrics).
            torch.save({
                'epoch': epoch + 1,
                'best_miou': best_miou,
                'best_acc': best_acc,
                'best_f1':best_f1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}.pkl.tar'
                .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information))
    if args2.local_rank == 0:
        # Final weights plus a summary of the best metrics.
        torch.save(model.state_dict(),
                   weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_xzy_{}.pkl'
                   .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, args2.end_epoch))
        try:
            print("epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, best_miou:{:.4f}, "
                  "miou:{:.4f}, acc:{:.4f} f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                  format(epoch, args2.end_epoch, i, len(dataloader_train),
                         print_loss, time_cost, lr, best_miou, miou, acc, f1, precision, recall))
            logging.info(
                "epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, best_miou:{:.4f}, "
                "miou:{:.4f}, acc:{:.4f} f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                format(epoch, args2.end_epoch, i, len(dataloader_train),
                       print_loss, time_cost, lr, best_miou, miou, acc, f1, precision, recall))
        except:
            pass
        logging.info("***************super param*****************")
        logging.info("dataset:{} information:{} lr:{} epoch:{} batchsize:{} best_miou:{} best_acc:{} best_f1:{}"
                     .format(args2.dataset, args2.information, args2.lr, args2.end_epoch, args2.train_batchsize *
                             world_size, best_miou, best_acc, best_f1))
        logging.info("***************end*************************")
        print("***************super param*****************")
        print("dataset:{} information:{} lr:{} epoch:{} batchsize:{} best_miou:{} best_acc:{} best_f1:{}"
              .format(args2.dataset, args2.information, args2.lr, args2.end_epoch, args2.train_batchsize * world_size,
                      best_miou, best_acc, best_f1))
        print("***************end*************************")
def adjust_learning_rate(optimizer, base_lr, max_iters,
                         cur_iters, warmup_iter=None, power=0.9):
    """Poly learning-rate schedule with an optional linear warmup.

    Writes the computed rate into ``optimizer.param_groups[0]`` and returns it.
    """
    if warmup_iter is None:
        # Plain poly decay over the whole schedule.
        lr = base_lr * ((1 - float(cur_iters / max_iters)) ** (power))
    elif cur_iters < warmup_iter:
        # Linear ramp from 0 to base_lr during warmup.
        lr = base_lr * cur_iters / (warmup_iter + 1e-8)
    else:
        # Poly decay over the post-warmup portion of the schedule.
        progress = float(cur_iters - warmup_iter) / (max_iters - warmup_iter)
        lr = base_lr * ((1 - progress) ** (power))
    optimizer.param_groups[0]['lr'] = lr
    return lr
def validate(dataloader_val, device, model, args2):
    """Evaluate the model over the validation loader.

    Returns (MIOU, ACC, F1, Precision, Recall) as tensors on ``device``;
    the caller divides by world size after an all-reduce.

    NOTE(review): ``metric`` accumulates its confusion matrix across batches,
    yet the per-batch (cumulative) scores are *summed* into the accumulators
    below, and F1 is computed from those running sums. Verify this matches
    the intended averaging scheme.
    """
    model.eval()
    # Accumulators start as [0]; adding a numpy scalar promotes them to arrays.
    MIOU = [0]
    ACC = [0]
    F1 = [0]
    Precision = [0]
    Recall = [0]
    nclass = 6
    metric = SegmentationMetric(nclass)
    with torch.no_grad():
        for i, sample in enumerate(dataloader_val):
            image, label = sample['image'], sample['label']
            image, label = image.to(device), label.to(device)
            label = label.long().squeeze(1)
            logit = model(image, label, train=False)
            logit = logit.argmax(dim=1)
            logit = logit.cpu().detach().numpy()
            label = label.cpu().detach().numpy()
            metric.addBatch(logit, label)
            iou = metric.IntersectionOverUnion()
            acc = metric.Accuracy()
            precision = metric.Precision()
            recall = metric.Recall()
            # Only the first five classes enter the means — the last class is
            # excluded (presumably background/clutter; confirm with dataset).
            miou = np.nanmean(iou[0:5])
            mprecision = np.nanmean(precision[0:5])
            mrecall = np.nanmean(recall[0:5])
            MIOU = MIOU + miou
            ACC = ACC + acc
            Recall = Recall + mrecall
            Precision = Precision + mprecision
            F1 = F1 + 2 * Precision * Recall / (Precision + Recall)
    MIOU = torch.from_numpy(MIOU).to(device)
    ACC = torch.from_numpy(ACC).to(device)
    F1 = torch.from_numpy(F1).to(device)
    Recall = torch.from_numpy(Recall).to(device)
    Precision = torch.from_numpy(Precision).to(device)
    return MIOU, ACC, F1, Precision, Recall
if __name__ == '__main__':
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    # os.environ.setdefault('RANK', '0')
    # os.environ.setdefault('WORLD_SIZE', '1')
    # os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    # os.environ.setdefault('MASTER_PORT', '29556')
    # cuDNN autotuner: picks the fastest kernels for fixed input shapes.
    cudnn.benchmark = True
    cudnn.enabled = True
    # don't use cudnn
    #cudnn.benchmark = False
    #cudnn.deterministic = True
    train()
| zyxu1996/Efficient-Transformer | train.py | train.py | py | 21,965 | python | en | code | 67 | github-code | 36 |
13989585282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor
class MainWindow(QWidget):
    """Demo widget that paints three rectangles with varying opacity."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Fixed geometry; show() makes the window visible immediately.
        self.setGeometry(300, 300, 350, 100)
        self.setWindowTitle('Drawing rectangles')
        self.show()
    # Painting is performed in the paintEvent() method.
    # The QPainter object does the low-level painting on widgets and other
    # paint devices; all drawing must happen between begin() and end().
    # The actual drawing is delegated to the drawRectangles() method.
    def paintEvent(self, event):
        painter = QPainter()
        painter.begin(self)
        self.drawRectangles(event, painter)
        painter.end()
    def drawRectangles(self, event, painter):
        # Shared pen colour set by hex name; each rectangle gets its own
        # brush — the 4th QColor argument is the alpha channel.
        color = QColor(0, 0, 0)
        color.setNamedColor('#d4d4d4')
        painter.setPen(color)
        painter.setBrush(QColor(200, 0, 0))
        painter.drawRect(10, 15, 90, 60)
        painter.setBrush(QColor(255, 80, 0, 160))
        painter.drawRect(130, 15, 90, 60)
        painter.setBrush(QColor(25, 0, 90, 200))
        painter.drawRect(250, 15, 90, 60)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = MainWindow()
    # exec_() runs the Qt event loop; its exit code is passed to sys.exit.
    sys.exit(app.exec_())
| shellever/Python3Learning | thirdparty/pyqt5/painting/drawrectangles.py | drawrectangles.py | py | 1,308 | python | en | code | 0 | github-code | 36 |
2894217699 | from typing import Dict, Callable
from src.dialog.common.manage_entity.ManageEntityDialogMode import ManageEntityDialogMode
from src.property.Property import Property
from src.session.common.Session import Session
from src.storage.common.entity.Entity import Entity
from src.storage.common.entity.EntityStorage import EntityStorage
class ManageEntityContainerSaver:
    """Persists an entity edited in the manage-entity dialog.

    Dispatches between three save scenarios — creating a new entity, editing
    with a changed key, and editing with an unchanged key — and reports
    duplicate keys through the ``show_error`` callback instead of saving.
    """
    def __init__(
            self,
            session: Session,
            storage: EntityStorage,
            close_dialog: Callable[[], None],
            show_error: Callable[[str], None]
    ):
        self.__session = session
        self.__storage = storage
        self.__close_dialog = close_dialog
        self.__show_error = show_error
    def save_entity(self, key: str, props: Dict[str, Property]):
        """Route the save to the handler matching the dialog mode."""
        mode = self.__session.get_manage_entity_mode()
        if mode == ManageEntityDialogMode.CREATE:
            self.handle_new_entity(key, props)
        elif mode == ManageEntityDialogMode.EDIT and \
                self.__session.get_edit_entity_key() != key:
            self.handle_edit_entity_key_changed(key, props)
        elif mode == ManageEntityDialogMode.EDIT and \
                self.__session.get_edit_entity_key() == key:
            self.handle_edit_entity_key_unchanged(key, props)
    def handle_new_entity(self, key: str, props: Dict[str, Property]):
        """Save a brand-new entity unless its key already exists."""
        if not self.__storage.check_entity_exists(key):
            self.put_entity_close_dialog(key, props)
        else:
            self.report_entity_exists(key)
    def handle_edit_entity_key_changed(self, key: str, props: Dict[str, Property]):
        """Save under a new key: drop the old record first, unless the new
        key collides with an existing entity."""
        if not self.__storage.check_entity_exists(key):
            self.remove_session_entity()
            self.put_entity_close_dialog(key, props)
        else:
            self.report_entity_exists(key)
    def handle_edit_entity_key_unchanged(self, key: str, props: Dict[str, Property]):
        """Overwrite the entity in place (key unchanged, no collision risk)."""
        self.put_entity_close_dialog(key, props)
    def put_entity_close_dialog(self, key: str, props: Dict[str, Property]):
        """Write the entity to storage and close the dialog."""
        self.__storage.put_entity(
            Entity(key, props)
        )
        self.__close_dialog()
    def remove_session_entity(self):
        """Delete the record stored under the key being edited."""
        self.__storage.remove_entity(self.__session.get_edit_entity_key())
    def report_entity_exists(self, key: str):
        """Show a duplicate-key error to the user."""
        # Fixed: the original message lacked a space between the label and
        # the key ("номером<key>" -> "номером <key>").
        self.__show_error("Дело об АП с номером " + key + " уже существует")
| andreyzaytsev21/MasterDAPv2 | src/dialog/common/manage_entity/ManageEntityContainerSaver.py | ManageEntityContainerSaver.py | py | 2,456 | python | en | code | 0 | github-code | 36 |
35396952388 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
from contextlib import contextmanager
import inspect
import logging
import os
import re
import sys
import traceback
from twitter.common import log
from twitter.common.collections import OrderedSet
from twitter.common.lang import Compatibility
from twitter.common.log.options import LogOptions
from pants.backend.core.tasks.task import QuietTaskMixin, Task
from pants.backend.jvm.tasks.nailgun_task import NailgunTask # XXX(pl)
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.config import Config
from pants.base.rcfile import RcFile
from pants.base.workunit import WorkUnit
from pants.commands.command import Command
from pants.engine.engine import Engine
from pants.engine.round_engine import RoundEngine
from pants.goal.context import Context
from pants.goal.error import GoalError
from pants.goal.initialize_reporting import update_reporting
from pants.goal.goal import Goal
from pants.option.bootstrap_options import create_bootstrapped_options
from pants.option.global_options import register_global_options
from pants.util.dirutil import safe_mkdir
# Alias the Python 2/3-compatible StringIO implementation for local use.
StringIO = Compatibility.StringIO
class GoalRunner(Command):
  """Lists installed goals or else executes a named goal."""

  class IntermixedArgumentsError(GoalError):
    pass

  __command__ = 'goal'
  output = None

  def __init__(self, *args, **kwargs):
    """Bootstrap options for every known goal scope before Command init."""
    self.targets = []
    known_scopes = ['']
    for goal in Goal.all():
      # Note that enclosing scopes will appear before scopes they enclose.
      known_scopes.extend(filter(None, goal.known_scopes()))
    self.new_options = create_bootstrapped_options(known_scopes=known_scopes)
    self.config = Config.from_cache() # Get the bootstrapped version.
    super(GoalRunner, self).__init__(*args, needs_old_options=False, **kwargs)

  def get_spec_excludes(self):
    """Absolute paths that target-spec parsing should skip."""
    # Note: Only call after register_options() has been called.
    return [os.path.join(self.root_dir, spec_exclude)
            for spec_exclude in self.new_options.for_global_scope().spec_excludes]

  @property
  def global_options(self):
    """Options registered at the global ('') scope."""
    return self.new_options.for_global_scope()

  @contextmanager
  def check_errors(self, banner):
    """Yield an ``error(key, include_traceback=False)`` recorder; on exit,
    if any errors were recorded, print ``banner`` plus details and abort."""
    errors = {}
    def error(key, include_traceback=False):
      exc_type, exc_value, _ = sys.exc_info()
      msg = StringIO()
      if include_traceback:
        frame = inspect.trace()[-2]
        filename = frame[1]
        lineno = frame[2]
        funcname = frame[3]
        code = ''.join(frame[4]) if frame[4] else None
        traceback.print_list([(filename, lineno, funcname, code)], file=msg)
      if exc_type:
        msg.write(''.join(traceback.format_exception_only(exc_type, exc_value)))
      errors[key] = msg.getvalue()
      sys.exc_clear()
    yield error
    if errors:
      msg = StringIO()
      msg.write(banner)
      invalid_keys = [key for key, exc in errors.items() if not exc]
      if invalid_keys:
        msg.write('\n %s' % '\n '.join(invalid_keys))
      for key, exc in errors.items():
        if exc:
          msg.write('\n %s =>\n %s' % (key, '\n '.join(exc.splitlines())))
      # The help message for goal is extremely verbose, and will obscure the
      # actual error message, so we don't show it in this case.
      self.error(msg.getvalue(), show_help=False)

  def register_options(self):
    """Register global options and each goal's options on the parser."""
    # Add a 'bootstrap' attribute to the register function, so that register_global can
    # access the bootstrap option values.
    def register_global(*args, **kwargs):
      return self.new_options.register_global(*args, **kwargs)
    register_global.bootstrap = self.new_options.bootstrap_option_values()
    register_global_options(register_global)
    for goal in Goal.all():
      goal.register_options(self.new_options)

  def setup_parser(self, parser, args):
    """Resolve goals/target specs from the command line and apply pantsrc
    defaults; mutates ``args`` in place when defaults are injected."""
    if not args:
      args.append('help')
    logger = logging.getLogger(__name__)
    goals = self.new_options.goals
    specs = self.new_options.target_specs
    fail_fast = self.new_options.for_global_scope().fail_fast
    # Warn when a goal name shadows a BUILD file path (ambiguous argument).
    for goal in goals:
      if BuildFile.from_cache(get_buildroot(), goal, must_exist=False).exists():
        logger.warning(" Command-line argument '{0}' is ambiguous and was assumed to be "
                       "a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
    if self.new_options.is_help:
      self.new_options.print_help(goals=goals)
      sys.exit(0)
    self.requested_goals = goals
    with self.run_tracker.new_workunit(name='setup', labels=[WorkUnit.SETUP]):
      spec_parser = CmdLineSpecParser(self.root_dir, self.address_mapper,
                                      spec_excludes=self.get_spec_excludes())
      with self.run_tracker.new_workunit(name='parse', labels=[WorkUnit.SETUP]):
        for spec in specs:
          for address in spec_parser.parse_addresses(spec, fail_fast):
            self.build_graph.inject_address_closure(address)
            self.targets.append(self.build_graph.get_target(address))
    self.goals = [Goal.by_name(goal) for goal in goals]
    rcfiles = self.config.getdefault('rcfiles', type=list,
                                     default=['/etc/pantsrc', '~/.pants.rc'])
    if rcfiles:
      rcfile = RcFile(rcfiles, default_prepend=False, process_default=True)
      # Break down the goals specified on the command line to the full set that will be run so we
      # can apply default flags to inner goal nodes. Also break down goals by Task subclass and
      # register the task class hierarchy fully qualified names so we can apply defaults to
      # baseclasses.
      sections = OrderedSet()
      for goal in Engine.execution_order(self.goals):
        for task_name in goal.ordered_task_names():
          sections.add(task_name)
          task_type = goal.task_type_by_name(task_name)
          for clazz in task_type.mro():
            if clazz == Task:
              break
            sections.add('%s.%s' % (clazz.__module__, clazz.__name__))
      augmented_args = rcfile.apply_defaults(sections, args)
      if augmented_args != args:
        # TODO(John Sirois): Cleanup this currently important mutation of the passed in args
        # once the 2-layer of command -> goal is squashed into one.
        args[:] = augmented_args
        sys.stderr.write("(using pantsrc expansion: pants goal %s)\n" % ' '.join(augmented_args))

  def run(self):
    """Configure logging/reporting, apply target excludes, build the Context
    and execute the requested goals; returns the process exit code."""
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
      LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
      logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
      prev_log_level = None
      # If quiet, temporarily change stderr log level to kill init's output.
      if self.global_options.quiet:
        prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
        # loglevel_name can fail, so only change level if we were able to get the current one.
        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
      log.init('goals')
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(prev_log_level)
    else:
      log.init()
    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
      for goal in self.goals:
        if goal.has_task_of_type(QuietTaskMixin):
          return True
      return False
    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = '** unmatched **'
    def targets_by_pattern(targets, patterns):
      mapping = defaultdict(list)
      for target in targets:
        matched_pattern = None
        for pattern in patterns:
          if re.search(pattern, target.address.spec) is not None:
            matched_pattern = pattern
            break
        if matched_pattern is None:
          mapping[_UNMATCHED_KEY].append(target)
        else:
          mapping[matched_pattern].append(target)
      return mapping
    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)
    if self.global_options.exclude_target_regexp:
      excludes = self.global_options.exclude_target_regexp
      log.debug('excludes:\n {excludes}'.format(excludes='\n '.join(excludes)))
      by_pattern = targets_by_pattern(self.targets, excludes)
      self.targets = by_pattern[_UNMATCHED_KEY]
      # The rest of this if-statement is just for debug logging.
      log.debug('Targets after excludes: {targets}'.format(
        targets=', '.join(t.address.spec for t in self.targets)))
      excluded_count = sum(len(by_pattern[p]) for p in excludes)
      log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
        plural=('s' if excluded_count != 1 else '')))
      for pattern in excludes:
        log.debug('Targets excluded by pattern {pattern}\n {targets}'.format(pattern=pattern,
          targets='\n '.join(t.address.spec for t in by_pattern[pattern])))
    context = Context(
      config=self.config,
      new_options=self.new_options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
    )
    unknown = []
    for goal in self.goals:
      if not goal.ordered_task_names():
        unknown.append(goal)
    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
      return 1
    engine = RoundEngine()
    return engine.execute(context, self.goals)

  def cleanup(self):
    """Last-ditch cleanup before aborting the process."""
    # TODO: This is JVM-specific and really doesn't belong here.
    # TODO: Make this more selective? Only kill nailguns that affect state? E.g., checkstyle
    # may not need to be killed.
    NailgunTask.killall(log.info)
    sys.exit(1)
| fakeNetflix/square-repo-pants | src/python/pants/commands/goal_runner.py | goal_runner.py | py | 10,794 | python | en | code | 0 | github-code | 36 |
36496422479 | from peewee import *
from playhouse.fields import ManyToManyField
from backend.connection_manager import db
from backend.models.feed import Feed
from backend.models.route import Route
class Stop(Model):
    """A transit stop belonging to a feed.

    ``id`` is the surrogate primary key; ``stop_id`` is the feed-local
    identifier. Stops are linked to routes through a many-to-many relation.
    """
    id = PrimaryKeyField()
    stop_id = BigIntegerField(null=False)
    name = CharField(null=False)
    lat = FloatField(null=False)
    lng = FloatField(null=False)
    timezone = CharField(max_length=100, null=True)
    feed = ForeignKeyField(Feed, null=False, related_name='stops')
    routes = ManyToManyField(Route, related_name='stops')
    def __str__(self) -> str:
        return '{s.id} - {s.stop_id} ({s.name})'.format(s=self)
    @classmethod
    def get_nearby(cls, lat, lng, distance):
        """Return Stop instances within ``distance`` of (lat, lng).

        Uses a spherical law-of-cosines distance in raw SQL with a
        bounding-box pre-filter on latitude and longitude; 111.045 is the
        number of kilometres per degree of latitude.
        """
        # NOTE(review): ``distance`` appears to be metres (divided by 1000
        # to kilometres before use) — confirm against callers.
        cursor = db.execute_sql('''
        SELECT
          stop.id AS id,
          %(distance_unit)s * DEGREES(ACOS(COS(RADIANS(%(lat)s))
          * COS(RADIANS(stop.lat))
          * COS(RADIANS(%(lng)s - stop.lng))
          + SIN(RADIANS(%(lat)s))
          * SIN(RADIANS(stop.lat)))) AS distance
        FROM stop
        WHERE stop.lat
          BETWEEN %(lat)s - (%(radius)s / %(distance_unit)s)
              AND %(lat)s + (%(radius)s / %(distance_unit)s)
          AND stop.lng
          BETWEEN %(lng)s - (%(radius)s / (%(distance_unit)s * COS(RADIANS(%(lat)s))))
              AND %(lng)s + (%(radius)s / (%(distance_unit)s * COS(RADIANS(%(lat)s))))
        ''', {
            "lat": lat,
            "lng": lng,
            "radius": distance / 1000,
            "distance_unit": 111.045,
        })
        # Re-fetch each matching row as a model instance (one query per row;
        # acceptable for the small result sets a radius search yields).
        result = []
        for nearby in cursor.fetchall():
            result.append(cls.get(id=nearby[0]))
        return result
    class Meta:
        database = db
| seniorpreacher/timap | backend/models/stop.py | stop.py | py | 2,027 | python | en | code | 0 | github-code | 36 |
from datetime import datetime
from typing import List, Tuple, Union

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import CharityProject, Donation
async def get_not_closed_investing_objects(
    model: Union[CharityProject, Donation],
    session: AsyncSession
) -> List[Union[CharityProject, Donation]]:
    """Return all open (not fully invested) objects of ``model``, oldest first."""
    query = (
        select(model)
        .where(model.fully_invested == 0)
        .order_by(model.create_date)
    )
    result = await session.execute(query)
    return result.scalars().all()
def close_investing_object(
    obj_to_close: Union[CharityProject, Donation]
):
    """Mark an object as fully invested and stamp its closing time."""
    obj_to_close.fully_invested = True
    obj_to_close.invested_amount = obj_to_close.full_amount
    obj_to_close.close_date = datetime.now()
def make_investing(
    new_obj: Union[CharityProject, Donation],
    model_obj: Union[CharityProject, Donation]
) -> Tuple[Union[CharityProject, Donation], Union[CharityProject, Donation]]:
    """Transfer free funds between a new object and an open stored one.

    Compares the uninvested remainder on each side, moves the smaller
    amount, and closes whichever object (or both) ends up fully funded.

    Fix: the return annotation was previously the runtime tuple expression
    ``(Union[...], Union[...])`` rather than a type; it is now ``Tuple[...]``.

    Returns:
        The (possibly mutated) ``new_obj`` and ``model_obj`` pair.
    """
    new_obj_free_amount = new_obj.full_amount - new_obj.invested_amount
    model_obj_free_amount = model_obj.full_amount - model_obj.invested_amount
    if new_obj_free_amount == model_obj_free_amount:
        # Exact match: both sides are exhausted simultaneously.
        close_investing_object(new_obj)
        close_investing_object(model_obj)
    elif new_obj_free_amount > model_obj_free_amount:
        # The stored object is the bottleneck: drain it, keep new_obj open.
        new_obj.invested_amount += model_obj_free_amount
        close_investing_object(model_obj)
    else:
        # The new object is the bottleneck: drain it, keep model_obj open.
        model_obj.invested_amount += new_obj_free_amount
        close_investing_object(new_obj)
    return new_obj, model_obj
async def investing_process(
    new_object: Union[CharityProject, Donation],
    model: Union[CharityProject, Donation],
    session: AsyncSession
):
    """Distribute funds between ``new_object`` and every open counterpart.

    Walks the open ``model`` objects oldest-first, moving money with
    ``make_investing``, then commits once and refreshes ``new_object``.

    Fix: stop iterating once ``new_object`` is fully invested -- previously
    the loop kept calling ``make_investing`` with a zero remainder, which
    re-closed ``new_object`` on every pass and overwrote its ``close_date``
    with a later timestamp.
    """
    model_objects = await get_not_closed_investing_objects(model, session)
    for model_object in model_objects:
        new_obj, model_obj = make_investing(new_object, model_object)
        session.add(new_obj)
        session.add(model_obj)
        if new_object.fully_invested:
            break
    await session.commit()
    await session.refresh(new_object)
| ThatCoderMan/QRkot_spreadsheets | app/services/investing.py | investing.py | py | 2,014 | python | en | code | 1 | github-code | 36 |
5180131116 | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer
import pandas as pd
import numpy as np
from pickle import load
from sklearn.preprocessing import MinMaxScaler
# Rebuild the classifier architecture; it must match the saved weights exactly:
# 7 audio features in, 4 mood classes out.
model = Sequential([
    InputLayer(input_shape=7),
    Dense(5, activation = 'relu'),
    Dense(4, activation = 'softmax')
])
model.load_weights("music_weights.hdf5")
model.compile(optimizer="Adam",loss='sparse_categorical_crossentropy',metrics='accuracy')
# Feature scaler fitted at training time; inputs must be scaled identically.
# NOTE(review): the file handle passed to load() is never closed.
scaler = load(open('scaler.pkl', 'rb'))
# Class labels; order must match the label encoding used during training.
emotions = ["Calm", "Energetic", "Happy", "Sad"]
def recommend_music(hist, mood):
    """Label each track in ``hist`` with a predicted mood and keep rows matching ``mood``.

    Adds a 'moods' column to ``hist`` (mutating it) and returns the
    filtered frame with a reset index.
    """
    feature_columns = ["Danceability", "Acousticness", "Energy",
                       "Instrumentalness", "Valence", "Loudness", "Speechiness"]
    features = scaler.transform(hist[feature_columns])
    predicted = model.predict(features)
    hist["moods"] = [emotions[i] for i in np.argmax(predicted, axis=1)]
    return hist[hist["moods"] == mood].reset_index(drop=True)
| adish13/Moodify-Learning | music_test.py | music_test.py | py | 993 | python | en | code | 1 | github-code | 36 |
12367122482 | #!/usr/bin/env ccp4-python
'''
Created on 16 Jan 2016
@author: hlfsimko
'''
import glob
import os
import sys
from ample.constants import SHARE_DIR
from ample.testing import test_funcs
from ample.testing.integration_util import AMPLEBaseTest
INPUT_DIR = os.path.join(SHARE_DIR, "examples", "single-model", "input")
# Maps test-case name -> {'args': ..., 'test': ...}; consumed by test_funcs below.
TEST_DICT = {}

# vanilla test -- arguments shared by every case in this module
args_vanilla = [
    [ '-fasta', os.path.join(INPUT_DIR, '1ujb.fasta') ],
    [ '-mtz', os.path.join(INPUT_DIR, '1ujb-sf.mtz') ],
]

###############################################################################
#
# test ensemble creation from a single structure based on residue scores
#
###############################################################################
args_from_single_model = args_vanilla + [
    [ '-percent', '20' ],
    [ '-single_model', os.path.join(INPUT_DIR, '3c7t.pdb') ],
    [ '-subcluster_radius_thresholds', 1,2,3 ],
    [ '-side_chain_treatments', 'polyAla', 'reliable', 'allatom' ],
    [ '-truncation_scorefile', os.path.join(INPUT_DIR, '3c7t_scores.csv') ],
    [ '-truncation_scorefile_header', 'residue', 'Concoord' ],
]
# Test class that holds the functions to test the RESULTS_PKL file that will be passed in
class AMPLETest(AMPLEBaseTest):
    def test_from_single_model(self):
        """Validate the AMPLE results dict produced by the single-model run.

        ``self.AMPLE_DICT`` is populated by the base class from the run's
        results pickle before this test executes.
        """
        self.assertTrue(self.AMPLE_DICT['AMPLE_finished'])
        # Expected ensemble count for this argument set -- TODO confirm the
        # derivation (radii x side-chain treatments x truncation levels).
        nensembles = len(self.AMPLE_DICT['ensembles'])
        self.assertEqual(nensembles, 15, "Incorrect number of ensembles produced: {0}".format(nensembles))
        self.assertIn('ensembles', self.AMPLE_DICT)
        self.assertGreater(len(self.AMPLE_DICT['ensembles']), 0, "No ensembles produced")
        self.assertIn('mrbump_results', self.AMPLE_DICT)
        self.assertGreater(len(self.AMPLE_DICT['mrbump_results']), 0, "No MRBUMP results")
        self.assertTrue(self.AMPLE_DICT['success'])
        self.assertGreater(self.AMPLE_DICT['mrbump_results'][0]['SHELXE_CC'], 25,"SHELXE_CC criteria not met")
        return
# Register the case with the shared integration-test runner.
TEST_DICT['from_single_model'] = { 'args' : args_from_single_model,
                                   'test' : AMPLETest,
                                 }

###############################################################################
#
# End Test Setup
#
###############################################################################

if __name__ == '__main__':
    test_funcs.parse_args(TEST_DICT)
| rigdenlab/ample | examples/single-model/test_cases.py | test_cases.py | py | 2,414 | python | en | code | 6 | github-code | 36 |
class Solution:
    def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
        """Return True if ``n`` new flowers fit with no two flowers adjacent.

        Greedy scan: plant in every empty plot whose neighbours are empty
        (or out of bounds), mutating ``flowerbed`` in place. Improvements
        over the original: the enumerate value is actually used, and the
        scan exits early once the quota is met.
        """
        if n <= 0:
            return True
        last = len(flowerbed) - 1
        for i, plot in enumerate(flowerbed):
            if (plot == 0
                    and (i == 0 or flowerbed[i - 1] == 0)
                    and (i == last or flowerbed[i + 1] == 0)):
                flowerbed[i] = 1  # plant here; affects the next plot's left-neighbour check
                n -= 1
                if n == 0:
                    return True
        return False
| maxlevashov/leetcode | python/605_Can_Place_Flowers.py | 605_Can_Place_Flowers.py | py | 488 | python | en | code | 1 | github-code | 36 |
34684965876 | __all__ = ['nuke_all', 'is_empty']
def nuke_all(classes):
    """Delete every stored instance of each of the given model classes."""
    for model in classes:
        for instance in model.objects.all():
            instance.delete()
def is_empty(classes):
    """Return True when none of the given model classes has any stored instances."""
    return all(len(model.objects.all()) == 0 for model in classes)
| readcoor/MIDAS-MICrONS | django/nada/fixtures/utils.py | utils.py | py | 410 | python | en | code | 0 | github-code | 36 |
35851037675 | #Django Libs
from django.http.response import FileResponse, HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import View, CreateView, DeleteView, UpdateView, DetailView, ListView, TemplateView
from django.db.models import Sum
from django.core.serializers import serialize
#Self Libs
from .forms import ComprasForm, ConsumidorFinalForm, ContribuyenteForm, EmpresaF, LibroForm
from .models import *
from empresas.models import Empresa as Cliente
from .export import *
#Factura CF
class FacturaCFCV(CreateView):
    """Create view for consumer-final (CF) invoices inside a sales book."""
    model = FacturaCF
    template_name = "iva/lfcf.html"
    form_class = ConsumidorFinalForm
    def get_context_data(self, **kwargs):
        # Fetch the book once and reuse it; the original issued the same
        # Libro.objects.get() query twice per request.
        libro = Libro.objects.get(id=self.kwargs["libro"])
        facturas = libro.facturacf
        context = super(FacturaCFCV, self).get_context_data(**kwargs)
        context["libro"] = libro
        context['direccion'] = 'cont:nueva_fcf'
        context['titulo'] = 'Crear Factura Consumidor Final'
        context["parametro"] = self.kwargs['libro']
        # Column totals shown under the invoice table, in template display order.
        context["totales"] = [
            facturas.all().aggregate(total_exento=Sum('exento'))["total_exento"],
            facturas.all().aggregate(total_local=Sum('locales'))["total_local"],
            facturas.all().aggregate(total_exportacion=Sum('exportaciones'))["total_exportacion"],
            facturas.all().aggregate(total_ventasNSujetas=Sum('ventasNSujetas'))["total_ventasNSujetas"],
            facturas.all().aggregate(total_venta=Sum('ventaTotal'))["total_venta"],
            facturas.all().aggregate(total_ventaCtaTerceros=Sum('ventaCtaTerceros'))["total_ventaCtaTerceros"],
        ]
        return context
    def get_initial(self, **kwargs):
        initial = super(FacturaCFCV, self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self, **kwargs):
        # Only the book id is needed and it is already in the URL kwargs;
        # no extra database query required.
        return reverse("iva:nueva_fcf", args=[self.kwargs["libro"]])
#Factura Ct
class FacturaCtCV(CreateView):
    """Create view for taxpayer (contribuyente) invoices inside a sales book."""
    model = FacturaCt
    template_name = "iva/lfct.html"
    form_class = ContribuyenteForm
    def get_context_data(self, **kwargs):
        # Fetch the book once and reuse it; the original issued the same
        # Libro.objects.get() query twice per request.
        libro = Libro.objects.get(id=self.kwargs["libro"])
        facturas = libro.facturact
        context = super(FacturaCtCV, self).get_context_data(**kwargs)
        context["libro"] = libro
        context['direccion'] = 'cont:nueva_fct'
        context['titulo'] = 'Crear Factura Contribuyente'
        context["parametro"] = self.kwargs['libro']
        # Column totals shown under the invoice table, in template display order.
        context["totales"] = [
            facturas.all().aggregate(total=Sum('venExentas'))["total"],
            facturas.all().aggregate(total=Sum('venGravadas'))["total"],
            facturas.all().aggregate(total=Sum('ventasNSujetas'))["total"],
            facturas.all().aggregate(total=Sum('ivaDebFiscal'))["total"],
            facturas.all().aggregate(total=Sum('vtVentas'))["total"],
            facturas.all().aggregate(total=Sum('vtIVA'))["total"],
            facturas.all().aggregate(total=Sum('ivaRetenido'))["total"],
            facturas.all().aggregate(total=Sum('total'))["total"],
        ]
        return context
    def get_initial(self, **kwargs):
        initial = super(FacturaCtCV, self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self, **kwargs):
        # Only the book id is needed; it is already in the URL kwargs.
        return reverse("iva:nueva_fct", args=[self.kwargs["libro"]])
#Factura Cm
class FacturaCmCV(CreateView):
    """Create view for purchase (compras) invoices inside a purchases book."""
    model = FacturaCm
    template_name = "iva/lfcm.html"
    form_class = ComprasForm
    def get_context_data(self, **kwargs):
        # Fetch the book once and reuse it; the original issued the same
        # Libro.objects.get() query twice per request.
        libro = Libro.objects.get(id=self.kwargs["libro"])
        facturas = libro.facturacm
        context = super(FacturaCmCV, self).get_context_data(**kwargs)
        context["libro"] = libro
        context['direccion'] = 'cont:nueva_fcm'
        context['titulo'] = 'Crear Factura Compra'
        context["parametro"] = self.kwargs['libro']
        # Column totals shown under the invoice table, in template display order.
        context["totales"] = [
            facturas.all().aggregate(total=Sum('cExenteInterna'))["total"],
            facturas.all().aggregate(total=Sum('cExenteImportaciones'))["total"],
            facturas.all().aggregate(total=Sum('cGravadaInterna'))["total"],
            facturas.all().aggregate(total=Sum('cGravadaImportaciones'))["total"],
            facturas.all().aggregate(total=Sum('comprasNSujetas'))["total"],
            facturas.all().aggregate(total=Sum('ivaCdtoFiscal'))["total"],
            facturas.all().aggregate(total=Sum('totalCompra'))["total"],
            facturas.all().aggregate(total=Sum('retencionPretencion'))["total"],
            facturas.all().aggregate(total=Sum('anticipoCtaIva'))["total"],
            facturas.all().aggregate(total=Sum('ivaTerceros'))["total"],
        ]
        return context
    def get_initial(self, **kwargs):
        initial = super(FacturaCmCV, self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self, **kwargs):
        # Only the book id is needed; it is already in the URL kwargs.
        return reverse("iva:nueva_fcm", args=[self.kwargs["libro"]])
#Libros vistas
class LibroCV(CreateView):
    """Create a Libro (book) of a given type for a client, rendered in a modal."""
    model = Libro
    template_name = "iva/modal.html"
    form_class = LibroForm
    def get_context_data(self, **kwargs):
        context = super(LibroCV,self).get_context_data(**kwargs)
        context["empresa"] = Cliente.objects.get(id=self.kwargs["empresa"])
        # NOTE(review): 'direccion' uses the 'iva:' namespace here while the
        # factura views above use 'cont:' -- confirm which the templates expect.
        context['direccion'] = 'iva:nuevo_libro'
        context['titulo'] = 'Crear Libro'
        context["tipo"] = self.kwargs["tipo"]
        context["parametro"] = self.kwargs['empresa']
        context["parametro2"] = self.kwargs['tipo']
        return context
    def get_initial(self, **kwargs):
        # Pre-fill the owning client and the book type from the URL.
        initial = super(LibroCV,self).get_initial()
        initial["cliente"] = Cliente.objects.get(id=self.kwargs["empresa"]).id
        initial["tipo"] = self.kwargs["tipo"]
        return initial
    def get_success_url(self,**kwargs):
        return reverse("iva:lista_libro",args=[self.kwargs["empresa"],self.kwargs["tipo"]])
class LibroLV(ListView):
    """List one client's books of a single type, ordered by year then month."""
    model = Libro
    template_name = "iva/llibro.html"
    context_object_name = 'libros'
    def get_context_data(self, **kwargs):
        context = super(LibroLV,self).get_context_data(**kwargs)
        context["cliente"] = Cliente.objects.get(id=self.kwargs['empresa'])
        context["tipo"] = self.kwargs["tipo"]
        return context
    def get_queryset(self):
        # Restrict the default queryset to this client's books of the requested type.
        queryset = super(LibroLV, self).get_queryset()
        queryset = queryset.filter(cliente__id = self.kwargs['empresa'],tipo=self.kwargs["tipo"]).order_by('ano','mes')
        return queryset
class EmpresaDV(DetailView):
    """Show a single client's (Cliente) details."""
    model = Cliente
    template_name = "iva/detalle_cliente.html"
    context_object_name = "cliente"
#Empresa Vistas
class EmpresaCV(CreateView):
    """Create an Empresa record.

    NOTE(review): no success_url / get_success_url is defined, so Django
    falls back to the model's get_absolute_url -- confirm Empresa defines it.
    """
    model = Empresa
    template_name = "iva/empresa.html"
    form_class = EmpresaF
    def get_context_data(self, **kwargs):
        context = super(EmpresaCV,self).get_context_data(**kwargs)
        context['direccion'] = 'cont:nuevo_empresa'
        context['titulo'] = 'Crear Empresa'
        return context
class EmpresaDetail(DetailView):
    """Return one Empresa, looked up by registry number, serialized as JSON."""
    model = Empresa
    template_name='empresaJson.html'
    def get(self,request,*args, **kwarg ):
        # NOTE(review): parameter is named **kwarg (singular), breaking the
        # usual **kwargs convention; the URL value is read from self.kwargs.
        empresa = Empresa.objects.get(nRegistro = self.kwargs['nReg'])
        empresa = serialize('json',[empresa,])
        return HttpResponse(empresa,'application/json')
#Exportacion
class ExportarView(View):
    """Stream an exported spreadsheet file for one book.

    URL kwargs:
        tipo: 1 = consumer-final book, 2 = taxpayer book, 3 = purchases book.
        id_libro: primary key of the Libro to export.
    """
    def get(self, request, *args, **kwargs):
        tipo = self.kwargs.get('tipo')
        id_libro = self.kwargs.get('id_libro')
        if tipo == 1:
            libroEx = export_libroCF(id_libro)
        elif tipo == 2:
            libroEx = export_libroct(id_libro)
        elif tipo == 3:
            libroEx = export_librocm(id_libro)
        else:
            # Fix: an unrecognised tipo previously crashed with NameError
            # because libroEx was never bound; now it is a clean 400.
            return HttpResponse('Tipo de libro desconocido', status=400)
        # FileResponse takes ownership of the handle and closes it when the
        # response body has been streamed.  (Removed a leftover debug print
        # and the unused Libro lookup / tipol locals.)
        return FileResponse(open(libroEx, 'rb'))
| RobertoMarroquin/garrobo | iva/views.py | views.py | py | 8,321 | python | en | code | 0 | github-code | 36 |
35906634663 | from flask import Flask, render_template, flash, redirect, request, url_for, jsonify
from multiprocessing import Process, Queue
from xBee_recieve import reciever
app = Flask(__name__)
processes = []
collectedData = []
def getNewXbeeData(q):
    """Pump messages from the XBee radio into the shared queue ``q``.

    Runs forever; intended to be the target of a worker Process.
    """
    PORT = "COM2"  # NOTE(review): Windows serial port name -- confirm for the deployment host
    BAUD = 9600
    MAC = "13A20041C7BFFC"  # remote XBee's 64-bit address
    r = reciever(PORT, BAUD, MAC)
    while True:
        msg = r.check_for_message()
        if msg:
            q.put(msg) # data needs to first be parsed, so if the msg is a json, we need to format to [msg['x'], msg['y']]
#tester method to get generated data should function the same as getNewXbeeData
def getNewRandomData(q):
    """Feed randomly generated telemetry into ``q`` (stand-in for the XBee link).

    Produces the record shape the /api endpoint consumes:
        time  -- seconds since start (float)
        accel -- 3-axis random walk around the previous sample
        gyro  -- 3 fresh random samples
        temp  -- random temperature reading
    Runs forever; intended as a Process target, mirroring getNewXbeeData.
    Fix: removed the stray debug ``print(r)`` that spammed stdout.
    """
    import time
    from random import randint
    t = 0
    lastAccel = [0, 0, 0]
    while True:
        # Sleep 0.5-1.0 s so samples arrive at a believable, jittery rate.
        delay = randint(5, 10) / 10.0
        time.sleep(delay)
        t += delay
        data = {
            "time": t,
            "accel": [lastAccel[0] + randint(-20, 20),
                      lastAccel[1] + randint(-20, 20),
                      lastAccel[2] + randint(-20, 20)],
            "gyro": [randint(-20, 20), randint(-20, 20), randint(-20, 20)],
            "temp": randint(30, 100),
        }
        q.put(data)
        lastAccel = data["accel"]  # accel is a random walk; remember the last point
@app.route("/", methods=["GET", ])
def main():
    """Serve the dashboard page."""
    return render_template("main.html")
#main page
@app.route('/api/<data>/<num>', methods=['GET'])
def api(data, num):
    """Return new samples for one channel as [time, value] pairs.

    data: channel name -- contains "accel" or "gyro" (optionally with a
          trailing X/Y/Z axis letter), or exactly "temp".
    num:  number of samples the client already holds; only entries past
          that index are returned so polling clients stay incremental.
    """
    # Drain anything the producer process has queued since the last call.
    q = processes[0][0]
    while not q.empty():
        collectedData.append(q.get())
    start = int(num)
    out = []
    if "accel" in data or "gyro" in data:
        # The two 3-axis channels share one code path (the original
        # duplicated the axis parsing for each); accel wins if both match,
        # preserving the original branch order.
        key = "accel" if "accel" in data else "gyro"
        axis = 1 if "Y" in data else (2 if "Z" in data else 0)
        for d in collectedData[start:]:
            out.append([d["time"], d[key][axis]])
    elif data == "temp":
        for d in collectedData[start:]:
            out.append([d["time"], d["temp"]])
    return jsonify(out)
if __name__ == '__main__':
    q = Queue()
    # Spawn the data producer; swap in getNewXbeeData to read real hardware.
    p = Process(target=getNewRandomData, args=[q,])
    processes.append((q,p))
    p.start()
    # Blocks here until the server stops; the cleanup loop below runs afterwards.
    app.run(host="0.0.0.0", port=80)
    # Stop every producer process once the server exits.
    for p in processes:
p[1].terminate() | explosion33/PIPayload | ground/api.py | api.py | py | 2,488 | python | en | code | 1 | github-code | 36 |
27359625037 | #!/usr/bin/python3
import os
import requests
my_ip_file = os.path.join("/tmp", "myIp.txt")
def myIp():
    """Return this machine's current public IP address as reported by gianlu.dev."""
    response = requests.get("https://gianlu.dev/ip")
    return response.text.strip()
def writToFile(filename, content):
    """Overwrite ``filename`` with ``content`` as UTF-8 text.

    Fix: uses a context manager so the handle is closed even if the write
    raises; the original leaked the descriptor on error.
    (Name kept as-is, typo included, to preserve the call sites.)
    """
    with open(filename, "wt", encoding="utf8") as fp:
        fp.write(content)
def readFile(filename):
    """Return the whitespace-stripped text content of ``filename`` (UTF-8).

    Fix: uses a context manager so the handle is always closed; the
    original leaked the descriptor if ``read`` raised.
    """
    with open(filename, "rt", encoding="utf8") as fp:
        return fp.read().strip()
# Ensure the cache file exists on first run so readFile below cannot fail.
if not os.path.exists(my_ip_file):
    writToFile(my_ip_file, "")
current_ip = myIp()
# Only act when the public IP changed since the last cron run.
if current_ip != readFile(my_ip_file):
    print(current_ip, readFile(my_ip_file))
    writToFile(my_ip_file, current_ip)
    # Imported lazily: Client only needs to run when the IP has changed.
    import Client
    Client.mainFunc()
| GianluDeveloper/OpenRemotePort | CronKeeper.py | CronKeeper.py | py | 681 | python | en | code | 0 | github-code | 36 |
14199179259 | import time
class criatura():
    """Base class for board creatures (pac-man and ghosts).

    Holds the shared movement state: grid position, heading, sprite
    handle(s) and the stopwatch used to throttle movement speed.
    """
    def __init__(self, pos, dir, imagen, tic, toc, tiempo_entre_mov):
        # Bug fix: the constructor previously discarded every argument and
        # set all attributes to None, so subclasses lost their initial state.
        self.pos = pos                            # current (row, column)
        self.dir = dir                            # current heading
        self.imagen = imagen                      # sprite name(s)
        self.tic = tic                            # stopwatch start
        self.toc = toc                            # stopwatch stop
        self.tiempo_entre_mov = tiempo_entre_mov  # seconds between moves
class pac_man():
    """The player-controlled pac-man: movement state plus lives/animation.

    NOTE(review): pac_man does not inherit from criatura even though it
    calls criatura.__init__ on itself -- confirm the class was meant to
    subclass it.
    """
    def __init__(self):
        criatura.__init__(
            self,
            pos = (23, 13), # current position (row, column)
            dir = 1, # direction it is moving towards
            imagen = ('pacman0','pacman1','pacman2'), # pac-man sprite frames
            tic = time.time(), # stopwatch start
            toc = 0,
            tiempo_entre_mov = 1.0/10, # move 10 cells per second
        )
        self.invencible = False # True after eating the power pellet
        self.boca_abierta = 1 # mouth animation frame: 0, 1, 2, 3(=1)
        self.vidas = 3  # remaining lives
class fantasma():
    """A ghost enemy: movement state plus an 'eaten by pac-man' flag.

    NOTE(review): like pac_man, this does not inherit from criatura even
    though it calls criatura.__init__ on itself -- confirm intent.
    """
    def __init__(self):
        criatura.__init__(
            self,
            pos = (0, 0), # current position (row, column)
            dir = 1, # direction it is moving towards
            imagen = 0, # ghost sprite
            tic = time.time(), # stopwatch start
            toc = 0,
            tiempo_entre_mov = 1.0/8 # moves 8 cells per second
        )
        self.__comido = False # whether pac-man has eaten it: True/False
    def dar_comido(self, comido):
        # Setter for the name-mangled __comido flag.
        self.__comido = comido
    def obtener_comido(self):
        # Getter for the name-mangled __comido flag.
        return self.__comido
if __name__ == '__main__':
    # Quick smoke test: instantiate both creatures and print some state.
    pacman = pac_man()
    print(pacman.pos)
    fant = fantasma()
print(fant.obtener_comido()) | dsvalenciah/python-pacman | temp.py | temp.py | py | 1,671 | python | es | code | 0 | github-code | 36 |
if __name__ == "__main__":
    # Gaussian elimination: read an augmented matrix from Input.txt, reduce
    # it to triangular form (forward pass), then back-substitute.
    f = open("Input.txt")
    RANGE = int(f.read(1))  # number of matrix rows (NOTE(review): single digit only)
    COLUMN = int(f.read(1))  # number of matrix columns (single digit only)
    mat = []  # read the matrix from the file
    f.read(1)
    for line in f.readlines():
        mat.append(line.split())
    for i in range(RANGE):  # convert every matrix entry to float
        for j in range(COLUMN):
            mat[i][j] = float(mat[i][j])
    f.close()
    mas = []  # holds the unknowns found during back substitution
    def next_run(mainIndex):  # forward elimination step for one pivot column
        i = mainIndex
        while i < RANGE - 1:  # zero out every element below the pivot
            if mat[i + 1][mainIndex] != 0 and mat[mainIndex][mainIndex] != 0:
                kof = mat[i + 1][mainIndex] / mat[mainIndex][
                    mainIndex] * -1  # multiplier for the row operation below
                transf(i, mainIndex, kof)
            i += 1
    def transf(secondIndex, mainIndex, kof):  # add pivot row * kof onto a lower row
        j = mainIndex
        while j < COLUMN:
            mat[secondIndex + 1][j] = mat[mainIndex][j] * kof + mat[secondIndex + 1][j]
            j += 1
    def back_run(mainIndex):  # back-substitution step for one row
        j = mainIndex
        sum = 0
        while j + 1 < COLUMN - 1:  # sum known coefficients times already-solved unknowns
            sum += mat[mainIndex][j + 1] * mas[case_mas(j + 1)]
            j += 1
        sum = (sum * -1 + mat[mainIndex][COLUMN - 1]) / mat[mainIndex][
            mainIndex]  # solve for this row's unknown and store it
        mas.append(sum)
    def case_mas(i):  # index into mas for the unknown matching column i
        return RANGE - (i + 1)
    print("Добро пожаловать сейчас мы приведем матрицу к "  # main program body
          "треугольному виду и найдем неизвестные методом Гаусса")
    print("Исходная матрица")
    for i in mat:  # print the matrix
        print(i)
    print()
    for i in range(RANGE - 1):  # forward pass
        next_run(i)
    print("Преобразованная матрица")
    for i in mat:  # print the matrix
        print(i)
    print()
    if mat[RANGE - 1][RANGE - 1] == 0:  # back substitution (needs a nonzero last pivot)
        print("Метод Гаусса не работает")
    else:
        i = RANGE - 1
        while i >= 0:
            back_run(i)
            i -= 1
    print("Искомые переменные\n", mas)  # print the solved unknowns
| alexneysis/numerical-methods | gauss_A0/my_method.py | my_method.py | py | 3,308 | python | ru | code | 0 | github-code | 36 |
25353888497 | import socketserver
import sys
from python import http
import requestParsing
class TCPHandler(socketserver.BaseRequestHandler):
    """Minimal HTTP-over-TCP handler: serves static site files on GET and
    hands anything else to requestParsing.

    NOTE(review): a single recv(1024) may truncate larger requests
    (headers or body) -- confirm clients never exceed this.
    """
    def handle(self):
        recieved_data = self.request.recv(1024)
        print(self.client_address[0] + " is sending data")
        print("----")
        print(recieved_data.decode())
        print("\n\n")
        sys.stdout.flush()
        header_end = "\r\n\r\n".encode() # convert the end-of-header mark to bytes
        data = []
        if recieved_data.find(header_end) != -1:
            data = recieved_data.split(header_end, 1) # use it to separate header from body
        else:
            data = [recieved_data, b'']
        # only decode header (body may be binary)
        header = data[0].decode().split("\r\n")
        request_line = header[0].split(" ")
        if request_line[0] == "GET":
            # Dispatch on the requested path; each branch picks the matching
            # content-type header builder from the http helper module.
            if request_line[1] == '/':
                response = http.html_header("website/index.html")
                self.request.sendall(response)
            elif request_line[1].find(".html") != -1:
                fname = "website" + request_line[1]
                response = http.html_header(fname)
                self.request.sendall(response)
            elif request_line[1].find(".js") != -1:
                fname = "website" + request_line[1]
                response = http.js_header(fname)
                self.request.sendall(response)
            elif request_line[1].find(".css") != -1:
                fname = "website" + request_line[1]
                response = http.css_header(fname)
                self.request.sendall(response)
            elif request_line[1].find("/images/") != -1:
                fname = "website" + request_line[1]
                # NOTE(review): split(".")[1] picks the first dot's suffix;
                # wrong for filenames containing extra dots -- confirm assets.
                f_type = fname.split(".")[1]
                response = http.image_header(fname, f_type)
                self.request.sendall(response)
            else:
                response = http.not_found()
                self.request.sendall(response)
        else:
            # Non-GET requests (e.g. POST) go through the project's parser.
            postData = requestParsing.parseRequest(self, header, recieved_data)
            if postData != ():
                response = http.byteResponse(postData)
                self.request.sendall(response)
if __name__ == '__main__':
    host = "localhost"
    port = 8000
    # Threading server: each incoming connection gets its own handler thread.
    server = socketserver.ThreadingTCPServer((host, port), TCPHandler)
server.serve_forever() | jackyzhu209/312-Project | website/server.py | server.py | py | 2,395 | python | en | code | 0 | github-code | 36 |
11014211257 |
from django.shortcuts import render, redirect, get_object_or_404
from .forms import LibroForm
from django.shortcuts import render
from .models import Libro
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
from django.views.generic.edit import UpdateView
class IngresarLibroView(LoginRequiredMixin, CreateView):
    """Create a Libro owned by the logged-in user, rejecting duplicates.

    NOTE(review): the duplicate check spans ALL users' books and is
    check-then-insert (not race-safe); a DB unique constraint on
    (titulo, autor) would be stronger -- confirm the intended scope.
    """
    model = Libro
    form_class = LibroForm
    template_name = 'libros/ingresar_libro.html'
    success_url = reverse_lazy('lista_libros')
    def form_valid(self, form):
        # Stamp ownership before saving.
        form.instance.usuario = self.request.user
        titulo = form.cleaned_data['titulo']
        autor = form.cleaned_data['autor']
        if not Libro.objects.filter(titulo=titulo, autor=autor).exists():
            return super().form_valid(form)
        else:
            # Surface the duplicate as a field error and re-render the form.
            form.add_error('titulo', 'Este libro ya existe en la biblioteca.')
            return self.form_invalid(form)
class EditarLibroView(LoginRequiredMixin, UpdateView):
    """Edit an existing Libro; requires an authenticated user.

    Fix: removed the previous ``form_valid`` override, which only called
    ``super().form_valid(form)`` and so was dead code.
    """
    model = Libro
    form_class = LibroForm
    template_name = 'libros/editar_libro.html'
    success_url = reverse_lazy('lista_libros')
    def get_context_data(self, **kwargs):
        # Expose the instance under the name the template expects ('libro').
        context = super().get_context_data(**kwargs)
        context['libro'] = self.get_object()
        return context
def lista_libros(request):
    """Render every book, alphabetically by title."""
    context = {'libros': Libro.objects.all().order_by('titulo')}
    return render(request, 'libros/lista_libros.html', context)
def eliminar_libro(request, libro_id):
    """Show a confirmation page on GET; delete the book and redirect on POST."""
    libro = get_object_or_404(Libro, id=libro_id)
    if request.method != 'POST':
        return render(request, 'libros/eliminar_libro.html', {'libro': libro})
    libro.delete()
    return redirect('lista_libros')
def lista_detalle_libros(request):
    """Render the detailed book listing, alphabetically by title."""
    ordered = Libro.objects.all().order_by('titulo')
    return render(request, 'libros/lista_detalle_libros.html', {'libros': ordered})
| ezecodo/Entrega1-Angeloni | libros/views.py | views.py | py | 2,052 | python | en | code | 0 | github-code | 36 |
# Puzzle input: inclusive password search range.
range_lower = 146810
range_higher = 612564
def is_valid(number):
    """Check a candidate password against the Advent-of-Code day-4 rules.

    Digits must never decrease left-to-right, and at least one pair of
    equal adjacent digits must exist.  Returns a falsy value (False or
    None) for rejects.
    """
    digits = str(number)
    repeats = []
    for pos in range(1, len(digits)):
        cur = int(digits[pos])
        prev = int(digits[pos - 1])
        if cur < prev:
            return False
        if cur == prev:
            repeats.append((pos, pos - 1, cur))
    # Part one: any repeated pair is enough.  Comment out these two lines
    # to fall through to the stricter part-two rule below.
    if repeats:
        return True
    # Part two: at least one digit must repeat exactly twice (one pair).
    pair_counts = {}
    for repeat in repeats:
        pair_counts[repeat[2]] = pair_counts.get(repeat[2], 0) + 1
    if 1 in pair_counts.values():
        return True
# Count passwords in the puzzle range that satisfy the active rule set.
total = 0
for i in range(range_lower, range_higher+1):
    if is_valid(i):
        total += 1
print(total)
# print(is_valid(123444)) | ChrisWilliamson123/advent-of-code-2019 | day4/main.py | main.py | py | 799 | python | en | code | 1 | github-code | 36 |
41635503393 | from google.cloud import firestore, storage, exceptions
import os
db = firestore.Client()
content = db.collection('fl_content')
storage_client = storage.client.Client()
bucket = storage_client.get_bucket('psyclonic-studios-website.appspot.com')
def new_transaction():
    """Return a fresh Firestore transaction bound to the module-level client."""
    return db.transaction()
@firestore.transactional
def get_artwork_collection(transaction, size, args):
    """Return every artwork document with image URLs resolved at ``size``.

    ``args`` drives ordering via sort_query; runs inside the supplied
    Firestore transaction.  Fix: removed the dead ``image_refs`` local
    that was assigned but never used.
    """
    artworks_query = content.where('_fl_meta_.schema', '==', 'artwork')
    artworks_query = sort_query(artworks_query, args)
    artworks = []
    for artwork_ref in artworks_query.stream(transaction=transaction):
        artwork = artwork_ref.to_dict()
        # Resolve each image document reference into concrete URLs.
        artwork['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
                             for image in artwork['images']]
        artwork['inventory'] = int(artwork['inventory'])  # hack to fix flamelink screwup
        artworks.append(artwork)
    return artworks
@firestore.transactional
def get_artwork(transaction, id, size):
    """Fetch one artwork by document id, or None if it does not exist."""
    snapshot = content.document(id).get(transaction=transaction)
    artwork = snapshot.to_dict()
    if not artwork:
        return None
    resolved = []
    for image in artwork['images']:
        resolved.append(get_sized_image_urls(image.get(transaction=transaction).to_dict(), size))
    artwork['images'] = resolved
    artwork['inventory'] = int(artwork['inventory'])  # flamelink stores it as a string
    return artwork
@firestore.transactional
def get_artwork_from_ref(transaction, ref, size):
    """Resolve an artwork document reference, or None if the doc is missing."""
    artwork = ref.get(transaction=transaction).to_dict()
    if not artwork:
        return None
    urls = []
    for image in artwork['images']:
        urls.append(get_sized_image_urls(image.get(transaction=transaction).to_dict(), size))
    artwork['images'] = urls
    return artwork
@firestore.transactional
def get_non_series_artwork_collection(transaction, size, args):
    """Return artworks not belonging to a series, with images resolved at ``size``.

    Fix: removed the dead ``image_refs`` local that was assigned but never used.
    NOTE(review): unlike get_artwork_collection, 'inventory' is not normalised
    to int here -- confirm whether callers rely on that difference.
    """
    artworks_query = content.where('_fl_meta_.schema', '==', 'artwork').where('partOfASeries', '==', False)
    artworks_query = sort_query(artworks_query, args)
    artworks = []
    for artwork_ref in artworks_query.stream(transaction=transaction):
        artwork = artwork_ref.to_dict()
        artwork['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
                             for image in artwork['images']]
        artworks.append(artwork)
    return artworks
@firestore.transactional
def get_series_collection(transaction, size, args):
    """Return every series document with a resolved ``thumbnail_image``.

    Prefers the first dedicated series image; falls back to the first image
    of the first artwork in the series.  Fix: removed the unused
    ``series_image_urls`` comprehension, which fetched and resolved EVERY
    series image per series only to discard the result.
    """
    series_query = content.where('_fl_meta_.schema', '==', 'series')
    series_query = sort_query(series_query, args)
    series_collection = []
    for series_ref in series_query.stream(transaction=transaction):
        series = series_ref.to_dict()
        series_image_refs = series['seriesImages']
        if series_image_refs:
            series['thumbnail_image'] = get_sized_image_urls(series_image_refs[0].get(transaction=transaction).to_dict(), size)
        else:
            # No dedicated series images: use the first artwork's first image.
            artwork = series['artworks'][0].get(transaction=transaction).to_dict()
            artwork_image = artwork['images'][0].get(transaction=transaction).to_dict()
            series['thumbnail_image'] = get_sized_image_urls(artwork_image, size)
        series_collection.append(series)
    return series_collection
@firestore.transactional
def get_series(transaction, id, size):
    """Fetch one series by id with its images and artworks fully resolved.

    Returns None when the document does not exist.  Fix: removed the
    second ``if not series: return None`` check, which sat AFTER the
    document had already been subscripted and so could never trigger,
    plus the redundant ``image_refs`` temporary.
    """
    series = content.document(id).get(transaction=transaction).to_dict()
    if series is None:
        return None
    series_image_refs = series['seriesImages']
    series_image_urls = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size) for image in series_image_refs]
    artworks_resolved = []
    for artwork_ref in series['artworks']:
        artwork = artwork_ref.get(transaction=transaction).to_dict()
        artwork['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
                             for image in artwork['images']]
        artwork['inventory'] = int(artwork['inventory'])  # hack to fix flamelink screwup
        artworks_resolved.append(artwork)
    series['artworks_resolved'] = artworks_resolved
    series['series_images'] = series_image_urls
    return series
#
#@firestore.transactional
#def get_blog_collection(transaction, size, args):
# blog_collection_query = content.where('_fl_meta_.schema', '==', 'posts').where('status', '==', 'published')
# blog_collection_query = sort_query(blog_collection_query, args)
# blog_collection = []
# for blog_ref in blog_collection_query.stream(transaction=transaction):
# blog = blog_ref.to_dict()
# blog_thumbnail_ref = blog['thumbnail'][0]
# blog_thumbnail = get_file_url(get_image_size_path(blog_thumbnail_ref.get(transaction=transaction).to_dict(), size))
# blog['thumbnail_image'] = blog_thumbnail
# blog_collection.append(blog)
# return blog_collection
#
#@firestore.transactional
#def get_blog(transaction, id, size):
# blog = content.document(id).get(transaction=transaction).to_dict()
# thumbnail_ref = blog['thumbnail'][0]
# blog['thumbnail_image'] = get_file_url(get_image_size_path(thumbnail_ref.get(transaction=transaction).to_dict(), size))
# return blog
@firestore.transactional
def get_home_images(transaction):
    """Return the 'Home' websiteImages document with resolved image URLs.

    NOTE(review): get_sized_image_urls is called WITHOUT a size argument
    here, unlike the other helpers -- confirm it has a sensible default.
    Raises StopIteration if no Home document exists.
    """
    home_images_query = content.where('_fl_meta_.schema', '==', 'websiteImages').where('position', '==', 'Home').limit(1)
    home_images = next(home_images_query.stream(transaction=transaction)).to_dict()
    home_images['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict()) for image in home_images['images']]
    return home_images
def get_cost(cost):
    """Return the numeric 'cost' field of the costs document named ``cost``.

    Fix: the fetched document no longer shadows the ``cost`` parameter.
    Raises StopIteration if no matching document exists.
    """
    query = content.where('_fl_meta_.schema', '==', 'costs').where('name', '==', cost).limit(1)
    document = next(query.stream()).to_dict()
    return document['cost']
def get_international_shipping():
    """Return the configured 'International shipping' cost."""
    return get_cost('International shipping')
def get_website_component(component):
    """Return the 'content' of the websiteComponents document named ``component``.

    Fix: the fetched document no longer shadows the ``component`` parameter.
    Raises StopIteration if no matching document exists.
    """
    query = content.where('_fl_meta_.schema', '==', 'websiteComponents').where('component', '==', component).limit(1)
    document = next(query.stream()).to_dict()
    return document['content']
def get_home_text():
    """Return the 'Home' website component content."""
    return get_website_component('Home')
def get_about():
    """Return the 'About' website component content."""
    return get_website_component('About')
def get_policies():
    """Return the 'Policies' website component content."""
    return get_website_component('Policies')
@firestore.transactional
def get_contribute_products(transaction, size, args):
    """Return the available support products with SKUs and resolved images.

    ``args`` drives ordering via sort_query; only products flagged
    available are included.
    """
    contribute_products_query = content.where('_fl_meta_.schema', '==', 'supportProducts').where('available', '==', True)
    contribute_products_query = sort_query(contribute_products_query, args)
    contribute_products = []
    for product_ref in contribute_products_query.stream(transaction=transaction):
        product = product_ref.to_dict()
        # Derive the SKU the payment layer expects from the document id.
        product['sku'] = f'sku_{product["id"]}'
        product_artwork_image_ref = product['artworkImage'][0]
        product['artwork_image'] = get_sized_image_urls(product_artwork_image_ref.get(transaction=transaction).to_dict(), size)
        product_image_ref = product['productImage'][0]
        product['product_image'] = get_sized_image_urls(product_image_ref.get(transaction=transaction).to_dict(), size)
        contribute_products.append(product)
    return contribute_products
#def sync_contribute_products_to_stripe():
# contribution_product_id = STRIPE_DATA['contribution_product_id']
# contribute_products = get_contribute_products(new_transaction(), 375, None)
# products = {product['sku']: product for product in contribute_products}
# stripe_skus = stripe.SKU.list(product=contribution_product_id, limit=100)['data']
# stripe_sku_list = [sku['id'] for sku in stripe_skus]
# existing_skus = filter(lambda sku: sku in stripe_sku_list, products.keys())
# new_skus = filter(lambda sku: sku not in stripe_sku_list, products.keys())
#
# for sku in existing_skus:
# product = products[sku]
# stripe.SKU.modify(
# sku,
# currency='aud',
# inventory={'type': 'infinite'},
# active=product['available'],
# price=int(product['basePrice'] * 100),
# image=product['product_image_url'],
# product=contribution_product_id,
# attributes={'name': product['title']}
# )
#
# for sku in new_skus:
# product = products[sku]
# stripe.SKU.create(
# id=product['sku'],
# currency='aud',
# inventory={'type': 'infinite'},
# active=product['available'],
# price=int(product['basePrice'] * 100),
# image=product['product_image_url'],
# product=contribution_product_id,
# attributes={'name': product['title']}
# )
#
#def get_donation_skus():
# donation_product_id = STRIPE_DATA['donation_product_id']
# donation_skus = stripe.SKU.list(product=donation_product_id)['data']
# return sorted(donation_skus, key=lambda sku: sku['price'])
#
#def get_shipping_sku():
# shipping_sku = stripe.SKU.retrieve(STRIPE_DATA['shipping_sku_id'])
# return shipping_sku
# Thin wrappers over get_website_component for the various CMS text blocks.
def get_contribute_text():
    """Contribute page copy."""
    return get_website_component('Contribute')
def get_subscribe():
    """Subscribe form copy."""
    return get_website_component('Subscribe')
def get_contact_message():
    """Contact page message copy."""
    return get_website_component('Contact message')
def get_contact_email_template():
    """Email template for contact-form messages."""
    return get_website_component('Contact email template')
def get_subscribe_success():
    """Thank-you copy shown after subscribing."""
    return get_website_component('Thankyou subscribe')
def post_email_address(email):
    """Record a newsletter subscription, keyed by email address (idempotent)."""
    subscribers = db.collection('subscribers')
    subscribers.document(email).set({'subscribe': True}, merge=True)
def get_artwork_buy_email_template():
    """Email template for artwork purchases."""
    return get_website_component('Artwork buy email')
def get_artwork_enquiry_email_template():
    """Email template for artwork enquiries."""
    return get_website_component('Artwork enquire email')
def get_series_enquiry_email_template():
    """Email template for series enquiries."""
    return get_website_component('Series enquire email')
def get_enquire_thankyou():
    """Thank-you copy shown after an enquiry."""
    return get_website_component('Thankyou enquiry')
def get_payment_success():
    """Thank-you copy shown after a successful payment."""
    return get_website_component('Thankyou payment')
def get_order(id):
    """Load an order document and resolve its artwork references.

    Each entry of order['artworks'] is replaced with the resolved artwork
    (at 300px image size) plus its quantity.
    """
    order = db.collection('orders').document(id).get().to_dict()
    transaction = new_transaction()
    artworks = [{'artwork': get_artwork_from_ref(transaction, artwork['artwork'], 300), 'quantity': artwork['quantity']} for artwork in order['artworks']]
    order['artworks'] = artworks
    return order
def finalise_order(payment_intent):
    """Mark an order paid from a Stripe payment intent and decrement stock.

    The order document id is the payment intent id (see update_order).
    NOTE: 'payment_recieved' is misspelled but is the established data key;
    do not "fix" it without migrating stored documents.
    """
    orders = db.collection('orders')
    order = orders.document(payment_intent.id)
    order.update({
        'payment_recieved': True,
        'customer': {
            'name': payment_intent.shipping.name,
            'email': payment_intent.receipt_email
        },
        'shipping': {
            'street': payment_intent.shipping.address.line1,
            'city': payment_intent.shipping.address.city,
            'state': payment_intent.shipping.address.state,
            'country': payment_intent.shipping.address.country,
            'postal_code': payment_intent.shipping.address.postal_code,
        },
        'paid_at': firestore.SERVER_TIMESTAMP
    })
    # Decrement inventory for each artwork on the order
    artworks = order.get().to_dict()['artworks']
    for artwork in artworks:
        artwork['artwork'].update({'inventory': firestore.Increment(-artwork['quantity'])})
def update_order(payment_intent_id, cart, subtotal, shipping_cost, total, payment_recieved):
    """Create or refresh the pending order doc for a payment intent.

    ``cart`` maps artwork document ids to quantities. The order is always
    (re)written with payment_recieved=False; finalise_order flips it later.
    """
    orders = db.collection('orders')
    order = orders.document(payment_intent_id)
    # Ensure the document exists with a creation timestamp before updating;
    # the empty-dict check covers clients that return an empty snapshot
    # instead of raising NotFound.
    try:
        order_doc = order.get()
        if not order_doc.to_dict():
            order.set({'created_at': firestore.SERVER_TIMESTAMP}, merge=True)
    except exceptions.NotFound:
        order.set({'created_at': firestore.SERVER_TIMESTAMP}, merge=True)
    artworks = [{'artwork': content.document(id), 'quantity': cart[id]} for id in cart]
    order_update = {'payment_recieved': False, 'artworks': artworks, 'cost': {'subtotal': subtotal, 'shipping': shipping_cost, 'total': total}}
    order.update(order_update)
def get_flamelink_file_url(path):
    """Return the public storage URL for a file under flamelink/media."""
    flamelink_path = 'flamelink/media'
    blob = bucket.blob(os.path.join(flamelink_path, path))
    return blob.public_url
def get_sized_image_urls(image_dict, upto=None):
    """Augment a Flamelink image dict with resolved URLs for each size.

    ``upto`` optionally drops sizes wider than the given width. A 240px
    entry is always (re)added. On return, image_dict['sizes'] maps width ->
    {width, storage_path, url} and image_dict['full_size'] is the largest
    remaining size.
    """
    filename = image_dict['file']
    # NOTE(review): this first full_size assignment is dead code -- it is
    # unconditionally overwritten below with the largest sized entry.
    image_dict['full_size'] = {'width': 'full', 'storage_path': filename, 'url': get_flamelink_file_url(filename)}
    sizes = image_dict['sizes']
    if upto:
        sizes = list(filter(lambda size: size['width'] <= upto, sizes))
    for s in sizes:
        s['storage_path'] = os.path.join('sized', str(s['path']), filename)
    sizes = {s['width']: s for s in sizes}
    # Always provide a 240px variant (overwrites any existing 240 entry)
    sizes[240] = {'width': 240, 'storage_path': os.path.join('sized', str(240), filename)}
    for size in sizes.values():
        size['url'] = get_flamelink_file_url(size['storage_path'])
    image_dict['full_size'] = sizes[max(sizes)]
    image_dict['sizes'] = sizes
    return image_dict
def sort_query(query, args=None):
    """Apply an optional ordering to a Firestore query.

    ``args`` is a mapping (typically request args) that may supply
    ``sort_by`` and ``sort_direction`` ('ascending'/'descending').
    Returns the query unchanged when no sort field is given.
    """
    if args is None:
        return query
    field = args.get('sort_by', '')
    if not field:
        return query
    direction = args.get('sort_direction', '')
    if direction == 'descending':
        return query.order_by(field, direction=firestore.Query.DESCENDING)
    if direction == 'ascending':
        return query.order_by(field, direction=firestore.Query.ASCENDING)
    return query.order_by(field)
from .operator import Operator
from .loader import load_from_dir
from .built_in import BUILTIN_OPERATORS
class OperatorRegistry:
    """Registry of operators discovered from plugins plus the built-ins."""

    def __init__(self):
        # Plugin contexts are loaded once, at construction time
        self.plugin_contexts = load_from_dir()

    def list_operators(self):
        """Lists the available FiftyOne operators.

        Returns:
            a list of operators
        """
        plugin_operators = [
            instance
            for ctx in self.plugin_contexts
            for instance in ctx.instances
            if isinstance(instance, Operator)
        ]
        return plugin_operators + BUILTIN_OPERATORS

    def list_errors(self):
        """Lists the errors that occurred during operator loading.

        Returns:
            a list of errors
        """
        all_errors = []
        for ctx in self.plugin_contexts:
            all_errors.extend(ctx.errors)
        return all_errors

    def operator_exists(self, operator_uri):
        """Checks if the operator exists.

        Args:
            operator_uri: the URI of the operator

        Returns:
            ``True`` if the operator exists, ``False`` otherwise
        """
        return any(op.uri == operator_uri for op in self.list_operators())

    def get_operator(self, operator_uri):
        """Returns the operator with the given URI, or ``None`` if absent."""
        for candidate in self.list_operators():
            if candidate.uri == operator_uri:
                return candidate
        return None
def register_operator(operator):
    """Registers a built-in operator. For internal use only.

    Args:
        operator: the operator to register

    Raises:
        ValueError: if a built-in operator with the same name already exists
    """
    # BUILTIN_OPERATORS is a list (``list_operators`` concatenates it with
    # the plugin operators), so membership must be checked by name and new
    # operators appended. The previous dict-style access
    # (``BUILTIN_OPERATORS[operator.name] = operator``) would raise
    # TypeError on a list, and ``operator.name in BUILTIN_OPERATORS``
    # compared a name string against operator objects.
    if any(existing.name == operator.name for existing in BUILTIN_OPERATORS):
        raise ValueError("Operator '%s' already exists" % operator.name)
    BUILTIN_OPERATORS.append(operator)
| Rusteam/fiftyone | fiftyone/operators/registry.py | registry.py | py | 1,866 | python | en | code | null | github-code | 36 |
import torch
import seaborn as sn
from matplotlib import pyplot as plt
from model import ConvNet
from MnistDataset import Mydataset
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
torch.manual_seed(13)
def get_score(confusion_mat):
    """Compute per-class precision, recall and F1 from a confusion matrix.

    A small epsilon is added to every denominator to guard against
    division by zero. The denominators are the raw axis sums (which
    include the diagonal), matching the original formulation.
    """
    eps = 0.0001  # avoids zero denominators
    diag = np.diagonal(confusion_mat)
    col_totals = confusion_mat.sum(axis=0)
    row_totals = confusion_mat.sum(axis=1)
    precision = diag / (col_totals + eps)
    recall = diag / (row_totals + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision, recall, f1
def get_confusion(confusion_matrix, out, label):
    """Accumulate one prediction into the confusion matrix (in place).

    Rows index the predicted class (argmax of the network output);
    columns index the ground-truth label. Returns the same matrix.
    """
    predicted = out.detach().numpy().argmax()
    confusion_matrix[predicted, label] += 1
    return confusion_matrix
def main():
    """Evaluate the trained ConvNet on the MNIST test split.

    Builds a 10x10 confusion matrix over the test set, prints macro-averaged
    precision/recall/F1, shows a heatmap, and dumps the matrix to CSV.
    """
    confusion_matrix = np.zeros((10, 10))
    net = ConvNet()
    # Windows-style path; assumes the checkpoint from epoch 90 exists
    net.load_state_dict(torch.load('model_parameter\\parameter_epo90.pth'))
    test_path = ['test.txt', r'dataset/test_label.txt']
    test_dataset = Mydataset(test_path[0], test_path[1], 'cpu')
    # batch size 1 so each forward pass yields one prediction
    test_dataloader = DataLoader(test_dataset, 1, True)
    for i, (pic, label) in enumerate(test_dataloader):
        out = net(pic)
        confusion_matrix = get_confusion(confusion_matrix, out, label)
    precision, recall, f1 = get_score(confusion_matrix)
    print(f'precision: {np.average(precision)}\trecall: {np.average(recall)}\tf1: {np.average(f1)}')
    confusion_mat = pd.DataFrame(confusion_matrix)
    confusion_df = pd.DataFrame(confusion_mat, index=[i for i in range(10)], columns=[i for i in range(10)])
    sn.heatmap(data=confusion_df, cmap='RdBu_r')
    plt.show()
    # 'ANSI' encoding is Windows-specific -- presumably run on Windows
    confusion_df.to_csv(r'confusion.csv', encoding='ANSI')
if __name__ == '__main__':
main()
| Huyf9/mnist_pytorch | test.py | test.py | py | 1,732 | python | en | code | 1 | github-code | 36 |
one = input()
# Longest Common Subsequence of two input strings, printed as the actual
# subsequence (not just its length). Classic O(n*m) dynamic programming.
two = input()
oneL = len(one)
twoL = len(two)
# dp[i][j] = length of the LCS of one[:i] and two[:j]
dp = [[0] * (twoL + 1) for _ in range(oneL + 1)]
for i in range(1, oneL + 1):
    for j in range(1, twoL + 1):
        if one[i - 1] == two[j - 1]:
            dp[i][j] = dp[i - 1][j - 1] + 1
        else:
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
# Backtrack from dp[oneL][twoL] to reconstruct one LCS (built reversed)
word = ""
i = oneL
j = twoL
while i > 0 and j > 0:
    if one[i - 1] == two[j - 1]:
        word += one[i - 1]
        i -= 1
        j -= 1
    elif dp[i - 1][j] > dp[i][j - 1]:
        i -= 1
    else:
        j -= 1
print(word[::-1])
| AAZZAZRON/DMOJ-Solutions | dpf.py | dpf.py | py | 559 | python | en | code | 1 | github-code | 36 |
import json
class Destinations:
    """Holds a scraped destination name and appends it to a JSON file."""

    def __init__(self):
        # Filled in by the scraper before serialisation
        self.destination = ""
        self.file_name = "destinations.json"

    def write_to_json_file(self):
        """Serialise the current destination and append it to ``file_name``.

        Note: the file is opened in append mode, so repeated calls
        concatenate JSON objects rather than producing one JSON document.
        """
        payload = json.dumps(
            {"destination": self.destination}, indent=1, ensure_ascii=False
        )
        with open(self.file_name, 'a', encoding='utf-8') as out:
            out.write(payload)
| DistributedTravels/Scraper | scraper/destinations.py | destinations.py | py | 416 | python | en | code | 0 | github-code | 36 |
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import (
api_view,
permission_classes,
authentication_classes
)
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from repository.models import Repository, Branch, Commit
from repository.serializers.repo_serializers import (
RepositorySerializer,
RepositoryCreateSerializer,
RepositoryUpdateSerializer,
)
from repository.serializers.branch_serializers import BranchSerializer
from repository.serializers.commit_serializers import CommitSerializer
from users.serializers import UserSerializer
from backend.exceptions import GeneralException
from datetime import datetime
import requests
import re
import json
import pytz
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_one_repo(request, repo_id):
    """Return a single repository by primary key (404 if missing)."""
    repo = get_object_or_404(Repository, pk=repo_id)
    serializer = RepositorySerializer(repo, many=False)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_repos(request, username):
    """Return all repositories owned by the given username."""
    repos = Repository.objects.filter(user__username=username)
    serializer = RepositorySerializer(repos, many=True)
    return Response(serializer.data)
@api_view(['POST'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def create_repo(request):
    """Create a repository for the signed-in user and sync it from GitHub.

    Repository names must be globally unique. After creation, branches,
    commits and the README are imported via load_repo.
    """
    repo_ser = RepositoryCreateSerializer(data=request.data)
    if not repo_ser.is_valid():
        raise GeneralException("Invalid request.")
    found_repos = Repository.objects.filter(name=repo_ser.data['name'])
    if len(found_repos) > 0:
        raise GeneralException("Repository with given name already exists.")
    repo = Repository.objects.create(
        name=repo_ser.data['name'],
        description=repo_ser.data['description'],
        url=repo_ser.data['url'],
        is_private=repo_ser.data['is_private'],
        user=request.user,
    )
    repo.save()
    # Initial import of branches/commits/README from the GitHub API
    load_repo(repo, request.user)
    serializer = RepositorySerializer(repo, many=False)
    return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def update_repo(request, repo_id):
    """Update name/description/visibility of a repository.

    The uniqueness check is only performed when the name actually changes.
    """
    repo_ser = RepositoryUpdateSerializer(data=request.data)
    if not repo_ser.is_valid():
        raise GeneralException("Invalid request.")
    repo = get_object_or_404(Repository, pk=repo_id)
    if (repo.name != repo_ser.data['name']):
        found_repos = Repository.objects.filter(name=repo_ser.data['name'])
        if len(found_repos) > 0:
            raise GeneralException(
                "Repository with given name already exists.")
    repo.name = repo_ser.data['name']
    repo.description = repo_ser.data['description']
    repo.is_private = repo_ser.data['is_private']
    repo.save()
    repo.refresh_from_db()
    serializer = RepositorySerializer(repo, many=False)
    return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def reload_repo(request, repo_id):
    """Re-import a repository: drop all branches (commits cascade) and reload."""
    repo = get_object_or_404(Repository, pk=repo_id)
    branches = Branch.objects.filter(repo__id=repo_id)
    for branch in branches:
        branch.delete()
    load_repo(repo, request.user)
    return Response()
@api_view(['DELETE'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def delete_repo(request, repo_id):
    """Delete a repository by primary key (404 if missing)."""
    repo = get_object_or_404(Repository, pk=repo_id)
    repo.delete()
    return Response()
def load_repo_readme(remote_username, remote_repo_name):
    """Download the README text of a GitHub repository.

    Uses the GitHub REST API to locate the README, then fetches its raw
    contents. NOTE(review): no error handling -- a repo without a README
    returns a payload lacking 'download_url' and raises KeyError; confirm
    whether callers guard against that.
    """
    # Fetch readme
    readme_info_resp = requests.get(
        'https://api.github.com/repos/{0}/{1}/readme'.format(remote_username, remote_repo_name))
    readme_info = readme_info_resp.json()
    readme_text_resp = requests.get(readme_info['download_url'])
    return readme_text_resp.text
def load_repo(repo, user):
    """Import branches, commits and README for ``repo`` from the GitHub API.

    The owner and repo name are parsed from the repository URL
    (https://github.com/<owner>/<name>). For every remote branch a Branch
    row is created and its commit history stored; finally each branch's
    ``last_commit`` is linked to its remote HEAD.

    NOTE(review): the GitHub API paginates -- only the first page of
    branches/commits is imported here; confirm that is acceptable.
    NOTE(review): this helper returns a DRF Response built from the last
    commits request, which no caller appears to use.
    """
    groups = re.findall(r"^https:\/\/github.com\/(.*)\/(.*)", repo.url)
    remote_username = groups[0][0]
    remote_repo_name = groups[0][1]
    # Get and set README
    repo.readme = load_repo_readme(remote_username, remote_repo_name)
    repo.save()
    branches_resp = requests.get(
        'https://api.github.com/repos/{0}/{1}/branches'.format(remote_username, remote_repo_name))
    for b in branches_resp.json():
        branch = Branch.objects.create(
            name=b['name'],
            creator=user,
            repo=repo,
            last_commit=None,
        )
        branch.save()
        commits_resp = requests.get(
            'https://api.github.com/repos/{0}/{1}/commits?sha={2}'
            .format(remote_username, remote_repo_name, b['name']))
        for c in commits_resp.json():
            # GitHub timestamps are UTC ISO-8601; they are localized to
            # Europe/Belgrade before storage
            c_time = datetime.strptime(
                c['commit']['author']['date'], '%Y-%m-%dT%H:%M:%SZ')
            timezone = pytz.timezone("Europe/Belgrade")
            c_time_zoned = timezone.localize(c_time)
            commit = Commit.objects.create(
                message=c['commit']['message'],
                hash=c['sha'],
                timestamp=c_time_zoned,
                author_email=c['commit']['author']['email'],
                branch=branch,
            )
    # Add latest commit to branch
    for b in branches_resp.json():
        branches = Branch.objects.filter(
            repo__name=repo.name, name=b['name'])
        commits = Commit.objects.filter(
            branch__name=b['name'], hash=b['commit']['sha'])
        if len(branches) > 0:
            if len(commits) > 0:
                branches[0].last_commit = commits[0]
                branches[0].save()
    return Response(commits_resp.json())
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_branches(request, repo_name):
    """Return all branches of the named repository."""
    repos = Branch.objects.filter(repo__name=repo_name)
    serializer = BranchSerializer(repos, many=True)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_commits(request, repo_name, branch_name):
    """Return all commits of a branch.

    '~' in the URL path stands in for '/' in branch names (e.g. feature~x).
    """
    repos = Commit.objects.filter(
        branch__repo__name=repo_name).filter(branch__name=branch_name.replace('~', '/'))
    serializer = CommitSerializer(repos, many=True)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_repo_collaborators(request, repo_id):
    """Return the collaborator users of a repository."""
    repo = get_object_or_404(Repository, pk=repo_id)
    serializer = UserSerializer(repo.collaborators, many=True)
    return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def update_collaborators(request, repo_id):
    """Replace the collaborator set of a repository.

    Only the repository owner may change collaborators. The request body
    is a list of user ids; an empty list removes all collaborators.
    """
    repo = get_object_or_404(Repository, pk=repo_id)
    signed_in_user = request.user.id
    if repo.user.id != signed_in_user:
        raise GeneralException("Not authorized")
    user_id_list = request.data
    if len(user_id_list) > 0:
        repo.collaborators.clear()
        repo.collaborators.add(*user_id_list)
    else:
        # Bug fix: this branch previously cleared ``repo.assignees`` (a
        # copy-paste from an issue view); this endpoint manages the
        # collaborators relation, so an empty list must clear it instead.
        repo.collaborators.clear()
    repo.save()
    repo.refresh_from_db()
    serializer = UserSerializer(repo.collaborators, many=True)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def search_users_for_collaborators(request, repo_id, search_value):
    """Search active users (by username substring) who could be added as collaborators.

    Only the repository owner may search. The result excludes the owner,
    staff/superusers, and users who are already collaborators.
    """
    signed_in_user = request.user.id
    repo = get_object_or_404(Repository, pk=repo_id);
    if repo.user.id != signed_in_user:
        raise GeneralException("Not authorized")
    repo_collaborators = repo.collaborators.all()
    potential_collaborators = User.objects.filter(is_active=True, is_superuser=False, is_staff=False, username__icontains=search_value).exclude(pk=signed_in_user)
    # difference() removes users who already collaborate on this repo
    serializer = UserSerializer(potential_collaborators.difference(repo_collaborators), many=True)
    return Response(serializer.data)
| lazarmarkovic/uks2020 | backend/repository/views/repo_views.py | repo_views.py | py | 8,275 | python | en | code | 0 | github-code | 36 |
#coding=utf8
import numpy as np
np.random.seed(1337) # for reproducibility
import re
import h5py
import os
from nltk import tokenize
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from attention import Attention_input1, Attention_input2
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Reshape, Dense, Input, Flatten, Dropout, merge, BatchNormalization
from keras.layers import TimeDistributed, LSTM, GRU, Bidirectional
from keras.models import Model
from keras.optimizers import SGD, Adadelta, Adam, RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.core import Reshape, RepeatVector
from keras.callbacks import EarlyStopping
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Merge, Convolution1D, MaxPooling1D
GLOVE_DIR = '../data/'
MAX_SEQUENCE_LENGTH = 140
MAX_NB_WORDS = 10000
EMBEDDING_DIM = 200
VALIDATION_SPLIT = 0.1
NB_EPOCH = 100
NB_CLASS = 3
DIM_HIDDEN = 128
DIM_LSTM = 128
# datamode = 'mul'
datamode = 'single'
if datamode == 'mul':
DATA_PATH = '../data/MSVA_multiple_17024.h5'
BATCH_SIZE = 128
else:
DATA_PATH = '../data/MSVA_single_4511.h5'
BATCH_SIZE = 32
def load_data():
    """Load texts, labels, scene features and object features from the HDF5 file.

    Returns four array-likes read from the datasets 'txt_data', 'label',
    'scene_data' and 'object_data' of DATA_PATH.
    """
    read_file = h5py.File(DATA_PATH, 'r')
    texts = read_file['txt_data'][:]
    labels = read_file['label'][:]
    scenes = read_file['scene_data'][:]
    objects = read_file['object_data'][:]
    return texts,labels,scenes,objects
def split_data(data, VALIDATION_SPLIT):
    """Split an array into train/val/test chunks along the first axis.

    The final ``2 * n`` rows (where ``n = int(VALIDATION_SPLIT * len)``)
    are held out: the first half as validation, the second half as test;
    everything before them is the training split.
    """
    n_holdout = int(VALIDATION_SPLIT * data.shape[0])
    train = data[:-(2 * n_holdout)]
    val = data[-(2 * n_holdout):-n_holdout]
    test = data[-n_holdout:]
    return train, val, test
def dp_txt(txt):
    """Clean a tweet: strip whitespace, then remove hashtags, @mentions and URLs.

    Matches are replaced with the empty string, so surrounding spaces are
    left in place.
    """
    txt = txt.strip()
    # Applied in order: #hashtags, @mentions, then URLs.
    for pattern in (
        re.compile('#[a-zA-Z0-9]+'),
        re.compile('@[a-zA-Z0-9]+'),
        re.compile("((http|ftp|https)://)(([a-zA-Z0-9\._-]+\.[a-zA-Z]{2,6})|([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\&%_\./-~-]*)?"),
    ):
        txt = re.sub(pattern, '', txt)
    return txt
def fun():
    """Train and evaluate the scene+object+text attention sentiment model.

    Pipeline: load data, clean tweets, tokenize/pad text, shuffle, split
    into train/val/test, build GloVe embeddings, assemble the Keras model
    (scene and object features attend over the tweet LSTM states), train
    with early stopping, and evaluate on the test split.

    Returns the [accuracy, f-measure] scores from model.evaluate.
    """
    texts,labels,scenes,objects = load_data()
    # Clean every tweet (strip hashtags/mentions/URLs)
    new_texts = []
    for idx in range(len(texts)):
        text = texts[idx]
        text = dp_txt(str(text))
        new_texts.append(text)
    texts = new_texts
    tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    text_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    # print('Text tensor shape:', text_data.shape)
    # print('Label tensor shape:', labels.shape)
    # print('Scene tensor shape:', scenes.shape)
    # print('Object tensor shape:', objects.shape)
    # # split the text_data into a training set and a validation set
    # Shuffle all modalities with the same permutation
    rand = np.arange(labels.shape[0])
    np.random.shuffle(rand)
    indices = rand
    text_data = text_data[indices]
    labels = labels[indices]
    scenes = scenes[indices]
    objects = objects[indices]
    text_train,text_val,text_test = split_data(text_data,VALIDATION_SPLIT)
    label_train,label_val,label_test = split_data(labels,VALIDATION_SPLIT)
    scene_train,scene_val,scene_test = split_data(scenes,VALIDATION_SPLIT)
    object_train,object_val,object_test = split_data(objects,VALIDATION_SPLIT)
    text_shape = text_train.shape[1:]
    scene_shape = scene_train.shape[1:]
    object_shape = object_train.shape[1:]
    # Build the word -> vector index from the pretrained GloVe file
    embeddings_index = {}
    f = open(os.path.join(GLOVE_DIR, 'glove.6B.200d.txt'))
    for line in f:
        values = line.split()
        # NOTE(review): standard GloVe lines are "<word> <floats...>", so
        # one would expect word = values[0] and coefs = values[1:]; using
        # values[2] as the word and a single scalar values[1] as the vector
        # looks wrong -- confirm against the actual data file.
        word = values[2]
        coefs = np.asarray(values[1], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    nb_words = min(MAX_NB_WORDS, len(word_index))
    embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NB_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    embedding_layer = Embedding(nb_words + 1,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    save_best = ModelCheckpoint('../../model/{}.hdf5'.format('my_weight'), save_best_only=True)
    elstop = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=5)
    # Image Sence
    scene_input = Input(shape=scene_shape, dtype='float32')
    img_scene = Dense(DIM_HIDDEN, activation='relu')(scene_input)
    img_scene_encoder = RepeatVector(text_shape[0], name='scene-repeat')(img_scene)
    # Image Object
    object_input = Input(shape=object_shape, dtype='float32')
    img_object = Dense(DIM_HIDDEN, activation='relu')(object_input)
    img_object_encoder = RepeatVector(text_shape[0], name='object-repeat')(img_object)
    # Text
    txt_input = Input(shape=text_shape, dtype='float32')
    txt = embedding_layer(txt_input)
    txt_hidden = (LSTM(DIM_HIDDEN, return_sequences=True, name='tweet-lstm'))(txt)
    # Scene and object encodings attend over the per-token LSTM states
    txt_att = Attention_input2(name='att_so')([txt_hidden, img_object_encoder, img_scene_encoder])
    # Merge
    img_txt = merge([img_scene, img_object, txt_att], mode='concat')
    img_txt = Dense(DIM_HIDDEN, activation='relu')(img_txt)
    img_txt_loss = Dense(NB_CLASS, activation='softmax', name='main_output')(img_txt)
    model = Model(input=[txt_input, scene_input, object_input], output=[img_txt_loss])
    model.compile(loss='categorical_crossentropy', optimizer='RMSprop',
                  metrics=['acc', 'fmeasure'])
    model.fit([text_train, scene_train, object_train], [label_train],
              validation_data=([text_val, scene_val, object_val], [label_val]),
              nb_epoch=NB_EPOCH, batch_size=BATCH_SIZE, callbacks=[elstop,save_best], verbose=1)
    # Restore the best checkpoint before the final evaluation
    model.load_weights('../../model/{}.hdf5'.format('my_weight'))
    score = model.evaluate([text_test, scene_test, object_test], label_test, verbose=0)
    print('results:', score[1], score[2])
    return score[1:]
if __name__ == '__main__':
fun()
| xunan0812/MultiSentiNet | src/att_sc_ob_txt.py | att_sc_ob_txt.py | py | 6,726 | python | en | code | 16 | github-code | 36 |
# spaceconfig = {"usemodules" : ["_collections"]}
from _collections import deque
from pytest import raises
# PyPy applevel tests for _collections.deque. NOTE: Python 2 syntax
# (xrange, it.next, cmp) -- these run under PyPy's py2 test harness.
# Core append/appendleft/pop/popleft behaviour and re-init via __init__.
def test_basics():
    assert deque.__module__ == 'collections'
    d = deque(xrange(-5125, -5000))
    d.__init__(xrange(200))
    for i in xrange(200, 400):
        d.append(i)
    for i in reversed(xrange(-200, 0)):
        d.appendleft(i)
    assert list(d) == range(-200, 400)
    assert len(d) == 600
    left = [d.popleft() for i in xrange(250)]
    assert left == range(-200, 50)
    assert list(d) == range(50, 400)
    right = [d.pop() for i in xrange(250)]
    right.reverse()
    assert right == range(150, 400)
    assert list(d) == range(50, 150)
# Bounded deques: negative maxlen rejected; overflow discards opposite end.
def test_maxlen():
    raises(ValueError, deque, 'abc', -1)
    raises(ValueError, deque, 'abc', -2)
    it = iter(range(10))
    d = deque(it, maxlen=3)
    assert list(it) == []
    assert repr(d) == 'deque([7, 8, 9], maxlen=3)'
    assert list(d) == range(7, 10)
    d.appendleft(3)
    assert list(d) == [3, 7, 8]
    d.extend([20, 21])
    assert list(d) == [8, 20, 21]
    d.extendleft([-7, -6])
    assert list(d) == [-6, -7, 8]
# maxlen=0 still consumes the source iterator but stores nothing.
def test_maxlen_zero():
    it = iter(range(100))
    d = deque(it, maxlen=0)
    assert list(d) == []
    assert list(it) == []
    d.extend(range(100))
    assert list(d) == []
    d.extendleft(range(100))
    assert list(d) == []
# .maxlen attribute is read-only and None for unbounded deques.
def test_maxlen_attribute():
    assert deque().maxlen is None
    assert deque('abc').maxlen is None
    assert deque('abc', maxlen=4).maxlen == 4
    assert deque('abc', maxlen=0).maxlen == 0
    raises((AttributeError, TypeError), "deque('abc').maxlen = 10")
# Mutating a deque invalidates live iterators (RuntimeError on next()).
def test_runtimeerror():
    d = deque('abcdefg')
    it = iter(d)
    d.pop()
    raises(RuntimeError, it.next)
    #
    d = deque('abcdefg')
    it = iter(d)
    d.append(d.pop())
    raises(RuntimeError, it.next)
    #
    d = deque()
    it = iter(d)
    d.append(10)
    raises(RuntimeError, it.next)
# count() matches list.count; mutation during count raises RuntimeError.
def test_count():
    for s in ('', 'abracadabra', 'simsalabim'*50+'abc'):
        s = list(s)
        d = deque(s)
        for letter in 'abcdeilmrs':
            assert s.count(letter) == d.count(letter)
    class MutatingCompare:
        def __eq__(self, other):
            d.pop()
            return True
    m = MutatingCompare()
    d = deque([1, 2, 3, m, 4, 5])
    raises(RuntimeError, d.count, 3)
# Rich comparisons between deques mirror comparisons of their list contents;
# equality additionally requires matching types.
def test_comparisons():
    d = deque('xabc'); d.popleft()
    for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
        assert (d==e) == (type(d)==type(e) and list(d)==list(e))
        assert (d!=e) == (not(type(d)==type(e) and list(d)==list(e)))
    args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
    for x in args:
        for y in args:
            assert (x == y) == (list(x) == list(y))
            assert (x != y) == (list(x) != list(y))
            assert (x < y) == (list(x) < list(y))
            assert (x <= y) == (list(x) <= list(y))
            assert (x > y) == (list(x) > list(y))
            assert (x >= y) == (list(x) >= list(y))
            assert cmp(x,y) == cmp(list(x),list(y))
# extend() works from iterables, including the deque itself.
def test_extend():
    d = deque('a')
    d.extend('bcd')
    assert list(d) == list('abcd')
    d.extend(d)
    assert list(d) == list('abcdabcd')
# += extends in place and returns the same object.
def test_iadd():
    d = deque('a')
    original_d = d
    d += 'bcd'
    assert list(d) == list('abcd')
    d += d
    assert list(d) == list('abcdabcd')
    assert original_d is d
# extendleft() reverses the order of the supplied elements.
def test_extendleft():
    d = deque('a')
    d.extendleft('bcd')
    assert list(d) == list(reversed('abcd'))
    d.extendleft(d)
    assert list(d) == list('abcddcba')
# Indexing matches list semantics, including negative indices and IndexError.
def test_getitem():
    n = 200
    l = xrange(1000, 1000 + n)
    d = deque(l)
    for j in xrange(-n, n):
        assert d[j] == l[j]
    raises(IndexError, "d[-n-1]")
    raises(IndexError, "d[n]")
# Item assignment at positive and negative indices.
def test_setitem():
    n = 200
    d = deque(xrange(n))
    for i in xrange(n):
        d[i] = 10 * i
    assert list(d) == [10*i for i in xrange(n)]
    l = list(d)
    for i in xrange(1-n, 0, -3):
        d[i] = 7*i
        l[i] = 7*i
    assert list(d) == l
# del by (negative) index.
def test_delitem():
    d = deque("abcdef")
    del d[-2]
    assert list(d) == list("abcdf")
# In-place reverse() returns None; exercised across many lengths.
def test_reverse():
    d = deque(xrange(1000, 1200))
    d.reverse()
    assert list(d) == list(reversed(range(1000, 1200)))
    #
    n = 100
    data = map(str, range(n))
    for i in range(n):
        d = deque(data[:i])
        r = d.reverse()
        assert list(d) == list(reversed(data[:i]))
        assert r is None
        d.reverse()
        assert list(d) == data[:i]
# rotate(): positive/negative steps, default of 1, and large step counts
# (which reduce modulo the length).
def test_rotate():
    s = tuple('abcde')
    n = len(s)
    d = deque(s)
    d.rotate(1) # verify rot(1)
    assert ''.join(d) == 'eabcd'
    d = deque(s)
    d.rotate(-1) # verify rot(-1)
    assert ''.join(d) == 'bcdea'
    d.rotate() # check default to 1
    assert tuple(d) == s
    d.rotate(500000002)
    assert tuple(d) == tuple('deabc')
    d.rotate(-5000002)
    assert tuple(d) == tuple(s)
# len() tracks mutations; popping an empty deque raises IndexError;
# clear() empties without error.
def test_len():
    d = deque('ab')
    assert len(d) == 2
    d.popleft()
    assert len(d) == 1
    d.pop()
    assert len(d) == 0
    raises(IndexError, d.pop)
    raises(IndexError, d.popleft)
    assert len(d) == 0
    d.append('c')
    assert len(d) == 1
    d.appendleft('d')
    assert len(d) == 2
    d.clear()
    assert len(d) == 0
    assert list(d) == []
# remove() deletes the first occurrence only; ValueError when absent.
def test_remove():
    d = deque('abcdefghcij')
    d.remove('c')
    assert d == deque('abdefghcij')
    d.remove('c')
    assert d == deque('abdefghij')
    raises(ValueError, d.remove, 'c')
    assert d == deque('abdefghij')
# repr() round-trips through eval; self-referencing deques show '...'.
def test_repr():
    d = deque(xrange(20))
    e = eval(repr(d))
    assert d == e
    d.append(d)
    assert '...' in repr(d)
# Deques are unhashable.
def test_hash():
    raises(TypeError, hash, deque('abc'))
# Constructing a deque from another deque copies its contents.
def test_roundtrip_iter_init():
    d = deque(xrange(200))
    e = deque(d)
    assert d is not e
    assert d == e
    assert list(d) == list(e)
# __reduce__ pickling protocol: contents, optional maxlen, and instance
# dict for subclasses.
def test_reduce():
    #
    d = deque('hello world')
    r = d.__reduce__()
    assert r == (deque, (list('hello world'),))
    #
    d = deque('hello world', 42)
    r = d.__reduce__()
    assert r == (deque, (list('hello world'), 42))
    #
    class D(deque):
        pass
    d = D('hello world')
    d.a = 5
    r = d.__reduce__()
    assert r == (D, (list('hello world'), None), {'a': 5})
    #
    class D(deque):
        pass
    d = D('hello world', 42)
    d.a = 5
    r = d.__reduce__()
    assert r == (D, (list('hello world'), 42), {'a': 5})
# copy.copy produces a shallow copy (shared element objects).
def test_copy():
    import copy
    mut = [10]
    d = deque([mut])
    e = copy.copy(d)
    assert d is not e
    assert d == e
    mut[0] = 11
    assert d == e
# reversed() yields elements back-to-front.
def test_reversed():
    for s in ('abcd', xrange(200)):
        assert list(reversed(deque(s))) == list(reversed(s))
# Popped elements are released (their __del__ runs after gc).
def test_free():
    import gc
    class X(object):
        freed = False
        def __del__(self):
            X.freed = True
    d = deque()
    d.append(X())
    d.pop()
    gc.collect(); gc.collect(); gc.collect()
    assert X.freed
# Index objects implementing __index__ are accepted for item access.
def test_index_method():
    d = deque([1, 2, 3, 4, 5])
    class A(object):
        def __index__(self):
            return 1
    assert d[A()] == 2
# If __index__ mutates the deque, access fails with IndexError rather
# than reading freed storage.
def test_index_method_mutates():
    d = deque([1, 2, 3, 4, 5])
    class A(object):
        def __index__(self):
            d.clear()
            return 1
    with raises(IndexError):
        d[A()]
    d = deque([1, 2, 3, 4, 5])
    with raises(IndexError):
        d[A()] = 2
| mozillazg/pypy | pypy/module/_collections/test/apptest_deque.py | apptest_deque.py | py | 7,397 | python | en | code | 430 | github-code | 36 |
import re
from math import ceil
import dateparser
from aspen.database.models import TreeType
from aspen.workflows.nextstrain_run.build_plugins.base_plugin import BaseConfigPlugin
class TreeTypePlugin(BaseConfigPlugin):
crowding_penalty: float = 0
tree_type: TreeType
subsampling_scheme: str = "NONE"
def _update_config_params(self, config):
if not config.get("builds"):
# TODO, force MPX structure to look more like SC2's
config["builds"] = {"aspen": {}}
build = config["builds"]["aspen"]
location = self.template_args["location"]
# Make a shortcut to decide whether this is a location vs division vs country level build
if not location.division:
self.tree_build_level = "country"
elif not location.location:
self.tree_build_level = "division"
# Fill out country/division/location fields if the group has them,
# or remove those fields if they don't.
location_fields = ["country", "division", "location"]
location_values = []
for field in location_fields:
value = getattr(location, field)
if value:
build[field] = value
location_values.append(value)
else:
if build.get(field):
del build[field]
# NOTE: <TreeTypePlugin>.subsampling_scheme is used in 3 places:
# - Its lowercase'd name is used to find a markdown file with an "about this tree" description
# - It refers to a subsampling_scheme key in the mega nextstrain template
# - It's title-case'd and included in the tree title as human-readable text
build["subsampling_scheme"] = self.subsampling_scheme
# Update the tree's title with build type, location and date range.
# We always provide some form of end date in the title.
end_date = self._get_formatted_tree_end_date()
# We base format of title on whether we have a `filter_start_date`
if self.template_args.get("filter_start_date") is not None:
title_template = "{tree_type} tree for samples collected in {location} between {start_date} and {end_date}"
build["title"] = title_template.format(
tree_type=self.subsampling_scheme.title(),
location=", ".join(location_values),
start_date=dateparser.parse(
self.template_args.get("filter_start_date")
).strftime("%Y-%m-%d"),
end_date=end_date,
)
else:
title_template = "{tree_type} tree for samples collected in {location} up until {end_date}"
build["title"] = title_template.format(
tree_type=self.subsampling_scheme.title(),
location=", ".join(location_values),
end_date=end_date,
)
if config.get("files"):
config["files"]["description"] = config["files"]["description"].format(
tree_type=self.subsampling_scheme.lower()
)
if config.get("priorities"):
config["priorities"]["crowding_penalty"] = self.crowding_penalty
def _get_formatted_tree_end_date(self):
    """Returns appropriate YYYY-MM-DD for tree's end date or "--" if none.

    For tree titles, we want to always have an end date to display. If
    the tree had a `filter_end_date` arg, we can use that. However, if no
    filter arg was given for the end date, we use the implicit end date of
    when the tree build was kicked off (from PhyloRun.start_datetime), as
    the tree build process can only use samples up to the moment in time
    when it was kicked off, so it's an implicit end date to samples.

    If there is no date available at all, we return "--" as an absolute
    fall back. PhyloRun.start_datetime is not actually guaranteed at the DB
    level, but all our code that creates runs always provides one (as of
    Nov 2022, every single run has a start_datetime). The fall back is
    provided just to code defensively in case something weird ever happens.
    """
    formatted_end_date = "--"  # safe default, should never happen
    filter_end_date = self.template_args.get("filter_end_date")
    if filter_end_date is not None:
        # dateparser also accepts relative expressions like "5 days ago";
        # normalize whatever we got to YYYY-MM-DD for display.
        formatted_end_date = dateparser.parse(filter_end_date).strftime("%Y-%m-%d")
    else:
        # `run_start_datetime` is a `context` kwarg, so not guaranteed to exist
        run_start_datetime = getattr(self, "run_start_datetime", None)
        if run_start_datetime is not None:
            formatted_end_date = run_start_datetime.strftime("%Y-%m-%d")
        else:
            print("WARNING -- Run missing a start_datetime. Default to '--'")
    return formatted_end_date
def update_config(self, config):
    """Apply shared config updates, then this tree type's specific ones.

    `config` is the full mega-template dict. After the subclass hook runs,
    all subsampling schemes other than this tree type's are discarded.
    """
    self._update_config_params(config)
    subsampling = config["subsampling"][self.subsampling_scheme]
    # Subclass hook (Overview / NonContextualized / Targeted behavior).
    self.run_type_config(config, subsampling)
    # Remove unused subsampling schemes from our output file
    config["subsampling"] = {self.subsampling_scheme: subsampling}
def run_type_config(self, config, subsampling):
    """Tree-type-specific config hook; concrete plugins must override."""
    raise NotImplementedError("base class doesn't implement this")
class OverviewPlugin(TreeTypePlugin):
    """Overview tree: broad sampling of the group's location/division."""

    crowding_penalty = 0.1
    tree_type = TreeType.OVERVIEW
    subsampling_scheme = "OVERVIEW"

    def run_type_config(self, config, subsampling):
        # Special case: CDPH overview trees also include RIPHL submissions.
        if self.group.name == "Chicago Department of Public Health":
            if "--query" in subsampling["group"]["query"]:  # SC2 format
                subsampling["group"][
                    "query"
                ] = '''--query "((location == '{location}') & (division == '{division}')) | submitting_lab == 'RIPHL at Rush University Medical Center'"'''
            else:  # MPX format
                subsampling["group"]["query"] = (
                    "("
                    + subsampling["group"]["query"]
                    + ") | submitting_lab == 'RIPHL at Rush University Medical Center'"
                )
        # Handle sampling date & pango lineage filters
        apply_filters(config, subsampling, self.template_args)
        # Update our sampling for state/country level builds if necessary
        update_subsampling_for_location(self.tree_build_level, subsampling)
        # Update country and international max sequences.
        if self.tree_build_level == "country":
            subsampling["international"]["max_sequences"] = 1000
        if self.tree_build_level == "division":
            subsampling["country"]["max_sequences"] = 800
            subsampling["international"]["max_sequences"] = 200
        # If there aren't any selected samples
        # (either due to being a scheduled run or no user selection),
        # drop the include-file entry so the tree run doesn't break.
        # NOTE(review): original comment said "put reference sequences in
        # include.txt", but the code *deletes* the entry — confirm intent.
        if self.num_included_samples == 0:
            if config.get("files", {}).get("include"):
                del config["files"]["include"]
class NonContextualizedPlugin(TreeTypePlugin):
    """Non-contextualized tree: user filters only, no extra context groups."""

    crowding_penalty = 0.1
    tree_type = TreeType.NON_CONTEXTUALIZED
    subsampling_scheme = "NON_CONTEXTUALIZED"

    def run_type_config(self, config, subsampling):
        # Sampling-date & pango-lineage filters.
        apply_filters(config, subsampling, self.template_args)
        # Adjust the scheme for state/country level builds when needed.
        update_subsampling_for_location(self.tree_build_level, subsampling)
        # No user-selected samples: remove the include-file entry so the
        # tree run doesn't break.
        no_selection = self.num_included_samples == 0
        if no_selection and config.get("files", {}).get("include"):
            del config["files"]["include"]
# Set max_sequences for targeted builds.
class TargetedPlugin(TreeTypePlugin):
    """Targeted tree: built around a user-selected set of samples."""

    crowding_penalty = 0
    tree_type = TreeType.TARGETED
    subsampling_scheme = "TARGETED"

    def run_type_config(self, config, subsampling):
        """
        DATA we can use in this function:
          config : the entire mega-template data structure, with some fields already updated by BaseNextstrainConfigBuilder.update_build()
          subsampling : the subsampling scheme for *this build type only* (ex: mega_template["subsampling"]["TARGETED"])
          self.subsampling_scheme : the value a few lines above
          self.crowding_penalty : the value a few lines above
          self.group : information about the group that this run is for (ex: self.group.name or self.group.default_tree_location)
          self.num_sequences : the number of aspen samples written to our fasta input file
          self.num_included_samples : the number of samples in include.txt (aspen + gisaid samples) for on-demand runs only

        EXAMPLES SECTION:
          Delete a group from a subsampling scheme:
            del subsampling["international"]
          Delete a setting from a group:
            del subsampling["international"]["seq_per_group"]
          Add a group to a subsampling scheme:
            subsampling["my_new_group_name"] = {
                "group_by": "region",
                "max_sequences": 200,
                "query": '--query "(foo != {bar})"'
            }
          Add a setting to a group (this is the same as updating an existing setting!):
            subsampling["international"]["mynewsetting"] = "mynewvalue"
        """
        # Adjust group sizes if we have a lot of samples.
        closest_max_sequences = 100
        other_max_sequences = 25
        if self.num_included_samples >= 100:
            # Scale context groups at a quarter of the selection, rounded up.
            closest_max_sequences = self.num_included_samples
            other_max_sequences = int(ceil(self.num_included_samples / 4.0))

        subsampling["closest"]["max_sequences"] = closest_max_sequences
        subsampling["group"]["max_sequences"] = (
            other_max_sequences * 2
        )  # Temp mitigation for missing on-demand overview
        subsampling["state"]["max_sequences"] = (
            other_max_sequences * 2
        )  # Temp mitigation for missing on-demand overview
        subsampling["country"]["max_sequences"] = other_max_sequences
        subsampling["international"]["max_sequences"] = other_max_sequences

        # Update our sampling for state/country level builds if necessary
        update_subsampling_for_location(self.tree_build_level, subsampling)

        # Increase int'l sequences for state/country builds.
        if (
            self.tree_build_level != "location"
            and subsampling["international"]["max_sequences"] < 100
        ):
            subsampling["international"]["max_sequences"] = 100
def update_subsampling_for_location(tree_build_level, subsampling):
    """Apply country- or division-level subsampling tweaks when relevant.

    Location-level builds are left untouched.
    """
    if tree_build_level == "country":
        update_subsampling_for_country(subsampling)
    elif tree_build_level == "division":
        update_subsampling_for_division(subsampling)
def update_subsampling_for_country(subsampling):
    """Adjust a subsampling scheme for a country-level build.

    Drops the state/country context groups (redundant at this level) and
    widens the local group's query to the whole country.
    """
    # State and country groups aren't useful for a country-wide tree.
    for group_name in ("state", "country"):
        subsampling.pop(group_name, None)
    # Rewrite the local group query in whichever format it uses.
    if "--query" in subsampling["group"]["query"]:
        # SC2 format: full command-line flag with quoting.
        subsampling["group"]["query"] = '''--query "(country == '{country}')"'''
    else:
        # MPX format: bare query expression.
        subsampling["group"]["query"] = "(country == '{country}')"
def update_subsampling_for_division(subsampling):
    """Adjust a subsampling scheme for a division-level build.

    Drops the state context group and narrows the local group's query to
    the division, keeping the country filter in case multiple divisions
    worldwide share a name.
    """
    # State isn't useful for a division-wide tree.
    subsampling.pop("state", None)
    # Rewrite the local group query in whichever format it uses.
    if "--query" in subsampling["group"]["query"]:
        # SC2 format: full command-line flag with quoting.
        subsampling["group"]["query"] = (
            '''--query "(division == '{division}') & (country == '{country}')"'''
        )
    else:
        # MPX format: bare query expression.
        subsampling["group"]["query"] = (
            "(division == '{division}') & (country == '{country}')"
        )
def apply_filters(config, subsampling, template_args):
    """Apply sampling-date and pango-lineage filters to a subsampling scheme.

    Detects the scheme's format from the local group query: SC2 schemes
    embed full command-line flags (`--query ...`), MPX schemes use bare
    keys/expressions. Filter values support dateparser expressions such as
    "5 days ago".
    """
    # MPX format defaults: plain keys (min-date/max-date) and `lineage` field.
    include_arguments_in_filters = False
    lineage_field = "lineage"
    if "--query" in subsampling["group"]["query"]:
        # SC2 format: flag-style keys (--min-date/--max-date), pango_lineage field.
        include_arguments_in_filters = True
        lineage_field = "pango_lineage"
    min_date = template_args.get("filter_start_date")
    if min_date:
        # Support date expressions like "5 days ago" in our cron schedule.
        min_date = dateparser.parse(min_date).strftime("%Y-%m-%d")
        if include_arguments_in_filters:
            subsampling["group"][
                "min_date"
            ] = f"--min-date {min_date}"  # ex: --min-date 2020-01-01
        else:
            subsampling["group"]["min-date"] = str(min_date)  # ex: min-date: 2020-01-01
    max_date = template_args.get("filter_end_date")
    if max_date:
        # Support date expressions like "5 days ago" in our cron schedule.
        max_date = dateparser.parse(max_date).strftime("%Y-%m-%d")
        if include_arguments_in_filters:
            subsampling["group"][
                "max_date"
            ] = f"--max-date {max_date}"  # ex: --max-date 2020-01-01
            # The serial-sampling context group (when present) gets the same cap.
            if "international_serial_sampling" in subsampling:
                subsampling["international_serial_sampling"][
                    "max_date"
                ] = f"--max-date {max_date}"  # ex: --max-date 2020-01-01
        else:
            subsampling["group"]["max-date"] = str(max_date)  # ex: max-date: 2020-01-01
            if "international_serial_sampling" in subsampling:
                subsampling["international_serial_sampling"]["max-date"] = str(
                    max_date
                )  # ex: max-date: 2020-01-01
    pango_lineages = template_args.get("filter_pango_lineages")
    if pango_lineages:
        # Nextstrain is rather particular about the acceptable syntax for
        # values in the pango_lineages key. Before modifying please see
        # https://discussion.nextstrain.org/t/failure-when-specifying-multiple-pango-lineages-in-a-build/670
        clean_values = [re.sub(r"[^0-9a-zA-Z.]", "", item) for item in pango_lineages]
        clean_values.sort()
        # NOTE(review): assumes the mega-template has builds.aspen — confirm.
        config["builds"]["aspen"]["pango_lineage"] = clean_values
        # Remove the last " from our old query so we can inject more filters
        end_string = ""
        old_query = subsampling["group"]["query"]
        if old_query.endswith('"'):
            end_string = '"'
            old_query = old_query[:-1]
        pango_query = " & (" + lineage_field + " in {pango_lineage})"
        subsampling["group"]["query"] = old_query + pango_query + end_string
| chanzuckerberg/czgenepi | src/backend/aspen/workflows/nextstrain_run/build_plugins/type_plugins.py | type_plugins.py | py | 14,798 | python | en | code | 11 | github-code | 36 |
18553686764 | import random
from itertools import chain
import numpy as np
import pandas as pd
from cytoolz import itemmap, sliding_window, valmap
from skfusion import fusion
class DataFusionModel(object):
    """Wrapper around scikit-fusion (skfusion) collective matrix factorization.

    `nodes` maps each object-type name to its number of latent factors;
    `relations` maps (src_name, dst_name) pairs to lists of pandas
    DataFrames, one per relation between those types.
    """

    def __init__(
        self, nodes, relations, init_type="random", random_state=666, n_jobs=1
    ):
        self.nodes = nodes
        self.relation_definitions = relations
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.init_type = init_type

    def reconstruct(self, src, dst, idx=0, return_dataframe=True):
        """Complete (reconstruct) the idx-th relation matrix between src and dst."""
        relation = list(
            self.fuser.fusion_graph.get_relations(self.types[src], self.types[dst])
        )[idx]
        values = self.fuser.complete(relation)
        if return_dataframe:
            # Reuse the original DataFrame's labels for the reconstruction.
            components = self.relation_definitions[(src, dst)][idx]
            return pd.DataFrame(
                values, index=components.index.values, columns=components.columns.values
            )
        return values

    def factor(self, type_name, return_dataframe=True):
        """Return the latent factor matrix of one object type (rows = entities)."""
        factor = self.fuser.factor(self.types[type_name])
        if not return_dataframe:
            return factor
        profile = pd.DataFrame(
            factor,
            index=self.indices[type_name],
            columns=[f"C{i:02}" for i in range(factor.shape[1])],
        )
        return profile

    def _construct_relationship(self, path, updated_factors):
        """Chain factors and backbones along `path` to approximate a relation.

        `updated_factors` (name -> matrix) overrides fitted factors when given.
        """
        start_node = path[0]
        end_node = path[-1]
        computed_matrix = (
            self.fuser.factor(start_node)
            if not start_node.name in updated_factors
            else updated_factors[start_node.name]
        )
        # NOTE(review): debug prints left in — consider switching to logging.
        print(
            type(start_node),
            start_node,
            start_node.name in updated_factors,
            computed_matrix.shape,
        )
        # Multiply through each hop's backbone matrix along the chain.
        for src, dst in sliding_window(2, path):
            relation = list(self.fuser.fusion_graph.get_relations(src, dst))[0]
            print(relation)
            computed_matrix = np.dot(computed_matrix, self.fuser.backbone(relation))
        end_factor = (
            self.fuser.factor(end_node)
            if not end_node.name in updated_factors
            else updated_factors[end_node.name]
        )
        computed_matrix = np.dot(computed_matrix, end_factor.T)
        return computed_matrix

    def relation_profiles(self, src, dst, updated_factors=None, index=None):
        """Approximate src->dst relations along every chain in the fusion graph.

        Returns a list of (path, DataFrame) pairs, one per chain.
        """
        if updated_factors is None:
            updated_factors = {}
        if index is None:
            index = self.indices[src]
        paths = list(self.fuser.chain(self.types[src], self.types[dst]))
        relations = []
        for path in paths:
            rel = self._construct_relationship(path, updated_factors)
            profile = pd.DataFrame(rel, index=index, columns=self.indices[dst])
            relations.append(profile)
        return list(zip(paths, relations))

    def fit(self, method='factorization'):
        """Build the skfusion graph and fit it.

        `method` selects Dfmf (factorization) or Dfmc (completion).
        """
        # One skfusion ObjectType per declared node.
        self.types = dict(
            zip(
                self.nodes.keys(),
                map(lambda x: fusion.ObjectType(*x), self.nodes.items()),
            )
        )
        print(self.types)
        # One skfusion Relation per DataFrame, flattened across all pairs.
        self.relations = map(
            lambda x: map(
                lambda r: fusion.Relation(
                    r.values, self.types[x[0][0]], self.types[x[0][1]]
                ),
                x[1],
            ),
            self.relation_definitions.items(),
        )
        self.relations = list(chain(*self.relations))
        print(self.relations)
        # Remember row/column labels per type for later DataFrame outputs.
        # Labels come from the first relation that mentions each type.
        self.indices = {}
        for (src, dst), dfs in self.relation_definitions.items():
            if not src in self.indices:
                self.indices[src] = list(dfs[0].index)
            if not dst in self.indices:
                self.indices[dst] = list(dfs[0].columns)
        # Seed both RNGs for reproducible initialization.
        random.seed(self.random_state)
        np.random.seed(self.random_state)
        self.fusion_graph = fusion.FusionGraph(self.relations)
        if method == 'factorization':
            fuser = fusion.Dfmf
        elif method == 'completion':
            fuser = fusion.Dfmc
        else:
            raise ValueError('method must be factorization or completion')
        self.fuser = fuser(
            init_type=self.init_type, random_state=self.random_state, n_jobs=self.n_jobs
        )
        self.fuser.fuse(self.fusion_graph)
| zorzalerrante/aves | src/aves/models/datafusion/base.py | base.py | py | 4,339 | python | en | code | 57 | github-code | 36 |
2535331354 | import numpy as np
import tensorflow as tf
import agents.utils as agent_utils
import config_constants as cc
from model.LehnertGridworldModelLatent import LehnertGridworldModelLatent
class LehnertGridworldModelGMM(LehnertGridworldModelLatent):
    """Latent-state gridworld model with a Gaussian-mixture prior (TF1 graph).

    Extends the latent model with a GMM over state embeddings; the loss
    trades off Q-prediction accuracy, encoder entropy and prior likelihood
    via beta0/beta1/beta2.
    """

    ENCODER_NAMESPACE = "encoder"
    NUM_ACTIONS = 4

    def __init__(self, config):
        # The parent's KL beta is unused here; the GMM terms replace it.
        config[cc.BETA] = None
        super(LehnertGridworldModelGMM, self).__init__(config)
        self.num_components = config[cc.NUM_COMPONENTS]
        self.learning_rate = config[cc.LEARNING_RATE]
        self.beta0 = config[cc.BETA0]
        self.beta1 = config[cc.BETA1]
        self.beta2 = config[cc.BETA2]
        # Initialization hyperparameters for the mixture means/log-variances.
        self.mixtures_mu_init_sd = config[cc.MIXTURES_MU_INIT_SD]
        self.mixtures_sd_init_mu = config[cc.MIXTURES_SD_INIT_MU]
        self.mixtures_sd_init_sd = config[cc.MIXTURES_SD_INIT_SD]

    def validate(self, states, actions, qs, batch_size=100):
        """Return per-sample [q_loss, encoder_entropy, prior_log_likelihood]."""
        num_steps = int(np.ceil(len(states) / batch_size))
        losses = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: states[batch_slice],
                self.actions_pl: actions[batch_slice],
                self.qs_pl: qs[batch_slice]
            }
            l1, l2, l3 = self.session.run(
                [self.full_q_loss_t, self.full_encoder_entropy_t, self.full_prior_log_likelihood_t],
                feed_dict=feed_dict
            )
            # Stack the three loss vectors into shape (batch, 3).
            losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
        losses = np.concatenate(losses, axis=0)
        return losses

    def encode(self, states, batch_size=100):
        """Encode states; returns (embeddings, per-component log posteriors)."""
        num_steps = int(np.ceil(len(states) / batch_size))
        encodings = []
        log_probs = []
        for i in range(num_steps):
            batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
            feed_dict = {
                self.states_pl: states[batch_slice]
            }
            tmp_encodings, tmp_log_probs = \
                self.session.run([self.state_sample_t, self.z1_log_cond], feed_dict=feed_dict)
            encodings.append(tmp_encodings)
            log_probs.append(tmp_log_probs)
        encodings = np.concatenate(encodings, axis=0)
        log_probs = np.concatenate(log_probs, axis=0)
        return encodings, log_probs

    def build(self):
        """Assemble the full TF graph: model, mixture prior, training ops."""
        self.build_placeholders_()
        self.build_model()
        self.build_predictors_()
        self.build_mixture_components_()
        self.build_training_()
        self.saver = tf.train.Saver()

    def build_mixture_components_(self):
        """Create trainable mixture means and (log-)variances."""
        self.mixtures_mu_v = tf.get_variable(
            "mixtures_mu", initializer=tf.random_normal_initializer(
                mean=0.0, stddev=self.mixtures_mu_init_sd, dtype=tf.float32
            ), shape=(self.num_components, self.num_blocks)
        )
        # Variance is parameterized in log space to keep it positive.
        self.mixtures_logvar_v = tf.get_variable(
            "mixtures_var", initializer=tf.random_normal_initializer(
                mean=self.mixtures_sd_init_mu, stddev=self.mixtures_sd_init_sd, dtype=tf.float32
            ), shape=(self.num_components, self.num_blocks)
        )
        self.mixtures_var_t = tf.exp(self.mixtures_logvar_v)

    def build_training_(self):
        """Combine the loss terms and create encoder + prior train steps."""
        self.global_step = tf.train.get_or_create_global_step()
        self.build_q_loss_()
        self.build_prior_likelihood_()
        self.build_encoder_entropy_loss_()
        self.build_weight_decay_loss_()
        # Maximize entropy and prior likelihood (hence the minus signs).
        self.loss_t = self.beta0 * self.q_loss_t \
            - self.beta1 * self.encoder_entropy_t \
            - self.beta2 * self.prior_log_likelihood_t \
            + self.regularization_loss_t
        self.prior_loss_t = - self.prior_log_likelihood_t
        self.build_model_training_()
        self.build_prior_training_()
        self.train_step = tf.group(self.encoder_train_step, self.prior_train_step)

    def build_prior_likelihood_(self):
        """Log-likelihood of encodings under the GMM (uniform component weights)."""
        self.z1_log_probs = self.many_multivariate_normals_log_pdf(
            self.state_sample_t, self.mixtures_mu_v, self.mixtures_var_t, self.mixtures_logvar_v
        )
        # log p(z, c) with p(c) = 1 / num_components.
        self.z1_log_joint = self.z1_log_probs - np.log(self.num_components)
        self.full_prior_log_likelihood_t = tf.reduce_logsumexp(self.z1_log_joint, axis=1)
        self.prior_log_likelihood_t = tf.reduce_mean(self.full_prior_log_likelihood_t, axis=0)
        # log p(c | z): component posteriors used by encode().
        self.z1_log_cond = self.z1_log_joint - tf.reduce_logsumexp(self.z1_log_joint, axis=1)[:, tf.newaxis]

    def build_model_training_(self):
        """Optimizer step over encoder variables only."""
        encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
        encoder_optimizer = agent_utils.get_optimizer(self.optimizer, self.learning_rate)
        self.encoder_train_step = encoder_optimizer.minimize(
            self.loss_t, global_step=self.global_step, var_list=encoder_variables
        )

    def build_prior_training_(self):
        """Optimizer step over the mixture parameters only."""
        mixture_variables = [self.mixtures_mu_v, self.mixtures_logvar_v]
        mixture_optimizer = agent_utils.get_optimizer(self.optimizer, self.learning_rate)
        self.prior_train_step = mixture_optimizer.minimize(
            self.prior_loss_t, global_step=self.global_step, var_list=mixture_variables
        )

    @staticmethod
    def many_multivariate_normals_log_pdf(x, mu, var, logvar):
        """Log pdf of x under each diagonal Gaussian; result shape (batch, components)."""
        # Broadcast x against every component: (batch, 1, d) vs (1, comp, d).
        x = x[:, tf.newaxis, :]
        mu = mu[tf.newaxis, :, :]
        var = var[tf.newaxis, :, :]
        logvar = logvar[tf.newaxis, :, :]
        term1 = - (mu.shape[2].value / 2) * np.log(2 * np.pi)
        term2 = - (1 / 2) * tf.reduce_sum(logvar, axis=2)
        term3 = - (1 / 2) * tf.reduce_sum(tf.square(x - mu) / var, axis=2)
        return term1 + term2 + term3
| ondrejbiza/discrete_abstractions | model/LehnertGridworldModelGMM.py | LehnertGridworldModelGMM.py | py | 5,729 | python | en | code | 4 | github-code | 36 |
39885168812 | import pytz
import base64
from typing import List
from flask import Blueprint, request, redirect, abort
from flask_login.utils import login_required
from datetime import datetime, timedelta, timezone
from flask.templating import render_template
from flask_login import current_user
from mib.rao.user_manager import UserManager, User
from mib.rao.message_manager import MessageManager, MessagePost, Message
from mib.rao.draft_manager import DraftManager, DraftPost, Draft
messages = Blueprint('messages', __name__)
@ messages.route('/messages/send', methods=['GET', 'POST'])
@login_required
def send_message():
    ''' GET: get the page for write and send a message to the chosen recipient/s
        POST: send the message to the recipient/s at the chosen date '''
    if request.method == 'POST':
        # Recipients arrive as a comma-separated list of email addresses.
        emails = request.form.get('receiver').split(',')
        recipient_list = []
        recipient_error_list = []
        message_ok = False
        for email in emails:
            email = email.strip(' ')
            user = UserManager.get_user_by_email(email)
            check = True
            if user is not None and user.is_active:
                recipient_list.append(user.id)
                check = False
            if check:
                # Unknown or inactive users are reported back to the sender.
                recipient_error_list.append(email)
        new_message: MessagePost = MessagePost()
        new_message.attachment_list = []
        new_message.id_sender = current_user.id
        new_message.recipients_list = recipient_list
        # Interpret the submitted datetime as UTC+1 and normalize to UTC ISO.
        # NOTE(review): fixed +1h offset, no DST handling — confirm intent.
        message_date = request.form.get('date')
        tz = timezone(timedelta(hours=1))
        message_date = datetime.fromisoformat(message_date)
        message_date = message_date.replace(tzinfo=tz)
        message_date = message_date.astimezone(pytz.UTC)
        message_date = message_date.isoformat()
        new_message.date_delivery = message_date
        new_message.text = request.form.get('text')
        # Attachments are base64-encoded to travel inside the JSON payload.
        uploaded_files = request.files.getlist("files")
        if uploaded_files and any(f for f in uploaded_files):
            for file in uploaded_files:
                if file:
                    attachment = file.read()
                    new_message.attachment_list.append(base64.b64encode(attachment).decode('ascii'))
        new_message = MessageManager.send_message(new_message)
        if new_message is not None:
            message_ok = True
        else:
            # Whole send failed: flag every address as failed.
            # NOTE(review): addresses already flagged above get appended a
            # second time here — confirm whether duplicates are intended.
            for email in emails:
                recipient_error_list.append(email)
        return render_template("send_message.html", form=dict(), message_ok=message_ok,
                               recipient_error_list=recipient_error_list)
    else:
        # Landing from the recipients page: pre-populate the recipient field
        # from the query-string values.
        recipient_message = request.args.items(multi=True)
        form = {'recipient': ''}
        for recipient in recipient_message:
            if recipient[1] != '':
                form['recipient'] += recipient[1] if form['recipient'] == '' else ', ' + recipient[1]
        return render_template("send_message.html", form=form)
@messages.route('/messages/<message_id>', methods=["GET"])
@login_required
def view_message(message_id):
    ''' GET: visualize the chosen message '''
    message = MessageManager.get_message(message_id)
    # Unknown message id: nothing to show.
    if message is None:
        abort(404)
    recipient = UserManager.get_user_by_id(message.id_recipient)
    sender = UserManager.get_user_by_id(message.id_sender)
    return render_template(
        "message.html",
        sender=sender,
        recipient=recipient,
        message=message,
        images=message.attachment_list,
    )
@messages.route('/messages/<message_id>/delete', methods=["POST"])
@login_required
def deleteMessage(message_id):
    ''' POST: delete the chosen message '''
    status = MessageManager.delete_message(message_id)
    # Propagate not-found / forbidden straight to the client.
    if status in (404, 403):
        abort(status)
    return redirect('/inbox')
@messages.route("/messages/<id>/withdraw", methods=['POST'])
@login_required
def withdraw_message(id):
    ''' POST: withdraw a message not sent yet, paying points '''
    status = MessageManager.withdraw_message(id)
    # Propagate not-found / forbidden straight to the client.
    if status in (404, 403):
        abort(status)
    return redirect('/outbox')
@messages.route('/messages/<id_message>/forward', methods=['GET'])
@login_required
def send_forward_msg(id_message):
    ''' GET: get the send message page filled with the text to forward '''
    # BUGFIX: guard against a missing/deleted message — previously the
    # .text access raised AttributeError on None. Mirrors view_message.
    message = MessageManager.get_message(id_message)
    if message is None:
        abort(404)
    # Pre-populate the recipient field from the query-string values.
    recipient_message = request.args.items(multi=True)
    form = dict(recipient="", text=message.text, message_id=id_message)
    for recipient in recipient_message:
        if recipient[1] != '':
            form['recipient'] += recipient[1] if form['recipient'] == '' else ', ' + recipient[1]
    return render_template("send_message.html", form=form, forward=True)
23469121006 | import yaml,os
class Common_funcs():
    """Helpers for reading calculator test data from the project YAML file."""

    def get_datas(self, path: str) -> list:
        """Load `<project root>\\lagou05<path>` and return test data/id lists.

        Returns, in this order: add data, add ids, div data, div ids,
        mul data, mul ids, sub data, sub ids (same as the original
        hand-unrolled version).
        """
        # The project root is whatever precedes "lagou05" in the CWD.
        current_path = os.getcwd().split("lagou05")[0]
        # `with` closes the file automatically; the explicit f.close() and
        # per-operation copy/paste of the original are gone.
        with open(current_path + "\\lagou05" + path, encoding="utf-8") as f:
            datas = yaml.safe_load(f)
        result = []
        for op in ("add", "div", "mul", "sub"):
            result.append(datas["datas"][op])
            result.append(datas["myids"][op])
        return result
| testroute/lagou05 | Common/Read_yaml.py | Read_yaml.py | py | 1,286 | python | zh | code | null | github-code | 36 |
71275872103 | # Import packages
from copy import deepcopy
from math import exp
from helpers import *
from worlds import *
# Base class for all players. Cognitively, every player has access to
# literal meanings, so the literal interpretation lives here.
class Player:
    """Agent with a private copy of the priors and literal semantics."""

    def __init__(self, priors) -> None:
        # Full priors for the player (the .priors attr of a Priors object),
        # copied so players never share mutable state.
        self.priors = deepcopy(priors.priors)

    def conditionalization(self, world, prop, utt):
        """P(prop | utt, world) under the literal interpretation function."""
        w = World(world)

        def literal(p):
            # 1 iff the utterance's literal meaning includes property p.
            return 1 if p in w.interpretation_function[utt] else 0

        return literal(prop) / sum(literal(p) for p in w.properties)
# This is the Speaker class, it takes a world and a temperature parameter as
# arguments. It inherits the literal interpretations from the player class.
class Speaker(Player):
    """RSA speaker: soft-max choice rule over messages with temperature alpha."""

    def __init__(self, priors, alpha) -> None:
        super().__init__(priors)
        # Rationality/temperature parameter for the soft-max choice rule.
        self.alpha = alpha

    def utility(self, world, prop, utt):
        """Log of (literal conditional probability x world prior)."""
        return my_log(self.conditionalization(world, prop, utt) * self.priors[world][0])

    def choice_rule(self, world, utt, messages, prop):
        """Soft-max probability of choosing `utt` among `messages`."""
        return exp(self.alpha * self.utility(world, prop, utt)) / sum(
            [
                exp(self.alpha * self.utility(world, prop, message))
                for message in messages
            ]
        )

    # The two following methods return an object of the right shape to use the
    # viz function on.
    def prediction(self, world, messages):
        """Per-property message choice distribution for one world."""
        props = World(world).properties
        preds = {
            p: {m: self.choice_rule(world, m, messages, p) for m in messages}
            for p in props
        }
        return preds

    def full_predictions(self, messages):
        """Predictions for every world, paired with that world's prior."""
        preds = {}
        for w in self.priors:
            props = World(w).properties
            preds[w] = [
                self.priors[w][0],
                {
                    p: {m: self.choice_rule(w, m, messages, p) for m in messages}
                    for p in props
                },
            ]
        return preds
# This is the Listener class. Not much to say here except that this layout makes
# it clear that the listener envisions the speaker as having the same priors
# as them with regards to worlds, which is not necessarily true and something
# we might want to play with once we have more of an idea how clashes work.
# In any case, each listener envisions their own player.
class Listener(Player):
    """RSA listener: inverts an internal Speaker model via Bayes' rule."""

    def __init__(self, priors, alpha, beta) -> None:
        super().__init__(priors)
        # The listener's internal model of the speaker (same priors).
        self._speaker = Speaker(priors, alpha)
        self.alpha = alpha
        # Temperature for the world-prior update rule.
        self.beta = beta

    def lis(self, world, prop, utt, messages):
        """Posterior P(prop | utt, world): prior x speaker choice, normalized."""
        return (
            self.priors[world][1][prop]
            * self._speaker.choice_rule(world, utt, messages, prop)
        ) / sum(
            [
                self.priors[world][1][p]
                * self._speaker.choice_rule(world, utt, messages, p)
                for p in World(world).properties
            ]
        )

    # This updates the priors over worlds. So far it is not very satisfactory
    # and feels unnatural to describe formally, but it does the job.
    def update_world_priors(self, utt, messages):
        """Soft-max re-weighting of world priors after hearing `utt`."""
        priors = deepcopy(self.priors)
        scores = []
        for w in priors:
            # Score each world by how much the utterance boosts its
            # top-ranked property (index 0 in the order of worth).
            # NOTE(review): `s` is only assigned when a property sits at
            # index 0 — if a world lacked one, `s` would be unbound or
            # stale from the previous world. Confirm every world has an
            # index-0 property.
            for p in World(w).properties:
                if World(w).order_of_worth.index(p) == 0:
                    s = priors[w][0] + priors[w][0] * self.lis(w, p, utt, messages)
                else:
                    pass
            scores.append(s)
        i = 0
        for w in priors:
            priors[w][0] = exp(self.beta * scores[i]) / sum(
                [exp(self.beta * s) for s in scores]
            )
            i += 1
        return priors

    def prediction(self, world, messages):
        """Per-message property posterior for one world."""
        props = World(world).properties
        preds = {
            m: {p: self.lis(world, p, m, messages) for p in props} for m in messages
        }
        return preds

    def full_predictions(self, messages):
        """Predictions per heard message, with priors re-updated each time."""
        preds = {}
        priors = deepcopy(self.priors)
        for m in messages:
            priors = self.update_world_priors(m, messages)
            preds[m] = {}
            for w in priors:
                props = World(w).properties
                # NOTE(review): the inner comprehension variable `m` shadows
                # the outer loop's `m` — behavior is as written, but confirm
                # the shadowing is intended.
                preds[m][w] = [
                    priors[w][0],
                    {
                        p: {m: self.lis(w, p, m, messages) for m in messages}
                        for p in props
                    },
                ]
            # Reset to the original priors before the next message.
            priors = deepcopy(self.priors)
        return preds
| LangdP/SMIC_boltanski_thevenot | ver_2/players.py | players.py | py | 4,795 | python | en | code | 0 | github-code | 36 |
def max_multiple(num, boundary):
    """Print the largest positive multiple of `num` that is <= `boundary`.

    Prints 0 when no such multiple exists (boundary < num or boundary <= 0).
    Replaces the original O(boundary) scan with constant-time arithmetic.
    """
    if boundary <= 0:
        # Original loop over range(1, boundary + 1) was empty here.
        print(0)
        return
    # Divisibility ignores the divisor's sign (i % num == 0 <=> i % -num == 0).
    step = abs(num)
    print((boundary // step) * step)
# Read the divisor and the upper boundary from stdin, then report the
# largest multiple of the divisor within [1, boundary].
divisor = int(input())
count = int(input())
max_multiple(divisor, count)
35909112249 | '''
Created on 22/03/2015
@author: chips
'''
from FGAme.core import EventDispatcherMeta, signal, conf
from FGAme.draw import Color, Shape
from FGAme.util import lazy
DEBUG = False
class HasVisualization(object):
    """Mixin storing fill color, outline color and line width for drawables."""

    _is_mixin_ = True
    _slots_ = ['_color', '_linecolor', '_linewidth']

    def _init_has_visualization(self,
                                color='black',
                                linecolor=None, linewidth=1):
        """Initialize visual attributes; a None color means "not drawn"."""
        self._color = None if color is None else Color(color)
        self._linecolor = None if linecolor is None else Color(linecolor)
        # BUGFIX: was `self._linewwidth` (typo), which left the declared
        # `_linewidth` slot unset.
        self._linewidth = linewidth

    # Drawing the object ######################################################
    @property
    def color(self):
        """Fill color (Color instance) or None."""
        return self._color

    @color.setter
    def color(self, value):
        if value is None:
            self._color = None
        else:
            self._color = Color(value)

    @property
    def linecolor(self):
        """Outline color (Color instance) or None."""
        return self._linecolor

    @linecolor.setter
    def linecolor(self, value):
        if value is None:
            self._linecolor = None
        else:
            # BUGFIX: previously assigned to self._color, clobbering the
            # fill color instead of setting the outline color.
            self._linecolor = Color(value)

    def _debug(self, screen):
        # Debug overlays only when the module-level DEBUG flag is set.
        if DEBUG:
            self.paint_contact_points(screen)

    def paint_contact_points(self, screen):
        """Paint a small black dot at each current contact point."""
        for col in self._contacts:
            screen.paint_circle(2, col.pos, Color('black'))
@EventDispatcherMeta.decorate
class ObjectMixin(HasVisualization):
    """Game-object mixin: visualization plus input-event delegation.

    Splits constructor kwargs between the physics initializer and the
    mixin initializer.
    """

    # Kwargs intercepted by _extract_mixin_kwargs and routed to _init_mixin.
    # NOTE(review): lists 'line_color' but _init_mixin's parameter is
    # 'linecolor'; passing line_color=... would raise TypeError — confirm.
    _mixin_args = set(['color', 'line_color'])

    # Input events are delegated to the lazily-created global input object.
    long_press = signal('long-press', 'key', delegate_to='_input')
    key_up = signal('key-up', 'key', delegate_to='_input')
    key_down = signal('key-down', 'key', delegate_to='_input')
    mouse_motion = signal('mouse-motion', delegate_to='_input')
    mouse_button_up = signal('mouse-button-up', 'button', delegate_to='_input')
    mouse_button_down = signal('mouse-button-down', 'button',
                               delegate_to='_input')
    mouse_long_press = signal('mouse-long-press', 'button',
                              delegate_to='_input')

    @lazy
    def _input(self):
        # Resolved on first access; avoids requiring conf at import time.
        return conf.get_input()

    def __init__(self, *args, **kwds):
        # Route mixin-specific kwargs away before the physics init sees them.
        mixin_kwds = self._extract_mixin_kwargs(kwds)
        self._init_physics(*args, **kwds)
        self._init_mixin(**mixin_kwds)

    def _extract_mixin_kwargs(self, kwds):
        """Pop every key listed in _mixin_args out of kwds; return them."""
        D = {}
        mixin_args = self._mixin_args
        for k in kwds:
            if k in mixin_args:
                D[k] = kwds[k]
        for k in D:
            del kwds[k]
        return D

    def _init_mixin(self,
                    world=None,
                    color='black', linecolor=None, linewidth=1):
        # `world` is accepted but unused here; visualization init only.
        self._init_has_visualization(color=color,
                                     linecolor=linecolor, linewidth=linewidth)
| macartur-UNB/FGAme | src/FGAme/objects/mixins.py | mixins.py | py | 2,803 | python | en | code | null | github-code | 36 |
1206611132 | """Utils functions."""
import datetime
def MillisecondsSinceEpoch(hours):
  """Returns time in milliseconds since epoch for a time `hours` from now.

  Args:
    hours: Int, the hours of the future timestamp.

  Returns:
    Int, the future timestamp in milliseconds since the Unix epoch (UTC).
  """
  # BUGFIX: the original mixed local time (datetime.now()) with the UTC
  # epoch (utcfromtimestamp(0)), skewing the result by the local UTC
  # offset. Also stop shadowing the `hours` parameter.
  future = datetime.datetime.utcnow() + datetime.timedelta(hours=hours)
  epoch = datetime.datetime.utcfromtimestamp(0)
  delta = future - epoch
  return int(delta.total_seconds() * 1000)
| DomRosenberger/google_bigquery | google_bigquery/common/utils.py | utils.py | py | 479 | python | en | code | 2 | github-code | 36 |
27757120954 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 15:44:55 2020
@author: ardaegeunlu
"""
import re
def LCCSortComparison(call1, call2):
    """Three-way comparator (-1/0/1) for two LCC call numbers.

    Compares the pre-cutter parts first; the post-cutter parts only break
    ties.
    """
    pre1, post1 = SeperateViaCutter(call1)
    pre2, post2 = SeperateViaCutter(call2)

    result = PreCutterComparison(pre1, pre2)
    if result != 0:
        return result
    return PostCutterComparison(post1, post2)
# Seperate the string into two parts at the cutter
def SeperateViaCutter(callno):
    """Split a call number at the cutter (a dot followed by a non-digit).

    Returns (pre_cutter, post_cutter); post_cutter is '' when no cutter
    exists (a dot followed by digits is a decimal, not a cutter).
    """
    match = re.search('\.\D+', callno)
    if match is None:
        return callno, ''
    # Drop the dot itself from the post-cutter part.
    return callno[:match.start()], callno[match.start() + 1:]
# The cutter is generally made of two parts, a combination of letters, followed by a decimal
# There are occasional irregularities, date, vol., edition etc. which normally should be after
# cutter are sometimes before the cutter. (perhaps clerical errors?)
def SeperatePreCutterToParts(callno):
    """Split the pre-cutter text into (letter part, numeric part).

    Returns (callno, 0) when no number is present. When the numeric tail
    contains whitespace (date/vol. irregularly placed before the cutter),
    all digits are merged into one number.
    """
    match = re.search('[-+]?\d*\.\d+|\d+', callno)
    if match is None:
        return callno, 0

    head = callno[:match.start()]
    tail = callno[match.start():]
    if len(IsPreCutterSpaced(tail)) > 1:
        # Irregular format: throw away non-digits and merge what remains.
        merged_digits = ''.join(ch for ch in tail if ch.isdigit())
        return head, float(merged_digits)
    return head, float(callno[match.start():match.end()])
# refer to above method
def IsPreCutterSpaced(part):
    """Return the whitespace-separated pieces of *part* (len > 1 signals the irregular spaced layout)."""
    return part.split()
# compare string parts lexicographically, compare floats in regular manner
def PreCutterComparison(call1, call2):
    """Three-way compare of pre-cutter strings: letters first, then number."""
    left = SeperatePreCutterToParts(call1)
    right = SeperatePreCutterToParts(call2)
    # Compare component-wise: lexical part first, numeric part on a tie.
    for a, b in zip(left, right):
        if a < b:
            return -1
        if a > b:
            return 1
    return 0
# compare initial letter groups, then compare the rest of the string
# char by char, digits take precendence in the latter part
def PostCutterComparison(call1, call2):
    """Three-way compare of two post-cutter strings.

    Leading letter groups compare lexicographically; on a tie the remainders
    compare character by character with digits outranking non-digits, then
    by length, then by trailing numeric (vol./edition) data after a space.
    NOTE(review): identical post-cutters fall through to the final return
    and yield -1, never 0 -- confirm this is intended.
    """
    lex1, rest1 = SeperatePostCutterToParts(call1)
    lex2, rest2 = SeperatePostCutterToParts(call2)
    if lex1 < lex2:
        return -1
    elif lex1 > lex2:
        return 1
    else:
        # this split is necessary to seperate the vol/edition parts which come after a spacing
        pieces1 = rest1.split(' ', 1)
        pieces2 = rest2.split(' ', 1)
        # Char-by-char over the main remainder: a digit sorts after a letter.
        for a, b in zip(pieces1[0], pieces2[0]):
            if not str.isdigit(a) and str.isdigit(b):
                return -1
            elif str.isdigit(a) and not str.isdigit(b):
                return 1
            else:
                if a > b:
                    return 1
                elif a < b:
                    return -1
            # else continue the loop
        # Common prefix exhausted: the longer remainder sorts later.
        if len(pieces1[0]) > len(pieces2[0]):
            return 1
        elif len(pieces1[0]) < len(pieces2[0]):
            return -1
        # Both carry vol./edition data: compare their ASCII digits numerically.
        if len(pieces1) > 1 and len(pieces2) > 1:
            num_data1 = ''.join([s for s in pieces1[1] if s.isdigit() and ord(s) < 128])
            num_data2 = ''.join([s for s in pieces2[1] if s.isdigit() and ord(s) < 128])
            if len(num_data1) > 0 and len(num_data2) > 0:
                num_data_1 = int(num_data1)
                num_data_2 = int(num_data2)
                if num_data_1 > num_data_2:
                    # print(num_data1 + '>' + num_data2)
                    return 1
                elif num_data_1 < num_data_2:
                    # print(num_data1 + '<' + num_data2)
                    return -1
        # if still havent returned, one of the strings have expired
        # the shorter string should sort first
        if len(rest1) > len(rest2):
            return 1
        else:
            return -1
# the two parts are initial letter group followed by digits and possibly chars again
# eg. callno = QA1.A123B2 post-cutter = A123B2, A is letter group 123B2 is the rest.
def SeperatePostCutterToParts(callno):
    """Split a post-cutter string into its first letter group and the rest.

    e.g. 'A123B2' -> ('A', '123B2'); an all-digit input returns (callno, '').
    Digits before the first letter group are dropped, as in the original.
    """
    # Raw string avoids the invalid-escape-sequence warning for '\D'.
    cutter = re.search(r'\D+', callno)
    if cutter is not None:
        lexPart = callno[cutter.start():cutter.end()]
        rest = callno[cutter.end():]
        return lexPart, rest
    else:
        return callno, ''
| ardaegeunlu/Library-of-Congress-Classification-Sorter | sort_comparison.py | sort_comparison.py | py | 4,939 | python | en | code | 1 | github-code | 36 |
import pyaudio
import numpy as np

# Playback format: 16-bit mono PCM at 16 kHz.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK_SIZE = 1000
MAX_INT16 = np.iinfo(np.int16).max

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                output=True)

for i in range(0, 18):
    print(i)
    f = open(str(i) + ".raw", "rb")
    with f:
        data = f.read()
        # `np.float` was removed in NumPy 1.24; it aliased the 64-bit float.
        data_float = np.frombuffer(data, dtype=np.float64)
        data_scaled = data_float * MAX_INT16
        # Cast to int16 so the byte layout matches the paInt16 stream;
        # astype(int) would produce int64 samples and corrupt playback.
        data_int = data_scaled.astype(np.int16)
        buff = memoryview(data_int).tobytes()
        stream.write(buff)

stream.stop_stream()
stream.close()
p.terminate()
| gmamaladze/tf-voice-pi | tfvoicepi/tools/play.py | play.py | py | 663 | python | en | code | 1 | github-code | 36 |
32527125731 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# input_data
import numpy as np
import pandas as pd
import pickle as pkl
def load_dc_data(dataset, data_root='C:/YimingXu/Micromobility_DL/data/'):
    """Load the DC micromobility demand matrix and its four adjacency views.

    Args:
        dataset: dataset name (kept for interface compatibility; unused here).
        data_root: directory holding the csv/pkl inputs.  New optional
            parameter; defaults to the original hard-coded location, so
            existing calls behave identically.

    Returns:
        (demand, adjacency, accessibility, landuse, demographic); the last
        four are numpy matrices built from the csv files.
    """
    adj1 = np.mat(pd.read_csv(data_root + 'adjacency_selected.csv'))
    adj2 = np.mat(pd.read_csv(data_root + 'accessibility_selected.csv'))
    adj3 = np.mat(pd.read_csv(data_root + 'landuse_selected.csv'))
    adj4 = np.mat(pd.read_csv(data_root + 'demographic_selected.csv'))
    dc_dm = pd.read_pickle(data_root + 'Input_Selected_Zones.pkl')
    return dc_dm, adj1, adj2, adj3, adj4
def preprocess_data(data, time_len, rate, seq_len, pre_len):
    """Slice a (time, nodes) series into train/test sliding windows.

    Each sample is *seq_len* consecutive steps and its label is the
    following *pre_len* steps; the first rate*time_len steps form the
    training split.  Returns (trainX, trainY, testX, testY) as arrays.
    """
    split = int(time_len * rate)
    train_data = data[:split]
    test_data = data[split:time_len]

    def _windows(series):
        xs, ys = [], []
        for start in range(len(series) - seq_len - pre_len):
            window = series[start: start + seq_len + pre_len]
            xs.append(window[:seq_len])
            ys.append(window[seq_len:])
        return np.array(xs), np.array(ys)

    trainX1, trainY1 = _windows(train_data)
    testX1, testY1 = _windows(test_data)
    return trainX1, trainY1, testX1, testY1
# In[3]:
# utils
import tensorflow as tf
import scipy.sparse as sp
import numpy as np
def normalized_adj(adj):
    """Symmetrically normalize *adj*: D^-1/2 A D^-1/2, returned as float32 COO."""
    mat = sp.coo_matrix(adj)
    degrees = np.array(mat.sum(1))
    # Inverse square roots of the row sums; isolated rows (sum 0) get 0.
    inv_sqrt = np.power(degrees, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    result = mat.dot(d_half).transpose().dot(d_half).tocoo()
    return result.astype(np.float32)
def sparse_to_tuple(mx):
    """Convert a scipy sparse matrix into a canonically ordered tf.SparseTensor."""
    mx = mx.tocoo()
    # Stack (row, col) pairs into the (nnz, 2) index array TF expects.
    coords = np.vstack((mx.row, mx.col)).transpose()
    L = tf.SparseTensor(coords, mx.data, mx.shape)
    # Reorder indices into row-major order, as required by TF sparse ops.
    return tf.sparse.reorder(L)
def calculate_laplacian(adj, lambda_max=1):
    """Return normalize(A + I) as a float32 tf.SparseTensor (lambda_max is unused)."""
    # Add self-loops before symmetric normalization.
    adj = normalized_adj(adj + sp.eye(adj.shape[0]))
    adj = sp.csr_matrix(adj)
    adj = adj.astype(np.float32)
    return sparse_to_tuple(adj)
def weight_variable_glorot(input_dim, output_dim, name=""):
    """Create a (input_dim, output_dim) weight Variable with Glorot-uniform init."""
    init_range = np.sqrt(6.0 / (input_dim + output_dim))
    # NOTE(review): tf.random_uniform is TF1-only API while the rest of the
    # file uses tf.compat.v1 -- confirm which TF version is targeted.
    initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
                                maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial,name=name)
# In[4]:
# TGCN Cell
from tensorflow.compat.v1.nn.rnn_cell import RNNCell
class tgcnCell(RNNCell):
    """GRU cell whose gate/candidate transforms are graph convolutions.

    Each gate applies a fixed graph Laplacian (built once from *adj*) to the
    concatenated [input, state] signal before the learned dense transform.
    """
    def call(self, inputs, **kwargs):
        # Required by the RNNCell interface; __call__ below does the work.
        pass
    def __init__(self, num_units, adj, num_nodes, input_size=None,
                 act=tf.nn.tanh, reuse=None):
        super(tgcnCell, self).__init__(_reuse=reuse)
        self._act = act
        self._nodes = num_nodes
        self._units = num_units
        # Pre-compute the normalized Laplacian as a constant SparseTensor.
        self._adj = []
        self._adj.append(calculate_laplacian(adj))
    @property
    def state_size(self):
        # Flat state: one gru_units vector per graph node.
        return self._nodes * self._units
    @property
    def output_size(self):
        return self._units
    def __call__(self, inputs, state, scope=None):
        # Standard GRU update with graph-convolutional gates.
        with tf.compat.v1.variable_scope(scope or "tgcn",reuse=tf.compat.v1.AUTO_REUSE):
            with tf.compat.v1.variable_scope("gates",reuse=tf.compat.v1.AUTO_REUSE):
                # One conv produces both reset (r) and update (u) gates.
                value = tf.nn.sigmoid(
                    self._gc(inputs, state, 2 * self._units, bias=1.0, scope=scope))
                r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
            with tf.compat.v1.variable_scope("candidate",reuse=tf.compat.v1.AUTO_REUSE):
                r_state = r * state
                c = self._act(self._gc(inputs, r_state, self._units, scope=scope))
            # Convex combination of old state and candidate.
            new_h = u * state + (1 - u) * c
        return new_h, new_h
    def _gc(self, inputs, state, output_size, bias=0.0, scope=None):
        """Graph convolution of [inputs, state] followed by a dense layer."""
        ## inputs:(-1,num_nodes)
        inputs = tf.expand_dims(inputs, 2)
        ## state:(batch,num_node,gru_units)
        state = tf.reshape(state, (-1, self._nodes, self._units))
        ## concat
        x_s = tf.concat([inputs, state], axis=2)
        input_size = x_s.get_shape()[2]
        ## (num_node,input_size,-1)
        x0 = tf.transpose(x_s, perm=[1, 2, 0])
        x0 = tf.reshape(x0, shape=[self._nodes, -1])
        scope = tf.compat.v1.get_variable_scope()
        with tf.compat.v1.variable_scope(scope):
            # Multiply by each stored Laplacian (only one is ever stored).
            for m in self._adj:
                x1 = tf.sparse.sparse_dense_matmul(m, x0)
                # print(x1)
            x = tf.reshape(x1, shape=[self._nodes, input_size,-1])
            x = tf.transpose(x,perm=[2,0,1])
            x = tf.reshape(x, shape=[-1, input_size])
            weights = tf.compat.v1.get_variable(
                'weights', [input_size, output_size], initializer=tf.keras.initializers.glorot_normal)
            x = tf.matmul(x, weights) # (batch_size * self._nodes, output_size)
            biases = tf.compat.v1.get_variable(
                "biases", [output_size], initializer=tf.constant_initializer(bias))
            x = tf.nn.bias_add(x, biases)
            x = tf.reshape(x, shape=[-1, self._nodes, output_size])
            x = tf.reshape(x, shape=[-1, self._nodes * output_size])
        return x
# In[5]:
import pickle as pkl
import tensorflow as tf
import pandas as pd
import numpy as np
import math
import os
import numpy.linalg as la
from sklearn.metrics import mean_squared_error,mean_absolute_error
import time
time_start = time.time()
###### Settings ######
# flags = tf.compat.v1.flags
# FLAGS = flags.FLAGS
# flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
# flags.DEFINE_integer('training_epoch', 1, 'Number of epochs to train.')
# flags.DEFINE_integer('gru_units', 64, 'hidden units of gru.')
# flags.DEFINE_integer('seq_len',12 , ' time length of inputs.')
# flags.DEFINE_integer('pre_len', 3, 'time length of prediction.')
# flags.DEFINE_float('train_rate', 0.8, 'rate of training set.')
# flags.DEFINE_integer('batch_size', 32, 'batch size.')
# flags.DEFINE_string('dataset', 'los', 'sz or los.')
# flags.DEFINE_string('model_name', 'tgcn', 'tgcn')
model_name = 'tgcn'          # model identifier used in output paths
data_name = 'dc'             # dataset selector (see load_dc_data)
train_rate = 0.8             # fraction of the series used for training
seq_len = 24                 # input window length (time steps)
output_dim = pre_len = 3     # prediction horizon (time steps)
batch_size = 32
lr = 0.001                   # Adam learning rate
training_epoch = 1           # overwritten to 20 before the training loop below
gru_units = 64               # hidden size of each TGCN cell
# In[6]:
###### load data ######
if data_name == 'dc':
data, adj1, adj2, adj3, adj4 = load_dc_data('dc')
time_len = data.shape[0]
num_nodes = data.shape[1]
data1 =np.mat(data,dtype=np.float32)
# In[7]:
#### normalization
# max_value = np.max(data1)
# data1 = data1/max_value
max_value=1
mean_value=np.mean(data1)
std_value=np.std(data1)
data1=(data1-mean_value)/std_value
trainX, trainY, testX, testY = preprocess_data(data1, time_len, train_rate, seq_len, pre_len)
totalbatch = int(trainX.shape[0]/batch_size)
training_data_count = len(trainX)
# In[8]:
def process_output(otp):
    """Reshape each per-step RNN output to (batch * num_nodes, gru_units)."""
    reshaped = []
    for step_output in otp:
        stacked = tf.reshape(step_output, shape=[-1, num_nodes, gru_units])
        reshaped.append(tf.reshape(stacked, shape=[-1, gru_units]))
    return reshaped
# In[9]:
# TGCN
from tensorflow import keras
def TGCN(_X, _weights, _biases):
    """Build the multi-graph TGCN prediction graph.

    Runs four parallel TGCN RNNs, one per adjacency view (adjacency,
    accessibility, landuse, demographic), concatenates their last hidden
    states, passes them through a small dense head, and projects to
    (batch * pre_len, num_nodes) predictions.
    """
    ###
    # multi-GCN-GRU
    cell_1 = tgcnCell(gru_units, adj1, num_nodes=num_nodes)
    cell_2 = tgcnCell(gru_units, adj2, num_nodes=num_nodes)
    cell_3 = tgcnCell(gru_units, adj3, num_nodes=num_nodes)
    cell_4 = tgcnCell(gru_units, adj4, num_nodes=num_nodes)
    cell_11 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True)
    cell_22 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_2], state_is_tuple=True)
    cell_33 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_3], state_is_tuple=True)
    cell_44 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_4], state_is_tuple=True)
    # Unroll over the time axis: list of seq_len tensors of (batch, num_nodes).
    _X = tf.unstack(_X, axis=1)
    outputs_1, states_1 = tf.compat.v1.nn.static_rnn(cell_11, _X, dtype=tf.float32)
    outputs_2, states_2 = tf.compat.v1.nn.static_rnn(cell_22, _X, dtype=tf.float32)
    outputs_3, states_3 = tf.compat.v1.nn.static_rnn(cell_33, _X, dtype=tf.float32)
    outputs_4, states_4 = tf.compat.v1.nn.static_rnn(cell_44, _X, dtype=tf.float32)
    m_1 = process_output(outputs_1)
    m_2 = process_output(outputs_2)
    m_3 = process_output(outputs_3)
    m_4 = process_output(outputs_4)
    # Keep only the final time step of each branch.
    last_output_1 = m_1[-1]
    last_output_2 = m_2[-1]
    last_output_3 = m_3[-1]
    last_output_4 = m_4[-1]
    dense_input = tf.concat([last_output_1, last_output_2, last_output_3, last_output_4], 1)
    # Dense
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(64, activation='sigmoid'))
    model.add(tf.keras.layers.Dense(64))
    last_output = model(dense_input)
    # Project to pre_len predictions per node, then flatten to
    # (batch * pre_len, num_nodes) to match the label layout.
    output = tf.matmul(last_output, _weights['out']) + _biases['out']
    output = tf.reshape(output,shape=[-1,num_nodes,pre_len])
    output = tf.transpose(output, perm=[0,2,1])
    output = tf.reshape(output, shape=[-1,num_nodes])
    return output, m_1 , states_1
# In[10]:
###### placeholders ######
tf.compat.v1.disable_eager_execution()
inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, seq_len, num_nodes])
labels = tf.compat.v1.placeholder(tf.float32, shape=[None, pre_len, num_nodes])
# In[11]:
# Graph weights
weights = {
'out': tf.Variable(tf.compat.v1.random_normal([gru_units, pre_len], mean=1.0), name='weight_o')}
biases = {
'out': tf.Variable(tf.compat.v1.random_normal([pre_len]),name='bias_o')}
if model_name == 'tgcn':
pred,ttts,ttto = TGCN(inputs, weights, biases)
y_pred = pred
# In[12]:
###### optimizer ######
lambda_loss = 0.0015
Lreg = lambda_loss * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.compat.v1.trainable_variables())
label = tf.reshape(labels, [-1,num_nodes])
##loss
loss = tf.reduce_mean(tf.nn.l2_loss(y_pred-label) + Lreg)
##rmse
error = tf.sqrt(tf.reduce_mean(tf.square(y_pred-label)))
optimizer = tf.compat.v1.train.AdamOptimizer(lr).minimize(loss)
# In[13]:
###### Initialize session ######
variables = tf.compat.v1.global_variables()
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
#sess = tf.Session()
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
sess.run(tf.compat.v1.global_variables_initializer())
out = 'out/%s'%(model_name)
#out = 'out/%s_%s'%(model_name,'perturbation')
path1 = '%s_%s_lr%r_batch%r_unit%r_seq%r_pre%r_epoch%r'%(model_name,data_name,lr,batch_size,gru_units,seq_len,pre_len,training_epoch)
path = os.path.join(out,path1)
if not os.path.exists(path):
os.makedirs(path)
# In[15]:
###### evaluation ######
def evaluation(a, b):
    """Compute regression metrics between ground truth *a* and prediction *b*.

    Returns (rmse, mae, accuracy, r2, explained_variance) where accuracy is
    1 - ||a-b||_F / ||a||_F.
    """
    diff = a - b
    # np.mean((a-b)**2) / np.mean(|a-b|) equal sklearn's mean_squared_error /
    # mean_absolute_error for equal-sized outputs, dropping the sklearn
    # dependency from this hot evaluation path.
    rmse = math.sqrt(np.mean(diff ** 2))
    mae = np.mean(np.abs(diff))
    F_norm = la.norm(diff, 'fro') / la.norm(a, 'fro')
    r2 = 1 - (diff ** 2).sum() / ((a - a.mean()) ** 2).sum()
    var = 1 - np.var(diff) / np.var(a)
    return rmse, mae, 1 - F_norm, r2, var
x_axe,batch_loss,batch_rmse,batch_pred = [], [], [], []
test_loss,test_rmse,test_mae,test_acc,test_r2,test_var,test_pred = [],[],[],[],[],[],[]
training_epoch = 20
for epoch in range(training_epoch):
for m in range(totalbatch):
mini_batch = trainX[m * batch_size : (m+1) * batch_size]
mini_label = trainY[m * batch_size : (m+1) * batch_size]
_, loss1, rmse1, train_output = sess.run([optimizer, loss, error, y_pred],
feed_dict = {inputs:mini_batch, labels:mini_label})
batch_loss.append(loss1)
batch_rmse.append(rmse1 * max_value)
# Test completely at every epoch
loss2, rmse2, test_output = sess.run([loss, error, y_pred],
feed_dict = {inputs:testX, labels:testY})
test_label = np.reshape(testY,[-1,num_nodes])
rmse, mae, acc, r2_score, var_score = evaluation(test_label, test_output)
test_label1 = test_label * max_value
test_output1 = test_output * max_value
test_loss.append(loss2)
test_rmse.append(rmse * max_value)
test_mae.append(mae * max_value)
test_acc.append(acc)
test_r2.append(r2_score)
test_var.append(var_score)
test_pred.append(test_output1)
print('Iter:{}'.format(epoch),
'train_rmse:{:.4}'.format(batch_rmse[-1]),
'test_loss:{:.4}'.format(loss2),
'test_rmse:{:.4}'.format(rmse),
'test_mae:{:.4}'.format(mae))
if (epoch % 500 == 0):
saver.save(sess, path+'/model_100/TGCN_pre_%r'%epoch, global_step = epoch)
time_end = time.time()
print(time_end-time_start,'s')
# In[ ]:
# In[ ]:
# In[120]:
############## visualization ###############
b = int(len(batch_rmse)/totalbatch)
batch_rmse1 = [i for i in batch_rmse]
train_rmse = [(sum(batch_rmse1[i*totalbatch:(i+1)*totalbatch])/totalbatch) for i in range(b)]
batch_loss1 = [i for i in batch_loss]
train_loss = [(sum(batch_loss1[i*totalbatch:(i+1)*totalbatch])/totalbatch) for i in range(b)]
index = test_rmse.index(np.min(test_rmse))
test_result = test_pred[index]
var = pd.DataFrame(test_result)
# var.to_csv(path+'/test_result.csv',index = False,header = False)
#plot_result(test_result,test_label1,path)
#plot_error(train_rmse,train_loss,test_rmse,test_acc,test_mae,path)
print('min_rmse:%r'%(np.min(test_rmse)),
'min_mae:%r'%(test_mae[index]),
'max_acc:%r'%(test_acc[index]),
'r2:%r'%(test_r2[index]),
'var:%r'%test_var[index])
| xuyimingxym/MicroMobility-DL | Multi-GCN_GRU.py | Multi-GCN_GRU.py | py | 13,757 | python | en | code | 0 | github-code | 36 |
30143632560 | from itertools import product
# from PyMiniSolvers import minisolvers
import os
def req1(n: int, N: int, disjunctions_list):
    """Append the four gate truth-table unit clauses for every element.

    Each element i must output 1 on inputs (0,0), (0,1), (1,0) and 0 on
    (1,1) -- i.e. the elements are NAND gates.
    """
    for gate in range(n, N + n):
        disjunctions_list.append(f"t_{gate}_0_0_")
        disjunctions_list.append(f"t_{gate}_0_1_")
        disjunctions_list.append(f"t_{gate}_1_0_")
        disjunctions_list.append(f"-t_{gate}_1_1_")
def req2(n: int, N: int, disjunctions_list):
    """Connection constraints for each input pin of each element.

    Pin *pin* of element *element* connects to at least one earlier node
    (at-least-one clause) and never to two distinct candidates
    (pairwise at-most-one clauses).
    """
    for element in range(n, n + N):
        for pin in range(2):
            disjunctions_list.append([f"c_{element}_{pin}_{j}_" for j in range(element)])
            for j_1 in range(N + n):
                for j_2 in range(n, N + n):
                    if j_2 < j_1:
                        disjunctions_list.append(
                            [f"-c_{element}_{pin}_{j_1}_", f"-c_{element}_{pin}_{j_2}_"])
def req2_(n: int, N: int, disjunctions_list):
    """Variant of req2 whose at-most-one pairs range over later nodes only."""
    for element in range(n, n + N):
        for pin in range(2):
            disjunctions_list.append([f"c_{element}_{pin}_{j}_" for j in range(element)])
            for j_1 in range(element + 1, N + n):
                for j_2 in range(element + 1, N + n):
                    if j_2 == j_1:
                        continue
                    disjunctions_list.append(
                        [f"-c_{element}_{pin}_{j_1}_", f"-c_{element}_{pin}_{j_2}_"])
def req3(n: int, N: int, output_size_m: int, disjunctions_list):
    """Every output bit is driven by exactly one element.

    Appends one at-least-one clause per output bit plus pairwise
    at-most-one clauses over all ordered element pairs.
    """
    elements = range(n, n + N)
    for out_bit in range(output_size_m):
        disjunctions_list.append([f"o_{i}_{out_bit}_" for i in elements])
        for first in elements:
            for second in elements:
                if first != second:
                    disjunctions_list.append(
                        [f"-o_{first}_{out_bit}_", f"-o_{second}_{out_bit}_"])
def req4(n: int, input_sets, disjunctions_list):
    """Pin every input variable to its truth-table value in every row.

    input_sets must enumerate all 2**n assignments; a 1 yields a positive
    unit clause, anything else a negated one.
    """
    assert len(input_sets) == 2 ** n
    for var in range(n):
        for row in range(2 ** n):
            polarity = '' if input_sets[row][var] == 1 else '-'
            disjunctions_list.append(f"{polarity}v_{var}_{row}_")
def req5(n: int, N: int, disjunctions_list):
    """Tie each element's output value to its gate table and input wiring.

    For every element i, truth-table row r, and pair of input values
    (i_0, i_1), and every candidate source pair (j_0, j_1): if both pins
    are connected there and the sources carry those values, then v_i_r
    must equal the table entry t_i_{i_0}_{i_1} (encoded as two implication
    clauses, one per direction).
    """
    i_range = range(n, N + n)
    t_range = range(2 ** n)
    bit_range = range(2)
    for (i, r, i_0, i_1) in product(i_range, t_range, bit_range, bit_range):
        for j_0 in range(0, i):
            # for j_0 in i_range:
            for j_1 in range(0, i):
                # A negated sign here means "source value equals 1".
                i_0_sign = '-' if i_0 == 1 else ''
                i_1_sign = '-' if i_1 == 1 else ''
                clause_1 = [f"-c_{i}_{0}_{j_0}_", f"-c_{i}_{1}_{j_1}_", f"{i_0_sign}v_{j_0}_{r}_",
                            f"{i_1_sign}v_{j_1}_{r}_", f"v_{i}_{r}_", f"-t_{i}_{i_0}_{i_1}_"]
                clause_2 = [f"-c_{i}_{0}_{j_0}_",
                            f"-c_{i}_{1}_{j_1}_",
                            f"{i_0_sign}v_{j_0}_{r}_",
                            f"{i_1_sign}v_{j_1}_{r}_",
                            f"-v_{i}_{r}_",
                            f"t_{i}_{i_0}_{i_1}_"]
                disjunctions_list.append(clause_1)
                disjunctions_list.append(clause_2)
def req6(n: int, N: int, output_size_m: int, values, disjunctions_list):
    """If element i drives output bit k, its row-r value must match values[r][k]."""
    for element in range(n, N + n):
        for row in range(2 ** n):
            for out_bit in range(output_size_m):
                polarity = '' if values[row][out_bit] == 0 else '-'
                disjunctions_list.append(
                    [f"-o_{element}_{out_bit}_", f"{polarity}v_{element}_{row}_"])
vectorOfValue = "0111"
quantityOfElement = 2
import math
numOfVars = int(math.log2(len(vectorOfValue)))
if 2 ** numOfVars != len(vectorOfValue):
raise ValueError("bad length")
print(numOfVars)
vectorOfValue = vectorOfValue.replace("1", "a").replace("0", "1").replace("a", "0")
dis_list = []
req1(quantityOfElement, numOfVars, dis_list)
string_clause = ""
string_clause += "Λ".join(dis_list)
dis_list = []
req2(numOfVars, quantityOfElement, dis_list)
string_clause += "Λ" + "Λ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
req3(numOfVars, quantityOfElement, 1, dis_list)
string_clause += "Λ" + "Λ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
input_sets = list(product((0, 1), repeat=numOfVars))
req4(numOfVars, input_sets, dis_list)
string_clause += "Λ" + "Λ".join(dis_list)
dis_list = []
req5(numOfVars, quantityOfElement, dis_list)
string_clause += "Λ" + "Λ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
values = [(int(value),) for value in vectorOfValue]
req6(numOfVars, quantityOfElement, 1,values,dis_list)
string_clause += "Λ" + "Λ".join([ "V".join(dis) for dis in dis_list])
string_clause += f"Λo_{numOfVars + quantityOfElement - 1}_0_"
final = string_clause
fclause = [ [element for element in dis.split("V")] for dis in string_clause.split("Λ")]
# print(fclause)
variables = set()
for dis in fclause:
for element in dis:
if element[0]=="-":
variables.add(element[1:])
else:
variables.add(element)
variables = (list(variables))
map_index_to_item = {}
map_item_to_index = {}
for i, var in enumerate(variables):
map_index_to_item[i+1] = var
map_item_to_index[var] = i + 1
final = final.replace(var, str(map_item_to_index[var]))
lens = len(string_clause.split("Λ"))
for_minisat = f"p cnf {len(map_index_to_item)} {lens} \n"
for dis in string_clause.split("Λ"):
if "V" in dis:
for elem in dis.split("V"):
sign = (-1 if elem[0]=="-" else 1)
for_minisat += str(sign * map_item_to_index[elem[1:] if elem[0]=="-" else elem]) + " "
else:
for_minisat += str((-1 if dis[0]=="-" else 1) * map_item_to_index[dis[1:] if dis[0]=="-" else dis]) + " "
for_minisat+="0\n"
# print(for_minisat)
file_str = for_minisat
file = open("for_minisat", 'w')
file.write(file_str)
file.close()
minisat_solution = {}
def from_minisat(output_minisat):
    """Parse a MiniSat model line into the module-level `minisat_solution` dict.

    The line holds space-separated signed literals terminated by "0"
    (dropped by the [:-1] slice); a leading '-' marks a variable assigned
    False.  Variable indices are mapped back via `map_index_to_item`.
    """
    output_minisat = output_minisat.split(" ")[:-1]
    print(output_minisat)
    for item in output_minisat:
        if item[0] == "-":
            minisat_solution[map_index_to_item[int(item[1:])]] = False
        else:
            minisat_solution[map_index_to_item[int(item)]] = True
os.system("minisat for_minisat output")
file = open("output", 'r')
output_minisat= file.read().split("\n")[1]
file.close()
from_minisat(output_minisat)
# print(minisat_solution)
body_string = "\n"
print(minisat_solution)
for key in minisat_solution.keys():
if minisat_solution[key]:
if key[0] == "c":
c = key
print(c)
c = c[2:-1]
c = c.split("_")
from_ = ("x"+c[2]) if int(c[2]) < numOfVars else ("element"+c[2])
to_ = ("x"+c[0]) if int(c[0]) < numOfVars else ("element"+c[0])
body_string = body_string + """ "{}" -> "{}";\n""".format(from_, to_)
if key[0] == "o":
o = key
print(o)
o = o[2:-1]
o = o.split("_")
o[0] = ("x"+o[0]) if int(o[0])< numOfVars else ("element"+o[0])
body_string = body_string + """ "{}" -> "{}";\n""".format(o[0], "end")
# os.system("rm scheme.dot")
# os.system("rm scheme.dot.png")
file_name = "scheme.dot"
file_str = """digraph G {\n""" + body_string + """\n}"""
file = open(file_name, 'w')
file.write(file_str)
file.close()
os.system("dot -T png -O " + file_name)
exit()
S = minisolvers.MinisatSolver()
for i in range(len(map_index_to_item)):
S.new_var()
for dis in final.split("Λ"):
clause = [ int(elem) for elem in dis.split("V")]
S.add_clause(clause)
print(S.solve())
solution = (list(S.get_model()))
print(solution) | PeterLarochkin/discrete_structures | HM2/final.py | final.py | py | 8,046 | python | en | code | 2 | github-code | 36 |
20763051017 | import os
import json
import time
from datetime import datetime
# Importing shared dependencies
from task_management import task_list
from ai_agent_management import ai_agents
sync_status = {}
def autoSync():
    """Block forever, flushing tasks and agents to disk once a minute.

    Updates sync_status['last_sync'] with a human-readable timestamp on
    each pass before writing both JSON files.
    """
    while True:
        time.sleep(60)  # Sync every minute
        sync_status['last_sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        syncTasks()
        syncAgents()
def syncTasks():
    """Persist the shared in-memory task list as JSON."""
    with open('TaskMaster/src/task_data.json', 'w') as outfile:
        outfile.write(json.dumps(task_list))
def syncAgents():
    """Persist the shared in-memory AI-agent registry as JSON."""
    with open('TaskMaster/src/ai_agent_data.json', 'w') as outfile:
        outfile.write(json.dumps(ai_agents))
if __name__ == "__main__":
autoSync() | shadowaxe99/c | TaskMaster/src/auto_sync.py | auto_sync.py | py | 668 | python | en | code | 0 | github-code | 36 |
3598532170 | import unittest
import sys
import numpy as np
sys.path.append('.')
import ladi.preprocess as pp
np.random.seed(1)
class Test_preprocess(unittest.TestCase):
    """Unit tests for ladi.preprocess."""
    def test_round_to_zero(self):
        """round_to_zero(arr, T) must zero exactly the entries with |x| < T."""
        T = 0.2
        arr = np.random.normal(0., 1., size=(64,64))
        rounded_arr = pp.round_to_zero(arr, T)
        # The count of small input entries must equal the count of zeros
        # in the output (the seeded normal sample has no exact zeros).
        sum1 = (np.absolute(arr) < T).sum()
        sum2 = (rounded_arr == 0.).sum()
        self.assertEqual(sum1, sum2)
if __name__ == '__main__':
unittest.main() | asenogles/ladi | tests/test_preprocess.py | test_preprocess.py | py | 479 | python | en | code | 0 | github-code | 36 |
29197653617 | import re
import json
import torch
import logging
from tokenizers import ByteLevelBPETokenizer
from os.path import exists, join, abspath
from . import Target, Entity
from models.pre_abstract.model import LSTMTagger
class PreAbstractParser(Target):
    """Classify the lines preceding a paper's abstract with an LSTM tagger.

    Lines labelled as authors, emails, organizations or other private data
    are recorded in the document's entity sets and removed from the cleaned
    text; all other lines are kept.
    """
    def __init__(self, model_dir, device="cpu"):
        """Load model config, classes, weights and BPE tokenizer from *model_dir*.

        Falls back to CPU when CUDA is unavailable regardless of *device*.
        """
        super().__init__()
        self.model_dir = abspath(model_dir)
        # All model artifacts must be present up front; fail loudly otherwise.
        assert exists(self.model_dir), f"model directory '{self.model_dir}' does not exist"
        assert exists(join(self.model_dir, "classes.json")), f"classes file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "config.json")), f"configuration file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "merges.txt")), f"merges file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "weights.pt")), f"weights file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "vocab.json")), f"vocab file does not exist in {self.model_dir}"
        with open(join(self.model_dir, "classes.json"), "r") as classes_file:
            self.class_to_index = json.load(classes_file)
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        with open(join(self.model_dir, "config.json"), "r") as config_file:
            self.model_config = json.load(config_file)
        if not torch.cuda.is_available():
            device = "cpu"
        self.device = torch.device(device)
        self.model = LSTMTagger(vocab_size=self.model_config["vocab_size"],
                                embedding_dim=self.model_config["embedding_dim"],
                                lstm_dim=self.model_config["lstm_dim"],
                                n_classes=len(self.class_to_index)).to(self.device)
        weights = torch.load(join(self.model_dir, "weights.pt"), map_location=device)
        self.model.load_state_dict(weights)
        self.model = self.model.eval()
        self.tokenizer = ByteLevelBPETokenizer(vocab_file=join(self.model_dir, "vocab.json"),
                                               merges_file=join(self.model_dir, "merges.txt"),
                                               lowercase=self.model_config["lowercase"])
        # Strips everything but letters and spaces, for fuzzy title matching.
        self.noise_re = re.compile(r"[^A-Za-z ]")
        # Matches a comma-delimited "...Department..." segment within a line.
        self.department_re = re.compile(r"(?:,\s*)?[^,]*Department[^,]*(?:,)", re.IGNORECASE)
    def __call__(self, document):
        """Annotate and strip the pre-abstract lines of *document* in place.

        Sets document["text_cleaned"]; returns the (mutated) document.
        """
        assert isinstance(document, dict), f"wrong input of type {type(document)} to author parser"
        try:
            lines, labels = self.annotate_lines(document["text"][:document["abstract_start"]])
        except RuntimeError:
            logging.error(f"could not parse pre abstract of {document['name']}")
            return document
        keep_lines = []
        for line, label in zip(lines, labels):
            # The title line is always kept, even if the tagger mislabels it.
            if "meta" in document and self.noise_re.sub("", line) == self.noise_re.sub("", document["meta"]["title"]):
                keep_lines.append(line)
            elif label == "other":
                keep_lines.append(line)
            else:
                self.create_annotation(document, line, label)
        if "meta" in document:
            keep_lines = self.post_process_lines(document, keep_lines)
        document["text_cleaned"] = "\n".join(keep_lines) + document["text"][document["abstract_start"]:]
        return document
    def annotate_lines(self, text):
        """Tokenize *text* line by line and return (lines, predicted labels)."""
        lines = text.split("\n")
        tokenized = [x.ids for x in self.tokenizer.encode_batch(lines)]
        # padding
        max_tokens = max(len(sentence) for sentence in tokenized)
        for sentence in range(len(tokenized)):
            for _ in range(max_tokens - len(tokenized[sentence])):
                tokenized[sentence].insert(0, 0)
        tensor = torch.tensor([tokenized]).to(self.device)
        predictions = self.model.forward(tensor)
        predictions = torch.argmax(predictions[0], -1)
        predictions = [self.index_to_class[prediction.item()] for prediction in predictions]
        return lines, predictions
    def create_annotation(self, document, line, label):
        """Record *line* in the entity set matching *label*; raise on unknown labels."""
        if label == "private":
            document["entities"][Entity.PERSONAL_DATA].add(line)
        elif label == "author":
            document["entities"][Entity.AUTHOR].add(line)
        elif label == "email":
            document["entities"][Entity.EMAIL].add(line)
        elif label == "organization":
            # Department mentions are treated as personal data and removed
            # from the line before recording the institution itself.
            for department_mention in self.department_re.findall(line):
                document["entities"][Entity.PERSONAL_DATA].add(department_mention)
            line = self.department_re.sub("", line)
            document["entities"][Entity.INSTITUTION_COMPANY].add(line)
        else:
            logging.error(f"label '{label}' not recognized in {type(self)}")
            raise ValueError(f"label '{label}' not recognized")
    def post_process_lines(self, document, lines):
        """Drop kept lines that still mention a known author or organization."""
        keep_lines = []
        for line in lines:
            mention = False
            try:
                # Match author/org names even when split by whitespace/hyphens.
                for author in document["meta"]["authors"]:
                    if re.search("[\s\-]*".join(re.escape(name) for name in author.split()), line, re.IGNORECASE):
                        mention = True
                        document["entities"][Entity.AUTHOR].add(line)
                for organization in document["meta"]["orgs"]:
                    if re.search("[\s\-]*".join(re.escape(name) for name in organization["name"].split()), line, re.IGNORECASE):
                        mention = True
                        document["entities"][Entity.INSTITUTION_COMPANY].add(line)
            except KeyError:
                logging.error(f"conferences meta file misses key for {document['name']}")
            if not mention:
                keep_lines.append(line)
        return keep_lines
| kherud/native-language-identification | pipeline/pipes/pre_abstract.py | pre_abstract.py | py | 5,835 | python | en | code | 1 | github-code | 36 |
29314289325 | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
import bcrypt
# Create your views here.
def main(request):
    """Landing page: books newest-first, plus the current user when signed in."""
    context = {"books": Book.objects.order_by('created_at').reverse()}
    if 'logged_in' in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/index.html', context)
def index(request):
    """Login page; already-authenticated users are bounced back home."""
    if "logged_in" not in request.session:
        return render(request, 'main/login.html')
    messages.success(request, "You already signed in!")
    return redirect("/")
def register(request):
    """Create a user from the POSTed form, log them in, and go home.

    Validation failures flash error messages and redirect without creating
    anything.  Passwords are stored bcrypt-hashed.
    """
    form = request.POST
    errors = User.objects.basic_validator(form)
    if len(errors) > 0:
        for key, val in errors.items():
            messages.error(request, val)
        return redirect('/')
    # NOTE(review): hashpw returns bytes while login() calls
    # user.password.encode() -- confirm the field stores the decoded
    # string rather than a bytes repr.
    User.objects.create(
        first_name=form["first_name"],
        last_name=form["last_name"],
        student_id=form["student_id"],
        email=form["email"],
        password=bcrypt.hashpw(form["password"].encode(), bcrypt.gensalt()),
    )
    # NOTE(review): objects.last() is race-prone under concurrent signups;
    # create() already returns the new instance.
    user = User.objects.last()
    request.session["logged_in"] = user.id
    request.session["first_name"] = user.first_name
    request.session["last_name"] = user.last_name
    request.session["email"] = user.email
    request.session["student_id"] = user.student_id
    return redirect('/')
def login(request):
    """Authenticate the POSTed credentials and populate the session.

    NOTE(review): several oddities worth confirming -- the bare `except`
    swallows all lookup errors, validation errors below are flashed but do
    not stop the session from being populated, the user is fetched twice,
    and both success and failure redirect to '/login'.
    """
    form = request.POST
    try:
        user=User.objects.get(email=form["login_email"])
    except:
        messages.error(request,"Please enter a correct email!")
        return redirect("/login")
    if bcrypt.checkpw(form["login_password"].encode(), user.password.encode()) == False:
        messages.error(request,"Please enter a correct password!")
        return redirect("/login")
    errors = User.objects.login_validation(form)
    if len(errors):
        for key, value in errors.items():
            messages.error(request, value)
    user = User.objects.get(email=form['login_email'])
    request.session["logged_in"] = user.id
    request.session["email"] = user.email
    request.session["first_name"] = user.first_name
    request.session["last_name"] = user.last_name
    request.session["student_id"] = user.student_id
    return redirect('/login')
    # return redirect("/login")
def logout(request):
    """Discard the whole session and return to the login page.

    The former (commented-out) validation code has been removed; logout is
    intentionally unconditional.
    """
    request.session.clear()
    return redirect('/login')
def add_question(request):
    """Store a visitor question from the POSTed form and return home."""
    form = request.POST
    # NOTE(review): `user` is assigned the raw session id; if Message.user
    # is a ForeignKey this should be user_id / a User instance -- confirm
    # against the Message model.
    Message.objects.create(
        message= form['question_message'],
        user= request.session["logged_in"]
    )
    return redirect('/')
def add_book(request,book_id):
    """Render the single-product page with the full book list and the user.

    NOTE(review): `book_id` is accepted but never used and no specific book
    is passed to the template -- confirm whether this should resemble
    `book_detail` instead.
    """
    return render(request,'main/product-single.html',{
        "books": Book.objects.all(),
        "user": User.objects.get(id=request.session["logged_in"]),
    })
def about(request):
    """About page, with the current user attached when signed in."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/about.html', context)
def books(request):
    """Book catalogue plus the recently-added list; user added when signed in."""
    context = {
        "books": Book.objects.all(),
        "recent_added_book": Book.objects.order_by('created_at').reverse(),
    }
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/books.html', context)
def faq(request):
    """Show the FAQ page, adding the current user to context when logged in."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/faq.html', context)
def privacy_policy(request):
    """Render the privacy-policy page, with the current user in context when logged in.

    Bug fix: the logged-out branch rendered ``privacy_policy.html`` while the
    logged-in branch rendered ``privacy-policy.html``. Unified on the
    hyphenated name, which matches sibling templates such as
    ``terms-conditions.html`` — TODO confirm the actual filename on disk.
    """
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/privacy-policy.html', context)
def terms_conditions(request):
    """Show the terms & conditions page; include the user in context when logged in."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/terms-conditions.html', context)
def products(request):
    """List all products and the most recently added; include the user when logged in."""
    context = {
        "books": Book.objects.all(),
        "recent_added_book": Book.objects.order_by('created_at').reverse(),
    }
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/products.html', context)
def book_detail(request, book_id):
    """Show the detail page for one book.

    For logged-in users the context also includes the user, all books, and
    the user's own book list so the template can show borrow/remove state.

    Bug fix: the original passed the bound method ``this_user.books.all``
    (missing the call — it only worked because Django templates invoke
    callables) and fetched the same book and user twice; the queries are now
    deduplicated and ``.all()`` is actually called.
    """
    if 'logged_in' not in request.session:
        return render(request, 'main/product-single.html', {
            "this_book": Book.objects.get(id=book_id),
        })
    this_user = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/product-single.html', {
        "user": this_user,
        "this_book": Book.objects.get(id=book_id),
        "books": Book.objects.all(),
        "user_book": this_user.books.all(),
    })
def borrow(request, book_id):
    """Add *book_id* to the logged-in user's borrowed books, rejecting duplicates.

    Redirects to the login page (with a flash message) when nobody is logged
    in; otherwise flashes success or a duplicate warning and returns to the
    book's detail page.
    """
    if 'logged_in' not in request.session:
        messages.error(request, "You need to log in first!")
        return redirect('/login')
    chosen_book = Book.objects.get(id=book_id)
    current_user = User.objects.get(id=request.session["logged_in"])
    if current_user in chosen_book.users.all():
        messages.error(request, "You already chose this book!")
    else:
        chosen_book.users.add(current_user)
        messages.success(request, "Success!")
    return redirect(f"/books/{book_id}")
# def choose_book(request,book_id):
# form = request.POST
# this_user = User.objects.get(id=request.session["logged_in"])
# this_book = Book.objects.get(id=request.session["logged_in"])
def question(request):
    """Create a contact-form Message from the posted fields, then go home.

    Fix: removed dead commented-out validation code — no validator was being
    run, and the stale block only obscured the actual behavior.
    """
    form = request.POST
    Message.objects.create(
        message=form['question_message'],
        message_email=form['question_email'],
        message_name=form['question_name'],
    )
    return redirect('/')
def profile(request):
    """Show the logged-in user's profile page with their chosen books."""
    current_user = User.objects.get(id=request.session["logged_in"])
    chosen_books = current_user.books.all()
    return render(request, "main/profile.html", {
        "user": current_user,
        "books": chosen_books.order_by('created_at'),
        "books_add": chosen_books,
    })
def delete_book(request, book_id):
    """Remove *book_id* from the logged-in user's book list, then show the profile."""
    current_user = User.objects.get(id=request.session["logged_in"])
    current_user.books.remove(Book.objects.get(id=book_id))
    return redirect('/profile')
def delete_book1(request, book_id):
    """Remove *book_id* from the user's list (invoked from the book page).

    Flashes an error when the book was never chosen, a success message
    otherwise, and always returns to the book's detail page.
    """
    target_book = Book.objects.get(id=book_id)
    current_user = User.objects.get(id=request.session["logged_in"])
    if target_book in current_user.books.all():
        current_user.books.remove(target_book)
        messages.success(request, "Remove")
    else:
        messages.error(request, "You didn't choose this book!")
    return redirect(f'/books/{book_id}')
# def search(request):
# if request.method == "GET":
# query = request.GET.get('q')
# submitbutton = request.GET.get('submit')
# if query is not None:
#         lookup = Book(title= query)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.