seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35748203688 | """empty message
Revision ID: 42088f0246e2
Revises:
Create Date: 2019-01-03 17:19:12.991241
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42088f0246e2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the publisher, tag, book and book2tag tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('publisher',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('tag',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # 'book' references 'publisher' and enforces (id, title) uniqueness.
    op.create_table('book',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=False),
        sa.Column('publisher_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['publisher_id'], ['publisher.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('id', 'title', name='uni_id_title')
    )
    # NOTE(review): this index is *named* 'id' but indexes the 'title' column --
    # confirm the name is intentional before changing it (downgrade drops by name).
    op.create_index('id', 'book', ['title'], unique=False)
    # Association table linking books to tags (many-to-many).
    op.create_table('book2tag',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('book_id', sa.Integer(), nullable=True),
        sa.Column('tag_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['book_id'], ['book.id'], ),
        sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop tables in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('book2tag')
    # The index on 'book' is dropped by its (unusual) name 'id' -- see upgrade().
    op.drop_index('id', table_name='book')
    op.drop_table('book')
    op.drop_table('tag')
    op.drop_table('publisher')
    # ### end Alembic commands ###
| zbjzbj/MyCode | FlaskPlug/migrations/versions/42088f0246e2_.py | 42088f0246e2_.py | py | 1,806 | python | en | code | 0 | github-code | 13 |
27839038685 | import pandas as pd
import xml.etree.ElementTree as ET
import sys
from random import *
import importlib
# Load the IMDB TSV dumps into pandas dataframes.
# titles information
tittleInfo = pd.read_csv("tittle_basics1.tsv", sep='\t')  # ~100k rows
# tittleCrew = pd.read_csv( "tittle_crew1.tsv", sep='\t' )
tittleRatings = pd.read_csv("tittle_ratings.tsv", sep='\t')
# tittleEpisode = pd.read_csv( "tittle_episode1.tsv", sep='\t')
# tittlePrinciple = pd.read_csv("tittle_principles1.tsv", sep='\t')
nameBasics = pd.read_csv("name_basics1.tsv", sep='\t')

# IMDB uses the literal string "\N" for missing runtimes; default those to 120.
tittleInfo.runtimeMinutes.replace(to_replace="\\N", value="120", inplace=True)

tittleMovies = tittleInfo.loc[tittleInfo['titleType'] == 'movie']     # ~7k rows
tittleSeries = tittleInfo.loc[tittleInfo['titleType'] == 'tvSeries']  # ~4k rows
tittleEP = tittleInfo.loc[tittleInfo['titleType'] == 'tvEpisode']

# Build the <IMDB><Celebs>...</Celebs></IMDB> XML tree.
root = ET.Element("IMDB")
Celebs = ET.SubElement(root, "Celebs")

crewDF = pd.read_csv("cdata.csv", sep=',')

# Emit one <Celeb> element per person that has at least one crew credit.
for i in range(1500):  # idiom fix: range(0, 1500) -> range(1500)
    nconst = nameBasics.nconst.iloc[i]
    nDF = crewDF[crewDF['celebID'] == nconst]
    # Idiom fix: was `if(nDF.empty==True):` -- DataFrame.empty is already a bool.
    if nDF.empty:
        continue
    Celeb = ET.SubElement(Celebs, "Celeb")
    Celeb.set("CelebID", str(nconst))
    name = ET.SubElement(Celeb, "Name")
    name.text = str(nameBasics.primaryName.iloc[i])
    BirthYear = ET.SubElement(Celeb, "BirthYear")
    BirthYear.text = str(nameBasics.birthYear.iloc[i])
    primProf = ET.SubElement(Celeb, "PrimaryProfession")
    primProf.text = str(nameBasics.primaryProfession.iloc[i])
    knownFor = ET.SubElement(Celeb, "KnownFor")
    # knownForTitles = nameBasics.knownForTitles.iloc[0]
    # One <TitleRef> per credited title of this person.
    for el in range(nDF.shape[0]):
        tt = ET.SubElement(knownFor, "TitleRef")
        tt.text = str(nDF.ID.iloc[el])
| akhiln28/ontology_assignment1 | newCastData.py | newCastData.py | py | 1,713 | python | en | code | 0 | github-code | 13 |
33583439535 | import cv2
import dlib
import numpy as np
import copy
# mouth index to keep emotion
# Landmark indices (dlib 68-point model) for the inner mouth, kept separate so
# the target's own mouth/expression survives the swap.  Each entry is wrapped
# in a one-element list to match cv2.convexHull's index-array shape.
mouth_index = [[60],[61],[62],[63],[64],[65],[66],[67]]
# Flat set of the same indices for O(1) membership tests.
mouth_index_set = set(i[0] for i in mouth_index)
def get_delaunay_triangles_index(points, indices):
    """Delaunay-triangulate ``points`` and map each triangle back to landmark indices.

    points: list of (x, y) landmark coordinates (hull + mouth points).
    indices: parallel list of one-element index arrays, as returned by
        cv2.convexHull(..., returnPoints=False), giving each point's original
        landmark index.
    Returns a list of [i1, i2, i3] landmark-index triples, one per triangle
    whose three vertices all match an input point.
    """
    # only construct triangles between hull and mouth
    hull = cv2.convexHull(np.array(points))
    rect = cv2.boundingRect(hull)
    subdiv = cv2.Subdiv2D(rect)
    subdiv.insert(points)
    triangles = subdiv.getTriangleList()
    triangles = np.array(triangles, dtype=np.int32)
    points = np.array(points, np.int32)
    delaunay_triangles_index = []
    for t in triangles:
        # Each row of getTriangleList() is (x1, y1, x2, y2, x3, y3).
        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])
        tri_idx = []
        # Match each triangle vertex back to a source point by exact coordinates;
        # triangles touching Subdiv2D's synthetic outer vertices never reach 3
        # matches and are silently dropped.
        for i, p in enumerate(points):
            if ((pt1[0] == p[0] and pt1[1] == p[1])
                    or (pt2[0] == p[0] and pt2[1] == p[1])
                    or (pt3[0] == p[0] and pt3[1] == p[1])):
                tri_idx.append(indices[i][0])
            if len(tri_idx) == 3:
                delaunay_triangles_index.append(tri_idx)
                break
    return delaunay_triangles_index
def get_triangles(landmarks_points, tri_index):
    """Return the three (x, y) vertices named by ``tri_index`` as an int32 array."""
    vertices = [landmarks_points[idx] for idx in tri_index[:3]]
    return np.array(vertices, np.int32)
def warp_triangle(img1, img2, bb1, bb2, t1, t2):
    """Affine-warp triangle ``t1`` of ``img1`` onto triangle ``t2`` of ``img2`` (in place).

    bb1 / bb2 are the (x, y, w, h) bounding rects of t1 / t2; only those crops
    are touched.  The warped patch is blended into img2 through an
    anti-aliased triangle mask.
    Based on: https://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    img1_cropped = img1[bb1[1]: bb1[1] + bb1[3], bb1[0]: bb1[0] + bb1[2]]
    # Express both triangles relative to their bounding-rect origins.
    t1_offset = [
        ((t1[0][0] - bb1[0]), (t1[0][1] - bb1[1])),
        ((t1[1][0] - bb1[0]), (t1[1][1] - bb1[1])),
        ((t1[2][0] - bb1[0]), (t1[2][1] - bb1[1])),
    ]
    t2_offset = [
        ((t2[0][0] - bb2[0]), (t2[0][1] - bb2[1])),
        ((t2[1][0] - bb2[0]), (t2[1][1] - bb2[1])),
        ((t2[2][0] - bb2[0]), (t2[2][1] - bb2[1])),
    ]
    # Mask of the destination triangle: 1.0 inside, 0.0 outside (AA edges).
    mask = np.zeros((bb2[3], bb2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2_offset), (1.0, 1.0, 1.0), cv2.LINE_AA)  # 16, 0, cv2.LINE_AA
    size = (bb2[2], bb2[3])
    mat = cv2.getAffineTransform(np.float32(t1_offset), np.float32(t2_offset))
    img2_cropped = cv2.warpAffine(
        img1_cropped,
        mat,
        (size[0], size[1]),
        None,
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT_101,
    )
    # Keep only the pixels inside the destination triangle.
    img2_cropped = img2_cropped * mask
    # bb2_y = max(bb2[1], 0)
    img2_cropped_slice = np.index_exp[
        bb2[1]: bb2[1] + bb2[3], bb2[0]: bb2[0] + bb2[2]
    ]
    # Composite: clear the triangle area in img2, then add the warped patch.
    img2[img2_cropped_slice] = img2[img2_cropped_slice] * ((1.0, 1.0, 1.0) - mask)
    img2[img2_cropped_slice] = img2[img2_cropped_slice] + img2_cropped
def capture_best_img_from_source(source_video_loc):
    """Scan a source video for a frame with a face and precompute swap data.

    Returns the tuple (landmarks_points_source, tri_indices, img_source,
    tri_source_lst, bb1_lst, hull_index_ori, hull_index,
    landmark_idx_to_list_idx).  Relies on the module-level ``detector`` and
    ``predictor`` bound under ``__main__``.

    NOTE(review): despite the "largest face" messaging, the scan `break`s as
    soon as one frame's face beats the running maximum, i.e. it keeps the
    *first* detected face rather than scanning the whole video -- confirm
    whether that early exit is intended.
    """
    print('Source image preprocessing start.')
    print('Start capturing the largest face image from the source video.')
    cap_s = cv2.VideoCapture(source_video_loc)
    length = int(cap_s.get(cv2.CAP_PROP_FRAME_COUNT))
    max_area = 0
    best_source_img = None
    trial = 0
    while True:
        if trial >= length:
            break
        trial += 1
        print('trial:', trial, '/', length)
        success, img = cap_s.read()
        if not success:
            continue
        detects = detector(img)
        if len(detects) != 0:
            # Largest face within this frame.
            det = max(detects, key=lambda x: x.area())
            det_area = det.area()
            if det_area > max_area:
                max_area = det_area
                print('max image area now:', max_area, 'pixels.')
                best_source_img = img
                break
    img_source = copy.deepcopy(best_source_img)
    tri_indices = None
    landmarks_points_source = None
    detects_source = detector(img_source)
    if len(detects_source) != 0:
        det = max(detects_source, key=lambda x: x.area())
        landmarks_source = predictor(img_source, det)
        landmarks_points_source = []
        for point in landmarks_source.parts():
            landmarks_points_source.append((point.x, point.y))
        # hull for mouth to keep emotion
        hull_index_ori = cv2.convexHull(np.array(landmarks_points_source), returnPoints=False)
        hull_index = np.concatenate((hull_index_ori, mouth_index))
        landmark_idx_to_list_idx = {e[0]: i for i, e in enumerate(hull_index)}
        points = [landmarks_points_source[i[0]] for i in hull_index]
        tri_indices = get_delaunay_triangles_index(points, hull_index)
        # Pre-compute each source triangle and its bounding rect once.
        tri_source_lst = []
        bb1_lst = []
        for tri_index in tri_indices:
            tri_source = get_triangles(landmarks_points_source, tri_index)
            tri_source_lst.append(tri_source)
            bb1 = cv2.boundingRect(np.float32([tri_source]))
            bb1_lst.append(bb1)
    detects = detector(best_source_img)
    det = max(detects, key=lambda x: x.area())
    # show face boundaries
    cv2.rectangle(best_source_img, (det.left(), det.top()), (det.right(), det.bottom()), (0, 0, 255), 3)
    print('Source image preprocessing done.')
    print('Max image face area:', max_area, 'pixels.')
    print('-------------------------------------')
    print('You wanna have a look? Type y or n.')
    # Optional interactive preview of the chosen source frame.
    while True:
        option_input = input('')
        # check options
        if (option_input.upper() == 'Y'):
            cv2.imshow('best_source_img', best_source_img)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
            # Wait for ESC / q / Q before closing the preview window.
            while True:
                key = cv2.waitKey(0)
                if key in [27, ord('q'), ord('Q')]:
                    cv2.destroyAllWindows()
                    break
            break
        elif (option_input.upper() == 'N'):
            break
        else:
            print('Invalid option, please type y or n.')
    cap_s.release()
    # cv2.destroyAllWindows()
    return landmarks_points_source, tri_indices, img_source, tri_source_lst, bb1_lst,\
        hull_index_ori, hull_index, landmark_idx_to_list_idx
def capture_source_img_from_img(image_path):
    """Load a still image and precompute the source-face triangulation data.

    Same return tuple as capture_best_img_from_source.  Relies on the
    module-level ``detector`` and ``predictor`` bound under ``__main__``.

    NOTE(review): if no face is detected, several returned names
    (tri_source_lst, bb1_lst, hull_index_ori, ...) are never bound and the
    final ``return`` raises NameError -- callers must supply an image with a
    detectable face.
    """
    img_source = cv2.imread(image_path)
    tri_indices = None
    landmarks_points_source = None
    # if not success:
    #     print("reading second image error")
    detects_source = detector(img_source)
    if len(detects_source) != 0:
        det = max(detects_source, key=lambda x: x.area())
        landmarks_source = predictor(img_source, det)
        landmarks_points_source = []
        for point in landmarks_source.parts():
            landmarks_points_source.append((point.x, point.y))
        # Convex hull plus inner-mouth points, so the mouth keeps its emotion.
        hull_index_ori = cv2.convexHull(np.array(landmarks_points_source), returnPoints=False)
        hull_index = np.concatenate((hull_index_ori, mouth_index))
        landmark_idx_to_list_idx = {e[0]: i for i, e in enumerate(hull_index)}
        points = [landmarks_points_source[i[0]] for i in hull_index]
        tri_indices = get_delaunay_triangles_index(points, hull_index)
        # Pre-compute each source triangle and its bounding rect once.
        tri_source_lst = []
        bb1_lst = []
        for tri_index in tri_indices:
            tri_source = get_triangles(landmarks_points_source, tri_index)
            tri_source_lst.append(tri_source)
            bb1 = cv2.boundingRect(np.float32([tri_source]))
            bb1_lst.append(bb1)
    return landmarks_points_source, tri_indices, img_source, \
        tri_source_lst, bb1_lst, hull_index_ori, hull_index, landmark_idx_to_list_idx
if __name__ == '__main__':
    # Face detector / 68-point landmark predictor shared by the helpers above.
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # source_video_loc = 'test video/dance1.mp4'
    # landmarks_points_source, tri_indices, img_source, tri_source_lst,\
    #     bb1_lst, hull_index_ori, hull_index, landmark_idx_to_list_idx = capture_best_img_from_source(source_video_loc)

    # Use a still image as the face source.
    source_image_loc = 'videoAndPics/5.jpg'
    landmarks_points_source, tri_indices, img_source, tri_source_lst,\
        bb1_lst, hull_index_ori, hull_index, landmark_idx_to_list_idx = capture_source_img_from_img(source_image_loc)
    print(hull_index)

    video_loc = 'videoAndPics/1.mp4'
    cap = cv2.VideoCapture(video_loc)
    print('Start doing face swapping.')
    frame_init = True
    while True:
        # Grab one target frame.
        success, img = cap.read()
        # BUGFIX: check the read result *before* using the frame -- previously
        # cvtColor ran on a None frame at end-of-stream and crashed; `break`
        # (instead of the old `continue`) also ends the otherwise-infinite
        # loop once the video is exhausted.
        if not success:
            break
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detects = detector(img)
        if len(detects) != 0:
            det = max(detects, key=lambda x: x.area())
            landmarks = predictor(img, det)
            landmarks_points_target = []
            for point in landmarks.parts():
                landmarks_points_target.append((point.x, point.y))
            hull_target = [landmarks_points_target[i[0]] for i in hull_index]
            original_hull_target = [landmarks_points_target[i[0]] for i in hull_index_ori]
            # NOTE(review): frame_init is never set to False, and the
            # `first_frame = True` below looks like a typo for `frame_init =
            # False`; as written, the optical-flow history is re-seeded from
            # the current frame every iteration.  Behavior kept as-is --
            # confirm whether temporal smoothing across frames is intended.
            if frame_init:
                hull_target_last_frame = np.array(hull_target, np.float32)
                img_gray_previous = copy.deepcopy(img_gray)
                first_frame = True
            # Track last frame's hull points into this frame (stabilisation).
            hull2_next, *_ = cv2.calcOpticalFlowPyrLK(
                img_gray_previous,
                img_gray,
                hull_target_last_frame,
                np.array(hull_target, np.float32),
                winSize=(101, 101),
                maxLevel=5,
                criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001),
            )
            # Blend detection and tracking 50/50 to damp landmark jitter.
            current_factor = 0.5
            for i, _ in enumerate(hull_target):
                hull_target[i] = current_factor * np.array(hull_target[i]) + (1 - current_factor) * hull2_next[i]
            hull_target_last_frame = np.array(hull_target, np.float32)
            img_gray_previous = img_gray
            img_source_warped = np.copy(img)
            img_source_warped = np.float32(img_source_warped)
            break_check = False
            index = 0
            for tri_index in tri_indices:
                # Skip triangles entirely inside the mouth so the target keeps
                # its own expression.
                if (tri_index[0] in mouth_index_set and tri_index[1] in mouth_index_set and tri_index[2] in mouth_index_set):
                    index += 1
                    continue
                tri_target = get_triangles(landmarks_points_target, tri_index)
                bb2 = cv2.boundingRect(np.float32([tri_target]))
                if bb2[1] < 0:
                    # Triangle pokes above the frame edge; drop this frame.
                    break_check = True
                    break
                warp_triangle(img_source, img_source_warped, \
                              bb1_lst[index], bb2, tri_source_lst[index], tri_target)
                index += 1
            if break_check:
                continue
            mask = np.zeros_like(img_gray, dtype=img.dtype)
            cv2.fillConvexPoly(mask, np.int32(original_hull_target), 255)
            bb = cv2.boundingRect(np.float32(original_hull_target))
            center = (bb[0] + int(bb[2] / 2), bb[1] + int(bb[3] / 2))
            # Composite the warped source face into the frame.
            img = cv2.seamlessClone(
                np.uint8(img_source_warped), img, mask, center, cv2.NORMAL_CLONE
            )
        cv2.imshow("face", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
| Spilen/exercices | main.py | main.py | py | 11,605 | python | en | code | 0 | github-code | 13 |
38007709433 | import vtk
def main():
    """Assemble several props into a vtkAssembly and render the scene.

    Demonstrates prop hierarchies: parts follow the assembly's transform,
    while the cone actor is additionally added to the renderer on its own.
    """
    # Sphere
    sphere = vtk.vtkSphereSource()
    sphereMapper = vtk.vtkPolyDataMapper()
    sphereMapper.SetInputConnection(sphere.GetOutputPort())
    sphereActor = vtk.vtkActor()
    sphereActor.SetMapper(sphereMapper)
    sphereActor.GetProperty().SetColor(1, 0, 1)
    sphereActor.SetOrigin(2, 1, 3)
    sphereActor.RotateY(6)
    sphereActor.SetPosition(2.25, 0, 0)

    # Cube -- BUGFIX: was a copy-pasted vtkSphereSource despite the name.
    cube = vtk.vtkCubeSource()
    cubeMapper = vtk.vtkPolyDataMapper()
    cubeMapper.SetInputConnection(cube.GetOutputPort())
    cubeActor = vtk.vtkActor()
    cubeActor.SetMapper(cubeMapper)
    cubeActor.GetProperty().SetColor(0, 0, 1)
    cubeActor.SetPosition(0, 0.25, 0)

    # Cone -- BUGFIX: was a copy-pasted vtkSphereSource despite the name.
    cone = vtk.vtkConeSource()
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    coneActor.GetProperty().SetColor(0, 1, 0)
    coneActor.SetPosition(0, 0, 0.25)

    # Cylinder
    cylinder = vtk.vtkCylinderSource()
    cylinder.SetResolution(36)
    cylinderMapper = vtk.vtkPolyDataMapper()
    cylinderMapper.SetInputConnection(cylinder.GetOutputPort())
    cylinderActor = vtk.vtkActor()
    cylinderActor.SetMapper(cylinderMapper)
    cylinderActor.GetProperty().SetColor(1, 0, 0)

    # Assembly: parts inherit the assembly's position/rotation.
    assembly = vtk.vtkAssembly()
    assembly.AddPart(cylinderActor)
    assembly.AddPart(sphereActor)
    assembly.AddPart(cubeActor)  # BUGFIX: cubeActor was built but never used
    assembly.AddPart(coneActor)
    assembly.SetOrigin(5, 10, 15)
    assembly.AddPosition(5, 0, 0)
    assembly.RotateX(15)

    renderer = vtk.vtkRenderer()
    renderer.AddActor(assembly)
    # The cone appears twice on purpose: once via the assembly, once alone.
    renderer.AddActor(coneActor)
    renderer.SetBackground(0.3, 0.5, 0.7)

    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    window.SetSize(800, 600)

    windowInteractor = vtk.vtkRenderWindowInteractor()
    windowInteractor.SetRenderWindow(window)

    window.Render()
    windowInteractor.Start()
if __name__ == "__main__":
main()
| dunyazad/VTK-Python-Users-Guide-Examples | 4-6 Controlling 3D Props.py | 4-6 Controlling 3D Props.py | py | 2,001 | python | en | code | 0 | github-code | 13 |
74461662098 | import torch
import torch.nn as nn
import torchvision.models as models
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Tokenizer matching the bert-mini backbone used by CombineModel below.
# NOTE(review): `tokenizers` is not referenced in this chunk -- it is likely
# imported from this module by callers; confirm before removing.
tokenizers = AutoTokenizer.from_pretrained("prajjwal1/bert-mini")
class CombineModel(nn.Module):
    """Multimodal multi-label classifier: an EfficientNet-B4 image branch fused
    with a bert-mini text branch into 19 sigmoid outputs."""

    def __init__(self):
        super(CombineModel, self).__init__()
        # Image branch: pretrained EfficientNet-B4 whose classifier head is
        # replaced by dropout + a 19-way linear layer.
        self.efficient_net = models.efficientnet_b4(weights=models.EfficientNet_B4_Weights.DEFAULT)
        self.efficient_net.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features=self.efficient_net.classifier[1].in_features, out_features=19)
        )
        self.sig = nn.Sigmoid()
        # Text branch: bert-mini sequence-classification model.
        self.nlp_net = AutoModelForSequenceClassification.from_pretrained("prajjwal1/bert-mini")
        # NOTE(review): in_features=2 implies the text branch feeds a 2-dim
        # vector here, yet forward() passes hidden_state[:, 0], which removes
        # that dimension -- verify the intended tensor shapes.
        self.pre_classifier = nn.Linear(in_features=2, out_features=64)
        self.activation = nn.Tanh()
        self.dropout = nn.Dropout(p=0.5)
        self.classifier = nn.Linear(in_features=64, out_features=19)
        # Fusion head: concatenated 19 (text) + 19 (image) scores -> 19 labels.
        self.linear = nn.Linear(in_features=38, out_features=19)

    def forward(self, x, input_ids=None, attention_mask=None):
        # x: image batch; input_ids / attention_mask: tokenised text batch.
        cnn_out = self.sig(self.efficient_net(x))
        nlp_out = self.nlp_net(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = nlp_out[0]  # first element of the model's output tuple
        out = hidden_state[:, 0]
        out = self.pre_classifier(out)
        out = self.activation(out)
        out = self.dropout(out)
        out = self.classifier(out)
        # Concatenate both branches' 19-dim scores and fuse to 19 outputs.
        output = torch.cat((out, cnn_out), dim=1)
        return self.sig(self.linear(output))
| Re2z/USYD-2023s1-COMP5329-DeepLearning-Assignment | Assignment_2/Project/pre_model/combine_model.py | combine_model.py | py | 1,527 | python | en | code | 0 | github-code | 13 |
18313641086 | from django.urls import path
from . import views
# Route table for the home app; `name=` values back reverse() / {% url %} lookups.
urlpatterns = [
    path('', views.index, name="index"),
    path("crear-organizacion", views.crear_organizacion, name="crear-organizacion"),
    path("crear-usuario", views.crear_usuario, name="crear-usuario"),
    path("organizaciones_list/", views.organizaciones_list, name="organizaciones_list"),
    path("transitos_list/", views.transitos_list, name="transitos_list"),
    path("crear-transito", views.crear_transito, name="crear-transito"),
] | patribu88/Tercera_pre-entrega_Crispens | project/apps/home/urls.py | urls.py | py | 498 | python | en | code | 0 | github-code | 13 |
44035671443 | import os
# Repository root: one directory above this file's package.
PROJECT_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__),
    os.pardir)
)

## logging configs
log_path = os.path.join(PROJECT_ROOT, "experiments", "logs")

# default parameter setting for synthetic dataset
nr_classes = 2
# Per-class edge probabilities, feature means and feature standard deviations
# (index i belongs to class i).
connectivity_list = [0.2, 0.3]
means = [0, 0.1]
std_devs = [0.5, 0.5]
| tamaramueller/DP-GNNs | src/utils/config.py | config.py | py | 351 | python | en | code | 7 | github-code | 13 |
8321533965 | from nextcord import Interaction, SlashOption, ChannelType, Activity, ActivityType
from nextcord.abc import GuildChannel
from nextcord.ext import commands
import os
import nextcord
import json
from argparse import ArgumentParser
from urllib.parse import parse_qsl, urlparse
import requests
import tweepy
intents = nextcord.Intents.all()
intents.members = True
intents.presences = True
client = commands.Bot(command_prefix='.', intents=intents)
guild_ids = [647250925282656287]
@client.event
async def on_ready():
    """Log startup and advertise a 'playing' presence once the bot connects."""
    print(f'{client.user} has logged in.')
    await client.change_presence(activity=nextcord.Game(name="Trying my best"))
# Load the Discord bot token and Twitter API credentials from the JSON config.
with open('./resources/config.json') as f:
    data = json.load(f)

token = data["token"]
CONSUMER_KEY = data["CONSUMER_KEY"]
CONSUMER_SECRET = data["CONSUMER_SECRET"]
ACCESS_TOKEN = data["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = data["ACCESS_TOKEN_SECRET"]
@client.event
async def on_presence_update(before, after):
    """Toggle the 'streaming' role as a member's activity changes.

    NOTE(review): the check compares against ActivityType.playing, so the role
    tracks members who are *playing*; if it should track streams, compare
    against nextcord.ActivityType.streaming instead (behavior kept as-is).
    """
    activity_type = None
    streaming_role = after.guild.get_role(772062410789617696)
    try:
        activity_type = after.activity.type
    except AttributeError:
        # BUGFIX: narrowed from a bare `except` -- after.activity is None when
        # the member has no activity; other errors should not be swallowed.
        pass
    if (activity_type is not nextcord.ActivityType.playing):
        if streaming_role in after.roles:
            print(f"{after.display_name} has stopped streaming")
            await after.remove_roles(streaming_role)
    else:
        if streaming_role not in after.roles:
            print(f"{after.display_name} has started streaming")
            await after.add_roles(streaming_role)
# Load every cog found under ./cogs/<folder>/<name>.py at startup.
for folder in os.listdir('./cogs/.'):
    for filename in os.listdir(f'./cogs/{folder}/.'):
        if filename.endswith('.py'):
            client.load_extension(f'cogs.{folder}.{filename[:-3]}')
            # BUGFIX: was `print((unknown))`, a NameError on the undefined
            # name `unknown`; report the cog that was just loaded instead.
            print(f'Loaded cog: cogs.{folder}.{filename[:-3]}')
@client.command()
@commands.is_owner()
async def reload(ctx):
    """Owner-only command: reload every cog under ./cogs in place."""
    try:
        for folder in os.listdir('./cogs/.'):
            for filename in os.listdir(f'./cogs/{folder}/.'):
                if filename.endswith('.py'):
                    client.reload_extension(f'cogs.{folder}.{filename[:-3]}')
                    # BUGFIX: the message previously printed a garbled
                    # "(unknown)" placeholder instead of the cog's name.
                    print(f'"**{filename[:-3]}**" Cog reloaded')
    except Exception as e:
        return print(e)
def main():
    """Authenticate against the Twitter API with the configured keys and post
    a single hard-coded test tweet.  (Currently unused: the __main__ guard
    that called it is commented out further down the file.)"""
    twitter_auth_keys = {
        "consumer_key" : CONSUMER_KEY,
        "consumer_secret" : CONSUMER_SECRET,
        "access_token" : ACCESS_TOKEN,
        "access_token_secret" : ACCESS_TOKEN_SECRET
    }
    auth = tweepy.OAuthHandler(
        twitter_auth_keys['consumer_key'],
        twitter_auth_keys['consumer_secret']
    )
    auth.set_access_token(
        twitter_auth_keys['access_token'],
        twitter_auth_keys['access_token_secret']
    )
    api = tweepy.API(auth)
    tweet = "Test 2 tweet python"
    # Post the tweet; the returned Status object is unused.
    status = api.update_status(status=tweet)
# if __name__ == "__main__":
# main()
# print("Is this doing something?")
# activity_type = None
# streaming_role = after.guild.get_role(772062410789617696)
# try:
# activity_type = after.activity.type
# except:
# pass
# if not (activity_type is nextcord.ActivityType.playing):
# # User is doing something other than streaming
# if streaming_role in after.roles:
# print(f"{after.display_name} has stopped streaming")
# await after.remove_roles(streaming_role)
# else:
# if streaming_role not in after.roles:
# # If they don't have the role, give it to them
# # If they have it, we already know they're streaming so we don't need to do anything
# print(f"{after.display_name} has started streaming")
# await after.add_roles(streaming_role)
client.run(token) | Chris-ctrl-paste/Ivy-python | index.py | index.py | py | 3,800 | python | en | code | 0 | github-code | 13 |
37482330914 | '''
0 0 0 0 0
1 0 0 0 0
1 1 0 0 0
1 1 1 0 0
1 1 0 1 0
1 1 0 0 1
...
1.재귀
1-1. 재귀 종료조건 depth
1-2. 합이 100이면 종료
2. 방문 했는지 visit배열
'''
# Heights of the nine candidate dwarfs, one per input line.
short_men = [int(input()) for _ in range(9)]
# Selection flags: visited[i] == 1 while dwarf i is in the current combination.
visited = [0,0,0,0,0,0,0,0,0]
def dfs(start, depth):
    """Pick 7 of the 9 dwarfs by DFS over `visited`; when the chosen heights
    sum to 100, print them in ascending order and terminate the program."""
    if depth == 7:
        chosen = [short_men[idx] for idx, flag in enumerate(visited) if flag == 1]
        if sum(chosen) != 100:
            return
        for height in sorted(chosen):
            print(height)
        exit()
    else:
        # Try every still-unselected dwarf at or after `start` (ascending
        # indices avoid generating the same combination twice).
        for idx in range(start, len(visited)):
            if visited[idx] == 0:
                visited[idx] = 1
                dfs(idx + 1, depth + 1)
                visited[idx] = 0
# Start the search from the first dwarf with nothing selected yet.
dfs(0,0)
| Choi-Seong-Hyeok/Algorithm | 완전탐색/일곱난쟁이(visit).py | 일곱난쟁이(visit).py | py | 784 | python | en | code | 0 | github-code | 13 |
22191467072 | import logging
from copy import copy
from dataclasses import dataclass, field
from typing import Dict, List, Optional
from linkml_runtime.linkml_model import (
Annotation,
ClassDefinition,
ClassDefinitionName,
Definition,
Prefix,
SchemaDefinition,
SlotDefinition,
)
from linkml_runtime.utils.schemaview import SchemaView, SlotDefinitionName
from sqlalchemy import Enum
class RelationalAnnotations(Enum):
    """Annotation tags attached to slots in the generated relational schema.

    NOTE(review): `Enum` here is *sqlalchemy's* Enum (see the imports), not
    enum.Enum -- the members are consumed as plain string class attributes
    (e.g. `RelationalAnnotations.PRIMARY_KEY in a.annotations`); confirm
    before switching the base class.
    """
    PRIMARY_KEY = "primary_key"
    FOREIGN_KEY = "foreign_key"
class ForeignKeyPolicy(Enum):
    """How the transformer decides where to inject foreign keys.

    NOTE(review): the base class is sqlalchemy's Enum, not enum.Enum; members
    act as plain string class attributes.
    """
    ALL_REFS_ARE_FKS = "all_refs_are_fks"             # every class reference becomes a FK
    INJECT_FK_FOR_NESTED = "inject_fk_for_nested"     # FKs only for inlined (nested) refs
    INJECT_FK_FOR_ALL_REFS = "inject_fk_for_all_refs"
    NO_FOREIGN_KEYS = "no_foreign_keys"               # leave the schema FK-free
@dataclass
class Link:
    """
    Foreign key reference from a (class, slot) pair to a target class/slot.
    """

    source_class: Optional[str]  # optional for top-level slots
    source_slot: str
    target_class: str
    target_slot: str = None  # filled in once the target's PK slot is known
@dataclass
class RelationalMapping:
    """
    Mapping between slot in source model and target in relational model

    Example, with join table created:

        RelationalMapping(source_class='Person',
                          source_slot='aliases',
                          target_class='Person_aliases',
                          target_slot='aliases',
                          uses_join_table=True)
    """

    source_class: str = None
    source_slot: str = None
    mapping_type: str = None
    target_class: str = None
    target_slot: str = None   # slot on the target class that carries the value
    join_class: str = None    # name of the intermediate join class, if any
    uses_join_table: bool = None  # True if an extra join table is created
@dataclass
class OneToAnyMapping(RelationalMapping):
    """
    A one-to-one or one-to-many mapping from a source class+slot to
    a target class+slot
    """

    target_class: str = None
    target_slot: str = None
    multivalued: bool = False
@dataclass
class ManyToManyMapping(RelationalMapping):
    """
    A many-to-many relationship introduces a join class/table

    See:
    - https://docs.sqlalchemy.org/en/14/orm/basic_relationships.html#relationships-many-to-many
    - https://stackoverflow.com/questions/5756559/how-to-build-many-to-many-relations-using-sqlalchemy-a-good-example
    """

    join_class: str = None    # aka secondary
    target_class: str = None  # actual target
    mapping_type: str = "ManyToMany"
@dataclass
class MultivaluedScalar(RelationalMapping):
    """
    A multivalued slot with a non-class (scalar) range; modelled via an
    association proxy / linking table.

    See: https://docs.sqlalchemy.org/en/14/orm/extensions/associationproxy.html
    """

    join_class: str = None
    target_slot: str = None
    mapping_type: str = "MultivaluedScalar"
def add_attribute(attributes: Dict[str, SlotDefinition], tgt_slot: SlotDefinition) -> None:
    """Insert (or overwrite) *tgt_slot* in *attributes*, keyed by its name."""
    attributes.update({tgt_slot.name: tgt_slot})
def add_annotation(element: Definition, tag: str, value: str) -> None:
    """Attach an Annotation(tag, value) to *element*'s annotations map."""
    element.annotations[tag] = Annotation(tag, value)
def get_primary_key_attributes(cls: ClassDefinition) -> List[SlotDefinitionName]:
    """Names of *cls* attributes annotated as primary keys."""
    names = []
    for att in cls.attributes.values():
        if RelationalAnnotations.PRIMARY_KEY in att.annotations:
            names.append(att.name)
    return names
def get_foreign_key_map(cls: ClassDefinition) -> Dict[SlotDefinitionName, str]:
    """Map each FK-annotated attribute name of *cls* to its 'Table.column' target."""
    fk_map = {}
    for att in cls.attributes.values():
        if RelationalAnnotations.FOREIGN_KEY in att.annotations:
            fk_map[att.name] = att.annotations[RelationalAnnotations.FOREIGN_KEY].value
    return fk_map
@dataclass
class TransformationResult:
    """
    The result of a transformation is a target schema plus a collection of mappings
    """

    schema: SchemaDefinition
    mappings: List[RelationalMapping]
@dataclass
class RelationalModelTransformer:
"""
Transforms the source schema into a relational schema
"""
schemaview: SchemaView = None
# dialect: str = field(default_factory=lambda : 'sqlite')
skip_tree_root: bool = field(default_factory=lambda: False)
skip_abstract: bool = field(default_factory=lambda: True)
skip_mixins: bool = field(default_factory=lambda: True)
join_table_separator: str = field(default_factory=lambda: "_")
foreign_key_policy: ForeignKeyPolicy = field(
default_factory=lambda: ForeignKeyPolicy.INJECT_FK_FOR_NESTED
)
def transform(
self, tgt_schema_name: str = None, top_class: ClassDefinitionName = None
) -> TransformationResult:
"""
Transforms the source schema into a relational schema
:param tgt_schema_name:
:param top_class:
:return:
"""
join_sep = self.join_table_separator
links = self.get_reference_map()
source_sv = self.schemaview
source_sv.merge_imports()
source = source_sv.schema
src_schema_name = source.name
mappings = []
if tgt_schema_name is None:
tgt_schema_name = f"{src_schema_name}_relational"
tgt_schema_id = f"{source.id}_relational"
# TODO: recursively transform imports
target = SchemaDefinition(
id=tgt_schema_id,
name=tgt_schema_name,
default_range=source.default_range,
prefixes=source.prefixes,
imports=source.imports,
# imports=['linkml:types'],
from_schema=source.from_schema,
source_file=source.source_file,
types=source.types,
subsets=source.subsets,
enums=source.enums,
)
target.prefixes["rr"] = Prefix("rr", "http://www.w3.org/ns/r2rml#")
# copy source -> target
# roll-down all slots and create an attribute-only model
for cn, c in source_sv.all_classes().items():
c = ClassDefinition(
name=cn,
class_uri=source_sv.get_uri(c, expand=False),
mixin=c.mixin,
is_a=c.is_a,
tree_root=c.tree_root,
abstract=c.abstract,
description=c.description,
unique_keys=c.unique_keys,
)
for slot in source_sv.class_induced_slots(cn):
tgt_slot = copy(slot)
if slot.alias:
tgt_slot.name = slot.alias
# TODO: attrs not indexed
# tgt_slot.slot_uri = sv.get_uri(slot, expand=False)
tgt_slot.is_a = None
tgt_slot.mixins = []
add_attribute(c.attributes, tgt_slot)
# this is required in case an attribute inherits from a slot
for sn in source_sv.all_slots(attributes=False):
slot = source_sv.get_slot(sn)
# target.slots[slot.name] = copy(slot)
target.classes[c.name] = c
target_sv = SchemaView(target)
# create surrogate/autoincrement primary keys for any class (originally: that is referenced)
# for link in links:
for cn in target_sv.all_classes():
pk = self.get_direct_identifier_attribute(target_sv, cn)
if self.foreign_key_policy == ForeignKeyPolicy.NO_FOREIGN_KEYS:
logging.info(f"Will not inject any PKs, and policy == {self.foreign_key_policy}")
else:
if pk is None:
pk = self.add_primary_key(cn, target_sv)
logging.info(f"Added primary key {cn}.{pk.name}")
for link in links:
if link.target_class == cn:
link.target_slot = pk.name
# TODO: separate out the logic into separate testable methods
target_sv.set_modified()
# post-process target schema
for cn, c in target_sv.all_classes().items():
if self.foreign_key_policy == ForeignKeyPolicy.NO_FOREIGN_KEYS:
continue
pk_slot = self.get_direct_identifier_attribute(target_sv, cn)
# if self.is_skip(c) and len(incoming_links) == 0:
# logging.info(f'Skipping class: {c.name}')
# del target.classes[cn]
# continue
for src_slot in list(c.attributes.values()):
slot = copy(src_slot)
slot_range = slot.range
slot_range_is_class = slot_range in target_sv.all_classes()
is_shared = slot_range_is_class and (
slot.inlined or slot.inlined_as_list or "shared" in slot.annotations
)
if slot.multivalued:
slot.multivalued = False
slot_name = slot.name
sn_singular = slot.singular_name if slot.singular_name else slot.name
if pk_slot is None:
pk_slot = self.add_primary_key(c.name, target_sv)
backref_slot = SlotDefinition(
name=f"{c.name}_{pk_slot.name}",
description="Autocreated FK slot",
range=c.name,
slot_uri="rdf:subject",
# close_mappings=[pk_slot.slot_uri],
annotations=[
Annotation("backref", "true"),
Annotation("rdfs:subPropertyOf", "rdf:subject"),
],
)
# if is_only_ref_to_range and slot_range_is_class:
if is_shared:
# ONE-TO-MANY
# e.g. if Person->Address, and only Person has Address,
# we can make Address.Person_id
backref_slot.inverse = slot_name
backref_class = target.classes[slot_range]
add_attribute(backref_class.attributes, backref_slot)
# In SQLA, corresponds to source_class.source_slot = relationship(target_class)
mappings.append(
OneToAnyMapping(
source_class=cn,
source_slot=src_slot.name,
target_class=backref_class.name,
target_slot=backref_slot.name,
)
)
else:
# MANY-TO-MANY
# create new linking table
linker_class = ClassDefinition(
name=f"{cn}{join_sep}{sn_singular}",
from_schema=target.id,
class_uri="rdf:Statement",
annotations=[
Annotation("linkml:derived_from", cn),
Annotation("dcterms:conformsTo", "linkml:JoinTable"),
],
comments=[f"Linking class generated from {cn}.{slot_name}"],
)
slot.name = sn_singular
# On the linking table, it's inlined.
# This triggers that the slot.name gets appended with the pk column name on the target side
slot.inlined = True
add_attribute(linker_class.attributes, backref_slot)
add_attribute(linker_class.attributes, slot)
slot.slot_uri = "rdf:object"
target.classes[linker_class.name] = linker_class
if slot_range_is_class:
fwdann = Annotation("forwardref", "true")
slot.annotations[fwdann.tag] = fwdann
mappings.append(
ManyToManyMapping(
source_class=cn,
source_slot=src_slot.name,
target_class=slot_range,
# target_slot=backref_slot.name,
join_class=linker_class.name,
# target_slot=slot.name,
# uses_join_table=True,
)
)
else:
mappings.append(
MultivaluedScalar(
source_class=cn,
source_slot=src_slot.name,
target_slot=sn_singular,
join_class=linker_class.name,
)
)
# we delete the slot from the set of attributes for the class,
# but leave it present as a 'dangling' slot, where it can
# be referenced for mapping purposes
target.slots[slot_name] = src_slot
src_slot.owner = None
del c.attributes[slot_name]
target_sv.set_modified()
target.classes[c.name] = c
# add PK and FK anns
target_sv.set_modified()
fk_policy = self.foreign_key_policy
for c in target.classes.values():
if self.foreign_key_policy == ForeignKeyPolicy.NO_FOREIGN_KEYS:
continue
pk_slot = target_sv.get_identifier_slot(c.name)
for a in list(c.attributes.values()):
if pk_slot is None or a.name == pk_slot.name:
ann = Annotation("primary_key", "true")
a.annotations[ann.tag] = ann
if a.required:
ann = Annotation("required", "true")
a.annotations[ann.tag] = ann
if a.range in target.classes:
tc = target.classes[a.range]
# tc_pk_slot = target_sv.get_identifier_slot(tc.name)
tc_pk_slot = self.get_direct_identifier_attribute(target_sv, tc.name)
if tc_pk_slot is None:
raise ValueError(f"No PK for attribute {a.name} range {a.range}")
is_inlined = a.inlined or not source_sv.get_identifier_slot(tc.name)
if (
fk_policy == ForeignKeyPolicy.INJECT_FK_FOR_NESTED
and is_inlined
and not a.multivalued
) or (fk_policy == ForeignKeyPolicy.INJECT_FK_FOR_ALL_REFS):
# if it is already an injected backref, no need to re-inject
if "backref" not in a.annotations:
del c.attributes[a.name]
if "forwardref" not in a.annotations:
add_annotation(a, "original_slot", a.name)
a.alias = f"{a.name}_{tc_pk_slot.name}"
a.name = a.alias
c.attributes[a.name] = a
ann = Annotation("foreign_key", f"{tc.name}.{tc_pk_slot.name}")
a.annotations[ann.tag] = ann
target_sv.set_modified()
result = TransformationResult(target, mappings=mappings)
return result
def get_direct_identifier_attribute(
    self, sv: "SchemaView", cn: "ClassDefinitionName"
) -> "Optional[SlotDefinition]":
    """
    Return the first attribute declared directly on class ``cn`` that is
    marked as an identifier or a key, or None if the class has neither.

    Attributes are scanned in declaration order; for a given attribute,
    ``identifier`` takes no precedence over ``key`` — either flag selects it.
    """
    cls = sv.get_class(cn)
    for attribute in cls.attributes.values():
        if attribute.identifier or attribute.key:
            return attribute
    return None
def get_reference_map(self) -> "List[Link]":
    """
    Extract all class-slot-range references

    :return: list of links
    """
    # TODO: move this to schemaview
    sv = self.schemaview
    links = []
    # Slots induced on each class whose range is itself a class.
    for class_name in sv.all_classes():
        for induced_slot in sv.class_induced_slots(class_name):
            if induced_slot.range in sv.all_classes():
                links.append(
                    Link(
                        source_class=class_name,
                        source_slot=induced_slot.name,
                        target_class=induced_slot.range,
                    )
                )
    # Top-level slot definitions with a class-valued range (no source class).
    for slot_def in sv.all_slots().values():
        if slot_def.range in sv.all_classes():
            links.append(
                Link(
                    source_class=None,
                    source_slot=slot_def.name,
                    target_class=slot_def.range,
                )
            )
    return links
def is_skip(self, c: "ClassDefinition") -> bool:
    """True if class ``c`` should be excluded from transformation,
    according to this transformer's skip_abstract/skip_mixins/skip_tree_root flags."""
    if c.abstract and self.skip_abstract:
        return True
    if c.mixin and self.skip_mixins:
        return True
    if c.tree_root and self.skip_tree_root:
        return True
    return False
def add_primary_key(self, cn: str, sv: "SchemaView") -> "SlotDefinition":
    """
    Adds a surrogate/autoincrement primary key to a class

    :param cn: name of the class to extend
    :param sv: schema view containing the class; marked modified on success
    :return: the newly injected primary-key slot
    :raises ValueError: if every conventional candidate name is already taken
    """
    c = sv.get_class(cn)
    # Try conventional surrogate-key names; the first one not already used
    # as an attribute of the class wins.
    candidate_names = ["id", "uid", "identifier", "pk"]
    valid_candidate_names = [n for n in candidate_names if n not in c.attributes]
    if not valid_candidate_names:
        raise ValueError(f"Cannot add primary key to class {cn}: no valid candidate names")
    pk = SlotDefinition(name=valid_candidate_names[0], identifier=True, range="integer")
    add_annotation(pk, "dcterms:conformsTo", "rr:BlankNode")
    add_annotation(pk, "autoincrement", "true")
    # No collision re-check is needed here: the chosen name was filtered above
    # to be absent from c.attributes, so the former "already exists" guard was
    # unreachable and has been removed.
    # add PK to start of attributes
    atts = copy(c.attributes)
    c.attributes.clear()  # See https://github.com/linkml/linkml/issues/370
    add_attribute(c.attributes, pk)  # add to start
    c.attributes.update(atts)
    sv.set_modified()
    return pk
| linkml/linkml | linkml/transformers/relmodel_transformer.py | relmodel_transformer.py | py | 17,956 | python | en | code | 228 | github-code | 13 |
def loose_change(cents):
    """Break a cent amount into the fewest US coins.

    Returns a dict with keys 'Quarters', 'Dimes', 'Nickels', 'Pennies'.
    Non-positive amounts yield all-zero counts; fractional cents are
    truncated toward zero before conversion.
    """
    change = {'Nickels': 0, 'Pennies': 0, 'Dimes': 0, 'Quarters': 0}
    if cents <= 0:
        return change
    cents = int(cents)
    # Greedy coin change, largest denomination first.
    change['Quarters'], cents = divmod(cents, 25)
    change['Dimes'], cents = divmod(cents, 10)
    # BUG FIX: the original stored the nickel count under 'Pennies' and the
    # leftover pennies under 'Nickels'; the labels are now correct.
    change['Nickels'], cents = divmod(cents, 5)
    change['Pennies'] = cents
    return change
| rbnmartins/codewars | LooseChange.py | LooseChange.py | py | 426 | python | en | code | 0 | github-code | 13 |
13740030644 | import random
import matplotlib.pyplot as plt
def estimate_pi(n):
    """Monte Carlo estimate of pi from n uniform samples in the unit square.

    A point counts as a hit when it falls inside the circle of radius 1/2
    centred at (1/2, 1/2); pi is approximated as 4 * hits / n.
    """
    hits = 0
    for _ in range(n):
        px = random.uniform(0, 1)
        py = random.uniform(0, 1)
        if (px - 0.5) ** 2 + (py - 0.5) ** 2 <= 0.25:
            hits += 1
    return 4 * hits / n
# Plot how the estimate converges as the sample count grows.
# NOTE(review): the final run uses 10**7 samples, which takes a while in
# pure Python.
ns = [10, 100, 1000, 10000, 100000, 1000000, 10000000]
estimates = [estimate_pi(n) for n in ns]
plt.plot(ns, estimates)
plt.show()
| Solsol1014/Study | pythonpractice/Probability_Statistics/prj1.py | prj1.py | py | 470 | python | en | code | 0 | github-code | 13 |
17056060894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RentInfoDTO import RentInfoDTO
class MiniGoodsDetailInfoDTO(object):
    """DTO describing one mini-program goods detail entry.

    All fields are plain pass-through attributes except ``rent_info``, whose
    setter coerces raw dicts into ``RentInfoDTO`` instances.  The class keeps
    the generated-SDK contract: ``to_alipay_dict`` serializes only truthy
    fields, ``from_alipay_dict`` builds an instance from a response dict.
    """

    # Field names shared by __init__, to_alipay_dict and from_alipay_dict.
    # The serialized key for each field is the field name itself.
    _FIELD_NAMES = (
        "body",
        "categories_tree",
        "goods_category",
        "goods_id",
        "goods_name",
        "image_material_id",
        "item_cnt",
        "out_item_id",
        "out_sku_id",
        "platform_item_version_id",
        "rent_info",
        "sale_price",
        "sale_real_price",
        "show_url",
    )

    def __init__(self):
        # Every backing slot starts out unset.
        for field_name in self._FIELD_NAMES:
            setattr(self, "_" + field_name, None)

    @property
    def rent_info(self):
        return self._rent_info

    @rent_info.setter
    def rent_info(self, value):
        # Accept either a ready-made RentInfoDTO or a raw dict from the API.
        if isinstance(value, RentInfoDTO):
            self._rent_info = value
        else:
            self._rent_info = RentInfoDTO.from_alipay_dict(value)

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Falsy values (None, '', 0, ...) are omitted, matching the behaviour
        of the generated SDK code.  Values exposing ``to_alipay_dict`` are
        serialized recursively.
        """
        params = dict()
        for field_name in self._FIELD_NAMES:
            value = getattr(self, field_name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field_name] = value.to_alipay_dict()
            else:
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a response dict; returns None for empty input."""
        if not d:
            return None
        o = MiniGoodsDetailInfoDTO()
        for field_name in MiniGoodsDetailInfoDTO._FIELD_NAMES:
            if field_name in d:
                # Assigning through the property so rent_info gets coerced.
                setattr(o, field_name, d[field_name])
        return o


def _plain_property(attr_name):
    # Build a simple pass-through property for one backing slot.
    backing = "_" + attr_name

    def _get(self):
        return getattr(self, backing)

    def _set(self, value):
        setattr(self, backing, value)

    return property(_get, _set)


# Install plain getter/setter properties for every field except rent_info,
# which is defined explicitly inside the class body above.
for _field in MiniGoodsDetailInfoDTO._FIELD_NAMES:
    if _field != "rent_info":
        setattr(MiniGoodsDetailInfoDTO, _field, _plain_property(_field))
del _field
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MiniGoodsDetailInfoDTO.py | MiniGoodsDetailInfoDTO.py | py | 7,918 | python | en | code | 241 | github-code | 13 |
19544628229 | from django.urls import path
from . import views
# URL routes for this app; each path maps to a handler in views.py.
# NOTE(review): route/view names suggest login/signup/home/logout flows —
# confirm against views.py.
urlpatterns = [
    path('',views.login),
    path('layer3',views.threelayercr),
    path('signup',views.signup),
    path("log3",views.home),
    path("home",views.logged),
    path("logout",views.logout)
]
28326130537 | from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class EventRegistration(models.Model):
    """Extend event registrations with the employee's promotion (related field)."""
    _inherit = "event.registration"
    # Read-only mirror of the registered employee's promotion; stored value
    # follows employee_id.promotion_id.
    promotion_id = fields.Many2one(
        comodel_name="hr.promotion",
        string="Promotion",
        related="employee_id.promotion_id",
    )
class Event(models.Model):
    """Extend events with a promotion and bulk-registration of its employees."""
    _inherit = "event.event"
    promotion_id = fields.Many2one(
        comodel_name="hr.promotion", string="Promotion", required=False
    )
    @api.multi
    def button_register_promotion(self):
        """Create one event.registration per employee of the selected promotion.

        Raises ValidationError when no promotion is set.
        NOTE(review): does not guard against duplicate registrations when the
        button is pressed twice — confirm whether that is intended.
        """
        self.ensure_one()
        if not self.promotion_id:
            raise ValidationError(_("Enter a promotion first."))
        for employee in self.promotion_id.employee_ids:
            self.env["event.registration"].create(
                {
                    "event_id": self.id,
                    "name": employee.name,
                    "email": employee.work_email,
                    "phone": employee.work_phone,
                    "employee_id": employee.id,
                }
            )
| odoo-cae/odoo-addons-hr-incubator | hr_cae_event_promotion/models/event.py | event.py | py | 1,057 | python | en | code | 0 | github-code | 13 |
17588091592 | import os
import xmltodict
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from typing import List
class Dataset(Dataset):
    # NOTE(review): this class shadows the imported torch.utils.data.Dataset
    # that it subclasses; a distinct name (e.g. YoloDataset) would be clearer.
    def __init__(self, data_dir, labels_dir, transforms, S=7, C=3, file_format='txt', convert_to_yolo=True):
        """Index an image/annotation folder tree for YOLOv1-style training.

        data_dir: root folder containing one sub-folder per class tag.
        labels_dir: text file of "<value> <tag>" pairs, one per line.
        transforms: albumentations-style callable taking image/bboxes/class_labels.
        S: output grid size; C: number of classes.
        file_format: annotation format, 'txt' (YOLO) or 'xml' (VOC).
        convert_to_yolo: convert corner boxes to normalized cx/cy/w/h.
        """
        self.class2tag = {}
        with open(labels_dir, 'r') as f:
            for line in f:
                (val, key) = line.split()
                # NOTE(review): val is kept as a string; the xml path below
                # uses it as a class index — presumably numeric, TODO confirm.
                self.class2tag[key] = val
        self.image_paths = []
        self.box_paths = []
        for tag in self.class2tag:
            for file in os.listdir(data_dir + '/' + tag):
                if file.endswith('.jpg'):
                    self.image_paths.append(data_dir + '/' + tag + '/' + file)
                if file.endswith('.' + file_format):
                    self.box_paths.append(data_dir + '/' + tag + '/' + file)
        # sorting to access values by equivalent files
        self.image_paths = sorted(self.image_paths)
        self.box_paths = sorted(self.box_paths)
        assert len(self.image_paths) == len(self.box_paths)
        self.transforms = transforms
        self.S = S
        self.C = C
        self.file_format = file_format
        self.convert_to_yolo = convert_to_yolo
    def __getitem__(self, idx):
        """Return {'image': transformed image, 'target': (S, S, 5+C) tensor}."""
        image = np.array(Image.open(self.image_paths[idx]).convert("RGB"))
        # Load annotations in whichever format was configured.
        if self.file_format == 'xml':
            bboxes, class_labels = self.__get_boxes_from_xml(self.box_paths[idx])
        if self.file_format == 'txt':
            bboxes, class_labels = self.__get_boxes_from_txt(self.box_paths[idx])
        if self.convert_to_yolo:
            # Corner coordinates -> normalized (cx, cy, w, h).
            for i, box in enumerate(bboxes):
                bboxes[i] = self.__convert_to_yolo_box_params(box, image.shape[1], image.shape[0])
        transformed = self.transforms(image=image, bboxes=bboxes, class_labels=class_labels)
        transformed_image = transformed['image']
        transformed_bboxes = torch.tensor(transformed['bboxes'])
        transformed_class_labels = torch.tensor(transformed['class_labels'])
        """
        create a target matrix
        each grid cell = [P, x, y, w, h, c1, c2, c3]
        size of grid cell = S * S
        if we have more then one box in grid cell then we choose the last box
        x, y values are calculated relative to the grid cell
        """
        target = torch.tensor([[0] * (5 + self.C)] * self.S * self.S, dtype=torch.float32)
        target = target.reshape((self.S, self.S, (5 + self.C)))
        for i, box in enumerate(transformed_bboxes):
            # One-hot class vector for this box.
            class_tensor = torch.zeros(self.C, dtype=torch.float32)
            class_tensor[transformed_class_labels[i]] = 1
            # Grid cell that owns the box centre.
            x_cell = int(self.S * box[0])
            y_cell = int(self.S * box[1])
            target[y_cell, x_cell] = torch.cat((torch.tensor(
                [
                    1,
                    self.S * box[0] - x_cell,
                    self.S * box[1] - y_cell,
                    box[2],
                    box[3]
                ]
            ), class_tensor), dim=0)
        return {"image": transformed_image, "target": target}
    def __len__(self):
        """Number of indexed image/annotation pairs."""
        return len(self.image_paths)
    def __get_boxes_from_txt(self, txt_filename: str):
        """Parse YOLO-format lines: "<class> <cx> <cy> <w> <h>" per object."""
        boxes = []
        class_labels = []
        with open(txt_filename) as f:
            for obj in f:
                param_list = list(map(float, obj.split()))
                boxes.append(param_list[1:])
                class_labels.append(int(param_list[0]))
        return boxes, class_labels
    def __get_boxes_from_xml(self, xml_filename: str):
        """Parse VOC-style XML: corner boxes plus class labels via class2tag."""
        boxes = []
        class_labels = []
        with open(xml_filename) as f:
            xml_content = xmltodict.parse(f.read())
        xml_object = xml_content['annotation']['object']
        # xmltodict yields a dict for a single object, a list for several.
        if type(xml_object) is dict:
            xml_object = [xml_object]
        if type(xml_object) is list:
            for obj in xml_object:
                boxe_list = list(map(float, [obj['bndbox']['xmin'], obj['bndbox']['ymin'], obj['bndbox']['xmax'],
                                             obj['bndbox']['ymax']]))
                boxes.append(boxe_list)
                class_labels.append(self.class2tag[obj['name']])
        return boxes, class_labels
    def __convert_to_yolo_box_params(self, box_coordinates: List[int], im_w, im_h):
        """Convert corner box [xmin, ymin, xmax, ymax] to normalized YOLO params."""
        ans = list()
        ans.append((box_coordinates[0] + box_coordinates[2]) / 2 / im_w)  # x_center
        ans.append((box_coordinates[1] + box_coordinates[3]) / 2 / im_h)  # y_center
        ans.append((box_coordinates[2] - box_coordinates[0]) / im_w)  # width
        ans.append((box_coordinates[3] - box_coordinates[1]) / im_h)  # height
        return ans
| AlexeyDate/YOLOv1 | model/dataset.py | dataset.py | py | 4,723 | python | en | code | 2 | github-code | 13 |
# Interactive decimal -> binary converter for values in 0..255.
decimal_str = input("Enter an integer")
decimal = int(decimal_str)
binary = []
# BUG FIX: the original tested `decimal > 256`, letting 256 through even
# though the error message promises "less than 256".
if decimal >= 256:
    print("Only numbers less than 256")
elif decimal == 0:
    print("0")
elif decimal < 0:
    print("Only positive numbers can be used")
else:
    # Repeated division by 2: the remainders are the bits, least
    # significant first.
    while decimal > 0:
        decimal, remainder = divmod(decimal, 2)
        binary.append(remainder)
    print(binary[::-1])  # Must print in reverse order to work
| JLevins189/Python | Labs/Lab3/decimalToBinary.py | decimalToBinary.py | py | 444 | python | en | code | 0 | github-code | 13 |
from jinja2 import Environment, FileSystemLoader
import os

# Load template.j2 from the current directory.
ENV = Environment(loader=FileSystemLoader('.'))
template = ENV.get_template('template.j2')

# Sample interface data used to fill the template.
interface_dict = {
    "name": "GigabitEthernet0/1",
    "description": "Server Port",
    "vlan": 10
}
interface_dict2 = {"name": "GigabitEthernet0/2", "description": "Access Port", "vlan": 20}

x = template.render(interface=interface_dict2)
print(x)
print(type(x))

# Mode 'w' truncates any existing file, so the previous
# os.path.exists()/os.remove() dance was redundant; the context manager also
# guarantees the handle is closed even if the write fails.
with open('template_config.txt', 'w') as config_file:
    config_file.write(x)
| eddyedwards454/CS-Ping-Script | template.py | template.py | py | 775 | python | en | code | 0 | github-code | 13 |
30749148123 | import random
import os
from collections import defaultdict, deque
import numpy as np
import matplotlib as mpl
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import csv
from src.icn_gym import *
## Global Parameters
# Routing policies the environment can be driven with.
actions = ["xy", "random_oblivious", "turn_model_oblivious", "turn_model_adaptive"]
a_size = len(actions) # space size of action
Q = defaultdict(lambda: np.zeros(a_size)) # Q-Table
dicts = defaultdict(list)
action_index = random.randint(0, 100)%2
action = actions[action_index]
iter_step = 6 # injection from 0.1 to 0.6
total_episodes = 1 # Game Playing times
epsilon = 1.0 # exploration rate
eps_min = 0.01
eps_decay = 0.999
### Plot Notebooks
time_history = []
rew_history = []
# NOTE(review): Q is re-initialized here and never updated below; epsilon /
# eps_min / eps_decay are likewise unused in this baseline run.
Q = defaultdict(lambda: np.zeros(a_size))
state = 0.1 # = Injection_rate as reset state env.reset()
# dicts = ICN_env(state, action) # ICM simulate()
# Baseline sweep: run the simulator with fixed "xy" routing at injection
# rates 0.1 .. 0.6; rewards are recorded as 0 (no learning happens here).
for i in range(iter_step):
    state = (i+1)/10 # get next state
    action = "xy"
    dicts = ICN_env(state, action)
    # action = actions[random.randint(0, 100)%2]
    rew_history.append(0) # Recording rewards
print('Q-Table = ', Q)
print('Reward = ', rew_history)
# print('Dicts = ',dicts)
# Dump the per-step simulator statistics (one row per recorded sample).
csv_columns = ['average_flit_latency','average_packet_queueing_latency','average_flit_network_latency','average_flit_queueing_latency','packets_injected', 'average_packet_network_latency', 'average_hops', 'flits_injected', 'packets_received', 'flits_received', 'average_packet_latency']
csv_file = 'Inter_Connect_Networks/Tables/env_base_'+str(iter_step)+'_' +str(total_episodes)+ '.csv'
try:
    with open(csv_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(csv_columns)
        for i in range(len(dicts['average_flit_latency'])):
            writer.writerow([dicts[key][i] for key in csv_columns])
except IOError:
    print("I/O error")
# np.savetxt("Reward_history.csv", rew_history, delimiter=",")
### Plotting
# print("Learning Performance")
# Render the (flat) reward curve to a PNG; 'agg' backend so no display needed.
mpl.rcdefaults()
mpl.rcParams.update({'font.size': 16})
fig, ax = plt.subplots(figsize=(10,4))
# plt.grid(True, linestyle='--')
plt.title('ICNs Learning')
# plt.plot(range(len(time_history)), time_history, label='Steps', marker="^", linestyle=":")#, color='red')
plt.plot(range(len(rew_history)), rew_history, label='Reward', marker="", linestyle="-")#, color='k')
plt.xlabel('Episodes')
plt.ylabel('Reward')
plt.savefig('Inter_Connect_Networks/Figures/shuffle_SARSA_'+str(iter_step)+'_'+str(total_episodes)+'_ICN.png', bbox_inches='tight')
def homework_6(nodes):
    """Total weight of a minimum spanning tree over ``nodes``.

    Uses Prim's algorithm (O(n^2)) with Manhattan distance between points.
    ``nodes`` is a list of [x, y] pairs; the first point seeds the tree.
    """
    # cost[(x, y)] = cheapest known connection of that point to the tree.
    cost = {}
    for idx, (x, y) in enumerate(nodes):
        cost[(x, y)] = 0 if idx == 0 else float("inf")
    total = 0
    while cost:
        # Attach the point that is currently cheapest to connect.
        best = min(cost, key=cost.get)
        total += cost.pop(best)
        bx, by = best
        # Relax the remaining points against the newly attached one.
        for point in cost:
            d = abs(bx - point[0]) + abs(by - point[1])
            if d < cost[point]:
                cost[point] = d
    return total
if __name__ == '__main__':
    # Sample run; expected MST total for these points is 22.
    nodes = [[0,0],[2,6],[3,9],[6,4],[7,1]]
    print(homework_6(nodes))
# 22
| daniel880423/Member_System | file/hw6/1100411/hw6_s1100411_1.py | hw6_s1100411_1.py | py | 820 | python | en | code | 0 | github-code | 13 |
24511290892 | '''
The Euclidean algorithm, or Euclid's algorithm, is an efficient method for computing the greatest common divisor (GCD) of two integers (numbers),
the largest number that divides them both without a remainder.
'''
def gcd(a, b):
    """Greatest common divisor of a and b via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of a and b, using lcm(a, b) = a*b // gcd(a, b)."""
    return (a * b) // gcd(a, b)
# Read t test cases of "a b" and print the GCD and LCM of each pair.
t = int(input("Enter number of test cases"))
for _ in range(t):
    a,b= map(int, input().split())
    print(gcd(a,b), lcm(a,b), sep=" ")
| Rajjada001/Top-25-Algorithms | 1.EuclideanAlgo.py | 1.EuclideanAlgo.py | py | 467 | python | en | code | 0 | github-code | 13 |
34929305819 | import discord
import os
import random
import praw
import hostbot
from hostbot import keep_alive
from itertools import cycle
from discord.ext import commands, tasks
from dotenv import load_dotenv
import requests
import math
import pyjokes
import datetime
from imgurpython import ImgurClient
import configparser
import asyncio
# Credentials are read from the environment (.env file).
load_dotenv()
TOKEN=os.getenv('DISCORD_TOKEN')
intents = discord.Intents.default()
intents.members = True
#used prefix
client = commands.Bot(command_prefix=".",intents=intents)
#cycle bot statuses
status = cycle(['Coded By: Luffyguy', '.help', " with Luffyguy"])
#remove default help command (replaced by the custom .help embed below)
client.remove_command('help')
#tells us when bot is active
@client.event
async def on_ready():
    """Start the rotating-status task once the bot is connected."""
    change_status.start()
    print("Bot is ready")
#loop bot statuses
@tasks.loop(seconds=5)
async def change_status():
    """Advance the presence text to the next entry every 5 seconds."""
    await client.change_presence(activity=discord.Game(next(status)))
#-----general responses-----
#says hi
@client.command()
async def hi(ctx):
    """Reply with a greeting."""
    await ctx.send("Hello")
#creep
@client.command()
async def porn(ctx):
    """Deflect with a canned reply."""
    await ctx.send('```\nLook for it yourself!```')
#just laughs
@client.command()
async def laugh(ctx):
    """Reply with a laugh."""
    await ctx.send("Ha Ha Ha.....")
#responds randomly
@client.command()
async def lol(ctx):
    """Reply with a random canned taunt."""
    # NOTE(review): "responces" is a typo but only a local name — harmless.
    responces = [
        "Was it that funny?",
        "Noob Alert!",
        "Watch your tone dude!",
        "You are that dumb Lmao",
        "No comments",
        "You are such a kid dude",
    ]
    await ctx.send(random.choice(responces))
#square of a number
@client.command()
async def square(ctx,number):
    """Send the square of an integer argument."""
    squared_number = int(number) ** 2
    await ctx.send("The square of " + str(number) + " is " + str(squared_number))
#cube of a number
@client.command()
async def cube(ctx,number1):
    """Send the cube of an integer argument."""
    cubed_number = int(number1) ** 3
    await ctx.send("The cube of " + str(number1) + " is " + str(cubed_number))
#adds 2 numbers
@client.command()
async def add(ctx,number1,number2):
    """Send the sum of two integer arguments."""
    summed_number = int(number1) + int(number2)
    await ctx.send("The sum of " + str(number1) + " and " + str(number2) + " is " + str(summed_number))
#subtracts 2 numbers
@client.command()
async def diff(ctx,number1,number2):
    """Send the difference of two integer arguments.

    NOTE(review): the reply text misspells "differnce"; it is user-visible,
    so it is left untouched here.
    """
    subtracted_number = int(number1) - int(number2)
    await ctx.send("The differnce of " + str(number1) + " and " + str(number2) + " is " + str(subtracted_number))
#----------------------------------------------
#to clear chat
@client.command()
@commands.has_permissions(manage_messages=True)
async def clear(ctx,amount=1):#algorithm channel id #project channel id
    """Purge the last `amount` messages (+1 for the command itself).

    Purging is disabled in the two hard-coded channels below — presumably
    the "algorithm" and "project" channels per the inline comment.
    """
    if ctx.channel.id != 778499893249310730 and ctx.channel.id !=778225956971347988:
        await ctx.channel.purge(limit=amount+1)
#new Help(embed)
@client.command()
async def help(ctx):
    """Send the custom help embed listing all commands to the channel."""
    embed = discord.Embed(
        title = 'Help',
        description = '```\nPrefix : .```',
        colour = discord.Colour.red()
    )
    embed.set_footer(text='by Luffyguy')
    embed.set_image(url='https://media.giphy.com/media/8aSSX6v0OwcDsHYnZ7/giphy.gif')
    embed.set_thumbnail(url='https://media.giphy.com/media/oaqHoQWu1Bk9FB5wsv/giphy.gif')
    embed.set_author(name= "Help",
    icon_url='https://imgur.com/f1nKCsD.png')
    embed.add_field(name= '- Hi/Lol/Laugh : ', value= '```\n.hi/lol/laugh```', inline=False)
    embed.add_field(name= '- Add : ', value= '```\n.add <number1> <number2>```', inline=False)
    embed.add_field(name= '- Differnce : ', value= '```\n.diff <number1> <number2>```', inline=True)
    embed.add_field(name= '- Clear Chat : ', value= '```\n.clear [amount=1]>```', inline=False)
    embed.add_field(name= '- Square/Cube : ', value= '```\n.square/cube <number>```', inline=True)
    embed.add_field(name= '- Wallpapers : ', value= '```\n.wp <keyword>```', inline=True)
    embed.add_field(name= '- Meme : ', value= '```\n.meme <keyword>```', inline=False)
    embed.add_field(name= '- Gif : ', value= '```\n.gif <keyword>```', inline=True)
    embed.add_field(name= '- Server Info : ', value= '```\n.server```', inline=True)
    embed.add_field(name= '- Jokes : ', value= '```\n.joke```', inline=True)
    embed.add_field(name= '- Bot Commands: ', value= '```\n.bc (you will get a dm)```', inline=True)
    await ctx.send(embed=embed)
#Commands(embed dm)
@client.command(pass_context=True)
async def bc(ctx):
    """DM the invoking user the same command-list embed as .help."""
    # NOTE(review): `author` is assigned but unused; the send below goes
    # through ctx.message.author directly.
    author = ctx.message.author
    embed = discord.Embed(title = 'Commands',
        description = '```\nPrefix : .```',
        colour = discord.Colour.red()
    )
    embed.set_footer(text='by Luffyguy')
    embed.set_image(url='https://media.giphy.com/media/8aSSX6v0OwcDsHYnZ7/giphy.gif')
    embed.set_thumbnail(url='https://media.giphy.com/media/oaqHoQWu1Bk9FB5wsv/giphy.gif')
    embed.set_author(name= "Commands",
    icon_url='https://imgur.com/f1nKCsD.png')
    embed.add_field(name= '- Hi/Lol/Laugh : ', value= '```\n.hi/lol/laugh```', inline=False)
    embed.add_field(name= '- Add : ', value= '```\n.add <number1> <number2>```', inline=False)
    embed.add_field(name= '- Differnce : ', value= '```\n.diff <number1> <number2>```', inline=True)
    embed.add_field(name= '- Clear Chat : ', value= '```\n.clear [amount=1]>```', inline=False)
    embed.add_field(name= '- Square/Cube : ', value= '```\n.square/cube <number>```', inline=True)
    embed.add_field(name= '- Wallpapers : ', value= '```\n.wp <keyword>```', inline=True)
    embed.add_field(name= '- Meme : ', value= '```\n.meme <keyword>```', inline=False)
    embed.add_field(name= '- Gif : ', value= '```\n.gif <keyword>```', inline=True)
    embed.add_field(name= '- Server Info : ', value= '```\n.server```', inline=True)
    embed.add_field(name= '- Jokes : ', value= '```\n.joke```', inline=True)
    await ctx.message.author.send(embed=embed)
#server info
@client.command()
async def server(ctx):
    """Send an embed with basic information about the current guild."""
    name = str(ctx.guild.name)
    # NOTE(review): `description` is collected but never added to the embed.
    description = str(ctx.guild.description)
    owner = str(ctx.guild.owner)
    id = str(ctx.guild.id)
    region = str(ctx.guild.region)
    memberCount = str(ctx.guild.member_count)
    icon = str(ctx.guild.icon_url)
    embed = discord.Embed(
        title = name + " Server Information",
        color=discord.Color.blue()
    )
    embed.set_thumbnail(url=icon)
    embed.add_field(name="Owner", value=owner, inline=True)
    embed.add_field(name="Server ID", value=id, inline=True)
    embed.add_field(name="Region", value=region, inline=True)
    embed.add_field(name="Member Count", value=memberCount, inline=True)
    await ctx.send(embed=embed)
#reddit memes
# NOTE(review): the Reddit client_id is hard-coded while the other
# credentials come from the environment — consider moving it to .env too.
reddit = praw.Reddit(client_id = "_xC37StY7xVaAg",client_secret = os.getenv('R_CLIENTSECRET'),username = os.getenv('R_USERNAME'),password = os.getenv('R_PASSWORD'),user_agent = "Luffybot")
@client.command(pass_context=True)
async def meme(ctx,subred = 'memes'):
    """Post a random top-50 submission from the given subreddit as an embed."""
    subreddit = reddit.subreddit(subred)
    all_subs =[]
    top = subreddit.top(limit = 50)
    for submission in top:
        all_subs.append(submission)
    random_sub =random.choice(all_subs)
    name = random_sub.title
    url = random_sub.url
    em = discord.Embed(title = name, color=discord.Colour.green())
    em.set_image(url = url)
    #channel = client.get_channel(778661570980741160)
    #await channel.send(embed = em)
    await ctx.send(embed = em)
#jokes
@client.command()
async def joke(ctx):
    """Send a random programming joke."""
    await ctx.send(pyjokes.get_joke())
#fetch wallpapers from wallhaven.cc
@client.command()
async def wp(ctx,keyword='anime'):
    """Send a random wallhaven.cc wallpaper matching `keyword`.

    Falls back to an 'anime' search if the keyword yields no usable result.
    NOTE(review): requests.get is a blocking call inside an async handler;
    it stalls the event loop while the HTTP request runs.
    """
    response = requests.get('https://wallhaven.cc/api/v1/search?q='+keyword +'&purity=100&apikey='+os.getenv('WALL_API'))
    json1=response.json()
    print(json1)
    index=math.floor(random.random() * len(json1['data']))
    #channel = client.get_channel(778649939764576338)
    try:
        #await channel.send(json1['data'][index]["path"])
        await ctx.send(json1['data'][index]["path"])
    except:
        response = requests.get('https://wallhaven.cc/api/v1/search?q=anime' +'&purity=100&apikey='+os.getenv('WALL_API'))
        json1=response.json()
        index=math.floor(random.random() * len(json1['data']))
        #channel = client.get_channel(778649939764576338)
        #await channel.send(json1['data'][index]["path"])
        await ctx.send(json1['data'][index]["path"])
    #await ctx.send(json1['data'][index]["path"])
    #finally:
    #auth = ctx.author
    #channel = client.get_channel(778649939764576338)
    #await channel.send(f'here {auth.mention}')
#fetch nsfw wallpapers from wallhaven.cc
@client.command(pass_context=True)
async def sx(ctx,keyword='nsfw'):
    """Endlessly post wallpapers to a fixed channel every 2 seconds.

    NOTE(review): the `while True` loop never terminates, and
    random.choice(keyword) picks a single CHARACTER from the keyword string —
    presumably a list of keywords was intended. TODO confirm.
    """
    timeout=1
    response = requests.get('https://wallhaven.cc/api/v1/search?q='+random.choice(keyword) +'&purity=111&apikey='+os.getenv('WALL_API'))
    json1=response.json()
    print(len(json1['data']))
    while True:
        index=math.floor(random.random() * len(json1['data']))
        channel = client.get_channel(803603760953294889)
        try:
            await channel.send(json1['data'][index]["path"])
            await asyncio.sleep(timeout*2)
        except:
            response = requests.get('https://wallhaven.cc/api/v1/search?q=nude' +'&purity=111&apikey='+os.getenv('WALL_API'))
            json1=response.json()
            index=math.floor(random.random() * len(json1['data']))
            channel = client.get_channel(803603760953294889)
            await channel.send(json1['data'][index]["path"])
            await asyncio.sleep(timeout*2)
    #await ctx.send(json1['data'][index]["path"])
    #finally:
    #auth = ctx.author
    #channel = client.get_channel(803603760953294889)
    #await channel.send(f'here {auth.mention}')
#fetch gif
@client.command()
async def gif(ctx,keyword='code'):
    """Send a random Tenor GIF matching `keyword` (falls back to 'code')."""
    response = requests.get('https://api.tenor.com/v1/search?q='+keyword+'&key='+os.getenv('TENOR')+'&limit=8')
    json1=response.json()
    print(json1)
    index=math.floor(random.random() * len(json1['results']))
    try:
        await ctx.channel.send(json1['results'][index]["url"])
    except:
        response = requests.get('https://api.tenor.com/v1/search?q=code&key='+os.getenv('TENOR')+'&limit=8')
        json1=response.json()
        index=math.floor(random.random() * len(json1['results']))
        await ctx.channel.send(json1['results'][index]["url"])
#technical Stuff
@client.command()
async def rd(ctx,subred = 'NSFW_Wallpapers'):
    """Post 60 reddit wallpapers to a fixed channel, one per minute.

    NOTE(review): the `subred` parameter is ignored — only 'wallpapers'
    is ever used; the try/except around the top() iteration swallows any
    praw error and retries with hot() instead.
    """
    c=0
    while c!=60:
        timeout=1
        subreds=['wallpapers']
        all_subs =[]
        index=math.floor(random.random()*len(subreds))
        subreddit = reddit.subreddit(subreds[index])
        print(subreds[index])
        top = subreddit.top(limit = 25)
        hot=subreddit.hot(limit = 50)
        try:
            for submission in top:
                all_subs.append(submission)
        except:
            for submission in hot:
                all_subs.append(submission)
        random_sub =random.choice(all_subs)
        name = random_sub.title
        url = random_sub.url
        em = discord.Embed(title = name, color=discord.Colour.green())
        em.set_image(url = url)
        channel = client.get_channel(803603760953294889)
        await channel.send(url)
        await asyncio.sleep(timeout*60)
        c=c+1
#fetch images from imgur
@client.command()
async def img(ctx,keyword):
    """Send a random imgur gallery image.

    NOTE(review): the user-supplied `keyword` is immediately overwritten with
    a random pick from ["anime", "Fighting"]; the local `client` variable also
    shadows the module-level discord client inside this function.
    """
    keyword=random.choice(["anime","Fighting"])
    config = configparser.ConfigParser()
    config.read('auth.ini')
    client_id = config.get('credentials', 'client_id')
    client_secret = config.get('credentials', 'client_secret')
    client = ImgurClient(client_id, client_secret)
    # Extracts the items (images) on the front page of imgur.
    items = client.gallery_search(f'{keyword}', advanced=None, sort='time', window='all', page=0)
    n=math.floor(random.random()*len(items))
    await ctx.channel.send(items[n].link+'.jpg')
#welcome message
@client.event
async def on_member_join(member):
    """Greet a new member in the welcome channel and by DM."""
    # Hard-coded guild and welcome-channel ids for this server.
    guild =client.get_guild(777598102882091018)
    channel = guild.get_channel(778645688929615902)
    embed = discord.Embed(
        title = "**Welcome**",
        description = (f'Welcome to the {guild.name } server , {member.mention}!:partying_face: \n You are the {len(list(member.guild.members))} member ! '),
        colour = discord.Colour.green(),
        timestamp=datetime.datetime.utcfromtimestamp(1611660157)
    )
    embed.set_footer(text='by Luffyguy')
    embed.set_image(url='https://media.giphy.com/media/8aSSX6v0OwcDsHYnZ7/giphy.gif')
    embed.set_thumbnail(url=f'{member.avatar_url}')
    embed.set_author(name= "HellHole",
    icon_url=f'{member.guild.icon_url}')
    await channel.send(embed=embed)
    await member.send(embed=embed)
# Keep the hosting web-server alive (repl-style hosting) and start the bot.
keep_alive()
client.run(TOKEN)
| luffyguy/Discord-bot | Luffy_discord_bot.py | Luffy_discord_bot.py | py | 12,939 | python | en | code | 0 | github-code | 13 |
71540947537 | import os
import sys
import time
import json
import win32pipe
import win32file
import pywintypes
def clearConsole():
    """Clear the terminal screen using the platform's shell command."""
    # Windows family uses 'cls'; everything else gets 'clear'.
    command = 'cls' if os.name in ('nt', 'dos') else 'clear'
    os.system(command)
def get_data():
    """Read one JSON message from the \\\\.\\pipe\\dota_data named pipe.

    Retries every second while the pipe does not exist yet (win32 error 2);
    gives up on error 109 (broken pipe), in which case None is returned
    implicitly.
    """
    # NOTE(review): `quit` shadows the builtin; `res` and `result` are unused.
    quit = False
    while not quit:
        try:
            handle = win32file.CreateFile(
                r"\\.\pipe\dota_data",
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                0,
                None,
            )
            # Switch the pipe to message-read mode before reading.
            res = win32pipe.SetNamedPipeHandleState(
                handle, win32pipe.PIPE_READMODE_MESSAGE, None, None
            )
            result, dota_data = win32file.ReadFile(handle, 64 * 1024)
            return json.loads(dota_data.decode("utf-8"))
        except pywintypes.error as e:
            if e.args[0] == 2:
                # ERROR_FILE_NOT_FOUND: pipe not created yet — wait and retry.
                time.sleep(1)
            elif e.args[0] == 109:
                # ERROR_BROKEN_PIPE: producer went away — stop retrying.
                quit = True
def main():
    """Continuously poll the pipe and dump each message to a freshly cleared console."""
    while True:
        clearConsole()
        # get_data() blocks until a message arrives (or returns None once the
        # pipe goes away for good).
        print(get_data())
if __name__ == "__main__":
    main()
| FixedOctocat/Dota2-helper | src/console.py | console.py | py | 1,168 | python | en | code | 2 | github-code | 13 |
9037679305 | import pytest
from pages.loginPage import Login_Page
from testData import constants as constants
from pages.auditLogs import Audit_Logs
from pages.verifyData import Envelope_History
from utilities.utils import Util_Test
@pytest.mark.usefixtures("test_setup")
class Test_EnvelopeHistory():
    """End-to-end verification of envelope history, audit logs and the CSV report.

    The ``test_setup`` fixture is expected to attach a Selenium ``driver``
    to the test instance.
    """

    def test_verify_envelopeHistory_auditLogs(self):
        """Log in as the sender, then verify date format, envelope history,
        audit logs and the exported envelope report."""
        driver = self.driver
        driver.get(constants.baseUrl)
        # Authenticate before any envelope data can be inspected.
        login = Login_Page(driver)
        login.login_page(constants.senderEmail, constants.senderPassword)
        # A single page object covers both history checks (the original code
        # instantiated Envelope_History twice for no reason).
        history = Envelope_History(driver)
        history.verify_dateFormat()
        history.verify_envelope_history()
        # Verify Audit logs.
        logs = Audit_Logs(driver)
        logs.verify_auditLogs()
        # Cross-check the exported envelope report CSV ('csv' renamed to avoid
        # shadowing the stdlib module name).
        util = Util_Test(driver)
        util.read_data_from_csv(constants.csv_envelope_report)
| Sathvik41/DocuSignAutomation1 | tests/envelope_history.py | envelope_history.py | py | 925 | python | en | code | 0 | github-code | 13 |
26118119445 | # _*_ coding:utf-8 _*_
from __future__ import print_function
import pandas as pd
import csv
import sys
# For every row whose prediction flag (column 1 of predict.csv) is '1',
# write "<id>,<column 7>,<score>" to FILE_3.csv.
#
# Fixes over the original version:
#   * sys.stdout is no longer hijacked and left permanently redirected --
#     output goes straight to the file, which is properly closed (and
#     therefore flushed) by the `with` block.
#   * unused counters (number1, wrongnumber1, number2, wrongnumber2, miss)
#     and the dangling `temp = sys.stdout` backup were removed.
with open("data.csv", 'r') as csvfile, \
        open("predict.csv", 'r') as predictfile, \
        open('FILE_3.csv', 'w') as outfile:
    lines = list(csv.reader(csvfile, delimiter=','))
    linetuples = list(csv.reader(predictfile, delimiter=','))
    # Rows in the two files correspond positionally.
    for line, linetuple in zip(lines, linetuples):
        if linetuple[1] == '1':
            print("{},{},{}".format(line[0].strip(), line[7].strip(),
                                    float(linetuple[2].strip())),
                  file=outfile)
| Mr-Phoebe/CS-GY-9223 | Assigment3/Score.py | Score.py | py | 719 | python | en | code | 0 | github-code | 13 |
38431508362 | from __future__ import annotations
import copy
import datetime
import logging
import time
from .exceptions import *
FAN_MODES = ["auto", "on", "circulate", "follow schedule"]
SYSTEM_MODES = ["emheat", "heat", "off", "cool", "auto", "auto"]
HOLD_TYPES = ["schedule", "temporary", "permanent"]
EQUIPMENT_OUTPUT_STATUS = ["off/fan", "heat", "cool"]
_LOG = logging.getLogger("somecomfort")
def _hold_quarter_hours(deadline):
if deadline.minute not in (0, 15, 30, 45):
raise SomeComfortError("Invalid time: must be on a 15-minute boundary")
return int(((deadline.hour * 60) + deadline.minute) / 15)
def _hold_deadline(quarter_hours) -> datetime.time:
minutes = quarter_hours * 15
return datetime.time(hour=int(minutes / 60), minute=minutes % 60)
class Device(object):
    """Device class for Honeywell device.

    Wraps one thermostat returned by the Total Connect Comfort API. State is
    cached in ``_data`` (the ``latestData`` payload) and refreshed on demand
    via :meth:`refresh`; setters push changes through the shared client and
    then update the local cache optimistically.
    """
    def __init__(self, client, location):
        self._client = client        # API client used for all requests
        self._location = location    # owning location object
        self._data = {}              # cached "latestData" payload
        self._last_refresh = 0       # time.time() of the last refresh
        self._deviceid = None
        self._macid = None
        self._name = None
        self._alive = None           # "deviceLive" flag from the API
        self._commslost = None       # "communicationLost" flag from the API
    @classmethod
    async def from_location_response(cls, client, location, response) -> Device:
        """Extract device from location response."""
        self = cls(client, location)
        self._deviceid = response["DeviceID"]
        self._macid = response["MacID"]
        self._name = response["Name"]
        await self.refresh()
        return self
    async def refresh(self) -> None:
        """Refresh the Honeywell device data."""
        data = await self._client.get_thermostat_data(self.deviceid)
        if data is not None:
            if not data["success"]:
                # Failure is only logged; the payload below is applied anyway.
                _LOG.error("API reported failure to query device %s" % self.deviceid)
            self._alive = data["deviceLive"]
            self._commslost = data["communicationLost"]
            self._data = data["latestData"]
            self._last_refresh = time.time()
    @property
    def deviceid(self) -> str:
        """The device identifier"""
        return self._deviceid
    @property
    def mac_address(self) -> str:
        """The MAC address of the device"""
        return self._macid
    @property
    def name(self) -> str:
        """The user-set name of this device"""
        return self._name
    @property
    def is_alive(self) -> bool:
        """A boolean indicating whether the device is connected"""
        return self._alive and not self._commslost
    @property
    def fan_running(self) -> bool:
        """Returns a boolean indicating the current state of the fan"""
        if self._data["hasFan"]:
            return self._data["fanData"]["fanIsRunning"]
        return False
    @property
    def fan_mode(self) -> str | None:
        """Returns one of FAN_MODES indicating the current setting"""
        try:
            return FAN_MODES[self._data["fanData"]["fanMode"]]
        except (KeyError, TypeError, IndexError):
            # Devices without a fan legitimately have no fan mode.
            if self._data["hasFan"]:
                raise APIError("Unknown fan mode %s" % self._data["fanData"]["fanMode"])
            else:
                return None
    async def set_fan_mode(self, mode) -> None:
        """Set the fan mode async."""
        try:
            mode_index = FAN_MODES.index(mode)
        except ValueError as ex:
            raise SomeComfortError("Invalid fan mode %s" % mode) from ex
        # e.g. "fanModeAutoAllowed" -- capability flag reported by the API.
        key = f"fanMode{mode.title()}Allowed"
        if not self._data["fanData"][key]:
            raise SomeComfortError("Device does not support %s" % mode)
        await self._client.set_thermostat_settings(
            self.deviceid, {"FanMode": mode_index}
        )
        # Optimistically update the local cache.
        self._data["fanData"]["fanMode"] = mode_index
    @property
    def system_mode(self) -> str:
        """Returns one of SYSTEM_MODES indicating the current setting"""
        try:
            return SYSTEM_MODES[self._data["uiData"]["SystemSwitchPosition"]]
        except KeyError as exc:
            raise APIError(
                "Unknown system mode %s"
                % (self._data["uiData"]["SystemSwitchPosition"])
            ) from exc
    async def set_system_mode(self, mode) -> None:
        """Async set the system mode."""
        try:
            mode_index = SYSTEM_MODES.index(mode)
        except ValueError as exc:
            raise SomeComfortError(f"Invalid system mode {mode}") from exc
        # "emheat" maps to a differently named capability flag.
        if mode == "emheat":
            key = "SwitchEmergencyHeatAllowed"
        else:
            key = f"Switch{mode.title()}Allowed"
        try:
            if not self._data["uiData"][key]:
                raise SomeComfortError(f"Device does not support {mode}")
        except KeyError as exc:
            raise APIError(f"Unknown Key: {key}") from exc
        await self._client.set_thermostat_settings(
            self.deviceid, {"SystemSwitch": mode_index}
        )
        self._data["uiData"]["SystemSwitchPosition"] = mode_index
    @property
    def setpoint_cool(self) -> float:
        """The target temperature when in cooling mode"""
        return self._data["uiData"]["CoolSetpoint"]
    async def set_setpoint_cool(self, temp) -> None:
        """Async set the target temperature when in cooling mode"""
        lower = self._data["uiData"]["CoolLowerSetptLimit"]
        upper = self._data["uiData"]["CoolUpperSetptLimit"]
        if temp > upper or temp < lower:
            raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
        await self._client.set_thermostat_settings(
            self.deviceid, {"CoolSetpoint": temp}
        )
        self._data["uiData"]["CoolSetpoint"] = temp
    @property
    def setpoint_heat(self) -> float:
        """The target temperature when in heating mode"""
        return self._data["uiData"]["HeatSetpoint"]
    async def set_setpoint_heat(self, temp) -> None:
        """Async set the target temperature when in heating mode"""
        lower = self._data["uiData"]["HeatLowerSetptLimit"]
        upper = self._data["uiData"]["HeatUpperSetptLimit"]
        # HA sometimes doesn't send the temp, so set to current
        if temp is None:
            temp = self._data["uiData"]["HeatSetpoint"]
            _LOG.error("Didn't receive the temp to set. Setting to current temp.")
        if temp > upper or temp < lower:
            raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
        await self._client.set_thermostat_settings(
            self.deviceid, {"HeatSetpoint": temp}
        )
        self._data["uiData"]["HeatSetpoint"] = temp
    def _get_hold(self, which) -> bool | datetime.time:
        """Return the hold state for 'Heat' or 'Cool'.

        False = following the schedule, True = permanent hold, and a
        datetime.time = temporary hold until that time.
        """
        try:
            hold = HOLD_TYPES[self._data["uiData"][f"Status{which}"]]
        except KeyError as exc:
            mode = self._data["uiData"][f"Status{which}"]
            raise APIError(f"Unknown hold mode {mode}") from exc
        period = self._data["uiData"][f"{which}NextPeriod"]
        if hold == "schedule":
            return False
        if hold == "permanent":
            return True
        else:
            return _hold_deadline(period)
    async def _set_hold(self, which, hold, temperature=None) -> None:
        """Apply a hold (True/False/datetime.time) and optionally a setpoint.

        Note that regardless of *which*, both StatusCool and StatusHeat are
        updated together; *which* only selects the setpoint limits/field.
        """
        settings = {}
        if hold is True:
            settings = {
                "StatusCool": HOLD_TYPES.index("permanent"),
                "StatusHeat": HOLD_TYPES.index("permanent"),
                # "%sNextPeriod" % which: 0,
            }
        elif hold is False:
            settings = {
                "StatusCool": HOLD_TYPES.index("schedule"),
                "StatusHeat": HOLD_TYPES.index("schedule"),
                # "%sNextPeriod" % which: 0,
            }
        elif isinstance(hold, datetime.time):
            # Temporary hold until the given (15-minute aligned) time.
            qh = _hold_quarter_hours(hold)
            settings = {
                "StatusCool": HOLD_TYPES.index("temporary"),
                "CoolNextPeriod": qh,
                "StatusHeat": HOLD_TYPES.index("temporary"),
                "HeatNextPeriod": qh,
            }
        else:
            raise SomeComfortError("Hold should be True, False, or datetime.time")
        if temperature:
            lower = self._data["uiData"][f"{which}LowerSetptLimit"]
            upper = self._data["uiData"][f"{which}UpperSetptLimit"]
            if temperature > upper or temperature < lower:
                raise SomeComfortError(f"Setpoint outside range {lower}-{upper}")
            settings.update({f"{which}Setpoint": temperature})
        await self._client.set_thermostat_settings(self.deviceid, settings)
        self._data["uiData"].update(settings)
    @property
    def hold_heat(self) -> bool | datetime.time:
        """Return hold heat mode."""
        return self._get_hold("Heat")
    async def set_hold_heat(self, value, temperature=None) -> None:
        """Async set hold heat mode."""
        await self._set_hold("Heat", value, temperature)
    @property
    def hold_cool(self) -> bool | datetime.time:
        """Return hold cool mode."""
        return self._get_hold("Cool")
    async def set_hold_cool(self, value, temperature=None) -> None:
        """Async set hold cool mode."""
        await self._set_hold("Cool", value, temperature)
    @property
    def current_temperature(self) -> float:
        """The current measured ambient temperature"""
        return self._data["uiData"]["DispTemperature"]
    @property
    def current_humidity(self) -> float | None:
        """The current measured ambient humidity"""
        # Only meaningful when the humidity sensor exists and is not faulted.
        return (
            self._data["uiData"].get("IndoorHumidity")
            if self._data["uiData"].get("IndoorHumiditySensorAvailable")
            and self._data["uiData"].get("IndoorHumiditySensorNotFault")
            else None
        )
    @property
    def equipment_output_status(self) -> str:
        """The current equipment output status"""
        if self._data["uiData"]["EquipmentOutputStatus"] in (0, None):
            if self.fan_running:
                return "fan"
            else:
                return "off"
        return EQUIPMENT_OUTPUT_STATUS[self._data["uiData"]["EquipmentOutputStatus"]]
    @property
    def outdoor_temperature(self) -> float | None:
        """The current measured outdoor temperature"""
        if self._data["uiData"]["OutdoorTemperatureAvailable"]:
            return self._data["uiData"]["OutdoorTemperature"]
        return None
    @property
    def outdoor_humidity(self) -> float | None:
        """The current measured outdoor humidity"""
        if self._data["uiData"]["OutdoorHumidityAvailable"]:
            return self._data["uiData"]["OutdoorHumidity"]
        return None
    @property
    def temperature_unit(self) -> str:
        """The temperature unit currently in use. Either 'F' or 'C'"""
        return self._data["uiData"]["DisplayUnits"]
    @property
    def raw_ui_data(self) -> dict:
        """The raw uiData structure from the API.
        Note that this is read only!
        """
        return copy.deepcopy(self._data["uiData"])
    @property
    def raw_fan_data(self) -> dict:
        """The raw fanData structure from the API.
        Note that this is read only!
        """
        return copy.deepcopy(self._data["fanData"])
    @property
    def raw_dr_data(self) -> dict:
        """The raw drData structure from the API.
        Note that this is read only!
        """
        return copy.deepcopy(self._data["drData"])
    def __repr__(self) -> str:
        return f"Device<{self.deviceid}:{self.name}>"
| mkmer/AIOSomecomfort | aiosomecomfort/device.py | device.py | py | 11,436 | python | en | code | 4 | github-code | 13 |
17062304384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ZhimaMerchantOrderRentCancelModel(object):
    """Request model for cancelling a Zhima merchant rent order."""

    # Fields serialized by to_alipay_dict / parsed by from_alipay_dict.
    _FIELDS = ('order_no', 'product_code')

    def __init__(self):
        # Backing storage for the two public properties below.
        self._order_no = None
        self._product_code = None

    @property
    def order_no(self):
        """Identifier of the rent order to cancel."""
        return self._order_no

    @order_no.setter
    def order_no(self, value):
        self._order_no = value

    @property
    def product_code(self):
        """Product code associated with the rent order."""
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    def to_alipay_dict(self):
        """Serialize all set (truthy) fields into a plain dict for the gateway."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # Nested models expose their own to_alipay_dict().
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = ZhimaMerchantOrderRentCancelModel()
        for field in ZhimaMerchantOrderRentCancelModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ZhimaMerchantOrderRentCancelModel.py | ZhimaMerchantOrderRentCancelModel.py | py | 1,422 | python | en | code | 241 | github-code | 13 |
42731045462 | import sys
sys.path.append('/starterbot/Lib/site-packages')
import os
import time
import requests
from slackclient import SlackClient
from testrail import *
# settings
# Maps a TestRail project name to its numeric project id (used in API paths).
project_dict = {'Consumer Site': '1', 'Agent Admin': '2','Domain SEO': '5', 'Mobile Site': '6', 'Find An Agent': '10', 'Digital Data': '11'}
client = APIClient('https://domainau.testrail.net/')
# SECURITY(review): the TestRail credentials and the Slack bot token below are
# hard-coded in source. They should be moved to environment variables or a
# secrets store, and the exposed values rotated.
client.user = 'test-emails3@domain.com.au'
client.password = 'Perfect123'
# starterbot's ID as an environment variable
BOT_ID = 'U2QK6K08J'
# constants
AT_BOT = "<@" + BOT_ID + ">:"
EXAMPLE_COMMAND = "do"
# instantiate Slack & Twilio clients
slack_client = SlackClient('xoxb-92652646290-HlpbFnWom59Zxt58XaW2Wo8F')
def handle_command(command, channel):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.
    """
    if command.startswith(EXAMPLE_COMMAND):
        response = "Sure...write some more code then I can do that!"
    else:
        # Fallback help text for anything unrecognised.
        response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
                   "* command with numbers, delimited by spaces."
    slack_client.api_call("chat.postMessage", channel=channel,
                          text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
    """Scan a batch of Slack RTM events for a message mentioning the bot.

    Returns (command_text, channel) for the first @-mention found -- the
    text after the mention, stripped and lower-cased -- or (None, None)
    when nothing in the batch is addressed to the bot.
    """
    for event in slack_rtm_output or []:
        if event and 'text' in event and AT_BOT in event['text']:
            # Keep only the text that follows the @-mention.
            command = event['text'].split(AT_BOT)[1].strip().lower()
            return command, event['channel']
    return None, None
def list_channels():
    """Return the Slack channel list, or None when the API call fails."""
    response = slack_client.api_call("channels.list")
    return response['channels'] if response.get('ok') else None
def send_message(channel_id, message):
    """Post *message* to *channel_id* as the 'TestRail' bot user."""
    slack_client.api_call(
        "chat.postMessage",
        channel=channel_id,
        text=message,
        username='TestRail',
        icon_emoji=':testrail:'
    )
def get_results():
    """Build a Slack-formatted summary of the latest test run per project.

    Only the first (most recent) run returned by get_runs is reported for
    each project in ``project_dict``.
    """
    new_results = ''
    for project in project_dict:
        runs = client.send_get('get_runs/' + project_dict[project])
        run = runs[0]
        # Slack mrkdwn block for this project's latest run.
        new_result = '''
        \n_*%s*_
        *Run Name*: %s
        *Total*: %s
        *Passed*: %s
        *Failed*: %s
        *Blocked*: %s
        *Link*: %s
        \n
        ''' %( project, str(run['name']), str(run['passed_count'] + run['failed_count'] + run['blocked_count']), str(run['passed_count']), str(run['failed_count']), str(run['blocked_count']), str(run['url']) )
        new_results += new_result
    return new_results
def get_failed_tests():
    """Collect failed tests per project.

    NOTE(review): this function looks unfinished -- the loop fetches the
    latest run for each project but never inspects it, so the function
    always returns an empty string.
    """
    new_results = ''
    for project in project_dict:
        runs = client.send_get('get_runs/' + project_dict[project])
        run = runs[0]
    return new_results
# Single-pass entry point: read one batch of RTM events, answer any command
# directed at the bot, then post the TestRail summary to the results channel.
# NOTE(review): there is no polling loop -- the original loop was apparently
# removed (see the commented-out READ_WEBSOCKET_DELAY lines); `channels` and
# `message` are computed but never used.
if __name__ == "__main__":
    #READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
    if slack_client.rtm_connect():
        print("StarterBot connected and running!")
        command, channel = parse_slack_output(slack_client.rtm_read())
        if command and channel:
            handle_command(command, channel)
        #time.sleep(READ_WEBSOCKET_DELAY)
        channels = list_channels()
        results = get_results()
        message = send_message('C2QJFRUU8', results)
    else:
        print("Connection failed. Invalid Slack token or bot ID?")
| RajaBellebon/helper | python/PythonSlackIntegration/TestRailResults.py | TestRailResults.py | py | 3,968 | python | en | code | 0 | github-code | 13 |
41701910645 | import os
import sys
import dash_bootstrap_components as dbc
import hashlib
import pandas as pd
from datetime import datetime
from dash import callback, dcc, html, Input, Output, State, ctx, get_asset_url
from PIL import ImageGrab, Image
from .config import features, DBNAME, tooltip_delay
# Module-level state shared between Dash callbacks: the two clipboard
# screenshots (background and working timeframe) ...
IMG15=None
IMG3=None
# ... and the file names they will be saved under once grabbed.
file_names={'img15':'','img3':''}
def get_img_clipboard():
    """Return the image currently on the clipboard (or None when empty)."""
    grabbed = ImageGrab.grabclipboard()
    # On Windows the clipboard may hold a list of file paths instead of
    # raw image data -- open the first file in that case.
    if type(grabbed) is list:
        grabbed = Image.open(grabbed[0])
    return grabbed
# Page layout: a left-hand form column (ticker, date, feature checklist, PNL
# selector, alerts and save/update buttons) next to the two screenshot
# preview columns, plus a floating "statistics" button in the corner.
layout = dbc.Container([
    dbc.Row([
        dbc.Form([
            dbc.InputGroup([
                dbc.InputGroupText("Ticker"),
                dbc.Input(placeholder="...",type='text',id='ticker'),
                dbc.FormFeedback("Set the Ticker", type="invalid"),
            ], className='pb-1'),
            dbc.InputGroup([
                dbc.InputGroupText("Дата"),
                dbc.Input(value = f"{datetime.now().strftime('%d.%m.%Y')}",type='text',id='datefield'),
                dbc.FormFeedback("Неверный формат даты. Пример: 02.12.2023", type="invalid"),
            ], className='py-1'),
            # One checkbox per feature; PNL and Datetime are handled separately.
            dbc.Checklist(
                options=[{'label':f,'value':i} for i,(f,v) in enumerate(features) if f not in ['PNL','Datetime']],
                value=[],
                id='checklist',
                className='py-1'),
            dbc.InputGroup([
                dbc.InputGroupText("PNL:"),
                dbc.Select(
                    options=[
                        {"label": "Not applicable", "value": 'NA'},
                        {"label": "Positive", "value": '+'},
                        {"label": "Negative", "value": '-'},
                        {"label": "Zero", "value": '0'},
                    ],
                    value=0,
                    id='pnl_select',
                ),
            ]),
            # Transient warning shown when the clipboard holds no image.
            dbc.Alert(
                'There is no image in clipboard',
                id="img-alert",
                is_open=False,
                color='warning',
                duration=4000,
                className='mt-2',
            ),
            # Transient confirmation shown after a successful DB update.
            dbc.Alert(
                'DB updated',
                id="updated-alert",
                is_open=False,
                color='success',
                duration=4000,
                className='mt-2',
            ),
            html.Div([
                dbc.Button(
                    html.Span([
                        html.I(className='bi bi-save2', style=dict(paddingRight='.5vw')),
                        'Фон']),
                    id='save_15min',
                    className="me-1"),
                dbc.Button(
                    html.Span([
                        html.I(className='bi bi-save2', style=dict(paddingRight='.5vw')),
                        'Рабочий']),
                    id='save_3min',
                    className="me-1"),
                # Disabled until both screenshots have been captured.
                dbc.Button('Update', id='update',className="me-1", disabled=True),
                dbc.Tooltip(
                    'Сохранить Фоновый Таймфрейм',
                    target='save_15min',
                    placement='bottom',
                    delay=tooltip_delay),
                dbc.Tooltip(
                    'Сохранить Рабочий Таймфрейм',
                    target='save_3min',
                    placement='bottom',
                    delay=tooltip_delay),
                dbc.Tooltip(
                    'Сохранить запись в базу данных',
                    target='update',
                    placement='bottom',
                    delay=tooltip_delay),
            ],
            className='pt-2'),
        ],
        className='col-3'
        ),
        dbc.Col([
            dbc.Col([html.Div('Фоновый Таймфрейм'),html.Img(className='w-100',id='img15')]),
            dbc.Col([html.Div('Рабочий Таймфрейм'),html.Img(className='w-100',id='img3')])
        ]),
    ]),
    html.Div([
        dbc.NavLink(
            dbc.Button(
                html.I(className='bi bi-pie-chart-fill fs-3'),
                outline=False,
                className='btn btn-info',
                id='trade_statistic',
            ),
            href="/view_records",
        ),
        dbc.Tooltip(
            'Trade Statistic',
            target='trade_statistic',
            placement='left',
            delay=tooltip_delay),
    ],
    className='d-flex flex-column align-items-end fixed-bottom me-3 mb-3'
    ),
],
className='p-3 mx-1'
)
@callback(
    Output('ticker','invalid'),
    Output('img15', 'src'),
    Output('img3', 'src'),
    Output('img-alert','is_open'),
    Output('update','disabled'),
    Output('datefield','invalid'),
    Output('updated-alert','is_open'),
    Input('save_15min','n_clicks'),
    Input('save_3min','n_clicks'),
    Input('ticker', 'value'),
    Input('datefield','value'),
    Input('update','n_clicks'),
    State('img15','src'),
    State('img3','src'),
    State('checklist','value'),
    State('pnl_select','value'),
)
def get_img(n15, n3, ticker,date,updclk,img15,img3,checklist,pnl):
    """Single dispatcher callback for the whole form.

    Depending on which component triggered it (ctx.triggered_id), it either
    grabs a clipboard screenshot and saves it to the assets folder, validates
    the ticker/date inputs, or writes the record to the CSV database.
    Mutates the module-level IMG15/IMG3/file_names state.

    NOTE(review): IMG15/IMG3 (PIL Images) are returned as the <img> ``src``
    values -- confirm Dash serializes these as intended. The 'update' branch
    also reports success regardless of update_db()'s return value.
    """
    global IMG15, IMG3, file_names
    trg_id = ctx.triggered_id
    if trg_id in ['save_15min','save_3min']:
        # A ticker is required before a screenshot can be named and saved.
        if not ticker:
            return True, IMG15, IMG3, False, True, False,False
        img = get_img_clipboard()
        if not img:
            return False, IMG15, IMG3, True, True, False,False
        # Content hash makes the stored file name unique per screenshot.
        md5hash = hashlib.md5(img.tobytes()).hexdigest()
        if trg_id == 'save_15min':
            IMG15 = img
            fn = f'{ticker}_LT_{md5hash}.png'
            file_names['img15']=fn
        elif trg_id=='save_3min':
            IMG3 = img
            fn = f'{ticker}_ST_{md5hash}.png'
            file_names['img3']=fn
    elif trg_id=='ticker':
        # Ticker field is invalid while empty.
        return ticker in ['',None], IMG15,IMG3,False,True, False,False
    elif trg_id=='datefield':
        # Date must match DD.MM.YYYY.
        try:
            res = not bool(datetime.strptime(date, '%d.%m.%Y'))
        except ValueError:
            res = True
        return False, IMG15,IMG3,False,True, res,False
    elif trg_id == 'update':
        update_db(updclk,ticker,checklist,pnl,date)
        return False, IMG15, IMG3, False, True, False,True
    else:
        return False, IMG15, IMG3, False, True, False,False
    # Save branch falls through to here: persist the screenshot to assets.
    snapshot_folder = get_asset_url('snapshots')[1:]
    if not os.path.exists(snapshot_folder):
        os.mkdir(snapshot_folder)
    img.save(os.path.join(snapshot_folder,fn),'PNG')
    # Enable the Update button only once both screenshots are captured.
    return False, IMG15, IMG3, False, not all(file_names.values()), False,False
def update_db(n_clicks,ticker,checklist,pnl, date):
    """Append one trade record to the CSV database and reset screenshot state.

    Returns False when either screenshot is missing, True after a successful
    write. ``n_clicks`` is unused. Feature columns are stored as 0/1 flags;
    new features found in ``features`` are inserted into existing databases
    with a -1 placeholder value.
    """
    global DBNAME, features, file_names, IMG15, IMG3
    # Both timeframe screenshots must be captured before saving.
    if not all(file_names.values()):
        return False
    if not os.path.exists(DBNAME):
        db = pd.DataFrame(columns=['ticker']+[f for f,v in features]+['filenames'])
    else:
        db = pd.read_csv(
            DBNAME,
            header=0,
            index_col=None,
            keep_default_na=False
        )
    last_cols = 2 #count_special_the_last_cols
    #2 because of PNL feature is not checkbox, Datetime is hidden
    chbx = [0]*(len(features)-last_cols)
    for i in checklist:
        chbx[i] = 1
    # check new features and add to db
    if len(features) != len(db.columns)-2:
        for nc in set([f for f,v in features])-set(db.columns):
            print('Adding column:',nc)
            # Insert before the trailing special columns, with -1 = "unknown".
            db.insert(len(db.columns)-last_cols,nc,-1)
    new_row = [ticker]+chbx+[date,pnl,list(file_names.values())]
    db.loc[len(db.index)]=new_row
    db.to_csv(DBNAME,index=False)
    # Clear the captured-screenshot state for the next record.
    for k in file_names:
        file_names[k] = ''
    IMG15 = None
    IMG3 = None
    return True
| jazzzman/TradeMemo | pages/new_trade.py | new_trade.py | py | 8,037 | python | en | code | 0 | github-code | 13 |
29709438913 | import json
from pprint import pprint
import argparse
X = 0
Y = 0
def testOutput():
scan_data=open('out-0.txt')
data = json.load(scan_data)
output = []
for item in data:
output.append({'y':item['y'], 'offset':abs(item['x']-X)})
pprint(output)
scan_data.close()
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-x','--xcheck', type=int, help='X Offset')
    args = parser.parse_args()
    # NOTE(review): -x is optional, so X becomes None when omitted and
    # abs(item['x'] - X) in testOutput() will raise TypeError; consider
    # required=True or default=0.
    X = args.xcheck
    testOutput()
| mjavaid/fyp-uottawa | Scanning/testOutput.py | testOutput.py | py | 496 | python | en | code | 0 | github-code | 13 |
71984712017 | # SWEA 5209번 최소 생산 비용
'''
SWEA 5209: minimum production cost.

Given each factory's production cost for each product (one product per
factory), compute the minimum total cost of producing all products.
'''
import sys
# Redirect stdin so the solution reads from a local sample input file.
sys.stdin = open('input.txt', 'r')
def backtracking(arr, row):
    """Assign product *row* to each still-free factory, pruning on cost.

    Uses module globals: N (number of products/factories), sum_ (running
    cost of the partial assignment), min_sum (best complete cost so far)
    and visited (factories already in use).
    """
    global N, sum_, min_sum
    if row == N: # reached the last row: record a better total if found
        if min_sum > sum_:
            min_sum = sum_
        return
    else:
        if sum_ > min_sum: # prune: partial cost already exceeds the best total
            return
        for i in range(N):
            if visited[i] == 0: # one product per factory, so track used factories
                sum_ += arr[row][i] # add this factory's cost
                visited[i] = 1 # mark the factory as busy
                row += 1 # move on to the next product
                backtracking(arr,row)
                row -= 1
                visited[i] = 0
                sum_ -= arr[row][i]
T = int(input())
for test_case in range(1, T+1):
    N = int(input()) # number of products (== number of factories)
    arr = [list(map(int,input().split())) for _ in range(N)]
    # Globals consumed by backtracking().
    sum_ = 0
    min_sum = 9999999
    visited = [0]*N
    backtracking(arr,0)
    print(f'#{test_case}', min_sum)
| euneuneunseok/TIL | SWEA/SWEA_5209_최소 생산 비용.py | SWEA_5209_최소 생산 비용.py | py | 1,317 | python | ko | code | 0 | github-code | 13 |
6847529635 | # @Time : 2021/1/28
# @Author : Tianyu Zhao
# @Email : tyzhao@bupt.edu.cn
import argparse
from openhgnn.experiment import Experiment
if __name__ == '__main__':
    # CLI front-end: pick model/task/dataset/device and run one experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', default='RGCN', type=str, help='name of models')
    parser.add_argument('--task', '-t', default='node_classification', type=str, help='name of task')
    # link_prediction / node_classification
    parser.add_argument('--dataset', '-d', default='acm4GTN', type=str, help='name of datasets')
    parser.add_argument('--gpu', '-g', default='-1', type=int, help='-1 means cpu')
    parser.add_argument('--use_best_config', action='store_true', help='will load utils.best_config')
    parser.add_argument('--load_from_pretrained', action='store_true', help='load model from the checkpoint')
    args = parser.parse_args()
    experiment = Experiment(model=args.model, dataset=args.dataset, task=args.task, gpu=args.gpu,
                            use_best_config=args.use_best_config, load_from_pretrained=args.load_from_pretrained)
experiment.run() | BUPT-GAMMA/OpenHGNN | main.py | main.py | py | 1,100 | python | en | code | 710 | github-code | 13 |
15295089968 | from collections import ChainMap
import os
from botocore.model import ServiceModel
from botocore.loaders import Loader
from botocore.serialize import create_serializer
from botocore.parsers import create_parser
from botocore.awsrequest import AWSRequest
from botocore import session
REGION = os.environ['AWS_REGION']
def get_endpoint(service_name):
    """Return the regional endpoint URL for *service_name* in REGION."""
    boto_session = session.get_session()
    service_client = boto_session.create_client(service_name, region_name=REGION)
    return service_client.meta.endpoint_url
def create_request():
    """Unimplemented stub (unused in the visible code)."""
    pass
def create_response():
    """Unimplemented stub (unused in the visible code)."""
    pass
def integration_template():
    """Return a skeleton API Gateway AWS-integration definition.

    handle_method() fills in Uri, IntegrationHttpMethod, RequestParameters
    and RequestTemplates before attaching the result to the fragment.
    """
    return {
        "IntegrationHttpMethod": "",
        "IntegrationResponses": [
            {
                "StatusCode": 200,
                "ResponseTemplates": {
                    "application/json": ''
                },
            },
            # 4xx / 5xx backend responses are matched by status-code pattern.
            {"StatusCode": 400, "SelectionPattern": "4[0-9]{2}"},
            {"StatusCode": 500, "SelectionPattern": "5[0-9]{2}"}
        ],
        "PassthroughBehavior": "WHEN_NO_MATCH",
        "RequestParameters": {},
        "RequestTemplates": {"application/json": ""},
        "Type": "AWS",
        "Uri": ""
    }
class InvalidTypeException(Exception):
    """Raised when the macro is applied to a non-"AWS::ApiGateway::Method" resource."""
    pass
def handle_method(fragment):
    """Rewrite an AWS::ApiGateway::Method fragment into a concrete AWS
    service integration.

    Pops the custom Service/Action/ResponseMaps keys out of the fragment's
    Integration, serializes a botocore request for the action, and replaces
    the Integration with one carrying the derived URI, HTTP method, headers,
    query-string parameters and request body template.

    NOTE(review): ``response_maps`` is popped but never used, and the bare
    ``except:`` below silently ignores a missing "Fn::Transform" key.
    """
    if fragment["Type"] != "AWS::ApiGateway::Method":
        response_string = "Macro only supports \"AWS::ApiGateway::Method\", user supplied {}"
        raise InvalidTypeException(response_string.format(fragment["Type"]))
    service_name = fragment["Properties"]["Integration"].pop("Service").lower()
    action = fragment["Properties"]["Integration"].pop("Action")
    response_maps = fragment["Properties"]["Integration"].pop("ResponseMaps")
    # Remove the macro marker so the fragment is not re-processed.
    try:
        fragment.pop("Fn::Transform")
    except:
        pass
    # Load the botocore model for the service and the requested operation.
    loader = Loader()
    service_description = loader.load_service_model(service_name=service_name, type_name='service-2')
    service_model = ServiceModel(service_description)
    protocol = service_model.protocol
    op_model = service_model.operation_model(action["Name"])
    # Action["Parameters"] is a list of single-key dicts; flatten to one dict.
    request_parameters = action.get("Parameters", {})
    params = dict(ChainMap(*request_parameters))
    print("params: {}".format(params))
    serializer = create_serializer(protocol)
    response_parser = create_parser(protocol)
    print(service_model.protocol)
    # Serialize and prepare the request to discover method/headers/body/path.
    request = serializer.serialize_to_request(params, op_model)
    request_object = AWSRequest(
        method=request['method'],
        url=get_endpoint(service_model.service_name),
        data=request['body'],
        headers=request['headers'])
    X = request_object.prepare()
    print("Raw request: {}".format(request))
    print("Prepared request: {}".format(X))
    integration = fragment["Properties"]["Integration"]
    new_integration = integration_template()
    # Copy the existing values to the new template
    for entry in integration.keys():
        new_integration[entry] = integration[entry]
    # Add headers to cfn template
    if X.headers is not None and callable(getattr(X.headers, "keys", None)):
        for header in X.headers.keys():
            if header.lower() != 'Content-Length'.lower():
                new_integration["RequestParameters"].update({"integration.request.header.{}".format(header): "'{}'".format(X.headers[header])})
    # Add Query Strings to cfn template
    if 'query_string' in request and callable(getattr(request['query_string'], "keys", None)):
        for query in request['query_string'].keys():
            new_integration["RequestParameters"].update({"integration.request.querystring.{}".format(query): "'{}'".format(request['query_string'][query])})
    # Set the body
    if isinstance(X.body, str):
        new_integration["RequestTemplates"]["application/json"] = X.body
    else:
        new_integration["RequestTemplates"]["application/json"] = str(X.body, "utf-8") if X.body else ''
    # Build the API Gateway integration ARN for the service path.
    new_integration["Uri"] = ":".join([
        "arn",
        "aws",
        "apigateway",
        REGION,
        service_model.endpoint_prefix,
        "path/" + request["url_path"]
    ])
    new_integration["IntegrationHttpMethod"] = X.method
    fragment["Properties"]["Integration"] = new_integration
    print(fragment)
    return fragment
def lambda_handler(event, _context):
    """CloudFormation macro entry point.

    Transforms the supplied fragment via handle_method(); when the fragment
    is of an unsupported type, the original fragment is returned with a
    "failure" status so CloudFormation can surface the error.
    """
    fragment = event["fragment"]
    status = "success"
    try:
        fragment = handle_method(fragment)
        print("transformed fragment: {}".format(fragment))
    except InvalidTypeException as e:
        print("Invalid type supplied: {}".format(e))
        status = "failure"
    return {
        "requestId": event["requestId"],
        "status": status,
        "fragment": fragment,
    }
| rhboyd/SimpleAPI | lambda_code/simple_api.py | simple_api.py | py | 4,671 | python | en | code | 2 | github-code | 13 |
24578029796 | # 物件的 __get__、__set__
class Celsius:  # data descriptor: view a Fahrenheit attribute in Celsius
    def __get__(self, instance, owner):
        # C = (F - 32) * 5 / 9
        return (instance.fahrenheit - 32) * 5 / 9
    def __set__(self, instance, value):
        # F = C * 9 / 5 + 32 -- writes back through the host's attribute.
        instance.fahrenheit = value * 9 / 5 + 32
class Celsius_Host:  # placeholder name kept distinct from Temperature below
    pass
class Temperature:  # temperature holder; stores Fahrenheit internally
    celsius = Celsius()  # composition: descriptor exposes a Celsius view
    def __init__(self, fahrenheit):
        self.fahrenheit = fahrenheit
if __name__ == '__main__':
    temp = Temperature(212)
    print('华氏:', temp.fahrenheit, '摄氏:', temp.celsius) # triggers Celsius.__get__
    t = 0
    temp.celsius = t # triggers Celsius.__set__
    print('摄氏:', t, '华氏:', temp.fahrenheit)
73035549138 | import ast
import json
from api.transactions import get_neighbours_with_depth, save_to_file
from api.walletexplorer_api import get_label
# Per-address output naming would use .format(address):
# WRITE_FILE_STRUCTURE = '../converted_database/converted_{}.json'
# Currently all conversions are written to one standard JSON file.
WRITE_FILE_STRUCTURE = '../converted_database/converted_file.json'
# Input path template; filled with .format(address, depth) by start_analysis().
READ_FILE_STRUCTURE = '../databases/results/address_{}_with_depth_{}.txt'
def get_relative_width(nodes):
    """Return the largest single transaction value across all nodes, scaled by 1/20.

    Intended as a normalisation factor for edge widths in the graph.

    :param nodes: iterable of dicts with 'in' and 'out' mappings of
                  address -> value.
    :return: max value found divided by 20 (0.0 when there are no values).
    """
    max_width = 0
    for node in nodes:
        # Guard against empty 'in'/'out' maps -- the original code called
        # max() directly on them, which raises ValueError when empty.
        # The stray debug prints were removed as well.
        values = list(node['in'].values()) + list(node['out'].values())
        if values:
            max_width = max(max_width, max(values))
    return max_width / 20
def get_width(value, max_value):
    """Return value/max_value as an edge width, defaulting to 1 when value is None.

    The stray debug print from the original was removed.
    """
    if value is None:
        return 1
    return value / max_value
def convert(n):
""""
Converts all transactions to a usable JSON object.
:param n: The complete dictionary object with all transactions.
:return: JSON object.
"""
j_obj = {"nodes": [], "edges": []}
main_node = n['main_node']
j_obj['nodes'].append(
{"id": main_node, "label": main_node[:10] + "..", "title": main_node, "group": 1,
"color": {"background": "rgb(233,9,26)", "border": "rgb(233,9,26)"}}
)
possible_mal = set()
def color_nodes(in_dict, out_dict, color_in, color_out, is_in=False):
""""
Adds nodes and edges to the main JSON object to return.
:param in_dict: Dictionary with all addresses used to fund the transaction.
:param out_dict: Dictionary with all addresses where transactions ends up.
:param color_in: Color of in-address-nodes.
:param color_out: Color of out-address-nodes.
:param is_in: Set to True when malicious node is in in_dict, meaning that other nodes in in_dict can be
malicious as well. This goes on recursively for these nodes as well.
"""
first = True
for i in in_dict:
if i != 'null':
if i not in [jo['id'] for jo in j_obj['nodes']]:
j_obj['nodes'].append(
{"id": i, "label": i[:10] + "..", "title": i, "group": 1,
"color": {"background": color_in, "border": color_in}}
)
for j in out_dict:
if j != 'null':
if is_in:
possible_mal.add(j)
if first and j not in [jo['id'] for jo in j_obj['nodes']]:
j_obj['nodes'].append(
{"id": j, "label": j[:10] + "..", "title": j, "group": 2,
"color": {"background": color_out, "border": color_out}}
)
j_obj['edges'].append(
{"from": i, "to": j, "title": str(format(out_dict.get(j), ',d')),
"width": (out_dict.get(j) / (sum(out_dict.values()) / 10)) + 0.5,
"color.color": "rgb(233,150,122)", "color.highlight": "rgb(10,9,233)", "arrows": "to"}
)
first = False
def add_trans(arr):
""""
Add an transaction's addresses to the JSON object.
:param arr: The transaction which contains the in addresses and out addresses.
"""
in_dict = arr.get('in')
out_dict = arr.get('out')
# If the malicious node is in out-addresses, the in-addresses are possible victims.
if main_node in out_dict:
color_nodes(in_dict, out_dict, "rgb(26,19,233)", "rgb(159,159,163)")
# If malicious node is in in-addresses, other addresses are colored gray as they are potentially related to
# the malicious node.
elif main_node in in_dict:
color_nodes(in_dict, out_dict, "rgb(159,159,163)", "rgb(159,159,163)", True)
# If malicious node not in either in or out,
# check whether the added possible malicious nodes are in the in-addresses
for i in possible_mal:
if i in in_dict:
color_nodes(in_dict, out_dict, "rgb(159,159,163)", "rgb(159,159,163)", True)
break
    def find_trans(arr, key=None):
        """
        Recursively walk the nested result dict and add its transactions to
        the enclosing JSON object ``j_obj``.

        The innermost level is detected by its first key being '1'
        (transactions are numbered from 1); outer levels are keyed by address.

        NOTE(review): the for-loops below rebind the ``key`` parameter —
        looks intentional (recursion passes the loop key) but worth confirming.

        :param arr: Nested dict of transactions.
        :param key: Address that was used to look up this level of the dict.
        """
        rec_k = list(arr.keys())
        # Position in dictionary where transactions are:
        if str(rec_k[0]) == '1':
            # Possible exchange, add/change label of node from address to the exchange name.
            # Heuristic: 50+ transactions suggests an exchange — TODO confirm threshold.
            if len(rec_k) >= 50:
                label = get_label(key)
                if label is not None:
                    # Node not present yet: add it already labelled/coloured.
                    if key not in [jo['id'] for jo in j_obj['nodes']]:
                        j_obj['nodes'].append(
                            {"id": key, "label": key[:10] + "..", "title": label, "group": 1,
                             "color": {"background": "rgb(102,233,64)", "border": "rgb(102,233,64)"}}
                        )
                    # Node already present: re-label and re-colour it in place.
                    else:
                        for ns in j_obj['nodes']:
                            if ns['id'] == key:
                                ns['title'] = label
                                ns['color']['background'] = "rgb(102,233,64)"
                                ns['color']['border'] = "rgb(102,233,64)"
            # Add all transactions
            for key in rec_k:
                add_trans(arr.get(key))
        # Not position in dictionary where transactions are, go one layer deeper for every key.
        else:
            for key in rec_k:
                if key != 'null':
                    find_trans(arr.get(key), key)
find_trans(n['data'])
return j_obj
def start_analysis(address, depth):
    """
    Analyse an address' transaction graph and write it out as JSON.

    Fetches the address' transactions up to ``depth`` hops, stores the raw
    result under databases/results, converts it to a vis.js-style JSON graph
    and writes that to the converted database location.

    :param address: Bitcoin address to analyse.
    :param depth: Number of transaction hops to follow from the address.
    """
    # Save address' transactions to a certain depth in databases/results
    save_to_file(address=address, depth=depth,
                 resulting_neighbours_dict=get_neighbours_with_depth(address=address, depth=depth))
    # Load in the stored result (a Python-literal dump, hence literal_eval).
    with open(READ_FILE_STRUCTURE.format(address, depth), 'r') as f:
        node = ast.literal_eval(f.read())
    # If the address is not found on blockchain.info, report it and bail out
    # instead of silently returning nothing.
    if 'main_node' not in node:
        print("Address {} not found on blockchain.info; nothing to convert.".format(address))
        return
    # Convert results to JSON and save them in converted_database.
    # Mode 'w' already truncates the file, so no explicit seek/truncate is needed.
    with open(WRITE_FILE_STRUCTURE, 'w') as f:
        json.dump(convert(node), f)
    print("Done.")
# Example entry point: analyse a sample address two transaction hops deep.
if __name__ == '__main__':
    start_analysis('1LYz7EgAF8PU6bSN8GDecnz9Gg814fs81W', 2)
| GijsBeernink/UT-BLT-Backend | api/converter.py | converter.py | py | 6,851 | python | en | code | 1 | github-code | 13 |
6163675149 | from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
import time, socket, pickle, os, sys
from encrypt_decrypt import encrypt, decrypt
from cryptography.hazmat.primitives import serialization
HOST = '127.0.0.1'  # The server's (Bob's) hostname or IP address
PORT = 65433  # The port used by the server
# Alice's ephemeral ECDH key pair on curve P-384.
alice_priv = ec.generate_private_key(ec.SECP384R1())
digest = hashes.Hash(hashes.SHA256())
# Compute the SHA-256 hash of the file named on the command line.
filename = sys.argv[1]
fileContents = open(filename, 'rb')  # NOTE(review): file handle is never closed — consider a `with` block
fileStuff = fileContents.read()
digest.update(fileStuff)
fileHash = digest.finalize()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
    s.connect((HOST, PORT))
    # Exchange PEM-serialized public keys with Bob.
    serialKey = alice_priv.public_key().public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
    s.sendall(serialKey)
    bob_public = s.recv(1024)
    loaded_public_key = serialization.load_pem_public_key(bob_public)
    # Derive a 32-byte symmetric key from the ECDH shared secret via HKDF-SHA256.
    alice_shared = alice_priv.exchange(ec.ECDH(), loaded_public_key)
    alice_hkdf = HKDF(algorithm=hashes.SHA256(),length=32,salt=None,info=b'',).derive(alice_shared)
    # Send Bob our encrypted file hash.
    iv, ciphertext, tag, associated_data = encrypt(alice_hkdf,fileHash,b"Alice's Hash")
    myCiphertext = ciphertext
    s.sendall(pickle.dumps((iv,ciphertext,tag, associated_data)))
    # Receive and decrypt Bob's hash of his copy of the file.
    # NOTE(review): pickle.loads on data from the network is unsafe against a
    # malicious peer — consider a safer wire format (e.g. JSON/struct).
    (iv, ciphertext, tag, associated_data) = pickle.loads(s.recv(102400))
    results = decrypt(alice_hkdf, associated_data, iv, ciphertext,tag)
    # Compare Bob's hash with ours to decide whether the files match.
    isSame = b""
    if results == fileHash:
        isSame = b"Success!"
    else:
        isSame = b"Failed!"
    # Send our verdict to Bob, encrypted under the shared key.
    iv, ciphertext, tag, associated_data = encrypt(alice_hkdf, isSame ,b"Alice's Result")
    s.sendall(pickle.dumps((iv, ciphertext, tag, associated_data)))
    print("Our result: ", isSame.decode('utf-8'))
    # Receive and print Bob's verdict.
    (iv, ciphertext, tag, associated_data) = pickle.loads(s.recv(102400))
    bob_result = decrypt(alice_hkdf, associated_data, iv, ciphertext, tag)
    print("Bob result:", bob_result.decode('utf-8'))
| rajKarra69420/cs355project | alice.py | alice.py | py | 2,104 | python | en | code | 0 | github-code | 13 |
15356193405 | # Compare two strings represented as linked lists
# Given two linked lists, represented as linked lists (every character is a node in linked list). Write a function compare() that works similar to strcmp(), i.e., it returns 0 if both strings are same, 1 if first linked list is lexicographically greater, and -1 if second string is lexicographically greater.
# Examples:
# Input: list1 = g->e->e->k->s->a
# list2 = g->e->e->k->s->b
# Output: -1
# Input: list1 = g->e->e->k->s->a
# list2 = g->e->e->k->s
# Output: 1
# Input: list1 = g->e->e->k->s
# list2 = g->e->e->k->s
# Output: 0
class Node:
    """Singly linked list node holding one character of a string."""

    def __init__(self, char):
        """Create an unlinked node carrying *char* in attribute ``c``."""
        self.c = char
        self.next = None
def compare(str1, str2):
    """
    strcmp-style comparison of two strings represented as linked lists.

    Bug fix: the previous version returned -1 for equal strings, because
    after exhausting both lists the ``if not str1`` branch fired before the
    equality case could be reached.

    :param str1: Head Node of the first string (None means empty string).
    :param str2: Head Node of the second string (None means empty string).
    :return: 0 if both strings are equal, 1 if the first is
             lexicographically greater, -1 if the second is greater.
    """
    # Walk past the common prefix.
    while str1 and str2 and str1.c == str2.c:
        str1 = str1.next
        str2 = str2.next
    # Both lists still have a node: the first differing character decides.
    if str1 and str2:
        return 1 if str1.c > str2.c else -1
    # Exactly one list ended: the longer string is the greater one.
    if str1:
        return 1
    if str2:
        return -1
    # Both lists ended together without a mismatch: the strings are equal.
    return 0
# Driver program: build "geeksb" and "geeksa" as linked lists and compare
# them. Expected output is 1, since 'b' > 'a' at the first differing position.
list1 = Node("g")
list1.next = Node("e")
list1.next.next = Node("e")
list1.next.next.next = Node("k")
list1.next.next.next.next = Node("s")
list1.next.next.next.next.next = Node("b")
list2 = Node("g")
list2.next = Node("e")
list2.next.next = Node("e")
list2.next.next.next = Node("k")
list2.next.next.next.next = Node("s")
list2.next.next.next.next.next = Node("a")
print(compare(list1, list2))
16974327843 |
# Card/team colour identifiers.
# NOTE(review): all colour values are empty strings — presumably placeholders
# filled in elsewhere or unused; confirm before relying on them.
COLOR_BLUE_1 = ''
COLOR_RED_1 = ''
COLOR_PURPLE_1 = ''
COLOR_NEUTRAL_1 = ''
COLOR_BOMB_1 = ''
# Used to store/load game_code to/from session
GAME_CODE_KEY = 'game_code'
# Used to store/load client_id to/from session
CLIENT_ID_KEY = 'client_id'
# Used to transmit cached player_id from browser cookies to server
OLD_ID_KEY = 'old_id'
| AChelikani/Codenames | constants.py | constants.py | py | 339 | python | en | code | 2 | github-code | 13 |
40341835982 | import paho.mqtt.client as paho
import logging
import time
import queue
from json import loads, dumps
from jsonschema import Draft7Validator
import ssl
from jsonschema import ValidationError
import threading
# JSON Schema: flat key/value object with at least one entry; values must be
# scalars (integer/string/boolean/number). Used for attributes and telemetry
# "values" payloads.
KV_SCHEMA = {
    "type": "object",
    "patternProperties":
        {
            ".": {"type": ["integer",
                           "string",
                           "boolean",
                           "number"]}
        },
    "minProperties": 1,
}
# Same shape as KV_SCHEMA but allows an empty object (RPC params may be empty).
SCHEMA_FOR_CLIENT_RPC = {
    "type": "object",
    "patternProperties":
        {
            ".": {"type": ["integer",
                           "string",
                           "boolean",
                           "number"]}
        },
    "minProperties": 0,
}
# Timestamped telemetry entry: {"ts": <epoch millis>, "values": {...}}.
TS_KV_SCHEMA = {
    "type": "object",
    "properties": {
        "ts": {
            "type": "integer"
        },
        "values": KV_SCHEMA
    },
    "additionalProperties": False
}
# Array of timestamped telemetry entries.
DEVICE_TS_KV_SCHEMA = {
    "type": "array",
    "items": TS_KV_SCHEMA
}
# Array mixing timestamped entries and plain key/value entries.
DEVICE_TS_OR_KV_SCHEMA = {
    "type": "array",
    "items": {
        "anyOf":
            [
                TS_KV_SCHEMA,
                KV_SCHEMA
            ]
    }
}
# Pre-built Draft 7 validators, one per schema above.
RPC_VALIDATOR = Draft7Validator(SCHEMA_FOR_CLIENT_RPC)
KV_VALIDATOR = Draft7Validator(KV_SCHEMA)
TS_KV_VALIDATOR = Draft7Validator(TS_KV_SCHEMA)
DEVICE_TS_KV_VALIDATOR = Draft7Validator(DEVICE_TS_KV_SCHEMA)
DEVICE_TS_OR_KV_VALIDATOR = Draft7Validator(DEVICE_TS_OR_KV_SCHEMA)
# ThingsBoard device MQTT API topics.
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
log = logging.getLogger(__name__)
class TBTimeoutException(Exception):
    """Passed to request callbacks when a ThingsBoard request times out."""
class TBQoSException(Exception):
    """Raised when an unsupported MQTT quality-of-service level is requested."""
class TBPublishInfo:
    """Wraps a paho MQTTMessageInfo and exposes ThingsBoard-style result codes."""

    # Result codes mirroring paho-mqtt's MQTT_ERR_* constants.
    TB_ERR_AGAIN = -1
    TB_ERR_SUCCESS = 0
    TB_ERR_NOMEM = 1
    TB_ERR_PROTOCOL = 2
    TB_ERR_INVAL = 3
    TB_ERR_NO_CONN = 4
    TB_ERR_CONN_REFUSED = 5
    TB_ERR_NOT_FOUND = 6
    TB_ERR_CONN_LOST = 7
    TB_ERR_TLS = 8
    TB_ERR_PAYLOAD_SIZE = 9
    TB_ERR_NOT_SUPPORTED = 10
    TB_ERR_AUTH = 11
    TB_ERR_ACL_DENIED = 12
    TB_ERR_UNKNOWN = 13
    TB_ERR_ERRNO = 14
    TB_ERR_QUEUE_SIZE = 15

    def __init__(self, messageInfo):
        # Underlying paho MQTTMessageInfo for the published message.
        self.messageInfo = messageInfo

    def rc(self):
        """Return the result code of the publish call."""
        return self.messageInfo.rc

    def mid(self):
        """Return the MQTT message id of the publish."""
        return self.messageInfo.mid

    def get(self):
        """Block until the message has been published, then return its result code."""
        self.messageInfo.wait_for_publish()
        return self.messageInfo.rc
class TBDeviceMqttClient:
    """ThingsBoard device MQTT client built on paho-mqtt.

    Supports publishing telemetry/attributes, subscribing to attribute
    updates, requesting client/shared attribute values, and both client-side
    and server-side RPC. A background watchdog thread expires requests that
    never receive a reply (30 s deadline).
    """

    def __init__(self, host, token=None):
        """
        :param host: ThingsBoard host name or IP address.
        :param token: Device access token; when empty/None no credentials
            are set and a warning is logged.
        """
        self._client = paho.Client()
        self.__host = host
        if not token:
            log.warning("token is not set, connection without tls wont be established")
        else:
            self._client.username_pw_set(token)
        self._lock = threading.Lock()
        self._attr_request_dict = {}          # attribute request id -> callback
        self.__timeout_queue = queue.Queue()  # pending {"ts": deadline_ms, ...} items
        self.__timeout_thread = None
        self.__is_connected = False
        self.__device_on_server_side_rpc_response = None
        self.__connect_callback = None
        self.__device_max_sub_id = 0
        self.__device_client_rpc_number = 0
        self.__device_sub_dict = {}           # attribute key -> {subscription id: callback}
        self.__device_client_rpc_dict = {}    # client RPC request id -> callback
        self.__attr_request_number = 0
        self._client.on_connect = self._on_connect
        self._client.on_disconnect = self._on_disconnect
        self._client.on_log = self._on_log
        self._client.on_publish = self._on_publish
        self._client.on_message = self._on_message
        # TODO: enable configuration available here:
        # https://pypi.org/project/paho-mqtt/#option-functions

    def _on_log(self, client, userdata, level, buf):
        """paho callback: forward internal client logs to this module's logger."""
        log.debug(buf)

    def _on_publish(self, client, userdata, result):
        """paho callback: fired once a message was handed to the broker."""
        log.debug("Data published to ThingsBoard!")

    def _on_connect(self, client, userdata, flags, rc, *extra_params):
        """paho callback: on success, subscribe to all ThingsBoard device topics."""
        result_codes = {
            1: "incorrect protocol version",
            2: "invalid client identifier",
            3: "server unavailable",
            4: "bad username or password",
            5: "not authorised",
        }
        if self.__connect_callback:
            self.__connect_callback(client, userdata, flags, rc, *extra_params)
        if rc == 0:
            self.__is_connected = True
            log.info("connection SUCCESS")
            self._client.subscribe(ATTRIBUTES_TOPIC, qos=1)
            self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", 1)
            self._client.subscribe(RPC_REQUEST_TOPIC + '+')
            self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=1)
        else:
            if rc in result_codes:
                log.error("connection FAIL with error {rc} {explanation}".format(rc=rc,
                                                                                 explanation=result_codes[rc]))
            else:
                log.error("connection FAIL with unknown error")

    def _on_disconnect(self, client, userdata, rc):
        """paho callback: mark the client as disconnected."""
        log.debug("MQTT client disconnected")
        self.__is_connected = False

    def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, port=1883, ca_certs=None, cert_file=None, key_file=None):
        """Connect to ThingsBoard and start the network loop and timeout watchdog.

        :param callback: Optional callable invoked from _on_connect with the
            raw paho arguments.
        :param min_reconnect_delay: Minimum seconds between reconnect attempts.
        :param timeout: Maximum reconnect delay in seconds.
        :param tls: When True configure TLS 1.2 with the given certificates.
        :param port: Broker port (1883 plain, typically 8883 for TLS).
        """
        if tls:
            self._client.tls_set(ca_certs=ca_certs,
                                 certfile=cert_file,
                                 keyfile=key_file,
                                 cert_reqs=ssl.CERT_REQUIRED,
                                 tls_version=ssl.PROTOCOL_TLSv1_2,
                                 ciphers=None)
            self._client.tls_insecure_set(False)
        self._client.connect(self.__host, port)
        self._client.loop_start()
        self.__connect_callback = callback
        self.reconnect_delay_set(min_reconnect_delay, timeout)
        # Watchdog thread that expires requests which never got an answer.
        self.__timeout_thread = threading.Thread(target=self.__timeout_check)
        self.__timeout_thread.do_run = True
        self.__timeout_thread.start()

    def disconnect(self):
        """Disconnect from the broker and stop the timeout watchdog thread."""
        self._client.disconnect()
        if self.__timeout_thread:
            self.__timeout_thread.do_run = False
            self.__timeout_thread.join()
            self.__timeout_thread = None
        log.info("Disconnected from ThingsBoard!")

    def _on_message(self, client, userdata, message):
        """paho callback: decode the payload and dispatch by topic."""
        content = self._decode(message)
        self._on_decoded_message(content, message)

    @staticmethod
    def _decode(message):
        """Decode an incoming MQTT message payload as UTF-8 JSON."""
        content = loads(message.payload.decode("utf-8"))
        log.debug(content)
        log.debug(message.topic)
        return content

    @staticmethod
    def validate(validator, data):
        """Validate ``data`` with a jsonschema validator; log and re-raise on failure."""
        try:
            validator.validate(data)
        except ValidationError as e:
            log.error(e)
            raise e

    def _on_decoded_message(self, content, message):
        """Dispatch a decoded message to RPC handlers or attribute callbacks."""
        if message.topic.startswith(RPC_REQUEST_TOPIC):
            # Server-side RPC request: hand off to the registered handler.
            request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
            if self.__device_on_server_side_rpc_response:
                self.__device_on_server_side_rpc_response(request_id, content)
        elif message.topic.startswith(RPC_RESPONSE_TOPIC):
            # Reply to one of our client-side RPC calls: fire its callback once.
            with self._lock:
                request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
                self.__device_client_rpc_dict.pop(request_id)(request_id, content, None)
        elif message.topic == ATTRIBUTES_TOPIC:
            # Attribute update pushed by the server.
            with self._lock:
                # Callbacks registered for every attribute ("*").
                if self.__device_sub_dict.get("*"):
                    for x in self.__device_sub_dict["*"]:
                        self.__device_sub_dict["*"][x](content, None)
                # Callbacks registered for the specific keys in this update.
                for key in list(content.keys()):
                    if self.__device_sub_dict.get(key):
                        for x in self.__device_sub_dict[key]:
                            self.__device_sub_dict[key][x](content, None)
        elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
            # Reply to one of our attribute requests: pop and fire its callback.
            with self._lock:
                req_id = int(message.topic[len(ATTRIBUTES_TOPIC + "/response/"):])
                self._attr_request_dict.pop(req_id)(content, None)

    def max_inflight_messages_set(self, inflight):
        """Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
        Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
        self._client.max_inflight_messages_set(inflight)

    def max_queued_messages_set(self, queue_size):
        """Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
        Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages would be dropped."""
        self._client.max_queued_messages_set(queue_size)

    def reconnect_delay_set(self, min_delay=1, max_delay=120):
        """The client will automatically retry connection. Between each attempt it will wait a number of seconds
        between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
        of min_delay seconds. It's doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
        when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
        self._client.reconnect_delay_set(min_delay, max_delay)

    def send_rpc_reply(self, req_id, resp, quality_of_service=1, wait_for_publish=False):
        """Publish a reply to a server-side RPC request.

        :param req_id: Request id string extracted from the request topic.
        :param resp: Serialized response payload.
        :param quality_of_service: MQTT QoS; only 0 or 1 are supported.
        :param wait_for_publish: When True block until the reply is published.
        """
        if quality_of_service not in (0, 1):
            log.error("Quality of service (qos) value must be 0 or 1")
            return
        info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
        if wait_for_publish:
            info.wait_for_publish()

    def send_rpc_call(self, method, params, callback):
        """Issue a client-side RPC call to the server.

        :param method: RPC method name.
        :param params: RPC parameters (validated against the client RPC schema).
        :param callback: Called as callback(request_id, content, None) on reply.
        """
        self.validate(RPC_VALIDATOR, params)
        with self._lock:
            self.__device_client_rpc_number += 1
            self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
            rpc_request_id = self.__device_client_rpc_number
        payload = {"method": method, "params": params}
        self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
                             dumps(payload),
                             qos=1)

    def set_server_side_rpc_request_handler(self, handler):
        """Register the handler invoked as handler(request_id, content) for server-side RPC."""
        self.__device_on_server_side_rpc_response = handler

    def publish_data(self, data, topic, qos):
        """JSON-encode ``data`` and publish it to ``topic``.

        :raises TBQoSException: If qos is not 0 or 1.
        :return: TBPublishInfo wrapping the paho publish result.
        """
        data = dumps(data)
        if qos not in (0, 1):
            log.exception("Quality of service (qos) value must be 0 or 1")
            raise TBQoSException("Quality of service (qos) value must be 0 or 1")
        return TBPublishInfo(self._client.publish(topic, data, qos))

    def send_telemetry(self, telemetry, quality_of_service=1):
        """Publish telemetry (a dict or a list of dicts) to ThingsBoard."""
        if not isinstance(telemetry, list):
            telemetry = [telemetry]
        self.validate(DEVICE_TS_OR_KV_VALIDATOR, telemetry)
        return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)

    def send_attributes(self, attributes, quality_of_service=1):
        """Publish client-side attributes (flat key/value dict) to ThingsBoard."""
        self.validate(KV_VALIDATOR, attributes)
        return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)

    def unsubscribe_from_attribute(self, subscription_id):
        """Remove an attribute subscription by the id returned from subscribe_to_attribute."""
        with self._lock:
            for x in self.__device_sub_dict:
                if self.__device_sub_dict[x].get(subscription_id):
                    del self.__device_sub_dict[x][subscription_id]
                    log.debug("Unsubscribed from {attribute}, subscription id {sub_id}".format(attribute=x,
                                                                                               sub_id=subscription_id))
            # Prune attribute keys whose callback dicts became empty.
            # (The previous `v is not {}` test compared identity with a fresh
            # dict and therefore never pruned anything.)
            self.__device_sub_dict = {k: v for k, v in self.__device_sub_dict.items() if v}

    def subscribe_to_all_attributes(self, callback):
        """Subscribe ``callback`` to every attribute update."""
        return self.subscribe_to_attribute("*", callback)

    def subscribe_to_attribute(self, key, callback):
        """Subscribe ``callback`` to updates of attribute ``key``.

        :return: Subscription id usable with unsubscribe_from_attribute.
        """
        with self._lock:
            self.__device_max_sub_id += 1
            if key not in self.__device_sub_dict:
                self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
            else:
                self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
            log.debug("Subscribed to {key} with id {id}".format(key=key, id=self.__device_max_sub_id))
            return self.__device_max_sub_id

    def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
        """Request current client-side and/or shared attribute values.

        :param client_keys: Iterable of client-side attribute names, or None.
        :param shared_keys: Iterable of shared attribute names, or None.
        :param callback: Called as callback(content, exception); receives a
            TBTimeoutException after 30 s without a reply.
        :return: paho publish info, or False when no keys were given.
        """
        if client_keys is None and shared_keys is None:
            log.error("There are no keys to request")
            return False
        msg = {}
        if client_keys:
            msg.update({"clientKeys": ",".join(client_keys)})
        if shared_keys:
            msg.update({"sharedKeys": ",".join(shared_keys)})
        ts_in_millis = int(round(time.time() * 1000))
        attr_request_number = self._add_attr_request_callback(callback)
        # Use the locally captured request id in the topic: re-reading
        # self.__attr_request_number here raced with concurrent requests.
        info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(attr_request_number),
                                    payload=dumps(msg),
                                    qos=1)
        self._add_timeout(attr_request_number, ts_in_millis + 30000)
        return info

    def _add_timeout(self, attr_request_number, ts):
        """Enqueue a deadline (epoch millis) for an attribute request."""
        self.__timeout_queue.put({"ts": ts, "attribute_request_id": attr_request_number})

    def _add_attr_request_callback(self, callback):
        """Register ``callback`` under a fresh attribute request id and return that id."""
        with self._lock:
            self.__attr_request_number += 1
            self._attr_request_dict.update({self.__attr_request_number: callback})
            attr_request_number = self.__attr_request_number
        return attr_request_number

    def __timeout_check(self):
        """Watchdog loop: fire pending callbacks whose deadline has passed.

        Runs until ``do_run`` is set to False on the thread object (see
        disconnect). Deadline items arrive in FIFO order with monotonically
        increasing deadlines, so waiting on the head item is sufficient.
        """
        t = threading.current_thread()
        while getattr(t, "do_run", True):
            try:
                try:
                    item = self.__timeout_queue.get(False)
                except queue.Empty:
                    time.sleep(0.1)
                    continue
                if item is not None:
                    # Sleep-poll until this item's deadline (or shutdown).
                    while getattr(t, "do_run", True):
                        current_ts_in_millis = int(round(time.time() * 1000))
                        if current_ts_in_millis > item["ts"]:
                            break
                        else:
                            time.sleep(0.1)
                    with self._lock:
                        callback = None
                        if item.get("attribute_request_id"):
                            if self._attr_request_dict.get(item["attribute_request_id"]):
                                callback = self._attr_request_dict.pop(item["attribute_request_id"])
                        elif item.get("rpc_request_id"):
                            if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
                                callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
                    # Invoke outside the lock so user callbacks can safely
                    # call back into this client without deadlocking.
                    if callback is not None:
                        callback(None, TBTimeoutException("Timeout while waiting for reply from ThingsBoard!"))
                else:
                    time.sleep(0.1)
            except Exception as e:
                log.warning(e)
| Tknika/kura-thingsboard-gateway | src/tb_mqtt_client/tb_device_mqtt.py | tb_device_mqtt.py | py | 15,987 | python | en | code | 2 | github-code | 13 |
36553651643 | import os
from typing import Union
from omegaconf import DictConfig
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
import hydra
from hydra.core.config_store import ConfigStore
import wandb
def flatten_dict(
    input_dict: Union[dict, DictConfig],
    separator: str = '_',
    prefix: str = ''
):
    """Flatten a nested dict/DictConfig into a single-level dict.

    Nested keys are joined with *separator*; used when logging configs to
    wandb.

    :param input_dict: Nested mapping, or a leaf value during recursion.
    :param separator: String inserted between joined key segments.
    :param prefix: Key accumulated so far (internal to the recursion).
    :return: Flat ``{joined_key: leaf_value}`` dict.
    """
    if isinstance(input_dict, DictConfig):
        input_dict = dict(input_dict)
    if not isinstance(input_dict, dict):
        # Leaf value: keyed by whatever prefix has accumulated.
        return {prefix: input_dict}
    flat = {}
    for outer_key, value in input_dict.items():
        for inner_key, leaf in flatten_dict(value, separator, outer_key).items():
            flat[prefix + separator + inner_key if prefix else inner_key] = leaf
    return flat
def register_config(configs_dict: Union[dict, DictConfig]) -> None:
    """Register every named config node with hydra's ConfigStore singleton."""
    store = ConfigStore.instance()
    for config_name, config_node in configs_dict.items():
        store.store(name=config_name, node=config_node)
def get_optimizer_element(
    opt_cfg: DictConfig, lr_sch_cfg: DictConfig,
):
    """Build an optimizer (and optional LR scheduler) from hydra configs.

    :param opt_cfg: Optimizer config with ``name``, ``learning_rate`` and
        ``other_kwargs``.
    :param lr_sch_cfg: LR scheduler config with ``name`` and ``kwargs``,
        or None for a constant learning rate.
    :return: Tuple ``(optimizer, scheduler)``; scheduler is None when no
        schedule was configured.
    :raises NotImplementedError: For an unknown scheduler or optimizer name.
    """
    scheduler = None
    # Set up the learning-rate scheduler (if any).
    if lr_sch_cfg is None:
        pass
    elif lr_sch_cfg.name == "LinearWarmupLRSchedule":
        scheduler = LinearWarmupLRSchedule(
            **lr_sch_cfg.kwargs
        )
    else:
        # Include the offending name in the error (the old f-string had no
        # placeholder and gave no hint about what was unsupported).
        raise NotImplementedError(f"Not supported lr_scheduler: {lr_sch_cfg.name}")
    # A schedule object replaces the scalar learning rate when present.
    lr = scheduler if scheduler is not None else opt_cfg.learning_rate
    # Set up the optimizer via a name -> constructor dispatch table.
    optimizer_factories = {
        "RectifiedAdam": tfa.optimizers.RectifiedAdam,
        "SGD": tf.optimizers.SGD,
        "AdamP": tfa.optimizers.AdamP,
        "Adam": tf.optimizers.Adam,
        "RMSprop": tf.optimizers.RMSprop,
    }
    factory = optimizer_factories.get(opt_cfg.name)
    if factory is None:
        raise NotImplementedError(f"Not supported optimizer: {opt_cfg.name}")
    optimizer = factory(learning_rate=lr, **opt_cfg.other_kwargs)
    return optimizer, scheduler
def get_callbacks(log_cfg: DictConfig):
    """Build keras callbacks from ``log_cfg.callbacks``.

    :param log_cfg: Logging config whose ``callbacks`` maps callback names
        to their keyword-argument dicts.
    :return: List of instantiated keras callbacks.
    :raises NotImplementedError: For an unrecognised callback name.
    """
    callback_classes = {
        "TensorBoard": tf.keras.callbacks.TensorBoard,
        "EarlyStopping": tf.keras.callbacks.EarlyStopping,
    }
    callbacks = []
    for name, kwargs_dict in log_cfg.callbacks.items():
        if name not in callback_classes:
            raise NotImplementedError(f"invalid callbacks_cfg name {name}")
        callbacks.append(callback_classes[name](**kwargs_dict))
    return callbacks
class LinearWarmupLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup schedule: ramp the LR up to ``lr_peak``, then hold it.

    At step ``s`` (0-based) the rate is ``lr_peak * (s + 1) / warmup_end_steps``
    until ``warmup_end_steps``, and ``lr_peak`` afterwards.
    """

    def __init__(
        self,
        lr_peak: float,
        warmup_end_steps: int,
    ):
        """
        :param lr_peak: Learning rate reached at the end of warmup.
        :param warmup_end_steps: Number of steps the warmup ramp lasts.
        """
        super().__init__()
        self.lr_peak = lr_peak
        self.warmup_end_steps = warmup_end_steps

    def __call__(self, step):
        # Ramp linearly from lr_peak/warmup_end_steps up to lr_peak,
        # then stay constant at lr_peak.
        step_float = tf.cast(step, tf.float32)
        warmup_step = tf.cast(self.warmup_end_steps, tf.float32)
        lr_peak = tf.cast(self.lr_peak, tf.float32)
        return tf.cond(
            step_float < warmup_step,
            lambda: lr_peak * ((step_float + 1) / warmup_step),
            lambda: lr_peak
        )

    def get_config(self):
        # Required so the schedule can be (de)serialized by Keras; the base
        # class raises NotImplementedError without it.
        return {"lr_peak": self.lr_peak, "warmup_end_steps": self.warmup_end_steps}
# @tf.keras.utils.register_keras_serializable(package="Addons")
class AdamP(tf.keras.optimizers.Optimizer):
    """AdamP optimizer: Adam with decoupled weight decay plus a projection
    step that removes the radial (norm-increasing) component of the update
    for scale-invariant weights.

    Code is from https://github.com/taki0112/AdamP-Tensorflow/blob/master/adamp_tf.py with modifications.

    NOTE(review): relies on TF private modules (ops.convert_to_tensor_v2,
    math_ops/state_ops/array_ops) — verify against the pinned TF version.
    """
    _HAS_AGGREGATE_GRAD = True
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-8,
        weight_decay=0.0,
        delta=0.1, wd_ratio=0.1, nesterov=False,
        name='AdamP',
        **kwargs
    ):
        # delta: cosine-similarity threshold for enabling the projection;
        # wd_ratio: factor applied to weight decay when projection triggers.
        super(AdamP, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('delta', delta)
        self._set_hyper('wd_ratio', wd_ratio)
        self.epsilon = epsilon or backend_config.epsilon()
        self.weight_decay = weight_decay
        self.nesterov = nesterov
    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        # Separate for-loops to respect the ordering of slot variables from v1.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        for var in var_list:
            self.add_slot(var, 'p')
    def _prepare_local(self, var_device, var_dtype, apply_state):
        # Precompute per-(device, dtype) coefficients: beta powers, bias
        # corrections and the projection hyperparameters.
        super(AdamP, self)._prepare_local(var_device, var_dtype, apply_state)
        local_step = math_ops.cast(self.iterations + 1, var_dtype)
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        beta_1_power = math_ops.pow(beta_1_t, local_step)
        beta_2_power = math_ops.pow(beta_2_t, local_step)
        lr = apply_state[(var_device, var_dtype)]['lr_t']
        bias_correction1 = 1 - beta_1_power
        bias_correction2 = 1 - beta_2_power
        delta = array_ops.identity(self._get_hyper('delta', var_dtype))
        wd_ratio = array_ops.identity(self._get_hyper('wd_ratio', var_dtype))
        apply_state[(var_device, var_dtype)].update(
            dict(
                lr=lr,
                epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
                weight_decay=ops.convert_to_tensor_v2(self.weight_decay, var_dtype),
                beta_1_t=beta_1_t,
                beta_1_power=beta_1_power,
                one_minus_beta_1_t=1 - beta_1_t,
                beta_2_t=beta_2_t,
                beta_2_power=beta_2_power,
                one_minus_beta_2_t=1 - beta_2_t,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                delta=delta,
                wd_ratio=wd_ratio))
    def set_weights(self, weights):
        params = self.weights
        # If the weights are generated by Keras V1 optimizer, it includes vhats
        # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[:len(params)]
        super(AdamP, self).set_weights(weights)
    def _resource_apply_dense(self, grad, var, apply_state=None):
        # Dense-gradient update: Adam moments, optional Nesterov lookahead,
        # projection for >1-D variables, then decoupled weight decay.
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
        m_t = state_ops.assign(m, m * coefficients['beta_1_t'] + m_scaled_g_values, use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
        v_t = state_ops.assign(v, v * coefficients['beta_2_t'] + v_scaled_g_values, use_locking=self._use_locking)
        denorm = (math_ops.sqrt(v_t) / math_ops.sqrt(coefficients['bias_correction2'])) + coefficients['epsilon']
        step_size = coefficients['lr'] / coefficients['bias_correction1']
        if self.nesterov:
            perturb = (coefficients['beta_1_t'] * m_t + coefficients['one_minus_beta_1_t'] * grad) / denorm
        else:
            perturb = m_t / denorm
        # Projection
        wd_ratio = 1
        if len(var.shape) > 1:
            perturb, wd_ratio = self._projection(var, grad, perturb, coefficients['delta'], coefficients['wd_ratio'], coefficients['epsilon'])
        # Weight decay
        if self.weight_decay > 0:
            var = state_ops.assign(var, var * (1 - coefficients['lr'] * coefficients['weight_decay'] * wd_ratio), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, step_size * perturb, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # Sparse-gradient variant of the dense update above, using
        # scatter-adds so only the touched indices are modified.
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        """
        Adam
        """
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
        m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
        v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        denorm = (math_ops.sqrt(v_t) / math_ops.sqrt(coefficients['bias_correction2'])) + coefficients['epsilon']
        step_size = coefficients['lr'] / coefficients['bias_correction1']
        if self.nesterov:
            p_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
            perturb = m_t * coefficients['beta_1_t']
            perturb = self._resource_scatter_add(perturb, indices, p_scaled_g_values) / denorm
        else:
            perturb = m_t / denorm
        # Projection
        wd_ratio = 1
        if len(var.shape) > 1:
            perturb, wd_ratio = self._projection(var, grad, perturb, coefficients['delta'], coefficients['wd_ratio'], coefficients['epsilon'])
        # Weight decay
        if self.weight_decay > 0:
            var = state_ops.assign(var, var * (1 - coefficients['lr'] * coefficients['weight_decay'] * wd_ratio), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, step_size * perturb, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _channel_view(self, x):
        # Flatten to (channels, rest) for per-channel cosine similarity.
        return array_ops.reshape(x, shape=[x.shape[0], -1])
    def _layer_view(self, x):
        # Flatten to a single row for whole-layer cosine similarity.
        return array_ops.reshape(x, shape=[1, -1])
    def _cosine_similarity(self, x, y, eps, view_func):
        # |<x, y>| / (||x|| * ||y||) per row of the chosen view.
        x = view_func(x)
        y = view_func(y)
        x_norm = math_ops.euclidean_norm(x, axis=-1) + eps
        y_norm = math_ops.euclidean_norm(y, axis=-1) + eps
        dot = math_ops.reduce_sum(x * y, axis=-1)
        return math_ops.abs(dot) / x_norm / y_norm
    def _projection(self, var, grad, perturb, delta, wd_ratio, eps):
        # If grad and var are nearly orthogonal (per channel, else per layer),
        # the weight is treated as scale-invariant: project the update onto
        # the tangent space of var and shrink weight decay by wd_ratio.
        # channel_view
        cosine_sim = self._cosine_similarity(grad, var, eps, self._channel_view)
        cosine_max = math_ops.reduce_max(cosine_sim)
        compare_val = delta / math_ops.sqrt(math_ops.cast(self._channel_view(var).shape[-1], dtype=delta.dtype))
        perturb, wd = control_flow_ops.cond(pred=cosine_max < compare_val,
                                            true_fn=lambda : self.channel_true_fn(var, perturb, wd_ratio, eps),
                                            false_fn=lambda : self.channel_false_fn(var, grad, perturb, delta, wd_ratio, eps))
        return perturb, wd
    def channel_true_fn(self, var, perturb, wd_ratio, eps):
        # Per-channel projection: remove the component of perturb along var.
        expand_size = [-1] + [1] * (len(var.shape) - 1)
        var_n = var / (array_ops.reshape(math_ops.euclidean_norm(self._channel_view(var), axis=-1), shape=expand_size) + eps)
        perturb -= var_n * array_ops.reshape(math_ops.reduce_sum(self._channel_view(var_n * perturb), axis=-1), shape=expand_size)
        wd = wd_ratio
        return perturb, wd
    def channel_false_fn(self, var, grad, perturb, delta, wd_ratio, eps):
        # Channel test failed: retry the orthogonality test on the whole layer.
        cosine_sim = self._cosine_similarity(grad, var, eps, self._layer_view)
        cosine_max = math_ops.reduce_max(cosine_sim)
        compare_val = delta / math_ops.sqrt(math_ops.cast(self._layer_view(var).shape[-1], dtype=delta.dtype))
        perturb, wd = control_flow_ops.cond(cosine_max < compare_val,
                                            true_fn=lambda : self.layer_true_fn(var, perturb, wd_ratio, eps),
                                            false_fn=lambda : self.identity_fn(perturb))
        return perturb, wd
    def layer_true_fn(self, var, perturb, wd_ratio, eps):
        # Whole-layer projection: remove the component of perturb along var.
        expand_size = [-1] + [1] * (len(var.shape) - 1)
        var_n = var / (array_ops.reshape(math_ops.euclidean_norm(self._layer_view(var), axis=-1), shape=expand_size) + eps)
        perturb -= var_n * array_ops.reshape(math_ops.reduce_sum(self._layer_view(var_n * perturb), axis=-1), shape=expand_size)
        wd = wd_ratio
        return perturb, wd
    def identity_fn(self, perturb):
        # No projection: keep the update and the full weight decay.
        wd = 1.0
        return perturb, wd
    def get_config(self):
        # Serialize hyperparameters for keras optimizer (de)serialization.
        config = super(AdamP, self).get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'delta': self._serialize_hyperparameter('delta'),
            'wd_ratio': self._serialize_hyperparameter('wd_ratio'),
            'epsilon': self.epsilon,
            'weight_decay': self.weight_decay,
            'nesterov': self.nesterov
        })
        return config
| reasonmii/ref_DataScience | fastcampus_deeplearning/config_utils_tf.py | config_utils_tf.py | py | 14,339 | python | en | code | 14 | github-code | 13 |
10552001357 | '''
http://qiita.com/nacasora/items/cf0e27d38b09654cf701
'''
import bpy
import numpy as np
from PIL import Image, ImageFilter
# blimg = bpy.data.images['Lenna.png']
# width, height = blimg.size
'''access data of a loaded image'''
# width, height = blimg.size
# print(width, height)
'''set and get pixil info'''
# # get pixil info
# # R,G,B,A 1 pixil
# print(blimg.pixels[0], blimg.pixels[1], blimg.pixels[2], blimg.pixels[3])
# # R,G,B,A 2 pixil
# print(blimg.pixels[4], blimg.pixels[5], blimg.pixels[6], blimg.pixels[7])
#
# # set pixil info
# blimg.pixels[0] = 1.0
# blimg.pixels[1] = 0.0
# blimg.pixels[2] = 0.0
# blimg.pixels[3] = 1.0
# # => set red of 1 pixil
'''set and get pixil array'''
# # get array all pixil info
# pxs = list(blimg.pixels[:])
#
# for i in range(0, width*height*4, 4):
# pxs[i] = 1.0 # R
# pxs[i+1] = 0.0 # G
# pxs[i+2] = 0.0 # B
# pxs[i+3] = 1.0 # A
#
# # set all array to add process
# blimg.pixels = pxs
'''set and get pixil array-'''
# pxs0 = blimg.pixels[:]
# pxs = [0] * len(pxs0)
# # or
#pxs = [0] * (width * height * 4)
#
# for i in range(0, width*height*4, 4):
# pxs[i] = pxs0[i] * 0.5 # R
# pxs[i+1] = pxs0[i+1] * 0.5 # G
# pxs[i+2] = pxs0[i+2] * 0.5 # B
# pxs[i+3] = pxs0[i+3] # A
#
# blimg.pixels = pxs
'''set pixil value of coodenate'''
# pxs = list(blimg.pixels[:])
#
# for y in range(10, 40):
# for x in range(10, 20):
# # conform converse image of x,y
# if 0<=x and x<width and 0<=y and y<height:
# i = (y*width+x)*4
# pxs[i] = 1.0 # R
# pxs[i+1] = 1.0 # G
# pxs[i+2] = 1.0 # B
# pxs[i+3] = 1.0 # A
#
# blimg.pixels = pxs
'''BoxBlur'''
# # <!> do 1.0 alpha value image
# pxs0 = blimg.pixels[:]
# pxs = [0] * len(pxs0)
#
# def inside(x,y):
# return 0<=x and x<width and 0<=y and y<height
#
# size = 5
# for y in range(height):
# for x in range(width):
# i = (y*width+x)*4
# r=0
# g=0
# b=0
# n=0
# for v in range(y-size, y+size+1):
# for u in range(x-size, x+size+1):
# if inside(u,v):
# j = (v*width+u)*4
# r += pxs0[j]
# g += pxs0[j+1]
# b += pxs0[j+2]
# n += 1
# pxs[i] = r/n
# pxs[i+1] = g/n
# pxs[i+2] = b/n
# pxs[i+3] = 1.0
#
# blimg.pixels = pxs
'''output another name'''
# imagename = 'BPY Output.png'
# width = 32
# height = 32
# blimg = bpy.data.images.new(imagename, width, height, alpha=True)
# blimg.pixels = [1.0]*(width*height*4)
'''array pixil to transform numpy array'''
# arr = np.array(blimg.pixels[:])
'''do active NumPy'''
# # substitute 0 all R element
# arr[0::4] = 0.0
#
# blimg2 = bpy.data.images.new('B', width, height, alpha=True)
# blimg2.pixels = arr
'''again Box Blur'''
# W, H = blimg.size
#
# a = np.array(blimg.pixels[:])
# b = np.ndarray(len(a))
# a.resize(H, W*4)
# b.resize(H, W*4)
#
# a_R = a[::, 0::4]
# a_G = a[::, 1::4]
# a_B = a[::, 2::4]
# b_R = b[::, 0::4]
# b_G = b[::, 1::4]
# b_B = b[::, 2::4]
#
# size = 5
# for y in range(H):
# y0 = max(0, y-size)
# y1 = min(H-1, y+size)
# for x in range(W):
# x0 = max(0, x-size)
# x1 = min(W-1, x+size)
# n = (y1-y0)*(x1-x0)
# b_R[y][x] = np.ndarray.sum(a_R[y0:y1, x0:x1]) / n
# b_G[y][x] = np.ndarray.sum(a_G[y0:y1, x0:x1]) / n
# b_B[y][x] = np.ndarray.sum(a_B[y0:y1, x0:x1]) / n
#
# # Alpha == 1.0
# b[::, 3::4] = 1.0
# b = b.flatten()
#
# blimg2 = bpy.data.images.new('B', W, H, alpha=True)
# blimg2.pixels = b
'''use to transform path file'''
def save_as_png(img, path):
    """Save *img* to *path* as an RGBA PNG without permanently changing render settings.

    Temporarily switches the scene's output format to PNG/RGBA, writes the
    image, then restores the previous settings — even if save_render() raises
    (the original version left the settings clobbered on error).
    """
    s = bpy.context.scene.render.image_settings
    prev, prev2 = s.file_format, s.color_mode
    s.file_format, s.color_mode = 'PNG', 'RGBA'
    try:
        img.save_render(path)
    finally:
        # Restore the user's render settings no matter what happened above.
        s.file_format, s.color_mode = prev, prev2
# Round-trip a Blender image through PIL via a temporary PNG on disk.
blimg = bpy.data.images['Lenna.png']
W,H = blimg.size
temppath = 'd:/temp/bpytemp.png'
# Save to a temporary file (Blender side)
save_as_png(blimg, temppath)
# Load the temporary file (PIL side)
pimg = Image.open(temppath)
# Apply a PIL filter (Gaussian blur)
pimg2 = pimg.filter(ImageFilter.GaussianBlur(radius=5))
# Save the filtered result back to the temporary file (PIL side)
pimg2.save(temppath)
# Load the temporary file back into Blender as a new image datablock
blimg2 = bpy.data.images.load(temppath)
blimg2.name = 'B'
| UE4yochi/Blender_python | blender-image-process.py | blender-image-process.py | py | 4,128 | python | en | code | 0 | github-code | 13 |
# Count upper- and lower-case characters in a line read from stdin.
txt = input()
# BUG FIX: the original iterated `for i in range[len(txt)]` — square brackets
# subscript the range *type* and raise TypeError. Iterate the characters directly.
u = 0
l = 0
for ch in txt:
    if ch.isupper():
        u += 1
    if ch.islower():
        l += 1
print("Uppercase letters:", u)
print("Lowercase letters:", l) | AbdullaAzadov/PP2 | week6/built_in/2.py | 2.py | py | 194 | python | en | code | 0 | github-code | 13 |
24703636654 | # fits2wfabc.py
# converts fits data file to wfabc data file
# 2019-01-03 first version created - output to stdout
import sys
import pandas as pd
# load and pre-processing
def LoadFITSFile(my_filename, population_size):
    """Load a tab-separated FITS frequency table and keep only mutant rows.

    Rows with allele == 1 are retained; the 'freq' column is converted into an
    integer 'copy_number' (freq * population_size, truncated toward zero), and
    a 'pos' column is guaranteed (filled with -1 when the input lacks one).
    """
    table = pd.read_table(my_filename)
    # Keep only the mutant allele rows (allele column == 1).
    mutants = table[table["allele"] == 1].copy()
    # WFABC works with copy numbers, not frequencies.
    mutants["copy_number"] = (mutants["freq"] * population_size).astype(int)
    if "pos" not in mutants.columns:
        mutants["pos"] = -1
    return mutants
def FITSData2WFABCData(fits_df, population_size):
    """Print *fits_df* to stdout in WFABC input format.

    Layout: a "<num_loci> <num_timepoints>" header, the generation list, then
    for each position one line of sample sizes followed by one line of copy
    numbers. Aborts with an error message when a position does not cover
    every generation.
    """
    generations = fits_df["gen"].unique()
    positions = fits_df["pos"].unique()
    n_gens = len(generations)
    # Every timepoint gets the same sample size (the population size).
    sizes_line = ",".join([str(population_size)] * n_gens)
    print(str(len(positions)) + " " + str(n_gens))
    print(",".join(map(str, generations)))
    for current_position in positions:
        copies = fits_df[fits_df["pos"] == current_position]["copy_number"].tolist()
        if len(copies) != n_gens:
            print("Error: number of generations is " + str(n_gens) + " but copy number list size is " + str(len(copies)))
            return
        print(sizes_line)
        print(",".join(map(str, copies)))
# Command-line entry point: fits2wfabc.py <fits_data> <population_size>
if len(sys.argv) < 3:
    print( "Syntax: fits2wfabc.py fits_data population_size" )
    # sys.exit is the supported way to abort a program; quit() is a site-module
    # convenience meant only for the interactive interpreter.
    sys.exit(1)
data_filename = sys.argv[1]   # path to the FITS frequency table
pop_size = int(sys.argv[2])   # population size used to scale frequencies
fits_df = LoadFITSFile( data_filename, pop_size )
FITSData2WFABCData( fits_df, pop_size )
| SternLabTAU/SternLab | FITS/fits2wfabc.py | fits2wfabc.py | py | 1,712 | python | en | code | 1 | github-code | 13 |
6517022871 | from typing import Union, Dict, Type
from pydantic import BaseModel
class CatBaseSchema(BaseModel):
    """Validation schema for a cat payload (input shape, no database id)."""
    name: str
    age: int
    isNice: bool
class CatDto(CatBaseSchema):
    """Cat shape returned to clients: the base fields plus the stored id."""
    id: str
class ToyBaseSchema(BaseModel):
    """Validation schema for a toy payload."""
    title: str
    description: str
    price: int
class OwnerBaseSchema(BaseModel):
    """Validation schema for an owner payload."""
    email: str
    catsNumber: int
# Maps a collection name to the schema class used to validate its documents.
collection_models: Dict[str, Type] = {
    "cats": CatBaseSchema,
    "toys": ToyBaseSchema,
    "owners": OwnerBaseSchema
}
# Union of every registered schema, for endpoints that accept any model kind.
types = Union[tuple(collection_models.values())]
| Davy5Jones/Python-FastApi | app/schemes.py | schemes.py | py | 523 | python | en | code | 0 | github-code | 13 |
42226156866 | ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_volume_facts
short_description: Retrieve facts about the OneView Volumes.
description:
- Retrieve facts about the Volumes from OneView.
version_added: "2.5"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author: "Mariana Kreisig (@marikrg)"
options:
name:
description:
- Volume name.
required: false
options:
description:
- "List with options to gather additional facts about Volume and related resources.
Options allowed:
- C(attachableVolumes)
- C(extraManagedVolumePaths)
- C(snapshots). For this option, you may provide a name."
required: false
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Volumes
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
- debug: var=storage_volumes
- name: Gather paginated, filtered and sorted facts about Volumes
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 2
sort: 'name:descending'
filter: "provisionType='Thin'"
- debug: var=storage_volumes
- name: "Gather facts about all Volumes, the attachable volumes managed by the appliance and the extra managed
storage volume paths"
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- attachableVolumes # optional
- extraManagedVolumePaths # optional
- debug: var=storage_volumes
- debug: var=attachable_volumes
- debug: var=extra_managed_volume_paths
- name: Gather facts about a Volume by name with a list of all snapshots taken
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "{{ volume_name }}"
options:
- snapshots # optional
- debug: var=storage_volumes
- debug: var=snapshots
- name: "Gather facts about a Volume with one specific snapshot taken"
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "{{ volume_name }}"
options:
- snapshots: # optional
name: "{{ snapshot_name }}"
- debug: var=storage_volumes
- debug: var=snapshots
'''
RETURN = '''
storage_volumes:
description: Has all the OneView facts about the Volumes.
returned: Always, but can be null.
type: dict
attachable_volumes:
description: Has all the facts about the attachable volumes managed by the appliance.
returned: When requested, but can be null.
type: dict
extra_managed_volume_paths:
description: Has all the facts about the extra managed storage volume paths from the appliance.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class VolumeFactsModule(OneViewModule):
    """Ansible module gathering facts about OneView Volumes and related resources."""

    def __init__(self):
        argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
        super(VolumeFactsModule, self).__init__(additional_arg_spec=argument_spec)
        self.set_resource_object(self.oneview_client.volumes)

    def execute_module(self):
        """Collect the requested facts and return them in Ansible's result format."""
        ansible_facts = {}
        # 'networks' is not a valid filter for the volume queries below, so it
        # is removed while querying and restored afterwards.
        networks = self.facts_params.pop('networks', None)
        if self.module.params.get('name'):
            ansible_facts['storage_volumes'] = self.resource_client.get_by('name', self.module.params['name'])
            ansible_facts.update(self._gather_facts_about_one_volume(ansible_facts['storage_volumes']))
        else:
            ansible_facts['storage_volumes'] = self.resource_client.get_all(**self.facts_params)
        if networks:
            self.facts_params['networks'] = networks
        ansible_facts.update(self._gather_facts_from_appliance())
        return dict(changed=False, ansible_facts=ansible_facts)

    def _gather_facts_from_appliance(self):
        """Collect appliance-wide facts requested via the 'options' parameter."""
        facts = {}
        if self.options:
            if self.options.get('extraManagedVolumePaths'):
                extra_managed_volume_paths = self.resource_client.get_extra_managed_storage_volume_paths()
                facts['extra_managed_volume_paths'] = extra_managed_volume_paths
            if self.options.get('attachableVolumes'):
                query_params = self.options['attachableVolumes']
                # The option may be a bare flag; only a dict carries query params.
                # (idiom fix: was `type(query_params) is not dict`)
                if not isinstance(query_params, dict):
                    query_params = {}
                if 'connections' in query_params:
                    # The REST API expects 'connections' serialized as a string.
                    query_params['connections'] = str(query_params['connections'])
                attachable_volumes = self.resource_client.get_attachable_volumes(**query_params)
                facts['attachable_volumes'] = attachable_volumes
        return facts

    def _gather_facts_about_one_volume(self, volumes):
        """Collect snapshot facts for a single named volume, when requested."""
        facts = {}
        if self.options.get('snapshots') and len(volumes) > 0:
            options_snapshots = self.options['snapshots']
            # A dict with a 'name' key selects one snapshot; anything else lists all.
            if isinstance(options_snapshots, dict) and 'name' in options_snapshots:
                facts['snapshots'] = self.current_resource.get_snapshot_by('name', options_snapshots['name'])
            else:
                facts['snapshots'] = self.current_resource.get_snapshots()
        return facts
return facts
def main():
    """Module entry point: run the fact-gathering module."""
    VolumeFactsModule().run()
if __name__ == '__main__':
    main()
| HewlettPackard/oneview-ansible | library/oneview_volume_facts.py | oneview_volume_facts.py | py | 5,687 | python | en | code | 103 | github-code | 13 |
28135603304 | import pyexcel
import psycopg2
# NOTE(review/security): database credentials are hard-coded and committed to
# the repository — they should be rotated and loaded from the environment
# (e.g. a DATABASE_URL variable) instead of living in source control.
connection = psycopg2.connect( dbname= 'ddm0bfn9sojtr1',
                            host= 'ec2-54-161-208-31.compute-1.amazonaws.com',
                            user= 'bposbinnkvtuhx',
                            password= '25b6386f513ee2e12e6ab8b2f2b69a12d656224a0ddb893d122273a5033932dc',
                            port= '5432')
if __name__ == "__main__":
    # Export (username, balance) rows with a non-null username to balance.xlsx.
    cursor = connection.cursor()
    cursor.execute("SELECT username, balance FROM clients")
    array = cursor.fetchall()
    cursor.close()
    data = [item for item in array if item[0] is not None]
    try:
        # Opening in "r+" only probes that the workbook is not locked (e.g. open
        # in Excel); close the probe handle immediately instead of leaking it.
        with open("./balance.xlsx", "r+"):
            pass
        pyexcel.save_as(array= data, dest_file_name="balance.xlsx")
        print("balance.xlsx обновлён")
    except IOError:
        print("Нельзя обновить файл, пока он открыт. Закрой balance.xlsx и попробуй снова.")
| bat-py/pantera | update_balace.py | update_balace.py | py | 1,004 | python | en | code | 0 | github-code | 13 |
26414769762 | from ursina import *
class TestSliderVariable(Slider):
    """Labelled slider for a test variable, disabled so it only displays its value."""
    def __init__(self, text: str, min, max, default, step):
        super().__init__(
            min=min,
            max=max,
            default=default,
            text=text,
            step=step,
            scale=(1, 1)
        )
        # Keep the label so callers can identify this slider later.
        self.name = text
        self.disable()
| GDcheeriosYT/Gentrys-Quest-Ursina | Screens/Testing/TestSliderVariable.py | TestSliderVariable.py | py | 345 | python | en | code | 1 | github-code | 13 |
class Node:
    """One cell of a singly linked list: a payload plus a link to the next cell."""
    def __init__(self, data):
        self.data = data
        self.ref = None  # next node, or None at the tail


class LinkedList:
    """Minimal singly linked list supporting prepend and one-line printing."""
    def __init__(self):
        self.head = None  # empty list

    def add_begin(self, data):
        """Prepend *data* as the new head in O(1)."""
        fresh = Node(data)
        fresh.ref, self.head = self.head, fresh

    def printLL(self):
        """Print every element as 'value ---> ' on one line, head first."""
        cursor = self.head
        while cursor is not None:
            print(cursor.data, "--->", end=' ')
            cursor = cursor.ref
# Demo: prepend 100, 200, 300 (printed newest-first: 300 ---> 200 ---> 100).
LL1 = LinkedList()
for value in (100, 200, 300):
    LL1.add_begin(value)
LL1.printLL()
| itzzyashpandey/python-data-science | dsa/linkedlist.py | linkedlist.py | py | 526 | python | en | code | 0 | github-code | 13 |
8915706550 | from adapt.intent import IntentBuilder
from mycroft.util.log import getLogger
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.skills.context import *
LOGGER = getLogger(__name__)
class SillyNameMakerSkill(MycroftSkill):
    """Conversational skill that builds a silly name from a lucky number and a color.

    Dialogue flow, driven by Adapt contexts:
    start intent -> ask for a number (SillyNameMakerContext set) ->
    ask for a color (NumberContext set) -> speak the combined result.
    """
    def __init__(self):
        super(SillyNameMakerSkill, self).__init__(name="SillyNameMakerSkill")
    @intent_handler(IntentBuilder("SillyNameMakerIntent").require("SillyNameMakerStart").build())
    @adds_context('SillyNameMakerContext')
    def handle_silly_name_maker_start(self, message):
        """Start the dialogue and prompt the user for their lucky number."""
        self.speak_dialog("hello", expect_response=True)
    @intent_handler(IntentBuilder("NumberIntent").require("LuckyNumber").require("SillyNameMakerContext").build())
    @adds_context('NumberContext')
    def handle_number(self, message):
        """Store the lucky number and ask for the favorite color."""
        self.number = message.data.get("LuckyNumber")
        self.speak_dialog("question.color", expect_response=True)
        LOGGER.debug(self.number)
    @intent_handler(IntentBuilder("ColorIntent").require("FavoriteColor").require("NumberContext").build())
    @removes_context('NumberContext')
    @removes_context('SillyNameMakerContext')
    def handle_color(self, message):
        """Store the color, speak the silly-name result and end the dialogue."""
        self.color = message.data.get("FavoriteColor")
        self.speak_dialog("result", data={"favorite_color": self.color, "lucky_number": self.number})
        LOGGER.debug(self.color)
    @removes_context('NumberContext')
    @removes_context('SillyNameMakerContext')
    def stop(self):
        """Abort: clear both contexts so a stale dialogue cannot continue."""
        pass
pass
def create_skill():
    """Mycroft loader entry point: instantiate the skill."""
    return SillyNameMakerSkill()
| RHackrid/deviloper-silly-name-maker | __init__.py | __init__.py | py | 1,541 | python | en | code | 0 | github-code | 13 |
70196308817 | from read_nmnist import *
from brian2 import us, ms, second
from dvs_utils import Plotter2d, DVSmonitor
import cv2
import os
import matplotlib.pyplot as plt
# Load one N-MNIST recording (event-camera data).
a = read_dataset('data/00004.bin')
# Get events from data; timestamps are shifted so the recording starts at t=0.
ev_x = a.data.x
ev_y = a.data.y
ev_t = a.data.ts - a.data.ts[0]
ev_p = a.data.p.astype(int)  # polarity as int
# Frame size of the input data
frame_height = a.height
frame_width = a.width
# Save events as images - similar to the DVS exercise #
dvs_monitor = DVSmonitor(ev_x, ev_y, ev_t, ev_p, unit=us)
# Choose plotting parameters.
# You have to select these in such a way such that you can recognise
# the digits once you save them as frames.
plot_dt = 100000      # bin width passed to plot3d below (in us, per the unit= above)
filtersize = 1        # multiplier on plot_dt for the temporal smoothing filter
xy_dimensions_dvs = [frame_height, frame_width]
start_end_times = [0, 10]  # plot range in seconds
dvs_plotter = Plotter2d(dvs_monitor, dims=(xy_dimensions_dvs[0], xy_dimensions_dvs[1]),
                        plotrange=(start_end_times[0] * second, start_end_times[1] * second))
# Save event stream as numpy arrays.
# video_dvs is the numpy-array version of the events (frames, height, width).
video_dvs = dvs_plotter.plot3d(plot_dt=plot_dt * us, filtersize=plot_dt * us * filtersize)
_, x_dim, y_dim = video_dvs.shape
# Save the frames to disk so you can check the digits are recognisable.
save_path = 'frames'
if not os.path.exists(save_path):
    os.mkdir(save_path)
print('Saving Frames...')
for iFrame in range(len(video_dvs)):
    filename = save_path + '/frame' + str(iFrame) + '.png'
    cv2.imwrite(filename, video_dvs[iFrame])
for i in range(10):
plt.imshow(video_dvs[i]) | errorplaye/P-S-Spiking-Neural-Network | Event_To_Frame/load_data.py | load_data.py | py | 1,550 | python | en | code | 0 | github-code | 13 |
33209188089 | import numpy as np
import matplotlib.pyplot as plt
import numpy.polynomial.polynomial as poly
from helper import displayEpipolarF, calc_epi_error, toHomogenous, _singularize, refineF
# Insert your package here
'''
Q2.2: Seven Point Algorithm for calculating the fundamental matrix
Input: pts1, 7x2 Matrix containing the corresponding points from image1
pts2, 7x2 Matrix containing the corresponding points from image2
M, a scalar parameter computed as max (imwidth, imheight)
Output: Farray, a list of estimated 3x3 fundamental matrixes.
HINTS:
(1) Normalize the input pts1 and pts2 scale paramter M.
(2) Setup the seven point algorithm's equation.
(3) Solve for the least square solution using SVD.
(4) Pick the last two colum vector of vT.T (the two null space solution f1 and f2)
(5) Use the singularity constraint to solve for the cubic polynomial equation of F = a*f1 + (1-a)*f2 that leads to
det(F) = 0. Sovling this polynomial will give you one or three real solutions of the fundamental matrix.
Use np.polynomial.polynomial.polyroots to solve for the roots
(6) Unscale the fundamental matrixes and return as Farray
'''
def sevenpoint(pts1, pts2, M):
    """Seven-point estimate of the fundamental matrix.

    pts1, pts2 are (7, 2) corresponding image points, M is the normalization
    scale (max image dimension). Returns a list of 1-3 candidate 3x3 F
    matrices (one per real root of the cubic singularity constraint).
    """
    Farray = []
    N = pts1.shape[0]
    # Normalization: scale coordinates by 1/M for numerical conditioning.
    pts1, pts2 = pts1/float(M), pts2/float(M)
    xcoords1, ycoords1 = pts1[:, 0], pts1[:, 1]
    xcoords2, ycoords2 = pts2[:, 0], pts2[:, 1]
    # Build the 9-column constraint matrix A: each row encodes x2^T F x1 = 0.
    cul0 = xcoords2 * xcoords1
    cul1 = xcoords2 * ycoords1
    cul2 = xcoords2
    cul3 = ycoords2 * xcoords1
    cul4 = ycoords2 * ycoords1
    cul5 = ycoords2
    cul6 = xcoords1
    cul7 = ycoords1
    cul8 = np.ones((N,), dtype=np.float32)
    A = np.stack((cul0, cul1, cul2, cul3, cul4, cul5, cul6, cul7, cul8), axis=1)
    # The 7x9 system has a 2D null space: take its basis F1, F2 from the SVD.
    _, _, Vt = np.linalg.svd(A)
    F1_vec, F2_vec = Vt[-1, :], Vt[-2, :] #(9,)
    F1, F2 = F1_vec.reshape(3, 3), F2_vec.reshape(3, 3)
    # Parameterize the null space as F(x) = x*a + b with a = F1-F2, b = F2,
    # so F(1) = F1 and F(0) = F2.
    a, b = F1-F2, F2
    funct = lambda x: np.linalg.det(x*a + b)
    # det(F(x)) is a cubic in x; recover its coefficients by evaluating the
    # determinant at x = 0, +-1, +-2 (central finite differences).
    c0 = funct(0)
    c1 = (2.0/3)*(funct(1)-funct(-1)) - (1.0/12)*(funct(2)-funct(-2))
    c3 = (1.0/12)*(funct(2) - funct(-2)) - (1.0/6)*(funct(1)-funct(-1))
    c2 = funct(1) - c0 - c1 - c3
    # Real roots of det(F(x)) = 0 give the rank-2 candidates.
    roots = poly.polyroots([c0, c1, c2, c3])
    # Unscale: F = T^T F_norm T, where T encodes the 1/M coordinate scaling.
    T = np.zeros((3, 3), dtype=np.float32)
    T[0, 0] = T[1, 1] = 1.0 / M
    T[2, 2] = 1.0
    for root in roots:
        F_norm = root*a + b
        F_norm = _singularize(F_norm)
        # F_norm = refineF(F_norm, pts1, pts2)
        F_final = T.transpose() @ F_norm @ T
        Farray.append(F_final)
    return Farray
if __name__ == "__main__":
    correspondence = np.load('data/some_corresp.npz') # Loading correspondences
    intrinsics = np.load('data/intrinsics.npz') # Loading the intrinsics of the camera
    K1, K2 = intrinsics['K1'], intrinsics['K2']
    pts1, pts2 = correspondence['pts1'], correspondence['pts2']
    im1 = plt.imread('data/im1.png')
    im2 = plt.imread('data/im2.png')
    # indices = np.arange(pts1.shape[0])
    # indices = np.random.choice(indices, 7, False)
    # Hand-picked 7 correspondences that give a good visual fit.
    indices = np.array([18, 19, 24, 54, 56, 82, 84])
    M = np.max([*im1.shape, *im2.shape])
    Farray = sevenpoint(pts1[indices, :], pts2[indices, :], M)
    # print(Farray)
    F = Farray[2]
    F /= F[2,2]
    # fundamental matrix must have rank 2!
    assert(np.linalg.matrix_rank(F) == 2)
    displayEpipolarF(im1, im2, F)
    # Simple Tests to verify your implementation:
    # Test out the seven-point algorithm by randomly sampling 7 points and finding the best solution.
    np.random.seed(1) #Added for testing, can be commented out
    pts1_homogenous, pts2_homogenous = toHomogenous(pts1), toHomogenous(pts2)
    max_iter = 500
    pts1_homo = np.hstack((pts1, np.ones((pts1.shape[0], 1))))
    pts2_homo = np.hstack((pts2, np.ones((pts2.shape[0], 1))))
    ress = []       # mean epipolar error per candidate F
    F_res = []      # candidate F matrices
    choices = []    # the 7-point sample that produced each candidate
    M=np.max([*im1.shape, *im2.shape])
    # RANSAC-style search: sample 7 correspondences, keep every candidate F.
    for i in range(max_iter):
        choice = np.random.choice(range(pts1.shape[0]), 7)
        pts1_choice = pts1[choice, :]
        pts2_choice = pts2[choice, :]
        Fs = sevenpoint(pts1_choice, pts2_choice, M)
        for F in Fs:
            choices.append(choice)
            res = calc_epi_error(pts1_homo,pts2_homo, F)
            F_res.append(F)
            ress.append(np.mean(res))
    # Pick the candidate with the smallest mean epipolar error over all points.
    min_idx = np.argmin(np.abs(np.array(ress)))
    F = F_res[min_idx]
    F /= F[2,2]
    print("Error:", ress[min_idx])
    # print(F)
    assert(F.shape == (3, 3))
    assert(F[2, 2] == 1)
    assert(np.linalg.matrix_rank(F) == 2)
    assert(np.mean(calc_epi_error(pts1_homogenous, pts2_homogenous, F)) < 1)
    print(F)
    np.savez('q2_2.npz', F, M, pts1, pts2)
| Haejoon-lee/3D-Reconstruction | code/q2_2_sevenpoint.py | q2_2_sevenpoint.py | py | 4,928 | python | en | code | 0 | github-code | 13 |
1931344904 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 19 22:23:27 2019
@author: ASPIRE E 14
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
data=pd.read_csv(r"C:\Users\ASPIRE E 14\Music\DTS FGA 2019 - Unmul\projek akhir\ProjekAkhir\diabetes.csv")
data.head()
data.dtypes # all columns are integer / numeric data
# First step: descriptive statistics (max, min, mean, standard deviation).
data.describe()
# Count how many records fall in each class.
data['Diabetic'].value_counts() # status '0' = not diabetic, '1' = diabetic
# Split the data into the dependent variable (y) and independent variables (x).
y=data['Diabetic'].values
x=data[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values
# Histogram of the independent variables.
plt.hist(x)
# The histogram shows the data is not normally distributed, so normalize it.
# Normalization step.
from sklearn import preprocessing
x_norm=preprocessing.StandardScaler().fit(x).transform(x.astype(float))
plt.hist(x_norm)
# After normalization the data follows a roughly normal distribution.
# Split into training and testing data with an 80:20 ratio.
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x_norm,y,test_size=0.2)
print('Banyaknya Data Training:',x_train.shape,y_train.shape)
print('Banyaknya Data Testing:',x_test.shape,y_test.shape)
# KNN with k=2.
from sklearn.neighbors import KNeighborsClassifier
k=2
KNN=KNeighborsClassifier(n_neighbors=k).fit(x_train,y_train)
# Predictions.
y_predict=KNN.predict(x_test)
y_predict
# Compare actual vs predicted labels.
print('Data Aktual: ',y_test)
print('Data Prediksi:',y_predict)
# Accuracy score: the higher it is, the closer predictions are to the actuals.
from sklearn import metrics
print('Akurasi:',metrics.accuracy_score(y_test,y_predict))
# Try 10 possible values of k and record the accuracy for each.
hasil=[]
for i in range(1,11):
    knn=KNeighborsClassifier(n_neighbors=i).fit(x_train,y_train)
    prediksi=knn.predict(x_test)
    akurasi=metrics.accuracy_score(y_test,prediksi)
    hasil.append(akurasi)
print(hasil)
plt.plot(hasil)
plt.xlabel('k')
plt.ylabel('Akurasi')
plt.xticks(np.arange(10),('1','2','3','4','5','6','7','8','9','10'))
plt.savefig('KNN.png')
plt.show() | fannyaqmarina/DTS_ProjekAkhir | KNN.py | KNN.py | py | 2,543 | python | id | code | 0 | github-code | 13 |
17068539804 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsDataDsbImageUploadRequest(object):
    """Request object for the alipay.ins.data.dsb.image.upload OpenAPI call.

    Holds the business payload plus the common transport fields and serializes
    them into the flat parameter dict (plus multipart file dict) the gateway
    expects. The original generated code repeated an identical property +
    serialization stanza for each business field; this version keeps the same
    external attribute interface but drives serialization from one field list.
    """

    # Business-level fields, in the order the generated code emitted them.
    _BIZ_FIELDS = (
        'estimate_no', 'frame_no', 'image_format', 'image_name', 'image_path',
        'image_properties', 'image_source', 'image_store_type', 'image_type',
        'license_no', 'report_no', 'shoot_time',
    )

    def __init__(self, biz_model=None):
        self.biz_model = biz_model
        # All business fields start unset; plain attributes replace the
        # original's trivial getter/setter properties.
        for field in self._BIZ_FIELDS:
            setattr(self, field, None)
        self._image_content = None
        self.version = "1.0"
        self.terminal_type = None
        self.terminal_info = None
        self.prod_code = None
        self.notify_url = None
        self.return_url = None
        self._udf_params = None
        self.need_encrypt = False

    @property
    def image_content(self):
        return self._image_content

    @image_content.setter
    def image_content(self, value):
        # Only accept file payloads; silently ignore anything else
        # (same behaviour as the generated SDK code).
        if not isinstance(value, FileItem):
            return
        self._image_content = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Only a dict is accepted; other values are silently ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    def add_other_text_param(self, key, value):
        """Attach an extra text parameter that is merged into get_params()."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    @staticmethod
    def _serialize(value):
        """Model objects become canonical JSON; every other value passes through."""
        if hasattr(value, 'to_alipay_dict'):
            return json.dumps(obj=value.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        return value

    def get_params(self):
        """Build the flat text-parameter dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.ins.data.dsb.image.upload'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        # Business fields: only truthy values are emitted, in declaration order.
        for field in self._BIZ_FIELDS:
            value = getattr(self, field)
            if value:
                params[field] = self._serialize(value)
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Build the dict of file-upload (multipart) parameters."""
        multipart_params = dict()
        if self.image_content:
            multipart_params['image_content'] = self.image_content
        return multipart_params
| alipay/alipay-sdk-python-all | alipay/aop/api/request/AlipayInsDataDsbImageUploadRequest.py | AlipayInsDataDsbImageUploadRequest.py | py | 9,731 | python | en | code | 241 | github-code | 13 |
73839359699 | # -*- coding: utf-8 -*-
__author__ = "winking324@gmail.com"
import argparse
from helper import analyze
def analyze_dynamic_key(key):
    """Dispatch an Agora token to the analyzer matching its 3-char version prefix.

    Versions 003-006 have dedicated analyzers; any other prefix is tried
    against the v2 and then v1 legacy formats. Failures are reported on stdout.
    """
    version = key[:3]
    handlers = {
        '003': analyze.analyze_key_v3,
        '004': analyze.analyze_key_v4,
        '005': analyze.analyze_key_v5,
        '006': analyze.analyze_key_v6,
    }
    try:
        handler = handlers.get(version)
        if handler is not None:
            print('version: {}'.format(version))
            handler(key)
            return
        # Legacy keys carry no version prefix: try v2 first, then v1; each
        # analyzer reports success in the first element of its result.
        for legacy in (analyze.analyze_key_v2, analyze.analyze_key_v1):
            if legacy(key)[0]:
                return
        print('Error: analyze key failed')
    except Exception as e:
        print('Error: failed, error: {}'.format(repr(e)))
def main():
    """CLI entry point: parse the token argument and analyze it."""
    arg_parser = argparse.ArgumentParser(description='Analyze Agora Token')
    arg_parser.add_argument('token', help='agora token')
    args = arg_parser.parse_args()
    analyze_dynamic_key(args.token)
if __name__ == '__main__':
    main()
| imagora/agora-token-helper | analyzer.py | analyzer.py | py | 1,073 | python | en | code | 1 | github-code | 13 |
42542180753 | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth import get_user_model
User = get_user_model()
from django.conf import settings
from finder import distance
class OwnerAccount(models.Model):
    """Profile for a business owner, linked one-to-one to the auth user."""
    # link the OwnerAccount to a User model instance
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    def __str__(self):
        return self.user.email
    @classmethod
    def create(cls, user):
        """Build (but do not save) an OwnerAccount for *user*."""
        owner = cls(user=user)
        return owner
class Business(models.Model):
    """A food business with a geocoded address and offer window."""
    # every business belongs to exactly one owner account
    owner = models.ForeignKey(OwnerAccount, on_delete=models.CASCADE)
    businessName = models.CharField(max_length=128)
    # address is validated against the geocoder before saving
    address = models.CharField(max_length=256, validators=[distance.validate_address])
    description = models.CharField(max_length=1024)
    workingTime = models.CharField(max_length=128)
    offersUntil = models.TimeField()
    tags = models.CharField(max_length=256)
    # default image from https://www.vecteezy.com/free-vector/food-icon
    picture = models.ImageField(upload_to='businesses', blank=True, default="businesses/default.svg")
    # coordinates are filled automatically from the address in save()
    lat = models.FloatField()
    long = models.FloatField()
    slug = models.SlugField(unique=True)

    def save(self, *args, **kwargs):
        """Slugify the name and geocode the address before persisting."""
        self.slug = slugify(self.businessName)
        # the get_coords function returns a tuple with latitude and longitude
        coords = distance.get_coords(self.address)
        self.lat = coords[0]
        self.long = coords[1]
        super(Business, self).save(*args, **kwargs)

    class Meta:
        verbose_name_plural = 'Businesses'

    def __str__(self):
        return self.businessName
return self.businessName
class Offer(models.Model):
    """The current portion offer of a business (one live offer each)."""
    business = models.OneToOneField(Business, on_delete=models.CASCADE)
    portionAmount = models.IntegerField()

    def save(self, *args, **kwargs):
        """Clamp negative portion counts to zero before saving."""
        if self.portionAmount < 0:
            self.portionAmount = 0
        super(Offer, self).save(*args, **kwargs)

    def __str__(self):
        return self.business.businessName + " ( " + str(self.portionAmount) + " )"
class UserAccount(models.Model):
    """Profile wrapper for a regular customer."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # a foreign key so that it is possible to trace with which business the
    # user made a reservation; cleared (not cascaded) when the offer goes away
    reservation = models.ForeignKey(Offer, blank=True, null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return self.user.email
23006219652 | import sys
from collections import deque
# grid size: n rows by m columns
n, m = map(int, sys.stdin.readline().split())
# maze rows as lists of characters: '1' walkable, '0' wall
graph = []
for _ in range(n):
    graph.append(list(sys.stdin.readline().strip()))
# the four cardinal directions
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
def bfs(start):
    """Breadth-first search over the global maze; each visited cell is
    overwritten with its distance (in cells) from the start.

    The queue holds *lists* of cells rather than single cells; each popped
    batch is expanded and each cell's newly discovered neighbours are
    enqueued as a fresh batch.  Returns the distance stored in the
    bottom-right cell.
    """
    queue = deque()
    queue.append(start)
    while queue:
        nodes = queue.popleft()
        for node in nodes:
            # examine the four neighbours of the current cell
            temp = []
            for i in range(4):
                nx = node[0] + dx[i]
                ny = node[1] + dy[i]
                # skip neighbours outside the grid
                if not (0 <= nx <= m - 1 and 0 <= ny <= n - 1):
                    continue
                # skip walls and the starting cell
                if graph[ny][nx] == "0" or (nx == 0 and ny == 0):
                    continue
                # unvisited '1' cell: record distance = current cell + 1
                if graph[ny][nx] == "1":
                    graph[ny][nx] = int(graph[node[1]][node[0]]) + 1
                    temp.append((nx, ny))
            queue.append(temp)
    return graph[n - 1][m - 1]


print(bfs([(0, 0)]))
| Jeong-Junhwan/BOJ | 2178.py | 2178.py | py | 1,190 | python | ko | code | 0 | github-code | 13 |
12702828514 | """Write a program to reverse a linked list"""
class Node:
    """Singly-linked list node holding a value and a next pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Minimal singly-linked list: append, bulk insert, display, reverse."""

    def __init__(self):
        self.head = None

    def insertAtEnd(self, data):
        """Append *data* as a new tail node (O(n) tail walk)."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        iterator = self.head
        while iterator.next:
            iterator = iterator.next
        iterator.next = node

    def insertList(self, dataList):
        """Append every element of *dataList* in order."""
        for data in dataList:
            self.insertAtEnd(data)

    def display(self):
        """Print the list as 'a --> b --> ... --> None'."""
        iterator = self.head
        while iterator:
            print(iterator.data, end=' --> ')
            iterator = iterator.next
        print(None)

    def reverseLinkedList(self):
        """Reverse the list in place by re-linking pointers.

        O(n) time / O(1) extra space; the original collected all values
        into a list and re-inserted them one by one through the O(n)
        insertAtEnd, which is O(n^2) overall.
        """
        prev = None
        current = self.head
        while current:
            nxt = current.next
            current.next = prev
            prev = current
            current = nxt
        self.head = prev
if __name__ == '__main__':
    # demo: build a list, show it, reverse it, show it again
    ll = LinkedList()
    ll.insertList([1, 2, 3, 4, 5])
    ll.display()
    ll.reverseLinkedList()
    ll.display()
| Mayur-Debu/Datastructures | Linked List/Intermediate/Exercise_3_differentStrtergy.py | Exercise_3_differentStrtergy.py | py | 1,218 | python | en | code | 0 | github-code | 13 |
44337637304 |
import pygame
import sys
pygame.init()
pygame.font.init()

# basic RGB palette
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)

vel = 4  # NOTE(review): unused here — entities carry their own .speed
size = (800, 600)
ventana = pygame.display.set_mode(size)  # main window surface

# sprites scaled down to 64x64
char = pygame.transform.scale(pygame.image.load(
    "assets/char.jpg").convert_alpha(), (64, 64))
enemy = pygame.transform.scale(pygame.image.load(
    "assets/enemy.png").convert_alpha(), (64, 64))

clock = pygame.time.Clock()
pygame.display.set_caption("Juego")
FONT = pygame.font.Font("assets/Fonts/Roboto-Regular.ttf",
                        30)  # the font and its size
class Entity:
    """A drawable game object: position, sprite, health and a pixel mask."""

    def __init__(self, x, y, sprite, health):
        self.x, self.y = x, y
        self.sprite = sprite
        self.health = health
        self.speed, self.atk_speed = 4, 20
        # pixel-perfect collision mask derived from the sprite's alpha
        self.mask = pygame.mask.from_surface(sprite)

    def dibujar(self, where):
        """Blit this entity's sprite onto *where* at its current position."""
        where.blit(self.sprite, (self.x, self.y))

    def collision(self, obj):
        """Mask-overlap test against another masked object."""
        return Collide(obj, self)
class Player(Entity):
    """An Entity that also remembers its starting (maximum) health."""

    def __init__(self, x, y, sprite, health):
        super().__init__(x, y, sprite, health)
        self.maxHealth = health
def Collide(obj1, obj2):
    """Pixel-perfect overlap test between two positioned, masked objects.

    Shifts obj2's mask by its position relative to obj1 and returns True
    when the masks share at least one opaque pixel.
    """
    offset_x = obj2.x - obj1.x
    offset_y = obj2.y - obj1.y
    # overlap() returns a hit point or None; compare identity with
    # `is not None` rather than `!= None` (PEP 8)
    return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) is not None
player = Player(400, 300, char, 100)
enemy_1 = Entity(600, 300, enemy, 100)

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()

    # WASD moves the enemy; note the axes are inverted relative to the key
    # ('a' pushes it right, 'w' pushes it down) — presumably intentional
    # camera-style movement, verify
    keys = pygame.key.get_pressed()
    if keys[pygame.K_a]:
        enemy_1.x += enemy_1.speed
    if keys[pygame.K_d]:
        enemy_1.x -= enemy_1.speed
    if keys[pygame.K_w]:
        enemy_1.y += enemy_1.speed
    if keys[pygame.K_s]:
        enemy_1.y -= enemy_1.speed

    ventana.fill(BLACK)
    # ---- DRAWING ZONE ----
    player.dibujar(ventana)
    enemy_1.dibujar(ventana)
    # what to write in the label and its color
    vidas_label = FONT.render(
        f"Vida:{player.health}/{player.maxHealth}", 1, (RED))
    ventana.blit(vidas_label, (10, 10))  # where to draw the label
    # ---- DRAWING ZONE ----
    # refresh the screen at 30 FPS
    pygame.display.flip()
    clock.tick(30)
| SantiagoFantoni/python | index.py | index.py | py | 2,208 | python | en | code | 0 | github-code | 13 |
38251135602 | from openpyxl import Workbook
wb = Workbook()  # create a new workbook
ws = wb.active
ws.title = "Nadosheet"

# write the value 1 into cell A1, and so on for the other cells
ws["A1"] = 1
ws["A2"] = 2
ws["A3"] = 3
ws["B1"] = 4
ws["B2"] = 5
ws["B3"] = 6

print(ws["A1"])  # print the A1 cell object itself
print(ws["A1"].value)  # print the *value* of cell A1
print(ws["A10"].value)  # prints 'None' when the cell has no value

# row = 1, 2, 3, ...
# column = A(1), B(2), C(3), ... — clumsier to type than the style above,
# but much easier to drive from loops
print(ws.cell(column=1, row=1).value)  # ws["A1"].value
print(ws.cell(column=2, row=1).value)  # ws["B1"].value
c = ws.cell(column= 3, row= 1, value=10)  # ws["c1"].value = 10
print(c.value)  # ws["C1"].value

from random import *

# fill cells with random numbers via a nested loop
index = 1
for x in range(1, 11):  # 10 rows
    for y in range(1, 11):  # 10 columns
        ws.cell(row=x, column=y, value=randint(0, 100))  # randoms 0..100 into A1..J10
        ws.cell(row=x+11, column=y, value=index)  # running index into A12..J22
        index += 1

wb.save("sample.xlsx")
| johnpark144/Practical_Study | Python_RPA/1_Excel/3_cell.py | 3_cell.py | py | 1,162 | python | ko | code | 3 | github-code | 13 |
31515991803 | import re
from trac.config import IntOption, Option, BoolOption
from trac.core import *
from trac.wiki.api import IWikiChangeListener
from trac.wiki.model import WikiPage
from tracspamfilter.api import IFilterStrategy, N_
class IPRegexFilterStrategy(Component):
    """Spam filter for submitter's IP based on regular expressions
    defined in BadIP page.
    """
    implements(IFilterStrategy, IWikiChangeListener)

    karma_points = IntOption('spam-filter', 'ipregex_karma', '20',
        """By how many points a match with a pattern on the BadIP page
        impacts the overall karma of a submission.""", doc_domain="tracspamfilter")

    badcontent_file = Option('spam-filter', 'ipbadcontent_file', '',
        """Local file to be loaded to get BadIP. Can be used in
        addition to BadIP wiki page.""", doc_domain="tracspamfilter")

    show_blacklisted = BoolOption('spam-filter', 'show_blacklisted_ip', 'true',
        """Show the matched bad IP patterns in rejection message.""", doc_domain="tracspamfilter")

    def __init__(self):
        """Load blacklist patterns from the BadIP wiki page and, when
        configured, from a local file."""
        self.patterns = []
        page = WikiPage(self.env, 'BadIP')
        if page.exists:
            self._load_patterns(page)
        if self.badcontent_file != '':
            # The original leaked the handle and dead-checked open()'s
            # return value against None (open() raises, it never returns
            # None); a with-block closes the file deterministically.
            with open(self.badcontent_file, "r") as file:
                lines = file.read().splitlines()
            pat = [re.compile(p.strip()) for p in lines if p.strip()]
            self.log.debug('Loaded %s patterns from BadIP file', len(pat))
            self.patterns += pat

    # IFilterStrategy implementation

    def is_external(self):
        """This strategy needs no network round-trip."""
        return False

    def test(self, req, author, content, ip):
        """Score the submitter's *ip* against every blacklisted pattern.

        Returns (negative karma, message, detail) on a match, otherwise
        falls through and implicitly returns None.
        """
        gotcha = []
        points = 0
        for pattern in self.patterns:
            match = pattern.search(ip)
            if match:
                gotcha.append("'%s'" % pattern.pattern)
                self.log.debug('Pattern %s found in submission',
                               pattern.pattern)
                points -= abs(self.karma_points)
        if points != 0:
            if self.show_blacklisted:
                matches = ", ".join(gotcha)
                return points, N_('IP catched by these blacklisted patterns: %s'), matches
            else:
                return points, N_('IP catched by %s blacklisted patterns'), str(len(gotcha))

    def train(self, req, author, content, ip, spam=True):
        """Static regex filtering: nothing to train."""
        return 0

    # IWikiChangeListener implementation

    def wiki_page_changed(self, page, *args):
        # reload the pattern cache whenever BadIP is edited
        if page.name == 'BadIP':
            self._load_patterns(page)

    wiki_page_added = wiki_page_changed
    wiki_page_version_deleted = wiki_page_changed

    def wiki_page_deleted(self, page):
        if page.name == 'BadIP':
            self.patterns = []

    # Internal methods

    def _load_patterns(self, page):
        """(Re)compile patterns from the page's first {{{ ... }}} block."""
        if '{{{' in page.text and '}}}' in page.text:
            lines = page.text.split('{{{', 1)[1].split('}}}', 1)[0].splitlines()
            self.patterns = [re.compile(p.strip()) for p in lines if p.strip()]
            self.log.debug('Loaded %s patterns from BadIP',
                           len(self.patterns))
        else:
            self.log.warning('BadIP page does not contain any patterns')
            self.patterns = []
| adium/trac-spamfilter | tracspamfilter/filters/ip_regex.py | ip_regex.py | py | 3,349 | python | en | code | 1 | github-code | 13 |
2883809345 | import json
from os.path import exists
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import tqdm
from hyper_data_loader.HyperDataLoader import HyperDataLoader
from sklearn.model_selection import train_test_split
from models.deep_sets.data_loader import create_data_loader
from models.utils.train_test import train_model, simple_test_model
import torch
from algorithms import ISSC, WALUDI, WALUMI, LP, MMCA
from models.mlp.mlp import MlpModel
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
INPUT_SHAPE_PAVIA = 103
NUM_CLASSES_PAVIA = 10
INPUT_SHAPE_DRIVE = 25
NUM_CLASSES_DRIVE = 10
def filter_hafe_1(X, y):
    """Downsample the over-represented class 1.

    Keeps every sample whose label differs from 1, plus only the second
    quarter of the class-1 samples, and returns the filtered (X, y).
    """
    keep_other = np.where(y != 1)[0]
    ones = np.where(y == 1)[0]
    quarter = int(len(ones) / 4)
    half = int(len(ones) / 2)
    selected = np.concatenate((keep_other, ones[quarter:half]))
    return X[selected, :], y[selected]
def data_loaders(bands):
    """Build train/test DataLoaders over PaviaU pixels.

    *bands* is a sequence of band indices to keep, or None to keep all
    bands.  Train labels are one-hot encoded; test labels stay as ints.
    Splitting is random, so repeated calls yield different partitions.
    """
    loader = HyperDataLoader()
    data = loader.generate_vectors("PaviaU", (1, 1), shuffle=True, limit=10)
    labeled_data = next(data)
    X, y = labeled_data.image, labeled_data.lables
    X, y = loader.filter_unlabeled(X, y)
    # X, y = filter_hafe_1(X, y)
    X = X.squeeze()
    X = X.astype(int)
    if bands is not None:
        X = X[:, bands]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    # one-hot encode training labels only
    y_train = np.eye(NUM_CLASSES_PAVIA)[y_train]
    train_loader = create_data_loader(X_train, y_train, 256)
    test_loader = create_data_loader(X_test, y_test, 256)
    return train_loader, test_loader
def test_bands_mlp(bands):
    """Train a fresh MLP on the given band subset and return test accuracy."""
    train_loader, test_loader = data_loaders(bands)
    model = MlpModel(len(bands), NUM_CLASSES_PAVIA)
    train_model(model, train_loader, epochs=100, lr=0.000025, device=device)
    return simple_test_model(model, test_loader, device=device)
def load_history(filepath: str) -> Dict[str, List]:
    """Load the accuracy-history mapping from a JSON file.

    Always returns a ``defaultdict(list)`` so callers can append to
    not-yet-seen keys.  The original returned a plain dict after loading
    from disk, which made ``history[new_algo].append(...)`` raise
    KeyError on resumed runs.
    """
    if not exists(filepath):
        return defaultdict(list)
    with open(filepath, 'r') as f:
        d = json.load(f)
    return defaultdict(list, d)
def save_history(res: Dict[str, List], filepath: str):
    """Serialize the accuracy history to *filepath* as JSON."""
    with open(filepath, 'w') as out:
        json.dump(res, out)
if __name__ == '__main__':
    hdl = HyperDataLoader()
    pavia = next(hdl.load_dataset_supervised("PaviaU", patch_shape=(1, 1)))
    lables = pavia.lables
    data = pavia.image.squeeze()
    # band-selection algorithms under comparison
    algorithms = {
        'ISSC': ISSC,
        'MMCA': MMCA,
        'LP': LP,
        'WALUMI': WALUMI,
        'WALUDI': WALUDI
    }
    history_filename = 'acc_results.json'
    algs_benchmarks = load_history(history_filename)
    # resume where a previous run stopped (MMCA's list length as marker)
    MIN_NUM_BANDS = 0 if len(algs_benchmarks['MMCA']) == 0 else len(algs_benchmarks['MMCA'])
    MAX_NUM_BANDS = 103
    for i in tqdm.trange(MIN_NUM_BANDS + 1, MAX_NUM_BANDS + 1, initial=MIN_NUM_BANDS, total=MAX_NUM_BANDS):
        for algo_name, f in algorithms.items():
            print(f'Using {algo_name} for current iteration')
            model = f(i)
            model.fit(data)
            # MMCA is supervised and needs labels plus an eps threshold
            if algo_name == 'MMCA':
                _, bands = model.predict(data, lables, eps=0.4)
            else:
                _, bands = model.predict(data)
            acc = test_bands_mlp(bands)
            algs_benchmarks[algo_name].append(acc)
            # checkpoint after every algorithm so the run can resume
            save_history(algs_benchmarks, history_filename)
    for algo_name, accs in algs_benchmarks.items():
        plt.plot(range(MAX_NUM_BANDS), accs, label=algo_name)
    plt.legend()
    plt.savefig('benchmark-algorithms results.png')
    plt.show()
    # acc=test_bands_mlp(range(1,103))
    # print(acc)
| YanivZimmer/HyperBenchmark | experiments/pavia_university/pavia_university.py | pavia_university.py | py | 3,595 | python | en | code | 0 | github-code | 13 |
23372167249 | import random
count = 0
def rock_paper_scissors():
    """Play N interactive rounds of rock-paper-scissors and print totals.

    NOTE(review): `count` is module-global and never reset, so a second
    call in the same process would immediately see count >= Times_to_run
    and play zero rounds — verify this is intended.
    """
    global count
    print('------>Welcome and may the force be with you<------')
    Times_to_run = int(input("How many times do you want to play r->p->s(e.g 5): "))
    Won = 0
    Lost = 0
    Tied = 0
    while count < Times_to_run:
        computer = random.choice(['r','p','s'])
        user = input('r:rock p:paper s:scissors: ')
        if whowon(user,computer) == 'tie':
            print("you tied")
            Tied += 1
        # a win returns True; the truthy 'tie' string is excluded explicitly
        if whowon(user,computer) and whowon(user,computer) != 'tie':
            Won += 1
            print("you won")
        elif whowon(user,computer) == False:
            Lost += 1
            print("you lost")
        count = count + 1
    print(f"won => {Won} ,tied => {Tied} ,lost => {Lost}")
def whowon(u, c):
    """Return 'tie' on a draw, True when u beats c, False otherwise."""
    if (u, c) in {('r', 'r'), ('p', 'p'), ('s', 's')}:
        return "tie"
    if (u, c) in {('r', 's'), ('p', 'r'), ('s', 'p')}:
        return True
    return False
rock_paper_scissors()  # runs the interactive game immediately on execution
# by olaoluwa
| olaoluwaayanbola/Rock-paper-scissors-python | rockpaperscissors/rockpaperscissors.py | rockpaperscissors.py | py | 1,089 | python | en | code | 0 | github-code | 13 |
33565561898 | ##This not mine, I found it somewhere on google code
import re
import urllib
import simplejson as json
import yaml
class UrlOpener(urllib.FancyURLopener):
    # custom User-Agent sent with each translation request
    version = "py-gtranslate/1.0"

class InvalidLanguage(Exception): pass

base_uri = "http://ajax.googleapis.com/ajax/services/language/translate"
default_params = {'v': '1.0'}
# NOTE(review): Python 2 built-in file() plus unsafe yaml.load on a local
# config; executes at import time, so langs.yml must be in the CWD
langs = yaml.load(file('langs.yml', 'r').read())
def translate(src, to, phrase):
    """Translate *phrase* from language *src* to *to* via the legacy
    Google AJAX translate endpoint.

    Python 2 code (iteritems, urllib.quote_plus).  Language names are
    mapped through the `langs` table; unknown languages raise
    InvalidLanguage.  On any response error the original phrase is
    returned unchanged.
    """
    src = langs.get(src, src)
    to = langs.get(to, to)
    if not src in langs.values() or not to in langs.values():
        raise InvalidLanguage("%s=>%s is not a valid translation" % (src, to))
    args = default_params.copy()
    args.update({
        'langpair': '%s%%7C%s' % (src, to),
        'q': urllib.quote_plus(phrase),
    })
    argstring = '%s' % ('&'.join(['%s=%s' % (k,v) for (k,v) in args.iteritems()]))
    resp = json.load(UrlOpener().open('%s?%s' % (base_uri, argstring)))
    try:
        return resp['responseData']['translatedText']
    except:
        # NOTE(review): bare except silently swallows every failure
        # should probably warn about failed translation
        return phrase
| sonicrules1234/sonicbot | translate.py | translate.py | py | 1,216 | python | en | code | 10 | github-code | 13 |
31741787525 | # encoding: utf-8
"""
@author: nanjixiong
@time:
@file: example06.py
@desc:
"""
import numpy as np
# a string array converts element-wise to float via astype
vector = np.array(['1', '2', '3'])
print(vector.dtype)
vector = vector.astype(float)
print(vector.dtype)
print(vector)

# 3x3 matrix of multiples of 5, then per-row sums
matrix = np.arange(5, 50, 5).reshape(3, 3)
print(matrix.sum(axis=1))
| lixixi89055465/py_stu | tangyudi/base/numpy/example06.py | example06.py | py | 325 | python | en | code | 1 | github-code | 13 |
14729428785 | # Uses python3
import sys
#capacity = 50
#weights = [20, 50, 30]
#values = [60, 100, 120]
def get_optimal_value(capacity, weights, values):
    """Greedy fractional knapsack.

    Takes items in decreasing value-density order, splitting the last
    item if needed, and returns the maximum total value as a float.
    """
    items = sorted(
        ((w, v, float(v) / float(w)) for w, v in zip(weights, values)),
        key=lambda item: item[2],
        reverse=True,
    )
    total = 0.0
    for weight, _value, density in items:
        if capacity == 0:
            break
        take = min(weight, capacity)
        total += float(take) * float(density)
        capacity -= take
    return total
if __name__ == "__main__":
data = list(map(int, sys.stdin.read().split()))
n, capacity = data[0:2]
values = data[2:(2 * n + 2):2]
weights = data[3:(2 * n + 2):2]
opt_value = get_optimal_value(capacity, weights, values)
print("{:.10f}".format(opt_value))
| price-dj/Algorithmic_Toolbox | Week3/02_greedy_algorithms_starter_files/fractional_knapsack/fractional_knapsack.py | fractional_knapsack.py | py | 1,036 | python | en | code | 0 | github-code | 13 |
8930038868 | from django.urls import path
from . import views
app_name = 'social_app'  # URL namespace for reverse() lookups

urlpatterns = [
    # class-based views: two static pages plus the site root
    path('keeper/', views.KeeperPage.as_view(), name="keeper"),
    path('thanks/', views.ThanksPage.as_view(), name="thanks"),
    path('', views.HomePage.as_view(), name="home"),
]
| primarypartition/py-dev | social_project/social_app/urls.py | urls.py | py | 303 | python | en | code | 0 | github-code | 13 |
4320793811 | # ##############################################################################
# # Copyright (C) 2018, 2019, 2020 Dominic O'Kane
# ##############################################################################
import numpy as np
from ...utils.date import Date
from ...utils.calendar import CalendarTypes
from ...utils.calendar import BusDayAdjustTypes
from ...utils.calendar import DateGenRuleTypes
from ...utils.day_count import DayCountTypes
from ...utils.frequency import FrequencyTypes
from ...utils.global_vars import gDaysInYear
from ...utils.math import ONE_MILLION
from ...utils.global_types import FinExerciseTypes
from ...utils.global_types import SwapTypes
from ...utils.error import FinError
from ...utils.helpers import label_to_string, check_argument_types
from ...products.rates.ibor_swap import IborSwap
from ...models.bdt_tree import BDTTree
from ...models.bk_tree import BKTree
from ...models.hw_tree import HWTree
###############################################################################
class IborBermudanSwaption:
    """ This is the class for the Bermudan-style swaption, an option to enter
    into a swap (payer or receiver of the fixed coupon), that starts in the
    future and with a fixed maturity, at a swap rate fixed today. This swaption
    can be exercised on any of the fixed coupon payment dates after the first
    exercise date. """

    def __init__(self,
                 settlement_date: Date,
                 exercise_date: Date,
                 maturity_date: Date,
                 fixed_leg_type: SwapTypes,
                 exercise_type: FinExerciseTypes,
                 fixed_coupon: float,
                 fixed_frequency_type: FrequencyTypes,
                 fixed_day_count_type: DayCountTypes,
                 notional=ONE_MILLION,
                 float_frequency_type=FrequencyTypes.QUARTERLY,
                 float_day_count_type=DayCountTypes.THIRTY_E_360,
                 calendar_type=CalendarTypes.WEEKEND,
                 bus_day_adjust_type=BusDayAdjustTypes.FOLLOWING,
                 date_gen_rule_type=DateGenRuleTypes.BACKWARD):
        """ Create a Bermudan swaption contract. This is an option to enter
        into a payer or receiver swap at a fixed coupon on all of the fixed
        # leg coupon dates until the exercise date inclusive. """

        check_argument_types(self.__init__, locals())

        if settlement_date > exercise_date:
            raise FinError("Settlement date must be before expiry date")

        if exercise_date > maturity_date:
            raise FinError("Exercise date must be before swap maturity date")

        # only European/Bermudan exercise is priced by the tree models below
        if exercise_type == FinExerciseTypes.AMERICAN:
            raise FinError("American optionality not supported.")

        self._settlement_date = settlement_date
        self._exercise_date = exercise_date
        self._maturity_date = maturity_date
        self._fixed_leg_type = fixed_leg_type
        self._exercise_type = exercise_type

        self._fixed_coupon = fixed_coupon
        self._fixed_frequency_type = fixed_frequency_type
        self._fixed_day_count_type = fixed_day_count_type
        self._notional = notional
        self._float_frequency_type = float_frequency_type
        self._float_day_count_type = float_day_count_type

        self._calendar_type = calendar_type
        self._bus_day_adjust_type = bus_day_adjust_type
        self._date_gen_rule_type = date_gen_rule_type

        # populated lazily by value()
        self._pv01 = None
        self._fwdSwapRate = None
        self._forwardDf = None
        self._underlyingSwap = None
        self._cpn_times = None
        self._cpn_flows = None

###############################################################################

    def value(self,
              valuation_date,
              discount_curve,
              model):
        """ Value the Bermudan swaption using the specified model and a
        discount curve. The choices of model are the Hull-White model, the
        Black-Karasinski model and the Black-Derman-Toy model. """

        float_spread = 0.0

        # The underlying is a swap in which we pay the fixed amount
        self._underlyingSwap = IborSwap(self._exercise_date,
                                        self._maturity_date,
                                        self._fixed_leg_type,
                                        self._fixed_coupon,
                                        self._fixed_frequency_type,
                                        self._fixed_day_count_type,
                                        self._notional,
                                        float_spread,
                                        self._float_frequency_type,
                                        self._float_day_count_type,
                                        self._calendar_type,
                                        self._bus_day_adjust_type,
                                        self._date_gen_rule_type)

        # I need to do this to generate the fixed leg flows
        self._pv01 = self._underlyingSwap.pv01(valuation_date, discount_curve)

        # times to expiry and maturity in years
        texp = (self._exercise_date - valuation_date) / gDaysInYear
        tmat = (self._maturity_date - valuation_date) / gDaysInYear

        #######################################################################
        # For the tree models we need to generate a vector of the coupons
        #######################################################################

        cpn_times = [texp]
        cpn_flows = [0.0]

        # The first flow is the expiry date
        num_flows = len(self._underlyingSwap._fixed_leg._payment_dates)

        swap = self._underlyingSwap

        for iFlow in range(0, num_flows):

            flow_date = self._underlyingSwap._fixed_leg._payment_dates[iFlow]

            # only coupons strictly after the exercise date feed the tree
            if flow_date > self._exercise_date:
                cpn_time = (flow_date - valuation_date) / gDaysInYear
                # NOTE(review): iFlow-1 reads the *previous* payment and
                # wraps to the last payment when iFlow == 0 — confirm this
                # offset is intentional
                cpn_flow = swap._fixed_leg._payments[iFlow-1] / self._notional
                cpn_times.append(cpn_time)
                cpn_flows.append(cpn_flow)

        cpn_times = np.array(cpn_times)
        cpn_flows = np.array(cpn_flows)

        self._cpn_times = cpn_times
        self._cpn_flows = cpn_flows

        # Allow exercise on coupon dates but control this later for europeans
        self._call_times = cpn_times

        df_times = discount_curve._times
        df_values = discount_curve._dfs

        face_amount = 1.0
        strike_price = 1.0  # Floating leg is assumed to price at par

        #######################################################################
        # For both models, the tree needs to extend out to maturity because of
        # the multi-callable nature of the Bermudan Swaption
        #######################################################################

        if isinstance(model, BDTTree) or isinstance(model, BKTree) or isinstance(model, HWTree):

            model.build_tree(tmat, df_times, df_values)

            v = model.bermudan_swaption(texp,
                                        tmat,
                                        strike_price,
                                        face_amount,
                                        cpn_times,
                                        cpn_flows,
                                        self._exercise_type)
        else:

            raise FinError("Invalid model choice for Bermudan Swaption")

        # scale the unit-notional tree price by notional and pick the leg
        if self._fixed_leg_type == SwapTypes.RECEIVE:
            v = self._notional * v['rec']
        elif self._fixed_leg_type == SwapTypes.PAY:
            v = self._notional * v['pay']

        return v

###############################################################################

    def print_swaption_value(self):
        """Dump the PV01, coupon schedule and call schedule for debugging."""

        print("SWAP PV01:", self._pv01)

        n = len(self._cpn_times)

        for i in range(0, n):
            print("CPN TIME: ", self._cpn_times[i], "FLOW", self._cpn_flows[i])

        n = len(self._call_times)

        for i in range(0, n):
            print("CALL TIME: ", self._call_times[i])

###############################################################################

    def __repr__(self):
        s = label_to_string("OBJECT TYPE", type(self).__name__)
        s += label_to_string("EXERCISE DATE", self._exercise_date)
        s += label_to_string("MATURITY DATE", self._maturity_date)
        s += label_to_string("SWAP FIXED LEG TYPE", self._fixed_leg_type)
        s += label_to_string("EXERCISE TYPE", self._exercise_type)
        s += label_to_string("FIXED COUPON", self._fixed_coupon)
        s += label_to_string("FIXED FREQUENCY", self._fixed_frequency_type)
        s += label_to_string("FIXED DAYCOUNT TYPE", self._fixed_day_count_type)
        s += label_to_string("FLOAT FREQUENCY", self._float_frequency_type)
        s += label_to_string("FLOAT DAYCOUNT TYPE", self._float_day_count_type)
        s += label_to_string("NOTIONAL", self._notional)
        return s

###############################################################################

    def _print(self):
        print(self)
| domokane/FinancePy | financepy/products/rates/bermudan_swaption.py | bermudan_swaption.py | py | 9,390 | python | en | code | 1,701 | github-code | 13 |
def read_performance(filename='perfomance.txt'):
    """Read one float per line from *filename* (tabs tolerated).

    The filename is now a parameter (default keeps the original,
    misspelled, hard-coded name for backward compatibility), and the
    file is opened in a with-block so it is closed even on a parse error
    (the original leaked the handle if float() raised).
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    return [float(line.replace('\t', ' ').replace('\n', '')) for line in lines]
def read_population(filename='input-population.txt'):
    """Read a comma-separated matrix of floats, one row per line.

    The filename is now a parameter (defaulting to the original
    hard-coded name), and the file is opened in a with-block so the
    handle cannot leak if a conversion fails mid-read.
    """
    with open(filename, 'r') as f:
        population_strings = f.readlines()
    population = []
    for line in population_strings:
        parts = [part.replace('\n', '') for part in line.split(',')]
        population.append([float(part) for part in parts])
    return population
def read_covar(filename='covar.txt'):
    """Read a whitespace/tab-separated matrix of floats, one row per line.

    Blank lines produce empty rows (matching the original behavior).
    The filename is now a parameter (defaulting to the original
    hard-coded name), and the file is opened in a with-block so the
    handle cannot leak if a conversion fails mid-read.
    """
    with open(filename, 'r') as f:
        covar_strings = f.readlines()
    covar_floats = []
    for line in covar_strings:
        cleaned = line.replace('\t', ' ').replace('\n', ' ')
        covar_floats.append([float(x) for x in cleaned.split()])
    return covar_floats
38251132912 | from openpyxl import Workbook
wb = Workbook() # 새 워크북 생성
wb.active
ws = wb.create_sheet() # 새로운 sheet 기본 이름으로 생성
ws2 = wb.create_sheet("Newsheet", 2) # 2번째 index에 Sheet 생성,(0,1,2,3....)
ws.title = "Mysheet" # sheet 이름 변경
ws.sheet_properties.tabColor = "117F87" # RGB 형태로 값을 넣어주면 탭 색상 변경
new_ws = wb["Newsheet"] # Dict 형태로 Sheet에 접근
print(wb.sheetnames) # 모든 Sheet 이름 확인
# Sheet 복사
new_ws["A1"] = "Test"
target = wb.copy_worksheet(new_ws)
target.title = "Copied Sheet"
wb.save("sample.xlsx")
| johnpark144/Practical_Study | Python_RPA/1_Excel/2_sheet.py | 2_sheet.py | py | 623 | python | ko | code | 3 | github-code | 13 |
25566074763 | from collections import deque
cups = deque([int(x) for x in input().split()])
bottles = deque([int(x) for x in input().split()])
wasted_water = 0
while cups and bottles:
current_cup = cups.popleft()
current_bottle = bottles.pop()
if current_cup <= current_bottle:
wasted_water += current_bottle - current_cup
else:
cups.appendleft(current_cup - current_bottle)
if cups:
print("Cups:", end=" ")
print(*cups, sep=" ")
if bottles:
print("Bottles:", end=" ")
print(*bottles, sep=" ")
print(f"Wasted litters of water: {wasted_water}") | mustanska/SoftUni | Python_Advanced/Lists as Stacks and Queues/cups_and_bottles.py | cups_and_bottles.py | py | 584 | python | en | code | 0 | github-code | 13 |
19902594497 | from matplotlib import pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.optimize import fsolve
from scipy.integrate import odeint
from fourlinkchain_rhs import fourlinkchain
class parameters:
    """Physical constants and animation settings for the four-link chain."""

    def __init__(self):
        # link masses and moments of inertia
        self.m1, self.m2, self.m3, self.m4 = 1, 1, 1, 1
        self.I1, self.I2, self.I3, self.I4 = 0.1, 0.1, 0.1, 0.1
        # atrias/digit leg link lengths
        # (a minitaur leg would use l1=1, l2=2, l3=1, l4=2)
        self.l1, self.l2, self.l3, self.l4 = 1, 2, 2, 1
        # anchor offset of the second two-link chain
        self.lx, self.ly = 0, 0
        self.g = 9.81
        # animation timing
        self.pause = 0.02
        self.fps = 20
def cos(angle):
    """Shorthand for np.cos."""
    return np.cos(angle)


def sin(angle):
    """Shorthand for np.sin."""
    return np.sin(angle)
def interpolation(t, z, params):
    """Resample the trajectory z(t) onto a uniform grid at params.fps Hz.

    Each state column of z is linearly interpolated onto the new time
    grid; returns (t_interp, z_interp).
    """
    t_interp = np.arange(t[0], t[len(t) - 1], 1 / params.fps)
    num_states = np.shape(z)[1]
    z_interp = np.zeros((len(t_interp), num_states))
    for j in range(num_states):
        channel = interpolate.interp1d(t, z[:, j])
        z_interp[:, j] = channel(t_interp)
    return t_interp, z_interp
def animate(t_interp, z_interp, params):
    """Draw the two 2-link chains frame by frame with matplotlib.

    z_interp columns 0/2/4/6 hold theta1..theta4; chain 1 hangs from the
    origin, chain 2 from (params.lx, params.ly).
    """
    lx, ly = params.lx, params.ly
    l1, l2, l3, l4 = params.l1, params.l2, params.l3, params.l4
    # half-width of the plotting window
    ll = 1.5*(l1+l2)+0.2

    # #plot
    for i in range(0,len(t_interp)):
        theta1 = z_interp[i,0]
        theta2 = z_interp[i,2]
        theta3 = z_interp[i,4]
        theta4 = z_interp[i,6]

        # joint positions of chain 1 (anchored at the origin)
        O = np.array([0, 0])
        P1 = np.array([l1*sin(theta1), -l1*cos(theta1)])
        P2 = np.array([
            (l2*sin(theta1 + theta2)) + l1*sin(theta1),
            - (l2*cos(theta1 + theta2)) - l1*cos(theta1)
        ])

        # joint positions of chain 2 (anchored at (lx, ly))
        O2 = np.array([lx, ly])
        P3 = np.array([
            lx + (l3*sin(theta3)),
            ly - (l3*cos(theta3))
        ])
        P4 = np.array([
            lx + (l4*sin(theta3 + theta4)) + l3*sin(theta3),
            ly - (l4*cos(theta3 + theta4)) - l3*cos(theta3)
        ])

        h1, = plt.plot([O[0], P1[0]],[O[1], P1[1]],linewidth=5, color='red')
        h2, = plt.plot([P1[0], P2[0]],[P1[1], P2[1]],linewidth=5, color='green')
        h3, = plt.plot([O2[0], P3[0]],[O2[1], P3[1]],linewidth=5, color='blue')
        h4, = plt.plot([P3[0], P4[0]],[P3[1], P4[1]],linewidth=5, color='cyan')

        plt.xlim([-ll, ll])
        plt.ylim([-ll, ll])
        plt.gca().set_aspect('equal')

        plt.pause(params.pause)

        # erase the links before drawing the next frame (keep the last one)
        if (i < len(t_interp)-1):
            h1.remove()
            h2.remove()
            h3.remove()
            h4.remove()

    #plt.show()
    plt.show(block=False)
    plt.pause(1)
    plt.close()
def plot_result(t, z):
    """Plot joint angles (top) and angular rates (bottom) against time.

    z columns alternate angle/rate for each of the four joints.
    """
    plt.figure(1)

    plt.subplot(2, 1, 1)
    plt.plot(t,z[:,0],color='red',label=r'$ \theta_1 $');
    plt.plot(t,z[:,2],color='green',label=r'$ \theta_2 $');
    plt.plot(t,z[:,4],color='blue',label=r'$ \theta_3 $');
    plt.plot(t,z[:,6],color='cyan',label=r'$ \theta_4 $');
    plt.ylabel("angle")
    plt.legend(loc="upper left")

    plt.subplot(2, 1, 2)
    plt.plot(t,z[:,1],color='red',label=r'$ w_1 $');
    plt.plot(t,z[:,3],color='green',label=r'$ w_2 $');
    plt.plot(t,z[:,5],color='blue',label=r'$ w_3 $');
    plt.plot(t,z[:,7],color='cyan',label=r'$ w_4 $');
    plt.xlabel("t")
    plt.ylabel("angular rate")
    plt.legend(loc="lower left")

    plt.show()
def position_last_link_tip(z, params):
    """Loop-closure position residual: tip of chain 1 minus tip of chain 2.

    z = (q1, q2, q3, q4).  Returns (dx, dy, 0, 0) so fsolve can drive the
    two chain tips to coincide.
    """
    q1, q2, q3, q4 = z
    l1, l2, l3, l4 = params.l1, params.l2, params.l3, params.l4
    # tip of the first (links 1-2) chain, anchored at the origin
    x12 = l1 * sin(q1) + l2 * sin(q1 + q2)
    y12 = -l1 * cos(q1) - l2 * cos(q1 + q2)
    # tip of the second (links 3-4) chain, anchored at (lx, ly)
    x34 = params.lx + l3 * sin(q3) + l4 * sin(q3 + q4)
    y34 = params.ly - l3 * cos(q3) - l4 * cos(q3 + q4)
    return x12 - x34, y12 - y34, 0, 0
def velocity_last_link_tip(z, params, q_star):
    """Loop-closure velocity residual at the configuration q_star.

    z = (u1, u2, u3, u4) are joint rates.  Returns (dvx, dvy, 0, 0), the
    relative linear velocity of the two chain tips, so fsolve can drive
    it to zero.
    """
    q1, q2, q3, q4 = q_star
    u1, u2, u3, u4 = z
    l1, l2, l3, l4 = params.l1, params.l2, params.l3, params.l4
    # Jacobian-times-rates of each chain tip
    vx12 = (l1 * cos(q1) + l2 * cos(q1 + q2)) * u1 + l2 * cos(q1 + q2) * u2
    vx34 = (l3 * cos(q3) + l4 * cos(q3 + q4)) * u3 + l4 * cos(q3 + q4) * u4
    vy12 = (l1 * sin(q1) + l2 * sin(q1 + q2)) * u1 + l2 * sin(q1 + q2) * u2
    vy34 = (l3 * sin(q3) + l4 * sin(q3 + q4)) * u3 + l4 * sin(q3 + q4) * u4
    return vx12 - vx34, vy12 - vy34, 0, 0
if __name__=="__main__":
params = parameters()
z = None
total_time = 5
t = np.linspace(0, total_time, 100*total_time)
### Solve q's such that end of final link is at lx,ly ###
q1, q2, q3, q4 = -np.pi/3, np.pi/2, np.pi/3, -np.pi/2
q0 = [q1, q2, q3, q4]
q_star = fsolve(position_last_link_tip, q0, params)
q1, q2, q3, q4 = q_star
print(f"q1: {q1}, q2: {q2}, q3: {q3}, q4: {q4}")
### Solve u's such that end of final link is linear velocity 0,0 ###
u1, u2, u3, u4 = 0, 0, 0, 0
u0 = [u1, u2, u3, u4]
fsolve_params = (params, q_star)
u_star = fsolve(velocity_last_link_tip, u0, fsolve_params)
u1, u2, u3, u4 = u_star
print(f"u1: {u1}, u2: {u2}, u3: {u3}, u4: {u4}")
### Use ode45 to do simulation ###
z0 = np.array([
q1, u1,
q2, u2,
q3, u3,
q4, u4
])
try:
z = odeint(
fourlinkchain, z0, t, args=(params,),
rtol=1e-9, atol=1e-9, mxstep=5000
)
except Exception as e:
print(e)
finally:
t_interp, z_interp = interpolation(t, z, params)
animate(t_interp, z_interp, params)
plot_result(t, z)
print("done")
| kimsooyoung/robotics_python | lec26_closed_chain_ctrl/dynamics_fourlink_chain/fourlinkchain_main.py | fourlinkchain_main.py | py | 5,475 | python | en | code | 18 | github-code | 13 |
2085115979 | __author__ = "Geoffrey Bachelot"
def fibonacci(amount: int):
    """Return the first *amount* Fibonacci numbers: 1, 1, 2, 3, 5, ...

    Fixes the original, which appended ``out[-1] + out[-1]`` — doubling
    the last value (1, 2, 4, 8, ...) instead of summing the last two
    terms.  Handles amount <= 0 (empty list) and amount == 1.
    """
    out = [1, 1][:amount]
    while len(out) < amount:
        out.append(out[-1] + out[-2])
    return out
if __name__ == "__main__":
choice = int(input('Choose fibonacci length: '))
print(fibonacci(choice))
| jffz/python-exercises | practicepython.org/13_fibonacci.py | 13_fibonacci.py | py | 270 | python | en | code | 0 | github-code | 13 |
32649939693 | from belay import Device, list_devices
class Pico(Device):
    """Belay proxy for a MicroPython board with an on-board LED on pin 25."""
    @Device.setup
    def setup1(argument=False):
        # Runs on the board: names bound here (e.g. ``led``) persist in the
        # device's global namespace for later @Device.task calls.
        from machine import Pin
        led = Pin(25, Pin.OUT)
    @Device.task
    def led_toggle():
        # Executed on-device; ``led`` comes from setup1's remote namespace.
        led.toggle()
if __name__ == "__main__":
    # Connect to the most recently enumerated serial device and blink it.
    device_port = list_devices()[-1]
    with Pico(device_port) as board:
        board.setup1(argument=True)
        board.led_toggle()
74164961936 | # %%
import os
os.chdir('../ssl_neuron/')
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import json
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import networkx as nx
import scipy.signal as signal
from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.api.queries.glif_api import GlifApi
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron
from allensdk.ephys.ephys_extractor import EphysSweepFeatureExtractor
from ssl_neuron.datasets import AllenDataset
# %%
config = json.load(open('./ssl_neuron/configs/config.json'))
config['data']['n_nodes'] = 1000
ctc = CellTypesCache(manifest_file='./ssl_neuron/data/cell_types/manifest.json')
dset = AllenDataset(config, mode='all')
cell_idx = 0
cell_id = dset.cell_ids[cell_idx]
# %%
data_set = ctc.get_ephys_data(cell_id)
sweep_info = []
for sweep_number in data_set.get_sweep_numbers():
md = data_set.get_sweep_metadata(sweep_number)
md['sweep_number'] = sweep_number
sweep_info.append(md)
sweep_info = pd.DataFrame(sweep_info)
stimulus_name = b'Noise 2'
noise1_sweep_numbers = sweep_info[sweep_info.aibs_stimulus_name == stimulus_name].sweep_number.tolist()
print(sweep_info[sweep_info.aibs_stimulus_name == stimulus_name].aibs_stimulus_amplitude_pa)
# %%
# Extract spike times from each noise sweep's voltage trace.
sampling_rate = None
sweep_spike_times = []
for sweep_number in noise1_sweep_numbers:
    sweep_data = data_set.get_sweep(sweep_number)
    index_range = sweep_data["index_range"]
    i = sweep_data["stimulus"][0:index_range[1]+1].copy() # in A
    v = sweep_data["response"][0:index_range[1]+1].copy() # in V
    i *= 1e12 # to pA
    v *= 1e3 # to mV
    if sampling_rate is None:
        sampling_rate = sweep_data["sampling_rate"] # in Hz
    else:
        # All sweeps are assumed to share a single sampling rate.
        assert sampling_rate == sweep_data["sampling_rate"]
    t = np.arange(0, len(v)) * (1.0 / sampling_rate)
    sweep_ext = EphysSweepFeatureExtractor(t=t, v=v, i=i) #, start=0, end=2.02)
    sweep_ext.process_spikes()
    spike_times = sweep_ext.spike_feature("threshold_t")
    sweep_spike_times.append(spike_times)
# %%
# Fetch the level-3 GLIF model registered for this cell, if any.
glif_api = GlifApi()
nm = glif_api.get_neuronal_models(cell_id)
if len(nm) < 1:
    # print(f'{cell_id}*, ', end='')
    assert 0, "No neuron models found"
nm = nm[0]['neuronal_models']
model_id = None
for model in nm:
    if '3' in model['name'][:2]: # get basic LIF neurons
        model_id = model['id']
        try:
            # Allen's own explained-variance score, used as ground truth below.
            var = model['neuronal_model_runs'][0]['explained_variance_ratio']
        except:
            var = None
        break
if model_id is None:
    # print(f'{cell_id}-, ', end='')
    assert 0, "No neuron models found"
# %%
# Simulate the GLIF neuron on the last sweep's stimulus.
neuron_config = glif_api.get_neuron_configs([model_id])[model_id]
glif_neuron = GlifNeuron.from_dict(neuron_config)
glif_neuron.dt = (1.0 / sampling_rate)
stimulus = sweep_data["stimulus"][0:index_range[1]+1]
import time; print(time.time())
output = glif_neuron.run(stimulus)
print(time.time())
# spike_times = output['interpolated_spike_times']
grid_spike_indices = output['spike_time_steps']
# %%
# Binary spike train of the GLIF simulation on the stimulus time grid.
t = np.arange(0, len(stimulus)) * glif_neuron.dt
glif_spikes = np.zeros(len(t))
glif_spikes[grid_spike_indices] = 1.
# %%
# Convert each sweep's spike times into a binary train on the same grid.
sweep_spikes = []
for spike_times in sweep_spike_times:
    spike_idxs = np.round(spike_times / glif_neuron.dt).astype(int)
    spikes = np.zeros(len(t))
    if np.any(spike_idxs > len(t)):
        assert 0, "spikes longer than stimulus"
    spikes[spike_idxs] = 1.
    sweep_spikes.append(spikes)
# %%
def explained_variance(psth1, psth2):
    """Symmetric explained variance between two PSTHs.

    Equals 1 when the traces are identical and decreases as the variance
    of their difference grows relative to their combined variance.
    """
    total = np.var(psth1) + np.var(psth2)
    residual = np.var(psth1 - psth2)
    return (total - residual) / total
def explained_variance_ratio(sweep_spikes, glif_spikes, kern_sd_samp, kern_width_samp):
    """Ratio of the variance explained by the GLIF PSTH to the sweep-mean PSTH.

    All spike trains are smoothed with a normalized Gaussian kernel
    (std ``kern_sd_samp`` and width ``kern_width_samp``, in samples) before
    computing pairwise explained variance against each single trial.
    """
    # scipy.signal.gaussian was deprecated and removed (SciPy >= 1.13);
    # the window function now lives under scipy.signal.windows.
    kernel = signal.windows.gaussian(kern_width_samp, kern_sd_samp, sym=True)
    kernel /= kernel.sum()
    glif_psth = signal.convolve(glif_spikes, kernel, mode='same')
    sweep_stpsth = []
    for spikes in sweep_spikes:
        stpsth = signal.convolve(spikes, kernel, mode='same')
        sweep_stpsth.append(stpsth)
    sweep_psth = np.stack(sweep_stpsth).mean(axis=0)
    glif_var = 0
    sweep_var = 0
    for stpsth in sweep_stpsth:
        glif_var += explained_variance(glif_psth, stpsth)
        sweep_var += explained_variance(sweep_psth, stpsth)
    return glif_var / sweep_var
# %%
# Compare our smoothed explained-variance ratio against Allen's reported one.
print(f'Truth: {var:.6f}')
# for kern_sd_samp in [200, 600, 1000, 2000, 4000]:
# ev = explained_variance_ratio(sweep_spikes, glif_spikes, kern_sd_samp, kern_sd_samp * 6)
# print(f'{kern_sd_samp}: {ev:.6f}')
kern_sd_samp = 2000 # 10 ms, best match
ev = explained_variance_ratio(sweep_spikes, glif_spikes, kern_sd_samp, kern_sd_samp * 6)
print(f'{kern_sd_samp}: {ev:.6f}')
# import pdb; pdb.set_trace()
| felixp8/bmed7610-final-project | analysis/glif_scoring.py | glif_scoring.py | py | 4,843 | python | en | code | 0 | github-code | 13 |
6132342110 | import os
import hashlib
import asyncio
import logging
from fastapi import FastAPI
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from bahire_hasab import BahireHasab
from aiogram import Bot, Dispatcher, types
from aiogram.dispatcher import FSMContext
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types import (
InlineKeyboardButton,
InlineQuery,
InlineQueryResultArticle,
InputTextMessageContent,
InlineKeyboardMarkup,
Message,
)
from aiogram.utils.executor import start_webhook
from aiogram.contrib.middlewares.logging import LoggingMiddleware
# Telegram bot configuration; the token comes from the TOKEN env var.
TOKEN = os.environ.get("TOKEN")
WEBHOOK_HOST = "https://bahirehasab-bot.vercel.app"
WEBHOOK_PATH = "/webhook"
WEBHOOK_URL = f"{WEBHOOK_HOST}{WEBHOOK_PATH}"
app = FastAPI()
bot = Bot(token=TOKEN)
storage = MemoryStorage()
# NOTE(review): module-level dict shared by every chat — concurrent users
# will overwrite each other's sender/receiver/template values. Confirm.
store = {}
dp = Dispatcher(bot, storage=storage)
dp.middleware.setup(LoggingMiddleware())
logging.basicConfig(level=logging.DEBUG)
class SenderReceiverStates(StatesGroup):
    """FSM states for the post-card flow: sender name -> receiver name -> send."""
    SENDER_NAME = State()
    RECEIVER_NAME = State()
    SEND_IMAGE = State()
@app.get("/")
def index():
    """Health-check endpoint for the FastAPI wrapper."""
    return {"Message": "Post card service working"}
def draw_post_card(sender_name: str, reciever_name: str, template_name: str):
    """Render the sender's and receiver's names onto a post-card template.

    :param sender_name: text drawn at the template's sender position.
    :param reciever_name: text drawn at the template's receiver position.
    :param template_name: path of the template image to draw on.
    :return: a seeked BytesIO containing the rendered PNG.
    """
    # Text colour per template. Previously an unknown template left `color`
    # unbound and raised NameError; default to template-1's brown instead.
    if template_name == "images/template-2.png":
        color = (255, 227, 80)
    else:
        color = (109, 46, 0)
    img = Image.open(template_name)
    bio = BytesIO()
    bio.name = "drawn-template.png"
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype("fonts/noto.ttf", size=25)
    draw.text((201, 63), sender_name, color, font=font)
    draw.text((490, 337), reciever_name, color, font=font)
    img.save(bio, "PNG")
    bio.seek(0)
    return bio
@dp.message_handler(commands=["start", "help"])
async def start(msg: Message):
    """Greet the user and show the main inline-keyboard menu."""
    keyboards = [
        [
            InlineKeyboardButton("ЪЊЁ рІерІўрѕўріЉ рѕЏрІЇрїФ", callback_data="calc_other"),
            InlineKeyboardButton("ЪњА ріЦрїѕрІЏ рѕІрѕЏрїЇріўрЅх", callback_data="help"),
        ],
        [
            InlineKeyboardButton("ЪЄфЪЄ╣ рІерІўріЋрІхрѕ« рѕЏрІЇрїФ", callback_data="this_year"),
            InlineKeyboardButton("Рюе рІерѕїрѕІ рІЊрѕўрЅх рѕЏрІЇрїФ", callback_data="calc_other"),
        ],
        [InlineKeyboardButton("ЪЦ│ рЇќрѕхрЅ░ ріФрѕГрІх рѕѕрѕўрѕІріе", callback_data="post_card")],
    ]
    mark_up = InlineKeyboardMarkup(inline_keyboard=keyboards)
    await bot.send_message(
        chat_id=msg.chat.id,
        text=f"""
    Welcome {msg.from_user.full_name} to рЅБрѕЁрѕе рѕљрѕ│рЅЦ
    This bot is made by Hundera Awoke ┬Е
    Follow me on:-
    github: @hunderaweke
    linkedin @hunderaweke
    telegram @hun_era
    For more about the code of the bot visit:-
    https://github.com/hunderaweke/bahirehasab-bot
    Join my Telegram Ъњ╗ Channel:-
    @cod_nghub
    """,
    )
    # Second message carries the actual menu keyboard.
    await bot.send_message(
        chat_id=msg.chat.id,
        text="Ъќљ ріЦріЋрі│ріЋ рІѕрІ░ рЅБрѕЁрѕе рѕљрѕ│рЅЦ ЪЌЃ№ИЈ рѕўрЅђрѕўрѕфрІФ рЅарІ░рѕЁріЊ рѕўрїА",
        reply_markup=mark_up,
    )
@dp.callback_query_handler(text="this_year")
async def this_year(query: types.CallbackQuery):
    """Send this year's erget result computed by BahireHasab."""
    # NOTE(review): the Ethiopian year is hard-coded; presumably it should be
    # derived from the current date — confirm.
    year = 2015
    await query.answer()
    bh = BahireHasab(year=year)
    await bot.send_message(chat_id=query.message.chat.id, text=f"{bh.erget}")
@dp.callback_query_handler(text="post_card")
async def post_card(query: types.CallbackQuery):
    """Show both post-card templates, each with its own 'select' button."""
    await query.answer()
    # NOTE(review): file handles are never closed; aiogram reads them during
    # send_photo — consider context managers.
    template_1 = open("images/template-1.png", "rb")
    template_2 = open("images/template-2.png", "rb")
    keyboard = [
        [InlineKeyboardButton("РўЮ рІГрѕЁріЋ Template Ъќ╝ рЅ░рїарЅђрѕЮ", callback_data="template_1")],
        [InlineKeyboardButton("РўЮ рІГрѕЁріЋ Template Ъќ╝ рЅ░рїарЅђрѕЮ", callback_data="template_2")],
    ]
    await bot.send_photo(
        chat_id=query.message.chat.id,
        photo=template_1,
        reply_markup=InlineKeyboardMarkup(inline_keyboard=[keyboard[0]]),
    )
    await bot.send_photo(
        chat_id=query.message.chat.id,
        photo=template_2,
        reply_markup=InlineKeyboardMarkup(inline_keyboard=[keyboard[1]]),
    )
# Both template buttons land here; stacked decorators register the handler
# for each callback_data value.
@dp.callback_query_handler(text="template_1")
@dp.callback_query_handler(text="template_2")
async def send_post_card(query: types.CallbackQuery, state: FSMContext):
    """Record the chosen template and prompt for the sender's name."""
    await bot.delete_message(
        chat_id=query.from_user.id, message_id=query.message.message_id
    )
    keyboard = [
        [
            InlineKeyboardButton("рѕхрѕЮ рѕѕрѕЏрѕхрїѕрЅБрЅх", callback_data="sender-name"),
        ],
    ]
    selected_template = "images/template-1.png"
    if query.data == "template_1":
        selected_template = "images/template-1.png"
    else:
        selected_template = "images/template-2.png"
    # Stored both in the global dict (read later) and in FSM storage.
    store["selected_template"] = selected_template
    await bot.send_message(
        chat_id=query.from_user.id,
        text=f"ЪњЂ Send the sender's and receiver's name please or press /skip \n рІерѕџрѕЇріерІЇріЋ ріЦріЊ рІерЅ░рЅђрЅБрІГ рѕ░рІЇ рѕхрѕЮ рІФрѕхрїѕрЅА \n Default(рІерѕІріфрІЇ): {query.from_user.full_name}",
        reply_markup=InlineKeyboardMarkup(inline_keyboard=keyboard),
    )
    await state.update_data(selected_template=selected_template)
    await state.update_data(current_state="SENDER_NAME_STATE")
@dp.callback_query_handler(text="sender-name")
async def sender_name_handler(query: types.CallbackQuery):
    """Ask for the sender's name and enter the SENDER_NAME FSM state."""
    await query.answer()
    await bot.delete_message(
        chat_id=query.from_user.id, message_id=query.message.message_id
    )
    await bot.send_message(
        chat_id=query.from_user.id, text="Send the sender's name Ъцъ\n рІерѕџрѕѕріГрІЇ рѕ░рІЇ рѕхрѕЮрЇА "
    )
    await SenderReceiverStates.SENDER_NAME.set()
@dp.message_handler(state=SenderReceiverStates.SENDER_NAME)
async def get_sender_name(message: Message):
    """Store the sender's name, then prompt for the receiver's name."""
    # The original assigned the profile full_name and immediately overwrote it
    # with message.text, losing the intended fallback; keep it as a fallback
    # for messages with no text.
    sender_name = message.text or message.from_user.full_name
    store["sender_name"] = sender_name
    await SenderReceiverStates.RECEIVER_NAME.set()
    await bot.send_message(
        chat_id=message.chat.id, text="Send the receiver's name ЪјЂ \n рІерЅ░рЅђрЅБрІГ рѕ░рІЇ рѕхрѕЮрЇА "
    )
@dp.message_handler(state=SenderReceiverStates.RECEIVER_NAME)
async def get_receiver_name(message: Message):
    """Store the receiver's name and offer send/cancel confirmation buttons."""
    receiver_name = message.text
    store["receiver_name"] = receiver_name
    await SenderReceiverStates.SEND_IMAGE.set()
    keyboard = [
        [
            InlineKeyboardButton("Send Image", callback_data="send-post-card"),
            InlineKeyboardButton("Cancel", callback_data="cancel"),
        ]
    ]
    await bot.send_message(
        chat_id=message.from_user.id,
        text=f"рІерѕџрѕІріГрѕѕрЅх рѕ░рІЇ рѕхрѕЮ:ЪЉЅ {receiver_name}ЪЊГ \n рІерѕџрѕЇріерІЇ рѕ░рІЇ рѕхрѕЮ:ЪЉЅ {store['sender_name']} Ъўј",
        reply_markup=InlineKeyboardMarkup(inline_keyboard=keyboard),
    )
@dp.callback_query_handler(text="cancel", state=SenderReceiverStates.SEND_IMAGE)
async def cancel(query: types.CallbackQuery, state: FSMContext):
    """Abort the post-card flow, restore the main menu and clear FSM state."""
    keyboards = [
        [
            InlineKeyboardButton("ЪЊЁ рІерІўрѕўріЉ рѕЏрІЇрїФ", callback_data="calc_other"),
            InlineKeyboardButton("ЪњА ріЦрїѕрІЏ рѕІрѕЏрїЇріўрЅх", callback_data="help"),
        ],
        [
            InlineKeyboardButton("ЪЄфЪЄ╣ рІерІўріЋрІхрѕ« рѕЏрІЇрїФ", callback_data="this_year"),
            InlineKeyboardButton("Рюе рІерѕїрѕІ рІЊрѕўрЅх рѕЏрІЇрїФ", callback_data="calc_other"),
        ],
        [InlineKeyboardButton("ЪЦ│ рЇќрѕхрЅ░ ріФрѕГрІх рѕѕрѕўрѕІріе", callback_data="post_card")],
    ]
    await query.answer("Canceled Successfully!")
    await bot.delete_message(
        chat_id=query.from_user.id, message_id=query.message.message_id
    )
    await bot.send_message(
        chat_id=query.from_user.id,
        text="ЪўЄ рІ│рїЇрѕЮ рѕѕрѕўрѕъріерѕГ ЪЦ│ рЇА ",
        reply_markup=InlineKeyboardMarkup(inline_keyboard=keyboards),
    )
    await state.finish()
@dp.callback_query_handler(text="send-post-card", state=SenderReceiverStates.SEND_IMAGE)
async def send_image(query: types.CallbackQuery, state: FSMContext):
    """Render the post card from the collected names and send it as a photo."""
    # NOTE(review): values come from the module-level `store`, not FSM storage,
    # so simultaneous users can leak names into each other's cards — confirm.
    receiver_name = store["receiver_name"]
    sender_name = store["sender_name"]
    selected_template = store["selected_template"]
    img = draw_post_card(
        sender_name=sender_name,
        reciever_name=receiver_name,
        template_name=selected_template,
    )
    await bot.send_chat_action(
        chat_id=query.from_user.id, action=types.ChatActions.UPLOAD_PHOTO
    )
    await bot.send_photo(chat_id=query.from_user.id, photo=img)
    await state.finish()
@dp.inline_handler()
async def this_year_inline(query: InlineQuery):
    """Answer inline queries with erget/tnsae results for the requested year."""
    # int("") raises ValueError, so the bare `int(query.query) or 2015` crashed
    # on an empty inline query; the `or` only covered a parsed value of 0.
    try:
        year = int(query.query) or 2015
    except ValueError:
        year = 2015
    bh = BahireHasab(year=year)
    input_content = InputTextMessageContent(f"{bh.erget}")
    # Result ids must be unique per answer; derive them from the year.
    result_id: str = hashlib.md5(str(year).encode()).hexdigest()
    result_id2: str = hashlib.md5(str(year + 1).encode()).hexdigest()
    items = [
        InlineQueryResultArticle(
            id=result_id, title="ріЦрѕГрїѕрЅх", input_message_content=input_content
        ),
        InlineQueryResultArticle(
            id=result_id2,
            title="рЅхріЋрѕ│ріц",
            input_message_content=InputTextMessageContent(f"{bh.tnsae}"),
        ),
    ]
    await bot.answer_inline_query(query.id, results=items, cache_time=1)
# Webhook startup/shutdown hooks — unused while long polling is active below.
# async def on_startup(dp: Dispatcher):
#     await bot.set_webhook(WEBHOOK_URL)
# async def on_shutdown(dp: Dispatcher):
#     logging.warning("Shutting down ....")
#     await bot.delete_webhook()
#     await dp.storage.close()
#     await dp.storage.wait_closed()
#     logging.warning("Good bye!")
# Run the bot via long polling at import time.
# NOTE(review): aiogram 2 normally starts polling via executor.start_polling;
# confirm asyncio.run(dp.start_polling()) behaves as intended here.
asyncio.run(
    dp.start_polling()
)
# Webhook alternative, disabled in favour of polling:
# asyncio.run(
#     start_webhook(
#         dispatcher=dp,
#         webhook_path=WEBHOOK_PATH,
#         on_startup=on_startup,
#         on_shutdown=on_shutdown,
#         skip_updates=True,
#     )
# )
| hunderaweke/bahirehasab-bot | api/index.py | index.py | py | 9,866 | python | en | code | 2 | github-code | 13 |
13813257738 | import json
import logging
import time
import requests
# VirusTotal v2 endpoints; the API key is read once at import time from
# the first line of tools/VT_APIkey.txt.
VT_URL_report = 'https://www.virustotal.com/vtapi/v2/url/report'
VT_URL_scan = 'https://www.virustotal.com/vtapi/v2/url/scan'
VT_API_key = open('tools/VT_APIkey.txt').readlines()[0].strip()
def format_url(url):
    """Prefix a bare domain with 'http://www.' for VirusTotal submission."""
    return f'http://www.{url}'
def scan_request(url):
    """Submit *url* to the VirusTotal URL-scan endpoint; return the JSON body."""
    params = {'apikey': VT_API_key, 'url': url}
    response = requests.post(VT_URL_scan, params=params)
    json_response = response.json()
    return json_response
def report_request(url):
    """Fetch the VirusTotal scan report for *url*; return the JSON body."""
    params = {'apikey': VT_API_key, 'resource': url}
    response = requests.post(VT_URL_report, params=params)
    json_response = response.json()
    return json_response
def parsing_response(report_json):
    """Condense a VirusTotal URL report into url / score / detections.

    :param report_json: decoded JSON of a VT v2 URL report.
    :return: dict with keys 'url', 'VT_score' (positives count) and
        'VT_scan' (only the engines that flagged the URL).
    """
    result_data = {
        'url': report_json['url'],
        'VT_score': report_json['positives'],
        'VT_scan': {},
    }
    # Iterate items() once instead of re-indexing, and avoid `== True`.
    for av, scan in report_json['scans'].items():
        if scan['detected']:
            result_data['VT_scan'][av] = scan
    return result_data
def VT_API_call(url):
    """Submit *url* to VirusTotal, wait for the scan, and return parsed results.

    Returns an empty dict when the report cannot be fetched or parsed.
    """
    new_url = format_url(url)
    try:
        scan_request(new_url)
    except Exception:
        # A failed submission is not fatal: VT may already know the URL,
        # so still try to fetch a report below.
        print('scan_request failed for ', new_url)
    # Give VirusTotal time to finish analysing before asking for the report.
    time.sleep(10)
    try:
        report_json = report_request(new_url)
    except Exception:
        # Previously report_json stayed unbound here and the parsing step
        # failed with a NameError swallowed by a bare except.
        print('report_request failed for ', new_url)
        return {}
    try:
        return parsing_response(report_json)
    except Exception:
        print('parsing_response failed for ', new_url)
        return {}
# VT_API_call('banovici.gov.ba')
| AlexisCAL/SRES_phishing | tools/vt.py | vt.py | py | 1,960 | python | en | code | 0 | github-code | 13 |
37127859044 | import math
from unicodedata import name
class Student:
    """Report card holding four subject scores for a named student."""

    def __init__(self, name, math, science, social, english):
        self.name = name
        self.math = math
        self.sci = science
        self.soc = social
        self.eng = english

    def __sub__(self, other):
        """Subject-wise score difference; the result is named 'Default'."""
        return Student(
            'Default',
            self.math - other.math,
            self.sci - other.sci,
            self.soc - other.soc,
            self.eng - other.eng,
        )

    def __str__(self):
        return 'Name :{} Math={} Science={} Social={} English={}'.format(
            self.name, self.math, self.sci, self.soc, self.eng
        )
# Demo: subtract two report cards and print the per-subject differences.
s1=Student("Nirajan",84,81,89,91)
s2=Student("Bimal",75,89,80,95)
s3=s1-s2
print(s3)
| NirajanJoshi5059/python | operator_overloading_task.py | operator_overloading_task.py | py | 691 | python | en | code | 0 | github-code | 13 |
# -*- coding:utf-8 -*-
# This script is expected to run from the projects/ working directory.
# It converts a chapter's .txt source into an .html file, mapping '#'-style
# heading markers to styled <p> tags and 'image' lines to <img> tags.
import os
import codecs
import re
from utils.mytools import yfillvars
prog_name,p_name,ch_name,chapter_total,media_sub_name,\
src_file_doc,books_path,books_media_name=yfillvars()
cwd = os.path.dirname(os.path.abspath(__file__))
p_path=os.path.join(cwd,p_name)
ch_path=os.path.join(p_path,ch_name)
prj_media_path = os.path.join(ch_path,media_sub_name)
if not os.path.exists(prj_media_path):
    os.makedirs(prj_media_path)
    print('Dir have been made: %s' % prj_media_path)
books_media_path = os.path.join(books_path, books_media_name)
if not os.path.exists(books_media_path):
    os.makedirs(books_media_path)
    print('books_media_path have been made: %s' % books_media_path)
#--------------------------
txt_file_name=ch_name+'.txt'
txt_file_path = os.path.join(ch_path,txt_file_name)
if not os.path.exists(txt_file_path):
    print("%s not ready. exits now." %txt_file_name)
    exit()
#f_txt_file=codecs.open(txt_file_path, 'r', encoding='utf-8')
html_file = ch_name+'.html'
html_file= os.path.join(ch_path,html_file)
if not os.path.exists(html_file):
    f_dst = codecs.open(html_file, 'w', encoding='utf-8')
else:
    # Existing output file: its contents will be erased (the print below
    # says so in Chinese, which is user-facing output and kept as-is).
    print('%s存在, 其内容将被擦除.'% html_file)
    f_dst = codecs.open(html_file, 'w', encoding='utf-8')
    f_dst.truncate()
f_src=codecs.open(txt_file_path,'r',encoding='utf-8')
# Candidate bullet glyphs: 🌿 branch 🍀 clover 🍁 maple 🍂 fallen leaf 🍃 leaf in wind
# 💄 🐾 👙💦 🍺 🍒 🍓 💡🍴⇗
# ○ • ● ○ ◦
# 🐶 😾
yiji_dot='>'
erji_dot='>>'
sanji_dot='>>>'
tu_lead='↗'
mylines = f_src.readlines() # split into lines, producing a list
tu_seq=1
for line in mylines:
    line=line.strip()
    line=re.sub(':', ':', line) # replace ASCII ':' with the full-width ':'
    # case: third-level heading
    if line and re.search(r'^###\d*.', line): # non-empty line that matches
        cd_line=re.sub(r'###\d*.', '', line,count=1) # strip a leading marker like '###12.'; count=1 means only do once.
        #print(cd_line)
        pic_pos=re.search(r'图\d*:', cd_line) # does the line contain a figure marker like '图10:'?
        if pic_pos:
            # print(pic_pos,pic_pos[0],len(pic_pos))
            #ynum=re.sub(r'[图:]','', pic_pos[0]) # drop the marker characters, keep only the digits
            ynum='{0:03d}'.format(tu_seq) # zero-pad the running figure number, e.g. 001
            tu_seq=tu_seq+1
            line = p_name+ch_name+'image'+ynum+'.png'
            # line=project002ch01image001.png
            dst_line= "图"+ynum+tu_lead+'<img src ="/media/%s">'%(line)
            # the above line will match u3d_nginx.conf settings.
            dst_line= '<div class="yimg">'+dst_line+'</div>'
        else:
            dst_line='<p class="sanji">'+sanji_dot+cd_line+ '</p>'
    # case: second-level heading
    elif re.search(r'^##\d*.', line):
        dst_line = re.sub(r'##\d*.', '', line) # strip the '##12.' marker
        dst_line = '<p class="erji">' +erji_dot + dst_line + '</p>'
    # case: first-level heading
    elif re.search(r'^#\d+.', line):
        dst_line = re.sub(r'#\d+.', '', line) # strip the '#12.' marker # re.sub(r'\d+.png', new_text, text_for_post)
        dst_line = '<p class="yiji">' +yiji_dot + dst_line + '</p>'
    else:
        dst_line=line
    f_dst.write(dst_line+'\n')
f_src.close()
f_dst.close()
17038618934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceTransportOilproductInfoQueryModel(object):
    """Request model with three optional fields: agent, ext_info, shop_id."""

    def __init__(self):
        self._agent = None
        self._ext_info = None
        self._shop_id = None

    @property
    def agent(self):
        return self._agent

    @agent.setter
    def agent(self, value):
        self._agent = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        """Serialise the non-empty fields, recursing into nested models."""
        params = dict()
        for key, value in (('agent', self._agent),
                           ('ext_info', self._ext_info),
                           ('shop_id', self._shop_id)):
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for an empty/falsy input."""
        if not d:
            return None
        o = AlipayCommerceTransportOilproductInfoQueryModel()
        for key in ('agent', 'ext_info', 'shop_id'):
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayCommerceTransportOilproductInfoQueryModel.py | AlipayCommerceTransportOilproductInfoQueryModel.py | py | 1,799 | python | en | code | 241 | github-code | 13 |
from flask import Flask,render_template,request,redirect,url_for,abort,session
from flask_assets import Environment
from webassets.loaders import PythonLoader as PythonAssetsLoader
import os
import assets
app = Flask(__name__)
assets_env = Environment(app)
assets_loader = PythonAssetsLoader(assets)
# Register every bundle declared in the local assets module.
# NOTE(review): dict.iteritems() is Python 2 only — confirm target interpreter.
for name,bundle in assets_loader.load_bundles().iteritems():
    assets_env.register(name,bundle)
env = os.environ.get('EXAMPLE_ENV','prod')#will default to production env if no var exported
app.config.from_object('example.settings.%sConfig' %env.capitalize())
app.config['ENV'] = env
from models import *
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'asldkjaslduredj'
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://example.db'
@app.route('/')
def home():
    """Render the sign-up landing page."""
    return render_template('index.html')
@app.route('/signup',methods=['POST'])
def signup():
    """Persist a new user with their message, then redirect to its page."""
    user = User(request.form['username'], request.form['message'])
    db.session.add(user)
    db.session.commit()
    # The route argument belongs inside url_for(); the original passed
    # username as redirect()'s second positional parameter (the HTTP status
    # code) and left url_for without it, raising a BuildError.
    return redirect(url_for('message', username=user.username))
@app.route('/message<username>')
def message(username):
    """Render the stored message for *username*, or 404 if unknown."""
    # NOTE(review): the route has no '/' before <username> (URLs look like
    # /messagebob) — confirm this is intentional.
    #if not 'username' in session:
    #    return abort(403)
    user = User.query.filter_by(username=username).first_or_404()
    #return render_template('message.html',username = session['username'],message = session['message'])
    return render_template('message.html',username = user.username,message = user.message)
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run()
| trtg/flask_assets_tutorial | example/__init__.py | __init__.py | py | 1,616 | python | en | code | 6 | github-code | 13 |
38252940782 | import threading
from karabo.bound import (
IMAGEDATA_ELEMENT, KARABO_CLASSINFO, NODE_ELEMENT, OUTPUT_CHANNEL,
DaqDataType, Encoding, Hash, ImageData, PythonDevice, Schema, Types)
@KARABO_CLASSINFO("ImageSource", "2.7")
class ImageSource(PythonDevice):
    """
    Base class for image sources.

    It provides two output channels - 'output' and 'daqOutput' - for sending
    out images, and three functions - 'update_output_schema', 'write_channels'
    and 'signal_eos'.

    The function 'update_output_schema' will update the schema for the output
    channels and make it fit for the DAQ.
    The function 'write_channels' will write the input data to both the
    output channels, taking care of reshaping them for the DAQ.
    The function 'signal_eos' will send an end-of-stream signal to both the
    output channels.
    """
    def __init__(self, conf):
        super().__init__(conf)
        # Serialises writes so both channels always see the same frame.
        self.write_lock = threading.Lock()

    @staticmethod
    def expectedParameters(expected):
        output_data = Schema()
        (
            NODE_ELEMENT(output_data).key("data")
            .displayedName("Data")
            .setDaqDataType(DaqDataType.TRAIN)
            .commit(),

            IMAGEDATA_ELEMENT(output_data).key("data.image")
            .displayedName("Image")
            # Set initial dummy values for DAQ
            .setDimensions([0, 0])
            .setType(Types.UINT16)
            .setEncoding(Encoding.UNDEFINED)
            .commit(),

            OUTPUT_CHANNEL(expected).key("output")
            .displayedName("Output")
            .dataSchema(output_data)
            .commit(),

            # Second output channel for the DAQ
            OUTPUT_CHANNEL(expected).key("daqOutput")
            .displayedName("DAQ Output")
            .dataSchema(output_data)
            .commit(),
        )

    def update_output_schema(self, shape, encoding, k_type):
        """
        Update the schema of 'output' and 'daqOutput' channels

        :param shape: the shape of image, e.g. (height, width)
        :param encoding: the encoding of the image. e.g. Encoding.GRAY
        :param k_type: the data type, e.g. Types.UINT16
        :return:
        """
        schema_update = Schema()

        def schema_update_helper(node_key, displayed_name):
            # Builds one OUTPUT_CHANNEL entry in schema_update with the
            # concrete image dimensions/type/encoding.
            data_schema = Schema()
            (
                NODE_ELEMENT(data_schema).key("data")
                .displayedName("Data")
                .setDaqDataType(DaqDataType.TRAIN)
                .commit(),

                IMAGEDATA_ELEMENT(data_schema).key("data.image")
                .displayedName("Image")
                .setDimensions(list(shape))
                .setType(k_type)
                .setEncoding(encoding)
                .commit(),

                OUTPUT_CHANNEL(schema_update).key(node_key)
                .displayedName(displayed_name)
                .dataSchema(data_schema)
                .commit(),
            )

        schema_update_helper("output", "Output")
        # NB DAQ wants shape in CImg order, eg (width, height)
        shape = tuple(reversed(shape))
        schema_update_helper("daqOutput", "DAQ Output")
        self.appendSchema(schema_update)

    def write_channels(self, data, binning=None, bpp=None, encoding=None,
                       roi_offsets=None, timestamp=None, header=None):
        """
        Write an image to 'output' and 'daqOutput' channels

        :param data: the image data as numpy.ndarray
        :param binning: the image binning, e.g. (1, 1)
        :param bpp: the bits-per-pixel, e.g. 12
        :param encoding: the image encoding, e.g. Encoding.GRAY
        :param roi_offsets: the ROI offset, e.g. (0, 0)
        :param timestamp: the image timestamp - if none the current timestamp\
        will be used
        :param header: the image header
        :return:
        """
        def write_channel(node_key):
            # Wraps `data` into an ImageData with the optional metadata set,
            # then writes it to the given channel.
            image_data = ImageData(data)
            if binning:
                image_data.setBinning(binning)
            if bpp:
                image_data.setBitsPerPixel(bpp)
            if encoding:
                image_data.setEncoding(encoding)
            if roi_offsets:
                image_data.setROIOffsets(roi_offsets)
            if header:
                image_data.setHeader(header)

            self.writeChannel(node_key, Hash("data.image", image_data),
                              timestamp)

        with self.write_lock:
            write_channel('output')
            # Reshape image for DAQ
            # NB DAQ wants shape in CImg order, eg (width, height)
            data = data.reshape(*reversed(data.shape))
            write_channel('daqOutput')

    def signal_eos(self):
        """
        Send an end-of-stream signal to 'output' and 'daqOutput' channels

        :return:
        """
        self.signalEndOfStream("output")
        self.signalEndOfStream("daqOutput")
| European-XFEL/imageSourcePy | src/imageSource/ImageSource.py | ImageSource.py | py | 4,929 | python | en | code | 0 | github-code | 13 |
16746285681 | # Complete the function below.
def maxLength(a, k):
    """Longest run of consecutive words whose character total stays <= k.

    Sliding-window scan over the word lengths in *a*: grow the window while
    the running total fits in *k*, otherwise drop the leftmost word.
    Returns the maximum window size seen (0 for an empty list or when no
    single word fits).
    """
    best = 0
    window_chars = 0
    window_words = 0
    for idx, word_len in enumerate(a):
        window_chars += word_len
        if window_chars <= k:
            window_words += 1
            best = max(best, window_words)
        else:
            # Evict the word that entered the window first.
            window_chars -= a[idx - window_words]
    return best
| sanadhis/code-practice | twitter-university-2018/twitter-maxphrase.py | twitter-maxphrase.py | py | 930 | python | en | code | 0 | github-code | 13 |
from flask import Flask, request, render_template, redirect, flash, url_for, jsonify
import json
import os
import requests
import math
# OpenWeatherMap API key must be provided via the WEATHER_KEY env var.
weather_key = str(os.environ["WEATHER_KEY"])
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route("/weather")
def main_page():
    """Serve the ZIP-code entry form."""
    return render_template("weather.html")
@app.route("/weather", methods = ["POST"])
def get_weather():
    """Look up current weather for the submitted ZIP code and render it.

    Reads 'zip' and 'temptype' from the POSTed form, queries OpenWeatherMap,
    and renders the temperature in the requested unit.
    """
    if request.form["zip"] == "":
        flash("Error: ZIP code is required!")
        return render_template("weather.html")
    zip_code = request.form["zip"]
    r = requests.get(f"https://api.openweathermap.org/data/2.5/weather?zip={zip_code},us&appid="+weather_key)
    y = r.json()
    # OpenWeatherMap reports failures (unknown ZIP, bad key) with a non-200
    # "cod"; previously this fell through to a KeyError on y["name"].
    if y.get("cod") != 200:
        flash("Error: could not fetch weather for that ZIP code!")
        return render_template("weather.html")
    area_name = y["name"]
    kelvin = y["main"]["temp"]  # API returns Kelvin by default
    if request.form["temptype"] == "fahrenheit":
        temp = (kelvin - 273.15) * 9/5 + 32.0
    else:
        temp = kelvin - 273.15
    return render_template("searched_weather.html", output=round(temp), selected=request.form["temptype"], zip=zip_code, area=area_name)
28374577329 | import argparse
from typing import Callable, Dict, List, Optional
import pandas as pd
from omegaconf import OmegaConf
from rl_utils.plotting.utils import MISSING_VALUE
from rl_utils.plotting.wb_query import fetch_data_from_cfg
def plot_table(
df: pd.DataFrame,
col_key: str,
row_key: str,
cell_key: str,
col_order: List[str],
row_order: List[str],
renames: Optional[Dict[str, str]] = None,
error_scaling=1.0,
n_decimals=2,
missing_fill_value=MISSING_VALUE,
error_fill_value=0.3444,
get_row_highlight: Optional[Callable[[str, pd.DataFrame], Optional[str]]] = None,
make_col_header: Optional[Callable[[int], str]] = None,
x_label: str = "",
y_label: str = "",
skip_toprule: bool = False,
include_err: bool = True,
write_to=None,
err_key: Optional[str] = None,
add_tabular: bool = True,
add_botrule: bool = False,
bold_row_names: bool = True,
show_row_labels: bool = True,
show_col_labels: bool = True,
compute_err_fn: Optional[Callable[[pd.Series], pd.Series]] = None,
value_scaling: float = 1.0,
midrule_formatting: str = "\\midrule\n",
botrule_formatting: str = "\\bottomrule",
custom_cell_format_fn: Optional[
Callable[
[
float,
float,
],
str,
]
] = None,
):
"""
:param df: The index of the data frame does not matter, only the row values and column names matter.
:param col_key: A string from the set of columns.
:param row_key: A string from the set of columns (but this is used to form the rows of the table).
:param renames: Only used for display name conversions. Does not affect functionality.
:param make_col_header: Returns the string at the top of the table like
"ccccc". Put "c|ccccc" to insert a vertical line in between the first
and other columns.
:param x_label: Renders another row of text on the top that spans all the columns.
:param y_label: Renders a side column with vertically rotated text that spawns all the rows.
:param err_key: If non-None, this will be used as the error and override any error calculation.
:param show_row_labels: If False, the row names are not diplayed, and no
column for the row name is displayed.
Example: the data fame might look like
```
democount type final_train_success
0 100 mirl train 0.9800
1 100 mirl train 0.9900
3 100 mirl eval 1.0000
4 100 mirl eval 1.0000
12 50 mirl train 0.9700
13 50 mirl train 1.0000
15 50 mirl eval 1.0000
16 50 mirl eval 0.7200
```
`col_key='type', row_key='demcount', cell_key='final_train_success'` plots
the # of demos as rows and the type as columns with the final_train_success
values as the cell values. Duplicate row and columns are automatically
grouped together.
"""
df[cell_key] = df[cell_key] * value_scaling
if make_col_header is None:
def make_col_header(n_cols):
return "c" * n_cols
if renames is None:
renames = {}
df = df.replace("missing", missing_fill_value)
df = df.replace("error", error_fill_value)
rows = {}
for row_k, row_df in df.groupby(row_key):
grouped = row_df.groupby(col_key)
df_avg_y = grouped[cell_key].mean()
df_std_y = grouped[cell_key].std() * error_scaling
sel_err = False
if err_key is not None:
err = grouped[err_key].mean()
if not err.hasnans:
df_std_y = err
sel_err = True
if not sel_err and compute_err_fn is not None:
df_std_y = compute_err_fn(grouped[cell_key])
rows[row_k] = (df_avg_y, df_std_y)
col_sep = " & "
row_sep = " \\\\\n"
all_s = []
def clean_text(s):
return s.replace("%", "\\%").replace("_", " ")
# Add the column title row.
row_str = []
if show_row_labels:
row_str.append("")
for col_k in col_order:
row_str.append("\\textbf{%s}" % clean_text(renames.get(col_k, col_k)))
all_s.append(col_sep.join(row_str))
for row_k in row_order:
if row_k == "hline":
all_s.append("\\hline")
continue
row_str = []
if show_row_labels:
if bold_row_names:
row_str.append("\\textbf{%s}" % clean_text(renames.get(row_k, row_k)))
else:
row_str.append(clean_text(renames.get(row_k, row_k)))
row_y, row_std = rows[row_k]
if get_row_highlight is not None:
sel_col = get_row_highlight(row_k, row_y)
else:
sel_col = None
for col_k in col_order:
if col_k not in row_y:
row_str.append("-")
else:
val = row_y.loc[col_k]
std = row_std.loc[col_k]
if val == missing_fill_value * value_scaling:
row_str.append("-")
elif val == error_fill_value:
row_str.append("E")
else:
if custom_cell_format_fn is None:
err = ""
if include_err:
err = f"$ \\pm$ %.{n_decimals}f " % std
err = f"{{\\scriptsize {err} }}"
txt = f" %.{n_decimals}f {err}" % val
if col_k == sel_col:
txt = "\\textbf{ " + txt + " }"
else:
txt = custom_cell_format_fn(val, err)
row_str.append(txt)
all_s.append(col_sep.join(row_str))
n_columns = len(col_order)
if show_row_labels:
n_columns += 1
col_header_s = make_col_header(n_columns)
if y_label != "":
col_header_s = "c" + col_header_s
start_of_line = " & "
toprule = ""
midrule = "\\cmidrule{2-%s}\n" % (n_columns + 1)
botrule = midrule
row_lines = [start_of_line + x for x in all_s[1:]]
row_lines[0] = (
"\\multirow{4}{1em}{\\rotatebox{90}{%s}}" % y_label
) + row_lines[0]
else:
row_lines = all_s[1:]
start_of_line = ""
toprule = "\\toprule\n"
midrule = midrule_formatting
botrule = botrule_formatting
if skip_toprule:
toprule = ""
if x_label != "":
toprule += ("& \\multicolumn{%i}{c}{%s}" % (n_columns, x_label)) + row_sep
ret_s = ""
if add_tabular:
ret_s += "\\begin{tabular}{%s}\n" % col_header_s
# Line above the table.
ret_s += toprule
if show_col_labels:
# Separate the column headers from the rest of the table by a line.
ret_s += start_of_line + all_s[0] + row_sep
ret_s += midrule
all_row_s = ""
for row_line in row_lines:
all_row_s += row_line
# Do not add the separator to the last element if we are not in tabular mode.
if "hline" not in row_line:
all_row_s += row_sep
else:
all_row_s += "\n"
ret_s += all_row_s
# Line below the table.
if add_tabular:
ret_s += botrule
ret_s += "\n\\end{tabular}\n"
if add_botrule:
ret_s += botrule
if write_to is not None:
with open(write_to, "w") as f:
f.write(ret_s)
print(f"Wrote result to {write_to}")
else:
print(ret_s)
return ret_s
def plot_from_file(plot_cfg_path, add_query_fields=None):
    """
    Load a plotting config, fetch the matching data and render it as a table.

    :param plot_cfg_path: Path to an OmegaConf YAML plot configuration.
    :param add_query_fields: Optional extra query fields forwarded to
        `fetch_data_from_cfg`.
    :returns: The fetched DataFrame (so callers can inspect it further).
    """
    config = OmegaConf.load(plot_cfg_path)
    data = fetch_data_from_cfg(plot_cfg_path, add_query_fields)
    plot_table(data, cell_key=config.plot_key, **config.sub_plot_params)
    return data
# Script entry point: render the table described by the config file passed
# via --cfg (required).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg", type=str, required=True)
    args = parser.parse_args()
    plot_from_file(args.cfg)
| ASzot/rl-utils | rl_utils/plotting/auto_table.py | auto_table.py | py | 8,089 | python | en | code | 3 | github-code | 13 |
44233386440 | from importlib import import_module
__version__ = "1.1"
# Re-export selected classes at the package top level so users can write
# e.g. "from tfcomb import CombObj" instead of "from tfcomb.objects import ...".
global_classes = ["tfcomb.objects.CombObj",
                  "tfcomb.objects.DiffCombObj",
                  "tfcomb.objects.DistObj"]

for dotted_path in global_classes:
    # Split "pkg.module.Class" into the module path and the class name.
    module_path, class_name = dotted_path.rsplit(".", 1)
    globals()[class_name] = getattr(import_module(module_path), class_name)
| loosolab/TF-COMB | tfcomb/__init__.py | __init__.py | py | 563 | python | en | code | 8 | github-code | 13 |
71212805137 | from tkinter import *
from pytube import*
from tkinter import ttk
from PIL import Image,ImageTk
import requests
import io
import os
class Youtube_app:
    """Tkinter GUI for downloading YouTube videos or audio tracks via pytube."""

    def __init__(self, root):
        """Build the main window and all widgets; ensure output folders exist.

        :param root: the Tk root window the app attaches to.
        """
        self.root = root
        self.root.title("Youtube Dowanloader.Developed By Fahad")
        self.root.geometry("500x420+300+50")
        self.root.resizable(False,False)
        self.root.config(bg='white')
        title=Label(self.root,text=' Youtube Dowanloader.Developed By Fahad',font=("times new roman",15),bg="#262626",fg="white",anchor="w").pack(side=TOP,fill=X)
        # URL entry row.
        self.var_url=StringVar()
        lbl_url=Label(self.root,text='Video url',font=("times new roman",15,'bold'),bg="white").place(x=10,y=50)
        entry = Entry(self.root,font=("times new roman", 13),textvariable=self.var_url, bg="lightyellow").place(x=120, y=50,width=350)
        # File-type selector (Video vs Audio) and search button.
        file_type = Label(self.root, text='File Type', font=("times new roman", 15, 'bold'), bg="white").place(x=10, y=90)
        self.var_fillType=StringVar()
        self.var_fillType.set('Video')
        video_radio=Radiobutton(self.root, text='Video',variable=self.var_fillType,value='Video', font=("times new roman", 13), bg="white",activebackground="white").place(x=120, y=90)
        audio_radio = Radiobutton(self.root, text='Audio',variable=self.var_fillType,value='Audio', font=("times new roman", 13), bg="white",activebackground="white").place(x=220, y=90)
        btn_search=Button(self.root,text="Search",command=self.search,font=('times new roman',15),bg='blue',fg='white').place(x=300,y=90,height=30,width=120)
        # Preview frame: title, thumbnail and description of the found video.
        frame1=Frame(self.root,bd=2,relief=RIDGE,bg='lightyellow')
        frame1.place(x=10,y=130,width=480,height=180)
        self.video_title = Label(frame1,text='Video Title Here', font=("times new roman", 12),bg="lightgray", fg="white", anchor="w")
        self.video_title.place(x=0,y=0,relwidth=1)
        self.video_image = Label(frame1, text='Video \nImage', font=("times new roman", 15), bg="lightgray",bd=2,relief=RIDGE)
        self.video_image.place(x=5,y=30, width=180,height=140)
        lbl_desc = Label(frame1, text='Description', font=("times new roman", 15), bg="lightyellow").place(x=190,y=30)
        self.video_desc =Text(frame1,font=("times new roman", 12), bg="lightyellow")
        self.video_desc.place(x=190,y=60, width=280,height=110)
        # Status row: size, progress percentage, clear/download buttons.
        self.lbl_size = Label(self.root, text='Total Size:', font=("times new roman", 15), bg="white")
        self.lbl_size.place(x=10, y=320)
        self.lbl_percentage = Label(self.root, text='Dowanloading:', font=("times new roman", 15), bg="white")
        self.lbl_percentage.place(x=160, y=320)
        btn_clear= Button(self.root, text="Clear",command=self.clear,font=('times new roman', 13), bg='blue', fg='white').place(x=350,y=320,height=25,width=70)
        self.btn_dowanload = Button(self.root, text="Download",state=DISABLED,command=self.dowanload,font=('times new roman', 13), bg='green', fg='white')
        self.btn_dowanload.place(x=410, y=320, height=25,width=90)
        self.prog=ttk.Progressbar(self.root,orient=HORIZONTAL,length=590,mode='determinate')
        self.prog.place(x=10,y=360,width=485,height=20)
        self.lbl_message = Label(self.root, text='', font=("times new roman", 13), bg="white")
        self.lbl_message.place(x=0, y=385,relwidth=1)
        # Create output folders on first run.  `os.path.exists` returns a
        # bool, so test it directly instead of comparing to tkinter's FALSE
        # constant (which only worked because FALSE == 0 == False).
        if not os.path.exists('Audios'):
            os.mkdir('Audios')
        if not os.path.exists('Videos'):
            os.mkdir('Videos')
    #====================================================================================================================================================================================
    def search(self):
        """Fetch metadata for the entered URL and populate the preview frame."""
        if self.var_url.get()=='':
            self.lbl_message.config(text="Video URL is Required",fg='red')
        else:
            yt = YouTube(self.var_url.get())
            #======convert image url to image======
            response=requests.get(yt.thumbnail_url)
            img_byte=io.BytesIO(response.content)
            self.img=Image.open(img_byte)
            # LANCZOS is the modern name for the deprecated ANTIALIAS alias
            # (ANTIALIAS was removed in Pillow 10; the filter is identical).
            self.img=self.img.resize((180,140),Image.LANCZOS)
            self.img=ImageTk.PhotoImage(self.img)
            self.video_image.config(image=self.img)
            #=======fetch the size as per selected type=====
            # Use the builtin True rather than tkinter's TRUE constant.
            if self.var_fillType.get()=='Video':
                select_file=yt.streams.filter(progressive=True).first()
            if self.var_fillType.get()=='Audio':
                select_file=yt.streams.filter(only_audio=True).first()
            self.size_inBytes=select_file.filesize
            max_size=self.size_inBytes/1024000
            self.mb=str(round(max_size,2))+"MB"
            #====updating the frame elements=======
            self.lbl_size.config(text='Total Size: '+self.mb)
            self.video_title.config(text=yt.title)
            self.video_desc.delete("1.0",END)
            self.video_desc.insert(END,yt.description[:200])
            self.btn_dowanload.config(state=NORMAL)

    def progress_(self,streams,chunk,bytes_remanining):
        """pytube on_progress callback: update the progress bar and labels."""
        # Fraction downloaded = (total - remaining) / total, as a percentage.
        percentage=(float(abs(bytes_remanining-self.size_inBytes)/self.size_inBytes))*float(100)
        self.prog['value']=percentage
        self.prog.update()
        self.lbl_percentage.config(text=f'Dowanloading: {str(round(percentage,2))}%')
        if round(percentage,2)==100:
            self.lbl_message.config(text="Dowanload Complete",fg="green")
            self.btn_dowanload.config(state=DISABLED)

    def clear(self):
        """Reset every widget back to its initial state."""
        self.var_fillType.set("Video")
        self.var_url.set('')
        self.prog['value']=0
        self.btn_dowanload.config(state=DISABLED)
        self.lbl_message.config(text='')
        self.video_title.config(text='Video Title Here')
        self.video_image.config(image='')
        self.video_desc.delete('1.0',END)
        self.lbl_size.config(text="Total Size: MB")
        self.lbl_percentage.config(text="Dowanloading:0%")

    def dowanload(self):
        """Download the selected stream, reporting progress via progress_()."""
        yt = YouTube(self.var_url.get(),on_progress_callback=self.progress_)
        # =======fetch the stream as per selected type=====
        if self.var_fillType.get() == 'Video':
            select_file = yt.streams.filter(progressive=True).first()
            select_file.download("Videos/")
        if self.var_fillType.get() == 'Audio':
            select_file = yt.streams.filter(only_audio=True).first()
            select_file.download("Audios/")
# Build the Tk root window, attach the application and enter the event loop.
root = Tk()
obj = Youtube_app(root)
root.mainloop()
| Fahad2021/Youtube-Video-Dowanloader- | youtube.py | youtube.py | py | 6,442 | python | en | code | 0 | github-code | 13 |
37945618848 | # This file was automatically created by FeynRules 2.0.17
# Mathematica version: 8.0 for Mac OS X x86 (64-bit) (November 6, 2010)
# Date: Wed 10 Dec 2014 14:05:51
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
lam2 = Parameter(name = 'lam2',
nature = 'external',
type = 'real',
value = 0.1,
texname = '\\lambda _2',
lhablock = 'POTENTIALPARAM',
lhacode = [ 1 ])
lam3 = Parameter(name = 'lam3',
nature = 'external',
type = 'real',
value = 0.1,
texname = '\\lambda _3',
lhablock = 'POTENTIALPARAM',
lhacode = [ 2 ])
lam4 = Parameter(name = 'lam4',
nature = 'external',
type = 'real',
value = 0.1,
texname = '\\lambda _4',
lhablock = 'POTENTIALPARAM',
lhacode = [ 3 ])
lam5 = Parameter(name = 'lam5',
nature = 'external',
type = 'real',
value = 0.1,
texname = '\\lambda _5',
lhablock = 'POTENTIALPARAM',
lhacode = [ 4 ])
M1coeff = Parameter(name = 'M1coeff',
nature = 'external',
type = 'real',
value = 100,
texname = 'M_1',
lhablock = 'POTENTIALPARAM',
lhacode = [ 5 ])
M2coeff = Parameter(name = 'M2coeff',
nature = 'external',
type = 'real',
value = 100,
texname = 'M_2',
lhablock = 'POTENTIALPARAM',
lhacode = [ 6 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\text{aS}',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
tanth = Parameter(name = 'tanth',
nature = 'external',
type = 'real',
value = 0.1,
texname = 't_H',
lhablock = 'VEV',
lhacode = [ 1 ])
ymdo = Parameter(name = 'ymdo',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{ymdo}',
lhablock = 'YUKAWA',
lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
nature = 'external',
type = 'real',
value = 0.0025499999999999997,
texname = '\\text{ymup}',
lhablock = 'YUKAWA',
lhacode = [ 2 ])
yms = Parameter(name = 'yms',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{yms}',
lhablock = 'YUKAWA',
lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{ymc}',
lhablock = 'YUKAWA',
lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172.,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
yme = Parameter(name = 'yme',
nature = 'external',
type = 'real',
value = 0.0005110000000000001,
texname = '\\text{yme}',
lhablock = 'YUKAWA',
lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{ymm}',
lhablock = 'YUKAWA',
lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
Me = Parameter(name = 'Me',
nature = 'external',
type = 'real',
value = 0.0005110000000000001,
texname = '\\text{Me}',
lhablock = 'MASS',
lhacode = [ 11 ])
MM = Parameter(name = 'MM',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{MM}',
lhablock = 'MASS',
lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MU = Parameter(name = 'MU',
nature = 'external',
type = 'real',
value = 0.0025499999999999997,
texname = 'M',
lhablock = 'MASS',
lhacode = [ 2 ])
MC = Parameter(name = 'MC',
nature = 'external',
type = 'real',
value = 1.42,
texname = '\\text{MC}',
lhablock = 'MASS',
lhacode = [ 4 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MD = Parameter(name = 'MD',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{MD}',
lhablock = 'MASS',
lhacode = [ 1 ])
MS = Parameter(name = 'MS',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{MS}',
lhablock = 'MASS',
lhacode = [ 3 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
Mh = Parameter(name = 'Mh',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{Mh}',
lhablock = 'MASS',
lhacode = [ 25 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
Wh = Parameter(name = 'Wh',
nature = 'external',
type = 'real',
value = 0.00575308848,
texname = '\\text{Wh}',
lhablock = 'DECAY',
lhacode = [ 25 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 252 ])
WH3p = Parameter(name = 'WH3p',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH3p}',
lhablock = 'DECAY',
lhacode = [ 253 ])
WH3z = Parameter(name = 'WH3z',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH3z}',
lhablock = 'DECAY',
lhacode = [ 254 ])
WH5pp = Parameter(name = 'WH5pp',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH5pp}',
lhablock = 'DECAY',
lhacode = [ 255 ])
WH5p = Parameter(name = 'WH5p',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH5p}',
lhablock = 'DECAY',
lhacode = [ 256 ])
WH5z = Parameter(name = 'WH5z',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WH5z}',
lhablock = 'DECAY',
lhacode = [ 257 ])
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\text{aEW}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
v = Parameter(name = 'v',
nature = 'internal',
type = 'real',
value = '1/(2**0.25*cmath.sqrt(Gf))',
texname = 'v')
sh = Parameter(name = 'sh',
nature = 'internal',
type = 'real',
value = 'tanth/cmath.sqrt(1 + tanth**2)',
texname = 's_H')
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM1x3 = Parameter(name = 'CKM1x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM1x3}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
CKM2x3 = Parameter(name = 'CKM2x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM2x3}')
CKM3x1 = Parameter(name = 'CKM3x1',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x1}')
CKM3x2 = Parameter(name = 'CKM3x2',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x2}')
CKM3x3 = Parameter(name = 'CKM3x3',
nature = 'internal',
type = 'complex',
value = '1',
texname = '\\text{CKM3x3}')
ch = Parameter(name = 'ch',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sh**2)',
texname = 'c_H')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
vchi = Parameter(name = 'vchi',
nature = 'internal',
type = 'real',
value = 'sh/(2.*2**0.75*cmath.sqrt(Gf))',
texname = 'v_{\\chi }')
MH3 = Parameter(name = 'MH3',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(v**2*(lam5/2. + M1coeff/(4.*vchi)))',
texname = 'M_3')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
vphi = Parameter(name = 'vphi',
nature = 'internal',
type = 'real',
value = '(2*vchi*cmath.sqrt(2))/tanth',
texname = 'v_{\\phi }')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
Mat12sq = Parameter(name = 'Mat12sq',
nature = 'internal',
type = 'real',
value = '((-M1coeff + 4*(2*lam2 - lam5)*vchi)*vphi*cmath.sqrt(3))/2.',
texname = '\\text{Mat12sq}')
Mat22sq = Parameter(name = 'Mat22sq',
nature = 'internal',
type = 'real',
value = '-6*M2coeff*vchi + 8*(lam3 + 3*lam4)*vchi**2 + (M1coeff*vphi**2)/(4.*vchi)',
texname = '\\text{Mat22sq}')
MH5 = Parameter(name = 'MH5',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(12*M2coeff*vchi + 8*lam3*vchi**2 + (3*lam5*vphi**2)/2. + (M1coeff*vphi**2)/(4.*vchi))',
texname = 'M_5')
mu3sq = Parameter(name = 'mu3sq',
nature = 'internal',
type = 'real',
value = '6*M2coeff*vchi - 4*(lam3 + 3*lam4)*vchi**2 - (2*lam2 - lam5)*vphi**2 + (M1coeff*vphi**2)/(4.*vchi)',
texname = '\\text{mu3sq}')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/vphi',
texname = '\\text{yb}')
yc = Parameter(name = 'yc',
nature = 'internal',
type = 'real',
value = '(ymc*cmath.sqrt(2))/vphi',
texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
nature = 'internal',
type = 'real',
value = '(ymdo*cmath.sqrt(2))/vphi',
texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
nature = 'internal',
type = 'real',
value = '(yme*cmath.sqrt(2))/vphi',
texname = '\\text{ye}')
ym = Parameter(name = 'ym',
nature = 'internal',
type = 'real',
value = '(ymm*cmath.sqrt(2))/vphi',
texname = '\\text{ym}')
ys = Parameter(name = 'ys',
nature = 'internal',
type = 'real',
value = '(yms*cmath.sqrt(2))/vphi',
texname = '\\text{ys}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vphi',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vphi',
texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
nature = 'internal',
type = 'real',
value = '(ymup*cmath.sqrt(2))/vphi',
texname = '\\text{yup}')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
lam1 = Parameter(name = 'lam1',
nature = 'internal',
type = 'real',
value = '(Mh**2 + Mat12sq**2/(Mat22sq - Mh**2))/(8.*vphi**2)',
texname = '\\lambda _1')
Mat11sq = Parameter(name = 'Mat11sq',
nature = 'internal',
type = 'real',
value = '8*lam1*vphi**2',
texname = '\\text{Mat11sq}')
mu2sq = Parameter(name = 'mu2sq',
nature = 'internal',
type = 'real',
value = '(3*M1coeff*vchi)/2. - 3*(2*lam2 - lam5)*vchi**2 - 4*lam1*vphi**2',
texname = '\\text{mu2sq}')
MH = Parameter(name = 'MH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(Mat11sq + Mat22sq - Mh**2)',
texname = 'M_H')
sa = Parameter(name = 'sa',
nature = 'internal',
type = 'real',
value = 'cmath.sin(0.5*cmath.asin((2*Mat12sq)/(-Mh**2 + MH**2)))',
texname = 's_{\\alpha }')
ca = Parameter(name = 'ca',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sa**2)',
texname = 'c_{\\alpha }')
| rushioda/PIXELVALID_athena | athena/Generators/MadGraphModels/python/models/GM_UFO/parameters.py | parameters.py | py | 19,155 | python | en | code | 1 | github-code | 13 |
33756000446 | from flask import Flask, request, abort, make_response
from flask.json import jsonify
from flask_cors import CORS
from sqlalchemy import func
from models.__init__ import setup_db, Category, Question
from utils import format_categories, format_questions, \
format_categories_from_questions
QUESTIONS_PER_PAGE = 10
def create_app():
    """Application factory: build and configure the trivia Flask app.

    Wires up the database, CORS, all REST endpoints and the JSON error
    handlers, then returns the ready-to-run app instance.
    """
    # create and configure the app
    app = Flask(__name__)
    setup_db(app)
    CORS(app)

    @app.after_request
    def after_request(response):
        # Advertise the allowed headers/methods to cross-origin callers
        # on every response.
        response.headers.add('Access-Control-Allow-Headers',
                             'Content-Type,Authorization,true')
        response.headers.add('Access-Control-Allow-Methods',
                             'GET,PATCH,POST,DELETE,OPTIONS')
        return response

    @app.route('/categories')
    def get_categories():
        """
        @COMPLETED:
        Create an endpoint to handle GET requests
        for all available categories.
        """
        categories = Category.query.all()
        return jsonify({
            "success": True,
            "error": None,
            "message": "Get categories successfully.",
            "payload": {
                "categories": format_categories(categories)
            }
        })

    @app.route('/questions')
    def get_questions():
        """
        @COMPLETED:
        Create an endpoint to handle GET requests for questions,
        including pagination (every 10 questions).
        This endpoint should return a list of questions,
        number of total questions, current category, categories.
        """
        # Pagination is driven by query params; limit defaults to 10.
        page = request.args.get('page', 1, type=int)
        limit = request.args.get('limit', QUESTIONS_PER_PAGE, type=int)
        questions_query = Question.query.paginate(page, limit, False)
        questions = format_questions(questions_query.items)
        categories = format_categories_from_questions(questions)
        # Arbitrarily pick the first category present on this page.
        current_category = categories[0] if categories else None
        return jsonify({
            "success": True,
            "error": None,
            "message": "Get questions successfully.",
            "payload": {
                "questions": questions,
                "page": questions_query.page,
                "limit": questions_query.per_page,
                "total": questions_query.total,
                "categories": categories,
                "current_category": current_category
            }
        })

    @app.route('/questions/<int:question_id>', methods=['DELETE'])
    def delete_question(question_id):
        """
        @COMPLETED: Create an endpoint to DELETE question using a question ID.
        """
        question = Question.query.get(question_id)
        if not question:
            abort(404)
        question.delete()
        return make_response(jsonify({
            "success": True,
            "error": None,
            "message": "Delete question successfully.",
            "payload": {
                "question": question.format(),
            }
        }), 200)

    @app.route('/questions', methods=['POST'])
    def create_question():
        """
        @COMPLETED: Create an endpoint to POST a new question, which will
        require the question and answer text, category, and difficulty score.
        """
        # Any malformed body (missing keys, non-numeric category/difficulty)
        # raises inside the try and is reported as 422.
        try:
            question_body = request.get_json()
            question = Question(
                question=question_body.get('question'),
                answer=question_body.get('answer'),
                category=int(question_body.get('category')),
                difficulty=int(question_body.get('difficulty'))
            )
            question.insert()
            return make_response(jsonify({
                "success": True,
                "error": None,
                "message": "Create question successfully.",
                "payload": {
                    "question": question.format(),
                }
            }), 201)
        except Exception as err:
            print(err)
            abort(422)

    @app.route('/questions/search', methods=['POST'])
    def search_questions():
        """
        @COMPLETED: Create a POST endpoint to get questions based on a search
        term. It should return any questions for whom the search term
        is a substring of the question.
        """
        search_term = request.get_json().get('search_term')
        if search_term is None:
            abort(422)
        # Case-insensitive substring match via ILIKE.
        search = "%{}%".format(search_term)
        questions = Question.query.filter(
            Question.question.ilike(search)).all()
        questions = format_questions(questions)
        categories = format_categories_from_questions(questions)
        current_category = categories[0] if categories else None
        return make_response(jsonify({
            "success": True,
            "error": None,
            "message": "Search question successfully.",
            "payload": {
                "questions": questions,
                "total_questions": len(questions),
                "current_category": current_category,
                "categories": categories
            }
        }), 200)

    @app.route('/categories/<int:category_id>/questions')
    def get_questions_by_category(category_id):
        """
        @COMPLETED: Create a GET endpoint to get questions based on category.
        """
        # NOTE(review): an unknown category_id yields an empty list with
        # success=True rather than a 404 — confirm the frontend expects this.
        questions = Question.query.filter_by(category=category_id).all()
        return jsonify({
            "success": True,
            "error": None,
            "message": "Get questions by category successfully.",
            "payload": {
                "questions": format_questions(questions),
                "total_questions": len(questions),
                "current_category": category_id
            }
        })

    @app.route('/quizzes', methods=['POST'])
    def play_trivia():
        """
        @COMPLETED: Create a POST endpoint to get questions to play the quiz.
        This endpoint should take category and previous question parameters
        and return a random questions within the given category,
        if provided, and that is not one of the previous questions.
        """
        request_body = request.get_json()
        quiz_category = request_body.get('quiz_category')
        previous_questions = request_body.get('previous_questions')
        # Build the filter list dynamically: both category and the
        # previous-question exclusion are optional.
        filters = []
        if quiz_category:
            filters.append(Question.category == int(quiz_category))
        if previous_questions:
            filters.append(~Question.id.in_(previous_questions))
        # Pick one random question matching all active filters; None when
        # every candidate has already been asked.
        question = Question \
            .query \
            .filter(*filters) \
            .order_by(func.random()) \
            .first()
        return make_response(jsonify({
            "success": True,
            "error": None,
            "message": "Start trivia successfully.",
            "payload": {
                "question": question.format() if question else None,
            }
        }), 200)

    @app.errorhandler(404)
    def not_found(error):
        """
        @COMPLETED: Create error handlers for all expected errors,
        including 404 and 422.
        """
        print(error)
        return jsonify({
            "success": False,
            "error": 404,
            "message": "resource not found"
        }), 404

    @app.errorhandler(422)
    def unprocessable(error):
        print(error)
        return jsonify({
            "success": False,
            "error": 422,
            "message": "unprocessable"
        }), 422

    @app.errorhandler(400)
    def bad_request(error):
        print(error)
        return jsonify({
            "success": False,
            "error": 400,
            "message": "bad request"
        }), 400

    return app
| ClaudiuBogdan/trivia_api | backend/flaskr/__init__.py | __init__.py | py | 7,723 | python | en | code | 0 | github-code | 13 |
73163204499 | #!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
VERSION = "Sim_gui v0.1beta"
class Sim_main_menu(Gtk.MenuBar):
    """Main menu bar (File/Edit/Tools/Help) for the simulator window."""

    def __init__(self, toplevel):
        """Create the top-level menus and populate File/Help.

        :param toplevel: parent Gtk.Window, used as the transient parent
            for dialogs spawned from this menu.
        """
        super(Sim_main_menu, self).__init__()
        self.main_menu = {}
        self.toplevel = toplevel
        for key in ["File", "Edit", "Tools", "Help"]:
            item = Gtk.MenuItem(key)
            self.main_menu[key] = Gtk.Menu()
            item.set_submenu(self.main_menu[key])
            self.add(item)
        self.add_items_to("File", (("Quit", lambda x: Gtk.main_quit()), ))
        self.add_items_to("Help", (("About", self.on_about_activated), ))

    def add_items_to(self, main_item, items):
        """Add (label, handler) pairs to the submenu named *main_item*.

        A label of None inserts a separator instead of a menu item.
        NOTE(review): each entry is inserted at position 0, so passing more
        than one item places them in reverse order — confirm intended before
        extending the menus.
        """
        for item, handler in items:
            if item is None:  # identity check is the idiomatic None test
                it = Gtk.SeparatorMenuItem()
            else:
                it = Gtk.ImageMenuItem(item)
                it.connect("activate", handler)
            self.main_menu[main_item].insert(it, 0)

    def on_about_activated(self, menuitem):
        """Show the About dialog, modal over the toplevel window."""
        #pxb = GdkPixbuf.Pixbuf.new_from_file("picide.png")
        dlg = Gtk.AboutDialog(version = VERSION,program_name = "PixIDE",
                              license_type = Gtk.License.GPL_3_0)
        dlg.set_transient_for(self.toplevel)
        dlg.run()
        dlg.destroy()
| iguerra94/msp430-simulator | main_menu.py | main_menu.py | py | 1,318 | python | en | code | 0 | github-code | 13 |
71311918738 | import numpy as np
OFFSET_DTYPE = np.int64
def rlencode(array, chunksize=None):
    """
    Run length encoding.

    Based on http://stackoverflow.com/a/32681075, which is based on the rle
    function from R.

    Parameters
    ----------
    array : 1D array_like
        Input array to encode.
    chunksize : int, optional
        Process the input in chunks of this many elements to bound peak
        memory; the result is identical regardless of chunk size.
        Defaults to encoding the whole array in one pass.

    Returns
    -------
    starts, lengths, values : np.ndarray
        Start position, run length and value of each run.

    Notes
    -----
    NaN never compares equal to itself, so runs of NaN come out as
    length-1 runs.
    """
    array = np.asarray(array)
    n = len(array)
    if n == 0:
        return (
            np.array([], dtype=int),
            np.array([], dtype=int),
            np.array([], dtype=array.dtype),
        )
    if chunksize is None:
        chunksize = n

    starts, values = [], []
    # NaN != anything, so the first element always opens a new run.
    last_val = np.nan
    for i in range(0, n, chunksize):
        x = array[i: i + chunksize]
        # Positions within the chunk where the value changes.
        locs = np.flatnonzero(x[1:] != x[:-1]) + 1
        if x[0] != last_val:
            # The chunk does not continue the previous chunk's final run.
            locs = np.r_[0, locs]
        starts.append(i + locs)
        values.append(x[locs])
        last_val = x[-1]
    starts = np.concatenate(starts)
    lengths = np.diff(np.r_[starts, n])
    values = np.concatenate(values)
    return starts, lengths, values
| ChouYunShuo/scHiC_server | src/api/hic/utils.py | utils.py | py | 1,182 | python | en | code | 0 | github-code | 13 |
42272567345 | import logging
def log(message, log='info'):
    """
    Print *message* to stdout and record it with the logging module.

    :param message: text to emit.
    :param log: level name — 'error', 'debug' or 'warning'; any other
        value (including the default) logs at INFO.
    """
    print(message)
    level_handlers = {
        'error': logging.error,
        'debug': logging.debug,
        'warning': logging.warning,
    }
    level_handlers.get(log, logging.info)(message)
def print_dict(dictionary, logger=False):
    """
    Emit each ``key: value`` pair of *dictionary* on its own line.

    :param dictionary: mapping to display.
    :param logger: when True route each line through :func:`log`
        (stdout + logging); otherwise plain ``print``.
    """
    for key, value in dictionary.items():
        line = f'{key}: {value}'
        if logger:
            log(line)
        else:
            print(line)
| mx-jeff/scrapper-boilerplate | scrapper_boilerplate/output/__init__.py | __init__.py | py | 612 | python | en | code | 0 | github-code | 13 |
11095954236 | """
Example script to show the classifier being used to highlight suspect areas.
"""
import os
import torch
from torchvision import transforms
import numpy as np
import lycon
from polygeist.CNN.model import PDNet
def _im_to_tensor(filename):
    """
    Load an image file into a (channels, y, x) torch tensor.

    :param filename: path of the image to load.
    :raises IOError: if the file cannot be decoded.
    """
    # Load the file
    im = lycon.load(filename)
    if im is None:
        # Report the offending path: the original f-string had no
        # placeholder and always said "(unknown)".
        raise IOError(f"Error loading {filename}")
    # Make sure we have a three channel image
    assert im.shape[2] == 3
    # permute the channels from (y, x, c) to (c, y, x)
    return torch.tensor(im).permute(2, 0, 1)
def _chunk_tensor(tensor, chunk_size=512):
"""
Generator to split the tensor into chunks.
"""
_, yy, xx = tensor.shape
# Tumble over the slice using a fixed window size
for x in np.arange(0, xx - chunk_size, chunk_size):
for y in np.arange(0, yy - chunk_size, chunk_size):
# Note, this can be achieved by using tensor.unfold.
# However, we are doing this to assure correctness.
yield tensor[:, y : y + chunk_size, x : x + chunk_size].unsqueeze(0)
def _im_to_chunked_tensor(filename, chunk_size=512):
    """
    Load an image and stack its windows into one tensor of shape
    [stack, channels, chunk_size, chunk_size].
    """
    chunks = list(_chunk_tensor(_im_to_tensor(filename), chunk_size=chunk_size))
    return torch.vstack(chunks)
def _im_to_chunked_iterator(filename, chunk_size=512, batches=100):
    """
    Yield the stacked image chunks in mini-batches.

    Note: despite the name, ``batches`` is the *size* of each mini-batch —
    ``Tensor.split(n)`` produces pieces of length ``n`` — not the number of
    batches produced.  The parameter name is kept for call compatibility.
    """
    parts = _im_to_chunked_tensor(filename, chunk_size=chunk_size).split(batches)
    for part in parts:
        yield part
def label_image_with_confidence(
    model_file: str,
    file_to_stain: str,
    output_path: str,
    threshold=20,
    pc=0.005,
    chunk_size=512,
) -> None:
    """
    Annotate the stained slide and save to disk.

    Sufficiently-stained patches that the classifier scores above 0.95 get
    a 10px red border drawn around them.

    @arg model_file: path to the model weight data checkpoint (str)
    @arg file_to_stain: path to the original svs file (str)
    @arg output_path: path where the image should be saved (str)
    @arg threshold: value same as used in synthetic staining
    @arg pc: value same as used in synthetic staining
    @arg chunk_size: size of patch in pixels
    """
    # We redefine our transforms based on the image already being a torch tensor
    input_size = 299
    downsample_for_tensor = transforms.Compose([transforms.Resize(input_size)])
    # Create our model and apply the checkpoint state
    model_ft = PDNet()
    model_ft.apply_state(model_file)
    # Send the model to the best available device, in eval + half mode
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_ft.to(device)
    model_ft = model_ft.eval()
    model_ft = model_ft.half()
    # in evaluation mode
    with torch.set_grad_enabled(False):
        # list of results which we will pass to marking function
        results = []
        # Chunk through the image with the caller's chunk_size so the
        # classifier's patches line up with the marking loop below
        # (this was previously hard-coded to 512, breaking other sizes).
        for batch in _im_to_chunked_iterator(
            file_to_stain, chunk_size=chunk_size, batches=50
        ):
            # Apply downsample
            downsampled = downsample_for_tensor(batch)
            # Move to the device first, then cast to half precision:
            # torch.cuda.HalfTensor would fail on CPU-only machines even
            # though we fall back to a CPU device above.
            formatted = downsampled.to(device).half() / 255.0
            # Append with results of the batch
            results.append(model_ft(formatted))
    # make a 1-d array of results
    flat_results = np.hstack(np.vstack([x.tolist() for x in results]))
    image = lycon.load(file_to_stain)
    if image is None:
        raise IOError(f"Error loading {file_to_stain}")
    # Get the height and width of the slice
    xx, yy, _ = image.shape
    # Tumble over the slice in the SAME order the chunker produced the
    # patches (outer loop over columns, inner over rows) so that c indexes
    # flat_results correctly; the previous row-major order consumed the
    # scores transposed.
    c = 0
    for y in np.arange(0, yy - chunk_size, chunk_size):
        for x in np.arange(0, xx - chunk_size, chunk_size):
            # Calculate the difference in our green channel
            section = image[x : x + chunk_size, y : y + chunk_size, :]
            diff = np.abs(
                section[:, :, 0].astype(float) - section[:, :, 1].astype(float)
            )
            # Test to see if this is a sufficiently stained section
            # - these params (threshold and pc) need to be
            # the same as used in the synthetic staining routine
            if np.sum(diff > threshold) / (chunk_size**2) > pc:
                if flat_results[c] > 0.95:
                    # Draw a 10px red border around the confident patch
                    section[:, 0:10, 0] = 255
                    section[0:10, :, 0] = 255
                    section[-10:-1, :, 0] = 255
                    section[:, -10:-1, 0] = 255
                    image[x : x + chunk_size, y : y + chunk_size, :] = section
            # tick up our array counter
            c += 1
    lycon.save(f"{output_path}/{os.path.basename(file_to_stain)}", image)
| gmagoulas/skunkworks-parkinsons-detection | polygeist/example.py | example.py | py | 4,763 | python | en | code | null | github-code | 13 |
13589334153 | # Menyimpan huruf dan mod 26-nya
# Letter <-> number tables for the classic 26-letter alphabet: 'a' = 0 ... 'z' = 25.
angka_ke_huruf = [chr(ord('a') + i) for i in range(26)]
huruf_ke_angka = {huruf: angka for angka, huruf in enumerate(angka_ke_huruf)}
# Implementasi fungsi enkripsi Vigenere cipher dengan plain teks P
# dan kunci K
def enkripsi(P='', K=''):
    """
    Encrypt plain text ``P`` with key ``K`` using the Vigenere cipher.

    Each letter Pi is shifted by the matching key letter Kj:
    Ci = (Pi + Kj) mod 26, with the key repeated periodically whenever it
    is shorter than the text.  Spaces are kept as-is and do not consume a
    key letter.  Returns the cipher text in upper case.

    Raises KeyError for characters outside 'a'-'z' and space (the same
    error the table lookup raised before).  The previous implementation
    crashed on spaces whenever len(P) <= len(K); both cases are now
    handled by a single loop with a modulo key index.
    """
    P = P.lower()
    K = K.lower()
    cipher_teks = []
    ik = 0  # key index; advances only on letters, wraps modulo len(K)
    for huruf in P:
        if huruf == ' ':
            cipher_teks.append(' ')
            continue
        if not ('a' <= huruf <= 'z'):
            # Match the KeyError the huruf_ke_angka lookup used to raise.
            raise KeyError(huruf)
        kunci = K[ik % len(K)]
        # Ci = (Pi + Kj) mod 26, with 'a' = 0 ... 'z' = 25.
        hasil_cipher = (ord(huruf) - ord('a') + ord(kunci) - ord('a')) % 26
        cipher_teks.append(chr(hasil_cipher + ord('a')))
        ik += 1
    return ''.join(cipher_teks).upper()
# Implementasi fungsi dekripsi Vigenere cipher dengan cipher teks C
# dan kunci K
def dekripsi(C='', K=''):
    """
    Decrypt cipher text ``C`` with key ``K`` using the Vigenere cipher.

    Inverse of ``enkripsi``: Pi = (Ci - Kj) mod 26, with the key repeated
    periodically whenever it is shorter than the text.  Spaces are kept
    as-is and do not consume a key letter.  Returns lower-case plain text.

    Raises KeyError for characters outside 'a'-'z' and space (the same
    error the table lookup raised before).  The previous implementation
    crashed on spaces whenever len(C) <= len(K); both cases are now
    handled by a single loop with a modulo key index.
    """
    C = C.lower()
    K = K.lower()
    plain_teks = []
    ik = 0  # key index; advances only on letters, wraps modulo len(K)
    for huruf in C:
        if huruf == ' ':
            plain_teks.append(' ')
            continue
        if not ('a' <= huruf <= 'z'):
            # Match the KeyError the huruf_ke_angka lookup used to raise.
            raise KeyError(huruf)
        kunci = K[ik % len(K)]
        # Pi = (Ci - Kj) mod 26, with 'a' = 0 ... 'z' = 25.
        hasil_plain = (ord(huruf) - ord(kunci)) % 26
        plain_teks.append(chr(hasil_plain + ord('a')))
        ik += 1
    return ''.join(plain_teks)
| pyk/vigenere-cipher | vigenere.py | vigenere.py | py | 3,494 | python | id | code | 0 | github-code | 13 |
# Read integers from the user, keeping only unique values, until the user
# answers 'N'; then show the collected numbers in ascending order.
lista = []
while True:
    numero = int(input('Digite um valor: '))
    if numero in lista:
        print('Numero repetido, não irei adicionar!')
    else:
        lista.append(numero)
        print('Numero adicionado com sucesso!')
    # [:1] instead of [0] so an empty answer re-prompts instead of crashing
    # with IndexError; tuple membership rejects '' as well.
    contiuar = input('Deseja adicionar mais números? [S/N] ').strip().upper()[:1]
    while contiuar not in ('S', 'N'):
        print('Comando inválido.. Tente novamente!')
        contiuar = input('Deseja adicionar mais números? [S/N] ').strip().upper()[:1]
    if contiuar == 'N':
        break
lista.sort()
print(f'Você adicionou os números {lista}')
| the-oliveira/python-guanabara | Curso - Guanabara/Exercicios/ex079-Listas(parte1).py | ex079-Listas(parte1).py | py | 605 | python | pt | code | 0 | github-code | 13 |
class NodoArbol:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value, left=None, right=None):
        self.data = value
        self.left = left
        self.right = right

    def recorrido_prefijo(self):
        """Preorder traversal: print this node, then the left subtree,
        then the right subtree.

        The previous version only walked the left and right spines (capped
        at 5 levels), so it missed every node off those spines.
        """
        print(self.data)
        if self.left is not None:
            self.left.recorrido_prefijo()
        if self.right is not None:
            self.right.recorrido_prefijo()

    def recorrido_sufijo(self):
        """Postorder traversal: print the left subtree, then the right
        subtree, then this node.

        The previous version only printed the left spine (capped at 5
        levels) and never printed the root or the right subtree.
        """
        if self.left is not None:
            self.left.recorrido_sufijo()
        if self.right is not None:
            self.right.recorrido_sufijo()
        print(self.data)
# Demo: build two small trees and print their traversals.
# arbol:  R with children C (left) and H (right).
arbol = NodoArbol("R", NodoArbol("C"), NodoArbol("H"))
# arbol2: 4 with a left chain 3 -> 2 -> 2 and a right child 5.
arbol2 = NodoArbol(4, NodoArbol(3, NodoArbol(2, NodoArbol(2, None, None)), None), NodoArbol(5,None,None))
arbol.recorrido_prefijo()
print("--------------------")
arbol2.recorrido_prefijo()
print("--------------------")
arbol2.recorrido_sufijo()
| Alejandro-Duran/Edd_2020_clases | Arboles_19Enero/prueba_arbol.py | prueba_arbol.py | py | 1,126 | python | es | code | 0 | github-code | 13 |
73306452499 | # else clause will be executed when the condition fail
def is_comment(item):
    """Return True when *item* is a documentation string (starts with '#')."""
    # Guard clause: non-strings can never be comments, and bailing out
    # early avoids calling startswith on e.g. integers.
    if not isinstance(item, str):
        return False
    return item.startswith('#')
def execute(program):
    """Run a stack-based program until it is exhausted.

    Args:
        program: a stack-like object (supports pop(), append() and truth
            testing) holding callable operators and non-callable operands.
            The topmost items may be strings beginning with '#', which are
            treated as documentation and skipped.
    """
    # Skip any leading comment items to find the real program start.
    # The while-else fires only when the loop drains without a break,
    # i.e. the program contained nothing but comments (or was empty).
    while program:
        top = program.pop()
        if not is_comment(top):
            program.append(top)  # put the first real item back
            break
    else:  # nobreak
        print("Empty program!")
        return  # leaves the function entirely, not just the loop

    # Evaluate: operands accumulate until an operator consumes them all,
    # and the operator's result is pushed back for the next operator.
    operands = []
    while program:
        top = program.pop()
        if not callable(top):
            operands.append(top)
            continue
        try:
            value = top(*operands)
        except Exception as exc:
            print("Error: ", exc)
            break  # skips the else clause below
        program.append(value)
        operands.clear()
    else:  # nobreak
        print("Program successful.")
        print("Result: ", operands)

    print("Finished")
if __name__ == '__main__':
    import operator

    # Stack programs are written top-down, so the list is reversed before
    # execution: pop() takes items from the end of the list.
    demo = [
        "# A short stack program to add",
        "# and multiply some constants",
        5,
        2,
        operator.add,
        3,
        operator.mul,
    ]
    demo.reverse()
    execute(demo)
1353002890 | import requests
from lxml import etree
# Listing-page URL template; {} is filled with the 1-based page number.
URL='https://www.qiushibaike.com/8hr/page/{}/'
# Desktop Chrome User-Agent so the site serves its normal HTML page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
class QiushiSpider(object):
    """Scraper for qiushibaike.com listing pages."""

    def __init__(self):
        self.url = URL          # page-number URL template
        self.headers = headers  # browser-like UA for the requests

    def get_urllists(self):
        """Return the URLs of the first 13 listing pages."""
        return [self.url.format(i + 1) for i in range(13)]

    def parse_url(self, url):
        """Fetch one listing page and return its article <div> elements.

        The previous version computed the xpath result but never returned
        it, so callers always got None.
        """
        response = requests.get(url=url, headers=self.headers)
        temp_html = etree.HTML(response.content.decode())
        # Parent element of each item: //div[contains(@class,'article')]
        return temp_html.xpath(r"//div[contains(@class,'article')]")
if __name__ == '__main__':
    # Quick manual check: print the generated listing-page URLs.
    spider = QiushiSpider()
    print(spider.get_urllists())
10687647296 | import os
from datetime import datetime
import time
import board
import busio
import adafruit_bme280
import requests
from setproctitle import setproctitle
# Create library object using our Bus I2C port.
# Advertise a readable process name first (shows up in ps/top as "bme280").
setproctitle("bme280")
i2c = busio.I2C(board.SCL, board.SDA)
# 0x76 is the BME280's alternate I2C address (the Adafruit default is 0x77).
bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c, address=0x76)
# OR create library object using our Bus SPI port instead:
# spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
# bme_cs = digitalio.DigitalInOut(board.D10)
# bme280 = adafruit_bme280.Adafruit_BME280_SPI(spi, bme_cs)
# change this to match the location's pressure (hPa) at sea level;
# the driver uses it to derive the altitude reading.
bme280.sea_level_pressure = 1013.25
def get_bme280_values():
    """Build a human-readable multi-line report of the current readings."""
    readings = [
        f"Temperature: {bme280.temperature:0.1f} C",
        f"Humidity: {bme280.relative_humidity:0.1f} %",
        f"Pressure: {bme280.pressure:0.1f} hPa",
        f"Altitude: {bme280.altitude:0.2f} meters",
    ]
    # Each line is preceded by a newline, matching the original format.
    return "".join("\n" + line for line in readings)
def push_bme280_values():
    """POST the current sensor readings as JSON to the configured server.

    Reads SERVER_IP, API_KEY_NAME and API_KEY from the environment, sends
    the readings to https://<SERVER_IP>/bme280 and prints the server's
    JSON response.
    """
    datas = {
        "humidity": float(f"{bme280.relative_humidity:0.1f}"),
        "temperature": float(f"{bme280.temperature:0.1f}"),
        "pressure": float(f"{bme280.pressure:0.1f}"),
        "altitude": float(f"{bme280.altitude:0.2f}"),
    }
    SERVER_IP = os.environ.get("SERVER_IP")
    APIKEY = f"{os.environ.get('API_KEY_NAME')}={os.environ.get('API_KEY')}"
    req = f"https://{SERVER_IP}/bme280?{APIKEY}"
    # Bound the request so an unreachable server cannot hang the hourly
    # loop forever (requests has no default timeout).
    ret = requests.post(req, json=datas, timeout=30)
    print(ret.json())
# Push one reading immediately, then repeat every hour (3600 s).
while True:
    push_bme280_values()
    time.sleep(3600)
| llPekoll/aquaPoney | raspi/x_sensor_bme280.py | x_sensor_bme280.py | py | 1,517 | python | en | code | 0 | github-code | 13 |
3499227543 | #!/usr/bin/python3
"""Python I/O"""
def read_lines(filename="", nb_lines=0):
    """Print the first ``nb_lines`` lines of a text file.

    Args:
        filename (str): path of the file to read.
        nb_lines (int): number of lines to print; when 0 (or negative, or
            greater than the number of lines) the whole file is printed.
    """
    with open(filename) as fi:
        # Stream line by line instead of loading the whole file into
        # memory with readlines().
        for count, line in enumerate(fi):
            if count == nb_lines and nb_lines != 0:
                break
            print(line, end="")
| Immaannn2222/holbertonschool-higher_level_programming | 0x0B-python-input_output/2-read_lines.py | 2-read_lines.py | py | 327 | python | en | code | 0 | github-code | 13 |
37064000943 | # Это программа помогает учить английский
import functions
score = 0  # running total: +10 for every correctly guessed word
name = input("Введите имя пользователя\n")
with open("words.txt", "r") as file:
    for word in file:
        # Present the word with its letters shuffled as the puzzle.
        cipher = functions.shuffle_letters(word)
        print(f"Угадай слово: {cipher}")
        answer = input()
        # word still carries its trailing newline, hence the replace().
        # NOTE(review): assumes entries in words.txt are lower case — confirm.
        if answer.lower() == word.replace("\n", ""):
            print("Верно! Вы получаете 10 баллов\n")
            score += 10
        else:
            print(f"Неверно! Верный ответ - {word}")
functions.write_to_the_top(name, score)  # persist this player's result
functions.print_statistic()  # presumably shows accumulated scores; see functions module
| BlackWizlock/WorkingFlow | HW5/main.py | main.py | py | 694 | python | ru | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.