seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
245936115 | #!/usr/bin/python
import threading
import numpy as np
import cv2
import rospy
from viman_utility.msg import CamZ
"""
Common class where calibrated CV image matrix is stored and accessed
"""
class Output:
    """Shared holder for the calibrated camera frame.

    A producer stores the latest OpenCV image in ``Output.img``; consumers
    read it back, coordinating access through the class-level condition
    variable ``Output.lock``.
    """

    # Placeholder frame used until a real camera image arrives.
    img = np.zeros((640, 480, 3), dtype=np.uint8)
    # Condition variable guarding concurrent access to ``img``.
    lock = threading.Condition()

    def __init__(self):
        pass
class IdColor(threading.Thread):
    """
    Define color ranges for each color. Color sequence:
    green-red-purple-blue-yellow

    Worker thread: repeatedly grabs the latest frame from ``Output``, finds
    the largest sufficiently-big colored blob, publishes its identity on
    ``/viman/color_id`` and shows an annotated preview window.
    """
    # HSV range tables, index-aligned with ``colors``.
    colors = ['green','red','purple','blue','yellow']
    lower_limits = [(41,45,0), (0,0,190), (144,0,0), (94,127,0), (26,0,86)]
    upper_limits = [(64,255,255), (7,255,255), (180,255,255), (124,255,255), (44,255,255)]

    def __init__(self, w=640, h=480, ch=3):
        """Set up the ROS publisher, frame buffers and processing state."""
        threading.Thread.__init__(self)
        # create a publisher to send ID-ed color
        self.color_pub = rospy.Publisher('/viman/color_id', CamZ, queue_size=100)
        self.colorid = CamZ()
        # default values
        self.w = w  # width of the image frame
        self.h = h  # height of the image frame
        self.center_rad = 50  # radius of circle defining mid point of frame's sensitivity
        self.window_name = 'SLAM Z Vision'  # name of the output window
        self.thresh_area = 65000.0  # minimum area of the color in frame
        self.img = np.zeros((h, w, ch), np.uint8)  # the actual image frame from camera
        self.masks = np.zeros((h, w, len(self.colors)), np.uint8)  # matrix array to store masks for each color
        self.stop_process = False  # flag variable to start/stop processing

    def run(self):
        """Thread body: mask, find contours, publish and display until stopped."""
        kernal = np.ones((5,5), "uint8")
        cv2.namedWindow(self.window_name)
        while not self.stop_process:
            Output.lock.acquire()
            max_area = 0
            max_contour = None
            # Reset the published identity at the start of each frame.
            self.colorid.name = ''
            self.colorid.area = 0
            try:
                # Wait (up to 100 ms) for the producer to publish a new frame.
                Output.lock.wait(0.1)
                self.img = Output.img
            # NOTE(review): bare except hides the actual failure — consider
            # catching a specific exception and logging it.
            except:
                print('some issue')
            finally:
                Output.lock.release()
            # create masks for each color, dilating to eliminate noise
            frame_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
            for count, _ in enumerate(self.colors):
                self.masks[:,:, count] = cv2.inRange(frame_hsv,
                                                     self.lower_limits[count],
                                                     self.upper_limits[count])
                self.masks[:,:, count] = cv2.dilate(self.masks[:,:,count],
                                                    kernal)
            # find contours of each color
            for count, col_name in enumerate(self.colors):
                # NOTE(review): 3-value unpacking matches the OpenCV 3.x API;
                # OpenCV 4.x findContours returns 2 values — confirm the
                # pinned OpenCV version.
                _, contours, _ = cv2.findContours(self.masks[:,:,count].copy(),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
                # get contour of maximum area of a color
                color_max = 0
                area = 0
                c = None
                for _, contour in enumerate(contours):
                    area = cv2.contourArea(contour)
                    if (area > color_max):
                        color_max = area
                        c = contour
                # take the max area contour amongst the colors
                if(color_max > self.thresh_area and color_max > max_area):
                    max_area = color_max
                    # store the max contour of color of max area
                    max_contour = c
                    # store IDs characteristics of max area color
                    self.colorid.name = col_name
                    # NOTE(review): ``area`` here holds the area of the *last*
                    # contour iterated, not the maximum; ``color_max`` looks
                    # like the intended value — confirm.
                    self.colorid.area = area
            # proceed if actually found a contour
            if max_contour is not None:
                # compute the center of the contour for max area color
                M = cv2.moments(max_contour)
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                # Only report/draw when the blob is inside the central
                # sensitivity circle; otherwise clear the identity.
                if(abs(cX-self.w/2)<=self.center_rad and abs(cY-self.h/2)<=self.center_rad):
                    # draw bounding rectangle and centeroid
                    x, y, w, h = cv2.boundingRect(max_contour)
                    self.img = cv2.rectangle(self.img, (x,y), (x+w, y+h),
                                             (0,0,0), 2)
                    self.img = cv2.circle(self.img, (cX, cY), 5, (255, 255, 255), -1)
                else:
                    self.colorid.name = ''
                    self.colorid.area = 0
            self.color_pub.publish(self.colorid)
            # draw frame center and sensitivity region
            # NOTE(review): under Python 3 ``self.w/2`` is a float; OpenCV
            # circle centers must be integers — confirm target Python version.
            self.img = cv2.circle(self.img, (self.w/2, self.h/2), 5, (0, 0, 0), -1)
            self.img = cv2.circle(self.img, (self.w/2, self.h/2), self.center_rad, (0, 0, 0), 1)
            cv2.imshow(self.window_name, self.img)
            cv2.waitKey(1)
        cv2.destroyAllWindows()
        print('Stopped IDing colors...')
cv2.destroyAllWindows()
print('Stopped IDing colors...')
# This module only defines worker classes; the runnable ROS node is z_vision.py.
if __name__ == '__main__':
    print('Please run the node z_vision.py')
| ProjectViman/viman | viman_control/scripts/slam_Z/process_vision.py | process_vision.py | py | 4,123 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "threading.Condition",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
... |
31716326895 | """
游戏主程序
1. 封装 主游戏类
2. 创建 游戏对象
3. 启动游戏
"""
import sys
import pygame
import background
from plane_sprites import SCREEN_RECT, FRAME_PER_SEC
from enemy import *
from plane_sprites import *
from hero import *
class PlaneGame:
    """Main game object: owns the window, clock, sprite groups and game loop.

    NOTE: the original ``@property.setter``-decorated ``__screen``/``__clock``
    stubs were removed — ``property.setter`` cannot decorate a plain function
    (it raises TypeError at class-definition time, so the module never
    imported). ``__update_sprites`` now draws on ``self.screen``, the surface
    actually created in ``__init__``.
    """

    def __init__(self):
        """Initialise pygame, the window, the clock, the sprites and timers."""
        print("游戏初始化...")
        pygame.init()
        print("设置游戏窗口中...")
        self.screen = pygame.display.set_mode(SCREEN_RECT.size)
        print("创建游戏时钟中...")
        self.clock = pygame.time.Clock()
        print("调用私有方法,创建精灵和精灵组...")
        self.__create_sprites()
        print("设置定时器事件")
        # Spawn an enemy every second; fire a hero bullet every half second.
        pygame.time.set_timer(CREATE_ENEMY_EVENT, 1000)
        pygame.time.set_timer(HERO_FIRE_EVENT, 500)

    def start_game(self):
        """Run the main loop: tick, handle events, detect collisions, redraw."""
        print("游戏开始...")
        while True:
            # Cap the frame rate.
            self.clock.tick(FRAME_PER_SEC)
            # Event handling.
            self.__event_handler()
            # Collision detection.
            self.__check_collide()
            # Update/draw the sprite groups.
            self.__update_sprites()
            # Flip the display.
            pygame.display.update()

    def __create_sprites(self):
        """Create the background, enemy and hero sprite groups."""
        bg1 = background.Background()
        bg2 = background.Background(True)
        self.back_group = pygame.sprite.Group(bg1, bg2)
        # Enemy group; enemies are added when CREATE_ENEMY_EVENT fires.
        self.enemy_group = pygame.sprite.Group()
        # The hero sprite and its group.
        self.hero = Hero()
        self.hero_group = pygame.sprite.Group(self.hero)

    def __event_handler(self):
        """Poll pygame events, then read the held-key state for movement."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                PlaneGame.__game_over()  # static method, called via the class
            elif event.type == CREATE_ENEMY_EVENT:
                print("敌机出场")
                # Create an enemy sprite and add it to the enemy group.
                enemy = Enemy()
                self.enemy_group.add(enemy)
            elif event.type == HERO_FIRE_EVENT:
                self.hero.fire()
        # KEYDOWN events only fire once per press; polling the keyboard state
        # lets a held-down arrow key keep moving the hero.
        keys_pressed = pygame.key.get_pressed()
        if keys_pressed[pygame.K_RIGHT]:
            self.hero.speed = 2
        elif keys_pressed[pygame.K_LEFT]:
            self.hero.speed = -2
        else:
            self.hero.speed = 0

    def __check_collide(self):
        """Resolve bullet-vs-enemy and enemy-vs-hero collisions."""
        # Bullets destroy enemies (both sprites removed).
        pygame.sprite.groupcollide(self.hero.bullets, self.enemy_group, True, True)
        # Enemies crashing into the hero.
        enemies = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)
        if len(enemies) != 0:
            # The hero is destroyed.
            self.hero.kill()
            # End the game.
            self.__game_over()

    def __update_sprites(self):
        """Update every sprite group and draw it onto the screen surface."""
        # BUG FIX: the original drew onto ``self.__screen`` (a name-mangled
        # attribute that was never assigned); ``self.screen`` is the surface
        # created in ``__init__``.
        self.back_group.update()
        self.back_group.draw(self.screen)
        self.enemy_group.update()
        self.enemy_group.draw(self.screen)
        self.hero_group.update()
        self.hero_group.draw(self.screen)
        self.hero.bullets.update()
        self.hero.bullets.draw(self.screen)

    @staticmethod
    def __game_over():
        """Shut pygame down and exit the process."""
        print("游戏结束...")
        pygame.quit()
        sys.exit()
# Script entry point: construct the game and enter the main loop.
if __name__ == '__main__':
    # Create the game object
    game = PlaneGame()
    # Start the game
    game.start_game()
| NekoSilverFox/EasyQQ | plane_main.py | plane_main.py | py | 3,988 | python | zh | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "plane_sprites... |
34707631023 | from pathlib import Path
from util.util import get_data_path
from .base_loader import BaseLoader
class JSONLoader(BaseLoader):
    """Loader that reads JSON documents through the shared ``JSONReader``."""

    def __init__(self, path: str, params: dict, index=None):
        """Register this loader with the base class under the JSON reader."""
        super().__init__(
            name="JSONLoader",
            path=path,
            params=params,
            data_loader_name="JSONReader",
            index=index,
        )

    def load_data(self):
        """Load and return the JSON data found at ``params['path']``."""
        reader = self.data_loader
        if reader is None:
            raise ValueError("Data loader is not provided.")
        target = Path(get_data_path(self.params["path"]))
        return reader.load_data(target)

    @staticmethod
    def get_params_types():
        """Describe the expected constructor parameters and their types."""
        return {"path": "str"}
| SherifNeamatalla/jarvis | src/loaders/json_loader.py | json_loader.py | py | 663 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "base_loader.BaseLoader",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "util.util.get_data_path",
"line_number": 21,
"usage_type": "call"
}
] |
35805629848 | #!/usr/bin/env python
import networkx as nx
import matplotlib.pyplot as plt
# Create an empty graph.
G = nx.Graph()

# A fresh graph has empty node and edge views.
print(G.nodes())
print(G.edges())
print(type(G.nodes()))
print(type(G.edges()))

# Adding just one node:
G.add_node("a")
# A list of nodes:
G.add_nodes_from(["b","c"])

# Add edges; endpoints 1 and 2 are created implicitly.
G.add_edge(1,2)
edge = ("d", "e")
G.add_edge(*edge)
edge = ("a", "b")
G.add_edge(*edge)

# Add the remaining edges in one call.
G.add_edges_from([("a","c"),("c","d"), ("a",1), (1,"d"), ("a",2)])

# Print the resulting graph.
print("Nodes of graph: ")
print(G.nodes())
print("Edges of graph: ")
print(G.edges())

# Draw the graph with the default layout.
nx.draw(G)
# Save the graph as PNG (disabled).
# plt.savefig("simple_path.png")
# Display the drawing window (blocks until closed).
plt.show()
{
"api_name": "networkx.Graph",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot"... |
42936198452 | from primitives.base_market_agent import BaseMarketAgent
from high_frequency_trading.hft.trader import ELOInvestor
from utility import MockWSMessage
from discrete_event_emitter import RandomOrderEmitter
import draw
from db import db
import logging
log = logging.getLogger(__name__)
# this is a passive agent
# given a random order generator
# 'next's it and derives orders from data
# to send the matching engine/proxy
# does not react (except handling exchange responses)
# or adjusts market position via price producer model,
# does not handle public messages as well.
class PaceMakerAgent(BaseMarketAgent):
    """Passive investor agent.

    Given a random order generator, it derives orders from the emitted data
    and sends them to the matching engine/proxy. It only reacts to exchange
    responses; it neither adjusts its market position nor handles public
    messages.
    """
    message_class = MockWSMessage
    trader_model_cls = ELOInvestor

    def __init__(self, session_id, *args, **kwargs):
        super().__init__(session_id, *args, **kwargs)
        # Investor trader model tied to this agent's firm/account.
        self.model = self.trader_model_cls(self.session_id, 0, 1, 0, 'investor',
                                           0, 0, firm=self.account_id, **kwargs)
        self.exchange_connection = None

    @db.freeze_state()
    def handle_OUCH(self, msg):
        """Forward an exchange (OUCH) response to the trader model."""
        event = self.event_cls('exchange', msg)
        log.info('agent %s:%s --> handling ouch message %s' % (
            self.account_id, self.typecode, event.event_type))
        self.model.handle_event(event)
        return event

    def handle_discrete_event(self, event_data):
        """Dispatch a scheduled event; only investor arrivals are handled.

        BUG FIX: the original compared with ``is``, which tests object
        identity and is unreliable for strings (it is also a SyntaxWarning
        on modern Python); use ``==``.
        """
        if event_data['type'] == 'investor_arrivals':
            self.enter_order(event_data)

    def enter_order(self, order_data):
        """Convert random order data into exchange messages and send them."""
        message = MockWSMessage(order_data, type='investor_arrivals',
                                subsession_id=0, market_id=0, player_id=0)
        event = self.event_cls('random_order', message)
        self.model.handle_event(event)
        # Drain any exchange messages the model produced; buffer them when
        # the exchange connection is not up yet.
        while event.exchange_msgs:
            message = event.exchange_msgs.pop()
            if self.exchange_connection is not None:
                self.exchange_connection.sendMessage(message.translate(), message.delay)
            else:
                self.outgoing_msg.append((message.translate(), message.delay))
| hademircii/financial_market_simulator | agents/pacemaker_agent.py | pacemaker_agent.py | py | 2,013 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "primitives.base_market_agent.BaseMarketAgent",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "utility.MockWSMessage",
"line_number": 22,
"usage_type": "name"
},
{
... |
70040530274 | import numpy as np
import cv2
lineThickness = 3

# NOTE(review): "http:/localhost" is missing a slash after "http:"; many URL
# handlers tolerate it, but confirm this is intentional.
url = "http:/localhost:8082/?action=stream"

# initialize USB webcam video capture.
# unable to capture directly from /dev/video0, but by using mjpg-streamer in another terminal,
# capture here works from URL instead.
cap = cv2.VideoCapture(url)

# limiting this to '1' means that the sending-receving between mjpg-streamer
# and this script doesn't cause a frames buildup (which causes a huge lag).
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

xml_path = '/home/devchu/.virtualenvs/cv/lib/python3.7/site-packages/cv2/data/'
face_cascade = cv2.CascadeClassifier(xml_path + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(xml_path + 'haarcascade_eye.xml')

# Only attempt to read if the capture device is opened.
# BUG FIX: isOpened is a method; the original tested the bound-method object
# itself ("if cap.isOpened:"), which is always truthy.
if cap.isOpened():
    while True:
        ret, frame = cap.read()
        # BUG FIX: the original tested "if True:", so the read-error branch
        # below was unreachable; test the actual success flag from read().
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
            # for each face...
            for (x, y, w, h) in faces:
                # draw a rectangle around the face
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), lineThickness)
                roi_gray = gray[y:y+h, x:x+w]
                roi_color = frame[y:y+h, x:x+w]
                # detect eyes within the face region only
                eyes = eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), lineThickness)
            cv2.imshow('frame', frame)
        else:
            print("Error reading capture device")
            break
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
else:
    print("Failed to open capture device")
| elicorrales/Fort.Laud.Robotics.Meetup.Group | Meetup.5/loopFaceEyeDetect.py | loopFaceEyeDetect.py | py | 1,847 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_BUFFERSIZE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "c... |
11687840437 | #!/usr/bin/python
# This file is used for extracting images from one or more bag files.
# Before run this file, remember to create a folder to store the images and 2 txt files which will store the
# image name with its corresponding omega.
# Then change the folder name to the one you created in line 52 & 53.
# In this file, I open three rosbags in order and transform all of it into training data.
# If you have more bags or less, just add/delete the bag part.
# Code from 57-94 is the same as 95-132 and 133-170, only the name of .bag is different
# After all set, run this file with 'python bag2txt.py'
#PKG = 'beginner_tutorials'
import roslib #roslib.load_manifest(PKG)
import rosbag
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from cv_bridge import CvBridgeError
import numpy as np
# Reading bag filename from command line or roslaunch parameter.
#import os
#import sys
class ImageCreator():
    """Walk a rosbag, pair each compressed camera frame with the most recent
    joystick command, and write JPEGs plus label lines for training."""

    def __init__(self):
        self.i = 0            # rotating counter: every 10th sample goes to test.txt
        self.t = 0            # flag: 1 once a car_cmd was seen for the next frame
        self.n = 0            # running index used to name the output images
        self.omega = 0        # last steering command seen
        self.omega_gain = 8.4
        self.bridge = CvBridge()
        f1 = open('lanefollowing/train.txt', 'w') # change "ele" to the folder name you created/
        f2 = open('lanefollowing/test.txt', 'w')
        #---bag part---
        with rosbag.Bag('1.bag', 'r') as bag: #open first .bag
            print("1.bag")
            for topic,msg,t in bag.read_messages():
                #print topic
                if topic == "/super_pi02/joy_mapper_node/car_cmd": #change ros_master name from aiplus1 to your duckiebot's name
                    #print topic, msg.header.stamp
                    # Remember the latest command; it labels the next frame.
                    self.omega = msg.omega
                    self.v = msg.v
                    self.t = 1
                elif topic == "/super_pi02/camera_node/image/compressed":
                    # Only keep frames for which a command was already seen.
                    if self.t == 1:
                        try:
                            #print topic, msg.header.stamp
                            #cv_image = self.bridge.imgmsg_to_cv2(msg)
                            # NOTE(review): np.fromstring is deprecated in
                            # modern numpy (np.frombuffer is the replacement)
                            # — confirm the pinned numpy version.
                            np_arr = np.fromstring(msg.data, np.uint8)
                            cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
                        except CvBridgeError as e:
                            print(e)
                        #timestr = "%.6f" % msg.header.stamp.to_sec()
                        image_name = str(self.n)+ ".jpg"
                        cv2.imwrite("lanefollowing/"+image_name, cv_image)
                        # Every 10th labelled frame goes to the test split.
                        if self.i == 9:
                            f2.write("joystick/"+image_name+" "+str(self.omega)+" "+str(self.v)+"\n")
                            self.i = 0
                        else:
                            f1.write("joystick/"+image_name+" "+str(self.omega)+" "+str(self.v)+"\n")
                            self.i += 1
                        print("image crop:",self.n)
                        self.n += 1
                        self.t = 0
        #---bag part---
        f1.close()
        f2.close()
if __name__ == '__main__':
    #rospy.init_node(PKG)
    try:
        image_creator = ImageCreator()
    except rospy.ROSInterruptException:
        # Ctrl-C / ROS shutdown while reading the bag: exit quietly.
        pass
| OpenPPAT/ai-course-2019 | 08-imitation-learning/bag2txt.py | bag2txt.py | py | 3,216 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "cv_bridge.CvBridge",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "rosbag.Bag",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"lin... |
11190749780 | from __future__ import annotations
import contextlib
import difflib
import hashlib
import os
import pathlib
import random
import string
from typing import Callable, Protocol
from xml.dom import minidom
from xml.etree import ElementTree
import numpy as np
import pytest
from click.testing import CliRunner
from PIL import Image
import vpype as vp
def random_line(length: int) -> np.ndarray:
    """Return ``length`` random points in the unit square as complex numbers."""
    real_part = np.random.rand(length)
    imag_part = np.random.rand(length)
    return real_part + 1j * imag_part
# CliRunner with separate stdout/stderr streams so stderr can be asserted on.
@pytest.fixture
def runner():
    return CliRunner(mix_stderr=False)
# Fresh vpype ConfigManager instance, isolated per test.
@pytest.fixture
def config_manager():
    return vp.ConfigManager()
@contextlib.contextmanager
def set_current_directory(path: pathlib.Path):
    """Temporarily change the working directory to *path*.

    BUG FIX: the original captured ``path.absolute()`` as the directory to
    restore, so on exit it chdir-ed back into *path* instead of the caller's
    original working directory. Capture the current directory instead.
    """
    origin = pathlib.Path.cwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(origin)
@pytest.fixture(scope="session")
def config_file_factory(tmpdir_factory):
    """Session fixture: factory writing TOML text to a temp config file."""

    def _make_config_file(text: str) -> str:
        target = os.path.join(tmpdir_factory.mktemp("config_file"), "config.toml")
        with open(target, "w") as handle:
            handle.write(text)
        return target

    return _make_config_file
class LineCollectionMaker(Protocol):
    """Callable protocol describing the factory returned by the
    ``make_line_collection`` fixture."""

    def __call__(
        self, line_count: int | None = ..., with_metadata: bool = ...
    ) -> vp.LineCollection:
        ...
@pytest.fixture
def make_line_collection() -> LineCollectionMaker:
    """Factory fixture building LineCollection instances with random content."""

    def _make_line_collection(
        line_count: int | None = None, with_metadata: bool = True
    ) -> vp.LineCollection:
        # Default to a small random number of lines.
        if line_count is None:
            line_count = random.randint(1, 10)
        if with_metadata:
            # Randomized color/name/pen-width metadata.
            metadata = {
                vp.METADATA_FIELD_COLOR: vp.Color(
                    random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
                ),
                vp.METADATA_FIELD_NAME: "".join(
                    random.choice(string.ascii_letters) for _ in range(10)
                ),
                vp.METADATA_FIELD_PEN_WIDTH: vp.convert_length(f"{random.random()}mm"),
            }
        else:
            metadata = {}
        lc = vp.LineCollection(metadata=metadata)
        for _ in range(line_count):
            # Each line: 2-10 random complex points in a 100x100 box.
            lc.append(
                [
                    random.random() * 100 + random.random() * 100j
                    for _ in range(random.randint(2, 10))
                ]
            )
        return lc

    return _make_line_collection
class DocumentMaker(Protocol):
    """Callable protocol describing the factory returned by the
    ``make_document`` fixture."""

    def __call__(self, layer_count: int = ...) -> vp.Document:
        ...
@pytest.fixture
def make_document(make_line_collection) -> DocumentMaker:
    """Factory fixture building a Document with random page size and layers."""

    def _make_document(layer_count: int = 1) -> vp.Document:
        doc = vp.Document()
        # Random page size up to 1000x1000 units.
        doc.page_size = 1000.0 * random.random(), 1000.0 * random.random()
        for _ in range(layer_count):
            doc.add(make_line_collection(), with_metadata=True)
        return doc

    return _make_document
# IMAGE COMPARE SUPPORT
# ideally, this would be cleanly factored into a pytest plug-in akin to pytest-mpl
# Location of the stored reference images/SVGs used by the comparison fixtures.
REFERENCE_IMAGES_DIR = os.path.sep.join(
    [os.path.dirname(__file__), "data", "baseline"]
)
def pytest_addoption(parser):
    """Register the command-line flags used by the image/SVG comparison fixtures."""
    option_specs = (
        ("--store-ref-images", "Write reference image for assert_image_similarity()."),
        ("--skip-image-similarity", "Skip tests using assert_image_similarity()."),
        ("--store-ref-svg", "Write reference SVGs for reference_svg()."),
    )
    for flag, help_text in option_specs:
        parser.addoption(flag, action="store_true", help=help_text)
def write_image_similarity_fail_report(
    image: Image.Image,
    reference_image: Image.Image,
    image_array: np.ndarray,
    reference_image_array: np.ndarray,
    test_id: str,
    diff: float,
) -> None:
    """Dump images, arrays and a short report for a failed similarity test."""
    report_dir = pathlib.Path.cwd() / "test_report_img_sim" / test_id
    report_dir.mkdir(parents=True, exist_ok=True)
    # Pixel-wise absolute difference, saved as an image for visual inspection.
    diff_img = Image.fromarray(np.abs(reference_image_array - image_array).astype(np.uint8))
    image.save(str(report_dir / "test_image.png"))
    reference_image.save(str(report_dir / "reference_image.png"))
    diff_img.save(str(report_dir / "difference_image.png"))
    np.save(str(report_dir / "test_image_array.npy"), image_array)
    np.save(str(report_dir / "reference_image_array.npy"), reference_image_array)
    with open(str(report_dir / "report.txt"), "w") as fp:
        fp.write(f"Test ID: {test_id}\nComputed different: {diff}")
@pytest.fixture
def assert_image_similarity(request) -> Callable:
    """Fixture returning an assertion comparing a PIL image to a stored reference.

    Behaviour is controlled by the flags registered in ``pytest_addoption``:
    ``--skip-image-similarity`` skips the check entirely, and
    ``--store-ref-images`` (re)writes the reference instead of comparing.
    """
    if request.config.getoption("--skip-image-similarity"):
        pytest.skip("image similarity test skipped (--skip-image-similarity)")
    store_ref_image = request.config.getoption("--store-ref-images")
    # Sanitize the (possibly parametrized) test name into a file-system id.
    test_id = request.node.name
    test_id = test_id.replace("[", "-").replace("]", "-").replace("/", "_").rstrip("-")
    path = REFERENCE_IMAGES_DIR + os.path.sep + test_id + ".png"

    def _assert_image_similarity(img: Image.Image) -> None:
        nonlocal store_ref_image, test_id, path

        if store_ref_image:  # pragma: no cover
            img.save(path)
            pytest.skip("storing reference images")
        else:
            try:
                ref_img = Image.open(path)
            except FileNotFoundError:
                pytest.fail(f"reference image {path} not found")
                return

            # Normalize mode and size before the pixel comparison.
            img = img.convert(ref_img.mode)
            img = img.resize(ref_img.size)
            ref_img_arr = np.asarray(ref_img).astype("float")
            img_arr = np.asarray(img).astype("float")
            sum_sq_diff = np.mean((ref_img_arr - img_arr) ** 2)

            if sum_sq_diff != 0:
                normalized_sum_sq_diff = sum_sq_diff / np.sqrt(sum_sq_diff)
                # Empirical threshold; on failure dump a full report for review.
                if normalized_sum_sq_diff > 6.5:  # pragma: no cover
                    write_image_similarity_fail_report(
                        img, ref_img, img_arr, ref_img_arr, test_id, normalized_sum_sq_diff
                    )
                    pytest.fail(
                        f"image similarity test failed (rms: {normalized_sum_sq_diff})"
                    )

    return _assert_image_similarity
def _read_svg_lines(path: pathlib.Path) -> list[str]:
tree = ElementTree.parse(path)
xml_str = ElementTree.tostring(tree.getroot())
# ET.canonicalize doesn't exist on Python 3.7
canon = ElementTree.canonicalize(xml_str, strip_text=True) # type: ignore
lines = minidom.parseString(canon).toprettyxml().splitlines()
return [line for line in lines if "<dc:source" not in line and "<dc:date" not in line]
def _write_reference_svg_fail_report(
    ref_lines: list[str],
    test_lines: list[str],
    test_id: str,
) -> None:
    """Dump the reference and generated SVGs to a per-test report directory."""
    out_dir = pathlib.Path(__file__).parent.parent / "test_report_reference_svg" / test_id
    out_dir.mkdir(parents=True, exist_ok=True)
    for file_name, content in (("reference.svg", ref_lines), ("test.svg", test_lines)):
        (out_dir / file_name).write_text("\n".join(content))
@pytest.fixture
def reference_svg(request, tmp_path) -> Callable:
    """Compare an SVG output to a saved reference.

    Use `--store-ref-svg` to save reference SVGs.

    Example::

        def test_ref_svg(reference_svg):
            with reference_svg() as path:
                export_svg_to(path)
    """
    store_ref_svg = request.config.getoption("--store-ref-svg")
    # Hash the (possibly long/parametrized) test name into a stable file id.
    test_id = "refsvg_" + hashlib.md5(request.node.name.encode()).hexdigest() + ".svg"
    ref_path = pathlib.Path(REFERENCE_IMAGES_DIR) / test_id
    temp_file = tmp_path / test_id

    @contextlib.contextmanager
    def _reference_svg():
        nonlocal ref_path, temp_file, store_ref_svg

        # The test writes its SVG to the yielded path...
        yield temp_file

        # ...then we either store it as the new reference or diff against it.
        if store_ref_svg:  # pragma: no cover
            ref_path.write_bytes(temp_file.read_bytes())
        else:
            if not ref_path.exists():  # pragma: no cover
                pytest.fail("reference SVG does not exist")

            temp_lines = _read_svg_lines(temp_file)
            ref_lines = _read_svg_lines(ref_path)
            if len(temp_lines) != len(ref_lines) or not all(
                a == b for a, b in zip(temp_lines, ref_lines)
            ):
                # Build a unified diff for the failure message and dump both
                # files into the report directory.
                delta = difflib.unified_diff(
                    temp_lines,
                    ref_lines,
                    fromfile="<test result>",
                    tofile=str(ref_path.relative_to(pathlib.Path(REFERENCE_IMAGES_DIR))),
                    lineterm="",
                )
                _write_reference_svg_fail_report(ref_lines, temp_lines, request.node.name)
                pytest.fail("generated SVG does not match reference:\n" + "\n".join(delta))

    return _reference_svg
| abey79/vpype | tests/conftest.py | conftest.py | py | 8,656 | python | en | code | 618 | github-code | 1 | [
{
"api_name": "numpy.random.rand",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "click.testing.... |
2359487852 | """NATS implementation of messenger."""
from dataclasses import asdict
import json
from typing import Callable, Dict, Optional
from logzero import logger
from pynats import NATSClient, NATSMessage
from blackcap.configs.base import BaseConfig
from blackcap.messenger.base import BaseMessenger
from blackcap.schemas.message import Message
from blackcap.utils.json_encoders import UUIDEncoder
class NATSMessenger(BaseMessenger):
    """NATS implementation of Messenger.

    Note: the ``client`` property opens a brand-new connection on every
    access; callers must hold on to one client object per operation.
    """

    CONFIG_KEY_VAL = "NATS"

    def __init__(self: "NATSMessenger", config: BaseConfig) -> None:
        """Initialize Messenger with app config.

        Args:
            config (BaseConfig): Config to initialize messenger
        """
        self.config = config

    @property
    def client(self: "NATSMessenger") -> NATSClient:
        """Return a new, connected NATS client (one per property access)."""
        client = NATSClient(url=self.config.NATS_ENDPOINT, name=self.config.FLASK_APP)
        client.connect()
        return client

    def publish(self: "NATSMessenger", msg: Dict, topic_id: str) -> str:
        """Publish msg on the NATS queue.

        Args:
            msg (Dict): Message to publish
            topic_id (str): Id of the topic

        Returns:
            str: "ok" on success
        """
        # Msg must be a bytestring
        payload = json.dumps(msg, cls=UUIDEncoder).encode("utf-8")
        # BUG FIX: every access to ``self.client`` opens a *new* connection,
        # so the original published on one connection and then closed a
        # different, freshly created one (leaking the first). Reuse a single
        # client for both operations.
        client = self.client
        client.publish(subject=topic_id, payload=payload)
        client.close()
        return "ok"

    def subscribe(
        self: "NATSMessenger",
        callback: Callable,
        sub_id: str,
        timeout: Optional[float] = None,
    ) -> None:
        """Subscribe to a topic.

        Args:
            callback (Callable): Callback to invoke when a msg is received
            sub_id (str): Id of the topic.
            timeout (Optional[float]): Time to wait for msgs. Defaults to None.  # noqa: E501
        """
        client = None
        try:
            client = self.client
            sub = client.subscribe(subject=sub_id, callback=callback)
            logger.info(f"Subscription created: {sub}")
            client.wait(count=None)
            client.close()
        except Exception as e:
            logger.error(
                f"NATSMessenger subscribe error while pulling messages. Error: {e}"
            )
            # BUG FIX: guard against ``client`` never having been assigned
            # (e.g. the connection itself failed), which previously raised a
            # NameError inside this handler.
            if client is not None:
                client.close()

    def parse_messenger_msg(
        self: "NATSMessenger", messenger_msg: NATSMessage
    ) -> Message:
        """Parse messenger msg to blackcap message schema.

        Args:
            messenger_msg (NATSMessage): NATSMessenger message

        Returns:
            Message: Parsed Message
        """
        return Message.parse_raw(messenger_msg.payload.decode("utf-8"))

    def echo_msg(self: "NATSMessenger", msg: NATSMessage) -> None:
        """Echo msgs to stdout.

        Args:
            msg (NATSMessage): Message to echo
        """
        print(asdict(msg))
| EBI-Metagenomics/orchestra | blackcap/src/blackcap/messenger/nats_messenger.py | nats_messenger.py | py | 2,908 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "blackcap.messenger.base.BaseMessenger",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "blackcap.configs.base.BaseConfig",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pynats.NATSClient",
"line_number": 32,
"usage_type": "call"
},
... |
43467567605 | from bs4 import BeautifulSoup
# Names to skip when collecting manufacturer links ("excluidos" = excluded).
# Presumably car makes from a Brazilian listings site; 'Todos' ("All") is the
# catch-all entry on the source page — confirm against the scraped site.
excluidos = ['Adamo',
             'Todos',
             'Agrale',
             'Ariel',
             'Asia',
             'Avallone',
             'Bianco',
             'BRM',
             'Caterham',
             'CBT',
             'Chamonix',
             'Chana',
             'Changan',
             'Cross Lander',
             'Daewoo',
             'Daihatsu',
             'DeLorean',
             'Effa',
             'Engesa',
             'Enseada',
             'Envemo',
             'Farus',
             'Geely',
             'Gurgel',
             'Hafei',
             'Hennessey',
             'Hofstetter',
             'Iveco',
             'Jinbei',
             'JPX',
             'Koenigsegg',
             'KTM',
             'Lada',
             'Lafer',
             'Lincoln',
             'Lobini',
             'Mahindra',
             'Mercury',
             'MG',
             'Miura',
             'Plymouth',
             'Pontiac',
             'Ragge',
             'Rely',
             'Saab',
             'Saleen',
             'Santa Matilde',
             'Saturn',
             'Seat',
             'Shineray',
             'SSC',
             'TAC',
             'W Motors']
def getFabri(html12):
    """Return hrefs of manufacturer links, skipping names in ``excluidos``."""
    doc = BeautifulSoup(html12, "html.parser")
    # Manufacturer entries live in <font> tags with this exact inline style.
    font_tags = doc.find_all(["font"], style="font-size: 15px; font-family:arial; color:black;")
    hrefs = []
    for font_tag in font_tags:
        for anchor in font_tag.find_all('a'):
            if anchor.get_text() not in excluidos:
                hrefs.append(anchor.get('href'))
    return hrefs
| Sankhay/Estudos | Python/selenium/getFabri.py | getFabri.py | py | 1,005 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 58,
"usage_type": "call"
}
] |
42470067186 | import torch
import numpy as np
import math
import random
from skimage.metrics import structural_similarity as SSIM
from skimage.metrics import peak_signal_noise_ratio as PSNR
def proto_with_quality(output, target, output_proto, target_proto, criterion, acc_proto, images, num_cluster):
    """Combine InfoNCE, ProtoNCE and an image-quality (PSNR/SSIM) penalty.

    Args:
        output, target: logits and labels for the instance-level InfoNCE term.
        output_proto, target_proto: per-prototype-set logits/labels (or None).
        criterion: loss callable shared by both contrastive terms.
        acc_proto: running-average meter updated with prototype accuracy.
        images: pair (query batch, key batch) of augmented views.
        num_cluster: list of cluster counts; used to average the proto loss.

    Returns:
        Scalar total loss.
    """
    # InfoNCE loss
    loss = criterion(output, target)
    # ProtoNCE loss
    if output_proto is not None:
        loss_proto = 0
        for proto_out, proto_target in zip(output_proto, target_proto):
            loss_proto += criterion(proto_out, proto_target)
            accp = accuracy(proto_out, proto_target)[0]
            acc_proto.update(accp[0], images[0].size(0))
        # average loss across all sets of prototypes
        loss_proto /= len(num_cluster)
        loss += loss_proto
    # Quality loss: compare each key image against a random *different* query.
    im_q = torch.split(images[0], split_size_or_sections=1, dim=0)
    im_q = [torch.squeeze(im, dim=0) for im in im_q]
    im_k = torch.split(images[1], split_size_or_sections=1, dim=0)
    im_k = [torch.squeeze(im, dim=0) for im in im_k]
    l_psnr = []
    l_ssim = []
    for i in range(min(len(im_q), len(im_k))):
        k = im_k[i]
        # NOTE(review): random.randint(0, i-2) raises ValueError for i < 2
        # (empty range), so this crashes on the first iterations, and the
        # adjustment below does not reliably avoid q_index == i. Confirm the
        # intended "pick a random other index" logic.
        q_index = random.randint(0,i-2)
        if q_index >= i:
            q_index += 1
        q = im_q[q_index]
        # NOTE(review): skimage's PSNR/SSIM expect numpy arrays; k and q are
        # torch tensors here — verify conversion happens upstream.
        psnr_temp = PSNR(k,q)
        # Map PSNR (higher = more similar) into a [0, 1] penalty:
        # >= 50 dB -> 0, <= 30 dB -> 1, linear in between.
        if psnr_temp >= 50:
            psnr_temp = 0
        elif psnr_temp <= 30:
            psnr_temp = 1
        else:
            psnr_temp = (50-psnr_temp)/20
        l_psnr.append(psnr_temp)
        l_ssim.append(1-SSIM(k,q))
    loss += np.mean(l_psnr)+np.mean(l_ssim)
    return loss
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of shape-(1,) tensors with the top-k accuracy in percent,
        one per entry of ``topk``.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # pred: (batch, maxk) indices of the top scores, transposed to
        # (maxk, batch) so each row k holds the k-th guess per sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # BUG FIX: use reshape(-1) instead of view(-1); the sliced tensor
            # is not guaranteed to be contiguous and view() raises on
            # non-contiguous input (same fix as in pytorch/examples).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
{
"api_name": "torch.split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.squeeze",
"line_numbe... |
9175181375 | from datetime import datetime
import json
import logging
from bs4 import BeautifulSoup
from db.models import Victim
from net.proxy import Proxy
from .sitecrawler import SiteCrawler
class Ragnar(SiteCrawler):
    """Crawler for the Ragnar leak site.

    Victim entries are embedded in an inline ``<script>`` as a JavaScript
    array (``var post_links = [...]``) rather than plain HTML, so the page
    is scraped by extracting and JSON-decoding that literal.
    """
    actor = "Ragnar"

    def scrape_victims(self):
        """Fetch the leak site, parse the victim list and sync it to the DB."""
        with Proxy() as p:
            r = p.get(f"{self.url}", headers=self.headers)

            soup = BeautifulSoup(r.content.decode(), "html.parser")
            script_list = soup.find_all("script")
            # they include the list in javascript code instead of HTML
            # So we have to parse it
            js_victims_raw = ""
            js_marker = "var post_links = "
            for script in script_list:
                script = str(script)
                if js_marker in script:
                    js_victims_raw = script
                    break
            if not js_victims_raw:
                raise Exception(f"js victim list not found (tried to locate '{js_marker}')")

            # Slice out the text between "var post_links = [{" and the closing
            # "}]" and rebuild a valid JSON array around it.
            raw_victim_list = js_victims_raw.split(f"{js_marker}[{{")[1].split(
                "}]"
            )[0]
            victim_list = json.loads(f"[{{{raw_victim_list}}}]")

            for victim in victim_list:
                victim_name = victim["title"]
                # Titles appear to be "Name - description"; keep the part
                # before the first dash.
                if "-" in victim_name:
                    victim_name = victim_name[:victim_name.find("-")]

                published = int(victim["timestamp"])
                published_dt = datetime.utcfromtimestamp(published)

                victim_leak_site = self.url + "/?" + victim["link"] + "/"

                q = self.session.query(Victim).filter_by(
                    url=victim_leak_site, site=self.site)

                if q.count() == 0:
                    # new victim
                    v = Victim(name=victim_name, url=victim_leak_site, published=published_dt,
                               first_seen=datetime.utcnow(), last_seen=datetime.utcnow(), site=self.site)
                    self.session.add(v)
                    self.new_victims.append(v)
                else:
                    # already seen, update last_seen
                    v = q.first()
                    v.last_seen = datetime.utcnow()

                # add the org to our seen list
                self.current_victims.append(v)

            self.session.commit()

            self.site.last_scraped = datetime.utcnow()

        # just for good measure
        self.session.commit()
| captainGeech42/ransomwatch | src/sites/ragnar.py | ragnar.py | py | 2,485 | python | en | code | 294 | github-code | 1 | [
{
"api_name": "sitecrawler.SiteCrawler",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "net.proxy.Proxy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads"... |
23495538946 | from youtube_transcript_api import YouTubeTranscriptApi
from yt_concate.pipeline.steps.step import Step
class DownloadCaptions(Step):
    """Pipeline step that downloads English captions for each video.

    For every ``yt`` entry in ``data`` (providing ``url``, ``id`` and
    ``captions_dir``), the transcript is fetched and written one segment per
    line; videos whose captions already exist are skipped. Returns ``data``
    unchanged so the pipeline can continue.
    """

    def process(self, data, inputs, utils):
        for yt in data:
            url = yt.url
            video_id = yt.id
            captions_dir = yt.captions_dir

            if utils.caption_exists(yt):
                print(video_id, 'captions already downloaded')
                continue

            try:
                srt = YouTubeTranscriptApi.get_transcript(video_id, languages=['en'])
            except Exception as e:
                # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
                # still propagate; the old message claimed a KeyError regardless
                # of the actual failure — report what really happened.
                print('error when downloading captions for', url, '-', e)
                continue

            # ``with`` closes the file on exit; the original's explicit
            # ``f.close()`` inside the with-block was redundant.
            with open(captions_dir, 'w', encoding='utf-8') as f:
                for segment in srt:
                    f.write("{}\n".format(segment))

        return data
| NoelTW/yt-concate | yt_concate/pipeline/steps/download_caption.py | download_caption.py | py | 819 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "yt_concate.pipeline.steps.step.Step",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "youtube_transcript_api.YouTubeTranscriptApi.get_transcript",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "youtube_transcript_api.YouTubeTranscriptApi",
"... |
23719298346 | import databaseConnectivity as db
import os
import shutil
import datetime
import re
import sys
# Should be uncommented with Python 2.7
reload(sys)
sys.setdefaultencoding('utf8')
from botocore.errorfactory import ClientError
# Empty list to store S3obectKeys, documentIds, customerIds and source paths
documentIds = []
S3objectKeys = []
customerIds = []
sources = []
# VALID_CHARACTERS holds the valid characters that a file name can have
VALID_CHARACTERS = set('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!-_ .*\'()')
class InvalidDocuments:
    """Plain record describing a document that failed validation.

    Carries the identifiers, timestamp, reviewer comment and origin that
    logInvalidDocuments() persists to the DCM_InvalidDocuments table.
    """

    def __init__(self, customerId, referralId, employeeId, documentId, createdOn, comment, source):
        # Store every constructor argument under the attribute of the same name.
        self.__dict__.update(
            customerId=customerId,
            referralId=referralId,
            employeeId=employeeId,
            documentId=documentId,
            createdOn=createdOn,
            comment=comment,
            source=source,
        )
# Define setListNull: takes in variable list arguments and set their values to empty list
def setListNull(*args):
    """Empty every list passed in, mutating each one in place."""
    for lst in args:
        # ``del lst[:]`` clears in place, preserving existing references.
        del lst[:]
def addObjectKeyDocumentId(S3objectKey, documentId, customerId, source):
    """Record one processed document in the module-level tracking lists.

    The four lists stay index-aligned: entry i in each describes the same
    document.
    """
    for target, value in ((documentIds, documentId),
                          (S3objectKeys, S3objectKey),
                          (customerIds, customerId),
                          (sources, source)):
        target.append(value)
# Define copyFiles: copies file from the source to the destination
def copyFiles(src, dst, filename, conn):
    """Copy ``src`` into directory ``dst`` under ``filename``.

    ``conn`` is unused; it is kept so existing call sites keep working.
    """
    shutil.copy(src, os.path.abspath(dst + '/' + filename))
# Define createStructureCopyFile: Creates folder structure for a customer and calls copyFiles
def createStructureCopyFile(source, destination, filename, documentId, conn, customerId):
    """Sanitise and de-duplicate ``filename``, ensure ``destination`` exists,
    copy the file there and record it in the tracking lists.

    Returns the final file name actually used.
    """
    path = destination
    file = removeInvalidCharacters(filename)
    # NOTE(review): renameFile() is defined below with four parameters
    # (filename, s3, bucketName, s3folderPath); this two-argument call looks
    # stale and would raise TypeError — verify which renameFile is intended.
    file = renameFile(path, file)
    # The original duplicated the copy/record calls in both branches; only the
    # directory creation is actually conditional.
    if not os.path.exists(path):
        os.makedirs(path)
    copyFiles(source, path, file, conn)
    addObjectKeyDocumentId(path + '/' + file, documentId, customerId, 'Appian')
    return file
def getFileName(bucketName, s3path, s3fileName, s3object):
    """Return (full S3 key, final file name) for an upload.

    The raw name is sanitised and then de-duplicated against the objects
    already present under ``s3path`` in ``bucketName``.
    """
    cleaned = removeInvalidCharacters(s3fileName)
    cleaned = renameFile(cleaned, s3object, bucketName, s3path)
    return '{0}/{1}'.format(s3path, cleaned), cleaned
def uploadDocument(file, bucket, s3ObjectUpload, key):
    # Upload local ``file`` to S3 ``bucket`` under ``key``, forcing AES-256
    # server-side encryption on the stored object.
    s3ObjectUpload.meta.client.upload_file(file, bucket, key, ExtraArgs={"ServerSideEncryption": "AES256"})
# Define logInvalidDocuments: Create enteries in DCM_InvalidDocuments tables for invalid documents
def logInvalidDocuments(conn, invalidDocuments):
    """Insert one row into DCM_InvalidDocuments for the given InvalidDocuments
    record, stamping it with the current local time."""
    cursor = conn.cursor()
    try:
        cursor.execute(
            "INSERT INTO DCM_InvalidDocuments(CustomerId,ReferralId,EmployeeId,DocumentId,CreatedOn,Comment,Source) VALUES (%d, %d, %d, %d, %s, %s, %s)",
            (invalidDocuments.customerId, invalidDocuments.referralId,
             invalidDocuments.employeeId, invalidDocuments.documentId,
             datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
             invalidDocuments.comment, invalidDocuments.source))
        conn.commit()
    finally:
        # Close the cursor even when execute/commit raises (it used to leak).
        cursor.close()
def queryCustomerName(conn, customerId=None):
    """Return Customer.CustomerName for ``customerId``, or '' when not found.

    The previous version concatenated ``customerId`` straight into the SQL
    string, which is injection-prone and produced invalid SQL for None; the
    query is now parameterised per DB-API.
    """
    cursor = conn.cursor()
    try:
        cursor.execute("Select CustomerName from Customer WHERE Id = %s", (customerId,))
        data = cursor.fetchall()
        customer_name = ''
        for row in data:
            customer_name = str(row[0])
    finally:
        # Always release the cursor, even if the query fails.
        cursor.close()
    return customer_name
# Define logToFile: Creates a log file
def logToFile(text, filePath):
    """Append ``text`` plus a newline to the log file; no-op for empty text."""
    if not text:
        return
    with open(filePath, 'a') as logFile:
        # Same output as the old Python-2 ``print >>logFile, text``:
        # str() conversion followed by a single newline.
        logFile.write('%s\n' % (text,))
# Define rreplace: Replaces the last occurrence 'old' string from string s by 'new'
def rreplace(s, old, new):
    """Replace the LAST occurrence of ``old`` in ``s`` with ``new``.

    Implemented by reversing the string, replacing once, and reversing back.
    Bug fix: ``old`` and ``new`` must be reversed too — the original reversed
    only ``s``, so any substring longer than one character was searched for
    (and inserted) backwards; e.g. incrementing "(9)" to "(10)" produced
    "(01)", and multi-character targets were never found at all.
    """
    return s[::-1].replace(old[::-1], new[::-1], 1)[::-1]
# Define getExtension: Returns the filename and extension from a given file name
def splitFileName(filename):
    """Split ``filename`` into (name, extension-without-dot).

    Files with no extension come back as (filename, ''); otherwise the split
    happens at the last dot, which the result does not include.
    """
    if os.path.splitext(filename)[1] == '':
        return filename, ''
    last_dot = filename.rfind('.')
    return filename[:last_dot], filename[last_dot + 1:]
# Define checkPath: Return True False based on whether the path exists or not
def checkPath(path, filename):
    """Return True when ``path/filename`` exists on disk, else False."""
    return os.path.exists(r'{0}/{1}'.format(path, filename))
def find_between(s, start, end):
    # Extract the text between the LAST ``start`` and the next ``end`` —
    # expected to be an integer, e.g. the N in a "file_(N)" suffix — and
    # return ``s`` with that number replaced by N+1 (via rreplace, which
    # replaces the last occurrence).
    number = (s.split(start))[-1].split(end)[0]
    return rreplace(s, number, str(int(number) + 1))
# Define removeInvalidCharacters: Remove invalid characters which are not present in the valid characters list
def removeInvalidCharacters(filename):
    """Return ``filename`` with every character outside VALID_CHARACTERS dropped."""
    kept = [ch for ch in filename if ch in VALID_CHARACTERS]
    # The backslash replace is retained from the original, although
    # backslashes are already excluded by the VALID_CHARACTERS filter above.
    return ''.join(kept).replace("\\", "")
# Define renameFile: Checks if there exist a file of same name at the given path, If yes renames it
def renameFile(filename, s3, bucketName, s3folderPath):
    # Return a file name that does not collide with an existing S3 object
    # under ``s3folderPath``. ``head_object`` succeeding means the key exists,
    # so a "_(n)" suffix is appended (or incremented) and the check recurses;
    # a ClientError means the key is absent and the name is free.
    new_filename = filename
    key = s3folderPath + '/' + filename
    try:
        s3.head_object(Bucket=bucketName, Key=key)
        # Object exists -> derive the next candidate name.
        # NOTE(review): the regex matches a single digit only, and it is
        # tested against filename.split('.')[0] (text before the FIRST dot) —
        # names like "file_(10)" fall into the append branch instead of the
        # increment branch; confirm this is acceptable.
        if not(re.search('_\([0-9]\)$', filename.split('.')[0])):
            if os.path.splitext(new_filename)[1] == '':
                # No extension: just tack the suffix on the end.
                new_filename = new_filename+'_(1)'
            else:
                # Insert the suffix before the extension boundary.
                name, extension = os.path.splitext(new_filename)
                new_filename = filename.replace(filename, name + '_(1)')
            new_filename = renameFile(new_filename, s3, bucketName, s3folderPath)
        else:
            # Already suffixed: bump the number inside the parentheses.
            new_filename = find_between(filename, '(', ')')
            new_filename = renameFile(new_filename, s3, bucketName, s3folderPath)
    except ClientError:
        # head_object raises when the key does not exist: name is available.
        pass
    return new_filename
| viratY/doc-management-utilities | document-extraction/src/utilityMethods.py | utilityMethods.py | py | 5,701 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
... |
73898604835 | """
Dataloader building logic.
Author: JiaWei Jiang
This file contains the basic logic of building dataloaders for training
and evaluation processes.
"""
from typing import Any, Union
import numpy as np
import pandas as pd
from omegaconf.dictconfig import DictConfig
from torch.utils.data import DataLoader
from .dataset import DemoDataset
def build_dataloader(
    data: Union[pd.DataFrame, np.ndarray], data_split: str, dataset_cfg: DictConfig, **dataloader_cfg: Any
) -> DataLoader:
    """Create and return a dataloader wrapping ``DemoDataset``.

    Args:
        data: data to be fed into the torch Dataset
        data_split: data split name; shuffling is honoured only for "train"
        dataset_cfg: hyperparameters of the dataset
        dataloader_cfg: hyperparameters of the dataloader

    Returns:
        The configured ``DataLoader``.
    """
    # Evaluation splits are never shuffled, whatever the config says.
    shuffle = data_split == "train" and dataloader_cfg["shuffle"]
    return DataLoader(
        DemoDataset(data, **dataset_cfg),
        batch_size=dataloader_cfg["batch_size"],
        shuffle=shuffle,
        num_workers=dataloader_cfg["num_workers"],
        collate_fn=None,  # default collation for now
        pin_memory=dataloader_cfg["pin_memory"],
        drop_last=dataloader_cfg["drop_last"],
    )
| JiangJiaWei1103/Competitive-DS-Made-Easy | data/build.py | build.py | py | 1,217 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "omegaconf.dictc... |
30896395982 | from torchvision import transforms
from torchvision.datasets import MNIST
import torch
from PIL import Image
import numpy as np
from tqdm import tqdm
class MNISTInvase(MNIST):
    # MNIST variant for INVASE training: each item is returned as a flattened
    # 784-vector together with its target and a placeholder -1 (the G value
    # is undefined for this dataset).

    def __init__(self, *args, **kwargs):
        super(MNISTInvase, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode='L')

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        # Flatten (1, 28, 28) -> (784,) for the fully-connected models.
        img = img.view(-1)
        # Below -1 is due to G being undefined
        return img, target, -1
def one_hot(arr):
    """One-hot encode a 1-D integer tensor into shape (len(arr), max+1)."""
    rows = arr.shape[0]
    width = int(arr.max().item()) + 1
    encoded = torch.zeros((rows, width))
    # Set a single 1 per row at the column given by the label.
    encoded[torch.arange(rows), arr] = 1
    return encoded
def get_mnist(args):
    """Build the train/test DataLoaders over the INVASE-flavoured MNIST data.

    ``args`` may supply ``batch_size`` and ``workers``; falsy values fall back
    to the defaults below. Targets are converted to one-hot vectors.
    """
    base_path = "./data-dir"
    batch_size = args.batch_size if args.batch_size else 256
    # NOTE(review): this reads args.batch_size as well (not a dedicated test
    # size), so a user-supplied batch size overrides the 512 default here too
    # — confirm that is intended.
    test_batch_size = args.batch_size if args.batch_size else 512
    num_workers = args.workers if args.workers else 4
    transform = transforms.Compose([
        transforms.ToTensor(),
        # Standard MNIST mean/std normalisation constants.
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_data = MNISTInvase(base_path, train=True, download=True,
                             transform=transform)
    # Metadata attributes consumed downstream by the INVASE training code.
    train_data.means = (0.1307,)
    train_data.stds = (0.3081,)
    train_data.bounds = [0, 1]
    train_data.input_size = 784
    train_data.output_size = 10
    train_data.targets = one_hot(train_data.targets)
    test_data = MNISTInvase(base_path, train=False,
                            transform=transform)
    test_data.targets = one_hot(test_data.targets)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               shuffle=True, num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=test_batch_size,
                                              shuffle=False, num_workers=num_workers)
    return train_loader, test_loader
| choheeee22/invase-pytorch | data/mnist.py | mnist.py | py | 2,173 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.zeros",... |
31324875227 | ##read the markers from file and search in Graingenes.Each one seek for five times.
#If one makrer hasn't been found even by five times,it will be recorded in the file.
import re
import requests
def get_marker(url):
    """Fetch a GrainGenes marker page and return its 'PCR primers' line.

    HTML tags are stripped from the line; the trailing newline matched by the
    pattern is kept. Returns None when the page has no such line.
    """
    response = requests.get(url, timeout=20)
    pattern = re.compile("PCR primers.+?\n")
    for hit in pattern.findall(response.text):
        # Return the first matching line with markup removed.
        return re.sub('<.+?>', '', hit)
# Batch job: for every marker name in the input files, query GrainGenes and
# record the primer line. Each marker is tried up to five times; markers that
# never succeed are written to lost.txt.
lost = open("f:/ljy/atom/lost.txt", 'w', encoding='utf-8')

with open('F:/ljy/atom/markerf.txt', encoding='utf-8') as m:
    marker_line = m.read().splitlines()
    with open('F:/ljy/atom/primer1.txt', 'w', encoding='utf-8') as f:
        for i in marker_line:
            key = i
            headers = {'user-agent': "Chrome"}  # NOTE(review): unused — requests.get() in get_marker sends no headers
            url = f"https://wheat.pw.usda.gov/cgi-bin/GG3/report.cgi?class=marker;query=*{key}*;name={key}"
            count = 0
            while count < 5:
                try:
                    # BUG FIX: file.write() takes a single string; the old
                    # two-argument call f.write(f'{key}:', get_marker(url))
                    # raised TypeError on every attempt, so nothing was ever
                    # written and every marker ended up in the lost list.
                    # get_marker() returns the primer line with its own
                    # trailing newline (or None when not found).
                    f.write(f'{key}:{get_marker(url)}')
                    count = 6  # success: leave the retry loop
                except Exception:
                    # Narrowed from a bare except so Ctrl-C still works.
                    count = count + 1
                if count == 5:
                    # NOTE(review): message says "three times" although five
                    # attempts are made; wording kept as-is.
                    print(f"{key} is lost with three times of search.")
                    lost.write(f"{key}")
                    lost.write("\n")

with open('F:/ljy/atom/marker2.txt', encoding='utf-8') as m:
    marker_line = m.read().splitlines()
    compiles = re.compile("5'.+?3'")  # NOTE(review): unused
    with open('F:/ljy/atom/primer2.txt', 'w', encoding='utf-8') as f:
        for i in marker_line:
            key = i
            headers = {'user-agent': "Chrome"}
            url = f"https://wheat.pw.usda.gov/cgi-bin/GG3/report.cgi?class=marker;query=*{key}*;name={key}"
            count = 0
            while count < 5:
                try:
                    # Same single-string write fix as the first pass above.
                    f.write(f'{key}:{get_marker(url)}')
                    count = 6
                except Exception:
                    count = count + 1
                if count == 5:
                    print(f"{key} is lost with five times of search.")
                    lost.write(f"{key}")
                    lost.write("\n")

lost.close()
print('OK!')
| Jiny000/BIOinformatics | python/requests.py | requests.py | py | 2,124 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 12,
"... |
13037432263 | import stripe
import json
from django.conf import settings
from django.shortcuts import redirect
from rest_framework.decorators import api_view,permission_classes
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from .models import CustomerPayment
from accounts.models import EmailUser
FRONTEND_SUBSCRIPTION_SUCCESS_URL = settings.SUBSCRIPTION_SUCCESS_URL
FRONTEND_SUBSCRIPTION_CANCEL_URL = settings.SUBSCRIPTION_FAILED_URL
webhook_secret = settings.STRIPE_WEBHOOK_SECRET
stripe.api_key = settings.STRIPE_SECRET_KEY
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_payment(request):
    """Create a Stripe Checkout session for the requested subscription plan.

    Expects ``plan`` ('basic' or 'premium') in the request body. Returns 409
    when the user already has a recorded subscription, otherwise responds
    with the Checkout session URL for the frontend to redirect to.
    """
    try:
        price_id = ''
        # Hard-coded Stripe price ids for the two plan tiers.
        if request.data['plan'] == 'basic':
            price_id = 'price_1LD7XcKPyJLjGRo4peZvp8ed'
        if request.data['plan'] == 'premium':
            price_id = 'price_1LD7Y5KPyJLjGRo4B150gCta'
        if CustomerPayment.objects.filter(customer=request.user).exists():
            return Response(status=status.HTTP_409_CONFLICT, data={"detail": "You Already have a subscription , First Cancel that then Subscribe to New one!"})
        else:
            checkout_session = stripe.checkout.Session.create(
                line_items=[
                    {
                        'price': price_id,
                        'quantity': 1
                    }
                ],
                mode='subscription',
                # Stripe substitutes the real session id for the placeholder.
                success_url=FRONTEND_SUBSCRIPTION_SUCCESS_URL +
                "/{CHECKOUT_SESSION_ID}",
                cancel_url=FRONTEND_SUBSCRIPTION_CANCEL_URL,
                customer_email=request.user.email
            )
            # return redirect(checkout_session.url, code=303)
            return Response({"url": checkout_session.url})
    except Exception as err:
        # Re-raised so DRF's exception handling produces the error response.
        raise err
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def save_customer(request):
    """Persist the Stripe subscription created by a completed Checkout session.

    Reads ``session_id`` from the body, resolves the session and customer on
    Stripe, and stores the subscription/customer/price/product ids in a
    CustomerPayment row for the calling user.
    """
    session_id = request.data['session_id']
    session = stripe.checkout.Session.retrieve(session_id)
    customer = stripe.Customer.retrieve(session.customer)
    subscription_details = stripe.Subscription.list(customer=customer)
    # Assumes the customer has exactly one (the just-created) subscription.
    subscriptionid = subscription_details.data[0].id
    customerid = subscription_details.data[0].customer
    priceid = subscription_details.data[0].plan.id
    productid = subscription_details.data[0].plan.product
    res = {
        "subscriptionid": subscriptionid,
        "customerid": customerid,
        "priceid": priceid,
        "productid": productid
    }
    customer_payment = CustomerPayment()
    customer_user = EmailUser.objects.get(email=request.user.email)
    customer_payment.customer = customer_user
    customer_payment.subscriptionid = subscriptionid
    customer_payment.customerid = customerid
    customer_payment.priceid = priceid
    customer_payment.productid = productid
    customer_payment.save()
    # NOTE(review): this returns a JSON *string* inside the DRF response
    # (double-encoded); Response(res) would serialize the dict directly —
    # confirm what the frontend expects before changing.
    return Response(json.dumps(res))
@api_view(['POST', 'GET'])
@permission_classes([IsAuthenticated])
def cancel_subscription(request):
    """Cancel the caller's Stripe subscription and remove its local record."""
    customer_payment = CustomerPayment.objects.get(customer=request.user)
    subscription_id = customer_payment.subscriptionid
    # Cancel on Stripe first, then forget the subscription locally.
    stripe.Subscription.delete(subscription_id)
    customer_payment.delete()
    return Response(status=status.HTTP_202_ACCEPTED)
@api_view(['POST', 'GET'])
@permission_classes([IsAuthenticated])
def get_payment_status(request):
    """Map the caller's stored Stripe price id to a plan name.

    Users without a CustomerPayment row (or any lookup failure) are reported
    as the 'free' tier.
    """
    plan = ''
    try:
        customer_payment = CustomerPayment.objects.get(customer=request.user)
        # Same hard-coded Stripe price ids as in create_payment().
        if customer_payment.priceid == 'price_1LD7XcKPyJLjGRo4peZvp8ed':
            plan = 'basic'
        if customer_payment.priceid == 'price_1LD7Y5KPyJLjGRo4B150gCta':
            plan = 'premium'
    except Exception as e:
        # NOTE(review): this broad except also hides unexpected errors as
        # 'free' — narrowing to CustomerPayment.DoesNotExist would be safer.
        plan = 'free'
    return Response({"plan": plan})
| DevDhira/tubemize | backend/payment/views.py | views.py | py | 3,955 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.settings.SUBSCRIPTION_SUCCESS_URL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SUBSCRIPTION_FAILED_URL",
"line_number": 17,
... |
20714729925 | import datetime
import json
import random
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from Website import image_verification
from .decorators import unauthenticated_user
from .forms import CreateUserForm, EditUserProfileSettingsForm, CreateTeamForm, UploadImageToVeryficate, \
MyPasswordChangeForm
from .models import *
from .utilities import send_invitation, send_invitation_accepted
def home(request):
    """Landing page: four upcoming and four most recent tournaments, plus any
    pending team invitations for an authenticated user."""
    incomingTournaments = Tournament.objects.filter(status='in_progress').order_by('dateTime')[:4]
    latestTournaments = Tournament.objects.filter(status='completed').order_by('-dateTime')[:4]
    context = {'incomingTournaments': incomingTournaments, 'latestTournaments': latestTournaments}
    if request.user.is_authenticated:
        userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
        if userInvitations:
            # Only expose the invitations key when there is something to show.
            context['userInvitations'] = userInvitations
    return render(request, 'index.html', context)
@unauthenticated_user
def register_page(request):
    """Display and process the sign-up form; creates the user's Profile row
    on success and redirects to the login page."""
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Every account gets an empty Profile immediately.
            Profile.objects.create(user=user)
            messages.success(request, 'Konto utworzone')
            return redirect('login')
    else:
        form = CreateUserForm()
    return render(request, 'accounts/register.html', {'form': form})
@unauthenticated_user
def login_page(request):
    """Authenticate the user from the posted credentials and log them in."""
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            email = request.user.email
            invitations = Invitation.objects.filter(email=email, status=Invitation.INVITED)
            # NOTE(review): both branches redirect to 'home', so this
            # invitation lookup has no visible effect — confirm whether a
            # different destination was intended for invited users.
            if invitations:
                return redirect('home')
            else:
                return redirect('home')
        else:
            messages.error(request, 'Nieprawidłowa nazwa lub hasło')
    context = {}
    return render(request, 'accounts/login.html', context)
def logout_user(request):
    # End the current session and return to the landing page.
    logout(request)
    return redirect('home')
@login_required(login_url='login')
def profile_page(request):
    """User profile: win-rate stats, owned teams, pending invitations and the
    team-creation form."""
    # NOTE(review): the form is bound to request.POST even on GET, and the GET
    # branch below rebinds it with request.user as the *data* argument —
    # verify CreateTeamForm's expected constructor arguments.
    form = CreateTeamForm(request.POST)
    teams = request.user.teams.all()
    gamesWon = request.user.profile.gamesWon
    gamesPlayed = request.user.profile.gamesPlayed
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    if userInvitations:
        messages.info(request, 'Masz oczekujące zaproszenie do drużyny')

    def check_winrate():
        # Win percentage formatted to two decimals; 0 when no games played.
        if gamesPlayed > 0:
            value = "%.2f" % ((gamesWon / gamesPlayed) * 100)
        else:
            value = 0
        return value

    winratePercentage = check_winrate()
    if request.method == 'POST':
        teamName = request.POST.get('teamName')
        nameExists = Team.objects.filter(teamName=teamName).count()
        if form.is_valid() and nameExists == 0:
            # The creator automatically becomes the first team member.
            team = Team.objects.create(teamName=teamName, createdBy=request.user)
            team.members.add(request.user)
            team.save()
            messages.success(request, 'Stworzono drużynę')
            return redirect('profile')
        else:
            messages.error(request, 'Wprowadzona nazwa jest juz zajęta')
    else:
        form = CreateTeamForm(request.user)
    context = {'form': form, 'winratePercentage': winratePercentage, 'teams': teams, 'userInvitations': userInvitations}
    return render(request, 'accounts/profile.html', context)
@login_required(login_url='login')
def edit_profile(request):
    """Show and process the profile-settings form for the logged-in user."""
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    profile = request.user.profile
    form = EditUserProfileSettingsForm(instance=profile)
    if request.method == 'POST':
        # request.FILES carries the uploaded avatar image, if any.
        form = EditUserProfileSettingsForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form.save()
            messages.success(request, 'Pomyślnie zmieniono dane')
            return redirect('profile_settings')
        else:
            # NOTE(review): any validation failure is reported as "name taken"
            # — confirm this matches the form's actual error cases.
            messages.error(request, 'Wprowadzona nazwa jest juz zajęta')
    context = {'form': form, 'userInvitations': userInvitations}
    return render(request, 'accounts/profile_settings.html', context)
@login_required(login_url='login')
def change_password(request):
    """Password-change form; keeps the session alive after a successful change."""
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    if request.method == 'POST':
        form = MyPasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Refresh the session auth hash so the user is not logged out
            # by their own password change.
            update_session_auth_hash(request, user)  # Important!
            messages.success(request, 'Your password was successfully updated!')
            return redirect('profile_settings')
    else:
        form = MyPasswordChangeForm(request.user)
    return render(request, 'accounts/password_change.html', {'form': form, 'userInvitations': userInvitations})
@login_required(login_url='login')
def view_team(request, team_id):
    """Team management page: rename, invite handling, member removal,
    deleting or leaving the team. Only members may view (404 otherwise)."""
    team = get_object_or_404(Team, pk=team_id, members__in=[request.user])
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    invitations = Invitation.objects.filter(status='Invited', team=team)
    context = {'team': team, 'invitations': invitations, 'userInvitations': userInvitations}
    if request.method == 'POST':
        # Which action was submitted is signalled by the button name in POST.
        if 'cancel' in request.POST:
            # Cancel the team's outstanding invitation (first one, if any).
            if invitations:
                invitation = invitations[0]
                invitation.status = Invitation.CANCELLED
                invitation.save()
                messages.info(request, 'Zaproszenie anulowane')
            return redirect('view_team', team_id=team_id)
        if 'update' in request.POST:
            # Rename the team if the new name is free.
            teamName = request.POST.get('teamName')
            nameExists = Team.objects.filter(teamName=teamName).count()
            if nameExists == 0:
                team.teamName = teamName
                team.save()
                messages.success(request, 'Nazwa drużyny została zmieniona')
                return redirect('view_team', team_id=team_id)
            else:
                messages.error(request, 'Wprowadzona nazwa jest juz zajęta')
        if 'delete_member' in request.POST:
            # Remove a member (never the creator) and purge their invitation.
            user = User.objects.get(username=request.POST.get('username'))
            if team.members.filter(teams__members__in=[user.id]):
                if team.createdBy == user:
                    messages.error(request, 'Nie można usunąć twórcy drużyny')
                else:
                    team.members.remove(user.id)
                    team.save()
                    invitations = Invitation.objects.filter(team=team, email=user.email)
                    if invitations:
                        Invitation.objects.filter(team=team, email=user.email).delete()
                    messages.success(request, 'Usunięto użytkownika')
        if 'delete_team' in request.POST:
            # Delete the whole team plus all of its invitations.
            team.delete()
            # NOTE(review): save() after delete() re-inserts the row in some
            # Django versions — confirm this is intended.
            team.save()
            invitations = Invitation.objects.filter(team=team)
            if invitations:
                Invitation.objects.filter(team=team).delete()
            messages.success(request, 'Pomyślnie usunięto drużynę')
            return redirect('profile')
        if 'leave_team' in request.POST:
            # Current user leaves; their invitation records are cleaned up.
            team.members.remove(request.user)
            team.save()
            invitations = Invitation.objects.filter(team=team, email=request.user.email)
            if invitations:
                Invitation.objects.filter(team=team, email=request.user.email).delete()
            messages.success(request, 'Pomyślnie opuszczono drużynę')
            return redirect('profile')
    return render(request, 'teams/view_team.html', context)
@login_required(login_url='login')
def invite(request, team_id):
    """Invite a user (by e-mail) to the team with a 4-character join code.

    The invitee must exist and have a summoner name set; cancelled/declined
    invitations are re-issued, accepted/pending ones are rejected.
    """
    team = get_object_or_404(Team, pk=team_id, members__in=[request.user])
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    try:
        if request.method == 'POST':
            email = request.POST.get('email')
            # Raises User.DoesNotExist for unknown e-mails -> handled below.
            user = User.objects.get(email__exact=email)
            if email:
                if user.profile.summonerName != None:
                    invitations = Invitation.objects.filter(team=team, email=email)
                    if not invitations:
                        # First invitation for this address: random 4-char code.
                        code = ''.join(random.choice('abcdefghijklmnopqrstuvwxyz123456789') for _ in range(4))
                        status = 'Invited'
                        Invitation.objects.create(team=team, email=email, code=code, user=user, status=status)
                        messages.info(request, 'Zaproszenie zsotało wysłane')
                        send_invitation(email, code, team)
                        return redirect('view_team', team_id=team_id)
                    if invitations:
                        if invitations.filter(status=Invitation.CANCELLED) or invitations.filter(
                                status=Invitation.DECLINED):
                            # Previously cancelled/declined: issue a new code.
                            code = ''.join(random.choice('abcdefghijklmnopqrstuvwxyz123456789') for _ in range(4))
                            status = 'Invited'
                            Invitation.objects.update(team=team, email=email, code=code, user=user, status=status)
                            messages.info(request, 'Zaproszenie zsotało wysłane')
                            send_invitation(email, code, team)
                            return redirect('view_team', team_id=team_id)
                        if invitations.filter(status=Invitation.ACCEPTED):
                            messages.error(request, 'Użytkownik znajduje się już w drużynie')
                        else:
                            messages.error(request, 'Użytkownik został już wcześniej zaproszony')
                else:
                    messages.error(request, "Użytkownik musi podać nazwę przywoływacza w swoim profilu")
    except:
        # NOTE(review): bare except reports *any* failure (including e-mail
        # sending errors) as "invalid address" — consider narrowing to
        # User.DoesNotExist.
        messages.error(request, 'Błędny adres e-mail')
    return render(request, 'teams/invite.html', {'team': team, 'userInvitations': userInvitations})
@login_required(login_url='login')
def accept_invitation(request):
    """Accept or decline a pending team invitation using its join code."""
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    if userInvitations:

        def show_invited_team_names():
            # Collect the team names of pending invitations for display.
            # NOTE(review): this filters on status only, not on the current
            # user's e-mail, so it lists ALL pending invitations — verify.
            invitations = Invitation.objects.filter(status="Invited")
            listOfTeamsNames = []
            if invitations:
                teamNames = invitations.values('team__teamName')
                for key in range(len(teamNames)):
                    teamName = [a for a in teamNames[key].values()]
                    listOfTeamsNames.append(teamName[0])
            return listOfTeamsNames

        listOfTeamsNames = show_invited_team_names()
        if request.method == 'POST':
            code = request.POST.get('code')
            # Re-filter by the submitted code plus the caller's e-mail.
            userInvitations = Invitation.objects.filter(code=code, email=request.user.email)
            if 'accept' in request.POST:
                if userInvitations.filter(status='Invited'):
                    invitation = userInvitations[0]
                    invitation.status = Invitation.ACCEPTED
                    invitation.save()
                    team = invitation.team
                    team.members.add(request.user)
                    team.save()
                    messages.info(request, 'Dołączyłeś do drużyny')
                    send_invitation_accepted(team, invitation)
                    return redirect('profile')
                else:
                    messages.error(request, 'Błędny kod')
                    return redirect('accept_invitation')
            if 'decline' in request.POST:
                if userInvitations:
                    invitation = userInvitations[0]
                    invitation.status = Invitation.DECLINED
                    invitation.save()
                    messages.info(request, 'Zaproszenie odrzucone')
                return redirect('profile')
        else:
            return render(request, 'teams/accept_invitation.html',
                          {'listOfTeamsNames': listOfTeamsNames, 'userInvitations': userInvitations})
    messages.info(request, 'Nie masz żadnych zaproszeń')
    return redirect('profile')
def show_open_tournaments(request):
    """List every in-progress tournament, soonest first, with the
    authenticated user's pending invitations when present."""
    tournaments = Tournament.objects.filter(status='in_progress').order_by('dateTime')
    context = {'tournaments': tournaments}
    if request.user.is_authenticated:
        userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
        if userInvitations:
            # Include the key only when there is at least one invitation.
            context['userInvitations'] = userInvitations
    return render(request, 'tournaments/tournaments_open.html', context)
@login_required(login_url='login')
def details_tournament(request, tournament_id):
    """Tournament detail page; lets a team captain register one full team.

    Registration requires: tournament not started, exactly 5 members, no
    member already registered with another team, only one of the caller's
    teams registered, and free capacity.
    """
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    tournament = get_object_or_404(Tournament, pk=tournament_id)
    teams = Team.objects.filter(createdBy=request.user)
    # Display strings for the template (format unchanged).
    tournamentDateTime = tournament.dateTime.strftime("%d/%m/%Y %H:%M:%S")
    tournamentDate = tournament.dateTime.date().strftime("%d/%m/%Y")
    tournamentTime = tournament.dateTime.time().strftime("%H:%M:%S")
    currentDateTime = datetime.now()
    currentDateTime = currentDateTime.strftime("%d/%m/%Y %H:%M:%S")
    # BUG FIX: the old code compared the "%d/%m/%Y %H:%M:%S" display strings
    # lexicographically, which orders by day before month/year and therefore
    # misjudges whether the tournament has started across month boundaries.
    # Compare sortable year-first keys instead; the display values above are
    # untouched.
    tournament_key = tournament.dateTime.strftime("%Y%m%d%H%M%S")
    now_key = datetime.now().strftime("%Y%m%d%H%M%S")

    def check_teams():
        # How many of the caller's teams are already registered here.
        teamsInTournament = 0
        for _ in teams:
            if tournament.registeredTeams.filter(registeredTeams__registeredTeams=_.id):
                teamsInTournament += 1
        return teamsInTournament

    def get_registered_teams_summoner_names_list():
        # Summoner names of every member of every registered team.
        names = []
        for registered_team in tournament.registeredTeams.all():
            for member in registered_team.members.all():
                names.append(member.profile.summonerName)
        return names

    registeredMembers = get_registered_teams_summoner_names_list()
    if request.method == 'POST':
        if 'join' in request.POST:
            if tournament_key >= now_key:
                team = Team.objects.get(teamName=request.POST.get('teamName'))
                members = team.members.all()
                teamSummonerNameList = []
                summonerNameCounter = 0
                for member in members:
                    teamSummonerNameList.append(member.profile.summonerName)
                # Count members already registered via another team.
                for elem in teamSummonerNameList:
                    if elem in registeredMembers:
                        summonerNameCounter += 1
                if team.members.count() == 5:
                    if summonerNameCounter == 0:
                        if check_teams() == 0:
                            if tournament.registeredTeams.count() < tournament.maxTeams:
                                tournament.registeredTeams.add(team.id)
                                tournament.save()
                                messages.success(request, "Drużyna dołączyła do turnieju")
                            else:
                                messages.error(request, 'Nie ma już wolnych miejsc dla nowej drużyny')
                        else:
                            messages.error(request, 'Zapisałeś już jedną swoją drużynę')
                    else:
                        messages.error(request, 'Członek twojej drużyny znajduje się już w innej zapisanej drużynie')
                else:
                    messages.error(request, 'W drużynie nie ma 5 zawodników')
            else:
                messages.error(request, 'Turniej się już rozpoczął. Nie ma już możliwości dołączenia')
    context = {'tournament': tournament, 'userInvitations': userInvitations, 'teams': teams,
               'tournamentDate': tournamentDate, 'tournamentTime': tournamentTime,
               'tournamentDateTime': tournamentDateTime, 'currentDateTime': currentDateTime}
    return render(request, 'tournaments/tournament_view.html', context)
@login_required(login_url='login')
def show_tournament_teams(request, tournament_id):
    """Registered-teams page; supports leaving and joining before the start.

    Join rules mirror details_tournament(): 5 members, no cross-registered
    member, one team per caller, free capacity, tournament not started.
    """
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    tournament = get_object_or_404(Tournament, pk=tournament_id)
    teams = Team.objects.filter(createdBy=request.user)
    # Display strings for the template (format unchanged).
    tournamentDateTime = tournament.dateTime.strftime("%d/%m/%Y %H:%M:%S")
    tournamentDate = tournament.dateTime.date().strftime("%d/%m/%Y")
    tournamentTime = tournament.dateTime.time().strftime("%H:%M:%S")
    currentDateTime = datetime.now()
    currentDateTime = currentDateTime.strftime("%d/%m/%Y %H:%M:%S")
    # BUG FIX: day-first display strings were compared lexicographically to
    # decide whether the tournament had started, which is wrong across
    # month/year boundaries. Use sortable year-first keys for the comparison;
    # the display strings passed to the template are unchanged.
    tournament_key = tournament.dateTime.strftime("%Y%m%d%H%M%S")
    now_key = datetime.now().strftime("%Y%m%d%H%M%S")

    def get_registered_teams_summoner_names_list():
        # Summoner names of every member of every registered team.
        names = []
        for registered_team in tournament.registeredTeams.all():
            for member in registered_team.members.all():
                names.append(member.profile.summonerName)
        return names

    registeredMembers = get_registered_teams_summoner_names_list()

    def check_teams():
        # Return [team, 1] for the caller's registered team, or ['name', 0].
        teamInfo = ['name', 0]
        for _ in teams:
            if tournament.registeredTeams.filter(registeredTeams__registeredTeams=_.id):
                teamInfo[0] = _
                teamInfo[1] = 1
        return teamInfo

    if request.method == 'POST':
        if 'leave' in request.POST:
            if tournament_key >= now_key:
                teamName = check_teams()[0]
                tournament.registeredTeams.remove(teamName.id)
                tournament.save()
                messages.success(request, 'Drużyna wypisana z turnieju')
            else:
                messages.error(request, 'Turniej się już rozpoczął. Nie ma już możliwości opuszczenia')
        if 'join' in request.POST:
            if tournament_key >= now_key:
                team = Team.objects.get(teamName=request.POST.get('teamName'))
                members = team.members.all()
                teamSummonerNameList = []
                summnerNameCounter = 0
                for member in members:
                    teamSummonerNameList.append(member.profile.summonerName)
                # Count members already registered via another team.
                for elem in teamSummonerNameList:
                    if elem in registeredMembers:
                        summnerNameCounter += 1
                if team.members.count() == 5:
                    if summnerNameCounter == 0:
                        if check_teams()[1] == 0:
                            if tournament.registeredTeams.count() < tournament.maxTeams:
                                tournament.registeredTeams.add(team.id)
                                tournament.save()
                                messages.success(request, "Drużyna dołączyła do turnieju")
                            else:
                                messages.error(request, 'Nie ma już wolnych miejsc dla nowej drużyny')
                        else:
                            messages.error(request, 'Zapisałeś już jedną swoją drużynę')
                    else:
                        messages.error(request, 'Członek twojej drużyny znajduje się już w innej zapisanej drużynie')
                else:
                    messages.error(request, 'W drużynie nie ma 5 zawodników')
            else:
                messages.error(request, 'Turniej się już rozpoczął. Nie ma już możliwości dołączenia')
    context = {'tournament': tournament, 'userInvitations': userInvitations, 'teams': teams,
               'tournamentDate': tournamentDate, 'tournamentTime': tournamentTime,
               'tournamentDateTime': tournamentDateTime, 'currentDateTime': currentDateTime}
    return render(request, 'tournaments/teams_in_tournament.html', context)
@login_required(login_url='login')
def show_tournament_bracket(request, tournament_id):
    """Display (and lazily build) the bracket for one tournament.

    Before the start date only an informational message is rendered. On the
    first visit after the start, the bracket matches are created (a 4- or an
    8-slot layout, depending on how many teams registered) and the view
    redirects to itself. Later visits render the current bracket state,
    auto-complete empty matches, advance lone teams (walkovers) and, once
    every match is finished, mark the tournament as completed.
    """
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    tournament = get_object_or_404(Tournament, pk=tournament_id)
    matches = Match.objects.filter(tournamentName=tournament.id)
    teams = Team.objects.filter(members__in=[request.user])
    # NOTE(review): the dates are compared as "%d/%m/%Y ..." strings below,
    # which does not order chronologically across month/year boundaries --
    # confirm this matches the rest of the app's behavior.
    tournamentDateTime = tournament.dateTime.strftime("%d/%m/%Y %H:%M:%S")
    tournamentDate = tournament.dateTime.date().strftime("%d/%m/%Y")
    tournamentTime = tournament.dateTime.time().strftime("%H:%M:%S")
    currentDateTime = datetime.now()
    currentDateTime = currentDateTime.strftime("%d/%m/%Y %H:%M:%S")

    def create_bracket_4(matchCounter, counter):
        # Pair registered teams into first-round matches, then pad with
        # empty matches until all 4 bracket slots exist.
        for team1, team2 in grouped(tournamentTeamList, 2):
            matchName = counter
            teams = Match.objects.create(tournamentName=tournament, matchName=matchName)
            teams.teamsInMatch.add(team1, team2)
            teams.save()  # fixed: was `teams.save` -- the method was never called
            matchCounter += 1
            counter += 1
        while matchCounter != 4:
            matchName = counter
            teams = Match.objects.create(tournamentName=tournament, matchName=matchName)
            teams.save()
            matchCounter += 1
            counter += 1

    def create_bracket_8(matchCounter, counter):
        # Same as create_bracket_4, but fills an 8-slot bracket.
        for team1, team2 in grouped(tournamentTeamList, 2):
            matchName = counter
            teams = Match.objects.create(tournamentName=tournament, matchName=matchName)
            teams.teamsInMatch.add(team1, team2)
            teams.save()  # fixed: was `teams.save` -- the method was never called
            matchCounter += 1
            counter += 1
        while matchCounter != 8:
            matchName = counter
            teams = Match.objects.create(tournamentName=tournament, matchName=matchName)
            teams.save()
            matchCounter += 1
            counter += 1

    if tournamentDateTime >= currentDateTime:
        messages.error(request, 'Drabinka nie jest jeszcze gotowa, poczekaj na rozpoczęcie turnieju')
    else:
        if not matches:
            # First visit after the start: create the bracket matches.
            tournament = get_object_or_404(Tournament, pk=tournament.id)
            tournamentTeamList = [tournament for tournament in tournament.registeredTeams.all()]

            def grouped(iterable, n):
                "s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
                return zip(*[iter(iterable)] * n)

            if tournament.registeredTeams.count() <= 4:
                matchCounter = 0
                counter = 1
                if len(tournamentTeamList) % 2 == 0:
                    create_bracket_4(matchCounter, counter)
                else:
                    # Odd team count: the unpaired last team is put into the
                    # next created match that still has no teams.
                    create_bracket_4(matchCounter, counter)
                    lastTeam = str(tournamentTeamList[len(tournamentTeamList) - 1])
                    lastTeamObject = Team.objects.get(teamName=lastTeam)
                    nextEmptyCreatedMatch = Match.objects.filter(tournamentName=tournament.id,
                                                                 teamsInMatch__teamsInMatch=None)
                    nextEmptyCreatedMatch[0].teamsInMatch.add(lastTeamObject.id)
                    nextEmptyCreatedMatch[0].save()
            else:
                matchCounter = 0
                counter = 1
                if len(tournamentTeamList) % 2 == 0:
                    create_bracket_8(matchCounter, counter)
                else:
                    create_bracket_8(matchCounter, counter)
                    lastTeam = str(tournamentTeamList[len(tournamentTeamList) - 1])
                    lastTeamObject = Team.objects.get(teamName=lastTeam)
                    nextEmptyCreatedMatch = Match.objects.filter(tournamentName=tournament.id,
                                                                 teamsInMatch__teamsInMatch=None)
                    nextEmptyCreatedMatch[0].teamsInMatch.add(lastTeamObject.id)
                    nextEmptyCreatedMatch[0].save()
            return redirect('bracket_in_tournament', tournament.id)
        else:
            if tournament.registeredTeams.count() <= 4:
                def set_status_for_empty_matches():
                    # A match without any team can never be played.
                    for match in matches:
                        if match.teamsInMatch.count() == 0:
                            match.status = Match.COMPLETED
                            match.save()
                set_status_for_empty_matches()

                def set_matches_with_one_team():
                    # A lone team in match 2 gets a walkover into the final (3).
                    for match in matches:
                        if match.teamsInMatch.count() == 1:
                            currentMatch = Match.objects.get(matchName=match.matchName, tournamentName=tournament.id)
                            teamObject = currentMatch.teamsInMatch.all().values_list('teamName')
                            team = Team.objects.get(teamName=teamObject[0][0])
                            if match.matchName == 2 and match.status == 'active':
                                currentMatch.status = Match.COMPLETED
                                currentMatch.save()
                                nextMatch = Match.objects.get(matchName=3, tournamentName=tournament.id)
                                nextMatch.teamsInMatch.add(team.id)
                                nextMatch.status = Match.ACTIVE
                                nextMatch.save()
                set_matches_with_one_team()

                def get_team_list():
                    # Team names padded with None up to the bracket size (4).
                    teams = []
                    for team in tournament.registeredTeams.all():
                        teams.append(team.teamName)
                    while len(teams) != 4:
                        teams.append(None)
                    return teams
                teamList = get_team_list()

                def get_match_results():
                    # Flat [blue, red, blue, red, ...] score list padded with None.
                    results = []
                    for match in matches:
                        results.append(match.pointBlue)
                        results.append(match.pointRed)
                    while len(results) < (2 * len(teamList)):
                        results.append(None)
                    return results
            else:
                def set_status_for_empty_matches():
                    # A match without any team can never be played.
                    for match in matches:
                        if match.teamsInMatch.count() == 0:
                            match.status = Match.COMPLETED
                            match.save()
                set_status_for_empty_matches()

                def set_matches_with_one_team():
                    # Walkovers in the 8-slot bracket: match 4 -> 6, match 3 -> 7.
                    # NOTE(review): the lookups for matches 6 and 7 are not
                    # filtered by tournament -- confirm matchName is unique.
                    for match in matches:
                        if match.teamsInMatch.count() == 1:
                            currentMatch = Match.objects.get(matchName=match.matchName, tournamentName=tournament.id)
                            teamObject = currentMatch.teamsInMatch.all().values_list('teamName')
                            team = Team.objects.get(teamName=teamObject[0][0])
                            if match.matchName == 4:
                                currentMatch.status = Match.COMPLETED
                                currentMatch.save()
                                nextMatch = Match.objects.get(matchName=6)
                                nextMatch.teamsInMatch.add(team.id)
                                nextMatch.status = Match.ACTIVE
                                nextMatch.save()
                            if match.matchName == 3:
                                currentMatch.status = Match.COMPLETED
                                currentMatch.save()
                                nextMatch = Match.objects.get(matchName=7)
                                nextMatch.teamsInMatch.add(team.id)
                                nextMatch.status = Match.ACTIVE
                                nextMatch.save()
                set_matches_with_one_team()

                def get_team_list():
                    # Team names padded with None up to the bracket size (8).
                    teams = []
                    for team in tournament.registeredTeams.all():
                        teams.append(team.teamName)
                    while len(teams) != 8:
                        teams.append(None)
                    return teams
                teamList = get_team_list()

                def get_match_results():
                    # Flat [blue, red, blue, red, ...] score list padded with None.
                    results = []
                    for match in matches:
                        results.append(match.pointBlue)
                        results.append(match.pointRed)
                    while len(results) < (2 * len(teamList)):
                        results.append(None)
                    return results
            resusltsList = get_match_results()
            teamList = json.dumps(teamList)
            resusltsList = json.dumps(resusltsList)

            def if_team_is_registered():
                # fixed: `val` was set True on every iteration regardless of
                # the filter result (and unbound when the user had no teams).
                for name in teams:
                    if tournament.registeredTeams.filter(registeredTeams__registeredTeams=name.id):
                        return True
                return False

            if if_team_is_registered():
                def get_team_registered_by_user():
                    teamRegisteredByUser = None  # robustness: defined even if nothing matches
                    for name in teams:
                        if tournament.registeredTeams.filter(registeredTeams__registeredTeams=name.id):
                            teamRegisteredByUser = name
                    return teamRegisteredByUser
                teamRegisteredByUser = get_team_registered_by_user()

                def get_match_object():
                    # The active match containing the user's team, or None.
                    # fixed: removed a redundant outer `for _ in matches` loop.
                    if teamRegisteredByUser:
                        for match in matches:
                            if match.teamsInMatch.filter(teamsInMatch__teamsInMatch=teamRegisteredByUser.id,
                                                         teamsInMatch__status='active'):
                                return match
                    return None
                matchObject = get_match_object()

                def get_teams_for_summary():
                    # Final standings: winner/loser of the final and of the
                    # third-place match (3/4 for 4 slots, 7/8 for 8 slots).
                    if tournament.registeredTeams.count() <= 4:
                        summaryListOfTeams = []
                        firstPlace = matches.get(matchName=3, tournamentName=tournament.id).winner
                        summaryListOfTeams.append(firstPlace)
                        secondPlace = matches.get(matchName=3, tournamentName=tournament.id).losser
                        summaryListOfTeams.append(secondPlace)
                        thirdPlace = matches.get(matchName=4, tournamentName=tournament.id).winner
                        summaryListOfTeams.append(thirdPlace)
                        fourthPlace = matches.get(matchName=4, tournamentName=tournament.id).losser
                        summaryListOfTeams.append(fourthPlace)
                        return summaryListOfTeams
                    elif tournament.registeredTeams.count() > 4:
                        summaryListOfTeams = []
                        firstPlace = matches.get(matchName=7, tournamentName=tournament.id).winner
                        summaryListOfTeams.append(firstPlace)
                        secondPlace = matches.get(matchName=7, tournamentName=tournament.id).losser
                        summaryListOfTeams.append(secondPlace)
                        thirdPlace = matches.get(matchName=8, tournamentName=tournament.id).winner
                        summaryListOfTeams.append(thirdPlace)
                        fourthPlace = matches.get(matchName=8, tournamentName=tournament.id).losser
                        summaryListOfTeams.append(fourthPlace)
                        return summaryListOfTeams
                summaryListOfTeams = get_teams_for_summary()
                firstPlace = summaryListOfTeams[0]
                secondPlace = summaryListOfTeams[1]
                thirdPlace = summaryListOfTeams[2]
                fourthPlace = summaryListOfTeams[3]

                def end_tournament():
                    # Close the tournament once all bracket matches are done.
                    completedMatches = matches.filter(status='completed')
                    if tournament.registeredTeams.count() <= 4 and len(completedMatches) == 4:
                        tournament.status = tournament.COMPLETED
                        tournament.save()
                    elif tournament.registeredTeams.count() <= 8 and len(completedMatches) == 8:
                        tournament.status = tournament.COMPLETED
                        tournament.save()
                if matches:
                    end_tournament()
                context = {'tournament': tournament, 'userInvitations': userInvitations,
                           'teamRegisteredByUser': teamRegisteredByUser, 'teamList': teamList,
                           'resusltsList': resusltsList, 'matchObject': matchObject,
                           'tournamentDate': tournamentDate, 'tournamentTime': tournamentTime,
                           'tournamentDateTime': tournamentDateTime, 'currentDateTime': currentDateTime,
                           'firstPlace': firstPlace, 'secondPlace': secondPlace, 'thirdPlace': thirdPlace,
                           'fourthPlace': fourthPlace}
                return render(request, 'tournaments/bracket_in_tournament.html', context)
            # Viewer has no registered team in this tournament: plain bracket.
            context = {'tournament': tournament, 'userInvitations': userInvitations, 'teamList': teamList,
                       'resusltsList': resusltsList}
            return render(request, 'tournaments/bracket_in_tournament.html', context)
    # Tournament not started yet: informational page only.
    context = {'tournament': tournament, 'userInvitations': userInvitations,
               'matches': matches, 'tournamentDate': tournamentDate, 'tournamentTime': tournamentTime,
               'tournamentDateTime': tournamentDateTime, 'currentDateTime': currentDateTime}
    return render(request, 'tournaments/bracket_in_tournament.html', context)
@login_required(login_url='login')
def rules_tournament(request, tournament_id):
    """Render the rules page of a single tournament."""
    tournament = get_object_or_404(Tournament, pk=tournament_id)
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    now = datetime.now()
    context = {
        'userInvitations': userInvitations,
        'tournament': tournament,
        'tournamentDate': tournament.dateTime.date().strftime("%d/%m/%Y"),
        'tournamentTime': tournament.dateTime.time().strftime("%H:%M:%S"),
        'tournamentDateTime': tournament.dateTime.strftime("%d/%m/%Y %H:%M:%S"),
        'currentDateTime': now.strftime("%d/%m/%Y %H:%M:%S"),
    }
    return render(request, 'tournaments/tournament_rules.html', context)
@login_required(login_url='login')
def show_match_in_tournament(request, tournament_id, match_id):
    """Show a single bracket match and process an uploaded result screenshot.

    On POST the screenshot is verified via image_verification; the winner's
    side scores a point, the match is completed, player statistics are
    updated and both teams are moved into their next bracket matches.
    GET simply renders the match page.
    """
    userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
    tournament = get_object_or_404(Tournament, pk=tournament_id)
    match = get_object_or_404(Match, pk=match_id)
    requestTeam = Team.objects.filter(createdBy=request.user)
    teamNames = [team for team in match.teamsInMatch.all()]
    teamNamesList = [team.teamName for team in teamNames]
    # Convention: index 0 is the blue side, index 1 the red side.
    teamBlue = Team.objects.get(teamName=teamNamesList[0])
    teamRed = Team.objects.get(teamName=teamNamesList[1])
    blueUsers = User.objects.filter(teams__teamName=teamNamesList[0])
    redUsers = User.objects.filter(teams__teamName=teamNamesList[1])

    def player_stats_win(team):
        # Credit every member of the winning team (+10 rating).
        for member in team.members.all():
            member.profile.gamesPlayed += 1
            member.profile.gamesWon += 1
            member.profile.rating += 10
            member.profile.save()

    def player_stats_loss(team):
        # Losing players still gain some rating (+5) for playing.
        for member in team.members.all():
            member.profile.gamesPlayed += 1
            member.profile.gamesLost += 1
            member.profile.rating += 5
            member.profile.save()

    def bracket_move(nextMatch, team):
        # Advance `team` into `nextMatch` and activate it.
        nextMatch.status = nextMatch.ACTIVE
        nextMatch.teamsInMatch.add(team.id)
        nextMatch.save()

    # NOTE(review): `instance=Match` passes the model class, not an instance;
    # presumably `instance=match` was intended -- confirm.
    form = UploadImageToVeryficate(instance=Match)
    if match.status == 'active':
        try:
            if request.method == 'POST':
                form = UploadImageToVeryficate(request.POST, request.FILES, instance=match)
                if form.is_valid():
                    form.save()
                    # verify_iamge returns 0 when it detects an edited image.
                    winnerTeamName = image_verification.verify_iamge(request, match.id)
                    if winnerTeamName == 0:
                        messages.error(request, 'Wykryto edycję obrazu, prześlij screen ponownie.')
                    else:
                        # NOTE(review): if the verified name matches neither
                        # team, `losserTeamName` stays unbound and the prints
                        # below raise -- confirm verify_iamge's contract.
                        if teamNamesList[0] == winnerTeamName:
                            winnerTeamName = teamNamesList[0]
                            losserTeamName = teamNamesList[1]
                        elif teamNamesList[1] == winnerTeamName:
                            winnerTeamName = teamNamesList[1]
                            losserTeamName = teamNamesList[0]
                        print('win team: ', winnerTeamName)
                        print('loss team: ', losserTeamName)
                        messages.success(request, 'Zrzut ekranu został wysłany')
                        messages.info(request, "Zwyciężyła drużyna: " + winnerTeamName)
                        if teamNamesList[0] == winnerTeamName:
                            # Blue side won: score, complete, record outcome.
                            match.pointBlue = 1
                            match.status = Match.COMPLETED
                            match.winner = winnerTeamName
                            match.losser = losserTeamName
                            match.save()
                            if match.winner == winnerTeamName:
                                team = Team.objects.get(teamName=winnerTeamName)
                                player_stats_win(team)
                                # Winner advancement: 4-slot bracket sends 1/2 -> final (3);
                                # 8-slot bracket sends winners up the 5/6/7 chain.
                                if tournament.registeredTeams.count() <= 4:
                                    if match.matchName == 1:
                                        nextMatch = Match.objects.get(matchName=3, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=3, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                elif tournament.registeredTeams.count() > 4:
                                    if match.matchName == 1:
                                        nextMatch = Match.objects.get(matchName=5, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=5, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 3 and tournament.registeredTeams.count() == 6:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 3 and tournament.registeredTeams.count() > 6:
                                        nextMatch = Match.objects.get(matchName=6, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 4:
                                        nextMatch = Match.objects.get(matchName=6, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 5:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 6:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                            if match.losser == losserTeamName:
                                team = Team.objects.get(teamName=losserTeamName)
                                player_stats_loss(team)
                                # Loser routing into the third-place match
                                # (4 in the small bracket, 8 in the big one).
                                if tournament.registeredTeams.count() <= 4:
                                    if match.matchName == 1 and tournament.registeredTeams.count() < 4:
                                        # No opponent available: walkover win in match 4.
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        nextMatch.teamsInMatch.add(team.id)
                                        nextMatch.winner = team.teamName
                                        nextMatch.status = nextMatch.COMPLETED
                                        nextMatch.save()
                                    if match.matchName == 1 and tournament.registeredTeams.count() == 4:
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                elif tournament.registeredTeams.count() > 4:
                                    if match.matchName == 5 and tournament.registeredTeams.count() > 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 5 and tournament.registeredTeams.count() <= 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 6 and tournament.registeredTeams.count() > 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                            return redirect('bracket_in_tournament', tournament.id)
                        elif teamNamesList[1] == winnerTeamName:
                            # Red side won: mirror of the blue branch.
                            match.pointRed = 1
                            match.winner = winnerTeamName
                            match.losser = losserTeamName
                            match.status = Match.COMPLETED
                            match.save()
                            if match.winner == winnerTeamName:
                                team = Team.objects.get(teamName=winnerTeamName)
                                player_stats_win(team)
                                if tournament.registeredTeams.count() <= 4:
                                    if match.matchName == 1:
                                        nextMatch = Match.objects.get(matchName=3, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=3, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                elif tournament.registeredTeams.count() > 4:
                                    if match.matchName == 1:
                                        nextMatch = Match.objects.get(matchName=5, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=5, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 3 and tournament.registeredTeams.count() == 6:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 3 and tournament.registeredTeams.count() > 6:
                                        nextMatch = Match.objects.get(matchName=6, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 4:
                                        nextMatch = Match.objects.get(matchName=6, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 5:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 6:
                                        nextMatch = Match.objects.get(matchName=7, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                            if match.losser == losserTeamName:
                                team = Team.objects.get(teamName=losserTeamName)
                                player_stats_loss(team)
                                if tournament.registeredTeams.count() <= 4:
                                    if match.matchName == 1 and tournament.registeredTeams.count() < 4:
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        nextMatch.teamsInMatch.add(team.id)
                                        nextMatch.winner = team.teamName
                                        nextMatch.status = nextMatch.COMPLETED
                                        nextMatch.save()
                                    if match.matchName == 1 and tournament.registeredTeams.count() == 4:
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 2:
                                        nextMatch = Match.objects.get(matchName=4, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                elif tournament.registeredTeams.count() > 4:
                                    if match.matchName == 5 and tournament.registeredTeams.count() > 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 5 and tournament.registeredTeams.count() <= 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                                    elif match.matchName == 6:
                                        nextMatch = Match.objects.get(matchName=8, tournamentName=tournament.id)
                                        bracket_move(nextMatch, team)
                else:
                    messages.error(request, 'Nie udało się dodać wyniku')
                return redirect('bracket_in_tournament', tournament.id)
        except Exception as e:
            # OCR/matching failed: ask the user to re-upload the screenshot.
            print('Error: ', e)
            messages.error(request, 'Dane z obrazu nie zostały poprawnie dopasowane, prześlij ponownie plik')
    else:
        messages.error(request, 'Mecz zakończony')
    context = {'tournament': tournament, 'userInvitations': userInvitations, 'form': form, 'match': match,
               'teamBlue': teamBlue, 'teamRed': teamRed, 'blueUsers': blueUsers, 'redUsers': redUsers,
               'requestTeam': requestTeam}
    return render(request, 'tournaments/match_view.html', context)
def show_ranking_view(request):
    """Render the global player ranking table.

    Profiles are ordered by rating (desc), then games played (desc). Each
    row is [position, username, winrate%, played, won, lost, rating]; the
    win rate is rendered as a whole-number percentage string.
    """
    allProfiles = Profile.objects.all().order_by('-rating', '-gamesPlayed')

    def _winrate(profile):
        # Guard against division by zero for players with no games.
        if profile.gamesPlayed != 0:
            return "%.0f" % ((profile.gamesWon / profile.gamesPlayed) * 100) + '%'
        return "%.0f" % 0 + '%'

    # fixed: previously shadowed the builtin `list` and hand-built an index
    # list; enumerate provides the 1-based position directly.
    rankingTable = [
        [position, profile.user.username, _winrate(profile), profile.gamesPlayed,
         profile.gamesWon, profile.gamesLost, profile.rating]
        for position, profile in enumerate(allProfiles, start=1)
    ]
    if request.user.is_authenticated:
        userInvitations = Invitation.objects.filter(email=request.user.email, status='Invited')
        if userInvitations:
            context = {'rankingTable': rankingTable, 'userInvitations': userInvitations}
            return render(request, 'ranking/ranking_view.html', context)
    context = {'rankingTable': rankingTable}
    return render(request, 'ranking/ranking_view.html', context)
| Szaneron/Battlewind | Website/views.py | views.py | py | 50,644 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "forms.CreateUserForm",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "... |
import os
import traceback
from functools import partial
from typing import Any, Union

import h5pyd
import pandas as pd
from toil.job import Job
from toil.realtimeLogger import RealtimeLogger

from Prop3D.common.featurizer import ProteinFeaturizer
from Prop3D.util import safe_remove
from Prop3D.generate_data.data_stores import data_stores
class CalculateFeaturesError(RuntimeError):
    """Error raised/recorded when protein featurization fails.

    Parameters
    ----------
    job : toil Job
        Currently running job (used by :meth:`save` to reach the data stores).
    cath_domain : str
        Name of the CATH domain being processed.
    stage : str
        Function/stage name where the error occurred.
    message : str
        Human-readable error message.
    errors : list or None
        Original error objects, if any.
    *args : any
        Extra positional args forwarded to ``RuntimeError``.
    **kwds : any
        Extra keyword args forwarded to ``RuntimeError``.
    """
    def __init__(self, job: Job, cath_domain: str, stage: str, message: str, errors: Union[list, None] = None, *args: Any, **kwds: Any) -> None:
        super().__init__(*args, **kwds)
        # fixed: keep the job so save() can reach the data stores -- it
        # previously referenced an undefined global `job` (NameError).
        self.job = job
        self.cath_domain = cath_domain
        self.stage = stage
        self.message = message
        self.errors = errors if isinstance(errors, list) else []
        self.jobStoreName = os.path.basename(job.fileStore.jobStore.config.jobStore.split(":")[-1])

    def __str__(self):
        """Render the error, including every wrapped error object."""
        # fixed: the format string had only two placeholders for three
        # arguments, so the joined errors were silently dropped.
        return "Error during {}: {}\nErrors:\n{}".format(
            self.stage, self.message, "\n".join(map(str, self.errors)))

    def save(self, store=None):
        """Save this error to ``errors/<jobstore>/<domain>.<stage>`` in an IOStore.

        Parameters
        ----------
        store : IOStore, optional
            Target store; defaults to the CATH features store.
        """
        if store is None:
            store = data_stores(self.job).cath_features
        fail_file = "{}.{}".format(self.cath_domain, self.stage)
        with open(fail_file, "w") as f:
            print(self.message, file=f)
            print(self.errors, file=f)
        store.write_output_file(fail_file,
                                f"errors/{self.jobStoreName}/{os.path.basename(fail_file)}")
        safe_remove(fail_file)
def calculate_features(job: Job, cath_full_h5: str, cath_domain: str, cathcode: str, update_features: Union[list[str], None] = None,
                       domain_file: Union[str, None] = None, work_dir: Union[str, None] = None, edge_features: bool = True) -> None:
    """Featurize a protein at the atom, residue, and graph level, saving all
    data into the h5 file on the HSDS endpoint.

    Parameters
    ----------
    job : toil Job
        Currently running job
    cath_full_h5 : str
        Path to h5 on hsds endpoint
    cath_domain : str
        CATH domain (7-letter code) PDB ID, CHAIN, Domain ID, e.g. 1zyzA00,
        or a path to a local structure file when `cathcode` is None.
    cathcode : str or None
        Superfamily the domain belongs to (use / instead of .).
    update_features : list of str or None
        Which features to update (individual feature names or whole group
        names). If None, all features are calculated. Default is None.
    domain_file : str or None
        Path to pdb file. If None, it is downloaded from the raw IOStore
        (see data_stores).
    work_dir : str or None
        Where to save temp files.
    edge_features : bool
        Include edge features or not.
        NOTE(review): currently unused by the body -- edges are always built.
    """
    if work_dir is None:
        if job is not None and hasattr(job, "fileStore"):
            work_dir = job.fileStore.getLocalTempDir()
        else:
            work_dir = os.getcwd()
    to_remove = []
    # Resolve the h5 group key and (for real CATH domains) the S3 key.
    if cathcode is not None:
        cath_key = f"/{cathcode}/domains/{cath_domain}"
        s3_cath_key = "{}/{}".format(cathcode, cath_domain)
    elif os.path.isfile(cath_domain):
        cath_key = os.path.splitext(os.path.basename(cath_domain))[0]
        s3_cath_key = None
    else:
        cath_key = cath_domain
        s3_cath_key = None
    if update_features is not None:
        # Save existing features for the domain into separate local h5 files
        # to be read back in by the Featurizer.
        store = h5pyd.File(cath_full_h5, mode="r", use_cache=False)
        try:
            feat_files = list(store[cath_key].keys())
            # fixed: the condition previously ended with `and feat_files and
            # "edges"` (always truthy); check membership instead.
            if len(feat_files) == 3 and "atom" in feat_files and \
                    "residue" in feat_files and "edges" in feat_files:
                # NOTE(review): groups are written below as "atom"/"residue"
                # but read here as "atoms"/"residues" -- confirm which is right.
                for feature_type, index_col in (("atoms", "serial_number"), ("residues", "residue_id")):
                    df = pd.DataFrame(store[f"{cath_key}/{feature_type}"]).set_index(index_col)
                    # fixed: f-string was `{feature_type:-1]}` (syntax error);
                    # the intent is to strip the trailing 's'.
                    feature_file = os.path.join(work_dir, f"{cath_domain}_{feature_type[:-1]}.h5")
                    df.to_hdf(feature_file, "table")
                    del df
                    to_remove.append(feature_file)
            else:
                # Incomplete feature set: fall back to full recalculation.
                update_features = None
        except KeyError:
            # fixed: previously set an unused `feats_exist` flag; a missing
            # group means there is nothing to update, so recalculate all.
            update_features = None
        finally:
            store.close()
    if s3_cath_key is not None:
        # Download the prepared structure from the IOStore.
        domain_file = os.path.join(work_dir, "{}.pdb".format(cath_domain))
        try:
            data_stores(job).prepared_cath_structures.read_input_file(
                s3_cath_key + ".pdb", domain_file)
        except Exception:
            RealtimeLogger.info("Failed to download prepared cath file {}".format(
                cath_key + ".pdb"))
            raise
        output_name = cath_domain
    else:
        # Local file mode: no CATH domain name is passed to the featurizer.
        domain_file = domain_file if domain_file is not None else cath_domain
        cath_domain = None
        output_name = cath_key
    try:
        structure = ProteinFeaturizer(
            domain_file, cath_domain, job, work_dir,
            force_feature_calculation=update_features is None,
            update_features=update_features)
    except:  # noqa: E722 -- log anything (incl. SystemExit) before re-raising
        RealtimeLogger.info(traceback.format_exc())
        raise
    for ext, calculate in (("atom", structure.calculate_flat_features),
                           ("residue", structure.calculate_flat_residue_features),
                           ("edges", partial(structure.calculate_graph, edgelist=True))):
        try:
            out, _ = calculate(write=False)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            tb = traceback.format_exc()
            # fixed: a stray bare `raise` made the error recording and the
            # graceful return below unreachable dead code.
            CalculateFeaturesError(job, cath_domain, ext.split(".", 1)[0], tb).save()
            return
        if ext == "edges":
            df = out
            special_col_types = {"src": "<S8", "dst": "<S8"}
            # Flatten residue-id tuples into plain strings for storage.
            df["src"] = df["src"].apply(lambda s: "".join(map(str, s[1:])).strip())
            df["dst"] = df["dst"].apply(lambda s: "".join(map(str, s[1:])).strip())
        else:
            del out
            df = structure.get_pdb_dataframe(include_features=True, coarse_grained=ext == "residue")
            special_col_types = {"serial_number": "<i8", "atom_name": "<S5",
                                 "residue_id": "<S8", "residue_name": "<S8", "chain": "<S2"}
        # Everything that is not an ID column is stored as float64.
        column_dtypes = {col: special_col_types.get(col, '<f8') for col in df.columns}
        rec_arr = df.to_records(index=False, column_dtypes=column_dtypes)
        with h5pyd.File(cath_full_h5, mode="a", use_cache=False, retries=100) as store:
            # Replace any stale table. (fixed: this delete block was
            # duplicated verbatim; one attempt is enough.)
            if f"{cath_key}/{ext}" in store.keys():
                try:
                    del store[f"{cath_key}/{ext}"]
                except OSError:
                    pass
            try:
                store.create_table(f"{cath_key}/{ext}", data=rec_arr, dtype=list(column_dtypes.items()),
                                   chunks=True, compression="gzip", compression_opts=9)
            except OSError as e:
                if "Request Entity Too Large" in str(e):
                    # Dataset too large for a single HTTP PUT: create the
                    # table from the first slice, then append the rest.
                    span = 500  # rows (atoms) per request
                    for i, start in enumerate(range(0, len(rec_arr), span)):
                        small_data = rec_arr[start:start + span]
                        if i == 0:
                            RealtimeLogger.info(f"Create small data: with: {len(small_data)}")
                            store.create_table(f"{cath_key}/{ext}", data=small_data, dtype=list(column_dtypes.items()),
                                               chunks=True, compression="gzip", compression_opts=9)
                        else:
                            RealtimeLogger.info(f"Add small data: with: {len(small_data)}")
                            store[f"{cath_key}/{ext}"].resize((store[f"{cath_key}/{ext}"].shape[0] + small_data.shape[0]), axis=0)
                            store[f"{cath_key}/{ext}"][-small_data.shape[0]:] = small_data
                else:
                    # fixed: other OSErrors were silently swallowed before.
                    raise
        RealtimeLogger.info("Finished {} features for: {} {}".format(ext, cathcode, output_name))
    RealtimeLogger.info("Finished features for: {} {}".format(cathcode, output_name))
    safe_remove(domain_file)
    if update_features:
        for f in to_remove:
            safe_remove(f)
| bouralab/Prop3D | Prop3D/generate_data/calculate_features_hsds.py | calculate_features_hsds.py | py | 8,865 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "toil.job.Job",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_num... |
29359771140 | from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Input spreadsheet produced by the dataset-generation step.
DATASET = 'dataset_7_classes_v3.xlsx'
# Fraction of the full dataset held out as the test set.
TEST_SIZE = 0.15
# Fraction of the remaining training data held out as the validation set.
VALIDATION_SIZE = 0.2
# One-hot label columns: no-failure ('NF') plus the five failure mechanisms.
FAILURE_MECHANISMS = ['NF', 'RM', 'DPF', 'NPF', 'DWF', 'NWF']
# Class names used for the collapsed binary-classification target.
BINARY_LABELS = ['No_Failure', 'Failure']
def make_binary_classification(df: pd.DataFrame) -> pd.DataFrame:
    """Collapse the multiclass failure labels into a binary failure label.

    Every failure mechanism except 'NF' is mapped onto the generic
    'Failure' class name in the 'fm' column, and a 0/1 'Failure' column is
    derived from the one-hot 'NF' column.
    """
    no_failure, failure = BINARY_LABELS
    label_map = {fm: no_failure if fm == 'NF' else failure for fm in FAILURE_MECHANISMS}
    df = df.replace({'fm': label_map})
    # 'NF' is the one-hot "no failure" flag, so the binary flag is its complement.
    df['Failure'] = 1 - df['NF']
    return df
def run(dataset_path: Path) -> None:
    """
    Take the dataset, make the labels binary (failure / no failure), encode the
    angle features as cos/sin pairs, and split it into stratified train,
    validation and test sets written to ``<name>.csv`` in the working directory.
    Per-class counts/proportions are printed for every label frame.
    """
    df = pd.read_excel(dataset_path)

    # Encode every angle as a (cos, sin) pair -- a continuous, wrap-around-free
    # representation. fixed: the seven copy-pasted encoding blocks are looped.
    angle_columns = {
        'slope_ira': 'slope_ira',
        'int_1_dip': 'interface_1_dip',
        'int_1_dd': 'interface_1_dd',
        'int_1_fri': 'interface_1_fri',
        'int_2_dip': 'interface_2_dip',
        'int_2_dd': 'interface_2_dd',
        'int_2_fri': 'interface_2_fri',
    }
    for prefix, source in angle_columns.items():
        radians = np.radians(df[source])
        df[f'{prefix}_cos'] = np.cos(radians)
        df[f'{prefix}_sin'] = np.sin(radians)

    # Euclidean distance between the two interface mapping points.
    df['distance'] = np.sqrt((df.interface_1_x - df.interface_2_x)**2 +
                             (df.interface_1_y - df.interface_2_y)**2 +
                             (df.interface_1_z - df.interface_2_z)**2)
    # Ratio of the mapping-point distance to the slope height.
    df['ratio'] = df.distance / df.slope_height
    # Column of class names derived from the one-hot label columns.
    df['fm'] = df[FAILURE_MECHANISMS].idxmax(axis=1)
    # Make binary classification (renames 'fm' values, adds 'Failure').
    df = make_binary_classification(df=df)
    # Move the one-hot label columns to the end for ease of use.
    current_cols = df.columns.tolist()
    index_i = df.columns.get_loc("NF")
    index_f = df.columns.get_loc("NWF")
    adjusted_cols = current_cols[0:index_i] + current_cols[index_f + 1:] + current_cols[index_i:index_f + 1]
    df = df[adjusted_cols]
    # Re-establish 'fm' (make_binary_classification collapsed its values) so
    # the splits can be stratified on the original mechanism classes.
    df['fm'] = df[FAILURE_MECHANISMS].idxmax(axis=1)
    # The dataset is highly imbalanced, hence the stratified splits.
    # fixed: shuffle was the string 'True' (truthy, but the wrong type).
    train, test = train_test_split(df,
                                   test_size=TEST_SIZE,
                                   random_state=42,
                                   shuffle=True,
                                   stratify=df['fm'])
    train, validation = train_test_split(train,
                                         test_size=VALIDATION_SIZE,
                                         random_state=42,
                                         shuffle=True,
                                         stratify=train['fm'])
    labels = ['Failure'] + FAILURE_MECHANISMS
    others = ['uuid', 'server', 'folder', 'expansion_factor', 'zone_size_ref', 'min_zones_in_slope', 'slope_length',
              'slope_width', 'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax', 'max_dist_d', 'max_d_factor',
              'interface_1_x', 'interface_1_y', 'interface_1_z', 'interface_2_x', 'interface_2_y', 'interface_2_z',
              'interface_1_dip', 'interface_1_dd', 'interface_2_dip', 'interface_2_dd', 'slope_ira',
              'interface_1_fri', 'interface_2_fri', 'fm']
    predictors = [col for col in df.columns if col not in labels + others]
    y_train, y_val, y_test = train[labels], validation[labels], test[labels]
    X_train, X_val, X_test = train[predictors], validation[predictors], test[predictors]
    dataframes_dict = {'y_train': y_train,
                       'y_val': y_val,
                       'y_test': y_test,
                       'X_train': X_train,
                       'X_val': X_val,
                       'X_test': X_test}

    def _report(name, frame):
        # Print per-class counts and proportions for a label frame.
        # fixed: collapses the copy-pasted per-label if-chains into loops;
        # the printed output is unchanged.
        for label, value in zip(BINARY_LABELS, (0, 1)):
            count = frame.loc[frame['Failure'] == value].count().values[0]
            proportion = count / frame.shape[0]
            print(f"For {name}, {label} has {count} cases which is: "
                  f"{round(proportion, 2)}")
        for label in FAILURE_MECHANISMS:
            count = frame.loc[frame[label] == 1].count().values[0]
            proportion = count / frame.shape[0]
            print(f"For {name}, {label} has {count} cases which is: "
                  f"{round(proportion, 2)}")
        print(f"For {name}, the total amount of cases is: {frame.shape[0]}")
        print("-" * 50)

    for name, frame in dataframes_dict.items():
        # Only the label frames (y_*) get the class-balance report.
        if name.split('_')[0] == 'y':
            _report(name, frame)
        path_to_save = Path.cwd() / f"{name}.csv"
        frame.to_csv(path_to_save, index=False)
if __name__ == '__main__':
    # Entry point: data split for the multiclass / multilabel classification model.
    dataset_path = Path.cwd().parent.parent / '01_dataset_generation' / DATASET
    run(dataset_path=dataset_path)
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
... |
17871789895 | from optparse import OptionParser
from bigdl.dataset import mnist
from bigdl.dataset.transformer import *
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
def build_model(class_num):
    """Assemble a LeNet-5 style network for 28x28 single-channel inputs.

    The final Linear layer is sized by `class_num`; output is log-probabilities
    (LogSoftMax), to be paired with ClassNLLCriterion.
    """
    layer_stack = [
        Reshape([1, 28, 28]),
        SpatialConvolution(1, 6, 5, 5),
        Tanh(),
        SpatialMaxPooling(2, 2, 2, 2),
        Tanh(),
        SpatialConvolution(6, 12, 5, 5),
        SpatialMaxPooling(2, 2, 2, 2),
        Reshape([12 * 4 * 4]),
        Linear(12 * 4 * 4, 100),
        Tanh(),
        Linear(100, class_num),
        LogSoftMax(),
    ]
    net = Sequential()
    for layer in layer_stack:
        net.add(layer)
    return net
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
    """Load one MNIST split as an RDD of BigDL Samples.

    Labels are shifted by +1 because BigDL targets are 1-based.
    """
    images, labels = mnist.read_data_sets(location, data_type)
    image_rdd = sc.parallelize(images)
    label_rdd = sc.parallelize(labels)

    def to_sample(pair):
        features, label = pair
        # Target start from 1 in BigDL
        return Sample.from_ndarray(features, label + 1)

    return image_rdd.zip(label_rdd).map(to_sample)
if __name__ == "__main__":
    """Train or evaluate the LeNet-5 model on MNIST with BigDL on Spark."""
    parser = OptionParser()
    parser.add_option("-a", "--action", dest="action", default="train")
    # Bug fix: optparse does NOT run `type` conversion on default values, so
    # the original default="128" / default="20" left these options as strings
    # whenever the flag was omitted; use real ints.
    parser.add_option("-b", "--batchSize", type=int, dest="batchSize", default=128)
    parser.add_option("-o", "--modelPath", dest="modelPath", default="lenet5/model.470")
    parser.add_option("-c", "--checkpointPath", dest="checkpointPath", default="lenet5")
    parser.add_option("-t", "--endTriggerType", dest="endTriggerType", default="epoch")
    parser.add_option("-n", "--endTriggerNum", type=int, dest="endTriggerNum", default=20)
    (options, args) = parser.parse_args(sys.argv)

    sc = SparkContext(appName="lenet5", conf=create_spark_conf())
    init_engine()

    if options.action == "train":
        def get_end_trigger():
            # Stop after N epochs or N iterations, depending on the CLI flag.
            if options.endTriggerType.lower() == "epoch":
                return MaxEpoch(options.endTriggerNum)
            else:
                return MaxIteration(options.endTriggerNum)

        train_data = get_mnist(sc, "train").map(
            normalizer(mnist.TRAIN_MEAN, mnist.TRAIN_STD))
        test_data = get_mnist(sc, "test").map(
            normalizer(mnist.TEST_MEAN, mnist.TEST_STD))
        optimizer = Optimizer(
            model=build_model(10),
            training_rdd=train_data,
            criterion=ClassNLLCriterion(),
            optim_method=SGD(learningrate=0.01, learningrate_decay=0.0002),
            end_trigger=get_end_trigger(),
            batch_size=options.batchSize)
        optimizer.set_validation(
            batch_size=options.batchSize,
            val_rdd=test_data,
            trigger=EveryEpoch(),
            val_method=[Top1Accuracy()]
        )
        optimizer.set_checkpoint(EveryEpoch(), options.checkpointPath)
        trained_model = optimizer.optimize()
        parameters = trained_model.parameters()
    elif options.action == "test":
        # Load a pre-trained model and then validate it through top1 accuracy.
        test_data = get_mnist(sc, "test").map(
            normalizer(mnist.TEST_MEAN, mnist.TEST_STD))
        model = Model.load(options.modelPath)
        results = model.test(test_data, options.batchSize, [Top1Accuracy()])
        for result in results:
            print(result)
    sc.stop()
| PacktPublishing/Learning-Generative-Adversarial-Networks | Chapter05/Code/BigDL/BigDL-MNIST.py | BigDL-MNIST.py | py | 3,403 | python | en | code | 33 | github-code | 1 | [
{
"api_name": "bigdl.dataset.mnist.read_data_sets",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bigdl.dataset.mnist",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "optparse.OptionParser",
"line_number": 39,
"usage_type": "call"
},
{
"api_... |
8511838057 | from typing import List, Tuple, Dict
from math import sqrt
import torch
from torch import Tensor
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from graph.detectors.models.common_model import GNNPool
import numpy as np
EPS = 1e-15
class ExplainerBase(nn.Module):
    """Common state and mask plumbing shared by GNN explainers.

    Wraps a trained GNN `model` and manages the learnable edge / node-feature
    masks that are injected into every MessagePassing layer via the
    `__explain__` / `__edge_mask__` hooks used by torch_geometric.
    """

    def __init__(self, model: nn.Module, epochs=0, lr=0, explain_graph=False, molecule=False, device="cpu"):
        super().__init__()
        self.model = model
        self.lr = lr
        self.epochs = epochs
        self.explain_graph = explain_graph
        self.molecule = molecule
        # All message-passing layers in module order; their count defines the
        # receptive-field depth used for node-level explanations.
        self.mp_layers = [module for module in self.model.modules() if isinstance(module, MessagePassing)]
        self.num_layers = len(self.mp_layers)
        self.ori_pred = None
        self.ex_labels = None
        self.edge_mask = None
        self.hard_edge_mask = None
        self.num_edges = None
        self.num_nodes = None
        self.device = device

    def __set_masks__(self, x, edge_index, init="normal"):
        """Create fresh random masks and attach the edge mask to every MP layer.

        `init` is accepted for interface compatibility but is not used.
        """
        (N, F), E = x.size(), edge_index.size(1)
        self.node_feat_mask = torch.nn.Parameter(torch.randn(F, requires_grad=True, device=self.device) * 0.1)
        std = torch.nn.init.calculate_gain('relu') * sqrt(2.0 / (2 * N))
        self.edge_mask = torch.nn.Parameter(torch.randn(E, requires_grad=True, device=self.device) * std)
        for module in self.model.modules():
            if isinstance(module, MessagePassing):
                module.__explain__ = True
                module.__edge_mask__ = self.edge_mask

    def __clear_masks__(self):
        """Detach masks from the model's layers and reset mask state.

        Bug fix: the original reset `self.node_feat_masks` (a typo that only
        created a new attribute), leaving the real `self.node_feat_mask` set.
        Both names are cleared here to stay compatible with any external
        reader of either attribute.
        """
        for module in self.model.modules():
            if isinstance(module, MessagePassing):
                module.__explain__ = False
                module.__edge_mask__ = None
        self.node_feat_mask = None
        self.node_feat_masks = None
        self.edge_mask = None

    @property
    def __num_hops__(self):
        # Graph-level explanations use the whole graph (-1); node-level ones
        # are limited to the model's message-passing depth.
        if self.explain_graph:
            return -1
        else:
            return self.num_layers

    def __flow__(self):
        # Propagation direction of the first MP layer; default mirrors PyG's.
        for module in self.model.modules():
            if isinstance(module, MessagePassing):
                return module.flow
        return 'source_to_target'

    def forward(self,
                x: Tensor,
                edge_index: Tensor,
                **kwargs
                ):
        """Record the graph's dimensions and device; subclasses do the work."""
        self.num_edges = edge_index.shape[1]
        self.num_nodes = x.shape[0]
        self.device = x.device

    def eval_related_pred(self, x, edge_index, edge_masks, **kwargs):
        """Evaluate the model under four maskings for each explanation mask.

        Returns one dict per mask with 'zero' (all edges dropped), 'masked'
        (explanation kept), 'maskout' (explanation removed) and 'origin'
        (all edges kept) predictions at `node_idx` (0 for graph-level tasks).
        """
        node_idx = kwargs.get('node_idx')
        node_idx = 0 if node_idx is None else node_idx  # graph level: 0, node level: node_idx

        related_preds = []
        for ex_label, edge_mask in enumerate(edge_masks):
            # +inf sigmoid-mask keeps every edge: the unmasked prediction.
            self.edge_mask.data = float('inf') * torch.ones(edge_mask.size(), device=self.device)
            ori_pred = self.model(x=x, edge_index=edge_index, **kwargs)

            self.edge_mask.data = edge_mask
            masked_pred = self.model(x=x, edge_index=edge_index, **kwargs)

            # mask out important elements for fidelity calculation
            self.edge_mask.data = - edge_mask  # keep Parameter's id
            maskout_pred = self.model(x=x, edge_index=edge_index, **kwargs)

            # -inf mask drops every edge.
            self.edge_mask.data = - float('inf') * torch.ones(edge_mask.size(), device=self.device)
            zero_mask_pred = self.model(x=x, edge_index=edge_index, **kwargs)

            related_preds.append({'zero': zero_mask_pred[node_idx],
                                  'masked': masked_pred[node_idx],
                                  'maskout': maskout_pred[node_idx],
                                  'origin': ori_pred[node_idx]})
        return related_preds
class WalkBase(ExplainerBase):
    """Base class for walk-based explainers.

    Adds utilities to capture per-layer message-passing steps via forward
    hooks, enumerate walks over the edge index, and aggregate per-walk scores
    into edge attributions. Unlike ExplainerBase, it uses one edge mask *per
    layer* (installed by the `connect_mask` context manager).
    """

    def __init__(self, model: nn.Module, epochs=0, lr=0, explain_graph=False, molecule=False, device="cpu"):
        super().__init__(model, epochs, lr, explain_graph, molecule, device)

    def extract_step(self, x, edge_index, detach=True, split_fc=False):
        """Run the model once and split its layers into message-passing steps.

        Registers forward hooks on every leaf module (and every MessagePassing
        module), runs the model, then groups the recorded
        (module, input, output) triples into:
          - walk_steps: one {'input', 'module', 'output'} dict per MP stage;
          - fc_steps (list, when split_fc) or fc_step (single dict otherwise)
            for the trailing fully-connected stage(s).
        """
        layer_extractor = []
        hooks = []

        def register_hook(module: nn.Module):
            # Hook leaf modules only — except MessagePassing, which is hooked
            # even though it may have children.
            if not list(module.children()) or isinstance(module, MessagePassing):
                hooks.append(module.register_forward_hook(forward_hook))

        def forward_hook(module: nn.Module, input: Tuple[Tensor], output: Tensor):
            # input contains x and edge_index
            if detach:
                layer_extractor.append((module, input[0].clone().detach(), output.clone().detach()))
            else:
                layer_extractor.append((module, input[0], output))

        # --- register hooks ---
        self.model.apply(register_hook)

        # The forward pass itself only serves to fire the hooks; `pred` is
        # deliberately unused.
        pred = self.model(x, edge_index)

        for hook in hooks:
            hook.remove()

        # --- divide layer sets ---
        walk_steps = []
        fc_steps = []
        pool_flag = False
        step = {'input': None, 'module': [], 'output': None}
        for layer in layer_extractor:
            if isinstance(layer[0], MessagePassing) or isinstance(layer[0], GNNPool):
                if isinstance(layer[0], GNNPool):
                    pool_flag = True
                # A new MP/pool layer starts a new step; flush the previous one.
                if step['module'] and step['input'] is not None:
                    walk_steps.append(step)
                step = {'input': layer[1], 'module': [], 'output': None}
            # After pooling, Linear layers mark the start of FC stages.
            if pool_flag and split_fc and isinstance(layer[0], nn.Linear):
                if step['module']:
                    fc_steps.append(step)
                step = {'input': layer[1], 'module': [], 'output': None}

            step['module'].append(layer[0])
            step['output'] = layer[2]

        for walk_step in walk_steps:
            if hasattr(walk_step['module'][0], 'nn') and walk_step['module'][0].nn is not None:
                # We don't allow any outside nn during message flow process in GINs
                walk_step['module'] = [walk_step['module'][0]]

        if split_fc:
            if step['module']:
                fc_steps.append(step)
            return walk_steps, fc_steps
        else:
            fc_step = step
            return walk_steps, fc_step

    def walks_pick(self,
                   edge_index: Tensor,
                   pick_edge_indices: List,
                   walk_indices: List=[],
                   num_layers=0
                   ):
        """Recursively enumerate all length-`num_layers` walks along edges.

        NOTE(review): `walk_indices=[]` is a mutable default; it works here
        only because every append below is matched by a pop, so the list is
        empty again when the call returns — callers should still pass their
        own list to be safe.
        """
        walk_indices_list = []
        for edge_idx in pick_edge_indices:
            # Adding one edge
            walk_indices.append(edge_idx)
            _, new_src = src, tgt = edge_index[:, edge_idx]
            # Candidate continuations: edges whose source is this edge's target.
            next_edge_indices = np.array((edge_index[0, :] == new_src).nonzero().view(-1))

            # Finding next edge
            if len(walk_indices) >= num_layers:
                # return one walk
                walk_indices_list.append(walk_indices.copy())
            else:
                walk_indices_list += self.walks_pick(edge_index, next_edge_indices, walk_indices, num_layers)

            # remove the last edge
            walk_indices.pop(-1)

        return walk_indices_list

    def eval_related_pred(self, x, edge_index, masks, **kwargs):
        """Evaluate the model under four maskings of one explanation mask.

        x and edge_index are the input data; `masks` holds the explanation
        result (only masks[0] is used). The same mask value is written into
        every per-layer edge mask held in self.edge_mask (see connect_mask).
        """
        node_idx = kwargs.get('node_idx')
        node_idx = 0 if node_idx is None else node_idx  # graph level: 0, node level: node_idx

        related_preds = []
        mask = masks[0]

        # origin pred: +inf mask keeps every edge
        for edge_mask in self.edge_mask:
            edge_mask.data = float('inf') * torch.ones(mask.size(), device=self.device)
        ori_pred = self.model(x=x, edge_index=edge_index, **kwargs)

        for edge_mask in self.edge_mask:
            edge_mask.data = mask
        masked_pred = self.model(x=x, edge_index=edge_index, **kwargs)

        # mask out important elements for fidelity calculation
        for edge_mask in self.edge_mask:
            edge_mask.data = - mask
        maskout_pred = self.model(x=x, edge_index=edge_index, **kwargs)

        # zero_mask: -inf drops every edge
        for edge_mask in self.edge_mask:
            edge_mask.data = - float('inf') * torch.ones(mask.size(), device=self.device)
        zero_mask_pred = self.model(x=x, edge_index=edge_index, **kwargs)

        # Store related predictions for further evaluation.
        related_preds.append({'zero': zero_mask_pred[node_idx],
                              'masked': masked_pred[node_idx],
                              'maskout': maskout_pred[node_idx],
                              'origin': ori_pred[node_idx]})
        return related_preds

    def explain_edges_with_loop(self, x: Tensor, walks: Dict[Tensor, Tensor], ex_label):
        """Aggregate per-walk scores into one attribution per edge/self-loop.

        walks['ids'] holds each walk's edge indices; walks['score'] the
        per-walk, per-label scores. Edges appearing in no walk get -inf so
        they rank below every scored edge.
        """
        walks_ids = walks['ids']
        walks_score = walks['score'][:walks_ids.shape[0], ex_label].reshape(-1)
        # idx_ensemble[e, w] = number of occurrences of edge e in walk w.
        idx_ensemble = torch.cat([(walks_ids == i).int().sum(dim=1).unsqueeze(0) for i in range(self.num_edges + self.num_nodes)], dim=0)
        hard_edge_attr_mask = (idx_ensemble.sum(1) > 0).long()
        # 0 for edges covered by at least one walk, +inf for uncovered ones.
        hard_edge_attr_mask_value = torch.tensor([float('inf'), 0], dtype=torch.float, device=self.device)[hard_edge_attr_mask]
        edge_attr = (idx_ensemble * (walks_score.unsqueeze(0))).sum(1)

        return edge_attr - hard_edge_attr_mask_value

    class connect_mask(object):
        """Context manager that installs one random edge mask per MP layer on
        the wrapped explainer instance, and detaches them on exit."""

        def __init__(self, cls):
            # `cls` is the explainer instance whose model gets masked.
            self.cls = cls

        def __enter__(self):
            # One mask per layer, sized for all edges plus self-loops; the
            # batch size is honoured when the explainer defines x_batch_size.
            self.cls.edge_mask = [nn.Parameter(torch.randn(self.cls.x_batch_size * (self.cls.num_edges + self.cls.num_nodes))) for _ in
                                  range(self.cls.num_layers)] if hasattr(self.cls, 'x_batch_size') else \
                                 [nn.Parameter(torch.randn(1 * (self.cls.num_edges + self.cls.num_nodes))) for _ in
                                  range(self.cls.num_layers)]

            for idx, module in enumerate(self.cls.mp_layers):
                module.__explain__ = True
                module.__edge_mask__ = self.cls.edge_mask[idx]

        def __exit__(self, *args):
            for idx, module in enumerate(self.cls.mp_layers):
                module.__explain__ = False
                module.__edge_mask__ = None
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
257427796 | import datetime
import django_filters
from django import forms
from .choices import EXPERIENCIES, HIERARCHIES, MODALITIES, PERIOD_CHOICES
from .models import Job
class JobFilter(django_filters.FilterSet):
    """Filter set for Job listings: free-text title search, multi-choice
    facets, and a posting-period window."""

    q = django_filters.CharFilter(field_name='title', lookup_expr='icontains')
    modality = django_filters.MultipleChoiceFilter(
        choices=MODALITIES, widget=forms.CheckboxSelectMultiple
    )
    hierarchy = django_filters.MultipleChoiceFilter(
        choices=HIERARCHIES, widget=forms.CheckboxSelectMultiple
    )
    experience = django_filters.MultipleChoiceFilter(
        choices=EXPERIENCIES, widget=forms.CheckboxSelectMultiple
    )
    period = django_filters.ChoiceFilter(
        choices=PERIOD_CHOICES, method='filter_by_period'
    )

    def filter_by_period(self, queryset, name, value):
        """Limit jobs to those posted within the selected window ending today."""
        today = datetime.date.today()
        if value == 'today':
            return queryset.filter(posted_at__date=today)
        # Lookback window in days for every non-"today" period choice.
        window_days = {
            'last_3_days': 3,
            'last_week': 7,
            'last_2_weeks': 14,
            'last_month': 30,
        }.get(value)
        if window_days is not None:
            cutoff = today - datetime.timedelta(days=window_days)
            return queryset.filter(posted_at__date__gte=cutoff)

    class Meta:
        model = Job
        fields = {
            'category': ('exact',),
            'address__uf': ('exact',),
        }
| Ricardo-Jackson-Ferrari/jobfinder | apps/job/filters.py | filters.py | py | 1,761 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "django_filters.FilterSet",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django_filters.CharFilter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django_filters.MultipleChoiceFilter",
"line_number": 12,
"usage_type": "call"
}... |
20292781823 | from datetime import datetime
from lorem_text import lorem as lorem_func
import pyotp
from flask import abort, Blueprint, make_response, redirect, \
render_template, Response, send_file, send_from_directory, url_for, request, session
from flask_login import login_user, current_user
# noinspection PyPackageRequirements
from sqlalchemy import or_
from webapp.members_routes import MemberPost
# noinspection PyPep8Naming
from webapp.models import BlogImage, BlogPost, Files, KeyValue, Member, Post, Show, ShowPhotos, User, \
MemberShowLink as MSL, db
from flask import current_app as app
bp = Blueprint("routes", __name__)
class NavItem:
    """A single navigation entry: a display title plus its target link."""

    def __init__(self, tit, lin):
        # Historical parameter names (tit/lin) kept for caller compatibility.
        self.title, self.link = tit, lin
class MemberRenderer:
    """Presentation wrapper around a Member row, optionally backed by a User.

    Exposes the id/name to display, the member's role within a show (when the
    row carries those columns), plus two flags: `has_user` (the member has a
    site account) and `has_diff` (they were credited under a different name
    than their account name; the credited name is then in as_firstname /
    as_lastname).
    """

    # Class-level defaults, flipped per-instance in __init__ when applicable.
    has_user = False
    has_diff = False

    def __init__(self, member, user=None):
        # Rows produced by bare Member queries may lack these columns.
        self.role_type = getattr(member, 'cast_or_crew', "")
        self.role = getattr(member, 'role_name', "")
        source = member if user is None else user
        self.id = source.id
        self.firstname = source.firstname
        self.lastname = source.lastname
        if user is not None:
            self.has_user = True
            if (member.firstname, member.lastname) != (user.firstname, user.lastname):
                self.as_firstname = member.firstname
                self.as_lastname = member.lastname
                self.has_diff = True

    def get_link(self):
        """Canonical profile URL: /past-shows/{m|u}/{id}/{First-Last}."""
        kind = "u" if self.has_user else "m"
        name_slug = "-".join([self.firstname, self.lastname])
        return "/".join(["/past-shows", kind, self.id, name_slug])
# noinspection PyUnresolvedReferences
@bp.route("/", methods=["GET"])
def frontpage():
    """Render the public front page: latest show, headline post, carousel photos."""
    # NOTE(review): assumes at least one past show exists — .first() returning
    # None would fault on .id below (pre-existing behavior, unchanged).
    latest_show = Show.query \
        .filter(Show.date < datetime.now()) \
        .order_by(Show.date.desc()) \
        .first()
    all_photos = ShowPhotos.query \
        .filter_by(show_id=latest_show.id) \
        .all()
    # Carousel slots available (fewer when the tickets banner is active).
    # Hoisted out of the loop: this value cannot change mid-request.
    photo_target = 5 - int(KeyValue.query.filter_by(key="tickets-active").first().value)
    photos = []
    for i in all_photos:
        # photo_desc is assumed to hold "width,height" — TODO confirm uploader.
        size = i.photo_desc.split(",")
        # Bug fix: compare dimensions numerically; lexicographic string
        # comparison misorders values like "1000" vs "999".
        if int(size[0]) > int(size[1]):
            photos.append(i)
        if len(photos) == photo_target:
            break
    # Newest public/auditions post attached to the next upcoming show.
    post = Post.query \
        .filter(or_(Post.type == "public", Post.type == "auditions")) \
        .join(Show, Post.show_id == Show.id) \
        .filter(Show.date > datetime.now()) \
        .order_by(Show.date.asc()) \
        .order_by(Post.date.desc()) \
        .with_entities(
            Post.date,
            Post.title,
            Post.content,
            Post.type,
            Post.linked_files,
            Show.title.label("show_title"),
            Show.subtitle.label("show_subtitle")
        ) \
        .first()
    if post is None:
        # Fall back to just announcing the next show, with no files.
        files = []
        post = Show.query \
            .filter(Show.date > datetime.now()) \
            .order_by(Show.date.asc()) \
            .with_entities(
                Show.title.label("show_title"),
                Show.subtitle.label("show_subtitle")
            ) \
            .first()
    else:
        # Wrap each attached file as a MemberPost for the template.
        files = [
            MemberPost(
                post_id=i.id,
                title=i.name,
                date=i.date,
                post_type="file"
            ) for i in Files.query
            .filter(Files.id.in_(post.linked_files["files"]))
            .all()
        ]
    # (Removed leftover debug print(files).)
    return render_template(
        "frontpage.html",
        latest_show=latest_show,
        post=post,
        photos=photos,
        frontpage=True,
        title="Home",
        files=files,
        no_portal=True,
        css=["m_dashboard.css", "frontpage.css"],
        js="carousel.js"
    )
@bp.get("/links")
def links():
    """Render the bare links page using the site layout."""
    page = render_template(
        "layout.html",
        css="links.css"
    )
    return page
# noinspection PyUnresolvedReferences
@bp.route("/auditions", methods=["GET"])
def auditions():
    """Render the auditions page: the newest 'auditions' post for the next
    upcoming show, plus any files attached to it."""
    # Scope to shows that have not happened yet; the soonest show wins, then
    # the most recent post for it.
    post = Post.query \
        .filter_by(type="auditions") \
        .join(Show, Post.show_id == Show.id) \
        .filter(Show.date > datetime.now()) \
        .order_by(Show.date.asc()) \
        .order_by(Post.date.desc()) \
        .with_entities(
            Post.date,
            Post.title,
            Post.content,
            Show.title.label("show_title"),
            Show.subtitle.label("show_subtitle"),
            Post.linked_files
        ) \
        .first()
    if post is not None:
        # Wrap each attached file as a MemberPost so the template renders it
        # like any other downloadable item.
        # NOTE(review): assumes post.linked_files is a dict with a "files"
        # list of Files ids — confirm against the post editor.
        files = [
            MemberPost(
                post_id=i.id,
                title=i.name,
                date=i.date,
                post_type="file"
            ) for i in Files.query\
                .filter(
                    Files.id.in_(post.linked_files["files"])
                )\
                .all()
        ]
    else:
        files = []
    return render_template(
        "auditions.html",
        post=post,
        files=files,
        no_portal=True,
        css=["m_dashboard.css", "frontpage.css"]
    )
@bp.route("/search", methods=["GET"])
def search():
    """Build the site-wide search page covering shows, posts and people.

    All matching happens client-side (quicksearch.js); this view only
    assembles one Result per searchable entity with a text blob to match on.
    """
    class Result:
        # One searchable entry: link target, display title, text the
        # client-side quicksearch matches against, and a type label.
        def __init__(self, link, title, searchable_text, result_type):
            self.link = link
            self.title = title
            self.searchable = searchable_text
            self.type = result_type

    results = []
    # Past shows: every descriptive column contributes to the searchable text.
    # NOTE(review): this link uses /past-show/ while the routes in this file
    # register /past-shows/ — confirm this is not a dead link.
    for result in Show.query.filter(Show.date < datetime.now()).order_by(Show.date).all():
        searchable = " ".join([
            result.title or "",
            result.subtitle or "",
            str(result.date.year),
            result.season or "",
            result.text_blob or "",
            result.author or "",
            result.show_type or "",
            result.genre or ""
        ])
        results.append(
            Result(
                f"/past-show/{result.id}/{'-'.join(result.title.split(' '))}",
                result.title,
                searchable,
                "Show"
            )
        )
    # Published blog posts.
    for result in Post.query \
            .filter(Post.date < datetime.now()) \
            .filter_by(type="blog") \
            .order_by(Post.date).all():
        searchable = " ".join([
            result.title or "",
            result.type or "",
            result.content or ""
        ])
        results.append(
            Result(
                f"/blog/{result.id}",
                result.title,
                searchable,
                "Blog"
            )
        )
    # Public announcements and audition posts.
    for result in Post.query \
            .filter(Post.date < datetime.now()) \
            .filter(or_(Post.type == "public", Post.type == "auditions")) \
            .order_by(Post.date.desc()).all():
        searchable = " ".join([
            result.title or "",
            result.type or "",
            result.content or ""
        ])
        results.append(
            Result(
                f"/blog/{result.id}",
                result.title,
                searchable,
                "Post"
            )
        )
    # People: members with a site account are grouped per user (one Result
    # covering all their credited names); members without one stand alone.
    users_list = {}
    members_list = []
    for member in Member.query.all():
        if member.associated_user is not None:
            if member.associated_user not in users_list.keys():
                user = User.query.filter_by(id=member.associated_user).first()
                for i in Member.query.filter_by(associated_user=member.associated_user).all():
                    users_list.setdefault(member.associated_user, []).append(
                        MemberRenderer(
                            i,
                            user
                        )
                    )
        else:
            members_list.append(
                MemberRenderer(
                    member
                )
            )
    for result in [*users_list.values(), *members_list]:
        if isinstance(result, list):
            # Grouped user: one entry whose searchable text covers every name
            # they were ever credited under.
            renderer = result[0]
            searchable = " ".join([
                f"{i.as_firstname} {i.as_lastname}"
                if i.has_diff else
                f"{i.firstname} {i.lastname}"
                for i in result
            ])
        else:
            renderer = result
            searchable = " ".join([
                result.firstname,
                result.lastname
            ])
        results.append(
            Result(
                renderer.get_link(),
                f"{renderer.firstname} {renderer.lastname}",
                searchable,
                "Member"
            )
        )
    return render_template(
        "search.html",
        results=results,
        js="quicksearch.js",
        css="search.css"
    )
@bp.route("/blog", methods=["GET"])
def blogs():
    """List all published blog posts, optionally without site chrome."""
    published = Post.query.filter_by(type="blog").filter(Post.date < datetime.now()).order_by(Post.date.desc()).all()
    embedded = "embedded" in request.args
    return render_template(
        "blogs.html",
        template="blank_template.html" if embedded else "layout.html",
        embedded="embedded" if embedded else "",
        css="blogs.css",
        posts=published
    )
@bp.route("/blog/latest", methods=["GET"])
def latest_blog():
    """Redirect to the newest published blog post (404 when none exist)."""
    newest = Post.query.filter_by(type="blog").filter(Post.date < datetime.now()).order_by(Post.date.desc()).first_or_404()
    return redirect(f"/blog/{newest.id}")
@bp.route("/blog/<post_id>", methods=["GET"])
def blog_post(post_id):
    """Render a single blog post and bump its view counter."""
    post = Post.query.filter_by(id=post_id).first_or_404()
    author = User.query.filter_by(id=post.author).first()
    # Side effect: every render counts as one view.
    post.views += 1
    db.session.commit()
    # ?embedded renders the post without the surrounding site layout.
    return render_template(
        "post.html",
        template={True: "blank_template.html", False: "layout.html"}["embedded" in request.args],
        embedded={True: "embedded", False: ""}["embedded" in request.args],
        post=post,
        author=author,
        css="post.css"
    )
@bp.get("/blog_img/<blog_id>/<int:image_no>")
def blog_img(blog_id, image_no):
    """Serve the raw image bytes for one image attached to a blog post.

    Returns 404 when the (blog_id, image_no) row does not exist — previously
    a missing row raised AttributeError on `.image` and surfaced as a 500.
    """
    record = BlogImage.query.get((blog_id, image_no))
    if record is None or record.image is None:
        abort(404)
    return make_response(record.image)
@bp.route("/past-shows")
def past_shows():
    """List every show that has already happened, newest first."""
    history = Show.query.filter(Show.date < datetime.now()).order_by(Show.date.desc()).all()
    return render_template(
        "past_shows.html",
        shows=history,
        manage_shows=False,
        no_portal=True,
        css="past_shows.css",
        js=["past_shows.js", "quicksearch.js"]
    )
@bp.route("/past-shows/<show_id>", methods=["GET"])
def past_show_redirect(show_id):
    """Redirect a bare show URL to its canonical slugged form."""
    show = Show.query.filter_by(id=show_id).first_or_404()
    slug = show.title.lower().replace(" ", "-")
    return redirect(f"{show_id}/{slug}")
@bp.route("/past-shows/<show_id>/<test>")
def past_show_page(show_id, test):
test += " "
show = Show.query.filter_by(id=show_id).first_or_404()
raw_cast = MSL.query \
.filter_by(show_id=show_id, cast_or_crew="cast") \
.join(Member, MSL.member_id == Member.id) \
.with_entities(
MSL.show_id,
MSL.cast_or_crew,
MSL.role_name,
Member.id,
Member.firstname,
Member.lastname,
Member.associated_user
) \
.order_by(MSL.order_val) \
.all()
cast = {}
for member in raw_cast:
user = User.query.filter_by(id=member.associated_user).first()
cast.setdefault(member.role_name, []).append(MemberRenderer(member, user))
raw_crew = MSL.query \
.filter_by(show_id=show_id, cast_or_crew="crew") \
.join(Member, MSL.member_id == Member.id) \
.with_entities(
MSL.show_id,
MSL.cast_or_crew,
MSL.role_name,
Member.id,
Member.firstname,
Member.lastname,
Member.associated_user
) \
.order_by(MSL.order_val) \
.all()
crew = {}
for member in raw_crew:
user = User.query.filter_by(id=member.associated_user).first()
crew.setdefault(member.role_name, []).append(MemberRenderer(member, user))
photos = ShowPhotos.query.filter_by(show_id=show_id, photo_type="photo").all()
videos = ShowPhotos.query.filter_by(show_id=show_id, photo_type="video").all()
if "embedded" in request.args.keys():
return render_template(
"past_show_photos.html",
show=show,
cast=cast,
crew=crew,
photos=photos,
videos=videos,
title=show.title,
css="past_show_page.css",
js="past_show_page.js"
)
else:
return render_template(
"past_show_page.html",
show=show,
cast=cast,
crew=crew,
photos=photos,
videos=videos,
title=show.title,
css="past_show_page.css",
js="past_show_page.js"
)
@bp.route("/past-shows/u/<user_id>")
def u_redirect(user_id):
    """Redirect a bare user URL to its name-slugged canonical form."""
    user = User.query.filter_by(id=user_id).first_or_404()
    slug = "-".join([user.firstname, user.lastname])
    return redirect(f"{user_id}/{slug}")
# noinspection DuplicatedCode
@bp.route("/past-shows/u/<user_id>/<test>")
def u(user_id, test):
    """List every show a user appeared in, across all their Member records.

    `test` is the decorative name slug from the canonical URL.
    """
    test += " "  # slug is not used for lookup
    # ids of every Member row linked to this user account
    user_members = [
        i[0] for i in
        Member.query
        .filter(Member.associated_user == user_id)
        .with_entities(Member.id)
        .all()
    ]
    # noinspection PyUnresolvedReferences
    msls = MSL.query \
        .filter(MSL.member_id.in_(user_members)) \
        .join(Member, MSL.member_id == Member.id) \
        .with_entities(
            MSL.show_id,
            MSL.cast_or_crew,
            MSL.role_name,
            Member.id,
            Member.firstname,
            Member.lastname,
            Member.associated_user
        ) \
        .all()
    user = User.query.filter_by(id=user_id).first_or_404()
    # group the user's roles by show
    shows = {}
    for link in msls:
        shows.setdefault(link.show_id, []).append(MemberRenderer(link, user))
    # noinspection PyUnresolvedReferences
    show_details = {i.id: i for i in Show.query.order_by(Show.date.desc()).filter(Show.id.in_(shows.keys())).all()}
    return render_template(
        "shows_by_person.html",
        shows=shows,
        show_details=show_details,
        no_portal=True,
        user=user,
        js="past_shows.js",
        css="past_shows.css"
    )
@bp.route("/past-shows/m/<member_id>")
def m_redirect(member_id):
    """Redirect a bare member URL to its name-slugged canonical form."""
    member = Member.query.filter_by(id=member_id).first_or_404()
    slug = "-".join([member.firstname, member.lastname])
    return redirect(f"{member_id}/{slug}")
# noinspection DuplicatedCode
@bp.route("/past-shows/m/<member_id>/<test>")
def m(member_id, test):
    """List every show for a member who has no linked user account.

    `test` is the decorative name slug from the canonical URL.
    """
    test += " "  # slug is not used for lookup
    member = Member.query.filter_by(id=member_id).first_or_404()
    msls = MSL.query \
        .filter(MSL.member_id == member_id) \
        .join(Member, MSL.member_id == Member.id) \
        .with_entities(
            MSL.show_id,
            MSL.cast_or_crew,
            MSL.role_name,
            Member.id,
            Member.firstname,
            Member.lastname,
            Member.associated_user
        ).all()
    # group this member's roles by show
    shows = {}
    for link in msls:
        shows.setdefault(link.show_id, []).append(MemberRenderer(link))
    # noinspection PyUnresolvedReferences
    show_details = {i.id: i for i in Show.query.order_by(Show.date.desc()).filter(Show.id.in_(shows.keys())).all()}
    return render_template(
        "shows_by_person.html",
        shows=shows,
        show_details=show_details,
        no_portal=True,
        user=member,
        js="past_shows.js",
        css="past_shows.css"
    )
@bp.route("/lorem", methods=["GET"])
def lorem():
    """Render a page of placeholder text (30 lorem-ipsum paragraphs)."""
    filler = lorem_func.paragraphs(30)
    return render_template("lorem.html", lorem=filler, css="frontpage.css")
@bp.route("/database", methods=["GET"])
def database():
    """Show simple row-count statistics for the main tables."""
    counted = (
        ("Shows", Show),
        ("Members", Member),
        ("Cast/Crew Roles", MSL),
        ("Blogposts", BlogPost),
        ("Photos", ShowPhotos),
    )
    stats = {label: model.query.count() for label, model in counted}
    return render_template("database.html", stats=stats, css="frontpage.css")
@bp.route("/about-us")
def about():
    """Render the about page from key-value content (404 if 'about' missing)."""
    maps_url = KeyValue.query.filter_by(key="maps-url").first()
    about_text = KeyValue.query.filter_by(key="about").first_or_404()
    return render_template(
        "about-us.html",
        maps_url=maps_url,
        about=about_text,
        css="frontpage.css"
    )
@bp.route("/tickets")
def tickets():
    """Render the static ticket-purchase page."""
    page = render_template("tickets.html", css="frontpage.css")
    return page
# noinspection PyUnusedLocal
@app.errorhandler(401)
@bp.route("/members", methods=["GET", "POST"])
def members(*args):
    """Members landing page: login form plus publicly visible member files.

    Also registered as the 401 handler, so unauthorised requests anywhere on
    the site land here (*args absorbs the error object Flask passes in).
    """
    if current_user.is_authenticated:
        return redirect(url_for("members_routes.dashboard"))
    else:
        if request.method == "POST":
            user = User.query.filter_by(email=request.form['email']).first()
            if user is not None and user.verify_password(request.form['password']):
                if user.otp_secret != "" and user.otp_secret is not None:
                    # 2FA enabled: park the email in the session and ask for a code.
                    session['email'] = request.form['email']
                    return redirect(url_for("routes.otp"))
                else:
                    # NOTE(review): a password equal to the user id appears to
                    # mark a first login that must set a real password — confirm.
                    session['set_password'] = request.form.get('password') == user.id
                    login_user(user)
                    return redirect(url_for('members_routes.dashboard'))
            else:
                return redirect(url_for("routes.members", error="bad_login"))
        elif request.method == "GET":
            # Static payment links plus every file published to members_public.
            files = [
                MemberPost(
                    title="Adult Membership Subs",
                    date=datetime.utcnow(),
                    text="Click to Pay",
                    link="https://checkout.square.site/buy/2RAQ4QC2TWDCTY6WTQSAXZHG",
                    post_type="link"
                ),
                MemberPost(
                    title="Junior Membership Subs",
                    date=datetime.utcnow(),
                    text="Click to Pay",
                    link="https://checkout.square.site/buy/PMRGF2GUVKGHNFZCOJMQQKXT",
                    post_type="link"
                )
            ] + [
                MemberPost(
                    post_id=i.id,
                    title=i.name,
                    date=i.date,
                    post_type="file"
                ) for i in Files.query.filter_by(show_id="members_public").all()
            ]
            return render_template(
                "members.html",
                error=request.args.get("error") or "",
                files=files,
                no_portal=True,
                css=["m_dashboard.css", "members.css"]
            )
@bp.route("/members/otp", methods=["GET", "POST"])
def otp():
    """Second login step: prompt for and verify a TOTP code.

    GET renders the prompt (requires a pending email in the session);
    POST verifies the submitted code and logs the user in on success.
    """
    if request.method == "GET":
        if 'email' not in session:
            return redirect(url_for('routes.members'))
        return render_template("otp.html", css="members.css")
    else:
        # Guard: a direct POST with no pending login previously raised
        # KeyError on session['email'] and surfaced as a 500.
        if 'email' not in session:
            return redirect(url_for('routes.members'))
        user = User.query.filter_by(email=session['email']).first()
        session.pop('email', None)
        # Guard: the user may have been deleted between the two login steps.
        if user is None:
            return redirect(url_for("routes.members"))
        totp = pyotp.TOTP(user.otp_secret)
        if totp.verify(request.form['otp']):
            login_user(user)
            return redirect(url_for("members_routes.dashboard"))
        else:
            return redirect(url_for("routes.members"))
@bp.route("/js/<string:filename>", methods=["GET"])
def js(filename):
    """Serve a static JavaScript file.

    Fixes: the old code set a bogus 'mimetype' response *header* (not a real
    HTTP header, so browsers ignored it) and built the path by string
    concatenation. send_from_directory safely joins the filename to the
    directory and sets the Content-Type via its mimetype argument.
    """
    try:
        return send_from_directory('static/js', filename, mimetype='text/javascript')
    except OSError:
        abort(404)
@bp.route("/css/<string:filename>", methods=["GET"])
def css(filename):
    """Serve a static CSS file.

    Fixes: the old code set a bogus 'mimetype' response *header* (not a real
    HTTP header, so browsers ignored it) and built the path by string
    concatenation. send_from_directory safely joins the filename to the
    directory and sets the Content-Type via its mimetype argument.
    """
    try:
        return send_from_directory('static/css', filename, mimetype='text/css')
    except OSError:
        abort(404)
# noinspection PyUnresolvedReferences
@bp.get("/sounds/<filename>")
def sound(filename):
    """Serve one of the whitelisted sound files from the configured directory."""
    if filename not in app.available_sounds:
        abort(404)
    return send_from_directory(app.sounds_path, filename)
@bp.get("/favicon.svg")
def favicon():
    """Serve the site logo SVG stored in the key-value table.

    Uses first_or_404 (consistent with about()) so a missing 'site_logo' row
    yields a 404 instead of an AttributeError / 500.
    """
    logo = KeyValue.query.filter_by(key="site_logo").first_or_404()
    return Response(logo.value, mimetype='image/svg+xml')
# @bp.route("/emrg")
# def emergency_user():
# kwargs = {
# "email": "test2@example.com",
# "password": "test"
# }
# used_ids = [value[0] for value in User.query.with_entities(User.id).all()]
# new_id = corha.rand_string(kwargs["email"], 16, used_ids)
# kwargs["id"] = new_id
# new_user = User(**kwargs, otp_secret=pyotp.random_base32())
# db.session.add(new_user)
# db.session.commit()
#
# return redirect(url_for("frontpage"))
@bp.route("/accessibility")
def accessibility():
    """Store theme / font-size accessibility choices in the session, then
    bounce the user back to the page they came from."""
    for option in ("theme", "fontsize"):
        choice = request.args.get(option)
        if choice is not None:
            session[option] = choice
    session.modified = True
    return redirect(request.referrer)
@bp.route("/tempest")
def tempest_redirect():
    """Hard-coded vanity redirect for 'The Tempest' show page."""
    # TODO: change this to allow configuring custom redirects through admin settings
    target = "/past-shows/L2hhNXIZPeXgGyY/the-tempest"
    return redirect(target)
| mattl1598/open-amdram-portal | webapp/routes.py | routes.py | py | 17,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "webapp.models.Show.query.filter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "webapp.models.Show.query",
"line_number": 64,
"usage_type": "attribute"
},
{
"api... |
31637033591 | import os
import json
import time
from S3utility.s3_notification_info import parse_activity_data
from provider import digest_provider, download_helper, email_provider, utils
from activity.objects import Activity
class activity_ValidateDigestInput(Activity):
"ValidateDigestInput activity"
    def __init__(self, settings, logger, client=None, token=None, activity_task=None):
        """Configure the activity: names, timeouts, working dirs and digest config."""
        super(activity_ValidateDigestInput, self).__init__(
            settings, logger, client, token, activity_task
        )
        self.name = "ValidateDigestInput"
        self.version = "1"
        # Task timeout settings, in seconds — presumably consumed by the
        # workflow scheduler via the base Activity class (confirm).
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60 * 30
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout = 60 * 5
        self.description = (
            "Download digest file input from the bucket, parse it, check for "
            + "valid data, and raise an error if it is invalid."
        )
        # Track some values populated later by do_activity:
        self.input_file = None  # local path of the downloaded digest input
        self.digest = None  # parsed digest object
        # Local directory settings
        self.directories = {
            "TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
            "INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
        }
        # Track the success of some steps
        self.statuses = {"build": None, "valid": None, "email": None}
        # Load the config
        self.digest_config = digest_provider.digest_config(
            self.settings.digest_config_section, self.settings.digest_config_file
        )
def do_activity(self, data=None):
"""
Activity, do the work
"""
if self.logger:
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
self.make_activity_directories()
# parse the data with the digest_provider
real_filename, bucket_name, bucket_folder = parse_activity_data(data)
# Download from S3
self.input_file = download_helper.download_file_from_s3(
self.settings,
real_filename,
bucket_name,
bucket_folder,
self.directories.get("INPUT_DIR"),
)
# Parse input and build digest
self.statuses["build"], self.digest = digest_provider.build_digest(
self.input_file,
self.directories.get("TEMP_DIR"),
self.logger,
self.digest_config,
)
# Approve files for emailing
self.statuses["valid"], error_messages = digest_provider.validate_digest(
self.digest
)
if not self.statuses.get("build") or not self.statuses.get("valid"):
# Send error email
self.statuses["email"] = self.email_error_report(
real_filename, error_messages
)
self.log_statuses(self.input_file)
return self.ACTIVITY_PERMANENT_FAILURE
self.log_statuses(self.input_file)
return True
def log_statuses(self, input_file):
"log the statuses value"
self.logger.info(
"%s for input_file %s statuses: %s"
% (self.name, str(input_file), self.statuses)
)
def email_error_report(self, filename, error_messages):
"send an email on error"
datetime_string = time.strftime(utils.DATE_TIME_FORMAT, time.gmtime())
body = email_provider.simple_email_body(datetime_string, error_messages)
subject = error_email_subject(filename)
sender_email = self.settings.digest_sender_email
recipient_email_list = email_provider.list_email_recipients(
self.settings.digest_validate_error_recipient_email
)
connection = email_provider.smtp_connect(self.settings, self.logger)
# send the emails
for recipient in recipient_email_list:
# create the email
email_message = email_provider.message(subject, sender_email, recipient)
email_provider.add_text(email_message, body)
# send the email
email_provider.smtp_send(
connection, sender_email, recipient, email_message, self.logger
)
return True
def error_email_subject(filename):
    """Return the subject line for a digest-validation error email.

    Bug fix: the template contained no ``{filename}`` placeholder, so the
    ``.format()`` call was a no-op and the filename never appeared in the
    subject line.
    """
    return "Error processing digest file: {filename}".format(filename=filename)
| elifesciences/elife-bot | activity/activity_ValidateDigestInput.py | activity_ValidateDigestInput.py | py | 4,401 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "activity.objects.Activity",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
33288174422 | import store
import os
import FTP_Cryptography
import bson
from bson.binary import Binary
import mongodb
from cryptography.fernet import Fernet
files = mongodb.db.files
FILE_DATA = store.FILE_DATA
INFO_FILE = store.INFO_FILE
def check_file_name(file_name):
    """Return True when *file_name* refers to an existing path on disk."""
    exists_on_disk = os.path.exists(file_name)
    return exists_on_disk
def send_file(socket, file_name):
    """Send a file over *socket*: first the size in bytes as an ASCII string,
    then, after the peer replies with any acknowledgement, the raw contents
    in 1 KiB chunks.

    Fixes vs. original: the file handle is closed via ``with`` (it leaked),
    and ``sendall`` is used so a partial ``send`` cannot silently drop data.
    """
    size = int(os.stat(file_name).st_size)
    socket.send(str(size).encode())
    socket.recv(1024)  # wait for the receiver's go-ahead before streaming
    with open(file_name, "rb") as file:
        chunk = file.read(1024)
        while chunk:
            socket.sendall(chunk)
            chunk = file.read(1024)
def recv_file(socket, file_name):
    """Receive a file over *socket* using the ``send_file`` protocol: read
    the ASCII size, acknowledge with "continue", then read exactly that many
    bytes into *file_name* (overwriting any existing file).

    Fixes vs. original: the output file is closed via ``with`` (it leaked)
    and short ``recv`` reads are handled by looping until all bytes arrive
    (a single recv(1024) is not guaranteed to return a full chunk).
    """
    size = int(socket.recv(1024).decode())
    socket.send("continue".encode())
    if os.path.exists(file_name):
        os.remove(file_name)
    with open(file_name, "wb") as file:
        remaining = size
        while remaining > 0:
            chunk = socket.recv(min(1024, remaining))
            if not chunk:
                break  # peer closed the connection early
            file.write(chunk)
            remaining -= len(chunk)
# UPLOAD -------------------------------------------------
def send_file_upload(socket, file_name, cryptography):
    """Send a local file for upload: first an INFO_FILE header carrying the
    size and cryptography flag (serialised by ``store.send_repr``), then the
    raw bytes in 1 KiB chunks after the peer acknowledges.

    Fixes vs. original: the file handle is closed via ``with`` (it leaked),
    and ``sendall`` is used so a partial ``send`` cannot drop data.
    """
    size = int(os.stat(file_name).st_size)
    INFO_FILE["size"] = size
    INFO_FILE["cryptography"] = cryptography
    socket.send(store.send_repr(INFO_FILE))
    socket.recv(1024)  # wait for the receiver's go-ahead
    with open(file_name, "rb") as file:
        data = file.read(1024)
        while data:
            socket.sendall(data)
            data = file.read(1024)
def recv_file_upload(socket, file_name):
    """Receive an uploaded file and store it in MongoDB via insert_file.

    Protocol: read the INFO_FILE header (size + cryptography flag via
    store.recv_repr), acknowledge with "start", then read the payload.
    """
    MSG = socket.recv(1024)
    MSG = store.recv_repr(MSG)
    size = MSG["size"]
    cryptography = MSG["cryptography"]
    socket.send("start".encode())
    # NOTE(review): this loop assumes every recv(1024) returns a full 1024
    # bytes; TCP gives no such guarantee, so short reads would corrupt the
    # byte count -- confirm against the sender's chunking before relying on it.
    time_loop = int(size / 1024)
    file = b''
    data = b''
    i = 1
    while i <= time_loop:
        # Binary() wraps the chunk for BSON storage; the concatenation below
        # still yields plain bytes (and the final chunk is appended unwrapped).
        data = Binary(socket.recv(1024))
        file = file + data
        i = i + 1
    last = size - time_loop*1024
    data = socket.recv(last)
    file = file + data
    insert_file(file_name, file, size, cryptography)
    return
def insert_file(file_name, file, size, cryptography):
    """Store a file document in MongoDB, replacing any document that already
    exists under the same filename."""
    global FILE_DATA
    existing = check_file_name_db(file_name)
    if existing != False:
        files.delete_one({"filename": file_name})
    FILE_DATA = {
        "_id": file_name,
        "filename": file_name,
        "file": file,
        "size": size,
        "cryptography": cryptography,
    }
    files.insert_one(FILE_DATA)
# DOWNLOAD -------------------------------------------------
def check_file_name_db(filename):
    """Return the stored file document matching *filename*, or False when no
    document exists (the last match wins if several share the name)."""
    matched = {}
    for document in files.find({"filename": filename}):
        matched = document
    if matched != {}:
        return matched
    return False
def send_file_download(socket, file_name, cryptography):
    """Send a stored file to the client: INFO_FILE header first, then the
    raw stored bytes once the peer acknowledges. Silently does nothing when
    the file is not present in the database."""
    document = check_file_name_db(file_name)
    if document == False:
        return
    INFO_FILE["size"] = document["size"]
    INFO_FILE["cryptography"] = document["cryptography"]
    socket.send(store.send_repr(INFO_FILE))
    socket.recv(1024)  # wait for the receiver's go-ahead
    socket.send(document['file'])
def recv_file_download(socket, file_name):
    """Receive a downloaded file: read the INFO_FILE header, acknowledge
    with "start", read exactly ``size`` bytes, decrypt them with the stored
    Fernet key when the cryptography flag is set, and write the result to
    *file_name*.

    Fixes vs. original: a single ``recv(size)`` is not guaranteed to return
    all ``size`` bytes, so the payload is read in a loop; the output file is
    closed via ``with`` (it leaked).
    """
    info = store.recv_repr(socket.recv(1024))
    size = info["size"]
    cryptography = info["cryptography"]
    socket.send("start".encode())
    if os.path.exists(file_name):
        os.remove(file_name)
    chunks = []
    remaining = size
    while remaining > 0:
        chunk = socket.recv(remaining)
        if not chunk:
            break  # peer closed the connection early
        chunks.append(chunk)
        remaining -= len(chunk)
    file = b''.join(chunks)
    if cryptography == True:
        key = FTP_Cryptography.load_key()
        f = Fernet(key)
        data = f.decrypt(file)
    else:
        data = file
    with open(file_name, "wb") as save_file:
        save_file.write(data)
| boom-chill/Chat-app-MMT-DA | file.py | file.py | py | 3,796 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mongodb.db",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "store.FILE_DATA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "store.INFO_FILE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.exis... |
10451723483 | import os
import maya.cmds as cmds
from functools import partial
import Core
reload(Core)
class UIContainer():
    """Widget container for the viewport freeze/unfreeze toolbar button."""

    # Class-level flag: True while viewport refresh is suspended.
    frozen = False

    # NOTE(review): `build` takes no `self`/`cls` and is not decorated with
    # @staticmethod -- confirm the intended call convention; calling it on an
    # instance would pass the instance as `parent`.
    def build(parent,
              imagesPath,
              iconSize=25,
              height=20,
              marginSize=5):
        """
        build widget
        @param parent : parent layout in maya
        @imagesPath : str path
        """
        # - Freeze Viewport---------------------------------------------------------------
        cmds.rowLayout(numberOfColumns=1, parent=parent)
        cmds.iconTextButton(style='iconOnly',
                            image1=os.path.join(imagesPath, 'freeze.png'),
                            hi=os.path.join(imagesPath, 'freeze_hi.png'),
                            width=iconSize, mw=marginSize, height=iconSize, mh=marginSize,
                            label='freeze',
                            annotation='Freeze / unFreeze viewport',
                            c=freeze)
        cmds.setParent("..")
def freeze(*args):
    """Toggle viewport refresh: suspend it when active, resume it when frozen."""
    currently_frozen = UIContainer.frozen
    cmds.refresh(su=not currently_frozen)
    print('Viewport is unfrozen' if currently_frozen else 'Viewport is frozen')
    UIContainer.frozen = not currently_frozen
| darkuress/animBuddy | animBuddy/ViewportRefresh/Widget.py | Widget.py | py | 1,205 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "maya.cmds.rowLayout",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "maya.cmds.iconTextButton",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
... |
25136656838 | import time
from bs4 import BeautifulSoup
import requests
from requests import Session
from Extractor.extractor import EbayKleinanzeigenExtractor
import json
from print_dict import pd
class Cookies:
def __init__(self, filename: str = "default.json", log: bool = True,
             cookies: dict = None, save=False, mode: str = "client", keep_old=False):
    """Build the cookie store.

    :param filename: JSON file used to persist browser-style cookies
    :param cookies: plain name->value dict (client mode only)
    :param mode: "client" converts `cookies` into browser-style entries;
        "server" loads them from `filename`
    :param keep_old: server mode only -- skip dropping expired cookies
    """
    self.log = log
    self.mode = mode
    self.save = save
    self.filename = filename
    self.googleChromeCookie = []  # list of browser-style cookie dicts
    self.request_cookies = dict()  # flat name -> value mapping for requests
    # deploy_mode is hard-coded to "server", so only the last branch is live;
    # the other domains are kept for switching deployments by hand.
    deploy_mode = "server"
    if deploy_mode == "online":
        self.cookie_domain = ".ebay-zakir-1996.onrender.com"
    elif deploy_mode == "offline":
        self.cookie_domain = ".ebay-kleinanzeigen-zakir.de"
    elif deploy_mode == "mobile":
        self.cookie_domain = ".192.168.151.149"
    elif deploy_mode == "server":
        self.cookie_domain = ".kleinanzeigen.de"
    # self.ebay_url = "https://www.ebay-kleinanzeigen.de/"
    # user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:108.0) Gecko/20100101 Firefox/108.0"
    # self.headers = {'User-Agent': user_agent, 'Connection': 'keep-alive',
    #                 'Accept-Encoding': 'gzip, deflate',
    #                 'Accept': '*/*'}
    if mode == "client":
        if cookies:
            self.request_cookies = cookies.copy()
            self.load_google_chrome_cookies_from_request(log=log)
    elif mode == "server":
        self.load_google_chrome_cookies_from_file(log=log)
        # NOTE(review): the refresh is placed in the server branch here --
        # request-built cookies carry expirationDate == "" which would make
        # refresh crash on int(""); confirm this nesting matches the intent.
        if not keep_old:
            print("refreshing cookies")
            self.refresh_google_chrome_cookies()
    # Mirror the (possibly refreshed) browser cookies into the flat mapping.
    for cook in self.googleChromeCookie:
        # print(cook['name'])
        self.request_cookies[cook['name']] = cook['value']
###############################################
# load google chrome cookies
###############################################
def load_google_chrome_cookies_from_file(self, log=True):
with open(self.filename, 'r') as f:
self.googleChromeCookie = json.load(f)
# for cook in self.googleChromeCookie:
# if "kleinanzeigen.de" not in cook.get("domain"):
# self.googleChromeCookie.remove(cook)
# if log:
# print("Loading default config")
###############################################
# load google chrome cookies
###############################################
def load_google_chrome_cookies_from_request(self, log=True):
for name, value in self.request_cookies.items():
cook = dict(name=name, value=value, path="/",
domain=self.cookie_domain,
expirationDate="")
self.googleChromeCookie.append(cook)
self.save_cookies()
###############################################
# saving cookies to a file
###############################################
def save_cookies(self):
f = open(self.filename, "w")
f.write(json.dumps(self.googleChromeCookie))
f.close()
################################################################
# removing expired cookies bevor sending request (mode == server
################################################################
def refresh_google_chrome_cookies(self, log=True):
for cook in self.googleChromeCookie:
# cook['domain'] = self.cookie_domain
index = 0
if cook.get('expirationDate') is not None:
# print("with expiration ",cook.get("name"))
rem = int(cook.get('expirationDate')) - time.time()
if rem <= 0:
self.googleChromeCookie.remove(cook)
if log:
print(cook['name'] + " was removed from cookies")
else:
pass
# self.googleChromeCookie.remove(cook)
# if log:
# print(cook['name'] + " was removed from cookies")
pass
pass
###############################################
# setting
###############################################
def set_cookies(self, session: Session):
    """Merge the cookies of a requests Session into this store.

    Converts each session cookie into a browser-style dict, replaces any
    stored cookie with the same name, drops expired entries, refreshes the
    flat request mapping, and persists the result.
    """
    # for co in session.cookies:
    #     for key , value in co.items()
    #     c.
    # Browser-style snapshots of the session's cookiejar entries.
    self.cookies_temp = [
        {'name': c.name, 'secure': c.secure, 'hostOnly': True,
         'httpOnly': False, 'value': c.value, "sameSite": "unspecified",
         'expirationDate': c.expires, "session": c.expires is None, "storeId": 0,
         'domain': c.domain, 'path': c.path}
        for c in session.cookies]
    for cook in self.cookies_temp:
        self.request_cookies[cook['name']] = cook['value']
    found = False
    for cook in self.cookies_temp:
        # NOTE(review): the inner loop removes from (and the branch appends
        # to) the very list being iterated -- Python may skip elements after
        # a removal, so duplicate names might survive; confirm intent.
        for cook2 in self.googleChromeCookie:
            if cook['name'] == cook2['name']:
                found = True
                self.googleChromeCookie.remove(cook2)
                self.googleChromeCookie.append(cook)
        if not found:
            # if cook['expirationDate']:
            self.googleChromeCookie.append(cook)
        found = False
    self.refresh_google_chrome_cookies(self.log)
    # Rebuild the flat mapping from the merged, refreshed cookie list.
    for cook in self.googleChromeCookie:
        self.request_cookies[cook['name']] = cook['value']
    self.save_cookies()
###############################################
# resetting cookies
###############################################
def reset_cookies(self):
    """Reset all cookie state.

    NOTE(review): the bare ``return`` below disables this method entirely --
    everything after it is unreachable dead code. Confirm whether the reset
    was switched off on purpose before re-enabling or deleting it.
    """
    return
    self.request_cookies = {}
    self.googleChromeCookie = []
    self.save_cookies()
def print_request_cookies(self):
# for key,value in self.request_cookies.items():
# print(key,value)
for cook in self.googleChromeCookie:
print(cook['name'],cook['domain'])
def remove_specific_cookies(self):
un_wanted_cookies = ["ak_bmsc","_gat","bm_sv"]
for unw_cook in un_wanted_cookies:
if self.request_cookies.get(unw_cook):
print("removing",unw_cook)
self.request_cookies.pop(unw_cook)
for cook in self.googleChromeCookie:
for unw_cook in un_wanted_cookies:
if cook['name'] == unw_cook:
print("removing 2", unw_cook)
self.googleChromeCookie.remove(cook) | zakir0101/ebay-kleineanzeigen-api | Cookies/cookies.py | cookies.py | py | 6,574 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": ... |
41543948505 | from flask import Blueprint, request, jsonify, json
from db import db, app, ma
from flask import Flask, redirect, request, jsonify, json, session, render_template
from model.turnos import turnos, turnosSchema
routes_turnos = Blueprint("routes_turnos", __name__)
turno_schema = turnosSchema()
turnos_schema = turnosSchema(many=True)
@routes_turnos.route('/turnos', methods=['GET'])
def obtenerturnos():
    """Return every row of the turnos table as a JSON list."""
    todos = turnos.query.all()
    return jsonify(turnos_schema.dump(todos))
@routes_turnos.route('/saveturnos', methods=['POST'])
def guardar_turnos():
    """Create a turno row from the POSTed JSON and redirect to /turnos."""
    # NOTE(review): indexing request.json with a tuple
    # ('id_vehiculo','id_rol',...) looks up ONE tuple-valued key, which
    # raises KeyError at runtime. Each field should be read individually and
    # passed to the turnos() constructor -- confirm its expected arguments.
    newturnos = request.json['id_vehiculo','id_rol','Hora_inicio','Hora_Fin']
    new_tur = turnos(newturnos)
    db.session.add(new_tur)
    db.session.commit()
    return redirect('/turnos')
@routes_turnos.route('/eliminarturnos/<id>', methods=['GET'])
def eliminartur(id):
    """Delete the turno with primary key *id* and return the deleted row as JSON."""
    borrado = turnos.query.get(id)
    db.session.delete(borrado)
    db.session.commit()
    return jsonify(turnos_schema.dump(borrado))
@routes_turnos.route('/actualizarturnos', methods=['POST'])
def actualizartur():
    """Update a turno from the POSTed JSON and redirect to /turnos."""
    id = request.json['id']
    # NOTE(review): tuple-key lookup raises KeyError at runtime; the fields
    # should be read individually (same bug as in guardar_turnos).
    tur = request.json['id_vehiculo','id_rol','Hora_inicio','Hora_Fin']
    pusuario = turnos.query.get(id)
    # NOTE(review): assigns `Nombre_turnos`, which does not match the
    # vehicle/role/hour fields being read above -- verify against the model.
    pusuario.Nombre_turnos = tur
    db.session.commit()
    return redirect('/turnos')
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "model.turnos.turnosSchema",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "model.turnos.turnosSchema",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mode... |
14346415777 | from random import randint
import math
from PIL import Image, ImageDraw, ImageFont
# Did you know that calculating the correct font size
# is NP-Hard? This function is for when they will solve
# P = NP?
def calculate_font_size(string, image_width):
    """Placeholder: ignores its arguments and returns the fixed FONT_SIZE.

    Kept as a hook so a real size computation can be dropped in later.
    """
    global FONT_SIZE
    return FONT_SIZE
def pad_string(text, how_many):
    """Return *text* with `how_many` spaces inserted at random interior
    positions (positions are drawn against the original length)."""
    original_length = len(text)
    for _ in range(how_many):
        cut = randint(1, original_length - 1)
        text = text[:cut] + ' ' + text[cut:]
    return text
def generate_strings(text, how_many):
    """Return `how_many` copies of *text*, each with one '/' or '_' inserted
    at a random, previously unused position (position 0 excluded at first;
    the used-position list resets once every slot has been taken)."""
    used_positions = [0]  # never insert before the very first character initially
    results = []
    for _ in range(how_many):
        pos = randint(0, len(text))
        if len(used_positions) == len(text):
            used_positions = [0]
        while pos in used_positions:
            pos = randint(0, len(text))
        used_positions.append(pos)
        glyph = ['/', '_'][randint(0, 1)]
        results.append(text[:pos] + glyph + text[pos:])
    return results
def create_image(strings, colors, size, background):
    """Render each string on its own line over *background*, cycling through
    *colors*, and return the resulting RGB image."""
    font_size = calculate_font_size(strings[0], size)
    line_height = 5
    canvas = Image.new('RGB', size)
    canvas.paste(background)
    pen = ImageDraw.Draw(canvas)
    font = ImageFont.truetype('arial.ttf', font_size)
    for idx, line in enumerate(strings):
        pen.text(
            (0, (font_size + line_height) * idx),
            line,
            fill=colors[idx % len(colors)],
            font=font,
        )
    return canvas
# Size of the output image
IMAGE_SIZE = (256, 256)
# Use 34 if you want to write 'IN RAINBOWS'
FONT_SIZE = 34
# These are the colors of the original album art
COLORS = [
    (245, 230, 70),  # yellow
    (70, 138, 200),  # blue
    (237, 104, 42),  # orange
    (64, 185, 74),  # green
    (234, 173, 30),  # darker yellow
    (227, 34, 46),  # red
    (158, 222, 232)  # white/very light blue
]
main_text = "IN RAINBOWS"
lower_text = "RADIOHEAD"
# Pad the shorter line with random spaces so both lines end up equally long.
if len(main_text) > len(lower_text):
    lower_text = pad_string(lower_text, len(main_text) - len(lower_text))
else:
    main_text = pad_string(main_text, len(lower_text) - len(main_text))
# Four randomized variants of the title, two of the artist name, drawn over
# the background template.
image = create_image(
    generate_strings(main_text, 4) + generate_strings(lower_text, 2),
    COLORS,
    IMAGE_SIZE,
    Image.open('background.png', mode='r')
)
image.save("output.png")
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.randint",
"li... |
14990079057 | from datetime import datetime
import cv2 as cv
import matplotlib.pyplot as plt
def perform_sift_performant(query, train):
    """Detect SIFT features in both images, match them with a FLANN KD-tree
    matcher, keep the pairs passing Lowe's 0.6 ratio test, print the elapsed
    matching time, and display the match visualisation."""
    start = datetime.now()
    query_img = cv.imread(query, cv.IMREAD_GRAYSCALE)  # queryImage
    train_img = cv.imread(train, cv.IMREAD_GRAYSCALE)  # trainImage
    # Initiate SIFT detector and compute keypoints + descriptors
    detector = cv.SIFT_create()
    kp1, des1 = detector.detectAndCompute(query_img, None)
    kp2, des2 = detector.detectAndCompute(train_img, None)
    # FLANN parameters: KD-tree index with 5 trees, 50 checks per query
    FLANN_INDEX_KDTREE = 1
    flann = cv.FlannBasedMatcher(
        dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
        dict(checks=50),
    )
    matches = flann.knnMatch(des1, des2, k=2)
    # Mask marking which knn pairs survive the ratio test (for drawing only)
    matchesMask = [[0, 0] for _ in range(len(matches))]
    for idx, (best, second) in enumerate(matches):
        if best.distance < 0.6 * second.distance:
            matchesMask[idx] = [1, 0]
    print(datetime.now() - start)
    visualisation = cv.drawMatchesKnn(
        query_img, kp1, train_img, kp2, matches, None,
        matchColor=(0, 255, 0),
        singlePointColor=(255, 0, 0),
        matchesMask=matchesMask,
        flags=cv.DrawMatchesFlags_DEFAULT,
    )
    plt.imshow(visualisation)
    plt.show()
| Mini-Sylar/Fingerprint-Matching-System | Algorithms/SIFT/Performant_SIFT.py | Performant_SIFT.py | py | 1,399 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE"... |
9269156525 | import os
import json
import requests
class app_info_mode():
    """Application configuration holder for translate/robot modes.

    Credentials and the robot token are cached in ``info.json`` next to this
    source file; equal appid/seckey placeholders mean "not configured yet".

    Fixes vs. original: every file handle is now closed via ``with`` (the
    token_get handle leaked entirely), and the char-by-char JSON formatting
    loop is replaced by an equivalent single ``str.replace``.
    """

    def __init__(self):
        # Directory of this source file; info.json lives next to it.
        self.path = os.path.abspath(os.path.dirname(__file__))
        self.mode = "translate"
        self.print_format = "t_mode"
        self.version = "v2.0"
        self.fromlang = 'en'
        self.tolang = 'zh'

    @staticmethod
    def _dump_info(info_dict):
        """Serialize *info_dict* as JSON with a newline after every comma --
        the human-readable on-disk layout used by info.json."""
        return json.dumps(info_dict).replace(',', ',\n')

    def translate_init(self):
        """Load appid/seckey from info.json; when they are still the unset
        placeholders (appid == seckey), prompt for them and persist."""
        with open(self.path + "/info.json", "r") as fd:
            info_dict = json.load(fd)
        info_private_appid = info_dict["private"]["appid"]
        info_private_seckey = info_dict["private"]["seckey"]
        if info_private_appid == info_private_seckey:
            self.appid = input("[translate mode]set the appid:")
            self.seckey = input("[translate mode]set the seckey:")
            info_dict["private"]["appid"] = self.appid
            info_dict["private"]["seckey"] = self.seckey
            with open(self.path + "/info.json", "w") as fd:
                fd.write(self._dump_info(info_dict))
        else:
            self.appid = info_private_appid
            self.seckey = info_private_seckey

    def robot_token_update(self):
        """Prompt for API credentials, fetch a fresh Baidu OAuth token, and
        cache it in info.json."""
        api_id = input("[robot mode]set api_id:")
        seckey = input("[robot mode]set seckey:")
        url = "https://aip.baidubce.com/oauth/2.0/token"
        req_url = url + f"?grant_type=client_credentials&client_id={api_id}&client_secret={seckey}"
        payload = ""
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        response = requests.request("POST", req_url, headers=headers, data=payload)
        response = json.loads(response.text)
        # NOTE(review): a missing "access_token" key raises KeyError here
        # (original behavior, kept); the API returns an error body on failure.
        if response["access_token"]:
            self.token = response["access_token"]
            with open(self.path + "/info.json", 'r') as fd:
                info_dict = json.load(fd)
            info_dict["token"] = response["access_token"]
            with open(self.path + "/info.json", 'w') as fd:
                fd.write(self._dump_info(info_dict))

    def token_get(self):
        """Read the cached token from info.json; if it is still the "none"
        placeholder, fetch a new one via robot_token_update."""
        with open(self.path + "/info.json", 'r') as fd:
            info_dict = json.load(fd)
        if info_dict["token"] == "none":
            self.robot_token_update()
        else:
            self.token = info_dict["token"]
if __name__ == "__main__":
    app_info = app_info_mode()
    # NOTE(review): __init__ never sets appid/seckey -- they are only
    # assigned by translate_init() -- so this print raises AttributeError.
    # Presumably app_info.translate_init() should be called first; confirm.
    print(app_info.appid, app_info.seckey)
| ZZP-DMU/tub | init.py | init.py | py | 3,115 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_numbe... |
14386114791 | '''
47. 이진 트리 직렬화 & 역직렬화
이진트리를 배열로 직렬화하고, 반대로 역직렬화하는 기능을 구현하라.
https://leetcode.com/problems/serialize-and-deserialize-binary-tree/
'''
from collections import deque
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Codec:
    """BFS (level-order) serializer/deserializer for binary trees.

    The serialized form is a list starting with a -1 sentinel, followed by
    node values in level order with None marking absent children; trailing
    None entries are trimmed.
    """

    def serialize(self, root):
        """Encode the tree rooted at *root* into a flat list."""
        pending = deque([root])
        encoded = [-1]
        while pending:
            node = pending.popleft()
            if node is None:
                encoded.append(None)
            else:
                encoded.append(node.val)
                pending.append(node.left)
                pending.append(node.right)
        while encoded[-1] is None:
            encoded.pop()
        return encoded

    def deserialize(self, data):
        """Rebuild and return the tree encoded by :meth:`serialize`."""
        if len(data) == 1:
            return None
        values = deque(data[1:])
        root = TreeNode(values.popleft())
        pending = deque([root])
        while pending:
            node = pending.popleft()
            if node and values:
                left_val = values.popleft()
                node.left = TreeNode(left_val) if left_val is not None else None
                pending.append(node.left)
            if node and values:
                right_val = values.popleft()
                node.right = TreeNode(right_val) if right_val is not None else None
                pending.append(node.right)
        return root
if __name__ == '__main__':
    # Build the sample tree from the problem statement.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.right.left = TreeNode(4)
    root.right.right = TreeNode(5)
    root.right.left.left = TreeNode(6)
    root.right.left.right = TreeNode(7)
    ser = Codec()
    deser = Codec()
    # Print the encoded list, then the tree object rebuilt from it.
    print(ser.serialize(root))
    print(deser.deserialize(ser.serialize(root)))
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 39,
"usage_type": "call"
}
] |
# Polynomial Linear Regression
# 1) Importing Libraries
import numpy as np  # mathematics functions
import matplotlib.pyplot as plt  # plots and charts
import pandas as pd  # dataset management and import
import seaborn as sns  # covariance matrix
# 2) Importing dataset
poli_dataset = pd.read_csv('Position_Salaries.csv')
X = poli_dataset.iloc[:,1:2].values  # feature matrix used for prediction [rows, columns]
Y = poli_dataset.iloc[:,2].values  # target vector to be predicted [rows, columns]
# poli_dataset.iloc[:,1:2].values -- 1:2 keeps X two-dimensional (a matrix);
# the upper bound is excluded.
# 3) Taking care of missing data (replace missing values with the column mean)
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 -- modern code uses sklearn.impute.SimpleImputer; confirm the pinned
# scikit-learn version before running.
from sklearn.preprocessing import Imputer  # class handling missing values
imputer = Imputer(missing_values = 'NaN' , strategy = 'mean' , axis = 0)  # configure the imputer
imp_1 = imputer.fit(X[:, 0:1])  # fit the imputer to column 0 of X
X[:, 0:1] = imp_1.transform(X[:, 0:1])  # fill missing values in place
"""
# 4) Categorical Data and Encoding it - in our case STATE is categorical variable
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder_X = LabelEncoder()
startup_X[:, 3] = labelencoder_X.fit_transform(startup_X[:, 3])
#X = np.array(X,dtype= 'int64')
#Here we are splitting categorial variable states in column as no of catogories - no of columns
onehotencoder = OneHotEncoder(categorical_features = [3])
startup_X = onehotencoder.fit_transform(startup_X).toarray() # Dummy variable is created against state
#labelencoder_Y = LabelEncoder()
#Y = labelencoder_Y.fit_transform(Y)
# 5) Avoiding Dummy Variable Trap
# We always remove 1 column from dummy variable like in our case we generated three columns and dummy_var = dn-1
startup_X = startup_X[:, 1:]
"""
# NOTE: the dataset is very small, so the full data is used for fitting;
# the train/test split below is computed but not used by the models.
# 6) Split data into train and test
# NOTE(review): sklearn.cross_validation was removed -- modern code imports
# train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
X_train , X_test , Y_train, Y_test = train_test_split(X,Y , test_size = 0.2 , random_state=0)  # test_size can be 0.3 or 1/3
# Plot the raw data to see the curve shape
plt.scatter(X , Y, color = 'red')
plt.title('Polinomial Regression')
plt.xlabel('Position')
plt.ylabel('Salary')
plt.show()
# 6) Fitting linear regression to the dataset (only for comparison)
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,Y)
# 7) Fitting polynomial regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
lin_reg2 = LinearRegression()
lin_reg2.fit(X_poly,Y)
# 8) Visualization
# Linear regression fit -- clearly underfits this data
plt.scatter(X, Y, color = 'red')
plt.plot(X,lin_reg.predict(X) ,color = 'blue')
plt.title('Linear Regression')
plt.xlabel('Exp position')
plt.ylabel('Salary')
plt.show()
# Polynomial regression fit
plt.scatter(X, Y, color = 'red')
plt.plot(X,lin_reg2.predict(X_poly) ,color = 'blue')
plt.title('Polynimial Regression')
plt.xlabel('Exp position')
plt.ylabel('Salary')
plt.show()
# Polynomial regression on a fine 0.1-step grid for a smoother curve
x_grid = np.arange(min(X),max(X),0.1)
x_grid = x_grid.reshape(len(x_grid),1)
plt.scatter(X, Y, color = 'red')
plt.plot(x_grid,lin_reg2.predict(poly_reg.fit_transform(x_grid)) ,color = 'blue')
plt.title('Polynimial Regression')
plt.xlabel('Exp position')
plt.ylabel('Salary')
plt.show()
# 9) Prediction results
# NOTE(review): predict(6.5) passes a scalar; recent scikit-learn requires a
# 2-D array such as [[6.5]] -- this only works on very old versions.
# Linear regression predictions
pos_pred = lin_reg.predict(X)
pos_pred1 = lin_reg.predict(6.5)
# Polynomial regression predictions
pos_pred2 = lin_reg2.predict(X_poly)
pos_pred3 = lin_reg2.predict(poly_reg.fit_transform(6.5))
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Imputer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.train_test_split",
"line_number": 52,
"usage_type": "call"
},
... |
25140299013 | import inspect
import logging
import ssl
import signal
from pathlib import Path
from queue import Queue
from threading import Event, Lock, Thread, current_thread
from time import sleep
from typing import (
Any,
Callable,
List,
Optional,
Tuple,
Union,
no_type_check,
Generic,
TypeVar,
TYPE_CHECKING,
)
from telegram.error import InvalidToken, RetryAfter, TimedOut, Forbidden, TelegramError
from telegram._utils.warnings import warn
from telegram.ext import Dispatcher
from telegram.ext._utils.webhookhandler import WebhookAppClass, WebhookServer
from telegram.ext._utils.stack import was_called_by
from telegram.ext._utils.types import BT
if TYPE_CHECKING:
from telegram.ext._builders import InitUpdaterBuilder
DT = TypeVar('DT', bound=Union[None, Dispatcher])
class Updater(Generic[BT, DT]):
"""
This class, which employs the :class:`telegram.ext.Dispatcher`, provides a frontend to
:class:`telegram.Bot` to the programmer, so they can focus on coding the bot. Its purpose is to
receive the updates from Telegram and to deliver them to said dispatcher. It also runs in a
separate thread, so the user can interact with the bot, for example on the command line. The
dispatcher supports handlers for different kinds of data: Updates from Telegram, basic text
commands and even arbitrary types. The updater can be started as a polling service or, for
production, use a webhook to receive updates. This is achieved using the WebhookServer and
WebhookHandler classes.
Note:
This class may not be initialized directly. Use :class:`telegram.ext.UpdaterBuilder` or
:meth:`builder` (for convenience).
.. versionchanged:: 14.0
* Initialization is now done through the :class:`telegram.ext.UpdaterBuilder`.
* Renamed ``user_sig_handler`` to :attr:`user_signal_handler`.
* Removed the attributes ``job_queue``, and ``persistence`` - use the corresponding
attributes of :attr:`dispatcher` instead.
Attributes:
bot (:class:`telegram.Bot`): The bot used with this Updater.
user_signal_handler (:obj:`function`): Optional. Function to be called when a signal is
received.
.. versionchanged:: 14.0
Renamed ``user_sig_handler`` to ``user_signal_handler``.
update_queue (:obj:`Queue`): Queue for the updates.
dispatcher (:class:`telegram.ext.Dispatcher`): Optional. Dispatcher that handles the
updates and dispatches them to the handlers.
running (:obj:`bool`): Indicates if the updater is running.
exception_event (:class:`threading.Event`): When an unhandled exception happens while
fetching updates, this event will be set. If :attr:`dispatcher` is not :obj:`None`, it
is the same object as :attr:`telegram.ext.Dispatcher.exception_event`.
.. versionadded:: 14.0
"""
__slots__ = (
'dispatcher',
'user_signal_handler',
'bot',
'logger',
'update_queue',
'exception_event',
'last_update_id',
'running',
'is_idle',
'httpd',
'__lock',
'__threads',
)
def __init__(
self: 'Updater[BT, DT]',
*,
user_signal_handler: Callable[[int, object], Any] = None,
dispatcher: DT = None,
bot: BT = None,
update_queue: Queue = None,
exception_event: Event = None,
):
if not was_called_by(
inspect.currentframe(), Path(__file__).parent.resolve() / '_builders.py'
):
warn(
'`Updater` instances should be built via the `UpdaterBuilder`.',
stacklevel=2,
)
self.user_signal_handler = user_signal_handler
self.dispatcher = dispatcher
if self.dispatcher:
self.bot = self.dispatcher.bot
self.update_queue = self.dispatcher.update_queue
self.exception_event = self.dispatcher.exception_event
else:
self.bot = bot
self.update_queue = update_queue
self.exception_event = exception_event
self.last_update_id = 0
self.running = False
self.is_idle = False
self.httpd = None
self.__lock = Lock()
self.__threads: List[Thread] = []
self.logger = logging.getLogger(__name__)
@staticmethod
def builder() -> 'InitUpdaterBuilder':
    """Convenience method. Returns a new :class:`telegram.ext.UpdaterBuilder`.

    .. versionadded:: 14.0
    """
    # Unfortunately this needs to be here due to cyclical imports
    from telegram.ext import UpdaterBuilder  # pylint: disable=import-outside-toplevel

    return UpdaterBuilder()
def _init_thread(self, target: Callable, name: str, *args: object, **kwargs: object) -> None:
    """Start a worker thread running *target* via :meth:`_thread_wrapper`.

    The thread is named ``Bot:<bot id>:<name>`` for log readability and is
    remembered so :meth:`_join_threads` can wait for it on shutdown.
    """
    thread_name = f"Bot:{self.bot.id}:{name}"
    worker = Thread(
        target=self._thread_wrapper,
        name=thread_name,
        args=(target, *args),
        kwargs=kwargs,
    )
    worker.start()
    self.__threads.append(worker)
def _thread_wrapper(self, target: Callable, *args: object, **kwargs: object) -> None:
    """Run *target* inside a worker thread, surfacing unhandled exceptions.

    On an unhandled exception the shared :attr:`exception_event` is set and
    the exception is logged, then re-raised so the thread dies visibly.
    """
    name = current_thread().name
    self.logger.debug('%s - started', name)
    try:
        target(*args, **kwargs)
    except Exception:
        # Signal the main thread that a background worker crashed.
        self.exception_event.set()
        self.logger.exception('unhandled exception in %s', name)
        raise
    else:
        self.logger.debug('%s - ended', name)
def start_polling(
    self,
    poll_interval: float = 0.0,
    timeout: float = 10,
    bootstrap_retries: int = -1,
    read_timeout: float = 2.0,
    allowed_updates: List[str] = None,
    drop_pending_updates: bool = None,
) -> Optional[Queue]:
    """Starts polling updates from Telegram.

    .. versionchanged:: 14.0
        Removed the ``clean`` argument in favor of ``drop_pending_updates``.

    Args:
        poll_interval (:obj:`float`, optional): Time to wait between polling updates from
            Telegram in seconds. Default is ``0.0``.
        timeout (:obj:`float`, optional): Passed to :meth:`telegram.Bot.get_updates`.
        drop_pending_updates (:obj:`bool`, optional): Whether to clean any pending updates on
            Telegram servers before actually starting to poll. Default is :obj:`False`.

            .. versionadded :: 13.4
        bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
            :class:`telegram.ext.Updater` will retry on failures on the Telegram server.

            * < 0 - retry indefinitely (default)
            *   0 - no retries
            * > 0 - retry up to X times
        allowed_updates (List[:obj:`str`], optional): Passed to
            :meth:`telegram.Bot.get_updates`.
        read_timeout (:obj:`float` | :obj:`int`, optional): Grace time in seconds for receiving
            the reply from server. Will be added to the ``timeout`` value and used as the read
            timeout from server (Default: ``2``).

    Returns:
        :obj:`Queue`: The update queue that can be filled from the main thread, or
            :obj:`None` if the updater was already running.
    """
    with self.__lock:
        if not self.running:
            self.running = True
            # Create & start threads
            dispatcher_ready = Event()
            polling_ready = Event()
            if self.dispatcher:
                self._init_thread(self.dispatcher.start, "dispatcher", ready=dispatcher_ready)
            self._init_thread(
                self._start_polling,
                "updater",
                poll_interval,
                timeout,
                read_timeout,
                bootstrap_retries,
                drop_pending_updates,
                allowed_updates,
                ready=polling_ready,
            )
            # Block until both background threads signal readiness, so a
            # caller can rely on polling being active when this returns.
            self.logger.debug('Waiting for polling to start')
            polling_ready.wait()
            if self.dispatcher:
                self.logger.debug('Waiting for Dispatcher to start')
                dispatcher_ready.wait()
            # Return the update queue so the main thread can insert updates
            return self.update_queue
        return None
def start_webhook(
    self,
    listen: str = '127.0.0.1',
    port: int = 80,
    url_path: str = '',
    cert: str = None,
    key: str = None,
    bootstrap_retries: int = 0,
    webhook_url: str = None,
    allowed_updates: List[str] = None,
    drop_pending_updates: bool = None,
    ip_address: str = None,
    max_connections: int = 40,
) -> Optional[Queue]:
    """
    Starts a small http server to listen for updates via webhook. If :attr:`cert`
    and :attr:`key` are not provided, the webhook will be started directly on
    http://listen:port/url_path, so SSL can be handled by another
    application. Else, the webhook will be started on
    https://listen:port/url_path. Also calls :meth:`telegram.Bot.set_webhook` as required.

    .. versionchanged:: 13.4
        :meth:`start_webhook` now *always* calls :meth:`telegram.Bot.set_webhook`, so pass
        ``webhook_url`` instead of calling ``updater.bot.set_webhook(webhook_url)`` manually.

    .. versionchanged:: 14.0
        Removed the ``clean`` argument in favor of ``drop_pending_updates`` and removed the
        deprecated argument ``force_event_loop``.

    Args:
        listen (:obj:`str`, optional): IP-Address to listen on. Default ``127.0.0.1``.
        port (:obj:`int`, optional): Port the bot should be listening on. Must be one of
            :attr:`telegram.constants.SUPPORTED_WEBHOOK_PORTS`. Defaults to ``80``.
        url_path (:obj:`str`, optional): Path inside url.
        cert (:obj:`str`, optional): Path to the SSL certificate file.
        key (:obj:`str`, optional): Path to the SSL key file.
        drop_pending_updates (:obj:`bool`, optional): Whether to clean any pending updates on
            Telegram servers before actually starting to poll. Default is :obj:`False`.

            .. versionadded :: 13.4
        bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
            :class:`telegram.ext.Updater` will retry on failures on the Telegram server.

            * < 0 - retry indefinitely (default)
            *   0 - no retries
            * > 0 - retry up to X times
        webhook_url (:obj:`str`, optional): Explicitly specify the webhook url. Useful behind
            NAT, reverse proxy, etc. Default is derived from ``listen``, ``port`` &
            ``url_path``.
        ip_address (:obj:`str`, optional): Passed to :meth:`telegram.Bot.set_webhook`.

            .. versionadded :: 13.4
        allowed_updates (List[:obj:`str`], optional): Passed to
            :meth:`telegram.Bot.set_webhook`.
        max_connections (:obj:`int`, optional): Passed to
            :meth:`telegram.Bot.set_webhook`.

            .. versionadded:: 13.6

    Returns:
        :obj:`Queue`: The update queue that can be filled from the main thread, or
            :obj:`None` if the updater was already running.
    """
    with self.__lock:
        if not self.running:
            self.running = True
            # Create & start threads
            webhook_ready = Event()
            dispatcher_ready = Event()
            if self.dispatcher:
                # Fix: pass the readiness event by keyword, consistent with
                # start_polling(); passing it positionally would forward it
                # as the first positional argument of dispatcher.start()
                # instead of its `ready` parameter.
                self._init_thread(self.dispatcher.start, "dispatcher", ready=dispatcher_ready)
            self._init_thread(
                self._start_webhook,
                "updater",
                listen,
                port,
                url_path,
                cert,
                key,
                bootstrap_retries,
                drop_pending_updates,
                webhook_url,
                allowed_updates,
                ready=webhook_ready,
                ip_address=ip_address,
                max_connections=max_connections,
            )
            # Block until both background threads signal readiness.
            self.logger.debug('Waiting for webhook to start')
            webhook_ready.wait()
            if self.dispatcher:
                self.logger.debug('Waiting for Dispatcher to start')
                dispatcher_ready.wait()
            # Return the update queue so the main thread can insert updates
            return self.update_queue
        return None
@no_type_check
def _start_polling(
    self,
    poll_interval,
    timeout,
    read_timeout,
    bootstrap_retries,
    drop_pending_updates,
    allowed_updates,
    ready=None,
):  # pragma: no cover
    # Thread target of thread 'updater'. Runs in background, pulls
    # updates from Telegram and inserts them in the update queue of the
    # Dispatcher.
    self.logger.debug('Updater thread started (polling)')
    # Bootstrap with an empty webhook_url: ensures no webhook is configured
    # (polling and webhooks are mutually exclusive) and optionally drops
    # pending updates before the first poll.
    self._bootstrap(
        bootstrap_retries,
        drop_pending_updates=drop_pending_updates,
        webhook_url='',
        allowed_updates=None,
    )
    self.logger.debug('Bootstrap done')

    def polling_action_cb():
        # One long-poll request; last_update_id is the offset, so each
        # update is fetched exactly once.
        updates = self.bot.get_updates(
            self.last_update_id,
            timeout=timeout,
            read_timeout=read_timeout,
            allowed_updates=allowed_updates,
        )
        if updates:
            if not self.running:
                # Offset is NOT advanced: these updates will be fetched
                # again once polling restarts.
                self.logger.debug('Updates ignored and will be pulled again on restart')
            else:
                for update in updates:
                    self.update_queue.put(update)
                self.last_update_id = updates[-1].update_id + 1
        # True keeps the retry loop in _network_loop_retry running.
        return True

    def polling_onerr_cb(exc):
        # Put the error into the update queue and let the Dispatcher
        # broadcast it
        self.update_queue.put(exc)

    # Signal readiness before entering the (blocking) network loop.
    if ready is not None:
        ready.set()
    self._network_loop_retry(
        polling_action_cb, polling_onerr_cb, 'getting Updates', poll_interval
    )
@no_type_check
def _network_loop_retry(self, action_cb, onerr_cb, description, interval):
    """Perform a loop calling `action_cb`, retrying after network errors.

    Stop condition for loop: `self.running` evaluates :obj:`False` or return value of
    `action_cb` evaluates :obj:`False`.

    Args:
        action_cb (:obj:`callable`): Network oriented callback function to call.
        onerr_cb (:obj:`callable`): Callback to call when TelegramError is caught. Receives the
            exception object as a parameter.
        description (:obj:`str`): Description text to use for logs and exception raised.
        interval (:obj:`float` | :obj:`int`): Interval to sleep between each call to
            `action_cb`.
    """
    self.logger.debug('Start network loop retry %s', description)
    cur_interval = interval
    while self.running:
        try:
            if not action_cb():
                break
        except RetryAfter as exc:
            # Server-mandated flood wait: sleep the requested time plus a
            # small safety margin.
            self.logger.info('%s', exc)
            cur_interval = 0.5 + exc.retry_after
        except TimedOut as toe:
            self.logger.debug('Timed out %s: %s', description, toe)
            # If failure is due to timeout, we should retry asap.
            cur_interval = 0
        except InvalidToken as pex:
            # Unrecoverable: the bot token is wrong, so abort entirely.
            self.logger.error('Invalid token; aborting')
            raise pex
        except TelegramError as telegram_exc:
            self.logger.error('Error while %s: %s', description, telegram_exc)
            onerr_cb(telegram_exc)
            # Exponential-ish backoff on generic Telegram errors.
            cur_interval = self._increase_poll_interval(cur_interval)
        else:
            # Success: reset the backoff to the caller-supplied interval.
            cur_interval = interval
        if cur_interval:
            sleep(cur_interval)
@staticmethod
def _increase_poll_interval(current_interval: float) -> float:
# increase waiting times on subsequent errors up to 30secs
if current_interval == 0:
current_interval = 1
elif current_interval < 30:
current_interval *= 1.5
else:
current_interval = min(30.0, current_interval)
return current_interval
@no_type_check
def _start_webhook(
    self,
    listen,
    port,
    url_path,
    cert,
    key,
    bootstrap_retries,
    drop_pending_updates,
    webhook_url,
    allowed_updates,
    ready=None,
    ip_address=None,
    max_connections: int = 40,
):
    # Thread target of thread 'updater' in webhook mode: starts the local
    # HTTP(S) server and registers the webhook with Telegram.
    self.logger.debug('Updater thread started (webhook)')
    # Note that we only use the SSL certificate for the WebhookServer, if the key is also
    # present. This is because the WebhookServer may not actually be in charge of performing
    # the SSL handshake, e.g. in case a reverse proxy is used
    use_ssl = cert is not None and key is not None
    # Normalize the path so the Tornado route always starts with '/'.
    if not url_path.startswith('/'):
        url_path = f'/{url_path}'
    # Create Tornado app instance
    app = WebhookAppClass(url_path, self.bot, self.update_queue)
    # Form SSL Context
    # An SSLError is raised if the private key does not match with the certificate
    if use_ssl:
        try:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(cert, key)
        except ssl.SSLError as exc:
            raise TelegramError('Invalid SSL Certificate') from exc
    else:
        ssl_ctx = None
    # Create and start server
    self.httpd = WebhookServer(listen, port, app, ssl_ctx)
    if not webhook_url:
        webhook_url = self._gen_webhook_url(listen, port, url_path)
    # We pass along the cert to the webhook if present.
    if cert is not None:
        # Telegram needs the certificate file contents (self-signed case),
        # so open it and hand the file object to set_webhook via bootstrap.
        with open(cert, 'rb') as cert_file:
            self._bootstrap(
                cert=cert_file,
                max_retries=bootstrap_retries,
                drop_pending_updates=drop_pending_updates,
                webhook_url=webhook_url,
                allowed_updates=allowed_updates,
                ip_address=ip_address,
                max_connections=max_connections,
            )
    else:
        self._bootstrap(
            max_retries=bootstrap_retries,
            drop_pending_updates=drop_pending_updates,
            webhook_url=webhook_url,
            allowed_updates=allowed_updates,
            ip_address=ip_address,
            max_connections=max_connections,
        )
    # Blocks until shutdown; `ready` is set once the server is serving.
    self.httpd.serve_forever(ready=ready)
@staticmethod
def _gen_webhook_url(listen: str, port: int, url_path: str) -> str:
return f'https://{listen}:{port}{url_path}'
@no_type_check
def _bootstrap(
    self,
    max_retries,
    drop_pending_updates,
    webhook_url,
    allowed_updates,
    cert=None,
    bootstrap_interval=5,
    ip_address=None,
    max_connections: int = 40,
):
    # Prepare the bot for polling (webhook_url falsy) or webhook mode
    # (webhook_url set), retrying transient failures up to max_retries times.
    # One-element list so the nested callbacks can mutate the retry counter
    # (a closure cannot rebind an outer local without `nonlocal`).
    retries = [0]

    def bootstrap_del_webhook():
        self.logger.debug('Deleting webhook')
        if drop_pending_updates:
            self.logger.debug('Dropping pending updates from Telegram server')
        self.bot.delete_webhook(drop_pending_updates=drop_pending_updates)
        # False stops the surrounding _network_loop_retry after one success.
        return False

    def bootstrap_set_webhook():
        self.logger.debug('Setting webhook')
        if drop_pending_updates:
            self.logger.debug('Dropping pending updates from Telegram server')
        self.bot.set_webhook(
            url=webhook_url,
            certificate=cert,
            allowed_updates=allowed_updates,
            ip_address=ip_address,
            drop_pending_updates=drop_pending_updates,
            max_connections=max_connections,
        )
        # False stops the surrounding _network_loop_retry after one success.
        return False

    def bootstrap_onerr_cb(exc):
        # Forbidden (e.g. bot blocked / bad token scope) is never retried;
        # anything else is retried until max_retries is exhausted.
        if not isinstance(exc, Forbidden) and (max_retries < 0 or retries[0] < max_retries):
            retries[0] += 1
            self.logger.warning(
                'Failed bootstrap phase; try=%s max_retries=%s', retries[0], max_retries
            )
        else:
            self.logger.error('Failed bootstrap phase after %s retries (%s)', retries[0], exc)
            raise exc

    # Dropping pending updates from TG can be efficiently done with the drop_pending_updates
    # parameter of delete/start_webhook, even in the case of polling. Also we want to make
    # sure that no webhook is configured in case of polling, so we just always call
    # delete_webhook for polling
    if drop_pending_updates or not webhook_url:
        self._network_loop_retry(
            bootstrap_del_webhook,
            bootstrap_onerr_cb,
            'bootstrap del webhook',
            bootstrap_interval,
        )
        # Reset the counter so the set-webhook phase gets its own budget.
        retries[0] = 0
    # Restore/set webhook settings, if needed. Again, we don't know ahead if a webhook is set,
    # so we set it anyhow.
    if webhook_url:
        self._network_loop_retry(
            bootstrap_set_webhook,
            bootstrap_onerr_cb,
            'bootstrap set webhook',
            bootstrap_interval,
        )
async def stop(self) -> None:
    """Stops the polling/webhook thread, the dispatcher and the job queue."""
    # NOTE(review): this is a coroutine, but _signal_handler() calls it as
    # plain `self.stop()` without awaiting — confirm all call sites actually
    # await it, otherwise the shutdown body below never executes there.
    with self.__lock:
        if self.running or (self.dispatcher and self.dispatcher.has_running_threads):
            self.logger.debug(
                'Stopping Updater %s...', 'and Dispatcher ' if self.dispatcher else ''
            )
            # Flipping this flag makes the polling/webhook loops exit.
            self.running = False
            self._stop_httpd()
            self._stop_dispatcher()
            self._join_threads()
            # Clear the connection pool only if the bot is managed by the Updater
            # Otherwise `dispatcher.stop()` already does that
            if not self.dispatcher:
                await self.bot.shutdown()
@no_type_check
def _stop_httpd(self) -> None:
    """Shut down the webhook server, if one is running, and forget it."""
    if not self.httpd:
        return
    self.logger.debug(
        'Waiting for current webhook connection to be '
        'closed... Send a Telegram message to the bot to exit '
        'immediately.'
    )
    self.httpd.shutdown()
    self.httpd = None
@no_type_check
def _stop_dispatcher(self) -> None:
    """Ask the associated Dispatcher (if any) to stop processing updates."""
    if not self.dispatcher:
        return
    self.logger.debug('Requesting Dispatcher to stop...')
    self.dispatcher.stop()
@no_type_check
def _join_threads(self) -> None:
    """Block until every background thread started by us has finished."""
    for thread in self.__threads:
        self.logger.debug('Waiting for %s thread to end', thread.name)
        thread.join()
        self.logger.debug('%s thread has ended', thread.name)
    # All workers are done; drop the references.
    self.__threads = []
@no_type_check
def _signal_handler(self, signum, frame) -> None:
    # Installed by idle() for the configured stop signals. Ends the idle
    # loop, stops the updater if it is running, then forwards the signal
    # to the optional user callback.
    self.is_idle = False
    if self.running:
        self.logger.info(
            'Received signal %s (%s), stopping...',
            signum,
            # signal.Signals is undocumented for some reason see
            # https://github.com/python/typeshed/pull/555#issuecomment-247874222
            # https://bugs.python.org/issue28206
            signal.Signals(signum),  # pylint: disable=no-member
        )
        # NOTE(review): stop() is declared `async def` above, so this call
        # only creates a coroutine object and never runs the shutdown logic
        # — confirm whether stop() should be synchronous or be scheduled on
        # an event loop here.
        self.stop()
        if self.user_signal_handler:
            self.user_signal_handler(signum, frame)
    else:
        # Not running: nothing to shut down cleanly, so bail out hard.
        self.logger.warning('Exiting immediately!')
        # pylint: disable=import-outside-toplevel, protected-access
        import os

        os._exit(1)
def idle(
    self, stop_signals: Union[List, Tuple] = (signal.SIGINT, signal.SIGTERM, signal.SIGABRT)
) -> None:
    """Blocks until one of the signals are received and stops the updater.

    Args:
        stop_signals (:obj:`list` | :obj:`tuple`): List containing signals from the signal
            module that should be subscribed to. :meth:`Updater.stop()` will be called on
            receiving one of those signals. Defaults to (``SIGINT``, ``SIGTERM``, ``SIGABRT``).
    """
    # Install our handler for each requested signal; _signal_handler clears
    # is_idle, which terminates the loop below.
    for sig in stop_signals:
        signal.signal(sig, self._signal_handler)
    self.is_idle = True
    # Sleep in 1s slices to keep the main thread alive (signal handlers run
    # in the main thread) while staying responsive to is_idle changes.
    while self.is_idle:
        sleep(1)
| nishidage/-1 | telegram/ext/_updater.py | _updater.py | py | 24,967 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "telegram.ext.Dispatc... |
25346439097 | from sys import exit
from kojak import cli
from kojak.exceptions import KojakException
from kojak.models import Analyze
from kojak.reports import Report
from pbr.version import VersionInfo
def main():
    """Entry point for the ``kojak`` command line tool.

    Returns a process exit status: 0 on success (or when there is nothing
    to do), 1 when the analysis of the given file fails.
    """
    args = cli.argparser().parse_args()
    version = VersionInfo("kojak")
    # --version short-circuits everything else.
    if args.version:
        print("kojak v{version}".format(version=version))
        return 0
    # Without a file to analyze there is nothing to do.
    if not args.file:
        return 0
    try:
        analyze = Analyze(args.file)
    except KojakException as err:
        print(str(err))
        return 1
    report = Report()
    report.rendering("cli.txt", analyze)
    footer = "Generated by kojak v{version}".format(version=version)
    separator = "-" * len(footer)
    print(separator)
    print(footer)
    print(separator)
    return 0
# Script entry point: run the CLI and propagate main()'s status code to the
# shell via sys.exit (imported above as `exit`).
if __name__ == "__main__":
    exit(main())
| openuado/kojak | kojak/__main__.py | __main__.py | py | 845 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "kojak.cli.argparser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "kojak.cli",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pbr.version.VersionInfo",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "kojak.models.An... |
4622716657 | import os
from shutil import copyfile
from typing import List, TYPE_CHECKING
import jinja2
from ..checks.issues import Issues
from ..checks.file_size import FileSizeCheck
from .utils import create_parent_directory
from ..checks.base import Check
if TYPE_CHECKING:
from combine.core import ContentDirectory
class File:
    """One file of the content directory, mapped to (at most) one output file.

    The base implementation copies the source file verbatim; subclasses can
    override :meth:`_get_output_relative_path`, :meth:`load` and
    :meth:`_render_to_output` to transform the file instead.
    """

    def __init__(self, path: str, content_directory: "ContentDirectory") -> None:
        self.path = path
        # Paths referenced by this file; (re)populated by load().
        self.references: List[str] = []
        self.content_directory = content_directory
        self.content_relative_path = os.path.relpath(
            self.path, self.content_directory.path
        )
        self.root, self.extension = os.path.splitext(self.content_relative_path)
        self.root_parts = os.path.split(self.root)
        self.name_without_extension = self.root_parts[-1]
        self.output_relative_path = self._get_output_relative_path()
        # Fix: initialize the output path. It is only assigned in render(),
        # so calling get_checks()/check_output() before rendering raised
        # AttributeError; the falsy-guard in get_checks() shows None is the
        # intended "not rendered / no output" state.
        self.output_path = None

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.path}>"

    def _get_output_relative_path(self) -> str:
        """Where this file lands in the output tree (relative path)."""
        return self.content_relative_path

    def load(self, jinja_environment: jinja2.Environment) -> None:
        """Load properties that can vary depending on content of the file"""
        self.references = []

    def render(self, output_path: str, jinja_environment: jinja2.Environment) -> None:
        """Render this file under *output_path* and remember where it went."""
        self.output_path = self._render_to_output(output_path, jinja_environment)

    def _render_to_output(
        self, output_path: str, jinja_environment: jinja2.Environment
    ) -> str:
        """Copy the source file verbatim into the output tree.

        Returns the absolute target path; any pre-existing file is replaced.
        """
        target_path = os.path.join(output_path, self.output_relative_path)
        create_parent_directory(target_path)
        if os.path.exists(target_path):
            os.remove(target_path)
        copyfile(self.path, target_path)
        return target_path

    def check_output(self) -> Issues:
        """Run all configured checks on the rendered output.

        Returns the collected :class:`Issues`; also prints them when any
        were found.
        """
        issues = Issues()
        for check in self.get_checks():
            for issue in check.run():
                issues.append(issue)
        if issues:
            issues.print(f"Issues in {self.content_relative_path}")
        return issues

    def get_checks(self) -> List[Check]:
        """Checks to run against the rendered output (empty before render())."""
        if self.output_path:
            # Not all files have an output
            return [
                FileSizeCheck(path=self.output_path),
            ]
        return []
| dropseed/combine | combine/files/core.py | core.py | py | 2,317 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.relpath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
23525174675 | from selenium.webdriver.common.by import By
from lib import Lib
from time import sleep
import pyautogui
class TestsPage(Lib):
    """Put the element locators here, it will make things easier!

    Selenium page-object with the UNDB Classroom test cases. Each ``ct_*`` /
    ``ctt_*`` method drives the browser through one scenario; several also
    assert on the resulting page.
    """

    # Base URLs and external resources used by the tests.
    URL = 'https://undbclassroom.undb.edu.br'
    __LOGIN_DIRETO_URL = 'https://undbclassroom.undb.edu.br/login/index.php#'
    __URL_ICON = 'https://cdn-icons-png.flaticon.com/512/1521/1521260.png'
    # Element locators as (By.<strategy>, value) pairs; they are unpacked
    # with * when handed to selenium's find_element(s).
    __EMAIL_TEXTBOX = By.ID, 'username'
    __SENHA_TEXTBOX = By.ID, 'password'
    __ACESSAR_BUTTON = By.CLASS_NAME, 'btn-primary'
    __ESQUECEU_SENHA_BUTTON = By.XPATH, '//a[contains(text(), "Esqueceu o seu usuário ou senha?")]'
    __ID_USUARIO_TEXTBOX = By.ID, 'id_username'
    __BUSCAR_BUTTON = By.NAME, 'submitbuttonusername'
    __ID_EMAIL_TEXTBOX = By.ID, 'id_email'
    __ACESSAR_TEIA = By.XPATH, '//*[@id="page"]/div[2]/div/div/div[1]/div/a'
    __ACESSAR_PORTAL = By.XPATH, '//*[@id="page"]/div[2]/div/div/div[2]/div/a'
    __ACESSAR_SITE = By.XPATH, '//*[@id="page"]/div[2]/div/div/div[3]/div/a'
    __LEMBRAR_USUARIO = By.NAME, "rememberusername"
    __ACESSAR_LINK_PLAYSTORE = By.XPATH, '//*[@id="top-footer"]/div/div/div[1]/h4/a[1]/img'
    __CALENDARIO = By.XPATH, '//*[@id="nav-drawer"]/ul/li[3]/a'
    __SETA_CALENDARIO = By.CLASS_NAME, 'arrow_text'
    __ACESSAR_LINK_APPSTORE = By.XPATH, '//*[@id="top-footer"]/div/div/div[1]/h4/a[2]/img'
    __ACESSAR_EMAIL_INSTITUCIONAL = By.XPATH, '//*[@id="boxForm"]/div/div/div/a'
    __ACESSAR_BARRA_OPCOES = By.XPATH, '//*[@id="action-menu-toggle-1"]'
    __ACESSSAR_SAIR = By.XPATH, '//*[@id="action-menu-1-menu"]/a[7]'
    __EMAIL_GOOGLE = By.ID, 'identifierId'
    __BOTAO_PROXIMO = By.XPATH, '//*[@id="identifierNext"]/div/button'
    __DISCIPLINA_TESTAGEM = By.XPATH, '//*[@id="frontpage-course-list"]/div/div[1]/div[2]/div[3]/div/a'
    __CLICK_SARAIVA = By.XPATH, '//*[@id="module-120666"]/div/div/div[2]/div/a'
    __CLICK_PEARSON = By.XPATH, '//*[@id="module-120667"]/div/div/div[2]/div'
    __EMAIL_NAV = By.CLASS_NAME, 'fa-envelope'
    __NEW_EMAIL = By.CLASS_NAME, 'mail-navbar-menu-compose-link'
    __SEND_EMAIL = By.NAME, 'send'
    __ERROR_DESTINATARIO = By.ID, 'id_error_recipients'
    __PESQUISA_SATISFACAO = By.XPATH, '//*[@id="module-120665"]/div/div/div[2]/div/a/span'
    __RESPONDA_PESQUISA = By.XPATH, '//*[@id="region-main"]/div[1]/div/div/div[2]/div/a'
    __SIM_BUTTTON = By.ID, 'id_multichoice_16594_1'
    __NAO_BUTTTON = By.ID, 'id_multichoice_16594_2'
    __NOTIFICACOES = By.CLASS_NAME, 'slicon-bell'
    __PREFERENCIAS_NOTIFICACOES = By.CLASS_NAME, 'slicon-settings'
    __NOTIFICAO_WEB = By.CLASS_NAME, 'preference-state-status-container'
    __AV_QUALIS = By.XPATH, '//span[contains(text(), "Av Qualis - Produto")]'
    __ADICIONAR_ENVIO = By.XPATH, '//button[contains(text(), "Adicionar envio")]'
    __SALVAR_ENVIO = By.NAME, 'submitbutton'
    __ALERTA_ENVIO = By.CLASS_NAME, 'alert-danger'
    __PERFIL_USER = By.XPATH, '//*[@id="action-menu-toggle-1"]/span/span[2]/span/img'
    __PREFERENCIAS_USER = By.XPATH, '//*[@id="action-menu-1-menu"]/a[6]'
    __MODIFICAR_PERFIL = By.XPATH, '//*[@id="region-main"]/div/div/div/div/div[1]/div/div/div/div[1]/a'
    __SOLTAR_IMAGEM = By.CLASS_NAME, 'dndupload-arrow'
    __UTILIZAR_URL = By.XPATH, '//span[contains(text(), "Utilizar uma URL")]'
    __DOWNLOAD_IMAGE = By.CLASS_NAME, 'fp-login-submit'
    __IMAGEM = By.CLASS_NAME, 'fp-reficons2'
    __FILL_URL = By.XPATH, '//input[@id="fileurl"]'
    __ATV__TESTAGEM = By.XPATH, '//*[@id="module-122898"]/div/div/div[2]/div/a/span'
    __TEST_SEND_AGAIN = By.XPATH, '//*[@id="yui_3_17_2_1_1632675935201_861"]/a[1]/div[1]/div[3]'
    __NOTIFICATION_CHOICE = By.XPATH, '//*[@id="popover-region-container-6150a8ed2ee9f6150a8ed05dd017"]/div[2]/div/div[1]/div[1]/a[1]/div[1]/div[2]'
    __SEARCH_COURSE = By.XPATH, '//*[@id="shortsearchbox"]'
    __GO_TO_COURSE = By.XPATH, '//*[@id="coursesearch"]/fieldset/button'
    __TOTURIAL_COURSE = By.XPATH, '//*[@id="page"]/div[2]/div/div/div[4]/div/a'
    __NIVELAMENTO = By.XPATH, '//*[@id="module-120647"]/div/div/div[2]/div/a/span'
    __NIVELAMENTO_CHECK = By.XPATH, '//*[@id="q10210:3_answer0"]'
    __UNIDADE_APRENDIZADO = By.XPATH, '//*[@id="section-4"]/div/div[1]/div[2]/button'
    __CONF_ACESS = By.XPATH, '//*[@id="themesettings-control"]/span'
    __XPATH_CX_DISLX = By.XPATH, '//*[@id="fonttype"]'
    __XPATH_DISLX_SOP = By.XPATH, '//*[@id="fonttype"]/option[2]'
    __CLASS_PERFIL = By.CLASS_NAME, 'userpicture defaultuserpic'
    __XPATH_PREFERENCIAS = By.XPATH, '//*[@id="actionmenuaction-6"]'
    __XPATH_IDIOMA_PREFERIDO = By.XPATH, '//*[@id="region-main"]/div/div/div/div/div[1]/div/div/div/div[3]/a'
    __XPATH_CX_IDIOMA = By.XPATH, '//*[@id="id_lang"]'
    __XPATH_INGLES = By.XPATH, '//*[@id="id_lang"]/option[1]'
    __NAME_SAVE_IDIOMA = By.NAME, 'Salvar mudanças'
    __XPATH_ICON_MSG = By.XPATH, '//*[@id="message-drawer-toggle-614f5910d1996614f5910a546218"]'
    __XPATH_MSG_FAV = By.XPATH, '//*[@id="yui_3_17_2_1_1632590098721_72"]'
    __XPATH_MSG_SELF_USER = By.XPATH, '//*[@id="view-overview-favourites-target-614f5910d6a43614f5910a546229"]/div[2]/a/div[1]/p'
    __TESTE_SELF_TEXTBOX_ = By.XPATH, '//*[@id="message-drawer-614f5910d6a43614f5910a546229"]/div[3]/div[1]/div[1]/div[2]/textarea'
    __ICON_BARRA_LATERAL = By.CLASS_NAME, 'btn nav-link float-sm-left mr-1'
    __LINK_GEST_QUALD_SFT = By.LINK_TEXT, 'https://undbclassroom.undb.edu.br/course/view.php?id=3835'
    __ICON_PARTICIPANTES = By.LINK_TEXT, 'https://undbclassroom.undb.edu.br/user/index.php?id=3835'
    __LINK_MANUAL_DO_CASE = By.XPATH, '//*[@id="module-29817"]/div/div/div[2]/div/a/span'
    __XPATH_DOWNLOAD_CASE_PAPPER = By.XPATH, '//*[@id="icon"]/iron-icon'
    __LINK_MANUAL_PAPPER = By.XPATH, '//*[@id="module-29818"]/div/div/div[2]/div/a/span'
    __ICON_ID_NOTAS = By.ID, 'actionmenuaction-4'

    def __init__(self, driver):
        # NOTE(review): Lib.__init__ is never invoked here — confirm Lib
        # requires no initialization beyond receiving the driver.
        self.driver = driver

    def ct_0001(self, user, password):
        # Login test
        self.open_page(self.URL)
        self.fill(self.__EMAIL_TEXTBOX, user)
        self.fill(self.__SENHA_TEXTBOX, password)
        self.click(self.__ACESSAR_BUTTON)
        text = 'UNDB Classroom'
        assert text in self.driver.title

    def ct_0002(self, user):
        # Username/password recovery via user identification
        # NOTE(review): the expected text looks like page-body copy, but it
        # is asserted against driver.title — confirm the intended target.
        self.open_page(self.URL)
        self.click(self.__ESQUECEU_SENHA_BUTTON)
        self.fill(self.__ID_USUARIO_TEXTBOX, user)
        self.click(self.__BUSCAR_BUTTON)
        text = 'Se o usuário e o email estiverem corretos um email deve ter sido enviado a você.'
        assert text in self.driver.title

    def ct_0003(self, email):
        # Username/password recovery via institutional e-mail
        # NOTE(review): same title-vs-body concern as ct_0002.
        self.open_page(self.URL)
        self.click(self.__ESQUECEU_SENHA_BUTTON)
        self.fill(self.__ID_EMAIL_TEXTBOX, email)
        self.click(self.__BUSCAR_BUTTON)
        text = 'Se o usuário e o email estiverem corretos um email deve ter sido enviado a você.'
        assert text in self.driver.title

    def ct_0004(self):
        # Gravatar (profile picture) change via image URL
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__PERFIL_USER)
        self.click(self.__PREFERENCIAS_USER)
        self.click(self.__MODIFICAR_PERFIL)
        self.click(self.__SOLTAR_IMAGEM)
        self.click(self.__UTILIZAR_URL)
        self.click(self.__FILL_URL)
        self.fill(self.__FILL_URL, self.__URL_ICON)
        self.click(self.__DOWNLOAD_IMAGE)
        result = self.driver.find_element(*self.__IMAGEM).is_enabled()
        assert result is True

    def ct_0005(self):
        # Saraiva online library link
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__CLICK_SARAIVA)
        text = 'Saraiva Online'
        assert text in self.driver.title

    def ct_0006(self):
        # Pearson online library link
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__CLICK_PEARSON)
        text = 'Pearson'
        assert text in self.driver.title

    def ct_0007(self):
        # Sending an e-mail without a recipient must show a validation error
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__EMAIL_NAV)
        self.click(self.__NEW_EMAIL)
        self.click(self.__SEND_EMAIL)
        result = self.driver.find_element(*self.__ERROR_DESTINATARIO).is_enabled()
        assert result is True

    def ct_0008(self):
        # Multiple-choice question: selecting a second option must unselect
        # the first
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__PESQUISA_SATISFACAO)
        self.click(self.__RESPONDA_PESQUISA)
        self.click(self.__SIM_BUTTTON)
        self.click(self.__NAO_BUTTTON)
        result = self.driver.find_element(*self.__SIM_BUTTTON).is_selected()
        assert result is False

    def ct_0009(self):
        # Disable one notification preference
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__NOTIFICACOES)
        self.click(self.__PREFERENCIAS_NOTIFICACOES)
        buttons = self.driver.find_elements(*self.__NOTIFICAO_WEB)
        buttons[0].click()
        result = buttons[0].is_enabled()
        assert result is True

    def ct_0010(self):
        # Submission: saving an empty "Av Qualis" upload must raise an alert
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__AV_QUALIS)
        self.click(self.__ADICIONAR_ENVIO)
        self.click(self.__SALVAR_ENVIO)
        result = self.driver.find_element(*self.__ALERTA_ENVIO).is_enabled()
        assert result is True

    def ct_0011(self):
        # NOTE(review): identical flow to ct_0010 but asserts the opposite
        # result — one of the two cases must be wrong; confirm intent.
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__AV_QUALIS)
        self.click(self.__ADICIONAR_ENVIO)
        self.click(self.__SALVAR_ENVIO)
        result = self.driver.find_element(*self.__ALERTA_ENVIO).is_enabled()
        assert result is False

    def ct_0012(self):
        # Submission alert for the "testagem" activity
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__ATV__TESTAGEM)
        self.click(self.__ADICIONAR_ENVIO)
        self.click(self.__SALVAR_ENVIO)
        result = self.driver.find_element(*self.__ALERTA_ENVIO).is_enabled()
        assert result is True

    def ct_0013(self):
        # Re-submission flow for the "testagem" activity
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__ATV__TESTAGEM)
        self.click(self.__ADICIONAR_ENVIO)
        self.click(self.__TEST_SEND_AGAIN)
        self.click(self.__ADICIONAR_ENVIO)
        self.click(self.__SALVAR_ENVIO)
        result = self.driver.find_element(*self.__ALERTA_ENVIO).is_enabled()
        assert result is True

    def ct_0014(self):
        # Open the notifications pop-over
        # NOTE(review): self.__ICON_NOTFICATION is never defined on this
        # class — this raises AttributeError; probably __NOTIFICACOES was
        # meant. Confirm and fix the locator name.
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__ICON_NOTFICATION)

    def ct_0015(self):
        # Open a specific notification
        # NOTE(review): same undefined __ICON_NOTFICATION locator as ct_0014.
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__ICON_NOTFICATION)
        sleep(2)
        self.click(self.__NOTIFICATION_CHOICE)

    def ct_0016(self):
        # Search for a course by name
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.fill(self.__SEARCH_COURSE, 'Testagem')
        self.click(self.__GO_TO_COURSE)

    def ct_0017(self):
        # Open the tutorial course without logging in
        self.open_page(self.URL)
        self.click(self.__TOTURIAL_COURSE)

    def ct_0018(self):
        # Open the placement ("nivelamento") activity
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__NIVELAMENTO)

    def ct_0019(self):
        # Answer the placement activity
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__NIVELAMENTO)
        self.click(self.__NIVELAMENTO_CHECK)

    def ct_0020(self):
        # Expand a learning unit section
        self.ct_0001('insira o usuário!', 'insira sua senha!')
        self.click(self.__DISCIPLINA_TESTAGEM)
        self.click(self.__UNIDADE_APRENDIZADO)

    def ct_0021(self):
        # Redirect to the TEIA site link
        self.open_page(self.URL)
        sleep(2)
        self.click(self.__ACESSAR_TEIA)
        text = 'Espaço Teia'
        assert text in self.driver.title

    def ct_0022(self,):
        # Redirect to the academic portal link
        self.open_page(self.URL)
        sleep(2)
        self.click(self.__ACESSAR_PORTAL)
        text = 'Portal do Aluno'
        assert text in self.driver.title

    def ct_0023(self,):
        # Redirect to the UNDB site link
        self.open_page(self.URL)
        sleep(2)
        self.click(self.__ACESSAR_SITE)
        text = 'Sou UNDB'
        assert text in self.driver.title

    def ct_0024(self, email):
        # Institutional e-mail login form accepts an address
        self.ct_0029()
        self.fill(self.__EMAIL_GOOGLE, email)
        result = self.driver.find_element(*self.__BOTAO_PROXIMO).is_enabled()
        assert result is True

    def ct_0025(self,):
        # "Remember username" checkbox can be selected
        self.open_page(self.__LOGIN_DIRETO_URL)
        self.click(self.__LEMBRAR_USUARIO)
        result = self.driver.find_element(*self.__LEMBRAR_USUARIO).is_selected()
        assert result is True

    def ct_0026(self,):
        # Redirect to the Google Play Store link
        self.open_page(self.URL)
        self.click(self.__ACESSAR_LINK_PLAYSTORE)
        text = 'UNDB Classroom'
        assert text in self.driver.title

    def ct_0027(self,):
        # Calendar: events key and monthly-view navigation arrow
        self.ct_0001('user', 'password')
        self.click(self.__CALENDARIO)
        result = self.driver.find_element(*self.__SETA_CALENDARIO).is_enabled()
        assert result is True

    def ct_0028(self,):
        # Redirect to the App Store link
        self.open_page(self.URL)
        self.click(self.__ACESSAR_LINK_APPSTORE)
        text = 'UNDB Classroom'
        assert text in self.driver.title

    def ct_0029(self,):
        # Login via institutional (Google) e-mail
        self.open_page(self.URL)
        self.click(self.__ACESSAR_EMAIL_INSTITUCIONAL)
        text = 'Fazer login nas Contas do Google'
        assert text in self.driver.title

    def ct_0030(self,):
        # "Sign out" menu entry test
        self.ct_0001('user', 'password')
        self.click(self.__ACESSAR_BARRA_OPCOES)
        self.click(self.__ACESSSAR_SAIR)
        text = 'UNDB Classroom'
        assert text in self.driver.title

    def ctt_0031(self):
        # Dyslexia font selection test
        self.ct_0001('user', 'password')
        self.click(self.__CONF_ACESS)
        self.click(self.__XPATH_CX_DISLX)
        self.click(self.__XPATH_DISLX_SOP)
        self.click(self.__XPATH_CX_DISLX)

    def ctt_0032(self):
        # Saving the dyslexia font
        # NOTE(review): self.__CONF_ACESSIBILIDADE is never defined on this
        # class (likely __CONF_ACESS was meant) — this raises AttributeError.
        # The pyautogui coordinates are screen-resolution dependent.
        self.ct_0001('user', 'password')
        self.click(self.__CONF_ACESSIBILIDADE)
        self.click(self.__XPATH_CX_DISLX)
        sleep(3)
        self.click(self.__XPATH_DISLX_SOP)
        sleep(3)
        self.click(self.__XPATH_CX_DISLX)
        sleep(2)
        pyautogui.moveTo(1280, 394)
        pyautogui.doubleClick()

    def ctt_0033(self):
        # Change the system language
        self.ct_0001('user', 'password')
        self.click(self.__CLASS_PERFIL)
        self.click(self.__XPATH_PREFERENCIAS)
        self.click(self.__XPATH_IDIOMA_PREFERIDO)
        self.click(self.__XPATH_CX_IDIOMA)
        self.click(self.__XPATH_INGLES)
        self.click(self.__XPATH_CX_IDIOMA)
        self.click(self.__NAME_SAVE_IDIOMA)

    def ctt_0034(self, test_text):
        # Send a chat message to oneself
        self.ct_0001('user', 'password')
        self.click(self.__XPATH_ICON_MSG)
        self.click(self.__XPATH_MSG_FAV)
        self.click(self.__XPATH_MSG_SELF_USER)
        self.fill(self.__TESTE_SELF_TEXTBOX_, test_text)

    def ctt_0035(self):
        # Toggle the side (drawer) menu
        self.ct_0001('user', 'password')
        self.click(self.__ICON_BARRA_LATERAL)

    def ctt_0036(self):
        # Access the participants list of a course
        self.ct_0001('user', 'password')
        self.click(self.__LINK_GEST_QUALD_SFT)
        self.click(self.__ICON_BARRA_LATERAL)
        self.click(self.__ICON_PARTICIPANTES)

    def ctt_0037(self):
        # Open the user preferences page
        self.ct_0001('user', 'password')
        self.click(self.__CLASS_PERFIL)
        self.click(self.__XPATH_PREFERENCIAS)

    def ctt_0038(self):
        # Download the case manual (confirms the OS dialog via pyautogui)
        self.ct_0001('user', 'password')
        self.click(self.__LINK_MANUAL_DO_CASE)
        self.click(self.__XPATH_DOWNLOAD_CASE_PAPPER)
        pyautogui.press('Enter')

    def ctt_0039(self):
        # Download the paper manual (confirms the OS dialog via pyautogui)
        self.ct_0001('user', 'password')
        self.click(self.__LINK_MANUAL_PAPPER)
        self.click(self.__XPATH_DOWNLOAD_CASE_PAPPER)
        pyautogui.press('Enter')

    def ctt_0040(self):
        # Open the grades view
        self.ct_0001('user', 'password')
        self.click(self.__CLASS_PERFIL)
        self.click(self.__ICON_ID_NOTAS)
| soniaelisabeth/classroom_automation | tests_page.py | tests_page.py | py | 17,791 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "lib.Lib",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
26714113006 | import os
import sys
import math
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import matplotlib
matplotlib.use('Agg')
import numpy as np
from models import *
import datasets as dsets
from datasets import PairwiseDataset
models = {
'cyclegan': CycleGAN,
'unit': UNIT
}
def load_data(data_type):
if data_type == 'mnist':
return dsets.mnist.load_data()
elif data_type == 'svhn':
return dsets.svhn.load_data()
else:
return dsets.load_data(data_type)
def main():
# Parsing arguments
parser = argparse.ArgumentParser(description='Training GANs or VAEs')
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--first-data', type=str, required=True)
parser.add_argument('--second-data', type=str, required=True)
parser.add_argument('--epoch', type=int, default=500)
parser.add_argument('--batchsize', type=int, default=50)
parser.add_argument('--output', default='output')
parser.add_argument('--zdims', type=int, default=128)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--testmode', action='store_true')
args = parser.parse_args()
# select gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
# Make output direcotiry if not exists
if not os.path.isdir(args.output):
os.mkdir(args.output)
# Load datasets
x_data = load_data(args.first_data)
x_data = x_data.images * 2.0 - 1.0
y_data = load_data(args.second_data)
y_data = y_data.images * 2.0 - 1.0
datasets = PairwiseDataset(x_data, y_data)
num_data = len(datasets)
# Construct model
if args.model not in models:
raise Exception('Unknown model:', args.model)
model = models[args.model](
input_shape=datasets.shape[1:],
z_dims=args.zdims,
output=args.output
)
if args.resume is not None:
model.load_model(args.resume)
# Make samples
x_samples = datasets.x_data[num_data:num_data+25]
y_samples = datasets.y_data[num_data:num_data+25]
samples = (x_samples, y_samples)
# Training loop
model.main_loop(datasets, samples,
epochs=args.epoch,
batchsize=args.batchsize,
reporter=['loss', 'g_loss', 'd_loss', 'g_acc', 'd_acc'])
if __name__ == '__main__':
main()
| tatsy/keras-generative | train_im2im.py | train_im2im.py | py | 2,392 | python | en | code | 123 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datasets.mnist.load_data",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datasets.mnist"... |
72153324834 | """Tests for pydent.base.py."""
import copy
import pytest
from pydent import AqSession
from pydent import ModelBase
from pydent import ModelRegistry
from pydent.exceptions import NoSessionError
from pydent.marshaller import add_schema
from pydent.marshaller import exceptions
from pydent.marshaller import fields
from pydent.marshaller import SchemaRegistry
# def test_model_base():
# """
# Upon instantiation, .session should be None
# """
# m = ModelBase()
# assert m.session is None
@pytest.fixture(scope="function")
def base():
old_schemas = dict(SchemaRegistry.schemas)
old_models = dict(ModelRegistry.models)
yield ModelBase
new_schemas = set(SchemaRegistry.schemas.keys()).difference(set(old_schemas))
new_models = set(ModelRegistry.models.keys()).difference(set(old_models))
for s in new_schemas:
SchemaRegistry.schemas.pop(s)
for m in new_models:
ModelRegistry.models.pop(m)
@pytest.fixture(scope="function")
def mymodel(base):
@add_schema
class MyModel(base):
pass
yield MyModel
def test_record_id():
"""Creating a ModelBase should create a new record id 'rid.' For each
instance, a new 'rid' should be created."""
@add_schema
class MyModel(ModelBase):
pass
@add_schema
class MyOtherModel(ModelBase):
pass
m = MyModel()
m2 = MyOtherModel()
m3 = MyModel()
assert m.rid != m2.rid
assert m2.rid != m3.rid
assert m.rid != m3.rid
def test_deepcopy():
"""Deepcopy should retain attributes exactly."""
@add_schema
class MyModel(ModelBase):
pass
m = MyModel()
copied = copy.deepcopy(m)
assert m.rid == copied.rid
@pytest.mark.parametrize(
"copy_method",
[pytest.param(lambda x: x.copy()), pytest.param(lambda x: copy.copy(x))],
)
def test_copy(copy_method):
"""Copy should anonymize models."""
@add_schema
class MyModel(ModelBase):
def __init__(self, id):
super().__init__(id=id)
m = MyModel(5)
copied = copy_method(m)
assert m.rid != copied.rid
assert copied.id is None
def test_copy_anonymizes_nested_relationships():
"""Copy should recursively anonymize all models."""
@add_schema
class MyModel(ModelBase):
def __init__(self, id):
super().__init__(id=id)
@add_schema
class MyOtherModel(ModelBase):
def __init__(self, id):
super().__init__(id=id)
m = MyModel(1)
m2 = MyOtherModel(2)
m3 = MyModel(3)
m3.other = m2
m2.other = m
rid1 = m.rid
rid2 = m2.rid
rid3 = m3.rid
copied = m3.copy()
assert copied.id is None
assert copied.other.id is None
assert copied.other.other.id is None
assert copied.rid != rid1
assert copied.other.rid != rid2
assert copied.other.other.rid != rid3
assert m3.rid == rid3
assert m3.other.rid == rid2
assert m3.other.other.rid == rid1
def test_basic_constructor(mymodel):
"""Model should absorb the kwargs."""
m = mymodel(name="SomeName", id=2)
assert m.name == "SomeName"
assert m.id == 2
data = m.dump()
data.pop("rid")
assert data == {"name": "SomeName", "id": 2}
def test_base_constructor_with_marshaller(mymodel):
"""MyModel initializes should absorb kwargs into attributes.
With a schema, those attributes are also tracked and available for
dumping.
"""
m = mymodel(name="model", id=5)
assert m.name == "model"
assert m.id == 5
mdump = m.dump()
del mdump["rid"]
assert mdump == {"name": "model", "id": 5}
def test_connect_to_session(mymodel, fake_session):
"""Connecting to other sessions afterward should not be allowed."""
m = mymodel()
assert m.session is None
# connect to session
m.connect_to_session(fake_session)
assert m.session == fake_session
# attempt to connect to another session
fake_session2 = copy.copy(fake_session)
with pytest.raises(Exception):
m.connect_to_session(fake_session2)
def test_empty_relationships(mymodel):
m = mymodel()
assert m.get_relationships() == {}
def test_check_for_session(mymodel, fake_session):
"""If session is none, _check_for_session should raise an
AttributeError."""
m = mymodel()
with pytest.raises(NoSessionError):
m._check_for_session()
m.connect_to_session(fake_session)
m._check_for_session()
def test_model_registry(mymodel):
"""We expect get_model to return the value in the models dictionary."""
assert "MyModel" in ModelRegistry.models
assert ModelRegistry.models["MyModel"] == mymodel
assert ModelRegistry.get_model("MyModel") == mymodel
del ModelRegistry.models["MyModel"]
def test_no_model_in_registry():
"""ModelRegistry should raise error if model doesn't exist."""
with pytest.raises(exceptions.ModelRegistryError):
ModelRegistry.get_model("SomeModelThatDoesntExist")
def test_find_no_session(mymodel):
"""ModelBase should raise AttributeError if no session is attacheded."""
m = mymodel()
with pytest.raises(NoSessionError):
m.find_callback(None, None)
def test_where_no_session(mymodel):
"""ModelBase should raise AttributeError if no session is attacheded."""
m = mymodel()
with pytest.raises(NoSessionError):
m.where_callback(None, None)
def test_where_and_find(mymodel, monkeypatch, fake_session):
"""Calling the 'where' wrapper on a ModelBase should attempt to get a model
interface and call 'where' or 'find' on the interface.
In this case, a fake model interface is returned in which the
methods return the parameter passed in
"""
def fake_model_interface(self, model_name):
"""A fake model interface to test where."""
class FakeInterface:
def find(id):
return id
def where(params):
return params
return FakeInterface
monkeypatch.setattr(
AqSession, AqSession.model_interface.__name__, fake_model_interface
)
m = mymodel()
ModelRegistry.models["FakeModel"] = ModelBase
m.connect_to_session(fake_session)
assert m.where_callback("FakeModel", 5) == 5
assert m.find_callback("FakeModel", 6) == 6
def test_print(mymodel):
m = mymodel()
print(m)
m.print()
def test_load_many(base, fake_session):
@add_schema
class Child(base):
pass
@add_schema
class Parent(base):
fields = dict(children=fields.Relationship("Child", "get_children", many=True))
def get_children(self, model_name):
return None
parent = Parent.load_from(
{"id": 10, "children": [{"id": 1, "name": "Child1"}, {"id": 2}]},
fake_session.utils,
)
print(parent.children)
assert len(parent.children) == 2
assert isinstance(parent.children[0], Child)
assert parent.children[0].name == "Child1"
def test_uri(base):
"""Expect with with the `include_uri` key includes the default URI."""
@add_schema
class MyModel(base):
pass
model = MyModel()
model.id = 10
assert model.uri == "http://aquarium.org/my_models/10"
def test_dump_uri(base):
"""Expect with with the `include_uri` key includes the default URI."""
@add_schema
class MyModel(base):
pass
model = MyModel()
model.id = 10
no_uri = model.dump()
with_uri = model.dump(include_uri=True)
assert "__uri__" not in no_uri
assert "__uri__" in with_uri
assert with_uri["__uri__"] == "http://aquarium.org/my_models/10"
def test_dump_model(base):
"""Expect with with the `include_uri` key includes the default URI."""
@add_schema
class MyModel(base):
pass
model = MyModel()
model.id = 10
no_mt = model.dump()
with_mt = model.dump(include_model_type=True)
assert "__model__" not in no_mt
assert "__model__" in with_mt
assert with_mt["__model__"] == "MyModel"
| aquariumbio/pydent | tests/test_pydent/test_base.py | test_base.py | py | 8,013 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pydent.marshaller.SchemaRegistry.schemas",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pydent.marshaller.SchemaRegistry",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pydent.ModelRegistry.models",
"line_number": 26,
"usage_ty... |
21030594928 | import time
import random
import datetime
import sys
import numpy as np
from utils.parameters import parse_command_line
from utils.bayesian_optimization import *
from utils.default_params import *
np.set_printoptions(precision = 4, suppress = True)
np.set_printoptions(threshold=sys.maxsize)
###### TRAIN PARAMETERS ##################
args = parse_command_line()
seed = args.seed
depth = args.p
nwarmup = args.nwarmup
nbayes = args.nbayes
quantum_noise = args.quantum_noise
type_of_graph = args.type_of_graph
lattice_spacing = args.lattice_spacing
verbose_ = args.verbose
kernel_choice = args.kernel
shots = args.shots
discard_percentage = args.discard_percentage
np.random.seed(seed)
random.seed(seed)
####### CREATE BAYES OPT INSTANCE ########
bo = Bayesian_optimization(depth,
type_of_graph,
lattice_spacing,
quantum_noise,
nwarmup,
nbayes,
kernel_choice,
shots,
discard_percentage,
seed,
verbose_
)
bo.print_info()
bo.init_training()
bo.run_optimization() | simonetibaldi/BATQuO | src/main_pulser.py | main_pulser.py | py | 1,272 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "utils.... |
41035420324 | """Test the TextClause and related constructs."""
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import desc
from sqlalchemy import exc
from sqlalchemy import extract
from sqlalchemy import Float
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.sql import column
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.sql import quoted_name
from sqlalchemy.sql import sqltypes
from sqlalchemy.sql import table
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.selectable import LABEL_STYLE_DISAMBIGUATE_ONLY
from sqlalchemy.sql.selectable import LABEL_STYLE_NONE
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertions import expect_raises_message
from sqlalchemy.types import NullType
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable", column("otherid", Integer), column("othername", String)
)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic(self):
self.assert_compile(
text("select * from foo where lala = bar"),
"select * from foo where lala = bar",
)
def test_text_adds_to_result_map(self):
t1, t2 = text("t1"), text("t2")
stmt = select(t1, t2)
compiled = stmt.compile()
eq_(
compiled._result_columns,
[
(None, None, (t1,), sqltypes.NULLTYPE),
(None, None, (t2,), sqltypes.NULLTYPE),
],
)
class SelectCompositionTest(fixtures.TestBase, AssertsCompiledSQL):
"""test the usage of text() implicit within the select() construct
when strings are passed."""
__dialect__ = "default"
def test_select_composition_one(self):
self.assert_compile(
select(
literal_column("foobar(a)"),
literal_column("pk_foo_bar(syslaal)"),
)
.where(text("a = 12"))
.select_from(
text("foobar left outer join lala on foobar.foo = lala.foo")
),
"SELECT foobar(a), pk_foo_bar(syslaal) FROM foobar "
"left outer join lala on foobar.foo = lala.foo WHERE a = 12",
)
def test_select_composition_two(self):
s = select()
s = s.add_columns(column("column1"), column("column2"))
s = s.where(text("column1=12"))
s = s.where(text("column2=19"))
s = s.order_by("column1")
s = s.select_from(text("table1"))
self.assert_compile(
s,
"SELECT column1, column2 FROM table1 WHERE "
"column1=12 AND column2=19 ORDER BY column1",
)
def test_select_composition_three(self):
self.assert_compile(
select(column("column1"), column("column2"))
.select_from(table1)
.alias("somealias")
.select(),
"SELECT somealias.column1, somealias.column2 FROM "
"(SELECT column1, column2 FROM mytable) AS somealias",
)
def test_select_composition_four(self):
# test that use_labels doesn't interfere with literal columns
self.assert_compile(
select(
text("column1"),
column("column2"),
column("column3").label("bar"),
table1.c.myid,
)
.select_from(table1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT column1, column2, column3 AS bar, "
"mytable.myid AS mytable_myid "
"FROM mytable",
)
def test_select_composition_five(self):
# test that use_labels doesn't interfere
# with literal columns that have textual labels
self.assert_compile(
select(
text("column1 AS foobar"),
text("column2 AS hoho"),
table1.c.myid,
)
.select_from(table1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT column1 AS foobar, column2 AS hoho, "
"mytable.myid AS mytable_myid FROM mytable",
)
def test_select_composition_six(self):
# test that "auto-labeling of subquery columns"
# doesn't interfere with literal columns,
# exported columns don't get quoted.
# [ticket:4730] refines this but for the moment the behavior with
# no columns is being maintained.
self.assert_compile(
select(
literal_column("column1 AS foobar"),
literal_column("column2 AS hoho"),
table1.c.myid,
)
.select_from(table1)
.subquery()
.select(),
"SELECT anon_1.column1 AS foobar, anon_1.column2 AS hoho, "
"anon_1.myid FROM "
"(SELECT column1 AS foobar, column2 AS hoho, "
"mytable.myid AS myid FROM mytable) AS anon_1",
)
def test_select_composition_seven(self):
self.assert_compile(
select(literal_column("col1"), literal_column("col2"))
.select_from(table("tablename"))
.alias("myalias"),
"SELECT col1, col2 FROM tablename",
)
def test_select_composition_eight(self):
self.assert_compile(
select(table1.alias("t"), text("foo.f"))
.where(text("foo.f = t.id"))
.select_from(text("(select f from bar where lala=heyhey) foo")),
"SELECT t.myid, t.name, t.description, foo.f FROM "
"(select f from bar where lala=heyhey) foo, "
"mytable AS t WHERE foo.f = t.id",
)
def test_expression_element_role(self):
"""test #7287"""
self.assert_compile(
extract("year", text("some_date + :param")),
"EXTRACT(year FROM some_date + :param)",
)
@testing.combinations(
(
None,
"SELECT mytable.myid, whatever FROM mytable "
"UNION SELECT mytable.myid, whatever FROM mytable",
),
(
LABEL_STYLE_NONE,
"SELECT mytable.myid, whatever FROM mytable "
"UNION SELECT mytable.myid, whatever FROM mytable",
),
(
LABEL_STYLE_DISAMBIGUATE_ONLY,
"SELECT mytable.myid, whatever FROM mytable "
"UNION SELECT mytable.myid, whatever FROM mytable",
),
(
LABEL_STYLE_TABLENAME_PLUS_COL,
"SELECT mytable.myid AS mytable_myid, whatever FROM mytable "
"UNION SELECT mytable.myid AS mytable_myid, whatever FROM mytable",
),
)
def test_select_composition_nine(self, label_style, expected):
s1 = select(table1.c.myid, text("whatever"))
if label_style:
s1 = s1.set_label_style(label_style)
s2 = select(table1.c.myid, text("whatever"))
if label_style:
s2 = s2.set_label_style(label_style)
stmt = s1.union(s2)
self.assert_compile(stmt, expected)
@testing.combinations(
(
None,
"SELECT anon_1.myid FROM (SELECT mytable.myid AS myid, "
"whatever FROM mytable UNION SELECT mytable.myid AS myid, "
"whatever FROM mytable) AS anon_1",
),
(
LABEL_STYLE_NONE,
"SELECT anon_1.myid FROM (SELECT mytable.myid AS myid, "
"whatever FROM mytable UNION SELECT mytable.myid AS myid, "
"whatever FROM mytable) AS anon_1",
),
(
LABEL_STYLE_DISAMBIGUATE_ONLY,
"SELECT anon_1.myid FROM (SELECT mytable.myid AS myid, "
"whatever FROM mytable UNION SELECT mytable.myid AS myid, "
"whatever FROM mytable) AS anon_1",
),
(
LABEL_STYLE_TABLENAME_PLUS_COL,
"SELECT anon_1.mytable_myid FROM "
"(SELECT mytable.myid AS mytable_myid, whatever FROM mytable "
"UNION SELECT mytable.myid AS mytable_myid, whatever "
"FROM mytable) AS anon_1",
),
)
def test_select_composition_ten(self, label_style, expected):
s1 = select(table1.c.myid, text("whatever"))
if label_style:
s1 = s1.set_label_style(label_style)
s2 = select(table1.c.myid, text("whatever"))
if label_style:
s2 = s2.set_label_style(label_style)
stmt = s1.union(s2).subquery().select()
self.assert_compile(stmt, expected)
@testing.combinations(
(None, "SELECT mytable.myid, whatever FROM mytable"),
(LABEL_STYLE_NONE, "SELECT mytable.myid, whatever FROM mytable"),
(
LABEL_STYLE_DISAMBIGUATE_ONLY,
"SELECT mytable.myid, whatever FROM mytable",
),
(
LABEL_STYLE_TABLENAME_PLUS_COL,
"SELECT mytable.myid AS mytable_myid, whatever FROM mytable",
),
)
def test_select_composition_eleven(self, label_style, expected):
stmt = select(table1.c.myid, text("whatever"))
if label_style:
stmt = stmt.set_label_style(label_style)
self.assert_compile(stmt, expected)
@testing.combinations(
(None, ["myid", "description"]),
(LABEL_STYLE_NONE, ["myid", "description"]),
(LABEL_STYLE_DISAMBIGUATE_ONLY, ["myid", "description"]),
(
LABEL_STYLE_TABLENAME_PLUS_COL,
["mytable_myid", "mytable_description"],
),
)
def test_select_selected_columns_ignores_text(self, label_style, expected):
stmt = select(table1.c.myid, text("whatever"), table1.c.description)
if label_style:
stmt = stmt.set_label_style(label_style)
eq_(stmt.selected_columns.keys(), expected)
def test_select_bundle_columns(self):
self.assert_compile(
select(
table1,
table2.c.otherid,
text("sysdate()"),
text("foo, bar, lala"),
).where(
and_(
text("foo.id = foofoo(lala)"),
text("datetime(foo) = Today"),
table1.c.myid == table2.c.otherid,
),
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, sysdate(), foo, bar, lala "
"FROM mytable, myothertable WHERE foo.id = foofoo(lala) AND "
"datetime(foo) = Today AND mytable.myid = myothertable.otherid",
)
class BindParamTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_positional(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), bindparam("whee", 7))
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bar=4, whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_positional_plus_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_literal_binds(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), whee="whee")
self.assert_compile(
t,
"select * from foo where lala=4 and hoho='whee'",
checkparams={},
literal_binds=True,
)
def _assert_type_map(self, t, compare):
map_ = {b.key: b.type for b in t._bindparams.values()}
for k in compare:
assert compare[k]._type_affinity is map_[k]._type_affinity
def test_typing_construction(self):
t = text("select * from table :foo :bar :bat")
self._assert_type_map(
t, {"foo": NullType(), "bar": NullType(), "bat": NullType()}
)
t = t.bindparams(bindparam("foo", type_=String))
self._assert_type_map(
t, {"foo": String(), "bar": NullType(), "bat": NullType()}
)
t = t.bindparams(bindparam("bar", type_=Integer))
self._assert_type_map(
t, {"foo": String(), "bar": Integer(), "bat": NullType()}
)
t = t.bindparams(bat=45.564)
self._assert_type_map(
t, {"foo": String(), "bar": Integer(), "bat": Float()}
)
def test_binds_compiled_named(self):
self.assert_compile(
text(
"select * from foo where lala=:bar and hoho=:whee"
).bindparams(bar=4, whee=7),
"select * from foo where lala=%(bar)s and hoho=%(whee)s",
checkparams={"bar": 4, "whee": 7},
dialect="postgresql",
)
def test_unique_binds(self):
# unique binds can be used in text() however they uniquify across
# multiple text() constructs only, not within a single text
t1 = text("select :foo").bindparams(bindparam("foo", 5, unique=True))
t2 = text("select :foo").bindparams(bindparam("foo", 10, unique=True))
stmt = select(t1, t2)
self.assert_compile(
stmt,
"SELECT select :foo_1, select :foo_2",
checkparams={"foo_1": 5, "foo_2": 10},
)
def test_binds_compiled_positional(self):
self.assert_compile(
text(
"select * from foo where lala=:bar and hoho=:whee"
).bindparams(bar=4, whee=7),
"select * from foo where lala=? and hoho=?",
checkparams={"bar": 4, "whee": 7},
dialect="sqlite",
)
def test_missing_bind_kw(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
foo=5,
bar=7,
)
def test_missing_bind_posn(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
bindparam("foo", value=5),
bindparam("bar", value=7),
)
def test_escaping_colons(self):
# test escaping out text() params with a backslash
self.assert_compile(
text(
r"select * from foo where clock='05:06:07' "
r"and mork='\:mindy'"
),
"select * from foo where clock='05:06:07' and mork=':mindy'",
checkparams={},
params={},
dialect="postgresql",
)
def test_escaping_double_colons(self):
self.assert_compile(
text(
r"SELECT * FROM pg_attribute WHERE "
r"attrelid = :tab\:\:regclass"
),
"SELECT * FROM pg_attribute WHERE " "attrelid = %(tab)s::regclass",
params={"tab": None},
dialect="postgresql",
)
def test_double_colons_dont_actually_need_escaping(self):
# this is news to me. bound param won't work but you can put the
# double colons in
self.assert_compile(
text(
r"SELECT * FROM pg_attribute WHERE "
r"attrelid = foo::regclass"
),
"SELECT * FROM pg_attribute WHERE " "attrelid = foo::regclass",
params={},
dialect="postgresql",
)
def test_text_in_select_nonfrom(self):
generate_series = text(
"generate_series(:x, :y, :z) as s(a)"
).bindparams(x=None, y=None, z=None)
s = select(
(func.current_date() + literal_column("s.a")).label("dates")
).select_from(generate_series)
self.assert_compile(
s,
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={"y": None, "x": None, "z": None},
)
self.assert_compile(
s.params(x=5, y=6, z=7),
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={"y": 6, "x": 5, "z": 7},
)
def test_escaping_percent_signs(self):
stmt = text("select '%' where foo like '%bar%'")
self.assert_compile(
stmt, "select '%' where foo like '%bar%'", dialect="sqlite"
)
self.assert_compile(
stmt, "select '%%' where foo like '%%bar%%'", dialect="mysql"
)
def test_percent_signs_literal_binds(self):
stmt = select(literal("percent % signs %%"))
self.assert_compile(
stmt,
"SELECT 'percent % signs %%' AS anon_1",
dialect="sqlite",
literal_binds=True,
)
self.assert_compile(
stmt,
"SELECT 'percent %% signs %%%%' AS anon_1",
dialect="mysql",
literal_binds=True,
)
class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic_toplevel_resultmap_positional(self):
t = text("select id, name from user").columns(
column("id", Integer), column("name")
)
col_pos = {col.name: idx for idx, col in enumerate(t.selected_columns)}
compiled = t.compile()
eq_(
compiled._create_result_map(),
{
"id": (
"id",
(t.selected_columns.id, "id", "id", "id"),
t.selected_columns.id.type,
col_pos["id"],
),
"name": (
"name",
(t.selected_columns.name, "name", "name", "name"),
t.selected_columns.name.type,
col_pos["name"],
),
},
)
def test_basic_toplevel_resultmap(self):
t = text("select id, name from user").columns(id=Integer, name=String)
col_pos = {col.name: idx for idx, col in enumerate(t.selected_columns)}
compiled = t.compile()
eq_(
compiled._create_result_map(),
{
"id": (
"id",
(t.selected_columns.id, "id", "id", "id"),
t.selected_columns.id.type,
col_pos["id"],
),
"name": (
"name",
(t.selected_columns.name, "name", "name", "name"),
t.selected_columns.name.type,
col_pos["name"],
),
},
)
def test_basic_subquery_resultmap(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.subquery()
)
stmt = select(table1.c.myid).select_from(
table1.join(t, table1.c.myid == t.c.id)
)
compiled = stmt.compile()
eq_(
compiled._create_result_map(),
{
"myid": (
"myid",
(table1.c.myid, "myid", "myid", "mytable_myid"),
table1.c.myid.type,
0,
)
},
)
def test_column_collection_ordered(self):
t = text("select a, b, c from foo").columns(
column("a"), column("b"), column("c")
)
eq_(t.selected_columns.keys(), ["a", "b", "c"])
def test_column_collection_pos_plus_bykey(self):
# overlapping positional names + type names
t = text("select a, b, c from foo").columns(
column("a"), column("b"), b=Integer, c=String
)
eq_(t.selected_columns.keys(), ["a", "b", "c"])
eq_(t.selected_columns.b.type._type_affinity, Integer)
eq_(t.selected_columns.c.type._type_affinity, String)
def _xy_table_fixture(self):
m = MetaData()
t = Table("t", m, Column("x", Integer), Column("y", Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return {
elem: key
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
}
def test_select_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = text("select x AS a, y AS b FROM t").columns(l1, l2)
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = text("select x AS a, y AS b FROM t").columns(l1, l2).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y)
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._xy_table_fixture()
x = t.c.x
ta = t.alias()
s = text("select ta.x, ta.y FROM t AS ta").columns(ta.c.x, ta.c.y)
mapping = self._mapping(s)
assert x not in mapping
def test_subquery_accessors(self):
t = self._xy_table_fixture()
s = text("SELECT x from t").columns(t.c.x)
self.assert_compile(
select(s.scalar_subquery()), "SELECT (SELECT x from t) AS anon_1"
)
self.assert_compile(
select(s.subquery()),
"SELECT anon_1.x FROM (SELECT x from t) AS anon_1",
)
def test_select_label_alt_name_table_alias_column(self):
t = self._xy_table_fixture()
x = t.c.x
ta = t.alias()
l1, l2 = ta.c.x.label("a"), ta.c.y.label("b")
s = text("SELECT ta.x AS a, ta.y AS b FROM t AS ta").columns(l1, l2)
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_cte(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.cte("t")
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"WITH t AS (select id, name from user) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, t WHERE mytable.myid = t.id",
)
def test_cte_recursive(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.cte("t", recursive=True)
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"WITH RECURSIVE t(id, name) AS (select id, name from user) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, t WHERE mytable.myid = t.id",
)
def test_unions(self):
s1 = text("select id, name from user where id > 5").columns(
id=Integer, name=String
)
s2 = text("select id, name from user where id < 15").columns(
id=Integer, name=String
)
stmt = union(s1, s2)
eq_(stmt.selected_columns.keys(), ["id", "name"])
self.assert_compile(
stmt,
"select id, name from user where id > 5 UNION "
"select id, name from user where id < 15",
)
def test_subquery(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.subquery()
)
stmt = (
select(table1.c.myid)
.select_from(table1.join(t, table1.c.myid == t.c.id))
.order_by(t.c.name)
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable JOIN "
"(select id, name from user) AS anon_1 "
"ON mytable.myid = anon_1.id ORDER BY anon_1.name",
)
def test_alias(self):
t = (
text("select id, name from user")
.columns(id=Integer, name=String)
.alias("t")
)
s = select(table1).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, (select id, name from user) AS t "
"WHERE mytable.myid = t.id",
)
def test_scalar_subquery(self):
t = text("select id from user").columns(id=Integer)
subq = t.scalar_subquery()
assert subq.type._type_affinity is Integer()._type_affinity
s = select(table1.c.myid, subq).where(table1.c.myid == subq)
self.assert_compile(
s,
"SELECT mytable.myid, (select id from user) AS anon_1 "
"FROM mytable WHERE mytable.myid = (select id from user)",
)
def test_build_bindparams(self):
t = text("select id from user :foo :bar :bat")
t = t.bindparams(bindparam("foo", type_=Integer))
t = t.columns(id=Integer)
t = t.bindparams(bar=String)
t = t.bindparams(bindparam("bat", value="bat"))
eq_(set(t.element._bindparams), {"bat", "foo", "bar"})
class TextErrorsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _test(self, fn, arg, offending_clause):
arg = util.to_list(arg)
assert_raises_message(
exc.ArgumentError,
r"Textual (?:SQL|column|SQL FROM) expression %(stmt)r should be "
r"explicitly declared (?:with|as) text\(%(stmt)r\)"
% {"stmt": util.ellipses_string(offending_clause)},
fn,
*arg,
)
def test_where(self):
self._test(select(table1.c.myid).where, "myid == 5", "myid == 5")
def test_column(self):
self._test(select, ["myid"], "myid")
def test_having(self):
self._test(select(table1.c.myid).having, "myid == 5", "myid == 5")
def test_from(self):
self._test(select(table1.c.myid).select_from, "mytable", "mytable")
class OrderByLabelResolutionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _test_exception(self, stmt, offending_clause, dialect=None):
assert_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / "
"DISTINCT etc. "
"Textual SQL "
"expression %r should be explicitly "
r"declared as text\(%r\)" % (offending_clause, offending_clause),
stmt.compile,
dialect=dialect,
)
def test_order_by_label(self):
stmt = select(table1.c.myid.label("foo")).order_by("foo")
self.assert_compile(
stmt, "SELECT mytable.myid AS foo FROM mytable ORDER BY foo"
)
def test_no_order_by_text(self):
stmt = select(text("foo")).order_by("foo")
with expect_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / ",
):
stmt.compile()
def test_order_by_colname(self):
stmt = select(table1.c.myid).order_by("name")
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable ORDER BY mytable.name"
)
def test_order_by_alias_colname(self):
t1 = table1.alias()
stmt = (
select(t1.c.myid)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("name")
)
self.assert_compile(
stmt,
"SELECT mytable_1.myid AS mytable_1_myid "
"FROM mytable AS mytable_1 ORDER BY mytable_1.name",
)
@testing.combinations(
((column("q") + 5).label("a"), "a", ()),
(column("q").op("+")(5).label("a"), "a", ()),
((column("q") + 5).label("a"), "a DESC", (desc,)),
(column("q").op("+")(5).label("a"), "a DESC", (desc,)),
)
def test_order_by_expr(self, case, expected, modifiers):
order_by = case
for mod in modifiers:
order_by = mod(order_by)
stmt = select(case).order_by(order_by)
col_expr = str(case)
self.assert_compile(
stmt, "SELECT %s AS a ORDER BY %s" % (col_expr, expected)
)
def test_order_by_named_label_from_anon_label(self):
s1 = select(table1.c.myid.label(None).label("foo"), table1.c.name)
stmt = s1.order_by("foo")
self.assert_compile(
stmt,
"SELECT mytable.myid AS foo, mytable.name "
"FROM mytable ORDER BY foo",
)
def test_order_by_outermost_label(self):
# test [ticket:3335], assure that order_by("foo")
# catches the label named "foo" in the columns clause only,
# and not the label named "foo" in the FROM clause
s1 = select(table1.c.myid.label("foo"), table1.c.name).alias()
stmt = select(s1.c.name, func.bar().label("foo")).order_by("foo")
self.assert_compile(
stmt,
"SELECT anon_1.name, bar() AS foo FROM "
"(SELECT mytable.myid AS foo, mytable.name AS name "
"FROM mytable) AS anon_1 ORDER BY foo",
)
def test_unresolvable_warning_order_by(self):
stmt = select(table1.c.myid).order_by("foobar")
self._test_exception(stmt, "foobar")
def test_distinct_label(self):
stmt = select(table1.c.myid.label("foo")).distinct("foo")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (foo) mytable.myid AS foo FROM mytable",
dialect="postgresql",
)
def test_distinct_label_keyword(self):
stmt = select(table1.c.myid.label("foo")).distinct("foo")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (foo) mytable.myid AS foo FROM mytable",
dialect="postgresql",
)
def test_unresolvable_distinct_label(self):
from sqlalchemy.dialects import postgresql
stmt = select(table1.c.myid.label("foo")).distinct("not a label")
self._test_exception(stmt, "not a label", dialect=postgresql.dialect())
def test_group_by_label(self):
stmt = select(table1.c.myid.label("foo")).group_by("foo")
self.assert_compile(
stmt, "SELECT mytable.myid AS foo FROM mytable GROUP BY foo"
)
def test_group_by_colname(self):
stmt = select(table1.c.myid).group_by("name")
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable GROUP BY mytable.name"
)
def test_unresolvable_warning_group_by(self):
stmt = select(table1.c.myid).group_by("foobar")
self._test_exception(stmt, "foobar")
def test_asc(self):
stmt = select(table1.c.myid).order_by(asc("name"), "description")
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable "
"ORDER BY mytable.name ASC, mytable.description",
)
def test_group_by_subquery(self):
stmt = select(table1).alias()
stmt = (
select(stmt)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.group_by("myid")
)
self.assert_compile(
stmt,
"SELECT anon_1.myid AS anon_1_myid, anon_1.name AS anon_1_name, "
"anon_1.description AS anon_1_description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS anon_1 "
"GROUP BY anon_1.myid",
)
def test_order_by_literal_col_quoting_one(self):
col = literal_column("SUM(ABC)").label("SUM(ABC)")
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)" FROM my_table ORDER BY "SUM(ABC)"',
)
def test_order_by_literal_col_quoting_two(self):
col = literal_column("SUM(ABC)").label("SUM(ABC)_")
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)_" FROM my_table ORDER BY '
'"SUM(ABC)_"',
)
def test_order_by_literal_col_quoting_one_explicit_quote(self):
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True))
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)" FROM my_table ORDER BY "SUM(ABC)"',
)
def test_order_by_literal_col_quoting_two_explicit_quote(self):
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True))
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)_" FROM my_table ORDER BY '
'"SUM(ABC)_"',
)
def test_order_by_func_label_desc(self):
stmt = select(func.foo("bar").label("fb"), table1).order_by(desc("fb"))
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS fb, mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY fb DESC",
)
def test_pg_distinct(self):
stmt = select(table1).distinct("name")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (mytable.name) mytable.myid, "
"mytable.name, mytable.description FROM mytable",
dialect="postgresql",
)
def test_over(self):
stmt = select(column("foo"), column("bar")).subquery()
stmt = select(
func.row_number().over(order_by="foo", partition_by="bar")
).select_from(stmt)
self.assert_compile(
stmt,
"SELECT row_number() OVER "
"(PARTITION BY anon_2.bar ORDER BY anon_2.foo) "
"AS anon_1 FROM (SELECT foo, bar) AS anon_2",
)
def test_union_column(self):
s1 = select(table1)
s2 = select(table1)
stmt = union(s1, s2).order_by("name")
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY name",
)
def test_union_label(self):
s1 = select(func.foo("hoho").label("x"))
s2 = select(func.foo("Bar").label("y"))
stmt = union(s1, s2).order_by("x")
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x",
)
def test_standalone_units_stringable(self):
self.assert_compile(desc("somelabel"), "somelabel DESC")
def test_columnadapter_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label("t1name"),
func.foo("hoho").label("x"),
]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta, anonymize_labels=True)
s1 = (
select(*[adapter.columns[expr] for expr in exprs])
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("myid", "t1name", "x")
)
assert_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / "
"DISTINCT etc. "
"Textual SQL "
"expression 't1name' should be explicitly "
r"declared as text\('t1name'\)",
s1.compile,
)
def test_columnadapter_non_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label("t1name"),
func.foo("hoho").label("x"),
]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta)
s1 = (
select(*[adapter.columns[expr] for expr in exprs])
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("myid", "t1name", "x")
)
# labels are maintained
self.assert_compile(
s1,
"SELECT mytable_1.myid AS mytable_1_myid, "
"mytable_1.name AS t1name, foo(:foo_1) AS x "
"FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x",
)
| sqlalchemy/sqlalchemy | test/sql/test_text.py | test_text.py | py | 38,430 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "sqlalchemy.sql.table",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.column",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "sql... |
71918011235 | import datetime
import re
def regex_replace(s, find, replace):
"""Find and replace using regular expressions
Args:
s (string): The string containing the text whose be replaced
find (string): The regex pattern
replace (string): The string whose replace the pattern
Returns:
string: The new string
"""
return re.sub(find, replace, s)
def elapsed_time(s):
"""Format the elapsed time since a post was published
Args:
s (string): The date and time when the post was created
Returns:
string: The formated elapsed time
"""
now = datetime.datetime.now()
created_at = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
elapsed = (now - created_at).total_seconds()
minute = 60
hour = minute * 60
day = hour * 24
month = day * 30
year = month * 12
if elapsed <= 1:
return 'há pouco'
elif elapsed < minute:
return 'há {0} segundos'.format(round(elapsed)) if round(elapsed) > 1 else 'há 1 segundo'
elif elapsed < hour:
return 'há {0} minutos'.format(round(elapsed / minute)) if round(elapsed / minute) > 1 else 'há 1 minuto'
elif elapsed < day:
return 'há {0} horas'.format(round(elapsed / hour)) if round(elapsed / hour) > 1 else 'há 1 hora'
elif elapsed < month:
return 'há {0} dias'.format(round(elapsed / day)) if round(elapsed / day) > 1 else 'há 1 dia'
elif elapsed < year:
return 'há {0} meses'.format(round(elapsed / month)) if round(elapsed / month) > 1 else 'há 1 mês'
return 'há {0} anos'.format(round(elapsed / year)) if round(elapsed / year) > 1 else 'há 1 ano'
| GrindLabs/lsmakeupstudio | lsmakeupstudio/utils/template.py | template.py | py | 1,679 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime... |
31982254831 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import (
Persona,
CallesIndependencia,
)
from .forms import (
PersonaForm,
PersonaVerificacionForm
)
# AUTOCOMPLETADO CALLES
from django.http import JsonResponse
#ENTRADA A PREGUNTA USUARIO
@login_required
def persona(request):
verificador_de_personas = PersonaVerificacionForm()
if request.method == 'POST':
verificador_de_personas = PersonaVerificacionForm(request.POST)
if verificador_de_personas.is_valid():
# INCLUYE PUNTOS Y GIONES AL RUT
tipo_identificacion_ver = verificador_de_personas.cleaned_data.get('tipo_identificacion')
numero_identificacion_ver = verificador_de_personas.cleaned_data.get('numero_identificacion')
if tipo_identificacion_ver == "RUT":
ni = numero_identificacion_ver
if len(ni)==0:
None
elif len(ni)>10:
rut = ni[:-10]+'.'+ni[-10:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
elif len(ni)==9:
rut = ni[-10:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
else:
rut = ni[-9:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
# BUSCA PERSONA SI ES QUE EXISTE
persona_buscada = Persona.objects.filter(numero_identificacion=numero_identificacion_ver)
if persona_buscada:
pk = persona_buscada[0].id
return redirect('comprobanteventa-create', pk=pk)
else:
return redirect('persona-crear')
context = {
'v_persona': verificador_de_personas,
}
return render(request, 'core/persona.html', context)
#FORMULARIO DE CREACION DE PERSONA
@login_required
def persona_crear(request):
persona = PersonaForm()
if request.method == 'POST':
form = PersonaForm(request.POST)
if form.is_valid():
# INCLUYE PUNTOS Y GIONES AL RUT
tipo_identificacion_ver = form.cleaned_data.get('tipo_identificacion')
numero_identificacion_ver = form.cleaned_data.get('numero_identificacion')
if tipo_identificacion_ver == "RUT":
ni = numero_identificacion_ver
if len(ni)==0:
None
elif len(ni)>10:
rut = ni[:-10]+'.'+ni[-10:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
elif len(ni)==9:
rut = ni[-10:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
else:
rut = ni[-9:-7]+'.'+ni[-7:-4]+'.'+ni[-4:-1]+'-'+ni[-1]
numero_identificacion_ver = rut
form.save()
messages.success(request, f'La persona fue creado con exito')
persona_buscada = Persona.objects.get(numero_identificacion=numero_identificacion_ver)
pk = persona_buscada.id
return redirect('comprobanteventa-create', pk=pk)
context = {
'persona': persona,
}
return render(request, 'core/persona_form.html', context)
| independencia-datalake/datalake | datalake/core/views.py | views.py | py | 3,396 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "forms.PersonaVerificacionForm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "forms.PersonaVerificacionForm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Persona.objects.filter",
"line_number": 43,
"usage_type": "call"
},
... |
32961592369 | import base64
import json
import re
from datetime import datetime, timezone, timedelta
from typing import Optional, Any
from .log import Log
def lowercase(string): return str(string).lower()
def uppercase(string): return str(string).upper()
def snakecase(string):
string = re.sub(r"[\-\.\s]", '_', str(string))
if not string:
return string
return lowercase(string[0]) + re.sub(r"[A-Z]", lambda matched: '_' + lowercase(matched.group(0)), string[1:])
def camelcase(string):
string = re.sub(r"\w[\s\W]+\w", '', str(string))
if not string:
return string
return lowercase(string[0]) + re.sub(r"[\-_\.\s]([a-z])", lambda matched: uppercase(matched.group(1)), string[1:])
def capitalcase(string: str):
string = str(string)
if not string:
return string
return uppercase(string[0]) + string[1:]
def pascalcase(string: str): return capitalcase(camelcase(string))
def titlecase(string): return ' '.join([capitalcase(word) for word in snakecase(string).split("_")])
def clean_camelcase(key: str):
use_key = camelcase(key)
if use_key[-1] == '_':
use_key = use_key[0:-1]
return use_key
def ex_message(e: Exception):
if hasattr(e, 'message'):
return e.message
return str(e)
def log(o: Any):
print(o)
return o
def index_of(target: str, needle: str):
try:
return target.index(needle)
except ValueError:
return -1
def last_index_of(target: str, needle: str):
try:
return target.rindex(needle)
except ValueError:
return -1
def left_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = index_of(str_val, needle)
return str_val if pos == -1 else str_val[:pos]
def right_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = index_of(str_val, needle)
return str_val if pos == -1 else str_val[pos + len(needle):]
def last_left_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = last_index_of(str_val, needle)
return str_val if pos == -1 else str_val[:pos]
def last_right_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = last_index_of(str_val, needle)
return str_val if pos == -1 else str_val[pos + len(needle):]
def split_on_first(s: Optional[str], c: str):
if str is None or str == "":
return [s]
pos = index_of(s, c)
if pos >= 0:
return [s[:pos], s[pos + 1:]]
return [s]
def split_on_last(s: Optional[str], c: str):
if str is None or str == "":
return [s]
pos = last_index_of(s, c)
if pos >= 0:
return [s[:pos], s[pos + 1:]]
return [s]
def to_timespan(duration: timedelta):
total_seconds = duration.total_seconds()
whole_seconds = total_seconds // 1
seconds = whole_seconds
sec = int(seconds % 60 if seconds >= 60 else seconds)
seconds = seconds // 60
min = int(seconds % 60)
seconds = seconds // 60
hours = int(seconds % 60)
days = seconds // 24
remaining_secs = float(sec + (total_seconds - whole_seconds))
sb = ["P"]
if days > 0:
sb.append(f"{days}D")
if days == 0 or hours + min + sec + remaining_secs > 0:
sb.append("T")
if hours > 0:
sb.append(f"{hours}H")
if min > 0:
sb.append(f"{min}M")
if remaining_secs > 0:
sec_fmt = "{:.7f}".format(remaining_secs)
sec_fmt = sec_fmt.rstrip('0')
sec_fmt = sec_fmt.rstrip('.')
sb.append(sec_fmt)
sb.append("S")
elif len(sb) == 2: # PT
sb.append("0S")
xsd = ''.join(sb)
# print(f"XSD: {xsd}, {days}:{hours}:{min}:{remaining_secs}")
return xsd
def from_timespan(s: Optional[str]):
if s is None:
return None
days = 0
hours = 0
minutes = 0
seconds = 0
ms = 0.0
if s[0] != "P":
raise ValueError(f"{s} is not a valid XSD Duration")
s = s[1:] # strip P
t = split_on_first(s, 'T')
has_time = len(t) == 2
d = split_on_first(t[0], 'D')
if len(d) == 2:
days = int(d[0])
if has_time:
h = split_on_first(t[1], 'H')
if len(h) == 2:
hours = int(h[0])
m = split_on_first(h[len(h) - 1], 'M')
if len(m) == 2:
minutes = int(m[0])
s = split_on_first(m[len(m) - 1], 'S')
if len(s) == 2:
ms = float(s[0])
seconds = int(ms)
ms -= seconds
# print(f"\n\ntimedelta({str})[{has_time}] = {hours}:{minutes}:{seconds}\n\n")
return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds, milliseconds=int(ms * 1000))
_MIN_UTC_DATE = datetime.min.replace(tzinfo=timezone.utc)
_MIN_EPOCH = _MIN_UTC_DATE.timestamp()
_MAX_UTC_DATE = datetime.max.replace(tzinfo=timezone.utc)
def to_datetime(date: datetime):
try:
return f"/Date({int(date.timestamp() * 1000)})/"
except Exception as e:
Log.debug(f"to_datetime({date}): e")
return None
def from_datetime(json_date: str):
if json_date.startswith("/Date("):
epoch_and_zone = left_part(right_part(json_date, "("), ")")
epoch_str = epoch_and_zone
if index_of(epoch_and_zone[1:], '-') >= 0:
epoch_str = last_left_part(epoch_and_zone, '-')
if index_of(epoch_and_zone[1:], '+') >= 0:
epoch_str = last_left_part(epoch_and_zone, '+')
epoch = int(epoch_str)
try:
return datetime.fromtimestamp(epoch / 1000, timezone.utc)
except Exception as e:
if epoch < _MIN_EPOCH:
return _MIN_UTC_DATE
else:
return _MAX_UTC_DATE
# need to reduce to 6f precision and remove trailing Z
has_sec_fraction = index_of(json_date, '.') >= 0
is_utc = json_date.endswith('Z')
if is_utc:
json_date = json_date[0:-1]
if has_sec_fraction:
sec_fraction = last_right_part(json_date, '.')
tz = ''
if '+' in sec_fraction:
tz = '+' + right_part(sec_fraction, '+')
sec_fraction = left_part(sec_fraction, '+')
elif '-' in sec_fraction:
sec_fraction = left_part(sec_fraction, '-')
if len(sec_fraction) > 6:
json_date = last_left_part(json_date, '.') + '.' + sec_fraction[0:6] + tz
if is_utc:
return datetime.fromisoformat(json_date).replace(tzinfo=timezone.utc)
else:
return datetime.fromisoformat(json_date)
def to_bytearray(value: Optional[bytes]):
if value is None:
return None
return base64.b64encode(value).decode('ascii')
def from_bytearray(base64str: Optional[str]):
return base64.b64decode(base64str)
def from_base64url_safe(input_str: str):
output = input_str
output = output.replace('-', '+')
output = output.replace('_', '/')
pad = len(output) % 4
if pad == 2:
output += "=="
elif pad == 3:
output += "="
elif pad != 0:
raise ValueError("Illegal base46url string!")
return base64.b64decode(output)
def _decode_base64url_payload(payload: str):
payload_bytes = from_base64url_safe(payload)
payload_json = payload_bytes.decode('utf-8')
return json.loads(payload_json)
def inspect_jwt(jwt: str):
head = _decode_base64url_payload(left_part(jwt, '.'))
body = _decode_base64url_payload(left_part(right_part(jwt, '.'), '.'))
exp = int(body['exp'])
return head, body, datetime.fromtimestamp(exp, timezone.utc)
| ServiceStack/servicestack-python | servicestack/utils.py | utils.py | py | 7,623 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 27,
"usage_type"... |
73034166753 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import win_disk
class MockKernel32(object):
'''
Mock windll class
'''
def __init__(self):
pass
@staticmethod
def GetLogicalDrives():
'''
Mock GetLogicalDrives method
'''
return 1
class MockWindll(object):
'''
Mock windll class
'''
def __init__(self):
self.kernel32 = MockKernel32()
class MockCtypes(object):
'''
Mock ctypes class
'''
def __init__(self):
self.windll = MockWindll()
win_disk.ctypes = MockCtypes()
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinDiskTestCase(TestCase):
'''
Test cases for salt.modules.win_disk
'''
# 'usage' function tests: 1
def test_usage(self):
'''
Test if it return usage information for volumes mounted on this minion.
'''
self.assertDictEqual(win_disk.usage(),
{'A:\\': {'available': None,
'1K-blocks': None,
'used': None,
'capacity': None,
'filesystem': 'A:\\'}})
if __name__ == '__main__':
from integration import run_tests
run_tests(WinDiskTestCase, needs_daemon=False)
| shineforever/ops | salt/tests/unit/modules/win_disk_test.py | win_disk_test.py | py | 1,683 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "salttesting.helpers.ensure_in_syspath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "salt.modules.win_disk.ctypes",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "salt.modules.win_disk",
"line_number": 54,
"usage_type": "name"
... |
275881198 | from git import Repo
import datetime
import time
from datetime import date, timedelta
import json
import requests
now = datetime.datetime.now()
clean_now = now.strftime("%Y-%b-%d, %A %I:%M:%S")
message = "Commit made on: "
full = message + clean_now
working_tree_dir = '/home/ec2-user/crontGit/ltserge.github.io'
file = "ltserge.github.io/pf24h.txt"
COMMIT_MESSAGE = full
# unix time for api
unix_time = int(time.time())
print('--------------')
print(COMMIT_MESSAGE)
print('Unix Code: ')
print(unix_time)
# creating the date object of today's date
todays_date = date.today()
# print('Today Date: ' + todays_date)
value = 0
value_raw = 0
# insert your API key here
API_KEY = '20yrbsOgJltKYbi4xNzPWrkR9WT'
# make API request
try:
res = requests.get('https://api.glassnode.com/v1/metrics/derivatives/futures_funding_rate_perpetual_all',
params={'a': 'BTC', 'api_key': API_KEY, 'f': 'JSON', 'i': '24h', 's': unix_time})
except requests.ConnectionError:
print('The Value from Glassnode did not arrive')
json_str = json.loads(res.text)
value_raw = json_str[0]['o']['mean']
value_raw = value_raw * 100
value = round(value_raw, 3)
print('Daily Perp Funding All Exch: ')
print(value)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
day = yesterday.day
year = yesterday.year
month = yesterday.month
def alter_file(file):
with open(file, "r") as in_file:
buf = in_file.readlines()
with open(file, "w") as out_file:
for line in buf:
if line == "//Include Above\n":
line = f"d := t == timestamp({year}, {month}, {day}, 0, 0, 0) ? {value} : d\n" + line
out_file.write(line)
def git_push():
try:
repo = Repo('/home/ec2-user/crontGit/ltserge.github.io')
repo.git.add(update=True)
repo.index.commit(COMMIT_MESSAGE)
origin = repo.remote(name='origin')
origin.push()
print('--------------')
except:
print('Some error occured while pushing the code')
print('--------------')
alter_file(file)
git_push()
| LtSerge/ltserge.github.io | ltserge.py | ltserge.py | py | 2,098 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.date.to... |
28144934766 | from ignite.metrics import Metric
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
import torch
EPSILON_FP16 = 1e-5
class MultiClassAccuracy(Metric):
def __init__(self, threshold=0.5, num_classes=1000, output_transform=lambda x: x):
self.correct = None
self.total = None
self.num_classes = num_classes
self.threshold = threshold
super(MultiClassAccuracy, self).__init__(output_transform=output_transform)
@reinit__is_reduced
def reset(self):
self.correct = torch.zeros(self.num_classes)
self.total = torch.zeros(self.num_classes)
super(MultiClassAccuracy, self).reset()
@reinit__is_reduced
def update(self, output):
y_pred, y = output
y = y.detach().cpu()
y_p = y_pred.detach().float().cpu()>=self.threshold
correct_matches = (y_p==y)
for cm in range(correct_matches.shape[1]):
self.correct[cm] += (correct_matches[:,cm] & (y[:,cm]==1.0)).sum()
self.total[cm] += (y[:,cm]==1.0).sum()
@sync_all_reduce("correct", "total")
def compute(self):
no_label_classes = self.total != 0
return torch.mean(self.correct[no_label_classes] / self.total[no_label_classes]) | ryanwongsa/ECCV22_Chalearn-MSSL | metrics/multiclassaccuracy.py | multiclassaccuracy.py | py | 1,276 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "ignite.metrics.Metric",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ignite.metrics.metric.re... |
32029297558 | from PIL import Image
import numpy as np
from tqdm import tqdm
import pickle
def load_data(file):
try:
with open(file, 'rb') as fo:
data = pickle.load(fo)
return data
except:
with open(file, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
data = u.load()
return data
a = load_data('FC100_train.pickle')
for i, img in tqdm(enumerate(a['data']), total=len(a['data'])):
img = Image.fromarray(img, 'RGB')
img.save('images/base_%.3d_%.5d.png' % (a['labels'][i], i))
a = load_data('FC100_test.pickle')
for i, img in tqdm(enumerate(a['data']), total=len(a['data'])):
img = Image.fromarray(img, 'RGB')
img.save('images/novel_%.3d_%.5d.png' % (a['labels'][i], i))
a = load_data('FC100_val.pickle')
for i, img in tqdm(enumerate(a['data']), total=len(a['data'])):
img = Image.fromarray(img, 'RGB')
img.save('images/val_%.3d_%.5d.png' % (a['labels'][i], i))
| OscarcarLi/meta-analysis-classification | datasets/filelists/FC100/process.py | process.py | py | 974 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pickle._Unpickler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"lin... |
5558903910 | import os, re, json
import xml.etree.ElementTree as ET
FACULTIES = {"Факультет інформатики", "Факультет економічних наук"}
# these are hardcoded, but idk how to figure them out from the file
DOCSPECS = {"Факультет економічних наук": {
"ек": "Економіка",
"мар": "Маркетинг",
"мен": "Менеджмент",
"фін": "Фінанси, банківська справа та страхування"}}
# П'ятниця is kinda broken, has different apostrophes in different files
DAYS = {"Понеділок", "Вівторок", "Середа", "Четвер", "П`ятниця", "’ятниця", "Субота", "Неділя"}
TIMEREGEXP = "([01]?[0-9]|2[0-4])[:.]([0-5]\d)-([01]?[0-9]|2[0-4])[:.]([0-5]\d)"
LESSONORDER = ("subject,teacher", "group", "weeks", "location")
# that's what the innocent-looking w: actually parses to
WPREFIX = "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}"
INDIR = "input"
OUTFILE = "schedule.json"
# stolen from stackoverflow
def merge_dicts(tgt, enhancer):
for key, val in enhancer.items():
if key not in tgt:
tgt[key] = val
continue
if isinstance(val, dict):
merge_dicts(tgt[key], val)
else:
tgt[key] = val
return tgt
def parseTSV(fd) -> dict:
dayindex = 0
output = {}
root = output
dayroot = root
curtime = ""
curlessons = []
pastheader = False
for line in fd.readlines():
for t in line.split("\t"):
t = t.strip()
if not t: continue
# print(t)
# continue
if not pastheader:
if t in FACULTIES:
output[t] = {}
root = root[t]
elif t.startswith("Спеціальність"):
spl = t.split('"')
# year
root[spl[2][2]] = {}
root = root[spl[2][2]]
# specialty
spec = spl[1].strip()
root[spec] = {}
root = root[spec]
if t in DAYS:
root[t] = {}
dayroot = root[t]
pastheader = True
# print(t)
elif pastheader:
# switch time if encountered
if re.match(TIMEREGEXP, t):
curlessons = [{}]
dayroot[t] = curlessons
dayindex = 0
continue
# reset the lesson here to not add empty dicts
if dayindex >= len(LESSONORDER):
curlessons.append({})
dayindex = 0
# separate subject and teacher
if dayindex == 0:
spl = t.split(",", 1)
# print(t)
curlessons[-1]["subject"] = spl[0].strip()
curlessons[-1]["teacher"] = spl[1].strip()
# convert weeks to a list of numbers
elif dayindex == 2:
weeks = []
nums = t.split(",")
for n in nums:
r = n.split("-")
# print(r)
if len(r) == 1:
weeks.append(int(r[0]))
else:
weeks.extend(list(range(int(r[0]), int(r[1]) + 1)))
curlessons[-1][LESSONORDER[dayindex]] = weeks
# handle everything else
else:
# print(dayindex, t)
curlessons[-1][LESSONORDER[dayindex]] = t
dayindex += 1
return output
def getXMLBeforeTable(fd, parser):
    """Scan WordprocessingML lines from *fd* until the first table opens.

    Returns a (faculty, year, specs) triple gathered from the <w:t> text
    runs that precede the <w:tbl> element.  The same *parser* must then be
    reused for the table itself so no events are lost.
    """
    faculty, year = "", ""
    specs = []
    expecting_year = False
    for chunk in fd:
        parser.feed(chunk)
        for event, node in parser.read_events():
            name = node.tag.removeprefix(WPREFIX)
            if event == "start":
                if name == "tbl":
                    # The schedule table begins here: stop scanning.
                    return (faculty, year, specs)
                continue
            text = node.text
            if not text or name != "t":
                continue
            if text in FACULTIES:
                faculty = text
                if faculty in DOCSPECS:
                    specs.extend(DOCSPECS[faculty].values())
            elif text.startswith("Спеціальність"):
                # The next text run carries the year digit.
                expecting_year = True
            elif expecting_year:
                # if xml is used for the other type, look for specialty here
                year = text[-6]
                expecting_year = False
    return (faculty, year, specs)
def XMLTableRowGen(fd, parser):
    """Yield table rows (lists of stripped cell strings) from a
    WordprocessingML stream.

    Text runs are accumulated tab-separated into the current cell; a cell
    is flushed on </w:tc> and a row is yielded on </w:tr>.
    """
    cells = []
    text_buf = ""
    for chunk in fd:
        parser.feed(chunk)
        for event, node in parser.read_events():
            if event == "start":
                continue
            name = node.tag.removeprefix(WPREFIX)
            if name == "tr":
                yield cells
                cells = []
            elif name == "tc":
                cells.append(text_buf.strip())
                text_buf = ""
            # Independent check, mirroring the original: a </w:t> end event
            # appends its text to the in-progress cell.
            if name == "t" and node.text:
                text_buf += node.text + "\t"
def parseXML(fd):
"""Parse the XML flavour of the schedule (one faculty/year document whose
single table is shared by several specialties) into the nested dict:
faculty -> year -> specialty -> day -> time -> [lesson dicts].

NOTE(review): leading indentation was lost in this dump; the nesting of
the statements below is implied by the logic, not by the layout shown.
"""
parser = ET.XMLPullParser(["start", "end"])
output = {}
root = output
# Header yields (faculty, year, [specialties]) collected before the table.
header = getXMLBeforeTable(fd, parser)
faculty = header[0]
root[header[0]] = {}
root = root[header[0]]
root[header[1]] = {}
root = root[header[1]]
for spec in header[2]:
root[spec] = {}
curday = ""
curtime = ""
rows = XMLTableRowGen(fd, parser)
# skip the header
next(rows)
for row in rows:
# Column 0 carries the day name only on the first row of each day.
if row[0]:
curday = row[0]
for spdict in root.values():
spdict[row[0]] = {}
# Column 1 likewise carries the time slot only when it changes.
if row[1]:
curtime = row[1]
for spdict in root.values():
spdict[curday][row[1]] = []
# some lessons are empty, even if they have the week or time
if not row[2]: continue
# separate subject, teacher, specialty
# A parenthesised chunk in column 2, when present, lists specialty codes.
before = row[2].find("(")
after = row[2].rfind(")")
if before >= 0:
subject = row[2][:before]
teacher = row[2][after+1:]
else:
subject = row[2].split("\t")[0]
teacher = row[2].split("\t")[1]
subject = subject.strip().replace("\t", "")
teacher = teacher.strip().replace("\t", "")
# convert weeks to number range
weeks = []
nums = row[4].split(",")
for n in nums:
r = n.split("-")
if len(r) == 1:
weeks.append(int(r[0]))
else:
weeks.extend(list(range(int(r[0]), int(r[1]) + 1)))
# save the data
# A lesson without an explicit specialty list applies to every specialty.
for spcode in DOCSPECS[faculty]:
if before < 0 or spcode in row[2][before:after]:
outdict = {}
root[DOCSPECS[faculty][spcode]][curday][curtime].append(outdict)
outdict["subject"] = subject
outdict["teacher"] = teacher
outdict["weeks"] = weeks
outdict["group"] = row[3].replace("\t", "")
outdict["location"] = row[5].replace("\t", "")
return output
# --- entry point: parse every schedule file found in the input folder -------
workdir = os.path.abspath(os.path.dirname(__file__))
indir = os.path.join(workdir, INDIR)
output = {}
print("\n\n\n")
# BUG FIX: the original listed and stat-checked files with the *relative*
# INDIR while joining result paths onto the absolute `indir`, so the script
# only worked when run from its own directory.  Use `indir` consistently.
for fn in [os.path.join(indir, f) for f in os.listdir(indir) if os.path.isfile(os.path.join(indir, f))]:
    if fn.endswith(".tsv"):
        with open(fn, encoding="utf-8") as infd:
            merge_dicts(output, parseTSV(infd))
    if fn.endswith(".xml"):
        with open(fn, encoding="utf-8") as infd:
            merge_dicts(output, parseXML(infd))
with open(os.path.join(workdir, OUTFILE), "w", encoding="utf-8") as outfd:
json.dump(output, outfd, ensure_ascii=False, indent="\t") | Doodlinka/FidoTask | scheduleParser.py | scheduleParser.py | py | 8,036 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.match",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.XMLPullParser",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "... |
27155993930 | import ast
import json
from http.server import BaseHTTPRequestHandler
from it.inspector import Inspector
from it.session import Session
from it.utils import Group, logger
class InspectorServer(BaseHTTPRequestHandler):
    """HTTP endpoint that runs `it`'s single-shot inspection over Python
    source POSTed as JSON: {"source": "<python code>"}.
    """

    def do_GET(self, *args, **kwargs):
        # GET carries no payload to inspect; point callers at POST.
        self.respond(200, message="use post method")

    def do_POST(self, *args, **kwargs):
        content_length = int(self.headers.get("Content-Length", "-1"))
        body = self.rfile.read(content_length)
        # BUG FIX: the original logged the literal text "Got this: {}" and
        # never interpolated the payload.  Use logging's lazy %-style
        # arguments so the body is rendered only when the record is emitted.
        logger.info("Got this: %s", body)
        try:
            body = json.loads(body.decode())
        except json.JSONDecodeError:
            logger.exception("Couldn't parse body")
            return self.fail("Request body should be JSON!")
        source = body.get("source")
        if source is None:
            logger.exception("Missing body item")
            return self.fail("Request body should contain a source field!")
        try:
            source = ast.parse(source)
        except (SyntaxError, TypeError) as exc:
            logger.exception("Couldn't parse source")
            return self.fail(
                message=f"Couldn't parse the source code. {exc!r}"
            )
        session = Session()
        session.start()
        inspection = session.single_inspection(source)
        return self.respond(
            status="success",
            result=dict(session.group_by(inspection, group=Group.LINENO)),
        )

    def _respond(self, code):
        # Emit only the status line plus (CORS-augmented) headers.
        self.send_response(code)
        self.end_headers()

    def respond(self, code=200, **data):
        """Send *data* as a JSON body with the given status code."""
        self._respond(code)
        self.wfile.write(json.dumps(data).encode())

    def fail(self, message, code=400):
        self.respond(code=code, status="fail", message=message)

    def end_headers(self):
        # Permissive CORS and no caching on every response.
        # NOTE(review): only GET is advertised in Allow-Methods although the
        # API is POST-based — confirm whether POST should be listed too.
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Methods", "GET")
        self.send_header(
            "Cache-Control", "no-store, no-cache, must-revalidate"
        )
        return super().end_headers()
| three-headed-giant/it | it/server/handler.py | handler.py | py | 2,035 | python | en | code | 79 | github-code | 1 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "it.utils.logger.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "it.utils.logger",
"line_number": 17,
"usage_type": "name"
},
{
"api_name"... |
25597986377 |
# coding: utf-8
# In[2]:
import argparse
import pandas as pd
import numpy as np
def main():
    """CLI entry point: dispatch to a named command with three positional
    string arguments (start date, end date, commodity type).

    SECURITY FIX: the original called eval() on the user-supplied function
    name, which executes arbitrary Python expressions from the command
    line.  Dispatch through an explicit whitelist instead.
    """
    # Parse arguments from command line
    parser = argparse.ArgumentParser()
    # Set up required arguments this script
    parser.add_argument('function', type=str, help='function to call')
    parser.add_argument('start_date', type=str, help='first argument')
    parser.add_argument('end_date', type=str, help='second argument')
    parser.add_argument('commodity_type', type=str, help='third argument')
    # Parse the given arguments
    args = parser.parse_args()
    # Only functions listed here may be invoked from the command line.
    commands = {'getCommodityPrice': getCommodityPrice}
    try:
        command = commands[args.function]
    except KeyError:
        # parser.error prints usage and exits with status 2.
        parser.error("unknown function: {}".format(args.function))
    command(args.start_date, args.end_date, args.commodity_type)
def getCommodityPrice(start_date, end_date, commodity_type):
    """Print '<type> <mean> <variance>' for the Price column of the
    commodity's CSV file ('gold.csv' or, for anything else, 'silver.csv').

    NOTE(review): start_date and end_date are accepted but never used —
    presumably they were meant to filter rows; confirm with callers.
    """
    source = 'gold.csv' if commodity_type == 'gold' else 'silver.csv'
    prices = pd.read_csv(source, sep=';')['Price']
    mean = prices.mean()
    # numpy's var uses ddof=0 (population variance), unlike pandas' default.
    variance = np.var(prices)
    print(commodity_type + " " + str(mean) + " " + str(variance))
if __name__ == '__main__':
main()
# In[ ]:
| jayacl5/bigdatafed | getCommodityPrice.py | getCommodityPrice.py | py | 1,201 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.var",
... |
35042213128 | from django.contrib import admin
from .forms import CustomUserCreationForm, AdminUserChangeForm
from .models import CustomUser
from django.contrib.auth.admin import UserAdmin
# Register your models here.
class CustomUserAdmin(UserAdmin):
    """Django admin configuration for the project's custom user model."""

    # Forms used by the admin "add" and "change" pages respectively.
    add_form = CustomUserCreationForm
    form = AdminUserChangeForm
    model = CustomUser
    # Columns shown on the changelist page.
    list_display = ("email", "username")
    # Single unnamed section exposing credentials plus org membership.
    fieldsets = (
        (None, {"fields": ("username", "password", "email", "organizations")}),
    )


admin.site.register(CustomUser, CustomUserAdmin)
| kjaymiller/diversity-orgs-django | accounts/admin.py | admin.py | py | 716 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "django.contrib.auth.admin.UserAdmin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "forms.CustomUserCreationForm",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "forms.AdminUserChangeForm",
"line_number": 11,
"usage_type": "name"
},
... |
33159388072 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import fitz
import img2pdf
class WqxtPipeline(object):
"""Scrapy pipeline: when the spider closes, join the downloaded page
images into a single PDF and attach the table of contents.

NOTE(review): leading indentation was lost in this dump; the nesting of
the statements below (e.g. whether print('已删除') sits inside the cleanup
loop) is implied by the logic, not by the layout shown.
"""
def process_item(self, item, spider):
# Items need no per-item processing; hand them straight back to Scrapy.
return item
def open_spider(self, spider):
print('爬虫开始了...')
def close_spider(self, spider):
# All pages are on disk by now; build the book once at shutdown.
self.img_to_pdf(spider)
print('爬虫结束了...')
def img_to_pdf(self, spider):
"""Convert <path>/0.jpeg..<pages-1>.jpeg into <book_name>.pdf with a
TOC, then remove the intermediate PDF and the page images."""
path = spider.path
if path:
book_name = spider.book_name
# Trailing underscore marks the TOC-less intermediate file.
pdf_path_ = "{}/{}_.pdf".format(path, book_name)
pdf_path = "{}/{}.pdf".format(path, book_name)
print(pdf_path_)
with open(pdf_path_, "wb") as f:
img_list = []
for img_name in range(0, spider.pages):
# Rebinds the loop index to the page's image file name.
img_name = "%s/%s.jpeg" % (path, img_name)
img_list.append(img_name)
print(img_name)
pfn_bytes = img2pdf.convert(img_list)
f.write(pfn_bytes)
print("转换完成")
# Re-open with PyMuPDF solely to write the outline.
# NOTE(review): setToC is the legacy PyMuPDF name (set_toc in newer
# releases) — confirm the pinned fitz version.
doc = fitz.open(pdf_path_)
toc = spider.toc
doc.setToC(toc)
doc.save(pdf_path)
doc.close()
print('添加目录完成')
print(pdf_path_, pdf_path)
if (os.path.exists(pdf_path_)):
os.remove(pdf_path_)
# Delete the now-redundant per-page JPEGs.
for img_name in range(0, spider.pages):
img_name = "%s/%s.jpeg" % (path, img_name)
if (os.path.exists(img_name)):
os.remove(img_name)
print('已删除')
| ithomia/wenquan_scrapy | wqxt/pipelines.py | pipelines.py | py | 1,713 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "img2pdf.convert",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "fitz.open",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
71704829473 | from __future__ import print_function
import sys
import re
import argparse
p = argparse.ArgumentParser( description="Read ascii index "
"and output a list of registers and descriptions." )
p.add_argument("-i",required=True, help="input file",dest='inxfile')
p.add_argument("-o", help="output file name", dest='outf' )
a = p.parse_args()
with open (a.inxfile, 'r' ) as l :
inx = l.read()
l.close()
# ' 1.3 Peripheral availability. . . . . . . . 48'
#rse = re.compile( r'(\d+.*?\s+\.\s+\d+)', re.MULTILINE | re.DOTALL )
rse = re.compile( r'(\d+.*?\s+\d+$)', re.MULTILINE | re.DOTALL )
inx = re.sub( r'\n\s+(?=[A-Za-z\(])'," ",inx )
rs = rse.findall( inx )
inxrow = re.compile( r'(\d+.*?)\s+(.*?)(\d+)$' )
regi = {}
regs = []
sections = []
for reg in rs :
reg = re.sub( r'[\s\n]+', " ", reg )
m = inxrow.match(reg)
if m is None :
print("Couldn't fit {0}".format(reg))
continue
s = m.group(1)
t = m.group(2)
p = m.group(3)
t = re.sub( r'\.', "", t )
t = re.sub( r'\s+', " ", t )
t = re.sub( r'\s+$', "", t)
t = re.sub( r'&', '&', t)
t = re.sub( r'<', '<', t)
t = re.sub( r'>', '>', t)
regline = re.compile( r'register.*\(([A-Za-z0-9_]+)\)' )
m = regline.search(t)
if m is None :
sections.append ("<section name='%s' page='%s'>%s</section>" %(s,p,t))
else:
r = m.group(1)
if not r in regi :
regs.append(r)
regi[r] = t
else:
if len(t) < len(regi[r]) :
regi[r] = t
sections.append( "<section name='%s' page='%s' reg='%s'>%s</section>" %
( s, p, r,t) )
with (sys.stdout if a.outf is None or a.outf=='-' else open(a.outf,'w')) as o:
o.write ("<index>\n <sections>\n")
for s in sections :
o.write(" %s\n" % s)
o.write (" </sections>\n<registers>\n")
for r in regs :
o.write (" <register name='%s'>%s</register>\n" % ( r, regi[r] ))
o.write (" </registers>\n</index>\n")
o.close()
| ashima/embedded-STM32F-lib | readDocs/scripts/regsOfindex.py | regsOfindex.py | py | 1,939 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "re.DOTALL",
"... |
14935617233 | #!/usr/bin/env python3
import sys
import curses
import argparse
import debugoutput
import keyinput
import mapgenfuncs
from gameworld import GameWorld, GameMap
from screenpanels import MessagePanel, ListMenu
def draw_screen(stdscr, gameworld, gamewindow, panellist, show_debug_text=False):
"""Display the current game state on the screen"""
#Update non-game panels
for panel in panellist:
panel.display()
#Draw the gameworld to its window
window_height, window_width = gamewindow.getmaxyx()
view = gameworld.get_view(view_width=window_width, view_height=window_height, center_on_player=True)
for y, row in enumerate(view):
for x, tile in enumerate(row):
gamewindow.addstr(y, x, tile.char, tile.color)
gamewindow.refresh()
#Flush debug text
if show_debug_text:
debugoutput.flush_debug_text()
def layout_panels(stdscr):
"""Build panel layout and create sub-windows of stdscr
Return: A tuple with the game window and a list of other panels
"""
screen_width = curses.COLS-1
screen_height = curses.LINES-1
messagepanel_height = 5
gamewindow_width = 3 * (screen_width // 4)
#Arguments for creating sub-windows are height, width, y coord of top, x coord of left
#0,0 is top left corner of the screen
messagepanel = MessagePanel(stdscr.subwin(messagepanel_height, gamewindow_width, 0, 0))
gamewindow = stdscr.subwin(screen_height-messagepanel_height, gamewindow_width, messagepanel_height+1, 0)
menupanel = ListMenu(stdscr.subwin(screen_height, (screen_width // 4), 0, gamewindow_width+1))
return (gamewindow, [messagepanel, menupanel])
def main(stdscr):
#SETUP
args = get_args()
curses.curs_set(False) #Turn off the cursor
stdscr.clear() #Clear the screen
show_debug_text = args.debugging_output
debugoutput.init(stdscr)
gamewindow, panellist = layout_panels(stdscr)
if args.mapfile:
gameworld = GameWorld(genfunc=mapgenfuncs.load_from_file,
mapfile=args.mapfile)
else:
gameworld = GameWorld(genfunc=mapgenfuncs.empty_box,
width=20, height=20)
#GAME LOOP
while True:
try:
draw_screen(stdscr, gameworld, gamewindow, panellist, show_debug_text=show_debug_text)
keyinput.handle_key(stdscr.getkey())
gameworld.update_world()
except KeyboardInterrupt:
#The user pressed Ctrl-C
stdscr.refresh()
sys.exit()
except SystemExit:
stdscr.refresh()
sys.exit()
def get_args():
"""Parse the command line arguments and return a dictionary"""
parser = argparse.ArgumentParser(description="""
A curses-based roguelike something-or-other
Movement:
7 8 9 y k u
\|/ \|/
4-@-6 or h-@-l
/|\ /|\\
1 2 3 b j n
Commands:
i - list inventory
d - drop inventory
Maps to try are in the maps/ subfolder, or make your own.
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("mapfile",
nargs='?',
help="Path to a text file describing a game map",
type=argparse.FileType('r'))
parser.add_argument("-D", "--debugging-output", help="Print debugging messages", action="store_true")
return parser.parse_args()
if __name__ == "__main__":
#Parse the command line arguments before curses so that the help message can show
get_args()
#This will run the main function in a curses scope, and clean up
#the terminal mode when the program ends.
curses.wrapper(main)
| DeepwaterCreations/rockslike | rockslike.py | rockslike.py | py | 3,731 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gameworld.get_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "debugoutput.flush_debug_text",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "curses.COLS",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "curs... |
34636894694 | from enum import Enum
class Level(Enum):
    """Linguistic distance categories produced by fuzzification."""
    SMALL = 0
    MEDIUM = 1
    LARGE = 2


class Fuzz():
    """Empty placeholder kept for interface compatibility."""
    def __init__(self):
        pass


class Fuzzifier():
    """Maps crisp distance readings onto Level terms via membership
    functions (separate shapes for side and front sensors)."""

    @staticmethod
    def to_level(s, m, l):
        # Pick the Level whose membership degree is largest (ties -> first).
        degrees = [s, m, l]
        return Level(degrees.index(max(degrees)))

    @staticmethod
    def fl(input_):
        """Fuzzify the front-left sensor reading."""
        fz = Fuzzifier()
        return Fuzzifier.to_level(
            fz.side_small(input_),
            fz.side_medium(input_),
            fz.side_large(input_),
        )

    @staticmethod
    def fr(input_):
        """Fuzzify the front-right sensor reading.

        Identical to fl(): both side sensors share one membership set.
        """
        fz = Fuzzifier()
        return Fuzzifier.to_level(
            fz.side_small(input_),
            fz.side_medium(input_),
            fz.side_large(input_),
        )

    @staticmethod
    def f(input_):
        """Fuzzify the front sensor reading."""
        fz = Fuzzifier()
        return Fuzzifier.to_level(
            fz.front_small(input_),
            fz.front_medium(input_),
            fz.front_large(input_),
        )

    def side_small(self, input_):
        # 1 below 5, ramps down to 0 at 7.
        if input_ < 5:
            return 1
        if input_ < 7:
            return (7 - input_) / 2
        return 0

    def side_medium(self, input_):
        # Triangular: 0 at 4, peak 1 at 8, back to 0 at 12.
        if 4 < input_ <= 8:
            return (input_ - 4) / 4
        if 8 < input_ <= 12:
            return (12 - input_) / 4
        return 0

    def side_large(self, input_):
        # Ramps up from 8 to 16, then saturates at 1.
        if 8 < input_ <= 16:
            return (input_ - 8) / 8
        if input_ > 16:
            return 1
        return 0

    def front_small(self, input_):
        # 1 below 5, ramps down to 0 at 10.
        if input_ < 5:
            return 1
        if input_ < 10:
            return (10 - input_) / 5
        return 0

    def front_medium(self, input_):
        # NOTE(review): the divisor 16 caps this membership at 2/16 even at
        # the peak of the 14..18 window; a triangular form would use /2.
        # Preserved as-is to keep behaviour identical.
        if 14 < input_ <= 16:
            return (input_ - 14) / 16
        if 16 < input_ <= 18:
            return (18 - input_) / 16
        return 0

    def front_large(self, input_):
        # Crisp step: fully "large" beyond 30.
        return 1 if input_ > 30 else 0


class Rules():
    """Crisp steering rules (degrees) over fuzzified sensor levels."""

    def __init__(self):
        pass

    @staticmethod
    def apply(fl, f, fr):
        # First matching rule wins; right-side hazards dominate.
        if fr == Level.SMALL:
            return -40
        if fl == Level.SMALL:
            return 40
        if fr == Level.MEDIUM and f == Level.SMALL:
            return -30
        if fl == Level.MEDIUM and f == Level.SMALL:
            return 30
        return 0
| ysam12345/ci_hw1 | src/fuzz.py | fuzz.py | py | 2,344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
}
] |
20426891679 | from typing import Iterable, Any
def is_present(substring: str, iterable: Iterable[str]):
    """Return True if *substring* occurs in any non-None member of
    *iterable*, else False.  None entries are skipped, not errors."""
    return any(
        substring in candidate
        for candidate in iterable
        if candidate is not None
    )
def set_to_empty(iterable: list[Any]) -> list[Any]:
if iterable == ['']:
iterable = []
return iterable | BilakshanP/SketchX | Main/core/helpers/misc_helper.py | misc_helper.py | py | 340 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Iterable",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 10,
"usage_type": "name"
}
] |
22745833081 | from django.urls import path
from .views import new_post,post_update,post_delete,post_detail,AddCommentVİew
urlpatterns = [
path('newpost/', new_post, name='post_create' ),
path('detail/<int:id>', post_detail, name='post_detail' ),
path('detail/<int:id>/comment', AddCommentVİew.as_view(), name='add_comment' ),
path('update/<int:id>', post_update, name='post_update' ),
path('delete/<int:id>', post_delete, name='post_delete' ),
] | aemingenc/blogApp-django | blog/urls.py | urls.py | py | 464 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.new_post",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.post_detail",... |
72895055393 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse(
'horizon:project:data_processing.data_plugins:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.data_plugins:details', args=['id'])
class DataProcessingPluginsTests(test.TestCase):
@test.create_stubs({api.sahara: ('plugin_list',)})
def test_index(self):
api.sahara.plugin_list(IsA(http.HttpRequest)) \
.AndReturn(self.plugins.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(
res, 'project/data_processing.data_plugins/plugins.html')
self.assertContains(res, 'vanilla')
self.assertContains(res, 'plugin')
@test.create_stubs({api.sahara: ('plugin_get',)})
def test_details(self):
api.sahara.plugin_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.plugins.list()[0])
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(
res, 'project/data_processing.data_plugins/details.html')
self.assertContains(res, 'vanilla')
self.assertContains(res, 'plugin')
self.assertContains(res, 'Plugin Overview')
| CiscoSystems/avos | openstack_dashboard/dashboards/project/data_processing/data_plugins/tests.py | tests.py | py | 1,916 | python | en | code | 47 | github-code | 1 | [
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "openstack_dashboard.test.helpers.TestCase",
"line_number": 28,
"usage_t... |
28420576380 | # -*- coding: utf-8 -*-
'''
作业2
'''
# @Time : 2021/4/5 18:40
# @Author : LINYANZHEN
# @File : pytorch2_2.py
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import datetime
import time
# 读取数据集并分割
def load_data():
data = np.loadtxt("meant.csv", delimiter=",", skiprows=1, usecols=2)
x = np.zeros((data.shape[0] - 6, 6))
y = np.zeros((data.shape[0] - 6, 1))
for i in range(6):
x[:, i] = data[i:-(6 - i)]
y[:, 0] = data[6:]
x_train = x[:33108]
y_train = y[:33108]
x_test = x[33108:]
y_test = y[33108:]
# print(x_train)
return x_train, y_train, x_test, y_test
# 定义网络结构
class Net(nn.Module):
    """Fully connected 6 -> 4 -> 2 -> 1 network, sigmoid after each layer.

    Takes the six previous temperature readings and emits one prediction.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Affine layers: y = Wx + b, progressively narrowing to one output.
        self.fc1 = nn.Linear(6, 4)
        self.fc2 = nn.Linear(4, 2)
        self.fc3 = nn.Linear(2, 1)

    def forward(self, x):
        hidden = torch.sigmoid(self.fc1(x))
        hidden = torch.sigmoid(self.fc2(hidden))
        return torch.sigmoid(self.fc3(hidden))
def train(net, x, y, lr, epouch):
myrange = list(range(y.shape[0]))
for i in range(epouch):
print('loop=%d' % i)
random.shuffle(myrange)
# 遍历整个训练集
for j in myrange:
# 提取x,y
x_ture = torch.tensor(x[j], dtype=torch.float32)
y_ture = torch.tensor(y[j], dtype=torch.float32)
# 梯度归零
net.zero_grad()
# 计算预测y
y_pred = net(x_ture)
# 计算损失
criterion = nn.MSELoss()
loss = criterion(y_pred, y_ture)
# backward
# 反向传播更新参数
loss.backward()
for f in net.parameters():
f.data = f.data - f.grad.data * lr
def test(x_test, y_test):
ypredlist = []
for j in range(y_test.shape[0]):
xt = torch.tensor(x_test[j], dtype=torch.float32)
ypred = net(xt)
ypredlist.append(np.array(ypred.data))
ypredlist = np.array(ypredlist)
ypredlist = ypredlist.reshape(y_test.shape[0])
MSE = np.sum((y_test - ypredlist) ** 2) / y_test.shape[0]
# 画图
plt.figure(figsize=(10, 5))
plt.plot(y_test, 'r+', label='real data')
plt.plot(ypredlist, 'b*', label='pred data')
plt.legend()
plt.grid()
plt.title('MSE=%5.2f' % MSE)
plt.savefig('out2.jpg', dpi=256)
plt.close()
print(MSE)
if __name__ == '__main__':
start_time = time.time()
net = Net()
x_train, y_train, x_test, y_test = load_data()
##train loop
lr = 0.01
epouch = 200
train(net, x_train, y_train, lr, epouch)
test(x_test, y_test)
end_time = time.time()
print("用时:{}min".format((end_time - start_time) / 60))
| AWSDJIKL/Artificial-Intelligence-and-Neural-Network | pytorch2/pytorch2_2.py | pytorch2_2.py | py | 3,039 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.loadtxt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_num... |
6668106671 | import json
from flask import Flask, request, Response
import language_tool_python
app = Flask(__name__)
my_tool = language_tool_python.LanguageTool('en-US')
class ResponseData(object):
    """Mutable response envelope: a success status plus an accumulating
    list of findings under the "data" key."""

    def __init__(self):
        # Fresh dict per instance so findings never leak between requests.
        self.json_data = {
            "errno": 0,
            "message": "success",
            "data": [],
        }
def convert_to_new_json_format_v2(data, original_text, response_data, corrected_text):
for error in data:
item_data = {"word": original_text[error.offset:(error.offset + error.errorLength)],
"substitute": error.replacements[0], "description": error.message,
"from": error.offset, "to": error.offset + error.errorLength, "explanation": error.message,
"existence": True}
if error.ruleIssueType == 'misspelling' or error.ruleIssueType == 'typographical':
item_data['type'] = 'spelling'
else:
item_data['type'] = 'grammar'
pre_error_text = original_text[:error.offset]
post_error_text = original_text[(error.offset + error.errorLength):]
item_data["example"] = [
{
"correct": [pre_error_text + replacement + post_error_text for replacement in error.replacements],
"incorrect": original_text
}
]
response_data.json_data["data"].append(item_data)
return response_data
def check_for_errors(text):
matches = my_tool.check(text)
corrections = language_tool_python.utils.correct(text, matches)
return matches, corrections
def validate_request(req_json):
    """Extract the 'text' field from a request payload.

    Returns (error_json, None) when the payload is missing/malformed or the
    text is empty, and (None, text) on success.
    """
    try:
        original_text = req_json['text']
    except (TypeError, KeyError):
        # None / non-mapping payloads raise TypeError; absent key, KeyError.
        message = "Please send the parameter 'text' with your request."
        return json.dumps({"error": message}), None
    if len(original_text) == 0:
        return json.dumps({"error": "Input text too short."}), None
    return None, original_text
@app.route("/api/v1/textCheck", methods=['POST'])
def check_grammar():
req_json = request.get_json()
# RETURN ERROR IF BAD VALIDATION
validation_result, original_text = validate_request(req_json)
if validation_result:
return Response(validation_result,
status=200, mimetype='application/json')
# RESPONSE DATA CLASS
response_data = ResponseData()
results, corrected_text = check_for_errors(original_text)
response_data = convert_to_new_json_format_v2(results, original_text, response_data, corrected_text)
# SEND RES TO CLIENT
res = json.dumps(response_data.json_data)
res = Response(res, status=200, mimetype='application/json')
return res
@app.route("/api/v1/debug", methods=['GET'])
def debug_msg():
msg = json.dumps({"Welcome": "Debug page"})
res = Response(msg, status=200, mimetype='application/json')
return res
@app.errorhandler(404)
def not_found(error):
return Response(json.dumps({'error': 'Not found'}), status=404, mimetype='application/json')
if __name__ == '__main__':
app.run(
host='0.0.0.0'
)
| robert1ridley/writing_error_correction | application_v2.py | application_v2.py | py | 3,050 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "language_tool_python.LanguageTool",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "language_tool_python.utils.correct",
"line_number": 42,
"usage_type": "call"
},
{
"ap... |
8650838427 | import nltk
import numpy as np
import pandas as pd

# Empty frames defining the target schemas for documents, posts and tokens.
# NOTE(review): the first column list contains 'Text' twice — pandas will
# create duplicate column labels; confirm whether one occurrence was meant
# to be a different field.
df = pd.DataFrame(columns=['Original_Name','Fixed_Name','UniqueID','SourceID',
    'Type','Text','SentimentScore','EffectScore','SourceType','Text','Stemmed_Text',
    'Lowercased','WithoutStopwords', 'WordNgrams','trigram','n4grams','n5grams','BoC',
    'SenticentScores','Word2VecArrays','DateTime'])
PostsDF = pd.DataFrame(columns=['ID','Source','Type','Text','Effect','RelatedNER','Date','likes','views','Sentiment'])
WordsDf = pd.DataFrame(columns=['Post','Word','NER','POS','Stem','Word2Vec','Sentiment','trigram','n4grams','n5grams'])

# Gold-standard texts live in the second column of the CSV.
gsdf = pd.read_csv('EnglishGS.csv')
texts = gsdf.values[:, 1].tolist()

tokens = []
for text in texts:
    # Words starting with a letter (or $ for tickers), per the original regex.
    tokenizer = nltk.RegexpTokenizer('[A-Z,a-z,\$]\w+')
    token_row = tokenizer.tokenize(str(text))
    tokens.append(token_row)
    print(text, ': ', token_row)
| OmarMeriwani/Fake-Financial-News-Detection | Sentiments/preproc.py | preproc.py | py | 936 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
3505597412 | import os
import socket
import json
import requests
import datetime
import threading
from urllib import parse
from flask import Flask,request,redirect,url_for,render_template
from Core import Request
from Classifier import Classifier
from Configuration import Configuration
from Log import LogController
class TransparentProxy():
def __init__(self,Sock):
self.Config = Configuration()
self.ServerIP,self.ServerPort,self.FirewallIP,self.FirewallPort = self.Config.SetTransparentConfig()
self.Sock = Sock
def ParseThreat(self,Req):
if not isinstance(Req, Request):
raise TypeError("Object should be a Request")
Model = Classifier()
return Model.Run(Req)
def __ParseUrl(self,Url):
Detail = parse.urlparse(Url)
Param = Detail.query
Param = Param.split("&")
Params = {}
for p in Param:
if p == "":
continue
label, value = p.split('=')
label = label.strip()
value = value.strip()
Params[label] = value
return Params
def __ParseBody(self,Body):
Body = Body.split('\r\n')
Params = {}
for Each in Body:
if Each == "":
continue
Param = Each.split("&")
# print("Param=",Param)
for p in Param:
label, value = p.split("=")
label = label.strip()
value = value.strip()
Params[label] = value
return Params
def __GenerateUrl(self,Req):
Host = Req.Header['Host']
if Req.Protocol == 'HTTP/1.1':
Url = 'http://' + Host + Req.Url
return Url
def __SendMessage(self,Req):
Cookies = ''
if "cookies" in Req.Header:
Cookies = Req.Header['Cookies']
Req.Header.pop('cookies')
BodyParam = self.__ParseBody(Req.Body)
UrlParam = self.__ParseUrl(Req.Url)
Method = Req.Method.lower()
Url = self.__GenerateUrl(Req)
Respone = requests.request(Method,Url,params=UrlParam,data=BodyParam,cookies=Cookies)
return Respone
def __GenerateRespone(self,Respone,Req):
Protocol = Req.Protocol + ' '
Statue = Respone.status_code
Explain = ' OK'
ResponeLine = Protocol + str(Statue) + Explain + '\r\n'
ResponeBody = Respone.text + '\r\nTransparent'
ResponeHeader = ""
Headers = Respone.headers
for index, content in Headers.items():
if index == "Content-Length":
content = str(len(ResponeBody))
ResponeHeader = ResponeHeader + index + ": " + content + "\r\n"
ResponeHeader = ResponeHeader + "\r\n"
return ResponeLine + ResponeHeader + ResponeBody
def __GenerateSecurityRespone(self,Req):
ResponeLine = "HTTP/1.1 200 OK\r\n"
ResponeBody = "Attack Detected!!!<br/>"
AttackResult = ""
for key,value in Req.ThreatType.items():
Location,value = value.split(':')
Attack = Location + '->' + value + ':' + key
AttackResult = AttackResult + '<xmp>' + Attack +'</xmp>'
ResponeBody = ResponeBody + AttackResult
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
date = datetime.datetime.utcnow().strftime(GMT_FORMAT)
ResponHeaders = "Content-Type: text/html; charset=utf-8\r\nContent-Length: " + str(len(ResponeBody)) + "\r\nServer: WebFirewall/1.0 Python/3.7.9\r\nDate: " + str(date) + "\r\n\r\n"
Respone = ResponeLine + ResponHeaders +ResponeBody
return Respone
def Proxy(self):
connection,addr = self.Sock.accept()
SourceIP = addr[0]
Data = connection.recv(2048)
if len(Data) == 0:
return Request(Url='' , Header='', Body='')
#print('Recv!')
Req = Request()
Req.ParseRequest(Data)
Req.ParaseSourceIP(SourceIP)
Req.ThreatType = self.ParseThreat(Req)
if not Req.ThreatType:
Respone = self.__SendMessage(Req)
Respone = self.__GenerateRespone(Respone,Req)
connection.send(Respone.encode())
else:
Rspone = self.__GenerateSecurityRespone(Req)
connection.send(Rspone.encode())
return Req
class TransparentProxyServer():
    """Transparent HTTP(S) proxy that screens requests with the WAF classifier.

    Requests arrive through a Flask app, are classified, optionally logged,
    and are either forwarded to the real server or answered with an attack
    page.
    """

    def __init__(self):
        # Read network endpoints and log settings from the configuration file.
        self.Config = Configuration()
        self.ServerIP, self.ServerPort, self.FirewallIP, self.FirewallPort = self.Config.SetTransparentConfig()
        self.LogStatue, self.LogPath = self.Config.SetLogSystem()

    def ParseThreat(self, Req):
        """Run the classifier on *Req*; returns a dict of detected threats."""
        if not isinstance(Req, Request):
            raise TypeError("Object should be a Request")
        Model = Classifier()
        return Model.Run(Req)

    def __ParseUrl(self, Url):
        """Split the query string of *Url* into a {name: value} dict."""
        Detail = parse.urlparse(Url)
        Param = Detail.query
        Param = Param.split("&")
        Params = {}
        for p in Param:
            if p == "":
                continue
            # maxsplit=1 keeps '=' characters inside the value intact.
            label, value = p.split('=', 1)
            label = label.strip()
            value = value.strip()
            Params[label] = value
        return Params

    def __ParseBody(self, Body):
        """Parse an urlencoded request body into a {name: value} dict."""
        Body = Body.split('\r\n')
        Params = {}
        for Each in Body:
            if Each == "":
                continue
            Param = Each.split("&")
            # print("Param=",Param)
            for p in Param:
                # NOTE(review): unlike __ParseUrl, this split has no maxsplit,
                # so a value containing '=' raises ValueError -- confirm.
                label, value = p.split("=")
                label = label.strip()
                value = value.strip()
                Params[label] = value
        return Params

    def __GenerateUrl(self, Req):
        """Rebuild the upstream URL; the scheme depends on CA material in ./ca."""
        Host = Req.Header['Host']
        if not os.listdir("./ca"):
            Url = 'http://' + Host + Req.Url
        else:
            Url = 'https://' + Host + Req.Url
        return Url

    def __SendMessage(self, Req):
        """Forward *Req* upstream with requests and return the response."""
        Cookies = ''
        # NOTE(review): the membership test uses lowercase "cookies" but the
        # read uses "Cookies" -- if only one casing exists in the header dict
        # this raises KeyError; verify against the header parser.
        if "cookies" in Req.Header:
            Cookies = Req.Header['Cookies']
            Req.Header.pop('cookies')
        BodyParam = self.__ParseBody(Req.Body)
        UrlParam = self.__ParseUrl(Req.Url)
        Method = Req.Method.lower()
        Url = self.__GenerateUrl(Req)
        # verify=False: the upstream certificate is deliberately not validated.
        Respone = requests.request(Method, Url, verify=False, params=UrlParam, data=BodyParam, cookies=Cookies)
        return Respone

    def proxy(self):
        """Start the Flask front-end; every request is screened before relay."""
        app = Flask(__name__)

        @app.before_request
        def GetData():
            # Rebuild our own Request object from the incoming Flask request.
            Req = Request(ID='', Timestamp='', SourceIP='', Method='', Url='', Protocol='', Header={}, Body='', ThreatType={})
            Req.SetTime()
            from builtins import str
            Req.SourceIP = str(request.remote_addr)
            Req.Method = str(request.method)
            try:
                parm = '?' + str(request.url).split('?')[1]
            except:
                parm = ''
            Req.Url = str(request.path) + parm
            Req.Header = Req.Str2Dic(str(request.headers))
            json_str = json.dumps(request.form)
            data = json.loads(json_str)
            # NOTE(review): "str" is rebound to a plain string below, shadowing
            # the builtin for the rest of this function.
            str = ""
            for key, value in data.items():
                str = str + key + "=" + value + "&"
            Req.Body = str[0:-1]
            Req.ThreatType = self.ParseThreat(Req)
            if Req.ThreatType == {}:
                Req.Operation = 'Pass'
            else:
                Req.Operation = 'Intercept'
            if self.LogStatue == True:
                Log = LogController()
                Log.Save(Req)
                Log.close()
            if Req.ThreatType == {}:
                # Clean request: fetch the upstream answer and return its body.
                respone = self.__SendMessage(Req)
                return respone.text
            else:
                # Threat detected: short-circuit with the attack page.
                return render_template('Attack.html', log=Req)

        # Serve plain HTTP unless CA material is present in ./ca.
        if not os.listdir("./ca"):
            app.run(host=str(self.FirewallIP), port=int(self.FirewallPort))
        else:
            app.run(host=str(self.FirewallIP), port=int(self.FirewallPort), ssl_context=('./ca/server.crt', './ca/server.key'))
if __name__ == '__main__':
    # Run the transparent proxy stand-alone.
    server = TransparentProxyServer()
    server.proxy()
| Scentedtea0210/Web-Application-Firewall | WAF/TransparentProxy.py | TransparentProxy.py | py | 8,066 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Configuration.Configuration",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Core.Request",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "Classifier.Classifier",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ur... |
5407069729 | # coding: utf-8
"""
This module contains all data objects retrieved from the forum and wiki APIs. These APIs provide JSON data, and this module enhances the data model by mirroring data attributes onto model attributes. Here is an example:
.. code-block:: python
from campbot import CampBot
bot = CampBot(use_demo=True)
waypoint = bot.wiki.get_waypoint(107702)
# this two lines are perfectly equivalents
elevation = waypoint["elevation"] # standard way to access data
elevation = waypoint.elevation # and sexier way.
# setting values is also possible
waypoint.elevation = 1000
assert waypoint["elevation"] == 1000 # it's true!
We use the second way whenever possible in this documentation.
"""
from __future__ import print_function, unicode_literals, division
import re
import logging
from .differ import get_diff_report
def _input(message):  # pragma: no cover
    """Prompt the user; works on both Python 2 (raw_input) and 3 (input)."""
    try:  # py 2
        return raw_input(message)
    except NameError:  # py 3
        return input(message)
def get_constructor(document_type):
    """Map a one-letter document type code to its wiki object class."""
    constructors = {
        "u": WikiUser,
        "a": Area,
        "w": Waypoint,
        "o": Outing,
        "i": Image,
        "m": Map,
        "x": Xreport,
        "c": Article,
        "b": Book,
        "r": Route,
    }
    return constructors[document_type]
class BotObject(dict):
    """
    Base class for all data objects.

    A dict subclass that mirrors its keys as attributes, so instance.key
    and instance["key"] are interchangeable.
    """

    def __init__(self, campbot, data):
        # Copy the raw API payload into ourselves (we are a dict subclass)
        # and keep a handle on the client for follow-up API calls.
        super(BotObject, self).__init__(data)
        self._campbot = campbot

    # make instance.key equivalent to instance["key"]
    def __getattr__(self, item):
        # Underscore names are real (non-mirrored) attributes; never look
        # them up in the dict.
        if item.startswith("_"):
            raise AttributeError(
                "Object {} has not attribute {}".format(self.__class__.__name__, item)
            )
        if item not in self:  # pragma: no cover
            print("666777", self)
            raise AttributeError(
                "Object {} has not attribute {}".format(self.__class__.__name__, item)
            )
        return self[item]

    def __setattr__(self, key, value):
        # Mirror writes: existing dict keys are updated in place; anything
        # else becomes a plain instance attribute (e.g. _campbot).
        if key in self:
            self[key] = value
        else:
            super(BotObject, self).__setattr__(key, value)

    def _convert_list(self, name, constructor):
        # Wrap each raw dict of the list field *name* in the given bot class.
        if name in self:
            self[name] = [constructor(self._campbot, data) for data in self[name]]

    # def _convert_dict(self, name, constructor):
    #     if name in self:
    #         self[name] = {
    #             key: constructor(self._campbot, self[name][key]) for key in self[name]
    #         }
class Version(BotObject):
    """
    A historical version of one wiki document.
    """

    def __init__(self, campbot, data):
        super(Version, self).__init__(campbot, data)
        # "document" may be falsy (e.g. a masked version); otherwise wrap it
        # in the class matching its one-letter type code.
        if self["document"]:
            self["document"] = get_constructor(self["document"]["type"])(
                campbot, self["document"]
            )
        else:
            self["document"] = None

    def get_diff_url(self, lang):
        """Return the UI URL showing the diff against the previous version.

        Falls back to the plain document URL when there is no previous
        version (first revision).
        """
        constructor = get_constructor(document_type=self.document.type)
        if not self.previous_version_id:
            return self.document.get_url(lang)
        return "{}/{}/diff/{}/{}/{}/{}".format(
            self._campbot.wiki.ui_url,
            constructor.url_path,
            self.document.document_id,
            lang,
            self.previous_version_id,
            self.version["version_id"],
        )

    def get_locale_length(self, lang):
        """Return the text length of the document's locale, or 0 if absent."""
        locale = self.document.get_locale(lang)
        return locale.get_length() if locale else 0
class Contribution(BotObject):
    """One entry of the contributions feed: a document change and its author."""

    def __init__(self, campbot, data):
        super(Contribution, self).__init__(campbot, data)
        # Wrap nested payloads in their dedicated bot classes.
        self["document"] = get_constructor(self["document"]["type"])(
            campbot, self["document"]
        )
        self["user"] = ShortWikiUser(campbot, self["user"])

    def get_full_document(self):
        """Fetch the complete document this contribution refers to."""
        return self._campbot.wiki.get_wiki_object(
            self.document["document_id"], document_type=self.document["type"]
        )
class Locale(BotObject):
    """
    A locale: the set of translatable fields of a document for one language.
    """

    def get_title(self):
        """
        Get the title, prefixed with ``title_prefix`` when one exists.

        :return: String, pretty title
        """
        if "title_prefix" not in self:
            return self.title
        return "{} : {}".format(self.title_prefix, self.title)

    def get_locale_fields(self):
        """Return the names of all free-text fields a locale may carry."""
        return (
            "description",
            "gear",
            "remarks",
            "route_history",
            "summary",
            "access",
            "access_period",
            "title",
            "external_resources",
            "other_comments",
            "slope",
            "slackline_anchor1",
            "slackline_anchor2",
        )

    def get_length(self):
        """
        Get the total text length over all present, non-empty fields.

        :return: Integer, number of characters
        """
        return sum(
            len(self[field])
            for field in self.get_locale_fields()
            if field in self and self[field]
        )
class WikiObject(BotObject):
    """
    Base object for all wiki documents
    """

    # URL path segment for the document type (e.g. "routes"); set in subclasses.
    url_path = None

    def __init__(self, campbot, data):
        super(WikiObject, self).__init__(campbot, data)
        # Wrap nested association payloads so they expose attribute access too.
        if "associations" in self and self["associations"] is not None:
            self["associations"] = BotObject(campbot=campbot, data=self["associations"])
            self.associations._convert_list("images", Image)
        self._convert_list("locales", Locale)
        # Keep the untouched payload so save() can print a diff against it.
        self._data = data

    def get_url(self, lang=None):
        """
        :return: camptocamp.org URL (optionally pinned to one lang).
        """
        return "{}/{}/{}{}".format(
            self._campbot.wiki.ui_url,
            self.url_path,
            self.document_id,
            "" if lang is None else "/" + lang,
        )

    def get_history_url(self, lang):
        """
        :return: camptocamp.org version list URL
        """
        return "{}/{}/history/{}/{}".format(
            self._campbot.wiki.ui_url, self.url_path, self.document_id, lang
        )

    def get_title(self, lang):
        """Return the (possibly prefixed) title in *lang*, or "" if absent."""
        locale = self.get_locale(lang)
        return locale.get_title() if locale else ""

    def get_locale(self, lang):
        """
        :param lang: fr, en, de ...
        :return: the Locale object, or None if it does not exist in this lang
        """
        if "locales" not in self:
            return None
        for locale in self.locales:
            if locale.lang == lang:
                return locale

    def search(self, patterns, lang):
        """
        Search patterns (regular expressions) in all text fields of one locale.

        :param lang: fr, de, en...
        :return: True if any pattern is found, False otherwise
        """
        locale = self.get_locale(lang)
        for field in locale.get_locale_fields():
            if field in locale and locale[field]:
                for pattern in patterns:
                    if re.search(pattern, locale[field]):
                        return True
        return False

    def print_diff(self):
        # Show a field-by-field diff between the original payload and the
        # current (possibly modified) state.
        report = get_diff_report(self._data, self)
        for l in report:
            print(l)

    def _build_payload(self, message):
        # Payload shape expected by the API PUT endpoint.
        return {"document": self, "message": message}

    def save(self, message, ask_before_saving=True):
        """
        Save object to camptocamp.org. Bot must be authenticated.

        :param message: Modification comment
        :param ask_before_saving: Boolean, ask the user before saving
        :return: raw request response, useless.
        """
        self.print_diff()
        if ask_before_saving:
            # Interactive confirmation; anything but "y" aborts the save.
            if _input("Save {} : {}, y/[n] ?\n".format(self.get_url(), message)) != "y":
                return None
        else:
            logging.info(f"Saving {self.get_url()} : {message}")
        return self._campbot.wiki.put(
            "/{}/{}".format(self.url_path, self.document_id),
            self._build_payload(message),
        )

    def is_valid(self):
        """
        :return: True if document can be saved
        """
        return self.get_invalidity_reason() is None

    def is_personal(self):
        # Overridden by document types that belong to a single user.
        return False

    def get_invalidity_reason(self):
        # Overridden by subclasses with validation rules; None means valid.
        return None
class ShortWikiUser(BotObject):
    """Compact user record, as embedded in contribution feed entries."""

    def get_contributions_url(self):
        """Return the "what's new" page URL filtered on this user."""
        return "{}/whatsnew#u={}".format(self._campbot.wiki.ui_url, self.user_id)

    def is_newbie(self):
        """Return True when the user has fewer than 50 contributions."""
        feed = self._campbot.wiki.get(
            "/documents/changes?limit=50&u={}".format(self.user_id)
        )["feed"]
        return len(feed) < 50

    def get_wiki_user(self):
        """Fetch the full profile document for this user."""
        return self._campbot.wiki.get_user(user_id=self.user_id)
class WikiUser(WikiObject):
    """A user profile document."""

    url_path = "profiles"

    def get_contributions(self, oldest_date=None, newest_date=None):
        """Iterate over this user's contributions within the date range."""
        return self._campbot.wiki.get_contributions(
            user_id=self.document_id, oldest_date=oldest_date, newest_date=newest_date
        )

    def get_last_contribution(self, oldest_date=None, newest_date=None):
        """Return the first contribution yielded for the range, or None."""
        contributions = self.get_contributions(
            oldest_date=oldest_date, newest_date=newest_date
        )
        return next(iter(contributions), None)

    def is_personal(self):
        """Profiles are always treated as personal documents."""
        return True
class Route(WikiObject):
    """Route object : https://www.camptocamp.org/routes"""

    url_path = "routes"  # URL segment used to build document links
class Article(WikiObject):
    """Article object : https://www.camptocamp.org/articles"""

    url_path = "articles"

    def is_personal(self):
        # Only articles of type "personal" are treated as personal documents.
        return self.article_type == "personal"
class Image(WikiObject):
    """Image object : https://www.camptocamp.org/images"""

    url_path = "images"

    def is_personal(self):
        # "personal" and "copyright" image types are treated as personal.
        return self.image_type in ("personal", "copyright")
class Book(WikiObject):
    """Book object : https://www.camptocamp.org/books"""

    url_path = "books"  # URL segment used to build document links
class Xreport(WikiObject):
    """Xreport object : https://www.camptocamp.org/xreports"""

    url_path = "xreports"

    def is_personal(self):
        # Xreports are always treated as personal documents.
        return True
class Waypoint(WikiObject):
    """Waypoint object : https://www.camptocamp.org/waypoints"""

    url_path = "waypoints"

    def get_invalidity_reason(self):
        """Return why this waypoint cannot be saved, or None if it is valid."""
        # Huts and gites must state their custodianship.
        if self.waypoint_type in ("hut", "gite") and self.custodianship is None:
            return "custodianship is missing"
        # Elevation is mandatory except for indoor climbing waypoints.
        if self.elevation is None and self.waypoint_type not in ("climbing_indoor",):
            return "elevation is missing"
        return None
class Area(WikiObject):
    """Area object : https://www.camptocamp.org/areas"""

    url_path = "areas"

    def _build_payload(self, message):
        """Build the save payload, stripping the geometry field."""
        payload = super(Area, self)._build_payload(message)
        # Geometry info must not be present in payload, otherwise, save actions fails
        del payload["document"]["geometry"]
        return payload
class Map(WikiObject):
    """Map object : https://www.camptocamp.org/maps"""

    url_path = "maps"  # URL segment used to build document links
class Outing(WikiObject):
    """Outings object : https://www.camptocamp.org/outings"""

    url_path = "outings"

    def is_personal(self):
        # Outings are always treated as personal documents.
        return True
class ForumUser(BotObject):
    """A forum user, with admin-level helpers."""

    def get_admin_view(self):
        """Fetch the admin JSON view of this user."""
        return self._campbot.forum.get(
            "/admin/users/{}/{}.json".format(self.id, self.username)
        )

    def anonymise(self):
        """Anonymise this user through the forum admin API."""
        return self._campbot.forum.put(
            "/admin/users/{}/anonymize.json".format(self.id), data=None
        )
class Post(BotObject):
    """A forum post; its raw "polls" entries are wrapped as Poll objects."""

    def __init__(self, campbot, data):
        super(Post, self).__init__(campbot, data)
        self._convert_list("polls", Poll)
class Poll(BotObject):
    """A poll attached to a forum post.

    Plain data holder: all behaviour comes from BotObject, so no methods are
    overridden here.
    """
| c2corg/CampBot | campbot/objects.py | objects.py | py | 11,732 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "differ.get_diff_report",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 293,
"usage_type": "call"
}
] |
24877168673 | """Submit proposal content."""
import os
from typing import Optional
import httpx
from starlette.datastructures import UploadFile
from saltapi.auth.token import create_token
from saltapi.repository.user_repository import User
import logging
logger = logging.getLogger(__name__)
proposal_submission_url = f"{os.environ['STORAGE_SERVICE_URL']}/proposal/submit"
async def submit_proposal(
    proposal: UploadFile, proposal_code: Optional[str], submitter: str
) -> str:
    """Submit a proposal file to the storage service.

    Returns the submission id on success; raises Exception carrying either
    the service-reported error or a generic message otherwise.
    """
    generic_error = "The proposal could not be sent to the storage service."
    files = {
        "proposal": (proposal.filename, proposal.file, "application/octet-stream"),
    }
    data = {
        "submitter": submitter,
    }
    if proposal_code:
        data["proposal_code"] = proposal_code
    # The storage service is called with a short-lived admin token minted here.
    user = User(
        id=-1,
        username="admin",
        first_name="",
        last_name="",
        email="",
        roles=["Admin"],
        permissions=[],
    )
    auth_token = create_token(user=user, expiry=300, algorithm="RS256")
    headers = {"Authorization": f"Bearer {auth_token}"}
    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                proposal_submission_url, data=data, files=files, headers=headers
            )
    except Exception:
        # Transport failure: hide the details from the caller, log them here.
        logger.exception(msg=generic_error)
        raise Exception(generic_error)
    submission_id = _submission_id(response)
    if submission_id:
        return submission_id
    # error handling
    error = _submission_error(response)
    if error:
        logger.error(msg=error)
        raise Exception(error)
    else:
        logger.error(msg=generic_error)
        raise Exception(generic_error)
def _submission_id(response: httpx.Response) -> Optional[str]:
    """
    Extract the submission id sent by the server, if there is one.

    The response body is parsed as a JSON object and its submission_id field is
    returned. If this fails (because the response isn't JSON object or has no
    submission_id field) None is returned.
    """
    try:
        submission_id = response.json().get("submission_id")
        if submission_id is not None:
            return str(submission_id)
        else:
            return None
    except Exception:
        # Deliberate best-effort: any parse failure means "no id available".
        return None
def _submission_error(response: httpx.Response) -> Optional[str]:
    """
    Extract the error message sent by the server, if there is one.

    The response body is parsed as a JSON object and its error field is
    returned. If this fails (because the response isn't JSON object or has no
    error field) None is returned.
    """
    try:
        error = response.json().get("error")
        if error is not None:
            return str(error)
        else:
            return None
    except Exception:
        # Deliberate best-effort: any parse failure means "no error reported".
        return None
| saltastroops/salt-api-old | saltapi/submission/submit.py | submit.py | py | 2,842 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "starlette.datastructures.UploadFile",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": ... |
74771536672 | from typing import List
from RLEnv.EnvLayer.PCBRoutingEnv import PCBRoutingEnv
from scipy.spatial.distance import cityblock
import numpy as np
class rl_env(PCBRoutingEnv):
    """PCB routing environment with a shaped reward for RL training."""

    def __init__(
        self,
        resolution: float,
        pcb_folder: str,
        pcb_names: List[str],
        connect_coef: float = 20.0,
        dist_coef: float = 0.5,
        path_coef: float = 0.1
    ) -> None:
        """
        :param connect_coef: bonus paid when the agent reaches the target
        :param dist_coef: penalty weight on the Manhattan distance to target,
            applied when a conflict occurs
        :param path_coef: small constant per-step penalty
        """
        super().__init__(resolution, pcb_folder, pcb_names)
        self.connect_coef = connect_coef
        self.dist_coef = dist_coef
        self.path_coef = path_coef

    def reward(self) -> float:
        """Return the reward for the current state.

        Target reached -> +connect_coef; conflict -> distance-scaled penalty;
        otherwise a small constant step penalty.
        """
        if np.array_equal(self._agent_location, self._target_location):
            return self.connect_coef
        if self.conflict:
            return -self.dist_coef * cityblock(self._agent_location, self._target_location)
        return -self.path_coef
| PCBench/PCBench | Baselines/RL/rl_env.py | rl_env.py | py | 871 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "RLEnv.EnvLayer.PCBRoutingEnv.PCBRoutingEnv",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.array_equal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name"... |
5498440877 | from struct import unpack
import json
import hid
from math import cos, atan
USB_VID = 0x04d8
USB_PID = 0xef7e
def hid_multiread(dev):
    """Drain all pending HID reports from *dev* into one flat list.

    Reads 128-byte chunks with a 10 ms timeout until a read comes back
    empty, and returns everything collected so far.
    """
    collected = []
    while True:
        chunk = dev.read(128, timeout_ms=10)
        if not chunk:
            break
        collected.extend(chunk)
    return collected
def hid_query(dev, addr):
    """Request one calibration page at *addr* via a feature report and return
    its payload (the echoed 4-byte header is stripped)."""
    # 68 byte request, might actually need 64, but not clear
    # when hid versus hidraw is used.
    buffer = bytearray([0] * 68)
    buffer[0] = 0  # Report ID
    buffer[1] = 0
    buffer[2] = addr >> 8  # Page to read
    buffer[3] = addr & 0xff
    res = dev.send_feature_report(buffer)
    assert res >= 0
    res = hid_multiread(dev)
    # The device echoes our 4-byte header back; sanity-check it before
    # returning the payload that follows.
    assert res[:4] == list(buffer[:4])
    return res[4:]
class Calibration:
    """Parsed Looking Glass calibration values plus derived display params."""

    def __init__(self, json_in):
        # Raw values as shipped in the device's calibration JSON.
        config = json_in
        self.screenW = int(config['screenW']['value'])
        self.screenH = int(config['screenH']['value'])
        self.DPI = int(config['DPI']['value'])
        self.pitch = config['pitch']['value']
        self.slope = config['slope']['value']
        self.center = config['center']['value']
        # Physical image width
        self.screenInches = self.screenW / self.DPI
        # Derived quantities; presumably the standard Looking Glass lenticular
        # calibration math -- TODO confirm against the official docs.
        self.pitch = self.pitch * self.screenInches * cos(atan(1.0 / self.slope))
        self.tilt = self.screenH / (self.screenW * self.slope)
        self.subp = 1.0 / (3 * self.screenW) * self.pitch
def lg_json(usb_vid=USB_VID, usb_pid=USB_PID, save_flag=False, json_name="LookingGlassConfig.json"):
    '''
    :param usb_vid: Looking Glass USB port VID, look up by 'sudo lsusb | grep Microchip'
    :param usb_pid: Looking Glass USB port PID, look up by 'sudo lsusb | grep Microchip'
    :param save_flag: whether to save the json
    :param json_name: save json name
    :return: a Calibration class of the Looking Glass
    '''
    devs = list(hid.enumerate(usb_vid, usb_pid))
    # print(devs)
    # Exactly one attached Looking Glass is expected.
    assert len(devs) == 1
    devinfo = devs[0]
    dev = hid.device()
    dev.open_path(devinfo['path'])
    # dev.set_nonblocking(1)
    # Data is read in pages of 64 bytes. First page (0) starts with a
    # 4 byte header denoting the length of the calibration data (in JSON
    # format)
    page = hid_query(dev, 0)
    json_size = unpack('>I', bytes(page[:4]))[0]
    # print('JSON size: %d' % json_size)
    json_data = page[4:]
    addr = 1
    # Keep fetching pages until the announced JSON length is reached.
    while len(json_data) < json_size:
        page = hid_query(dev, addr)
        json_data.extend(page)
        addr += 1
    json_data = json_data[:json_size]
    json_data = bytes(json_data)
    json_data = json_data.decode('utf8')
    # Pretty print
    parsed = json.loads(json_data)
    print(json.dumps(parsed, indent=4))
    # NOTE(review): the HID device handle is never closed -- confirm whether
    # dev.close() should be called before returning.
    if save_flag:
        with open(json_name, 'w') as outfile:
            outfile.write(json.dumps(parsed, indent=4))
    return Calibration(parsed)
{
"api_name": "math.cos",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "hid.enumerate",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "hid.device",
"line_number": 73,
... |
74415935072 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the SponsorshipLevel model and links
    # Sponsorship to it through an optional "level" foreign key.

    dependencies = [
        ('sponsorships', '0003_sponsorship_event'),
    ]

    operations = [
        migrations.CreateModel(
            name='SponsorshipLevel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('description', models.TextField(blank=True)),
                ('amount', models.DecimalField(max_digits=10, decimal_places=2)),
                ('event', models.ForeignKey(related_name='sponsorship_levels', to='events.Event')),
            ],
            options={
                'verbose_name': 'Sponsorship Level',
                'verbose_name_plural': 'Sponsorship Levels',
            },
        ),
        migrations.AddField(
            model_name='sponsorship',
            name='level',
            field=models.ForeignKey(related_name='sponsorships', blank=True, to='sponsorships.SponsorshipLevel', null=True),
        ),
    ]
| dco5/tendenci-sponsorships | sponsorships/migrations/0004_auto_20180110_2357.py | 0004_auto_20180110_2357.py | py | 1,169 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
73767637792 | from __future__ import print_function
import logging
import sys
from argh import *
from vcloudtools.api import VCloudAPIClient
log = logging.getLogger(__name__)
parser = ArghParser()
@arg('path', help='Path to fetch', default='/')
def browse(args):
    """
    Browse the vCloud API using the built-in hypermedia links
    """
    c = VCloudAPIClient()
    res = c.browse(args.path)
    # Dump the raw HTTP exchange: status line, then headers, then the body.
    print('HTTP/1.1 {0} {1}'.format(res.status_code, res.reason))
    for k, v in res.headers.items():
        print("{0}: {1}".format(k, v))
    print()
    print(res.content)
def main():
    # argh turns browse() into the sole command-line entry point.
    dispatch_command(browse)

if __name__ == '__main__':
    main()
| alphagov/vcloudtools | vcloudtools/command/browse.py | browse.py | py | 648 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "vcloudtools.api.VCloudAPIClient",
"line_number": 17,
"usage_type": "call"
}
] |
import json

# Convert the city list from city.json into the {"id", "text"} shape used by
# select2-style widgets, and write it to city_name.json.
with open("city.json") as file:
    data = json.load(file)

# Keep only the id and name of each record.
list_data = [{"id": city["id"], "text": city["name"]} for city in data]

with open("city_name.json", "w") as f:
    json.dump(list_data, f)
| pbrlionocde/mini_weather_portal | app/static/json/j.py | j.py | py | 271 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 16,
"usage_type": "call"
}
] |
23665017015 | from django.shortcuts import render, redirect, reverse
from django.contrib.auth import logout
from django.contrib.auth.views import LoginView, LogoutView, login_required
@login_required(login_url='accounts:login')
def logoutview(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return redirect('accounts:login')
@login_required(login_url='accounts:login')
def profileview(request):
    """Render the authenticated user's profile page."""
    context = {}
    return render(request, 'accounts/profile.html', context)
class Login(LoginView):
    """Login page; already-authenticated users are redirected to the profile."""
    template_name = 'accounts/login.html'
    next_page = 'accounts:profile'
    redirect_authenticated_user = True
{
"api_name": "django.contrib.auth.logout",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.login_required",
"line_number": 7,
"usage_type": "call"
}... |
24543006030 | import h5py
import json
import numpy as np
from os.path import splitext, basename, join as pjoin
from pipeline_utils import print_and_call
from pipeline_config import MAPPING_PATH
from graphslam_config import GRAPHSLAM_EVAL_DIR, GRAPHSLAM_MAPS_DIR, MATCH_JSON_DATA
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
'''
Evaluate alignment between maps
First compute bounds of map features to compare
Then compute score
'''
# TODO Parallelize
if __name__ == '__main__':
    # First compute bounding box / center data
    bounds_file_pairs = list()
    for match in MATCH_JSON_DATA:
        fstem1 = '--'.join(match['rss1'])
        fstem2 = '--'.join(match['rss2'])
        map_data1 = pjoin(GRAPHSLAM_MAPS_DIR, fstem1 + '.h5')
        map_data2 = pjoin(GRAPHSLAM_MAPS_DIR, fstem2 + '.h5')
        pcd1 = splitext(map_data1)[0] + '.pcd'
        pcd2 = splitext(map_data2)[0] + '.pcd'
        # First convert to pcd format
        cmd = '%s/bin/h5_to_pcd --h5 %s --pcd %s' % (MAPPING_PATH, map_data1, pcd1)
        print_and_call(cmd)
        cmd = '%s/bin/h5_to_pcd --h5 %s --pcd %s' % (MAPPING_PATH, map_data2, pcd2)
        print_and_call(cmd)
        # Then compute the bounding boxes
        bounds1 = '%s/%s.h5' % (GRAPHSLAM_EVAL_DIR, splitext(basename(pcd1))[0])
        bounds2 = '%s/%s.h5' % (GRAPHSLAM_EVAL_DIR, splitext(basename(pcd2))[0])
        cmd = '%s/bin/compute_bounds %s %s' % (MAPPING_PATH, pcd1, bounds1)
        print_and_call(cmd)
        cmd = '%s/bin/compute_bounds %s %s' % (MAPPING_PATH, pcd2, bounds2)
        print_and_call(cmd)
        bounds_file_pairs.append((bounds1, bounds2))
    # Now compute match quality
    eval_json = dict()
    eval_json['match_eval'] = list()
    for (file_pair, match_data) in zip(bounds_file_pairs, MATCH_JSON_DATA):
        # Load the cluster centers of both maps.
        h5f = h5py.File(file_pair[0], 'r')
        centers1 = h5f['cluster_centers'][...]
        h5f.close()
        h5f = h5py.File(file_pair[1], 'r')
        centers2 = h5f['cluster_centers'][...]
        h5f.close()
        # Compute nearest neighbor distances
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(centers1)
        distances, tgt_indices = nbrs.kneighbors(centers2)
        distances = distances.ravel()
        distances = distances[distances < 2.0]  # PARAM
        # Also compute lateral distances when not taking z coord into account
        centers1_lat = np.array(centers1)
        centers2_lat = np.array(centers2)
        centers1_lat[:, 2] = 0.0
        centers2_lat[:, 2] = 0.0
        nbrs_lat = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(centers1_lat)
        distances_lat, tgt_indices_lat = nbrs_lat.kneighbors(centers2_lat)
        distances_lat = distances_lat.ravel()
        distances_lat = distances_lat[distances_lat < 2.0]  # PARAM
        # Compute aggregate statistics
        mean_dist = np.mean(distances)
        median_dist = np.median(distances)
        mean_dist_lat = np.mean(distances_lat)
        median_dist_lat = np.median(distances_lat)
        # Store
        match_eval = dict()
        match_eval['match_data'] = match_data
        match_eval['match_distances'] = {'mean_dist': mean_dist, 'distances': distances.tolist(), 'median_dist': median_dist}
        match_eval['match_distances_lat'] = {'mean_dist': mean_dist_lat, 'distances': distances_lat.tolist(), 'median_dist': median_dist_lat}
        eval_json['match_eval'].append(match_eval)
    # Save evaluation summary
    # NOTE(review): fstem1/fstem2 still hold the values from the *last*
    # iteration of the first loop, so only one summary file / figure set is
    # written, named after the final match pair -- confirm this is intended.
    eval_json_file = '%s/%s.json' % (GRAPHSLAM_EVAL_DIR, '+'.join((fstem1, fstem2)))
    json.dump(eval_json, open(eval_json_file, 'w'), indent=4, sort_keys=True)
    # Save figures
    # NOTE(review): no plt.figure() call between the two histograms, so the
    # lateral histogram is drawn on top of the first one -- verify.
    plt.hist(distances.tolist(), 50)
    plt.ylabel('count')
    plt.xlabel('distance')
    fig_file = '%s/%s_hist.pdf' % (GRAPHSLAM_EVAL_DIR, '+'.join((fstem1, fstem2)))
    plt.savefig(fig_file)
    plt.hist(distances_lat.tolist(), 50)
    plt.ylabel('count')
    plt.xlabel('lateral distance')
    fig_file = '%s/%s_hist_lat.pdf' % (GRAPHSLAM_EVAL_DIR, '+'.join((fstem1, fstem2)))
    plt.savefig(fig_file)
| sameeptandon/sail-car-log | mapping/sandbox/graphslam/scripts/eval_maps.py | eval_maps.py | py | 4,146 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "graphslam_config.MATCH_JSON_DATA",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "graphslam_config.GRAPHSLAM_MAPS_DIR",
"line_number": 27,
"usage_type": "argument"
},
{
... |
38105066131 | import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
def marginal_distribution(mu, sigma2, indeces):
    """Marginalise a Gaussian (mu, sigma2) onto the given index subset.

    Returns the sub-mean vector and the matching sub-covariance matrix.
    """
    mu_sub = mu[indeces]
    sigma2_sub = sigma2[indeces][:, indeces].reshape(-1, len(indeces))
    return mu_sub, sigma2_sub
def conditional_distribution(X, mu, sigma2, indeces):
    """Condition the Gaussian (mu, sigma2) on the components of X listed in
    *indeces*; returns the mean and covariance of the remaining components."""
    mu_U, sigma2_U = marginal_distribution(mu, sigma2, indeces)
    # Indices of the components we keep (complement of *indeces*).
    # NOTE(review): set difference does not guarantee ordering; in practice
    # CPython iterates small ints in sorted order -- confirm callers rely on
    # a sorted complement.
    indeces_V = list(set(range(mu.shape[0])).difference(set(indeces)))
    mu_V, sigma2_V = marginal_distribution(mu, sigma2, indeces_V)
    # Cross-covariance blocks between observed (U) and remaining (V) parts.
    sigma2_U_V = sigma2[indeces][:, indeces_V]
    sigma2_V_U = sigma2_U_V.T
    U = X[indeces]
    # mu_{V|U} = mu_V + S_VU S_U^+ (U - mu_U)
    mu_V_U = mu_V + np.dot(np.dot(sigma2_V_U, np.linalg.pinv(sigma2_U)), U - mu_U)
    # S_{V|U} = S_V - S_VU S_U^+ S_UV
    sigma2_V_U = sigma2_V - np.dot(np.dot(sigma2_V_U, np.linalg.pinv(sigma2_U)), sigma2_U_V)
    return mu_V_U, sigma2_V_U
def display_prior_Gaussian_Process(mu, sigma, lim):
    """Plot three samples from a GP prior with an RBF kernel of width *sigma*
    over the x-range *lim* (opens an interactive figure)."""
    np.random.seed(0)
    xmin, xmax = lim
    X = np.arange(xmin, xmax, 0.1)
    # RBF (squared-exponential) kernel evaluated pairwise on the grid.
    cov = np.array([[np.exp(-(X[i] - X[j])*(X[i] - X[j])/(2*sigma*sigma)) for i in range(X.shape[0])] for j in range(X.shape[0])])
    Y = np.random.multivariate_normal(np.array([mu]*X.shape[0]), cov, 3)
    plt.figure()
    plt.plot(X, Y.T)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
def display_posterior_Gaissian_Process(X_post, Y_post, X_pred, sigma):
    """Plot the GP posterior mean plus five samples, conditioned on the
    observed points (X_post, Y_post), over the prediction grid X_pred.

    (Note: "Gaissian" typo is kept in the name to avoid breaking callers.)
    """
    np.random.seed(0)
    xmin, xmax = min(X_pred), max(X_pred)
    # Stack observed and prediction inputs; prediction targets start at zero.
    X = np.r_[X_post, X_pred]
    Y = np.r_[Y_post, np.zeros(X_pred.shape[0])]
    mu = np.zeros(X.shape[0])
    # RBF kernel over the joint grid, then condition on the observed block.
    cov = np.array([[np.exp(-(X[i] - X[j])*(X[i] - X[j])/(2*sigma*sigma)) for i in range(X.shape[0])] for j in range(X.shape[0])])
    mu_pred, sigma2_pred = conditional_distribution(Y, mu, cov, list(range(X_post.shape[0])))
    # Thick line: posterior mean.
    line = plt.plot(X_pred, mu_pred)
    plt.setp(line, linewidth = 3.0)
    # Thin lines: five posterior samples.
    for i in range(5):
        Y_pred = np.random.multivariate_normal(mu_pred, sigma2_pred)
        line = plt.plot(X_pred, Y_pred)
        plt.setp(line, linewidth = 1.0)
    plt.scatter(X_post, Y_post, marker='x', zorder=100, color = 'r')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
    return
# Kernel widths to compare.
sigma_list = [0.3, 0.5, 1.0]
#(a) prior samples for each kernel width
for sigma in sigma_list:
    display_prior_Gaussian_Process(0, sigma, (-5,5))
#(c) posterior conditioned on five observed points
X_S = np.array([-1, 2.4, -2.5, -3.3, 0.3])
Y_S = np.array([2, 5.2, -1.5, -0.8, 0.3])
xmin, xmax, step = -4, 3, 0.05
for sigma in sigma_list:
    display_posterior_Gaissian_Process(X_S, Y_S, np.arange(xmin, xmax, step), sigma)
| hongxin-y/EECS545-Homeworks | HW4/prob5.py | prob5.py | py | 2,419 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.pinv",
"... |
1758676352 | import functools
# Demo banner for the memoised recursive version.
print("******* Recursive and Memoization *******")
@functools.lru_cache(maxsize=None)
def grid_traveller(m, n):
    """Count paths across an m x n grid moving only down or right.

    Memoised recursion: each cell's count is the sum of the counts of the
    cell above and the cell to the left.
    """
    if m == 1 and n == 1:
        return 1
    if m == 0 or n == 0:
        return 0
    return grid_traveller(m - 1, n) + grid_traveller(m, n - 1)
# Demo: expected outputs are noted inline.
print(grid_traveller(1,1)) # 1
print(grid_traveller(2,3)) # 3
print(grid_traveller(3,2)) # 3
print(grid_traveller(3,3)) # 6
print(grid_traveller(18,18)) # 2333606220

# tabulation
print("******* Tabulation *******")
def grid_traveller2(m, n):
    """Count paths across an m x n grid moving only down or right.

    Bottom-up tabulation: seed cell (1, 1) with one path, then push each
    cell's count forward to its right and down neighbours.
    """
    table = [[0] * (n + 1) for _ in range(m + 1)]
    table[1][1] = 1
    for row in range(m + 1):
        for col in range(n + 1):
            ways = table[row][col]
            if col + 1 <= n:
                table[row][col + 1] += ways
            if row + 1 <= m:
                table[row + 1][col] += ways
    return table[m][n]
# Demo: same expected values as the recursive version.
print(grid_traveller2(1,1)) # 1
print(grid_traveller2(2,3)) # 3
print(grid_traveller2(3,2)) # 3
print(grid_traveller2(3,3)) # 6
print(grid_traveller2(18,18)) # 2333606220
{
"api_name": "functools.lru_cache",
"line_number": 5,
"usage_type": "call"
}
] |
75045505952 | import time
import os, json
import wiotp.sdk.application
client = None
def commandCallback(event):
    """Handle an incoming device event; print doorStatus payload fields.

    Events other than 'doorStatus' are ignored.
    """
    if event.eventId == "doorStatus":
        # Received event 'doorStatus'
        payload = json.loads(event.payload)
        # Get the status and time. The time field is bound to a distinct
        # local name so it does not shadow the stdlib `time` module.
        status = payload['status']
        event_time = payload['time']
        print("Status: " + status)
        print("Time: " + event_time)
def main():
    """Connect to the IBM Watson IoT platform and poll for door events."""
    try:
        # Try and connect the client to the IBM Server
        options = wiotp.sdk.application.parseConfigFile("application.yaml")
        client = wiotp.sdk.application.ApplicationClient(config=options)
        client.connect()
        while True:
            # Continually subscribe to the event and call the callback function
            # NOTE(review): re-assigning the callback and re-subscribing every
            # 100 ms looks unintentional -- doing it once before the loop would
            # normally suffice; confirm with the wiotp SDK semantics.
            client.deviceEventCallback = commandCallback
            client.subscribeToDeviceEvents(eventId="doorStatus")
            time.sleep(0.1)
    except Exception as e:
        # Best-effort: report any connection/config failure and exit.
        print(e)

if __name__ == "__main__":
    main()
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "wiotp.sdk.application.sdk.application.parseConfigFile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "wiotp.sdk.application.sdk",
"line_number": 20,
"usage_type": "attribute"... |
23724759222 | import pathlib
import pygubu
PROJECT_PATH = pathlib.Path(__file__).parent
PROJECT_UI = PROJECT_PATH / "snakegame.ui"
import tkinter as tk
from tkinter import *
from PIL import ImageTk, Image
import random, os
import numpy as np
from operator import itemgetter
import pickle
# - GAMEPLAN DATA SETUP
GAMEPLAN_SIZE = 40 # Set the number of positions (boxes) that the snake can move (Horizontal and vertical)
GAMEBOX_SIZE = 20 # Set the size, in pixels, for the width of the box
GAME_PADDING = 10 # Set the padding on each side of the board
GAME_SPEED = 150 # Set the default speed for the snake. Lower number is faster
SNAKE_LENGTH_ADD = 3 # The increase in length each time the snake catch the cake
class SnakegameApp:
direction = [0,-1]
position = [0,0]
candypos = [0,0]
gamedata = {
'step' : 0,
'score' : 0,
'iteration' : 0,
'speed' : GAME_SPEED,
'delay' : 0,
'length' : 5,
'generation' : 0,
}
snake_list = []
gamematrix = np.zeros([GAMEPLAN_SIZE, GAMEPLAN_SIZE])
def __init__(self, game_engine_hook, running_ai=False):
self.gameplan_size = GAMEPLAN_SIZE
self.gamebox_size = GAMEBOX_SIZE
self.game_padding = GAME_PADDING
self.gamestatus = 'FIRST TIME'
self.running_ai = running_ai
self.game_engine_hook = game_engine_hook
# -----------------------------------------------------------------------------------------
# - PyGubu Builder Magic - Setup the static layout
# -----------------------------------------------------------------------------------------
self.builder = builder = pygubu.Builder()
builder.add_resource_path(PROJECT_PATH)
builder.add_from_file(PROJECT_UI)
# Main widget
self.mainwindow = builder.get_object("toplevel1", None)
self.canvas = builder.get_object("canvas", None)
self.info = builder.get_object("info", None)
self.highscore = builder.get_object("highscore", None)
self.score_variable = None
self.info_variable = None
self.highscore_variable = None
self.scale_variable = None
self.status_variable = None
self.restart_variable = None
builder.import_variables(self,
['score_variable',
'info_variable',
'highscore_variable',
'scale_variable',
'status_variable',
'restart_variable'])
builder.connect_callbacks(self)
# Complete the gameplan in the canvas object --------------------------------------------
for pos in range(0, self.gameplan_size+1, 1):
xl = self.game_padding + pos*self.gamebox_size
t = self.gameplan_size*self.gamebox_size+self.game_padding
self.canvas.create_line(xl, self.game_padding, xl, t)
self.canvas.create_line(self.game_padding, xl, t, xl)
# - Get the snake head and body image from file (19x19), and cake image
self.img_snake_body = ImageTk.PhotoImage(Image.open("snake_body.bmp"))
self.img_snake_head = ImageTk.PhotoImage(Image.open("snake_head_up.bmp"))
self.img_cake = ImageTk.PhotoImage(Image.open("cake-19x19.bmp"))
# - Bind some command buttons for shortcuts
self.mainwindow.bind("<space>", self.spacebar_command)
if running_ai == False:
# - Bind the arrow keys to the update move function
self.mainwindow.bind("<Left>" , lambda value : self.snake_update_move( -1, 0))
self.mainwindow.bind("<Right>" , lambda value : self.snake_update_move( +1, 0))
self.mainwindow.bind("<Up>" , lambda value : self.snake_update_move( 0, -1))
self.mainwindow.bind("<Down>" , lambda value : self.snake_update_move( 0, +1))
# - Run the mainloop
def run(self):
self.mainwindow.mainloop()
def start_new(self):
# set the first position for the snake
self.position[0] = random.randint(12, self.gameplan_size-12) # '12' is just to set it not too close to edge
self.position[1] = random.randint(12, self.gameplan_size-12)
self.gamematrix[self.position[0], self.position[1]] = 1 # Upself.date the gamematrix, '1' means position is 'snake_head'
self.snake = self.canvas.create_image(
self.position[0]*self.gamebox_size+self.game_padding+10, # '10' is the image offset
self.position[1]*self.gamebox_size+self.game_padding+10,
image=self.img_snake_head,
tags=('clean','snake'))
# Set the first position for the cake, avoid putting it on-top of snake
while True:
self.candypos[0] = random.randint(5, self.gamebox_size-5) # '5' is just to keep it away from edge
self.candypos[1] = random.randint(5, self.gamebox_size-5)
if not(self.candypos[0] == self.position[0] and self.candypos[1] == self.position[1]):
break
self.gamematrix[self.candypos[0], self.candypos[1]] = 3 # '3' indicates 'cake'
self.candy = self.canvas.create_image(
self.candypos[0]*self.gamebox_size+self.game_padding+10,
self.candypos[1]*self.gamebox_size+self.game_padding+10,
image=self.img_cake,
tags=('clean', 'cake'))
# --------------------------------------------------------------------------------------------
# snake_update_move: depending on gamestatus, change the direction parameter
# --------------------------------------------------------------------------------------------
def snake_update_move(self, x, y):
if (self.direction[0] != -x) and (self.direction[1] != -y):
self.direction[0], self.direction[1] = x, y
def check_collision(self, x, y):
# Check wall crash
if ((x < 0 or x>self.gameplan_size-1) or (y < 0 or y>self.gameplan_size-1)):
return 'CRASH_WALL'
# Check if we crashed with snake body
if self.gamematrix[x,y] == 2:
return 'CRASH_SNAKE'
# Check if we have done to many iterations since last cake was found
if (len(self.snake_list) > 2) and (self.gamedata['iteration'] > 100*len(self.snake_list)):
return 'CRASH_ITERATION'
return 'NO_COLLISION'
def check_candy_found(self, x, y):
if self.candypos == [x,y]:
return True
return False
# --------------------------------------------------------------------------------------------
# move: Move the snake, and update status information
# --------------------------------------------------------------------------------------------
def move(self):
# Save the old position, and update position with the new direction
prevpos = [0,0]
prevpos[0] = self.position[0]
prevpos[1] = self.position[1]
self.position[0] += self.direction[0]
self.position[1] += self.direction[1]
# Check if we hit the wall or the body
is_collision = self.check_collision(self.position[0], self.position[1])
if is_collision != 'NO_COLLISION':
self.gamestatus = is_collision
if is_collision == 'CRASH_WALL' :
self.info_variable.set("GAME OVER!\nYou hit the wall!\nPress 'Start Game' to go again.")
elif is_collision == 'CRASH_SNAKE':
self.info_variable.set("GAME OVER!\nYou ran into yourself!\nPress 'Start Game' to go again.")
else:
self.info_variable.set("GAME OVER!\nYou are stuck!\nPress 'Start Game' to go again.")
self.info.config(bg='red')
return
# Check if we caught the candy
if self.check_candy_found(self.position[0], self.position[1]) == True:
self.gamedata['score'] += 1
self.score_variable.set(f"{self.gamedata['score']:03d}")
self.gamestatus = 'CAKE_FOUND'
self.gamedata['iteration'] = 0
# - place the cake again, be sure that is not set in an occupied box
while True:
self.candypos[0] = random.randint(5, self.gameplan_size-5)
self.candypos[1] = random.randint(5, self.gameplan_size-5)
if self.gamematrix[self.candypos[0], self.candypos[1]] == 0:
break
else: # For debugging, TODO
print("CANNOT FIND CAKE POSITION")
self.gamematrix[self.candypos[0], self.candypos[1]] = 3
self.canvas.moveto(
self.candy,
self.candypos[0]*self.gamebox_size+self.game_padding+1,
self.candypos[1]*self.gamebox_size+self.game_padding+1
)
self.gamedata['length'] += SNAKE_LENGTH_ADD
# Update new position in gamematrix
self.gamematrix[prevpos[0], prevpos[1]] = 0 # Clear old position. (Actually not needed)
self.gamematrix[self.position[0], self.position[1]] = 1
# Move the image_head of the snake to it's new position
self.canvas.move(self.snake,
self.direction[0]*self.gamebox_size,
self.direction[1]*self.gamebox_size
)
# Move each image section of the snake body to it's new positions
if len(self.snake_list) < self.gamedata['length']: # Need to create new body segments
body = self.canvas.create_image(
prevpos[0]*self.gamebox_size + self.game_padding + 10,
prevpos[1]*self.gamebox_size + self.game_padding + 10,
image=self.img_snake_body,
tags=('clean', 'body')
)
self.snake_list.insert(0,[body, prevpos[0], prevpos[1]])
self.gamematrix[prevpos[0], prevpos[1]] = 2
else:
item = self.snake_list.pop()
self.gamematrix[item[1], item[2]]=0 # Clear old positon in matrix
item[1], item[2] = prevpos[0], prevpos[1]
self.snake_list.insert(0, item)
self.canvas.moveto( item[0],
item[1]*self.gamebox_size+self.game_padding,
item[2]*self.gamebox_size+self.game_padding)
self.gamematrix[prevpos[0], prevpos[1]]=2
# Update the step counter (Need to present it in the dashboard later ...)
self.gamedata['step'] += 1
self.gamedata['iteration'] += 1
self.status_variable.set(
f"Status:\n\nstep: {self.gamedata['step']}\n" +\
f"iteration: {self.gamedata['iteration']}\n" +\
f"Generation: {self.gamedata['generation']}\n")
# --------------------------------------------------------------------------------------------
def game_engine(self):
if self.gamestatus == 'GAMEOVER':
return
self.game_engine_hook()
# self.move()
self.mainwindow.after(int(self.gamedata['delay']),self.game_engine)
# self.mainwindow.after(300,self.game_engine)
# ---------------------------------------------------------------------------------------------
def spacebar_command(self, event):
self.restart_command()
def setspeed_command(self, scale_value):
pass
def restart_command(self, dont_restart_engine=False):
# -- Clean up previous session
# ----- Delete all objects
self.canvas.delete('clean')
self.gamematrix.fill(0)
self.gamedata['step'] = 0
self.gamedata['score'] = 0
self.gamedata['speed'] = self.scale_variable.get()
self.gamedata['delay'] = (490-40*self.scale_variable.get())/3
#print(f"scale: {self.scale_variable.get()} speed: {self.gamedata['speed']}")
self.gamedata['length'] = 5
self.snake_list = []
self.info_variable.set("\n Catch the Cake ....")
self.info.config(bg="green")
self.score_variable.set(f"{self.gamedata['score']:03d}")
# -- Start up game session
self.gamestatus = 'RUNNING'
self.start_new()
if dont_restart_engine == False:
self.game_engine()
if __name__ == "__main__":
    # NOTE(review): SnakegameApp.__init__ is declared as
    # (self, game_engine_hook, running_ai=False); passing three positional
    # layout constants here raises TypeError at startup (4 positional args
    # for a 3-arg constructor) -- confirm the intended call, e.g. passing a
    # game-engine hook callable instead.
    app = SnakegameApp(GAMEPLAN_SIZE, GAMEBOX_SIZE, GAME_PADDING)
    app.run()
# %%
| mwsse/raw_nn | snakeAI.py | snakeAI.py | py | 12,755 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygubu.Builder",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"... |
72153323874 | from uuid import uuid4
import pytest
from pydent.marshaller.base import add_schema
from pydent.marshaller.base import ModelRegistry
from pydent.marshaller.fields import Alias
from pydent.marshaller.fields import Callback
from pydent.marshaller.fields import Field
from pydent.marshaller.fields import Nested
from pydent.marshaller.fields import Relationship
class TestDump:
def test_dump_empty_data(self, base):
"""Dump should produce an empty dictionary."""
@add_schema
class MyModel(base):
pass
model = MyModel()
assert model.dump() == {}
def test_dump_empty_data_with_non_tracked_attrs(self, base):
"""Expect that non-tracked attributes are excluded from the dump."""
@add_schema
class MyModel(base):
pass
model = MyModel()
model.id = 4
assert model.dump() == {}
def test_dump_loaded_data(self, base):
"""Manually set data should appear in the dump."""
@add_schema
class MyModel(base):
pass
model = MyModel._set_data({"id": 5, "name": "MyName"})
assert model.dump() == {"id": 5, "name": "MyName"}
def test_dump_loaded_data_and_overwrite(self, base):
"""Manually set data can be overridden by setting attributes."""
@add_schema
class MyModel(base):
pass
model = MyModel._set_data({"id": 5, "name": "MyName"})
model.id = 6
assert model.dump() == {"id": 6, "name": "MyName"}
def test_dump_empty_field(self, base):
"""Empty fields should return an empty dictionary."""
@add_schema
class MyModel(base):
fields = dict(field=Field())
model = MyModel()
assert model.dump() == {}
def test_dump_field(self, base):
@add_schema
class MyModel(base):
fields = dict(field=Field())
model = MyModel._set_data({"name": "NAME"})
assert model.dump() == {"name": "NAME"}
def test_dump_with_new_data_key(self, base):
@add_schema
class MyModel(base):
fields = {
"field": Field(),
"source": Callback(
lambda s: getattr(s, "field"),
callback_args=(Callback.SELF,),
always_dump=True,
data_key="field",
),
}
model = MyModel({"field": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
model = MyModel({"source": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
print(model._get_data())
def test_alias(self, base):
"""Expect that alias fields refer to exactly the attribute set in the
alias.
That means, the 'source' field should refer to the 'field'
attribute.
"""
@add_schema
class MyModel(base):
fields = {"field": Field(), "source": Alias("field")}
model = MyModel({"field": 5})
assert model.field == model.source
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
model = MyModel({"source": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
def test_dump_marshalling_field(self, base):
"""Expect the custom HTMLTag field to be properly
serialized/deserialized."""
class HTMLTag(Field):
def serialize(self, caller, val):
return "<{tag}>{val}</{tag}>".format(tag=self.data_key, val=val)
@add_schema
class MyModel(base):
fields = dict(h1=HTMLTag())
model = MyModel._set_data({"h1": "raw"})
assert model.h1 == "raw"
model.h1 = "This is a Heading 1 Title"
assert model.h1 == "This is a Heading 1 Title"
assert model.dump() == {"h1": "<h1>This is a Heading 1 Title</h1>"}
def test_always_dump(self, base):
"""Expect that fields with 'always_dump' are, by default, dumped as
empty constructors event when they are empty."""
@add_schema
class MyModel(base):
fields = dict(
field1=Callback("find"), field2=Callback("find", always_dump=True)
)
def find(self):
return 100
m = MyModel()
assert m.dump() == {"field2": 100}
assert m.dump(include="field1") == {"field1": 100, "field2": 100}
assert m.dump(ignore="field2") == {}
def test_empty_list_field(self, base):
"""Expect."""
@add_schema
class ModelWithList(base):
fields = dict(mylist=Field())
model = ModelWithList()
model.mylist = []
assert model.mylist == []
model.mylist.append(5)
assert model.mylist == [5]
class TestNested:
"""Tests for nested serialization/deserialization."""
@pytest.fixture(scope="function")
def Company(self, base):
@add_schema
class Company(base):
pass
return Company
@pytest.fixture(scope="function")
def Publisher(self, base):
@add_schema
class Publisher(base):
fields = dict(author=Nested("Author"), company=Nested("Company"))
return Publisher
@pytest.fixture(scope="function")
def Author(self, base):
@add_schema
class Author(base):
fields = dict(
publisher=Nested("Publisher"), id=Field("id", allow_none=True)
)
return Author
def test_simple_nested(self, Author, Publisher):
author = Author._set_data(
{"name": "Richard Dawkings", "publisher": {"name": "Scotts Books"}}
)
print(author._get_data())
assert isinstance(author, Author)
assert isinstance(author.publisher, Publisher)
assert author.name == "Richard Dawkings"
assert author.publisher.name == "Scotts Books"
def test_double_nested(self, Author, Publisher, Company):
author = Author._set_data(
{
"name": "Samuel",
"publisher": {"name": "Archive 81", "company": {"name": "Damage Inc."}},
}
)
print(author._get_data())
assert isinstance(author, Author)
assert isinstance(author.publisher, Publisher)
assert isinstance(author.publisher.company, Company)
@pytest.fixture(scope="function")
def author_example_data(self):
data = {
"name": "Samuel",
"publisher": {"name": "Archive 81", "company": {"name": "Damage Inc."}},
}
return data
@pytest.fixture(scope="function")
def author_example(self, author_example_data, Author, Publisher, Company):
author = Author._set_data(author_example_data)
return author
def test_shared_data(self, author_example, author_example_data):
author = author_example
company = author.publisher.company
print(id(author_example_data["publisher"]))
assert author_example_data["publisher"] is author._get_data()["publisher"]
print(id(author._get_data()["publisher"]))
publisher = author.publisher
print(id(publisher._get_data()))
assert author._get_data()["publisher"] is publisher._get_data()
def test_double_nested_dump(self, author_example, author_example_data):
assert author_example._get_data() == author_example_data
assert author_example.publisher._get_data() == author_example_data["publisher"]
assert (
author_example.publisher.company._get_data()
== author_example_data["publisher"]["company"]
)
def test_del_nested(self, author_example, author_example_data):
author_example.name = "TIM"
assert author_example.name == "TIM"
author_example.publisher.name = "Holland"
assert author_example.publisher.name == "Holland"
author_example.publisher.company.name = "ABC"
assert author_example.publisher.company.name == "ABC"
del author_example.publisher.company
with pytest.raises(AttributeError):
author_example.publisher.company
assert "company" not in author_example._get_data()["publisher"]
assert "company" not in author_example.publisher._get_data()
def test_set_none_on_nested(self, author_example):
author_example.publisher = None
assert author_example.publisher is None
assert author_example._get_data()["publisher"] is None
def test_set_nested_attribute(self, author_example, Publisher):
author_example.publisher = None
assert author_example.publisher is None
assert author_example._get_data()["publisher"] is None
publisher = Publisher._set_data({"name": "P"})
author_example.publisher = publisher
assert author_example.publisher.name == "P"
assert author_example._get_data()["publisher"] is publisher._get_data()
def test_nested_dump(self, author_example, author_example_data):
new_company_name = str(uuid4())
expected_data = dict(author_example_data)
expected_data["publisher"]["company"]["name"] = new_company_name
author_example.publisher.company.name = new_company_name
expected_data_copy = dict(expected_data)
expected_data_copy.pop("publisher")
assert expected_data_copy == author_example.dump()
expected_data_copy = dict(expected_data["publisher"])
expected_data_copy.pop("company")
assert expected_data_copy == author_example.publisher.dump()
assert (
expected_data["publisher"]["company"]
== author_example.publisher.company.dump()
)
def test_load_a_model(self, base, author_example):
@add_schema
class AuthorList(base):
fields = dict(author=Nested("Author"))
author_list = AuthorList()
author_example.publisher.company.name = "Umbrella Corp"
author_list.author = author_example
assert author_list.author.publisher.company.name == "Umbrella Corp"
author_example.publisher.company.name = "LexCorp"
assert author_list.author.publisher.company.name == "LexCorp"
class TestRelationship:
@pytest.fixture(scope="function")
def Company(self, base):
@add_schema
class Company(base):
pass
return Company
@pytest.fixture(scope="function")
def Publisher(self, base):
@add_schema
class Publisher(base):
fields = dict(
company=Relationship(
"Company", "instantiate_model", 6, {"name": "MyCompany"}
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
return Publisher
@pytest.fixture(scope="function")
def Author(self, base):
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher", "instantiate_model", 4, {"name": "MyPublisher"}
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
return Author
@pytest.mark.parametrize(
"model,include,expected",
[
("Company", None, {}),
("Publisher", None, {}),
("Author", None, {}),
pytest.param(
"Publisher",
"company",
{"company": {"id": 6, "name": "MyCompany"}},
id="include 1 layer nested",
),
pytest.param(
"Author",
"publisher",
{"publisher": {"id": 4, "name": "MyPublisher"}},
id="include 1 layer nested",
),
pytest.param(
"Author",
{"publisher": "company"},
{
"publisher": {
"id": 4,
"name": "MyPublisher",
"company": {"id": 6, "name": "MyCompany"},
}
},
id="include 2 layer nested",
),
],
)
def test_nested_dump_with_include(
self, base, Author, Publisher, Company, model, include, expected
):
instance = ModelRegistry.get_model(model)()
assert instance.dump(include=include) == expected
@pytest.mark.parametrize(
"model,only,expected",
[
pytest.param(
"Author", "publisher", {"publisher": {"name": "MyPublisher", "id": 4}}
),
pytest.param(
"Author", {"publisher": "name"}, {"publisher": {"name": "MyPublisher"}}
),
pytest.param("Author", {"publisher": "id"}, {"publisher": {"id": 4}}),
pytest.param(
"Author",
{"publisher": "company"},
{"publisher": {"company": {"name": "MyCompany", "id": 6}}},
),
pytest.param(
"Author",
{"publisher": {"company": "id"}},
{"publisher": {"company": {"id": 6}}},
),
],
)
def test_relationship_dump_with_only(
self, base, Author, Publisher, Company, model, only, expected
):
instance = ModelRegistry.get_model(model)()
assert instance.dump(only=only) == expected
def test_relationship_dump_ignore(self, base, Author):
instance = Author._set_data({"name": "MyName", "id": 5})
assert instance.dump() == {"name": "MyName", "id": 5}
assert instance.dump(ignore="name") == {"id": 5}
assert instance.dump(ignore=["name", "id"]) == {}
def test_basic_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher", "instantiate_model", 4, {"name": "MyPublisher"}
),
cache=True,
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
author = Author()
assert author.dump() == {}
assert isinstance(
author.publisher, Publisher
), "publisher attribute should be a Publisher type"
assert (
author.publisher._get_data() is author._get_data()["publisher"]
), "data should be shared between the Author and Publisher"
assert author._get_data() == {"publisher": {"id": 4, "name": "MyPublisher"}}
assert author.dump() == {}
assert author.dump(include=["publisher"]) == author._get_data()
def test_many_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher",
"instantiate_model",
4,
{"name": "MyPublisher"},
many=True,
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
models = []
for i in range(3):
models.append(
ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
)
return models
author = Author()
print(author.dump())
print(author.dump(include="publisher"))
assert len(author.publisher) == 3
assert len(author.dump(include="publisher")["publisher"]) == 3
author.publisher = [
Publisher._set_data({"id": 3}),
Publisher._set_data({"id": 5}),
]
assert len(author.publisher) == 2
assert len(author.dump(include="publisher")["publisher"]) == 2
assert author.publisher[0].id == 3
assert author.publisher[1].id == 5
author.publisher.append(Publisher())
assert len(author.publisher) == 3
assert len(author.dump(include="publisher")["publisher"]) == 3
def test_load_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publishers=Relationship(
"Publisher",
"instantiate_model",
4,
{"name": "MyPublisher"},
many=True,
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
models = []
for i in range(3):
models.append(
ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
)
return models
publisher_data = {"name": "Torr Books"}
author = Author._set_data(
{"name": "Steven King", "publishers": [publisher_data]}
)
assert len(author.publishers) == 1
assert isinstance(author.publishers[0], Publisher)
assert author.publishers[0]._get_data() is publisher_data
del author.publishers
assert len(author.publishers) == 3
assert isinstance(author.publishers[0], Publisher)
| aquariumbio/pydent | tests/test_marshaller/test_fields/test_marshalling.py | test_marshalling.py | py | 19,065 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pydent.marshaller.base.add_schema",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pydent.marshaller.base.add_schema",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pydent.marshaller.base.add_schema",
"line_number": 39,
"usage_type": ... |
10428105366 | import errno
import linecache
import mimetypes
import os
import signal
import sys
from datetime import datetime
from functools import wraps
from urllib.parse import urlparse
import pandas as pd
import requests
from bs4 import BeautifulSoup
from validator_collection import checkers
EN_KEY_WORDS = ["debt", "statistical", "statistics", "bulletin", "monthly", "report", "strategy"]
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator: raise TimeoutError if the wrapped call exceeds *seconds*.

    Implemented with SIGALRM, so it only works on the main thread of a
    POSIX process.

    :param seconds: wall-clock limit (whole seconds) for one call.
    :param error_message: message carried by the raised TimeoutError.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            # Install our handler, remembering the previous one so it can be
            # restored afterwards (the original left _handle_timeout installed
            # for the rest of the process).
            old_handler = signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
            return result

        return wraps(func)(wrapper)

    return decorator
def is_downloadable(url):
    """
    Does the url contain a downloadable resource.

    :param url: URL to probe with a GET request.
    :return: True when the response looks like a document (by Content-Type
        marker or by a document extension in the URL), False for text/HTML
        or on request errors, None on a non-200 status.
    """
    try:
        # "xlsx" and "csv" are separate entries; the original list had a
        # missing comma that silently fused them into "xlsxcsv".
        file_extensions = ["pdf", "docx", "doc", "xls", "xlsx", "csv"]
        content_type_list = ["pdf", "word", "excel", "officedocument"]
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            return None
        content_type = response.headers.get("Content-Type", "")
        # Look for document markers inside the Content-Type header. The
        # original iterated the header's CHARACTERS, so it never matched.
        for marker in content_type_list:
            if marker in content_type.lower():
                return True
        filename = url.split("/")[-1]
        for ext in file_extensions:
            if ext in filename:
                return True
        if 'text' in content_type.lower():
            return False
        if 'html' in content_type.lower():
            return False
        return True
    except Exception:
        return False
def get_weblinks(fpath=None):
    """
    Read the country/link CSV and map codes to country details.

    :param fpath: path (or file-like object) of a CSV with columns
        'Country Code', 'DMF Country name' and 'Link'.
    :return: dict of country code -> {'targetUrl': ..., 'countryName': ...};
        missing links become the sentinel string 'No link yet'.
    """
    df = pd.read_csv(fpath)
    # Assign the result instead of fillna(inplace=True) on a column
    # selection, which pandas deprecates and which can silently operate
    # on a copy.
    df["Link"] = df["Link"].fillna("No link yet")
    country_dict = {}
    # zip over the three columns directly; cheaper than iterrows and
    # avoids per-row Series construction.
    for code, name, url in zip(df["Country Code"], df["DMF Country name"], df["Link"]):
        country_dict[code] = {"targetUrl": url, "countryName": name}
    return country_dict
def get_links_from_target_sites(url=None):
    """
    Fetch *url* and collect every anchor on the page.

    :param url: page to scan.
    :return: dict mapping each anchor's href to its link text, or None
        when the page cannot be retrieved or parsed.
    """
    # TODO: detect webpage language and return it
    try:
        page = requests.get(url, timeout=10)
        if page.status_code != 200:
            return None
        soup = BeautifulSoup(page.text, "html.parser")
        return {anchor['href']: anchor.getText()
                for anchor in soup.find_all('a', href=True)}
    except Exception:
        return None
def retrieve_filename_and_extension(response=None, url=None):
    """
    Derive a filename (with extension) for a downloaded resource.

    Prefers an extension already present in the URL's last path segment;
    otherwise guesses one from the response's Content-Type header.

    :param response: requests.Response whose headers may supply a type.
    :param url: the URL the resource was fetched from.
    :return: the filename string, or None when no name can be derived.
    """
    # "xlsx" and "csv" are separate entries; the original list had a
    # missing comma that fused them into "xlsxcsv".
    file_extensions = ["pdf", "docx", "doc", "xls", "xlsx", "csv"]
    content_type_list = ["pdf", "word", "excel", "officedocument"]
    try:
        filename = url.split("/")[-1]
        for ext in file_extensions:
            if ext in filename:
                return filename
        content_type = response.headers['content-type']
        for marker in content_type_list:
            if marker in content_type.lower():
                extension = mimetypes.guess_extension(content_type)
                filename = url.split("/")[-1] + extension
                return filename
    except Exception:
        return None
def download_file(url=None, outfolder=None):
    """
    Download and save file if possible.

    :param url: resource to fetch.
    :param outfolder: directory the file is written into.
    :return: the saved filename, or None on any failure (bad status,
        un-derivable filename, write error, request exception).
    """
    try:
        file = requests.get(url, allow_redirects=True, timeout=10)
        if file.status_code != 200:
            return None
        filename = retrieve_filename_and_extension(response=file, url=url)
        fpath = os.path.join(outfolder, filename)
        # Context manager guarantees the handle is closed even if the
        # write fails; the original leaked the open file object.
        with open(fpath, 'wb') as out:
            out.write(file.content)
        return filename
    except Exception:
        return None
def download_content(content_url=None, output_folder=None, base_url=None, visit_log=None):
    """Download one link, recursing into pages when it is not a direct file.

    :param content_url: absolute or site-relative link to fetch.
    :param output_folder: directory downloaded files are written into.
    :param base_url: URL of the page the link was found on; its
        scheme+host are used to absolutize relative links.
    :param visit_log: dict of already-seen URLs, shared across the crawl.
    :return: the value returned by download_file or scrape_docs_from_links,
        or None on failure.
    """
    try:
        if checkers.is_url(content_url):
            if is_downloadable(content_url):
                # Direct file: save it and return its filename.
                return download_file(url=content_url, outfolder=output_folder)
            else:
                # A web page: crawl it one level deeper for documents.
                return scrape_docs_from_links(seed_url=content_url, output_dir=output_folder,
                                              root_url=base_url, seen_links=visit_log, relevant=True)
        else:
            # Relative link: prepend base_url's scheme+host and retry.
            parsed_uri = urlparse(base_url)
            root_url = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
            revised_link = root_url[:-1] + content_url
            return download_content(content_url=revised_link, output_folder=output_folder,
                                    base_url=revised_link, visit_log=visit_log)
    except Exception:
        # Report the failing file/line for debugging; implicitly returns None.
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
def get_relevant_links(starter_url=None, has_key_words=True):
    """Return [starter_url] plus links on that page that look report-related.

    A link is relevant when a keyword from EN_KEY_WORDS appears in its URL
    or in its anchor text, or when the URL contains a document extension.

    :param starter_url: page whose anchors are inspected.
    :param has_key_words: kept for interface compatibility (unused, as in
        the original implementation).
    :return: list of URLs, or None when the page's links cannot be fetched.
    """
    links = get_links_from_target_sites(url=starter_url)
    if not links:
        return None
    # "xlsx" and "csv" are separate entries; the original list had a
    # missing comma that fused them into "xlsxcsv".
    doc_extensions = ["pdf", "docx", "doc", "xls", "xlsx", "csv"]
    relevant_urls = [starter_url]
    for link, text in links.items():
        keyword_hit = any(
            w in link.lower() or (text and w in text.lower())
            for w in EN_KEY_WORDS
        )
        # Append each link at most once; the original could append the same
        # link twice when it matched both a keyword and an extension.
        if keyword_hit or any(ext in link for ext in doc_extensions):
            relevant_urls.append(link)
    return relevant_urls
def scrape_docs_from_links(seed_url=None, output_dir=None, root_url=None, seen_links=None, relevant=False):
    """Crawl one page: download every relevant link found on *seed_url*.

    :param seed_url: page to scan for relevant links.
    :param output_dir: directory downloads are written into.
    :param root_url: site root used to absolutize relative links.
    :param seen_links: dict of URL -> visit count; URLs visited more than
        twice are skipped, bounding the mutual recursion with
        download_content.
    :param relevant: forwarded to get_relevant_links as has_key_words.
    :return: list of {'downloadedFileName', 'downloadUrl'} dicts, or None
        when no links could be gathered.
    """
    found_links = get_relevant_links(starter_url=seed_url, has_key_words=relevant)
    # get_relevant_links returns None on failure; the original passed that
    # straight into set(), raising TypeError instead of returning None.
    if not found_links:
        return None
    relevant_urls = set(found_links)
    files_downloaded = []
    for u in relevant_urls:
        print("========================================")
        print(u)
        print("========================================")
        try:
            # Count the visit and skip URLs we have already tried twice.
            if u in seen_links:
                seen_links[u] = seen_links[u] + 1
            else:
                seen_links[u] = 1
            if seen_links[u] > 2:
                continue
            res = download_content(content_url=u, output_folder=output_dir, base_url=root_url, visit_log=seen_links)
            if res:
                files_downloaded.append({"downloadedFileName": res, "downloadUrl": u})
        except Exception:
            # Best-effort crawl: report the failing line and keep going.
            exc_type, exc_obj, tb = sys.exc_info()
            f = tb.tb_frame
            lineno = tb.tb_lineno
            filename = f.f_code.co_filename
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
    return files_downloaded
def process_single_country(url=None, downloads_dir=None, country_code=None):
    """Download all relevant documents for one country.

    Creates <downloads_dir>/<country_code> if needed, then crawls *url*.

    :param url: the country's ministry-of-finance seed URL.
    :param downloads_dir: root folder holding per-country subfolders.
    :param country_code: subfolder name for this country.
    :return: the list of download records from scrape_docs_from_links.
    """
    # ========================================
    # SET UP DIRECTORY TO PUT FILES
    # ========================================
    target_dir = os.path.join(downloads_dir, country_code)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # =========================================
    # DOWNLOAD FILES WHERE AVAILABLE
    # =========================================
    visit_counts = {url: 1}
    return scrape_docs_from_links(seed_url=url, output_dir=target_dir,
                                  root_url=url, seen_links=visit_counts, relevant=True)
def process_all_countries(metadata_outfile=None, country_web_links=None, downloads_dir=None, country_list=None):
    """
    Process each selected country and write a CSV log of the results.

    :param metadata_outfile: path of the CSV processing log to write
    :param country_web_links: CSV file mapping country codes to target URLs
    :param downloads_dir: root directory for downloaded files
    :param country_list: country codes to process; all others are skipped
    """
    urls = get_weblinks(fpath=country_web_links)
    ts = datetime.now()
    time_format = "%m-%d-%Y %H:%M"
    ts_str = ts.strftime(time_format)
    df_data = []
    # Template row.  Each appended row below gets its own copy: appending the
    # same dict object repeatedly (the original code) made every row in the
    # final CSV alias — and therefore display — the last mutation.
    base_pt = {"dateProcessed": ts_str, "keyWords": EN_KEY_WORDS, "comment": None,
               "downloadRelevant": None, "status": None}
    for k, v in urls.items():
        if k not in country_list:
            continue
        try:
            print("Working on country : {}".format(k))
            print()
            res = process_single_country(url=v["targetUrl"], downloads_dir=downloads_dir, country_code=k)
            data_pt = dict(base_pt)
            data_pt["countryName"] = v["countryName"]
            data_pt["countryCode"] = k
            data_pt["targetUrl"] = v["targetUrl"]
            if data_pt["targetUrl"] == "No link yet":
                row = dict(data_pt)
                row["comment"] = 'Need to find MoF weblink'
                row["status"] = "Not started"
                df_data.append(row)
            if res:
                # One log row per downloaded file.
                for item in res:
                    row = dict(data_pt)
                    row.update(item)
                    row["status"] = "Complete"
                    df_data.append(row)
            else:
                row = dict(data_pt)
                row["comment"] = 'Something went wrong'
                row["status"] = 'in progress'
                df_data.append(row)
        except Exception:
            # Log the failing line and move on to the next country.
            _, exc_obj, tb = sys.exc_info()
            f = tb.tb_frame
            lineno = tb.tb_lineno
            filename = f.f_code.co_filename
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
            print("Failed to complete work for this country")
    df = pd.DataFrame(df_data)
    cols = ["countryName", "countryCode", "targetUrl", "downloadUrl", "downloadedFileName",
            "dateProcessed", "keyWords", "downloadRelevant", "comment", 'status']
    # reindex instead of df[cols]: columns absent from every row (e.g. no
    # file was downloaded at all) become NaN instead of raising KeyError.
    df = df.reindex(columns=cols)
    df.to_csv(metadata_outfile, index=False)
def main():
    """Entry point: configure local paths and scrape the selected countries."""
    # Replace the paths below with your own locations.
    root_dir = os.path.abspath(
        "/Users/dmatekenya/Google-Drive/teachingAndLearning/SIAP-oct-2019/python-for-data-science/")
    downloads_dir = os.path.join(root_dir, "fileDownloads")
    links_csv = os.path.join(root_dir, "data", "webLinks.csv")
    log_csv = os.path.join(root_dir, "fileDownloads", "processingLogs.csv")
    target_countries = ["ETH", "BEN", "GMB", "GHA"]
    process_all_countries(metadata_outfile=log_csv,
                          country_web_links=links_csv,
                          downloads_dir=downloads_dir,
                          country_list=target_countries)
    print("DONE")
if __name__ == '__main__':
    main()
| dmatekenya/UNSIAP-Python-Oct-2019 | src/python-for-data-science/case_study_web_scraping.py | case_study_web_scraping.py | py | 11,265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.strerror",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "errno.ETIME",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "signal.SIGALRM",
"line... |
1133256822 | #Library
# Decision-tree regression on the UCI forest-fires dataset with repeated
# 10-fold cross validation; reports MAD/RMSE and plots predictions.
import csv
import math
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.model_selection import KFold
# Variables
X = []  # feature rows (floats)
y = []  # targets: log-transformed burned area
Y_series = []  # used to save real values and respective predictions
dtr_MADscores = []   # mean absolute deviation per fold
dtr_RMSEscores = []  # root mean squared error per fold
features = ('X','Y','month','day','FFMC','DMC','DC','ISI','temp','RH','wind','rain')
month_dict = {'jan':1,'feb':2,'mar':3,'apr':4,'may':5,'jun':6,
              'jul':7,'aug':8,'sep':9,'oct':10,'nov':11,'dec':12,}
day_dict = {'mon':1,'tue':2,'wed':3,'thu':4,'fri':5,'sat':6,'sun':7}
# Modifiable run parameters (filled from user input below)
keep_col = None
kf_rounds = None
depth = None
# Retrieve user input
n = int(input("Enter # of desired features: "))
keep_col = list(map(int,input("Enter the feature columns (format: 0 1 2 ...): ").strip().split()))[:n]
kf_rounds = int(input("Enter # of 10 fold cross validation rounds: "))
depth = int(input("Enter depth of decision tree (3 is reccomended): "))
# Load data
print("\nLoading data...", end=' ')
with open("../resource/forestfires.csv", 'r') as csvfile:
    reader = csv.reader(csvfile)
    for i, row in enumerate(reader):
        if i > 0: # skipping the header
            # NOTE(review): row[7] (ISI) is skipped here even though 'ISI'
            # appears in `features` — confirm this is intentional.
            temp = [row[0],row[1],month_dict[row[2]],day_dict[row[3]],row[4],row[5],
                    row[6],row[8],row[9],row[10],row[11]]
            X.append([float(i) for i in temp])
            # natural log transform of the target (area can be 0, hence +1)
            y.append(math.log(float(row[-1])+1))
print("Done")
# Update X according to selected feature columns
X_fs = []
for instance in X:#_std:
    temp = []
    for k,value in enumerate(instance):
        if k in keep_col:
            temp.append(value)
    X_fs.append(temp)
# Train and test model with rounds of 10 fold cross validation
print("\nPerforming 10 fold cross validation...")
for kf_round in range(kf_rounds):
    print("Round " + str(kf_round+1))
    # Split data into test and train
    kf = KFold(n_splits=10, shuffle=True)
    # NOTE(review): the result of this call is discarded; the loop below
    # re-splits, so this line appears redundant — confirm.
    kf.split(X_fs,y)
    for train_index, test_index in kf.split(X_fs):
        X_train = []
        y_train = []
        for index in train_index:
            X_train.append(X_fs[index])
            y_train.append(y[index])
        X_test = []
        y_test = []
        for index in test_index:
            X_test.append(X_fs[index])
            y_test.append(y[index])
        # Build model for this fold
        dtr = tree.DecisionTreeRegressor(max_depth=depth)
        dtr = dtr.fit(X_train,y_train)
        # Test model
        dtr_totalError = 0
        dtr_totalSE = 0
        for instance,target in zip(X_test,y_test):
            # reversing the natural log transformation (exp(x) - 1)
            dtr_prediction = math.exp(dtr.predict([instance])[0])-1
            target_transform = math.exp(target)-1
            # populating series for later plotting
            Y_series.append([target_transform,dtr_prediction])
            # totaling errors
            dtr_error = target_transform - dtr_prediction
            dtr_totalError += abs(dtr_error)
            dtr_totalSE += dtr_error*dtr_error
        # saving MAD and RMSE scores for this fold
        dtr_MADscores.append(dtr_totalError/len(X_test))
        dtr_RMSEscores.append(math.sqrt(dtr_totalSE/len(X_test)))
print("Done")
# Calculate average scores across all folds and rounds
dtr_avgMAD = sum(dtr_MADscores)/len(dtr_MADscores)
dtr_avgRMSE = sum(dtr_RMSEscores)/len(dtr_RMSEscores)
# Print average performance of the model
print("\nDT w/ depth " + str(depth) +":")
print("average MAD = " + str(dtr_avgMAD))
print("average RMSE = " + str(dtr_avgRMSE))
# Sort real values and predictions by increasing order of actual values
Y_series.sort(key=lambda x:x[0])
truth_series = [i for i,j in Y_series] # actual values
dtr_series = [j for i,j in Y_series] # dtr predictions
# Plot real values and dtr predictions
plt.figure(figsize=(12,12))
plt.title(label="DT")
plt.xlabel('Ordered test set')
plt.ylabel('Burned area (in hectares)')
plt.ylim([0,20])
plt.plot(truth_series, 'bo', label='real values')
plt.plot(dtr_series, 'r+', label='predictions')
plt.legend()
plt.show()
| hyin8/CS4210_project | src/forest_fires_dt.py | forest_fires_dt.py | py | 3,966 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.Decisi... |
6386175748 | from statistics import median_low as mdl, median_high as mdh, median as md
from copy import deepcopy as dc
# Print the 1-based index (in the original order) of the element holding
# the median of `floors`.
# NOTE(review): `num` is read but never used — presumably the element count.
num = int(input())
floors = list(map(int, input().split()))
newfloor = dc(floors)
newfloor.sort()
if len(floors)%2==0:
    # Even length: low/high medians are distinct list elements.
    medl = mdl(newfloor)
    medh = mdh(newfloor)
    midmed = md(newfloor)
    ind = min([floors.index(medl), floors.index(medh)])
    # If both medians first occur at the same position, fall back to the
    # plain median (their average) — NOTE(review): that average may not be
    # present in `floors`; confirm inputs guarantee it is.
    if floors.index(medl) == floors.index(medh):
        print(floors.index(midmed)+1)
    else:
        print(ind+1)
else:
    # Odd length: the median is always an element of the list.
    midmed = md(newfloor)
    print(floors.index(midmed)+1)
| oneku16/CompetitiveProgramming | ICPC/ICPC2021/warmup/k.py | k.py | py | 544 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "copy.deepcopy",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "statistics.median_low",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "statistics.median_high",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "statistics.m... |
74732864354 | import tkinter as tk
from tkinter import ttk
import datetime as dt
import mysql.connector
from tkinter import messagebox
import configparser
class NuovoLottoCucina(tk.Toplevel):
    """Toplevel window used to register a new kitchen sale lot.

    The operator picks products from category tabs, enters a weight for
    each, reviews the rows in a treeview, and finally saves them into the
    ``lotti_vendita_cucina`` MySQL table.
    """
    def __init__(self):
        super(NuovoLottoCucina, self).__init__()
        self.title("Nuovo Lotto Cucina")
        self.geometry("+0+0")
        self.data = dt.date.today()
        # DB settings come from config.ini.
        # NOTE(review): the password is hard-coded empty — confirm intended.
        self.config = self.leggi_file_ini()
        self.conn = mysql.connector.connect(host=self.config['DataBase']['host'],
                                            database=self.config['DataBase']['db'],
                                            user=self.config['DataBase']['user'],
                                            password='')
        self.c = self.conn.cursor()
        self.lista_da_salvare = []        # rows queued for the final INSERT
        self.lista_nuova_produzione = []
        self.nuova_produzione = tk.StringVar()
        # Sale-lot progressive number: today's date as ddmmyy.
        self.prog_lotto_ven = self.data.strftime('%d%m%y')
        self.img_btn1 = tk.PhotoImage(file=".//immagini//logo_piccolo.gif")
        self.value = tk.StringVar()       # currently selected product
        self.value_peso = tk.StringVar()  # weight typed by the operator
        # Frame layout
        self.frame_alto = tk.Frame(self, bd='3', relief='groove')
        self.frame_centro = tk.Frame(self, height=450, width=self.winfo_screenwidth(),
                                     bd='3', relief='groove')
        self.frame_basso = tk.Frame(self, bd='3', background='white', relief='groove')
        # Labels for the new sale-lot header
        self.lbl_nuovo_lotto = tk.Label(self.frame_alto, text='NUOVO LOTTO VENDITA', font=('Helvetica', 20),
                                        foreground='blue', relief='ridge', padx=20)
        self.lbl_prog_lotto_vendita = tk.Label(self.frame_alto, text=str(self.prog_lotto_ven),
                                               font=('Helvetica', 20), bg='white', relief='sunken', padx=20)
        # Labels showing the current week number
        self.lbl_settimana = tk.Label(self.frame_alto, text='SETTIMANA NUMERO ',
                                      foreground='blue', font=('Verdana', 20), relief='ridge', padx=20)
        self.lbl_nr_settimana = tk.Label(self.frame_alto, text=str(1 + int(self.data.strftime('%W'))),
                                         font=('Verdana', 20), bg='white', relief='sunken', padx=20)
        # Treeview summarising the rows entered so far
        # NOTE(review): invia() inserts 5 values per row but only 4 columns
        # are declared here — confirm the 5th value survives the round trip
        # through tree.item() used by esci_salva().
        self.tree = ttk.Treeview(self.frame_centro, height=15)
        self.tree['columns'] = ('prog_v', 'prodotto', 'peso', 'settimana')
        self.tree['displaycolumns'] = ('prodotto', 'peso')
        self.tree['show'] = 'headings'
        self.tree.column("prodotto", width=180)
        self.tree.column("peso", width=80)
        self.tree.heading("prodotto", text="prodotto")
        self.tree.heading("peso", text="peso")
        self.tree.tag_configure('odd', background='light green')
        # Notebook holding one tab per product category
        self.notebook = ttk.Notebook(self.frame_centro)
        # TAB 1: first courses
        self.tab1 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab1, text='Primi', compound='left', image=self.img_btn1)
        self.lista_primi = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'Primi piatti'")
        for row in self.c:
            self.lista_primi.extend(row)
        # Radio buttons laid out in columns of at most 9 rows each.
        row, col = 1, 0
        for i in range(0, len(self.lista_primi)):
            if row % 10 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab1,
                           text=str(self.lista_primi[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_primi[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 2: fish first courses
        self.tab2 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab2, text='Primi Pesce', compound='left', image=self.img_btn1)
        self.lista_primi_pesce = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'Primi piatti di pesce'")
        for row in self.c:
            self.lista_primi_pesce.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_primi_pesce)):
            if row % 10 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab2,
                           text=str(self.lista_primi_pesce[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_primi_pesce[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 3: second courses
        self.tab3 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab3, text='Secondi', compound='left', image=self.img_btn1)
        self.lista_secondi = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'secondi piatti'")
        for row in self.c:
            self.lista_secondi.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_secondi)):
            if row % 13 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab3,
                           text=str(self.lista_secondi[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_secondi[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 4: fish second courses
        # NOTE(review): this query filters on merceologia = '5' while every
        # other tab uses a descriptive name — confirm the magic value.
        self.tab4 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab4, text='Secondi Pesce', compound='left', image=self.img_btn1)
        self.lista_secondi_pesce = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = '5'")
        for row in self.c:
            self.lista_secondi_pesce.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_secondi_pesce)):
            if row % 13 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab4,
                           text=str(self.lista_secondi_pesce[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_secondi_pesce[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 5: side dishes
        self.tab5 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab5, text='Contorni', compound='left', image=self.img_btn1)
        self.lista_contorni = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'contorni'")
        for row in self.c:
            self.lista_contorni.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_contorni)):
            if row % 10 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab5,
                           text=str(self.lista_contorni[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_contorni[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 6: desserts
        self.tab6 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab6, text='Dolci', compound='left', image=self.img_btn1)
        self.lista_dolci = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'dolci'")
        for row in self.c:
            self.lista_dolci.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_dolci)):
            if row % 10 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab6,
                           text=str(self.lista_dolci[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_dolci[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # TAB 7: cold dishes
        self.tab7 = ttk.Frame(self.notebook)
        self.notebook.add(self.tab7, text='Piatti freddi', compound='left', image=self.img_btn1)
        self.lista_piatti_freddi = []
        self.c.execute("SELECT prodotto FROM prodotti WHERE merceologia = 'piatti freddi'")
        for row in self.c:
            self.lista_piatti_freddi.extend(row)
        row, col = 1, 0
        for i in range(0, len(self.lista_piatti_freddi)):
            if row % 10 == 0:
                col += 1
                row = 1
            tk.Radiobutton(self.tab7,
                           text=str(self.lista_piatti_freddi[i]),
                           variable=self.value,
                           width=25,
                           indicatoron=0,
                           value=self.lista_piatti_freddi[i],
                           font='Verdana').grid(row=row, column=col, sticky='w')
            row += 1
        # Labelframe for the weight entry and the action buttons
        self.lblframe_peso = ttk.LabelFrame(self.frame_basso, text='Peso')
        # Entry widget for the weight
        self.entry_peso = ttk.Entry(self.lblframe_peso,
                                    font=('Helvetica', 20),
                                    textvariable=self.value_peso)
        self.entry_peso.focus()
        # Action buttons: submit, close, save-and-close
        self.btn_invia = tk.Button(self.frame_basso,
                                   text="Invio",
                                   font=('Helvetica', 20),
                                   command=self.invia)
        self.btn_esci = tk.Button(self.frame_basso,
                                  text="Chiudi finestra",
                                  font=('Helvetica', 20),
                                  command=self.esci_senza_salvare)
        self.btn_esci_salva = tk.Button(self.frame_basso,
                                        text="Esci e salva",
                                        font=('Helvetica', 20),
                                        command=self.esci_salva)
        self.btn_elimina_riga = tk.Button(self.frame_centro, text='Elimina riga', command=self.rimuovi_riga_selezionata)
        # Widget layout
        self.frame_alto.grid(row=0, column=0, padx=10)
        self.frame_centro.grid(row=1, column=0)
        self.frame_basso.grid(row=2, column=0, columnspan=3)
        self.notebook.grid(row=0, column=0, rowspan=3)
        self.tree.grid(row=0, column=1)
        self.btn_elimina_riga.grid(row=1, column=1, sticky='we')
        self.lbl_settimana.grid(row=0, column=1)
        self.lbl_nr_settimana.grid(row=0, column=2)
        self.lbl_nuovo_lotto.grid(row=0, column=3)
        self.lbl_prog_lotto_vendita.grid(row=0, column=4)
        self.lblframe_peso.grid(row=0, column=0)
        self.entry_peso.grid()
        self.btn_invia.grid(row=0, column=1, padx=10, pady=10)
        self.btn_esci_salva.grid(row=0, column=2, padx=10, pady=10)
        self.btn_esci.grid(row=0, column=3, padx=10, pady=10)
    @staticmethod
    def leggi_file_ini():
        """Read and return the application's config.ini."""
        ini = configparser.ConfigParser()
        ini.read('config.ini')
        return ini
    def rimuovi_riga_selezionata(self):
        """Delete the currently selected treeview row.

        NOTE(review): raises IndexError when no row is selected — confirm.
        """
        curitem = self.tree.selection()[0]
        self.tree.delete(curitem)
    def invia(self):
        """Append the selected product + weight as a new treeview row and
        clear the weight entry."""
        self.tree.insert('', 'end', values=('L' + (str(self.prog_lotto_ven)),
                                            self.value.get(),
                                            self.value_peso.get(),
                                            self.data,
                                            (1 + int(self.data.strftime('%W')))))
        self.entry_peso.delete(0, tk.END)
    def esci_senza_salvare(self):
        """Close the window, but refuse while unsaved rows remain."""
        if bool(self.tree.get_children()):
            messagebox.showinfo('Attenzione', 'Ci sono dati inseriti non salvati')
        else:
            self.destroy()
    def esci_salva(self):
        """Persist all treeview rows into lotti_vendita_cucina and close."""
        for child in self.tree.get_children():
            self.lista_da_salvare.append(self.tree.item(child)['values'])
        print(self.lista_da_salvare)
        # The INSERT expects 5 parameters per row (see the NOTE on the
        # treeview column declaration in __init__).
        self.c.executemany('INSERT INTO lotti_vendita_cucina(progressivo_ven_c, prodotto, quantita,'
                           'data_prod, settimana) VALUES (%s,%s,%s,%s,%s)', self.lista_da_salvare)
        self.conn.commit()
        self.conn.close()
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a Tk root is required for the Toplevel to attach to.
    root = tk.Tk()
    new = NuovoLottoCucina()
    root.mainloop()
| AleLuzzi/GestioneLaboratorio | Laboratorio/nuovo_lotto_cucina.py | nuovo_lotto_cucina.py | py | 12,794 | python | it | code | 1 | github-code | 1 | [
{
"api_name": "tkinter.Toplevel",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysql.con... |
75201405152 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Created on 2017/12/1
__author__ = "Sky Jin "
"""
Description:
练习 1 : 去掉页面动态窗: 这里举一个新世界教育官网首页的例子
由于alert弹窗不美观,现在大多数网站都会使用自定义弹窗,
使用Selenium自带的方法就驾驭不了了,此时就要搬出JS大法
driver.execute_script(js_monitor
"""
from selenium import webdriver
from time import sleep
options = webdriver.ChromeOptions()
options.add_argument('disable-infobars')
driver = webdriver.Chrome(chrome_options=options)
driver.maximize_window()
driver.get("http://sh.xsjedu.org/")
driver.implicitly_wait(20)
# js = driver.execute_script('document.getElementById("doyoo_monitor").style.display')
# print(js)
# 关闭悬浮框的显示。
js_monitor = 'document.getElementById("doyoo_monitor").style.display = "none"'
driver.execute_script(js_monitor)
sleep(2)
driver.quit()
| skyaiolos/SeleniumWithPython | demomore/training_JS_find/training1_dis_alertdlg.py | training1_dis_alertdlg.py | py | 977 | python | zh | code | 1 | github-code | 1 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 20,
"usage_type": "call"
},
{
"api... |
18276850000 | from datetime import datetime
import schedule
from telebot import types
from rf_bank_api import get_banks_currency
from keyboards import kb_banks, kb_currencies, kb_configure
from database_work import add_banks, add_currencies, get_currencies, get_banks, form_gist
from data import banks, currencies, rus_to_en_banks, rus_to_en_currencies
from obj_store_work import get_from_os
import telebot
def schedule_statistic():
    """Schedule the weekly statistics histogram (every Monday at 07:00)
    and run the scheduler loop forever (this call blocks)."""
    import time  # stdlib; imported locally to keep this fix self-contained
    schedule.every().monday.at("07:00").do(form_gist, 'statistics')
    while True:
        schedule.run_pending()
        # Without a sleep this loop busy-spins and pins a CPU core;
        # one-second resolution is ample for a weekly job.
        time.sleep(1)
def telegram_bot(token):
    """Build and run the currency Telegram bot.

    Registers all command handlers on a TeleBot instance, then enters long
    polling; this function blocks until the process is stopped.
    """
    bot = telebot.TeleBot(token)
    @bot.message_handler(commands=["start"])
    def start_message(message):
        # /start: point the user at /help.
        bot.send_message(message.chat.id, "Чтобы узнать о возможностях бота, введите команду /help")
    @bot.message_handler(commands=["help"])
    def start_message(message):
        # /help: list the available commands.
        bot.send_message(message.chat.id, "/help - помощь \n/configure - выбор валют и банков \n/notifyOn - включить ежедневные уведомления\n"
                         "/get - получить сводку по выбранным валютам и банкам \n/getstat - получение статистики ")
    @bot.message_handler(commands=["get"])
    def start_message(message):
        # /get: immediate summary for the user's saved banks/currencies.
        send_notify(message)
    @bot.message_handler(commands=['configure'])
    def start_message(message: types.Message):
        # /configure: start the selection dialog.  `d` carries the user's
        # choices through the next-step-handler chain: 'c' = currencies,
        # 'l' = banks.
        response = bot.send_message(message.chat.id, text="Выберите из списка:", reply_markup=kb_configure)
        d = {'c': [], 'l': []}
        bot.register_next_step_handler(response, choose_option, d)
    @bot.message_handler(commands=['getstat'])
    def start_message(message: types.Message):
        # /getstat: send the pre-built statistics image from object storage.
        response = get_from_os()
        bot.send_photo(message.chat.id, response['Body'].read())
    def choose_option(message, d):
        # Dispatch on the reply-keyboard button the user pressed.
        if message.text == 'Выбрать банки':
            response = bot.send_message(message.chat.id, text="Выберите банки:", reply_markup=kb_banks)
            bot.register_next_step_handler(response, choose_banks, d)
        if message.text == 'Выбрать валюты':
            response = bot.send_message(message.chat.id, text="Выберите валюты:", reply_markup=kb_currencies)
            bot.register_next_step_handler(response, choose_currency, d)
        if message.text == 'Закрыть':
            bot.send_message(message.chat.id, text=f"Изменения сохранены.\nТекущие настройки:\n Банки:{d.get('l')}\n"
                                                   f"Валюты: {d.get('c')}", reply_markup=types.ReplyKeyboardRemove())
    def choose_banks(message, d):
        # Collect bank choices until the user presses "Закрыть" (close).
        if message.text != "Закрыть":
            for bank in banks:
                if message.text == banks.get(bank).get("rus_spell"):
                    append_banks(d.get('l'), message)
                    break
            message = bot.send_message(message.chat.id, text="Выберите банки:", reply_markup=kb_banks)
            bot.register_next_step_handler(message, choose_banks, d)
        else:
            add_banks(message.chat.id, d.get('l'))
            message = bot.send_message(message.chat.id, text=f"Выбранные банки: {d.get('l')}. Сохранено!",
                                       reply_markup=kb_configure)
            bot.register_next_step_handler(message, choose_option, d)
    def append_banks(l, message):
        # Add the bank once; duplicates are silently ignored.
        if not l.__contains__(message.text):
            l.append(message.text)
            return bot.send_message(message.chat.id, text=f"Выбранные банки: {l}. Выберите банки:",
                                    reply_markup=kb_banks)
    def append_currencies(c, message):
        # Add the currency once; duplicates are silently ignored.
        if not c.__contains__(message.text):
            c.append(message.text)
            return bot.send_message(message.chat.id, text=f"Выбранные валюты: {c}. Выберите валюты:",
                                    reply_markup=kb_currencies)
    def choose_currency(message, d):
        # Collect currency choices until the user presses "Закрыть" (close).
        # Keyboard labels carry a " (EUR)"/" (USD)" suffix, hence the checks.
        if message.text != "Закрыть":
            for cur in currencies:
                # NOTE(review): stray debug print() inside the loop — confirm.
                print()
                if (message.text + " (EUR)" == currencies.get(cur).get("rus_spell")) or \
                        (message.text + " (USD)" == currencies.get(cur).get("rus_spell")):
                    append_currencies(d.get('c'), message)
                    break
            message = bot.send_message(message.chat.id, text="Выберите валюты:", reply_markup=kb_currencies)
            bot.register_next_step_handler(message, choose_currency, d)
        else:
            add_currencies(message.chat.id, d.get('c'))
            bot.send_message(message.chat.id, text=f"Выбранные валюты: {d.get('c')}. Сохранено!",
                             reply_markup=kb_configure)
            bot.register_next_step_handler(message, choose_option, d)
    @bot.message_handler(commands=['show'])
    def start_message(message: types.Message):
        # /show: display the user's saved banks and currencies.
        user_id = message.chat.id
        bot.send_message(message.chat.id, text=f"Текущие настройки:\n Банки:{get_banks(user_id)}\n"
                                               f"Валюты: {get_currencies(user_id)}")
    @bot.message_handler(commands=['notifyOn'])
    def notify_on(message):
        configure_notification(message)
    def configure_notification(message):
        # Schedule a daily 07:00 notification for this chat.
        # NOTE(review): the infinite run_pending() loop never returns, so the
        # handler blocks polling and busy-spins a CPU core — confirm this is
        # the intended design.
        schedule.every().day.at("07:00").do(send_notify, message)
        bot.send_message(message.chat.id, f"Уведомления включены.", parse_mode='html')
        while True:
            schedule.run_pending()
    def send_notify(message):
        # Send the current timestamp plus one rate message per saved
        # bank/currency pair.
        bot.send_message(message.chat.id, text=str(datetime.now()))
        user_id = message.chat.id
        for bank in get_banks(user_id)[0]:
            for curr in get_currencies(user_id)[0]:
                bot.send_message(message.chat.id,
                                 text=get_banks_currency(rus_to_en_banks.get(bank), rus_to_en_currencies.get(curr)))
    # Enter long polling (blocks).
    bot.polling()
| SuDarina/currency_bot | controller.py | controller.py | py | 6,195 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "database_work.form_gist",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "schedule.every",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "schedule.run_pending",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "teleb... |
5603964278 | #!/usr/bin/env python3
from itertools import product, permutations, combinations, combinations_with_replacement
import heapq
from collections import deque, defaultdict, Counter
from bisect import bisect
import sys
def input(): return sys.stdin.readline().rstrip()  # fast input; intentionally shadows builtins.input (competitive-programming idiom)
def list_int(): return list(map(int, input().split()))  # read one line of space-separated ints
def is_ok(mid):
    """Binary-search predicate (template placeholder): True when mid > 0."""
    return mid > 0
def binary_search(ok, ng):
    """
    Generic bisection helper.

    `ok` is a value known to satisfy is_ok and `ng` one known not to;
    the gap is narrowed to 1 and the last satisfying value is returned.
    """
    while ng - ok > 1:
        middle = (ok + ng) // 2
        ok, ng = (middle, ng) if is_ok(middle) else (ok, middle)
    return ok
INF = float('inf')
# mod = 1000000007
# mod = 998244353
DIR = [(0, 1), (0, -1), (1, 0), (-1, 0)]
sys.setrecursionlimit(10**9)
# ----------------------- #
# Count axis-parallel rectangles whose four corners are all input points.
N = int(input())
points = [[]]  # 1-indexed; index 0 is a dummy entry
Array = defaultdict(int)  # membership map: (x, y) -> occurrence count
for i in range(N):
    x, y = map(int, input().split())
    points.append([x, y])
    Array[(x, y)] += 1
ans = 0
# Treat each unordered point pair as a candidate rectangle diagonal and
# check that the two remaining corners exist.
for x in range(1, N+1):
    for y in range(x+1, N+1):
        x1, y1 = points[x]
        x2, y2 = points[y]
        # Points sharing a coordinate cannot form a diagonal.
        if x1 == x2 or y1 == y2:
            continue
        if Array[(x2, y1)] and Array[(x1, y2)]:
            ans += 1
# Each rectangle was counted once per diagonal (i.e. twice).
print(ans // 2)
| yuu246/Atcoder_ABC | practice/recommendation/ABC218_D.py | ABC218_D.py | py | 1,194 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin.readline",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.setrecursionlimit",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "collections.d... |
39184132558 | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from users.models import User
class BaseModel(models.Model):
    """Abstract base adding created/updated timestamps to every model."""
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
class Category(BaseModel):
    """Game category (genre); names are unique."""
    name = models.CharField(max_length=64, unique=True)
    description = models.TextField(null=True, blank=True)
    class Meta:
        verbose_name_plural = 'Категории Игр'
        verbose_name = 'Категория Игры'
    def __str__(self):
        return self.name
class Game(BaseModel):
    """A game listed by a user, with price bounded to 1..5000."""
    user = models.ForeignKey(User, verbose_name='пользователь', related_name='user_games', on_delete=models.CASCADE)
    title = models.CharField(max_length=80, verbose_name='название игры')
    # Slug is only unique per creation date, not globally.
    slug = models.SlugField(max_length=80, verbose_name='слаг', unique_for_date='created_at')
    description = models.TextField(verbose_name='описание')
    image = models.ImageField(null=True, blank=True, upload_to='games_images', verbose_name='скрин из игры')
    price = models.IntegerField(null=True, blank=True, verbose_name='цена',
                                validators=[MinValueValidator(1), MaxValueValidator(5000)])
    category = models.ManyToManyField(Category)
    class Meta:
        verbose_name_plural = 'Игры'
        verbose_name = 'Игра'
    def __str__(self):
        return self.title
class Favourite(models.Model):
    """Wishlist entry linking a user to a game.

    NOTE(review): no unique_together on (user, game), so duplicates are
    possible — confirm whether that is intended.
    """
    user = models.ForeignKey(User, verbose_name='пользователь', related_name='user_favourite', on_delete=models.CASCADE)
    game = models.ForeignKey(Game, verbose_name='игра', related_name='game_favourite', on_delete=models.CASCADE)
    class Meta:
        verbose_name_plural = 'Список желаемого'
        verbose_name = 'Список желаемого'
class Comment(BaseModel):
    """A user's comment on a game."""
    game = models.ForeignKey(Game, verbose_name='игра', related_name='game_comments', on_delete=models.CASCADE)
    user = models.ForeignKey(User, verbose_name='пользователь', related_name='user_comments', on_delete=models.CASCADE)
    text = models.TextField(verbose_name='текст комментарий')
    class Meta:
        verbose_name_plural = 'Комментарии'
        verbose_name = 'Комментарий'
    def __str__(self):
        return self.text
class UserInfo(BaseModel):
    """Optional profile details attached to a user."""
    user = models.ForeignKey(User, related_name='user_info', verbose_name='пользователь', on_delete=models.CASCADE)
    name = models.CharField(null=True, blank=True, max_length=40, verbose_name='имя')
    about = models.TextField(null=True, blank=True, max_length=200, verbose_name='информация о себе')
    class Meta:
        verbose_name_plural = 'Информация о пользователях'
        verbose_name = 'Информация о пользователе'
    def __str__(self):
        # `name` is nullable; __str__ must always return a str, otherwise
        # Django (e.g. the admin) raises "TypeError: __str__ returned
        # non-string" for rows without a name.
        return self.name or ''
class Basket(models.Model):
    """One game placed in a user's basket (one row per game)."""
    user = models.ForeignKey(User, verbose_name='пользователь', related_name='user_basket', on_delete=models.CASCADE)
    game = models.ForeignKey(Game, verbose_name='игра_в_корзине', related_name='game_basket', on_delete=models.CASCADE)
    created_timestamp = models.DateTimeField(auto_now_add=True)
    modificated_timestamp = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name_plural = 'Корзины пользователей'
        verbose_name = 'Корзина пользователя'
    def __str__(self):
        return f'Корзина для {self.user.username} | Продукт {self.game.title}'
    # NOTE: method name shadows the builtin sum(); kept for API compatibility.
    def sum(self):
        # Price of this basket row (may be None since Game.price is nullable).
        return self.game.price
| MrBerserk/semestrovka | web/models.py | models.py | py | 3,801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name... |
27439167711 | from __future__ import absolute_import
from threading import Thread, Lock, Event, Condition
import ctypes
import os
import sys
import traceback
import shutil
from time import time
import hashlib
from tempfile import mkstemp
from functools import wraps, partial
from six.moves import xrange
from past.builtins import execfile
import _ctypes
from runtime.typemapping import TypeTranslator
from runtime.loglevels import LogLevelsDefault, LogLevelsCount
from runtime.Stunnel import getPSKID
from runtime import PlcStatus
from runtime import MainWorker
from runtime import default_evaluator
# Pick the platform-specific shared-library load/unload primitives.
# NOTE(review): on any os.name other than nt/ce/posix, dlopen/dlclose stay
# undefined and later use would raise NameError — confirm that is acceptable.
if os.name in ("nt", "ce"):
    dlopen = _ctypes.LoadLibrary
    dlclose = _ctypes.FreeLibrary
elif os.name == "posix":
    dlopen = _ctypes.dlopen
    dlclose = _ctypes.dlclose
def get_last_traceback(tb):
    """Walk a traceback chain and return its innermost (last) entry."""
    current = tb
    while current.tb_next is not None:
        current = current.tb_next
    return current
# Shared-library extension for the current platform; empty string when the
# platform is not recognised.
lib_ext = {
    "linux": ".so",    # Python 3 reports bare "linux"
    "linux2": ".so",   # legacy Python 2 value, kept for compatibility
    "win32": ".dll",
}.get(sys.platform, "")
def PLCprint(message):
    """Write *message* to stdout with the 'PLCobject : ' prefix, flushed
    immediately so runtime logs appear without buffering delay."""
    out = sys.stdout
    out.write("PLCobject : " + message + "\n")
    out.flush()
def RunInMain(func):
    """Decorator: execute *func* synchronously on the runtime's MainWorker.

    Used for PLC C-API entry points that must be serialized onto the main
    worker thread instead of running on the caller's thread.
    """
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        return MainWorker.call(func, *args, **kwargs)
    return func_wrapper
class PLCObject(object):
    def __init__(self, WorkingDir, argv, statuschange, evaluator, pyruntimevars):
        """Set up the PLC object state; does not load any PLC library yet.

        :param WorkingDir: existing directory holding transferred PLC files
        :param argv: extra arguments passed to the PLC runtime
        :param statuschange: iterable of callbacks invoked on status change
        :param evaluator: callable used to run python code for the PLC
        :param pyruntimevars: shared python runtime variables
        """
        self.workingdir = WorkingDir  # must exist already
        # Wipe and recreate the temp dir on every start.
        self.tmpdir = os.path.join(WorkingDir, 'tmp')
        if os.path.exists(self.tmpdir):
            shutil.rmtree(self.tmpdir)
        os.mkdir(self.tmpdir)
        # FIXME : is argv of any use nowadays ?
        self.argv = [WorkingDir] + argv  # force argv[0] to be "path" to exec...
        self.statuschange = statuschange
        self.evaluator = evaluator
        self.pyruntimevars = pyruntimevars
        self.PLCStatus = PlcStatus.Empty
        self.PLClibraryHandle = None
        # Serializes access to the loaded PLC shared library.
        self.PLClibraryLock = Lock()
        # Creates fake C funcs proxies
        self._InitPLCStubCalls()
        self._loading_error = None
        self.python_runtime_vars = None
        self.TraceThread = None
        self.TraceLock = Lock()
        self.Traces = []
        self.DebugToken = 0
        self._init_blobs()
    # First task of the worker thread -> deliberately NOT wrapped in @RunInMain.
    def AutoLoad(self, autostart):
        """Reload the last transferred PLC at startup; optionally start it.

        On any failure the status falls back to Empty.  Always ends by
        notifying status-change callbacks (unless autostart succeeded, in
        which case StartPLC already reported the status).
        """
        # Get the last transfered PLC: its MD5 file holds the base name.
        try:
            self.CurrentPLCFilename = open(
                self._GetMD5FileName(),
                "r").read().strip() + lib_ext
            self.PLCStatus = PlcStatus.Stopped
            if autostart:
                if self.LoadPLC():
                    self.StartPLC()
                else:
                    self._fail(_("Problem autostarting PLC : can't load PLC"))
                return
        except Exception:
            # Broad on purpose: missing/corrupt MD5 file simply means no PLC.
            self.PLCStatus = PlcStatus.Empty
            self.CurrentPLCFilename = None
        self.StatusChange()
def StatusChange(self):
if self.statuschange is not None:
for callee in self.statuschange:
callee(self.PLCStatus)
    def LogMessage(self, *args):
        """Append a message to the PLC log.

        Accepts either ``(level, msg)`` or just ``(msg,)``; when the level
        is omitted it defaults to LogLevelsDefault.  Returns the C-side
        _LogMessage result, or None when the PLC library is not loaded.
        """
        if len(args) == 2:
            level, msg = args
        else:
            level = LogLevelsDefault
            msg, = args
        # Always echo to stdout as well.
        PLCprint(msg)
        if self._LogMessage is not None:
            return self._LogMessage(level, msg, len(msg))
        return None
    @RunInMain
    def ResetLogCount(self):
        """Clear the PLC-side log counters (no-op when no library is loaded)."""
        if self._ResetLogCount is not None:
            self._ResetLogCount()
    # used internally (hence not wrapped in @RunInMain)
    def GetLogCount(self, level):
        """Return the number of log messages for *level*.

        Reports 1 at level 0 when a library loading error is pending.
        NOTE(review): implicitly returns None when no library is loaded and
        no loading error exists — confirm callers handle that.
        """
        if self._GetLogCount is not None:
            return int(self._GetLogCount(level))
        elif self._loading_error is not None and level == 0:
            return 1
    @RunInMain
    def GetLogMessage(self, level, msgid):
        """Fetch one log message from the PLC library.

        :return: tuple ``(message, tick, tv_sec, tv_nsec)``; the pending
                 loading error (with zeroed timestamps) when the library is
                 absent and level is 0; otherwise None.
        """
        tick = ctypes.c_uint32()
        tv_sec = ctypes.c_uint32()
        tv_nsec = ctypes.c_uint32()
        if self._GetLogMessage is not None:
            # Reserve one byte for the terminating NUL written below.
            maxsz = len(self._log_read_buffer)-1
            sz = self._GetLogMessage(level, msgid,
                                     self._log_read_buffer, maxsz,
                                     ctypes.byref(tick),
                                     ctypes.byref(tv_sec),
                                     ctypes.byref(tv_nsec))
            if sz and sz <= maxsz:
                self._log_read_buffer[sz] = '\x00'
                return self._log_read_buffer.value, tick.value, tv_sec.value, tv_nsec.value
        elif self._loading_error is not None and level == 0:
            return self._loading_error, 0, 0, 0
        return None
    def _GetMD5FileName(self):
        # Path of the file recording the MD5 of the last transferred PLC.
        return os.path.join(self.workingdir, "lasttransferedPLC.md5")
    def _GetLibFileName(self):
        # Full path of the currently selected PLC shared library.
        return os.path.join(self.workingdir, self.CurrentPLCFilename)
def _LoadPLC(self):
    """
    Load PLC library
    Declare all functions, arguments and return values
    """
    # md5 stamp of the last transfer; pushed into the library's PLC_ID below.
    md5 = open(self._GetMD5FileName(), "r").read()
    self.PLClibraryLock.acquire()
    try:
        self._PLClibraryHandle = dlopen(self._GetLibFileName())
        self.PLClibraryHandle = ctypes.CDLL(self.CurrentPLCFilename, handle=self._PLClibraryHandle)
        self.PLC_ID = ctypes.c_char_p.in_dll(self.PLClibraryHandle, "PLC_ID")
        if len(md5) == 32:
            self.PLC_ID.value = md5
        self._startPLC = self.PLClibraryHandle.startPLC
        self._startPLC.restype = ctypes.c_int
        self._startPLC.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p)]
        self._stopPLC_real = self.PLClibraryHandle.stopPLC
        self._stopPLC_real.restype = None
        # PythonIterator is only exported when the python confnode is enabled.
        self._PythonIterator = getattr(self.PLClibraryHandle, "PythonIterator", None)
        if self._PythonIterator is not None:
            self._PythonIterator.restype = ctypes.c_char_p
            self._PythonIterator.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_void_p)]
            self._stopPLC = self._stopPLC_real
        else:
            # If python confnode is not enabled, we reuse _PythonIterator
            # as a call that block pythonthread until StopPLC
            self.PlcStopping = Event()

            def PythonIterator(res, blkid):
                self.PlcStopping.clear()
                self.PlcStopping.wait()
                return None
            self._PythonIterator = PythonIterator

            def __StopPLC():
                self._stopPLC_real()
                self.PlcStopping.set()
            self._stopPLC = __StopPLC
        # Debug/trace entry points
        self._ResetDebugVariables = self.PLClibraryHandle.ResetDebugVariables
        self._ResetDebugVariables.restype = None
        self._RegisterDebugVariable = self.PLClibraryHandle.RegisterDebugVariable
        self._RegisterDebugVariable.restype = ctypes.c_int
        self._RegisterDebugVariable.argtypes = [ctypes.c_int, ctypes.c_void_p]
        self._FreeDebugData = self.PLClibraryHandle.FreeDebugData
        self._FreeDebugData.restype = None
        self._GetDebugData = self.PLClibraryHandle.GetDebugData
        self._GetDebugData.restype = ctypes.c_int
        self._GetDebugData.argtypes = [ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_void_p)]
        self._suspendDebug = self.PLClibraryHandle.suspendDebug
        self._suspendDebug.restype = ctypes.c_int
        self._suspendDebug.argtypes = [ctypes.c_int]
        self._resumeDebug = self.PLClibraryHandle.resumeDebug
        self._resumeDebug.restype = None
        # Logging entry points
        self._ResetLogCount = self.PLClibraryHandle.ResetLogCount
        self._ResetLogCount.restype = None
        self._GetLogCount = self.PLClibraryHandle.GetLogCount
        self._GetLogCount.restype = ctypes.c_uint32
        self._GetLogCount.argtypes = [ctypes.c_uint8]
        self._LogMessage = self.PLClibraryHandle.LogMessage
        self._LogMessage.restype = ctypes.c_int
        self._LogMessage.argtypes = [ctypes.c_uint8, ctypes.c_char_p, ctypes.c_uint32]
        self._log_read_buffer = ctypes.create_string_buffer(1 << 14)  # 16K
        self._GetLogMessage = self.PLClibraryHandle.GetLogMessage
        self._GetLogMessage.restype = ctypes.c_uint32
        self._GetLogMessage.argtypes = [ctypes.c_uint8, ctypes.c_uint32, ctypes.c_char_p, ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_uint32)]
        self._loading_error = None
    except Exception:
        self._loading_error = traceback.format_exc()
        PLCprint(self._loading_error)
        return False
    finally:
        self.PLClibraryLock.release()
    return True
@RunInMain
def LoadPLC(self):
    """Load the PLC library, then init the python runtime.

    Returns True on success; on python-runtime failure records the
    traceback in _loading_error, on library failure frees the library.
    """
    res = self._LoadPLC()
    if res:
        try:
            self.PythonRuntimeInit()
        except Exception:
            self._loading_error = traceback.format_exc()
            PLCprint(self._loading_error)
            return False
    else:
        self._FreePLC()
    return res
@RunInMain
def UnLoadPLC(self):
    """Tear down the python runtime, then free the PLC shared library."""
    self.PythonRuntimeCleanup()
    self._FreePLC()
def _InitPLCStubCalls(self):
    """
    create dummy C func proxies
    """
    # Callable placeholders standing in for the real C entry points
    # until a PLC library is dlopen()ed.
    stubs = {
        "_startPLC": lambda x, y: None,
        "_stopPLC": lambda: None,
        "_ResetDebugVariables": lambda: None,
        "_RegisterDebugVariable": lambda x, y: 0,
        "_IterDebugData": lambda x, y: None,
        "_FreeDebugData": lambda: None,
        "_GetDebugData": lambda: -1,
        "_suspendDebug": lambda x: -1,
        "_resumeDebug": lambda: None,
        "_PythonIterator": lambda: "",
    }
    for attr, stub in stubs.items():
        setattr(self, attr, stub)
    # These are tested against None elsewhere, so they stay unset.
    for attr in ("_GetLogCount", "_LogMessage", "_GetLogMessage",
                 "_PLClibraryHandle", "PLClibraryHandle"):
        setattr(self, attr, None)
def _FreePLC(self):
    """
    Unload PLC library.
    This is also called by __init__ to create dummy C func proxies
    """
    self.PLClibraryLock.acquire()
    try:
        # Unload library explicitely
        if getattr(self, "_PLClibraryHandle", None) is not None:
            dlclose(self._PLClibraryHandle)
        # Forget all refs to library
        self._InitPLCStubCalls()
    finally:
        self.PLClibraryLock.release()
    return False
def PythonRuntimeCall(self, methodname, use_evaluator=True, reverse_order=False):
    """
    Calls init, start, stop or cleanup method provided by
    runtime python files, loaded when new PLC uploaded
    """
    methods = self.python_runtime_vars.get("_runtime_%s" % methodname, [])
    if reverse_order:
        methods = reversed(methods)
    for method in methods:
        if use_evaluator:
            _res, exp = self.evaluator(method)
        else:
            _res, exp = default_evaluator(method)
        if exp is not None:
            # Surface runtime-hook exceptions in the PLC log.
            self.LogMessage(0, '\n'.join(traceback.format_exception(*exp)))
# used internaly
def PythonRuntimeInit(self):
    """Build the python runtime: load runtime_*.py files from the working
    directory, expose PLC shared globals, run the 'init' hooks and start
    the dedicated python thread."""
    MethodNames = ["init", "start", "stop", "cleanup"]
    self.python_runtime_vars = globals().copy()
    self.python_runtime_vars.update(self.pyruntimevars)
    parent = self

    class PLCSafeGlobals(object):
        # Attribute access proxied to the C getters/setters registered in
        # python_runtime_vars for each shared PLC global.
        def __getattr__(self, name):
            try:
                t = parent.python_runtime_vars["_"+name+"_ctype"]
            except KeyError:
                raise KeyError("Try to get unknown shared global variable : %s" % name)
            v = t()
            parent.python_runtime_vars["_PySafeGetPLCGlob_"+name](ctypes.byref(v))
            return parent.python_runtime_vars["_"+name+"_unpack"](v)

        def __setattr__(self, name, value):
            try:
                t = parent.python_runtime_vars["_"+name+"_ctype"]
            except KeyError:
                raise KeyError("Try to set unknown shared global variable : %s" % name)
            v = parent.python_runtime_vars["_"+name+"_pack"](t, value)
            parent.python_runtime_vars["_PySafeSetPLCGlob_"+name](ctypes.byref(v))

    class OnChangeStateClass(object):
        # Exposes change count / first / last values for a PLC variable.
        def __getattr__(self, name):
            u = parent.python_runtime_vars["_"+name+"_unpack"]
            return type("changedesc", (), dict(
                count=parent.python_runtime_vars["_PyOnChangeCount_"+name].value,
                first=u(parent.python_runtime_vars["_PyOnChangeFirst_"+name]),
                last=u(parent.python_runtime_vars["_PyOnChangeLast_"+name])))

    self.python_runtime_vars.update({
        "PLCGlobals": PLCSafeGlobals(),
        "OnChange": OnChangeStateClass(),
        "WorkingDir": self.workingdir,
        "PLCObject": self,
        "PLCBinary": self.PLClibraryHandle,
        "PLCGlobalsDesc": []})
    for methodname in MethodNames:
        self.python_runtime_vars["_runtime_%s" % methodname] = []
    try:
        filenames = os.listdir(self.workingdir)
        filenames.sort()
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if name.upper().startswith("RUNTIME") and ext.upper() == ".PY":
                # NOTE: execfile is Python 2 only.
                execfile(os.path.join(self.workingdir, filename), self.python_runtime_vars)
                # Collect the _<file>_<hook> functions the file defines.
                for methodname in MethodNames:
                    method = self.python_runtime_vars.get("_%s_%s" % (name, methodname), None)
                    if method is not None:
                        self.python_runtime_vars["_runtime_%s" % methodname].append(method)
    except Exception:
        self.LogMessage(0, traceback.format_exc())
        raise
    self.PythonRuntimeCall("init", use_evaluator=False)
    # Command/ack rendezvous shared with PythonThreadProc.
    self.PythonThreadCondLock = Lock()
    self.PythonThreadCmdCond = Condition(self.PythonThreadCondLock)
    self.PythonThreadAckCond = Condition(self.PythonThreadCondLock)
    self.PythonThreadCmd = None
    self.PythonThreadAck = None
    self.PythonThread = Thread(target=self.PythonThreadProc, name="PLCPythonThread")
    self.PythonThread.start()
# used internaly
def PythonRuntimeCleanup(self):
    """Stop the python thread and run runtime 'cleanup' hooks (reverse order)."""
    if self.python_runtime_vars is None:
        return
    self.PythonThreadCommand("Finish")
    self.PythonThread.join()
    self.PythonRuntimeCall("cleanup", use_evaluator=False, reverse_order=True)
    self.python_runtime_vars = None
def PythonThreadLoop(self):
    """Evaluate py_eval expressions handed over by the PLC, until the
    iterator returns None (PLC stopping)."""
    res, cmd, blkid = "None", "None", ctypes.c_void_p()
    compile_cache = {}
    while True:
        # Hand the previous result back; get next expression + block id.
        cmd = self._PythonIterator(res, blkid)
        FBID = blkid.value
        if cmd is None:
            break
        try:
            self.python_runtime_vars["FBID"] = FBID
            ccmd, AST = compile_cache.get(FBID, (None, None))
            if ccmd is None or ccmd != cmd:
                # (Re)compile only when this block's expression changed.
                AST = compile(cmd, '<plc>', 'eval')
                compile_cache[FBID] = (cmd, AST)
            result, exp = self.evaluator(eval, AST, self.python_runtime_vars)
            if exp is not None:
                res = "#EXCEPTION : "+str(exp[1])
                self.LogMessage(1, ('PyEval@0x%x(Code="%s") Exception "%s"') % (
                    FBID, cmd, '\n'.join(traceback.format_exception(*exp))))
            else:
                res = str(result)
            self.python_runtime_vars["FBID"] = None
        except Exception as e:
            res = "#EXCEPTION : "+str(e)
            self.LogMessage(1, ('PyEval@0x%x(Code="%s") Exception "%s"') % (FBID, cmd, str(e)))
def PythonThreadProc(self):
    """Dedicated python thread: waits for PreStart/Start/Finish commands
    and drives runtime hooks plus the py_eval loop."""
    while True:
        self.PythonThreadCondLock.acquire()
        cmd = self.PythonThreadCmd
        while cmd is None:
            self.PythonThreadCmdCond.wait()
            cmd = self.PythonThreadCmd
        self.PythonThreadCmd = None
        self.PythonThreadCondLock.release()
        if cmd == "PreStart":
            self.PreStartPLC()
            # Ack once PreStart done, must be finished before StartPLC
            self.PythonThreadAcknowledge(cmd)
        elif cmd == "Start":
            # Ack Immediately, for responsiveness
            self.PythonThreadAcknowledge(cmd)
            self.PythonRuntimeCall("start")
            self.LogMessage("Python extensions started")
            self.PostStartPLC()
            # Blocks here until the PLC stops.
            self.PythonThreadLoop()
            self.PythonRuntimeCall("stop", reverse_order=True)
        elif cmd == "Finish":
            self.PythonThreadAcknowledge(cmd)
            break
def PythonThreadAcknowledge(self, ack):
    """Publish *ack* so the caller blocked in PythonThreadCommand resumes."""
    with self.PythonThreadCondLock:
        self.PythonThreadAck = ack
        self.PythonThreadAckCond.notify()
def PythonThreadCommand(self, cmd):
    """Send *cmd* to the python thread and block until it is acknowledged."""
    self.PythonThreadCondLock.acquire()
    self.PythonThreadCmd = cmd
    self.PythonThreadCmdCond.notify()
    ack = None
    # Both conditions share PythonThreadCondLock, so wait() releases it
    # while the python thread processes the command.
    while ack != cmd:
        self.PythonThreadAckCond.wait()
        ack = self.PythonThreadAck
        self.PythonThreadAck = None
    self.PythonThreadCondLock.release()
def _fail(self, msg):
    """Log *msg*, mark the PLC Broken and notify status listeners."""
    self.LogMessage(0, msg)
    self.PLCStatus = PlcStatus.Broken
    self.StatusChange()
def PreStartPLC(self):
    """
    Hook called just before the PLC starts, once all libraries and
    python objects exist. The default implementation does nothing;
    subclasses may e.g. restore saved proprietary parameters.
    """
    return None
def PostStartPLC(self):
    """
    Hook called after the PLC has started and the python extensions'
    "Start" methods ran, but before the py_eval processing thread loop
    begins. The default implementation does nothing; subclasses may
    e.g. attach additional resources to web services.
    """
    return None
@RunInMain
def StartPLC(self):
    """Start the loaded PLC: run PreStart hooks, call C startPLC with
    argv, then send the python 'Start' command on success."""
    if self.PLClibraryHandle is None:
        if not self.LoadPLC():
            self._fail(_("Problem starting PLC : can't load PLC"))
    # _fail() above sets PLCStatus to Broken, so this guard also skips
    # the start when loading just failed.
    if self.CurrentPLCFilename is not None and self.PLCStatus == PlcStatus.Stopped:
        self.PythonThreadCommand("PreStart")
        c_argv = ctypes.c_char_p * len(self.argv)
        res = self._startPLC(len(self.argv), c_argv(*self.argv))
        if res == 0:
            self.LogMessage("PLC started")
            self.PLCStatus = PlcStatus.Started
            self.StatusChange()
            self.PythonThreadCommand("Start")
        else:
            self._fail(_("Problem starting PLC : error %d" % res))
@RunInMain
def StopPLC(self):
    """Stop a started PLC and join the trace thread.

    Returns True when a stop actually happened, False otherwise.
    """
    if self.PLCStatus == PlcStatus.Started:
        self.LogMessage("PLC stopped")
        self._stopPLC()
        self.PLCStatus = PlcStatus.Stopped
        self.StatusChange()
        if self.TraceThread is not None:
            self.TraceThread.join()
            self.TraceThread = None
        return True
    return False
def GetPLCstatus(self):
    """Return (status, log counts); (Disconnected, None) when the
    worker link has died (EOFError)."""
    try:
        result = self._GetPLCstatus()
    except EOFError:
        result = (PlcStatus.Disconnected, None)
    return result
@RunInMain
def _GetPLCstatus(self):
    """Return (status, per-level log counts). NOTE: map/xrange => Python 2."""
    return self.PLCStatus, map(self.GetLogCount, xrange(LogLevelsCount))
@RunInMain
def GetPLCID(self):
    """Return the PSK identity/secret used to authenticate this runtime."""
    return getPSKID(partial(self.LogMessage, 0))
def _init_blobs(self):
    """Reset the upload-blob registry and recreate an empty tmp directory."""
    self.blobs = {}
    tmp = self.tmpdir
    if os.path.exists(tmp):
        shutil.rmtree(tmp)
    os.mkdir(tmp)
@RunInMain
def SeedBlob(self, seed):
    """Create a new upload blob primed with *seed*; return its md5-digest ID."""
    tmpfd, tmppath = mkstemp(dir=self.tmpdir)
    checksum = hashlib.new('md5')
    checksum.update(seed)
    blob = (tmpfd, tmppath, checksum)
    newBlobID = checksum.digest()
    self.blobs[newBlobID] = blob
    return newBlobID
@RunInMain
def AppendChunkToBlob(self, data, blobID):
    """Append *data* to blob *blobID* and re-key it by the rolling md5.

    Returns the new blob ID, or None when *blobID* is unknown.
    """
    entry = self.blobs.pop(blobID, None)
    if entry is None:
        return None
    fd, _path, checksum = entry
    checksum.update(data)
    os.write(fd, data)
    rekeyed = checksum.digest()
    self.blobs[rekeyed] = entry
    return rekeyed
@RunInMain
def PurgeBlobs(self):
    """Close every pending upload blob's file and reset the blob store."""
    for blob in self.blobs.values():
        os.close(blob[0])
    self._init_blobs()
def BlobAsFile(self, blobID, newpath):
    """Materialize blob *blobID* at *newpath*; raises when the blob is missing."""
    entry = self.blobs.pop(blobID, None)
    if entry is None:
        raise Exception(_("Missing data to create file: {}").format(newpath))
    self._BlobAsFile(entry, newpath)
def _BlobAsFile(self, blob, newpath):
    """Flush and fsync the blob's temp file, close it, move it to *newpath*."""
    fd, path, _checksum = blob
    with os.fdopen(fd) as fobj:
        fobj.flush()
        os.fsync(fd)
    shutil.move(path, newpath)
def _extra_files_log_path(self):
    """Path of the manifest listing extra files shipped with the PLC."""
    manifest_name = "extra_files.txt"
    return os.path.join(self.workingdir, manifest_name)
def RepairPLC(self):
    """Purge the installed PLC, then quit the main worker so the service
    restarts clean."""
    self.PurgePLC()
    MainWorker.quit()
@RunInMain
def PurgePLC(self):
    """Delete the current PLC binary, its md5 stamp and all extra files
    listed in the manifest; leaves the runtime in the Empty state."""
    extra_files_log = self._extra_files_log_path()
    old_PLC_filename = os.path.join(self.workingdir, self.CurrentPLCFilename) \
        if self.CurrentPLCFilename is not None \
        else None
    try:
        # 'with' closes the manifest (the original leaked the handle).
        with open(extra_files_log, "rt") as f:
            allfiles = f.readlines()
        allfiles.extend([extra_files_log, old_PLC_filename, self._GetMD5FileName()])
    except Exception:
        self.LogMessage("No files to purge")
        allfiles = []
    for filename in allfiles:
        # old_PLC_filename may be None -> skip falsy entries.
        if filename:
            filename = filename.strip()
            try:
                os.remove(os.path.join(self.workingdir, filename))
            except Exception:
                self.LogMessage("Couldn't purge " + filename)
    self.PLCStatus = PlcStatus.Empty
    # TODO: PLCObject restart
@RunInMain
def NewPLC(self, md5sum, plc_object, extrafiles):
    """Install a new PLC binary plus *extrafiles*, then try to load it.

    Returns True when the new PLC ends up loaded and Stopped.
    """
    if self.PLCStatus in [PlcStatus.Stopped, PlcStatus.Empty, PlcStatus.Broken]:
        NewFileName = md5sum + lib_ext
        extra_files_log = self._extra_files_log_path()
        new_PLC_filename = os.path.join(self.workingdir, NewFileName)
        self.UnLoadPLC()
        self.PurgePLC()
        self.LogMessage("NewPLC (%s)" % md5sum)
        try:
            # Create new PLC file
            self.BlobAsFile(plc_object, new_PLC_filename)
            # Then write the files; 'with' ensures the manifest is flushed
            # and closed (the original never closed it).
            with open(extra_files_log, "w") as log:
                for fname, blobID in extrafiles:
                    fpath = os.path.join(self.workingdir, fname)
                    self.BlobAsFile(blobID, fpath)
                    log.write(fname+'\n')
            # Store new PLC filename based on md5 key
            with open(self._GetMD5FileName(), "w") as f:
                f.write(md5sum)
                f.flush()
                os.fsync(f.fileno())
            # Store new PLC filename
            self.CurrentPLCFilename = NewFileName
        except Exception:
            self.PLCStatus = PlcStatus.Broken
            self.StatusChange()
            PLCprint(traceback.format_exc())
            return False
        if self.LoadPLC():
            self.PLCStatus = PlcStatus.Stopped
            self.StatusChange()
        else:
            self._fail(_("Problem installing new PLC : can't load PLC"))
        return self.PLCStatus == PlcStatus.Stopped
    return False
def MatchMD5(self, MD5):
    """Return True when *MD5* equals the stored md5 of the last transfer."""
    try:
        # 'with' avoids leaking the stamp-file handle on every check.
        with open(self._GetMD5FileName(), "r") as f:
            last_md5 = f.read()
        return last_md5 == MD5
    except Exception:
        # No stamp file (or unreadable): treat as mismatch.
        pass
    return False
@RunInMain
def SetTraceVariablesList(self, idxs):
    """
    Call ctype imported function to append
    these indexes to registred variables in PLC debugger

    Returns the new DebugToken on success, -errcode on registration
    failure, None when tracing is disabled (empty idxs).
    """
    self.DebugToken += 1
    if idxs:
        # suspend but dont disable
        if self._suspendDebug(False) == 0:
            # keep a copy of requested idx
            self._ResetDebugVariables()
            for idx, iectype, force in idxs:
                if force is not None:
                    # Pack the forced value into its C representation.
                    c_type, _unpack_func, pack_func = \
                        TypeTranslator.get(iectype,
                                           (None, None, None))
                    force = ctypes.byref(pack_func(c_type, force))
                res = self._RegisterDebugVariable(idx, force)
                if res != 0:
                    # Registration failed: give up tracing entirely.
                    self._resumeDebug()
                    self._suspendDebug(True)
                    return -res
            self._TracesSwap()
            self._resumeDebug()
            return self.DebugToken
    else:
        self._suspendDebug(True)
    return None
def _TracesSwap(self):
    """Atomically take the accumulated traces (starting the trace thread
    on first use while the PLC runs) and return them."""
    self.LastSwapTrace = time()
    if self.TraceThread is None and self.PLCStatus == PlcStatus.Started:
        self.TraceThread = Thread(target=self.TraceThreadProc, name="PLCTrace")
        self.TraceThread.start()
    self.TraceLock.acquire()
    Traces = self.Traces
    self.Traces = []
    self.TraceLock.release()
    return Traces
@RunInMain
def GetTraceVariables(self, DebugToken):
    """Return (status, traces) when *DebugToken* is current, else (Broken, [])."""
    token_valid = DebugToken is not None and DebugToken == self.DebugToken
    if token_valid:
        return self.PLCStatus, self._TracesSwap()
    return PlcStatus.Broken, []
def TraceThreadProc(self):
    """
    Return a list of traces, corresponding to the list of required idx
    """
    self._resumeDebug()  # Re-enable debugger
    while self.PLCStatus == PlcStatus.Started:
        tick = ctypes.c_uint32()
        size = ctypes.c_uint32()
        buff = ctypes.c_void_p()
        TraceBuffer = None
        self.PLClibraryLock.acquire()
        res = self._GetDebugData(ctypes.byref(tick),
                                 ctypes.byref(size),
                                 ctypes.byref(buff))
        if res == 0:
            if size.value:
                # Copy out of the C buffer before freeing it.
                TraceBuffer = ctypes.string_at(buff.value, size.value)
            self._FreeDebugData()
        self.PLClibraryLock.release()
        # leave thread if GetDebugData isn't happy.
        if res != 0:
            break
        if TraceBuffer is not None:
            self.TraceLock.acquire()
            lT = len(self.Traces)
            # Cap the backlog at roughly 1MB worth of samples.
            if lT != 0 and lT * len(self.Traces[0]) > 1024 * 1024:
                self.Traces.pop(0)
            self.Traces.append((tick.value, TraceBuffer))
            self.TraceLock.release()
        # TraceProc stops here if Traces not polled for 3 seconds
        traces_age = time() - self.LastSwapTrace
        if traces_age > 3:
            self.TraceLock.acquire()
            self.Traces = []
            self.TraceLock.release()
            self._suspendDebug(True)  # Disable debugger
            break
    self.TraceThread = None
def RemoteExec(self, script, **kwargs):
    """Execute *script* with *kwargs* as its global namespace.

    The script communicates its result by assigning ``returnVal``.
    Returns (0, returnVal) on success, or (-1, message) describing the
    failing line on error.
    """
    try:
        # NOTE: the parameter must be **kwargs (a dict); the previous
        # *kwargs tuple made exec() fail with "globals must be a dict".
        exec(script, kwargs)
    except Exception:
        _e_type, e_value, e_traceback = sys.exc_info()
        line_no = traceback.tb_lineno(get_last_traceback(e_traceback))
        return (-1, "RemoteExec script failed!\n\nLine %d: %s\n\t%s" %
                (line_no, e_value, script.splitlines()[line_no - 1]))
    return (0, kwargs.get("returnVal", None))
| thiagoralves/OpenPLC_Editor | editor/runtime/PLCObject.py | PLCObject.py | py | 28,013 | python | en | code | 307 | github-code | 1 | [
{
"api_name": "os.name",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "_ctypes.LoadLibrary",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "_ctypes.FreeLibrary",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.nam... |
20059168851 | """
Gridsearch implementation
"""
from hops import hdfs, tensorboard, devices
from hops.experiment_impl.util import experiment_utils
from hops.experiment import Direction
import threading
import six
import time
import os
def _run(sc, train_fn, run_id, args_dict, direction=Direction.MAX, local_logdir=False, name="no-name", optimization_key=None):
    """
    Run the wrapper function with each hyperparameter combination as specified by the dictionary

    Args:
        sc: SparkContext
        train_fn: user training function, called once per combination
        run_id: id of this experiment run
        args_dict: argument name -> list of values (all lists equal length)
        direction: Direction.MAX or Direction.MIN for picking the best run
        local_logdir: keep TensorBoard logs locally when True
        name: experiment name shown in the Spark UI

    Returns:
        (best_logdir, best_params_dict, best_metric_value, best_return_dict)
    """
    app_id = str(sc.applicationId)
    num_executions = 1
    if direction.upper() != Direction.MAX and direction.upper() != Direction.MIN:
        raise ValueError('Invalid direction ' + direction + ', must be Direction.MAX or Direction.MIN')
    arg_lists = list(args_dict.values())
    currentLen = len(arg_lists[0])
    for i in range(len(arg_lists)):
        if currentLen != len(arg_lists[i]):
            raise ValueError('Length of each function argument list must be equal')
        num_executions = len(arg_lists[i])
    # Each TF task should be run on 1 executor
    nodeRDD = sc.parallelize(range(num_executions), num_executions)
    # Make SparkUI intuitive by grouping jobs
    sc.setJobGroup(os.environ['ML_ID'], "{} | Grid Search".format(name))
    # Force execution on executor, since GPU is located on executor
    nodeRDD.foreachPartition(_prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key))
    arg_count = six.get_function_code(train_fn).co_argcount
    arg_names = six.get_function_code(train_fn).co_varnames
    exp_dir = experiment_utils._get_logdir(app_id, run_id)
    # Aggregate per-execution results written to HDFS by the workers.
    max_val, max_hp, min_val, min_hp, avg, max_return_dict, min_return_dict = experiment_utils._get_best(args_dict, num_executions, arg_names, arg_count, exp_dir, optimization_key)
    param_combination = ""
    best_val = ""
    return_dict = {}
    if direction.upper() == Direction.MAX:
        param_combination = max_hp
        best_val = str(max_val)
        return_dict = max_return_dict
    elif direction.upper() == Direction.MIN:
        param_combination = min_hp
        best_val = str(min_val)
        return_dict = min_return_dict
    print('Finished Experiment \n')
    best_dir = exp_dir + '/' + param_combination
    return best_dir, experiment_utils._get_params_dict(best_dir), best_val, return_dict
def _prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key):
    """
    Build the per-partition wrapper executed on Spark executors.

    Args:
        app_id: Spark application id
        run_id: experiment run id
        train_fn: user training function
        args_dict: hyperparameter name -> list of values
        local_logdir: keep TensorBoard logs locally when True
        optimization_key: metric key extracted from train_fn's return value

    Returns:
        function suitable for RDD.foreachPartition
    """
    def _wrapper_fun(iter):
        """
        Run train_fn once for this partition's execution index, wiring up
        logging, TensorBoard registration and GPU-utilization reporting.

        Args:
            iter: partition iterator yielding the execution index
        """
        for i in iter:
            executor_num = i
        experiment_utils._set_ml_id(app_id, run_id)
        tb_hdfs_path = ''
        hdfs_exec_logdir = ''
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()
        try:
            # Arguments
            if args_dict:
                param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict)
            # NOTE(review): param_string/params below assume args_dict is
            # non-empty -- confirm against callers.
            hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params)
            logfile = experiment_utils._init_logger(hdfs_exec_logdir)
            tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
            print(devices._get_gpu_info())
            print('-------------------------------------------------------')
            print('Started running task ' + param_string)
            task_start = time.time()
            retval = train_fn(*args)
            task_end = time.time()
            experiment_utils._handle_return(retval, hdfs_exec_logdir, optimization_key, logfile)
            time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
            print(time_str)
            print('Returning metric ' + str(retval))
            print('-------------------------------------------------------')
        except:
            raise
        finally:
            # Always stop TensorBoard / GPU monitor, even on failure.
            experiment_utils._cleanup(tensorboard, t)

    return _wrapper_fun
{
"api_name": "hops.experiment.Direction.MAX",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "hops.experiment.Direction",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "hops.experiment.Direction.MAX",
"line_number": 33,
"usage_type": "attribute"... |
8946929415 | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import linregress
def _area_of_triangles(this_bin, pre_point, next_point):
    """Vectorized triangle areas, one per candidate row of ``this_bin``,
    with ``pre_point`` and ``next_point`` as the two fixed vertices
    (broadcast over the first axis of ``this_bin``)."""
    dy_bin = this_bin[:, 1] - pre_point[1]
    dx_bin = pre_point[0] - this_bin[:, 0]
    base_dx = pre_point[0] - next_point[0]
    base_dy = next_point[1] - pre_point[1]
    return 0.5 * abs(base_dx * dy_bin - dx_bin * base_dy)
def _split_data(data, n_out):
    """
    Split data into bins
    Dynamically adjust bucket size according to SSE (the sum of squared errors)
    :param data: (N, 2) array sorted on the first column
    :return: list of (n_out - 2) arrays covering data[1:-1]
    """
    # Number of refinement passes scales with points per output bucket.
    iteration_num = int(len(data)/(n_out*10))
    n_bins = n_out - 2
    data_bins = np.array_split(data[1: len(data) - 1], n_bins)
    curr_iteration = 1
    while curr_iteration <= iteration_num:
        sse_list = []
        for i in range(len(data_bins)):
            # Extend each bin with its neighbours' boundary points so the
            # linear fit spans the bin edges.
            if i == 0:
                pre_point = np.array([data[0]])
                next_bin = data_bins[i+1]
                next_point = np.array([next_bin[0]])
            elif i == len(data_bins)-1:
                pre_bin = data_bins[i-1]
                pre_point = np.array([pre_bin[-1]])
                next_point = np.array([data[-1]])
            else:
                pre_bin = data_bins[i - 1]
                pre_point = np.array([pre_bin[-1]])
                next_bin = data_bins[i + 1]
                next_point = np.array([next_bin[0]])
            this_bin = np.append(pre_point, data_bins[i], axis=0)
            this_bin = np.append(this_bin, next_point, axis=0)
            # stderr of the linear fit serves as the bin's SSE proxy.
            sse_list.append(linregress(this_bin).stderr)
        data_bins = _resize_bins(data_bins, sse_list)
        curr_iteration += 1
    return data_bins
def _find_max_bin(data_bins, sse_list):
    """
    Return the index of the splittable bin (>= 2 points) with the largest
    SSE, scanning bins from largest to smallest SSE. Falls back to the
    smallest-SSE index when no bin is splittable.
    :param data_bins:
    :param sse_list:
    :return:
    """
    order = np.argsort(sse_list)
    for pos in range(len(order) - 1, -1, -1):
        candidate = order[pos]
        if len(data_bins[candidate]) >= 2:
            return candidate
    return order[pos]
def _find_min_bin(sse_list):
    """
    Return the index of the first adjacent bin pair (i, i+1) whose
    combined SSE is minimal.
    :param sse_list:
    :return:
    """
    best_idx = 0
    best_sum = sse_list[0] + sse_list[1]
    for i in range(1, len(sse_list) - 1):
        pair_sum = sse_list[i] + sse_list[i + 1]
        if pair_sum < best_sum:
            best_idx, best_sum = i, pair_sum
    return best_idx
def _resize_bins(data_bins, sse_list):
    """
    resize bins according to SSE (the sum of squared errors): split the
    worst-fitting bin in two and merge the best-fitting neighbouring
    pair, keeping the total bin count constant.
    :param data_bins:
    :param sse_list:
    :return:
    """
    resize_bins = []
    max_index = _find_max_bin(data_bins, sse_list)
    min_index = _find_min_bin(sse_list)
    for i in range(len(data_bins)):
        if i == max_index:
            # Split the highest-SSE bin into two halves.
            resize_bins += np.array_split(data_bins[i], 2)
        elif i == min_index:
            # Merge the lowest-SSE pair (i, i+1) into one bin.
            resize_bins.append(np.concatenate([data_bins[i], data_bins[i+1]], axis=0))
        elif i == min_index + 1:
            continue  # consumed by the merge above
        else:
            resize_bins.append(data_bins[i])
    return resize_bins
def ltd(data, n_out):
    """Downsample ``data`` to ``n_out`` points using the LTD algorithm.

    Reference
    ---------
    Sveinn Steinarsson. 2013. Downsampling Time Series for Visual
    Representation. MSc thesis. University of Iceland.

    Constraints
    -----------
    - ncols(data) == 2
    - 3 <= n_out <= nrows(data)
    - ``data`` should be sorted on the first column.

    Returns
    -------
    numpy.array of shape (n_out, 2).
    """
    # Validate input
    if data.shape[1] != 2:
        raise ValueError('data should have 2 columns')
    if any(data[:, 0] != np.sort(data[:, 0])):
        raise ValueError('data should be sorted on first column')
    if n_out > data.shape[0]:
        raise ValueError('n_out must be <= number of rows in data')
    if n_out == data.shape[0]:
        return data
    if n_out < 3:
        raise ValueError('Can only downsample to a minimum of 3 points')
    # Split data into bins
    # Dynamically adjust bucket size
    n_bins = n_out - 2
    data_bins = _split_data(data, n_out)
    # Prepare output array
    # First and last points are the same as in the input.
    out = np.zeros((n_out, 2))
    out[0] = data[0]
    out[len(out) - 1] = data[len(data) - 1]
    # Keep the max point and min point in output.
    max_point = data[np.argmax(data, axis=0)[1]].tolist()
    min_point = data[np.argmin(data, axis=0)[1]].tolist()
    # In each bin, find the point that makes the largest triangle
    # with the point saved in the previous bin
    # and the centroid of the points in the next bin.
    for i in range(len(data_bins)):
        this_bin = data_bins[i]
        this_bin_lists = this_bin.tolist()
        if max_point in this_bin_lists:
            out[i + 1] = max_point
        elif min_point in this_bin_lists:
            out[i + 1] = min_point
        else:
            if i < n_bins - 1:
                next_bin = data_bins[i + 1]
            else:
                # Last bin: use the final data point as the "next bin".
                next_bin = data[len(data) - 1:]
            pre_point = out[i]
            next_point = next_bin.mean(axis=0)
            areas = _area_of_triangles(this_bin, pre_point, next_point)
            out[i + 1] = this_bin[np.argmax(areas)]
    return out
| FarisYang/LTTB-LTD-py | down_sample/ltd.py | ltd.py | py | 5,287 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.array_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_num... |
71223066915 | # coding=utf-8
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.escape
from settings import urls
import tornado.options
import logging.config
from tornado.log import app_log as weblog
from settings.logConfig import logConfig
import warnings
warnings.filterwarnings("ignore")
from tornado.options import define, options
define("port", default=9081, help="run on the given port", type=int)
logging.config.dictConfig(logConfig)
MAX_STREAMED_SIZE = 1024 * 1024 * 1024
def check_path_exist():
    """Ensure the image storage directory exists.

    ``exist_ok=True`` avoids the check-then-create race of the original
    exists()/makedirs() pair (and the duplicated path literal).
    """
    os.makedirs("/opt/data/fs", exist_ok=True)
class Application(tornado.web.Application):
    """Tornado application serving the image store rooted at /opt/data/fs."""

    def __init__(self):
        # NOTE(review): cookie/session secrets are hard-coded; rotate them
        # for production deployments.
        settings = dict(
            template_path=(os.path.join(os.path.dirname(__file__), "templates")),
            static_path=(os.path.join(os.path.dirname(__file__), "static")),
            cookie_secret="f6d4f6de102f29b5cd37cd5eQtsdfsfdsdJ5/xJ89E=",
            session_secret="12f29b5c61c118ccd37cd5eQtsdfsfdsdJ5/xJ89E=",
            session_timeout=300,  # seconds
            token_timeout=10,  # minutes
            top_path="/opt/data/fs",
            login_url="/login",
            debug=True,
            autoescape=None,
            xheaders=True,
            # xsrf_cookies=True,
        )
        handlers = urls.url
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
    # Cosmetic: rename the OS process when setproctitle is available.
    try:
        import setproctitle
        setproctitle.setproctitle("imageserver")  # set process name in linux environment
    except:  # NOTE(review): bare except hides real import errors
        pass
    check_path_exist()
    # tornado.options.parse_command_line()
    app = Application()
    http_server = tornado.httpserver.HTTPServer(app, max_buffer_size=4 * MAX_STREAMED_SIZE)
    http_server.listen(options.port)
    try:
        http_server.start(2)  # linux use mutli process
    except:
        # fork-based start unsupported (e.g. Windows): fall back to single process
        print("window app start...port={}".format(options.port))
        pass
    weblog.info("-- imageserver start .... pid:{} ".format(os.getpid()))
    tornado.ioloop.IOLoop.instance().start()
| FYPYTHON/PathOfStudy | python/service/imageserver/imageserver_app.py | imageserver_app.py | py | 2,078 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tornado.options.define",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.config.config.dictConfig",
"line_number": 18,
"usage_type": "call"
},
{
"a... |
26786218549 | from gi.repository import Gtk
class Controls(Gtk.Box):
    """Horizontal control bar: reset button, angle/velocity entries, fire button."""

    def __init__(self, *args, **kwargs):
        super().__init__(spacing=10.0, *args, **kwargs)
        self.reset_btn = Gtk.Button(label="Reset")
        # Angle entry with its caption stacked vertically.
        angle_control_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        angle_control_label = Gtk.Label("Angle [0-90 deg]:")
        self.angle_control = Gtk.Entry()
        angle_control_box.pack_start(angle_control_label, False, False, 0)
        angle_control_box.pack_start(self.angle_control, False, False, 0)
        # Velocity entry with its caption stacked vertically.
        velocity_control_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        velocity_control_label = Gtk.Label("Velocity [pix/sec]:")
        self.velocity_control = Gtk.Entry()
        velocity_control_box.pack_start(
            velocity_control_label, False, False, 0)
        velocity_control_box.pack_start(
            self.velocity_control, False, False, 0)
        self.fire_btn = Gtk.Button(label="Fire!")
        self.pack_start(self.reset_btn, False, False, 10)
        self.pack_start(angle_control_box, False, False, 0)
        self.pack_start(velocity_control_box, False, False, 0)
        self.pack_end(self.fire_btn, False, False, 10)

    def get_angle_and_velocity(self):
        """Return (angle, velocity) parsed as floats from the two entries;
        raises ValueError on non-numeric entry text."""
        return (float(self.angle_control.get_text()),
                float(self.velocity_control.get_text()))
| sgorawski/InformatykaUWr | Kurs_rozszerzony_jezyka_Python/l07/controls.py | controls.py | py | 1,346 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gi.repository.Gtk.Box",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "gi... |
12804462040 | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy
from .models import Post
class BlogListView(ListView):
    """Home page: lists all posts."""
    model = Post
    template_name = 'blog/home.html'
class BlogDetailView(DetailView):
    """Single-post page; the template sees the object as ``post``."""
    model = Post
    template_name = 'blog/post_detail.html'
    # if you set context_object_name you cannot access the data in template using lowercase
    # model name. for this model it is `post`. use-> post.tile, post.body, post.author etc
    # context_object_name = 'anything_you_want'
class BlogCreateView(CreateView):
    """New-post form exposing every model field."""
    model = Post
    template_name = 'blog/post_new.html'
    # show all the field in the form
    fields = '__all__'
class BlogUpdateView(UpdateView):
    """Edit an existing post's title and body."""
    model = Post
    template_name = 'blog/post_edit.html'
    fields = ['title', 'body']
    # success_message = ugettext_lazy('Widget was successfully updated')
    # success_url = reverse('post_edit')
    # def has_permission(self, request):
    #     return request.user.is_active and request.user.is_staff
class BlogDeleteView(DeleteView):
    """Delete-confirmation page; redirects to home afterwards."""
    model = Post
    template_name = 'blog/post_delete.html'
    # reverse_lazy: the URLconf may not be loaded yet at class-definition time.
    success_url = reverse_lazy('home')
| Nahid-Hassan/fullstack-software-development | courses/backend/Learn Django by Creating Projects/projects/blog/blog/views.py | views.py | py | 1,337 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name... |
12078108329 | # Created by Yuanbiao Wang
# Implements a simple contrastive learning pretrain learner
# MoCo: Momentum Contrast for Unsupervised Visual Representation Learning
# https://github.com/facebookresearch/moco
import jittor as jt
import jittor.nn as nn
from advance.ssl_utils import *
import matplotlib.pyplot as plt
from tqdm import tqdm
plt.switch_backend('agg')
class MoCo(nn.Module):
def __init__(self, encoder, embedding_channel, projection_dim, K=1024, T=0.07, dim=128):
super(MoCo, self).__init__()
self.K = K
self.T = T
self.encoder = encoder
self.project = Projection(embedding_channel, projection_dim)
self.queue = jt.randn(dim, K)
self.queue = jt.misc.normalize(self.queue, dim=0)
self.ptr = 0
def _dequeue_and_enqueue(self, keys):
with jt.no_grad():
batch_size = keys.shape[0]
left_space = self.K - self.ptr
key_size = min(batch_size, left_space)
keys = keys[:key_size]
self.queue[:, self.ptr : self.ptr + key_size] = keys.transpose()
self.ptr = (self.ptr + key_size) % self.K
def execute(self, im_q, im_k):
q = self.encoder(im_q)
q = self.project(q)
q = jt.misc.normalize(q, dim=1) # im_q feature vector
k = self.encoder(im_k)
k = self.project(k)
k = jt.misc.normalize(k, dim=1) # im_k feature vector
l_pos = (q * k).sum(dim=1).unsqueeze(-1) # similarity of two feature vectors
l_neg = jt.matmul(q, self.queue.clone().detach()) # discrepancy of two feature vectors, queue is the buffer
logits = jt.contrib.concat([l_pos, l_neg], dim=1)
logits /= self.T
labels = jt.zeros(logits.shape[0], dtype=jt.int)
self._dequeue_and_enqueue(k) # add im_k feature vector into the buffer
return logits, labels
# https://blog.csdn.net/yyhaohaoxuexi/article/details/113824125 博客解读
# 这里的变量logits的意义我也查了一下:是未进入softmax的概率,crossentropy会自动做一个log softmax
# 这段代码根据注释即可理解:l_pos表示正样本的得分,l_neg表示所有负样本的得分,logits表示将正样本和负样本在列上cat起来之后的值。
# 值得关注的是,labels的数值,是根据logits.shape[0]的大小生成的一组zero。也就是大小为batch_size的一组0。
# 这里直接对输出的logits和生成的````labels```计算交叉熵,然后就是模型的loss。这里就是让我不是很理解的地方。先将疑惑埋在心里~
class OutputHiddenLayer(nn.Module):
def __init__(self, net, layer=(-2)):
super().__init__()
self.net = net
self.layer = layer
self.hidden = None
self._register_hook()
def _find_layer(self):
if (type(self.layer) == str):
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif (type(self.layer) == int):
children = [*self.net.children()]
return children[self.layer]
return None
def _register_hook(self):
def hook(_, __, output):
self.hidden = output
layer = self._find_layer()
assert (layer is not None)
handle = layer.register_forward_hook(hook)
def execute(self, x):
if (self.layer == (- 1)):
return self.net(x)
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert (hidden is not None)
return hidden
class Projection(nn.Module):
def __init__(self, input_channel, project_dim):
super(Projection, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(output_size=2)
self.fc = nn.Sequential(
nn.ReLU(),
nn.Linear(input_channel * 4, project_dim)
)
def execute(self, x):
y = self.pool(x)
y = y.view(x.size(0), -1)
y = self.fc(y)
return y
class MoCoLearner():
def __init__(self, model, layer, loader, embedding_channel=1024, project_dim=128, lr=1e-5):
super(MoCoLearner, self).__init__()
encoder = OutputHiddenLayer(model, layer)
self.co = MoCo(encoder, embedding_channel, project_dim)
self.loader = loader
self.criterion = nn.CrossEntropyLoss()
self.optim = jt.optim.Adam(model.parameters(), lr=lr)
def update(self, query, key):
output, target = self.co(query, key)
loss = self.criterion(output, target)
self.optim.step(loss)
return loss.item()
def train(self):
loss_mean = 0.0
total = 0
bar = tqdm(self.loader, desc='loss')
for i, (query, key, _) in enumerate(bar):
loss = self.update(query, key)
bar.set_description('loss: [%.6f]' % loss)
bar.update()
loss_mean += loss * query.shape[0]
total += query.shape[0]
loss_mean /= total
return loss_mean
| THU-CVlab/JMedSeg | advance/ssl.py | ssl.py | py | 5,168 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "jittor.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name... |
26235363048 | from flask import Flask, request, abort
import linebot
import os
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
#======這裡是呼叫的檔案內容=====
from message import *
from new import *
from Function import *
#======這裡是呼叫的檔案內容=====
#======python的函數庫==========
import tempfile, os
import datetime
import time
#======python的函數庫==========
app = Flask(__name__)
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
# Channel Access Token
line_bot_api = LineBotApi('OsCPT7k8WkUFh/8JXZWSdxR8AKs9LQQgQhiQyGzbLlnKQJKfnFsKAai4t7XwQDNSCu7e7/BEUntlQsUN+8Bvpr0o/UAKqxOj8Ocm/LZIsR7bTLyRlZDT0hfU0/GxDzD3DwbU3PW/wZ2Tqf6jm3sfzwdB04t89/1O/w1cDnyilFU=')
# Channel Secret
handler = WebhookHandler('33152f3e78a40af114991b26e232dc7b')
# 監聽所有來自 /callback 的 Post Request
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
# 處理訊息
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
msg = event.message.text
if '蟾蜍山簡介' in msg:
message = TextSendMessage(text="#======蟾蜍山簡介===== 1.關於蟾蜍山 2.蟾蜍山由來 3.瀏海仙翁 ")
line_bot_api.reply_message(event.reply_token, message)
elif '關於蟾蜍山' in msg:
message = TextSendMessage(
text="蟾蜍山聚落位於臺北盆地南端、臺大公館商圈旁。山城聚落內保留了都市發展的軌跡,包含清代的水利設施、日治時期的農業研究佈局、中美協防的軍事地景、臺北市目前唯一完整保留的空軍眷村「煥民新村」、結合軍眷及臺北城鄉移民的自力營造聚落等,因其豐富的歷史文化及生態資源,2014年被指定為臺北市文化景觀。"
)
line_bot_api.reply_message(event.reply_token, message)
elif '蟾蜍山由來' in msg:
message = TextSendMessage(text="蟾蜍精出現於今日台北市公館地區,據說牠經常吐出毒物毒死作物和家畜,有時甚至還會吃人,讓當地居民苦不堪言。後來呂洞賓仙公下凡成功降伏蟾蜍精,蟾蜍精就變成今日的蟾蜍山,仙跡岩上的腳印則是鬥法過程中呂仙公力道太深而留下來的。另有傳說認為降伏牠的人是劉海仙翁;或是率軍經過的鄭成功用傳說中的大砲龍碽打爛其嘴巴(或說尾巴),讓牠嚇得不敢作亂,直到公館開新路時因腳被切斷而死。"
)
line_bot_api.reply_message(event.reply_token, message)
elif '瀏海仙翁' in msg:
message = TextSendMessage(text="道教全真教 ...... 改稱瀏海。")
line_bot_api.reply_message(event.reply_token, message)
elif '景點故事' in msg:
message = ImageSendMessage(
#放地圖
original_content_url="https://scontent.ftpe8-1.fna.fbcdn.net/v/t1.6435-9/72642222_634082933787548_5027039107189047296_n.jpg?_nc_cat=105&ccb=1-7&_nc_sid=730e14&_nc_ohc=3mHh5UgKudUAX8IBe2B&_nc_ht=scontent.ftpe8-1.fna&oh=00_AfAcp7GMZ2_41uOQeHt6jpNO0nEdDXW8704XlBCnSTqXbQ&oe=645E4840",
preview_image_url= "https://scontent.ftpe8-1.fna.fbcdn.net/v/t1.6435-9/72642222_634082933787548_5027039107189047296_n.jpg?_nc_cat=105&ccb=1-7&_nc_sid=730e14&_nc_ohc=3mHh5UgKudUAX8IBe2B&_nc_ht=scontent.ftpe8-1.fna&oh=00_AfAcp7GMZ2_41uOQeHt6jpNO0nEdDXW8704XlBCnSTqXbQ&oe=645E4840"
)
line_bot_api.reply_message(event.reply_token, message)
message = Carousel_Template()
line_bot_api.reply_message(event.reply_token, message)
elif '蟾蜍山地圖' in msg:
message = LocationSendMessage(
title='蟾蜍山',
address='蟾蜍山',
latitude="25.009825001671054",
longitude="121.540005115302"
)
line_bot_api.reply_message(event.reply_token, message)
elif '開始折價挑戰' in msg:
message = TextSendMessage(text="折價挑戰第一題 : 蟾蜍山的活動中心名稱是? (A)蟾蜍山大後院 (B)蟾蜍山大客廳 (C)蟾蜍山大舞廳")
line_bot_api.reply_message(event.reply_token, message)
elif 'A' in msg:
message = TextSendMessage(text="嗯......好像不太對喔,再想想看吧?")
line_bot_api.reply_message(event.reply_token, message)
elif 'C' in msg:
message = TextSendMessage(text="嗯......好像不太對喔,再想想看吧?")
line_bot_api.reply_message(event.reply_token, message)
elif 'B' in msg:
message = TextSendMessage(text="太棒了!回答正確,請繼續進行第二題......")
line_bot_api.reply_message(event.reply_token, message)
else:
#else : 重複用戶的發問
message = TextSendMessage(text=msg)
line_bot_api.reply_message(event.reply_token, message)
@handler.add(PostbackEvent)
def handle_message(event):
print(event.postback.data)
@handler.add(MemberJoinedEvent)
def welcome(event):
uid = event.joined.members[0].user_id
gid = event.source.group_id
profile = line_bot_api.get_group_member_profile(gid, uid)
name = profile.display_name
message = TextSendMessage(text=f'{name}歡迎加入')
line_bot_api.reply_message(event.reply_token, message)
#if __name__ == "__main__":
# port = int(os.environ.get('PORT', 5000))
# app.run(host='0.0.0.0', port=port)
| morrischen0/-DEMO | linebot_mo2/app.py | app.py | py | 5,943 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_num... |
11737097023 | import os
from werkzeug.utils import secure_filename
class Upload:
app = None
def __init__(self, app):
self.app = app
def upload(self, destination, ext_conf, files):
if type(files) is list:
file_names = []
for _file in files:
filename = self.upload_file(destination, self.app.config[ext_conf], _file)
file_names.append(filename)
return file_names
else:
filename = self.upload_file(destination, self.app.config[ext_conf], files)
return filename
def upload_file(self, destination, ext, file):
# Check if the file is one of the allowed types/extensions
if file and self.allowed_files(file.filename, ext):
# Make the filename safe, remove unsupported chars
filename = secure_filename(file.filename)
# Move the file form the temporal folder to the upload folder we setup
file.save(os.path.join(destination, filename))
return filename
def allowed_files(self, filename, ext):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ext
| johndoe-dev/Ecodroid | app/flask_helpers/upload.py | upload.py | py | 1,164 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
}
] |
1163086234 | from cv2 import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# initialize the camera
cam = VideoCapture(0) # 0 -> index of camera
s, img = cam.read()
#imwrite("image.jpg",img) #save image
#img = cv2.imread("image.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.show()
r,g,b=cv2.split(img)
h,w=r.shape
enol = np.zeros((h,w),'uint8')
enols=np.ones((h,w),'uint8')
enols=enols * 255
imgr = cv2.merge((r,enol,enol))
imgg = cv2.merge((enol,g,enol))
imgb = cv2.merge((enol,enol,b))
plt.subplot(2,2,1)
plt.imshow(imgr)
plt.xticks([]), plt.yticks([])
plt.subplot(2,2,2)
plt.imshow(imgg)
plt.xticks([]), plt.yticks([])
plt.subplot(2,2,3)
plt.imshow(imgb)
plt.xticks([]), plt.yticks([])
plt.subplot(2,2,4)
plt.imshow(img)
plt.xticks([]), plt.yticks([])
plt.show()
del(cam)
| kgfathur/selfly | campy/crackImages.py | crackImages.py | py | 824 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
21086711197 | import pygame
import globalVariables
class EnemyProjectile(pygame.sprite.Sprite):
def __init__(self, direction, enemy, window):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("images/sprites/bone.png").convert_alpha()
self.size = self.image.get_size()
self.image = pygame.transform.scale(self.image, (int(self.size[0]/5), int(self.size[1]/5)))
self.rect = self.image.get_rect()
self.rect.x = enemy.rect.x + 45
self.rect.y = enemy.rect.y + 60
self.velocity = 10
self.screen_width = window.get_width()
self.screen_height = window.get_height()
self.attack = 100
self.direction = direction
def remove(self):
globalVariables.meteorites.remove(self)
def move(self, enemy, player):
if(self.direction > 0):
self.rect.x += self.velocity
else:
self.rect.x -= self.velocity
if(pygame.sprite.collide_rect(self, player)):
self.remove()
player.damage(self.attack)
# if self.rect.y > self.screen_height:
# self.remove()
| NaoufelMaazouzi/jeu-python | enemyProjectile.py | enemyProjectile.py | py | 1,145 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
31518888968 | import os
import gzip
import shutil
from inspect import isawaitable
from typing import Optional, Union, Dict, Tuple, Iterable, Iterator, List, Coroutine, NamedTuple, Callable, Generator
from logging import Logger
from pandas import read_csv, DataFrame, isna, Series, concat
import numpy as np
from pathlib import Path
from aiofiles import open as aiofiles_open
from re import compile as re_compile
from pyexcel import Sheet
import asyncio
from unsync import unsync, Unfuture
from itertools import chain
import orjson as json
import logging
from io import StringIO
from operator import itemgetter
from textdistance import overlap, sorensen
from collections import Counter, OrderedDict
from warnings import warn
"""def to_interval(lyst: Union[Iterable, Iterator]) -> List:
def pass_check(lyst):
try:
if (not isinstance(lyst, Generator) and (len(lyst) == 0)) or isna(lyst):
return False
else:
return True
except ValueError:
if isinstance(lyst, float):
return False
else:
return True
if not pass_check(lyst):
return ()
else:
if not isinstance(lyst, set):
lyst = frozenset(int(i) for i in lyst if i is not None)
if not pass_check(lyst):
return ()
start = []
interval_lyst = []
max_edge = max(lyst)
min_edge = min(lyst)
if len(lyst) == (max_edge + 1 - min_edge):
return ((min_edge, max_edge),)
else:
lyst_list = sorted(lyst)
for j in lyst_list:
if not start:
i = j
start.append(j)
i += 1
else:
if (i != j) or (j == max(lyst_list)):
if j == max(lyst_list):
if (i != j):
interval_lyst.append(start)
interval_lyst.append([j])
break
else:
start.append(j)
interval_lyst.append(start)
start = [j]
i = j + 1
else:
start.append(j)
i += 1
return tuple((min(li), max(li)) for li in interval_lyst)
def lyst22interval(x, y):
# x, y = sorted(x), sorted(y)
data = frozenset({i for i in zip(x,y)})
x, y = zip(*sorted(data, key=itemgetter(0)))
start_x, start_y = x[0], y[0]
index_x, index_y = x[0]-1, y[0]-1
interval_x, interval_y = [], []
for i, j in zip(x, y):
pre_x = index_x + 1
pre_y = index_y + 1
if pre_x == i and pre_y == j:
index_x, index_y = i, j
else:
interval_x.append((start_x, index_x))
interval_y.append((start_y, index_y))
start_x, start_y = i, j
index_x, index_y = i, j
interval_x.append((start_x, index_x))
interval_y.append((start_y, index_y))
return interval_x, interval_y"""
@unsync
async def init_semaphore(concurreq) -> Unfuture:
"""
`semaphore` initiated in the `unsync` event loop
"""
await asyncio.sleep(.01)
return asyncio.Semaphore(concurreq)
'''
def decompression(path: str, extension: str =".gz", remove: bool =True, outputPath: Optional[str] = None, logger: Optional[Logger] = None):
"""
Decompress gz file
:param path: The file path of the compressed file
:param extension: Compressed file tail, default value: `.gz`
:param remove: Whether remove the compressed file, default value: `True`
:param outputPath: File safe path, default value: `None`
:param logger: logger Object
"""
"""
with gzip.GzipFile(mode="rb", fileobj=open(path, 'rb')) as raw:
with open(path[:-len(extension)], "wb") as file:
file.write(raw.read())
"""
if outputPath is None:
outputPath = path[:-len(extension)]
with gzip.open(path, 'rb') as raw:
with open(outputPath, 'wb') as file:
shutil.copyfileobj(raw, file)
try:
if remove:
os.remove(path)
except Exception as e:
if isinstance(logger, Logger):
logger.error(e)
return outputPath
'''
def related_dataframe(filters: Optional[Union[Dict, Iterable[Tuple]]] = None, dfrm: Optional[DataFrame] = None, path: Union[str, Path, None] = None, sep: str = '\t', **kwargs):
'''
valid symbol: `eq, ne, le, lt, ge, gt, isin, isnull`
'''
if dfrm is None:
if path is not None:
dfrm = read_csv(path, sep=sep)
else:
raise ValueError('path should not be None')
elif not isinstance(dfrm, DataFrame):
raise ValueError('dfrm should be a pandas.DataFrame')
if filters is None:
return dfrm
elif isinstance(filters, Dict):
filters = filters.items()
for col, (symbol, value) in filters:
dfrm = dfrm[getattr(getattr(dfrm, col), symbol)(value)]
return dfrm
def sort_sub_cols(dfrm, cols):
if set(dfrm.columns) >= set(cols):
dfrm[cols] = np.sort(dfrm[cols].to_numpy())
return dfrm.drop_duplicates()
else:
return dfrm
@unsync
async def a_read_csv(path, read_mode='r',**kwargs):
'''
only suitable for small dataframe
'''
try:
if isinstance(path, (Coroutine, Unfuture)):
path = await path
if isinstance(path, (Path, str)):
async with aiofiles_open(path, read_mode) as file_io:
with StringIO(await file_io.read()) as text_io:
return read_csv(text_io, **kwargs)
elif isinstance(path, DataFrame):
return path
else:
return DataFrame(path, columns=kwargs.get('columns', None))
except Exception:
raise ValueError(f'{path}')
async def a_load_json(path):
try:
if isawaitable(path):
path = await path
if path is None:
return None
async with aiofiles_open(path) as inFile:
return json.loads(await inFile.read())
except Exception as e:
raise ValueError(f"Exception {e} ({path})")
async def pipe_out(df, path, **kwargs):
if not isinstance(df, Sheet):
raise TypeError(f"Invalid Object for pipe_out(): {type(df)}")
if len(df) == 0:
raise ValueError("Zero record!")
path = Path(path)
mode = kwargs.get('mode', 'a')
clear_headers:bool = path.exists() and mode.startswith('a')
var_format = kwargs.get('format', 'tsv').lower()
async with aiofiles_open(path, mode) as file_io:
"""
if isinstance(df, DataFrame):
sorted_col = sorted(df.columns)
if clear_headers:
headers = None
else:
headers = sorted_col
dataset = Dataset(headers=headers)
dataset.extend(df[sorted_col].to_records(index=False))
to_write = dataset.export(var_format, lineterminator='\n')
elif isinstance(df, Dataset):
if clear_headers:
df.headers = None
to_write = df.export(var_format, lineterminator='\n')
"""
df = df.project(sorted(df.colnames))
if clear_headers:
df.colnames = []
to_write = getattr(df, f"get_{var_format}")(lineterminator='\n')
await file_io.write(to_write)
def flatten_dict(data: Dict, root: str, with_root: bool = True):
if with_root:
iterator = yield_flatten_dict(data[root], root)
else:
iterator = yield_flatten_dict(data[root])
for key, value in iterator:
data[key] = value
del data[root]
def yield_flatten_dict(data: Dict, root: Optional[str] = None):
if root is None:
yield from data.items()
else:
for key, value in data.items():
yield f'{root}.{key}', value
def slice_series(se: Iterable) -> Dict:
'''
For Sorted Series
'''
data = {}
cur = next(iter(se))
start = 0
try:
for index, i in enumerate(se):
if i != cur:
assert cur not in data, "Invalid Series"
data[cur] = (start, index)
cur = i
start = index
assert cur not in data, "Invalid Series"
data[cur] = (start, index+1)
except AssertionError as e:
logging.error(e)
raise e
return data
def split_df_by_chain(df, all_cols, cols_to_split, mode='sep', sep=','):
'''
Reference: <https://stackoverflow.com/a/50731258/12876491>
'''
def chainer_sep(s):
return list(chain.from_iterable(s.str.split(sep)))
def chainer_json(s):
return list(chain.from_iterable(s.apply(json.loads)))
def chainer_list(s):
return list(chain.from_iterable(s))
if mode == 'sep':
chainer = chainer_sep
lens = df[cols_to_split[0]].str.split(sep).map(len)
elif mode == 'json-list':
chainer = chainer_json
lens = df[cols_to_split[0]].apply(json.loads).map(len)
elif mode == 'list':
chainer = chainer_list
lens = df[cols_to_split[0]].map(len)
else:
raise ValueError("Invalid mode!")
repeat_dict = {col: np.repeat(df[col], lens)
for col in set(all_cols)-set(cols_to_split)}
chain_dict = {col: chainer(df[col]) for col in cols_to_split}
return DataFrame({**repeat_dict, **chain_dict})
def init_folder_from_suffix(folder: Union[Path, str], suffix: str):
folder = Path(folder)
new_path = folder/suffix
new_path.mkdir(parents=True, exist_ok=True)
return new_path
def init_folder_from_suffixes(folder: Union[Path, str], suffixes: Iterable) -> Iterable[Path]:
folder = Path(folder)
for suffix in suffixes:
new_path = folder/suffix
new_path.mkdir(parents=True, exist_ok=True)
yield new_path
def iter_first(df: DataFrame, criteria: Callable[[NamedTuple], bool], **kwargs) -> Optional[NamedTuple]:
'''
Implement pandas.DataFrame.itertuples
Returns the value as soon as you find the first row/record
that meets the requirements and NOT iterating other rows
Originated from: https://stackoverflow.com/a/63826677/12876491
>>> iter_first(df, lambda row: row.A > 4 and row.B > 3)
'''
for row in df.itertuples(**kwargs):
if criteria(row):
return row
class MMCIF2DictPlus(dict):
"""
Parse a mmCIF file and return a dictionary
NOTE: Override methods based on Biopython's `Bio.PDB.MMCIF2Dict.MMCIF2Dict`
"""
def _check_token_with_focus_keys(self, token: Iterable[str]) -> bool:
is_key, key_value = token
return is_key == 0 and ((key_value in self.focus_keys) or any(key in key_value for key in self.focus_keys))
def __init__(self, handle, focus_keys: Iterable[str]=['']):
self.focus_keys = set(focus_keys)
self.quote_chars = ["'", '"']
self.whitespace_chars = [" ", "\t"]
# TODO: init first loop
loop_flag = False
key = None
tokens = self._tokenize(handle)
try:
token = next(tokens)
except StopIteration:
return # NOTE: annotation from biopython: for Python 3.7 and PEP 479
self[token[1][0:5]] = token[1][5:]
i = 0
n = 0
use = []
# TODO: loops
for token in tokens:
if token[1].lower() == "loop_":
loop_flag = True
keys = []
i = 0
n = 0
use = []
continue
elif loop_flag:
'''
NOTE: annotation from biopython:
# The second condition checks we are in the first column
# Some mmCIF files (e.g. 4q9r) have values in later columns
# starting with an underscore and we don't want to read
# these as keys
'''
if token[1].startswith("_") and (n == 0 or i % n == 0):
if i > 0:
loop_flag = False
else:
if self._check_token_with_focus_keys(token):
use.append(n)
self[token[1]] = []
keys.append(token[1])
n += 1
continue
else:
key_index = i % n
try:
if key_index in use:
self[keys[key_index]].append(token[1])
except Exception:
raise ValueError(f"{keys}, {key_index}, {use}")
i += 1
continue
if key is None:
if self._check_token_with_focus_keys(token):
key = token[1]
else:
# Always returns a list
self[key] = [token[1]]
key = None
if self.keys() >= self.focus_keys:
break
def _splitline(self, line: str):
# NOTE: annotation from biopython: See https://www.iucr.org/resources/cif/spec/version1.1/cifsyntax for the syntax
in_token = False
# NOTE: annotation from biopython: quote character of the currently open quote, or None if no quote open
quote_open_char = None
start_i = 0
for (i, c) in enumerate(line):
if c in self.whitespace_chars:
if in_token and not quote_open_char:
in_token = False
yield start_i, line[start_i:i]
elif c in self.quote_chars:
if not quote_open_char and not in_token:
# raise ValueError(f"{self['data_']}: Opening quote in middle of word: " + line)
quote_open_char = c
in_token = True
start_i = i + 1
elif c == quote_open_char and (i + 1 == len(line) or line[i + 1] in self.whitespace_chars):
quote_open_char = None
in_token = False
yield start_i, line[start_i:i]
elif c == "#" and not in_token:
''' NOTE: annotation from biopython:
# Skip comments. "#" is a valid non-comment char inside of a
# quote and inside of an unquoted token (!?!?), so we need to
# check that the current char is not in a token.
'''
return
elif not in_token:
in_token = True
start_i = i
if in_token:
yield start_i, line[start_i:]
if quote_open_char:
raise ValueError("Line ended with quote open: " + line)
def _tokenize(self, handle):
empty = True
for line in handle:
empty = False
if line.startswith("#"):
continue
elif line.startswith(";"):
'''
NOTE: annotation from biopython:
# The spec says that leading whitespace on each line must be
# preserved while trailing whitespace may be stripped. The
# trailing newline must be stripped.
'''
token_buffer = [line[1:].rstrip()]
for line in handle:
line = line.rstrip()
if line.startswith(";"):
yield 1, "\n".join(token_buffer)
line = line[1:]
if line and not line[0] in self.whitespace_chars:
raise ValueError("Missing whitespace")
break
token_buffer.append(line)
else:
raise ValueError("Missing closing semicolon")
yield from self._splitline(line.strip())
if empty:
raise ValueError("Empty file.")
class DisplayPDB(object):
a_name = 'Asymmetric unit'
b_name = 'Biological assembly {assembly_id}'
a_code = 'model-1'
b_code = 'assembly-{assembly_id}'
header_unit = '''
<td>
<b>{name}</b> of {pdb_id}
</td>
'''
content_unit = '''
<td>
<img class="display" width="300em" src="https://cdn.rcsb.org/images/structures/{in_code}/{pdb_id}/{pdb_id}_{code}.jpeg"/>
</td>
'''
css = '''
<style>
img.display {
-webkit-filter: invert(1);
filter: invert(1);
}
</style>
'''
template = '''
<table align="center">
<tr>
{headers}
</tr>
<tr>
{content}
</tr>
</table>
'''
@classmethod
def setting(cls, pdb_id, assemblies):
headers = [cls.header_unit.format(name=cls.a_name, pdb_id=pdb_id)]
content = [cls.content_unit.format(pdb_id=pdb_id, in_code=pdb_id[1:3], code=cls.a_code)]
for assembly_id in assemblies:
headers.append(cls.header_unit.format(
name=cls.b_name.format(assembly_id=assembly_id),
pdb_id=pdb_id))
content.append(cls.content_unit.format(
pdb_id=pdb_id,
in_code=pdb_id[1:3],
code=cls.b_code.format(assembly_id=assembly_id)
))
return ''.join(headers), ''.join(content)
def show(self, pdb_id, assemblies: Iterable[int]= [1]):
from IPython.display import display, HTML
assemblies = sorted(int(i) for i in assemblies if int(i) > 0)
headers, content = self.setting(pdb_id, assemblies)
self.table = self.template.format(headers=headers, content=content)
if self.dark:
self.table = self.css + self.table
display(HTML(self.table))
def __init__(self, dark:bool=False):
self.dark = dark
fasta_pat = re_compile(r'(>.+)\n([A-Z\*\n]+)')
unp_header_pat = re_compile(r'>sp\|(.+)\|')
async def a_seq_reader(path: Union[Unfuture, Union[Path, str]]):
if isinstance(path, Unfuture):
path = await path
async with aiofiles_open(path, 'rt') as handle:
header, content = fasta_pat.match(await handle.read()).groups()
content = content.replace('\n', '')
assert content != '', str(path)
return header, content
@unsync
async def get_seq_from_parser(res, identifier, seq_only:bool = True):
async for header, content in await res:
if (identifier in header) and (f'{identifier}-' not in header):
return content if seq_only else (header, content)
@unsync
async def get_seqs_from_parser(res, identifiers:Optional[Iterable[str]]=None):
ret = []
async for header, content in await res:
header = unp_header_pat.match(header).group(1)
if identifiers is None or header in identifiers:
ret.append((header, content))
return ret
async def a_seq_parser(path: Union[Unfuture, Coroutine, Path, str]):
if isawaitable(path):
path = await path
async with aiofiles_open(path, 'rt') as handle:
header, content = None, ''
async for line in handle:
if line.startswith('>'):
if header is not None:
yield header.strip(), content.replace('\n', '')
header, content = line, ''
else:
content += line
assert header is not None, f"\npath: {path}\ncur_content: {content}"
yield header.strip(), content.replace('\n', '')
nu2aa_dict = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',
}
def translate2aa(seq:str, check:bool=False):
assert len(seq) % 3 == 0, "Invalid length of dna OR rna sequence!"
seq = seq.replace('U', 'T')
p_seq = ""
for i in range(0, len(seq), 3):
codon = seq[i:i + 3]
p_seq += nu2aa_dict[codon]
if check:
assert "_" not in p_seq, "Invalid Sequence!"
return p_seq
def unsync_run(arg):
@unsync
async def s_unsync_wrap(arg):
return await arg
return s_unsync_wrap(arg).result()
@unsync
def unsync_wrap(var):
return var
class SeqRangeReader(object):
def __init__(self, name_group):
self.name = name_group # ('pdb_id', 'chain_id', 'UniProt')
self.pdb_range = []
self.unp_range = []
def output(self):
if self.pdb_range:
pdb_range = json.dumps(self.pdb_range).decode('utf-8')
unp_range = json.dumps(self.unp_range).decode('utf-8')
return pdb_range, unp_range
else:
return self.default_pdb_range, self.default_unp_range
def check(self, name_group_to_check, data_group):
self.default_pdb_range = '[[%s, %s]]' % data_group[:2]
self.default_unp_range = '[[%s, %s]]' % data_group[2:4]
if self.name == name_group_to_check:
self.pdb_range.append([int(data_group[0]), int(data_group[1])])
self.unp_range.append([int(data_group[2]), int(data_group[3])])
else:
self.name = name_group_to_check
self.pdb_range = [[int(data_group[0]), int(data_group[1])]]
self.unp_range = [[int(data_group[2]), int(data_group[3])]]
return self.output()
def sort_2_range(unp_range: List, pdb_range: List):
unp_range, pdb_range = zip(
*sorted(zip(unp_range, pdb_range), key=lambda x: x[0][0]))
return unp_range, pdb_range
def flat_dict_in_df(dfrm:DataFrame, targetCol:Union[str, Series], cols:List):
try:
new_cols = list(f'{targetCol.name}.{col}' for col in cols)
dfrm[new_cols] = DataFrame(
targetCol.apply(lambda x: list(x[col] for col in cols)).to_list(),
columns=new_cols)
return dfrm.drop(columns=[targetCol.name])
except AttributeError:
assert isinstance(targetCol, str)
new_cols = list(f'{targetCol}.{col}' for col in cols)
dfrm[cols] = DataFrame(
dfrm[targetCol].apply(lambda x: list(x[col] for col in cols)).to_list(),
columns=new_cols)
return dfrm.drop(columns=[targetCol])
class AAThree2One(dict):
def __missing__(self, key):
return 'X'
aa_three2one = AAThree2One({
"GLY": "G", "ALA": "A", "SER": "S", "THR": "T", "CYS": "C", "VAL": "V", "LEU": "L",
"ILE": "I", "MET": "M", "PRO": "P", "PHE": "F", "TYR": "Y", "TRP": "W", "ASP": "D",
"GLU": "E", "ASN": "N", "GLN": "Q", "HIS": "H", "LYS": "K", "ARG": "R"})
standardAA = list(aa_three2one.keys())
standardNu = ['DA', 'DT', 'DC', 'DG', 'DI', 'A', 'U', 'C', 'G', 'I']
"""def range_len(lyst: Union[List, str, float]) -> int:
if isinstance(lyst, float) or lyst is None:
return 0
elif isinstance(lyst, str):
lyst = json.loads(lyst)
length = 0
for left, right in lyst:
assert right >= left, f"\n{lyst}"
length += right - left + 1
return length
def interval2set(lyst: Union[Iterable, Iterator, str]):
if isinstance(lyst, str):
lyst = json.loads(lyst)
range_set = frozenset()
for left, right in lyst:
range_set |= frozenset(range(left, right+1))
return range_set"""
def expand_interval(lyst: Union[Iterable, Iterator, str]):
lyst = json.loads(lyst) if isinstance(lyst, str) else lyst
yield from (i for start, end in lyst for i in range(start, end+1))
def lyst2range(lyst, add_end=1):
if isinstance(lyst, str):
lyst = json.loads(lyst)
for start, end in lyst:
yield from range(int(start), int(end)+add_end)
"""def subtract_range(pdb_range: Union[str, Iterable], mis_range: Union[str, Iterable]) -> List:
if isinstance(mis_range, float) or mis_range is None:
return pdb_range
elif len(pdb_range) == 0:
return ()
elif len(mis_range) == 0:
return pdb_range
pdb_range_set = interval2set(pdb_range)
mis_range_set = interval2set(mis_range)
return to_interval(pdb_range_set - mis_range_set)
def check_range(i) -> bool:
if isinstance(i, float) or (i is None) or (len(i) == 0) or (i == 'nan'):
return False
return True
def add_range(left: Union[str, Iterable], right: Union[str, Iterable]) -> List:
check_left = check_range(left)
check_right = check_range(right)
if check_left and not check_right:
return left
elif not check_left and check_right:
return right
elif not check_left and not check_right:
return np.nan
try:
left_range_set = interval2set(left)
right_range_set = interval2set(right)
return to_interval(left_range_set | right_range_set)
except Exception as e:
print(left, right)
print(type(left), type(right))
raise e
def overlap_range(obs_range:Union[str, Iterable], unk_range: Union[str, Iterable]) -> List:
if isinstance(unk_range, float) or unk_range is None:
return ()
'''
obs_range_set = interval2set(obs_range)
unk_range_set = interval2set(unk_range)
return to_interval(obs_range_set.intersection(unk_range_set))
'''
obs_range = json.loads(obs_range) if isinstance(obs_range, str) else obs_range
unk_range = json.loads(unk_range) if isinstance(unk_range, str) else unk_range
def unit(i1,i2):
for start1, end1 in i1:
for start2, end2 in i2:
sl = start2 >= start1
sr = start2 <= end1
el = end2 >= start1
er = end2 <= end1
s_in = sl and sr
e_in = el and er
ini = s_in or e_in
# out = (sl and el) or (sr and er)
cov = (not sl) and (not er)
start = start2 if s_in else start1
end = end2 if e_in else end1
if ini or cov:
yield start, end
return tuple(unit(obs_range, unk_range))"""
def get_seq_seg(seq, ranges, **kwargs):
    """Yield (start, subsequence) pairs for 1-based closed intervals of *seq*.

    Intervals with end < start are skipped with a warning; **kwargs is only
    used to label that warning.
    """
    for begin, stop in ranges:
        if stop < begin:
            warn(f"{kwargs} -> Invalid Order: {ranges}, skip")
            continue
        yield begin, seq[begin - 1:stop]
def get_diff_index(lseq, lrange, rseq, rrange):
    """Yield (left_pos, right_pos) 1-based index pairs where the aligned
    segments of the two sequences disagree.

    Range arguments may be interval lists or their JSON serializations;
    segments are paired in order via get_seq_seg.
    """
    lrange = json.loads(lrange) if isinstance(lrange, str) else lrange
    rrange = json.loads(rrange) if isinstance(rrange, str) else rrange
    paired_segments = zip(get_seq_seg(lseq, lrange), get_seq_seg(rseq, rrange))
    for (lstart, lseg), (rstart, rseg) in paired_segments:
        for offset, (lchar, rchar) in enumerate(zip(lseg, rseg)):
            if lchar != rchar:
                yield lstart + offset, rstart + offset
def red_seq_seg(seq, ranges):
    """Yield *seq* split into pieces, with each [start, end] segment
    (1-based, closed) wrapped in ANSI red escape codes for terminal display.

    Fix: the trailing piece is now emitted from the running *edge*, so an
    empty *ranges* yields the whole sequence unhighlighted instead of
    raising UnboundLocalError on the loop variable `end`.
    """
    edge = 0
    for start, end in ranges:
        # unhighlighted gap before the segment + the segment in red
        yield f"{seq[edge:start-1]}\x1b[31m{seq[start-1:end]}\x1b[0m"
        edge = end
    # tail after the last highlighted segment (whole seq when ranges is empty)
    yield seq[edge:]
"""def outside_range(pdb_range: Union[str, Iterable], seqres_len: int):
pdb_range = json.loads(pdb_range) if isinstance(pdb_range, str) else pdb_range
out_head = pdb_range[0][0] - 1
out_tail = pdb_range[-1][-1] + 1
ret = [[1, out_head], [out_tail, seqres_len]]
return [i for i in ret if i[0]<=i[1]]"""
def outside_range_len(pdb_range: Union[str, Iterable], seqres_len: int, omit: int = 5) -> int:
    """Count residues lying outside the observed range, allowing up to *omit*
    positions of slack at each terminus before they are counted.

    *pdb_range* is a list of 1-based closed intervals (or its JSON string);
    *seqres_len* is the full sequence length.
    """
    intervals = json.loads(pdb_range) if isinstance(pdb_range, str) else pdb_range
    head = intervals[0][0] - 1                 # unobserved N-terminal stretch
    tail = seqres_len - intervals[-1][-1]      # unobserved C-terminal stretch
    return max(0, head - omit) + max(0, tail - omit)
def get_gap_list(li: Union[str, List, Tuple]):
    """Return the number of residues in each gap between consecutive
    intervals of *li* (a list of closed intervals or its JSON string)."""
    intervals = json.loads(li) if isinstance(li, str) else li
    return [after[0] - before[1] - 1
            for before, after in zip(intervals, intervals[1:])]
def get_range_diff(lyst_a: Union[str, List, Tuple], lyst_b: Union[str, List, Tuple]):
    """Element-wise difference of interval lengths between two interval lists.

    Returns a numpy int array where entry i is len(a_i) - len(b_i); the two
    lists are assumed to pair up positionally. JSON strings are accepted.
    """
    intervals_a = json.loads(lyst_a) if isinstance(lyst_a, str) else lyst_a
    intervals_b = json.loads(lyst_b) if isinstance(lyst_b, str) else lyst_b
    lengths_a = np.array([stop - start + 1 for start, stop in intervals_a])
    lengths_b = np.array([stop - start + 1 for start, stop in intervals_b])
    return lengths_a - lengths_b
def select_range(ranges, indexes, cutoff=0.2, skip_index=[], selected_ranges=None, similarity_func=overlap.similarity):
    """Greedily pick indexes whose ranges are not too similar to any
    already-selected range.

    Iterates *indexes* in order; an index is selected when its range has
    similarity <= *cutoff* against every previously selected range.
    Ranges may be interval lists or JSON strings. Returns the list of
    selected indexes; *selected_ranges* (if given) is extended in place.
    """
    select_index = []
    selected_ranges = [] if selected_ranges is None else selected_ranges

    def unit(cur_index):
        if cur_index in skip_index:
            return
        cur_range = ranges[cur_index]
        cur_range = json.loads(cur_range) if isinstance(cur_range, str) else cur_range
        # BUG FIX: this emptiness guard used to sit inside the loop below, so
        # it was never evaluated while selected_ranges was empty and an empty
        # cur_range could be selected; check it once, up front.
        if len(cur_range) == 0:
            return
        for selected_range in selected_ranges:
            selected_range = json.loads(selected_range) if isinstance(selected_range, str) else selected_range
            score = similarity_func(lyst2range(cur_range),
                                    lyst2range(selected_range))
            if score > cutoff:
                return
        select_index.append(cur_index)
        selected_ranges.append(cur_range)

    for index in indexes:
        unit(index)
    return select_index
"""def select_ho_range(ranges1, ranges2, indexes, cutoff=0.2, skip_index=[]):
from scipy.stats import wasserstein_distance
select_index = []
def unit(cur_index):
if cur_index in skip_index:
return
cur_range1, cur_range2 = ranges1[cur_index], ranges2[cur_index]
c1_1 = Counter(expand_interval(cur_range1))
c1_2 = Counter(expand_interval(cur_range2))
if len(c1_1) == 0 or len(c1_2) == 0:
return
c1 = c1_1 + c1_2
for selected in select_index:
selected_range1,selected_range2 = ranges1[selected], ranges2[selected]
c_c1 = c1.copy()
c2 = Counter(expand_interval(selected_range1))+Counter(expand_interval(selected_range2))
for key in c_c1.keys() | c2.keys():
if key not in c_c1:
c_c1[key] = 0
if key not in c2:
c2[key] = 0
oc2 = OrderedDict(sorted((item for item in c2.items()), key=lambda x: x[0]))
oc1 = OrderedDict(sorted((item for item in c_c1.items()), key=lambda x: x[0]))
score = wasserstein_distance(tuple(oc1.values()), tuple(oc2.values()))
if score < cutoff:
return
select_index.append(cur_index)
for index in indexes:
unit(index)
return select_index"""
def select_ho_max_range(ranges1, ranges2, indexes, cutoff=0.2, skip_index=[]):
    """Greedy selection for homo-pair ranges: keep an index when its tagged
    residue set has sorensen similarity <= *cutoff* against both chain-tag
    orientations of every already-selected pair.

    Residues are tagged (1, pos) / (2, pos) by chain slot; both the direct
    and the tag-swapped set are stored so similarity is orientation-agnostic.
    Returns the selected indexes in selection order.
    """
    select_range_set = OrderedDict()

    def unit(cur_index):
        if cur_index in skip_index:
            return
        cur_range1, cur_range2 = ranges1[cur_index], ranges2[cur_index]
        c1_1 = frozenset((1, i) for i in expand_interval(cur_range1))
        c1_2 = frozenset((2, i) for i in expand_interval(cur_range2))
        if len(c1_1) == 0 or len(c1_2) == 0:
            return
        c1 = c1_1 | c1_2
        for c2s in select_range_set.values():
            for c2 in c2s:
                score = sorensen.similarity(c1, c2)
                if score > cutoff:
                    return
        # Reuse the sets computed above instead of re-expanding every interval:
        # the first entry equals c1, the second is c1 with chain tags swapped.
        select_range_set[cur_index] = (c1, frozenset((3 - tag, i) for tag, i in c1))

    for index in indexes:
        unit(index)
    return list(select_range_set.keys())
def select_he_range(Entry_1, Entry_2, ranges1, ranges2, indexes, cutoff=0.2, skip_index=[]):
    """Greedy selection for hetero-pair ranges: keep an index when its
    entry-ordered, chain-tagged residue set has sorensen similarity
    <= *cutoff* against every already-selected pair.

    The (entry, range) pairs are sorted by entry id before tagging so that
    comparisons are independent of argument order. Returns the selected
    indexes in selection order.
    """
    select_index = []

    def tagged_sets(index):
        # Order the (entry, range) pairs by entry id, then tag residues
        # (1, pos) / (2, pos) according to that canonical slot.
        pair_a, pair_b = sorted(((Entry_1[index], ranges1[index]),
                                 (Entry_2[index], ranges2[index])),
                                key=lambda pair: pair[0])
        set_a = frozenset((1, i) for i in expand_interval(pair_a[1]))
        set_b = frozenset((2, i) for i in expand_interval(pair_b[1]))
        return set_a, set_b

    for cur_index in indexes:
        if cur_index in skip_index:
            continue
        set_a, set_b = tagged_sets(cur_index)
        if not set_a or not set_b:
            continue
        candidate = set_a | set_b
        too_similar = False
        for prev in select_index:
            prev_a, prev_b = tagged_sets(prev)
            if sorensen.similarity(candidate, prev_a | prev_b) > cutoff:
                too_similar = True
                break
        if not too_similar:
            select_index.append(cur_index)
    return select_index
def dumpsParams(params: Dict) -> str:
    """Serialize a mapping as a URL-style query string (key=value joined by '&')."""
    pairs = [f'{key}={value}' for key, value in params.items()]
    return '&'.join(pairs)
@unsync
async def a_concat(pathes, sep='\t', sort=False, ignore_index=True, columns=None):
    """Asynchronously read a collection of CSV paths and concatenate the frames.

    *pathes* — and each individual path — may itself be an awaitable
    (Unfuture/Coroutine); awaited values are fed to a_read_csv. Files that
    read as None are skipped before concatenation.
    """
    if isinstance(pathes, (Unfuture, Coroutine)):
        pathes = await pathes
    # BUG FIX: was `isinstance(Unfuture, Coroutine)` — a constant-False check
    # of the class object itself — so awaitable *path* items were never awaited.
    res = [await a_read_csv((await path) if isinstance(path, (Unfuture, Coroutine)) else path,
                            sep=sep, columns=columns)
           for path in pathes]
    return concat((i for i in res if i is not None), sort=sort, ignore_index=ignore_index)
def get_str_dict_len(x):
    """Size proxy: for serialized mapping strings count ':' separators,
    otherwise fall back to len(x)."""
    return x.count(':') if isinstance(x, str) else len(x)
def id2score(identifier):
    """Deterministic negative score for an identifier string.

    Earlier characters weigh more: each contributes
    ord(char) * 2 * (length - position); the total is negated so that
    "heavier" identifiers sort first under ascending order.
    """
    size = len(identifier)
    total = 0
    for position, char in enumerate(identifier):
        total += ord(char) * 2 * (size - position)
    return -total
| NatureGeorge/pdb-profiling | pdb_profiling/utils.py | utils.py | py | 34,681 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "asyncio.sleep",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "asyncio.Semaphore",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "unsync.unsync",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "unsync.Unfuture",
... |
9943778853 | import argparse
import torchvision.transforms as transforms
import torchvision
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets
from cutout import Cutout
from models.resnet import *
from models.resnet import Bottleneck
from utils import CrossEntropy
# ---- Global training configuration (overridable via the CLI flags below) ----
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
epochs = 200  # NOTE(review): unused — main() hard-codes range(250); confirm intent.
BATCH_SIZE = 128
LR = 0.1  # initial learning rate; decayed by 10x inside train() at fixed epochs

parser = argparse.ArgumentParser(description='Task-Oriented Feature Distillation. ')
parser.add_argument('--model', default="resnet50", help="choose the student model", type=str)
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar10/cifar100")
parser.add_argument('--alpha', default=0.05, type=float)  # feature-distillation loss weight
parser.add_argument('--beta', default=0.03, type=float)   # orthogonality-penalty weight
parser.add_argument('--l2', default=7e-3, type=float)     # weight decay for SGD
parser.add_argument('--teacher', default="resnet152", type=str)
parser.add_argument('--t', default=3.0, type=float, help="temperature for logit distillation ")
args = parser.parse_args()

# Standard CIFAR augmentation (crop + flip + cutout) plus channel normalization.
transform_train = transforms.Compose(
    [transforms.RandomCrop(32, padding=4, fill=128), transforms.RandomHorizontalFlip(), transforms.ToTensor(),
     Cutout(n_holes=1, length=16), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

trainset, testset = None, None
# NOTE(review): the two dataset branches use different download roots
# ('./.data' vs 'data') — looks unintentional; confirm before relying on either.
if args.dataset == 'cifar100':
    trainset = datasets.CIFAR100(root='./.data', train=True, download=True, transform=transform_train)
    testset = datasets.CIFAR100(root='./.data', train=False, download=True, transform=transform_test)
if args.dataset == 'cifar10':
    trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)

# Student network selection.
if args.model == "resnet18":
    net = resnet18()
if args.model == "resnet50":
    net = resnet50()
if args.model == "resnet101":
    net = resnet101()
if args.model == "resnet152":
    net = resnet152()

# Teacher network selection.
if args.teacher == 'resnet18':
    teacher = resnet18()
elif args.teacher == 'resnet50':
    teacher = resnet50()
elif args.teacher == 'resnet101':
    teacher = resnet101()
elif args.teacher == 'resnet152':
    teacher = resnet152()
# NOTE(review): checkpoint path is fixed to resnet152.pth regardless of
# --teacher — confirm this matches the chosen teacher architecture.
teacher.load_state_dict(torch.load("./resnet152.pth"))
teacher.cuda()  # teacher is pinned to CUDA; the student uses `device`
net.to(device)

orthogonal_penalty = args.beta
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, weight_decay=args.l2, momentum=0.9)
def train(net, trainloader, optimizer, epoch):
    """Train *net* for one epoch with task-oriented feature distillation.

    Per-batch loss = L2 feature matching to the teacher (weight args.alpha)
    + cross-entropy task loss per auxiliary classifier
    + logit distillation (CrossEntropy from utils.py, growing temperature)
    + orthogonality penalty on the link layers (weight args.beta).

    Relies on module-level globals: device, teacher, criterion, args, LR.
    Prints running loss/accuracy; returns None.
    """
    # Step-decay schedule: divide the learning rate by 10 at these epochs.
    if epoch in [80, 160, 240]:
        for param_group in optimizer.param_groups:
            param_group['lr'] /= 10
    init = False
    net.train()
    sum_loss = 0.0
    correct = 0.0
    total = 0.0
    for i, data in enumerate(trainloader):
        length = len(trainloader)
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs, student_feature = net(inputs)
        with torch.no_grad():
            teacher_logits, teacher_feature = teacher(inputs)
        if not init:
            # BUG FIX: `init` was never set to True, so these link layers and
            # the optimizer were re-created on EVERY iteration, discarding
            # their learned weights each batch.
            init = True
            # One-time setup: linear "link" layers projecting student features
            # into the teacher's feature space (one per auxiliary classifier).
            teacher_feature_size = teacher_feature[0].size(1)
            student_feature_size = student_feature[0].size(1)
            num_auxiliary_classifier = len(teacher_logits)
            link = []
            for j in range(num_auxiliary_classifier):
                link.append(nn.Linear(student_feature_size, teacher_feature_size, bias=False))
            net.link = nn.ModuleList(link)
            net.cuda()
            # we redefine optimizer here so it can optimize the net.link layers.
            optimizer = optim.SGD(net.parameters(), lr=LR, weight_decay=5e-4, momentum=0.9)
            # NOTE(review): `init` is function-local, so the link layers are
            # still re-initialized at the start of every epoch and the outer
            # optimizer never sees them — presumably the flag was meant to
            # persist across epochs (cf. the unused `init` in main); confirm.
        loss = torch.FloatTensor([0.]).to(device)
        for index in range(len(student_feature)):
            student_feature[index] = net.link[index](student_feature[index])
            # task-oriented feature distillation loss
            loss += torch.dist(student_feature[index], teacher_feature[index], p=2) * args.alpha
            # task loss (cross entropy loss for the classification task)
            loss += criterion(outputs[index], labels)
            # logit distillation loss, CrossEntropy implemented in utils.py.
            loss += CrossEntropy(outputs[index], teacher_logits[index], 1 + (args.t / 250) * float(1 + epoch))
        # Orthogonality penalty: push W*W^T and W^T*W toward the identity.
        for index in range(len(student_feature)):
            weight = list(net.link[index].parameters())[0]
            weight_trans = weight.permute(1, 0)
            ones = torch.eye(weight.size(0)).cuda()
            ones2 = torch.eye(weight.size(1)).cuda()
            loss += torch.dist(torch.mm(weight, weight_trans), ones, p=2) * args.beta
            loss += torch.dist(torch.mm(weight_trans, weight), ones2, p=2) * args.beta
        sum_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total += float(labels.size(0))
        _, predicted = torch.max(outputs[0].data, 1)
        correct += float(predicted.eq(labels.data).cpu().sum())
        print(f'\r[Train] epoch[{epoch + 1}] - '
              f'iteration = {i + 1 + epoch * length} '
              f'loss = {sum_loss / (i + 1)} '
              f'acc = {100 * correct / total}% ', end='')
def test(net, testloader):
    """Evaluate *net* on *testloader* and return top-1 accuracy in percent.

    Prints a per-batch progress line (accuracy of the current batch only)
    followed by the overall test-set accuracy. Uses the module-level
    `device` for tensor placement.
    """
    with torch.no_grad():
        correct = 0.0
        total = 0.0
        for batch in testloader:
            net.eval()
            images, labels = batch
            images, labels = images.to(device), labels.to(device)
            outputs, feature = net(images)
            _, predicted = torch.max(outputs[0].data, 1)
            batch_size = float(labels.size(0))
            batch_hits = float(predicted.eq(labels.data).cpu().sum())
            correct += batch_hits
            total += batch_size
            print('\rTest Set AccuracyAcc: %.4f%% ' % (100 * batch_hits / batch_size), end='')
        print('\r[TEST]Test Set AccuracyAcc: %.4f%% ' % (100 * correct / total))
        return 100 * correct / total
def main():
    """Run the full distillation loop and checkpoint the best student model.

    Trains for 250 epochs, evaluating after each epoch; whenever test
    accuracy improves, the student's state_dict is saved under ./checkpoint/.
    Fix: removed the dead local `init = False` (never read).
    """
    best_acc = 0
    print("start Training")
    for epoch in range(250):
        train(net, trainloader, optimizer, epoch)
        test_acc = test(net, testloader)
        if test_acc > best_acc:
            best_acc = test_acc
            print("Best Accuracy Updated: ", best_acc)
            # NOTE(review): assumes ./checkpoint/ already exists — confirm.
            torch.save(net.state_dict(), "./checkpoint/" + args.model + ".pth")
    print("Training Finished, Best Accuracy is %.4f%%" % (best_acc))
if __name__ == '__main__':
main()
| leejeongho3214/KD | train.py | train.py | py | 6,804 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "argparse.Argumen... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.