seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
32560325870 | # coding: utf-8
# author: chenhongqing
from Public.appBase import *
import sys
import unittest
import os
import time
app = appBase()
class tab_download(unittest.TestCase, BasePage):
    """TAB DOWNLOAD下面的功能检查"""
    # Functional checks for the app's DOWNLOAD tab, driven through uiautomator2.
    # NOTE(review): BasePage, res, setupclass, teardownclass, testcase and the
    # shared device handle cls.d / self.d are assumed to come from the star
    # import of Public.appBase -- confirm against that module. VdieoPlay (used
    # in test_02) is not defined in this file either; presumably also exported
    # by Public.appBase -- verify.

    @classmethod
    @setupclass
    def setUpClass(cls):
        # Launch the app under test once before any test in this class runs.
        cls.d.app_start(app.pkg_name)

    @classmethod
    @teardownclass
    def tearDownClass(cls):
        # Stop the app after the whole class has finished.
        cls.d.app_stop(app.pkg_name)

    @testcase
    def test_01_xxx_download(self):
        """检查视频下载"""
        # Full download flow: clear history, download a video, pause/resume,
        # check the notification, wait for completion.
        app.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=5)
        # Clear any previous download records.
        self.d(resourceId=res['com.app.xxxxxx:id/ivDownload']).click()
        app.clear_downloaded_xxx()
        self.d.press('back')
        # Open the download URL via the in-app search bar.
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=5)
        self.d.send_keys("https://www.ted.com/talks/armand_d_angour_the_ancient_origins_of_the_olympics/up-next")
        self.d.press('enter')
        self.d(resourceId=res['com.app.xxxxxx:id/button_analytics']).click(timeout=5)
        # Clear notification-bar messages so later checks see only new ones.
        app.clear_notification()
        self.d.press('back')
        # Check that the download manager now has a record.
        time.sleep(10)
        self.d(text="Download").click(timeout=5)
        self.d(text="view >").click(timeout=5)
        self.assertTrue(self.d(resourceId=res['com.app.xxxxxx:id/flCover']).exists(timeout=1),
                        msg='下载管理器没有视频')
        self.screenshot()
        # Pause the download.
        self.d(resourceId=res['com.app.xxxxxx:id/progress']).click()
        self.assertTrue(self.d(text='Paused').exists(timeout=5),msg='暂停下载失败')
        # Resume the download.
        self.d(resourceId=res['com.app.xxxxxx:id/progress']).click()
        time.sleep(2)
        self.assertFalse(self.d(text='Paused').exists(timeout=5), msg='恢复下载失败')
        # Check the notification bar shows a download message.
        self.d.open_notification()
        self.assertTrue(self.d(text='app').exists(timeout=1),msg='通知栏没有下载消息')
        self.screenshot()
        self.d.press('back')
        # Downloading counter should show exactly one active download.
        downloading_count = int(self.d(resourceId=res['com.app.xxxxxx:id/tvCount']).get_text())
        self.assertEqual(downloading_count, 1, msg='Downloading count计算错误')
        # Wait (up to 540 s) for the download to finish.
        self.assertTrue(self.d(resourceId=res['com.app.xxxxxx:id/tvDownloaded']).exists(timeout=540),
                        msg='下载完成超时')
        self.d(resourceId=res['com.app.xxxxxx:id/ivLeft']).click(timeout=5)
        self.d.press('back')
        time.sleep(1)
        self.d(resourceId=res['com.app.xxxxxx:id/ivSiteClose']).click(timeout=5)

    @testcase
    def test_02_playing_download(self):
        '''检查视频边播放边下载'''
        # Play the video online while it is being downloaded.
        app.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=5)
        # Open the download URL.
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=5)
        self.d.send_keys("https://www.ted.com/talks/armand_d_angour_the_ancient_origins_of_the_olympics/up-next")
        self.d.press('enter')
        self.d(resourceId=res['com.app.xxxxxx:id/button_analytics']).click(timeout=5)
        # Online playback: verify the play-time counter advances.
        time.sleep(10)
        self.d(text="Play").click()
        time.sleep(10)
        xxx_play = VdieoPlay.xxx_play_time_check()
        self.assertNotEqual(xxx_play[0], xxx_play[1], msg='播放时间没有跑动')
        self.screenshot()
        #todo:下载

    @testcase
    def test_03_bookmark(self):
        """检查bookmark功能"""
        # Create, open and delete a custom bookmark.
        app.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=5)
        # Create a new bookmark.
        self.d(text="More").click(timeout=5)
        self.d(resourceId=res['com.app.xxxxxx:id/edtName']).set_text('google', timeout=5)
        self.d(resourceId=res['com.app.xxxxxx:id/edtUrl']).set_text('https://google.com', timeout=5)
        self.d(text="Save").click(timeout=5)
        self.screenshot()
        # Open the bookmark and verify it loads.
        self.d(text="google").click(timeout=5)
        self.assertTrue(self.d(text="Google").exists(timeout=10), msg='打开自建的书签失败')
        self.screenshot()
        self.d.click(0.076, 0.071)
        time.sleep(1)
        # Delete the bookmark via long-press and the delete icon in the list.
        self.d(text="google").long_click(duration=5, timeout=10)
        BookMark_ID = res['com.app.xxxxxx:id/rvBookMark']
        self.d.xpath(f'//*[@resource-id="{BookMark_ID}"]/android.view.ViewGroup[2]/android.widget.ImageView[2]').click()
        self.d(text='Ok').click(timeout=5)
        time.sleep(2)
        self.assertFalse(self.d(text='google').exists(timeout=5),msg='删除书签失败')
        self.screenshot()
        self.d.press('back')

    @testcase
    def test_04_whatsapp(self):
        """检查whatspp视频"""
        # The Whatsapp entry should open the WhatsApp-status page.
        app.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=5)
        self.d(text="Whatsapp").click(timeout=5)
        self.assertTrue(self.d(text='Open WhatsApp Status').exists(timeout=5),msg='打开whatsapp失败')
        self.screenshot()
        self.d.press('back')

    @testcase
    def test_05_youtube_xxx(self):
        """检查youtube视频"""
        # Smoke check: the YouTube entry opens and is dismissable.
        app.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=5)
        self.d(text="YouTube").click(timeout=5)
        self.d(text="Got it").click(timeout=10)
        time.sleep(5)
        self.screenshot()
| taylortaurus/android-ui-runner | testsuite/case/test_04_tab_download.py | test_04_tab_download.py | py | 5,615 | python | en | code | 0 | github-code | 36 |
3974726049 | import json
import os
from datetime import datetime
from sys import exit as x
from typing import List
import cv2
import numpy as np
import pandas as pd
import printj # pip install printj
from jaitool.inference import D2Inferer as inferer
from jaitool.inference.models.hook import draw_info_box, draw_inference_on_hook2
from pyjeasy.file_utils import (dir_exists, file_exists, delete_dir,
make_dir, make_dir_if_not_exists)
from pyjeasy.math_utils import dist
from annotation_utils.coco.structs import COCO_Annotation, COCO_Dataset
from common_utils import path_utils
from common_utils.check_utils import check_value
from common_utils.common_types import BBox
from common_utils.common_types.bbox import BBox
from common_utils.common_types.bbox import ConstantAR_BBox as BBox
from common_utils.common_types.keypoint import Keypoint2D, Keypoint2D_List
from common_utils.cv_drawing_utils import (SimpleVideoViewer,
cv_simple_image_viewer,
draw_bbox,
draw_bool_mask,
draw_keypoints,
draw_skeleton)
# from common_utils.file_utils import (delete_dir, dir_exists, file_exists,
# make_dir, make_dir_if_not_exists)
from common_utils.path_utils import (get_all_files_in_extension_list,
get_all_files_of_extension, get_filename,
get_rootname_from_path, get_script_dir,
rel_to_abs_path)
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from typing import List
# from logger import logger
from tqdm import tqdm
def infer(path: str, weights_path: str, thresh: float = 0.5, key: str = 'R', infer_dump_dir: str = '', model: str = 'mask_rcnn_R_50_FPN_1x', size: int = 1024,
          class_names: List[str]=['hook'],  # NOTE(review): mutable default argument -- shared across calls
          gt_path: str = '/home/jitesh/3d/data/coco_data/hook_test/json/cropped_hook.json'):
    """Run keypoint/segmentation inference over every image under *path* and
    write annotated result images into ``{infer_dump_dir}/infer_key_seg``.

    For each image the highest-scoring 'pole' detection supplies a diameter
    estimate and the highest-scoring 'hook' detection supplies keypoints; both
    are drawn onto the image together with an info box (len_ab vs diameter).

    NOTE(review): ``compute_diameter`` and ``confirm_folder`` are not defined
    or imported in this file -- presumably provided elsewhere; verify.
    The ``thresh``, ``key`` and ``model`` parameters are currently unused in
    the active (non-commented) code paths.
    """
    # class_names=['hook', 'pole']
    # class_names=['hook']
    conf_thresh = 0.001
    show_bbox_border = True
    gt_dataset = COCO_Dataset.load_from_path(json_path=gt_path)
    inferer_seg = inferer(
        weights_path=weights_path,
        confidence_threshold=0.1,
        # num_classes=1,
        # num_classes=2,
        class_names=class_names,
        # class_names=['hook'],
        model='keypoint_rcnn_R_50_FPN_1x',
        # model='faster_rcnn_X_101_32x8d_FPN_3x',
        # model='faster_rcnn_R_101_FPN_3x',
        # model=model,
    )
    inferer_seg.cfg.INPUT.MIN_SIZE_TEST = size
    inferer_seg.cfg.INPUT.MAX_SIZE_TEST = size
    inferer_seg.cfg.MODEL.MASK_ON = True
    # NOTE(review): the following reassignments of the local ``weights_path``
    # are dead code -- ``inferer_seg`` was already built above with the
    # parameter value, and ``weights_path`` is not read again afterwards.
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_3/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0007999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # inferer_key = jDetectron2KeypointInferer(
    #     weights_path=weights_path,
    #     # ref_coco_ann_path=f'/home/jitesh/3d/data/coco_data/hook_real1/json/hook.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_infer.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt.json',
    #     categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt_pole.json',
    #     target_category='hook',
    #     model_name='keypoint_rcnn_R_50_FPN_1x',
    #     bbox_threshold=bbox_thresh,
    #     kpt_threshold=kpt_thresh,
    #     key_box='hook',
    # )
    # k_size = 1024
    # inferer_key.cfg.INPUT.MIN_SIZE_TEST = k_size
    # inferer_key.cfg.INPUT.MAX_SIZE_TEST = k_size
    possible_modes = ['save', 'preview']
    mode = 'save'
    check_value(mode, valid_value_list=possible_modes)
    # make_dir_if_not_exists(infer_dump_dir)
    img_extensions = ['jpg', 'JPG', 'png', 'PNG']
    img_pathlist = get_all_files_in_extension_list(
        dir_path=f'{path}', extension_list=img_extensions)
    img_pathlist.sort()
    confirm_folder(infer_dump_dir, mode)
    # confirm_folder(f'{infer_dump_dir}/good_seg', mode)
    # confirm_folder(f'{infer_dump_dir}/good_cropped', mode)
    # confirm_folder(f'{infer_dump_dir}/good', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/bad', mode)
    confirm_folder(f'{infer_dump_dir}/infer_key_seg', mode)
    count = 0
    start = datetime.now()
    # Accumulator for the (currently disabled) ground-truth comparison below.
    df = pd.DataFrame(data=[], columns=['gt_d', 'pred_d',
                                        'gt_ab', 'pred_ab',
                                        'gt_ratio', 'pred_ratio',
                                        'gt_ratio>4', 'pred_ratio>4',
                                        'correct_above4d_ratio', 'incorrect_above4d_ratio',
                                        'correct_below4d_ratio', 'incorrect_below4d_ratio',
                                        ])
    # 'image_path'])
    for i, img_path in enumerate(tqdm(img_pathlist, desc='Writing images',)):
        img_filename = get_filename(img_path)
        # if not '201005_70_縮小革命PB020261.jpg' in img_path:
        #     continue
        # if i > 19:
        #     continue
        printj.purple(img_path)
        img = cv2.imread(img_path)
        result = img
        # print(f'shape {img.shape}')
        # cv2.imshow('i', img)
        # cv2.waitKey(100000)
        # continue
        score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list = inferer_seg.predict(
            img=img)
        # printj.blue(pred_masks_list)
        # Track the best detection per class across this image.
        max_hook_score = -1
        max_pole_score = -1
        diameter = -1
        len_ab = -1
        found_hook = False
        found_pole = False
        for score, pred_class, bbox, mask, keypoints, vis_keypoints, kpt_confidences in zip(score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list):
            if pred_class == 'pole':
                found_pole = True
                if max_pole_score < score:
                    # if True:
                    max_pole_score = score
                    diameter = compute_diameter(mask)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     0, 255, 255],
                    #     transparent=True
                    # )
                    pole_bbox_text = f'pole {str(round(score, 2))}'
                    pole_bbox = bbox
                    pole_mask = mask
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
                printj.blue(f'diameter={diameter}')
            if pred_class == 'hook':
                # printj.green.bold_on_yellow(score)
                found_hook = True
                if max_hook_score < score:
                    # if True:
                    max_hook_score = score
                    # Remember everything about the best hook detection for drawing later.
                    hook_bbox = BBox.buffer(bbox)
                    hook_score = round(score, 2)
                    hook_mask = mask
                    hook_keypoints = keypoints
                    hook_vis_keypoints = vis_keypoints
                    hook_kpt_confidences = kpt_confidences
                    # xmin, ymin, xmax, ymax = bbox.to_int().to_list()
                    # _xmin, _ymin, _xmax, _ymax = _bbox.to_int().to_list()
                    # width = _xmax-_xmin
                    # height = _ymax-_ymin
                    # scale = 0.2
                    # xmin = max(int(_xmin - width*scale), 0)
                    # xmax = min(int(_xmax + width*scale), img.shape[1])
                    # ymin = max(int(_ymin - height*scale), 0)
                    # ymax = min(int(_ymax + height*scale), img.shape[0])
                    # printj.red(score)
                    # printj.red(bbox)
                    # return
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #     0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #     label_orientation='top')
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #     0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #     label_orientation='bottom')
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result = result
                    # bbox_text = str(round(score, 4))
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=bbox_text, label_only=not show_bbox_border)
                    bbox_label_mode = 'euler'
                    # result = draw_keypoints(
                    #     img=result, keypoints=vis_keypoints, radius=2, color=[0, 0, 255],
                    #     # keypoint_labels=kpt_labels, show_keypoints_labels=True, label_thickness=1,
                    #     # ignore_kpt_idx=conf_idx_list
                    # )
                    kpt_labels = ["kpt-a", "kpt-b", "kpt-cb",
                                  "kpt-c", "kpt-cd", "kpt-d", "kpt-e"]
                    kpt_skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]
                    # Split keypoints by confidence threshold for drawing.
                    conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) > conf_thresh).reshape(-1)
                    not_conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) <= conf_thresh).reshape(-1).astype(int)
                    conf_keypoints, conf_kpt_labels = np.array(
                        vis_keypoints)[conf_idx_list], np.array(kpt_labels)[conf_idx_list]
                    not_conf_keypoints, not_conf_kpt_labels = np.array(
                        vis_keypoints)[not_conf_idx_list], np.array(kpt_labels)[not_conf_idx_list]
                    cleaned_keypoints = np.array(
                        vis_keypoints.copy()).astype(np.float32)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                    #     score=score, bbox=_bbox, vis_keypoints=vis_keypoints, kpt_confidences=kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                    #     conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                    #     conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
                    # result=result
                    # printj.green(_bbox)
                    # printj.green(_bbox.to_int())
                    # printj.green(_bbox.to_int().to_list())
        printj.green.on_white(max_hook_score)
        # Draw the best pole detection (mask + labelled bbox) if any.
        if found_pole:
            result = draw_bool_mask(img=result, mask=pole_mask, color=[
                0, 255, 255],
                transparent=True
            )
            result = draw_bbox(img=result, bbox=pole_bbox,
                               text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='top')
            result = draw_bbox(img=result, bbox=pole_bbox,
                               text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
        # Draw the best hook detection (mask + keypoint inference overlay).
        if found_hook:
            result = draw_bool_mask(img=result, mask=hook_mask, color=[
                255, 255, 0],
                transparent=True
            )
            result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                                                     score=hook_score, bbox=hook_bbox, vis_keypoints=hook_vis_keypoints, kpt_confidences=hook_kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                                                     conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                                                     conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
            printj.purple(len_ab)
            if len_ab == 0:
                # NOTE(review): ``keypoints`` here is the leftover value from the
                # *last* iteration of the detection loop above, not necessarily
                # the best hook's keypoints (that is ``hook_keypoints``) -- verify.
                printj.green(keypoints)
            result = draw_info_box(result, len_ab, diameter)
            # img: np.ndarray, cleaned_keypoints, kpt_labels: List[str], kpt_skeleton: List[list],
            # score: float, bbox: BBox, vis_keypoints: list, kpt_confidences: list, conf_idx_list: list, not_conf_idx_list: list,
            # conf_keypoints, conf_kpt_labels, not_conf_keypoints, not_conf_kpt_labels,
            # conf_thresh: float = 0.3, show_bbox_border: bool = False, bbox_label_mode: str = 'euler', index_offset: int = 0, diameter=1
        # cv2.imshow('i', result)
        # # cv2.imwrite('i', result)
        # cv2.waitKey(10000)
        # quit_flag = cv_simple_image_viewer(img=result, preview_width=1000)
        # if quit_flag:
        #     break
        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        cv2.imwrite(f"{infer_dump_dir}/infer_key_seg/{img_filename}", result)
        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        # # img3, score_list, bbox_list, len_ab = inferer_key.infer_image(img=img2, draw_hm_collage=False, show_bbox_border=True, diameter=diameter)
        # if diameter<=0:
        #     length_ratio = np.inf
        # else:
        #     length_ratio = len_ab/diameter
        # printj.purple(length_ratio)
        # img4=img0
        # img4[ymin:ymax, xmin:xmax]=img3
        # font = cv2.FONT_HERSHEY_SIMPLEX
        # TopLeftCornerOfText = (10,50)
        # fontScale = 1
        # fontColor = (255,255,255)
        # lineType = 2
        # cv2.rectangle(img4, (5,10 ), (280,180), (0,0,0), -1)
        # cv2.rectangle(img4, (5,10 ), (280,180), (200,200,0), 2)
        # cv2.putText(img4, f'Len-ab: {round(len_ab,2)}', (10,50), font, fontScale, fontColor, lineType)
        # cv2.putText(img4, f'Diameter: {round(diameter,2)}', (10,100), font, fontScale, fontColor, lineType)
        # cv2.putText(img4, str(round(length_ratio,2))+' D', (10,150), font, fontScale, fontColor, lineType)
        # printj.purple(f'img0.shape = {img0.shape}')
        # printj.purple(f'img.shape = {img.shape}')
        # printj.purple(f'img2.shape = {img2.shape}')
        # printj.purple(f'img3.shape = {img3.shape}')
        # printj.purple(f'img4.shape = {img4.shape}')
        # printj.purple(img.shape)
        # printj.purple(img2.shape)
        # printj.purple(img3.shape)
        # printj.purple(img4.shape)
        # quit_flag = cv_simple_image_viewer(img=img4, preview_width=1000)
        # if quit_flag:
        #     break
        # continue
        # if len(score_list) == 0:
        # if all(score < thresh for score in score_list):
        #     count = count +1
        #     # printj.purple(img_path)
        #     printj.purple(score_list)
        #     printj.yellow.bold_on_black(f'Good count: {i+1-count}, Bad count: {count}, Total: {i+1}')
        #     dump_path = f"{infer_dump_dir}/bad/{img_filename}"
        #     # cv2.imwrite(dump_path, img)
        # else:
        #     # # printj.purple(score_list)
        #     # pass
        #     dump_path = f"{infer_dump_dir}/good/{img_filename}"
        #     cv2.imwrite(f"{infer_dump_dir}/good_cropped/{img_filename}", img3)
        #     cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        # # dump_path = f"{infer_dump_dir}/{img_filename}"
        # cv2.imwrite(dump_path, img4)
        # printj.yellow(f"({i+1}/{len(img_pathlist)}): Wrote {dump_path}")
        # for image in gt_dataset.images:
        #     if image.file_name == img_filename:
        #         image_id = image.id
        # for ann in gt_dataset.annotations:
        #     if ann.image_id == image_id:
        #         keys = Keypoint2D_List.to_point_list(ann.keypoints)
        #         gt_diameter = keys[7].distance(keys[8])
        #         gt_len_ab = keys[0].distance(keys[1])
        #         # gt_ratio = round(gt_diameter/gt_len_ab, 2)
        #         if gt_diameter<=0:
        #             gt_ratio = np.inf
        #         else:
        #             gt_ratio = round(gt_len_ab/gt_diameter, 2)
        #         # correct_ratio = int((length_ratio>4)==(gt_ratio>4))
        #         # incorrect_ratio = int((length_ratio>4)!=(gt_ratio>4))
        #         correct_above4d_ratio = int((length_ratio>4)==(gt_ratio>4)==True)
        #         incorrect_below4d_ratio = int((length_ratio>4)==(gt_ratio<4)==True)
        #         correct_below4d_ratio = int((length_ratio<4)==(gt_ratio<4)==True)
        #         incorrect_above4d_ratio = int((length_ratio<4)==(gt_ratio>4)==True)
        #         if gt_diameter<=0:
        #             error_diameter = np.inf
        #         else:
        #             error_diameter = (gt_diameter-diameter)/gt_diameter*100
        #         if gt_len_ab<=0:
        #             error_len_ab = np.inf
        #         else:
        #             error_len_ab = (gt_len_ab-len_ab)/gt_len_ab*100
        #         # incorrect_below4d_ratio = int((length_ratio>4)==(gt_ratio<4)==True)
        #         # correct_below4d_ratio = int((length_ratio<4)==(gt_ratio<4)==True)
        #         # incorrect_above4d_ratio = int((length_ratio<4)==(gt_ratio>4)==True)
        #         row = {'gt_d': round(gt_diameter, 2), 'pred_d': diameter,
        #                'gt_ab': round(gt_len_ab, 2), 'pred_ab': len_ab,
        #                'error_diameter': error_diameter,
        #                'error_len_ab': error_len_ab,
        #                'gt_ratio': gt_ratio, 'pred_ratio': length_ratio,
        #                'gt_ratio>4': int(gt_ratio>4), 'pred_ratio>4': int(length_ratio>4),
        #                'correct_above4d_ratio': correct_above4d_ratio,
        #                'incorrect_above4d_ratio': incorrect_above4d_ratio,
        #                'correct_below4d_ratio': correct_below4d_ratio,
        #                'incorrect_below4d_ratio': incorrect_below4d_ratio,
        #                'image_path':img_path,
        #                }
        #         df = df.append(pd.DataFrame(row, index =[img_filename]) )
        #         if correct_above4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(>4D) P(>4D)/{img_filename}", img4)
        #         if incorrect_above4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(>4D) P(<4D)/{img_filename}", img4)
        #         if incorrect_below4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(<4D) P(>4D)/{img_filename}", img4)
        #         if correct_below4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(<4D) P(<4D)/{img_filename}", img4)
    # Disabled post-processing: confusion matrix + Excel dump of the
    # ratio-comparison statistics collected in ``df`` above.
    # printj.blue(df)
    # # printj.cyan(df['correct_below4d_ratio'])
    # cm = pd.DataFrame(data=[],columns = ['p: more than 4D', 'p: less than 4D', 'Total'])
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['correct_above4d_ratio'].sum(),
    #                              'p: less than 4D':df['incorrect_above4d_ratio'].sum(),
    #                              'Total':df['correct_above4d_ratio'].sum()+df['incorrect_above4d_ratio'].sum()}, index =['g: more than 4D']) )
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['incorrect_below4d_ratio'].sum(),
    #                              'p: less than 4D':df['correct_below4d_ratio'].sum(),
    #                              'Total':df['incorrect_below4d_ratio'].sum()+df['correct_below4d_ratio'].sum()}, index =['g: less than 4D']) )
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['correct_above4d_ratio'].sum()+df['incorrect_below4d_ratio'].sum(),
    #                              'p: less than 4D':df['incorrect_above4d_ratio'].sum()+df['correct_below4d_ratio'].sum(),
    #                              'Total':df['correct_above4d_ratio'].sum()+df['incorrect_above4d_ratio'].sum()+df['incorrect_below4d_ratio'].sum()+df['correct_below4d_ratio'].sum()}, index =['Total']) )
    # printj.yellow(cm)
    # cm.to_excel(f"{os.path.abspath(f'{path}/..')}/cm_data.xlsx")
    # cm2 = pd.DataFrame(data=[],columns = ['correct', 'incorrect'])
    # cm2 = cm2.append(pd.DataFrame({'correct':df['correct_above4d_ratio'].sum(), 'incorrect':df['incorrect_above4d_ratio'].sum()}, index =['more than 4D']) )
    # cm2 = cm2.append(pd.DataFrame({'correct':df['correct_below4d_ratio'].sum(), 'incorrect':df['incorrect_below4d_ratio'].sum()}, index =['less than 4D']) )
    # printj.cyan(cm2)
    # df.to_excel(f"{os.path.abspath(f'{path}/..')}/test4d_data.xlsx")  # pip install openpyx
    # cm.to_excel(f"{os.path.abspath(f'{path}/..')}/cm_data.xlsx")  # pip install openpyx
    # total_time = datetime.now()-start
    # info = f'\nDetection count: {len(img_pathlist) - count}, Total: {len(img_pathlist)}'
    # info += f'\nNo detection count: {count}, Total: {len(img_pathlist)}'
    # # Starts # Write inference json
    # output_json_path = f"{infer_dump_dir}/infered_hook.json"
    # info += f'\nTotal inference time: {total_time} \nTime per image: {total_time/len(img_pathlist)}'
    # info += f'\n\nConfusion Matrix for ratio: \n{cm}'
    # printj.blue.bold_on_yellow(info)
    # text_file = f"{infer_dump_dir}/info.txt"
    # if os.path.exists(text_file):
    #     os.remove(text_file)
    # f= open(text_file,"w+")
    # f.write(info)
    # f.close()
    # printj.green.italic_on_black(infer_dump_dir)
    # from cocoeval_hook import run as evaluate
    # # evaluate(output_json_path)
    # os.system('spd-say "Folder Created"')
if __name__ == "__main__":
    # Build a timestamped output directory name from the chosen weights file
    # and kick off inference on the test-image folder.
    now = datetime.now()
    dt_string3 = now.strftime("%Y_%m_%d_%H_%M_%S")
    dt_string3 = now.strftime("%m_%d_%H")  # overrides the longer format above
    TEST_PATH = '/home/jitesh/3d/data/coco_data/hook_test/level_01'
    # TEST_PATH = '/home/jitesh/sekisui/teamviewer/sampled_data/VID_20200107_142503/img'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hook_real1/s_good'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hlk1_100_coco-data/img'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hlk2_200_coco-data/img'
    # GT_PATH = f'/home/jitesh/3d/data/coco_data/hook_test/json/hook.json'
    # GT_PATH = f'/home/jitesh/3d/data/coco_data/hook_test/json/hook4.json'
    # WEIGHT_PATH='/home/jitesh/3d/data/coco_data/hook_weights/seg_hook_pole/model_0049999.pth'
    # Only the LAST uncommented WEIGHT_PATH assignment takes effect.
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hlk1_100_coco-data/weights/Keypoints_R_50_1x_aug_cm_seg_val_5/model_0004999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0019999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0099999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0049999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_101_3x_aug_key_seg_val_1/model_0099999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_3_hook-only/model_0049999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # KEY_WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_weights/seg_hook_pole/model_0049999.pth'
    # e.g. '.../model_0099999.pth' -> iteration '0099999'
    iteration = WEIGHT_PATH.split('_')[-1].split('.')[0]
    # NOTE(review): the absolute index [6] only works for paths of exactly this
    # depth under /home/jitesh/3d/... -- fragile if WEIGHT_PATH moves.
    training_data_name = WEIGHT_PATH.split('/')[-2].split('_')[0] + '_'\
        + WEIGHT_PATH.split('/')[6].split('_')[-2] + '_'\
        + WEIGHT_PATH.split('/')[6].split('_')[-1]
    # training_model_name = WEIGHT_PATH.split('/')[-2].split('_')[0]
    kpt_thresh = 0.1
    bbox_thresh = 0.5
    img_size = 1024
    # key = f's{img_size}'
    key = f'hookpole'
    # key = f'hook'
    class_names=['hook', 'pole']
    # class_names=['hook']
    output_dir_path = f'{TEST_PATH}_{dt_string3}_{training_data_name}_{key}_{iteration}_{bbox_thresh}_vis_infer_output_50_1x'
    infer(path=TEST_PATH,
          weights_path=WEIGHT_PATH,
          # key='X'
          key='c',
          infer_dump_dir=output_dir_path,
          thresh=bbox_thresh,
          # model='mask_rcnn_R_50_FPN_1x',
          model='mask_rcnn_R_101_FPN_3x',
          size=img_size,
          class_names=class_names,
          # gt_path=GT_PATH,
          )
| Jitesh17/jaitool | jaitool/inference/models/hook/hook.py | hook.py | py | 25,575 | python | en | code | 0 | github-code | 36 |
7796314828 | #
# 最大公约数:
# 1. 更损相减法
# 2.辗转相除法
# 更损相减法
# def solution(a, b):
# while a != b:
# if a > b:
# a = a - b
# else:
# b = b - a
# return a
# 辗转相除法
def solution(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` (non-negative ints).

    Uses the Euclidean (division) algorithm. The explicit swaps in the
    original were redundant: ``a % b`` is always smaller than ``b``, and when
    ``a < b`` the first step simply swaps the operands (``a % b == a``), so
    the classic two-line loop is equivalent and clearer.
    """
    while b != 0:
        a, b = b, a % b
    return a
if __name__ == '__main__':
    # Quick sanity check: gcd(1071, 462) == 21.
    ans = solution(1071, 462)
    print(ans)
| 20130353/Leetcode | target_offer/大整数+经典算法/最大公约数.py | 最大公约数.py | py | 492 | python | en | code | 2 | github-code | 36 |
73387100264 | import time
from dataclasses import dataclass
from transmitter import sendEmail
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.firefox.options import Options as FFOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
@dataclass()
class AutoWeb:
    """Thin convenience wrapper around a Selenium WebDriver session.

    Supports Chrome (default) or Firefox (``ff=True``), optionally headless.
    ``start_session()`` must be called before any other method; it sets
    ``self.br`` to the live driver.
    """
    url: str             # page opened by start_session()
    ff: bool = False     # True -> Firefox/geckodriver, False -> Chrome
    headless: bool = False

    def start_session(self):
        # Build browser options, pick the matching driver binary, create the
        # driver and navigate to the configured URL.
        options = FFOptions() if self.ff else Options()
        if self.headless: options.add_argument('--headless')
        dr = '/Users/ashhadghazi/scripts/python/webdrivers/geckodriver' if self.ff \
            else '/Users/ashhadghazi/scripts/python/webdrivers/chromedriver'
        self.br = webdriver.Firefox(executable_path=dr, options=options) if self.ff else webdriver.Chrome(dr, chrome_options=options)
        self.br.get(self.url)

    def get(self, url):
        # Navigate the current session to *url*.
        self.br.get(url)

    def go_back(self):
        # Browser history: back one page.
        self.br.back()

    def refresh(self):
        # Reload the current page.
        self.br.refresh()

    def stop_session(self):
        # Quit the driver and close all windows.
        self.br.quit()

    def get_key(self, key, only_check_if_special=False):
        # Map symbolic key names to Selenium Keys constants. With
        # only_check_if_special=True, just report whether *key* is special.
        # Plain strings fall through unchanged.
        key_map = {
            'up': Keys.UP,
            'right': Keys.RIGHT,
            'down': Keys.DOWN,
            'left': Keys.LEFT,
            'enter': Keys.ENTER,
            'escape': Keys.ESCAPE
        }
        if only_check_if_special:
            return True if key in key_map.keys() else False
        return key_map[key] if key in key_map.keys() else key

    def get_by(self, by):
        # Map short locator-strategy names to Selenium By constants.
        # NOTE(review): this raises KeyError if passed an already-converted
        # By value other than By.ID/By.NAME/By.XPATH (whose string forms
        # coincide with the keys) -- see the double conversion below.
        by_map = {
            'id': By.ID,
            'css': By.CSS_SELECTOR,
            'name': By.NAME,
            'xpath': By.XPATH,
            'class': By.CLASS_NAME,
            'link_text': By.LINK_TEXT,
            'partial_link_text': By.PARTIAL_LINK_TEXT
        }
        return by_map[by]

    def element_exists(self, by, elem):
        # Returns the number of matching elements (truthy if any exist).
        by = self.get_by(by)
        return len(self.br.find_elements(by, elem))

    def get_element(self, by, elem):
        # Wait up to 5 s for a single matching element, then return it.
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_element_located((by, elem)))

    def get_elements(self, by, elem):
        # Wait up to 5 s for at least one match, then return all matches.
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_all_elements_located((by, elem)))

    def get_element_text(self, elem, by):
        # Like get_element but returns the element's text.
        # NOTE(review): parameter order (elem, by) is reversed relative to the
        # other methods -- easy to misuse; confirm call sites.
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_element_located((by, elem))).text

    def get_table_rows(self, by, elem):
        # Return all <li> children of the located container element.
        # NOTE(review): this converts *by* and then calls get_element, which
        # converts it AGAIN -- that second get_by raises KeyError for
        # strategies whose By constant string differs from the short key
        # (e.g. 'css' -> 'css selector'). Verify before relying on it.
        by = self.get_by(by)
        table = self.get_element(by, elem)
        return table.find_elements_by_tag_name('li')

    def notify(sbj, msg):
        # Send a self-addressed notification e-mail.
        # NOTE(review): missing the ``self`` parameter -- calling
        # instance.notify("subject", "msg") would pass the instance as *sbj*.
        # As written it only works when invoked as AutoWeb.notify(sbj, msg).
        email = "<email_address>"
        pwd = "<password>"
        sendEmail(email, pwd, [email], msg, sbj)

    def run_op(self, by, elem, op, op_value=''):
        # Perform a single UI operation: 'send' (type text / press a special
        # key), 'clear', or 'click' on the located element.
        # NOTE(review): same double get_by conversion issue as get_table_rows.
        by = self.get_by(by)
        if op == 'send':
            # For plain text, clear the field first; special keys (arrows,
            # enter, escape) are sent without clearing.
            if not self.get_key(op_value, only_check_if_special=True):
                self.get_element(by, elem).clear()
                time.sleep(0.2)
            self.get_element(by, elem).send_keys(self.get_key(op_value))
        elif op == 'clear':
            self.get_element(by, elem).clear()
        elif op == 'click':
            self.get_element(by, elem).click()
            time.sleep(0.2)

    def run_ops(self, ops_map):
        # Run a sequence of operations; each entry is a dict with keys
        # 'by', 'elem', 'op', 'op_value'.
        for op in ops_map:
            self.run_op(op['by'], op['elem'], op['op'], op['op_value'])
class Solution:
    def findMinHeightTrees(self, n: int, edges: list[list[int]]) -> list[int]:
        """Return the root(s) that minimize tree height (LeetCode 310).

        Repeatedly trims the current layer of leaves; the one or two nodes
        left at the end are the centroid(s) of the tree, which are exactly
        the minimum-height roots. Runs in O(n) time and space.

        Fix: the original annotated with ``List`` from ``typing`` without
        importing it, which raises NameError at class-definition time when
        the file is run standalone; built-in generics (``list[int]``) are
        used instead (PEP 585, Python 3.9+).
        """
        def make_graph(n, edges):
            # Adjacency sets: node -> set of neighbours.
            g = {v: set() for v in range(n)}
            for u, v in edges:
                g[u].add(v)
                g[v].add(u)
            return g

        g = make_graph(n, edges)
        # Degree <= 1 also covers the single-node tree (degree 0).
        leaves = [v for v in g.keys() if len(g[v]) <= 1]
        while len(g) > 2:
            new_leaves = []
            for v in leaves:
                u = g[v].pop()        # a leaf has exactly one neighbour
                g[u].remove(v)
                del g[v]
                if len(g[u]) == 1:    # u became a leaf for the next round
                    new_leaves.append(u)
            leaves = new_leaves
        return leaves
74946729704 | '''
Question link: https://leetcode.com/problems/string-to-integer-atoi/
Implement the myAtoi(string s) function, which converts a string to a 32-bit signed integer (similar to C/C++'s atoi function).
The algorithm for myAtoi(string s) is as follows:
Read in and ignore any leading whitespace.
Check if the next character (if not already at the end of the string) is '-' or '+'. Read this character in if it is either. This determines if the final result is negative or positive respectively. Assume the result is positive if neither is present.
Read in the next characters until the next non-digit character or the end of the input is reached. The rest of the string is ignored.
Convert these digits into an integer (i.e. "123" -> 123, "0032" -> 32). If no digits were read, then the integer is 0. Change the sign as necessary (from step 2).
If the integer is out of the 32-bit signed integer range [-231, 231 - 1], then clamp the integer so that it remains in the range. Specifically, integers less than -231 should be clamped to -231, and integers greater than 231 - 1 should be clamped to 231 - 1.
Return the integer as the final result.
'''
class Solution:
    def myAtoi(self, s: str) -> int:
        """Convert a string to a 32-bit signed integer (LeetCode 8, "atoi").

        Algorithm: skip leading whitespace, read an optional '+'/'-' sign,
        consume consecutive ASCII digits, and clamp the result to
        [-2**31, 2**31 - 1]. Any other leading character, or no digits at
        all, yields 0.

        Fixes over the original:
        - removed the blanket ``try/except`` that silently turned *any*
          error (including genuine bugs) into 0; empty/invalid inputs are
          handled explicitly instead;
        - removed the dead ``if s[i] != 0`` check (a str compared to an int
          is always unequal, so it never filtered anything);
        - digits are matched with an explicit ASCII range rather than
          ``str.isdigit()``, which also accepts non-ASCII digit characters
          that ``int()`` cannot parse.
        """
        INT_MIN, INT_MAX = -2 ** 31, 2 ** 31 - 1

        s = s.lstrip()
        if not s:
            return 0

        # Optional sign.
        sign = 1
        i = 0
        if s[0] in '+-':
            sign = -1 if s[0] == '-' else 1
            i = 1

        # Consume consecutive ASCII digits.
        digits_start = i
        while i < len(s) and '0' <= s[i] <= '9':
            i += 1
        if i == digits_start:
            # No digits after the optional sign (e.g. "", "+-12", "abc").
            return 0

        value = sign * int(s[digits_start:i])
        # Clamp to the 32-bit signed range.
        return max(INT_MIN, min(INT_MAX, value))
| BhatnagarKshitij/Algorithms | Leetcode/stringToAtoi.py | stringToAtoi.py | py | 2,103 | python | en | code | 2 | github-code | 36 |
32203986950 | import streamlit as st
from recipesnet.api import RecipesApi
from recipesnet.st_helpers import recip_ingr_widget
# Streamlit page: given a selected recipe, list the most similar recipes
# (clicking one re-selects it) and show its ingredients in a side panel.
st.set_page_config("Recipes net", layout="wide")
st.title("Recipes similarity")
st.write(
    """
In this section you can search what recipes are similar to an specific one.
"""
)

# Build the RecipesApi once and cache it in the session state so reruns
# (Streamlit re-executes the script on every interaction) reuse it.
with st.spinner("Loading data..."):
    if "api" not in st.session_state:
        st.session_state["api"] = RecipesApi()
api: RecipesApi = st.session_state.api

c1, c2 = st.columns(2)
with c1:
    st.header("Similar recipes")
    recipes = api.recipes
    # Preselect the recipe chosen on a previous interaction, if any.
    selected_recipe = st.selectbox(
        "Recipes similar to ...",
        recipes,
        recipes.index(st.session_state.recipe) if "recipe" in st.session_state else 0,
    )
    st.session_state.recipe = selected_recipe
    similar = api.similar_recipes(selected_recipe)
    # One button per similar recipe; clicking jumps the selection to it
    # (takes effect on the next rerun).
    i = 0
    for rec, score in similar:
        if st.button(f"{score:.1%}: {rec.capitalize()}", key=f"similarity_btn_{i}"):
            st.session_state.recipe = rec
        i += 1
with c2:
    # Ingredients panel for the currently selected recipe.
    recip_ingr_widget()
| jmorgadov/complex-recipes-net | recipesnet/pages/Similarity.py | Similarity.py | py | 1,054 | python | en | code | 0 | github-code | 36 |
70616752744 | import constants
from flask import jsonify, make_response
def getData(request):
    """Translate the JSON weather fields of *request* into numeric encodings.

    Reads 'outlook', 'temp', 'humidity' and 'wind' from the request body and
    returns a four-element list of the corresponding values taken from the
    lookup tables in ``constants``.
    """
    body = request.json
    return [
        constants.OUTLOOK_VALUES[body['outlook']],
        constants.TEMP_VALUES[body['temp']],
        constants.HUMIDITY_VALUES[body['humidity']],
        constants.WIND_VALUES[body['wind']],
    ]
def makeResponse(result, modelType):
    """Build the JSON success response for a prediction.

    *result* is translated through ``constants.PLAY_VALUES``; *modelType*
    is echoed back unchanged so the client knows which model answered.
    """
    return make_response(jsonify(
        {
            'message': 'Success',
            'data': {
                'modelType': modelType,
                'play': constants.PLAY_VALUES[result]
            }
        }
)) | mgstabrani/play-tennis-model-service-python | general.py | general.py | py | 675 | python | en | code | 0 | github-code | 36 |
2114773970 | import time
def lista(N):
    """Build and return the full list [0, 1, ..., N-1] eagerly in memory."""
    return list(range(N))
def lista_yield(N):
    """Lazily yield the integers 0 .. N-1 one at a time (a generator)."""
    yield from range(N)
# Demonstration: eager list vs. lazy generator.
print(lista(10))
# NOTE: this prints the generator object's repr, not its values.
print(lista_yield(10))
Generador = lista_yield(10)
#0 1 2 3 4 5 6 7 8 9
for x in Generador:
    print(x)
Generador_2 = lista_yield(15)
# Materialise the generator into a list before printing its values.
print( list(Generador_2) )
# infinite generator
def generador_infinito_inef():
    """Endless powers of two, recomputing 2**x at each step (inefficient)."""
    exponent = 0
    while True:
        yield 2 ** exponent
        exponent += 1
def generador_infinito_ef():
    """Endless powers of two, doubling the previous value (efficient)."""
    value = 1
    while True:
        yield value
        value *= 2
# WARNING: iterating an infinite generator -- this loop never terminates.
generador_3 = generador_infinito_ef()
for x in generador_3:
print(x) | nicooffee/ay-paradigmas-2020-01 | ejercicios_ayudantia-2020/nico_ejer/2020-05-11/yield.py | yield.py | py | 614 | python | pt | code | 1 | github-code | 36 |
15772164828 | from django.shortcuts import render,redirect
from axf.models import SlideShow, Cart,MainDescription, Product,CategorieGroup,ChildGroup,User,Address,Order
from django.contrib.auth import logout
import random
from axf.sms import send_sms
from django.http import JsonResponse
import uuid
# Create your views here.
def home(request):
    """Render the home page with the slideshow and the main sections."""
    # Fetch the carousel (slideshow) entries.
    slideList = SlideShow.objects.all()
    # Fetch the five main description blocks.
    mainList = MainDescription.objects.all()
    for item in mainList:
        # Resolve each block's three featured product ids into Product objects
        # (product1..3 initially hold ids; they are replaced in place).
        products = Product.objects.filter(categoryId=item.categoryId)
        item.product1 = products.get(productId=item.product1)
        item.product2 = products.get(productId=item.product2)
        item.product3 = products.get(productId=item.product3)
    return render(request, "home/home.html", {"slideList":slideList, "mainList":mainList})
def market(request, gid, cid, sid):
    """Render the market page.

    gid -- category (group) id; cid -- child-group id, "0" meaning all;
    sid -- sort mode: "1" default order, "2" price ascending, "3" price descending.
    """
    # Left-hand category groups.
    leftCategorieList = CategorieGroup.objects.all()
    # Products of the selected category.
    products = Product.objects.filter(categoryId=gid)
    # Narrow to a child group unless "0" (= all).
    if cid != "0":
        products = products.filter(childId=cid)
    # Sorting.
    if sid == "1":
        # products = products.order_by()
        pass
    elif sid == "2":
        products = products.order_by("price")
    elif sid == "3":
        products = products.order_by("-price")
    # Child groups of the selected category.
    childs = ChildGroup.objects.filter(categorie__categorieId=gid)
    return render(request, "market/market.html", {"leftCategorieList":leftCategorieList, "products":products, "childs":childs, "gid":gid, "cid":cid})
def cart(request):
    """Render the cart page; redirect to /login/ when not authenticated."""
    # Check login state via the 'token' cookie.
    tokenValue = request.COOKIES.get("token")
    if not tokenValue:
        # No token cookie -> not logged in.
        return redirect("/login/")
    try:
        user = User.objects.get(tokenValue=tokenValue)
    except User.DoesNotExist as e:
        return redirect("/login/")
    carts = Cart.objects.filter(user__tokenValue=tokenValue)
    return render(request, "cart/cart.html", {"carts":carts})
def mine(request):
    """Render the profile page; the session phone number marks login state."""
    # The default string ('not logged in' in Chinese) is shown to anonymous users.
    phone = request.session.get('phoneNum',default='未登录')
    return render(request, "mine/mine.html",{'phone':phone})
# def login(request):
# if request.method == 'GET':
# if request.is_ajax():
# strNum = '0123456789'
# rand_str=''
# for i in range(0,6):
# rand_str += strNum[random.randrange(0,len(strNum))]
# msg ="您的验证码是:%s。请不要把验证码泄露给其他人。"%rand_str
# phone = request.GET.get('phoneNum')
# send_sms(msg,phone)
# #存入session
# request.session['code'] = rand_str
# return JsonResponse({'data':'ok'})
# else:
# return render(request,'mine/login.html')
# else:
# phone = request.POST.get('username')
# passwd = request.POST.get('passwd')
# code = request.session.get('code')
#
# if passwd == code:
# uuidStr=str(uuid.uuid4())
# try:
# user= User.objects.get(pk=phone)
# user.tokenValue = uuidStr
# user.save()
# except User.DoesNotExist as e:
# user = User.create(phone,None,uuidStr,'000000')
# user.save()
# request.session['phoneNum'] = phone
# return redirect('/mine/')
# else:
# return redirect('/login/')
def login(request):
    """SMS-code login view.

    GET (AJAX): generate a 6-digit code, SMS it to the given phone number
    and stash it in the session.  GET (plain): render the login form.
    POST: compare the submitted code against the session; on success,
    create/refresh the user's token, remember the phone in the session and
    set the token cookie.
    """
    if request.method == "GET":
        if request.is_ajax():
            # Generate the verification code.
            strNum = '1234567890'
            # Pick 6 random digits.
            rand_str = ''
            for i in range(0, 6):
                rand_str += strNum[random.randrange(0, len(strNum))]
            msg = "您的验证码是:%s。请不要把验证码泄露给其他人。"%rand_str
            phone = request.GET.get("phoneNum")
            send_sms(msg, phone)
            # print('*************',rand_str)
            # Store the code in the session for the POST step.
            request.session["code"] = rand_str
            return JsonResponse({"data":"ok"})
        else:
            return render(request, "mine/login.html")
    else:
        phone = request.POST.get("username")
        passwd = request.POST.get("passwd")
        code = request.session.get("code")
        if passwd == code:
            # Code verified; check whether the user already exists.
            uuidStr = str(uuid.uuid4())
            try:
                user = User.objects.get(pk=phone)
                user.tokenValue = uuidStr
                user.save()
            except User.DoesNotExist as e:
                # First login -> register a new user.
                user = User.create(phone,None,uuidStr,"sunck good")
                user.save()
            request.session["phoneNum"] = phone
            # Write the token value into a cookie for later auth checks.
            response = redirect("/mine/")
            response.set_cookie('token',uuidStr)
            return response
        else:
            # Verification failed.
            return redirect("/login/")
def quit(request):
    """Log the user out and return to the profile page."""
    logout(request)
    return redirect('/mine/')
def showaddress(request):
    """List the addresses belonging to the logged-in user's phone number."""
    addrList= Address.objects.filter(user__phoneNum=request.session.get('phoneNum'))
    return render(request,'mine/showaddress.html',{'addrList':addrList})
def addaddr(request):
    """GET: show the add-address form.  POST: create an Address for the
    logged-in user (identified by the session phone number) and redirect
    to the profile page."""
    if request.method == 'GET':
        return render(request, 'mine/addaddr.html')
    else:
        name = request.POST.get('name')
        # '0' means False, anything else True.  BUGFIX: the original set
        # `sex = True` unconditionally after the `if`, so the form value
        # was always discarded.
        sex = request.POST.get('sex') != '0'
        telephone = request.POST.get('phone')
        province = request.POST.get('province')
        city = request.POST.get('city')
        county = request.POST.get('county')
        street = request.POST.get('street')
        postCode = request.POST.get('postCode')
        detailAddress = request.POST.get('detailAddress')
        phone = request.session.get('phoneNum')
        user = User.objects.get(pk=phone)
        # Full address string: province + city + county + street + postCode + detail.
        alladdress = province + city + county + street + postCode + detailAddress
        address = Address.create(name, sex, telephone, postCode, alladdress,
                                 province, city, county, street, detailAddress, user)
        address.save()
        return redirect('/mine/')
def changecart(request,flag):
    """AJAX cart mutation: flag '1' decrements, '2' increments a product.

    Returns JSON {'error': 0|1|2, 'num': <cart quantity>}; error 1 means
    no token cookie, error 2 an unknown token.  Product stock
    (``storeNums``, stored as a string) is adjusted in the opposite
    direction of the cart quantity.
    """
    num = 1
    if flag == '1':
        num = -1
    # Check login state via the 'token' cookie.
    tokenValue=request.COOKIES.get('token')
    if not tokenValue:
        return JsonResponse({'error':1})
    try:
        user = User.objects.get(tokenValue=tokenValue)
    except User.DoesNotExist as e:
        return JsonResponse({'error':2})
    gid = request.POST.get('gid')
    cid = request.POST.get('cid')
    pid = request.POST.get('pid')
    product = Product.objects.filter(categoryId=gid,childId=cid).get(productId=pid)
    try:
        cart = Cart.objects.filter(user__tokenValue=tokenValue).filter(product__categoryId=gid).filter(product__childId=cid).get(product__productId=pid)
        if flag == '2':
            # Out of stock: report the current quantity unchanged.
            if product.storeNums == '0':
                return JsonResponse ({'error':0,'num':cart.num})
        cart.num = cart.num + num
        # storeNums is kept as a string; adjust inversely to the cart change.
        product.storeNums = str(int(product.storeNums) - num)
        product.save()
        if cart.num == 0:
            cart.delete()
        else:
            cart.save()
    except Cart.DoesNotExist as e:
        # No cart row yet; a decrement on nothing is a no-op.
        if flag == '1':
            return JsonResponse({'error':0,'num':0})
        # Reuse the user's open order (flag=0) or create a new one.
        # NOTE(review): Address pk=3 looks like a hard-coded placeholder
        # default address -- confirm against the data model.
        try:
            order = Order.orders2.filter(user__tokenValue=tokenValue).get(flag=0)
        except Order.DoesNotExist as e:
            orderId = str(uuid.uuid4())
            address = Address.objects.get(pk=3)
            order = Order.create(orderId,user,address,0)
            order.save()
        cart = Cart.create(user,product,order,1)
        cart.save()
        product.storeNums = str(int(product.storeNums) - num)
        product.save()
    return JsonResponse({'error':0,'num':cart.num})
def changecart2(request):
    """AJAX: toggle a cart row's checked state and return the new value."""
    cartid = request.POST.get("cartid")
    cart = Cart.objects.get(pk=cartid)
    cart.isCheck = not cart.isCheck
    cart.save()
    return JsonResponse({'error':0,'flag':cart.isCheck})
def qOrder(request):
    """AJAX: close the user's open order and start a fresh empty one.

    The open order (flag=False) is marked submitted (flag=1); its checked
    cart rows get isOrder=False, then every remaining cart row of the user
    is moved onto a newly created open order.
    """
    tokenValue = request.COOKIES.get('token')
    order = Order.orders2.filter(user__tokenValue=tokenValue).get(flag=False)
    order.flag = 1
    order.save()
    carts = Cart.objects.filter(user__tokenValue=tokenValue).filter(order=order).filter(isCheck=True)
    for cart in carts:
        cart.isOrder = False
        cart.save()
    # NOTE(review): Address pk=3 appears to be a hard-coded default address.
    newOrder = Order.create(str(uuid.uuid4()),User.objects.get(tokenValue=tokenValue),Address.objects.get(pk=3),0)
    newOrder.save()
    oldCarts = Cart.objects.filter(user__tokenValue=tokenValue)
    for cart in oldCarts:
        cart.order = newOrder
        cart.save()
    return JsonResponse({'error':0})
| qwewangjian/Xgd | axf/views.py | views.py | py | 8,953 | python | en | code | 0 | github-code | 36 |
17878539903 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Helper functions for the workflows"""
from distutils.version import StrictVersion
from builtins import range
def _tofloat(inlist):
if isinstance(inlist, (list, tuple)):
return [float(el) for el in inlist]
else:
return float(inlist)
def fmri_getidx(in_file, start_idx, stop_idx):
    """Heuristics to set the start and stop indices of fMRI series.

    An undefined or out-of-range ``start_idx`` falls back to 0; a
    ``stop_idx`` that is undefined, below ``start_idx`` or beyond the
    last volume falls back to the last volume index.
    """
    from nibabel import load
    from nipype.interfaces.base import isdefined
    nvols = load(in_file).shape[3]
    max_idx = nvols - 1
    if start_idx is None or not isdefined(start_idx) or start_idx < 0 or start_idx > max_idx:
        start_idx = 0
    # BUGFIX: the original chained test `max_idx < stop_idx < start_idx`
    # only fired when BOTH conditions held simultaneously; either an
    # out-of-range or an inverted stop index alone must trigger the fallback.
    if (
        stop_idx is None
        or not isdefined(stop_idx)
        or stop_idx < start_idx
        or stop_idx > max_idx
    ):
        stop_idx = max_idx
    return start_idx, stop_idx
def fwhm_dict(fwhm):
    """Convert a 4-element FWHM sequence (x, y, z, avg) into a labelled dict."""
    keys = ('fwhm_x', 'fwhm_y', 'fwhm_z', 'fwhm_avg')
    return dict(zip(keys, (float(value) for value in fwhm)))
def thresh_image(in_file, thres=0.5, out_file=None):
    """Binarise a NIfTI image: voxels < *thres* become 0, the rest become 1.

    When *out_file* is None the result is written next to the CWD as
    ``<basename>_thresh<ext>`` (the double extension ``.nii.gz`` is kept
    intact).  Returns the output path.
    """
    import os.path as op
    import nibabel as nb
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            # Peel the second extension so '.nii.gz' stays together.
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_thresh{}'.format(fname, ext))
    im = nb.load(in_file)
    data = im.get_data()
    # Threshold then binarise in place.
    data[data < thres] = 0
    data[data > 0] = 1
    nb.Nifti1Image(
        data, im.affine, im.header).to_filename(out_file)
    return out_file
def spectrum_mask(size):
    """Build a binary mask of shape *size* for filtering a 2D spectrum.

    The mask is 1 where the normalised distance from the centre pixel is
    at most 0.6 of the maximum distance and 0 elsewhere, keeping the
    region around the centre and suppressing the periphery.
    """
    import numpy as np
    # `scipy.ndimage.morphology` is deprecated (removed in SciPy 1.15);
    # import from the public `scipy.ndimage` namespace instead.
    from scipy.ndimage import distance_transform_edt as distance
    ftmask = np.ones(size)
    # Zero only the centre pixel so the EDT measures each pixel's
    # distance to the centre.
    ftmask[size[0] // 2, size[1] // 2] = 0
    # Distance transform, normalised to [0, 1].
    ftmask = distance(ftmask)
    ftmask /= ftmask.max()
    # Invert so the centre is 1 and the farthest pixel is 0 ...
    ftmask *= -1.0
    ftmask += 1.0
    # ... then binarise: inverted value >= 0.4 -> 1 (keep), else 0 (drop).
    ftmask[ftmask >= 0.4] = 1
    ftmask[ftmask < 1] = 0
    return ftmask
def slice_wise_fft(in_file, ftmask=None, spike_thres=3., out_prefix=None):
    """Search for spikes in slices using the 2D FFT.

    For every (slice, timepoint) of the 4D series, the real part of the 2D
    FFT is median-filtered, masked with *ftmask*, robustly z-scored across
    time, and slices whose thresholded z-map survives a binary erosion with
    more than 10 pixels are flagged as spikes.

    Returns (number of spikes, path to the spikes TSV, path to the z-scored
    FFT NIfTI file).
    """
    import os.path as op
    import numpy as np
    import nibabel as nb
    from mriqc.workflows.utils import spectrum_mask
    from scipy.ndimage.filters import median_filter
    from scipy.ndimage import generate_binary_structure, binary_erosion
    from statsmodels.robust.scale import mad
    if out_prefix is None:
        # Derive the output prefix from the input basename (strip .nii.gz).
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, _ = op.splitext(fname)
        out_prefix = op.abspath(fname)
    func_data = nb.load(in_file).get_data()
    if ftmask is None:
        ftmask = spectrum_mask(tuple(func_data.shape[:2]))
    # Per-timepoint, per-slice: masked, median-filtered real FFT.
    fft_data = []
    for t in range(func_data.shape[-1]):
        func_frame = func_data[..., t]
        fft_slices = []
        for z in range(func_frame.shape[2]):
            sl = func_frame[..., z]
            fftsl = median_filter(np.real(np.fft.fft2(sl)).astype(np.float32),
                                  size=(5, 5), mode='constant') * ftmask
            fft_slices.append(fftsl)
        fft_data.append(np.stack(fft_slices, axis=-1))
    # Recompose the 4D FFT timeseries
    fft_data = np.stack(fft_data, -1)
    # Z-score across t, using robust statistics
    mu = np.median(fft_data, axis=3)
    sigma = np.stack([mad(fft_data, axis=3)] * fft_data.shape[-1], -1)
    # Avoid division by (near-)zero MAD values.
    idxs = np.where(np.abs(sigma) > 1e-4)
    fft_zscored = fft_data - mu[..., np.newaxis]
    fft_zscored[idxs] /= sigma[idxs]
    # save fft z-scored
    out_fft = op.abspath(out_prefix + '_zsfft.nii.gz')
    nii = nb.Nifti1Image(fft_zscored.astype(np.float32), np.eye(4), None)
    nii.to_filename(out_fft)
    # Find peaks
    spikes_list = []
    for t in range(fft_zscored.shape[-1]):
        fft_frame = fft_zscored[..., t]
        for z in range(fft_frame.shape[-1]):
            sl = fft_frame[..., z]
            if np.all(sl < spike_thres):
                continue
            # Any zscore over spike_thres will be called a spike
            sl[sl <= spike_thres] = 0
            sl[sl > 0] = 1
            # Erode peaks and see how many survive
            struc = generate_binary_structure(2, 2)
            sl = binary_erosion(sl.astype(np.uint8), structure=struc).astype(np.uint8)
            if sl.sum() > 10:
                spikes_list.append((t, z))
    out_spikes = op.abspath(out_prefix + '_spikes.tsv')
    np.savetxt(out_spikes, spikes_list, fmt=b'%d', delimiter=b'\t', header='TR\tZ')
    return len(spikes_list), out_spikes, out_fft
def get_fwhmx():
    """Build an AFNI FWHMx interface (combine + detrend enabled).

    For AFNI >= 2017.2.3 the classic FWHM output is requested explicitly
    via ``-ShowMeClassicFWHM``.

    NOTE(review): ``StrictVersion`` comes from ``distutils``, which was
    removed in Python 3.12 -- this module needs migrating (e.g. to
    ``packaging.version``) before running on modern interpreters.
    """
    from nipype.interfaces.afni import Info, FWHMx
    fwhm_args = {"combine": True,
                 "detrend": True}
    # Info.version() yields the (major, minor, micro) AFNI version parts.
    afni_version = StrictVersion('%s.%s.%s' % Info.version())
    if afni_version >= StrictVersion("2017.2.3"):
        fwhm_args['args'] = '-ShowMeClassicFWHM'
    fwhm_interface = FWHMx(**fwhm_args)
    return fwhm_interface
| pGarciaS/PREEMACS | scripts/mriqc/mriqc/workflows/utils.py | utils.py | py | 5,355 | python | en | code | 8 | github-code | 36 |
31095405505 | #You'r a robot?
from random import randint, randrange
from PIL import Image, ImageDraw, ImageFont
import os
import textwrap
class CreateCaptcha:
    """Generate, render and validate simple text CAPTCHAs.

    ``Gerar`` produces a random string of uppercase letters (A-Z) and
    digits (1-9), ``GerarImagem`` renders it with noise to
    ``static/image/element_image.png``, and ``Validar`` checks a user's
    answer against the generated string.
    """

    def __init__(self):
        self.valido = False
        self.l = []            # characters of the most recent captcha
        self.width = 300
        self.height = 150
        self.font_size = 60    # larger font size for readability

    def Gerar(self):
        """Create and return a new captcha string of length 4 or 8.

        ``randrange(4, 12, 4)`` picks the length from {4, 8}.  Each
        character is either an uppercase letter (chr 65-90) or a digit
        1-9 (chr 49-57).  BUGFIX: the character list is rebuilt on every
        call -- the original kept appending to ``self.l``, so successive
        captchas accumulated all previous characters.
        """
        length = randrange(4, 12, 4)
        chars = []
        for _ in range(length):
            if randint(0, 1) == 0:
                chars.append(chr(randint(65, 90)))   # uppercase letter
            else:
                chars.append(chr(randint(49, 57)))   # digit 1-9
        self.l = chars
        return ''.join(chars)

    def Validar(self, MeuGerador, ValorUser):
        """Return True iff *ValorUser* matches the generated *MeuGerador*.

        BUGFIX: the result no longer sticks at True after the first
        success -- each call re-evaluates the comparison.
        """
        self.valido = (ValorUser == MeuGerador)
        return self.valido

    def GerarImagem(self, text):
        """Render *text* onto a noisy PNG saved as static/image/element_image.png."""
        image = Image.new('RGB', (self.width, self.height), color=(255, 255, 255))
        draw = ImageDraw.Draw(image)
        # Prefer a scalable TrueType font; fall back to PIL's bitmap default
        # when arial.ttf is unavailable (e.g. on non-Windows systems).
        # The original unconditionally required arial.ttf and its
        # load_default() result was immediately overwritten (dead code).
        try:
            font = ImageFont.truetype("arial.ttf", self.font_size)
        except OSError:
            font = ImageFont.load_default()
        # Centre the text inside the image using its bounding box.
        text_bbox = draw.textbbox((0, 0), text, font)
        x = (self.width - text_bbox[2] - text_bbox[0]) / 2
        y = (self.height - text_bbox[3] - text_bbox[1]) / 2
        draw.text((x, y), text, fill=(0, 0, 0), font=font)
        # Strike-through lines to confuse OCR.
        for _ in range(10):
            x1 = randint(0, self.width - 1)
            y1 = randint(0, self.height - 1)
            x2 = randint(0, self.width - 1)
            y2 = randint(0, self.height - 1)
            draw.line([(x1, y1), (x2, y2)], fill=(0, 0, 0), width=2)
        # Random dot noise in the background.
        for _ in range(1000):
            x = randint(0, self.width - 1)
            y = randint(0, self.height - 1)
            draw.point((x, y), fill=(0, 0, 0))
        # Save under <cwd>/static/image/element_image.png -- os.path.join
        # replaces the original hard-coded Windows "static\\image" separator.
        caminho = os.path.join(os.getcwd(), "static", "image")
        os.makedirs(caminho, exist_ok=True)
        element_image_path = os.path.join(caminho, "element_image.png")
        image.save(element_image_path)
if __name__ == "__main__":
c = CreateCaptcha()
x = c.Gerar()
c.GerarImagem(x)
print(x)
y = input("Copie aqui ou digite errado: ").upper()
print(c.Validar(x, y))
| Jv131103/ProjectCaptcha | cp.py | cp.py | py | 2,968 | python | pt | code | 0 | github-code | 36 |
4978717366 | #!/usr/bin/python3
from PyQt5 import QtCore
from PyQt5.QtCore import QSize, QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QMainWindow,QWidget, QPushButton
from PyQt5.QtGui import QIcon, QPixmap, QCursor
import sys, os, time
from playsound import playsound
# Absolute directory of this script (with trailing slash) for media paths.
dirname = os.path.dirname(os.path.abspath(__file__)) + '/'
class VideoPlayer(QMainWindow):
    def __init__(self):
        """Build the whole UI: menu bar, background video player, timer bar,
        Pomodoro mode radios, video-selection grid and volume slider."""
        super().__init__()
        self.setWindowTitle("Study With Me")
        self.setWindowIcon(QIcon(QPixmap(dirname+'media/icons/coding.svg')))
        self.setFixedSize(1900,1000)
        # Keep the window responsive while the per-second timer loop runs.
        QApplication.processEvents()
        menubar = self.menuBar()
        menubar.setObjectName('menu')
        file_menu = menubar.addMenu('&File')
        help_menu = menubar.addMenu('&Help')
        # Help -> ShortCuts dialog.
        help = QAction(QIcon(dirname+'media/icons/information.svg'), 'ShortCuts', self)
        help.triggered.connect(self.help_info)
        help_menu.addAction(help)
        # File -> open a user-supplied video file.
        file_video = QAction(QIcon(dirname+'media/icons/video.svg'), 'Select videofile', self)
        file_video.triggered.connect(self.user_video)
        file_menu.addAction(file_video)
        #VIDEOPLAYER
        '''
        Installing VideoPlayer settings
        '''
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        videoWidget = QVideoWidget()
        videoWidget.setFixedSize(1700,1000)
        self.mediaPlayer.setVideoOutput(videoWidget)
        # Default background video played on startup.
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(dirname+'media/videos/video1.mp4')))
        self.mediaPlayer.play()
        '''
        Installing Central Widget for Window
        '''
        wid = QWidget(self)
        self.setCentralWidget(wid)
        layout = QHBoxLayout()
        #CONFIGURATION SIDEBAR
        self.sideLayout = QVBoxLayout()
        self.sideLayout.setObjectName('sideLayout')
        #CONFIGURATION TIMERBAR
        '''
        Timer_is_run variable created for run report timer
        '''
        self.timer_is_run = False
        self.timerLayout = QHBoxLayout()
        # Countdown display: MM : SS labels.
        self.count_minute = QLabel('25')
        self.count_minute.setObjectName('counter')
        self.count_second = QLabel('00')
        self.count_second.setObjectName('counter')
        self.count_separater = QLabel(':')
        self.count_separater.setObjectName('counter')
        self.start_btn = QPushButton('START')
        self.start_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.start_btn.setObjectName('start_btn')
        self.restart_btn = QPushButton()
        self.restart_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.restart_btn.setIcon(QIcon(QPixmap(dirname+'media/icons/restart.png')))
        self.restart_btn.setIconSize(QSize(40,40))
        self.restart_btn.setObjectName('restart_btn')
        self.pause_btn = QPushButton('PAUSE')
        self.pause_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.pause_btn.setObjectName('start_btn')
        # Stack
        '''
        Stack_btn created for switch two buttons - restart button and start button
        '''
        self.stack_btn = QStackedWidget()
        self.stack_btn.addWidget(self.start_btn)
        self.stack_btn.addWidget(self.pause_btn)
        # Selected default button for stack
        self.stack_btn.setCurrentWidget(self.start_btn)
        self.timerLayout.addWidget(self.count_minute)
        self.timerLayout.addWidget(self.count_separater)
        self.timerLayout.addWidget(self.count_second)
        '''
        Stretch created for remove empty space between timer labels and timer buttons
        '''
        self.timerLayout.addStretch()
        self.timerLayout.addWidget(self.stack_btn)
        self.timerLayout.addWidget(self.restart_btn)
        self.sideLayout.addLayout(self.timerLayout)
        self.start_btn.clicked.connect(self.start)
        self.restart_btn.clicked.connect(self.restart)
        self.pause_btn.clicked.connect(self.pause)
        #CONFIGURATION RADIO BUTTONS IN GROUPBOX
        self.radio_layout = QHBoxLayout()
        self.radio_group = QGroupBox()
        self.radio_group.setObjectName('radio_group')
        self.pomodoro_rad = QRadioButton('Pomodoro')
        self.pomodoro_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.pomodoro_rad.setChecked(True)
        self.short_rad = QRadioButton('Short Break')
        self.short_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.long_rad = QRadioButton('Long Break')
        self.long_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.radio_layout.addWidget(self.pomodoro_rad)
        self.radio_layout.addWidget(self.short_rad)
        self.radio_layout.addWidget(self.long_rad)
        self.radio_group.setLayout(self.radio_layout)
        self.sideLayout.addWidget(self.radio_group)
        self.sideLayout.addStretch()
        # Each mode sets the minutes label (25 / 5 / 15).
        self.pomodoro_rad.clicked.connect(lambda x: self.set_time('25'))
        self.short_rad.clicked.connect(lambda x: self.set_time('5'))
        self.long_rad.clicked.connect(lambda x: self.set_time('15'))
        #CONFIGURATION VIDEO-BUTTONS FOR SELECT BACKGROUND VIDEO
        self.grid_videos = QGridLayout()
        self.create_video_button(icon=f'{dirname}media/icons/study.svg', url=f'{dirname}media/videos/video1.mp4', row=0, column=0, tip='Study with me', cut='1')
        self.create_video_button(icon=f'{dirname}media/icons/abstract.svg', url=f'{dirname}media/videos/video2.mp4', row=0, column=1, tip='Abstaction', cut='2')
        self.create_video_button(icon=f'{dirname}media/icons/landscape.svg', url=f'{dirname}media/videos/video3.mp4', row=0, column=2, tip='River', cut='3')
        self.create_video_button(icon=f'{dirname}media/icons/forest.svg', url=f'{dirname}media/videos/video4.mp4', row=0, column=3, tip='Nature', cut='4')
        self.create_video_button(icon=f'{dirname}media/icons/mountain.svg', url=f'{dirname}media/videos/video5.mp4', row=1, column=0, tip='Mountains', cut='5')
        self.create_video_button(icon=f'{dirname}media/icons/fire.svg', url=f'{dirname}media/videos/video6.mp4', row=1, column=1, tip='Campfire', cut='6')
        self.create_video_button(icon=f'{dirname}media/icons/programming.svg', url=f'{dirname}media/videos/video7.mp4', row=1, column=2, tip='Coding Time', cut='7')
        self.create_video_button(icon=f'{dirname}media/icons/galaxy.svg', url=f'{dirname}media/videos/video8.mp4', row=1, column=3, tip='Space', cut='8')
        #CONFIGURATION VOLUME SLIDER
        self.volumeLayout = QHBoxLayout()
        self.vol_ico = QPushButton('')
        # NOTE(review): icon path is relative here, unlike the dirname-based
        # paths above -- works only when CWD is the script directory.
        self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume.svg')))
        self.vol_ico.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        # Clicking the speaker icon mutes by dragging the slider to 0.
        self.vol_ico.clicked.connect(lambda: self.vol_slider.setValue(0))
        self.vol_ico.setIconSize(QSize(40,40))
        self.vol_ico.setObjectName('vol_ico')
        self.vol_slider = QSlider()
        self.vol_slider.setOrientation(QtCore.Qt.Horizontal)
        self.vol_slider.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        # SET DEFAULT VOLUME LEVEL
        self.vol_slider.setValue(90)
        self.vol_slider.valueChanged.connect(self.change_volume)
        self.volumeLayout.addWidget(self.vol_ico)
        self.volumeLayout.addWidget(self.vol_slider)
        self.sideLayout.addLayout(self.volumeLayout)
        self.sideLayout.addStretch()
        self.sideLayout.addLayout(self.grid_videos)
        self.sideLayout.addStretch(10)
        layout.addLayout(self.sideLayout)
        layout.addWidget(videoWidget)
        wid.setLayout(layout)
        self.x = 0 # for the mouse wheel (NOTE(review): apparently unused)
        # Keyboard shortcuts.
        help.setShortcut('Ctrl+I')
        file_video.setShortcut('Ctrl+O')
        self.vol_ico.setShortcut('Ctrl+M')
        self.long_rad.setShortcut('Ctrl+L')
        self.short_rad.setShortcut('Ctrl+S')
        self.pomodoro_rad.setShortcut('Ctrl+P')
        self.restart_btn.setShortcut('Esc')
        self.pause_btn.setShortcut('SPACE')
        self.start_btn.setShortcut('SPACE')
# APP LOGIC
'''
This functions accept five arguments for create button.
1. Icon take the path for icon button
2. Url take the video path
3. Row and Column set place for object
4. Tip tells about icon video
'''
    def create_video_button(self, icon, url, row, column, tip, cut):
        """Add a background-video selection button to the grid.

        icon -- path of the button icon; url -- path of the video it plays;
        row/column -- grid cell; tip -- tooltip text; cut -- keyboard shortcut.
        """
        self.button = QPushButton()
        self.button.setShortcut(cut)
        self.button.setIcon(QIcon(QPixmap(icon)))
        self.button.setIconSize(QSize(40,40))
        self.button.setObjectName('video_button')
        self.button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.button.setToolTip(tip)
        # Bind url as a default-free closure over this button's video path.
        self.button.clicked.connect(lambda x: self.open_video(url))
        self.grid_videos.addWidget(self.button, row, column)
'''
Changing the volume with the mouse
'''
    def wheelEvent(self, event):
        """Adjust the volume slider by 3 per mouse-wheel notch (+-120 delta)."""
        number = event.angleDelta().y()
        if number == 120:
            self.vol_slider.setValue(self.vol_slider.value() + 3)
        elif number == -120:
            self.vol_slider.setValue(self.vol_slider.value() - 3)
'''
This method shows the user possible keyboard shortcuts
'''
    def help_info(self):
        """Show an About dialog listing the application's keyboard shortcuts."""
        info = '<h4>Hello, World! We have some shortcuts for you!</h4>\n \
            <p>Press <b>Ctrl+I</b> for call Help info</p>\
            <p>Press <b>Ctrl+M</b> for mute volumn</p>\
            <p>Press <b>Ctrl+L</b> for call Long Break</p>\
            <p>Press <b>Ctrl+S</b> for call Short Break</p>\
            <p>Press <b>Ctrl+P</b> for call Pomodoro method</p>\
            <p>Press <b>Ctrl+O</b> for open your videofile.</p>\
            <p>Press <b>SPACE</b> for Pause/Start timer</p>\
            <p>Press <b>Esc</b> for STOP timer</p>\
            <p>You can use numbers keyboard <b>(1-8)</b> for select video</p>'
        QMessageBox.about(self, 'About Program', info)
'''
When User selected RadioButton this function set right time for timer
'''
def set_time(self, minute):
self.count_minute.setText(minute)
self.count_second.setText('00')
self.timer_is_run = False
'''
This function tracks changes for volume slider and set current volume video.
'''
    def change_volume(self):
        """Apply the slider value to the player and swap the speaker icon.

        Volume 0 shows the muted icon; any other value the normal one.
        NOTE(review): icon paths are relative (no ``dirname`` prefix), so
        they resolve only when CWD is the script directory.
        """
        volume = self.vol_slider.value()
        if volume == 0:
            self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume-x.svg')))
            self.mediaPlayer.setVolume(volume)
        else:
            self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume.svg')))
            self.mediaPlayer.setVolume(volume)
'''
After user clicked button, this function opens the current video
'''
def open_video(self, path):
self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(path)))
self.mediaPlayer.play()
'''
When user clicked Start-button this function be:
1. Disabled all radio_buttons
2. Run timer
3. Replaces start-button with pause-button
'''
    def start(self):
        """Start the countdown: lock the mode radios, show PAUSE, run the timer."""
        self.radio_group.setDisabled(True)
        self.timer_is_run = True
        self.stack_btn.setCurrentWidget(self.pause_btn)
        # Blocking countdown loop (pumps the event loop internally).
        self.tik_tak()
'''
Timer Logic.
First, we take the current value of the timestamps to calculate the total number of seconds.
The total number of seconds we use to run the report cycle.
During the loop, we always check whether the user has pressed pause.
If pressed, we exit the loop and save the last time value to our labels.
Otherwise, we start checking:
If the second is not equal to zero , we subtract one from it, otherwise we look at what minutes are equal to.
If the minutes are greater than zero, then we subtract one from the minute, and assign the number 59 to the second.
If there are no minutes and seconds, we exit the cycle
At the end, we start the sound signal
'''
    def tik_tak(self):
        """Count the displayed MM:SS down to 00:00, one second per step.

        Each iteration pumps the Qt event loop (so Pause/Restart clicks
        can flip ``timer_is_run`` mid-countdown) and sleeps for one
        second.  On reaching zero the radios are re-enabled, START is
        shown again and a sound is played.
        NOTE(review): the sound path is relative, unlike the
        ``dirname``-based media paths used elsewhere.
        """
        # Parse the current labels; `min`/`sec` shadow builtins (original naming).
        min, sec = map(int, (self.count_minute.text(), self.count_second.text()))
        len_seconds = min * 60 + sec
        for s in range(len_seconds):
            QApplication.processEvents()
            if self.timer_is_run:
                if sec > 0:
                    sec -= 1
                    self.count_second.setText(str(sec))
                    time.sleep(1)
                    # print(self.count_minute.text(), self.count_second.text())
                else:
                    # Borrow a minute: roll seconds to 59.
                    if min > 0:
                        sec = 59
                        min -= 1
                        self.count_second.setText(str(sec))
                        self.count_minute.setText(str(min))
                        time.sleep(1)
                        # print(self.count_minute.text(), self.count_second.text())
                # Countdown finished: restore the UI and ring the bell.
                if sec == min == 0:
                    self.radio_group.setDisabled(False)
                    self.stack_btn.setCurrentWidget(self.start_btn)
                    playsound('media/sounds/over_sound.mp3', True)
                    self.timer_is_run = False
'''
When user clicked restart button activated this function.ц
Before exiting the loop, the function checks which button is currently active to replace the text on the label
'''
    def restart(self):
        """Stop the countdown and reset the display to the checked mode's time."""
        # Minutes label for each mode, keyed by the radio button's text.
        times = {
            'Pomodoro': '25',
            'Short Break': '5',
            'Long Break': '15'
        }
        self.radio_group.setDisabled(False)
        self.stack_btn.setCurrentWidget(self.start_btn)
        self.timer_is_run = False
        # Give the running tik_tak loop a beat to observe the stop flag.
        time.sleep(1)
        # NOTE(review): children()[1::] presumably skips the group's layout
        # object so only the radio buttons remain -- confirm.
        for item in self.radio_group.children()[1::]:
            if item.isChecked():
                self.count_minute.setText(times[item.text()])
                self.count_second.setText('00')
'''
The function interrupts the timer and saves the last time value on the label
'''
def pause(self):
self.radio_group.setDisabled(False)
self.timer_is_run = False
self.stack_btn.setCurrentWidget(self.start_btn)
    def user_video(self):
        """Let the user pick an MP4/MOV file and play it as the background."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        # Dialog title is Russian for "Open file" (user-facing string, kept).
        fileName, _ = QFileDialog.getOpenFileName(self, 'Открыть файл', '',
                                                  'MP4 Files (*.mp4);; MOV Files (*.mov)', options=options)
        if fileName:
            self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))
            self.mediaPlayer.play()
'''
Exit from app
'''
    def closeEvent(self, event):
        """Accept the window-close event and terminate the process."""
        event.accept()
        sys.exit()
if __name__ == "__main__":
app = QApplication(sys.argv)
videoplayer = VideoPlayer()
style = ''
with open('style.css', 'r') as file:
for line in file:
style += line
videoplayer.setStyleSheet(style)
videoplayer.showMaximized()
videoplayer.show()
sys.exit(app.exec_())
| SalomanYu/StudyWithMe | main.py | main.py | py | 15,163 | python | en | code | 1 | github-code | 36 |
42482969055 | from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../spark/dev/"))
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
# Environment configuration and the fixed set of testable modules.
SPARK_HOME = os.environ.get("SPARK_HOME")
PYTHONPATH = os.environ.get("PYTHONPATH")
snappy_python_modules = ["pyspark-sql-snappy", "pyspark-streaming-snappy"]
def print_red(text):
    """Print *text* to stdout wrapped in ANSI red escape codes."""
    print("\033[31m{0}\033[0m".format(text))
# Aggregated log of all test runs (in the current working directory).
LOG_FILE = os.path.join(os.path.abspath(''), "unit-tests.log")
# Serialises failure reporting across worker threads.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Maps each module name to the Python test entry point passed to pyspark.
python_test_goals = {"pyspark-sql-snappy": "pyspark.sql.snappy.tests",
                     "pyspark-streaming-snappy": "pyspark.streaming.snappy.tests"}
def run_individual_python_test(test_name, pyspark_python):
    """Run one test goal under one Python executable via bin/pyspark.

    Output is captured to a temp file; on failure it is appended to
    LOG_FILE (under the shared lock), non-numeric lines are echoed, and
    the whole process is aborted with os._exit so worker threads cannot
    keep the interpreter alive.
    """
    env = dict(os.environ)
    env.update({
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
    })
    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        per_test_output = tempfile.TemporaryFile()
        # Dedicated working directory per (test, interpreter) combination.
        testDir = test_name + pyspark_python
        if not os.path.exists(testDir):
            os.makedirs(testDir)
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
            stderr=per_test_output, stdout=per_test_output, env=env, cwd=testDir).wait()
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                # Echo the output, skipping lines that start with digits.
                for line in per_test_output:
                    decoded_line = line.decode()
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        per_test_output.close()
        LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the Python executables found on PATH to test against.

    Candidates are python2.6, python3.4 and pypy; when python2.6 is not
    available, the plain `python` on PATH is prepended as a fallback.
    """
    python_execs = [x for x in ["python2.6", "python3.4", "pypy"] if which(x)]
    if "python2.6" not in python_execs:
        LOGGER.warning("Not testing against `python2.6` because it could not be found; falling"
                       " back to `python` instead")
        python_execs.insert(0, "python")
    return python_execs
def parse_opts():
    """Parse command-line options; reject stray positional args and
    a parallelism below 1.  Returns the optparse options object."""
    parser = OptionParser(
        prog="run-tests"
    )
    parser.add_option(
        "--python-executables", type="string", default=','.join(get_default_python_executables()),
        help="A comma-separated list of Python executables to test against (default: %default)"
    )
    parser.add_option(
        "--modules", type="string",
        default=",".join(sorted(snappy_python_modules)),
        help="A comma-separated list of Python modules to test (default: %default)"
    )
    parser.add_option(
        "-p", "--parallelism", type="int", default=4,
        help="The number of suites to test in parallel (default %default)"
    )
    parser.add_option(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )
    (opts, args) = parser.parse_args()
    if args:
        parser.error("Unsupported arguments: %s" % ' '.join(args))
    if opts.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return opts
def main():
    """Entry point: run the configured Python test suites in parallel.

    Builds a (python executable, test goal) work queue and drains it with
    `opts.parallelism` daemon threads; each worker delegates to
    run_individual_python_test(), which exits the whole process on the
    first failure.
    """
    opts = parse_opts()
    if (opts.verbose):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    # Start from a clean log file each run.
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    modules_to_test = []
    for module_name in opts.modules.split(','):
        if module_name in snappy_python_modules:
            modules_to_test.append(module_name)
        else:
            print("Error: unrecognized module '%s'. Supported modules: %s" %
                  (module_name, ", ".join(snappy_python_modules)))
            sys.exit(-1)
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    LOGGER.info("Will test the following Python modules: %s", [x for x in modules_to_test])
    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        # Log the implementation (CPython/PyPy/...) and version of each interpreter.
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        for module in modules_to_test:
            test_goal = python_test_goals[module]
            # All tasks share priority 0; PriorityQueue is used as a thread-safe queue here.
            task_queue.put((0, (python_exec, test_goal)))
    def process_queue(task_queue):
        # Worker loop: drain the queue until empty.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(test_goal, python_exec)
            finally:
                task_queue.task_done()
    start_time = time.time()
    for _ in range(opts.parallelism):
        # Daemon threads so a failing test's os._exit() is not blocked by workers.
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
# Script entry point.
if __name__ == "__main__":
    main()
from sqlalchemy import create_engine
from constants import get_nutrient_idx
def load_cache(db_url='sqlite:///usda.sql3'):
    """Load a {food_id: nutrition facts} cache from the USDA database.

    Args:
        db_url: SQLAlchemy database URL. Defaults to the bundled USDA
            SQLite file, so existing callers are unaffected.

    Returns:
        dict mapping food id -> list of
        [id, long description, food group, serving description, serving
        weight in grams, <per-serving nutrient amounts>], where each
        nutrient amount is stored at the index given by get_nutrient_idx().
    """
    db = create_engine(db_url)
    # Fixed nutrient subset; gm_amount is the per-serving amount derived from
    # the first household weight (sequence_num = 1). food.id < 1100 restricts
    # the result to the dairy/egg food groups of the USDA numbering.
    query = "SELECT food.id,food.long_desc,food_group.name,nutrient.tagname,nutrition.amount,weight.gm_weight,weight.gm_weight*nutrition.amount/100.0 as gm_amount,weight.description FROM food, food_group, nutrient, nutrition, weight where food.food_group_id = food_group.id and food.id = nutrition.food_id and nutrient.id = nutrition.nutrient_id and weight.food_id = food.id and food.id < 1100 and weight.sequence_num = 1 and nutrient.tagname in ('ENERC_KCAL','CHOCDF','PROCNT','FAT','LACS','SUGAR','CAFFN') order by food.id, nutrient.tagname"
    nidx = get_nutrient_idx()
    cache = {}
    conn = db.connect()
    try:
        result = conn.execute(query)
        rows = result.cursor.fetchall()
    finally:
        # Bug fix: the connection was previously never closed.
        conn.close()
    for fid, desc, group, nutrient, amount, gm_weight, gm_amount, serving_desc in rows:
        if fid not in cache:
            # Slots 5..11 are placeholders, filled per-nutrient via nidx below.
            cache[fid] = [fid, desc, group, serving_desc, gm_weight, 0, 0, 0, 0, 0, 0, 0]
        cache[fid][nidx[nutrient]] = gm_amount
    return cache
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 17:55:12 2023
@author: Carlos Gómez-Huélamo
"""
# General purpose imports
import sys
import os
import pdb
import git
# fractions.gcd was removed in Python 3.9; math.gcd has existed since 3.5.
# Bug fix: the original compared version *strings*, and "3.10" < "3.9"
# lexicographically, so Python 3.10+ wrongly took the fractions branch and
# crashed with ImportError. Compare the version tuple instead.
if sys.version_info[:2] >= (3, 9):  # Python >= 3.9
    from math import gcd
else:
    from fractions import gcd
# DL & Math imports
import math
import numpy as np
import torch
import pytorch_lightning as pl
from scipy import sparse
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import conv
from torch_geometric.utils import from_scipy_sparse_matrix
# Plot imports
# Custom imports
# Global variables
# https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision
torch.backends.cudnn.benchmark = True
torch.set_float32_matmul_precision("medium") # highest, high, medium
#######################################
class TMFModel(pl.LightningModule):
    def __init__(self, args):
        """Build the encoder (social + optional map branch) and the decoder.

        Args:
            args: argparse.Namespace produced by init_args(); mutated here
                (args.decoder_latent_size is derived from the latent sizes).
        """
        super(TMFModel, self).__init__() # allows us to avoid using the base class name explicitly
        self.args = args
        # Save model in log_dir as backup
        self.save_hyperparameters() # It will enable Lightning to store all the provided arguments under the self.hparams attribute.
                                    # These hyperparameters will also be stored within the model checkpoint, which simplifies model re-instantiation after training.
        # Encoder
        ## Social
        self.linear_embedding = LinearEmbedding(3,self.args)
        self.pos_encoder= PositionalEncoding1D(self.args.social_latent_size)
        self.encoder_transformer = EncoderTransformer(self.args)
        self.agent_gnn = AgentGNN(self.args)
        ## Physical
        if self.args.use_map:
            self.map_sub_net = MapSubNet(self.args)
            # The fuse/concat logic below assumes both latents have the same width.
            assert self.args.social_latent_size == self.args.map_latent_size
            if self.args.final_latent_info == "concat":
                self.args.decoder_latent_size = self.args.social_latent_size + self.args.map_latent_size
            elif self.args.final_latent_info == "fuse":
                # Two rounds of agent<->lane cross attention (A2L / L2A).
                self.A2L_1 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.L2A_1 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.A2L_2 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.L2A_2 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.args.decoder_latent_size = self.args.social_latent_size
            else:
                raise AssertionError
        else:
            self.args.decoder_latent_size = self.args.social_latent_size
        if self.args.decoder == "decoder_residual": self.decoder = DecoderResidual(self.args)
        elif self.args.decoder == "decoder_temporal": self.decoder = Temporal_Multimodal_Decoder(self.args)
        # Metrics
        self.reg_loss = nn.SmoothL1Loss(reduction="none")
        # Confidence-head learning rates only apply in the freeze_decoder regime.
        if self.args.freeze_decoder:
            self.initial_lr_conf = self.args.initial_lr_conf
            self.min_lr_conf = self.args.min_lr_conf
        else:
            self.initial_lr_conf = 1e-3
            self.min_lr_conf = 1e-6
        self.is_frozen = False
        self.save_model_script = True
    @staticmethod
    def init_args(parent_parser, BASE_DIR, DATASET_DIR):
        """Register dataset/training/model arguments on *parent_parser*.

        Args:
            parent_parser: argparse.ArgumentParser to extend in place.
            BASE_DIR: repository root used to build dataset paths.
            DATASET_DIR: dataset directory relative to BASE_DIR.

        Returns:
            The same parser, with "dataset", "training" and "model" groups.

        NOTE(review): the `type=bool` arguments below do not parse strings the
        way one might expect (argparse's bool("False") is True); confirm how
        these flags are passed before relying on them from the CLI.
        """
        parser_dataset = parent_parser.add_argument_group("dataset")
        parser_dataset.add_argument(
            "--BASE_DIR", type=str, default=BASE_DIR)
        parser_dataset.add_argument(
            "--DATASET_DIR", type=str, default=DATASET_DIR)
        parser_dataset.add_argument(
            "--LOG_DIR", type=str, default="non_specified")
        parser_dataset.add_argument(
            "--train_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "train"))
        parser_dataset.add_argument(
            "--val_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "val"))
        parser_dataset.add_argument(
            "--test_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "test"))
        # Social preprocess
        parser_dataset.add_argument(
            "--train_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "train_pre_clean.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "val_pre_clean.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "test_pre_clean.pkl"))
        # Map preprocess
        parser_dataset.add_argument(
            "--train_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "train_map_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "val_map_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "test_map_data_rot_right_x_multi_agent.pkl"))
        # Whole preprocess
        parser_dataset.add_argument(
            "--train_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "train_full_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "val_full_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "test_full_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument("--reduce_dataset_size", type=int, default=0)
        parser_dataset.add_argument("--use_preprocessed", type=bool, default=False)
        parser_dataset.add_argument("--use_map", type=bool, default=False)
        parser_dataset.add_argument("--align_image_with_target_x", type=bool, default=True)
        parser_training = parent_parser.add_argument_group("training")
        parser_training.add_argument("--num_epochs", type=int, default=200)
        parser_training.add_argument("--check_val_every_n_epoch", type=int, default=10)
        parser_training.add_argument("--lr_values", type=list, default=[1e-3, 1e-4, 1e-3 , 1e-4])
        parser_training.add_argument("--lr_step_epochs", type=list, default=[10, 20, 45])
        parser_training.add_argument("--initial_lr_conf", type=float, default=5e-5)
        parser_training.add_argument("--min_lr_conf", type=float, default=1e-6)
        parser_training.add_argument("--wd", type=float, default=0.001)
        parser_training.add_argument("--batch_size", type=int, default=128)
        parser_training.add_argument("--val_batch_size", type=int, default=128)
        parser_training.add_argument("--workers", type=int, default=0) # TODO: Not working with >= 0
        parser_training.add_argument("--val_workers", type=int, default=0)
        parser_training.add_argument("--gpus", type=int, default=1)
        parser_model = parent_parser.add_argument_group("model")
        # NOTE(review): --MODEL_DIR is registered on the dataset group although it
        # appears in the model section -- confirm whether that is intentional.
        parser_dataset.add_argument("--MODEL_DIR", type=str, default="non_specified")
        parser_model.add_argument("--data_dim", type=int, default=2)
        parser_model.add_argument("--obs_len", type=int, default=50)
        parser_model.add_argument("--pred_len", type=int, default=60)
        parser_model.add_argument("--centerline_length", type=int, default=40)
        parser_model.add_argument("--num_centerlines", type=int, default=6)
        parser_model.add_argument("--num_attention_heads", type=int, default=8)
        parser_model.add_argument("--apply_dropout", type=float, default=0.2)
        parser_model.add_argument("--data_aug_gaussian_noise", type=float, default=0.01)
        parser_model.add_argument("--social_latent_size", type=int, default=64)
        parser_model.add_argument("--map_latent_size", type=int, default=64)
        parser_model.add_argument("--final_latent_info", type=str, default="non_specified")
        parser_model.add_argument("--decoder_latent_size", type=int, default=-1)
        parser_model.add_argument("--decoder_temporal_window_size", type=int, default=30) # 49
        parser_model.add_argument("--num_modes", type=int, default=6)
        parser_model.add_argument("--freeze_decoder", type=bool, default=False)
        parser_model.add_argument("--mod_steps", type=list, default=[1, 5]) # First unimodal -> Freeze -> Multimodal
        parser_model.add_argument("--mod_freeze_epoch", type=int, default=20)
        parser_model.add_argument("--mod_full_unfreeze_epoch", type=int, default=60)
        parser_model.add_argument("--reg_loss_weight", type=float, default=1) # xy predictions
        parser_model.add_argument("--cls_loss_weight", type=float, default=1) # classification = confidences
        parser_model.add_argument("--epsilon", type=float, default=0.0000001)
        return parent_parser
    def add_noise(self, input, factor=1):
        """Additive Gaussian data augmentation.

        Args:
            input (torch.Tensor): tensor to perturb.
            factor (float, optional): scale (std) of the noise. Defaults to 1.

        Returns:
            torch.Tensor: input + factor * N(0, 1), same shape/device/dtype.
        """
        noise = factor * torch.randn(input.shape).to(input)
        noisy_input = input + noise
        return noisy_input
def forward(self, batch):
# Set batch norm to eval mode in order to prevent updates on the running means,
# if the weights are frozen
if self.args.freeze_decoder:
if self.is_frozen:
for module in self.modules():
if isinstance(module, torch.nn.modules.BatchNorm1d):
module.eval()
# Encoder
## Social
### Extract the social features in each sample of the current batch
pdb.set_trace()
displ, centers = batch["displ"], batch["centers"]
rotation, origin = batch["rotation"], batch["origin"]
agents_per_sample = [x.shape[0] for x in displ]
batch_size = len(agents_per_sample)
### OBS: For each sequence, we always set the focal (target) agent as the first agent
### of the scene, then our ego-vehicle (AV) and finally the remanining agents
### (See extractor_proc.py preprocessing)
focal_agent_id = np.cumsum(agents_per_sample)
focal_agent_id = np.roll(focal_agent_id,1)
focal_agent_id[0] = 0
### Convert the list of tensors to tensors
displ_cat = torch.cat(displ, dim=0)
centers_cat = torch.cat(centers, dim=0)
### Data augmentation (TODO: It should be in collate_fn_dict, in the DataLoader)
if self.training:
displ_cat[:,:,:2] = self.add_noise(displ_cat[:,:,:2], self.args.data_aug_gaussian_noise)
centers_cat = self.add_noise(centers_cat, self.args.data_aug_gaussian_noise)
linear_output = self.linear_embedding(displ_cat)
pos_encoding = self.pos_encoder(linear_output)
pos_encoding = pos_encoding + linear_output
out_transformer = self.encoder_transformer(pos_encoding, agents_per_sample)
out_agent_gnn = self.agent_gnn(out_transformer, centers_cat, agents_per_sample)
social_info = torch.stack([x[0] for x in out_agent_gnn])
if torch.any(torch.isnan(social_info)):
pdb.set_trace()
## Physical
if self.args.use_map:
### Get relevant centerlines (non-padded) per scenario
rel_candidate_centerlines = batch["rel_candidate_centerlines"]
rel_candidate_centerlines = torch.stack(rel_candidate_centerlines,dim=0)
# Data augmentation (TODO: It should be in collate_fn_dict, in the DataLoader)
# if self.training:
# rel_candidate_centerlines = self.add_noise(rel_candidate_centerlines, self.args.data_aug_gaussian_noise)
### Get the map latent vector associated
_, num_centerlines, points_centerline, data_dim = rel_candidate_centerlines.shape
rel_candidate_centerlines = rel_candidate_centerlines.contiguous().view(-1, points_centerline, data_dim)
non_empty_mask = rel_candidate_centerlines.abs().sum(dim=1).sum(dim=1) # A padded-centerline must sum 0.0
# in each dimension, and after that both dimensions together
rows_mask = torch.where(non_empty_mask == 0.0)[0]
non_masked_centerlines = rel_candidate_centerlines.shape[0] - len(rows_mask)
rel_candidate_centerlines_mask = torch.zeros([rel_candidate_centerlines.shape[0]], device=rel_candidate_centerlines.device).type(torch.bool) # False
rel_candidate_centerlines_mask[rows_mask] = True # Padded-centerlines
rel_candidate_centerlines_mask_inverted = ~rel_candidate_centerlines_mask # Non-padded centerlines (so, relevant) to True
centerlines_per_sample = [] # Relevant centerlines (non-padded) per sequence
num_current_centerlines = 0
for i in range(rel_candidate_centerlines_mask.shape[0]+1):
if i % self.args.num_centerlines == 0 and i > 0: # Next traffic scenario
centerlines_per_sample.append(num_current_centerlines)
num_current_centerlines = 0
if i == rel_candidate_centerlines_mask.shape[0]:
break
if rel_candidate_centerlines_mask_inverted[i]: # Non-masked
num_current_centerlines += 1
assert non_masked_centerlines == sum(centerlines_per_sample), \
"The number of relevant centerlines do not match"
centerlines_per_sample = np.array(centerlines_per_sample)
rel_candidate_centerlines_ = rel_candidate_centerlines[rel_candidate_centerlines_mask_inverted,:,:]
rel_candidate_centerlines_mask_ = rel_candidate_centerlines_mask.reshape(-1,1).repeat_interleave(points_centerline,dim=1)
physical_info = self.map_sub_net(rel_candidate_centerlines, rel_candidate_centerlines_mask_)
# Decoder
if self.args.use_map:
if self.args.final_latent_info == "concat": # Concat info
merged_info = torch.cat([social_info,
physical_info],
dim=1)
if self.args.final_latent_info == "fuse": # Fuse info
physical_info = physical_info + self.A2L_1(physical_info, social_info)
social_info = social_info + self.L2A_1(social_info, physical_info)
physical_info = physical_info + self.A2L_2(physical_info, social_info)
social_info = social_info + self.L2A_2(social_info, physical_info)
merged_info = social_info
else:
merged_info = social_info
if torch.any(torch.isnan(merged_info)):
pdb.set_trace()
# If self.args.freeze_decoder is set to True, conf are useless
if self.args.decoder == "decoder_residual":
pred_traj, conf = self.decoder(merged_info, self.is_frozen, self.current_epoch)
elif self.args.decoder == "decoder_temporal":
traj_agent_abs_rel = displ_cat[focal_agent_id,:self.args.decoder_temporal_window_size,:self.args.data_dim]
last_obs_agent = centers_cat[focal_agent_id,:]
decoder_h = merged_info.unsqueeze(0)
decoder_c = torch.zeros(tuple(decoder_h.shape)).to(decoder_h)
state_tuple = (decoder_h, decoder_c)
pred_traj_rel, conf = self.decoder(traj_agent_abs_rel, state_tuple)
# Convert relative displacements to absolute coordinates (around origin)
pred_traj = relative_to_abs_multimodal(pred_traj_rel, last_obs_agent)
### In this model we are only predicting
### the focal agent. We would actually
### have batch_size x num_agents x num_modes x pred_len x data_dim
num_agents = 1
out = pred_traj.contiguous().view(batch_size, num_agents, -1, self.args.pred_len, self.args.data_dim)
if not self.args.freeze_decoder: conf = conf.view(batch_size, num_agents, -1)
# Iterate over each batch and transform predictions into the global coordinate frame
for i in range(len(out)):
out[i] = torch.matmul(out[i], rotation[i]) + origin[i].view(
1, 1, 1, -1
)
return out, conf
# Aux class functions
def freeze(self):
for param in self.parameters():
param.requires_grad = False
self.decoder.unfreeze_layers()
self.is_frozen = True
def full_unfreeze(self):
for param in self.parameters():
param.requires_grad = True
self.is_frozen = False
def prediction_loss(self, preds, gts, conf=None):
"""_summary_
Args:
preds (torch.tensor): batch_size x num_agents x num_modes x pred_len x data_dim
OBS: At this moment, num_agents = 1 since we are only predicting the focal agent
gts (list): list of gt of each scenario (num_agents x pred_len x 2)
conf (torch.tensor): batch_size x num_agents x 1
Returns:
_type_: _description_
"""
if self.args.freeze_decoder:
# # Stack all the predicted trajectories of the target agent
# num_mods = preds.shape[2]
# # [0] is required to remove the unneeded dimensions
# preds = torch.cat([x[0] for x in preds], 0)
# # Stack all the true trajectories of the target agent
# # Keep in mind, that there are multiple trajectories in each sample,
# # but only the first one ([0]) corresponds to the target agent
# gt_target = torch.cat([torch.unsqueeze(x[0], 0) for x in gts], 0)
# gt_target = torch.repeat_interleave(gt_target, num_mods, dim=0) # repeate the gt for all ks
# loss_single = self.reg_loss(preds, gt_target)
# loss_single = torch.sum(torch.sum(loss_single, dim=2), dim=1)
# loss_single = torch.split(loss_single, num_mods)
# # Tuple to tensor
# loss_single = torch.stack(list(loss_single), dim=0)
# min_loss_index = torch.argmin(loss_single, dim=1) # Get best mode
# min_loss_combined = [x[min_loss_index[i]] for i, x in enumerate(loss_single)]
# loss_out = torch.sum(torch.stack(min_loss_combined))
# # loss_out = torch.mean(torch.stack(min_loss_combined))
# return loss_out
# Stack all the predicted trajectories of the target agent
preds = preds.squeeze(1)
batch_size, num_modes, pred_len, data_dim = preds.shape
# Stack all the true trajectories of the target agent
# Keep in mind, that there are multiple trajectories in each sample, but only the first one ([0]) corresponds
# to the target agent
gt_target = torch.cat([torch.unsqueeze(x[0], 0) for x in gts], 0) # batch_size x pred_len x data_dim
gt_target_repeated = gt_target.unsqueeze(1).repeat(1,preds.shape[1],1,1) # repeate the gt for all ks
# batch_size x num_modes x pred_len x data_dim
fde_k = torch.sqrt((preds[:, :, -1, 0] - gt_target_repeated[:, :, -1, 0]) ** 2 + # x
(preds[:, :, -1, 1] - gt_target_repeated[:, :, -1, 1]) ** 2 + # y
self.args.epsilon) # to avoid division by zero
k_hat = torch.argmin(fde_k, dim=1)
index = torch.tensor(range(preds.shape[0]), dtype=torch.long)
pred_fut_traj = preds[index, k_hat] # Best trajectory in terms of FDE per sequence
batch_size, pred_len, _ = pred_fut_traj.shape
num_modes = preds.shape[1]
# Regression loss
# reg_loss = torch.zeros(1, dtype=torch.float32).to(preds)
mse_loss = F.mse_loss(pred_fut_traj, gt_target, reduction='none')
mse_loss = mse_loss.sum(dim=2) + self.args.epsilon # sum epsilon to avoid division by zero
mse_loss = torch.sqrt(mse_loss)
mse_loss = mse_loss.mean(dim=1)
fde_loss = fde_k[index, k_hat]
reg_loss = mse_loss * 0.5 + fde_loss * 0.5
reg_loss = reg_loss.mean()
return reg_loss
else:
# Stack all the predicted trajectories of the target agent
preds = preds.squeeze(1)
conf = conf.squeeze(1)
batch_size, num_modes, pred_len, data_dim = preds.shape
# Stack all the true trajectories of the target agent
# Keep in mind, that there are multiple trajectories in each sample, but only the first one ([0]) corresponds
# to the target agent
gt_target = torch.cat([torch.unsqueeze(x[0], 0) for x in gts], 0) # batch_size x pred_len x data_dim
gt_target_repeated = gt_target.unsqueeze(1).repeat(1,preds.shape[1],1,1) # repeate the gt for all ks
# batch_size x num_modes x pred_len x data_dim
fde_k = torch.sqrt((preds[:, :, -1, 0] - gt_target_repeated[:, :, -1, 0]) ** 2 + # x
(preds[:, :, -1, 1] - gt_target_repeated[:, :, -1, 1]) ** 2 + # y
self.args.epsilon) # to avoid division by zero
k_hat = torch.argmin(fde_k, dim=1)
index = torch.tensor(range(preds.shape[0]), dtype=torch.long)
pred_fut_traj = preds[index, k_hat] # Best trajectory in terms of FDE per sequence
batch_size, pred_len, _ = pred_fut_traj.shape
num_modes = preds.shape[1]
# Regression loss
# reg_loss = torch.zeros(1, dtype=torch.float32).to(preds)
mse_loss = F.mse_loss(pred_fut_traj, gt_target, reduction='none')
mse_loss = mse_loss.sum(dim=2) + self.args.epsilon # sum epsilon to avoid division by zero
mse_loss = torch.sqrt(mse_loss)
mse_loss = mse_loss.mean(dim=1)
fde_loss = fde_k[index, k_hat]
reg_loss = mse_loss * 0.5 + fde_loss * 0.5
reg_loss = reg_loss.mean()
# Classification loss (max-margin)
score_hat = conf[index, k_hat].unsqueeze(-1)
score_hat = score_hat.repeat(1, num_modes)
cls_loss = conf + 0.2 - score_hat
cls_loss[cls_loss < 0] = 0
cls_loss = cls_loss.sum(dim=-1).sum(dim=-1)
cls_loss = cls_loss /((num_modes-1) * batch_size)
# Final loss
loss = reg_loss * self.args.reg_loss_weight + \
cls_loss * self.args.cls_loss_weight
return loss
def get_lr(self, epoch):
lr_index = 0
for lr_epoch in self.args.lr_step_epochs:
if epoch < lr_epoch:
break
lr_index += 1
return self.args.lr_values[lr_index]
def get_best_predictions(self, pred, best_pred_indeces):
"""
pred: batch_size x num_modes x pred_len x data_dim
best_pred_indeces: batch_size x 1
Take the best prediction (best mode) according to the best confidence for each sequence
"""
return pred[torch.arange(pred.shape[0]), best_pred_indeces, :, :].squeeze()
    def calc_prediction_metrics(self, preds, gts, conf=None):
        """Compute ADE/FDE metrics over numpy predictions.

        Args:
            preds (np.ndarray): batch x num_modes x pred_len x 2.
            gts (np.ndarray): batch x pred_len x 2.
            conf (np.ndarray, optional): per-mode confidences; only used when
                the decoder is not frozen.

        Returns:
            tuple: (ade_1, fde_1, ade, fde) -- single-mode metrics followed by
            best-of-all-modes (minFDE oracle) metrics.
        """
        if self.args.freeze_decoder:
            # Calculate prediction error for each mode
            # Output has shape (batch_size, n_modes, n_timesteps)
            error_per_t = np.linalg.norm(preds - np.expand_dims(gts, axis=1), axis=-1)
            # Calculate the error for the first mode (at index 0)
            fde_1 = np.average(error_per_t[:, 0, -1])
            ade_1 = np.average(error_per_t[:, 0, :])
            # Calculate the error for all modes
            # Best mode is always the one with the lowest final displacement
            lowest_final_error_indices = np.argmin(error_per_t[:, :, -1], axis=1)
            error_per_t = error_per_t[np.arange(
                preds.shape[0]), lowest_final_error_indices]
            fde = np.average(error_per_t[:, -1])
            ade = np.average(error_per_t[:, :])
        else:
            # Calculate prediction error for each mode
            # K = 1
            # Calculate the error for the theoretically best mode (that with the highest confidence)
            best_pred_traj_indeces = conf.argmax(1)
            k1_predictions = self.get_best_predictions(preds,best_pred_traj_indeces)
            error_per_t_k1 = np.linalg.norm(k1_predictions - gts, axis=-1)
            fde_1 = np.average(error_per_t_k1[:, -1])
            ade_1 = np.average(error_per_t_k1[:, :])
            # K = 6
            # Calculate the error for all modes
            # Best mode is always the one with the lowest final displacement
            error_per_t = np.linalg.norm(preds - np.expand_dims(gts, axis=1), axis=-1)
            lowest_final_error_indices = np.argmin(error_per_t[:, :, -1], axis=1)
            error_per_t = error_per_t[np.arange(
                preds.shape[0]), lowest_final_error_indices]
            fde = np.average(error_per_t[:, -1])
            ade = np.average(error_per_t[:, :])
        return ade_1, fde_1, ade, fde
# Overwrite Pytorch-Lightning functions
    def configure_optimizers(self):
        """Lightning hook: build the optimizer (and scheduler when applicable).

        In the freeze_decoder regime only the still-trainable parameters are
        optimized after the freeze epoch; otherwise AdamW is paired with a
        ReduceLROnPlateau scheduler monitoring "ade_val".
        """
        if self.args.freeze_decoder:
            if self.current_epoch == self.args.mod_freeze_epoch:
                optimizer = torch.optim.AdamW(
                    filter(lambda p: p.requires_grad, self.parameters()), weight_decay=self.args.wd) # Apply optimizer just to those parameters
                                                                                                     # that require to be trained
            else:
                optimizer = torch.optim.AdamW(
                    self.parameters(), weight_decay=self.args.wd)
            return optimizer
        else:
            optimizer = torch.optim.AdamW(self.parameters(),
                                          weight_decay=self.args.wd,
                                          lr=self.initial_lr_conf)
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                   mode='min',
                                                                   factor=0.5,
                                                                   patience=5,
                                                                   min_lr=self.min_lr_conf,
                                                                   verbose=True)
            return {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "ade_val"}
    def on_train_epoch_start(self):
        """Lightning hook: handle freeze/unfreeze milestones and LR logging.

        In the freeze_decoder regime the decoder is frozen at
        mod_freeze_epoch and everything is unfrozen at
        mod_full_unfreeze_epoch; optimizers are rebuilt on each transition.
        """
        if self.args.freeze_decoder:
            # Trigger weight freeze and optimizer reinit on mod_freeze_epoch
            if self.current_epoch == self.args.mod_freeze_epoch:
                self.freeze()
                self.trainer.strategy.setup_optimizers(self.trainer)
            if self.current_epoch == self.args.mod_full_unfreeze_epoch:
                self.args.freeze_decoder = False
                self.full_unfreeze()
                self.trainer.strategy.setup_optimizers(self.trainer)
            # Set learning rate according to current epoch
            for single_param in self.optimizers().param_groups:
                single_param["lr"] = self.get_lr(self.current_epoch)
                self.log("lr", single_param["lr"], prog_bar=True, sync_dist=True)
        else:
            # Get learning rate according to current epoch
            for single_param in self.optimizers().param_groups:
                self.log("lr", single_param["lr"], prog_bar=True, sync_dist=True)
def training_step(self, train_batch, batch_idx):
out, conf = self.forward(train_batch)
loss = self.prediction_loss(out, train_batch["gt"], conf)
self.log("loss_train", loss, sync_dist=True)
return loss
    def validation_step(self, val_batch, batch_idx):
        """Lightning hook: compute the validation loss and collect the
        focal-agent predictions/ground truth (and confidences) for the
        epoch-end metric aggregation.
        """
        out, conf = self.forward(val_batch)
        loss = self.prediction_loss(out, val_batch["gt"], conf)
        self.log("loss_val", loss, sync_dist=True)
        # Extract target agent only
        pred = [x[0].detach().cpu().numpy() for x in out]
        gt = [x[0].detach().cpu().numpy() for x in val_batch["gt"]]
        if not self.args.freeze_decoder: conf = [x[0].detach().cpu().numpy() for x in conf]
        # if self.save_model_script:
        #     model_filename = os.path.join(self.args.BASE_DIR,
        #                                   self.args.MODEL_DIR,
        #                                   "TFMF_TGR.py")
        #     os.system(f"cp {model_filename} {self.args.LOG_DIR}")
        #     self.save_model_script = False
        return {"predictions": pred,
                "groundtruth": gt,
                "confidences": conf} # = validation_outputs
def validation_epoch_end(self, validation_outputs):
# Extract predictions
pred = [out["predictions"] for out in validation_outputs]
pred = np.concatenate(pred, 0) # get predictions along all validation steps
gt = [out["groundtruth"] for out in validation_outputs]
gt = np.concatenate(gt, 0) # get ground-truth along all validation steps
if self.args.freeze_decoder:
conf = None
else:
conf = [out["confidences"] for out in validation_outputs]
conf = np.concatenate(conf, 0) # get confidences along all validation steps
ade1, fde1, ade, fde = self.calc_prediction_metrics(pred, gt, conf)
self.log("ade1_val", ade1, prog_bar=True, sync_dist=True)
self.log("fde1_val", fde1, prog_bar=True, sync_dist=True)
self.log("ade_val", ade, prog_bar=True, sync_dist=True)
self.log("fde_val", fde, prog_bar=True, sync_dist=True)
# Layers
class LinearEmbedding(nn.Module):
    """Linear + ReLU projection of raw agent features into the social latent space."""
    def __init__(self, input_size, args):
        super(LinearEmbedding, self).__init__()
        self.args = args
        self.input_size = input_size
        self.output_size = args.social_latent_size
        self.encoder_input_layer = nn.Linear(
            in_features=self.input_size,
            out_features=self.output_size
        )
    def forward(self, linear_input):
        """Project the last dimension to output_size and apply ReLU."""
        return F.relu(self.encoder_input_layer(linear_input))
class PositionalEncoding1D(nn.Module):
    """Sinusoidal 1-D positional encoding with a shape-keyed cache."""
    def __init__(self, channels):
        """
        :param channels: The last dimension of the tensor you want to apply pos emb to.
        """
        super(PositionalEncoding1D, self).__init__()
        self.org_channels = channels
        # Round up to the next even number so sin/cos halves are equal-sized.
        channels = int(np.ceil(channels / 2) * 2)
        self.channels = channels
        # Standard transformer frequency schedule (base 10000).
        inv_freq = 1.0 / (10000 ** (torch.arange(0, channels, 2).float() / channels))
        self.register_buffer("inv_freq", inv_freq)
        # Cached encoding, reused while the input shape stays the same.
        self.cached_penc = None
    def forward(self, tensor):
        """
        :param tensor: A 3d tensor of size (batch_size, x, ch)
        :return: Positional Encoding Matrix of size (batch_size, x, ch)

        NOTE(review): the cache is keyed on shape only -- if the input's
        device or dtype changes while the shape stays the same, a stale
        encoding would be returned; confirm inputs are homogeneous.
        """
        if len(tensor.shape) != 3:
            raise RuntimeError("The input tensor has to be 3d!")
        if self.cached_penc is not None and self.cached_penc.shape == tensor.shape:
            return self.cached_penc
        self.cached_penc = None
        batch_size, x, orig_ch = tensor.shape
        pos_x = torch.arange(x, device=tensor.device).type(self.inv_freq.type())
        # Outer product positions x frequencies -> phase matrix.
        sin_inp_x = torch.einsum("i,j->ij", pos_x, self.inv_freq)
        emb_x = torch.cat((sin_inp_x.sin(), sin_inp_x.cos()), dim=-1)
        emb = torch.zeros((x, self.channels), device=tensor.device).type(tensor.type())
        emb[:, : self.channels] = emb_x
        # Truncate back to the caller's channel count and broadcast over the batch.
        self.cached_penc = emb[None, :, :orig_ch].repeat(batch_size, 1, 1)
        return self.cached_penc
class EncoderTransformer(nn.Module):
    """Single-layer transformer encoder over per-agent temporal features.

    Returns only the latent of the last time step per agent.
    """
    def __init__(self, args):
        super(EncoderTransformer, self).__init__()
        self.args = args
        self.d_model = self.args.social_latent_size # embedding dimension
        # self.nhead = self.args.num_attention_heads # TODO: Is this correct?
        # NOTE(review): nhead is set equal to d_model (head dimension of 1) and
        # the feed-forward width is 1; both look unusual -- confirm intentional.
        self.nhead = self.args.social_latent_size
        self.d_hid = 1 ## dimension of the feedforward network model in nn.TransformerEncoder
        self.num_layers = 1
        self.dropout = self.args.apply_dropout
        self.encoder_layer = nn.TransformerEncoderLayer(self.d_model, self.nhead, self.d_hid , self.dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=self.num_layers)
    def forward(self, transformer_in, agents_per_sample):
        # agents_per_sample is accepted for interface parity but not used here.
        transformer_out = F.relu(self.transformer_encoder(transformer_in))
        # Keep only the last observed time step of each agent.
        return transformer_out[:,-1,:]
class AgentGNN(nn.Module):
    """Two CGConv layers over a fully-connected per-scenario agent graph.

    Edge attributes are the 2-D relative positions between agent centers.
    """
    def __init__(self, args):
        super(AgentGNN, self).__init__()
        self.args = args
        self.latent_size = args.social_latent_size
        # dim=2 -> edge attributes are the 2-D relative positions built below.
        self.gcn1 = conv.CGConv(self.latent_size, dim=2, batch_norm=True)
        self.gcn2 = conv.CGConv(self.latent_size, dim=2, batch_norm=True)
    def forward(self, gnn_in, centers, agents_per_sample):
        # gnn_in is a batch and has the shape (batch_size, number_of_agents, latent_size)
        x, edge_index = gnn_in, self.build_fully_connected_edge_idx(
            agents_per_sample).to(gnn_in.device)
        edge_attr = self.build_edge_attr(edge_index, centers).to(gnn_in.device)
        x = F.relu(self.gcn1(x, edge_index, edge_attr))
        gnn_out = F.relu(self.gcn2(x, edge_index, edge_attr))
        # Split the flat node tensor back into one tensor per scenario, consuming
        # agents_per_sample[i] rows at a time.
        edge_index_out1 = []
        for i in agents_per_sample:
            edge_index_out1.append(gnn_out[0:i,:])
            gnn_out = gnn_out[i:,:]
        return edge_index_out1
    def build_fully_connected_edge_idx(self, agents_per_sample):
        """Build a batched edge index: one fully-connected subgraph per scenario."""
        edge_index = []
        # In the for loop one subgraph is built (no self edges!)
        # The subgraph gets offsetted and the full graph over all samples in the batch
        # gets appended with the offsetted subgrah
        offset = 0
        for i in range(len(agents_per_sample)):
            num_nodes = agents_per_sample[i]
            adj_matrix = torch.ones((num_nodes, num_nodes))
            adj_matrix = adj_matrix.fill_diagonal_(0)
            sparse_matrix = sparse.csr_matrix(adj_matrix.numpy())
            edge_index_subgraph, _ = from_scipy_sparse_matrix(sparse_matrix)
            # Offset the list
            edge_index_subgraph = torch.Tensor(
                np.asarray(edge_index_subgraph) + offset)
            offset += agents_per_sample[i]
            edge_index.append(edge_index_subgraph)
        # Concat the single subgraphs into one
        edge_index = torch.LongTensor(np.column_stack(edge_index))
        return edge_index
    def build_edge_attr(self, edge_index, data):
        """Edge attribute = destination center minus source center (2-D offset)."""
        edge_attr = torch.zeros((edge_index.shape[-1], 2), dtype=torch.float)
        rows, cols = edge_index
        # goal - origin
        edge_attr = data[cols] - data[rows]
        return edge_attr
class DecoderResidual(nn.Module):
    """Multimodal residual decoder: one PredictionNet head per mode step,
    plus an optional confidence branch that scores the modes.

    NOTE(review): which heads run depends on ``args.freeze_decoder`` and the
    ``is_frozen`` flag supplied by the training loop — confirm the schedule
    against the caller.
    """
    def __init__(self, args):
        super(DecoderResidual, self).__init__()
        self.args = args
        self.latent_size = self.args.decoder_latent_size
        self.num_modes = self.args.num_modes
        # One prediction head per step in mod_steps (e.g. staged training).
        output = []
        for i in range(sum(args.mod_steps)):
            output.append(PredictionNet(args))
        self.output = nn.ModuleList(output) # ModuleList registers each head's parameters
        if not self.args.freeze_decoder or self.args.mod_full_unfreeze_epoch != -1:
            # Classification (confidence) branch over flattened predictions.
            norm = "BN"
            ng = 1
            self.latent_predictions = nn.Linear(self.args.num_modes * self.args.pred_len * self.args.data_dim,
                                                self.latent_size)
            self.confidences = nn.Sequential(LinearRes(self.latent_size*2, self.latent_size*2, norm=norm, ng=ng),
                                             nn.Linear(self.latent_size*2, self.num_modes))
    def forward(self, decoder_in, is_frozen, current_epoch):
        # NOTE(review): current_epoch is accepted but unused in this body.
        batch_size = decoder_in.shape[0]
        if self.args.freeze_decoder:
            sample_wise_out = []
            if self.training is False: # If you are validating or test, use all decoders
                for out_subnet in self.output:
                    sample_wise_out.append(out_subnet(decoder_in))
            elif is_frozen: # If the first decoder has been frozen, decode and train the remaining ones
                for i in range(self.args.mod_steps[0], sum(self.args.mod_steps)):
                    sample_wise_out.append(self.output[i](decoder_in))
            else: # If you are training and is_frozen = False, use only the first decoder
                sample_wise_out.append(self.output[0](decoder_in))
            decoder_out = torch.stack(sample_wise_out)
            decoder_out = torch.swapaxes(decoder_out, 0, 1)
            # Frozen-decoder path returns no confidences.
            return decoder_out, []
        else:
            # Unfrozen path: run every head and score the modes.
            sample_wise_out = []
            for out_subnet in self.output:
                sample_wise_out.append(out_subnet(decoder_in))
            decoder_out = torch.stack(sample_wise_out)
            decoder_out = torch.swapaxes(decoder_out, 0, 1)
            latent_predictions = self.latent_predictions(decoder_out.contiguous().view(batch_size,-1))
            # Confidence input = decoder latent concatenated with a latent
            # embedding of the predictions themselves.
            conf_latent = torch.cat([decoder_in,
                                     latent_predictions],
                                    dim=1)
            conf = self.confidences(conf_latent)
            conf = torch.softmax(conf.view(batch_size,-1), dim=1) # batch_size, num_modes
            # Debug guard: softmax rows should always sum to 1.
            if not torch.allclose(torch.sum(conf, dim=1), conf.new_ones((batch_size,))):
                pdb.set_trace()
            return decoder_out, conf
    def unfreeze_layers(self):
        """Re-enable gradients for every decoder head except the first."""
        for layer in range(self.args.mod_steps[0], sum(self.args.mod_steps)): # Unfreeze all decoders except the first one
            for param in self.output[layer].parameters():
                param.requires_grad = True
class LinearRes(nn.Module):
    """Three-layer linear block with normalization and a residual connection.

    Bug fix: ``forward`` always applies ``self.norm3``, but the 'GN' branch
    previously never defined it, so any instance built with norm='GN'
    crashed with an AttributeError on its first forward pass.

    Args:
        n_in: input feature size.
        n_out: output (and hidden) feature size.
        norm: 'GN' (GroupNorm) or 'BN' (BatchNorm1d); 'SyncBN' is unsupported.
        ng: requested number of groups for GroupNorm (reduced via gcd so it
            always divides n_out).
    """
    def __init__(self, n_in, n_out, norm='GN', ng=32):
        super(LinearRes, self).__init__()
        assert(norm in ['GN', 'BN', 'SyncBN'])
        self.linear1 = nn.Linear(n_in, n_out)
        self.linear2 = nn.Linear(n_out, n_out)
        self.linear3 = nn.Linear(n_out, n_out)
        self.relu = nn.ReLU(inplace=True)
        if norm == 'GN':
            self.norm1 = nn.GroupNorm(gcd(ng, n_out), n_out)
            self.norm2 = nn.GroupNorm(gcd(ng, n_out), n_out)
            # Fix: norm3 was missing here although forward() always uses it.
            self.norm3 = nn.GroupNorm(gcd(ng, n_out), n_out)
        elif norm == 'BN':
            self.norm1 = nn.BatchNorm1d(n_out)
            self.norm2 = nn.BatchNorm1d(n_out)
            self.norm3 = nn.BatchNorm1d(n_out)
        else:
            exit('SyncBN has not been added!')
        if n_in != n_out:
            # Project the input so the residual addition shapes match.
            if norm == 'GN':
                self.transform = nn.Sequential(
                    nn.Linear(n_in, n_out, bias=False),
                    nn.GroupNorm(gcd(ng, n_out), n_out))
            elif norm == 'BN':
                self.transform = nn.Sequential(
                    nn.Linear(n_in, n_out, bias=False),
                    nn.BatchNorm1d(n_out))
            else:
                exit('SyncBN has not been added!')
        else:
            self.transform = None
    def forward(self, x):
        """Apply linear+norm (+ReLU) three times, add the (possibly
        projected) residual, and finish with a ReLU."""
        out = self.linear1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.linear2(out)
        out = self.norm2(out)
        out = self.relu(out)
        out = self.linear3(out)
        out = self.norm3(out)
        if self.transform is not None:
            out += self.transform(x)
        else:
            out += x
        out = self.relu(out)
        return out
class PredictionNet(nn.Module):
    """Single-mode trajectory head: one residual linear block followed by an
    unactivated output projection of size pred_len * 2."""

    def __init__(self, args):
        super(PredictionNet, self).__init__()
        self.args = args
        self.latent_size = args.decoder_latent_size
        self.weight1 = nn.Linear(self.latent_size, self.latent_size)
        self.norm1 = nn.GroupNorm(1, self.latent_size)
        self.weight2 = nn.Linear(self.latent_size, self.latent_size)
        # GroupNorm with one group normalizes across all channels jointly.
        self.norm2 = nn.GroupNorm(1, self.latent_size)
        self.output_fc = nn.Linear(self.latent_size, args.pred_len * 2)

    def forward(self, prednet_in):
        """Return raw (unactivated) predictions for one mode."""
        # Residual branch: two linear+norm stages with a ReLU in between.
        hidden = F.relu(self.norm1(self.weight1(prednet_in)))
        hidden = self.norm2(self.weight2(hidden))
        hidden = F.relu(hidden + prednet_in)
        # The last layer intentionally has no activation function.
        return self.output_fc(hidden)
class map_smooth_decoder(nn.Module):
    """Compress per-centerline encodings into a single latent vector per
    sample using two 1-D convolutions followed by two linear reductions."""

    def __init__(self, args):
        super(map_smooth_decoder, self).__init__()
        self.args = args
        self.latent_size = self.args.map_latent_size
        self.norm0 = nn.BatchNorm1d(self.latent_size)
        self.conv1 = nn.Conv1d(self.latent_size, self.latent_size // 4, kernel_size=3, padding=1)
        self.norm1 = nn.BatchNorm1d(self.latent_size // 4)
        self.conv2 = nn.Conv1d(self.latent_size // 4, self.latent_size // 8, kernel_size=3, padding=1)
        self.norm2 = nn.BatchNorm1d(self.latent_size // 8)
        self.linear3 = nn.Linear(self.args.centerline_length * (self.latent_size // 8), self.latent_size // 8)
        self.norm3 = nn.BatchNorm1d(self.latent_size // 8)
        self.linear4 = nn.Linear(self.args.num_centerlines * (self.latent_size // 8), self.latent_size)

    def forward(self, x):
        """x: (total_centerlines, centerline_length, map_latent_size)."""
        total_centerlines = x.shape[0]
        batch_size = x.shape[0] // self.args.num_centerlines
        # Channels-first layout for the 1-D convolutions.
        feats = self.norm0(x.permute(0, 2, 1))
        feats = self.norm1(F.relu(self.conv1(feats)))
        feats = self.norm2(F.relu(self.conv2(feats)))
        # Flatten per centerline, then merge every centerline of each sample.
        feats = self.norm3(F.relu(self.linear3(feats.contiguous().view(total_centerlines, -1))))
        return self.linear4(feats.contiguous().view(batch_size, -1))
class MLP(nn.Module):
    """Two-layer perceptron: Linear -> LayerNorm -> GELU -> Linear, with a
    bottleneck hidden size of output_size // 2."""

    def __init__(self, input_size, output_size) -> None:
        super(MLP, self).__init__()
        hidden_size = output_size // 2
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.norm = nn.LayerNorm(hidden_size)
        self.GELU = nn.GELU()
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Project to the hidden size, normalize + activate, then expand."""
        return self.linear2(self.GELU(self.norm(self.linear1(x))))
class MapSubNet(nn.Module):
    """Hierarchical self-attention encoder over centerline points, finished
    by a map_smooth_decoder that pools everything into one latent per
    sample."""

    def __init__(self, args, depth=None):
        super(MapSubNet, self).__init__()
        self.args = args
        if depth is None:
            depth = 2
        self.hidden_size = self.args.map_latent_size
        self.input_dim = self.args.data_dim
        self.dropout = self.args.apply_dropout
        # Two stages: each stage is MLP -> self-attention -> concat -> norm.
        self.MLPs = nn.ModuleList([MLP(self.input_dim, self.hidden_size // 8), MLP(self.hidden_size // 4, self.hidden_size // 2)])
        self.Attn = nn.ModuleList([nn.MultiheadAttention(self.hidden_size // 8, self.args.num_attention_heads, dropout=self.dropout),
                                   nn.MultiheadAttention(self.hidden_size // 2, self.args.num_attention_heads, dropout=self.dropout)])
        self.Norms = nn.ModuleList([nn.LayerNorm(self.hidden_size // 4), nn.LayerNorm(self.hidden_size)])
        self.final_layer = map_smooth_decoder(self.args)

    def forward(self, inputs, inputs_mask):
        hidden = inputs
        for stage, attention in enumerate(self.Attn):
            hidden = self.MLPs[stage](hidden)
            if torch.any(torch.isnan(hidden)):
                pdb.set_trace()  # debug guard: NaNs after the MLP stage
            residual = hidden
            # nn.MultiheadAttention expects (seq, batch, feature).
            qkv = hidden.permute(1, 0, 2)
            hidden = attention(qkv, qkv, value=qkv)[0].permute(1, 0, 2)
            if torch.any(torch.isnan(hidden)):
                pdb.set_trace()  # debug guard: NaNs after attention
            hidden = torch.cat([hidden, residual], dim=2)
            hidden = F.relu(self.Norms[stage](hidden))
            if torch.any(torch.isnan(hidden)):
                pdb.set_trace()
        if torch.any(torch.isnan(hidden)):
            pdb.set_trace()
        return self.final_layer(hidden)
class TransformerDecoder(nn.Module):
    """Standard transformer decoder layer: self-attention, cross-attention,
    and a position-wise feed-forward net, each followed by residual +
    LayerNorm."""

    def __init__(self, hidden_size, head_num=8, dropout=0.1) -> None:
        super(TransformerDecoder, self).__init__()
        self.self_attn = nn.MultiheadAttention(hidden_size, head_num, dropout)
        self.cross_attn = nn.MultiheadAttention(hidden_size, head_num, dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.dropout4 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)
        self.norm3 = nn.LayerNorm(hidden_size)
        self.linear1 = nn.Linear(hidden_size, 256)
        self.linear2 = nn.Linear(256, hidden_size)

    def forward(self, x_padding, y_padding):
        """x_padding: target sequence (seq-first); y_padding: memory."""
        # 1) Self-attention with residual + norm.
        attn_out = self.self_attn(query=x_padding, key=x_padding, value=x_padding)[0]
        x_padding = self.norm1(x_padding + self.dropout1(attn_out))
        # 2) Cross-attention against the memory sequence.
        attn_out = self.cross_attn(query=x_padding, key=y_padding, value=y_padding)[0]
        x_padding = self.norm2(x_padding + self.dropout2(attn_out))
        # 3) Feed-forward block (inner size fixed at 256).
        ff = self.linear2(self.dropout3(F.relu(self.linear1(x_padding))))
        return self.norm3(x_padding + self.dropout4(ff))
class Temporal_Multimodal_Decoder(nn.Module):
    """LSTM decoder that autoregressively rolls out pred_len relative
    displacements for each of num_modes modes (one output head per mode),
    then scores the modes with a confidence branch."""
    def __init__(self, args):
        super(Temporal_Multimodal_Decoder, self).__init__()
        self.args = args
        self.data_dim = self.args.data_dim
        self.obs_len = self.args.obs_len
        self.pred_len = self.args.pred_len
        self.window_size = self.args.decoder_temporal_window_size
        self.decoder_h_dim = self.args.decoder_latent_size
        self.num_modes = self.args.num_modes
        # Embeds the flattened sliding window of displacements.
        # NOTE(review): sizes assume data_dim == 2 (window_size*2 inputs).
        self.spatial_embedding = nn.Linear(self.window_size*2, self.window_size*4)
        self.decoder = nn.LSTM(self.window_size*4,
                               self.decoder_h_dim,
                               num_layers=1)
        # One position head per mode.
        pred = []
        for _ in range(self.num_modes):
            pred.append(nn.Linear(self.decoder_h_dim,self.data_dim))
        self.hidden2pos = nn.ModuleList(pred)
        norm = "BN"
        ng = 1
        # Confidences
        self.latent_predictions = nn.Linear(self.args.num_modes*self.args.pred_len*self.args.data_dim,
                                            self.decoder_h_dim)
        self.confidences = nn.Sequential(LinearRes(self.decoder_h_dim*2, self.decoder_h_dim*2, norm=norm, ng=ng),
                                         nn.Linear(self.decoder_h_dim*2, self.num_modes))
    def forward(self, traj_rel, state_tuple, num_mode=None, current_centerlines=None):
        """Roll out multimodal predictions.

        Args:
            traj_rel: observed relative displacements; permuted below to
                (window_size, batch_size, data_dim).
            state_tuple: (h, c) initial LSTM state. Only h is reused per
                mode; c is re-zeroed for every mode.
            num_mode: ignored — it is overwritten by the mode loop below.
            current_centerlines: unused here; kept for interface
                compatibility.

        Returns:
            pred_traj_fake_rel: (batch_size, num_modes, pred_len, data_dim)
            conf: softmax confidences, (batch_size, num_modes)
        """
        traj_rel = traj_rel.permute(1,0,2)
        num_displacements, batch_size, data_dim = traj_rel.shape
        state_tuple_h, state_tuple_c = state_tuple
        pred_traj_fake_rel = []
        for num_mode in range(self.num_modes):
            # Fresh copy of the window so each mode rolls out independently.
            traj_rel_ = torch.clone(traj_rel)
            decoder_input = F.leaky_relu(self.spatial_embedding(traj_rel_.permute(1,0,2).contiguous().view(batch_size,-1))) # bs x window_size·2
            decoder_input = decoder_input.unsqueeze(0)
            decoder_input = F.dropout(decoder_input, p=self.args.apply_dropout, training=self.training)
            state_tuple_h_ = torch.clone(state_tuple_h)
            state_tuple_c_ = torch.zeros(tuple(state_tuple_h_.shape)).to(state_tuple_h_)
            curr_pred_traj_fake_rel = []
            for _ in range(self.pred_len):
                output, (state_tuple_h_, state_tuple_c_) = self.decoder(decoder_input, (state_tuple_h_, state_tuple_c_))
                rel_pos = self.hidden2pos[num_mode](output.contiguous().view(-1, self.decoder_h_dim))
                # Slide the window: drop the oldest step, append the new one.
                traj_rel_ = torch.roll(traj_rel_, -1, dims=(0))
                traj_rel_[-1] = rel_pos
                curr_pred_traj_fake_rel.append(rel_pos)
                decoder_input = F.leaky_relu(self.spatial_embedding(traj_rel_.permute(1,0,2).contiguous().view(batch_size,-1))) # bs x window_size·2
                decoder_input = decoder_input.unsqueeze(0)
                decoder_input = F.dropout(decoder_input, p=self.args.apply_dropout, training=self.training)
            curr_pred_traj_fake_rel = torch.stack(curr_pred_traj_fake_rel,dim=0)
            curr_pred_traj_fake_rel = curr_pred_traj_fake_rel.permute(1,0,2)
            pred_traj_fake_rel.append(curr_pred_traj_fake_rel)
        pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0) # num_modes, batch_size, pred_len, data_dim
        pred_traj_fake_rel = pred_traj_fake_rel.permute(1,0,2,3) # batch_size, num_modes, pred_len, data_dim
        # Obtain confidences based on the initial latent state and the predictions
        predictions_latent = self.latent_predictions(pred_traj_fake_rel.contiguous().view(batch_size, -1))
        state_tuple_h = state_tuple_h.squeeze(0)
        conf_latent = torch.cat([state_tuple_h,
                                 predictions_latent],
                                dim=1)
        conf = self.confidences(conf_latent)
        conf = torch.softmax(conf.view(batch_size,-1), dim=1) # batch_size, num_modes
        # Debug guard: softmax rows should always sum to 1.
        if not torch.allclose(torch.sum(conf, dim=1), conf.new_ones((batch_size,))):
            pdb.set_trace()
        return pred_traj_fake_rel, conf
# Aux functions
def relative_to_abs_multimodal(rel_traj, start_pos):
    """Convert multimodal relative displacements into absolute coordinates.

    Args:
        rel_traj: tensor of shape (batch_size, num_modes, seq_len, 2) holding
            per-step displacements.
        start_pos: tensor of shape (batch_size, 2).
            N.B. If rel_traj holds only the predictions, this must be the
            last observation; for a full obs+pred trajectory it must be the
            first observation, since the displacements are reconstructed
            from that position.

    Returns:
        Absolute trajectory, (batch_size, num_modes, seq_len, 2), centred
        around the start position (not map coordinates).
    """
    # Cumulative sum along the time axis turns step displacements into
    # offsets from the start position.
    cumulative = torch.cumsum(rel_traj, dim=2)
    # Broadcast the single start position over modes and time steps.
    anchor = start_pos.unsqueeze(1).unsqueeze(1)
    return cumulative + anchor
1395161242 | #!/usr/bin/env python
# coding: utf-8
# 1. Compare and contrast the float and Decimal classes' benefits and drawbacks.
#
# floats are faster and more memory-efficient, suitable for a wide range of values, but can have precision and rounding issues. Decimals provide precise decimal arithmetic, accurate representation of decimal numbers, but are slower and have a more limited value range. The choice between float and Decimal depends on the specific requirements of the application.
# 2. Decimal('1.200') and Decimal('1.2') are two objects to consider. In what sense are these the same object? Are these just two ways of representing the exact same value, or do they correspond to different internal states?
#
# Decimal('1.200') and Decimal('1.2') represent the same value of 1.2 mathematically. However, internally they have different representations due to the presence or absence of trailing zeros, making them distinct Decimal objects.
# 3. What happens if the equality of Decimal('1.200') and Decimal('1.2') is checked?
#
# In[6]:
from decimal import Decimal
decimal1 = Decimal('1.200')
decimal2 = Decimal('1.2')
print(decimal1 == decimal2)
# 4. Why is it preferable to start a Decimal object with a string rather than a floating-point value?
#
# In[10]:
#example
from decimal import Decimal
float_value = 0.1
decimal_float = Decimal(float_value)
decimal_string = Decimal('0.1')
# In[11]:
print(decimal_float)
# In[12]:
print(decimal_string)
# 5. In an arithmetic phrase, how simple is it to combine Decimal objects with integers?
#
# Decimal objects with integers in arithmetic operations is simple and straightforward. The Decimal class seamlessly handles the interoperability between Decimal objects and integers, allowing you to use standard arithmetic operators without any additional complexity.
# 6. Can Decimal objects and floating-point values be combined easily?
#
# Combining Decimal objects with floating-point values in arithmetic operations is easy and straightforward in Python. The Decimal class seamlessly supports interoperability between Decimal objects and floating-point values, allowing you to use standard arithmetic operators without any complications.
# 7. Using the Fraction class but not the Decimal class, give an example of a quantity that can be expressed with absolute precision.
#
# In[13]:
#The Fraction class in Python allows precise representation of rational numbers without any loss of precision. Here's an example of a quantity that can be expressed with absolute precision using the Fraction class
from fractions import Fraction
fraction = Fraction(4,8)
# In[15]:
print(fraction)
# 8. Describe a quantity that can be accurately expressed by the Decimal or Fraction classes but not by a floating-point value.
#
# In[16]:
#example
from decimal import Decimal
decimal = Decimal('2')/ Decimal('8')
# In[17]:
print(decimal)
# In[19]:
#example
from fractions import Fraction
fraction = Fraction(2,8)
# In[20]:
print(fraction)
# Q9.Consider the following two fraction objects: Fraction(1, 2) and Fraction(1, 2). (5, 10). Is the internal state of these two objects the same? Why do you think that is?
#
# In[25]:
#yes the internal state of these two object are same:
from fractions import Fraction
fractions1 = Fraction(1,2)
fractions2 = Fraction(5,10)
# In[26]:
print(fractions1)
# In[27]:
print(fractions2)
# Q10. How do the Fraction class and the integer type (int) relate to each other? Containment or inheritance?
#
# The Fraction class and the int type have a containment relationship. The Fraction class can work with and contain integer
# In[ ]:
| Rajn013/assignment-020 | Untitled83.py | Untitled83.py | py | 3,685 | python | en | code | 0 | github-code | 36 |
973355858 | from pathlib import Path
import unittest
from lispy import reader
from lispy import rep as step6_file
from lispy.env import Env
from lispy.mal_types import MalList, MalAtom, MalInt
from lispy.mal_types import MalSyntaxException, MalString
class TestStep6(unittest.TestCase):
    """Tests for step 6 of the MAL interpreter: string handling,
    read-string/slurp, eval, str, and atoms (atom/deref/reset!/swap!)."""
    def setUp(self) -> None:
        # Fresh REPL environment per test so definitions don't leak.
        self._repl_env = step6_file.init_repl_env()
    def test_step6_string_unbalanced(self):
        """An unterminated string literal raises a syntax error."""
        with self.assertRaises(MalSyntaxException):
            step6_file.rep('"foo', self._repl_env)
    def test_step6_standard_string(self):
        """Strings round-trip: native() is unescaped, __str__() re-escapes."""
        self.assertEqual(
            '"foo"', step6_file.EVAL(MalString('"foo"'), Env(None)).native()
        )
        self.assertEqual('"foo"', step6_file.rep('"foo"', self._repl_env).__str__())
        self.assertEqual('"foo"', MalString('"foo"').native())
        self.assertEqual('"\\"foo\\""', MalString('"foo"').__str__())
    def test_step6_reader_read_string(self):
        """The reader parses (read-string "...") into a list whose second
        element is the raw string argument."""
        read = reader.read('(read-string "(1 2 (3 4) nil)")')
        self.assertTrue(isinstance(read, MalList))
        arg = read.native()[1]
        self.assertTrue(isinstance(arg, MalString))
        native_str = arg.native()
        self.assertEqual("(1 2 (3 4) nil)", native_str)
    def test_step6_read_string_no_escapes(self):
        """read-string evaluates to the printed form of the parsed input."""
        self.assertEqual(
            "(1 2 (3 4) nil)",
            step6_file.rep('(read-string "(1 2 (3 4) nil)")', self._repl_env),
        )
    def test_step6_slurp(self):
        """slurp returns the file contents as an escaped string."""
        f = Path(__file__).parent / "mal" / "tests" / "test.txt"
        self.assertEqual(
            '"A line of text\\n"', step6_file.rep(f'(slurp "{f}")', self._repl_env)
        )
    def test_step6_eval(self):
        """eval of a read-string form evaluates the parsed expression."""
        self.assertEqual(
            "2", step6_file.rep('(eval (read-string "(+ 1 1)"))', self._repl_env)
        )
    def test_step6_str(self):
        """str concatenates its arguments without separators."""
        self.assertEqual(
            '"abc2def ghi"',
            step6_file.rep('(str "abc" 2 "def" " ghi")', self._repl_env),
        )
    def test_step6_atom_type(self):
        """An atom wraps a value reachable through native()."""
        atom = step6_file.EVAL(MalAtom(MalInt(1)), Env(None))
        self.assertEqual(1, atom.native().native())
    def test_step6_read_atom(self):
        """(atom 1) evaluates to an atom holding 1."""
        atom = step6_file.EVAL(step6_file.READ("(atom 1)"), self._repl_env)
        self.assertEqual(1, atom.native().native())
    def test_step6_atom_deref(self):
        """deref returns the atom's current value."""
        self.assertEqual("1", step6_file.rep("(deref (atom 1))", self._repl_env))
    def test_step6_atom_p(self):
        """atom? is true only for atoms."""
        self.assertEqual("true", step6_file.rep("(atom? (atom 1))", self._repl_env))
        self.assertEqual("false", step6_file.rep("(atom? (+ 1 2))", self._repl_env))
    def test_step6_reset(self):
        """reset! replaces the atom's value and returns the new value."""
        self.assertEqual(
            "3", step6_file.rep("(do (def! a (atom 2)) (reset! a 3))", self._repl_env)
        )
    def test_step6_swap(self):
        """swap! applies a function to the atom's value and stores the result."""
        self.assertEqual(
            "#<function>",
            step6_file.rep("(def! inc3 (fn* (a) (+ 3 a)))", self._repl_env),
        )
        self.assertEqual(
            "(atom 2)", step6_file.rep("(def! a (atom 2))", self._repl_env)
        )
        self.assertEqual("3", step6_file.rep("(swap! a + 1)", self._repl_env))
# Allow running this test module directly (e.g. `python test_step6.py`).
if __name__ == "__main__":
    unittest.main()
| rectalogic/lispy | tests/test_step6.py | test_step6.py | py | 3,164 | python | en | code | 0 | github-code | 36 |
# Read a count and a list of integers, then print whether the number of
# odd index positions equals the number of odd values in the list.
n = int(input())
values = list(map(int, input().split()))
odd_index_count = sum(1 for i in range(len(values)) if i % 2 != 0)
odd_value_count = sum(1 for v in values if v % 2 != 0)
print(odd_index_count == odd_value_count)
19035164489 | import random
import os
def asOrderedList(d):
    """Return the items of mapping *d* as [key, value] pairs sorted by key.

    Keys must be mutually comparable (here they are item-name strings).
    Replaces a manual accumulate-then-sort loop with the sorted() builtin;
    ordering is identical because dict keys are unique.
    """
    return sorted([key, value] for key, value in d.items())
def clear():
    """Clear the terminal screen on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
class Player:
def __init__(self, w):
self.world = w
self.name = input("What is your creature's name? ")
print("Is your creature a carnivore or an herbivore?")
self.diet = input("Herbivores need only find fruit to survive, while carnivores must kill their prey to have meat. ").lower()
while self.diet != 'carnivore' and self.diet!= 'c' and self.diet != 'herbivore' and self.diet != 'h':
self.diet = input('Invalid response. Choose "carnivore" or "herbivore." ')
if self.diet == 'h':
self.diet = 'herbivore'
elif self.diet == 'c':
self.diet = 'carnivore'
w.add_player(self)
self.location = random.choice(self.world.squares)
while self.location.terrain == 'lake':
self.location = random.choice(self.world.squares)
self.home = self.location # The player's home base will be their starting location.
self.alive = True
self.hunger = 100 # If self.hunger reaches 0, the player's health will decrease at each update.
self.maxHealth, self.health = 50, 50
self.maxStrength, self.strength = 10, 10
self.maxSociability, self.sociability = 10, 10
self.maxSpeed, self.speed = 10, 10
self.healthLoss = 2
self.hungerLoss = 5
self.speedPenalty = 0
self.socPenalty = 0
self.intelligence = 0
self.experience = 0
self.abilities = []
self.inventory = {}
self.inventorySize = 0
self.inventoryCap = 10
self.invweight = 0
self.maxinvweight = 20
self.availabledirs = []
self.dirstring = ''
self.defeated = 0
self.friends = []
self.ally = None
self.m = 0
self.going = ''
self.conch = True
self.conchUses = 0
    def update(self):
        """Advance the player by one game turn: expire the conch, apply
        terrain/weather penalties, heal at home or drain stats elsewhere,
        handle hunger/starvation, refresh available exits, and rot food if
        meat has been carried too long."""
        # The conch shell breaks after two uses.
        if self.conchUses >= 2:
            self.conch = False
            self.conchUses = 0
            del self.inventory['conch shell']
            input('Unfortunately, you dropped your conch shell while using it. It is destroyed.')
        if self.going != '':
            print('You go ' + self.going + '.')
            self.going = ''
        # Rebuild the comma-separated list of available exit directions.
        self.dirstring = ''
        for elem in self.availabledirs:
            if self.dirstring == '':
                self.dirstring = elem
            else:
                self.dirstring += ', ' + elem
        if self.ally != None:
            if self.ally.hostility < 0:
                self.ally.hostility = 0
        # We reset the penalties in order to implement the terrain and weather effects
        self.healthLoss = 2
        self.hungerLoss = 5
        self.speedPenalty = 0
        self.socPenalty = 0
        if self.hunger > 100:
            self.hunger = 100
        # Terrain effects
        if self.location.terrain == "desert":
            self.hungerLoss += 5
        elif self.location.terrain == "hills":
            self.speedPenalty += self.maxSpeed // 4
        elif self.location.terrain == "tundra":
            self.healthLoss += 3
        # Weather effects
        if self.world.weather == "rainy":
            self.speedPenalty += self.maxSpeed // 4
        elif self.world.weather == "hailing":
            self.healthLoss += 3
        elif self.world.weather == "snowy":
            self.socPenalty += self.maxSociability // 4
        elif self.world.weather == "drought":
            self.hungerLoss += 5
        # You gain health at home
        if self.location == self.home:
            healthGained = self.maxHealth // 2
            self.health += healthGained
            if self.health > self.maxHealth:
                self.health = self.maxHealth
                # NOTE(review): health is clamped BEFORE the subtraction, so
                # (self.health - self.maxHealth) is always 0 here and the
                # printed gain overstates the actual gain when clamped.
                healthGained -= self.health - self.maxHealth
            print('You gain ' + str(healthGained) + ' health at your home base!')
        else: # Your stats (may) go down elsewhere
            self.health -= self.healthLoss
            self.sociability -= self.socPenalty
            if self.sociability < 0: # No negative stats
                self.sociability = 0
            self.speed -= self.speedPenalty
            if self.speed < 0:
                self.speed = 0
            print()
            print('You lose ' + str(self.healthLoss) + ' health from the terrain and weather.')
            print('Your sociability decreases by ' + str(self.socPenalty) + ' points.')
            print('Your speed decreases by ' + str(self.speedPenalty) + ' points.')
            if self.health <= 0:
                self.die()
        if self.hunger > 0:
            # NOTE(review): evolve() appends 'improved metabolism' in lower
            # case, so this capitalized check may never match — confirm.
            if 'Improved metabolism' in self.abilities: # The "Improved metabolism" ability makes you become hungrier less quickly
                self.hunger -= 5
            else:
                self.hunger -= self.hungerLoss
            if self.hunger < 0:
                self.hunger = 0
        elif self.hunger == 0: # If they player is starving...
            print()
            r = random.randint(0,3) # then they will randomly take damage to health, strength, sociability, or speed
            if r == 0:
                hungerPenalty = self.health // 10
                self.health -= hungerPenalty
                print("You're starving! You lose " + str(hungerPenalty) + " health!")
            elif r == 1:
                hungerPenalty = self.strength // 10
                self.strength -= hungerPenalty
                print("You're starving! You lose " + str(hungerPenalty) + " strength!")
            elif r == 2:
                hungerPenalty = self.sociability // 10
                self.sociability -= hungerPenalty
                print("You're starving! You lose " + str(hungerPenalty) + " sociability!")
            elif r == 3:
                hungerPenalty = self.speed // 10
                self.speed -= hungerPenalty
                print("You're starving! You lose " + str(hungerPenalty) + " speed!")
        if self.hunger < 0: # You can't have negative hunger!
            self.hunger = 0
        # Refresh the list of usable exits from the current square.
        self.availabledirs = []
        for exit in self.location.exits:
            if exit != None:
                self.availabledirs.append(exit)
        if 'meat' in self.inventory:
            self.m += 1
            if self.m == 6: # If you go long enough with meat in your inventory, then it will rot all your food
                self.invweight -= self.inventory['meat'] * self.world.itemWeights['meat']
                self.inventorySize -= self.inventory['meat']
                del self.inventory['meat']
                if 'fruit' in self.inventory:
                    self.invweight -= self.inventory['fruit'] * self.world.itemWeights['fruit']
                    self.inventorySize -= self.inventory['fruit']
                    del self.inventory['fruit']
                self.m = 0
                print('Oh no! You carried meat in your bag for too long. All of your food has gone rotten.')
        else:
            self.m = 0
def showInventory(self):
#clear()
print('Your inventory contains the following items:')
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
weight = self.world.itemWeights[kvp[0]] * kvp[1]
print('\t' + kvp[0] + ' x' + str(kvp[1]) + ', ' + str(weight) + ' weight')
def showAbilities(self):
print('You have the following abilities:')
for ab in self.abilities:
print('\t' + ab)
    def evolve(self):
        """Show the upgrade menu and process one purchase (or 'go back').

        NOTE(review): several menu prices disagree with the amounts actually
        checked/deducted below, some branches never deduct experience, and
        ability names are appended in lower case while other code checks
        capitalized names — flagged inline.
        """
        clear()
        print('Health increase: 5 exp')
        print('Stomach size increase: 5 exp')
        print('Strength increase: 5 exp')
        print('Sociability increase: 5 exp')
        print('Speed increase: 5 exp')
        print('Intelligence increase – unlock new upgrades: 5 exp')
        print('Pouches – can carry more items: 5 exp')
        print('Stronger back – can carry heaver items: 5 exp')
        if self.diet != 'omnivore':
            print('Omnivorous diet — eat any food you find: 10 exp')
        # NOTE(review): these capitalized membership checks don't match the
        # lower-case names appended by the purchase branches below.
        if 'Metabolism increase' not in self.abilities:
            print('Metabolism increase – hunger increases more slowly: 10 exp')
        if 'Fat reserves' not in self.abilities:
            print('Fat reserves – reduced penalty when starving: 10 exp')
        if 'Semiaquatic' not in self.abilities:
            print('Semiaquatic – access watery terrain: 10 exp')
        if 'use items' not in self.abilities:
            print('Use items: 10 exp')
        if self.intelligence >= 8 and 'Item use' not in self.abilities:
            print('Item use: 10 exp')
        if self.intelligence >= 13 and 'Item use' in self.abilities and 'Flexible responding' not in self.abilities:
            print('Flexible responding – more options when you engage with other creatures: 20 exp') # Idk, maybe players will be able to change whether they want to socialize or attack. Also, I just thought that if the player attacks a creature, then the creature's hostility should go up
        if self.intelligence >= 20 and 'Flexible responding' in self.abilities:
            print('Fire: 30 exp')
        print()
        print('Go back.')
        print()
        print('You have ' + str(self.experience) + ' experience points.')
        print()
        transactionCompleted = False
        while not transactionCompleted:
            choice = input('What would you like to improve? ')
            if choice.lower() == 'health increase':
                if self.experience >= 5:
                    self.maxHealth += 8
                    self.health = self.maxHealth
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'use items':
                # NOTE(review): no experience is deducted here despite the
                # 10-exp requirement; two abilities are granted at once.
                if self.experience >= 10:
                    self.abilities.append('use items')
                    self.abilities.append('Item use')
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'stomach size increase':
                # NOTE(review): self.maxHunger is never defined in __init__,
                # so this branch raises AttributeError when taken.
                if self.experience >= 5:
                    self.maxHunger += 5
                    self.hunger = self.maxHunger
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'strength increase':
                if self.experience >= 5:
                    self.maxStrength += 3
                    self.strength = self.maxStrength
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'sociability increase':
                if self.experience >= 5:
                    self.maxSociability += 3
                    self.sociability = self.maxSociability
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'speed increase':
                if self.experience >= 5:
                    self.maxSpeed += 3
                    self.speed = self.maxSpeed
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'intelligence increase':
                if self.experience >= 5:
                    self.intelligence += 4
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'pouches':
                if self.experience >= 5:
                    self.inventoryCap += 3
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'stronger back':
                if self.experience >= 5:
                    self.maxinvweight += 3
                    self.experience -= 5
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'omnivore':
                # NOTE(review): no experience deducted and
                # transactionCompleted is never set, so the loop continues.
                # The menu label ("Omnivorous diet") also doesn't match the
                # accepted input word.
                if self.experience >= 10:
                    self.diet = 'omnivore'
                    self.abilities.append('omnivore')
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'metabolism increase':
                # NOTE(review): menu says 10 exp, but 15 is required/charged;
                # name appended in lower case (see update()'s check).
                if self.experience >= 15:
                    self.abilities.append('improved metabolism')
                    self.experience -= 15
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'fat reserves':
                # NOTE(review): menu says 10 exp, but 15 is required/charged.
                if self.experience >= 15:
                    self.abilities.append('fat reserves')
                    self.experience -= 15
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'semiaquatic':
                # NOTE(review): menu says 10 exp, but 15 is required/charged.
                if self.experience >= 15:
                    self.abilities.append('semiaquatic')
                    self.experience -= 15
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'item use':
                # NOTE(review): menu says 10 exp, but 15 is required/charged.
                if self.experience >= 15:
                    self.abilities.append('item use')
                    self.experience -= 15
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'flexible responding':
                # NOTE(review): menu says 20 exp, requires 25, deducts 30.
                if self.experience >= 25:
                    self.abilities.append('flexible responding')
                    self.experience -= 30
                    transactionCompleted = True
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'fire':
                # NOTE(review): menu says 30 exp, requires 35, deducts
                # nothing; victory() must be defined elsewhere in the file.
                if self.experience >= 35:
                    self.abilities.append('fire')
                    victory()
                else:
                    print('Not enough experience. Try again.')
            elif choice.lower() == 'go back':
                transactionCompleted = True
def fillStats(self, n):
healthGained = self.maxHealth // n
self.health += healthGained
if self.health > self.maxHealth:
self.health = self.maxHealth
print('Your health is now at max!')
else:
print('You gain ' + str(healthGained) + ' health.')
strengthGained = self.maxStrength // n
self.strength += strengthGained
if self.strength > self.maxStrength:
self.strength = self.maxStrength
print('Your strength is now at max!')
else:
print('You gain ' + str(strengthGained) + ' strength.')
sociabilityGained = self.maxSociability // n
self.sociability += sociabilityGained
if self.sociability > self.maxSociability:
self.sociability = self.maxSociability
print('Your sociability is now at max!')
else:
print('You gain ' + str(sociabilityGained) + ' sociability.')
speedGained = self.maxSpeed // n
self.speed += speedGained
if self.speed > self.maxSpeed:
self.speed = self.maxSpeed
print('Your speed is now at max!')
else:
print('You gain ' + str(sociabilityGained) + ' speed.')
    def die(self):
        """Mark the player as dead by clearing the ``alive`` flag."""
        self.alive = False
def eat(self, food):
while food != 'fruit' and food != 'meat':
food = input("Sorry, I didn't catch that. Do you want to eat fruit or meat? ")
print()
if food not in self.location.items and food not in self.inventory:
print("There's no " + food + " for you to eat here!")
return
if self.location.terrain == 'forest':
if 'big stick' not in self.inventory:
print("You'll need a stick or something to get the food out of the trees.")
return
elif 'big stick' in self.inventory and 'Item use' not in self.abilities:
print('You need to unlock the "item use" ability before that stick will help you!')
return
if food == 'fruit':
if self.diet == 'herbivore' or self.diet == 'omnivore':
if 'fruit' in self.location.items:
self.location.items['fruit'] -= 1
if self.location.items['fruit'] <= 0:
del self.location.items['fruit']
elif 'fruit' in self.inventory:
self.inventory['fruit'] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights['fruit']
if self.inventory['fruit'] <= 0:
del self.inventory['fruit']
print('You eat the fruit.')
print()
self.fillStats(2)
self.hunger += 20
return True
else:
print("You can't eat that! Bleh!")
return
elif food == 'meat':
if self.diet == 'carnivore' or self.diet == 'omnivore':
if 'meat' in self.location.items:
self.location.items['meat'] -= 1
if self.location.items['meat'] <= 0:
del self.location.items['meat']
elif 'meat' in self.inventory:
self.inventory['meat'] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights['meat']
if self.inventory['meat'] <= 0:
del self.inventory['meat']
print('You eat the meat.')
self.fillStats(1)
self.hunger += 30
return True
else:
print("You can't eat that! Bleh!")
return
def pickup(self, item):
if self.location.terrain == 'forest':
if 'big stick' not in self.inventory:
if item == 'big stick':
print("Good thing that's a big stick...you're just able to pull it out of a tree without needing another big stick!")
else:
print("You'll need a stick or something to get the item out of the trees.")
return
elif 'big stick' in self.inventory and 'Item use' not in self.abilities and 'use items' not in self.abilities:
print('You need to unlock the "item use" ability before that stick will help you!')
return
if item in self.location.items:
if self.invweight + self.world.itemWeights[item] > self.maxinvweight:
s = self.invweight + self.world.itemWeights[item] - self.maxinvweight
print("This item is too heavy for you to pick up! Leave it behind or use the 'drop' command to free up " + str(s) + " kg in your inventory. ")
elif self.inventorySize < self.inventoryCap:
if item in self.location.items:
if item in self.inventory:
self.inventory[item] += 1
self.invweight += self.world.itemWeights[item]
else:
self.inventory[item] = 1
self.invweight += self.world.itemWeights[item]
self.inventorySize += 1
self.location.items[item] -= 1
if self.location.items[item] <= 0:
del self.location.items[item]
print('You pick up the ' + item + '.')
else:
print('Your inventory is already full!')
else:
print('There is no such item here.')
def drop(self,item):
if item in self.inventory:
if item in self.location.items:
self.location.items[item] += 1
else:
self.location.items[item] = 1
self.inventory[item] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights[item]
if self.inventory[item] <= 0:
del self.inventory[item]
print('You drop the ' + item + '.')
else:
print('There is no such item in your inventory.')
def inspect(self, item):
if item == 'creature' or item in self.world.creatureNames:
if self.location.creature == None:
print('There is no creature here.')
else:
print("The creature is a " + self.location.creature.name + '!')
print("It has " + str(self.location.creature.health) + " health, " + str(self.location.creature.speed) + " speed, " + str(self.location.creature.strength) + " strength, and " + str(self.location.creature.hostility) + " hostility.")
if self.ally != None:
print("Your ally is a " + self.ally.name + '!')
print("It has " + str(self.ally.health) + " health, " + str(self.ally.speed) + " speed, " + str(self.ally.strength) + " strength, and " + str(self.ally.hostility) + " hostility.")
#$$$
elif item in self.location.items or item in self.inventory:
if item == 'sticky sap':
print("Sticky sap from a tree. Use it during an encounter to decrease the other creature's speed.")
elif item == 'poison berries':
print("Poisonous berries. Use them during an encounter to decrease the other creature's health and strength.")
elif item == 'big leaf':
print('A large, surprisingly sturdy leaf. It could protect you from the weather.')
elif item == 'healing salve':
print('A healing salve from a plant. Use it to restore your stats.')
elif item == 'flowers':
print("Pretty flowers. Use them during an encounter to decrease the other creature's hostility.")
elif item == 'big stick':
print('A large stick. It will let you get items out of trees.')
elif item == 'nesting materials':
print('Materials for building a nest. Use them to move your home base.')
elif item == 'fruit':
print('A fruit. If you are an herbivore or omnivore, then eating this will reduce hunger and restore your stats.')
elif item == 'meat':
print('A piece of meat. If you are a carnivore or omnivore, then eating this will reduce hunger and restore your stats.')
elif item == 'seaweed':
print('A big nasty ball of seaweed. Use it during a fight to distract an animal and reduce its strength.')
elif item == 'driftwood':
print('A large piece of driftwood. Use it during a fight to try to block your opponent\'s attacks.')
elif item == 'conch shell':
print('A conch shell. Use it on land to calm the creatures around you and temporarily decrease their hostility.')
else:
print('There is nothing by that name here.')
def useItem(self, item):
if 'Item use' not in self.abilities and 'use items' not in self.abilities:
print('You need to unlock the "Item use" ability before you can use items!')
return False
else:
if item in self.inventory:
if item != 'conch shell':
print('You use the ' + item + '.')
if item == 'fruit':
self.eat(item)
elif item == 'meat':
self.eat(item)
elif item == 'healing salve':
print('All your stats have been restored!')
self.fillStats(1)
self.inventory['healing salve'] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights['healing salve']
if self.inventory['healing salve'] <= 0:
del self.inventory['healing salve']
elif item == 'big leaf':
print('You are now protected from the weather!')
self.world.weather = 'clear'
self.inventory['big leaf'] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights['big leaf']
if self.inventory['big leaf'] <= 0:
del self.inventory['big leaf']
elif item == 'nesting materials':
if self.location == self.home:
print("You're already at home!")
else:
print('You have established a new home at the current location!')
self.home = self.location
self.inventory['nesting materials'] -= 1
self.inventorySize -= 1
self.invweight -= self.world.itemWeights['nesting materials']
if self.inventory['nesting materials'] <= 0:
del self.inventory['nesting materials']
elif item == 'conch shell':
if self.location.terrain == 'lake':
input("You can't use that here! Sea animals don't care for conch shells. Go to land to use this.")
else:
print('The sound of the conch calms the creatures around you, and briefly decreases their hostility!')
self.world.hostilityDec = True
self.conchUses += 1
else:
print("Now's not the time to use that!")
return False
return True
elif item in self.location.items:
print('You must pick an item up before you can use it!')
else:
print("There's no item by that name in your inventory.")
return False
def useBattleItem(self, item, target):
if item in self.inventory:
if item == 'sticky sap':
target.speed -= target.speed // 2
self.inventory['sticky sap'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['sticky sap']
if self.inventory['sticky sap'] <= 0:
del self.inventory['sticky sap']
elif item == 'poison berries':
target.health -= target.health // 4
target.strength -= target.strength // 4
self.inventory['poison berries'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['poison berries']
if self.inventory['poison berries'] <= 0:
del self.inventory['poison berries']
elif item == 'healing salve':
self.fillStats()
self.inventory['healing salve'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['healing salve']
if self.inventory['healing salve'] <=0:
del self.inventory['healing salve']
elif item == 'flowers':
target.hostility -= target.hostility // 3
self.inventory['flowers'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['flowers']
if self.inventory['flowers'] <=0:
del self.inventory['flowers']
elif item == 'seaweed':
target.strength -= random.randint(2,5)
self.inventory['seaweed'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['seaweed']
if self.inventory['seaweed'] <=0:
del self.inventory['seaweed']
elif item == 'driftwood':
self.inventory['driftwood'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['driftwood']
if self.inventory['driftwood'] <=0:
del self.inventory['driftwood']
return True
elif item == 'seaweed':
target.strength -= 2*target.level
self.inventory['seaweed'] -= 1
self.inventorySize -= 1
self.invWeight -= self.world.itemWeights['seaweed']
if self.inventory['seaweed'] <=0:
del self.inventory['seaweed']
def go(self, dir):
if dir.lower() == 'north':
if self.location.exits['north'] == None:
print('You may not go north. Try again.')
return False
elif self.location.exits['north'].terrain == 'lake':
if 'semiaquatic' not in self.abilities: # You have to have the "Semiaquatic" skill to access lake terrain
print('There is water in that direction, and you cannot swim. Try again.')
return False
else:
self.location = self.location.exits['north']
return True
else:
self.going = 'north'
self.location = self.location.exits['north']
return True
if dir.lower() == 'south':
if self.location.exits['south'] == None:
print('You may not go south. Try again.')
return False
elif self.location.exits['south'].terrain == 'lake':
if 'semiaquatic' not in self.abilities:
print('There is water in that direction, and you cannot swim. Try again.')
return False
else:
self.location = self.location.exits['south']
return True
else:
self.going = 'south'
self.location = self.location.exits['south']
return True
if dir.lower() == 'east':
if self.location.exits['east'] == None:
print('You may not go east. Try again.')
return False
elif self.location.exits['east'].terrain == 'lake':
if 'semiaquatic' not in self.abilities:
print('There is water in that direction, and you cannot swim. Try again.')
return False
else:
self.location = self.location.exits['east']
return True
else:
self.going = 'east'
self.location = self.location.exits['east']
return True
if dir.lower() == 'west':
if self.location.exits['west'] == None:
print('You may not go west. Try again.')
return False
elif self.location.exits['west'].terrain == 'lake':
if 'semiaquatic' not in self.abilities:
print('There is water in that direction, and you cannot swim. Try again.')
return False
else:
self.location = self.location.exits['west']
return True
else:
self.going = 'west'
self.location = self.location.exits['west']
return True
else:
print("Sorry, I don't understand. Choose north, south, east or west.")
return False
def stats(self):
if self.diet == 'herbivore':
print("You are an herbivore.")
elif self.diet == 'carnivore':
print("You are a carnivore.")
print("Your location is " + str(self.location.coordinates))
print("Hunger = " + str(self.hunger))
print("Health = " + str(self.health))
print('Type: \n \t "all stats" for all stats; \n \t "inventory" for abilities and inventory; \n \t "location" for details on location')
    def allstats(self):
        """Print the full player status: diet, exits, stats, abilities, inventory and progress."""
        if self.diet == 'herbivore':
            print("You are an herbivore.")
        elif self.diet == 'carnivore':
            print("You are a carnivore.")
        print("You may travel " + self.dirstring +".")
        # NOTE(review): this prints the inventory size under a "You may travel"
        # label — looks like a copy-paste slip (inventory size is printed again
        # below). Confirm what this line was meant to report.
        print("You may travel " + str(self.inventorySize))
        print("Hunger = " + str(self.hunger))
        print("Health = " + str(self.health))
        print("Strength = " + str(self.strength))
        print("Sociability = " + str(self.sociability))
        print("Speed = " + str(self.speed))
        print("Intelligence = " + str(self.intelligence))
        print("Abilities = " + str(self.abilities))
        print("Inventory = " + str(self.inventory))
        print("Inventory size = " + str(self.inventorySize))
        print("Inventory cap = " + str(self.inventoryCap))
        print("Inventory weight = " + str(self.invweight))
        print("Inventory max weight = " + str(self.maxinvweight))
        print("Friends: " + str(len(self.friends)))
        print("Defeated: " + str(self.defeated))
def attack(self, creature):
if self.location.creature == None:
print('There is no creature here.')
return
else:
fleeing = False
defense = False
while self.health > 0 and creature.health > 0:
clear()
print('Creature health: ' + str(creature.health))
print('Creature strength: ' + str(creature.strength))
print('Creature hostility: ' + str(creature.hostility))
print()
print('Health: ' + str(self.health))
print('Strength: ' + str(self.strength))
print('You may:')
print('\t attack')
if 'item use' in self.abilities:
print('\t use item')
print('\t flee')
choice = input('What will you do? ')
choice = choice.lower()
while choice != 'attack' and choice != 'flee' and 'item' != choice:
if 'item use' in self.abilities:
print('Invalid command. Choose "attack," "item" or "flee."')
else:
print('Invalid command. Choose "attack" or "flee."')
choice = input('What will you do? ')
while 'item' in choice.lower() and len(self.inventory) == 0:
print('Your inventory is empty!')
choice = input('What will you do? ')
while 'item' in choice.lower() and 'item use' not in self.abilities == 0:
print('You can\'t do that!')
choice = input('What will you do? ')
print()
if self.speed >= creature.speed:
# If the player is faster, the player goes first
if choice.lower() in 'attack':
attackStrength = random.randint(self.strength // 2, self.strength)
print("You attack!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= self.strength
creature.hostility += 3
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
fleeing = True
break
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
print(random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...']))
else:
# If the creature is faster, the creature goes first
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
fleeing = True
break
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
creatureChoice = random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...'])
if choice.lower() in 'attack':
attackStrength = random.randint(self.strength // 2, self.strength)
print("You attack!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= self.strength
creature.hostility += 3
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
if type(creatureChoice) == str:
# If the creature does nothing, we say so at the end of the turn.
print(creatureChoice)
print()
if self.ally != None:
if random.choice([True, False]):
if choice.lower() in 'attack':
attackStrength = random.randint(self.ally.strength // 2, self.ally.strength)
print("Your ally attacks!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= attackStrength
creature.hostility += 3
input('Press enter to continue.')
print()
if fleeing == True:
r = random.choice(self.world.squares)
if creature in self.world.aquaticCreatures:
while r.creature != None and r.terrain != 'lake':
r.random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
else:
while r.creature != None:
r = random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
elif creature.health <= 0 and self.health > 0:
print("You've defeated the creature!")
print("You gain " + str(creature.experience) + " experience!")
self.experience += creature.experience
self.defeated += 1
self.location.creature = None
self.location.items['meat'] = random.randint(1,3)
if random.random() < .15:
if self.location == 'lake':
itemDrop = random.choice(self.world.waterItems)
else:
itemDrop = random.choice(self.world.landItems)
print('The creature dropped an item!')
if itemDrop in self.location.items:
self.location.items[itemDrop] += 1
else:
self.location.items[itemDrop] = 1
elif self.health <= 0:
self.die()
return True
def befriend(self, creature):
if self.location.creature == None:
print('There is no creature here.')
return
else:
fleeing = False
defense = False
while self.health > 0 and creature.hostility > 0:
clear()
print('Creature health: ' + str(creature.health))
print('Creature strength: ' + str(creature.strength))
print('Creature hostility: ' + str(creature.hostility))
print()
print('Health: ' + str(self.health))
print('Sociability: ' + str(self.sociability))
print('You may:')
print('\t befriend')
if 'item use' in self.abilities:
print('\t use item')
print('\t flee')
choice = input('What will you do? ')
while choice.lower() not in 'befriend' and choice.lower() not in 'flee' and 'item' not in choice.lower():
if 'item use' in self.abilities: #why say "not in 'befriend'?
print('Invalid command. Choose "befriend," "item" or "flee."')
else:
print('Invalid command. Choose "befriend" or "flee."')
choice = input('What will you do? ')
if 'item' in choice.lower() and len(self.inventory) == 0:
print('Your inventory is empty!')
choice = input('What will you do? ')
if 'item' in choice.lower() and 'item use' not in self.abilities == 0:
print('You can\'t do that!')
choice = input('What will you do? ')
print()
if self.speed >= creature.speed:
# If the player is faster, the player goes first
if choice.lower() in 'befriend' and choice.lower() != 'f':
befriendSuccess = random.randint(self.sociability // 2, self.sociability)
print("You try to befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
fleeing = True
break
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
print(random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...']))
else:
# If the creature is faster, the creature goes first
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
break
fleeing = True
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
creatureChoice = random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...'])
if choice.lower() in 'befriend' and choice.lower() != 'f':
befriendSuccess = random.randint(self.sociability // 2, self.sociability)
print("You try to befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
if type(creatureChoice) == str:
# If the creature does nothing, we say so at the end of the turn.
print(creatureChoice)
print()
if self.ally != None:
if random.choice([True, False]):
if choice.lower() in 'befriend':
allySociability = 100 // self.ally.hostility
if allySociability < 0:
allySociability = 0
befriendSuccess = random.randint(allySociability // 2, allySociability)
print("Your ally helps befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
input('Press enter to continue.')
print()
if fleeing == True:
r = random.choice(self.world.squares)
if creature in self.world.aquaticCreatures:
while r.creature != None and r.terrain != 'lake':
r.random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
else:
while r.creature != None:
r = random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
elif creature.hostility <= 0 and self.health > 0:
print("You've befriended the creature!")
print("You gain " + str(creature.experience) + " experience!")
self.experience += creature.experience
self.friends.append(creature)
creature.befriended = True
if random.random() < .15:
if self.location == 'lake':
itemDrop = random.choice(self.world.waterItems)
else:
itemDrop = random.choice(self.world.landItems)
print('The creature dropped an item!')
if itemDrop in self.location.items:
self.location.items[itemDrop] += 1
else:
self.location.items[itemDrop] = 1
elif self.health <= 0:
self.die()
return True
def flexibleResponse(self, creature):
if self.location.creature == None:
print('There is no creature here.')
return
else:
fleeing = False
defense = False
while self.health > 0 and (creature.hostility > 0 or creature.health > 0):
clear()
print('Creature health: ' + str(creature.health))
print('Creature strength: ' + str(creature.strength))
print('Creature hostility: ' + str(creature.hostility))
print()
print('Health: ' + str(self.health))
print('Strength: ' + str(self.strength))
print('Sociability: ' + str(self.sociability))
print('You may:')
print('\t attack')
print('\t befriend')
if 'item use' in self.abilities:
print('\t use item')
print('\t flee')
choice = input('What will you do? ')
while choice.lower() not in 'attack' and choice.lower() not in 'befriend' and choice.lower() not in 'flee' and 'item' not in choice.lower():
if 'item use' in self.abilities:
print('Invalid command. Choose "attack," "befriend," "item" or "flee."')
else:
print('Invalid command. Choose "attack," "befriend," or "flee."')
choice = input('What will you do? ')
if 'item' in choice.lower() and len(self.inventory) == 0:
print('Your inventory is empty!')
choice = input('What will you do? ')
if 'item' in choice.lower() and 'item use' not in self.abilities == 0:
print('You can\'t do that!')
choice = input('What will you do? ')
print()
if self.speed >= creature.speed:
# If the player is faster, the player goes first
if choice.lower() in 'attack':
attackStrength = random.randint(self.strength // 2, self.strength)
print("You attack!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= attackStrength
creature.hostility += 3
elif choice.lower() in 'befriend' and choice.lower() != 'f':
befriendSuccess = random.randint(self.sociability // 2, self.sociability)
print("You try to befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
fleeing = True
break
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
print(random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...']))
else:
# If the creature is faster, the creature goes first
creatureAttackChance = creature.hostility * .1
creatureChoice = random.random()
if creatureChoice < creature.fleeRate:
print("The creature flees!")
fleeing = True
break
elif creatureChoice < creatureAttackChance + creature.fleeRate:
creatureAttackStrength = random.randint(creature.strength // 2, creature.strength)
print("The creature attacks!")
if defense == True:
if random.random() < 0.5:
creatureAttackStrength = 0
print('Your driftwood barrier protects you!')
print("You take " + str(creatureAttackStrength) + " damage!")
self.health -= creatureAttackStrength
else:
creatureChoice = random.choice(['The creature does nothing!', 'The creature awaits your next move.', 'The creature is watching you closely...'])
if choice.lower() in 'attack':
attackStrength = random.randint(self.strength // 2, self.strength)
print("You attack!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= attackStrength
creature.hostility += 3
elif choice.lower() in 'befriend' and choice.lower() != 'f':
befriendSuccess = random.randint(self.sociability // 2, self.sociability)
print("You try to befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
elif 'item' in choice.lower():
print("Items: ")
orderedInventory = asOrderedList(self.inventory)
for kvp in orderedInventory:
print('\t' + kvp[0] + ' x' + str(kvp[1]))
itemChoice = input('Pick an item. ')
if self.useBattleItem(itemChoice, creature):
defense = True
elif choice.lower() in 'flee':
print("You flee!")
break
if type(creatureChoice) == str:
# If the creature does nothing, we say so at the end of the turn.
print(creatureChoice)
print()
if self.ally != None:
if random.choice([True, False]):
if choice.lower() in 'attack':
attackStrength = random.randint(self.ally.strength // 2, self.ally.strength)
print("Your ally attacks!")
print("The creature takes " + str(attackStrength) + " damage!")
print("The creature's hostility increases!")
creature.health -= attackStrength
creature.hostility += 3
elif choice.lower() in 'befriend':
allySociability = 100 // self.ally.hostility
if allySociability < 0:
allySociability = 0
befriendSuccess = random.randint(allySociability // 2, allySociability)
print("Your ally helps befriend the creature!")
print("The creature's hostility decreases!")
creature.hostility -= befriendSuccess
input('Press enter to continue.')
print()
if fleeing == True:
r = random.choice(self.world.squares)
if creature in self.world.aquaticCreatures:
while r.creature != None and r.terrain != 'lake':
r.random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
else:
while r.creature != None:
r = random.choice(self.world.squares)
r.creature = creature
creature.location = r
self.location.creature = None
elif creature.health <= 0 and self.health > 0:
print("You've defeated the creature!")
print("You gain " + str(creature.experience) + " experience!")
self.experience += creature.experience
self.defeated += 1
self.location.creature = None
self.location.items['meat'] = random.randint(1,3)
if random.random() < .15:
if self.location.terrain == 'lake':
itemDrop = random.choice(self.world.waterItems)
else:
itemDrop = random.choice(self.world.landItems)
print('The creature dropped an item!')
if itemDrop in self.location.items:
self.location.items[itemDrop] += 1
else:
self.location.items[itemDrop] = 1
input()
elif creature.hostility <= 0 and self.health > 0:
print("You've befriended the creature!")
print("You gain " + str(creature.experience) + " experience!")
self.experience += creature.experience
self.friends.append(creature)
creature.befriended = True
if random.random() < .15:
if self.location.terrain == 'lake':
itemDrop = random.choice(self.world.waterItems)
else:
itemDrop = random.choice(self.world.landItems)
print('The creature dropped an item!')
if itemDrop in self.location.items:
self.location.items[itemDrop] += 1
else:
self.location.items[itemDrop] = 1
elif self.health <= 0:
self.die()
return True
def recruit(self):
if self.location.creature == None:
print('There is no creature here for you to befriend!')
elif self.ally != None:
print('You need to dismiss your ally before you recruit a new one!')
else:
if self.location.creature in self.friends:
self.ally = self.location.creature
print('You have allied your friend the ' + self.ally.name + '! Your ally will follow you around and fight with you.')
else:
print('You must befriend a creature before it will be your ally!')
def locationDets(self):
print('Location coordinates: ' + str(self.location.coordinates))
print('Terrain: ' + self.location.terrain)
print('Weather: ' + self.location.weather)
self.location.availableDirs()
| aimalanos/Irtiqa | Player.py | Player.py | py | 64,055 | python | en | code | 0 | github-code | 36 |
31298715713 | class Solution(object):
def maximalSquare(self, matrix):
maximal = 0
m = len(matrix)
n = len(matrix[0])
squreSizeMemo = [[0 for i in range(n+1)] for j in range(m+1)]
for i in range(m-1, -1, -1):
for j in range(n-1, -1, -1):
if (matrix[i][j] == "1"):
squreSizeMemo[i][j] = min(squreSizeMemo[i+1][j], squreSizeMemo[i][j+1], squreSizeMemo[i+1][j+1]) + 1
if maximal < squreSizeMemo[i][j]: maximal = squreSizeMemo[i][j]
return maximal*maximal
# Ad-hoc smoke test: the largest all-"1" square in this matrix is 2x2, so the
# expected printed area is 4.
s = Solution()
print(s.maximalSquare([["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]))
| shwjdgh34/algorithms-python | leetcode/221.py | 221.py | py | 714 | python | en | code | 2 | github-code | 36 |
21091300951 | import streamlit as st
import scraper
# Default ticker symbols offered in the info selectbox below.
stock = ['AAPL', 'AMZN', 'INTC', 'GOOG', 'CSCO']
search_btn = False
# Optional free-text ticker search in the sidebar.
if st.sidebar.checkbox("Deseja procurar alguma ação?"):
    symbol = st.sidebar.text_input("Dígite o símbolo da ação desejada")
    # NOTE(review): `symbol`, `new_company_info` and the 'Buscar' button are
    # only defined inside this branch, yet get_new()/main() reference them —
    # confirm Streamlit's rerun flow guarantees they exist before use.
    if len(symbol) == 4:
        new_company_info = scraper.fetch_info(symbol)
        stock.append(symbol)
        search_btn = st.sidebar.button('Buscar')
def get_new(symbol):
    """Render the chart and scraped info for a newly searched ticker *symbol*.

    Reads module-level state set in the sidebar section: ``new_company_info``
    (fetched when the user typed a 4-letter symbol), ``chart_data`` and
    ``stock_symbol_info``. Returns the Streamlit line-chart element, or None
    when no company info was fetched.
    """
    # Identity check for None instead of the non-idiomatic `!= None`.
    if new_company_info is not None:
        st.header(symbol)
        new_graph = scraper.fetch_company_data_history(chart_data, symbol)
        my_chart = st.line_chart(new_graph[chart_data], height=400, width=400)
        # NOTE(review): the header shows `stock_symbol_info` while the text
        # below shows info for `symbol` — confirm the mismatch is intended.
        st.header(f'Informações: {stock_symbol_info}')
        st.text(scraper.fetch_info(symbol))
        return my_chart
# Sidebar selector for which stock's textual info to display ('' = none).
stock_symbol_info = st.sidebar.selectbox(
    'Informações da ação',
    ['', *stock],
)
# Companies plotted on the main chart (all selected by default).
stock_chart = st.sidebar.multiselect(
    'Ações para mostrar no gráfico',
    scraper.DEFAULT_COMPANIES,
    default=scraper.DEFAULT_COMPANIES
)
# Which series to plot: traded volume or adjusted close price.
chart_data = st.sidebar.radio(
    'Gráfico do volume ou da cotação de fechamento ajustado', ('Volume', 'Adj Close')
)
def main():
    """Render the page: searched-ticker view, or the default chart + info."""
    if search_btn:
        get_new(str(symbol))
        return
    st.header('Gráficos')
    if stock_chart:
        scraper.render_graph(chart_data, [*stock_chart])
    if stock_symbol_info:
        st.header(f'Informações: {stock_symbol_info}')
        st.text(scraper.fetch_info(stock_symbol_info))
# Script entry point.
if __name__ == "__main__":
    main()
| rodrigoaqueiroz/laraia-yahoo-finance | main.py | main.py | py | 1,440 | python | pt | code | 0 | github-code | 36 |
from jinja2 import Environment, FileSystemLoader, select_autoescape
# Shared Jinja2 environment: loads templates from ./templates and
# auto-escapes HTML/XML output.
env = Environment(
    loader=FileSystemLoader('templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
def render_run_plan(workout, routes, sunrise_sunset, forecast, dress):
    """Render the run-plan HTML template with the supplied context values."""
    template = env.get_template('run_plan.html')
    return template.render(
        workout=workout,
        routes=routes,
        sunrise_sunset=sunrise_sunset,
        forecast=forecast,
        dress=dress,
    )
import ast
import os
# Third party imports
from setuptools import find_packages, setup
# Absolute path of the directory containing this setup.py.
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version(module='spyder_reports'):
    """Return the version string parsed from ``<module>/_version.py``.

    Looks for a ``VERSION_INFO = (x, y, z)`` line and joins the tuple with
    dots. Raises RuntimeError when no such line exists (the original code
    left ``version`` unbound and raised a confusing NameError in that case).
    No f-strings: the classifiers below still advertise Python 2.7 support.
    """
    with open(os.path.join(HERE, module, '_version.py'), 'r') as f:
        data = f.read()
    for line in data.split('\n'):
        if line.startswith('VERSION_INFO'):
            version_tuple = ast.literal_eval(line.split('=')[-1].strip())
            return '.'.join(map(str, version_tuple))
    raise RuntimeError('VERSION_INFO not found in %s/_version.py' % module)
def get_description():
    """Return the long description read from README.rst."""
    with open(os.path.join(HERE, 'README.rst'), 'r') as f:
        return f.read()
# Runtime dependencies of the plugin.
REQUIREMENTS = ['spyder>=3.2.0', 'pweave', 'matplotlib']
# Package metadata and build configuration (setuptools).
setup(
    name='spyder-reports',
    version=get_version(),
    keywords=['Spyder', 'Plugin'],
    url='https://github.com/spyder-ide/spyder-reports',
    license='MIT',
    author='Spyder Project Contributors',
    author_email='admin@spyder-ide.org',
    description='Spyder-IDE plugin for Markdown reports using Pweave.',
    long_description=get_description(),
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=REQUIREMENTS,
    include_package_data=True,
    package_data={'spyder_reports.utils': ['*.md']},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ])
| spyder-ide/spyder-reports | setup.py | setup.py | py | 1,820 | python | en | code | 72 | github-code | 36 |
import ast
from django.db.models import Q
from django.db import transaction
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError
from rest_framework.generics import ListAPIView
from common_config.api_code import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_OK, HTTP_500_INTERNAL_SERVER_ERROR
from common_config.api_message import ADD_SERVICE, UPDATE_SERVICE, INVALID_PAGE_SIZE, \
DELETE_SERVICE, EXTRA_QUERY_PARAMS, INVALID_PAGE_NUMBER, INVALID_BOOLEAN_FLAG, BLANK_PARAM, INVALID_SORT_BY, \
INVALID_SORT_BY_FIELD_PARAM, REQUIRED_PARAMS, INVALID_STATUS_FILTER, INVALID_SERVICE_IMAGE_ID
from common_config.constant import SERVICE_CATEGORY
from common_config.logger.logging_handler import logger
from common_config.generics import get_object_or_404
from utils.api_response import APIResponse
from utils.permissions import IsAuthorized
from utils.pagination import Pagination
from utils.views.service import ServiceListCreateMixin, ServiceRetrieveUpdateDeleteMixin
from services.models.service import Service
from services.serializers.service import ServiceCreateSerializer, ServiceViewSerializer, ServiceListSerializer, \
ServiceUpdateSerializer
from price_groups.tasks.store_service import linked_services_to_store_task, linked_service_and_options_to_store_task
class ServiceListCreateView(ServiceListCreateMixin):
    """
    An Api View which provides a method to add new service or view list services.
    Accepts the following GET/POST header parameters: access token
    Returns the success/fail message.
    """
    queryset = Service.objects.all()
    serializer_class = ServiceCreateSerializer
    pagination_class = Pagination
    permission_classes = (IsAuthenticated, IsAuthorized,)
    permission_required = ('add_service', 'list_service',)
    # Query-string keys accepted by GET; anything else is rejected.
    query_filter_params = ["is_active", "include_deleted", "page", "page_size", "status", "sort_by", "search",
                           "sort_by_field"]
    def __init__(self, **kwargs):
        """Initialise per-request validation state (errors, parsed params)."""
        super().__init__(**kwargs)
        self.errors = dict()
        self.params = dict()
    def validate_query_param(self, page_size, page):
        """Validate GET query params into self.errors (keyed by param name).

        Mutates self.params in place (e.g. parses 'status' into a list) and
        may raise via IsAuthorized.has_include_deleted_permission when
        include_deleted is requested without permission.
        """
        # check pre define query parameter if contain extra query param then raise error message
        if len(self.params) > 0 and not all(key in self.query_filter_params for key in self.params.keys()):
            extra_param = [key for key in self.params if key not in self.query_filter_params]
            self.errors.setdefault("message", []).append(EXTRA_QUERY_PARAMS.format(extra_param))
        # check page size must number
        if "page_size" in self.params and not page_size.isnumeric():
            self.errors.setdefault("page_size", []).append(INVALID_PAGE_SIZE)
        if "page" in self.params and not page.isnumeric():
            self.errors.setdefault("page", []).append(INVALID_PAGE_NUMBER)
        if "status" in self.params:
            try:
                # Parse e.g. "[1, 2]" into a Python list.
                self.params['status'] = ast.literal_eval(self.params['status'])
            except Exception as err:
                self.errors.setdefault("status", []).append(INVALID_STATUS_FILTER.format(
                    type(self.params['status']).__name__))
            # NOTE(review): when literal_eval fails, this second check appends
            # a duplicate error for the same param — confirm that is intended.
            if not isinstance(self.params['status'], list):
                self.errors.setdefault("status", []).append(INVALID_STATUS_FILTER.format(
                    type(self.params['status']).__name__))
        if "is_active" in self.params:
            try:
                # NOTE(review): eval() on a client-supplied query param is
                # risky; a strict check against {'True', 'False'} would be safer.
                eval(self.params['is_active'])
            except Exception as err:
                self.errors.setdefault("is_active", []).append(
                    INVALID_BOOLEAN_FLAG.format("is_active", self.params['is_active']))
        if "sort_by" in self.params:
            if self.params['sort_by'] == "":
                self.errors.setdefault("sort_by", []).append(BLANK_PARAM)
            elif self.params['sort_by'].lower() not in ["asc", "desc"]:
                self.errors.setdefault("sort_by", []).append(INVALID_SORT_BY.format(self.params['sort_by']))
        if "search" in self.params and self.params['search'] == "":
            self.errors.setdefault("search", []).append(BLANK_PARAM)
        if "sort_by_field" in self.params and self.params['sort_by_field'] not in ["name", "description", "status",
                                                                                   "price"]:
            self.errors.setdefault("sort_by_field", []).append(INVALID_SORT_BY_FIELD_PARAM)
        # sort_by_field is only meaningful together with sort_by.
        if "sort_by_field" in self.params and "sort_by" not in self.params:
            self.errors.setdefault("sort_by", []).append(REQUIRED_PARAMS)
        if "include_deleted" in self.params:
            try:
                # NOTE(review): same eval() concern as is_active above.
                eval(self.params['include_deleted'])
            except Exception as err:
                self.errors.setdefault("include_deleted", []).append(
                    INVALID_BOOLEAN_FLAG.format("include_deleted", self.params['include_deleted']))
            else:
                if not self.errors:
                    # validate view soft deleted object view permission
                    IsAuthorized.has_include_deleted_permission(self.request, "list_service")
    def filter_queryset(self, params):
        """Build and return the filtered/ordered Service queryset from params.

        Supports is_active/status filters, name/description/category search,
        and ordering by name, status (by display label), description or price;
        defaults to created_on descending.
        """
        filter_kwargs = {'is_active': True}
        if "is_active" in params and params['is_active'] in ['False']:
            filter_kwargs['is_active'] = False
        if "status" in params:
            filter_kwargs['status__in'] = params.get('status')
        if "sort_by_field" in params:
            if params['sort_by_field'] == "name":
                sort_by_field = "name"
            elif params['sort_by_field'] == "status":
                STATUS_CHOICE = Service.STATUS_CHOICES
                # sort service status
                service_status = sorted(STATUS_CHOICE, key=lambda tup: tup[1], reverse=True)
                # get sorted status
                sorted_list = [x[0] for x in service_status]
                from django.db.models import Case, When
                # sort by field
                sort_by_field = Case(
                    *[When(status=status, then=pos) for pos, status in enumerate(sorted_list)])
            elif params['sort_by_field'] == "description":
                sort_by_field = "description"
            else:
                sort_by_field = "price"
        else:
            sort_by_field = "created_on"
        query = Q()
        if "search" in params:
            query = Q(name__icontains=params['search']) | Q(description__icontains=params['search']) | \
                    Q(Q(category_tags__name__icontains=params['search']) &
                      Q(category_tags__entity_type=SERVICE_CATEGORY))
        for item in filter_kwargs:
            query = query & Q(**{item: filter_kwargs[item]})
        if "sort_by" in params and params['sort_by'] == "asc":
            return self.queryset.filter(query).order_by(sort_by_field)
        # Default (and "desc") ordering: reversed.
        return self.queryset.filter(query).order_by(sort_by_field).reverse()
    def get(self, request, *args, **kwargs):
        """
        In this method validate request query parameters and filter and return service list.
        return success/error message.
        """
        self.params = request.query_params.copy()
        page_size = self.params.get('page_size', None)
        page = self.params.get('page', None)
        # validate sales order params
        self.validate_query_param(page_size, page)
        if self.errors:
            return APIResponse(self.errors, HTTP_400_BAD_REQUEST)
        error_msg, status_code = None, None
        try:
            # filter and get all service based on query params
            queryset = self.filter_queryset(self.params)
        except DjangoValidationError as err:
            error_msg, status_code = err.args[0], HTTP_400_BAD_REQUEST
        except Exception as err:
            logger.error("Unexpected error occurred : %s.", err.args[0])
            error_msg, status_code = err.args[0], HTTP_500_INTERNAL_SERVER_ERROR
        if error_msg is not None:
            return APIResponse({"message": error_msg}, status_code)
        is_pagination = False
        # set api request page number
        if page is not None:
            self.paginator.page = page
            is_pagination = True
        # set request api page size number (default 10)
        if page_size is None:
            page_size = 10
        self.paginator.page_size = page_size
        return self.paginator.generate_response(queryset, ServiceListSerializer, request, is_pagination)
    @transaction.atomic
    def post(self, request, *args, **kwargs):
        """
        In this method validate service from data and created new service.
        return success/error message.
        """
        request_data = request.data.copy()
        try:
            # validate service and service option fields value
            serializer, validate_data = self.validate(request_data)
        except ValidationError as err:
            return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
        except Exception as err:
            logger.error("Unexpected error occurred : %s.", err)
            return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
        # get last transaction save point id
        sid = transaction.savepoint()
        try:
            # add new service
            instance, priceGroupServiceIdList = serializer.create(validate_data)
        except ValidationError as err:
            # roll back transaction if any exception occur while adding service and service option
            transaction.savepoint_rollback(sid)
            return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
        except Exception as err:
            # roll back transaction if any exception occur while adding service and service option
            transaction.savepoint_rollback(sid)
            logger.error("Unexpected error occurred : %s.", err.args[0])
            return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
        # convert model object into json
        data = ServiceViewSerializer(instance).data
        data['message'] = ADD_SERVICE
        if priceGroupServiceIdList:
            # system user assign services to store (async Celery task)
            linked_services_to_store_task.delay({'priceGroupServiceIdList': priceGroupServiceIdList})
        return APIResponse(data, HTTP_201_CREATED)
class ServiceRetrieveUpdateDeleteView(ServiceRetrieveUpdateDeleteMixin):
    """
    An Api View which provides a method to get, update and delete service.
    Accepts the following GET/PUT/DELETE header parameters: access token
    Returns the success/fail message.
    """
    queryset = Service.objects.all()
    serializer_class = ServiceUpdateSerializer
    permission_classes = (IsAuthenticated, IsAuthorized,)
    permission_required = ('change_service', 'view_service', 'delete_service',)
    lookup_field = 'pk'
    def get_object(self):
        """Return the Service identified by the URL pk, or raise 404."""
        queryset = self.filter_queryset(self.get_queryset())
        # Perform the lookup filtering.
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
        # get object
        obj = get_object_or_404(queryset, "service_id", **filter_kwargs)
        return obj
    def get(self, request, *args, **kwargs):
        """Return the serialized service."""
        # get service object
        instance = self.get_object()
        # serialize service objects
        serializer = ServiceViewSerializer(instance)
        return APIResponse(serializer.data, HTTP_OK)
    @transaction.atomic
    def put(self, request, *args, **kwargs):
        """Validate and update the service; return the updated payload."""
        # get service object
        instance = self.get_object()
        # get request form data
        request_data = request.data
        try:
            # validate service and service option fields value
            serializer, validated_data = self.validate(request_data)
        except ValidationError as err:
            return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
        except Exception as err:
            logger.error("Unexpected error occurred : %s.", err)
            return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
        # Reject deletion requests for image ids not attached to this service.
        if "del_images" in validated_data and len(validated_data['del_images']) > 0:
            del_images = validated_data.get("del_images")
            errors = {}
            images = [x.id for x in instance.images.all()]
            for x in del_images:
                if x.id not in images:
                    errors.setdefault("del_images", []).append(INVALID_SERVICE_IMAGE_ID.format(x.id))
            if len(errors) > 0:
                return APIResponse(errors, HTTP_400_BAD_REQUEST)
        # get last transaction save point id
        sid = transaction.savepoint()
        try:
            # update service
            instance, priceGroupServiceIdList = serializer.update(instance, validated_data)
        except ValidationError as err:
            logger.error("validation error occurred 1 : %s.", err.args[0])
            # roll back transaction if any exception occur while adding service and service option
            transaction.savepoint_rollback(sid)
            return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
        except Exception as err:
            logger.error("Unexpected error occurred 2 : %s.", err.args[0])
            # roll back transaction if any exception occur while update service and service option
            transaction.savepoint_rollback(sid)
            return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
        # convert model object into json
        data = ServiceViewSerializer(instance).data
        data['message'] = UPDATE_SERVICE
        task_payload = {}
        if priceGroupServiceIdList:
            task_payload['priceGroupServiceIdList'] = priceGroupServiceIdList
        # createOptionIds is stashed in the session by the serializer update.
        if "createOptionIds" in request.session:
            task_payload['createOptionIds'] = request.session['createOptionIds']
            del request.session['createOptionIds']
        if task_payload:
            # system user assign services to store (async Celery task)
            linked_service_and_options_to_store_task.delay(task_payload)
        return APIResponse(data, HTTP_OK)
    @transaction.atomic
    def delete(self, request, *args, **kwargs):
        """Soft-delete the service."""
        # validate and get service object
        instance = self.get_object()
        # get last transaction save point id
        sid = transaction.savepoint()
        try:
            # soft delete service
            instance.delete()
        except Exception as err:
            # roll back transaction if any exception occur while delete service
            transaction.savepoint_rollback(sid)
            logger.error("Unexpected error occurred : %s.", err.args[0])
            return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
        return APIResponse({'message': DELETE_SERVICE}, HTTP_OK)
class UpdateServiceOptionSequenceNumber(ListAPIView):
    """
    An Api View which provides a method to update service option sequence number.
    Accepts the following GET header parameters: access token
    Returns the success/fail message.
    """
    queryset = Service.objects.all()
    permission_classes = (IsAuthenticated, IsAuthorized,)
    permission_required = ('change_service',)
    def get(self, request, *args, **kwargs):
        """Renumber every service's options (ordered by id) starting at 1."""
        for service in Service.objects.all():
            ordered_options = service.options.all().order_by("id")
            for seq, option in enumerate(ordered_options, start=1):
                option.sequence = seq
                option.save()
                # Keep linked price-group option rows in sync.
                for pg_option in option.price_group_options.all():
                    pg_option.sequence = seq
                    pg_option.save()
        return APIResponse({'message': "Service option sequence updated successfully."}, HTTP_OK)
import numpy as np
import matplotlib.pyplot as plt
from shapely import geometry
from numpy.linalg import norm
from random import *
import pickle
def reach_set_calc(x_val, reach_range):
    """
    :type x_val: list
    :type reach_range: float
    :return: reach_set: Polygon
    Description: Build the axis-aligned square reachable set centred at x_val
                 with side length 2 * reach_range.
    """
    cx, cy = x_val[0], x_val[1]
    corners = [
        geometry.Point(cx - reach_range, cy - reach_range),
        geometry.Point(cx + reach_range, cy - reach_range),
        geometry.Point(cx + reach_range, cy + reach_range),
        geometry.Point(cx - reach_range, cy + reach_range),
    ]
    return geometry.Polygon(corners)
def all_Q_plt(Q, node_num, color_set, line_style_set, T, plt_scale):
    """
    Plot the exterior of every convex set Q_t^(i) on the current axes,
    then add the legend, grid and axis limits.

    :param Q: dict
    :param node_num: int
    :param color_set: list
    :param line_style_set: list
    :param T: int
    :return: None
    """
    for step in range(T + 1):
        for node_idx in range(1, node_num + 1):
            xs, ys = Q[f"Q_t={step}^i={node_idx}"].region.exterior.xy
            plt.fill(xs, ys, alpha=1, facecolor='none', edgecolor=color_set[step],
                     linewidth=1.5,
                     linestyle=line_style_set[node_idx - 1],
                     label=r"$\mathcal{Q}_" + fr"{step}^{{({node_idx})}}$")
    plt.legend(fontsize=14)
    plt.grid(True)
    plt.axis(plt_scale)
    return None
def set_plotter(set, plt_color, alpha_val):
    """
    Draw the outline of a shapely polygon on the current axes.

    :type set: Polygon
    :type plt_color: string
    :type alpha_val: float
    :return: None
    """
    xs, ys = set.exterior.xy
    plt.fill(xs, ys, alpha=alpha_val, facecolor='none', edgecolor=plt_color)
def game_plt(full_tree, oppo_action, Q, colors, UV_dict, t, prev_x_action, R, control):
    """
    Plot one round of the game (selected set Q_t, reachable intersection,
    chosen player state) and return the player's action for this round.

    :param full_tree: list
    :param oppo_action: State
    :param Q: dict
    :param colors: list
    :param UV_dict: dict
    :param t: int
    :param prev_x_action: State
    :param R: float
    :param control: str ('1'/'2' -> optimal player, '3' -> random player)
    :return: player_action: State
    """
    prev_x_state = prev_x_action.state
    ax = plt.gca()
    ax.set_aspect(1)
    # Plot selected Qt
    Qt = Q[f"Q_t={t}^i={oppo_action.state}"].region
    set_plotter(Qt, colors[t], alpha_val=0.05)
    # Plot the set discretized over (at t=0 the whole Qt; later the
    # intersection with the reachable square around the previous state).
    # NOTE: the local name `set` shadows the builtin.
    if t == 0:
        set = Qt
    else:
        R_set = reach_set_calc(prev_x_action.state, R)
        set = Qt.intersection(R_set)
    set_plotter(set, colors[t], alpha_val=0.1)
    # Find disc xt in the set
    # disc_x_list = [action.state for action in full_tree if action.parent_state == oppo_action]

    # Plot disc xt in the set
    # for disc_x in disc_x_list:
    #     plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
    if control in ['1', '2']:  # Opt pl vs. Opt op or Opt pl vs. Sub-opt op
        # Find optimal player action xt
        player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, {oppo_action.state})"].action
        player_state = player_action.state
    else:  # Control == '3', Sub-opt pl vs. Opt op
        # Randomly pick player action xt (non-deterministic: random.choice)
        player_action = choice([action for action in full_tree if action.parent_state == oppo_action])
        player_state = player_action.state
    # Plot optimal xt in the set
    plt.scatter(player_state[0], player_state[1], color=colors[t], linewidths=1.5, marker='.')
    # plt.scatter(player_state[0], player_state[1], color='black', linewidths=0.1, marker='.')
    if t != 0:
        # Connect optimal xt state approximation to prev_x_state
        plt.plot([prev_x_state[0], player_state[0]], [prev_x_state[1], player_state[1]], color='black')
    return player_action
# Given a rectangular set, return discrete points inside the set.
def discrete_x_calc(poly, approx_para, bound_rmv):
    """
    Return a uniform approx_para x approx_para grid of [x, y] points spanning
    the bounding box of poly's exterior; when bound_rmv is 'y'/'Y', points
    lying on the bounding-box edges are printed and removed from the result.

    :type poly: Polygon
    :type approx_para: int
    :type bound_rmv: string
    :return discrete_x: list
    """
    h_vals, v_vals = poly.exterior.xy
    h_grid = np.linspace(min(h_vals), max(h_vals), approx_para)
    v_grid = np.linspace(min(v_vals), max(v_vals), approx_para)
    grid_points = [[h, v] for h in h_grid for v in v_grid]
    if bound_rmv.lower() != 'y':
        return grid_points
    # Split boundary points (matching a vertex coordinate) from interior ones.
    on_boundary = [pt for pt in grid_points if pt[0] in h_vals or pt[1] in v_vals]
    interior = [pt for pt in grid_points if not (pt[0] in h_vals or pt[1] in v_vals)]
    print(on_boundary)
    return interior
class State:
    """A node in the game tree, for either the player or the opponent side."""
    def __init__(self, state_value, parent_state, t_step, side):
        """
        :type state_value: int (Opponent), list (Player)
        :type parent_state: State / None (dummy_i)
        :type t_step: int, None(dummy i)
        :type side: str ('Player'/'Opponent')
        """
        self.state = state_value
        self.parent_state = parent_state
        self.t_step = t_step
        self.side = side
        self.children_state_list = []
    def add_child_state(self, child_state):
        """Register child_state as a successor of this state.

        :type child_state: State
        """
        self.children_state_list.append(child_state)
class ConvexBody:
    """A convex region Q_t^(i) attached to a time step and a tree node."""
    def __init__(self, t_step, node, vertices):
        """
        :type t_step: int
        :type node: int
        :type vertices: list
        """
        self.region = geometry.Polygon(vertices)
        self.t_node = node
        self.t_step = t_step
class Value:  # Value function
    """Approximated value-function entry (U/V) for a (player, opponent) pair."""
    def __init__(self, player_state, oppo_state, t_step, side, value, action):
        """
        :type player_state: State (None for U0)
        :type oppo_state: State (None for U0)
        :type t_step: int
        :type side: string
        :type value: float
        :type action: State
        """
        self.player_state = player_state
        self.oppo_state = oppo_state
        self.side = side
        self.t_step = t_step
        self.value = value
        self.action = action
if __name__ == "__main__":
#################################### Display ####################################
"""
Plot trajectory result. Allow user to control opponent, while player always applies its optimal strategy by
computer. Allow re-run functionality.
IDEA: Separate game computation section (discretization, optimal value approximation) and game play section
(display) as two different .py files
"""
method = ''
while method.lower() != 'terminate':
method = input("Which method to use or terminate the program? [Old/New/Terminate]: ")
if method.lower() == 'new':
# Load tree info new files into the program
tree_file = open(f'tree_info_new (epsilon = {0.15}, extra_disc_para = {5})', 'rb')
tree_info = pickle.load(tree_file)
tree_file.close()
# Assign keys from tree_info to variables in this program
Q = tree_info['Q']
full_tree = tree_info['full_tree']
UV_dict = tree_info['UV_dict']
T = tree_info['T']
num_nodes = tree_info['num_nodes']
colors = tree_info['colors']
line_style_list = tree_info['line_style_list']
plt_scale = tree_info['plt_scale']
extra_disc_para = tree_info['extra_disc_para']
scale_para = tree_info['scale_para']
dummy_i = tree_info['dummy_i']
performance_bound = tree_info['performance_bound']
R = tree_info['R']
method = tree_info['method']
# Plot all convex sets
plt_scale_Q = [0, 0.8, 0, 0.8]
all_Q_plt(Q, num_nodes, colors, line_style_list, T, plt_scale_Q)
ax = plt.gca()
ax.set_aspect(1)
plt.show()
plt_scale = [0.3, 0.4, 0.3, 0.4]
msg = ''
oppo_hist = dict()
while msg.lower() != 'n':
# Define figure and ax for result plot figure
fig, ax = plt.subplots(figsize=(8, 8))
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(22)
tot_cost = 0
all_Q_plt(Q, num_nodes, colors, line_style_list, T, plt_scale)
## Still need to add opt player vs. opt opponent
control = input("Opt Player vs. Opt Opponent [1] / Opt Player vs. Sub-opt Opponent [2] / Sub-opt "
"Player vs. Opt Opponent [3]? ")
if control not in ['1', '2', '3']:
print('Invalid game setting. Select again.')
else: # Valid game setting
if control == '2': # Case of Player (PC) vs. Opponent (User)
# Initialize the game
t = 0
opt_player_action = dummy_i
opt_player_state = dummy_i.state
while t <= T:
prev_x_action = opt_player_action
prev_x_state = opt_player_state
oppo_node = int(input("Enter opponent action: "))
if t == 0:
if oppo_node not in range(num_nodes + 1):
print("Invalid selection of node. Try again.")
else: # oppo_node is valid with given graph
oppo_hist[f"i{t}"] = oppo_node # Store selected oppo_node to oppo_hist
oppo_action = [action for action in full_tree if action.state == oppo_node and
action.parent_state == prev_x_action][0]
# Plot the game process
opt_player_action = game_plt(full_tree, oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
opt_player_state = opt_player_action.state
# # Plot selected Q0
# Q0 = Q[f"Q_t={t}^i={oppo_action.state}"].region
# set_plotter(Q0, colors[t], alpha_val=0.25)
# set_plotter(Q0, colors[t], alpha_val=0.5)
#
# # Find disc x0 in Q0
# disc_x_list = [action.state for action in full_tree if action.parent_state ==
# oppo_action]
#
# # Plot disc x0 in Q0
# for disc_x in disc_x_list:
# plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.5, marker='.')
#
# # Find optimal player action x0 (Can be made UDF)
# opt_player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, "
# f"{oppo_action.state})"].action
# opt_player_state = opt_player_action.state
#
# # Plot optimal x0 in Q0
# plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
# marker='.')
t += 1 # Update t value
# Display
print(f"Optimal Player State Approximation: {opt_player_state}")
else: # t != 0
if oppo_node not in [action.state for action in prev_x_action.children_state_list]:
print("Invalid selection of node. Try again.")
else: # selected oppo_node is a reachable node
oppo_hist[f"i{t}"] = oppo_node
oppo_action = [action for action in full_tree if action.state == oppo_node and
action.parent_state == prev_x_action][0]
# Plot the game process
opt_player_action = game_plt(full_tree, oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
opt_player_state = opt_player_action.state
# # Plot selected Qt
# Qt = Q[f"Q_t={t}^i={oppo_action.state}"].region
# set_plotter(Qt, colors[t], alpha_val=0.25)
#
# # Plot R(previous_x) intersect Qt
# R_set = reach_set_calc(prev_x_state, R)
# R_intersect_Q = Qt.intersection(R_set)
# set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
#
# # Find disc xt in R(previous_x) intersect Qt
# disc_x_list = [action.state for action in full_tree if action.parent_state ==
# oppo_action]
#
# # Plot disc xt in R(previous_x) intersect Qt
# for disc_x in disc_x_list:
# plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.5, marker='.')
#
# # Find optimal player action xt in R(previous_x) intersect Qt
# opt_player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, {oppo_action.state}"
# f")"].action
# opt_player_state = opt_player_action.state
#
# # Plot optimal x_t in R(previous_x) intersect Qt
# plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
# marker='.')
#
# # Connect optimal x_t state approximation to prev_x_state
# plt.plot([prev_x_state[0], opt_player_state[0]],
# [prev_x_state[1], opt_player_state[1]], color='black')
# Update cost
tot_cost += norm(np.array(prev_x_state) - np.array(opt_player_state), 2)
t += 1 # Update t value
# Display
print(f"Optimal Player State Approximation: {opt_player_state}")
print(f"Cost: {tot_cost}")
print(f"Running {method} method")
# plt.title(fr"Sub-optimal Opponent vs. Optimal Player " + '\n' +
# fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
# fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
# fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
elif control == '1': # Case of Player (PC) vs. Opponent (PC)
for t in range(T+1):
if t == 0:
opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
prev_x_action = opt_oppo_action.parent_state
### Need to check line 846 - 848 correctness!!!! Continue Here
# Plot game process
opt_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
opt_player_state = opt_player_action.state
prev_x_action = opt_player_action # Reassign prev_x_action for next iteration use
# Update oppo_hist
oppo_hist[f"i{t}"] = opt_oppo_action.state
else: # When t != 0
opt_oppo_action = UV_dict[f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state})"].\
action
# Plot game process
opt_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
opt_player_state = opt_player_action.state
tot_cost += norm(np.array(prev_x_action.state) - np.array(opt_player_state), 2)
prev_x_action = opt_player_action
# Update oppo_hist
oppo_hist[f"i{t}"] = opt_oppo_action.state
# Display
print(f"\nt={t}")
print(f"Optimal i{t}: {opt_oppo_action.state}")
print(f"Optimal Player State Approximation: {opt_player_action.state}")
print(f"Total Cost: {tot_cost}")
print(f"Running {method} method")
# plt.title(fr"Optimal Opponent vs. Optimal Player " + '\n' +
# fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
# fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
# fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
elif control == '3':
for t in range(T+1):
if t == 0:
opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
prev_x_action = opt_oppo_action.parent_state
# Plot game process
ram_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
ram_player_state = ram_player_action.state
prev_x_action = ram_player_action
# Update oppo_hist
oppo_hist[f"i{t}"] = opt_oppo_action.state
else:
opt_oppo_action = UV_dict[f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state}"
f")"].action
ram_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
prev_x_action, R, control)
ram_player_state = ram_player_action.state
tot_cost += norm(np.array(prev_x_action.state) - np.array(ram_player_state), 2)
prev_x_action = ram_player_action
# Update oppo_hist
oppo_hist[f"i{t}"] = opt_oppo_action.state
# Display
print(f"\nt={t}")
print(f"Optimal i{t}: {opt_oppo_action.state}")
print(f"Sub-optimal Player State Approximation: {ram_player_state}")
print(f"Total Cost: {tot_cost}")
print(f"Running {method} method")
# plt.title(fr"Optimal Opponent vs. Sub-optimal Player " + '\n' +
# fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
# fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
# fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
plt.show()
# Save Simulation Results
sim_result = {
'tot_cost': tot_cost,
'performance_bound': performance_bound,
'extra_disc_para': extra_disc_para
}
sim_file = open(f'sim_result_new (epsilon = {performance_bound}, extra_disc_para = {extra_disc_para})',
'wb')
pickle.dump(sim_result, sim_file)
sim_file.close()
msg = input(f"Rerun (Method: {method})? [Y/N] ")
#################################### End Here ####################################
#################################### Display ####################################
elif method.lower() == 'old':
tot_cost = 0
# Load tree info old files into the program
tree_file = open('tree_info_old', 'rb')
tree_info = pickle.load(tree_file)
tree_file.close()
# Assign keys from tree_info to variables in this program
Q = tree_info['Q']
tree_no_lf_copy = tree_info['tree_no_lf_copy']
UV_dict = tree_info['UV_dict']
T = tree_info['T']
num_nodes = tree_info['num_nodes']
colors = tree_info['colors']
line_style_list = tree_info['line_style_list']
plt_scale = tree_info['plt_scale']
disc_para = tree_info['disc_para']
scale_para = tree_info['scale_para']
dummy_i = tree_info['dummy_i']
R = tree_info['R']
method = tree_info['method']
boundary_rmv = tree_info['boundary_rmv']
msg = ''
oppo_hist = dict()
while msg.lower() != 'n':
control = input("Player (PC) vs. Opponent (PC) [1] / Player (PC) vs. Opponent (User) [2]? ")
if control not in ['1', '2']:
print('Invalid game setting. Select again.')
else: # control is in ['1', '2']
if control == '2':
# Let user be opponent, show player optimal action approximation for demo (Plot them)
t = 0
opt_player_action = None
opt_player_state = None
tot_cost = 0
while t <= T:
print(f"\nt={t}")
# I reassigned opt_player_action to avoid warning about potentially undefined
# opt_player_action in else statements
prev_x_action = opt_player_action
prev_x_state = opt_player_state # Reassignment needed for later generation of R_intersect_Q
oppo_node = int(input("Enter opponent action: "))
if t == 0:
if oppo_node not in range(num_nodes + 1):
print("Invalid selection of node. Try again.")
else:
oppo_action = [action for action in tree_no_lf_copy if
action.state == oppo_node and action.t_step == t]
oppo_action = oppo_action[0]
oppo_hist[f"i{t}"] = oppo_action.state
# Plot selected Q0
Q0 = Q[f"Q_t={t}^i={oppo_node}"]
set_plotter(Q0.region, colors[t], alpha_val=0.5)
# Plot discrete x0 in Q0
"""
disc_x0_list = [action.state for action in tree_no_lf_copy if
action.parent_state == oppo_action]
"""
disc_x0_list = discrete_x_calc(Q[f'Q_t={t}^i={oppo_node}'].region, disc_para,
bound_rmv=boundary_rmv)
for disc_x0 in disc_x0_list:
plt.scatter(disc_x0[0], disc_x0[1], color=colors[t], linewidths=0.5, marker='.')
opt_player_action = UV_dict[
f"V_t={t} ({oppo_action.parent_state.state}, {oppo_action.state})"].action
opt_player_state = opt_player_action.state # value of optimal x0 approximation
print(f"Optimal Player State Approximation: {opt_player_state}")
plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
marker='.')
t += 1
else: # t != 0
if oppo_node not in [action.state for action in prev_x_action.children_state_list]:
print("Invalid selection of node. Try again.")
else:
oppo_action = \
[state for state in tree_no_lf_copy if
state.state == oppo_node and state.parent_state ==
prev_x_action][0]
oppo_hist[f"i{t}"] = oppo_action.state
opt_player_action = UV_dict[
f"V_t={t} ({prev_x_action.state}, {oppo_action.state})"].action
opt_player_state = opt_player_action.state
print(f"Optimal Player State Approximation: {opt_player_state}")
# Plot Qt
Qt = Q[f"Q_t={t}^i={oppo_action.state}"]
set_plotter(Qt.region, colors[t], alpha_val=0.25)
# Plot R(previous_x) intersect Q
R_set = reach_set_calc(prev_x_state, R)
R_intersect_Q = Q[f"Q_t={t}^i={oppo_action.state}"].region.intersection(R_set)
set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
# Plot discrete x in R_intersect_Q
disc_x_list = discrete_x_calc(R_intersect_Q, approx_para=disc_para,
bound_rmv=boundary_rmv)
for disc_x in disc_x_list:
plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
# Plot optimal x_t state approximation
plt.scatter(opt_player_state[0], opt_player_state[1], facecolor='black',
linewidths=0.1, marker='.')
# Connect optimal x_t state approximation to prev_x_state
plt.plot([prev_x_state[0], opt_player_state[0]],
[prev_x_state[1], opt_player_state[1]], color='black')
tot_cost += norm(np.array(prev_x_state) - np.array(opt_player_state), 2)
print(f"Total Cost: {tot_cost}")
t += 1
if boundary_rmv.lower() == 'n':
plt.title(
fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, $i_2={oppo_hist['i2']}$ "
f"\n{disc_para}x{disc_para} Discretization, Total Cost={round(tot_cost, 4)}")
else:
plt.title(
fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, $i_2={oppo_hist['i2']}$ "
f"\n{disc_para}x{disc_para} Discretization (Without Boundary), Total Cost={round(tot_cost, 4)}")
elif control == '1':
opt_oppo_action = dummy_i
prev_x_action = opt_oppo_action.parent_state
tot_cost = 0
for t in range(T + 1):
if t == 0:
# Find optimal i_0
opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
# Plot Q0
Q0 = Q[f'Q_t={t}^i={opt_oppo_action.state}']
set_plotter(Q0.region, colors[t], alpha_val=0.25)
set_plotter(Q0.region, colors[t], alpha_val=0.5)
# Find discrete x0 in Q0
disc_x_list = [action.state for action in tree_no_lf_copy if action.parent_state ==
opt_oppo_action]
else: # when t is not 0
# Find optimal i_t
opt_oppo_action = UV_dict[
f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state})"].action
# Plot selected Qt
Qt = Q[f"Q_t={t}^i={opt_oppo_action.state}"]
set_plotter(Qt.region, colors[t], alpha_val=0.25)
# Plot R(previous_x) intersect Q
R_set = reach_set_calc(prev_x_action.state, R)
R_intersect_Q = Qt.region.intersection(R_set)
set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
# Find discrete x in R_intersect_Q
disc_x_list = discrete_x_calc(R_intersect_Q, disc_para, bound_rmv=boundary_rmv)
# Output message
print(f"\nt={t}")
print(f"Optimal i{t}: {opt_oppo_action.state}")
# Plot discrete x in sets
for disc_x in disc_x_list:
plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
# Given x_t-1 and i_t, find approximation of optimal x_t
opt_player_action = \
UV_dict[
f"V_t={t} ({opt_oppo_action.parent_state.state}, {opt_oppo_action.state})"].action
print(f"Optimal Player State Approximation: {opt_player_action.state}")
# Plot optimal x_t state approximation
plt.scatter(opt_player_action.state[0], opt_player_action.state[1], facecolor='black',
linewidth=0.1,
marker='.')
# Connect optimal x_t state approximation to prev_x_state
if t != 0:
plt.plot([prev_x_action.state[0], opt_player_action.state[0]],
[prev_x_action.state[1], opt_player_action.state[1]], color='black')
# Update total cost
tot_cost += norm(np.array(prev_x_action.state) - np.array(opt_player_action.state), 2)
print(f"Total Cost: {tot_cost}")
prev_x_action = opt_player_action
# Store optimal opponent history
oppo_hist[f"i{t}"] = opt_oppo_action.state
# Plot display
if boundary_rmv.lower() == 'n':
plt.title(fr"Optimal Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
fr"$i_2={oppo_hist['i2']}$ "
f"\n{disc_para}x{disc_para} Discretization, Total Cost={round(tot_cost, 4)}")
else:
plt.title(fr"Optimal Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
fr"$i_2={oppo_hist['i2']}$ "
f"\n{disc_para}x{disc_para} Discretization (Without Boundary), Total Cost={round(tot_cost, 4)}")
# Plot all given convex sets
for t_val in range(T + 1):
for node in range(1, num_nodes + 1):
hcoord_q, vcoord_q = Q[f"Q_t={t_val}^i={node}"].region.exterior.xy
plt.fill(hcoord_q, vcoord_q, alpha=0.1, facecolor=colors[t_val], edgecolor=colors[t_val],
linewidth=2,
linestyle=line_style_list[node - 1], label=fr"$Q_{t_val}^{{({node})}}$")
plt.legend(fontsize=8)
plt.grid(True)
plt.axis(plt_scale)
if control == '1':
plt.savefig(f"Optimal Opponent History {oppo_hist['i0']}{oppo_hist['i1']}{oppo_hist['i2']}, "
f"disc_para={disc_para}")
else:
plt.savefig(
f"Opponent History {oppo_hist['i0']}{oppo_hist['i1']}{oppo_hist['i2']}, disc_para={disc_para}")
plt.show()
msg = input("Rerun? [Y/N] ")
pass
| DRK98519/aCBC | game_play.py | game_play.py | py | 36,021 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/python3
"""
Script that takes in a letter and sends a POST request
to http://0.0.0.0:5000/search_user with the letter as a
parameter.
"""
from sys import argv
import requests

if __name__ == "__main__":
    # First CLI argument is the search letter; default to the empty string.
    letter = argv[1] if len(argv) > 1 else ""
    response = requests.post("http://0.0.0.0:5000/search_user", {'q': letter})
    try:
        user = response.json()
    except ValueError:
        print("Not a valid JSON")
    else:
        # An empty JSON object means the API found no matching user.
        if user:
            print("[{}] {}".format(user.get("id"), user.get("name")))
        else:
            print("No result")
| ammartica/holbertonschool-higher_level_programming | 0x11-python-network_1/8-json_api.py | 8-json_api.py | py | 617 | python | en | code | 0 | github-code | 36 |
22350898838 | import grid
import shapes
import random
class Game:
    """Tetris game state: a BlockGrid play field plus one active falling shape.

    Cell colour conventions (inferred from usage — confirm against ``grid``):
    ``grid.black`` = empty cell, ``grid.gray`` = settled/landed blocks,
    ``grid.white`` = transient marker cells, any other colour = the
    currently falling shape.
    """

    def __init__(self):
        # 10x25 play field; margin/swidth/sheight are pixel drawing params.
        self.gameData = grid.BlockGrid(10, 25, margin=5, swidth=25, sheight=25)
        # One reusable instance per tetromino type.
        self.jshape = shapes.JShape()
        self.lshape = shapes.LShape()
        self.lineshape = shapes.LineShape()
        self.squareshape = shapes.SquareShape()
        self.sshape = shapes.SShape()
        self.zshape = shapes.ZShape()
        self.tshape = shapes.TShape()
        # Numeric id (1-7) of the shape currently falling; 0 = none yet.
        self.curShape = 0
        self.addShape()
        self.drawShape()

    def getShape(self, num=0):
        """Map a shape id (1-7) to its shape instance; None for other ids."""
        allTheShapes = {
            1: self.jshape,
            2: self.lshape,
            3: self.lineshape,
            4: self.squareshape,
            5: self.sshape,
            6: self.zshape,
            7: self.tshape
        }
        return allTheShapes.get(num)

    def addShape(self):
        """Pick a random tetromino, make it current and reset its position."""
        selection = random.randint(1, 7)
        self.curShape = selection
        self.getShape(selection).reset()

    def drawShape(self):
        """Copy the current shape's non-empty cells onto the play field."""
        shape = self.getShape(self.curShape)
        for blocks in shape.info.data:
            for block in blocks:
                if block.color != grid.black:
                    self.gameData.data[block.y][block.x].color = block.color

    def clearShape(self):
        """Erase the falling shape (every cell that is not empty or settled)."""
        for blocks in self.gameData.data:
            for block in blocks:
                if block.color != grid.black and block.color != grid.gray:
                    block.color = grid.black

    def clearSupport(self):
        """Erase the transient white marker cells from the field."""
        for blocks in self.gameData.data:
            for block in blocks:
                if block.color == grid.white:
                    block.color = grid.black

    def loseShape(self):
        """Land the falling shape: turn it gray and spawn a new shape."""
        self.clearSupport()
        for blocks in self.gameData.data:
            for block in blocks:
                if block.color != grid.black and block.color != grid.gray and block.color != grid.white:
                    block.color = grid.gray
        self.addShape()
        self.drawShape()

    def checkCollisionBottom(self):
        """Land the shape when it touches the bottom row of the field."""
        for x in range(self.gameData.numX):
            block = self.gameData.data[self.gameData.numY - 1][x]
            if block.color == grid.white:
                self.clearSupport()
            if block.color != grid.black and block.color != grid.gray and block.color != grid.white:
                self.loseShape()

    def checkCollisionOther(self):
        """Land the shape when it rests on top of settled (gray) blocks."""
        # Scan bottom-up; for each settled block, inspect the cell above it.
        for y in range(self.gameData.numY - 1, -1, -1):
            for x in range(self.gameData.numX):
                if self.gameData.data[y][x].color == grid.gray:
                    block = self.gameData.data[y - 1][x]
                    if block.color == grid.white:
                        self.clearSupport()
                    if block.color != grid.black and block.color != grid.gray and block.color != grid.white:
                        self.loseShape()

    def rowDelete(self, row=0):
        """Remove a completed row by shifting everything above it down one."""
        for y in range(row, -1, -1):
            for x in range(self.gameData.numX):
                if y - 1 >= 0:
                    self.gameData.data[y][x].color = self.gameData.data[y - 1][x].color

    def checkFullRow(self):
        """Delete every row completely filled with settled (gray) blocks."""
        for y in range(self.gameData.numY - 1, -1, -1):
            count = 0
            for x in range(self.gameData.numX):
                if self.gameData.data[y][x].color == grid.gray:
                    count += 1
            if count == self.gameData.numX:
                self.rowDelete(y)

    def checkGameOver(self, y):
        """Return True when a chain of occupied cells reaches the top row.

        Recurses upwards from row ``y`` as long as it finds any non-empty
        cell in the row.
        """
        if y == 0:
            return True
        for x in range(self.gameData.numX):
            if self.gameData.data[y][x].color != grid.black:
                y -= 1
                return self.checkGameOver(y)
        return False

    def moveShapeLeft(self):
        """Shift the falling shape one column left where cells are free."""
        self.checkCollisionOther()
        self.checkCollisionBottom()
        self.getShape(self.curShape).moveLeft(1)
        # Sweep left-to-right so a moved cell is not moved twice.
        for x in range(1, self.gameData.numX):
            for y in range(self.gameData.numY - 1, -1, -1):
                if self.gameData.data[y][x].color != grid.black and self.gameData.data[y][x].color != grid.gray:
                    if self.gameData.data[y][x-1].color == grid.black:
                        self.gameData.data[y][x-1].color = self.gameData.data[y][x].color
                        self.gameData.data[y][x].color = grid.black

    def moveShapeRight(self):
        """Shift the falling shape one column right where cells are free."""
        self.checkCollisionOther()
        self.checkCollisionBottom()
        self.getShape(self.curShape).moveRight(1, self.gameData.numX)
        # Sweep right-to-left so a moved cell is not moved twice.
        for x in range(self.gameData.numX - 2, -1, -1):
            for y in range(self.gameData.numY - 1, -1, -1):
                if self.gameData.data[y][x].color != grid.black and self.gameData.data[y][x].color != grid.gray:
                    if self.gameData.data[y][x + 1].color == grid.black:
                        self.gameData.data[y][x + 1].color = self.gameData.data[y][x].color
                        self.gameData.data[y][x].color = grid.black

    def moveShapeDown(self):
        """Advance the falling shape one row towards the bottom."""
        self.checkCollisionOther()
        self.checkCollisionBottom()
        self.getShape(self.curShape).moveDown(1)
        # Sweep bottom-up so a moved cell is not moved twice.
        for y in range(self.gameData.numY-2, -1, -1):
            for x in range(self.gameData.numX):
                if self.gameData.data[y][x].color != grid.black and self.gameData.data[y][x].color != grid.gray:
                    self.gameData.data[y+1][x].color = self.gameData.data[y][x].color
                    self.gameData.data[y][x].color = grid.black

    def rotate(self):
        """Rotate the falling shape in place and redraw it."""
        self.getShape(self.curShape).rot()
        self.clearShape()
        self.drawShape()

    def update(self):
        """One game tick: drop the shape one row, then clear full rows."""
        self.moveShapeDown()
        self.checkFullRow()
| chrisgliu/TetrisGame | PyTetris/tetris.py | tetris.py | py | 5,626 | python | en | code | 0 | github-code | 36 |
3735254268 | #!/usr/bin/env python3
#
# get_ad_right_matrix.py
# Export AD User -> Group Matrix to Excel
# Written by Maximilian Thoma 2021
#
import json
import re
import ldap3
import pandas as pd
########################################################################################################################
# NOTE:
# -----
# Following packages must be installed in your python environment:
# pandas, xslxwriter, ldap3
#
# Just install them with:
# pip install pandas xslxwriter, ldap3
#
########################################################################################################################
# Settings
# LDAP server ip or fqdn
LDAP_SERVER = '10.1.1.231'
# LDAP port 389 = unencrypted, 636 = encrypted
PORT = 389
# Use SSL? True/False
USE_SSL = False
# LDAP bind user DN
BIND = 'CN=ldap bind,CN=Users,DC=lab,DC=local'
# LDAP bind user password
BIND_PW = 'Test12345!'
# Base search DN
SEARCH = 'OU=lab,DC=lab,DC=local'
# All users regardless deactivated or activated
SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*))'
# All users who are not deactivated
#SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))'
# All users who are not deactivated and in special group
#SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*)(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(memberOf=CN=b_testgruppe und restlicher DN))'
# Output file
FILE = 'output.xlsx'
########################################################################################################################
def main():
    """Query Active Directory over LDAP and export a user -> group
    membership matrix to an Excel workbook at ``FILE``.
    """
    # Connect to LDAP and run the configured search.
    # Fix: honour the PORT setting — the original hard-coded 389, so
    # USE_SSL=True with PORT=636 would still target the plaintext port.
    server = ldap3.Server(LDAP_SERVER, port=PORT, use_ssl=USE_SSL)
    conn = ldap3.Connection(server, BIND, BIND_PW, auto_bind=True)
    conn.search(SEARCH, SEARCH_FILTER, attributes=['memberOf', 'sAMAccountName'])
    response = json.loads(conn.response_to_json())

    def get_cn(cn_str):
        """Return the first CN component of a distinguished name."""
        return re.findall(r"CN=([^,]*),?", cn_str)[0]

    buffer_users = {}          # sAMAccountName (lowercase) -> display CN
    buffer_user_in_group = {}  # group CN -> list of member sAMAccountNames

    for entry in response['entries']:
        # Get short and long username.
        long_username = get_cn(entry['dn'])
        short_username = entry['attributes']['sAMAccountName'].lower()
        buffer_users[short_username] = long_username
        # Record this user's membership for each of their groups.
        for group in entry['attributes']['memberOf']:
            group_name = get_cn(group)
            if group_name not in buffer_user_in_group:
                buffer_user_in_group[group_name] = []
            if short_username not in buffer_user_in_group[group_name]:
                buffer_user_in_group[group_name].append(short_username)

    # Build the "X"/"-" matrix: rows are "user - CN", columns are groups.
    matrix = {}
    length_cell = 0
    for group, users in buffer_user_in_group.items():
        matrix[group] = {}
        for user, long_user in buffer_users.items():
            index = "%s - %s" % (user, long_user)
            # Track the widest row label to size the first column later.
            index_length = len(index)
            if index_length > length_cell:
                length_cell = index_length
            if user in users:
                matrix[group][index] = "X"
            else:
                matrix[group][index] = "-"

    # Generate the data matrix with pandas and write it below the header row.
    a = pd.DataFrame(matrix)
    writer = pd.ExcelWriter(FILE, engine='xlsxwriter')
    a.to_excel(writer, sheet_name="Sheet1", startrow=1, header=False)
    workbook = writer.book
    worksheet = writer.sheets['Sheet1']

    # Header line: bold, shaded, rotated 90 degrees.
    header_format = workbook.add_format(
        {
            'bold': True,
            'valign': 'bottom',
            'fg_color': '#D7E4BC',
            'border': 1,
        }
    )
    header_format.set_rotation(90)
    for col_num, value in enumerate(a.columns.values):
        worksheet.write(0, col_num + 1, value, header_format)

    # Green highlight applied to "X" cells via conditional formatting below.
    format2 = workbook.add_format(
        {
            'bg_color': '#C6EFCE',
            'font_color': '#006100'
        }
    )

    # Set autofilter in first line.
    cols_count = len(a.columns.values)
    worksheet.autofilter(0, 0, 0, cols_count)

    # Column widths: wide label column, narrow group columns.
    worksheet.set_column(0, 0, length_cell+1)
    worksheet.set_column(1, cols_count, 3)

    # Freeze header row and label column.
    worksheet.freeze_panes(1, 1)

    # Conditional formatting for membership marks.
    worksheet.conditional_format('A1:ZA65535', {
        'type': 'cell',
        'criteria': '=',
        'value': '"X"',
        'format': format2
    })

    # Fix: ExcelWriter.save() was deprecated and removed in pandas 2.x;
    # close() flushes and saves the workbook in all supported versions.
    writer.close()


if __name__ == "__main__":
    main()
| lanbugs/get_ad_right_matrix | get_ad_right_matrix.py | get_ad_right_matrix.py | py | 4,692 | python | en | code | 3 | github-code | 36 |
25108240229 | from zigzag.classes.io.onnx.parser import Parser
from zigzag.classes.io.onnx.utils import get_node_input_output_dimension_shapes
from zigzag.classes.workload.layer_node import LayerNode
import logging
logger = logging.getLogger(__name__)
class SoftmaxParser(Parser):
    """Parser for ONNX Softmax nodes into LayerNode."""

    def __init__(self, node_id, node, nodes_outputs, mapping, onnx_model):
        super().__init__(node_id, node, nodes_outputs, mapping, onnx_model)

    def run(self):
        """Run the parser and return the created LayerNode object."""
        return self.generate_layer_node_for_softmax()

    def generate_layer_node_for_softmax(self):
        """Build and return the LayerNode describing this Softmax node."""

        def get_layer_node_input_format(B, C, K, node_mapping, nodes_outputs):
            """
            Generate the necessary dictionary items required for the Node creation.
            """
            d = {}
            # Layer equation describing softmax over the channel axis.
            d["equation"] = "O[b][c] = exp(I[b][c]) / (reduce_sum(exp(I[b]), axis=1))"
            d["dimension_relations"] = []
            d["operand_precision"] = {"O": 16, "I": 8}  # Modify precision as needed
            d["operand_source"] = {"I": []}
            # Core allocation and spatial mapping from the hardware mapping.
            d["core_allocation"] = node_mapping["core_allocation"]
            d["spatial_mapping"] = node_mapping["spatial_mapping"]
            # Find the previous layer(s) that should be this node's parent(s).
            preds = [n for node_input in self.node.input
                     for n in nodes_outputs if node_input in nodes_outputs[n]]
            d["operand_source"]["I"] = preds
            return d

        ia_dimension_shape, oa_dimension_shape = get_node_input_output_dimension_shapes(
            self.node, self.onnx_model
        )

        # Batch size and channel counts; default to 1/0 when shapes are absent.
        B = ia_dimension_shape[0] if ia_dimension_shape else 1
        C = ia_dimension_shape[1] if ia_dimension_shape else 0
        K = oa_dimension_shape[1] if oa_dimension_shape else 0

        # Get the hw mapping of this node, falling back to the default entry.
        if self.node.name in self.mapping:
            node_mapping = self.mapping[self.node.name]
        else:
            try:
                node_mapping = self.mapping["default"]
            except KeyError:  # narrowed from a bare except: only a missing key is expected
                raise ValueError(
                    f"There is no mapping provided for node {self.node.name}, nor a default one."
                ) from None

        node_attrs = get_layer_node_input_format(
            B, C, K, node_mapping, self.nodes_outputs
        )
        node_obj = LayerNode(
            self.node_id,
            node_attrs,
            node_name=self.node.name,
            type=self.node.op_type.lower(),
        )
        logger.info(f"Parsed Softmax node {self.node.name}")
        return node_obj
| wangxdgg/zigzag_2 | zigzag/classes/io/onnx/softmax2.py | softmax2.py | py | 3,148 | python | en | code | 0 | github-code | 36 |
import argparse
import pickle  # fix: was imported twice in the original


def load_junctions(fasta_path):
    """Read a two-line-record FASTA file into {header_line: sequence_line}.

    Assumes every record is exactly two lines (">id" then the sequence),
    matching the original parsing behaviour. Lines are stripped of
    surrounding whitespace.
    """
    junction_id_to_seq = {}
    # Context manager guarantees the input handle is closed.
    with open(fasta_path, "r") as f:
        while True:
            line1 = f.readline()
            if not line1:
                break
            line2 = f.readline()
            junction_id_to_seq[line1.strip()] = line2.strip()
    return junction_id_to_seq


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fasta', help='fasta input file')
    args = parser.parse_args()
    # Context manager ensures the pickle file is flushed and closed
    # (the original left both file handles open).
    with open("known_fusions.pickle", "wb") as out:
        pickle.dump(load_junctions(args.fasta), out)
| salzmanlab-admin/DEEPEST-Fusion | reference_files/create_pickle_file.py | create_pickle_file.py | py | 471 | python | en | code | 5 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 08:08:07 2023

@author: jtazioli

TIP CALCULATOR:
    input cost of total bill
    input percentage of tip
    output cost per person
"""


def tip_share(tot_cost, tip_percent, num_people):
    """Return each person's share of the tip, rounded to cents.

    NOTE(review): this splits only the *tip* among the diners, not
    bill + tip — this matches the original behaviour, but confirm
    that is the intended output.
    """
    tip = round(tot_cost * (tip_percent / 100), 2)
    return round(tip / num_people, 2)


# Guard the interactive part so importing this module has no side effects.
if __name__ == "__main__":
    tot_cost = float(input("What is the cost of the bill?\n"))
    tip_percent = float(input("What percent do you want to leave as tip? (ex. 10, 15, 20)\n"))
    num_people = int(input("How many people are splitting the bill?\n"))
    tip_per_person = tip_share(tot_cost, tip_percent, num_people)
    print(f"Each person pays: ${tip_per_person} for a {tip_percent}% tip.")
28853226017 | import bpy
from bpy.types import Menu
brush_icons = {}
def create_icons():
    """Load the sculpt-brush triangle icons shipped with Blender into
    the module-level brush_icons mapping."""
    global brush_icons
    import os
    icons_directory = bpy.utils.system_resource('DATAFILES', path="icons")
    for brush in ("border_mask", "border_hide", "box_trim", "line_project"):
        icon_path = f"{icons_directory}/ops.sculpt.{brush}.dat"
        brush_icons[brush] = bpy.app.icons.new_triangles_from_file(icon_path)
def release_icons():
    """Free every icon previously registered by create_icons()."""
    global brush_icons
    for icon_id in brush_icons.values():
        bpy.app.icons.release(icon_id)
class PIE_MT_hide_mask_brushes(Menu):
    """Pie menu exposing the box mask/hide/trim/line-project sculpt tools.

    bl_label is displayed at the center of the pie menu.
    """
    bl_label = "Hide/Mask Brush Menu"
    bl_idname = "PIE_MT_hide_mask_brushes"
    bl_options = {"REGISTER", "UNDO"}

    # (button label, key into brush_icons, builtin tool identifier)
    _TOOLS = (
        (" Mask", "border_mask", "builtin.box_mask"),
        (" Hide", "border_hide", "builtin.box_hide"),
        (" Trim", "box_trim", "builtin.box_trim"),
        (" Line Project", "line_project", "builtin.line_project"),
    )

    def draw(self, context):
        pie = self.layout.menu_pie()
        for label, icon_key, tool_id in self._TOOLS:
            op = pie.operator("wm.tool_set_by_id", text=label,
                              icon_value=brush_icons[icon_key])
            op.name = tool_id
class PIE_MT_init_face_sets(Menu):
    """Pie menu running sculpt.face_sets_init with each available mode."""
    bl_label = "Init Face Sets"
    bl_idname = "PIE_MT_init_face_sets"
    bl_options = {"REGISTER", "UNDO"}

    # (button text, Blender icon name, face_sets_init mode) — order matters,
    # since pie slots are assigned in the order operators are added.
    _MODES = (
        ('Loose Parts', "OUTLINER_DATA_POINTCLOUD", 'LOOSE_PARTS'),
        ('Face Set Boundaries', "PIVOT_BOUNDBOX", 'FACE_SET_BOUNDARIES'),
        ('Materials', "MATERIAL", 'MATERIALS'),
        ('Normals', "NORMALS_VERTEX_FACE", 'NORMALS'),
        ('UV Seams', "UV_EDGESEL", 'UV_SEAMS'),
        ('Edge Creases', "EDGESEL", 'CREASES'),
        ('Edge Bevel Weight', "MOD_BEVEL", 'BEVEL_WEIGHT'),
        ('Sharp Edges', "SHARPCURVE", 'SHARP_EDGES'),
    )

    def draw(self, context):
        pie = self.layout.menu_pie()
        for text, icon, mode in self._MODES:
            pie.operator("sculpt.face_sets_init", text=text, icon=icon).mode = mode
# Classes registered/unregistered together by this add-on module.
classes = (
    PIE_MT_hide_mask_brushes,
    PIE_MT_init_face_sets,
)

from my_pie_menus import utils

# Keymap entries consumed by utils.register_keymaps: Alt+1 opens the
# face-set init pie and Alt+2 the hide/mask pie, both in the 3D
# viewport's Sculpt keymap.
kms = [
    {
        "keymap_operator": "wm.call_menu_pie",
        "name": "Sculpt",
        "letter": "ONE",
        "shift": 0,
        "ctrl": 0,
        "alt": 1,
        "space_type": "VIEW_3D",
        "region_type": "WINDOW",
        "keywords": {"name": "PIE_MT_init_face_sets"},
    },
    {
        "keymap_operator": "wm.call_menu_pie",
        "name": "Sculpt",
        "letter": "TWO",
        "shift": 0,
        "ctrl": 0,
        "alt": 1,
        "space_type": "VIEW_3D",
        "region_type": "WINDOW",
        "keywords": {"name": "PIE_MT_hide_mask_brushes"},
    },
]

# Populated by utils.register_keymaps so keymaps can be removed on unregister.
addon_keymaps = []
def register():
    """Add-on entry point: load icons, then register classes and keymaps."""
    create_icons()
    utils.register_classes(classes)
    utils.register_keymaps(kms, addon_keymaps)
def unregister():
    """Add-on teardown: free icons, unregister classes and keymaps."""
    release_icons()
    # NOTE(review): registration goes through utils.register_classes but
    # teardown unregisters classes directly via bpy — confirm the
    # asymmetry is intentional (utils may not expose an unregister helper).
    for cls in classes:
        bpy.utils.unregister_class(cls)
    utils.unregister_keymaps(kms)
| jmobley0429/my_pie_menus | menus/sculpt_mode_pies.py | sculpt_mode_pies.py | py | 3,776 | python | en | code | 1 | github-code | 36 |
72692974183 | from ast import literal_eval
import pymongo
# Handles all interactions with the database
class DbManager:
database = None
def __init__(self):
# Client instantiation with the MongoDB Client
self.client = pymongo.MongoClient(
"mongodb+srv://gdp:gdp@propaganda.m00hm.mongodb.net/Trilateral?retryWrites=true&w=majority")
# Sets the database to our Trilateral Database in the MongoDB Client
self.database = self.client.Trilateral
# Deletes an entire collection
def drop_collections(self):
try:
self.database['documents_document'].drop()
self.database['documents_claim'].drop()
self.database['documents_graph'].drop()
self.database['tweets_tweet'].drop()
self.database['tweets_query'].drop()
self.database['trends_trend'].drop()
except pymongo.errors.PyMongoError:
print("Collection not found Found in Database")
# Returns all documents of a specific collection
def get_all_documents(self, uid: str):
try:
return list(self.database['documents_document'].find({"uid": uid}))
except pymongo.errors.PyMongoError:
print("No Collection Documents_Document, Found in Database")
# Returns the number of documents in the collection under the specified uid
def count_all_documents(self, uid: str):
try:
return self.database['documents_document'].find({"uid": uid}).count()
except pymongo.errors.PyMongoError:
print("Returns no documents, uid %s, Found in Database", uid)
# Returns the number of tweets in the collection under the specified uid
def count_all_tweets(self, uid: str):
try:
return self.database['tweets_tweet'].find({"uid": uid}).count()
except pymongo.errors.PyMongoError:
print("Returns no tweets, uid %s, Found in Database", uid)
# Returns a list of cleaned tokens from all the Documents under the specified UID
def get_all_cleaned_tokens(self, uid: str):
try:
ini_list = list(self.database['documents_document'].find({"uid": uid},
{"_id": 0, "cleaned_tokens": 1}))
cleaned_tokens = []
for tokens in ini_list:
res = literal_eval(tokens['cleaned_tokens'])
cleaned_tokens.extend(res)
return cleaned_tokens
except pymongo.errors.PyMongoError:
print("No Collection, Documents_Document Found in Database")
# Returns all the text-bodies from each Document under the specified UID
def get_all_main_texts(self, uid: str):
try:
ini_list = list(self.database['documents_document'].find({"uid": uid},
{"_id": 0, "text_body": 1}))
main_text = []
for text in ini_list:
main_text.append(text['text_body'])
return " ".join([text for text in main_text])
except pymongo.errors.PyMongoError:
print("No Collection, Documents_document Found in Database")
# Returns all Tweets under the specified UID
def get_all_tweets(self, uid: str):
try:
ini_list = list(self.database['tweets_tweet'].find({"uid": uid}))
tweets = []
for t in ini_list:
tweets.append(
dict(uid=t['uid'], screen_name=t['screen_name'], created_at=t['created_at'], text=t['text'],
favorite_count=t['favorite_count'],
retweet_count=t['retweet_count'], user_location=t['user_location'],
sentiment=t['sentiment']))
return ini_list
except pymongo.errors.PyMongoError:
print("No Collection, Tweets_tweet Found in Database")
# Returns a list of html_links from each Document under the specified UID
def get_all_html_links(self, uid: str):
try:
ini_list = list(self.database['documents_document'].find({"uid": uid},
{"_id": 0, "html_links": 1}))
html_links = []
for html_link in ini_list:
res = literal_eval(html_link['html_links'])
html_links.extend(res)
return html_links
except pymongo.errors.PyMongoError:
print("No Objects, UID: %s, Found in Collection, Documents_document", uid)
# Returns a claim under the specified UI
def get_claim(self, uid: str):
try:
c_result = self.database['documents_claim'].find({"uid": uid},
{"_id": 0, "claim": 1})
claim = c_result[0]['claim']
return claim
except pymongo.errors.PyMongoError:
print("No Objects, UID: %s, Found in Collection, Documents_claim", uid)
# Returns a query under the specified UID
def get_query(self, uid: str):
try:
q_result = self.database['tweets_query'].find({"uid": uid},
{"_id": 0, "query": 1})
query = q_result[0]['query']
return query
except pymongo.errors.PyMongoError:
print("No Objects, UID: %s, Found in Collection, Tweets_Query", uid)
# Returns all causal data with a specified UID
def get_causal(self, uid: str):
try:
causal = self.database['trends_trend'].find({"uid": uid})
causal_item = causal[0]
return causal_item
except pymongo.errors.PyMongoError:
print("No Objects, UID: %s, Found in Collection, Trends_trend", uid)
| madeleinemvis/original_gdp | BackEnd/functions/dbmanager.py | dbmanager.py | py | 5,836 | python | en | code | 0 | github-code | 36 |
32568853073 | # -*- coding: utf-8 -*-
from logbook import Logger
import numpy as np
import pandas as pd
from zipline.data.bundles import register
from zipline.utils.calendars import get_calendar
EXPORT_FOLDER = '/mnt/data/earnings_calls/export/'
log = Logger('zipline_ingest.py')
def bundle_hf_data(price_file, debug = False):
    """Return a zipline ``ingest`` callable for an Eikon HDF price file.

    :param price_file: path to an HDF store whose rows include at least
        instrument_key, Date, Open, Close, Volume and Currency columns
        (assumed from usage — confirm against the export step).
    :param debug: when True, stop after ingesting the first instrument.
    """
    def ingest(environ,
               asset_db_writer,
               minute_bar_writer,
               daily_bar_writer,
               adjustment_writer,
               calendar,
               cache,
               show_progress,
               output_dir,
               start,
               end):
        # Bundle entry point invoked by zipline's ingestion machinery.
        log.info("Starting bundle build from %s" % price_file)
        data = pd.read_hdf(price_file)
        # Keep only rows with both Open and Close quotes, priced in USD.
        data.dropna(subset = ['Open', 'Close'], inplace = True)
        data = data.loc[data.Currency == 'USD']
        data['instrument_key'] = data.instrument_key.str.upper()
        log.info("Importing %d instruments" % len(data.instrument_key.unique()))
        # Filled as a side effect while read_instruments() is consumed by
        # daily_bar_writer.write(), then flushed to asset_db_writer below.
        dfMetadata = []
        def read_instruments():
            # Generator yielding one (sid, OHLCV frame) pair per instrument.
            for sid, (instrument_key, instrument_data) in enumerate(data.groupby('instrument_key')):
                log.debug("Reading instrument %s" % instrument_key)
                log.debug("\tInstrument has %d rows" % len(instrument_data))
                if len(instrument_data) == 0:
                    log.debug("\tNo data for instrument, skipping")
                    continue
                instrument_data.drop_duplicates(subset = ['Date'], inplace = True)
                instrument_data.set_index('Date', inplace = True)
                instrument_data.sort_index(inplace = True)
                #dfData['exchange_open'] = instrument_data.index.map(calendar.is_open_on_minute)
                #dfData = dfData[dfData['exchange_open'] == True]
                start_date = instrument_data.index[0]
                log.debug("\tstart_date %s" % start_date)
                end_date = instrument_data.index[-1]
                log.debug("\tend_date %s" % end_date)
                # Auto-close one day after the last available bar.
                ac_date = end_date + pd.Timedelta(days=1)
                log.debug("\tac_date %s" % ac_date)
                # Align rows to NYSE trading sessions; sessions without data
                # become NaN rows (volume is then defaulted to 1.0 below).
                sessions = get_calendar('NYSE').sessions_in_range(start_date, end_date)
                instrument_data = instrument_data.reindex(sessions)
                # Update our meta data
                dfMetadata.append((sid, instrument_key, start_date, end_date, \
                                   ac_date, instrument_key, "Eikon"))
                # NOTE(review): High/Low are deliberately blanked — presumably
                # only Open/Close are trusted from this source; confirm.
                instrument_data['High'] = np.nan
                instrument_data['Low'] = np.nan
                instrument_data['Volume'].fillna(1.0, inplace = True)
                instrument_data = instrument_data.loc[:, ['Open', 'High', 'Low', 'Close', 'Volume']]
                instrument_data.columns = ['open', 'high', 'low', 'close', 'volume']
                instrument_data = instrument_data.astype(float)
                yield (sid, instrument_data)
                if debug:
                    break
        liData = read_instruments()
        log.info("calling daily_bar_writer")
        daily_bar_writer.write(liData,
                               show_progress = True)
        log.info("returned from daily_bar_writer")
        # Equities metadata table in the shape zipline's asset DB expects.
        dfMetadata = pd.DataFrame(dfMetadata,
                                  columns=['sid', 'asset_name', 'start_date',
                                           'end_date', 'auto_close_date',
                                           'symbol', 'exchange'])\
            .set_index('sid')
        log.info("calling asset_db_writer")
        log.info(dfMetadata)
        asset_db_writer.write(equities = dfMetadata)
        log.info("returned from asset_db_writer")
        # No splits/dividends available — write an empty adjustments table.
        log.info("calling adjustment_writer")
        adjustment_writer.write()
        log.info("returned from adjustment_writer")
    return ingest
# Register the bundle with zipline under 'eikon-data-bundle' so it can be
# ingested with: zipline ingest -b eikon-data-bundle
register(
    'eikon-data-bundle',
    bundle_hf_data(price_file = EXPORT_FOLDER + "/adjusted_prices.hdf",
                   debug = False),
)
| olgsfrt/earningscall | backtest/zipline_ingest.py | zipline_ingest.py | py | 4,373 | python | en | code | 0 | github-code | 36 |
19634743961 | """
Usage: negotiator-cli [OPTIONS] GUEST_UNIX_SOCKET
Communicate from a KVM/QEMU host system with running guest systems using a
guest agent daemon running inside the guests.
Supported options:
-c, --list-commands
List the commands that the guest exposes to its host.
-e, --execute=COMMAND
Execute the given command inside GUEST_UNIX_SOCKET. The standard output stream of
the command inside the guest is intercepted and copied to the standard
output stream on the host. If the command exits with a nonzero status code
the negotiator-host program will also exit with a nonzero status code.
-t, --timeout=SECONDS
Set the number of seconds before a remote call without a response times
out. A value of zero disables the timeout (in this case the command can
hang indefinitely). The default is 10 seconds.
-h, --help
Show this message and exit.
"""
from humanfriendly import Timer
from negotiator_common.config import DEFAULT_TIMEOUT
from negotiator_common import NegotiatorInterface
from negotiator_common.utils import TimeOut
import coloredlogs
import functools
import getopt
import logging
import os
import shlex
import socket
import sys
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class GuestChannel(NegotiatorInterface):

    """
    The host side of the channel connecting KVM/QEMU hosts and guests.

    This is a modification of negotiator_host.GuestChannel that connects to a
    plain UNIX socket instead of going through libvirt.
    """

    def __init__(self, unix_socket):
        # Fail fast on a missing pathname instead of letting connect()
        # raise a less helpful error later.
        if not unix_socket:
            raise GuestChannelInitializationError("No UNIX socket pathname provided!")
        # Connect to the UNIX socket.
        logger.debug("Opening UNIX socket: %s", unix_socket)
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            logger.debug("Connecting to UNIX socket: %s", unix_socket)
            self.socket.connect(unix_socket)
        except Exception:
            # Any connect failure (missing socket, permission denied, ...) is
            # normalized to a single initialization error type.
            raise GuestChannelInitializationError("Guest refused connection attempt!")
        logger.debug("Successfully connected to UNIX socket!")
        # Initialize the super class, passing it a file like object connected
        # to the character device in read/write mode.
        super(GuestChannel, self).__init__(handle=self.socket.makefile(),
                                           label="UNIX socket %s" % unix_socket)

    def prepare_environment(self):
        """
        Prepare environment variables for command execution on KVM/QEMU hosts.

        The following environment variables are currently exposed to commands:

        ``$NEGOTIATOR_GUEST``
          The name of the KVM/QEMU guest that invoked the command.
        """
        # NOTE(review): ``self.guest_name`` is not assigned anywhere in this
        # class -- presumably provided by NegotiatorInterface or a subclass;
        # confirm before relying on this method, otherwise it raises
        # AttributeError.
        os.environ['NEGOTIATOR_GUEST'] = self.guest_name
class GuestChannelInitializationError(Exception):
    """
    Raised by :py:class:`GuestChannel` when socket initialization fails,
    i.e. when no socket pathname is given or the connection is refused.
    """
class Context(object):

    """Holds the remote-call timeout shared by the command line actions."""

    def __init__(self):
        """Start with the default timeout from ``negotiator_common.config``."""
        self.timeout = DEFAULT_TIMEOUT

    def print_commands(self, guest_unix_socket):
        """Print the commands supported by the guest, one per line, sorted."""
        with TimeOut(self.timeout):
            guest = GuestChannel(unix_socket=guest_unix_socket)
            commands = guest.call_remote_method('list_commands')
            print('\n'.join(sorted(commands)))

    def execute_command(self, guest_unix_socket, command_line):
        """Run *command_line* inside the guest and relay its output."""
        with TimeOut(self.timeout):
            stopwatch = Timer()
            guest = GuestChannel(unix_socket=guest_unix_socket)
            result = guest.call_remote_method('execute', *shlex.split(command_line), capture=True)
            logger.debug("Took %s to execute remote command.", stopwatch)
            print(result.rstrip())
def main():
    """Command line interface for the ``negotiator-cli`` program."""
    # Log to both the terminal and the system log.
    coloredlogs.install(syslog=True)
    context = Context()
    pending_actions = []

    def only_socket():
        # Both action flags require exactly one positional argument:
        # the guest's UNIX socket pathname.
        assert len(positional) == 1, \
            "Please provide the unix socket of a guest as the 1st and only positional argument!"
        return positional[0]

    try:
        flags, positional = getopt.getopt(sys.argv[1:], 'ce:t:h', [
            'list-commands', 'execute=', 'timeout=', 'help'
        ])
        for flag, argument in flags:
            if flag in ('-c', '--list-commands'):
                pending_actions.append(functools.partial(context.print_commands, only_socket()))
            elif flag in ('-e', '--execute'):
                pending_actions.append(functools.partial(context.execute_command, only_socket(), argument))
            elif flag in ('-t', '--timeout'):
                context.timeout = int(argument)
            elif flag in ('-h', '--help'):
                usage()
                sys.exit(0)
        if not pending_actions:
            # Nothing requested: show the help text instead of doing nothing.
            usage()
            sys.exit(0)
    except Exception:
        # SystemExit is not an Exception subclass, so the exits above pass through.
        logger.exception("Failed to parse command line arguments!")
        sys.exit(1)
    # Execute the requested action(s); any failure terminates with status 1.
    try:
        for action in pending_actions:
            action()
    except Exception:
        logger.exception("Caught a fatal exception! Terminating ..")
        sys.exit(1)
def usage():
    """Print a user friendly usage message to the terminal."""
    # The module docstring doubles as the help text.
    help_text = __doc__.strip()
    print(help_text)
# Entry point when the module is executed directly as a script.
if __name__ == "__main__":
    main()
11820326179 | from abc import ABC, abstractmethod
import ml.optimization.gradient_descent_optimizer as gradient_descent_optimizer
import numpy as np
class BoostedRegressor(ABC):
    """Abstract gradient-boosting regressor.

    Subclasses supply the weak-learner fit (:meth:`_fit_weak_learner`) and the
    per-stage line search (:meth:`_solve_for_gamma_m`); :meth:`train` runs the
    standard boosting loop on pseudo-residuals of ``pointwise_loss``.
    """

    def __init__(self, pointwise_loss, num_learners, learner_regularizer = 1):
        # Explicitly unbound call so a subclass that overrides set_params
        # does not change base-class construction.
        BoostedRegressor.set_params(self, pointwise_loss, num_learners, learner_regularizer)

    def set_params(self, pointwise_loss, num_learners, learner_regularizer):
        """Store the boosting hyper-parameters."""
        # pointwise_loss must expose loss_derivatives(preds, y) (and losses()
        # for the commented-out diagnostics in train()).
        self._pointwise_loss = pointwise_loss
        # Total number of boosting stages, including the constant stage 0.
        self._num_learners = num_learners
        # Shrinkage factor applied to every fitted stage's coefficient.
        self._learner_regularizer = learner_regularizer

    def predict(self, X):
        """Return the weighted sum of weak-learner predictions for rows of X.

        ``train`` must have been called first: the learner list and the
        coefficient vector are created there (name-mangled ``__h``/``__gamma``).
        Stages still set to None (mid-training) terminate the sum early.
        """
        out = np.zeros(X.shape[0], dtype = np.float64)
        for m in range(len(self.__h)):
            if self.__h[m] is None:
                return out
            out += self.__gamma[m] * self.__h[m](X)
        return out

    '''
    Returns a function that takes in X, a numpy array
    of datapoints where X[i] is the ith datapoint, and returns
    a vector h, where h[i] is the evaluation of the trained
    weak learner on X[i]
    '''
    @abstractmethod
    def _fit_weak_learner(self, X, y):
        """Fit one weak learner to targets y; return a callable X -> predictions."""
        pass

    @abstractmethod
    def _solve_for_gamma_m(self, X, y, current_model_preds, h_m):
        """Line-search the coefficient for the freshly fitted learner h_m."""
        pass

    def __get_initial_weak_learner(self, y):
        # Stage 0 is the constant predictor equal to the target mean.
        y_avg = np.average(y)
        return lambda X : np.full(X.shape[0], y_avg)

    def get_weak_learner_coefficients(self):
        """Return the per-stage coefficient vector (gamma)."""
        return self.__gamma

    def train(self, X, y):
        """Run the gradient-boosting loop, fitting self._num_learners stages."""
        self.__h = [None for i in range(0, self._num_learners)]
        self.__gamma = np.zeros(self._num_learners, dtype = np.float64)
        # Stage 0: constant mean predictor with unit weight.
        self.__h[0] = (self.__get_initial_weak_learner(y))
        self.__gamma[0] = 1.0
        current_model_preds = self.predict(X)
        for m in range(1, self._num_learners):
            # Pseudo-residuals: negative gradient of the loss w.r.t. predictions.
            pseudo_residuals = -self._pointwise_loss.loss_derivatives(current_model_preds, y)
            h_m = self._fit_weak_learner(X, pseudo_residuals)
            self.__h[m] = h_m
            # Shrink the line-searched step by the regularizer.
            self.__gamma[m] = self._learner_regularizer * self._solve_for_gamma_m(X, y, current_model_preds, h_m)
            # Keep running predictions incremental instead of re-evaluating all stages.
            current_model_preds += self.__gamma[m] * h_m(X)
            #print("learner (" + str(m) + ") mean error: " + str(np.average(self._pointwise_loss.losses(current_model_preds, y))))
| jek343/StanfordMedical | ml/model/regression/gradient_boosting/boosted_regressor.py | boosted_regressor.py | py | 2,248 | python | en | code | 0 | github-code | 36 |
7210833797 | # -*- coding: utf-8 -*-
from odoo import api, models, fields, registry
import odoo
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
import json
import logging
_logger = logging.getLogger(__name__)
class pos_call_log(models.Model):
    """Cached ``search_read`` payloads ("call logs") for POS sessions.

    Each row remembers a model/domain/fields triple together with its JSON
    serialized records so the POS client can reload data without querying
    the backend models directly.
    """
    _rec_name = "call_model"
    _name = "pos.call.log"
    _description = "Log datas of pos sessions"

    min_id = fields.Integer('Min Id', required=1, index=True, readonly=1)
    max_id = fields.Integer('Max Id', required=1, index=True, readonly=1)
    call_domain = fields.Char('Domain', required=1, index=True, readonly=1)
    call_results = fields.Char('Results', readonly=1)
    call_model = fields.Char('Model', required=1, index=True, readonly=1)
    call_fields = fields.Char('Fields', index=True, readonly=1)
    active = fields.Boolean('Active', default=True)
    write_date = fields.Datetime('Write date', readonly=1)

    @api.multi
    def compare_database_write_date(self, model, pos_write_date):
        """Return True when a log row of *model* is older than *pos_write_date*."""
        last_logs = self.search([('call_model', '=', model), ('write_date', '<', pos_write_date)])
        if last_logs:
            _logger.info('POS write date is %s' % pos_write_date)
            _logger.info('Model %s write date is %s' % (model, last_logs[0].write_date))
            return True
        else:
            return False

    def covert_datetime(self, model, datas):
        """Serialize date/datetime values of *datas* in place and return *datas*.

        On Odoo 12, ``search_read`` returns ``date``/``datetime`` objects;
        convert them to the default server string format so the records can
        be JSON encoded.

        BUG FIX: always return *datas*. The previous implementation only
        returned a value when ``version_info == 12`` and implicitly returned
        ``None`` otherwise.
        """
        all_fields = self.env[model].fields_get()
        version_info = odoo.release.version_info[0]
        if version_info == 12 and all_fields:
            for data in datas:
                for field, value in data.items():
                    if field == 'model':
                        # 'model' is metadata added by the POS cache, not a real field.
                        continue
                    if all_fields[field] and all_fields[field]['type'] in ['date', 'datetime'] and value:
                        data[field] = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return datas

    @api.multi
    def refresh_call_logs(self):
        """Re-run every stored query and refresh its cached JSON payload."""
        _logger.info('========================= BEGIN refresh_call_logs ========================================')
        cache_database_object = self.env['pos.cache.database']
        logs = self.search([])
        for log in logs:
            call_fields = cache_database_object.get_fields_by_model(log.call_model)
            call_domain = cache_database_object.get_domain_by_model(log.call_model)
            # Restrict the refresh to the id window this log row covers.
            call_domain.append(['id', '>=', log.min_id])
            call_domain.append(['id', '<=', log.max_id])
            _logger.info('Refresh log of model: %s' % log.call_model)
            _logger.info(call_domain)
            _logger.info('===============================')
            results = self.env[log.call_model].sudo().search_read(
                call_domain,
                call_fields)
            # Reuse the shared datetime serializer instead of duplicating its
            # loop inline (the previous version repeated covert_datetime here).
            results = self.covert_datetime(log.call_model, results)
            log.write({
                'call_results': json.dumps(results),
                'call_fields': json.dumps(call_fields),
                'call_domain': json.dumps(call_domain),
            })
        # Drop the cache rows so clients rebuild them from the refreshed logs.
        self.env['pos.cache.database'].search([]).unlink()
        _logger.info('========================= END refresh_call_logs ========================================')
        return True
| mahmohammed16881688/odoo_12 | addons/pos_retail/models/pos/pos_call_log.py | pos_call_log.py | py | 3,733 | python | en | code | 1 | github-code | 36 |
class Solution:
    def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:
        """Return every cell from which water can reach both oceans.

        Water flows to a 4-neighbour of equal or lower height. The Pacific
        touches the top and left edges, the Atlantic the bottom and right
        edges. We BFS *inward* from each coastline, walking uphill
        (neighbour height >= current height), and intersect the two
        reachable sets.
        """
        n_rows, n_cols = len(heights), len(heights[0])

        # Seed each BFS frontier with its coastline cells.
        pacific_frontier = deque()
        atlantic_frontier = deque()
        for r in range(n_rows):
            pacific_frontier.append((r, 0))
            atlantic_frontier.append((r, n_cols - 1))
        for c in range(n_cols):
            pacific_frontier.append((0, c))
            atlantic_frontier.append((n_rows - 1, c))

        def flood(frontier):
            # Uphill BFS from a coastline; returns the set of reachable cells.
            seen = set()
            while frontier:
                r, c = frontier.popleft()
                seen.add((r, c))
                for nr, nc in ((r, c + 1), (r + 1, c), (r, c - 1), (r - 1, c)):
                    if (0 <= nr < n_rows and 0 <= nc < n_cols
                            and (nr, nc) not in seen
                            and heights[r][c] <= heights[nr][nc]):
                        frontier.append((nr, nc))
            return seen

        return list(flood(pacific_frontier) & flood(atlantic_frontier))
| ileenf/Data-Structures-Algos | BFS/pacific_atlantic_water_flow.py | pacific_atlantic_water_flow.py | py | 1,367 | python | en | code | 0 | github-code | 36 |
10704896320 | '''
Created on May 12, 2010
Harvests all PDB structures from PDB database
@author: ed
'''
import urllib, sys, os, random, math
# Split the annotated protein list (argv[1]) into PDZ-domain and non-PDZ ids,
# sample 10% of the non-PDZ entries and download their PDB files.
# (The unused notPDZDomain/pdzDomain lists and the dead 'result' strings from
# the original were removed; file handles are now closed deterministically.)
with open(sys.argv[1], 'r') as otherProteins:
    otherProts = otherProteins.readlines()

# Opened for writing exactly as before: creating/truncating these files is a
# side effect even though nothing is written to them in this script.
pdzActives = open(sys.argv[2], 'w')
pdzInactives = open(sys.argv[3], 'w')

pdzIds = []
notPDZIds = []
for line in otherProts:
    anotherProtein = str(line).strip()
    # Useful lines look like "PDBID;field;field"; PDZ entries are kept apart.
    if ';' in anotherProtein and 'PDZ' in anotherProtein:
        pdzIds.append(str(anotherProtein.split(';')[0]).strip())
    elif ';' in anotherProtein and 'PDZ' not in anotherProtein and anotherProtein != "":
        notPDZIds.append(str(anotherProtein.split(';')[0]).strip())

# Randomly sample 10% of the non-PDZ structures as the "inactive" set.
random.shuffle(notPDZIds)
sizeofInactives = int(math.floor(len(notPDZIds) / 10))
tenPercent = notPDZIds[:sizeofInactives]

# Now for harvesting: fetch each sampled structure from the RCSB PDB and save
# it locally with ANISOU (anisotropic temperature factor) records stripped.
outputDirectory = "/InactiveProteins/"
pathname = os.path.dirname(sys.argv[4])
for pdb_id in tenPercent:
    datasource = urllib.urlopen("http://www.rcsb.org/pdb/files/" + pdb_id + ".pdb")
    try:
        records = datasource.readlines()
    finally:
        # BUG FIX: close the HTTP handle even if readlines() fails.
        datasource.close()
    with open(pathname + outputDirectory + pdb_id + ".pdb", 'w') as f:
        for record in records:
            if record[0:6] != 'ANISOU':
                f.write(str(record))

pdzActives.close()
pdzInactives.close()
from django.conf.urls import patterns, include, url
from .views import *

# URL routes for the student-facing book reservation pages.
# NOTE(review): ``patterns()`` was removed in Django 1.10; this file targets
# a pre-1.8 Django release.
urlpatterns = patterns('',
    url(r'^reservar/(?P<id>\d+)/$',Reservrlibros),
    url(r'^consultaLibos/$',ConsultaLibros.as_view(), name='ConsultaLibros'),
    url(r'^reservaExitosa/$',MostrarReservas),
    #url(r'^Verreservas/$',Verreservas),
    #url(r'^busqueda_ajax/$',ReservasLibros.as_view(), name='buscarView'), the id is passed to the view as a parameter
    )
| juanjavierlimachi/Biblioteca | Biblioteca/Biblioteca/apps/estudiantes/urls.py | urls.py | py | 426 | python | es | code | 0 | github-code | 36 |
18039525483 | from keras import layers,models,optimizers,losses
from keras.datasets import cifar10
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
# Train a small CNN on CIFAR-10, plot the training curves and report accuracy.
(X_train,y_train),(X_test,y_test)=cifar10.load_data()
print(X_train.shape)
print(y_train.shape)

HIDDEN_SIZE=256     # units in the fully connected hidden layer
NUM_CLASSES=10      # CIFAR-10 has 10 classes
LEARNING_RATE=1E-3

model=models.Sequential()
model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape=(32,32,3)))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Conv2D(64,(3,3),activation='relu'))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Flatten())
model.add(layers.Dense(HIDDEN_SIZE,activation='relu'))
model.add(layers.Dense(NUM_CLASSES,activation='softmax'))
model.compile(
    optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
    # BUG FIX: the last layer already applies softmax, so the loss must treat
    # the outputs as probabilities, not logits (from_logits=False).
    loss=losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)

EPOCH=10
history=model.fit(X_train,y_train,epochs=EPOCH,validation_split=0.2)
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.show()

# BUG FIX: evaluate on the held-out test set; the original evaluated the
# training set, which overstates generalization accuracy.
result=model.evaluate(X_test,y_test,verbose=0)
print('卷积神经网络在cifar10数据集上的准确率为%.2f%%'%(result[1]*100))
print('卷积神经网络在cifar10数据集上的loss为%.2f'%(result[0]))

import numpy as np
pred=model.predict(X_test)
pred = np.argmax(pred, axis = 1)[:10]
# BUG FIX: y_test holds integer labels with shape (n, 1); argmax over axis 1
# always returns 0. Flatten the integer labels instead.
label = y_test[:10].flatten()
print(pred)
print(label)
model.save('model2.h5')
19766387119 | # készíts programot ami egy bevitt mondatban megszámolja a számokat és a betűket (külön)
# és kiírja az eredményt. pl: szia 123 -> betűk: 4, számok: 3
sentence = input('irj be egy mondatot: ')
digits = 0
letters = 0
for c in sentence:
if c.isdigit():
digits = digits + 1
if c.isalpha():
letters = letters + 1
print("betűk száma: {}, számok száma: {}".format(letters, digits))
| Zatyi94/gyakorlas | 5.py | 5.py | py | 425 | python | hu | code | 0 | github-code | 36 |
# Differential Equations part 1
# Simulate a damped pendulum theta'' = -mu*theta' - (g/L)*sin(theta)
# with the forward Euler method.
import numpy as np

g = 9.8                 # gravitational acceleration (m/s^2)
L = 2                   # pendulum length (m)
mu = 0.1                # damping coefficient
theta_0 = np.pi/30      # initial angle (rad)
theta_dot_0 = 0         # initial angular velocity (rad/s)


def get_theta_double_dot(theta, theta_dot):
    """Angular acceleration of the damped pendulum at state (theta, theta_dot)."""
    return -mu*theta_dot - (g/L) * np.sin(theta)


# Solution to diff eqn
def theta(t):
    """Approximate theta(t) by forward-Euler integration from the initial state."""
    theta = theta_0
    theta_dot = theta_dot_0
    delta_t = 0.01
    for time in np.arange(0, t, delta_t):
        theta_double_dot = get_theta_double_dot(theta, theta_dot)
        theta += theta_dot * delta_t
        # BUG FIX: the velocity must be accumulated (+=), not overwritten --
        # the original discarded all momentum on every step.
        theta_dot += theta_double_dot * delta_t
    return theta


if __name__ == "__main__":
    print(theta(5))
35734197596 | from manage_company import CompanyManager
import sqlite3
class console:
    """Interactive command console for :class:`CompanyManager`.

    Reads command names from stdin and dispatches them via ``self.commands``.
    """

    def __init__(self):
        # Persistence layer; every command delegates to this manager.
        self.manager = CompanyManager('database.db')
        # Dispatch table mapping the typed command name to its handler.
        # NOTE(review): an unknown name makes read_command raise KeyError --
        # confirm whether that is acceptable for this tool.
        self.commands = {
            "read_command": self.read_command,
            "list_employees": self.list_employees,
            "add_employee": self.add_employee,
            "monthly_spending": self.monthly_spending,
            "yearly_spending": self.yearly_spending,
            "delete_employee": self.delete_employee,
            "update_employee": self.update_employee,
            "exit": self.exit
        }

    def read_command(self):
        """Prompt for a single command name and dispatch it."""
        user_input = input('command>')
        self.commands[user_input]()

    def list_employees(self):
        """Print id, name and position for every employee."""
        employees = self.manager.list_employees()
        for employee in employees:
            print("{} - {} - {}".format(employee['id'], employee['name'], employee['position']))

    def add_employee(self):
        """Prompt for the new employee's fields and store the record."""
        n = input('input name:')
        ms = input('input monthly_salaray:')
        yb = input('input yearly_bonus:')
        p = input('position:')
        self.manager.add_employee(n, ms, yb, p)

    def monthly_spending(self):
        """Report the company's total monthly spending."""
        spendings = self.manager.monthly_spending()
        print("The company is spending ${} every month!".format(spendings))

    def yearly_spending(self):
        """Report the company's total yearly spending."""
        # NOTE(review): whether bonuses are included depends on
        # CompanyManager.yearly_spending, which is not visible in this file.
        spendings = self.manager.yearly_spending()
        print("The company is spending ${} every year!".format(spendings))

    def delete_employee(self):
        """Prompt for an employee id and delete that record."""
        id_number = input('id>')
        self.manager.delete_employee(id_number)
        print("employee deleted!")

    def update_employee(self):
        """Prompt for an id plus all fields and overwrite the record."""
        id_number = input('id>')
        n = input('name>')
        ms = input('monthly_salaray>')
        yb = input('yearly_bonus>')
        p = input('position>')
        self.manager.update_employee(id_number, n, ms, yb, p)

    def exit(self):
        """Announce shutdown and close the manager."""
        print("You are now out of the application!")
        self.manager.exit()
| yordanovagabriela/HackBulgaria | week7/company/console.py | console.py | py | 1,951 | python | en | code | 0 | github-code | 36 |
74936854824 | from loguru import logger
import configparser as cfg
import os
def logger_handler(msg: str, mode=2) -> None:
    """
    Handles logging of messages
    mode: 0 = debug, 1 = info, 2 = error (default)
    """
    # Make sure the sink for this level is configured before emitting.
    _log_constructor(mode)
    # Route the message: 0 -> exception (with traceback), 1 -> info,
    # anything else -> error.
    emitters = {0: logger.exception, 1: logger.info}
    emitters.get(mode, logger.error)(msg)
def _log_constructor(mode: int) -> None:
    """
    internal function to construct the logger sink for *mode*
    requires config file ('config.ini') in the working directory!
    mode: 0 = debug, 1 = info, 2 = error
    """
    # BUG FIX: loguru's logger.add() registers a *new* sink on every call, so
    # configuring on every log message duplicated output lines. Remember which
    # modes are already configured and bail out early.
    configured = getattr(_log_constructor, '_configured_modes', None)
    if configured is None:
        configured = _log_constructor._configured_modes = set()
    if mode in configured:
        return
    # read config file
    config = cfg.ConfigParser()
    config.read('config.ini')

    _log_level_mapping = {0: 'DEBUG', 1: 'INFO', 2: 'ERROR'}
    # define log levels (section names) as defined in config file
    _config_level = {0: 'LOGGING_DEBUG', 1: 'LOGGING_INFO', 2: 'LOGGING_ERROR'}
    _mode = _log_level_mapping[mode]
    _cnf_lvl = _config_level[mode]

    _file_name = os.path.expanduser(
        config.get(_cnf_lvl, 'log_file', fallback=''))
    # BUG FIX: fallbacks prevent NoOptionError / eval(None) crashes when the
    # options are missing from the config file.
    _serialize = config.get(_cnf_lvl, 'log_serialize', fallback='False')
    _diagnose = config.get(_cnf_lvl, 'log_diagnose', fallback='False')

    # SECURITY NOTE: eval() on config values executes arbitrary code; the
    # config file must be trusted. ast.literal_eval would be a safer choice.
    logger.add(_file_name,
               rotation=config.get(_cnf_lvl, 'log_rotate', fallback=''),
               level=_mode,
               format=config.get(_cnf_lvl, 'log_format', fallback=''),
               compression=config.get(
                   _cnf_lvl, 'log_compression', fallback=''),
               diagnose=eval(_diagnose),
               serialize=eval(_serialize))
    configured.add(mode)
| Anton0Lashov/dng_extractor | _logger.py | _logger.py | py | 1,503 | python | en | code | 0 | github-code | 36 |
def main():
    """Run a two-player tic-tac-toe game on the console."""
    place = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    turn = 0
    # BUG FIX: the original looped on ``while (checkwin)`` -- the bare
    # function object is always truthy, so the game could never end.
    while not checkwin(place) and not checkdraw(place):
        drawboard(place)
        turn += 1
        # x moves on odd turns, o on even turns.
        xo = "x" if turn % 2 == 1 else "o"
        play = int(input("{}'s turn to choose a square (1-9):".format(xo)))
        makechange(play, xo, place)
    drawboard(place)
    if checkwin(place):
        print("{} wins!".format(xo))
    else:
        print("It's a draw!")


def drawboard(place):
    """Print the 3x3 board; unclaimed squares show their number."""
    rows = ["{}|{}|{}".format(*place[i:i + 3]) for i in (0, 3, 6)]
    print("\n-+-+-\n".join(rows))


def makechange(play, xo, place):
    """Claim square *play* (1-9) for player *xo* if it is still open.

    BUG FIX: the original iterated over board values and compared the chosen
    number against 'x'/'o' marks, which broke once a mark was placed and
    never validated the move.
    """
    if place[play - 1] not in ("x", "o"):
        place[play - 1] = xo


def checkwin(place):
    """Return True when any row, column or diagonal holds three equal marks."""
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6)]
    return any(place[a] == place[b] == place[c] for a, b, c in lines)


def checkdraw(place):
    """Return True when every square is claimed and nobody has won.

    BUG FIX: the original returned ``()`` and never detected a draw.
    """
    return all(v in ("x", "o") for v in place) and not checkwin(place)


if __name__ == "__main__":
    main()
17354358336 | import typing as t
import numpy as np
from emo_utils import convert_to_one_hot
from emo_utils import predict
from emo_utils import softmax
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
def sentence_to_avg(
    sentence: str,
    word_to_vec_map: dict[str, t.Any],
) -> np.ndarray:
    """Average the GloVe vectors of the words in *sentence*.

    Words missing from *word_to_vec_map* are ignored; if no word is known,
    the zero vector is returned.

    Arguments:
    sentence -- string, one training example from X
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
        50-dimensional vector representation

    Returns:
    avg -- numpy array of shape (J,), the mean of the known word vectors
    """
    # Embedding dimensionality, probed from an arbitrary vocabulary entry.
    probe_word = next(iter(word_to_vec_map))
    dim = word_to_vec_map[probe_word].shape[0]

    known_vectors = [
        word_to_vec_map[w]
        for w in sentence.lower().split()
        if w in word_to_vec_map
    ]
    if not known_vectors:
        return np.zeros(dim)
    return np.sum(known_vectors, axis=0) / len(known_vectors)
def model(
    X,
    Y,
    word_to_vec_map,
    learning_rate=0.01,
    num_iterations=400,
):
    """
    Model to train word vector representations in numpy.

    Trains a softmax classifier over averaged GloVe sentence embeddings with
    plain (full-batch-per-example) stochastic gradient descent.

    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m,)
    Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
        50-dimensional vector representation
    learning_rate -- learning_rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations

    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """
    # Get a valid word contained in the word_to_vec_map
    any_word = list(word_to_vec_map.keys())[0]

    # number of training examples
    m = Y.shape[0]

    # number of classes
    n_y = len(np.unique(Y))

    # dimensions of the GloVe vectors
    n_h = word_to_vec_map[any_word].shape[0]

    # Initialize parameters using Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))

    # Convert Y to Y_one_hot with n_y classes
    Y_oh = convert_to_one_hot(Y, C=n_y)

    # Optimization loop
    for t in range(num_iterations):
        cost = 0
        dW = 0
        db = 0

        # Loop over the training examples
        for i in range(m):

            # Average the word vectors of the words from the i'th training example
            avg = sentence_to_avg(X[i], word_to_vec_map)

            # Forward propagate the avg through the softmax layer.
            z = W @ avg + b
            a = softmax(z)

            # Cross-entropy of the one-hot label against the softmax output,
            # accumulated over the whole epoch.
            cost += -np.sum(Y_oh[i] * np.log(a))

            # Compute gradients: dL/dz for softmax + cross-entropy is (a - y_one_hot).
            dz = a - Y_oh[i]
            dW += np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db += dz

        # Update parameters with Stochastic Gradient Descent
        # (the accumulated epoch gradient is applied once per iteration).
        W = W - learning_rate * dW
        b = b - learning_rate * db

        # Guard: cost must have collapsed to a numpy float scalar, which the
        # accumulation above guarantees when the forward pass is correct.
        assert type(cost) == np.float64, "Incorrect implementation of cost"
        assert cost.shape == (), "Incorrect implementation of cost"

        if t % 100 == 0:
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
            pred = predict(X, Y, W, b, word_to_vec_map)

    return pred, W, b
def sentences_to_indices(
    X,
    word_to_index,
    max_len,
):
    """Convert an array of sentences into zero-padded word-index rows.

    The output can be fed directly to a Keras ``Embedding()`` layer. Words
    missing from *word_to_index* are skipped (they do not advance the column).

    Arguments:
    X -- array of sentences (strings), of shape (m,)
    word_to_index -- a dictionary containing the each word mapped to its index
    max_len -- maximum number of words in a sentence. You can assume every
        sentence in X is no longer than this.

    Returns:
    X_indices -- array of indices corresponding to words in the sentences
        from X, of shape (m, max_len); unused slots stay 0
    """
    num_examples = X.shape[0]
    X_indices = np.zeros((num_examples, max_len))

    for row, sentence in enumerate(X):
        col = 0
        for word in sentence.split():
            idx = word_to_index.get(word.lower())
            if idx is not None:
                X_indices[row, col] = idx
                col += 1

    return X_indices
def pretrained_embedding_layer(
    word_to_vec_map,
    word_to_index,
):
    """
    Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional
    vectors.

    Arguments:
    word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
    word_to_index -- dictionary mapping from words to their indices in
        the vocabulary (400,001 words)

    Returns:
    embedding_layer -- pretrained (non-trainable) Keras Embedding instance
    """
    # adding 1 to fit Keras embedding (requirement): indices go up to
    # len(word_to_index), so input_dim must be one larger
    vocab_size = len(word_to_index) + 1
    any_word = list(word_to_vec_map.keys())[0]
    # define dimensionality of your GloVe word vectors (= 50),
    # probed from an arbitrary vocabulary entry
    emb_dim = word_to_vec_map[any_word].shape[0]

    # Initialize the embedding matrix as a numpy array of zeros.
    # NOTE(review): rows whose index never appears in word_to_index stay
    # all-zero -- presumably row 0 serves as padding; confirm that the word
    # indices start at 1.
    emb_matrix = np.zeros((vocab_size, emb_dim))

    # Set each row "idx" of the embedding matrix to be
    # the word vector representation of the idx'th word of the vocabulary
    for word, idx in word_to_index.items():
        emb_matrix[idx, :] = word_to_vec_map[word]

    # Define Keras embedding layer with the correct input and output sizes.
    # Make it non-trainable: the GloVe vectors stay frozen during training.
    embedding_layer = Embedding(
        input_dim=vocab_size,
        output_dim=emb_dim,
        trainable=False,
    )

    # build() must run before set_weights() so the layer's weight
    # variables exist.
    embedding_layer.build((None,))

    # Set the weights of the embedding layer to the embedding matrix.
    embedding_layer.set_weights([emb_matrix])

    return embedding_layer
def Emojify_V2(
    input_shape,
    word_to_vec_map,
    word_to_index,
):
    """
    Function creating the Emojify-v2 model's graph.

    Arguments:
    input_shape -- shape of the input, usually (max_len,)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
        50-dimensional vector representation
    word_to_index -- dictionary mapping from words to their indices in the
        vocabulary (400,001 words)

    Returns:
    model -- a model instance in Keras mapping padded word-index sequences to
        a softmax over 5 emoji classes
    """
    # Input is a batch of padded word-index sequences; dtype is 'int32'
    # because the values are vocabulary indices.
    sentence_indices = Input(shape=input_shape, dtype="int32")

    # Embedding layer pretrained with frozen GloVe vectors.
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)

    # Look up the embedding of every index in the batch.
    embeddings = embedding_layer(sentence_indices)

    # First LSTM (128-dim hidden state) returns the full sequence so it can
    # feed the second LSTM.
    X = LSTM(units=128, return_sequences=True)(embeddings)
    # Dropout for regularization.
    X = Dropout(rate=0.5)(X)
    # Second LSTM keeps only the final hidden state of the sequence.
    X = LSTM(units=128, return_sequences=False)(X)
    X = Dropout(rate=0.5)(X)
    # Project to the 5 emoji classes and normalize with softmax.
    X = Dense(units=5)(X)
    X = Activation("softmax")(X)

    # Create Model instance which converts sentence_indices into X.
    model = Model(inputs=sentence_indices, outputs=X)

    return model
| HarryMWinters/ML_Coursework | Course 6, Sequence Models/Week 2/assignment_2/Emoji_v3a.py | Emoji_v3a.py | py | 8,499 | python | en | code | 0 | github-code | 36 |
26454404997 | import gc
import logging
import os
import glob
import pandas as pd
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from math import exp
import numpy as np
torch.backends.cudnn.benchmark = True
from matplotlib import pyplot as plt
import matplotlib as mpl
import matplotlib.patches as patches
from matplotlib import pyplot as plt
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
avm = ArgoverseMap()
num = 10
data_path="/datasets/argoverse/val/data"
infer_path="../../inn"
import os
import sys
sys.path.append("../ddn/")
sys.path.append("./")
import warnings
warnings.filterwarnings('ignore')
import torch
import numpy as np
import scipy.special
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.linalg import block_diag
from torch.utils.data import Dataset, DataLoader
#from bernstein import bernstesin_coeff_order10_new
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
avm = ArgoverseMap()
def denoise(gt_x, gt_y, w = 7):
# denoising
gt_x_t = []
gt_y_t = []
for iq in range(len(gt_x)):
if iq >= w and iq + w <= len(gt_x):
gt_x_t.append(np.mean(gt_x[iq: iq + w]))
gt_y_t.append(np.mean(gt_y[iq: iq + w]))
elif iq < w:
okx = np.mean(gt_x[w: w + w])
gt_x_t.append(gt_x[0] + (okx - gt_x[0]) * (iq) / w)
oky = np.mean(gt_y[w: w + w])
gt_y_t.append(gt_y[0] + (oky - gt_y[0]) * (iq) / w)
else:
okx = np.mean(gt_x[len(gt_x) - w:len(gt_x) - w + w])
oky = np.mean(gt_y[len(gt_x) - w: len(gt_x) - w + w])
gt_x_t.append(okx + (gt_x[-1] - okx) * (w - (len(gt_x) - iq)) / w)
gt_y_t.append(oky + (gt_y[-1] - oky) * (w - (len(gt_y) - iq)) / w)
gt_x = gt_x_t
gt_y = gt_y_t
return gt_x, gt_y
from shapely.geometry.polygon import Polygon, Point

# --- rendering / export configuration ---------------------------------------
output_dir="../results/"
t_obs=20        # number of observed timesteps per trajectory
dt=0.3          # timestep in seconds -- NOTE(review): confirm against the dataset's sampling rate
t_obs=20        # duplicate of the assignment above (same value, kept as-is)
pred=False      # whether externally supplied predictions are used
pred_array=None
batch_size = 512
dpi=100         # figure resolution for saved plots
w,h=512,512     # raster width/height in pixels
res=0.5         # NOTE(review): presumably meters per pixel; confirm in the rendering code
paths = glob.glob(os.path.join(data_path, "*.csv"))
# Colour scheme for map rendering (beige/grey palette).
color = {
    'polygon': '#e6cf93',
    'polygon-outline': '#e6cf93',
    'centerline': '#fceec7',
    'agent': 'blue',
    'av': 'grey',
    'other': 'grey',
    'outline': 'black'
}
# NOTE(review): this second assignment silently overwrites the palette above
# with an all-white scheme -- presumably a quick toggle left enabled; confirm
# which palette is intended before removing either.
color = {
    'polygon': 'white',
    'polygon-outline': 'white',
    'centerline': 'white',
    'agent': 'white',
    'av': 'white',
    'other': 'white',
    'outline': 'black'
}
from tqdm import tqdm

# Main rasterization loop: for every sequence CSV, build 20 binary occupancy
# grids (one per observed frame) for the drivable area, the agent and the
# other obstacles, then dump them as .npy files under `infer_path`.
for idx in tqdm(range(len(paths))):
    if idx < 19:
        # Resume point kept from the original: skip already-processed files.
        continue
    path = paths[idx]
    dff = pd.read_csv(path)
    city = dff['CITY_NAME'].values[0]
    # --- denoised per-track trajectories ------------------------------------
    agent_df = dff[dff['OBJECT_TYPE'] == 'AGENT']
    x_a = agent_df['X'].values
    y_a = agent_df['Y'].values
    x_a, y_a = denoise(x_a, y_a)
    av_df = dff[dff['OBJECT_TYPE'] == 'AV']
    x_av = av_df['X'].values
    y_av = av_df['Y'].values
    x_av, y_av = denoise(x_av, y_av)
    others_df = dff[dff['OBJECT_TYPE'] == 'OTHERS']
    others_dfs = np.array([v for k, v in others_df.groupby('TRACK_ID')], dtype=object)
    x_o = {}
    y_o = {}
    for other_df in others_dfs:
        x_other, y_other = other_df['X'].values, other_df['Y'].values
        x_other, y_other = denoise(x_other, y_other)
        x_o[other_df['TRACK_ID'].values[0]] = x_other
        # BUGFIX: the original stored the *raw* Y values here, silently
        # discarding the denoised `y_other` it had just computed (x was
        # smoothed while y was not).
        y_o[other_df['TRACK_ID'].values[0]] = y_other
    # group by timestamp
    dfs = [x for _, x in dff.groupby('TIMESTAMP')]
    grids_lanes = np.zeros((20, h, w))
    grids_obstacles = np.zeros((20, h, w))
    grids_centerlines = np.zeros((20, h, w))
    grids_agent = np.zeros((20, h, w))
    total_successors = []
    current = []
    das_polygons = []
    das_polygons_mp = []
    das_ids = []
    agent_polygons = []
    others_polygons = []
    # --- drivable-area lane polygons: lanes under the agent during the first
    # 20 frames plus two levels of lane successors ---------------------------
    for indd in range(0, 20):
        lane_id = avm.get_nearest_centerline(np.array([x_a[indd], y_a[indd]]), city_name=city)[0].id
        current.append(lane_id)
        successors = avm.get_lane_segment_successor_ids(lane_id, city)
        if successors is None:
            continue
        for successor in successors:
            total_successors.append(successor)
            successors_2d = avm.get_lane_segment_successor_ids(successor, city)
            # BUGFIX: the original tested `successors_2d == None` *inside* the
            # loop over successors_2d, so a None return would already have
            # raised TypeError before the check; guard before iterating.
            if successors_2d is None:
                continue
            for successorr in successors_2d:
                total_successors.append(successorr)
        polygons = [avm.get_lane_segment_polygon(successor, city) for successor in successors]  # (unused, kept from original)
    current = np.unique(np.array(current))
    total_successors = np.unique(np.array(total_successors))
    for curr in current:
        current_polygon = avm.get_lane_segment_polygon(curr, city)
        das_polygons.append(current_polygon)
        das_polygons_mp.append(avm.get_lane_segment_polygon(curr, city))
        das_ids.append(curr)
        # plt.fill(current_polygon[:, 0], current_polygon[:, 1], color='white', zorder=4)
    for successor in total_successors:
        polygon = avm.get_lane_segment_polygon(successor, city)
        das_polygons.append(polygon)
        das_polygons_mp.append(avm.get_lane_segment_polygon(successor, city))
        das_ids.append(successor)
        # plt.fill(polygon[:, 0], polygon[:, 1], color='white', zorder=4)
    das_polygons_mp = np.array(das_polygons_mp)
    x_off = 75
    y_off = 75
    # Axis-aligned 150 m box around the agent position at frame 20
    # (overwritten inside the loop below; kept from the original).
    points = np.array([[x_a[20] - x_off, y_a[20] + y_off], [x_a[20] + x_off, y_a[20] + y_off], [x_a[20] + x_off, y_a[20] - y_off], [x_a[20] - x_off, y_a[20] - y_off], [x_a[20] - x_off, y_a[20] + y_off]])
    # --- per-frame oriented bounding boxes for the agent and other vehicles -
    for ind, df in enumerate(dfs):
        agent_df = df[df['OBJECT_TYPE'] == 'AGENT']
        others_df = df[df['OBJECT_TYPE'] == 'OTHERS']
        others_dfs = [x for _, x in others_df.groupby('TRACK_ID')]
        av_df = df[df['OBJECT_TYPE'] == 'AV']
        # agent
        x_traj = agent_df['X'].values
        y_traj = agent_df['Y'].values
        offsets = [x_a[0], y_a[0]]  # offsets for other agents
        others_polyon = []
        if ind < len(dfs) - 1:
            x_off = 2  # 0.75
            y_off = 2.25  # 1.25
            # BUGFIX: the original used the bare array `y_traj` (instead of
            # the scalar `y_traj[0]`) when building the corner list, yielding
            # a ragged/3-D array instead of a clean (5, 2) polygon.
            points = np.array([[x_traj[0] - x_off, y_traj[0] + y_off], [x_traj[0] + x_off, y_traj[0] + y_off], [x_traj[0] + x_off, y_traj[0] - y_off], [x_traj[0] - x_off, y_traj[0] - y_off], [x_traj[0] - x_off, y_traj[0] + y_off]])
            # Heading from consecutive denoised agent positions; the -pi/2
            # aligns the box's long side with the travel direction.
            theta = np.arctan2((y_a[ind + 1] - y_a[ind]), (x_a[ind + 1] - x_a[ind])) - np.pi / 2
            ww = np.zeros(points.shape)
            A = np.matrix([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
            points = points - np.array([x_traj[0], y_traj[0]])
            for i, v in enumerate(points):
                ww[i] = A @ points[i]
            ww[:, 0] += x_traj[0]
            ww[:, 1] += y_traj[0]
            try:
                agent_polygons.append(Polygon(ww))
            except Exception:
                print("AGENT problem")
        for indoo, other in enumerate(others_dfs):
            x_traj = other['X'].values
            y_traj = other['Y'].values
            indo = other['TRACK_ID'].values[0]
            if ind < len(dfs) - 1 and ind < len(x_o[indo]) - 1 and ind < len(y_o[indo]) - 1:
                x_off = 2
                y_off = 2.25
                # Same y_traj[0] fix as for the agent box above.
                points = np.array([[x_traj[0] - x_off, y_traj[0] + y_off], [x_traj[0] + x_off, y_traj[0] + y_off], [x_traj[0] + x_off, y_traj[0] - y_off], [x_traj[0] - x_off, y_traj[0] - y_off], [x_traj[0] - x_off, y_traj[0] + y_off]])
                theta = np.arctan2((y_o[indo][ind + 1] - y_o[indo][ind]), (x_o[indo][ind + 1] - x_o[indo][ind])) - np.pi / 2
                ww = np.zeros(points.shape)
                A = np.matrix([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
                points = points - np.array([x_traj[0], y_traj[0]])
                for i, v in enumerate(points):
                    ww[i] = A @ points[i]
                ww[:, 0] += x_traj[0]
                ww[:, 1] += y_traj[0]
                try:
                    others_polyon.append(Polygon(ww))
                except Exception:
                    print("OTHERS")
        others_polygons.append(others_polyon)
    # --- rasterize pixel by pixel over an (h, w) grid centred on the agent
    # at frame 20, with `res` metres per pixel -------------------------------
    sample = np.zeros((h, w))
    lx = x_a[20] - res * (h / 2)
    ly = y_a[20] - res * (w / 2)
    # (centerline clipping from the original was left disabled)
    # seq_lane_props = avm.city_lane_centerlines_dict[city]
    for i in tqdm(range(h)):
        for j in range(w):
            px = lx + i * res
            py = ly + j * res
            point_xy = Point(px, py)
            flag = 0
            for k in range(len(das_polygons)):
                if Polygon(das_polygons[k]).contains(point_xy):
                    flag = 1
            sample[j, i] = flag
            for k in range(20):
                # get obstacle polygon
                for l in range(len(others_polygons[k])):
                    if others_polygons[k][l].contains(point_xy):
                        grids_obstacles[k, j, i] = 1
                # get agent polygon
                if agent_polygons[k].contains(point_xy):
                    grids_agent[k, j, i] = 1
    print("DONE")
    print(grids_agent.shape)
    # The drivable area is static, so every frame shares the same lane raster.
    for i in range(20):
        grids_lanes[i] = sample
    print(str(infer_path) + "/das/{}.npy".format(idx))
    np.save(str(infer_path) + "/das/{}.npy".format(idx), grids_lanes)
    np.save(str(infer_path) + "/agents/{}.npy".format(idx), grids_agent)
    np.save(str(infer_path) + "/others/{}.npy".format(idx), grids_obstacles)
| Vikr-182/ddn-forecasting | vis/infer.py | infer.py | py | 10,164 | python | en | code | 0 | github-code | 36 |
from .dbtest import (
    DbTest,
    dbconnect
)
import os
from psycopg2.extras import (
    RealDictCursor,
    RealDictRow
)
# Absolute path to the SQL fixture directory (../sql relative to this file).
PATH_TO_SQL_DIR = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        "..",
        "sql"
    )
)
class TestExample(DbTest):
    """Database integration tests.

    Each test loads a SQL fixture file into a fresh connection (supplied by
    the @dbconnect decorator from DbTest) and asserts on query results.
    The geometry tests require a PostGIS-enabled database; the fixture
    geometries appear to use SRID 4326 — see the EWKT literal below.
    """
    @dbconnect
    def test_select_organizations(self, conn):
        """The organizations fixture loads exactly 7 rows."""
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "organizations.sql")
        )
        sql = """
        SELECT * FROM organizations;
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            organizations = cur.fetchall()
            assert len(organizations) == 7
    @dbconnect
    def test_count_the_number_of_subordinates(self, conn):
        """Count customer links per sales organization.

        The LEFT JOIN keeps organizations with no customers in the result
        (with a count of 0); ordering by id makes the comparison stable.
        """
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "organizations.sql")
        )
        sql = """
        SELECT COUNT(enterprise_sales_enterprise_customers.sales_organization_id) as subordinates_count, organizations."id" from organizations
        LEFT JOIN enterprise_sales_enterprise_customers ON organizations.id=enterprise_sales_enterprise_customers.sales_organization_id
        GROUP BY enterprise_sales_enterprise_customers.sales_organization_id, organizations."id" ORDER BY organizations."id";
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            assert len(actual) == 7
            # RealDictRow is a dict subclass, so == compares key/value pairs.
            assert actual == [
            RealDictRow(**{
                "subordinates_count": 0,
                "id": 1,
            })
            , RealDictRow(**{
                "subordinates_count": 4,
                "id": 2,
            })
            , RealDictRow(**{
                "subordinates_count": 0,
                "id": 3,
            })
            , RealDictRow(**{
                "subordinates_count": 0,
                "id": 4,
            })
            , RealDictRow(**{
                "subordinates_count": 0,
                "id": 5,
            })
            , RealDictRow(**{
                "subordinates_count": 1,
                "id": 6,
            })
            , RealDictRow(**{
                "subordinates_count": 0,
                "id": 7,
            })
            ]
    @dbconnect
    def test_calculate_center_of_each_segment(self, conn):
        """ST_Centroid of each segment's bounds, split into lon/lat columns.

        ST_X/ST_Y are applied in an outer query because the centroid is
        computed once in the inner sub-query.
        """
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "japan_segments.sql")
        )
        sql = """
        SELECT sub_query.id, ST_X(sub_query.bounds_center) as longitude, ST_Y(sub_query.bounds_center) as latitude
        FROM (SELECT japan_segments.id as id, st_centroid(bounds) as bounds_center FROM japan_segments) as sub_query;
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            assert len(actual) == 10
            # Expected centroids match the 10 KAGOSHIMA fixture segments.
            assert actual == [
            RealDictRow(**{
                "id": "KAGOSHIMA_1",
                "longitude": 130.642228315775,
                "latitude": 30.7045454545455,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_2",
                "longitude": 130.694183864916,
                "latitude": 30.7045454545455,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_3",
                "longitude": 130.746139414057,
                "latitude": 30.7045454545455,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_4",
                "longitude": 129.707028431231,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_5",
                "longitude": 129.758983980373,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_6",
                "longitude": 129.810939529514,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_7",
                "longitude": 129.862895078655,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_8",
                "longitude": 129.914850627797,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_9",
                "longitude": 129.966806176937,
                "latitude": 30.75,
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_10",
                "longitude": 130.018761726079,
                "latitude": 30.75,
            })
            ]
    @dbconnect
    def test_segments_using_geojson_boundary(self, conn):
        """Segments fully contained in a fixed lon/lat polygon boundary.

        ST_Contains(boundary, bounds) requires full containment, so only the
        three segments entirely inside the box are expected.
        """
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "japan_segments.sql")
        )
        sql = """
        SELECT sub.id from (SELECT * from japan_segments, (SELECT ST_GeomFromEWKT('SRID=4326;POLYGON((130.27313232421875 30.519681272749402,131.02020263671875 30.519681272749402,
        131.02020263671875 30.80909017893796,130.27313232421875 30.80909017893796,130.27313232421875 30.519681272749402))') as boundary) as sub_query) as sub where ST_Contains(sub.boundary, sub.bounds)
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            print(actual)
            assert len(actual) == 3
            assert actual == [
            RealDictRow(**{
                "id": "KAGOSHIMA_1",
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_2",
            })
            , RealDictRow(**{
                "id": "KAGOSHIMA_3",
            })
            ]
| HaithamKhedrSalem/postgis-practices-solution | test/test_example.py | test_example.py | py | 6,115 | python | en | code | 0 | github-code | 36 |
import json
import boto3
from botocore.exceptions import ClientError
import os
# Region comes from the Lambda runtime environment; one boto3 session is
# created at cold start and reused across invocations.
region = os.environ['AWS_REGION']
sess = boto3.session.Session(region_name=region)
# Earlier revision read the bucket name from SSM Parameter Store; it is now
# supplied via the BUCKET_NAME environment variable instead (see handler).
# def get_bucket_name():
#     ssmClient = sess.client('ssm')
#     response = ssmClient.get_parameter(
#         Name = 'ProserveProject_S3BucketName',
#         WithDecryption = True)
#     return response['Parameter']['Value']
def lambda_handler(event, context):
    """Delete one object (its unversioned "null" version) from the S3 bucket.

    Expects a JSON request body of the form {"objectKey": "<key>"}; the
    bucket name comes from the BUCKET_NAME environment variable.  Returns a
    204 (no body) on success, or a 500 JSON error if S3 rejects the delete —
    always including the CORS headers the frontend expects.

    Improvements over the original: the unused `response` local is gone, and
    ClientError (imported at module level but previously never handled) is
    now caught so API Gateway returns a CORS-enabled 500 instead of a bare
    502 from an unhandled Lambda error.
    """
    s3_client = sess.client('s3')
    bucket_name = os.environ['BUCKET_NAME']
    object_key = json.loads(event['body'])["objectKey"].strip()
    cors_headers = {
        'Access-Control-Allow-Headers': '*',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'OPTIONS,POST,GET,DELETE'
    }
    try:
        # VersionId="null" targets the unversioned ("null") object version.
        s3_client.delete_object(
            Bucket=bucket_name,
            Key=object_key,
            VersionId="null",
        )
    except ClientError as e:
        print(e)
        return {
            'statusCode': 500,
            'headers': cors_headers,
            'body': json.dumps("An error occurred")
        }
    return {
        'statusCode': 204,
        'headers': cors_headers,
    }
| ferozbaig96/Proserve-project | lambdas/DeleteS3Object.py | DeleteS3Object.py | py | 1,191 | python | en | code | 0 | github-code | 36 |
from __future__ import absolute_import
import logging
import numpy as np
from .import utils
from .import sampling
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.model_selection import StratifiedShuffleSplit
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class Dataset(object):
    def __init__(self, inputs, labels, test_indices=None, **kwargs):
        """Encapsulates all pieces of data to run an experiment. This is basically a bag of items that makes it
        easy to serialize and deserialize everything as a unit.
        Args:
            inputs: The raw model inputs. This can be set to None if you dont want
                to serialize this value when you save the dataset.
            labels: The raw output labels.
            test_indices: The optional test indices to use. Ideally, this should be generated one time and reused
                across experiments to make results comparable. `generate_test_indices` can be used generate first
                time indices.
            **kwargs: Additional key value items to store.
        """
        self.X = np.array(inputs)
        self.y = np.array(labels)
        # Stash any extra experiment data directly on the instance so it is
        # serialized together with X/y by `save`.
        for key, value in kwargs.items():
            setattr(self, key, value)
        self._test_indices = None
        self._train_indices = None
        # Goes through the `test_indices` property setter, which also derives
        # `_train_indices` as the complement.
        self.test_indices = test_indices
        # Multi-label targets are detected by the container type of the first
        # label (set/list/tuple of labels per sample).
        self.is_multi_label = isinstance(labels[0], (set, list, tuple))
        self.label_encoder = MultiLabelBinarizer() if self.is_multi_label else LabelBinarizer()
        # NOTE(review): .flatten() collapses the binarized label matrix to
        # 1-D.  For multi-class/multi-label outputs this destroys the (n, k)
        # one-hot structure and makes `num_classes` always report 1 —
        # confirm this class is only used with binary targets.
        self.y = self.label_encoder.fit_transform(self.y).flatten()
    def update_test_indices(self, test_size=0.1):
        """Updates `test_indices` property with indices of `test_size` proportion.
        Args:
            test_size: The test proportion in [0, 1] (Default value: 0.1)
        """
        if self.is_multi_label:
            self._train_indices, self._test_indices = sampling.multi_label_train_test_split(self.y, test_size)
        else:
            sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size)
            self._train_indices, self._test_indices = next(sss.split(self.X, self.y))
    def save(self, file_path):
        """Serializes this dataset to a file.
        Args:
            file_path: The file path to use.
        """
        utils.dump(self, file_path)
    def train_val_split(self, split_ratio=0.1):
        """Generates train and validation sets from the training indices.
        Args:
            split_ratio: The split proportion in [0, 1] (Default value: 0.1)
        Returns:
            The stratified train and val subsets. Multi-label outputs are handled as well.
        """
        if self.is_multi_label:
            train_indices, val_indices = sampling.multi_label_train_test_split(self.y, split_ratio)
        else:
            sss = StratifiedShuffleSplit(n_splits=1, test_size=split_ratio)
            train_indices, val_indices = next(sss.split(self.X, self.y))
        return self.X[train_indices], self.X[val_indices], self.y[train_indices], self.y[val_indices]
    @staticmethod
    def load(file_path):
        """Loads the dataset from a file.
        Args:
            file_path: The file path to use.
        Returns:
            The `Dataset` instance.
        """
        return utils.load(file_path)
    @property
    def test_indices(self):
        # Held-out indices; None until explicitly set or generated.
        return self._test_indices
    @test_indices.setter
    def test_indices(self, test_indices):
        if test_indices is None:
            # No held-out set: everything is available for training.
            self._train_indices = np.arange(0, len(self.y))
        else:
            self._test_indices = test_indices
            # Train on everything that is not in the test set.
            self._train_indices = np.setdiff1d(np.arange(0, len(self.y)), self.test_indices)
    @property
    def train_indices(self):
        return self._train_indices
    @property
    def labels(self):
        # The classes discovered by the (fitted) label binarizer.
        return self.label_encoder.classes_
    @property
    def num_classes(self):
        # NOTE(review): because __init__ flattens self.y, this branch always
        # sees a 1-D array and returns 1 — verify against intended usage.
        if len(self.y.shape) == 1:
            return 1
        else:
            return len(self.labels)
| raghakot/keras-text | keras_text/data.py | data.py | py | 4,007 | python | en | code | 422 | github-code | 36 |
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import time
# The two triple-quoted strings below are leftover no-op chat notes in
# Korean from the original authors; they have no runtime effect.
'''
제가 실행하면 보이나요
python3 test.py
쳐보실래요??
저만 실행 되나요?? 안되는데 그러면 ㅠㅠ
맥으로 하는거라 리팩토링하면서 하고있어요
지금 날짜 찾는거까진 했거든요 이제 몇일인지 찾아야해요
ㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋ 함 봐볼까여
'''
'''
근데 그거 그냥 코드로 박으면 안되나여? 궁금하네여 예
'''
# Shared browser setup used by every function below.
# NOTE(review): `options` is configured but never passed to
# webdriver.Chrome(), so the window-size argument has no effect — confirm.
options = webdriver.ChromeOptions()
options.add_argument("window-size=800,600")
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 10)
url = "https://ticket.interpark.com/Gate/TPLogin.asp"
# Opens the Interpark login page immediately at import time.
driver.get(url)
def interpark_login(): # Interpark login
    """Log in to Interpark inside the login iframe.

    SECURITY: the user id and password are hard-coded below; they should be
    moved to environment variables or a secrets store before sharing this
    script.
    """
    driver.switch_to.frame(driver.find_element(By.TAG_NAME, "iframe"))
    driver.find_element(By.ID, "userId").send_keys("chlwldnjs0416")
    driver.find_element(By.ID, "userPwd").send_keys("#Chl4689056")
    driver.find_element(By.ID, "btn_login").click()
def booking_number_site(): # open the booking page for the show code
    """Navigate to the booking session page for the hard-coded show code.

    The commented-out `showcode_entry.get()` is a remnant of a removed GUI
    input; the code "23002291" is used in its place.
    """
    driver.get(
        "http://poticket.interpark.com/Book/BookSession.asp?GroupCode="
        # + showcode_entry.get()
        + "23002291"
    )
def date_select(day_value=23, month_offset=1, round_no=1):
    """Pick the performance date and round on the Interpark booking page.

    Clicks the calendar's "next month" arrow `month_offset` times, selects
    the `day_value`-th day cell (retrying until it appears), then picks
    round `round_no` and presses the "next" button.

    The parameters default to the values that were hard-coded in the
    original script, so existing `date_select()` calls behave the same.
    (A near-identical commented-out draft of this function was removed.)
    """
    # --- date ---------------------------------------------------------------
    while True:
        # BUGFIX: return to the top-level document first; on a retry we are
        # still inside the booking iframe, and looking the iframe up from
        # within itself raised NoSuchElementException outside the try block.
        driver.switch_to.default_content()
        driver.switch_to.frame(driver.find_element(By.ID, "ifrmBookStep"))
        # Advance the calendar one month per click.
        for _ in range(int(month_offset)):
            driver.find_element(
                By.XPATH, "/html/body/div/div[1]/div[1]/div/span[3]"
            ).click()
        try:
            # BUGFIX: `day_value` is an int; the original concatenated it
            # straight into the XPath string, raising TypeError.
            driver.find_element(
                By.XPATH, '(//*[@id="CellPlayDate"])' + "[" + str(day_value) + "]"
            ).click()
            break
        except NoSuchElementException:
            # Date cell not available yet; wait, then retry the whole loop.
            time.sleep(1111)
    # --- round --------------------------------------------------------------
    # BUGFIX: the original referenced an undefined `round_entry` (a Tkinter
    # widget from a removed GUI); the round index is now a parameter.
    wait.until(
        EC.element_to_be_clickable(
            (
                By.XPATH,
                "/html/body/div/div[3]/div[1]/div/span/ul/li["
                + str(round_no)
                + "]/a",
            )
        )
    ).click()
    driver.switch_to.default_content()
    driver.find_element(By.ID, "LargeNextBtnImage").click()
def find_random_seat():
    """Focus the seat-selection iframe (seat picking itself is unfinished)."""
    # Leave whatever frame we are currently in, then enter the seat map.
    driver.switch_to.default_content()
    frame_element = driver.find_element(By.NAME, "ifrmSeat")
    driver.switch_to.frame(frame_element)
    # wait.until(EC.presence_of_element_located(...)) was left unfinished in
    # the original and is intentionally not executed.
# Script entry sequence: log in, open the booking page, pick date and round.
interpark_login() # log in to Interpark
booking_number_site() # open the booking page for the show code
date_select() # select the performance date and round
# find_random_seat() # (unfinished) pick a seat at random
| sinde530/python | interpark/test.py | test.py | py | 4,841 | python | en | code | 0 | github-code | 36 |
14183098505 | import pygame
import config
import math
from unit import Unit
from unit_move import UnitMove
class Enemy(Unit):
    """A pursuing enemy: chases a target position, damages the player on
    hitbox overlap (with a cooldown), and has a circular "look" radius used
    for detecting the player.  Hitboxes are assumed to be [x, y, width,
    height], consistent with their usage throughout this class."""
    def __init__(self, x, y):
        super().__init__(x, y)
        # Spawn point, kept so the enemy can be reset/returned home.
        self.start_position = [x, y]
        # Damage cooldown counter; 0 means the enemy can hit the player.
        self.time_till_damage = 0
        self.look = [] # [center_x, center_y, radius]
        self.score = 0
    def move_to_position(self, move_to_position):
        """Step towards `move_to_position` at BAT_VELOCITY, updating facing."""
        position = self.get_position()
        # Find direction vector (dx, dy) between enemy and player.
        dx, dy = move_to_position[0] - position[0], move_to_position[1] - position[1]
        dist = math.hypot(dx, dy)
        # Face left when the target is to our left, otherwise right.
        if dx < 0:
            self.set_direction(UnitMove.LEFT)
        else:
            self.set_direction(UnitMove.RIGHT)
        if dist > 1:
            dx, dy = dx / dist, dy / dist # Normalize.
            # Move along this normalized vector towards the player at current speed.
            position[0] += dx * config.BAT_VELOCITY
            position[1] += dy * config.BAT_VELOCITY
        self.set_position(position[0], position[1])
    def contains_look(self, player):
        """Return True if the player's hitbox centre is inside the look circle."""
        player_corners = player.get_hitbox_corners()
        enemy_look = self.get_look()
        # Centre of the player's hitbox from its corner list
        # (corners[0] = top-left, corners[1] = top-right, corners[3] = bottom-left).
        player_center = [player_corners[0][0] + (player_corners[1][0] - player_corners[0][0])/2,
                         player_corners[0][1] + (player_corners[3][1] - player_corners[0][1])/2]
        dx = enemy_look[0] - player_center[0]
        dy = enemy_look[1] - player_center[1]
        # Squared-distance test avoids the sqrt.
        if dx * dx + dy * dy <= enemy_look[2] * enemy_look[2]:
            return True
        return False
    def contains(self, player):
        """Return True if any corner of the player's hitbox overlaps this
        enemy's hitbox while the damage cooldown is idle; otherwise advance
        the cooldown.  (The four corner checks below are near-duplicates and
        could be folded into a loop over the corners.)"""
        player_hitbox = player.get_hitbox()
        enemy_hitbox = self.get_hitbox()
        time_till_damage = self.get_time_till_damage()
        if time_till_damage == 0:
            # Check player up left corner is in enemy hitbox
            if enemy_hitbox[0] <= player_hitbox[0] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player down left corner is in enemy hitbox
            # (comment corrected: this tests x, y + h — the bottom-left corner)
            if enemy_hitbox[0] <= player_hitbox[0] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] + player_hitbox[3] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player down right corner is in enemy hitbox
            if enemy_hitbox[0] <= player_hitbox[0] + player_hitbox[2] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] + player_hitbox[3] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player up right corner is in enemy hitbox
            # (comment corrected: this tests x + w, y — the top-right corner)
            if enemy_hitbox[0] <= player_hitbox[0] + player_hitbox[2] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
        else:
            # Cooldown in progress: advance until TIME_TILL_DAMAGE, then reset.
            # NOTE(review): the local was already incremented, so
            # set_time_till_damage(time_till_damage + 1) advances the stored
            # counter by 2 per call — confirm that is intended.
            time_till_damage += 1
            if time_till_damage < config.TIME_TILL_DAMAGE:
                self.set_time_till_damage(time_till_damage + 1)
            else:
                self.set_time_till_damage(0)
        return False
    def render_look(self, player, camera=None):
        """(unused signature note removed)"""
    def render_look(self, screen, camera):
        """Draw the look radius as a blue circle outline in screen space."""
        look = self.get_look()
        pygame.draw.circle(screen, (0, 0, 255), [look[0] - camera[0], look[1] - camera[1]], look[2], 1)
    def set_look(self, hitbox):
        # Look circle is centred on the given hitbox with a fixed RADIUS.
        self.look = [hitbox[0] + hitbox[2] / 2, hitbox[1] + hitbox[3] / 2, config.RADIUS]
    def get_look(self):
        return self.look
    def get_start_position(self):
        return self.start_position
    def set_time_till_damage(self, time_till_damage):
        self.time_till_damage = time_till_damage
    def get_time_till_damage(self):
        return self.time_till_damage
    def move_directrion(self, dx, dy):
        # Intentional no-op override of the base Unit movement hook.
        # NOTE(review): the name looks like a typo of "move_direction" — it
        # must match whatever the base class / callers use.
        pass
    def get_score(self):
        return self.score
| EdySima/The-Lost-Penguin | enemy.py | enemy.py | py | 4,317 | python | en | code | 0 | github-code | 36 |
37538329416 | import scrapy
class CjSpider(scrapy.Spider):
    """Crawl dyxhw.com: follow every top-nav category, then every article
    link in each category, and yield {title, content, pubtime} items.
    Related articles found on an article page are crawled recursively."""
    name = 'cj'
    # allowed_domains = ['caijing.com']
    start_urls = ['https://www.dyxhw.com/']
    def parse(self, response):
        """Follow each category link in the top navigation bar."""
        category_links = response.xpath('//div[@class="nav clearfix"]/a[@class="j_ch_nav _block_news_menu"]/@href').getall()
        for link in category_links:
            yield scrapy.Request(url=link, callback=self.parse_types)
    def parse_types(self, response):
        """Follow each article link listed on a category page."""
        for news_link in response.xpath('//ul[@class="list14 ml10"]/li/a/@href').getall():
            yield scrapy.Request(url=news_link, callback=self.parse_detial)
    def parse_detial(self, response):
        """Extract one article and recurse into its related articles.

        (Method name keeps the original's spelling so existing callbacks
        still resolve.)
        """
        title = response.xpath('//h1[@class="title"]/text()').get()
        contents = response.xpath('//div[@class="clearfix"]/p/text()').getall()
        # Join the paragraph list directly; the original wrapped it in a
        # needless generator expression.
        content = '\n'.join(contents)
        # Removed: an unused `recurse` local whose `.get().strip()` chain
        # raised AttributeError whenever its xpath matched nothing.
        pubtime = response.xpath('//div[@class="info fl"]//span[@class="pubTime"]/text()').get()
        yield {
            'title': title,
            'content': content,
            'pubtime': pubtime,
        }
        for rela in response.xpath('//div[@class="pic-list clearfix"]//h3/a/@href').getall():
            yield scrapy.Request(url=rela, callback=self.parse_detial)
| ykallan/caijingguancha | caijingguancha/caijingguancha/spiders/cj.py | cj.py | py | 1,497 | python | en | code | 0 | github-code | 36 |
def bin(l, h):
    """Binary search for the highest cutter height yielding at least M wood.

    Searches heights in [l, h].  Reads the module globals `trees` (tree
    heights) and `M` (metres of wood required) and records the best cutter
    height found in the global `dap`.  Written iteratively; the original
    used tail recursion with identical behaviour.  (The name shadows the
    builtin `bin`, but is kept so the existing call site still works.)
    """
    global dap
    lo, hi = l, h
    while lo <= hi:
        cut = (lo + hi) // 2
        # Total wood collected when cutting every tree at height `cut`.
        harvest = sum(t - cut for t in trees if t > cut)
        if harvest == M:
            # Exact amount: this height is the answer.
            dap = cut
            return
        if harvest < M:
            # Too little wood -> lower the blade.
            hi = cut - 1
        else:
            # Enough wood -> remember this height, then try a higher blade.
            dap = cut
            lo = cut + 1
# Problem input: N = number of trees, M = metres of wood needed, then the
# N tree heights.  (A line of dataset-extraction junk that had been fused
# onto the final print statement was removed.)
N, M = map(int, input().split())
trees = list(map(int, input().split()))
# Sort the heights before the binary search over cutter heights.
trees.sort()
dap = 0
bin(0, trees[-1])
print(dap)
29754454026 | import sys
from PyQt6 import QtCore, QtGui, QtWidgets
from CurConUi import Ui_MainWindow
from currency_converter import CurrencyConverter
class CurrencyConv(QtWidgets.QMainWindow):
    """Main window: converts an amount between two user-entered currencies."""
    def __init__(self):
        super(CurrencyConv, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.init_ui()
    def init_ui(self):
        """Set the input placeholders and wire the convert button."""
        self.ui.line_new_currency.setPlaceholderText("В какую валюту перевести")
        self.ui.line_old_currency.setPlaceholderText("Из какой валюты перевести")
        self.ui.line_old_amount.setPlaceholderText("У вас было")
        self.ui.button_convert.clicked.connect(self.convert)
        # self.ui.button_convert.setObjectName()
    def convert(self):
        """Convert the entered amount and show the result (2 decimals).

        Improvements over the original: amounts may contain a decimal point
        (float() instead of isdigit()/int()), stray whitespace is ignored,
        and an unknown currency code (ValueError raised by
        CurrencyConverter.convert) shows the error text instead of crashing
        the slot.
        """
        converter = CurrencyConverter()
        old_currency = self.ui.line_old_currency.text().strip().upper()
        new_currency = self.ui.line_new_currency.text().strip().upper()
        old_amount = self.ui.line_old_amount.text().strip()
        try:
            if not (old_amount and old_currency and new_currency):
                raise ValueError("empty input")
            amount = float(old_amount)
            new_amount = round(converter.convert(amount, old_currency, new_currency), 2)
        except ValueError:
            self.ui.line_new_amount.setText("Ошибка ввода")
        else:
            self.ui.line_new_amount.setText(str(new_amount))
# Standard Qt bootstrap: build the application, show the main window, and
# hand control to the Qt event loop until the window is closed.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    application = CurrencyConv()
    application.show()
    sys.exit(app.exec())
| AdirtKa/CurrencyConverter | main.py | main.py | py | 1,476 | python | en | code | 0 | github-code | 36 |
#Intro
print('This program tells you how far an object will fall in a number of seconds.')
#Input
# A value of 0 (or any non-positive number) ends the program — see the loop below.
time = int(input('Enter the falling time in seconds: '))
#Defining Function
def fallingDistance(time):
    """Distance (metres, 1 decimal) an object free-falls in `time` seconds.

    Uses d = g * t^2 / 2 with g = 9.8 m/s^2 (air resistance ignored).
    """
    GRAVITY = 9.8
    fall = GRAVITY * time ** 2 / 2
    return round(fall, 1)
#Loop
# Keep reporting distances until the user enters 0 or a negative time.
while time > 0:
    print('The distance the object will fall in', time, 'seconds is:', fallingDistance(time), '\n')
    time = int(input('Enter the falling time in seconds: '))
| gazarrillo/Falling-Distance-Calculator | Falling Distance.py | Falling Distance.py | py | 473 | python | en | code | 0 | github-code | 36 |
import torch
import drjit as dr
import mitsuba as mi
import sys,os,json
import importlib
sys.path.append(".")
import cv2
import numpy as np
# Prefer CUDA when available (Mitsuba is loaded with the cuda_ad_rgb
# variant below, which also needs a GPU).
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)
else:
    device = torch.device("cpu")
from utils.logger import Logger
from utils.matcher import Matcher
from mitsuba.scalar_rgb import Transform4f as T
from tqdm.std import tqdm
mi.set_variant('cuda_ad_rgb')
# log_level: 0 = silent, >0 = save per-run images, >1 = also save
# per-iteration images/parameters (see usages further down).
log_level = 1
# 2x2 average pooling used by the multi-resolution loss below.
Pooler = torch.nn.AvgPool2d(kernel_size=2)
@dr.wrap_ad(source='drjit', target='torch')
def down_res_loss(st, img, img_ref):
    """MSE between `img` and `img_ref` after `st` rounds of 2x2 average
    pooling (a multi-resolution image loss).

    Decorated with dr.wrap_ad so it can be called from Dr.Jit code while
    the body runs (and differentiates) in PyTorch.  Both images are taken
    as HWC tensors and temporarily permuted to NCHW for pooling.
    """
    img = img[None,...].permute(0,3,1,2)
    img_ref = img_ref[None,...].permute(0,3,1,2)
    while st>0:
        img = Pooler(img)
        img_ref = Pooler(img_ref)
        st = st-1
    if log_level>0:
        # Debug dump of the pooled render via the module-level Logger.
        Logger.save_img("down_res.png",img.permute(0,2,3,1)[0])
    return torch.mean((img-img_ref)**2)
if __name__=="__main__":
    # Usage: python optim.py <method> <config>.  `<method>` may carry a
    # "_hybrid" suffix to switch integrators after `thres` iterations.
    method = sys.argv[1]
    config = sys.argv[2]
    Logger.init(exp_name=config+"/"+method, show=False, debug=False, path="results/",add_time=False)
    tasks = importlib.import_module(f'exp.{config}') # import specific task
    resolution = tasks.resolution #resolution
    spp = tasks.spp # spp
    scene = tasks.scene # scene
    thres = tasks.thres # for hybrid scheme
    max_depth = tasks.max_depth
    match_res = tasks.match_res
    # get target image: either a reference photo on disk, a dedicated
    # ground-truth scene, or a high-spp render of the optimized scene itself.
    # (idiom fix: `hasattr(...) == True` comparisons dropped)
    if hasattr(tasks, "gt_img"):
        gt_img = torch.from_numpy(cv2.cvtColor(cv2.imread(tasks.gt_img),cv2.COLOR_BGR2RGB)).to(device)/255.0
        img_ref = mi.TensorXf(gt_img.reshape(-1,3))
    else:
        if hasattr(tasks, "gt_scene"):
            img_ref = mi.render(tasks.gt_scene, seed=0, spp=8192, sensor=0)
        else:
            img_ref = mi.render(scene, seed=0, spp=8192, sensor=0)
        img_ref = img_ref[...,:3]
        img_np = np.array(mi.util.convert_to_bitmap(img_ref))
        gt_img = torch.from_numpy(img_np).to(device)/255.0
    if log_level>0:
        Logger.save_img("gt_img.png",gt_img)
    gt_img_low= torch.from_numpy(cv2.resize(np.array(mi.util.convert_to_bitmap(img_ref)),(match_res,match_res))).to(device)/255.0
    # pixel matcher using optimal transport(Sinkhorn)
    matcher = Matcher(match_res, device)
    # get optimized parameter and transformation
    opt, apply_transformation, output, params = tasks.optim_settings()
    apply_transformation(params, opt)
    for key in opt.keys():
        dr.enable_grad(opt[key])
    params = mi.traverse(scene)
    # get init image
    img_init = mi.render(scene, params, seed=0, spp=512, sensor=0)
    init_img = torch.from_numpy(np.array( mi.util.convert_to_bitmap(img_init[...,:3]))).to(device)/255.0
    if log_level>0:
        Logger.save_img("init_img.png",init_img)
    # deal with hybrid scheme: strip the "_hybrid" suffix and prepare the
    # second-stage PRB integrator used once `it >= thres`.
    if method.endswith("hybrid"):
        method = method[:-7]
        integrator2 = mi.load_dict({
            'type': "prb_reparam",
            'max_depth': max_depth
        })
    else:
        thres = 10000
    # define integrator
    integrator1 = mi.load_dict({
        'type': method,
        'max_depth': max_depth
    })
    # camera settings are slightly different between EPSM and PRB.
    if method.startswith("manifold"):
        sensor_id = 1
    else:
        sensor_id = 0
    loop = tqdm(range(tasks.it))
    for it in loop:
        apply_transformation(params, opt)
        if it<thres:
            img = mi.render(scene, params, seed=it, spp=spp, integrator=integrator1, sensor=sensor_id)
        else:
            if it==thres:
                # Switching to stage two: reset the optimizer state once.
                for key in opt.keys():
                    opt.reset(key)
            img = mi.render(scene, params, seed=it, spp=spp, integrator=integrator2, sensor=0)
        imgs = np.array(mi.util.convert_to_bitmap(img[...,:3]))
        if log_level>0:
            Logger.save_img(f"optim.png",imgs/255.0,flip=False)
            Logger.add_image(f"optim",imgs/255.0,flip=False)
        if log_level>1:
            Logger.save_img_2(f"optim{it}.png",imgs/255.0,flip=False)
        if img.shape[-1]==5:
            # 5-channel render: use the Sinkhorn matcher to obtain a
            # per-pixel gradient image at match_res, upsampled by repetition.
            render_img = torch.from_numpy(cv2.resize(imgs,(match_res,match_res))).to(device)/255.0
            grad_ = matcher.match_Sinkhorn(render_img[...,:3].reshape(-1,3), gt_img_low[...,:3].reshape(-1,3))
            grad_ = grad_.reshape(match_res,match_res,5)
            grad_ = grad_.repeat(resolution//match_res,resolution//match_res,1)
            grad = mi.TensorXf(grad_)
            dr.backward(img*grad)
        else:
            # whether using multi-resolution loss
            # loss = down_res_loss(6-((7*it)//tasks.it),img,img_ref[...,:3])
            loss = dr.sum(dr.sqr(img - img_ref[...,:3])) / len(img)
            dr.backward(loss)
        try:
            # remove nan in grad
            dic = {}
            for key in opt.keys():
                x = dr.grad(opt[key])
                x[dr.isnan(x)] = 0
                dr.set_grad(opt[key],x)
                dic[key] = float(opt[key].torch().item())#.item()
            if log_level>1:
                Logger.save_param(f"param{it}.npy",dic)
        except Exception:
            # BUGFIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; this best-effort parameter
            # logging now only ignores ordinary exceptions.
            pass
        opt.step()
        loop.set_description(f"Iteration {it:02d}: error={output(opt)}")
    Logger.exit()
    # Final high-quality render of the optimized scene.
    img_final = mi.render(scene, params, seed=0, spp=8192, sensor=0)
    img_final = torch.from_numpy(np.array( mi.util.convert_to_bitmap(img_final[...,:3]))).to(device)/255.0
    if log_level>0:
        Logger.save_img(f"{sys.argv[1]}.png",img_final)
    print("finish optim")
| jkxing/EPSM_Mitsuba3 | EPSM/optim.py | optim.py | py | 5,566 | python | en | code | 4 | github-code | 36 |
38488846369 | #!/usr/bin/env python3
#
# 10. Bayesian History Matching technique (advanced use)
#
import os
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import json
from GPErks.constants import DEFAULT_TMP_OUTFILE_DIR
from GPErks.perks.history_matching import Wave
from GPErks.serialization.labels import read_labels_from_file
from GPErks.serialization.path import posix_path
from GPErks.utils.array import get_minmax
from GPErks.utils.plotting import interp_col, get_col
from GPErks.utils.sampling import Sampler
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from torchmetrics import MeanSquaredError, R2Score
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.gp.mean import LinearMean
from GPErks.log.logger import get_logger
from GPErks.train.emulator import GPEmulator
from GPErks.utils.random import set_seed
def main():
    """Run one wave of Bayesian history matching.

    Trains one univariate GP emulator per experimental feature, applies the
    implausibility criterion over a Latin-hypercube sample of the parameter
    space, and prepares (and saves) the simulation/test datasets for the next
    wave. All inputs are read from local `data/` and `datasets/` folders;
    outputs go to the GPErks default tmp directory.
    """
    # Set logger and enforce reproducibility
    log = get_logger()
    seed = 8
    set_seed(seed)
    # Load experimental values (mean and variance) you aim to match
    exp_data_file = posix_path(os.getcwd(), "data", "example_10", "expdata.json")
    expdata = {}
    with open(exp_data_file, "r") as f:
        expdata = json.load(f)
    exp_mean = [val["mean"] for val in expdata.values()]
    exp_var = [val["var"] for val in expdata.values()]
    # Load input parameters and output features' names
    dataset_dir = Path(posix_path(os.getcwd(), "datasets", "stefano", "8p", "sham"))
    xlabels = read_labels_from_file(dataset_dir / "xlabels.txt")
    ylabels = read_labels_from_file(dataset_dir / "ylabels.txt")
    feature_idx = {key: val for val, key in enumerate(ylabels)}
    # Only features present in the experimental dataset are matched.
    active_features = list(expdata.keys())
    active_indices = [feature_idx[f] for f in active_features]
    # Train list of univariate emulators (one for each feature to match)
    X = np.loadtxt(dataset_dir / "X.txt", dtype=float)
    Y = np.loadtxt(dataset_dir / "Y.txt", dtype=float)
    emulators = []
    for idx, feature in zip(active_indices, active_features):
        y = Y[:, idx]
        dataset = Dataset(X, y, x_labels=xlabels, y_label=feature)
        likelihood = GaussianLikelihood()
        mean = LinearMean(degree=1, input_size=dataset.input_size, bias=True)
        covar = ScaleKernel(MaternKernel(ard_num_dims=dataset.input_size))
        metrics = [MeanSquaredError(), R2Score()]
        experiment = GPExperiment(
            dataset,
            likelihood,
            mean,
            covar,
            metrics=metrics,
            seed=seed
        )
        device = "cpu"
        emulator = GPEmulator(experiment, device)
        emulator.train_auto()
        emulators.append(emulator)
    minmax = get_minmax(X)
    waveno = 1  # number of iteration we are at (wave id if you want)
    cutoff = 3.0  # threshold value for the implausibility criterion
    maxno = 1  # explained below
    # The univariate GPE of each output feature will give for each point x_i a specific implausibility measure.
    # With the current implausibility criterion, for each x_i we take the maximum implausibility across all the output
    # features. With maxno=1, the maximum is calculated across all the output features (i.e., till the last worse
    # implausibility measure). If maxno=2 --> till the previous-to-last worse implausibility measure and so on.
    # With this criterion, the worse-performing emulator (the output feature which is the least well captured) will
    # dominate the entire analysis and thus determine if a point is non-implausible or implausible
    w = Wave(
        emulator=emulators,
        Itrain=minmax,
        cutoff=cutoff,
        maxno=maxno,
        mean=exp_mean,
        var=exp_var,
    )  # instantiate the wave object
    sampler = Sampler(design="lhs", dim=X.shape[1], seed=seed)
    n_samples = 100000
    X = sampler.sample(
        n_samples,
        l_bounds=list(minmax[:, 0]),
        u_bounds=list(minmax[:, 1]),
    )
    # Run one iteration of HM, which is: apply the implausibility criterion to detect regions of non-implausible
    # and of implausible points starting from the initial samples in X
    w.find_regions(X)
    w.print_stats()  # show statistics about the two obtained spaces
    w.plot_wave(xlabels=xlabels, display="impl")  # plot the current wave of history matching (impl. measure plot)
    w.plot_wave(xlabels=xlabels, display="var")  # we can also check the accuracy of the GPEs for the current wave
    # note: if filepath=<path_to_file> flag is provided, the plot will be saved to <path_to_file>
    # How to continue on the next wave in 5 steps
    #
    # (0) Save an exact copy of the wave. We always recommend saving each wave right on completion before manipulating
    # its internal structure as you might need it later for other purposes (see Appendix - A2)
    outfiles_dir = Path(DEFAULT_TMP_OUTFILE_DIR)
    outfiles_dir.mkdir(parents=True, exist_ok=True)
    w0 = w.copy()
    w0.print_stats()
    w0.save(outfiles_dir / f"wave_{waveno}.json")
    # (1) From the current non-implausible region, select points to be simulated and points to be used as tests
    # for the next wave
    n_tests = 10000  # number of TEST points we want for the next wave
    n_simuls = 128  # number of current NIMP points we want to simulate to augment training dataset for the next wave
    n_avail_nimps = len(w0.nimp_idx)  # we currently have available only this number of NIMP points
    # FIX: n_total_points was previously assigned only inside an
    # "if n_tests + n_simuls > n_avail_nimps" branch, so the unconditional
    # augment_nimp call below raised NameError whenever enough NIMP points
    # were already available. Compute the total unconditionally instead;
    # augment_nimp was (and still is) always called.
    n_total_points = n_tests + n_simuls
    w.augment_nimp(n_total_points)  # use 'cloud technique' to generate new NIMP points starting from existing ones
    # Get the requested datasets
    X_simul, X_test = w.get_nimps(n_simuls)
    # We now have all the necessary data to run the next wave: a dataset to simulate to augment the training dataset
    # and build new emulators, and new TEST points to be evaluated with the new emulators. Saving the data to files.
    np.savetxt(outfiles_dir / f"X_simul_{waveno}.txt", X_simul, fmt="%.6f")
    np.savetxt(outfiles_dir / f"X_test_{waveno}.txt", X_test, fmt="%.6f")
    w.print_stats()  # quick check on TESTS, IMP, and NIMP sets' sizes after augmentation
    # (2) Simulate the selected points X_simul
    # (3) Add the simulated points and respective results to the training dataset used in the previous wave
    # (3) Train GPEs on the new, augmented training dataset
    # (4) Start a new wave of HM, where the initial parameter space to be split into non-implausible and
    # implausible regions is no more a Latin Hypercube design but is now the non-implausible region obtained
    # (and augmented) in the previous wave (i.e., X_test)
    # Appendix
    #
    # (A1) Visual check on the datasets generated for the next wave
    X_nimp = w.NIMP
    X_test = np.loadtxt(outfiles_dir / f"X_test_{waveno}.txt", dtype=float)
    X_simul = np.loadtxt(outfiles_dir / f"X_simul_{waveno}.txt", dtype=float)
    # We will inspect only 2 dimensions of the full 8D parameter space to keep it simple
    param = [4, 5]  # select 2 dimensions
    subset_idx = list(np.random.randint(0, X_test.shape[0], size=10*X_simul.shape[0]))  # select an example portion
    colors = interp_col(get_col("blue"), 4)  # getting some blue colour variants
    # Plotting current wave NIMP + next wave TEST + next wave SIMUL
    fig, axis = plt.subplots(1, 1)
    axis.scatter(X_nimp[:, param[0]], X_nimp[:, param[1]], fc=colors[1], ec=colors[1], label=f"X_nimp of wave {waveno}")
    axis.scatter(X_test[subset_idx, param[0]], X_test[subset_idx, param[1]], fc=colors[-1], ec=colors[-1], label=f"X_test for wave {waveno+1}")
    axis.scatter(X_simul[:, param[0]], X_simul[:, param[1]], fc='r', ec='r', label=f"X_simul for wave {waveno+1}")
    axis.set_xlabel(xlabels[param[0]], fontsize=12)
    axis.set_ylabel(xlabels[param[1]], fontsize=12)
    axis.legend()
    fig.tight_layout()
    plt.show()  # TEST + SIMUL points for NEXT wave are all within NIMP space CURRENT wave
    # (A2) Loading a wave object
    # You can load a wave object by providing the same data used to instantiate the wave (emulator, Itrain, cutoff,
    # maxno, mean, var). This is normally done when you need to re-run the wave differently. Alternatively, you can load
    # the wave object with no arguments. This is normally done when you need to examine the wave internal structure.
    # Let's try loading with no arguments.
    w = Wave()
    w.load(Path(outfiles_dir) / f"wave_{waveno}.json")
    w.print_stats()  # notice that TESTS, IMP, and NIMP sets' sizes are the same as pre-augmentation
    # You can get a list of all wave object attributes by printing:
    # print(w.__dict__.keys())
    # Noteworthy attributes are:
    # W.I: implausibility measure obtained for each point in the test set
    # W.PV: percentage emulator variance over experimental variance at each point (given as a fraction)
    # W.NIMP: non-implausible region
    # W.nimp_idx: indices of the initial test set which resulted to be non-implausible
    # W.IMP: implausible region
    # W.imp_idx: indices of the initial test set which resulted to be implausible
    # W.simul_idx: indices of W.NIMP that were selected to be simulated for the next wave
    # W.nsimul_idx: indices of W.NIMP which were not selected for simulations
    # (the respective points will appear in the test set of the next wave instead)
    # The original test set is not stored as an attribute to save space. However, this information can still be
    # retrieved from the stored attributes as:
    X_test = w.reconstruct_tests()
    print((np.equal(X_test, X)).all())  # the test set of first wave was the LHD we generated initially in this script
if __name__ == "__main__":
main()
| stelong/GPErks | examples/example_10.py | example_10.py | py | 9,883 | python | en | code | 3 | github-code | 36 |
41191349576 | import sys
sys.path.append('../../preprocess')
from make_pca import load_landmarks
import numpy as np
import tensorflow as tf
from pfld import predict_landmarks as pfld_predict_landmarks
from pfld_custom import predict_landmarks as pfld_custom_predict_landmarks
from skimage.color import rgb2gray
import cv2
import dlib
from skimage.transform import resize
from prepare_data import IMAGE_SIZE, view_img, resize_lmks
from skimage.transform import resize
import matplotlib
from train_pfld import normalize_data
import os
from detector import get_face_detector
matplotlib.use("TkAgg")
# IMAGE_SIZE = 224
class Rect:
    """Minimal axis-aligned bounding box.

    Mimics the accessor interface of a dlib rectangle (top/bottom/left/right
    methods) so it can stand in for one wherever a face box is expected.
    """

    def __init__(self, t, b, l, r):
        # Edges are stored as given; accessors below mirror dlib.rectangle.
        self.t, self.b, self.l, self.r = t, b, l, r

    def top(self):
        return self.t

    def bottom(self):
        return self.b

    def right(self):
        return self.r

    def left(self):
        return self.l
def predict(data, model_path, predict_fn, image_size=IMAGE_SIZE, depth_multiplier=1.0, **kwargs):
    """Run landmark inference from a TF1 checkpoint.

    Builds the graph via predict_fn, restores weights from model_path, and
    evaluates the prediction tensor on the batch in `data`
    (shape [N, image_size, image_size, 3]).
    """
    placeholder = tf.placeholder(
        tf.float32,
        shape=[None, image_size, image_size, 3],
        name='input_images',
    )
    # predict_fn returns (predictions, ...); only the first output is needed here.
    preds, _, _ = predict_fn(placeholder, image_size, is_training=False,
                             depth_multiplier=depth_multiplier, **kwargs)
    print('predict tensor = ', preds)
    saver = tf.train.Saver()
    with tf.Session() as session:
        saver.restore(session, model_path)
        landmarks = session.run(preds, feed_dict={placeholder: data})
        print('landmarks = ', landmarks)
        return landmarks
def predict_tflite(data, model_path):
    """Run landmark inference through a TFLite interpreter.

    `data` must already match the model's input tensor (batch dim included,
    correct dtype); the first output tensor is returned.
    """
    interp = tf.lite.Interpreter(model_path=model_path)
    interp.allocate_tensors()
    in_info = interp.get_input_details()
    out_info = interp.get_output_details()
    print('input_details ', in_info[0], ' data shape ', data.shape)
    interp.set_tensor(in_info[0]['index'], data)
    interp.invoke()
    result = interp.get_tensor(out_info[0]['index'])
    print('landmarks = ', result)
    return result
def crop(img, box):
    """Return the sub-image of `img` covered by `box` (rows top:bottom, cols left:right)."""
    rows = slice(box.top(), box.bottom())
    cols = slice(box.left(), box.right())
    return img[rows, cols]
def crop_landmarks(landmarks, box):
    """Shift landmark (x, y) coordinates into the box's local frame (origin at its top-left)."""
    origin = np.array([box.left(), box.top()])
    return landmarks - origin
def predict_single(img_path, model_path,
                   image_size=IMAGE_SIZE,
                   depth_multiplier=1.0,
                   predict_fn=pfld_predict_landmarks,
                   zero_mean=True,
                   box_detector='dlib',
                   **kwargs):
    """Detect a face in img_path, crop and resize it, run the landmark model,
    denormalize the 68 predicted landmarks to pixel coordinates, and display
    the result with view_img.

    box_detector selects how the face box is found: 'gt' (ibug 300W
    ground-truth annotations), 'tf' (project face detector), or 'dlib'
    (frontal face detector, the default). model_path ending in '.tflite'
    switches inference to the TFLite interpreter; otherwise a TF1 checkpoint
    is used via predict_fn.
    """
    img_size = image_size
    gt_landmark = None
    # 'gt': derive the face box from the annotated landmark extremes of this image.
    if box_detector == 'gt':
        points, imgs_sizes, imgs = load_landmarks('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml')
        fn = os.path.basename(img_path)
        gt_landmark = None
        # Match the annotation entry by filename suffix.
        for idx, img in enumerate(imgs):
            if img.endswith(fn):
                gt_landmark = points[idx]
                break
        # NOTE(review): if no annotation matches, `box` stays undefined and the
        # crop below raises NameError — confirm callers only use 'gt' on training images.
        if gt_landmark is not None:
            min_y, max_y = gt_landmark[:,1].min(), gt_landmark[:,1].max()
            min_x, max_x = gt_landmark[:,0].min(), gt_landmark[:,0].max()
            box = Rect(min_y, max_y, min_x, max_x)
            # _, gt_landmark = crop_and_resize(, gt_landmark, image_size)
    elif box_detector == 'tf':
        detector = get_face_detector()
        l, t, r, b = detector.detect(img_path)
        box = Rect(t, b, l, r)
        # get face bound
    else:
        # Default: dlib frontal face detector; take the first detection.
        img = dlib.load_rgb_image(img_path)
        detector = dlib.get_frontal_face_detector()
        box = detector(img, 1)[0]
    # cv2.imread yields BGR uint8 — NOTE(review): the model is presumably
    # trained on this channel order; the RGB conversion below is commented out.
    oridata = cv2.imread(img_path)
    # if image_size ==80:
    #     oridata = cv2.cvtColor(oridata,cv2.COLOR_BGR2RGB)
    data = crop(oridata, box)
    # skimage.resize also rescales pixel values to floats in [0, 1].
    data = resize(data, (img_size, img_size), anti_aliasing=True, mode='reflect')
    # view_img(data, None)
    normalized_data = normalize_data(data)
    if model_path.endswith('.tflite'):
        # print('using tflite model ', model_path)
        # is_unint8 = model_path.find('uint8') >= 0
        # if is_unint8:
        #     print('int model')
        #     lmks = predict_tflite((np.reshape(data, (1, *data.shape)) * 255).astype(np.uint8), model_path)[0]
        # else:
        print('float model')
        # Add the batch dimension and force float32 for the TFLite input tensor.
        lmks = predict_tflite(np.reshape(normalized_data, (1, *normalized_data.shape)).astype(np.float32), model_path)[0]
    else:
        lmks = predict(np.reshape(normalized_data, (1, *normalized_data.shape)), model_path, predict_fn,
                    image_size=image_size,
                    depth_multiplier=depth_multiplier,
                    **kwargs)[0]
    # print('landmark = ', lmks)
    # Denormalize the flat [x0, y0, x1, y1, ...] vector back to pixel coordinates:
    # zero_mean models emit values in roughly [-1, 1]; others in [0, 1].
    if zero_mean:
        for i in range(0, 68):
            lmks[i*2] = (lmks[i*2]/2+0.5)* image_size# (lmks[i*2]/2+0.5)*image_size
            lmks[i*2+1] = (lmks[i*2+1]/2 + 0.5) * image_size# (lmks[i*2+1]/2 + 0.5)*image_size
    else:
        for i in range(0, 68):
            lmks[i*2] = (lmks[i*2])* image_size# (lmks[i*2]/2+0.5)*image_size
            lmks[i*2+1] = (lmks[i*2+1]) * image_size# (lmks[i*2+1]/2 + 0.5)*image_size
    # print('landmarks after denorm', lmks)
    lmks = lmks.reshape((68, 2))
    view_img(data, lmks)
if __name__ == '__main__':
    # Manual evaluation harness: choose a trained model configuration by name
    # below and run predict_single on a sample image. All checkpoint and image
    # paths are developer-local and hard-coded.
    # 2960256451_1.jpg
    # '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/30427236_1.jpg'
    use_tflite = False
    model = 'pfld-custom-80-025m-saux7-x3'
    # model = 'ailab'
    if model == 'pfld-64':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-64-05m/pfld-311400' if not use_tflite else '../../data/pfld-64-quant.tflite',
                       depth_multiplier=0.5,
                       image_size=64)
    elif model == 'pfld-112':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-112/pfld-1983600' if not use_tflite else '../../data/pfld-112-quant.tflite',
                       # '../../data/pfld-64.tflite',
                       image_size=112)
    elif model == 'pfld-80':
        predict_single('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-80-025m/pfld-449100',
                       # '../../data/pfld-64.tflite',
                       zero_mean=False,
                       depth_multiplier=0.25,
                       image_size=80)
    elif model == 'pfld-custom-80':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom/pfld-183000',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=1,
                       zero_mean=True,
                       image_size=80)
    elif model == 'pfld-custom-80-025m':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m/pfld-314100',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80)
    elif model == 'pfld-custom-80-025m-aux7':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux7/pfld-376500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux7-x3':
        predict_single( '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux7-x3/pfld-220000',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       fc_x_n=3,
                       box_detector='tf',
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-saux7-x3':
        predict_single( '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-saux7-x3/pfld-310500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       simple_aux=True,
                       zero_mean=True,
                       image_size=80,
                       fc_x_n=3,
                       box_detector='dlib',
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux7-x4-m3':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg',# '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux7-x4-m3/pfld-131500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       fc_x_n=4,
                       mid_conv_n=3,
                       box_detector='tf',
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux8':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux8/pfld-445500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       aux_start_layer='layer_8')
    else:
        # Fallback: evaluate a TFLite model directly.
        # NOTE(review): normalize_lmks is not a predict_single parameter; it is
        # swallowed by **kwargs and unused on the tflite path — confirm intent.
        use_tflite = True
        predict_single('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/landmark_80pose.tflite',
                       normalize_lmks=True,
                       # '../../data/pfld-64.tflite',
                       image_size=80)
| vuamitom/shapenet-tensorflow | model/pfld/eval_pfld.py | eval_pfld.py | py | 11,142 | python | en | code | 1 | github-code | 36 |
30876684301 | # This is necessary to find the main code
import operator
import sys
from Bomberman.bomberman.entity import MonsterEntity
from Bomberman.bomberman.sensed_world import SensedWorld
sys.path.insert(0, '../bomberman')
# Import necessary stuff
from entity import CharacterEntity
from colorama import Fore, Back
from queue import PriorityQueue
import math
from enum import Enum
class TestCharacter(CharacterEntity):
    """Bomberman agent.

    Strategy: A*-navigate to the exit when safe; bomb through walls when the
    exit is unreachable; evade nearby monsters using expectimax (vs. the
    'stupid' monster) or alpha-beta minimax (vs. 'aggressive' and
    'selfpreserving' monsters); dodge bombs/explosions when one is active.
    """

    destination = (0, 0)  # current navigation target (the exit, or a wall to bomb)
    expectiDepth = 3      # search depth for expectimax vs. the stupid monster
    minimaxDepth = 4      # search depth for alpha-beta minimax vs. smart monsters
    bound = 4             # A*-distance at which a monster counts as a threat

    def do(self, wrld):
        """Evaluate the world and perform this turn's action (move and/or bomb)."""
        # Your code here
        loc = (self.x, self.y)
        wrldState = self.evaluateState(wrld)
        characterState = wrldState[0]
        # exit is first destination
        self.destination = wrld.exitcell
        # If the exit is right next to us, just pick it
        if wrld.exitcell in self.getNeighbors(loc, wrld, [obstacles.EXIT, obstacles.PLAYER]):
            move = self.calculateD(loc, wrld.exitcell)
            self.move(move[0], move[1])
            return
        # There is a monster close to us
        if characterState == state.UNSAFE:
            # Drop a bomb to discourage pursuit, then flee with the search
            # algorithm suited to the closest threat's behavior model.
            self.place_bomb()
            # running away from stupid
            if wrldState[1][0] == 'stupid':
                v, action = self.maxvalue(wrld, loc, 0, 'stupid')
                next_move = self.calculateD(loc, action)
                self.move(next_move[0], next_move[1])
            # Running away from aggressive
            if wrldState[1][0] == 'aggressive':
                v, action = self.miniMaxvalue(wrld, -math.inf, math.inf, loc, 0, 'aggressive')
                next_move = self.calculateD(loc, action)
                self.move(next_move[0], next_move[1])
            # Running away from selfpreserving
            if wrldState[1][0] == 'selfpreserving':
                v, action = self.miniMaxvalue(wrld, -math.inf, math.inf, loc, 0, 'selfpreserving')
                next_move = self.calculateD(loc, action)
                self.move(next_move[0], next_move[1])
        # What to do when there is a bomb near us
        if characterState == state.NEAR_BOMB:
            # NOTE(review): `max` here shadows the builtin for the rest of this branch.
            next_move = (0, 0)
            max = 0
            name = ''
            flag = True
            if wrldState[1]:
                name = wrldState[1][0]
                flag = False
            if self.bomb_check(loc, wrld):
                # We are in a blast lane: pick the best neighboring cell
                # outside any blast lane by simulating one step ahead.
                for cell in self.getNeighbors(loc, wrld, [obstacles.EXIT]):
                    if not self.bomb_check(cell, wrld):
                        # predict one step ahead
                        next_move = self.calculateD(loc, cell)
                        newWrld = SensedWorld.from_world(wrld)
                        character = next(iter(newWrld.characters.values()))[0]
                        new_move = self.calculateD((character.x, character.y), (cell[0], cell[1]))
                        character.move(new_move[0], new_move[1])
                        if name != '':
                            monster = self.getMonster(newWrld, name)
                            monster.move(0, 0)
                        newerWrld = newWrld.next()[0]
                        if flag:
                            test = self.exit_utility(newerWrld)
                        else:
                            test = self.utility(newerWrld, name)
                        if test > max:
                            max = test
                            next_move = new_move
                self.move(next_move[0], next_move[1])
            else:
                # Safe where we stand: wait for the blast to resolve.
                self.move(0, 0)
        # What to do if we cannot currently reach the exit
        if characterState == state.BLOCKED:
            walls = []
            route = []
            reachable = False
            # Map a direct course to the exit, ignoring walls
            came_from, cost_so_far = self.AStar(wrld, loc, wrld.exitcell, [obstacles.EXIT, obstacles.WALL])
            path = wrld.exitcell
            while path != loc:
                path = came_from[path]
                route.append(path)
            # Find all the walls you have to go through
            for stepnum, step in enumerate(route):
                self.set_cell_color(step[0], step[1], Fore.RED + Back.GREEN)
                if wrld.wall_at(step[0], step[1]):
                    walls.append(route[stepnum+1])
            # Choose the closest reachable wall to the exit
            closest_wall = (0,0)
            for wall in (walls):
                new_goal = wall
                came_from, cost_so_far = self.AStar(wrld, loc, new_goal, [obstacles.EXIT])
                for path in came_from:
                    if path == new_goal:
                        closest_wall = new_goal
                        reachable = True
                        break
                if reachable: break
            self.destination = closest_wall
            # Navigate to that location
            came_from, cost_so_far = self.AStar(wrld, loc, closest_wall, [obstacles.EXIT])
            path = closest_wall
            next_m = (0, 0)
            # Walk the A* parent chain back to find the first step from loc.
            while path != loc:
                temp = path
                path = came_from[path]
                if path == loc:
                    next_m = temp
                    break
            next_move = self.calculateD(loc, next_m)
            # Place bomb at wall -- deal with diagonal!?!
            if loc == closest_wall:
                self.place_bomb()
            else:
                self.move(next_move[0], next_move[1])
        # What to do if there are no monsters near us and we can reach the exit
        if characterState == state.SAFE:
            # Just do A star
            came_from, cost_so_far = self.AStar(wrld, loc, self.destination, [obstacles.EXIT])
            path = self.destination
            next_m = (0, 0)
            while path != loc:
                temp = path
                path = came_from[path]
                if path == loc:
                    next_m = temp
                    break
            next_move = self.calculateD(loc, next_m)
            self.move(next_move[0], next_move[1])

    # Max Value function of expectimax
    def maxvalue(self, wrld, curr, d, name):
        """Expectimax max node: return (value, move) maximizing expected utility from curr."""
        # Terminal state
        if self.evaluateState(wrld)[0] == state.SAFE or d == self.expectiDepth:
            return self.utility(wrld, name), curr
        if self.evaluateState(wrld)[0] == state.DEAD:
            return -10000, curr
        v = -math.inf
        action = (0, 0)
        for a in self.getNeighbors(curr, wrld, [obstacles.EXIT]):
            # simulate a new world where we make the move
            newWrld = SensedWorld.from_world(wrld)
            character = next(iter(newWrld.characters.values()))[0]
            new_move = self.calculateD((character.x, character.y), (a[0], a[1]))
            character.move(new_move[0], new_move[1])
            monster = self.getMonster(newWrld, name)
            monster.move(0, 0)
            newerWrld = newWrld.next()[0]
            val = self.expvalue(newerWrld, a, d + 1, name)
            if val > v:
                v = val
                action = a
        return v, action

    # Expected Value part of expectimax
    def expvalue(self, wrld, act, d, name):
        """Expectimax chance node: average utility over the monster's uniform random moves."""
        if self.evaluateState(wrld)[0] == state.SAFE or d == self.expectiDepth:
            return self.utility(wrld, name)
        v = 0
        mcurr = self.getMonster(wrld, name)
        possible_moves = self.getNeighbors((mcurr.x, mcurr.y), wrld, [obstacles.PLAYER])
        for a in possible_moves:
            # Stupid monster moves uniformly at random among its options.
            p = 1.0/len(possible_moves)
            # Predict a step ahead using simulated world
            newWrld = SensedWorld.from_world(wrld)
            monster = self.getMonster(newWrld, name)
            new_move = self.calculateD((monster.x, monster.y), (a[0], a[1]))
            monster.move(new_move[0], new_move[1])
            try:
                character = next(iter(newWrld.characters.values()))[0]
            except(IndexError, StopIteration):
                # Character no longer exists in the simulated world: we died.
                return -10000
            character.move(0, 0)
            newerWrld = newWrld.next()[0]
            value = self.maxvalue(newerWrld, act, d+1, name)[0]
            v = v + p*value
        return v

    # Alpha Beta Minimax
    # Max value for Alpha-Beta Pruning
    def miniMaxvalue(self, wrld, alpha, beta, curr, d, name):
        """Minimax max node with alpha-beta pruning: return (value, move) from curr."""
        # Terminal State is we are safe or depth reached
        if self.evaluateState(wrld)[0] == state.SAFE or d == self.minimaxDepth:
            return self.utility(wrld, name), curr
        if self.evaluateState(wrld)[0] == state.DEAD:
            return -10000, curr
        v = -math.inf
        action = (0, 0)
        for a in self.getNeighbors(curr, wrld, [obstacles.EXIT, obstacles.PLAYER]):
            # Simulate a new world where we made that action
            newWrld = SensedWorld.from_world(wrld)
            character = next(iter(newWrld.characters.values()))[0]
            new_move = self.calculateD((character.x, character.y), (a[0], a[1]))
            monster = self.getMonster(newWrld, name)
            character.move(new_move[0], new_move[1])
            monster.move(0, 0)
            newerWrld = newWrld.next()[0]
            val = self.minvalue(newerWrld, alpha, beta, a, d+1, name)
            if val > v:
                v = val
                action = a
            if v >= beta:
                # Beta cutoff: the minimizer will never allow this branch.
                return v, a
            alpha = max(alpha, v)
        return v, action

    # Min value for Minimax Alpha-Beta Pruning
    def minvalue(self, wrld, alpha, beta, act, d, name):
        """Minimax min node: adversarial monster picks the move worst for us."""
        # Terminal State is we are safe or depth reached
        if self.evaluateState(wrld)[0] == state.SAFE or d == self.minimaxDepth:
            return self.utility(wrld, name)
        v = math.inf
        mcurr = self.getMonster(wrld, name)
        possible_moves = self.getNeighbors((mcurr.x, mcurr.y), wrld, [obstacles.PLAYER, obstacles.EXIT, obstacles.MONSTER])
        for a in possible_moves:
            # Simulate a new world where we made that action
            newWrld = SensedWorld.from_world(wrld)
            monster = self.getMonster(newWrld, name)
            new_move = self.calculateD((monster.x, monster.y), (a[0], a[1]))
            monster.move(new_move[0], new_move[1])
            try:
                character = next(iter(newWrld.characters.values()))[0]
            except(IndexError, StopIteration):
                # Character no longer exists in the simulated world: we died.
                return -10000
            character.move(0, 0)
            newerWrld = newWrld.next()[0]
            val, act = self.miniMaxvalue(newerWrld, alpha, beta, act, d + 1, name)
            v = min(v, val)
            if v <= alpha:
                # Alpha cutoff: the maximizer will never choose this branch.
                return v
            beta = min(beta, v)
        return v

    # Main utility function for terminal states
    def utility(self, wrld, name):
        """Score a world: reward exit proximity, penalize monster proximity.

        Weights were presumably hand-tuned per monster type; the smarter
        monsters additionally factor in the exit/monster angle via dpangle.
        """
        # Utility for stupid monster
        if name == 'stupid':
            return 6*(1/(1 + self.exit_utility(wrld))) - 1*(1/((1 + self.monster_utility(wrld, name))**2))
        # Utility for non-stupid monster
        else:
            return 20 * (1 / (1 + self.exit_utility(wrld))) - 50 * (1 / ((1 + self.monster_utility(wrld, name)) ** 2)) + self.dpangle(wrld, name)

    # Calculate Vector between us, the monster, and the exit
    def dpangle(self, wrld, name):
        """Score the angle between the character->exit and character->monster vectors.

        Returns a bonus when the monster is behind us relative to the exit
        (angle >= 90 degrees) and a penalty when it blocks the way.
        """
        try:
            chara = next(iter(wrld.characters.values()))
            character = chara[0]
        except (IndexError, StopIteration):
            # No character in this world: we are dead in this simulation.
            return -10
        # Vector for character to exit
        e = self.destination
        loc = (character.x, character.y)
        ce = tuple(map(operator.sub, e, loc))
        eu = self.calculateH(e, loc)
        if ce == (0, 0) or eu == 0:
            # Standing on the destination: best possible outcome.
            return 10000
        # Vector for character to monster
        monster = self.getMonster(wrld, name)
        mu = self.calculateH((monster.x, monster.y), loc)
        cm = tuple(map(operator.sub, (monster.x, monster.y), loc))
        if cm == (0, 0) or mu == 0:
            # Standing on the monster: worst possible outcome.
            return -10000
        # Dot product
        dp = (ce[0] * cm[0]) + (ce[1] * cm[1])
        cosangle = dp / (eu * mu)
        try:
            angle = math.degrees(math.acos(cosangle))
        except(ValueError):
            # cosangle marginally outside [-1, 1] due to float error.
            return -10
        if self.exit_utility(wrld) <= 4:
            return 10
        # Return values based on if it is higher or lower than 90 degrees
        if angle >= 90:
            return eu
        else:
            return -mu

    # Gets the monster in the current world with a name
    def getMonster(self, wrld, name):
        """Return the monster entity named `name`, or a dummy 'dead' monster if absent."""
        for monster in list(wrld.monsters.values()):
            if monster[0].name == name:
                return monster[0]
        return MonsterEntity('dead', [0], 0, 0)

    # Utility function for the distance to the exit
    def exit_utility(self, wrld):
        """Return the A* path length from the character to self.destination."""
        try:
            chara = next(iter(wrld.characters.values()))
            character = chara[0]
        except (IndexError, StopIteration):
            # No character: return a pessimistic fixed distance.
            return 10
        loc = (character.x, character.y)
        e = self.destination
        exit_came_from, exit_cost_so_far = self.AStar(wrld, loc, (e[0], e[1]), [obstacles.EXIT])
        counter = 0
        path = (e[0], e[1])
        while path != loc:
            try:
                path = exit_came_from[path]
            except (KeyError):
                # Destination unreachable: fall back to straight-line distance.
                return self.calculateH(loc, e)
            counter += 1
        if counter == -1:
            return counter
        return counter

    # Utility function for the distance to the monster
    def monster_utility(self, wrld, name):
        """Return the A* path length from the character to the named monster."""
        try:
            chara = next(iter(wrld.characters.values()))
            character = chara[0]
        except (IndexError, StopIteration):
            # No character: treat as a bad (close) situation.
            return -10
        m = self.getMonster(wrld, name)
        if m.name == 'dead':
            # Monster gone: effectively infinitely far away.
            return 100
        loc = (character.x, character.y)
        mloc = (m.x, m.y)
        monster_came_from, monster_cost_so_far = self.AStar(wrld, loc, mloc, [obstacles.MONSTER, obstacles.PLAYER, obstacles.EXIT])
        counter = 0
        path = mloc
        while path != loc:
            try:
                path = monster_came_from[path]
            except (KeyError):
                # No path to the monster: treat as very far.
                return 100
            counter += 1
        return counter

    # A Star algorithm
    def AStar(self, wrld, start, goal, list_of_e):
        """A* search from start to goal; returns (came_from, cost_so_far) maps.

        list_of_e names the obstacle kinds that count as traversable
        (see getNeighbors). NOTE(review): the loop variable `next` shadows
        the builtin within this method.
        """
        frontier = PriorityQueue()
        frontier.put((0, start))
        came_from = {}
        cost_so_far = {}
        came_from[start] = None
        cost_so_far[start] = 0
        while not frontier.empty():
            current = frontier.get()[1]
            if current == goal:
                break
            for next in self.getNeighbors(current, wrld, list_of_e):
                new_cost = cost_so_far[current] + self.calculateH(next, current)
                if next not in cost_so_far or new_cost < cost_so_far[next]:
                    cost_so_far[next] = new_cost
                    # Priority = path cost so far + Euclidean heuristic to goal.
                    priority = new_cost + self.calculateH(goal, next)
                    frontier.put((priority, next))
                    came_from[next] = current
        return came_from, cost_so_far

    # Heuristic calculation - returns euclidean distance
    def calculateH(self, loc1, loc2):
        """Return the Euclidean distance between two (x, y) cells."""
        (x1, y1) = loc1
        (x2, y2) = loc2
        return math.sqrt(((loc1[0] - loc2[0]) ** 2) + ((loc1[1] - loc2[1]) ** 2))

    # Calculates the dx and dy between two locations
    def calculateD(self, loc1, loc2):
        """Return (dx, dy) taking loc1 to loc2."""
        (x1, y1) = loc1
        (x2, y2) = loc2
        return ((x2 - x1), (y2 - y1))

    # Returns the neighbors of a particular location according to the obstacles passed in - obstacles passed in ARE AVAILABLE to be considered neighbors
    def getNeighbors(self, loc, wrld, list_of_e):
        """Return the 8-connected neighbors of loc that are empty or hold an allowed entity."""
        list_of_N = []
        for dx in [-1, 0, 1]:
            # Avoid out-of-bound indexing
            if (loc[0] + dx >= 0) and (loc[0] + dx < wrld.width()):
                # Loop through delta y
                for dy in [-1, 0, 1]:
                    # Make sure the monster is moving
                    if (dx != 0) or (dy != 0):
                        # Avoid out-of-bound indexing
                        if (loc[1] + dy >= 0) and (loc[1] + dy < wrld.height()):
                            # No need to check impossible moves
                            if obstacles.EXIT in list_of_e:
                                if wrld.exit_at(loc[0] + dx, loc[1] + dy):
                                    list_of_N.append((loc[0] + dx, loc[1] + dy))
                            if obstacles.MONSTER in list_of_e:
                                if wrld.monsters_at(loc[0] + dx, loc[1] + dy):
                                    list_of_N.append((loc[0] + dx, loc[1] + dy))
                            if obstacles.PLAYER in list_of_e:
                                if wrld.characters_at(loc[0] + dx, loc[1] + dy):
                                    list_of_N.append((loc[0] + dx, loc[1] + dy))
                            if obstacles.WALL in list_of_e:
                                if wrld.wall_at(loc[0] + dx, loc[1] + dy):
                                    list_of_N.append((loc[0] + dx, loc[1] + dy))
                            if wrld.empty_at(loc[0] + dx, loc[1] + dy):
                                list_of_N.append((loc[0] + dx, loc[1] + dy))
        return list_of_N

    # Checks if location is in range of a bomb
    def bomb_check(self, loc, wrld):
        """Return True if loc shares a row or column blast lane with any bomb."""
        bomb_range = wrld.expl_range
        for dx in range(-bomb_range, bomb_range):
            # Avoid out-of-bound indexing
            if (loc[0] + dx >= 0) and (loc[0] + dx < wrld.width()):
                if wrld.bomb_at((loc[0] + dx), loc[1]):
                    return True
        for dy in range(-bomb_range, bomb_range):
            # Avoid out-of-bound indexing
            if (loc[1] + dy >= 0) and (loc[1] + dy < wrld.height()):
                if wrld.bomb_at(loc[0], (loc[1] + dy)):
                    return True
        return False

    # Checks if location is in range of an explosion
    def expl_check(self, loc, wrld):
        """Return True if loc shares a row or column with an active explosion."""
        bomb_range = wrld.expl_range
        for dx in range(-bomb_range, bomb_range):
            # Avoid out-of-bound indexing
            if (loc[0] + dx >= 0) and (loc[0] + dx < wrld.width()):
                if wrld.explosion_at((loc[0] + dx), loc[1]):
                    return True
        for dy in range(-bomb_range, bomb_range):
            # Avoid out-of-bound indexing
            if (loc[1] + dy >= 0) and (loc[1] + dy < wrld.height()):
                if wrld.explosion_at(loc[0], (loc[1] + dy)):
                    return True
        return False

    # Returns states and potentially a list of threats
    def evaluateState(self, wrld):
        """Classify the world for the character.

        Returns (state, threats): threats is the list of monster names within
        `bound` A* steps, sorted closest first; it is non-empty only for UNSAFE.
        """
        monsters = []
        try:
            chara = next(iter(wrld.characters.values()))
            character = chara[0]
        except (IndexError, StopIteration):
            return state.DEAD, []
        try:
            monsters = list(wrld.monsters.values())
        except (StopIteration):
            pass
        loc = (character.x, character.y)
        counters = {}
        # Calculate each distance to the monster
        for monster in monsters:
            m = monster[0]
            monsterType = m.name
            mloc = (m.x, m.y)
            monster_came_from, monster_cost_so_far = self.AStar(wrld, loc, mloc, [obstacles.MONSTER, obstacles.PLAYER, obstacles.EXIT])
            counter = 0
            path = mloc
            while path != loc:
                try:
                    path = monster_came_from[path]
                except (KeyError):
                    # Unreachable monster: treat as very far.
                    counter = 100
                    break
                counter += 1
            counters[monsterType] = counter
        counts = [(k, v) for k, v in counters.items() if v <= 4]
        flag = False
        monsterTypes = []
        for count in counts:
            if count[1] <= self.bound:
                flag = True
                monsterTypes.append((count[0], count[1]))
        threats = []
        # Sort the monster list in order of closest
        monsterTypes.sort(key=lambda x: x[1])
        for monster in monsterTypes:
            threats.append(monster[0])
        if flag:
            return state.UNSAFE, threats
        if (wrld.bombs or wrld.explosions):
            return state.NEAR_BOMB, []
        # Does safe path exist?
        came_from, cost_so_far = self.AStar(wrld, loc, wrld.exitcell, [obstacles.EXIT])
        for path in came_from:
            if (path == wrld.exitcell):
                return state.SAFE, []
        return state.BLOCKED, []
class state(Enum):
    """Situation labels produced by evaluateState()."""
    SAFE = 1       # no close threats and A* reaches the exit
    UNSAFE = 2     # at least one monster within the distance bound
    DEAD = 3       # no character entry left in the world
    NEAR_BOMB = 4  # bombs or explosions are present on the board
    BLOCKED = 5    # alive and threat-free, but no path to the exit
class obstacles(Enum):
    """Board-entity kinds used as inclusion filters for neighbor/A* search."""
    EXIT = 1       # the level's exit cell
    MONSTER = 2    # any monster occupying a cell
    WALL = 3       # impassable wall
    BOMB = 4       # planted bomb
    EXPLOSION = 5  # active explosion tile
    PLAYER = 6 | ifeeney/CS4341-projects | Bomberman/group10/testcharacter.py | testcharacter.py | py | 20,664 | python | en | code | 0 | github-code | 36
def factorial(n):
    """Return the factorial of n, a non-negative integer.

    Fixes: the base case now covers n <= 1, so factorial(0) == 1 instead of
    recursing forever; also corrects the docstring typo ("th factorial").
    """
    if n <= 1:
        return 1
    return n * factorial(n-1)
def recursive_multiplication(m, n):
    """Return m * n (n a positive integer) by repeated addition."""
    return m if n == 1 else m + recursive_multiplication(m, n - 1)
def is_prime(n):
    """Return True if n is prime.

    Fixes: the original recursive helper recursed n-2 frames deep
    (RecursionError for large n) and never terminated for n <= 0.  This
    iterative trial division up to sqrt(n) gives the same answers for all
    n >= 1 and returns False (instead of hanging) for n <= 0.
    """
    if n < 2:
        return False
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 1
    return True
def hailstone(n):
    """Print the hailstone (Collatz) sequence starting at n, one value per
    line, and return the number of values printed."""
    steps = 0
    while True:
        print(n)
        steps += 1
        if n == 1:
            return steps
        # Halve when even, otherwise 3n + 1.
        n = n // 2 if n % 2 == 0 else 3 * n + 1
def merge(n1, n2):
    """Merge two numbers whose digits are in decreasing order into one
    number containing all digits, also in decreasing order.

    e.g. merge(31, 42) == 4321.
    """
    # A zero argument contributes no digits.
    if n1 == 0:
        return n2
    if n2 == 0:
        return n1
    # The smaller of the two last digits becomes the result's last digit.
    if n1 % 10 < n2 % 10:
        return n1 % 10 + 10 * merge(n1//10, n2)
    else:
        return n2 % 10 + 10 * merge(n1, n2//10) | yangzilongdmgy/cs61a | discussion/recursion.py | recursion.py | py | 949 | python | en | code | 1 | github-code | 36
41245539467 | from pynvml import *
import logging
from datasets import load_dataset
from datasets import ClassLabel
from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification, TrainingArguments, Trainer
import torch
from tqdm import trange
# construir função que converta spans de relativos a frase para globais
import random
import os
import json
def print_gpu_utilization():
    """Print how much memory GPU 0 currently holds, queried through NVML."""
    nvmlInit()
    meminfo = nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(0))
    print(f"GPU memory occupied: {meminfo.used//1024**2} MB.")
class MyDataset(torch.utils.data.Dataset):
    """Thin Dataset pairing tokenizer encodings with integer labels."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # Detached copies of the idx-th row of each encoding field, plus the
        # matching label as a tensor under the 'labels' key.
        sample = {name: tensor[idx].clone().detach()
                  for name, tensor in self.encodings.items()}
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
def convert_spans(item):
    """Rewrite every mention position in ``item["vertexSet"]`` (in place)
    from sentence-relative token offsets to document-global offsets.

    Returns the mutated item together with a flat list of all mention spans.
    """
    entities = item["vertexSet"]
    entity_spans = []
    mention_types = []
    for entity in entities:
        for mention in entity:
            if mention["sent_id"] != 0:
                # Shift by the total token length of the preceding sentences.
                offset = sum(len(sent) for sent in item["sents"][:mention["sent_id"]])
                mention["pos"] = [offset + mention["pos"][0],
                                  offset + mention["pos"][1]]
                mention["sent_id"] = 0
            mention_types.append(mention['type'])
            entity_spans.append(mention["pos"])
    item["vertexSet"] = entities
    return item, entity_spans
def load_examples_test(dataset):
    """Build one example per validation document: the flattened text plus,
    for every gold relation, the (head, tail) character spans and label."""
    examples = []
    for i, item in enumerate(dataset["validation"]):
        concat_tokens = []
        counter = 0
        # convert_spans mutates item in place and returns all mention spans.
        converted_item, entity_spans = convert_spans(item)
        tokens = item["sents"]
        # Flatten the per-sentence token lists into one token sequence.
        for j in range(len(tokens)):
            concat_tokens += tokens[j]
        del j
        tokens = concat_tokens
        del concat_tokens
        # new
        # Rebuild the document string, recording each mention's char span.
        text = ""
        cur = 0
        new_char_spans = [0]*len(entity_spans)
        entity_spans.sort(key=lambda y:y[0])
        for target_entity in entity_spans:
            tamanho_texto = len(text)  # NOTE(review): unused
            text += " ".join(tokens[cur: target_entity[0]])
            if text:
                text += " "
            char_start = len(text)
            text += " ".join(tokens[target_entity[0]: target_entity[1]])
            char_end = len(text)
            new_char_spans[counter] = (char_start, char_end)
            text += " "
            cur = target_entity[1]
            counter+=1
        text += " ".join(tokens[cur:])
        text = text.rstrip()
        # get true labels
        labels_pairs = tuple(zip(item["labels"]["head"], item["labels"]["tail"], item["labels"]["relation_id"]))
        # Map old token spans -> new character spans.
        entity_spans = [tuple(l) for l in entity_spans]
        oldToNewPos = dict(zip(entity_spans, new_char_spans))
        entities = item["vertexSet"]
        correlations = []
        for pair in labels_pairs:
            for head in entities[pair[0]]:
                if tuple(head["pos"]) in oldToNewPos:
                    head["pos"]=oldToNewPos[tuple(head["pos"])]
            for tail in entities[pair[1]]:
                if tuple(tail["pos"]) in oldToNewPos:
                    tail["pos"] = oldToNewPos[tuple(tail["pos"])]
            # NOTE(review): head/tail leak from the loops above, so the pack
            # uses the LAST mention of each entity — confirm this is intended.
            pack = tuple((head["pos"], tail["pos"], pair[2]))
            correlations += (pack),
        item["vertexSet"] = entities
        examples.append(dict(
            text=text,
            entity_spans= [d[:][:-1] for d in correlations],
            labels = [d[:][-1] for d in correlations]
        ))
    return examples
def load_examples_competition(dataset):
    """Build one candidate example per ordered pair of distinct entities for
    every test document: flattened text, the pair's character spans, an "Na"
    placeholder label, the entity-index pair and the document title.

    Bug fix: the original computed ``pack`` for every pair but appended to
    ``examples`` only after the pair loop, so just the LAST pair of each
    document survived; each pair is now appended.  Dead locals
    (aux_head/aux_tail/correlations/tamanho_texto) removed.
    """
    examples = []
    for item in dataset["test"]:
        # convert_spans mutates item in place; we only need the spans here.
        converted_item, entity_spans = convert_spans(item)
        # Flatten the per-sentence token lists into one token sequence.
        tokens = [tok for sent in item["sents"] for tok in sent]
        # Rebuild the document string, recording each mention's char span.
        text = ""
        cur = 0
        new_char_spans = [0] * len(entity_spans)
        entity_spans.sort(key=lambda span: span[0])
        for idx, target_entity in enumerate(entity_spans):
            text += " ".join(tokens[cur: target_entity[0]])
            if text:
                text += " "
            char_start = len(text)
            text += " ".join(tokens[target_entity[0]: target_entity[1]])
            new_char_spans[idx] = (char_start, len(text))
            text += " "
            cur = target_entity[1]
        text += " ".join(tokens[cur:])
        text = text.rstrip()
        # Every ordered pair of distinct entities is a candidate relation.
        labels_pairs = [(head_id, tail_id, "Na")
                        for head_id in range(len(item["vertexSet"]))
                        for tail_id in range(len(item["vertexSet"]))
                        if head_id != tail_id]
        # Map old token spans -> new character spans.
        entity_spans = [tuple(span) for span in entity_spans]
        oldToNewPos = dict(zip(entity_spans, new_char_spans))
        entities = item["vertexSet"]
        for pair in labels_pairs:
            # One representative mention per entity (random, as before).
            head = random.choice(entities[pair[0]])
            tail = random.choice(entities[pair[1]])
            if tuple(head["pos"]) in oldToNewPos:
                head["pos"] = oldToNewPos[tuple(head["pos"])]
            if tuple(tail["pos"]) in oldToNewPos:
                tail["pos"] = oldToNewPos[tuple(tail["pos"])]
            examples.append(dict(
                text=text,
                entity_spans=(head["pos"], tail["pos"]),
                labels=pair[2],
                idxs_entity_pair=(pair[0], pair[1]),
                title=item["title"]
            ))
        item["vertexSet"] = entities
    return examples
# Script section: load the DocRED dataset from the Hugging Face hub.
torch.cuda.empty_cache()
dataset = load_dataset("docred")
max_value = 0  # NOTE(review): only used by the commented-out scan below
#for i, item in enumerate(dataset["train_annotated"]):
#    total_text_len = 0
#    tokens = item["sents"]
#    num_relations = len(item["labels"]["head"])
class ModifiedClassicLuke(LukeForEntityPairClassification):
    """LUKE entity-pair classifier with its head swapped for a 97-way linear
    layer (the 96 DocRED relations plus "Na")."""
    def __init__(self, config):
        super().__init__(config)
        # 2048 = concatenated head+tail entity representations (2 x 1024);
        # NOTE(review): assumes the luke-large hidden size — confirm.
        self.classifier = torch.nn.Linear(in_features = 2048, out_features = 97, bias = True)
logging.info("Loading data and finetuned dataset for CLASSIC LUKE")
# Load the model previously finetuned for 3 epochs.
model = ModifiedClassicLuke.from_pretrained("model_finetuned_classic")
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
test_examples = load_examples_competition(dataset)
maximum = 0   # NOTE(review): unused
max_seq = 0   # NOTE(review): unused
logging.info("Memory before choosing GPU")
#torch.cuda.empty_cache()
########################## Choose GPU ########################
# set the GPU device to use
cuda_device= 0 # set to 0 to run on CUDA, negative for CPU
if cuda_device < 0:
    device = torch.device("cpu")
else:
    device = torch.device(f"cuda:{cuda_device}")
#model = model.to(device)
#model.eval()
# Convert to inputs
# Drop examples whose tokenized length exceeds 500 (the step equals the list
# length, so this trange "loop" runs exactly once over all examples).
for batch_start_idx in trange(0, len(test_examples), len(test_examples)):
    batch_examples = test_examples[batch_start_idx:batch_start_idx+len(test_examples)]
    texts = [example["text"] for example in batch_examples]
    entity_spans = [example["entity_spans"] for example in batch_examples]
    #gold_labels = [example["labels"] for example in batch_examples]
    idxs_entity_pair = [example["idxs_entity_pair"] for example in batch_examples]
    titles = [example["title"] for example in batch_examples]
    for i in range(len(entity_spans)):
        entity_spans[i] = list(entity_spans[i])
    del batch_examples
    logging.info("Removing too big examples!!")
    num_rejected = 0
    clean_texts = []
    clean_ents = []
    clean_idxs_entity_pairs = []
    clean_titles = []
    tokenizer2 = LukeTokenizer.from_pretrained("studio-ousia/luke-large")
    for ix in range(len(texts)):
        # Renamed from ``input`` (shadowed the builtin).
        encoded = tokenizer2(texts[ix])
        if len(encoded.data["input_ids"]) > 500:
            num_rejected += 1
            continue
        # BUG FIX: the original appended texts[i] (a stale index left over
        # from the span-conversion loop above) and appended the WHOLE
        # idxs_entity_pair / titles lists instead of the ix-th element.
        clean_texts.append(texts[ix])
        clean_ents.append(entity_spans[ix])
        clean_idxs_entity_pairs.append(idxs_entity_pair[ix])
        clean_titles.append(titles[ix])
    texts = clean_texts
    entity_spans = clean_ents
    idxs_entity_pair = clean_idxs_entity_pairs
    titles = clean_titles
torch.cuda.empty_cache()
# The 96 DocRED relation codes plus "Na" (no relation); list order fixes the
# integer label ids used by the classifier head (97 classes).
relations_code_list = ["P1376",
"P607",
"P136",
"P137",
"P131",
"P527",
"P1412",
"P206",
"P205",
"P449",
"P127",
"P123",
"P86",
"P840",
"P355",
"P737",
"P740",
"P190",
"P576",
"P749",
"P112",
"P118",
"P17",
"P19",
"P3373",
"P6",
"P276",
"P1001",
"P580",
"P582",
"P585",
"P463",
"P676",
"P674",
"P264",
"P108",
"P102",
"P25",
"P27",
"P26",
"P20",
"P22",
"Na",
"P807",
"P800",
"P279",
"P1336",
"P577",
"P570",
"P571",
"P178",
"P179",
"P272",
"P170",
"P171",
"P172",
"P175",
"P176",
"P39",
"P30",
"P31",
"P36",
"P37",
"P35",
"P400",
"P403",
"P361",
"P364",
"P569",
"P710",
"P1344",
"P488",
"P241",
"P162",
"P161",
"P166",
"P40",
"P1441",
"P156",
"P155",
"P150",
"P551",
"P706",
"P159",
"P495",
"P58",
"P194",
"P54",
"P57",
"P50",
"P1366",
"P1365",
"P937",
"P140",
"P69",
"P1198",
"P1056"]
# Bidirectional code <-> id mapping used when decoding model predictions.
c2l = ClassLabel(num_classes = 97, names = relations_code_list)
label_list_ids = [c2l.str2int(label) for label in relations_code_list]
#gold_labels_ids = [c2l.str2int(label) for label in gold_labels]
#aa = [c2l.int2str(label) for label in gold_labels_ids] # convert ints to CODE of label!! USE IN EVAL
#inputs = tokenizer(text=texts[0], entity_spans = entity_spans[0], padding = "max_length", max_length = 1024, task = "entity_pair_classification", return_tensors = "pt")
#torch.save(inputs, 'inputs_eval.pt')
#test_dataset = MyDataset(inputs, gold_labels_ids)
logging.info("Beginning of evaluation batching")
output_dir = "evalClassic_17Out"
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
output_filename = os.path.join(output_dir, 'results.json')
# NOTE(review): output_file is opened but never written or closed in this
# script (results go to results_classic.json below) — confirm before removing.
output_file = open(output_filename, 'w')
batch_size = 10
# Wikidata property code -> human-readable relation name (DocRED inventory).
rel2word = {
"Na": "Na",
"P6": "head of government",
"P17": "country",
"P19": "place of birth",
"P20": "place of death",
"P22": "father",
"P25": "mother",
"P26": "spouse",
"P27": "country of citizenship",
"P30": "continent",
"P31": "instance of",
"P35": "head of state",
"P36": "capital",
"P37": "official language",
"P39": "position held",
"P40": "child",
"P50": "author",
"P54": "member of sports team",
"P57": "director",
"P58": "screenwriter",
"P69": "educated at",
"P86": "composer",
"P102": "member of political party",
"P108": "employer",
"P112": "founded by",
"P118": "league",
"P123": "publisher",
"P127": "owned by",
"P131": "located in the administrative territorial entity",
"P136": "genre",
"P137": "operator",
"P140": "religion",
"P150": "contains administrative territorial entity",
"P155": "follows",
"P156": "followed by",
"P159": "headquarters location",
"P161": "cast member",
"P162": "producer",
"P166": "award received",
"P170": "creator",
"P171": "parent taxon",
"P172": "ethnic group",
"P175": "performer",
"P176": "manufacturer",
"P178": "developer",
"P179": "series",
"P190": "sister city",
"P194": "legislative body",
"P205": "basin country",
"P206": "located in or next to body of water",
"P241": "military branch",
"P264": "record label",
"P272": "production company",
"P276": "location",
"P279": "subclass of",
"P355": "subsidiary",
"P361": "part of",
"P364": "original language of work",
"P400": "platform",
"P403": "mouth of the watercourse",
"P449": "original network",
"P463": "member of",
"P488": "chairperson",
"P495": "country of origin",
"P527": "has part",
"P551": "residence",
"P569": "date of birth",
"P570": "date of death",
"P571": "inception",
"P576": "dissolved, abolished or demolished",
"P577": "publication date",
"P580": "start time",
"P582": "end time",
"P585": "point in time",
"P607": "conflict",
"P674": "characters",
"P676": "lyrics by",
"P706": "located on terrain feature",
"P710": "participant",
"P737": "influenced by",
"P740": "location of formation",
"P749": "parent organization",
"P800": "notable work",
"P807": "separated from",
"P840": "narrative location",
"P937": "work location",
"P1001": "applies to jurisdiction",
"P1056": "product or material produced",
"P1198": "unemployment rate",
"P1336": "territory claimed by",
"P1344": "participant of",
"P1365": "replaces",
"P1366": "replaced by",
"P1376": "capital of",
"P1412": "languages spoken, written or signed",
"P1441": "present in work",
"P3373": "sibling"}
# NOTE(review): these counters/lists are never used below — vestigial?
num_predicted = 0
num_gold = 0
num_correct = 0
this_pair = []
all_pairs = []
list_of_dicts = []
torch.cuda.empty_cache()
logging.info("Evaluation will start now!:")
model.eval()
model.to(device)
# NOTE(review): this loop reads the ORIGINAL test_examples, not the cleaned
# lists built by the length-filter loop above — over-long inputs are instead
# truncated by the tokenizer (truncation=True, max_length=512).
for batch_start_idx in trange(0, len(test_examples), batch_size):# len(test_examples) 100
    batch_examples = test_examples[batch_start_idx:batch_start_idx + batch_size]
    texts = [example["text"] for example in batch_examples]
    entity_spans = [example["entity_spans"] for example in batch_examples]
    idxs_entity_pair = [example["idxs_entity_pair"] for example in batch_examples]
    titles = [example["title"] for example in batch_examples]
    #gold_labels = [example["labels"] for example in batch_examples]
    #gold_labels_ids = [c2l.str2int(label) for label in gold_labels]
    for i in range(len(entity_spans)):
        entity_spans[i] = list(entity_spans[i])
    inputs = tokenizer(text=texts, entity_spans=entity_spans, truncation=True, padding = "max_length", max_length = 512, task = "entity_pair_classification", return_tensors = "pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # argmax over the 97 relation logits -> code -> readable relation name.
    predicted_indices = outputs.logits.argmax(-1)
    predicted_labels = [c2l.int2str(pred) for pred in predicted_indices.tolist()]
    predicted_relation = [rel2word.get(rel) for rel in predicted_labels]
    # One DocRED-submission record per (title, head, tail) pair.
    for i in range(len(predicted_relation)):
        list_of_dicts.append(dict(
            title=titles[i],
            h_idx=idxs_entity_pair[i][0],
            t_idx = idxs_entity_pair[i][1],
            r = predicted_relation[i]
        ))
    torch.cuda.empty_cache()
json_object = json.dumps(list_of_dicts, indent = 4)
with open("results_classic.json", "w") as outfile:
    outfile.write(json_object)
| joseMalaquias/tese | DOCRED/classic_obtainJSON.py | classic_obtainJSON.py | py | 17,283 | python | en | code | 0 | github-code | 36 |
9910221737 | import subprocess
import threading
import io
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
import sys
#from flask_socketio import SocketIO
# Boot Pintos under QEMU with the course disk image and run its shell.
command = "pintos -v -k --qemu --disk cs162proj.dsk -- -q run shell"
class Shell():
    """Wraps a long-running subprocess (the Pintos shell) with non-blocking
    pipes, forwarding its output to ``app`` (a socket-like emitter)."""

    def set_flags(self, pipe):
        """Put ``pipe`` (a file object) into non-blocking mode."""
        flags = fcntl(pipe, F_GETFL)
        fcntl(pipe, F_SETFL, flags | O_NONBLOCK)

    def __init__(self, app, command):
        self.app = app              # emitter with .emit(event, data)
        self.cmd = command          # shell command line, split on spaces
        self.p = None               # Popen handle, created lazily by run()
        self.output_thread = threading.Thread()

    def run(self):
        """Start the subprocess if not already running (idempotent)."""
        # Fixed: compare to None with ``is`` (PEP 8), not ``==``.
        if self.p is None:
            self.p = subprocess.Popen(self.cmd.split(' '),
                cwd="./os_build",
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                )
            self.set_flags(self.p.stdout)
            self.set_flags(self.p.stdin)

    def output(self):
        """Forward subprocess stdout to the app, flushing on quiet gaps.

        NOTE(review): this busy-waits (no sleep) and decodes one byte at a
        time, which would raise UnicodeDecodeError on a split multi-byte
        UTF-8 sequence — acceptable for ASCII shell output, but confirm.
        """
        buf = io.StringIO()
        while True:
            data = self.p.stdout.read(1)
            if data:
                buf.write(data.decode('utf-8'))
            else:
                # Pipe momentarily empty: flush whatever accumulated.
                if (buf.getvalue() != ""):
                    self.app.emit('send_output', buf.getvalue())
                    buf = io.StringIO()

    def input(self, command):
        """Write ``command`` (str) to the subprocess's stdin."""
        self.p.stdin.write(command.encode())
        self.p.stdin.flush()
# Module-level singleton; created with no app — NOTE(review): presumably the
# Flask-SocketIO app is attached elsewhere before output() runs; confirm.
shell = Shell(None, command)
| dietd/webpintos | shell.py | shell.py | py | 1,345 | python | en | code | 0 | github-code | 36 |
38031979552 | import click
from .core import NmapReportParser, NmapReport, CSVFileParser, JsonOutput, BateaModel, MatrixOutput
from defusedxml import ElementTree
from xml.etree.ElementTree import ParseError
from batea import build_report
import warnings
# Suppress all warnings so third-party library noise doesn't pollute CLI output.
warnings.filterwarnings('ignore')
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option("-c", "--read-csv", type=click.File('r'), multiple=True)
@click.option("-x", "--read-xml", type=click.File('r'), multiple=True)
@click.option("-n", "--n-output", type=int, default=5)
@click.option("-A", "--output-all", is_flag=True)
@click.option("-L", "--load-model", type=click.File('rb'), default=None)
@click.option("-D", "--dump-model", type=click.File('wb'), default=None)
@click.option("-f", "--input-format", type=str, default='xml')
@click.option('-v', '--verbose', count=True)
@click.option('-oM', "--output-matrix", type=click.File('w'), default=None)
@click.argument("nmap_reports", type=click.File('r'), nargs=-1)
def main(*, nmap_reports, input_format, dump_model, load_model,
         output_all, read_csv, read_xml, n_output, verbose, output_matrix):
    """Context-driven asset ranking based using anomaly detection"""
    # Collect hosts from every supplied report into one aggregate report.
    report = build_report()
    csv_parser = CSVFileParser()
    xml_parser = NmapReportParser()
    # Matrix output writes the raw feature matrix; JSON is the default view.
    if output_matrix:
        output_manager = MatrixOutput(output_matrix)
    else:
        output_manager = JsonOutput(verbose)
    try:
        if input_format == 'xml':
            for file in nmap_reports:
                report.hosts.extend([host for host in xml_parser.load_hosts(file)])
        if input_format == 'csv':
            for file in nmap_reports:
                report.hosts.extend([host for host in csv_parser.load_hosts(file)])
        # -c / -x files are parsed with their explicit format regardless of -f.
        if read_csv:
            for file in read_csv:
                report.hosts.extend([host for host in csv_parser.load_hosts(file)])
        if read_xml:
            for file in read_xml:
                report.hosts.extend([host for host in xml_parser.load_hosts(file)])
    except (ParseError, UnicodeDecodeError, ElementTree.ParseError, ValueError) as e:
        output_manager.log_parse_error(e)
        raise SystemExit
    if len(report.hosts) == 0:
        output_manager.log_empty_report()
        raise SystemExit
    report_features = report.get_feature_names()
    output_manager.add_report_info(report)
    matrix_rep = report.generate_matrix_representation()
    batea = BateaModel(report_features=report_features)
    # Either reuse a pre-trained model or fit a fresh one on this report.
    if load_model is not None:
        batea.load_model(load_model)
    else:
        batea.build_model()
        batea.model.fit(matrix_rep)
    # Negate so that higher score == more anomalous host.
    scores = -batea.model.score_samples(matrix_rep)
    output_manager.add_scores(scores)
    if output_all:
        n_output = len(scores)
    n_output = min(n_output, len(scores))
    # Indices of the n_output highest scores, most anomalous first.
    top_n = scores.argsort()[-n_output:][::-1]
    for i, j in enumerate(top_n):
        output_manager.add_host_info(
            rank=str(i+1),
            score=scores[j],
            host=report.hosts[j],
            features={name: value for name, value in zip(report_features, matrix_rep[j, :])}
        )
    output_manager.flush()
    if dump_model:
        batea.dump_model(dump_model)
if __name__ == "__main__":
    main()
| delvelabs/batea | batea/__main__.py | __main__.py | py | 3,254 | python | en | code | 287 | github-code | 36 |
4517993066 | import os
from django.contrib.auth.views import redirect_to_login
from chat.models import *
from django.db.models.query_utils import Q
from notification.models import *
from user.models import *
from post.models import *
from post.forms import *
from group.models import *
from django.shortcuts import redirect, render
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from pydub.silence import split_on_silence
from pydub import AudioSegment
import numpy as np
import librosa
import math
import pickle
def home(request):
    """Render the landing feed: posts, chats, groups, online users and
    notifications for the logged-in user; anonymous visitors go to login."""
    if request.user.id is None:
        return redirect(reverse('user:login'))
    me = User.objects.get(id=request.user.id)

    personnal_chats = ChatBox.objects.filter(Q(user1=me)|Q(user2=me))
    group_chats = list(GroupChatBox.objects.filter(creator=me)) + [join.groupchatbox for join in JoinGroupChat.objects.filter(invitee=me)]
    my_groups = set(list(Group.objects.filter(admins__in=[me])) + list(Group.objects.filter(members__in=[me])))
    online_users = User.objects.filter(Q(is_online=True)&~Q(id=me.id))

    def _peer_id(chat):
        # The chat partner is whichever side of the box is not ``me``.
        return chat.user2.id if chat.user1 == me else chat.user1.id

    # Newest post first, each wrapped with its reactions and comments.
    post_cards = []
    for post in reversed(Post.objects.all()):
        post_cards.append({
            'view': 'list',
            'post': post,
            'reactions': Reaction.objects.filter(post=post),
            'comments': Comment.objects.filter(post=post),
        })

    context = {
        'posts': post_cards,
        'me': me,
        'personnal_chats': [
            {'chat': chat, 'receiver_id': _peer_id(chat)}
            for chat in personnal_chats
        ],
        'group_chats': [
            {'chat': chat,
             'latest_msg': GroupMessage.objects.filter(chatbox=chat).order_by('-sent')[0]}
            for chat in group_chats
        ],
        'my_groups': my_groups,
        'online_users': online_users,
        'my_notifications': list(reversed(PostNotification.objects.filter(recipient=me).exclude(actor=me))),
    }
    return render(request, 'home.html', context)
def get_mfcc(file_path):
    """Return a T x 36 acoustic feature matrix for a .wav file:
    12 mean-normalized MFCCs plus their first- and second-order deltas,
    transposed to time-major as hmmlearn expects."""
    signal, rate = librosa.load(file_path)
    hop = math.floor(rate * 0.010)   # 10 ms hop
    win = math.floor(rate * 0.025)   # 25 ms analysis window
    cepstra = librosa.feature.mfcc(
        signal, rate, n_mfcc=12, n_fft=1024,
        hop_length=hop, win_length=win)
    # Cepstral mean normalization: subtract each coefficient's mean.
    cepstra = cepstra - np.mean(cepstra, axis=1).reshape((-1, 1))
    # Stack MFCC + delta + delta-delta into a 36 x T matrix, then transpose.
    feats = np.vstack([
        cepstra,
        librosa.feature.delta(cepstra, order=1),
        librosa.feature.delta(cepstra, order=2),
    ])
    return feats.T
def detect_leading_silence(sound, silence_threshold=-42.0, chunk_size=10):
    """Return the duration (ms) of leading silence in ``sound``.

    ``sound`` is a pydub.AudioSegment (or anything sliceable by ms with a
    ``dBFS`` property); ``silence_threshold`` is in dB, ``chunk_size`` in ms.
    Scans chunk by chunk until a chunk at or above the threshold appears.
    """
    assert chunk_size > 0  # guards against an infinite loop
    offset = 0  # ms consumed so far
    while sound[offset:offset + chunk_size].dBFS < silence_threshold and offset < len(sound):
        offset += chunk_size
    return offset
def search(request, filename):
    """Voice search: split a recorded query into words, classify each word
    with per-word HMMs, then list posts containing any recognized word."""
    me = None if request.user.id is None else User.objects.get(id=request.user.id)
    my_groups = set(list(Group.objects.filter(admins__in=[me])) + list(Group.objects.filter(members__in=[me])))
    # Get file audio
    # NOTE(review): machine-specific absolute path; also mixed with the
    # relative "tomo/voice_search_data" below — assumes a fixed CWD; confirm.
    abs_path = "E:/Code/Python/Django/tomo/tomo/voice_search_data/"
    audio_data = AudioSegment.from_file(abs_path+filename, format="wav")
    os.remove(abs_path+filename)
    # split audio into single word's audio
    audio_chunks = split_on_silence(audio_data, min_silence_len=500, silence_thresh=-30)
    # export to folder
    for i, chunk in enumerate(audio_chunks):
        out_file = "tomo/voice_search_data/chunk{0}.wav".format(i)
        print("exporting", out_file)
        chunk.export(out_file, format="wav")
    predict_words = []
    # Predict each segmented audio
    i = 0
    for audio_name in os.listdir('tomo/voice_search_data'):
        if audio_name == 'search.wav':
            continue # ignore if this is the original file
        audio_data = AudioSegment.from_file(abs_path+audio_name, format="wav")
        # trim silence
        start_trim = detect_leading_silence(audio_data)
        end_trim = detect_leading_silence(audio_data.reverse())
        trimmed_sound = audio_data[start_trim:len(audio_data)-end_trim]
        trimmed_sound.export(f"tomo/voice_search_data/trimmed{i}.wav", format="wav")
        # get model
        # NOTE(review): the models are re-loaded from disk for EVERY chunk;
        # hoisting this outside the loop would be a cheap win — confirm.
        class_names = ['con', 'học', 'nhà', 'sinh', 'tuyển', 'một', 'hai', 'ba', 'bốn', 'năm', 'sáu', 'bảy', 'tám', 'chín', 'có', 'không', 'ngày', 'tháng', 'lớp']
        model = {}
        for key in class_names:
            name = f"tomo/models/model_{key}.model"
            with open(name, 'rb') as file:
                # SECURITY: pickle.load runs arbitrary code if the model
                # files are ever attacker-controlled; these are local,
                # project-shipped files, but keep them trusted.
                model[key] = pickle.load(file)
        # predict
        # Pick the word whose HMM gives the highest log-likelihood.
        record_mfcc = get_mfcc(f"tomo/voice_search_data/trimmed{i}.wav")
        scores = [model[cname].score(record_mfcc) for cname in class_names]
        predict_word = class_names[np.argmax(scores)]
        # convert word of num into num (if exist)
        '''num = {
            'một': 1,
            'hai': 2,
            'ba': 3,
            'bốn': 4,
            'năm': 5,
            'sáu': 6,
            'bảy': 7,
            'tám': 8,
            'chín': 9,
        }
        if predict_word in num:
            predict_word = num[predict_word]'''
        predict_words.append(predict_word)
        os.remove("tomo/voice_search_data/" + audio_name)
        os.remove(f"tomo/voice_search_data/trimmed{i}.wav")
        i += 1
    # Get posts relating to predicted word
    posts_search_result = []
    all_posts = Post.objects.all()
    for post in all_posts:
        if any(str(predict_word) in post.text for predict_word in predict_words):
            posts_search_result.append(post)
    context = {
        'posts': [{
            'view': 'list',
            'post': post,
            'reactions': Reaction.objects.filter(post=post),
            'comments': Comment.objects.filter(post=post),
        } for post in reversed(posts_search_result)],
        'my_groups': my_groups,
        'predict_words': predict_words,
        'me': me,
    }
    return render(request, 'search_result.html', context)
def conv_to_num(word):
    """Translate a Vietnamese digit word ('một'..'chín') to its integer.

    Raises KeyError for any word outside the table (same behavior as the
    original inline dict lookup).
    """
    digit_table = {
        'một': 1, 'hai': 2, 'ba': 3,
        'bốn': 4, 'năm': 5, 'sáu': 6,
        'bảy': 7, 'tám': 8, 'chín': 9,
    }
    return digit_table[word]
| longnp030/SocialNetwork-Py | tomo/views.py | views.py | py | 6,784 | python | en | code | 1 | github-code | 36 |
10125696279 | #!/bin/bash/env python
# coding=UTF-8
# by Tarcisio marinho
# github.com/tarcisio-marinho
import requests,json,os
def minha_localizacao(frase):
    """Look up the user's location via the freegeoip.net JSON API and both
    print and speak (espeak, pt-br) the requested piece of it.

    frase -- u'pais' (country), u'estado' (city/state) or u'ip'.

    Fixes: the request now uses the ``url`` variable (it was assigned but the
    URL was hard-coded a second time), and the bare ``except:`` — which also
    swallowed KeyboardInterrupt/SystemExit — is narrowed to Exception.
    NOTE(review): API values are interpolated into an os.system shell string;
    they come from the geolocation service, so treat that service as trusted.
    """
    url = 'http://freegeoip.net/json/'
    try:
        requisicao = requests.get(url)
        dicionario = json.loads(requisicao.text)
        if(frase == u'pais'):
            print('Você está no ')
            print(str(dicionario['country_name'])+', '+str(dicionario['country_code']))
            os.system('espeak -v pt-br -g 4 -a 100 "Você está no '+str(dicionario['country_name'])+'"')
        elif(frase == u'estado'):
            print('Você está em ')
            print(str(dicionario['city'])+'-'+str(dicionario['region_code'])+', '+dicionario['region_name'])
            os.system('espeak -v pt-br -g 4 -a 100 "Você está em '+str(dicionario['city'])+'"')
        elif(frase == u'ip'):
            print('Seu ip é: '+str(dicionario['ip']))
            os.system('espeak -v pt-br -g 4 -a 100 "Seu ipê é"')
    except Exception:
        # Network failure or unexpected API payload (missing keys).
        print('Erro de conexão')
        os.system('espeak -v pt-br -g 4 -a 100 "Erro de conexão"')
def clima(cidade):
    """Fetch the current weather for ``cidade`` from OpenWeatherMap and both
    print and speak (espeak, pt-br) the temperature and conditions.

    Fixes: the bare ``except:`` is narrowed to Exception, and espeak is now
    invoked through subprocess with an argument list — ``cidade`` is
    user-supplied, so the old os.system string interpolation was a shell
    injection vector.  NOTE(review): the API key is hard-coded in the URL.
    """
    import subprocess  # local import: the module header only pulls in requests, json, os

    def falar(texto):
        # Argument-list form: no shell, so ``texto`` cannot inject commands.
        subprocess.call(['espeak', '-v', 'pt-br', '-g', '4', '-a', '100', texto])

    url = 'http://api.openweathermap.org/data/2.5/weather?q='+ cidade + '&APPID=ab6ec687d641ced80cc0c935f9dd8ac9&units=metric'
    try:
        requisicao = requests.get(url)
        dicionario = json.loads(requisicao.text)
        print('A temperatura em '+str(cidade)+' é: ' + str(dicionario['main']['temp'])+ ' graus Celcius')
        falar('A temperatura em '+str(cidade)+' é: ' + str(dicionario['main']['temp'])+ ' graus Celcius')
        condicao = dicionario['weather'][0]['main']
        if condicao == 'Clear':
            print('O clima está: Limpo/Aberto')
            falar('O clima está: Limpo e Aberto')
        elif condicao == 'Clouds':
            print('O clima está: Nebuloso/fechado')
            falar('O clima está: Nebuloso e fechado')
        elif condicao == 'Thunderstorm':
            print('O clima está muito chuvoso e com tempestade, cuidado pae')
            falar('O clima está muito chuvoso e com tempestade, cuidado pae')
        else:
            print('O clima está: '+ condicao)
            falar('O clima está: '+ condicao)
    except Exception:
        # Network failure or unexpected API payload (e.g. unknown city).
        print('Erro de conexão')
        falar('Erro de conexão')
| tarcisio-marinho/Eliza | modulos/mapa.py | mapa.py | py | 2,562 | python | pt | code | 11 | github-code | 36 |
29198681192 | import pandas as pd
def process(mode, dataframe, column_common, column_data, worksheet_list):
df1 = pd.DataFrame()
df2 = pd.DataFrame()
if mode == "A=<-B":
df1 = dataframe[0].copy()
df1.drop_duplicates(subset=['Serial'], inplace=True)
df1.replace(['NO REGISTRA',"",'NO REIGSTRA','','no registra'], pd.NA, inplace=True) # type: ignore
df1.dropna(subset=['Serial'], inplace=True)
df2 = dataframe[1].copy()
df2.drop_duplicates(subset=['Serial'], inplace=True)
df2.replace(['NO REGISTRA',"",'NO REIGSTRA','','no registra'], pd.NA, inplace=True) # type: ignore
df2.dropna(subset=['Serial',f'{column_data}'], inplace=True)
combination = df2[df2[f'{column_common}'].isin(df1[f'{column_common}'])]# type: ignore
worksheet_cons = worksheet_list[0]
data_pending = df1[df1[f'{column_data}'].isnull()]
data_send = combination[combination[f'{column_common}'].isin(data_pending[f'{column_common}'])]
search = dataframe[1]
return data_send, worksheet_cons, search
| SebIngB/SoftwareClinico | procesamiento/config_consult.py | config_consult.py | py | 1,079 | python | en | code | 0 | github-code | 36 |
30473800469 | from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from globalStyle import *
from View.openConn import *
from View.settings import *
from Model.connectDB import *
from Control.session import *
session = Session()
fonts = Fonts()
class SSCI:
def __init__(self, master=None, theme=None):
self.master = master
self.window = Frame(master)
self.window.pack(side=TOP, fill="both")
#Menu
menubar = Menu(self.master, bg=theme.menu)
filemenu = Menu(menubar)
filemenu.add_command(label="Connect", command=self.Connect, font=fonts.default)
filemenu.add_command(label="Disconnect", command=self.Disconnect, font=fonts.default)
filemenu.add_command(label="Exit SSCI", command=self.ExitSSCI, font=fonts.default)
settingsmenu = Menu(menubar)
settingsmenu.add_command(label="Settings", command=self.Settings, font=fonts.default)
menubar.add_cascade(label="File", menu=filemenu, foreground=theme.fontMenu, font=fonts.default)
menubar.add_cascade(label="Tools", menu=settingsmenu, foreground=theme.fontMenu, font=fonts.default)
self.master.config(menu=menubar)
#Exec's
self.up = Frame(self.window, bg=theme.exec)
self.up.pack(side=TOP, fill="both")
self.btnRun = Button(self.up, text="RUN", bg="green", font=fonts.default, command=self.Run)
self.btnRun.grid(row=0, column=0, padx=2, pady=2)
#Scrollbar/Query's
self.querys = Frame(self.window)
self.querys.pack(side=TOP, fill="both")
self.cs = Scrollbar(self.querys, orient="vertical")
self.cs.pack(side=RIGHT, fill="y")
self.txtQuery = Text(self.querys, height=15, relief="raise", yscrollcommand=self.cs.set, bg=theme.query, foreground=theme.fontQuery, font=fonts.query)
self.txtQuery.bind("<Key>", self.Keypress)
self.txtQuery.pack(fill="both")
self.cs.config(command=self.txtQuery.yview)
#DataTable
self.dataTable = Frame(self.window)
self.dataTable.pack(side=BOTTOM, fill="both")
#Open Connect
def Connect(self):
self.open = Toplevel()
OpenConnect(self.open, session=session)
self.open.protocol("WM_DELETE_WINDOW", self.CloseOpenConn)
self.open.transient(self.master)
self.open.focus_force()
self.open.grab_set()
def CloseOpenConn(self):
self.open.destroy()
self.open = None
#Session Over
def Disconnect(self):
session.Over()
#Exit this SSCI
def ExitSSCI(self):
self.master.destroy()
#Open Settings Page
def Settings(self):
self.config = Toplevel()
Settings(self.config)
self.config.protocol("WM_DELETE_WINDOW", self.CloseSettings)
self.config.transient(self.master)
self.config.focus_force()
self.config.grab_set()
def CloseSettings(self):
self.config.destroy()
self.config = None
#Run this query
def Run(self):
if session.active:
if self.txtQuery.get("1.0", END).strip() != "":
verify = True
use = False
try:
#query = self.txtQuery.selection_get() ERROR IN TRY EXCEPT
query = self.txtQuery.get("sel.first", "sel.last")
except:
query = self.txtQuery.get("1.0", END)
if query.split()[0].lower() == "use":
verify = Data(session=session).TestDatabase(query.split()[1].lower())
if verify:
session.SetDatabase(query.split()[1].lower())
if len(query.split()) <= 2:
use = True
messagebox.showinfo(title="Query Exec", message="Success sending query")
else:
query = " ".join(query.split()[2:])
if verify and not use:
data = Data(session=session).Send(query)
if not data:
messagebox.showwarning(title="Incorrect Query", message="This query has incorrect instructions and/or arguments that do not exist in the database.")
elif data.rowcount < 0 and data.description == None:
messagebox.showinfo(title="Transaction Accepted", message="Query sent and returned successfully")
elif data.rowcount >= 0 and data.description == None:
messagebox.showinfo(title="Transaction Accepted", message=str(data.rowcount) + " line affected")
else:
self.InsertTable(table=data)
elif not verify:
messagebox.showwarning(title="Database does not exists", message="The database entered was not found")
else:
messagebox.showwarning(title="Server Not Connected", message="No connection to servers found")
    def Keypress(self, event):
        """Keyboard shortcut handler: run the query on a specific key."""
        # NOTE(review): Tk keycodes are platform-specific; 71 presumably maps
        # to the intended "run" key on the target platform — confirm.
        if event.keycode == 71:
            self.Run()
    #Insert Query Data
    def InsertTable(self, table):
        """Render a DB-API cursor's result set into a ttk.Treeview.

        ``table`` is expected to expose ``fetchall()`` and ``description``
        (column metadata). On any failure an "Incorrect Query" warning is
        shown instead.
        """
        try:
            execute = True
            # NOTE(review): fetchall() consumes the whole result set, so the
            # second iteration always sees an empty list and exits — the
            # while loop effectively runs its body at most once.
            while execute:
                row = table.fetchall()
                columns = []
                cont = 0
                if len(row) != 0:
                    # Treeview column identifiers "#1".."#N".
                    for i in range(len(table.description)):
                        columns.append("#" + str(i + 1))
                    self.csTableY = Scrollbar(self.dataTable, orient="vertical")
                    self.csTableY.pack(side=RIGHT, fill="y")
                    self.csTableX = Scrollbar(self.dataTable, orient="horizontal")
                    self.csTableX.pack(side=BOTTOM, fill="x")
                    self.table = ttk.Treeview(self.dataTable, columns=columns, show="headings", yscrollcommand=self.csTableY.set, xscrollcommand=self.csTableX.set)
                    self.csTableY.config(command=self.table.yview)
                    self.csTableX.config(command=self.table.xview)
                    # Header text comes from the cursor's column metadata.
                    for i in range(len(table.description)):
                        self.table.heading(str(i), text=str(table.description[i][0]), anchor=CENTER)
                    for line in row:
                        self.table.insert(parent="", index=cont, iid=cont, text="", values=line)
                        cont = cont + 1
                    self.table.pack(side=BOTTOM, fill="both")
                else:
                    execute = False
        # NOTE(review): bare except — hides the actual failure; narrowing it
        # would make debugging easier.
        except:
            messagebox.showwarning(title="Incorrect Query",
                                   message="This query has incorrect instructions and/or arguments that do not exist in the database.")
#columns = ("#1", "#2")
#self.table = ttk.Treeview(self.dataTable, columns=columns, show="headings")
#self.table.heading("0", text="Teste", anchor=CENTER)
#self.table.heading("1", text="Teste", anchor=CENTER)
#self.table.insert(parent="", index=0, iid=0, text="", values=("1", "Vineet", "Alpha"))
#self.table.insert(parent="", index=1, iid=1, text="", values=("2", "Anil", "Bravo"))
#self.table.pack(side=BOTTOM, fill="both")
#ssci = Tk()
#Main(ssci)
#ssci.title("SQL Server Control Interface for Unix")
#ssci.geometry("300x250+250+250")
#ssci.attributes("-zoomed", True)
#ssci.mainloop()
| GuilhermeAnselmi/SQLServerControlInterface | SQLServerControlInterface/View/ssci.py | ssci.py | py | 7,398 | python | en | code | 0 | github-code | 36 |
41673811681 | import os
import shutil
from object2urdf import ObjectUrdfBuilder
from cleanup_tools import get_immediate_subdirectories
import argparse
import shapenet
from glob import glob
import point_cloud_utils as pcu
import numpy as np
import trimesh
def as_mesh(scene_or_mesh):
    """Return a single trimesh.Trimesh for either a Trimesh or a Scene.

    A Scene is flattened by concatenating every geometry it contains;
    anything else is returned unchanged.
    """
    if not isinstance(scene_or_mesh, trimesh.Scene):
        return scene_or_mesh
    parts = [
        trimesh.Trimesh(vertices=geom.vertices, faces=geom.faces)
        for geom in scene_or_mesh.geometry.values()
    ]
    return trimesh.util.concatenate(parts)
# Update file
def replace_in_file(filepath, original, replacement):
    """Rewrite *filepath* in place, substituting each ``original`` string
    with its matching ``replacement``.

    Both arguments may be a single string or a list of strings; when lists
    are given they must be the same length and substitutions are applied
    in order.
    """
    targets = original if isinstance(original, list) else [original]
    substitutes = replacement if isinstance(replacement, list) else [replacement]
    assert len(targets) == len(substitutes)
    with open(filepath, "r") as handle:
        contents = handle.read()
    for old, new in zip(targets, substitutes):
        contents = contents.replace(old, new)
    with open(filepath, "w") as handle:
        handle.write(contents)
def main(args):
    """Mirror ShapeNetCoreV2 into ShapeNetCoreV2urdf, producing a
    ``model.obj`` + ``model.urdf`` pair per object.

    With ``args.watertight`` set, non-watertight meshes are repaired via
    point_cloud_utils before being written.
    """
    # Create new directory to place processed files
    new_folder = os.path.join(os.path.dirname(shapenet.__file__), 'ShapeNetCoreV2urdf')
    if not os.path.exists(new_folder):
        os.makedirs(new_folder)
    # Create __init__.py file
    initfile = os.path.join(new_folder, '__init__.py')
    try:
        # NOTE(review): handle from open(..., 'x') is never closed.
        open(initfile, 'x')
    except FileExistsError:
        pass
    shapenet_folder = os.path.join(os.path.dirname(shapenet.__file__), 'ShapeNetCoreV2')
    subdirs = get_immediate_subdirectories(shapenet_folder)
    for subdir in subdirs:
        category_folder = os.path.join(shapenet_folder, subdir)
        # Create new directory for the ShapeNet category
        new_category_folder = os.path.join(new_folder, subdir)
        if not os.path.exists(new_category_folder):
            os.makedirs(new_category_folder)
        # copy prototype.urdf to subdir
        src_proto = os.path.join(shapenet_folder, '_prototype.urdf')
        dst_proto = os.path.join(new_category_folder, '_prototype.urdf')
        shutil.copy2(src_proto, dst_proto)
        builder = ObjectUrdfBuilder(new_category_folder)
        # Layout: <category>/<object>/models/*.obj
        obj_paths = glob(os.path.join(category_folder, '*', 'models', '*.obj'))
        for obj_path in obj_paths:
            # Create new directory for the ShapeNet object
            new_object_folder = os.path.join(new_category_folder, obj_path.split(os.sep)[-3])
            if not os.path.exists(new_object_folder):
                os.makedirs(new_object_folder)
            if args.watertight:
                # Generate watertight mesh
                mesh = as_mesh(trimesh.load(obj_path))
                if mesh.is_watertight:
                    # Copy .obj to new directory
                    shutil.copy2(obj_path, os.path.join(new_object_folder, 'model.obj'))
                else:
                    # 50000: watertighting resolution for point_cloud_utils.
                    vm, fm = pcu.make_mesh_watertight(mesh.vertices, mesh.faces, 50000)
                    watertight_path = os.path.join(new_object_folder, 'model.obj')
                    pcu.save_mesh_vf(watertight_path, vm, fm, dtype=np.float32)
            else:
                # Copy .obj to new directory
                shutil.copy2(obj_path, os.path.join(new_object_folder, 'model.obj'))
            # build urdf
            # NOTE(review): ``filename`` is given a directory here; presumably
            # ObjectUrdfBuilder resolves the mesh inside it — confirm.
            builder.build_urdf(filename=new_object_folder,
                               force_overwrite=True,
                               decompose_concave=False,
                               force_decompose=False,
                               center=None)
            # rename urdf with their .obj name
            src_urdf_path = glob(os.path.join(new_category_folder, '[!_]*.urdf'))[0]
            dst_urdf_path = os.path.join(new_object_folder, 'model.urdf')
            shutil.move(src_urdf_path, dst_urdf_path)
            # Add flag 'concave=yes' to allow concave meshes in simulators,
            # edit the new urdf with the updated mesh path
            obj_index = dst_urdf_path.split(os.sep)[-2]
            original = [f'filename=\"{obj_index}\"',
                        'collision']
            replacement = ['filename=\"model.obj\"',
                           'collision concave=\"yes\"']
            replace_in_file(dst_urdf_path, original, replacement)
if __name__ == '__main__':
    # CLI entry point: --watertight additionally repairs non-watertight
    # meshes before URDF generation.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--watertight", default=False, action='store_true', help="Extract watertight meshes and watertight URDF"
    )
    args = parser.parse_args()
    main(args)
| dexterousrobot/obj_urdfs | obj_urdfs/build_shapenet_urdfs.py | build_shapenet_urdfs.py | py | 4,788 | python | en | code | 2 | github-code | 36 |
42243521560 | import numpy as np
import matplotlib.pyplot as plt
import bead_util as bu
import scipy.signal as ss
path = "/data/20180927/bead1/spinning/50s_monitor_5min_gaps"
files = bu.find_all_fnames(path)
index = 0
fdrive = 1210.7
bw = 0.5
bwp = 5.
Ns = 250000
Fs = 5000.
k = 1e-13*(2.*np.pi*370.)**2
df = bu.DataFile()
df.load(files[-2])
df.load_other_data()
df.diagonalize()
drive = df.other_data[2]
resp = ss.detrend(df.pos_data[index])*df.conv_facs[0]/k
drive = ss.detrend(df.other_data[2])*df.conv_facs[0]/k
respft = np.fft.rfft(resp)
driveft = np.fft.rfft(drive)
freqs = np.fft.rfftfreq(Ns, d = 1./Fs)
#plot the data
plt.loglog(freqs, np.abs(respft)*2./Ns)
plt.axvline(x = fdrive, linestyle = '--', color = 'k', label = str(fdrive)+"Hz drive", alpha = 0.5)
plt.legend()
plt.xlabel("Frequency [Hz]")
plt.ylabel("Apparent Displacement [m]")
plt.show()
#plot the zoom
plt.semilogy(freqs, np.abs(respft)*2./Ns)
plt.axvline(x = fdrive, linestyle = '--', color = 'k', label = str(fdrive)+"Hz drive", alpha = 0.5)
plt.legend()
plt.xlabel("Frequency [Hz]")
plt.ylabel("Apparent Displacement [m]")
plt.xlim([fdrive-bwp/2., fdrive+bwp/2.])
plt.show()
#get inst amp and phase
tarr = np.linspace(0., 50., 250000)
respft_line = respft
driveft_line = driveft
respft_line[np.abs(freqs - fdrive)>bw] = 0.
driveft_line[np.abs(freqs - fdrive)>bw] = 0.
anal_signal_resp = ss.hilbert(np.fft.irfft(respft_line))
anal_signal_drive = ss.hilbert(np.fft.irfft(driveft_line))
phir = np.unwrap(np.angle(anal_signal_resp)) - np.unwrap(np.angle(anal_signal_drive))
plt.plot(tarr, np.abs(anal_signal_resp))
plt.xlabel("Time [s]")
plt.ylabel("Instantaneous Amplitude [m]")
plt.ylim([0, 4e-10])
plt.xlim([0, 50])
plt.show()
plt.plot(tarr, np.abs(phir))
plt.xlabel("Time [s]")
plt.ylabel("Drive Response Phase Difference [rad]")
plt.xlim([0, 50])
#plt.ylim([0, 3])
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/spinning/old_scripts/inst_amp_phase_plot.py | inst_amp_phase_plot.py | py | 1,852 | python | en | code | 1 | github-code | 36 |
41744416176 | from st7920 import ST7920
from random import randint
from time import sleep
import curses
import collections
# Board geometry: the 128x64 LCD is divided into SCALE-pixel cells.
SCALE = 4
# Integer division keeps the grid dimensions ints — ``128/SCALE`` is true
# (float) division in Python 3, and random.randint rejects float bounds.
WIDTH = 128 // SCALE
HEIGHT = 64 // SCALE
score = 0
alive = True
s = ST7920()
def newfoodpos():
    """Return a random [x, y] grid cell for the food pellet.

    ``int()`` guards against WIDTH/HEIGHT being floats (``128/SCALE`` is
    true division in Python 3); random.randint rejects non-integer bounds.
    """
    return [randint(0, int(WIDTH) - 1), randint(0, int(HEIGHT) - 1)]
def update():
    """Advance the snake one cell, handle food/collisions, and redraw."""
    global headpos, foodpos, score
    # headdir encoding: 0 east, 1 south, 2 west, 3 north (see bottom of file).
    if headdir == 0:
        newpos = [headpos[0]+1, headpos[1]]
    elif headdir == 1:
        newpos = [headpos[0], headpos[1]+1]
    elif headdir == 2:
        newpos = [headpos[0]-1, headpos[1]]
    else:
        newpos = [headpos[0], headpos[1]-1]
    # Wrap around the board edges.
    if newpos[0]<0: newpos[0] += WIDTH
    if newpos[0]>=WIDTH: newpos[0] = 0
    if newpos[1]<0: newpos[1] += HEIGHT
    if newpos[1]>=HEIGHT: newpos[1] = 0
    # Running into our own body ends the game (dead() exits the process).
    if (newpos in snakebits):
        dead()
    if newpos[0]==foodpos[0] and newpos[1]==foodpos[1]:
        foodpos = newfoodpos() # don't remove if we hit the food
        score += 1
    else:
        snakebits.popleft() #remove the last tail bit
    snakebits.append(newpos)
    headpos = newpos
    draw()
    s.redraw()
def dead():
    """Show the game-over screen with the final score, then exit."""
    global alive
    alive = False
    s.clear()
    # Text centering assumes a 6-pixel-wide font ("You died!" is 9*6=54 px).
    s.put_text("You died!", ((WIDTH*SCALE)-54)/2, ((HEIGHT*SCALE)/2)-8)
    msg = "Score: " + str(score)
    s.put_text(msg, ((WIDTH*SCALE)-(6*len(msg)))/2, ((HEIGHT*SCALE)/2))
    s.redraw()
    exit()
def draw():
    """Render the board: food as an outline, snake body as filled cells."""
    s.clear()
    s.rect(foodpos[0]*SCALE, foodpos[1]*SCALE, ((foodpos[0]+1)*SCALE)-1, ((foodpos[1]+1)*SCALE)-1)
    for bit in snakebits:
        s.fill_rect(bit[0]*SCALE, bit[1]*SCALE, ((bit[0]+1)*SCALE)-1, ((bit[1]+1)*SCALE)-1)
def showsplash(screen):
    """Show the title screen for 3 seconds and drain pending key presses."""
    s.clear()
    s.put_text("SNAKE!", ((WIDTH*SCALE)-36)/2, ((HEIGHT*SCALE)/2)-16)
    s.put_text("Arrow keys", ((WIDTH*SCALE)-60)/2, ((HEIGHT*SCALE)/2))
    s.put_text("to control!", ((WIDTH*SCALE)-66)/2, ((HEIGHT*SCALE)/2)+8)
    s.redraw()
    sleep(3)
    while screen.getch() != -1: # clear the input buffer
        pass
def main(screen):
    """Game loop: read arrow keys (non-blocking) and tick every 50 ms."""
    global headdir
    screen.nodelay(1)
    showsplash(screen)
    while alive:
        char = screen.getch()
        # 113 is ord('q'): quit. Reversing direction 180° is disallowed.
        if char==113: exit()
        elif char==curses.KEY_RIGHT and headdir!=2 : headdir = 0
        elif char==curses.KEY_DOWN and headdir!=3: headdir = 1
        elif char==curses.KEY_LEFT and headdir!=0: headdir = 2
        elif char==curses.KEY_UP and headdir!=1: headdir = 3
        update()
        sleep(0.05)
s.clear()
s.redraw()
# Initial state: 4-segment snake heading east, head at (5, 5).
foodpos = newfoodpos()
snakebits = collections.deque()
headpos = [5,5]
snakebits.append([2,5])
snakebits.append([3,5])
snakebits.append([4,5])
snakebits.append(headpos)
headdir = 0 #0:east, 1:south, 2:west, 3:north
# curses.wrapper handles terminal setup/teardown around the game loop.
curses.wrapper(main)
| JMW95/RaspiLCDGames | snake.py | snake.py | py | 2,499 | python | en | code | 3 | github-code | 36 |
71846182823 | if __name__ == "__main__":
    from ESParserPy.dataFile import DataFile
    from ESParserPy.dataWriter import DataWriter
    import sys
    args = sys.argv
    # CLI: teleport.py <save-file> <system> <planet>
    outPath = args[1]
    saveFile = DataFile(outPath)
    system = args[2]
    planet = args[3]
    # Rewrite the player's location, and the location/position of every ship.
    for node in saveFile.Begin():
        if node.Token(0) == "system":
            node.tokens[1] = system
        elif node.Token(0) == "planet":
            node.tokens[1] = planet
        elif node.Token(0) == "ship":
            for child in node.Begin():
                if child.Token(0) == "system":
                    child.tokens[1] = system
                elif child.Token(0) == "planet":
                    child.tokens[1] = planet
                elif child.Token(0) == "position":
                    # Reset in-system coordinates to the origin.
                    child.tokens[1] = "0"
                    child.tokens[2] = "0"
    # Write the modified nodes back over the same file.
    newSave = DataWriter(outPath)
    for node in saveFile.Begin():
        newSave.Write(node)
    newSave.Save()
| comnom/ES-tools | teleport.py | teleport.py | py | 789 | python | en | code | 3 | github-code | 36 |
32505694658 | import streamlit as st
# import pandas as pd
import numpy as np
import pydeck as pdk
import plotly.express as px
from ParserXML import *
from ConverterToHTML import *
from VisualTools import *
__all__ = [st, pd, np, pdk, px]
DATE_TIME = "date/time"
local_path = ""
file_name = "Datasets/50k_cleaned_from_xml.csv"
DATA_URL = local_path + file_name
st.title("Motor Vehicle Collisions Analyzer")
st.markdown("This application is a Streamlit dashboard that can \
be used to analyze XML-file from the State Automobile Inspection ЁЯЪФЁЯТе")
st.markdown("ЁЯФ╡ Author: **Andriy Fedorych**")
st.markdown("ЁЯЯб GitHub: [**StopFuture**](https://github.com/StopFuture)")
upload_check = False
xml_source_file = st.file_uploader("Upload XML File", type="xml")
if xml_source_file is not None and upload_check is False:
    try:
        # Parse the uploaded XML into a DataFrame and prepare an HTML exporter.
        context_t = DefParserXML(XMLDictStrategy())
        context_t.strategy = XMLDictStrategy()
        imported = context_t.extract_data(xml_source_file.name)
        Converter = ConverterToHTML(xml_source_file.name)
        # Cached cleaning step: normalize columns, drop incomplete rows,
        # and coerce dtypes.
        @st.cache(persist=True)
        def load_data(imported_data):
            def lowercase(el): return str(el).lower()
            imported_data.rename(lowercase, axis='columns', inplace=True)
            imported_data.dropna(subset=["latitude", "longitude", "injured_persons", "date-time", "on_street_name"],
                                 inplace=True)
            imported_data['date-time'] = pd.to_datetime(imported_data['date-time'], format='%Y-%m-%d %H:%M:%S')
            for name in ["injured_persons", "killed_persons", "injured_pedestrians",
                         "killed_pedestrians", "injured_cyclists", "killed_cyclists", "injured_motorists",
                         "killed_motorists"]:
                imported_data[name] = imported_data[name].astype('int')
            imported_data['latitude'] = imported_data['latitude'].astype('float')
            imported_data['longitude'] = imported_data['longitude'].astype('float')
            return imported_data
        upload_check = True
    # Any parse failure is treated as "not an SAI export".
    except Exception as exp:
        x = exp
        st.markdown("⚠️ ️**The file is not from the SAI system, try upload another file**")
else:
    upload_check = False
if upload_check:
    data = load_data(imported)
    # Keep an untouched copy; ``data`` is filtered by hour further below.
    origin = data
    st.header("Where are the most people injured in city?")
    injured_people = st.slider("Number of persons injured in vehicle collisions", 0, 18)
    midpoint = (np.average(data["latitude"].dropna(how="any")), np.average(data["longitude"].dropna(how="any")))
    # NOTE(review): ``cols`` is not defined in this file — presumably exported
    # by one of the star imports (VisualTools?); confirm.
    tmp_data = data.query("injured_persons >= @injured_people")[cols]
    HeatMap(data, midpoint, injured_people)
    if st.checkbox("Show Raw Data ", False):
        st.subheader('Raw Data')
        x = (st.text_input("Number of displayed rows : ", value="1"))
        st.write(tmp_data.head(int(x) if x != "" else 0))
    DownloadButton(tmp_data, Converter)
    st.header("How many collisions occur during a given time of day(60 min interval)?")
    hour = st.slider("Hour to look at", 0, 24)
    data = data[data['date-time'].dt.hour == hour]
    st.markdown(f"Vehicle collisions between {hour}:00 and {hour + 1}:00")
    HistMap(data, midpoint)
    if st.checkbox("Show Raw Data", False):
        st.subheader('Raw Data')
        x = (st.text_input("Number of displayed rows: ", value="10"))
        st.write(data.head(int(x) if x != "" else 0))
    # NOTE(review): on_click is given the *result* of create_html(...), so the
    # export runs on every rerun rather than on click — should probably be
    # on_click=Converter.create_html with args=(tmp_data, Converter.source).
    st.button(
        f"Extract this data as {Converter.set_source(st.text_input('Select a name:', value=Converter.source))}.html ",
        key=None, help=None, on_click=Converter.create_html(tmp_data, Converter.source))
    # Hist
    st.subheader("Breakdown by minute between %i:00 and %i:00" % (hour, (hour + 1) % 24))
    filtered = data[
        (data['date-time'].dt.hour >= hour) & (data['date-time'].dt.hour <= hour + 1)
    ]
    hist = np.histogram(filtered["date-time"].dt.minute, bins=60, range=(0, 60))[0]
    chart_data = pd.DataFrame({'minute': range(0, 60, 1), 'crashes': hist})
    fig = px.bar(chart_data, x="minute", y="crashes", hover_data=["minute", "crashes"], height=500)
    st.write(fig)
    st.markdown("The data may be inaccurate, because most of the time is rounded up to 5 minutes")
    if st.checkbox("Show raw data", False):
        st.subheader('Raw Data')
        st.write(data.head(10))
    st.header("Top dangerous streets by affected class")
    Box(data)
    st.header("Creating html file from source data")
| StopFuture/AnalyzerXML | AnalyzerXML.py | AnalyzerXML.py | py | 4,533 | python | en | code | 1 | github-code | 36 |
7619783575 | import time
from pathlib import Path
import torch
import torch.nn as nn
from torch.optim import RMSprop, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from .evaluate import evaluate
from .logger import print_logger
def train_net(net,
              dataloaders,
              device,
              result_path : Path,
              learning_rate: float = 0.1,
              epochs : int = 999,
              ):
    """Train a segmentation network with early stopping on validation Dice.

    Args:
        net: model to train (moved batches are cast to float32).
        dataloaders: dict with 'Train' and 'Valid' DataLoaders.
        device: torch device for batches.
        result_path: output directory for logs, predictions and weights.
        learning_rate: initial RMSprop learning rate.
        epochs: maximum epoch index (loop runs epochs+1 iterations, 0..epochs).

    Returns:
        The network with the best-scoring checkpoint weights loaded.
    """
    train_loader = dataloaders['Train']
    val_loader = dataloaders['Valid']
    early_stop = 0
    # Stop after this many consecutive epochs without a Dice improvement.
    early_stop_criterion = 12
    best_val_score = 0
    total_start_time = time.time()
    logger = print_logger(result_path.joinpath('LOG').with_suffix('.txt'))
    image_path = result_path.joinpath('Prediction')
    image_path.mkdir(exist_ok=True, parents = True)
    checkpoint = result_path.joinpath('Model_Weight').with_suffix('.pth')
    checkpoint.parent.mkdir(exist_ok = True, parents = True)
    optimizer = RMSprop(net.parameters(), lr=learning_rate, weight_decay=1e-8)
    # optimizer = Adam(net.parameters(), lr=learning_rate)
    scheduler = ReduceLROnPlateau(optimizer, mode = 'max', factor = 0.1, patience = 4, min_lr = 1e-5) # goal: maximize Dice score
    criterion = nn.CrossEntropyLoss()
    for epoch in range(epochs+1):
        start_time = time.time()
        net.train()
        epoch_loss = 0
        for images, true_masks, _ in train_loader :
            images = images.to(device=device, dtype=torch.float32)
            true_masks = true_masks.to(device=device, dtype=torch.float32)
            masks_pred = net(images)
            loss = criterion(masks_pred, true_masks)
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss = epoch_loss / len(train_loader)
        # evaluate() returns (dice, sensitivity, specificity).
        dice_score, sensitivity, specificity = evaluate(net, val_loader, device, image_path)
        scheduler.step(dice_score)
        if dice_score <= best_val_score :
            early_stop += 1
        else :
            early_stop = 0
            best_val_score = dice_score
            # Checkpoint only on improvement.
            torch.save(net.state_dict(), checkpoint)
        if early_stop == early_stop_criterion :
            break
        time_elapsed = time.time() - start_time
        total_elapsed = time.time() - total_start_time
        total_min = total_elapsed // 60
        total_sec = total_elapsed % 60
        lr = optimizer.param_groups[0]['lr']
        logger(f'[EPOCH : {epoch:3d}/{epochs:3d}] \
            | LOSS : [{epoch_loss:.4f}] \
            | DICE : [{best_val_score:.4f}] \
            | SENSI : [{sensitivity:.4f}] \
            | SPECI : [{specificity:.4f}] \
            | ES : [{early_stop}/{early_stop_criterion}] \
            | LR : [{lr:.5f}] \
            | TIME : [{int(time_elapsed):3d}S / {int(total_min):2d}M {int(total_sec):2d}S]'
            )
    # Restore the best checkpoint before the final evaluation.
    net.load_state_dict(torch.load(checkpoint))
    # NOTE(review): this binds the full (dice, sensi, speci) tuple, so the log
    # line below prints a tuple rather than a single score — confirm intended.
    final_val_score = evaluate(net, val_loader, device, image_path)
    logger(f'\n\nFINAL VALIDATION SCORE : {final_val_score}')
    return net
| kimjh0107/2022_Rayence_Medical_Image_processing | src/train.py | train.py | py | 3,042 | python | en | code | 0 | github-code | 36 |
2360946621 | """
【问题描述】
输入n个学生的成绩,按总分从大到小输出。
【输入形式】
第一行输入学生人数n。
后续n行,每一行输入一个学生的学号, 姓名,语文成绩和数学成绩。各字段之间用空格隔开。
【输出形式】
输出n行。每一行给出学生学号,姓名,总分。按总分从大到小排序。若总分相同,则按学号从小到大排序。
【样例输入】
5
355 dj 60 70
665 kk 70 80
g33 He 55 95
l222 Li 60 80
n77 Liu 70 60
【样例输出】
665 kk 150
g33 He 150
l222 Li 140
355 dj 130
n77 Liu 130
"""
n = int(input())
records = []
for _ in range(n):
    # Each line: student_id name chinese_score math_score
    fields = input().split()
    total = int(fields[2]) + int(fields[3])
    records.append([fields[0], fields[1], total])
# Sort by total descending; ties broken by student id ascending.
# (The original sorted an id-ordered copy into an unused variable and then
# re-sorted the *unsorted* list by total, so equal totals stayed in input
# order instead of id order.)
records.sort(key=lambda rec: (-rec[2], rec[0]))
for rec in records:
    # Matches the original output format exactly (trailing space per line).
    for value in rec:
        print(value, end=" ")
    print()
17285744603 | from abc import ABCMeta, abstractmethod
import subprocess
import io
from logging import Logger
class Action(metaclass=ABCMeta):
    """Abstract base for scheduled actions that render themselves as text.

    Extra keyword arguments are accepted (and ignored here) so subclasses
    can pull their own options out of ``kwargs``.
    """

    def __init__(self, action_id, job, **kwargs):
        self.id = action_id
        self.job = job

    @abstractmethod
    def to_text(self, logger: Logger) -> str:
        """Produce the text this action contributes; may log via *logger*."""
class TextAction(Action):
    """Action that yields a fixed piece of text (the ``text`` kwarg)."""

    def __init__(self, action_id, job, **kwargs):
        super().__init__(action_id, job)
        # Default message is used when no ``text`` option was configured.
        self.text = kwargs.get("text", "今天又是元气满满的一天")

    def to_text(self, loger: Logger) -> str:
        loger.info(f"Text to text Length: {len(self.text)}")
        return self.text
class CommandAction(Action):
    """Action that runs a shell command and yields its output as text."""

    def __init__(self, action_id, job, **kwargs):
        super(CommandAction, self).__init__(action_id, job)
        self.command = kwargs.get("command", "echo Hello")

    def to_text(self, loger: Logger) -> str:
        """Run ``self.command`` and return its stripped stdout, followed by
        stderr on a new line when stderr is non-empty."""
        proc = subprocess.Popen(self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
        # communicate() reads both pipes to EOF and waits — unlike the
        # original wait()-then-read, it cannot deadlock on large output.
        out_bytes, err_bytes = proc.communicate()
        str_stdout = out_bytes.decode("utf-8").strip()
        str_stderr = err_bytes.decode("utf-8").strip()
        loger.info(f"Command to text Command {self.command} stdout: {str_stdout}")
        # Bug fix: this log line previously said "stdout" for stderr.
        loger.info(f"Command to text Command {self.command} stderr: {str_stderr}")
        # Bug fix: the original returned the *empty* stdout when the command
        # printed nothing (discarding stderr), and appended "\n" + stderr
        # even when stderr was empty. Join only the non-empty streams.
        parts = [text for text in (str_stdout, str_stderr) if text]
        return "\n".join(parts)
| SuperH-0630/HelloEmail | action.py | action.py | py | 1,587 | python | en | code | 0 | github-code | 36 |
class TreeNode(object):
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution(object):
    """Collects the inorder traversal of a binary tree.

    Note: values accumulate in ``self.data`` across calls on the same
    instance, so use a fresh Solution per traversal.
    """

    def __init__(self):
        self.data = []

    def helper(self, root):
        """Recursively visit left subtree, then node, then right subtree."""
        if root is None:
            return
        self.helper(root.left)
        self.data.append(root.val)
        self.helper(root.right)

    def inorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        self.helper(root)
        return self.data
if __name__ == '__main__':
    # Build the tree 1 -> (right) 2 -> (left) 3 and print its inorder walk.
    root = TreeNode(1)
    right_child = TreeNode(2)
    grandchild = TreeNode(3)
    root.right = right_child
    right_child.left = grandchild
    print(Solution().inorderTraversal(root))
36830649760 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # SciKitLearn 机器学习库
# - VScode中, `ctrl + /` 快速注释代码
# %%
# Sklearn's generic learn/fit/predict workflow.
# Example 1: classify the iris dataset with the KNN module.
import numpy as np
from sklearn import datasets
# from sklearn.cross_validation import train_test_split # cross_validation was removed long ago; its features moved into model_selection
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris() # load the iris dataset
iris_X = iris.data # attributes -> X, the feature matrix
iris_y = iris.target # labels -> y, the target vector
print(iris_X[:2,:])
print(iris_y)
# %%
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
# Split iris_X and iris_y into train/test sets with a 30% test fraction.
# Define the estimator to use
knn = KNeighborsClassifier() # use the KNN module to classify the data
# knn = KNeighborsClassifier(n_neighbors=5) # KNN averages over neighbours; the number of neighbours can be set here
knn.fit(X_train, y_train) # training uses the fit function
# Inspect the predictions
print(knn.predict(X_test))
print(y_test)
# Visualization (added by the author)
yuc = knn.predict(X_test) # predicted labels
zhs = y_test # ground-truth labels
import numpy as np
idx = np.arange(0,len(yuc),1) # index per element (via len), used as x coordinate
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(idx,yuc,s=80,c='g',alpha=0.5) # idx as x; predicted and true values as y
plt.scatter(idx,zhs,s=80,c='r',alpha=0.5) # large markers, distinct colors, with transparency
# %%
# Example 2: Boston housing dataset, linear regression with linear_model.
from sklearn import datasets
from sklearn.linear_model import LinearRegression
loaded_data = datasets.load_boston() # load the Boston housing dataset
data_X = loaded_data.data # the data attribute is the feature matrix
data_y = loaded_data.target # the target attribute is the regression target
model = LinearRegression() # use the linear regression model
model.fit(data_X, data_y)
print(model.predict(data_X))
print(data_y)
# Visualization (added by the author)
yuc = model.predict(data_X) # predicted values
zhs = data_y # ground-truth values
import numpy as np
idx = np.arange(0,len(yuc),1) # index per element (via len), used as x coordinate
import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.plot(idx,yuc,c='g',alpha=0.5) # idx as x; predicted and true values as y
plt.plot(idx,zhs,c='r',alpha=0.5) # distinct colors, with transparency
# %%
# model模块的常见属性和功能,如上述的predict预测功能(1分类2回归)
model = LinearRegression() # 指定本例所用的model
model.fit(X,y) # 对特征向量集和目标向量,用模型进行拟合
model.predict(X) # 对测试集数据X,用模型进行预测
model.coef_ # 模型的斜率
model.intercept_ # 模型的截距
model.get_params() # 获得模型选择时给模型定义的参数
model.score(X,y) # 对预测结果打分。用X预测,用y做真值进行比较。R^2方式打分
# %%
# 预处理preprocessing
# 标准化normalization、正则化、特征缩放feature scaling
# Idea: Make sure features are on a similar scale. 各特征处于相近的量级,便于学习
from sklearn import preprocessing
X = preprocessing.scale(X) # 对数据进行预处理(标准化,缩放到0-1之间的数值)
# %%
# 交叉验证(数据集分割)
# 上面案例1中的数据集分割方式,按照固定比例分割
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# 为了有效评价模型,对数据集进行多次不同模式的分割,分别测试并平均其准确率
from sklearn.model_selection import cross_val_score # cross_val_score函数也并入model_selection
knn = KNeighborsClassifier(n_neighbors=5) # 计算5个近邻点
score = cross_val_score(knn, X, y, cv=5, scoring='accuracy') # 分类问题用准确率
# 打分由多次分割评估结果平均而来,使用knn模型,对X预测,用y验证,使用5种分割方案,打分使用准确率进行
loss = -cross_val_score(knn, X, y, cv=5, scoring='neg_mean_squared_error') # 回归问题用均方差(原值时负值)
# 原mean_squared_error参数已弃用
# %%
# 学习率曲线,可视化学习的准确率变化过程
from sklearn.model_selection import learning_curve # 学习曲线也放入model_selection
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits() # 加载数据集
X = digits.data # digits属性作为特征向量集
y = digits.target # 目标向量
# 学习曲线计算(指定阶段的准确率/损失值变化),输出给训练集大小、训练集损失、测试集损失等变量
# gamma是学习率(速率),阶段有数组指定,损失计算和上述交叉验证方法一样
train_sizes, train_loss, test_loss = learning_curve(
SVC(gamma=0.001),X,y,cv=10,scoring='neg_mean_squared_error',
train_sizes=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1])
train_loss_mean = -np.mean(train_loss,axis=1) # 上述cv10次分割的值求均值
test_loss_mean = -np.mean(test_loss,axis=1)
plt.plot(train_sizes, train_loss_mean, 'o-',color='r', label="training")
plt.plot(train_sizes, test_loss_mean, 'o-',color='g', label="cross-validation")
plt.xlabel('training examples')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
# %%
# 模型调参过程,使用validation_curve评估参数取值变化过程中评估指标的变化曲线,根据是否欠拟合或过拟合来选取该参数的合适范围
from sklearn.model_selection import validation_curve # 评估曲线也放入model_selection
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -2.3, 10) # 在区间取5个点,用于测试参数(调参)
# 评估曲线计算(指定阶段的准确率/损失值变化),输出给训练集大小、训练集损失、测试集损失等变量
# gamma是学习率(速率),阶段有数组指定,损失计算和上述交叉验证方法一样
train_loss, test_loss = validation_curve( # 改用评估曲线,返回值没有train_sizes
# SVC的固定参数去掉,后面给出参数名和取值范围(已指定)
SVC(),X,y,param_name='gamma',param_range=param_range, cv=10,scoring='neg_mean_squared_error')
train_loss_mean = -np.mean(train_loss,axis=1) # 上述cv10次分割的值求均值
test_loss_mean = -np.mean(test_loss,axis=1)
plt.plot(param_range, train_loss_mean, 'o-',color='r', label="training")
plt.plot(param_range, test_loss_mean, 'o-',color='g', label="cross-validation")
plt.xlabel('gamma')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
# %%
# 保存model和参数
# pickle方法
import pickle
with open('/path/to/file.pickle','wb') as f: # 打开句柄-写入
pickle.dump(model,f) # 保存模型
with open('/path/to/file.pickle','rb') as f: # 打开句柄-读出
mdl = pickle.load(f) # 加载模型
print(mdl.predict(X[0:1])) # 使用模型预测
# joblib方法-sklearn
from sklearn.externals import joblib
joblib.dump(model,'/path/to/file.pkl') # 保存模型
mdl = joblib.load('/path/to/file.pkl') # 加载模型
print(mdl.predict(X[0:1])) # 使用模型预测
| oca-john/Python3-xi | Python3-ipynb/py3.sklearn.py | py3.sklearn.py | py | 7,841 | python | zh | code | 0 | github-code | 36 |
36168624616 | from enum import Enum, auto
from pathlib import Path
import numpy as np
import pandas as pd
import pendulum
import pytest
from whatsappnalysis.lib.custom_types import ChatDataset, Schema
from whatsappnalysis.lib.data_loader import WhatsappLoader
class TestWhatsappLoader:
    """Tests for WhatsappLoader.load_from_txt."""
    # Raw WhatsApp chat export used as the parser input fixture.
    test_chat_txt = (
        "2/5/20, 8:38 PM - Author 1: Hello world\n"
        "2/5/20, 8:39 PM - Author 1: I like balloons\n"
        "2/5/20, 8:39 PM - Author 2: I like balloons too!\n"
        "2/5/20, 8:42 PM - Author 3: foo\n"
        "2/5/20, 8:42 PM - Author 3: Balloons are terrible\n"
        "2/5/20, 8:45 PM - Author 2: False\n"
    )
    # Expected parsed frame, one row per message in the txt fixture above.
    test_chat_df = pd.DataFrame.from_dict(
        {
            "CHAT_NAME": {
                0: "test_chat",
                1: "test_chat",
                2: "test_chat",
                3: "test_chat",
                4: "test_chat",
                5: "test_chat",
            },
            "TIMESTAMP": {
                0: pendulum.parse("2020-02-05 20:38:00+0000"),
                1: pendulum.parse("2020-02-05 20:39:00+0000"),
                2: pendulum.parse("2020-02-05 20:39:00+0000"),
                3: pendulum.parse("2020-02-05 20:42:00+0000"),
                4: pendulum.parse("2020-02-05 20:42:00+0000"),
                5: pendulum.parse("2020-02-05 20:45:00+0000"),
            },
            "AUTHOR": {
                0: "Author 1",
                1: "Author 1",
                2: "Author 2",
                3: "Author 3",
                4: "Author 3",
                5: "Author 2",
            },
            "MESSAGE": {
                0: "Hello world",
                1: "I like balloons",
                2: "I like balloons too!",
                3: "foo",
                4: "Balloons are terrible",
                5: "False",
            },
            "HAS_MEDIA": {
                0: False,
                1: False,
                2: False,
                3: False,
                4: False,
                5: False,
            },
        }
    )
    class Columns(Enum):
        TIMESTAMP = auto()
        AUTHOR = auto()
        MESSAGE = auto()
    schema = Schema(
        columns=Columns,
        columns_to_dtypes={Columns.TIMESTAMP.name: np.dtype("datetime64[ns]")},
    )
    def test_load_from_txt(self, tmp_path: Path):
        """Loading a valid chat txt yields the expected DataFrame."""
        # Arrange
        expected = self.test_chat_df.astype({"TIMESTAMP": np.dtype("datetime64[ns]")})
        raw_path = tmp_path / "test_chat.txt"
        with raw_path.open("w") as file:
            file.write(self.test_chat_txt)
        # NOTE(review): ``dataset`` is unused — candidate for removal.
        dataset = ChatDataset(schema=self.schema)
        # Act
        result = WhatsappLoader().load_from_txt(raw_path)
        # Assert
        pd.testing.assert_frame_equal(result.data, expected)
    def test_load_from_txt_bad_file(self, tmp_path: Path):
        """Loading an empty (unparseable) file raises TypeError."""
        # Arrange
        raw_path = tmp_path / "test_chat.txt"
        with raw_path.open("w") as file:
            file.write("")
        # Act / assert
        with pytest.raises(TypeError):
            WhatsappLoader().load_from_txt(raw_path)
| lbartell/whatsappnalysis | tests/test_lib/test_data_loader/test_whatsapp_loader.py | test_whatsapp_loader.py | py | 3,175 | python | en | code | 0 | github-code | 36 |
73915422185 | from flask import Flask, request
import base64
from PIL import Image
from preprocess4 import Pr1
from preprocess2 import test_transforms
import torch
from torchvision import models
import torch.nn as nn
import numpy as np
import cv2
def get_net():
    """Build the fine-tuning classifier.

    A pretrained ImageNet ResNet-18 backbone (frozen) is followed by a
    small trainable head mapping its 1000 logits to 256 and then to 26
    outputs (one per letter a-z).
    """
    net = nn.Sequential()
    net.features = models.resnet18(weights='ResNet18_Weights.DEFAULT')
    net.output_new = nn.Sequential(
        nn.Linear(1000, 256),
        nn.ReLU(),
        nn.Linear(256, 26),
    )
    net = net.to('cpu')
    # Freeze the backbone so only the new head receives gradients.
    for param in net.features.parameters():
        param.requires_grad = False
    return net
# Load the saved parameters
saved_params = torch.load('my_model61.pt', map_location=torch.device('cpu'))
# Create a new instance of the model and load the parameters
model_test = get_net()
model_test.load_state_dict(saved_params)
# Inference mode (disables dropout/batch-norm updates).
model_test.eval()
# Label for each of the 26 output logits of the classifier head.
classes = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
app = Flask(__name__)
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a base64-encoded image, run hand detection/segmentation on it,
    and report success; any failure is returned as an error payload."""
    try:
        # Base64 string from the JSON body -> raw image bytes.
        base64_image = request.json['image']
        image_bytes = base64.b64decode(base64_image)
        # np.fromstring was deprecated and is removed in NumPy >= 2.0;
        # frombuffer is the supported zero-copy equivalent for raw bytes.
        nparr = np.frombuffer(image_bytes, np.uint8)
        # Decode the byte array into an image with OpenCV.
        frame = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
        # Detect, crop and segment the hand region.
        p1 = Pr1(frame)
        processed_frame = p1.detect_crop_and_segment_hands(p1.image)
        if processed_frame is not None:
            cropped_hand_array = Image.fromarray(processed_frame)
            # Prediction pipeline (currently disabled):
            # img_tensor = test_transforms(cropped_hand_array)
            # prediction = model_test(img_tensor[None].to("cpu"))
            # pred_label = classes[torch.max(prediction, dim=1)[1]]
            return {'status': 'success'}
    except Exception as e:
        return {'status': 'error', 'message': str(e)}
if __name__ == '__main__':
app.run()
| Moezwalha/Alphabet-SL_Prediction_Service | app.py | app.py | py | 2,494 | python | en | code | 0 | github-code | 36 |
5515751660 | import yaml
class Config:
    """Training configuration loaded from ``config/config.yml``."""

    # attribute name -> YAML key
    _FIELDS = {
        'load_model_epochs': 'LOAD_MODEL_EPOCH',
        'debug': 'DEBUG',
        'n_epochs': 'N_EPOCHS',
        'load_g_model_score': 'LOAD_G_MODEL_SCORE',
        'load_d_model_score': 'LOAD_D_MODEL_SCORE',
        'model_no': 'MODEL_NO',
        'batch_size': 'BATCH_SIZE',
        'n_split': 'N_SPLIT',
        'max_lr': 'MAX_LR',
        'min_lr': 'MIN_LR',
        'lambda1': 'LAMBDA1',
        'lambda2': 'LAMBDA2',
        'seed': 'SEED',
        'dataloader_seed': 'DATALOADER_SEED',
        'device': 'DEVICE',
        'size': 'SIZE',
    }

    def __init__(self):
        # Start every field as None, then populate from the YAML file.
        for attr in self._FIELDS:
            setattr(self, attr, None)
        self.load()

    def load(self):
        """Read config/config.yml and copy its values onto attributes."""
        with open('config/config.yml', 'r') as f:
            config = yaml.load(f, Loader=yaml.SafeLoader)
        # Missing keys stay None thanks to dict.get().
        for attr, key in self._FIELDS.items():
            setattr(self, attr, config.get(key))
class TestConfig:
    """Inference-time configuration loaded from ``config/test_config.yml``.

    Mirrors :class:`Config` but with only the keys needed at test time;
    missing keys leave their attribute ``None``.
    """

    # attribute name -> YAML key (see Config._KEYS for the same pattern)
    _KEYS = {
        'load_model_epochs': 'LOAD_MODEL_EPOCH',
        'debug': 'DEBUG',
        'load_g_model_score': 'LOAD_G_MODEL_SCORE',
        'batch_size': 'BATCH_SIZE',
        'seed': 'SEED',
        'device': 'DEVICE',
        'size': 'SIZE',
    }

    def __init__(self):
        for attr in self._KEYS:
            setattr(self, attr, None)
        self.load()

    def load(self):
        """(Re)read the YAML file and populate all attributes."""
        with open('config/test_config.yml', 'r') as f:
            config = yaml.safe_load(f)
        for attr, key in self._KEYS.items():
            setattr(self, attr, config.get(key))
1102521524 | #对比Java,python的文本处理再次让人感动
#! /usr/bin/python
import os

# Demo of basic text-file handling: write two lines, then read them back.
spath = os.path.join(os.getcwd(), "test.txt")
# "w" truncates the file (or creates it); the context manager guarantees
# the file is closed even if a write raises.
with open(spath, "w") as f:
    f.write("First line 1.\n")
    f.writelines("First line 2.")
# Reopen for reading and iterate line by line.
with open(spath, "r") as f:
    for line in f:
        print("每一行的数据是:%s"%line)
"""
知识点:
	• open的参数:r表示读,w写数据,在写之前先清空文件内容,a打开并附加内容.
	• 打开文件之后记得关闭
"""
33136179454 | from telebot import types
import telebot, wikipedia, re
from config import *
from base_bot import bot
# Test-bot
# Tiny finite-state machine: the bot either ignores free text (IDLE) or
# forwards it to the wiki lookup (LISTENING_TO_COMMANDS).
IDLE = 0
LISTENING_TO_COMMANDS = 2
bot_state = IDLE
@bot.message_handler(commands=['test'])
def start_message(message):
    """Reply to /test with an inline keyboard asking for the user's grade."""
    markup = telebot.types.InlineKeyboardMarkup()
    # NOTE(review): callback_data is passed as int here but compared as the
    # strings '3'/'4'/'5' in query_handler — this appears to rely on the data
    # coming back as a string; passing str(...) here would be safer. Confirm.
    markup.add(telebot.types.InlineKeyboardButton(text='Три', callback_data=3))
    markup.add(telebot.types.InlineKeyboardButton(text='Четыре', callback_data=4))
    markup.add(telebot.types.InlineKeyboardButton(text='Пять', callback_data=5))
    bot.send_message(message.chat.id, text="Какая средняя оценка была у Вас в школе?", reply_markup=markup)
@bot.callback_query_handler(func=lambda call: True)
def query_handler(call):
    """Handle a button press from the /test inline keyboard.

    Acknowledges the callback, maps the pressed grade to a reply text and
    then removes the inline keyboard from the original message.
    """
    bot.answer_callback_query(callback_query_id=call.id, text='Спасибо за честный ответ!')
    # Grade (as string callback data) -> reply text; unknown data -> ''.
    replies = {
        '3': 'Вы троечник!',
        '4': 'Вы хорошист!',
        '5': 'Вы отличник!',
    }
    answer = replies.get(call.data, '')
    bot.send_message(call.message.chat.id, answer)
    bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id)
@bot.message_handler(commands=['stop'])
def stop(m, res=False):
    """Handle /stop: put the bot back into the IDLE state."""
    global bot_state
    bot_state = IDLE
# NOTE(review): no message_handler decorator here — this is only invoked if
# registered elsewhere (e.g. in base_bot); getwiki is also not defined in
# this file, presumably provided via `from config import *`. Confirm both.
def handle_text(message):
    # Answer with a wiki lookup only while the bot is actively listening.
    if bot_state != IDLE:
        bot.send_message(message.chat.id, getwiki(message.text), reply_markup=keyboard1)
# Persistent two-button reply keyboard shown under the chat input field.
keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True)
keyboard1.row('/test', '/stop')
if __name__ == "__main__":
    # Delegate polling/startup to the shared entry point in base_bot.
    from base_bot import main
    main()
18347648906 | import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
from matplotlib import cm
# set defaults for charts
pio.templates.default = "plotly_white"
@np.vectorize
def calculate_tax(income):
    """Compute U.S. federal income tax for ``income`` (2021 single-filer
    brackets).

    Marginal-rate schedule: each slice of income between two consecutive
    thresholds is taxed at that slice's rate; the top 37% rate applies to
    everything above the last threshold. Vectorized so it also accepts
    array-like input.
    """
    lowers = [0, 9950, 40525, 86375, 164925, 209425, 523600]
    uppers = lowers[1:] + [float('inf')]
    rates = [0.10, 0.12, 0.22, 0.24, 0.32, 0.35, 0.37]
    tax = 0
    for lower, upper, rate in zip(lowers, uppers, rates):
        # Stop once income no longer reaches this bracket (the first
        # bracket always applies, matching the original behaviour).
        if lower and income <= lower:
            break
        # Tax the slice of income that falls inside [lower, upper).
        tax += rate * (min(income, upper) - lower)
    return tax
# parameters
# Column names that are projected forward year by year.
variables = ['robert_income', 'isabel_income', 'expenses', 'assets']
# create initial setup DataFrame (base year 2023, indexed by year)
data = pd.DataFrame({
    'year': [2023],
    'robert_income': [100000],
    'isabel_income': [200000],
    'expenses': [50000],
    'assets': [800000]
}).set_index('year')
# Annual growth rate applied to each variable (e.g. assets grow 4%/yr).
growth_assumptions = {
    'robert_income': 0.0,
    'isabel_income': 0.0,
    'expenses': 0.01,
    'assets': 0.04
}
# One-off changes applied in a given year: {year: {variable: (delta, label)}}.
shocks = {
    2027: {
        'robert_income': (-10000, 'Robert leaves Google'),
        'isabel_income': (-100000, 'Isabel book deals are smaller')
    },
    2030: {
        'expenses': (30000, 'Childcare')
    }
}
volatility = 0.08  # standard deviation of asset growth
# NOTE(review): `simulations` is never used below — the simulation count is
# hard-coded as 10000 in asset_simulations; confirm which value is intended.
simulations = 1000  # number of simulations
# create a DataFrame to hold the future projections (2023-2082, 60 years)
projection = pd.DataFrame(index=range(2023, 2083))
# initialize a DataFrame with simulations for assets
# Each cell is one year's random gross growth factor: 1 + N(0, volatility).
asset_simulations = pd.DataFrame(1 + volatility * np.random.standard_normal(size=(60,10000)),
                                 index=projection.index,
                                 columns=['simulation_'+str(i) for i in range(10000)]
                                 )
# chain all
# NOTE(review): the loop below overwrites every row of asset_simulations
# (each year is re-based on the previous year's deterministic assets), so
# this cumulative product never survives to the plots — confirm intent.
asset_simulations = asset_simulations.cumprod()
# loop over years
for year in projection.index:
    if year == 2023:
        # handle base year: copy the initial values verbatim
        for var in variables:
            projection.loc[year, var] = data.loc[2023, var]
        asset_simulations.loc[year] = data.loc[2023, 'assets']
    else:
        # apply growth assumptions and shocks
        for var in variables:
            projection.loc[year, var] = projection.loc[year - 1, var] * (1 + growth_assumptions[var])
            if year in shocks and var in shocks[year]:
                shock, _ = shocks[year][var]
                projection.loc[year, var] += shock
        # calculate household income and savings
        projection.loc[year, 'household_income'] = projection.loc[year, 'robert_income'] + projection.loc[year, 'isabel_income']
        projection.loc[year, 'taxes'] = calculate_tax(projection.loc[year, 'household_income'])
        projection.loc[year, 'net_household_income'] = projection.loc[year, 'household_income'] - projection.loc[year, 'taxes']
        # calculate savings
        projection.loc[year, 'savings'] = projection.loc[year, 'net_household_income'] - projection.loc[year, 'expenses']
        # add savings to assets
        projection.loc[year, 'assets'] += projection.loc[year, 'savings']
        # add volatility to assets: scale last year's deterministic assets by
        # this year's random growth factors (one per simulation column)
        asset_simulations.loc[year] = projection.loc[year - 1, 'assets'] * (asset_simulations.loc[year])
# plot income, expenses, and savings as one line per variable
fig = go.Figure(layout=go.Layout(template='plotly_white'))
for var in ['robert_income', 'isabel_income', 'expenses', 'savings', 'household_income','net_household_income','taxes']:
    fig.add_trace(go.Scatter(x=projection.index, y=projection[var], mode='lines', name=var))
fig.show()
# plot asset simulations as a fan chart (stacked percentile bands)
fig = go.Figure()
percentiles = [1, 5, 20, 50, 80, 95, 99]
# One blue shade per percentile band, light to dark.
colors = [cm.Blues(x) for x in np.linspace(0.01, 1, 7)]
for i in range(len(percentiles)):
    percentile = percentiles[i]
    color = colors[i]
    # Cross-simulation percentile of assets for every year (row-wise).
    asset_percentile = asset_simulations.apply(lambda x: np.percentile(x, percentile), axis=1)
    fig.add_trace(go.Scatter(x=asset_percentile.index, y=asset_percentile, fill='tonexty', fillcolor='rgba'+str(color), line_color='rgba'+str(color), name=str(percentile)+'th percentile'))
fig.show()
# plot shocks, one bar subplot per shocked variable
# Collect every shock delta so all subplots share one y-axis range below.
all_shock_values = []
for shock_type in ['assets', 'robert_income', 'isabel_income', 'expenses']:
    for year, shocks_in_year in shocks.items():
        if shock_type in shocks_in_year:
            all_shock_values.append(shocks_in_year[shock_type][0])
fig = make_subplots(rows=4, cols=1, shared_xaxes=True, shared_yaxes='rows')
for shock_type, subplot in zip(['assets', 'robert_income', 'isabel_income', 'expenses'], [1, 2, 3, 4]):
    shock_years = []
    shock_values = []
    hover_texts = []  # hover labels, shown instead of the invisible bar text
    for year, shocks_in_year in shocks.items():
        if shock_type in shocks_in_year:
            shock_years.append(year)
            shock_values.append(shocks_in_year[shock_type][0])
            hover_texts.append(shocks_in_year[shock_type][1])  # Add the hover text label to the list
    fig.add_trace(go.Bar(x=shock_years, y=shock_values, name=shock_type + ' shocks', text=hover_texts, textposition='outside', hovertemplate='%{text}', textfont=dict(color='rgba(0,0,0,0)')), row=subplot, col=1)
fig.update_xaxes(range=[2023, 2082])
fig.update_yaxes(range=[min(all_shock_values), max(all_shock_values)])
fig.update_layout(template='plotly_white')
fig.show()
| robert-sturrock/financial-projections | financial_projections.py | financial_projections.py | py | 5,578 | python | en | code | 0 | github-code | 36 |
25864017409 |
# my solution to https://codility.com/programmers/task/binary_gap/
from nose_parameterized import parameterized
import sys
import unittest
def solution(N):
    """Return the length of the longest binary gap of N.

    A binary gap is a maximal run of zeros enclosed by ones on both sides
    in N's binary representation; 0 is returned when no gap exists.

    Raises TypeError when N is None (preserving the original contract).

    Fix over the original: the old loop compared ints with ``is``/``is not``
    (identity, not equality) and used true division, which is wrong and can
    loop forever on Python 3; this rewrite uses the binary string directly.
    """
    if N is None:
        raise TypeError
    # Trailing zeros are not enclosed by ones, so they never form a gap.
    bits = bin(N)[2:].rstrip('0')
    # Each maximal zero-run between ones appears as one element of the split;
    # split always yields at least one element, so max() is safe.
    return max(len(gap) for gap in bits.split('1'))
class TestBinaryGap(unittest.TestCase):
    """Unit tests for solution() using nose_parameterized expansion."""
    def test_invoke_without_argument(self):
        # Missing required argument must raise TypeError.
        with self.assertRaises(TypeError):
            solution()
    def test_with_none(self):
        with self.assertRaises(TypeError):
            solution(None)
    # Called at class-body creation time by the decorator below, so it acts
    # as a plain helper function (no self), not as a method.
    def _build_parameters(base2_string, expected):
        the_integer = int(base2_string, base=2)
        return (base2_string, the_integer, expected)
    # NOTE(review): sys.maxint exists only on Python 2; this test module is
    # Python-2 era code and will not import unchanged on Python 3.
    @parameterized.expand([
        _build_parameters('0', 0),
        _build_parameters('1', 0),
        _build_parameters('101', 1),
        _build_parameters('101001', 2),
        _build_parameters('100101', 2),
        _build_parameters('1011', 1),
        _build_parameters('1101', 1),
        _build_parameters('1100000100010000000111110000', 7),
        ('sys.maxint', sys.maxint, 0),
    ])
    def test_solution(self, _, N, expected):
        # First tuple element is only a display name for the test case.
        self.assertEqual(solution(N), expected)
| m11m/codility | python2/01-binarygap.py | 01-binarygap.py | py | 1,476 | python | en | code | 0 | github-code | 36 |
71124732903 | import os
# Clear the console; 'cls' is Windows-only ('clear' on POSIX systems).
clear = lambda: os.system('cls')
clear()
# This alg uses merge sort
def Sort_Array(arr):
# Base Case
if len(arr) <= 1:
return
# Divide into 2 =======
mid_idx = len(arr)//2
L_arr = arr[:mid_idx]
R_arr = arr[mid_idx:]
# =====================
# Recursion ======================================
# Keep dividing till you get to an elemental array
Sort_Array(L_arr)
Sort_Array(R_arr)
# ================================================
# Build it back up from stacks ===================
ll = 0
rr = 0
nn = 0
# For every pairwise element of left and right arrays, copy the smaller one into main array
# This stops once either one of the arrays is exhausted
while ll < len(L_arr) and rr < len(R_arr):
if L_arr[ll] < R_arr[rr]:
arr[nn] = L_arr[ll]
ll += 1
else:
arr[nn] = R_arr[rr]
rr += 1
nn += 1
# Copy the rest (if any) of the left array into the main array
while ll < len(L_arr):
arr[nn] = L_arr[ll]
ll += 1
nn += 1
# Copy the rest (if any) of the right array into the main array
while rr < len(R_arr):
arr[nn] = R_arr[rr]
rr += 1
nn += 1
# ================================================
return arr
# ==========================================
# Demo: sort a fixed list and print the result.
arr_1 = [200, 13, 34, 57, 23, 17, 18, 95, 61, 43, 22, 12, 3, 7, 8, 15, 32, 28, 24, 103, 100, 35]
Sorted_Array = Sort_Array(arr_1)
print(Sorted_Array)
27119972934 | #Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que mantenha separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.
# lista[0] collects the even values, lista[1] the odd ones.
# (The original also kept contPar/contImpar counters that were never read;
# they have been removed.)
lista = [[], []]
for i in range(1, 8):
    n = int(input(f'Digite o {i}º valor: '))
    if n % 2 == 0:
        lista[0].append(n)
    else:
        lista[1].append(n)
# Show each group in ascending order.
lista[0].sort()
lista[1].sort()
print(f'Os pares são: {lista[0]} e os ímpares são: {lista[1]}')
3600506632 | import cv2
# Read an image, round-trip it through disk, then display it in grayscale.
img = cv2.imread("sample1.png")
cv2.imwrite("sample2.png", img)
img2 = cv2.imread("sample2.png")
# Convert from BGR (OpenCV's default channel order) to single-channel gray.
grayImg = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", grayImg)
# Block until any key is pressed, then close all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
| rohith274/AiGuide | AI/Day1/ReadImage.py | ReadImage.py | py | 226 | python | en | code | 0 | github-code | 36 |
20049305182 | import numpy as np
import cv2
from scipy import ndimage, interpolate
def track(I, J, input_points, total_points, window=(21, 21), min_disp=0.01):
    """Track `input_points` from frame I to frame J (Lucas-Kanade style).

    Draws both `total_points` and the newly tracked points onto J and
    returns (annotated J, new integer point coordinates as a 2xN array).
    NOTE(review): `min_disp` is accepted but never used, and `frame` below
    is computed but unused (its drawing loop is commented out) — confirm.
    """
    output = []
    I_gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    J_gray = cv2.cvtColor(J, cv2.COLOR_BGR2GRAY)
    #normalization to [0, 1] before the displacement estimation
    I_norm = I_gray/I_gray.max()
    J_norm = J_gray/J_gray.max()
    for points in input_points:
        # Debug print left in by the author
        print('inside calculate')
        # Estimate the displacement of this point between the two frames
        d = calculate_new_point(I_norm, J_norm, points[0], points[1], window)
        if d is not None:
            print('output '+str(output))
            output.append((points[0] + d[0], points[1] + d[1]))
    # 2xN array of integer pixel coordinates for drawing
    output = np.asarray(output).T
    output = output.astype(int)
    frame = J.copy()
    # Draw the accumulated historical points (x,y order via the [::-1] flip)
    for point in zip(*total_points[::-1]):
        print('printing new points')
        print(point)
        print(type(point))
        J = cv2.circle(J, point, 3, (0, 0, 255), 10)
    # Draw the freshly tracked points
    for point in zip(*output[::-1]):
        print('printing new points')
        print(point)
        print(type(point))
        J = cv2.circle(J, point, 3, (0, 0, 255), 10)
    # for point in zip(*output[::-1]):
    #     frame = cv2.circle(frame, point, 3, (255, 0, 0), 4)
    return J, output
def calculate_new_point(I, J, x, y, window):
    """Estimate the (dx, dy) displacement of the window at (x, y) from
    normalized grayscale frame I to frame J, Lucas-Kanade style.

    Returns the displacement vector, or None when the structure tensor is
    singular or the interpolation step fails.
    """
    displ_tot = np.array([0., 0.]).T
    # The window to evaluate
    win_x = np.arange(x, x + window[0], dtype=float)
    win_y = np.arange(y, y + window[1], dtype=float)
    roi = I[x:x + window[0], y: y + window[1]]
    # Find image gradient in I
    Ix = cv2.Sobel(roi,cv2.CV_64F,1,0,ksize=3)
    Iy = cv2.Sobel(roi,cv2.CV_64F,0,1,ksize=3)
    # Calculate the Hessian matrix (structure tensor A·Aᵀ of the gradients)
    Ix = Ix.flatten()
    Iy = Iy.flatten()
    A = np.array([Ix, Iy])
    T = A.dot(A.T)
    #T = np.matmul(A, A.T)
    # Check that H is not singular
    if np.linalg.det(T) == 0:
        return None
    T_inv = np.linalg.inv(T)
    # Bilinear interpolation of J so sub-pixel windows can be sampled
    x_arr = np.arange(0, J.shape[1])
    y_arr = np.arange(0, J.shape[0])
    J_bilinear = interpolate.interp2d(x_arr, y_arr, J, kind='linear')
    # NOTE(review): the loop variable shadows the parameter `x`, and the
    # body returns on its first iteration — so despite range(35) only a
    # single iterative-refinement step ever runs, and D is sliced starting
    # at the shadowed x (0), not at the tracked point. Likely a bug; confirm.
    for x in range(35):
        try:
            # Calculate e matrix
            J_window = J_bilinear(win_x + displ_tot[0], win_y + displ_tot[1])
            D = (I[x:x + window[0], y: y + window[1]]-J_window).flatten()
            e = -1*(np.dot(A,D))
            d_temp = np.dot(T_inv, e)
            displ_tot = displ_tot + d_temp
            return displ_tot
        except:
            # Any interpolation/shape failure aborts tracking of this point.
            return None
    # calculate displacement
# calculate displacement
def compute_corners(img, threshold=0.5):
    """Harris-style corner detection.

    Returns (copy of img with response>threshold pixels painted, indices of
    pixels whose response exceeds threshold*R.max()).
    NOTE(review): the painted mask uses the absolute threshold while the
    returned indices use a relative one — the two outputs are inconsistent;
    confirm which is intended.
    """
    img_cpy = img.copy()
    # Grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #Ix = Convolution.convolution(img_gray, 'sobel_x')
    #Iy = Convolution.convolution(img_gray, 'sobel_y')
    Ix = cv2.Sobel(img_gray,cv2.CV_64F,1,0,ksize=3)
    Iy = cv2.Sobel(img_gray,cv2.CV_64F,0,1,ksize=3)
    # Products of gradients for the structure tensor
    Ix2 = np.square(Ix)
    Iy2 = np.square(Iy)
    Ixdy = Ix*Iy
    #g_Ix2 = Convolution.convolution(dx2, 'gaussian')
    #g_Iy2 = Convolution.convolution(dy2, 'gaussian')
    #g_IxIy = Convolution.convolution(dxdy, 'gaussian')
    # Gaussian-smooth each tensor component
    g_Ix2 = cv2.GaussianBlur(Ix2, (3,3),0)
    g_Iy2 = cv2.GaussianBlur(Iy2, (3,3),0)
    g_IxIy = cv2.GaussianBlur(Ixdy, (3,3),0)
    # Harris response: det(M) - k*trace(M)^2 with k = 0.22
    R = g_Ix2*g_Iy2 - np.square(g_IxIy) - 0.22*np.square(g_Ix2 + g_Iy2)
    # find all points above threshold
    img_cpy[R>threshold]=[255,0,0]
    return img_cpy, np.where(R > threshold*R.max())
# Open the input video and track corner features frame to frame.
cap = cv2.VideoCapture('Ass_img/MarinaBayatNightDrone.mp4')
# Check if camera opened successfully
if (cap.isOpened()== False):
    print("Error opening video stream or file")
# Capture frame-by-frame
ret, frame = cap.read()
cv2.namedWindow('Frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Frame', 900,600)
# Detect the initial corner points in the first frame
old_frame, points = compute_corners(frame)
points = np.asarray(points)
total_points = points
print(len(points.T))
cv2.imshow('Frame',old_frame)
# Read until video is completed
while(cap.isOpened()):
    ret, new_frame = cap.read()
    # Track the current points into the new frame and show the result
    old_frame, points = track(old_frame, new_frame, points.T, total_points)
    cv2.imshow('Frame',old_frame)
    print('points and total points')
    print(points)
    print('total points')
    print(total_points)
    # Accumulate every tracked position for drawing the trajectory
    total_points = np.hstack((total_points, points))
    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
#
# corner_det = Corner_Detector()
# corners = corner_det.compute_corners()
| ocinemod87/Advanced_Topics_Image_Analysis | Assignment_1/Assignment_1.py | Assignment_1.py | py | 4,477 | python | en | code | 0 | github-code | 36 |
23442339369 | import numpy as np
import math
class SMOTE:
    """
    Synthetic Minority Oversampling Technique (SMOTE).

    Oversamples the minority class by interpolating between randomly chosen
    minority samples and their k nearest minority-class neighbours until the
    minority class reaches ``p`` percent of the dataset.

    Fixes over the original implementation:
      * synthetic samples now interpolate BETWEEN the sample and its
        neighbour (canonical SMOTE) instead of extrapolating away from it;
      * the resampled labels are ordered to match the resampled rows;
      * shuffling permutes rows and labels with ONE shared permutation
        (previously X and y were shuffled independently, destroying the
        sample-label correspondence).
    """

    def __init__(self, p: float, k: int, random_state: int = 1337) -> None:
        """
        Parameters:
            p: Percentage of the minority class required after oversampling
            k: Number of nearest neighbours to consider while generating samples
            random_state: Random seed (seeds numpy's global RNG, as before)
        """
        self.p = p
        self.k = k
        self.nn = None                  # per-minority-sample neighbour sort indices (set in fit)
        self.X_min = None               # minority-class samples
        self.X_maj = None               # majority-class samples
        self.y = None                   # original labels
        self.minority_label = None      # label of the least frequent class
        self.minority_label_map = None  # boolean mask of minority rows in y
        np.random.seed(random_state)

    def euclidean_distance(self, v1: np.array, v2: np.array) -> np.array:
        """Row-wise Euclidean distance between corresponding rows of the
        NxM arrays `v1` and `v2`; returns an N-vector."""
        return np.sqrt(np.sum(np.square(v1 - v2), axis=1))

    def get_nearest_neighbours(
        self, sample: np.array, population: np.array
    ) -> np.array:
        """Return indices that sort `population` by distance to `sample`
        (nearest first; index 0 is the sample itself when it belongs to
        the population)."""
        # Compare `sample` against every row of the population at once.
        sample_duplicates = np.tile(sample, (population.shape[0], 1))
        distances = self.euclidean_distance(population, sample_duplicates)
        return np.argsort(distances)

    def get_minority_label(self, labels: np.array) -> (int, np.array):
        """Return (label with the lowest frequency in `labels`,
        boolean mask of the rows carrying that label)."""
        counts = np.bincount(labels)
        label = np.nonzero(counts)[0]
        label_counts = list(zip(label, counts[label]))
        # Ascending by frequency: the first entry is the minority class.
        label_counts.sort(key=lambda x: x[1])
        minority_label = label_counts[0][0]
        minority_label_map = labels == minority_label
        return minority_label, minority_label_map

    def get_synthetic_sample(self, sample: np.array, neighbours: np.array) -> np.array:
        """Return one synthetic sample interpolated between `sample` and a
        randomly chosen one of its k nearest minority neighbours."""
        nn_index = np.random.randint(0, high=self.k)
        # Skip index 0: the nearest "neighbour" is the sample itself.
        nearest_neighbours = self.X_min[neighbours][1 : self.k + 1]
        nn_sample = nearest_neighbours[nn_index]
        weight = np.random.uniform(low=0, high=1)
        # BUGFIX: canonical SMOTE interpolates towards the neighbour; the
        # previous `sample + (sample - nn_sample) * weight` extrapolated
        # in the opposite direction.
        synthetic_sample = sample + (nn_sample - sample) * weight
        return synthetic_sample

    def fit(self, X: np.array, y: np.array) -> None:
        """Identify the minority class and precompute its nearest neighbours.

        Parameters:
            X: NxM dataset with each row as a sample
            y: Nx1 integer labels

        Raises:
            ValueError: if the minority class already makes up >= p percent
                of X (this class only oversamples, never undersamples).
        """
        minority_label, minority_label_map = self.get_minority_label(y)
        X_min = X[minority_label_map, :]
        if self.p <= 100 * X_min.shape[0] / X.shape[0]:
            raise ValueError(
                f"""minority class in X already has a percentage of {round(100*X_min.shape[0]/X.shape[0], 2)} which is >= desired percentage self.p = {self.p}. This class is used to do oversampling of minority class, not undersampling"""
            )
        # Sort indices of every minority sample's neighbours, computed
        # within the minority class only (one row per minority sample).
        self.nn = np.apply_along_axis(self.get_nearest_neighbours, 1, X_min, X_min)
        self.minority_label = minority_label
        self.minority_label_map = minority_label_map
        self.y = y
        self.X_min = X_min
        self.X_maj = X[~minority_label_map, :]

    def transform(self, shuffle: bool = True) -> (np.array, np.array):
        """Generate the synthetic samples planned by :meth:`fit`.

        Parameters:
            shuffle: whether to shuffle the final dataset (rows and labels
                are permuted together).

        Returns:
            X_resampled: dataset with the minority class oversampled
            y_resampled: labels aligned row-for-row with X_resampled
        """
        num_maj_samples = self.X_maj.shape[0]
        num_min_samples = self.X_min.shape[0]
        # p = 100 * min_req / (maj + min_req)  =>  min_req = p*maj/(100-p)
        total_min_samples_reqd = math.ceil(self.p * num_maj_samples / (100 - self.p))
        extra_min_samples_reqd = total_min_samples_reqd - num_min_samples
        # Pick minority samples (with replacement) to interpolate from.
        resample_indices = np.random.randint(
            0, high=num_min_samples, size=extra_min_samples_reqd
        )
        smoted_samples = []
        for resample_index in resample_indices:
            sample_neighbours = self.nn[resample_index]
            random_sample = self.X_min[resample_index]
            smoted_samples.append(
                self.get_synthetic_sample(random_sample, sample_neighbours)
            )
        smoted_samples = np.array(smoted_samples)
        smoted_labels = np.array(
            [self.minority_label for _ in range(extra_min_samples_reqd)]
        )
        # Rows are ordered [majority; minority; synthetic], so the labels
        # must be ordered the same way (BUGFIX: the original concatenated
        # y in its original mixed order, misaligning rows and labels).
        X_resampled = np.concatenate((self.X_maj, self.X_min, smoted_samples), axis=0)
        y_resampled = np.concatenate(
            (self.y[~self.minority_label_map], self.y[self.minority_label_map], smoted_labels),
            axis=0,
        )
        if shuffle is True:
            # BUGFIX: one shared permutation keeps rows and labels aligned.
            order = np.random.permutation(X_resampled.shape[0])
            X_resampled = X_resampled[order]
            y_resampled = y_resampled[order]
        return X_resampled, y_resampled
| sharwinbobde/cyber-data-analytics | Part-1/smote.py | smote.py | py | 7,815 | python | en | code | 0 | github-code | 36 |
33014908805 |
#==========================================Librerias=======================================#
import time
from machine import RTC
# synchronize RTC with ntp
import ntptime
import startup
import ufirebase as firebase
from comunicacion import Uaart
#=====================================Conexion internet=====================================#
# NOTE(security): Wi-Fi SSID/password are hard-coded in source; move them to
# a config file or environment that is not committed to version control.
startup.wlan_connect("MEGACABLE-UZ7BSY", "93214985")
#startup.wlan_connect("INFINITUM4119", "hWzA1D2tm7")
# Base URL of the Firebase Realtime Database used throughout this module.
URL = "https://control-inteligente-310605-default-rtdb.firebaseio.com/"
#=======================================Tiempo Fecha=========================================#
rtc = RTC()
#fecha(0:year, 1:month, 2:day, 4:hour, 5:minute, 6:second)
def hora():
    """Return the local time as "H:M" (UTC-7), after syncing the RTC via NTP.

    Fix over the original: the old `hour + 17` branch produced "24:MM" for
    07:00 UTC; a single modulo-24 conversion handles every hour, including
    the midnight wrap-around, correctly.
    """
    ntptime.settime()
    date = rtc.datetime()
    # RTC reports UTC; shift to UTC-7 and wrap into the 0-23 range.
    return str((date[4] - 7) % 24) + ":" + str(date[5])
def fecha():
    """Return today's date as "YYYY:M:D" after syncing the RTC via NTP."""
    ntptime.settime()
    year, month, day = rtc.datetime()[0:3]
    return "{}:{}:{}".format(year, month, day)
#=========================================Sensores=============================================#
# Each reader sends a one-byte command over UART and returns the sensor's
# reply decoded as a UTF-8 string.
def TDS():
    # Total dissolved solids sensor
    return Uaart(b'1').decode('utf-8')
def DISTANCIA():
    # Water-level distance sensor
    return Uaart(b'2').decode('utf-8')
def TEMPERATURA():
    # Temperature sensor
    return Uaart(b'3').decode('utf-8')
def PH():
    # pH sensor
    return Uaart(b'4').decode('utf-8')
#=========================================Actuadores=============================================#
def Bomba_2():
    # Read the desired pump state from Firebase and forward it over UART.
    estado=firebase.get(URL+"bomba")
    # NOTE(review): int(...) has no .decode(); as written this line raises
    # AttributeError at runtime. `estado.decode()` (bytes -> str) or
    # str(int(estado)) was probably intended — confirm against the firmware.
    estado=(int(estado)).decode()
    return Uaart(estado).decode('utf-8')
#================================Transmicion de datos tiempo real===============================#
# Monotonic record counter persisted in Firebase so numbering survives resets.
i=firebase.get(URL+"i")
def mensaje():
    """Upload one timestamped sensor reading set to Firebase, then pace
    the loop so uploads happen roughly once per minute."""
    global i
    # Advance and persist the record counter before writing the record.
    i=int(i)+1
    firebase.patch(URL, {'i':str(i)})
    firebase.patch(URL, {'Sensor/Sensor1/'+str(i)+"/Fecha": fecha(),
                        'Sensor/Sensor1/'+str(i)+"/Hora": hora(),
                        'Sensor/Sensor1/'+str(i)+"/tds": TDS(),
                        'Sensor/Sensor1/'+str(i)+"/distancia": DISTANCIA(),
                        'Sensor/Sensor1/'+str(i)+"/ph": PH(),
                        'Sensor/Sensor1/'+str(i)+"/temperatura": TEMPERATURA()})
    # Sleep 56s; together with the 1s sleep in the main loop this gives
    # roughly one upload per minute.
    time.sleep(56)
# Main loop: upload sensor data and mirror the pump state forever.
while(1):
    try:
        mensaje()
        Bomba_2()
        time.sleep(1)
    except:
        # Watchdog path: on ANY failure, stop the pump and retry Wi-Fi.
        # NOTE(review): the bare except also hides programming errors —
        # consider catching OSError/network errors specifically.
        Uaart(b'5').decode('utf-8') #Apagar bomba
        startup.wlan_connect("MEGACABLE-UZ7BSY", "93214985")
#=================================================================================================#
5644011022 | import agentpy as ap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as mpatches
import seaborn as sns
def status_stackplot(data, ax):
    """Draw a stacked-area plot of the share of agents taking each action
    (store/buy/sell) over time on the given axes."""
    actions = ["store", "buy", "sell"]
    palette = ["blue", "orange", "green"]
    steps = data.index.get_level_values("t")
    series = [data[action] for action in actions]
    ax.stackplot(steps, series, labels=actions, colors=palette)
    ax.legend()
    ax.set_xlim(0, max(1, len(steps) - 1))
    ax.set_ylim(0, 1)
    ax.set_xlabel("Time steps")
    ax.set_ylabel("Percentage of population")
    ax.set_title("Proportion of agents taking each action")
def cost_lineplot(data, ax):
    """Plot daily cost over time (sign-flipped, skipping the first step)
    together with a dashed least-squares trend line."""
    steps = data.index.get_level_values("t")[1:]
    cost = -data["daily_cost"][1:]
    ax.plot(steps, cost)
    # Degree-1 polynomial fit gives the slope/intercept of the trend line.
    slope, intercept = np.polyfit(steps, cost, 1)
    ax.plot(steps, slope * steps + intercept, color="black", linestyle="--")
    ax.legend()
    ax.set_xlim(0, max(1, len(steps) - 1))
    ax.set_xlabel("Time steps")
    ax.set_ylabel("Daily cost (arbitrary units)")
def transfer_lineplot(data, ax):
    """Plot daily local vs. grid energy transfer over time (first step
    skipped) on the given axes."""
    steps = data.index.get_level_values("t")[1:]
    sns.set()
    for column, label in (("local_transfer", "Local transfer"),
                          ("grid_transfer", "Grid transfer")):
        ax.plot(steps, data[column][1:], label=label)
    ax.legend()
    ax.set_xlabel("Time steps")
    ax.set_ylabel("Daily energy sources (arbitrary units)")
def reward_lineplot(data, ax):
    """Plot the reward series over time (first step skipped) together with
    a dashed least-squares trend line."""
    steps = data.index.get_level_values("t")[1:]
    reward = data["reward"][1:]
    ax.plot(steps, reward)
    # Degree-1 polynomial fit gives the slope/intercept of the trend line.
    slope, intercept = np.polyfit(steps, reward, 1)
    ax.plot(steps, slope * steps + intercept, color="black", linestyle="--")
    ax.legend()
    ax.set_xlim(0, max(1, len(steps) - 1))
    ax.set_xlabel("Time steps")
    ax.set_ylabel("Reward (arbitrary units)")
def animation_plot(model, ax):
    """Render one animation frame: the agent grid colored by each agent's
    current status, with an action legend and a time/weather title."""
    status_grid = model.network.attr_grid("status")
    # Status code -> color; matches the action colors used in the legend
    # below (-1 -> orange, 0 -> blue, 1 -> green).
    status_colors = {-1: "orange", 0: "blue", 1: "green"}
    action_colors = {"buy": "orange", "sell": "green", "store": "blue"}
    cmap = colors.ListedColormap(list(status_colors.values()))
    ap.gridplot(status_grid, cmap=cmap, ax=ax)
    # One legend patch per action name.
    handles = []
    for label, color in action_colors.items():
        handles.append(mpatches.Patch(color=color, label=label))
    ax.legend(handles=handles)
    ax.set_title(f"Energyshed model \n Time-step: {model.t} Weather: {model.weather}")
def q_values_plot(i, q_values):
    """Draw a heat map of agent ``i``'s Q-values (states on rows, actions
    on columns). ``q_values`` is a sequence of per-agent dicts keyed by
    (state, action) tuples; state/action axes are built from ALL agents so
    every plot shares the same layout."""
    # Extract the state and action spaces from the q-values
    # NOTE(review): the comprehension variable deliberately(?) shadows the
    # `q_values` parameter; it works, but renaming it would be clearer.
    state_space = sorted(set([key[0] for q_values in q_values for key in q_values.keys()]))
    action_space = sorted(set([key[1] for q_values in q_values for key in q_values.keys()]))
    # From here on, work only with the selected agent's Q-table.
    q_values = q_values[i]
    # Create an empty matrix to hold the q-values (missing pairs stay 0)
    q_values_matrix = np.zeros((len(state_space), len(action_space)))
    for j, state in enumerate(state_space):
        for k, action in enumerate(action_space):
            q_values_matrix[j, k] = q_values.get((state, action), 0)
    # Human-readable label for the first element of each state tuple
    value_map = {-1: "Neg. energy bal.", 0: "Zero energy bal.", 1: "Pos. energy bal."}
    state_space_labels = [
        (value_map[energy], weather, store) for energy, weather, store in state_space
    ]
    # Clear the previous plot and plot the new heat map
    plt.clf()
    sns.heatmap(
        q_values_matrix,
        annot=True,
        fmt=".3g",
        xticklabels=action_space,
        yticklabels=state_space_labels,
        # Fixed color scale so successive frames are comparable
        norm=colors.Normalize(vmin=-50, vmax=10),
    )
| jacob-evarts/energyshed-simulation | src/plots.py | plots.py | py | 3,649 | python | en | code | 0 | github-code | 36 |
10731341714 | from os.path import isfile
import json
from db import normalize
from itertools import product
class Settings:
    """Per-user matchmaking settings.

    ``search`` holds VK users.search request parameters; ``match`` holds
    (field, value) pairs used to compare candidate profiles with the user.
    Settings are cached in a per-user JSON file and otherwise derived from
    the user's own VK profile.
    """
    def __init__(self, user_id):
        self.user_id = user_id
        self.search = {}  # VK users.search parameters
        self.match = []   # (field, value) pairs for profile matching
    @staticmethod
    def from_dict(settings: dict) -> tuple:
        """Unpack a stored settings dict into a (search, match) tuple."""
        return (
            settings.get('search'),
            settings.get('match'),
        )
    @staticmethod
    def to_dict(search, match) -> dict:
        """Pack search/match into the dict shape used for JSON storage."""
        return {
            'search': search,
            'match': match,
        }
    def load_from_file(self) -> bool:
        """Load settings from settings_<user_id>.json; False if absent."""
        file_name = f'settings_{self.user_id}.json'
        if not isfile(file_name):
            return False
        with open(file_name, mode='r', encoding='utf-8') as file:
            self.search, self.match = self.from_dict(json.load(file))
        return True
    def save_to_file(self):
        """Persist the current settings to settings_<user_id>.json."""
        file_name = f'settings_{self.user_id}.json'
        with open(file_name, mode='w', encoding='utf-8') as file:
            json.dump(
                self.to_dict(self.search, self.match),
                file, ensure_ascii=False, indent=4)
    # NOTE(review): annotated -> bool but never returns a value; it also
    # mutates the passed-in vk_user dict (the 'sex' key) — confirm intent.
    def load_from_vk(self, vk_user: dict) -> bool:
        """Derive search parameters and match criteria from a VK profile."""
        # Invert the user's own sex to search for the opposite one
        # (1<->2, anything else -> 0 = any).
        vk_user['sex'] = {1: 2, 2: 1}.get(vk_user['sex'], 0)
        # Profile fields copied into the search parameters verbatim.
        search_fields = {
            'city', 'country', 'hometown',
            'sex', 'has_photo', 'religion'
        }
        # Profile fields copied under a different search-parameter name.
        search_fields_with_fix = {
            'universities': 'university',
            'schools': 'school',
            'career': 'company'
        }
        # Profile fields eligible for (field, value) match scoring.
        match = {
            'universities', 'schools', 'status', 'activities',
            'interests', 'music', 'movies', 'tv', 'books',
            'games', 'about', 'quotes', 'career', 'military', 'langs',
            'verified', 'sex', 'city', 'country', 'home_town', 'has_photo',
            'has_mobile', 'common_count', 'occupation', 'relation',
            'can_post', 'can_see_all_posts', 'can_see_audio',
            'can_write_private_message', 'can_send_friend_request',
            'is_hidden_from_feed', 'blacklisted', 'blacklisted_by_me',
            'political', 'religion', 'inspired_by', 'people_main',
            'life_main', 'smoking', 'alcohol'
        }
        search_params = {
            field: value for field, value in vk_user.items()
            if field in search_fields
        }
        # Only single-element lists can be used as a search filter.
        for field, alias in search_fields_with_fix.items():
            if field in vk_user and len(vk_user[field]) == 1:
                search_params[alias] = vk_user[field][0]
        self.search = search_params.copy()
        # Keep only truthy values for matching.
        self.match = [
            (field, value) for field, value in vk_user.items()
            if field in match and value
        ]
    def load_settings(self, vk):
        """Load settings from the cache file, or build them from VK."""
        if self.load_from_file():
            return
        user = vk.get_user(self.user_id)
        normalize.normalize(user)
        self.load_from_vk(user)
    def make_flat_searc_params(self, searc_params=None):
        """Expand list-valued search parameters into the cartesian product
        of flat (scalar-valued) parameter dicts."""
        if searc_params is None:
            searc_params = self.search
        # One (key, value) pair stream per list-valued parameter.
        arrays = [
            product([key], value) for key, value in searc_params.items()
            if isinstance(value, (list, tuple))
        ]
        result = []
        for iteam in map(dict, product(*arrays)):
            # Overlay each combination onto a copy of the base parameters.
            new_iteam = searc_params.copy()
            new_iteam.update(iteam)
            result.append(new_iteam)
        return result
    def add_settings(self):
        """Add the extra search dimensions (sort order, online flag)."""
        new_settings = {
            'sort': [0, 1],
            'online': [0, 1]
        }
        self.search.update(new_settings)
    def get_base_searc(self):
        """Return the minimal search parameters with sensible defaults."""
        return {
            'sex': self.search.get('sex', [0, 1, 2]),
            'age_from': self.search.get('age_from', 18),
            'age_to': self.search.get('age_to', 25),
            'country': self.search.get('country', 1),
        }
| rychanya/vkinder | src/vkinder/settings.py | settings.py | py | 3,811 | python | en | code | 0 | github-code | 36 |
35397958448 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import mox
from pants.base.hash_utils import hash_all, hash_file
from pants.util.contextutil import temporary_file
class TestHashUtils(mox.MoxTestBase):
  """Tests for hash_all/hash_file, driving them with a mocked digest object."""
  def setUp(self):
    super(TestHashUtils, self).setUp()
    # Stand-in for a hashlib-style digest; mox records the expected
    # update()/hexdigest() call sequence, then verifies it on replay.
    self.digest = self.mox.CreateMockAnything()
  def test_hash_all(self):
    # Expect each element to be fed to the digest in order, then hexdigest read.
    self.digest.update('jake')
    self.digest.update('jones')
    self.digest.hexdigest().AndReturn('42')
    self.mox.ReplayAll()
    self.assertEqual('42', hash_all(['jake', 'jones'], digest=self.digest))
  def test_hash_file(self):
    # hash_file should read the file contents and feed them to the digest.
    self.digest.update('jake jones')
    self.digest.hexdigest().AndReturn('1137')
    self.mox.ReplayAll()
    with temporary_file() as fd:
      fd.write('jake jones')
      fd.close()
      self.assertEqual('1137', hash_file(fd.name, digest=self.digest))
| fakeNetflix/square-repo-pants | tests/python/pants_test/base/test_hash_utils.py | test_hash_utils.py | py | 942 | python | en | code | 0 | github-code | 36 |
75188366184 |
def taomang(n):
    """Read *n* strings from standard input and return them newest-first.

    Uses a local list instead of the module-level ``arr`` so repeated calls
    do not accumulate entries from earlier runs.
    """
    entries = []
    for _ in range(n):
        entries.append(input('nhap mang: '))
    return list(reversed(entries))
# Ask how many entries to collect, then print them in reverse input order.
nhap = int(input('nhap so luong mang: '))
arr = []
print(taomang(nhap))
| nghia46203/lap-trinh-python | 2113005_lab1/cau2/test.py | test.py | py | 239 | python | en | code | 0 | github-code | 36 |
2063152797 | from flask import Flask, render_template, redirect, url_for, flash, request, Blueprint, abort
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask_migrate import Migrate
from werkzeug.urls import url_parse
from models import *
from forms import *
from flask_admin import Admin, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import ImageUploadField
import os
from jinja2 import Environment
import base64
# Module-level Flask application and extension wiring.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — move to an environment variable for production.
app.config['SECRET_KEY'] = 'supersecretkey'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///marketplace.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['UPLOAD_FOLDER'] = 'static'
db.init_app(app)
# Create all tables up front (no-op for tables that already exist).
with app.app_context():
    db.create_all()
migrate = Migrate(app, db)
login = LoginManager(app)
# Endpoint users are redirected to when @login_required blocks access.
login.login_view = 'login'
@login.user_loader
def load_user(id):
    # Flask-Login callback: resolve the id stored in the session cookie
    # back to a User row (None if it no longer exists).
    return User.query.get(int(id))
@app.route('/')
@app.route('/home')
def index():
    """Render the storefront with every product in the catalogue.

    ``Product.query`` is passed unevaluated; the template iterates it,
    which executes the SELECT lazily.
    """
    return render_template('home.html', products=Product.query)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session.

    Already-authenticated visitors are bounced to the home page; a failed
    attempt flashes an error and re-renders the form; success redirects to
    the personal cabinet.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Неправильная почта или пароль')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        return redirect(url_for('lc'))
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the storefront."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        # NOTE(review): the raw form password is passed straight to the model;
        # login() later calls user.check_password(), so the User model is
        # presumably hashing internally — confirm against models.py.
        user = User(username=form.username.data, email=form.email.data, password=form.password.data, role=form.role.data)
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route('/lc')
@login_required
def lc():
    """Personal cabinet landing page (login required)."""
    return render_template('all.html')
@app.route('/uploadproduct', methods=['GET', 'POST'])
@login_required
def upload():
    """Let a logged-in seller create a product with a base64-encoded image.

    ``@login_required`` is required here: the handler reads
    ``current_user.id``, which fails for anonymous visitors.
    """
    form = ProductForm()
    if form.validate_on_submit():
        # The uploaded image is stored inline in the DB as base64 text.
        product = Product(name=form.name.data, description=form.description.data,
                          price=form.price.data, quantity=form.quantity.data,
                          image=base64.b64encode(form.image.data.read()).decode(),
                          category=form.category.data, seller_id=current_user.id)
        db.session.add(product)
        db.session.commit()
        return redirect(url_for('lc'))
    return render_template('product.html', title='nnn', form=form)
class UserView(ModelView):
    """Admin CRUD view for users; visible to admins only."""
    def is_accessible(self):
        # Check is_authenticated first: the anonymous user object has no
        # ``role`` attribute, so the bare role check raises for logged-out
        # visitors. This also matches ProductView.is_accessible.
        return current_user.is_authenticated and current_user.role == 'admin'
class ProductView(ModelView):
    """Admin CRUD view for products; restricted to admin users."""
    def is_accessible(self):
        return current_user.is_authenticated and current_user.role == 'admin'
    def get_query(self):
        # is_accessible() already gates access, so only the admin branch is
        # reachable; the implicit None return for non-admins is unreachable
        # in practice.
        if current_user.role == 'admin':
            return self.session.query(self.model)
    def get_count_query(self):
        if current_user.role == 'admin':
            return db.session.query(db.func.count(self.model.id))
class CustomAdminIndexView(AdminIndexView):
    """Admin dashboard index that is hidden from everyone but admins."""
    @expose('/')
    def index(self):
        if not current_user.is_authenticated or current_user.role != 'admin':
            abort(404)  # Not Found (not 403): hides the admin panel's existence
        return super().index()
# Wire up Flask-Admin using the access-controlled views defined above.
admin = Admin(app, index_view=CustomAdminIndexView())
admin.add_view(UserView(User, db.session))
admin.add_view(ProductView(Product, db.session, category='Products', name='Edit Products'))
if __name__ == '__main__':
    # Development server only; use a WSGI server (gunicorn/uwsgi) in production.
    app.run(debug=True)
72076483945 | import numpy as np
import torch
from torch.utils.data import Dataset
import matplotlib
from matplotlib import pyplot as plt
import enum
import scipy
from scipy import ndimage, signal
import io
from . import fileloader, util, zernike
from skimage import restoration
@enum.unique
class Augmentation(enum.Enum):
    """Data-augmentation switches recognized by the dataset classes."""
    PIXEL_SHIFT = 1
    NOISE_GAUSSIAN = 2
class BaseDataset(Dataset):
    """Common base for all datasets in this module."""
    def __init__(self):
        super().__init__()
        # When True, __getitem__ returns (image, target_image) instead of
        # (image, label_dict); see FilePairsDataset and inspect_images().
        self.target_is_image = False
class SimulatedImageDataset(BaseDataset):
    """
    Base class for synthetic image datasets.

    Subclasses implement ``generate_images``; this class drives the common
    pipeline: sample per-emitter parameters, render emitter images, apply
    amplitude/background, optional convolution, noise and normalization.
    All images are rendered once in ``__init__`` and cached in ``self.images``.
    """
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 image_params={},
                 noise_params={'poisson':True, 'gaussian':10},
                 conv_kernel=None,
                 normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                 image_params_preset={}):
        super().__init__()
        # Reject unknown augmentation keys early.
        for key in augmentations:
            if not isinstance(key, Augmentation):
                raise Exception("Augmentation '{}' not recognized. Use Augmentation enum.".format(key))
        self.params_range = image_params
        self.augmentations = augmentations
        self.padding = augmentations.get(Augmentation.PIXEL_SHIFT, [0,0]) # x, y
        # Render larger than out_size so random crops can shift the content.
        self.gen_size = (out_size[0]+2*self.padding[0], out_size[1]+2*self.padding[1])
        self.out_size = out_size
        # ``length`` may be an int (N images, 1 emitter each) or a pair
        # (N images, emitters per image).
        output_image_shape = np.atleast_1d(np.asarray(length))
        if output_image_shape.shape[0]<2:
            output_image_shape = np.concatenate([output_image_shape, [1]])
        self.set_params(output_image_shape, image_params, image_params_preset)
        shifts = np.stack([self.params['x'].flatten(), self.params['y'].flatten(), self.params['z'].flatten()], axis=-1)
        images = self.generate_images(self.gen_size, output_image_shape, shifts, image_params)
        # Randomly blank whole emitters with probability dropout_p.
        if dropout_p > 0:
            images = images * (np.random.rand(images.shape[0], 1, 1) > dropout_p)
        images = images * self.params['A'].reshape(-1, 1, 1)
        # Sum the emitters of each image -> (N, 1, H, W), then add background.
        images = images.reshape(output_image_shape[0], output_image_shape[1], images.shape[1], images.shape[2])
        images = images.sum(axis=1, keepdims=True)
        images = images + self.params['bg'].reshape(-1, 1, 1, 1)
        # Optional blur via 2D convolution (reflect padding keeps output size).
        if not conv_kernel is None:
            conv_kernel = torch.as_tensor(conv_kernel, dtype=torch.float).reshape(1, 1, conv_kernel.shape[-2], conv_kernel.shape[-1])
            images = torch.as_tensor(images, dtype=torch.float)
            images = torch.nn.functional.pad(images, (conv_kernel.shape[-1]//2,)*2 + (conv_kernel.shape[-2]//2,)*2, mode="reflect")
            images = torch.nn.functional.conv2d(images, conv_kernel, padding=0).numpy()
        if len(noise_params) > 0:
            images = self.add_noise(images, noise_params)
        # Per-image min/max normalization to [0, 1].
        if normalize:
            images -= images.min(axis=(2,3), keepdims=True)
            images /= images.max(axis=(2,3), keepdims=True)
        self.images = images.astype(np.float32)
    def set_params(self, output_image_shape, image_params, image_params_preset):
        """Sample per-emitter parameters uniformly from their [lo, hi] ranges."""
        # print("Image parameters settings: {}".format(image_params))
        self.params = {}
        self.params['id'] = np.arange(output_image_shape[0])
        self.params['A'] = np.random.uniform(image_params['A'][0], image_params['A'][1], output_image_shape).astype(np.float32)
        self.params['bg'] = np.random.uniform(image_params['bg'][0], image_params['bg'][1], output_image_shape[0]).astype(np.float32)
        self.params['x'] = np.random.uniform(image_params['x'][0], image_params['x'][1], output_image_shape).astype(np.float32)
        self.params['y'] = np.random.uniform(image_params['y'][0], image_params['y'][1], output_image_shape).astype(np.float32)
        if 'z' in image_params:
            self.params['z'] = np.random.uniform(image_params['z'][0], image_params['z'][1], output_image_shape).astype(np.float32)
        else:
            self.params['z'] = np.zeros(output_image_shape).astype(np.float32)
        # Fixed (non-random) parameter values override the sampled ones.
        self.params.update(image_params_preset)
    def generate_images(self, size, length, shifts, image_params):
        """Render one image per emitter; implemented by subclasses."""
        raise NotImplementedError()
    def add_noise(self, images, noise_params):
        """Apply shot (Poisson) and/or additive Gaussian read noise."""
        ret = images.copy()
        if noise_params.get('poisson', False) is True:
            ret += np.random.poisson(images) - images
        if 'gaussian' in noise_params:
            ret += np.random.normal(np.zeros_like(images), noise_params['gaussian'])
        return ret
    def __len__(self):
        return self.images.shape[0]
    def __getitem__(self, key):
        """Return (image, label-dict); augmentations are applied per fetch."""
        image = self.images[key]
        label = {param_key: param_val[key] for param_key, param_val in self.params.items()}
        if Augmentation.PIXEL_SHIFT in self.augmentations:
            # Random crop inside the padded render; adjust x/y labels to match.
            shift = [np.random.randint(0, 2*i+1) for i in self.padding]
            label['x'] = label['x'] - shift[0] + self.padding[0]
            label['y'] = label['y'] - shift[1] + self.padding[1]
            image = image[:,shift[0]:shift[0]+self.out_size[0],shift[1]:shift[1]+self.out_size[1]]
        if Augmentation.NOISE_GAUSSIAN in self.augmentations:
            # Noise sigma is a fraction of the image's dynamic range.
            noise_sig = self.augmentations[Augmentation.NOISE_GAUSSIAN] * (image.max() - image.min())
            image = np.random.normal(image, noise_sig).astype(np.float32)
        return image, label
    def to(self, device):
        """Move the cached image stack to a torch device."""
        self.images = torch.as_tensor(self.images, device=device)
class SingleImageDataset(SimulatedImageDataset):
    """
    Repeatedly sample a single image.

    Sub-pixel x/y shifts of the source image are realized via the Fourier
    shift theorem (a linear phase ramp applied in frequency space).
    """
    def __init__(self, data, out_size=(64, 64), length=16, dropout_p=0,
                 image_params={},
                 noise_params={'poisson':True, 'gaussian':10},
                 conv_kernel = None,
                 normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                 image_params_preset={}):
        default_image_params = {
            'A': [0.5, 2.0],
            'bg': [0, 10],
            'x': [-5, 5],
            'y': [-5, 5],
            # 'conv':np.ones((3,3)),
        }
        # Caller-supplied ranges override the defaults above.
        _image_params = dict(default_image_params, **image_params)
        _image_params['data'] = data
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_image_params,
                         noise_params=noise_params,
                         conv_kernel=conv_kernel,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, image_params):
        """Produce one sub-pixel-shifted copy of the source image per shift."""
        data = image_params['data']
        # add padding larger than shifts
        shift_max = [np.ceil(np.max([np.abs(shifts[:,i].min()), shifts[:,i].max()])).astype(int) for i in range(len(shifts.shape))]
        crop_size = [size[i] + 2*shift_max[i] for i in range(len(data.shape))]
        data = data[:crop_size[0],:crop_size[1]]
        # zero padding for fft
        padding = [(int(np.ceil(1.5 * data.shape[0])),)*2, (int(np.ceil(1.5 * data.shape[1])),)*2]
        data = np.pad(data, padding, mode='wrap')
        kx = np.fft.fftshift(np.fft.fftfreq(data.shape[0]))
        ky = np.fft.fftshift(np.fft.fftfreq(data.shape[1]))
        self.KX, self.KY = np.meshgrid(kx, ky, indexing='ij')
        fft_image = np.fft.fftshift(np.fft.fft2(data))
        fft_image_mag = np.abs(fft_image)
        fft_image_phase = np.angle(fft_image)
        # helps remove ringing artifacts
        fft_image_mag = fft_image_mag * signal.windows.tukey(fft_image_mag.shape[0], alpha=0.5)[:,None]
        fft_image_mag = fft_image_mag * signal.windows.tukey(fft_image_mag.shape[1], alpha=0.5)[None,:]
        # x, y shift: a linear phase ramp in k-space translates the image.
        fft_image_phase = fft_image_phase - 2 * np.pi * (self.KX[None,...] * shifts[:,0,None,None])
        fft_image_phase = fft_image_phase - 2 * np.pi * (self.KY[None,...] * shifts[:,1,None,None])
        shifted_fft = fft_image_mag * np.exp(1j * fft_image_phase)
        shifted_img = np.fft.ifft2(np.fft.ifftshift(shifted_fft))
        # Undo both the FFT zero padding and the shift safety margin.
        crop = np.concatenate([shift_max[i] + padding[i] for i in range(len(data.shape))])
        shifted_img = shifted_img[:, crop[0]:-crop[1], crop[2]:-crop[3]]
        return np.abs(shifted_img)
class SimulatedPSFDataset(SimulatedImageDataset):
    """Intermediate base for point-spread-function datasets; provides
    PSF-scale defaults for amplitude, background and emitter position."""
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 image_params={},
                 noise_params={'poisson':True, 'gaussian':10},
                 normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                 image_params_preset={}):
        default_image_params = {
            'A': [500, 2000],
            'bg': [0, 100],
            # Emitters placed within the central 70% of the field of view.
            'x': [-0.35*out_size[0], 0.35*out_size[0]],
            'y': [-0.35*out_size[1], 0.35*out_size[1]],
        }
        _image_params = dict(default_image_params, **image_params)
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_image_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, image_params):
        """Render PSF images; implemented by concrete subclasses."""
        raise NotImplementedError()
class Gaussian2DPSFDataset(SimulatedPSFDataset):
    """Synthetic dataset of 2D Gaussian point-spread functions."""
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 psf_params={},
                 noise_params={'poisson':True, 'gaussian':100},
                 normalize=False, augmentations={},
                 image_params_preset={}):
        default_image_params = {
            'sig_x':[5, 5],
            'sig_y':[5, 5],
        }
        _image_params = dict(default_image_params, **psf_params)
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_image_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, psf_params):
        """Render one shifted 2D Gaussian per emitter."""
        # Pixel coordinate grids with the origin at the image center.
        xs = np.arange(0, size[0]) - 0.5*(size[0]-1)
        ys = np.arange(0, size[1]) - 0.5*(size[1]-1)
        XS, YS = np.meshgrid(xs, ys, indexing='ij')
        self.params['sig_x'] = np.random.uniform(*psf_params['sig_x'], length).astype(np.float32)
        self.params['sig_y'] = np.random.uniform(*psf_params['sig_y'], length).astype(np.float32)
        # NOTE(review): the denominators are 2*sig rather than 2*sig**2, so
        # 'sig_x'/'sig_y' effectively behave as variances, not standard
        # deviations — confirm whether this parameterization is intended.
        ret = np.exp(-((XS[None,...]-shifts[:,0,None,None])**2/(2*self.params['sig_x'].reshape(-1,1,1)) \
                       + (YS[None,...]-shifts[:,1,None,None])**2/(2*self.params['sig_y'].reshape(-1,1,1))))
        return ret
class FourierOpticsPSFDataset(SimulatedPSFDataset):
    """PSFs computed by Fourier optics: a (possibly aberrated) pupil is
    phase-ramped per emitter and Fourier-transformed to the image plane.

    Zernike aberrations are supplied via ``psf_zerns``; ``pupil_scale``
    sets the pupil radius relative to the frequency support and ``apod``
    selects an apodized (sine-condition) pupil magnitude.
    """
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 psf_params={}, psf_zerns={},
                 noise_params={'poisson':True, 'gaussian':100},
                 normalize=False, augmentations={},
                 image_params_preset={}):
        default_psf_params = {
            'apod':False,
            'pupil_scale':0.75,
        }
        _psf_params = dict(default_psf_params, **psf_params)
        _psf_params["psf_zerns"] = psf_zerns
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_psf_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, psf_params):
        """Render |FFT(pupil)|^2 per emitter, normalized to the unshifted PSF peak."""
        # Work on a 4x oversampled pupil grid; the slices below crop back.
        pupil_padding_factor = 4
        pupil_padding_clip = 0.5 * (pupil_padding_factor - 1)
        pupil_padding = int(pupil_padding_clip*size[0]), int(-pupil_padding_clip*size[0]), int(pupil_padding_clip*size[1]), int(-pupil_padding_clip*size[1])
        kx = np.fft.fftshift(np.fft.fftfreq(pupil_padding_factor*size[0]))
        ky = np.fft.fftshift(np.fft.fftfreq(pupil_padding_factor*size[1]))
        self.KX, self.KY = np.meshgrid(kx, ky, indexing='ij')
        # Normalized pupil coordinates; R=1 is the pupil edge.
        us = np.linspace(-1, 1, pupil_padding_factor*size[0]) * (pupil_padding_factor*size[0]-1) / (size[0]-1) / psf_params.get('pupil_scale', 0.75)
        vs = np.linspace(-1, 1, pupil_padding_factor*size[1]) * (pupil_padding_factor*size[0]-1) / (size[0]-1) / psf_params.get('pupil_scale', 0.75)
        US, VS = np.meshgrid(us, vs, indexing='ij')
        R = np.sqrt(US**2 + VS**2)
        if psf_params.get('apod', False):
            pupil_mag = np.sqrt(1-np.minimum(R, 1)**2)
        else:
            # Fix: np.float (deprecated alias of builtin float) was removed
            # in NumPy 1.24; astype(float) is the documented equivalent.
            pupil_mag = (R <= 1).astype(float)
        pupil_phase = zernike.calculate_pupil_phase(R*(R<=1), np.arctan2(US, VS), psf_params.get("psf_zerns", {}))
        self.pupil = pupil_mag * np.exp(1j*pupil_phase)
        self.pupil = self.pupil[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
        self.pupil_suppl = {"radial_distance": (R*(R<=1))[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]],
                            "azimuthal_angle": np.arctan2(US, VS)[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]}
        # x/y shifts are linear phase ramps; z adds a defocus-like phase term.
        shifted_pupil_phase = np.tile(pupil_phase, (shifts.shape[0], 1, 1))
        shifted_pupil_phase = shifted_pupil_phase - 2 * np.pi * (self.KX[None,...] * shifts[:,0,None,None])
        shifted_pupil_phase = shifted_pupil_phase - 2 * np.pi * (self.KY[None,...] * shifts[:,1,None,None])
        shifted_pupil_phase = shifted_pupil_phase + np.sqrt(1-np.minimum(R, 1)**2) * shifts[:,2,None,None]
        shifted_pupils = pupil_mag[None,...]*np.exp(1j*shifted_pupil_phase)
        psfs = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(shifted_pupils)))
        psfs = psfs[:, pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
        psfs = np.abs(psfs)**2
        # Normalize against the peak of the unaberrated-shift reference PSF.
        ref_psf = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(np.pad(self.pupil, ((pupil_padding[0], -pupil_padding[1]), (pupil_padding[2], -pupil_padding[3]))))))
        ref_psf = ref_psf[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
        ref_psf = np.abs(ref_psf)**2
        psfs /= ref_psf.max()
        return psfs
class FileDataset(BaseDataset):
    """Dataset serving frames from an image file/stack on disk.

    ``__getitem__`` draws a *random* frame from ``image_slice`` regardless
    of the requested key; ``length`` only controls the reported size.
    """
    def __init__(self, file_path,
                 transform=None,
                 image_slice=slice(None),
                 length=None,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        super().__init__()
        self.file = self.load_file(file_path,
                                   file_loader=file_loader,
                                   slices=slices,
                                   stack_to_volume=stack_to_volume,
                                   cache=cache)
        # Default reported length: number of frames in the file.
        if length is None:
            self.length = len(self.file)
        else:
            self.length = length
        self.transform = transform
        # Indices of the frames that __getitem__ is allowed to sample.
        self.image_slice = np.arange(len(self.file), dtype=np.int32)[image_slice]
    def load_file(self, file_path, file_loader, slices, stack_to_volume, cache):
        """Open the file through the loader and log a one-line summary."""
        file_loaded = file_loader(file_path, slices=slices,
                                  stack_to_volume=stack_to_volume, cache=cache)
        print(", ".join(["{}: {}".format(key, val) for key, val in
                         {"filepath":file_loaded.file_path,
                          "frames":len(file_loaded),
                          "image shape":file_loaded[0].shape}.items()]))
        return file_loaded
    def __len__(self):
        return self.length
    def __getitem__(self, key):
        # Random frame draw; ``key`` is only echoed back in the label.
        file_id = np.random.choice(self.image_slice)
        img = torch.as_tensor(self.file[file_id])
        if not self.transform is None:
            img = self.transform(img)
        return img, {'id': key}
class ResamplingFileDataset(FileDataset):
    """Serve random fixed-size crops (sub-volumes) from frames of a file."""
    # TODO: overlap with SingleImageDataset?
    def __init__(self, file_path, out_size=(64, 64, 64),
                 length=16,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        super().__init__(file_path=file_path, length=length,
                         file_loader=file_loader,
                         slices=slices, stack_to_volume=stack_to_volume,
                         cache=cache)
        self.in_size = self.file[0][0].shape
        # Clip the requested crop to the stored frame size per dimension.
        self.out_size = [min(out_size[dim], self.in_size[dim]) for dim in range(len(out_size))]
        # Lexicographic list compare works here: clipping only ever decreases
        # elements, so the first differing element is always the smaller one.
        if (self.out_size < list(out_size)):
            print("out_size {} clipped to {}".format(out_size, self.out_size))
            print(self.in_size, self.out_size)
    def __getitem__(self, key):
        """Return a random crop and labels with its frame id and offsets."""
        file_id = np.random.randint(0, len(self.file), dtype=np.int32)
        # Random top-left corner for each dimension of the crop.
        shifts = np.asarray([np.random.randint(0, self.in_size[dim] - self.out_size[dim] + 1) for dim in range(len(self.in_size))])
        labels = {'id':file_id, }
        labels.update({"slice_{}".format(['x','y','z'][i]): shift for i, shift in enumerate(shifts)})
        slicing = np.stack([shifts, shifts + self.out_size], -1)
        slicing = tuple([slice(None),] + [slice(a, b) for (a, b) in slicing])
        return self.file[file_id][slicing], labels
class FilePairsDataset(FileDataset):
    """Paired (input, target) image dataset loaded from two files.

    Frame i of ``file_path`` is the input and frame i of ``target_file_path``
    is its ground-truth target. Both transforms run under the same RNG seed
    so random augmentations stay aligned between input and target.
    """
    def __init__(self, file_path, target_file_path,
                 transform=None, target_transform=None,
                 image_slice=slice(None),
                 length=16,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        super().__init__(file_path=file_path,
                         file_loader=file_loader, slices=slices,
                         stack_to_volume=stack_to_volume,
                         cache=cache)
        self.target_is_image = True
        self.target_file = self.load_file(target_file_path,
                                          file_loader=file_loader,
                                          slices=slices,
                                          stack_to_volume=stack_to_volume,
                                          cache=cache)
        self.transform = transform
        self.target_transform = target_transform
        self.image_slice = np.arange(len(self.file), dtype=np.int32)[image_slice]
    def __getitem__(self, key):
        """Return a randomly drawn (input, target) frame pair."""
        file_id = np.random.choice(self.image_slice)
        img = torch.as_tensor(self.file[file_id])
        target = torch.as_tensor(self.target_file[file_id])
        # Re-seed before each transform so random augmentations (crops,
        # flips, ...) are identical for the input and its target.
        seed = np.random.randint(2147483648)
        if self.transform is not None:
            torch.manual_seed(seed)
            img = self.transform(img)
        if self.target_transform is not None:
            torch.manual_seed(seed)
            # Bug fix: this previously applied self.transform, silently
            # ignoring the configured target_transform.
            target = self.target_transform(target)
        return img, target
def inspect_images(dataset, indices=None):
    """Plot sample images (linear and log scale), labels, parameter
    histograms and, when present, the dataset's pupil function.

    ``indices`` defaults to up to 8 randomly chosen sample indices.
    """
    if indices is None:
        indices = np.random.choice(len(dataset), min(8, len(dataset)), replace=False)
    # Fetch samples; tensors are detached to numpy, labels kept as-is.
    images, labels = zip(*[(dataset[i][0].detach().cpu().numpy() if torch.is_tensor(dataset[i][0]) else dataset[i][0], dataset[i][1]) for i in indices])
    tiled_images, n_col, n_row = util.tile_images(util.reduce_images_dim(np.stack(images, axis=0)), full_output=True)
    fig, axes = plt.subplots(2, 1, figsize=(4*n_col, 3*n_row*2))
    im = axes[0].imshow(tiled_images)
    plt.colorbar(im, ax=axes[0])
    im = axes[1].imshow(np.log(tiled_images))
    plt.colorbar(im, ax=axes[1])
    axes_to_label = [axes,]
    # If targets are images too, tile and plot them the same way.
    if dataset.target_is_image is True:
        tiled_images, n_col, n_row = util.tile_images(util.reduce_images_dim(np.stack(labels, axis=0)), full_output=True)
        fig, axes = plt.subplots(2, 1, figsize=(4*n_col, 3*n_row*2))
        im = axes[0].imshow(tiled_images)
        plt.colorbar(im, ax=axes[0])
        im = axes[1].imshow(np.log(tiled_images))
        plt.colorbar(im, ax=axes[1])
        axes_to_label.append(axes)
    # Print numeric labels and annotate each tile with its sample index.
    for i, id in enumerate(indices):
        label = "{}:\t".format(id)
        if dataset.target_is_image is False:
            for key, val in labels[i].items():
                label += " [{} =".format(key)
                for datum in np.atleast_1d(val.squeeze()):
                    label += " {:.3f},".format(datum)
                label += "],"
            print(label)
        for axes in axes_to_label:
            for j in range(2):
                axes[j].text(i%n_col / n_col, i//n_col / n_row,
                             # label,
                             id,
                             bbox={'facecolor':'white', 'alpha':1},
                             ha='left', va='bottom',
                             fontsize='medium',
                             transform=axes[j].transAxes)
    # Histogram of each sampled parameter (simulated datasets only).
    if hasattr(dataset, 'params'):
        fig, axes = plt.subplots(1, len(dataset.params), figsize=(4*len(dataset.params), 3))
        for i, (key, val) in enumerate(dataset.params.items()):
            axes[i].hist(val.flatten(), bins=20)
            axes[i].set_xlabel(key)
    # Pupil magnitude/phase plus fitted Zernike coefficients, when available.
    if hasattr(dataset, 'pupil'):
        fig, axes = plt.subplots(1, 3, figsize=(4*2 + 8, 3), gridspec_kw={'width_ratios': [1,1,3]})
        pupil_magnitude = np.abs(dataset.pupil)
        pupil_magnitude_colored, norm, cmap = util.color_images(pupil_magnitude, full_output=True)
        im = axes[0].imshow(pupil_magnitude_colored)
        plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=axes[0])
        axes[0].set_title('pupil mag')
        pupil_phase = restoration.unwrap_phase(np.ma.array(np.angle(dataset.pupil), mask=np.abs(dataset.pupil)<=0))
        pupil_phase_colored, norm, cmap = util.color_images(pupil_phase, vsym=True, full_output=True)
        im = axes[1].imshow(pupil_phase_colored)
        plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=axes[1])
        axes[1].set_title('pupil phase')
        zernike_coeffs = zernike.fit_zernike_from_pupil(dataset.pupil, 16, dataset.pupil_suppl["radial_distance"], dataset.pupil_suppl["azimuthal_angle"])
        zernike.plot_zernike_coeffs(axes[2], zernike_coeffs)
        fig.tight_layout()
5919442650 | #!/usr/bin/env python3
heatmap_skeleton = '''$(function () {
$('#container').highcharts({
chart: {
type: 'heatmap',
marginTop: 40,
marginBottom: 40
},
title: {
text: null
},
xAxis: {
categories: [%s],
title: 'k'
},
yAxis: {
categories: [%s],
title: 'm'
},
colorAxis: {
min: 0,
minColor: '#FFFFFF',
maxColor: Highcharts.getOptions().colors[1]
},
legend: {
align: 'right',
layout: 'vertical',
margin: 0,
verticalAlign: 'top',
y: 25,
symbolHeight: 320
},
series: [{
name: null,
borderWidth: 1,
data: [%s],
dataLabels: {
enabled: false,
color: 'black',
style: {
textShadow: 'none',
HcTextStroke: null
}
}
}]
});
});'''
heatmap_skeleton = ' '.join(s.strip() for s in heatmap_skeleton.split('\n'))
if __name__ == '__main__':
    import sys
    import re
    # Expected stdin lines: filenames shaped "<time|count>-<name>-<k>-<m>-<n>";
    # each named file contains one float per line to be averaged.
    filename_reg = re.compile(r'^(time|count)-(.+)-(\d+)-(\d+)-(\d+)$')
    data = {'time': {}, 'count': {}}
    for line in sys.stdin.readlines():
        line = line.strip()
        match = filename_reg.match(line)
        if match:
            type_ = match.group(1)
            name = match.group(2)
            k = int(match.group(3))
            m = int(match.group(4))
            n = int(match.group(5))
            # Small-k count runs are skipped (not meaningful for the heatmap).
            if type_ == 'count' and k <= 3:
                continue
            try:
                ls = list(map(float, open(match.group(0)).readlines()))
            except ValueError:
                print('File %s is malformated' % match.group(0), file=sys.stderr)
                continue
            if not ls:
                continue
            avg = sum(ls) / len(ls)
            data[type_].setdefault(name, {})
            data[type_][name][k, m, n] = avg
    # Emit one Highcharts snippet per (type, benchmark-name) table.
    for type_, element in data.items():
        for name, table in element.items():
            kCategories = set()
            mCategories = set()
            for (k, m, n), avg in table.items():
                kCategories.add(k)
                mCategories.add(m)
            kCategories = sorted(kCategories)
            mCategories = sorted(mCategories)
            serie = list()
            # Heatmap points are [x-index, y-index, value] triples.
            for (k, m, n), avg in table.items():
                serie.append((kCategories.index(k), mCategories.index(m), avg))
            strKCategories = ', '.join(map(str, kCategories))
            strMCategories = ', '.join(map(str, mCategories))
            strSerie = ', '.join('[%s, %s, %s]' % e for e in serie)
            print(type_, name)
            print(heatmap_skeleton % (strKCategories, strMCategories, strSerie))
| TurpIF/tp-markov-chain | filenames2heatmap.py | filenames2heatmap.py | py | 2,949 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.