seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
6747544921 | from sender import *
phone_number = str(input("Enter Your Phone Number(Without First Zero), eg: 9123456789 : "))
number_of_sms = int(input("Number Of SMS, eg: 20 : "))
sent_sms = 0
end = False
while number_of_sms:
for i in range(len(All)):
resp = All[i][1].send(phone_number)
if resp["code"] == 200:
sent_sms += 1
else:
print(All[i][0] + " : " + resp["message"])
print(resp)
print("========================")
if sent_sms == number_of_sms:
print(str(number_of_sms) + " SMS sent successfully")
end = True
exit(0)
| amirmnoohi/SMS-bomber | main.py | main.py | py | 632 | python | en | code | 0 | github-code | 13 |
30700095884 | import numpy as np
import astropy
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
from astropy.nddata import CCDData
import ccdproc
import astropy.units as u
from astropy.modeling import models
from ccdproc import Combiner
import m2fs_process as m2fs
import dill as pickle
# --- run configuration -------------------------------------------------------
# Root of the M2FS reduction tree; '<directory><m2fsrun>_science_raw' lists the
# science frames to process, one per line: "utdate file1 file2 flatfile".
directory='/nfs/nas-0-9/mgwalker.proj/m2fs/'
m2fsrun='nov18'
#m2fsrun='jul15'
datadir='/nfs/nas-0-9/mgwalker.proj/m2fs/m2fs.astro.lsa.umich.edu/data/NovDec2018/'
#datadir='/nfs/nas-0-9/mgwalker.proj/m2fs/m2fs.astro.lsa.umich.edu/data/Jul2015/'
threshold_factor=25.#multiple of continuum residual rms threshold to impose for aperture detection in find_lines_derivative
n_lines=20#columns to combine when scanning across rows to identify apertures (as 'emission lines')
continuum_rejection_iterations=10#number of iterations of outlier rejection for fitting "continuum"
window=10#pixels, width of aperture window for fitting (gaussian) aperture profiles (perpendicular to spectral axis)
trace_step=n_lines#tracing step
# Parse the frame list; rows whose first field is 'none' are skipped.
utdate=[]
file1=[]
file2=[]
flatfile=[]
with open(directory+m2fsrun+'_science_raw') as f:
    data=f.readlines()[0:]
for line in data:
    p=line.split()
    if p[0]!='none':
        utdate.append(str(p[0]))
        file1.append(int(p[1]))
        file2.append(int(p[2]))
        flatfile.append(int(p[3]))
utdate=np.array(utdate)
file1=np.array(file1)
file2=np.array(file2)
flatfile=np.array(flatfile)
# For each flat frame and each CCD ('b'lue / 'r'ed), detect apertures on the
# stitched image and pickle the resulting columnspec array next to the data.
for i in range(0,len(utdate)):
    for ccd in ('b','r'):
        root=datadir+utdate[i]+'/'+ccd+str(flatfile[i]).zfill(4)
        data=astropy.nddata.CCDData.read(root+'_stitched.fits')
        out=root+'_columnspec_array.pickle'
        columnspec_array=m2fs.get_columnspec(data,trace_step,n_lines,continuum_rejection_iterations,threshold_factor,window)
        # NOTE(review): the file handle passed to pickle.dump is never closed
        # explicitly -- consider a 'with open(out,"wb")' block.
        pickle.dump(columnspec_array,open(out,'wb'))
        print('writing '+out)
'''
columnspec_array is array of columnspec objects.
columnspec object contains:
columns: numbers of columns in original data frame that are stacked
spec1d: 'spectrum' across stacked column, where each aperture appears as an 'emission line'
pixel: value of pixel across stacked column 'spectrum', artificially given units of AA in order to comply with specutils requirements for fitting spectra
continuum: parameters of 1dGaussian continuum fit to stacked column 'spectrum'
rms: rms residuals around continuum fit (using only regions between fiber bundles)
apertures_initial: initial aperture centers returned by specutils find_lines_derivative
apertures_profile: contains parameters of fits to apertures detected in apertures_initial
'''
| mgwalkergit/spec | m2fs_apertures_initialize.py | m2fs_apertures_initialize.py | py | 2,693 | python | en | code | 0 | github-code | 13 |
14277672686 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
# Contact for more information about the Addon:
# Email: germano.costa@ig.com.br
# Twitter: wii_mano @mano_wii
# import bgl
import gpu
import numpy as np
from mathutils.geometry import interpolate_bezier
from mathutils import Matrix
import bmesh
import time
# Mesh / curve
TYP_OBJ = 1
TYP_ORIGIN = 2
TYP_BOUNDS = 4
TYP_INSTANCE = 8
USE_PRIMITIVE_BUFFER = True
vert_3d = '''
uniform mat4 MVP;
in vec3 pos;
in float primitive_id;
out float primitive_id_var;
void main()
{
primitive_id_var = primitive_id;
gl_Position = MVP * vec4(pos, 1.0);
}
'''
primitive_id_frag = '''
uniform float offset;
in float primitive_id_var;
out vec4 FragColor;
vec4 cast_to_4_bytes(float f){
vec4 color;
color.a = float(int(f)%256);
color.b = float(int(f/256)%256);
color.g = float(int(f/65536)%256);
color.r = float(int(f/16581376)%256);
return color / 255.0;
}
void main()
{
FragColor = cast_to_4_bytes(offset + primitive_id_var);
}
'''
gl_version = '''
#version 330
'''
def interp_bezier(p0, p1, segs, resolution):
    """
    Bezier curve approximation.

    Append a polyline approximation of the bezier segment p0 -> p1 to *segs*
    (list of (x, y, z) tuples).  The segment's end point is left out so the
    next segment's start does not duplicate it.

    :param p0, p1: bezier spline points (expose .co and handle attributes)
    :param segs: output list, mutated in place
    :param resolution: subdivision count; 0 or two VECTOR handles -> straight
    """
    if (resolution == 0 or
        (p0.handle_right_type == 'VECTOR' and
         p1.handle_left_type == 'VECTOR')):
        # Straight segment: only the start point is emitted.
        segs.append(p0.co[0:3])
    else:
        seg = interpolate_bezier(p0.co,
                                 p0.handle_right,
                                 p1.handle_left,
                                 p1.co,
                                 resolution + 1)
        # NOTE(review): [:-2] drops the last TWO interpolated points; dropping
        # only the end point ([:-1]) would seem sufficient -- confirm intent.
        segs.extend([p[0:3] for p in seg[:-2]])
def get_bmesh_loose_edges(bm):
    """Collect endpoint coordinates of wire edges (edges with no face).

    Returns a flat list [v0.co, v1.co, v0.co, v1.co, ...] with two entries
    per wire edge, or None when the mesh has no wire edges.
    """
    # TODO: handle "visible" state
    coords = [vert.co
              for edge in bm.edges if edge.is_wire
              for vert in edge.verts]
    return coords if coords else None
def get_bmesh_loose_verts(bm):
    """Return coordinates of wire vertices, or None when there are none."""
    # TODO: handle "visible" state
    wire_coords = [vert.co for vert in bm.verts if vert.is_wire]
    if not wire_coords:
        return None
    return wire_coords
def get_bmesh_edges(bm):
    """Return (vertex coordinates, edge index pairs) for every edge.

    The first list holds one coordinate per bmesh vertex, the second one
    [i0, i1] pair per edge.  (None, None) is returned for an empty mesh.
    """
    coords = [vert.co for vert in bm.verts]
    if not coords:
        return None, None
    index_pairs = [[edge.verts[0].index, edge.verts[1].index]
                   for edge in bm.edges]
    return coords, index_pairs
def get_bmesh_verts(bm):
    """Return the coordinate of every vertex, or None for an empty mesh."""
    coords = [vert.co for vert in bm.verts]
    return coords if len(coords) > 0 else None
def get_bmesh_faces(bm):
    """Triangulate *bm* and return the arrays needed to draw its faces.

    Returns a 3-tuple:
        faces_co: one coordinate per bmesh vertex (shared by all triangles)
        faces_indexes: per source face, a list of [i0, i1, i2] triangles
        faces_center_normal: flat list [center, normal, center, normal, ...]
            with exactly two entries per source face, in face-index order
    or (None, None, None) when the mesh has no vertices.
    """
    # must triangulate !
    faces = bm.calc_loop_triangles()
    # each face is a triangle, with a ref to source face
    faces_tris = {f.index: [[]] for f in bm.faces}
    faces_center_normal = []
    for loop in faces:
        for l in loop:
            i = l.face.index
            if len(faces_tris[i][-1]) == 3:
                faces_tris[i].append([])
            # Record center/normal once per face, on its very first loop.
            # BUGFIX: the original compared the list itself to 0
            # ("if (faces_tris[i][-1]) == 0"), which is never true, so
            # faces_center_normal always stayed empty.
            if len(faces_tris[i]) == 1 and len(faces_tris[i][-1]) == 0:
                faces_center_normal.append(l.face.center)
                faces_center_normal.append(l.face.normal)
            faces_tris[i][-1].append(l.vert.index)
    # same as vertex cos
    faces_co = [v.co for v in bm.verts]
    # array of array of tris for each face
    faces_indexes = [[tri for tri in faces_tris[f.index]] for f in bm.faces]
    if len(faces_co) > 0:
        return faces_co, faces_indexes, faces_center_normal
    return None, None, None
def get_curve_arrays(curve):
    """Flatten a curve datablock into a list of segment endpoint coordinates.

    Builds one (x, y, z) pair per drawable segment, i.e. a flat list
    [a0, b0, a1, b1, ...] suitable for a 'LINES' batch, or None when the
    curve yields no segments.
    """
    # ~0.1s / 100k pts on POLY
    # NOTE(review): t is assigned but never used (leftover timing probe).
    t = time.time()
    segs = []   # (start_index, end_index) pairs into pos
    pos = []    # accumulated point coordinates across all splines
    k0 = 0      # index of the current spline's first point within pos
    for i, spl in enumerate(curve.splines):
        # limited support for nurbs
        if spl.type in {'POLY', 'NURBS'}:
            if len(spl.points) < 2:
                continue
            pos.extend([p.co[0:3] for p in spl.points])
        elif spl.type == 'BEZIER':
            pts = spl.bezier_points
            # limit resolution on huge curves
            if len(pts) < 2:
                continue
            elif len(pts) > 500:
                resolution = 0
            else:
                resolution = curve.resolution_u
            for j, p1 in enumerate(pts[1:]):
                # pts[j] is the point preceding p1 (enumerate starts at 0)
                interp_bezier(pts[j], p1, pos, resolution)
            if spl.use_cyclic_u:
                interp_bezier(pts[-1], pts[0], pos, resolution)
            else:
                pos.append(pts[-1].co[0:3])
        else:
            # fix issue #9 Nurbs curve crash blender
            continue
        # Link consecutive points of this spline; close the loop if cyclic.
        k1 = len(pos)
        segs.extend([(j, j + 1) for j in range(k0, k1 - 1)])
        if spl.use_cyclic_u:
            segs.append((k1 - 1, k0))
        k0 = k1
    if len(segs) < 1:
        return None
    # Expand index pairs into a flat endpoint-coordinate list.
    edges_co = []
    for i, (v0, v1) in enumerate(segs):
        edges_co.extend([pos[v0], pos[v1]])
    return edges_co
class _Object_Arrays:
    """Extract raw coordinate arrays (points / segments / triangles) from a
    blender object, depending on the requested snap element type *typ*.

    For TYP_ORIGIN / TYP_BOUNDS, *obj* is already a list of coordinates.
    For TYP_INSTANCE, faces are extracted too; otherwise only loose
    verts/edges (mesh) or spline segments (curve) are kept.
    """
    def __init__(self, obj, typ):
        self.points_co = None
        self.segs_co = None
        self.tris_co = None
        self.faces_indexes = None
        self.faces_center_normal = None
        # NOTE(review): is_mesh is initialised but never set True anywhere
        # in this file -- confirm whether it is still used by callers.
        self.is_mesh = False
        if typ == TYP_ORIGIN:
            self.points_co = obj
        elif typ == TYP_BOUNDS:
            self.points_co = obj
        elif typ == TYP_INSTANCE:
            if obj.type == "MESH":
                print(obj.name, typ)
                # @TODO: properly store isolated verts
                if obj.mode == "EDIT":
                    bm = bmesh.from_edit_mesh(obj.data)
                else:
                    bm = bmesh.new(use_operators=True)
                    bm.from_mesh(obj.data)
                # apply both rotation and scale
                bm.verts.ensure_lookup_table()
                bm.edges.ensure_lookup_table()
                bm.faces.ensure_lookup_table()
                # NOTE(review): get_bmesh_edges returns a (coords, indices)
                # tuple, while create_buffers expects a flat coordinate list
                # as used for the other branches -- verify this path.
                self.segs_co = get_bmesh_edges(bm)
                self.tris_co, self.faces_indexes, self.faces_center_normal = get_bmesh_faces(bm)
                self.points_co = get_bmesh_loose_verts(bm)
                if obj.mode != "EDIT":
                    bm.free()
            elif obj.type == "CURVE":
                self.segs_co = get_curve_arrays(obj.data)
        elif obj.type == 'CURVE':
            # non-instance (TYP_OBJ) curve: spline segments only
            self.segs_co = get_curve_arrays(obj.data)
        elif obj.type == 'MESH':
            # non-instance (TYP_OBJ) mesh: only loose verts / wire edges
            # print(obj.name, typ)
            # @TODO: properly store isolated verts
            if obj.mode == "EDIT":
                bm = bmesh.from_edit_mesh(obj.data)
            else:
                bm = bmesh.new(use_operators=True)
                bm.from_mesh(obj.data)
            # apply both rotation and scale
            bm.verts.ensure_lookup_table()
            bm.edges.ensure_lookup_table()
            bm.faces.ensure_lookup_table()
            self.points_co = get_bmesh_loose_verts(bm)
            self.segs_co = get_bmesh_loose_edges(bm)
            if obj.mode != "EDIT":
                bm.free()
    def __del__(self):
        # Drop the (potentially large) coordinate arrays eagerly.
        del self.segs_co
        del self.points_co
        del self.tris_co
class SnapBuffer:
    """Owns the GPU-side resources for one draw batch.

    *num* is the number of index entries (used for picking-offset
    bookkeeping); *vbo*, *ebo* and *batch* are the vertex buffer, index
    buffer and batch objects built from them.
    """
    def __init__(self, num, vbo, ebo, batch):
        self.num, self.vbo, self.ebo, self.batch = num, vbo, ebo, batch
    def __del__(self):
        # Release references in reverse creation order.
        del self.batch
        del self.ebo
        del self.vbo
class GPU_Indices:
    """Draw an object's primitives with a per-primitive id encoded as the
    fragment color, so that reading back a pixel identifies the picked
    point, segment or triangle.

    Class-level state shared by all instances: the id shader, the vertex
    format, and the current projection matrix P.
    """
    shader = gpu.types.GPUShader(vert_3d, primitive_id_frag)
    fmt = gpu.types.GPUVertFormat()
    fmt.attr_add(id="pos", comp_type='F32', len=3, fetch_mode='FLOAT')
    if USE_PRIMITIVE_BUFFER:
        fmt.attr_add(id="primitive_id", comp_type='F32', len=1, fetch_mode='FLOAT')
    P = Matrix()
    def create_buffers(self, co, batch_type, indices=None):
        """Upload *co* coordinates as a batch of *batch_type* primitives.

        When *indices* is None, they are generated assuming the coordinates
        are already laid out per-primitive (pairs for LINES, triples for
        TRIS).  Returns a SnapBuffer wrapping the GPU objects.
        """
        _len = len(co)
        # print("create_buffers",_len, self.fmt, batch_type, co, indices)
        _indices = indices
        if _indices is None:
            if batch_type == 'POINTS':
                _indices = np.arange(_len, dtype='i')
            elif batch_type == 'LINES':
                _indices = [(i, i + 1) for i in range(0, _len, 2)]
            else:
                _indices = [(i, i + 1, i + 2) for i in range(0, _len, 3)]
        vbo = gpu.types.GPUVertBuf(len=_len, format=self.fmt)
        vbo.attr_fill(id="pos", data=co)
        n_indices = len(_indices)
        if USE_PRIMITIVE_BUFFER:
            # One id per primitive, replicated for each of its vertices so
            # the flat-interpolated attribute reaches the fragment shader.
            if batch_type == 'POINTS':
                primitive_id = np.arange(_len, dtype='f4')
            elif batch_type == 'LINES':
                primitive_id = np.repeat(np.arange(n_indices, dtype='f4'), 2)
            else:
                primitive_id = np.repeat(np.arange(n_indices, dtype='f4'), 3)
            vbo.attr_fill(id="primitive_id", data=primitive_id)
            del primitive_id
        ebo = gpu.types.GPUIndexBuf(type=batch_type, seq=_indices)
        batch = gpu.types.GPUBatch(type=batch_type, buf=vbo, elem=ebo)
        del indices
        del _indices
        return SnapBuffer(n_indices, vbo, ebo, batch)
    def __init__(self, obj, typ):
        """Extract coordinate arrays from *obj* (see _Object_Arrays) and
        create one GPU batch per primitive kind that is present."""
        self.MVP = Matrix()
        # Global picking offset of this object's first primitive; set in draw().
        self.first_index = 0
        self.segs = None
        self.tris = None
        self.points = None
        self.typ = typ
        self._draw_points = False
        self._draw_segs = False
        self._draw_tris = False
        _arrays = _Object_Arrays(obj, typ)
        if _arrays.segs_co:
            self.segs = self.create_buffers(_arrays.segs_co, "LINES")
        if _arrays.points_co:
            self.points = self.create_buffers(_arrays.points_co, "POINTS")
        if _arrays.tris_co:
            self.tris = self.create_buffers(_arrays.tris_co, "TRIS", indices=_arrays.faces_indexes)
        # Keep the CPU-side arrays to resolve picked indices back to coords.
        self._arrays = _arrays
    def get_tot_elems(self):
        """Total number of pickable elements currently enabled for drawing."""
        tot = 0
        if self.draw_segs:
            tot += self.segs.num
        if self.draw_points:
            tot += self.points.num
        if self.draw_tris:
            tot += self.tris.num
        return tot
    @property
    def draw_segs(self):
        # Enabled AND buffers actually exist.
        return self._draw_segs and self.segs is not None
    @property
    def draw_points(self):
        return self._draw_points and self.points is not None
    @property
    def draw_tris(self):
        return self._draw_tris and self.tris is not None
    def set_draw_mode(self, draw_origins, draw_bounds, draw_segs, draw_tris):
        """Enable/disable primitive kinds according to the snap settings."""
        # Knots / segs / isolated edges
        self._draw_segs = draw_segs
        # origin / isolated verts / bounds / cursor
        self._draw_points = (
            ((self.typ & (TYP_OBJ | TYP_INSTANCE)) > 0 and draw_segs) or
            (self.typ == TYP_BOUNDS and draw_bounds) or
            (self.typ == TYP_ORIGIN and draw_origins)
        )
        # snap to faces of instances / ghost
        self._draw_tris = draw_tris
    @classmethod
    def set_ProjectionMatrix(cls, P):
        # Shared by all instances; combined with per-object MV in
        # set_ModelViewMatrix().
        cls.P = P
    def set_ModelViewMatrix(self, MV):
        self.MVP = self.P @ MV
    def draw(self, index_offset):
        """Draw all enabled batches, encoding ids starting at *index_offset*.

        Records *index_offset* as this object's first_index so picked ids
        can later be mapped back to a primitive.
        """
        self.first_index = index_offset
        self.shader.bind()
        self.shader.uniform_float("MVP", self.MVP)
        gpu.state.depth_mask_set(False)
        gpu.state.blend_set('NONE')
        gpu.state.line_width_set(1.0)
        if self.draw_segs:
            self.shader.uniform_float("offset", float(index_offset))
            self.segs.batch.draw(self.shader)
            # if USE_PRIMITIVE_BUFFER:
            #     index_offset += int(self.segs.num / 2)
            # else:
            index_offset += self.segs.num
        if self.draw_points:
            self.shader.uniform_float("offset", float(index_offset))
            self.points.batch.draw(self.shader)
            index_offset += self.points.num
        if self.draw_tris:
            self.shader.uniform_float("offset", float(index_offset))
            self.tris.batch.draw(self.shader)
            # if USE_PRIMITIVE_BUFFER:
            #     index_offset += int(self.tris.num / 3)
            # else:
            index_offset += self.tris.num
    def get_seg_co(self, index):
        """Return the two endpoint coordinates of picked segment *index*."""
        # if USE_PRIMITIVE_BUFFER:
        i = index * 2
        # else:
        # i = 2 * int(index / 2)
        if index + 2 > self.segs.num:
            # Out-of-range pick: clamp to the last stored segment.
            print("index", i, ">", self.segs.num)
            i = 2 * self.segs.num - 2
        return self._arrays.segs_co[i:i + 2]
    def get_points_co(self, index):
        """Return the coordinate of picked point *index*."""
        return self._arrays.points_co[index]
    def get_tris_co(self, index):
        """Return the three corner coordinates of picked triangle *index*."""
        # if USE_PRIMITIVE_BUFFER:
        i = index * 3
        # else:
        # i = 3 * int(index / 3)
        if index + 3 > self.tris.num:
            print("index", i, ">", self.tris.num)
            i = self.tris.num - 3
        return self._arrays.tris_co[i:i + 3]
    def get_faces_center_normal(self, index):
        """Return the [center, normal] pair stored for face *index*.

        faces_center_normal is a flat list with two entries per face, hence
        the *2 stride.
        """
        if USE_PRIMITIVE_BUFFER:
            i = index * 2
        else:
            i = 2 * int(index / 2)
        return self._arrays.faces_center_normal[i: i + 2]
    def __del__(self):
        del self._arrays
        pass
| Tilapiatsu/blender-custom_config | scripts/addon_library/local/slcad_transform/snap_context/drawing.py | drawing.py | py | 13,581 | python | en | code | 5 | github-code | 13 |
72523601297 | ##
## A keyboard layout analyzer , give it a file with all the keys and their relative pos
## The script will calculate how much you need to move your fingers in order to write words
##
# IMPORTS AND FILE READING #
import sys
# CLI arguments: argv[1] = keyboard layout file, argv[2] = sample text file.
keylayfile = sys.argv[1]
testwordfile = sys.argv[2]
# Module-level handles; both are consumed (and closed via 'with') in run().
testWords = open(testwordfile)
klay = open(keylayfile)
# Counters shared with distancetravelled(): keystrokes served by the right
# side of the keyboard, and total keystrokes.
nRightSide = 0
nWords = 0
# Function for calculating the steps your finger has to move to reach a key.
def distancetravelled(char, keyMatrix):
    """Return the minimum number of grid steps from a home key to *char*.

    The home keys are row 1, columns in ``defaultkeys``; distance is
    Manhattan distance on the layout grid, capped at 10.  Also updates the
    module-level counters ``nWords`` (keystrokes counted) and ``nRightSide``
    (keystrokes whose nearest home key is on the right half).

    BUGFIX: the original indexed ``poschar`` unconditionally, so any
    character absent from the layout (e.g. uppercase letters) raised an
    IndexError.  Such characters now cost 0 and are not counted.
    """
    global nRightSide, nWords
    poschar = []
    defaultkeys = [0, 1, 2, 3, 6, 7, 8]
    sumofstrokes = 10
    rightside = 0
    # Locate the character in the layout grid (row i, column k).
    for i, j in enumerate(keyMatrix):
        for k, l in enumerate(j):
            if l == char:
                poschar = [i, k]
    if not poschar:
        # Character not present in the layout: skip it entirely.
        return 0
    # Find the closest home key (home row is row index 1).
    for d in defaultkeys:
        if sumofstrokes > (abs(1 - poschar[0]) + abs(d - poschar[1])):
            sumofstrokes = (abs(1 - poschar[0]) + abs(d - poschar[1]))
            rightside = d
    if rightside > 3:
        nRightSide += 1
    nWords += 1
    return sumofstrokes
# Builds the layout matrix, walks the sample text and reports total finger
# travel plus the right-side share of keystrokes.
def run():
    totalstrokes = 0
    # Layout matrix: one row per layout line, one cell per character
    # (newlines included, so line breaks in the sample text match too).
    with klay as f:
        keyMatrix = [[c for c in line] for line in f]
    # 'word' is a list of lines of the sample text (f and testWords are the
    # same object here; 'with' just guarantees the handle gets closed).
    with testWords as f:
        word = [c for c in testWords]
    for i, j in enumerate(word):
        for k, l in enumerate(j):
            totalstrokes = totalstrokes + distancetravelled(l, keyMatrix)
    print("total times fingers moved: " + str(totalstrokes))
    # NOTE(review): despite the "%" label this prints a 0..1 ratio.
    print("left/right side heavy (%): " + str((nRightSide/nWords)))
run() | KarlssonLucas/keyboard-analyzer | KeyboardAnalyzer.py | KeyboardAnalyzer.py | py | 1,605 | python | en | code | 0 | github-code | 13 |
10952536972 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 17:45:57 2020
@author: lenovo
"""
from sparql import get_dataframe
from preprocessing import find_combinations,delete_null,sort_properties,correlation
import pandas as pd
import configparser
import os
CONFIG_FILE = "config.cfg"
# Read endpoint / query settings for the configured knowledge graph.
# NOTE(review): every statement below depends on the names defined inside
# this guard; if config.cfg is missing, the later pipeline fails with
# NameError -- presumably the whole body should live under this if.
if os.path.exists(os.path.join( os.getcwd(),CONFIG_FILE ) ):
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    KG = config.get("main", "KG")
    endpoint_url = config.get(KG, "endpoint_url")
    class_name = config.get(KG, "class_name")
    delete_ratio = config.get(KG, "delete_ratio")
    num_keywords = config.get(KG, "num_keywords")
    class_properties_query = config.get(KG, "class_properties_query")
    values_query1 = config.get(KG, "values_query1")
    values_query2 = config.get(KG, "values_query2")
# Pipeline: fetch property/value data, drop sparse columns, order the
# remaining properties, then score candidate key combinations.
properties_label,properties,data_final = get_dataframe(endpoint_url,class_name,class_properties_query,values_query1,values_query2)
print("step 1 done!")
data_final,nan_list,properties = delete_null(data_final,properties,delete_ratio)
print("step 2 done!")
properties = sort_properties(properties,nan_list)
print("step 3 done!")
#corr_scores, properties = correlation(properties,num_keywords)
#print(corr_scores)
print("step 4 done!")
com_list = find_combinations(properties)
print("step 5 done!")
print("-------------------------------------------------")
# For each property combination, count distinct concatenated value tuples
# (higher = closer to a unique key for the class).
result = [[],[]]
for col_1 in com_list:
    data_test = data_final[col_1]
    count = 0
    for i in data_test:
        count = count + 1
        if(count == 1):
            flag = i
        if(count>1):
            # NOTE(review): concatenation without a separator can collide
            # ("ab"+"c" == "a"+"bc"), slightly inflating uniqueness counts.
            data_test[flag] = data_test[flag].str.cat(data_test[i]) # Merge into string
    uniq = data_test[flag].nunique()
    result[0].append(str(col_1))
    result[1].append(str(uniq))
# Persist the (combination, distinct-count) table.
result_df = pd.DataFrame(result)
result_df = result_df.T
columns = ["properties","uniq"]
result_df.columns=columns
result_df.to_csv(str(class_name) +"_result.csv") | RohanYim/Entity-Reconciliation | main.py | main.py | py | 1,998 | python | en | code | 0 | github-code | 13 |
21293596179 | inherited_money = float(input())
# inherited_money (a float) is read from stdin on the line above this block.
final_year = int(input())
needed_money = 0
# Accumulate yearly living costs from 1800 through the target year:
# even years cost a flat 12000; odd years add an age surcharge of 50 per
# year of age (he turns 18 in 1800).
for year in range(1800, final_year + 1):
    if year % 2 == 0:
        needed_money += 12000
    else:
        needed_money += 12000 + 50 * (year - 1800 + 18)
if needed_money <= inherited_money:
    left_money = inherited_money - needed_money
    print(f"Yes! He will live a carefree life and will have {left_money:.2f} dollars left.")
else:
    not_enought_money = needed_money - inherited_money
    print(f"He will need {not_enought_money:.2f} dollars to survive.")
| SJeliazkova/SoftUni | Programming-Basic-Python/Exercises-and-Labs/For_Loop_More Exercises/01.Back_To_The_Past.py | 01.Back_To_The_Past.py | py | 550 | python | en | code | 0 | github-code | 13 |
8984328448 | import telebot
import os
from environs import Env
from datetime import datetime
import aio_parser as hh
from Task import Task
import markups
# Read environment variables (expects TOKEN in .env / process environment).
env = Env()
env.read_env()
# Main shared state: the bot client and the per-conversation task object.
bot = telebot.TeleBot(env('TOKEN'))
task = Task()
# handlers
@bot.message_handler(commands=['start'])
def welcome_message(message):
    # /start entry point: begin a new dialog only when no job is running,
    # then purge stale result images from previous sessions.
    if not task.isRunning:
        msg = bot.reply_to(
            message, f'Привет. Я hhkeys_bot. Приятно познакомиться, {message.from_user.first_name}. Для начала работы отправьте ключевое слово для желаемой профессии, чтобы узнать какие ключевые навыки хотят видеть работодатели. Например: python, back-end, маникюр или грузчик :)', )
        bot.register_next_step_handler(msg, askSkill)
        task.isRunning = True
        remove_png()
def askSkill(message):
    # Store the requested profession keyword, then ask for the page count.
    task.keySkill = message.text
    msg = bot.reply_to(message, 'Какое количество страниц поиска по ключевому слову Вы желаете охватить? (одна страница - 50 вакансий). Максимум 40 страниц, но это долго. Скорее всего, для понимания всей картины, Вам хватит 3-4 страницы')
    bot.register_next_step_handler(msg, askPages)
def askPages(message):
    # Validate the page count (positive integer); re-ask on bad input,
    # otherwise store it and move on to the top-N question.
    if not message.text.isdigit():
        msg = bot.reply_to(
            message, 'Количество страниц должно быть числом, введите снова')
        bot.register_next_step_handler(msg, askPages)
        return
    elif int(message.text) < 1:
        msg = bot.reply_to(
            message, 'Количество страниц должно быть числом больше 0 :)')
        bot.register_next_step_handler(msg, askPages)
        return
    task.numberOfSearchPages = int(message.text)
    msg = bot.reply_to(
        message, 'Топ-N. Сколько самых популярных ключевых навыков Вы хотите увидеть в результате?')
    bot.register_next_step_handler(msg, askTopN)
def askTopN(message):
    """Validate the top-N answer, run the scrape and send the result plot.

    Collects vacancy links, scrapes key skills, renders the top-N chart and
    sends it back to the chat, then marks the task finished.

    BUGFIX: the plot file handle was opened with ``open(...)`` and never
    closed; it is now managed by a ``with`` block.
    """
    if not message.text.isdigit():
        msg = bot.reply_to(
            message, 'Количество ключевых навыков должно быть числом, введите снова')
        bot.register_next_step_handler(msg, askTopN)
        return
    elif int(message.text) < 1:
        msg = bot.reply_to(
            message, 'Количество ключевых навыков должно быть числом больше 0 :)')
        bot.register_next_step_handler(msg, askTopN)
        return
    bot.send_chat_action(message.chat.id, 'upload_photo')
    task.topN = int(message.text)
    start = datetime.now()
    # Collect vacancy links, scrape their key skills and plot the top-N.
    links = hh.LinkCollector(
        task.keySkill,
        task.numberOfSearchPages,
        task.topN).get_links()
    scraper = hh.WebScraper(links, task.keySkill)
    skills = scraper.return_skills_list()
    skills_dataframe = hh.make_results(skills, task.topN)
    make_photo = hh.plot(skills_dataframe)
    with open(make_photo, 'rb') as photo:
        bot.send_photo(message.chat.id, photo,
                       reply_markup=markups.start_markup)
    end = datetime.now()
    total = end - start
    task.isRunning = False
    bot.send_message(
        message.chat.id, f'Задача выполнена за {total}. Чтобы начать заново, нажмите "/start"')
def remove_png():
    """Delete every .png file in the current working directory."""
    stale = [name for name in os.listdir('.') if name.endswith('.png')]
    for name in stale:
        os.remove(os.path.join('.', name))
bot.polling(none_stop=True)
| eddie-nero/hhkeys | bot.py | bot.py | py | 3,798 | python | ru | code | 0 | github-code | 13 |
33454556442 | import random
class Grafo:
    """SIR epidemic simulation over an undirected graph.

    Each vertex holds a state in {'S' (susceptible), 'I' (infected),
    'R' (recovered)}; one random vertex starts infected.  ``c`` is the
    contagion probability and ``r`` the recovery probability applied while
    traversing the graph in SIR()/DFSUtil().
    """
    # Constructor: receives the number of vertices and initialises the
    # adjacency lists, states and a random patient zero.
    def __init__(self, V):
        self.V = V
        self.adj = [[] for i in range(V)]
        self.estado = ['S' for i in range(V)]
        pacienteZero = random.randint(0, V - 1)
        self.estado[pacienteZero] = 'I'
        self.c = 0.7
        self.r = 0.3
        self.passos = 0
        self.saida_passos = []
    def step_count(self):
        """Append the current [S, I, R] population counts to saida_passos."""
        saida = [0,0,0] # [S, I, R]
        for i in self.estado:
            if i == 'S':
                saida[0] += 1
            elif i == 'I':
                saida[1] += 1
            else:
                saida[2] += 1
        self.saida_passos.append(saida)
    # Receives two vertices and creates an (undirected) edge between them.
    def add_edge(self, u, v):
        self.adj[u].append(v)
        self.adj[v].append(u)
    # Prints each vertex followed by its adjacency list.
    def print_adj_list(self):
        for node in range(self.V):
            print(node, '->', self.adj[node])
    def quantidadeNos(self):
        """Return the adjacency-list length (degree) of every vertex.

        BUGFIX: the original iterated over the nonexistent attribute
        ``self.nodes`` and raised AttributeError; it now walks the vertex
        indices 0..V-1.
        """
        quantidade = []
        for node in range(self.V):
            quantidade.append(len(self.adj[node]))
        return quantidade
    # Depth-first search step that also applies the SIR state transitions.
    def DFSUtil(self, temp, v, visited):
        self.step_count()
        # Mark the given vertex as visited.
        visited[v] = True
        x = random.uniform(0, 1)
        # If x falls within the recovery probability, the vertex recovers.
        if x <= self.r and self.estado[v] == 'I':
            self.estado[v] = 'R'
            if v < self.V - 1:
                self.DFSUtil(temp, (v + 1), visited)
        y = random.uniform(0, 1)
        # If y falls within the contagion probability, the vertex gets infected.
        if y <= self.c and self.estado[v] == 'S':
            self.estado[v] = 'I'
        # Save the vertex in the component list.
        temp.append(v)
        # gv (visited stack): tracks the path taken by the iterative DFS.
        gv = []
        gv.append(v)
        # Repeat the process for all adjacent vertices.
        while (len(gv) != 0):
            for i in self.adj[gv[len(gv)-1]]:
                if visited[i] == False:
                    gv.append(i)
                    visited[i] = True
                    x = random.uniform(0, 1)
                    # Recovery check for the neighbour.
                    if x <= self.r and self.estado[i] == 'I':
                        self.estado[i] = 'R'
                        if i < self.V - 1:
                            self.DFSUtil(temp, (i + 1), visited)
                    y = random.uniform(0, 1)
                    # Contagion check for the neighbour.
                    if y <= self.c and self.estado[v] == 'S':
                        self.estado[i] = 'I'
                    temp.append(i)
            del(gv[len(gv)-1])
        return temp
    # Runs the simulation over every connected component of the graph.
    def SIR(self):
        # Vector of visited vertices.
        visited = []
        # Initialise the visited vector with all values False.
        for i in range(self.V):
            visited.append(False)
        # For each unvisited vertex, run the depth-first search.
        for v in range(self.V):
            if visited[v] == False:
                temp = []
                self.DFSUtil(temp, v, visited)
2212925840 | # Two opposite co -ordiantes of rectangle are given and check whether two triangle overlap each other or not.
"""Input:
L1=(0,2)
R1=(1,1)
L2=(-2,-3)
R2=(0,2)
Output:
0
"""
def is_overlap(l1, r1, l2, r2):
    """Return 1 if two axis-aligned rectangles overlap, else 0.

    l1/l2 are top-left corners (smallest x, largest y), r1/r2 are
    bottom-right corners.  Rectangles that merely touch count as
    overlapping.
    """
    separated_x = l1[0] > r2[0] or r1[0] < l2[0]
    separated_y = l1[1] < r2[1] or r1[1] > l2[1]
    return 0 if (separated_x or separated_y) else 1
if __name__ == "__main__":
    # Read all eight coordinates from one whitespace-separated stdin line:
    # l1x l1y r1x r1y l2x l2y r2x r2y.
    ordinates = list(map(int,input().split()))
    l1 = [ordinates[0],ordinates[1]]
    r1 = [ordinates[2],ordinates[3]]
    l2 = [ordinates[4],ordinates[5]]
    r2 = [ordinates[6],ordinates[7]]
    # 1 = rectangles overlap, 0 = disjoint.
    res = is_overlap(l1, r1, l2, r2)
    print(res)
| 1809mayur/6Companies30DayChallenge | goldman/2.py | 2.py | py | 649 | python | en | code | 0 | github-code | 13 |
21410990578 | from collections import OrderedDict
class LRUCache:
    """Least-recently-used cache built directly on OrderedDict.

    get/put are O(1): ``move_to_end`` refreshes recency and
    ``popitem(last=False)`` evicts the oldest entry.
    """
    def __init__(self, capacity: int):
        self._capacity = capacity
        self._store = OrderedDict()
    def get(self, key: int) -> int:
        """Return the cached value (refreshing recency), or -1 on a miss."""
        try:
            value = self._store[key]
        except KeyError:
            return -1
        self._store.move_to_end(key)
        return value
    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the LRU entry when at capacity."""
        if key in self._store:
            self._store[key] = value
            self._store.move_to_end(key)
            return
        if len(self._store) == self._capacity:
            self._store.popitem(last=False)
        self._store[key] = value
class LRUCache:
    """LRU cache backed by a hash map plus an explicit doubly linked list.

    The dict maps keys to list nodes; the list records usage order between
    two sentinel nodes (head side = most recent, tail side = least recent),
    so every relink touches a fixed number of pointers.
    """
    class Node:
        def __init__(self, key, val):
            self.key = key
            self.val = val
            self.prev = None
            self.next = None
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.size = 0
        self.cache = {}  # key -> Node
        # Sentinels: real entries always live strictly between them.
        self.head = LRUCache.Node(0, 0)
        self.tail = LRUCache.Node(0, 0)
        self.head.next = self.tail
        self.tail.prev = self.head
    def get(self, key: int) -> int:
        """Return the value for *key* (refreshing recency), or -1 on a miss."""
        node = self.cache.get(key)
        if node is None:
            return -1
        self._move_to_head(node)
        return node.val
    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the LRU entry when over capacity."""
        node = self.cache.get(key)
        if node is not None:
            node.val = value
            self._move_to_head(node)
            return
        fresh = LRUCache.Node(key, value)
        self.cache[key] = fresh
        self._add_node(fresh)
        self.size += 1
        if self.size > self.capacity:
            dropped = self._pop_tail()
            del self.cache[dropped.key]
            self.size -= 1
    def _add_node(self, node):
        """Splice *node* in right after the head sentinel."""
        after = self.head.next
        node.prev, node.next = self.head, after
        after.prev = node
        self.head.next = node
    def _remove_node(self, node):
        """Unlink *node* from wherever it sits in the list."""
        node.prev.next = node.next
        node.next.prev = node.prev
    def _move_to_head(self, node):
        """Refresh *node* to the most-recently-used position."""
        self._remove_node(node)
        self._add_node(node)
    def _pop_tail(self):
        """Detach and return the least-recently-used node."""
        lru = self.tail.prev
        self._remove_node(lru)
        return lru
| stevenjst0121/leetcode | 146_lru_cache.py | 146_lru_cache.py | py | 2,948 | python | en | code | 0 | github-code | 13 |
10759058533 | from pymongo import MongoClient
from bson.code import Code
import re
#function to connect to the MongoDB
def get_db(db_name):
client = MongoClient('localhost:27017')
db = client[db_name]
return db
#function to index data in the database (multiple single field indexes)
def create_indices(db, collection, fields):
for field in fields:
db[collection].create_index(field)
#function to retrieve the names of all fields in the database
def get_fields(db, collection):
fields = []
#map_reduce is used to retrieve the keys
reduce = Code("function(key, stuff) { return null; }")
map = Code("function() {for (var key in this) { emit(key, null); }}")
mr = db[collection].map_reduce(map, reduce, collection + "_keys")
for doc in mr.find():
fields.append(doc['_id'])
#going into nested fields. should later be done recursively, if other nested fields can be found in the database
map_address = Code("function() {for (var key in this.address) { emit(key, null); }}")
mr_address = db[collection].map_reduce(map_address, reduce, collection + "_keys")
for doc in mr_address.find():
fields.append('address.' + doc['_id'])
map_created = Code("function() {for (var key in this.created) { emit(key, null); }}")
mr_created = db[collection].map_reduce(map_created, reduce, collection + "_keys")
for doc in mr_created.find():
fields.append('created.' + doc['_id'])
#saving feld names to file
with open('Output\\' + collection + '_fields.txt', 'w') as f:
for field in fields:
f.write(unicode(field + '\n').encode("utf-8"))
return(len(fields))
#This function searches the values for certain characters to filter possible unclean entries
def key_type(data):
#The code checks whether the k-attribute in the tag-tag contains elements that are described by the regular expressions above and increments the respective counts in the keys-dictionary.
keys = keys = {"lower": 0, "lower_colon": 0,'space': 0, "problemchars": 0, "numbers": 0}
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
space = re.compile(r'[ ]')
numbers = re.compile(r'[0-9]')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\.\t\r\n]')
for k in data:
if lower.search(k) != None:
keys['lower'] = keys['lower'] + 1
if lower_colon.search(k) != None:
keys['lower_colon'] = keys['lower_colon'] + 1
if problemchars.search(k) != None:
keys['problemchars'] = keys['problemchars'] + 1
#print(k)
#Spaces were one of the most common problemchars. But they belong into the street names and thus do not indicate bad entries
if space.search(k) != None:
keys['space'] = keys['space'] + 1
if numbers.search(k) != None:
keys['numbers'] = keys['numbers'] + 1
return keys
#Function to retrieve the values stored in given fields. Fields have to be given as list.
def get_values(db, collection, fields):
values = []
for field in fields:
values = values + db[collection].distinct(field)
with open(collection + '_' + field+ '.txt', 'w') as f:
for value in values:
f.write(unicode(value + '\n').encode("utf-8"))
print(key_type(values))
return values
#Function to update multiple given values of a certain field
def update_db(db, collection, field, new_values):
for i in new_values:
for k in new_values[i]:
db[collection].update({field:k},{'$set':{field:new_values[i][k]}})
#Function to move certain documents to another collection
def move_db(db, collection, new_collection, field, value):
# Moves all documents possesing the given fielf
if value == None:
for record in db[collection].find({field : {'$exists' : 'true'}}):
db[new_collection].insert(record)
db[collection].remove({field : {'$exists' : 'true'}})
# Documents to move have to have the given field with the given value
else:
for record in db[collection].find({field:value}):
db[new_collection].insert(record)
db[collection].remove({ field : value }) | tdraebing/Data_Analyst_Nanodegree-Project3 | src/Project/db_ops.py | db_ops.py | py | 4,295 | python | en | code | 0 | github-code | 13 |
73677704659 | import numpy as np
from gensim.models import KeyedVectors
import gensim
import random
import read_config
import sys
import glob
import os
import json
from gensim.models import Word2Vec
from scipy import stats
import sys
import math
def word_assoc(w,A,B,embedding):
    """
    Calculates the difference in mean cosine similarity between a word and
    two sets of words.

    :param w: target word (must be present in *embedding*)
    :param A, B: the two attribute word lists
    :param embedding: gensim KeyedVectors-like object with n_similarity()
    :return: s(w, A) - s(w, B), the WEAT per-word association
    """
    return embedding.n_similarity([w],A) - embedding.n_similarity([w],B)
def diff_assoc(X,Y,A,B,embedding):
    """
    Calculates the WEAT effect size for four sets of words in an embedding:
    the difference between the mean word associations of target sets X and
    Y (against attribute sets A and B), normalised by the pooled standard
    deviation of all per-word associations.
    """
    word_assoc_X = np.array(list(map(lambda x : word_assoc(x,A,B,embedding), X)))
    word_assoc_Y = np.array(list(map(lambda y : word_assoc(y,A,B,embedding), Y)))
    mean_diff = np.mean(word_assoc_X) - np.mean(word_assoc_Y)
    std = np.std(np.concatenate((word_assoc_X, word_assoc_Y), axis=0))
    return mean_diff / std
def get_bias_scores_mean_err(word_pairs, embedding, n_samples=100):
    """Estimate the WEAT statistic with a sub-sampling permutation test.

    Parameters
    ----------
    word_pairs : dict with keys 'X', 'Y' (target word lists) and
        'A', 'B' (attribute word lists).
    embedding : word-vector model used via diff_assoc / n_similarity.
    n_samples : number of random sub-samples to draw; default 100
        (the historical hard-coded value, now a parameter).

    Returns
    -------
    (mean, sem) of the sampled WEAT scores.
    """
    # Use half of the smaller list of each pair so that sampling without
    # replacement is always possible for both members of the pair.
    subset_size_target = min(len(word_pairs['X']), len(word_pairs['Y'])) // 2
    subset_size_attr = min(len(word_pairs['A']), len(word_pairs['B'])) // 2
    bias_scores = []
    for _ in range(n_samples):
        sX = np.random.choice(word_pairs['X'], subset_size_target, replace=False)
        sY = np.random.choice(word_pairs['Y'], subset_size_target, replace=False)
        sA = np.random.choice(word_pairs['A'], subset_size_attr, replace=False)
        sB = np.random.choice(word_pairs['B'], subset_size_attr, replace=False)
        bias_scores.append(diff_assoc(sX, sY, sA, sB, embedding))
    return np.mean(bias_scores), stats.sem(bias_scores)
def run_test(config, embedding):
    """Run one WEAT test described by *config* against *embedding*.

    *config* maps the group names 'X', 'Y' (targets) and 'A', 'B'
    (attributes) to word lists; any other keys are ignored.  Words missing
    from the embedding, or with an all-zero vector, are dropped first.

    Returns (mean, standard error) of the sampled WEAT statistic, or
    (None, None) if any group ends up with fewer than two usable words.
    """
    word_pairs = {}
    min_len = sys.maxsize  # NOTE(review): unused -- leftover from an earlier version?
    # Only include words that are present in the word embedding
    for word_list_name, word_list in config.items():
        if word_list_name in ['X', 'Y', 'A', 'B']:
            # Keep only words the embedding knows whose vector is non-zero.
            word_list_filtered = list(filter(lambda x: x in embedding and np.count_nonzero(embedding[x]) > 0, word_list))
            word_pairs[word_list_name] = word_list_filtered
            if len(word_list_filtered) < 2:
                print('ERROR: Words from list {} not found in embedding\n {}'.\
                    format(word_list_name, word_list))
                print('All word groups must contain at least two words')
                return None, None
    return get_bias_scores_mean_err(word_pairs,embedding)
def load_embedding(embed_path):
    """Load a word embedding, dispatching on the file-name suffix.

    'wv'  -> a saved gensim KeyedVectors object;
    'txt' -> word2vec text format; 'bin' -> word2vec binary format;
    anything else -> a full Word2Vec model (e.g. the reddit embedding,
    which is saved as model + syn1neg + syn0 with no extension).
    """
    if embed_path.endswith(('txt', 'bin')):
        return KeyedVectors.load_word2vec_format(
            embed_path, binary=embed_path.endswith('bin'))
    if embed_path.endswith('wv'):
        return KeyedVectors.load(embed_path)
    return Word2Vec.load(embed_path)
if __name__ == '__main__':
    # CLI: python weat.py config.json [results_file]
    if len(sys.argv) < 2:
        print('usage: python weat.py config.json results_file=config_results.json')
        sys.exit(1)
    fname = sys.argv[1]
    if len(sys.argv) > 2:
        results_file = sys.argv[2]
    else:
        results_file = 'results_' + fname
    results = {}
    config = read_config.read_json_config(fname)
    for e_name, e in config['embeddings'].items():
        results[e_name] = {}
        if not isinstance(e, dict):
            # Single embedding: e is a path.
            print('loading embedding {}...'.format(e_name))
            try:
                embedding = load_embedding(e)
            except Exception:
                # Keep going: one missing/corrupt embedding should not
                # abort the whole batch run.
                print('could not load embedding {}'.format(e_name))
                continue
            for name_of_test, test_config in config['tests'].items():
                mean, err = run_test(test_config, embedding)
                print('mean: {} err: {}'.format(mean, err))
                if mean is not None:
                    results[e_name][name_of_test] = (round(mean, 4), round(err, 4))
        else:
            # Time-series embeddings: e maps a time label to a path.
            print('loading time series embeddings...')
            for time, embed_path in e.items():
                results[e_name][time] = {}
                embedding = load_embedding(embed_path)
                for name_of_test, test_config in config['tests'].items():
                    print(name_of_test)
                    mean, err = run_test(test_config, embedding)
                    print('mean: {} err: {}'.format(mean, err))
                    if mean is not None:
                        results[e_name][time][name_of_test] = (round(mean, 4), round(err, 4))
    # BUG FIX: json.dump writes str, so the output file must be opened in
    # text mode; the original 'wb' raised TypeError under Python 3.
    with open(results_file, 'w') as outfile:
        json.dump(results, outfile)
| hljames/compare-embedding-bias | weat.py | weat.py | py | 4,704 | python | en | code | 13 | github-code | 13 |
24662487723 | # TODO: make a 2nd run for the other bank stand
import pyautogui as py
import time
import random
import sys
import tkinter
from tkinter import messagebox
import ctypes
py.FAILSAFE = True
# Variables for randomly clicking a tab (inv, skills, equip, etc.)
xleft = 1580
xright = 1890
yup = 523
ydown = 568
# Bank dimensions
xleft1 = 1038
xright1 = 1071
yup1 = 477
ydown1 = 516
# Portable dimensions
xleft2 = 811
xright2 = 936
yup2 = 278
ydown2 = 365
# "Go to friends house" option menu
xleft3 = 273
xright3 = 504
yup3 = 895
ydown3 = 908
# left to right portal
xleft7 = 1724
xright7 = 1758
yup7 = 527
ydown7 = 562
#
xleft4 = 1724
xright4 = 1758
yup4 = 527
ydown4 = 562
# Select r1c1 randomly
xleft5 = 1633
xright5 = 1657
yup5 = 594
ydown5 = 617
# Select portal leaving
xleft5 = 1008
xright5 = 1014
yup5 = 545
ydown5 = 694
# r1c1
xleft6 = 1631
xright6 = 1659
yup6 = 596
ydown6 = 616
# Variables for program information
iterations = 0 # Count how many iterations
start_time = time.time() # Calculate time passed
# TODO: make the starting number a user input
# TODO: make sure valid answers are given -> https://stackoverflow.com/questions/23294658/asking-the-user-for-input-until-they-give-a-valid-response
# Financials
#buysteelbar =
#sellcannonball =
#invprofit =
x = 1 # Code loop
while True:
# Variables for clock and time management
clock1 = float(random.uniform(.762, .827)) # Standard move between clicks
clock2 = float(random.uniform(.352, .427)) # Custom deviation of clock1
waitsteelbars = float(random.uniform(158, 163.1))
runsleep = float(random.uniform(7.2, 7.4)) # Run = 6 | Walk = 12
walksleep = float(random.uniform(11.4, 12.6)) # Run = 6 | Walk = 12
processsleep = float(random.uniform(.98, 1.51))
# Click objects in game randomly
n = random.uniform(0, 100)
topbar = float(random.uniform(400, 1244))
xtab = float(random.uniform(xleft, xright)) # selecting tab
ytab = float(random.uniform(yup, ydown))
xinvtab = float(random.uniform(xleft4, xright4)) # selecting tab
yinvtab = float(random.uniform(yup4, ydown4))
xbank = float(random.uniform(xleft1, xright1)) # selecting tab
ybank = float(random.uniform(yup1, ydown1))
xportable = float(random.uniform(xleft2, xright2)) # selecting tab
yportable = float(random.uniform(yup2, ydown2))
xportoptions = float(random.uniform(xleft3, xright3)) # selecting tab
yportoptions = float(random.uniform(yup3, ydown3))
xr1c1 = float(random.uniform(xleft6, xright6)) # selecting tab
yr1c1 = float(random.uniform(yup6, ydown6))
xportal2 = float(random.uniform(xleft5, xright5)) # selecting tab
yportal2 = float(random.uniform(yup5, ydown5))
py.click(x = topbar, y = 0) # Click the top of a window
time.sleep(1.3)
py.moveTo(xbank, ybank, duration=clock1) # Click bank
py.click()
py.moveTo(514, 176, duration=clock1) # Options selecting an item in bank r1c1
py.click(button = 'right')
py.moveTo(553, 356, duration=clock1) # Select all but one from options
py.click()
# Begin run
py.moveTo(1687, 149, duration=clock2)
py.click()
time.sleep(walksleep)
py.moveTo(1697, 181, duration=clock2)
py.click()
time.sleep(walksleep)
py.moveTo(1689, 169, duration=clock2)
py.click()
time.sleep(walksleep)
py.moveTo(1707, 142, duration=clock2)
py.click()
time.sleep(walksleep)
py.moveTo(xportable, yportable, duration=clock1) # Click the portal
py.click()
time.sleep(processsleep)
py.moveTo(xportoptions, yportoptions, duration=clock1) # Select friends house from options
py.click()
time.sleep(processsleep)
if iterations == 0:
input('enter portal, then press ANY KEY')
# playerhouse = input('enter POH name: ')
# py.press(playerhouse) # Enter poh name
# time.sleep(processsleep)
# py.press('enter')
else:
py.moveTo(445, 812, duration=clock1)
py.click()
time.sleep(1)
py.moveTo(1677, 228, duration=clock1) # Toggle run
py.click()
py.moveTo(xinvtab, yinvtab, duration=clock1) # Click inventory tab
py.click()
py.moveTo(xr1c1, yr1c1, duration=clock1)
py.click(button = 'right')
py.moveRel(xOffset=None, yOffset=62, duration=clock1)
py.click()
py.moveTo(897, 153, duration=clock1) # Click altar
py.click()
time.sleep(66)
m = float(random.uniform(0, 1))
if m > .70:
py.moveTo(xtab, ytab, duration=clock1/2)
py.click()
py.click(x = topbar, y = 0) # Click the top of a window
# Begin run back to bank
time.sleep(processsleep)
py.moveTo(1796, 197, duration=clock1) # Go to portal in house
py.click()
time.sleep(runsleep)
py.moveTo(996, 613, duration=clock1)
py.click()
time.sleep(runsleep)
py.moveTo(1904, 196, duration=clock1)
py.click()
time.sleep(runsleep)
py.moveTo(1908, 142, duration=clock1)
py.click()
time.sleep(runsleep)
py.moveTo(1908, 142, duration=clock1)
py.click()
time.sleep(runsleep)
py.moveTo(1877, 173, duration=clock1)
py.click()
time.sleep(runsleep)
py.moveTo(1677, 228, duration=clock1) # Toggle run
py.click()
# Progarm information
iterations += 1
profit = format(iterations * 252, ',d')
barsused = format(iterations * 28, ',d')
elapsed_time = ((time.time() - start_time))/60
print('Iterations:',iterations, '| Bones Used:',barsused, '| XP Gained:',profit, '| Minutes passed:',elapsed_time)
x += 1 # Repeat
########### END
# def waitbars(entertime):
# for i in range(entertime,0,-1):
# time.sleep(1)
# print(i) # Print a countdown of time remaining
## Styles: (~LINE 62)
## 0 : OK
## 1 : OK | Cancel
## 2 : Abort | Retry | Ignore
## 3 : Yes | No | Cancel
## 4 : Yes | No
## 5 : Retry | No
## 6 : Cancel | Try Again | Continue
| ryanmolin/MMORPGBots | Prayer - Altar Run.py | Prayer - Altar Run.py | py | 6,077 | python | en | code | 0 | github-code | 13 |
29165098394 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from functools import cache
from matplotlib import style
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
import time
from math import *
import function as func
style.use('dark_background')
def main():
    """Run the water-rocket simulation, report results to Discord, and plot.

    Simulation routines live in the local ``function`` module (imported as
    ``func``); this driver farms the heavy calls out to a process pool,
    prints a summary, then renders four static plots and an animation.
    """
    time_0 = time.time()
    n = 100000          # number of integration steps
    dt = 1/n            # fine time step for the physics integration
    dt_ = 1/25          # coarse time step used for the animation frames
    vfp = func.vfpwr_(n, func.Pr)
    ho = func.howr_(n)  # NOTE(review): unused below -- confirm it is needed
    # SECURITY: hardcoded Discord bot token and channel id committed to
    # source control -- this credential should be revoked and moved to an
    # environment variable or secrets store.
    token = "NzQ4MzMyNjU4MTQ0MTgyMzQz.X6chQA.JQvyyhi7toxdYrai8pnL-1i_9FY"
    id = '811258320467787828'
    # Run the independent simulations in parallel.
    executor = ProcessPoolExecutor(max_workers=10)
    p1 = executor.submit(func.hvfpwr_, n, func.Pr)
    p2 = executor.submit(func.highwr, vfp, dt)
    p3 = executor.submit(func.vvfpwr_, n, func.Pr)
    p4 = executor.submit(func.speedwr, vfp, dt)
    p5 = executor.submit(func.hmaxwr)
    hprop = p1.result()   # height during the propulsion phase
    h = p2.result()       # height during free flight (fine step)
    h_ = func.highwr(vfp, dt_)  # coarse-step heights for the animation
    vprop = p3.result()   # speed during the propulsion phase
    v = p4.result()       # speed during free flight
    # Time axes for the propulsion phase, free flight, and animation.
    tprop = np.linspace(0, func.tpwr, len(func.hvfpwr_(n, func.Pr)))
    t = np.linspace(func.tpwr, dt*len(h) + func.tpwr, len(h))
    t_ = np.linspace(func.tpwr, dt_*len(h_) + func.tpwr, len(h_))
    zo = np.zeros(len(h_))
    ymax = max(h)
    W = func.highCwr(1000)
    C_ = W[0]
    HI_ = W[1]
    R = p5.result()
    P = R[0]
    VFP = R[1]
    H = R[2]
    # Console summary of the configuration and headline results.
    print("empty rocket mass : ", func.m, " (kg)")
    print("fuel mass : ", func.mp_, " (kg)")
    print("pressure : ", func.Pr, " (Pa)")
    print("propulsion time : ", func.tpwr, " (s)")
    print("coefficient of drag(z) : ", func.Cz)
    print("frontal area : ", func.S, " (m^2)")
    print("\n")
    print("end propultion speed : ", vfp, " (m/s)")
    print("max height : ", ymax, " (m)")
    print("duration of free flight : ", dt*len(h), " (s)")
    # Notify a Discord channel that the computation finished.
    func.send_message(token, id, "calculation done for fuseeinator 2.0 at {0} and it took {1} seconds https://i.giphy.com/media/LmNwrBhejkK9EFP504/giphy.gif".format(datetime.now(), time.time() - time_0))
    # Pause until the user presses enter before opening the plots.
    input("\\\\")
    plt.ion()
    fig = plt.figure()
    # 3x3 grid: one 3D surface, three 2D plots, and one animation panel.
    ax1 = plt.subplot2grid((3,3), (0,0), rowspan=2, colspan= 2, projection = '3d')
    ax3 = plt.subplot2grid((3,3), (0,2), rowspan=1, colspan= 1)
    ax4 = plt.subplot2grid((3,3), (1,2), rowspan=1, colspan= 1)
    ax2 = plt.subplot2grid((3,3), (2,0), rowspan=1, colspan= 2)
    ax5 = plt.subplot2grid((3,3), (2,2), rowspan=1, colspan= 1)
    ax1.plot(VFP, P, H, color = 'yellow')
    ax1.set_xlabel('end propultion velocity (m/s)')
    ax1.set_ylabel('Pressure (Pa)')
    ax1.set_zlabel('max height (m)')
    ax1.set_title("height depending on end propulsion velocity and propulsion force [water rocket]")
    ax2.plot(C_, HI_, color = "red")
    ax2.set_xlabel('drag coefficient')
    ax2.set_ylabel('max height (m)')
    ax2.set_title("hmax(Cz) [water rocket]")
    ax3.plot(t, h, color = 'blue')
    ax3.plot(tprop, hprop, color = 'green')
    ax3.set_xlabel("time (s)")
    ax3.set_ylabel("height (m)")
    ax3.set_title("height [water rocket]")
    ax4.plot(t, v, color = 'red')
    ax4.plot(tprop, vprop, color = 'green')
    ax4.set_xlabel("time (s)")
    ax4.set_ylabel("speed (m/s)")
    ax4.set_title("speed [water rocket]")
    # Animate the rocket's altitude over the coarse time grid.
    i = 0
    while i < len(h_):
        ax5.clear()
        ax5.set_xlim([-1, 1])
        ax5.set_ylim([0, ymax + 50])
        ax5.scatter(zo[i], h_[i], color = 'green')
        ax5.set_title("time = {}" .format(t_[i]))
        if i + 2 == len(h_):
            # HACK: on the last frame, pause "forever" to keep the window open.
            dt = 100000
        i = i + 1
        plt.pause(dt)
| nobody48sheldor/fuseeinator2.0 | position_calculation/waterrocket.py | waterrocket.py | py | 3,600 | python | en | code | 1 | github-code | 13 |
72629944979 | ### IMPORTING PACKAGES
import pandas as pd
import numpy as np
from math import ceil
# Read excel from file locations
path1 = 'C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\Assignment details\\Company X - Order Report.xlsx'
path2 = 'C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\Assignment details\\Company X - Pincode Zones.xlsx'
path3 = 'C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\Assignment details\\Company X - SKU Master.xlsx'
path4 = 'C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\Assignment details\\Courier Company - Invoice.xlsx'
path5 = 'C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\Assignment details\\Courier Company - Rates.xlsx'
df_Orders = pd.read_excel(path1)
df_pincodes = pd.read_excel(path2)
df_sku = pd.read_excel(path3)
df_courierInvoice = pd.read_excel(path4)
df_courierRates = pd.read_excel(path5)
### CALULATE TOTAL WEIGHT per SKU, COD PRICE
df_Orders_sum = df_Orders.merge(df_sku, on='SKU', how='left')
df_Orders_sum['Net Weight(kg)'] = (df_Orders_sum['Order Qty']*df_Orders_sum['Weight (g)']/1000).round(2)
df_Orders_sum['Net Price'] = df_Orders_sum['Item Price(Per Qty.)']
for i in range(df_Orders_sum.shape[0]):
df_Orders_sum['Payment Mode'][i] = (0, 1)[ df_Orders_sum['Payment Mode'][i] == 'COD' ] ### mapping PREPAID-COD to 0-1 for calculative convinience
for i in range(df_Orders_sum.shape[0]):
if df_Orders_sum['Payment Mode'][i]: ### if PAYMENT MODE = 1, i.e COD
price = df_Orders_sum['Net Price'][i]
if price > 300:
df_Orders_sum['Net Price'][i] = 0.05*df_Orders_sum['Net Price'][i]
else:
df_Orders_sum['Net Price'][i] = 15
else: ### if PAYMENT MODE = 0, i.e PREPAID
df_Orders_sum['Net Price'][i] = 0
df_Orders_sum.head()
#TOTAL_WEIGHT and COD_CHARGES for each ORDER_ID
### ORDER_ID is the KEY to both the mapping
total_weights = {}
COD_charges = {}
for i in range(df_Orders_sum.shape[0]):
id = df_Orders_sum['ExternOrderNo'][i]
total_weights[id] = total_weights.get(id, 0) + df_Orders_sum['Net Weight(kg)'][i]
COD_charges[id] = COD_charges.get(id, 0) +df_Orders_sum['Net Price'][i]
##### Mapping PINCODES provided by COMPANY X to that of the COURIER COMPANY
pincode_map = {}
for i in range(df_pincodes.shape[0]):
pincode_map[df_pincodes['Customer Pincode'][i]] = df_pincodes['Zone'][i]
print(len(pincode_map))
df_courierInvoice['Zone by X'] = df_courierInvoice['Customer Pincode'].map(pincode_map)
df_courierInvoice.head()
### creating a Collective Order details consisting data from all the tables
df_order_details = pd.DataFrame()
df_order_details[['Order ID', 'AWB Number', 'Zone (Courier Company)', 'Zone (as per X)','Weight(courier comp.)', 'Billing Amount(Rs.)']] = df_courierInvoice[['Order ID', 'AWB Code', 'Zone', 'Zone by X', 'Charged Weight', 'Billing Amount (Rs.)']]
df_order_details['Weight (as per X)'] = df_order_details['Order ID'].map(total_weights)
df_order_details['COD charge'] = df_order_details['Order ID'].map(COD_charges)
df_order_details.head()
weight_slabs = {'a':0.25, 'b':0.50, 'c':0.75, 'd':1.25, 'e':1.50}
df_courierRates.set_index('Zone', inplace=True)
df_courierRates.head()
### calculate weight slabs for courier company
df_courier_slabs = df_order_details[['Order ID', 'Zone (Courier Company)', 'Weight(courier comp.)']]
df_courier_slabs['num_slabs'] = [0]*len(df_courier_slabs)
df_courier_slabs['weight_slabs_byCourier'] = [0]*len(df_courier_slabs)
for i in range(df_courier_slabs.shape[0]):
num = ceil(df_courier_slabs['Weight(courier comp.)'][i] / weight_slabs[df_courier_slabs['Zone (Courier Company)'][i]])
df_courier_slabs['num_slabs'][i] = num
df_courier_slabs['weight_slabs_byCourier'][i] = num * weight_slabs[df_courier_slabs['Zone (Courier Company)'][i]]
df_courier_slabs.head()
df_expected_charge_calc = df_order_details[['Order ID', 'Zone (as per X)', 'Weight (as per X)', 'COD charge']]
df_expected_charge_calc = df_expected_charge_calc.merge(df_courierInvoice[['Order ID', 'Type of Shipment']], on='Order ID', how='left')
df_expected_charge_calc['num_slabs'] = [0]*len(df_expected_charge_calc)
df_expected_charge_calc['weight_slabs'] = [0]*len(df_expected_charge_calc)
for i in range(df_expected_charge_calc.shape[0]):
num = ceil(df_expected_charge_calc['Weight (as per X)'][i] / weight_slabs[df_expected_charge_calc['Zone (as per X)'][i]])
df_expected_charge_calc['num_slabs'][i] = num
df_expected_charge_calc['weight_slabs'][i] = num * weight_slabs[df_expected_charge_calc['Zone (as per X)'][i]]
df_expected_charge_calc.head()
###calculating total expected fare
forward_charge, rto_charge = [], []
for i in range(df_expected_charge_calc.shape[0]):
id = df_expected_charge_calc['Zone (as per X)'][i].upper()
extra_slabs = df_expected_charge_calc['num_slabs'][i]-1
forward_charge.append(df_courierRates['Forward Fixed Charge'][id] + extra_slabs*df_courierRates['Forward Additional Weight Slab Charge'][id])
if df_expected_charge_calc['Type of Shipment'][i] == 'Forward charges':
rto_charge.append(0)
else:
rto_charge.append(df_courierRates['RTO Fixed Charge'][id] + extra_slabs*df_courierRates['RTO Additional Weight Slab Charge'][id])
df_expected_charge_calc['forward_charge'] = forward_charge
df_expected_charge_calc['rto_charge'] = rto_charge
df_expected_charge_calc['total_sum'] = df_expected_charge_calc['COD charge'] + df_expected_charge_calc['forward_charge'] + df_expected_charge_calc['rto_charge']
df_expected_charge_calc.head(10)
df_order_details = df_order_details.merge(df_expected_charge_calc[['Order ID', 'weight_slabs', 'total_sum']], on='Order ID', how='left')
df_order_details.head()
df_order_details = df_order_details.merge(df_courier_slabs[['Order ID', 'weight_slabs_byCourier']], on='Order ID', how='left')
df_order_details.head()
df_order_details['Difference'] = df_order_details['total_sum']-df_order_details['Billing Amount(Rs.)']
df_order_details.head()
### REORDERING COLUMNS
df_order_details = df_order_details.loc[:,['Order ID', 'AWB Number', 'Weight (as per X)', 'weight_slabs', 'Weight(courier comp.)', 'weight_slabs_byCourier',
'Zone (as per X)', 'Zone (Courier Company)', 'total_sum', 'Billing Amount(Rs.)', 'Difference']]
df_order_details.head()
### Preparing the SUMMARY DATAFRAME
count_corr, count_over, count_under = 0, 0, 0
amt_corr, amt_over, amt_under = 0, 0, 0
for i in range(df_order_details.shape[0]):
if df_order_details['Difference'][i] == 0:
count_corr += 1
amt_corr += df_order_details['Billing Amount(Rs.)'][i]
elif df_order_details['Difference'][i] > 0:
count_under += 1
amt_under += df_order_details['Difference'][i]
elif df_order_details['Difference'][i] < 0:
count_over += 1
amt_over += df_order_details['Difference'][i]
df_summary = pd.DataFrame()
df_summary[''] = ['Total Orders - Correctly Charged', 'Total Orders - Over Charged', 'Total Order - Under Charged']
df_summary['Count'] = [count_corr, count_over, count_under]
df_summary['Amount'] = [amt_corr, amt_over, amt_under]
df_summary.head()
### RENAMING THE COLUMNS OF DATAFRAME
df_order_details.columns.values[:] = ['Order ID', 'AWB Number', 'Total weight as per X (KG)', 'Weight slab as per X (KG)', 'Total weight as per Courier Company (KG)', 'Weight slab charged by Courier Company (KG)', 'Delivery Zone as per X', 'Delivery Zone charged by Courier Company', 'Expected Charge as per X (Rs.)', 'Charges Billed by Courier Company (Rs.)', 'Difference Between Expected Charges and Billed Charges (Rs.)']
df_order_details.head()
####saving order details to EXCEL files on drive
df_order_details.to_excel('C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\cointab.xlsx', index=False)
df_summary.to_excel('C:\\Users\\Harshit Jha\\Downloads\\Cointab - Asessment\\SUBMISSION\\summary.xlsx', index=False)
| jayesh-jha/courier_charges_analysis | analysis.py | analysis.py | py | 8,023 | python | en | code | 0 | github-code | 13 |
34016301075 | # test locally with: docker compose run --rm airflow-cli dags test atd_executive_dashboard_expenses_revenue
import os
from datetime import datetime, timedelta
from airflow.decorators import task
from airflow.models import DAG
from airflow.operators.docker_operator import DockerOperator
from utils.onepassword import get_env_vars_task
from utils.slack_operator import task_fail_slack_alert
DEPLOYMENT_ENVIRONMENT = os.getenv("ENVIRONMENT", "development")
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": datetime(2015, 12, 1),
"email_on_failure": False,
"email_on_retry": False,
"retries": 0,
"on_failure_callback": task_fail_slack_alert,
}
docker_image = "atddocker/atd-executive-dashboard:production"
REQUIRED_SECRETS = {
"SO_WEB": {
"opitem": "Socrata Key ID, Secret, and Token",
"opfield": "socrata.endpoint",
},
"SO_TOKEN": {
"opitem": "Socrata Key ID, Secret, and Token",
"opfield": "socrata.appToken",
},
"SO_SECRET": {
"opitem": "Socrata Key ID, Secret, and Token",
"opfield": "socrata.apiKeySecret",
},
"SO_KEY": {
"opitem": "Socrata Key ID, Secret, and Token",
"opfield": "socrata.apiKeyId",
},
"EXP_DATASET": {
"opitem": "Executive Dashboard",
"opfield": "datasets.Expenses",
},
"REV_DATASET": {
"opitem": "Executive Dashboard",
"opfield": "datasets.Revenue",
},
"BUCKET_NAME": {
"opitem": "Executive Dashboard",
"opfield": "s3.Bucket",
},
"AWS_ACCESS_KEY": {
"opitem": "Executive Dashboard",
"opfield": "s3.AWS Access Key",
},
"AWS_SECRET_ACCESS_KEY": {
"opitem": "Executive Dashboard",
"opfield": "s3.AWS Secret Access Key",
},
"BASE_URL": {
"opitem": "Microstrategy API",
"opfield": "shared.Base URL",
},
"PROJECT_ID": {
"opitem": "Microstrategy API",
"opfield": "shared.Project ID",
},
"MSTRO_USERNAME": {
"opitem": "Microstrategy API",
"opfield": "shared.Username",
},
"MSTRO_PASSWORD": {
"opitem": "Microstrategy API",
"opfield": "shared.Password",
},
}
with DAG(
dag_id="atd_executive_dashboard_expenses_revenue",
description="Downloads two Microstrategy Reports for Expenses and Revenue. \
Places the results as a CSV in a S3 bucket. \
Then publishes the data to a Socrata dataset",
default_args=default_args,
schedule_interval="00 11 * * *" if DEPLOYMENT_ENVIRONMENT == "production" else None,
dagrun_timeout=timedelta(minutes=120),
tags=["repo:atd-executive-dashboard", "socrata", "microstrategy"],
catchup=False,
) as dag:
env_vars = get_env_vars_task(REQUIRED_SECRETS)
t1 = DockerOperator(
task_id="download_microstrategy_reports",
image=docker_image,
api_version="auto",
auto_remove=True,
command=f"python finance-reports/rev_exp_report_to_s3.py",
environment=env_vars,
tty=True,
force_pull=True,
)
t2 = DockerOperator(
task_id="update_socrata",
image=docker_image,
api_version="auto",
auto_remove=True,
command=f"python finance-reports/mstro_reports_to_socrata.py",
environment=env_vars,
tty=True,
force_pull=False,
)
t1 >> t2
| cityofaustin/atd-airflow | dags/atd_executive_dashboard_expenses_revenue.py | atd_executive_dashboard_expenses_revenue.py | py | 3,420 | python | en | code | 2 | github-code | 13 |
# Data sets used by the demo call at the bottom of the file.
larger_set = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# BUG FIX: the original loop only rebound its loop variable
# ("element = str(element)"), which never modified the list; build a new
# list so the elements really become strings.
larger_set = [str(element) for element in larger_set]
smaller_set = ['a','b','c','d','e']
# det_set is the determining set: it decides how big each partition is.
# indet_set is wrapped around (modularly) to pad every partition.
def partition_so_many_times(det_set, indet_set, fraction):
    """Split det_set into *fraction* equal chunks and pad each chunk with
    the matching wrap-around slice of indet_set.

    Each combined test set is printed (preserving the original behaviour)
    and the full list of test sets is returned so callers can use the
    result programmatically (the original returned None).
    """
    word_count = len(det_set) // fraction
    test_sets = []
    for number in range(1, fraction + 1):
        start_point = word_count * (number - 1)
        end_point = word_count * number
        # The easy part: the slice of the determining set itself.
        test_set = list(det_set[start_point:end_point])
        # Map the absolute positions into the smaller list.  The original
        # subtracted len(indet_set) a computed number of times; plain
        # modulo is equivalent for non-negative indices.
        wrapped_start = start_point % len(indet_set)
        wrapped_end = end_point % len(indet_set)
        if wrapped_end < wrapped_start:
            # The slice wraps past the end of the smaller list: take the
            # tail, then the head.
            test_set += indet_set[wrapped_start:]
            test_set += indet_set[:wrapped_end]
        else:
            # NOTE(review): when wrapped_end == wrapped_start (chunk size a
            # multiple of len(indet_set)) this adds nothing, matching the
            # original code -- confirm that is the intended behaviour.
            test_set += indet_set[wrapped_start:wrapped_end]
        print("this is the test set for iteration %s" % number)
        print(test_set)
        test_sets.append(test_set)
    return test_sets
partition_so_many_times(larger_set,smaller_set,10) | vanshcsingh/indic-lang-development | tests/reiterating_partitioner.py | reiterating_partitioner.py | py | 2,015 | python | en | code | 0 | github-code | 13 |
14279052656 | #! /usr/bin/python3
""" The tao of unix programming, Python-adapted from github:globalcitizen/taoup
Quotes via:
https://raw.githubusercontent.com/globalcitizen/taoup/master/taoup
"""
import itertools
import os
import random
import re
import shutil
import sys
import textwrap
TAOFILE = os.path.expanduser('~/.taoup.txt')
QUOTES_URL = (
'https://raw.githubusercontent.com/globalcitizen/taoup/master/taoup'
)
def wrap(txt):
    """Wrap *txt* to the current terminal width (falls back to 80x20).

    A single TextWrapper instance is cached in the module-level global
    ``_wrapper`` and lazily created on first use; on later calls only its
    width is refreshed, so resizing the terminal is picked up.
    """
    global _wrapper
    width, height = shutil.get_terminal_size((80, 20))
    try:
        # Fast path: the wrapper already exists, just update its width.
        _wrapper.width = width
    except NameError:
        # First call: the global is not defined yet.
        _wrapper = textwrap.TextWrapper(width=width)
    return '\n'.join(_wrapper.wrap(txt))
def fetch_quotes(url=QUOTES_URL, encoding='utf-8'):
    """Yield quotes fetched from original repo at github:globalcitizen/taoup

    The upstream file is a Ruby script; every quote appears as a
    ``puts ...`` line whose quoted string fragments are extracted,
    unescaped and concatenated.  Raises ValueError on a non-200 response.
    """
    from urllib.request import urlopen
    with urlopen(url, timeout=5) as response:
        if response.status != 200:
            raise ValueError('{status} {reason}'.format(**response.__dict__))
        body = response.read()
    text = body.decode(encoding)
    # Matches one quoted fragment, allowing backslash-escaped quotes inside.
    quoted = re.compile(
        r'"(?:[^"\\]|\\.)+"' # double-quoted substrings
        '|'
        r"'(?:[^'\\]|\\.)+'" # single-quoted substrings
    )
    for line in text.splitlines():
        if not line.strip().startswith('puts '): # not a quote
            continue
        parts = quoted.findall(line)
        # Strip the surrounding quotes and unescape \" and \'.
        parts = (
            p[1:-1].replace(r'\"', '"').replace(r"\'", "'") for p in parts
        )
        quote = ''.join(parts).strip()
        if quote:
            yield quote
def write_quotes(quotes, append=False):
    """Write *quotes*, one per line, to TAOFILE; append rather than
    overwrite when *append* is true."""
    file_mode = 'a' if append else 'w'
    with open(TAOFILE, file_mode, encoding='UTF-8') as out:
        out.write(''.join(q + '\n' for q in quotes))
def random_item(iterable):
# http://nedbatchelder.com/blog/201208/selecting_randomly_from_an_unknown_sequence.html
# http://stackoverflow.com/questions/12128948/python-random-lines-from-subfolders/12134726#12134726
choice = None
for i, item in enumerate(iterable):
if random.randint(0, i) == 0:
choice = item
return choice
def yield_quotes(path):
    """Yield each non-blank line (newline included) from the file at *path*."""
    with open(path) as source:
        for line in source:
            if line.strip():
                yield line
# NOTE(review): this function shadows the builtin all() within this module.
def all(lines, jump=None):
    """print all non-blank lines in file, waiting for enter after each one

    *jump*, when given, is a predicate; lines it matches (section headers)
    are printed without a pause and without advancing the counter.
    Interactive: blocks on input() after every quote.
    """
    count = 0
    for line in lines:
        if jump and jump(line):
            print('\n', wrap(line))
        else:
            count += 1
            print(wrap('({:,}) {}'.format(count, line)))
            input('<enter>')
            # ANSI "cursor up" then spaces: visually erase the <enter> prompt.
            print('\033[A' + ' ' * len('<enter>'))
def usage(script):
    """Print the command-line help text for *script*."""
    help_text = (
        '{script} [random|all]\n'
        'The Tao of Unix Programming, lines from '
        'https://github.com/globalcitizen/taoup/\n'
    ).format(script=script)
    print(help_text)
def main(script='taoup', mode='random'):
    """Entry point: show one random quote, page through all, or print usage.

    On first run (no cache file) the quotes are fetched from the upstream
    repo and written to TAOFILE; afterwards they are read from the cache.
    """
    def is_header(line):
        # Section headers in the quote file are dashed rules.
        return line.startswith('-----')
    if not os.path.exists(TAOFILE):
        # No local cache yet: download and persist the quotes.
        quotes = list(fetch_quotes(QUOTES_URL))
        write_quotes(quotes)
    else:
        quotes = yield_quotes(TAOFILE)
    try:
        if mode == 'random':
            # Sample a single non-header quote from the stream.
            quote = random_item(q for q in quotes if not is_header(q))
            if quote:
                print(quote)
        elif mode == 'all':
            # Interactive paging through every quote (module-local all()).
            all(quotes, jump=is_header)
            print('Done, thank you!')
        else:
            usage(script)
            # Help requests exit 0; unknown modes exit 1.
            exit(0 if mode in {'-h', '--help'} else 1)
    except KeyboardInterrupt:
        if mode == 'all':
            print('\b\b', end='') # erase "^C" from terminal
        print('Okay, bye.')
if __name__ == '__main__':
    # First argv element is the script path; the rest become main()'s args.
    script, *args = sys.argv
    # Keep only the basename for the usage message.
    *_, script = os.path.split(script)
    main(script, *args)
| tilboerner/pytaoup | taoup.py | taoup.py | py | 3,770 | python | en | code | 1 | github-code | 13 |
21311912579 | # Python script for decompressing the baserom file.
import sys
# Address of the first file in the overlay table.
firstFileAddr = None
# Sizes of all the decompressed files combined.
rawSize = None
# List of files to skip.
skipFiles = []
# List of all addresses for the files.
fileAddrs = []
# List of all file sizes for the files.
fileSizes = []
# List of the new addresses for the files.
newFileAddrs = []
# List of all the decompressed file sizes.
newFileSizes = []
##### Search #####
def preprocess_find_match(signature):
    """Build the KMP partial-match (failure) table for *signature*.

    table[i] is the length of the longest proper prefix of
    signature[:i+1] that is also a suffix of it; find_match uses it to
    skip re-comparisons after a mismatch.
    """
    table = [0] * len(signature)
    prefix_len = 0
    pos = 1
    while pos < len(signature):
        if signature[pos] == signature[prefix_len]:
            # Extend the current prefix match.
            prefix_len += 1
            table[pos] = prefix_len
            pos += 1
        elif prefix_len:
            # Fall back to the next-shorter candidate prefix.
            prefix_len = table[prefix_len - 1]
        else:
            table[pos] = 0
            pos += 1
    return table
def find_match(input, signature):
    """KMP-search for *signature* inside *input*.

    Returns the index one past the END of the first occurrence (i.e. the
    position immediately after the matched signature), or -1 if absent.
    """
    table = preprocess_find_match(signature)
    matched = 0
    for scan in range(len(input)):
        # Fall back through the failure table until the current byte
        # matches (or we restart from zero).
        while matched and input[scan] != signature[matched]:
            matched = table[matched - 1]
        if input[scan] == signature[matched]:
            matched += 1
            if matched == len(signature):
                return scan + 1
    return -1
##### Decompression #####
def decompress(input, sizeCompressed):
    """Decompress an LZKN64 stream.

    *input* holds the compressed file (its 4-byte header is skipped here);
    *sizeCompressed* is the number of compressed bytes to consume.
    Returns only the written portion of the scratch buffer.
    """
    buffer = bytearray(0xFFFFFF) # Max file size for a compressed LZKN64 file.
    inPos = 4 # Offset in input file.
    bufPos = 0 # Offset in output file.
    while inPos < sizeCompressed:
        curCmd = input[inPos]
        inPos += 1
        if curCmd < 0x80: # Sliding window lookback and copy with length.
            # Python precedence: '+' binds tighter than '&', so this is
            # (input[inPos] + (curCmd << 8)) & 0x3FF -- a 10-bit distance.
            lookBackLength = input[inPos] + (curCmd << 8) & 0x3FF
            # Copy 2 + (curCmd >> 2) bytes from earlier output; the window
            # may overlap the write position (run-length style copy).
            for _ in range(2 + (curCmd >> 2)):
                buffer[bufPos] = buffer[bufPos - lookBackLength]
                bufPos += 1
            inPos += 1
        elif curCmd < 0xA0: # Raw data copy with length.
            # Low 5 bits give the count of literal bytes to copy through.
            for _ in range(curCmd & 0x1F):
                buffer[bufPos] = input[inPos]
                bufPos += 1
                inPos += 1
        elif curCmd <= 0xFF: # Write specific byte for length.
            value = 0
            length = 2 + (curCmd & 0x1F)
            if curCmd == 0xFF:
                # 0xFF: explicit run length comes from the next byte.
                length = 2 + input[inPos]
                inPos += 1
            elif curCmd < 0xE0:
                # 0xA0-0xDF: run value comes from the next byte.
                value = input[inPos]
                inPos += 1
            # 0xE0-0xFE fall through with value 0 (zero fill).
            for _ in range(length):
                buffer[bufPos] = value
                bufPos += 1
        else:
            # Unreachable for byte-valued input (curCmd <= 0xFF always holds).
            inPos += 1
    return buffer[:bufPos]
# Decompression code modified to just increment the position counters.
def decompress_get_len(input, sizeCompressed):
inPos = 4 # Offset in input file.
bufPos = 0 # Offset in output file.
while inPos < sizeCompressed:
curCmd = input[inPos]
inPos += 1
if curCmd < 0x80: # Sliding window lookback and copy with length.
for _ in range(2 + (curCmd >> 2)):
bufPos += 1
inPos += 1
elif curCmd < 0xA0: # Raw data copy with length.
for _ in range(curCmd & 0x1F):
bufPos += 1
inPos += 1
elif curCmd <= 0xFF: # Write specific byte for length.
length = 2 + (curCmd & 0x1F)
if curCmd == 0xFF:
length = 2 + input[inPos]
inPos += 1
elif curCmd < 0xE0:
inPos += 1
for _ in range(length):
bufPos += 1
else:
inPos += 1
return bufPos
##### Modify Overlay Table #####
def copy_buffer(input, output):
    """Copy all of *input* into the start of *output*; returns *output*."""
    output[:len(input)] = input
    return output
def copy_buffer_from_pos_with_len(input, output, pos, len):
    """Copy *len* bytes starting at offset *pos* of *input* into the start
    of *output*; returns *output*."""
    chunk = input[pos:pos + len]
    output[0:len] = chunk
    return output
def copy_buffer_to_pos_with_len(input, output, pos, len):
    """Copy the first *len* bytes of *input* into *output* at offset *pos*;
    returns *output*."""
    head = input[0:len]
    output[pos:pos + len] = head
    return output
def zero_out_buffer_from_pos_with_len(output, pos, len):
    """Zero *len* entries of *output* starting at index *pos*; returns
    *output*."""
    for offset in range(len):
        output[pos + offset] = 0
    return output
def get_compressed_file_addresses_and_sizes(input, tableAddr):
    """Scan the overlay table at `tableAddr` and record every file's address
    and compressed size.

    Appends to the module-global lists skipFiles, fileAddrs and fileSizes
    (declared elsewhere in this file) and sets the global firstFileAddr.
    Scanning stops at the first table entry whose 24-bit address is zero.
    """
    pos = 0
    # Each table entry is 4 bytes: a flag byte followed by a 24-bit address.
    fileAddr = int.from_bytes(
        input[tableAddr + pos + 1:tableAddr + pos + 4], byteorder='big')
    nextFileAddr = int.from_bytes(
        input[tableAddr + pos + 5:tableAddr + pos + 8], byteorder='big')
    global firstFileAddr
    firstFileAddr = fileAddr
    while fileAddr != 0:
        # Highest bit of address is not set, file is already decompressed.
        if input[tableAddr + pos] == 0:
            skipFiles.append(1)
            fileAddrs.append(fileAddr)
            # Size of an uncompressed file is the gap to the next entry.
            if (nextFileAddr - fileAddr) > 0:
                fileSizes.append(nextFileAddr - fileAddr)
            else:
                fileSizes.append(0)
        else:
            skipFiles.append(0)
            fileAddrs.append(fileAddr)
            # Headers of compressed files have their compressed sizes within them.
            fileSizes.append(int.from_bytes(
                input[fileAddr + 1:fileAddr + 4], byteorder='big'))
        pos += 4
        fileAddr = int.from_bytes(
            input[tableAddr + pos + 1:tableAddr + pos + 4], byteorder='big')
        nextFileAddr = int.from_bytes(
            input[tableAddr + pos + 5:tableAddr + pos + 8], byteorder='big')
def get_raw_file_sizes(input):
    """Append each file's decompressed size to the module-global newFileSizes.

    Reads the globals fileAddrs, fileSizes and skipFiles populated by
    get_compressed_file_addresses_and_sizes().
    """
    # Max file size for a compressed LZKN64 file.
    compressedBuf = bytearray(0xFFFFFF)
    for i in range(len(fileSizes)):
        # Scratch buffer is reused across iterations; only the first
        # fileSizes[i] bytes are fresh, and decompress_get_len reads no further.
        copy_buffer_from_pos_with_len(
            input, compressedBuf, fileAddrs[i], fileSizes[i])
        if skipFiles[i] != 1:
            # "Fake decompress" to get the length of the raw data.
            newFileSizes.append(decompress_get_len(
                compressedBuf, fileSizes[i]))
        else:
            # Already-decompressed files keep their stored size.
            newFileSizes.append(fileSizes[i])
def get_raw_file_addresses():
    """Lay the decompressed files out back-to-back starting at firstFileAddr.

    Appends each file's new address to the module-global newFileAddrs and
    stores the total decompressed span in the global rawSize.
    """
    cursor = firstFileAddr
    for i in range(len(fileAddrs)):
        newFileAddrs.append(cursor)
        cursor += newFileSizes[i]
    global rawSize
    rawSize = cursor - firstFileAddr
def write_raw_files(input, buffer, tableAddr):
    """Decompress every file into `buffer` at its relocated address and
    rewrite the overlay table entries to point at the new locations.

    Reads the module-global lists fileAddrs, fileSizes, skipFiles,
    newFileAddrs and newFileSizes.
    """
    # Max file size for a compressed LZKN64 file.
    fileBuf = bytearray(0xFFFFFF)
    for i in range(len(fileAddrs)):
        copy_buffer_from_pos_with_len(
            input, fileBuf, fileAddrs[i], fileSizes[i])
        if skipFiles[i] != 1:
            # NOTE(review): this call expects the 2-argument LZKN64 *stream*
            # decompressor, but this module also defines
            # decompress(input, tableAddr) further down, which shadows any
            # earlier definition of the same name -- verify which global
            # binding this resolves to at runtime.
            fileBuf = decompress(fileBuf, fileSizes[i])
        copy_buffer_to_pos_with_len(
            fileBuf, buffer, newFileAddrs[i], newFileSizes[i])
        # Write the new locations to the overlay table.
        buffer[tableAddr + (i * 4):tableAddr + (i * 4) +
               4] = newFileAddrs[i].to_bytes(4, 'big')
# Find the nearest power of two for the final ROM size. (https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2)
def get_new_file_size(size):
    """Round `size` up to the next power of two.

    Classic bit-smearing trick: copy the highest set bit into every lower
    position, then add one.
    """
    size -= 1
    for shift in (1, 2, 4, 8, 16):
        size |= size >> shift
    return size + 1
def decompress(input, tableAddr):
    """Rebuild the ROM with every overlay-table file decompressed in place.

    Runs the module's global-state pipeline in order (scan table, size files,
    relocate, write) and returns the new ROM, padded to a power-of-two size.

    NOTE(review): if an earlier stream-level `decompress(stream, size)` exists
    in this file, this definition shadows it -- confirm the binding used by
    write_raw_files().
    """
    buffer = bytearray(0x4000000) # 512Mbit (64Mbyte) is the maximum ROM size.
    buffer = copy_buffer(input, buffer)
    # List all of the file addresses and sizes in a table.
    get_compressed_file_addresses_and_sizes(input, tableAddr)
    # Get the decompressed file sizes.
    get_raw_file_sizes(input)
    # Get the decompressed file addresses.
    get_raw_file_addresses()
    # Clear the region the relocated files will occupy, then fill it.
    buffer = zero_out_buffer_from_pos_with_len(buffer, firstFileAddr, rawSize)
    write_raw_files(input, buffer, tableAddr)
    return buffer[:get_new_file_size(rawSize + firstFileAddr)]
def main():
    """CLI entry point: decompress the ROM at argv[1] into argv[2]."""
    input = open(sys.argv[1], "rb")
    output = open(sys.argv[2], "wb")
    inputBuf = input.read()
    # Locate the overlay table via the ASCII magic "Nisitenma-Ichigo"
    # (find_match is defined earlier in this file).
    tableAddr = find_match(
        inputBuf, b'\x4E\x69\x73\x69\x74\x65\x6E\x6D\x61\x2D\x49\x63\x68\x69\x67\x6F')
    output.write(decompress(inputBuf, tableAddr))
    input.close()
    output.close()
# Print usage help unless both CLI arguments were supplied.
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("LZKN64 decompression Script")
        print("")
        print("decompress.py input_file output_file")
        print(" input_file: Path to the ROM file for an LZKN64 compressed game.")
        print(" output_file: Path to the resulting decompressed ROM file.")
    else:
        main()
| Fluvian/mnsg | tools/decompress.py | decompress.py | py | 8,878 | python | en | code | 26 | github-code | 13 |
14938933858 | from deap import base, algorithms, creator, tools
from getFitness import getFitness
import random
import numpy as np
import libElitism
def geneticAlgorithm(X, y, target, crossover, selection, population, xprob, mutationprob, generations,elitism,real, *args, **kwargs):
    """GA feature-selection wrapper built on DEAP.

    Evolves bit-string individuals (one bit per column of X) and returns
    (hall_of_fame, final_population, logbook).

    target: fitness weighting -- "a", "as", "an" or "asn".
    crossover: 1 = one-point, 2 = two-point, 3-9 = uniform with indpb=crossover/100.
    selection: "r" roulette, "t" tournament, "b" best.
    elitism: when truthy, use the elitist evolution loop from libElitism.
    """
    # Fitness/Individual classes depend on how many objectives `target` implies.
    if (target == "a"):
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMax)
    elif (target == "as"):
        creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
        creator.create("Individual", list, fitness=creator.FitnessMulti)
    elif (target == "an"):
        creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
        creator.create("Individual", list, fitness=creator.FitnessMulti)
    elif (target == "asn"):
        creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0, -1.0))
        creator.create("Individual", list, fitness=creator.FitnessMulti)
    else:
        raise TypeError('Invalid Target Value')
    toolbox = base.Toolbox()
    # One random bit per feature column (a duplicate registration was removed).
    toolbox.register("attr_bool", random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat,creator.Individual, toolbox.attr_bool, n=len(X.columns))
    toolbox.register("population", tools.initRepeat, list,toolbox.individual)
    toolbox.register("evaluate", getFitness, X=X, y=y, target=target, real=real)
    if (crossover == 1):
        toolbox.register("mate", tools.cxOnePoint)
    elif (crossover == 2):
        toolbox.register("mate", tools.cxTwoPoint)
    elif(crossover > 2 and crossover < 10 ):
        # BUG FIX: DEAP's uniform crossover is tools.cxUniform (capital U);
        # tools.cxuniform raised AttributeError for crossover values 3-9.
        toolbox.register("mate", tools.cxUniform, indpb=crossover/100)
    else:
        raise TypeError('Value has to be between 1 and 9')
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    if(selection == "r"):
        toolbox.register("select", tools.selRoulette)
    elif(selection == "t"):
        # NOTE(review): tournsize equal to the whole population makes each
        # tournament effectively deterministic -- confirm this is intended.
        toolbox.register("select", tools.selTournament, tournsize=population)
    elif(selection == "b"):
        toolbox.register("select", tools.selBest)
    else:
        raise TypeError('Invalid Argument Value')
    pop = toolbox.population(n=population)
    hof = tools.HallOfFame(2 if elitism else generations * population)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    if elitism:
        pop, log = libElitism.eaSimpleWithElitism(pop, toolbox, cxpb=xprob, mutpb=mutationprob,
                                                  ngen=generations, stats=stats, halloffame=hof,
                                                  verbose=True)
    else:
        pop, log = algorithms.eaSimple(pop, toolbox, cxpb=xprob, mutpb=mutationprob,
                                       ngen=generations, stats=stats, halloffame=hof,
                                       verbose=True)
    return hof, pop, log
73389472337 | import re
import spacy
class Filter:
    """Regex/NER-based PII scrubber.

    Each remove_* method substitutes one category of personal data with a
    placeholder tag (e.g. <EMAIL>); filter() applies the categories enabled
    in the `options` dict. Named-entity methods require process() (spaCy)
    to have been called first.
    """

    def __init__(self):
        # spaCy pipeline used by process() for named-entity recognition.
        self.nlp = spacy.load('en_core_web_sm')
        # Entities found by the most recent process() call.
        self.ents = []

    def remove_postal_codes(self, text):
        """Replace a trailing 6-digit PIN code with a placeholder."""
        return re.sub("[1-9]{1}[0-9]{2}\\s{0,1}[0-9]{3}$", "<POSTAL INDEX NUMBER>", text)

    def remove_emails(self, text):
        """Replace e-mail addresses (outside HTML tags) with <EMAIL>."""
        return re.sub("(([\w-]+(?:\.[\w-]+)*)@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?))(?![^<]*>)",
                      "<EMAIL>", text)

    def remove_phone_numbers(self, text):
        """Replace 7/10-digit phone numbers (various separators) with a placeholder."""
        return re.sub("(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})", "<PHONE NUMBER>", text)

    def remove_urls(self, text):
        """Replace explicit-scheme URLs, then bare domain-style tokens, with <URL>."""
        text = re.sub("(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?", "<URL>",text)
        return re.sub("([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?", "<URL>",text)

    def remove_dates(self, text):
        """Replace numeric and spelled-out dates with placeholders.

        NOTE(review): numeric dates become "<DATUM> " while month-name dates
        become "<DATE> " -- confirm the inconsistent tag is intentional.
        """
        text = re.sub("(\d{4}[- /.]\d{2}[- /.]\d{,2})|(\d{2}[- /.]\d{2}[- /.]\d{,4})", "<DATUM> ", text)
        text = re.sub(
            "(\d{1,2}[^\w]{,2}(january|february|march|april|may|june|july|august|september|october|november|december|January|February|March|April|May|June|July|August|September|October|November|December)([- /.]{,2}(\d{4}|\d{2})){,1})(?P<n>\D)(?![^<]*>)",
            "<DATE> ", text)
        text = re.sub(
            "(\d{1,2}[^\w]{,2}(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)([- /.]{,2}(\d{4}|\d{2})){,1})(?P<n>\D)(?![^<]*>)",
            "<DATE> ", text)
        return text

    def remove_gender(self, text):
        """Replace gender words with <GENDER>."""
        return re.sub("(male|female|Male|Female|MALE|FEMALE)", '<GENDER>', text)

    def process(self, text):
        """Run spaCy NER over `text` and cache the entities for the NER-based removers."""
        doc = self.nlp(text)
        self.ents = list(doc.ents)

    def remove_address(self, text):
        """Replace GPE (geo-political) entities found by process() with <ADDRESS>."""
        for ent in self.ents:
            if ent.label_ == 'GPE':
                text = text.replace(ent.text, '<ADDRESS>')
        return text

    def remove_name(self, text):
        """Replace PERSON entities found by process() with <NAME>."""
        for ent in self.ents:
            if ent.label_ == 'PERSON':
                text = text.replace(ent.text, '<NAME>')
        return text

    def filter(self, text, options):
        """Apply every scrubber whose flag is truthy in `options` and return the result."""
        self.process(text)
        if options['removeName']:
            text = self.remove_name(text)
        if options['removeAddress']:
            text = self.remove_address(text)
        if options['removePostalCode']:
            text = self.remove_postal_codes(text)
        if options['removeEmail']:
            text = self.remove_emails(text)
        if options['removePhone']:
            text = self.remove_phone_numbers(text)
        if options['removeURL']:
            text = self.remove_urls(text)
        if options['removeDate']:
            text = self.remove_dates(text)
        if options['removeGender']:
            text = self.remove_gender(text)
        return text
| theanmolsharma/blurked-backend | filter.py | filter.py | py | 2,912 | python | en | code | 0 | github-code | 13 |
38256211822 | import unittest
from karabo.bound import Configurator, Hash, PythonDevice
from ..ImageApplyMask import ImageApplyMask
class ImageMask_TestCase(unittest.TestCase):
    """Smoke test: an ImageApplyMask Karabo device can be built and started."""
    def test_proc(self):
        # Instantiate the device through the bound-API configurator and start
        # its state machine; the test passes if no exception is raised.
        proc = Configurator(PythonDevice).create("ImageApplyMask", Hash(
            "Logger.priority", "WARN",
            "deviceId", "ImageMask_0"))
        proc.startFsm()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| European-XFEL/imageProcessor | src/imageProcessor/tests/image_mask_test.py | image_mask_test.py | py | 417 | python | en | code | 0 | github-code | 13 |
34552482446 | # coding=UTF-8
# importando os outros arquivos que complementam o código
from intermediario import *
from prepara_tabela import *
from prepara_treino_teste import *
from relatorio_executions import *
from datetime import datetime
class Principal(object):
    """Experiment driver: wires the SVM hyper-parameters, windowing options
    and train/test table selection together and runs the pipeline once per
    parameter combination (comments translated from Portuguese)."""
    def __init__(self, c, gamma, windowSize, windowPosition):
        '''Data information (required parameters)'''
        # name of the experiment, used to label the output directory
        experimento = 'Result'
        # SVM execution parameters
        # kernel type
        kernel = 'rbf'
        # C sweeps 0..1000 in steps of 50 (a single value is wrapped in a list here)
        c = [c]
        # RBF width sweeps 2^-5..2^5 with the exponent stepping by 1
        gamma = [gamma]
        # windowing parameters
        # window size
        tamanho_janela = [windowSize]
        # position of the window's label
        rotulo = [windowPosition]
        '''parâmetros obrigatórios para definir as tabelas de treino e teste'''
        tabela_treino = ['data.csv']
        tabela_teste = ['data.csv']
        '''parâmetro obrigatório para selecionar a estrátegia multiclasse para a svm'''
        # note: one-vs-all strategy - (classifier order) = D, P, H, S, R
        estrategia = 'um contra todos'
        '''parâmetro obrigatórios para selecionar as carcterísticas das tabelas'''
        # columns of the plain (un-windowed) table used as features (-1 means the last column, the label)
        colunas = [[0, 1, 2, -1]]
        '''parâmetros que servem apenas para nomear o arquivo txt (recomendo usar para ficar organizado a descrição da execução!!!'''
        # feature kind label, only used to name the output file
        tipo_caracteristica = [['espacial']]
        # feature label (e.g. velocity/acceleration) used to name the output file
        caracteristica = [['xyz']]
        # articulation-point label (e.g. hands/wrists) used to name the output file
        ponto_articulacao = [['rh','-1']]
        '''não precisa parâmetrizar a partir daqui!!!'''
        contador = 0
        for i, caracteristicas in enumerate(colunas):
            # gather the user-set train/test parameters into a dict
            dados_treino_teste = {
                'tabela_treino': tabela_treino,
                'tabela_teste': tabela_teste,
                'tipo_caracteristica': tipo_caracteristica[0],
                'caracteristica': caracteristica[0],
                'ponto_articulacao': ponto_articulacao[i]
            }
            for rotulo_posicao in rotulo:
                for janela in tamanho_janela:
                    # gather the user-set windowing parameters into a dict
                    dados_janelamento = {
                        'tamanho_janela': janela,
                        'rotulo': rotulo_posicao
                    }
                    # build the train/test split (prepara_treino_teste comes
                    # from a star import of prepara_treino_teste.py)
                    dados = prepara_treino_teste(
                        experimento,
                        dados_treino_teste['tabela_treino'],
                        dados_treino_teste['tabela_teste'],
                        dados_treino_teste['tipo_caracteristica'],
                        dados_treino_teste['caracteristica'],
                        dados_treino_teste['ponto_articulacao'],
                        dados_janelamento,
                        caracteristicas
                    )
                    for svm_c in c:
                        for svm_gamma in gamma:
                            # gather the user-set SVM parameters into a dict
                            parametros_svm = {
                                'kernel': kernel,
                                'c': svm_c,
                                'gamma': svm_gamma
                            }
                            contador += 1
                            # run one experiment; only the last run's result is kept
                            self.__resultado = intermediario(experimento,estrategia,parametros_svm,dados,dados_treino_teste,dados_janelamento,contador)
    def Fscore(self):
        """Return the multiclass F-score of the last executed run."""
        return self.__resultado['f_score_mc']
| Jallyssonmr/DollyTDC | TechDay/MachineLearning/Principal.py | Principal.py | py | 3,575 | python | pt | code | 0 | github-code | 13 |
6948587554 | from typing import *
from collections import defaultdict
class Solution:
    """LeetCode 1376: total time to inform all employees.

    Builds the manager->reports adjacency, then DFS from the head; the answer
    is the longest root-to-node chain of inform times.
    """
    def __init__(self):
        self.max_time = 0                       # best propagation time seen so far
        self.map1 = defaultdict(lambda: [])     # manager -> list of direct reports

    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:
        # Build the adjacency list, skipping the head itself (manager == -1).
        for employee, boss in enumerate(manager):
            if boss != -1:
                self.map1[boss].append(employee)
        self.dfs(headID, 0, informTime)
        return self.max_time

    def dfs(self, i, time, informTime):
        elapsed = time + informTime[i]
        if elapsed > self.max_time:
            self.max_time = elapsed
        for report in self.map1[i]:
            self.dfs(report, elapsed, informTime)
# Ad-hoc manual check; the chain 6->5->4->3->2->1 sums 1+2+3+4+5+6 = 21.
if __name__ == '__main__':
    n = 7
    headID = 6
    manager = [1, 2, 3, 4, 5, 6, -1]
    informTime = [0, 6, 5, 4, 3, 2, 1]
    sol=Solution()
    print(sol.numOfMinutes(n,headID,manager,informTime))
| Xiaoctw/LeetCode1_python | 搜索_回溯/通知所有员工所需的时间_1376.py | 通知所有员工所需的时间_1376.py | py | 883 | python | en | code | 0 | github-code | 13 |
19750878488 | """
kryptoxin VBA output module.
This module contains functions for the visual basic outputs
"""
from kryptoxin.core.toxin import Toxin
from kryptoxin.core.constants import JINJA_TEMPLATES_VBA
from kryptoxin.core.constants import JINJA_TEMPLATES_ACTSDIR
from kryptoxin.core.constants import JINA_TEMPLATES_FEXT, LANG_VBA
from kryptoxin.output import get_jinja_env
# Create Jinja2 environment variable
env = get_jinja_env()
# Actions templates directories string
tmpl_action_rpath = JINJA_TEMPLATES_VBA + JINJA_TEMPLATES_ACTSDIR
def render_load_asm(t: Toxin):
    """Render the VBA script that loads an assembly in memory and starts a
    thread through native Windows API calls.

    Pulls the base64 ciphertext, key material, IV and salt out of the toxin
    and feeds them to the "load-asm" Jinja template.
    """
    ciphertext = t.get_ciphertext(lang=LANG_VBA, width=50)
    key = t.key.decode()
    iv = fmt_vbadarray(t.get_iv_decarray())
    salt = fmt_vbadarray(t.get_salt_decarray())
    template_name = tmpl_action_rpath + "load-asm" + JINA_TEMPLATES_FEXT
    template = env.get_template(template_name)
    return template.render(int=int, ciphertext=ciphertext, mode=t.opmode,
                           key=key, iv=iv, salt=salt,
                           iter=t.pbkdf2_iter, hmac=t.pbkdf2_halg,
                           key_size=t.key_size, action=t.action)
def fmt_vbadarray(darray):
    """Format a decimal byte array as a VBA array literal body, e.g. "1, 22, 3".

    Uses str.join instead of the previous format-then-trim idiom
    ("".join("{}, "...)[:-2]); the result is identical, including the empty
    string for an empty input.
    """
    return ", ".join(str(d) for d in darray)
# functions mapping
# Dispatch table: action name -> renderer that produces the VBA output.
actions = {
    "load-asm": render_load_asm
}
| e3prom/kryptoxin | kryptoxin/output/vba.py | vba.py | py | 1,677 | python | en | code | 4 | github-code | 13 |
35711122036 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 淘宝/天猫秒杀
import datetime
import time
from selenium import webdriver
# Log in to Taobao
def login(browser):
    """Open Taobao, let the user log in by scanning the QR code with the
    mobile app (20s window), then navigate to the module-global `buy_link`.

    NOTE(review): find_element_by_link_text raises NoSuchElementException
    rather than returning a falsy value when the link is absent, and the
    find_element_by_* API was removed in Selenium 4 -- this code assumes
    Selenium 3. `buy_link` must be assigned at module level before calling.
    """
    # Open the Taobao login page and wait for the QR-code scan.
    browser.get("https://www.taobao.com/")
    time.sleep(3)
    if browser.find_element_by_link_text("亲,请登录"):
        browser.find_element_by_link_text("亲,请登录").click()
        print("请使用手机淘宝扫码登陆,操作限时20s")
        time.sleep(20)
    browser.get(buy_link)
    time.sleep(3)
    now = datetime.datetime.now()
    print('login success:', now.strftime('%Y-%m-%d %H:%M:%S'))
# Flash-sale purchase loop
def spike(buy_time, buy_link=None, browser=None):
    """Poll until `buy_time`, then click "Buy Now" and keep submitting the
    order on `browser`.

    BUG FIX: the original definition took only `buy_time` although the call
    site passes (buy_time, buy_link, browser), and the body called
    find_element_by_link_text on the selenium `webdriver` *module* (which has
    no such attribute) instead of on a driver instance. `buy_link` is kept in
    the signature for call-site compatibility; navigation happens in login().
    """
    # Busy-wait (0.1s steps) until the wall clock passes the target time.
    while True:
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        if now > buy_time:
            browser.find_element_by_link_text("立即购买").click()
            break
        time.sleep(0.1)
    # Keep trying to submit the order until the button can be clicked.
    while True:
        try:
            if browser.find_element_by_link_text("提交订单"):
                browser.find_element_by_link_text("提交订单").click()
        except Exception:
            time.sleep(1)
        print(now)
        time.sleep(0.1)
# main
if __name__ == "__main__":
    # Purchase link and target time are read interactively below.
    # Time format: "2022-2-28 12:30:00.000000"
    chrome_browser = webdriver.Chrome() # driver path defaults to PATH; when packaged as an exe, to the exe's directory
    chrome_browser.maximize_window()
    cur_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    buy_time = input(f"请输入抢购时间, 格式如 {cur_time} :\n")
    buy_link = input("请输入秒杀链接, like: https://detail.tmall.com/item.htm?id=653484370363")
    # Log in (login() reads the module-level buy_link assigned above).
    login(chrome_browser)
    spike(buy_time,buy_link,chrome_browser)
| jokereven/zjing-tools | spike/spike/spike.py | spike.py | py | 1,751 | python | en | code | 1 | github-code | 13 |
12317125469 | #Part 1#
import requests
import pandas as pd
import numpy as np
import os
# Fetch one sample page from the fake-company endpoint to inspect its markup.
get_resp = requests.get("http://3.85.131.173:8000/random_company")
get_resp.text
fake_html = get_resp.text.split("\n")
fake_html
# NOTE(review): the two bare expressions above are no-ops outside a notebook.
n = 50
df = pd.DataFrame(index = np.arange(n), columns = ["Name", "Purpose"])
#filtering on Name and Purpose, 50 times
for i in range(n):
    get_resp = requests.get("http://3.85.131.173:8000/random_company")
    fake_html = get_resp.text.split("\n")
    for line in fake_html:
        if "Name" in line:
            a = line
        if "Purpose" in line:
            b = line
    # NOTE(review): i-1 writes row -1 (the last row) on the first pass, so
    # every row is filled exactly once, but df.iloc[i, :] would be clearer;
    # `a`/`b` would be stale or unbound if a page lacked a Name/Purpose line.
    df.iloc[i-1,:] = [a, b]
#getting rid of extra elements (strip the <li> markup around the values)
df["Name"] = df["Name"].str.replace("</li>","")
df["Name"] = df["Name"].str.replace("<li>Name:","")
df["Purpose"] = df["Purpose"].str.replace("</li>","")
df["Purpose"] = df["Purpose"].str.replace("<li>Purpose:","")
print(df)
df.to_csv("fake_company.csv", sep='\t')
#####################################################
#Parts 2 and 3#
import pandas as pd
import numpy as np
from textblob import TextBlob
import nltk
import os
# NOTE(review): hard-coded absolute path; only works on the author's machine.
os.chdir("/Users/hfsladmin/OneDrive - stevens.edu/PhD FE/Semester 3/FE 595- Fintech")
#os.getcwd()
# read the files
df1 = pd.read_csv('Companies.csv')
df2 = pd.read_csv('company_info.csv')
df3 = pd.read_csv('fake_company.csv')
df4 = pd.read_csv('name_purpose_pairs.csv')
# concat the dataframes (normalising column names to Name/Purpose first)
df = pd.concat([df1.rename(columns={'Company_Name':'Name', 'Company_Purpose':'Purpose'}) , df2 , df3 , df4.rename(columns={'Company_Name':'Name', 'Company_Purpose':'Purpose'})])
#df.shape()
# get the sentiment score
def sent_sc(text):
    # TextBlob polarity in [-1, 1]
    sent = TextBlob(text).sentiment.polarity
    return sent
# add sentiment score to the df
df["sentiment"] = df.apply(lambda a: sent_sc(a["Purpose"]), axis=1)
#df.head()
# sort them based on sentiment score of purpose
df_sort = df.sort_values(by=["sentiment"])
# top and bottom 5
df_sort_worst = df_sort.head(5) #bottom
df_sort_best = df_sort.tail(5) #top
df_sort_worst.to_csv(r'company_worst.csv', index=None, header=True, sep=';')
df_sort_best.to_csv(r'company_best.csv', index=None, header=True, sep=';')
# Observations
# most of the companies have sentiment score of 0. The lowest is -0.60 and the highest score is 0.50. This might be because of the way the column purpose is written; it's short and uses
# lots of technical words with neutrality which the pretrained sentiment analyzer might not take as polarity or might even not recognize the world. Also, this is a discription of a company
# which is pretty straightforward and objective.
| Agatheee/FE595_Assignment2 | NLP Assignment.py | NLP Assignment.py | py | 2,563 | python | en | code | 0 | github-code | 13 |
20814434660 | import requests, json
import networkx as nx
from itertools import permutations
# Google Distance Matrix API endpoint (query string is appended per request).
url = 'https://maps.googleapis.com/maps/api/distancematrix/json?'
# Google Dev API key
# SECURITY NOTE(review): this key is hard-coded and committed to the repo;
# it should be rotated and loaded from an environment variable instead.
api_key = 'AIzaSyBfYHLkwRPSbY1MucNJL30FtJo7Er-kXTY'
# Helper methods
def next_permutation(arr):
    """Advance `arr` in place to its next lexicographic permutation.

    Returns True on success; returns False (leaving `arr` untouched) when
    `arr` is already the last, i.e. non-increasing, permutation.
    """
    # Locate the pivot: the left end of the longest non-increasing suffix.
    pivot = len(arr) - 1
    while pivot > 0 and arr[pivot - 1] >= arr[pivot]:
        pivot -= 1
    if pivot <= 0:
        return False
    # Find the rightmost element strictly greater than the pivot's left
    # neighbour and swap them.
    succ = len(arr) - 1
    while arr[succ] <= arr[pivot - 1]:
        succ -= 1
    arr[pivot - 1], arr[succ] = arr[succ], arr[pivot - 1]
    # Reverse the suffix to get the smallest tail.
    arr[pivot:] = arr[:pivot - 1:-1]
    return True
def calculate_tour_cost(G, source, potholes_tour):
    """Total edge weight of the closed tour source -> potholes... -> source."""
    first, last = potholes_tour[0], potholes_tour[-1]
    total = G[source][first]["weight"] + G[source][last]["weight"]
    for a, b in zip(potholes_tour, potholes_tour[1:]):
        total += G[a][b]["weight"]
    return total
# optimized over distance or duration
def calculate_optimal_tours(trucks_start_location, pothole_locations):
    """Exhaustively search all pothole orderings and return the cheapest tour
    by distance and by duration.

    Returns {"distance": {"tour", "cost"}, "duration": {"tour", "cost"}}.
    Pairwise costs come from the Google Distance Matrix API (one HTTP request
    per location pair; no error handling on the responses). The permutation
    search is O((n-1)!) in the number of potholes -- only viable for small n.
    """
    locations = [trucks_start_location] + pothole_locations
    # Create distance and duration graphs (vertices are locations)
    distanceG = nx.Graph()
    durationG = nx.Graph()
    # Adding the weighted edges of the graphs
    # Note that we assume symmetric pairwise distances (we model locations using an undirected graph)
    for i in range(len(locations) - 1):
        loc_i = locations[i]
        for j in range(i + 1, len(locations)):
            loc_j = locations[j]
            # retrieve the distance and duration information from Google
            resp_obj = requests.get(url + 'origins=' + loc_i +
                                    '&destinations=' + loc_j +
                                    '&key=' + api_key)
            dist_obj = resp_obj.json()
            distance_mts = dist_obj["rows"][0]["elements"][0]["distance"]["value"]
            duration_scs = dist_obj["rows"][0]["elements"][0]["duration"]["value"]
            distanceG.add_edge(loc_i, loc_j, weight=distance_mts)
            durationG.add_edge(loc_i, loc_j, weight=duration_scs)
    best_distance_cost = float("inf")
    best_distance_tour = []
    best_duration_cost = float("inf")
    best_duration_tour = []
    # Explore all possible tours that include all unfilled potholes
    # Note: since all tours must start and end with the trucks_start_location, we only need to explore all permutations of pothole_locations
    int_potholes_tour = list(range(len(pothole_locations)))
    while True:
        potholes_tour = [pothole_locations[int_potholes_tour[i]] for i in range(len(pothole_locations))]
        # costs to first pothole and from last pothole
        distance_cost = distanceG[trucks_start_location][potholes_tour[0]]["weight"] + \
                        distanceG[trucks_start_location][potholes_tour[-1]]["weight"]
        duration_cost = durationG[trucks_start_location][potholes_tour[0]]["weight"] + \
                        durationG[trucks_start_location][potholes_tour[-1]]["weight"]
        # costs in between potholes
        # NOTE(review): this duplicates calculate_tour_cost -- consider reusing it.
        for i in range(len(potholes_tour) - 1):
            distance_cost += distanceG[potholes_tour[i]][potholes_tour[i + 1]]["weight"]
            duration_cost += durationG[potholes_tour[i]][potholes_tour[i + 1]]["weight"]
        # if we have found a better tour, update
        if distance_cost < best_distance_cost:
            best_distance_cost = distance_cost
            best_distance_tour = potholes_tour
        if duration_cost < best_duration_cost:
            best_duration_cost = duration_cost
            best_duration_tour = potholes_tour
        # generate the (lexicographically) next potholes tour
        if not next_permutation(int_potholes_tour):
            # if the current int_potholes_tour was the lexicographically largest one, exit loop
            break
    distance_based = {"tour": best_distance_tour, "cost": best_distance_cost}
    duration_based = {"tour": best_duration_tour, "cost": best_duration_cost}
    optimal = {"distance": distance_based, "duration": duration_based}
    return optimal
| dawn-ds/KnoxPot-Guru | flask_potholes/flask_pot/routing.py | routing.py | py | 4,107 | python | en | code | 0 | github-code | 13 |
22677079512 | import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# Index order matches the prompt: 0 = rock, 1 = paper, 2 = scissors.
signs = [rock,paper,scissors]
# NOTE(review): non-numeric input makes int() raise ValueError here -- the
# "invalid number" branch below only covers out-of-range integers.
player_sign = int(input("What do you chose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n"))
#Invalid input
if player_sign >2 or player_sign < 0:
    print("You type invalid number, you lose!")
else:
    computer_sign = random.randint(0,2)
    print(signs[player_sign]+"\n"+"Computer chose:\n"+signs[computer_sign]+"\n")
    # Conditions
    # Rock wins against scissors.
    # Scissors win against paper.
    # Paper wins against rock.
    if player_sign == computer_sign:
        print("It's a draw")
    elif ((player_sign == 0 and computer_sign == 2) or (player_sign == 2 and computer_sign == 1) or (player_sign == 1 and computer_sign == 0) ):
        print("You win")
    else:
        print("You lose")
42242719003 | #This file contains the basic functions for sound manipulation:
# Sound(note, duration)
# get_key(value, dictionary)
# get_song(file, number)
# make_music_sheet(list_notes, invert, transpos, amount_of_transposition=0)
# get_all_songs_titles(file)
# transpose(list_of_notes, transposition):
# transposenote(note, transposition):
# invert(list):
# inverse_note(note):
# addsong(strin, title, file):
# delsong(numb, file):
# create_markov_map(numb, file):
# sum_of_maps(m1, m2):
# create_markov_map_all_songs(file):
# markov_new_song(markovmapnotes, total_notes, markovmapdurations, total_durations, songlength):
# at the bottom you can find a list of tests to test these functions without running the GUI
from Settings import *
pg.mixer.init()
def sound(note, duration):
    """Play one note for `duration`; "Z" is a rest, which just sleeps.

    The note-name -> wav-file map (notesdict), wav_folder and tempo come from
    the Settings star import.
    """
    if note == "Z":
        sleep(duration)
        return
    pg.mixer.Sound(os.path.join(wav_folder, notesdict[note])).play()
    pg.mixer.music.fadeout(0)
    sleep(duration / tempo)
def get_key(value, dictionary):
    """Return the first key mapped to `value` (None when absent)."""
    for key, mapped in dictionary.items():
        if mapped == value:
            return key
    return None
def get_song(file, number):
    """Return the note list of song #`number` in `file`, e.g. ["SOL1", "FA2", ...].

    Title lines look like "#3 Title" or "#12 Title"; the partition is on the
    line immediately after the title. Returns None when the number is absent.
    Fix: the file handle is now closed deterministically via a context
    manager (the original leaked it).
    """
    with open(os.path.join(TXT_folder, file), "r") as song_file:
        for line in song_file:
            if "#" + number + " " == line[:3] or "#" + number + " " == line[:4]:
                list_of_notes = str(next(song_file)).split(" ")
                list_of_notes[-1] = list_of_notes[-1].strip()
                return list_of_notes
    return None
def make_music_sheet(list_notes, inver, transpos, amount_of_transposition=0):
    """Build a playable partition [(note, duration), ...] from note tokens.

    `inver` and `transpos` are booleans selecting inversion/transposition of
    the raw token list first. A "p" token is not emitted itself; it extends
    the preceding note's duration by half (dotted note).
    """
    if inver:
        list_notes = invert(list_notes)
    if transpos:
        list_notes = transpose(list_notes, int(amount_of_transposition))
    music_sheet = []
    last = len(list_notes) - 1
    for idx, token in enumerate(list_notes):
        if token == "p":
            continue
        duration = duration_of_notes[token[-1]]
        # A following "p" marks a dotted note: add half the base duration.
        if idx != last and list_notes[idx + 1] == "p":
            duration += duration / 2
        music_sheet.append((token[:-1], duration))
    return music_sheet
def get_all_songs_titles(file):
    """Map song number (as a string) -> title for every "#<n> <title>" line.

    Used mainly to list a file's titles in the menu. Handles one- and
    two-digit song numbers. Fix: the file handle is now closed via a context
    manager (the original leaked it).
    """
    all_titles = dict()
    with open(os.path.join(TXT_folder, file), "r") as song_file:
        for line in song_file:
            if line[0] == "#":
                if line[2] == " ":
                    # single-digit number: "#3 Title"
                    all_titles[line[1]] = line[3:].strip()
                else:
                    # two-digit number: "#12 Title"
                    all_titles[line[1:3]] = line[3:].strip()
    return all_titles
def transpose(list_of_notes, transposition):
    """Transpose every real note of the list by `transposition` steps.

    Rest markers ("Z...") and prolongation markers ("p") pass through
    unchanged; actual transposition is delegated to transposenote().
    """
    transposed = []
    for token in list_of_notes:
        if token[:1] in ("Z", "p"):
            transposed.append(token)
        else:
            transposed.append(transposenote(token, transposition))
    return transposed
def transposenote(note, transposition):
    """Transpose a single note token (pitch + duration char) by `transposition`.

    Improvements: the local no longer shadows the builtin `list`, and the
    repeated "subtract 7 until in range" loop is replaced by a modulo, which
    also handles arbitrarily large (and negative) transpositions.
    """
    scale = notes[:-1]  # drop the trailing rest marker "Z"
    index = (scale.index(note[:-1]) + transposition) % len(scale)
    return scale[index] + note[-1:]
def invert(list):
    """Return the melodic inversion of a note list.

    "p" prolongation markers are kept as-is; everything else is inverted via
    inverse_note(). (The parameter name `list` shadows the builtin but is
    kept as part of the public signature.)
    """
    mirrored = []
    for token in list:
        mirrored.append(token if token == "p" else inverse_note(token))
    return mirrored
def inverse_note(note):
    """Mirror a note token: both pitch and duration are reflected around
    their respective tables (`notes` and `durations` from Settings)."""
    pitch = note[:-1]
    length = note[-1:]
    mirrored_pitch = notes[(len(notes) - notes.index(pitch)) % len(notes)]
    mirrored_length = durations[(len(durations) - durations.index(length)) % len(durations)]
    return mirrored_pitch + mirrored_length
def addsong(notes, title, file):
    """Append a song (list of tokens, or space-separated string ending with a
    space) to `file` with an auto-assigned "#<n> <title>" header.

    Fix: the original did `filesong.close` (an attribute access, not a call)
    and never closed the read handle; both are replaced by `with` blocks.
    """
    if type(notes) == list:
        notes = " ".join(notes)
    else:
        # Strings from the write-player carry a trailing space; drop it.
        notes = notes[:-1]
    path = os.path.join(TXT_folder, file)
    # Each song occupies two lines, so the next song number is lines//2 + 1.
    with open(path, "r") as existing:
        n = sum(1 for _ in existing)
    if n >= 2:
        n = n // 2
    n += 1
    with open(path, "a") as filesong:
        filesong.write("#" + str(n) + " " + title + "\n" + notes + "\n")
def delsong(numb, file):
    """Delete song #`numb` (title line + note line) from `file` and renumber
    the remaining songs so they stay consecutive.

    Fixes: file handles are now actually closed (the original did
    `filesong.close` without calling it), and renumbered single-digit titles
    keep the space between number and title -- the original emitted
    "#" + n + line[3:], which produced "#1Title" for single-digit numbers.
    """
    path = os.path.join(TXT_folder, file)
    with open(path, "r") as song_file:
        lines = song_file.readlines()
    start = (numb - 1) * 2          # each song occupies 2 lines
    del lines[start]                # title line
    del lines[start]                # note line (shifted up by the first del)
    with open(path, "w") as editfile:
        # Rewrite all remaining lines, renumbering titles in ascending order.
        number_of_before = 0
        for line in lines:
            if line[0] == "#":
                if number_of_before + 1 != int(line[1:3]):
                    # Renumber: keep everything after the first space intact.
                    editfile.write("#" + str(number_of_before + 1) + " " + line.split(" ", 1)[1])
                else:
                    editfile.write(line)
                number_of_before += 1
            else:
                editfile.write(line)
def create_markov_map(numb, file):
    """Build first-order Markov transition maps for one song.

    Returns 4 values: an 8x8 note-successor matrix, per-note occurrence
    counts, a 4x4 duration-successor matrix and per-duration counts.
    Matrix indices follow the Settings `notes` and `durations` tables.
    """
    songlist = get_song(file, numb)
    list_of_notes = list(filter(lambda note: note != "p", songlist)) #remove "p"
    # calculate the total number of durations
    total_durations = [0, 0, 0, 0]
    for note in list_of_notes:
        total_durations[durations.index(note[-1:])] += 1
    # fill lines of matrix for duration with 0
    markovmap_durations = []
    for i in range(4):
        markovmap_durations.append([0, 0, 0, 0])
    # count successors of each duration
    # NOTE(review): range(len-1) combined with the i > 0 skip never counts the
    # transition into the final note -- confirm whether that is intentional.
    for i in range(len(list_of_notes) - 1):
        if i > 0:
            before_duration = list_of_notes[i - 1][-1:]
            markovmap_durations[durations.index(before_duration)][durations.index(list_of_notes[i][-1:])] += 1
    #remove durations
    for i in range(len(list_of_notes)):
        list_of_notes[i] = list_of_notes[i][:-1]
    # calculate total number of notes:
    total_notes = [0 ,0 ,0, 0, 0, 0, 0, 0]
    for note in list_of_notes:
        total_notes[notes.index(note)] += 1
    #create matrix of successors for each note
    markovmap_notes = []
    for i in range(8):
        markovmap_notes.append([0, 0, 0, 0, 0, 0, 0, 0])
    # count successors of each note (same final-transition caveat as above)
    for i in range(len(list_of_notes)-1):
        if i>0:
            before_note = list_of_notes[i-1]
            markovmap_notes[notes.index(before_note)][notes.index(list_of_notes[i])] += 1
    return markovmap_notes, total_notes, markovmap_durations, total_durations
def sum_of_maps(m1, m2):
    """Element-wise sum of two equally-shaped (nested) lists, as plain lists."""
    combined = numpy.add(numpy.array(m1), numpy.array(m2))
    return combined.tolist()
def create_markov_map_all_songs(file):
    """Accumulate the note/duration Markov maps of every song in `file`.

    Returns the same 4 values as create_markov_map(), summed over all songs:
    the 8x8 note map, per-note counts, the 4x4 duration map and per-duration
    counts.
    """
    song_titles = get_all_songs_titles(file)
    # Start every accumulator at zero.
    notes_map = [[0] * 8 for _ in range(8)]
    durations_map = [[0] * 4 for _ in range(4)]
    total_notes = [0] * 8
    total_durations = [0] * 4
    for song in song_titles:
        song_notes_map, song_notes, song_durations_map, song_durations = create_markov_map(song, file)
        notes_map = sum_of_maps(notes_map, song_notes_map)
        durations_map = sum_of_maps(durations_map, song_durations_map)
        total_notes = sum_of_maps(total_notes, song_notes)
        total_durations = sum_of_maps(total_durations, song_durations)
    return notes_map, total_notes, durations_map, total_durations
def markov_new_song(markovmapnotes, total_notes, markovmapdurations, total_durations, songlength):
    """Generate a new partition of ``songlength`` notes from Markov statistics.

    The most frequent note (and duration) starts the song; every following
    note/duration is drawn at random among the successors observed in the
    transition maps.  Returns a list of "note+duration" strings.
    """
    new_song = []
    # Expand the note transition counts into explicit successor lists: note i
    # appears line[i] times, so random.choice() below picks each successor
    # with the observed transition frequency.
    succesors_matrix_notes = []
    for line in markovmapnotes:
        succesors_matrix_line = []
        for i in range(len(line)):
            for j in range(line[i]):
                succesors_matrix_line.append(notes[i])
        succesors_matrix_notes.append(succesors_matrix_line)
    # take the most played note in song and set it as first note
    firstnote = notes[total_notes.index(max(total_notes))]
    new_song.append(firstnote)
    # create new partition only composed of notes by randomly choosing in lists of all successors
    for i in range(1, int(songlength)):
        beforenote = new_song[i-1]
        new_song.append(random.choice(succesors_matrix_notes[notes.index(beforenote)]))
    # Same frequency-expansion, this time for the duration transition map.
    succesors_matrix_durations = []
    for line in markovmapdurations:
        succesors_matrix_line = []
        for i in range(len(line)):
            for j in range(line[i]):
                succesors_matrix_line.append(durations[i])
        succesors_matrix_durations.append(succesors_matrix_line)
    # take the most played duration in song and add it to the first note
    firstduration = durations[total_durations.index(max(total_durations))]
    new_song[0] += firstduration
    # Append a duration to every later note; the previous entry's duration is
    # read back as its final character (assumes durations are single
    # characters - confirm against the module-level ``durations`` list).
    for i in range(1, int(songlength)):
        beforeduration = new_song[i-1][-1:]
        new_song[i] += (random.choice(succesors_matrix_durations[durations.index(beforeduration)]))
    return new_song
# TESTS
# You can find here a series of test to test these function with the GUI
# feel free to change the variables or add in a few print statements to test them how you wish
# TEST 1
"""# This first test is to get the list of notes from the second song from the file 1
# you can change the variables as you wish to test different songs
song_number = "2"
file = "File1.txt"
song = get_song(file, song_number)
print(song)"""
# TEST 2
"""# This second test is to transpose the second song from the file 1 of three notes
# you can change the variables as you wish to test different songs
# normally, when running the GUI, this is done automatically with the function make_music_sheet
# as you can test in test 4
# first we need to get the song list, and then send it to transpose function
song_number = "2"
file = "File1.txt"
transposition_amount = 3
song = get_song(file, song_number)
transposed_song = transpose(song, transposition_amount)
print(song)
print(transposed_song)
# you can now compare the two songs to see that transposotion worked correctly
# here is a list of the notes to help you if you want to compare:
print(notes[:-1])"""
# TEST 3
"""# This third test is to invert the second song from the file 1
# you can change the variables as you wish to test different songs
# normally, when running the GUI, this is done automatically with the function make_music_sheet
# as you can test in test 4
# first we need to get the song list, and then send it to invert function
song_number = "2"
file = "File1.txt"
song = get_song(file, song_number)
inverted_song = invert(song)
print(song)
print(inverted_song)
# you can now compare the two songs to see that inversion worked correctly
# here is the list we use for inversion to help you if you want to compare:
print(notes)"""
# TEST 4
"""# This fourth test is to play the second song from the file 1
# you can change the variables as you wish to test different songs, different files or apply modifications
song_number = "2"
file = "File1.txt"
inversion = False
transposition = False
amount = 2
partition = make_music_sheet(get_song(file, song_number), inversion, transposition, amount_of_transposition=amount)
print(partition)
# you can send this partition to the sound function with a simple while loop:
for note in partition:
sound(note[0], note[1])"""
# TEST 5
"""# This fifth test creates a markov song for song 2 of file1
#you can change the variables as you wish to test different songs
song_number = "2"
file = "File1.txt"
map = create_markov_map(song_number, file)
#map contains a list of 4 elements :
# the successors table for notes
# a list of the total repetitions for each note
#  the successors table for durations
# a list of the total repetitions for each duration
#if desired you can send these elements to markov_new_song to create a new partition:
newsonglength = 10
new_song =markov_new_song(map[0], map[1], map[2], map[3], newsonglength)
# now lets send this new song to the sound function by using same method than in test 1
inversion = False
transposition = False
amount = 2
new_parition = make_music_sheet(new_song, inversion, transposition, amount)
for note in new_parition:
sound(note[0], note[1])
sleep(1) #this line is added because this program is not in a while loop
#so as soon as the for loop is finished it stops, thus cutting of the last note and
#not letting it fade away
"""
# TEST 6
"""# This sixth test creates a markov song for file1
#you can change the variables as you wish to test different files
file = "File1.txt"
map = create_markov_map_all_songs(file)
#map contains a list of 4 elements :
# the successors table for notes
# a list of the total repetions for each note
# the successors table for durations
# a list of the total repetitons for each duration
#if desired you can send these elements to markov_new_song to create a new partition:
newsonglength = 10
new_song =markov_new_song(map[0], map[1], map[2], map[3], newsonglength)
# now lets send this new song to the sound function by using same method than in test 1
inversion = False
transposition = False
amount = 2
new_parition = make_music_sheet(new_song, inversion, transposition, amount)
for note in new_parition:
sound(note[0], note[1])
sleep(1)"""
| PaulZaman/MusikReader | Main.py | Main.py | py | 16,213 | python | en | code | 0 | github-code | 13 |
34908358686 | import yaml
# Round-trip demo: serialise ``data`` to YAML, read it back and compare.
data = {
    'key1': ['list1', 'list2', 'list3'],
    'key2': 234,
    'key3': {
        'key31': '€',
        'key32': '†'
    }
}
# allow_unicode keeps non-ASCII values readable instead of \u escapes; an
# explicit utf-8 encoding makes the script independent of the system locale.
with open('file.yaml', 'w', encoding='utf-8') as fh:
    yaml.dump(data, fh, default_flow_style=False, allow_unicode=True)
with open('file.yaml', 'r', encoding='utf-8') as fh:
    # safe_load_all: calling yaml.load_all without an explicit Loader is
    # rejected by PyYAML >= 6 and is unsafe on untrusted input.  Also avoid
    # shadowing the builtin ``dict`` with the loop variable.
    for document in yaml.safe_load_all(fh):
        print(f"Compare dictionary in program and from YAML file: {data == document}")
| YuKars1996/Applications | Lesson_2/task_2_3.py | task_2_3.py | py | 434 | python | en | code | 0 | github-code | 13 |
34747313762 | import time
import pymongo
import queue
from dateutil import parser
from queue import Queue
from helpers import OANDA
from helpers.misc import (
load_config,
seconds_to_human,
seconds_to_us,
ignore_keyboard_interrupt,
create_logger,
)
from helpers.ipc import expose_telemetry, telemetry_format, TelemetryManager
from helpers.balancing import LoadBalancer, AutoscalingGroup
from multiprocessing import Manager
def process_datapoint(datapoint: dict) -> dict:
    """
    :param datapoint: Raw OANDA datapoint in dictionary format
    :return: Returns a datapoint formatted for database storage
    :rtype: dict
    """
    instrument = datapoint["instrument"]
    # Keep only the fields we persist; "time" is parsed into a datetime so
    # MongoDB can store it as a native date rather than a string.
    record = {
        "time": parser.parse(datapoint["time"]),
        "bid": datapoint["closeoutBid"],
        "ask": datapoint["closeoutAsk"],
        "status": datapoint["status"],
        "tradeable": datapoint["tradeable"],
        "instrument": instrument,
    }
    # "dest" routes the packet to the collection named after the instrument.
    return {"dest": instrument, "data": record}
class DataGatherer:
# Process number limits
GATHERING_PROCESS_COUNT_MIN = 2
RECORDING_PROCESS_COUNT_MIN = 1
RECORDING_PROCESS_COUNT_MAX = 64
PROCESSING_PROCESS_COUNT_MIN = 1
PROCESSING_PROCESS_COUNT_MAX = 10
# Periodic action intervals (perform action every n seconds)
TICK_INTERVAL = 0.01
DATA_REFRESH_INTERVAL = 60 * 10
STATUS_INTERVAL = 5
AUTOSCALE_INTERVAL = 0.25
UPDATE_TELEMETRY_INTERVAL = 5
# Queue limits
MAX_QUEUE_SIZE = 10
QUEUE_TIMEOUT = 0.1
def __init__(self, db_string: str, api_config: dict) -> None:
"""
:param db_string: MongoDB database connection string
:param api_config: API config dictionary
:rtype: None
"""
self.db_string = db_string
self.logger = create_logger()
self.manager = Manager()
self.latest_data = self.manager.list()
self.unsaved_data = self.manager.Queue()
self.unprocessed_data = self.manager.Queue()
self.telemetry_manager = TelemetryManager(self.manager.dict(telemetry_format))
self.api_config = self.manager.dict()
for key, value in api_config.items():
self.api_config[key] = value
self.ipc = AutoscalingGroup(expose_telemetry, (self.telemetry_manager.shared_telemetry,), 1)
self.gatherers = AutoscalingGroup(
self.gather_data,
(self.api_config, self.unprocessed_data),
DataGatherer.GATHERING_PROCESS_COUNT_MIN,
)
self.processors = LoadBalancer(
self.process_data,
(self.latest_data, self.unsaved_data),
DataGatherer.PROCESSING_PROCESS_COUNT_MIN,
DataGatherer.PROCESSING_PROCESS_COUNT_MAX,
self.unprocessed_data,
DataGatherer.MAX_QUEUE_SIZE,
)
self.recorders = LoadBalancer(
self.record_data,
(self.db_string,),
DataGatherer.RECORDING_PROCESS_COUNT_MIN,
DataGatherer.RECORDING_PROCESS_COUNT_MAX,
self.unsaved_data,
DataGatherer.MAX_QUEUE_SIZE,
)
    @staticmethod
    @ignore_keyboard_interrupt
    def gather_data(
        api_config: dict, unprocessed_queue: Queue, telemetry: dict = None
    ) -> None:
        """
        Stream raw OANDA price events and push each one onto the queue.

        Blocks for as long as the price stream stays open; logs critically
        and returns once the stream closes (the worker process then exits).

        :param api_config: API config dictionary with "token", "id",
            "instruments" and "url" keys
        :param unprocessed_queue: Queue for unprocessed data points
        :param telemetry: optional shared stats dict; its "action_count" is
            incremented once per streamed datapoint when provided
        :rtype: None
        """
        logger = create_logger()
        token = api_config["token"]
        # NOTE(review): ``id`` shadows the builtin of the same name; harmless
        # here but worth renaming eventually.
        id = api_config["id"]
        instruments = api_config["instruments"]
        url = api_config["url"]
        data = OANDA.stream_prices(token, id, instruments, url)
        # The generator yields one datapoint per stream event; this loop only
        # ends when the underlying HTTP stream is closed by the server.
        for datapoint in data:
            unprocessed_queue.put(datapoint)
            if telemetry is not None:
                # do telemetry...
                telemetry["action_count"] += 1
        logger.critical("Data stream closed - terminating process.")
@staticmethod
@ignore_keyboard_interrupt
def process_data(
unprocessed_queue: Queue,
latest_data: list,
unsaved_queue: Queue,
telemetry: dict = None,
) -> None:
"""
:param unprocessed_queue: Queue containing unprocessed data points
:param latest_data: List of recently processed data points for removing duplicates
:param unsaved_queue: Queue of unsaved data points (used by saving processes)
:rtype: None
"""
logger = create_logger()
while True:
try:
unprocessed_datapoint = unprocessed_queue.get(
timeout=DataGatherer.QUEUE_TIMEOUT
)
datapoint = unprocessed_datapoint
if datapoint in latest_data:
continue
latest_data.append(datapoint)
if len(latest_data) > 100:
latest_data.pop(0)
logger.info(f"Processing datapoint: {datapoint}")
# Save every datapoint
to_queue = {"dest": "raw", "data": datapoint}
unsaved_queue.put(to_queue)
# Save relevant price data in correct database
if datapoint["type"] == "PRICE":
to_queue = process_datapoint(datapoint)
unsaved_queue.put(to_queue)
if telemetry is not None:
telemetry["action_count"] += 1
instrument = to_queue['dest']
if instrument in telemetry:
telemetry[instrument] += 1
else:
telemetry[instrument] = 1
except queue.Empty:
logger.debug("Unprocessed queue is empty.")
@staticmethod
@ignore_keyboard_interrupt
def record_data(
unsaved_queue: Queue, db_string: str, telemetry: dict = None
) -> None:
"""
:param unsaved_queue: Queue containing unsaved datapoints
:param db_string: MongoDB connection string
:rtype: None
"""
logger = create_logger()
client = pymongo.MongoClient(db_string)
tidepooldb = client["tidepool"]
raw = tidepooldb["raw"]
while True:
try:
datapoint = unsaved_queue.get(timeout=DataGatherer.QUEUE_TIMEOUT)
if datapoint is None:
logger.error("'NoneType' datapoint found in unsaved queue.")
continue
if datapoint["dest"] == "raw":
raw.insert_one(datapoint["data"])
else:
logger.info("Inserting data point into db...")
start = time.time_ns()
tidepooldb[datapoint["dest"]].insert_one(datapoint["data"])
elapsed = (time.time_ns() - start) / 1000
logger.info(f"Successfully inserted data point in {elapsed}μs")
if telemetry is not None:
telemetry["action_count"] += 1
except queue.Empty:
logger.debug("Unsaved queue is empty.")
def update_telemetry(self, uptime: float):
self.telemetry_manager.update_data_stats(
self.gatherers, self.processors, self.recorders, DataGatherer.UPDATE_TELEMETRY_INTERVAL
)
self.telemetry_manager.update_server_stats(
self.gatherers, self.processors, self.recorders, self.unsaved_data, self.unprocessed_data, uptime
)
self.telemetry_manager.update_instrument_stats(self.processors, DataGatherer.UPDATE_TELEMETRY_INTERVAL)
self.telemetry_manager.update_shared_telemetry()
def autoscale(self) -> None:
"""
:rtype: None
"""
self.ipc.autoscale()
self.gatherers.autoscale()
self.processors.autoscale()
self.recorders.autoscale()
def run(self) -> None:
"""
:rtype: None
"""
self.ipc.start()
self.gatherers.start()
self.processors.start()
self.recorders.start()
tick_count = 1
tick_time = 0
while True:
tick_start = time.time()
uptime = tick_count * DataGatherer.TICK_INTERVAL
gathering_proc_count = self.gatherers.proc_count()
processing_proc_count = self.processors.proc_count()
recording_proc_count = self.recorders.proc_count()
if uptime % DataGatherer.AUTOSCALE_INTERVAL == 0:
self.autoscale()
if uptime % DataGatherer.STATUS_INTERVAL == 0:
proc_count_message = (
f"# Subprocesses: [Gathering: {gathering_proc_count} | "
f"Processing: {processing_proc_count} | "
f"Recording: {recording_proc_count}]"
)
queue_size_message = (
f"Queue Sizes (current | avg): "
f"[Unprocessed: ({self.unprocessed_data.qsize()} | {self.processors.queue_average:.2f}) | "
f"Unsaved: ({self.unsaved_data.qsize()} | {self.recorders.queue_average:.2f})]"
)
timing_message = f"Timing: [Uptime: {seconds_to_human(uptime)} | Previous Tick Time: {seconds_to_us(tick_time)}µs]"
self.logger.warning(proc_count_message)
self.logger.warning(queue_size_message)
self.logger.warning(timing_message)
if uptime % DataGatherer.DATA_REFRESH_INTERVAL == 0:
self.gatherers.refresh_procs()
if uptime % DataGatherer.UPDATE_TELEMETRY_INTERVAL == 0:
self.update_telemetry(uptime)
tick_time = time.time() - tick_start
sleep_time = DataGatherer.TICK_INTERVAL - tick_time
sleep_time = 0 if sleep_time < 0 else sleep_time
time.sleep(sleep_time)
tick_count += 1
def stop(self) -> None:
"""
:rtype: None
"""
self.gatherers.stop()
self.processors.stop()
self.recorders.stop()
def main() -> None:
    """Entry point: load config, connect to OANDA and run the data gatherer.

    :rtype: None
    """
    logger = create_logger()
    logger.info("Starting data collection.")
    cfg = load_config()
    token = cfg["token"]
    alias = cfg["alias"]
    db_string = cfg["db_string"]
    live = cfg["live"]
    api = OANDA.API(token, live=live)
    account = api.get_account(alias)
    if not account:
        # Bug fix: the old code printed a truncated message and carried on,
        # crashing later on the falsy ``account``.  Bail out early instead.
        logger.critical("Error connecting to account '%s' - aborting.", alias)
        return
    instruments = api.get_instruments(alias)
    api_config = {
        "url": api.stream_url,
        "token": token,
        "id": account["id"],
        "instruments": instruments,
    }
    dg = DataGatherer(db_string=db_string, api_config=api_config)
    try:
        dg.run()
    except KeyboardInterrupt:
        logger.critical("KeyboardInterrupt, stopping data collection.")
        # Falling off the end exits with status 0; no need for quit(), which
        # is only guaranteed in interactive sessions.
        dg.stop()
if __name__ == "__main__":
main()
| declanomara/Tidepool | DataGatherer.py | DataGatherer.py | py | 11,050 | python | en | code | 1 | github-code | 13 |
15881534177 | import os
import sys
from peano.db.connect import get_db
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
def main():
    """Delete every image belonging to the "test" workspace, after a y/n prompt."""
    db = get_db()
    query = {"belong_workspaces": {"$in": ["test"]}}
    # Bug fix: the previous aggregate() call passed a bare stage dict, but
    # aggregate expects a pipeline *list* and returns a cursor that is not
    # subscriptable, so the prompt always crashed.  count_documents() returns
    # the matching count directly, and counting with the same filter used by
    # delete_many() makes the confirmation number match what is removed.
    total_count = db.images.count_documents(query)
    if input(f"{total_count}件削除しますか? (y/n)") != "y":
        exit()
    db.images.delete_many(query)
main()
| microwaver17/peano | peano_backend/maintenance/db_del_test.py | db_del_test.py | py | 483 | python | en | code | 1 | github-code | 13 |
145956744 | import tweepy
# SECURITY(review): real-looking Twitter API credentials are hard-coded and
# committed here.  They should be revoked/rotated immediately and loaded from
# environment variables or a config file kept out of version control.
consumer_key = 'auHo5WKYN4jxzMQkj0D5MRmp7'
consumer_secret = 'tIg8lPxREu6U3FGnM5YLyJwS51os0aDN4qjMxZ4e3n6hFjyJJP'
key = '1302252892359815169-wuv0kpE41P78thQbbc7UKCC0I1yjvb'
secret = 'LqHrNzQZG7Je84W8F3b6ekiPuhEKTKILNQYsN3JFBCar8'
# OAuth 1.0a handshake, then fetch the tweets mentioning this account.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
tweets = api.mentions_timeline()
for i in tweets:
print(i.text) | DhanooshTamizh/Twitter-Bot | bot.py | bot.py | py | 438 | python | en | code | 1 | github-code | 13 |
21853656875 | #!/usr/bin/python3
""" Improvement algorithm complete routing solution. """
import random
import pickle
import logging
import sys
import copy
import json
import sortedcontainers
import numpy as np
from math import ceil
from .baseobjects import Dispatch, Vehicle, Cost, Solution, Utils
from .RollOut import RollOut
from .GridSearch import search, search_improvement
from db.queries import get_best_solutions
LOGGER = logging.getLogger(__name__)
def geographic_similarity(dispatch, vehicle):
    """ Ranks vehicles in dispatch by geographic similarity to input vehicle """
    def distance_to(other):
        # Euclidean distance between the two vehicles' geographic centers.
        delta = np.asarray(vehicle.geographic_center()) - np.asarray(other.geographic_center())
        return np.linalg.norm(delta)

    ranked = sortedcontainers.SortedListWithKey(key=distance_to)
    ranked.update(dispatch.vehicles)
    return ranked
def by_customers_dist(vehicle):
    """Sort key: (number of served customers, total distance travelled)."""
    key = vehicle.served_customers(), vehicle.total_dist
    return key
def log_solution(dispatch, dispatch_backup):
new_num_veh, new_dist = Cost.of_vehicles(dispatch.vehicles)
old_num_veh, old_dist = Cost.of_vehicles(dispatch_backup.vehicles)
LOGGER.debug("Before Improvement: {}: {}"\
.format(old_num_veh, old_dist))
LOGGER.debug("After Improvement: {}: {}"\
.format(new_num_veh, new_dist))
LOGGER.debug("Improvement: {} vehicles, {} distance".format(\
old_num_veh - new_num_veh,\
old_dist - new_dist))
def build_solution_from_str(solution_obj, problem_name):
''' build a dispatch out of the string '''
# load the original problem - organize solution from it
problem_def = Utils.open_sp(problem_name + ".p")
vehs = json.loads(solution_obj.values[0])
cust_dict = dict(zip(map(lambda x: x.custNo, problem_def.customers),
problem_def.customers))
new_dispatch = Dispatch(problem_def.customers,
capacity=problem_def.capacity)
for veh in vehs:
vehicle = Vehicle(problem_def.customers[0], problem_def.capacity)
for c in veh[1:]:
vehicle.serve(cust_dict[c])
new_dispatch.vehicles.append(vehicle)
return new_dispatch
class Improvement:
""" Encapslates the improvement algorithm """
def __init__(self):
""" Setup very simple memoization"""
self.previous_candidates = []
def run(self, base_solution=None, search_params=None, improv_params=None):
""" Master function for this class - initiates optimization """
best_solutions = get_best_solutions()
if base_solution == None:
problem = search_params.problem
found = best_solutions.loc[best_solutions['problem'] == problem]
base_solution_obj = found['solution_string']
dispatch = build_solution_from_str(base_solution_obj, problem)
for i in range(improv_params["iterations"]):
dispatch = self.improve(dispatch, search_params, improv_params)
return dispatch
def improve(self, dispatch, search_params, improv_params):
""" Workhorse of Improvement. Manages the improve phase"""
tmp_dispatch, old_vehicles = self.setup_next_round(dispatch)
if(len(old_vehicles) > 2):
solution, all_solutions = search_improvement(tmp_dispatch,
improv_params["algo"],
search_params)
if self.should_replace_with(old_vehicles, solution.solution.vehicles):
dispatch = self.replace_vehicles(dispatch, old_vehicles,
solution.solution.vehicles)
else:
LOGGER.debug("Wont replace because {} is worse than {}".format(
Cost.of_vehicles(solution.solution.vehicles),
Cost.of_vehicles(old_vehicles)))
return dispatch
def replace_vehicles(self, dispatch, old_vehicles, new_vehicles):
""" Replace the old vehicles in a dispatch object with new vehicles """
LOGGER.debug("Replace routes")
LOGGER.debug("Are they the same? {}".format(\
set(old_vehicles) == set(new_vehicles)))
for v in old_vehicles:
dispatch.vehicles.remove(v)
for v in new_vehicles:
dispatch.vehicles.append(v)
return dispatch
def should_replace_with(self, old_vehicles, new_vehicles):
""" Criterion for replacing vehicle sets for improvement """
new_num, new_dist = Cost.of_vehicles(new_vehicles)
old_num, old_dist = Cost.of_vehicles(old_vehicles)
LOGGER.debug("New solution: {}".format((new_num, new_dist)))
LOGGER.debug("Original solution: {}".format((old_num, old_dist)))
if new_num < old_num:
replace = 1
else:
if new_num > old_num:
replace = 0
else:
replace = new_dist < old_dist
return replace
def chose_candidates(self, dispatch, worst, count=5):
""" method for choosing the vehicles for improvement"""
criterion = geographic_similarity
all_cands = criterion(dispatch, worst)
return all_cands[: ceil(len(all_cands)/3)]
def candidate_vehicles(self, dispatch):
""" Find next vehicles to improve """
worst = self.worst_vehicle(dispatch)
candidate_vehicles = self.chose_candidates(dispatch, worst)
LOGGER.debug("Improvement candidates around {}: {} vehicles with distance {}"\
.format(worst, *Cost.of_vehicles(candidate_vehicles)))
return candidate_vehicles
def worst_vehicle(self, solution):
""" Find worst vehicle to improve around """
sorted_vehicles = sortedcontainers.SortedListWithKey(key=by_customers_dist)
sorted_vehicles.update(solution.vehicles)
# choose the best one or a random one
rbest = sorted_vehicles.pop(0)
if(len(sorted_vehicles)):
rbestR = random.choice(sorted_vehicles)
rbest = random.choice([rbest, rbestR])
return rbest
def setup_next_round(self, dispatch):
""" With the candidate vehicles, setup the rollout algorithm """
similar_vehicles = self.candidate_vehicles(dispatch)
customers = self.flatten_vehicles(similar_vehicles)
new_dispatch = Dispatch(customers, capacity=dispatch.capacity)
new_dispatch.set_delta(dispatch.delta)
return new_dispatch, similar_vehicles
def flatten_vehicles(self, vehicles):
""" Get all customers from many vehicles """
return list({c for v in vehicles for c in v.customer_history})
| sauln/ICRI | src/Improvement.py | Improvement.py | py | 6,733 | python | en | code | 1 | github-code | 13 |
6482485962 | import threading
import queue
import requests
def test_worker():
    # NOTE(review): this loop never exits - print() cannot raise here, so the
    # except branch is dead and the else branch runs on every iteration.
    # Appears to be leftover scratch code for experimenting with try/else.
    while True:
        try:
            print("LITLITLITLITLITLI")
        except Exception:
            print("death")
        else:
            print("I cry")
class StatusChecker(threading.Thread):
    """
    The thread that will check HTTP statuses.

    Drains ``url_queue``, GETs each URL, and pushes ``(url, status_code)``
    tuples onto ``result_queue`` until the queue is empty.
    """
    #: The queue of urls
    url_queue = None
    #: The queue our results will go into
    result_queue = None
    #: Event thingy that tells threads to stop.
    stopper = None
    #: Target functions for use, optional
    target = None

    def __init__(self, url_queue, result_queue, stopper, target=None):
        """
        :param url_queue: queue of URLs still to be checked
        :param result_queue: queue that receives (url, status) tuples
        :param stopper: threading.Event used to signal shutdown
        :param target: optional callable, kept for future use (may be None)
        """
        super().__init__()
        self.url_queue = url_queue
        self.result_queue = result_queue
        self.stopper = stopper
        # Bug fix: the original read a bare ``target`` name (NameError on
        # every construction) and tested ``type(target) != None``, which is
        # always true.  Accept it as an optional parameter instead.
        self.target = target
        if target is not None:
            print(self.getName())

    def run(self):
        while True:
            try:
                # this will throw queue.Empty immediately if there's
                # no tasks left
                to_check = self.url_queue.get_nowait()
            except queue.Empty:
                break  # empty queue, we're done
            else:
                resp = requests.get(to_check)
                self.result_queue.put((to_check, resp.status_code,))
                self.url_queue.task_done()  # tell the queue we're done
if __name__ == '__main__':
url_queue = queue.Queue()
result_queue = queue.Queue()
for i in range(10):
url_queue.put('http://jodalyst.com')
stopper = threading.Event()
num_workers = 4
threads = list()
for i in range(num_workers):
t = StatusChecker(url_queue, result_queue, stopper)
threads.append(t)
print('Starting worker {}'.format(i))
t.start()
# wait for the queue to empty
url_queue.join()
while not result_queue.empty():
url, status = result_queue.get_nowait()
print('{} - {}'.format(url, status)) | dggsax/playground | multithreading/webpageexample.py | webpageexample.py | py | 1,969 | python | en | code | 0 | github-code | 13 |
12947050922 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
class SurnameDataset(Dataset):
    """PyTorch dataset of surnames with their nationality labels."""

    @classmethod  # bug fix: was misspelled ``@calssmethod`` (NameError at class creation)
    def load_dataset_and_make_vectorizer(cls, surname_csv):
        """Load the CSV and build a vectorizer from the training split only."""
        surname_df = pd.read_csv(surname_csv)
        train_surname_df = surname_df[surname_df.split == "train"]
        return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))

    def __getitem__(self, index):
        """Return one sample: vectorized surname, label index and true length."""
        row = self._target_df.iloc[index]
        surname_vector, vec_length = self._vectorizer.vectorize(row.surname, self._max_seq_length)
        nationality_index = self._vectorizer.nationality_vocab.lookup_token(row.nationality)
        # NOTE(review): 'x_legnth' looks like a typo for 'x_length'; kept
        # as-is because the training loop may index by this key - confirm.
        return {'x_data': surname_vector,
                'y_target': nationality_index,
                'x_legnth': vec_length}
class SurnameVectorizer(object):
    """Maps surnames (character sequences) and nationalities to index vectors."""

    def vectorize(self, surname, vector_length=-1):
        """Encode *surname* as a fixed-length vector of character indices.

        :param surname: the surname string to encode
        :param vector_length: output length; any negative value means
            "exactly fit the surname plus BEGIN/END markers"
        :return: (index vector padded with the mask index, true sequence length)
        """
        indices = [self.char_vocab.begin_seq_index]
        indices.extend(self.char_vocab.lookup_token(token) for token in surname)
        indices.append(self.char_vocab.end_seq_index)
        if vector_length < 0:
            # Bug fix: the assignment previously targeted a misspelled
            # ``vector_legnth`` name, leaving vector_length at -1 so the
            # np.zeros() call below raised on the negative size.
            vector_length = len(indices)
        out_vector = np.zeros(vector_length, dtype=np.int64)
        out_vector[:len(indices)] = indices
        out_vector[len(indices):] = self.char_vocab.mask_index
        return out_vector, len(indices)

    @classmethod
    def from_dataframe(cls, surname_df):
        """Build character and nationality vocabularies from a dataframe."""
        char_vocab = SequenceVocabulary()
        nationality_vocab = Vocabulary()
        for index, row in surname_df.iterrows():
            for char in row.surname:
                char_vocab.add_token(char)
            nationality_vocab.add_token(row.nationality)
return cls(char_vocab, nationality_vocab) | joannekim0420/PythonStudy | NLP/withPytorch/chapter6/6-2.py | 6-2.py | py | 1,742 | python | en | code | 0 | github-code | 13 |
30883064306 | import heapq
dx = [-1,0,0,1]
dy = [0,-1,1,0]
dy = [0,1,-1,0]
d = ['u', 'r', 'l', 'd']
def solution(n, m, sx, sy, ex, ey, k):
    """Find a length-k move sequence from (sx,sy) to (ex,ey) on an n x m grid.

    Best-first search over (priority, steps, x, y, path) heap entries; the
    priority ``dist - i - 1`` rewards directions with higher index in ``d``,
    which appears intended to make the returned string lexicographically
    smallest ('d' < 'l' < 'r' < 'u') - confirm against the problem statement.
    Returns "impossible" when no exact-length path exists.
    """
    answer = []
    q = []
    # gr[x][y] holds the best (lowest) priority seen per cell, pruning
    # strictly worse re-visits.  Coordinates are 1-based.
    gr = [[0]*(m+1) for i in range(n+1)]
    heapq.heappush(q,(0,0,sx,sy,[]))
    while(q):
        dist, cnt, x, y,dir = heapq.heappop(q)
        if cnt==k and [x,y]==[ex,ey]:
            answer=dir
            break
        # Parity/feasibility prune: even walking straight to the exit would
        # overshoot the remaining step budget.
        if cnt+abs(ex-x)+abs(ey-y)>k:
            continue
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            cd = d[i]
            if nx<=0 or ny<=0 or nx>n or ny>m:
                continue
            # Skip moves that arrive with a worse priority than recorded.
            if dist-i-1>gr[nx][ny]:
                continue
            gr[nx][ny]=dist-i-1
            heapq.heappush(q,(dist-i-1,cnt+1,nx,ny,dir+[cd]))
    return "".join(answer) if answer else "impossible"
print(solution(3, 4, 2, 3, 3, 1, 5))
| weeeeey/programmers | 미로 탈출 명령어.py | 미로 탈출 명령어.py | py | 860 | python | en | code | 0 | github-code | 13 |
70434215699 | #!/usr/bin/python3
# Author: Talhah Peerbhai
# Email: hello@talhah.tech
'''
This file contains the Internet Relay Chat client, it imports the irc library which
I made to abstract the IRC protocol wheras this program deals with the GUI and putting
it all together. I'm going to try to break it into more files but at the moment
this is it.
'''
from irclib import IrcCon
# PySimpleGUI is a wrapper for tkinter and some other frameworks
import PySimpleGUI as sg
import time
from colorhash import ColorHash as chash
from windows import loginWin,errorWin,commandsWin,aboutWin,filterWin
from sys import platform
import os
import datetime
if platform == "darwin" or platform == "win32":
print("\033[93mUnsupported Operating System, this program only works on Linux\033[0m")
raise OSError
sg.theme("SystemDefault")
#### CUSTOM EXCEPTIONS ####
class EmptyValue(Exception):
    """Raised when a required user input was left empty."""

    def __init__(self):
        message = "User Input is required"
        super().__init__(message)
        self.message = message
class InvalidCommand(Exception):
    """Raised when the user enters an unrecognised / command."""

    def __init__(self):
        message = "User gave an invalid command"
        super().__init__(message)
        self.message = message
class Client(IrcCon):
'''
Extend the IrcCon class which abstracts the IRC protocol so that we can
handle events and appropriately update the GUI
'''
def __init__(self,window):
'''
Initialise the socket and pass the GUI window to object
'''
IrcCon.__init__(self)
self.window = window
def on_error(self,errorType):
if errorType == "ConnectionRefusedError":
pass
if errorType == "NickInUse":
self.failedLogin = True
pass
    def on_message(self,who,channel,msg):
        """Display an incoming message in the right tab, creating it if needed.

        Messages containing any filtered word are dropped silently.  Messages
        addressed to our own nick (private messages) are routed to a tab named
        after the sender.  The sender's nick is shown in a hash-derived colour.
        """
        # Filtering is case-insensitive and whole-word based.
        check = msg.lower()
        check = set(check.split())
        # Drop the message entirely if it contains any filtered word.
        for item in fList:
            if item in check:
                return
        if channel == self.NICK:
            channel = who
        if channel not in self.channels:
            create_tab(self.window,channel)
            openTabs.append(channel)
            self.channels.add(channel)
        # NOTE(review): current_time is a module-level global - presumably
        # refreshed by the main event loop; confirm it is kept up to date.
        ms = f"{current_time} | "
        self.window[f"{channel}B"].update(ms,append=True)
        ms = f"{who} "
        # Stable per-nick colour derived from hashing the nickname.
        color = chash(f"{who}").hex
        self.window[f"{channel}B"].update(ms,text_color_for_value=color,append=True)
        msg = f"> {msg}"
        self.window[f"{channel}B"].update(msg + "\n",append=True)
        markUnread(channel)
def on_user_join(self,who,channel,hostname):
msg = f"{current_time} | ---> {who} ({hostname}) has joined {channel}\n"
self.window[f"{channel}B"].update(msg,text_color_for_value="green",append=True)
markUnread(channel)
# Add user to the names list
namesList = names[channel]
namesList.append(who)
namesList.sort()
names[channel] = namesList
self.window[f"{channel}L"].update(values=names[channel])
def on_user_part(self,who,channel,hostname):
msg = f"{current_time} | <--- {who} ({hostname}) has parted {channel}\n"
self.window[f"{channel}B"].update(msg,text_color_for_value="orange",append=True)
markUnread(channel)
# Remove the user from the names list
namesList = names[channel]
if who in namesList:
namesList.remove(who)
elif f"@{who}" in namesList:
namesList.remove(f"@{who}")
elif f"~{who}" in namesList:
namesList.remove(f"~{who}")
elif f"+{who}" in namesList:
namesList.remove(f"+{who}")
names[channel] = namesList
self.window[f"{channel}L"].update(values=names[channel])
def on_user_nick_change(self,who,newNick):
msg = f"{current_time} | {who} is now known as {newNick}\n"
for chan in names:
if who in names[chan]:
self.window[f"{chan}B"].update(msg,text_color_for_value="blue",append=True)
# Update the users name in the name list
namesList = names[chan]
# handle nick changes when +,~ @ in front of name, we need to preserve
# the leading inidicator
if who in namesList:
namesList.remove(who)
elif f"@{who}" in namesList:
namesList.remove(f"@{who}")
newNick = "@" + newNick
elif f"~{who}" in namesList:
namesList.remove(f"~{who}")
newNick = "~" + newNick
elif f"+{who}" in namesList:
namesList.remove(f"+{who}")
newNick = "+" + newNick
namesList.append(newNick)
namesList.sort()
names[chan] = namesList
markUnread(chan)
self.window[f"{chan}L"].update(values=names[chan])
def on_user_quit(self,who,hostname,msg):
msg = f"{current_time} | {who} ({hostname}) quit: {msg}\n"
for chan in names:
inChan = False
# Update the users name in the name list
namesList = names[chan]
# A name may have a leading +,~ @ so we need to appropriately remove
# user from the names list
if who in namesList:
namesList.remove(who)
inChan = True
elif f"@{who}" in namesList:
namesList.remove(f"@{who}")
inChan = True
elif f"~{who}" in namesList:
namesList.remove(f"~{who}")
inChan = True
elif f"+{who}" in namesList:
namesList.remove(f"+{who}")
inChan = True
if inChan:
self.window[f"{chan}B"].update(msg,text_color_for_value="red",append=True)
names[chan] = namesList
markUnread(chan)
self.window[f"{chan}L"].update(values=names[chan])
def on_topic(self,chan,topic):
self.window[f"{chan}T"].update(topic)
def on_whois(self,line):
line = line[3:]
line = ' '.join(line)
msg = f"{current_time} | {line}\n"
self.window["infoB"].update(msg,append=True)
markUnread("info")
def unknown_message(self,line):
# This used to "print" to infoB box which had all console output rerouted to it
# that would apparently lead to a race condition when printing too fast?
# and lead to a seg fault error, fixed now so hooray I guess!
# It is important to note that pysimplegui was overriding the builtin print
# so I'm talking about that "print" function. It wasn't transparent that it was
# overriding the default one and the bug may lie therein or it's some sort of
# limitation. TODO Make a PR or github issue one day to fix this
line = f"{current_time} | " + line + "\n"
self.window["infoB"].update(line,append=True)
markUnread("info")
def on_nickserv(self,msg):
msg = f"{current_time} | " + "NickServ " + msg + "\n"
self.window["infoB"].update(msg,text_color_for_value="dark red",font_for_value="Helvetica 10 bold",append=True)
markUnread("info")
def on_names(self,channel,namesChan):
namesChan[0] = namesChan[0].lstrip(":")
if channel not in names:
names[channel] = []
names[channel] = names[channel] + namesChan
def end_names(self,channel):
nameslist = names[channel]
nameslist.sort()
names[channel] = nameslist
time.sleep(1)
self.window[f"{channel}L"].update(values=names[channel])
def on_list(self,channel,members):
msg = f"{current_time} | Chan: {channel} Members: {members}\n"
self.window["infoB"].update(msg,append=True,text_color_for_value="dark green",font_for_value=("Helvetica",10,"bold"))
def on_notice(self,chan,msg):
if chan not in openTabs:
create_tab(self.window,chan)
self.channels.add(chan)
openTabs.append(chan)
msg = f"{current_time} | " + "Notice " + msg + "\n"
font = ("Helvetica",10,"bold")
if msg == "Server is shutting down":
self.disconnect()
self.window[f"{chan}B"].update(msg,text_color_for_value="dark red",font_for_value=font,append=True)
markUnread(chan)
# Returns the main window layout
def mainLayout():
    """Build and return the PySimpleGUI layout for the main window.

    The window has a menu bar, a tab group (starting with the "info" tab),
    a message input box, and a SEND button (plus a hidden EXIT button).
    """
    # Box to display server info and other information non-specific to channels
    info = [[sg.Multiline(size=(93,19),font=('Helvetica 10'),key="infoB",reroute_stdout=False,autoscroll=True,disabled=True)]]
    menu = ['SlickIRC', ['&Exit']],['&Server',['Server settings']],["&Filters",['Filter settings']],['&Help', ['&Commands', '---', '&About'],]
    layout = [[sg.Menu(menu)],
    [sg.TabGroup([[sg.Tab("info",info)]],key="chats",selected_background_color="grey")],
    [sg.Multiline(size=(59, 2), enter_submits=True, key='msgbox', do_not_clear=True),
    sg.Button('SEND', bind_return_key=True,visible=True),
    sg.Button('EXIT',visible=False)
    ]]
    return layout
# Creates a new tab, we keep a record of tabs created, so if a this tab exists
# in hist then we just unhide it
def create_tab(win,channel):
    """Create (or re-show) the tab for *channel* in window *win*.

    Tabs are never truly destroyed (see delete_tab), so if the channel was
    seen before we just unhide its tab; otherwise we build a new tab with a
    topic box, a chat box and a member list, and replay any saved backlog.
    """
    if channel in tabHist:
        win[f"{channel}"].update(visible=True)
    else:
        leftCol = [[sg.Multiline("No channel topic",size=(75, 3), font=('Helvetica 10'),key=f"{channel}T",autoscroll=False,disabled=True)],
        [sg.Multiline(size=(75, 15), font=('Helvetica 10'),key=f"{channel}B",autoscroll=True,disabled=True)]]
        rightCol = [[sg.Listbox(values=[""],key=f"{channel}L",size=(10,13))]]
        element = [[sg.Column(leftCol),sg.Column(rightCol)]]
        tab = sg.Tab(f"{channel}",element,key=channel)
        win["chats"].add_tab(tab)
        load_tab(channel)
        tabHist.append(channel)
# Deletes a tab, we don't truly delete it but just hide it. I can delete a tab
# but unable to delete the contents related to it and we have issues if we need to
# recreate this tab. Hiding it isn't too bad of a tradeoff and history is preserved
def delete_tab(win,channel):
    """Hide *channel*'s tab instead of destroying it.

    PySimpleGUI cannot cleanly delete a tab and its contents, and hiding
    also preserves the chat history for a later re-join.
    """
    win[f"{channel}"].update(visible=False)
def save_tab(tab):
    """Persist *tab*'s chat log to chatlog/<server>/<tab>.txt.

    A "======= <date> =======" session marker is inserted right after the
    last "End of backlog" marker (or at the top when there is none) so that
    sessions stay separated in the saved file.
    """
    # makedirs with exist_ok replaces the exists()/mkdir() pair (no race).
    os.makedirs(f"chatlog/{irc.HOST}", exist_ok=True)
    tabLog = mainWin[f"{tab}B"].get().splitlines(True)
    try:
        # Index just after the most recent backlog marker.
        index = len(tabLog) - tabLog[::-1].index("======= End of backlog =======\n") - 1
    except ValueError:
        index = 0
    today = datetime.date.today()
    date = today.strftime("%B %d, %Y")
    tabLog.insert(index, f"======= {date} =======\n")
    # BUG FIX: the lines already end in "\n"; joining them with " " used to
    # prepend a stray space to every saved line, corrupting the log a little
    # more on each save/load cycle. Join with the empty string instead, and
    # use a context manager so the file is always closed.
    with open(f"chatlog/{irc.HOST}/{tab}.txt", "w+") as f:
        f.write("".join(tabLog))
def load_tab(tab):
    """Replay the saved chat log for *tab*, then print an end-of-backlog marker."""
    path = f"chatlog/{irc.HOST}/{tab}.txt"
    if os.path.exists(path):
        with open(path, "r") as backlog:
            history = backlog.read() + "\n" + "======= End of backlog =======\n"
        mainWin[f"{tab}B"].update(history, append=True)
# Add an asterisk infront of a tabs name to indicate there is an unread message
def markUnread(tab):
    """Prefix *tab*'s title with '*' to flag unread messages.

    No-op when the tab is not currently open, or already marked.
    """
    tabgroup = mainWin["chats"].Widget
    # BUG FIX: the old linear scan fell through with the last index when the
    # tab was missing from openTabs and would then relabel an unrelated tab.
    if tab not in openTabs:
        return
    i = openTabs.index(tab)
    # Get the tab element (don't reuse the `tab` name for it).
    element = mainWin[f"{tab}"]
    title = element.Title
    # Only add the asterisk if the tab isn't already marked unread.
    if not title.startswith("*"):
        title = "*" + title
        tabgroup.tab(i, text=title)
# Mark a tab as read by removing the asterisk at the start of its name
def markRead(tab):
    """Remove the leading '*' unread marker from *tab*'s title.

    *tab* is the title as shown (possibly starting with '*'); the bare name
    is looked up in openTabs to find the tab's position.
    """
    temp = tab.lstrip("*")
    tabgroup = mainWin["chats"].Widget
    # NOTE(review): if temp is not in openTabs this falls through with the
    # last index and relabels the wrong tab — confirm callers always pass
    # an open tab.
    for i in range(len(openTabs)):
        if openTabs[i] == temp:
            break
    title = openTabs[i]
    tabgroup.tab(i,text=title)
# Process IRC commands such as /join etc.
def processCommand(win, irc, query):
    """Parse and execute a slash command typed in the message box.

    Supported: /join, /part, /whois, /unread, /nick, /quit, /reconnect,
    /query (alias /msg), /save, /list. Malformed or unknown commands print
    an error to the info tab. The message box is always cleared.
    """
    global nick
    global loggedIn
    query = query.lstrip("/")
    try:
        if query == "":
            raise InvalidCommand
        query = query.split()
        currentTab = vals1["chats"]
        command = query[0].lower()
        if command == "join":
            channels = query[1:]
            for chan in channels:
                if chan not in irc.channels:
                    openTabs.append(chan)
                    irc.join(chan)
                    create_tab(win, chan)
        elif command == "part":
            channels = query[1:]
            for chan in channels:
                if chan in irc.channels:
                    openTabs.remove(chan)
                    delete_tab(win, chan)
                    irc.part(chan)
                    names[chan] = []
                else:
                    win[f"{currentTab}B"].update("Need to be in channel", append=True)
        elif command == "whois":
            if len(query) == 2:
                irc.whois(query[1])
        elif command == "unread":
            channels = query[1:]
            for chan in channels:
                if chan in irc.channels:
                    markUnread(chan)
        elif command == "nick":
            # Changing the nick forces a re-login in the main loop.
            if len(query) == 2:
                nick = query[1]
                loggedIn = False
        elif command == "quit":
            msg = None
            if len(query) >= 2:
                msg = ' '.join(query[1:])
            irc.quitC(msg)
            quit()
        elif command == "reconnect":
            irc.reconnect()
            for tab in openTabs[1:]:
                irc.join(tab)
        elif command == "query" or command == "msg":
            # BUG FIX: this branch used to assign the message target to the
            # *global* `nick`, silently changing the user's own nickname.
            target = query[1]
            if target == "NickServ":
                # Make sure that there are enough params.
                if len(query) > 2:
                    irc.nickserv(query[2], query[3:])
                else:
                    raise InvalidCommand
            else:
                openTabs.append(target)
                irc.join(target)
                create_tab(win, target)
                if len(query) > 2:
                    msg = ' '.join(query[2:])
                    sendMsg(win, irc, target, msg)
        elif command == "save":
            channels = query[1:]
            if len(channels) >= 1:
                for tab in channels:
                    if tab in tabHist:
                        save_tab(tab)
            else:
                # No arguments: save every tab seen so far.
                for tab in tabHist:
                    save_tab(tab)
            msg = "Sucessfully saved chat(s) in folder chatlog\n"
            font = ("Helvetica", 10, "bold")
            mainWin[f"{currentTab}B"].update(msg, text_color_for_value="dark red", font_for_value=font, append=True)
        elif command == "list":
            irc.listChan()
        else:
            raise InvalidCommand
    except InvalidCommand:
        win["infoB"].update("Unknown command\n", append=True)
    finally:
        win["msgbox"].update("")
# Send a message to a channel or private message
# We break the string down to color the users own nick for better readability
def sendMsg(win, irc, chan, msg):
    """Send *msg* to *chan* and echo it locally with the sender's nick colored.

    The info tab is local-only, so nothing is sent there; the input box is
    cleared in every case.
    """
    if chan != "info":
        irc.privmsg(f"{chan}", msg)
        chat_box = win[f"{chan}B"]
        # Echo as: "<time> | <nick> > <message>", with the nick in purple.
        chat_box.update(f"{current_time} | ", append=True)
        chat_box.update(f"{irc.NICK} ", text_color_for_value="purple", append=True)
        chat_box.update("> " + msg + "\n", append=True)
    win["msgbox"].update("")
t = time.localtime()
# Initial login window
(server,port,nick,user,rname,ssl) = loginWin("irc.tilde.chat","6697")
# Initialize main window thereafter
mainWin = sg.Window("Slick IRC",mainLayout(),font=("Helvetica","13"),default_button_element_size=(8,2),finalize=True)
# Initialize irc client and connect
irc = Client(mainWin)
irc.connect(server,port,ssl)
loggedIn = False
failedLogin = False
fList = []
# All the tabs seen so far
tabHist = ["info"]
openTabs = ["info"]
# Channel name -> list of member nicks.
names = dict()
# Main event loop: poll the GUI roughly once per millisecond.
while True:
    # Event and values
    ev1, vals1 = mainWin.read(timeout=1)
    t = time.localtime()
    current_time = time.strftime("%H:%M:%S", t)
    # We haven't successfully logged in yet
    if not loggedIn:
        irc.login(nick,user,rname)
        loggedIn = True
    # We failed login, darn it, try again and display error
    if irc.failedLogin:
        errorWin("Nickname in use, try a different one!")
        (server,port,nick,user,rname,ssl) = loginWin(server,port,nick,user,rname)
        loggedIn = False
    if not irc.connected:
        errorWin("Cannot connect to server")
        (server,port,nick,user,rname,ssl) = loginWin(server,port,nick,user,rname)
        irc.connect(server,port,ssl)
        irc.login(nick,user,rname)
    if ev1 == "SEND":
        query = vals1["msgbox"].rstrip()
        # Ignore bogus empty messages
        if query != "":
            # An IRC command
            if query.startswith("/"):
                processCommand(mainWin,irc,query)
            else:
                sendMsg(mainWin,irc,vals1["chats"],query)
    # Mark a channel as read when its tab is currently selected:
    if vals1["chats"].startswith("*"):
        markRead(vals1["chats"])
    if ev1 == "Server settings":
        (oldServ,oldPort) = (server,port)
        (server,port,nick,user,rname,ssl) = loginWin(server,port,nick,user,rname)
        # Only reconnect when the server or port actually changed.
        if oldServ != server or oldPort != port:
            irc.disconnect()
            irc.connect(server,port,ssl)
            irc.login(nick,user,rname)
    if ev1 == "Commands":
        commandsWin()
    if ev1 == "Filter settings":
        fList = filterWin(fList)
    if ev1 == "About":
        aboutWin()
    # User wants to exit :(
    if ev1 == sg.WIN_CLOSED or ev1 == "EXIT" or ev1 == "Exit":
        irc.quitC()
        break
mainWin.close() | tvlpirb/slick-irc | client.py | client.py | py | 17,925 | python | en | code | 1 | github-code | 13 |
35534079583 | import random
from hw5_cards import Card, print_hand
# create the Hand with an initial set of cards
class Hand:
    '''a hand of playing cards

    Class Attributes
    ----------------
    None

    Instance Attributes
    -------------------
    init_cards: list
        the cards currently held
    '''
    def __init__(self, init_cards=None):
        self.init_cards = [] if init_cards is None else init_cards

    def add_card(self, card):
        '''add a card to the hand

        Silently does nothing when an equal-looking card (same string
        representation) is already held.

        Parameters
        -------------------
        card: instance
            a card to add

        Returns
        -------
        None
        '''
        already_held = any(held.__str__() == card.__str__() for held in self.init_cards)
        if not already_held:
            self.init_cards.append(card)

    def remove_card(self, card):
        '''remove a card from the hand

        Parameters
        -------------------
        card: instance
            a card to remove

        Returns
        -------
        the card, or None if the card was not in the Hand
        '''
        if card in self.init_cards:
            self.init_cards.remove(card)
            return card
        return None

    def draw(self, deck):
        '''draw a card from *deck* and add it to the hand

        Side effect: the deck is depleted by one card.

        Parameters
        -------------------
        deck: instance
            a deck from which to draw

        Returns
        -------
        None
        '''
        self.add_card(deck.deal_card())

    def remove_pairs(self):
        '''Removes pairs of cards in a hand.

        Cards of the same rank cancel in pairs: an even count leaves none,
        an odd count leaves exactly one (the last such card that was held).
        Surviving cards end up ordered by ascending rank.

        Returns
        -------
        None
        '''
        occurrences = {}
        survivor = {}
        for card in self.init_cards:
            occurrences[card.rank] = occurrences.get(card.rank, 0) + 1
            survivor[card.rank] = card
        self.init_cards = [survivor[rank] for rank in range(1, 14)
                           if occurrences.get(rank, 0) % 2 == 1]
class Deck:
    '''a deck of Cards

    Instance Attributes
    -------------------
    cards: list
        the list of Cards currently in the Deck. Initialized to contain
        all 52 cards in a standard deck
    '''
    def __init__(self):
        self.cards = []
        for suit in range(4):
            for rank in range(1, 14):
                card = Card(suit, rank)
                self.cards.append(card) # appends in a sorted order
    def deal_card(self, i=-1):
        '''remove a card from the Deck

        Parameters
        -------------------
        i: int (optional)
            the index of the card to remove. Default (-1) will remove the "top" card

        Returns
        -------
        Card
            the Card that was removed
        '''
        return self.cards.pop(i)
    def shuffle(self):
        '''shuffles (randomizes the order) of the Cards

        self.cards is modified in place

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        random.shuffle(self.cards)
    def replace_card(self, card):
        '''return *card* to the Deck unless an equal-looking card is present

        Membership is judged by string representation, so a card can never
        be duplicated in the Deck.

        Parameters
        -------------------
        card: Card
            the card to put back

        Returns
        -------
        None
        '''
        card_strs = [] # forming an empty list
        for c in self.cards: # each card in self.cards (the initial list)
            card_strs.append(c.__str__()) # appends the string that represents that card to the empty list
        if card.__str__() not in card_strs: # if the string representing this card is not in the list already
            self.cards.append(card) # append it to the list
    def sort_cards(self):
        '''returns the Deck to its original order

        Cards will be in the same order as when Deck was constructed.
        self.cards is modified in place.

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        self.cards = []
        for suit in range(4):
            for rank in range(1, 14):
                card = Card(suit, rank)
                self.cards.append(card)
    def deal_hand(self, hand_size):
        '''removes and returns hand_size cards from the Deck

        self.cards is modified in place. Deck size will be reduced
        by hand_size

        Parameters
        -------------------
        hand_size: int
            the number of cards to deal

        Returns
        -------
        list
            the top hand_size cards from the Deck
        '''
        hand_cards = []
        for i in range(hand_size):
            hand_cards.append(self.deal_card())
        return hand_cards
    def deal(self, num_hands, num_cards) -> list:
        '''Deals "num_hands" hands of "num_cards" cards for each one. If num_cards is -1, all cards in the deck should be
        dealt even though this may result in uneven hands.

        Parameters
        ----------
        num_hands: int
            Number of hands to generate.
        num_cards: int
            Number of cards per hand.

        Returns
        -------
        hands: list of Hand
            Hands generated.
        '''
        # NOTE(review): this assert disappears under `python -O`; raising
        # ValueError would be a safer guard — confirm callers.
        if num_cards > 0:
            assert num_hands * num_cards <= len(self.cards), "too many cards to be dealt"
        hands = []
        if num_cards == -1:
            # Deal the deck as evenly as possible; the last hand absorbs
            # any remainder.
            num_cards_tgt = len(self.cards) // num_hands
            for _ in range(num_hands - 1):
                hands.append(Hand(self.deal_hand(num_cards_tgt)))
            hands.append(Hand(self.deal_hand(len(self.cards))))
        else:
            for _ in range(num_hands):
                hands.append(Hand(self.deal_hand(num_cards)))
        return hands
| 10258392511/W21_HW5 | hw5_cards_ec2.py | hw5_cards_ec2.py | py | 5,859 | python | en | code | null | github-code | 13 |
17039312764 | from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
# database setup
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
"""
create the db file by going to terminal:
>>> from app import db
>>> db.create_all()
in terminal:
add entries:
>>> db.session.add(ModelName(**kwargs))
>>> db.session.commit() # must commit or it disappear after session ends
querys:
>>> ModelName.query.all()
>>> ModelName.query.first()
>>> ModelName.query.get(index)
>>> ModelName.query.all()[index]
>>> ModelName.query.all()[index].key
>>> ModelName.query.filter_by(key=value).all()
>>> ModelName.query.order_by(BlogPost.date_posted).all()
delete:
>>> db.session.delete(ModelName.query...)
>>> db.session.commit()
"""
# database schema/model
class BlogPost(db.Model):
    """SQLAlchemy model for a single blog post."""
    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False) # up to 100 characters
    content = db.Column(db.Text, nullable=False) # Text has no length limit
    author = db.Column(db.String(20), nullable=False)
    # Set automatically at insert time (UTC).
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    def __repr__(self):
        return 'Blog post ' + str(self.id)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/posts', methods=['GET', 'POST'])
def posts():
    """List all posts (GET) or create a new one (POST).

    POST expects form fields 'title', 'content' and 'author'; if any is
    blank the post is silently not created, and we redirect back either way.
    """
    # POST
    if request.method == 'POST':
        post_title = request.form['title']
        post_content = request.form['content']
        post_author = request.form['author']
        if post_title and post_content and post_author:
            new_post = BlogPost(title=post_title, content=post_content, author=post_author)
            db.session.add(new_post) # insert
            db.session.commit() # don't forget
        return redirect('/posts')
    # GET
    else:
        all_posts = BlogPost.query.order_by(BlogPost.date_posted).all() # retrieve
        return render_template('posts.html', posts=all_posts)
@app.route('/posts/delete/<int:id>')
def delete_post(id):
    """Delete post *id* (404 if missing) and return to the post list."""
    # NOTE(review): destructive action on a GET route — a POST/DELETE route
    # would be safer against accidental triggers (e.g. prefetchers).
    post = BlogPost.query.get_or_404(id)
    db.session.delete(post)
    db.session.commit()
    return redirect('/posts')
@app.route('/posts/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit post *id*: GET shows the form, POST applies the changes."""
    post = BlogPost.query.get_or_404(id)
    if request.method == 'POST':
        post.title = request.form['title']
        post.author = request.form['author']
        post.content = request.form['content']
        db.session.commit()
        return redirect('/posts')
    else:
        return render_template('edit.html', post=post)
@app.route('/home/<string:name>/posts/<int:id>')
def hello(name, id):
    """Demo route: greet *name* and echo the post id."""
    return f"Hi, {name}{id}"
@app.route('/onlyget', methods=['GET'])
def get_req():
    """Demo route restricted to GET requests."""
    print("GET: /onlyget")
    return "GET"
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(debug=True)
| TerryLun/Learm-Flask-with-Database-CRUD | app.py | app.py | py | 3,009 | python | en | code | 0 | github-code | 13 |
8665953581 | #!/usr/bin/env python
# coding: utf-8
import parselmouth
import numpy as np
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
# Streamlit page: interactively show how Praat's pitch floor/ceiling settings
# change the extracted pitch track and mean pitch of a fixed RAVDESS sample.
st.markdown('# How Analysis Parameters Affect Pitch Measures in Praat')
st.markdown('# ')
# Load sound into Praat
sound = parselmouth.Sound("03-01-01-01-01-01-01.wav")
# NOTE(review): this file handle is never closed — consider a `with` block.
audio_file = open('03-01-01-01-01-01-01.wav', 'rb')
audio_bytes = audio_file.read()
st.audio(audio_bytes, format='audio/wav')
st.sidebar.markdown("## Praat's Pitch Floor and Ceiling Settings")
floor = st.sidebar.slider('Pitch Floor', 50, 250, 75)
ceiling = st.sidebar.slider('Pitch Ceiling', 300, 700, 600)
kill_octave_jumps = st.sidebar.checkbox("Kill Octave Jumps")
pitch = sound.to_pitch(pitch_floor=floor, pitch_ceiling=ceiling)
if kill_octave_jumps:
    # NOTE(review): the return value is discarded — confirm that
    # "Kill octave jumps" mutates `pitch` in place rather than returning
    # a new Pitch object.
    parselmouth.praat.call(pitch, "Kill octave jumps")
x = pitch.xs()
pitch_values = pitch.selected_array['frequency']
# Praat reports unvoiced frames as 0 Hz; mask them so they don't plot.
pitch_values[pitch_values==0] = np.nan
y = pitch_values
df = pd.DataFrame({"Time (s)": x,
"Frequency (Hz)":y})
st.markdown(f"# Mean Pitch: {round(df['Frequency (Hz)'].mean(), 3)}")
fig, ax = plt.subplots()
ax.plot(df['Time (s)'], df['Frequency (Hz)'], marker='o')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Frequency (Hz)')
ax.grid(True)
st.pyplot(fig)
st.markdown(f"# Data")
st.markdown(f"## NaN's filtered out")
st.table(df.dropna())
st.text("""Sound from: Livingstone SR, Russo FA (2018)
The Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS):
A dynamic, multimodal set of facial and vocal expressions in North American English.
PLoS ONE 13(5): e0196391. https://doi.org/10.1371/journal.pone.0196391.""")
| drfeinberg/PraatPitchParameters | app.py | app.py | py | 1,649 | python | en | code | 0 | github-code | 13 |
37535264833 | """ This is a config file for the entire application to work"""
# SECURITY: real-looking account credentials are hard-coded below. They should
# be revoked and loaded from environment variables or a secrets store instead
# of being committed to source control.
ORG_EMAIL = "@gmail.com"
FROM_EMAIL = "anitoshri" + ORG_EMAIL
FROM_PWD = "tobinaruto"
# IMAP endpoint (despite the SMTP_* names) used to read the mailbox.
SMTP_SERVER = "imap.gmail.com"
SMTP_PORT = 993
# Mailbox whose messages are processed.
CONTENT_EMAIL = "animesh.mukherjeei323460@gmail.com"
EXCEL_CONFIG = './input_data.xlsx'
LOGO = 'input_logo.jpg' | Animesh420/automated_email | config.py | config.py | py | 339 | python | en | code | 0 | github-code | 13 |
9051930737 | import re
from const import Url
from util import get_requests_response, get_beautiful_soup_object
def get_ammo_data():
    """Scrape the wiki's Ammunition page and every linked caliber page.

    Returns
    -------
    dict
        Maps caliber name -> list of row dicts. Each row dict carries the
        caliber name, the (cleaned) icon URL and one entry per table column.
    """
    ammo_caliber_url_list = []
    ammo_list = {}
    # Collect the caliber page slugs from the Ammunition overview tables.
    res = get_requests_response(Url.EN_WIKI, "Ammunition")
    soup = get_beautiful_soup_object(res, class_name="mw-parser-output")
    soup.find("div", {"class": "toc"}).decompose()
    for table in soup.find_all("table", {"class": "wikitable"}):
        for ammo_caliber in table.find("tbody").find_all("tr")[1:]:
            ammo_caliber_url_list.append(
                ammo_caliber.find("a").get("href").replace("/wiki/", "")
            )
    for ammo_caliber_url in ammo_caliber_url_list:
        res = get_requests_response(Url.EN_WIKI, ammo_caliber_url)
        soup = get_beautiful_soup_object(res, class_name="mw-parser-output")
        caliber_name = ammo_caliber_url.replace("_", " ")
        ammo_list[caliber_name] = []
        for table in soup.find_all("table", {"class": "wikitable"}):
            # BUG FIX: the header list used to be shared across every table
            # and caliber page, so later tables were zipped against stale
            # column names. Reset it per table.
            ammo_header = []
            for n, row in enumerate(table.find("tbody").find_all("tr")):
                if n == 0:
                    # First row holds the column headers (skip the icon column).
                    for theader in row.find_all("th")[1:]:
                        ammo_header.append(
                            theader.get_text(strip=True).replace("\xa0%", "")
                        )
                else:
                    ammo_data = {}
                    ammo_data["Caliber"] = caliber_name
                    # Prefer the lazy-loaded data-src URL, falling back to src,
                    # and strip the thumbnail scaling path either way.
                    icon = row.find(["th", "td"]).find("img")
                    try:
                        raw_src = icon["data-src"]
                    except (KeyError, TypeError):
                        raw_src = icon["src"]
                    ammo_data["Icon"] = (
                        re.sub(r"scale-to-width-down/[0-9]*\?cb=[0-9]*", "", raw_src)
                        + "?format=original"
                    )
                    for theader, ammo in zip(
                        ammo_header, row.find_all(["th", "td"])[1:]
                    ):
                        ammo_data[theader] = ammo.get_text(strip=True)
                    ammo_list[caliber_name].append(ammo_data)
    return ammo_list
43061977521 | import torch
import torch.nn as nn
from .module import Flatten
import math
# Layer specs for the VGG variants: integers are conv output channel counts,
# 'M' marks a 2x2 max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
# Reference URLs for the ImageNet-pretrained torchvision VGG weights
# (kept for reference; not loaded anywhere in this module).
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
# BUG FIX: __all__ used to list 'cifar10' and 'cifar100', neither of which is
# defined in this module, so `from <module> import *` raised AttributeError.
# Export the factory functions that actually exist.
__all__ = ['vgg11', 'vgg13', 'vgg16', 'vgg19', 'simpleCNN']
DIM = 128
class CIFAR10(nn.Module):
    """All-convolutional CIFAR-10 classifier using ELU activations and
    GroupNorm (32 groups); stride-2 convolutions replace pooling, and a
    1x1 conv to 10 channels plus 2x2 average pooling replaces the usual
    fully-connected head.
    """
    def __init__(self):
        super(CIFAR10, self).__init__()
        self.classifier = nn.Sequential(
            # Stage 1: 3 -> 96 channels; last conv downsamples (stride 2).
            nn.Conv2d(3,96,3),
            nn.GroupNorm(32,96),
            nn.ELU(),
            nn.Conv2d(96,96,3),
            nn.GroupNorm(32,96),
            nn.ELU(),
            nn.Conv2d(96,96,3, stride=2),
            nn.GroupNorm(32,96),
            nn.ELU(),
            nn.Dropout2d(0.5),
            # Stage 2: 96 -> 192 channels; last conv downsamples (stride 2).
            nn.Conv2d(96,192,3),
            nn.GroupNorm(32,192),
            nn.ELU(),
            nn.Conv2d(192,192,3),
            nn.GroupNorm(32,192),
            nn.ELU(),
            nn.Conv2d(192,192,3,stride=2),
            nn.GroupNorm(32,192),
            nn.ELU(),
            nn.Dropout2d(0.5),
            # Head: 3x3 conv, 1x1 conv, then project to 10 class channels
            # and average-pool before flattening.
            nn.Conv2d(192,192,3),
            nn.GroupNorm(32,192),
            nn.ELU(),
            nn.Conv2d(192,192,1),
            nn.GroupNorm(32,192),
            nn.ELU(),
            nn.Conv2d(192,10,1),
            nn.AvgPool2d(2),
            # Flatten is a project helper — presumably reshapes to (N, 10)
            # logits; confirm against .module.Flatten.
            Flatten()
        )
    def forward(self,x):
        x = self.classifier(x)
        return x
class CIFAR10_VGG(nn.Module):
    """VGG-style CIFAR-10 classifier assembled from a `cfg` layer spec.

    The spec is a list where integers are conv output channel counts
    (3x3, padding 1, each followed by BatchNorm + ReLU) and 'M' is a
    2x2 max-pool; a 512 -> 10 linear layer forms the classifier head.
    """

    def __init__(self, vgg_name):
        super(CIFAR10_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)
        self._initialize_weights()

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate the layer spec into an nn.Sequential feature extractor."""
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        # Trailing 1x1/stride-1 average pool (identity-shaped; kept for
        # compatibility with the original architecture definition).
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """He-style init for convs, unit/zero for BatchNorm, N(0, 0.01) for Linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def vgg11():
    """Construct an 11-layer VGG for CIFAR-10."""
    return CIFAR10_VGG('VGG11')
def vgg13():
    """Construct a 13-layer VGG for CIFAR-10."""
    return CIFAR10_VGG('VGG13')
def vgg16():
    """Construct a 16-layer VGG for CIFAR-10."""
    return CIFAR10_VGG('VGG16')
def vgg19():
    """Construct a 19-layer VGG for CIFAR-10."""
    return CIFAR10_VGG('VGG19')
def simpleCNN():
    """Construct the plain all-convolutional CIFAR-10 model."""
    return CIFAR10()
| psr6275/ensembleDL | models/cifar10.py | cifar10.py | py | 3,872 | python | en | code | 1 | github-code | 13 |
20787332804 | """
collect.py
"""
from collections import Counter
import matplotlib.pyplot as plt
import networkx as nx
import sys
import time
import itertools
from TwitterAPI import TwitterAPI
import pickle
# SECURITY: live Twitter API credentials are hard-coded and committed here.
# They should be revoked and loaded from environment variables or a config
# file excluded from version control.
consumer_key = '1rhjWHiOG0xfHf5zkQL3yqjK6'
consumer_secret = 'qWxOCAgtZbhgXb8uJelTPE4xdlAm9A9N64vK9XvRTuCv4Stm2w'
access_token = '768548145852669952-qDKfAdGqT2gZZXgTFxQqy2w3rSjzf2U'
access_token_secret = 'b9JoEo88LoauZpFP3Twny6raogOjGvHeXv7rAMvCz580F'
# Cache of screen_name -> friends list, pickled incrementally by get_friends.
friend_dict = {}
def get_twitter():
    """ Construct an instance of TwitterAPI using the tokens you entered above.
    Returns:
      An instance of TwitterAPI.
    """
    return TwitterAPI(consumer_key, consumer_secret, access_token, access_token_secret)
def robust_request(twitter, resource, params, max_tries=5):
    """ If a Twitter request fails, sleep for 15 minutes.
    Do this at most max_tries times before quitting.
    Args:
      twitter .... A TwitterAPI object.
      resource ... A resource string to request; e.g., "friends/ids"
      params ..... A parameter dict for the request, e.g., to specify
                   parameters like screen_name or count.
      max_tries .. The maximum number of tries to attempt.
    Returns:
      A TwitterResponse object, or None if failed.
    """
    for i in range(max_tries):
        try:
            request = twitter.request(resource, params)
        except Exception as e:
            # BUG FIX: the old handler printed request.text inside this
            # branch, but `request` is unbound when twitter.request() itself
            # raises (NameError). Report the exception instead, and never
            # use a bare except.
            print('Got error %s \nsleeping for 15 minutes.' % e)
            time.sleep(61 * 15)
            continue
        if request.status_code == 200:
            return request
        print('Got error %s \nsleeping for 15 minutes.' % request.text)
        sys.stderr.flush()
        time.sleep(61 * 15)
def gather_tweets():
    """Collect 400+ English tweets mentioning @JohnGrisham via paged search.

    Side effects: pickles the raw tweets to tweets.pkl and writes user /
    message counts to collect_data_results.txt. Returns the tweet list.
    """
    twitter = get_twitter()
    request = robust_request(twitter,'search/tweets', {'q':'@JohnGrisham','language':'en'})
    tweets = [r for r in request]
    # Page backwards through older tweets using the id of the last result.
    # NOTE(review): max_id is inclusive, so each page re-fetches the last
    # tweet of the previous page — consider max_id - 1 to avoid duplicates.
    maximum_id = tweets[len(tweets)-1]['id']
    while len(tweets) <= 400:
        request = robust_request(twitter,'search/tweets', {'q':'@JohnGrisham','language':'en','max_id':maximum_id})
        for r in request:
            tweets.append(r)
        maximum_id = tweets[len(tweets)-1]['id']
    # NOTE(review): the pickle file handle is never closed explicitly.
    pickle.dump(tweets, open('tweets.pkl', 'wb'))
    f = open('collect_data_results.txt','w',encoding='utf-8')
    number_of_users = len(set([t['user']['screen_name'] for t in tweets]))
    number_of_messages = len(tweets)
    f.write("Number of users collected:%s"%number_of_users)
    f.write("\nNumber of messages collected:%s"%number_of_messages)
    f.close()
    return tweets
def get_friends(user_screen_name):
    """Fetch up to 200 accounts that *user_screen_name* follows.

    Caches the result in the module-level friend_dict and re-pickles the
    whole dict to friends.pkl on every call. Returns the friends list.
    """
    twitter = get_twitter()
    request = robust_request(twitter,'friends/list',{'screen_name':user_screen_name,'count':200})
    friends = [r for r in request]
    friend_dict[user_screen_name] = friends
    pickle.dump(friend_dict, open('friends.pkl', 'wb'))
    return friends
def get_followers():
    """Fetch up to 200 followers of @JohnGrisham, pickle them to
    followers.pkl, and return the list."""
    twitter = get_twitter()
    request = robust_request(twitter,'followers/list',{'screen_name':'JohnGrisham','count':200})
    followers = [r for r in request]
    pickle.dump(followers, open('followers.pkl', 'wb'))
    return followers
def main():
    """Collect tweets mentioning @JohnGrisham, then fetch the followers list
    and each tweet author's friends."""
    tweets = gather_tweets()
    # De-duplicate authors so we don't burn rate-limited API calls
    # re-fetching the same user's friend list.
    tweet_authors = set(t['user']['screen_name'] for t in tweets)
    get_followers()
    for author in tweet_authors:
        get_friends(author)
if __name__ == '__main__':
    # Run the full collection pipeline when executed as a script.
    main()
| sshenoy6/Online-Social-Network-Analysis | Sentiment Analysis about John Grisham/collect.py | collect.py | py | 3,404 | python | en | code | 0 | github-code | 13 |
32010405734 | import os
import re
from flask import request
from validate_email import validate_email
from werkzeug.utils import secure_filename
# Validation patterns: passwords need a lowercase, an uppercase and a digit
# within at least 8 word characters (underscore excluded); usernames/names
# allow letters, digits, spaces and "_.-"; ids and quantities are digits only.
pass_reguex = "^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[^\W_]{8,}$"
user_reguex = "^[a-zA-Z0-9 _.-]+$"
id_reguex = "^[0-9]+$"

# Status / flag constants shared across the application.
F_ACTIVE = 'ACTIVE'
F_INACTIVE = 'INACTIVE'
EMAIL_APP = 'EMAIL_APP'
REQ_ACTIVATE = 'REQ_ACTIVATE'
REQ_FORGOT = 'REQ_FORGOT'
U_UNCONFIRMED = 'UNCONFIRMED'
U_CONFIRMED = 'CONFIRMED'

def isEmailValid(email):
    """Return whether *email* is a syntactically valid address."""
    return validate_email(email)

def isUsernameValid(user):
    """Return True when *user* matches the username pattern."""
    return re.search(user_reguex, user) is not None

def isPasswordValid(password):
    """Return True when *password* satisfies the complexity pattern."""
    return re.search(pass_reguex, password) is not None

def isIdValid(id):
    """Return True when *id* is a purely numeric string."""
    return re.search(id_reguex, id) is not None

def isNombreValid(nombre):
    """Return True when *nombre* matches the username/name pattern."""
    return re.search(user_reguex, nombre) is not None

def isCantidadValid(cantidad):
    """Return True when *cantidad* is a purely numeric string."""
    return re.search(id_reguex, cantidad) is not None
# UPLOAD_FOLDER = '/path/to/the/uploads'
# ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
# app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.jpeg']
# app.config['UPLOAD_FOLDER'] = 'UPLOAD_FOLDER' # configuracion para directorio en el servidor
def validarImagen():
    """Validate and persist the uploaded 'imagen' file from the request.

    Returns a (message, payload) tuple: ("", blob_bytes) on success, or
    (error_message, "Error") when no file was sent or the extension is not
    one of .jpg/.png/.jpeg.
    """
    msj = ""
    archivoCargado = request.files['imagen']
    nombreArchivo = secure_filename(archivoCargado.filename)
    if nombreArchivo != '':
        archivoExtension = os.path.splitext(nombreArchivo)[1] # split the name to inspect the file extension
        if archivoExtension not in ['.jpg', '.png', '.jpeg']:
            msj = "Tipo de archivo no es válido"
            return (msj,"Error")
        # msj = f"El archivo {nombreArchivo} cargo exitosamente"
        archivoCargado.save(
            'static/imagenes/cargadas/' + nombreArchivo) # saves the image relative to this app.py's working directory
        # archivoCargado.save(os.path.join(app.config['UPLOAD_FOLDER'], nombreArchivo)) # alternative: save into a configured server directory
        print(archivoCargado.filename)
        print(type(archivoCargado))
        # NOTE(review): the read-back uses the *raw* uploaded filename while
        # the save used the secure_filename-sanitized one — these can differ
        # and then this read fails; confirm and unify on nombreArchivo.
        img = convertirADatoBinario('static/imagenes/cargadas/'+archivoCargado.filename)
        # print(img)
        return (msj,img)
    else:
        msj = "Por favor cargar un archivo"
        return (msj,"Error")
def convertirADatoBinario(imagen):
    """Read the file at path *imagen* and return its raw bytes (BLOB)."""
    with open(imagen, 'rb') as fuente:
        contenido = fuente.read()
    return contenido
def convertirANombreImagen_lista(productos):
    """For each product row, replace the image BLOB (index 3) with the name
    of a JPEG written to static/imagenes/cargadas/ (accesorio_<id>.jpg).

    Rows are tuples whose index 0 is the product id; returns a new list of
    tuples, leaving the input rows untouched.
    """
    lista = []
    for p in productos:
        registro_accesorio = list(p)
        # Column 0 is the id used to name the file; column 3 holds the bytes.
        registro_accesorio[3] = convertirBinarioAImagen(p[0],p[3])
        p = tuple(registro_accesorio)
        lista.append(p)
    return lista
def convertirBinarioAImagen(id_foto,binario):
    """Write *binario* to static/imagenes/cargadas/accesorio_<id_foto>.jpg
    and return the bare file name (the directory must already exist)."""
    with open('static/imagenes/cargadas/accesorio_{}.jpg'.format(id_foto),'wb') as archivo:
        archivo.write(binario)
    return "accesorio_{}.jpg".format(id_foto)
| leonardochica/MGInventarios | utils.py | utils.py | py | 3,068 | python | es | code | 0 | github-code | 13 |
26554216669 | #Inspired by: https://towardsdatascience.com/inroduction-to-neural-networks-in-python-7e0b422e6c24
import numpy as np
import ActivationFunctions as af
# Toy training set: 7 examples x 3 features; by inspection, each label in
# `output` equals the first feature of the corresponding row.
# NOTE(review): `input` shadows the builtin of the same name — consider
# renaming (e.g. inputs).
input = np.array([
    [1,0,1],
    [0,0,1],
    [0,0,1],
    [0,0,1],
    [1,0,1],
    [1,0,1],
    [0,0,1]])
output = [1,0,0,0,1,1,0]
class NeuralNetwork:
    """Single-layer perceptron trained with full-batch gradient descent.

    One weight per input feature maps each example to a sigmoid output.
    Activation functions come from the project's ActivationFunctions module
    (imported as `af`).
    """
    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs
        # BUG FIX: the weights were a plain Python list, and
        # `list += ndarray` *extends* the list (element by element) instead
        # of adding element-wise, so training crashed on the second epoch
        # with a shape mismatch. Store the weights as an ndarray so `+=`
        # is true element-wise accumulation.
        self.weights = np.array([.50, .50, .50])
    def feed_forward(self):
        # Predictions for the whole training set at once.
        self.hidden = self.apply_sigmoid(self.inputs, self.weights)
    def backpropagation(self):
        # The error is the difference between the correct output and the predicted one.
        self.error = self.outputs - self.hidden
        # Delta scales the error by the slope of the sigmoid at the prediction.
        delta = self.error * af.sigmoid_derivative(self.hidden)
        # inputs.T @ delta sums each example's contribution per weight,
        # giving the full-batch gradient for the weight update.
        self.weights += np.dot(self.inputs.T, delta)
    def train(self, epochs=25000):
        for epoch in range(epochs):
            self.feed_forward()
            self.backpropagation()
    def predict(self, input):
        return self.apply_sigmoid(input, self.weights)
    def apply_sigmoid(self, inputs, weights):
        return af.sigmoid(np.dot(inputs, weights))
# Train on the toy data, then print predictions for three probe inputs.
NN = NeuralNetwork(input, output)
NN.train()
print("Prediction: ", NN.predict(np.array([[0,0,1]])), " - Correct: 0")
print("Prediction: ", NN.predict(np.array([[1,0,1]])), " - Correct: 1")
print("Prediction: ", NN.predict(np.array([[0,0,0]])), " - Correct: 0")
| MarianoVilla/NeuralPlayground | NeuralPlayground.PythonConsole/BinaryClassifier.py | BinaryClassifier.py | py | 1,774 | python | en | code | 0 | github-code | 13 |
13790344211 | import utils
import sys
import time
import numpy as np
import itertools
def vis(dct):
    """Print an ASCII rendering of the elf positions in *dct*.

    *dct* is a collection of (row, col) tuples; occupied cells print '#',
    empty cells '.', cropped to the bounding box of all positions.
    """
    l_vals, c_vals = [x[0] for x in dct], [x[1] for x in dct]
    l_min, l_max, c_min, c_max = int(min(l_vals)), int(max(l_vals)), int(min(c_vals)), int(max(c_vals))
    for l in range(l_max - l_min + 1):
        for c in range(c_max - c_min + 1):
            if (l, c,) in dct:
                print('#', end='')
            else:
                print('.', end='')
        print()
    print()
if __name__ == '__main__':
    start_time = time.time()
    # Parse the grid: '#' cells are elves.
    # BUG FIX: the comparison used `x is "#"` (string *identity*), which only
    # works by accident of CPython string interning and raises a
    # SyntaxWarning; compare with == instead.
    elves_map = np.array([[1 if x == "#" else 0 for x in line] for line in utils.read_file_as_lines(sys.argv[1])])
    diml, dimc = np.shape(elves_map)
    # Convert the dense grid into a sparse set of (row, col) elf positions.
    elves_map_new = set()
    for l, c in itertools.product(range(diml), range(dimc)):
        if elves_map[(l, c)] == 1:
            elves_map_new.add((l, c, ))
    elves_map = elves_map_new
    stop_time = time.time()
    print(f"Initialization time: {stop_time - start_time} seconds")
    start_time = stop_time
    # Neighbour offsets checked per proposal direction: north, south, west, east.
    dir_check = (((-1, -1,), (-1, 0,), (-1, 1,)),  # north
                 ((1, -1,), (1, 0,), (1, 1,)),  # south
                 ((-1, -1,), (0, -1,), (1, -1,)),  # left
                 ((-1, 1,), (0, 1,), (1, 1,))  # right
                 )
    counter = 0
    def do_step(dir_check, steps):
        """Run one round of proposals/moves; return (steps + 1, any_elf_moved)."""
        do_move = False
        # Phase 1: each elf with at least one neighbour proposes the first
        # free direction, starting from a direction that rotates each round.
        proposed_steps = {}
        for e in elves_map:
            e = tuple(e)
            nb = utils.get_diagneighbours(e)
            if elves_map.isdisjoint(set(nb)):
                continue
            for d in range(4):
                d = (d + counter) % 4
                # The mean of the three checked offsets is the step direction.
                means = [sum([x[0] for x in dir_check[d]])/3, sum([x[1] for x in dir_check[d]])/3]
                if elves_map.isdisjoint(set([(x[0] + e[0], x[1] + e[1],) for x in dir_check[d]])):
                    proposed_steps.setdefault((int(e[0] + means[0]), int(e[1] + means[1])), []).append(e)
                    break
        # Phase 2: only uncontested proposals are executed.
        for newpos, oldpos in proposed_steps.items():
            if len(oldpos) == 1:
                do_move = True
                elves_map.remove(oldpos[0])
                elves_map.add(newpos)
        return steps + 1, do_move
    # Part 1: exactly 10 rounds.
    for i in range(10):
        counter, did_move = do_step(dir_check, counter)
    def calc_score():
        """Empty ground tiles inside the elves' bounding box."""
        l_vals, c_vals = [x[0] for x in elves_map], [x[1] for x in elves_map]
        l_min, l_max, c_min, c_max = int(min(l_vals)), int(max(l_vals)), int(min(c_vals)), int(max(c_vals))
        return (l_max - l_min + 1) * (c_max - c_min + 1) - len(l_vals)
    score = calc_score()
    stop_time = time.time()
    if sys.argv[1].startswith("Test"):
        assert score == 110
    print(f"pt1 solution: {score.real} time overall: {stop_time - start_time}")
    # Part 2: keep stepping until no elf moves; the answer is the round count.
    while True:
        counter, did_move = do_step(dir_check, counter)
        if not did_move:
            break
    score = counter
    if sys.argv[1].startswith("Test"):
        assert score == 20
    stop_time = time.time()
    print(f"pt2 solution: {score} time overall: {stop_time - start_time}")
| DennisKlpng/AOC_2022 | AOC_23.py | AOC_23.py | py | 3,137 | python | en | code | 1 | github-code | 13 |
28700831486 | import pygame
class Player(pygame.sprite.Sprite):
    """A filled square sprite that snaps to the mouse cursor each frame."""

    def __init__(self):
        super().__init__()
        side = 50
        # Solid dark-red square used as the sprite's image.
        self.image = pygame.Surface((side, side))
        self.image.fill((150, 0, 0))
        self.rect = self.image.get_rect()

    def update(self):
        # Move the sprite's top-left corner to the current cursor position.
        cursor_x, cursor_y = pygame.mouse.get_pos()
        self.rect.x = cursor_x
        self.rect.y = cursor_y
# --- Application bootstrap: open a window and run the event loop. ---
pygame.init()
screen = pygame.display.set_mode((800, 800))
pygame.display.set_caption("Mouse Movement Test")
running = True
ourPlayer = Player()
sprites = pygame.sprite.Group()
sprites.add(ourPlayer)
# pygame.mouse.set_visible(False)
# Main loop: drain events, update sprite positions, redraw every frame.
while running:
    # pygame.time.wait(0)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    sprites.update()
    # Clear to black before drawing, then swap buffers.
    screen.fill((0, 0, 0))
    sprites.draw(screen)
    pygame.display.flip()
pygame.quit()
| DJLeemstar/PythonLessons | MouseMovement.py | MouseMovement.py | py | 978 | python | en | code | 0 | github-code | 13 |
71460683537 | from flask import jsonify, make_response, request
from app_api import app
from app_api.function import *
# Seed the Role table on first run: if no roles exist yet, create the
# default set.  (Removed the original's unused `index = 1` counter.)
if not models.Role.query.all():
    role_list = ['admin', 'guest', 'vip']
    for r in role_list:
        add_role(r)
@app.route('/')
def index():
    """Root endpoint; simple liveness greeting."""
    greeting = "Hello, this is API!"
    return greeting
@app.route('/api/create_user', methods=['POST'])
def create_user():
    """Create a user from a JSON body containing 'full_name' and 'phone'.

    Returns {'new_user': {...}} with HTTP 201 on success; aborts with 400
    when no JSON body is sent, 500 on missing/empty fields.
    """
    if not request.json:
        abort(400)
    try:
        full_name = request.json['full_name']
        phone = request.json['phone']
        if full_name and phone:
            new_user = add_user(full_name, phone)
            # Serialise the ORM object by hand (no schema layer in this app).
            user_dict = dict(id=new_user.id,
                             full_name=new_user.full_name, phone=new_user.phone,
                             created_on=new_user.created_on, updated_on=new_user.updated_on)
            return jsonify({'new_user': user_dict}), 201
        else:
            abort(500)
    except KeyError:
        # Body is JSON but lacks one of the required keys.
        abort(500)
@app.route('/api/edit_user/<int:id>', methods=['PUT'])
def edit_user(id):
    """Update a user's full name and/or phone from the JSON body.

    Returns {'data changed': {...}} with HTTP 201, or aborts with 400 when
    the body is missing or contains no changes.
    """
    user = filter_id(id)
    if not request.json:
        abort(400)
    # check_request_user_change / check_same_user are helper validators from
    # app_api.function; each may abort() on invalid input.
    full_name_replace, phone_replace = check_request_user_change(request.json)
    check_same_user(full_name_replace, phone_replace)
    if full_name_replace or phone_replace:
        user_replace(id, full_name_replace, phone_replace)
        user_dict = dict(id=user.id,full_name=user.full_name, phone=user.phone,
                    created_on=user.created_on, updated_on=user.updated_on, role=user.role)
        return jsonify({'data changed': user_dict}), 201
    else:
        abort(400)
@app.route('/api/give_roles/<int:id>', methods=['PUT'])
def give_role(id):
    """Assign one role (int) or several roles (list of ints) to user *id*.

    Body shape: {"id": 2} or {"id": [1, 3]}.  Returns the user's updated
    role list with HTTP 201; aborts 500/400 on bad input.
    """
    user = filter_id(id)
    if not user or not request.json or not 'id' in request.json:
        abort(500)
    id_role_list = request.json['id']
    if id_role_list:
        role_all = models.Role.query.all()
        roles_list = roles_id_list(role_all)
        # Accept either a single role id or a list of role ids.
        if type(id_role_list) == int:
            add_in_table_role(user, id_role_list)
        else:
            check_roles_not_found(id_role_list, roles_list)
            for r in id_role_list:
                add_in_table_role(user, r)
        user_dict = show_user_roles(id)
        return jsonify({'new_role_assigned': user_dict}), 201
    else:
        abort(400)
@app.route('/api/remove_roles/<int:id>', methods=['PUT'])
def remove_role(id):
    """Remove one role (int) or several roles (list of ints) from user *id*.

    Body shape matches give_role.  NOTE(review): the success payload key is
    'new_role_assigned', same as give_role — looks copy-pasted; confirm
    whether clients depend on it before renaming.
    """
    user = filter_id(id)
    if not user or not request.json or not 'id' in request.json:
        abort(500)
    id_role_list = request.json['id']
    if id_role_list:
        role_all = models.Role.query.all()
        roles_list = roles_id_list(role_all)
        # NOTE(review): indexing role_all by (role_id - 1) assumes role ids
        # are contiguous starting at 1 — confirm against the seeding logic.
        if type(id_role_list) == int:
            user.roles.remove(role_all[id_role_list - 1])
            db.session.commit()
        else:
            check_roles_not_found(id_role_list, roles_list)
            for r in id_role_list:
                user.roles.remove(role_all[r-1])
                db.session.commit()
        user_dict = show_user_roles(id)
        return jsonify({'new_role_assigned': user_dict}), 201
    else:
        abort(400)
@app.route('/api/get_roles', methods=['GET'])
def get_roles():
    """Return every role as {'roles': [{'id': ..., 'role_name': ...}, ...]}."""
    payload = [
        dict(id=role.id, role_name=role.role_name)
        for role in models.Role.query.all()
    ]
    return jsonify({'roles': payload})
@app.route('/api/get_users', methods=['GET'])
def get_users():
    """Return every user, with role names expanded, as JSON (HTTP 201)."""
    payload = []
    for user in models.Users.query.all():
        role_names = [role.role_name for role in user.roles]
        payload.append(dict(id=user.id,
                            full_name=user.full_name, phone=user.phone,
                            created_on=user.created_on,
                            updated_on=user.updated_on, roles=role_names))
    return jsonify({'users': payload}), 201
# JSON error handlers.  The original defined three different handlers all
# named `not_found`, so each definition shadowed the previous one in the
# module namespace (Flask still registered all of them via the decorator).
# Each handler now has a distinct name; messages are kept byte-for-byte.
@app.errorhandler(404)
def not_found(error):
    # NOTE(review): 'Data is in the database' for 404 and 'Not found' for 400
    # read as if they were swapped — confirm intent before changing.
    return make_response(jsonify({'error': 'Data is in the database'}), 404)
@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({'error': 'Not found'}), 400)
@app.errorhandler(500)
def repeat_inform(error):
    return make_response(jsonify({'error': 'Data is not correct'}), 500)
@app.errorhandler(401)
def unauthorized(error):
    return make_response(jsonify({'error': 'more two id in database'}), 401)
| haraleks/RestAPI_xdev | app_api/routes.py | routes.py | py | 4,387 | python | en | code | 0 | github-code | 13 |
29530387131 | import pika
from Djikstra_Path_Calculator import *
from Preprocessing import *
"""
This code should receive as input:
-------------- For the djikstra --------------
Topology["Adjacency_Matrix"] (rand_net)
Topology["Network_nodes"] (rand_net)
Stream_Source_Destination (rand_net) Note: this value must be treated separately, as it does not come directly from the rand net
Topology["Network_links"] (rand_net)
-------------- For the preprocessing --------------
Topology["Network_links"] (rand network)
Link_order_Descriptor (djikstra)
Number_of_streams (rand stream parameters)
max_frames (rand stream parameters)
Topology["Network_links"] (rand network)
Stream_information["Frames_per_Stream"] (ran stream parameters)
Streams_Period (ran stream parameters)
hyperperiod (ran stream parameters)
-------------- For the preprocessing --------------
This file calls the functions created in Djikstra.py and Preprocessing.py and ensures communication using the
RabbitMQ microservice. For each external microservice there exists a RabbitMQ queue for sending/receiving a JSON file.
Input parameters
┌────────────────────────────────────┬────────────────────────────────────┐
│ Information in the Jet_pre queue │ Information in the Top_pre queue │
│ Stream_Source_Destination │ Adjacency_Matrix (rand_net) │
│ Number_of_streams │ Network_nodes (rand_net) │
│ Max_frames │ Network_links (rand_net) │
│ Frames_per_Stream │ │
│ Streams_Period │ │
│ hyperperiod │ │
└────────────────────────────────────┴────────────────────────────────────┘
Output parameters
┌───────────────────────────────────┐
│ Information in the pre_ilp queue │
│ Number_of_Streams │
│ Network_links │
│ Link_order_Descriptor │
│ Streams_Period │
│ Hyperperiod │
│ Frames_per_Stream │
│ Max_frames │
│ Num_of_Frames │
│ Model_Descriptor │
│ Model_Descriptor_vector │
│ Deathline_Stream │
│ Repetitions │
│ Repetitions_Descriptor │
│ Unused_links │
│ Frame_Duration │
│ │
└───────────────────────────────────┘
┌──────────────┐ Preprocessing Microservice
│ Jetconf │ ┌────────────────────────────────┐
│ Microservice │ │ │
└──────┬───────┘ │ ┌────────────────────────────┐ │ ┌───────────────────┐
│ │ │ │ │ │ ILP │
│ │ │ Djikstra.py │ │ │ Calculator │
└──────────►│ │ │ │ └───────────────────┘
Jet_pre queue │ │ │ │ ▲
│ └────────────────────────────┘ │ │
│ ├─────────────┘
│ ┌────────────────────────────┐ │ pre_ilp queue
Top_pre queue │ │ │ │
┌──────────►│ │ Preprocessing.py │ │
│ │ │ │ │
│ │ │ │ │
┌──────┴───────┐ │ └────────────────────────────┘ │
│ Topology │ │ │
│ Discovery │ └────────────────────────────────┘
└──────────────┘
xxxxxxxxxxxxxxxx
x RabbitMQ x
x Queues x
xxxxxxxxxxxxxxxx
"""
# Connecting and declaring the rabbitmq channel for the jet_pre queueue
import os
import json
from Rabbitmq_queues import *
if __name__ == "__main__":
    # Run only when both upstream microservices have written their outputs:
    # topology discovery -> /var/topology.txt, Jetconf -> /var/jetconf.txt.
    topo_flag = os.path.exists('/var/topology.txt')
    jetconf_flag = os.path.exists ('/var/jetconf.txt')
    if(topo_flag and jetconf_flag):
        with open('/var/topology.txt') as topology_json_file:
            Topology = json.load(topology_json_file)
        with open('/var/jetconf.txt') as jetconf_json_file:
            Stream_information = json.load(jetconf_json_file)
        # Djikstra scheduler: compute per-stream shortest paths over the
        # discovered topology (helpers from Djikstra_Path_Calculator).
        network = Network_Topology(Topology["Adjacency_Matrix"]) # Using the Network Topology class
        all_paths_matrix = all_paths_matrix_generator(Topology["Network_nodes"], network)
        Streams_paths = Streams_paths_generator(all_paths_matrix, Topology["Stream_Source_Destination"])
        Streams_links_paths = Streams_links_paths_generator(Streams_paths)
        Link_order_Descriptor = Link_order_Descriptor_generator(Streams_links_paths, Topology["Network_links"])
        # Preprocessing: derive the model descriptors the ILP solver needs
        # (helpers from Preprocessing).
        Links_per_Stream = Links_per_Stream_generator(Topology["Network_links"], Link_order_Descriptor)
        Model_Descriptor, Model_Descriptor_vector, Streams = Model_Descriptor_generator(Stream_information["Number_of_Streams"], Stream_information["Max_frames"], Topology["Network_links"], Stream_information["Frames_per_Stream"], Links_per_Stream)
        Frame_Duration = Frame_Duration_Generator(Stream_information["Number_of_Streams"], Stream_information["Max_frames"], Topology["Network_links"] )
        Repetitions, Repetitions_Matrix, Repetitions_Descriptor, max_repetitions= Repetitions_generator(Stream_information["Streams_Period"], Streams, Stream_information["Hyperperiod"])
        unused_links = unused_links_generator(Topology["Network_links"], Link_order_Descriptor)
        # Merge raw inputs and derived descriptors into one payload for the
        # ILP microservice (see the module docstring's table of keys).
        Preprocessed_data = {}
        Preprocessed_data["Number_of_Streams"] = Stream_information["Number_of_Streams"]
        Preprocessed_data["Stream_Source_Destination"] = Topology["Stream_Source_Destination"]
        Preprocessed_data["identificator"] = Topology["identificator"]
        Preprocessed_data["interface_Matrix"] = Topology["interface_Matrix"]
        Preprocessed_data["Network_links"] = Topology["Network_links"]
        Preprocessed_data["Adjacency_Matrix"] = Topology["Adjacency_Matrix"]
        Preprocessed_data["Link_order_Descriptor"] = Link_order_Descriptor
        Preprocessed_data["Streams_Period"] = Stream_information["Streams_Period"]
        Preprocessed_data["Hyperperiod"] = Stream_information["Hyperperiod"]
        Preprocessed_data["Frames_per_Stream"] = Stream_information["Frames_per_Stream"]
        Preprocessed_data["Max_frames" ] = Stream_information["Max_frames"]
        Preprocessed_data["Streams_size"] = Stream_information["Streams_size"]
        Preprocessed_data["Num_of_Frames"] = Stream_information["Num_of_Frames"]
        Preprocessed_data["Destinations"] = Stream_information["Destinations"]
        Preprocessed_data["Sources"] = Stream_information["Sources"]
        Preprocessed_data["Model_Descriptor"] = Model_Descriptor
        Preprocessed_data["Model_Descriptor_vector"] = Model_Descriptor_vector
        Preprocessed_data["Deathline_Stream"] = Stream_information["Deathline_Stream"]
        Preprocessed_data["Repetitions"] = Repetitions
        Preprocessed_data["Streams_links_paths"] = Streams_links_paths
        Preprocessed_data["Repetitions_Descriptor"] = Repetitions_Descriptor
        Preprocessed_data["Frame_Duration"] = Frame_Duration
        Preprocessed_data["unused_links"] =unused_links
        Preprocessed_data["Links_per_Stream"] = Links_per_Stream
        print(Preprocessed_data)
        json_Preprocessed_data = json.dumps(Preprocessed_data, indent = 4)
        print("working")
        # Sending the messages to the RabbitMQ server
        send_message(json_Preprocessed_data, 'pre-ilp')
    else:
        print("There is not input data, check the previous microservices or the RabbitMQ logs")
| gabriel-david-orozco/TSN-CNC-CUC-UPC | CNC/Microservices/Preprocessing_microservice/__init__.py | __init__.py | py | 9,140 | python | en | code | 4 | github-code | 13 |
37349993668 | # (c) Nelen & Schuurmans & Deltares. GPL licensed, see LICENSE.rst
# Code copied from openearth
# system modules
import bisect
import datetime
from functools import partial
import logging
# numpy/scipy
from numpy import any, all, ma, apply_along_axis, nonzero, array, isnan, logical_or, nan
from numpy.ma import filled
import numpy as np
from scipy.interpolate import interp1d
# pydata
import pandas
# web
from django.conf import settings
# data/gis
import netCDF4
import pyproj
logger = logging.getLogger(__name__)
# netCDF 4.1.3 has a known performance problem with opendap queries; warn
# if that library version is in use.
if '4.1.3' in netCDF4.getlibversion():
    logger.warn('There is a problem with the netCDF 4.1.3 library that causes performance issues for opendap queries, you are using netcdf version {}'.format(netCDF4.getlibversion()))
# Dutch RD New projection (Amersfoort / stereographic), used to convert
# between lat/lon and the national metric grid in Transect.move_by.
proj = pyproj.Proj('+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +towgs84=565.237,50.0087,465.658,-0.406857,0.350733,-1.87035,4.0812 +units=m +no_defs')
class Transect(object):
    """A coastal transect: coordinates, altitudes and measurement times.

    Attributes are filled in by the factory functions below (e.g.
    makejarkustransect); this class itself only stores and transforms them.
    """
    def __init__(self, id):
        self.id = id
        # x and y can be lat,lon, we don't care here...
        self.x = array([])
        self.y = array([])
        self.z = array([])   # altitude, shape (time, cross_shore)
        self.t = array([])   # one datetime per survey
        # Cross shore is the local (engineering) coordinate system
        # or engineering datum, see for example:
        # http://en.wikipedia.org/wiki/Datum_(geodesy)#Engineering_datums
        self.cross_shore = array([])
    def begindates(self):
        """Return the survey dates as a list."""
        return [date for date in self.t]
    def enddates(self):
        """Return each survey date shifted one year later (survey interval)."""
        return [date.replace(year=date.year+1) for date in self.t]
    def interpolate_z(self):
        """interpolate over missing z values

        Fills NaNs row-by-row (per survey) by linear interpolation along the
        cross-shore axis, then masks whatever is still NaN (outside the
        measured range).  Returns z unchanged when it is empty/all falsy.
        """
        if not self.z.any():
            return self.z
        def fillmissing(x,y):
            """fill nans in y using linear interpolation"""
            f = interp1d(x[~isnan(y)], y[~isnan(y)], kind='linear',bounds_error=False, copy=True)
            new_y = f(list(x)) #some bug causes it not to work if x is passed directly
            return new_y
        # define an interpolation for a row by partial function application
        rowinterp = partial(fillmissing, self.cross_shore)
        # apply to rows (along columns)
        z = apply_along_axis(rowinterp, 1, self.z)
        # mask missings
        z = ma.masked_array(z, mask=isnan(z))
        return z
    def move_by(self, distance):
        """
        Move the x,y coordinates by distance, perpendicular, assuming that they are lat,lon and that we can move in EPSG:28992
        >>> t = Transect(0)
        >>> t.x = array([4.0])
        >>> t.y = array([51.0])
        >>> x,y = t.move_by(1000)
        >>> x, y # doctest:+ELLIPSIS
        (array([ 3.999...]), array([ 51.0089...]))
        """
        # project from wgs84 to rd, assuming x,y are lon, lat
        # compute the angle from the transect coordinates
        x,y = proj(self.x, self.y)
        dx = self.x[-1] - self.x[0]
        dy = self.y[-1] - self.y[0]
        angle = np.arctan2(dy,dx) + np.pi*0.5 # rotate by 90 degrees
        x += distance * np.cos(angle);
        y += distance * np.sin(angle);
        lon, lat = proj(x,y,inverse=True)
        return lon, lat
# Some factory functions, because the classes are dataset unaware (they were also used by other EU countries)
# @cache.beaker_cache('id', expire=60)
def makejarkustransect(id, **args):
    """Make a transect object, given an id (1000000xareacode + alongshore distance)

    Reads the JARKUS profile dataset (settings.NC_RESOURCE), extracts the
    row matching *id*, drops fully-missing columns/rows and returns a
    populated Transect.
    """
    id = int(id)
    # TODO: Dataset does not support with ... as dataset, this can lead to too many open ports if datasets are not closed, for whatever reason
    dataset = netCDF4.Dataset(settings.NC_RESOURCE, 'r')
    tr = Transect(id)
    # Opendap is index based, so we have to do some numpy tricks to get the data over (and fast)
    # read indices for all years (only 50 or so), takes 0.17 seconds on my wireless
    years = dataset.variables['time'][:]
    # days = dataset.variables['time']
    # TODO: dates = netcdftime.num2date(days, days.units)
    # read all indices (this would be nice to cache)... takes 0.24 seconds on my wireless
    id = dataset.variables['id'][:]
    alongshoreindex = nonzero(id == tr.id)
    alongshoreindex = alongshoreindex[0][0]
    lon = dataset.variables['lon'][alongshoreindex,:]
    lat = dataset.variables['lat'][alongshoreindex,:]
    #filter out the missing to make it a bit smaller
    z = dataset.variables['altitude'][:,alongshoreindex,:]
    # why are missings not taken into account?, just in case also filter out fill value.
    filter = logical_or(
        isnan(z),
        z == dataset.variables['altitude']._FillValue
    )
    # Convert from masked to regular array
    z = filled(z, nan)
    # Make sure we set all missings and nans to nan
    z[filter] = nan
    # convert to datetime objects. (netcdf only stores numbers, we use years here (ignoring the measurement date))
    # NOTE(review): `years` is interpreted as days-since-epoch here — confirm
    # against the dataset's time units.
    t = array([datetime.datetime.fromtimestamp(days*3600*24) for days in years])
    cross_shore = dataset.variables['cross_shore'][:]
    # leave out empty crossections and empty dates
    tr.lon = lon[(~filter).any(0)]
    tr.lat = lat[(~filter).any(0)]
    # use lat, lon as x here...
    tr.x = tr.lon
    tr.y = tr.lat
    # keep what is not filtered in 2 steps
    #      [over x            ][over t            ]
    tr.z = z[:,(~filter).any(0)][(~filter).any(1),:]
    tr.t = t[(~filter).any(1)]
    tr.cross_shore = cross_shore[(~filter).any(0)]
    # get the water level variables
    mhw = dataset.variables['mean_high_water'][alongshoreindex]
    mlw = dataset.variables['mean_low_water'][alongshoreindex]
    tr.mhw = mhw.squeeze()
    tr.mlw = mlw.squeeze()
    dataset.close()
    # return dict to conform to the "rendering context"
    # NOTE(review): the next line is a no-op expression (likely leftover
    # debugging) — candidate for removal.
    tr.lon, tr.lat
    return tr
#TODO: @cache.beaker_cache(None, expire=600)
def makejarkuslod():
    """Build an overview dict of all transect locations and bounding boxes.

    Used to draw the beach transect lines on an overview map.
    """
    dataset = netCDF4.Dataset(settings.NC_RESOURCE, 'r')
    overview = {}
    # Get the locations of the beach transect lines..
    # For some reason index 0 leads to the whole variable being send over.
    # TODO: bug in netCDF4 + 4.1.3 library opendap index 0 with nc_get_vara doesn't use index....
    # Make sure you use netcdf >=4.2
    id = dataset.variables['id'][:]
    # First and last point of every transect line.
    lon0 = dataset.variables['lon'][:,0]
    lat0 = dataset.variables['lat'][:,1]
    lon1 = dataset.variables['lon'][:,-1]
    lat1 = dataset.variables['lat'][:,-1]
    overview['lon0'] = lon0
    overview['lon1'] = lon1
    overview['lat0'] = lat0
    overview['lat1'] = lat1
    rsp_lon = dataset.variables['rsp_lon'][:]
    rsp_lat = dataset.variables['rsp_lat'][:]
    # few
    # Small fixed-degree margins around each reference point.
    overview['north'] = rsp_lat + 0.002
    overview['south'] = rsp_lat - 0.002
    # HACK: not circle safe...
    overview['east'] = rsp_lon + .0025
    overview['west'] = rsp_lon - .0025
    overview['id'] = id
    dataset.close()
    # return dict to conform to the "rendering context"
    return overview
# Now for a series of functions that read some datasets about the coast. I'm transforming everything to pandas dataframes
# That way the data looks a bit more relational.
# I'm using uncached versions at the moment. Total query time is about 3seconds on my wifi, which is just a bit too much.
# Optional we can make local copies. Through wired connections it should be a bit faster.
def makeshorelinedf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/strandlijnen/strandlijnen.nc'):
    """Read information about shorelines

    Returns a DataFrame with yearly mean high/low water and dune foot
    positions for *transect*.  Raises ValueError when the transect id is
    not present in the dataset.
    """
    ds = netCDF4.Dataset(url)
    # The id variable is sorted, so binary search finds the row quickly.
    transectidx = bisect.bisect_left(ds.variables['id'], transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find shoreline for transect {}, closest is {}".format(transect, idfound))
    year = ds.variables['year'][:]
    time = [datetime.datetime(x, 1,1) for x in year]
    mean_high_water = ds.variables['MHW'][transectidx,:]
    mean_low_water = ds.variables['MLW'][transectidx,:]
    dune_foot = ds.variables['DF'][transectidx,:]
    # NOTE(review): ds is not closed on the success path here, unlike the
    # other readers in this module — confirm whether that is intentional.
    shorelinedf = pandas.DataFrame(
        data=dict(
            time=time,
            mean_high_water=mean_high_water,
            mean_low_water=mean_low_water,
            dune_foot=dune_foot,
            year=year
        )
    )
    return shorelinedf
def maketransectdf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/profiles/transect.nc'):
    """Read some transect data

    Returns a one-row DataFrame (indexed by transect id) with the area name
    and mean high/low water.  Raises ValueError when the id is not found.
    """
    ds = netCDF4.Dataset(url)
    # Binary search over the sorted id variable.
    transectidx = bisect.bisect_left(ds.variables['id'],transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, idfound))
    alongshore = ds.variables['alongshore'][transectidx]
    # Area names are stored as fixed-width char arrays; decode to a string.
    areaname = netCDF4.chartostring(ds.variables['areaname'][transectidx])
    mean_high_water = ds.variables['mean_high_water'][transectidx]
    mean_low_water = ds.variables['mean_low_water'][transectidx]
    ds.close()
    transectdf = pandas.DataFrame(index=[transect], data=dict(transect=transect, areaname=areaname, mean_high_water=mean_high_water, mean_low_water=mean_low_water))
    return transectdf
# note that the areaname is a hack, because it is currently missing
# note that the areaname is a hack, because it is currently missing
def makenourishmentdf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/suppleties/suppleties.nc', areaname=""):
    """Read the nourishments from the dataset (only store the variables that are a function of nourishment)

    Returns the nourishment events whose alongshore stretch overlaps
    *transect* and whose area ('kustvak') matches *areaname*.
    Raises ValueError when the transect id is not found.
    """
    # `reduce` is a builtin on Python 2 but lives in functools on Python 3;
    # import locally so the function works on both.
    from functools import reduce
    ds = netCDF4.Dataset(url)
    transectidx = bisect.bisect_left(ds.variables['id'],transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, idfound))
    alongshore = ds.variables['alongshore'][transectidx]
    # TODO fix this name, it's missing
    # areaname = netCDF4.chartostring(ds.variables['areaname'][transectidx,:])
    # This dataset has data per nourishment and per transect; use the
    # per-nourishment variables for easier plotting.  Skip a few variables
    # with nasty non-ascii content (TODO: check how to deal with non-ascii
    # in netcdf).  (Removed the original's dead locals `alltypes`,
    # `typemap` and `beachcolors`: computed but never used here.)
    vars = [name for name, var in ds.variables.items() if 'survey' not in name and 'other' not in name and 'nourishment' in var.dimensions]
    vardict = {}
    for var in vars:
        if ('date' in var and 'units' in ds.variables[var].ncattrs()):
            # lookup the time variable
            t = netCDF4.netcdftime.num2date(ds.variables[var], ds.variables[var].units)
            vardict[var] = t
        elif 'stringsize' in ds.variables[var].dimensions:
            vardict[var] = netCDF4.chartostring(ds.variables[var][:])
        else:
            vardict[var] = ds.variables[var][:]
    # Stretch bounds are stored in decametres; this is specified as 'decam',
    # which should be 'dekam' according to udunits specs.
    assert ds.variables['beg_stretch'].units == 'decam'
    ds.close()
    # Put the data in a frame
    nourishmentdf = pandas.DataFrame.from_dict(vardict)
    # Compute nourishment volume in m3/m (factor 10 converts decam to m).
    nourishmentdf['volm'] = nourishmentdf['vol']/(10*(nourishmentdf['end_stretch']-nourishmentdf['beg_stretch']))
    # Select nourishments overlapping this transect in the matching area.
    # (Renamed the original's local `filter`, which shadowed the builtin.)
    mask = reduce(np.logical_and, [
        alongshore >= nourishmentdf.beg_stretch,
        alongshore < nourishmentdf.end_stretch,
        nourishmentdf['kustvak'].apply(str.strip)==areaname.tostring().strip()
    ])
    nourishmentdfsel = nourishmentdf[mask]
    return nourishmentdfsel
def makemkldf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/BKL_TKL_MKL/MKL.nc'):
    """the momentary coastline data

    Returns a DataFrame of all (time, alongshore) variables for *transect*,
    with rows lacking a valid MKL time dropped.
    """
    ds = netCDF4.Dataset(url)
    # Use bisect to speed things up
    transectidx = bisect.bisect_left(ds.variables['id'], transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, idfound))
    vars = [name for name, var in ds.variables.items() if var.dimensions == ('time', 'alongshore')]
    # Convert all variables that are a function of time to a dataframe
    vardict = dict((var, ds.variables[var][:,transectidx]) for var in vars)
    vardict['time'] = netCDF4.netcdftime.num2date(ds.variables['time'], ds.variables['time'].units)
    # Deal with nan's in an elegant way:
    mkltime = ds.variables['time_MKL'][:,transectidx]
    mkltime = np.ma.masked_array(mkltime, mask=np.isnan(mkltime))
    vardict['time_MKL'] = netCDF4.netcdftime.num2date(mkltime, ds.variables['time_MKL'].units)
    ds.close()
    mkldf = pandas.DataFrame(vardict)
    # Drop rows where the MKL time is missing.
    mkldf = mkldf[np.logical_not(pandas.isnull(mkldf['time_MKL']))]
    return mkldf
return mkldf
def makebkldf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/BKL_TKL_MKL/BKL_TKL_TND.nc' ):
    """the basal coastline data

    Returns a DataFrame of all (time, alongshore) variables for *transect*.
    Raises ValueError when the transect id is not found.
    """
    ds = netCDF4.Dataset(url)
    # Use bisect to speed things up
    transectidx = bisect.bisect_left(ds.variables['id'], transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, idfound))
    vars = [name for name, var in ds.variables.items() if var.dimensions == ('time', 'alongshore')]
    # Convert all variables that are a function of time to a dataframe
    vardict = dict((var, ds.variables[var][:,transectidx]) for var in vars)
    vardict['time'] = netCDF4.netcdftime.num2date(ds.variables['time'], ds.variables['time'].units)
    ds.close()
    bkldf = pandas.DataFrame(vardict)
    return bkldf
def makebwdf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/strandbreedte/strandbreedte.nc'):
    """Read the beach-width time series for *transect* into a DataFrame.

    Raises ValueError when the transect id is not found in the dataset.
    """
    ds = netCDF4.Dataset(url)
    # Binary-search the sorted id variable instead of scanning it.
    idx = bisect.bisect_left(ds.variables['id'], transect)
    if ds.variables['id'][idx] != transect:
        closest = ds.variables['id'][idx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, closest))
    # Collect every (time, alongshore) variable for this transect.
    timeseries = {}
    for name, var in ds.variables.items():
        if var.dimensions == ('time', 'alongshore'):
            timeseries[name] = var[:, idx]
    timeseries['time'] = netCDF4.netcdftime.num2date(ds.variables['time'], ds.variables['time'].units)
    ds.close()
    return pandas.DataFrame(timeseries)
# <codecell>
def makedfdf(transect, url='http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/DuneFoot/DF.nc'):
    """read the dunefoot data

    Returns a DataFrame of all (alongshore, time) variables for *transect*.
    Raises ValueError when the transect id is not found.
    """
    ds = netCDF4.Dataset(url)
    # Use bisect to speed things up
    transectidx = bisect.bisect_left(ds.variables['id'], transect)
    if ds.variables['id'][transectidx] != transect:
        idfound = ds.variables['id'][transectidx]
        ds.close()
        raise ValueError("Could not find transect data for transect {}, closest is {}".format(transect, idfound))
    vars = [name for name, var in ds.variables.items() if var.dimensions == ('alongshore', 'time')]
    # Convert all variables that are a function of time to a dataframe
    # Note inconsistent dimension ordering: this dataset is (alongshore, time)
    # rather than (time, alongshore) like the other datasets in this module.
    vardict = dict((var, ds.variables[var][transectidx,:]) for var in vars)
    vardict['time'] = netCDF4.netcdftime.num2date(ds.variables['time'], ds.variables['time'].units)
    ds.close()
    dfdf = pandas.DataFrame(vardict)
    return dfdf
def makedfs(transect):
    """Gather every coastal dataset for *transect* into a dict of dataframes.

    Fetches are sequential; a thread pool could speed this up later.
    """
    frames = {}
    frames['shorelinedf'] = makeshorelinedf(transect)
    frames['transectdf'] = maketransectdf(transect)
    # The nourishment lookup needs the area name from the transect frame.
    frames['nourishmentdf'] = makenourishmentdf(
        transect, areaname=frames['transectdf']['areaname'].irow(0))
    frames['mkldf'] = makemkldf(transect)
    frames['bkldf'] = makebkldf(transect)
    frames['bwdf'] = makebwdf(transect)
    frames['dfdf'] = makedfdf(transect)
    return frames
| pombredanne/lizard-kml | lizard_kml/jarkus/nc_models.py | nc_models.py | py | 17,724 | python | en | code | null | github-code | 13 |
16980477185 | # Frontend UI
from recogScript import *
from tkinter import *
import PIL.ImageGrab as ImageGrab
# from PIL import Image, ImageTk #for jpeg or jpg image
class Draw() :
    """Tkinter drawing canvas for handwritten-text recognition.

    Builds the window, a white canvas, pen/eraser/clear buttons and an
    output label; recognition runs automatically on mouse-button release.
    """
    def __init__(self, root) :
        # Initial config values
        self.root = root
        # Window title
        self.root.title("Handwritten Text Recognition")
        # Window resolution
        self.root.geometry("1500x700")
        # Window background color
        self.root.configure(background = "#242424")
        # Do not let the window to be resizable on both axes
        self.root.resizable(0, 0)
        # Define the colors ("erasing" just paints in the canvas background)
        self.pointer = "black"
        self.erase = "white"
        # Size or width of the drawing pen
        self.pointer_size = 17.5
        # Pen Button(it's modified)
        # bg1=Image.open("bg_example.jpg") #image and text not working together rn
        # pen_bg1=ImageTk.PhotoImage(bg1) #this is for jpg image
        # pen_bg1=PhotoImage("filename") check this for png
        # Pen Button
        self.pen_btn = Button(self.root, text = "Pen", bd = 4, fg='black', font=("Arial",16,"bold"), bg= "#23c47d", command = self.pen, width = 14, relief = RAISED)
        self.pen_btn.place(x = 180, y = 10)
        # Eraser button
        self.eraser_btn = Button(self.root, text = "Eraser", bd = 4, fg='black', font=("Arial",16,"bold"), bg= "#23c47d", command = self.eraser, width = 14, relief = RAISED)
        self.eraser_btn.place(x = 600, y = 10)
        # Reset Button to clear the entire screen
        self.clear_screen = Button(self.root, text = "Clear Screen", bd = 4, fg='black', font=("Arial",16,"bold"), bg= "#23c47d", width = 14, relief = RAISED, command = self.clearScreen)
        self.clear_screen.place(x = 1050, y = 10)
        # # Button to recognise the drawn number
        # self.rec_btn = Button(self.root, text = "Recognise", bd = 4, bg = 'white', command = self.rec_drawing, width = 9, relief = RIDGE)
        # self.rec_btn.place(x = 0, y = 257)
        # Defining a background color for the Canvas
        self.background = Canvas(self.root, bg = 'white', bd = 5, relief = FLAT, height = 510, width = 1390)
        self.background.place(x = 45, y = 60)
        #output box showing the recognised text
        self.outbox = Label(self.root, text="RESULT TEXT HERE ...", font=("Calibri",20,"bold"), fg='black', bg="#23c47d", width=16)
        self.outbox.place(x = 742, y = 640)
        # Bind the background Canvas with mouse click (drag paints strokes)
        self.background.bind("<B1-Motion>", self.paint)
        #bind the rec_drawing method with mouse click release
        self.background.bind("<ButtonRelease-1>", self.rec_drawing)
    # Set brush color to eraser (paint in background white, larger tip)
    def eraser(self) :
        self.pointer = self.erase
        self.pointer_size = 22
    # Set brush color back
    def pen(self) :
        self.pointer = 'black'
        self.pointer_size = 17.5
    # Clear all objects on canvas and reset the output label
    def clearScreen(self) :
        self.background.delete('all')
        self.output("RESULT TEXT HERE ...")
    # Paint the elipses along the mouse-drag path
    def paint(self, event) :
        x1, y1 = (event.x - 2), (event.y - 2)
        x2, y2 = (event.x + 2), (event.y + 2)
        self.background.create_oval(x1, y1, x2, y2, fill = self.pointer, outline = self.pointer, width = self.pointer_size)
    # Update output Box (empty string resets to the placeholder)
    def output(self, word) :
        if word == "" :
            self.outbox.config(text="RESULT TEXT HERE ...")
        else :
            self.outbox.config(text=word)
    def rec_drawing(self, event):
        """Grab the canvas as an image, run recognition, show the result."""
        # Get the coordinate values of the canvas (screen coordinates)
        x = self.root.winfo_rootx() + self.background.winfo_x()
        y = self.root.winfo_rooty() + self.background.winfo_y()
        x1 = x + self.background.winfo_width()
        y1 = y + self.background.winfo_height()
        # Screenshot the whole display and then crop out the canvas
        # (the 7px inset trims the canvas border).
        img = ImageGrab.grab().crop((x + 7 , y + 7, x1 - 7, y1 - 7))
        res = recog(img)
        self.output(res)
# Application entry point: build the UI and hand control to tkinter.
root = Tk()
p = Draw(root)
root.mainloop()
| ShambaC/Handwritten-Text-Recognition | textrecog_ui.py | textrecog_ui.py | py | 4,037 | python | en | code | 4 | github-code | 13 |
import json
from collections import Counter
from itertools import combinations

import pandas as pd
from nltk.corpus import stopwords
# Load the dataset (UFO sightings), keep complete US records only.
df = pd.read_csv('scrubbed.csv')
df = df[df['country'] == 'us'].dropna()
# Unparseable timestamps become NaT rather than raising.
df['datetime'] = pd.to_datetime(df['datetime'], errors='coerce')
duration = 'duration (seconds)'
df[duration] = df[duration].astype(float)
#df = df[df['duration (seconds)'] > 1000]
# Extract year and month
df['year'] = df['datetime'].dt.year
df['month'] = df['datetime'].dt.month
# Preprocess comments and generate bigrams
comments = df['comments'].tolist()
# Preprocessing function for generating bigrams
def preprocess_comments(comments, stopwords):
    """Tokenize comments and collect word-pair (bigram) candidates.

    Parameters
    ----------
    comments : iterable of str
        Raw comment strings.
    stopwords : container of str
        Stopwords to drop; tokens are lower-cased before the check.

    Returns
    -------
    tuple (list, dict)
        * list of (word1, word2) pairs, all unordered combinations within
          each comment, in first-occurrence order;
        * dict mapping each kept word to its total occurrence count.
    """
    # Project-specific extra stopwords (set for O(1) membership; the
    # original used a list, and also carried an unused `desired_keywords`
    # local that has been removed).
    stopwords_cust = {'event', 'took'}
    word_counts = {}
    processed_bigrams = []
    for comment in comments:
        # Lower-case, split on whitespace, drop stopwords and pure numbers.
        words = [word for word in comment.lower().split()
                 if word not in stopwords
                 and word not in stopwords_cust
                 and not word.isdigit()]
        for word in words:
            word_counts[word] = word_counts.get(word, 0) + 1
        # All unordered word pairs within this comment.
        processed_bigrams.extend(combinations(words, 2))
    return processed_bigrams, word_counts
# Remove duplicate bigrams
def remove_duplicate_bigrams(bigrams_df):
    """Return *bigrams_df* with repeated rows removed (first occurrence kept)."""
    return bigrams_df.loc[~bigrams_df.duplicated()]
# Remove bigrams with word counts less than 100
def remove_low_count_bigrams(bigrams_df, word_counts, threshold):
    """Keep only rows where BOTH words occur at least *threshold* times."""
    counts1 = bigrams_df['word1'].map(word_counts)
    counts2 = bigrams_df['word2'].map(word_counts)
    return bigrams_df[counts1.ge(threshold) & counts2.ge(threshold)]
# Generate bigrams from comments using NLTK
stopwords_list = set(stopwords.words('english'))
bigrams_list, word_counts = preprocess_comments(comments, stopwords_list)
# Remove unwanted bigrams
unwanted_bigrams = [('of', 'the'), ('a', 'lot'), ('in', 'the')] # Add any additional unwanted bigrams
bigrams_df = pd.DataFrame(bigrams_list, columns=['word1', 'word2'])
bigrams_df = remove_duplicate_bigrams(bigrams_df)
# Add count column to the words
bigrams_df['word1_count'] = bigrams_df['word1'].map(word_counts)
bigrams_df['word2_count'] = bigrams_df['word2'].map(word_counts)
# Remove bigrams with word counts less than 100
threshold = 2000
bigrams_df = remove_low_count_bigrams(bigrams_df, word_counts, threshold)
# Save bigrams to a JSON file
bigrams_df.to_json('bigrams.json', orient='records')
| BigSuj/cosmic | network_process.py | network_process.py | py | 2,563 | python | en | code | 0 | github-code | 13 |
21580977055 | import OpenGL.GL as gl
from PIL import Image
import numpy as np
from typing import Union
__all__ = ['Texture2D']
class Texture2D:
    """A 2D OpenGL texture whose pixel data is uploaded lazily.

    Image data is kept on the CPU until :meth:`bind` is called with a
    current OpenGL context; the first bind creates the GL texture object,
    uploads the pixels and configures sampling.  Each instance grabs its
    own texture unit on first bind (see ``UnitCnt``).
    """

    # External pixel format, keyed by channel count.
    Format = {
        1 : gl.GL_RED,
        3 : gl.GL_RGB,
        4 : gl.GL_RGBA,
    }

    # GPU-side storage format, keyed by (channels, numpy dtype name).
    InternalFormat = {
        (1, 'uint8') : gl.GL_R8,  # normalized
        (3, 'uint8') : gl.GL_RGB8,
        (4, 'uint8') : gl.GL_RGBA8,
        (1, 'float32') : gl.GL_R32F,
        (3, 'float32') : gl.GL_RGB32F,
        (4, 'float32') : gl.GL_RGBA32F,
        (1, 'float16') : gl.GL_R16F,
        (3, 'float16') : gl.GL_RGB16F,
        (4, 'float16') : gl.GL_RGBA16F,
    }

    # GL element type, keyed by numpy dtype name.
    DataType = {
        'uint8' : gl.GL_UNSIGNED_BYTE,
        'float16' : gl.GL_HALF_FLOAT,
        'float32' : gl.GL_FLOAT,
    }

    # Next free texture unit, shared by all instances.
    UnitCnt = 0

    def __init__(
        self,
        source = None,
        tex_type: str = "tex_diffuse",
        mag_filter = gl.GL_LINEAR,
        min_filter = gl.GL_LINEAR_MIPMAP_LINEAR,
        wrap_s = gl.GL_REPEAT,
        wrap_t = gl.GL_REPEAT,
        flip_y = False,
        flip_x = False,
        generate_mipmaps=True,
    ):
        """``source`` may be an image path or an ndarray; the remaining
        arguments configure GL sampling and optional mirroring."""
        self._id = None
        self.unit = None
        # if the texture image is updated, the flag is set to True,
        # meaning that the texture needs to be updated to the GPU.
        self._img_update_flag = False
        self._img = None # the texture image
        self.flip_y = flip_y
        self.flip_x = flip_x
        self.mag_filter = mag_filter
        self.min_filter = min_filter
        self.wrap_s = wrap_s
        self.wrap_t = wrap_t
        self.type = tex_type
        self.generate_mipmaps = generate_mipmaps
        if source is not None:
            self.updateTexture(source)

    def updateTexture(self, img: Union[str, np.ndarray]):
        """Replace the CPU-side image; the GPU copy is refreshed on next bind.

        ``img`` is either a path (loaded via PIL) or an ndarray.
        """
        if not isinstance(img, np.ndarray):
            self._path = str(img)
            img = np.array(Image.open(self._path))
        self._img = flip_image(img, self.flip_x, self.flip_y)
        self._img_update_flag = True

    def bind(self):
        """ Bind the texture to the specified texture unit,
        if unit is None, the texture will be bound to the next available unit.
        Must be called after the OpenGL context is made current."""
        if self.unit is None:
            self.unit = Texture2D.UnitCnt
            Texture2D.UnitCnt += 1
        if self._img is None:
            raise ValueError('Texture not initialized.')
        # do this job in bind() instead of updateTexture() to make sure that
        # the context is current.
        gl.glActiveTexture(gl.GL_TEXTURE0 + self.unit)
        if self._img_update_flag: # bind and update texture
            channels = 1 if self._img.ndim==2 else self._img.shape[2]
            dtype = self._img.dtype.name
            # -- set alignment: rows whose byte length is not a multiple of 4
            # need a 1-byte unpack alignment, otherwise GL reads past the row.
            nbytes_row = self._img.shape[1] * self._img.dtype.itemsize * channels
            if nbytes_row % 4 != 0:
                gl.glPixelStorei( gl.GL_UNPACK_ALIGNMENT, 1)
            else:
                gl.glPixelStorei( gl.GL_UNPACK_ALIGNMENT, 4)
            # Recreate the texture object from scratch.
            self.delete()
            self._id = gl.glGenTextures(1)
            gl.glBindTexture(gl.GL_TEXTURE_2D, self._id)
            gl.glTexImage2D(
                gl.GL_TEXTURE_2D, 0,
                self.InternalFormat[(channels, dtype)],
                self._img.shape[1], self._img.shape[0], 0,
                self.Format[channels],
                self.DataType[dtype],
                self._img,
            )
            if self.generate_mipmaps:
                gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
            # -- texture wrapping
            gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, self.wrap_s)
            gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, self.wrap_t)
            # -- texture filtering
            gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, self.min_filter)
            gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, self.mag_filter)
            self._img_update_flag = False
        else: # bind texture
            gl.glBindTexture(gl.GL_TEXTURE_2D, self._id)

    def delete(self):
        """Free the GL texture object (if any) and clear the cached id."""
        if self._id is not None:
            gl.glDeleteTextures([self._id])
            # BUG FIX: this line used `==` (a no-op comparison), so the stale
            # id survived and could be rebound or double-deleted later.
            self._id = None
def flip_image(img, flip_x=False, flip_y=False):
    """Return *img* mirrored horizontally and/or vertically.

    Axis convention: ``flip_x`` reverses columns (axis 1) and ``flip_y``
    reverses rows (axis 0).  With both flags False the array is returned
    unchanged.
    """
    axes = []
    if flip_y:
        axes.append(0)
    if flip_x:
        axes.append(1)
    if axes:
        img = np.flip(img, tuple(axes))
    return img
1448051351 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-02-14-13:48
# Python 3.5
#最先让你选择校区,然后在校区中进行各种视图操作
import os,sys,time,datetime,pickle,json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from conf import settings
from core.lessons import Lesson
class School(object):
    '''
    Example of the class-record dictionary assembled by create_class():

    {'NAME,LINUXS1':                                   # class name
        {
            'TECHERNAME': 'TECHERNAME',                # teacher name
            'LESSON_NAME': 'LESSON_NAME',              # course name
            'STUDENTS': ['stu1', 'stu2', '.........'], # student list
        }
    }
    '''
    def __init__(self):
        # self.SCHOOL_NAME = None
        self.SCHOOL_STUDENTS = {} # students and their tuition, e.g. {stuname: 120000}
        self.SCHOOL_TECHER = [] # teachers (list of names)
        self.SCHOOL_LESSON = {} # courses, keyed by course name
        self.SCHOOL_CLASS = {} # classes, keyed by class name
    def create_lesson(self): # create a course from interactive input
        self.lesson_name = input("课程名:")
        self.lesson_price = input('课程价格:')
        self.lesson_perid = input('课程周期:')
        # NOTE(review): Lesson is called as (name, period, price) although the
        # prompts are ordered name/price/period -- confirm the Lesson
        # constructor's parameter order matches.
        lesson_obj = Lesson(self.lesson_name,self.lesson_perid,self.lesson_price)
        self.SCHOOL_LESSON[lesson_obj.lesson_name] = lesson_obj.__dict__
    def create_class(self): # create a class, linking a course and a teacher
        self.class_name = input("您要创建的班级名称:")
        while True:
            for i in self.SCHOOL_LESSON.keys():
                print('课程如下%s'%i)
            self.class_lesson_name = input("班级关联课程:")
            if self.class_lesson_name not in self.SCHOOL_LESSON.keys():
                print('课程未找到,请确认课程是否存在')
                continue
            else:
                print('班级:%s 成功关联课程内容%s'%(self.class_name,self.class_lesson_name))
                # Link a teacher: list the school's teachers first, then pick
                # one; if the school has no teachers the class cannot be linked.
                if not self.SCHOOL_TECHER:
                    print('有问题,学校没有老师,您先招工')
                    break
                else:
                    while True: # loop until an existing teacher id is entered
                        print('当前学校老师如下')
                        print(self.SCHOOL_TECHER)
                        tech_all_id = []
                        for index,name in enumerate(self.SCHOOL_TECHER):
                            print('techer_id:\t%s techer_name:\t%s'%(index,name))
                            tech_all_id.append(str(index))
                        inp_techer_id = input("请输入老师工号ID:")
                        if inp_techer_id in tech_all_id:
                            self.techer_name = self.SCHOOL_TECHER[int(inp_techer_id)]
                            break
                        else:
                            print('tech id not found')
                            continue
                break
        # Assemble the class record.
        # NOTE(review): when the school has no teachers, the `break` above
        # still falls through to here and self.techer_name may be unset
        # (AttributeError) -- confirm intended behaviour.
        self.SCHOOL_CLASS[self.class_name ] = {'techername':self.techer_name,'lesson_name':self.class_lesson_name,'students':[]} # assemble the class dict
        print(self.SCHOOL_CLASS)
    def create_techer(self): # create a teacher
        techer_name = input('添加老师,请输入老师姓名:')
        if techer_name in self.SCHOOL_TECHER:
            print('教师存在,请从新添加')
        else:
            # techer_obj = Techer(techer_name)
            self.SCHOOL_TECHER.append(techer_name)
| 248808194/python | M3/kcb/core/schools.py | schools.py | py | 3,543 | python | en | code | 0 | github-code | 13 |
74209440976 | ## Static Imports
import os
import importlib
import gym
import gym_everglades
import pdb
import sys
import random
import json
import pytest
import numpy as np
from everglades_server import server
from everglades_server import generate_map
from everglades_server import generate_3dmap
# TODO Change this so that it's using the agent Jerold made for the test script
agent0_file = 'agents/random_actions'
agent1_file = 'agents/random_actions'
## Specific Imports
agent0_name, agent0_extension = os.path.splitext(agent0_file)
agent0_mod = importlib.import_module(agent0_name.replace('/','.'))
agent0_class = getattr(agent0_mod, os.path.basename(agent0_name))
agent1_name, agent1_extension = os.path.splitext(agent1_file)
agent1_mod = importlib.import_module(agent1_name.replace('/','.'))
agent1_class = getattr(agent1_mod, os.path.basename(agent1_name))
config_dir = os.path.abspath('config')
# Choose which map you want by setting map_name.
# To enable wind go to server.py and in the init() for EvergladesGame set self.enableWind = 1
# ********WARNING - ENABLING BOTH 3DMAP AND WIND WILL BREAK THE SERVER.***********
# 3dmap.json - 3D
# RandomMap.json - 2D
def createSetupJson(map_name, mapType, wind):
    """Write ``config/GameSetup.json`` describing a single Everglades game.

    Parameters
    ----------
    map_name : str
        Map definition file name (e.g. ``"pytestmap.json"``).
    mapType : str
        ``"2D"``, ``"3D"`` or ``"Static"``; recorded in the setup file.
    wind : bool
        Whether wind is enabled on the server.
    """
    gamesetup = {
        "__type": "Setup",
        "MapFile": map_name,
        "MapType": mapType,
        "Agents": ["random_actions.py", "random_actions.py"],
        "UnitFile": "UnitDefinitions.json",
        "PlayerFile": "PlayerConfig.json",
        "UnitBudget": 100,
        "TurnLimit": 150,
        "CaptureBonus": 1000,
        "enableWind": wind,
        # Random seed so repeated runs differ.
        "Stochasticity": random.randint(0, 1000),
        "FocusTurnMin": 4,
        "FocusTurnMax": 6,
        "FocusHeatMovement": 15,
        "FocusHeatCombat": 25,
        "FocusHeatCooloff": 10,
        "RL_IMAGE_X": 600,
        "RL_IMAGE_Y": 380,
        "RL_ORTHO_X": 12,
        "RL_ORTHO_Y": 7,
        "RL_Render_P1": 1,
        "RL_Render_P2": 0,
        "RL_Render_SaveToDisk": 0,
        "SubSocketAddr0": "opp-agent",
        "SubSocketPort0": 5556,
        "SubSocketAddr1": "agent",
        "SubSocketPort1": 5555,
    }
    # Context manager closes the file even on error (the original
    # open/write/close sequence leaked the handle on exceptions).
    with open(os.path.join(config_dir, "GameSetup.json"), "w") as out:
        json.dump(gamesetup, out, indent=4)
@pytest.mark.parametrize("mapType,wind", [
    ("2D", False),
    ("2D", True),
])
def test_runGame(mapType, wind):
    """Run one full Everglades game with two random agents and the given
    map type / wind setting; passes when the game loop completes."""
    debug = 1
    # map_name = "Map.json"
    # createSetupJson(map_name, mapType, wind)
    map_name = "pytestmap.json"
    createSetupJson(map_name, mapType, wind)
    # Generate the map file matching the requested map type.
    if mapType == '2D':
        print("Generating 2D map")
        generate_map.exec(7)
    elif mapType == '3D':
        print("Generating 3D map")
        generate_3dmap.exec(7, 7, 10)
    elif mapType == 'Static':
        print("Static map specified. No map generated.")
    map_file = os.path.join(config_dir, map_name)
    setup_file = os.path.join(config_dir, "GameSetup.json")
    unit_file = os.path.join(config_dir, "UnitDefinitions.json")
    output_dir = os.path.abspath('game_telemetry')
    ## Main Script
    env = gym.make('everglades-v0')
    players = {}
    names = {}
    # Both players use the dynamically-loaded random-action agent classes.
    players[0] = agent0_class(env.num_actions_per_turn, 0, map_name)
    names[0] = agent0_class.__name__
    players[1] = agent1_class(env.num_actions_per_turn, 1, map_name)
    names[1] = agent1_class.__name__
    observations = env.reset(
        players=players,
        config_dir = config_dir,
        map_file = map_file,
        setup_file = setup_file,
        unit_file = unit_file,
        output_dir = output_dir,
        pnames = names,
        debug = debug
    )
    actions = {}
    ## Game Loop
    done = 0
    # Step until the environment reports the game over.
    while not done:
        if debug:
            env.game.debug_state()
        #print("ACTIONS: ", actions)
        for pid in players:
            actions[pid] = players[pid].get_action( observations[pid] )
        observations, reward, done, info = env.step(actions)
    print("Reward: ", reward)
| JLodge99/Everglades-Server | testing/test_pytest.py | test_pytest.py | py | 4,174 | python | en | code | 3 | github-code | 13 |
class SparseVector:
    """Sparse integer vector storing only non-zero entries as {index: value}."""

    def __init__(self, nums: list[int]):
        # BUG FIX: the annotation used `List[int]` without importing
        # `typing.List`, which raises NameError when the class body runs;
        # the builtin generic `list[int]` needs no import.
        # Keep only non-zero components (index -> value).
        self.tracnum = {i: v for i, v in enumerate(nums) if v != 0}

    # Return the dotProduct of two sparse vectors
    def dotProduct(self, vec: 'SparseVector') -> int:
        """Dot product with another SparseVector, iterating the smaller map."""
        a, b = self.tracnum, vec.tracnum
        if len(b) < len(a):
            a, b = b, a
        return sum(val * b[idx] for idx, val in a.items() if idx in b)
# Your SparseVector object will be instantiated and called as such:
# v1 = SparseVector(nums1)
# v2 = SparseVector(nums2)
# ans = v1.dotProduct(v2) | ujas09/Leetcode | 1570.py | 1570.py | py | 680 | python | en | code | 0 | github-code | 13 |
18312188478 | """You are going to design a magical calculator with the following functions.
• Function that takes input and calculates it’s factorial. (A)
• Function that takes input and calculate it’s sum of digits. (B)
• Function that takes input and find’s the largest digit in the input. (C)
- Implement all the above functions.
- Get input and pass the input to factorial function (A), get the output from
factorial function and pass it as input to sum of digits function (B). Get the output
from sum of digits function, add the output with random 5 digit number and pass
the outcome to largest digit function (C) and print the output that you receive from
function C.
Sample I/O:
• Input 5
• Output of A = 120
• Output of B(120) = 1+2+0 = 3
• Output of C(3 + 10000 = 10003) = 3 (Here 10000 is the random number)
• Hence output is 3 , where 3 is the largest digit of 10003."""
import random
def A(number):
    """Return number! (factorial); any input below 2 yields 1."""
    result = 1
    for factor in range(2, number + 1):
        result *= factor
    return result
def B(number):
    """Return the sum of the decimal digits of a non-negative integer."""
    return sum(int(ch) for ch in str(number))
def C(number):
    """Return the largest decimal digit of a non-negative integer."""
    return max(int(ch) for ch in str(number))
# Pipeline: factorial (A) -> digit sum (B) -> add random offset -> largest digit (C).
number = int(input('Enter number: '))
# NOTE(review): spec says a "random 5 digit number" but randint(0,100000)
# also yields values with fewer than 5 digits -- confirm intended range.
random_num = random.randint(0,100000)
fact_num = A(number)
sum_fact = B(fact_num)
largest_digit = C(sum_fact + random_num)
print(largest_digit) | mohammed1916/Projects | python_basic_concepts/magical_calculator.py | magical_calculator.py | py | 1,482 | python | en | code | 0 | github-code | 13 |
28047642805 | import matplotlib.pyplot as plt
def funkcija(x1,y1,x2,y2):
    """Print the line equation through (x1,y1) and (x2,y2), plot it with the
    two points, then either show the figure or save it as a PDF (user choice)."""
    # Slope and intercept of the line y = a*x + b.
    # NOTE(review): x1 == x2 (vertical line) raises ZeroDivisionError -- confirm
    # whether that case should be handled.
    a = (y2-y1)/(x2-x1)
    b = y1 - a*x1
    # Prepend an explicit "+" only for a non-negative intercept.
    if b>=0:
        predznak = "+"
    else:
        predznak = ""
    print("Jednadžba pravca je y = ",round(a,2),"x",predznak, round(b,2))
    plt.plot(x1, y1, marker="o", color="blue")
    plt.plot(x2, y2, marker="o", color="blue")
    plt.axline((x1,y1),(x2,y2))
    plt.axis('equal')
    plt.grid()
    # Ask until the user types a valid integer: 1 = show, 2 = save as PDF.
    while True:
        try:
            graf = int(input("Ako želite prikazati graf na ekranu upišite 1, ako ga želite spremiti kao PDF upišite 2: "))
        except ValueError:
            print("Pogrešan unos.")
            continue
        else:
            break
    if graf == 1:
        plt.show()
    if graf == 2:
        plt.savefig(input("Spremi kao: ")+".pdf")
funkcija(x1 = int(input("Upišite x1: ")), y1 = int(input("Upišite y1: ")), x2 = int(input("Upišite x2: ")), y2 = int(input("Upišite y2: ")))
| kvilibic/PAF | Vjezbe/Vjezbe1/Zad5.py | Zad5.py | py | 950 | python | hr | code | 0 | github-code | 13 |
20654506174 | """
Tests pour le module `math.linalg.utils`
"""
from unittest import TestCase
import numpy as np
from pytools.math.linalg.utils import produit_scalaire, sum_vectors, gram_schmidt
class TestLinAlgUtils(TestCase):
    """Unit tests for `pytools.math.linalg.utils`."""

    def test_produit_scalaire(self):
        u = [1, 2, 3]
        v = [2, 3, 4]
        self.assertEqual(20, produit_scalaire(u, v), "Test du produit scalaire avec une liste d'entier")
        self.assertEqual(20, produit_scalaire(np.array(u), np.array(v)), "Test du produit scalaire avec de tableau numpy d'entier")

    def test_sum_vectors(self):
        operands = (
            [1, 2, 3, 4],
            [4, 3, 2, 1],
            [1, 0, 1, 0],
        )
        self.assertListEqual([6, 5, 6, 5], sum_vectors(*operands).tolist())

    def test_gram_schmidt(self):
        basis = np.array([
            [0, 1, 1],
            [1, 1, 0],
            [1, 1, -1],
        ])
        gram_schmidt(basis)  # orthogonalizes in place
        expected = [
            [0, 1, 1],
            [1, 0, 0],
            [0, 1, -1],
        ]
        self.assertListEqual(expected, basis.tolist())
10518354643 | #!/usr/bin/env python
from dep_search import *
import time
import sys
import os
import ast
import DB
import Blobldb
import importlib
import multiprocessing as mp
THISDIR=os.path.dirname(os.path.abspath(__file__))
os.chdir(THISDIR)
import json
import subprocess
import pickle
import sqlite3
import codecs
from datetime import datetime
#from tree import Tree
import re
import zlib
import importlib
import argparse
#§import db_util
import glob
import tempfile
import sys
from collections import defaultdict
field_re=re.compile(r"^(!?)(gov|dep|token|lemma|tag)_(a|s)_(.*)$",re.U)
query_folder = './queries/'
def map_set_id(args, db, qobj):
#XXX: figure out a way to check if this and that is in the db.
just_all_set_ids = []
optional = []
types = []
c_args_s = []
s_args_s = []
c_args_m = []
s_args_m = []
solr_args = []
or_groups = defaultdict(list)
for arg in args:
compulsory = False
it_is_set = True
or_group_id = None
if arg.startswith('!'):
compulsory = True
narg = arg[1:]
else:
narg = arg
if narg.startswith('org_'):
or_group_id = int(narg.split('_')[1])
narg = narg[6:]
#print >> sys.stderr, "narg:", narg
optional.append(not compulsory)
oarg = 0
if narg.startswith('dep_a'):
if db.has_id(u'd_' + narg[6:]):
oarg = db.get_id_for(u'd_' + narg[6:])
it_is_set = False
if narg.startswith('gov_a'):
if db.has_id(u'g_' + narg[6:]):
oarg = db.get_id_for(u'g_' + narg[6:])
it_is_set = False
if narg.startswith('lemma_s'):
if db.has_id(u'l_' + narg[8:]):
oarg = db.get_id_for(u'l_' + narg[8:])
it_is_set = True
if narg.startswith('token_s'):
if db.has_id(u'f_' + narg[8:]):
oarg = db.get_id_for(u'f_' + narg[8:])
it_is_set = True
#Here! Add so that if not found as tag, try tokens
if narg.startswith('tag_s'):
it_is_set = True
if db.has_id(u'' + narg[6:]):
#if narg[6:] in set_dict.keys():
oarg = db.get_id_for(u'' + narg[6:])
solr_args.append(arg)
if or_group_id != None:
or_groups[or_group_id].append(arg[6:])
else:
if db.has_id(u'p_' + narg[6:]):
#if 'p_' + narg[6:] in set_dict.keys():
oarg = db.get_id_for(u'p_' + narg[6:])
solr_args.append(arg)
if or_group_id != None:
or_groups[or_group_id].append(arg[6:])
else:
try:
if compulsory:
solr_args.append('!token_s_' + narg[6:])
else:
solr_args.append('token_s_' + narg[6:])
if or_group_id != None:
or_groups[or_group_id].append('token_s_' + narg[6:])
if db.has_id(u'f_' + narg[6:]):
#oarg = db.get_id_for(u'f_' + narg[6:])
oarg = db.get_id_for(u'f_' + narg[6:])
except:
pass#import pdb;pdb.set_trace()
else:
if not arg.startswith('org_'):
solr_args.append(arg)
else:
solr_args.append(arg[6:])
if or_group_id != None:
or_groups[or_group_id].append(arg[6:])
types.append(not it_is_set)
#print compulsory
#print it_is_set
just_all_set_ids.append(oarg)
if compulsory:
if it_is_set:
c_args_s.append(oarg)
else:
c_args_m.append(oarg)
else:
if it_is_set:
s_args_s.append(oarg)
else:
s_args_m.append(oarg)
for item in qobj.org_has_all:
#
or_groups[item].append('dep_a_anyrel')
together = c_args_s + c_args_m
counts = []# [set_count[x] for x in together]
min_c = 0#min(counts)
rarest = 0#together[0]#counts.index(min_c)]
#print >> sys.stderr, 'optional:', optional
#print >> sys.stderr, 'types:', types
solr_or_groups = []
return rarest, c_args_s, s_args_s, c_args_m, s_args_m, just_all_set_ids, types, optional, solr_args, or_groups
def query(query_fields):
    #print >> sys.stderr, 'query fields:', query_fields
    """
    query_fields: A list of strings describing the data to fetch
      Each string names a set to retrieve
      (gov|dep)_(a|s)_deptype
         - gov -> retrieve a from-governor-to-dependent mapping/set
         - dep -> retrieve a from-dependent-to-governor mapping/set
         - a   -> retrieve a mapping (i.e. used as the third argument of the pairing() function
         - s   -> retrieve a set (i.e. the set of governors or dependents of given type)
         - deptype -> deptype or u"anytype"
        prefixed with "!" means that only non-empty sets are of interest
      tag_s_TAG -> retrieve the token set for a given tag
        prefixed with "!" means that only non-empty sets are of interest
      token_s_WORD -> retrieve the token set for a given token
      lemma_s_WORD -> retrieve the token set for a given lemma
        prefixed with "!" means that only non-empty sets are of interest
    Returns (sql_string, parameter_list) ready for execution.
    """
    joins=[(u"FROM graph",[])]
    wheres=[]  # NOTE(review): never used -- left as-is in this doc-only pass
    args=[]    # NOTE(review): dead assignment; args is rebuilt after the loop
    selects=[u"graph.graph_id",u"graph.token_count"]
    for i,f in enumerate(query_fields):
        match=field_re.match(f)
        assert match
        req,ftype,stype,res=match.groups() #required? field-type? set-type? restriction
        # "!" prefix -> compulsory field -> INNER JOIN; otherwise LEFT JOIN.
        if req==u"!":
            j_type=u""
        elif not req:
            j_type=u"LEFT "
        else:
            assert False #should never happen
        if ftype in (u"gov",u"dep"):
            # Dependency-relation fields join against the rel table on dtype.
            joins.append((u"%sJOIN rel AS t_%d ON graph.graph_id=t_%d.graph_id and t_%d.dtype=?"%(j_type,i,i,i),[res]))
            if stype==u"s":
                selects.append(u"t_%d.token_%s_set"%(i,ftype))
            elif stype==u"a":
                selects.append(u"t_%d.token_%s_map"%(i,ftype))
        elif ftype in (u"token",u"lemma",u"tag"):
            # Lexical fields join against the token_/lemma_/tag_index table.
            joins.append((u"%sJOIN %s_index AS t_%d ON graph.graph_id=t_%d.graph_id and t_%d.%s=?"%(j_type,ftype,i,i,i,ftype),[res]))
            selects.append(u"t_%d.token_set"%i)
    joins.sort() #This is a horrible hack, but it will sort FROM JOIN ... LEFT JOIN the right way and help the QueryPlan generator
    q=u"SELECT %s"%(u", ".join(selects))
    q+=u"\n"+(u"\n".join(j[0] for j in joins))
    q+=u"\n"
    # Collect the JOIN parameters in the same (sorted) order as the clauses.
    args=[]
    for j in joins:
        args.extend(j[1])
    return q,args
def get_data_from_db(db_conn,graph_id):
    """Fetch one tree from the `graph` table and decompress it.

    Parameters
    ----------
    db_conn : sqlite3.Connection
        Open connection to the tree database.
    graph_id :
        Tree identifier; converted with str() for the query parameter.

    Returns
    -------
    tuple
        (conllu_data, conllu_comment) as stripped bytes, or (None, None)
        when the id is not present.
    """
    # fetchone() instead of the original fetchall(): only the first row is
    # ever used, so there is no point materializing every match.
    row = db_conn.execute(
        'SELECT conllu_data_compressed,conllu_comment_compressed FROM graph WHERE graph_id=?',
        (str(graph_id),)).fetchone()
    if row is None:
        return None, None
    sent, comment = row
    # Rows store zlib-compressed blobs; decompress and trim whitespace.
    return zlib.decompress(sent).strip(), zlib.decompress(comment).strip()
'''
def load(pyxFile):
"""Loads a search pyx file, returns the module"""
###I need to hack around this, because this thing is messing stdout
print >> sys.stderr, "Loading", pyxFile
error=subprocess.call(["python","compile_ext.py",pyxFile], stdout=sys.stderr, stderr=sys.stderr)
if error!=0:
print >> sys.stderr, "Cannot compile search code, error:",error
sys.exit(1)
mod=importlib.import_module(pyxFile)
return mod
'''
def load(pyxFile):
    """Compile ``pyxFile``.pyx with cythonize and import the resulting module.

    Exits the process when compilation fails.  Compiler output is redirected
    to stderr so it does not pollute the query results on stdout.
    """
    import shutil
    # BUG FIX: the cythonize executable was hard-coded to one user's home
    # directory; resolve it from PATH instead (falling back to the bare name
    # so a clear "not found" error is still raised).
    cythonize = shutil.which('cythonize') or 'cythonize'
    error = subprocess.call([cythonize, "-a", "-i", pyxFile + '.pyx'],
                            stdout=sys.stderr, stderr=sys.stderr)
    if error != 0:
        sys.exit(1)
    return importlib.import_module(pyxFile)
def get_url(comments):
    """Return the URL from the first '# URL:...' comment line, or None."""
    urls = (c.split(u":", 1)[1].strip()
            for c in comments if c.startswith(u"# URL:"))
    return next(urls, None)
def queue_query_from_db(q_obj, args, db, fdb, q):
#init the dbs
q_obj.set_db(db)
#This is a first try and an example without filter db
idx = 1
counter = 0
max_hits = args.max
end_cnt = 0
q = fdb.tree_id_queue
while True:
try:
idx = q.get()
if idx == -1:
end_cnt += 1
print (fdb.is_finished())
print (fdb.finished)
print (fdb.started)
print (fdb.processes)
if end_cnt == len(args.langs.split(',')):
break
res_set = q_obj.check_tree_id(idx, db)
#idx += 1
if len(res_set) > 0:
res_list = []
#tree
#import pdb;pdb.set_trace()
hit = q_obj.get_tree_text()
tree_comms = q_obj.get_tree_comms()
tree_lines=hit.split("\n")
if counter >= max_hits and max_hits > 0:
break
its_a_hit = False
try:
print ('# lang:', fdb.get_lang(idx))
except:
pass
for r in res_set:
res_list.append("# db_tree_id:"+idx)
res_list.append("# visual-style\t" + str(r + 1) + "\tbgColor:lightgreen")
try:
res_list.append("# hittoken:\t"+tree_lines[r])
its_a_hit = True
except:
pass
if its_a_hit:
if args.context>0:
hit_url=get_url(tree_comms)
texts=[]
# get +/- context sentences from db
for i in range(idx-args.context,idx+args.context+1):
if i==idx:
data=hit
else:
err = db.xset_tree_to_id(i)
if err != 0: continue
data = db.get_tree_text()
data_comment = db.get_tree_comms()
if data is None or get_url(data_comment)!=hit_url:
continue
text=u" ".join(t.split(u"\t",2)[1] for t in data.split(u"\n"))
if i<idx:
texts.append(u"# context-before: "+text)
elif i==idx:
texts.append(u"# context-hit: "+text)
else:
texts.append(u"# context-after: "+text)
res_list.append(u"\n".join(text for text in texts))
res_list.append(tree_comms)
res_list.append(hit)
res_list.append()
counter += 1
q.put(res_list)
#import pdb;pdb.set_trace(
except:
pass
if idx > 0: break
#import pdb;pdb.set_trace()
fdb.kill_threads()
#print ('cn', counter)
return counter
#import pdb; pdb.set_trace()
# init all necessary dbs
# if id-flow:
#init it
# for id:
# q_obj.fill_sets
# q_obj.check
# q_obj.get_tree_text and also comms
# else:
# for range, I suppose, stop @ db error
def old_query_from_db(q_obj,args):
#args -> the command line args
start = time.time()
db=db_util.DB()
db.open(solr_url, db_name)
rarest, c_args_s, s_args_s, c_args_m, s_args_m, just_all_set_ids, types, optional, solr_args, solr_or_groups = map_set_id(query_obj.query_fields, db, query_obj)
db.init_lmdb(c_args_s, c_args_m, rarest)
q_obj.set_db_options(just_all_set_ids, types, optional)
try:
extra_params= ast.literal_eval(args.extra_solr_params)
except:
extra_params = {}
from solr_query_thread import SolrQuery
solr_q = SolrQuery(args.extra_solr_term, [item[1:] for item in solr_args if item.startswith('!')], solr_or_groups, solr_url, case, q_obj, extra_params=extra_params)
tree_id_queue = solr_q.get_queue()
counter = 0
while (not solr_q.finished or not tree_id_queue.empty()):
idx = tree_id_queue.get()
if idx == -1:break
try:
err = db.xset_tree_to_id(idx)
if err != 0: continue
res_set = q_obj.check_tree_id(idx, db)
if len(res_set) > 0:
#Get the tree text:
hit = db.get_tree_text()
tree_comms = db.get_tree_comms()
tree_lines=hit.split("\n")
if counter >= max_hits and max_hits > 0:
break
its_a_hit = False
for r in res_set:
print ("# db_tree_id:",idx)
print ("# visual-style\t" + str(r + 1) + "\tbgColor:lightgreen")
try:
print ("# hittoken:\t"+tree_lines[r].encode('utf8'))
its_a_hit = True
except:
pass#import traceback; traceback.print_exc()
#hittoken once the tree is really here!
if its_a_hit:
if args.context>0:
hit_url=get_url(tree_comms)
texts=[]
# get +/- context sentences from db
for i in range(idx-args.context,idx+args.context+1):
if i==idx:
data=hit
else:
err = db.xset_tree_to_id(i)
if err != 0: continue
data = db.get_tree_text()
data_comment = db.get_tree_comms()
if data is None or get_url(data_comment)!=hit_url:
continue
text=u" ".join(t.split(u"\t",2)[1] for t in data.split(u"\n"))
if i<idx:
texts.append(u"# context-before: "+text)
elif i==idx:
texts.append(u"# context-hit: "+text)
else:
texts.append(u"# context-after: "+text)
print (u"\n".join(text for text in texts)).encode(u"utf-8")
print (tree_comms.encode('utf8'))
print (hit.encode('utf8'))
print ()
counter += 1
except: pass#import traceback; traceback.print_exc()
solr_q.kill()
print >> sys.stderr, "Found %d trees in %.3fs time"%(counter,time.time()-start)
return counter
def main(argv):
    """CLI entry point: parse arguments, spawn one worker process per
    database file matching the -d wildcard, and print results as they
    arrive on the shared queue."""
    global query_obj
    #XXX: Will fix!
    global solr_url
    parser = argparse.ArgumentParser(description='Execute a query against the db')
    parser.add_argument('-m', '--max', type=int, default=500, help='Max number of results to return. 0 for all. Default: %(default)d.')
    parser.add_argument('-d', '--database', default="/mnt/ssd/sdata/pb-10M/*.db",help='Name of the database to query or a wildcard of several DBs. Default: %(default)s.')
    parser.add_argument('-o', '--output', default=None, help='Name of file to write to. Default: STDOUT.')
    parser.add_argument('-s', '--solr', default="http://localhost:8983/solr/dep_search", help='Solr url. Default: %(default)s')
    parser.add_argument('search', nargs="?", default="parsubj",help='The name of the search to run (without .pyx), or a query expression. Default: %(default)s.')
    parser.add_argument('--context', required=False, action="store", default=0, type=int, metavar='N', help='Print the context (+/- N sentences) as comment. Default: %(default)d.')
    parser.add_argument('--keep_query', required=False, action='store_true',default=False, help='Do not delete the compiled query after completing the search.')
    parser.add_argument('-i', '--case', required=False, action='store_true',default=False, help='Case insensitive search.')
    parser.add_argument('--extra-solr-term',default=[],action="append",help="Extra restrictions on Solr, strings passed verbatim in the Solr query, you can have several of these")
    parser.add_argument('--extra-solr-params',default="",help="Extra parameters on Solr - a dictionary passed verbatim in the Solr request")
    parser.add_argument('--langs',default="",help="List of language codes to be queried")
    args = parser.parse_args(argv[1:])
    q = mp.Queue()
    import glob
    dbs = glob.glob(args.database)
    # Fan out: one worker process per matching database file; each worker
    # pushes result chunks onto q and a -1 sentinel when finished.
    for xdb in dbs:
        p = mp.Process(target=run_db, args=(args,xdb,q))
        p.start()
    ends = 0
    # Drain the queue until every worker has sent its -1 sentinel.
    while True:
        inc = q.get()
        if inc==-1:
            ends += 1
            if ends == len(dbs):
                break
        else:
            print ('\n'.join(inc))
#The blob and id database
inf = open(db+'/db_config.json', 'rt')
db_args = json.load(inf)
inf.close()
db_class = importlib.import_module(db_args['blobdb'])
db = db_class.DB(db_args['dir'])
db.open()
if args.output is not None:
sys.stdout = open(args.output, 'w')
if os.path.exists(args.search+".pyx"):
print >> sys.stderr, "Loading "+args.search+".pyx"
mod=load(args.search)
else:
path = '/'.join(args.database.split('/')[:-1])
#json_filename = path + '/symbols.json'
import pseudocode_ob_3 as pseudocode_ob
import hashlib
m = hashlib.md5()
m.update(args.search.encode('utf8') + str(args.case).encode('utf8') + args.database.encode('utf8'))
try:
os.mkdir(query_folder)
except:
pass
#load pickle db here
#db = DB.PickleDB(args.database)
#here should in the future be a loop to handle multile databases
# fdb_class = importlib.import_module(db_args['filterdb'])
# '''
#class Query():
# def __init__(self,extra_terms, compulsory_items,or_groups, solr, case, q_obj, extra_params={}):
# '''
#rarest, c_args_s, s_args_s, c_args_m, s_args_m, just_all_set_ids, types, optional, solr_args, solr_or_groups = map_set_id(query_obj.query_fields, db, query_obj)
#db.init_lmdb(c_args_s, c_args_m, rarest)
#q_obj.set_db_options(just_all_set_ids, types, optional)
#try:
# extra_params= ast.literal_eval(args.extra_solr_params)
#except:
# extra_params = {}
# if not db_args['filterdb'] == 'solr_filter_db':
# solr_q = SolrQuery(args.extra_solr_term, [item[1:] for item in solr_args if item.startswith('!')], solr_or_groups, db_args['dir'], case, q_obj, extra_params=extra_params)
# else:
# solr_q = SolrQuery(args.extra_solr_term, [item[1:] for item in solr_args if item.startswith('!')], solr_or_groups, solr_url, case, q_obj, extra_params=extra_params)
#fdb = fdb_class.Query(db_args['dir'])
#fdb.open()
#import pdb;pdb.set_trace()
json_filename = ''
temp_file_name = 'qry_' + m.hexdigest() + '.pyx'
if not os.path.isfile(query_folder + temp_file_name):
f = open('qry_' + m.hexdigest() + '.pyx', 'wt')
try:
pseudocode_ob.generate_and_write_search_code_from_expression(args.search, f, json_filename=json_filename, db=db, case=args.case)
except Exception as e:
os.remove(temp_file_name)
raise e
mod=load(temp_file_name[:-4])
#os.rename(temp_file_name, query_folder + temp_file_name)
#os.rename(temp_file_name[:-4] + '.cpp', query_folder + temp_file_name[:-4] + '.cpp')
#os.rename(temp_file_name[:-4] + '.so', query_folder + temp_file_name[:-4] + '.so')
else:
os.rename(query_folder + temp_file_name, temp_file_name)
mod=load(temp_file_name[:-4])
#os.rename(temp_file_name, query_folder + temp_file_name)
#os.rename(temp_file_name[:-4] + '.cpp', query_folder + temp_file_name[:-4] + '.cpp')
#os.rename(temp_file_name[:-4] + '.so', query_folder + temp_file_name[:-4] + '.so')
query_obj=mod.GeneratedSearch()
total_hits=0
#Loading and opening the databases or connections
#The blob and id database
#inf = open(args.database+'/db_config.json', 'rt')
#db_args = json.load(inf)
#inf.close()
#db_class = importlib.import_module(db_args['blobdb'])
#db = db_class.DB(db_args['dir'])
#db.open()
#... and lets load the filter db for fetching the filter list
fdb_class = importlib.import_module(db_args['filterdb'])
rarest, c_args_s, s_args_s, c_args_m, s_args_m, just_all_set_ids, types, optional, solr_args, solr_or_groups = query_obj.map_set_id(db)
try:
extra_params= ast.literal_eval(args.extra_solr_params)
except:
extra_params = {}
langs = [args.langs]
if ',' in args.langs:
langs = args.langs.split(',')
if not db_args['filterdb'] == 'solr_filter_db':
fdb = fdb_class.Query(args.extra_solr_term, [item[1:] for item in solr_args if item.startswith('!')], solr_or_groups, db_args['dir'], args.case, query_obj, extra_params=extra_params, langs=langs)
else:
fdb = fdb_class.Query(args.extra_solr_term, [item[1:] for item in solr_args if item.startswith('!')], solr_or_groups, solr_url, args.case, query_obj, extra_params=extra_params, langs=langs)
total_hits+=queue_query_from_db(query_obj, args, db, fdb, q)
print ("Total number of hits:",total_hits,file=sys.stderr)
q.put(-1)
if not args.keep_query:
try:
pass
os.remove(query_folder + temp_file_name)
os.remove(query_folder + temp_file_name[:-4] + '.cpp')
os.remove(query_folder + temp_file_name[:-4] + '.so')
except:
pass
if __name__=="__main__":
    # Script entry point: run main() (defined above) with the raw argv list
    # and propagate its return value as the process exit status.
    sys.exit(main(sys.argv))
| TurkuNLP/dep_search | query_mdb.py | query_mdb.py | py | 22,569 | python | en | code | 1 | github-code | 13 |
1681769397 | from datetime import datetime
from datetime import time
# Demo of the datetime / time classes from the `datetime` module.

# Current timestamp (date + time). Note: datetime.now() returns a
# datetime, not a time object as the original comment claimed.
now = datetime.now()
print('date and time=', now)

# Just the time-of-day component of the current timestamp.
now = datetime.now().time()
print("time =", now)

# A fixed time of day, 00:00:12. This is NOT a 12-second duration --
# use datetime.timedelta for durations.
time1 = time(0, 0, 12)
print(time1)

# BUG FIX: datetime() with no arguments raises
# "TypeError: function missing required argument 'year'".
# Construct an explicit datetime instead.
time2 = datetime(2000, 1, 1)

# Handy attributes available on a time/datetime value:
#now.microsecond
#now.second
#now.hour
#now.minute
| farzan-dehbashi/toolkit | date_time.py/date_time.py | date_time.py | py | 318 | python | en | code | 5 | github-code | 13 |
4321482451 | from financepy.utils.date import Date
from financepy.products.equity.equity_vanilla_option import EquityVanillaOption
from financepy.utils.global_types import OptionTypes
from financepy.models.heston import Heston, HestonNumericalScheme
import numpy as np
# Reference see table 4.1 of Rouah book
# Shared model/market parameters consumed by test_heston() below.
# NOTE: test_heston() shadows rho and sigma with its own local values.
valuation_date = Date(1, 1, 2015)
expiry_date = Date(1, 4, 2015)  # three months after valuation
v0 = 0.05 # initial variance of volatility
theta = 0.05 # long term variance
kappa = 2.0 # speed of variance reversion
sigma = 0.10 # volatility of variance
rho = -0.9 # correlation
# Flat market assumptions.
interest_rate = 0.05
dividend_yield = 0.01
# Monte Carlo settings for Heston.value_mc().
seed = 2838
num_steps = 100
num_paths = 20000
stock_price = 100.0
def test_heston():
    """Price a European call under Heston and pin the reference values.

    Monte Carlo and four analytic pricers are checked against table 4.1
    of Rouah's book (see the module header comment).
    """
    # These shadow the module-level rho/sigma for this scenario.
    rho = -0.90000
    sigma = 0.75000
    strike_price = 105.00

    model = Heston(v0, kappa, theta, sigma, rho)
    option = EquityVanillaOption(expiry_date, strike_price,
                                 OptionTypes.EUROPEAN_CALL)

    mc_value = model.value_mc(valuation_date, option, stock_price,
                              interest_rate, dividend_yield,
                              num_paths, num_steps, seed)

    # The four closed-form/semi-analytic pricers must all agree.
    analytic_values = [
        model.value_gatheral(valuation_date, option, stock_price,
                             interest_rate, dividend_yield),
        model.value_lewis_rouah(valuation_date, option, stock_price,
                                interest_rate, dividend_yield),
        model.value_lewis(valuation_date, option, stock_price,
                          interest_rate, dividend_yield),
        model.value_weber(valuation_date, option, stock_price,
                          interest_rate, dividend_yield),
    ]

    assert round(mc_value, 4) == 1.7333
    for value in analytic_values:
        assert round(value, 4) == 1.8416
| domokane/FinancePy | tests/test_FinModelHeston.py | test_FinModelHeston.py | py | 1,846 | python | en | code | 1,701 | github-code | 13 |
73891707856 | import pandas as pd
import numpy as np
# Merger-history catalogue from the TNG simulations: count past merger events
# per z=0 subhalo and build a 4-band image cube for the subhaloes that have
# mock SDSS broadband images on disk.
# NOTE(review): all paths are hard-coded to a specific cluster filesystem.
df = pd.read_pickle('/data/MERGERS/datasets/df_sample_with_sfr.pk')
# Keep rows with merger_label < 2 (presumably the merger classes 0 and 1
# -- confirm against the labelling convention of the source dataset).
only_mergers = df.iloc[np.where(df.merger_label < 2)]
z0_subfind = only_mergers.z0_subfind
unique_at_z0 = np.unique(z0_subfind)
# Count how many merger rows each unique z=0 subhalo has.
n_merging_events = np.zeros_like(unique_at_z0)
for i, subfind in enumerate(unique_at_z0):
    temp = only_mergers.iloc[np.where(z0_subfind == subfind)]
    n_merging_events[i] = temp.shape[0]
unique, counts = np.unique(n_merging_events, return_counts=True)
# NOTE(review): this DataFrame is built but never assigned or printed --
# looks like leftover notebook output; it has no effect here.
pd.DataFrame(np.array([unique, counts]).T, columns=['MM in the Past', 'Counts'])
# Column layout of the tireless-tracker merger catalogue file.
columns=['snap', 'subfind', 'np_subfind', 'z0_subfind', 'ratio', 'fp_mass', 'np_mass', 'central_snap', 'central_subfind', 'num', 'matches']
tng100_mergers = pd.read_csv('/data/captain/sources/tireless-tracker/TNG100_mergers.dat', sep=';', names=columns)
z0_subfinds = np.unique(tng100_mergers.z0_subfind)
# Count mergers with ratio > 0.25 per subhalo (presumably mass ratio above
# 1:4, i.e. major mergers -- TODO confirm the 'ratio' column semantics).
num_mergers = np.zeros_like(z0_subfinds)
for i, sub in enumerate(z0_subfinds):
    num_mergers[i] = tng100_mergers.iloc[np.where((tng100_mergers.z0_subfind == sub) & (tng100_mergers.ratio > 0.25))].shape[0]
# Subhaloes that actually have mock SDSS images available on disk.
available = np.loadtxt('/data/captain/TNG/sdss/snapnum_099/subfind_ids.txt').astype(int)
reference = pd.DataFrame(np.array([z0_subfinds, num_mergers]).T, columns=['subfind', 'Counts'])
reference = reference.set_index('subfind')
# Reindex onto the available subhaloes; subhaloes missing from the merger
# catalogue come back as NaN and are treated as "no mergers" below.
availables = reference.reindex(available)
availables.iloc[np.where(np.isnan(availables.Counts))] = 0
print(availables.groupby('Counts').size())
import glob
broadbands = glob.glob('/data/captain/TNG/sdss/snapnum_099/data/*.fits')
from IPython.display import Image
from astropy.io import fits
from matplotlib import pyplot as plt
from scipy.ndimage import zoom
# Build a (N, 128, 128, 4) cube of the g, r, i, z broadband planes,
# rescaling each plane to 128x128 with spline interpolation (scipy zoom).
dataset = np.zeros((availables.shape[0], 128, 128, 4))
for k, (index, row) in enumerate(availables.iterrows()):
    if(k % 100 == 0):
        print(k)
    data = fits.getdata(f'/data/captain/TNG/sdss/snapnum_099/data/broadband_{index}.fits')
    g = data[0]
    g = zoom(g, 128/data[0].shape[0])
    r = data[1]
    r = zoom(r, 128/data[1].shape[0])
    i = data[2]
    i = zoom(i, 128/data[2].shape[0])
    z = data[3]
    z = zoom(z, 128/data[3].shape[0])
    dataset[k] = np.dstack([g, r, i, z])
np.save(arr=dataset, file='dataset.npy')
np.save(arr=dataset, file='dataset.npy') | astroferreira/random-scripts | generate_history_dataset.py | generate_history_dataset.py | py | 2,318 | python | en | code | 0 | github-code | 13 |
39130168526 | from typing import List, TypedDict, Union
from .....models import *
from .forum import AV3Forum
from .post import AV3DictSubPosts, AV3Posts
from .user import AV3User
__all__ = (
"AV3ThreadInfo",
"AV3ArchiveOptions",
"AV3ArchiveUpdateInfo",
"AV3ArchiveThread",
)
class AV3ThreadInfo:
    """(De)serialisation helpers between ThreadInfo and the v3 archive dict."""

    class ArchivePart(TypedDict):
        id: int
        title: str
        author: AV3User.ArchivePart
        forum: AV3Forum.ArchivePart
        create_time: int

    @staticmethod
    def archive_dump(thread_info: ThreadInfo) -> ArchivePart:
        """Serialise *thread_info* into its v3 archive dictionary."""
        return dict(
            id=thread_info.id,
            title=thread_info.title,
            author=AV3User.archive_dump(thread_info.author),
            forum=AV3Forum.archive_dump(thread_info.forum),
            create_time=thread_info.create_time,
        )

    @staticmethod
    def archive_load(archive: ArchivePart):
        """Rebuild a ThreadInfo model from its v3 archive dictionary."""
        author = AV3User.archive_load(archive["author"])
        forum = AV3Forum.archive_load(archive["forum"])
        return ThreadInfo(
            id=archive["id"],
            title=archive["title"],
            author=author,
            forum=forum,
            create_time=archive["create_time"],
        )
class AV3ArchiveOptions:
    """(De)serialisation helpers for the media-inclusion flags (v3 format)."""

    class ArchivePart(TypedDict):
        images: bool
        audios: bool
        videos: bool
        portraits: bool

    @staticmethod
    def archive_dump(archive_info: ArchiveOptions) -> ArchivePart:
        """Serialise the four media flags into a plain dict."""
        return dict(
            images=archive_info.images,
            audios=archive_info.audios,
            videos=archive_info.videos,
            portraits=archive_info.portraits,
        )

    @staticmethod
    def archive_load(archive: ArchivePart):
        """Rebuild an ArchiveOptions model from its archived flag dict."""
        flags = archive
        return ArchiveOptions(
            images=flags["images"],
            audios=flags["audios"],
            videos=flags["videos"],
            portraits=flags["portraits"],
        )
class AV3ArchiveUpdateInfo:
    """(De)serialisation helpers for archive update bookkeeping (v3 format).

    Note the key mismatch kept for on-disk compatibility: the model's
    ``archive_time`` attribute is stored under the key "store_time".
    """

    class ArchivePart(TypedDict):
        store_time: int
        last_update_time: Union[int, None]

    @staticmethod
    def archive_dump(archive_update_info: ArchiveUpdateInfo) -> ArchivePart:
        """Serialise the update info into its v3 archive dictionary."""
        return dict(
            store_time=archive_update_info.archive_time,
            last_update_time=archive_update_info.last_update_time,
        )

    @staticmethod
    def archive_load(archive: ArchivePart):
        """Rebuild an ArchiveUpdateInfo from its v3 archive dictionary."""
        stored = archive["store_time"]
        updated = archive["last_update_time"]
        return ArchiveUpdateInfo(archive_time=stored, last_update_time=updated)
class AV3ArchiveThread:
    """(De)serialisation helpers for a whole archived thread (v3 format)."""

    class ArchivePart(TypedDict):
        archive_time: int
        thread_info: AV3ThreadInfo.ArchivePart
        posts: AV3Posts.ArchivePart
        dict_subposts: AV3DictSubPosts.ArchivePart
        users: List[AV3User.ArchivePart]

    @staticmethod
    def archive_dump(archive_thread: ArchiveThread) -> ArchivePart:
        """Serialise an ArchiveThread (and its users) into the v3 dict."""
        dumped_users = [AV3User.archive_dump(u) for u in archive_thread.users]
        return dict(
            archive_time=archive_thread.archive_time,
            thread_info=AV3ThreadInfo.archive_dump(archive_thread.thread_info),
            posts=AV3Posts.archive_dump(archive_thread.posts),
            dict_subposts=AV3DictSubPosts.archive_dump(archive_thread.dict_subposts),
            users=dumped_users,
        )

    @staticmethod
    def archive_load(archive: ArchivePart):
        """Rebuild an ArchiveThread; the shared user pool is loaded first so
        posts and sub-posts can resolve their authors against it.
        """
        # NOTE(review): users are archived as a list but rebuilt as a set --
        # presumably ArchiveThread expects a set; confirm against the model.
        loaded_users = {AV3User.archive_load(u) for u in archive["users"]}
        return ArchiveThread(
            archive_time=archive["archive_time"],
            thread_info=AV3ThreadInfo.archive_load(archive["thread_info"]),
            posts=AV3Posts.archive_load(archive["posts"], loaded_users),
            dict_subposts=AV3DictSubPosts.archive_load(archive["dict_subposts"], loaded_users),
            users=loaded_users,
        )
| 283375/tieba-thread-archive | src/tieba_thread_archive/local/archive/v3/models/archive.py | archive.py | py | 3,699 | python | en | code | 2 | github-code | 13 |
31634825242 | import math
import numpy as np
import vincenty as vn
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Distances
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def euclideanDistance(a, b):
    """Return the Euclidean (straight-line) distance between 2D points.

    Parameters
    ----------
    a, b : sequence of two floats
        (x, y) coordinate pairs.

    Returns
    -------
    float

    Note: for latitude/longitude coordinates this is a poor approximation;
    use a geodesic formula such as Vincenty's instead (available in the
    `vincenty` pip package, see the __main__ block below).
    """
    # math.hypot is the idiomatic, numerically safer spelling of
    # sqrt(dx**2 + dy**2).
    return math.hypot(a[0] - b[0], a[1] - b[1])


def distanceMat(landscape, distFun=euclideanDistance):
    """Return the pairwise distance matrix of a sequence of points.

    Parameters
    ----------
    landscape : sequence of coordinate pairs
    distFun : callable(a, b) -> float
        Distance function applied to every ordered pair of points
        (defaults to euclideanDistance).

    Returns
    -------
    numpy.ndarray of shape (len(landscape), len(landscape))
    """
    coordsNum = len(landscape)
    distMatrix = np.empty((coordsNum, coordsNum))
    for (i, coordA) in enumerate(landscape):
        for (j, coordB) in enumerate(landscape):
            distMatrix[i][j] = distFun(coordA, coordB)
    return distMatrix
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Kernels
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def inverseLinearStep(distance, params=(.75, 1)):
    """Zero-inflated inverse-distance migration estimate.

    Returns params[0] when *distance* is (close to) zero, otherwise
    1 / (distance * params[1]).

    Fixes vs. the original: the unreachable ``return True`` after both
    return branches is removed, and the default is an immutable tuple
    instead of a mutable list.

    NOTE(review): the original comment flags this as a rough first
    approximation to be replaced by a zero-inflated exponential.
    """
    if math.isclose(distance, 0):
        return params[0]
    return 1 / (distance * params[1])


def migrationKernel(distMat, params=(.75, 1), kernelFun=inverseLinearStep):
    """Convert a distance matrix into row-normalised migration probabilities.

    Parameters
    ----------
    distMat : 2D array-like of distances
    params : sequence
        Passed through to *kernelFun* (params[0] is the zero-inflated
        value, params[1] scales the distance).
    kernelFun : callable(distance, params=...) -> float

    Returns
    -------
    numpy.ndarray where each row sums to 1.
    """
    coordsNum = len(distMat)
    migrMat = np.empty((coordsNum, coordsNum))
    for (i, row) in enumerate(distMat):
        for (j, dst) in enumerate(row):
            migrMat[i][j] = kernelFun(dst, params=params)
        # Normalize each row to sum to 1 so it is a probability vector.
        migrMat[i] = migrMat[i] / sum(migrMat[i])
    return migrMat
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Tests
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if __name__ == "__main__":
    # Smoke test: pairwise distance between two lat/long points using
    # Vincenty's geodesic formula from the `vincenty` pip package.
    landscape = ((42.3541165, -71.0693514), (40.7791472, -73.9680804))
    distMat = distanceMat(landscape, distFun=vn.vincenty)
    distMat
    row = distMat[0]
    # BUG FIX: the original called inverseLinearStep(0, .75, 1), which
    # raises TypeError -- the function takes a single `params` sequence,
    # not two separate positional parameters.
    inverseLinearStep(0, params=[.75, 1])
| Chipdelmal/MoNeT | Markov/distances.py | distances.py | py | 2,632 | python | en | code | 7 | github-code | 13 |
def tub(a, b):
    """Return the list of prime numbers in the inclusive range [a, b].

    ("tub" is Uzbek for "prime".)

    Fixes vs. the original: numbers below 2 (0, 1 and negatives) are now
    correctly rejected -- the original's trial-division loop never ran for
    n < 2, so 0 and negative n were reported as prime. Trial division now
    stops at sqrt(n) and breaks on the first divisor instead of scanning
    every candidate up to n.
    """
    tub_sonlar = []  # "prime numbers"
    for n in range(a, b + 1):
        if n < 2:
            continue  # 0, 1 and negatives are not prime
        son = True  # "is prime?" flag
        for x in range(2, int(n ** 0.5) + 1):
            if n % x == 0:
                son = False
                break
        if son:
            tub_sonlar.append(n)
    return tub_sonlar
if __name__ == "__main__":
    # Prompt only when run as a script, so importing this module for the
    # tub() helper does not block waiting on stdin.
    a = int(input("1-oraliqni kiriting:"))  # "enter the first bound"
    b = int(input("2-oraliqni kiriting:"))  # "enter the second bound"
    print(tub(a, b))
| ogabekbahrombekogli/lessons_sariq | 20.2 q q func.py | 20.2 q q func.py | py | 428 | python | en | code | 0 | github-code | 13 |
38035045208 | from __future__ import print_function
__author__ = "Will Buttinger"
__doc__ = """Extract dataset parameters from AMI, and write them to a text file.\nExamples:\n\n\ngetMetadata.py --inDS="mc15_13TeV.361103%DAOD_TRUTH%" --fields=dataset_number,ldn,nfiles,events,crossSection,genFiltEff,generator_name"""
import logging
#pinched from pandatools!
def readDsFromFile(txtName):
    """Read dataset names from a text file, one per line.

    Blank lines and lines starting with '#' are skipped; surrounding
    whitespace is stripped. On any read error the problem is logged and the
    process exits with status -1 (behaviour kept from the original
    pandatools implementation this was pinched from).

    Parameters
    ----------
    txtName : str
        Path to the text file listing datasets.

    Returns
    -------
    list of str
        The dataset names, in file order.
    """
    dsList = []
    try:
        # Context manager guarantees the handle is closed even if iteration
        # raises; the original leaked the file in that case. str.strip()
        # already removes the trailing newline, so no re.sub is needed.
        with open(txtName) as txt:
            for tmpLine in txt:
                tmpLine = tmpLine.strip()
                # skip comment or empty
                if tmpLine.startswith('#') or tmpLine == '':
                    continue
                dsList.append(tmpLine)
    except Exception:
        errType,errValue = sys.exc_info()[:2]
        logging.error("cannot read datasets from %s due to %s:%s" % (txtName,errType,errValue))
        sys.exit(-1)
    return dsList
def isfloat(x):
    """Return True if *x* parses as a float (e.g. '1.5', '1e3', 'nan')."""
    try:
        # Only the parse result matters; the original bound it to an
        # unused local.
        float(x)
    except ValueError:
        return False
    return True
def isint(x):
    """Return True if *x* parses as a float with an integral value.

    Examples: '3' and '3.0' -> True; '3.5' and 'abc' -> False.

    BUG FIX: int(float('inf')) raises OverflowError, which the original
    `except ValueError` did not catch, crashing on 'inf'/'-inf' input;
    those now return False.
    """
    try:
        a = float(x)
        return a == int(a)
    except (ValueError, OverflowError):
        return False
def main():
    """Command-line driver: fetch dataset parameters from AMI, write a table.

    Parses the command-line options, resolves the selected datasets and
    physics parameters through pyAMI (optionally diffing two timestamps),
    and writes the result to stdout as an aligned table or to --outFile.
    Returns a process exit code (0 on success, non-zero on error).

    NOTE(review): this function relies on Python 2 features (the 'commands'
    module, the 'unicode' builtin) and on a configured pyAMI/VOMS
    environment; it cannot run under Python 3 as-is.
    """
    logging.basicConfig(format='%(levelname)s:%(message)s')

    import time,datetime
    from pytz import timezone
    import argparse

    try:
        import pyAMI.client
        import pyAMI.atlas.api as AtlasAPI
        import pyAMI.config
    except ImportError:
        logging.error("Unable to find pyAMI client. Please try this command first: lsetup pyAMI")
        return -1


    extraFieldDefaults = {} #{"approx_crossSection":None,"approx_GenFiltEff":1.0}

    fieldDefaults = {"subprocessID":0,"dataset_number":0}
    #populate the fieldDefaults ... for all, assume 'None'
    for field in pyAMI.config.tables['datasets'].keys():
        if str(field) == "cross_section": continue #special exception because this field only present in
        if str(field) in fieldDefaults.keys(): continue
        if str(field).startswith("@"): continue
        fieldDefaults[str(field)] = None


    import commands
    #check the voms proxy
    status,out = commands.getstatusoutput("voms-proxy-info -fqan -exists")
    if status!=0:
        logging.error("Please renew your certificate with this command: voms-proxy-init -voms atlas");
        return -1

    try:
        client = pyAMI.client.Client('atlas')
        AtlasAPI.init()
    except:
        logging.error("Could not establish pyAMI session. Are you sure you have a valid certificate? Do: voms-proxy-init -voms atlas")
        return -1

    # Collect the AMI physics-parameter definitions: default values, units,
    # and a help string per parameter for the --fields help text.
    #need to collect the ami dataset parameter defaults
    paramExplains = [] #for the help message only

    paramUnits = dict()

    paramDefaults = {}

    res = client.execute('ListPhysicsParameterDefs',format='dom_object')
    for r in res.get_rows() : #r is OrderedDict
        explainString = "%s: %s" % (r[u'PARAMNAME'],r[u'DESCRIPTION']);
        if r[u'UNITS']!=u'NULL':
            explainString += " (units: %s)" % r[u'UNITS']
            paramUnits[r[u'PARAMNAME']] = r[u'UNITS']
        if r[u'HASDEFAULT']==u'N' : paramDefaults[str(r[u'PARAMNAME'])] = None
        else:
            explainString += " (default value = %s)" % r[u'DEFAULTVALUE']
            if r[u'PARAMTYPE']==u'number': paramDefaults[str(r[u'PARAMNAME'])] = float(r[u'DEFAULTVALUE']) #FIXME: Assumes all parameters are floats
            elif r[u'PARAMTYPE']==u'string': paramDefaults[str(r[u'PARAMNAME'])] = str(r[u'DEFAULTVALUE'])
        paramExplains += [explainString]

    # Synthetic parameter: crossSection converted from nb to pb.
    paramDefaults["crossSection_pb"] = None
    paramUnits["crossSection_pb"] = "pb"
    paramExplains += ["crossSection_pb: Same as crossSection except in pb units (units: pb)"]

    cern_time = timezone('UCT')
    current_time = datetime.datetime.fromtimestamp(time.time(),cern_time).strftime('%Y-%m-%d %H:%M:%S')

    from argparse import RawTextHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__,formatter_class=RawTextHelpFormatter)
    parser.add_argument('--inDS',nargs='+',default=[""],help="List of datasets to retrieve parameters for")
    parser.add_argument('--inDsTxt',default="",help="Alternative to --inDS, can specify the datasets from an input file")
    parser.add_argument('--fields',nargs='+',help="List of parameters to extract. Available parameters are: \n\n  %s\n\nYou can also include any from:\n  %s\nYou can also do keyword_xxx to add a bool branch for keywords" % ("\n  ".join(paramExplains),", ".join(fieldDefaults.keys()+extraFieldDefaults.keys())),default=["dataset_number","crossSection","kFactor","genFiltEff"])
    parser.add_argument('--timestamp',default=current_time,help="The timestamp to query parameters at, specified in Universal Central Time (UCT). If left blank, will take the current time")
    parser.add_argument('--physicsGroups',nargs='+',default=["PMG,MCGN"],help="Physics group from which to retrieve parameters, listed in order of priority (highest first). Default value is 'PMG,MCGN' (i.e. try to use PMG values, fallback on MCGN values if unavailable). Allowed groups are:\n   PMG (this is the PMG's group name), BPHY, COSM, DAPR, EGAM, EXOT, FTAG, HIGG, HION, IDET, IDTR, JETM, LARG, MCGN (this is the AMI default group name), MDET, MUON, PHYS, REPR, SIMU, STDM, SUSY, TAUP, TCAL, TDAQ, THLT, TOPQ, TRIG, UPGR, VALI")
    parser.add_argument('--oldTimestamp',default="",help="If specified, will instead display a diff between the old and new timestamp, showing explanation of any changed parameters")
    parser.add_argument('--explainFields',nargs='+',default=[],help="The fields you would like explained .. will appear as comment lines after each row in the output")
    parser.add_argument('--explainInfo',nargs='+',default=['explanation','insert_time'],help="Properties of the parameter you want to show in the explanation. Can list from: explanation, insert_time, physicsGroup, createdby. Default is: explanation,insert_time")
    parser.add_argument('--outFile',default=sys.stdout,type=argparse.FileType('w'),help="Where to print the output to. Leave blank to print to stdout")
    parser.add_argument('--delim',default="",help="The delimiter character. Defaults to spaces leading to nice formatting table")
    parser.add_argument('-v',action='store_true',help="Verbose output for debugging")

    args = parser.parse_args()

    if args.v: logging.getLogger().setLevel(logging.DEBUG)
    else: logging.getLogger().setLevel(logging.INFO)
    logging.debug(args.inDS)
    logging.debug(args.fields)
    logging.debug(args.timestamp)

    if args.timestamp=="the dawn of time":
        logging.error("Unfortunately we don't know any parameters from this time period... but we're working on it!")
        return 9999

    #split elements of fields by comma to get full list
    args.fields = sum((y.split(',') for y in args.fields),[])
    args.fields = [x.strip() for x in args.fields] #strips whitespace
    #look for keyword_ fields, these are special ...
    args.keywords=[]
    for f in args.fields:
        if f.startswith("keyword_"):
            k = f[8:]
            #and then add each keyword to the extraFieldDefaults so it is recognised thusly
            extraFieldDefaults["keyword_%s"%k]=bool(False)
            args.keywords += [k]

    #same for physics groups
    args.physicsGroups = sum((y.split(',') for y in args.physicsGroups),[])
    args.physicsGroups = [x.strip() for x in args.physicsGroups] #strips whitespace

    #same for explainFields and explainInfo
    args.explainFields = sum((y.split(',') for y in args.explainFields),[])
    args.explainFields = [x.strip() for x in args.explainFields] #strips whitespace
    args.explainInfo = sum((y.split(',') for y in args.explainInfo),[])
    args.explainInfo = [x.strip() for x in args.explainInfo] #strips whitespace

    if args.inDsTxt != '': args.inDS = readDsFromFile(args.inDsTxt)

    #and same for inDS
    args.inDS = sum((y.split(',') for y in args.inDS),[])
    args.inDS = [x.strip() for x in args.inDS] #strips whitespace


    #1.  check field values are allowed, we obtain default field values at same time..
    #2.  For each entry in inDS, if contains wildcard we obtain list of DS, otherwise check DS exists. During this time we obtain the datasetid and numEvents properties, incase we need them
    #3.  For each of these DS, get parameters from ami matching the timestamp. Organize into fields and index by subprocessID
    #4.  Output a line to our output file


    #1.
    #before adding all the ami parameters, identify which of provided fields are: 1). Obtained from list_datasets command (dsFields) 2). actual parameters
    dsFields = [ x for x in args.fields if x in fieldDefaults.keys() and x not in ["subprocessID","ldn"] ]
    extraFields = [ x for x in args.fields if x in extraFieldDefaults.keys() ]
    paramFields = [ x for x in args.fields if x in paramDefaults.keys() ]

    if len(paramFields)>0 and args.physicsGroups==[""]:
        logging.error("You must specify at least one physics group. See -h for allowed groups")
        return -1;

    #combine paramDefaults with fieldDefaults
    fieldDefaults.update(paramDefaults)
    #and with extra fields
    fieldDefaults.update(extraFieldDefaults)

    for field in args.fields:
        if field not in fieldDefaults:
            logging.error("%s is not a recognised field. Allowed fields are:" % field)
            logging.error(fieldDefaults.keys())
            return -1;


    if args.oldTimestamp!="":
        logging.info("oldTimestamp option specified. Running in diff mode...")
        args.explainFields = args.fields
        args.explainInfo = ["explanation","insert_time","physicsGroup","createdby"]

    #2.
    #replace all '*' with '%' and strip "/"
    args.inDS = [ds.replace("*","%") for ds in args.inDS]
    args.inDS = [ds.rstrip("/") for ds in args.inDS]

    if len(args.inDS)==0 or (len(args.inDS)==1 and args.inDS[0]==""):
        logging.error("No datasets provided. Please specify datasets with the --inDS or --inDsTxt options")
        return -1;

    logging.info("Fetching list of datasets from AMI (this may take a few minutes)...")


    #obtain list of datasets
    res = AtlasAPI.list_datasets(client,patterns=args.inDS,fields=dsFields+['ldn'],ami_status="VALID") #changed status from % to only catch valid now: wb 08/2015

    logging.info("...Found %d datasets matching your selection" % len(res))

    if len(res)==0:
        return 0;


    #NOTE: Should we allow retrieval of the extra information: keyword, genfiltereff, approx crossection, .. these all come from GetDatasetInfo ami command

    dataset_values = dict()
    for r in res:
        mydict = dict()
        dataset_values[str(r['ldn'])] = mydict
        for field in r.items():
            if str(field[0]) == "ldn": continue
            if str(field[0]) not in args.fields: continue
            mydict[str(field[0])] = str(field[1])
        #also if we have the 'extra fields or keywords' we will need to execute AtlasAPI.get_dataset_info ..
        if len(extraFields)>0 or len(args.keywords)>0:
            info_res = AtlasAPI.get_dataset_info(client,str(r['ldn']))
            #print(info_res)
            if len(info_res)==0: logging.error("Unable to retrieve dataset info for %s" % str(r['ldn']));return -1
            for field in extraFields:
                #ignore the keyword_ fields
                if field.startswith("keyword_"): continue
                mydict[field] = float(info_res[0][unicode(field)]) if isfloat(info_res[0][unicode(field)]) else extraFieldDefaults[field]
            for k in args.keywords:
                mydict["keyword_%s" % k] = int( (k in str(info_res[0][unicode('keyword')]).split(",")) )

    #sort dataset_values as well as possible
    from collections import OrderedDict
    sorted_values = OrderedDict()
    for ds in args.inDS:
        if ds in dataset_values.keys():
            sorted_values[ds] = dataset_values[ds]

    for ds in sorted(dataset_values):
        if ds not in sorted_values.keys():
            sorted_values[ds] = dataset_values[ds]
    dataset_values = sorted_values

    logging.debug(dataset_values)

    #res = client.execute(['GetDatasetInfo

    for ds in args.inDS:
        if '%' not in ds and ds not in dataset_values.keys():
            logging.warning("Unknown dataset: %s" % ds)

    datasetsToQuery = ",".join(dataset_values.keys())

    #if using inDsTxt, retain any comment or blank lines in structure of output
    complete_values = OrderedDict()
    if args.inDsTxt != "":
        # read lines
        commentcount=0
        import re
        txt = open(args.inDsTxt)
        for tmpLine in txt:
            # remove \n
            tmpLine = re.sub('\n','',tmpLine)
            # remove white spaces
            tmpLine = tmpLine.strip()
            # skip comment or empty
            if tmpLine.startswith('#') or tmpLine == '':
                complete_values['comment%d'%(commentcount)] = tmpLine
                commentcount = commentcount+1
                continue
            # append
            tmpLine = tmpLine.rstrip("/")
            if tmpLine in dataset_values.keys():
                complete_values[tmpLine] = dataset_values[tmpLine]
            else:
                print("cannot find %s" % tmpLine)
        # close file
        txt.close()
        dataset_values = complete_values

    logging.info("Obtaining %s for selected datasets at timestamp=%s... (please be patient)" % (args.fields,args.timestamp))

    #do as one query, to be efficient
    if(args.timestamp==current_time):
        res = client.execute(['GetPhysicsParamsForDataset',"--logicalDatasetName=%s"% datasetsToQuery,"--timestamp='%s'"%args.timestamp], format='dom_object')
    else:
        res = client.execute(['GetPhysicsParamsForDataset',"--logicalDatasetName=%s"% datasetsToQuery,"--timestamp='%s'"%args.timestamp,"--history=true"], format='dom_object')

    #organize results by dataset
    parameterQueryResults = dict()
    for r in res.get_rows():
        if r[u'logicalDatasetName'] not in parameterQueryResults.keys():
            parameterQueryResults[r[u'logicalDatasetName']] = []
        parameterQueryResults[r[u'logicalDatasetName']] += [r] #puts row in the list for this dataset


    if args.oldTimestamp!="" :
        logging.info("Obtaining %s for selected datasets at timestamp=%s... (please be patient)" % (args.fields,args.oldTimestamp))
        res2 = client.execute(['GetPhysicsParamsForDataset',"--logicalDatasetName=%s"% datasetsToQuery,"--timestamp='%s'"%args.oldTimestamp,"--history=true"], format='dom_object')
        old_parameterQueryResults = dict()
        for r in res2.get_rows():
            if r[u'logicalDatasetName'] not in old_parameterQueryResults.keys():
                old_parameterQueryResults[r[u'logicalDatasetName']] = []
            old_parameterQueryResults[r[u'logicalDatasetName']] += [r] #puts row in the list for this dataset

    headerString = ""
    doneHeader=False
    commentCache = ""
    commentCount = 0

    #result is a list of lists (each list is 1 row)
    outputTable = []
    tableHeaders = []

    # Main output loop: one row per (dataset, subprocess). For each requested
    # parameter the value from the highest-priority physics group wins.
    for ds in dataset_values.keys():
        if ds.startswith('comment'):
            if commentCount > 0 : commentCache += "\n"
            commentCache += dataset_values[ds]
            commentCount=commentCount+1
            continue
        #obtain list of parameters for this dataset
        #if(args.timestamp==current_time):
        #    res = client.execute(['GetPhysicsParamsForDataset',"--logicalDatasetName=%s"% ds,"--timestamp='%s'"%args.timestamp], format='dom_object')
        #else:
        #    res = client.execute(['GetPhysicsParamsForDataset',"--logicalDatasetName=%s"% ds,"--timestamp='%s'"%args.timestamp,"--history=true"], format='dom_object')
        res = parameterQueryResults.get(ds,[])
        if args.oldTimestamp!="": res2 = old_parameterQueryResults.get(ds,[])

        #first we have to determine how many subprocesses this ds has
        dsSubprocesses = [0] #always have the 0 subprocess
        for r in res:
            sp = int(r[u'subprocessID'])
            if sp not in dsSubprocesses: dsSubprocesses += [sp]

        #now for each subprocess we have to locate each required field value (in paramFields)
        #rank by physicsGroup
        for sp in dsSubprocesses:
            paramVals = dict()
            paramVals2 = dict()
            groupsWithVals = dict() #held for helpful output
            #need to keep explanations for requested fields
            explainInfo = dict()
            for i in args.explainFields: explainInfo[i] = dict()

            for param in paramFields:
                groupsWithVals[param] = []
                bestGroupIndex = len(args.physicsGroups)
                import copy
                paramVals[param] = copy.copy(fieldDefaults[param])
                for r in res:
                    if int(r[u'subprocessID']) != sp: continue
                    if str(r[u'paramName']) != param and not (param=="crossSection_pb" and str(r[u'paramName'])=="crossSection"): continue
                    if str(r[u'physicsGroup']) not in args.physicsGroups:
                        groupsWithVals[param] += [(str(r[u'physicsGroup']),str(r[u'paramValue']))]
                        continue
                    if args.physicsGroups.index(str(r[u'physicsGroup'])) > bestGroupIndex : continue
                    if args.physicsGroups.index(str(r[u'physicsGroup'])) == bestGroupIndex : logging.warning("Duplicate parameter %s for group %s in dataset %s (subprocess %d). Please report this!" % (param,str(r[u'physicsGroup']),ds,sp))
                    paramVals[param] = str(r[u'paramValue'])
                    if param=="crossSection_pb": paramVals[param] = str(float(paramVals[param])*1000.0)
                    bestGroupIndex=args.physicsGroups.index(str(r[u'physicsGroup']))
                    #keep the explanation info for the requested fields
                    if param in explainInfo.keys():
                        for e in args.explainInfo:
                            if unicode(e) not in r:
                                logging.error("Unrecognised explainInfo field: %s" % e)
                                return -1
                            explainInfo[param][e]=str(r[unicode(e)])
                if args.oldTimestamp!="":
                    bestGroupIndex = len(args.physicsGroups)
                    paramVals2[param] = copy.copy(fieldDefaults[param])
                    for r in res2:
                        if int(r[u'subprocessID']) != sp: continue
                        if str(r[u'paramName']) != param and not (param=="crossSection_pb" and str(r[u'paramName'])=="crossSection"): continue
                        if str(r[u'physicsGroup']) not in args.physicsGroups: continue
                        if args.physicsGroups.index(str(r[u'physicsGroup'])) > bestGroupIndex : continue
                        if args.physicsGroups.index(str(r[u'physicsGroup'])) == bestGroupIndex : logging.warning("Duplicate parameter %s for group %s in dataset %s (subprocess %d). Please report this!" % (param,str(r[u'physicsGroup']),ds,sp))
                        paramVals2[param] = str(r[u'paramValue'])
                        if param=="crossSection_pb": paramVals2[param] = str(float(paramVals2[param])*1000.0)
                        bestGroupIndex=args.physicsGroups.index(str(r[u'physicsGroup']))

            #at this stage, parameters reside in paramVals dict or dataset_values[ds] dict
            #print them in the requested order .. if any is "None" then stop, because it doesn't have a default value and didn't find a value for it either
            rowString = ""
            rowList = []
            firstPrint=False
            for param in args.fields:
                val = None
                if param == "ldn": val = ds
                elif param == "subprocessID": val = sp
                elif param in dataset_values[ds].keys(): val = dataset_values[ds][param]
                else: val = paramVals.get(param,None)
                if val == None:
                    if args.outFile != sys.stdout: logging.warning("dataset %s (subprocess %d) does not have parameter %s, which has no default." % (ds,sp,param))
                    if len(groupsWithVals.get(param,[]))>0:
                        logging.warning("The follow physicsGroups have defined that parameter though:")
                        logging.warning(groupsWithVals[param])
                    val = "#UNKNOWN#"
                    #return -1
                #if isfloat(str(val)): val = "%.6g" % float(val)
                if args.oldTimestamp!="":
                    #diff val to old val
                    val2 = None
                    if param == "ldn": val2 = ds
                    elif param == "subprocessID": val2 = sp
                    elif param in dataset_values[ds].keys(): val2 = dataset_values[ds][param]
                    else: val2 = paramVals2.get(param,None)
                    if val2 == None: val2 = "#UNKNOWN#"
                    #if isfloat(str(val2)): val2 = "%.6g" % float(val)
                    if(str(val)!=str(val2)):
                        if not firstPrint: print("%s:" % ds)
                        firstPrint=True
                        print("  %s : %s  --->  %s" % (param,str(val2),str(val)))
                        print("      insert_time  : %s" % explainInfo[param]['insert_time'])
                        print("      explanation  : %s" % explainInfo[param]['explanation'])
                        print("      createdby    : %s" % explainInfo[param]['createdby'])
                        print("      physicsGroup : %s" % explainInfo[param]['physicsGroup'])
                    continue

                rowList += [str(val)]
                if rowString != "" and args.delim!="": rowString += args.delim
                rowString += str(val)
                #inspect the type of str(val) to build up the header
                if not doneHeader:
                    headerString += param
                    if args.outFile != sys.stdout:
                        if type(fieldDefaults[param])==bool: headerString += "/O:"
                        elif type(fieldDefaults[param])==int: headerString += "/I:"
                        elif type(fieldDefaults[param])==float: headerString += "/D:"
                        elif isfloat(str(val)): headerString += "/D:"
                        #elif isint(str(val)): headerString += "/I:" TO BE SAFE WE MAKE ALL NUMERIC FIELDS FLOATS, EXCEPT if the default value is type int
                        else: headerString += "/C:"
                    else:
                        v = param
                        if param in paramUnits:
                            headerString += " [%s]" % paramUnits[param]
                            v += " [%s]" % paramUnits[param]
                        tableHeaders += [v]
                        headerString += "  "
            if args.oldTimestamp!="": continue #print nothing more for diff mode
            if not doneHeader:
                doneHeader=True
                if args.outFile!=sys.stdout: print(headerString[:-1],file=args.outFile)
            if commentCount > 0:
                if args.outFile!=sys.stdout and args.delim!="": print(commentCache,file=args.outFile)
                outputTable += [["COMMENT",commentCache]]
                commentCache = ''; commentCount = 0
            if args.outFile != sys.stdout and args.delim!="": print(rowString,file=args.outFile)
            outputTable += [rowList]
            #also print the required explanations
            for (field,expl) in explainInfo.items():
                outString = "#%s: { " % field
                doneFirst=False
                for eField in args.explainInfo:
                    if doneFirst: outString += " , "
                    if not eField in expl.keys(): outString += " %s: <NONE .. value is default>"%eField
                    else: outString += "%s: %s" % (eField,expl[eField])
                    doneFirst=True
                outString += " }"
                #print(outString,file=args.outFile)
                outputTable += [["COMMENT",outString]]

    if args.oldTimestamp!="":
        args.outFile.close()
        return 0

    #print the table in nicely formatted state
    if args.outFile == sys.stdout or args.delim=="":
        #determine column widths
        columnWidths = [0]*len(args.fields)
        for i in range(0,len(tableHeaders)):
            columnWidths[i] = len(tableHeaders[i])
        for r in outputTable:
            if len(r)>0 and r[0]=="COMMENT": continue
            for i in range(0,len(r)):
                if len(r[i])>columnWidths[i]: columnWidths[i]=len(r[i])
        lineout = ""
        for i in range(0,len(tableHeaders)):
            lineout += tableHeaders[i].ljust(columnWidths[i]) + "  "
        print(lineout)
        for r in outputTable:
            lineout = ""
            if len(r)>0 and r[0]=="COMMENT": lineout = r[1]
            else:
                for i in range(0,len(r)):
                    lineout += r[i].ljust(columnWidths[i]) + "  "
            print(lineout,file=args.outFile)

    #print the footer, which is the command to reproduce this output
    import os
    if args.outFile != sys.stdout:
        #remove comment from dataset_values
        datasetss = [x for x in dataset_values.keys() if not x.startswith("comment")]

        print("",file=args.outFile)
        print("#lsetup \"asetup %s,%s\" pyAMI" % (os.environ.get('AtlasProject','UNKNOWN!'),os.environ.get('AtlasVersion','UNKNOWN!')),file=args.outFile)
        print("#getMetadata.py --timestamp=\"%s\" --physicsGroups=\"%s\" --fields=\"%s\" --inDS=\"%s\"" % (args.timestamp,",".join(args.physicsGroups),",".join(args.fields),",".join(datasetss)),file=args.outFile )
        logging.info("Results written to: %s" % args.outFile.name)

    args.outFile.close()
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
| rushioda/PIXELVALID_athena | athena/Tools/PyUtils/bin/getMetadata.py | getMetadata.py | py | 26,317 | python | en | code | 1 | github-code | 13 |
10977871198 | import configparser
import json
from abc import ABCMeta, abstractmethod
from pathlib import Path
from typing import List, Optional
from model.manufacturer import Manufacturer
from pydantic.json import pydantic_encoder
from repository.exceptions import (
ManufacturerAlreadyExistsException,
ManufacturerNotFoundException,
)
class ManufacturersRepository(metaclass=ABCMeta):
    """Abstract base class for manufacturer repositories.

    Concrete subclasses are expected to populate ``self._manufacturers``
    (a ``List[Manufacturer]``) in their own ``__init__``.
    """

    def __init__(self, file_source: Optional[str]):
        # NOTE(review): the concrete subclasses in this module define their own
        # __init__ and never call super().__init__, so _file_source appears
        # unused -- confirm before relying on it.
        self._file_source = file_source

    @abstractmethod
    def get_manufacturer(self, id: int) -> Manufacturer:
        """Return the manufacturer with the given id.

        Raises:
            ManufacturerNotFoundException: if no manufacturer has that id.
        """
        pass

    @abstractmethod
    def create_manufacturer(self, manufacturer: Manufacturer) -> None:
        """Add a new manufacturer and persist it."""
        pass

    @abstractmethod
    def update_manufacturer(self, id: int, manufacturer: Manufacturer) -> None:
        """Update the manufacturer identified by ``id``."""
        pass

    def get_manufacturers(self) -> List[Manufacturer]:
        """Return the in-memory list of all manufacturers."""
        return self._manufacturers
class ManufacturersRepositoryJSONFile(ManufacturersRepository):
    """Manufacturer repository persisted as a JSON file on disk."""

    def __init__(self, file_name: str = "manufacturers.json") -> None:
        self._file_name = file_name
        # Start from the persisted state when the file already exists.
        self._manufacturers = (
            self._get_manufacturers_from_json_file()
            if Path(file_name).is_file()
            else []
        )

    def _get_manufacturers_from_json_file(self) -> List[Manufacturer]:
        """Load and deserialize every manufacturer from the JSON file."""
        # The original had an unreachable `return []` after the in-with return.
        with open(self._file_name, mode="r") as f:
            raw_manufacturers = f.read()
        manufacturers = json.loads(raw_manufacturers)
        return [Manufacturer(**p) for p in manufacturers]

    def _save_manufacturers_to_json_file(self) -> None:
        """Serialize the in-memory manufacturer list back to the JSON file."""
        manufacturers_to_save = [p.dict() for p in self._manufacturers]
        with open(self._file_name, mode="w") as f:
            data = json.dumps(manufacturers_to_save, indent=4, default=pydantic_encoder)
            f.write(data)

    def get_manufacturer(self, id: int) -> Manufacturer:
        """Return the manufacturer with the given id.

        Raises:
            ManufacturerNotFoundException: if no manufacturer has that id.
        """
        # Refresh from disk so external writers are picked up.  The original
        # called the loader but discarded its result, and crashed with
        # FileNotFoundError when the file did not exist yet (e.g. on the very
        # first create_manufacturer call).
        if Path(self._file_name).is_file():
            self._manufacturers = self._get_manufacturers_from_json_file()
        for manufacturer in self._manufacturers:
            if manufacturer.id == id:
                return manufacturer
        raise ManufacturerNotFoundException(id)

    def create_manufacturer(self, manufacturer: Manufacturer) -> None:
        """Add a new manufacturer and persist it.

        Raises:
            ManufacturerAlreadyExistsException: if the id is already taken.
        """
        try:
            self.get_manufacturer(manufacturer.id)
        except ManufacturerNotFoundException:
            self._manufacturers.append(manufacturer)
            self._save_manufacturers_to_json_file()
        else:
            raise ManufacturerAlreadyExistsException(manufacturer.id)

    def update_manufacturer(self, id: int, manufacturer: Manufacturer) -> None:
        # NOTE(review): this mirrors the original behavior, which raises when
        # the target id exists and silently does nothing otherwise; an actual
        # update (replace + save) appears to be unimplemented -- confirm intent.
        try:
            self.get_manufacturer(manufacturer.id)
        except ManufacturerNotFoundException:
            pass
        else:
            raise ManufacturerAlreadyExistsException(manufacturer.id)
class ManufacturersRepositoryINIFile(ManufacturersRepository):
    """Manufacturer repository persisted as an INI file (one section per manufacturer)."""

    def __init__(self, file_name: str = "manufacturers.ini") -> None:
        self._file_name = file_name
        self._manufacturers = (
            self._get_manufacturers_from_ini_file() if Path(file_name).is_file() else []
        )

    def _get_manufacturers_from_ini_file(self) -> List[Manufacturer]:
        """Load every manufacturer; the section name is the manufacturer name."""
        ini_parser = configparser.ConfigParser()
        ini_parser.read(self._file_name)
        manufacturer_list = []
        for section in ini_parser.sections():
            manufacturer_dictionary = {
                option: ini_parser.get(section, option)
                for option in ini_parser.options(section)
            }
            manufacturer_dictionary.update({"name": section})
            manufacturer_list.append(Manufacturer(**manufacturer_dictionary))
        return manufacturer_list

    def _save_manufacturers_to_ini_file(self) -> None:
        """Write every manufacturer as its own INI section, skipping empty values."""
        manufacturers_to_save = [p.dict() for p in self._manufacturers]
        parser = configparser.ConfigParser()
        for manufacturer in manufacturers_to_save:
            parser.add_section(manufacturer["name"])
            for k, v in manufacturer.items():
                # "name" is encoded as the section header, not as an option.
                if v and k != "name":
                    parser.set(manufacturer["name"], k, str(v))
        with open(self._file_name, "w") as configfile:
            parser.write(configfile)

    def get_manufacturer(self, id: int) -> Manufacturer:
        """Return the manufacturer with the given id.

        Raises:
            ManufacturerNotFoundException: if no manufacturer has that id.
        """
        # Refresh from disk so external writers are picked up.  The original
        # called the loader but discarded its result, making the call a no-op.
        if Path(self._file_name).is_file():
            self._manufacturers = self._get_manufacturers_from_ini_file()
        for manufacturer in self._manufacturers:
            if manufacturer.id == id:
                return manufacturer
        raise ManufacturerNotFoundException(id)

    def create_manufacturer(self, manufacturer: Manufacturer) -> None:
        """Add a new manufacturer and persist it.

        Raises:
            ManufacturerAlreadyExistsException: if the id is already taken.
        """
        try:
            self.get_manufacturer(manufacturer.id)
        except ManufacturerNotFoundException:
            self._manufacturers.append(manufacturer)
            self._save_manufacturers_to_ini_file()
        else:
            raise ManufacturerAlreadyExistsException(manufacturer.id)

    def update_manufacturer(self, id: int, manufacturer: Manufacturer) -> None:
        # NOTE(review): same as the JSON repository -- raises when the target
        # id exists, does nothing otherwise; a real update appears to be
        # unimplemented.  Kept as-is to preserve behavior.
        try:
            self.get_manufacturer(manufacturer.id)
        except ManufacturerNotFoundException:
            pass
        else:
            raise ManufacturerAlreadyExistsException(manufacturer.id)
| pedrolp85/pydevice | app/repository/manufacturers/manufacturers.py | manufacturers.py | py | 5,094 | python | en | code | 0 | github-code | 13 |
38072288258 | from ROOT import *
def printClustersAndCells():
    """Dump per-chain cluster/cell summary stats from diagnostics.root to text files."""
    fin = TFile('diagnostics.root')
    # Context managers guarantee the text files are flushed and closed even if
    # a histogram lookup fails; the original leaked both handles on exception.
    with open('clusters.txt', 'w') as fclusters, open('cells.txt', 'w') as fcells:
        fclusters.write('chain name\t mean number of clusters \t mean Et \n')
        fcells.write('chain name\t mean number of cells \t mean energy \n')
        hists = [k.GetName() for k in fin.GetListOfKeys()]
        for hist in hists:
            if "clusters" in hist:
                hNclusters = fin.Get('%s/nClusters' % hist)
                hEt = fin.Get('%s/Et' % hist)
                fclusters.write(hist + '\t' + str(hNclusters.GetMean()) + '\t' + str(hEt.GetMean()) + '\n')
            if "cells" in hist:
                hNcells = fin.Get('%s/nCells' % hist)
                hEnergy = fin.Get('%s/Energy' % hist)
                fcells.write(hist + '\t' + str(hNcells.GetMean()) + '\t' + str(hEnergy.GetMean()) + '\n')
    fin.Close()


printClustersAndCells()
| rushioda/PIXELVALID_athena | athena/Trigger/TrigValidation/TrigJetValidation/python/clustersAndCells.py | clustersAndCells.py | py | 887 | python | en | code | 1 | github-code | 13 |
32951459002 | from user import User
BROWSERS = ('chrome', 'safari', 'firefox')


class Crawler:
    """Facade that configures and runs one of the crawling back-ends."""

    def __init__(self, method=None, usr=None, pwd=""):
        """
        :param method: method which app will crawl data ('bs4' or 'selenium')
        :param usr: screen name of the account to crawl
        :param pwd: password used when the selenium back-end needs to log in
        """
        self.method = method
        self.usr = User(usr=usr, pwd=pwd)
        self.limit = None       # max number of posts to fetch (None = all)
        self.headless = False   # run the browser without a visible window
        self._browser = None    # one of BROWSERS, validated by the setter

    @property
    def crawler_method(self):
        return self.method

    @crawler_method.setter
    def crawler_method(self, method):
        # Only the two supported back-ends are accepted; anything else resets to None.
        self.method = method if method in ('bs4', 'selenium') else None

    @property
    def username(self):
        return self.usr.usr

    @username.setter
    def username(self, usr):
        # Ignore empty or missing names instead of crashing on len(None).
        if usr:
            self.usr.usr = usr

    @property
    def password(self):
        return self.usr.pwd

    @password.setter
    def password(self, pwd):
        self.usr.pwd = pwd

    @property
    def limit_mode(self):
        return self.limit

    @limit_mode.setter
    def limit_mode(self, limit):
        # Accept anything int() can parse; otherwise (including None, which
        # previously raised TypeError) fall back to "no limit".
        try:
            self.limit = int(limit)
        except (TypeError, ValueError):
            self.limit = None

    @property
    def browser(self):
        return self._browser

    @browser.setter
    def browser(self, browser):
        self._browser = browser if browser in BROWSERS else None

    @property
    def headless_mode(self):
        return self.headless

    @headless_mode.setter
    def headless_mode(self, flag):
        # Accept a plain bool (previously silently ignored), 'y'/'n', or 0/1;
        # any other str/int value means "undecided" (None); other types are
        # ignored and keep the current value, as before.
        if isinstance(flag, bool):
            self.headless = flag
        elif isinstance(flag, str):
            if flag == 'y':
                self.headless = True
            elif flag == 'n':
                self.headless = False
            else:
                self.headless = None
        elif isinstance(flag, int):
            self.headless = bool(flag) if flag in (0, 1) else None

    def crawl(self):
        """
        :return: dictionary with user's information
        """
        if self.crawler_method == 'bs4':
            return self.crawl_with_bs4()
        if self.crawler_method == 'selenium':
            return self.crawl_with_selenium()

    def crawl_with_bs4(self):
        """
        :return: dictionary with user's information parsed/crawled by BeautifulSoup
        """
        from Crawler.Method.bs4_crawler import BS4
        bs4 = BS4(self.username)
        if bs4.status != 200:
            return None, f'Username {self.username} is not found'
        result = bs4.fetch_data()
        return {
            'method': self.crawler_method,
            'username': self.username,
            'posts': result['posts'],
            'followers': result['followers'],
            'following': result['following'],
            'biography': '',
            'post': ''
        }, ''

    def crawl_with_selenium(self):
        """
        :return: dictionary with user's information parsed/crawled by Selenium
        """
        from Crawler.Method.selenium_crawler import SeleniumCrawler
        selenium = SeleniumCrawler(self.username, headless=self.headless)
        if selenium.status != 200:
            return None, f'Username {self.username} is not found.'
        if len(self.password) > 0:
            selenium.logged_in, response = selenium.log_in(self.username, self.password)
            if selenium.logged_in is False and len(response) > 0:
                return None, response['response']
        if self.limit is None:
            # Default to fetching every post the account has.
            self.limit = int(selenium.get_posts())
        return {
            'method': self.crawler_method,
            'username': self.username,
            'posts': selenium.get_posts(),
            'followers': selenium.get_followers(),
            'following': selenium.get_following(),
            'biography': selenium.get_account_biography(),
            'post': selenium.get_all_posts(self.limit)
        }, ''
| thecoldstone/Instagram-api | Crawler/crawler.py | crawler.py | py | 4,160 | python | en | code | 0 | github-code | 13 |
43114265552 | import sys, heapq
# Rebind input to sys.stdin.readline for fast bulk reads of large inputs.
input = sys.stdin.readline
INF = int(1e9)  # sentinel for "unreachable"
# n: number of cities, m: number of roads, k: target distance, x: start city.
n, m, k, x = map(int, input().split())
graph = [[] for _ in range(n+1)]  # 1-based adjacency lists
distance = [INF for _ in range(n+1)]
for _ in range(m):
    a, b = map(int, input().split())
    graph[a].append((b, 1)) # graph entry: (node, weight); every road has length 1
def dijkstra(start):
    """Single-source shortest paths from `start`, filling the module-level `distance` list."""
    distance[start] = 0
    heap = [(0, start)]
    while heap:
        cur_dist, node = heapq.heappop(heap)
        # Stale entry: a shorter path to `node` was already finalized.
        if cur_dist > distance[node]:
            continue
        # Relax every outgoing edge of the current node.
        for nxt, weight in graph[node]:
            candidate = cur_dist + weight
            if candidate < distance[nxt]:
                distance[nxt] = candidate
                heapq.heappush(heap, (candidate, nxt))
dijkstra(x)
# Print every city whose shortest distance from x is exactly k (ascending),
# or -1 when no such city exists.  Node 0 is unused (cities are 1-based).
targets = [city for city in range(1, len(distance)) if distance[city] == k]
if targets:
    for city in targets:
        print(city)
else:
    print(-1)
# BFS로도 풀 수 있음! | jinhyungrhee/Problem-Solving | BOJ/BOJ_18352_특정거리의도시찾기.py | BOJ_18352_특정거리의도시찾기.py | py | 1,034 | python | ko | code | 0 | github-code | 13 |
1690124537 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Configure tick fonts up front so the implicitly created axes uses them.
    # (The original also built a FontProperties object that was never passed
    # anywhere, and called plt.legend(prop=...) before any lines existed --
    # which emitted a "no handles" warning and was then overridden by the
    # plain plt.legend() at the end, so the Times legend font never applied.)
    plt.yticks(fontname="Times", fontsize="15")
    plt.xticks(fontname="Times", fontsize="15")
    # Finger touch locations in cm (0..16) shared by both RSSI traces.
    locations = list(range(17))
    plt.plot(locations, [-54, -50.4, -45.9, -42.8, -44.8, -43.9, -45.3, -44.9, -49.8, -50.8, -50.3, -50.1, -49.6, -48.2, -49.8, -48.3, -46.1], label="Tag 1", marker='o', linewidth=2, color="#0066ff")
    plt.plot(locations, [-50, -52, -52.1, -48.5, -47.9, -47.5, -47, -47.5, -46.9, -47.4, -46.8, -46.2, -44.1, -42.9, -46.3, -50.1, -52.6], label='Tag 2', marker='o', linewidth=2, color="#ff0066")
    plt.axis([-0.5, 16.5, -60, -40])
    # NOTE: the 'b' keyword is deprecated since Matplotlib 3.5 ('visible=');
    # kept for compatibility with the version this script was written against.
    plt.grid(b=True, which='major', color='#666666', linestyle=':')
    plt.ylabel('RSSI (dBm)', fontfamily="Times", fontsize="18")
    plt.xlabel('Finger touch location (cm)', fontfamily="Times", fontsize="18")
    # Build the legend once, after both lines exist, with the intended font.
    plt.legend(prop={'family': 'Times'})
    plt.show()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| farzan-dehbashi/toolkit | mtplotlib_lines/rfid_vs_witag.py | rfid_vs_witag.py | py | 1,270 | python | en | code | 5 | github-code | 13 |
19407352041 | import dataclasses
import importlib
import inspect
import logging
from typing import Any, Dict, Iterable, List, Optional, Set
from pynguin.setup.testcluster import TestCluster
@dataclasses.dataclass(eq=True, frozen=True)
class DefiningClass:
    """A wrapper for a class definition.

    Equality and hashing consider only ``class_name``, so two wrappers around
    same-named classes compare equal even if the class objects differ.
    """

    # The class' name; the sole contributor to equality and hashing.
    class_name: str = dataclasses.field(hash=True, compare=True)
    # The class object itself; deliberately excluded from eq/hash.
    class_obj: Any = dataclasses.field(hash=False, compare=False)
@dataclasses.dataclass
class MethodBinding:
    """A wrapper for a method definition.

    Associates one method name with every class (in the analysed module) that
    defines a method of that name.
    """

    # Name of the method.
    method_name: str
    # The function/method object as retrieved from the first defining class.
    method_obj: Any
    # All classes in the analysed module that define a method of this name.
    defining_classes: Set[DefiningClass]
    # Signature of method_obj.
    signature: inspect.Signature
class DuckMockAnalysis:
    """Provides an analysis that collects all methods provided by classes."""

    _logger = logging.getLogger(__name__)

    def __init__(self, module_name: str) -> None:
        """
        Args:
            module_name: fully-qualified name of the module to analyse
        """
        self._module_name = module_name
        self._method_bindings: Dict[str, MethodBinding] = {}

    def analyse(self) -> None:
        """Do the analysis.

        Imports the target module and records, for every method or function
        name found on any of the module's classes, the set of classes that
        define it (as a ``MethodBinding``).
        """

        def is_member(obj: object) -> bool:
            return inspect.ismethod(obj) or inspect.isfunction(obj)

        module = importlib.import_module(self._module_name)
        for class_name, class_obj in inspect.getmembers(module, inspect.isclass):
            defining_class = DefiningClass(class_name, class_obj)
            for method_name, method_obj in inspect.getmembers(class_obj, is_member):
                signature = inspect.signature(method_obj)
                if method_name not in self._method_bindings:
                    self._method_bindings[method_name] = MethodBinding(
                        method_name=method_name,
                        method_obj=method_obj,
                        defining_classes={defining_class},
                        signature=signature,
                    )
                else:
                    # TODO(sl) check signatures
                    # The existing binding is mutated in place; the original
                    # redundantly re-stored it in the dict afterwards.
                    self._method_bindings[method_name].defining_classes.add(
                        defining_class
                    )

    @property
    def method_bindings(self) -> Dict[str, MethodBinding]:
        """Provides access to the method-bindings dictionary.

        Returns:
            The method-bindings dictionary
        """
        return self._method_bindings

    def get_classes_for_method(self, method_name: str) -> Optional[Set[DefiningClass]]:
        """Extracts all classes that provide a certain method.

        If no class provides an appropriate method, `None` is returned.

        Args:
            method_name: the name of the method

        Returns:
            A set of defining classes, if any
        """
        if method_name not in self._method_bindings:
            return None
        return self._method_bindings[method_name].defining_classes

    def get_classes_for_methods(
        self, method_names: Iterable[str]
    ) -> Optional[Set[DefiningClass]]:
        """Extracts all classes that provide a given selection of methods.

        If no class provides all methods, `None` is returned.

        Args:
            method_names: the names of the methods as iterable

        Returns:
            A set of defining classes, if any
        """
        defining_classes: List[Set[DefiningClass]] = []
        for method_name in method_names:
            defining_class = self.get_classes_for_method(method_name)
            if defining_class is not None:
                defining_classes.append(defining_class)
        # NOTE(review): method names unknown to the analysis are skipped, so
        # the intersection covers only the *known* methods -- confirm this
        # matches the "no class provides all methods -> None" contract above.
        result = set.intersection(*defining_classes) if defining_classes else None
        return result

    def update_test_cluster(self, test_cluster: TestCluster) -> None:
        """Update the given test cluster with the analysis results.

        NOTE(review): the body is empty in the original source; kept as a
        deliberate no-op placeholder to preserve behavior.

        Args:
            test_cluster: the test cluster to update
        """
| Abdur-rahmaanJ/pynguin | pynguin/analyses/duckmock/duckmockanalysis.py | duckmockanalysis.py | py | 3,836 | python | en | code | null | github-code | 13 |
34346390782 | import asyncio
import mock
import pytest
from aioredis_cluster.pooler import Pooler
from aioredis_cluster.structs import Address
def create_pool_mock():
    """Return a non-callable mock mimicking an open pool with an async wait_closed()."""
    pool = mock.NonCallableMock()
    pool.configure_mock(closed=False, wait_closed=mock.AsyncMock())
    return pool
async def test_ensure_pool__identical_address():
    """ensure_pool() caches by address: a second call with an equal Address
    returns the same pool and does not invoke the factory again."""
    mocked_create_pool = mock.AsyncMock(
        return_value=create_pool_mock(),
    )
    pooler = Pooler(mocked_create_pool)
    result = await pooler.ensure_pool(Address("localhost", 1234))
    assert result is mocked_create_pool.return_value
    mocked_create_pool.assert_called_once_with(("localhost", 1234))
    result2 = await pooler.ensure_pool(Address("localhost", 1234))
    assert result2 is result
    assert mocked_create_pool.call_count == 1


async def test_ensure_pool__multiple():
    """Distinct addresses each get their own pool, created in request order."""
    pools = [object(), object(), object()]
    mocked_create_pool = mock.AsyncMock(side_effect=pools)
    pooler = Pooler(mocked_create_pool)
    result1 = await pooler.ensure_pool(Address("localhost", 1234))
    result2 = await pooler.ensure_pool(Address("localhost", 4321))
    result3 = await pooler.ensure_pool(Address("127.0.0.1", 1234))
    assert result1 is pools[0]
    assert result2 is pools[1]
    assert result3 is pools[2]
    assert mocked_create_pool.call_count == 3
    mocked_create_pool.assert_has_calls(
        [
            mock.call(("localhost", 1234)),
            mock.call(("localhost", 4321)),
            mock.call(("127.0.0.1", 1234)),
        ]
    )


async def test_ensure_pool__only_one(event_loop):
    """Concurrent ensure_pool() calls for the same address share a single
    in-flight creation: 20 tasks over 2 addresses yield only 2 factory calls."""
    pools = {
        ("h1", 1): create_pool_mock(),
        ("h2", 2): create_pool_mock(),
    }
    # All creations block on this future until every task has been scheduled.
    pool_creation_fut = event_loop.create_future()

    async def create_pool_se(addr):
        nonlocal pool_creation_fut
        await pool_creation_fut
        return pools[addr]

    mocked_create_pool = mock.AsyncMock(side_effect=create_pool_se)
    pooler = Pooler(mocked_create_pool)
    tasks = []
    for i in range(10):
        for addr in pools.keys():
            task = event_loop.create_task(pooler.ensure_pool(Address(addr[0], addr[1])))
            tasks.append(task)
    pool_creation_fut.set_result(None)
    results = await asyncio.gather(*tasks)
    assert len(results) == 20
    assert len([r for r in results if r is pools[("h1", 1)]]) == 10
    assert len([r for r in results if r is pools[("h2", 2)]]) == 10
    assert mocked_create_pool.call_count == 2


async def test_ensure_pool__error():
    """A failed creation is not cached: the next ensure_pool() retries the factory."""
    pools = [RuntimeError(), object()]
    mocked_create_pool = mock.AsyncMock(side_effect=pools)
    pooler = Pooler(mocked_create_pool)
    addr = Address("localhost", 1234)
    with pytest.raises(RuntimeError):
        await pooler.ensure_pool(addr)
    result = await pooler.ensure_pool(addr)
    assert result is pools[1]
    assert mocked_create_pool.call_count == 2
    mocked_create_pool.assert_has_calls(
        [
            mock.call(("localhost", 1234)),
            mock.call(("localhost", 1234)),
        ]
    )
async def test_close__empty_pooler():
    """close() on a pooler that never created pools still marks it closed."""
    pooler = Pooler(mock.AsyncMock())
    await pooler.close()
    assert pooler.closed is True


async def test_close__with_pools(mocker):
    """close() drops every tracked node and closes/awaits each underlying pool."""
    addrs_pools = [
        (Address("h1", 1), create_pool_mock()),
        (Address("h2", 2), create_pool_mock()),
    ]
    addrs = [p[0] for p in addrs_pools]
    pools = [p[1] for p in addrs_pools]
    mocked_create_pool = mock.AsyncMock(side_effect=pools)
    pooler = Pooler(mocked_create_pool)
    result1 = await pooler.ensure_pool(addrs[0])
    result2 = await pooler.ensure_pool(addrs[1])
    assert len(pooler._nodes) == 2
    await pooler.close()
    assert len(pooler._nodes) == 0
    assert pooler.closed is True
    result1.close.assert_called_once()
    result2.close.assert_called_once()
    result1.wait_closed.assert_called_once()
    result2.wait_closed.assert_called_once()
async def test_reap_pools(mocker):
    """_reap_pools() evicts only pools that were not touched since the last
    reap cycle (reap_frequency=-1 disables the background reaper task)."""
    addrs_pools = [
        (Address("h1", 1), create_pool_mock()),
        (Address("h2", 2), create_pool_mock()),
    ]
    addrs = [p[0] for p in addrs_pools]
    pools = [p[1] for p in addrs_pools]
    mocked_create_pool = mock.AsyncMock(side_effect=pools)
    pooler = Pooler(mocked_create_pool, reap_frequency=-1)
    # create pools
    await pooler.ensure_pool(addrs[0])
    await pooler.ensure_pool(addrs[1])
    # first reap cycle: both pools were just created/touched, nothing reaped
    reaped = await pooler._reap_pools()
    assert len(reaped) == 0
    # touch only one pool; the untouched one becomes eligible for reaping
    await pooler.ensure_pool(addrs[1])
    reaped = await pooler._reap_pools()
    assert len(reaped) == 1
    assert reaped[0] is pools[0]
    assert len(pooler._nodes) == 1
    reaped = await pooler._reap_pools()
    assert len(reaped) == 1
    assert reaped[0] is pools[1]
    assert len(pooler._nodes) == 0


async def test_reaper(mocker):
    """With reap_frequency=0 the background reaper runs once per loop tick
    and is cancelled by close()."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=0)
    assert pooler._reap_calls == 0
    # Force two event-loop cycles:
    # 1 - creation of the reaper task
    # 2 - one reaper cycle
    await asyncio.sleep(0)
    await asyncio.sleep(0)
    assert pooler._reap_calls == 1
    await pooler.close()
    assert pooler._reaper_task.cancelled() is True
async def test_add_pubsub_channel__no_addr():
    """Adding a channel for an address without a registered pubsub set fails."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=-1)
    addr = Address("h1", 1234)
    result = pooler.add_pubsub_channel(addr, b"channel", is_pattern=False)
    assert result is False


async def test_add_pubsub_channel():
    """Channels are deduplicated per (name, kind): re-adding the same
    plain/sharded channel returns False, while pattern/plain/sharded variants
    of one name count as distinct channels."""
    pooler = Pooler(mock.AsyncMock(return_value=create_pool_mock()), reap_frequency=-1)
    addr1 = Address("h1", 1234)
    addr2 = Address("h2", 1234)
    pooler._pubsub_addrs[addr1] = set()
    pooler._pubsub_addrs[addr2] = set()
    result1 = pooler.add_pubsub_channel(addr1, b"ch1", is_pattern=False)
    result2 = pooler.add_pubsub_channel(addr1, b"ch1", is_pattern=True)
    result3 = pooler.add_pubsub_channel(addr1, b"ch2", is_pattern=False)
    result4 = pooler.add_pubsub_channel(addr2, b"ch3", is_pattern=False)
    result5 = pooler.add_pubsub_channel(addr1, b"ch3", is_pattern=False)
    result6 = pooler.add_pubsub_channel(addr1, b"ch3", is_sharded=True)
    result7 = pooler.add_pubsub_channel(addr1, b"ch3", is_sharded=True)
    assert result1 is True
    assert result2 is True
    assert result3 is True
    assert result4 is True
    assert result5 is False
    assert result6 is True
    assert result7 is False
    assert len(pooler._pubsub_addrs[addr1]) == 4
    assert len(pooler._pubsub_addrs[addr2]) == 1
    assert len(pooler._pubsub_channels) == 5
    collected_channels = [(ch.name, ch.is_pattern) for ch in pooler._pubsub_channels]
    assert (b"ch1", False) in collected_channels
    assert (b"ch1", True) in collected_channels
    assert (b"ch2", False) in collected_channels
    assert (b"ch3", False) in collected_channels


async def test_remove_pubsub_channel__no_addr():
    """Removing a channel that was never added reports failure."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=-1)
    result = pooler.remove_pubsub_channel(b"channel", is_pattern=False)
    assert result is False


async def test_remove_pubsub_channel():
    """Removal matches on (name, kind): a second removal of the same channel,
    or removal with the wrong kind, returns False."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=-1)
    addr1 = Address("h1", 1234)
    addr2 = Address("h2", 1234)
    pooler._pubsub_addrs[addr1] = set()
    pooler._pubsub_addrs[addr2] = set()
    pooler.add_pubsub_channel(addr1, b"ch1", is_pattern=False)
    pooler.add_pubsub_channel(addr1, b"ch2", is_pattern=False)
    pooler.add_pubsub_channel(addr2, b"ch3", is_pattern=True)
    pooler.add_pubsub_channel(addr1, b"ch3", is_sharded=True)
    result1 = pooler.remove_pubsub_channel(b"ch1", is_pattern=False)
    result2 = pooler.remove_pubsub_channel(b"ch1", is_pattern=True)
    result3 = pooler.remove_pubsub_channel(b"ch2", is_pattern=False)
    result4 = pooler.remove_pubsub_channel(b"ch3", is_pattern=True)
    result5 = pooler.remove_pubsub_channel(b"ch3", is_pattern=True)
    result6 = pooler.remove_pubsub_channel(b"ch3", is_sharded=True)
    assert result1 is True
    assert result2 is False
    assert result3 is True
    assert result4 is True
    assert result5 is False
    assert result6 is True
    assert len(pooler._pubsub_addrs[addr1]) == 0
    assert len(pooler._pubsub_addrs[addr2]) == 0
    assert len(pooler._pubsub_channels) == 0


async def test_get_pubsub_addr():
    """Lookup returns the owning address only for a matching (name, kind) pair."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=-1)
    addr1 = Address("h1", 1234)
    addr2 = Address("h2", 1234)
    pooler._pubsub_addrs[addr1] = set()
    pooler._pubsub_addrs[addr2] = set()
    pooler.add_pubsub_channel(addr1, b"ch1", is_pattern=False)
    pooler.add_pubsub_channel(addr2, b"ch2", is_pattern=True)
    result1 = pooler.get_pubsub_addr(b"ch1", is_pattern=False)
    result2 = pooler.get_pubsub_addr(b"ch1", is_pattern=True)
    result3 = pooler.get_pubsub_addr(b"ch2", is_pattern=False)
    result4 = pooler.get_pubsub_addr(b"ch2", is_pattern=True)
    assert result1 == addr1
    assert result2 is None
    assert result3 is None
    assert result4 == addr2
async def test_ensure_pool__create_pubsub_addr_set():
    """ensure_pool() registers an (initially empty) pubsub set per address and
    preserves an existing set when the same address is ensured again."""
    addr1 = Address("h1", 1234)
    addr2 = Address("h2", 1234)
    pooler = Pooler(mock.AsyncMock(return_value=create_pool_mock()))
    assert len(pooler._pubsub_addrs) == 0
    await pooler.ensure_pool(addr1)
    await pooler.ensure_pool(addr2)
    await pooler.ensure_pool(addr2)
    assert len(pooler._pubsub_addrs) == 2
    assert addr1 in pooler._pubsub_addrs
    assert addr2 in pooler._pubsub_addrs
    assert len(pooler._pubsub_addrs[addr1]) == 0
    pooler._pubsub_addrs[addr1].add(object())
    await pooler.ensure_pool(addr1)
    assert len(pooler._pubsub_addrs[addr1]) == 1


async def test_reap_pools__cleanup_channels():
    """Reaping a pool also removes its pubsub address set and channels."""
    pooler = Pooler(mock.AsyncMock(), reap_frequency=-1)
    addr1 = Address("h1", 1)
    addr2 = Address("h2", 2)
    # create pools
    await pooler.ensure_pool(addr1)
    await pooler.ensure_pool(addr2)
    pooler.add_pubsub_channel(addr1, b"ch1")
    pooler.add_pubsub_channel(addr2, b"ch2")
    # first cycle reaps nothing; second cycle evicts both untouched pools
    reaped = await pooler._reap_pools()
    assert len(reaped) == 0
    reaped = await pooler._reap_pools()
    assert len(reaped) == 2
    assert len(pooler._pubsub_addrs) == 0
    assert len(pooler._pubsub_channels) == 0


async def test_close_only():
    """close_only() closes just the given addresses; a later ensure_pool() for
    a closed address creates a fresh pool while other pools stay cached."""
    pool1 = create_pool_mock()
    pool2 = create_pool_mock()
    pool3 = create_pool_mock()
    mocked_create_pool = mock.AsyncMock(side_effect=[pool1, pool2, pool3])
    pooler = Pooler(mocked_create_pool)
    addr1 = Address("h1", 1)
    addr2 = Address("h2", 2)
    result_pool1 = await pooler.ensure_pool(addr1)
    await pooler.ensure_pool(addr2)
    result_pool2 = await pooler.ensure_pool(addr2)
    assert result_pool1 is pool1
    assert result_pool2 is pool2
    assert mocked_create_pool.call_count == 2
    await pooler.close_only([addr2])
    pool2.close.assert_called_once_with()
    pool2.wait_closed.assert_called_once_with()
    result_pool3 = await pooler.ensure_pool(addr2)
    assert result_pool3 is pool3
    result_pool1 = await pooler.ensure_pool(addr1)
    assert result_pool1 is pool1
| DriverX/aioredis-cluster | tests/unit_tests/aioredis_cluster/test_pooler.py | test_pooler.py | py | 11,058 | python | en | code | 24 | github-code | 13 |
15569860019 | from argparse import ArgumentParser
import logging
import os
import sys
from io_utils.io_utils import load_text_corpus, save_speech_corpus
from synthesis_utils.synthesis_utils import create_sounds
# Module-level logger; handlers/formatters are attached in the __main__ block.
asr_dataset_logger = logging.getLogger(__name__)
def main():
    """CLI entry point: synthesize a speech corpus from a list of texts.

    Parses the command line, validates the dataset and text-list paths, loads
    the text corpus, synthesizes sounds (optionally with paraphrases) and
    saves the resulting speech corpus.  Raises IOError for invalid arguments
    and re-raises (after logging) any failure from the corpus helpers.
    """
    parser = ArgumentParser()
    parser.add_argument('-d', '--dataset', dest='dataset_name', type=str, required=True,
                        help='The target directory with dataset which will be built.')
    parser.add_argument('-l', '--list', dest='text_list', type=str, required=True,
                        help='The file with text list for speech synthesis.')
    parser.add_argument('-p', '--paraphrases', dest='paraphrases', type=int, required=False, default=0,
                        help='The paraphrases number.')
    args = parser.parse_args()
    dataset_path = os.path.normpath(args.dataset_name)
    dataset_parent_dir = os.path.dirname(dataset_path)
    dataset_name = os.path.basename(dataset_path)
    # An empty basename means the argument was not a usable directory name.
    if len(dataset_name) == 0:
        err_msg = f'The dataset path "{dataset_path}" is wrong!'
        asr_dataset_logger.error(err_msg)
        raise IOError(err_msg)
    # The parent directory must already exist; only the leaf dir is created.
    if len(dataset_parent_dir) > 0:
        if not os.path.isdir(dataset_parent_dir):
            err_msg = f'The directory "{dataset_path}" does not exist!'
            asr_dataset_logger.error(err_msg)
            raise IOError(err_msg)
    if not os.path.isdir(dataset_path):
        os.mkdir(dataset_path)
    text_corpus_fname = os.path.normpath(args.text_list)
    if not os.path.isfile(text_corpus_fname):
        err_msg = f'The file "{text_corpus_fname}" does not exist!'
        asr_dataset_logger.error(err_msg)
        raise IOError(err_msg)
    # Log-and-reraise so failures are recorded in the log file as well.
    try:
        text_corpus = load_text_corpus(text_corpus_fname)
    except BaseException as ex:
        err_msg = str(ex)
        asr_dataset_logger.error(err_msg)
        raise
    info_msg = f'The text corpus is loaded from the "{text_corpus_fname}". There are {len(text_corpus)} texts.'
    asr_dataset_logger.info(info_msg)
    number_of_paraphrases = args.paraphrases
    if number_of_paraphrases < 0:
        err_msg = f'The paraphrases number is wrong! Expected a non-negative integer, got {number_of_paraphrases}.'
        asr_dataset_logger.error(err_msg)
        raise IOError(err_msg)
    try:
        speech_corpus = create_sounds(texts=text_corpus, variants_of_paraphrasing=number_of_paraphrases)
    except BaseException as ex:
        err_msg = str(ex)
        asr_dataset_logger.error(err_msg)
        raise
    info_msg = f'There are {len(speech_corpus)} samples in the created speech corpus.'
    asr_dataset_logger.info(info_msg)
    try:
        save_speech_corpus(dataset_path, speech_corpus)
    except BaseException as ex:
        err_msg = str(ex)
        asr_dataset_logger.error(err_msg)
        raise
if __name__ == '__main__':
    # Route INFO-level messages both to stdout and to a log file, then run the CLI.
    asr_dataset_logger.setLevel(logging.INFO)
    log_formatter = logging.Formatter(
        '%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s'
    )
    for log_handler in (logging.StreamHandler(sys.stdout),
                        logging.FileHandler('asr_dataset.log')):
        log_handler.setFormatter(log_formatter)
        asr_dataset_logger.addHandler(log_handler)
    main()
| bond005/speechmonger | asr_dataset.py | asr_dataset.py | py | 3,373 | python | en | code | 0 | github-code | 13 |
31625655034 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 17:46:36 2016
@author: ajaver
"""
import pandas as pd
import os
import tables
import numpy as np
import matplotlib.pylab as plt
from collections import OrderedDict
import sys
sys.path.append('/Users/ajaver/Documents/GitHub/Multiworm_Tracking')
from MWTracker.helperFunctions.timeCounterStr import timeCounterStr
if __name__ == '__main__':
    # Base directory: the masked-video HDF5 to process.  The commented
    # alternatives are other experiments the author switched between.
    #masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch5_17112015_205616.hdf5'
    #masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch3_17112015_205616.hdf5'
    masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch1_18112015_075624.hdf5'
    #masked_image_file = '/Users/ajaver/Desktop/Videos/04-03-11/MaskedVideos/575 JU440 swimming_2011_03_04__13_16_37__8.hdf5'
    #masked_image_file = '/Users/ajaver/Desktop/Videos/04-03-11/MaskedVideos/575 JU440 on food Rz_2011_03_04__12_55_53__7.hdf5'
    # Derive the skeleton/intensity result files from the masked-video path.
    skeletons_file = masked_image_file.replace('MaskedVideos', 'Results1')[:-5] + '_skeletons.hdf5'
    intensities_file = skeletons_file.replace('_skeletons', '_intensities')
    min_block_size = 1
    #get the trajectories table
    with pd.HDFStore(skeletons_file, 'r') as fid:
        trajectories_data = fid['/trajectories_data']
    #at this point the int_map_id with the intensity maps indexes must exist in the table
    assert 'int_map_id' in trajectories_data
    # Keep only rows that actually have an intensity map.
    trajectories_data = trajectories_data[trajectories_data['int_map_id']>0]
    grouped_trajectories = trajectories_data.groupby('worm_index_joined')
    tot_worms = len(grouped_trajectories)
    base_name = skeletons_file.rpartition('.')[0].rpartition(os.sep)[-1].rpartition('_')[0]
    progress_timer = timeCounterStr('');
    #%%
    # Width (number of samples along the worm) of each intensity profile row.
    with tables.File(intensities_file, 'r') as fid:
        resampling_length = fid.get_node('/straighten_worm_intensity_median').shape[1]
    #%%
    # Per-worm median profiles and the number of frames contributing to each.
    all_worm_profiles = np.zeros((tot_worms, resampling_length));
    all_worm_profiles_N = np.zeros(tot_worms);
    worm_index_ranges = OrderedDict()
    for index_n, (worm_index, trajectories_worm) in enumerate(grouped_trajectories):
        if index_n % 10 == 0:
            dd = " Getting median intensity profiles. Worm %i of %i." % (index_n+1, tot_worms)
            dd = base_name + dd + ' Total time:' + progress_timer.getTimeStr()
            print(dd)
        int_map_id = trajectories_worm['int_map_id'].values
        int_skeleton_id = trajectories_worm['skeleton_id'].values
        #read the worm intensity profiles
        # (assumes a 2D (frame, position) array -- TODO confirm layout)
        with tables.File(intensities_file, 'r') as fid:
            worm_int_profile = fid.get_node('/straighten_worm_intensity_median')[int_map_id,:]
        #%%
        #normalize intensities of each individual profile
        # (subtract the per-frame median to remove illumination differences)
        frame_med_int = np.median(worm_int_profile, axis=1);
        worm_int_profile = worm_int_profile - frame_med_int[:, np.newaxis]
        #worm median intensity
        # NOTE: np.float is deprecated/removed in NumPy >= 1.20/1.24; works
        # only with the older NumPy this script was written against.
        median_profile = np.median(worm_int_profile, axis=0).astype(np.float)
        all_worm_profiles[index_n, :] = median_profile
        all_worm_profiles_N[index_n] = len(int_map_id)
        # Remember the contiguous index ranges of this worm in both tables.
        worm_index_ranges[worm_index] = {'skel_group' : (np.min(int_skeleton_id),np.max(int_skeleton_id)),
                                         'int_group' : (np.min(int_map_id),np.max(int_map_id))}
    #%%
    # Weighted (by frame count) average profile across all worms.
    average_profile = np.sum(all_worm_profiles*all_worm_profiles_N[:,np.newaxis], axis=0)/np.sum(all_worm_profiles_N)
    #diff_ori = np.sum(np.abs(all_worm_profiles-average_profile), axis=1)
#diff_inv = np.sum(np.abs(all_worm_profiles-average_profile[::-1]), axis=1) | ver228/work-in-progress | work_in_progress/_old/worm_orientation/correctHeadTailIntensity_global.py | correctHeadTailIntensity_global.py | py | 3,755 | python | en | code | 0 | github-code | 13 |
32802828433 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 27/03/2019 12:28 AM
# @Author : Pengfei Xiao
# @FileName: harvester_manager.py
# @Software: PyCharm
"""This file is used to manage restful harvester that crawling mention and reply to the politicians."""
import pandas as pd
import time
import sys
sys.path.append('..')
from restful_replies import RestfulReplies
from restful_by_mentioned import RestfulByMentioned
import gc
gc.enable()
if __name__ == '__main__':
start_time = time.time()
temp_df = pd.read_csv('../data/full_politician_list.csv', usecols=['Screen_Name'])
politician_list = temp_df['Screen_Name'].dropna().tolist()
print("Start restful crawling.")
for screen_name in politician_list:
print('============================================')
print('Process: {}/{}'.format(politician_list.index(screen_name) + 1, len(politician_list)))
restful_mentioned = RestfulByMentioned(screen_name, 'capstone', 'streamingMentionedCorrectDate')
restful_replies = RestfulReplies(screen_name, 'capstone', 'streamingMentionedCorrectDate')
print("Crawling tweets mentioned {}.".format(screen_name))
restful_mentioned.start()
print("Crawling replies to {}.".format(screen_name))
restful_replies.start()
restful_replies.join()
restful_mentioned.join()
print('Finished. Time used: %f mins' % ((time.time() - start_time) / 60))
gc.collect()
| pengfei123xiao/Political_Analysis | restful_harvester/harvester_manager.py | harvester_manager.py | py | 1,458 | python | en | code | 1 | github-code | 13 |
37694241147 | import threading
from typing import Optional
from loguru import logger
from pwnlib.util.cyclic import cyclic, cyclic_find
from pypwn.core.abstract.module import AbstractModule
from pypwn.core.abstract.process import AbstractProcess
from pypwn.core.protocols import IDebuggable, ITarget
class FindOffset(AbstractModule):
thread_timeout = 5
class __TargetType(ITarget, IDebuggable): ...
@classmethod
def execute(cls, target: __TargetType, max_offset: int, process: AbstractProcess, *args, **kwargs) -> Optional:
r = []
def _signal_handler(event):
logger.success(f'Stop signal caught')
if event.stop_signal == 'SIGSEGV':
logger.info('Received SIGSEGV')
else:
logger.warning(f'Received another signal: {event.stop_signal}')
result = debugger.read_value('$rsp', to_string=True)
value = int(result.split('\t')[1].strip(), base=16)
offset = cyclic_find(value)
if offset != -1:
logger.success(f'Found offset: {hex(offset)}')
r.append(offset)
sighandler_done.set()
logger.info(f"Running {cls.__name__} module")
logger.info('Looking for the offset')
logger.info('Starting up the debugger...')
with target.debugger() as debugger:
sighandler_done = threading.Event()
logger.info(f'Setting up signal callback')
debugger.api().events.stop.connect(_signal_handler)
debugger.resume()
logger.info(f'Generating payload, size={hex(max_offset)}')
payload = cyclic(max_offset)
logger.info(f'Running main...')
process(payload)
if not sighandler_done.wait(timeout=cls.thread_timeout):
logger.critical("Failed to trigger overflow")
logger.info("Cleaning up gdb")
debugger.resume()
if sighandler_done.is_set():
return r.pop()
else:
return None
| lim8en1/pypwn | src/pypwn/modules/find_offset.py | find_offset.py | py | 2,044 | python | en | code | 0 | github-code | 13 |
43771204006 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wzw', '0006_remove_group_token'),
]
operations = [
migrations.AddField(
model_name='group',
name='token',
field=models.CharField(default=-2468, max_length=19, editable=False),
preserve_default=False,
),
]
| Steh/WZW | wzw_projekt/wzw/migrations/0007_group_token.py | 0007_group_token.py | py | 460 | python | en | code | 0 | github-code | 13 |
14219905040 | def best_sum(n , array , memo={}) :
if n in memo:
return memo[n]
if n == 0 :
return []
if n < 0 :
return None
shortestResult = None
for a in array:
remainder = n - a
result = best_sum(remainder , array, memo)
if result != None:
newResult = result + [a]
if shortestResult == None or len(newResult)<len(shortestResult):
shortestResult = newResult
memo[n] = shortestResult
return memo[n]
print(best_sum(100 , [5,2,1])) | tarekichalalen2002/dynamic-programmming | memoization/best-sum.py | best-sum.py | py | 533 | python | en | code | 3 | github-code | 13 |
28255335119 | import matplotlib.pyplot as plt
import math
import numpy as np
import seaborn as sb
import pandas as pd
# EFFECT OF ISOTHERM
def IsothermResult(x,y):
reg = np.polyfit(x,y, deg=1) # out: array(slope, intercept)
# r_square
correlation_mat = np.corrcoef(x,y)
r = correlation_mat[0,1]
Isorsquare = format(r**2,".3f")
IsoSlope = format(reg[0],".3f")
IsoIntercept = format(reg[1],".3f")
return([IsoSlope,IsoIntercept,Isorsquare])
def KineticsResult(x,y):
reg = np.polyfit(x,y, deg=1) # out: array(slope, intercept)
# r_square
correlation_mat = np.corrcoef(x,y)
r = correlation_mat[0,1]
KinSquare = format(r**2,".3f")
KinSlope = format(reg[0],".3f")
KinIntercept = format(reg[1],".3f")
return([KinSlope,KinIntercept,KinSquare])
def ThermoResult(x,y):
reg = np.polyfit(x,y, deg=1) # out: array(slope, intercept)
# r_square
correlation_mat = np.corrcoef(x, y)
r = correlation_mat[0, 1]
ThermoSquare = format(r ** 2, ".3f")
ThermoSlope = format(reg[0], ".3f")
ThermoIntercept = format(reg[1], ".3f")
return ([ThermoSlope, ThermoIntercept, ThermoSquare])
# Linear regression plot
def plot(x,y):
reg = np.polyfit(x,y, deg=1) # out: array(slope, intercept)
trend = np.polyval(reg, x)
plt.scatter(x, y)
plt.plot(x, trend, 'r')
plt.show()
# EFFECT OF CONCENTRATION
def Isotherms():
file = pd.ExcelFile('sample.xlsx')
df1 = file.parse('Isotherms')
ci = df1['Ci(mg/L)'].values # ci is initial concentration of dye
b_max = df1['λmax(B)'].values # b_max is the value before Adsorption
a_max = df1['λmax(A)'].values # a_max is the value after Adsorption
'''list = []
print(ci)
print(b_max)
print(a_max)'''
ph = float(input("Enter the pH of the solution, pH: ")) # pH of the solution
m = float(input("Enter the adsorbent dose in grams, m: ")) # mass or dose of the adsorbent(g)
temp = float(input("Enter the temperature in Kelvin, T: ")) # temperature of the reaction in K
ce = (a_max / b_max) * ci # ce-Equilibrium concentration (Ce),
cr = ci - ce # concentration removed
qe = (cr / m) # qe-quantity removed
re = (((ci - ce) / ci) * 100) # Removal efficiency
logce = np.log10(ce) # Freundlich: x-axis 'logce'
logqe = np.log10(qe) # Freundlich: y-axis 'logqe'
cebyqe = (ce / qe) # Langmuir: x-axis 'ce', y-axis 'ce/qe'
log_cebyqe = np.log10(cebyqe) # Redlich-Peterson: y-axis 'log(ce/qe)'
lnce = np.log(ce) # Tempkin: x-axis 'lnce'
'''print(ce)
print(re)
print(logce)
print(qe)
print(logqe)
print(cebyqe)
print(log_cebyqe)
print(lnce)'''
# ISOTHERM TABLE: Data for Freundlich, Langmuir, Redlich-Peterson & Tempkin
print("----------------------------------------------------------------------------------------------------------------")
print(format("ISOTHERM MODEL:" ,'<16') + format("pH:",'>8'), ph,"" + format("Temperature:",'>16'), temp, "K" + format("Adsorbent Dose:",'>26'), m,"mg/L")
print("----------------------------------------------------------------------------------------------------------------")
print(
"S.No" + format("Ci(mg/L)",'>11') + format("%Removal",'>12') +format("Ce(mg/L)",'>12')+ format("Qe(mg/g)",'>12') +format("Ce/Qe",'>12') + format("log(Ce)",'>12') +format("log(Qe)" ,'>12')+format("log(Ce/Qe)",'>12') +format("ln(Ce)",'>12'))
print("----------------------------------------------------------------------------------------------------------------")
x = 0
for x in range(len(ce)):
print("%s%s%s%s%s%s%s%s%s%s" % (
format((x + 1),'^4'), format(ci[x],'>10'), format(format(re[x],".3f"),'>12'), format(format(ce[x],".3f"),'>12'), format(format(qe[x],".3f"),'>12'), format(format(cebyqe[x],".3f"),'>12'),
format(format(logce[x],".3f"),'>12'), format(format(logqe[x],".3f"),'>12'), format(format(log_cebyqe[x],".3f"),'>12'), format(format(lnce[x],".3f"),'>12')))
print("----------------------------------------------------------------------------------------------------------------")
# ISOTHERM RESULTS: Freundlich, Langmuir, Redlich-Peterson & Tempkin
print("--------------------------------------------------------")
print(format("ISOTHERM MODEL:",'<18') + format("SLOPE",'>12') + format("INTERCEPT",'>14') + format("R-SQUARE",'>12'))
print("--------------------------------------------------------")
x = 0
if x == 0:
result = IsothermResult(logce, logqe)
print(format("FREUNDLICH",'<18') + "%s%s%s" % (format(result[0],'>12'), format(result[1],'>12'), format(result[2],'>12')))
x = x + 1
if x == 1:
result = IsothermResult(ce, cebyqe)
print(format("LANGMUIR",'<18') + "%s%s%s" % (format(result[0],'>12'), format(result[1],'>12'), format(result[2],'>12')))
x = x + 1
if x == 2:
result = IsothermResult(lnce, qe)
print(format("TEMPKIN",'<18') +"%s%s%s" % (format(result[0],'>12'), format(result[1],'>12'), format(result[2],'>12')))
x = x + 1
if x == 3:
result = IsothermResult(logce, log_cebyqe)
print(format("REDLICH-PETERSON",'<18') + "%s%s%s" % (format(result[0],'>12'), format(result[1],'>12'), format(result[2],'>12')))
print("--------------------------------------------------------")
#Isotherm plots
for i in range(4):
if i==0:
dict = {"TITLE": 'FREUNDLICH ISOTHERM PLOT', "XLABLE": 'log Ce', "YLABLE": 'log Qe'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(logce, logqe)
if i==1:
dict = {"TITLE": 'LANGMUIR ISOTHERM PLOT', "XLABLE": 'Ce', "YLABLE": 'log Ce/Qe'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(ce, cebyqe)
if i==2:
dict = {"TITLE": 'TEMPKIN ISOTHERM PLOT', "XLABLE": 'ln Ce', "YLABLE": 'Qe'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(lnce, qe)
if i==3:
dict = {"TITLE": 'REDLICH-PETERSON ISOTHERM PLOT', "XLABLE": 'log Ce', "YLABLE": 'log Ce/Qe'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(logce, log_cebyqe)
# EFFECT OF TIME
def Kinetics():
file = pd.ExcelFile('sample.xlsx')
df1 = file.parse('Kinetics')
time = df1['Time'].values
b_max = df1['λmax(B)'].values # b_max is the value before Adsorption
a_max = df1['λmax(A)'].values # a_max is the value after Adsorption
b_max = b_max[0]
list_time = []
list_amax = []
#print(time)
#print(a_max)
#print(b_max)
#print(type(b_max))
#ph = float(input("Enter the pH of the solution, pH: ")) # pH of the solution
#mass = float(input("Enter the adsorbent dose in grams, m: ")) # mass or dose of the adsorbent(g)
#temp = float(input("Enter the temperature in Kelvin, T: ")) # temperature of the reaction in K
i = 0
while (a_max[i]!=a_max[i+1]):
list_time.append(time[i])
list_amax.append(a_max[i])
i = i + 1
list_time.append(time[i])
list_amax.append(a_max[i])
time = np.array(list_time)
a_max = np.array(list_amax)
'''
print(time)
print(a_max)
'''
ph = float(input("Enter the pH of the solution, pH: ")) # pH of the solution
mass = float(input("Enter the adsorbent dose in grams, m: ")) # mass or dose of the adsorbent(g)
temp = float(input("Enter the Temperature in K: ")) # mass or dose of the adsorbent(g)
ci = float(input("Enter the initial concentration of the solution in mg/L, c: ")) # concentration of the solution in mg/L
#b_max = float(input("Enter the absorption value before adsorption, λmax(B): ")) # b_max is the value after Adsorption
rem_eff = np.around((((b_max - a_max) / b_max) * 100),4) # Removal efficiency
ce = ((a_max/b_max)*ci) # ce-Equilibrium concentration (Ce)
cr = (ci - ce) # concentration removed
qe = (cr / mass) # qe-quantity removed
logqe = np.log10(qe)
qe_qt = (qe[-1]-qe)
logqe_qt = []
i = 0
while qe_qt[i] != 0:
c = math.log10(qe_qt[i])
logqe_qt.append(format(c,'.3f'))
i += 1
for i in range(len(time)):
if len(logqe_qt)!=len(time):
logqe_qt.append('inf')
tbyqt = (time/qe)
log_t = np.log10(time)
ln_t = np.log(time)
root_t = np.sqrt(time)
'''
print(rem_eff)
print(ce)
print(cr)
print(qe)
print(logqe)
print(qe_qt)
print(logqe_qt)
print(tbyqt)
print(log_t)
print(ln_t)
print(root_t) '''
print("----------------------------------------------------------------------------------------------------------------------------------")
print(format("KINETICS MODEL:",'<16') +format("pH:",'>8'), ph, "" + format("Temperature:",'>16'), temp, "K" + format("Adsorbent Dose:",'>18'),
mass, "mg/L" + format("Conc.:",'>12'), ci, "mg/L" + format("λmax(B):",'>12'), b_max)
print("----------------------------------------------------------------------------------------------------------------------------------")
print(
"S.No" + format("time(min)",'>12') + format("%Removal",'>12') + format("Ce(mg/L)",'>12') + format("Qe(mg/g)",'>12') +format("log(Qe-Qt)",'>15') + format("log(Qe)",'>12') +format("t/Qt",'>10') +format("log t",'>12') +format("ln t",'>10') +format("sq.root t",'>16'))
print("----------------------------------------------------------------------------------------------------------------------------------")
x = 0
for x in range(len(qe_qt)):
print("%s%s%s%s%s%s%s%s%s%s%s" % (
format((x + 1), '^4'), format(time[x], '>9'), format(format(rem_eff[x], ".3f"), '>14'),
format(format(ce[x], ".3f"), '>12'), format(format(qe[x], ".3f"), '>12'),
format(logqe_qt[x], '>14'),
format(format(logqe[x], ".3f"), '>13'), format(format(tbyqt[x], ".3f"), '>12'),
format(format(log_t[x], ".3f"), '>12'), format(format(ln_t[x], ".3f"), '>10'),
format(format(root_t[x], ".3f"), '>15')))
x = x + 1
print("----------------------------------------------------------------------------------------------------------------------------------")
# KINETICS TABLE: RESULT for Legargren, Second order, Elovich & IPD
i = 0
time_1 = time[:-1]
qe_qt_1 = qe_qt[:-1]
'''while (qe_qt[i] != 0):
time_1 = np.append(time_1,time[i])
qe_qt_1 = np.append(qe_qt_1,qe_qt[i])
i = i + 1'''
logqe_qt_1 = np.log10(qe_qt_1)
print("-----------------------------------------------------------------")
print(format("KINETICS MODEL:",'<25') +format("SLOPE",'>8') +format("INTERCEPT",'>16') + format("R-SQUARE",'>16'))
print("-----------------------------------------------------------------")
x = 0
if x == 0:
#print(time_1)
#print(qe_qt_1)
#print(logqe_qt_1)
result = KineticsResult(time_1,logqe_qt_1)
print(format("LEGARGREN FIRST ORDER","<25") + "%s%s%s" % (format(result[0],">8"), format(result[1],">14"), format(result[2],">16")))
x = x + 1
if x == 1:
result = IsothermResult(time, tbyqt)
print(format("PSEUDO SECOND ORDER","<25") + "%s%s%s" % (format(result[0],">8"), format(result[1],">14"), format(result[2],">16")))
x = x + 1
if x == 2:
result = IsothermResult(log_t,qe)
print(format("ELOVICH KINETICS","<25") + "%s%s%s" % (format(result[0],">8"), format(result[1],">14"), format(result[2],">16")))
x = x + 1
if x == 3:
result = IsothermResult(root_t, qe)
print(format("INTRA PARTICLE DIFFUSION","<25") + "%s%s%s" % (format(result[0],">8"), format(result[1],">14"), format(result[2],">16")))
print("-----------------------------------------------------------------")
# Isotherm plots
for i in range(4):
if i == 0:
dict = {"TITLE": 'LEGARGREN PSEUDO FIRST ORDER PLOT', "XLABLE": 'Time', "YLABLE": 'log Qe-Qt'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(time_1, logqe_qt_1)
if i == 1:
dict = {"TITLE": 'PSEUDO SECOND ORDER PLOT', "XLABLE": 'Time', "YLABLE": 't/Qt'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(time, tbyqt)
if i == 2:
dict = {"TITLE": 'ELOVICH KINETICS PLOT', "XLABLE": 'ln t', "YLABLE": 'Qt'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(ln_t, qe)
if i == 3:
dict = {"TITLE": 'INTRA-PARTICLE DIFFUSION PLOT', "XLABLE": 'Square root(t)', "YLABLE": 'Qt'}
plt.title(dict['TITLE'], fontsize=14)
#plt.legend('LEGEND', fontsize=12)
plt.xlabel(dict['XLABLE'], fontsize=12)
plt.ylabel(dict['YLABLE'], fontsize=12)
plot(root_t, qe)
# EFFECT OF TEMPERATURE (THERMODYNAMICS)
def Thermodynamics():
file = pd.ExcelFile('sample.xlsx')
df1 = file.parse('Thermodynamics')
temp = df1['Temp(K)'].values # Temperature in Kelvin
b_max = df1['λmax(B)'].values # b_max is the value before Adsorption
a_max = df1['λmax(A)'].values # a_max is the value after Adsorption
#print(temp)
#print(b_max)
#print(a_max)
ph = float(input("Enter the pH of the solution, pH: ")) # pH of the solution
m = float(input("Enter the adsorbent dose in grams, m: ")) # mass or dose of the adsorbent(g) per 1000 mL of solution
ci = float(input("Enter the initial concentration of the solution in mg/L, c: ")) # concentration of the solution in mg/L
#b_max = float(input("Enter the absorption value before adsorption, λmax(B): ")) # b_max is the value after Adsorption
rem_eff = np.around((((b_max - a_max) / b_max) * 100), 4) # Removal efficiency
ce = ((a_max / b_max) * ci) # ce-Equilibrium concentration (Ce)
cr = ci - ce # concentration removed
qe = (cr / m) # qe-quantity removed
re = (((ci - ce) / ci) * 100) # Removal efficiency
inv_temp = 1/temp # inverse of temperature (X-axis)
lnqembyce = np.log((qe/ce)*m) # Y-axis
delta_g_k = (-8.314 / 1000) * temp * lnqembyce # from K_L values, unit - kJ/K/mol
'''print(qe)
print(inv_temp)
print(lnqembyce)
#print(delta_g_k)
print(type(qe))
print(type(inv_temp))
print(type(lnqembyce))'''
result = ThermoResult(inv_temp, lnqembyce)
#print(result)
slop = float(result[0])
incpt = float(result[1])
'''print(slop)
print(incpt)
print(type(slop))
print(type(incpt))'''
delta_h = (-8.314 * slop)/1000 # unit - kJ/K/mol
delta_s = (8.314 * incpt)/1000 # unit - kJ/K/mol
delta_g = (delta_h - (temp * delta_s)) # unit - kJ/K/mol
'''print(delta_h)
print(delta_s)
print(delta_g)'''
delta_h1 = format(delta_h,".3f")
delta_s1 = format(delta_s,".3f")
delta_g1 = ["{:.3f}".format(x) for x in (delta_g)]
'''print(slop)
print(incpt)
print(delta_h1)
print(delta_s1)
print(delta_g1)'''
# THERMODYNAMICS RESULT
print("------------------------------------")
print("THERMODYNAMICS MODEL:")
print("Concentration:",ci,"ppm ", "pH:",ph)
print("------------------------------------")
print(" ∆H◦(kJ/K/mol):",delta_h1)
print(" ∆S◦(kJ/K/mol):", delta_s1)
print("------------------------------------")
print(" Temperature(K) ∆G◦(kJ/K/mol)")
print("------------------------------------")
for i in range (len(temp)):
print("%s%s"%(format(temp[i],'>9'),format(delta_g1[i],'>22')))
print("------------------------------------")
'''
print("-------------------------------------------------------------------------------------")
print("THERMODYNAMICS MODEL:"+ "\t\t\t" + "pH:", ph, "\t\t\t"+ "Concentration:",ci,"ppm" )
print("-------------------------------------------------------------------------------------")
print(" ∆H◦" + "\t\t\t\t" + "∆S◦" + "\t\t\t\t\t" + "∆G◦(kJ/K/mol)")
print(" (kJ/K/mol) (kJ/K/mol) --------------------------------------------------")
print(" 303 K 313 K 323 K 333 K")
print("-------------------------------------------------------------------------------------")
print( "\t", delta_h1, "\t\t", delta_s1,"\t\t", delta_g1[0],"\t\t", delta_g1[1],"\t\t", delta_g1[2],"\t\t", delta_g1[3])
print("-------------------------------------------------------------------------------------")
'''
#Isotherms()
#Kinetics()
#Thermodynamics()
# Starting point
def Option():
print("-----SELECT THE DESIRED OPTION FROM THE FOLLOWING----")
print(" 1- ISOTHERM 2-KINETICS 3-THERMODYNAMICS 0-EXIT")
a=1
while a!=0:
try:
print("WELCOME TO THE ADSORPTION WORLD")
Option()
a = int(input("Enter the desired option: "))
if a >= 0 and a <= 4:
if a == 1:
Isotherms()
print("Thank You.")
if a == 2:
Kinetics()
print("Thank You.")
if a == 3:
Thermodynamics()
print("Thank You.")
if a == 0:
print("THANK YOU.")
else:
print("ENTER THE CORRECT OPTION")
except:
print('ENTER THE INTEGER VALUE BETWEEN 0 TO 3') | ramanpy/isokintemp | Isokintem.py | Isokintem.py | py | 19,204 | python | en | code | 0 | github-code | 13 |
69894358418 | # 1 . Biggie Size - Given a list, write a function that changes all positive numbers in the list to "big".
# Example: biggie_size([-1, 3, 5, -5]) returns that same list, but whose values are now [-1, "big", "big", -5]
def convertbig(x):
y = len(x)
for val in range (0,y,1):
if x[val] > 0:
x[val] = 'big'
return x
print(convertbig([-1,-3, 5, 6, -5, -9]))
# Count Positives - Given a list of numbers, create a function to replace the last value with the number of positive values. (Note that zero is not considered to be a positive number).
# Example: count_positives([-1,1,1,1]) changes the original list to [-1,1,1,3] and returns it
# Example: count_positives([1,6,-4,-2,-7,-2]) changes the list to [1,6,-4,-2,-7,2] and returns it
def count_positives(x):
y = len(x)
count = 0
for i in range (0,y,1):
if x[i] > 0:
count = count + 1
x[y-1] = count
return x
print(count_positives([1,6,-4,-2,-7,-2]))
# Sum Total - Create a function that takes a list and returns the sum of all the values in the array.
def sumoflist(x):
y = len(x)
sum = 0
for i in range(y):
sum = sum + x[i]
return sum
print(sumoflist([2,3,4,5,1]))
# Average - Create a function that takes a list and returns the average of all the values.
def averageoflist(x):
y = len(x)
sum = 0
for i in range(y):
sum = sum + x[i]
avg = float(sum)/y
return avg
print(averageoflist([1,2,3,4]))
# Length - Create a function that takes a list and returns the length of the list.
def lengthoflist(x):
count = 0;
y = len(x)
for i in range(0,y,1):
count = count + 1
return count
print(lengthoflist([3,4,5,6,7,8]))
# Minimum - Create a function that takes a list of numbers and returns the minimum value in the list. If the list is empty, have the function return False.
def minimum(x):
y = len(x)
min = 0
for i in range(0,y,1):
if x[i] < min:
min = x[i]
return min
else: return False
print(minimum([37,2,1,-9,-8]))
print(minimum([]))
# Maximum - Create a function that takes a list and returns the maximum value in the array. If the list is empty, have the function return False.
def maximum(x):
y = len(x)
max = 0
for i in range(0,y,1):
if x[i] > max:
max = x[i]
return max
else: return False
print(maximum([37,2,1,-9,-8]))
print(maximum([]))
# Ultimate Analysis - Create a function that takes a list and returns a dictionary that has the sumTotal, average, minimum, maximum and length of the list.
def createDict(x):
result = {
'sum' : 0,
'min' : None,
'max' : None,
'Avg' : None,
'length': 0
}
y = len(x)
if y == 0:
return result
else:
result['sum'] = 0
result['max'] = x[0]
result['min'] = x[0]
for val in x:
if val > result['max']:
result['max'] = val
elif val < result['min']:
result['min'] = val
result['sum'] = result['sum'] + val
result['length'] = result['length'] + 1
result['Avg'] = float (result ['sum'])/y
return result
print(createDict([1, 2, 3, 4, 5, 0]))
print(createDict([]))
| Jarvis2021/Coding-Dojo | Python_Stack/python/fundamentals/ForLoopBasicII.py | ForLoopBasicII.py | py | 3,277 | python | en | code | 0 | github-code | 13 |
21579155925 | import copy
from gurobipy import Model,GRB,LinExpr
import re
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Gurobi_direct.OptModel_m_pre import OptModel_gurobi
class Node:
def __init__(self):
self.local_LB = 0
self.local_UB = np.inf
self.x_sol ={}
self.x_int_sol = {}
self.branch_var_list = []
self.model = None
self.cnt = None
self.is_integer = False
self.var_LB = { }
self.var_UB = {}
def deepcopy_node(node):
new_node = Node()
new_node.local_LB = 0
new_node.var_UB = np.inf
new_node.x_sol = copy.deepcopy(node.x_int_sol)
new_node.x_int_sol = copy.deepcopy(node.x_int_sol)
new_node.branch_var_list = []
new_node.model = node.model.copy()
new_node.cnt = node.cnt
new_node.is_integer = node.is_integer
return new_node
class Node_2:
def __init__(self):
self.local_LB = 0
self.local_UB = np.inf
self.x_sol = {}
self.x_int_sol = {}
self.branch_var_list = []
self.cnt = None
self.is_integer = False
self.var_LB = {}
self.var_UB = {}
def deepcopy_node(node):
new_node = Node()
new_node.local_LB = 0
new_node.var_UB = np.inf
new_node.x_sol = copy.deepcopy(node.x_int_sol)
new_node.x_int_sol = copy.deepcopy(node.x_int_sol)
new_node.branch_var_list = []
new_node.cnt = node.cnt
new_node.is_integer = node.is_integer
new_node.var_LB = copy.deepcopy(node.var_LB)
new_node.var_UB = copy.deepcopy(node.var_UB)
return new_node
def Branch_and_bound(VRPTW_model, summary_interval):
#Relax_VRPTW_model = VRPTW_model.relax()
global_UB = np.inf
global_LB = VRPTW_model.ObjVal
eps = 1e-6
incumbent_node = None
Gap = np.inf
feasible_sol_cnt = 0
'''
Branch and Bound starts
'''
Queue = []
node = Node()
node.local_LB = global_LB
node.local_UB = np.inf
node.model = VRPTW_model.copy()
node.model.setParam("OutputFlag",0)
node.cnt = 0
Queue.append(node)
cnt = 0
branch_cnt = 0
Global_UB_change = []
Global_LB_change = []
while (len(Queue) > 0 and global_UB - global_LB > eps):
#pop()取列表的最后一个元素,且将其从列表中删掉
current_node = Queue.pop()
cnt += 1
current_node.model.optimize()
#status==2,代表最优;status=3,代表无可行解;status==5,无界解
Solution_status = current_node.model.Status
is_integer = True
is_pruned = False
if(Solution_status == 2):
for var in current_node.model.getVars():
if(var.VarName.startswith('X')):
#current_node.x_sol[var.varName] = copy.deepcopy(current_node.model.getVarByName())
current_node.x_sol[var.varName] = var.x
#print(var.VarName,'=',var.x)
#将非整数决策变量全部加入节点的分支变量列表
if(abs(round(var.x,0) - var.x) >= eps):
is_integer = False
current_node.branch_var_list.append(var.VarName)
if (is_integer == True):
feasible_sol_cnt += 1
current_node.is_integer = True
current_node.local_LB = current_node.model.ObjVal
current_node.local_UB = current_node.model.ObjVal
if (current_node.local_UB < global_UB):
global_UB = current_node.local_UB
#深拷贝,开拓新内存,“=”是共用同一块内存
incumbent_node = Node.deepcopy_node(current_node)
if (is_integer == False):
current_node.is_integer = False
current_node.local_UB = global_UB
current_node.local_LB = current_node.model.ObjVal
if(is_integer == True):
is_pruned = True
if(is_integer == False and current_node.local_LB > global_UB):
is_pruned = True
Gap = round(100*(global_UB - global_LB)/global_LB,2)
elif (Solution_status != 2):
is_integer = False
is_pruned = True
continue
if(is_pruned == False):
branch_cnt += 1
branch_var_name = None
#从所有非整数变量中找到一个离0.5最近的变量
min_diff = 100
for var_name in current_node.branch_var_list:
if(abs(current_node.x_sol[var_name] - 0.5) < min_diff):
branch_var_name = var_name
min_diff = abs(current_node.x_sol[var_name] - 0.5)
#每迭代50次,输出用来分支的变量
if(branch_cnt % summary_interval == 0):
print('Branch var name',branch_var_name,'\t,Branch var value :',current_node.x_sol[branch_var_name])
#左边是0,右边是1
left_var_bound = (int)(current_node.x_sol[branch_var_name])
right_var_bound = (int)(current_node.x_sol[branch_var_name]) + 1
left_node = Node.deepcopy_node(current_node)
right_node = Node.deepcopy_node(current_node)
temp_var = left_node.model.getVarByName(branch_var_name)
left_node.model.addConstr(temp_var <= left_var_bound,name = 'branch_left' + str(cnt))
left_node.model.setParam("OutputFlag",0)
left_node.model.update()
cnt +=1
left_node.cnt = cnt
temp_var = right_node.model.getVarByName(branch_var_name)
right_node.model.addConstr(temp_var >= right_var_bound, name='branch_right' + str(cnt))
right_node.model.setParam("OutputFlag", 0)
right_node.model.update()
cnt += 1
right_node.cnt = cnt
Queue.append(left_node)
Queue.append(right_node)
temp_global_LB = np.inf
#遍历叶子节点队列,更新下界
for node in Queue:
node.model.optimize()
if(node.model.status == 2):
if(node.model.ObjVal <= temp_global_LB and node.model.ObjVal <= global_UB):
temp_global_LB = node.model.ObjVal
global_LB = temp_global_LB
Global_UB_change.append(global_UB)
Global_LB_change.append(global_LB)
if((cnt - 2)% summary_interval == 0):
print('\n\n==================')
print('Queue length:',len(Queue))
print('\n -------------- \n',cnt,'UB =',global_UB,'LB =',global_LB,'\t Gap = ',Gap,' %','feasible_sol_cnt:',feasible_sol_cnt)
#all the nodes are explored,update the LB and UB
incumbent_node.model.optimize()
global_UB = incumbent_node.model.ObjVal
global_LB = global_UB
Gap = round(100 * (global_UB - global_LB)/global_LB,2)
Global_UB_change.append(global_UB)
Global_LB_change.append(global_LB)
print('\n\n\n\n')
print('-----------------------------------------')
print(' Branch and Bound terminates ')
print(' Optimal solution found ')
print('-----------------------------------------')
print('\nIter cnt = ',cnt, '\n\n')
print('\nFinal Gap = ',Gap, '%\n\n')
print(' -------Optimal Solution ---------')
'''for key in incumbent_node.x_sol.keys():
if(incumbent_node.x_sol[key] > 0):
print(key, '=', incumbent_node.x_sol[key])'''
print('\nOptimal Obj:',global_LB)
return incumbent_node,Gap,Global_UB_change,Global_LB_change
m = OptModel_gurobi()
m.start()
incumbent_node,Gap,Global_UB_change,Global_LB_change = Branch_and_bound(m.model,summary_interval= 50)
for key in incumbent_node.x_sol.keys():
if(incumbent_node.x_sol[key] > 0):
print(key, '=', incumbent_node.x_sol[key])
import matplotlib.pyplot as plt
print(len(Global_LB_change))
x = range(len(Global_UB_change))
plt.figure(figsize=(12,8),dpi=80)
plt.plot(x,Global_UB_change,label='UB',color='red',linestyle=':',marker='.',markersize=5)
plt.plot(x,Global_LB_change,label='LB',color='black',linestyle='--',marker='.',markersize=5)
plt.xlabel('Iterations')
plt.ylabel('Bound Value')
plt.title('Change of the Bound')
#xtick = ['{}'.format(i) for i in x ]
#plt.xticks(x,xtick)
#plt.grid(alpha=0.8)
plt.legend(loc = 'upper left')
plt.show() | LiuZunzeng/Code_VRPTW | Branch_and _Bound/Branch_and_Bound.py | Branch_and_Bound.py | py | 8,533 | python | en | code | 0 | github-code | 13 |
12400832889 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
import re
class CuisineLibre(object):
@staticmethod
def search(query_dict):
"""
Search recipes parsing the returned html data.
"""
base_url = "http://www.cuisine-libre.fr/?page=recherche&"
query_url = urllib.parse.urlencode(query_dict)
url = base_url + query_url
html_content = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html_content, 'html.parser')
search_data = []
articles = soup.findAll("li", {"class": "clearfix"})
for article in articles:
data = {}
try:
data["name"] = article.find("a").find("strong").get_text().strip(' \t\n\r')
data["url"] = article.find("a")['href']
try:
data["image"] = article.find("a").find("img")["src"][2:]
except Exception as e1:
pass
except Exception as e2:
print(e2)
pass
search_data.append(data)
return search_data
@staticmethod
def get(uri):
"""
'url' from 'search' method.
"""
base_url = "http://www.cuisine-libre.fr/"
url = base_url + uri
html_content = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html_content, 'html.parser')
image_url = soup.find("div", {"class": "illustration"}).find("img")["src"][2:]
ingredients_data = soup.find("div", {"class": "texte surlignable"})
ingredients_title = ingredients_data.find("h2").get_text()
list_ingredients_data = ingredients_data.findAll("li", {"class": "ingredient"})
list_ingredients = [ingredient.get_text()[1:] for ingredient in list_ingredients_data]
try:
author = soup.find("strong", {"class": "author fn"}).get_text()
except:
author = "Inconnu"
preparation_data = soup.find("div", {"id": "preparation"})
list_instructions_data = preparation_data.findAll("p")
list_instructions = [instr.get_text() for instr in list_instructions_data]
data = {
"author": author,
"image": image_url,
"ingredients_title": ingredients_title,
"ingredients": list_ingredients,
"instructions": list_instructions
}
return data
| remaudcorentin-dev/python-cuisinelibre | cuisinelibre/__init__.py | __init__.py | py | 2,075 | python | en | code | 0 | github-code | 13 |
9077841000 | import sys
sys.setrecursionlimit(10000) # avoid RecursionError: the flood fill below recurses once per cell
# Neighbour offsets: down, up, right, left.
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
T = int(input())  # number of test cases
def dfs(x, y):
    """Flood-fill from (x, y): mark every connected cabbage cell (1) as visited (-1)."""
    # Check the four neighbours (up, down, left, right).
    for d in range(4):
        nx = x + dx[d]
        ny = y + dy[d]
        if (0 <= nx < N) and (0 <= ny < M):
            if matrix[nx][ny] == 1: # neighbour holds an unvisited cabbage
                matrix[nx][ny] = -1 # mark visited so it is not counted twice
                dfs(nx, ny)
for _ in range(T):
    M, N, K = map(int, input().split())  # M = width, N = height, K = cabbage count
    matrix = [[0] * M for _ in range(N)]
    cnt = 0  # number of connected cabbage clusters (= worms needed)
    # Build the grid from the K cabbage coordinates (given as "m n").
    for _ in range(K):
        m, n = map(int, input().split())
        matrix[n][m] = 1
    for i in range(N): # rows (outer list)
        for j in range(M): # columns (inner list)
            if matrix[i][j] == 1: # unvisited cabbage: a new cluster starts here
                dfs(i, j) # visit the whole cluster
                cnt += 1
    print(cnt)
| Mins00oo/PythonStudy_CT | BACKJOON/Python/S2/S2_1012_유기농 배추.py | S2_1012_유기농 배추.py | py | 989 | python | ko | code | 0 | github-code | 13 |
24356099412 | import argparse
import numpy.random as rand
from datastore import datastore
# Command-line interface: all parameters of one batch-matching simulation run.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--haste', help='', default=None, type=float)
parser.add_argument('--pickiness', help='', default=None, type=float)
parser.add_argument('--rate', help='', default=None, type=float)
parser.add_argument('--max_time', help='', default=None, type=float)
parser.add_argument('--parameter', help='', default=None, type=float)
args = parser.parse_args()
HASTE = args.haste
PICKY = args.pickiness
RATE = args.rate            # arrival rate of the Poisson arrival process
MAX_TIME = args.max_time    # simulated time horizon
BATCH_TIME = args.parameter # interval between batch-matching rounds
# constant
# Gender codes and indices into a candidate tuple (arrival_time, score, gender).
BOY = 0
GIRL = 1
A_TIME = 0
SCORE = 1
GENDER = 2
data = datastore("batch", BATCH_TIME, HASTE, PICKY, RATE, MAX_TIME, time_store=int(MAX_TIME / BATCH_TIME))
boys = []   # waiting boys
girls = []  # waiting girls
dump = []   # overflow queue for the currently over-represented gender
dump_gender = BOY  # which gender the dump currently holds
def arrival_generator():
    """Yield an endless stream of candidates as (arrival_time, score, gender).

    Inter-arrival times are exponential with rate RATE (a Poisson process);
    score is uniform on [0, 1); gender is a fair coin (0 = boy, 1 = girl).
    """
    last_arrival_time = 0
    while True:
        last_arrival_time = last_arrival_time + rand.exponential(scale=1 / RATE)
        yield last_arrival_time, rand.uniform(), rand.randint(2) # Atime, Score, Gender
arrival = arrival_generator()
next_batch_time = BATCH_TIME  # simulated time of the next batch round
current_time = 0
def get_score(x):
    """Sort key: the candidate's score field."""
    return x[SCORE]
def get_atime(x):
    """Sort key: the candidate's arrival time."""
    return x[A_TIME]
def opp_gender(x):
    """Flip a gender code (0 <-> 1)."""
    return x ^ 1
# Equalize the boy girl arrays, load or save to dump as neccessary
def manage_dump(high_gender_list, low_gender_list, low_gender_int):
    """Balance the two waiting lists before a batch round.

    First, if the dump holds candidates of the under-represented gender,
    move as many as needed back into the short list.  Any remaining excess
    in the long list (its newest arrivals) is then parked in the dump.
    Mutates the module-level dump/dump_gender state.
    """
    global dump_gender, dump
    if dump_gender is low_gender_int and len(dump) > 0:
        num_retrieve = min(len(dump), len(high_gender_list) - len(low_gender_list))
        for x in range(num_retrieve):
            low_gender_list.append(dump.pop())
    num_dispose = len(high_gender_list) - len(low_gender_list)
    if num_dispose > 0:
        # Popping from an atime-sorted list takes the newest arrivals;
        # reversing keeps the dump ordered oldest-first (dump.pop() = newest).
        high_gender_list.sort(key=get_atime)
        for_the_dump = [high_gender_list.pop() for blah in range(num_dispose)]
        for_the_dump.reverse()
        dump_gender = opp_gender(low_gender_int)
        for person in for_the_dump:
            dump.append(person)
# Main simulation loop: process arrivals until the time horizon is reached.
while current_time < MAX_TIME:
    #print(current_time, end="\r")
    candidate = next(arrival)
    current_time = candidate[A_TIME]
    if current_time < MAX_TIME:
        # Record queue sizes; dump members count toward their gender's queue.
        if dump_gender is BOY:
            data.add_time_stat(len(boys) + len(dump), len(girls), current_time)
        else:
            data.add_time_stat(len(boys), len(girls) + len(dump), current_time)
        if current_time > next_batch_time:
            # BATCH PROCESS
            # Equalize the lists, then match by score rank (best with best).
            if len(boys) > len(girls):
                manage_dump(high_gender_list=boys, low_gender_list=girls, low_gender_int=GIRL)
            else:
                manage_dump(high_gender_list=girls, low_gender_list=boys, low_gender_int=BOY)
            boys.sort(key=get_score)
            girls.sort(key=get_score)
            while len(boys) > 0: # Pop the matches while they exist
                data.add_match(boys.pop(), girls.pop(), next_batch_time)
            next_batch_time += BATCH_TIME
        # NOTE(review): gender compared with 'is' works only because CPython
        # caches small ints; '==' would be the safe spelling.
        boys.append(candidate) if candidate[GENDER] is BOY else girls.append(candidate)
# Anyone still in the dump never got matched; save their stats per gender.
boy_scores = []
girl_scores = []
boy_atimes = []
girl_atimes = []
dump_scores = [entry[SCORE] for entry in dump]
dump_atimes = [entry[A_TIME] for entry in dump]
if dump_gender is BOY:
    boy_scores, boy_atimes = dump_scores, dump_atimes
else:
    girl_scores, girl_atimes = dump_scores, dump_atimes
data.save_stranglers(boy_scores, girl_scores, boy_atimes, girl_atimes)
print("Saving")
data.save_stats()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| amrith1/CommNets | batch.py | batch.py | py | 3,512 | python | en | code | 0 | github-code | 13 |
class Solution:
    def minSwapsCouples(self, row: List[int]) -> int:
        """Return the minimum number of swaps so each couple sits together.

        Greedy over the even seats: if the partner of the person in seat i
        is not already in seat i + 1, swap the partner in.  Each such swap
        is part of some optimal solution, so counting them is optimal.
        """
        pos = {person: seat for seat, person in enumerate(row)}
        swaps = 0

        def exchange(a, b):
            # Swap the occupants of seats a and b, keeping pos in sync.
            row[a], row[b] = row[b], row[a]
            pos[row[a]] = a
            pos[row[b]] = b
            nonlocal swaps
            swaps += 1

        for seat in range(0, len(row), 2):
            partner = row[seat] ^ 1  # couples are numbered (2k, 2k + 1)
            if row[seat + 1] != partner:
                exchange(seat + 1, pos[partner])
        return swaps
| lumiZGorlic/leetcode | solutions/CouplesHoldingHands/solution.py | solution.py | py | 678 | python | en | code | 0 | github-code | 13 |
29726183968 | # -*- coding:utf-8 -*-
""" paddle train demo """
import os
import numpy as np
import paddle # 导入paddle模块
import paddle.fluid as fluid
import gzip
import struct
import argparse
import time
from rudder_autosearch.sdk.amaas_tools import AMaasTools
def parse_arg():
    """Parse CLI arguments and create the per-trial output directory.

    Returns the populated argparse.Namespace; output_dir is rewritten to
    <output_dir>/<job_id>/<trial_id> and created when missing.
    """
    parser = argparse.ArgumentParser(description='paddle2.1.1 mnist Example')
    parser.add_argument('--train_dir', type=str, default='./train_data',
                        help='input data dir for training (default: ./train_data)')
    parser.add_argument('--test_dir', type=str, default='./test_data',
                        help='input data dir for test (default: ./test_data)')
    parser.add_argument('--output_dir', type=str, default='./output',
                        help='output dir for auto_search job (default: ./output)')
    parser.add_argument('--job_id', type=str, default="job-1234",
                        help='auto_search job id (default: "job-1234")')
    parser.add_argument('--trial_id', type=str, default="0-0",
                        help='auto_search id of a single trial (default: "0-0")')
    parser.add_argument('--metric', type=str, default="acc",
                        help='evaluation metric of the model')
    parser.add_argument('--data_sampling_scale', type=float, default=1.0,
                        help='sampling ratio of the data (default: 1.0)')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='number of images input in an iteration (default: 64)')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate of the training (default: 0.001)')
    parser.add_argument('--epoch', type=int, default=5,
                        help='number of epochs to train (default: 5)')
    args = parser.parse_args()
    # Nest outputs per job and trial so parallel trials do not collide.
    args.output_dir = os.path.join(args.output_dir, args.job_id, args.trial_id)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    print("job_id: {}, trial_id: {}".format(args.job_id, args.trial_id))
    return args
def load_data(file_dir, is_train=True):
    """Decompress and parse the gzipped MNIST idx files in *file_dir*.

    :param file_dir: directory containing the MNIST .gz archives
    :param is_train: choose the train files (True) or the t10k test files
    :return: (images, labels) -- images is an (N, 784) uint8 array,
        labels is an (N,) uint8 array
    """
    if is_train:
        image_path = file_dir + '/train-images-idx3-ubyte.gz'
        label_path = file_dir + '/train-labels-idx1-ubyte.gz'
    else:
        image_path = file_dir + '/t10k-images-idx3-ubyte.gz'
        label_path = file_dir + '/t10k-labels-idx1-ubyte.gz'
    # Decompress both archives next to the originals (.gz suffix stripped).
    with open(image_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(image_path) as zip_f:
        out_f.write(zip_f.read())
    # os.unlink(image_path)
    with open(label_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(label_path) as zip_f:
        out_f.write(zip_f.read())
    # os.unlink(label_path)
    # idx1 label file: 8-byte big-endian header (magic, count), then raw bytes.
    with open(label_path[:-3], 'rb') as lbpath:
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8)
    # idx3 image file: 16-byte header (magic, count, rows, cols), then pixels.
    with open(image_path[:-3], 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
    return images, labels
def reader_creator(file_dir, is_train=True, buffer_size=100, data_sampling_scale=1):
    """Build a sample-level MNIST reader over the files in *file_dir*.

    For training data, a deterministic (seed 0) random subset of
    data_sampling_scale * N samples is selected.

    :param file_dir: directory holding the gzipped MNIST files
    :param is_train: read the training files (True) or the t10k test files
    :param buffer_size: samples are emitted in whole chunks of this size;
        a trailing partial chunk is dropped
    :param data_sampling_scale: fraction of the training data to keep
    :return: a generator function yielding (784-pixel row, int label) pairs
    """
    images, labels = load_data(file_dir, is_train)
    if is_train:
        np.random.seed(0)  # deterministic sub-sampling across trials
        sample_data_num = int(data_sampling_scale * len(images))
        idx = np.arange(len(images))
        np.random.shuffle(idx)
        # BUGFIX: idx was shuffled but never applied, so "sampling" silently
        # kept the first sample_data_num rows instead of a random subset.
        images, labels = images[idx][0:sample_data_num], labels[idx][0:sample_data_num]

    def reader():
        """Yield individual (image_row, int label) samples, whole chunks of
        buffer_size at a time."""
        for num in range(int(len(labels) / buffer_size)):
            for i in range(buffer_size):
                yield images[num * buffer_size + i, :], int(labels[num * buffer_size + i])
    return reader
def reader_load(args):
    """Build the shuffled train reader and the sequential test reader."""
    # Pull 500 training samples at a time into a shuffle buffer; the batched
    # reader then yields args.batch_size samples per step.
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            reader_creator(args.train_dir, is_train=True, buffer_size=100,
                           data_sampling_scale=args.data_sampling_scale), buf_size=500),
        batch_size=args.batch_size)
    # Test reader yields args.batch_size samples per step, unshuffled.
    test_reader = paddle.batch(
        reader_creator(args.test_dir, is_train=False, buffer_size=100), batch_size=args.batch_size)
    return train_reader, test_reader
def softmax_regression():
    """Define a softmax classifier: a single fully connected layer with
    softmax activation.

    Return:
        predict -- the classification result
    """
    # Raw input image, 28*28*1.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    # Fully connected layer with softmax activation; the output size must
    # equal the number of digit classes (10).
    predict = fluid.layers.fc(
        input=img, size=10, act='softmax')
    return predict
def multilayer_perceptron():
    """Define a multilayer-perceptron classifier: two hidden (fully
    connected) layers with ReLU, then a softmax output layer.

    Return:
        prediction -- the classification result
    """
    # Raw input image, 28*28*1.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    # First fully connected layer, ReLU activation.
    hidden = fluid.layers.fc(input=img, size=200, act='relu')
    # Second fully connected layer, ReLU activation.
    hidden = fluid.layers.fc(input=hidden, size=200, act='relu')
    # Softmax output layer; size must equal the number of digit classes (10).
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    return prediction
def convolutional_neural_network():
    """Define a CNN classifier: two conv-pool stages followed by a softmax
    fully connected output layer.

    Return:
        prediction -- the classification result
    """
    # Raw input image, 28*28*1.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    # First conv-pool stage:
    # twenty 5x5 filters, 2x2 max pooling with stride 2, ReLU activation.
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    # Second conv-pool stage:
    # fifty 5x5 filters, 2x2 max pooling with stride 2, ReLU activation.
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    # Softmax output layer; size must equal the number of digit classes (10).
    prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
    return prediction
def train_program():
    """Configure the training program.

    Return:
        predict  -- the classification result
        avg_cost -- the mean loss
        acc      -- the classification accuracy
    """
    paddle.enable_static()
    # Label layer, named 'label': the class label of the input image.
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # predict = softmax_regression() # uncomment to use softmax regression
    # predict = multilayer_perceptron() # uncomment to use a multilayer perceptron
    predict = convolutional_neural_network() # uses the LeNet5-style CNN
    # Cross-entropy loss between predict and label.
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    # Mean loss over the batch.
    avg_cost = fluid.layers.mean(cost)
    # Classification accuracy.
    acc = fluid.layers.accuracy(input=predict, label=label)
    return predict, [avg_cost, acc]
def optimizer_program(learning_rate=0.001):
    """Build the Adam optimizer.

    :param learning_rate: step size; previously hard-coded to 0.001, now a
        parameter with the same default so existing callers are unaffected.
    """
    return fluid.optimizer.Adam(learning_rate=learning_rate)
def event_handler(pass_id, batch_id, cost):
    """Log an intermediate training result: epoch (pass), batch and loss."""
    message = "Pass %d, Batch %d, Cost %f" % (pass_id, batch_id, cost)
    print(message)
def train_test(train_test_program,
               train_test_feed, train_test_reader, executor, fetch_list):
    """Run one full pass over the test reader.

    Returns (mean loss, mean accuracy) averaged over all test batches.
    """
    # Per-batch classification accuracies.
    acc_set = []
    # Per-batch mean losses.
    avg_loss_set = []
    # Feed every batch yielded by the test reader through the network.
    for test_data in train_test_reader():
        avg_loss_np, acc_np = executor.run(
            program=train_test_program,
            feed=train_test_feed.feed(test_data),
            fetch_list=fetch_list)
        acc_set.append(float(acc_np))
        avg_loss_set.append(float(avg_loss_np))
    # Accuracy and loss averaged over the whole test set.
    acc_val_mean = np.array(acc_set).mean()
    avg_loss_val_mean = np.array(avg_loss_set).mean()
    # Return mean loss and mean accuracy.
    return avg_loss_val_mean, acc_val_mean
class Model():
    """Wraps network construction, training, model export and evaluation."""

    def __init__(self, args, train_reader, test_reader):
        self.args = args
        self.create_model()
        self.train_reader = train_reader
        self.test_reader = test_reader

    def create_model(self):
        """Build the network, the data feeder and the Adam optimizer."""
        # Run the model on a single CPU.
        self.place = fluid.CPUPlace()
        # train_program returns the prediction plus the loss/accuracy metrics.
        self.prediction, [self.avg_loss, self.acc] = train_program()
        # Raw input image, 28*28*1.
        img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
        # Label layer: the class label matching the input image.
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        # Tell the feeder that each sample is an (img, label) pair.
        self.feeder = fluid.DataFeeder(feed_list=[img, label], place=self.place)
        # Adam optimizer driven by the CLI learning rate.
        optimizer = fluid.optimizer.Adam(learning_rate=self.args.lr)
        optimizer.minimize(self.avg_loss)

    def run_train(self):
        """Run the training loop for args.epoch epochs."""
        PASS_NUM = self.args.epoch
        epochs = [epoch_id for epoch_id in range(PASS_NUM)]
        self.exe = fluid.Executor(self.place)
        self.exe.run(fluid.default_startup_program())
        main_program = fluid.default_main_program()
        step = 0
        for epoch_id in epochs:
            print("Epoch %d:" % (epoch_id))
            for step_id, data in enumerate(self.train_reader()):
                metrics = self.exe.run(main_program,
                                       feed=self.feeder.feed(data),
                                       fetch_list=[self.avg_loss, self.acc])
                if step % 100 == 0:  # log progress every 100 training steps
                    # BUGFIX: event_handler's signature is (pass_id, batch_id,
                    # cost); the original call passed (step, epoch_id, ...),
                    # which swapped the "Pass"/"Batch" labels in the log.
                    event_handler(epoch_id, step, metrics[0])
                step += 1

    def save_model(self):
        """Persist the inference model (topology + params) to args.output_dir."""
        # Store the model parameters under the save_dirname directory.
        save_dirname = self.args.output_dir
        fluid.io.save_inference_model(save_dirname,
                                      ["img"], [self.prediction], self.exe,
                                      model_filename='model',
                                      params_filename='params')

    def evaluate(self):
        """Evaluate on the test reader and return the mean accuracy."""
        test_program = fluid.default_main_program().clone(for_test=True)
        avg_loss_val, acc_val = train_test(train_test_program=test_program,
                                           train_test_reader=self.test_reader,
                                           train_test_feed=self.feeder,
                                           executor=self.exe,
                                           fetch_list=[self.avg_loss, self.acc])
        print("accuracy: %f" % acc_val)
        return acc_val
def report_final(args, metric):
    """Report the final metric and exported-model path to the manager.

    Retries up to 3 times, one second apart, and asserts on total failure.
    """
    # Report the result through the AMaaS SDK.
    amaas_tools = AMaasTools(args.job_id, args.trial_id)
    metric_dict = {args.metric: metric}
    for i in range(3):
        flag, ret_msg = amaas_tools.report_final_result(metric=metric_dict,
                                                        export_model_path=args.output_dir,
                                                        checkpoint_path="")
        print("End Report, metric:{}, ret_msg:{}".format(metric, ret_msg))
        if flag:
            break
        time.sleep(1)
    assert flag, "Report final result to manager failed! Please check whether manager'address or manager'status " \
                 "is ok! "
def main():
    """Run one AutoSearch trial end to end."""
    # Parse CLI arguments.
    args = parse_arg()
    # Load the train/test dataset readers.
    train_reader, test_reader = reader_load(args)
    # Define the model.
    model = Model(args, train_reader, test_reader)
    # Train it.
    model.run_train()
    # Save the inference model.
    model.save_model()
    # Evaluate on the test set.
    acc = model.evaluate()
    # Report the final result to the manager.
    report_final(args, metric=acc)
if __name__ == '__main__':
    main()
| Baidu-AIP/BML-AutoML-AutoSearch | bml_auto_search_job/paddle_2_1_1/paddlepaddle2.1.1_autosearch.py | paddlepaddle2.1.1_autosearch.py | py | 13,336 | python | en | code | 3 | github-code | 13 |
23737934815 | import sys
from config import Config, Logger
from utils import IpUtils
from web_connector import GoDaddyConnector
if __name__ == '__main__':
    # Expect exactly one positional argument naming the environment.
    if len(sys.argv) != 2:
        print('Invalid arguments')
        print('Usage:')
        print('update_dns.py <dev|prod>')
        sys.exit(1)
    environment = sys.argv[1]
    config = Config(environment)
    logger = Logger(config)
    ip_utils = IpUtils(config, logger)
    # Determine this host's current public IP address.
    external_ip = ip_utils.get_external_ip()
    go_daddy_connector = GoDaddyConnector(config, logger)
    # Only touch the GoDaddy DNS record when the public IP actually changed.
    if external_ip != go_daddy_connector.fetch_ip_from_dns():
        go_daddy_connector.update_dns(external_ip)
| esceer/godaddy-dns-sync | src/update_dns.py | update_dns.py | py | 640 | python | en | code | 1 | github-code | 13 |
34828290831 | from math import sqrt
import stats_batch as sb
import numpy as np
from pytest import approx
from scipy.stats import ttest_ind_from_stats
from scipy.stats import ttest_ind
def test_batch_mean_var_t_test():
    """Incremental (batched) mean/var/t-test must agree with whole-sample
    NumPy/SciPy computations on normally distributed data."""
    n = 10_000
    a = np.random.normal(size=n)
    b = np.random.normal(size=n)
    # First batch
    # a -----------
    batch_1_a = a[:100]
    a_current = sb.mean_var_batch(batch_1_a)
    # b -----------
    batch_1_b = b[:100]
    b_current = sb.mean_var_batch(batch_1_b)
    # Second batch
    # a -----------
    batch_2_a = a[100:n]
    a_current.update(batch_2_a)
    assert a_current.mean == approx(np.mean(a))
    # Streaming variance only matches to ~1e-3 relative tolerance.
    assert a_current.var == approx(np.var(a), rel = 1e-3)
    # b -----------
    batch_2_b = b[100:n]
    b_current.update(batch_2_b)
    assert b_current.mean == approx(np.mean(b))
    assert b_current.var == approx(np.var(b), rel = 1e-3)
    # t-test
    batch_t = a_current.ttest_ind(b_current)
    list_t = ttest_ind(a, b)
    assert batch_t[0] == approx(list_t[0])
    assert batch_t[1] == approx(list_t[1])
| christophergandrud/stats_batch | tests/test_batch_mean_var_t.py | test_batch_mean_var_t.py | py | 1,055 | python | en | code | 1 | github-code | 13 |
12342497443 | # a game of Rock Paper Scissors
# a working program that accepts the user's input (r, p, or s)
# user 1
while True:
    # Prompt player 1 until a valid choice is entered.
    while True:
        p1 = input("Player 1, Enter Rock(r), Paper(p), or Scissors(s): ")
        if p1 in ["r","p","s"]:
            break
        print("Please enter a valid input")
    # user 2
    while True:
        p2 = input("Player 2, Enter Rock(r), Paper(p), or Scissors(s): ")
        if p2 in ["r","p","s"]:
            break
        print("Please enter a valid input")
    # Rock beats scissors, scissors beats paper, paper beats rock.
    if((p1 == 'r' and p2 == 's') or (p1 == 's' and p2 == "p") or (p1 == 'p' and p2 == "r")):
        print("Player 1 Wins!")
    elif ((p2 == 'r' and p1 == 's') or (p2 == 's' and p1 == "p") or (p2 == 'p' and p1 == "r")):
        print("Player 2 Wins!")
    else:
        print("It's a Tie!")
    # the game runs once
    # Any answer other than 'n' starts another round.
    again = input("play again (y/n)? ")
    if again == "n":
        print("Bye!")
        break
| stevanvillegas/myrepo | Python_myCodingDemo_3.py | Python_myCodingDemo_3.py | py | 916 | python | en | code | 0 | github-code | 13 |
70391991699 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 11:45:12 2021
@author: testbenutzer
"""
import numpy as np
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.ndimage.measurements import label
import matplotlib.pyplot as pp
import cv2
import Basicfunctions_GUI as bsf
import os
from time import gmtime, strftime
def detect_peaks(image):
    """Locate local maxima in *image*.

    Returns a boolean array that is True exactly at pixels whose value is
    the maximum of their 8-connected neighbourhood, with the flat
    zero-valued background excluded.
    """
    footprint = generate_binary_structure(2, 2)  # 3x3 block: 8-connectivity
    # True wherever a pixel equals the maximum over its neighbourhood.
    is_local_max = maximum_filter(image, footprint=footprint) == image
    # Flat background (value 0) also satisfies the test above.  Build the
    # background mask and shrink it by one pixel (border_value=1 keeps the
    # image border counted as background) before removing it; the shrink
    # avoids a spurious line along the background border.
    zero_mask = image == 0
    shrunk_background = binary_erosion(zero_mask, structure=footprint, border_value=1)
    # XOR, not AND-NOT: this reproduces the original masking semantics.
    return is_local_max ^ shrunk_background
def split_detection(image, delta=40, increment=40, min_area=50, max_area=250, min_percent_nonblack=50):
    """Detect split spots in a Laue image by counting intensity peaks per
    detected contour.

    Thresholds the image just above a per-row background estimate, finds
    spot contours, and annotates each mid-sized contour with its number of
    local maxima and its non-black fill percentage.

    Returns (annotated_image_or_original, average_peaks_per_contour).
    NOTE(review): the hard-coded 1286/643/488/975 values assume a fixed
    two-panel detector geometry -- confirm against the camera layout.
    """
    original = np.copy(image)
    # Per-row background: mean of a horizontal strip per half, delta pixels
    # away from the edges.
    mean = np.zeros((1286,))
    for i in range(mean.shape[0]//2):
        mean[i] = np.mean(image[i][delta:488-delta])
        mean[643+i] = np.mean(image[i][488+delta:975-delta])
    # Clamp everything below background+increment, then stretch to 0..255.
    image[image < int(np.max(mean)+increment)] = np.max(mean)+increment
    image = (image-np.min(image))*255.0/(np.max(image)-np.min(image))
    image = image.astype('uint8')
    mD, sD, _ = bsf.maxFinder_large(image, None)
    image_dots_new = np.copy(image)
    image_dots_new = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
    dots = mD + sD
    dot_num = []
    dot_mean = []
    end = False
    if len(dots) < 1:
        # Nothing detected at all: hand back the untouched input.
        return original, 0
    for dot in dots:
        area = cv2.contourArea(dot)
        if area < max_area and area > min_area:
            x,y,w,h = cv2.boundingRect(dot)
            ausschnitt = image[y:y+h,x:x+w]
            mean = int(int(bsf.dotMean(image,dot)) * 255/np.max(ausschnitt))
            dot_mean.append(mean)
            #if mean > 30:
            # Count local intensity maxima inside the spot's bounding box.
            detected_peaks = detect_peaks(ausschnitt)
            ausschnitt_peaks = detected_peaks*ausschnitt
            _, num_features = label(ausschnitt_peaks)
            #if num_features < 10:
            percent_nonblack = int(100 * np.count_nonzero(ausschnitt)/(ausschnitt.shape[0]*ausschnitt.shape[1]))
            if percent_nonblack > min_percent_nonblack:
                dot_num.append(num_features)
                # Annotate: bounding box, peak count, non-black percentage.
                cv2.rectangle(image_dots_new,(x,y),(x+w,y+h),(0,255,0),1)
                image_dots_new = cv2.putText(image_dots_new, str(num_features), (x+w,y+5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,100), 1, cv2.LINE_AA, False)
                image_dots_new = cv2.putText(image_dots_new, str(int(percent_nonblack)), (x+w,y+h+10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,100), 1, cv2.LINE_AA, False)
                end = True
    average_dots = np.mean(dot_num)
    print("average dots per contur: " + str(round(average_dots,1)))
    average_mean = np.mean(dot_mean)
    print("average mean per contur: " + str(round(average_mean,1)))
    if end:
        return image_dots_new, average_dots
    else:
        return original, average_dots
# (leftover list from earlier single-image experiments; not used below)
filelist = ["test_split_detection_image.bmp", "test_split_detection_image.bmp", "Recht_20kV30mA_40p0mm_1800s_ohne_poly_000.bmp", "ACK_101_20kV30mA_40p0mm_3600s_test_nebenmaxima_verschwommen_000.bmp", "test_split_detection_image_2.bmp"]
i = 0
open_directory = "/home/testbenutzer/Schreibtisch/Laue-Software/GUI/JUK/4PunkteVerzwilligung/Proben ACK/101/ACK_101_20kV30mA_40p0mm_RASTER_840s_20x24_0.25mm_continuous_nach_bugfix/Raster_Bg_Removed"
save_directory = "/home/testbenutzer/Schreibtisch/Laue-Software/GUI/JUK/4PunkteVerzwilligung/Proben ACK/101/ACK_101_20kV30mA_40p0mm_RASTER_840s_20x24_0.25mm_continuous_nach_bugfix/Raster_Bg_Removed_split_detection/"
# Collect all files; after sorting, the lexicographically last entry is the
# raster logfile, the remainder are the raster images.
result = [f for f in os.listdir(open_directory) if os.path.isfile(os.path.join(open_directory, f))]
filenames = []
for j in sorted(result):
    filenames.append(str(open_directory) + "/" + str(j))
# The logfile contains "<header>#<data>"; keep both halves.
datei = open(filenames[len(filenames)-1],'r')
text = datei.read().split('#',2)
datei.close()
logfile = text[0]
text_arr = text[1]
filenames = filenames[:-1]
# Classification of the 24x20 raster: 0 = no usable spots, 1 = spots found,
# 2 = more than two peaks per spot on average (splitting suspected).
where_split = np.zeros((24,20))
for file in filenames:
    print(i)
    # Raster position of this image (row-major over a 20-column grid).
    position = (i//where_split.shape[1], i%where_split.shape[1])
    print(file)
    image = cv2.imread(file, -1)
    image_dots_new, average_dots = split_detection(image)
    if len(image_dots_new.shape) > 2:
        grayImage = cv2.cvtColor(image_dots_new, cv2.COLOR_BGR2GRAY)
    else:
        grayImage = np.copy(image_dots_new)
    # Save the annotated image, timestamped and tagged with its position.
    new_save_directory = save_directory + strftime("%Y%m%d%H%M%S", gmtime()) + "_" + "(" + str(position[0]) + "," + str(position[1]) + ").tif"
    cv2.imwrite(new_save_directory, grayImage)
    if average_dots > 2:
        where_split[position] = 2
    elif average_dots > 0:
        where_split[position] = 1
    i = i + 1
# Write the classification matrix back next to the original log header.
text_arr = str(where_split)
with open(save_directory + strftime("%Y%m%d%H%M%S", gmtime()) + "_" + "_logfile.txt", 'w') as f:
    f.write(logfile + "#" + text_arr)
pp.imshow(where_split)
"""
path = "test_mittelpunkt_detection.tif"
original_arr = cv2.imread(path, -1)
original_arr = np.uint8(original_arr)
mD, sD, dot_image = bsf.maxFinder_large(original_arr, None)
coord_mD = []
for dot in mD:
x,y,w,h = cv2.boundingRect(dot)
coord_mD.append((round(x+w/2,2),round(y+h/2,2)))
print(coord_mD)
"""
| roundplanet/Laue-Camera | test_split_detection.py | test_split_detection.py | py | 6,383 | python | en | code | 1 | github-code | 13 |
37743118340 | # encoding: utf-8
import unittest
from authors_network.authors_network_builder import AuthorsNetworkBuilder
from authors_network.papers_dictionary_builder import PapersDictionaryBuilder
class TestAuthorsNetworkBuilder(unittest.TestCase):
def test_build(self):
builder = AuthorsNetworkBuilder()
papersDictionaryBuilder = PapersDictionaryBuilder()
papers = papersDictionaryBuilder.build('resource/test.txt')
authors = builder.build(papers)
self.assertEqual(set(authors['author1']), set(['author2', 'author4']))
self.assertEqual(authors['author2'], ['author1'])
self.assertEqual(set(authors['author3']), set(['author1', 'author2', 'author4', 'author5', 'author6']))
self.assertEqual(authors['author4'], ['author1'])
if __name__ == '__main__':
unittest.main()
| ken57/personalize_search_experiment | test/authors_network_builder_test.py | authors_network_builder_test.py | py | 830 | python | en | code | 1 | github-code | 13 |
8442080834 | """
script to create pi zero mounting
"""
# ------------------- imports -----------------------------------------
import solid2 as ps
# ------------------- main dimensions ---------------------------------
# All dimensions are presumably millimetres (OpenSCAD convention) -- confirm.
thick = 2       # plate thickness
screw_od = 2    # servo screw hole diameter
mount_od = 6    # mounting hole diameter
hh = 20         # height of the hole cylinders (tall enough to cut through)
mount_l = 40    # base plate length
mount_w = 40    # base plate width
servo = {
    "l": 23,    # servo body cutout length
    "w": 13,    # servo body cutout width
    "hd": 27    # distance between the two servo screw holes
}
# ------------------- Base rectangle ------------------------------------
p_base = ps.cube([mount_l, mount_w, thick], center=True).up(thick / 2).forward(mount_w/2)
p_servo = ps.cube([servo["l"], servo["w"], thick], center=True).up(thick/2).forward(servo["w"]/2)
h_servo = ps.cylinder(d=screw_od, h=hh).forward(servo["w"]/2)
h_servos = h_servo.right(servo["hd"]/2) + h_servo.left(servo["hd"]/2)
h_mount = ps.cylinder(d=mount_od, h=hh).forward(mount_w-mount_od)
# ------------------- Screw Tubes -----------------------------------------
# Subtract the servo cutout and all holes from the base plate, then export.
p = p_base-p_servo - h_servos - h_mount
p.save_as_scad("OpenScad/SP2/ServoMount/Files/servomount.scad")
| greyliedtke/PyExplore | OpenScad/SP2/ServoMount/RectangleMounting.py | RectangleMounting.py | py | 973 | python | en | code | 0 | github-code | 13 |
class Node:
    """A single binary-tree node: a key plus left/right child links."""

    def __init__(self, key):
        self.key, self.left, self.right = key, None, None
class BST:
    """Binary search tree wrapper: tracks only the root node."""

    def __init__(self):
        self.root = None  # empty until the first insert
# BST is inherently a binary tree
class Node:
    """Binary-tree node used by insert() (same shape as the Node above)."""

    def __init__(self, key):
        self.key, self.left, self.right = key, None, None


# Insert operation preserving BST properties
def insert(root, key):
    """Insert *key* below *root*; return the (possibly new) subtree root.

    Larger keys descend right, smaller-or-equal keys descend left, so
    duplicate keys accumulate in the left subtree.
    """
    if root is None:
        return Node(key)
    if key > root.key:
        root.right = insert(root.right, key)
    else:
        root.left = insert(root.left, key)
    return root
# Create a sample BST
bst = BST()
keys = [5, 3, 8, 1, 4, 7, 9]
for key in keys:
    bst.root = insert(bst.root, key)
# Create a sample BST
# NOTE(review): verbatim repeat of the block above; the first tree is simply
# rebuilt from scratch and the previous one discarded.
bst = BST()
keys = [5, 3, 8, 1, 4, 7, 9]
for key in keys:
    bst.root = insert(bst.root, key)
def isBST(node, min_val=float('-inf'), max_val=float('inf')):
    """Return True when the subtree rooted at *node* is a valid BST.

    Every key must lie inside the inclusive window [min_val, max_val]; the
    window tightens on the way down.  Integer keys are assumed, since the
    child windows are narrowed with +/- 1 (which also forbids duplicates).
    """
    if node is None:  # an empty subtree is trivially valid
        return True
    if node.key < min_val or node.key > max_val:
        return False
    # Left keys stay strictly below node.key, right keys strictly above.
    left_ok = isBST(node.left, min_val, node.key - 1)
    right_ok = isBST(node.right, node.key + 1, max_val)
    return left_ok and right_ok
# Binary Tree
class Node:
    """Binary-tree node (identical redefinition of the classes above)."""

    def __init__(self, key):
        self.key = key
        self.left = None
        self.right = None
# Non-BST Binary Tree
# NOTE(review): despite the label, 3 < 5 < 8 means this particular tree
# does satisfy the BST ordering property.
root = Node(5)
root.left = Node(3)
root.right = Node(8)
# Ensure no duplicate keys
def insert_no_duplicates(root, key):
    """BST insert that leaves the tree unchanged when *key* already exists."""
    if root is None:
        return Node(key)
    else:
        if root.key == key:
            return root # Avoid duplicates
        elif root.key < key:
            root.right = insert_no_duplicates(root.right, key)
        else:
            root.left = insert_no_duplicates(root.left, key)
    return root
def insert_and_order(root, key):
    """Duplicate-free BST insert; logic is identical to insert_no_duplicates."""
    if root is None:
        return Node(key)
    else:
        if root.key == key:
            return root # Avoid duplicates
        elif root.key < key:
            root.right = insert_and_order(root.right, key)
        else:
            root.left = insert_and_order(root.left, key)
        return root
def in_order_traversal(node):
    """Print keys in ascending order: left subtree, node, right subtree."""
    if node:
        in_order_traversal(node.left)
        print(node.key)
        in_order_traversal(node.right)
class Node:
    """Binary-tree node (final identical redefinition in this file)."""

    def __init__(self, key):
        self.key = key
        self.left = None
        self.right = None
| Hienu/TranDanhHieu_CTDL | Đề tài giữa kỳ_DK009/11 Binary Search Trees/001 What are Binary Search Trees/a.py | a.py | py | 2,304 | python | en | code | 0 | github-code | 13 |
38046101303 | from setuptools import setup, find_packages
import os
version = open(os.path.join("collective", "wfform", "version.txt")).read().strip()
setup(name='collective.wfform',
version=version,
description="",
long_description=open(os.path.join("README.txt")).read() + "\n" +
open(os.path.join("docs", "INSTALL.txt")).read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='',
author='Michael Davis',
author_email='m.r.davis@cranfield.ac.uk',
url='http://svn.plone.org/svn/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'z3c.form',
'plone.app.registry',
'plone.app.z3cform',
],
extras_require = {
'test': [
'plone.app.testing',
]
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| davismr/collective.wfform | setup.py | setup.py | py | 1,237 | python | en | code | 0 | github-code | 13 |
16882555891 | from tests.Pages.UpdateBlogPage import UpdateBlogPage
from selenium.webdriver.common.action_chains import ActionChains
import pytest
import marks
import os
# Marks applied to every test in this module.
pytestmark = [marks.update_blog_page, pytest.mark.update_blog]
# caveats:
# - this test share the login state for this entire module, so each test function might affect each other.
# so please keep the state original if you modify the state during a test function
# ex)
# if you add avatar image during a test function, as cleanup, you should remove the avatar image after assertion
# BLOGLISTPAGE
@marks.all_ssize
def test_update_blog_page_should_display_update_blog_page_heading(responsive_target, login_for_update_blog):
    """The page must render its 'Update Blog' heading at every screen size."""
    update_blog_page = UpdateBlogPage(responsive_target['driver'], independent=False)
    # check page title does exist
    assert update_blog_page.does_have_text_in_page('Update Blog')
@marks.all_ssize
def test_update_blog_page_should_save_update_blog(responsive_target, login_for_update_blog):
    """Filling in image/title/subtitle/tag must trigger a save ('ok' status)."""
    update_blog_page = UpdateBlogPage(responsive_target['driver'], independent=False)
    update_blog_page.scroll_to_top()
    update_blog_page.enter_text_in_element(os.getcwd()+"/tests/data/test_image.jpg", 'main_image_input')
    update_blog_page.enter_text_in_element('selenium title', 'blog_title_input', clear=True)
    update_blog_page.enter_text_in_element('selenium subtitle', 'blog_subtitle_input', clear=True)
    update_blog_page.enter_text_in_element_and_click('new-tag', 'blog_tag_input')
    # update_blog_page.enter_text_in_element('selenium content', 'blog_content_input')
    # update_blog_page.wait_for_text('saving...')
    # update_blog_page.wait_for_text('ok')
    update_blog_page.wait_for_text('ok')
    # errors:
    #  - sometimes, fetch result display 'timeout error'
    #  - firefox does not allow to input text at div element
    #  - steps such as wait for element issue
    # should re-implement this
    # check page title does exist
    assert update_blog_page.does_element_exist('fetch_status_title')
    # clean up: restore the original state for the other tests in this module
    update_blog_page.click_element('image_delete_icon')
    update_blog_page.click_element('blog_tag_delete_icon')
@marks.all_ssize
def test_update_blog_page_should_publish_update_blog(responsive_target, login_for_update_blog):
    """Clicking publish after editing must complete with an 'ok' status."""
    update_blog_page = UpdateBlogPage(responsive_target['driver'], independent=False)
    update_blog_page.scroll_to_top()
    update_blog_page.enter_text_in_element('selenium title', 'blog_title_input', clear=True)
    update_blog_page.enter_text_in_element('selenium subtitle', 'blog_subtitle_input', clear=True)
    update_blog_page.scroll_to_bottom()
    update_blog_page.wait_for_animation_finish()
    update_blog_page.click_element("publish_button")
    update_blog_page.wait_for_text('ok')
    # errors:
    #  - sometimes, fetch result display 'timeout error'
    #  - firefox does not allow to input text at div element
    #  - steps such as wait for element issue
    # should re-implement this
    # check page title does exist
    assert update_blog_page.does_element_exist('fetch_status_title')
| stsiwo/python-selenium-testing | tests/TestCase/LoginRequired/UpdateBlogPage/test_update_page.py | test_update_page.py | py | 3,087 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.