seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
42634550933 | __author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/10/2020 5:05 PM'
class Solution:
    """LeetCode 7 — Reverse Integer."""

    def reverse(self, x: int) -> int:
        """Return ``x`` with its decimal digits reversed.

        The sign is preserved.  If the reversed value falls outside the
        signed 32-bit range [-2**31, 2**31 - 1], return 0 instead
        (the original code wrongly accepted values up to 2**31 + 1).
        """
        negative = x < 0
        # abs() drops the '-' sign, so the slice reversal sees digits only.
        res = int(str(abs(x))[::-1])
        if negative:
            res = -res
        # Clamp to the signed 32-bit integer range.
        if -2 ** 31 <= res <= 2 ** 31 - 1:
            return res
        return 0
if __name__ == '__main__':
    # Ad-hoc smoke test: 120 reversed (trailing zero dropped) -> 21.
    num = 120
    res = Solution().reverse(num)
    print(res)
print(2 // 2) | AaronYang2333/CSCI_570 | records/08-10/rever.py | rever.py | py | 650 | python | en | code | 107 | github-code | 50 |
33470901957 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 25 19:57:25 2022
@author: tim
"""
import skrf as rf
import matplotlib.pyplot as plt
f_div=1e9
# Load each calibrated 2-port touchstone file and reduce it to a 1-port
# network built from S11 only, with frequency scaled down by f_div.
# NOTE(review): skrf's Network(frequency=...) normally expects a Frequency
# object rather than a bare array — confirm this scaling works as intended.
short2port = rf.Network('test/short_after_cal_v3.s2p')
short1port = rf.Network(frequency = short2port.f/f_div, s=short2port.s[:,0,0], name='short')
open2port = rf.Network('test/open_after_cal_v3.s2p')
open1port = rf.Network(frequency = open2port.f/f_div, s=open2port.s[:,0,0], name='open')
load2port = rf.Network('test/load_after_cal_v3.s2p')
load1port = rf.Network(frequency = load2port.f/f_div, s=load2port.s[:,0,0], name='load')
dut2port = rf.Network('test/dut_after_cal_v3.s2p')
dut1port = rf.Network(frequency = dut2port.f/f_div, s=dut2port.s[:,0,0], name='dut')
# Magnitude (dB) plot of the three calibration standards plus the DUT.
plt.figure()
short1port.plot_s_db()
open1port.plot_s_db()
load1port.plot_s_db()
dut1port.plot_s_db()
plt.savefig('SOLD_db.png',dpi=300)
plt.show()
# Phase (degrees) plot of the same four networks.
plt.figure()
short1port.plot_s_deg()
open1port.plot_s_deg()
load1port.plot_s_deg()
dut1port.plot_s_deg()
plt.savefig('SOLD_deg.png',dpi=300)
plt.show()
# Combined overview plot of the DUT alone.
plt.figure()
dut1port.plot_it_all()
plt.savefig('dut_all.png',dpi=300)
plt.show()
| practable/pocket-vna-one-port | arduino/plot_manual_test.py | plot_manual_test.py | py | 1,122 | python | en | code | 0 | github-code | 50 |
20436652183 | from youtube_transcript_api import YouTubeTranscriptApi as trans
from youtube_transcript_api._errors import TranscriptsDisabled, NoTranscriptFound
import pandas as pd
import bs4 as bs
import requests
import os
from datetime import datetime, timedelta
import re
from exceptions import check_keyerror_cause, QuotaExceededError
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def format_duration(duration):
    """Convert an ISO-8601 YouTube duration (e.g. 'PT1H2M3S') to 'HH:MM:SS'.

    Components absent from the source string are filled in as '00'.
    """
    # Strip the leading 'PT' and the trailing unit letter, then turn the
    # remaining H/M markers into ':' separators.
    body = duration[2:-1].replace("H", ":").replace("M", ":")
    fields = body.split(":")
    if len(fields) != 3:
        # Pad out whichever of hours/minutes/seconds the string omitted.
        for pos, marker in enumerate(("H", "M", "S")):
            if marker not in duration:
                fields.insert(pos, "00")
    # Left-pad single-digit fields to two characters.
    return ":".join(f if len(f) == 2 else "0" + f for f in fields)
def extract_vid_data(video):
    """Flatten one YouTube API video resource into a row of values.

    Row order: id, title, description, tags, publish date, thumbnail URL,
    formatted duration, view count, like count (or "Likes Disabled"),
    comment count (or "Comments Disabled"), channel title, channel id.
    """
    snippet = video["snippet"]
    stats = video["statistics"]
    # dict.get / "in dict" replace the original "in d.keys()" anti-pattern.
    likes = int(stats["likeCount"]) if "likeCount" in stats else "Likes Disabled"
    comments = int(stats["commentCount"]) if "commentCount" in stats else "Comments Disabled"
    return [
        video["id"],
        snippet["title"],
        snippet["description"],
        snippet.get("tags", ""),
        snippet["publishedAt"],
        snippet["thumbnails"]["high"]["url"],
        format_duration(video["contentDetails"]["duration"]),
        int(stats["viewCount"]),
        likes,
        comments,
        snippet["channelTitle"],
        snippet["channelId"]
    ]
def extract_channel_data(channel):
    """Return (channel_id, subscriber_count) for one API channel resource.

    The count is the string "Hidden" when the channel hides its subscribers.
    """
    stats = channel["statistics"]
    if stats["hiddenSubscriberCount"] == False:
        count = int(stats["subscriberCount"])
    else:
        count = "Hidden"
    return (channel["id"], count)
def get_transcript(video_id, index, number_of_videos):
    """Fetch the English transcript text of a video via youtube_transcript_api.

    index/number_of_videos are used only for progress output.  Returns the
    caption fragments joined with spaces, or "No transcript" when captions
    are disabled or no English transcript exists.
    """
    try:
        print(f"Fetching transcript for video : {index}/{number_of_videos}")
        transcript = trans.list_transcripts(video_id).find_transcript(language_codes=["en"]).fetch()
        # Join the per-caption "text" fragments into one string.
        transcript_text = " ".join(list(map(lambda x: x["text"], transcript)))
    except (NoTranscriptFound, TranscriptsDisabled):
        return "No transcript"
    return transcript_text
def check_multi_tickers(df, filename="all_tickers.csv"):
    """Count known ticker symbols appearing in each title of df.Title.

    Tickers are read from the first column of `filename`.  Returns two
    parallel lists: the per-title match count and the (upper-cased) matched
    tickers themselves, duplicates included.
    """
    known = list(pd.read_csv(filename).iloc[:, 0])
    counts, matches = [], []
    for title in list(df.Title):
        words = re.split(r",|!|\$| |\||\.|\?|\:|\(|\)|/|#", title)
        found = [w.upper() for w in words if w.upper() in known]
        counts.append(len(found))
        matches.append(found)
    return counts, matches
def generate_dataframe(vid_data, comments_data, channel_data, tickers, transcript_data=None, index="VideoID", existing_data=False):
    """Assemble video, comment and channel data into one de-duplicated DataFrame.

    vid_data: rows as produced by extract_vid_data.
    channel_data: (channel_id, subscriber_count) tuples.
    existing_data: False, or a previously saved DataFrame to merge with.
    Returns the combined frame indexed by `index`.
    """
    # Extract data, collect into a dataframe, and save to csv file
    headers = ["VideoID", "Title", "Description", "Tags", "Publish Date", "Thumbnail", "Duration", "Views", "Likes", "Number of Comments", "Channel Name", "Channel ID"]
    df = pd.DataFrame(data=vid_data, columns=headers)
    df["Stock"] = tickers
    # If comment retrieval is turned off, the comments data list will be empty, so don't add it to the dataframe
    if comments_data:
        df["Comments"] = comments_data
    # NOTE(review): `!= False` means the default None also creates a (null)
    # Transcript column — confirm that is intended.
    if transcript_data != False:
        df["Transcript"] = transcript_data
    # Extract channel data into a separate dataframe and join with main dataframe by channel ID (this is so each of multiple videos from the same channel have subscriber count in the output file)
    headers = ["Channel ID", "Subscriber Count"]
    channel_df = pd.DataFrame(data=channel_data, columns=headers)
    channel_df.set_index("Channel ID", inplace=True)
    df = df.join(channel_df, on="Channel ID").drop_duplicates(subset=[index])
    # Reads the default "all_tickers.csv" from the working directory.
    number_stocks, stocks_in_title = check_multi_tickers(df)
    df["Number of Stocks in Title"] = number_stocks
    df["Stocks in Title"] = stocks_in_title
    # `type(...) != bool` distinguishes a real DataFrame from the False default.
    if type(existing_data) != bool:
        df = pd.concat([df, existing_data]).drop_duplicates(subset=[index])
    return df.set_index(index)
def check_for_data(filename):
    """Return True if `filename` exists on disk (idiomatic direct return)."""
    return os.path.exists(filename)
# If there is a settings file (containing information about how many videos to grab for each stock / whether to grab comments or not), open the file.
# If it has not been > 1 day since the program was last run, display a warning as it is unlikely the API quota will not have refreshed.
# If there is no settings file, the user must select how many videos to grab for each stock / whether to grab comments or not, based on how much quota they want to use.
def get_run_time():
    """Warn interactively if run within 24h of the last run; return a new run timestamp.

    Reads the previous run time from the first line of settings.txt; a missing
    file is treated as a fresh start with full quota.  May call quit() if the
    user declines to continue.
    """
    try:
        with open("settings.txt", "r") as f:
            settings = f.readlines()
        # NOTE(review): readlines() keeps the trailing newline; strptime on a
        # line ending in '\n' raises ValueError — confirm how the file is written.
        last_run_time = datetime.strptime(settings[0], '%d/%m/%y %H:%M:%S')
        if datetime.now() - timedelta(days=1) < last_run_time:
            print(f"""
            --------------------------------------------------------------------------------------------------------------------------------------------------------------------
            WARNING! This program was last run less than 24 hours ago (at {last_run_time.strftime('%d/%m/%y %H:%M:%S')}).
            It is likely that the API quota will be exceeded the program is run again within 24 hours, no extra data will be collected if the quota is exceeded.
            The API quota will be refreshed at {(last_run_time + timedelta(days=1)).strftime('%d/%m/%y %H:%M:%S')}, it is recommended that this program is run after this time.
            --------------------------------------------------------------------------------------------------------------------------------------------------------------------
            """)
            # Keep prompting until the user gives a valid y/n answer.
            while True:
                con = input("Would you like to continue? (y/n) : ").lower()
                if con in ["y", "n"]:
                    if con == "n":
                        quit()
                    else:
                        break
                else:
                    print("INVALID INPUT : Enter 'y' for yes or 'n' for no\n")
    except FileNotFoundError:
        print("No previous run time available. Assuming full API quota.")
    run_time = datetime.now().strftime('%d/%m/%y %H:%M:%S')
    return run_time
def date_to_RFC(date):
    """Format a datetime as an RFC-3339 style UTC timestamp string ('...T...Z')."""
    return f'{date.isoformat("T")}Z'
def paginated_results(search_obj, request, limit_requests=4):
    """Yield API responses page by page.

    Stops after `limit_requests` pages (None = no limit) or when the API
    reports no further page.
    """
    budget = limit_requests if limit_requests is not None else -1
    while True:
        # Stop on exhausted pagination or an exhausted page budget.
        if not request or budget == 0:
            break
        page = request.execute()
        yield page
        budget -= 1
        request = search_obj.list_next(request, page)
def search_request(service, query, start_date, end_date, order, pages, max_results=50):
    """Run a paginated YouTube Data API search between two RFC-3339 dates.

    Returns parallel lists of video ids, channel ids, and the ticker
    (first word of `query`) repeated once per result.
    """
    print(start_date, end_date)  # debug output
    search = service.search()
    # NOTE: local `search_request` shadows this function's name (harmless here).
    search_request = search.list(
        part="snippet",
        q=query,
        publishedAfter=start_date,
        publishedBefore=end_date,
        order=order,
        maxResults=max_results
    )
    responses = paginated_results(search, search_request, limit_requests=pages)
    videos = []
    for response in responses:
        videos.extend(response["items"])
    vid_ids = list(map(lambda x : x["id"]["videoId"], videos))
    channel_ids = list(map(lambda x : x["snippet"]["channelId"], videos))
    tickers = [query.split(" ")[0]] * len(vid_ids)
    return vid_ids, channel_ids, tickers
def earnings_announcement_period(ea_date, width=10):
    """Return the (start, end) dates `width` days either side of an earnings date."""
    window = timedelta(days=width)
    return ea_date - window, ea_date + window
def search_queries(API_KEYS, queries, dates, ids, pages_per_query):
    """Process every search query, rotating to the next API key on quota exhaustion.

    Returns collected video ids, channel ids, tickers, and the ids of the
    queries that completed.
    NOTE(review): on quota exhaustion `queries` is sliced from the failed
    index, but `dates` and `ids` are not sliced to match, so index i into
    dates/ids misaligns on the next key — confirm and fix upstream.
    """
    # Set empty lists to be filled in following loop
    vid_ids = []
    channel_ids = []
    tickers = []
    ids_done = []
    # Loop through API keys allocated to search requests
    for i, API_KEY in enumerate(API_KEYS):
        print(f"API Key {i+1} of {len(API_KEYS)}")
        try:
            # Build the YouTube API search object (this must be done each time there is a new API key used)
            with build("youtube", "v3", developerKey=API_KEY) as service:
                print("Fetching Search Results...")
                # Loop through queries
                # NOTE(review): this inner `i` shadows the outer key index.
                for i, query in enumerate(queries):
                    print(query)
                    print(ids[i])
                    print(type(dates[i][0]))
                    try:
                        # Fetch video + channel IDs and the ticker of the stock in question
                        v, c, t = search_request(service, query, date_to_RFC(dates[i][0]), date_to_RFC(dates[i][1]), order="date", pages=pages_per_query)
                    # Catches instances where API quota has been exceeded, saves the index of the query currently being processed
                    # Stores this so query can be processed using the next API key
                    except HttpError as e:
                        if repr(e)[-18:-5] == "quotaExceeded":
                            print("API Quota Exceeded! Trying a different API Key...")
                            query_index = i
                            raise QuotaExceededError
                    # If no error occurs, add video/channel ids and tickers to relevant lists
                    else:
                        vid_ids.extend(v)
                        channel_ids.extend(c)
                        tickers.extend(t)
                        # Add id to ids_done list to determine which queries have been completed
                        ids_done.append(ids[i])
                # If we get to this point in the loop with no quota-related errors, all queries must have been processed, so break from outer loop
                print("Queries complete! Breaking from loop...")
                break
        except QuotaExceededError:
            queries = queries[query_index:]
return vid_ids, channel_ids, tickers, ids_done | ethanhinton/finfluencer-finance | functions.py | functions.py | py | 10,468 | python | en | code | 0 | github-code | 50 |
def distance(space, bs_x, bs_y, tx, ty, lv):
    """BFS shortest-path length from (bs_x, bs_y) to (tx, ty) on a square grid.

    Cells with value <= lv are passable.  Returns -1 when the target cell's
    value exceeds lv, None when the target is unreachable, otherwise the
    number of steps.  Improvements over the original: O(1) deque pops instead
    of O(n) list.pop(0), grid size derived locally from the input instead of
    relying on the global N, and the start cell is marked visited.
    """
    n = len(space)  # grid is n x n
    if space[tx][ty] > lv:
        return -1
    queue = deque([(bs_x, bs_y, 0)])
    seen = [[False] * n for _ in range(n)]
    seen[bs_x][bs_y] = True
    while queue:
        x, y, d = queue.popleft()
        if x == tx and y == ty:
            return d
        for dx, dy in ((-1, 0), (0, -1), (0, 1), (1, 0)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < n and 0 <= ny < n and not seen[nx][ny] and space[nx][ny] <= lv:
                seen[nx][ny] = True
                queue.append((nx, ny, d + 1))
    return None  # unreachable (explicit, matching the original's implicit None)
import sys
input = sys.stdin.readline  # NOTE: shadows the builtin input() for fast reads
# "Baby Shark" simulation: the shark (cell value 9) repeatedly eats the
# nearest fish smaller than its level; after eating `level` fish it levels up.
N = int(input())
space = [list(map(int, input().split())) for _ in range(N)]
bs_lv = 2    # shark level: can eat fish strictly smaller than this
bs_exp = 0   # fish eaten at the current level
bs_x, bs_y = 0, 0
# Locate the shark's starting cell and clear it.
for i in range(N):
    for j in range(N):
        if space[i][j] == 9:
            bs_x, bs_y = i, j
            space[i][j] = 0
            break
answer = 0
for _ in range(N*N-1):
    # Collect every reachable fish smaller than the shark.
    edible = []
    for tx in range(N):
        for ty in range(N):
            if space[tx][ty] != 0 and space[tx][ty] < bs_lv:
                dist = distance(space, bs_x, bs_y, tx, ty, bs_lv)
                if dist == None: continue
                edible.append([tx, ty, dist])
    if len(edible) == 0: break
    # Nearest first; ties broken top-most then left-most.
    edible.sort(key=lambda x: (x[2], x[0], x[1]))
    tx, ty, dist = edible.pop(0)
    space[tx][ty] = 0
    bs_x, bs_y = tx, ty
    bs_exp += 1
    if bs_exp == bs_lv:
        bs_lv += 1
        bs_exp = 0
    answer += dist
print(answer) | Dodant/potential-octo | 백준/Gold/16236. 아기 상어/아기 상어.py | 아기 상어.py | py | 1,434 | python | en | code | 0 | github-code | 50 |
23997978147 | bl_info = {
"name": "KTX Tools",
"author": "Roel Koster",
"version": (3, 5),
"blender": (2, 7, 0),
"location": "View3D > Tools",
"category": "Tools"}
import bpy, mathutils, math, random, colorsys, bmesh, operator
from mathutils import Vector
class KTXAssignRandomDiffuseColors(bpy.types.Operator):
    """Give every selected mesh/curve object a random diffuse color.

    Channels are drawn per-object from the min/max ranges below, interpreted
    as RGB directly or as HSV when rgb_or_hsv is enabled.
    """
    bl_idname = "wm.ktx_assign_random_diffuse_colors"
    bl_label = "Rnd Diff. Colors"
    bl_options = {'REGISTER', 'UNDO'}
    random_seed = bpy.props.IntProperty(name="Random Seed",
        description="Seed value for the random generator",
        min=0,
        max=10000,
        default=0)
    rgb_or_hsv = bpy.props.BoolProperty(
        name="RGB/HSV",
        description="RGB or Select to choose HSV",
        default=False)
    rminmax = bpy.props.FloatVectorProperty(
        size=2,
        name="RH Min/Max Values",
        description="Red or Hue Min/Max Values",
        default=(0.0, 1.0), min=0.0, max=1.0)
    gminmax = bpy.props.FloatVectorProperty(
        size=2,
        name="GS Min/Max Values",
        description="Green or Saturation Min/Max Values",
        default=(0.0, 1.0), min=0.0, max=1.0)
    bminmax = bpy.props.FloatVectorProperty(
        size=2,
        name="BV Min/Max Values",
        description="Blue or Value Min/Max Values",
        default=(0.0, 1.0), min=0.0, max=1.0)
    def execute(self, context):
        """Seed the RNG, then recolor each selected mesh/curve's active material."""
        import random
        from random import uniform
        random.seed(self.random_seed)
        for obj in bpy.context.selected_objects:
            if (obj.type=='MESH' or obj.type=='CURVE'):
                r=uniform(self.rminmax[0],self.rminmax[1])
                g=uniform(self.gminmax[0],self.gminmax[1])
                b=uniform(self.bminmax[0],self.bminmax[1])
                m=obj.active_material
                if self.rgb_or_hsv:
                    col=colorsys.hsv_to_rgb(r,g,b)
                    # Drive both the node socket color and the viewport color.
                    m.node_tree.nodes[1].inputs[0].default_value=(col[0],col[1],col[2],1)
                    obj.active_material.diffuse_color=(col)
                else:
                    m.node_tree.nodes[1].inputs[0].default_value=(r,g,b,1)
                    obj.active_material.diffuse_color=(r,g,b)
        return {'FINISHED'}
class KTXAddRandomCubes(bpy.types.Operator):
    """Scatter cubes with random position, rotation and scale inside a box span."""
    bl_idname = "wm.ktx_add_random_cubes"
    bl_label = "Rnd Cubes"
    bl_options = {"REGISTER", "UNDO"}
    random_seed = bpy.props.IntProperty(name="Random Seed",
        description="Seed value for the random generator",
        min=0,
        max=10000,
        default=0)
    count = bpy.props.IntProperty(name="Count",
        description="Number of Cubes",
        default=20, min=3, max=1000)
    uniformscale = bpy.props.BoolProperty(name="UniScale",
        description="Uniform Scale",
        default=True)
    minsize = bpy.props.FloatProperty(name="MinSize",
        description="Minumum Cube Size",
        default=0.1, min=0.01, max=20.0)
    maxsize = bpy.props.FloatProperty(name="MaxSize",
        description="Maximum Cube Size",
        default=0.1, min=0.01, max=20.0)
    span = bpy.props.FloatVectorProperty(name="Span",
        description="Distribution Area",
        default=(1.0, 1.0, 1.0), min=0.01, max=200.0)
    rotation = bpy.props.FloatVectorProperty(name="Rotation",
        description="Rotation",
        default=(0.0, 0.0, 0.0), min=-3.141592, max=3.141592, subtype='EULER')
    def execute(self, context):
        """Create the cubes.  NOTE(review): range(1, count) yields count-1 cubes."""
        import random
        from random import uniform
        random.seed(self.random_seed)
        for i in range (1,self.count):
            # Random position within +/- span and rotation within +/- rotation.
            fspan=Vector(uniform(-val, val) for val in self.span)
            frotation=Vector(uniform(-val, val) for val in self.rotation)
            xrand=uniform(self.minsize,self.maxsize)
            yrand=uniform(self.minsize,self.maxsize)
            zrand=uniform(self.minsize,self.maxsize)
            if self.uniformscale:
                fsize=Vector((xrand,xrand,xrand))
            else:
                fsize=Vector((xrand,yrand,zrand))
            bpy.ops.mesh.primitive_cube_add(location=fspan, rotation=frotation)
            ob=bpy.context.object
            ob.name='Kuub'
            ob.scale=fsize
        return {'FINISHED'}
class KTXAddRandomCopies(bpy.types.Operator):
    """Scatter randomized duplicates of the active object.

    Each duplicate becomes the new active object, so copies chain off the
    previous duplicate.  NOTE(review): range(1, count) yields count-1 copies.
    """
    bl_idname = "wm.ktx_add_random_copies"
    bl_label = "Rnd Copies"
    bl_options = {"REGISTER", "UNDO"}
    random_seed = bpy.props.IntProperty(name="Random Seed",
        description="Seed value for the random generator",
        min=0,
        max=10000,
        default=0)
    linkedcopy = bpy.props.BoolProperty(name="Linked",
        description="Make a Linked copy",
        default=False)
    count = bpy.props.IntProperty(name="Count",
        description="Number of Cubes",
        default=20, min=3, max=1000)
    uniformscale = bpy.props.BoolProperty(name="UniScale",
        description="Uniform Scale",
        default=True)
    minsize = bpy.props.FloatProperty(name="MinSize",
        description="Minimum Size",
        default=1.0, min=0.001, max=20.0)
    maxsize = bpy.props.FloatProperty(name="MaxSize",
        description="Maximum Size",
        default=1.0, min=0.001, max=20.0)
    span = bpy.props.FloatVectorProperty(name="Span",
        description="Distribution Area",
        default=(1.0, 1.0, 1.0), min=0.01, max=200.0)
    rotation = bpy.props.FloatVectorProperty(name="Rotation",
        description="Rotation",
        default=(0.0, 0.0, 0.0), min=-3.141592, max=3.141592, subtype='EULER')
    def execute(self, context):
        import random
        from random import uniform
        random.seed(self.random_seed)
        obj=bpy.context.active_object
        if obj:
            for i in range (1,self.count):
                fspan=Vector(uniform(-val, val) for val in self.span)
                frotation=Vector(uniform(-val, val) for val in self.rotation)
                xrand=uniform(self.minsize,self.maxsize)
                yrand=uniform(self.minsize,self.maxsize)
                zrand=uniform(self.minsize,self.maxsize)
                if self.uniformscale:
                    fsize=Vector((xrand,xrand,xrand))
                else:
                    fsize=Vector((xrand,yrand,zrand))
                bpy.ops.object.duplicate(linked=self.linkedcopy)
                obj=bpy.context.active_object
                obj.location=fspan
                obj.scale=fsize
                obj.rotation_euler=frotation
        return {'FINISHED'}
class KTXAssignMaterials(bpy.types.Operator):
    """Create and assign a fresh node-based material (named after the object)
    to every selected mesh/curve object."""
    bl_idname = "wm.ktx_assign_materials"
    bl_label = "Add Deflt Mtrls"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        for obj in bpy.context.selected_objects:
            if (obj.type=='MESH' or obj.type=='CURVE'):
                mat=bpy.data.materials.new(obj.name)
                obj.active_material=mat
                obj.material_slots[0].material.use_nodes=True
        return {'FINISHED'}
class KTXAddGlossyMixShaders(bpy.types.Operator):
    """Mix a zero-roughness glossy shader (random 0-20% factor) into the
    first material of each selected object.

    NOTE(review): assumes every selected object has at least one material
    slot and that nodes[0]/nodes[1] are the output and base shader nodes.
    """
    bl_idname = "wm.ktx_add_glossy_mix_shaders"
    bl_label = "Add G/M Shaders"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        # Collect each selected object's first material name once.
        unique_mats=[]
        for obj in bpy.context.selected_objects:
            obj_mat_name=obj.material_slots[0].name
            if not obj_mat_name in unique_mats:
                unique_mats.append(obj_mat_name)
        for mat in bpy.data.materials:
            if mat.name in unique_mats:
                tree=mat.node_tree
                nodes=tree.nodes
                links=tree.links
                nodes[0].location.x = nodes[0].location.x + 200
                node_glossy=nodes.new('ShaderNodeBsdfGlossy')
                node_glossy.location=(10,150)
                node_glossy.inputs[1].default_value=0  # zero roughness
                node_mix=nodes.new('ShaderNodeMixShader')
                node_mix.location=(300,300)
                node_mix.inputs[0].default_value=random.randint(0,20)/100
                # Wire: base shader -> mix input 1, glossy -> mix input 2,
                # mix output -> material output.
                links.new(nodes[1].outputs[0],node_mix.inputs[1])
                links.new(node_glossy.outputs[0],node_mix.inputs[2])
                links.new(node_mix.outputs[0],nodes[0].inputs[0])
        return {'FINISHED'}
class KTXAddSubsurfCreases(bpy.types.Operator):
    """Optionally add a Subsurf modifier and set an edge crease on meshes.

    NOTE(review): iterates bpy.data.objects (every mesh in the file), not
    the selection, and creases are written even when `sub` is off — confirm
    both are intended.
    """
    bl_idname = "wm.ktx_add_subsurf_creases"
    bl_label = "Add SubSurf Crsd"
    bl_options = {'REGISTER', 'UNDO'}
    sub = bpy.props.BoolProperty(name="Sub Surface",
        description="Add Sub Surface",
        default=False)
    viewlevels = bpy.props.IntProperty(name="View Levels",
        description="Viewport Levels",
        default=3, min=1, max=4)
    renderlevels = bpy.props.IntProperty(name="Render Levels",
        description="Render Levels",
        default=3, min=1, max=4)
    creasevalue = bpy.props.FloatProperty(name="Crease Value",
        description="Crease Value",
        default=0.9, min=0.0, max=1.0)
    def execute(self, context):
        for obj in bpy.data.objects:
            if obj.type=='MESH':
                if self.sub:
                    mod1=obj.modifiers.new('sub','SUBSURF')
                    mod1.levels=self.viewlevels
                    mod1.render_levels=self.renderlevels
                # Apply the crease value to every edge of the mesh.
                for i in obj.data.edges:
                    i.crease=self.creasevalue
        return {'FINISHED'}
class KTXSetViewportColor(bpy.types.Operator):
    """Copy the active object's node color (node 1, input 0) to its viewport
    diffuse color (drops the alpha component)."""
    bl_idname = "wm.ktx_set_viewport_color"
    bl_label = "Set View Color"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        obj=bpy.context.active_object
        col=obj.material_slots[0].material.node_tree.nodes[1].inputs[0].default_value
        obj.active_material.diffuse_color=(col[0],col[1],col[2])
        return {'FINISHED'}
class KTXEraseAllMaterials(bpy.types.Operator):
    """Remove every material datablock that currently has no users."""
    bl_idname = "wm.ktx_erase_all_materials"
    bl_label = "Erase Unused Mtrls"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        bmat=bpy.data.materials
        for mat in bmat:
            # mat.use_fake_user=False
            if mat.users < 1:
                bmat.remove(mat)
        return {'FINISHED'}
class KTXEraseUnusedTextures(bpy.types.Operator):
    """Clear users from images not referenced by any IMAGE texture.

    NOTE(review): assumes every IMAGE texture has a non-None .image (would
    raise AttributeError otherwise); images are only user-cleared here —
    actual removal happens on the next save/reload.
    """
    bl_idname = "wm.ktx_erase_unused_textures"
    bl_label = "Erase Unused Txtrs"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        # Names of images still referenced by some texture.
        img_names = []
        textures = bpy.data.textures
        for tex in textures:
            if tex.type == 'IMAGE':
                img_names.append(tex.image.name)
        imgs = bpy.data.images
        for image in imgs:
            name = image.name
            if name not in img_names:
                image.user_clear()
        return {'FINISHED'}
class KTXEraseUnusedPalettes(bpy.types.Operator):
    """Remove palettes with no users, clearing their fake-user flag first so
    they actually qualify for removal."""
    bl_idname = "wm.ktx_erase_unused_palettes"
    bl_label = "Erase Unused Palettes"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        bpal=bpy.data.palettes
        for pal in bpal:
            pal.use_fake_user=False
            if pal.users < 1:
                bpal.remove(pal)
        return {'FINISHED'}
class KTXFunction(bpy.types.Operator):
    """Build a grid mesh z = f(x, y) by evaluating a user-supplied expression.

    SECURITY NOTE: `func` is passed to eval(), so arbitrary Python typed in
    the operator panel will be executed.
    """
    bl_idname="wm.ktx_function"
    bl_label="KTX Function"
    bl_options={'REGISTER','UNDO'}
    startx=bpy.props.FloatProperty(name="X min",
        description="X minimum value",
        default=-math.pi)
    endx=bpy.props.FloatProperty(name="X max",
        description="X maximum value",
        default=math.pi)
    starty=bpy.props.FloatProperty(name="Y min",
        description="Y minimum value",
        default=-math.pi)
    endy=bpy.props.FloatProperty(name="Y max",
        description="Y maximum value",
        default=math.pi)
    stepsx=bpy.props.IntProperty(name="Faces along X",
        description="How many faces in X direction",
        default=20)
    stepsy=bpy.props.IntProperty(name="Faces along Y",
        description="How many faces in Y direction",
        default=20)
    func=bpy.props.StringProperty(name="Function",
        description="Function to evaluate",
        default="math.sin(x)*math.cos(y)")
    def execute(self,context):
        msh=bpy.data.meshes.new('KTX Function')
        obj=bpy.data.objects.new('KTX Function',msh)
        bpy.data.scenes[0].objects.link(obj)
        bm=bmesh.new()
        if hasattr(bm.verts, "ensure_lookup_table"):
            bm.verts.ensure_lookup_table()
        incx=(self.endx-self.startx)/self.stepsx
        incy=(self.endy-self.starty)/self.stepsy
        # Generate (stepsx+1) x (stepsy+1) vertices row by row.
        y=self.starty
        r=0
        while r<=self.stepsy:
            x=self.startx
            c=0
            while c<=self.stepsx:
                z=eval(self.func)  # x and y are in scope for the expression
                bm.verts.new((x,y,z))
                c+=1
                x+=incx
            r+=1
            y+=incy
        # Stitch quads between each pair of consecutive vertex rows.
        offsetx=0
        r=0
        while r<self.stepsy:
            c=0
            while c<self.stepsx:
                bm.verts.ensure_lookup_table()
                f=[bm.verts[offsetx+c+1+self.stepsx],bm.verts[offsetx+c],bm.verts[offsetx+c+1],bm.verts[offsetx+c+2+self.stepsx]]
                bm.faces.new(f)
                c+=1
            r+=1
            offsetx+=self.stepsx
            offsetx+=1
        bm.to_mesh(msh)
        obj.data.update()
        return {'FINISHED'}
class KTXCylinders(bpy.types.Operator):
    """Create a row of tangent cylinders along +X whose radii grow by a
    constant factor; the tangency angle is reported via the `angle` property."""
    bl_idname="wm.ktx_cylinders"
    bl_label="KTX Cylinders"
    bl_options={'REGISTER','UNDO'}
    mesh=bpy.props.BoolProperty(name="Mesh/Curve",
        description="on=Mesh, off=Curve",
        default=True)
    startrad=bpy.props.FloatProperty(name="Start Radius",
        description="Cylinder Start Radius",
        default=0.01,min=0.001,precision=4,step=1)
    sizefactor=bpy.props.FloatProperty(name="Size Factor",
        description="Multiplication Factor",
        default=1.7,precision=4,step=1)
    count=bpy.props.IntProperty(name="Count",
        description="Number of Circles",
        default=8)
    segments=bpy.props.IntProperty(name="Cylinder Segments",
        description="Number of Circle Segments",
        default=32)
    startheight=bpy.props.FloatProperty(name="Start Height",
        description="Cylinder Start Height",
        default=0.01,precision=4,step=1)
    heightmode=bpy.props.BoolProperty(name="Height Mode",
        description="off=Increment, on=Multiplication",
        default=True)
    heightfactor=bpy.props.FloatProperty(name="Height Factor",
        description="Cylinder Height Inc. Factor",
        default=1.1,precision=4,step=1)
    heightoption=bpy.props.BoolProperty(name="Height Option",
        description="off=from center, on=from bottom",
        default=True)
    angle=bpy.props.FloatProperty(name="Calculated Angle",
        description="Angle is Calculated",
        default=1.00000,precision=4)
    def execute(self,context):
        # Angle subtended by two consecutive tangent circles of ratio sizefactor.
        angle=math.asin(((self.startrad*self.sizefactor)-self.startrad)/((self.startrad*self.sizefactor)+self.startrad))
        x=self.startrad/math.sin(angle)
        self.angle=math.degrees(angle)
        rad=self.startrad
        height=self.startheight
        for number_of_cylinders in range(0,self.count):
            if self.heightoption:
                z=height/2
            else:
                z=0
            if self.mesh:
                bpy.ops.mesh.primitive_cylinder_add(vertices=self.segments, radius=rad, depth=height, location=(x,0,z))
            else:
                # Curve variant: an extruded, filled bezier circle.
                bpy.ops.curve.primitive_bezier_circle_add(radius=rad, location=(x,0,0))
                obj=bpy.context.active_object
                obj.data.extrude=height
                obj.data.dimensions='2D'
                obj.data.fill_mode='BOTH'
            # Advance by old + new radius so consecutive cylinders touch.
            rad_old=rad
            rad*=self.sizefactor
            x+=rad_old+rad
            if self.heightmode:
                height*=self.heightfactor
            else:
                height+=self.heightfactor
        return {'FINISHED'}
class KTXCylinderGrid(bpy.types.Operator):
    """Fill a hex-packed grid with identical cylinders (mesh or extruded
    curve circles)."""
    bl_idname="wm.ktx_cylinder_grid"
    bl_label="KTX Cylinder Grid"
    bl_options={'REGISTER','UNDO'}
    mesh=bpy.props.BoolProperty(name="Mesh/Curve",
        description="on=Mesh, off=Curve",
        default=True)
    radius=bpy.props.FloatProperty(name="Radius",
        description="Cylinder Radius",
        default=0.01,min=0.001,precision=4,step=1)
    radsup=bpy.props.FloatProperty(name="Radius Supplement",
        description="Cylinder Radius Extra",
        default=0.0,precision=4,step=0.01)
    height=bpy.props.FloatProperty(name="Height",
        description="Cylinder Height",
        default=0.01,precision=4,step=1)
    segments=bpy.props.IntProperty(name="Cylinder Segments",
        description="Number of Circle Segments",
        default=32)
    countx=bpy.props.IntProperty(name="Count X",
        description="Number of Cylinders on X-axis",
        default=8)
    county=bpy.props.IntProperty(name="Count Y",
        description="Number of Cylinders on Y-axis",
        default=8)
    def execute(self,context):
        x=0
        y=0
        for v in range(0,self.county):
            # Offset every other row by one radius for hexagonal packing.
            if operator.mod(v,2)==0:
                x=0
            else:
                x=self.radius
            for u in range(0,self.countx):
                if self.mesh:
                    bpy.ops.mesh.primitive_cylinder_add(vertices=self.segments, radius=self.radius+self.radsup, depth=self.height, location=(x,y,0))
                else:
                    bpy.ops.curve.primitive_bezier_circle_add(radius=self.radius+self.radsup, location=(x,y,0))
                    obj=bpy.context.active_object
                    obj.data.extrude=self.height
                    obj.data.dimensions='2D'
                    obj.data.fill_mode='BOTH'
                x+=2*self.radius
            # Row spacing 2r*sqrt(0.75) = r*sqrt(3), the hex-packing pitch.
            y+=2*self.radius*math.sqrt(0.75)
        return {'FINISHED'}
class KTXObjectGrid(bpy.types.Operator):
    """Duplicate the active object into a triangular (hex-offset) or square grid."""
    bl_idname="wm.ktx_object_grid"
    bl_label="KTX Object Grid"
    bl_options={'REGISTER','UNDO'}
    linkedcopy = bpy.props.BoolProperty(name="Linked Copies",
        description="Make a Linked copy",
        default=False)
    trisq = bpy.props.BoolProperty(name="Triangular or Square",
        description="on=Triangular, off=Square",
        default=True)
    radius=bpy.props.FloatProperty(name="Triangular Distance",
        description="Triangular Distance",
        default=0.01,min=0.001,precision=4,step=0.1)
    countx=bpy.props.IntProperty(name="Count X",
        description="Number of Cylinders on X-axis",
        default=8)
    county=bpy.props.IntProperty(name="Count Y",
        description="Number of Cylinders on Y-axis",
        default=8)
    def execute(self,context):
        x=0
        y=0
        obj=bpy.context.active_object
        if obj:
            for v in range(0,self.county):
                if self.trisq:
                    # Offset alternate rows for triangular packing.
                    if operator.mod(v,2)==0:
                        x=0
                    else:
                        x=self.radius
                else:
                    x=0
                for u in range(0,self.countx):
                    # The original object itself serves as the first grid cell.
                    if not (u==0 and v==0):
                        bpy.ops.object.duplicate(linked=self.linkedcopy)
                    obj=bpy.context.active_object
                    obj.location=(x,y,0)
                    x+=2*self.radius
                if self.trisq:
                    y+=2*self.radius*math.sqrt(0.75)
                else:
                    y+=2*self.radius
        return {'FINISHED'}
class KTXPolarArray(bpy.types.Operator):
    """Duplicate the active object repeatedly, rotating each copy about Z.

    NOTE(review): x and y are computed from the angle but never used (compare
    KTXPolarArray_old, which also positions copies on the unit circle), and
    `angle` is initialised with math.radians() yet compared/incremented in
    degrees — confirm both before relying on this operator.
    """
    bl_idname="wm.ktx_polar_array"
    bl_label="KTX Polar Array"
    bl_options={'REGISTER','UNDO'}
    linkedcopy = bpy.props.BoolProperty(name="Linked Copies",
        description="Make a Linked copy",
        default=False)
    startang=bpy.props.FloatProperty(name="Start Angle",
        description="Start Angle",
        default=0.0)
    endang=bpy.props.FloatProperty(name="End Angle",
        description="End Angle",
        default=360.0)
    count=bpy.props.IntProperty(name="Number of Items",
        description="Number of Arrayed Items",
        default=8)
    def execute(self,context):
        inc=(360/self.count)
        angle=math.radians(self.startang)
        obj=bpy.context.active_object
        while angle <= self.endang:
            x=math.sin(math.radians(angle))
            y=math.cos(math.radians(angle))
            bpy.ops.object.duplicate(linked=self.linkedcopy)
            obj=bpy.context.active_object
            obj.rotation_euler=(0,0,math.radians(-angle))
            angle+=inc
        return {'FINISHED'}
class KTXPolarArray_old(bpy.types.Operator):
    """Older polar array: duplicates the active object around Z, placing each
    copy on the unit circle and rotating it to face outward.

    NOTE(review): `angle` is initialised with math.radians() yet compared and
    incremented in degrees — confirm the start-angle handling.
    """
    bl_idname="wm.ktx_polar_array_old"
    bl_label="KTX Polar Array Old"
    bl_options={'REGISTER','UNDO'}
    linkedcopy = bpy.props.BoolProperty(name="Linked Copies",
        description="Make a Linked copy",
        default=False)
    startang=bpy.props.FloatProperty(name="Start Angle",
        description="Start Angle",
        default=0.0)
    endang=bpy.props.FloatProperty(name="End Angle",
        description="End Angle",
        default=360.0)
    count=bpy.props.IntProperty(name="Number of Items",
        description="Number of Arrayed Items",
        default=8)
    def execute(self,context):
        inc=(360/self.count)
        angle=math.radians(self.startang)
        obj=bpy.context.active_object
        while angle <= self.endang:
            x=math.sin(math.radians(angle))
            y=math.cos(math.radians(angle))
            bpy.ops.object.duplicate(linked=self.linkedcopy)
            obj=bpy.context.active_object
            obj.location=(x,y,0)
            obj.rotation_euler=(0,0,math.radians(-angle))
            angle+=inc
        return {'FINISHED'}
class KTXSpiralCircles(bpy.types.Operator):
    """Build a spiral polyline mesh, adding an extruded bezier circle over
    each spiral segment.

    NOTE(review): the properties `cadd`, `ctype`, `linkedcopy` and
    `csegments` are never read in execute, and the `circle` helper references
    a name `s` that is not in its scope (it is a local of execute), so it
    would raise NameError if called — confirm before relying on either.
    """
    bl_idname="wm.ktx_spiral_circles"
    bl_label="KTX Circles on a spiral"
    bl_options={'REGISTER','UNDO'}
    cadd = bpy.props.BoolProperty(name="Add Circles",
        description="Add Circles to Spiral",
        default=False)
    ctype = bpy.props.BoolProperty(name="Segm.Circle/Curve",
        description="on=Segmented Circle, off=Bezier Circle",
        default=False)
    linkedcopy = bpy.props.BoolProperty(name="Linked Copies",
        description="Make a Linked copy",
        default=False)
    startrad = bpy.props.FloatProperty(name="Start Radius",
        description="Start Radius",
        default=1.0)
    rincrement=bpy.props.FloatProperty(name="Radius Increment",
        description="Radius Increment",
        default=0.1)
    startang=bpy.props.FloatProperty(name="Start Angle",
        description="Start Angle",
        default=0.0)
    endang=bpy.props.FloatProperty(name="End Angle",
        description="End Angle",
        default=360.0)
    increment=bpy.props.FloatProperty(name="Angle Increment",
        description="Angle Increment",
        default=10.0)
    zincrement=bpy.props.FloatProperty(name="Z Increment",
        description="Z Increment",
        default=0.0)
    height=bpy.props.FloatProperty(name="Circle Height",
        description="Curve Circle Extrude Height",
        default=0.1)
    csegments=bpy.props.IntProperty(name="Circle Segments",
        description="Circle Segments",
        default=16)
    def twopcircle(self,point_1,point_2):
        """Return (center_x, center_y, radius) of the circle whose diameter
        spans the two given XY points."""
        origin_x=(point_1[0]+point_2[0])/2.0
        origin_y=(point_1[1]+point_2[1])/2.0
        a=math.pow((point_2[0]-point_1[0]),2)
        b=math.pow((point_2[1]-point_1[1]),2)
        radius=math.sqrt(a+b)/2.0
        return(origin_x,origin_y,radius)
    def circle(self,origin_x,origin_y,origin_z,radius,segments):
        # NOTE(review): `s` below is undefined in this scope (NameError if called).
        for angle in range(0,round(360+segments),round(360/segments)):
            x=origin_x+math.cos(math.radians(angle))*radius
            y=origin_y+math.sin(math.radians(angle))*radius
            s.verts.new((x,y,0))
            if not (angle==0 or angle==round(360+segments)):
                s.edges.new((s.verts[-2],s.verts[-1]))
        return('True')
    def execute(self,context):
        import math, bmesh
        from math import radians
        msh=bpy.data.meshes.new('KTX Spiral')
        obj=bpy.data.objects.new('KTX Spiral',msh)
        bpy.data.scenes[0].objects.link(obj)
        s=bmesh.new()
        angle=self.startang
        r=self.startrad
        z=self.zincrement
        while angle<=self.endang:
            x=math.cos(math.radians(angle))*r
            y=math.sin(math.radians(angle))*r
            s.verts.new((x,y,z))
            if angle>self.startang:
                s.verts.ensure_lookup_table()
                s.edges.new((s.verts[-2],s.verts[-1]))
                # Bezier circle spanning the latest spiral segment.
                circ=self.twopcircle(s.verts[-2].co,s.verts[-1].co)
                bpy.ops.curve.primitive_bezier_circle_add(radius=circ[2], location=(circ[0],circ[1],z))
                obj1=bpy.context.active_object
                obj1.data.extrude=self.height
                obj1.data.dimensions='2D'
                obj1.data.fill_mode='BOTH'
            r+=self.rincrement
            angle+=self.increment
            z+=self.zincrement
        s.to_mesh(msh)
        obj.data.update()
        return {'FINISHED'}
class KTXPolish(bpy.types.Operator):
    """Decimate, bevel, subdivide, smooth and symmetrize the active object,
    leaving it in dyntopo sculpt mode."""
    bl_idname = "wm.ktx_polish"
    bl_label = "Polish"
    bl_options = {'REGISTER','UNDO'}
    def execute(self,context):
        bpy.ops.object.mode_set(mode = 'OBJECT')
        bpy.ops.object.modifier_add(type='DECIMATE')
        bpy.context.object.modifiers["Decimate"].ratio = 0.03
        bpy.context.object.modifiers["Decimate"].use_collapse_triangulate = True
        bpy.ops.object.modifier_add(type='BEVEL')
        bpy.context.object.modifiers["Bevel"].segments = 2
        bpy.context.object.modifiers["Bevel"].profile = 1
        bpy.context.object.modifiers["Bevel"].limit_method = 'ANGLE'
        # NOTE(review): the line above is duplicated in the original source.
        bpy.context.object.modifiers["Bevel"].limit_method = 'ANGLE'
        bpy.ops.object.modifier_remove(modifier="Subsurf")
        bpy.ops.object.subdivision_set(level=2)
        bpy.ops.object.convert(target='MESH')
        bpy.ops.object.shade_smooth()
        bpy.ops.object.mode_set(mode = 'SCULPT')
        bpy.ops.sculpt.dynamic_topology_toggle()
        bpy.ops.sculpt.symmetrize()
        return {'FINISHED'}
class KTXTriTangle(bpy.types.Operator):
    """Build an 'ordered tangle' triangle: one beveled bar is computed from
    the mm-sized inputs, then rotated/duplicated into six interlocking
    copies (two triangles at the tetrahedral dihedral angle)."""
    bl_idname = "wm.ktx_tri_tangle"
    bl_label = "Create Ordered Tangle Triangle"
    bl_options = {'REGISTER','UNDO'}

    # --- user inputs (mm; converted to meters with /100 in execute) ---
    angletype = bpy.props.BoolProperty(name="Sharp Corner Angle",
        description="Sharp (60) or NonSharp (30) Corner Angle",
        default=False)
    vx = bpy.props.FloatProperty(name="Size X (mm)",
        description="Length of Side",
        default=44)
    vy = bpy.props.FloatProperty(name="Size Y (mm)",
        description="Height of Side",
        default=44)
    r = bpy.props.FloatProperty(name="Corner Radius",
        description="Corner Radius (mm)",
        default=4, min=0.0)
    bevel = bpy.props.BoolProperty(name="Bevel Corners",
        description="Bevel Corners",
        default=True)
    bevelr = bpy.props.FloatProperty(name="Bevel Radius",
        description="Bevel Radius",
        default=4.0, min=0.0)
    beveltype = bpy.props.BoolProperty(name="Only Long Edges",
        description="Bevel only the long edges",
        default=True)
    smooth = bpy.props.BoolProperty(name="Smooth",
        description="Smooth Surfaces",
        default=True)
    edgesplit = bpy.props.BoolProperty(name="Edge Split",
        description="Edge Split",
        default=False)
    # Output-only: computed bar length, written back for the user to read.
    sl = bpy.props.FloatProperty(name="Saw Length (mm)",
        description="Saw Length",
        default=0.0)

    def execute(self,context):
        from math import radians
        # Derived geometry; alpha is based on the tetrahedral angle acos(1/3).
        alpha = math.radians(90) - math.acos(1/3)
        a = (self.vy/100) / 2.0
        b = a * math.tan(alpha)
        c = a / math.cos(alpha)
        d = b + c
        e = (self.vx/100) / math.cos(math.radians(30))
        h = d + e
        f = math.sqrt(3) * h
        g = f
        # (removed a duplicated `h = d + e` recomputation that was here)
        i1 = 2 * g * math.sqrt(3)
        i2 = i1 - e
        vx1 = (self.vx/100) - (2 * self.r)/100 + (2 * self.r * math.sin(radians(45)))/100
        factorx = i1 / (self.vx/100)
        i1x = factorx * vx1
        i2x = i1x - e
        g1 = math.tan(radians(30)) * 0.5 * i1x
        sharpdist = (self.vx/100) / math.tan(radians(30))
        # Corner style decides the bar's end-cut and the reported saw length.
        if self.angletype:
            _a1x = g1-(self.vx/100)
            _a1y = (0.5 * i1x) - sharpdist
            _b1x = g1
            _b1y = (0.5 * i1x)
            _c1x = g1
            _c1y = (-0.5 * i1x)
            _d1x = g1 - (self.vx/100)
            _d1y = (-0.5 * i1x) + sharpdist
            _z = (self.vy / 200)
            self.sl = i1x*100
        else:
            _a1x = g1-(self.vx/100)
            _a1y = (0.5 * i1x) - sharpdist
            _b1x = g1
            _b1y = (0.5 * i1x) - e
            _c1x = g1
            _c1y = (-0.5 * i1x)
            _d1x = g1 - (self.vx/100)
            _d1y = (-0.5 * i1x) + sharpdist - e
            _z = (self.vy / 200)
            self.sl = i2x*100
        # One box-shaped bar (8 verts, 6 quads).
        verts = [(_a1x,_a1y,_z),(_b1x,_b1y,_z),(_c1x,_c1y,_z),(_d1x,_d1y,_z),(_a1x,_a1y,-_z),(_b1x,_b1y,-_z),(_c1x,_c1y,-_z),(_d1x,_d1y,-_z)]
        faces = [(0,3,2,1),(1,2,6,5),(5,6,7,4),(3,0,4,7),(2,3,7,6),(0,1,5,4)]
        me = bpy.data.meshes.new('OrdTri_Mesh')
        me.from_pydata(verts,[],faces)
        me.update()
        ob = bpy.data.objects.new('OrdTri',me)
        ob.location = (0,0,0)
        bpy.context.scene.objects.link(ob)
        ob.select=True
        bpy.context.scene.objects.active=ob
        if self.smooth:
            bpy.ops.object.shade_smooth()
            bpy.context.object.data.use_auto_smooth=True
        if self.bevel:
            bpy.ops.object.modifier_add(type='BEVEL')
            bpy.context.object.modifiers["Bevel"].offset_type = 'WIDTH'
            bpy.context.object.modifiers["Bevel"].segments = 5
            bpy.context.object.modifiers["Bevel"].width = self.bevelr/100
            if self.beveltype:
                # Weight-limited bevel: only the four long edges get weight 1.
                bpy.context.object.modifiers["Bevel"].limit_method='WEIGHT'
                me.use_customdata_edge_bevel = True
                me.edges[3].bevel_weight=1.0
                me.edges[6].bevel_weight=1.0
                me.edges[7].bevel_weight=1.0
                me.edges[10].bevel_weight=1.0
        if self.edgesplit:
            bpy.ops.object.modifier_add(type='EDGE_SPLIT')
            bpy.context.object.modifiers['EdgeSplit'].split_angle = 3.14159
        # First triangle: the bar plus two linked copies at 120/240 degrees.
        bpy.ops.object.duplicate(linked=True)
        obj=bpy.context.active_object
        obj.rotation_euler=(0,0,math.radians(120))
        bpy.ops.object.duplicate(linked=True)
        obj1=bpy.context.active_object
        obj1.rotation_euler=(0,0,math.radians(240))
        ob.select=True
        obj.select=True
        obj1.select=True
        # Second triangle: flip and tilt by the tetrahedral dihedral angle,
        # then two more rotated copies to complete the tangle.
        bpy.ops.object.duplicate_move_linked()
        bpy.ops.transform.rotate(value=math.radians(180), axis=(0,0,1))
        bpy.ops.transform.rotate(value=math.radians(70.5287793655), axis=(1,0,0))
        bpy.ops.object.duplicate_move_linked()
        bpy.ops.transform.rotate(value=math.radians(120), axis=(0,0,1))
        bpy.ops.object.duplicate_move_linked()
        bpy.ops.transform.rotate(value=math.radians(120), axis=(0,0,1))
        return {'FINISHED'}
class KTXSpiroGraph2(bpy.types.Operator):
    """Create a spirograph-style curve mesh by evaluating three user-editable
    parametric expressions (x, y, z as functions of angle `a`) per step."""
    bl_idname="wm.ktx_spirograph_2"
    bl_label="KTX Make a Spirograph 2"
    bl_options={'REGISTER','UNDO', 'PRESET'}

    # Factors f1..f6 available to the expressions (UI value is 10x the
    # value used, for finer slider control).
    fact1 = bpy.props.FloatProperty(name="Factor 1",
        description="Factor 1",
        default=5.0)
    fact2 = bpy.props.FloatProperty(name="Factor 2",
        description="Factor 2",
        default=28.0)
    fact3 = bpy.props.FloatProperty(name="Factor 3",
        description="Factor 3",
        default=7.0)
    fact4 = bpy.props.FloatProperty(name="Factor 4",
        description="Factor 4",
        default=8.0)
    fact5 = bpy.props.FloatProperty(name="Factor 5",
        description="Factor 5",
        default=0.0)
    fact6 = bpy.props.FloatProperty(name="Factor 6",
        description="Factor 6",
        default=12.0)
    functx = bpy.props.StringProperty(name="Function x",
        description="Function x",
        default="f1*math.cos(f2*a)+f3*math.sin(f4*a)+f5*math.cos(f6*a)")
    functy = bpy.props.StringProperty(name="Function y",
        description="Function y",
        default="f1*math.sin(f2*a)+f3*math.cos(f4*a)+f5*math.sin(f6*a)")
    functz = bpy.props.StringProperty(name="Function z",
        description="Function z",
        default="f5*math.sin(f6*a)")
    endangle=bpy.props.IntProperty(name="Angle",
        description="Angle",
        default=3600)
    increment=bpy.props.FloatProperty(name="Angle Increment",
        description="Angle Increment",
        default=1.0)

    def execute(self,context):
        import math, bmesh
        from math import radians
        msh=bpy.data.meshes.new('KTX Spiral')
        obj=bpy.data.objects.new('KTX Spiral',msh)
        bpy.data.scenes[0].objects.link(obj)
        s=bmesh.new()
        z=0.0
        angle=0.0
        f1=self.fact1/10
        f2=self.fact2/10
        f3=self.fact3/10
        f4=self.fact4/10
        f5=self.fact5/10
        f6=self.fact6/10
        while angle<=self.endangle:
            a=math.radians(angle)
            # SECURITY NOTE: eval() executes arbitrary user-entered Python.
            # Acceptable in a local artist tool; never feed it untrusted input.
            x=eval(self.functx)
            y=eval(self.functy)
            z=eval(self.functz)
            s.verts.new((x,y,z))
            if angle > 0:
                s.verts.ensure_lookup_table()
                s.edges.new((s.verts[-2],s.verts[-1]))
            # (removed an `if angle>self.endangle` branch here: it was
            # unreachable inside `while angle<=self.endangle`)
            angle+=self.increment
        s.to_mesh(msh)
        obj.data.update()
        return {'FINISHED'}
class KTXObjLib(bpy.types.Operator):
    """Append an object from the bundled KTX_Objects.blend library into the
    current scene, chosen from a dynamically built enum."""
    bl_idname="wm.ktx_objlib"
    bl_label="KTX Object Library"
    bl_options={'REGISTER','UNDO'}

    def mode_options(self,context):
        # EnumProperty items callback: enumerate every object name found in
        # the library blend file.
        # NOTE(review): the library is looked up in os.path.sys.path[1];
        # this depends on interpreter path ordering -- confirm it resolves
        # to the add-on's directory on all installs.
        import os
        filepath = os.path.join(os.path.sys.path[1],'KTX_Objects.blend')
        with bpy.data.libraries.load(filepath, link=True) as (data_from, data_to):
            return [(ob,ob,"") for ob in data_from.objects]

    count=bpy.props.EnumProperty(items=mode_options,
        description="KTX Object Library",
        name="Objects found in Library")

    def execute(self,context):
        import os
        scn = bpy.context.scene
        filepath = os.path.join(os.path.sys.path[1],'KTX_Objects.blend')
        # Append (link=False) all library objects whose name starts with the
        # selected enum entry; the data is loaded when the `with` exits.
        with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
            data_to.objects = [name for name in data_from.objects if name.startswith(self.count)]
        for obj in data_to.objects:
            if obj is not None:
                scn.objects.link(obj)
        return {'FINISHED'}
class KTXBottle(bpy.types.Operator):
    """Generate a parametric screw-top bottle and matching cap as two mesh
    objects ("Bottle" and "Cap").

    Both are built vert-by-vert with bmesh spin operations: a helical band
    for the thread, inset twice to raise the thread profile, plus bottom/top
    rims and (optionally) the full bottle body, inside wall and cap shell.
    """
    bl_idname="wm.ktx_bottle_1"
    bl_label="KTX Create a Bottle and Cap"
    bl_options={'REGISTER','UNDO', 'PRESET'}

    # --- operator properties (redo-panel inputs) ---
    expert_mode = bpy.props.BoolProperty(name="Expert Mode",
        description="Tweak bottle/cap shape On/Off",
        default=False)
    hide_bottle = bpy.props.BoolProperty(name="Hide Bottle",
        description="Hide Bottle On/Off",
        default=False)
    hide_cap = bpy.props.BoolProperty(name="Hide Cap",
        description="Hide Cap On/Off",
        default=False)
    comp_bot = bpy.props.BoolProperty(name="Generate Complete Bottle",
        description="Generate Complete Bottle or only Threads",
        default=True)
    overall_scale = bpy.props.FloatProperty(name="Overall Scale",
        description="Overall Scale",
        default=0.1)
    # Cylinder resolution; also the number of spin steps per turn.
    v = bpy.props.IntProperty(name="Vertices",
        description="Cylinder divided into this many Vertices",
        default=12,min=3,max=24)
    thread_height = bpy.props.FloatProperty(name="Thread Height",
        description="Thread Height",
        default=1.0)
    thread_steps = bpy.props.IntProperty(name="Thread Steps",
        description="Thread Steps",
        default=28)
    neck_diameter = bpy.props.FloatProperty(name="Neck Diameter",
        description="Neck Diameter",
        default=2.0)
    trap = bpy.props.FloatProperty(name="Trapezium Thread",
        description="Trapezium Thread",
        default=0.15)
    depth = bpy.props.FloatProperty(name="Depth",
        description="Depth",
        default=0.44)
    eoff_onoff = bpy.props.BoolProperty(name="Enlarge Cap",
        description="Enlarge Cap (to prevent intersection between threads",
        default=False)
    eoffset = bpy.props.IntProperty(name="Enlarge Cap Percentage",
        description="Percentage of Neck Diameter",
        default=1)
    skip_onoff = bpy.props.BoolProperty(name="Step Thread Bottle",
        description="Step Thread Bottle",
        default=False)
    soffset = bpy.props.IntProperty(name="Skip Offset Bottle",
        description="Skip Offset Bottle",
        default=4)
    sckip_onoff = bpy.props.BoolProperty(name="Step Thread Cap",
        description="Step Thread Cap",
        default=False)
    scoffset = bpy.props.IntProperty(name="Skip Offset Cap",
        description="Skip Offset Cap",
        default=4)
    remdoub_onoff = bpy.props.BoolProperty(name="Remove Doubles",
        description="Remove Doubles On/Off",
        default=True)
    doubles = bpy.props.FloatProperty(name="Merge Verts Dist",
        description="Merge Verts Dist",
        default=0.01)
    smooth_onoff = bpy.props.BoolProperty(name="Smoothing",
        description="Smoothing Doubles On/Off",
        default=False)
    subs_onoff = bpy.props.BoolProperty(name="SubSurf",
        description="SubSurf On/Off",
        default=True)
    nl = bpy.props.FloatProperty(name="Neck Length",
        description="Neck Length",
        default=0.1)
    # x1..x5/z1..z5: bottle outside profile control points (expert mode).
    x1 = bpy.props.FloatProperty(name="x1",
        description="x1",
        default=4.0,)
    z1 = bpy.props.FloatProperty(name="z1",
        description="z1",
        default=2.09)
    x2 = bpy.props.FloatProperty(name="x2",
        description="x2",
        default=4.0)
    z2 = bpy.props.FloatProperty(name="z2",
        description="z2",
        default=5.0)
    x3 = bpy.props.FloatProperty(name="x3",
        description="x3",
        default=3.4)
    z3 = bpy.props.FloatProperty(name="z3",
        description="z3",
        default=15.0)
    x4 = bpy.props.FloatProperty(name="x4",
        description="x4",
        default=2.0)
    z4 = bpy.props.FloatProperty(name="z4",
        description="z4",
        default=15.0)
    x5 = bpy.props.FloatProperty(name="x5",
        description="x5",
        default=1.2)
    z5 = bpy.props.FloatProperty(name="z5",
        description="z5",
        default=15.0)
    tl = bpy.props.FloatProperty(name="Top Length",
        description="Top Length",
        default=0.1)
    tt = bpy.props.FloatProperty(name="Top Tickness",
        description="Top Tickness",
        default=0.45)
    # x6..x8/z6..z8: bottle inside profile; x9,x10/z9,z10: cap profile.
    x6 = bpy.props.FloatProperty(name="x6",
        description="x6",
        default=3.14)
    z6 = bpy.props.FloatProperty(name="z6",
        description="z6",
        default=2.0)
    x7 = bpy.props.FloatProperty(name="x7",
        description="x7",
        default=2.7)
    z7 = bpy.props.FloatProperty(name="z7",
        description="z7",
        default=12.0)
    x8 = bpy.props.FloatProperty(name="x8",
        description="x8",
        default=1.9)
    z8 = bpy.props.FloatProperty(name="z8",
        description="z8",
        default=14.0)
    x9 = bpy.props.FloatProperty(name="x9",
        description="x9",
        default=1.9)
    z9 = bpy.props.FloatProperty(name="z9",
        description="z9",
        default=4.5)
    x10 = bpy.props.FloatProperty(name="x10",
        description="x10",
        default=0.1)
    z10 = bpy.props.FloatProperty(name="z10",
        description="z10",
        default=5.0)

    def draw(self, context):
        # Custom redo-panel layout: only show the options relevant to the
        # current toggles (complete bottle, offsets, expert profiles).
        layout = self.layout
        col = layout.column()
        col.prop(self, 'comp_bot')
        if self.comp_bot:
            col.prop(self, 'expert_mode')
        col.separator()
        col.prop(self, 'hide_bottle')
        col.prop(self, 'hide_cap')
        col.separator()
        col.prop(self, 'overall_scale')
        col.prop(self, 'v')
        col.prop(self, 'thread_height')
        col.prop(self, 'thread_steps')
        col.prop(self, 'neck_diameter')
        col.prop(self, 'trap')
        col.prop(self, 'depth')
        if self.comp_bot:
            col.prop(self, 'nl')
            col.prop(self, 'tl')
            col.prop(self, 'tt')
        col.separator()
        col.prop(self, 'eoff_onoff')
        if self.eoff_onoff:
            col.prop(self, 'eoffset')
        col.separator()
        col.prop(self, 'skip_onoff')
        if self.skip_onoff:
            col.prop(self, 'soffset')
        col.prop(self, 'sckip_onoff')
        if self.sckip_onoff:
            col.prop(self, 'scoffset')
        col.separator()
        col.prop(self, 'remdoub_onoff')
        if self.remdoub_onoff:
            col.prop(self, 'doubles')
        col.separator()
        col.prop(self, 'smooth_onoff')
        col.prop(self, 'subs_onoff')
        if self.expert_mode and self.comp_bot:
            col.label(text='Bottle Outside Shape')
            col.prop(self,'x1')
            col.prop(self,'z1')
            col.prop(self,'x2')
            col.prop(self,'z2')
            col.prop(self,'x3')
            col.prop(self,'z3')
            col.prop(self,'x4')
            col.prop(self,'z4')
            col.prop(self,'x5')
            col.prop(self,'z5')
            col.separator()
            col.label(text='Bottle Inside Shape')
            col.prop(self,'x6')
            col.prop(self,'z6')
            col.prop(self,'x7')
            col.prop(self,'z7')
            col.prop(self,'x8')
            col.prop(self,'z8')
            col.separator()
            col.label(text='Cap Shape')
            col.prop(self,'x9')
            col.prop(self,'z9')
            col.prop(self,'x10')
            col.prop(self,'z10')

    def execute(self,context):
        import math, bmesh
        from math import radians
        #------ middle band: helical thread (comment was Dutch "midden")
        bm=bmesh.new()
        v1=bm.verts.new((self.neck_diameter, 0.0, self.thread_height))
        v2=bm.verts.new((self.neck_diameter, 0.0, 0.0))
        bm.edges.new((v1,v2))
        # Spin the edge around Z while translating it upward -> helix band.
        bmesh.ops.spin(bm,geom=bm.verts[:]+bm.edges[:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,self.thread_height/self.v),angle=self.thread_steps * ((2.0 * math.pi)/self.v),steps=self.thread_steps,use_duplicate=0)
        bm.faces.ensure_lookup_table()
        gg=bm.faces[:]
        if self.skip_onoff:
            # Interrupted thread: drop every soffset-th band face.
            for i in range(0,self.thread_steps,self.soffset):
                gg.remove(bm.faces[i])
        # Two insets raise the trapezium thread profile out of the band.
        bmesh.ops.inset_region(bm,faces=gg,thickness=self.thread_height/5.0,depth=0.0,use_boundary=1,use_even_offset=1,use_relative_offset=0,use_interpolate=0)
        bmesh.ops.inset_region(bm,faces=gg,thickness=self.trap,depth=self.depth,use_boundary=0,use_even_offset=1,use_relative_offset=0,use_interpolate=0)
        #----------Bottom rim: one helical turn, extruded down and flattened
        v1=bm.verts.new((self.neck_diameter, 0.0, 0.0))
        bmesh.ops.spin(bm,geom=[v1],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,self.thread_height/self.v),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
        # bm.edges.ensure_lookup_table()
        ret=bmesh.ops.extrude_edge_only(bm,edges=bm.edges[-self.v:])
        geom_new = ret["geom"]
        del ret
        verts_new=[ele for ele in geom_new if isinstance(ele, bmesh.types.BMVert)]
        bmesh.ops.translate(bm,verts=verts_new,vec=(0.0,0.0,-0.5))
        bmesh.ops.scale(bm,verts=verts_new,vec=(1.0,1.0,0.0))
        #---------BottleBody: revolve the outside profile (x1..x5/z1..z5)
        if self.comp_bot:
            v1=bm.verts.new((self.neck_diameter, 0.0, 0.0))
            v2=bm.verts.new((self.neck_diameter, 0.0, -self.nl))
            v3=bm.verts.new((self.neck_diameter+self.x1, 0.0, -self.z1))
            v4=bm.verts.new((self.neck_diameter+self.x2, 0.0, -self.z2))
            v5=bm.verts.new((self.neck_diameter+self.x3, 0.0, -self.z3))
            v6=bm.verts.new((self.neck_diameter+self.x4, 0.0, -self.z4))
            v7=bm.verts.new((self.neck_diameter+self.x5, 0.0, -self.z5))
            v8=bm.verts.new((0.0, 0.0, -self.z5))
            bm.edges.new((v1,v2))
            bm.edges.new((v2,v3))
            bm.edges.new((v3,v4))
            bm.edges.new((v4,v5))
            bm.edges.new((v5,v6))
            bm.edges.new((v6,v7))
            bm.edges.new((v7,v8))
            bmesh.ops.spin(bm,geom=bm.verts[-8:]+bm.edges[-7:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,0.0),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
        #----------Top rim: same as bottom, rotated to meet the helix end
        aa=((self.thread_height/self.v)*self.thread_steps)+self.thread_height
        bb=self.thread_steps%self.v
        v1=bm.verts.new((self.neck_diameter, 0.0, aa))
        bmesh.ops.rotate(bm,verts=[v1],cent=(0.0,0.0,0.0),matrix=mathutils.Matrix.Rotation(((2*math.pi)/self.v)*bb,3,'Z'))
        bmesh.ops.spin(bm,geom=[v1],axis=(0.0,0.0,-1.0),cent=(0,0,0),dvec=(0,0,-self.thread_height/self.v),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
        # bm.edges.ensure_lookup_table()
        ret=bmesh.ops.extrude_edge_only(bm,edges=bm.edges[-self.v:])
        geom_new = ret["geom"]
        del ret
        verts_new=[ele for ele in geom_new if isinstance(ele, bmesh.types.BMVert)]
        bmesh.ops.scale(bm,verts=verts_new,vec=(1.0,1.0,0.0))
        ret_boven=bmesh.ops.translate(bm,verts=verts_new,vec=(0.0,0.0,aa))
        #---------BottleInside: revolve the inner wall profile (x6..x8)
        if self.comp_bot:
            v1=bm.verts.new((self.neck_diameter, 0.0, aa))
            v2=bm.verts.new((self.neck_diameter, 0.0, aa+self.tl))
            v3=bm.verts.new((self.neck_diameter-self.tt, 0.0, aa+self.tl))
            v3a=bm.verts.new((self.neck_diameter-self.tt, 0.0, aa-self.tl))
            v4=bm.verts.new((self.neck_diameter-self.tt, 0.0, -1.0))
            v4a=bm.verts.new((self.neck_diameter-self.tt, 0.0, -1.2))
            v5=bm.verts.new((self.neck_diameter+self.x6, 0.0, -self.z6))
            v6=bm.verts.new((self.neck_diameter+self.x7, 0.0, -self.z7))
            v7=bm.verts.new((self.neck_diameter+self.x8, 0.0, -self.z8))
            v8=bm.verts.new((0.0, 0.0, -self.z8))
            bm.edges.new((v8,v7))
            bm.edges.new((v7,v6))
            bm.edges.new((v6,v5))
            bm.edges.new((v5,v4a))
            bm.edges.new((v4a,v4))
            bm.edges.new((v4,v3a))
            bm.edges.new((v3a,v3))
            bm.edges.new((v3,v2))
            bm.edges.new((v2,v1))
            bmesh.ops.spin(bm,geom=bm.verts[-10:]+bm.edges[-9:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,0.0),angle=(2.0*math.pi),steps=self.v,use_duplicate=0)
        #---------Generate Bottle: finalize bmesh into the "Bottle" object
        if self.remdoub_onoff and self.doubles != 0.0:
            bmesh.ops.remove_doubles(bm, verts=bm.verts[:], dist=self.doubles)
        bmesh.ops.scale(bm,vec=(self.overall_scale,self.overall_scale,self.overall_scale),verts=bm.verts[:])
        me = bpy.data.meshes.new("Bottle_Mesh")
        bm.to_mesh(me)
        bm.free()
        if self.smooth_onoff:
            pols = me.polygons
            for p in pols:
                p.use_smooth = True
        scene = bpy.context.scene
        obj = bpy.data.objects.new("Bottle", me)
        obj.location = bpy.context.scene.cursor_location
        obj.location.z = (obj.location.z + self.z5)*self.overall_scale
        scene.objects.link(obj)
        if self.subs_onoff:
            obj.modifiers.new("subd", type='SUBSURF')
            obj.modifiers['subd'].levels = 3
        bpy.context.scene.objects.active = obj
        if self.hide_bottle:
            bpy.context.object.hide = True
        else:
            bpy.context.object.hide = False
        #------Cap (comment was Dutch "Dop")
        #------Thread (comment was Dutch "Draad"); `ca` enlarges the cap
        # radius by a percentage of the neck diameter to give clearance.
        if self.eoff_onoff:
            ca=(self.neck_diameter/100.0)*self.eoffset
        else:
            ca=0.0
        bm=bmesh.new()
        v1=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, self.thread_height))
        v2=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, 0.0))
        bm.edges.new((v2,v1))
        bmesh.ops.spin(bm,geom=bm.verts[:]+bm.edges[:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,self.thread_height/self.v),angle=self.thread_steps * ((2.0 * math.pi)/self.v),steps=self.thread_steps,use_duplicate=0)
        bm.faces.ensure_lookup_table()
        gg=bm.faces[:]
        if self.sckip_onoff:
            for i in range(0,self.thread_steps,self.scoffset):
                gg.remove(bm.faces[i])
        bmesh.ops.inset_region(bm,faces=gg,thickness=self.thread_height/5.0,depth=0.0,use_boundary=1,use_even_offset=1,use_relative_offset=0,use_interpolate=0)
        bmesh.ops.inset_region(bm,faces=gg,thickness=self.trap,depth=self.depth,use_boundary=0,use_even_offset=1,use_relative_offset=0,use_interpolate=0)
        #----------Bottom rim of the cap thread
        v1=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, 0.0))
        bmesh.ops.spin(bm,geom=[v1],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,self.thread_height/self.v),angle=(2.0*math.pi),steps=self.v,use_duplicate=0)
        # bm.edges.ensure_lookup_table()
        ret=bmesh.ops.extrude_edge_only(bm,edges=bm.edges[-self.v:])
        geom_new = ret["geom"]
        del ret
        verts_new=[ele for ele in geom_new if isinstance(ele, bmesh.types.BMVert)]
        bmesh.ops.translate(bm,verts=verts_new,vec=(0.0,0.0,-0.5))
        bmesh.ops.scale(bm,verts=verts_new,vec=(1.0,1.0,0.0))
        #----------Top rim of the cap thread
        aa=((self.thread_height/self.v)*self.thread_steps)+self.thread_height
        bb=self.thread_steps%self.v
        v1=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, aa))
        bmesh.ops.rotate(bm,verts=[v1],cent=(0.0,0.0,0.0),matrix=mathutils.Matrix.Rotation(((2*math.pi)/self.v)*bb,3,'Z'))
        bmesh.ops.spin(bm,geom=[v1],axis=(0.0,0.0,-1.0),cent=(0,0,0),dvec=(0,0,-self.thread_height/self.v),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
        # bm.edges.ensure_lookup_table()
        ret=bmesh.ops.extrude_edge_only(bm,edges=bm.edges[-self.v:])
        geom_new = ret["geom"]
        del ret
        verts_new=[ele for ele in geom_new if isinstance(ele, bmesh.types.BMVert)]
        bmesh.ops.scale(bm,verts=verts_new,vec=(1.0,1.0,0.0))
        ret_boven=bmesh.ops.translate(bm,verts=verts_new,vec=(0.0,0.0,aa))
        #---------Cap Inside: flat top disc
        if self.comp_bot:
            v1=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, aa))
            v2=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, aa+self.tl))
            v3=bm.verts.new((self.neck_diameter+self.depth-self.tt+ca, 0.0, aa+self.tl))
            v4=bm.verts.new((0.0, 0.0, aa+self.tl))
            bm.edges.new((v4,v3))
            bm.edges.new((v3,v2))
            bm.edges.new((v2,v1))
            bmesh.ops.spin(bm,geom=bm.verts[-4:]+bm.edges[-3:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,0.0),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
            #---------CapBody: revolve the outer cap profile (x9/x10)
            v1=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, 0.0))
            v2=bm.verts.new((self.neck_diameter+self.depth+ca, 0.0, -self.nl))
            v3=bm.verts.new((self.neck_diameter+self.depth+self.tt+ca, 0.0, self.nl))
            v4=bm.verts.new((self.neck_diameter+self.depth+self.x9+ca, 0.0, self.z9))
            v5=bm.verts.new((self.neck_diameter+self.depth+self.x10+ca, 0.0, self.z10))
            v6=bm.verts.new((0.0, 0.0, self.z10))
            bm.edges.new((v6,v5))
            bm.edges.new((v5,v4))
            bm.edges.new((v4,v3))
            bm.edges.new((v3,v2))
            bm.edges.new((v2,v1))
            bmesh.ops.spin(bm,geom=bm.verts[-6:]+bm.edges[-5:],axis=(0.0,0.0,1.0),cent=(0,0,0),dvec=(0,0,0.0),angle=(2.0 * math.pi),steps=self.v,use_duplicate=0)
        #---------Generate Cap: finalize bmesh into the "Cap" object
        if self.remdoub_onoff and self.doubles != 0.0:
            bmesh.ops.remove_doubles(bm, verts=bm.verts[:], dist=self.doubles)
        bmesh.ops.scale(bm,vec=(self.overall_scale,self.overall_scale,self.overall_scale),verts=bm.verts[:])
        me = bpy.data.meshes.new("Cap_Mesh")
        bm.to_mesh(me)
        bm.free()
        if self.smooth_onoff:
            pols = me.polygons
            for p in pols:
                p.use_smooth = True
        scene = bpy.context.scene
        obj = bpy.data.objects.new("Cap", me)
        obj.location = bpy.context.scene.cursor_location
        obj.location.z = (obj.location.z + self.thread_height/2 + self.z5)*self.overall_scale
        scene.objects.link(obj)
        if self.subs_onoff:
            obj.modifiers.new("subd", type='SUBSURF')
            obj.modifiers['subd'].levels = 3
        bpy.context.scene.objects.active = obj
        # Recalculate normals outward on the finished cap.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.normals_make_consistent(inside=False)
        bpy.ops.object.editmode_toggle()
        if self.hide_cap:
            bpy.context.object.hide = True
        else:
            bpy.context.object.hide = False
        return {'FINISHED'}
class KTXPanel( bpy.types.Panel ):
    """3D View N-panel listing every KTX tool operator as a button."""
    bl_label = "KosteX Tools"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Custom"
    bl_context = "objectmode"

    # Operator idnames in display order; None renders a separator row.
    _BUTTONS = (
        "wm.ktx_bottle_1",
        "wm.ktx_tri_tangle",
        "wm.ktx_function",
        "wm.ktx_cylinders",
        "wm.ktx_cylinder_grid",
        "wm.ktx_object_grid",
        "wm.ktx_polar_array",
        "wm.ktx_spiral_circles",
        "wm.ktx_spirograph_2",
        "wm.ktx_add_random_cubes",
        "wm.ktx_add_random_copies",
        None,
        "wm.ktx_objlib",
        None,
        "wm.ktx_erase_all_materials",
        "wm.ktx_erase_unused_textures",
        "wm.ktx_erase_unused_palettes",
        None,
        "wm.ktx_add_subsurf_creases",
        "wm.ktx_polish",
        None,
        "wm.ktx_assign_materials",
        "wm.ktx_assign_random_diffuse_colors",
        "wm.ktx_add_glossy_mix_shaders",
        "wm.ktx_set_viewport_color",
    )

    def draw( self, context ):
        # One fresh column per row, exactly as the original hand-unrolled
        # new_col().column() calls did.
        for idname in self._BUTTONS:
            row = self.layout.column().column()
            if idname is None:
                row.separator()
            else:
                row.operator(idname)
def register():
    """Register every class in this module with Blender (2.7x bulk API)."""
    bpy.utils.register_module(__name__)
def unregister():
    """Remove this module's classes from Blender when the add-on is disabled."""
    bpy.utils.unregister_module(__name__)
# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
| JT-a/blenderpython279 | scripts/addons_extern/KTX_Tools.py | KTX_Tools.py | py | 53,694 | python | en | code | 5 | github-code | 50 |
3712147461 | import numpy as np
from matplotlib import pyplot as plt
from PIL import Image as im
# Histogram equalization of an image, implemented from scratch with numpy.
# NOTE(review): assumes an 8-bit single-channel (grayscale) image; an RGB
# input would pool all channels into one histogram -- confirm inputs.
# Paths are hard-coded Windows user paths.
# read the image and store it in the img object
img = im.open('c:\\Users\\User\\Downloads\\black.jpg')
# display the source image
img.show()
# convert image to array
img_to_array = np.asarray(img)
# convert array to flat (1-D) array so intensities can be counted
img_to_flatarray=img_to_array.flatten()
#print(img_to_flatarray)
# count occurrences of each intensity value (histogram, bins 0..255)
array_hist = np.bincount(img_to_flatarray, minlength=256)
#print(array_hist)
# find the total number of pixels
total_pixel = np.sum(array_hist)
#print(total_pixel)
# normalize the counts into a probability distribution
array_hist = array_hist/total_pixel
# cumulative distribution function (CDF)
cumulative_array = np.cumsum(array_hist)
#print(cumulative_array)
# build the transform: scale CDF by the max grey level and floor it
transform = np.floor(255 * cumulative_array).astype(np.uint8)
#print(transform)
# convert 1-D array to a 1-D list of original pixel values
list_image = list(img_to_flatarray )
#print(list_image)
# remap every pixel through the transform to equalize the histogram
equalise_list = [transform[k] for k in list_image]
# reshape back to the original image shape
equalise_img_array = np.reshape(np.asarray(equalise_list), img_to_array.shape)
# convert the array back to an image
final_img=im.fromarray(equalise_img_array)
# save the equalized image next to the input
final_img.save('c:\\Users\\User\\Downloads\\final_image.jpg')
# --- histogram of the equalized image, for comparison ---
# count occurrences of each intensity value
equalized_histogram_array = np.bincount(equalise_img_array.flatten(), minlength=256)
# find the total number of pixels
total2_pixels = np.sum(equalized_histogram_array)
# normalize the counts into a probability distribution
norm_values = equalized_histogram_array/total2_pixels
# cumulative sum (computed but only the normalized histogram is plotted)
cum_sum2 = np.cumsum(norm_values)
# plot the histograms before and after equalization
# NOTE(review): plt.show() is never called, so these figures only appear in
# interactive backends (e.g. a notebook) -- confirm the intended environment.
# histogram before equalization
plt.figure()
plt.plot(array_hist)
plt.title("BEFORE EQUALIZATION")
plt.xlabel('pixel intensity ')
plt.ylabel('distribution')
# histogram after equalization
plt.figure()
plt.plot(norm_values)
plt.title("AFTER EQUALIZATION")
plt.xlabel('pixel intensity')
plt.ylabel('distribution')
imgfinal = im.open('c:\\Users\\User\\Downloads\\final_image.jpg')
# show the final image after histogram equalization (via PIL, not pyplot)
imgfinal.show()
| gupta06rashika/Histogram-Equalization-algorithm-for-a-given-gray-scale-image | hist.py | hist.py | py | 2,515 | python | en | code | 1 | github-code | 50 |
11906126869 | """
This service makes AE.Cache use a memcached backend rather than disk
for the component cache.
To turn this on, define memcacheCacheBackend to be a list of ip
address of memcache servers. If it is None, this will fall back to
the usual skunk cache.
"""
import cPickle
import memcache
import AE.Cache
from Logger import logException, ERROR
import SkunkWeb.Configuration as C
# Register this service's configuration knobs with their defaults:
#   memcacheCacheBackend -- list of memcached server addresses; None keeps
#                           the normal on-disk component cache.
#   memcachePathPrefix   -- key namespace prefix for components in memcached.
C.mergeDefaults(memcacheCacheBackend=None,
                memcachePathPrefix='component_')
# Cache of memcache.Client instances keyed by the (sorted) server tuple.
_clients={}
def _get_memcache_client():
    """Return (and memoize) a memcache.Client for the configured servers.

    Returns None when memcacheCacheBackend is unset, which makes the
    load/store wrappers below fall back to the disk component cache.
    """
    servers=C.memcacheCacheBackend
    if not servers:
        return None
    # Key the client cache on a sorted tuple. sorted() builds a new list,
    # unlike the old servers.sort() which mutated the configuration list
    # in place as a side effect.
    key=tuple(sorted(servers))
    try:
        return _clients[key]
    except KeyError:
        client=memcache.Client(key, False)
        _clients[key]=client
        return client
def store_component(path, value, svr):
    """Store a cached component in memcached; fall back to the disk cache
    when no memcached servers are configured.

    The cache is best-effort: memcached errors are logged and swallowed so
    the request itself never fails on a cache write.
    """
    client=_get_memcache_client()
    if not client:
        return AE.Cache._disk_store(path, value, svr)
    fullpath=C.memcachePathPrefix+path
    pickled=cPickle.dumps(value, cPickle.HIGHEST_PROTOCOL)
    # NOTE(review): assumes `value` is a dict whose optional 'exp_time' is
    # an expiry understood by memcached's set() -- confirm upstream format.
    exp_time=value.get('exp_time', 0)
    try:
        # (return value of set() was unused; no longer bound)
        client.set(fullpath, pickled, exp_time)
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit pass.
        ERROR("exception storing component at path %s" % fullpath)
        logException()
def load_component(path, svr):
    """Load a cached component from memcached; fall back to the disk cache
    when no memcached servers are configured.

    Returns None (i.e. "not cached") on a memcached miss or error, letting
    the caller regenerate the component.
    """
    client=_get_memcache_client()
    if not client:
        return AE.Cache._disk_load(path, svr)
    fullpath=C.memcachePathPrefix+path
    try:
        data=client.get(fullpath)
    except Exception:
        # Narrowed from a bare except:; also log the traceback so this
        # matches store_component's error reporting.
        ERROR("exception reaching memcached")
        logException()
    else:
        if data is not None:
            return cPickle.loads(data)
# Monkey-patch AE.Cache: keep handles to the original disk implementations
# under _disk_load/_disk_store, then route component load/store through the
# memcached-aware wrappers defined above.
AE.Cache._disk_load=AE.Cache._loadCachedComponent
AE.Cache._disk_store=AE.Cache._storeCachedComponent
AE.Cache._loadCachedComponent=load_component
AE.Cache._storeCachedComponent=store_component
| BackupTheBerlios/skunkweb-svn | tags/SKUNKWEB_RELEASE_3_4_4/SkunkWeb/Services/aememcache.py | aememcache.py | py | 1,828 | python | en | code | 1 | github-code | 50 |
29976365994 | from __future__ import print_function
from pysnmp.entity.rfc3413.oneliner import cmdgen
from config import SNMP_DETAILS
def collect_snmp_data(hostname, oid):
    """Fetch a single OID from `hostname` via SNMP GET.

    Uses the community string and port from config.SNMP_DETAILS.
    Returns the pretty-printed varbind on success, or '' on any SNMP error
    (callers treat the empty string as "no data").
    """
    snmp_target = (hostname, SNMP_DETAILS['port'])
    cmd_gen = cmdgen.CommandGenerator()
    (error_detected, error_status, error_index, snmp_data) = \
        cmd_gen.getCmd(cmdgen.CommunityData(SNMP_DETAILS['community_string']), cmdgen.UdpTransportTarget(snmp_target),
                       oid, lookupNames=True, lookupValues=True)
    if not error_detected:
        return snmp_data[0].prettyPrint()
    else:
        # error_status/error_index carry details; deliberately discarded to
        # keep the '' sentinel contract. (Removed the dead commented-out
        # debug prints that used to live here.)
        return ''
| rfdmeshkath/dcim_tool | networking_scripts/snmp.py | snmp.py | py | 879 | python | en | code | 0 | github-code | 50 |
24161153551 | from mapeventApp.models import AddEvent,Staff
from django.shortcuts import redirect, render
from django.core.paginator import Paginator
import datetime
def map(request):
    """Map page view: renders upcoming events and dispatches the POSTed
    sub-actions (search / coordinates / event booking / event detail).

    NOTE: shadows the builtin `map`; the name is kept because the URLconf
    imports it.
    """
    # Upcoming events (from today onward), soonest first. This setup was
    # commented out, which made the fallthrough render and the 'lat' branch
    # raise NameError on `maping`/`paging`/`maping1`; restored here.
    date = datetime.date.today()
    maping = AddEvent.objects.filter(fromdate__gte=date).all().order_by('fromdate')
    staff = Staff.objects.all()
    pagination = Paginator(maping, 2)
    page_number = request.GET.get('page')
    # get_page() tolerates missing, non-integer and out-of-range pages.
    paging = pagination.get_page(page_number)
    maping1 = {'mapings': maping, 'staff': staff, 'paging': paging}
    if 'search' in request.POST:
        search = request.POST.get('search')
        events = AddEvent.objects.filter(event__icontains=search).all()
        location = AddEvent.objects.filter(location__icontains=search).all()
        return render(request, 'searchDetail.html',
                      {'events': events, 'searches': search, 'locations': location})
    if 'lat' in request.POST:
        lang = request.POST.get('lang')
        lat = request.POST.get('lat')
        return render(request, 'map.html',
                      {'lat': lat, 'lang': lang, 'mapings': maping, 'paging': paging})
    if 'active_event' in request.POST:
        active_event = request.POST.get('active_event')
        eventsbook = AddEvent.objects.filter(id=active_event).all()
        return render(request, 'eventForm1.html', {'bookevents': eventsbook})
    if 'event_id' in request.POST:
        event_id = request.POST.get('event_id')
        eventsinfo = AddEvent.objects.filter(id=event_id).all()
        return render(request, 'eventdetail.html', {'eventinfo': eventsinfo})
    return render(request, 'map.html', maping1)
25123689269 | #!/bin/env python3
__author__ = "Richard Pöttler"
__copyright__ = "Copyright (c) 2022 Richard Pöttler"
__license__ = "MIT"
__email__ = "richard.poettler@gmail.com"
from argparse import ArgumentParser
from configparser import ConfigParser, ExtendedInterpolation
from json import loads
from logging import error, info, debug
from pyproj import Transformer
from scipy.interpolate import griddata
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import logging
import math
import numpy as np
import os
import pcraster as pcr
import shapefile
import xarray as xr
from units import MM, kelvin_to_celsius
from os.path import isfile
from os import makedirs, remove
# import gdal
# Fallback for the "[Configuration] max_stream_order" option (see create_streamorder).
DEFAULT_MAX_STREAMORDER = 4
"""Mapping from log level strings to logging levels"""
LOG_LEVEL_MAP = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG,
}
# Missing values for PCRaster maps.
PCR_MISSING_VALUE = 9999  # originally was 10
PCR_MISSING_WEATHER_VALUE = -9999  # used for the interpolated weather mapstacks
def write_pcr(pcrmap, outfile):
    """Write a PCRaster map to *outfile*.

    Wrapper that deletes a pre-existing file first, because otherwise the
    map won't be written by pcr.report.
    """
    if isfile(outfile):
        remove(outfile)
    pcr.report(pcrmap, outfile)
# generates a map of all the cell centers of the rasters
def init_cellcenter(rows, cols, cell_size, xmin, ymin):
    """Return a (rows, cols, 2) array with the x/y center of every cell.

    (xmin, ymin) is the upper-left corner of the raster; row indices grow
    southwards, hence the y coordinate decreases with the row number.
    """
    centers = np.zeros((rows, cols, 2))
    for row in range(rows):
        # y is constant along a row, so compute it once per row
        y = ymin - cell_size / 2.0 - cell_size * row
        for col in range(cols):
            centers[row][col][0] = xmin + cell_size / 2.0 + cell_size * col
            centers[row][col][1] = y
    return centers
def gen_inpoly(shapefile, coord_map, rows, cols):
    """Rasterize a polygon: 1 for cell centers inside it, 0 elsewhere.

    *shapefile* is a __geo_interface__ mapping whose first coordinate ring
    is used as the polygon outline; *coord_map* holds the cell centers as
    produced by init_cellcenter.
    """
    boundary = Polygon(shapefile["coordinates"][0])
    inside = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            center = Point(coord_map[r][c][0], coord_map[r][c][1])
            if boundary.contains(center):
                inside[r][c] = 1
    return inside
def generate_river_points(shapefile, cell_size):
    """Densify the river polylines of *shapefile* into a point array.

    Every segment of every shape is resampled so consecutive points are at
    most 0.8 * cell_size apart, which guarantees the later rasterization
    (burn_in_river) produces a gap-free river.  Returns an (n, 2) numpy
    array of x/y coordinates.
    """
    # read shape features
    points_2D = []
    for i in range(0, len(shapefile.shapes())):
        feature = shapefile.shapes()[i].__geo_interface__["coordinates"]
        # resamples to 80% of the raster size
        d = np.diff(feature, axis=0)
        segdists = np.hypot(d[:, 0], d[:, 1])
        divisions = np.ceil(segdists / (cell_size * 0.8))
        points_2D.append((feature[0][0], feature[0][1]))
        for j in range(0, len(feature) - 1):
            x1 = feature[j][0]
            x2 = feature[j + 1][0]
            y1 = feature[j][1]
            y2 = feature[j + 1][1]
            n = int(divisions[j])
            # NOTE(review): this inner loop reuses ``i`` and shadows the
            # outer shape index; harmless because the outer loop reassigns
            # it, but worth renaming.
            for i in range(1, n):
                # linear interpolation between segment endpoints
                a = float(i) / n
                x = (1 - a) * x1 + a * x2
                y = (1 - a) * y1 + a * y2
                points_2D.append((x, y))
            points_2D.append((x2, y2))
    points_2D = np.asarray(points_2D)
    return points_2D
def burn_in_river(cell_centers, rows, cols, riv_points):
    """Rasterize river points: NaN everywhere, 1 at each point's nearest cell."""
    burned = np.full((rows, cols), np.nan)
    for pt in riv_points:
        row, col = find_nearest_neighbour(cell_centers, pt)
        burned[row][col] = 1
    return burned
def find_nearest_neighbour(centers, point):
    """Return the (row, col) index of the cell center closest to *point*.

    Ties are resolved in favour of the first cell in row-major order.
    """
    # squared distance suffices for locating the minimum (sqrt is monotonic)
    dist_sq = (centers[:, :, 0] - point[0]) ** 2.0 + (centers[:, :, 1] - point[1]) ** 2.0
    return np.unravel_index(np.argmin(dist_sq), dist_sq.shape)
def gen_river_connectivity(river_array, rows, cols):
    """Improve connectivity of a rasterized river.

    Wherever a river cell has a river cell diagonally above it (upper-left
    or upper-right) but no river cell directly above, the cell directly
    above is also marked as river.  The input array is not modified; a
    corrected copy is returned.

    Fixes over the original implementation: row 0 no longer wraps around to
    the last row via negative indexing, and a river cell in the first/last
    column no longer wraps left / raises IndexError when probing its
    diagonal neighbours.
    """
    river_array_corrected = np.copy(river_array)
    for i in range(1, rows):  # row 0 has no row above it
        for j in range(0, cols):
            if river_array[i][j] != 1:  # NaN != 1 is True, so NaNs are skipped
                continue
            upper_left = j > 0 and river_array[i - 1][j - 1] == 1
            upper_right = j < cols - 1 and river_array[i - 1][j + 1] == 1
            # only fill a gap; never overwrite an existing river cell
            if (upper_left or upper_right) and math.isnan(river_array[i - 1][j]):
                river_array_corrected[i - 1][j] = 1
    return river_array_corrected
def read_soil_to_dict(soils_folder):
    """Read per-depth soil property maps and derive hydraulic parameters.

    Walks *soils_folder*, expecting paths like ``<depth>/<XXrest>.map`` where
    the two-letter file-name prefix (CL, SA, SI, BD, OC) names the property
    and the directory names the depth layer.  The raw maps are collected in
    the module-level ``dictionary`` and then converted into thetaS, thetaR,
    c and vertical ksat per layer (stacked along axis 2) via regression
    (pedotransfer) formulas.

    NOTE(review): ``dictionary`` is not defined anywhere in this module -- a
    candidate definition is commented out in main().  Calling this function
    as-is raises NameError; confirm and restore the definition.
    """
    for subdir, dirs, files in os.walk(soils_folder):
        for file in files:
            filepath = os.path.join(subdir, file)
            map = pcr.readmap(filepath)
            map_np = pcr.pcr2numpy(map, 0.0)
            # path layout: .../<depth>/<property><rest>; note the "/" split
            # assumes POSIX paths
            strings = filepath.split("/")
            mapstring = strings[-1]
            namestring = mapstring[:2]
            depthstring = strings[-2]
            dictionary[depthstring][namestring] = map_np
    # Populate variables
    # Populate variables uniform
    i = 0
    for key in dictionary:
        # Create outermost layer
        # unit scaling; zero (missing) cells are replaced with the median of
        # the valid cells so the regressions below stay finite
        Cli = dictionary[key]["CL"] / 1000.0
        Cli[Cli == 0] = np.median(Cli[Cli > 0])
        SAi = dictionary[key]["SA"] / 1000.0
        SAi[SAi == 0] = np.median(SAi[SAi > 0])
        SIi = dictionary[key]["SI"] / 1000.0
        SIi[SIi == 0] = np.median(SIi[SIi > 0])
        BDi = dictionary[key]["BD"] * 0.01
        BDi[BDi == 0] = np.median(BDi[BDi > 0])
        OCi = dictionary[key]["OC"] / 10000.0
        OCi[OCi == 0] = np.median(OCi[OCi > 0])
        # residual water content (regression; verify coefficients against
        # the published pedotransfer source)
        thetaRi = (
            0.09878
            + 0.002127 * Cli
            - (8.366 * 10**-4) * SIi
            - 0.0767 / (OCi + 1)
            + SIi * Cli * (3.853 * 10**-5)
            + 0.00233 * Cli / (OCi + 1)
            + 9.498 * 10**-4 * SIi / (OCi + 1)
        )
        # saturated water content
        thetaSi = (
            0.6819
            + 0.06480 / (OCi + 1)
            - 0.119 * BDi**2.0
            - 0.02668
            + (8.031 * 10**-4) * SIi
            + 0.02312 * BDi**2.0 / (OCi + 1.0)
            + Cli * 0.001489
            + 0.01908 * BDi**2.0
            - 0.001109 * Cli
            - (2.315 * 10**-5) * SIi * Cli
            - 0.001197 * SIi * BDi**2.0
            - (1.068 * 10**-4) * Cli * BDi**2.0
        )
        # saturated vertical hydraulic conductivity
        ksat_veri = 240.19 * np.exp(
            19.52348 * thetaSi
            - 8.96847
            - 0.028212 * Cli
            + 1.8107 * 10**-4 * SAi**2.0
            - 9.4125 * 10**-3 * Cli**2.0
            - 8.395215 * thetaSi**2.0
            + 0.077718 * SAi * thetaSi
            - 0.00298 * SAi**2.0 * thetaSi**2.0
            - 0.019492 * Cli**2 * thetaSi**2.0
            + 1.73 * 10**-5 * SAi**2.0 * Cli
            + 0.02733 * Cli**2 * thetaSi
            + 0.001434 * SAi**2.0 * thetaSi
            - 3.5 * 10**-6 * Cli**2.0 * SAi
        )
        # pore size distribution index
        # NOTE(review): the term "3.10**-4" below parses as 3.1 ** -4, not
        # 3 * 10**-4 -- almost certainly a typo; confirm against the source.
        lambda_i = np.exp(
            -0.784
            + 0.018 * SAi
            - 1.062 * thetaSi
            - SAi**2.0 * 5 * 10**-5
            - 0.003 * Cli**2.0
            + 1.111 * thetaSi**2.0
            - 0.031 * SAi * thetaSi
            + 3.10**-4 * SAi**2.0 * thetaSi**2.0
            - 0.006 * Cli**2.0 * thetaSi**2.0
            - 2 * 10**-6 * SAi**2.0 * Cli
            + 0.008 * Cli**2.0 * thetaSi
            - 0.007 * thetaSi**2.0 * Cli
        )
        ci = 3 + 2 / lambda_i
        # stack layers depth-wise in ``dictionary`` iteration order
        if i == 0:
            thetaR = np.copy(thetaRi)
            thetaS = np.copy(thetaSi)
            ksat_ver = np.copy(ksat_veri)
            c = np.copy(ci)
        if i > 0:
            thetaR = np.dstack((thetaR, thetaRi))
            thetaS = np.dstack((thetaS, thetaSi))
            ksat_ver = np.dstack((ksat_ver, ksat_veri))
            c = np.dstack((c, ci))
        i = i + 1
    return thetaS, thetaR, c, ksat_ver
def create_gauges_map(config, rows, cols, cell_centers):
    """Creates the gauges map.

    Each entry of the [Gauges] config section is a JSON coordinate pair; the
    raster cell nearest to it receives a consecutive id starting at 1.  A
    cell that would be assigned twice keeps its first id, and gauges on the
    map border are reported (they may not drain correctly).
    """
    INITIAL_VALUE = -9999
    gauges_array = np.empty((rows, cols))
    gauges_array[:] = INITIAL_VALUE
    counter = 1
    for name, coords in config["Gauges"].items():
        point = loads(coords)
        i_x, i_y = find_nearest_neighbour(cell_centers, point)
        if i_x == 0 or i_y == 0 or i_x == (rows - 1) or i_y == (cols - 1):
            info(f"{name} is placed at the border of the map")
        if gauges_array[i_x][i_y] != INITIAL_VALUE:
            info(
                f"Skipping {name} because it would overwrite id {gauges_array[i_x][i_y]}"
            )
        else:
            gauges_array[i_x][i_y] = counter
            info(f"Wrote {name} with id {counter}")
            counter += 1
    gauges_pcr = pcr.numpy2pcr(pcr.Scalar, gauges_array, INITIAL_VALUE)
    write_pcr(gauges_pcr, config["Outfiles"]["gauges_map"])
def create_catchment_mask(config, rows, cols, cell_centers):
    """Create the catchment mask from the catchment shapefile.

    Rasterizes the first shape record (1 inside, 0 outside), writes it and
    returns the mask as a PCRaster ordinal map.
    """
    info("Create catchment mask")
    # reads the catchment shapefile
    shape = shapefile.Reader(config["Shapefiles"]["shape_catchment"])
    feature = shape.shapeRecords()[0]
    # contains shape geometry
    first = feature.shape.__geo_interface__
    # creates a numpy array of the mask
    raster = gen_inpoly(first, cell_centers, rows, cols)
    # write raster out
    mask_raster = pcr.numpy2pcr(pcr.Ordinal, raster, PCR_MISSING_VALUE)
    write_pcr(mask_raster, config["Outfiles"]["catchment_mask"])
    return mask_raster
def create_river_burn(config, rows, cols, cell_size, cell_centers):
    """Create river burnin map.

    Densifies the river shapefile, rasterizes it onto the model grid and
    writes the result.  Returns the river both as a numpy array and as a
    PCRaster map.
    """
    info("Burn in river")
    riv_shape = shapefile.Reader(config["Shapefiles"]["rivershape"])
    riv_points = generate_river_points(riv_shape, cell_size)
    riv_array = burn_in_river(cell_centers, rows, cols, riv_points)
    riv_corrected = gen_river_connectivity(riv_array, rows, cols)
    ## turn off correction
    # NOTE: the connectivity correction computed above is deliberately
    # discarded by the next line; remove the override to re-enable it.
    riv_corrected = riv_array
    riv_pcr = pcr.numpy2pcr(pcr.Ordinal, riv_corrected, PCR_MISSING_VALUE)
    write_pcr(riv_pcr, config["Outfiles"]["river_burn"])
    return riv_corrected, riv_pcr
def create_ldd_map(config, dem, riv_corrected):
    """Create local drainage direction map.

    The DEM is lowered by 1000 along the burned-in river cells so the
    derived drainage network is forced to follow the river.

    NOTE: *riv_corrected* is modified in place (its NaNs are zeroed), so the
    caller's array changes as a side effect.
    NOTE(review): the *unrepaired* ldd is returned while the repaired one is
    written to disk -- confirm this is intended.
    """
    info("Create local drainage direction")
    # removing nans
    riv_where_nan = np.isnan(riv_corrected)
    riv_corrected[riv_where_nan] = 0.0
    riv_pcr_no_nan = pcr.numpy2pcr(pcr.Scalar, riv_corrected, PCR_MISSING_VALUE)
    # determine regional slope where the river should run
    # ldddem = pcr.ifthen(pcr.boolean(mask_raster), dem)
    ldddem = pcr.ifthenelse(riv_pcr_no_nan >= 1.0, dem - 1000.0, dem)
    # huge thresholds -- presumably chosen to disable lddcreate's pit
    # removal limits entirely; verify against the PCRaster docs
    ldd = pcr.lddcreate(ldddem, 10.0e35, 10.0e35, 10.0e35, 10.0e35)
    lddrep = pcr.lddrepair(ldd)
    # lddmasked = pcr.ifthen(pcr.boolean(mask_raster), lddrep)
    write_pcr(lddrep, config["Outfiles"]["ldd_map"])
    ##riv_pcr = pcr.ifthen(pcr.scalar(mask_raster) >= 1, riv_pcr)
    ##disttocatch = pcr.spread(pcr.nominal(mask_raster), 0.0, 1.0)
    ##demmax = pcr.ifthenelse(pcr.scalar(mask_raster) >= 1.0,demmax,demmax + (pcr.celllength() * 100.0) / disttocatch,)
    return ldd
def create_streamorder(config, rows, cols, mask_raster, ldd):
    """Create streamorder map.

    Strahler orders are rescaled so the maximum equals the
    "[Configuration] max_stream_order" option (default
    DEFAULT_MAX_STREAMORDER); rescaled orders of 0 are clamped to 1.  The
    map is masked to the catchment before writing; the unmasked numpy
    array is returned.
    """
    info("Create stream order map")
    # manually adjust maximum streamorder
    stro = pcr.streamorder(ldd)
    stro_scalar = pcr.scalar(stro)
    stro_np = pcr.pcr2numpy(stro_scalar, 0.0)
    ist_max = np.amax(stro_np)
    factor = ist_max / config.getint(
        "Configuration", "max_stream_order", fallback=DEFAULT_MAX_STREAMORDER
    )
    for i in range(0, rows):
        for j in range(0, cols):
            stro_np[i][j] = np.floor(stro_np[i][j] / factor)
            if stro_np[i][j] == 0.0:
                stro_np[i][j] = 1.0
    stro_corr = pcr.numpy2pcr(pcr.Scalar, stro_np, PCR_MISSING_VALUE)
    stro_masked = pcr.ifthen(pcr.boolean(mask_raster), stro_corr)
    write_pcr(stro_masked, config["Outfiles"]["streamorder_map"])
    return stro_np
def create_river_width(config, rows, cols, riv_pcr, stro_np):
    """
    Compute width on basis of strahler order
    Downing et al (2012): Global abundace and size distribution of streams and rivers.

    width = 0.542 * exp(0.842 * order), applied per cell and written as a
    scalar map.

    NOTE(review): *riv_pcr* is accepted but never used -- confirm whether
    the width map should be masked to the river cells.
    """
    info("Create river width")
    width_np = np.copy(stro_np)
    for i in range(0, rows):
        for j in range(0, cols):
            width_np[i][j] = 0.542 * math.exp(0.842 * width_np[i][j])
    width_pcr = pcr.numpy2pcr(pcr.Scalar, width_np, PCR_MISSING_VALUE)
    write_pcr(width_pcr, config["Outfiles"]["river_width_map"])
def create_soil_maps(config, rows, cols):
    """Create soil maps.

    Currently only writes a uniform (all ones) nominal soil map.  The
    derivation of soil thickness and hydraulic parameter maps from
    read_soil_to_dict() is kept below in commented-out form.
    """
    info("Create unifrom soil map")
    soil_np = np.ones((rows, cols))
    soil_pcr = pcr.numpy2pcr(pcr.Nominal, soil_np, PCR_MISSING_VALUE)
    write_pcr(soil_pcr, config["Outfiles"]["soil_map"])
    # print('Create soil thickness map')
    # soil_thick_np = np.ones((rows,cols)) * soil_thickness
    # soil_thick_pcr = pcr.numpy2pcr(pcr.Scalar,soil_thick_np,PCR_MISSING_VALUE)
    # write_pcr(soil_thick_pcr, working_folder + '/' + soil_thickness_map)
    # write_pcr(soil_thick_pcr, working_folder + '/' + min_soil_thickness_map)
    #
    # thetaS, thetaR, c, ksat_ver = read_soil_to_dict(soils_folder)
    #
    # print('Create thetaS')
    # thetaS_pcr = pcr.numpy2pcr(pcr.Scalar,np.copy(thetaS[:,:,0]),PCR_MISSING_VALUE)
    # out_thetaS = working_folder + '/' + thetaS_file
    # write_pcr(thetaS_pcr, out_thetaS)
    # print('Create thetaR')
    # thetaR_pcr = pcr.numpy2pcr(pcr.Scalar,np.copy(thetaR[:,:,0]),PCR_MISSING_VALUE)
    # out_thetaR = working_folder + '/' + thetaR_file
    # write_pcr(thetaR_pcr, out_thetaR)
    #
    # print('ksatver')
    # ksatver_pcr = pcr.numpy2pcr(pcr.Scalar,np.copy(ksat_ver[:,:,0]),PCR_MISSING_VALUE)
    # out_ksat_ver = working_folder + '/' + ksat_ver_file
    # write_pcr(ksatver_pcr, out_ksat_ver)
    #
    # print('Create M')
    # M = np.zeros((rows,cols))
    # for i in range(0,rows):
    # for j in range(0,cols):
    # ks_depth = ksat_ver[i,j,:]
    # y = ks_depth/ksat_ver[i,j,0]
    # fit = np.polyfit(soil_depth, np.log(y), 1, w=np.sqrt(y))
    # f = -fit[0]
    # M[i][j] = (thetaS[i][j][0]-thetaR[i][j][0])/f
    #
    # M_pcr = pcr.numpy2pcr(pcr.Scalar,M,PCR_MISSING_VALUE)
    # out_ksat_ver = working_folder + '/' + M_file
    # write_pcr(M_pcr, out_ksat_ver)
    #
    # print('Create c')
    #
    # for i in range(0,len(take_c)):
    # c_pcr = pcr.numpy2pcr(pcr.Scalar,np.copy(c[:,:,take_c[i]]),PCR_MISSING_VALUE)
    # out_c = working_folder + '/c_' + str(i) + '.map'
    # write_pcr(c_pcr, out_c)
def generate_landuse_lookup(path):
    """Read the land-use lookup CSV at *path* into a dictionary.

    Each CSV row is ``class_id,N,Sl,Swood,Kext,RD``; the result maps the
    integer class id to a dict of the five vegetation parameters.
    """
    table = np.loadtxt(path, delimiter=",")
    names = ("N", "Sl", "Swood", "Kext", "RD")
    return {int(entry[0]): dict(zip(names, entry[1:6])) for entry in table}
def create_land_use(config, rows, cols):
    """Creates land use maps.

    Reads the landuse raster and the CSV lookup table, then writes one
    scalar map per vegetation parameter (N, Sl, Swood, Kext, rooting depth)
    plus a copy of the landuse map itself.
    """
    info("Create landuse maps")
    landuse = pcr.readmap(config["Paths"]["landuse_file"])
    lookup = generate_landuse_lookup(config["Paths"]["landuse_lookup"])
    lan_np = pcr.pcr2numpy(landuse, 0.0)
    N = np.zeros((rows, cols))
    Sl = np.zeros((rows, cols))
    Swood = np.zeros((rows, cols))
    Kext = np.zeros((rows, cols))
    RD = np.zeros((rows, cols))
    # per-cell table lookup; a landuse class missing from the lookup table
    # raises KeyError here
    for i in range(0, rows):
        for j in range(0, cols):
            row = lookup[int(lan_np[i][j])]
            N[i][j] = row["N"]
            Sl[i][j] = row["Sl"]
            Swood[i][j] = row["Swood"]
            Kext[i][j] = row["Kext"]
            RD[i][j] = row["RD"]
    N_pcr = pcr.numpy2pcr(pcr.Scalar, N, PCR_MISSING_VALUE)
    write_pcr(N_pcr, config["Outfiles"]["N_file"])
    Sl_pcr = pcr.numpy2pcr(pcr.Scalar, Sl, PCR_MISSING_VALUE)
    write_pcr(Sl_pcr, config["Outfiles"]["Sl_file"])
    Swood_pcr = pcr.numpy2pcr(pcr.Scalar, Swood, PCR_MISSING_VALUE)
    write_pcr(Swood_pcr, config["Outfiles"]["Swood_file"])
    Kext_pcr = pcr.numpy2pcr(pcr.Scalar, Kext, PCR_MISSING_VALUE)
    write_pcr(Kext_pcr, config["Outfiles"]["Kext_file"])
    RD_pcr = pcr.numpy2pcr(pcr.Scalar, RD, PCR_MISSING_VALUE)
    write_pcr(RD_pcr, config["Outfiles"]["rooting_file"])
    write_pcr(landuse, config["Outfiles"]["landuse_map"])
def get_dem_info(dem):
    """Return (rows, cols, cell_size, xmin, ymin) of the raster clone.

    *dem* is anything exposing ``clone()`` (in main() the pcraster module
    itself is passed).  ``xmin``/``ymin`` are the west/north edge, i.e. the
    upper-left corner of the raster -- see init_cellcenter, which steps
    southwards from ``ymin``.
    """
    # Query the clone once instead of re-creating it for every attribute.
    clone = dem.clone()
    rows = clone.nrRows()
    cols = clone.nrCols()
    cell_size = clone.cellSize()
    # coordinates are in upper left corner
    xmin = clone.west()
    ymin = clone.north()
    return rows, cols, cell_size, xmin, ymin
def split_timedelta64_ns(td):
    """Split a nanosecond timedelta into (days, hours, minutes, seconds).

    *td* is a numpy timedelta64[ns] (or a plain nanosecond count); sub-second
    fractions are truncated.
    """
    seconds = int(td / (10**9))  # [ns] -> [s]
    minutes = int(seconds / 60)
    seconds %= 60
    hours = int(minutes / 60)
    minutes %= 60
    days = int(hours / 24)
    hours %= 24  # bugfix: was "hours %= 60", which left hours >= 24 unreduced
    return days, hours, minutes, seconds
def create_inmap_temperature(config, rows, cols, cell_centers):
    """Creates temperature inmaps.

    Writes one PCRaster map per ERA5 timestep; values are degrees Celsius
    (converted from the Kelvin t2m field).
    """
    info("Create temperature inmaps")
    makedirs(config["Paths"]["inmaps"], exist_ok=True)
    template = config["Paths"]["inmaps"] + "/TEMP{:08.3f}"
    projection = config["Projections"]["in_temperature"]
    counter = 0
    # handle every "temperature*" grib file in lexical order, numbering the
    # output maps consecutively across files
    for key in sorted(k for k in config["Weatherfiles"] if k.startswith("temperature")):
        counter = create_inmap_era5_grib(
            config,
            rows,
            cols,
            cell_centers,
            config["Weatherfiles"][key],
            projection,
            "t2m",
            template,
            converter=kelvin_to_celsius,
            counter=counter,
        )
        if counter == -1:
            # max_steps was reached inside the helper -> stop processing
            break
def create_inmap_era5_grib(
    config,
    rows,
    cols,
    cell_centers,
    grib_file,
    grib_projection,
    grib_variable,
    file_template,
    converter=None,
    counter=0,
):
    """Creates mapstacks from era5 grib files.

    Every timestep of *grib_variable* is transformed from *grib_projection*
    to the model projection, interpolated onto the model cell centers and
    written as a numbered PCRaster map (*file_template* formatted with
    counter/1000).  Leading/empty (all-NaN) records are skipped.  Returns
    the updated counter, or -1 once "[Weatherfiles] max_steps" is reached.

    NOTE(review): the missing value written here is the literal -9999 while
    create_inmap_era5_grib_steps uses PCR_MISSING_WEATHER_VALUE -- the same
    number today, but the constant should be used in both places.
    """
    info(f"Handling {grib_file}")
    grib = xr.open_dataset(grib_file, engine="cfgrib")
    # create cell centers in input projection
    xscale = grib.coords["longitude"].data
    yscale = grib.coords["latitude"].data
    xlen = len(xscale)
    ylen = len(yscale)
    input_centers = np.zeros((xlen * ylen, 2))
    for i, xpos in enumerate(xscale):
        for j, ypos in enumerate(yscale):
            input_centers[i * ylen + j][0] = xpos
            input_centers[i * ylen + j][1] = ypos
    transformer = Transformer.from_proj(
        grib_projection, config["Projections"]["out"], always_xy=True
    )
    input_centers[:, 0], input_centers[:, 1] = transformer.transform(
        input_centers[:, 0], input_centers[:, 1]
    )
    # flat centers for interpolator
    centers_flat = cell_centers.reshape(rows * cols, 2)
    # loop over timesteps
    is_first = True
    is_second = True
    first_step = None
    max_steps = config.getint("Weatherfiles", "max_steps", fallback=0)
    date_time = None
    for step in grib[grib_variable]:
        date_time = np.datetime_as_string(step.time + step.step, unit="s")
        if np.isnan(step).all() and is_first:
            # skip first empty records
            debug(f"skipping: {date_time}")
            continue
        elif np.isnan(step).all():
            # skip empty records
            info(f"Skipping: {date_time} due to NaN")
            continue
        elif is_first:
            # print start
            info(f"Recording starts at: {date_time}")
            first_step = step
            is_first = False
        elif not is_first and is_second:
            # print step (time between the first two non-empty records)
            days, hours, minutes, seconds = split_timedelta64_ns(
                (step.time + step.step) - (first_step.time + first_step.step)
            )
            info(f"Step is: {days} d {hours} h {minutes} m {seconds} s")
            is_second = False
        elif max_steps and counter >= max_steps:
            info(f"max_steps reached at {date_time}")
            return -1
        # convert values if needed
        if converter:
            step = converter(step)
        # build interpolator
        input_values_flat = step.data.reshape(len(input_centers))
        (step_rows, step_cols) = np.shape(step)
        assert step_rows == len(yscale), "length of rows doesn't match"
        assert step_cols == len(xscale), "length of columns doesn't match"
        interpolated = griddata(input_centers, input_values_flat, centers_flat).reshape(
            rows, cols
        )
        write_pcr(
            pcr.numpy2pcr(pcr.Scalar, interpolated, -9999),
            file_template.format(counter / 1000),
        )
        counter += 1
    if date_time:
        info(f"Recording ends at: {date_time}")
    return counter
def create_inmap_precipitation(config, rows, cols, cell_centers):
    """Creates precipitation inmaps.

    Writes one PCRaster map per ERA5 forecast step; values are millimeters
    (the tp field divided by MM).
    """
    info("Create precipitation inmaps")
    makedirs(config["Paths"]["inmaps"], exist_ok=True)
    template = config["Paths"]["inmaps"] + "/P{:011.3f}"
    projection = config["Projections"]["in_precipitation"]
    counter = 0
    # handle every "precipitation*" grib file in lexical order, numbering
    # the output maps consecutively across files
    for key in sorted(
        k for k in config["Weatherfiles"] if k.startswith("precipitation")
    ):
        counter = create_inmap_era5_grib_steps(
            config,
            rows,
            cols,
            cell_centers,
            config["Weatherfiles"][key],
            projection,
            "tp",
            template,
            converter=lambda m: m / MM,
            counter=counter,
        )
        if counter == -1:
            # max_steps was reached inside the helper -> stop processing
            break
def create_inmap_evaporation(config, rows, cols, cell_centers):
    """Creates evaporation inmaps.

    Writes one PCRaster map per ERA5 forecast step; values are millimeters
    (the pev field divided by MM).
    """
    info("Create evaporation inmaps")
    makedirs(config["Paths"]["inmaps"], exist_ok=True)
    template = config["Paths"]["inmaps"] + "/PET{:09.3f}"
    projection = config["Projections"]["in_evaporation"]
    if config.getboolean("Weatherfiles", "ecmwf_evaporation", fallback=False):
        # ECMWF potential evaporation comes with inverted sign; keep only
        # the non-positive part and flip it before converting to mm
        converter = lambda m: -m.clip(max=0) / MM
    else:
        converter = lambda m: m / MM
    counter = 0
    for key in sorted(k for k in config["Weatherfiles"] if k.startswith("evaporation")):
        counter = create_inmap_era5_grib_steps(
            config,
            rows,
            cols,
            cell_centers,
            config["Weatherfiles"][key],
            projection,
            "pev",
            template,
            converter=converter,
            counter=counter,
        )
        if counter == -1:
            # max_steps was reached inside the helper -> stop processing
            break
def create_inmap_era5_grib_steps(
    config,
    rows,
    cols,
    cell_centers,
    grib_file,
    grib_projection,
    grib_variable,
    file_template,
    converter=None,
    counter=0,
):
    """Creates mapstacks from era5 grib files with multiple steps.

    Like create_inmap_era5_grib, but the grib variable carries an extra
    "step" dimension (forecast fields such as tp/pev), so the time loop is
    nested: every forecast step of every timestep is reprojected,
    interpolated onto the model cell centers and written as a numbered
    PCRaster map.  Returns the updated counter, or -1 once
    "[Weatherfiles] max_steps" is reached.
    """
    info(f"Handling {grib_file}")
    grib = xr.open_dataset(grib_file, engine="cfgrib")
    # create cell centers in input projection
    xscale = grib.coords["longitude"].data
    yscale = grib.coords["latitude"].data
    xlen = len(xscale)
    ylen = len(yscale)
    input_centers = np.zeros((xlen * ylen, 2))
    for i, xpos in enumerate(xscale):
        for j, ypos in enumerate(yscale):
            input_centers[i * ylen + j][0] = xpos
            input_centers[i * ylen + j][1] = ypos
    transformer = Transformer.from_proj(
        grib_projection, config["Projections"]["out"], always_xy=True
    )
    input_centers[:, 0], input_centers[:, 1] = transformer.transform(
        input_centers[:, 0], input_centers[:, 1]
    )
    # flat centers for interpolator
    centers_flat = cell_centers.reshape(rows * cols, 2)
    # loop over timesteps
    is_first = True
    is_second = True
    first_step = None
    max_steps = config.getint("Weatherfiles", "max_steps", fallback=0)
    date_time = None
    for steps in grib[grib_variable]:
        for step in steps:
            date_time = np.datetime_as_string(step.time + step.step, unit="s")
            if np.isnan(step).all() and is_first:
                # skip first empty records
                debug(f"skipping: {date_time}")
                continue
            elif np.isnan(step).all():
                # skip empty records
                info(f"Skipping: {date_time} due to NaN")
                continue
            elif is_first:
                # print start
                info(f"Recording starts at: {date_time}")
                first_step = step
                is_first = False
            elif not is_first and is_second:
                # print step (time between the first two non-empty records)
                days, hours, minutes, seconds = split_timedelta64_ns(
                    (step.time + step.step) - (first_step.time + first_step.step)
                )
                info(f"Step is: {days} d {hours} h {minutes} m {seconds} s")
                is_second = False
            elif max_steps and counter >= max_steps:
                info(f"max_steps reached at {date_time}")
                return -1
            # convert values if needed
            if converter:
                step = converter(step)
            # build interpolator
            input_values_flat = step.data.reshape(len(input_centers))
            (step_rows, step_cols) = np.shape(step)
            assert step_rows == len(yscale), "length of rows doesn't match"
            assert step_cols == len(xscale), "length of columns doesn't match"
            interpolated = griddata(
                input_centers, input_values_flat, centers_flat
            ).reshape(rows, cols)
            write_pcr(
                pcr.numpy2pcr(pcr.Scalar, interpolated, PCR_MISSING_WEATHER_VALUE),
                file_template.format(counter / 1000),
            )
            counter += 1
    if date_time:
        info(f"Recording ends at: {date_time}")
    return counter
def main():
    """Main function to prepare the wflow input files.

    Reads the configuration file given on the command line, sets up
    logging, derives the raster geometry from the master DEM and then runs
    every task enabled in the [Jobs] section (respecting the dependencies
    between the map-generation steps).
    """
    parser = ArgumentParser(description="Prepare wflow files")
    parser.add_argument("config_file", help="configuration file destination")
    args = parser.parse_args()
    if not isfile(args.config_file):
        error(f"Configuration file {args.config_file} doesn't exist")
        exit(1)
    config = ConfigParser(interpolation=ExtendedInterpolation())
    config.read(args.config_file)
    # logging: console always, optional logfile via [Configuration] log_file
    root_logger = logging.getLogger()
    log_formatter = logging.Formatter("%(levelname)s %(asctime)s: %(message)s")
    log_level = LOG_LEVEL_MAP.get(
        config.get("Configuration", "log_level", fallback="INFO").lower(),
        logging.INFO,
    )
    root_logger.setLevel(log_level)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)
    log_file = config.get("Configuration", "log_file", fallback=None)
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(log_formatter)
        root_logger.addHandler(file_handler)
    # Soil stuff (disabled; read_soil_to_dict expects these globals)
    # soils_folder = "/home/iwbworkstation/Desktop/working_dir/50m_data/2_Soil"
    # dictionary = {
    #     "0-5": {},
    #     "5-15": {},
    #     "15-30": {},
    #     "30-60": {},
    #     "60-100": {},
    #     "100-200": {},
    # }
    # soil_depth = [25, 100, 225, 450, 800, 1500]
    # soil_thickness = 2000.0
    # takes c from which layers
    # take_c = [1, 2, 3, 4]
    pcr.setglobaloption("unitcell")
    pcr.setclone(config["Paths"]["masterdem"])
    # the pcraster module itself exposes clone(), so it serves as the handle
    rows, cols, cell_size, xmin, ymin = get_dem_info(pcr)
    debug(f"rows: {rows} cols: {cols}")
    debug(f"cell_size: {cell_size}")
    debug(f"xmin: {xmin} ymin: {ymin}")
    cell_centers = init_cellcenter(rows, cols, cell_size, xmin, ymin)
    # resolve dependencies
    need_gauges_map = config.getboolean("Jobs", "gauges_map", fallback=False)
    need_land_use = config.getboolean("Jobs", "land_use_map", fallback=False)
    need_soil_map = config.getboolean("Jobs", "soil_map", fallback=False)
    need_river_width = config.getboolean("Jobs", "river_width", fallback=False)
    need_stream_order = (
        config.getboolean("Jobs", "stream_order", fallback=False) or need_river_width
    )
    need_ldd_map = (
        config.getboolean("Jobs", "ldd_map", fallback=False) or need_stream_order
    )
    need_river_burn = (
        config.getboolean("Jobs", "river_burn", fallback=False)
        or need_ldd_map
        or need_river_width
    )
    need_catchment_mask = (
        config.getboolean("Jobs", "catchment_mask", fallback=False) or need_stream_order
    )
    need_inmap_precipitation = config.getboolean(
        "Jobs", "inmap_precipitation", fallback=False
    )
    need_inmap_temperature = config.getboolean(
        "Jobs", "inmap_temperature", fallback=False
    )
    need_inmap_evaporation = config.getboolean(
        "Jobs", "inmap_evaporation", fallback=False
    )
    # execute tasks (order matters: later tasks consume earlier results)
    if need_catchment_mask:
        mask_raster = create_catchment_mask(config, rows, cols, cell_centers)
    if need_gauges_map:
        create_gauges_map(config, rows, cols, cell_centers)
    if need_river_burn:
        riv_corrected, riv_pcr = create_river_burn(
            config, rows, cols, cell_size, cell_centers
        )
    if need_ldd_map:
        dem = pcr.readmap(config["Paths"]["masterdem"])
        ldd = create_ldd_map(config, dem, riv_corrected)
    if need_stream_order:
        stro_np = create_streamorder(config, rows, cols, mask_raster, ldd)
    if need_river_width:
        create_river_width(config, rows, cols, riv_pcr, stro_np)
    if need_soil_map:
        create_soil_maps(config, rows, cols)
    if need_land_use:
        create_land_use(config, rows, cols)
    if need_inmap_precipitation:
        create_inmap_precipitation(config, rows, cols, cell_centers)
    if need_inmap_temperature:
        create_inmap_temperature(config, rows, cols, cell_centers)
    if need_inmap_evaporation:
        create_inmap_evaporation(config, rows, cols, cell_centers)
    info("Tasks complete")
if __name__ == "__main__":
    main()
| poettler-ric/pylib | preparewflow.py | preparewflow.py | py | 30,017 | python | en | code | 0 | github-code | 50 |
24616010947 | # implementation based on "A Comparison of Several Greatest Common Divisor Algorithms"
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.259.1877&rep=rep1&type=pdf
def brute_force(a, b):
gcd = 0
# check whether a or b is the lower value
if a > b:
low = b
else:
low = a
for i in range(1, low + 1):
if a % i == 0 and b % i == 0:
gcd = i
return gcd | caterinasworld/gcd | gcd_naive.py | gcd_naive.py | py | 419 | python | en | code | 0 | github-code | 50 |
21730770753 | from time import time
import pytest
from algorithm.goal_function import iterator_over_day, Metric
from basic_structures import Classes, Lecturer as Lect, Room
from basic_structures.classes import UnavailableClasses
from data_generation.basic_config import DAY_TIME_WEIGHTS, \
GOAL_FUNCTION_WEIGHTS
from schedule.week_scheadule import WeekSchedule
from time_ import Time as Tim, TimeDelta as TD
from utils.constans import DU, BTW, WA, UNI
from utils.types_ import MONDAY, ClassesType as CT, THURSDAY, FRIDAY
@pytest.fixture(scope="class")
def metric():
    """Build a Metric over a small hand-made week schedule.

    Five one-hour lectures are created: the first two on Thursday, the
    remaining three on Friday, placed at consecutive slots produced by
    iterator_over_day() (every second yielded value is discarded).  The
    schedule's bookkeeping is set to 5 assigned classes / 5 h total, and a
    Metric wrapping it is returned.  Shared per test class (scope="class").
    """
    start, dur, day = Tim(12, 30), TD(1, 0), MONDAY
    ws = WeekSchedule([UnavailableClasses(1, start, dur, day)])
    classes = [Classes(1, "a", TD(1, 0), CT.LECTURE, [], Lect(1, "l"), [], room=Room(1,1,1)),
               Classes(2, "a", TD(1, 0), CT.LECTURE, [], Lect(1, "l"), [], room=Room(1,1,1)),
               Classes(3, "a", TD(1, 0), CT.LECTURE, [], Lect(1, "l"), [], room=Room(1,1,1)),
               Classes(4, "a", TD(1, 0), CT.LECTURE, [], Lect(1, "l"), [], room=Room(1,1,1)),
               Classes(5, "a", TD(1, 0), CT.LECTURE, [], Lect(1, "l"), [], room=Room(1,1,1))]
    for i in range(len(classes))[:2]:
        classes[i].day = THURSDAY
    for i in range(len(classes))[2:]:
        classes[i].day = FRIDAY
    iod = iterator_over_day()
    for i in range(len(classes)):
        classes[i].start_time = next(iod)[0]
        next(iod)
        ws.assign(classes[i])
    ws.assigned_classes_time = 5 * 60
    ws.assigned_classes_amount = 5
    m = Metric(ws)
    return m
class TestMetric:
    """Tests for Metric's internal sub-metrics and the combined goal value.

    All expected numbers are derived by hand from the fixture schedule
    (two Thursday + three Friday one-hour lectures, 5 h assigned total).
    """
    def test__calc_worst_brake_time(self, metric):
        metric._calc_worst_brake_time()
        assert metric._worst_brake_time == 60 * 60
    def test__calc_medium_unfolding(self, metric):
        metric._calc_medium_unfolding()
        assert metric._medium_unfolding == 300
    def test__calc_worst_uniformity(self, metric):
        metric._calc_worst_uniformity()
        assert metric._worst_uniformity == 1500
    def test__calc_worst_days_unfolding(self, metric):
        metric._calc_worst_days_unfolding()
        assert metric._worst_days_unfolding == 100
    def test__calc_days_unfolding(self, metric):
        d_a = metric._calc_days_unfolding()
        assert d_a == 13 / 100
    def test__calc_brake_time_value(self, metric):
        bt = metric._calc_brake_time_value()
        assert bt == (3 * 60 - 0) / (60 * 60)
    def test__calc_week_arrangement(self, metric):
        wa = metric._calc_week_arrangement()
        assert wa == 2 / 7
    def test__calc_uniformity(self, metric):
        uni = metric._calc_uniformity()
        assert uni == (300 - 120 + 300 - 180) / 1500
    def test__calc_all_basics(self, metric):
        # smoke test: must not raise
        metric._calc_all_basics()
    def test_calc_goal_fcn(self, metric):
        # goal value = weighted sum of the four sub-metrics checked above
        gfv = metric.calc_goal_fcn()
        sum_ = 13 / 100 * GOAL_FUNCTION_WEIGHTS[DU]
        sum_ += (3 * 60 - 0) / (60 * 60) * GOAL_FUNCTION_WEIGHTS[BTW]
        sum_ += 2 / 7 * GOAL_FUNCTION_WEIGHTS[WA]
        sum_ += (300 - 120 + 300 - 180) / 1500 * GOAL_FUNCTION_WEIGHTS[UNI]
        assert round(gfv, 5) == round(sum_, 5)
    def test_time(self, metric):
        # rough speed report, no assertion -- run pytest with -s to see it
        st = time()
        for i in range(1000):
            gfv = metric.calc_goal_fcn()
        et = time()
        print('\nGoal function speed: ', (et - st), "/ 1000")
| Ignisolver/The-Optimization-Algorithm-for-the-University-Timetabling-Problem | tests/test_algorithm/test_goal_function.py | test_goal_function.py | py | 3,337 | python | en | code | 0 | github-code | 50 |
13730490173 | #LUCKY 7s
#Arya Vishnu
#Virtual Dice I guess
import random
# Repeatedly ask for a number of rolls, simulate that many two-die rolls and
# report how often the dice summed to the "lucky" 7.
# NOTE: loops forever -- stop with Ctrl-C; a non-numeric answer raises ValueError.
while True:
    count7 = 0
    rolls = int(input("---------------\nHow many rolls: "))
    for i in range(0, rolls):
        r1 = random.randint(1, 6)
        r2 = random.randint(1, 6)
        add = r1 + r2
        print("(" + str(r1) + ", " + str(r2)+ ")" + ";" + "sum " + str(add))
        if add == 7:
            count7 += 1
    print(str(count7) + " seven(s) were rolled")
| Ar-Vi/pythonChallenges | ICS3.py | ICS3.py | py | 460 | python | en | code | 0 | github-code | 50 |
32796007853 | from __future__ import division
from __future__ import print_function
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
import datetime
import sys, gzip
import numpy as np
import tensorflow as tf
import tensorflow.contrib.metrics as tf_metrics
import tensorflow.contrib.layers as tf_layers
def highway_layer(x, num_outputs, activation_fn=tf.nn.relu, carry_bias=-1.0, scope=""):
    """Highway layer: y = H(x) * T(x) + x * (1 - T(x)).

    *x* is flattened first; *num_outputs* must equal the flattened width,
    since both weight matrices are square (num_outputs x num_outputs).
    Variables are created under the TF1 variable scope *scope*.

    NOTE(review): ``carry_bias`` initialises b_H (the activation bias) while
    the transform-gate bias b_T gets a constant 0.1; given the parameter's
    name, confirm the two bias initialisers are not swapped.
    """
    with tf.variable_scope(str(scope)):
        x = tf_layers.flatten(x)
        w_shape = [num_outputs, num_outputs]
        b_shape = [num_outputs]
        # H branch: plain affine + activation
        W_H = tf.get_variable(
            "weight",
            shape=w_shape,
            initializer=tf.random_normal_initializer(stddev=0.1),
            trainable=True)
        b_H = tf.get_variable(
            "bias",
            shape=b_shape,
            initializer=tf.constant_initializer(carry_bias))
        # T branch: sigmoid transform gate
        W_T = tf.get_variable(
            "weight_transform",
            shape=w_shape,
            initializer=tf.random_normal_initializer(stddev=0.1),
            trainable=True)
        b_T = tf.get_variable(
            "bias_transform",
            shape=b_shape,
            initializer=tf.constant_initializer(0.1))
        T = tf.sigmoid(
            tf.add(tf.matmul(x, W_T), b_T),
            name="transform_gate")
        H = activation_fn(
            tf.add(tf.matmul(x, W_H), b_H),
            name="activation")
        C = tf.subtract(1.0, T, name="carry_gate")
        # blend transformed and carried input
        y = tf.add(
            tf.multiply(H, T),
            tf.multiply(x, C),
            "y")
        return y
def saveModel(model, file_path):
    """Persist *model*: the TF session checkpoint goes to "<file_path>_sess",
    the object itself is joblib-pickled to *file_path*.

    NOTE(review): ``joblib`` is referenced here but never imported in this
    module -- calling this raises NameError until "import joblib" is added.
    """
    saver = tf.train.Saver()
    saver.save(model.session, "{}_sess".format(file_path))
    joblib.dump(model, file_path, compress = 3)
def loadModel(file_path):
    """Inverse of saveModel: joblib-load the object from *file_path*, then
    restore its TF session from "<file_path>_sess".

    NOTE(review): ``joblib`` is never imported in this module (see saveModel).
    """
    model = joblib.load(file_path)
    saver = tf.train.Saver()
    saver.restore(model.session, "{}_sess".format(file_path))
    return model
class Network:
EMBEDDING_SIZE = 256
feature_encoder = DictVectorizer(sparse=False)
target_encoder = LabelEncoder()
char_vocabulary = LabelEncoder()
batch_size = 64
def __init__(self, logdir="logs-nn", expname="basic-nn", threads=1, seed=42):
# Create an empty graph and a session
tf.set_random_seed(seed)
self.session = tf.Session(
config=tf.ConfigProto(
inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
self.summary_writer = tf.summary.FileWriter(
"{}/{}-{}".format(logdir, timestamp, expname))
self.global_step = None
def _build_network(self, input_width):
raise ValueError("Abstract Method Not Implemented.")
def _initialize_variables(self):
# Initialize variables
with self.session.graph.as_default():
self.session.run(tf.global_variables_initializer())
if self.summary_writer:
self.summary_writer.add_graph(self.session.graph)
@property
def training_step(self):
return self.session.run(self.global_step)
def _train(self, tokens, tok_lens, features, targets):
raise ValueError("Abstract Method Not Implemented.")
def _predict(self, tokens, features):
raise ValueError("Abstract Method Not Implemented.")
def _create_vocabulary(self, X):
voc = {"<pad>" : 1, "<unk>" : 1}
for line in X:
for key, token in line.items():
for letter in token:
voc[letter] = 1
self.char_vocabulary.fit(list(voc.keys()))
def _encode_tokens(self, X):
tok_lens = []
tok_enc = []
pad_value = self.char_vocabulary.transform(['<pad>'])[0]
for line in X:
lens = []
enc = []
for key in sorted(line):
tok = list(line[key])
for i, _ in enumerate(tok):
if not tok[i] in self.char_vocabulary.classes_:
tok[i] = '<unk>'
if len(tok) > 0:
enc.append(list(self.char_vocabulary.transform(tok)))
else:
enc.append([])
lens.append(len(enc[-1]))
tok_lens.append(np.array(lens))
tok_enc.append(np.array(enc))
# padding
max_len = np.max(tok_lens)
for i, _ in enumerate(tok_enc):
tok = [np.pad(x, (0, max_len - len(x)), 'constant', constant_values=pad_value) for x in tok_enc[i]]
tok_enc[i] = tok
return tok_enc, tok_lens
    def fit(self, X, y):
        """Encode the training data, (re)build the graph, and run a single
        pass of mini-batch training.

        NOTE(review): the input width is derived from the example at index 1
        (`tokens_tr[1]`, `features_tr[1]`) -- confirm all examples share the
        same number of tokens/features, and that X has at least 2 examples.
        """
        tokens, features = self._split_features(X)
        self._create_vocabulary(tokens)
        tokens_tr, token_lens = self._encode_tokens(tokens)
        features_tr = self.feature_encoder.fit_transform(features)
        y_tr = self.target_encoder.fit_transform(y)
        assert (len(tokens_tr) == len(features_tr)), "Tokens_len does not match Features_len"
        assert (len(features_tr) == len(y_tr)), "Tokens_len does not match Y_len"
        tf.reset_default_graph()
        self._build_network((len(tokens_tr[1]) * self.EMBEDDING_SIZE) + len(features_tr[1]))
        # One pass over the data in mini-batches (final slice may be short).
        for i in range((len(tokens_tr) // self.batch_size) + 1):
            start_idx = i * self.batch_size
            end_idx = (i + 1) * self.batch_size
            self._train(
                tokens_tr[start_idx : end_idx],
                token_lens[start_idx : end_idx],
                features_tr[start_idx : end_idx],
                y_tr[start_idx : end_idx])
        return None
    def predict(self, X):
        """Return the predicted class labels for X, decoded back to their
        original label values via target_encoder."""
        tokens, features = self._split_features(X)
        tokens_tr, token_lens = self._encode_tokens(tokens)
        features_tr =self.feature_encoder.transform(features)
        pred = self._predict(tokens_tr, token_lens, features_tr)
        return self.target_encoder.inverse_transform(pred[0])
    def predict_proba(self, X):
        """Return the raw output-layer activations for X.

        NOTE(review): despite the name, these are unnormalized logits from
        the subclass's output layer (no softmax is applied) -- confirm that
        callers do not expect probabilities summing to 1.
        """
        tokens, features = self._split_features(X)
        tokens_tr, token_lens = self._encode_tokens(tokens)
        features_tr =self.feature_encoder.transform(features)
        pred = self._predict(tokens_tr, token_lens, features_tr)
        return pred[1]
def _split_features(self, X):
# Split the data X to a tuple of dictionaries (form_lemmas, attributes)
# The first is to be embedded, the second to be one hot encoded
tokens = []
attributes = []
for line in X:
t = {}
attr = {}
for key, value in line.items():
if "form" in key or "lemma" in key:
t[key] = value
else:
attr[key] = value
tokens.append(t)
attributes.append(attr)
return (tokens, attributes)
class FeedForwardNetwork(Network):
    """Classifier built on the TF1 graph/session API: a character BiRNN
    encodes each token, the token encodings are concatenated with the
    one-hot feature vector, and a stack of feed-forward (or highway)
    layers produces class logits."""

    # Default hidden-layer type; the per-instance value is set in __init__.
    layer_type = "FeedForward"

    def __init__(
        self,
        network_width,
        network_depth,
        dropout,
        rnn_cell_dim,
        rnn_cell="GRU",
        layer_type="FeedForward",
        logdir="logs-nn",
        expname="basic-nn",
        threads=1,
        seed=42):
        # Only stores hyper-parameters; the graph is built lazily by
        # _build_network() once the input width is known (see Network.fit).
        Network.__init__(self, logdir, expname, threads, seed)
        self.h_width = network_width
        self.h_depth = network_depth
        self.rnn_cell_dim = rnn_cell_dim
        self.rnn_cell_type = rnn_cell
        self.dropout = dropout
        self.layer_type = layer_type

    def _build_network(self, input_width):
        """Build the TF1 graph: char embeddings -> shared-weight BiRNN token
        encoder -> hidden stack -> softmax classifier, plus the training,
        accuracy and summary ops."""
        with self.session.graph.as_default():
            if self.rnn_cell_type == "LSTM":
                self.rnn_cell = tf.contrib.rnn.LSTMCell(self.rnn_cell_dim)
            elif self.rnn_cell_type == "GRU":
                self.rnn_cell = tf.contrib.rnn.GRUCell(self.rnn_cell_dim)
            else:
                # NOTE(review): `rnn_cell` is not defined in this scope (the
                # constructor stored it as self.rnn_cell_type), so this branch
                # raises NameError rather than the intended ValueError.
                raise ValueError("Unknown rnn_cell {}".format(rnn_cell))
            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')
            # Placeholders: tokens holds per-token character ids.
            self.tokens = tf.placeholder(tf.int32, [None, None, None], name="tokens")
            self.token_lens = tf.placeholder(tf.int32, [None, None], name="token_lens")
            self.features = tf.placeholder(tf.float32, [None, None], name="features")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")
            self.alphabet_size = len(self.char_vocabulary.classes_)
            self.dropout_keep = tf.placeholder(tf.float32)
            self.input_width = input_width
            char_embedding_matrix = tf.get_variable(
                "char_embeddings",
                [self.alphabet_size, self.EMBEDDING_SIZE],
                initializer=tf.random_normal_initializer(stddev=0.01),
                dtype=tf.float32)
            with tf.variable_scope("token_encoder"):
                # Flatten so every token (across the whole batch) is one row.
                tokens_flat = tf.reshape(self.tokens, [-1, tf.shape(self.tokens)[-1]])
                token_lens_flat = tf.reshape(self.token_lens, [-1])
                char_embeddings = tf.nn.embedding_lookup(char_embedding_matrix, tokens_flat)
                hidden_states, final_states = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=self.rnn_cell,
                    cell_bw=self.rnn_cell,
                    inputs=char_embeddings,
                    sequence_length=token_lens_flat,
                    dtype=tf.float32,
                    scope="char_BiRNN")
                # Project concatenated fw/bw final states to EMBEDDING_SIZE,
                # then regroup the per-token rows back per example.
                tokens_encoded = tf_layers.linear(
                    tf.concat(final_states, 1),
                    self.EMBEDDING_SIZE,
                    scope="tokens_encoded")
                tokens_encoded = tf.reshape(tokens_encoded, [tf.shape(self.features)[0], -1])
            self.input_layer = tf.concat((tokens_encoded, self.features), 1)
            self.input_layer = tf.reshape(self.input_layer, [-1, self.input_width])
            # input transform
            self.hidden_layer = tf.nn.dropout(tf_layers.fully_connected(
                self.input_layer,
                num_outputs=self.h_width,
                activation_fn=None,
                scope="input_layer"), self.dropout_keep)
            # hidden layers
            for i in range(self.h_depth):
                if self.layer_type == "FeedForward":
                    self.hidden_layer = tf.nn.dropout(tf_layers.fully_connected(
                        self.hidden_layer,
                        num_outputs=self.h_width,
                        activation_fn=tf.nn.relu,
                        scope="ff_layer_{}".format(i)), self.dropout_keep)
                elif self.layer_type == "Highway":
                    self.hidden_layer = tf.nn.dropout(highway_layer(
                        self.hidden_layer,
                        num_outputs=self.h_width,
                        activation_fn=tf.nn.relu,
                        scope="highway_layer_{}".format(i)), self.dropout_keep)
                else:
                    raise ValueError("Unknown hidden layer type.")
            self.output_layer = tf_layers.fully_connected(
                self.hidden_layer,
                num_outputs=len(self.target_encoder.classes_),
                activation_fn=None,
                scope="output_layer")
            self.predictions = tf.argmax(self.output_layer, 1)
            self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.output_layer, labels=self.labels), name="loss")
            self.training = tf.train.AdamOptimizer().minimize(self.loss, global_step=self.global_step)
            self.accuracy = tf_metrics.accuracy(self.predictions, self.labels)
            self.summary = tf.summary.merge([
                tf.summary.scalar("train/loss", self.loss),
                tf.summary.scalar("train/accuracy", self.accuracy)])
            self._initialize_variables()

    def _train(self, tokens, token_lens, features, labels):
        """Run one optimization step on a mini-batch and log summaries."""
        try:
            _, summary, pred = self.session.run([self.training, self.summary, self.predictions],
                {self.tokens: tokens,
                 self.token_lens: token_lens,
                 self.features: features,
                 self.labels: labels,
                 self.dropout_keep: self.dropout})
        except Exception as e:
            # NOTE(review): debugger breakpoint left in -- blocks on error.
            import pdb; pdb.set_trace()
            raise e
        self.summary_writer.add_summary(summary, self.training_step)

    def _predict(self, tokens, token_lens, features):
        """Run inference on a batch.

        Returns:
            (pred, logits): argmax class indices and the raw output-layer
            activations (dropout disabled via keep-prob 1.0).
        """
        try:
            pred, logits = self.session.run([self.predictions, self.output_layer],
                {self.tokens: tokens,
                 self.token_lens: token_lens,
                 self.features: features,
                 self.dropout_keep: 1.0})
        except Exception as e:
            # NOTE(review): debugger breakpoint left in -- blocks on error.
            import pdb; pdb.set_trace()
            raise e
        return (pred, logits)
| varisd/MLFix | scripts/neural.py | neural.py | py | 12,913 | python | en | code | 0 | github-code | 50 |
41393011134 | import jieba
from collections import Counter

# Read the corpus; `with` guarantees the file handle is closed (the
# original left both files open).
with open("d:/Desktop/Emily/HKUST/MAFS 6010U - Artificial Intelligence in Finance/project/weibo/云从科技_19.txt", encoding="utf-8") as f:
    txt = f.read()
# Load the stop-word list (one word per line); a set gives O(1) membership
# tests instead of scanning a list for every segmented word.
with open("d:/Desktop/Emily/HKUST/MAFS 6010U - Artificial Intelligence in Finance/project/CS.txt", encoding="utf-8") as f:
    stopwords = {line.strip() for line in f}
words = jieba.lcut(txt)
# Count every segmented word that is not a stop word and is longer than one
# character (single characters are mostly noise in Chinese segmentation).
counts = Counter(w for w in words if w not in stopwords and len(w) > 1)
# Print the 40 most frequent words (most_common also avoids the IndexError
# the original hit when fewer than 40 distinct words were found).
for word, count in counts.most_common(40):
    print("{:<10}{:>7}".format(word, count))
24914307854 | #!/usr/bin/env python
import json
import redis
import os
from random import randint
from flask import Flask, render_template, request
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
# Coerce to int: os.getenv returns a *string* whenever the variable is set,
# while the redis client expects an integer port.
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))

app = Flask(__name__)


@app.route('/')
def index():
    """Serve the single-page UI."""
    return render_template('index.html')


@app.route('/balance')
def get_balance():
    """Return the balance for the `accountId` query parameter as JSON."""
    account_id = request.args.get("accountId")
    return json.dumps({ 'balance': _get_account_balance(account_id) })


def _get_account_balance(account_id):
    """Look up the balance stored under "balance-<account_id>" in Redis.

    Returns the value as a str, or None when the key does not exist.
    """
    balance_key = "balance-{}".format(account_id)
    # We're instantiating this everytime so that the Intentions are effecive
    # decode_responses=True makes redis return str instead of bytes --
    # json.dumps cannot serialize bytes and would raise TypeError.
    redis_conn = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT,
                                   decode_responses=True)
    return redis_conn.get(balance_key)


if __name__ == '__main__':
    app.run(host="0.0.0.0")
| jharley/flask-balance | app/balance.py | balance.py | py | 823 | python | en | code | 0 | github-code | 50 |
from collections import Counter

doc = 'i bought an apple .\ni ate it .\nit is delicious .'
lst = doc.replace('\n', ' ').split(' ')
print(lst)
# ['i', 'bought', 'an', 'apple', '.', 'i', 'ate', 'it', '.', 'it', 'is', 'delicious', '.']

# Counter replaces the manual "check key / increment" loop and preserves
# first-seen order, so the printed dict matches the original output.
word2freq = dict(Counter(lst))
print(word2freq)
# {'i': 2, 'bought': 1, 'an': 1, 'apple': 1, '.': 3, 'ate': 1, 'it': 2, 'is': 1, 'delicious': 1}
| SeiichiN/LaLa-Python | 55hon-2/en04.py | en04.py | py | 428 | python | en | code | 0 | github-code | 50 |
4456728622 | from time import sleep
# Interactive ATM simulation (Portuguese UI).
# NOTE(review): `deposito`, `saque` and `saldo_real` initialised below are
# effectively unused -- the running balance is kept in `saldo_inicial`.
print()
print('=-'*30)
print()
bco='BANCO EPB INVESTIMENTOS LTDA'
print(f'{bco:^60}')
print()
print('=-'*30)
print()
sleep(1)
cx_eletr='CAIXA ELETRONICO 24H'
cx_1=('**'*10)
cx_2=('--'*30)
agrd=('AGUARDE.....')
print(f'{cx_1}{cx_eletr}{cx_1}')
print()
sleep(1)
# Starting balance and (unused) transaction accumulators.
saldo_inicial=1000
deposito=0
saque=0
saldo_real=0
saldo_real=saldo_inicial
# Main menu loop: 1=balance, 2=deposit, 3=withdraw, 4=quit.
while True:
    menu=int(input("""\n\nSelecione uma opção:
    [1] Verificar saldo
    [2] Depositar
    [3] Sacar
    [4] Terminar
    Digite a opção= """))
    if menu==1:
        #saldo_real+=deposito
        #saldo_real-=saque
        #saldo_real=saldo_inicial-saque
        sleep(1)
        print(f'\n\n\n{agrd:^60}\n\n\n')
        sleep(2)
        print(f'{cx_2}')
        seu_sld='Seu saldo é de =>=>=> '
        print(f'\n\n{seu_sld:^50} R${saldo_inicial:.2f}\n\n')
        print(f'{cx_2}')
        sleep(2)
    if menu==2:
        dpsto_sucss='Deposito realizado com sucesso!!!'
        dgt_dpsto=('Digite o valor a depositar: R$=>=>=>')
        sleep(1)
        print(f'\n\n\n{agrd:^60}\n\n\n')
        sleep(2)
        print(f'{cx_2}')
        deposito=int(input(f'{dgt_dpsto:^55}'))
        if deposito>0:
            saldo_inicial+=deposito
        else:
            print('VALOR DE DEPÓSITO INVÁLIDO')
        # NOTE(review): the success message below is printed even when the
        # deposit amount was rejected as invalid.
        sleep(1)
        print(f'\n\n{dpsto_sucss:^60}')
        print(f'{cx_2}')
    elif menu==3:
        saque_sucess=(' Saque realizado com sucesso!')
        vlr_saque=('\n\nDigite o valor a sacar: R$=>=>=>')
        saque=int(input(f'{vlr_saque:^60}'))
        if saque>0:
            saldo_inicial-=saque
        sleep(1)
        print(f'\n\n\n{agrd:^60}\n\n\n')
        sleep(2)
        sleep(1)
        # Overdraft warning when the withdrawal exceeded the new balance.
        if saque<=saldo_inicial:
            saque_sucess=(' Saque realizado com sucesso!')
            print(f'{cx_2}')
            print(f'\n\n{saque_sucess:^60}')
            print(f'\n\n{cx_2}')
        elif saque>saldo_inicial:
            saque_sucess=(' Saque realizado com sucesso!')
            print(f'{cx_2}')
            sleep(1)
            print(""" ATENÇÃO!!!
        VOCÊ ESTÁ USANDO SEU CHEQUE ESPECIAL""")
            sleep(2)
            print(f'\n\n{saque_sucess:^60}')
            print(f'\n\n{cx_2}')
            sleep(1)
    if menu==4:
        sleep(2)
        print()
        print()
        saida='Saindo do sistema..'
        sleep(2)
        print()
        print(f'{saida:^60}')
        break
    else:
        # NOTE(review): this `else` re-prompts for a menu option but its
        # result is discarded -- the loop re-reads `menu` at the top anyway,
        # so every non-4 choice triggers a duplicate prompt.
        menu=int(input("""Selecione uma opção:
    [1] Verificar saldo
    [2] Depositar
    [3] Sacar
    [4] Terminar
    Digite a opção= """))
# Farewell banner printed after the loop exits.
print('=-'*30)
print()
bco='BANCO EPB INVESTIMENTOS LTDA'
print(f'{bco:^60}')
print()
print('=-'*30)
print()
msg_final='AGRADECEMOS POR USAR NOSSOS SERVIÇOS'
cx_1=('**'*5)
print(f'{cx_1}{msg_final}{cx_1}')
print()
| Edubernardes70/Python_Atividades | Caixa eletrônico.py | Caixa eletrônico.py | py | 3,018 | python | pt | code | 0 | github-code | 50 |
28036725360 | import math
class Calcolatrice:
    """Small calculator.

    Methods that cannot produce a result return an Italian error string,
    preserved verbatim for compatibility with the interactive driver below.
    """

    def somma(self, a, b):
        """Return a + b."""
        return a + b

    def sottrazione(self, a, b):
        """Return a - b."""
        return a - b

    def moltiplicazione(self, a, b):
        """Return a * b."""
        return a * b

    def divisione(self, a, b):
        """Return a / b, or an error string when b == 0."""
        if b == 0:
            return "Impossibile dividere per zero"
        return a / b

    def _converti(self, valore):
        """Coerce a number or numeric string to float; raise ValueError
        otherwise (TypeError is folded into ValueError for a uniform check)."""
        try:
            return float(valore)
        except (TypeError, ValueError):
            raise ValueError

    def potenza(self, base, esponente):
        """Return base ** esponente as float.

        Accepts numbers or numeric strings; non-numeric input returns an
        error string (the original crashed on non-numeric strings).
        """
        try:
            base = self._converti(base)
            esponente = self._converti(esponente)
        except ValueError:
            return "Entrambi i parametri devono essere numeri (int o float) o stringhe numeriche"
        return base ** esponente

    def radice(self, base, esponente):
        """Return the esponente-th root of base.

        Negative bases are rejected for even root indices; for odd indices
        the real negative root is returned (a bare ``**`` on a negative
        float with a fractional exponent would yield a complex number).
        """
        try:
            base = self._converti(base)
            esponente = self._converti(esponente)
        except ValueError:
            return "Entrambi i parametri devono essere numeri (int o float) o stringhe numeriche"
        if base < 0:
            if esponente % 2 == 0:
                return "Impossibile calcolare la radice di un numero negativo con esponente pari"
            return -((-base) ** (1 / esponente))
        return base ** (1 / esponente)

    def modulo(self, a, b):
        """Return a % b."""
        return a % b

    def conversione_base(self, numero, base_origine, base_destinazione):
        """Convert the string `numero` from base_origine to base_destinazione.

        Supports destination bases 2..36 (lowercase digits). The original
        implementation always produced a zero-padded *binary* string
        regardless of base_destinazione; this performs a real change of base.
        """
        try:
            valore = int(numero, base_origine)
        except (TypeError, ValueError):
            return "Errore di conversione"
        if not 2 <= base_destinazione <= 36:
            return "Errore di conversione"
        if valore == 0:
            return "0"
        cifre = "0123456789abcdefghijklmnopqrstuvwxyz"
        segno = "-" if valore < 0 else ""
        valore = abs(valore)
        uscita = []
        while valore:
            valore, resto = divmod(valore, base_destinazione)
            uscita.append(cifre[resto])
        return segno + "".join(reversed(uscita))
# Ask the user for the two operand values (read as strings, converted below)
valore1 = input("Inserisci il primo valore: ")
valore2 = input("Inserisci il secondo valore: ")

# Ask the user to choose the operation
print("Scegli l'operazione:")
print("1. Somma")
print("2. Sottrazione")
print("3. Moltiplicazione")
print("4. Divisione")
print("5. Potenza")
print("6. Radice")
print("7. Modulo")
print("8. Conversione di base")

scelta = input("Inserisci il numero dell'operazione scelta: ")

# Example usage of the Calcolatrice class
calc = Calcolatrice()

# Perform the chosen operation on the entered values.
# NOTE(review): both inputs are converted to float up front, so option 8
# (base conversion) never sees the raw string and in fact ignores the
# user's values entirely -- it always converts the hard-coded "1010".
try:
    valore1 = float(valore1)
    valore2 = float(valore2)
    if scelta == "1":
        risultato = calc.somma(valore1, valore2)
    elif scelta == "2":
        risultato = calc.sottrazione(valore1, valore2)
    elif scelta == "3":
        risultato = calc.moltiplicazione(valore1, valore2)
    elif scelta == "4":
        risultato = calc.divisione(valore1, valore2)
    elif scelta == "5":
        risultato = calc.potenza(valore1, valore2)
    elif scelta == "6":
        risultato = calc.radice(valore1, valore2)
    elif scelta == "7":
        risultato = calc.modulo(valore1, valore2)
    elif scelta == "8":
        risultato = calc.conversione_base("1010", 2, 10)
    else:
        risultato = "Scelta non valida"
    print("Risultato:", risultato)
except ValueError:
    print("Inserisci valori numerici validi.")
| Pietrofox/Python_Volpe | calcolatrice_user.py | calcolatrice_user.py | py | 3,121 | python | it | code | 1 | github-code | 50 |
40727268302 | import time
from lxml import etree
from pykml.parser import Schema
from pykml.factory import KML_ElementMaker as KML
from pykml.factory import GX_ElementMaker as GX
from quad_mesh_simplify import simplify_mesh
import numpy as np
from aerpawlib.util import Coordinate, VectorNED
from lib.util import *
from lib.mapping import WorldMap
# KML "aabbggrr" colour string assigned to each drone's path line.
drone_colors = {
    "drone1": "ff0000ff",
    "drone2": "ff00ff00",
    "drone3": "ffff0000",
    "drone4": "ffffff00",
    "drone5": "ff00ffff",
    "drone6": "ffff00ff",
}

# Degrees of longitude/latitude per unit of the glyph template below.
drone_poly_scale_x = 0.00001
drone_poly_scale_y = 0.00001
# Triangle list ((x, y, alt) offsets) drawn at a drone's final position as
# a simple glyph in the KML output.
drone_poly = [
    [[0, 0, 0], [0, 0.5, -1], [0, 1, 0]],
    [[0, 0, 0], [0, -0.5, -1], [0, -1, 0]],
    [[1, 0, 0], [1, 0.5, -1], [1, 1, 0]],
    [[1, 0, 0], [1, -0.5, -1], [1, -1, 0]],
]
class Logger:
    """Records per-drone positions over time and serializes the flight
    paths plus a mesh of the explored airspace to KML."""

    def __init__(self, world_map: WorldMap):
        self._drone_log = {}  # drone_id -> list of (timestamp, MapBlockCoord)
        self._world_map = world_map

    def update_drone(self, drone_id: str, drone_block: MapBlockCoord):
        """Append the drone's current block position, timestamped now."""
        if drone_id not in self._drone_log:
            self._drone_log[drone_id] = []
        self._drone_log[drone_id].append((time.time(), drone_block))

    def serialize_kml(self) -> bytes:
        """Build the full KML document (drone paths + world mesh).

        Returns bytes: etree.tostring returns a bytes object, not str
        (the previous ``-> str`` annotation was wrong).
        """
        doc = KML.kml(
            KML.Document(
                KML.Name("drone paths")
            )
        )
        for a in self._serialize_kml_drones():
            doc.Document.append(a)
        for b in self._serialize_kml_blocks():
            doc.Document.append(b)
        return etree.tostring(doc, pretty_print=True)

    def _serialize_kml_blocks(self):
        """Return KML LineString placemarks outlining a simplified triangle
        mesh of the boundary between FREE and non-FREE map blocks."""
        # calculate polys for each block
        # iterate over the entire airspace, find adjacencies, add faces
        m = self._world_map._map.copy()  # to avoid race conditions ugh

        def _get_bounds(idx):
            # Range covering all blocks on axis `idx`, padded by one cell.
            vs = {i[idx] for i in m}
            return range(min(vs)-1, max(vs)+2)

        x_bounds, y_bounds, z_bounds = [_get_bounds(i) for i in range(3)]
        coords_searching = set()
        for x in x_bounds:
            for y in y_bounds:
                for z in z_bounds:
                    coords_searching.add((x, y, z))

        def _get_adj(coord):
            # The six face-adjacent neighbours of a unit cube.
            x, y, z = coord
            return {
                (x+1, y, z),
                (x-1, y, z),
                (x, y+1, z),
                (x, y-1, z),
                (x, y, z+1),
                (x, y, z-1)
            }

        def _get_corners(coord):
            # get corners of a unit cube. assume that coord given is in center
            cube_size = 1
            delta = cube_size / 2
            x, y, z = coord
            return {
                (x+delta, y+delta, z+delta),
                (x+delta, y+delta, z-delta),
                (x+delta, y-delta, z+delta),
                (x+delta, y-delta, z-delta),
                (x-delta, y+delta, z+delta),
                (x-delta, y+delta, z-delta),
                (x-delta, y-delta, z+delta),
                (x-delta, y-delta, z-delta),
            }

        # For every non-FREE cell that touches a FREE cell, emit the shared
        # face as two triangles.
        triangles = set()
        targeting = Traversability.FREE  # or BLOCKED
        for block_coord in coords_searching:
            if block_coord in m and m[block_coord] == targeting:
                continue
            corners = _get_corners(block_coord)
            for adj in _get_adj(block_coord):
                if adj in m and m[adj] == targeting:
                    # add face
                    points = list(corners & _get_corners(adj))
                    # share [0] and [1]
                    t_1 = tuple(sorted([points[0], points[1], points[2]]))
                    t_2 = tuple(sorted([points[0], points[1], points[3]]))
                    triangles |= {t_1, t_2}
        # simplify mesh
        # find corners/faces a la obj
        positions = []
        faces = []
        for t in triangles:
            f = []
            for c in t:
                if c not in positions:
                    positions.append(c)
                f.append(positions.index(c))
            faces.append(f)
        positions, faces = simplify_mesh(np.array(positions), np.array(faces, dtype=np.uint32), 600)

        # convert triangles to world space
        def _coord_to_world_raw(coord):
            x, y, z = [i*self._world_map._resolution for i in coord]
            delta_vec = VectorNED(y, x, -z)
            return self._world_map._center_coords + delta_vec

        world_triangles = []
        for triangle in faces:
            c1, c2, c3 = [_coord_to_world_raw(positions[c]) for c in triangle]
            world_triangles.append((c1, c2, c3))
        # convert coordinates defining poly tris to lines to be rendered in KML
        adding = []
        for tri in world_triangles:
            cs = [*tri] + [tri[0]]
            adding.append(KML.Placemark(
                KML.LineString(
                    GX.altitudeMode("relativeToGround"),
                    KML.coordinates("\n".join([f"{i.lon},{i.lat},{i.alt}" for i in cs]))
                )
            ))
        return adding

    def _serialize_kml_drones(self):
        """Return KML elements for every drone: a colour style, a glyph at
        the final position, and the de-duplicated block path as a line."""
        # get kml w/ each drone's path, caring only about the blocks and time
        r = []
        for drone in self._drone_log:
            path = self._drone_log[drone]
            # Collapse consecutive samples within the same block.
            unique_tiles = []
            for i in path:
                if len(unique_tiles) != 0:
                    last_tile = unique_tiles[-1][1]
                    if last_tile == i[1]:
                        continue
                unique_tiles.append((i[0], i[1], self._world_map.block_to_coord(i[1])))
            adding_style = KML.Style(
                KML.id(f"{drone}_sty"),
                KML.LineStyle(
                    KML.color(drone_colors.get(drone, "ff00aaff")),
                    KML.width(10),
                )
            )
            r.append(adding_style)
            # Scale and translate the glyph template to the last position.
            d_poly = []
            for tri in drone_poly:
                t = []
                for node in tri:
                    x, y, alt = node
                    x *= drone_poly_scale_x
                    y *= drone_poly_scale_y
                    i = unique_tiles[-1]
                    x += i[2].lon
                    y += i[2].lat
                    alt += i[2].alt
                    t.append([x, y, alt])
                d_poly.append(t)
            for tri in d_poly:
                cs = [*tri] + [tri[0]]
                r.append(KML.Placemark(
                    KML.styleUrl(f"#{drone}_sty"),
                    KML.LineString(
                        GX.altitudeMode("relativeToGround"),
                        KML.coordinates("\n".join([f"{j[0]},{j[1]},{j[2]}" for j in cs]))
                    )
                ))
            adding = KML.Placemark(
                KML.name(f"{drone} path"),
                KML.styleUrl(f"#{drone}_sty"),
                KML.LineString(
                    # KML.extrude(1),
                    KML.tessellate(1),
                    GX.altitudeMode("relativeToGround"),
                    KML.coordinates("\n".join([f"{i[2].lon},{i[2].lat},{i[2].alt}" for i in unique_tiles]))
                )
            )
            r.append(adding)
        return r

    def save(self, filename: str):
        """Serialize to KML and write it to `filename`.

        Fixes two bugs: ``self.serialize_kml`` was written without being
        called (writing the bound-method repr), and etree.tostring returns
        bytes, which cannot be written to a file opened in text mode.
        """
        with open(filename, 'wb') as f:
            f.write(self.serialize_kml())
| MihailSichitiu/aerpaw_drone_corridor_IEEE | aerpaw-drone-corridor/ground/ground_logger.py | ground_logger.py | py | 9,252 | python | en | code | 0 | github-code | 50 |
13174461869 | import sys,os
import readline
import pyfiglet
from .miscellaneous.completer import *
from .settings import *
############ OUTPUT GRAPHICS ################
class bcolors:
    """ANSI terminal escape sequences used to colour CLI output
    (ENDC resets all attributes)."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
############### COMMAND LINE FUNCTIONS ##############
def example(cmd=None):
    """Open the file named by `example <FILE_PATH>` in the configured editor.

    Args:
        cmd: tokenized command line, e.g. ["example", "path/to/file"].
            Anything other than exactly two tokens prints the usage line.
    """
    # The original usage string had one "{}" for two format arguments, so
    # the ANSI reset code was silently dropped and the colour bled into
    # subsequent output; both placeholders are present now.
    usage = "{}Usage: example <FILE_PATH>{}".format(bcolors.WARNING, bcolors.ENDC)
    if cmd is None or len(cmd) != 2:
        print(usage)
        return
    try:
        if os.path.exists(cmd[1]):
            os.system(config.editor + " " + cmd[1])
        else:
            print("{}File does not exists!{}".format(bcolors.WARNING, bcolors.ENDC))
    except Exception:
        # Best-effort: any editor/config failure falls back to the usage
        # line (narrowed from the original bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit).
        print(usage)
def get_options(d,options,id=False):
    """Recursively collect the command names available at the current menu.

    Walks the nested `menu_option` tree; once the node whose key matches the
    global `menu_state` is found, the recursion switches to collection mode
    (id=True) and appends that subtree's top-level keys to `options`.

    NOTE(review): the parameter `id` shadows the builtin id(); renaming it
    would change the keyword interface, so it is only flagged here.
    """
    for k,v in d.items():
        if id == True:
            options.append(k)
        elif k == menu_state:
            options = get_options(v,options,True)
        elif isinstance(v, dict):
            options = get_options(v,options)
    return options
def help(cmd=None):
    """Print the commands available at the current menu level.

    NOTE(review): shadows the builtin help(); it is registered by name in
    `switcher_menu`, so renaming would require updating that table too.
    The BOLD attribute applied before each command name is never reset.
    """
    print("Command list:")
    options = get_options(menu_option,[])
    for option in options:
        print("{}[*] {}{}{}".format(bcolors.OKBLUE,bcolors.ENDC,bcolors.BOLD,option))
def state(cmd=None):
    """Enter the submenu named by the invoked command (cmd[0]) and refresh
    the tab-completion word list for the new menu level."""
    global menu_state
    global completer
    menu_state = cmd[0]
    completer.update(get_options(menu_option,[]))
def exit(cmd=None):
    """Set the menu state to "exit" so the caller's loop can terminate.

    NOTE(review): shadows the builtin exit(); registered by name in
    `switcher_menu`, so renaming would require updating that table."""
    global menu_state
    menu_state = "exit"
def invalid(cmds=None):
    """Fallback handler for unknown commands: print a warning."""
    print("{}Invalid Command! Use help for options.{}".format(bcolors.WARNING,bcolors.ENDC))
def get_parent(d,t):
    """Find the parent menu key of the current global `menu_state`.

    Args:
        d: (sub)tree of the menu_option dict being searched.
        t: accumulator tuple, seeded with ("", False) by the caller.

    Returns:
        (key, found): ("", True) when menu_state is a direct child of this
        level; (parent_key, True) once the recursion identifies the parent;
        otherwise the accumulator unchanged.
    """
    out = t
    for k,v in d.items():
        if k == menu_state:
            return ("",True)
        elif isinstance(v, dict) and len(v) > 0:
            tmp = get_parent(v,t)
            if tmp[0] == "" and tmp[1] == True:
                return (k,True)
            else:
                out = tmp
        else:
            return t
    return out
def back(cmd=None):
    """Move the menu state up one level, to the parent of the current one
    (top level yields "" via get_parent's accumulator)."""
    global menu_state
    menu_state = get_parent(menu_option,("",False))[0]
def parse(cmd):
    """Dispatch the first token of `cmd` through the handler table for the
    current menu state (unknown commands go to invalid()), then return the
    possibly-updated menu state."""
    values = cmd.split()
    switcher_menu[menu_state].get(values[0], invalid)(values)
    return menu_state
# MENU OPTIONS VALUES
# Nested tree describing the menus: state name -> command -> sub-commands.
menu_option = {
    "main": {
        "example" : {
            "edit":{},
            "help":{},
            "back":{}
        },
        "help":{},
        "exit":{}
    }
}
# Dispatch table: menu state -> {command name -> handler function}.
switcher_menu = {"main":{"exit":exit,"help":help,"example":state},"example":{"edit":example,"help":help,"back":back}}
menu_state = "main"  # current menu level; mutated by state()/back()/exit()
# LOAD SETTINGS
config = Config()
# AUTOCOMPLETE SETUP
completer = Completer(get_options(menu_option,[]))
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')
# BANNER
print("{}{}{}".format(bcolors.HEADER,pyfiglet.figlet_format(config.name),bcolors.ENDC))
| EB113/RandomStuff | SimpleCI/src/menu.py | menu.py | py | 3,119 | python | en | code | 0 | github-code | 50 |
36603226692 | import functools
import warnings
from typing import Any, Callable, Optional, TypeVar, overload
from exabel_data_sdk.util.warnings import ExabelDeprecationWarning
FunctionT = TypeVar("FunctionT", bound=Callable[..., Any])
# Pylint flags '__func' as an invalid argument name, but we want the '__' prefix to make Mypy
# interpret it as a positional-only argument. Therefore, we disable the check for this argument.
# Typing-only overloads (bodies are `...`): the decorator may be used as a
# factory -- @deprecate_arguments(old="new") -- or, via the positional-only
# __func parameter, applied directly to a function.
@overload
def deprecate_arguments(
    **deprecation_replacements: Optional[str],
) -> Callable[[FunctionT], FunctionT]:
    ...


@overload
def deprecate_arguments(
    __func: None,  # pylint: disable=invalid-name
    **deprecation_replacements: Optional[str],
) -> Callable[[FunctionT], FunctionT]:
    ...


@overload
def deprecate_arguments(
    __func: FunctionT,  # pylint: disable=invalid-name
    **deprecation_replacements: Optional[str],
) -> FunctionT:
    ...
def deprecate_arguments(
    __func: Optional[FunctionT] = None,  # pylint: disable=invalid-name
    **deprecation_replacements: Optional[str],
) -> FunctionT:
    """
    Decorator for warning about and replacing deprecated arguments in a function that will be
    removed in a future release.

    Only works for deprecating [keyword-only arguments](https://peps.python.org/pep-3102/).

    Args:
        deprecation_replacements: a mapping from deprecated argument names to the new argument
            names or `None` if the argument has been removed and no longer serves any purpose.
    """
    if not deprecation_replacements:
        raise ValueError("No deprecations specified")

    def decorator(func: FunctionT) -> FunctionT:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Fully-qualified name used in warning and error messages.
            func_name = func.__qualname__
            if func.__module__ != "__main__":
                func_name = f"{func.__module__}.{func_name}"
            forwarded = {}
            for arg_name, arg_value in kwargs.items():
                if arg_name not in deprecation_replacements:
                    forwarded[arg_name] = arg_value
                    continue
                replacement = deprecation_replacements[arg_name]
                warning_message = (
                    f"Argument '{arg_name}' is deprecated in '{func_name}' and will be removed "
                    "in a future release. "
                )
                if replacement:
                    if replacement in kwargs:
                        raise ValueError(
                            f"Cannot specify both '{arg_name}' and '{replacement}' in "
                            f"'{func_name}'."
                        )
                    # Forward the value under the new name.
                    forwarded[replacement] = arg_value
                    warning_message += f"Use '{replacement}' instead."
                warnings.warn(warning_message, ExabelDeprecationWarning)
            return func(*args, **forwarded)

        return wrapper  # type: ignore[return-value]

    if __func:
        return decorator(__func)
    return decorator  # type: ignore[return-value]
| Exabel/python-sdk | exabel_data_sdk/util/deprecate_arguments.py | deprecate_arguments.py | py | 3,180 | python | en | code | 5 | github-code | 50 |
73397106075 | import csv
import os
import random
import sys
import time
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt, QTimer
from model import Player
from utils import log
class Game(QWidget):
    """Main window of the lottery drawing application: cycles rapidly
    through a name list loaded from namelist.csv and lets the operator
    pick, record, save and reset winners."""

    def __init__(self):
        super().__init__()
        self.counter = 0  # number of winners drawn so far
        self.load_data()
        self.init_ui()

    def init_ui(self):
        """
        A procedure: create the main window, then build and wire up all
        widgets before showing the window.
        :return:
        """
        self.resize(900, 600)
        self.setFixedSize(900, 600)  # Changing window size is not allowed
        self.center()
        self.setWindowTitle('抽奖程序')
        self.setWindowIcon(QIcon('ico.png'))
        self.init_widgets()
        self.init_grid()
        self.init_style()
        self.bind_events()
        self.show()

    def center(self):
        """
        Set window at the center of screen
        :return:
        """
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def init_widgets(self):
        """
        Create widgets and initialize their attributes (fonts, alignment).
        :return:
        """
        self.start_button = QPushButton('开始', self)
        self.font_syle(self.start_button)
        self.stop_button = QPushButton('清零', self)
        self.font_syle(self.stop_button)
        self.save_button = QPushButton('保存', self)
        self.font_syle(self.save_button)
        self.reload_button = QPushButton('重载数据', self)
        self.font_syle(self.reload_button)
        self.label_count = QLabel(f'已选出{self.counter}位', self)
        self.label_style(self.label_count)
        self.label_title_id = QLabel('工号', self)
        self.label_title_name = QLabel('姓名', self)
        self.label_title_depart = QLabel('部门', self)
        self.label_id = QLabel('', self)
        self.label_name = QLabel('', self)
        self.label_depart = QLabel('', self)
        # (title_label, value_label) pairs used for layout and styling below.
        self.labels = [
            (self.label_title_id, self.label_id),
            (self.label_title_name, self.label_name),
            (self.label_title_depart, self.label_depart)
        ]
        for label in self.labels:
            for l in label:
                self.label_style(l)
        logo = QPixmap('logo.png')
        self.label_logo = QLabel('logo', self)
        self.label_logo.setPixmap(logo)
        self.label_logo.setAlignment(Qt.AlignCenter)
        self.mtext_result = QTextEdit(self)
        self.font_syle(self.mtext_result, font_size=16)

    def init_grid(self):
        """
        Initialize the layout; this programme uses a grid layout.
        :return:
        """
        grid = QGridLayout()
        self.setLayout(grid)
        grid.addWidget(self.label_logo, 0, 0, 1, 3)
        for x in range(len(self.labels)):
            for y in range(len(self.labels[x])):
                grid.addWidget(self.labels[x][y], x + 1, y, 1, y + 1)
        grid.addWidget(self.start_button, 4, 0, 1, 1)
        grid.addWidget(self.stop_button, 4, 1, 1, 1)
        grid.addWidget(self.save_button, 4, 2, 1, 1)
        grid.addWidget(self.reload_button, 4, 6, 1, 1)
        grid.addWidget(self.mtext_result, 0, 3, 4, 4)
        grid.addWidget(self.label_count, 4, 3, 1, 3)

    def init_style(self):
        """
        Use QSS (Qt style sheets) to set the widgets' style.
        :return:
        """
        self.setStyleSheet('''
        QWidget{
            background-color:white;
            border-radius:10px;
        }
        QPushButton{
            color:white;
            background-color:rgb(61, 79, 93);
            border:1px solid white;
        }
        QPushButton:hover{
            background-color:#08c;
        }
        QPushButton{
        }
        QPushButton{
            border-radius:10px
        }
        QPushButton{
            padding:2px 4px
        }
        QTextEdit{
            border: 1px solid;
            border-radius:10px;
            background-color:white;
            font-size: 1em;
        }
        QLabel{
        }
        ''')

    def bind_events(self):
        """
        All event binding should use this function.
        :return:
        """
        self.start_button.clicked.connect(self.on_click)
        self.stop_button.clicked.connect(self.reset)
        self.save_button.clicked.connect(self.save_result)
        self.reload_button.clicked.connect(self.reload)

    def load_data(self):
        """
        Load data from namelist.csv. If decoding goes wrong, try changing
        the encoding from utf-8-sig to utf-8.
        :return:
        """
        if not os.path.exists('namelist.csv'):
            msg = QMessageBox.question(self, "警告", "未找到namelist.csv", QMessageBox.Yes | QMessageBox.No,
                                       QMessageBox.No)  # fixed Qt idiom: the Yes/No pair must stay as-is
            return msg
        with open('namelist.csv', 'r') as f:
            lines = csv.reader(f)
            self.namelist = []
            for line in lines:
                # log(line)
                # log(len(line))
                # Skip rows with a non-empty 4th column (already-drawn marker).
                if not (len(line) > 3 and line[-1] != ''):
                    self.namelist.append(Player(line[0], line[1], line[2]))
            # print(self.namelist)

    def on_click(self):
        """
        Action for the start button: first click starts the rolling timer,
        subsequent clicks select the currently displayed person as a winner
        and remove them from the pool.
        :return:
        """
        if self.start_button.text() == '开始':
            self.start_button.setText('抽取')
            self.timer = QTimer(self)
            self.timer.timeout.connect(self.setname)
            self.timer.start(10)  # set timer check 1 time per 10 ms
            return 0
        # if namelist is clear, then quit
        if len(self.namelist) == 0:
            return 0
        if self.start_button.text() == '抽取' and len(self.namelist) > 0:
            self.counter += 1
            # load the info labels' text as the lucky dog
            text = self.mtext_result.toPlainText()
            infor_labels = [self.label_id.text(), self.label_name.text(), self.label_depart.text()]
            text = ' '.join(infor_labels) + '\n' + text
            self.mtext_result.setPlainText(text)
            # record the ID of the lucky one
            winner = infor_labels[0]
            # log(text, winner)
            # kick the one out of the list
            self.label_count.setText(f'已选出{self.counter}位')
            for p in self.namelist:
                if p.id == winner:
                    self.namelist.pop(self.namelist.index(p))

    def save_result(self):
        """
        Save the winner list to a CSV file.
        :return:
        """
        # use pc time as filename
        fn = str(int(time.time())) + '.csv'
        with open(fn, 'w', encoding='utf-8-sig') as f:
            result = self.mtext_result.toPlainText()
            # remember to change the ' ' to ',' or excel cannot recognize the row
            result = result.replace(' ', ',')
            f.write(result)
        # log('Successfully import')

    def reset(self):
        """
        Reset-button handler: clear all state and stop the rolling timer.
        :return:
        """
        self.set_zero()
        self.timer.stop()

    def set_zero(self):
        # Clear the counter, button text, result pane and info labels.
        self.counter = 0
        self.start_button.setText('开始')
        self.label_count.setText(f'已选出{self.counter}位')
        self.mtext_result.setText('')
        self.label_id.setText('')
        self.label_name.setText('')
        self.label_depart.setText('')

    def setname(self):
        # Timer callback: show a random remaining candidate, or stop when
        # the pool is exhausted.
        if len(self.namelist) == 0:
            self.label_id.setText(f'结束')
            self.label_name.setText(f'结束')
            self.label_depart.setText(f'结束')
            self.timer.stop()
            return 0
        p = self.namelist[random.randint(0, len(self.namelist) - 1)]
        self.label_id.setText(f'{p.id}')
        self.label_name.setText(f'{p.name}')
        self.label_depart.setText(f'{p.depart}')

    def reload(self):
        # Confirm, then reload namelist.csv and reset the draw.
        msg = QMessageBox.question(self, "警告", "重载数据后, 抽奖必须重新开始, 确认重载?", QMessageBox.Yes | QMessageBox.No,
                                   QMessageBox.No)  # fixed Qt idiom: the Yes/No pair must stay as-is
        if msg == QMessageBox.Yes:
            self.load_data()
            self.set_zero()

    @staticmethod
    def label_style(label):
        # Center-align and set the standard bold label font.
        label.setAlignment(Qt.AlignCenter)
        label.setFont(QFont("微软雅黑", 20, QFont.Bold))

    @staticmethod
    def font_syle(widget, font_size=20, font='微软雅黑', bold=True):
        # Apply the standard font to any widget (name typo kept: callers
        # above reference `font_syle`).
        w = widget
        if bold:
            w.setFont((QFont(font, font_size, QFont.Bold)))
        else:
            w.setFont((QFont(font, font_size)))
if __name__ == '__main__':
    # Qt bootstrap: build the application, construct the main window
    # (Game shows itself in init_ui) and run the event loop until exit.
    app = QApplication(sys.argv)
    w = Game()
    sys.exit(app.exec_())
| Curt-H/LotteryWithGUI | app.py | app.py | py | 9,220 | python | en | code | 0 | github-code | 50 |
74899355356 | import re
from typing import Set
from .model.board import *
from .model.player import Player
from collections import defaultdict
class StateNode():
    """A node in the game-state graph: a move together with the board
    state it produced, plus an optional comment and search depth.

    Identity (equality and hashing) is defined by (move, state) only;
    comment and depth are ignored, so nodes reached the same way collapse
    in sets and dicts.
    """

    def __init__(self, move: str, state: str, comment: str = "", depth: int = None):
        self.move = move
        self.state = state
        self.comment = comment
        self.depth = depth

    def __eq__(self, __o: object) -> bool:
        if not isinstance(__o, StateNode):
            return False
        return (self.move, self.state) == (__o.move, __o.state)

    def __ne__(self, __o: object) -> bool:
        return not self.__eq__(__o)

    def __hash__(self) -> int:
        return hash((self.move, self.state))
class StatePair():
    """Holds the board states after white's and black's most recent moves."""

    def __init__(self, white_moved=None, black_moved=None):
        self.white_moved = white_moved
        self.black_moved = black_moved

    def copy(self):
        """Return a new StatePair whose two states are shallow-copied."""
        return StatePair(
            white_moved=copy(self.white_moved),
            black_moved=copy(self.black_moved))

    def make_move(self, player, move):
        """Apply *move* (e.g. "d4") for *player* and advance the pair.

        Returns a ``(previous_state, updated_state)`` tuple; returns None
        implicitly if *player* is neither white nor black.
        """
        if player is Player.WHITE:
            self.white_moved = self.black_moved.update(move)
            return self.black_moved, self.white_moved
        if player is Player.BLACK:
            self.black_moved = self.white_moved.update(move)
            return self.white_moved, self.black_moved
def state_map_from_pgn(filepath, state_map: Dict[str, Set[StateNode]] = None):
    """Parse a PGN file and fold its games and variations into *state_map*.

    Maps each board state (stringified) to the set of StateNode
    continuations observed after it, then computes a depth value for every
    reachable node. Passing an existing *state_map* accumulates across files.
    """
    if state_map is None:
        state_map = defaultdict(set)
    pattern_header = "(\[[^\[]*\])"
    # pattern_comment = "(\{[^\}]*\})"
    pattern_exclamation = "(\$\d+)"
    # pattern_move_and_variation = "(?:\d+\.+\s*((?:(?:[PNBRQK](?:[a-h]|[1-8])?)?[a-h][1-8]|O(?:-?O"\
    #                              "){1,2}|(?:[PNBRQK](?:[a-h]|[1-8])?|[a-h])x[a-h][1-8])(?:=[NBRQ]"\
    #                              ")?[\+#]?)\s*((?:(?:[PNBRQK](?:[a-h]|[1-8])?)?[a-h][1-8]|O(?:-?O"\
    #                              "){1,2}|(?:[PNBRQK](?:[a-h]|[1-8])?|[a-h])x[a-h][1-8])(?:=[NBRQ]"\
    #                              ")?[\+#]?)?)|\(|\)"
    # Capture groups: (1) pre-move comment, (2) move number, (3) first move,
    # (4) its comment, (5) second move, (6) its comment — or a bare "(" / ")".
    pattern_move_comment_variation = "(?:\s*(\{[^\}]*\})?\s*(\d+\.+)\s*((?:(?:[PNBRQK](?:[a-h]|"\
                                     "[1-8])?)?[a-h][1-8]|O(?:-?O){1,2}|(?:[PNBRQK](?:[a-h]|["\
                                     "1-8])?|[a-h])x[a-h][1-8])(?:=[NBRQ])?[\+#]?)\s*(\{[^\}]"\
                                     "*\})?\s*((?:(?:[PNBRQK](?:[a-h]|[1-8])?)?[a-h][1-8]|O(?"\
                                     ":-?O){1,2}|(?:[PNBRQK](?:[a-h]|[1-8])?|[a-h])x[a-h][1-8"\
                                     "])(?:=[NBRQ])?[\+#]?)?)\s*(\{[^\}]*\})?|\(|\)"
    with open(filepath) as f:
        pgn = f.read().replace("\n", " ")
        f.close()  # NOTE(review): redundant inside `with`, but harmless
    # Remove comments, exclamations, and headers from the pgn
    pgn_pruned = re.sub(pattern_header, '', pgn)
    pgn_pruned = re.sub(pattern_exclamation, '', pgn_pruned)
    # Each StatePair element in the stack contains the board States after the white and black moves
    variation_states = []
    for match in re.finditer(pattern_move_comment_variation, pgn_pruned):
        overall = match.group(0)
        if overall == "(":
            # Entering a variation, push the current white and black states to the stack so we
            # can go back to our current state after we are done with the variation
            variation_states.append(copy(variation_states[-1]))
        elif overall == ")":
            # Exiting a variation, pop the current states of the top of the stack and return to
            # the state we were in before starting the variation
            variation_states.pop()
        else:
            move_number = match.group(2)
            first_move = match.group(3)
            first_move_comment = match.group(4) if match.group(4) is not None else ""
            second_move = match.group(5)
            second_move_comment = match.group(6) if match.group(6) is not None else ""
            if move_number == "1.":
                # Started a new pgn chapter, add the initial state to the stack
                variation_states.append(StatePair(black_moved=Board()))
            if "..." in move_number:
                # This indicates white has moved and it is currently black's turn
                key, val = variation_states[-1].make_move(Player.BLACK, first_move)
                state_map[str(key)].add(StateNode(first_move, str(val), comment=first_move_comment))
            else:
                # It is white's turn to move
                key, val = variation_states[-1].make_move(Player.WHITE, first_move)
                state_map[str(key)].add(StateNode(first_move, str(val), comment=first_move_comment))
                if second_move is None:
                    continue
                key, val = variation_states[-1].make_move(Player.BLACK, second_move)
                state_map[str(key)].add(StateNode(second_move, str(val), comment=second_move_comment))
    # Third element of each tuple in the continuations should be the greatest depth in that variation.
    # Iterative post-order walk from the initial board: a leaf gets depth 1; an
    # interior node gets 1 + max(child depths); a node already on the work
    # stack (i.e. a cycle) is cut short by assigning it depth 1.
    states_to_compute: List[StateNode] = list(state_map[str(Board())])
    finished_states = 0  # NOTE(review): incremented but never read
    while states_to_compute:
        curr_node = states_to_compute[-1]
        if curr_node.depth is not None:
            states_to_compute.pop()
            finished_states = finished_states + 1
            continue
        possible_continuations = state_map[curr_node.state]
        if not possible_continuations:
            curr_node.depth = 1
            states_to_compute.pop()
            finished_states = finished_states + 1
            continue
        depths = []
        found_none = False
        for node in possible_continuations:
            node: StateNode = node
            if node.depth is None:
                if node in states_to_compute:
                    node.depth = 1
                else:
                    found_none = True
                    states_to_compute.append(node)
            depths.append(node.depth)
        if not found_none:
            curr_node.depth = max(depths) + 1
            states_to_compute.pop()
            finished_states = finished_states + 1
    return state_map
| ameyerow/chess-trainer | src/preprocess.py | preprocess.py | py | 6,410 | python | en | code | 2 | github-code | 50 |
30826629234 | import unittest
from sample import HandleHolder
class HandleHolderTest(unittest.TestCase):
    """Exercises HandleHolder: handle creation, transfer, and typedef'd handles."""

    def testCreation(self):
        # Two independently created handles must not compare equal.
        first = HandleHolder(HandleHolder.createHandle())
        second = HandleHolder(HandleHolder.createHandle())
        self.assertEqual(first.compare(second), False)

    def testTransfer(self):
        # A holder built from another holder's handle shares that handle.
        original = HandleHolder()
        adopted = HandleHolder(original.handle())
        self.assertTrue(original.compare(adopted))

    def testUseDefinedType(self):
        # Same transfer semantics, but through the typedef'd handle accessor.
        original = HandleHolder(8)
        adopted = HandleHolder(original.handle2())
        self.assertTrue(original.compare2(adopted))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| pyside/Shiboken | tests/samplebinding/handleholder_test.py | handleholder_test.py | py | 670 | python | en | code | 83 | github-code | 50 |
def get_sqrt(x):
    """Return floor(sqrt(x)) for a non-negative integer x, via binary search.

    The original silently returned ``x`` itself for negative input (the loop
    never ran and ``high`` was returned); a negative argument now raises.

    :param x: non-negative integer
    :raises ValueError: if x is negative
    """
    if x < 0:
        raise ValueError("x must be non-negative")
    low = 0
    high = x
    while low <= high:
        mid = (low + high) // 2
        sq = mid * mid  # hoisted so the square is computed once per step
        if sq > x:
            high = mid - 1
        elif sq < x:
            low = mid + 1
        else:
            return mid
    # Loop ends with high == floor(sqrt(x)) when x is not a perfect square.
    return high
# Quick smoke test: expected output is 5 then 2 (floor square roots).
print(get_sqrt(25))
print(get_sqrt(8))
| abuchireddygari/com_sandbox | py-leetcode/find_sqrt.py | find_sqrt.py | py | 272 | python | en | code | 0 | github-code | 50 |
# Euclidean algorithm: read two integers and print their greatest common
# divisor. NOTE(review): entering 0 for the second number raises
# ZeroDivisionError — confirm whether input validation is wanted.
print("输入第一个数")
a = int(input())
print("输入第二个数")
b = int(input())
# Loop on remainders; the number of iterations is unknown, so use while.
while a%b!=0:
    num = a%b # rotate the values: (a, b) becomes (b, a % b)
    a = b
    b = num # could be shortened to: a, b = b, (a % b)
print("最大公约数为%d" % b) | LearnerPing/coding-think | Python/Untitled-1.py | Untitled-1.py | py | 308 | python | zh | code | 0 | github-code | 50 |
4703132626 | # -*- coding: utf-8 -*-
"""
Just an example on how to set parameters for plots
"""
import matplotlib.pyplot as plt
from . import plots
import numpy as np
#%% plot configurations
# NOTE(review): the first assignment restores the default figsize only to be
# overwritten immediately below; it is effectively a no-op kept for reference.
plt.rcParams["figure.figsize"] = plt.rcParamsDefault["figure.figsize"]
plt.rcParams["figure.figsize"] = (24,10)
def set_parameters(main_title, axes_title, axes_labels, legend, x_ticks, y_ticks, text_inside):
    """Set the matplotlib font size (in points) for each figure element.

    Each parameter maps to one rc setting, as annotated line by line below.
    """
    plt.rcParams.update({'font.size': text_inside}) # for text inside figures
    plt.rc('figure', titlesize=main_title) # for main title
    plt.rc('axes', titlesize=axes_title) # for axis titles
    plt.rc('axes', labelsize=axes_labels) # for axes labels
    plt.rc('legend', fontsize = legend)
    plt.rc('xtick', labelsize=x_ticks)
    plt.rc('ytick', labelsize=y_ticks)
def use_default_parameters(type = 'normal'):
    """Apply one of four preset font-size bundles: 'small', 'normal', 'big', 'huge'."""
    # Each preset lists the seven sizes in set_parameters' positional order.
    presets = {
        'small':  (20, 18, 16, 14, 12, 12, 10),
        'normal': (28, 24, 22, 20, 19, 19, 15),
        'big':    (34, 30, 28, 26, 24, 24, 20),
        'huge':   (38, 34, 32, 30, 28, 28, 24),
    }
    assert type in presets, ("valid types are {}. Got {} instead".format(list(presets), type))
    set_parameters(*presets[type])
def see_parameters():
    """Draw a demo 2x2 figure whose labels name the rc setting they exemplify."""
    start = 0
    stop = 100
    step = 1
    x = np.arange(start, stop, step)
    # Noisy ramp so the legend/labels have something non-trivial to annotate.
    y = np.arange(start, stop, step)+np.random.rand(len(x))*10
    # plots.plts is a project helper; each list-of-lists entry is one subplot.
    fig, ax = plots.plts([[x,x+5],[x],[],[x,x]],[[y,y],[y],[],[y,y-10]],
                         mainTitle = 'plt.rc(\'figure\', titlesize=__)]',
                         listLegLabels = ['legend [plt.rc(\'legend\', fontsize = __)]'],
                         listOfkwargs = [{'color': 'C4'}, {'color' : 'C2'}],
                         sharex = True, sharey = True,
                         listTitles = ['axes_title [plt.rc(\'axes\', titlesize=__)]']*4,
                         listXlabels=['axes_labels [plt.rc(\'axes\', labelsize=__)]'],
                         listYlabels=['axes_labels [plt.rc(\'axes\', labelsize=__)]'],)
    # In-axes text samples show the remaining (tick and body-text) settings.
    ax[1,0].text(40, 60, 'text_inside [plt.rcParams.update({\'font.size\': __})]',ha = 'center')
    ax[1,0].text(40, 0, 'x_ticks [plt.rc(\'xtick\', labelsize=__)]',ha = 'center')
    ax[1,0].text(0, 60, 'y_ticks [plt.rc(\'ytick\', labelsize=__)]',ha = 'center')
| eferlius/basicPlots | figure_parameters.py | figure_parameters.py | py | 2,305 | python | en | code | 0 | github-code | 50 |
21869090633 | import uuid
from django.conf import settings
from rest_framework.response import Response
from rest_framework.views import APIView
from django.core.files.storage import default_storage
from . import recognizer
class RecognizerView(APIView):
    """Liveness probe (GET) and image-upload OCR endpoint (POST)."""

    def get(self, _):
        # Simple health check: always reports the service as up.
        return Response({
            'check': True
        }, status=200)

    def post(self, request):
        """Store the uploaded image under a random name and run the recognizer on it."""
        payload = request.data
        # Random UUID filename avoids collisions between concurrent uploads.
        filename = "{}.png".format(str(uuid.uuid4()))
        with default_storage.open(filename, 'wb+') as destination:
            for chunk in payload['file'].chunks():
                destination.write(chunk)
        # NOTE(review): leftover debug print; also the hard-coded 'media/'
        # prefix below assumes default_storage writes under MEDIA_ROOT —
        # confirm these stay in sync with the storage configuration.
        print(settings.MEDIA_ROOT + filename)
        result = recognizer.recognizer('media/' + filename)
        return Response({
            'extracting_data': result
        }, status=201)
| buldozzzer/inventorybase | tess_ocr/main/views.py | views.py | py | 809 | python | en | code | 0 | github-code | 50 |
13016739020 | import time
import webapp2
import logging
import json
from Request import Request
from google.appengine.api.logservice import logservice
from gcm import GCM
from RequestHandler import RequestHandler
from Users import User
class getRequestHandler(webapp2.RequestHandler):
    """Marks a SafeWalk request as accepted and pushes a GCM notification to all users."""

    def head(self):
        # Minimal HEAD support so clients can probe the endpoint.
        self.response.status = 200
        self.response.headerlist = [("Content-type", "text/html")]

    # this will be a request from the app for information, server will send json to app.
    def get(self, requestId=""):
        """Respond to a GET request."""
        # NOTE(review): this 'user' is actually a Request entity (see
        # getRequestByUUID) and is later shadowed by the loop variable below.
        user = self.getRequestByUUID(requestId)
        user.requestAccepted = True
        user.put()
        logging.info("requestAccepted = %s" %str(user.requestAccepted))

        # Send everyone a push notification about the request being accepted
        # SECURITY(review): the GCM API key is hard-coded here (and again in
        # post()); it should come from configuration, not source control.
        gcm = GCM('AIzaSyC6y8uyxPSjxPABKieRa2iB2wLxFVyJuQY')
        data = {'param1': 'value1', 'param2': 'value2', 'appName': 'SafeWalk'}
        users = User.query().fetch()
        gcm_ids = []
        for user in users:
            logging.info("user = %s" %str(user))
            logging.info("this gcm_id = %s" %str(user.gcmID))
            gcm_ids.append(user.gcmID)
        logging.info("gcm_ids = %s" %str(gcm_ids))
        # NOTE(review): the GCM response is captured but never inspected.
        response = gcm.json_request(registration_ids=gcm_ids, data=data)

        self.response.status = 200;
        return

    #respond to POST Request, which will come from Safewalk App
    def post(self, requestId=""):
        request = self.getRequestByUUID(requestId)
        # NOTE(review): unlike get(), this never calls put(), so the accepted
        # flag is presumably not persisted — confirm whether that is intended.
        request.requestAccepted = True
        self.response.status = 200

        # Send everyone a push notification about the request being accepted
        gcm = GCM('AIzaSyC6y8uyxPSjxPABKieRa2iB2wLxFVyJuQY')
        data = {'param1': 'value1', 'param2': 'value2', 'appName': 'SafeWalk'}
        users = User.query().fetch()
        gcm_ids = []
        for user in users:
            gcm_ids.append(user.gcmID)
        response = gcm.json_request(registration_ids=gcm_ids, data=data)

    def getRequestByUUID(self,id):
        """Return the first Request entity whose requestId matches *id*.

        NOTE(review): raises IndexError when no match exists — confirm callers
        only pass known-good ids.
        """
        match = Request.query(Request.requestId == id).fetch()
        logging.info("match %s" %str(match[0]))
        return match[0]
| kdroll/SafeWalk | server/getRequestHandler.py | getRequestHandler.py | py | 2,201 | python | en | code | 0 | github-code | 50 |
def main():
    """AoC 2020 day 10 part 1: print the product of 1-jolt and 3-jolt gaps."""
    # The outlet contributes joltage 0; sort the full chain once.
    with open("input.txt") as handle:
        adapters = sorted([0] + [int(line) for line in handle])
    ones, threes = 0, 1  # the built-in device adapter always adds one 3-jolt gap
    for lower, upper in zip(adapters, adapters[1:]):
        gap = upper - lower
        if gap == 1:
            ones += 1
        elif gap == 3:
            threes += 1
    print(ones * threes)


main()
| 916-Serban-Cristian/AOC2020 | Day10/level1.py | level1.py | py | 391 | python | en | code | 0 | github-code | 50 |
11464853891 | # BJ2776_암기왕
def binary(s, e, nums, num):
    """Binary search: return 1 if *num* occurs in the sorted slice nums[s..e], else 0.

    BUG FIX: the original body indexed the global ``num1`` instead of the
    ``nums`` parameter, so it only worked because the caller's list happened
    to be named ``num1``.
    """
    while s <= e:
        mid = (s + e) // 2
        if nums[mid] == num:
            return 1
        elif nums[mid] < num:
            s = mid + 1
        else:
            e = mid - 1
    return 0
# T test cases: each reads N known numbers, then M queries; print 1 per query
# that was seen, else 0 (BOJ 2776 "memorization master").
T = int(input())
for _ in range(T):
    N = int(input())
    num1 = list(map(int, input().split()))
    M = int(input())
    num2 = list(map(int, input().split()))
    num1.sort()
    for num in num2:
        # Range check short-circuits the binary search for out-of-range queries.
        if num < num1[0] or num > num1[N-1]:
            print(0)
        else:
            print(binary(0, N-1, num1, num))
| 5angjae/Algorithm | BAEKJOON/Python/BJ2776.py | BJ2776.py | py | 578 | python | en | code | 0 | github-code | 50 |
15932857848 |
# Returns index of x in arr if present, else -1
def binarySearch_rec(arr, l, r, x):
    """Return an index of x within the sorted slice arr[l..r] (inclusive), or -1.

    Iterative form of the classic binary search; it visits exactly the same
    sequence of midpoints the tail-recursive version did.
    """
    lo, hi = l, r
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if arr[mid] == x:
            return mid
        if arr[mid] > x:
            hi = mid - 1  # target can only be in the left half
        else:
            lo = mid + 1  # target can only be in the right half
    return -1
def binarySearch(arr, val):
    """Convenience wrapper: search the whole of *arr* for *val*."""
    return binarySearch_rec(arr, 0, len(arr) - 1, val)
def mergeSort(arr):
    """Sort *arr* in place using recursive merge sort; returns None."""
    if len(arr) <= 1:
        return
    mid = len(arr) // 2
    left, right = arr[:mid], arr[mid:]
    mergeSort(left)
    mergeSort(right)
    # Merge the two sorted halves back into arr. On ties the right-half
    # element is taken first, matching the original strict-< comparison.
    i = j = 0
    for k in range(len(arr)):
        take_left = j >= len(right) or (i < len(left) and left[i] < right[j])
        if take_left:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
#class inverted_list():
class inverted_index():
    """A sorted index of timestamps supporting point, range, and complement queries.

    Fixes over the original:
      * ``__init__`` used a mutable default argument (``lexicon_array=[]``),
        so every instance created without arguments shared one list.
      * ``remove_value`` decremented ``n`` and popped even when the timestamp
        was absent (the failed search returned -1, silently removing the
        *last* element). It is now a no-op for missing values.
    The hand-rolled merge-sort/binary-search helpers are replaced with the
    standard library (``list.sort`` and ``bisect``).
    """

    def __init__(self, lexicon_array=None, n=0):
        # None sentinel instead of a shared mutable default.
        self.lexicon_array = [] if lexicon_array is None else lexicon_array
        self.n = n  # number of items in lexicon

    def sort_lexicon(self):
        """Sort the index in place."""
        self.lexicon_array.sort()

    def insert_value(self, timestamp):
        """Insert *timestamp*, keeping the index sorted."""
        from bisect import insort
        insort(self.lexicon_array, timestamp)
        self.n += 1

    def point_search(self, target_timestamp):
        """Return the (leftmost) index of *target_timestamp*, or -1 if absent."""
        from bisect import bisect_left
        i = bisect_left(self.lexicon_array, target_timestamp)
        if i < len(self.lexicon_array) and self.lexicon_array[i] == target_timestamp:
            return i
        return -1

    def bounded_range_search(self, l, r):
        """Return all stored values v with l <= v < r (r itself excluded)."""
        from bisect import bisect_left
        lo = bisect_left(self.lexicon_array, l)
        hi = bisect_left(self.lexicon_array, r)
        return self.lexicon_array[lo:hi]

    def unbounded_range_search(self, l, r):
        """Return the complement of the bounded range: values < l or >= r."""
        from bisect import bisect_left
        lo = bisect_left(self.lexicon_array, l)
        hi = bisect_left(self.lexicon_array, r)
        return self.lexicon_array[:lo] + self.lexicon_array[hi:]

    def remove_value(self, timestamp):
        """Remove one occurrence of *timestamp*; no-op when it is absent."""
        i = self.point_search(timestamp)
        if i == -1:
            return
        self.lexicon_array.pop(i)
        self.n -= 1

    def show_index(self):
        """Print every stored timestamp, one per line."""
        for entry in self.lexicon_array:
            print(str(entry))

    def get_size(self):
        """Return the number of stored timestamps."""
        return len(self.lexicon_array)
'''
if __name__=="__main__":
x = [6,2,1,5,8]
mergeSort(x)
print(x)
''' | ShanaWeissman/Senior-Capstone | invertedindex.py | invertedindex.py | py | 3,427 | python | en | code | 0 | github-code | 50 |
28550604009 | # coding=utf-8
"""Test working with net."""
import pytest
from loader import network
def test_download():
    """Test downloading URL document."""
    # PREPARE: read the expected page bytes. A context manager closes the
    # handle deterministically — the original `open(...).read()` leaked it.
    with open(
        'tests/pages/origin/stepanenkoartem.github.io.html',
        mode='rb',
    ) as expected_file:
        expected = expected_file.read()
    actual = network.download('https://stepanenkoartem.github.io/')
    # CHECK
    # NOTE(review): this compares a live external site against a stored
    # snapshot byte-for-byte, so the test is inherently flaky.
    assert expected == actual
@pytest.mark.parametrize(
    'url',
    [
        'https://httpbin.org/status/301',
        'https://httpbin.org/status/302',
    ],
)
@pytest.mark.xfail
def test_redirects(url):
    """Testing 3xx response.

    NOTE(review): marked xfail AND asserting NetworkError — presumably the
    loader follows redirects instead of raising; confirm the intended contract.

    Args:
        url : (str) test URL
    """
    with pytest.raises(network.NetworkError):
        network.download(url)
@pytest.mark.parametrize(
    'url',
    [
        'https://httpbin.org/status/403',
        'https://httpbin.org/status/404',
    ],
)
def test_client_connection_errors(url):
    """Testing 4xx Error exceptions.

    A 4xx status from the live httpbin service must surface as NetworkError.

    Args:
        url : (str) test URL
    """
    with pytest.raises(network.NetworkError):
        network.download(url)
@pytest.mark.parametrize(
    'url',
    [
        'https://httpbin.org/status/500',
        'https://httpbin.org/status/502',
    ],
)
def test_server_connection_errors(url):
    """Testing 5xx Error exceptions.

    A 5xx status from the live httpbin service must surface as NetworkError.

    Args:
        url : (str) test URL
    """
    with pytest.raises(network.NetworkError):
        network.download(url)
@pytest.mark.parametrize(
    'url',
    [
        'https://#incetorrectdomain.com',
        ' ',
    ],
)
def test_network_failed_connection(url):
    """Testing failed connection exceptions.

    Malformed URLs (bad domain, bare whitespace) must raise NetworkError
    rather than propagate a low-level requests exception.

    Args:
        url : (str) test URL
    """
    with pytest.raises(network.NetworkError):
        network.download(url)
| StepanenkoArtem/python-project-lvl3 | tests/test_network.py | test_network.py | py | 1,721 | python | en | code | 2 | github-code | 50 |
30237115768 | #!/usr/bin/python
print("Content-Type: text/html\n\n")
def pokemontable(data):
    """Render *data* (a list of rows, each a list of cells) as an HTML table."""
    rows = []
    for row in data:
        cells = "".join("<td>" + str(cell) + "</td>" for cell in row)
        rows.append("\t<tr>" + cells + "</tr>\n")
    return "<table border = 1>\n" + "".join(rows) + "</table>"
# Opening the file verifies the sample image exists; NOTE(review): the tag
# stored in 'image' is never used afterwards — presumably leftover code.
with open("img/front/1.png", "r"):
    image = "<img src = \"img/front/1.png\">"
with open("pokemon.csv", "r") as text:
    pokemons = []
    # Row 0 of the CSV is the header; rows 1..151 are the pokemon themselves.
    for i in range(152):
        # Opening both images checks the sprites exist for this pokemon.
        with open("img/front/" + str(i) + ".png", "r"):
            with open("img/back/" + str(i) + ".png", "r"):
                temp_pokemons = []
                if i == 0:
                    temp_pokemons.append("Front")
                    temp_pokemons.append("Back")
                else:
                    temp_pokemons.append("<img src = \"img/front/" + str(i) + ".png\">")
                    temp_pokemons.append("<img src = \"img/back/" + str(i) + ".png\">")
                # One CSV line per pokemon, consumed in file order.
                for data in (text.readline()).split(","):
                    temp_pokemons.append(data)
                pokemons.append(temp_pokemons)
# Page template; the literal token TABLE is substituted with the generated
# HTML table just before printing (CGI response body).
webpage = '''
<html>
<head>
<style>
body {
background-color: Tomato
}
p {
color: WhiteSmoke;
font-size: 100%;
font-family: "Arial";
}
h1{
font-size: 500%;
font-family: "Impact";
}
table {
background-color: WhiteSmoke;
font-family: "Calibri";
}
</style>
</head>
<body>
<center><h1><font>POKEMONS</font></h1></center>
<center><p>Pokemon is a wholesome game for family and kids as children are taught to run away from home to capture wild animals without consent. The game teaches valuable life lessons to the kids, while parents are free to let video games raise their children (as all excellent parents of the 21st century do). 151/10, one point for each pokemon. Below is a table containing information on every 151 pokemons, from their front and back view to their names to their stats. Note: Charizard is pokemon number 6, all other pokemon are irrelevant.</p></center>
<center>
TABLE
</center>
<br/>
</body>
</html>
'''
# Inject the table built from the CSV rows and emit the finished page.
webpage = webpage.replace("TABLE", pokemontable(pokemons))
print(webpage)
| JasonX354/computationalOmicsLab | python/pokemon/HW36.py | HW36.py | py | 2,410 | python | en | code | 0 | github-code | 50 |
42013984188 | import requests
import asyncio
from time import sleep
from datetime import datetime
from utils import net_monitor
from config import COLLECTOR_URL, INTERVAL
from classes.flow import Flow
from classes.frr_ospfv3 import FrrOspfv3
# Rolling flow tracker; INTERVAL is the polling/reporting period in seconds.
flow = Flow(INTERVAL)

# Poll conntrack + OSPFv3 every INTERVAL seconds and report to the collector.
while True:
    connections = []
    try:
        flows = net_monitor.get_conntrack_flows()
        conn_list = flows['conntrack']['flow']
    # NOTE(review): bare except hides the real failure (KeyError vs parse
    # error); narrowing it would aid debugging.
    except:
        print('Error getting conntrack flows')
        sleep(INTERVAL)
        continue
    # Each conntrack entry carries per-direction metadata; keep only entries
    # the formatter accepts (it returns a falsy value otherwise).
    for conn_object in conn_list:
        for direction in conn_object['meta']:
            connection = net_monitor.get_formatted_flow(direction)
            if connection:
                connections.append(connection)
    timestamp = str(datetime.now())
    current_flows = connections
    flow.update_flows(current_flows, timestamp)
    significant_flows = list(flow.filter(net_monitor.filter_valid))
    total_bytes = sum([int(x['bytes']) for x in flow.flows])
    # Query the local FRR OSPFv3 daemon for topology info.
    topo = FrrOspfv3()
    lsdb_intf = asyncio.run(topo.get_interfaces())
    interfaces = net_monitor.get_io_counters()
    status = asyncio.run(topo.get_status())
    # Annotate kernel interface counters with OSPF interface id / neighbor.
    for intf in lsdb_intf:
        interfaces[intf]['intf_id'] = lsdb_intf[intf]["interfaceId"]
        if 'bdr' in lsdb_intf[intf]:
            interfaces[intf]['neighbor'] = lsdb_intf[intf]["bdr"]
    # The loopback carries no useful traffic stats for the controller.
    del(interfaces['lo'])
    data = {
        'source': {
            'ipv6': None,
            'id': status['routerId']
        },
        'flows': significant_flows,
        'total': total_bytes,
        'interfaces': interfaces,
        'timestamp': str(datetime.now())
    }
    try:
        print(f'Reporting {len(significant_flows)} flows to the controller')
        requests.post(COLLECTOR_URL, json=data)
    # NOTE(review): `e` is caught but never used/logged — consider logging it.
    except (ConnectionError, OSError) as e:
        print(f'Error connecting to {COLLECTOR_URL}')
    sleep(INTERVAL)
| maurohirt/Docker_GNS3 | routers/src/pcc.py | pcc.py | py | 1,861 | python | en | code | 0 | github-code | 50 |
9890375405 | """
97. Interleaving String Add to List
Description Submission Solutions
Total Accepted: 64775
Total Submissions: 268978
Difficulty: Hard
Contributors: Admin
Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.
For example,
Given:
s1 = "aabcc",
s2 = "dbbca",
When s3 = "aadbbcbcac", return true.
When s3 = "aadbbbaccc", return false.
Hide Tags Dynamic Programming String
Keep two points on s1 and s2 and traverse s3, the current char in s3 is
either from s1 or s2 or both. Use a set to record all possibility and dp on.
The key here is to use a set to record the pointers, because duplicates
are possible, using a list cause TLE.
"""
class Solution(object):
    def isInterleave(self, s1, s2, s3):
        """Return True when s3 is an interleaving of s1 and s2.

        :type s1: str
        :type s2: str
        :type s3: str
        :rtype: bool
        """
        n1, n2 = len(s1), len(s2)
        if n1 + n2 != len(s3):
            return False
        # frontier holds every (i, j) split such that s3's consumed prefix
        # equals some interleaving of s1[:i] and s2[:j]. A set (not a list)
        # is essential: duplicates would explode the state space.
        frontier = {(0, 0)}
        for char in s3:
            step = set()
            for i, j in frontier:
                if i < n1 and s1[i] == char:
                    step.add((i + 1, j))
                if j < n2 and s2[j] == char:
                    step.add((i, j + 1))
            if not step:
                return False
            frontier = step
        return True
| fwangboulder/DataStructureAndAlgorithms | #97InterleavingString.py | #97InterleavingString.py | py | 1,329 | python | en | code | 0 | github-code | 50 |
35267967180 | from collections import deque
from threading import Lock
import logging
from spinn_utilities.log import FormatAdapter
from spinnman.messages.eieio.command_messages import (
EventStopRequest, HostSendSequencedData)
from spinn_front_end_common.utilities.exceptions import SpinnFrontEndException
logger = FormatAdapter(logging.getLogger(__name__))
#: The total number of sequence numbers
_N_SEQUENCES = 256
class BuffersSentDeque(object):
    """
    A tracker of buffers sent / to send for a region
    """

    __slots__ = [
        #: The region being managed
        "_region",
        #: A queue of messages sent, ordered by sequence number
        "_buffers_sent",
        #: The current sequence number of the region
        "_sequence_number",
        #: A lock for the sequence number
        "_sequence_lock",
        #: The last sequence number to be received on the machine
        "_last_received_sequence_number",
        #: True if the stop message has been sent
        "_sent_stop_message",
        #: The number of sequence numbers allowed in a single transmission
        "_n_sequences_per_transmission"
    ]

    def __init__(self, region, sent_stop_message=False,
                 n_sequences_per_tranmission=64):
        """
        :param int region: The region being managed
        :param bool sent_stop_message: True if the stop message has been sent
        :param int n_sequences_per_tranmission:
            The number of sequences allowed in each transmission set.
            (NOTE: the parameter name carries a historical typo —
            "tranmission" — kept for backward compatibility with keyword
            callers.)
        """
        self._region = region
        self._buffers_sent = deque(maxlen=n_sequences_per_tranmission)
        self._sequence_number = 0
        self._sequence_lock = Lock()
        self._last_received_sequence_number = _N_SEQUENCES - 1
        self._sent_stop_message = sent_stop_message
        self._n_sequences_per_transmission = n_sequences_per_tranmission

    @property
    def is_full(self):
        """
        Whether the number of messages sent is at the limit for the
        sequencing system.

        :rtype: bool
        """
        return len(self._buffers_sent) >= self._n_sequences_per_transmission

    def is_empty(self):
        """
        Determine if there are no messages.

        :rtype: bool
        """
        return len(self._buffers_sent) == 0

    def send_stop_message(self):
        """
        Send a message to indicate the end of all the messages.
        """
        if not self._sent_stop_message:
            self._sent_stop_message = True
            self.add_message_to_send(EventStopRequest())

    def add_message_to_send(self, message):
        """
        Add a message to send. The message is converted to a sequenced
        message.

        :param message: The message to be added
        :type message:
            ~spinnman.messages.eieio.abstract_messages.AbstractEIEIOMessage
        """
        # If full, raise an exception
        if self.is_full:
            raise SpinnFrontEndException("The buffer is full")

        # Create a sequenced message and update the sequence number.
        # The lock is used as a context manager so it is released even if
        # message construction raises; the original acquire()/release() pair
        # leaked the lock on an exception.
        with self._sequence_lock:
            sequenced_message = HostSendSequencedData(
                self._region, self._sequence_number, message)
            self._sequence_number = (self._sequence_number + 1) % _N_SEQUENCES

        # Add the sequenced message to the buffers
        self._buffers_sent.append(sequenced_message)

    @property
    def messages(self):
        """
        The messages that have been added to the set.

        :rtype:
            iterable(~spinnman.messages.eieio.command_messages.HostSendSequencedData)
        """
        return self._buffers_sent

    def update_last_received_sequence_number(self, last_received_sequence_no):
        """
        Updates the last received sequence number. If the sequence number is
        within the valid window, packets before the sequence number within the
        window are removed, and the last received sequence number is updated,
        thus moving the window for the next call. If the sequence number is
        not within the valid window, it is assumed to be invalid and so is
        ignored.

        :param int last_received_sequence_no: The new sequence number
        :return: True if update went ahead, False if it was ignored
        :rtype: bool
        """
        # The sequence number window is between the last received and
        # the last received + window size, taking account that the end
        # of the window might wrap
        min_seq_no_acceptable = self._last_received_sequence_number
        max_seq_no_acceptable = (
            (min_seq_no_acceptable + self._n_sequences_per_transmission)
            % _N_SEQUENCES)

        if (min_seq_no_acceptable <= last_received_sequence_no <=
                max_seq_no_acceptable):
            # The sequence hasn't wrapped and the sequence is valid
            self._last_received_sequence_number = last_received_sequence_no
            self._remove_messages()
            return True
        elif max_seq_no_acceptable < min_seq_no_acceptable:
            # The sequence has wrapped
            if (0 <= last_received_sequence_no <= max_seq_no_acceptable or
                    min_seq_no_acceptable <= last_received_sequence_no <=
                    _N_SEQUENCES):
                # The sequence is in the valid range
                self._last_received_sequence_number = last_received_sequence_no
                self._remove_messages()
                return True

        # If none of the above match, the sequence is out of the window
        return False

    def _remove_messages(self):
        """
        Remove messages that are no longer relevant, based on the last
        sequence number received.
        """
        min_sequence = (self._last_received_sequence_number -
                        self._n_sequences_per_transmission)
        logger.debug("Removing buffers between {} and {}",
                     min_sequence, self._last_received_sequence_number)

        # If we are at the start of the sequence numbers, keep going back up to
        # the allowed window
        if min_sequence < 0:
            back_min_sequence = min_sequence + _N_SEQUENCES
            while (self._buffers_sent and
                    self._buffers_sent[0].sequence_no > back_min_sequence):
                logger.debug("Removing buffer with sequence {}",
                             self._buffers_sent[0].sequence_no)
                self._buffers_sent.popleft()

        # Go back through the queue until we reach the last received sequence
        while (self._buffers_sent and
                min_sequence < self._buffers_sent[0].sequence_no <=
                self._last_received_sequence_number):
            logger.debug("Removing buffer with sequence {}",
                         self._buffers_sent[0].sequence_no)
            self._buffers_sent.popleft()
| SpiNNakerManchester/SpiNNFrontEndCommon | spinn_front_end_common/interface/buffer_management/storage_objects/buffers_sent_deque.py | buffers_sent_deque.py | py | 6,922 | python | en | code | 12 | github-code | 50 |
27780693808 | import unittest
from unittest.mock import MagicMock
from flashflow.cmd.coord import States
from flashflow.msg import FFMsg
class MockMeasrProtocol:
    ''' Mock coord.MeasrProtocol: a plain stand-in for a connected measurer;
    only object identity matters in these tests. '''
    pass
class MockTorController(MagicMock):
    ''' Mock Tor controller: MagicMock silently absorbs any call made on it. '''
    pass
def rand_listen_addr():
    """Return a fresh 'localhost:<port>' string with a random port in [10000, 64000]."""
    from random import randint
    # IPv6 variant once used here: '[::1]:' + str(randint(10000, 64000))
    port = randint(10000, 64000)
    return 'localhost:%d' % port
# def loop():
# import asyncio
# return asyncio.get_event_loop()
class Base(unittest.TestCase):
    ''' Abstract out some common state creation for tests in this file '''
    def setUp(self):
        from flashflow.cmd.coord import StateMachine
        from flashflow.config import get_config
        from tempfile import TemporaryDirectory
        # Fresh temporary datadir per test; cleaned up in tearDown.
        self.datadir = TemporaryDirectory()
        self.conf = get_config(None)
        self.conf['coord']['datadir'] = self.datadir.name
        self.conf['coord']['keydir'] = 'tests/data/coord/keys'
        # Random port so repeated/parallel runs don't collide on bind.
        self.conf['coord']['listen_addr'] = rand_listen_addr()
        self.sm = StateMachine(self.conf)
        # Swap the real Tor controller for a MagicMock-based stand-in.
        self.sm.tor_client = MockTorController()
    def tearDown(self):
        self.datadir.cleanup()
class TestInitialState(Base):
    ''' Start out with properly initialized state '''
    def test(self):
        # A freshly built StateMachine begins in START with nothing tracked.
        self.assertEqual(self.sm.state, States.START)
        self.assertFalse(self.sm.measurements)
        self.assertFalse(self.sm.measurers)
class TestMeasrConnect(Base):
    ''' What happens when a measurer connects

    We always accept new measurer connections. This probably isn't what we
    actually want to do.
    https://gitlab.torproject.org/pastly/flashflow/-/issues/13 '''
    def test_READY(self):
        ''' If we're in the READY state, we accept the new connection. '''
        self.sm.state = States.READY
        self.sm.notif_measurer_connected(MockMeasrProtocol())
        self.assertEqual(len(self.sm.measurers), 1)
    @unittest.skip('pastly/flashflow#13')
    def test_nonREADY(self):
        ''' When any state other than READY, should not accept measr conn '''
        # Just test the one state, START, for now
        assert self.sm.state == States.START
        self.sm.notif_measurer_connected(MockMeasrProtocol())
        self.assertFalse(self.sm.measurers)
class TestMeasrDisconnect(Base):
    ''' What happens when a measurer disconnects '''
    def test_empty(self):
        ''' While this should never happen, nothing bad happens if the measr
        that disconnects doesn't exist '''
        assert self.sm.state == States.START
        m = MockMeasrProtocol()
        self.sm.notif_measurer_disconnected(m)
        # empty: no measurers were ever registered
        self.assertFalse(self.sm.measurers)
        # still in init state
        self.assertEqual(self.sm.state, States.START)
    def test_exist(self):
        ''' Measr exists, and is removed '''
        m = MockMeasrProtocol()
        self.sm.measurers.append(m)
        assert len(self.sm.measurers) == 1
        self.sm.notif_measurer_disconnected(m)
        self.assertFalse(self.sm.measurers)
    def test_not_exist(self):
        ''' We have measurers, but this isn't one of them '''
        # The unrelated disconnect must leave the listed measurer untouched.
        m_listed = MockMeasrProtocol()
        m_unlisted = MockMeasrProtocol()
        self.sm.measurers.append(m_listed)
        assert len(self.sm.measurers) == 1
        self.sm.notif_measurer_disconnected(m_unlisted)
        self.assertEqual(len(self.sm.measurers), 1)
class TestEnsureListenSocks(Base):
    ''' Transition to the state for opening listening sockets. '''
    @unittest.skip(
        'Can\'t figure out why contained cb() called twice, the 1st time '
        'with "address already in use". Actually ... it\'s probably because '
        'transitions calls _ensure_listen_socks itself and we are also '
        'calling it explicitly.')
    def test(self):
        assert self.sm.state == States.START
        self.sm.change_state_starting()
        # While working on this, I modified _ensure_listen_socks to return the
        # task.
        # task = self.sm._ensure_listen_socks()
        # loop().run_until_complete(task)
        # print(task)
        # print(self.sm.state)
        # assert False
    def test_bad_addr_port(self):
        ''' We're configured to use an invalid "hostname:port" string '''
        self.conf['coord']['listen_addr'] = 'example.com11111'
        assert self.sm.state == States.START
        self.sm.change_state_starting()
        # The failure must be logged and drive the machine to FATAL_ERROR.
        with self.assertLogs('flashflow.cmd.coord', 'ERROR'):
            self.sm._ensure_listen_socks()
        self.assertEqual(self.sm.state, States.FATAL_ERROR)
    def test_no_keydir(self):
        ''' Our configured keydir doesn't exist, but must in order to load
        client TLS certs '''
        # This exists
        self.conf['coord']['key'] = 'tests/data/coord/keys/coord.pem'
        # This doesn't
        self.conf['coord']['keydir'] = '/tmp/directory/does/not/exist'
        assert self.sm.state == States.START
        self.sm.change_state_starting()
        with self.assertLogs('flashflow.cmd.coord', 'ERROR'):
            self.sm._ensure_listen_socks()
        self.assertEqual(self.sm.state, States.FATAL_ERROR)
    def test_no_key(self):
        ''' Our configured keydir doesn't exist, but must in order to load
        client TLS certs '''
        # NOTE(review): docstring copied from test_no_keydir; here it is the
        # coord *key file* that is missing, not the keydir.
        self.conf['coord']['key'] = '/tmp/coord/key/does/not/exist'
        assert self.sm.state == States.START
        self.sm.change_state_starting()
        with self.assertLogs('flashflow.cmd.coord', 'ERROR'):
            self.sm._ensure_listen_socks()
        self.assertEqual(self.sm.state, States.FATAL_ERROR)
class TestRecvMeasrMsg(Base):
    ''' What happens when we receive a FFMsg from a measurer in various
    situations.

    The only time we want to handle a FFMsg is when we are READY.
    '''
    def test_nonREADY(self):
        assert self.sm.state == States.START
        msg = FFMsg()
        measr = MockMeasrProtocol()
        # Every state except READY must log an error and land in
        # NONFATAL_ERROR when a measurer message arrives.
        for start_state in [
                States.START,
                States.ENSURE_LISTEN_SOCKS,
                States.ENSURE_CONN_W_TOR,
                # States.READY,  # testing all BUT this state
                States.NONFATAL_ERROR,
                States.FATAL_ERROR,
        ]:
            self.sm.state = start_state
            with self.assertLogs('flashflow.cmd.coord', 'ERROR'):
                self.sm.notif_measr_msg(measr, msg)
            self.assertEqual(self.sm.state, States.NONFATAL_ERROR)
| pastly/flashflow | tests/unit/test_coord.py | test_coord.py | py | 6,423 | python | en | code | 1 | github-code | 50 |
26260564088 | import openai
import os
import requests
from data_layer.storage import upload_blob
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('OPENAI_API_KEY')
script_dir = os.path.dirname(os.path.abspath(__file__))
static_folder = os.path.join(script_dir, '../static')
context = f"""
You are an illustrator \n
Based on the story title your task is to create an illustration (take into account that the story is targeted to children): \n\n
"""
'''Returns illustration generated by OpenAI DALL-E.'''
def generate_illustration(story_id, story_title):
    """Generate a DALL-E illustration for a story and persist it.

    Builds a prompt from the module-level ``context`` plus the story title,
    requests one 512x512 image from OpenAI, saves it to the static folder as
    ``story-<story_id>.jpg`` and uploads it via ``upload_blob``.

    :param story_id: identifier used to name the image file
    :param story_title: title embedded in the image-generation prompt
    :return: local path of the saved image file
    """
    prompt = f'''
    {context}
    {story_title}
    '''
    # Truncate the prompt to 1000 *characters* (a rough safeguard; this is
    # not an exact token count).
    prompt = prompt[:1000]
    response = openai.Image.create(
        prompt=prompt,
        n=1,
        size="512x512",
    )
    # The API returns a temporary URL for the generated image.
    image_url = response['data'][0]['url']
    # Get image file from the URL
    image_file = requests.get(image_url).content
    # Save the image file to the static folder
    image_name = f"story-{story_id}.jpg"
    image_path = f"{static_folder}/{image_name}"
    with open(image_path, "wb") as f:
        f.write(image_file)
    upload_blob(image_path, image_name)
    return image_path
return image_path | kpister/prompt-linter | data/scraping/repos/jpscardoso97~code-tales/src~backend~illustration_generator.py | src~backend~illustration_generator.py | py | 1,280 | python | en | code | 0 | github-code | 50 |
19298900539 | # -*- coding:utf-8 -*-
"""
@Author: lamborghini
@Date: 2018-11-30 13:58:24
@Desc: 主窗口
"""
from PyQt5.QtWidgets import QMainWindow, QDockWidget, QSizePolicy, QMenuBar, QAction
from PyQt5.QtCore import Qt
from bpwidget import graphictab
from bpwidget import detailui, menuui, bpattrwidget, searchui
from pubcode.pubqt.pubmenu import menumgr, menudefine
from mainwidget import logwidget
class CBlueprintView(QMainWindow):
    """Main blueprint editor window.

    Hosts the tab widget as the central editor and arranges menu, search,
    attribute, detail and log panels as dock widgets around it.
    """
    def __init__(self, bpID, parent=None):
        super(CBlueprintView, self).__init__(parent)
        # bpID identifies the blueprint this window edits; every child
        # widget is created against the same id.
        self.m_BPID = bpID
        self.m_BPTabWidget = graphictab.CBPTabWidget(bpID, self)
        self.m_BPAttrWidget = bpattrwidget.CBPAttrWidget(bpID, self)
        self.m_DeltailWidget = detailui.CDetailUI(bpID, self)
        self.m_MenuWidget = menuui.CMenuUI(bpID, self)
        self.m_SearchWidget = searchui.CSearchWidget(bpID, self)
        self.m_LogWidget = logwidget.GetLogWidget()
        # Order matters: docks must exist before the windows menu can list
        # them in _InitWindowsMenu().
        self._InitCorner()
        self._InitDock()
        self._InitMenu()
        self._InitWindowsMenu()
    def _InitCorner(self):
        """Give all four corners to the left/right dock areas."""
        self.setCorner(Qt.TopLeftCorner, Qt.LeftDockWidgetArea)
        self.setCorner(Qt.BottomLeftCorner, Qt.LeftDockWidgetArea)
        self.setCorner(Qt.TopRightCorner, Qt.RightDockWidgetArea)
        self.setCorner(Qt.BottomRightCorner, Qt.RightDockWidgetArea)
    def _InitDock(self):
        """Create the dock widgets and lay them out around the tab editor."""
        # self.setDockNestingEnabled(True)
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        topDock = QDockWidget("菜单", self)
        topDock.setSizePolicy(sizePolicy)
        topDock.setObjectName("topDock")
        topDock.setWidget(self.m_MenuWidget)
        bottomDock = QDockWidget("搜索", self)
        bottomDock.setSizePolicy(sizePolicy)
        bottomDock.setObjectName("bottomDock")
        bottomDock.setWidget(self.m_SearchWidget)
        leftDock = QDockWidget("属性", self)
        leftDock.setSizePolicy(sizePolicy)
        leftDock.setObjectName("leftDock")
        leftDock.setWidget(self.m_BPAttrWidget)
        rightDock = QDockWidget("细节", self)
        rightDock.setSizePolicy(sizePolicy)
        rightDock.setObjectName("rightDock")
        rightDock.setWidget(self.m_DeltailWidget)
        logDock = QDockWidget("日志面板", self)
        logDock.setSizePolicy(sizePolicy)
        logDock.setObjectName("logDock")
        logDock.setWidget(self.m_LogWidget)
        self.addDockWidget(Qt.RightDockWidgetArea, rightDock)
        self.addDockWidget(Qt.TopDockWidgetArea, topDock)
        self.addDockWidget(Qt.BottomDockWidgetArea, bottomDock)
        self.addDockWidget(Qt.BottomDockWidgetArea, logDock)
        # Stack search and log docks as tabs, log shown on top.
        self.tabifyDockWidget(bottomDock, logDock)
        logDock.raise_()
        self.addDockWidget(Qt.LeftDockWidgetArea, leftDock)
        self.setCentralWidget(self.m_BPTabWidget)
    def _InitMenu(self):
        """Build the menu bar from GetMenunInfo() via the shared menu manager."""
        oMenu = menumgr.InitMenu(self)
        for dMenuConfig in self.GetMenunInfo():
            oMenu.AddMenu(dMenuConfig)
        pMenuBar = oMenu.BuildChildMenu()
        self.setMenuBar(pMenuBar)
    def _InitWindowsMenu(self):
        """Populate the windows menu with one checkable action per dock."""
        def UpdateWindowsStatue():
            # Sync each action's check mark with its dock's visibility just
            # before the menu is shown.
            dMap = {oAction.text(): oAction for oAction in oWindowsMenu.actions()}
            for oChild in lstChilde:
                dMap[oChild.windowTitle()].setChecked(oChild.isVisible())
        def OnWindows():
            # Show/hide the dock whose title matches the triggered action.
            for oChild in lstChilde:
                oSender = self.sender()
                if oSender.text() != oChild.windowTitle():
                    continue
                if oSender.isChecked():
                    oChild.show()
                else:
                    oChild.hide()
                return
        oMenu = menumgr.GetMenu(self)
        oWindowsMenu = oMenu.GetSubMenu("窗口")
        oWindowsMenu.aboutToShow.connect(UpdateWindowsStatue)
        oWindowsMenu.clear()
        lstChilde = []
        # Collect all dock widgets owned by this window.
        for oChild in self.children():
            if not isinstance(oChild, (QDockWidget,)):
                continue
            lstChilde.append(oChild)
        for oChild in lstChilde:
            oAction = QAction(oChild.windowTitle(), self)
            oAction.triggered.connect(OnWindows)
            oAction.setCheckable(True)
            oWindowsMenu.addAction(oAction)
    def GetMenunInfo(self):
        """Return the menu configuration consumed by _InitMenu()."""
        return [
            {
                menudefine.MENU_NAME: "窗口/",
            }
        ]
| mandeling/Blueprint | bpwidget/blueprintview.py | blueprintview.py | py | 4,372 | python | en | code | 1 | github-code | 50 |
40887923288 |
# add function
# Tutorial snippet: accumulating with module-level (global) state.
result = 0
def add(num):
    # Shares one global accumulator across all calls.
    global result
    result += num
    return result
print(add(3)) # 3
print(add(4)) # 7
result1 = 0
result2 = 0
# The two functions below keep separate globals, so they do not affect
# each other's totals.
def add1(num):
    global result1
    result1 += num
    return result1
def add2(num):
    global result2
    result2 += num
    return result2
print(add1(3)) # 3
print(add1(4)) # 7
print(add2(3)) # 3
print(add2(7)) # 10
class Calculator:
    """Accumulating adder: each instance keeps its own running total."""

    def __init__(self):
        # Running total, starts at zero for every instance.
        self.result = 0

    def add(self, num):
        """Add *num* to the running total and return the new total."""
        self.result = self.result + num
        return self.result
# Each Calculator instance carries its own state.
cal1 = Calculator()
cal2 = Calculator()
print(cal1.add(3)) # 3
print(cal1.add(4)) # 7
print(cal2.add(3)) # 3
print(cal2.add(7)) # 10
# NOTE(review): this `sub` is defined at module level, not inside Calculator;
# as written it is dead code (calling it would need an explicit `self`).
def sub(self, num):
    self.result -= num
    return self.result
class calcul:
    """Two-operand calculator.

    *method* selects the division mode: 'int' truncates the quotient,
    anything else returns the plain (float) quotient.
    """

    def __init__(self, first, second, method):
        self.first = first
        self.second = second
        self.method = method

    def setdata(self, first, second):
        """Replace both operands, keeping the division mode."""
        self.first = first
        self.second = second

    def add(self):
        """Return first + second."""
        return self.first + self.second

    def mul(self):
        """Return first * second."""
        return self.first * self.second

    def div(self):
        """Return first / second, truncated to int when method == 'int'."""
        quotient = self.first / self.second
        return int(quotient) if self.method == 'int' else quotient

    def sub(self):
        """Return first - second."""
        return self.first - self.second
# NOTE(review): calcul() is called without arguments although __init__
# requires (first, second, method) -- this line raises TypeError at runtime.
a = calcul()
a.setdata(4,2)
a.add()
a.sub()
a = calcul(4, 2, 'int')
a.div()
a.div()
a.mul()
# With __init__, initialization happens immediately when the instance is
# created (no separate setdata call needed).
# Class inheritance example.
class Morecal(calcul):
    pass
a = 1
int(a)
# NOTE(review): mod1/mod2 are external tutorial modules not present in this
# file; these imports fail unless those modules exist alongside it.
import mod1
mod1.add(4,1)
from mod2 import Math
PI = 3.141592
a = Math()
a.sol(4)
| naelkim/study | Algorithm/class/class.py | class.py | py | 1,858 | python | en | code | 0 | github-code | 50 |
24046919934 | from time import sleep
import time
import threading
class BoxFiller(threading.Thread):
    """Worker thread that appends 30 increasing integers to parent._box.

    The parent must expose ``_box`` (a list) and ``_box_lock``
    (a threading.Lock) guarding it.
    """
    def __init__(self,parent):
        threading.Thread.__init__(self)
        self.parent = parent
    def run(self):
        # Produce 1..30, one every half second, under the parent's lock.
        count = 0
        for i in range(30):
            sleep(.5)
            count += 1
            self.parent._box_lock.acquire()
            self.parent._box.append(count)
            self.parent._box_lock.release()
class Maker:
    """Owns a BoxFiller worker thread that fills ``_box`` with numbers, and
    exposes the results as generator properties (``box`` and ``boring``)."""

    def __init__(self):
        self._box = []
        # list(...) is required on Python 3, where range() is a lazy object
        # without a pop() method.
        self._boring = list(range(10))
        self._box_lock = threading.Lock()
        self.filler = BoxFiller(self)

    def go(self):
        """Start the background filler thread."""
        self.filler.start()

    @property
    def box(self):
        """Yield values produced by the filler thread, polling until the
        thread has finished and the box is drained."""
        while True:
            if len(self._box) == 0 and not self.filler.is_alive():
                # PEP 479: raising StopIteration inside a generator becomes
                # RuntimeError on Python 3.7+; a plain return ends it cleanly.
                return
            if len(self._box) == 0:
                sleep(.05)
                continue
            self._box_lock.acquire()
            tmp = self._box.pop(0)
            self._box_lock.release()
            yield tmp

    @property
    def boring(self):
        """Yield (and consume) the pre-filled ``_boring`` values."""
        while len(self._boring) != 0:
            tmp = self._boring.pop(0)
            yield tmp
        # Generator simply ends here (was: `raise StopIteration`, which is a
        # RuntimeError under PEP 479).
| rouge8/hitsearch | threadtest/maker.py | maker.py | py | 1,288 | python | en | code | 8 | github-code | 50 |
24355048748 | import re
import requests
# 爬取所有奥特曼图片
# 声明 UA
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36"
}
# 存储异常路径,防止出现爬取失败情况
errorList = []
# run方法
def run():
    """Fetch the all-Ultraman index page and return its detail-page links.

    :return: list of absolute detail URLs, or None if the request failed
             (the exception is printed, not re-raised).
    """
    url = "http://www.ultramanclub.com/allultraman/"
    try:
        res = requests.get(url=url, timeout=10)
        # The site serves GB2312-encoded pages.
        res.encoding = "gb2312"
        html = res.text
        return getLinks(html)
    except Exception as e:
        print("发生异常", e)
# 获取请求链接
def getLinks(html):
    """Extract per-character detail links from the index page HTML.

    Finds the '<ul class="lists">' section, pulls every relative
    './name/' href, de-duplicates them, and rebuilds absolute URLs.
    """
    marker = '<ul class="lists">'
    trimmed = html[html.find(marker):]
    hrefs = re.findall('<li class="item"><a href="(.*)">', trimmed)
    print(
        hrefs)
    # De-duplicate via set(), then rebuild absolute URLs from the
    # './name/' relative paths ('.', 'name', '') -> index 1 is the name.
    base = "http://www.ultramanclub.com/allultraman/"
    links = [base + href.split('/')[1] + "/" for href in set(hrefs)]
    # print(links)
    return links
def getImg(url):
    """Download one character's portrait from its detail page.

    Saves the image as ``ultramanImg/<title>.png``. On any failure the URL
    is appended to the module-level ``errorList`` for a later retry.
    """
    try:
        # The site responds slowly, so an explicit timeout is required.
        res = requests.get(url=url, headers=headers, timeout=15)
        res.encoding = "gb2312"
        html = res.text
        print(url)
        # Use the detail-page title (up to the first '[') as the file name.
        title = re.search('<title>(.*?)\[', html).group(1)
        # Extract the short (relative) image link.
        image_short = re.search('<figure class="image tile">[.\s]*?<img src="(.*?)"', html).group(1)
        # Build the absolute image URL (drop the leading '../').
        img_url = "http://www.ultramanclub.com/allultraman/" + image_short[3:]
        # Download the raw image bytes.
        img_data = requests.get(img_url).content
        print(f"正在爬取{title}")
        if title is not None and image_short is not None:
            with open(f"ultramanImg/{title}.png", "wb") as f:
                f.write(img_data)
    except Exception as e:
        print("*" * 100)
        print(url)
        print("请求异常", e)
        errorList.append(url)
if __name__ == '__main__':
    # NOTE(review): run() returns None when the index request fails, which
    # would make this loop raise TypeError -- confirm intended behavior.
    details = run()
    for detail in details:
        getImg(detail)
    # Retry every URL that failed during the first pass.
    while len(errorList) > 0:
        print("再次爬取")
        detail = errorList.pop()
        getImg(detail)
    print("数据全部爬取完毕")
| Nevergiveupp/python-in-action | src/main/prince/getUltramanImage.py | getUltramanImage.py | py | 3,209 | python | en | code | 0 | github-code | 50 |
18071561060 | import pprint
pp = pprint .PrettyPrinter()
sigs = {}
gates = []
class Gate():
    """One logic-gate node of the circuit.

    Resolution reads and writes the module-level ``sigs`` dict: once both
    sources have concrete values, the result is stored under ``dst``.
    """
    def __init__(self, op, dst, src0, src1):
        self.op = op
        self.dst = dst
        self.src0 = src0
        # src1 is None for unary/assignment gates.
        self.src1 = src1
        self.resolved = False
    def resolve(self):
        """Try to compute this gate's output; no-op until inputs are known."""
        if self.resolved:
            return True
        dst = self.dst
        src0 = self.resolve_src(self.src0)
        src1 = self.resolve_src(self.src1)
        # '' is the "not yet resolvable" sentinel from resolve_src; note
        # that None (an absent src1) passes this check on purpose.
        if src0 != '' and src1 != '':
            if self.op == 'ASSIGN':
                sigs[dst] = src0
            elif self.op == 'NOT':
                sigs[dst] = ~src0
            elif self.op == 'AND':
                sigs[dst] = src0 & src1
            elif self.op == 'OR':
                sigs[dst] = src0 | src1
            elif self.op == 'LSHIFT':
                sigs[dst] = src0 << src1
            elif self.op == 'RSHIFT':
                sigs[dst] = src0 >> src1
            else:
                print('Unrecognized op:', self.op)
            self.resolved = True
    def resolve_src(self, src):
        """Map a source token to a value.

        Returns None for an absent operand, the int value of a literal, the
        signal's value if already computed, or '' when not yet resolvable.
        """
        if src is None:
            return None
        if is_int(src):
            return int(src)
        if src in sigs:
            return sigs[src]
        return ''
def is_int(value):
    """Return True when int(value) succeeds (base-10 literal or number)."""
    try:
        int(value)
        return True
    except ValueError:
        return False
def process_line(line):
    """Parse one circuit line ('<logic> -> <dst>') and append a Gate.

    Accepted logic forms: 'SRC' (assignment), 'OP SRC' (unary, e.g. NOT),
    'SRC0 OP SRC1' (binary). Appends to the module-level ``gates`` list.
    """
    logic, dst = line.split(' -> ')
    dst = dst.strip()
    src0 = None
    src1 = None
    logic_parts = logic.split()
    if len(logic_parts) == 1:
        op = 'ASSIGN'
        src0 = logic_parts[0]
    elif len(logic_parts) == 2:
        # Unary: operator first, then its single operand.
        op = logic_parts[0]
        src0 = logic_parts[1]
    elif len(logic_parts) == 3:
        # Binary: infix operator between the two operands.
        op = logic_parts[1]
        src0 = logic_parts[0]
        src1 = logic_parts[2]
    else:
        # NOTE(review): on this path `op` is never assigned, so the append
        # below raises NameError -- confirm malformed lines cannot occur.
        print('Parse error:', line)
    gates.append(Gate(op, dst, src0, src1))
# with open('test.txt', 'r') as f:
with open('input-2.txt', 'r') as f:
    # NOTE(review): `input` shadows the builtin of the same name here.
    input = f.readlines()
for x in input:
    process_line(x)
# Keep sweeping the gate list until every gate has resolved; each pass can
# unlock gates whose inputs were produced by earlier passes.
while False in [x.resolved for x in gates]:
    for g in gates:
        g.resolve()
# Convert to unsigned 16-bit ints
for x in sigs:
    sigs[x] = sigs[x] & 0xffff
pp.pprint(sigs)
| falyse/advent-of-code | 2015/07/main.py | main.py | py | 2,213 | python | en | code | 0 | github-code | 50 |
36793218735 | import math
from heapq import *
def func(s):
    """Reorder *s* so that no two equal characters are adjacent.

    Greedy max-heap approach: repeatedly emit the most frequent remaining
    character, then the second most frequent, re-pushing whichever still
    has occurrences left. Raises ValueError when any character occurs more
    than ceil(len(s)/2) times, i.e. when no valid ordering exists.

    :param s: iterable of characters
    :return: list of characters in the rearranged order
    """
    freq = {}
    for ch in s:
        freq[ch] = freq.get(ch, 0) + 1
    heap = []
    limit = math.ceil(len(s) / 2.0)
    for ch, count in freq.items():
        if count > limit:
            raise ValueError('No valid')
        # Negate counts so heapq's min-heap acts as a max-heap.
        heappush(heap, [-count, ch])
    out = []
    while heap:
        first = heappop(heap)
        out.append(first[1])
        first[0] += 1
        second = None
        if heap:
            second = heappop(heap)
            out.append(second[1])
            second[0] += 1
            if second[0] < 0:
                heappush(heap, second)
        if first[0] < 0:
            heappush(heap, first)
    return out
print(func(list('aaaabbcc')))
| baites/examples | algorithms/python/ex001.py | ex001.py | py | 716 | python | en | code | 4 | github-code | 50 |
40095094210 | import copy
import os
def SplitV(config, validationDir):
    """Build the job list for the SplitV validation.

    Creates one 'single' job per (sample, alignment, IOV) combination and,
    when requested in the config, one 'merge' job per (merge name, IOV)
    that depends on the matching single jobs.

    :param config: parsed all-in-one validation configuration dict
    :param validationDir: base work directory for the validation
    :return: list of job description dicts (consumed by the job scheduler)
    """
    ##List with all jobs
    jobs = []
    SplitVType = "single"
    ##List with all wished IOVs
    IOVs = []
    ##Start with single SplitV jobs
    if not SplitVType in config["validations"]["SplitV"]:
        raise Exception("No 'single' key word in config for SplitV")
    for singleName in config["validations"]["SplitV"][SplitVType]:
        for IOV in config["validations"]["SplitV"][SplitVType][singleName]["IOV"]:
            ##Save IOV to loop later for merge jobs
            if not IOV in IOVs:
                IOVs.append(IOV)
            for alignment in config["validations"]["SplitV"][SplitVType][singleName]["alignments"]:
                ##Work directory for each IOV
                workDir = "{}/SplitV/{}/{}/{}/{}".format(validationDir, SplitVType, singleName, alignment, IOV)
                ##Write local config
                local = {}
                local["output"] = "{}/{}/SplitV/{}/{}/{}/{}".format(config["LFS"], config["name"], SplitVType, alignment, singleName, IOV)
                local["alignment"] = copy.deepcopy(config["alignments"][alignment])
                local["validation"] = copy.deepcopy(config["validations"]["SplitV"][SplitVType][singleName])
                local["validation"].pop("alignments")
                local["validation"]["IOV"] = IOV
                # dataset/goodlumi may contain an IOV placeholder.
                if "dataset" in local["validation"]:
                    local["validation"]["dataset"] = local["validation"]["dataset"].format(IOV)
                if "goodlumi" in local["validation"]:
                    local["validation"]["goodlumi"] = local["validation"]["goodlumi"].format(IOV)
                ##Write job info
                job = {
                    "name": "SplitV_{}_{}_{}_{}".format(SplitVType, alignment, singleName, IOV),
                    "dir": workDir,
                    "exe": "cmsRun",
                    "cms-config": "{}/src/Alignment/OfflineValidation/python/TkAlAllInOneTool/SplitV_cfg.py".format(os.environ["CMSSW_BASE"]),
                    "run-mode": "Condor",
                    "dependencies": [],
                    "config": local,
                }
                jobs.append(job)
    ##Do merge SplitV if wished
    if "merge" in config["validations"]["SplitV"]:
        ##List with merge jobs, will be expanded to jobs after looping
        mergeJobs = []
        SplitVType = "merge"
        ##Loop over all merge jobs/IOVs which are wished
        for mergeName in config["validations"]["SplitV"][SplitVType]:
            for IOV in IOVs:
                ##Work directory for each IOV
                workDir = "{}/SplitV/{}/{}/{}".format(validationDir, SplitVType, mergeName, IOV)
                ##Write job info
                local = {}
                job = {
                    "name": "SplitV_{}_{}_{}".format(SplitVType, mergeName, IOV),
                    "dir": workDir,
                    "exe": "SplitVmerge",
                    "run-mode": "Condor",
                    "dependencies": [],
                    "config": local,
                }
                for alignment in config["alignments"]:
                    ##Deep copy necessary things from global config
                    local.setdefault("alignments", {})
                    if alignment in config["validations"]["SplitV"]["single"][mergeName]["alignments"]:
                        local["alignments"][alignment] = copy.deepcopy(config["alignments"][alignment])
                local["validation"] = copy.deepcopy(config["validations"]["SplitV"][SplitVType][mergeName])
                local["output"] = "{}/{}/SplitV/{}/{}/{}".format(config["LFS"], config["name"], SplitVType, mergeName, IOV)
                ##Loop over all single jobs
                for singleJob in jobs:
                    ##Get single job info and append to merge job if requirements fullfilled
                    # Job names follow "SplitV_single_<alignment>_<name>_<IOV>".
                    alignment, singleName, singleIOV = singleJob["name"].split("_")[2:]
                    if int(singleIOV) == IOV and singleName in config["validations"]["SplitV"][SplitVType][mergeName]["singles"]:
                        local["alignments"][alignment]["file"] = singleJob["config"]["output"]
                        job["dependencies"].append(singleJob["name"])
                mergeJobs.append(job)
        jobs.extend(mergeJobs)
    return jobs
| cms-sw/cmssw | Alignment/OfflineValidation/python/TkAlAllInOneTool/SplitV.py | SplitV.py | py | 4,390 | python | en | code | 985 | github-code | 50 |
70481602397 | import os
import threading
import logging
import sched
import time
import flask
from flask import Flask, render_template, request, redirect, url_for
import pyttsx3
import license
mit = license.find("MIT")
from functions import create_alarm, bbc_news, announcements_alarm, notifications_covid
app = Flask(__name__)
logging.basicConfig(
filename="alarm.log", filemode="w", format="%(name)s - %(levelname)s - %(message)s"
)
# The global variables that we need, returning lists of values.
covid_list = []
list_of_alarms = []
@app.route("/create", methods=["GET", "POST"])
def handle_data():
    """
    Upon submitting an alarm form request, validate the necessary data and
    create an alarm with the conditions specified by the user.

    POST form fields:
        clockname    -- optional alarm label
        date_time    -- "YYYY-MM-DDTHH:MM" from a datetime-local input (required)
        weather      -- "on" when the weather-briefing checkbox is ticked
        brefingsname -- "on" when the news-briefing checkbox is ticked

    :return: Redirect - main_route.
    """
    if flask.request.method == "POST":
        clock_name = request.form["clockname"]
        date_time = request.form["date_time"]
        # A date/time is mandatory: without it no alarm can be scheduled.
        if not date_time:
            new_engine = pyttsx3.init()
            new_engine.say("Alarm can not be created")
            new_engine.runAndWait()
            logging.warning("No date time input - Alarm could not be created.")
        else:
            # Unticked checkboxes are simply absent from the form data, so
            # default to "off" (replaces the original bare try/except).
            news_briefing = request.form.get("brefingsname", "off")
            weather_briefing = request.form.get("weather", "off")
            # Parse the fixed-width "YYYY-MM-DDTHH:MM" value.
            year = int(date_time[0:4])
            month = int(date_time[5:7])
            day = int(date_time[8:10])
            hour = int(date_time[11:13])
            minute = int(date_time[14:16])
            total_time = create_alarm(year, month, day, hour, minute)
            initialise_alarm(total_time, clock_name, news_briefing, weather_briefing)
            # Only list alarms scheduled in the future; initialise_alarm
            # already announced the failure otherwise.
            if total_time > 0:
                # One formatting path replaces four near-identical
                # copy/pasted string builders (minute is zero-padded,
                # matching the original output exactly).
                description = "Date:{}/{}/{} Time:{}:{:02d}".format(
                    day, month, year, hour, minute)
                if clock_name:
                    description = "Alarm name:" + clock_name + " " + description
                list_of_alarms.append(description)
    return redirect(url_for("main_route"))
@app.route("/")
def main_route():
    """
    Main route: render the alarm page with the current COVID notifications
    and the list of scheduled alarms.

    :return: str - rendered HTML page.
    """
    return render_template(
        "alarm.html", covid_list=covid_list, list_of_alarms=list_of_alarms
    )
def initialise_alarm(total_time: float, alarm_name: str, is_news: str, is_weather: str):
    """
    Schedule an alarm to fire after *total_time* seconds on a background
    thread; announce a failure immediately when the delay is not positive.

    :param total_time: float - seconds until the alarm fires (<= 0 means the
        requested time is in the past)
    :param alarm_name: str - label spoken when the alarm triggers
    :param is_news: str - "on" to include a news briefing in the announcement
    :param is_weather: str - "on" to include a weather briefing
    :return: None
    """
    scheduler = sched.scheduler(time.time, time.sleep)
    def print_event(name: str, display_news: str, display_weather: str):
        """
        Fire the alarm: speak its name plus any requested briefings and
        record a fresh COVID notification.

        :param name: str - alarm label
        :param display_news: str - "on" enables the news briefing
        :param display_weather: str - "on" enables the weather briefing
        :return: None
        """
        print("EVENT:", time.time(), name)
        to_say = "Alarm: " + name + ", has triggered. "
        new_engine = pyttsx3.init()
        covid_list.append(notifications_covid())
        # Append the optional briefings when their checkboxes were ticked.
        if display_news == "on":
            to_say = bbc_news(to_say)
        if display_weather == "on":
            to_say = announcements_alarm(to_say)
        new_engine.say(to_say)
        new_engine.runAndWait()
    print("START:", time.time())
    if total_time > 0:
        event = scheduler.enter(
            total_time,
            1,
            print_event,
            (
                alarm_name,
                is_news,
                is_weather,
            ),
        )
    else:
        # Requested time is in the past: announce and log, schedule nothing.
        new_engine = pyttsx3.init()
        new_engine.say("Alarm can not be created")
        new_engine.runAndWait()
        logging.warning(
            "Alarm can not be created in the past - Alarm could not be created."
        )
    # Run the scheduler on its own thread so the request handler returns
    # immediately (scheduler.run with no events exits at once).
    threading.Thread(target=scheduler.run).start()
if __name__ == "__main__":
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get("PORT", 5000))
    # 0.0.0.0 makes the dev server reachable from outside the host.
    app.run(host="0.0.0.0", port=port)
| BiancaStaicu16/SmartAlarm | smart_alarm.py | smart_alarm.py | py | 6,548 | python | en | code | 0 | github-code | 50 |
71382326235 | # -*- coding: utf-8 -*-
import logging
import uvclight
from grokcore.component import provider
from fanstatic import Library, Resource
from nva.psyquizz.models.interfaces import IQuizzSecurity
from grokcore.component import context, Subscription
from zope.interface import Interface, implementer
from uvclight.utils import current_principal
from nva.psyquizz.browser.forms import CreateCourse
from nva.psyquizz.models.interfaces import MySimpleTerm
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.vocabulary import SimpleTerm, SimpleVocabulary
from nva.psyquizz.models.quizz.corona_set import IHomeOfficeQuestions
from uvc.themes.btwidgets import IBootstrapRequest
# Marker layer interface for the BG ETEM themed skin; views registered on
# this layer override the default bootstrap ones.
class IETEMTheme(IBootstrapRequest):
    pass
library = Library('psyquizz.bgetem', 'static')
bgetemcss = Resource(library, 'bgetem.css')
condition_js = Resource(library, 'conditions.js')
def get_template(name):
    """Load template *name* from this package's template directory."""
    return uvclight.get_template(name, __file__)
@implementer(IQuizzSecurity)
class SecurityCheck(Subscription):
    """Per-quizz access policy.

    'quizz3' and 'quizz5' are restricted to principals whose login ends in
    one of the whitelisted suffixes; every other quizz is open to all.
    """
    context(Interface)

    def check(self, name, quizz, context):
        """Return True when the current principal may access quizz *name*."""
        if name in ('quizz3', 'quizz5'):
            principal = current_principal()
            # str.endswith accepts a tuple of suffixes: one call replaces
            # the original chain of `or`-ed endswith() checks.
            return principal.id.endswith((
                'bgetem.de',
                'novareto.de',
                'sw-i.de',
                'bayernwerk.de',
                'neymanns.thomas@bgetem.de',
            ))
        return True
@provider(IContextSourceBinder)
def source_fixed_extra_questions(context):
    """Vocabulary of the extra-question sets offered on course creation.

    Currently only the 'Homeoffice' set is active; the Corona set is
    retained below in case it needs to be re-enabled.
    """
    #rc = [MySimpleTerm('1', '1', u'Corona', ICoronaQuestions), MySimpleTerm('2', '2', u'Homeoffice', IHomeOfficeQuestions)]
    rc = [MySimpleTerm('2', '2', u'Homeoffice', IHomeOfficeQuestions),]
    return SimpleVocabulary(rc)
#CreateCourse.fields['quizz_type'].source = source_fixed_extra_questions
from nva.psyquizz.models.interfaces import ICourse, deferred_vocabularies
deferred_vocabularies['fixed_extra_questions'] = source_fixed_extra_questions
from zope.schema.vocabulary import SimpleTerm
from nva.psyquizz.models.vocabularies import make_vocabulary
FREQUENCY = make_vocabulary('frequency_corona', [
SimpleTerm(value=u'kein Homeoffice',
title=u'kein Homeoffice'),
SimpleTerm(value=u'trifft gar nicht zu',
title=u'trifft gar nicht zu'),
SimpleTerm(value=u'trifft wenig zu',
title=u'trifft wenig zu'),
SimpleTerm(value=u'trifft mittelmäßig zu',
title=u'trifft mittelmäßig zu'),
SimpleTerm(value=u'trifft überwiegend zu',
title=u'trifft überwiegend zu'),
SimpleTerm(value=u'trifft völlig zu',
title=u'trifft völlig zu'),
])
deferred_vocabularies['frequency_corona'] = FREQUENCY
from nva.psyquizz.browser.forms import CreateAccount
from . import condition_js
# Deliberately shadows the imported CreateAccount: this subclass replaces it
# on the ETEM theme layer with a customized confirmation label.
class CreateAccount(CreateAccount):
    uvclight.layer(IETEMTheme)

    def update(self):
        # BG ETEM specific wording for the 'accept' checkbox.
        self.fields['accept'].title = u"Bitte bestätigen Sie, dass Ihr Unternehmen bei der BG ETEM versichert ist:"
        super(CreateAccount, self).update()
        condition_js.need()
ICourse['quizz_type'].description = u""
| novareto/psyquizz.bgetem | src/psyquizz/bgetem/__init__.py | __init__.py | py | 3,245 | python | en | code | 0 | github-code | 50 |
31632386367 | from __future__ import print_function
import json
import logging
import time
import random
import pyDes
from .device import Device
from .DESFire_DEF import *
from .util import byte_array_to_human_readable_hex
_logger = logging.getLogger(__name__)
class DESFireCommunicationError(Exception):
    """Raised when an outgoing DESFire command receives a non-OK reply.

    The exception message is the human readable translation of the error
    code when one is available; ``status_code`` carries the original raw
    status word error byte(s).
    """

    def __init__(self, msg, status_code):
        Exception.__init__(self, msg)
        self.status_code = status_code
class DESFire:
    def __init__(self, device, logger=None):
        """
        :param device: :py:class:`desfire.device.Device` implementation
        :param logger: Python :py:class:`logging.Logger` used for logging output. Overrides the default logger. Extensively uses ``INFO`` logging level.
        """
        # Set to True by authenticate() after a successful handshake.
        self.isAuthenticated = False
        self.sessionKey = None
        # Last CMAC calculated over a received response.
        self.cmac = None
        self.MaxFrameSize=60
        #assert isinstance(device, Device), "Not a compatible device instance: {}".format(device)
        self.device = device
        #: 8 bytes of session key after authenticate()
        # NOTE(review): both `sessionKey` (above) and `session_key` exist;
        # authenticate() only sets `sessionKey` -- confirm which is canonical.
        self.session_key = None
        self.lastSelectedApplication = None
        if logger:
            self.logger = logger
        else:
            self.logger = _logger
def decrypt_response(self, response, private_key=b"\00" * 16, session_key=None):
"""Decrypt the autheticated session answer from the card.
.. warn ::
Does not check CMAC.
"""
initial_value = b"\00" * 8
k = pyDes.triple_des(bytes(private_key), pyDes.CBC, initial_value, pad=None, padmode=pyDes.PAD_NORMAL)
decrypted = [b for b in (k.decrypt(bytes(response)))]
import pdb ; pdb.set_trace()
    def authenticate(self, key_id, key, challenge = None):
        """Does authentication to the currently selected application with keyid (key_id).

        Runs the DESFire three-pass mutual challenge/response handshake and
        derives the session key. Authentication is NEVER needed to call this
        function.

        Args:
            key_id (int) : Key number
            key (DESFireKey) : The key used for authentication
            challenge (str): Optional hex string used as the reader-side
                random (RndA) instead of a fresh random value. It determines
                half of the session key bytes; it exists for testing and
                crypto tinkering purposes only.
        Returns:
            DESFireKey : the session key used for future communications with the card in the same session
        """
        sessionKey = None
        self.logger.debug('Authenticating')
        self.isAuthenticated = False
        cmd = None
        keyType = key.GetKeyType()
        # Pick the authenticate opcode matching the key algorithm.
        if keyType == DESFireKeyType.DF_KEY_AES:
            cmd = DESFireCommand.DFEV1_INS_AUTHENTICATE_AES.value
            params = [ key_id ]
        elif keyType == DESFireKeyType.DF_KEY_2K3DES or keyType == DESFireKeyType.DF_KEY_3K3DES:
            cmd = DESFireCommand.DFEV1_INS_AUTHENTICATE_ISO.value
            params = [ key_id ]
        else:
            raise Exception('Invalid key type!')
        # Pass 1: the card answers with the encrypted random RndB.
        raw_data = self.communicate(self.command(cmd,params),"Authenticating key {:02X}".format(key_id),True, allow_continue_fallthrough=True)
        RndB_enc = raw_data
        self.logger.debug( 'Random B (enc):'+ byte_array_to_human_readable_hex(RndB_enc))
        if keyType == DESFireKeyType.DF_KEY_3K3DES or keyType == DESFireKeyType.DF_KEY_AES:
            if len(RndB_enc) != 16:
                raise DESFireAuthException('Card expects a different key type. (enc B size is less than the blocksize of the key you specified)')
        key.CiperInit()
        RndB = key.Decrypt(RndB_enc)
        self.logger.debug( 'Random B (dec): ' + byte_array_to_human_readable_hex(RndB))
        # Rotate RndB left by one byte, per the DESFire protocol.
        RndB_rot = RndB[1:]+[RndB[0]]
        self.logger.debug( 'Random B (dec, rot): ' + byte_array_to_human_readable_hex(RndB_rot))
        if challenge != None:
            RndA = bytes(bytearray.fromhex(challenge))
        else:
            # NOTE(review): `Random` is not imported in this module directly;
            # presumably provided via the star import -- verify.
            RndA = Random.get_random_bytes(len(RndB))
        self.logger.debug( 'Random A: ' + byte_array_to_human_readable_hex(RndA))
        # Pass 2: send enc(RndA + rot(RndB)) back to the card.
        RndAB = list(RndA) + RndB_rot
        self.logger.debug( 'Random AB: ' + byte_array_to_human_readable_hex(RndAB))
        RndAB_enc = key.Encrypt(RndAB)
        self.logger.debug( 'Random AB (enc): ' + byte_array_to_human_readable_hex(RndAB_enc))
        params = RndAB_enc
        cmd = DESFireCommand.DF_INS_ADDITIONAL_FRAME.value
        raw_data = self.communicate(self.command(cmd,params),"Authenticating random {:02X}".format(key_id),True, allow_continue_fallthrough=True)
        #raw_data = hexstr2bytelist('91 3C 6D ED 84 22 1C 41')
        # Pass 3: the card proves itself by returning enc(rot(RndA)).
        RndA_enc = raw_data
        self.logger.debug('Random A (enc): ' + byte_array_to_human_readable_hex(RndA_enc))
        RndA_dec = key.Decrypt(RndA_enc)
        self.logger.debug( 'Random A (dec): ' + byte_array_to_human_readable_hex(RndA_dec))
        # Undo the card's one-byte left rotation before comparing.
        RndA_dec_rot = RndA_dec[-1:] + RndA_dec[0:-1]
        self.logger.debug( 'Random A (dec, rot): ' + byte_array_to_human_readable_hex(RndA_dec_rot))
        if bytes(RndA) != bytes(RndA_dec_rot):
            raise Exception('Authentication FAILED!')
        self.logger.debug( 'Authentication succsess!')
        self.isAuthenticated = True
        self.lastAuthKeyNo = key_id
        self.logger.debug( 'Calculating Session key')
        RndA = list(RndA)
        # Session key is built from halves of RndA and RndB; the exact byte
        # ranges depend on the key algorithm.
        sessionKeyBytes = RndA[:4]
        sessionKeyBytes += RndB[:4]
        if key.keySize > 8:
            if keyType == DESFireKeyType.DF_KEY_2K3DES:
                sessionKeyBytes += RndA[4:8]
                sessionKeyBytes += RndB[4:8]
            elif keyType == DESFireKeyType.DF_KEY_3K3DES:
                sessionKeyBytes += RndA[6:10]
                sessionKeyBytes += RndB[6:10]
                sessionKeyBytes += RndA[12:16]
                sessionKeyBytes += RndB[12:16]
            elif keyType == DESFireKeyType.DF_KEY_AES:
                sessionKeyBytes += RndA[12:16]
                sessionKeyBytes += RndB[12:16]
        if keyType == DESFireKeyType.DF_KEY_2K3DES or keyType == DESFireKeyType.DF_KEY_3K3DES:
            # Clear every key byte's LSB (DES parity bit position).
            sessionKeyBytes = [( a & 0b11111110 ) for a in sessionKeyBytes ]
        ## now we have the session key, so we reinitialize the crypto!!!
        key.GenerateCmac(sessionKeyBytes)
        self.sessionKey = key
        return self.sessionKey
    def _communicate(self, apdu_cmd, description,nativ=False, allow_continue_fallthrough=False):
        """Communicate with a NFC tag.

        Send the outgoing request and wait for a card reply, looping on the
        0xAF "additional frame" status to collect multi-frame responses.

        TODO: Handle additional framing via 0xaf

        :param apdu_cmd: Outgoing APDU command as array of bytes
        :param description: Command description for logging purposes
        :param nativ: True when `apdu_cmd` uses native DESFire framing (the
            status is the first byte) rather than ISO 7816 wrapping
        :param allow_continue_fallthrough: If True 0xAF response (incoming more data, need mode data) is instantly returned to the called instead of trying to handle it internally

        :raise: :py:class:`desfire.protocol.DESFireCommunicationError` on any error
        :return: tuple(APDU response as list of bytes, bool if additional frames are inbound)
        """
        result = []
        additional_framing_needed = True
        # TODO: Clean this up so readgwrite implementations have similar mechanisms and all continue is handled internally
        while additional_framing_needed:
            self.logger.debug("Running APDU command %s, sending: %s", description, byte_array_to_human_readable_hex(apdu_cmd))
            resp = self.device.transceive(apdu_cmd)
            self.logger.debug("Received APDU response: %s", byte_array_to_human_readable_hex(resp))
            if not nativ:
                if resp[-2] != 0x91:
                    raise DESFireCommunicationError("Received invalid response for command: {}".format(description), resp[-2:])
            # Possible status words: https://github.com/jekkos/android-hce-desfire/blob/master/hceappletdesfire/src/main/java/net/jpeelaer/hce/desfire/DesfireStatusWord.java
            # NOTE(review): the next two assignments are dead stores -- `status`
            # is overwritten immediately and `unframed` again further below, so
            # the wrapped (non-native) framing appears to never take effect.
            status = resp[-1]
            unframed = list(resp[0:-2])
            status = resp[0]
            # Check for known error interpretation
            if status == 0xaf:
                if allow_continue_fallthrough:
                    additional_framing_needed = False
                else:
                    # Need to loop more cycles to fill in receive buffer
                    additional_framing_needed = True
                    apdu_cmd = self.command(0xaf) # Continue
            elif status != 0x00:
                raise DESFireCommunicationError(DESFire_STATUS(status).name, status)
            else:
                additional_framing_needed = False
            # This will un-memoryview this object as there seems to be some pyjnius
            # bug getting this corrupted down along the line
            unframed = list(resp[1:])
            result += unframed
        return result
    def communicate(self, apdu_cmd,description, nativ=False, allow_continue_fallthrough=False, isEncryptedComm = False, withTXCMAC = False, withCRC=False,withRXCMAC=True, encryptBegin=1):
        """Send a command to the card, handling encryption and CMAC layers.

        :param apdu_cmd: command bytes (DESFire instruction plus parameters)
        :param description: human readable description for logging
        :param nativ: True for native DESFire framing (no ISO 7816 wrapping)
        :param allow_continue_fallthrough: return 0xAF "more data" responses
            to the caller instead of auto-continuing
        :param isEncryptedComm: encrypt the outgoing message with the session
            key (requires prior authentication)
        :param withTXCMAC: calculate a CMAC over the outgoing message
            (requires prior authentication)
        :param withCRC: include a CRC when encrypting
        :param withRXCMAC: verify/strip the 8 byte CMAC the card appends to
            responses after authentication
        :param encryptBegin: offset at which encryption of the payload starts
        :return: response payload as list of bytes (CMAC stripped)
        """
        result = []
        #sanity check
        if withTXCMAC or isEncryptedComm:
            if not self.isAuthenticated:
                raise Exception('Cant perform CMAC calc without authantication!')
        #encrypt the communication
        if isEncryptedComm:
            apdu_cmd=self.sessionKey.EncryptMsg(apdu_cmd,withCRC,encryptBegin)
        #communication with the card is not encrypted, but CMAC might need to be calculated
        #calculate cmac for outgoing message
        if withTXCMAC:
            TXCMAC = self.sessionKey.CalculateCmac(apdu_cmd)
            self.logger.debug("TXCMAC : " + byte_array_to_human_readable_hex(TXCMAC))
        response = self._communicate(apdu_cmd,description,nativ, allow_continue_fallthrough)
        if self.isAuthenticated and len(response) >= 8 and withRXCMAC:
            #after authentication, there is always an 8 bytes long CMAC coming from the card, to ensure message integrity
            #todo: verify CMAC
            if len(response) == 8:
                # Response is CMAC-only: no payload.
                #if self.sessionKey.keyType == DESFireKeyType.DF_KEY_3DES or self.sessionKey.keyType == DESFireKeyType.DF_KEY_2K3DES or self.sessionKey.keyType == DESFireKeyType.DF_KEY_3K3DES:
                RXCMAC = response
                response = []
                #else:
                #    #there is no CMAC
                #    return response
            else:
                # Last 8 bytes are the CMAC, the rest is the payload.
                RXCMAC = response[-8:]
                response = response[:-8]
            #if response == "":
            #    response = []
            # CMAC is calculated over the payload plus the 0x00 status byte.
            cmacdata = response + [0x00]
            RXCMAC_CALC = self.sessionKey.CalculateCmac(cmacdata)
            self.logger.debug("RXCMAC : " + byte_array_to_human_readable_hex(RXCMAC))
            self.logger.debug("RXCMAC_CALC: " + byte_array_to_human_readable_hex(RXCMAC_CALC))
            self.cmac=RXCMAC_CALC
            if bytes(RXCMAC) != bytes(RXCMAC_CALC[0:len(RXCMAC)]):
                raise Exception("RXCMAC not equal")
        return response
@classmethod
def wrap_command(cls, command, parameters=None):
"""Wrap a command to native DES framing.
:param command: Command byte
:param parameters: Command parameters as list of bytes
https:g/github.com/greenbird/workshops/blob/master/mobile/Android/Near%20Field%20Communications/HelloWorldNFC%20Desfire%20Base/src/com/desfire/nfc/DesfireReader.java#L129
"""
if parameters:
return [0x90, command, 0x00, 0x00, len(parameters)] + parameters + [0x00]
else:
return [0x90,command,0x00,0x00,0x00]
@classmethod
def command(cls,command,parameters=None):
if parameters:
l=[command]
l=l+parameters
return l
else:
return [command]
    def getApplicationIDs(self):
        """Lists all application on the card
        Authentication is NOT needed to call this function
        Args:
            None
        Returns:
            list: A list of application IDs, each as a list of 3 bytes (MSB first)
        """
        self.logger.debug("GetApplicationIDs")
        appids = []  # NOTE(review): unused leftover; results are collected in `apps`
        cmd = DESFireCommand.DF_INS_GET_APPLICATION_IDS.value
        raw_data = self.communicate([cmd], 'Get Application IDs',nativ=True, withTXCMAC=self.isAuthenticated)
        pointer = 0
        apps = []
        # Each AID occupies 3 bytes, transmitted LSB-first; reverse to MSB-first here.
        while pointer < len(raw_data):
            appid = [raw_data[pointer+2]] + [raw_data[pointer+1]] + [raw_data[pointer]]
            self.logger.debug("Reading %s", byte_array_to_human_readable_hex(appid))
            apps.append(appid)
            pointer += 3
        return apps
    def getKeySetting(self):
        """Fetch the key settings of the currently selected application (or of
        the PICC master key when no application is selected) as a DESFireKey.
        """
        ret=DESFireKey()
        parameters=[]  # NOTE(review): unused leftover
        #apdu_command = self.command(DESFire_DEF.DF_INS_GET_KEY_SETTINGS.value)
        resp=self.communicate([DESFireCommand.DF_INS_GET_KEY_SETTINGS.value], "get key settings", nativ=True, withTXCMAC=self.isAuthenticated)
        # resp[0]: settings bits (low 3 bits kept); resp[1]: key count in the
        # low nibble, key type in the high nibble.
        ret.setKeySettings(resp[1] & 0x0f,DESFireKeyType(resp[1] & 0xf0),resp[0] & 0x07)
        return ret
    def getCardVersion(self):
        """Gets card version info blob
        Version info contains the UID, Batch number, production week, production year, ... of the card
        Authentication is NOT needed to call this function
        BEWARE: DESFire card has a security feature called "Random UID" which means that without authentication it will give you a random UID each time you call this function!
        Args:
            None
        Returns:
            DESFireCardVersion: Object containing all card version info parsed
        """
        self.logger.debug('Getting card version info')
        cmd = DESFireCommand.DF_INS_GET_VERSION.value
        raw_data = self.communicate([cmd], 'GetCardVersion',nativ=True, withTXCMAC=self.isAuthenticated)
        # Parsing of the raw version blob is delegated to DESFireCardVersion.
        return DESFireCardVersion(raw_data)
    def formatCard(self):
        """Formats the card
        WARNING! THIS COMPLETELY WIPES THE CARD AND RESETS IF TO A BLANK CARD!!
        Authentication is needed to call this function
        Args:
            None
        Returns:
            None
        """
        self.logger.debug('Formatting card')
        cmd = DESFireCommand.DF_INS_FORMAT_PICC.value
        # Response payload is discarded; failures surface as exceptions from communicate().
        self.communicate([cmd], 'Format Card',nativ=True, withTXCMAC=self.isAuthenticated)
###### Application related
    def selectApplication(self, appid):
        """Choose application on a card on which all the following commands will apply.
        Authentication is NOT ALWAYS needed to call this function. Depends on the application settings.
        Args:
            appid (int): The application ID of the app to be selected
        Returns:
            None
        """
        appid = getList(appid,3,'big')
        self.logger.debug('Selecting application with AppID %s' % (byte_array_to_human_readable_hex(appid),))
        # The 3-byte AID travels LSB-first on the wire.
        parameters = [ appid[2], appid[1], appid[0] ]
        cmd = DESFireCommand.DF_INS_SELECT_APPLICATION.value
        self.communicate(self.command(cmd, parameters),'select Application',nativ=True)
        #if new application is selected, authentication needs to be carried out again
        self.isAuthenticated = False
        self.lastSelectedApplication = appid
def createApplication(self, appid, keysettings, keycount, type):
"""Creates application on the card with the specified settings
Authentication is ALWAYS needed to call this function.
Args:
appid (int) : The application ID of the app to be created
keysettings (list): Key settings to be applied to the application to be created. MUST contain entryes derived from the DESFireKeySettings enum
keycount (int) :
type (int) : Key type that will specify the encryption used for authenticating to this application and communication with it. MUST be coming from the DESFireKeyType enum
Returns:
None
"""
appid = getList(appid,3,'big')
self.logger.debug('Creating application with appid: %s, ' %(byte_array_to_human_readable_hex(appid)))
appid = [appid[2],appid[1],appid[0]]
keycount=getInt(keycount,'big')
params = appid + [calc_key_settings(keysettings)] + [keycount|type.value]
cmd = DESFireCommand.DF_INS_CREATE_APPLICATION.value
self.communicate(self.command(cmd, params),'cereate application',nativ=True, withTXCMAC=self.isAuthenticated)
    def deleteApplication(self, appid):
        """Deletes the application specified by appid
        Authentication is ALWAYS needed to call this function.
        Args:
            appid (int) : The application ID of the app to be deleted
        Returns:
            None
        """
        appid = getList(appid,3,'big')
        self.logger.debug('Deleting application for AppID %s', byte_array_to_human_readable_hex(appid))
        # The 3-byte AID travels LSB-first on the wire.
        appid = [ appid[2], appid[1], appid[0] ]
        params = appid
        cmd = DESFireCommand.DF_INS_DELETE_APPLICATION.value
        self.communicate(self.command(cmd, params),'delete Application',nativ=True, withTXCMAC=self.isAuthenticated)
###################################################################################################################
### These functions are not yet refactored
###################################################################################################################
###### FILE FUNCTIONS
    def getFileIDs(self):
        """Lists all files belonging to the application currently selected. (SelectApplication needs to be called first)
        Authentication is NOT ALWAYS needed to call this function. Depends on the application/card settings.
        Args:
            None
        Returns:
            list: A list of file IDs (one byte each)
        """
        self.logger.debug('Enumerating all files for the selected application')
        fileIDs = []
        cmd = DESFireCommand.DF_INS_GET_FILE_IDS.value
        raw_data = self.communicate([cmd], 'get File ID\'s',nativ=True, withTXCMAC=self.isAuthenticated)
        if len(raw_data) == 0:
            self.logger.debug("No files found")
        else:
            # The card returns one byte per existing file ID.
            for byte in raw_data:
                fileIDs.append(byte)
            self.logger.debug("File ids: %s" % (''.join([byte_array_to_human_readable_hex(bytearray([id])) for id in fileIDs]),))
        return fileIDs
def getFileSettings(self, fileid):
"""Gets file settings for the File identified by fileid.(SelectApplication needs to be called first)
Authentication is NOT ALWAYS needed to call this function. Depends on the application/card settings.
Args:
fileid (int): FileID to get the settings for
Returns:
DESFireFileSettings: An object describing all settings for the file
"""
fileid=getList(fileid,1,'big')
self.logger.debug('Getting file settings for file %s' % (byte_array_to_human_readable_hex(fileid),))
cmd = DESFireCommand.DF_INS_GET_FILE_SETTINGS.value
raw_data = raw_data = self.communicate(self.command(cmd, fileid),'Get File Settings',nativ=True, withTXCMAC=self.isAuthenticated)
file_settings = DESFireFileSettings()
file_settings.parse(raw_data)
return file_settings
    def readFileData(self,fileId,offset,length):
        """Read file data for fileID (SelectApplication needs to be called first)
        Authentication is NOT ALWAYS needed to call this function. Depends on the application/card settings.
        Args:
            fileId (int): FileID to read from
            offset (int): byte offset at which reading starts
            length (int): number of bytes to read
        Returns:
            list: the file data bytes
        """
        fileId=getList(fileId,1)
        offset=getInt(offset,'big')
        length=getInt(length,'big')
        ioffset=0
        ret=[]
        # Read in chunks of at most 48 bytes; offset and count are sent as
        # 3-byte little-endian values per the command layout used here.
        while (length > 0):
            count=min(length, 48)
            cmd=DESFireCommand.DF_INS_READ_DATA.value
            params=fileId+getList(offset+ioffset,3,'little')+getList(count,3,'little')
            ret+=self.communicate(self.command(cmd, params),'Read file data', nativ=True, withTXCMAC=self.isAuthenticated)
            ioffset+=count
            length-=count
        return ret
    def writeFileData(self,fileId,offset,length,data):
        """Write `length` bytes of `data` into file `fileId`, starting at `offset`.
        (SelectApplication needs to be called first.)
        """
        fileId=getList(fileId,1)
        offset=getInt(offset,'big')
        length=getInt(length,'big')
        data=getList(data)
        ioffset=0
        # Chunk writes so each frame fits within MaxFrameSize; the reserved
        # 8 bytes presumably leave room for the trailing CMAC — TODO confirm.
        while (length > 0):
            count=min(length, self.MaxFrameSize-8)
            cmd=DESFireCommand.DF_INS_WRITE_DATA.value
            params=fileId+getList(offset+ioffset,3,'little')+getList(count,3,'little')+data[ioffset:(ioffset+count)]
            self.communicate(self.command(cmd, params),'write file data', nativ=True, withTXCMAC=self.isAuthenticated)
            ioffset+=count
            length-=count
    def deleteFile(self,fileId):
        """Delete the file `fileId` from the currently selected application."""
        return self.communicate(self.command(DESFireCommand.DF_INS_DELETE_FILE.value, getList(fileId,1,'little')),'Delete File', nativ=True, withTXCMAC=self.isAuthenticated)
    def createStdDataFile(self, fileId, filePermissions, fileSize):
        """Create a standard data file in the currently selected application.

        fileId         : one-byte file number
        filePermissions: object exposing pack() -> 2-byte access rights
        fileSize       : file size in bytes (sent as 3-byte little-endian)
        """
        params=getList(fileId,1,'big')
        params+=[0x00]  # communication settings byte — 0x00 presumably selects plain communication; verify
        params+=getList(filePermissions.pack(),2,'big')
        params+=getList(getInt(fileSize,'big'),3, 'little')
        apdu_command=self.command(DESFireCommand.DF_INS_CREATE_STD_DATA_FILE.value,params)
        self.communicate(apdu_command,'createStdDataFile', nativ=True, withTXCMAC=self.isAuthenticated)
        return
###### CRYPTO KEYS RELATED FUNCTIONS
def getKeyVersion(self, keyNo):
"""Gets the key version for the key identified by keyno. (SelectApplication needs to be called first, otherwise it's getting the settings for the Master Key)
Authentication is ALWAYS needed to call this function.
Args:
keyNo (int) : The key number
Returns:
str: key version byte
"""
self.logger.debug('Getting key version for keyid %x' %(keyNo,))
params = getList(keyNo,1,'big')
cmd = DESFireCommand.DF_INS_GET_KEY_VERSION.value
raw_data = self.communicate(self.command(cmd, params),'get key version',nativ=True, withTXCMAC=self.isAuthenticated)
self.logger.debug('Got key version 0x%s for keyid %x' + str(keyNo))
return raw_data
    def changeKeySettings(self, newKeySettings):
        """Changes key settings for the key that was used to authenticate with in the current session.
        Authentication is ALWAYS needed to call this function.
        Args:
            newKeySettings (list) : A list with DESFireKeySettings enum value
        Returns:
            None
        """
        #self.logger.debug('Changing key settings to %s' %('|'.join(a.name for a in newKeySettings),))
        params = [calc_key_settings(newKeySettings)]
        cmd = DESFireCommand.DF_INS_CHANGE_KEY_SETTINGS.value
        # The settings byte must be sent encrypted, with a CRC appended.
        raw_data = self.communicate(self.command(cmd,params),'change key settings', nativ=True, isEncryptedComm=True, withCRC=True)
    def changeKey(self, keyNo, newKey, curKey):
        """Changes current key (curKey) to a new one (newKey) in specified keyslot (keyno)
        Authentication is ALWAYS needed to call this function.
        Args:
            keyNo (int) : Key number
            newKey (DESFireKey) : The new key
            curKey (DESFireKey) : The current key for that keyslot
        Returns:
            None
        """
        keyNo=getInt(keyNo,'big')
        self.logger.debug(' -- Changing key --')
        #self.logger.debug('Changing key No: %s from %s to %s' % (keyNo, newKey, curKey))
        if not self.isAuthenticated:
            raise Exception('Not authenticated!')
        self.logger.debug('curKey : ' + byte_array_to_human_readable_hex(curKey.getKey()))
        self.logger.debug('newKey : ' + byte_array_to_human_readable_hex(newKey.getKey()))
        isSameKey = (keyNo == self.lastAuthKeyNo)
        #self.logger.debug('isSameKey : ' + str(isSameKey))
        # The type of key can only be changed for the PICC master key.
        # Applications must define their key type in CreateApplication().
        if self.lastSelectedApplication == 0x00:
            keyNo = keyNo | newKey.keyType.value
        cryptogram = self.command(DESFireCommand.DF_INS_CHANGE_KEY.value, [keyNo])
        #The following if() applies only to application keys.
        #For the PICC master key b_SameKey is always true because there is only ONE key (#0) at the PICC level.
        if not isSameKey:
            # Changing a key other than the session key: the key material is
            # sent as newKey XOR curKey (the current key is doubled when the
            # new key is longer, e.g. 3K3DES vs DES).
            keyData_xor=[]
            if len(newKey.getKey())>len(curKey.getKey()):
                keyData_xor = bytearray(strxor(bytes(newKey.getKey()), bytes(curKey.getKey()*2)))
            else:
                keyData_xor = bytearray(strxor(bytes(newKey.getKey()), bytes(curKey.getKey())))
            cryptogram += keyData_xor
        else:
            cryptogram += newKey.getKey()
        if newKey.keyType == DESFireKeyType.DF_KEY_AES:
            # AES keys carry an explicit key-version byte.
            cryptogram += [newKey.keyVersion]
        # CRC over the whole cryptogram so far, little-endian.
        cryptogram += bytearray(CRC32(cryptogram).to_bytes(4, byteorder='little'))
        if not isSameKey:
            # Second CRC over the plain new key when changing a different key.
            cryptogram += bytearray(CRC32(newKey.getKey()).to_bytes(4, byteorder='little'))
        #self.logger.debug( (int2hex(DESFireCommand.DF_INS_CHANGE_KEY.value) + int2hex(keyNo) + cryptogram).encode('hex'))
        raw_data = self.communicate(cryptogram,'change key',nativ=True, isEncryptedComm = True, withRXCMAC = not isSameKey, withTXCMAC = False, withCRC= False, encryptBegin=2)
        #If we changed the currently active key, then re-auth is needed!
        if isSameKey:
            self.isAuthenticated = False
            self.sessionKey = None
        return
#######################################################################################################################################
### Helper function
#######################################################################################################################################
    def createKeySetting(self,key, keyNumbers, keyType, keySettings):
        """Helper: build a DESFireKey from raw key bytes, a key count, a
        DESFireKeyType and a list of DESFireKeySettings enum values."""
        ret=DESFireKey()
        ret.setKeySettings(getInt(keyNumbers,'big'),keyType,calc_key_settings(keySettings))
        ret.setKey(getList(key))
        return ret
| patsys/desfire-python | Desfire/DESFire.py | DESFire.py | py | 27,257 | python | en | code | 15 | github-code | 50 |
9263352550 | """Das Spielerobject"""
from pygame import image as pyimage
from pygame import transform as pytransform
import drawer
from healthbar import Healthbar
path = 'img//player//'  # base directory of the player sprites (doubled slash is harmless)
class Player(object):
    """The player avatar: three lane positions (left/center/right), a brief
    attack pose, hit points with an on-screen health bar, and a counter of
    defeated bosses."""

    def __init__(self):
        # Sprites for the four poses.
        self.IMG_stand = loadIMG(path + 'stand.png')
        self.IMG_dodgeL = loadIMG(path + 'dodgeL.png')
        self.IMG_dodgeR = loadIMG(path + 'dodgeR.png')
        self.IMG_attack = loadIMG(path + 'attack.png')
        # Screen positions derived from the display center.
        self.centerPos = (drawer.CENTER[0] - int(self.IMG_stand.get_rect().width/2), int(drawer.CENTER[1]*1.1) )
        self.leftPos = ( int(self.centerPos[0]*0.7), self.centerPos[1] )
        self.rightPos = ( int(self.centerPos[0]*1.3), self.centerPos[1] )
        self.attackPos = ( self.centerPos[0], int(self.centerPos[1]*0.7) )
        self.curImg = self.IMG_stand
        self.curPos = self.centerPos
        # posMode: 0 = standing, 1 = dodging left, 2 = dodging right, 3 = attacking.
        self.posMode = 0
        self.timeTillStand = 0      # seconds until the player snaps back to standing
        self.dodgeTime = 0.5        # dodge duration in seconds
        self.attackDuration = 0.3   # attack-pose duration in seconds
        self.damage = 20            # damage dealt per attack
        self.killedBosses = 0
        self.maxHealth = 100
        self.health = self.maxHealth
        self.healthbar = Healthbar(self.maxHealth, int(drawer.DISPLAY_WIDTH / 2), int( drawer.DISPLAY_HEIGHT * 0.9 ), int(drawer.DISPLAY_WIDTH * 0.4), int(drawer.DISPLAY_HEIGHT*0.05) )

    def update(self, dt):
        """Advance pose timers by dt seconds. Returns 'dead', 'goMiddle' or None."""
        if self.health <= 0:
            return 'dead'
        if self.timeTillStand > 0:
            self.timeTillStand -= dt
            return
        if self.posMode != 0:
            self.goMiddle()
            return 'goMiddle'

    def draw(self):
        """Draw the current sprite and the health bar."""
        drawer.showIMG(self.curPos[0], self.curPos[1], self.curImg)
        self.healthbar.draw()

    def dodgeL(self):
        """Dodge to the left lane (only possible while standing)."""
        if self.posMode == 0:
            self.posMode = 1
            self.curPos = self.leftPos
            self.curImg = self.IMG_dodgeL
            self.timeTillStand = self.dodgeTime

    def dodgeR(self):
        """Dodge to the right lane (only possible while standing)."""
        if self.posMode == 0:
            self.posMode = 2
            self.curPos = self.rightPos
            self.curImg = self.IMG_dodgeR
            self.timeTillStand = self.dodgeTime

    def goMiddle(self):
        """Return to the standing pose in the center lane."""
        self.posMode = 0
        self.curPos = self.centerPos
        self.curImg = self.IMG_stand

    def attack(self, boss):
        """Strike the boss (only possible while standing)."""
        if self.posMode == 0:
            boss.hurt( self.damage )
            self.posMode = 3
            self.curPos = self.attackPos
            self.curImg = self.IMG_attack
            self.timeTillStand = self.attackDuration

    def hurt(self, amount):
        """Take `amount` damage; clamp health at 0 and die when it runs out."""
        self.health -= amount
        # BUGFIX: was `< 0`, so damage landing exactly on 0 health skipped
        # die() even though update() already treats health <= 0 as dead.
        if self.health <= 0:
            self.health = 0
            self.die()
        self.healthbar.setVal(self.health)

    def die(self):
        """Report this run's boss count and reset it."""
        print("You killed " + str(self.killedBosses) + " bosses!")
        self.killedBosses = 0

    def ressurect(self):
        """Restore the player to a fresh, full-health standing state."""
        self.health = self.maxHealth
        self.timeTillStand = 0
        self.goMiddle()
        self.healthbar.setVal(self.health)
        self.killedBosses = 0
def loadIMG(path):
    """Load the image at *path* and scale it to a square of 30% of the display height."""
    size = int(drawer.DISPLAY_HEIGHT * 0.3)
    surface = pyimage.load(path)
    return pytransform.scale(surface, (size, size))
18023383241 | # -*- coding:utf-8 -*-
"""
@author: guoxiaorui
@file: 2131_longest_palindrome.py
@time: 2022-01-12 23:56:25
"""
from typing import List
from collections import Counter
class Solution:
    def longestPalindrome(self, words: List[str]) -> int:
        """LeetCode 2131: longest palindrome built by concatenating two-letter words.

        Words like "gg" pair with themselves (an odd leftover may sit in the
        middle); words like "lc" pair with their reverse "cl".
        """
        tally = Counter(words)
        total = 0
        center_available = False
        for w, n in tally.items():
            rev = w[::-1]
            if w == rev:
                # Self-palindromic word: every pair adds 4 chars; one odd
                # leftover can serve as the palindrome's center.
                pairs, leftover = divmod(n, 2)
                total += pairs * 4
                if leftover:
                    center_available = True
            elif tally.get(rev, 0) > 0:
                # Mirror pair (e.g. lc/cl): each match adds 4 chars. Zero the
                # counts so the reverse word is skipped when reached later.
                usable = min(tally[w], tally[rev])
                total += 4 * usable
                tally[w] -= usable
                tally[rev] -= usable
        if center_available:
            total += 2
        return total
if __name__ == '__main__':
    # Quick manual check: expected output is 6 ("lc" + "gg" + "cl" -> "lcggcl").
    words = ["lc", "cl", "gg", "ob"]
    s = Solution()
    print(s.longestPalindrome(words))
| sun10081/leetcode_practice_xiaorui | questions/2101_2200/2131_2140/2131_longest_palindrome.py | 2131_longest_palindrome.py | py | 1,072 | python | en | code | 0 | github-code | 50 |
3046991163 | #/usr/bin/python3
import os
import pyfiglet
class FileRenamer:
    """Bulk file renamer: replaces a substring in every matching file name
    inside a folder, optionally forcing a new file extension."""

    def __init__(self, folderPath, text, replaceWith, extension):
        self.folderPath = folderPath      # directory whose files get renamed
        self.text = text                  # substring searched for in file names
        self.replaceWith = replaceWith    # replacement substring
        self.extension = extension        # new extension incl. dot, or "" to keep the old one

    def rename_files(self):
        """Rename all matching files in self.folderPath, then print a summary.

        BUGFIX: `count` used to be initialised inside the try block *after*
        os.chdir(), so an invalid path crashed with UnboundLocalError when
        the summary ran; now we bail out early on a bad path instead.
        """
        count = 0
        try:
            os.chdir(self.folderPath)
        except FileNotFoundError:
            print("Invalid folder path entered!")
            return
        for file in os.listdir():
            name, extension = os.path.splitext(file)
            if self.text in name:
                new_name = name.replace(self.text, self.replaceWith)
                # Keep the original extension unless a replacement was given.
                if self.extension == "":
                    os.rename(file, new_name + extension)
                else:
                    os.rename(file, new_name + self.extension)
                count += 1
            else:
                print(f'"{self.text}" not in filename "{name}"')
        self.finish(self.folderPath, count)

    @staticmethod
    def start():
        """Interactively collect (folderPath, text, replaceWith, extension)."""
        ascii_banner = pyfiglet.figlet_format("Bulk File Renamer")
        print(ascii_banner)
        folderPath = input("Folder path: ")
        text = input("Find text: ")
        replaceWith = input("Replace with: ")
        change_extension = input("Amend file types? (Y/N) ")
        if change_extension in ("Y", "y"):
            extension = input('New file extension: ')
            if not extension.startswith("."):
                raise ValueError('Not a valid file extension. Must start with "."')
        else:
            extension = ""
        return folderPath, text, replaceWith, extension

    def finish(self, folderPath, filesCount):
        """Print the closing summary."""
        print("Done!")
        print(f"{filesCount} files in folder {folderPath} were renamed.")
| LRS4/python-automation | file-renamer/renamer.py | renamer.py | py | 1,894 | python | en | code | 1 | github-code | 50 |
255903687 | #!/bin/python
#-*- coding: utf8 -*-
def evalPoly(a, t, reverse = False):
    """Evaluate a polynomial and its first derivative via Horner's scheme.

    a       : coefficients, a[0] = constant term; if reverse=True the highest
              order coefficient comes first instead
    t       : evaluation point (int, float or complex)
    returns : (p(t), p'(t))
    """
    if reverse:
        a = list(reversed(a))
    n = len(a) - 1
    # BUGFIX: the original indexed b[1] unconditionally and crashed with
    # IndexError for a constant polynomial (len(a) == 1).
    if n == 0:
        return a[0], 0.0
    b = [0.0] * len(a)   # Horner accumulators for the value
    c = [0.0] * len(a)   # Horner accumulators for the derivative
    b[-1] = a[-1]
    c[-1] = b[-1]
    for k in range(n-1, 0, -1):
        b[k] = a[k] + t*b[k+1]
        c[k] = b[k] + t*c[k+1]
    b[0] = a[0] + t*b[1]
    return b[0], c[1]
def evaluate(a, t, reverse = False):
    """Convenience wrapper around evalPoly that returns only p(t)."""
    value, _derivative = evalPoly(a, t, reverse=reverse)
    return value
def main():
    """Demo: evaluate a degree-8 polynomial (lowest-order coefficient first)
    at a real and at a complex point, printing (p(t), p'(t)) for each."""
    p1 = 1.414214
    p2 = complex(1, 2)
    a = [51200, 0, -39712, 0, 7392, 0, -170, 0, 1]
    print("evaluation at " + str(p1))
    print(evalPoly(a, p1))
    print("evaluation at " + str(p2))
    print(evalPoly(a, p2))
if __name__ == '__main__':
    main()
| liyp0095/ISU_PA | 2019F/CS577/Assignment5/PolynomialEvaluation.py | PolynomialEvaluation.py | py | 734 | python | en | code | 0 | github-code | 50 |
import matplotlib.pyplot as plt
import csv

# Scatter-plot 2D points from a CSV of "x,y,label" rows and annotate each
# point with its label.
input_file = "/home/ole/master/test_onto/coords.csv"

x = []
y = []
labels = []
with open(input_file, 'r') as csvfile:
    for row in csv.reader(csvfile, delimiter=','):
        x.append(float(row[0]))
        y.append(float(row[1]))
        labels.append(str(row[2]))

plt.xlabel('x')
plt.ylabel('y')
plt.title('Plotting the vectors!!!!!')
plt.legend()  # NOTE(review): no labelled artists exist yet, so this draws nothing
# Plot each point and its label. (The original loop iterated indices and
# shadowed the builtin `type`; it also kept an unused row counter and dead
# commented-out code, all removed here.)
for xi, yi, label in zip(x, y, labels):
    plt.scatter(xi, yi, marker='o', color='blue')
    plt.text(xi + 0.3, yi + 0.3, label, fontsize=9)
plt.show()
| oholter/matcher-with-word-embedings | py/plot/plot.py | plot.py | py | 803 | python | en | code | 1 | github-code | 50 |
35423933415 | # -*- coding: utf-8 -*-
import logging
import ask_sdk_core.utils as ask_utils
import paho.mqtt.client as mqtt
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
# Module-level logger for the Lambda handler; INFO keeps CloudWatch output readable.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
    """Handler for Skill Launch (the user opened the skill without an intent)."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("LaunchRequest")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Bienvenido a la skill de prueba de cidesi MQTT"
        # Speak the greeting and re-prompt with the same text to keep the session open.
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )
#-------------------------------------------------------------------------------------------------------------------------------------------------#
class ChTopicIntentHandler(AbstractRequestHandler):
    """Handles the ChTopic intent: publishes the spoken slot value to the
    MQTT topic test/prueba on the public Mosquitto test broker."""
    def on_connect(self, client, userdata, flags, rc):
        print("Conexión establecida con éxito al broker MQTT")
    def on_message(self, client, userdata, message):
        print("Mensaje recibido en el tópico " + message.topic + " con el siguiente contenido: " + message.payload.decode())
    def on_disconnect(self, client, userdata, rc):
        print("Desconectado del broker MQTT")
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("ChTopic")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        # Read the numeric value the user uttered for the 'valor' slot.
        num = handler_input.request_envelope.request.intent.slots['valor']
        num = str(num.value)
        client = mqtt.Client(client_id="my_client_T") # unique identifier for this client
        client.on_connect = self.on_connect # callback run once the connection is established
        client.on_message = self.on_message # callback run when a message is received
        client.on_disconnect = self.on_disconnect # callback run on disconnect
        client.connect("test.mosquitto.org", 1883) # connect to the Mosquitto test broker
        # Publish with QoS 1 and retain=True so late subscribers get the last value.
        client.publish("test/prueba", f"{num}", 1, True)
        #client.disconnect()
        # NOTE(review): a fresh client is created per invocation and never
        # disconnected (see commented-out call) — possible connection leak.
        speak_output = "mqtt test exitoso, se actualizo el valor!"
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask("add a reprompt if you want to keep the session open for the user to respond")
                .response
        )
#final
#-------------------------------------------------------------------------------------------------------------------------------------------------#
class RdTopicIntentHandler(AbstractRequestHandler):
    """Handles RdTopicIntent: reports the last message seen on test/prueba.

    The MQTT client is created once when the handler is constructed; a
    background network loop (loop_start) keeps the subscription alive.
    """
    def __init__(self):
        self.mqtt_client = mqtt.Client(client_id="my_client_dede")
        self.mqtt_client.on_connect = self.on_connect
        self.mqtt_client.on_message = self.on_message
        self.mqtt_client.on_disconnect = self.on_disconnect
        self.mqtt_client.connect("test.mosquitto.org", 1883)
        self.mqtt_client.subscribe("test/prueba")
        self.mqtt_client.loop_start()
        # Last payload seen on the topic; stays None until a message arrives.
        self.msg_topico = None
    def on_connect(self, client, userdata, flags, rc):
        print("Conexión establecida con éxito al broker MQTT")
    def on_message(self, client, userdata, message):
        # Runs on paho's network thread; caches the decoded payload.
        self.msg_topico = message.payload.decode()
        print("Mensaje recibido en el tópico test/prueba: " + self.msg_topico)
    def on_disconnect(self, client, userdata, rc):
        print("Desconectado del broker MQTT")
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("RdTopicIntent")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        # NOTE(review): if no message has arrived yet this speaks "None".
        speak_output = f"Mensaje recibido en el tópico test/prueba con el siguiente contenido: {self.msg_topico}"
        #client.disconnect()
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask("add a reprompt if you want to keep the session open for the user to respond")
                .response
        )
class HelpIntentHandler(AbstractRequestHandler):
    """Handler for Help Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Puedes decirme a que valor quieres cambiar el topico"
        # Re-prompt with the same text so the session stays open.
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )
class CancelOrStopIntentHandler(AbstractRequestHandler):
    """Single handler for Cancel and Stop Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
                ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Tenga un buen dia!"
        # No .ask(): the session is allowed to end after the farewell.
        return (
            handler_input.response_builder
                .speak(speak_output)
                .response
        )
class FallbackIntentHandler(AbstractRequestHandler):
    """Single handler for Fallback Intent (utterances no other intent matched)."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        logger.info("In FallbackIntentHandler")
        speech = "Hmm, No entendi lo que me pediste, puedes pedirme ayuda diciendo: Alexa ayuda"
        reprompt = "No he entendido lo que me has dicho, en que te puedo ayudar?"
        return handler_input.response_builder.speak(speech).ask(reprompt).response
class SessionEndedRequestHandler(AbstractRequestHandler):
    """Handler for Session End."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        # Any cleanup logic goes here. A SessionEndedRequest must not speak,
        # so an empty response is returned.
        return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
    """The intent reflector is used for interaction model testing and debugging.
    It will simply repeat the intent the user said. You can create custom handlers
    for your intents by defining them above, then also adding them to the request
    handler chain below.
    """
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("IntentRequest")(handler_input)
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        intent_name = ask_utils.get_intent_name(handler_input)
        # BUGFIX: the original concatenated the undefined name `ChTopicIntent`,
        # which raised NameError at runtime; echo the resolved intent name instead.
        speak_output = "Haz activado la funcion de cambiar el valor del topico " + intent_name + "."
        return (
            handler_input.response_builder
                .speak(speak_output)
                # .ask("add a reprompt if you want to keep the session open for the user to respond")
                .response
        )
class CatchAllExceptionHandler(AbstractExceptionHandler):
    """Generic error handling to capture any syntax or routing errors. If you receive an error
    stating the request handler chain is not found, you have not implemented a handler for
    the intent being invoked or included it in the skill builder below.
    """
    def can_handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> bool
        # Last-resort safety net: handles every exception type.
        return True
    def handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> Response
        # Log the full traceback, then apologise to the user in speech.
        logger.error(exception, exc_info=True)
        speak_output = "Lo siento, he tenido problemas para hacer lo que me pides. Por favor, inténtelo de nuevo."
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
# Handlers are consulted top to bottom, so the catch-all IntentReflectorHandler
# must stay last among the intent handlers.
# NOTE(review): constructing RdTopicIntentHandler() opens an MQTT connection
# and starts a network thread at module import time — confirm this is intended.
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(ChTopicIntentHandler())
sb.add_request_handler(RdTopicIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler() | EliasAquino/LeerEscribirTopicos_PahoMQTT_Skill | lambda/lambda_function.py | lambda_function.py | py | 9,742 | python | en | code | 0 | github-code | 50 |
1844700960 | import sys, os, time
import numpy as np
import stft as STFT
import math
import sineModel as SM
import IPython
import utilFunctions as UF
from IPython.core.debugger import set_trace
class AudioSineModel:
    """Sinusoidal model of an audio file.

    Wraps sms-tools-style analysis (STFT + sinusoidal tracking) and reduces
    each detected sine track to a time/frequency line segment.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        self.frequencies = None   # per-frame track frequencies, filled by analysis
        self.magnitudes = None
        self.phases = None
        self.sample_rate, self.signal = UF.wavread(file_path)

    def sine_model_analysis(self, window_size=2047, fft_size=4096, hop_size=150, threshold_db=-80, min_sine_dur=0.15, max_sines=15):
        """Run the STFT and sinusoidal-model analysis, then derive line segments."""
        window = np.blackman(window_size)
        self.fft_size = fft_size
        self.window_size = window_size
        self.hop_size = hop_size
        self.threshold_db = threshold_db
        self.min_sine_dur = min_sine_dur
        self.max_sines = max_sines
        self.stft_magnitudes, self.stft_phases = STFT.stftAnal(self.signal, window, fft_size, hop_size)
        self.frequencies, self.magnitudes, self.phases = SM.sineModelAnal(self.signal, self.sample_rate, window, fft_size, hop_size, threshold_db, max_sines, min_sine_dur)
        self.compute_lines()

    def compute_lines(self):
        """Collapse each sine track into [start_s, end_s, start_hz, end_hz, slope].

        A segment is emitted when a track (a run of non-zero frequencies in
        one column of self.frequencies) falls back to zero. Frequencies are
        quantised to 10 Hz steps and times to 10-sample steps.
        """
        # IMPROVED: identity comparisons against None instead of ==/!=, and
        # the unused local `fvar` was removed.
        start_val = None
        last_val = None
        lines = []
        x0 = None
        for sine_idx in range(0, self.frequencies[0].size):
            for idx, val in enumerate(self.frequencies[:, sine_idx]):
                if val > 0 and start_val is None:
                    start_val = val
                    x0 = idx
                elif val == 0 and start_val is not None:
                    pos0 = round((math.floor((self.hop_size * x0)/10)*10)/self.sample_rate, 2)
                    pos1 = round(math.floor((self.hop_size * idx)/10)*10/self.sample_rate, 2)
                    val0 = math.floor(start_val/10)*10
                    val1 = math.floor(last_val/10)*10
                    # NOTE(review): pos1 == pos0 would raise ZeroDivisionError;
                    # confirm tracks always span at least one quantised time step.
                    lines.append([pos0, pos1, val0, val1, (val1-val0)/(pos1-pos0)])
                    start_val = None
                last_val = val
        # NOTE(review): a track still active at the final frame is discarded.
        self.lines = sorted(lines, key=lambda a_entry: a_entry[0]*10000 + a_entry[1])
| arthurtofani/sin-mod-fingerprint | lib/audio_sine_model.py | audio_sine_model.py | py | 1,953 | python | en | code | 0 | github-code | 50 |
29473179865 | import re
from utils.command import Command
class Oobify(Command):
    """Chat command that translates a message into "oob-speak"."""
    def __init__(self):
        super().__init__("oobify")

    def oob(self, string):
        """Return *string* with vowels replaced by 'oob'/'Oob'."""
        # The substitutions must run in exactly this order: collapse
        # vowel+'b' pairs first so they are not expanded twice below.
        collapsed = re.sub('[aeiouy]b', 'a', string)
        lowered = re.sub('[aeiouy]', 'oob', collapsed)
        return re.sub('[AEIOUY]', 'Oob', lowered)

    async def run(self, client, message, type, params):
        text = " ".join(params)
        await client.send_message(message.channel, self.oob(text))

    async def help(self, client, message):
        usage = self.name + " usage: `!oobify [string to oobify]`"
        await client.send_message(message.channel, usage)
| XenonMolecule/G-Bot | commands/oobify.py | oobify.py | py | 656 | python | en | code | 0 | github-code | 50 |
23009922243 | __author__ = 'anastasiiakorosteleva'
import requests
from bs4 import BeautifulSoup
from Bio import Entrez
Entrez.email = 'ptichka.sinichka1@gmail.com'  # NCBI E-utilities require a contact address on every request
def makelink(db, indexes):
    """Build NCBI record URLs for the given accession indexes.

    db: "protein" or "nucleotide" (case-insensitive); anything else prints an
        error message and yields an empty list (same contract as before).
    indexes: iterable of accession ids (converted with str()).
    Returns a list with one URL per index.
    """
    base_by_db = {
        "protein": "http://www.ncbi.nlm.nih.gov/protein/",
        "nucleotide": "http://www.ncbi.nlm.nih.gov/nuccore/",
    }
    base = base_by_db.get(db.lower())
    if base is None:
        print("Wrong database! Enter 'protein' or 'nucleotide'")
        return []
    # single loop replaces the two duplicated per-database loops
    return [base + str(i) for i in indexes]
def fastafind(link):
    """For each NCBI record URL, scrape the page for its FASTA report link.

    Appends one entry per page: the absolute FASTA URL for every matching
    anchor found, or the string "Broken url" when the page contains no
    "?report=fasta" anchor at all.
    """
    fasta_links = []
    for record_url in link:
        page = requests.get(record_url)
        soup = BeautifulSoup(page.content, "html.parser")
        anchors = soup.find_all("a")
        if "?report=fasta" not in str(anchors):
            fasta_links.append("Broken url")
            continue
        for anchor in anchors:
            if "?report=fasta" in str(anchor):
                fasta_links.append("http://www.ncbi.nlm.nih.gov" + anchor.get("href"))
    return fasta_links
# --- interactive configuration ---------------------------------------------
filename = input("Enter name of file with indexes: ")
db = input("Enter database (protein or nucleotide): ")
rettype = input("Eter data type (gb or FASTA): ")  # NOTE(review): typo "Eter" in the prompt text
of_name = input("Enter output file name.format: ")
find_url = input("Find url's for FASTA files? (Yes/No): ")
# read the accession ids (one per line; entries still carry their newlines here)
i_file = open(filename, 'r')
list_of_indexes = i_file.readlines()
i_file.close()
# optionally resolve a FASTA report URL for every accession and append
# "<accession>\t<url>" lines to a separate output file
if find_url == "Yes":
    of_name_ = input("Enter output file name.format: ")
    list_of_ref = makelink(db, indexes = [i.strip() for i in list_of_indexes])
    list_of_links = fastafind(list_of_ref)
    out = ''
    j = 0
    i = 0
    # pair each accession with its resolved link (stops at the shorter list)
    while j < len(list_of_indexes) and i < len(list_of_links):
        out += (list_of_indexes[j].strip() + '\t' + list_of_links[i] + '\n')
        j += 1
        i += 1
    of_file = open(of_name_, "a")
    of_file.write(out)
    of_file.close()
# fetch the records themselves via Entrez and append them to the output file
id = [i.strip() for i in list_of_indexes]  # NOTE(review): shadows the builtin id()
handle = Entrez.efetch(db = db, id = id, rettype = rettype, retmode="text")
out = handle.read()
of_file = open(of_name, "a")
of_file.write(out)
of_file.close()
41234180086 | import os
import mysql.connector
def main(path):
    """Register every image in <path>/onSale in the database.

    path: directory expected to contain an "onSale" subfolder of images.
    Prints a success/failure line per image.
    """
    foldersInPath = os.listdir(path)
    rows = []  # NOTE(review): never used below — appears to be dead code
    for folder in foldersInPath:
        if folder == "onSale":
            getFilesPath = os.path.join(path, folder)
            filesInFolder = os.listdir(getFilesPath)
            for img in filesInFolder:
                pathToImage = os.path.join(getFilesPath, img) #Full Path to Each Image
                filename = os.path.basename(pathToImage)
                # NOTE(review): -1 is treated as success — presumably because
                # mysql-connector resets rowcount after the cursor is closed
                # (see saveInDatabase). TODO confirm this contract.
                if saveInDatabase(filename) == -1:
                    print(f"DB Insert SUCCESS for {pathToImage}")
                else:
                    print(f"DB Insert FAILURE for {pathToImage}")
def saveInDatabase(filename):
    """Insert one on-sale image row (fixed name "Fabric", price 35.00).

    Returns cursor.rowcount read *after* the cursor was closed — apparently
    -1 on success (the caller relies on that) — or None when any error
    occurred. TODO(review): confirm the rowcount-after-close behaviour.
    """
    try:
        mydb = mysql.connector.connect(
            host="127.0.0.1",
            user="root",
            passwd="",  # NOTE(review): hard-coded credentials; move to config/env
            database="batik"
        )
        mycursor = mydb.cursor()
        # parameterized query — safe against SQL injection
        sql = "INSERT INTO on_sales (path, name, price) VALUES (%s, %s, %s)"
        val = (filename, "Fabric", 35.00)
        mycursor.execute(sql, val)
        mydb.commit()
        mycursor.close()
        mydb.close()
        return mycursor.rowcount
    except Exception as e:
        # NOTE(review): connection/cursor are not closed on this path (leak);
        # consider try/finally or context managers.
        print(f"An Error {e} occurred!")
if __name__ == "__main__":
    # NOTE(review): machine-specific absolute path; consider a CLI argument.
    main("C:\\Personal\\Laravel\\Projects\\BatikProject\\src\\public\\images")
69903390876 | from collections import deque
def sol(arr):
    """Return "Yes" if the cubes in `arr` can be piled in non-increasing order.

    Greedy check: repeatedly take the larger of the two ends; every taken
    cube must be no larger than the previously placed one. Uses a deque so
    both ends pop in O(1) — the original `list.pop(0)` made this O(n^2).
    Unlike the original, the caller's list is no longer consumed.
    """
    blocks = deque(arr)
    pile = float('inf')
    while blocks:
        num = blocks.popleft() if blocks[0] > blocks[-1] else blocks.pop()
        if num > pile:
            return "No"
        pile = num
    return "Yes"
# T test cases: each consists of a count line followed by the side lengths
for _ in range(int(input())):
    n = int(input())  # number of cubes (consumed but otherwise unused)
    arr = list(map(int, input().split()))
    print(sol(arr))
| AdityaChirravuri/CompetitiveProgramming | HackerRank/Python/Collections/PillingUp!.py | PillingUp!.py | py | 438 | python | en | code | 0 | github-code | 50 |
def Grundy(dict_succ):
    """Compute Grundy numbers for a game graph given as {vertex: [successors]}.

    Vertices are processed in descending order, which assumes edges point
    from lower- to higher-numbered vertices (i.e. a vertex's successors are
    processed before it — the same assumption the original code made).
    Each vertex's Grundy value is the mex (minimum excludant) of its
    successors' values; sinks get 0.

    Prints and returns a dict mapping each Grundy value to the list of
    vertices carrying it (same shape as before).

    Fixes over the original: the mex is computed directly instead of through
    10000-based sentinel arrays (which silently broke for graphs needing
    more than 1000 distinct values), the stale-`ind` carry-over for unseen
    successors is gone, and the reverse lookup through g_dict (quadratic)
    is replaced by a direct vertex -> grundy map.
    """
    grundy_of = {}  # vertex -> its Grundy number
    g_dict = {}     # Grundy number -> vertices with that number
    for vertex in sorted(dict_succ, reverse=True):
        # mex of the successors' Grundy numbers (unprocessed successors,
        # which the old code handled inconsistently, are skipped)
        successor_values = {grundy_of[s] for s in dict_succ[vertex] if s in grundy_of}
        g = 0
        while g in successor_values:
            g += 1
        grundy_of[vertex] = g
        g_dict.setdefault(g, []).append(vertex)
    print(g_dict)
    return g_dict
| abmounir/Grundy | grundy.py | grundy.py | py | 1,015 | python | en | code | 1 | github-code | 50 |
34260995192 | """Testing facility for conkit.io.a2m"""
__author__ = "Felix Simkovic"
__date__ = "30 Jul 2018"
import unittest
from conkit.io.a2m import A2mParser
from conkit.io.tests.helpers import ParserTestCase
class TestA2mParser(ParserTestCase):
    """Unit tests for reading and writing alignments in A2M format."""
    def test_read_1(self):
        """Headerless alignment: entries get auto-generated ids seq_0..seq_3."""
        msa = """GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF
EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA
EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF
EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF
"""
        f_name = self.tempfile(content=msa)
        with open(f_name, "r") as f_in:
            sequence_file = A2mParser().read(f_in)
        # every sequence keeps its gaps and is paired with a generated id
        for i, sequence_entry in enumerate(sequence_file):
            if i == 0:
                self.assertEqual("seq_0", sequence_entry.id)
                self.assertEqual("GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF", sequence_entry.seq)
            elif i == 1:
                self.assertEqual("seq_1", sequence_entry.id)
                self.assertEqual("EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA", sequence_entry.seq)
            elif i == 2:
                self.assertEqual("seq_2", sequence_entry.id)
                self.assertEqual("EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF", sequence_entry.seq)
            elif i == 3:
                self.assertEqual("seq_3", sequence_entry.id)
                self.assertEqual("EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF", sequence_entry.seq)
    def test_read_2(self):
        """Input with FASTA-style '>' headers is rejected with ValueError."""
        msa = """>header1
GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF
>header2
EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA
>header3
EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF
>header4
EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF
"""
        f_name = self.tempfile(content=msa)
        with open(f_name, "r") as f_in:
            with self.assertRaises(ValueError):
                A2mParser().read(f_in)
    def test_write_1(self):
        """Round-trip: reading then writing reproduces the alignment verbatim."""
        msa = [
            "GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF",
            "EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA",
            "EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF",
            "EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF",
        ]
        f_name_in = self.tempfile(content='\n'.join(msa))
        f_name_out = self.tempfile()
        with open(f_name_in, "r") as f_in, open(f_name_out, "w") as f_out:
            sequence_file = A2mParser().read(f_in)
            A2mParser().write(f_out, sequence_file)
        with open(f_name_out, "r") as f_in:
            output = f_in.read().splitlines()
        self.assertEqual(msa, output)
if __name__ == "__main__":
    # run the suite directly with verbose per-test output
    unittest.main(verbosity=2)
| rigdenlab/conkit | conkit/io/tests/test_a2m.py | test_a2m.py | py | 2,618 | python | en | code | 20 | github-code | 50 |
31858510463 | from torch import nn
import torch
def conv_nd(dims, *args, **kwargs):
    """Instantiate an nn.Conv{1,2,3}d layer for the given dimensionality.

    Raises ValueError for any other value of `dims`.
    """
    conv_types = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_types[dims](*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """Instantiate an nn.AvgPool{1,2,3}d layer for the given dimensionality.

    Raises ValueError for any other value of `dims`.
    """
    pool_types = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_types[dims](*args, **kwargs)
class Downsample(nn.Module):
    """Spatial downsampling via strided convolution or average pooling.

    For dims == 3 only the two trailing spatial axes are halved; the first
    (depth) axis keeps its size.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            # learned downsampling: kernel-3 convolution with stride 2
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # pooling cannot change the channel count
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResnetBlock(nn.Module):
    """Residual conv block with optional input projection and downsampling.

    When `sk` is False, or the channel counts differ, the input is projected
    with an extra convolution; when `sk` is False the skip path is also a
    learned convolution instead of the identity.
    """

    def __init__(self, in_c, out_c, down, ksize=3, sk=False, use_conv=True):
        super().__init__()
        pad = ksize // 2
        # project the input unless channels match and the skip-conv is disabled
        if in_c == out_c and sk:
            self.in_conv = None
        else:
            self.in_conv = nn.Conv2d(in_c, out_c, ksize, 1, pad)
        self.block1 = nn.Conv2d(out_c, out_c, 3, 1, 1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(out_c, out_c, ksize, 1, pad)
        # learned skip connection only when sk is False
        self.skep = nn.Conv2d(out_c, out_c, ksize, 1, pad) if not sk else None
        self.down = down
        if self.down:
            self.down_opt = Downsample(in_c, use_conv=use_conv)

    def forward(self, x):
        if self.down:
            x = self.down_opt(x)
        if self.in_conv is not None:
            x = self.in_conv(x)
        h = self.block2(self.act(self.block1(x)))
        skip = self.skep(x) if self.skep is not None else x
        return h + skip
class SdxlT2IAdapter(nn.Module):
    """Three-level T2I-Adapter trunk for SDXL.

    The conditioning image is pixel-unshuffled (factor 8) into `cin` channels
    and pushed through `nums_rb` ResnetBlocks per channel level; forward()
    returns one feature map per level.
    """

    def __init__(self, channels=[320, 640, 1280], nums_rb=2, cin=64, ksize=3, sk=False, use_conv=True):
        super().__init__()
        self.unshuffle = nn.PixelUnshuffle(8)
        self.channels = channels
        self.nums_rb = nums_rb
        # per-block downsampling flags, indexed by level * nums_rb + block
        is_down = [True, False, False, False, True, False]
        blocks = []
        for level, width in enumerate(channels):
            for block_idx in range(nums_rb):
                # the first block of each later level adapts the previous width
                in_width = channels[level - 1] if (level != 0 and block_idx == 0) else width
                blocks.append(
                    ResnetBlock(
                        in_width,
                        width,
                        down=is_down[level * nums_rb + block_idx],
                        ksize=ksize,
                        sk=sk,
                        use_conv=use_conv,
                    )
                )
        self.body = nn.ModuleList(blocks)
        self.conv_in = nn.Conv2d(cin, channels[0], 3, 1, 1)

    def forward(self, x, inference=False):
        x = self.unshuffle(x)
        if inference:
            # duplicate along the batch axis (presumably for paired
            # cond/uncond inference — confirm with the caller)
            x = x.repeat(2, 1, 1, 1)
        features = []
        x = self.conv_in(x)
        for level in range(len(self.channels)):
            for block_idx in range(self.nums_rb):
                x = self.body[level * self.nums_rb + block_idx](x)
            features.append(x)
        return features
class SdxlT2IAdapterFull(nn.Module):
    """Four-level T2I-Adapter variant: like SdxlT2IAdapter but with an extra
    1280-channel level whose first block does NOT downsample.

    Downsampling here is derived from the branch conditions rather than an
    explicit per-block flag list: the first block of every intermediate level
    downsamples, the first block of the last level does not.
    """
    def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=2, cin=64, ksize=3, sk=False, use_conv=True):
        super(SdxlT2IAdapterFull, self).__init__()
        self.unshuffle = nn.PixelUnshuffle(8)
        self.channels = channels
        self.nums_rb = nums_rb
        self.body = []
        for i in range(len(channels)):
            for j in range(nums_rb):
                # NOTE: branch order matters — the last-level check must come
                # before the generic "first block of a later level" check.
                if (i == len(channels)-1) and (j == 0):
                    self.body.append(
                        ResnetBlock(channels[i - 1], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv))
                elif (i != 0) and (j == 0):
                    self.body.append(
                        ResnetBlock(channels[i - 1], channels[i], down=True, ksize=ksize, sk=sk, use_conv=use_conv))
                else:
                    self.body.append(
                        ResnetBlock(channels[i], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv))
        self.body = nn.ModuleList(self.body)
        self.conv_in = nn.Conv2d(cin, channels[0], 3, 1, 1)
    def forward(self, x, inference=False):
        """Return one feature map per channel level for the conditioning image."""
        # unshuffle
        x = self.unshuffle(x)
        if inference:
            # duplicate along the batch axis (presumably for paired
            # cond/uncond inference — confirm with the caller)
            x = x.repeat(2,1,1,1)
        # extract features
        features = []
        x = self.conv_in(x)
        for i in range(len(self.channels)):
            for j in range(self.nums_rb):
                idx = i * self.nums_rb + j
                x = self.body[idx](x)
            features.append(x)
        return features
# if __name__ == "__main__":
# import time
# IN_CHANNELS: int = 4
# OUT_CHANNELS: int = 4
# ADM_IN_CHANNELS: int = 2816
# CONTEXT_DIM: int = 2048
# MODEL_CHANNELS: int = 320
# TIME_EMBED_DIM = 320 * 4
# device = 'cuda:4'
# print("create unet")
# unet = SdxlUNet2DConditionModel()
# unet.to(device)
# unet.set_use_memory_efficient_attention(True, False)
# unet.set_gradient_checkpointing(True)
# unet.eval()
# sdxl_adapter = SdxlT2IAdapter()
# sdxl_adapter.to(device)
# sdxl_adapter.train()
# unet.to(dtype=torch.float16)
# sdxl_adapter.to(dtype=torch.float16)
# # 使用メモリ量確認用の疑似学習ループ
# print("preparing optimizer")
# # optimizer = torch.optim.SGD(unet.parameters(), lr=1e-3, nesterov=True, momentum=0.9) # not working
# # import bitsandbytes
# # optimizer = bitsandbytes.adam.Adam8bit(unet.parameters(), lr=1e-3) # not working
# # optimizer = bitsandbytes.optim.RMSprop8bit(unet.parameters(), lr=1e-3) # working at 23.5 GB with torch2
# # optimizer= bitsandbytes.optim.Adagrad8bit(unet.parameters(), lr=1e-3) # working at 23.5 GB with torch2
# import transformers
# # optimizer = transformers.optimization.Adafactor(unet.parameters(), relative_step=True) # working at 22.2GB with torch2
# optimizer = transformers.optimization.AdamW(sdxl_adapter.parameters()) # working at 41.7GB with torch2
# scaler = torch.cuda.amp.GradScaler(enabled=True)
# print("start training")
# steps = 10
# batch_size = 2
# for step in range(steps):
# print(f"step {step}")
# if step == 1:
# time_start = time.perf_counter()
# x = torch.randn(batch_size, 4, 128, 128).to(device) # 1024x1024
# t = torch.randint(low=0, high=10, size=(batch_size,), device=device)
# ctx = torch.randn(batch_size, 77, 2048).to(device)
# y = torch.randn(batch_size, ADM_IN_CHANNELS).to(device)
# lineart_img = torch.randn(batch_size, 1, 1024, 1024).to(device)
# with torch.cuda.amp.autocast(enabled=True):
# ada_cond = sdxl_adapter(lineart_img)
# output = unet(x, t, ctx, y, adapter_features=ada_cond)
# target = torch.randn_like(output)
# loss = torch.nn.functional.mse_loss(output, target)
# scaler.scale(loss).backward()
# scaler.step(optimizer)
# scaler.update()
# optimizer.zero_grad(set_to_none=True)
# time_end = time.perf_counter()
# print(f"elapsed time: {time_end - time_start} [sec] for last {steps - 1} steps")
| Daming-TF/kohya_ray | library/sdxl_t2i_adapter.py | sdxl_t2i_adapter.py | py | 8,092 | python | en | code | 0 | github-code | 50 |
26203936898 | """
The code creates a web application using Streamlit, a Python library for building interactive web apps.
# Author: Anonymous
# Date: June 06, 2023
"""
# streamlit packages
import streamlit as st
from streamlit_extras.switch_page_button import switch_page
from streamlit_extras.app_logo import add_logo
from streamlit_option_menu import option_menu
from streamlit_ace import st_ace
from streamlit_extras.add_vertical_space import add_vertical_space
from st_pandas_text_editor import st_pandas_text_editor
import streamlit.components.v1 as components
from streamlit.components.v1 import html
# dataframe handling
import pandas as pd # read csv, df manipulation
# reusable functions, outsourced into another file
from helper_functions import GPTHelper
# multivision and threading
from multivision.multivision import Recommender
# handle GPT API
from langchain.chains import ConversationChain
# formats the prompt history in a particular way
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.llms import OpenAI
from langchain import LLMChain
# other modules
import time
import json
from PIL import Image
import vl_convert as vlc
import os
import base64
import path
import sys
# instantiate the shared GPT helper (reusable prompt/feedback utilities)
gpt_helper = GPTHelper()
# make the project root importable when deployed
# NOTE(review): `dir` shadows the builtin of the same name
dir = path.Path(__file__).abspath()
sys.path.append(dir.parent.parent)
# configure the page
st.set_page_config(
    page_title="Conversational Dashboard",
    page_icon="✅",
    layout="wide"
    # initial_sidebar_state="collapsed"
)
# feedback counter so that the form doesn't reopen on rerun
if not "feedback_counter" in st.session_state:
    st.session_state["feedback_counter"] = 0
# Initialize the chat-history session states
if "generated" not in st.session_state:
    st.session_state["generated"] = []  # model responses (vega-lite specs)
if "past" not in st.session_state:
    st.session_state["past"] = []  # user prompts
if "input" not in st.session_state:
    st.session_state["input"] = ""
if "stored_session" not in st.session_state:
    st.session_state["stored_session"] = []
def graph_counter():
    """Return the current graph number, initialising the counter to 1 once."""
    state = st.session_state
    if "graph_counter" not in state:
        state["graph_counter"] = 1
    return state["graph_counter"]
def increase_graph_counter():
    """Advance the graph counter after a visualization has been confirmed."""
    # (leftover debug print of the counter removed)
    st.session_state["graph_counter"] += 1
def page_counter():
    """Return the current story-page number, initialising the counter to 1 once."""
    state = st.session_state
    if "page_counter" not in state:
        state["page_counter"] = 1
    return state["page_counter"]
def increase_page_counter():
    """Advance the story-page counter once a page is complete."""
    # (leftover debug print of the counter removed)
    st.session_state["page_counter"] += 1
def model_initialisation(TEMPERATURE, MODEL, K, column_names):
    """Build the ConversationChain that turns NL queries into vega-lite JSON.

    TEMPERATURE: sampling temperature for the OpenAI model.
    MODEL: OpenAI model name (e.g. "gpt-3.5-turbo").
    K: window size of the ConversationBufferWindowMemory.
    column_names: comma-separated dataset fields, interpolated into the prompt.
    Returns the configured ConversationChain; its memory is shared across
    reruns via st.session_state.entity_memory.
    """
    # custom query template --> possible to add few shot examples in the future
    # add dynamic variables columns and data types to the prompt
    template = (
        """
    You are a great assistant at vega-lite visualization creation. No matter what
    the user ask, you should always response with a valid vega-lite specification
    in JSON.
    You should create the vega-lite specification based on user's query.
    Besides, Here are some requirements:
    1. Do not contain the key called 'data' in vega-lite specification.
    2. If the user ask many times, you should generate the specification based on the previous context.
    3. You should consider to aggregate the field if it is quantitative and the chart has a mark type of react, bar, line, area or arc.
    4. The available fields in the dataset are:
    %s
    5. Always respond with exactly one vega-lite specfication. Not more, not less.
    6. If you use a color attribute, it must be inside the encoding block attribute of the specification.
    7. When the user tells you to give him a sample graph, then you give him a vega-lite specification that you think,
    will look good.
    8. remember to only respond with vega-lite specifications without additional explanations
    Current conversation:
    {history}
    Human: {input}
    AI Assistant:"""
        % column_names
    )
    PROMPT = PromptTemplate(
        input_variables=["history", "input"], template=template
    )
    # Create an OpenAI instance
    llm = OpenAI(
        temperature=TEMPERATURE,
        openai_api_key=st.secrets["openai_api_key"],
        model_name=MODEL,
        verbose=False,
        streaming=True,
    )
    # Create a ConversationEntityMemory object if not already created
    if "entity_memory" not in st.session_state:
        st.session_state.entity_memory = ConversationBufferWindowMemory(k=K)
    # Create the ConversationChain object with the specified configuration
    Conversation = ConversationChain(
        llm=llm,
        prompt=PROMPT,
        memory=st.session_state.entity_memory,
    )
    return Conversation
def model_initialisation_chart_description(TEMPERATURE, MODEL):
    """Build the one-shot LLMChain that describes a vega-lite chart as text.

    Returns an LLMChain (no conversation memory) whose prompt asks for a
    concise 4-bullet, user-centric chart description.
    """
    # custom query template --> possible to add few shot examples in the future
    # add dynamic variables columns and data types to the prompt
    template = """
    You are a great assistant at chart to text tasks.\
    Please describe the following vega lite chart. Your\
    description will be shown on a data story. It should be concise and contain only 4 short bullet points.
    It should also include additional\
    information that is not included in the chart.\
    Try not to explain in a descriptive style but be more user centric.
    Human: {input}
    AI Assistant:"""
    PROMPT = PromptTemplate(input_variables=["input"], template=template)
    # Create an OpenAI instance
    llm = OpenAI(
        temperature=TEMPERATURE,
        openai_api_key=st.secrets["openai_api_key"],
        model_name=MODEL,
        verbose=False,
        streaming=True,
    )
    # Create the ConversationChain object with the specified configuration
    Conversation = LLMChain(
        llm=llm,
        prompt=PROMPT,
    )
    return Conversation
def model_initialisation_story_title(TEMPERATURE, MODEL):
    """Build the one-shot LLMChain that condenses text into a 2-3 word title."""
    # custom query template --> possible to add few shot examples in the future
    # add dynamic variables columns and data types to the prompt
    template = """
    You are a great assistant to create interesting titles.\
    Summarize the following text into a 2-3 word long title.
    Human: {input}
    AI Assistant:"""
    PROMPT = PromptTemplate(input_variables=["input"], template=template)
    # Create an OpenAI instance
    llm = OpenAI(
        temperature=TEMPERATURE,
        openai_api_key=st.secrets["openai_api_key"],
        model_name=MODEL,
        verbose=False,
        streaming=True,
    )
    # Create the ConversationChain object with the specified configuration
    Conversation = LLMChain(
        llm=llm,
        prompt=PROMPT,
    )
    return Conversation
def model_initialisation_story_purpose(TEMPERATURE, MODEL):
    """Build the one-shot LLMChain that writes a story title plus short description."""
    # custom query template --> possible to add few shot examples in the future
    # add dynamic variables columns and data types to the prompt
    template = """
    You are a great assistant to create interesting titles and descriptions.\
    Create a data story title plus a one or two sentence long description from the following text:
    Human: {input}
    AI Assistant:"""
    PROMPT = PromptTemplate(input_variables=["input"], template=template)
    # Create an OpenAI instance
    llm = OpenAI(
        temperature=TEMPERATURE,
        openai_api_key=st.secrets["openai_api_key"],
        model_name=MODEL,
        verbose=False,
        streaming=True,
    )
    # Create the ConversationChain object with the specified configuration
    Conversation = LLMChain(
        llm=llm,
        prompt=PROMPT,
    )
    return Conversation
def style():
    """
    Apply custom styles to the page, remove sidebar elements, and add custom
    CSS for the sticky header.
    This function applies custom CSS styles to the page, including removing
    whitespace from the top of the page and sidebar.
    It defines CSS classes for styling specific elements, such as custom-div,
    block-container, blue-text, and normal-text.
    The function also hides the footer, removes the sidebar pages, and adds
    custom CSS for the sticky header.
    Returns:
        None
    """
    # Remove whitespace from the top of the page and sidebar
    st.markdown(
        """
        <style>
        .custom-div {
            width: 30vw;
            height: 280px;
            overflow: hidden;
            overflow-wrap: break-word;
        }
        .block-container {
            padding-top: 0vh;
        }
        .blue-text {
            color: blue;
            font-family: Arial, sans-serif;
            font-size: 20px;
        }
        .normal-text {
            color: black;
            font-family: Arial, sans-serif;
            font-size: 20px;
        }
        footer{
            visibility:hidden;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    # remove the sidebar pages
    no_sidebar_style = """
        <style>
            div[data-testid="stSidebarNav"] li {display: none;}
        </style>
    """
    # hide the sidebar
    st.markdown(no_sidebar_style, unsafe_allow_html=True)
    ### Custom CSS for the sticky header
    st.markdown(
        """
    <style>
        div[data-testid="stVerticalBlock"] div:has(div.fixed-header) {
            position: sticky;
            top: 2.875rem;
            background-color: white;
            z-index: 999;
        }
        .fixed-header {
        }
    </style>
        """,
        unsafe_allow_html=True,
    )
    # global typography: Roboto for titles, headers and body text
    st.markdown(
        """<style>/* Font */
    @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap');
    /* You can replace 'Roboto' with any other font of your choice */
    /* Title */
    h1 {
        font-family: 'Roboto', sans-serif;
        font-size: 32px;
        font-weight: 700;
        padding-top:0px;
    }
    /* Chapter Header */
    h2 {
        font-family: 'Roboto', sans-serif;
        font-size: 24px;
        font-weight: 700;
    }
    /* Chapter Subheader */
    h3 {
        font-family: 'Roboto', sans-serif;
        font-size: 20px;
        font-weight: 700;
    }
    /* Normal Text */
    p {
        font-family: 'Roboto', sans-serif;
        font-size: 16px;
        font-weight: 400;
    }
    </style>""",
        unsafe_allow_html=True,
    )
# handle the session state callbacks
def change_handler_num_pages():
    """Widget callback: copy the page-count input into its canonical session key."""
    st.session_state["num_pages_data_story"] = st.session_state[
        "num_pages_input"
    ]
def change_handler_dataset(data_path):
    """Widget callback: persist the dataset choice and warm the chart recommender.

    Runs Multivision once per dataset; its vega-lite specs end up in the
    session key ``multivision_specs_<dataset>``.
    """
    st.session_state["dataset"] = st.session_state["dataset_input"]
    if (
        f"multivision_specs_{st.session_state['dataset_input']}"
        not in st.session_state
    ):
        # the thread stores the created vega lite specifications in a
        # session state variable called multivision_specs_{dataset}
        recommender_thread = Recommender(
            num_rec=12,
            data_path=data_path,
            dataset=st.session_state["dataset_input"],
        )
        # NOTE(review): calling run() directly executes synchronously in this
        # thread even if Recommender subclasses Thread — confirm intent.
        recommender_thread.run()
# load the data that was selected by the user on previous pages
def handle_data():
    """Load the dataset selected in st.session_state["dataset"].

    Side effect: stores the CSV path in st.session_state["data_path"].
    Returns (df, cols, cols_widget, dataset_index) where `cols` is a
    comma-joined string of the column names, `cols_widget` the raw column
    index, and `dataset_index` the dropdown position of the dataset.
    NOTE(review): an unmatched dataset name falls through all branches and
    raises NameError on `df` below — confirm the selectbox guarantees a match.
    """
    # read in the data
    # dataset_index = of which selection is selected first in the dropdown in
    # the sidebardf
    if st.session_state["dataset"] == "💶 Salaries":
        data_path = "data/ds_salaries.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        df.work_year = df.work_year.apply(lambda x: str(x))
        dataset_index = 1
    elif st.session_state["dataset"] == "🎥 IMDB Movies":
        data_path = "data/imdb_top_1000.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 0
    elif st.session_state["dataset"] == "📈 Superstore Sales":
        data_path = "data/superstore.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path, encoding="windows-1252")
        df["Postal Code"] = df["Postal Code"].apply(lambda x: str(x) + "_")
        dataset_index = 2
    elif st.session_state["dataset"] == "😷 Covid Worldwide":
        data_path = "data/covid_worldwide.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 3
    elif st.session_state["dataset"] == "🗣️ Amazon Customer Behaviour":
        data_path = "data/Amazon Customer Behavior Survey.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 4
    elif st.session_state["dataset"] == "🧺 Food Prices":
        data_path = "data/Food Prices.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 5
    elif st.session_state["dataset"] == "🛌 Sleep, Health and Lifestyle":
        data_path = "data/Sleep_health_and_lifestyle_dataset.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 6
    elif st.session_state["dataset"] == "🎵 Spotify Song Attributes":
        data_path = "data/Spotify_Song_Attributes.csv"
        st.session_state["data_path"] = data_path
        df = pd.read_csv(data_path)
        dataset_index = 7
    # Apply the custom function and convert date columns
    for col in df.columns:
        # check if a column name contains date substring
        if "date" in col.lower():
            df[col] = pd.to_datetime(df[col])
            # remove timestamp
            df[col] = df[col].dt.date
            try:
                df[col] = df[col].apply(lambda x: x.strftime("%Y-%m-%d"))
            except:
                # NOTE(review): bare except swallows everything here
                print("Error in Date Parsing")
    # replace space with _ in column names
    cols_widget = df.columns
    cols = ", ".join(cols_widget)
    return df, cols, cols_widget, dataset_index
def get_vega_spec():
    """Parse the most recent GPT chat response into a vega-lite spec dict.

    Assumes the last entry of st.session_state["generated"] is valid JSON;
    raises json.JSONDecodeError otherwise. (Leftover debug print removed.)
    """
    gpt_response = st.session_state["generated"][-1]
    return json.loads(gpt_response)
def get_low_level_values(nested_dict):
    """Return every leaf (non-dict) value of a nested dict, depth-first."""
    return [
        leaf
        for value in nested_dict.values()
        for leaf in (
            get_low_level_values(value) if isinstance(value, dict) else [value]
        )
    ]
# deletes the last conversation so that we can go back to the old chart
def handle_undo_changes():
    """Revert the last chat turn so the previous chart becomes current again.

    Removes the newest generated spec, the matching user prompt, and two
    entries from the LangChain memory buffer (human + AI message).
    NOTE(review): after the first del, index [-2] targets the third-from-last
    original entry — verify [-1] twice wasn't intended.
    """
    del st.session_state["generated"][-1]
    del st.session_state["past"][-1]
    del st.session_state.entity_memory.buffer[-1]
    del st.session_state.entity_memory.buffer[-2]
def handle_confirm_viz(current_graph, spec, df):
    """Confirm the current chart: store its spec and generate a GPT description.

    current_graph: 1-based index of the graph being confirmed.
    spec: the vega-lite specification dict (without a "data" key).
    df: the dataset dataframe; a 10-row sample of the fields referenced by
        the spec is embedded into the prompt sent to GPT.
    """
    with st.spinner("Generating the Story Purpose"):
        # delete the flash message container
        if "created_graph" in st.session_state:
            del st.session_state["created_graph"]
        if "created_page" in st.session_state:
            del st.session_state["created_page"]
        st.session_state[f"visualization_{current_graph}_confirmed"] = True
        # save the vega-lite spec
        st.session_state[f"fig_gpt_{current_graph}"] = spec
        # get the fields that are used for the graph
        used_fields = []
        spec_fields = get_low_level_values(spec)
        try:
            for field in spec_fields:
                if field in df.columns:
                    used_fields.append(field)
            # use only the values that are relevant for this visualization
            df_spec = df[used_fields].sample(10).to_dict(orient="records")
        except:
            # NOTE(review): bare except — e.g. sampling fails for <10 rows;
            # falls back to sampling across all columns
            df_spec = df.sample(10).to_dict(orient="records")
        # create the spec for gpt to create a description
        # (shallow copy: nested values are shared with the stored spec)
        description_spec = spec.copy()
        description_spec["data"] = {"values": df_spec}
        # generate the chart description text
        Conversation = model_initialisation_chart_description(
            MODEL="gpt-4",
            TEMPERATURE=1,
        )
        # save the chart description
        st.session_state[
            f"fig_gpt_{current_graph}_description"
        ] = Conversation.run(input=description_spec)
def main():
"""
Main function for the Data Story Authoring Tool - Create Visualizations.
Returns:
None
"""
# create a container to place in sticky header content
header = st.container()
with header:
# top page navigation bar
choose = option_menu(
"StoryPoint",
[
"Homepage",
"Data Exploration",
"Story Composition",
"Story Narration",
"Data Story",
],
icons=[
"house",
"clipboard-data",
"list-check",
"bar-chart",
"award",
"send-check",
],
menu_icon="app-indicator",
default_index=3,
key="visualization-menu",
orientation="horizontal",
styles={
"container": {
"padding": "0!important",
"background-color": "#FFFFFF",
},
"icon": {"color": "orange", "font-size": "16px"},
"nav-link": {
"font-size": "16px",
"text-align": "left",
"margin": "0px",
"--hover-color": "#eee",
},
"nav-link-selected": {"background-color": "#1A84C7"},
},
)
# delete the other session states so when we navigate back to the respective
# pages, we dont get endless loops
if "story-menu" in st.session_state:
del st.session_state["story-menu"]
if "exploration-menu" in st.session_state:
del st.session_state["exploration-menu"]
if "layout-menu" in st.session_state:
del st.session_state["layout-menu"]
if "homepage-menu" in st.session_state:
del st.session_state["homepage-menu"]
# handle the option that got chosen in the navigation bar
if choose == "Data Exploration":
switch_page("Exploratory Data Analysis")
elif choose == "Story Composition":
switch_page("Layout Creation")
elif choose == "Homepage":
switch_page("Homepage")
elif choose == "Data Story":
switch_page("Data Story 1")
st.write("""<div class='fixed-header'/>""", unsafe_allow_html=True)
# call the style function to apply the styles
style()
# use the handleData method
df, cols, cols_widget, dataset_index = handle_data()
# streamlit create counter
current_page = page_counter()
# streamlit graph counter
current_graph = graph_counter()
# add page logo to sidebar
with st.sidebar:
add_logo("static/img/chi_logo.png", height=30)
st.sidebar.write("### Your Dataset contains the following features")
with st.sidebar.expander("Dataset Features", expanded=True):
nl = "\n".join(df.columns)
st.write(
f"""
\n{nl}"""
)
# another sidebar header
with st.sidebar:
st.subheader("Configure the Chat Model")
# Set up sidebar with various options
with st.sidebar.expander("🛠️ Adjust Chatbot Settings", expanded=True):
MODEL = st.selectbox(
label="Model",
options=[
"gpt-3.5-turbo",
"gpt-4",
"text-davinci-003",
"text-davinci-002",
"code-davinci-002",
],
)
K = st.number_input(
" (#)Summary of prompts to consider", min_value=3, max_value=1000
)
TEMPERATURE = st.slider(
"Creativity of the Model", 0.0, 1.0, step=0.1, value=0.0
)
with st.sidebar:
gpt_helper.feedback(page=choose)
# Set up the Streamlit app layout
st.title("Data Story Authoring Tool - Visualizations")
# explanation text
st.write(
"This is the visualization creator page of the data story authoring tool.\
Here, you will sequentially create the graphs for your data story. \
For each page in your data story, you will be prompted to enter a Story Purpose\
by typing it into the story purpose text editor. Afterwards, you will use an \
Open AI Large Language Model to create Vega Lite visualizations through Natural \
Language input. For each created visualization, you will also be prompted to add \
explaining text to it. Make sure that the explaining text contains information that goes beyond the \
information that the viewer of the story can get from the visualization alone. \
Additionally, at the top of the page, you can also choose a set of filters for each page of the data story."
)
st.write(
f"###### Currently creating page {current_page} - Graph"
f' {current_graph}/{st.session_state["num_pages_data_story"]*2}'
)
st.write(f'###### Chosen Dataset: {st.session_state["dataset"]}')
st.write("***")
# show further headings on one side and the datastory on the other side
c1, c2 = st.columns([2, 2])
with c1:
# when reloading the page because of saving the graph, we keep the selected
# filters for the page
if f"filter_choice_{current_page}" in st.session_state:
pre_selected = st.session_state[f"filter_choice_{current_page}"]
else:
pre_selected = cols_widget[0]
# let the user choose the filters to be used on the current story page
st.write(
"Here you can select global filters for your Data Story. Once the Data Story is created, the filters\
will appear in the sidebar. There will also be a clear filter button to unapply them."
)
st.subheader(
"1️⃣Choose a set of filters that can be applied on the charts"
)
options = st.multiselect(
"Filter Choice",
cols_widget,
pre_selected,
help="Choose a set of Filters that you can use for the dashboard on the next page",
key=f"filter_choice_{current_page}_widget",
)
with c2:
# which layout was chosen by the user for the current page
page_layout = st.session_state[f"page_layout_{current_page-1}_entered"]
if page_layout == "Image 1":
img_path = "static/img/DataStory State"
elif page_layout == "Image 2":
img_path = "static/img/DataStory2 State"
with st.expander(
expanded=True, label=f"Data Story Progress of Page {current_page}"
):
# display the data story's state
# second graph is finished
if f"fig_gpt_{(current_page*2)}" in st.session_state:
image = Image.open(f"{img_path}/Folie5.PNG")
st.image(image)
# first text is finished
elif f"graph_{(current_page*2) - 1}_text" in st.session_state:
image = Image.open(f"{img_path}/Folie4.PNG")
st.image(image)
# first graph is finished
elif f"fig_gpt_{(current_page*2) - 1}" in st.session_state:
image = Image.open(f"{img_path}/Folie3.PNG")
st.image(image)
# story purpose is given
elif f"story_purpose_{current_page}_text" in st.session_state:
image = Image.open(f"{img_path}/Folie2.PNG")
st.image(image)
# no story purpose is given
else:
image = Image.open(f"{img_path}/Folie1.PNG")
st.image(image)
# make space
# add_vertical_space(2)
# give feedback when first page was created
if "created_page" in st.session_state:
html(
"""
<!DOCTYPE html>
<html>
<head>
<title>Flash Message Example</title>
<!-- Add jQuery library -->
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<style>
/* Style for the flash message container */
#flash {
position: fixed;
top: 0;
left: 0;
width: 100%;
background-color: #4CAF50; /* Green background color */
padding: 10px;
text-align: center;
display: none; /* hide initially */
font-size: 24px; /* bigger font size */
font-weight: bold; /* bold text */
color: #000000; /* Black font color */
}
</style>
</head>
<body>
<div id="flash">Page saved, continue with the rest.</div>
<script>
$(function() {
// Show and hide the flash message
$('#flash').delay(500).fadeIn('normal', function() {
$(this).delay(2500).fadeOut();
});
});
</script>
</body>
</html>
""",
height=50,
)
# example usage:
# st.markdown(eval(f'f"""{st.session_state[f"story_purpose_{current_page}_text"]}"""'), unsafe_allow_html=True)
# which number of visualization
if current_graph == 1:
st.subheader("2️⃣ Create the 1st visualization")
elif current_graph == 2:
st.subheader(f"2️⃣ Create the 2nd visualization")
elif current_graph == 3:
st.subheader(f"2️⃣ Create the 3rd visualization")
else:
st.subheader(f"2️⃣ Create the {current_graph}th visualization")
# give feedback when first graph was created
if "created_graph" in st.session_state:
html(
"""
<!DOCTYPE html>
<html>
<head>
<title>Flash Message Example</title>
<!-- Add jQuery library -->
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<style>
/* Style for the flash message container */
#flash {
position: fixed;
top: 0;
left: 0;
width: 100%;
background-color: #4CAF50; /* Green background color */
padding: 10px;
text-align: center;
display: none; /* hide initially */
font-size: 24px; /* bigger font size */
font-weight: bold; /* bold text */
color: #000000; /* Black font color */
}
</style>
</head>
<body>
<div id="flash">The graph has been created and saved!</div>
<script>
$(function() {
// Show and hide the flash message
$('#flash').delay(500).fadeIn('normal', function() {
$(this).delay(2500).fadeOut();
});
});
</script>
</body>
</html>
""",
height=50,
)
st.write(
"Create visualizations via Natural Language Prompts or get inspired by example visualizations\
in the kickstart tab."
)
tab1, tab2 = st.tabs(
[
"Use Large Language Model",
"Kickstart with example visualization",
]
)
with tab1:
# initialize the model
Conversation = model_initialisation(
MODEL=MODEL,
TEMPERATURE=TEMPERATURE,
K=K,
column_names=df.columns.tolist(),
)
# use chat GPT to write Code
gpt_input = st.text_input(
key="input_viz",
placeholder=(
"Briefly explain what you want to plot from your data. For example:"
" Plot the average salary per year"
),
label=(
"💡Use GPT to help generating the code for the visualizations. Refer to the help symbol for ideas. "
),
help=f"""# The dataframe has the following columns:
\n{[str(column) for column in df.columns]}\n
Possible prompts:\n
- Make a Scatterplot of <column x> and <column y>
- Create an ordered PieChart of ...
- Create a bar chart for the distribution of ...""",
)
if "json_decode_error" in st.session_state:
del st.session_state["json_decode_error"]
html(
"""
<!DOCTYPE html>
<html>
<head>
<title>Flash Message Example</title>
<!-- Add jQuery library -->
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<style>
/* Style for the flash message container */
#flash {
position: fixed;
top: 0;
left: 0;
width: 100%;
background-color: #ff3333; /* Green background color */
padding: 10px;
text-align: left;
display: none; /* hide initially */
font-size: 12px; /* bigger font size */
font-weight: bold; /* bold text */
color: #FFFFFF; /* Black font color */
}
</style>
</head>
<body>
<div id="flash">Error, please give the model more specification.</div>
<script>
$(function() {
// Show and hide the flash message
$('#flash').delay(500).fadeIn('normal', function() {
$(this).delay(2500).fadeOut();
});
});
</script>
</body>
</html>
""",
height=30,
)
if st.button("Commit Prompt"):
# for development environment: measure time it takes for API request
start_time = time.time()
with st.spinner("Busy API Servers (5-10 seconds) ...."):
output = Conversation.run(input=gpt_input)
st.session_state.past.append(gpt_input)
st.session_state.generated.append(output)
st.session_state[f"prompt_commited_{current_graph}"] = True
# for development environment: measure time it takes for API request
end_time = time.time()
execution_time = end_time - start_time
print("It took " + str(round(execution_time, 2)) + "seconds")
# implement a carousel to show the visualizations created by multivision
with tab2:
st.write(
"The visualizations are created by the visualization recommendation framework\
[MultiVision](https://arxiv.org/pdf/2107.07823.pdf) by Wu et al. (2021). \
Select a visualization from the list below and adjust it further in the next steps.\
If no visualization is shown, that means, that you're dataset is not suitable for the\
algorithm."
)
viz_container = st.container()
with st.spinner("Loading Visualizations"):
# create the dir if it doesn't exist
directory = f"static/img/VegaSpecs/{st.session_state['dataset']}"
os.makedirs(directory, exist_ok=True)
if (
f"spec_imgs_created_{st.session_state['dataset']}"
not in st.session_state
):
# prepare the data to be added to the vega specs
multivision_specs = []
data = df.dropna().iloc[0:30].to_dict(orient="records")
# add the data key to the dict
for i, multivision_spec in enumerate(
st.session_state[
f"multivision_specs_{st.session_state['dataset']}"
]
):
# create a copy so that the original reference is not overwritten
multivision_spec_copy = multivision_spec.copy()
multivision_spec_copy["data"] = {"values": data}
multivision_specs.append(multivision_spec_copy)
# for debugging
print(multivision_spec_copy)
# convert every spec into a png
png_data = vlc.vegalite_to_png(
vl_spec=multivision_spec_copy, scale=2
)
# numbers should be two digits long for the lexicographical ordering to work
if i <= 9:
with open(
f"static/img/VegaSpecs/{st.session_state['dataset']}/spec_0{i}.png",
"wb",
) as f:
f.write(png_data)
if i >= 10:
with open(
f"static/img/VegaSpecs/{st.session_state['dataset']}/spec_{i}.png",
"wb",
) as f:
f.write(png_data)
# add to the session state
st.session_state[
f"spec_imgs_created_{st.session_state['dataset']}"
] = True
# collect all image files from the folder
imageUrls = []
for file in os.listdir(
f"static/img/VegaSpecs/{st.session_state['dataset']}"
):
with open(
f"static/img/VegaSpecs/{st.session_state['dataset']}/" + file,
"rb",
) as image:
encoded = base64.b64encode(image.read()).decode()
imageUrls.append(f"data:image/png;base64,{encoded}")
# create the component with the code from the frontend folder
imageCarouselComponent = components.declare_component(
"image-carousel-component", path="frontend/public"
)
selectedImageUrl = imageCarouselComponent(
imageUrls=imageUrls, height=300
)
if selectedImageUrl is not None:
# st.image(selectedImageUrl[0])
# index of the vega lite spec from multivision
index = selectedImageUrl[1]
viz_container.success(
f'Visualization number {index+1} selected, scroll down and click "Select as Kickstarter Template" to continue'
)
# get the spec json without the data attribute
multivision_spec = st.session_state[
f"multivision_specs_{st.session_state['dataset']}"
][index]
                        # confirm the visualization and send the vega lite spec without the data attribute
if st.button("Select as Kickstarter Template"):
# artificially add the vega lite spec to the gpt responses
st.session_state.past.append(
"Create a nice vega_lite visualization."
)
st.session_state.generated.append(
str(multivision_spec)
.replace("'", '"')
.replace("True", "true")
.replace("False", "false")
)
# also add the answer to the entity memory
st.session_state.entity_memory.save_context(
{"input": "Create a nice vega_lite visualization."},
{
"output": str(
st.session_state[
f"multivision_specs_{st.session_state['dataset']}"
][index]
)
.replace("'", '"')
.replace("True", "true")
.replace("False", "false")
},
)
# go to the next step
st.session_state[f"prompt_commited_{current_graph}"] = True
if f"prompt_commited_{current_graph}" in st.session_state:
# display the success message later on
container = st.empty()
st.subheader("3️⃣Choose one of the Graphs and adjust it")
try:
# vega specs
vega_spec = get_vega_spec()
except Exception as e:
# when gpt returns an empty answer
st.session_state[
"json_decode_error"
] = "Please write something more specific."
del st.session_state[f"prompt_commited_{current_graph}"]
st.experimental_rerun()
# make two views for example (Coding Expert, Business user)
tab1, tab2 = st.tabs(["Business user", "Coding Expert"])
with tab1:
# charts and their explanation
c1, _, c2 = st.columns([4, 1, 8])
with c1:
# give the user the possibility to adjust the plot
st.write("###### 2. Adjust the chart if needed")
gpt_input_adjust = st.text_input(
key="input_viz_adjust",
placeholder=(
"Give the plot ... color, add the plot title ..."
),
label="input_viz_adjust",
label_visibility="collapsed",
)
with st.expander("Expand for Examples"):
st.write(
"""
- Add the Plot Title *PlotTitle*
- Change the y-axis label to *yAxisLable*
- Use *FeatureX* on the x-Axis
- Use *FeatureX* as a color gradient
- Make it a Scatterplot instead
- Use timeUnit year --> only shows year on xaxis without months
- to group a bar chart, prompt: use 'xOffset':{'field':'<grouping field>'} within encoding
- make the information hoverable by including variables into the tooltip
- use aggregate by mean to get mean values for an axis
- use transform calculation to calculate the deaths divided by population
"""
)
# give the information, which plot shall be adjusted
# chart option contains values in the form "Graph x"
gpt_input_adjust = f"Adjust the chart " + gpt_input_adjust
with c2:
# display the chart
# render the vega lite chart that came as response
# use only the values that are relevant for this visualization
df_spec = df.head(100).to_dict(orient="records")
# create the spec for gpt to create a description
vega_spec_copy = vega_spec.copy()
vega_spec_copy["data"] = {"values": df_spec}
# by parsing the json string of the response
st.vega_lite_chart(
height=320,
data=df,
spec=vega_spec,
use_container_width=True,
)
# new columns for prettier layout with the two buttons
c1, _, c2 = st.columns([4, 1, 8])
with c1:
if st.button(
"Adjust visualization", key="adjust_visualization"
):
print(gpt_input_adjust)
output = Conversation.run(input=gpt_input_adjust)
st.session_state.past.append(gpt_input_adjust)
st.session_state.generated.append(output)
print(gpt_input_adjust)
# rerun so that the visualization changes
st.experimental_rerun()
with c2:
col1, col2 = st.columns([5, 2])
# disable the button if the plot has not been adjusted by the user yet
if len(st.session_state["generated"]) > 1:
st.session_state["button_disabled"] = False
else:
st.session_state["button_disabled"] = True
with col1:
st.button(
"Undo last Changes",
key="undo_last_changes",
on_click=handle_undo_changes,
disabled=st.session_state["button_disabled"],
)
with col2:
# let user confirm the visualization
confirm_visualization = st.button(
"Confirm visualization",
key="confirm_visualization",
on_click=handle_confirm_viz,
args=(current_graph, vega_spec, df),
)
with tab2:
# charts and their explanation
c1, _, c2 = st.columns([4, 1, 8])
with c1:
# give the user the possibility to adjust the plot
st.write("###### 2. Adjust the chart if needed")
content = st_ace(
language="json5", key="code-editor-one", value=vega_spec
)
if content:
print("content")
with c2:
# display the chart that was selected in the chart_option selectbox
try:
# render the vega lite chart that came as response
# by parsing the json string of the response
st.vega_lite_chart(
height=320,
data=df,
spec=vega_spec,
)
except Exception as e:
st.write(e)
# new columns for prettier layout with the two buttons
c1, _, c2 = st.columns([4, 1, 8])
with c1:
if st.button(
"Adjust visualization",
key="adjust_visualization_coding_expert",
):
# append the changed visualization from the Ace editor called content
# to the chatGPT conversation
vega_spec = get_vega_spec()
# append it to the response
st.session_state.generated.append(vega_spec)
st.experimental_rerun()
with c2:
# let user confirm the visualization
confirm_viz = st.button(
"Confirm visualization",
key="confirm_visualization_coding_expert",
on_click=handle_confirm_viz,
args=(current_graph, vega_spec, df),
)
if f"visualization_{current_graph}_confirmed" in st.session_state:
# DP2
st.subheader("4️⃣ Describe the plot and give further information")
# use the chart description from chatGPT
if f"fig_gpt_{current_graph}_description" in st.session_state:
chart_description = st.session_state[
f"fig_gpt_{current_graph}_description"
]
# Use My Self Made Custom Component
graph_explanation = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"plot_description_{current_graph}",
placeholder="The plot shows...",
value=chart_description,
)
else:
# Use My Self Made Custom Component
graph_explanation = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"plot_description_{current_graph}",
placeholder="The plot shows...",
value=chart_description,
)
if graph_explanation:
if f"graph_{current_graph}_confirmed" not in st.session_state:
st.session_state[f"graph_{current_graph}_confirmed"] = True
st.session_state[
f"graph_{current_graph}_text"
] = graph_explanation[1]
# only go further when text for the story is entered
if f"graph_{current_graph}_confirmed" in st.session_state:
# save the chosen filters
st.session_state[
f"filter_choice_{current_page}"
] = st.session_state[f"filter_choice_{current_page}_widget"]
# this means, that we have the last graph and want to create the story
# now
if current_graph == st.session_state["num_pages_data_story"] * 2:
# let the user input the story purpose
if current_page == 1:
st.subheader(
f"5️⃣Describe the story purpose of the 1st page"
)
elif current_page == 2:
st.subheader(
f"5️⃣Describe the story purpose of the 2nd page"
)
elif current_page == 3:
st.subheader(
f"5️⃣Describe the story purpose of the 3rd page"
)
else:
st.subheader(
f"5️⃣Describe the story purpose of the {current_page}th page"
)
# generate the chart description text
Conversation = model_initialisation_story_purpose(
MODEL="gpt-4",
TEMPERATURE=1,
)
# build the gpt query string from the former chart descriptions
story_purpose_prompt = f"""
{st.session_state[f"fig_gpt_{current_graph}_description"]}
{st.session_state[f"fig_gpt_{current_graph-1}_description"]}
"""
# generate the story purpose via chatgpt
if f"story_purpose_gpt_{current_page}" not in st.session_state:
with st.spinner("Generating the Story Purpose"):
st.session_state[
f"story_purpose_gpt_{current_page}"
] = Conversation.run(input=story_purpose_prompt)
# Use My Self Made Custom Component
story_purpose = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"story_purpose_{current_page}_widget",
placeholder="This story displays ...",
value=st.session_state[
f"story_purpose_gpt_{current_page}"
],
)
else:
# Use My Self Made Custom Component
story_purpose = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"story_purpose_{current_page}_widget",
placeholder="This story displays ...",
value=st.session_state[
f"story_purpose_gpt_{current_page}"
],
)
if story_purpose:
st.session_state[f"story_{current_page}_confirmed"] = True
st.session_state[
f"story_purpose_{current_page}_text"
] = story_purpose[1]
st.session_state[
f"story_purpose_{current_page}_editor_text"
] = story_purpose[2]
# generate the page title
page_title_generator = model_initialisation_story_title(
MODEL="gpt-4",
TEMPERATURE=1,
)
st.session_state[
f"page_{current_page}_title"
] = page_title_generator.run(
st.session_state[f"story_purpose_{current_page}_text"]
)
# finish the story
if st.button(
"✅ Finish the Data Story", key="finished_story"
):
st.session_state["first_graph"] = True
# delete entity memory to start a new conversation with chat model
del st.session_state["generated"]
del st.session_state["past"]
del st.session_state["entity_memory"]
# create a state for a finished data story
st.session_state["finished_data_story"] = True
switch_page("data story 1")
else:
                        # this means that the page isn't complete yet
if current_graph % 2 == 1:
# finish this template
if st.button("✅ Finish this graph", key="finished_graph"):
st.session_state["created_graph"] = True
st.session_state["first_graph"] = False
increase_graph_counter()
# delete entity memory to start a new conversation with chat model
del st.session_state["generated"]
del st.session_state["past"]
del st.session_state["entity_memory"]
switch_page("create visualizations")
# this means, that one page of the story is complete
elif current_graph % 2 == 0:
# let the user input the story purpose
if current_page == 1:
st.subheader(
f"5️⃣Describe the story purpose of the 1st page"
)
elif current_page == 2:
st.subheader(
f"5️⃣Describe the story purpose of the 2nd page"
)
elif current_page == 3:
st.subheader(
f"5️⃣Describe the story purpose of the 3rd page"
)
else:
st.subheader(
f"5️⃣Describe the story purpose of the {current_page}th page"
)
# generate the chart description text
Conversation = model_initialisation_story_purpose(
MODEL="gpt-4",
TEMPERATURE=1,
)
# build the gpt query string from the former chart descriptions
story_purpose_prompt = f"""
{st.session_state[f"fig_gpt_{current_graph}_description"]}
{st.session_state[f"fig_gpt_{current_graph-1}_description"]}
"""
# generate the story purpose via chatgpt
if (
f"story_purpose_gpt_{current_page}"
not in st.session_state
):
with st.spinner("Generating the Story Purpose"):
st.session_state[
f"story_purpose_gpt_{current_page}"
] = Conversation.run(input=story_purpose_prompt)
# Use My Self Made Custom Component
story_purpose = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"story_purpose_{current_page}_widget",
placeholder="This story displays ...",
value=st.session_state[
f"story_purpose_gpt_{current_page}"
],
)
else:
# Use My Self Made Custom Component
story_purpose = st_pandas_text_editor(
columns=df.columns.tolist(),
key=f"story_purpose_{current_page}_widget",
placeholder="This story displays ...",
value=st.session_state[
f"story_purpose_gpt_{current_page}"
],
)
if story_purpose:
st.session_state[
f"story_{current_page}_confirmed"
] = True
st.session_state[
f"story_purpose_{current_page}_text"
] = story_purpose[1]
st.session_state[
f"story_purpose_{current_page}_editor_text"
] = story_purpose[2]
# generate the page title
page_title_generator = (
model_initialisation_story_title(
MODEL="gpt-4",
TEMPERATURE=1,
)
)
st.session_state[
f"page_{current_page}_title"
] = page_title_generator.run(
st.session_state[
f"story_purpose_{current_page}_text"
]
)
# finish this template
if st.button(
"✅ Finish this page", key="finished_page"
):
st.session_state["first_graph"] = True
st.session_state["created_page"] = True
# delete entity memory to start a new conversation with chat model
del st.session_state["generated"]
del st.session_state["past"]
del st.session_state["entity_memory"]
increase_page_counter()
increase_graph_counter()
switch_page("create visualizations")
def handle_new_number_pages():
    """Extend the finished data story by additional pages.

    Reads the new page count from the ``increase_num_pages`` widget, fixes the
    layout of the next page, generates its Streamlit page file, advances the
    page/graph counters, and clears ``finished_data_story`` so the main
    creation flow is shown again on the next rerun.
    """
    # set the new page number
    st.session_state["num_pages_data_story"] = st.session_state[
        "increase_num_pages"
    ]
    # set the page layout fixed to the first layout --> adjust after prototype
    current_page = st.session_state["page_counter"]
    st.session_state[f"page_layout_{current_page}_entered"] = "Image 1"
    # create the new story page file on disk
    # NOTE(review): the "0{3+current_page}" prefix only yields two-digit page
    # numbers while 3 + current_page <= 9 — confirm the story never exceeds
    # that many pages, or the file naming scheme breaks.
    gpt_helper.create_story_layout_type_1(
        file_name=f"pages/0{3+current_page}_data_story_{current_page+1}.py",
        story_page=current_page + 1,
    )
    # increase the counters for the data story
    increase_page_counter()
    increase_graph_counter()
    # delete the session state variable to show the page from the main method again
    del st.session_state["finished_data_story"]
def finished_data_story():
    """Render the page shown once the data story is complete.

    Displays the top navigation bar, clears stale menu session-state keys,
    routes to other app pages on menu selection, and offers a number input
    plus confirm button that lets the user extend the story with more pages
    (handled by :func:`handle_new_number_pages`).
    """
    # create a container to place in sticky header content
    header = st.container()
    with header:
        # top page navigation bar
        # NOTE(review): the icons list has 6 entries for 5 menu options —
        # confirm the extra "send-check" icon is intentional.
        choose = option_menu(
            "StoryPoint",
            [
                "Homepage",
                "Data Exploration",
                "Story Composition",
                "Story Narration",
                "Data Story",
            ],
            icons=[
                "house",
                "clipboard-data",
                "list-check",
                "bar-chart",
                "award",
                "send-check",
            ],
            menu_icon="app-indicator",
            default_index=3,
            key="visualization-menu",
            orientation="horizontal",
            styles={
                "container": {
                    "padding": "0!important",
                    "background-color": "#FFFFFF",
                },
                "icon": {"color": "orange", "font-size": "16px"},
                "nav-link": {
                    "font-size": "16px",
                    "text-align": "left",
                    "margin": "0px",
                    "--hover-color": "#eee",
                },
                "nav-link-selected": {"background-color": "#1A84C7"},
            },
        )
        # delete the other session states so when we navigate back to the respective
        # pages, we don't get endless loops
        if "story-menu" in st.session_state:
            del st.session_state["story-menu"]
        if "exploration-menu" in st.session_state:
            del st.session_state["exploration-menu"]
        if "layout-menu" in st.session_state:
            del st.session_state["layout-menu"]
        if "homepage-menu" in st.session_state:
            del st.session_state["homepage-menu"]
        # handle the option that got chosen in the navigation bar
        if choose == "Data Exploration":
            switch_page("Exploratory Data Analysis")
        elif choose == "Story Composition":
            switch_page("Layout Creation")
        elif choose == "Homepage":
            switch_page("Homepage")
        elif choose == "Data Story":
            switch_page("Data Story 1")
        st.write("""<div class='fixed-header'/>""", unsafe_allow_html=True)
    # call the style function to apply the styles
    style()
    # add page logo to sidebar
    with st.sidebar:
        add_logo("static/img/chi_logo.png", height=30)
    st.subheader(
        "The data story has been created and can be found under the Data Story Tab"
    )
    st.write(
        "If you want to create further pages, increase the number of the pages variable"
    )
    # the new page count must exceed the current one, enforced via min_value
    num_pages = st.number_input(
        "\# of pages in data story",
        value=st.session_state["num_pages_data_story"] + 1,
        min_value=st.session_state["num_pages_data_story"] + 1,
        key="increase_num_pages",
    )
    increase_num_pages_button = st.button(
        "Confirm new Number of Pages", on_click=handle_new_number_pages
    )
if __name__ == "__main__":
# when the data story is finished, we want a different page to be shown here
if "finished_data_story" in st.session_state:
finished_data_story()
else:
main()
| kpister/prompt-linter | data/scraping/repos/AnonymousPaperSubmission123~StoryPoint/pages~02_create_visualizations.py | pages~02_create_visualizations.py | py | 59,373 | python | en | code | 0 | github-code | 50 |
20374505028 | import logging
from abc import ABC
from pyrogram import types
from bot.errors import RuleViolated
from core import main_logger
from core.log import event_logger
log: logging.Logger = main_logger(__name__)
logger: logging.Logger = event_logger(__name__)
class BaseRule:
    """Common base class for every validation rule.

    Subclasses override :meth:`is_violate_rule` (and usually
    :meth:`update_error_message`); callers invoke :meth:`run_validate`.
    """

    # Display name reported alongside a violation.
    name: str = "預設規則"

    def __init__(self):
        # Refreshed by update_error_message() right before a violation is raised.
        self.error_message: str = "<unset>"

    def update_error_message(self) -> None:
        """Refresh ``self.error_message``; subclasses must override."""
        raise NotImplementedError

    def is_violate_rule(self) -> bool:
        """Return True if the message violates the rule."""
        raise NotImplementedError

    def run_validate(self):
        """Raise :class:`RuleViolated` when the rule is violated."""
        if not self.is_violate_rule():
            return
        self.update_error_message()
        raise RuleViolated(self.name, self.error_message)
class MessageRule(BaseRule, ABC):
    """Base class for rules that validate a message against a target.

    Unlike :class:`BaseRule`, ``run_validate`` requires both the message and
    the user/chat the message is checked against.
    """

    name: str = "預設訊息規則"

    def __init__(self):
        super().__init__()
        # Populated by run_validate() before the rule logic executes.
        self.msg: types.Message | None = None
        self.target: types.User | types.Chat | None = None

    def run_validate(
        self,
        *,
        msg: types.Message | None = None,
        target: types.User | types.Chat | None = None,
    ) -> None:
        """Store *msg*/*target* on the instance, then run the base validation.

        Raises:
            ValueError: if *msg* or *target* is missing (or falsy).
            RuleViolated: propagated from BaseRule.run_validate on violation.
        """
        if not msg:
            raise ValueError("Must provide message to check.")
        if not target:
            raise ValueError("Must provide target to check.")
        self.msg = msg
        self.target = target
        super().run_validate()
class UserRule(BaseRule, ABC):
    """Base class for rules that validate a single user."""

    name: str = "預設使用者規則"

    def __init__(self):
        super().__init__()
        # Populated by run_validate() before the rule logic executes.
        self.user: types.User | None = None

    def run_validate(self, *, user: types.User | None = None) -> None:
        """Store *user* on the instance, then run the base validation.

        Raises:
            ValueError: if *user* is missing (or falsy).
            RuleViolated: propagated from BaseRule.run_validate on violation.
        """
        if not user:
            raise ValueError("Must provide user to check.")
        self.user = user
        super().run_validate()
| allen0099/UserBot | bot/validation/rules/base.py | base.py | py | 1,893 | python | en | code | 4 | github-code | 50 |
70068283997 | from radio_protocol import *
import json
import csrd
J_READ = "READ"
J_WRITE = "WRITE"
J_OPERATION = "OPERATION"
J_ACTION = "ACTION"
J_UNKOWN = "UNKNOW"
J_FROM = "from"
J_TO = "to"
J_NAME = "name"
J_DATE = "date"
J_ID = "id"
J_TYPE = "type"
J_ACTION_TYPE = "action_type"
J_GROUP = "group"
J_ELEMENT = "element"
J_NEXTSTATE = "next_state"
J_PARAM_INDEX = "param_index"
J_ACTION_PARAM = "action"
J_VALUES = "values"
J_STATUS_TYPE = "status_type"
J_STATUS = "status"
J_PARAMS = "params"
J_NODE_ID = "nodeid"
J_TYPE_BROADCAST = "BROADCAST"
J_TYPE_ADDRESSED = "ADDRESSED"
J_TYPE_STATUS = "STATUS"
J_TYPE_EMPTY = "EMPTY"
def json_to_csrd(message, logger):
    """Convert a JSON radio message into a :class:`csrd.CSRD` object.

    Args:
        message: JSON string describing the message (validated by
            :func:`is_message_valid`).
        logger: logger used for error reporting.

    Returns:
        A populated ``csrd.CSRD`` instance, or ``None`` when the JSON cannot
        be parsed or fails validation.
    """
    try:
        j = json.loads(message)
    except Exception as e:
        logger.error(f"Failed to load json message: {e}")
        return None

    if not is_message_valid(j, logger):
        logger.error("Json message is invalid. [%s]", message)
        return None

    # Build the 8-byte payload. The original assigned into an empty list
    # (buffer[0] = ...), which raised IndexError on the first assignment.
    buffer = [
        createBuffer0(j),
        createBuffer1(j),
        createBuffer2(j),
        createBuffer3(j),
        createBuffer4(j),
        createBuffer5(j),
        createBuffer6(j),
        createBuffer7(j),
    ]

    csrd_message = csrd.CSRD(logger, buffer, MESSAGE_SIZE)

    # Use the numeric id when present, else fall back to 0.
    # (The original conditions were inverted and ended up calling int(None).)
    to_id = j[J_TO][J_ID]
    to_radio = int(to_id) if to_id is not None else 0

    from_id = j[J_FROM][J_ID]
    from_radio = int(from_id) if from_id is not None else 0

    csrd_message.setTo(to_radio)
    csrd_message.setFrom(from_radio)
    return csrd_message
def exists(message, tag, logger):
    """Return ``message[tag]`` when present and not None; else log and return None.

    Args:
        message: dict parsed from the JSON message.
        tag: key to look up.
        logger: logger used to report the missing field.
    """
    # .get avoids the KeyError the original raised when the field was absent —
    # the exact situation this helper is supposed to report.
    value = message.get(tag)
    if value is not None:
        return value
    logger.error(f"Json message missing field: {tag}")
    return None
def is_message_valid(message, logger):
    """Check that the parsed JSON message carries all obligatory fields.

    Validates the "from"/"to" sub-objects and the "type" field, plus the
    type-specific fields for STATUS and BROADCAST messages.

    Returns:
        True when the message is well formed, False otherwise.
    """
    # Check the presence of the obligatory message fields
    json_from = exists(message, J_FROM, logger)
    if json_from is not None:
        if exists(json_from, J_NAME, logger) is None:
            return False
        if exists(json_from, J_ID, logger) is None:
            return False
    else:
        return False
    # BUG FIX: this previously looked up J_FROM a second time, so a missing
    # "to" field was never detected.
    json_to = exists(message, J_TO, logger)
    if json_to is not None:
        if exists(json_to, J_NAME, logger) is None:
            return False
        if exists(json_to, J_ID, logger) is None:
            return False
    else:
        return False
    if exists(message, J_TYPE, logger) is None:
        return False
    else:
        # check for specific types
        # exception here is status message
        message_type = message[J_TYPE]
        if message_type == J_TYPE_STATUS:
            if exists(message, J_STATUS_TYPE, logger) is None:
                return False
            if exists(message, J_NODE_ID, logger) is None:
                return False
            if exists(message, J_STATUS, logger) is None:
                return False
        elif message_type == J_TYPE_BROADCAST:
            if exists(message, J_ACTION_TYPE, logger) is None:
                return False
            else:
                action_type = message[J_ACTION_TYPE]
                if action_type == J_OPERATION:
                    if exists(message, J_NEXTSTATE, logger) is None:
                        return False
                elif action_type == J_ACTION:
                    if exists(message, J_ACTION_PARAM, logger) is None:
                        return False
                if exists(message, J_GROUP, logger) is None:
                    return False
                if exists(message, J_ELEMENT, logger) is None:
                    return False
                if exists(message, J_VALUES, logger) is None:
                    return False
    return True
def createBuffer0(json_message):
    """Map the JSON message type onto the radio-protocol message-type byte."""
    type_codes = {
        J_TYPE_BROADCAST: RP_BROADCAST,
        J_TYPE_ADDRESSED: RP_ADDRESSED,
        J_TYPE_STATUS: RP_STATUS,
    }
    return type_codes.get(json_message[J_TYPE], RP_UNKOWN)
def createBuffer1(json_message):
    """Second payload byte: action code, or the raw status type for STATUS."""
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST or message_type == J_TYPE_ADDRESSED:
        action_codes = {
            J_OPERATION: RP_OPERATION,
            J_ACTION: RP_ACTION,
            J_WRITE: RP_WRITE,
            J_READ: RP_READ,
        }
        return action_codes.get(json_message[J_ACTION_TYPE], RP_UNKOWN)
    if message_type == J_TYPE_STATUS:
        # status messages carry the status type directly in this byte
        return int(json_message[J_STATUS_TYPE])
    return RP_UNKOWN
def createBuffer2(json_message):
    """Third payload byte: group for broadcasts, node-id high byte otherwise."""
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        return int(json_message[J_GROUP])
    if message_type in (J_TYPE_ADDRESSED, J_TYPE_STATUS):
        return highByte(int(json_message[J_NODE_ID]))
    return RP_UNKOWN
def createBuffer3(json_message):
    """Fourth payload byte: element for broadcasts, node-id low byte otherwise."""
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        return int(json_message[J_ELEMENT])
    if message_type in (J_TYPE_ADDRESSED, J_TYPE_STATUS):
        return lowByte(int(json_message[J_NODE_ID]))
    return RP_UNKOWN
def createBuffer4(json_message):
    """Fifth payload byte: action payload for broadcasts, element otherwise."""
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        # which JSON field carries the payload depends on the action type
        field_by_action = {
            J_OPERATION: J_NEXTSTATE,
            J_ACTION: J_ACTION_PARAM,
            J_WRITE: J_PARAM_INDEX,
            J_READ: J_PARAM_INDEX,
        }
        field = field_by_action.get(json_message[J_ACTION_TYPE])
        if field is None:
            return RP_UNKOWN
        return int(json_message[field])
    if message_type in (J_TYPE_ADDRESSED, J_TYPE_STATUS):
        return int(json_message[J_ELEMENT])
    return RP_UNKOWN
def createBuffer5(json_message):
    """Sixth payload byte.

    Broadcast: first entry of "values".  Addressed: the action payload
    field.  Status: the element for report-acks, otherwise the first
    "values" entry.  Missing/None values map to 0.
    """
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        values = json_message[J_VALUES]
        # Guard against short lists as well as explicit nulls; the original
        # raised IndexError when "values" was empty.
        if len(values) < 1 or values[0] is None:
            return 0
        return int(values[0])
    if message_type == J_TYPE_ADDRESSED:
        action_type = json_message[J_ACTION_TYPE]
        if action_type == J_OPERATION:
            return int(json_message[J_NEXTSTATE])
        if action_type == J_ACTION:
            return int(json_message[J_ACTION_PARAM])
        if action_type == J_WRITE:
            return int(json_message[J_PARAM_INDEX])
        if action_type == J_READ:
            return int(json_message[J_PARAM_INDEX])
        return RP_UNKOWN
    if message_type == J_TYPE_STATUS:
        status_type = int(json_message[J_STATUS_TYPE])
        if status_type == RP_REPORT_ACK:
            return int(json_message[J_ELEMENT])
        values = json_message[J_VALUES]
        if len(values) < 1 or values[0] is None:
            return 0
        return int(values[0])
    return RP_UNKOWN
def createBuffer6(json_message):
    """Seventh payload byte: a "values" entry whose index depends on the type.

    Broadcast uses values[1], addressed uses values[0], status uses values[1]
    (values[0] for report-acks).  Missing/None values map to 0.
    """
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        index = 1
    elif message_type == J_TYPE_ADDRESSED:
        index = 0
    elif message_type == J_TYPE_STATUS:
        # report-acks carry one fewer leading field, so shift the index down
        index = 0 if int(json_message[J_STATUS_TYPE]) == RP_REPORT_ACK else 1
    else:
        return RP_UNKOWN
    values = json_message[J_VALUES]
    # Guard against short lists as well as explicit nulls; the original
    # raised IndexError when "values" was too short.
    if len(values) <= index or values[index] is None:
        return 0
    return int(values[index])
def createBuffer7(json_message):
    """Eighth payload byte: a "values" entry whose index depends on the type.

    Broadcast uses values[2], addressed uses values[1], status uses values[2]
    (values[1] for report-acks).  Missing/None values map to 0.
    """
    message_type = json_message[J_TYPE]
    if message_type == J_TYPE_BROADCAST:
        index = 2
    elif message_type == J_TYPE_ADDRESSED:
        index = 1
    elif message_type == J_TYPE_STATUS:
        # report-acks carry one fewer leading field, so shift the index down
        index = 1 if int(json_message[J_STATUS_TYPE]) == RP_REPORT_ACK else 2
    else:
        return RP_UNKOWN
    values = json_message[J_VALUES]
    # Guard against short lists as well as explicit nulls; the original
    # raised IndexError when "values" was too short.
    if len(values) <= index or values[index] is None:
        return 0
    return int(values[index])
def lowByte(a):
    """Return the least-significant byte of *a*."""
    return a & 0x00FF
def highByte(a):
    """Return the byte above the low byte of *a* (a >> 8)."""
    return a >> 8
| amaurial/projects | carsystem/control-station-gui/src/json_csrd.py | json_csrd.py | py | 9,763 | python | en | code | 1 | github-code | 50 |
74632025756 | from time import sleep
import requests
import bs4
from bs4 import BeautifulSoup
import pandas as pd
from random import choice
import re
from datetime import datetime
desktop_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0']
def random_headers():
return {'User-Agent': choice(desktop_agents),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}
def get_indeed_jobs(job_set, url_template, job_url_template):
max_results_per_job = 500 # Set this to a high-value (5000) to generate more results.
# Crawling more results, will also take much longer. First test your code on a small number of results and then expand.
i = 0
results = []
job_description = []
final_report = []
df_more = pd.DataFrame(columns=["Job Type","Title","Location","Company","Salary", "Job_Url"])
city = ""
for job_type in job_set:
# Grab the results from the request (as above)
start = 0
i = 0
url = url_template.format(job_type, city, start)
# Append to the full set of results
total_jobs = 0
try:
html = requests.get(url, headers=random_headers())
soup = BeautifulSoup(html.content, 'html.parser', from_encoding="utf-8")
#total_jobs = soup.find(id="searchCount").text.replace('\n', '').split('of')[1]
total_jobs_text = soup.find(id="searchCount").text.replace('\n', '').split('of')[1]
print(str(job_type) + str(total_jobs_text))
total_jobs_list = re.findall('\d+', total_jobs_text)
print (total_jobs_list , str(len(total_jobs_list)))
if len(total_jobs_list) > 1:
total_jobs = (int(total_jobs_list[0]) * 1000) + int(total_jobs_list[1])
else:
total_jobs = int(total_jobs_list[0])
print("Running the loop for : " + str(job_type) + " .Total Jobs Found : " + str(total_jobs))
num_pages = int(total_jobs)//20 + 1
except:
try:
html = requests.get(url, headers=random_headers())
soup = BeautifulSoup(html.content, 'html.parser', from_encoding="utf-8")
# total_jobs = soup.find(id="searchCount").text.replace('\n', '').split('of')[1]
total_jobs_text = soup.find(id="searchCount").text.replace('\n', '').split('of')[1]
print(str(job_type) + str(total_jobs_text))
total_jobs_list = re.findall('\d+', total_jobs_text)
print(total_jobs_list, str(len(total_jobs_list)))
if len(total_jobs_list) > 1:
total_jobs = (int(total_jobs_list[0]) * 1000) + int(total_jobs_list[1])
else:
total_jobs = int(total_jobs_list[0])
print("Running the loop for : " + str(job_type) + " .Total Jobs Found : " + str(total_jobs))
num_pages = int(total_jobs) // 20 + 1
except:
continue
##
statement = "For the role of " + str(job_type) + ", we found total of " + str(total_jobs) + " jobs!"
final_report.append(statement)
for pages in range(0, num_pages):
# Grab the results from the request (as above)
url = url_template.format(job_type, city, start)
start += 20
#print(url)
# Append to the full set of results
html = requests.get(url, headers=random_headers())
soup = BeautifulSoup(html.content, 'html.parser', from_encoding="utf-8")
for each in soup.find_all(class_= "result" ):
try:
title = each.find(class_='jobtitle').text.replace('\n', '')
except:
title = None
try:
location = each.find('span', {'class':"location" }).text.replace('\n', '')
except:
location = None
try:
company = each.find(class_='company').text.replace('\n', '')
except:
company = None
try:
salary = each.find('span', {'class':'no-wrap'}).text
except:
salary = None
try:
job_key = each.get('data-jk')
job_url = job_url_template.format(job_key)
except:
job_url = None
df_more = df_more.append({'Job Type':job_type,'Title':title, 'Location':location, 'Company':company, 'Salary':salary, 'Job_Url':job_url}, ignore_index=True)
i += 1
if i%200 ==0:
sleep(5)
print (str(i) + " jobs extracted out of " + str(total_jobs) + " for the role of " + str(job_type))
print ("For the job of " + str(job_type) + ", a total of " + str(i) + " jobs were extracted in total!")
#df_more.to_csv('Indeed_Project_withoutJD.csv', encoding='utf-8')
print ("Total Jobs written in this run: " +str(len(df_more)))
return df_more
def get_indeed_data(out_file_name):
#job_master_set = set(['software+developer','software+engineer','data+scientist','ux+ui+designer','fullstack+developer',
# 'ai+machine+learning+developer','project+manager',"electrical+engineer", "qa+engineer",
# "cloud+developer","business+development+manager","frontend+software+developer",
# "business+analyst","backend+developer","cybersecurity+expert"])
job_set = set(['software+developer','software+engineer','project+manager',"electrical+engineer", "business+development+manager","business+analyst"])
url_template = "http://www.indeed.ca/jobs?q={}&l={}&start={}"
job_url_template = "https://ca.indeed.com/viewjob?jk={}"
df_indeed = get_indeed_jobs(job_set, url_template, job_url_template)
df_indeed_uniq = df_indeed.drop_duplicates(subset=['Job_Description'])
df_indeed_uniq.to_csv(out_file_name, encoding='utf-8')
return None
if __name__ == "__main__":
start_time = datetime.now()
get_indeed_data("Indeed_National_Data.csv")
#n_grams_wordCloud("C:/Users/a.vivek/PycharmProjects/Calgary/ngram2.py",'utf-8', n_filter=2, n=3)
#with open("output.csv", 'w',newline='') as resultFile:
# wr = csv.writer(resultFile, dialect='excel')
# wr.writerow(['Ngram_Freq','MI','Ngram_Prob','Count','Ngram'])
# wr.writerows(s)
end_time = datetime.now()
print('Duration: {}'.format(end_time - start_time)) | AmarVivek/Web-Scraping | Get_Data.py | Get_Data.py | py | 7,835 | python | en | code | 0 | github-code | 50 |
71334428635 | import numpy as np
from numpy.linalg import inv, det, norm as mag
from math import exp, log, pi
import time
from .kernel_methods import cartesian_operation, default_covariance_func, get_gradient_funcs
from functools import partial
from copy import deepcopy
from random import random
from .utilities import create_pool
def gradient_descent(hyperparams, X, Y, learning_rates, learning_func=None, epochs=5, cached_pool=None):
learning_func = default_learning_func if learning_func is None else learning_func
gradients = deepcopy(hyperparams)
params = deepcopy(hyperparams)
covariance_func = partial(default_covariance_func, hyperparams=params)
gradient_funcs = get_gradient_funcs(params)
log_prob = -np.inf
best_hyperparams = deepcopy(hyperparams)
best_log_prob = -np.inf
training_cov = cartesian_operation(X, function=covariance_func, cached_pool=cached_pool)
training_cov_inv = inv(training_cov)
new_log_prob = calc_log_prob(X, Y, training_cov, training_cov_inv)
print('INITIAL LOG PROB', new_log_prob)
all_log_probs = []
# for number of epochs
for i in range(epochs):
# generate inverse covariance matrix based on current hyperparameters
training_cov = cartesian_operation(X, function=covariance_func, cached_pool=cached_pool)
training_cov_inv = inv(training_cov)
# for each hyperparameter
for param_name in hyperparams:
if not param_name.startswith('theta'):
continue
if param_name == 'theta_length':
continue
# compute gradient of log probability with respect to the parameter
gradients[param_name] = gradient_log_prob(gradient_funcs[param_name], X, Y, training_cov_inv, cached_pool=cached_pool)
# update each parameter according to learning rate and gradient
step = learning_func(i, epochs, learning_rates[param_name]) * gradients[param_name]
print(step, param_name)
params[param_name] += step
print('params:')
print({ 'theta_amp': params['theta_amp'], 'theta_length': params['theta_length'] })
print('gradients:')
print({ 'theta_amp': gradients['theta_amp'], 'theta_length': gradients['theta_length'] })
print('log_prob:')
new_log_prob = calc_log_prob(X, Y, training_cov, training_cov_inv)
print(new_log_prob)
all_log_probs.append(new_log_prob)
# if i % 20:
# save_scatter('log_prob', all_log_probs)
log_prob = new_log_prob
if log_prob > best_log_prob:
best_hyperparams = deepcopy(params)
best_log_prob = log_prob
print("Completed %d" % i)
print('')
print('Best hyperparams:')
print(best_hyperparams)
print('Best log prob:')
print(best_log_prob)
return (best_hyperparams, best_log_prob)
def gradient_log_prob(gradient_func, X, Y, training_cov_inv, cached_pool=None):
print('Computing gradient of covariance matrix')
start = time.time()
gradient_cov_mat = cartesian_operation(X, function=gradient_func, cached_pool=cached_pool)
end = time.time()
print('%d seconds' % (end - start))
term_1 = -1 * np.trace(training_cov_inv.dot(gradient_cov_mat))
term_2 = Y.T.dot(training_cov_inv).dot(gradient_cov_mat).dot(training_cov_inv).dot(Y)
return 0.5 * (term_1 + term_2)
def calc_log_prob(X, Y, training_cov, training_cov_inv):
term_1 = log(mag(training_cov))
term_2 = Y.T.dot(training_cov_inv).dot(Y)
term_3 = len(X) / 2 * log(2 * pi)
return -0.5 * (term_1 + term_2)
def default_learning_func(i, total, scale=1.0):
internal_scale = 1.0
frac = float(i) / total
total_scale = scale * internal_scale
if frac < 0.1 :
return 1.5 * total_scale
elif frac < 0.2:
return 0.1 * total_scale
elif frac < 0.35:
return 0.05 * total_scale
elif frac < 0.45:
return 0.01 * total_scale
else:
return 0.003 * total_scale
def generate_random_hyperparams(params, randomize=[]):
rand_params = deepcopy(params)
for name in randomize:
if name not in params:
raise ValueError('Parameter to randomize should be in params')
rand_params[name] = 100.0 * random()
return rand_params
def initial_length_scales(X):
print("Generating %d scales" % X.shape[1])
length_scales = X.std(0)
length_scales[length_scales == 0.0] = 1.0
length_scales = np.square(np.reciprocal(length_scales)) / X.shape[1]
return length_scales.T
def optimize_hyperparams(params, X, Y, learning_rates, rand_restarts=1):
print('Optimizing hyperparams...')
pool = create_pool()
best_candidate = None
for i in range(0, rand_restarts):
new_params = generate_random_hyperparams(params)
try:
candidate = gradient_descent(new_params, X, Y, learning_rates, cached_pool=pool)
# if new candidates log prob is higher than best candidate's
if best_candidate is None or candidate[1] > best_candidate[1]:
best_candidate = candidate
except np.linalg.linalg.LinAlgError as e:
print('An error occurred')
print(e)
continue
pool.close()
pool.join()
print('Best candidate:')
print(best_candidate)
# return the best set of params found
return best_candidate[0]
| davidgbe/fpmd_with_ml | lib/gaussian_process/gradient_descent.py | gradient_descent.py | py | 5,404 | python | en | code | 2 | github-code | 50 |
19848930255 | import tkinter as tk #importing tkinter module
from PIL import ImageTk, Image #importing PIL for image processing.
import tkinter.filedialog as tf
from stegano import exifHeader as stg
from tkinter import messagebox
def close_open():
window.destroy()
First_Screen()
def back_decode():
Decode_tk.destroy()
First_Screen()
def back():
Encode_tk.destroy()
First_Screen()
def Encode():
S_Screen.destroy()
global Encode_tk
Encode_tk = tk.Tk()
Encode_tk.title("Encode")
Encode_tk.geometry("700x700")
bg_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\bg_en.jpg")
bg_img = ImageTk.PhotoImage(bg_img_load)
bg_label = tk.Label(master= Encode_tk, image = bg_img)
bg_label.place(x = 0, y= 0)
back_button_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\bb.png")
back_button_img = ImageTk.PhotoImage(back_button_img_load)
back_button = tk.Button(master= Encode_tk, image = back_button_img,bd = 0,command = back)
back_button.place(x = 0,y= 2)
file_name_entry = tk.Entry(bd = 0.1)
file_name_entry.place(x = 330, y = 194,height = 30,width = 242)
message_text = tk.Text(bd = 0)
message_text.place(x = 330 , y = 260,width = 240, height = 120)
Encode_button_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\encode_b.png")
Encode_button_img = ImageTk.PhotoImage(Encode_button_img_load)
def openfile():
global fileopen
fileopen = tk.StringVar()
fileopen = tf.askopenfilename(initialdir = "/Desktop", title = "Select File", filetypes = (("jpeg file", "*jpg"), ("all files","*.*")))
dirlabel = tk.Entry(master = Encode_tk,bd= 0)
dirlabel.insert(0, fileopen)
dirlabel.place(x = 329, y = 402, width = 240, height = 35)
def Encodee():
response = messagebox.askyesno("pop up", "Do you want to encode?")
if response == 1:
stg.hide(fileopen, file_name_entry.get()+'..jpg',message_text.get(1.0, tk.END))
messagebox.showinfo("pop up", "Successfully encode")
else:
messagebox.showwarning('pop up', "Unsuccessful")
Encode_button = tk.Button(master=Encode_tk, image = Encode_button_img, bd = 0, command = Encodee)
Encode_button.place(x = 240, y = 520,height = 68)
select_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\selcet.png")
select_img = ImageTk.PhotoImage(select_img_load)
select_button = tk.Button(master=Encode_tk,image = select_img, bd = 0,relief = tk.GROOVE, command = openfile)
select_button.place(x = 160, y = 400)
Encode_tk.mainloop()
def Decode():
S_Screen.destroy()
global Decode_tk
Decode_tk = tk.Tk()
Decode_tk.title("Decode")
Decode_tk.geometry("700x700")
bg_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\bg_en.jpg")
bg_img = ImageTk.PhotoImage(bg_img_load)
bg_label = tk.Label(master= Decode_tk, image = bg_img)
bg_label.place(x = 0, y= 0)
back_button_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\bb.png")
back_button_img = ImageTk.PhotoImage(back_button_img_load)
back_button = tk.Button(master= Decode_tk, image = back_button_img,bd = 0,command = back_decode)
back_button.place(x = 0,y= 2)
file_name_entry = tk.Entry(bd = 0.1)
file_name_entry.place(x = 330, y = 194,height = 30,width = 242)
message_text = tk.Text(bd = 0)
message_text.place(x = 330 , y = 260,width = 240, height = 100)
def openfile():
global fileopen1
fileopen1 = tk.StringVar()
fileopen1 = tf.askopenfilename(initialdir = "/Desktop", title = "Select File", filetypes = (("jpeg file", "*jpg"), ("all files","*.*")))
file_name_entry.insert(0,fileopen1)
def Decodee():
message = stg.reveal(fileopen1)
message_text.insert("1.0", message)
select_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\selcet1.png")
select_img = ImageTk.PhotoImage(select_img_load)
select_button = tk.Button(master=Decode_tk,image = select_img, bd = .3,command = openfile)
select_button.place(x = 280, y = 130)
Decode_button_img_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\decodeb.jpg")
Decode_button_img = ImageTk.PhotoImage(Decode_button_img_load)
Decode_button = tk.Button(master=Decode_tk, image = Decode_button_img, bd = 0,command = Decodee)
Decode_button.place(x = 250, y = 520)
Decode_tk.mainloop()
def First_Screen():
global S_Screen
S_Screen = tk.Tk() #creating tkinter instance.
S_Screen.title("Steganographer By D")
S_Screen.geometry("1200x800")
S_Screen.maxsize(1200,800)
S_Screen.minsize(1200,800)
Logo_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\data-encryption.png")
Logo_image = ImageTk.PhotoImage(Logo_load)
Logo_label = tk.Label(master= S_Screen, image = Logo_image)
Logo_label.place(x = 420, y = 80)
encode_img = ImageTk.PhotoImage(file="C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\button.png")
encode_b = tk.Button(master=S_Screen, image = encode_img , bd =0, activebackground = "#d8d8d8",relief = tk.GROOVE, command = Encode)
encode_b.place(x = 230, y = 400)
decode_img = ImageTk.PhotoImage(file="C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\button (1).png")
decode_b = tk.Button(master=S_Screen, image = decode_img , bd =0, activebackground = "#d8d8d8",relief = tk.SOLID,command = Decode)
decode_b.place(x = 670 , y= 400)
S_Screen.mainloop()
window = tk.Tk()
window.title("Steganographer By D")
window.geometry("1200x800")
Background_image = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\bg.jpg")
rendered_image = ImageTk.PhotoImage(Background_image)
Background_Label = tk.Label(master=window, image = rendered_image)
greeting = tk.Label(master = window,text = "WEL-COME IN STEGANOGRPHER")
greeting.config(font = ("Courier", 44))
Close_button_image_load = Image.open("C:\\Users\\u\\Desktop\\py programmes\\Tkinter_Programmes\\Stegnographer\\click.png")
Close_button_image = ImageTk.PhotoImage(Close_button_image_load)
window_close_button = tk.Button(image = Close_button_image, bd = 0, command = close_open)
window_close_button.place(x = 460, y = 400)
greeting.place(x = 200, y = 250)
Background_Label.place(x = 0, y = -3)
window.mainloop()
| VishvaAvenue/Steganographer | STEGNOGRAPHER.pyw | STEGNOGRAPHER.pyw | pyw | 7,213 | python | en | code | 0 | github-code | 50 |
37060922159 | import csv
import re
import pdb
import requests
from lxml import etree
import json
import string
import os
def validate(item):
if item == None:
item = ''
if type(item) == int or type(item) == float:
item = str(item)
if type(item) == list:
item = ' '.join(item)
return item.replace(u'\u2013', '-').encode('ascii', 'ignore').encode("utf8").strip().replace('\t', '').replace('\n', ' ')
def eliminate_space(items):
rets = []
for item in items:
item = validate(item)
if item != '':
rets.append(item)
return rets
def get_index(val, arr):
for idx, item in enumerate(arr):
if val == item:
return idx
return 0
def get_name(arr):
for item in arr:
if "Name:" in item:
return validate(item.split(':')[1])
return ''
def get_email(arr):
for idx, item in enumerate(arr):
if 'Email:' in item:
if idx+1 < len(arr):
return arr[idx+1]
return ''
def scrape():
output_list = []
session = requests.Session()
file_name = os.path.dirname(os.path.realpath(__file__)).split('/')[-1] + '.csv'
history = []
with open(file_name, mode='w') as output_file:
writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(["School", "Sports", "Position", "Name", "Email"])
for alpha in string.ascii_lowercase:
page_idx = 1
while True:
url = "http://officials.myohsaa.org/Outside/SearchSchool?OhsaaId=&Name={}&page={}".format(alpha, page_idx)
source = session.get(url).text
response = etree.HTML(source)
schools = eliminate_space(response.xpath('.//table[@class="table table-striped"]//a[@class="btn btn-warning"]/@href'))
if len(schools) == 0:
break
else:
for s_id in schools:
s_url = "http://officials.myohsaa.org/Outside/Schedule/SportsInformation?" + s_id.split('?')[1]
if s_url not in history:
history.append(s_url)
try:
s_response = etree.HTML(session.get(s_url).text)
if s_response is not None:
school_name = validate(validate(s_response.xpath('.//div[@class="schoolHeader"]//h2//text()')).split('(')[0])
events = s_response.xpath('.//table[@class="displayTable"]//tr')[1:]
for event in events:
tds = event.xpath('.//td')
sport_name = validate(tds[0].xpath('.//text()'))
gender = ['Boys', 'Girls']
for t_idx, td in enumerate(tds[1:]):
name = validate(td.xpath('.//text()'))
if name != 'N/A':
output = [
school_name,
gender[t_idx] + ' ' +sport_name,
"",
name,
validate(td.xpath('.//a/@href')).replace('mailto:', '')
]
writer.writerow(output)
except Exception as e:
pass
page_idx += 1
scrape()
| coralisland-git/Coach-Scraper | phase_1/officials_myohsaa_org/scrape.py | scrape.py | py | 3,828 | python | en | code | 0 | github-code | 50 |
31790259859 | """
This script handles the loading of the cleaned metadata into the mongodb.
Data from scraping server is compressed.
server: scraping server
workdir: data/corpus
command: tar -czvf KCP_<list_of_corpus_ids_to_compress_separated_by_underscore>.tar.gz
Send compressed data to gateway:
server: scraping server
workdir: data/corpus
command: rsync -avP KCP_<list_of_corpus_ids_to_compress_separated_by_underscore>.tar.gz gw1:~/
Send data in gateway to app server:
server: gateway server
workdir: ~/
command: rsync -avP KCP_<list_of_corpus_ids_to_compress_separated_by_underscore>.tar.gz app_server:/<path_to_app>/wb_nlp/data/corpus/
Decompress data in app server:
server: app server
workdir: /<path_to_app>/wb_nlp/data/corpus/
command: tar -xzvf KCP_<list_of_corpus_ids_to_compress_separated_by_underscore>.tar.gz
Enter nlp_api container
server: app server
command: docker exec -it wb_nlp_nlp_api_1 /bin/bash
Activate nlp_api conda environment
server: nlp_api container
command: conda activate nlp_api
Load available new data to db and es
server: nlp_api container
command: python pipelines/loading/load_metadata.py
Further steps:
Clean processed data
server: scraping server
workdir: /workspace/
command: python -u ./scripts/cleaning/clean_corpus.py --cleaning-config-id <cleaning_config_id> --input-dir data/corpus --source-dir-name EN_TXT_ORIG --recursive -vv |& tee ./logs/clean_corpus.py.log
Compress cleaned data
server: scraping server
workdir: data/corpus
command: tar -czvf KCP_cleaned_corpus-id-1_corpus-id-2_corpus-id-3.tar.gz cleaned/<cleaning_config_id>/corpus_id_1 cleaned/<cleaning_config_id>/corpus_id_2 cleaned/<cleaning_config_id>/corpus_id_3 ...
Send cleaned data to gateway
server: scraping server
workdir: data/corpus
command: rsync -avP KCP_cleaned_corpus-id-1_corpus-id-2_corpus-id-3.tar.gz gw1:~/
Send cleaned data from gateway to app server
server: gateway server
workdir: ~/
command: rsync -avP KCP_cleaned_corpus-id-1_corpus-id-2_corpus-id-3.tar.gz app_server:/<path_to_app>/wb_nlp/data/corpus/
Transform documents and load to model server
1. Load data to elasticsearch. Do this by running the following snippet:
# # # Optional - depends if the index is broken
# # from elasticsearch_dsl import Index
# # i = Index(name=elasticsearch.DOC_INDEX, using=elasticsearch.get_client())
# # i.delete()
# from wb_nlp.interfaces import elasticsearch, mongodb
# docs_metadata_coll = mongodb.get_collection(
# db_name="test_nlp", collection_name="docs_metadata")
# docs_metadata = list(docs_metadata_coll.find({}))
# elasticsearch.make_nlp_docs_from_docs_metadata(docs_metadata, ignore_existing=True, en_txt_only=True, remove_doc_whitespaces=True)
# elasticsearch.make_nlp_docs_from_docs_metadata(docs_metadata, ignore_existing=False, en_txt_only=True, remove_doc_whitespaces=True)
2. Next clean the documents and generate the vectors for the data.
"""
from datetime import datetime
import json
from pathlib import Path
from wb_nlp import dir_manager
from wb_nlp.interfaces import elasticsearch, mongodb
def get_docs_metadata_collection():
return mongodb.get_collection(
db_name="test_nlp", collection_name="docs_metadata")
def load_clean_metadata():
"""
This function loads the cleaned metadata generated from
pipelines/cleaning/document_pipeline.py to the mongodb database.
The files are expected to be stored in corpus/<corpus_id>/<l_corpus_id>_clean_metadata.jsonl paths.
"""
collection = get_docs_metadata_collection()
ids_in_db = {i["_id"]
for i in collection.find({}, projection=["_id"])}
corpus_path = Path(dir_manager.get_data_dir("corpus"))
for metadata_file in corpus_path.glob("*/*_clean_metadata.jsonl"):
corpus_id = metadata_file.parent.name
print(f"Processing metadata from {corpus_id}...")
metadata = []
total_meta = 0
with open(metadata_file) as open_file:
for line in open_file:
total_meta += 1
line = line.strip()
meta = json.loads(line)
if meta["id"] in ids_in_db:
continue
meta["_id"] = meta["id"]
pid = meta.get("project_id")
if not pid:
wb_pid = meta.get("wb_project_id")
if wb_pid:
meta["project_id"] = wb_pid
# ["adm_region", "doc_type", "major_doc_type"]:
for field in meta.keys():
# Convert any empty data to None
if not meta[field]:
meta[field] = None
metadata.append(meta)
ids_in_db.add(meta["id"])
print(
f"Inserting {len(metadata)} of {total_meta} data for {corpus_id} to DB...")
if metadata:
collection.insert_many(metadata)
def load_data_to_es(ignore_existing=True):
docs_metadata_coll = get_docs_metadata_collection()
docs_metadata = list(docs_metadata_coll.find({}))
elasticsearch.make_nlp_docs_from_docs_metadata(
docs_metadata, ignore_existing=ignore_existing, en_txt_only=True, remove_doc_whitespaces=True)
return len(docs_metadata)
def set_update_latest_data(mongodb_doc_count):
collection = mongodb.get_latest_update_collection()
search = elasticsearch.NLPDoc.search()
search.aggs.bucket("corpus_count", "terms", field="corpus").bucket(
"docs_count", "terms", field="major_doc_type")
executed_search = search.execute()
docs_summary_stats = executed_search.aggs.to_dict()
collection.insert_one(
dict(
last_update_date=datetime.now(),
docs_summary_stats=docs_summary_stats,
es_doc_count=search.count(),
mongodb_doc_count=mongodb_doc_count,
)
)
def main():
print("load_clean_metadata")
load_clean_metadata()
print("load_data_to_es")
mongodb_doc_count = load_data_to_es()
print("set_update_latest_data")
set_update_latest_data(mongodb_doc_count)
print("Finished...")
if __name__ == "__main__":
main()
| PinkDiamond1/wb-nlp-apps | pipelines/loading/load_metadata.py | load_metadata.py | py | 6,181 | python | en | code | null | github-code | 50 |
12787411219 | import sys
import argparse
import numpy as np
import pickle
from sklearn.tree import DecisionTreeRegressor
from sklearn.cluster import MiniBatchKMeans
from multiprocessing import Process, Queue
from multiprocessing.pool import ThreadPool
from helper import *
"""
Directory Structure:
depth-pose-estimation/
data/
datasets/
CAD-60/
NTU-RGBD/
...
processed/
CAD-60/
depth_images.npy
joints.npy
NTU-RGBD/
depth_images.npy
joints.npy
...
models/
random-tree-walks/
rtw.py
helper.py
...
output/
random-tree-walks/
CAD-60/
models/
preds/
png/
NTU-RGBD/
models/
preds/
png/
"""
###############################################################################
# Parser arguments
###############################################################################
# Command-line interface. All options have defaults so the script can run
# without arguments; output directories are derived from --dataset below.
parser = argparse.ArgumentParser(description='Random Tree Walks algorithm.')
# Loading options for the model and data
# parser.add_argument('--load-params', action='store_true',
#                     help='Load the parameters')
parser.add_argument('--load-model', action='store_true',
                    help='Load a pretrained model')
parser.add_argument('--load-test', action='store_true',
                    help='Run trained model on test set')
# Location of data directories
parser.add_argument('--input-dir', type=str, default='../../data/processed',
                    help='Directory of the processed input')
# Dataset name doubles as the subdirectory under --input-dir.
parser.add_argument('--dataset', type=str, default='NTU-RGBD', # NTU-RGBD, CAD-60
                    help='Name of the dataset to load')
# Location of output saved data directories
# parser.add_argument('--model-dir', type=str, default='../../output/random-tree-walks/models',
#                     help='Directory of the saved model')
# parser.add_argument('--preds-dir', type=str, default='../../output/random-tree-walks/preds',
#                     help='Directory to save predictions')
# parser.add_argument('--png-dir', type=str, default='../../output/random-tree-walks/png',
#                     help='Directory to save prediction images')
# Training options
parser.add_argument('--seed', type=int, default=1111,
                    help='Random seed')
# NOTE(review): --shuffle is an int used as a boolean flag (0/1); --seed and
# --shuffle are parsed but not applied in this part of the script — confirm
# they are consumed downstream.
parser.add_argument('--shuffle', type=int, default=1,
                    help='Shuffle the data')
parser.add_argument('--multithread', action='store_true',
                    help='Train each joint on a separate threads')
# parser.add_argument('--num-threads', type=int, default=3,
#                     help='Number of threads to use to concurrently process joints.')
# Evaluation hyperparameters
parser.add_argument('--num-steps', type=int, default=300,
                    help='Number of steps during evaluation')
parser.add_argument('--step-size', type=int, default=2,
                    help='Step size (in cm) during evaluation')
# Output options
parser.add_argument('--make-png', action='store_true',
                    help='Draw predictions on top of inputs')
args = parser.parse_args()
# Set location of output saved files.
# These are attached to `args` unconditionally, overriding (not merging with)
# any future --model-dir/--preds-dir/--png-dir flags; paths follow the
# directory layout documented at the top of this file.
args.model_dir = '../../output/random-tree-walks/' + args.dataset + '/models'
args.preds_dir = '../../output/random-tree-walks/' + args.dataset + '/preds'
args.png_dir = '../../output/random-tree-walks/' + args.dataset + '/png'
###############################################################################
# Training hyperparameters
###############################################################################
# Train-test ratio (fraction of examples used for training; see split_dataset)
TRAIN_RATIO = 0.8
# Maximum number of examples kept when load_dataset(..., small_data=True)
SMALL_DATA_SIZE = 5000
# Dimension of each feature vector
NUM_FEATS = 500
# Maximum pixel offset used when sampling feature probe points (see compute_theta)
MAX_FEAT_OFFSET = 150
# Number of samples for each joint for each example
NUM_SAMPLES = 300
# Set maximum XYZ offset from each joint
MAX_XY_OFFSET = 10 # image xy coordinates (pixels)
MAX_Z_OFFSET = 0.5 # z-depth coordinates (meters)
# Number of clusters for K-Means regression
K = 20
###############################################################################
# Dataset Constants
###############################################################################
# Depth image dimension (rows H x cols W); load_dataset validates against these.
# H, W = 240, 320
H, W = 424, 512
# Kinect camera intrinsic constant; presumably used to convert between pixel
# and world coordinates — confirm at usage sites.
# See https://help.autodesk.com/view/MOBPRO/2018/ENU/?guid=__cpp_ref__nui_image_camera_8h_source_html
C = 3.8605e-3 # NUI_CAMERA_DEPTH_NOMINAL_INVERSE_FOCAL_LENGTH_IN_PIXELS
###############################################################################
# RTW Constants
###############################################################################
# Number of joints in a skeleton
NUM_JOINTS = 15
# List of joint names, indexed by joint id (the number in each label)
JOINT_NAMES = ['NECK (0)', 'HEAD (1)', \
               'LEFT SHOULDER (2)', 'LEFT ELBOW (3)', 'LEFT HAND (4)', \
               'RIGHT SHOULDER (5)', 'RIGHT ELBOW (6)', 'RIGHT HAND (7)', \
               'LEFT KNEE (8)', 'LEFT FOOT (9)', \
               'RIGHT KNEE (10)', 'RIGHT FOOT (11)', \
               'LEFT HIP (12)', \
               'RIGHT HIP (13)', \
               'TORSO (14)']
# Map from joint names to index
JOINT_IDX = {
    'NECK': 0,
    'HEAD': 1,
    'LEFT SHOULDER': 2,
    'LEFT ELBOW': 3,
    'LEFT HAND': 4,
    'RIGHT SHOULDER': 5,
    'RIGHT ELBOW': 6,
    'RIGHT HAND': 7,
    'LEFT KNEE': 8,
    'LEFT FOOT': 9,
    'RIGHT KNEE': 10,
    'RIGHT FOOT': 11,
    'LEFT HIP': 12,
    'RIGHT HIP': 13,
    'TORSO': 14,
}
# Set the kinematic tree (starting from torso body center).
# kinem_order is the joint visiting order (torso=14 first); kinem_parent[i]
# is the parent joint id of kinem_order[i], with -1 marking the root.
kinem_order = [14, 0, 13, 12, 1, 2, 5, 3, 6, 4, 7, 8, 10, 9, 11]
kinem_parent = [-1, 14, 14, 14, 0, 0, 0, 2, 5, 3, 6, 12, 13, 8, 10]
###############################################################################
# Load dataset splits
###############################################################################
def load_dataset(processed_dir, is_mask=False, small_data=False):
    """Loads the depth images and joints from the processed dataset.

    Note that each joint is a coordinate of the form (im_x, im_y, depth_z).
    Each depth image is an H x W image containing depth_z values.

    depth_z values are in meters.

    @params:
        processed_dir : directory containing depth_images.npy and joints.npy
        is_mask       : if True, also load depth_mask.npy and zero out the
                        depth pixels outside the mask
        small_data    : if True, keep only a random subset of at most
                        SMALL_DATA_SIZE examples (quick experiments)

    @return:
        depth_images : depth images (N x H x W)
        joints : joint positions (N x NUM_JOINTS x 3)
    """
    logger.debug('Loading data from directory %s', processed_dir)

    # Load input and labels from numpy files
    depth_images = np.load(os.path.join(processed_dir, 'depth_images.npy')) # N x H x W depth images
    joints = np.load(os.path.join(processed_dir, 'joints.npy')) # N x NUM_JOINTS x 3 joint locations

    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if depth_images.shape[1] != H or depth_images.shape[2] != W:
        raise ValueError('Invalid dimensions for depth image: expected %dx%d, got %dx%d'
                         % (H, W, depth_images.shape[1], depth_images.shape[2]))

    # Load and apply mask to the depth images
    if is_mask:
        depth_mask = np.load(os.path.join(processed_dir, 'depth_mask.npy')) # N x H x W depth mask
        depth_images = depth_images * depth_mask

    # Run experiments on random subset of data. Clamp the sample size so a
    # dataset smaller than SMALL_DATA_SIZE no longer crashes np.random.choice
    # (replace=False requires the sample size <= the population size).
    if small_data:
        num_samples = min(SMALL_DATA_SIZE, depth_images.shape[0])
        random_idx = np.random.choice(depth_images.shape[0], num_samples, replace=False)
        depth_images, joints = depth_images[random_idx], joints[random_idx]

    logger.debug('Data loaded: # data: %d', depth_images.shape[0])
    return depth_images, joints
def split_dataset(X, y, train_ratio):
"""Splits the dataset according to the train-test ratio.
@params:
X : depth images (N x H x W)
y : joint positions (N x NUM_JOINTS x 3)
train_ratio : ratio of training to test
"""
test_ratio = 1.0 - train_ratio
num_test = int(X.shape[0] * test_ratio)
X_train, y_train = X[num_test:], y[num_test:]
X_test, y_test = X[:num_test], y[:num_test]
logger.debug('Data split: # training data: %d, # test data: %d', X_train.shape[0], X_test.shape[0])
return X_train, y_train, X_test, y_test
processed_dir = os.path.join(args.input_dir, args.dataset) # directory of saved numpy files
depth_images, joints = load_dataset(processed_dir)
X_train, y_train, X_test, y_test = split_dataset(depth_images, joints, TRAIN_RATIO)
num_train = X_train.shape[0]
num_test = X_test.shape[0]
###############################################################################
# Train model
###############################################################################
def compute_theta(num_feats=NUM_FEATS, max_feat_offset=MAX_FEAT_OFFSET):
"""Computes the theta for each skeleton.
@params:
max_feat_offset : the maximum offset for features (before divided by d)
num_feats : the number of features of each offset point
"""
logger.debug('Computing theta...')
# Compute the theta = (-max_feat_offset, max_feat_offset) for 4 coordinates (x1, x2, y1, y2)
theta = np.random.randint(-max_feat_offset, max_feat_offset + 1, (4, num_feats)) # (4, num_feats)
return theta
def get_features(img, q, z, theta):
"""Gets the feature vector for a single example.
@params:
img : depth image = (H x W)
q : joint xyz position with some random offset vector
z : z-value of body center
theta : (-max_feat_offset, max_feat_offset) = (4, num_feats)
"""
# Retrieve the (y, x) of the joint offset coordinates
coor = q[:2][::-1] # coor: flip x, y -> y, x
coor[0] = np.clip(coor[0], 0, H-1) # limits y between 0 and H
coor[1] = np.clip(coor[1], 0, W-1) # limits x between 0 and W
coor = np.rint(coor).astype(int) # rounds to nearest integer
# Find z-value of joint offset by indexing into depth imag
LARGE_NUM = 100
img[img == 0] = LARGE_NUM # no division by zero
dq = z if (img[tuple(coor)] == LARGE_NUM) else img[tuple(coor)] # initialize to LARGE_NUM
# Normalize x theta by z-value
x1 = np.clip(coor[1] + theta[0] / dq, 0, W-1).astype(int)
x2 = np.clip(coor[1] + theta[2] / dq, 0, W-1).astype(int)
# Normalize y theta by z-value
y1 = np.clip(coor[0] + theta[1] / dq, 0, H-1).astype(int)
y2 = np.clip(coor[0] + theta[3] / dq, 0, H-1).astype(int)
# Get the feature vector as difference of depth-values
feature = img[y1, x1] - img[y2, x2]
return feature
def get_random_offset(max_offset_xy=MAX_XY_OFFSET, max_offset_z=MAX_Z_OFFSET):
"""Gets xyz vector with uniformly random xy and z offsets.
"""
offset_xy = np.random.randint(-max_offset_xy, max_offset_xy + 1, 2)
offset_z = np.random.uniform(-max_offset_z, max_offset_z, 1)
offset = np.concatenate((offset_xy, offset_z)) # xyz offset
return offset
def get_training_samples(joint_id, X, y, theta, num_feats=NUM_FEATS, num_samples=NUM_SAMPLES):
"""Generates training samples for each joint.
Each sample is (i, q, u, f) where:
i is the index of the depth image,
q is the random offset point from the joint,
u is the unit direction vector toward the joint location,
f is the feature array
@params:
X : depth images (N x H x W)
y : joint position = (N x NUM_JOINTS x 3) = (im_x, im_y, depth_z)
joint_id : current joint id
num_samples : number of samples of each joint
max_offset_xy : maximum offset for samples in (x, y) axes
max_offset_z : maximum offset for samples in z axis
@return:
S_f : samples feature array (N x num_samples x num_feats)
S_u : samples unit direction vectors (N x num_samples x 3)
"""
num_train, _, _ = X.shape
S_f = np.zeros((num_train, num_samples, num_feats), dtype=np.float64)
S_u = np.zeros((num_train, num_samples, 3), dtype=np.float64)
for train_idx in range(num_train):
if train_idx % 100 == 0:
logger.debug('Joint %s: Processing image %d / %d', JOINT_NAMES[joint_id], train_idx, num_train)
# Create samples for each training example
for sample_idx in range(num_samples):
depth_im = X[train_idx]
offset = get_random_offset()
unit_offset = 0 if np.linalg.norm(offset) == 0 else (-offset / np.linalg.norm(offset))
body_center_z = y[train_idx][JOINT_IDX['TORSO']][2] # body center (torso) index, 2 = z_index
S_f[train_idx, sample_idx] = get_features(depth_im, y[train_idx][joint_id] + offset, body_center_z, theta)
S_u[train_idx, sample_idx] = unit_offset
return S_f, S_u
def stochastic(regressor, features, unit_directions):
"""Applies stochastic relaxation when choosing the unit direction. Training
samples at the leaf nodes are further clustered using K-means.
"""
L = {}
indices = regressor.apply(features) # leaf id of each sample
leaf_ids = np.unique(indices) # array of unique leaf ids
logger.debug('Running stochastic (minibatch) K-Means...')
for leaf_id in leaf_ids:
kmeans = MiniBatchKMeans(n_clusters=K, batch_size=1000)
labels = kmeans.fit_predict(unit_directions[indices == leaf_id])
weights = np.bincount(labels).astype(float) / labels.shape[0]
# Normalize the centers
centers = kmeans.cluster_centers_
centers /= np.linalg.norm(centers, axis=1)[:, np.newaxis]
# checkUnitVectors(centers)
L[leaf_id] = (weights, centers)
return L
def train(joint_id, X, y, model_dir, min_samples_leaf=400, load_models=args.load_model):
"""Trains a regressor tree on the unit directions towards the joint.
@params:
joint_id : current joint id
X : samples feature array (N x num_samples x num_feats)
y : samples unit direction vectors (N x num_samples x 3)
min_samples_split : minimum number of samples required to split an internal node
load_models : load trained models from disk (if exist)
"""
logger.debug('Start training %s model...', JOINT_NAMES[joint_id])
regressor_path = os.path.join(model_dir, 'regressor' + str(joint_id) + '.pkl')
L_path = os.path.join(model_dir, 'L' + str(joint_id) + '.pkl')
# Load saved model from disk
if load_models and (os.path.isfile(regressor_path) and os.path.isfile(L_path)):
logger.debug('Loading model %s from files...', JOINT_NAMES[joint_id])
regressor = pickle.load(open(regressor_path, 'rb'))
L = pickle.load(open(L_path, 'rb'))
return regressor, L
X_reshape = X.reshape(X.shape[0] * X.shape[1], X.shape[2]) # (N x num_samples, num_feats)
y_reshape = y.reshape(y.shape[0] * y.shape[1], y.shape[2]) # (N x num_samples, 3)
# Count the number of valid (non-zero) samples
valid_rows = np.logical_not(np.all(X_reshape == 0, axis=1)) # inverse of invalid samples
logger.debug('Model %s - Valid samples: %d / %d', JOINT_NAMES[joint_id], X_reshape[valid_rows].shape[0], X_reshape.shape[0])
# Fit decision tree to samples
regressor = DecisionTreeRegressor(min_samples_leaf=min_samples_leaf)
regressor.fit(X_reshape[valid_rows], y_reshape[valid_rows])
L = stochastic(regressor, X_reshape, y_reshape)
# Print statistics on leafs
leaf_ids = regressor.apply(X_reshape)
bin = np.bincount(leaf_ids)
unique_ids = np.unique(leaf_ids)
biggest = np.argmax(bin)
smallest = np.argmin(bin[bin != 0])
logger.debug('Model %s - # Leaves: %d', JOINT_NAMES[joint_id], unique_ids.shape[0])
logger.debug('Model %s - Smallest Leaf ID: %d, # Samples: %d/%d', JOINT_NAMES[joint_id], smallest, bin[bin != 0][smallest], np.sum(bin))
logger.debug('Model %s - Biggest Leaf ID: %d, # Samples: %d/%d', JOINT_NAMES[joint_id], biggest, bin[biggest], np.sum(bin))
logger.debug('Model %s - Average Leaf Size: %d', JOINT_NAMES[joint_id], np.sum(bin) / unique_ids.shape[0])
# Save models to disk
pickle.dump(regressor, open(regressor_path, 'wb'))
pickle.dump(L, open(L_path, 'wb'))
return regressor, L
def train_parallel(joint_id, X, y, theta, model_dir, regressor_queue, L_queue):
"""Train each join in parallel.
"""
S_f, S_u = get_training_samples(joint_id, X, y, theta)
regressor, L = train(joint_id, S_f, S_u, model_dir)
regressor_queue.put({joint_id: regressor})
L_queue.put({joint_id: L})
def train_series(joint_id, X, y, theta, model_dir):
"""Train each joint sequentially.
"""
S_f, S_u = get_training_samples(joint_id, X, y, theta)
regressor, L = train(joint_id, S_f, S_u, model_dir)
return regressor, L
logger.debug('\n------- Training models -------')
theta = compute_theta()
regressors, Ls = {}, {}
if not args.multithread:
for joint_id in range(NUM_JOINTS):
regressors[joint_id], Ls[joint_id] = train_series(joint_id, X_train, y_train, theta, args.model_dir)
else:
processes = []
regressor_queue, L_queue = Queue(), Queue()
for joint_id in range(NUM_JOINTS):
p = Process(target=train_parallel, name='Thread #%d' % joint_id, args= \
(joint_id, X_train, y_train, theta, args.model_dir, regressor_queue, L_queue))
processes.append(p)
p.start()
regressors_tmp = [regressor_queue.get() for p in processes]
Ls_tmp = [L_queue.get() for p in processes]
regressors = dict(list(i.items())[0] for i in regressors_tmp)
Ls = dict(list(i.items())[0] for i in Ls_tmp)
[p.join() for p in processes]
###############################################################################
# Evaluate model
###############################################################################
def test_model(regressor, L, theta, qm0, img, body_center, num_steps=args.num_steps, step_size=args.step_size):
"""Test the model on a single example.
"""
qm = np.zeros((num_steps + 1, 3))
qm[0] = qm0
joint_pred = np.zeros(3)
for i in range(num_steps):
body_center_z = body_center[2]
f = get_features(img, qm[i], body_center_z, theta).reshape(1, -1) # flatten feature vector
leaf_id = regressor.apply(f)[0]
idx = np.random.choice(K, p=L[leaf_id][0]) # L[leaf_id][0] = weights
u = L[leaf_id][1][idx] # L[leaf_id][1] = centers
qm[i+1] = qm[i] + u * step_size
qm[i+1][0] = np.clip(qm[i+1][0], 0, W-1) # limit x between 0 and W
qm[i+1][1] = np.clip(qm[i+1][1], 0, H-1) # limit y between 0 and H
qm[i+1][2] = img[int(qm[i+1][1]), int(qm[i+1][0])] # index (y, x) into image for z position
joint_pred += qm[i+1]
joint_pred = joint_pred / num_steps
return qm, joint_pred
logger.debug('\n------- Testing models -------')
qms = np.zeros((num_test, NUM_JOINTS, args.num_steps+1, 3))
y_pred = np.zeros((num_test, NUM_JOINTS, 3))
local_error = np.zeros((num_test, args.num_steps+1, NUM_JOINTS, 3))
# if loadTest:
# qms = np.load(outDir+modelsDir+'/qms.npy')
# y_pred = np.load(outDir+modelsDir+'/y_pred.npy')
# localErr = np.load(outDir+modelsDir+'/local_err.npy')
# else:
for kinem_idx, joint_id in enumerate(kinem_order):
logger.debug('Testing %s model', JOINT_NAMES[joint_id])
parent_joint_id = kinem_parent[kinem_idx]
for test_idx in range(num_test):
qm0 = y_test[test_idx][JOINT_IDX['TORSO']] if parent_joint_id == -1 else y_pred[test_idx][parent_joint_id]
qms[test_idx][joint_id], y_pred[test_idx][joint_id] = test_model(regressors[joint_id], Ls[joint_id], theta, qm0, X_test[test_idx], y_test[test_idx][JOINT_IDX['TORSO']])
local_error[test_idx, :, joint_id, :] = y_test[test_idx][joint_id] - qms[test_idx][joint_id]
y_pred[:, :, 2] = y_test[:, :, 2]
# np.save(modelsDir + 'qms.npy', qms)
# np.save(modelsDir + 'y_pred.npy', y_pred)
# np.save(modelsDir + 'local_error.npy', local_error)
#
# for joint_id in range(NUM_JOINTS):
# # print(y_test[:, joint_id].shape)
# np.savetxt(outDir+modelsDir+'/pred/'+JOINT_NAMES[joint_id]+'_test.txt', y_test[:, joint_id], fmt='%.3f')
# # print(y_pred[:, jointID].shape)
# np.savetxt(outDir+modelsDir+'/pred/'+JOINT_NAMES[joint_id]+'_pred.txt', y_pred[:, joint_id], fmt='%.3f ')
###############################################################################
# Run evaluation metrics
###############################################################################
logger.debug('\n------- Computing evaluation metrics -------')
def get_distances(y_test, y_pred):
"""Compute the raw world distances between the prediction and actual joint
locations.
"""
assert y_test.shape == y_pred.shape, "Mismatch of y_test and y_pred"
distances = np.zeros((y_test.shape[:2]))
for i in range(y_test.shape[0]):
p1 = pixel2world(y_test[i], C)
p2 = pixel2world(y_pred[i], C)
distances[i] = np.sqrt(np.sum((p1-p2)**2, axis=1))
return distances
distances = get_distances(y_test, y_pred) * 100.0 # convert from m to cm
distances_path = os.path.join(args.preds_dir, 'distances.txt')
np.savetxt(distances_path, distances, fmt='%.3f')
distances_pixel = np.zeros((y_test.shape[:2]))
for i in range(y_test.shape[0]):
p1 = y_test[i]
p2 = y_pred[i]
distances_pixel[i] = np.sqrt(np.sum((p1-p2)**2, axis=1))
mAP = 0
for i in range(NUM_JOINTS):
logger.debug('\nJoint %s:', JOINT_NAMES[i])
logger.debug('Average distance: %f cm', np.mean(distances[:, i]))
logger.debug('Average pixel distance: %f', np.mean(distances_pixel[:, i]))
logger.debug('5cm accuracy: %f', np.sum(distances[:, i] < 5) / float(distances.shape[0]))
logger.debug('10cm accuracy: %f', np.sum(distances[:, i] < 10) / float(distances.shape[0]))
logger.debug('15cm accuracy: %f', np.sum(distances[:, i] < 15) / float(distances.shape[0]))
mAP += np.sum(distances[:, i] < 10) / float(distances.shape[0])
logger.debug('mAP (10cm): %f', mAP / NUM_JOINTS)
###############################################################################
# Visualize predictions
###############################################################################
# if args.make_png:
logger.debug('\n------- Saving prediction visualizations -------')
for test_idx in range(num_test):
png_path = os.path.join(args.png_dir, str(test_idx) + '.png')
drawPred(X_test[test_idx], y_pred[test_idx], qms[test_idx], y_test[test_idx][JOINT_IDX['TORSO']], png_path, NUM_JOINTS, JOINT_NAMES)
| ddxue/depth-pose-estimation | models/random-tree-walks/rtw.py | rtw.py | py | 22,165 | python | en | code | 16 | github-code | 50 |
17623358754 | # 文件读取的方式
from os import SEEK_SET
# 打开文件
f = open(file="file.txt", mode="r+", encoding="utf8");
# 一次读取指定长度的行,(默认读取一整行),当当前行字节长度小于指定长度时,全部读取
line = f.readline(2);
print("文件内容:",line);
# 一次性读取缓冲大小的文件8000多字节,返回每一行构成的列表,当前
lines = f.readlines(); #小于当前行的字节时仍产输出当前行的字节
print("文件内容:",lines);
# 获取指针的位置
print("当前的文件指针位置:",f.tell())
# 将指针回到文件开始处
f.seek(0,SEEK_SET); #首先将指针移动到文件开始,随后进行修正
# 使用迭代器完成对每行的读取
iter_f = iter(f); #将文件传入到迭代器中,进行迭代
for line in iter_f:
print("当前行内容:",line);
# 关闭文件
f.close();
| jionjion/Python_WorkSpace | PythonBase/src/grammar/file/文件读取.py | 文件读取.py | py | 974 | python | zh | code | 0 | github-code | 50 |
28150700620 | # Dictionaries provided by the instructor
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
# Importing the coffee cup ascii
from art import logo
print(logo)
def check_ingredient(order_ingredients):
""" Checks if there is enough water, milk or coffee in the machine to prepare the drink ordered
:param order_ingredients:
:return: boolean
"""
for item in order_ingredients:
if order_ingredients[item] > resources[item]:
print(f"Sorry, there is not enough {item} for a {order}.")
return False
else:
return True
def add_coins():
"""
Asks for payment using coins and computes the total payment
:return: total
"""
print(f"Your coffee cost: {MENU[order]['cost']}")
quarters = int(input("How many quarters do you insert? "))
dimes = int(input("How many dimes do you insert? "))
nickles = int(input("How many nickles do you insert? "))
pennies = int(input("How many pennies do you insert? "))
total = round(quarters * 0.25 + dimes * 0.10 + nickles * 0.05 + pennies * 0.01, 2)
return total
def making_coffee(drink_order, drink_ingredients):
"""
Prints the order and updates the ingredient resources in the machine
:param drink_order:
:param drink_ingredients:
"""
print(f"Here is your {drink_order} ☕! Enjoy!")
for item in drink_ingredients:
resources[item] -= drink_ingredients[item]
machine_working = True
while machine_working:
# Input order, could be espresso, latte, cappuccino, or for maintenance: report or off
order = input("Hello! What would you like to drink? Espresso, Latte or Cappuccino: ").lower()
if order == "off":
print("Goodbye!")
machine_working = False
elif order == "report":
print(resources)
else:
if check_ingredient(MENU[order]["ingredients"]):
payment = add_coins()
# Compares amount paid to the cost of drink, gives money back and/or drink
if payment < MENU[order]["cost"]:
print(f"Sorry, this is not enough money. Here is your refund: ${payment}")
elif payment > MENU[order]["cost"]:
change = round(payment - MENU[order]["cost"], 2)
print(f"Here is your change: ${change}")
making_coffee(order,MENU[order]["ingredients"])
else:
making_coffee(order,MENU[order]["ingredients"])
| MarieTKD/coffee_machine | coffee.py | coffee.py | py | 3,031 | python | en | code | 0 | github-code | 50 |
31579912658 | """
Tweaks.
"""
import abjad
def bundle_tweaks(argument, tweaks, i=None, total=None, overwrite=False):
if not tweaks:
return argument
all_tweaks = []
for item in tweaks:
if isinstance(item, tuple):
assert len(item) == 2
item, index = item
if 0 <= index and index != i:
continue
if index < 0 and index != -(total - i):
continue
assert isinstance(item, abjad.Tweak), repr(item)
all_tweaks.append(item)
bundle = abjad.bundle(argument, *all_tweaks, overwrite=overwrite)
return bundle
def validate_indexed_tweaks(tweaks):
if tweaks is None:
return
assert isinstance(tweaks, tuple), repr(tweaks)
for tweak in tweaks:
if isinstance(tweak, str | abjad.Tweak):
continue
if (
isinstance(tweak, tuple)
and len(tweak) == 2
and isinstance(tweak[0], abjad.Tweak)
):
continue
raise Exception(tweak)
| trevorbaca/baca | baca/tweaks.py | tweaks.py | py | 1,032 | python | en | code | 7 | github-code | 50 |
39042954466 | from math import sqrt; from itertools import count, islice
def isPrime(n):
return n > 1 and all(n%i for i in islice(count(2), int(sqrt(n)-1)))
def max_prime(a,b):
n = 0
while True :
polynome = n**2 + a*n + b
#print("polynome",n,polynome)
if not isPrime(polynome):
return n
n += 1
max = 0
for a in range(-1000,1000) :
for b in range(-1000,1000 ):
max_ab = max_prime(a,b)
#print("max_ab",a,b,max_ab)
if max_ab>max :
print(a*b) # la derniere valeur est la bonne
max = max_ab
| axel584/Project_Euler | 027.py | 027.py | py | 601 | python | en | code | 0 | github-code | 50 |
19953939788 | import grpc
import threading
import sys
from infinera.chm6.dataplane.v2 import odu_config_pb2
from grpc_client_adaptor import CrudService
def delete_odu_object(odu_id):
odu_config = odu_config_pb2.Chm6OduConfig()
odu_config.base_config.config_id.value = "1-4-L" + str(odu_id) + "-1"
return odu_config
def delete(odu_id):
try:
crud = CrudService()
'''
Ideally only aid can be passed to server for deletion
Passing the empty object for consistency and timestamp
'''
odu = delete_odu_object(odu_id)
print(crud.delete(odu))
except grpc.RpcError as err:
print("Set failed")
print(err.details)
if __name__ == '__main__':
odu_id = sys.argv[1]
delete(odu_id)
| Sampu1980/chm5 | scripts/ut/deprecated/delete_odu_v2.py | delete_odu_v2.py | py | 758 | python | en | code | 0 | github-code | 50 |
3665486757 | import h5py
import blosc2
import blosc2_grok
import numpy as np
from skimage.metrics import structural_similarity as ssim
from tqdm import tqdm
from time import time
if __name__ == '__main__':
# Register grok codec locally
blosc2.register_codec('grok', 160)
# Define the compression and decompression parameters. Disable the filters and the
# splitmode, because these don't work with the codec.
cparams = {
'codec': 160,
'nthreads': 4,
'filters': [],
'splitmode': blosc2.SplitMode.NEVER_SPLIT,
}
# Open the dataset
f = h5py.File('/Users/faltet/Downloads/lung_raw_2000-2100.h5', 'r')
dset = f['/data']
print(f"Compressing dataset of {dset.shape} images ...")
for cratio in range(1, 11):
print(f"Compressing with cratio={cratio}x ...")
# Set the parameters that will be used by grok
kwargs = {
'cod_format': blosc2_grok.GrkFileFmt.GRK_FMT_JP2,
'num_threads': 1, # this does not have any effect (grok should work in multithreading mode)
'quality_mode': "rates",
'quality_layers': np.array([cratio], dtype=np.float64)
}
blosc2_grok.set_params_defaults(**kwargs)
# for i in tqdm(range(dset.shape[0])):
for i in range(1): #dset.shape[0]):
im = dset[i:i+1, ...]
# Transform the numpy array to a blosc2 array. This is where compression happens.
t0 = time()
#blocks = (1, im.shape[1] // 4, im.shape[2] // 4)
blocks = (1, im.shape[1], im.shape[2])
b2im = blosc2.asarray(im, chunks=im.shape, blocks=blocks, cparams=cparams)
time_ = time() - t0
if i == 0:
# Compare with original
im2 = b2im[:]
ssim_ = ssim(im[0], im2[0], data_range=im.max() - im.min())
cratio = b2im.schunk.cratio
print(f"SSIM: {ssim_}")
print(f"cratio: {cratio}")
print(f"time: {time_}")
f.close()
| Blosc/blosc2_grok | bench/encode-blocking.py | encode-blocking.py | py | 2,034 | python | en | code | 0 | github-code | 50 |
19248398401 | import pygame
from main_entity import *
class Main_destroyable_block(Main_entity):
def __init__(self, x, y, y_sprite_sheet_index):
super().__init__(x, y, y_sprite_sheet_index)
self.can_remove = False
self.life_span_after_removal = 300
self.timer_to_remove_start = 0
def update(self):
self.animate()
now = pygame.time.get_ticks()
if self.can_remove and self.timer_to_remove_start + self.life_span_after_removal < now:
self.kill()
def remove(self):
self.can_remove = True
self.timer_to_remove_start = pygame.time.get_ticks()
| ravenstudios/bomberman | main_destroyable_block.py | main_destroyable_block.py | py | 636 | python | en | code | 1 | github-code | 50 |
30843635414 | from slackclient import SlackClient
import pytz
#import datetime
import time
import re
import sys, json
import serial
from channelsList import ChannelsList
from slacker import Slacker
import unicodedata
import threading
from threadTimer import ThreadTimer
def hello(s):
print(s)
MSG_NUM = 16
NEW_DATA_FLAG = -1
SENT = 0
# s = "o shit, it works"
# t = threading.Timer(4.0, hello, [s])
slack = Slacker('')
channels_list = ChannelsList()
slack.get_channels(channels_list)
slack.get_messeges(channels_list)
users_map = slack.get_users()
print(users_map)
slack.send_message('CGTPA9HCK', "lorem ipsum")
slack.send_message('CGTPA9HCK', "dolor")
slack.send_message('CGTPA9HCK', "sit amet")
stopFlag = threading.Event()
thread = ThreadTimer(stopFlag, channels_list)
thread.start()
################################################################################
print("\n\nSerial\n\n")
ser = serial.Serial('COM7')
ser.isOpen()
out = ''
max_msg_index = len(channels_list.get('general').msgs.messages)-1
msg_index = max_msg_index
max_ch_index = len(channels_list.channels)-1
ch_index = 0
time.sleep(0.01)
while ser.inWaiting() > 0:
out += str(ser.read(1).decode('ascii'))
if out != '':
print("sinit>>" + out)
out = ''
while True:
while ser.inWaiting() > 0:
out += str(ser.read(1).decode('ascii'))
if out != '':
print(f"Recieved: {out}")
time.sleep(0.02)
if out.startswith('cms_up'):
out = ''
if msg_index > 0:
msg_index = msg_index - 1
text = channels_list.get_id(ch_index).msgs.messages[msg_index].text
data = f"sms_up~{users_map[channels_list.get_id(ch_index).msgs.messages[msg_index].user]}~" \
f"{text.replace('~', '``')}"
data = unicodedata.normalize('NFKD', data).encode('ASCII', 'ignore')
print(f"Sent data: {data}")
ser.write(data)
time.sleep(0.02)
elif out.startswith('cms_dn'):
out = ''
if msg_index<max_msg_index:
msg_index = msg_index + 1
text = channels_list.get_id(ch_index).msgs.messages[msg_index].text
data = f"sms_dn~{users_map[channels_list.get_id(ch_index).msgs.messages[msg_index].user]}~" \
f"{text.replace('~', '``')}"
data = unicodedata.normalize('NFKD', data).encode('ASCII', 'ignore')
print(f"Sent data: {data}")
if msg_index > max_msg_index-channels_list.get_id(ch_index).new:
channels_list.get_id(ch_index).new = 0
ser.write(data)
time.sleep(0.02)
elif out.startswith('cch_up'):
out = ''
if ch_index>0:
ch_index = ch_index - 1
else:
ch_index = max_ch_index
max_msg_index = len(channels_list.get_id(ch_index).msgs.messages) - 1
msg_index = max_msg_index
print(f"Changed channel to: {channels_list.get_id(ch_index).name}")
data = f"sch_get~{channels_list.get_id(ch_index).name}~{channels_list.get_id(ch_index).new}"
data = unicodedata.normalize('NFKD', data).encode('ASCII', 'ignore')
print(f"Sent data: {data}")
ser.write(data)
time.sleep(0.02)
elif out.startswith('cch_dn'):
out = ''
if ch_index < max_ch_index:
ch_index = ch_index + 1
else:
ch_index = 0
max_msg_index = len(channels_list.get_id(ch_index).msgs.messages) - 1
msg_index = max_msg_index
print(f"Changed channel to: {channels_list.get_id(ch_index).name}")
data = f"sch_get~{channels_list.get_id(ch_index).name}~{channels_list.get_id(ch_index).new}"
data = unicodedata.normalize('NFKD', data).encode('ASCII', 'ignore')
print(f"Sent data: {data}")
ser.write(data)
time.sleep(0.02)
elif out.startswith('cch_get'):
out = ''
slack.update_channels(channels_list)
max_ch_index = len(channels_list.channels) - 1
print(f"Changed channel to: {channels_list.get_id(ch_index).name}")
data = f"sch_get~{channels_list.get_id(ch_index).name}~{channels_list.get_id(ch_index).new}"
data = unicodedata.normalize('NFKD', data).encode('ASCII', 'ignore')
print(f"Sent data: {data}")
ser.write(data)
time.sleep(0.02)
else:
out = ''
time.sleep(0.02)
max_ch_index = len(channels_list.channels) - 1
ser.close()
exit()
| PUT-PTM/2019_SlackDisplay | Slack-Bot/slackbot.py | slackbot.py | py | 4,601 | python | en | code | 0 | github-code | 50 |
36590185495 | import pyfiglet
import os
import sys
import time
from termcolor import colored
# Clear the terminal before printing anything (assumes a POSIX shell with `clear`).
os.system("clear")
def mengetik(s, delay=0.1):
    """Print *s* one character at a time (typewriter effect), ending with a newline.

    Args:
        s: Text to print.
        delay: Pause in seconds between characters (default 0.1, matching the
            original behavior); pass 0 to print instantly.
    """
    for c in s + '\n':
        sys.stdout.write(c)
        # Flush after every character so the effect is visible despite buffering.
        sys.stdout.flush()
        time.sleep(delay)
# Greet the user with the typewriter effect, then clear the screen
# before entering the main menu loop.
mengetik(colored(">> Halo, selamat datang di program kami","green"))
mengetik(colored(">> Selamat menggunakan!","green"))
os.system("clear")
# Print the banner with the text "IPAS" (done at the top of the loop below).
# Loop sentinel: the main menu repeats while the user keeps answering "y".
pilihan = "y"
while pilihan == "y":
banner = pyfiglet.figlet_format(" IPAS")
print(banner)
print(colored("\t ~ Semoga Bisa Membantu ~ ","cyan"))
print("\n>> Silahkan pilih rumus yang akan digunakan!\n")
print("[1] Energi Kinetik")
print("[2] Energi Potensial")
print("[3] Hubungan Usaha Dengan Energi Kinetik")
print("[4] Hubungan Usaha Dengan Energi Potensial")
print("[5] Hukum Kekekalan Energi")
print("[6] Kalor")
print("[7] Program Konversi Temperatur")
print("[0] Keluar Program\n")
pilih = int(input("Pilih Nomor: "))
os.system("clear")
if pilih == 3:
mengetik(">>> Hubungan Usaha Dengan Energi Kinetik \n")
print("Silahkan pilih akan menghitung apa!\n")
print("[1] Menghitung Usaha(W)")
print("[2] Menghitung Jarak(S)")
print("[3] Menghitung Gaya (F)\n")
p = int(input("Pilih Nomor: "))
os.system("clear")
if p == 1:
mengetik(">>> Hubungan Usaha Dengan Energi Kinetik \n")
mengetik(">> Menghitung Usaha(W) \n")
m = float(input("Dik: M: "))
v1 = float(input(" V1: "))
v2 = float(input(" V2: "))
mengetik("Dit: W = ...?")
mengetik("jwb: W = ½mv2² - ½mv1²")
mengetik(" W = ½ x "+str(m)+" x ( "+str(v2)+" )² - ½ x "+str(m)+" x ( "+str(v1)+" )²")
v22 = v2**2
v12 = v1**2
mengetik(" W = ½ x "+str(m)+" x ( "+str(v22)+" ) - ½ x "+str(m)+" x ( "+str(v12)+" )")
ek2 = 1/2*m*(v22)
ek1 = 1/2*m*(v12)
mengetik(" W = "+str(ek2)+" - "+str(ek1))
jumlah = ek2 - ek1
mengetik(" W = "+str(jumlah)+" J\n")
elif p == 2:
mengetik(">>> Hubungan Usaha Dengan Energi Kinetik \n")
mengetik(">> Menghitung Jarak(S)\n")
m = float(input("Dik: M: "))
v1 = float(input(" V1: "))
v2 = float(input(" V2: "))
f = float(input(" F: "))
mengetik("Dit: S = ...?")
mengetik("jwb: S = ½mv2² - ½mv1² / f ")
v22 = v2**2
v12 = v1**2
mengetik(" S = ½ x "+str(m)+" x ( "+str(v22)+" ) - ½ x "+str(m)+" x ( "+str(v12)+" ) / "+str(f))
ek2 = 1/2*m*(v22)
ek1 = 1/2*m*(v12)
mengetik(" S = "+str(ek2)+" - "+str(ek1)+" / "+str(f))
ek2_ek1 = ek2 - ek1
mengetik(" S = "+str(ek2_ek1)+" / "+str(f))
jumlah = ek2_ek1 / f
mengetik(" S = "+str(jumlah)+" M\n")
elif p == 3:
print(">>> Hubungan Usaha Dengan Energi Kinetik |\n")
print(">> Menghitung Gaya(F)\n")
m = float(input("Dik: M: "))
v1 = float(input(" V1: "))
v2 = float(input(" V2: "))
s = float(input(" S: "))
print("Dit: F = ...?")
print("jwb: F = ½mv2² - ½mv1² / s ")
print(" F = ½",m,"x (",v2**2,") - ½ x",m,"x (",v1**2,")","/",s)
print(" F =",1/2*m*(v2**2),"-",1/2*m*(v1**2),"/",s)
print(" F =",1/2*m*(v2**2)-1/2*m*(v1**2),"/",s)
print(" F =",(1/2*m*(v2**2)-1/2*m*(v1**2))/s,"N \n")
else:
mengetik(colored("Error : Inputkan angka yang benar...\n","red"))
if pilih == 6:
mengetik(">> Kalor\n")
print("[1] Rumus Kalor Jenis")
print("[2] Azas Black\n")
kalor = int(input("Pilih Nomer: "))
os.system("clear")
if kalor == 1:
mengetik(">> Menentukan Kalor Jenis")
print(colored('''
Rumus Kalor Jenis Q = m . c . 𝚫T
Dengan
Q : Kalor (J)
m : Massa benda (kg)
c : Kalor jenis (J/kg°C)
∆T : Perubahan suhu (°C)
''',"green"))
m = float(input("Dik : m = "))
q = float(input(" : Q = "))
t1 = float(input(" :∆T = T1 = "))
t2 = float(input(" T2 = "))
mengetik("Dit : c =...?")
mengetik("Jwb : c = Q/m.∆T")
mengetik(" c = "+ str(q)+" J / "+str(m)+" kg × "+str(t2-t1)+" °C")
t2t1 = t2-t1
mengetik(" c = "+str(q)+" J / "+str(m*t2t1)+" °C")
mtt = m*t2t1
mengetik(" c = "+str(q/mtt)+" J/kg°c\n")
lan = input("Apakah anda ingin menentukan kapasitas kalornya y/n = ")
os.system("clear")
if lan == "y":
mengetik(">> Menetukan Kapasitas Kalor")
print(colored('''
Untuk menghitung kapasitas kalor kita gunakan rumus
C = m . c
''',"green"))
m = float(input("Dik : m = "))
c = float(input(" : c = "))
mengetik("Dit : C = ...?")
mengetik("Jwb : C = m . c")
mengetik(" C = "+str(m)+" kg x "+str(c)+" J/kg°C")
mengetik(" C = "+str(m*c)+" J/°C\n")
pilihan = input(" Apakah anda ingin menggunakan rumus yang lainnya y/n = ")
os.system("clear")
elif kalor == 2:
mengetik(">> Azas Black\n")
m1 = float(input("Dik : m1 = "))
t1 = float(input(" T1 = "))
t2 = float(input(" T2 = "))
tc = float(input(" Tc = "))
c1 = float(input(" c1 = "))
c2 = float(input(" c2 = "))
mengetik("\nDit : m2 = ...?\n")
mengetik("Jwb : Qlepas = Qterima")
mengetik(" m2c2∆T2 = m1c1∆T1")
mengetik(" m2 x "+str(c2)+" x (T2 - Tc) = "+ str(m1)+" x "+str(c1)+" x (Tc - T1)")
mengetik(" m2 x "+str(c2)+" x ("+str(t2)+" - "+str(tc)+") = "+str(m1)+" x "+str(c1)+" x ("+str(tc)+" - ("+str(t1)+"))")
mengetik(" m2 x "+str(c2*(t2-tc))+" = "+str(m1*c1*(tc-t1)))
has1 = c2*(t2-tc)
has2 = m1*c1*(tc-(t1))
mengetik(" m2 = "+str(has2/has1)+"\n")
mengetik(colored("Semoga bisa membantu!","green"))
pilihan = input(colored("\nApakah anda ingin menggunakan \nrumus yang lainnya y/n = ","green"))
os.system("clear")
else:
mengetik(colored("Error : Inputkan angka yang benar...\n","red"))
elif pilih == 1:
mengetik(">> Energi Kinetik ")
print(colored('''
Rumus energi kinetik Ek = 1/2 m.v2
Keterangan:
Ek = Energi kinetik (J)
m = Massa benda (kg)
v = Kecepatan atau laju benda (m/s)
''',"green"))
m = float(input("Dik: M = "))
v = float(input(" V = "))
mengetik("Dit: Ek = ...?")
mengetik("Jwb: Ek = ½mv²")
mengetik(" = ½ x "+str(m)+" kg x ("+str(v)+")² m/s")
a = 1/2*m
b = v**2
mengetik(" = "+str(a)+" x ("+str(b)+")")
c = a * b
mengetik(" = "+str(c)+" J\n")
mengetik(colored("Semoga bisa membantu!","green"))
pilihan = input("\nApakah anda ingin menggunakan rumus yang lainnya y/n = ")
os.system("clear")
elif pilih == 2:
mengetik(">> Energi Potensial")
print(colored('''
Rumus energi potensial EP = m . g . h
Keterangan:
EP = Energi potensial (J)
m = Massa benda (kg)
h = Ketinggian benda (m)
g = Gravitasi (m/s²)
''',"green"))
m = float(input("Dik: M = "))
g = float(input(" G = "))
h = float(input(" H = "))
mengetik("Dit: Ep = ...?")
mengetik("Jwb: Ep = m.g.h")
mengetik(" = "+str(m)+" kg x "+str(g)+" m/s² x "+str(h)+" m")
ep = m*g*h
mengetik(" = "+str(ep)+" J\n")
mengetik("Semoga bisa membantu!\n")
pilihan = input(" Apakah anda ingin menggunakan rumus yang lainnya y/n = ")
os.system("clear")
elif pilih == 4:
mengetik(">> Hubungan Usaha Dengan Energi Potensial\n")
m = float(input("Dik: M = "))
g = float(input(" G = "))
h1 = float(input(" H1 = "))
h2 = float(input(" H2 = "))
mengetik("Dit: W = ...?")
mengetik("Jwb: W = m.g.(h2-h1)")
mengetik(" W = "+str(m)+" x "+str(g)+" x ( "+str(h2)+" - "+str(h1)+" )")
mg = m*g
h2_h1 = h2 - h1
mengetik(" W = "+str(mg)+" x ( "+str(h2_h1)+" )")
w = mg*h2_h1
mengetik(" W = "+str(w)+" J\n")
mengetik("Semoga bisa membantu!\n")
pilihan = input(" Apakah anda ingin menggunakan rumus yang lainnya y/n = ")
os.system("clear")
elif pilih == 5:
mengetik(">> Hukum Kekekalan Energi\n")
m = float(input("Dik: M: "))
g = float(input(" G: "))
v1 = float(input(" V1: "))
h1 = float(input(" H1: "))
h2 = float(input(" H2: "))
mengetik("Dit: V2 = ...?")
mengetik("Jwb: EM1 = EM2")
mengetik(" EP1 + EK1 = EP2 + EK2")
mengetik(" mgh1 + ½mv1² = mgh2 + ½mv2²")
mengetik(" "+str(m)+" x "+str(g)+" x "+str(h1)+" + ½ x "+str(m)+" x ("+str(v1)+")² = "+str(m)+" x "+str(g)+" x "+str(h2)+" + ½ x "+str(m)+" x v2²")
mgh1 = m*g*h1
mv1 = 1/2*m*v1**2
mgh2 = m*g*h2
m2 = 1/2*m
mengetik(" "+str(mgh1)+" + "+str(mv1)+" = "+str(mgh2)+" + "+str(m2)+" v2²")
mengetik(" "+str(mgh1)+" - "+str(mgh2)+" = "+str(m2)+" v2²")
mgh1_mgh2 = m*g*h1 - m*g*h2
mengetik(" "+str(mgh1_mgh2)+" / "+str(m2))
a = m*g*h1 - m*g*h2
b = 1/2*m
c = a/b
mengetik(" V2² = √"+str(c))
hasil = c**(1/2)
mengetik(" V2 = "+str(hasil)+" m/s\n")
mengetik("Semoga bisa membantu!\n")
pilihan = input(" Apakah anda ingin menggunakan rumus yang lainnya y/n = ")
os.system("clear")
if pilih == 7 :
print("PROGRAM KONVERSI TEMPERATUR\n")
print("[1] Suhu dalam celcius")
print("[2] Suhu dalam kelvin")
print("[3] Suhu dalam fahrenheit")
print("[4] Suhu dalam reamur\n")
konversi = int(input("Pilih Nomer: "))
if konversi == 1 :
celcius = float(input("\nMasukan suhu dalam celcius = "))
print("")
print(colored("Suhu dalam kelvin","green"))
kelvin = 273 + celcius
print(celcius,"°C =",kelvin,"°K")
print("T °K = 273 + T °C")
print(" = 273 + ",celcius)
print(" =",kelvin,"°K \n")
print(colored("Suhu dalam fahrenheit","green"))
fahrenheit = (9/5 * celcius) + 32
print(celcius,"°C =",fahrenheit,"°F")
print("T °F = (9/5 * T °C) + 32")
print(" = (9/5 *",celcius,") + 32")
fah = (9/5 * celcius)
print(" =",fah,"+ 32")
print(" =",fahrenheit,"°F\n")
print(colored("Suhu dalam reamur ","green"))
reamur = 4/5 * celcius
print(celcius,"°C =",reamur,"°R")
print("T °R = 4/5 * T °C")
print(" = 4/5 *",celcius)
print(" =",reamur,"°R")
pilihan = input(colored("\nApakah anda ingin menggunakan \nrumus yang lainnya y/n = ","green"))
os.system("clear")
elif konversi == 2 :
kelvin = float(input("\nMasukan suhu dalam kelvin = "))
print("")
celcius = kelvin - 273
print(kelvin,"°K =",celcius,"C")
print("T °C = T °K - 273")
print(" =",kelvin,"- 273")
print(" =",celcius,"°K \n")
fahrenheit = (9/5 * celcius) + 32
print(celcius,"°C =",fahrenheit,"°F")
print("T °F = (9/5 * T °C) + 32")
print(" = (9/5 *",celcius,") + 32")
fah = 9/5 * celcius
print(" = ",fah,"+ 32")
print(" =",fahrenheit,"°F\n")
reamur = 4/5 * celcius
print(celcius,"°C =",reamur,"°R")
print("T °R = 4/5 * T °C")
print(" = 4/5 *",celcius)
print(" =",reamur,"°R")
pilihan = input(colored("\nApakah anda ingin menggunakan \nrumus yang lainnya y/n = ","green"))
os.system("clear")
elif konversi == 3 :
fahrenheit = float(input("\nMasukan suhu dalam fahrenheit = "))
print("")
celcius = 5/9 * (fahrenheit - 32)
print(fahrenheit,"°F =",celcius,"°C")
print("T °C = 5/9 * (T °F - 32)")
print(" = 5/9 * (",fahrenheit,"- 32)")
fah = fahrenheit - 32
print(" = 5/9 *",fah)
print(" =",celcius,"°C \n")
kelvin = 273 + celcius
print(celcius,"°C =",kelvin,"°K")
print("T °K = 273 + T °C")
print(" = 273 +",celcius)
print(" =",kelvin,"°K \n")
reamur = 4/5 * celcius
print(celcius,"°C =",reamur,"°R")
print("T °R = 4/5 * T °C")
print(" = 4/5 *",celcius)
print(" =",reamur,"°R")
pilihan = input(colored("\nApakah anda ingin menggunakan \nrumus yang lainnya y/n = ","green"))
os.system("clear")
elif konversi == 4 :
reamur = float(input("\nMasukan suhu dalam reamur = "))
print("")
celcius = 5/4 * reamur
print(reamur,"°R =",celcius,"°C")
print("T °C = 5/4 * T °R")
print(" = 5/4 *",reamur)
print(" =",celcius,"°C \n")
kelvin = 273 + celcius
print(celcius,"°C =",kelvin,"°K")
print("T °K = 272 + T °C")
print(" = 273 +",celcius)
print(" =",kelvin,"°K\n")
fahrenheit = (9/5 * celcius) + 32
print(celcius,"°C =",fahrenheit,"°F")
print("T °F = (9/5 * T °C) + 32")
print(" = (9/5 *",celcius,") + 32")
fah = 9/5 * celcius
print(" =",fah,"+ 32")
print(" =",fahrenheit,"°F")
pilihan = input(colored("\nApakah anda ingin menggunakan \nrumus yang lainnya y/n = ","green"))
os.system("clear")
else:
mengetik(colored("Error : Inputkan angka yang benar...\n","red"))
elif pilih == 0:
mengetik(colored("Terima kasih telah menggunakan programnya. Semoga hari Anda menyenangkan! \n ","green"))
break
else:
mengetik(colored("Error : Inputkan angka yang benar...\n","red"))
| RyanCod3/ImplementasiIPAS | IPAS.py | IPAS.py | py | 13,058 | python | id | code | 1 | github-code | 50 |
19769392333 | revision = '3b866be530cb'
down_revision = '802322a84154'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
alembic.op.add_column('clips',
sqlalchemy.Column('deleted', sqlalchemy.Boolean, nullable=False, server_default='false')
)
def downgrade():
alembic.op.drop_column('clips', 'deleted')
| mrphlip/lrrbot | alembic/versions/3b866be530cb_add_deleted_column_to_clips.py | 3b866be530cb_add_deleted_column_to_clips.py | py | 334 | python | en | code | 30 | github-code | 50 |
791611810 |
from config.scaled_yolov4_config import CFG as scaled_yolov4_cfg
class Struct(object):
"""Comment removed"""
def __init__(self, data):
for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return Struct(value) if isinstance(value, dict) else value
def cfg_to_struct(train_cfgs):
args_list = []
for train_cfg in train_cfgs:
train_args = Struct(train_cfg)
if train_args.model_name=='scaled_yolov4':
scaled_yolov4_args = Struct(scaled_yolov4_cfg)
args_list.append((train_args,scaled_yolov4_args))
return args_list
| wangermeng2021/EfficientDet-tensorflow2 | utils/common.py | common.py | py | 793 | python | en | code | 10 | github-code | 50 |
20123904709 | import networkx as nx
import matplotlib.pyplot as plt
from networkx import jaccard_coefficient
import relate_code.util.filepath as fp
import relate_code.util.NMI as nmi
import relate_code.util.modularity as md
import math
import relate_code.util.tools as tools
import relate_code.util.lfrTools as lfrtool
import networkx as nx
from networkx.algorithms import community
from networkx.algorithms.community import greedy_modularity_communities
def Run_FN(G):
G_1 = G.copy()
result_FN = community.greedy_modularity_communities(G_1)
result = []
for c in (result_FN):
result.append(list(c))
return result
def main(name):
G = nx.read_gml(fp.getDataFilePath(name), label="id")
res = Run_FN(G)
# print(name + ":")
# print(result)
param = min(G.nodes())
NMI_value = nmi.cal_nmi(name, res, G)
mod = md.cal_Q(res, G)
print(name + ":NMI = " + str(NMI_value))
print(name + ":Q = " + str(mod))
def main_LFR(name):
list1 =["5000"]
for N in list1:
list = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]
for MU in list:
G = lfrtool.getNetwork(N, MU)
G_copy = G.copy()
res = Run_FN(G_copy)
# print(name + ":")
# print(result)
param = min(G.nodes())
NMI_value = lfrtool.LFR_nmi(N, MU, res, G)
mod = md.cal_Q(res, G)
print(name + "N = " + str(N) + "MU = " + str(MU) + ":NMI = " + str(NMI_value))
print(name + "N = " + str(N) + "MU = " + str(MU) + ":Q = " + str(mod))
if __name__ == '__main__':
# networkx = ['karate','football','dolphins','polbooks']
# for name in networkx:
# main(name)
name = "LFRz: "
main_LFR(name)
| wuhen15/community_dection | relate_code/testFN.py | testFN.py | py | 1,721 | python | en | code | 0 | github-code | 50 |
29285372245 | def solution(n, words):
answer = [0, 0]
past_lst = set([words[0]])
mod = [n] + [i for i in range(1, n)]
length = len(words)
for i in range(length - 1):
if words[i][-1] != words[i + 1][0]: break
if words[i + 1] in past_lst: break
if len(words[i + 1]) < 2: break
past_lst.add(words[i + 1])
if len(past_lst) != length:
answer = [mod[(i + 2) % n], (i + 1) // n + 1]
return answer | osj3474/Algorithm-Practice | BackToBasic/pro_english.py | pro_english.py | py | 446 | python | en | code | 1 | github-code | 50 |
38660861523 | from db_connect import db
from iexfinance.refdata import get_symbols
from my_enums import Exchange, StockColumn
from utils import convert_dataframe_to_document
from yfinance import Ticker
import json
import pandas as pd
def initialize_stocks():
'''Clear and initialize database.'''
# Clear db.Stocks
db.Stocks.delete_many({})
# Insert exchange, symbol, and name for stocks
for s in get_symbols():
db.Stocks.insert_one({
StockColumn.Exchange.name: s['exchange'],
StockColumn.Symbol.name: s['symbol'],
StockColumn.Name.name: s['name'],
StockColumn.Records.name: []
})
# Remove all with exchanges that are not Nasdaq or NYSE
db.Stocks.delete_many({
StockColumn.Exchange.name: {
'$nin': [
Exchange.Nasdaq.value[0],
Exchange.Nyse.value[0]
]
}
})
def update_stock_records():
'''Update all stock records from Yahoo API.'''
symbols = db.Stocks.distinct('Symbol')
for sym in symbols:
try:
stock = Ticker(sym).history(period='1y')
db.Stocks.update_one(
{StockColumn.Symbol.name: sym},
{'$set': {
StockColumn.Records.name: convert_dataframe_to_document(stock)}}
)
except:
db.Stocks.update_one(
{StockColumn.Symbol.name: sym},
{'$set': {StockColumn.Records.name: []}}
)
def delete_stocks():
'''Deletes all stocks.'''
try:
db.Stocks.delete_many({})
print('Cleared all stocks.\n')
except:
print('An error occurred when clearing stocks.\n')
def delete_stock_records():
'''Sets Records to empty list for all stocks.'''
try:
db.Stocks.update_many(
{},
{'$set': {StockColumn.Records.name: []}}
)
print('Cleared stock records.\n')
except:
print('An error occurred when clearing stock records.\n')
def print_stocks(exchange=None):
'''Print all stocks from exchange.
If no exchange is given, it prints all stocks.
Parameters
----------
exchange (optional)
'''
if exchange:
stocks = db.Stocks.find({StockColumn.Exchange.name: exchange.value})
print('\n' + exchange.name)
print('---------------------\n')
for s in stocks:
print(s[StockColumn.Symbol.name] +
' - ' + s[StockColumn.Name.name])
else:
for exchange in Exchange:
stocks = db.Stocks.find(
{StockColumn.Exchange.name: exchange.value})
print('\n' + exchange.name)
print('---------------------\n')
for s in stocks:
print(s[StockColumn.Symbol.name] +
' - ' + s[StockColumn.Name.name])
def query_as_dataframe(query_results):
'''
Parameters
----------
query_results
results from mongodb query
example - db.Stocks.find({})
Returns
-------
df
query_results as pandas DataFrame
'''
df = pd.DataFrame(list(query_results))
del df[StockColumn._id.name]
return df
def get_records_from_dataframe(df, col, value):
'''
Parameters
----------
df
pandas DataFrame
col
StockColumn name
value
value for Column lookup
Returns
-------
records_df
'''
records = df[df[col] == value][StockColumn.Records.name]
if len(records) == 1:
return pd.DataFrame(records.iloc[0])
elif len(records) == 0:
print('No stocks were matched.')
else:
print('More than one stock was matched.')
| plsloan/Stock_Analysis | db_utils.py | db_utils.py | py | 3,774 | python | en | code | 0 | github-code | 50 |
42735181219 | import torch.nn as nn
import torch
import torch.utils.data
import numpy as np
import pandas as pd
class NCFData(torch.utils.data.Dataset):
def __init__(self, features, num_item, train_mat=None, num_ng=0, is_training=None):
super(NCFData, self).__init__()
# Note that the labels are only useful when training, we thus add them in the ng_sample() function.
self.features_ps = features
self.num_item = num_item
self.train_mat = train_mat
self.num_ng = num_ng
self.is_training = is_training
self.labels = [0 for _ in range(len(features))]
def ng_sample(self):
assert self.is_training, 'no need to sampling when testing'
self.features_ng = []
for x in self.features_ps:
u = x[0]
for t in range(self.num_ng):
j = np.random.randint(self.num_item)
while (u, j) in self.train_mat:
j = np.random.randint(self.num_item)
self.features_ng.append([u, j])
labels_ps = [1 for _ in range(len(self.features_ps))]
labels_ng = [0 for _ in range(len(self.features_ng))]
self.features_fill = self.features_ps + self.features_ng
self.labels_fill = labels_ps + labels_ng
def __len__(self):
return (self.num_ng + 1) * len(self.labels)
def __getitem__(self, idx):
"""
if self.is_training:
self.ng_sample()
features = self.features_fill
labels = self.labels_fill
else:
features = self.features_ps
labels = self.labels
"""
features = self.features_fill if self.is_training else self.features_ps
labels = self.labels_fill if self.is_training else self.labels
user = features[idx][0]
item = features[idx][1]
label = labels[idx]
return user, item, label
class GMF(nn.Module):
def __init__(self, user_num, item_num, factor_num):
super(GMF, self).__init__()
self.embed_user_GMF = nn.Embedding(user_num, factor_num)
self.embed_item_GMF = nn.Embedding(item_num, factor_num)
self.predict_layer = nn.Linear(factor_num, 1)
self._init_weights_()
def _init_weights_(self):
nn.init.normal_(self.embed_user_GMF.weight, std=0.01)
nn.init.normal_(self.embed_item_GMF.weight, std=0.01)
def forward(self, user, item):
embed_user_GMF = self.embed_user_GMF(user)
embed_item_GMF = self.embed_item_GMF(item)
output_GMF = embed_user_GMF*embed_item_GMF
prediction = self.predict_layer(output_GMF)
return prediction.view(-1)
class MLP(nn.Module):
def __init__(self, user_num, item_num, factor_num, num_layers, dropout):
super(MLP, self).__init__()
self.embed_user_MLP = nn.Embedding(user_num, factor_num*(2**(num_layers-1)))
self.embed_item_MLP = nn.Embedding(item_num, factor_num*(2**(num_layers-1)))
MLP_modules = []
for i in range(num_layers):
input_size = factor_num * (2**(num_layers-i))
MLP_modules.append(nn.Dropout(p=dropout))
MLP_modules.append(nn.Linear(input_size, input_size//2))
MLP_modules.append(nn.ReLU())
self.MLP_layers = nn.Sequential(*MLP_modules)
self.predict_layer = nn.Linear(factor_num, 1)
self._init_weights_()
def __init_weights_(self):
nn.init.normal_(self.embed_user_MLP.weight, std=0.01)
nn.init.normal_(self.embed_item_MLP.weight, std=0.01)
for m in self.MLP_layers:
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.kaiming_normal_(self.predict_layer.weight, a=1, nonlinearity='sigmoid')
def forward(self, user, item):
embed_user_MLP = self.embed_user_MLP(user)
embed_item_MLP = self.embed_item_MLP(item)
interaction = torch.cat(embed_user_MLP, embed_item_MLP)
output_MLP = self.MLP_layers(interaction)
prediction = self.predict_layer(output_MLP)
return prediction
class NCF(nn.Module):
def __init__(self, user_num, item_num, factor_num, num_layers, dropout):
self.embed_user_GMF = nn.Embedding(user_num, factor_num)
self.embed_item_GMF = nn.Embedding(item_num, factor_num)
self.embed_user_MLP = nn.Embedding(user_num, factor_num*(2**(num_layers-1)))
self.embed_item_MLP = nn.Embedding(item_num, factor_num*(2**(num_layers-1)))
MLP_modules = []
for i in range(num_layers):
input_size = factor_num * (2**(num_layers-i))
MLP_modules.append(nn.Dropout(dropout))
MLP_modules.append(nn.Linear(input_size, input_size//2))
MLP_modules.append(nn.ReLU())
self.MLP_layers = nn.Sequential(*MLP_modules)
self.predict_layer = nn.Linear(factor_num*2, 1)
self._init_weights_()
def _init_weights_(self):
nn.init.normal_(self.embed_user_GMF, std=0.01)
nn.init.normal_(self.embed_item_GMF, std=0.01)
nn.init.normal_(self.embed_user_MLP, std=0.01)
nn.init.normal_(self.embed_item_MLP, std=0.01)
for m in self.MLP_layers:
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.kaiming_normal_(self.predict_layer.weight, a=1, nonlinearity='sigmoid')
def forward(self, user, item):
embed_user_GMF = self.embed_user_GMF(user)
embed_item_GMF = self.embed_item_GMF(item)
output_GMF = embed_user_GMF * embed_item_GMF
embed_user_MLP = self.embed_user_MLP(user)
embed_item_MLP = self.embed_item_MLP(item)
interaction = torch.cat((embed_user_MLP, embed_item_MLP), -1)
output_MLP = self.MLP_layers(interaction)
concat = torch.cat((output_GMF, output_MLP), -1)
prediction = self.predict_layer(concat)
return prediction.view(-1)
# loading dataset function
def load_dataset(test_num=100):
train_data = pd.read_csv("../DataSets/ncf/")
| yijianfenghou/PyRecommendationSystem | NCF/NCF_pytorch.py | NCF_pytorch.py | py | 6,076 | python | en | code | 2 | github-code | 50 |
38106581534 | #libreria para visualizar la interfaz
import pygame
# Colores del tablero de ajedrez
NEGRO = (0, 0, 0)
BLANCO = (255, 255, 255)
CAFE = (128,64,0)
REINA = (234,190,63)
# Tamaño de la celda
LARGO = 20
ALTO = 20
# Margen entre las celdas.
MARGEN = 5
grid = []
for fila in range(8):
grid.append([])
for columna in range(8):
grid[fila].append(0)
def analizandolados(grid,fila,columna):
tamaño = len(grid)
#izquierda
for i in range(columna):
if(grid[fila][i] == 1):
return False
#derecha
for i in range(columna,tamaño,1):
if(grid[fila][i] == 1):
return False
#diagonal superior izquierda
for f,c in zip(range(fila,-1,-1), range(columna,-1,-1)):
if(grid[f][c] == 1):
return False
#diagonal superior derecha
for f,c in zip(range(fila,-1,-1), range(columna,tamaño,1)):
if(grid[f][c] == 1):
return False
#diagonal inferior izquierda
for f,c in zip(range(fila,tamaño,1), range(columna,-1,-1)):
if(grid[f][c] == 1):
return False
#diagonal inferior derecha
for f,c in zip(range(fila,-1,-1), range(columna,tamaño,1)):
if(grid[f][c] == 1):
return False
return True
#Verifica si la columna ya tiene o no una Reina
def columnaLlena(grid,columna):
tamaño = len(grid)
for i in range(tamaño):
if(grid[i][columna] == 1):
return True
return False
def acomodandoReinas(grid, columna):
tamaño = len(grid)
if(columna >= tamaño):
return True
if(columnaLlena(grid,columna) == True):
if(acomodandoReinas(grid, columna + 1) == True):
return True
for i in range(tamaño):
if(analizandolados(grid,i,columna)):
grid[i][columna] = 1
if(acomodandoReinas(grid,columna + 1) == True):
return True
grid[i][columna] = 0
grid[i][columna] = 0
return False
# Colocando la primera reina
grid[0][0] = 1
acomodandoReinas(grid,0)
# Inicializamos pygame
pygame.init()
# Dimenciones de la ventana
DIMENSION_VENTANA = [215, 215]
pantalla = pygame.display.set_mode(DIMENSION_VENTANA)
# Título de la pantalla.
pygame.display.set_caption("8 Reinas")
# Como se visualiza la pantalla
reloj = pygame.time.Clock()
def main():
# Bandera de salida
hecho = False
while not hecho:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
hecho = True
# Fondo de pantalla.
pantalla.fill(CAFE)
# Dibujando el tablero de ajedrez
for fila in range(8):
for columna in range(8):
# if(grid[fila][columna] == 2):
# color = NEGRO
color = BLANCO
if grid[fila][columna] == 1:
color = REINA
pygame.draw.rect(pantalla,
color,
[(MARGEN+LARGO) * columna + MARGEN,
(MARGEN+ALTO) * fila + MARGEN,
LARGO,
ALTO])
# 60 fotogramas por segundo.
reloj.tick(60)
# Muestra la pantalla con lo que se haya dibujado.
pygame.display.flip()
pygame.quit()
main()
# 664 655 2242 | ArmandoRamirezCarrillo/reinasGui | queenGui.py | queenGui.py | py | 3,417 | python | es | code | 0 | github-code | 50 |
24963214631 | # F1 -> ao usuário e retorna a resposta do usuário
# F2 -> receberá um dicionário e insere um objeto dentro do dic
# Pesquisar -> recebe o dic e a chave, preenche uma lista
# com o resultado da pesquisa (get()), verifica se não está
# vazio ( != diferente ). Caso seja true, exibe os dados.
# ---- % ----
# Primeira posição (zero) -> nome do usuário
# Segunda posiação (um) -> última data de acesso
# Terceira posição (dois) -> última estação acessada
# ---- % ----
# Excluir -> recebe o dic de onde o objeto será excluído
# e a chave do objeto que deseja excluir.
# Antes da exclusão, deve verificar se a chave existe (get())
# se será retornado algo diferente de vazio
# Caso seja true, invoca o comando "del"
# ---- % ----
# Listar -> precisa apenas do dic que contém os dados a exibir
# montar um foreach utilizando dois valores (chave e valor) podendo
# dar uma saída mais "clean"
# ---- % ----
def perguntar():
resposta = input("O que deseja realizar?" +
"<I> - Para Inserir um usuário" +
"<P> - Para Pesquisar um usuário" +
"<E> - Para Excluir um usuário" +
"<L> - Para Listar um usuário: ").upper()
return resposta
def inserir(dicionario):
dicionario[input("Digite o login: ").upper()] = [input("Digite o nome: ").upper(),
input("Digite a última data de acesso: "),
input("Qual a última estação acessada: ").upper()]
def pesquisar(dicionario, chave):
lista=dicionario.get(chave)
if lista != None:
print("Nome...........: " + lista[0])
print("Último acesso..: " + lista[1])
print("Última estação.: " + lista[2])
def excluir(dicionario, chave):
if dicionario.get(chave)!= None:
del dicionario[chave]
print("Objeto Eliminado")
def listar(dicionario):
for chave, valor in dicionario.items():
print("Objeto...:")
print("Login....: ", chave)
print("Dados....: ", valor) | bielzfreitas/Exercicios-Python | Funcoes/Funcoes_Dicionarios.py | Funcoes_Dicionarios.py | py | 2,033 | python | pt | code | 0 | github-code | 50 |
27058482527 | from django.urls import path
from . import views
app_name = "administration"
urlpatterns = [
path('', views.home_view, name='home'),
path('register', views.register_request, name='register'),
path('login', views.login_request, name='login'),
path('logout', views.logout_request, name='logout'),
path('profile', views.profile_request, name='profile'),
]
| rajatnai49/PRAVAS | administration/urls.py | urls.py | py | 376 | python | en | code | 0 | github-code | 50 |
74851069916 | from __future__ import division, print_function
from six.moves import zip, map
from six import string_types
import warnings
import os
import sys
import gc
import fnmatch
import time
import json
from datetime import datetime
from collections import OrderedDict
if sys.version_info.major == 2:
try:
from numap.NuMap import NuMap
except ImportError as e:
print('parallel processing not available (NuMap missing)')
print(repr(e))
else:
print('parallel processing not available (NuMap not supported on Python 3 yet)')
import numpy as np
import auromat.fits
from auromat.coordinates.geodesic import wgs84A, wgs84B
from auromat.coordinates.intersection import ellipsoidLineIntersects
from auromat.mapping.mapping import BaseMappingProvider, FileImageMixin,\
sanitize_data, ArrayImageMixin
from auromat.mapping.astrometry import BaseAstrometryMapping,\
ImageMaskAstrometryMixin
from auromat.util.decorators import lazy_property, inherit_docs
from auromat.coordinates.ephem import EphemerisCalculator
@inherit_docs
class SpacecraftMappingProvider(BaseMappingProvider):
def __init__(self, imageSequenceFolder, wcsFolder=None, imageFileExtension=None, timeshift=None,
noradId=None, tleFolder=None, spacetrack=None, altitude=110, maxTimeOffset=3,
sequenceInParallel=False, fastCenterCalculation=False):
"""
:param imageSequenceFolder: folder path or a list of image file paths
:param wcsFolder: folder path or a list of wcs file paths;
optional if imageSequenceFolder is a folder path and contains
the wcs files
"""
BaseMappingProvider.__init__(self, maxTimeOffset=maxTimeOffset)
if wcsFolder is None:
assert not isinstance(imageSequenceFolder, list),\
'The wcsFolder parameter is required if imageSequenceFolder is a list'
wcsFolder = imageSequenceFolder
if isinstance(imageSequenceFolder, list) and isinstance(wcsFolder, list):
self.imagePaths = imageSequenceFolder
self.wcsPaths = wcsFolder
self._imageFileExtension = os.path.splitext(self.imagePaths[0])[1][1:]
self._checkEachWcsHasOneImage()
self._sortByDate()
elif not isinstance(imageSequenceFolder, list) and not isinstance(wcsFolder, list):
self.imageSequenceFolder = imageSequenceFolder
self.wcsFolder = wcsFolder
self._imageFileExtension = imageFileExtension
self.reload()
else:
raise ValueError('imageSequenceFolder and wcsFolder must be both path lists or folder paths')
self.timeshift = timeshift
self.noradId = noradId
self.tleFolder = tleFolder
self.spacetrack = spacetrack
self.altitude = altitude
self.fastCenterCalculation = fastCenterCalculation
metadataPath = os.path.join(os.path.dirname(self.imagePaths[0]), 'metadata.json')
if os.path.exists(metadataPath):
with open(metadataPath, 'r') as fp:
self.metadata = json.load(fp, object_hook=_parseDates)
else:
self.metadata = None
self._sequenceInParallel = sequenceInParallel
def __len__(self):
return len(self.wcsPaths)
def reload(self):
"""
Refresh to current disk state if imageSequenceFolder and wcsFolder
are folders instead of file path lists.
"""
wcsFilenames = os.listdir(self.wcsFolder)
wcsPaths = [os.path.join(self.wcsFolder, f) for f in wcsFilenames]
self.wcsPaths = fnmatch.filter(wcsPaths, '*.wcs')
imageFilenames = os.listdir(self.imageSequenceFolder)
imagePaths = [os.path.join(self.imageSequenceFolder, f) for f in imageFilenames]
try:
self.imagePaths = fnmatch.filter(imagePaths, '*.' + self.imageFileExtension)
except ValueError:
self.imagePaths = []
self.wcsPaths = []
self._checkEachWcsHasOneImage()
self._sortByDate()
def _checkEachWcsHasOneImage(self):
wcsFilenames = map(os.path.basename, self.wcsPaths)
ids = [os.path.splitext(f)[0] for f in wcsFilenames]
imageFilenames = list(map(os.path.basename, self.imagePaths))
imageIds = list(filter(lambda id_: id_ + '.' + self.imageFileExtension in imageFilenames, ids))
assert len(imageIds) == len(ids), 'image ids: ' + str(imageIds) + '; wcs ids: ' + str(ids)
self.ids = ids
def _sortByDate(self):
dates = {auromat.fits.getShiftedPhotoTime(auromat.fits.readHeader(p)): (p, id_)
for p, id_ in zip(self.wcsPaths, self.ids)}
dates = OrderedDict(sorted(dates.items(), key=lambda k_v: k_v[0]))
self.dates = dates.keys()
self.wcsPaths = [p for p,_ in dates.values()]
self.ids = [id_ for _,id_ in dates.values()]
@property
def imageFileExtension(self):
""" e.g. 'jpg' """
if self._imageFileExtension is None:
# try to find extension ourselves
imageFilenames = os.listdir(self.imageSequenceFolder)
wcsFilenames = fnmatch.filter(os.listdir(self.wcsFolder), '*.wcs')
if self.imageSequenceFolder == self.wcsFolder:
imageFilenames = set(imageFilenames) - set(wcsFilenames)
for wcsFilename in wcsFilenames:
fileBase = os.path.splitext(wcsFilename)[0]
matches = fnmatch.filter(imageFilenames, fileBase + '.*')
if len(matches) == 1:
self._imageFileExtension = os.path.splitext(matches[0])[1][1:]
break
elif len(matches) > 1:
raise ValueError('Image file extension not given but multiple candidates exist: ' + str(matches))
if self._imageFileExtension is None:
raise ValueError('Image file extension could not be determined. Make sure that there exists at least ' +
'one .wcs file and a corresponding image with the same filename base.')
return self._imageFileExtension
@property
def range(self):
return self.dates[0], self.dates[-1]
@property
def unsolvedIds(self):
imageFilenames = map(os.path.basename, self.imagePaths)
imageIds = [os.path.splitext(f)[0] for f in imageFilenames]
unsolvedIds = filter(lambda id_: id_ not in self.ids, imageIds)
return sorted(unsolvedIds)
def _getIdxWithOffset(self, date):
idx = auromat.utils.findNearest(self.dates, date)
offset = abs(self.dates[idx]-date).total_seconds()
return idx, offset
def contains(self, date):
_, offset = self._getIdxWithOffset(date)
return offset <= self.maxTimeOffset
def get(self, date):
idx, offset = self._getIdxWithOffset(date)
if offset > self.maxTimeOffset:
raise ValueError('No image found')
identifier = self.ids[idx]
imagePath = os.path.join(self.imageSequenceFolder, identifier + '.' + self.imageFileExtension)
wcsPath = self.wcsPaths[idx]
if self.metadata:
metadata = dict(list(self.metadata['sequence_metadata'].items()) +
list(self.metadata['image_metadata'][identifier].items()))
else:
metadata = None
mapping = getMapping(imagePath, wcsPath, self.timeshift,
self.noradId, self.tleFolder, self.spacetrack,
altitude=self.altitude, fastCenterCalculation=self.fastCenterCalculation,
metadata=metadata)
return mapping
def getById(self, identifier):
matchedIds = filter(lambda id_: identifier in id_, self.ids)
assert len(matchedIds) == 1, 'Ambiguous identifier: ' + str(matchedIds)
identifier = matchedIds[0]
idx = self.ids.index(identifier)
return self.get(self.dates[idx])
def getSequence(self, dateBegin=None, dateEnd=None):
assert dateBegin is None and dateEnd is None, 'Date ranges not supported'
try:
self.imageFileExtension
except ValueError as e:
warnings.warn(str(e) + ' Returning empty sequence.')
return []
imagePaths = [os.path.join(self.imageSequenceFolder, id_ + '.' + self.imageFileExtension) for id_ in self.ids]
if self.metadata:
seqmeta = list(self.metadata['sequence_metadata'].items())
metadatas = [dict(seqmeta + list(self.metadata['image_metadata'][k].items())) for k in self.ids]
else:
metadatas = None
return getMappingSequence(imagePaths, self.wcsPaths, metadatas=metadatas,
timeshift=self.timeshift, noradId=self.noradId,
tleFolder=self.tleFolder, spacetrack=self.spacetrack,
altitude=self.altitude, fastCenterCalculation=self.fastCenterCalculation,
parallel=self._sequenceInParallel)
@inherit_docs
class SpacecraftMappingPathProvider(BaseMappingProvider):
def __init__(self, imagePaths, wcsPaths, metadataPath=None, timeshift=None,
noradId=None, tleFolder=None, spacetrack=None, altitude=110, maxTimeOffset=3,
sequenceInParallel=False, fastCenterCalculation=False):
BaseMappingProvider.__init__(self, maxTimeOffset=maxTimeOffset)
assert len(imagePaths) == len(wcsPaths)
self.imagePaths, self.wcsPaths = self._sortByDate(imagePaths, wcsPaths)
self.timeshift = timeshift
self.noradId = noradId
self.tleFolder = tleFolder
self.spacetrack = spacetrack
self.altitude = altitude
self.sequenceInParallel = sequenceInParallel
self.fastCenterCalculation = fastCenterCalculation
if metadataPath and os.path.exists(metadataPath):
with open(metadataPath, 'r') as fp:
self.metadata = json.load(fp, object_hook=_parseDates)
else:
self.metadata = None
def __len__(self):
return len(self.wcsPaths)
@staticmethod
def _sortByDate(imagePaths, wcsPaths):
def date(wcsPath_imagePath):
wcsPath = wcsPath_imagePath[0]
wcsHeader = auromat.fits.readHeader(wcsPath)
return auromat.fits.getPhotoTime(wcsHeader)
paths = sorted(zip(wcsPaths, imagePaths), key=date)
wcsPaths = [wcsPath for wcsPath, _ in paths]
imagePaths = [imagePath for _, imagePath in paths]
return imagePaths, wcsPaths
@property
def imageFileExtension(self):
return os.path.splitext(self.imagePaths[0])[1][1:]
@property
def range(self):
fromDate = getMapping(self.imagePaths[0], self.wcsPaths[0]).photoTime
toDate = getMapping(self.imagePaths[-1], self.wcsPaths[-1]).photoTime
return fromDate, toDate
def contains(self, date):
raise NotImplementedError
def get(self, date):
# TODO implement provider access by date
raise NotImplementedError
def getById(self, identifier):
raise NotImplementedError
def getSequence(self, dateBegin=None, dateEnd=None):
assert dateBegin is None and dateEnd is None, 'Date ranges not supported'
if self.metadata:
keys = [os.path.splitext(os.path.basename(p))[0] for p in self.imagePaths]
seqmeta = list(self.metadata['sequence_metadata'].items())
metadatas = [dict(seqmeta + list(self.metadata['image_metadata'][k].items())) for k in keys]
else:
metadatas = None
return getMappingSequence(self.imagePaths, self.wcsPaths,
timeshift=self.timeshift, noradId=self.noradId,
tleFolder=self.tleFolder, spacetrack=self.spacetrack,
altitude=self.altitude,
parallel=self.sequenceInParallel,
fastCenterCalculation=self.fastCenterCalculation,
metadatas=metadatas)
def _getMappingSequenceArgs(imagePathsOrArrays, wcsPaths, timeshift=None,
noradId=None, tleFolder=None, spacetrack=None, altitude = 110,
fastCenterCalculation=False, metadatas=None):
if not metadatas:
metadatas = [{}] * len(wcsPaths)
return (dict(imagePathOrArray=imagePathOrArray, wcsPathOrHeader=wcsPath,
timeshift=timeshift, noradId=noradId,
tleFolder=tleFolder, spacetrack=spacetrack,
altitude=altitude,
fastCenterCalculation=fastCenterCalculation,
metadata=metadata)
for imagePathOrArray, wcsPath, metadata in zip(imagePathsOrArrays, wcsPaths, metadatas))
def getMappingSequence(imagePathsOrArrays, wcsPaths, metadatas=None, timeshift=None,
                       noradId=None, tleFolder=None, spacetrack=None, altitude = 110,
                       parallel=False, fastCenterCalculation=False):
    """
    Returns a generator of SpacecraftMapping objects
    for all images in 'imageSequenceFolder' which have a solution in 'wcsFolder'.
    The order corresponds to the sorted filenames in 'wcsFolder'.
    :param iterable imagePathsOrArrays:
    :param list wcsPaths:
    :param metadatas: optional list of per-mapping metadata dicts
    :param bool parallel: if True, compute mappings in a worker process
    """
    mappingArgsArr = _getMappingSequenceArgs(imagePathsOrArrays, wcsPaths,
                                             timeshift, noradId, tleFolder, spacetrack,
                                             altitude, fastCenterCalculation,
                                             metadatas=metadatas)
    if parallel:
        return _getMappingsParallel(mappingArgsArr)
    else:
        def mappingFromKw(kw):
            mapping = getMapping(**kw)
            # force a garbage collection per mapping to free large numpy arrays,
            # see the explanation in _getMappingsParallel
            gc.collect()
            return mapping
        return map(mappingFromKw, mappingArgsArr)
def _getMappingsParallel(mappingArgsArr):
    """
    Yield mappings computed in a worker process via NuMap.

    :param mappingArgsArr: iterable of getMapping() keyword-argument dicts
    """
    # Each worker process takes 2-4GiB!
    # We use only one worker as the main process is usually slower in consuming
    # the mappings.
    workerCount = 1
    # TODO use iterator class instead of yield to conserve memory
    # (local variable 'mapping' holds on to reference and is only released on next iteration)
    mappings = NuMap(_getCalculatedMappingFromArgs, mappingArgsArr,
                     worker_type='process', worker_num=workerCount, buffer=workerCount)
    try:
        mappings.start()
        for mapping in mappings:
            yield mapping
            # Due to some reference cycles there are numpy arrays which don't
            # get freed implicitly. As the arrays we work with are quite huge
            # this adds up quickly. The problem is that currently the garbage collector
            # doesn't know about the real size of the numpy arrays (as they are C extension
            # types and there is no API yet for communicating the real size to the Python
            # interpreter). Therefore the thresholds for triggering a garbage collection
            # are seldomly reached and instead we consume more and more memory and eventually
            # run out of it. To fight against this, we manually run a collection to force
            # freeing up native memory.
            gc.collect()
    finally:
        # always shut the worker pool down, even if the consumer stops early
        mappings.stop(ends=[0])
def _getCalculatedMappingFromArgs(kwargs):
    """
    A helper function which gets a mapping and forces the
    calculation of its (lazy) properties.
    See getMappingSequence().

    :param dict kwargs: keyword arguments for getMapping()
    :rtype: BaseSpacecraftMapping
    """
    try:
        os.nice(10) # lower the worker's priority; only available on UNIX systems
    except (AttributeError, OSError):
        # AttributeError: os.nice does not exist on this platform (e.g. Windows);
        # OSError: renicing was refused. The original bare `except:` would also
        # have swallowed KeyboardInterrupt/SystemExit.
        pass
    mapping = getMapping(**kwargs)
    # force calculation within worker process
    mapping.boundingBox
    mapping.elevation
    return mapping
def getMapping(imagePathOrArray, wcsPathOrHeader, timeshift=None, noradId=None, tleFolder=None, spacetrack=None,
               altitude=110, fastCenterCalculation=False, metadata=None, nosanitize=False,
               identifier=None):
    """
    If timeshift is None, then the wcs header is first checked for a shifted timestamp
    and corresponding spacecraft position. In case no shifted timestamp exists, the
    wcs header is checked for the original timestamp and spacecraft position. If only the
    timestamp exists (which may be the case for externally produced wcs files), the
    spacecraft position is calculated from two-line elements. If the latter applies or
    'timeshift' is given, then tleFolder must be given.
    If the tleFolder doesn't contain a %noradid%.tle file, then spacetrack is used to download
    the data (or an error is raised if spacetrack is None). The NORAD ID is determined from
    the noradId parameter, or if that is None from the wcs header. If in the latter case
    the wcs header doesn't contain the NORAD ID, then the ISS ID (25544) is used as a default and a
    warning is printed.

    :param imagePathOrArray: image file path, or an RGB image array
    :param wcsPathOrHeader: WCS file path, or an already-read FITS header
    :param datetime.timedelta timeshift: if set, overrides the shifted timestamp stored in the wcs headers
    :param noradId: if set, overrides the NORAD ID stored in the wcs headers
    :param tleFolder: folder containing TLE files named noradid.tle
    :param spacetrack: a Spacetrack class instance
    :param altitude: assumed emission altitude in km
    :param bool nosanitize: if True, return the unsanitized mapping class (no masking support)
    :param identifier: mapping identifier; derived from the image or wcs filename if None
    :rtype: BaseSpacecraftMapping
    """
    wcsHeader, photoTime, originalPhotoTime, cameraPosGCRS = \
        _prepareMappingParams(wcsPathOrHeader, timeshift, noradId, tleFolder, spacetrack)

    # paths are given as strings; anything else is treated as in-memory data
    isImageArray = not isinstance(imagePathOrArray, string_types)
    isWcsHeader = not isinstance(wcsPathOrHeader, string_types)

    # derive an identifier from the first available filename
    if identifier is None:
        if not isImageArray:
            identifier = os.path.splitext(os.path.basename(imagePathOrArray))[0]
        elif not isWcsHeader:
            identifier = os.path.splitext(os.path.basename(wcsPathOrHeader))[0]

    if isImageArray:
        cls = ArraySpacecraftMapping
    else:
        cls = FileSpacecraftMappingUnsanitized if nosanitize else FileSpacecraftMapping
    mapping = cls(wcsHeader, altitude, imagePathOrArray, cameraPosGCRS, photoTime,
                  identifier, metadata,
                  originalPhotoTime=originalPhotoTime,
                  fastCenterCalculation=fastCenterCalculation)
    return mapping
def _prepareMappingParams(wcsPathOrHeader, timeshift=None, noradId=None, tleFolder=None, spacetrack=None):
    """
    Determine the FITS header, (possibly shifted) photo time, original photo time,
    and camera position in GCRS coordinates for a mapping.

    See getMapping() for the resolution rules of timestamp, position, and NORAD ID.

    :raises ValueError: if DATE-OBS is missing, or if a spacecraft position must be
        recalculated but tleFolder/spacetrack do not provide usable TLE data
    :rtype: (header, datetime, datetime, position)
    """
    if noradId is not None:
        noradId = int(noradId)
    if isinstance(wcsPathOrHeader, string_types):
        fitsWcsHeader = auromat.fits.readHeader(wcsPathOrHeader)
    else:
        fitsWcsHeader = wcsPathOrHeader
    originalPhotoTime = auromat.fits.getPhotoTime(fitsWcsHeader)
    if originalPhotoTime is None:
        raise ValueError('DATE-OBS missing in FITS header')
    if timeshift is not None:
        # an explicit timeshift invalidates any stored position; force recalculation
        photoTime = originalPhotoTime + timeshift
        cameraPosGCRS = None
    else:
        # prefer a shifted timestamp+position already stored in the header
        cameraPosGCRS, photoTime_, _ = auromat.fits.getShiftedSpacecraftPosition(fitsWcsHeader)
        if cameraPosGCRS is not None:
            photoTime = photoTime_
        else:
            photoTime = originalPhotoTime
            cameraPosGCRS, _ = auromat.fits.getSpacecraftPosition(fitsWcsHeader)
            if cameraPosGCRS is None:
                warnings.warn('Spacecraft position is missing in FITS header, will recalculate')
    if cameraPosGCRS is None:
        # recalculate the spacecraft position from two-line elements
        if noradId is None:
            noradId = auromat.fits.getNoradId(fitsWcsHeader)
            if noradId is None:
                warnings.warn('NORAD ID is missing in FITS header, assuming ISS (25544)')
                noradId = 25544
        if tleFolder is None:
            raise ValueError('You need to specify tleFolder to calculate spacecraft positions')
        tleFilePath = os.path.join(tleFolder, str(noradId) + '.tle')
        if os.path.exists(tleFilePath):
            # the EphemerisCalculator doesn't need to be cached, fast enough (0.007s)
            ephemCalculator = EphemerisCalculator(tleFilePath)
            if not ephemCalculator.contains(photoTime):
                # TLE file is outdated for this timestamp; try to update it
                if spacetrack is None:
                    raise ValueError('Please update ' + tleFilePath + ' or ' +
                                     'supply a spacetrack instance for automatic download')
                spacetrack.updateTLEsFor(noradId, tleFilePath, photoTime)
                ephemCalculator = EphemerisCalculator(tleFilePath)
        elif spacetrack is not None:
            # no local TLE file; download one
            spacetrack.updateTLEsFor(noradId, tleFilePath, photoTime)
            ephemCalculator = EphemerisCalculator(tleFilePath)
        else:
            raise ValueError('Please put ' + str(noradId) + '.tle inside ' + tleFolder + ' or ' +
                             'supply a spacetrack instance for automatic download')
        cameraPosGCRS = ephemCalculator(photoTime)
    return fitsWcsHeader, photoTime, originalPhotoTime, cameraPosGCRS
class BaseSpacecraftMapping(BaseAstrometryMapping):
    """
    A mapping which is based on having a camera in/on a spacecraft looking both on earth
    and the stars and where no exact camera pointing is known.
    The stars were then used to derive a WCS definition with which it is possible to
    calculate the direction vector of each pixel.
    """
    def __init__(self, wcsHeader, alti, cameraPosGCRS, photoTime, identifier, metadata=None,
                 originalPhotoTime=None, fastCenterCalculation=False):
        """
        :param photoTime: possibly shifted photo timestamp used for the mapping
        :param originalPhotoTime: unshifted timestamp; defaults to photoTime
        """
        BaseAstrometryMapping.__init__(self, wcsHeader, alti, cameraPosGCRS, photoTime,
                                       identifier, metadata,
                                       fastCenterCalculation=fastCenterCalculation)
        if originalPhotoTime is None:
            originalPhotoTime = photoTime
        self._originalPhotoTime = originalPhotoTime
    @property
    def originalPhotoTime(self):
        """The unshifted photo timestamp."""
        return self._originalPhotoTime
    @lazy_property
    def intersectsEarth(self):
        """
        Returns a boolean array indicating whether a pixel center intersects with the earth.
        """
        direction = self.cameraToPixelCenterDirection
        t0 = time.time()
        # test each pixel direction against the WGS84 ellipsoid
        intersectsEarth = ellipsoidLineIntersects(wgs84A, wgs84B,
                                                  self.cameraPosGCRS,
                                                  direction.reshape(-1,3))
        # NOTE(review): debug timing output on stdout; consider switching to logging
        print('intersectsEarth:', time.time()-t0, 's')
        # back to the original (height, width) image grid shape
        intersectsEarth = intersectsEarth.reshape(self.cameraToPixelCenterDirection.shape[0],
                                                  self.cameraToPixelCenterDirection.shape[1])
        return intersectsEarth
    def isConsistent(self, starPxCoords=None):
        """
        Checks if the photo timestamp and astrometric solution used for mapping is plausible by analysing
        the mapping result.
        Note that in general there are virtually no false solves when using astrometry.net.
        :param starPxCoords: array of shape (n,2) containing x,y pixel coordinates of stars which have been
                             used for obtaining an astrometry solution;
                             for astrometry.net, the quad stars can be used for this purpose,
                             see auromat.solving.readQuadMatch()
        :rtype: True if consistent, False if not
        """
        if np.all(self.intersectsEarth):
            # Although we solved the image using stars, every pixel intersects
            # with the modelled earth. Thus, the camera position is such
            # that the camera would look directly at the earth, with no starfield in the image.
            # The timestamp and/or astrometric solution must be wrong.
            return False
        elif not np.any(self.intersectsEarth):
            # No pixel intersects with the modelled earth. As we assume that the images always contain
            # a part of the earth, this must again be a wrong timestamp and/or astrometric solution.
            return False
        if starPxCoords is not None:
            # starPxCoords are (x,y); the boolean grid is indexed as [row, column]
            starCoveredByEarth = self.intersectsEarth[starPxCoords[:,1],starPxCoords[:,0]]
            if np.any(starCoveredByEarth):
                # There is at least one star used for the astrometry solution which would
                # be covered by the modelled earth. Therefore, the timestamp and/or astrometric
                # solution must be wrong.
                return False
        return True
@inherit_docs
class FileSpacecraftMappingUnsanitized(ImageMaskAstrometryMixin, FileImageMixin, BaseSpacecraftMapping):
    """
    .. warning:: Consider using FileSpacecraftMapping instead of this class.
        Masking is not supported here.
    The purpose of this class is to access certain properties in a very efficient way by
    skipping any sanitization.
    See auromat.test.draw_test.testParallelsMeridiansPlotOptimized for an example usage of this
    behaviour.
    """
    def __init__(self, wcsHeader, alti, imagePath, cameraPosGCRS, photoTime, identifier, metadata=None,
                 originalPhotoTime=None, fastCenterCalculation=False):
        """See BaseSpacecraftMapping; *imagePath* is a path to the image file."""
        ImageMaskAstrometryMixin.__init__(self)
        FileImageMixin.__init__(self, imagePath)
        BaseSpacecraftMapping.__init__(self, wcsHeader, alti, cameraPosGCRS, photoTime, identifier, metadata,
                                       originalPhotoTime=originalPhotoTime,
                                       fastCenterCalculation=fastCenterCalculation)
    def createMasked(self, centerMask):
        """Always raises; masking requires the sanitized variant."""
        raise RuntimeError('Masking is not supported for unsanitized mappings, ' +
                           'please use nosanitize=False in getMapping()')
# sanitized (default) file-based mapping class, derived by wrapping the
# unsanitized class with the sanitize_data decorator
FileSpacecraftMapping = sanitize_data(FileSpacecraftMappingUnsanitized)
@sanitize_data
@inherit_docs
class ArraySpacecraftMapping(ImageMaskAstrometryMixin, ArrayImageMixin, BaseSpacecraftMapping):
    """
    Like FileSpacecraftMapping but accepts an RGB image array instead of an image file path.
    """
    def __init__(self, wcsHeader, alti, img, cameraPosGCRS, photoTime, identifier, metadata=None,
                 originalPhotoTime=None, fastCenterCalculation=False):
        """See BaseSpacecraftMapping; *img* is an RGB image array."""
        ImageMaskAstrometryMixin.__init__(self)
        ArrayImageMixin.__init__(self, img)
        BaseSpacecraftMapping.__init__(self, wcsHeader, alti, cameraPosGCRS, photoTime, identifier, metadata,
                                       originalPhotoTime=originalPhotoTime,
                                       fastCenterCalculation=fastCenterCalculation)
isoDateFormat = '%Y-%m-%dT%H:%M:%S.%f'
def _parseDates(dic):
keys = {'date'} & set(dic.keys())
for k in keys:
dic[k] = datetime.strptime(dic[k], isoDateFormat)
return dic | esa/auromat | auromat/mapping/spacecraft.py | spacecraft.py | py | 27,418 | python | en | code | 17 | github-code | 50 |
29448160283 | import matplotlib.pyplot as plt
import csv
import matplotlib
x = []
y = []
n = 10 #it takes every n-th value. n=1 is full resolution
# read semicolon-separated numeric values and keep every n-th float sample;
# x becomes the sample index, y the sampled value
with open('datavalues.txt','r') as csvfile:
    data = csv.reader(csvfile, delimiter=';',quoting=csv.QUOTE_NONNUMERIC)
    j=0
    k=0
    for row in data:
        print("number of row: {}".format(len(row)))
        for i in row:
            j=j+1
            #print("j = {}".format(j))
            if (j%n)==0:
                if isinstance(i,float)==True:
                    k=k+1
                    #print("k = {}".format(k))
                    x.append(k)
                    #print(i)
                    y.append((i))
# two side-by-side scatter plots of the same data (Federweg = spring travel)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
#figure 1
ax1.scatter(x,y,1, color='black',label='Federweg')
ax1.set_xlabel('Time [ms]')
ax1.set_ylabel('Federweg [mm]')
ax1.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
ax1.set_title('Federweg vs. Zeit')
ax1.legend()
#figure 2
ax2.scatter(x,y,1, color='black',label='Federweg')
ax2.set_xlabel('Time [ms]')
ax2.set_ylabel('Federweg [mm]')
ax2.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
ax2.set_title('Federweg vs. Zeit')
ax2.legend()
plt.show()
41154752341 |
import os.path
import sys
import numpy as np
import pandas as pd
from btax.util import get_paths
globals().update(get_paths())
# input .csv files (presumably Federal Reserve financial-accounts tables — TODO confirm)
_OOH_VALUE = os.path.join(_DATA_DIR, 'b101.csv')     # owner-occupied housing value
_DEBT_NFCORP = os.path.join(_DATA_DIR, 'l103.csv')   # non-financial corporate debt
_DEBT_NCORP = os.path.join(_DATA_DIR, 'l104.csv')    # non-corporate debt
_DEBT_FCORP = os.path.join(_DATA_DIR, 'l208.csv')    # financial corporate debt
_DEBT_HOME = os.path.join(_DATA_DIR, 'l218.csv')     # home mortgage debt
_EQUITY_CORP = os.path.join(_DATA_DIR, 'l223.csv')   # corporate equity
_EQUITY_NCORP = os.path.join(_DATA_DIR, 'l229.csv')  # non-corporate equity
_NAICS_CODES = os.path.join(_DATA_DIR, 'NAICS_Codes.csv')
# the source tables report values in millions; scale to dollars
_CST_FACTOR = 10**6
def calibrate_financing():
    """
    Read total debt and equity amounts from the financial-accounts .csv files,
    apportion them across industries, and return the resulting debt ratios.

    :returns: DataFrame of per-industry debt ratios (see calc_debt())
    """
    skipped = [29,22,14,8,7,9,10]
    columns = [0,11]
    column_name = ['Type', 'Amount']
    num_rows = [1,4]
    #reads the equity data from the .csv file. Specifies which columns and rows to read in the file
    corp_equity_df = pd.read_csv(_EQUITY_CORP, skiprows=skipped[5],
        usecols=columns, header=None, names=column_name, nrows=num_rows[1])
    non_fin_corp_equity = corp_equity_df[corp_equity_df.index==0]['Amount'][0] * _CST_FACTOR
    #apportions the equity based on the ratio of equity held by each industry
    equity_values = apportion_equity({'non_fin_corp_equity':non_fin_corp_equity})
    non_fin_corp_debt = pd.read_csv(_DEBT_NFCORP, skiprows=skipped[0], usecols=columns,
        header=None, names=column_name, nrows=num_rows[0])['Amount'][0] * _CST_FACTOR
    #apportions the debt based on the ratio of interest paid by each industry
    debt_values = apportion_debt({'non_fin_corp_debt': non_fin_corp_debt})
    fin_corp_equity = corp_equity_df[corp_equity_df.index==3]['Amount'][3] * _CST_FACTOR
    equity_values.update(apportion_equity({'fin_corp_equity':fin_corp_equity}))
    fin_corp_debt = pd.read_csv(_DEBT_FCORP, skiprows=skipped[2], usecols=columns,
        header=None, names=column_name, nrows=num_rows[0])['Amount'][0] * _CST_FACTOR
    debt_values.update(apportion_debt({'fin_corp_debt':fin_corp_debt}))
    non_corp_equity = pd.read_csv(_EQUITY_NCORP, skiprows=skipped[4], usecols=columns,
        header=None, names=column_name, nrows=num_rows[0])['Amount'][0] * _CST_FACTOR
    equity_values.update(apportion_equity({'non_corp_equity':non_corp_equity}))
    non_corp_debt = pd.read_csv(_DEBT_NCORP, skiprows=skipped[1], usecols=columns,
        header=None, names=column_name, nrows=num_rows[0])['Amount'][0] * _CST_FACTOR
    debt_values.update(apportion_debt({'non_corp_debt':non_corp_debt}))
    #calculates the overall debt ratios based on the equity and debt values, stored in dictionaries {key:dataframe}
    debt_params = calc_debt(equity_values, debt_values)
    return debt_params
    # BUG FIX: two unreachable statements after this return (reading mortgage
    # debt from _DEBT_HOME and house value from _OOH_VALUE into unused locals)
    # were removed; they could never execute.
def apportion_debt(total_liab):
    """
    Distribute a single total-debt amount across industries in proportion to
    the interest paid by each industry.

    :param dict total_liab: single-entry dict {category_name: total_debt_amount}
    :returns: dict {category_name: DataFrame of per-industry debt by entity type}
    """
    #use the ratio of total liabilities in an industry to total interest paid by all industries to proportionally distribute debt
    # BUG FIX (Python 3): dict.keys() returns a view that does not support
    # indexing; next(iter(...)) works on both Python 2 and Python 3
    keyword = next(iter(total_liab))
    #choose interest paid for either corporate or non-corporate businesses
    if((keyword=='non_fin_corp_debt') or (keyword=='fin_corp_debt')):
        columns = [11]
        intrst_pd_1 = pd.read_csv(_SOI_S_VALUES, usecols=columns)
        intrst_pd_2 = pd.read_csv(_SOI_C_VALUES, usecols=columns)
        types = ['c_corp', 's_corp']
        intrst_pd = {'c_corp':intrst_pd_1, 's_corp':intrst_pd_2}
    else:
        columns = [2]
        intrst_pd_1 = pd.read_csv(_SOI_PA_VALUES, usecols=columns)
        intrst_pd_2 = pd.read_csv(_SOI_PR_VALUES, usecols=columns)
        types = ['partner', 'prop']
        intrst_pd = {'partner':intrst_pd_1, 'prop':intrst_pd_2}
    #runs the debt calculation twice for s-corps and c-corps or partnerships and sole proprietorships
    debt_df = pd.DataFrame(index=np.arange(0,len(intrst_pd_1)), columns=types)
    for i in types:
        total_intrst = intrst_pd[i].sum(axis=0)['interest_paid']
        ratio = total_liab[keyword] / total_intrst
        indust_debt = np.array(intrst_pd[i]['interest_paid']) * ratio
        debt_df[i] = indust_debt
    return {keyword:debt_df}
def apportion_equity(total_equity):
    """
    Distribute a single total-equity amount across industries in proportion to
    each industry's reported equity (corporate) or capital accounts (non-corporate).

    :param dict total_equity: single-entry dict {category_name: total_equity_amount}
    :returns: dict {category_name: DataFrame of per-industry equity by entity type}
    """
    # BUG FIX (Python 3): dict.keys() returns a view that does not support
    # indexing; next(iter(...)) works on both Python 2 and Python 3
    keyword = next(iter(total_equity))
    if((keyword=='non_fin_corp_equity') or (keyword=='fin_corp_equity')):
        columns = [1,3,4,6,8]
        equity_x_1 = pd.read_csv(_SOI_S_VALUES, usecols=columns)
        equity_x_2 = pd.read_csv(_SOI_C_VALUES, usecols=columns)
        types = ['c_corp', 's_corp']
        equity = {'c_corp':equity_x_1, 's_corp':equity_x_2}
        equity_df = pd.DataFrame(index=np.arange(0,len(equity_x_1)),columns=['c_corp', 's_corp'])
        for i in types:
            # treasury stock reduces equity, so flip its sign before summing
            equity[i]['cost_of_treasury_stock'] = equity[i]['cost_of_treasury_stock'] * -1
            sum_equity = sum(equity[i].sum(axis=0))
            equity_rows = equity[i].sum(axis=1)
            ratio = total_equity[keyword] / sum_equity
            indust_equity = np.array(equity_rows) * ratio
            equity_df[i] = indust_equity
        return {keyword:equity_df}
    else:
        columns = [7]
        equity_pca = pd.read_csv(_SOI_AS_VALUES, usecols=columns)
        equity_df = pd.DataFrame(index=np.arange(0,len(equity_pca)),columns=['non_corp'])
        sum_equity = equity_pca.sum(axis=0)['capital_accounts_net']
        ratio = total_equity[keyword] / sum_equity
        indust_equity = np.array(equity_pca) * ratio
        equity_df['non_corp'] = indust_equity
        return {keyword:equity_df}
def calc_debt(total_equity, total_liab):
    """
    Compute per-industry debt ratios debt / (debt + equity) for corporate and
    non-corporate entities, save them to csv, and return them.

    :param dict total_equity: output dicts of apportion_equity(), merged
    :param dict total_liab: output dicts of apportion_debt(), merged
    :returns: DataFrame with columns ['NAICS', 'corp', 'non_corp']
    """
    #calculates the debt ratio for each industry and then uses it to calculate the real discount rate
    c_corp_equity = total_equity['non_fin_corp_equity']['c_corp']
    s_corp_equity = total_equity['non_fin_corp_equity']['s_corp']
    c_corp_debt = total_liab['non_fin_corp_debt']['c_corp']
    s_corp_debt = total_liab['non_fin_corp_debt']['s_corp']
    # s-corps are counted with the non-corporate sector here
    non_corp_debt = total_liab['non_corp_debt'].sum(axis=1) + s_corp_debt
    non_corp_equity = total_equity['non_corp_equity'].sum(axis=1) + s_corp_equity
    #calculates the debt ratio for all corporate and non-corporate industries
    debt_f_corp = (c_corp_debt) / (c_corp_equity + c_corp_debt)
    debt_f_non_corp = (non_corp_debt) / (non_corp_equity + non_corp_debt)
    #saves the debt ratios to csv as an intermediate step
    total_debt_f = pd.concat([pd.read_csv(_NAICS_CODES),debt_f_corp, debt_f_non_corp], axis=1)
    total_debt_f.columns = ['NAICS', 'corp', 'non_corp']
    save_ratios(total_debt_f)
    return total_debt_f
def calc_after_return(indust_debt):
    """
    Calculate the real after-tax rate of return for each industry as the
    debt-weighted average of the real cost of debt and the real return on equity.

    :param indust_debt: per-industry debt ratios (array-like)
    :returns: numpy array of real after-tax returns
    """
    inflation_rate = 0.011
    nominal_mrkt_intrst = 0.0365
    real_rate_return = 0.006
    debt_ratio = np.array(indust_debt)
    equity_ratio = 1 - debt_ratio
    # real cost of debt = nominal market interest rate minus inflation
    after_tax_return = debt_ratio * (nominal_mrkt_intrst - inflation_rate) + equity_ratio * real_rate_return
    # BUG FIX: the original computed this value but never returned it
    return after_tax_return
def save_ratios(debt_ratios):
    """
    Keep only the rows for the NAICS industry codes of interest and write the
    debt ratios to debt.csv in the output directory.

    :param debt_ratios: DataFrame with a 'NAICS' column (see calc_debt())
    """
    debt_ratios = debt_ratios[(debt_ratios.NAICS=='11')|(debt_ratios.NAICS=='211')|(debt_ratios.NAICS=='212')|(debt_ratios.NAICS=='213')
    |(debt_ratios.NAICS=='22')|(debt_ratios.NAICS=='23')|(debt_ratios.NAICS=='31-33')|(debt_ratios.NAICS=='32411')|(debt_ratios.NAICS == '336')
    |(debt_ratios.NAICS=='3391')|(debt_ratios.NAICS=='42')|(debt_ratios.NAICS=='44-45')|(debt_ratios.NAICS=='48-49')|(debt_ratios.NAICS == '51')
    |(debt_ratios.NAICS=='52')|(debt_ratios.NAICS=='531')|(debt_ratios.NAICS=='532')|(debt_ratios.NAICS=='533')|(debt_ratios.NAICS=='54')
    |(debt_ratios.NAICS=='55')|(debt_ratios.NAICS=='56')|(debt_ratios.NAICS=='61')|(debt_ratios.NAICS=='62')|(debt_ratios.NAICS=='71')
    |(debt_ratios.NAICS=='72')|(debt_ratios.NAICS=='81')|(debt_ratios.NAICS=='92')]
    debt_ratios.to_csv(os.path.join(_OUT_DIR,'debt.csv'), index = False)
| 18418n9f2nn1n/B-Tax | btax/calibrate_financing.py | calibrate_financing.py | py | 7,488 | python | en | code | null | github-code | 50 |
25730776660 | """
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
# load the text records; presumably each row is [sender, receiver, timestamp] — TODO confirm
with open('texts.csv', 'r') as f:
    reader = csv.reader(f)
    texts = list(reader)
# load the call records; columns 0/1 are the two numbers, column 3 the duration in seconds
with open('calls.csv', 'r') as f:
    reader = csv.reader(f)
    calls = list(reader)
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
# Algorithm
# Create a phone number dictionary
# For each call
# if the sending number is in the dict
# add the time spent to the dict entry
# else
# add the number as an entry to the dict
#
# if the receiving number is in the dict
# add the time spent to the dict entry
# else
# add the number as an entry to the dict
#
# return the max time
# accumulate total seconds on the phone per number; both the caller and the
# receiver of a call spend its full duration on the phone
phone_dict = {}
for call in calls:
    duration = int(call[3])
    for number in (call[0], call[1]):
        phone_dict[number] = phone_dict.get(number, 0) + duration

# number with the largest accumulated call time
max_phone_number = max(phone_dict, key=phone_dict.get)
max_time = phone_dict[max_phone_number]
print("{0} spent the longest time, {1} seconds, on the phone during September 2016.".format(max_phone_number, str(max_time)))
| evmiguel/udacity_ds_algo | P0/Task2.py | Task2.py | py | 1,477 | python | en | code | 0 | github-code | 50 |
72957373594 | import os
import sys
import pandas as pd
# Check if the correct number of command-line arguments is provided
# Check if the correct number of command-line arguments is provided
if len(sys.argv) != 2:
    sys.stderr.write("Arguments error. Usage:\n")
    sys.stderr.write("\tpython3 clean_features.py data-file\n")
    sys.exit(1)
# Set the path to the input data
data_path = sys.argv[1]
print('data_path:', data_path)
# Set the output path for the cleaned dataset
f_output = os.path.join("data", "stage2", "dataset_cleaned.ftr")
os.makedirs(os.path.join("data", "stage2"), exist_ok=True)
# Read the dataset from the provided feather file
data = pd.read_feather(data_path)
# Drop columns related to trip end information
# as we are predicting the end time (keeping them would leak the target)
drop_columns = ['id', 'dropoff_datetime']
data = data.drop(drop_columns, axis=1)
print('Columns left after cleaning:', data.shape[1])
# Drop columns related to the pickup date,
# as necessary information has already been extracted
drop_columns = ['pickup_datetime', 'pickup_date']
data = data.drop(drop_columns, axis=1)
print('Shape of data: {}'.format(data.shape))
# Save the cleaned dataframe to a new feather file
data.to_feather(f_output)
22453164947 | import sys, os, tempfile, stat, glob
try:
import mlflow
except ImportError:
mlflow = None # this prevent setting tracking ON
try:
from common.trace import traceln
except ImportError:
def traceln(*o): print(*o, file=sys.stderr, flush=True)
# Either load the config from the application PYTHONPATH or from this distro
try:
import Tracking_config
except ModuleNotFoundError:
import util.Tracking_config as Tracking_config
# default MLFlow server address from the application configuration
DEFAULT_URI = "http://%s:%d" % (Tracking_config.sMLFlowHost , Tracking_config.iMLFlowPort)
# MAIN VARIABLE to switch On/OFF the actual tracking to the MLFLOW server
bTracking = False # tracking off by default
# to hide the underlying exception from callers; falls back to Exception
# when mlflow is not installed (mlflow is None -> AttributeError below)
try:
    TrackingException = mlflow.exceptions.MlflowException
except AttributeError:
    TrackingException = Exception
# ------------- TRACKING API -------------
def set_tracking():
    """
    Turn tracking on; requires the mlflow package to be installed.
    """
    global bTracking
    if mlflow is None:
        traceln("ERROR: mlflow not installed")
    else:
        bTracking = True
def set_tracking_uri(server_uri=None):
    """
    Enable the tracking with given MLFlow server URI (DEFAULT_URI when None).
    """
    if mlflow:
        if server_uri is None: server_uri = DEFAULT_URI
        traceln("MLFLow server: ", server_uri)
        mlflow.set_tracking_uri(server_uri)
        set_tracking()
def set_no_tracking():
    """
    Disable tracking
    """
    global bTracking
    bTracking = False
# ---- Setting experiment and start/stop of runs ----
def set_experiment(experiment_name):
    """Select the active MLFlow experiment (no-op when tracking is off)."""
    if bTracking: mlflow.set_experiment(experiment_name)
def start_run(run_name=None):
    """
    Start a tracking run with the given name, retrying with suffixed names
    (name.1, name.2, ...) when a previous run with that name crashed.

    :returns: the mlflow ActiveRun, None when all retries fail, or a no-op
              context manager when tracking is off
    """
    # mlflow.start_run(run_id=None, experiment_id=None, run_name=None, nested=False)
    if bTracking and mlflow:
        for i in range(5): # max retry...
            _s = run_name if i == 0 else "%s.%d" % (run_name, i)
            try:
                return mlflow.start_run(run_name=_s)
            # narrowed from a bare except (which also caught KeyboardInterrupt);
            # kept broad because start_run failures are retried with a new name
            except Exception:
                mlflow.end_run()
                traceln("MLFLOW: previous run '%s' probably crashed. Need to generate new name." % _s)
        return None
    else:
        return _NullContextManager()
def end_run(status='FINISHED'):
    """End the active run with the given status (no-op when tracking is off)."""
    if bTracking: mlflow.end_run(status=status)
# ---- Logging parameters, metrics and artifacts ----
def log_param(key, value):
    """Log one parameter (no-op when tracking is off)."""
    if bTracking: mlflow.log_param(key, value)
def log_params(params):
    """Log a dict of parameters, falling back to one-by-one logging when the
    batch call is rejected (e.g. a value exceeding MLFlow's length limit)."""
    if bTracking:
        try:
            mlflow.log_params(params)
        except mlflow.exceptions.MlflowException:
            # for the case of "had length 1296, which exceeded length limit of 250""
            # ... pffff
            for _k,_v in params.items(): log_param(_k,_v)
def log_metric(key, value, step=None
               , ndigits=None):
    """
    Log one metric (no-op when tracking is off).

    Extra parameter: ndigits : if specified, the value is rounded with the given number of digits
    """
    if bTracking:
        if ndigits is None:
            mlflow.log_metric(key, value, step=step)
        else:
            # narrowed from a bare except: only rounding failures on
            # non-numeric values are ignored, not e.g. KeyboardInterrupt
            try: value = round(value, ndigits)
            except (TypeError, ValueError): pass
            mlflow.log_metric(key, value, step=step)
def log_metrics(metrics, step=None
                , ndigits=None):
    """
    Log a dict of metrics (no-op when tracking is off).

    Extra parameter: ndigits : if specified, all values are rounded with the given number of digits
    """
    if bTracking:
        if ndigits is None:
            mlflow.log_metrics(metrics, step=step)
        else:
            _d = {}
            for k,v in metrics.items():
                # narrowed from a bare except: only rounding failures on
                # non-numeric values are ignored
                try: v = round(v, ndigits)
                except (TypeError, ValueError): pass
                _d[k] = v
            mlflow.log_metrics(_d, step=step)
def log_artifact(local_path, artifact_path=None):
    """Log one file as an artifact after making it group-writable (no-op when tracking is off)."""
    if bTracking:
        _chmod_rw_rw_r(local_path)
        mlflow.log_artifact(local_path, artifact_path=artifact_path)
def log_artifacts(local_dir, artifact_path=None):
    """Log a whole directory as artifacts, making each file group-writable first."""
    if bTracking:
        for fn in glob.iglob(os.path.join(local_dir, "**"), recursive=True):
            _chmod_rw_rw_r(fn)
        mlflow.log_artifacts(local_dir, artifact_path=artifact_path)
def log_artifact_string(sName, sData):
    """
    Log a string as an artifact: write it to a temporary file, log that file,
    then delete the file.
    """
    fd, name = tempfile.mkstemp(prefix=(sName+"."), suffix=".txt")
    try:
        try:
            os.write(fd, str(sData).encode('utf-8'))
            os.fsync(fd)
        finally:
            # BUG FIX: the descriptor leaked when the write failed
            os.close(fd)
        log_artifact(name)
    finally:
        # BUG FIX: the temporary file leaked when log_artifact raised
        # (the original 'finally' block was empty)
        os.remove(name)
def set_tag(key, value):
    """Set one tag on the active run (no-op when tracking is off)."""
    if bTracking: mlflow.set_tag(key, value)
def set_tags(tags):
    """Set a dict of tags on the active run (no-op when tracking is off)."""
    if bTracking: mlflow.set_tags(tags)
# ----- INTERNAL STUFF --------------------------------------------------
class _NullContextManager(object):
"""
A context manager that does nothing.
"""
def __init__(self, dummy_resource=None):
self.dummy_resource = dummy_resource
def __enter__(self):
return self.dummy_resource
def __exit__(self, *args):
pass
def _chmod_rw_rw_r(fname):
"""
when used by a group of users:
- they must be in the same user group
- the file copied to the server area must be RW by the server, which
possibly runs under another account, of the same group of course!
Here we chose to set RW for user and group, and R for other
"""
os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR \
| stat.S_IRGRP | stat.S_IWGRP \
| stat.S_IROTH )
# ------------------------------------------------------------------------
def test_no_mlflow():
    """Manual smoke test: all calls must be no-ops when mlflow is unavailable."""
    global mlflow
    mlflow = None
    start_run("DU.crf.Test.JL")
    log_param("toto", "999")
    log_metric("score", 10, 1)
    log_metric("score", 20, 2)
    end_run()
    print("test_no_mlflow: DONE")
def test_no_mlflow_with():
    """Manual smoke test: same as test_no_mlflow but using the `with` form."""
    global mlflow
    mlflow = None
    with start_run("DU.crf.Test.JL") as rrr:
        log_param("toto", "999")
        log_metric("score", 10, 1)
        log_metric("score", 20, 2)
    end_run()
    print("test_no_mlflow: DONE")
def test_simple():
    """Manual smoke test against the default tracking backend."""
    set_tracking()
    set_experiment("DU.crf.Test.JL")
    start_run("run_1")
    log_param("toto", "999")
    log_metric("score", 10, 1)
    log_metric("score", 20, 2)
    set_tag("k", "vv")
    # log_artifact("dtw.py")
    log_artifact_string("mydata", """Dummy data
    in multiline style
    """)
    end_run()
    print("test_simple: DONE")
def test_uri():
    """Manual smoke test against a specific MLFlow server URI."""
    import time
    set_tracking_uri("http://cumin.int.europe.naverlabs.com:5000")
    set_experiment("DU.crf.Test.JL")
    start_run("run_%s" % int(time.time()))
    log_param("toto", "999")
    log_metric("score", 10, 1)
    log_metric("score", 20, 2)
    set_tag("k", "vv")
    log_artifact("dtw.py")
    log_artifact_string("mydata", """Dummy data
    in multiline style
    """)
    end_run()
    print("test_uri: DONE")
def test_api():
    """Manual smoke test using the raw mlflow API directly (requires a local test file)."""
    import mlflow, time, os.path
    sTestFile = "c:\\tmp\\toto.txt"
    assert os.path.exists(sTestFile)
    mlflow.set_tracking_uri("http://cumin.int.europe.naverlabs.com:5000")
    mlflow.set_experiment("test_artifacts")
    mlflow.start_run(run_name="run_%s" % int(time.time()))
    mlflow.log_param("toto", "9.99")
    mlflow.log_artifact(sTestFile)
    mlflow.end_run()
# ------------------------------------------------------------------------
if __name__ == "__main__":
    # manual smoke tests; enable the one to run
    # test_no_mlflow()
    # test_simple()
    # test_uri()
    test_api()
| Transkribus/TranskribusDU | TranskribusDU/util/Tracking.py | Tracking.py | py | 7,454 | python | en | code | 21 | github-code | 50 |
18897614330 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import math
pi = 3.14159265358979324
# semi-major axis of the reference ellipsoid (presumably Krasovsky 1940, used
# by GCJ-02 — TODO confirm)
a = 6378245.0
# first eccentricity squared of the same ellipsoid
ee = 0.00669342162296594323
# scaled pi used by the BD-09 (Baidu) coordinate transformation
x_pi = 3.14159265358979324 * 3000.0 / 180.0
def outOfChina(lat, lng):
    """Return True when (lat, lng) falls outside the rough China bounding box."""
    inside_lng = 72.004 <= lng <= 137.8347
    inside_lat = 0.8293 <= lat <= 55.8271
    return not (inside_lng and inside_lat)
def transformLat(x, y):
    """Latitude perturbation series (polynomial + trigonometric terms) of the WGS84->GCJ02 transform."""
    poly = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * math.sqrt(abs(x))
    trig1 = (20.0 * math.sin(6.0 * x * pi) + 20.0 * math.sin(2.0 * x * pi)) * 2.0 / 3.0
    trig2 = (20.0 * math.sin(y * pi) + 40.0 * math.sin(y / 3.0 * pi)) * 2.0 / 3.0
    trig3 = (160.0 * math.sin(y / 12.0 * pi) + 320 * math.sin(y * pi / 30.0)) * 2.0 / 3.0
    return poly + trig1 + trig2 + trig3
def transformLon(x, y):
    """Longitude perturbation series (polynomial + trigonometric terms) of the WGS84->GCJ02 transform."""
    poly = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * math.sqrt(abs(x))
    trig1 = (20.0 * math.sin(6.0 * x * pi) + 20.0 * math.sin(2.0 * x * pi)) * 2.0 / 3.0
    trig2 = (20.0 * math.sin(x * pi) + 40.0 * math.sin(x / 3.0 * pi)) * 2.0 / 3.0
    trig3 = (150.0 * math.sin(x / 12.0 * pi) + 300.0 * math.sin(x / 30.0 * pi)) * 2.0 / 3.0
    return poly + trig1 + trig2 + trig3
# Convert earth coordinates (WGS84, the international standard) to "Mars"
# coordinates, i.e. the GCJ-02 system used by Tencent, AMap (Alibaba) and
# Google maps in China
def WGS84toGCJ02(wgLat, wgLon):
    """Return [lat, lng] in GCJ-02 for the given WGS84 coordinates.

    Coordinates outside China are returned unchanged.
    """
    latlng = [1.0, 1.0]
    if outOfChina(wgLat, wgLon) == True:
        # the GCJ-02 offset only applies inside China
        latlng[0] = wgLat
        latlng[1] = wgLon
        return latlng
    # perturbations relative to the (105E, 35N) reference point
    dLat = transformLat(wgLon - 105.0, wgLat - 35.0)
    dLon = transformLon(wgLon - 105.0, wgLat - 35.0)
    radLat = wgLat / 180.0 * pi
    magic = math.sin(radLat)
    magic = 1 - ee * magic * magic
    sqrtMagic = math.sqrt(magic)
    # convert the metric perturbations to degrees using the ellipsoid radii
    # of curvature at this latitude
    dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi)
    dLon = (dLon * 180.0) / (a / sqrtMagic * math.cos(radLat) * pi)
    latlng[0] = wgLat + dLat
    latlng[1] = wgLon + dLon
    return latlng
# Convert earth coordinates (WGS84, the international standard) to Baidu
# coordinates, i.e. the BD-09 system used by Baidu maps
def WGS84toBD09 (lat, lon):
    """Return [lat, lng] in BD-09 for the given WGS84 coordinates (via GCJ-02)."""
    latlng = WGS84toGCJ02(lat, lon)
    x = latlng[1]
    y = latlng[0]
    # standard GCJ-02 -> BD-09 spiral transformation
    z = math.sqrt(x * x + y * y) + 0.00002 * math.sin(y * x_pi)
    theta = math.atan2(y, x) + 0.000003 * math.cos(x * x_pi)
    # NOTE(review): the usual BD-09 constants are +0.006 / +0.0065; the values
    # below look hand-tuned (alternatives left by the author in the comments)
    latlng[0] = z * math.sin(theta) + 0.006 #0.006 #0.01205
    latlng[1] = z * math.cos(theta) + 0.0062 #0.0065 #0.00370
    return latlng
if __name__=='__main__':
    # demo: convert a fixed Shanghai coordinate to GCJ-02 and BD-09
    lat = 31.23190588
    lng = 121.46952288
    print("WGS84: [%f,%f]" %(lng, lat))
    latlng = WGS84toGCJ02(lat, lng)
    print("GCJ02: [%f,%f]" %(latlng[1], latlng[0]))
    latlng = WGS84toBD09(lat, lng)
print("BD09: [%f,%f]" %(latlng[1], latlng[0])) | lichuanqi/Python_Learn_Note | map_visualization/gps_convert.py | gps_convert.py | py | 2,675 | python | en | code | 2 | github-code | 50 |
20922589920 | import gzip
import io
import lz4.frame
import struct
import proio.proto as proto
# proio bucket magic number: 0xe1 0xc1 followed by 14 zero bytes, stored as a
# list of single-byte values because flush() writes them one byte at a time
magic_bytes = [b'\xe1',
               b'\xc1',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00',
               b'\x00']
class Writer(object):
    """
    Writer for proio files

    This class can be used with the `with` statement. A filename may be
    omitted in favor of specifying `fileobj`.

    :param string filename: name of output file to create or overwrite
    :param fileobj: file object to write to

    :example:

    .. code-block:: python

        with proio.Writer('output.proio') as writer:
            ...
    """

    def __init__(self, filename = None, fileobj = None):
        # Only close the stream in close() if we opened it ourselves; a
        # caller-supplied fileobj remains the caller's responsibility.
        # (Previously this flag was set only on the filename path and
        # close() papered over the missing attribute with a bare except.)
        self._close_file = False
        if filename is None:
            if fileobj is not None:
                self._stream_writer = fileobj
            else:
                self._stream_writer = io.BytesIO(b'')
        else:
            self._stream_writer = open(filename, 'wb')
            self._close_file = True

        # Events are staged into an in-memory bucket and dumped to the
        # stream once the bucket exceeds this many bytes (16 MiB).
        self.bucket_dump_size = 0x1000000
        self._bucket_events = 0
        self._bucket = io.BytesIO(b'')

        self.set_compression(proto.BucketHeader.GZIP)

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def close(self):
        """
        closes the file object assigned to the Writer. This is automatically
        called at the end of a `with` statement.
        """
        self.flush()
        if self._close_file:
            try:
                self._stream_writer.close()
            except OSError:
                # Best-effort close: a failure to close the underlying
                # file must not mask an already-successful flush.
                pass

    def flush(self):
        """
        flushes all buffered data to the output file object. This is
        automatically called at the end of a `with` statement.
        """
        # Nothing staged: do not emit an empty bucket.
        if self._bucket_events == 0:
            return

        # Compress the staged bucket with the configured codec.
        if self._comp == proto.BucketHeader.LZ4:
            bucket_bytes = lz4.frame.compress(self._bucket.getvalue())
        elif self._comp == proto.BucketHeader.GZIP:
            bucket_compressed = io.BytesIO(b'')
            with gzip.GzipFile(fileobj = bucket_compressed, mode = 'wb') as writer:
                writer.write(self._bucket.getvalue())
            bucket_bytes = bucket_compressed.getvalue()
        else:
            bucket_bytes = self._bucket.getvalue()
        # Reset the staging buffer for the next bucket.
        self._bucket.seek(0, 0)
        self._bucket.truncate(0)

        header = proto.BucketHeader()
        header.nEvents = self._bucket_events
        header.bucketSize = len(bucket_bytes)
        header.compression = self._comp

        # Bucket layout: magic bytes, uint32 header length, serialized
        # header, then the (possibly compressed) payload.
        # NOTE(review): struct "I" is native byte order/alignment; if the
        # on-disk format mandates little-endian this should be "<I" —
        # confirm against the proio spec before changing.
        header_buf = header.SerializeToString()
        header_size = struct.pack("I", len(header_buf))
        for magic_byte in magic_bytes:
            self._stream_writer.write(magic_byte)
        self._stream_writer.write(header_size)
        self._stream_writer.write(header_buf)
        self._stream_writer.write(bucket_bytes)

        self._bucket_events = 0

    def set_compression(self, comp):
        """
        sets the compression type to use for future output buckets.

        :param comp: can be one of :attr:`proio.LZ4`, :attr:`proio.GZIP`, or
            :attr:`proio.UNCOMPRESSED`
        """
        self._comp = comp

    def push(self, event):
        """
        takes an event and serializes it into the output bucket.

        :param Event event: event to serialize to output
        """
        event._flush_cache()
        proto_buf = event._proto.SerializeToString()
        proto_size = struct.pack("I", len(proto_buf))
        self._bucket.write(proto_size)
        self._bucket.write(proto_buf)
        self._bucket_events += 1

        # tell() reports the current bucket size without copying the
        # whole buffer the way len(getvalue()) did on every push.
        if self._bucket.tell() > self.bucket_dump_size:
            self.flush()
| decibelcooper/proio | py-proio/proio/writer.py | writer.py | py | 3,900 | python | en | code | 2 | github-code | 50 |
25581271803 | # Выведите таблицу размером n×n, заполненную числами от 1 до n2 по спирали, выходящей из левого верхнего угла и закрученной по часовой стрелке, как показано в примере (здесь n=5):
# Sample Input:
# 5
# Sample Output:
# 1 2 3 4 5
# 16 17 18 19 6
# 15 24 25 20 7
# 14 23 22 21 8
# 13 12 11 10 9
def spiral(n):
    """Return an n x n matrix filled with 1..n*n in a clockwise spiral
    starting from the top-left corner.

    :param int n: matrix dimension (n >= 0)
    :returns: list of n rows, each a list of n ints
    """
    mat = [[0] * n for _ in range(n)]
    top, bottom, left, right = 0, n - 1, 0, n - 1
    value = 1
    # Walk the boundary of the remaining sub-rectangle, shrinking it by
    # one ring per pass: top row -> right column -> bottom row -> left
    # column.
    while top <= bottom and left <= right:
        for col in range(left, right + 1):
            mat[top][col] = value
            value += 1
        top += 1
        for row in range(top, bottom + 1):
            mat[row][right] = value
            value += 1
        right -= 1
        if top <= bottom:
            for col in range(right, left - 1, -1):
                mat[bottom][col] = value
                value += 1
            bottom -= 1
        if left <= right:
            for row in range(bottom, top - 1, -1):
                mat[row][left] = value
                value += 1
            left += 1
    return mat


if __name__ == '__main__':
    # Read the dimension from stdin and print the spiral, one row per
    # line with space-separated values (same output as before; the
    # __main__ guard makes the module importable/testable).
    n = int(input())
    for row in spiral(n):
        print(*row)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.