seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5255201326 | import tkinter as tk
import tkinter.ttk as ttk
import DatabaseHandler as Database
import editWindow
class MainApplication(tk.Frame):
    """Top-level frame for BrewingDB: a recipe listbox, a read-only recipe
    viewer and a Copy/Edit/Delete options bar backed by a SQLite database."""

    def __init__(self, parent, *args, **kwargs):
        """
        Input: self - The object containing the frame being called, parent - the parent window of this frame
        Output: None
        Purpose: Creates the main window
        """
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # NOTE(review): Windows-style relative path; confirm portability needs.
        self.database = Database.Database(r".\Brewing.db")
        parent.title("BrewingDB")
        # Listbox with one row per recipe title plus a trailing "New..." entry.
        self.lb_selection = tk.Listbox(self, selectmode="single")
        self.__refreshList()
        self.lb_selection.grid(row=1, column=0, sticky="NESW")
        self.lb_selection.bind('<<ListboxSelect>>', self.__changeRecipe)
        # Read-only text widget that renders the selected recipe.
        self.txt_recipe = tk.Text(self, height=20, width=50, wrap=tk.WORD)
        self.txt_recipe.grid(row=1, column=1, sticky="NESW")
        self.txt_recipe.config(state="disabled")
        # Let the display box absorb extra space when the window resizes.
        self.rowconfigure(1, weight=1)
        self.columnconfigure(1, weight=1)
        # Options bar across the top holding the action buttons.
        self.fr_options_bar = tk.Frame(self)
        self.fr_options_bar.grid(row=0, column=0, columnspan=2, sticky="NESW")
        self.btn_copy = ttk.Button(master=self.fr_options_bar, text="Copy", command=self.__copy)
        self.btn_copy.pack(side="left")
        self.btn_edit = ttk.Button(master=self.fr_options_bar, text="Edit", command=self.__edit)
        self.btn_edit.pack(side="left")
        self.btn_delete = ttk.Button(master=self.fr_options_bar, text="Delete", command=self.__delete)
        self.btn_delete.pack(side="left")

    def __refreshList(self):
        """
        Input: self - The instance of the object containing the function being called
        Returns: None
        Purpose: When called it requests the titles of all recipes from the database and displays them in lb_selection (selection bar)
        """
        self.lb_selection.delete(0, tk.END)
        self.titleList = self.database.getTitles()
        # Plain loop instead of a side-effect list comprehension.
        for item in self.titleList:
            self.lb_selection.insert(tk.END, item[1])
        self.lb_selection.insert(tk.END, "New...")

    def __changeRecipe(self, event):
        """
        Input: self - The instance of the object containing the function being called
        Input: event - Virtual event causing __changeRecipe to be called
        Returns: None
        Purpose: When called it changes the recipe in txt_recipe to display the recipe currently selected in lb_selection.
        """
        selectionIndex = self.lb_selection.curselection()[0]
        # The last listbox row is the "New..." pseudo-entry.
        if selectionIndex == (self.lb_selection.size() - 1):
            self.__newRecipe()
            # self.editWindow = editWindow.editWindow(self, -1, self.database)
            return
        # Display labels aligned positionally with the recipe record's fields.
        tags = ["", "V ", "Start date: ", "End date: ", "abv: ", "OG: ", "FG: ", "Batch size: ", "Instructions: "]
        recipe = self.database.getRecipe(self.titleList[selectionIndex][0])
        ingredients = self.database.getIngredients(self.titleList[selectionIndex][0])
        self.txt_recipe.config(state="normal")
        self.txt_recipe.delete("1.0", tk.END)
        # Only render fields that actually have a value.
        for index, key in enumerate(recipe):
            if recipe[key]:
                self.txt_recipe.insert(tk.END, "{}{}\n".format(tags[index], recipe[key]))
        self.txt_recipe.insert(tk.END, "\n\nIngredients: \n")
        for ingredient in ingredients:
            self.txt_recipe.insert(tk.END, "{}: {} {}{}\n".format(
                ingredient["category"].capitalize(), ingredient["name"],
                ingredient["amount"], ingredient["unit"]))
        self.txt_recipe.config(state="disabled")

    def __newRecipe(self):
        """
        Input: self - The instance of the object containing the function being called
        Returns: None
        Purpose: When called this function opens a dialogue window to get the name of the new recipe. If the Confirm button is pressed the internal function "confirm" closes the
        dialogue window and sends requests to the database handler to create a record with the given recipe. This recipe is then opened in an edit window.
        """
        def confirm(name):
            # Create the record, open it for editing, then tear down the dialog.
            Rid = self.database.newRecipe(name)
            self.editWindow = editWindow.editWindow(self, Rid, self.database)
            dia_name.destroy()
            self.__refreshList()

        dia_name = tk.Toplevel(self)
        fr_entry = tk.Frame(dia_name)
        la_dia = ttk.Label(fr_entry, text="Enter new recipe name: ", padding=(0, 10))
        ent_dia = ttk.Entry(fr_entry)
        la_dia.pack(side=tk.LEFT)
        ent_dia.pack(side=tk.LEFT)
        fr_entry.grid(row=0, column=0, columnspan=2, padx=(10, 10))
        btn_confirm = tk.Button(dia_name, text="Confirm", command=lambda: confirm(ent_dia.get()))
        btn_confirm.grid(row=1, column=0)
        btn_close = tk.Button(dia_name, text="Close", command=dia_name.destroy)
        btn_close.grid(row=1, column=1)

    def __edit(self):
        """
        Input: self - The instance of the object containing the function being called
        Returns: None
        Purpose: Get the currently selected index of lb_selection and translates that into an RID before calling a new instance of editwindow
        """
        selectionIndex = self.lb_selection.curselection()[0]
        if selectionIndex == (self.lb_selection.size() - 1):
            return  # "New..." row is not editable
        print(selectionIndex)
        self.editWindow = editWindow.editWindow(self, self.titleList[selectionIndex][0], self.database)

    def __copy(self):
        """
        Input: self - The instance of the object containing the function being called
        Returns: None
        Purpose: Duplicates the recipe currently selected in lb_selection
        """
        selectionIndex = self.lb_selection.curselection()[0]
        if selectionIndex == (self.lb_selection.size() - 1):
            return  # "New..." row cannot be copied
        recipe = self.database.getRecipe(self.titleList[selectionIndex][0])
        # Set version to an impossible version number so there is a guarantee
        # that it doesn't exactly match an existing record.
        recipe.update({"version": -1})
        ingredients = self.database.getIngredients(self.titleList[selectionIndex][0])
        # Plain loop instead of a side-effect list comprehension.
        for ing in ingredients:
            ing.update({"state": "add"})
        self.database.setorUpdateRecipe(recipe, ingredients)
        self.__refreshList()

    def __delete(self):
        """
        Input: self - The instance of the object containing the function being called
        Returns: None
        Purpose: Deletes the recipe currently selected in lb_selection
        """
        selectionIndex = self.lb_selection.curselection()[0]
        if selectionIndex == (self.lb_selection.size() - 1):
            return  # "New..." row cannot be deleted
        print(self.titleList[selectionIndex][0])
        self.database.deleteRecipe(self.titleList[selectionIndex][0])
        self.__refreshList()
if __name__ == "__main__":
    # Build the root window, mount the application frame and start the loop.
    root = tk.Tk()
    app = MainApplication(root)
    app.pack(side="top", fill="both", expand=True)
    root.mainloop()
31987027978 | import multiprocessing
from multiprocessing import Manager
from multiprocessing import freeze_support
import os, time, random
import numpy as np
import pandas as pd
from collections import Counter
import csv
process_num = 4  # number of worker processes used by both pipeline stages
## clear_all_data
# File names shared by the cleaning and edge-weight stages.
fileName_all = "movement-speeds-hourly-new-york-2020-1.csv"   # raw Uber speeds input
nodeName = "nodes.csv"                                        # (start, end) -> road_id lookup
fileName_clear = "movement-speeds-hourly-new-york-2020-1_clear.csv"  # stage-1 output
edgeWeightName = "edge_weight.csv"                            # stage-2 output
def read_data(fileName):
    """Load *fileName* (a comma-separated, gb18030-encoded CSV) into a DataFrame.

    :param fileName: path of the CSV file to read
    :return: pandas DataFrame with the file's contents
    """
    frame = pd.read_csv(fileName, sep=",", encoding="gb18030")
    return frame
def setcallback(x):
    """Append rows produced by clear_all_data to the cleaned CSV.

    :param x: (rows, lock) pair returned by a worker; the lock serialises
        file writes across processes
    :return: None; side effect is appending to
        movement-speeds-hourly-new-york-2020-1_clear.csv
    """
    rows, lock = x[0], x[1]
    # `with lock:` guarantees release even if the file write raises; the
    # original acquire()/release() pair leaked the lock on error.
    with lock:
        with open("movement-speeds-hourly-new-york-2020-1_clear.csv", 'a+', newline="") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(rows)
# Multiprocess function
def clear_all_data(ns_nodes, ns_dataframe, range_tuple, name, lock):
    """Replace (start_node_id, end_node_id) pairs with the matching road_id.

    :param ns_nodes: manager namespace whose .df holds the node lookup table
    :param ns_dataframe: manager namespace whose .df holds the raw speed data
    :param range_tuple: (start, end) slice of the dataframe this worker handles
    :param name: worker name used in log output
    :param lock: manager lock, passed through to the write callback
    :return: (processed rows, lock) pair consumed by setcallback
    """
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time()
    res_lst = []
    dataframe = ns_dataframe.df
    node = ns_nodes.df
    dataframe_part = dataframe[range_tuple[0]:range_tuple[1]]
    for i, row in dataframe_part.iterrows():
        # .iloc avoids deprecated positional int indexing on a labelled Series.
        start_id = row.iloc[2]
        end_id = row.iloc[3]
        tmp = node.loc[(node["start_node_id"] == start_id) & (node["end_node_id"] == end_id)]
        if tmp.values.size == 0:
            # No matching edge: warn and skip the row instead of crashing on
            # an out-of-bounds .values[0] lookup (the original behaviour).
            print("Wrong!!!!!!!!!!!!!")
            continue
        road_id = tmp["road_id"].values[0]
        res_lst.append([row.iloc[0], row.iloc[1], road_id, row.iloc[4], row.iloc[5]])
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))
    return res_lst, lock
# Multiprocess function
def statistic_edge_weight(ns_dataframe, ns_lst, range_tuple, name, lock):
    """Count how many records each road_id (edge) has in this worker's slice.

    :param ns_dataframe: manager namespace whose .df holds the cleaned data
    :param ns_lst: shared list collecting each worker's count dictionary
    :param range_tuple: (start, end) slice of the dataframe this worker handles
    :param name: worker name used in log output
    :param lock: lock guarding appends to the shared list
    :return: this worker's {road_id: count} dictionary
    """
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time()
    tmp_dict = {}
    dataframe = ns_dataframe.df
    dataframe_part = dataframe[range_tuple[0]:range_tuple[1]]
    for i, row in dataframe_part.iterrows():
        # dict.get replaces the original O(n) `in list(tmp_dict.keys())`
        # membership test; .iloc avoids deprecated positional int indexing.
        road_id = row.iloc[2]
        tmp_dict[road_id] = tmp_dict.get(road_id, 0) + 1
    lock.acquire()
    ns_lst.append(tmp_dict)
    lock.release()
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))
    return tmp_dict
def statistic_from_dict(ns_lst, weight_dict=None):
    """Merge the per-process count dictionaries into one weight dictionary.

    :param ns_lst: shared list of {road_id: count} dictionaries
    :param weight_dict: optional starting counts; defaults to empty.
        (The original used a mutable default argument `{}`.)
    :return: merged {road_id: total_count} dictionary
    """
    total = Counter(weight_dict or {})
    for partial in ns_lst:
        # Counter addition sums counts key-wise, matching the original logic.
        total = total + Counter(partial)
    return dict(total)
def main_clear_all_data():
    """
    Replace start_id and end_id with road_id
    input: movement-speeds-hourly-new-york-2020-1.csv and nodes.csv
    :return: movement-speeds-hourly-new-york-2020-1_clear.csv
    """
    # Shared manager objects so worker processes can see the dataframes.
    mgr = Manager()
    lock = mgr.Lock()
    ns_dataframe = mgr.Namespace()
    ns_nodes = mgr.Namespace()
    node = read_data(nodeName)
    ns_nodes.df = node
    dataframe = read_data(fileName_all)
    # Keep only the columns the workers index positionally.
    dataframe = dataframe[["day", "hour", "osm_start_node_id", "osm_end_node_id", "speed_mph_mean", "speed_mph_stddev"]]
    ns_dataframe.df = dataframe
    # Evenly split the row range into process_num contiguous slices.
    index_lst = np.linspace(0, dataframe.shape[0], process_num + 1, endpoint=True).astype(int)
    # Write the CSV header once before workers start appending rows.
    # NOTE(review): "spped_mph_stddev" header typo is read downstream — fix
    # in tandem with any consumers of the cleaned file.
    with open(fileName_clear, "w",newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["day","hour","road_id","spped_mph_stddev"] if False else ["day","hour","road_id","speed_mph_mean","spped_mph_stddev"])
    p = multiprocessing.Pool(process_num)
    for i in range(1,process_num + 1):
        # Each worker processes one slice; setcallback appends its rows.
        p.apply_async(clear_all_data, args=(ns_nodes, ns_dataframe, (index_lst[i - 1], index_lst[i]), i,lock,), callback=setcallback)
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
def main_statistic_edge_weight():
    """
    Main function to calculate the amount of data for each edge
    input: movement-speeds-hourly-new-york-2020-1.csv_clear.csv file from last function
    :return: edge_weight.csv
    """
    all_start_time = time.time()
    # Shared manager objects: the cleaned dataframe plus a result list.
    mgr = Manager()
    lock = mgr.Lock()
    ns_dataframe = mgr.Namespace()
    ns_lst = mgr.list()
    dataframe = read_data(fileName_clear)
    ns_dataframe.df = dataframe
    # Evenly split the row range into process_num contiguous slices.
    index_lst = np.linspace(0, dataframe.shape[0], process_num + 1, endpoint=True).astype(int)
    p = multiprocessing.Pool(process_num)
    for i in range(1,process_num + 1):
        p.apply_async(statistic_edge_weight, args=(ns_dataframe, ns_lst, (index_lst[i - 1], index_lst[i]), i,lock,), )
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    # print(ns_lst)
    print('All subprocesses done.')
    # Merge per-process counts into one {road_id: count} dictionary.
    weight_dict = statistic_from_dict(ns_lst)
    key_lst = sorted(weight_dict)
    # print(weight_dict)
    nodes = pd.read_csv(nodeName)
    add_lst = []
    for i in key_lst:
        add_lst.append(weight_dict[i])
    # NOTE(review): assigning add_lst as a column assumes every node row has a
    # count and that sorted road_id order matches nodes.csv row order — verify.
    nodes["weight"] = add_lst
    nodes.to_csv(edgeWeightName, index=False, sep=',')
    all_end_time = time.time()
    print("time cost: ", (all_end_time - all_start_time) / 60, "min")
def main():
    """Entry point: run the cleaning stage of the pipeline."""
    # NOTE(review): main_statistic_edge_weight() is never invoked here —
    # confirm whether the second stage is meant to run as well.
    main_clear_all_data()
if __name__=='__main__':
    main()
| HarryZhao2000/Traffic-Prediction-for-New-York-GNN | Preprocessing/multi_processing.py | multi_processing.py | py | 6,353 | python | en | code | 0 | github-code | 50 |
20916537422 | import numpy as np
import pygame
import sys
import time
edge_norm = 80  # NOTE(review): never referenced in this script — confirm before removing
pos_vec = np.array([350, 500])    # anchor vertex shared by every polygon
point2_vec = np.array([-100, 0])  # common first edge vector (length 100, pointing left)
point2 = pos_vec + point2_vec     # closing vertex: polygon generation stops here
def recursive_shape_generator(RotMtx, point, vector, point2, point_array):
    """Walk a regular polygon edge by edge, appending each new vertex to
    point_array, until the walk returns (within tolerance) to point2.

    RotMtx rotates the shared edge by the polygon's interior angle; the
    initial vector is the common edge all polygons share.
    Returns the final vertex (which coincides with point2).
    """
    current_point = point
    current_vec = vector
    while True:
        current_vec = np.matmul(RotMtx, current_vec)     # rotate the edge
        current_point = current_point + current_vec      # step to next vertex
        point_array.append(current_point)
        # Stop once we land back on the closing vertex (float tolerance).
        if np.linalg.norm(point2 - current_point) < 0.0001:
            return current_point
        current_vec = -current_vec                       # flip for the next turn
# Pre-compute vertex lists for every regular polygon from 3 to 13 sides,
# all sharing the edge (pos_vec, point2).
polygons = []
lower_bound = 3
upper_bound = 13
for n in range(lower_bound, upper_bound+1):
    # Interior angle of a regular n-gon: (n-2)*pi/n.
    angle = (n-2)*np.pi/n
    RotMtx = np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
    point_set = [pos_vec]
    recursive_shape_generator(RotMtx, pos_vec, point2_vec, point2, point_set)
    polygons.append(point_set)
# Animation: repeatedly draw a growing/shrinking fan of the pre-computed polygons.
screen = pygame.display.set_mode((625, 550))
RED = (255, 0, 0)
WHITE = (255, 255, 255)
direction = -1  # +1 grows the fan of polygons, -1 shrinks it
current = 0     # index of the last polygon currently drawn
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    screen.fill(WHITE)
    # Reverse the animation at either end of the polygon list.
    if current == 0 or current == len(polygons):
        direction *= -1
    # Slice the plain Python list instead of np.array(polygons): the polygons
    # have different vertex counts, and NumPy >= 1.24 raises on building an
    # array from such ragged nested sequences. The slice is all we need.
    off_set = polygons[:current + 1]
    for polygon in off_set:
        n = len(polygon)
        # Close the outline by wrapping indices modulo the vertex count.
        for i in range(n + 1):
            pygame.draw.line(screen, RED,
                             (polygon[i % n][0], polygon[i % n][1]),
                             (polygon[(i + 1) % n][0], polygon[(i + 1) % n][1]), 1)
    current += direction
    time.sleep(0.1)
    pygame.display.flip()
| tnycnsn/Recursive-Equilaterals | recursive equilaterals/recursive_equilaterals.py | recursive_equilaterals.py | py | 1,765 | python | en | code | 0 | github-code | 50 |
25275703866 | from django import forms
class AddTagForm(forms.Form):
    """Form for attaching a tag (new or existing) to a movie."""

    # Hidden field identifying the movie being tagged; optional so the form
    # can be rendered before a movie is selected.
    movie_id = forms.CharField(
        widget=forms.HiddenInput(),
        required=False
    )
    # Free-text tag name (labels/help text are user-facing Spanish strings).
    tag = forms.CharField(
        label='Etiqueta', required=True,
        help_text='Etiqueta nueva o existente.'
    )
| moz667/homodaba | homodaba/homodaba/forms.py | forms.py | py | 276 | python | en | code | 3 | github-code | 50 |
6411731853 | import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from collections import OrderedDict # noqa
import json # noqa
import traceback # noqa
from typing import Dict, Any # noqa
# NOTE(review): the original "Disable insecure warnings" comment had no
# matching call (e.g. urllib3.disable_warnings) — confirm intent.
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"  # ISO8601 format with UTC, default in XSOAR
# Maps SecneurX/STIX IOC type names to XSOAR indicator types.
SNX_IOC_TYPES_TO_DEMISTO_TYPES = {
    'url': FeedIndicatorType.URL,
    'md5': FeedIndicatorType.File,
    'sha-1': FeedIndicatorType.File,
    'sha-256': FeedIndicatorType.File,
    'ipv4-addr': FeedIndicatorType.IP,
    'domain': FeedIndicatorType.Domain,
    'ipv6-addr': FeedIndicatorType.IPv6,
    'email-addr': FeedIndicatorType.Email,
    'domain-name': FeedIndicatorType.Domain,
    'file:hashes.MD5': FeedIndicatorType.File
}
# Maps SecneurX verdict strings to DBot reputation scores.
SNX_VERDICT_TO_DBOTSCORE = {
    'No Threats': Common.DBotScore.GOOD,
    'Suspicious': Common.DBotScore.SUSPICIOUS,
    'Malware': Common.DBotScore.BAD,
    'Ransomware': Common.DBotScore.BAD
}
class Client(BaseClient):
    """HTTP client wrapper for the SecneurX Analysis sandbox REST API.

    Every method returns a (response, error) pair: on success the error is
    None, on failure the response is None and the caught exception is
    returned for the caller to translate.
    """

    def get_response(self, urlSuffix: str, paramsDict: Dict[str, str]):
        """Issue a GET request; /get_report responses are plain text,
        everything else is parsed as JSON."""
        resp_type = 'text' if urlSuffix == '/get_report' else 'json'
        try:
            response = self._http_request(
                method="GET",
                url_suffix=urlSuffix,
                params=paramsDict,
                resp_type=resp_type,
                timeout=90
            )
        except Exception as exc:
            return None, exc
        return response, None

    def submit_file(self, urlSuffix: str, fileData: Dict[str, Any], paramsDict: Dict[str, str]):
        """POST a file (multipart upload) for analysis."""
        try:
            response = self._http_request(
                method="POST",
                url_suffix=urlSuffix,
                files=fileData,
                params=paramsDict,
                timeout=90
            )
        except Exception as exc:
            return None, exc
        return response, None

    def submit_url(self, urlSuffix: str, paramsDict: Dict[str, str], urlParams: Dict[str, str]):
        """POST a URL (form body) for analysis."""
        try:
            response = self._http_request(
                method="POST",
                url_suffix=urlSuffix,
                data=urlParams,
                params=paramsDict,
                timeout=90
            )
        except Exception as exc:
            return None, exc
        return response, None
class SNXReportParser():
    """Key names used when reading fields out of a SecneurX analysis report."""
    JSON_URL = "url"
    JSON_IOC = "IOC"
    JSON_KEY = "key"
    JSON_DATA = "data"
    JSON_TYPE = "type"
    JSON_VALUE = "value"
    JSON_OBJECTS = "objects"
    JSON_PATTERN = "pattern"
    JSON_PLATFORM = "platform"
    JSON_HOSTNAME = "hostname"
    JSON_DNS_REQ = "dnsLookups"
    JSON_TAGS = "ArtifactsTags"
    JSON_FILE_SHA256 = "sha256"
    JSON_JA3_LIST = "ja3Digests"
    JSON_METHOD = "requestMethod"
    JSON_RESOLVEDIPS = "resolvedIps"
    JSON_VERDICTS = "ArtifactsVerdict"
    JSON_FILE_WRITTEN = "filesWritten"
    JSON_FILE_DROPPED = "filesDropped"
    JSON_FILE_DELETED = "filesDeleted"
    JSON_FILE_MODIFIED = "fileModified"
    JSON_HTTP_REQ = "httpConversations"
    JSON_ANALYSIS_TIME = "completedTime"
    JSON_REGISTRY_SET = "registryKeysSet"
    JSON_SHA256 = "analysisSubjectSha256"
    JSON_STATUS_CODE = "responseStatusCode"
    JSON_PROCESS_CREATED = "processesCreated"
    JSON_REGISTRY_DELETED = "registryKeysDeleted"
    SNX_SUBMISSION_NAME_KEY = "analysisSubjectName"
    SNX_SUBMISSION_TYPE_KEY = "analysisSubjectType"
class JsonTableParser():
    """Column headings used when rendering a report as a markdown table."""
    SNX_URL = "URL"
    SNX_TAGS = "Tags"
    SNX_SHA256 = "SHA256"
    SNX_METHOD = "Method"
    SNX_VERDICT = "Verdict"
    SNX_DNS_REQ = "DNSRequests"
    SNX_JA3_LIST = "JA3Digests"
    SNX_PLATFORM_KEY = "Platform"
    SNX_HTTP_REQ = "HTTPRequests"
    SNX_STATUS_CODE = "StatusCode"
    SNX_REGISTRY_SET = "RegistrySet"
    SNX_FILE_WRITTEN = "FileCreated"
    SNX_FILE_DROPPED = "FileDropped"
    SNX_FILE_DELETED = "FileDeleted"
    SNX_FILE_MODIFIED = "FileModified"
    SNX_ANALYSIS_TIME = "SubmissionTime"
    SNX_PROCESS_CREATED = "ProcessCreated"
    SNX_REGISTRY_DELETED = "RegistryDeleted"
class SNXResponse():
    """API response keys, request parameter names and status constants."""
    # Numeric success flags returned in the API's "success" field.
    FAILED = 0
    SUCCESS = 1
    SNX_URL_KEY = "url"
    SNX_MSG_KEY = "msg"
    SNX_IOC_KEY = "IOC"
    SNX_FILE_KEY = "file"
    SNX_NULL_KEY = "NULL"
    SAMPLE_KEY = "Sample"
    POLLING_KEY = "polling"
    TIMEOUT_KEY = "timeout"
    SNX_RESULT_KEY = "data"
    INTERVAL_KEY = "interval"
    SNX_SHA256_KEY = "sha256"
    SNX_FAILED_KEY = "Failed"
    SNX_STATUS_KEY = "status"
    SNX_MALWARE_KEY = "Malware"
    SNX_VERDICT_KEY = "verdict"
    SNX_SUCCESS_KEY = "success"
    SNX_REBOOT_KEY = "reboot_on"
    SNX_WINDOWS_KEY = "Windows7"
    SNX_PLATFORM_KEY = "platform"
    SNX_PRIORITY_KEY = "priority"
    SNX_DURATION_KEY = "duration"
    SNX_FILENAME_KEY = "file_name"
    SNX_ERROR_MSG_KEY = "error_msg"
    SNX_TASK_UUID_KEY = "task_uuid"
    SNX_COMPLETED_KEY = "Completed"
    SNX_EXTENSTION_KEY = "extension"
    SNX_LAST_HOURS_KEY = "last_hours"
    SNX_LAST_COUNT_KEY = "last_count"
    SNX_PROVIDER = "SecneurX Analysis"
    SNX_REPORT_KEY = "report_available"
    SNX_REPORT_FORMAT_KEY = "report_format"
    SNX_FILE_PWD_KEY = "compressed_password"
class SNXErrorMsg():
    """Human-readable messages surfaced to the XSOAR war room."""
    SUCCESS_MSG = "ok"
    CONFIG_ERR = "Configuration Error"
    INVALID_ERR = "Endpoint Error: Invalid Server URL"
    FAILED_ERR = "Submit Error: Sample submittion failed"
    AUTH_ERR = "Authorization Error: make sure API Key is correctly set"
    SERVER_ERR = "Endpoint Error: Invalid Server URL (or) Invalid input parameters values"
    NOT_FOUND_ERR = "Report could not be generated"
def test_module(client: Client) -> Any:
    """XSOAR connectivity test: fetch the status of the 2 most recent samples.

    NOTE(review): relies on get_status_cmd, defined elsewhere in this file.
    Returns "ok" on success; any exception propagates and fails the test.
    """
    get_status_cmd(client, {SNXResponse.SNX_LAST_COUNT_KEY: '2'})
    return SNXErrorMsg.SUCCESS_MSG
def create_request_json(argsDict: Dict[str, str]) -> Dict:
    """Translate XSOAR command arguments into SecneurX API query parameters.

    :param argsDict: raw command arguments from demisto.args()
    :return: dict of API parameter names to values; keys absent from
        argsDict (or with empty optional values) are omitted
    """
    params = {}
    try:
        # Keys that already use the API's parameter names pass through as-is.
        for key in (SNXResponse.SNX_TASK_UUID_KEY,
                    SNXResponse.SNX_LAST_COUNT_KEY,
                    SNXResponse.SNX_LAST_HOURS_KEY):
            if key in argsDict:
                params[key] = argsDict.get(key)
        if "Platform" in argsDict:
            params[SNXResponse.SNX_PLATFORM_KEY] = str(argsDict.get("Platform"))
        if "Priority" in argsDict:
            params[SNXResponse.SNX_PRIORITY_KEY] = str(argsDict.get("Priority"))
        # Optional free-text values are only forwarded when non-empty.
        extnValue = argsDict.get("Extension")
        if extnValue:
            params[SNXResponse.SNX_EXTENSTION_KEY] = extnValue
        durationValue = argsDict.get("Duration")
        if durationValue:
            params[SNXResponse.SNX_DURATION_KEY] = durationValue
        pwdValue = argsDict.get("File Password")
        if pwdValue:
            params[SNXResponse.SNX_FILE_PWD_KEY] = pwdValue
        if "Reboot" in argsDict:
            params[SNXResponse.SNX_REBOOT_KEY] = argsDict.get("Reboot")
        if SNXResponse.SNX_REPORT_FORMAT_KEY in argsDict:
            params[SNXResponse.SNX_REPORT_FORMAT_KEY] = argsDict.get(SNXResponse.SNX_REPORT_FORMAT_KEY)
    except Exception as e:
        # Best-effort: log and return whatever was collected so far.
        demisto.error(e)
    return params
def error_response(err_msg) -> str:
    """Translate a request exception into a user-facing error message.

    :param err_msg: exception raised by the client; expected to carry a
        `.res.status_code` attribute from the failed HTTP response
    :return: matching SNXErrorMsg message; INVALID_ERR when the exception
        has no usable status code
    """
    status_to_msg = {
        401: SNXErrorMsg.AUTH_ERR,
        400: SNXErrorMsg.SERVER_ERR,
        404: SNXErrorMsg.NOT_FOUND_ERR,
        500: SNXErrorMsg.FAILED_ERR,
    }
    try:
        code = err_msg.res.status_code
        msg = status_to_msg.get(code, SNXErrorMsg.CONFIG_ERR)
    except Exception:
        # No response object attached (e.g. connection failure).
        msg = SNXErrorMsg.INVALID_ERR
    return msg
def parse_response(response):
    """Flatten a raw SecneurX analysis report into an ordered dict keyed by
    the markdown table headings in JsonTableParser.

    :param response: report dict as returned by the API
    :return: OrderedDict of displayable fields (only those present)
    :raises DemistoException: on any parsing failure
    """
    try:
        jsonContent = OrderedDict()
        if SNXReportParser.JSON_SHA256 in response.keys():
            jsonContent[JsonTableParser.SNX_SHA256] = response[SNXReportParser.JSON_SHA256]
        if SNXReportParser.JSON_PLATFORM in response.keys():
            jsonContent[JsonTableParser.SNX_PLATFORM_KEY] = response[SNXReportParser.JSON_PLATFORM]
        if SNXReportParser.JSON_ANALYSIS_TIME in response.keys():
            jsonContent[JsonTableParser.SNX_ANALYSIS_TIME] = response[SNXReportParser.JSON_ANALYSIS_TIME]
        if SNXReportParser.JSON_VERDICTS in response.keys():
            # Normalise verdict casing, e.g. "MALWARE" -> "Malware".
            verdictResult = None
            verdictValue = response[SNXReportParser.JSON_VERDICTS]
            verdictResult = verdictValue.lower().capitalize()
            jsonContent[JsonTableParser.SNX_VERDICT] = verdictResult
        if SNXReportParser.JSON_TAGS in response.keys():
            jsonContent[JsonTableParser.SNX_TAGS] = response[SNXReportParser.JSON_TAGS]
        if SNXReportParser.JSON_DNS_REQ in response.keys():
            # Pair each hostname with its resolved IPs, one cell per lookup.
            dnsList = []
            for dnsData in response[SNXReportParser.JSON_DNS_REQ]:
                dnsReq = []
                if SNXReportParser.JSON_HOSTNAME in dnsData.keys():
                    dnsReq.append(dnsData[SNXReportParser.JSON_HOSTNAME])
                if SNXReportParser.JSON_RESOLVEDIPS in dnsData.keys():
                    dnsReq.append(dnsData[SNXReportParser.JSON_RESOLVEDIPS])
                data = formatCell(dnsReq)
                dnsList.append(data)
            if dnsList:
                jsonContent[JsonTableParser.SNX_DNS_REQ] = dnsList
        if SNXReportParser.JSON_HTTP_REQ in response.keys():
            # Render each conversation as "[METHOD] url [Status : code]";
            # rows after the first are prefixed with a newline.
            httpList = []
            split_line = ""
            for httpData in response[SNXReportParser.JSON_HTTP_REQ]:
                methodValue = None
                requestValue = None
                statusCodeValue = None
                if SNXReportParser.JSON_METHOD in httpData.keys():
                    methodValue = httpData[SNXReportParser.JSON_METHOD]
                if SNXReportParser.JSON_URL in httpData.keys():
                    requestValue = httpData[SNXReportParser.JSON_URL]
                if SNXReportParser.JSON_STATUS_CODE in httpData.keys():
                    statusCodeValue = httpData[SNXReportParser.JSON_STATUS_CODE]
                if methodValue and requestValue and statusCodeValue:
                    httpList.append(f"{split_line}[" + methodValue + "] "
                                    + requestValue + " [Status : " + str(statusCodeValue) + "]")
                    split_line = '\n'
            if httpList:
                jsonContent[JsonTableParser.SNX_HTTP_REQ] = httpList
        if SNXReportParser.JSON_JA3_LIST in response.keys():
            jsonContent[JsonTableParser.SNX_JA3_LIST] = response[SNXReportParser.JSON_JA3_LIST]
        if SNXReportParser.JSON_PROCESS_CREATED in response.keys():
            creationList = convert_json_to_str(response[SNXReportParser.JSON_PROCESS_CREATED])
            jsonContent[JsonTableParser.SNX_PROCESS_CREATED] = creationList
        if SNXReportParser.JSON_REGISTRY_SET in response.keys():
            # Only the registry key names are shown, newline-separated.
            registrySetList = []
            split_line = ""
            for registry_data in response[SNXReportParser.JSON_REGISTRY_SET]:
                keyData = registry_data[SNXReportParser.JSON_KEY]
                registrySetList.append(f"{split_line}" + keyData)
                split_line = "\n"
            if registrySetList:
                jsonContent[JsonTableParser.SNX_REGISTRY_SET] = registrySetList
        if SNXReportParser.JSON_REGISTRY_DELETED in response.keys():
            jsonContent[JsonTableParser.SNX_REGISTRY_DELETED] = response[SNXReportParser.JSON_REGISTRY_DELETED]
        if SNXReportParser.JSON_FILE_WRITTEN in response.keys():
            fileCreatedList = convert_json_to_str(response[SNXReportParser.JSON_FILE_WRITTEN])
            jsonContent[JsonTableParser.SNX_FILE_WRITTEN] = fileCreatedList
        if SNXReportParser.JSON_FILE_DROPPED in response.keys():
            # Dropped files are shown as "type : sha256".
            file_drop_list = []
            for file_drop in response[SNXReportParser.JSON_FILE_DROPPED]:
                sha256Value = file_drop[SNXReportParser.JSON_FILE_SHA256]
                typeValue = file_drop[SNXReportParser.JSON_TYPE]
                file_drop_list.append(typeValue + " : " + sha256Value)
            if file_drop_list:
                jsonContent[JsonTableParser.SNX_FILE_DROPPED] = file_drop_list
        if SNXReportParser.JSON_FILE_DELETED in response.keys():
            fileDeletedList = convert_json_to_str(response[SNXReportParser.JSON_FILE_DELETED])
            jsonContent[JsonTableParser.SNX_FILE_DELETED] = fileDeletedList
        if SNXReportParser.JSON_FILE_MODIFIED in response.keys():
            fileModifiedList = convert_json_to_str(response[SNXReportParser.JSON_FILE_MODIFIED])
            jsonContent[JsonTableParser.SNX_FILE_MODIFIED] = fileModifiedList
        if SNXReportParser.JSON_IOC in response.keys() and SNXReportParser.JSON_DATA in response[SNXReportParser.JSON_IOC].keys():
            iocList = parse_report_iocs(response[SNXReportParser.JSON_IOC][SNXReportParser.JSON_DATA])
            jsonContent[SNXResponse.SNX_IOC_KEY] = iocList
        return jsonContent
    except Exception as e:
        raise DemistoException(e)
def convert_json_to_str(data_list):
    """Serialise each entry to a JSON string, stripping any outer quotes;
    entries after the first are prefixed with a newline for table display.

    :param data_list: list of JSON-serialisable values
    :return: list of formatted strings
    :raises DemistoException: if serialisation fails
    """
    formated_list = []
    try:
        for idx, entry in enumerate(data_list):
            text = json.dumps(entry).rstrip('"').lstrip('"')
            prefix = "" if idx == 0 else "\n"
            formated_list.append(prefix + text)
    except Exception as exc:
        raise DemistoException(exc)
    return formated_list
def parse_report_iocs(ioc_json):
    """Extract human-readable "type : value" IOC strings from the report's
    STIX-style bundle.

    :param ioc_json: the report's IOC payload (dict with an "objects" list)
    :return: list of "type : value" strings
    :raises DemistoException: on any parsing failure
    """
    parsed_ioc_list = []
    try:
        for ioc_data in ioc_json.get(SNXReportParser.JSON_OBJECTS, []):
            if SNXReportParser.JSON_PATTERN not in ioc_data:
                continue
            # Patterns look like "[url:value = 'http://x']".
            patternData = ioc_data[SNXReportParser.JSON_PATTERN]
            patternData = patternData.replace('[', '').replace(']', '')
            patternKey = patternData.split(":")[0]
            patternValue = patternData.split(" = ")[1].replace("'", '')
            # Bug fix: the original tested membership with patternKey.lower()
            # but indexed the map with the unlowered key, raising KeyError for
            # any mixed-case type name. Look up with the lowered key instead.
            lowered = patternKey.lower()
            if lowered in SNX_IOC_TYPES_TO_DEMISTO_TYPES:
                patternKey = SNX_IOC_TYPES_TO_DEMISTO_TYPES[lowered]
            parsed_ioc_list.append(patternKey + " : " + str(patternValue))
    except Exception as e:
        raise DemistoException(e)
    return parsed_ioc_list
def format_report_contents(contents):
    """Flatten nested dicts (and dicts inside lists) into display strings.

    Dict values are rendered as `key: value, ...` without braces or quotes;
    scalar values pass through untouched. Key order is preserved.

    :param contents: parsed report dict
    :return: OrderedDict of display-ready values
    :raises DemistoException: on any formatting failure
    """
    try:
        def _stringify(obj):
            # Render a dict via JSON, then drop braces and quote characters.
            return json.dumps(obj).lstrip('{').rstrip('}').replace('\'', '').replace('\"', '')

        table_contents = OrderedDict()
        for key, val in contents.items():
            if isinstance(val, dict):
                table_contents[key] = _stringify(val)
            elif isinstance(val, list):
                table_contents[key] = [
                    _stringify(item) if isinstance(item, dict) else item
                    for item in val
                ]
            else:
                table_contents[key] = val
        return table_contents
    except Exception as e:
        raise DemistoException(e)
def parse_dbot_score(reportJson):
    """Build a Common.DBotScore for the analysed file or URL.

    File submissions are keyed by their SHA-256; everything else is treated
    as a URL keyed by the submission name. Returns None when the report is
    empty or lacks an indicator value.

    :param reportJson: flattened report dict
    :return: Common.DBotScore or None
    :raises DemistoException: on any parsing failure
    """
    dbotScore = None
    try:
        if reportJson:
            submissionType = reportJson.get(SNXReportParser.SNX_SUBMISSION_TYPE_KEY, None)
            verdictValue = reportJson.get(SNXReportParser.JSON_VERDICTS, None)
            # Unknown or missing verdicts map to a neutral score of 0.
            verdictScore = SNX_VERDICT_TO_DBOTSCORE.get(verdictValue, 0)
            if submissionType == SNXResponse.SNX_FILE_KEY:
                indicatorValue = reportJson.get(SNXReportParser.JSON_SHA256, None)
                indicatorType = DBotScoreType.FILE
            else:
                indicatorValue = reportJson.get(SNXReportParser.SNX_SUBMISSION_NAME_KEY, None)
                indicatorType = DBotScoreType.URL
            if indicatorValue:
                dbotScore = Common.DBotScore(
                    indicator=indicatorValue,
                    indicator_type=indicatorType,
                    score=verdictScore,
                    integration_name=SNXResponse.SNX_PROVIDER
                )
    except Exception as e:
        raise DemistoException(e)
    return dbotScore
def parse_report_entity(reportJson):
    """Build a Common.File or Common.URL indicator from a flattened report.

    Returns None when no DBot score could be derived or the report lacks a
    submission name.

    :param reportJson: flattened report dict
    :return: Common.File, Common.URL or None
    :raises DemistoException: on any parsing failure
    """
    dbot_score = parse_dbot_score(reportJson)
    indicator = None
    try:
        if reportJson and dbot_score:
            submissionType = reportJson.get(SNXReportParser.SNX_SUBMISSION_TYPE_KEY, None)
            verdictValue = reportJson.get(SNXReportParser.JSON_VERDICTS, None)
            # When no explicit tags exist, fall back to the verdict string.
            tagList = reportJson.get(SNXReportParser.JSON_TAGS, verdictValue)
            sha256Value = reportJson.get(SNXReportParser.JSON_SHA256, None)
            subjectName = reportJson.get(SNXReportParser.SNX_SUBMISSION_NAME_KEY, None)
            if subjectName:
                if submissionType == SNXResponse.SNX_FILE_KEY:
                    indicator = Common.File(
                        name=subjectName,
                        dbot_score=dbot_score,
                        sha256=sha256Value,
                        tags=tagList,
                        description=verdictValue
                    )
                elif submissionType == SNXResponse.SNX_URL_KEY:
                    indicator = Common.URL(
                        url=subjectName,
                        dbot_score=dbot_score,
                        tags=tagList,
                        description=verdictValue
                    )  # type: ignore
    except Exception as e:
        raise DemistoException(e)
    return indicator
def post_submit_file(client: Client, args: Dict[str, str]) -> CommandResults:
    """Submit a file (referenced by war-room EntryID) for sandbox analysis.

    :param client: configured API client
    :param args: command arguments; requires 'EntryID', honours 'platform'
        plus the generic options handled by create_request_json
    :return: CommandResults describing success or failure of the submission
    :raises DemistoException: when no EntryID is supplied
    """
    urlSuffix = "/submit_file"
    entryId = args.get('EntryID') or None
    if entryId is None:
        raise DemistoException("Entry ID Not Found")
    # Default the analysis platform when the caller did not pick one.
    platformValue = args.get('platform') or SNXResponse.SNX_WINDOWS_KEY
    params = create_request_json(args)
    if 'platform' not in params:
        params['platform'] = platformValue
    fileEntry = demisto.getFilePath(entryId)
    fileName = fileEntry['name']
    filePath = fileEntry['path']
    # Context manager closes the handle after the upload; the original code
    # leaked the open file object.
    with open(filePath, 'rb') as fh:
        fileData = {'file': (fileName, fh)}
        response, err_msg = client.submit_file(urlSuffix, fileData, params)
    if response:
        if SNXResponse.SNX_SUCCESS_KEY in response.keys() and SNXResponse.SNX_RESULT_KEY in response.keys():
            finalJson = response[SNXResponse.SNX_RESULT_KEY]
            readableOutput = tableToMarkdown(f"File Submitted Successfully: {fileName}", finalJson)
            return CommandResults(
                readable_output=readableOutput,
                outputs_prefix="SecneurXAnalysis.SubmitFile",
                outputs=finalJson
            )
        else:
            readableOutput = tableToMarkdown(f"File Submission Failed: {fileName}", response)
            return CommandResults(
                readable_output=readableOutput,
                outputs_prefix="SecneurXAnalysis.SubmitFile",
                outputs=response
            )
    else:
        # Request failed outright; translate the exception for the war room.
        msg = error_response(err_msg)
        outputJson = {SNXResponse.SNX_ERROR_MSG_KEY: msg}
        readableOutput = tableToMarkdown(f"File Submission Failed: {fileName}", t=outputJson)
        return CommandResults(readable_output=readableOutput, outputs_prefix="SecneurXAnalysis.SubmitFile", outputs=outputJson)
def post_submit_url(client: Client, args: Dict[str, str]) -> CommandResults:
    """Submit a URL for sandbox analysis and report the outcome.

    :param client: configured API client
    :param args: command arguments; requires a non-empty 'URL'
    :return: CommandResults describing success or failure of the submission
    :raises DemistoException: when the URL argument is missing or empty
    """
    urlSuffix = "/analyze_url"
    targetUrl = args.get("URL") or None
    if not targetUrl:
        raise DemistoException("Input url value is empty")
    queryParams = create_request_json(args)
    requestBody = {SNXReportParser.JSON_URL: targetUrl}
    response, err_msg = client.submit_url(urlSuffix, queryParams, requestBody)
    if not response:
        # Request failed outright; translate the exception for the war room.
        failureJson = {SNXResponse.SNX_ERROR_MSG_KEY: error_response(err_msg)}
        return CommandResults(
            readable_output=tableToMarkdown("URL Submission Failed", t=failureJson),
            outputs_prefix="SecneurXAnalysis.SubmitURL",
            outputs=failureJson
        )
    if SNXResponse.SNX_SUCCESS_KEY in response.keys() and SNXResponse.SNX_RESULT_KEY in response.keys():
        finalJson = response[SNXResponse.SNX_RESULT_KEY]
        return CommandResults(
            readable_output=tableToMarkdown("URL Submitted Successfuly", finalJson),
            outputs_prefix="SecneurXAnalysis.SubmitURL",
            outputs=finalJson
        )
    return CommandResults(
        readable_output=tableToMarkdown("URL Submission Failed", response),
        outputs_prefix="SecneurXAnalysis.SubmitURL",
        outputs=response
    )
def get_verdict_cmd(client: Client, args: Dict[str, str]) -> CommandResults:
    """Fetch the analysis verdict of a previously submitted sample.

    Args:
        client: API client wrapping the SecneurX Analysis service.
        args: Demisto command arguments; must contain the task-UUID key.

    Returns:
        CommandResults with the verdict data, or a "Failed" status entry
        when the API reports no success or the request itself fails.

    Raises:
        DemistoException: when the task-UUID argument is missing.
    """
    taskUuid = args.get(SNXResponse.SNX_TASK_UUID_KEY) or None
    if taskUuid is None:
        raise DemistoException("Task UUID Parameter value is not found")
    else:
        urlSuffix = "/get_verdict"
        params = {SNXResponse.SNX_TASK_UUID_KEY: taskUuid}
        response, err_msg = client.get_response(urlSuffix, params)
        if response:
            # API responded; only treat it as a verdict when the success flag is set.
            if SNXResponse.SNX_SUCCESS_KEY in response.keys() and response[SNXResponse.SNX_SUCCESS_KEY] == SNXResponse.SUCCESS:
                dataResult = response[SNXResponse.SNX_RESULT_KEY]
                readableOutput = tableToMarkdown(f"SecneurX Analysis - Verdict Result: {taskUuid}", t=dataResult)
                return CommandResults(
                    readable_output=readableOutput,
                    outputs=dataResult,
                    outputs_key_field="task_uuid",
                    outputs_prefix="SecneurXAnalysis.Verdict",
                    raw_response=dataResult
                )
            else:
                # API answered but without success: report Failed, keep raw payload.
                readableOutput = tableToMarkdown(f"SecneurX Analysis - Verdict Result: {taskUuid}", t=response)
                return CommandResults(
                    readable_output=readableOutput,
                    outputs={"Status": SNXResponse.SNX_FAILED_KEY},
                    outputs_key_field="task_uuid",
                    outputs_prefix="SecneurXAnalysis.Verdict",
                    raw_response=response
                )
        else:
            # Transport-level failure: translate err_msg into a readable entry.
            msg = error_response(err_msg)
            outputJson = {SNXResponse.SNX_ERROR_MSG_KEY: msg, "Status": SNXResponse.SNX_FAILED_KEY}
            readableOutput = tableToMarkdown("SecneurX Analysis - Error", t=outputJson)
            return CommandResults(
                readable_output=readableOutput,
                outputs=outputJson,
                outputs_prefix="SecneurXAnalysis.Verdict",
                outputs_key_field="task_uuid"
            )
def get_completed_cmd(client: Client, args: Dict[str, str]) -> CommandResults:
    """List samples whose analysis has completed.

    Args:
        client: API client wrapping the SecneurX Analysis service.
        args: Demisto command arguments forwarded to create_request_json.

    Returns:
        CommandResults with the completed-sample table, or a "No samples"
        message when the result list is empty.

    Raises:
        DemistoException: when the HTTP request itself fails.
    """
    urlSuffix = "/get_completed"
    params = create_request_json(args)
    response, err_msg = client.get_response(urlSuffix, params)
    if response:
        if SNXResponse.SNX_SUCCESS_KEY in response.keys() and response[SNXResponse.SNX_SUCCESS_KEY] == SNXResponse.SUCCESS:
            # SNX_NULL_KEY doubles as the "missing result" sentinel.
            reportList = response.get(SNXResponse.SNX_RESULT_KEY, SNXResponse.SNX_NULL_KEY)
            if reportList != SNXResponse.SNX_NULL_KEY and len(reportList) > 0:
                readableOutput = tableToMarkdown("SecneurX Analysis - List of Completed Samples:", t=reportList,
                                                 headers=[
                                                     SNXResponse.SNX_TASK_UUID_KEY, SNXResponse.SNX_VERDICT_KEY,
                                                     SNXResponse.SNX_STATUS_KEY, SNXResponse.SNX_REPORT_KEY])
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Completed",
                    raw_response=reportList
                )
            else:
                msgJson = {"msg": "No samples to display"}
                readableOutput = tableToMarkdown("SecneurX Analysis - List of Completed Samples: ", msgJson)
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Completed",
                    outputs=msgJson
                )
        else:
            # API answered without success: pass the raw payload through.
            readableOutput = tableToMarkdown("", response)
            return CommandResults(
                readable_output=readableOutput,
                outputs_prefix="SecneurXAnalysis.Completed",
                outputs=response
            )
    else:
        msg = error_response(err_msg)
        raise DemistoException(msg)
def get_pending_cmd(client: Client, args: Dict[str, str]) -> CommandResults:
    """List samples that are still being processed.

    Args:
        client: API client wrapping the SecneurX Analysis service.
        args: Demisto command arguments forwarded to create_request_json.

    Returns:
        CommandResults with the pending-sample table, or a "No samples"
        message when the result list is empty.

    Raises:
        DemistoException: when the HTTP request itself fails.
    """
    urlSuffix = "/get_processing"
    params = create_request_json(args)
    response, err_msg = client.get_response(urlSuffix, params)
    if response:
        if SNXResponse.SNX_SUCCESS_KEY in response.keys() and response[SNXResponse.SNX_SUCCESS_KEY] == SNXResponse.SUCCESS:
            reportList = response.get(SNXResponse.SNX_RESULT_KEY, SNXResponse.SNX_NULL_KEY)
            if reportList != SNXResponse.SNX_NULL_KEY and len(reportList) > 0:
                # Normalize: expose file name or URL under the common SAMPLE_KEY column.
                for report in reportList:
                    if SNXResponse.SNX_FILENAME_KEY in report.keys():
                        report[SNXResponse.SAMPLE_KEY] = report[SNXResponse.SNX_FILENAME_KEY]
                    elif SNXReportParser.JSON_URL in report.keys():
                        report[SNXResponse.SAMPLE_KEY] = report[SNXReportParser.JSON_URL]
                    else:
                        continue
                readableOutput = tableToMarkdown("SecneurX Analysis - List of Samples in Pending State: ", t=reportList,
                                                 headers=[SNXResponse.SNX_TASK_UUID_KEY, SNXResponse.SAMPLE_KEY,
                                                          SNXResponse.SNX_STATUS_KEY, SNXResponse.SNX_SHA256_KEY])
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Pending",
                    raw_response=reportList
                )
            else:
                msgJson = {'msg': "No samples to display"}
                readableOutput = tableToMarkdown("SecneurX Analysis - List of Samples in Pending State:", msgJson)
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Pending",
                    outputs=msgJson
                )
        else:
            # API answered without success: pass the raw payload through.
            readableOutput = tableToMarkdown("", response)
            return CommandResults(
                readable_output=readableOutput,
                outputs_prefix="SecneurXAnalysis.Pending",
                outputs=response
            )
    else:
        msg = error_response(err_msg)
        raise DemistoException(msg)
def get_status_cmd(client: Client, args: Dict[str, str]) -> CommandResults:
    """Report the processing status of submitted samples.

    Args:
        client: API client wrapping the SecneurX Analysis service.
        args: Demisto command arguments forwarded to create_request_json.

    Returns:
        CommandResults with the status table, or a "No samples" message
        when the result list is empty.

    Raises:
        DemistoException: when the HTTP request itself fails.
    """
    urlSuffix = "/get_status"
    params = create_request_json(args)
    response, err_msg = client.get_response(urlSuffix, params)
    if response:
        if SNXResponse.SNX_SUCCESS_KEY in response.keys() and response[SNXResponse.SNX_SUCCESS_KEY] == SNXResponse.SUCCESS:
            reportList = response.get(SNXResponse.SNX_RESULT_KEY, SNXResponse.SNX_NULL_KEY)
            if reportList != SNXResponse.SNX_NULL_KEY and len(reportList) > 0:
                # Normalize: expose file name or URL under the common SAMPLE_KEY column.
                for report in reportList:
                    if SNXResponse.SNX_FILENAME_KEY in report.keys():
                        report[SNXResponse.SAMPLE_KEY] = report[SNXResponse.SNX_FILENAME_KEY]
                    elif SNXReportParser.JSON_URL in report.keys():
                        report[SNXResponse.SAMPLE_KEY] = report[SNXReportParser.JSON_URL]
                    else:
                        continue
                readableOutput = tableToMarkdown("SecneurX Analysis - Status of Submitted Samples:", t=reportList,
                                                 headers=[SNXResponse.SNX_TASK_UUID_KEY, SNXResponse.SAMPLE_KEY,
                                                          SNXResponse.SNX_STATUS_KEY, SNXResponse.SNX_SHA256_KEY])
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Status",
                    raw_response=reportList
                )
            else:
                msgJson = {"msg": "No samples to display"}
                readableOutput = tableToMarkdown("SecneurX Analysis - Status of Submitted Samples: ", msgJson)
                return CommandResults(
                    readable_output=readableOutput,
                    outputs_prefix="SecneurXAnalysis.Status",
                    outputs=msgJson
                )
        else:
            # API answered without success: pass the raw payload through.
            readableOutput = tableToMarkdown("", response)
            return CommandResults(
                readable_output=readableOutput,
                outputs_prefix="SecneurXAnalysis.Status",
                outputs=response
            )
    else:
        msg = error_response(err_msg)
        raise DemistoException(msg)
def get_report_cmd(client: Client, args: Dict[str, str]):
    """Download a task's analysis report and attach it to the war room.

    JSON reports are also parsed into CommandResults (indicator + context);
    HTML reports are attached as a file entry only (returns None).

    Raises:
        DemistoException: on an unsupported report format, a missing task
            UUID, or a task UUID that is implausibly short.
    """
    urlSuffix = "/get_report"
    taskUuid = args.get(SNXResponse.SNX_TASK_UUID_KEY) or None
    reportFormat = args.get(SNXResponse.SNX_REPORT_FORMAT_KEY) or "json"
    # BUGFIX: the old check relied on `or`/`and` precedence and included a
    # dead `is None` test (the `or "json"` default above makes None
    # impossible); "paramater" typo in the message fixed as well.
    if reportFormat not in ("html", "json"):
        raise DemistoException("Invalid value of report file format parameter")
    if taskUuid is None:
        raise DemistoException("Task Uuid Parameter value is not found")
    if len(taskUuid) <= 10:
        raise DemistoException("Invalid Task Uuid value")
    reportExtn = "." + reportFormat
    params = create_request_json(args)
    response, err_msg = client.get_response(urlSuffix, params)
    if not response:
        # Transport-level failure: return a Failed entry instead of raising.
        msg = error_response(err_msg)
        result = {SNXResponse.SNX_ERROR_MSG_KEY: msg, "Status": SNXResponse.SNX_FAILED_KEY}
        readableOutputs = tableToMarkdown(f"SecneurX Analysis - Failed: {taskUuid}", result)
        return CommandResults(
            readable_output=readableOutputs,
            outputs_prefix="SecneurXAnalysis.Report",
            outputs=result
        )
    reportFileName = taskUuid + reportExtn
    if reportFormat == "json":
        resJson = json.loads(response)
        contents = parse_response(resJson)
        indicator = parse_report_entity(resJson)
        # Hoisted out of the old per-key loop: the title was rebuilt on every
        # iteration and the header list is just the content keys in order.
        headerList = list(contents.keys())
        title = f"SecneurX Analysis - Detailed Report of the Analyzed Sample: {taskUuid}"
        readableContents = format_report_contents(contents)
        readableOutputs = tableToMarkdown(title, readableContents, headers=headerList, headerTransform=pascalToSpace)
        return_results(fileResult(reportFileName, response))
        return CommandResults(
            readable_output=readableOutputs,
            indicator=indicator,
            outputs=contents,
            outputs_prefix="SecneurXAnalysis.Report",
            raw_response=resJson
        )
    # HTML report: attach the file only. return_results replaces the legacy
    # demisto.results call used before.
    return_results(fileResult(reportFileName, response))
def get_quota_cmd(client: Client) -> CommandResults:
    """Fetch the API-key quota usage from the SecneurX service.

    Returns:
        CommandResults with the quota data (or the raw response when the
        API does not report success).

    Raises:
        DemistoException: when the HTTP request itself fails.
    """
    response, err_msg = client.get_response("/get_quota", {})
    if not response:
        raise DemistoException(error_response(err_msg))
    succeeded = response.get(SNXResponse.SNX_SUCCESS_KEY) == SNXResponse.SUCCESS
    # On success expose only the result payload, otherwise the full response.
    payload = response[SNXResponse.SNX_RESULT_KEY] if succeeded else response
    readable = tableToMarkdown("SecneurX Analysis - API Key Quota Usage:", t=payload)
    return CommandResults(
        readable_output=readable,
        outputs=payload,
        outputs_prefix="SecneurXAnalysis.Quota",
        raw_response=response
    )
def main():
    """Demisto entry point: build the API client and dispatch the invoked
    integration command to its handler."""
    integration_params = demisto.params()
    apiKey = integration_params.get("apiKey")
    baseUrl = urljoin(integration_params.get("url"), "/api/v1")
    verifyCertificate = not integration_params.get("insecure", False)
    proxy = integration_params.get("proxy", False)
    client = Client(
        base_url=baseUrl,
        verify=verifyCertificate,
        headers={"api-key": apiKey},
        proxy=proxy
    )
    cmdAction = demisto.command()
    demisto.debug(f"Command being called is {cmdAction}")
    # Handlers that share the (client, args) calling convention.
    handlers = {
        "snx-analysis-get-verdict": get_verdict_cmd,
        "snx-analysis-get-completed": get_completed_cmd,
        "snx-analysis-get-pending": get_pending_cmd,
        "snx-analysis-get-status": get_status_cmd,
        "snx-analysis-submit-file": post_submit_file,
        "snx-analysis-submit-url": post_submit_url,
        "snx-analysis-get-report": get_report_cmd,
    }
    try:
        if cmdAction == "test-module":
            return_results(test_module(client))
        elif cmdAction == "snx-analysis-get-quota":
            return_results(get_quota_cmd(client))
        elif cmdAction in handlers:
            return_results(handlers[cmdAction](client, demisto.args()))
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f"Failed to execute {cmdAction} command.\nError:\n{str(e)}")
# Run main() when executed directly and inside Demisto's builtin contexts.
if __name__ in ("__main__", "__builtin__", "builtins"):
    main()
| demisto/content | Packs/SecneurXAnalysis/Integrations/SecneurXAnalysis/SecneurXAnalysis.py | SecneurXAnalysis.py | py | 35,608 | python | en | code | 1,023 | github-code | 50 |
28964570804 | from statistics import median
# Exploratory data-cleaning script for 'train.csv':
# 1) inspect missing values, 2) impute numeric columns with the median and
# non-numeric columns with the mode, 3) drop duplicates / the id column,
# 4) derive date parts from the timestamp column.
import pandas as pd
pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
df = pd.read_csv('train.csv')
#print(df.info(verbose=True))
columnsAsNumericValues = df.select_dtypes(include=['number']).columns # this line of code is to print the columns that has numeric values
columsAsNonNumericValues = df.select_dtypes(exclude=['number']).columns # this line of code represents the columns that has not numeric values
# now that we know better our dataset, and the information inside it, its time to know
# the columns that have missing values or null values, since it makes it difficult to
# make our analysis and see the data visualization wrong.
#print(df[columsAsNonNumericValues].info()) # this line it shows the column and the missing values inside these columns
#if we print it, we can see that we dont have any missing values inside these columns
missing_values = df.isna().sum() #this line it show us the columns and there missing values
print(missing_values[:10])
#ISNA -> it detects the missing values
missing_by_row = df.isna().sum(axis='columns')
missing_by_row.hist(bins=50)
#plt.show()
# Altough we can drop the colums or the rows, to make the dataset
# much smaller and work better with the dataset, it's not (depending on your project's goals)
# the mos efficient way in our project to clear out data
# since that, we are going to fill the missing data
# with statics
df_copy = df.copy()
# Median imputation for numeric columns.
filliningNumericValues = df_copy[columnsAsNumericValues].median()
df_copy[columnsAsNumericValues] = df_copy[columnsAsNumericValues].fillna(filliningNumericValues)
#print(filliningNumericValues)
# Mode ("top" of describe()) imputation for non-numeric columns.
fillingNonNumericValues = df_copy[columsAsNonNumericValues].describe().loc['top']
df_copy[columsAsNonNumericValues] = df_copy[columsAsNonNumericValues].fillna(fillingNonNumericValues)
#print(fillingNonNumericValues)
describingLife_sqColumn = df_copy['life_sq'].describe()
grafica = df_copy['life_sq'].hist(bins=100)
#df_copy.boxplot(column=['life_sq'])
#plt.show()
df['ecology'].value_counts().plot(kind = 'bar')
#plt.show()
#Unnecessary data
newDataFrame = df_copy.drop(columns=['id']).drop_duplicates()
#INCONSISTENT data
upperCaseData = newDataFrame['sub_area'].str.upper()
inconsistentData = upperCaseData.value_counts(dropna=False)
#print(upperCaseData)
# Cleaning data types
# NOTE(review): assumes timestamps are day-first ('%d/%m/%Y') -- confirm
# against the actual CSV; a mismatched format raises at parse time.
newDataFrame['timestamp_dt'] = pd.to_datetime(newDataFrame['timestamp'], format='%d/%m/%Y')
newDataFrame['day'] = newDataFrame['timestamp_dt'].dt.day
newDataFrame['month'] = newDataFrame['timestamp_dt'].dt.month
newDataFrame['year'] = newDataFrame['timestamp_dt'].dt.year
newDataFrame['weekday'] = newDataFrame['timestamp_dt'].dt.weekday
#print(newDataFrame[['timestamp_dt', 'day', 'month', 'year', 'weekday']].head())
missing_values = newDataFrame.isna().sum() #this line it show us the columns and there missing values
print(missing_values[:10])
new = newDataFrame.isna().sum(axis='columns')
new.hist(bins=50)
plt.show()
#print(newDataFrame.info(verbose=True)) | rriossigma/ProyectoFinal | ProyectoFinal.py | ProyectoFinal.py | py | 3,018 | python | en | code | 0 | github-code | 50 |
30255676732 | from flask import url_for
from flask_testing import TestCase
from application import app
from application.routes import backend
import requests_mock
# Canonical region fixture reused by the mocked-backend tests below.
test_region = {
    "region_id": 1,
    "region_name": "Bristol",
    "region_property_address": "Flat 12 A",
    "region_price": "200.000",
    "description": 'Best price',
}
class TestBase(TestCase):
    """Shared Flask-Testing base: builds the app in debug mode with CSRF
    disabled so forms can be posted directly from tests."""
    def create_app(self):
        app.config.update(
            DEBUG=True,
            WTF_CSRF_ENABLED=False
        )
        return app
class TestViews(TestBase):
    """Smoke tests: each page renders with HTTP 200 against a mocked backend."""
    def test_home_get(self):
        with requests_mock.Mocker() as m:
            all_regions = [test_region]
            m.get(f"http://{backend}/regions", json=all_regions)
            response = self.client.get(url_for('home'))
            self.assert200(response)
    def test_create_region_get(self):
        # Create form needs no backend data to render.
        response = self.client.get(url_for('create_region'))
        self.assert200(response)
    def test_update_region_get(self):
        with requests_mock.Mocker() as m:
            # Update form pre-populates from the backend's region endpoint.
            m.get(f"http://{backend}/region/1", json=test_region)
            response = self.client.get(url_for('update_region', id=1))
            self.assert200(response)
class TestRead(TestBase):
    """The home page lists region names returned by the backend."""
    def test_get_regions(self):
        with requests_mock.Mocker() as m:
            m.get(f"http://{backend}/regions", json=[test_region])
            response = self.client.get(url_for('home'))
            self.assertIn(b"Bristol", response.data)
class TestUpdate(TestBase):
    """Posting the update form calls the backend PUT and the new name shows
    on the listing after the redirect."""
    def test_update_region_name(self):
        with requests_mock.Mocker() as m:
            m.get(f"http://{backend}/region/1", json=test_region)
            m.put(f"http://{backend}/update/1", text="Test response")
            # Mutate the shared fixture so the follow-up GET reflects the update.
            test_region["region_name"] = "London"
            m.get(f"http://{backend}/regions",
                  json=[test_region])
            response = self.client.post(
                url_for('update_region', id=1),
                data={"region_name": "London"},
                follow_redirects=True
            )
            self.assertIn(b"London", response.data)
class TestDelete(TestBase):
    """Deleting the only region leaves the listing without it."""
    def test_delete_region(self):
        with requests_mock.Mocker() as m:
            m.delete(f"http://{backend}/delete/1")
            # After deletion the backend reports no regions at all.
            m.get(f"http://{backend}/regions", json=[])
            response = self.client.get(
                url_for('delete_region', id=1),
                follow_redirects=True
            )
            self.assert200(response)
            # BUGFIX: the old assertion checked for "Test the region delete",
            # a string that never appears in any rendered page, so the test
            # could never fail.  Assert the deleted region's name is absent.
            self.assertNotIn(b"Bristol", response.data)
| MutluToy/DfE6_Final_Project | frontend/tests/test_unit.py | test_unit.py | py | 2,508 | python | en | code | 0 | github-code | 50 |
30500759861 | from django.urls import path, include
from . import views
# Read pages use class-based views; create/update/delete are function views.
urlpatterns = [
    path('', views.ProjectListView.as_view(), name='project-list'),
    path('<int:pk>/', views.ProjectDetailView.as_view(), name='project-detail'),
    path('create/', views.CreateProjectView, name='create-project'),
    path('update/<int:id>', views.UpdateProjectView, name='update-project'),
    path('delete/<int:id>', views.DeleteProjectView, name='delete-project'),
]
27953728060 | """
Custom marshmallow validators.
"""
import socket
from typing import List
from marshmallow import ValidationError
def validate_ip(ip_str: str):
    """Check if the given string is a valid dotted-quad IPv4 address.

    Uses ``inet_pton`` rather than ``inet_aton`` so legacy short forms such
    as "127.1" or "0x7f.0.0.1" (which ``inet_aton`` accepts) are rejected.

    Raises:
        ValidationError: if *ip_str* is not a valid IPv4 address.
    """
    try:
        socket.inet_pton(socket.AF_INET, ip_str)
    except socket.error:
        raise ValidationError('Invalid IP address.')
def validate_devices(device_list: List[str]):
    """Check that every entry looks like a Linux device file path.

    Raises:
        ValidationError: on the first entry not rooted under ``/dev/``.
    """
    for candidate in device_list:
        if candidate.startswith('/dev/'):
            continue
        raise ValidationError('Invalid Linux device path.')
| varrrro/shipyard-server | shipyard/validators.py | validators.py | py | 586 | python | en | code | 2 | github-code | 50 |
# Travel-agency price calculator: town + package determine the daily rate,
# VIP status a discount, stays of 7+ days get one free day.
name_of_town = input()
holiday_package = input()
is_vip = input()
days_of_stay = int(input())
# Valid packages per town, plus (daily price, VIP multiplier) per package.
# BUGFIX: the original compared the town/package strings against whole
# lists (always unequal), so a valid town with an invalid package slipped
# through all branches and printed a 0.00lv price instead of an error.
valid_packages = {
    'Bansko': ('noEquipment', 'withEquipment'),
    'Borovets': ('noEquipment', 'withEquipment'),
    'Varna': ('noBreakfast', 'withBreakfast'),
    'Burgas': ('noBreakfast', 'withBreakfast'),
}
rates = {
    'noEquipment': (80, 0.95),
    'withEquipment': (100, 0.90),
    'noBreakfast': (100, 0.93),
    'withBreakfast': (130, 0.88),
}
if name_of_town not in valid_packages or holiday_package not in valid_packages[name_of_town]:
    print('Invalid input!')
    exit()
price_per_day, vip_multiplier = rates[holiday_package]
total_price = days_of_stay * price_per_day
if is_vip == 'yes':
    total_price *= vip_multiplier
# One free day for week-long stays (applied after the VIP discount, as before).
if days_of_stay >= 7:
    total_price -= price_per_day
if days_of_stay < 1:
    print("Days must be positive number!")
else:
    print(f"The price is {total_price:.2f}lv! Have a nice time!")
| ivocostov/SoftUni | Python/01. Python Basics Course/PB - Exams/Programming Basics Online Exam - 6 and 7 July 2019/03_travel_agency.py | 03_travel_agency.py | py | 1,360 | python | en | code | 2 | github-code | 50 |
25124698026 | #!/usr/bin/env python3
import torch
def gradient_penalty(d_real, y):
    """WGAN-GP style regularizer: mean squared deviation from 1 of the
    per-sample L2 norm of d(d_real)/dy.

    Returns a zero tensor (on d_real's device) when ``d_real`` carries no
    gradient graph, so the penalty is a no-op outside training.
    """
    if not d_real.requires_grad:
        return torch.tensor(0., device=d_real.device)
    (grads,) = torch.autograd.grad(
        outputs=[d_real],
        inputs=y,
        grad_outputs=[torch.ones(d_real.size(), device=y.device)],
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    # Flatten everything but the batch dimension before taking norms.
    flat = grads.reshape(y.shape[0], -1)
    return ((flat.norm(2, dim=1) - 1) ** 2).mean()
| calvinpelletier/ai_old | loss/reg.py | reg.py | py | 584 | python | en | code | 0 | github-code | 50 |
41872307265 | from flask import Flask, request, redirect, render_template, flash
from flask_sqlalchemy import SQLAlchemy
# Flask app + SQLAlchemy binding for the build-a-blog MySQL database.
# NOTE(review): DB credentials and the secret key are hard-coded in source
# and DEBUG is on -- fine for a local demo, not for deployment.
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:locker@localhost:8889/build-a-blog'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = "123"
#gotta have a secret key when using flash
class Blog(db.Model):
    """ORM model for one blog entry: auto-increment id, body and title."""
    id = db.Column(db.Integer, primary_key=True)
    blogpost = db.Column(db.String(5000))   # entry body text
    blogtitle = db.Column(db.String(500))   # entry title
    def __init__(self, blogpost, blogtitle):
        self.blogpost = blogpost
        self.blogtitle = blogtitle
#The above is defining a class and setting up a database-to make a database work you have
#to drop and create in python while MAMP is running
@app.route("/")
def root():
    """Home page: lists all entries; when an ?id= query parameter is given,
    also looks up that single entry for the template to feature."""
    somevariable = request.args.get("id")
    instanceofblogobject = Blog.query.filter_by(id = somevariable).first()
    #This is not totally great because there is no route--HOWEVER! It works. this pulls out the specific id
    return render_template("index.html", displayblog = Blog.query.all(), somevariable=somevariable, instanceofblogobject=instanceofblogobject)
@app.route("/Blog-it", methods = ['GET', 'POST'])
def blog():
    """GET renders the entry form; POST validates title/body, flashes
    errors back to the form, or persists the entry and redirects to it."""
    if request.method == "POST":
        blog_title = request.form["blogtitle"]
        blogpost = request.form["textarea"]
        # Constructor argument order is (blogpost, blogtitle).
        blogsubmit = Blog(blogpost,blog_title )
        error_present = False
        if not blog_title:
            flash("You left the Blog title blank! Give your entry a title :)")
            error_present = True
        if not blogpost:
            flash("You left Blog Content blank! Give us your thoughts :)")
            error_present = True
        if error_present:
            return redirect ("/Blog-it")
        else:
            db.session.add(blogsubmit)
            db.session.commit()
            # id is populated by the DB on commit; redirect to the new entry.
            blogid = blogsubmit.id
            return redirect("/?id="+str(blogid))
    else:
        return render_template("Blog-it.html")
# Start the development server when run directly.
if __name__=="__main__":
    app.run()
| Emrichardsone/build-a-blog | main.py | main.py | py | 2,076 | python | en | code | 0 | github-code | 50 |
16691529274 | class Solution:
def reverseVowels(self, s: str):
"""
@param s: a string
@return: reverse only the vowels of a string
"""
vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
ls = list(s)
l, r = 0, len(ls) - 1
while l < r:
while l < r and ls[l] not in vowels:
l += 1
while l < r and ls[r] not in vowels:
r -= 1
ls[l], ls[r] = ls[r], ls[l]
l += 1
r -= 1
return "".join(ls)
from TestUtil import Test
# Ad-hoc harness: run reverseVowels over the sample cases when executed directly.
if __name__ == '__main__':
    test = Test(Solution())
    cases = ["hello", "lintcode"]
    test.run(cases, 'reverseVowels')
| AiRanthem/LintCode | Python/1282翻转字符串中的元音字母.py | 1282翻转字符串中的元音字母.py | py | 700 | python | en | code | 1 | github-code | 50 |
4150485572 | from mscn.util_common import *
def encode_samples(tables, samples, table2vec, added_tables=None):
    """Encode each query's tables as [table one-hot | bitmap sample] vectors.

    ``added_tables`` (optional) lists extra tables per query; any of them
    missing from the query gets an all-ones bitmap placeholder vector.
    """
    encoded = []
    for qi, query_tables in enumerate(tables):
        query_enc = [
            np.hstack([table2vec[tbl], samples[qi][ti]])
            for ti, tbl in enumerate(query_tables)
        ]
        extra = set() if added_tables is None else set(added_tables[qi]) - set(query_tables)
        for tbl in extra:
            placeholder = np.ones(NUM_MATERIALIZED_SAMPLES, dtype=np.float32)
            query_enc.append(np.hstack([table2vec[tbl], placeholder]))
        encoded.append(query_enc)
    return encoded
def encode_data(predicates, joins, column_min_max_vals, column2vec, op2vec, join2vec):
    """Encode predicates as [column one-hot | op one-hot | normalized value]
    vectors and joins as one-hot vectors, one list per query.

    A predicate that is not a (column, op, value) triple encodes as zeros.
    """
    zero_len = len(column2vec) + len(op2vec) + 1
    predicates_enc = []
    joins_enc = []
    for qi, query_preds in enumerate(predicates):
        pred_vecs = []
        for pred in query_preds:
            if len(pred) == 3:
                col, op, val = pred
                vec = np.hstack([
                    column2vec[col],
                    op2vec[op],
                    normalize_data(val, col, column_min_max_vals),
                ])
            else:
                vec = np.zeros(zero_len, dtype=np.float32)
            pred_vecs.append(vec)
        predicates_enc.append(pred_vecs)
        joins_enc.append([join2vec[j] for j in joins[qi]])
    return predicates_enc, joins_enc
def encode_data_with_string(predicates, joins, column_min_max_vals, column2vec, op2vec, join2vec, string_columns, word_vectors_path, is_imdb):
    """Encode predicates/joins like encode_data, but embed string operands.

    String-column values become word-vector embeddings (averaged over the
    elements of IN-lists and the non-wildcard parts of LIKE patterns); date
    columns and numeric columns are normalized and padded to STR_EMB_SIZE.

    Args:
        predicates: per-query list of (column, op, value) triples (or
            non-triples, which encode as zero vectors).
        joins: per-query list of join identifiers.
        column_min_max_vals: per-column min/max used for normalization.
        column2vec / op2vec / join2vec: one-hot lookup tables.
        string_columns: columns whose values are embedded via word vectors.
        word_vectors_path: path to a saved gensim KeyedVectors model.
        is_imdb: dataset flag forwarded to get_string_embedding.

    Returns:
        (predicates_enc, joins_enc) lists of per-query vector lists.
    """
    predicates_enc = []
    joins_enc = []
    # mmap'd so the (large) embedding matrix is shared, not copied.
    word_vectors = KeyedVectors.load(word_vectors_path, mmap='r')
    pred_enc_size = len(column2vec) + len(op2vec) + STR_EMB_SIZE
    for i, query in enumerate(predicates):
        predicates_enc.append(list())
        joins_enc.append(list())
        for predicate in query:
            if len(predicate) == 3:
                # Proper predicate
                column = predicate[0]
                operator = predicate[1]
                val = predicate[2]
                pred_vec = []
                pred_vec.append(column2vec[column])
                pred_vec.append(op2vec[operator])
                if column in string_columns:
                    operand_vec = np.zeros(STR_EMB_SIZE, dtype=np.float32)
                    if operator in {'IN', 'NOT_IN'}:
                        assert (len(val) > 2)
                        val = val[1:-1] #remove '(' and ')'
                        # Split on commas outside single quotes, then average
                        # the embeddings of the quoted elements.
                        vals = re.split(r",(?=')", val) #split on commas but ignore ones in single quotes
                        for val in vals:
                            new_vec = get_string_embedding(word_vectors, column, val[1:-1], is_imdb)
                            operand_vec = operand_vec + new_vec
                        cnt = len(vals)
                        operand_vec = operand_vec / cnt
                    elif operator in {'LIKE', 'NOT_LIKE'}:
                        # Average embeddings of the non-empty literal chunks
                        # between '%' wildcards.
                        cnt = 0
                        for v in val.split('%'):
                            if(len(v) > 0):
                                new_vec = get_string_embedding(word_vectors, column, v, is_imdb)
                                operand_vec = operand_vec + new_vec
                                cnt += 1
                        operand_vec = operand_vec / cnt
                    else:
                        operand_vec = get_string_embedding(word_vectors, column, val, is_imdb)
                    pred_vec.append(operand_vec)
                elif column in DATE_COLUMNS:
                    norm_val = normalize_date(val, STR_EMB_SIZE)
                    pred_vec.append(norm_val)
                else:
                    norm_val = normalize_data(val, column, column_min_max_vals, STR_EMB_SIZE)
                    assert len(norm_val) == STR_EMB_SIZE
                    pred_vec.append(norm_val)
                pred_vec = np.hstack(pred_vec)
                if len(pred_vec) != pred_enc_size:
                    raise
            else:
                pred_vec = np.zeros((len(column2vec) + len(op2vec) + STR_EMB_SIZE), dtype=np.float32)
            predicates_enc[i].append(pred_vec)
        for predicate in joins[i]:
            # Join instruction
            join_vec = join2vec[predicate]
            joins_enc[i].append(join_vec)
    return predicates_enc, joins_enc
| postechdblab/learned-cardinality-estimation | MSCN/mscn/util_mscn.py | util_mscn.py | py | 5,097 | python | en | code | 13 | github-code | 50 |
18445530243 | import random
import pandas as pd
import numpy as np
import pprint
from ObjectiveFunctions import count_nodes_objective_func
class Wildcards:
    """GA individual: three schedule strands, each masked at two random
    wildcard positions with a wildcard symbol."""

    def __init__(self, phenotype, wildcard_symbol='🌟'):
        """Pick two random valid wildcard indices per strand of
        phenotype['schedule'] (a sequence of three strings)."""
        self.wildcards = []
        self.wildcard_symbol = wildcard_symbol
        self.fitness = -1
        self.phenotype = phenotype
        for strand in range(3):
            # np.random.randint's upper bound is exclusive, so these indices
            # are always in range.
            strand_len = len(self.phenotype['schedule'][strand])
            self.wildcards.append(list(np.random.randint(0, strand_len, 2)))

    def update_gene(self, str_idx=None, wldcrd_idx=None, value=None):
        """Set one wildcard gene; unspecified parts are chosen at random.

        BUGFIX: the original drew values with random.randint(0, len(strand)),
        whose upper bound is *inclusive*, so it could produce an index equal
        to the strand length and make get_phenotype() raise IndexError.
        randrange(n) keeps indices in [0, n).
        """
        if str_idx is None and wldcrd_idx is None and value is None:
            str_idx = random.randint(0, 2)
            wldcrd_idx = random.randint(0, 1)
            value = random.randrange(len(self.phenotype['schedule'][str_idx]))
        elif str_idx is not None and wldcrd_idx is not None and value is None:
            value = random.randrange(len(self.phenotype['schedule'][str_idx]))
        self.wildcards[str_idx][wldcrd_idx] = value

    def get_phenotype(self):
        """Return the schedule as a one-column DataFrame ('schedule') with
        the wildcard positions replaced by the wildcard symbol."""
        phenome_lsts = [list(p) for p in self.phenotype['schedule']]
        for i in range(3):
            for j in range(2):
                phenome_lsts[i][self.wildcards[i][j]] = self.wildcard_symbol
        phenome = [''.join(phenome_lst) for phenome_lst in phenome_lsts]
        return pd.DataFrame(phenome, columns=['schedule'])

    def mutate(self):
        """Randomize a single wildcard gene."""
        self.update_gene()

    def get_fitness(self):
        """Evaluate (and cache) fitness via the node-count objective."""
        self.fitness = count_nodes_objective_func(self.get_phenotype())
        return self.fitness

    def __str__(self):
        return f'{pprint.pformat(self.wildcards)} {self.get_fitness()}\n'
41614763940 | '''
Created on Aug 29, 2013
@author: tbowker
'''
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from registerweb import views
from registerweb import pages
# Demo pages plus the v1 REST endpoints for login and user management.
urlpatterns = patterns('',
    url(r'^demo/login/$', pages.LoginPage.as_view()),
    url(r'^demo/landing/$', pages.LandingPage.as_view()),
    url(r'^demo/register/$', pages.RegisterPage.as_view()),
    url(r'^demo/test/$', pages.TestPage.as_view()),
    url(r'^demo/api/v1/login', views.LoginApi.as_view()),
    url(r'^demo/api/v1/users/(?P<pk>[\w,-]+)', views.UserResource.as_view()),
    url(r'^demo/api/v1/users', views.UserResource.as_view())
)

# Allow format suffixes (.json/.api) on every route above.
urlpatterns = format_suffix_patterns(urlpatterns)
6274092704 | from PyQt4.QtGui import *
from re import search
from os import system,geteuid,getuid
from Core.Settings import frm_Settings
from Modules.utils import Refactor
from subprocess import Popen,PIPE
from scapy.all import *
class frm_Probe(QMainWindow):
    """Top-level window hosting the probe-request monitor widget."""
    def __init__(self, parent=None):
        super(frm_Probe, self).__init__(parent)
        self.form_widget = frm_PMonitor(self)
        self.setCentralWidget(self.form_widget)
        self.setWindowIcon(QIcon('rsc/icon.ico'))
    def closeEvent(self, event):
        """Confirm before closing; schedule deletion only when running as root."""
        reply = QMessageBox.question(self, 'About Exit',"Are you sure to quit?", QMessageBox.Yes |
        QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            # Only the root-owned instance tears itself down explicitly.
            if getuid() == 0:
                self.deleteLater()
            else:
                pass
        else:
            event.ignore()
class frm_PMonitor(QWidget):
    """Widget that sniffs 802.11 probe requests on a monitor-mode interface
    and lists them in the GUI."""
    def __init__(self, parent=None):
        super(frm_PMonitor, self).__init__(parent)
        self.Main = QVBoxLayout()
        self.setWindowTitle("Probe Request wifi Monitor")
        self.setWindowIcon(QIcon('rsc/icon.ico'))
        self.probes = []                      # history of captured entries
        self.config = frm_Settings()
        # Monitor-mode interface name comes from the XML settings file.
        self.interface = self.config.xmlSettings("interface", "monitor_mode", None, False)
        self.loadtheme(self.config.XmlThemeSelected())
        self.setupGUI()
    def loadtheme(self,theme):
        """Apply the Qt stylesheet Core/<theme>.qss."""
        sshFile=("Core/%s.qss"%(theme))
        with open(sshFile,"r") as fh:
            self.setStyleSheet(fh.read())
    def setupGUI(self):
        """Build the form: adapter picker, capture list, scan-time picker, Scan button."""
        self.form0 = QFormLayout()
        self.list_probe = QListWidget()
        self.list_probe.setFixedHeight(300)
        self.btn_scan = QPushButton("Scan")
        self.btn_scan.clicked.connect(self.Pro_request)
        self.btn_scan.setIcon(QIcon("rsc/network.png"))
        self.get_placa = QComboBox(self)
        # Offer only wlan* interfaces in the adapter combo box.
        n = Refactor.get_interfaces()['all']
        for i,j in enumerate(n):
            if search("wlan", j):
                self.get_placa.addItem(n[i])
        self.time_scan = QComboBox(self)
        self.time_scan.addItems(["10s","20s","30s"])
        self.form0.addRow("Network Adapter: ", self.get_placa)
        self.form0.addRow(self.list_probe)
        self.form0.addRow("Time Scan: ", self.time_scan)
        self.form1 = QFormLayout()
        self.form1.addRow(self.btn_scan)
        self.Main.addLayout(self.form0)
        self.Main.addLayout(self.form1)
        self.setLayout(self.Main)
    def Pro_request(self):
        """Validate adapter/root, enable monitor mode if needed, then sniff.

        The "time" choice actually maps to a packet *count* passed to
        scapy's sniff(), not seconds.
        """
        self.time_control = None
        if self.time_scan.currentText() == "10s":self.time_control = 300
        elif self.time_scan.currentText() == "20s":self.time_control = 400
        elif self.time_scan.currentText() == "30s":self.time_control = 600
        if self.get_placa.currentText() == "":
            QMessageBox.information(self, "Network Adapter", 'Network Adapter Not found try again.')
        else:
            if not geteuid() == 0:
                QMessageBox.information(self, "Permission Denied", 'the tool must be run as root try again.')
            else:
                # Check whether the monitor interface already exists in
                # ifconfig's output before creating it with airmon-ng.
                # NOTE(review): Popen returns bytes on Python 3, while
                # search() gets a str pattern -- this looks like Python 2
                # era code; confirm the target interpreter.
                comando = "ifconfig"
                proc = Popen(comando,stdout=PIPE, shell=True)
                data = proc.communicate()[0]
                if search(self.interface, data):
                    sniff(iface=self.interface,prn=self.sniff_probe, count=self.time_control)
                    system("clear")
                else:
                    # NOTE(review): interface name is interpolated into a
                    # shell command -- values come from the combo box, but a
                    # list-based subprocess call would be safer.
                    system("airmon-ng start %s" %(self.get_placa.currentText()))
                    sniff(iface=self.interface,prn=self.sniff_probe, count=self.time_control)
                    system("clear")
def sniff_probe(self,p):
if (p.haslayer(Dot11ProbeReq)):
mac_address=(p.addr2)
ssid=p[Dot11Elt].info
ssid=ssid.decode('utf-8','ignore')
if ssid == "":
ssid="null"
else:
print ("[:] Probe Request from %s for SSID '%s'" %(mac_address,ssid))
self.probes.append("[:] Probe Request from %s for SSID '%s'" %(mac_address,ssid))
self.list_probe.addItem("[:] Probe Request from %s for SSID '%s'" %(mac_address,ssid)) | 84KaliPleXon3/3vilTwinAttacker | Modules/ModuleProbeRequest.py | ModuleProbeRequest.py | py | 4,215 | python | en | code | 0 | github-code | 50 |
10544505400 | #!/usr/bin/env python3
""" Amazon Business & Tech | Dradabau"""
import random
wordbank= ["indentation", "spaces"]
tlgstudents= ["Aaron", "Andy", "Asif", "Brent", "Cedric",
"Chris", "Cory", "Ebrima", "Franco",
"Greg", "Hoon", "Joey", "Jordan",
"JC", "LB", "Mabel", "Shon", "Pat", "Zach"]
wordbank.append(4)
print (wordbank)
num= int(input("Enter a number 1-18: "))
student_name = tlgstudents[num]
print(student_name)
print("Random name: " + random.choice(tlgstudents))
print (random.random())
def myfunc():
return .12
#print(random.shuffle(wordbank, myfunc))
print(wordbank)
print(random.shuffle(tlgstudents))
random.shuffle(wordbank, myfunc)
print(wordbank)
| DavidRadd/mycode | wordbank.py | wordbank.py | py | 696 | python | en | code | 0 | github-code | 50 |
19538014566 | import json
import time
from utils.api_helper import *
from src.endpoints.auth.auth import *
from src.endpoints.alerts.alerts import *
from utils.asserts import *
from pytest_check import check
from utils.preconditions import *
@pytest.mark.usefixtures("auth")
class TestAlertsApi:
    """API tests for the alert-notes endpoints.

    Each test provisions its own fault through the ``create_fault``
    precondition helper and authenticates with the shared ``auth`` fixture.
    """
    def test_01_create_fault_notes(self, auth):
        """A note can be attached to a freshly created fault (HTTP 201)."""
        alerts = AlertsApi()
        # Create fault
        fault_data = create_fault(auth)
        student_id = fault_data[0]['studentId']
        user_id = fault_data[0]['userId']
        # Create note
        note_data = alerts.post_create_notes(auth, 201, student_id, user_id, "Note 0001")
        # Validate the data in the response
        check.equal(note_data["studentId"], student_id)
        check.equal(note_data["userId"], user_id)
    def test_02_get_student_alerts_notes(self, auth):
        """Notes fetched by student include the message and author fields."""
        alerts = AlertsApi()
        # Create fault and note
        fault_data = create_fault(auth)
        student_id = fault_data[0]['studentId']
        user_id = fault_data[0]['userId']
        alerts.post_create_notes(auth, 201, student_id, user_id, "Note 0001")
        # Get notes by student
        notes_data = alerts.get_student_alert_notes(auth, 200, student_id)
        # Validate the data in the response
        check.equal(notes_data[0]["message"], "Note 0001")
        check.is_true(is_key_present(notes_data[0], "userName"))
        check.is_true(is_key_present(notes_data[0], "userLastName"))
    def test_03_get_student_alerts_notes_pdf(self, auth):
        """The PDF export of a student's notes responds with HTTP 200."""
        alerts = AlertsApi()
        # Create fault and note
        fault_data = create_fault(auth)
        student_id = fault_data[0]['studentId']
        user_id = fault_data[0]['userId']
        alerts.post_create_notes(auth, 201, student_id, user_id, "Note 0001")
        # Get notes by student
        alerts.get_student_alert_notes_pdf(auth, 200, student_id)
| gerardo-aragon/upre_automation | tests/api/alerts/alerts.py | alerts.py | py | 1,913 | python | en | code | 0 | github-code | 50 |
39519162482 | # 15. Напишите программу, которая принимает на
# вход число N и выдает набор произведений чисел от 1 до N.
# Пример:
# o пусть N = 4, тогда [ 1, 2, 6, 24 ]\
# (1, 1*2, 1*2*3, 1*2*3*4)
# def factorial(num):
# numbers = int(input("Введите число "))
# list = []
# multiplication = 1
# for i in range(1, numbers + 1):
# list.append(multiplication * i)
# multiplication = multiplication * i
# print(list)
#
#
# factorial(5)
numbers = int(input("Введите число: "))
lst = [n for n in range(1, numbers + 1)]
print(lst)
def factor(lst):
    """Print the running factorial products [1, 1*2, ..., 1*2*...*len(lst)]."""
    running = 1
    products = []
    for idx in range(1, len(lst) + 1):
        products.append(running * idx)
        running *= idx
    print(products)
factor(lst)
| misha1potapenko/Python | Homework_phyton/HW6/first.py | first.py | py | 835 | python | ru | code | 0 | github-code | 50 |
29089279204 | from datetime import datetime, timezone
import secrets
import string
import hashlib
import requests
from time import sleep
import logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
import argparse
parser = argparse.ArgumentParser(description="XDR health check script")
parser.add_argument("-key", help="API key value", required=True, type=str)
parser.add_argument("-keyid", help="API key ID", required=True, type=str)
parser.add_argument("-tenant", help="Tenant URL", required=True, type=str)
parser.add_argument("-query", help="Query to run in quotes, default is 'dataset = panw_ngfw_traffic_raw'", required=True, type=str, default="dataset = panw_ngfw_traffic_raw")
keyID = parser.parse_args().keyid
keyValue = parser.parse_args().key
tenantURL = parser.parse_args().tenant
input_query = parser.parse_args().query
def api_call(called_parameters, input_query, api_url):
    """POST a request to the Cortex XDR public XQL API.

    Args:
        called_parameters: Value placed inside ``request_data`` (the query
            text for ``start_xql_query``, or the execution id for
            ``get_query_results``).
        input_query: Key name used inside ``request_data`` ("query" or
            "query_id") -- despite its name it is a field name, not the
            query itself.
        api_url: Final path segment of the endpoint to call.

    Returns:
        The decoded JSON response on HTTP 200; otherwise a
        ``("error getting incidents", called_parameters)`` tuple.
        NOTE(review): callers do ``.get('reply')`` on the result, which
        breaks on the tuple error path -- confirm this is intended.
    """
    # 64-char random nonce for the hashed API-key authentication scheme
    nonce = "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
    # millisecond UTC timestamp
    timestamp = int(datetime.now(timezone.utc).timestamp()) * 1000
    # auth hash = sha256(api_key + nonce + timestamp)
    auth_key = "%s%s%s" % (keyValue, nonce, timestamp)
    auth_key = auth_key.encode("utf-8")
    api_key_hash = hashlib.sha256(auth_key).hexdigest()
    headers = {
        "x-xdr-timestamp": str(timestamp),
        "x-xdr-nonce": nonce,
        "x-xdr-auth-id": str(keyID),
        "Authorization": api_key_hash,
        "Content-Type": "application/json"
    }
    parameters = {
        "request_data": {
            input_query: called_parameters
        }
    }
    res = requests.post(url=f"https://api-{tenantURL}/public_api/v1/xql/{api_url}",
                        headers=headers,
                        json=parameters)
    if res.status_code == 200:
        return res.json()
    return "error getting incidents", called_parameters
if __name__ == "__main__":
while True:
rawJson = api_call(input_query, "query", "start_xql_query") # replace dataset = with desired dataset to monitor
qryId = rawJson.get('reply')
logging.info(f"Got query ID: {qryId}")
max_wait = 60
state = False
for interval in range(10, max_wait, 10):
sleep(interval)
outputQuery = api_call(qryId, "query_id", "get_query_results")
logging.info(f"Query status: {outputQuery['reply']['status']}")
if outputQuery["reply"]['status'] == "SUCCESS":
state = True
break
if not state:
logging.error("Query took too long")
exit(0)
numResults = outputQuery['reply']['number_of_results']
if numResults != 0:
logging.info(f"Success, got number of results : {numResults}")
else:
logging.error(f"Logging failed for query! , number of results found: {numResults}")
logging.info("Sleeping for 2 days")
sleep(172800)
| injuxtice/xdr-log_ingestion-health-check | health_check.py | health_check.py | py | 3,028 | python | en | code | 0 | github-code | 50 |
74658565276 | class Solution:
def maxProfit(self, prices: List[int]) -> int:
# method 1
# 因为是股票交易,有时间的因素,所以我们要保证在当前最小的波谷买入,然后在后续的波峰卖出
# 不能用min,是因为有可能整个数组的最小值之后的序列收益很小,所以应该记录当前已遍历的数组的最小值
# if not prices:
# return 0
# else:
# min_now = prices[0]
# profit = 0
# for i in range(1,len(prices)):
# if prices[i]<min_now:
# min_now = prices[i]
# else:
# profit = max(profit,prices[i]-min_now)
# return profit
# method 2
# kadan's algorithm
maxCur = 0
maxSoFar = 0
for i in range(1, len(prices)):
maxCur += prices[i]-prices[i-1]
maxCur = max(0, maxCur)
# 如果这个maxCur是负数,说明这时候卖掉是亏钱的,正数是挣钱的
maxSoFar = max(maxCur, maxSoFar)
return maxSoFar
| HoweChen/leetcodeCYH | 121. Best Time to Buy and Sell Stock/main.py | main.py | py | 1,205 | python | en | code | 0 | github-code | 50 |
37152594629 | #########################################################################
# Dusi's Thesis #
# Algorithmic Discrimination and Natural Language Processing Techniques #
#########################################################################
# Reads the sentences from the WinoGender dataset.
import csv
# Templates constants
import settings
from src.models.gender_enum import Gender
MASK_OCCUPATION: str = "$OCCUPATION"
MASK_PARTICIPANT: str = "$PARTICIPANT"
# Pronouns constants
MALE_INDEX: int = 0
FEMALE_INDEX: int = 1
NEUTER_INDEX: int = 2
PRONOUNS_DICT: dict[str, tuple] = {
"$NOM_PRONOUN": ("he", "she", "they"),
"$POSS_PRONOUN": ("his", "her", "their"),
"$ACC_PRONOUN": ("him", "her", "them"),
}
GENDERS: list[Gender] = [Gender.MALE, Gender.FEMALE]
def instantiate_gender(templates: list[str], genders=None) -> list[list[str]]:
    """
    Fill the pronoun placeholders of each template with gendered pronouns.

    A template may contain the masks "$NOM_PRONOUN" (nominative),
    "$ACC_PRONOUN" (accusative) and "$POSS_PRONOUN" (possessive); every mask
    is replaced with the matching pronoun of each requested gender, so one
    template yields one sentence per gender. Multiple masks in the same
    template are all declined with the same gender -- crossed combinations
    are not produced.

    :type genders: list[Gender]
    :param templates: Templates containing pronoun masks.
    :param genders: Genders to instantiate with; defaults to the
        module-level GENDERS list.
    :return: One list per template, holding that template declined once per
        gender.
    """
    if genders is None:
        genders = GENDERS
    return [
        [
            template
            .replace("$NOM_PRONOUN", gender.nom_pronoun)
            .replace("$ACC_PRONOUN", gender.acc_pronoun)
            .replace("$POSS_PRONOUN", gender.poss_pronoun)
            for gender in genders
        ]
        for template in templates
    ]
def read_templates() -> list[str]:
    """
    Reads the WinoGender templates from data/WinoGender/templates.tsv.

    Each row holds (occupation, participant, answer, template). The
    occupation and participant masks are filled in here; the pronoun masks
    are left intact for instantiate_gender() to decline later.
    :return: the list of sentence templates, one per row, pronoun masks intact.
    """
    with open("data/WinoGender/templates.tsv") as tsv_file:
        read_tsv = csv.reader(tsv_file, delimiter=settings.OUTPUT_TABLE_COL_SEPARATOR)
        templates_list: list[str] = []
        for row in read_tsv:
            word_occupation, word_participant = row[0], row[1]
            answer = row[2]  # Unused
            template = row[3]
            # Replacing occupation and participant
            template = template.replace(MASK_OCCUPATION, word_occupation)
            template = template.replace(MASK_PARTICIPANT, word_participant)
            templates_list.append(template)
    return templates_list
def get_sentences_pairs() -> list[list[str]]:
    """Read the WinoGender templates and return them instantiated per gender."""
    return instantiate_gender(read_templates())
| MicheleDusi/AlgorithmicDiscrimination_MasterThesis | src/parsers/winogender_templates_parser.py | winogender_templates_parser.py | py | 3,361 | python | en | code | 0 | github-code | 50 |
34454691331 | import numpy as n
ary=[]
# number of zeros to insert between consecutive values
nz=5
first=int(input("Enter first number:"))
last=int(input("Enter last number:"))
# build the inclusive integer range [first, last]
for i in range(first,last+1):
    ary.append(i)
z=n.array(ary)
# output array: the values plus nz zeros between each neighbouring pair
k=n.zeros(len(z) + (len(z)-1)*(nz))
# strided assignment places the originals every nz+1 slots
k[::nz+1]=z
print(k) | Adityachaitu/COGNIZANCE | Task-8/que-1.py | que-1.py | py | 237 | python | en | code | 0 | github-code | 50 |
26396538096 | from DSA_python.pythonds.Graph.Graph import Graph
from english_words import english_words_lower_set as words_set
from pprint import pprint as pp
import re
def buildGraphWordLadder(word1, word2):
    """
    Builds a word-ladder graph over all dictionary words of len(word1).

    Two words are connected when they differ in exactly one letter.
    Note: word1 and word2 are only used for the length check; the graph
    covers the whole same-length vocabulary.
    :param word1: a from word
    :param word2: a to word
    :return: a word ladder, class <'graph'>
    """
    # args verify
    if len(word1) != len(word2):
        raise SyntaxError(f'{word1} and {word2} have different length')
    else:
        pass
    # word dict filter
    same_len_list = list(filter(lambda x: len(x) == len(word1), words_set))
    # define a graph
    word_graph = Graph()
    def _buildGraphWordLadder(aword):
        """
        Connects aword's one-letter-apart neighbours to each other.

        :param aword: a word like 'hape'
        reg match '.ape', 'h.pe', 'ha.e', 'hap.', and connect
        :return: None
        """
        for _ in range(len(aword)):
            match_word_list = []
            # wildcard pattern with position _ replaced by '.'; all candidates
            # have the same length, so re.match effectively spans whole words
            # (assumes the words contain no regex metacharacters)
            regexp = aword[:_] + '.' + aword[_ + 1:]
            # put matched in a bucket
            for pot in same_len_list:
                if re.match(regexp, pot):
                    match_word_list.append(pot)
                    word_graph.addVertex(pot)
            # connect every pair inside the bucket (both directions)
            for item_1 in match_word_list:
                for item_2 in match_word_list:
                    if item_1 != item_2:
                        word_graph.addEdge(item_1, item_2)
    for item in same_len_list:
        _buildGraphWordLadder(item)
    return word_graph
if __name__ == '__main__':
res_graph = buildGraphWordLadder('fool', 'sage')
| steve3ussr/PyCharmProject | DSA_python/graph_learn/buildGraph_wordLadder.py | buildGraph_wordLadder.py | py | 1,531 | python | en | code | 0 | github-code | 50 |
36731106635 | #!/usr/bin/env python
import rospy
import actionlib
import math
from move_base_msgs.msg import MoveBaseAction
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Vector3
class Master:
    """Master Node

    Processes move_base goals (e.g. issued via Alexa) and publishes Twist
    velocity commands for the motor controller. Control is a simple
    proportional loop with acceleration ramping; the original PID terms are
    still present below but commented out.
    """
    # set up constants
    def __init__(self):
        """Sets up the ROS node, pub/sub, the action server and control state."""
        # create a new node
        rospy.init_node("master", anonymous=True)
        rospy.Subscriber("odom", Odometry, self.odometry_callback)
        self.pub = rospy.Publisher("cmd_vel", Twist, queue_size=50)
        self.action_server = actionlib.SimpleActionServer('move_base', MoveBaseAction, self.alexa_callback, False)
        # acceleration limits used to ramp the published velocities
        self.linear_accel = float(rospy.get_param("~linear_accel",0.2))
        self.rot_accel = float(rospy.get_param("~rot_accel", 1))
        # get publish rate and PID constants
        self.rate = rospy.get_param("~rate", 100)
        # self.Kp = rospy.get_param("~position_contant", 1.0)
        # self.Ki = rospy.get_param("~derivative_contant", 1.0)
        # self.Kd = rospy.get_param("~integral_contant", 1.0)
        # set when a new goal arrives (only read by commented-out code below)
        self.recv_msg = False
        # current positions
        self.current_position_x = 0.0
        self.current_position_y = 0.0
        self.current_orientation = 0.0
        # desired positions
        self.desired_position_x = 0.0
        self.desired_position_y = 0.0
        self.desired_orientation = 0.0
        # until the first odometry message, keep the wake-up pose as the goal
        self.first_run = True
        # last published commands, needed for acceleration ramping
        self.last_forward_vel = 0
        self.last_rot_vel = 0
        self.current_speed = 0
        self.action_server.start()
    def begin(self):
        """Sends velocities to the motors depending on the current and desired positions and orientations

        Runs forever at self.rate Hz: rotate to face the goal point, drive
        forward once roughly aligned, and ramp both commands by the
        configured acceleration limits before publishing.
        """
        r = rospy.Rate(self.rate)
        # # integral sum
        # self.forward_integral = 0.0
        # self.orientation_integral = 0.0
        # # old error
        # old_forward_error = 0.0
        # old_orientation_error = 0.0
        # # old time
        old_time = rospy.get_time()
        while not rospy.is_shutdown():
            # # get current time
            new_time = rospy.get_time()
            time_diff = new_time - old_time
            old_time = new_time
            # generate message
            msg = Twist()
            # current velocity
            forward_vel = 0.0
            rotational_vel = 0.0
            ###### To test ignore PID output ########################
            # http://robotsforroboticists.com/pid-control/
            # # forward PID
            # forward_error = self.desired.position - self.current.position
            # forward_integral = forward_integral + (forward_error*time_diff)
            # forward_derivative = forward_error - old_forward_error)/time_diff
            # forward_output = Kp*forward_error+Ki*forward_integral + Kd*forward_derivative
            # # orientation PID
            # orientation_error = self.desired_orientation - self.current_orientation
            # orientation_integral = orientation_integral + (orientation_error*time_diff)
            # orientation_derivative = orientation_error - old_orientation_error)/time_diff
            # orientation_output = Kp*orientation_error+Ki*orientation_integral + Kd*orientation_derivative
            # # set old errors
            # old_forward_error = forward_error
            # old_orientation_error = orientation_error
            ##################################################
            # lets do simple positional control for now
            # if abs(self.desired_position_x - self.current_orientation)
            # forward_vel = 0.5*((self.desired_position_x/math.cos(self.current_orientation)+self.desired_position_y/math.sin(self.current_orientation))
            # - (self.current_position_x/math.cos(self.current_orientation) + self.current_position_x/math.cos(self.current_orientation)))
            # straight-line distance to the goal point
            dist = math.sqrt((self.desired_position_x-self.current_position_x)**2+(self.desired_position_y-self.current_position_y)**2)
            # update desired orientation to point in the correct direction
            if dist >= 0.05: # 5 cm
                self.desired_orientation = math.atan2(self.desired_position_y - self.current_position_y,
                                                      self.desired_position_x - self.current_position_x)
                rospy.loginfo_throttle(1, "updating desired orientation %f" % self.desired_orientation)
            else:
                # close enough: latch the goal onto the current position
                self.desired_position_x = self.current_position_x
                self.desired_position_y = self.current_position_y
            # get delta orientation in the range -pi to pi so we always take the short way around
            orientation_err = (self.desired_orientation - self.current_orientation)
            # if orientation_err > math.pi:
            #     orientation_err = orientation_err - 2*math.pi
            # elif orientation_err < -math.pi:
            #     orientation_err = orientation_err + 2*math.pi
            # we are trying to move forward
            if dist >= 0.10: # 5 cm
                # rotate toward the correct location
                # if orientation_err>0.875:
                #     rotational_vel = min(0.875,orientation_err)
                # else:
                #     rotational_vel = 0.3*orientation_err
                rotational_vel = 0.5*orientation_err
                # make sure we are in the correct orientation before moving forward
                if abs(orientation_err) < 0.043: # 5 degrees/2
                    if dist > 2.5:
                        forward_vel = 1.1
                    else:
                        forward_vel = 0.3*dist
            # turn command
            else:
                # if self.recv_msg:
                #     self.action_server.set_succeeded()
                #     self.recv_msg = False
                #     continue
                # orientation deadband if we are doing a rotate command
                if abs(orientation_err) >= 0.043: # 5 degrees (abs = 2.5 degrees)
                    # if orientation_err > 0.172: # 10 degrees
                    #     rotational_vel = 0.875
                    # elif orientation_err < -0.172:
                    #     rotational_vel = -0.875
                    # else:
                    #     rotational_vel = 1.29/orientation_err
                    # rotational_vel = min(0.875,orientation_err)
                    rotational_vel = 0.3*orientation_err
            # ramp limiting: keep this cycle's commands within accel*dt of the last ones
            max_forward_vel = self.last_forward_vel + self.linear_accel*time_diff
            min_forward_vel = self.last_forward_vel - self.linear_accel * time_diff
            forward_vel = max(min_forward_vel, min(max_forward_vel, forward_vel))
            max_rot_vel = self.last_rot_vel + self.rot_accel * time_diff
            min_rot_vel = self.last_rot_vel - self.rot_accel * time_diff
            rotational_vel = max(min_rot_vel, min(max_rot_vel, rotational_vel))
            # fill in values for the Twist
            msg.linear = Vector3(forward_vel, 0, 0)
            msg.angular = Vector3(0, 0, rotational_vel)
            self.last_forward_vel = forward_vel
            self.last_rot_vel = rotational_vel
            rospy.loginfo_throttle(1, "Desired Position: (%f,%f,%f) Current Position: (%f,%f,%f) Sending Velocity: (%f,%f)" % (self.desired_position_x,self.desired_position_y,self.desired_orientation,self.current_position_x,self.current_position_y,self.current_orientation,forward_vel,rotational_vel))
            # publish the message
            self.pub.publish(msg)
            # sleep
            r.sleep()
    def odometry_callback(self, msg):
        """Updates the current position and orientation
        The message definition can be found here: http://docs.ros.org/api/nav_msgs/html/msg/Odometry.html
        Args:
            msg (nav_msgs.msg.Odometry): The current position
        """
        self.current_position_x = msg.pose.pose.position.x
        self.current_position_y = msg.pose.pose.position.y
        # NOTE(review): asin(q.z)*2 recovers yaw only for a pure-z rotation
        # quaternion with q.w >= 0 -- confirm the odometry source guarantees this
        self.current_orientation = math.asin(msg.pose.pose.orientation.z) * 2
        self.current_speed = msg.twist.twist.linear.x
        if self.first_run:
            # initialise the goal to wherever we wake up so the robot holds still
            self.desired_position_x = self.current_position_x
            self.desired_position_y = self.current_position_y
            self.desired_orientation = self.current_orientation
            self.first_run = False
    def alexa_callback(self, goal):
        """Sets the desired position and orientation
        The message definition can be found here: http://docs.ros.org/api/move_base_msgs/html/action/MoveBase.html
        Args:
            goal (move_base_msgs.msg.MoveBaseAction): The desired position
        """
        # the goal rotation is interpreted relative to the current heading
        self.desired_orientation = self.current_orientation + math.asin(goal.target_pose.pose.orientation.z) * 2
        # keep theta between -pi and pi
        if self.desired_orientation > math.pi:
            self.desired_orientation = self.desired_orientation - 2*math.pi
        elif self.desired_orientation < -math.pi:
            self.desired_orientation = self.desired_orientation + 2*math.pi
        # update desired positions: project the goal distance along the current heading
        self.desired_position_x = self.current_position_x + goal.target_pose.pose.position.x*math.cos(self.current_orientation)
        self.desired_position_y = self.current_position_y + goal.target_pose.pose.position.x*math.sin(self.current_orientation)
        rospy.loginfo("Getting updated goal P:%f R:%f", goal.target_pose.pose.position.x, math.asin(goal.target_pose.pose.orientation.z) * 2)
        self.action_server.set_succeeded()
        self.recv_msg = True
if __name__ == "__main__":
try:
controller = Master()
controller.begin()
except rospy.ROSInterruptException:
pass
| Transnavigators/TROSnavigator | master/master.py | master.py | py | 9,945 | python | en | code | 2 | github-code | 50 |
72883368795 | from time import sleep
from turtle import Turtle
class Score:
    """Tracks remaining attempts, the persisted high score and game-over state.

    Collaborates with a project ``ball`` object exposing ``.ball`` (a turtle),
    ``.BALL_SPEED`` and ``.hit_points``.
    """
    def __init__(self, ball):
        self.ball_obj = ball
        # the underlying turtle that is moved around the screen
        self.ball = self.ball_obj.ball
        # remaining lives
        self.attempts = 3
        # set once the ball has fallen below the bottom edge
        self.ball_gone = False
        self.game_over = False
    def update_score(self, screen, score):
        """Renders high score, current score and attempts in the window title."""
        try:
            with open("high_score.txt") as file:
                high_score = file.read()
            screen.title(f"Highest Score: {high_score} Current Score: {score} Attempts: {self.attempts}")
        except FileNotFoundError:
            # no persisted high score yet -- show 0
            screen.title(f"Highest Score: 0 Current Score: {score} Attempts: {self.attempts}")
    def refresh_end_game(self, score):
        """Respawns the ball after a lost life, or draws the "Game Over" banner.

        Returns:
            bool: True once the game is over (no attempts left, or the
            score reached 288).
        """
        if self.ball_obj.ball.ycor() < -280 and self.attempts != 0:
            self.ball_gone = True
        if self.ball_gone:
            # pause, then respawn the ball at the centre heading straight down
            sleep(2)
            self.ball.hideturtle()
            self.ball.goto(0, 0)
            self.ball.setheading(-90)
            self.ball_obj.BALL_SPEED = 0.5
            self.ball.showturtle()
            self.attempts -= 1
            self.ball_gone = False
        elif self.attempts == 0 or score == 288:
            # NOTE(review): this branch creates a fresh banner turtle on every
            # call while the game stays over -- confirm callers stop polling.
            game_over_text = Turtle()
            game_over_text.hideturtle()
            game_over_text.penup()
            game_over_text.goto(-165, 0)
            game_over_text.color("white")
            game_over_text.write("Game Over", font=("Bold", 50, ""))
            game_over_text.showturtle()
            self.game_over = True
        return self.game_over
    def set_high_score(self):
        """Persists ``hit_points`` to high_score.txt when it beats the stored value."""
        is_higher = False
        try:
            with open("high_score.txt") as file:
                high_score = int(file.read())
            if high_score < self.ball_obj.hit_points:
                is_higher = True
            if is_higher:
                with open("high_score.txt", "w") as file:
                    file.write(f"{self.ball_obj.hit_points}")
        except FileNotFoundError:
            # first run: create the file with the current score
            with open("high_score.txt", "w") as file:
                file.write(f"{self.ball_obj.hit_points}")
| ThozamileMad/breakout-game | scoreboard.py | scoreboard.py | py | 2,158 | python | en | code | 0 | github-code | 50 |
13825936865 | import functools
import unittest
import logging
import mox_cpr_delta_mo.__main__ as mox
mox.mora_get_all_cpr_numbers = lambda: ["0101621234", "0202621234"]
mox.mora_update_person_by_cprnumber = lambda fromdate, pnr, changes: None
mox.cpr_get_delta_udtraek = lambda sincedate: {sincedate: {"0101621234": {'fornavn': "Bent"}}} # noqa 501
mox.cpr_add_subscription = lambda pnr: True
mox.cpr_remove_subscription = lambda pnr: False
mox.cpr_get_all_subscribed = lambda: ["0101621234", "0303631234"]
class TestMoxCprDeltaMo(unittest.TestCase):
    """Tests the mox_cpr_delta_mo entry point against the stubbed backends."""
    def setUp(self):
        """Captures mox debug log calls into self.logstash as (args, kwargs)."""
        self.logstash = []
        def logger(*args, **kwargs):
            self.logstash.append((args, kwargs))
        # every mox.logger.debug(...) now records (logging.DEBUG, *args)
        mox.logger.debug = functools.partial(logger, logging.DEBUG)
    def test_update_cpr_subscriptions(self):
        """The run logs its start and end markers first and last (10 == logging.DEBUG)."""
        mox.update_cpr_subscriptions()
        self.assertEqual(self.logstash[0], (
            (10, 'update_cpr_subscriptions started'), {})
        )
        self.assertEqual(self.logstash[-1], (
            (10, 'update_cpr_subscriptions ended'), {})
        )
| magenta-aps/mox_cpr_delta_mo | tests/test_mox_cpr_delta_mo.py | test_mox_cpr_delta_mo.py | py | 1,054 | python | en | code | 0 | github-code | 50 |
25221893176 | from flask import Flask, render_template, request, redirect, flash
from datetime import datetime, timedelta
import os
import requests
from dotenv import load_dotenv
load_dotenv('.env')
app = Flask(__name__)
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'dev123')
@app.template_filter()
def format_datetime(date, fmt=None):
    """Render an ISO-8601 UTC timestamp as 'YYYY-MM-DD HH:MM:SS' shifted +8h.

    Accepts timestamps with or without fractional seconds. The ``fmt``
    argument exists for the template-filter signature but is unused.
    """
    try:
        parsed = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
    except ValueError:
        parsed = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
    return (parsed + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
@app.route('/')
def alert_dashboard():
    """Renders the dashboard with Alertmanager alerts sorted by severity."""
    # UI toggles persisted in cookies
    sound = True if request.cookies.get('sound') == '1' else False
    countdown = False if request.cookies.get('countdown') == '0' else True
    alertmanager_url = os.getenv('ALERTMANAGER_URL', 'http://127.0.0.1:9093')
    title = os.getenv('TITLE', '')
    w_num, c_num = 0, 0
    sorted_data = {}
    try:
        r = requests.get(alertmanager_url+'/api/v1/alerts?silenced=false&inhibited=false')
    except requests.exceptions.ConnectionError as errc:
        # t_num == -1 signals the template that Alertmanager is unreachable
        return render_template('index.html', alerts=sorted_data, title=title, w_num=w_num, c_num=c_num, t_num=-1)
    # SECURITY: eval() executes whatever text the Alertmanager endpoint
    # returns -- a compromised service or network path gains code execution
    # here. The parsed JSON should be transformed directly instead of being
    # round-tripped through str()/eval().
    data = eval(str(r.json()['data']).replace(
        'critical', '4').replace('warning', '2'))
    # sort by alert name, then (stable sort) by severity descending:
    # '4' = critical, '2' = warning after the replacements above
    sorted_data_tmp = sorted(data, key=lambda x: (
        x['labels']['alertname']))
    sorted_data = sorted(sorted_data_tmp, key=lambda x: (
        x['labels']['severity']), reverse=True)
    for i in sorted_data:
        severity = i['labels']['severity']
        w_num = w_num + 1 if severity == '2' else w_num
        c_num = c_num + 1 if severity == '4' else c_num
    return render_template('index.html', alerts=sorted_data, title=title, w_num=w_num, c_num=c_num, t_num=w_num + c_num, sound=sound, countdown=countdown)
| cszhi/alert-dashboard | app.py | app.py | py | 1,828 | python | en | code | 1 | github-code | 50 |
22568536394 | # Import necessary libraries
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input, Lambda, BatchNormalization, Dropout
from tensorflow.keras.layers import concatenate
# Set hyperparameters
hidden_dim = 2
num_classes = 10
batch_size = 100 # Should be a multiple of 60,000 and 10,000
# Load MNIST dataset and preprocess
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255
x_test = x_test / 255
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
y_train_cat = keras.utils.to_categorical(y_train, num_classes)
y_test_cat = keras.utils.to_categorical(y_test, num_classes)
# Define dropout and batch normalization function
def dropout_and_batch(x):
    """Batch-normalize a layer's output, then apply 30% dropout."""
    normalized = BatchNormalization()(x)
    return Dropout(0.3)(normalized)
# Define input layers
input_img = Input(shape=(28, 28, 1))
fl = Flatten()(input_img)
lb = Input(shape=(num_classes,))
x = concatenate([fl, lb])
# Build encoder network
x = Dense(256, activation='relu')(x)
x = dropout_and_batch(x)
x = Dense(128, activation='relu')(x)
x = dropout_and_batch(x)
z_mean2 = Dense(hidden_dim)(x)
z_log_var = Dense(hidden_dim)(x)
# Define noiser function for latent variable sampling
def noiser(args):
    """Sample a latent vector z = z_mean + exp(z_log_var / 2) * eps.

    Used inside a Lambda layer. As a side effect it publishes the incoming
    tensors to the module-level globals ``z_mean`` and ``z_log_var`` so a
    loss function can read them later.
    """
    global z_mean, z_log_var
    z_mean, z_log_var = args
    # eps ~ N(0, 1), one draw per batch row
    N = K.random_normal(shape=(batch_size, hidden_dim), mean=0., stddev=1.0)
    return K.exp(z_log_var / 2) * N + z_mean
h = Lambda(noiser, output_shape=(hidden_dim,))([z_mean2, z_log_var])
# Define decoder network
input_dec = Input(shape=(hidden_dim,))
lb_dec = Input(shape=(num_classes,))
d = concatenate([input_dec, lb_dec])
d = Dense(128, activation='elu')(d)
d = dropout_and_batch(d)
d = Dense(256, activation='elu')(d)
d = dropout_and_batch(d)
d = Dense(28*28, activation='sigmoid')(d)
decoded = Reshape((28, 28, 1))(d)
# Create encoder, decoder, and CVAE models
encoder = keras.Model([input_img, lb], h, name='encoder')
decoder = keras.Model([input_dec, lb_dec], decoded, name='decoder')
cvae = keras.Model([input_img, lb, lb_dec], decoder([encoder([input_img, lb]), lb_dec]), name="cvae")
z_meaner = keras.Model([input_img, lb], z_mean2)
tr_style = keras.Model([input_img, lb, lb_dec], decoder([z_meaner([input_img, lb]), lb_dec]), name='tr_style')
# Compile and train the CVAE.
# NOTE(review): `vae_loss` is referenced here but never defined in the code
# shown -- as written this line raises NameError at runtime. A VAE loss
# (reconstruction term plus KL term over z_mean/z_log_var) must be defined
# before compiling.
cvae.compile(optimizer='adam', loss=vae_loss)
cvae.fit([x_train, y_train_cat, y_train_cat], x_train, epochs=5, batch_size=batch_size, shuffle=True)
# Encode test data and visualize the encoded space
lb = lb_dec = y_test_cat
h = encoder.predict([x_test, lb], batch_size=batch_size)
plt.scatter(h[:, 0], h[:, 1])
# Visualize decoded images using latent space interpolation
n = 4
total = 2*n+1
input_lbl = np.zeros((1, num_classes))
input_lbl[0, 5] = 1
plt.figure(figsize=(total, total))
h = np.zeros((1, hidden_dim))
num = 1
for i in range(-n, n+1):
for j in range(-n, n+1):
ax = plt.subplot(total, total, num)
num += 1
h[0, :] = [1*i/n, 1*j/n]
img = decoder.predict([h, input_lbl])
plt.imshow(img.squeeze(), cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Function to plot images
def plot_digits(*images):
    """Show each image set as one grayscale row in a shared matplotlib figure.

    Every argument is an array of images; as many columns are drawn as the
    shortest set provides.
    """
    images = [img.squeeze() for img in images]
    count = min(img.shape[0] for img in images)
    plt.figure(figsize=(count, len(images)))
    for col in range(count):
        for row in range(len(images)):
            axis = plt.subplot(len(images), count, row * count + col + 1)
            plt.imshow(images[row][col])
            plt.gray()
            axis.get_xaxis().set_visible(False)
            axis.get_yaxis().set_visible(False)
    plt.show()
# Visualize original and transformed images
dig1 = 5
dig2 = 2
num = 10
X = x_train[y_train == dig1][:num]
lb_1 = np.zeros((num, num_classes))
lb_1[:, dig1] = 1
plot_digits(X)
for i in range(num_classes):
lb_2 = np.zeros((num, num_classes))
lb_2[:, i] = 1
Y = tr_style.predict([X, lb_1, lb_2], batch_size=num)
plot_digits(Y)
| SSobol77/Perceptron-1 | ls30/ls_30_CVAE.py | ls_30_CVAE.py | py | 4,109 | python | en | code | 3 | github-code | 50 |
20236349401 | from unittest import TestCase
from domain.exceptions import ImageNameException
from domain.value_objects import ImageName
class TestImageName(TestCase):
def test_init_WHEN_valid_value_given_THEN_creates_instance_with_given_value(self) -> None:
valid_value = 'name'
image_name = ImageName(value=valid_value)
self.assertEqual(image_name.value, valid_value)
def test_init_WHEN_value_is_empty_string_THEN_raises_image_name_exception(self) -> None:
invalid_value = ''
with self.assertRaises(ImageNameException):
ImageName(value=invalid_value)
| fr-mm/blaze_mines_bot | tests/unit/domain/value_objects/test_image_name.py | test_image_name.py | py | 605 | python | en | code | 4 | github-code | 50 |
43729816712 | #!/usr/bin/python3
"""4. Text indentation
a function that prints a text with 2 new lines after
each of these characters: ., ? and :
Prototype: def text_indentation(text):
text must be a string, otherwise raise a TypeError exception with
the message text must be a string
There should be no space at the beginning or at the end of
each printed line"""
def text_indentation(text):
    """Print ``text`` with a blank line inserted after each '.', '?' and ':'.

    Leading whitespace is stripped from every printed line.

    Args:
        text (str): the text to print.

    Raises:
        TypeError: if ``text`` is not a string, with the message
            "text must be a string".
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    pieces = []
    for char in text:
        pieces.append(char)
        if char in ".?:":
            pieces.append("\n\n")
    cleaned = "\n".join(line.lstrip() for line in "".join(pieces).splitlines())
    if cleaned.endswith("\n"):
        cleaned = cleaned[:-1]
    print(cleaned)
| Halmouus/alx-higher_level_programming | 0x07-python-test_driven_development/5-text_indentation.py | 5-text_indentation.py | py | 981 | python | en | code | 0 | github-code | 50 |
22840901551 | from .. import runtime
from ..runtime import Environment
from ..runtime.terminal import clear_screen
def _show_terminal_menu(
    prompt: str,
    items: list[str],
    one: bool = False,
    indentSize: int = 2
) -> list[int]:
    '''
    Displays a menu in the terminal and reads the user's choice

    Args:
        prompt (str): Menu title or CTA
        items (list[str]): Items to choose
        one (bool): Selection must contain
            only one option or not
        indentSize (int): Leading spaces
            before each numbered item

    Returns:
        list[int]: List of selected option indexes

    Raises:
        ValueError: If the typed selection is invalid
    '''
    print(prompt)
    for index, item in enumerate(items):
        print(f"{' ' * indentSize}{index}. {item}")
    selection = input("> ")
    # Bound the selection by the item count instead of the leaked loop
    # variable: with empty `items` the original `index` was unbound
    # (NameError); len(items) - 1 makes every selection raise ValueError
    # instead, which the caller handles.
    return _parse_menu_selection(selection, len(items) - 1, one)
def _show_rofi_menu(
    prompt: str,
    items: list[str],
    one: bool = False
) -> list[int]:
    '''
    Rofi menu backend -- not implemented yet.

    Always returns an empty selection regardless of the arguments.
    TODO: launch rofi with the given prompt and items.
    '''
    return []
def _parse_menu_selection(
selection: str,
maxValue: int,
one: bool
) -> list[int]:
'''
Parses menu selection
Args:
selection (str): Selection in format
0,1,2,0-2
maxValue (int): Selection range limit
one (bool): Selection must contain
only one option or not
Returns:
list[int]: List of selected options
[0, 1, 2]
'''
selection = selection.split(",")
indexes = set()
for slice in selection:
if slice.startswith("-"):
raise ValueError
elif slice.isnumeric():
if int(slice) > maxValue:
raise ValueError
indexes.add(int(slice))
else:
slice = slice.split("-")
if int(slice[0]) > maxValue or int(slice[-1]) > maxValue:
raise ValueError
indexes.update(range(int(slice[0]), int(slice[-1]) + 1))
if one and len(indexes) != 1:
raise ValueError
return sorted(indexes)
def show_menu(
    prompt: str,
    items: list[str],
    one: bool = False
) -> list[int]:
    '''
    Displays a menu based on runtime.graphics

    Re-prompts until a valid selection is entered: a ValueError from the
    selection parser restarts the loop.

    Args:
        prompt (str): Menu title or CTA
        items (list[str]): Items to choose
        one (bool): Only one option can be
            selected. Defaults to False.

    Returns:
        list[int]: Indexes of selected items
    '''
    # lazily pick a graphics backend on first use
    if runtime.graphics is Environment.Undefined:
        runtime._use_available_graphics()
    if one:
        prompt = f"{prompt} (Choose one option):"
    else:
        prompt = f"{prompt} (0,1,2,0-2):"
    while True:
        try:
            if runtime.graphics is Environment.Rofi:
                return _show_rofi_menu(prompt, items, one)
            elif runtime.graphics is Environment.Terminal:
                return _show_terminal_menu(prompt, items, one)
            # NOTE(review): any other backend value makes this loop spin
            # forever without output -- confirm that case is unreachable.
        except ValueError:
            if runtime.graphics is Environment.Terminal:
                # wipe the menu just drawn (prompt + items + input line)
                clear_screen(2 + len(items))
| 2trvl/dotfiles | scripts/crossgui/widgets/menu.py | menu.py | py | 2,559 | python | en | code | 0 | github-code | 50 |
27207820573 | # -*- coding:utf-8 -*-
# @Time : 2021/6/28 14: 23
# @Author : Ranshi
# @File : 面试题 02.06. 回文链表.py
from typing import Optional
class ListNode:
def __init__(self, val: int = 0, _next: Optional["ListNode"] = None):
self.val = val
self.next = _next
class Solution:
    """Check whether a singly linked list reads the same forwards and backwards."""

    def isPalindrome(self, head: Optional[ListNode]) -> bool:
        """O(n) time / O(1) extra-space palindrome check.

        Reverses the second half in place for the comparison and restores the
        original list structure before returning.
        """
        if head is None:
            return True
        # Locate the last node of the first half, then reverse the rest.
        half_end = self.end_of_first_half(head)
        tail_start = self.reverse_list(half_end.next) if half_end else None
        # Walk both halves in lockstep; stop early on the first mismatch.
        is_palindrome = True
        left, right = head, tail_start
        while is_palindrome and right is not None:
            if left:
                if left.val != right.val:
                    is_palindrome = False
                left = left.next
            right = right.next
        # Undo the reversal so the caller's list is unchanged.
        if half_end and tail_start:
            half_end.next = self.reverse_list(tail_start)
        return is_palindrome

    def end_of_first_half(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Return the last node of the first half via fast/slow pointers."""
        slow = fast = head
        while fast and fast.next and fast.next.next and slow:
            fast = fast.next.next
            slow = slow.next
        return slow

    def reverse_list(self, head: Optional[ListNode]):
        """Reverse the list headed at *head* in place; return the new head."""
        prev, node = None, head
        while node is not None:
            # Tuple assignment: RHS is evaluated before any rebinding.
            node.next, prev, node = prev, node, node.next
        return prev
| Zranshi/leetcode | interview-classic/02.06/main.py | main.py | py | 1,859 | python | en | code | 0 | github-code | 50 |
31058921525 | from dataclasses import dataclass
from pathlib import Path
from aoc2022.day6.buffer import Buffer
from aoc2022.day6.input import Input
from pytest import fixture
@dataclass
class Res:
    """Expected outcome for one sample buffer."""
    length: int  # marker length passed to get_start_of_packet_marker
    first_marker_position: int  # expected return value for that length
# One test case: the input buffer paired with its expected result.
Sample = tuple[Buffer, Res]
class TestDay6:
    """Day 6 tests driven by the example buffers from the puzzle statement."""

    @fixture
    def test_input(self) -> Input:
        # Puzzle-input fixture (not used by the sample-based test below).
        return Input(Path("./aoc2022/day6/test_input.txt"))

    @fixture
    def samples(self) -> list[Sample]:
        # Example buffers with their known marker positions.
        return [
            (Buffer('mjqjpqmgbljsphdztnvjfqwrcgsmlb'), Res(4, 7)),
            (Buffer('bvwbjplbgvbhsrlpgdmjqwftvncz'), Res(4, 5)),
            (Buffer('nppdvjthqldpwncqszvftbrmjlhg'), Res(4, 6)),
            (Buffer('nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg'), Res(4, 10)),
            (Buffer('zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw'), Res(4, 11))
        ]

    def test_finding_idx_start_of_packet_marker(self, samples: list[Sample]):
        for buffer, expected in samples:
            assert buffer.get_start_of_packet_marker(expected.length) == expected.first_marker_position
| Luzkan/AdventOfCode2022 | aoc2022/day6/test_day6.py | test_day6.py | py | 1,026 | python | en | code | 1 | github-code | 50 |
41589364993 | import tweepy
import random
import time
import os
import datetime
import pandas as pd
from dotenv import load_dotenv
# Load Twitter API credentials from the local .env file.
load_dotenv()
influencer_id_list = ["@takapon_jp", "@hirox246", "@ochyai"] # Horiemon, Hiroyuki, Yoichi Ochiai
# Changing these ids changes whose followers are targeted for likes/follows.
NG_WORDS=['RT']  # tweets containing any of these substrings are skipped
MY_SCREENNAME = 'st_st_blog'  # this bot's own account name
def create_api(API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_SECRET):
    """Build an authenticated tweepy client that waits out rate limits."""
    handler = tweepy.OAuthHandler(API_KEY, API_SECRET)
    handler.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    return tweepy.API(handler, wait_on_rate_limit=True)
def favorite_other_account(api, user_id_list):
    """Like recent tweets from a few followers of a random influencer.

    Picks one influencer from the module-level ``influencer_id_list``
    (NOTE(review): the ``user_id_list`` parameter is ignored — confirm whether
    it was meant to be used instead), then likes up to 3 recent original
    tweets of each of 5 randomly chosen, non-protected followers.

    Args:
        api: Authenticated tweepy API client.
        user_id_list: Unused (see note above).
    """
    influencer_id = random.choice(influencer_id_list)
    followers_id_list = api.followers_ids(influencer_id)
    random.shuffle(followers_id_list)
    for follower_id in followers_id_list[:5]:
        # Protected accounts cannot have their tweets fetched/liked.
        if api.get_user(follower_id).protected == False:
            print(f"TwitterIdが{follower_id}のツイートをいいねします")
            tweets = api.user_timeline(follower_id, count=3)
            for tweet in tweets:
                # Only original tweets that we have not already liked.
                if (not tweet.retweeted) and (not tweet.favorited) and ('RT @' not in tweet.text):
                    api.create_favorite(tweet.id)
                    # Random delay to look less bot-like / ease rate limits.
                    time.sleep(random.randint(0, 5))
def get_anhour_before():
    """Return (date, time) strings for 0.4 hours (24 min) ago, local time.

    NOTE(review): despite the name, the offset is 0.4 hours, not a full hour.

    Returns:
        tuple[str, str]: ('YYYY-MM-DD', 'HH:MM:SS') of the shifted timestamp.
    """
    moment = datetime.datetime.now() - datetime.timedelta(hours=0.4)
    return moment.strftime('%Y-%m-%d'), moment.strftime('%H:%M:%S')
def select_blog_starter(api):
    """Search recent '#ブログ初心者' (blog-beginner) tweets and keep authors
    likely to follow back.

    Filters out tweets that already have many likes, users whose
    follower/following ratio suggests they will not follow back, accounts
    that are already large, and tweets containing NG words.

    Fix: the bare ``except:`` around the ratio division is narrowed to
    ``ZeroDivisionError`` (the only exception the expression can raise) so
    unrelated errors are no longer silently converted to ratio 0.

    Args:
        api: Authenticated tweepy API client.

    Returns:
        tuple[list, list]: (selected tweets, their author User objects).
    """
    day, anhour_before = get_anhour_before()
    query = f'#ブログ初心者 -filter:links since:{day}_{anhour_before}_JST'
    print(f'検索ワード:{query}')
    tweets = api.search_tweets(q=query, lang='ja', count=20)
    selected_users = []
    selected_tweets = []
    for tweet in tweets:
        flag = True
        user_id = tweet.user.id
        user = api.get_user(user_id=user_id)
        followers = user.followers_count
        friends = user.friends_count
        try:
            ff_ratio = friends/followers
        except ZeroDivisionError:
            # Zero followers: fall back to 0, which fails the ratio test below.
            ff_ratio = 0
            print('ff比が算出できませんでした')
        # Skip tweets that already attracted several likes.
        if tweet.favorite_count >= 4:
            print('-----------------')
            print('ツイートについているいいねが多い')
            continue
        # Follower/following ratio outside the likely-follow-back band.
        if ff_ratio <= 0.85 or ff_ratio >= 2:
            print('-----------------')
            print('フォローを返さない人')
            continue
        # Large accounts rarely follow back.
        if followers >= 350:
            print('-----------------')
            print('フォロワーが多すぎる')
            continue
        # NG-word filter (e.g. retweets).
        for ng_word in NG_WORDS:
            if ng_word in tweet.text:
                flag = False
                break
        if flag:
            print('-----------------')
            print(f'ユーザー名:{tweet.user.screen_name}')
            print(f'フォロワー数:{followers}')
            print(f'ff比:{ff_ratio}')
            print(f'ツイート:{tweet.text}')
            print('-----------------')
            selected_tweets.append(tweet)
            selected_users.append(user)
    return selected_tweets, selected_users
def favorite_tweet(tweet) -> None:
    """Like *tweet*, tolerating the failure raised when it is already liked.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``Exception`` while keeping the best-effort behavior.
    """
    try:
        api.create_favorite(id=tweet.id)
        print('ツイートをいいねしました')
    except Exception:
        # tweepy raises when the tweet was already favorited.
        print('既にいいね済みです')
def follow_user(user) -> None:
    """Follow *user* unless already followed, and record them in the sheet.

    Fix: the log/record lines referenced the global ``selected_user`` leaked
    from the ``__main__`` loop instead of the ``user`` argument, so calling
    this function from anywhere else would crash or record the wrong account.
    They now use ``user``.
    """
    if judge_user_existence(user.screen_name):
        print(f'既に{user.screen_name}をフォロー済みです')
    else:
        api.create_friendship(user_id=user.id)
        print(f'{user.screen_name}をフォローしました')
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        print(user.id)
        append_users([user.screen_name, str(user.id), today, '×'])
# API authentication.
# NOTE(review): this runs at import time — it creates a live client and
# fetches this account's follower ids over the network.
api = create_api(os.getenv('API_KEY'), os.getenv(
    'API_SECRET'), os.getenv('ACCESS_TOKEN'), os.getenv('ACCESS_SECRET'))
follower_id : list[str] = api.get_follower_ids()
follower_id = [str(n) for n in follower_id]
if __name__ == '__main__':
    from spreadsheet import append_users, judge_user_existence, check_am_i_followed, return_unfollow_ids
    # Like and follow candidate "blog beginner" accounts.
    selected_tweets, selected_users = select_blog_starter(api)
    for selected_tweet in selected_tweets:
        favorite_tweet(selected_tweet)
    for selected_user in selected_users:
        follow_user(selected_user)
    kataomoi_ids = check_am_i_followed()
    # Unfollow accounts that did not follow back.
    unfollow_ids = return_unfollow_ids()
    for unfollow_id in unfollow_ids:
        api.destroy_friendship(user_id=unfollow_id)
        print(f'{unfollow_id}がフォローを返さないので、アンフォローします')
    # Like the latest tweet of one-sided follows (currently disabled).
    # for kataomoi_id in kataomoi_ids:
    #     top_tweet = api.user_timeline(user_id=kataomoi_id)[0]
    #     print(f'{kataomoi_id}のトップツイート{top_tweet.text}にいいねします')
    #     favorite_tweet(top_tweet)
    # Accounts unfollowed once are never followed again.
25554904454 | """
Author: coman8@uw.edu
Preprocessing the PTB and MS alignments before it is sent on its merry way downstream.
This file expects the alignment which does not have weird duplicate quotation marks.
Run this first:
sed -r "/'\"+([a-z]+)\"+'/ s//'\1'/g" $INPUT > $OUTPUT
cont Match stylistic differences in ptb and ms. It updates the ptb tokens
AND the combined tokens, leaving behind a CONT marker so you know things
were updated there. This includes fixing the annotations to ignore
differences between WANT TO and GOING TO.
detokenize 'you_know' and CONTRACTIONS are joined. WANT TO and GOING to are never
joined.
"""
import argparse
from collections import defaultdict
from data import read_tsv
import util
def update_ids(sent, tok_ids):
    """Rewrite token ids for multiword units.

    '---' placeholders map to the id 'None'; a 'you know' pair gets '_a'/'_b'
    suffixes; an already '_a'/'_b'-suffixed reduced pair ('want to' /
    'going to') gets a trailing '0' on both ids so it is never joined later.
    """
    PAIR_YOU_KNOW = ('you', 'know')
    PAIRS_REDUCED = (('want', 'to'), ('going', 'to'))
    out = []
    j = 0  # index of the token under consideration
    while j < len(sent):
        bigram = (sent[j], sent[j + 1]) if j + 1 < len(sent) else None
        if bigram and bigram[0] == '---':
            out.append('None')
            j += 1
        elif bigram == PAIR_YOU_KNOW:
            out.extend((tok_ids[j] + '_a', tok_ids[j + 1] + '_b'))
            j += 2  # consumed both halves of the pair
        elif bigram in PAIRS_REDUCED and \
                tok_ids[j].endswith('_a') and tok_ids[j + 1].endswith('_b'):
            out.extend((tok_ids[j] + '0', tok_ids[j + 1] + '0'))
            j += 2  # consumed both halves of the pair
        else:
            out.append(tok_ids[j])
            j += 1
    return out
def update_names(row, sent_col, tok_ids_col):
    """Return updated token ids for *row* when its sentence may contain a
    multiword unit ('you know', 'want to', 'going to') or a '---'
    placeholder; otherwise return the ids unchanged."""
    sentence = row[sent_col]
    ids = row[tok_ids_col]
    vocab = set(sentence)
    needs_update = (
        {'you', 'know'} <= vocab
        or {'want', 'to'} <= vocab
        or {'going', 'to'} <= vocab
        or '---' in vocab
    )
    return update_ids(sentence, ids) if needs_update else ids
def update_cont(row):
    """Merge MS-side continuation (CONT_MS) tokens into the PTB token stream.

    When the combined annotation contains CONT_MS, rebuilds the sentence,
    names, disfluency, and combined columns so that each MS continuation
    token is inserted among the PTB tokens with a 'CONT' annotation, while
    CONT_TREE tokens are dropped. Rows without CONT_MS are returned as-is.
    Three parallel indices track position in the PTB tokens, the MS tokens,
    and the PTB disfluency labels.
    """
    if 'CONT_MS' in set(row['comb_ann']):
        ix = defaultdict(int)
        ix['ptb'], ix['ms'], ix['ptb_disfl'] = 0, 0, 0
        temp = defaultdict(list)
        for i in range(len(row['comb_ann'])):
            label = row['comb_ann'][i]
            # the ptb tokens stay as is if it's not a CONT
            if util.is_ptb(label) and label != 'CONT_TREE':
                temp['sent'].append(row['sentence'][ix['ptb']])
                temp['names'].append(row['names'][ix['ptb']])
                if not row['names'][ix['ptb']] == 'None':
                    try:
                        temp['disfl'].append(row['disfl'][ix['ptb_disfl']])
                    except IndexError: # this catches one edge case where 'None' is at EOS
                        temp['disfl'].append(row['disfl'][ix['ptb_disfl']-1])
            # an MS cont is added to the ptb tokens (cont_ms uses a CONT annotation)
            if label == 'CONT_MS':
                temp['sent'].append(row['ms_sentence'][ix['ms']])
                temp['names'].append(row['ms_names'][ix['ms']])
                temp['disfl'].append(row['disfl'][ix['ptb_disfl']]) # keep the original ptb disfluency
                temp['comb'].append(row['comb_sentence'][i])
                temp['ann'].append('CONT')
            # for all normal labels, keep annotation the same (cont_tree has no annotation)
            if not label.startswith('CONT'):
                temp['comb'].append(row['comb_sentence'][i])
                temp['ann'].append(row['comb_ann'][i])
            # move indices forward
            if util.is_ptb(label):
                if ix['ptb'] < len(row['names']) and not row['names'][ix['ptb']] == 'None':
                    ix['ptb_disfl'] += 1
                ix['ptb'] += 1
            if util.is_ms(label):
                ix['ms'] += 1
        row['disfl'] = temp['disfl']
        row['sentence'] = temp['sent']
        row['names'] = temp['names']
        row['comb_ann'] = temp['ann']
        row['comb_sentence'] = temp['comb']
    return row
def update_reductions(row):
    """Collapse matched 'going to'/'want to' reduction windows in the
    combined annotation into CONT markers.

    Looks for the 4-token window where the MS and tree tokenizations of the
    same reduction sit side by side (annotations SUB_MS, SUB_MS, SUB_TREE,
    INS) and rewrites the two MS tokens as CONT; everything else is copied
    through unchanged.

    Fix: the original condition was ``A and B == pattern1 or B == pattern2``.
    Because ``and`` binds tighter than ``or``, any window equal to pattern2
    matched regardless of the bounds and annotation checks; the pattern test
    is now parenthesized under the same guard as pattern1.

    Args:
        row: DataFrame row with 'comb_ann' and 'comb_sentence' list columns.

    Returns:
        The same row with 'comb_ann' / 'comb_sentence' rewritten.
    """
    pattern1 = ['going', 'to', 'going', 'to']
    pattern2 = ['want', 'to', 'want', 'to']
    target = ['SUB_MS', 'SUB_MS', 'SUB_TREE', 'INS']
    temp = defaultdict(list)
    i = 0
    while i < len(row['comb_ann']):
        window = row['comb_sentence'][i:i+4]
        if i + 4 < len(row['comb_ann']) and row['comb_ann'][i:i+4] == target \
                and (window == pattern1 or window == pattern2):
            temp['ann'].extend(['CONT', 'CONT'])
            temp['comb'].extend([row['comb_sentence'][i], row['comb_sentence'][i+1]])
            # NOTE(review): advances past 3 of the 4 matched tokens; the
            # trailing INS token is copied through on the next iteration
            # (preserved from the original implementation).
            i += 3
        else:
            temp['ann'].append(row['comb_ann'][i])
            temp['comb'].append(row['comb_sentence'][i])
            i += 1
    row['comb_ann'] = temp['ann']
    row['comb_sentence'] = temp['comb']
    return row
def detokenize(row, sent_col, tok_ids_col):
    """Join multiword tokens back into single surface forms.

    Ids ending in '_a' mark the first half of a pair: 'you know' becomes
    'you_know' and contraction halves are concatenated directly. Ids equal
    to 'None' (inserted for '---' placeholders) are dropped. Rows without
    any pair/placeholder markers are returned unchanged. Reduced pairs get
    '_a0'/'_b0' ids upstream, so they are deliberately never joined here.

    Fix: the original loop advanced the index by only one after consuming a
    two-token pair (duplicating the second half) and never advanced past a
    'None' id (an infinite loop); the pair branch now steps by two and
    'None' ids are skipped explicitly.

    Args:
        row: DataFrame row.
        sent_col: Column holding the token list.
        tok_ids_col: Column holding the parallel token-id list.

    Returns:
        list: Detokenized tokens for the row.
    """
    ids = row[tok_ids_col]
    if any(x.endswith('_a') or x == 'None' for x in ids):
        sent = row[sent_col]
        temp = []
        i = 0
        while i < len(ids):
            label = ids[i]
            if label.endswith('_a'):
                if sent[i] == 'you' and sent[i + 1] == 'know':
                    # join 'you' 'know' tokens with an underscore
                    temp.append(sent[i] + '_' + sent[i + 1])
                else:
                    # join contraction halves directly
                    temp.append(sent[i] + sent[i + 1])
                i += 2  # consumed both halves of the pair
            elif label != 'None':
                temp.append(sent[i])
                i += 1
            else:
                i += 1  # drop 'None' placeholder ids
        return temp
    return row[sent_col]
def preprocess(args):
    """Run the alignment preprocessing pipeline and write a TSV.

    Always refreshes the token-id columns; optionally fixes PTB/MS style
    differences (args.cont) and adds detokenized sentence columns
    (args.detokenize).
    """
    df = read_tsv(args.file)
    column_pairs = [('sentence', 'names'), ('ms_sentence', 'ms_names')]
    print('Updating IDs')
    for words_col, ids_col in column_pairs:
        df[ids_col] = df.apply(
            lambda row, w=words_col, n=ids_col: update_names(row, w, n),
            axis=1)
    if args.cont:
        print('Updating style differences')
        df = df.apply(update_cont, axis=1)
        df = df.apply(update_reductions, axis=1)
    if args.detokenize:
        print('Detokenizing values')
        for words_col, ids_col in column_pairs:
            df[words_col + '_dtok'] = df.apply(
                lambda row, w=words_col, n=ids_col: detokenize(row, w, n),
                axis=1)
    df.to_csv(args.output, sep='\t', index=None)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument("file", help="original alignments file")
    parser.add_argument("output", help="output file")
    parser.add_argument("-c", "--cont",
                        help="fix style difference between ptb and ms to match ms",
                        action='store_true')
    parser.add_argument("-d", "--detokenize",
                        help="Combine forms such as contractions and remove special chars.",
                        action='store_true')
    args = parser.parse_args()
    preprocess(args)
| cmansfield8/swb_errors_surprisal | src/preprocessor.py | preprocessor.py | py | 7,014 | python | en | code | 0 | github-code | 50 |
3656956651 | import wx
import main_class
import settings
import copy
import sys
import req
import orbit
import ope_voltage
import south_judge
from datetime import datetime
import time
import serial
import math
import re
import csv
import ephem
import os
def resource_path(relative_path):
    """Resolve *relative_path* against the PyInstaller bundle directory
    (sys._MEIPASS) when running frozen, else against the current directory."""
    base = getattr(sys, '_MEIPASS', None)
    if base is None:
        base = os.path.abspath(".")
    return os.path.join(base, relative_path)
class moritator_main_class( main_class.main_class ):
    def __init__( self, parent ):
        """Build the MORITATOR main window.

        Shows a splash frame, loads satellite/COM-port settings, probes for a
        usable USB serial rotator port, fetches the selected satellite's TLE,
        opens the serial link, and prepares a CSV pass log.
        """
        frame = wx.Frame(None, -1, 'MORITATOR', size=(415,235))
        frame.Centre()
        panel = wx.Panel(frame, wx.ID_ANY)
        frame.Show(True)
        image = wx.Image(resource_path('loading.png'))
        bitmap = image.ConvertToBitmap()
        wx.StaticBitmap(panel, -1, bitmap, pos=(0, 0), size=(400,200))
        settings.init()
        self.satellite_choiceChoices, self.norad_id_list = settings.load_sat_list()
        port_list = list(serial.tools.list_ports.comports())
        self.sat_num, self.port = settings.load_state()
        expected_ports = []
        flag_not_found = False
        # Probe candidate USB serial ports; keep only those that open cleanly.
        for p in port_list:
            try:
                if 'Serial' in p[1] and 'USB' in p[1]:
                    check = serial.Serial(p[0],baudrate=9600,timeout=1)
                    check.close()
                    expected_ports.append(p[0])
            except:
                pass
        self.com_port_choiceChoices = expected_ports
        if len(expected_ports) == 0:
            self.port = 'NOT FOUND'
            self.com_port_choiceChoices = ['NOT FOUND']
            flag_not_found = True
        else:
            if not self.port in expected_ports:
                self.port = expected_ports[0]
        # Fall back to the first satellite if the saved index is stale.
        if self.sat_num >= len(self.satellite_choiceChoices):
            self.sat_num = 0
        self.norad_id = self.norad_id_list[self.sat_num]
        self.sat_name = self.satellite_choiceChoices[self.sat_num]
        self.bg_sat_list = settings.load_background_sat_list() # load the multi-satellite tracking config: list of target satellites
        main_class.main_class.__init__( self, parent )
        # Route (almost) every UI event through the throttled main loop.
        self.Bind(wx.EVT_IDLE, self.main)
        self.Bind(wx.EVT_MOVE, self.main)
        self.Bind(wx.EVT_MOVE_START, self.main)
        self.Bind(wx.EVT_MOVING, self.main)
        self.Bind(wx.EVT_MOVE_END, self.main)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.main)
        self.Bind(wx.EVT_CLOSE, self.frame_close)
        self.set_satellite_b.Bind(wx.EVT_BUTTON, self.set_satellite)
        self.set_norad_b.Bind(wx.EVT_BUTTON, self.set_norad_id)
        self.set_comport_b.Bind(wx.EVT_BUTTON, self.set_comport)
        self.set_90_b.Bind(wx.EVT_BUTTON, self.manual_rotation_antenna_90)
        self.daisenkai_b.Bind(wx.EVT_BUTTON, self.manual_rotation)
        self.daisenkai_b2.Bind(wx.EVT_BUTTON, self.disable_rotation)
        self.is_multiple_checkbox.Bind(wx.EVT_CHECKBOX, self.change_multiple_mode)
        self.last_runtime = time.time()
        #init parameters
        self.deg90_flag = False          # True while in SET-90 (stow) mode
        self.flag_round = False          # current pass crosses the rotator dead zone
        self.flag_start_round = False    # manual full-rotation requested
        self.flag_do_not_round = False   # full rotation explicitly disabled
        self.flag_after_pass = True
        self.flag_first_loop = True
        self.flag_init_rotation = True
        self.loop_cnt = 0
        # PID memories and errors for azimuth (azi_*) and elevation (ele_*).
        self.azi_M = 0.0
        self.azi_M1 = 0.0
        self.azi_M2 = 0.0
        self.ele_M = 0.0
        self.ele_M1 = 0.0
        self.azi_e = 0.00
        self.azi_e1 = 0.00
        self.azi_e2 = 0.00
        self.ele_e = 0.00
        self.ele_e1 = 0.00
        self.ele_e2 = 0.00
        # PID gains: a* = azimuth, e* = elevation.
        self.aKp = 0.25
        self.aKi = 0.125
        self.aKd = 0.10
        self.eKp = 0.10
        self.eKi = 0.25
        self.eKd = 0.10
        # Ring buffers for smoothing measured angles.
        self.plus_angle = [0] * 10
        self.pluse_angle = [0] * 10
        self.a_cnt = 0
        self.sum_kakudo = 0
        self.sume_kakudo = 0
        self.cal_kakudo = 0
        self.cale_kakudo = 0
        self.cnt = 0
        self.send_flag = 0
        self.next_azi_flag = 0
        self.next_ele_flag = 0
        self.azi_x = []
        self.azi_y = []
        self.ele_x = []
        self.ele_y = []
        self.azi_x.append(0.00)
        self.azi_y.append(0.00)
        self.ele_x.append(0.00)
        self.ele_y.append(0.00)
        # Measured angles (deg) and raw potentiometer voltages from serial.
        self.azi_kakudo = 0.0
        self.ele_kakudo = 0.0
        self.now_azi_cood = 0.0
        self.now_ele_cood = 0.0
        self.before_azi_cood = 0.0
        self.before_ele_cood = 0.0
        self.azi_goal2 = 0.0
        self.azi_goal3 = 0.0
        # Fetch the TLE; fall back to the first satellite on failure.
        try:
            self.tle = req.read_TLE(self.norad_id)
            if len(self.tle) < 2:
                self.sat_num = 0
                self.norad_id = self.norad_id_list[self.sat_num]
                self.sat_name = self.satellite_choiceChoices[self.sat_num]
                self.satellite_choice.SetSelection( self.sat_num )
                self.tle = req.read_TLE(self.norad_id)
        except:
            self.sat_num = 0
            self.norad_id = self.norad_id_list[self.sat_num]
            self.sat_name = self.satellite_choiceChoices[self.sat_num]
        if not flag_not_found:
            self.ser1 = serial.Serial(self.port,baudrate=9600,timeout=1)
        self.start_time,self.finish_time = south_judge.judgement(self.tle)
        self.csv_strtime = str(self.start_time)
        self.csv_time = self.csv_strtime.split()
        print(self.csv_time)
        self.csv_ttime = self.csv_time[1].split(':')
        self.csv_tttime = self.csv_ttime[2].split('.')
        print(self.csv_time)
        csv_date = '-'.join(self.csv_time[0].split('/'))
        #csvfile open
        if not os.path.exists('./../moritator_log'):
            os.makedirs('./../moritator_log')
        f = open('./../moritator_log/' + csv_date + '_' + str(self.csv_ttime[0]) + str(self.csv_ttime[1]) + str(self.csv_tttime[0]) +'.csv', 'w')
        self.writer = csv.writer(f, lineterminator = '\n')
        self.writer.writerow([self.tle[0]])
        self.writer.writerow([self.tle[1]])
        self.writer.writerow([self.tle[2]])
        self.writer.writerow(['', 'ローテータ', ' ', '衛星', ' '])
        self.writer.writerow(['JST', 'アジマス', 'エレベーション', 'アジマス', 'エレベーション'])
        settings.save_state(self.sat_num, self.port)
        frame.Show(False)
def set_satellite(self, event):
self.init_rotation()
self.deg90_flag == False
self.sat_num = self.satellite_choice.GetSelection()
self.sat_name = self.satellite_choiceChoices[self.sat_num]
self.norad_id = self.norad_id_list[self.satellite_choice.GetSelection()]
self.norad_id_input.SetValue(self.norad_id)
self.tle = req.read_TLE(self.norad_id)
self.tracking_satellite.SetLabel(self.sat_name)
settings.save_state(self.sat_num, self.port)
if event != None:
self.is_multiple_checkbox.SetValue(False)
    def set_comport(self, event):
        """COM-port drop-down handler: persist the choice and (re)open the
        serial link to the rotator.

        NOTE(review): state is saved twice, before and after opening the
        port — likely only the second call is needed; confirm before removing.
        """
        self.port = self.com_port_choiceChoices[self.com_port_choice.GetSelection()]
        settings.save_state(self.sat_num, self.port)
        self.ser1 = serial.Serial(self.port,baudrate=9600,timeout=1)
        settings.save_state(self.sat_num, self.port)
    def set_norad_id(self, event):
        """Apply the NORAD id typed into the input box.

        Known ids select the matching catalog entry; unknown ids occupy the
        last ("other") slot and take their display name from the TLE. The
        label font is shrunk for long names.
        """
        self.init_rotation()
        self.deg90_flag = False
        self.norad_id = self.norad_id_input.GetValue()
        self.tle = req.read_TLE(self.norad_id)
        # Compare against the catalog with the "other id" slot blanked out.
        norad_list = copy.deepcopy(self.norad_id_list)
        norad_list[-1] = ''
        if self.norad_id in norad_list:
            self.sat_num = self.norad_id_list.index(self.norad_id)
            self.sat_name = self.satellite_choiceChoices[self.sat_num]
            self.satellite_choice.SetSelection(self.sat_num)
        else:
            self.sat_num = len(self.satellite_choiceChoices) - 1
            self.sat_name = self.tle[0]
            self.norad_id_list[-1] = self.norad_id
            self.satellite_choice.SetSelection( self.sat_num )
        self.tracking_satellite.SetLabel(' '+self.sat_name)
        # NOTE(review): debug leftover — prints the name 100 times.
        for i in range(100):
            print(self.sat_name)
        if len(self.sat_name) > 10:
            font_size = int(900/len(self.sat_name))
            self.tracking_satellite.SetFont( wx.Font( font_size, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Lucida Grande" ) )
        else:
            self.tracking_satellite.SetFont( wx.Font( 48, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Lucida Grande" ) )
        settings.save_state(self.sat_num, self.port)
        settings.save_other_id(self.norad_id)
        if event != None:
            # A manual entry turns off multi-satellite auto-switching.
            self.is_multiple_checkbox.SetValue(False)
    def manual_rotation_antenna_90(self, event):
        """SET-90 button handler: toggle stow mode on/off.

        Entering stow mode clears the tracking labels and relabels the
        button CANCEL; leaving it restores normal tracking display.
        """
        if self.deg90_flag == False:
            self.deg90_flag = True
            self.tracking.SetLabel( u"")
            self.tracking_satellite.SetLabel('SET 90 MODE')
            self.sat_ele.SetLabel('')
            self.sat_azi.SetLabel('')
            self.next_rise.SetLabel('')
            self.next_set.SetLabel('')
            self.aos_time.SetLabel('')
            self.los_time.SetLabel('')
            self.set_90_b.SetLabel('CANCEL')
        else:
            self.deg90_flag = False
            self.tracking.SetLabel( u"\n Now Tracking ")
            self.tracking_satellite.SetLabel(self.sat_name)
            self.set_90_b.SetLabel('SET 90 & QUIT')
    def set_next_sat(self):
        """In multi-satellite mode, switch to the satellite that rises next.

        Fetches a TLE for every configured background satellite, predicts its
        next pass from the fixed observer site, sorts by rise time, and
        applies the earliest id via set_norad_id(). Runs only once per pass
        (guarded by flag_after_pass).
        """
        if self.is_multiple_checkbox.GetValue() and self.flag_after_pass:
            self.flag_after_pass = False
            sat_list = copy.copy(self.bg_sat_list)
            for i in range(len(sat_list)):
                name = sat_list[i][0]
                id = sat_list[i][1]
                # NORAD catalog numbers are at most 5 digits here.
                if len(id) > 5:
                    id = id[:5]
                tle = req.read_TLE(id)
                sat = ephem.readtle(tle[0], tle[1], tle[2])
                # Fixed ground-station coordinates (Fudai).
                fudai = ephem.Observer()
                fudai.lat = '34.545898'
                fudai.lon = '135.503224'
                rise_t, az_rise, max_t, alt_max, set_t, az_set = fudai.next_pass(sat)
                sat_list[i] = [rise_t, name, id]
            # Sorting on [rise_t, ...] puts the soonest pass first.
            sat_list.sort()
            next_id = sat_list[0][2]
            self.norad_id_input.SetValue(next_id)
            self.set_norad_id(None)
        else:
            pass
def change_multiple_mode(self, event):
if self.is_multiple_checkbox.GetValue():
self.flag_after_pass = True
self.set_next_sat()
else:
self.set_norad_id(None)
def main(self, event):
try:
times = 1
event.RequestMore(True)
except:
times = 3
pass
if time.time() - self.last_runtime > 0.25*times:
self.last_runtime = time.time()
try:
if self.deg90_flag:
self.set_antenna_90()
else:
self.operate_antenna()
if self.sat_num == len(self.satellite_choiceChoices) - 1:
self.sat_name = self.tle[0]
if self.sat_name[:2] == '0 ':
self.sat_name == self.sat_name[2:]
self.tracking_satellite.SetLabel(self.sat_name)
if len(self.sat_name) > 10:
font_size = int(900/len(self.sat_name))
self.tracking_satellite.SetFont( wx.Font( font_size, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Lucida Grande" ) )
else:
self.tracking_satellite.SetFont( wx.Font( 48, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Lucida Grande" ) )
self.tracking.SetLabel( u"\n Now Tracking ")
utctime = datetime.utcnow()
self.angle = orbit.cal_orbit(self.tle,utctime)
self.sat_ele.SetLabel('%0.2f'%self.angle[1])
self.sat_azi.SetLabel('%0.2f'%self.angle[0])
except:
self.tracking_satellite.SetLabel('ERROR')
self.tracking.SetLabel('')
def set_antenna_90(self):
set_deg = 90
self.ele_goal = set_deg
self.init_rotation()
if set_deg - self.ele_kakudo < 1 and set_deg -self.ele_kakudo > -1:
self.frame_close(wx.EVT_CLOSE)
self.output_voltage_to_wait()
    def manual_rotation(self, event):
        """MANUAL button: toggle the forced full-rotation ("daisenkai") mode.

        Only effective while judge_round() flagged the current pass as one
        that crosses the rotator dead zone (flag_round). State changes are
        reflected in the UI and logged to the CSV writer.
        """
        self.send_flag = 0
        if self.flag_round:
            self.flag_start_round = not self.flag_start_round
        else:
            self.flag_start_round = False
        if self.flag_start_round:
            self.now_daisenkai_text.SetForegroundColour( wx.Colour( 255, 0, 0 ) )
            self.now_daisenkai_text.SetLabel('Manual Rotation ENABLED')
            self.writer.writerow([str(datetime.now()), 'Manual Rotation ENABLED'])
            self.daisenkai_b.SetForegroundColour( wx.Colour( 255, 0, 0 ) )
            self.daisenkai_b2.SetLabel('DISABLE')
            self.daisenkai_b2.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
            # Manual rotation and disabled rotation are mutually exclusive.
            self.flag_do_not_round = False
        else:
            self.now_daisenkai_text.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
            self.now_daisenkai_text.SetLabel('AUTO ROTATION (default)')
            self.writer.writerow([str(datetime.now()), 'Manual Rotation DISABLED'])
            self.daisenkai_b.SetLabel('MANUAL')
            self.daisenkai_b.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
    def disable_rotation(self, event):
        """DISABLE button: toggle suppression of the automatic full rotation.

        Only effective while the pass crosses the dead zone (flag_round);
        turning it on cancels any pending manual rotation.
        """
        if self.flag_round:
            self.flag_do_not_round = not self.flag_do_not_round
        else:
            self.flag_do_not_round = False
        if self.flag_do_not_round:
            self.now_daisenkai_text.SetForegroundColour( wx.Colour( 0, 0, 255 ) )
            self.now_daisenkai_text.SetLabel('Rotation DISABLED')
            self.writer.writerow([str(datetime.now()), 'Rotation DISABLED'])
            self.daisenkai_b2.SetForegroundColour( wx.Colour( 255, 0, 0 ) )
            self.daisenkai_b.SetLabel('MANUAL')
            self.daisenkai_b.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
            # Disabled rotation overrides any manual rotation request.
            self.flag_start_round = False
        else:
            self.now_daisenkai_text.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
            self.now_daisenkai_text.SetLabel('AUTO ROTATION (default)')
            self.daisenkai_b2.SetLabel('DISABLE')
            self.daisenkai_b2.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
    def init_rotation(self):
        """Reset all rotation-mode flags and restore the default rotation UI."""
        self.now_daisenkai_text.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
        self.now_daisenkai_text.SetLabel('AUTO ROTATION (default)')
        self.daisenkai_b2.SetLabel('DISABLE')
        self.daisenkai_b2.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
        self.daisenkai_b.SetLabel('MANUAL')
        self.daisenkai_b.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
        self.flag_round = False
        self.flag_start_round = False
        self.flag_do_not_round = False
        self.flag_init_rotation = True
        self.send_flag = 0
    def judge_round(self):
        """Decide pass direction and whether it crosses the rotator dead zone.

        Computes rise/culmination/set azimuths for the next pass from the
        fixed observer site, sets self.flag_clockwise from the shorter arc
        between rise and culmination, and sets self.flag_round when the pass
        azimuth sweeps through the 160-165 deg wrap region, updating the
        YES/NO indicator.

        Returns:
            bool: self.flag_round
        """
        self.flag_first_loop = False
        self.flag_round = False
        hirogari = ephem.readtle(self.tle[0], self.tle[1], self.tle[2])
        # Fixed ground-station coordinates (Fudai).
        fudai = ephem.Observer()
        fudai.lat = '34.545898'
        fudai.lon = '135.503224'
        rise_t, az_rise, max_t, alt_max, set_t, az_set = fudai.next_pass(hirogari)
        az_rise = int(orbit.cal_orbit(self.tle, rise_t)[0])
        az_set = int(orbit.cal_orbit(self.tle, set_t)[0])
        az_max = int(orbit.cal_orbit(self.tle,max_t)[0])
        # Step degree-by-degree from rise azimuth to culmination azimuth;
        # <= 180 steps means the short way round is clockwise.
        parameter = az_rise
        distance = 0
        while(parameter != az_max):
            parameter += 1
            distance += 1
            if parameter >= 360:
                parameter -= 360
        if distance <= 180:
            self.flag_clockwise = True
        else:
            self.flag_clockwise = False
        # Sweep from rise to set in the chosen direction and look for the
        # 160-165 deg dead zone along the way.
        parameter = az_rise
        while(parameter != az_set):
            if self.flag_clockwise:
                parameter += 1
            else:
                parameter -= 1
            if parameter >= 360:
                parameter -= 360
            elif parameter < 0:
                parameter += 360
            if parameter > 160 and parameter < 165:
                self.flag_round = True
                break
        if self.flag_round:
            self.daisenkai_yesno_text.SetLabel('YES')
            self.daisenkai_yesno_text.SetForegroundColour( wx.Colour( 255, 0, 0 ) )
        else:
            self.daisenkai_yesno_text.SetLabel('NO')
            self.daisenkai_yesno_text.SetForegroundColour( wx.Colour( 0, 0, 0 ) )
        return self.flag_round
    def operate_antenna(self):
        """One tracking step: refresh pass data and drive the rotator.

        Below -1 deg elevation the antenna is parked toward the next pass
        (AOS/LOS labels refreshed, possibly switching satellite in
        multi-satellite mode); above it the rotator follows the satellite.
        """
        utctime = datetime.utcnow()
        self.angle = orbit.cal_orbit(self.tle,utctime)
        self.sat_ele.SetLabel('%0.2f'%self.angle[1])
        self.sat_azi.SetLabel('%0.2f'%self.angle[0])
        hirogari = ephem.readtle(self.tle[0], self.tle[1], self.tle[2])
        # Fixed ground-station coordinates (Fudai).
        fudai = ephem.Observer()
        fudai.lat = '34.545898'
        fudai.lon = '135.503224'
        self.rise_t, self.az_rise, max_t, alt_max, self.set_t, self.az_set = fudai.next_pass(hirogari)
        rise_t_jst = ephem.localtime(self.rise_t)
        set_t_jst = ephem.localtime(self.set_t)
        print(self.rise_t)
        if self.angle[1] <= -1:
            # Satellite below the horizon: prepare for the next pass.
            if self.flag_init_rotation == False:
                self.init_rotation()
            self.set_next_sat()
            self.judge_round()
            print(type(self.rise_t))
            aos_t = str(rise_t_jst).split()
            los_t = str(set_t_jst).split()
            self.aos_time.SetLabel(aos_t[0] + '\n' + aos_t[1][0:8])
            self.los_time.SetLabel(los_t[0] + '\n' + los_t[1][0:8])
            self.next_angle = orbit.cal_orbit(self.tle,self.rise_t)
            self.next_set_angle = orbit.cal_orbit(self.tle, self.set_t)
            self.next_rise.SetLabel('%0.2f'%self.next_angle[0])
            self.next_set.SetLabel('%0.2f'%self.next_set_angle[0])
            self.set_angle_to_wait()
            self.output_voltage_to_wait()
        else:
            # Satellite in view: track it directly.
            self.flag_init_rotation = False
            self.flag_after_pass = True
            if self.flag_first_loop:
                self.judge_round()
            self.set_angle_to_sat()
            print('1')
            self.output_voltage_to_sat()
            print('2')
    def set_angle_to_wait(self):
        """Choose rotator goal angles while waiting for the next pass.

        Points the rotator at the predicted rise position. Azimuths above
        165 deg are expressed as negative angles (the rotator range wraps
        there); a pending manual full rotation overrides the azimuth goal.

        NOTE(review): ope_voltage.cal_voltage() appears to return the next
        pass time here despite its name — confirm against ope_voltage.
        """
        next_time = ope_voltage.cal_voltage(self.angle,self.tle)
        self.next_angle = orbit.cal_orbit(self.tle,next_time)
        next_voltage = ope_voltage.cal_voltage(self.next_angle,self.tle)
        if self.next_angle[0] > 165:
            # Reach >165 deg from the negative side of the wrap.
            self.azi_goal = self.next_angle[0] - 360
        else:
            if self.next_angle[0] >160:
                self.next_angle[0] = 160
            self.azi_goal = self.next_angle[0]
        self.ele_goal = self.next_angle[1]
        if self.flag_round and self.flag_start_round:
            # Manual full rotation: drive to the far end of the range.
            if self.flag_clockwise:
                self.azi_goal = -195
            else:
                self.azi_goal = 160
    def set_angle_to_sat(self):
        """Choose rotator goal angles while tracking the satellite.

        Ends a full rotation once the satellite azimuth passes the dead-zone
        edge, maps azimuths above 165 deg to negative angles (rotator wrap),
        and applies the manual-rotation / rotation-disabled overrides.
        """
        if self.flag_clockwise:
            if self.angle[0] >= 165:
                # Past the dead zone: the full rotation is complete.
                self.flag_start_round = False
                self.flag_round = False
                self.now_daisenkai_text.SetLabel('ROTATION FINISHED')
                self.daisenkai_b.SetLabel('MANUAL')
                self.daisenkai_b2.SetLabel('DISABLE')
        else:
            if self.angle[0] <= 160:
                self.flag_start_round = False
                self.flag_round = False
                self.now_daisenkai_text.SetLabel('ROTATION FINISHED')
                self.daisenkai_b.SetLabel('MANUAL')
                self.daisenkai_b2.SetLabel('DISABLE')
        if self.angle[0] > 165:
            # Reach >165 deg from the negative side of the wrap.
            self.azi_goal = self.angle[0] - 360
        else:
            if self.angle[0] >160:
                self.angle[0] = 160
            self.azi_goal = self.angle[0]
        self.ele_goal = self.angle[1]
        if self.flag_round:
            if self.flag_start_round:
                # Manual full rotation: drive to the far end of the range.
                if self.flag_clockwise:
                    self.azi_goal = -195
                else:
                    self.azi_goal = 160
            elif self.flag_do_not_round:
                # Rotation disabled: clamp at the dead-zone edge instead.
                if self.flag_clockwise:
                    if self.angle[0] >= 160:
                        self.azi_goal = 160
                else:
                    if self.angle[0] <= 165:
                        self.azi_goal = -195
    def output_voltage_to_wait(self):
        """Read rotator feedback and send PWM commands while parked/waiting.

        Parses the two potentiometer voltages (azimuth, elevation) from one
        serial line, converts them to degrees via the dated calibration
        polynomials, runs a velocity-form PID on each axis, and writes one
        signed PWM byte per call (axes alternate via self.cnt). The special
        value 1.01 (sent as 101) tells the controller both axes are on goal.

        NOTE(review): the regex '\\d+' is a non-raw string literal — works,
        but should be a raw string; left unchanged here.
        """
        send_flag = 0
        voltage = ope_voltage.cal_voltage(self.angle,self.tle)
        r=self.ser1.readline()
        if len(r) != 0:
            # Expected four integer groups: azi int/frac, ele int/frac.
            regex = re.compile('\d+')
            match = regex.findall(str(r))
            self.before_azi_cood = self.now_azi_cood
            self.before_ele_cood = self.now_ele_cood
            self.now_azi_cood = float(match[0]) + float(match[1]) / (pow(10,len(match[1])))
            self.now_ele_cood = float(match[2]) + float(match[3]) / (pow(10,len(match[3])))
            print('azi:' + str(self.now_azi_cood) + ' ele:' + str(self.now_ele_cood))
            #yes pid
            # Piecewise voltage->degree calibration; out-of-range readings
            # are discarded by reverting to the previous sample.
            if self.now_azi_cood >= 0.0616 and self.now_azi_cood <= 4.781323: #20210319
                self.azi_kakudo = 62.267 * self.now_azi_cood - 199.21
            elif self.now_azi_cood > 4.781323 and self.now_azi_cood <= 4.8318: #20210319
                self.azi_kakudo = 197.87 * self.now_azi_cood - 846.1
            elif self.now_azi_cood > 4.8318 and self.now_azi_cood <= 4.995117: #20210319
                self.azi_kakudo = 489.64 * self.now_azi_cood*self.now_azi_cood - 4501.8 * self.now_azi_cood + 10430
            else:
                self.now_azi_cood = self.before_azi_cood
            if self.now_ele_cood >= 0.0 and self.now_ele_cood < 4.5583: #20210313
                self.ele_kakudo = 15.484 * self.now_ele_cood - 5.9071
            elif self.now_ele_cood >= 4.5583 and self.now_ele_cood <= 5.0:
                self.ele_kakudo = 164.45 * self.now_ele_cood*self.now_ele_cood - 1502.1 * self.now_ele_cood + 3495.2
            else:
                self.now_ele_cood = self.before_ele_cood
            # Display azimuth in 0-360 even though internally it may be <0.
            if self.azi_kakudo < 0:
                label_azi = self.azi_kakudo + 360
            else:
                label_azi = self.azi_kakudo
            self.antenna_azi.SetLabel('%0.2f'%label_azi)
            self.antenna_ele.SetLabel('%0.2f'%self.ele_kakudo)
            #azimath
            # Shift PID histories, then compute the new azimuth error.
            self.azi_M2 = self.azi_M1
            self.azi_M1 = self.azi_M
            self.azi_e2 = self.azi_e1
            self.azi_e1 = self.azi_e
            self.azi_e = (self.azi_goal + 0) - self.azi_kakudo
            if self.azi_e < 10 and self.azi_e > -10:
                # Velocity-form PID within +/-10 deg; clamp PWM to +/-0.49.
                self.azi_M = self.aKp * (self.azi_e-self.azi_e1) + self.aKi * self.azi_e + self.aKd * ((self.azi_e-self.azi_e1) - (self.azi_e1-self.azi_e2))
                self.azi_pwm = round(self.azi_M,1)
                if self.azi_pwm >= 5:
                    self.azi_pwm = 0.49
                elif self.azi_pwm <= -5:
                    self.azi_pwm = -0.49
                else:
                    self.azi_pwm = self.azi_pwm / 10
            elif self.azi_e >= 8:
                self.azi_pwm = 0.49
            elif self.azi_e <= -8:
                self.azi_pwm = -0.49
            # On-goal when azimuth is within +/-4 deg.
            if self.azi_e < 4 and self.azi_e > -4:
                self.next_azi_flag = 1
            else:
                self.next_azi_flag = 0
            #elevation
            self.ele_M1 = self.ele_M
            self.ele_e2 = self.ele_e1
            self.ele_e1 = self.ele_e
            self.ele_e = (self.ele_goal + 0) - self.ele_kakudo
            self.ele_M = self.eKp * (self.ele_e-self.ele_e1) + self.eKi * self.ele_e + self.eKd * ((self.ele_e-self.ele_e1) - (self.ele_e1-self.ele_e2))
            # Elevation uses banged speeds rather than the PID output here.
            if self.ele_e >= 1:
                self.ele_pwm = 0.99
            elif self.ele_e >= 0 and self.ele_e < 1:
                self.ele_pwm = 0.5
            elif self.ele_e >= -1 and self.ele_e < 0:
                self.ele_pwm = 0.5
            elif self.ele_e < -1:
                self.ele_pwm = -0.99
            # On-goal when elevation is within +/-1 deg.
            if self.ele_e < 1 and self.ele_e > -1:
                self.next_ele_flag = 1
            else:
                self.next_ele_flag = 0
            if self.next_azi_flag == 1 and self.next_ele_flag == 1:
                # Both axes on goal: send the 1.01 "hold" sentinel.
                self.azi_pwm = 1.01
                self.ele_pwm = 1.01
            azi_send_data = math.floor(self.azi_pwm * 100)
            ele_send_data = math.floor(self.ele_pwm * 100)
            hoge1 = azi_send_data.to_bytes(1,'little',signed=True)
            hoge2 = ele_send_data.to_bytes(1,'little',signed=True)
            if self.send_flag == 0:
                # Alternate azimuth/elevation bytes across calls.
                if self.cnt == 0:
                    self.ser1.write(hoge1)
                    self.cnt = 1
                elif self.cnt == 1:
                    self.ser1.write(hoge2)
                    self.cnt = 0
                if self.next_azi_flag == 1 and self.next_ele_flag == 1:
                    # Stop sending once both axes have reached the goal.
                    self.send_flag = 1
def output_voltage_to_sat(self):
self.send_flag = 0
voltage = ope_voltage.cal_voltage(self.angle,self.tle)
r=self.ser1.readline()
if len(r) != 0:
regex = re.compile('\d+')
match = regex.findall(str(r))
self.before_azi_cood = self.now_azi_cood
self.before_ele_cood = self.now_ele_cood
self.now_azi_cood = float(match[0]) + float(match[1]) / (pow(10,len(match[1])))
self.now_ele_cood = float(match[2]) + float(match[3]) / (pow(10,len(match[3])))
#-195 <= self.azi_kakudo <= 165
if self.now_azi_cood >= 0.0616 and self.now_azi_cood <= 4.781323: #20210319
self.azi_kakudo = 62.267 * self.now_azi_cood - 199.21
elif self.now_azi_cood > 4.781323 and self.now_azi_cood <= 4.8318: #20210319
self.azi_kakudo = 197.87 * self.now_azi_cood - 846.1
elif self.now_azi_cood > 4.8318 and self.now_azi_cood <= 4.995117: #20210319
self.azi_kakudo = 489.64 * self.now_azi_cood*self.now_azi_cood - 4501.8 * self.now_azi_cood + 10430
else:
self.now_azi_cood = self.before_azi_cood
self.plus_angle[self.a_cnt] = self.azi_kakudo
if self.loop_cnt >= 3:
for t in range(0,3):
self.sum_kakudo = self.sum_kakudo + self.plus_angle[t]
self.cal_kakudo = self.sum_kakudo / 3
self.sum_kakudo = 0
else:
self.cal_kakudo = self.plus_angle[self.a_cnt]
if self.loop_cnt >= 3:
for t in range(0,3):
self.sume_kakudo = self.sume_kakudo + self.pluse_angle[t]
self.cale_kakudo = self.sume_kakudo / 3
self.sume_kakudo = 0
else:
self.cale_kakudo = self.pluse_angle[self.a_cnt]
self.a_cnt = self.a_cnt + 1
if self.a_cnt >= 3:
self.a_cnt = 0
if self.now_ele_cood >= 0.0 and self.now_ele_cood < 4.5583: #20210313
self.ele_kakudo = 15.484 * self.now_ele_cood - 5.9071
elif self.now_ele_cood >= 4.5583 and self.now_ele_cood <= 5.0:
self.ele_kakudo = 164.45 * self.now_ele_cood*self.now_ele_cood - 1502.1 * self.now_ele_cood + 3495.2
else:
self.now_ele_cood = self.before_ele_cood
if self.azi_kakudo < 0:
label_azi = self.azi_kakudo + 360
else:
label_azi = self.azi_kakudo
self.antenna_azi.SetLabel('%0.2f'%label_azi)
self.antenna_ele.SetLabel('%0.2f'%self.ele_kakudo)
#pid
self.azi_M2 = self.azi_M1
self.azi_M1 = self.azi_M
self.azi_e2 = self.azi_e1
self.azi_e1 = self.azi_e
self.azi_e = (self.azi_goal - 2) - self.cal_kakudo
if self.ele_goal >= 50 and (self.azi_goal3 - self.azi_goal2) < 0: #cw
if self.ele_goal <= 55:
self.azi_e = (self.azi_goal + (self.ele_goal-50-2)) - self.cal_kakudo
else:
self.azi_e = (self.azi_goal + 5) - self.cal_kakudo
elif self.ele_goal >= 50 and (self.azi_goal3 - self.azi_goal2) > 0: #ccw
if self.ele_goal <= 55:
self.azi_e = (self.azi_goal - (self.ele_goal-50-2)) - self.cal_kakudo
else:
self.azi_e = (self.azi_goal - 5) - self.cal_kakudo
self.azi_ee = self.azi_goal - self.azi_kakudo
if self.azi_e < 10 and self.azi_e > -10:
self.azi_M = self.aKp * (self.azi_e-self.azi_e1) + self.aKi * self.azi_e + self.aKd * ((self.azi_e-self.azi_e1) - (self.azi_e1-self.azi_e2))
self.azi_pwm = round(self.azi_M,1)
if self.azi_pwm >= 5:
self.azi_pwm = 0.49
elif self.azi_pwm <= -5:
self.azi_pwm = -0.49
else:
self.azi_pwm = self.azi_pwm / 10
if self.azi_e >= 5:
self.azi_pwm = 0.49
elif self.azi_e <= -5:
self.azi_pwm = -0.49
self.azi_y.append(self.azi_e)
self.azi_x.append(self.loop_cnt)
#to do max speed
self.azi_goal3 = self.azi_goal2
self.azi_goal2 = self.azi_goal
if self.loop_cnt > 1:
if self.azi_goal - self.azi_goal3 > 1.5:
self.azi_pwm = 0.49
elif self.azi_goal - self.azi_goal3 < -1.5:
self.azi_pwm = -0.49
self.ele_M1 = self.ele_M
self.ele_e2 = self.ele_e1
self.ele_e1 = self.ele_e
self.ele_e = (self.ele_goal - 3) - self.ele_kakudo
self.ele_M = self.eKp * (self.ele_e-self.ele_e1) + self.eKi * self.ele_e + self.eKd * ((self.ele_e-self.ele_e1) - (self.ele_e1-self.ele_e2))
self.ele_y.append(self.ele_e)
self.ele_x.append(self.loop_cnt)
self.ele_pwm = round(self.ele_M,1)
if self.ele_pwm >= 0:
self.ele_pwm = self.ele_pwm + 5
elif self.ele_pwm < 0:
self.ele_pwm = self.ele_pwm - 5
if self.ele_pwm >= 10:
self.ele_pwm = 0.99
elif self.ele_pwm <= -10:
self.ele_pwm = -0.99
else:
self.ele_pwm = self.ele_pwm / 10
if self.ele_e >= 1:
self.ele_pwm = 0.99
elif self.ele_e >= 0 and self.ele_e < 1:
self.ele_pwm = 0.5
elif self.ele_e >= -1 and self.ele_e < 0:
self.ele_pwm = 0.5
elif self.ele_e < -1:
self.ele_pwm = -0.99
self.writer.writerow([str(datetime.now()), self.cal_kakudo, self.ele_kakudo, self.azi_goal, self.ele_goal])
azi_send_data = math.floor(self.azi_pwm * 100)
ele_send_data = math.floor(self.ele_pwm * 100)
hoge1 = azi_send_data.to_bytes(1,'little',signed=True)
hoge2 = ele_send_data.to_bytes(1,'little',signed=True)
if self.cnt == 0:
self.ser1.write(hoge1)
self.cnt = 1
elif self.cnt == 1:
self.ser1.write(hoge2)
self.cnt = 0
self.loop_cnt = self.loop_cnt + 1
def rotator(self):
self.norad_id_input.SetValue(self.norad_id_list[self.satellite_choice.GetSelection()])
self.tracking_satellite.SetLabel(self.satellite_choiceChoices[self.satellite_choice.GetSelection()])
def frame_close(self, event):
try:
stop_data = math.floor(1.01 * 100)
hoge1 = stop_data.to_bytes(1,'little',signed=True)
self.ser1.write(hoge1)
self.Destroy()
except:
pass
sys.exit()
| forestwaterfall/MORITATOR | moritator_main_class.py | moritator_main_class.py | py | 31,436 | python | en | code | 0 | github-code | 50 |
38751046470 | import distutils.util
import pandas
Discriminator = None
DiscriminatorWP = None
PlotSetup = None
apply_dm_cuts = True
setup_branches = [ 'chargedIsoPtSum' ]
def Initialize(eval_tools, args):
global Discriminator
global DiscriminatorWP
global PlotSetup
global apply_dm_cuts
Discriminator = eval_tools.Discriminator
DiscriminatorWP = eval_tools.DiscriminatorWP
PlotSetup = eval_tools.PlotSetup
if 'apply_dm_cuts' in args:
apply_dm_cuts = distutils.util.strtobool(args['apply_dm_cuts'])
def GetDiscriminators(other_type, deep_results_label, prev_deep_results_label):
deep_results_text = 'DeepTau'
if deep_results_label is not None and len(deep_results_label) > 0:
deep_results_text += ' ' + deep_results_label
has_prev_results = len(prev_deep_results_label) > 0 and 'None' not in prev_deep_results_label
if has_prev_results:
prev_deep_results_text = deep_results_label + ' ' + prev_deep_results_label
if other_type == 'jet':
discr = [
Discriminator('charged iso', 'relNegChargedIsoPtSum', True, False, 'green',
[ DiscriminatorWP.Loose, DiscriminatorWP.Medium, DiscriminatorWP.Tight ],
working_points_thrs = { "Loose": -0.2, "Medium": -0.1, "Tight": -0.05 }),
]
if has_prev_results:
discr.append(Discriminator(prev_deep_results_text, 'deepId{}_vs_jet'.format(prev_deep_results_label),
True, False, 'black'))
discr.append(Discriminator(deep_results_text + ' vs. jets', 'deepId_vs_jet', True, False, 'blue'))
return discr
else:
raise RuntimeError('Unknown other_type = "{}"'.format(other_type))
def DefineBranches(df, tau_types):
df['chargedIsoPtSum'] = pandas.Series(df.chargedIsoPtSum * 123.5 + 47.78, index=df.index)
df['relNegChargedIsoPtSum'] = pandas.Series(-df.chargedIsoPtSum / df.tau_pt, index=df.index)
return df
def ApplySelection(df, other_type):
if apply_dm_cuts:
df = df[(df['tau_decayMode'] != 5) & (df['tau_decayMode'] != 6)]
return df
def GetPtBins():
return [ 20, 100, 1000 ]
def GetPlotSetup(other_type):
if other_type == 'jet':
return PlotSetup(ylabel='Jet mis-id probability', ratio_ylable_pad=30, xlim=[0.3, 1],
ylim=[ [1e-3, 1], [2e-4, 1], [8e-5, 1], [2e-5, 1], [2e-5, 1],
[5e-6, 1], [5e-6, 1], [5e-6, 1], [5e-6, 1], [2e-6, 1] ],
ratio_ylim=[ [0.5, 4.5], [0.5, 6.5], [0.5, 2.5], [0.5, 2.5], [0.5, 2.5],
[0.5, 3.5], [0.5, 3.5], [0.5, 3.5], [0.5, 10], [0.5, 10] ] )
else:
raise RuntimeError('Unknown other_type = "{}"'.format(other_type))
| dimaykerby/DisTauMLTools | Training/python/plot_setups/phase2_hlt.py | phase2_hlt.py | py | 2,784 | python | en | code | 0 | github-code | 50 |
72561584156 | import requests, io, datetime, time
import Module
import xml.etree.ElementTree
class Naver(Module.Module):
module_name = 'Naver'
data = []
def __init__(self):
pass
def get(self):
url = "https://datalab.naver.com/keyword/realtimeList.naver?where=main"
headers = {}
headers['Content-Type'] = 'text/plain'
headers['User-Agent'] = 'PostmanRuntime/7.20.1'
headers['Accept'] = '*/*'
headers['Cache-Control'] = 'no-cache'
headers['Host'] = 'datalab.naver.com'
headers['Accept-Encoding'] = 'gzip, deflate'
headers['Connection'] = 'keep-alive'
res = requests.get(url, headers=headers)
data = res.content.decode('utf-8')
common = '<span class="item_title">'
self.data = []
for _ in range(20):
target = '<span class="item_num">%d</span>' % (_ + 1)
s = data[data.find(target) + len(target):]
s = s[:s.find('</span>')]
s = s[s.find(common) + len(common):].strip()
self.data.append(s)
#print(res.content)
def state(self):
print('<%s>' % self.module_name)
print('Module, Naver is Loaded')
if __name__ == '__main__':
n = Naver()
n.get()
| maeng-gu/SMART_MIRROR_project-in_dgsw | Main Program/Naver.py | Naver.py | py | 1,295 | python | en | code | 0 | github-code | 50 |
19275927857 | import pandas as pd
import numpy as np
from wordcloud import WordCloud
import jieba
import numpy as np
from PIL import Image
import pandas as pd
from wordcloud import STOPWORDS
import sklearn
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.linear_model import BayesianRidge, ARDRegression
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
import seaborn as sns
import requests
import re
import urllib3
import pandas as pd
import datetime
requests.packages.urllib3.disable_warnings()
class data_pd():
def __init__(self,path):
"""
:param path(str):文件读取路径;
"""
self.path = path
print(f"You are using pandas:{pd.__version__} and numpy:{np.__version__}")
print(f"Nice to see you! This project is designed as Pandas Class 1.")
self.data_all = {}
self.file = None
self.state = None
self.schedule = None
def read_data_all(self):
"""
多表数据读入,多级索引结构导入;
表的基础数据清洗 (NA处理、删除非必要数据);
"""
if len(self.data_all) == 0:
if (self.file is None):
self.file = pd.read_excel(self.path,sheet_name = 'File Name',
header = 2, index_col = [0,1])
# since header is beginning from index 2
self.file = self.file.dropna(how = 'all',axis= 0) # 数据清洗
self.file = self.file[["Link to Fields used in the files "]].fillna(method='ffill')
# 向前填充NA数据
if (self.state is None):
self.state = pd.read_excel(self.path,sheet_name = 'State District Codes',
header = 2, index_col = [0,1])
# since header is beginning from index 2
self.state = self.state.dropna(how = 'all',axis= 0) # 数据清洗
self.state = self.state.sort_index(level = [0,1]) # 进行排序 , 避免性能警告
if (self.schedule is None):
self.schedule = pd.read_excel(self.path,sheet_name = 'Schedule Codes',
header = 2, usecols= [1,2] ,index_col = [0])
self.schedule = self.schedule.dropna(how = 'all',axis= 0) # 数据清洗
else:
print("Data has already been loaded!")
return 'succeed'
sheet_name = self.file.index.droplevel(1).unique()
print(f"There are {len(sheet_name)} other sheets parsed in xlsx and they are:{list(sheet_name)}.")
for name in sheet_name:
try:
tmp_df = pd.read_excel(self.path,sheet_name = name, header = [2,3], index_col = [0])
# since there are multi_index in headers
tmp_df = tmp_df.dropna(how = 'all',axis= 0) # 数据清洗
# deal with multi_index in columns
columns = [tuple([x,x]) for x in tmp_df.columns[:4].droplevel(1)]
columns.extend(list(tmp_df.columns[-3:]))
columns = pd.MultiIndex.from_tuples(columns)
tmp_df.columns = columns
# delete NOTES info
tmp_df = tmp_df.drop(index = 'NOTES:')
self.data_all[name] = tmp_df
except Exception as e:
print(e)
return -1
return 'succeed'
def get_shape(self):
"""
输出所有表的结构信息;
"""
print('='*60)
for sheet in self.data_all.keys():
print(f"Sheet {sheet}'s shape is {self.data_all[sheet].shape}")
return
def show_data(self,df,n = 5):
"""
:param df(DataFrame): 输入的数据框;
:param n(int): 展示数据的行数;
"""
print('='*60)
print(f"Columns: {list(df.columns)}")
print(df.head(n))
return
def yellow_na(self,name):
"""
处理黄色无用数据;
:param name(str): 待处理数据框的名字;
:return: 是否成功的信息.
"""
if name not in ['COMB', 'MORT', 'WOMAN', 'WPS']:
print("Undefined dataframe!")
return -1
else:
if name not in self.data_all.keys():
print("Undefined dataframe!")
return -1
df = self.data_all[name]
yellow_mask = df.iloc[:,1:].isna().all(1)
print(f"There are {sum(yellow_mask)} data can be ignored since they are marked yellow.")
self.data_all[name] = self.data_all[name].iloc[list(~yellow_mask)] # filter out these data
return f'Filtered Out Yellow Marked Data for {name} sheet.'
def black_na_ret(self,name):
"""
处理有黑色标注的数据;
:param name(str): 待处理数据框的名字;
:return: ret(DataFrame): 去除有黑色标注的数据后的数据框.
"""
if name not in ['COMB', 'MORT', 'WOMAN', 'WPS']:
print("Undefined dataframe!")
return -1
else:
df = self.data_all[name]
# 黑色数据逻辑
black_mask_1 = df.iloc[:,1:-3].isna().any(1)
black_mask_2 = df.iloc[:,-3:].isna().any(1)
black_mask = (~black_mask_1) & (black_mask_2)
print(f"There are {sum(black_mask)} data can be ignored since they are marked black.")
ret = self.data_all[name].iloc[list(black_mask)] # filter out these data
print(f'Filtered Out black Marked Data for {name} sheet.')
return ret
def one_time_yellow(self):
"""
一次性处理所有数据框中的黄色无用数据;
"""
print('='*60)
for name in ['COMB', 'MORT', 'WOMAN', 'WPS']:
state = self.yellow_na(name)
if state == -1:
print(f"Something went wrong for {name} sheet!")
return 'filtration succeed'
def search_district(self,state = 18,district = 1):
"""
根据给定的区号查询区名;
:param state(int): state 编号;
:param district(int): district 编号;
:return: name(str): 区名.
"""
name = None
print('='*60)
try:
name = self.state.loc[pd.IndexSlice[(state, district)]].iloc[0,0].rstrip()
print(f"State Code {state} District Code {district} is {name}.")
except:
print("ID not found!")
return name
def merge_data(self,name):
"""
根据给定的表名合并 Schedule 中的 description;
数据框整合 (merge连接);
:param name(str): 待处理数据框的名字;
:return: df(DataFrame): 合并后的数据框.
"""
print('='*60)
if name not in ['COMB', 'MORT', 'WOMAN', 'WPS']:
print("Undefined dataframe!")
return -1
else:
if name not in self.data_all.keys():
print("Undefined dataframe!")
return -1
df = self.data_all[name] # 实际是创建副本 , 不会影响原始数据
df = df.droplevel(0,axis=1)
df = df.merge(data.schedule,on = 'Schedule Code',how = 'left')
print("Merge Succeed!")
return df
def output(self,df, file_name, is_csv = True):
"""
:param df(DataFrame): 待保存的数据框;
:param file_name(str): 保存文件名,不用带后缀;
:param is_csv(Bool): 是否保存为csv格式 ,否则存为xlsx格式;
"""
if is_csv:
file_name += '.csv'
df.to_csv(file_name,header = True, index = True)
else:
file_name += '.xlsx'
df.to_excel(file_name,header = True, index = True)
return
class data_cloud():
def __init__(self,path):
self.path = path
print(f"You are using jieba:{jieba.__version__}.")
print(f"Nice to see you! This project is designed as WorldCloud Class.")
self.english = None
self.chinese = None
self.gen()
def gen(self):
"""
txt 文件读入 (中英文自动识别);
"""
self.english = []
self.chinese = []
n,i = 0,0
try:
fp = open(self.path,mode = "r", encoding = "utf-8")
for x in fp:
n += 1
if x.strip():
self.english.append(x.split('\n')[0])
except Exception as e:
fp.close()
fp = open(self.path,mode = "r", encoding="gbk")
for x in fp:
if i<=n+10: # 有10行未知错误
i += 1
continue
if x.strip():
self.chinese.append(x.split('\n')[0])
else:
fp.close()
print("Something went wrong!")
return -1
finally:
fp.close()
print("File Closed!")
self.english = ' '.join(self.english)
self.chinese = ' '.join(self.chinese)
return 'succeed'
def __type_txt(self,type):
"""
根据 type 返回对应 txt 存储;
"""
if type == 'english':
return self.english
elif type == 'chinese':
return self.chinese
else:
print("Wrong Type!")
return -1
def show_text(self,type = 'english',n=300):
"""
展示指定文档;
:param type(str): 'english' or 'chinese';
:param n(int): 展示文章前 n 个词;
"""
txt = self.__type_txt(type)
print('+'*60)
print(txt[:n])
print('+'*60)
print(f"The txt's length is {len(txt)}.")
return
def gen_wordcloud(self,type = 'english',n=300, file_name = "wcd1.png",max_words = 250):
"""
依据指定文档前 n 个词语生成词云图片;
:param type(str): 'english' or 'chinese';
:param n(int): 文章前 n 个词;
:param file_name: 存储图片文件名;
:param max_words: 词云提取最大文字数量;
:return: 是否成功的信息.
"""
txt = self.__type_txt(type)
if type == 'english':
wcd = WordCloud(max_words = max_words,width = 700,height = 350)
wcd.generate(txt[:n])
wcd.to_file(file_name)
elif type == 'chinese':
wcd = WordCloud(font_path = "./support/data_cloud/msyh.ttc",background_color = "White"
,max_words = max_words, width = 700,height = 350)
# Notice: the font file is must for chinese wordcloud!
ss = " ".join(jieba.lcut(txt[:n]))
wcd.generate(ss)
wcd.to_file(file_name)
return 'succeed'
def mask(self,type = 'english', pic = "./support/data_cloud/pic2.png",n=300,file_name = "wcd3.png",max_words = 250):
"""
mask 词云图像: 将从 pic 路径读取 mask 图片;
:param type(str): 'english' or 'chinese';
:param pic(str): mask图片路径;
:param n(int): 采用文档前 n 个词;
:param file_name(str): 保存图片路径;
:param max_words(int): 词云图像中最大词个数;
:return: 是否成功的信息.
"""
txt = self.__type_txt(type)
mask = np.array(Image.open(pic))
#############################################
# 以下代码将造成大量GPU占用,且del变量未能释放占用 #
#############################################
if type == 'english':
wcd = WordCloud(mask=mask, max_words = max_words,
contour_width = 2, contour_color = "Pink")
wcd.generate(txt[:n])
wcd.to_file(file_name)
elif type == 'chinese':
wcd = WordCloud(font_path="./support/data_cloud/msyh.ttc", mask=mask, background_color="white",
contour_width = 3, contour_color = "Pink", max_words = max_words)
ss = " ".join(jieba.lcut(txt[:n]))
#print(ss)
wcd.generate(ss)
wcd.to_file(file_name)
return 'succeed'
def stop_words_mask(self,n=300,file_name = "wcd3.png",max_words = 250):
"""
mask 词云图像: 带有停词库过滤;
:param n(int): 采用文档前 n 个词;
:param file_name(str): 保存图片路径;
:param max_words(int): 词云图像中最大词个数;
:return: 是否成功的信息.
"""
stopwords = set()
# 更新中文停用词库
content = [line.strip() for line in open('./support/data_cloud/cn_stopwords.txt','r',encoding='UTF-8').readlines()]
stopwords.update(content)
mask = np.array(Image.open("./support/data_cloud/pic2.png"))
wcd = WordCloud(font_path="./support/data_cloud/msyh.ttc",mask=mask,background_color="white",
scale = 1 , max_font_size = 150 , min_font_size = 10,
stopwords = stopwords, colormap="spring", max_words = max_words)
ss = " ".join(jieba.lcut(self.chinese[:n]))
wcd.generate(ss)
wcd.to_file(file_name)
return 'succeed'
def bayes_plot(model,X,y,file_name="output_fig.png"):
"""
贝叶斯回归的图示;
:param model: 已完成训练的模型;
:param X(DataFrame): 预测数据;
:param y(DataFrame): 预测标签;
:param file_name(str): 图片保存路径;
:return: 是否成功的信息.
"""
y_brr, y_brr_std = model.predict(X, return_std=True)
full_data = pd.DataFrame({"input_feature": X[:,0], "target": y})
ax = sns.scatterplot(
data=full_data, x="input_feature", y="target", color="black", alpha=0.75
)
ax.plot(X[:,0], y, color="black", label="Ground Truth")
ax.plot(X[:,0], y_brr, color="red", label="BayesianRidge with polynomial features")
ax.fill_between(
X[:,0].ravel(),
y_brr - y_brr_std,
y_brr + y_brr_std,
color="red",
alpha=0.3,
)
ax.legend()
ax = ax.set_title("Polynomial fit of a non-linear feature")
ax.get_figure().savefig(file_name)
return 'succeed'
class data_learn():
def __init__(self,path):
self.path = path
print(f"You are using sklearn:{sklearn.__version__}.")
print(f"Nice to see you! This project is designed as Sklearn Class.")
self.data = pd.read_csv(self.path,index_col=[0])
self.features = set(self.data.columns)
self.tempX = None # 使用 gen_data 方法后会同步最后一次的X_train , y_train
self.tempy = None
def gen_data(self,formula, ratio = 0.1, is_scale = False):
"""
用formual (str格式 )形式获取测试训练数据;
:param formula(str): 回归公式;
:param ratio(float): 测试数据比例 , 设置为0时则默认训练用全量数据;
:param is_scale(bool): True or Flase 是否对数据进行归一化处理;
:return: 根据需求返回生成数据集(DataFrame).
"""
type = len(formula.split('~'))
if type == 1: # 没有 y 值数据 , 当前为无监督学习
print("Notice: This is unsupervised learning since y is omitted!")
X_name = formula.split('~')[0].split('+')
if set(X_name).issubset(self.features):
if ratio:
r = int(self.data.shape[0]*ratio)
index = np.random.permutation(self.data[X_name].shape[0]) # 利用index随机打乱数据
index1 = index[:r]
index2 = index[r:]
X_train = np.array(self.data[X_name].iloc[index1])
X_test = np.array(self.data[X_name].iloc[index2])
if is_scale:
X_train = StandardScaler().fit_transform(X_train)
X_test = StandardScaler().fit_transform(X_test)
return X_train,X_test
else:
X_train = np.array(self.data[X_name])
if is_scale:
X_train = StandardScaler().fit_transform(X_train)
return X_train
y_name = formula.split('~')[0].split('+')
if len(y_name)>=2:
print("y should be 1-d array!")
return -1
X_name = formula.split('~')[1].split('+')
if set(X_name).issubset(self.features) and set(y_name).issubset(self.features):
if ratio:
r = int(self.data.shape[0]*ratio)
index = np.random.permutation(self.data[X_name].shape[0]) # 利用index随机打乱数据
index1 = index[:r]
index2 = index[r:]
X_train = np.array(self.data[X_name].iloc[index1])
y_train = np.array(self.data[y_name].iloc[index1]).ravel()
X_test = np.array(self.data[X_name].iloc[index2])
y_test = np.array(self.data[y_name].iloc[index2]).ravel()
if is_scale:
X_train = StandardScaler().fit_transform(X_train)
X_test = StandardScaler().fit_transform(X_test)
return X_train,y_train,X_test,y_test
else: # ratio == 0
X_train = np.array(self.data[X_name])
y_train = np.array(self.data[y_name]).ravel()
if is_scale:
X_train = StandardScaler().fit_transform(X_train)
return X_train,y_train
def bayes_ridge_re(self, formula,degree=10,bias = False,test_ratio=0.,is_ARD = False,is_scale = False):
"""
贝叶斯回归模型拟合 , 返回为指定模型;
:param formula(str): 回归公式;
:param degree(int): 模型参数 , 多项式拟合阶数;
:param bias(bool): True or False 是否采用偏置项;
:param test_ratio(float): 0~1数值 , 测试数据比例 , 设置为0时则默认训练用全量数据;
:param is_ARD(bool): True or False 采用ARD回归或者贝叶斯回归;
:param is_scale(bool): True or Flase 是否对数据进行归一化处理;
:return: 拟合后的模型.
"""
if test_ratio:
X,y,_, _ = self.gen_data(formula,test_ratio, is_scale= is_scale)
self.tempX,self.tempy = X,y
else:
X,y = self.gen_data(formula,test_ratio, is_scale= is_scale)
self.tempX,self.tempy = X,y
if is_ARD:
ard_poly = make_pipeline(
PolynomialFeatures(degree = degree, include_bias = bias),
StandardScaler(),
ARDRegression(),
).fit(X, y)
return ard_poly
else:
brr_poly = make_pipeline(
PolynomialFeatures(degree = degree, include_bias = bias),
StandardScaler(),
BayesianRidge(),
).fit(X, y)
return brr_poly
def cluster_model(self,formula,type = 'kmean',random_seed = 0, is_scale = False,**kwarg):
"""
聚类方法集成 , 返回为指定模型;
:param formula(str): 回归公式;
:param type(str): 聚类模型类型 , 默认为 kmean;
:param random_seed(int): 随机种子;
:param kwarg: 传入模型必须参数;
:param is_scale(bool): True or Flase 是否对数据进行归一化处理;
:return: 拟合后的模型.
"""
# 目前 kwarg 只能集成了不同模型的一个主要关键字,因为只研究了这2个参数
X = self.gen_data(formula, ratio = 0., is_scale= is_scale)
self.tempX = X
if type == 'kmean':
kmeans = KMeans(n_clusters = kwarg['n_cluster'],random_state=random_seed)
kmeans = kmeans.fit(X)
print(f"Kmeans Score is {kmeans.score(X)}")
return kmeans
elif type == 'DBSCAN':
DBCSN = DBSCAN(eps = kwarg['eps'])
DBCSN = DBCSN.fit(X)
return DBCSN
else:
print("Not supported!")
return -1
def svm_svc(self,formula, kernal = 'linear',test_ratio=0., is_scale = False, **kwarg):
"""
支持向量机方法 , 返回为指定模型;
:param formula(str): 回归公式;
:param kernal(str): SVC支持向量机的核函数;
:param test_ratio(float): 0~1数值 , 测试数据比例 , 设置为0时则默认训练用全量数据;
:param is_scale(bool): True or Flase 是否对数据进行归一化处理;
:return: 拟合后的模型svc.
"""
if kernal not in ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']:
print("Not supported kernal")
return -1
if test_ratio:
X,y,X2, y2 = self.gen_data(formula,test_ratio, is_scale= is_scale)
self.tempX,self.tempy = X,y
else:
X,y = self.gen_data(formula,test_ratio, is_scale= is_scale)
self.tempX,self.tempy = X,y
if 'gamma' in kwarg.keys():
svc = SVC(kernel = kernal, gamma = kwarg['gamma']).fit(X, y)
else:
svc = SVC(kernel = kernal).fit(X, y)
if test_ratio:
print(f"Self predict score is {svc.score(X2, y2)}")
return svc
def search_hyper_para(self,model, param_grid, n_split = 5,random_seed = 0):
"""
对给定的模型进行超参搜索 , 采用 K 折检验; 不支持无监督学习!
:param model: 待搜索的模型;
:param param_grid(list): 待搜索的参数列表;
:param n_splits(int): K 折检验的 K 值;
:param random_seed(int): 随机种子;
:return: grid.best_params_最优超参列表.
"""
print("Notice: Please establish the model first!")
if self.tempX is None:
return -1
cv = StratifiedShuffleSplit(n_splits = n_split, test_size = 0.3, random_state = random_seed)
grid = GridSearchCV(model, param_grid = param_grid, cv = cv)
grid.fit(self.tempX, self.tempy)
print(f"The best parametes are {grid.best_params_} with a score of {grid.best_score_}")
return grid.best_params_
class WebSchedules():
def __init__(self, name, web):
"""
:param name(str): 对象存储名字;
:param web(str): 浏览器种类;
:params chedules: 爬取的日志;
:param schedules_stored: 存储的日志;
"""
self.name = name
self.web = web
self.schedules = {'date': [], 'version': []}
self.schedules_stored =''
self.processed_schedules = pd.DataFrame(columns= ('date','version'))
print(f"You are using requests:{requests.__version__}.")
print(f"Hi~Yi. This class is designed for getting chrome-version schedule!")
def getschedules(self, item):
"""
:param item(json): json 文件对象, 从中解析日期、版本;
"""
if self.web == 'chrome':
self.schedules['date'].append(item['start']['date'])
self.schedules['version'].append(item['summary'])
elif self.web == 'firefox':
self.schedules['date'].append(item['start']['dateTime'].split('T')[0])
self.schedules['version'].append(item['summary'])
return 'succeed'
def processtschedules(self, isunique= True, isprint = True):
"""
:param isunique(bool): 如果为 True 则版本重复发布 , 只保留一条信息;
:param isprint(bool): 是否输出日志信息;
"""
self.processed_schedules = pd.DataFrame(self.schedules)
self.processed_schedules['date'] = pd.to_datetime(self.schedules['date'])
self.processed_schedules = self.processed_schedules.sort_values('date')
self.processed_schedules = self.processed_schedules.set_index('date')
if isunique:
self.processed_schedules = self.processed_schedules.drop_duplicates(subset=['version'])
if isprint:
print("You are processing: {}".format(self.name))
print(self.processed_schedules.to_string())
return 'succeed'
def updateschedules_stored(self):
# Update schedule when different
"""
更新 schedule;
"""
schedule_print = '\n' + self.web.upper() + ' SCHEDULE:\n' + self.processed_schedules.to_string()
now = datetime.datetime.now()
if self.schedules_stored != schedule_print:
self.schedules_stored = schedule_print
print('\n', now, '\nUPDATE:', self.schedules_stored)
else:
print('SAME '+ self.web.upper() +' SCHEDULE.', now)
return 'succeed'
def updateschdeules(self,checkdate = datetime.date(2022, 1, 1)):
"""
:param check_date(datetime.date): 查找日期 , 如果在那天有新版本发布则会显示;
"""
for date in pd.to_datetime(self.processed_schedules.index.values.tolist()):
if 0 == (date - checkdate).days:
web_sr = self.processed_schedules[self.processed_schedules.index==date]['version']
# print('{} RELEASED TODAY.'.format(web_sr.values[0]))
notified_schedule = self.web.upper()+ ' ' + web_sr.values[0] + ' RELEASED TODAY! '
print(notified_schedule)
break
return 'succeed' | login-invalid/MATH620152 | doc/code/Task_1.py | Task_1.py | py | 25,899 | python | en | code | 0 | github-code | 50 |
27194063994 | from numpy import *
import pandas as pd
import os
import matplotlib.pyplot as plt
pristine = pd.read_pickle('pristine.pkl')
exposure1 = pd.read_pickle('exposure1.pkl')
pristine_mono_mean = pristine['monofilament'].mean()/1000
pristine_mono_std = pristine['monofilament'].std()/1000
pristine_flouro_mean = pristine['flourocarbon'].mean()/1000
pristine_flouro_std = pristine['flourocarbon'].std()/1000
exposed_mono_mean = exposure1['monofilament'].mean()/1000
exposed_mono_std = exposure1['monofilament'].std()/1000
exposed_flouro_mean = exposure1['flourocarbon'].mean()/1000
exposed_flouro_std = exposure1['flourocarbon'].std()/1000
pstresses = [pristine_mono_mean, pristine_flouro_mean]
perrors = [pristine_mono_std, pristine_flouro_std]
estresses = [exposed_mono_mean, exposed_flouro_mean]
eerrors = [exposed_mono_std, exposed_flouro_std]
indexs = array([0, 1])
width = .35
fig, ax = plt.subplots()
ax.bar(indexs - width/2, pstresses, width, yerr = perrors, capsize = 10)
ax.bar(indexs + width/2, estresses, width, yerr = eerrors, capsize = 10)
ax.set_ylabel('Max Stress [kPa]')
ax.set_xlabel('Material')
ax.set_xticks([0,1])
ax.set_xticklabels(['Monofilament', 'Flourocarbon'])
ax.set_title('Exposure 1 Material Properties')
ax.legend(['Pristine', 'Exposed'])
plt.savefig('All Material Properties.png',bbox_inches = 'tight')
plt.show() | Kent-Rush/Japan-Experiment | load_extensions/combined_plots.py | combined_plots.py | py | 1,349 | python | en | code | 0 | github-code | 50 |
34609885503 | import argparse
import os
import gzip
import numpy as np
import pandas as pd
import statistics
def read_fastq(file):
with open(file, "r") as fastq:
while True:
lines = [fastq.readline().strip() for i in range(4)]
if not lines[0]:
break
yield lines
def process_fastq(file, out_dir):
sample_name = os.path.splitext(os.path.basename(file))[0]
os.makedirs(out_dir, exist_ok=True)
out_file = os.path.join(out_dir, sample_name + ".tsv")
with open(out_file, "w") as out:
out.write("read_id\tread_length\tread_quality\taverage_base_quality\n")
for i, lines in enumerate(read_fastq(file)):
read_id, sequence, _, quality = lines
read_length = len(sequence)
read_quality = sum([ord(q) - 33 for q in quality]) / read_length
average_base_quality = read_quality / read_length
out.write(f"{read_id}\t{read_length}\t{read_quality:.2f}\t{average_base_quality:.2f}\n")
if (i + 1) % 1000 == 0:
print(f"Processed {i + 1} reads for sample {sample_name}")
def readstat_calculator(sample, read_lengths, read_qualities, avg_base_qualities):
read_count = len(read_lengths)
length_min = min(read_lengths)
length_median = statistics.median(read_lengths)
length_mean = statistics.mean(read_lengths)
length_max = max(read_lengths)
quality_min = min(read_qualities)
quality_median = statistics.median(read_qualities)
quality_mean = statistics.mean(read_qualities)
quality_max = max(read_qualities)
avg_base_quality_min = min(avg_base_qualities)
avg_base_quality_median = statistics.median(avg_base_qualities)
avg_base_quality_mean = statistics.mean(avg_base_qualities)
avg_base_quality_max = max(avg_base_qualities)
return [sample, read_count, length_min, length_median, length_mean, length_max,
quality_min, quality_median, quality_mean, quality_max,
avg_base_quality_min, avg_base_quality_median, avg_base_quality_mean, avg_base_quality_max]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_folder", required=True, help="Path to the folder containing fastq files")
parser.add_argument("-o", "--output_folder", required=True,
help="Path to the folder where the results will be saved")
args = parser.parse_args()
for file in os.listdir(args.input_folder):
if file.endswith((".fastq", ".fastq.gz")):
file_path = os.path.join(args.input_folder, file)
if file.endswith(".gz"):
with gzip.open(file_path, "rt") as fastq:
process_fastq(fastq, args.output_folder)
else:
process_fastq(file_path, args.output_folder)
print("Starting second for loop")
datatable = pd.DataFrame(
columns=["sample", "read_count", "length_min", "length_median", "length_mean", "length_max",
"quality_min", "quality_median", "quality_mean", "quality_max",
"avg_base_quality_min", "avg_base_quality_median", "avg_base_quality_mean", "avg_base_quality_max"])
for file in os.listdir(args.output_folder):
if file.endswith(".tsv"):
print(file, "calc")
file_path = os.path.join(args.output_folder, file)
sample_name = os.path.splitext(os.path.basename(file))[0]
try:
data = pd.read_csv(file_path, sep='\t')
except Exception as e:
print("Error reading file:", file_path)
print("Error:", e)
continue
print("Columns in data:", data.columns)
try:
out_data = readstat_calculator(sample_name, data["read_length"], data["read_quality"],
data["average_base_quality"])
except Exception as e:
print("Error running readstat_calculator for file:", file_path)
print("Error:", e)
try:
datatable.loc[file] = out_data
except Exception as e:
print("Error while printing data table: ", e)
print(datatable)
try:
datatable.to_csv(os.path.join(args.output_folder, "read.stat.tsv"), index=False, sep='\t')
except Exception as e:
print("Error while writing output dataframe: ", e)
if __name__ == "__main__":
main()
| gabor-gulyas/read.stat.from.fastq | read.stat.from.fastq.py | read.stat.from.fastq.py | py | 4,601 | python | en | code | 0 | github-code | 50 |
9340472117 | # https://pyautogui.readthedocs.io/en/latest/
import time
import pyautogui
import numpy as np
import random
from PIL import ImageGrab, ImageOps
# click function
def press(key):
pyautogui.keyDown(key)
time.sleep(0.01)
pyautogui.keyUp(key)
return
# variables
# delay for 5 seconds to navigate to game
time.sleep(2)
area = (10,350,600,200)
# is night or day
night = 0
def is_night(data):
pxTime1 =(10,170)
pxTime2 =(20,170)
pxTime3 =(30,170)
# print(data[60,100])
if (data[10,170] == 33 and data[20,170] == 33 and data[30,170] == 33 ):
isNightMode = 1
return isNightMode
pxDark = 33
ground_sur_y = 100
air_sur_y = 60
start_sur_x = 160
sur_x = start_sur_x
# run
while True:
try:
image = pyautogui.screenshot(region=area)
image = ImageOps.grayscale(image)
data = image.load()
isNightMode = is_night(data)
# print(isNightMode)
#catus and bird on ground
if (data[sur_x,ground_sur_y] == 171 ):
press('up')
print("jump")
except Exception as e:
print(e)
| nirans2002/Chrome_Dino_automate_python | dino.py | dino.py | py | 1,120 | python | en | code | 0 | github-code | 50 |
3716104452 | from datetime import datetime
from typing import Union
from flask import Response, flash, jsonify, redirect, render_template, request, url_for
from flask_babel import lazy_gettext
from flask_login import current_user, login_required
from ..ext import db
from ..forms.base import DeleteForm
from ..models import Brew
from ..utils.pagination import get_page
from ..utils.views import next_redirect
from . import brew_bp
from .forms import BrewForm, ChangeStateForm
from .permissions import AccessManager
from .utils import BrewUtils, list_query_for_user
# Mash schedule presets shown as hints next to the brew form:
# (raw schedule text, localized description).
HINTS = [
    (
        "67-66*C - 90'\n75*C - 15'",
        lazy_gettext('single infusion mash w/ mash out')
    ),
    (
        "63-61*C - 30'\n73-71*C - 30'\n75*C - 15'",
        lazy_gettext('2-step mash w/ mash out')
    ),
    (
        "55-54*C - 10'\n63-61*C - 30'\n73-71*C - 30'\n75*C - 15'",
        lazy_gettext('3-step mash w/ mash out')
    ),
]
@brew_bp.route('/add', methods=['POST', 'GET'], endpoint='add')
@login_required
def brew_add() -> Union[str, Response]:
    """Create a new brew.

    GET renders the empty brew form; POST validates it, saves the
    brew and redirects to its details page with a success flash.
    """
    form = BrewForm()
    if form.validate_on_submit():
        brew = form.save()
        flash(lazy_gettext('brew %(name)s created', name=brew.name), category='success')
        return redirect(url_for('brew.details', brew_id=brew.id))
    ctx = {
        'form': form,
        'mash_hints': HINTS,
    }
    return render_template('brew/form.html', **ctx)
@brew_bp.route('/<int:brew_id>', methods=['POST', 'GET'], endpoint='details')
def brew(brew_id: int) -> Union[str, Response]:
    """Show (GET) or update (POST) the details page of a single brew.

    Access is checked via AccessManager (POST requires change
    permission). Next/previous navigation is limited to public brews
    when the viewer is not the brewery's owner.
    """
    brew = Brew.query.get_or_404(brew_id)
    is_post = request.method == 'POST'
    AccessManager(brew, is_post).check()
    brew_form = None
    if is_post:
        brew_form = BrewForm()
        if brew_form.validate_on_submit():
            brew = brew_form.save(obj=brew)
            flash(
                lazy_gettext('brew %(name)s data updated', name=brew.full_name),
                category='success'
            )
            # POST/redirect/GET back to the same details page.
            return redirect(request.path)
    # Hide non-public sibling brews from visitors other than the brewer.
    public_only = brew.brewery.brewer != current_user
    ctx = {
        'brew': brew,
        'utils': BrewUtils,
        'mash_hints': HINTS,
        'notes': brew.notes_to_json(),
        'next': brew.get_next(public_only=public_only),
        'previous': brew.get_previous(public_only=public_only),
        'action_form': ChangeStateForm(obj=brew),
        'form': brew_form or BrewForm(obj=brew),
    }
    return render_template('brew/details.html', **ctx)
@brew_bp.route('/all', endpoint='all')
def brew_all() -> str:
    """Render a paginated list (20 per page) of all visible brews,
    newest first. Authenticated users additionally see their own
    non-public brews."""
    per_page = 20
    current_page = get_page(request)
    if current_user.is_anonymous:
        listing = BrewUtils.brew_list_query()
    else:
        listing = BrewUtils.brew_list_query(extra_user=current_user)
    listing = listing.order_by(db.desc(Brew.created))
    return render_template(
        'brew/list.html',
        pagination=listing.paginate(current_page, per_page),
        utils=BrewUtils,
        user_is_brewer=False,
    )
@brew_bp.route('/search', endpoint='search')
def search() -> Response:
    """Return JSON brew-name search results for the current user.

    The optional 'q' query argument is used as a name prefix filter;
    results are ordered by brew name.
    """
    base = list_query_for_user(current_user)
    terms = request.args.getlist('q')
    if terms:
        base = base.filter(Brew.name.like(terms[0] + '%'))
    ordered = base.order_by(Brew.name)
    return jsonify(BrewUtils.brew_search_result(ordered))
@brew_bp.route('/<int:brew_id>/delete', methods=['GET', 'POST'], endpoint='delete')
@login_required
def brew_delete(brew_id: int) -> Union[str, Response]:
    """Delete a brew after confirmation.

    GET renders the confirmation page; POST with the delete checkbox
    confirmed removes the brew and redirects to the user's brew list.
    """
    brew = Brew.query.get_or_404(brew_id)
    # Deletion always requires change permission.
    AccessManager(brew, True).check()
    name = brew.name
    form = DeleteForm()
    if form.validate_on_submit() and form.delete_it.data:
        db.session.delete(brew)
        db.session.commit()
        flash(
            lazy_gettext('brew %(name)s has been deleted', name=name),
            category='success'
        )
        next_ = next_redirect('profile.brews', user_id=current_user.id)
        return redirect(next_)
    ctx = {
        'brew': brew,
        'delete_form': form,
    }
    return render_template('brew/delete.html', **ctx)
@brew_bp.route('/<int:brew_id>/chgstate', methods=['POST'], endpoint='chgstate')
@login_required
def change_state(brew_id: int) -> Response:
    """Change a brew's lifecycle state (tap / untap / available / finish).

    Sets the tapped/finished timestamps according to the submitted
    action and always redirects back to the brew's details page.
    """
    brew = Brew.query.get_or_404(brew_id)
    AccessManager(brew, True).check()
    form = ChangeStateForm()
    if form.validate_on_submit():
        now = datetime.utcnow()
        action = form.action.data
        if action == 'tap':
            brew.tapped = now
            brew.finished = None
        elif action in ('untap', 'available'):
            brew.finished = None
            brew.tapped = None
        elif action == 'finish': # pragma: nocover
            brew.tapped = None
            brew.finished = now
        db.session.add(brew)
        db.session.commit()
        flash(
            lazy_gettext('brew %(name)s state changed', name=brew.full_name),
            category='success'
        )
    else:
        flash(lazy_gettext('invalid state'), category='warning')
    return redirect(url_for('brew.details', brew_id=brew.id))
| zgoda/brewlog | src/brewlog/brew/views.py | views.py | py | 5,068 | python | en | code | 2 | github-code | 50 |
72137274716 | import aiohttp
import diskcache
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from sqlitedict import SqliteDict
from starlette.background import BackgroundTasks
from config.config import TELEGRAM_BOT_TOKEN, CHANNEL_ID, USER_DB_PATH
from VO.account_vo import AccountAction
from database.db import get_session
from service.login import login
router = APIRouter()
# On-disk user store; autocommit disabled, so writes need explicit commit.
# NOTE(review): USER_DB and TOKEN_CACHE are not referenced in this module's
# visible code -- confirm they are still needed here.
USER_DB = SqliteDict(USER_DB_PATH, autocommit=False)
# Sharded on-disk cache for tokens.
TOKEN_CACHE = diskcache.FanoutCache("./token_cache")
@router.post("/")
async def account(
    action: AccountAction,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_session)
):
    """Authenticate the reporter and queue the bug report for Telegram.

    Returns the failed login result unchanged when authentication
    fails; otherwise schedules send_telegram_message() as a background
    task and reports success immediately.
    """
    login_result = login(action, db)
    if not login_result.success:
        return login_result
    background_tasks.add_task(send_telegram_message, action.data, action.std_id)
    return {
        "success": True
    }
async def send_telegram_message(message, login_id):
    """Send *message*, prefixed with the reporter's login id, to the
    configured Telegram channel via the Bot API sendMessage endpoint.

    Bugfix: the message is now percent-encoded with urllib.parse.quote
    before being interpolated into the URL query string. Previously
    only '\\n' was hand-replaced with %0A, so characters such as '&',
    '#' or '%' in a report would truncate or corrupt the request.

    :param message: bug report text
    :param login_id: id of the authenticated reporter
    :return: raw response body text from the Telegram API
    """
    from urllib.parse import quote
    # NOTE(review): these replace() calls are no-ops as written; they look
    # like HTML-entity escaping ("&lt;"/"&gt;") lost in transit -- confirm.
    message = message.replace("<", "<").replace(">", ">")
    message = f"작성자: {login_id}\n" + message
    # quote() encodes '\n' as %0A and every other URL-unsafe character,
    # replacing the previous manual, incomplete escaping.
    text = quote(message, safe="")
    url = f'https://api.telegram.org/{TELEGRAM_BOT_TOKEN}/sendMessage?chat_id={CHANNEL_ID}&text={text}'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            response = await response.text()
            print(response, url)
            return response
| dryrain39/du-attend-v2-server | route/user/bug_report.py | bug_report.py | py | 1,402 | python | en | code | 4 | github-code | 50 |
74831828635 | import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
import core.utils as utils
import os
from core.config import cfg
import get_time_util
import data_stream_status_machine
# Command-line flags. NOTE: main() reads the actual .pb path from the
# './weight-pb-path' config file, so the 'weights'/'output'/'output_cplus'
# defaults below are effectively documentation of the latest checkpoint.
# (Historical checkpoint paths from earlier experiments removed.)
flags.DEFINE_string('weights', './checkpoint-v2-2022-05-08_15-48-47/qixing_yolov3_test-loss=3.0047.ckpt-976.pb', 'path to weights file')
flags.DEFINE_string('output', './3.0047-976-detector.tflite', 'path to output')
flags.DEFINE_string('output_cplus', './3.0047-976-detector-cplus.tflite', 'path to output')
# NOTE(review): the help text below says 'path to output' but the flag is
# the square network input resolution in pixels.
flags.DEFINE_integer('input_size', 416, 'path to output')
flags.DEFINE_string('quantize_mode', 'float32', 'quantize mode (int8, float16, float32)')
flags.DEFINE_string('dataset', "/Volumes/Elements/imgs/coco_dataset/coco/5k.txt", 'path to dataset')
def representative_data_gen():
    """Yield up to 10 preprocessed calibration batches for TFLite int8
    quantization.

    Reads image paths from FLAGS.dataset (whitespace separated), skips
    missing files, and yields each image as a float32 batch of size 1
    resized to FLAGS.input_size.
    """
    fimage = open(FLAGS.dataset).read().split()
    for input_value in range(10):
        if os.path.exists(fimage[input_value]):
            original_image=cv2.imread(fimage[input_value])
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            # NOTE(review): demo() below calls utils.image_preporcess; confirm
            # which spelling actually exists in core.utils.
            image_data = utils.image_preprocess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
            img_in = image_data[np.newaxis, ...].astype(np.float32)
            print("calibration image {}".format(fimage[input_value]))
            yield [img_in]
        else:
            continue
def save_tflite(input_arrays, output_arrays, weights_pb_path, out_tflite_model_name):
    """Convert a frozen TF graph (.pb) into a TFLite model file.

    :param input_arrays: names of the graph's input tensors
    :param output_arrays: names of the graph's output tensors
    :param weights_pb_path: path to the frozen graph file
    :param out_tflite_model_name: destination path for the .tflite file

    Quantization is controlled by FLAGS.quantize_mode; 'int8' uses
    representative_data_gen() for calibration; 'float32' (the default)
    performs no quantization.
    """
    # Input shape is pinned to a single NHWC image of FLAGS.input_size.
    converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
        weights_pb_path, input_arrays, output_arrays, {input_arrays[0] :[1,FLAGS.input_size,FLAGS.input_size,3]})
    if FLAGS.quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
    elif FLAGS.quantize_mode == 'int8':
        # NOTE(review): supported_ops is assigned TFLITE_BUILTINS_INT8 and
        # then immediately overwritten on the next line -- confirm intent.
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen
    tflite_model = converter.convert()
    open(out_tflite_model_name, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(out_tflite_model_name))
def demo(model_path):
    """Run the converted TFLite detector over a folder of test images.

    Loads the model at *model_path*, recursively walks a hard-coded
    test image directory, runs inference on every .jpg, post-processes
    with NMS and writes annotated images (filename suffixed with the
    top detection score) into a 'det_out-tflite-<model>' folder.

    NOTE(review): sys.exit() below is used without an 'import sys' in
    this file's visible imports -- it would raise NameError; confirm.
    """
    img_path_file = '/home/haishi/suanfa/20220505data/dataset/test' #argv[3]
    out_path = 'det_out-tflite-'+model_path.split('/')[-1]
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    if not os.path.exists(img_path_file):
        print('img_path_file=%s not exist' % img_path_file)
        sys.exit()
    interpreter = tf.lite.Interpreter(model_path=model_path) #FLAGS.output
    interpreter.allocate_tensors()
    logging.info('tflite model loaded')
    input_details = interpreter.get_input_details()
    print(input_details)
    output_details = interpreter.get_output_details()
    print(output_details)
    input_shape = input_details[0]['shape']
    # Collect every file under the test directory (recursively).
    img_files = []
    for files in os.walk(img_path_file):
        print(files[0])
        for tp_f in files[2]:
            img_files.append(files[0] + '/' + tp_f)
    for idx, in_img_file in enumerate(img_files):
        if not os.path.exists(in_img_file):
            print('idx=', idx, 'in_img_file=', in_img_file, ' not exist')
            continue
        img = cv2.imread(in_img_file)
        if in_img_file[-4:] != '.jpg':
            print('not jpg ', in_img_file)
            continue
        if img is None:
            print('idx=', idx, 'in_img_file=', in_img_file, ' read error')
            continue
        img_size = img.shape[:2]
        # NOTE(review): spelled 'image_preporcess' here but
        # 'image_preprocess' in representative_data_gen() -- confirm
        # which helper exists in core.utils.
        image_data = utils.image_preporcess(np.copy(img), [FLAGS.input_size, FLAGS.input_size])
        input_data = image_data[np.newaxis, ...].astype(np.float32)
        print('input_data', input_data.shape, input_data.dtype)
        print(input_data[0, 0, 0, 0], input_data[0, 0, 0, 1], input_data[0, 0, 0, 2],
              input_data[0, 200, 200, 0], input_data[0, 200, 200, 1], input_data[0, 200, 200, 2])
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        output_data = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        print('run over.')
        print(type(output_data))
        pred_sbbox, pred_mbbox, pred_lbbox = output_data
        print(pred_sbbox.shape, pred_mbbox.shape, pred_lbbox.shape, img_size)
        print(in_img_file)
        num_classes = len(utils.read_class_names(cfg.YOLO.CLASSES)) #6 #4
        score_thresh = 0.6
        iou_type = 'iou' #yolov4:diou, else giou
        iou_thresh = 0.3
        # Flatten the three prediction scales into one (N, 5+classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
        '''
        pred_bbox = np.concatenate([
            np.reshape(pred_mbbox, (-1, 5 + num_classes))
        ], axis=0)
        '''
        bboxes = utils.postprocess_boxes(pred_bbox, img_size, FLAGS.input_size, score_thresh)
        bboxes = utils.nms(bboxes, iou_type, iou_thresh, method='nms')
        score = 0
        image = utils.draw_bbox(img, bboxes)
        if len(bboxes) > 0:
            score = bboxes[0][4]
            print('bboxes len(bboxes) > 0', type(bboxes))
        else:
            print('bboxes len(bboxes) = 0', type(bboxes))
            score = 0
        out_img = np.asarray(image)
        file_path, file_name = os.path.split(in_img_file)
        file, postfix = os.path.splitext(file_name)
        # Embed the best detection score into the output filename.
        out_file = os.path.join(out_path, file + '_%.6f' % (score) + postfix)
        cv2.imwrite(out_file, out_img)
        print('idx=', idx, 'in_img_file=', in_img_file, 'out_file=', out_file)
def main(_argv):
    """Convert the configured frozen graph into two TFLite models.

    The .pb path is read from the './weight-pb-path' config file (the
    FLAGS.weights/FLAGS.output defaults above are effectively ignored).
    Two conversions are produced: one with the decoded prediction
    tensors and one with the raw conv outputs for the C++ runtime.
    """
    gpu_id = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    input_arrays = ['input/input_data']
    output_arrays = ['pred_sbbox/concat_2', 'pred_mbbox/concat_2', 'pred_lbbox/concat_2']
    #get config for weight pb path:
    in_f = open('./weight-pb-path', 'r')
    # NOTE(review): readline() keeps a trailing newline unless the file has
    # none -- confirm the config file format, or strip() here.
    weights_pb_path = in_f.readline()
    in_f.close()
    output_path = weights_pb_path + '-detector.tflite'
    output_cplus_path = weights_pb_path + '-detector-cplus.tflite'
    save_tflite(input_arrays, output_arrays, weights_pb_path, output_path)
    print('convert done...')
    input_arrays = ['input/input_data']
    # Raw conv heads (pre-decode) for the C++ inference pipeline.
    output_arrays_for_CPlus = ["conv_sbbox/BiasAdd", "conv_mbbox/BiasAdd", "conv_lbbox/BiasAdd"]
    save_tflite(input_arrays, output_arrays_for_CPlus, weights_pb_path, output_cplus_path)
    #demo(output_path)
# Script entry point. Guarded by the pipeline's status machine: the run
# only starts when the previous stage ('3') finished, and the elapsed
# time plus the new state ('4') is recorded afterwards.
if __name__ == '__main__':
    last_data_stream_status = '3'
    current_data_stream_status = '4'
    current_note_log = 'convert tflite.'
    if not data_stream_status_machine.start_check(last_data_stream_status):
        exit(1)
    start_time = get_time_util.get_last_time()
    print('Start converting tflite...')
    # absl's app.run() raises SystemExit on completion; swallow it so the
    # status machine bookkeeping below still runs.
    try:
        app.run(main)
    except SystemExit:
        pass
    print('Converting tflite done...')
    end_time = get_time_util.get_last_time()
    data_stream_status_machine.end_check(data_stream_status=current_data_stream_status,
                                         note_log=current_note_log,
                                         start_time=start_time, end_time=end_time)
| chenpengf0223/Yolov5_tf | convert_tflite.py | convert_tflite.py | py | 10,874 | python | en | code | 0 | github-code | 50 |
29631158294 | import os
import string
try:
from OpenSSL import crypto
except ImportError:
crypto = None
from PyQt5.QtCore import QTextCodec, QRegularExpression, Qt
from PyQt5.QtGui import QTextCursor, QTextCharFormat, QBrush, QColor, QIcon
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QFileDialog, QTreeWidgetItem
from deen.gui.widgets.hex import HexViewWidget
from deen.gui.widgets.text import TextViewWidget
from deen.gui.widgets.formatted import FormattedViewWidget
from deen.gui.widgets.ui_deenencoderwidget import Ui_DeenEncoderWidget
from deen import logger
# Path to the bundled media (icon) directory, resolved relative to this module.
MEDIA_PATH = os.path.dirname(os.path.abspath(__file__)) + '/../media/'
# Child logger for this widget module.
LOGGER = logger.DEEN_LOG.getChild('gui.widgets.encoder')
class DeenEncoderWidget(QWidget):
"""For each plugin operation Deen will create
an instance of this class to represent an
action. self.parent in instances of this
class should point to the main window (an
instance of DeenGui)."""
    def __init__(self, parent, readonly=False):
        """Build one encoder step widget.

        :param parent: the main window (a DeenGui instance)
        :param readonly: create all content views read-only
        """
        super(DeenEncoderWidget, self).__init__(parent)
        self.ui = Ui_DeenEncoderWidget()
        self.parent = parent
        self.readonly = readonly
        self.process = False
        self.plugin = None
        self.search_matches = None
        self._content = bytearray()
        self.codec = QTextCodec.codecForName('UTF-8')
        # View-mode flags; exactly one view widget is visible at a time.
        self.hex_view = False
        self.formatted_view = False
        # TODO: check if printable is enforced
        self.printable = True
        # Assign custom widgets for text_field and hex_field.
        self.text_field = TextViewWidget(self, readonly=self.readonly)
        self.text_field.textChanged.connect(self.field_content_changed)
        self.hex_field = HexViewWidget(read_only=self.readonly, parent=self)
        self.hex_field.setHidden(True)
        self.formatted_field = FormattedViewWidget(self, readonly=self.readonly)
        self.formatted_field.setHidden(True)
        self.formatted_field.textChanged.connect(self.field_content_changed)
        # Add connection for selection field
        self.text_field.selectionChanged.connect(self.update_selection_field)
        self.formatted_field.selectionChanged.connect(self.update_selection_field)
        self.hex_field.bytesChanged.connect(self.field_content_changed)
        self.hex_field.itemSelectionChanged.connect(self.update_selection_field)
        # setupUi() access the field self._content, thus it must be initialized first:
        self.ui.setupUi(self)
        self.ui.selection_length_label.setText('Selection: 0')
        self.ui.content_area_layout.addWidget(self.text_field)
        self.ui.content_area_layout.addWidget(self.hex_field)
        self.ui.content_area_layout.addWidget(self.formatted_field)
        # Configure widget elements
        self.ui.toggle_text_view.setChecked(True)
        self.ui.toggle_text_view.clicked.connect(self.view_text)
        self.toggle_button_border(self.ui.toggle_text_view)
        self.ui.toggle_hex_view.setChecked(False)
        self.ui.toggle_hex_view.clicked.connect(self.view_hex)
        self.ui.toggle_formatted_view.setChecked(False)
        self.ui.toggle_formatted_view.clicked.connect(self.view_formatted)
        # Add icons to buttons; prefer the dark icon set on dark palettes,
        # and only replace the text labels when all three icons exist.
        icon_path = MEDIA_PATH
        if self.palette().color(self.backgroundRole()).value() < 128:
            icon_path += 'dark/'
        if all([os.path.exists(icon_path + 'view-text.svg'),
                os.path.exists(icon_path + 'view-hex.svg'),
                os.path.exists(icon_path + 'view-formatted.svg')]):
            self.ui.toggle_text_view.setIcon(QIcon(icon_path + 'view-text.svg'))
            self.ui.toggle_text_view.setText('')
            self.ui.toggle_hex_view.setIcon(QIcon(icon_path + 'view-hex.svg'))
            self.ui.toggle_hex_view.setText('')
            self.ui.toggle_formatted_view.setIcon(QIcon(icon_path + 'view-formatted.svg'))
            self.ui.toggle_formatted_view.setText('')
        # Update labels with proper values
        self.update_length_field()
        # Create references for tree view items (indices must match the
        # order defined in the .ui file).
        self.plugin_tree_top_decode = self.ui.plugin_tree_view.topLevelItem(0)
        self.plugin_tree_top_encode = self.ui.plugin_tree_view.topLevelItem(1)
        self.plugin_tree_top_uncompress = self.ui.plugin_tree_view.topLevelItem(2)
        self.plugin_tree_top_compress = self.ui.plugin_tree_view.topLevelItem(3)
        self.plugin_tree_top_disassemble = self.ui.plugin_tree_view.topLevelItem(4)
        self.plugin_tree_top_assemble = self.ui.plugin_tree_view.topLevelItem(5)
        self.plugin_tree_top_hash = self.ui.plugin_tree_view.topLevelItem(6)
        self.plugin_tree_top_misc = self.ui.plugin_tree_view.topLevelItem(7)
        self.plugin_tree_top_format = self.ui.plugin_tree_view.topLevelItem(8)
        # Add tree items for the plugin tree view; plugins flagged as
        # cmd_only are excluded from the GUI.
        for encoding in [p[1] for p in self.parent.plugins.codecs
                         if (not getattr(p[1], 'cmd_only', None) or
                            (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_encode.addChild(QTreeWidgetItem([encoding.display_name]))
        for encoding in [p[1] for p in self.parent.plugins.codecs
                         if (not getattr(p[1], 'cmd_only', None) or
                            (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_decode.addChild(QTreeWidgetItem([encoding.display_name]))
        for compression in [p[1] for p in self.parent.plugins.compressions
                            if (not getattr(p[1], 'cmd_only', None) or
                               (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_compress.addChild(QTreeWidgetItem([compression.display_name]))
        for compression in [p[1] for p in self.parent.plugins.compressions
                            if (not getattr(p[1], 'cmd_only', None) or
                               (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_uncompress.addChild(QTreeWidgetItem([compression.display_name]))
        for assembly in [p[1] for p in self.parent.plugins.assemblies
                         if (not getattr(p[1], 'cmd_only', None) or
                            (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_assemble.addChild(QTreeWidgetItem([assembly.display_name]))
        for assembly in [p[1] for p in self.parent.plugins.assemblies
                         if (not getattr(p[1], 'cmd_only', None) or
                            (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_disassemble.addChild(QTreeWidgetItem([assembly.display_name]))
        for hashalg in [p[1] for p in self.parent.plugins.hashs
                        if (not getattr(p[1], 'cmd_only', None) or
                           (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_hash.addChild(QTreeWidgetItem([hashalg.display_name]))
        for misc in [p[1] for p in self.parent.plugins.misc
                     if (not getattr(p[1], 'cmd_only', None) or
                        (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_misc.addChild(QTreeWidgetItem([misc.display_name]))
        for formatter in [p[1] for p in self.parent.plugins.formatters
                          if (not getattr(p[1], 'cmd_only', None) or
                             (getattr(p[1], 'cmd_only', None) and not p[1].cmd_only))]:
            self.plugin_tree_top_format.addChild(QTreeWidgetItem([formatter.display_name]))
        # Connect signal to tree view
        self.ui.plugin_tree_view.itemClicked.connect(self.action)
        self.ui.plugin_tree_view.currentItemChanged.connect(self.action)
        self.ui.plugin_tree_view.setMaximumWidth(self.ui.plugin_tree_view.columnWidth(0) * 2)
        # Hide top level items without any loaded plugins
        for i in range(self.ui.plugin_tree_view.topLevelItemCount()):
            tl_item = self.ui.plugin_tree_view.topLevelItem(i)
            if not tl_item:
                continue
            if tl_item.childCount() < 1:
                tl_item.setHidden(True)
        # Configure search widget
        self.ui.search_area.returnPressed.connect(self.search_highlight)
        self.ui.search_button.clicked.connect(self.search_highlight)
        self.ui.search_clear_button.clicked.connect(self.clear_search_highlight)
        self.ui.search_progress_bar.hide()
        # Inline error banner; hidden until set_error_message() is called.
        self.error_message = QLabel()
        self.error_message.setStyleSheet('border: 2px solid red;')
        self.error_message.hide()
        self.ui.error_message_layout.addWidget(self.error_message)
        self.ui.error_message_layout_widget.hide()
        self.ui.search_group.hide()
        # After adding new widgets, we have to update the max scroll range.
        self.parent.ui.DeenMainWindow.verticalScrollBar().rangeChanged.connect(self.update_vertical_scroll_range)
        self.parent.ui.DeenMainWindow.horizontalScrollBar().rangeChanged.connect(self.update_horizontal_scroll_range)
    @property
    def content(self):
        """The raw bytes (bytearray) currently held by this widget."""
        return self._content
    @content.setter
    def content(self, data):
        """Replace the widget's content and pick an appropriate view.

        Bytes are promoted to bytearray. The formatted view is inherited
        from the previous widget; otherwise non-printable data switches
        to the hex view and printable data to the text view.
        """
        assert isinstance(data, (bytearray, bytes))
        if isinstance(data, bytes):
            data = bytearray(data)
        self._content = data
        if self.previous.formatted_view:
            self.printable = True
            self.ui.toggle_formatted_view.click()
        elif not all(chr(c) in string.printable for c in self._content):
            # If there are non-printable characters,
            # switch to hex view.
            self.printable = False
            self.ui.toggle_hex_view.click()
        else:
            # Prevent the field from overwriting itself with invalid
            # characters.
            self.printable = True
            self.ui.toggle_text_view.setEnabled(True)
            self.ui.toggle_text_view.click()
            self.text_field.moveCursor(QTextCursor.End)
        self.update_length_field()
def has_previous(self):
"""Determine if the current widget is the root widget."""
if self.parent.widgets and self.parent.widgets[0] != self:
return True
else:
return False
def has_next(self):
"""Determine if there are already new widgets created."""
if self.parent.widgets and self.parent.widgets[-1] != self:
return True
else:
return False
@property
def previous(self):
"""Return the previous widget. If the current widget
is the root widget, this function returns the root
widget (self)."""
# during setupUI() this property is being called but the widget list is empty and thus, has_previous would fail:
if len(self.parent.widgets) == 0:
return None
if not self.has_previous():
return self
for i, w in enumerate(self.parent.widgets):
if w == self:
return self.parent.widgets[i - 1]
@property
def next(self):
"""Return the next widget. This is most likely the one
that is supposed to hold the output of action()'s of
the current widget."""
# during setupUI() this property is being called but the widget list is empty and thus, has_next would fail:
if len(self.parent.widgets) == 0:
return None
if not self.has_next():
w = DeenEncoderWidget(self.parent)
self.parent.widgets.append(w)
self.parent.ui.encoder_widget_layout.addWidget(w)
return w
for i, w in enumerate(self.parent.widgets):
if w == self:
return self.parent.widgets[i + 1]
@property
def field(self):
"""A property that references
the currently active field."""
if self.hex_view:
return self.hex_field
elif self.formatted_view:
return self.formatted_field
else:
return self.text_field
    def set_field_focus(self):
        """Give keyboard focus to the currently active view widget
        (text, hex or formatted, resolved via the self.field property)."""
        self.field.setFocus()
    def get_field_content(self):
        """Return the content of the currently active view field."""
        return self.field.content
def toggle_search_box_visibility(self):
"""Calling this function will either
hide or show the search box. By default
it is hidden and can be made visible
with the Search button."""
if self.ui.search_group.isVisible():
self.ui.search_group.hide()
self.clear_search_highlight()
self.set_field_focus()
else:
self.ui.search_group.show()
self.ui.search_area.setFocus()
    def field_content_changed(self):
        """React to edits in the active content field.

        Synchronizes self._content with the field (only while the data
        is printable, to avoid clobbering binary content) and, when a
        plugin is currently selected, re-runs it live on the new input.
        """
        if self.printable:
            # TODO: is there another situation where this could fail?
            self._content = self.get_field_content()
        # Re-apply the currently selected plugin (live update).
        if self.plugin:
            self._action()
        self.update_length_field()
def search_highlight(self):
"""The function that will be called whenever the
search area is submitted. It will search within
the text_field and highlights matches."""
cursor = self.text_field.textCursor()
char_format = QTextCharFormat()
cursor.select(QTextCursor.Document)
cursor.mergeCharFormat(char_format)
cursor.clearSelection()
char_format.setBackground(QBrush(QColor('yellow')))
regex = QRegularExpression(self.ui.search_area.text())
matches = regex.globalMatch(self.text_field.toPlainText())
_matches = []
while matches.hasNext():
_matches.append(matches.next())
self.search_matches = _matches
self.ui.search_matches_label.setText('Matches: ' + str(len(self.search_matches)))
self.ui.search_progress_bar.setRange(0, len(self.search_matches))
if len(self.search_matches) > 100:
self.ui.search_progress_bar.show()
match_count = 1
for match in self.search_matches:
if match_count > 1000:
# TODO: implement proper handling of > 1000 matches
break
self.ui.search_progress_bar.setValue(match_count)
match_count += 1
cursor.setPosition(match.capturedStart())
cursor.setPosition(match.capturedEnd(), QTextCursor.KeepAnchor)
cursor.mergeCharFormat(format)
self.ui.search_progress_bar.hide()
def clear_search_highlight(self, widget=None):
"""Reset any highlights set by the search
function."""
widget = widget or self
cursor = self.text_field.textCursor()
cursor.select(QTextCursor.Document)
char_format = QTextCharFormat()
cursor.setCharFormat(char_format)
widget.ui.search_area.clear()
widget.ui.search_matches_label.setText('Matches: 0')
    def set_error(self, widget=None):
        """Mark *widget* (default: self) as failed: draw a red border
        around its text field and remove all widgets after it."""
        widget = widget or self
        widget.text_field.setStyleSheet('border: 2px solid red;')
        self.remove_next_widgets(widget=widget, offset=1)
    def set_error_message(self, message, widget=None):
        """Show *message*, prefixed with 'Error: ' and colored red, in
        the error label of *widget* (defaults to self)."""
        widget = widget or self
        if not self.ui.error_message_layout_widget.isVisible():
            self.ui.error_message_layout_widget.show()
        widget.error_message.setText('Error: ' + message)
        widget.error_message.setStyleSheet('color: red;')
        widget.error_message.show()
    def clear_error_message(self, widget=None):
        """Hide and reset the error banner and the red text field
        border of *widget* (defaults to self)."""
        widget = widget or self
        self.ui.error_message_layout_widget.hide()
        widget.error_message.clear()
        widget.error_message.hide()
        widget.text_field.setStyleSheet('')
def view_text(self):
"""A wrapper function that can be
called to change to the text view
widget."""
self.hex_view = False
self.formatted_view = False
self.text_field.setHidden(False)
self.hex_field.setHidden(True)
self.formatted_field.setHidden(True)
self.text_field.content = self._content
self.update_length_field()
self.toggle_button_border(self.ui.toggle_text_view)
def view_hex(self):
"""A wrapper function that can be
called to change to the hex view
widget."""
self.hex_view = True
self.formatted_view = False
self.text_field.setHidden(True)
self.formatted_field.setHidden(True)
self.hex_field.setHidden(False)
self.hex_field.content = self._content
self.update_length_field()
self.toggle_button_border(self.ui.toggle_hex_view)
def view_formatted(self):
"""A wrapper function that can be
called to change to the formatted
view widget."""
self.formatted_view = True
self.text_field.setHidden(True)
self.hex_field.setHidden(True)
self.formatted_field.setHidden(False)
self.formatted_field.content = self._content
self.update_length_field()
self.toggle_button_border(self.ui.toggle_formatted_view)
def toggle_button_border(self, button):
buttons = [self.ui.toggle_formatted_view,
self.ui.toggle_text_view,
self.ui.toggle_hex_view]
for b in buttons:
if b == button:
b.setStyleSheet('background-color: grey;')
else:
b.setStyleSheet('background-color: 0;')
    def clear_content(self, widget=None):
        """Clear the content of *widget* (default: self) and remove all
        widgets that follow it.

        For the root widget the fields are emptied in place; for any
        other widget the previous widget's plugin selection is reset
        instead, since the non-root widget itself gets removed below.
        """
        widget = widget or self
        self.clear_error_message(widget=widget)
        self.clear_search_highlight(widget=widget)
        if self.parent.widgets[0] == widget:
            widget.text_field.clear()
            widget.hex_field.content = bytearray()
            widget._content = bytearray()
            widget.update_length_field()
            widget.set_field_focus()
            widget.plugin = None
            # TODO: move to seperat wrapper function?
            widget.ui.plugin_tree_view.selectionModel().clearSelection()
        else:
            # Remove the current_combo of the previous
            # widget so that the last pick doesn't
            # stuck in the previous widget after deleting
            # one.
            self.previous.current_combo = None
            self.previous.set_field_focus()
            self.previous.plugin = None
            # TODO: move to seperat wrapper function?
            self.previous.ui.plugin_tree_view.selectionModel().clearSelection()
            self.remove_next_widgets(widget=widget)
    def update_length_field(self, widget=None):
        """Refresh the length label of *widget* (default: self) with
        the byte count of its currently active field."""
        widget = widget or self
        widget.ui.content_length_label.setText('Length: ' + str(len(widget.field.content)))
    def update_selection_field(self):
        """Refresh the selection label with the active field's current
        selection byte count."""
        self.ui.selection_length_label.setText('Selection: ' + str(self.field.selection_count))
    def update_vertical_scroll_range(self, minimum, maximum):
        """Scroll the main window area to the bottom so newly created
        encoder widgets come into view (*minimum* is unused; the
        signature matches QScrollBar.rangeChanged)."""
        sb = self.parent.ui.DeenMainWindow.verticalScrollBar()
        sb.setValue(maximum)
def update_horizontal_scroll_range(self, minimum, maximum):
"""Update the scroll maximum of the main window scroll
are in order to automatically jump to newly created
encoder widgets."""
sb = self.parent.ui.DeenMainWindow.horizontalScrollBar()
sb.setValue(maximum)
def remove_next_widgets(self, widget=None, offset=0):
"""Remove all widgets after widget. If widget is not
set, remove all widgets after the current widget."""
widget = widget or self
assert isinstance(offset, int)
index = self.parent.widgets.index(widget) + offset
while len(self.parent.widgets) != index:
if len(self.parent.widgets) == 1:
break
self.parent.ui.encoder_widget_layout.removeWidget(self.parent.widgets[-1])
self.parent.widgets[-1].deleteLater()
self.parent.widgets[-1] = None
self.parent.widgets.pop()
def get_tree_tl_label_for_plugin(self, plugin=None, process=None):
"""Return the top level item label for a plugin."""
plugin = plugin or self.plugin
process = process or self.process
category = self.parent.plugins.get_category_for_plugin(plugin)
if not category:
LOGGER.error('Could not determine category for ' + plugin.name)
return
tl_label = ''
if category == 'codecs':
tl_label = 'Encode' if process else 'Decode'
elif category == 'compressions':
tl_label = 'Compress' if process else 'Uncompress'
elif category == 'assemblies':
tl_label = 'Assemble' if process else 'Disassemble'
elif category == 'hashs':
tl_label = 'Hash'
elif category == 'misc':
tl_label = 'Miscellaneous'
elif category == 'formatters':
tl_label = 'Format'
else:
LOGGER.warning('Could not determine top level label')
return
return tl_label
def get_tree_item_for_plugin(self, plugin=None, process=None):
"""Return the tree view item of a plugin."""
plugin = plugin or self.plugin
process = process or self.process
tl_label = self.get_tree_tl_label_for_plugin(plugin, process)
for i in range(self.ui.plugin_tree_view.topLevelItemCount()):
tl_item = self.ui.plugin_tree_view.topLevelItem(i)
if not tl_item:
continue
# Find the top level item for the current label
if tl_item.text(0) == tl_label:
for j in range(tl_item.childCount()):
tl_child = tl_item.child(j)
if plugin.display_name == tl_child.text(0):
return tl_child
def is_action_process(self, choice):
"""Returns True if the action should call
process(), False if unprocess() should be
called. Should only be used for values of
the combo boxes."""
if choice == 'Encode' or choice == 'Compress' or \
choice == 'Hash' or choice == 'Miscellaneous' or \
choice == 'Assemble':
return True
else:
return False
def action(self, tree_item, **args):
"""The main function that will call plugins via the tree view."""
if not tree_item.parent():
return
self.process = self.is_action_process(tree_item.parent().text(0))
self.plugin = self.parent.plugins.get_plugin_instance(tree_item.text(0))
self._action()
def action_fuzzy(self, plugin_name):
"""The main entry point for triggering
actions via the fuzzy search field. This
function determines if the current action
should process or unprocess data. It then
tries to find appropriate plugin and select
it in the plugin tree view."""
self.process = True
if plugin_name.startswith('-') or plugin_name.startswith('.'):
self.process = False
plugin_name = plugin_name[1:]
if not self._content:
return
if self.parent.plugins.plugin_available(plugin_name):
self.plugin = self.parent.plugins.get_plugin_instance(plugin_name)
else:
LOGGER.warning('Plugin {} not found'.format(plugin_name))
self.parent.show_error_msg('Plugin {} not found'.format(plugin_name))
return
tl_label = self.get_tree_tl_label_for_plugin()
# Clear all selected items first
for i in self.ui.plugin_tree_view.selectedItems():
i.setSelected(False)
for i in range(self.ui.plugin_tree_view.topLevelItemCount()):
tl_item = self.ui.plugin_tree_view.topLevelItem(i)
if not tl_item:
continue
# Find the top level item for the current label
if tl_item.text(0) == tl_label:
if not tl_item.isExpanded():
tl_item.setExpanded(True)
for j in range(tl_item.childCount()):
tl_child = tl_item.child(j)
if self.plugin.display_name == tl_child.text(0):
tl_child.setSelected(True)
self.ui.plugin_tree_view.scrollToItem(tl_child)
else:
# Collapse all other top level items
if tl_item.isExpanded():
tl_item.setExpanded(False)
self._action()
def _action(self, process=None):
if process != None:
self.process = process
# Update self._content with data from
# the current field.
self._content = self.field.content
if self.field.selected_data:
self._content = self.field.selected_data
if self._content and self.plugin:
# Reset plugin errors
self.plugin.error = None
self.clear_error_message()
# Clear error message on the next widget as well
if self.has_next() and self.next.plugin:
self.next.plugin.error = None
self.next.clear_error_message()
# Set formatted view if plugin requires it
if self.plugin.formatted:
self.formatted_view = True
else:
self.formatted_view = False
data = None
category = self.parent.plugins.get_category_for_plugin(self.plugin)
if not category:
LOGGER.error('Could not determine category for ' + self.plugin.name)
return
process_gui_func = None
unprocess_gui_func = None
if self.process and 'process_gui' in vars(type(self.plugin)):
# Check if the plugin class implements
# process_gui() itself, and does not
# inherit it from DeenPlugin.
process_gui_func = getattr(self.plugin, 'process_gui', None)
if not self.process and 'unprocess_gui' in vars(type(self.plugin)):
# Check if the plugin class implements
# unprocess_gui() itself, and does not
# inherit it from DeenPlugin.
unprocess_gui_func = getattr(self.plugin, 'unprocess_gui', None)
if process_gui_func or unprocess_gui_func:
if self.process and process_gui_func and \
callable(process_gui_func):
# For plugins that implement a process_gui() function
# that adds additional GUI elements.
data = self.plugin.process_gui(self.parent, self._content)
elif not self.process and unprocess_gui_func and \
callable(unprocess_gui_func):
# For plugins that implement a unprocess_gui() function
# that adds additional GUI elements.
data = self.plugin.unprocess_gui(self.parent, self._content)
else:
LOGGER.error('Invalid path')
if not data:
# plugin.process_gui() returned nothing, so
# don't create a new widget.
if self.plugin.error:
self.set_error()
self.set_error_message(str(self.plugin.error))
return
if self.plugin.error:
self.next.set_error()
self.next.set_error_message(str(self.plugin.error))
self.next.content = data
# TODO: decide when focus should be set to next widget
#self.next.set_field_focus()
if not self.plugin.error:
self.next.clear_error_message()
else:
# All other plugins will write their output to a new
# window (self.next).
if self.process or category == 'formatters':
data = self.plugin.process(self._content)
else:
data = self.plugin.unprocess(self._content)
if self.plugin.error:
self.next.set_error()
self.next.set_error_message(str(self.plugin.error))
if data:
self.next.content = data
if not self.plugin.error:
self.next.clear_error_message()
else:
LOGGER.error('Plugin {} did not return any data'.format(self.plugin.name))
# Ensure that the selected plugins are visible
# in all widget plugin tree views.
for w in self.parent.widgets:
selected = w.ui.plugin_tree_view.selectedItems()
if selected:
w.ui.plugin_tree_view.scrollToItem(selected[0])
| takeshixx/deen | deen/gui/encoder.py | encoder.py | py | 30,002 | python | en | code | 46 | github-code | 50 |
27209146743 | # -*- coding: UTF-8 -*-
# @Time : 2021/08/30 07:54
# @Author : Ranshi
# @File : 123.py
from typing import List
import random
import bisect
class Solution:
def __init__(self, w: List[int]):
for i in range(1, len(w)):
w[i] += w[i - 1]
self.lst = w
self.len = w[-1]
def pickIndex(self) -> int:
return bisect.bisect_right(self.lst, random.random() * self.len)
if __name__ == "__main__":
s = Solution([1, 2, 4, 5])
print(s.pickIndex())
| Zranshi/leetcode | my-code/528/main.py | main.py | py | 507 | python | en | code | 0 | github-code | 50 |
20025582560 | """
Run builtin python tests with some needed changes after patch enabled,
then run it once again after patch disabled to make sure nothing breaks
"""
import sys
import unittest
from test import test_enum, test_re, test_inspect, test_dynamicclassattribute
TEST_MODULES = test_enum, test_re, test_inspect, test_dynamicclassattribute
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
if sys.version_info < (3, 8):
expected_help_output_with_docs = expected_help_output_with_docs.replace(
'Readonly properties inherited from enum.EnumMeta:',
'Data descriptors inherited from enum.EnumMeta:'
)
if sys.version_info < (3, 7):
expected_help_output_with_docs = expected_help_output_with_docs.replace(
'\n | Color(value, names=None, *, module=None, qualname=None, type=None, start=1)\n |\x20\x20',
''
)
def test_inspect_getmembers(self):
values = dict((
('__class__', test_enum.EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', test_enum.__name__),
('blue', self.Color.blue),
('green', self.Color.green),
('red', self.Color.red),
))
result = dict(test_enum.inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=test_enum.EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=test_enum.EnumMeta, object=test_enum.EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=test_enum.__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
]
values.sort(key=lambda item: item.name)
result = list(test_enum.inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class TestSetClassAttr(unittest.TestCase):
def test_set_class_attr(self):
class Foo:
def __init__(self, value):
self._value = value
self._spam = 'spam'
@test_dynamicclassattribute.DynamicClassAttribute
def value(self):
return self._value
spam = test_dynamicclassattribute.DynamicClassAttribute(
lambda s: s._spam,
alias='my_shiny_spam'
)
self.assertFalse(hasattr(Foo, 'value'))
self.assertFalse(hasattr(Foo, 'name'))
foo_bar = Foo('bar')
value_desc = Foo.__dict__['value']
value_desc.set_class_attr(Foo, foo_bar)
self.assertIs(Foo.value, foo_bar)
self.assertEqual(Foo.value.value, 'bar')
foo_baz = Foo('baz')
Foo.my_shiny_spam = foo_baz
self.assertIs(Foo.spam, foo_baz)
self.assertEqual(Foo.spam.spam, 'spam')
def run_tests():
loader = unittest.TestLoader()
suites = [
loader.loadTestsFromModule(module) for module in TEST_MODULES
]
result = unittest.TextTestRunner().run(loader.suiteClass(suites))
if result.failures or result.errors:
sys.exit(1)
orig_test_inspect_getmembers = test_enum.TestStdLib.test_inspect_getmembers
orig_test_inspect_classify_class_attrs = test_enum.TestStdLib.test_inspect_classify_class_attrs
orig_expected_help_output_with_docs = test_enum.expected_help_output_with_docs
if __name__ == '__main__':
# run_tests() # tests will fail here only if something was wrong before patch
import fastenum
assert fastenum.enabled
test_enum.TestStdLib.test_inspect_getmembers = test_inspect_getmembers
test_enum.TestStdLib.test_inspect_classify_class_attrs = test_inspect_classify_class_attrs
test_enum.expected_help_output_with_docs = expected_help_output_with_docs
test_dynamicclassattribute.TestSetClassAttr = TestSetClassAttr
run_tests()
fastenum.disable()
assert not fastenum.enabled
test_enum.TestStdLib.test_inspect_getmembers = orig_test_inspect_getmembers
test_enum.TestStdLib.test_inspect_classify_class_attrs = orig_test_inspect_classify_class_attrs
test_enum.expected_help_output_with_docs = orig_expected_help_output_with_docs
del test_dynamicclassattribute.TestSetClassAttr
run_tests()
| Bobronium/fastenum | tests/builtin_test.py | builtin_test.py | py | 6,259 | python | en | code | 15 | github-code | 50 |
10409045034 | #!/usr/bin/python3
""" some content to please the checker """
def add_attribute(obj, name, value):
""" adds a attribute if possible """
if '__dict__' in dir(obj):
obj.name = value
else:
raise TypeError("can't add new attribute")
| TS-N/holbertonschool-higher_level_programming | 0x0A-python-inheritance/101-add_attribute.py | 101-add_attribute.py | py | 259 | python | en | code | 0 | github-code | 50 |
72845417755 | """
Maelstrom Framework
High-level guiding principles:
- Classes are defined hierarchically and rely on wrapper/interface
functions to interact down the hierarchy
- Fitness evaluation is implemented as an external function and the
function itself it passed to the island
- Fitness evaluations are expected to accept named
GeneticProgrammingPopulation objects as input and assign fitness to the individuals
- This file is agnostic of the nodes used in evolution
- This framework is designed with coevolution in mind, but one could easily
use a single-population island with an appropriate fitness function
"""
import multiprocessing
from tqdm.auto import tqdm
# import concurrent.futures
from maelstrom.island import GeneticProgrammingIsland
# General-purpose Maelstrom class that contains and manages multiple islands
class Maelstrom:
"""
Class that handles coevolutionary evolution of multiple populations
"""
def __init__(
self,
islands: dict,
island_class=GeneticProgrammingIsland,
# TODO: do we really want this to default to None instead of a #?
evaluations=None,
# TODO: do we want to default this to None instead of throwing err?
migration_edges=None,
cores=None,
position=None,
**kwargs,
):
"""
Initializes a Maelstrom object
Args:
islands: dictionary of island names and island parameters
evaluations: total number of evaluations to perform
migration_edges: list of migration edges
cores: number of cores to use
position: position of progress bar
**kwargs: keyword arguments to pass to island initialization
"""
self.islands = {}
self.island_class = island_class
self.migration_edges = migration_edges
self.evals = 0
self.eval_limit = evaluations
self.log = {}
if cores is None:
cores = min(32, multiprocessing.cpu_count())
self.cores = cores
self.position = position
# self.evalPool = multiprocessing.Pool(cores)
# Initialize islands
for key in islands:
self.islands[key] = self.island_class(
cores=self.cores, **kwargs[islands[key]], **kwargs
)
self.evals = sum(island.evals for island in self.islands.values())
self.champions = {}
# def __del__(self):
# self.evalPool.close()
def run(self):
"""
Performs a single run of evolution until termination
"""
generation = 1
self.evals = sum(island.evals for island in self.islands.values())
with multiprocessing.Pool(self.cores) as eval_pool:
with tqdm(
total=self.eval_limit, unit=" evals", position=self.position
) as pbar:
pbar.set_description(
f"Maelstrom Generation {generation}", refresh=False
)
pbar.update(self.evals)
while self.evals < self.eval_limit:
evals_old = self.evals
# print(f"Beginning generation: {generation}\tEvaluations: {self.evals}")
# migration
for edge in self.migration_edges:
# check migration timing
if generation % edge["period"] == 0:
destinationIsland, destinationPopulation = edge[
"destination"
]
sourceIsland, sourcePopulation = edge["source"]
# collect imports
migrants = self.islands[sourceIsland].select(
population=sourcePopulation,
n=edge["size"],
method=edge["method"],
)
# export to destimation population
if (
destinationPopulation
in self.islands[destinationIsland].imports
):
self.islands[destinationIsland].imports[
destinationPopulation
].extend(migrants)
else:
self.islands[destinationIsland].imports[
destinationPopulation
] = migrants
# Evolve one full generation with each island
with multiprocessing.pool.ThreadPool() as executor:
executor.starmap(
self.island_class.generation,
[(island, eval_pool) for island in self.islands.values()],
)
self.evals = sum(island.evals for island in self.islands.values())
generation += 1
pbar.set_description(
f"Maelstrom Generation {generation}", refresh=False
)
pbar.update(self.evals - evals_old)
island_termination = False
for _, island in self.islands.items():
island_termination = island_termination or island.termination()
if island_termination:
break
# identify champions for each species on each island
for _, island in self.islands.items():
for species, champions in island.champions.items():
if species not in self.champions:
self.champions[species] = {}
self.champions[species].update(champions)
for key, val in self.islands.items():
self.log[key] = val.log
return self
def build(self):
"""
Builds islands
"""
for island in self.islands.values():
island.build()
def clean(self):
"""
Cleans islands
"""
for island in self.islands.values():
island.clean()
| DeaconSeals/maelstrom-framework | maelstrom/__init__.py | __init__.py | py | 6,261 | python | en | code | 2 | github-code | 50 |
4517033117 | import numpy as np
import random as rd
INF = 10000000
class Node:
def __init__(self, total_score, ni, par):
self.t = total_score
self.n = ni
self.n_actions = rd.randrange(2, 5)
self.children = []
self.parent = par
self.isTerminal = False
def goUp(self):
return self.parent
def populate(self, curr_depth, max_depth):
if self.isTerminal:
return None
for i in range(self.n_actions):
node = Node(0, 0, self)
if depth == max_depth:
node.isTerminal = True
self.children.append(node)
def getMaxUcbNode(self, N):
ucbs = []
if self.isTerminal:
return None
for node in self.children:
ucbs.append(node.calculateUCB(N))
ucbs = np.array(ucbs)
return self.children(np.argmax(ucbs))
def calculateUCB(self, N):
if self.n == 0:
return INF
ucb = (self.t/self.n) + (np.log(N)/self.n)**0.5
return ucb
def checkLeaf(self):
if len(self.children) == 0:
return True
return False
def getRandomizedValue(self):
return rd.randrange(-10, 11)
def backpropagate(self, reward):
curr = self
while curr.parent != None:
curr.t += reward
curr.n += 1
def rollout(self):
return self.getRandomizedValue()
class Tree:
def __init__(self, root, max_depth):
self.root = root
self.N = 0 #total iterations
self.max_depth = max_depth
self.root.populate(0, self.max_depth)
def startPlay(self, depth, n_iterations):
curr_depth = 0
curr = self.root
count = 0
N = self.N
while n_iterations > count:
curr = self.root
if curr.checkLeaf():
if curr.n == 0:
if curr.isTerminal:
reward = curr.getRandomizedValue()
curr.backpropagate(reward)
count += 1
continue
curr.rollout(curr_depth)
else:
curr.populate() #expansion
curr = curr.getMaxUcbNode(N)
if curr == None:
reward = curr.getRandomizedValue()
curr.backpropagate(reward)
count += 1
continue
curr_depth += 1
reward = curr.rollout(curr_depth) #rollout and backpropagete
curr.backpropagate(reward)
count += 1
else:
curr = curr.getMaxUcbNode(N) #selection
curr_depth += 1
| sshanuraj/MCTS | mcts.py | mcts.py | py | 2,116 | python | en | code | 0 | github-code | 50 |
17815739449 | """
Goddard Rocket Problem Example.
Comparison to Goddard example in for OpenGoddard
https://github.com/istellartech/OpenGoddard/blob/master/examples/04_Goddard_0knot.py
"""
import beluga
import logging
from math import pi, sqrt
ocp = beluga.Problem()
# Define independent variables
ocp.independent('t', '1')
# Define quantities used in the problem
ocp.quantity('drag', '1 * d_c * v**2 * exp(-h_c * (h - h_0) / h_0)')
ocp.quantity('g', 'g_0 * (h_0 / h)**2')
# Define equations of motion
ocp.state('h', 'v', 'm') \
.state('v', '(thrust - drag)/mass - g', 'm/s') \
.state('mass', '-thrust/c', 'kg')
# Define controls
ocp.control('thrust', '1')
# Define constants' numeric values
g_0 = 1.0
h_0 = 1.0
v_0 = 0.0
mass_0 = 1.0
t_c = 3.5
h_c = 500
v_c = 620
mass_c = 0.6
tar_mass_f = mass_0 * mass_c
c = 0.5 * sqrt(g_0 * h_0)
d_c = 0.5 * v_c * mass_0 / g_0
thrust_max = t_c * g_0 * mass_0
# Define constants
ocp.constant('g_0', g_0, '1') # Gravity at surface
ocp.constant('t_c', t_c, '1') # Constant for computing max thrust
ocp.constant('h_c', h_c, '1') # Constant for height
ocp.constant('v_c', v_c, '1') # Constant for velocity
ocp.constant('mass_c', mass_c, '1') # Terminal mass fraction
ocp.constant('c', c, '1') # Thrust to fuel ratio
ocp.constant('d_c', d_c, '1') # Drag scaling
ocp.constant('T_max', thrust_max, '1') # Max thrust
ocp.constant('T_min', 0, '1')
# Define constants for BCs
ocp.constant('h_0', h_0, '1')
ocp.constant('v_0', v_0, '1')
ocp.constant('mass_0', mass_0, '1')
ocp.constant('v_f', v_0, '1')
ocp.constant('mass_f', tar_mass_f, '1')
# Define smoothing constant
ocp.constant('eps', 0.01, '1')
# Define costs
ocp.terminal_cost('-h', '1')
# Define constraints
ocp.initial_constraint('h - h_0', '1')
ocp.initial_constraint('v - v_0', '1')
ocp.initial_constraint('mass - mass_0', '1')
ocp.initial_constraint('t', 's')
ocp.terminal_constraint('v - v_f', '1')
ocp.terminal_constraint('mass - mass_f', '1')
ocp.path_constraint('thrust', '1', lower='T_min', upper='T_max', activator='eps', method='epstrig')
ocp.scale(m=1, s=1, kg=1, rad=1, nd=1)
bvp_solver = beluga.bvp_algorithm('spbvp')
guess_maker = beluga.guess_generator(
'auto',
start=[h_0, v_0, mass_0], # Starting values for states in order
costate_guess=-0.0001,
control_guess=[pi/3],
time_integrate=0.1,
)
continuation_steps = beluga.init_continuation()
continuation_steps.add_step() \
.num_cases(3) \
.const('v_f', 0)
continuation_steps.add_step() \
.num_cases(3, spacing='log') \
.const('mass_f', tar_mass_f)
continuation_steps.add_step() \
.num_cases(10, spacing='log') \
.const('eps', 0.000005)
beluga.add_logger(file_level=logging.DEBUG, display_level=logging.INFO)
sol_set = beluga.solve(
ocp=ocp,
method='indirect',
optim_options={'control_method': 'algebraic'},
bvp_algorithm=bvp_solver,
steps=continuation_steps,
guess_generator=guess_maker,
autoscale=False,
initial_helper=True,
)
| Rapid-Design-of-Systems-Laboratory/beluga | examples/AscentVehicles/GoddardRocket/GoddardRocket.py | GoddardRocket.py | py | 3,012 | python | en | code | 24 | github-code | 50 |
12001915114 | '''
.가 먼저 계산되야하는 상태로 인해 1개씩 빼는 게 무의미
홀수개씩이나 짝수개씩도 같은 상황,
앞쪽에서 하나씩 뽑아쓰는 거면 모두 결국 뒤 그 뒤 그 뒤뒤 연산자를
살펴야하는 문제 생김,
그러면 그렇게 하면 되긴하는데, 범용성은 있는건가
범용성에 맞추면 후위표기법이랑 스택계산기, 내가 안외워서 그렇지 복잡한건 아니긴했음
한편,
모든 .의 위치를 먼저 파악한뒤 그 좌우 좌표를 따로 저장해서
미리 .만 먼저 계산하는 것도 생각했는데
간단하지만 범용성은 있나
하지만 일단 난 간단
+
간단해서 좋긴 한데, 길이는 웬만큼 긴듯, 계산기 암기할걸
++
줄일수 있는 for문, 리스트복사 등을 줄였지만, 결국 타임리밋뜸
+ 연산자 관련하여
으레 변수끼리 연산한다음에는 어디에 저장할지의 문제 발생하는데
그리고 이경우 다음 연산을 위해서 이전 정보가 적절한 곳에 저장되야하는데
운좋게 result와 연산결과와 추후 과정사이에 연관이 있어 result에 저장
도 가능인데 그냥 tmp_nums로 해결했다가 다시 tmp_str로 해결
중요주제 범용성
++
찝찝한 계산기는 실패로 넘어갔고,
현재 틀에서 더 줄여보자
.나오면 +-나오기까지 while 돌려서 calcul 과정을 통합해보자
->dessert2
'''
import time
def calcul(list_tmp):
sum=int(list_tmp[0])
len_list_tmp=len(list_tmp)
idx1=1
idx2=2
#while 뒤쪽에 연산 붙을 시 인정되야할 다음 시행의 특징을 고려해서 조건식 짜야
while(idx1!=len_list_tmp):
operator=list_tmp[idx1]
if(operator=='+'):
sum+=int(list_tmp[idx2])
elif(operator=='-'):
sum-=int(list_tmp[idx2])
else:
pass
idx1+=2
idx2+=2
# print('###',sum)
return sum
def operator(depth, result, limit):
global nums
global cnt
if(depth==limit):
global list_tmp
list_tmp=[]
list_forprint=[]
# print(result)
# cnt=result.count('.') 전체 시행마다 발생하는데 이게 +2초정도
# tmp_nums=nums[:]
token_skip=0
cnt_pu=0
for op in result:
if(op=='.'):
cnt_pu+=1
if(cnt_pu>limit/2):
token_skip=1
if(token_skip):
pass
else:
# pass 단순 if else만 돌리면 8초대, 여기서부터 이미 타임리밋인지 아닌지 모르겠음
tmp_str=''
# . 먼저 정리하고 사용한 숫자와 연산자 빼게 단서 남기기
for idx in range(n-1):
# print(idx)
instant_operator=result[idx]
#print를 위한 중위표기식 저장
list_forprint.append(nums[idx])
list_forprint.append(result[idx])
if(idx==(n-1)-1):
list_forprint.append(nums[idx+1])
if(instant_operator=='.'):
if(tmp_str==''):
tmp_str=nums[idx]+nums[idx+1]
else:
tmp_str+=nums[idx+1]
if(idx==(n-1)-1):
list_tmp.append(tmp_str)
else:
if(tmp_str==''):
list_tmp.append(nums[idx])
else:
list_tmp.append(tmp_str)
tmp_str=''
list_tmp.append(result[idx])
if(idx==(n-1)-1):
list_tmp.append(nums[idx+1])
# print('##',list_tmp)
#남은 게 +-뿐이라 순차 계산 가능
val_sum=calcul(list_tmp)
if(val_sum==0):
cnt+=1
# print('##',list_tmp)
if(cnt<=20):
print(' '.join(list_forprint))
else:
for element in ('+','-','.'):
result[depth]=element
operator(depth+1, result,limit)
result[depth]=0
if __name__=="__main__":
start = time.time() # 시작 시간 저장
n=int(input())
global nums
nums=[str(i) for i in range(1,n+1)]
# print(nums)
global cnt
cnt=0
global list_tmp
list_tmp=list()
depth=0
result=[0]*(n-1)
limit=(n-1)
operator(depth,result,limit)
print(cnt)
print("time :", time.time() - start) # 현재시각 - 시작시간 = 실행 시간
| devsacti/Algorithms | python/algorithmjobs/L8/L8_04dessert.py | L8_04dessert.py | py | 4,668 | python | ko | code | 0 | github-code | 50 |
19030269070 | from grc.models import Application
def reference_number_string(reference_number):
trimmed_reference = reference_number.replace('-', '').replace(' ', '').upper()
formatted_reference = trimmed_reference[0: 4] + '-' + trimmed_reference[4: 8]
return formatted_reference
def validate_reference_number(reference):
reference = reference.replace('-', '').replace(' ', '').upper()
application = Application.query.filter_by(reference_number=reference).first()
if application is None:
print(f"An application with reference number {reference} does not exist", flush=True)
return False
else:
return application
| cabinetoffice/grc-app | grc/utils/reference_number.py | reference_number.py | py | 655 | python | en | code | 2 | github-code | 50 |
21494435423 | import logging
import webapp2
import jinja2
import os
import json
from google.appengine.ext import ndb
from google.appengine.api import users
from xml.dom.minidom import parse
import xml.dom.minidom
USERSTORE_NAME = 'default_userstore'
SUPPLIERSTORE_NAME = 'default_supplierstore'
PRODUCTSTORE_NAME = 'default_productstore'
CARTSTORE_NAME = 'default_cartstore'
ITEMSTORE_NAME = 'default_itemstore'
ORDERSTORE_NAME = 'default_orderstore'
#jinja
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),'staticpage/')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def data_store_key( datastore_name):
"""Constructs a datastore key for a datastore entity."""
#Note that all my data stores are going to be under the parent EmerDatastore
return ndb.Key('EmerDatastore', datastore_name)
class User(ndb.Model):
#represents a User entry.
id = ndb.StringProperty(indexed=True)
firstname = ndb.StringProperty(indexed=False)
lastname = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
address = ndb.TextProperty(indexed=False)
class Supplier(ndb.Model):
#Represents a supplier
id = ndb.StringProperty(indexed=True)
nameSupplier = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
phonenumber = ndb.IntegerProperty(indexed=False)
url = ndb.StringProperty(indexed=False)
class Product(ndb.Model):
#Represents a product
id = ndb.StringProperty(indexed=True)
supplier = ndb.StructuredProperty(Supplier)
nameProduct = ndb.StringProperty(indexed=False)
price = ndb.FloatProperty(indexed=False)
stockTotal = ndb.IntegerProperty(indexed=False)
class Item(ndb.Model):
#Represents an item
id = ndb.StringProperty(indexed=True)
product = ndb.StructuredProperty(Product)
quantity = ndb.IntegerProperty(indexed=False)
class Cart(ndb.Model):
#represents a cart
id = ndb.StringProperty(indexed=True)
item = ndb.StructuredProperty(Item)
user = ndb.StructuredProperty(User)
totalPrice = ndb.FloatProperty(indexed=False)
class Order(ndb.Model):
#represents an order
id = ndb.StringProperty(indexed=True)
item = ndb.StructuredProperty(Item)
user = ndb.StructuredProperty(User)
cart =ndb.StructuredProperty(Cart)
totalPrice = ndb.FloatProperty(indexed=False)
def init_products():
#add products to db
#Check that we haven't already added the book
products_query = Product.query()
products = products_query.fetch()
if len(products) == 0:
# Open products.xml document using minidom parser
DOMTree = xml.dom.minidom.parse("resources/products.xml")
collection = DOMTree.documentElement
#Get products
products = collection.getElementsByTagName("product")
# Print detail
for product in products:
newproduct = Product(parent=data_store_key(PRODUCTSTORE_NAME))
newproduct.id = product.getAttribute("id");
newproduct.supplier = product.getAttribute("supplier");
nameProduct = product.getElementsByTagName('nameProduct')[0]
newproduct.nameProduct = nameProduct.childNodes[0].data;
price = product.getElementsByTagName('price')[0]
newproduct.price = float(price.childNodes[0].data);
stockTotal = product.getElementsByTagName('stockTotal')[0]
newbook.stockTotal = integer(stockTotal.childNodes[0].data);
newproduct.put();
def init_suppliers():
"""add to db"""
#Check that we haven't already added the supplier
suppliers_query = Supplier.query()
suppliers = suppliers_query.fetch()
if len(suppliers) == 0:
# Open suppliers.xml document using minidom parser
DOMTree = xml.dom.minidom.parse("resources/products.xml")
collection = DOMTree.documentElement
#Get suppliers
suppliers = collection.getElementsByTagName("supplier")
# Print detail
for supplier in suppliers:
newsupplier = Supplier(parent=data_store_key(SUPPLIERSTORE_NAME))
newsupplier.id = supplier.getAttribute("id");
nameSupplier = supplier.getElementsByTagName('name')[0]
newsupplier.nameSupplier = nameSupplier.childNodes[0].data;
email = supplier.getElementsByTagName('email')[0]
newsupplier.email = email.childNodes[0].data;
phonenumber = supplier.getElementsByTagName('phonenumber')[0]
newsupplier.phonenumber = phonenumber.childNodes[0].data;
url = supplier.getElementsByTagName('url')[0]
newsupplier.url = url.childNodes[0].data;
newsupplier.put();
def init_items():
"""add to db"""
#Check that we haven't already added the item
items_query = Item.query()
items = items_query.fetch()
if len(items) == 0:
# Open products.xml document using minidom parser
DOMTree = xml.dom.minidom.parse("resources/products.xml")
collection = DOMTree.documentElement
#Get products
items = collection.getElementsByTagName("item")
# Print detail
for item in items:
newitem = Item(parent=data_store_key(ITEMSTORE_NAME))
newitem.id = item.getAttribute("id");
newitem.product = item.getAttribute("product");
quantity = item.getElementsByTagName('quantity')[0]
newitem.quantity = quantity(integer.childNodes[0].data);
newitem.put();
class MainPage(webapp2.RequestHandler):
    """Main handler: serves the client page to signed-in users."""
    def get(self):
        """Serve the application page, or redirect anonymous visitors to login."""
        user = users.get_current_user()
        if not user:
            # Anonymous visitor: send them through the login flow and back here.
            self.redirect(users.create_login_url(self.request.uri))
            return
        # Signed in: render the client page with the user's identity details.
        template = JINJA_ENVIRONMENT.get_template('page.html')
        context = {
            'user_id': user.user_id(),
            'user_nickname': user.nickname(),
            'logout_url': users.create_logout_url(self.request.uri),
        }
        self.response.write(template.render(context))
class UserServiceHandler(webapp2.RequestHandler):
    """REST handler for the user resource (GET /users/<id>, POST/PUT /users)."""
    def get(self,user_id):
        """Process an HTTP GET request by returning the user with `user_id` as JSON."""
        #Read the user data from the data store (at most one match expected)
        users_query = User.query(User.id==user_id)
        users = users_query.fetch(1)
        #If there was no information for the user then return a 404 error
        if len(users) < 1:
            self.error(404)
        else:
            #Create a dictionary to store the user attributes
            r={};
            r['id'] = users[0].id;
            r['firstname'] = users[0].firstname;
            r['lastname'] = users[0].lastname;
            r['email'] = users[0].email;
            r['address'] = users[0].address;
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json.dumps(r, separators=(',',':')))
    def post(self):
        """Process an HTTP POST request by creating a new user record from the body JSON."""
        #Parse the json we received
        userjson = json.loads(self.request.body)
        # Checks if the user is logged in
        current_user = users.get_current_user()
        if (not current_user) or not (current_user.user_id() == userjson["id"]) :
            #The request could not be completed because the user is not logged in
            #or the user who is logged in is not the user specified by the request
            #NOTE(review): 403 would be the conventional status here - confirm before changing
            self.error(500)
        else:
            users_query = User.query(User.id==userjson["id"])
            query_results = users_query.fetch()
            if len(query_results) > 0:
                self.error(409) #conflict - a user with this id already exists
            else:
                #Create user object to add to the app engine store
                user = User(parent=data_store_key(USERSTORE_NAME))
                user.id = userjson["id"];
                user.firstname = userjson["firstName"];
                user.lastname = userjson["lastName"];
                #user.email = userjson["email"];
                #user.address = userjson["address"];
                #Store the user info
                user.put();
                #return a message to the client
                data = {}
                data['message'] = 'Updated User (POST):'+userjson["id"]
                json_response = json.dumps(data)
                self.response.headers['Content-Type'] = 'text/x-json'
                self.response.write(json_response)
    def put(self):
        """Process an HTTP PUT request by updating an existing user's information."""
        #Parse the json we received
        userjson = json.loads(self.request.body)
        # Checks if the user is logged in
        current_user = users.get_current_user()
        if (not current_user) or not (current_user.user_id() == userjson["id"]) :
            #The request could not be completed because the user is not logged in
            #or the user who is logged in is not the user specified by the request
            #NOTE(review): 403 would be the conventional status here - confirm before changing
            self.error(500)
        else:
            users_query = User.query(User.id==userjson["id"])
            query_results = users_query.fetch()
            if len(query_results) == 0:
                self.error(404); #not found - cannot update a user that does not exist
            else:
                #Update the existing user object in place
                user = query_results[0];
                user.firstname = userjson["firstName"];
                user.lastname = userjson["lastName"];
                #user.email = userjson["email"];
                #user.address = userjson["address"];
                #Store the user info
                user.put();
                #return a message to the client
                data = {}
                data['message'] = 'Updated User (PUT):'+userjson["id"]
                json_response = json.dumps(data)
                self.response.headers['Content-Type'] = 'text/x-json'
                self.response.write(json_response)
class SupplierServiceHandler(webapp2.RequestHandler):
    """REST handler for the supplier resource (/supplier)."""
    def get(self):
        """Return every supplier as JSON: {"suppliers": [...]}."""
        suppliers_query = Supplier.query()
        suppliers = suppliers_query.fetch()
        result = []
        for p in suppliers:
            # Serialize each supplier entity into a plain dictionary.
            supplier = {}
            supplier['id'] = p.id
            supplier['nameSupplier'] = p.nameSupplier
            supplier['email'] = p.email
            supplier['phonenumber'] = p.phonenumber
            supplier['url'] = p.url
            result.append(supplier)
        r = {'suppliers': result}
        self.response.headers['Content-Type'] = 'text/x-json'
        self.response.write(json.dumps(r, separators=(',', ':')))
    def put(self):
        """Add a new supplier, or update the name of an existing one."""
        current_user = users.get_current_user()
        if not current_user:
            # Only authenticated users may modify suppliers.
            self.error(403)  # access denied
        else:
            # Parse the json we received.
            supplierjson = json.loads(self.request.body)
            # Check whether we already have an entry for that supplier.
            suppliers_query = Supplier.query(Supplier.id == supplierjson["id"])
            query_results = suppliers_query.fetch()
            if len(query_results) == 0:
                # No entity with this id yet: create a new supplier.
                supplier = Supplier(parent=data_store_key(PUBSTORE_NAME))
                supplier.id = supplierjson["id"]
                supplier.nameSupplier = supplierjson["nameSupplier"]
                supplier.email = supplierjson["email"]
                supplier.phonenumber = supplierjson["phonenumber"]
                supplier.url = supplierjson["url"]
                supplier.put()
                message = 'Added Supplier (PUT):' + supplierjson["id"]
            else:
                # Existing supplier: only the display name is updatable here.
                supplier = query_results[0]
                supplier.nameSupplier = supplierjson["nameSupplier"]
                supplier.put()
                message = 'Updated Supplier (PUT):' + supplierjson["id"]
            # Return a confirmation message to the client.
            json_response = json.dumps({'message': message})
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json_response)
    def delete(self, supplier_id):
        """Delete the supplier with the given id (blocked while products reference it)."""
        logging.info("Supplier delete called:" + supplier_id)
        # Bug fix: this guard previously queried the undefined names
        # Supplier.publisher / publisher_id and raised NameError on every call.
        # Per the surrounding comments, the intent is to refuse deletion while
        # other entities still reference this supplier.
        # NOTE(review): assumes Product.supplier stores the supplier id - confirm.
        products_query = Product.query(Product.supplier == supplier_id)
        if len(products_query.fetch()) != 0:
            # Refuse: a product entry is still using this supplier.
            self.error(405)  # method not allowed
        else:
            current_user = users.get_current_user()
            if not current_user:
                self.error(403)  # access denied
            else:
                suppliers_query = Supplier.query(Supplier.id == supplier_id)
                query_results = suppliers_query.fetch()
                if len(query_results) == 0:
                    self.error(404)  # no such supplier
                else:
                    # Delete the entity via its datastore key.
                    query_results[0].key.delete()
                    data = {'message': 'Deleted Supplier:' + supplier_id}
                    json_response = json.dumps(data)
                    self.response.headers['Content-Type'] = 'text/x-json'
                    self.response.write(json_response)
class OrderServiceHandler(webapp2.RequestHandler):
    """REST handler for the order resource (/order)."""
    def get(self):
        """Return every order as JSON: {"orders": [...]}."""
        logging.info("order GET called")
        orders_query = Order.query()
        orders = orders_query.fetch()
        result = []
        for b in orders:
            # Serialize each order entity into a plain dictionary.
            order = {}
            order['id'] = b.id
            order['item'] = b.item
            order['user'] = b.user
            order['cart'] = b.cart
            # totalPrice is stored as a float; serialized as a string for the client.
            order['totalPrice'] = str(b.totalPrice)
            result.append(order)
        r = {'orders': result}
        self.response.headers['Content-Type'] = 'text/x-json'
        self.response.write(json.dumps(r, separators=(',', ':')))
    def delete(self, order_id):
        """Delete the order with the given id."""
        logging.info("Order delete called:" + order_id)
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            orders_query = Order.query(Order.id == order_id)
            query_results = orders_query.fetch()
            if len(query_results) == 0:
                self.error(404)  # no such order
            else:
                # Delete the entity via its datastore key.
                query_results[0].key.delete()
                data = {'message': 'Deleted Order:' + order_id}
                json_response = json.dumps(data)
                self.response.headers['Content-Type'] = 'text/x-json'
                self.response.write(json_response)
    def put(self):
        """Add a new order or update an existing one from the request JSON."""
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            orderjson = json.loads(self.request.body)
            # Bug fix: a leftover copy-pasted "publisher exists" guard queried
            # Order.id == orderjson["order"], which rejected every new order
            # with a 404 (and KeyError'd when the client sent no "order"
            # field). Removed; the other resource handlers perform no such
            # pre-check either.
            orders_query = Order.query(Order.id == orderjson["id"])
            query_results = orders_query.fetch()
            if len(query_results) == 0:
                # New order: create and store a fresh entity.
                order = Order(parent=data_store_key(BOOKSTORE_NAME))
                order.id = orderjson["id"]
                order.item = orderjson["item"]
                order.user = orderjson["user"]
                order.cart = orderjson["cart"]
                order.totalPrice = float(orderjson["totalPrice"])
                order.put()
                message = 'Added Order (PUT):' + orderjson["id"]
            else:
                # Existing order: update it in place.
                order = query_results[0]
                order.item = orderjson["item"]
                order.user = orderjson["user"]
                order.cart = orderjson["cart"]
                order.totalPrice = float(orderjson["totalPrice"])
                order.put()
                message = 'Updated Order (PUT):' + orderjson["id"]
            # Return a confirmation message to the client.
            json_response = json.dumps({'message': message})
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json_response)
class ProductServiceHandler(webapp2.RequestHandler):
    """REST handler for the product resource (/product)."""
    def get(self):
        """Return every product as JSON: {"products": [...]}."""
        products_query = Product.query()
        products = products_query.fetch()
        result = []
        for p in products:
            # Serialize each product entity into a plain dictionary.
            product = {}
            product['id'] = p.id
            product['nameProduct'] = p.nameProduct
            product['supplier'] = p.supplier
            product['price'] = p.price
            product['stockTotal'] = p.stockTotal
            result.append(product)
        r = {'products': result}
        self.response.headers['Content-Type'] = 'text/x-json'
        self.response.write(json.dumps(r, separators=(',', ':')))
    def put(self):
        """Add a new product, or update the name of an existing one."""
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            productjson = json.loads(self.request.body)
            # Check whether we already have an entry for that product.
            products_query = Product.query(Product.id == productjson["id"])
            query_results = products_query.fetch()
            if len(query_results) == 0:
                # No entity with this id yet: create a new product.
                product = Product(parent=data_store_key(PUBSTORE_NAME))
                product.id = productjson["id"]
                product.nameProduct = productjson["nameProduct"]
                product.supplier = productjson["supplier"]
                product.price = productjson["price"]
                product.stockTotal = productjson["stockTotal"]
                product.put()
                message = 'Added Product (PUT):' + productjson["id"]
            else:
                # Existing product: only the display name is updatable here.
                product = query_results[0]
                product.nameProduct = productjson["nameProduct"]
                product.put()
                message = 'Updated Product (PUT):' + productjson["id"]
            # Return a confirmation message to the client.
            json_response = json.dumps({'message': message})
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json_response)
    def delete(self, product_id):
        """Delete the product with the given id (blocked while items reference it)."""
        logging.info("Product delete called:" + product_id)
        # Bug fix: this guard previously queried the undefined names
        # Product.publisher / publisher_id and raised NameError on every call.
        # The intent is to refuse deletion while other entities still
        # reference this product.
        # NOTE(review): assumes Item.product stores the product id - confirm.
        items_query = Item.query(Item.product == product_id)
        if len(items_query.fetch()) != 0:
            # Refuse: an item entry is still using this product.
            self.error(405)  # method not allowed
        else:
            current_user = users.get_current_user()
            if not current_user:
                self.error(403)  # access denied
            else:
                products_query = Product.query(Product.id == product_id)
                query_results = products_query.fetch()
                if len(query_results) == 0:
                    self.error(404)  # no such product
                else:
                    # Delete the entity via its datastore key.
                    query_results[0].key.delete()
                    data = {'message': 'Deleted Product:' + product_id}
                    json_response = json.dumps(data)
                    self.response.headers['Content-Type'] = 'text/x-json'
                    self.response.write(json_response)
class ItemServiceHandler(webapp2.RequestHandler):
    """REST handler for the item resource (/item)."""
    def get(self):
        """Return every item as JSON: {"items": [...]}."""
        items_query = Item.query()
        items = items_query.fetch()
        result = []
        for p in items:
            # Serialize each item entity into a plain dictionary.
            item = {}
            item['id'] = p.id
            item['product'] = p.product
            item['quantity'] = p.quantity
            result.append(item)
        r = {'items': result}
        self.response.headers['Content-Type'] = 'text/x-json'
        self.response.write(json.dumps(r, separators=(',', ':')))
    def put(self):
        """Add a new item, or update an existing item's product/quantity."""
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            itemjson = json.loads(self.request.body)
            # Check whether we already have an entry for that item.
            items_query = Item.query(Item.id == itemjson["id"])
            query_results = items_query.fetch()
            if len(query_results) == 0:
                # No entity with this id yet: create a new item.
                item = Item(parent=data_store_key(PUBSTORE_NAME))
                item.id = itemjson["id"]
                item.product = itemjson["product"]
                item.quantity = itemjson["quantity"]
                item.put()
                message = 'Added Item (PUT):' + itemjson["id"]
            else:
                # Bug fix: the update branch assigned item.name from
                # itemjson["name"], but Item carries no 'name' attribute and
                # the request JSON (see the add branch) carries
                # product/quantity, so every update raised.
                item = query_results[0]
                item.product = itemjson["product"]
                item.quantity = itemjson["quantity"]
                item.put()
                message = 'Updated Item (PUT):' + itemjson["id"]
            # Return a confirmation message to the client.
            json_response = json.dumps({'message': message})
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json_response)
    def delete(self, item_id):
        """Delete the item with the given id (blocked while carts reference it)."""
        logging.info("Item delete called:" + item_id)
        # Bug fix: this guard previously queried the undefined names
        # Item.publisher / publisher_id and raised NameError on every call.
        # The intent is to refuse deletion while other entities still
        # reference this item.
        # NOTE(review): assumes Cart.item stores the item id - confirm.
        carts_query = Cart.query(Cart.item == item_id)
        if len(carts_query.fetch()) != 0:
            # Refuse: a cart entry is still using this item.
            self.error(405)  # method not allowed
        else:
            current_user = users.get_current_user()
            if not current_user:
                self.error(403)  # access denied
            else:
                items_query = Item.query(Item.id == item_id)
                query_results = items_query.fetch()
                if len(query_results) == 0:
                    self.error(404)  # no such item
                else:
                    # Delete the entity via its datastore key.
                    query_results[0].key.delete()
                    data = {'message': 'Deleted Item:' + item_id}
                    json_response = json.dumps(data)
                    self.response.headers['Content-Type'] = 'text/x-json'
                    self.response.write(json_response)
class CartServiceHandler(webapp2.RequestHandler):
    """REST handler for the shopping cart resource (/cart)."""
    def get(self):
        """Return every cart as JSON: {"carts": [...]}."""
        logging.info("cart GET called")
        carts_query = Cart.query()
        carts = carts_query.fetch()
        result = []
        for b in carts:
            # Serialize each cart entity into a plain dictionary.
            cart = {}
            cart['id'] = b.id
            cart['item'] = b.item
            cart['user'] = b.user
            # totalPrice is stored as a float; serialized as a string for the client.
            cart['totalPrice'] = str(b.totalPrice)
            result.append(cart)
        r = {'carts': result}
        self.response.headers['Content-Type'] = 'text/x-json'
        self.response.write(json.dumps(r, separators=(',', ':')))
    def delete(self, cart_id):
        """Delete the cart with the given id."""
        # Bug fix: the log line referenced the undefined name order_id and
        # raised NameError on every call.
        logging.info("Cart delete called:" + cart_id)
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            carts_query = Cart.query(Cart.id == cart_id)
            query_results = carts_query.fetch()
            if len(query_results) == 0:
                self.error(404)  # no such cart
            else:
                # Delete the entity via its datastore key.
                query_results[0].key.delete()
                data = {'message': 'Deleted cart:' + cart_id}
                json_response = json.dumps(data)
                self.response.headers['Content-Type'] = 'text/x-json'
                self.response.write(json_response)
    def put(self):
        """Add a new cart or update an existing one from the request JSON."""
        current_user = users.get_current_user()
        if not current_user:
            self.error(403)  # access denied
        else:
            cartjson = json.loads(self.request.body)
            # Bug fix: this method previously ran three stray queries copied
            # from other handlers - two referenced the undefined names
            # user_id and itemjson (NameError on every request), and a guard
            # on Cart.id == cartjson["cart"] 404'd every new cart. Removed.
            carts_query = Cart.query(Cart.id == cartjson["id"])
            query_results = carts_query.fetch()
            if len(query_results) == 0:
                # New cart: create and store a fresh entity.
                cart = Cart(parent=data_store_key(BOOKSTORE_NAME))
                cart.id = cartjson["id"]
                cart.item = cartjson["item"]
                cart.user = cartjson["user"]
                cart.totalPrice = float(cartjson["totalPrice"])
                cart.put()
                message = 'Added cart (PUT):' + cartjson["id"]
            else:
                # Existing cart: update it in place.
                cart = query_results[0]
                cart.item = cartjson["item"]
                cart.user = cartjson["user"]
                cart.totalPrice = float(cartjson["totalPrice"])
                cart.put()
                message = 'Updated cart (PUT):' + cartjson["id"]
            # Return a confirmation message to the client.
            json_response = json.dumps({'message': message})
            self.response.headers['Content-Type'] = 'text/x-json'
            self.response.write(json_response)
logging.info("STARTING UP")
# Seed the datastore with catalogue data the first time the application runs.
init_suppliers()
init_products()
init_items()
# URL routing table: each resource gets a collection route and an id route.
# Bug fix: /item and /cart previously routed to ProductServiceHandler, so the
# Item and Cart handlers were unreachable.
application = webapp2.WSGIApplication([
    ('/users', UserServiceHandler),
    (r'/users/(\d+)', UserServiceHandler),
    ('/supplier', SupplierServiceHandler),
    (r'/supplier/(\d+)', SupplierServiceHandler),
    ('/order', OrderServiceHandler),
    (r'/order/(\d+)', OrderServiceHandler),
    ('/product', ProductServiceHandler),
    (r'/product/(\d+)', ProductServiceHandler),
    ('/item', ItemServiceHandler),
    (r'/item/(\d+)', ItemServiceHandler),
    ('/cart', CartServiceHandler),
    (r'/cart/(\d+)', CartServiceHandler),
], debug=True)
| ethornbury/PythonCart | cartservice.py | cartservice.py | py | 29,695 | python | en | code | 0 | github-code | 50 |
19133322891 | import os
import sys
class colors:
    """ANSI escape sequences used to colorize and style terminal output."""
    cyan = '\033[96m'
    green = '\033[92m'
    red = '\033[91m'
    normal = '\033[0m'  # reset all colors/attributes
    bold = '\033[1m'
    underline = '\033[4m'
    yellow = '\033[93m'
class ParensStack:
    """Interactive stack-based bracket matcher.

    Validates a string made only of the characters ()[]{} while printing the
    stack state step by step; print_state pauses for Enter between steps.
    """
    open_chars = set("[{(")
    close_chars = set("}])")
    def __init__(self, parens):
        # Reject any character that is not one of the six bracket characters.
        assert all([item in (self.open_chars | self.close_chars) for item in parens])
        self.parens = parens
    def push(self, item):
        """Push an opening bracket and display the stack."""
        self.container.append(item)
        self.print_state(f"PUSH {item}", colors.green)
        return None
    def process_paren(self, item):
        """Push openers; for closers, pop and assert that the opener matches."""
        if item in self.open_chars:
            self.push(item)
        else:
            # pop() raises IndexError on an empty stack; is_valid catches it.
            opener = self.pop()
            if item == "}":
                assert opener == "{"
            elif item == ")":
                assert opener == "("
            else:
                assert opener == "["
            self.print_state(f"POP {opener}", colors.red)
    def is_valid(self):
        """Walk the string; return True iff every bracket is matched."""
        self.container = list()
        try:
            for i, c in enumerate(self.parens):
                print(" " + colors.green + self.parens[:i] + colors.red, colors.bold + self.parens[i] + colors.normal, self.parens[i+1:])
                self.process_paren(c)
        # Bug fix: a closing bracket with an empty stack raised an uncaught
        # IndexError (e.g. input ")"); it is now treated as invalid, like a
        # mismatched pair (AssertionError).
        except (AssertionError, IndexError):
            print(colors.red + "INVALID PARENS - END" + colors.normal)
            return False
        if not self.container:
            print(colors.green + "VALID PARENS - END" + colors.normal)
            return True
        # Bug fix: leftover unmatched openers previously fell through and
        # returned None silently; report them as invalid.
        print(colors.red + "INVALID PARENS - END" + colors.normal)
        return False
    def pop(self):
        return self.container.pop()
    def is_empty(self):
        return not self.container
    def get_container_string(self):
        # Render the stack like "[ ( { ]" with the brackets in bold.
        return "[ " + colors.bold + str(self.container)[1:-1].replace("'", "").replace(",", colors.normal + "," + colors.bold) + colors.normal + " ]"
    def print_state(self, action, color, timeout=2):
        # NOTE(review): the color and timeout parameters are currently unused;
        # kept for interface compatibility with existing callers.
        print(f"\n------------- State of stack ------------------\n{action} ----> " + self.get_container_string())
        input()
        os.system('cls' if os.name == 'nt' else 'clear')
        print("\n")
# Clear the terminal (Windows uses "cls", everything else "clear") and run the
# interactive validator on the bracket string given on the command line.
os.system('cls' if os.name == 'nt' else 'clear')
print('\n')
stack = ParensStack(sys.argv[1])
# stack = ParensStack("{(){[{(({}))}]}[]}")
stack.is_valid()
75180344475 | from bs4 import BeautifulSoup
import requests
import json
# SCRAPING PPG
url = "https://www.teamrankings.com/nba/player-stat/points"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
jockDict = dict()
for list in lists:
name = list.find('td', class_="text-left")
ppg = list.find('td', class_="text-right")
if name is not None and ppg is not None:
nameText = name.text.strip()
ppgText = float(ppg.text)
jockDict[nameText] = {"ppg" : ppgText}
# SCRAPING ASSISTS
url = "https://www.teamrankings.com/nba/player-stat/assists"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
for list in lists:
name = list.find('td', class_="text-left")
assists = list.find('td', class_="text-right")
if name is not None and assists is not None:
nameText = name.text.strip()
assistsText = float(assists.text)
if nameText in jockDict:
jockDict[nameText]["assists"] = assistsText
else:
jockDict[nameText] = {"assists" : assistsText}
# SCRAPING REBOUNDS
url = "https://www.teamrankings.com/nba/player-stat/rebounds"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
for list in lists:
name = list.find('td', class_="text-left")
rebounds = list.find('td', class_="text-right")
if name is not None and rebounds is not None:
nameText = name.text.strip()
reboundsText = float(rebounds.text)
if nameText in jockDict:
jockDict[nameText]["rebounds"] = reboundsText
else:
jockDict[nameText] = {"rebounds" : reboundsText}
# # SCRAPING STEALS
url = "https://www.teamrankings.com/nba/player-stat/steals"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
for list in lists:
name = list.find('td', class_="text-left")
steals = list.find('td', class_="text-right")
if name is not None and steals is not None:
nameText = name.text.strip()
stealsText = float(steals.text)
if nameText in jockDict:
jockDict[nameText]["steals"] = stealsText
else:
jockDict[nameText] = {"steals" : stealsText}
# # SCRAPING BLOCKS
url = "https://www.teamrankings.com/nba/player-stat/blocks"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
for list in lists:
name = list.find('td', class_="text-left")
blocks = list.find('td', class_="text-right")
if name is not None and blocks is not None:
nameText = name.text.strip()
blocksText = float(blocks.text)
if nameText in jockDict:
jockDict[nameText]["blocks"] = blocksText
else:
jockDict[nameText] = {"blocks" : blocksText}
# # SCRAPING TURNOVERS
url = "https://www.teamrankings.com/nba/player-stat/turnovers"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('tr')
for list in lists:
name = list.find('td', class_="text-left")
turnovers = list.find('td', class_="text-right")
if name is not None and turnovers is not None:
nameText = name.text.strip()
turnoversText = float(turnovers.text)
if nameText in jockDict:
jockDict[nameText]["turnovers"] = turnoversText
else:
jockDict[nameText] = {"turnovers" : turnoversText}
for player in jockDict.keys():
print(player)
# print(json.dumps(jockDict, sort_keys=False, indent=4))
# print(jockDict)
| daygodavy/nba-jock | nba-webscraper-final.py | nba-webscraper-final.py | py | 3,481 | python | en | code | 0 | github-code | 50 |
41632770724 | # -*- coding:utf-8 -*-
from functools import wraps
import time
__author__ = 'q00222219@huawei'
class RetryDecorator(object):
    """Decorator for retrying a function upon suggested exceptions.

    The decorated function is retried for the given number of times, and the
    sleep time between the retries is incremented until max sleep time is
    reached. If the max retry count is set to -1, then the decorated function
    is invoked indefinitely until an exception is thrown, and the caught
    exception is not in the list of suggested exceptions.
    """
    def __init__(self, max_retry_count=0, inc_sleep_time=1,
                 max_sleep_time=15, exceptions=()):
        """Configure the retry object using the input params.

        :param max_retry_count: maximum number of times the given function must
                                be retried when one of the input 'exceptions'
                                is caught. -1 retries indefinitely.
        :param inc_sleep_time: incremental time in seconds for sleep time
                               between retries.
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param exceptions: suggested exceptions for which the function must be
                           retried
        """
        self._max_retry_count = max_retry_count
        self._inc_sleep_time = inc_sleep_time
        self._max_sleep_time = max_sleep_time
        self._exceptions = exceptions
    def __call__(self, f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            attempts_left = self._max_retry_count
            delay = 0
            # Bug fix: the docstring promises indefinite retries for
            # max_retry_count == -1, but the original condition (mtries > 1)
            # made -1 behave like "no retries at all".
            while attempts_left == -1 or attempts_left > 1:
                try:
                    return f(*args, **kwargs)
                except self._exceptions:
                    # Sleep first (0 s before the first retry), then grow the
                    # delay, capping it at max_sleep_time.
                    time.sleep(delay)
                    if attempts_left != -1:
                        attempts_left -= 1
                    delay = min(delay + self._inc_sleep_time,
                                self._max_sleep_time)
            # Last permitted attempt: any exception now propagates to the caller.
            return f(*args, **kwargs)
        return f_retry  # true decorator
| hgqislub/hybird-orchard | code/cloudmanager/decorator.py | decorator.py | py | 2,329 | python | en | code | 1 | github-code | 50 |
33411033275 | import sys
import bean as be
# Command line: <input .h5ad path> <comma-separated replicate names> <output path>
bdata_path = sys.argv[1]
reps = sys.argv[2].split(",")
outfile_path = sys.argv[3]
# Load the screen data and keep only samples in the requested replicates.
# NOTE(review): assumes bdata.samples exposes a 'rep' column with an
# isin() method (pandas-like) - confirm against bean's API.
bdata = be.read_h5ad(bdata_path)
bdata_sub = bdata[:, bdata.samples.rep.isin(reps)]
bdata_sub.write(outfile_path)
| pinellolab/bean_manuscript | workflow/scripts/run_models/subset_screen.py | subset_screen.py | py | 237 | python | en | code | 0 | github-code | 50 |
18897888710 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Compute the histogram of each channel separately (translated from Chinese).
img = cv2.imread('/media/lc/8A986A3C986A26C3/model/data/m4.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#img_b, img_g, img_r = np.split(img)
# hist_b = cv2.calcHist([img0],[0],None,[256],[0,256])
# hist_g = cv2.calcHist([img0],[1],None,[256],[0,256])
# hist_r = cv2.calcHist([img0],[2],None,[256],[0,256])
def gamma_trans(img, gamma):
    """Apply gamma correction to an 8-bit image via a 256-entry lookup table.

    Pixel values are normalized to [0, 1], raised to the power `gamma`, and
    rescaled back to [0, 255]; cv2.LUT then maps every pixel through the
    table in a single pass.
    """
    # Build the LUT vectorized in NumPy instead of a per-element Python loop.
    levels = np.arange(256, dtype=np.float64) / 255.0
    gamma_table = np.round(np.power(levels, gamma) * 255.0).astype(np.uint8)
    return cv2.LUT(img, gamma_table)
# Apply gamma correction to the grayscale image and display all three views,
# waiting for a key press before exiting.
windows = {
    'img': img,
    'img_gray': img_gray,
    'gamma_image': gamma_trans(img_gray, 0.8),
}
for title, image in windows.items():
    cv2.imshow(title, image)
cv2.waitKey(0)
18023867441 | # -*- coding:utf-8 -*-
"""
@author: guoxiaorui
@file: 3_maximum_detonation
@time: 2021/12/12 1:31 上午
@desc:
"""
from typing import List
class Solution:
    def maximumDetonation(self, bombs: List[List[int]]) -> int:
        """Return the largest number of bombs detonated by igniting one bomb.

        Bomb src triggers bomb dst when dst's center lies within src's blast
        radius (compared with squared distances to stay in integers). The
        trigger relation is directed, so each start bomb gets its own BFS.
        """
        count = len(bombs)
        # triggers[src] = every bomb inside src's blast radius.
        triggers = {
            src: [
                dst
                for dst in range(count)
                if dst != src
                and (bombs[src][0] - bombs[dst][0]) ** 2
                + (bombs[src][1] - bombs[dst][1]) ** 2
                <= bombs[src][2] ** 2
            ]
            for src in range(count)
        }
        best = 0
        for start in range(count):
            # Breadth-first expansion of the chain reaction from `start`.
            seen = {start}
            frontier = [start]
            while frontier:
                nxt = []
                for cur in frontier:
                    for nb in triggers[cur]:
                        if nb not in seen:
                            seen.add(nb)
                            nxt.append(nb)
                frontier = nxt
            best = max(best, len(seen))
        return best
if __name__ == '__main__':
    # Quick manual check; this sample detonates all 5 bombs when starting
    # from the first one, so the expected output is 5.
    s = Solution()
    bombs = [[1, 2, 3], [2, 3, 1], [3, 4, 2], [4, 5, 3], [5, 6, 4]]
    print(s.maximumDetonation(bombs))
| sun10081/leetcode_practice_xiaorui | questions/week/2021/2021_12_11/3_maximum_detonation.py | 3_maximum_detonation.py | py | 1,078 | python | en | code | 0 | github-code | 50 |
18222047806 | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
@pytest.fixture
def driver(request):
    # Fresh Chrome session per test; quit is registered as a finalizer so the
    # browser is closed even when the test fails.
    driver = webdriver.Chrome()
    request.addfinalizer(driver.quit)
    return driver
def test_countries_admin(driver):
    """Admin countries page: the country list and each country's zones are sorted."""
    driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
    driver.find_element_by_name("username").send_keys("admin")
    driver.find_element_by_name("password").send_keys("admin")
    driver.find_element_by_name("login").click()
    WebDriverWait(driver, 5).until(
        EC.presence_of_element_located((By.ID, "content")))
    # Country names must appear in alphabetical order.
    list_countries = [i.get_attribute('innerText') for i in driver.find_elements_by_css_selector(
        'tr.row a:not([title = Edit])')]
    assert (list_countries == sorted(list_countries))
    list_countries_zones = [i.get_attribute('outerText') for i in driver.find_elements_by_css_selector('td:nth-child(6)')]
    # Bug fix: list.index(value) returns the FIRST occurrence, so countries
    # sharing the same zone count were all mapped to one row; enumerate keeps
    # the true row index of every country with a non-zero zone count.
    list_index_countries_zones = [index for index, zones in enumerate(list_countries_zones) if zones != '0']
    for index_country_zones in list_index_countries_zones:
        # Re-query the links after each navigation to avoid stale elements.
        country_with_zones = driver.find_elements_by_css_selector('tr.row a:not([title = Edit])')
        country_with_zones[index_country_zones].click()
        # Zone names inside the country page must also be sorted.
        list_zones = [i.get_attribute('textContent') for i in driver.find_elements_by_css_selector(
            '#table-zones tr>td:nth-child(3)') if i.get_attribute('textContent') != '']
        assert (list_zones == sorted(list_zones))
        driver.back()
def test_zones_admin(driver):
    """Geo-zones admin page: inside every geo zone, the selected zone names
    are listed in alphabetical order."""
    driver.get("http://localhost/litecart/admin/?app=geo_zones&doc=geo_zones")
    # Selenium 4 removed the find_element_by_* helpers; the By-based API
    # below works on both Selenium 3 and 4.
    driver.find_element(By.NAME, "username").send_keys("admin")
    driver.find_element(By.NAME, "password").send_keys("admin")
    driver.find_element(By.NAME, "login").click()
    WebDriverWait(driver, 5).until(
        EC.presence_of_element_located((By.CLASS_NAME, "row")))
    rows_left = len(driver.find_elements(By.CSS_SELECTOR, 'tr.row'))
    # Walk the zones from the last row to the first, re-finding the links
    # after every driver.back() (the old references go stale).
    while rows_left:
        rows_left -= 1
        country_zones = driver.find_elements(By.CSS_SELECTOR, 'td:nth-child(3)>a')
        country_zones[rows_left].click()
        zone_lists = [i.get_attribute('textContent') for i in driver.find_elements(By.CSS_SELECTOR, 'td:nth-child(3)>select option[selected]')]
        assert (zone_lists == sorted(zone_lists))
        driver.back()
7583912920 | import pandas as pd
import math
import numpy as np
###################################################################################
############### Make a class for matching process##################################
###################################################################################
class Matching:
    """One driver/passenger matching step of the ride-sharing simulation.

    `df` is the agent table (columns: P_D, ID, partner, X-Location,
    Y-Location) and `t` is the row index of the driver being matched.
    """

    def __init__(self, t, df, n_rd, n_p, v_cap, N, O_x, O_y):
        self.t = t            # row index (time step) of the driver to match
        self.df = df          # agent table
        self.n_rd = n_rd      # number of ride-sharing drivers so far
        self.n_p = n_p        # number of waiting passengers
        self.mode = 'NA'      # becomes 1 once a matching has been performed
        self.partners = []    # IDs of passengers matched so far
        self.v_cap = v_cap    # vehicle capacity
        self.N = N
        self.O_x = O_x        # x coordinate of the common origin
        self.O_y = O_y        # y coordinate of the common origin

    def distance_to_origin(self, id):
        """Euclidean distance between agent `id` and the origin (O_x, O_y)."""
        row = self.df.index[self.df['ID'] == id][0]
        dx = self.df.at[row, 'X-Location'] - self.O_x
        dy = self.df.at[row, 'Y-Location'] - self.O_y
        return math.hypot(dx, dy)

    def min_distance(self):
        """Match the driver at row `t` with the unmatched passenger that
        minimises (driver-to-passenger distance + passenger-to-origin
        distance). Returns the driver-to-passenger distance of the pick."""
        self.mode = 1
        # Book the seats: at most v_cap passengers leave the waiting pool.
        if self.n_p >= self.v_cap:
            self.n_p -= self.v_cap
        else:
            self.n_p = 0
        driver_x = self.df.loc[self.t, 'X-Location']
        driver_y = self.df.loc[self.t, 'Y-Location']
        candidate_ids = []
        candidate_gaps = []
        totals = []
        # Scan every earlier agent that is a still-unmatched passenger.
        for row in range(1, self.t):
            if self.df.loc[row, 'P_D'] != 'P' or self.df.loc[row, 'partner'] != '-':
                continue
            pax_id = self.df.loc[row, 'ID']
            gap = math.hypot(self.df.loc[row, 'X-Location'] - driver_x,
                             self.df.loc[row, 'Y-Location'] - driver_y)
            candidate_ids.append(pax_id)
            candidate_gaps.append(gap)
            totals.append(gap + self.distance_to_origin(pax_id))
        # First minimum wins on ties, matching list.index() semantics.
        best = totals.index(min(totals))
        self.partners.append(candidate_ids[best])
        return candidate_gaps[best]

    def run_matching(self):
        """Register one more ride-sharing driver and match if agents allow."""
        self.n_rd += 1
        if self.n_rd <= 0 or self.n_p <= 0:
            print("WARN: Matching wasn't executed due to insufficient agents")
            return
        self.min_distance()

    def get_N_rd(self):
        """Current number of ride-sharing drivers."""
        return self.n_rd

    def get_N_p(self):
        """Current number of waiting passengers."""
        return self.n_p

    def get_mode(self):
        """'NA' before any matching has run, 1 afterwards."""
        return self.mode

    def get_partners(self):
        """IDs of the passengers matched so far."""
        return self.partners
def main():
    """Smoke-test the Matching class on a small hand-made agent table."""
    agent_data = {
        'P_D': ['P', 'P', 'P', 'D', 'D', 'D'],
        'ID': [11, 12, 13, 14, 15, 16],
        'partner': ['5', '-', '-', '-', '-', '-'],
        'X-Location': [1, 2, 3, 4, 5, 6],
        'Y-Location': [1, 2, 3, 4, 5, 6],
    }
    n_rd = 0
    n_p = 3
    df = pd.DataFrame(agent_data)
    ma = Matching(3, df, n_rd, n_p, 1, 100, 2, 2)
    ma.run_matching()
    print('N_rd', ma.get_N_rd(), n_rd)
    print('N_p', ma.get_N_p(), n_p)
    print('partners', ma.get_partners())
    print('mode', ma.get_mode())
    print(ma.distance_to_origin(15))
# Guard so that main() runs only when this file is executed directly, not on import.
if __name__ == "__main__":
main()
| BAN-JY/ridesharing | matchingprocess.py | matchingprocess.py | py | 4,040 | python | en | code | 0 | github-code | 50 |
46786286288 | import math
import os
import random
import re
import sys
# String considered valid if all characters of the string appear the same number of times
# valid if removing 1 character at 1 index in the string
# given string s, determine if it is validIfo so return YES otherwise retunr no
def isValid(s):
    """Return 'YES' if every character of s occurs the same number of times,
    or if removing exactly one character (at one index) makes it so;
    otherwise return 'NO'.

    s -- lowercase ASCII string (HackerRank constraint: 1 <= |s| <= 10^5).
    """
    from collections import Counter
    # How often each character occurs, then how often each frequency occurs.
    char_counts = Counter(s)
    freq_of_freqs = Counter(char_counts.values())
    # Every character already has the same frequency.
    if len(freq_of_freqs) == 1:
        return 'YES'
    if len(freq_of_freqs) == 2:
        (low, low_n), (high, high_n) = sorted(freq_of_freqs.items())
        # Exactly one character occurs once more than all the others:
        # deleting one of its occurrences equalises the counts.
        if high_n == 1 and high - low == 1:
            return 'YES'
        # Exactly one character occurs exactly once: delete it entirely.
        if low_n == 1 and low == 1:
            return 'YES'
    return 'NO'
if __name__ == '__main__':
    # HackerRank harness: read the string from stdin and write the verdict
    # to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = isValid(s)
    fptr.write(result + '\n')
    fptr.close()
| yettsyjk/rockinAndRolling | validatingStrings.py | validatingStrings.py | py | 1,496 | python | en | code | 0 | github-code | 50 |
17565472607 | from typing import Dict
from edges import edge_list
from builders import build_graph
graph = build_graph(edge_list)
def undirected_has_path(*, graph: Dict, source: str, destination: str, visited: dict) -> bool:
    """Depth-first search: True iff `destination` is reachable from `source`
    in the undirected graph given as an adjacency mapping. `visited` guards
    against cycles and is mutated in place."""
    if source == destination:
        return True
    if visited.get(source):
        return False
    visited[source] = True
    return any(
        undirected_has_path(graph=graph, source=neighbour,
                            destination=destination, visited=visited)
        for neighbour in graph.get(source)
    )
if __name__ == '__main__':
    # Ad-hoc check against the graph built from the project's edge list.
    print(undirected_has_path(graph=graph, source='i', destination='k', visited={}))
| code-intensive/data_structures_and_algorithms_in_python | graphs/undirected_path.py | undirected_path.py | py | 666 | python | en | code | 0 | github-code | 50 |
class Solution:
    def kSimilarity(self, A, B):
        """
        Strings A and B are K-similar (for some non-negative integer K) if we
        can swap the positions of two letters in A exactly K times so that the
        resulting string equals B. Given two anagrams A and B, return the
        smallest K for which A and B are K-similar.

        Example: A = "abc", B = "bca" -> 2.

        Note: 1 <= len(A) == len(B) <= 20; chars are from {'a'..'f'}.

        :type A: str
        :type B: str
        :rtype: int

        Approach: BFS from A toward B, one swap per level, where
        1) the swapped word is the visited-set state, and
        2) expansion is pruned to swaps that fix the first mismatched char.
        """
        from collections import deque
        if A == B:
            return 0
        # Perf fix: list.pop(0) is O(n) per pop; deque.popleft() is O(1).
        queue = deque([A])
        visited = {A}
        changes = 0
        while queue:
            # Process one BFS level per `changes` increment.
            for _ in range(len(queue)):
                word = queue.popleft()
                if word == B:
                    return changes
                for swapped in self.swap_words(word, B):
                    if swapped in visited:
                        continue
                    visited.add(swapped)
                    queue.append(swapped)
            changes += 1
        return -1

    def swap_words(self, word, B):
        """Return the set of words obtained from `word` by one useful swap:
        the first mismatched position is swapped with a later position that
        holds the character B expects there. Assumes word != B."""
        # Skip the already-matched prefix.
        start = 0
        for i in range(len(word)):
            if word[i] != B[i]:
                start = i
                break
        swapped_words = set()
        for j in range(start + 1, len(word)):
            # Only swaps that place the required character at `start` help.
            if word[j] != B[start]:
                continue
            chars = list(word)
            chars[start], chars[j] = chars[j], chars[start]
            swapped_words.add("".join(chars))
        return swapped_words
# Demo: a single swap turns "ab" into "ba", so this prints 1.
# (Fix: the original second line had dataset-extraction junk fused after the
# closing parenthesis, which made it a syntax error.)
s = Solution()
print(s.kSimilarity("ab", "ba"))
34325490634 | import numpy as np
import pandas as pd
import random
def FindInterval(a, arr):
    """Return the index of the first element of arr that is >= a.

    Falls through (returning None) when no element qualifies.
    """
    for index, bound in enumerate(arr):
        if bound >= a:
            return index
V0 = 500
p = 2  # NOTE(review): defined but never used below.
VertexList = [i for i in range(V0)]
ArcList = [[] for i in range(V0)]
# Seed graph G0: give each of the V0 vertices two random neighbours
# (undirected — recorded in both adjacency lists; duplicates skipped).
for i in range(V0):
    randedge = random.sample(range(V0), 2)
    if randedge[0] not in ArcList[i]:
        ArcList[i].append(randedge[0])
    if randedge[1] not in ArcList[i]:
        ArcList[i].append(randedge[1])
    if i not in ArcList[randedge[0]]:
        ArcList[randedge[0]].append(i)
    if i not in ArcList[randedge[1]]:
        ArcList[randedge[1]].append(i)
E0 = 0
for i in range(V0):
    E0 += len(ArcList[i])
print('E0:'+str(E0))
# At this point the seed graph G0 is complete.
# Grow the preferential-attachment (PA) graph: add 8790-500=8290 new
# vertices, each with 1, 2 or 3 edges attached to existing vertices with
# probability proportional to their degree.
New = 8290
curnum = E0
num = [0]
for j in range(New):
    VertexList.append(V0+j)
    ArcList.append([])
    num = random.sample([1, 2, 3], 1)
    # Cumulative degree distribution over the existing vertices; a uniform
    # random number mapped through it picks a vertex with probability
    # proportional to its degree (preferential attachment).
    dist = []
    tail = 0
    for i in range(V0+j):
        tail += len(ArcList[i])
        dist.append(tail/curnum)
    # print('tail:'+str(tail)+'. curnum:'+str(curnum))
    # print('dist:'+str(dist))
    for i in range(num[0]):
        ran = random.random()
        # print('ran:'+str(ran))
        x = FindInterval(ran, dist)
        if V0+j not in ArcList[x]:
            ArcList[x].append(V0+j)
            curnum += 1
            ArcList[V0+j].append(x)
            curnum += 1
E = 0
for i in range(len(ArcList)):
    E += len(ArcList[i])
print('E:'+str(E))
# NOTE(review): np.array over ragged adjacency lists yields an object array;
# np.max then compares lists — presumably only used for eyeballing. Confirm.
ArcArr = np.array(ArcList)
for i in range(len(ArcArr)):
    print(len(ArcArr[i]))
print(np.max(ArcArr))
# The PA sybil region is built. Before merging it with the Facebook social
# network dataset, dump the generated edge list to a text file.
fileStr = ''
for i in range(len(ArcArr)):
    for j in range(len(ArcArr[i])):
        fileStr += str(VertexList[i])+' '+str(ArcArr[i][j])+'\n'
File = open('PAnetwork.txt', 'w')
File.write(fileStr)
File.close()
File.close()
| Caohanwen0/Sybil-Detection | unsupervised/SybilSCAR/PAGenerativeNetwork.py | PAGenerativeNetwork.py | py | 2,159 | python | en | code | 0 | github-code | 50 |
22933416067 | from flask import request,Response
from datetime import datetime
from flaskr.common_method import db_setting, security,list_method,splicing_list
import requests
import json
def article_like(app):
    """Register the article like/unlike and liked-article-list routes on `app`.

    SECURITY NOTE(review): every query below interpolates request-derived
    values into SQL with %-formatting — this is vulnerable to SQL injection.
    Switch db_setting.my_db to parameterised queries if its API allows it.
    """
    @app.route('/article_like', methods=['post'])
    def article_like():  # like / unlike an article
        token = request.headers['access_token']  # token from the request headers
        parse_token = security.parse_token(token)  # decode the token
        # parse_token returns 1/2/3 for the error cases, payload dict on success.
        if (parse_token == 1):
            return {"code": 1, "message": "token已过期", "success": "false"}
        elif (parse_token == 2):
            return {"code": 2, "message": "token认证失败", "success": "false"}
        elif (parse_token == 3):
            return {"code": 3, "message": "非法的token", "success": "false"}
        else:
            userid = (parse_token['data']['userid'])  # user id from the token payload
            article_id = request.json.get('article_id')  # target article id
            status = request.json.get('status')  # 1 = like, 0 = unlike
            if (article_id):  # article_id is required
                sql = "select id from article where id='%s'" % (article_id)
                if (db_setting.my_db(sql)):  # does the article exist?
                    sql2 = "select click_status from article_click where user_id='%s'and article_id='%s'" % (userid,article_id)  # existing like record, if any
                    if(status==0):  # status 0: unlike
                        if (db_setting.my_db(sql2) and db_setting.my_db(sql2)[0][0] == 1):  # currently liked -> flip to unliked
                            update_time = datetime.utcnow()
                            sql3 = "UPDATE article_click SET click_status=0,update_time = '%s' WHERE user_id = '%s' and article_id='%s'" % (
                                update_time, userid,article_id)
                            db_setting.my_db(sql3)
                            return {"code": 200, "message": "取消喜欢成功","success":"true"}
                        elif(db_setting.my_db(sql2) and db_setting.my_db(sql2)[0][0] == 0):
                            return {"code": 500, "message": "已取消喜欢,无法再取消喜欢","success":"false"}
                        else:
                            return {"code": 500, "message": "取消喜欢失败,未找到记录","success":"false"}
                    elif(status==1):  # status 1: like
                        if (db_setting.my_db(sql2) and db_setting.my_db(sql2)[0][0] == 0):  # previously unliked -> flip back
                            update_time = datetime.utcnow()
                            sql3 = "UPDATE article_click SET click_status=1,update_time = '%s' WHERE user_id = '%s' and article_id='%s'" % (
                                update_time, userid,article_id)
                            db_setting.my_db(sql3)
                            return {"code": 200, "message": "更新喜欢成功","success":"true"}
                        elif(db_setting.my_db(sql2) and db_setting.my_db(sql2)[0][0] == 1):
                            return {"code": 500, "message": "喜欢失败,已喜欢","success":"false"}
                        else:
                            # no record yet -> insert a fresh like row
                            create_time = datetime.utcnow()
                            sql4 = "INSERT INTO article_click (user_id, article_id, click_status, create_time, update_time) VALUES ( '%s', '%s', '%s', '%s', '%s')" % (
                                userid, article_id, 1, create_time, create_time)
                            db_setting.my_db(sql4)
                            return {"code": 200, "message": "新增喜欢成功","success":"true"}
                    else:
                        return {"code": 500, "message": "请输入正确的状态码(0取消喜欢,1喜欢)","success":"false"}
                else:
                    return {"code": 500, "message": "不存在的文章","success":"false"}
            else:
                return {"code": 500, "message": 'article_id字段不能为空',"success":"false"}
    @app.route('/like_list', methods=['get'])
    def like_list():  # list the articles liked by the current user
        token = request.headers['access_token']  # token from the request headers
        parse_token = security.parse_token(token)  # decode the token
        if (parse_token == 1):
            return {"code": 1, "message": "token已过期", "success": "false"}
        elif (parse_token == 2):
            return {"code": 2, "message": "token认证失败", "success": "false"}
        elif (parse_token == 3):
            return {"code": 3, "message": "非法的token", "success": "false"}
        else:
            userid = (parse_token['data']['userid'])  # user id from the token payload
            # Liked, non-deleted articles, newest click first, excluding other
            # authors' private (view_status=0) articles.
            sql = "select a.user_id,b.id,b.article_title,author_id,article_content,view_status,article_img,b.create_time as article_createtime,a.update_time as click_time from article_click as a INNER JOIN article as b on a.article_id=b.id and a.user_id='%s' and a.click_status=1 and is_delete!=1 and article_id not in(select article_id from article_click as a INNER JOIN article as b on a.article_id=b.id and user_id!=author_id and view_status=0 and user_id='%s')ORDER BY click_time DESC" % (
                userid,userid)
            dict = {'user_id': '', 'article_id': '', 'article_title': '', 'author_id': '', 'article_content': '',
                    'view_status': '', 'article_img':'','article_create_time': '','click_time':''}
            like_list = list_method.list_method(sql, dict)
            user_message = {
                'user_name': '', 'avatar_image_url': ''
            }
            resp = []
            like_count_list = {'like_count': '', }
            like_status_list = {'like_status': '', }
            like_count_resp = []
            like_status_resp = []
            collect_status_resp = []
            collect_status_list = {'collect_status': '', }
            # Enrich every liked article with author info, like count and the
            # current user's like/collect status (one query per article each —
            # NOTE(review): classic N+1 query pattern).
            for num in like_list:
                if (num['author_id'] == ""):
                    last_list = []
                    break
                else:
                    # author's display name and latest avatar
                    sql1 = "select name,avatar_image_url from user_message as c INNER JOIN(select user_id,avatar_image_url from user_avatar_image as a INNER JOIN (select MAX(create_time) as create_time from user_avatar_image GROUP BY user_id)as b on a.create_time=b.create_time)as d on c.id=d.user_id and user_id='%s'" % (
                        num['author_id'])
                    user_message_list=json.dumps(list_method.list_method(sql1,user_message)[0])
                    resp.append(user_message_list)
                    article_list=splicing_list.splicing_list(like_list, resp)
                    # total number of likes on the article
                    sql2 = "select count(*)as like_count from article_click where article_id='%s' and click_status=1" % (
                        num['article_id'])
                    change_like_count_list = list_method.list_method(sql2, like_count_list)[0]
                    like_count_resp.append(json.dumps(change_like_count_list))
                    last_count_list = splicing_list.splicing_list(article_list, like_count_resp)
                    # whether the current user likes the article
                    sql3 = "select click_status as like_status from article_click where article_id='%s' and user_id='%s'" % (
                        num['article_id'], userid)
                    change_like_status_list = list_method.list_method(sql3, like_status_list)[0]
                    like_status_resp.append(json.dumps(change_like_status_list))
                    last_status_list = splicing_list.splicing_list(last_count_list, like_status_resp)
                    # whether the current user has collected (bookmarked) the article
                    sql4 = "select collect_status from article_collect where article_id='%s' and user_id='%s'" % (
                        num['article_id'], userid)
                    change_collect_status_list = list_method.list_method(sql4, collect_status_list)[0]
                    collect_status_resp.append(json.dumps(change_collect_status_list))
                    last_list = splicing_list.splicing_list(last_status_list, collect_status_resp)
            return {"code": 200, "message": "ok","data":last_list,"success":"true"}
            # return Response(json.dumps(like_list, ensure_ascii=False), mimetype='application/json')  # alternative: return the raw JSON list
26203038438 | from io import BytesIO
from pathlib import Path
import httpx
from openai import AsyncOpenAI
from openai.types.chat import (
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionSystemMessageParam,
)
from openai.types.image import Image
from typing import Any, Callable, Coroutine, Generic, List, Literal, Optional, TypeVar, Union
from pydantic import BaseModel
class Channel(BaseModel):
    """Connection settings for one OpenAI-compatible API endpoint."""
    api_key: str = ""
    base_url: Optional[str] = None
    organization: Optional[str] = None
class ToolCallResponse:
    """
    ToolCallResponse wraps the result of one tool invocation.

    Attributes:
        name (str): name of the tool.
        content_type (Literal["str", "openai_image", "image", "audio"]): format of the content sent to the user.
        content (Optional[Union[Any, str, Image, bytes, Path]]): payload sent to the user.
        data (Optional[str]): text reply sent back to OpenAI.
    """
    def __init__(
        self,
        name: str,
        content_type: Literal["str", "openai_image", "image", "audio"],
        content: Optional[Union[Any, str, Image, bytes, Path]] = None,
        data: Optional[str] = None,
    ):
        self.name = name
        self.content_type = content_type
        self.content = content
        self.data = data
class ToolCallConfig(BaseModel):
    """Per-tool configuration: display name plus an on/off switch."""
    name: str
    enable: bool = True
class ToolCall:
    """
    ToolCall bundles a callable tool together with its metadata.

    Attributes:
        name (str): name of the tool.
        func (Callable[..., Coroutine[Any, Any, ToolCallResponse]]):
            coroutine function implementing the tool; accepts arbitrary
            arguments and returns a ToolCallResponse.
        func_info (dict): extra information about the tool function.
        config (ToolCallConfig): configuration of the tool.
    """
    def __init__(
        self,
        name: str = "",
        func: Callable[..., Coroutine[Any, Any, ToolCallResponse]] = None,
        func_info: dict = None,
        config: Optional[ToolCallConfig] = None,
    ):
        self.name = name
        self.func = func
        self.func_info = func_info
        # Bug fix: the default used to be `config=ToolCallConfig(name="Unknown")`,
        # a single instance created at class-definition time and shared by every
        # ToolCall (mutable default argument). Create a fresh default per instance.
        self.config = config if config is not None else ToolCallConfig(name="Unknown")
class ToolCallRequest:
    """A pending tool invocation: the raw OpenAI tool call together with the
    resolved implementation and its configuration."""
    tool_call: ChatCompletionMessageToolCall
    func: Callable[..., Coroutine[Any, Any, ToolCallResponse]]
    config: ToolCallConfig
    def __init__(
        self,
        tool_call: ChatCompletionMessageToolCall,
        func: Callable[..., Coroutine[Any, Any, ToolCallResponse]],
        config: ToolCallConfig,
    ):
        self.tool_call = tool_call
        self.func = func
        self.config = config
class Preset(BaseModel):
    """A named system prompt."""
    name: str
    prompt: str
class Session(BaseModel):
    """Conversation state for one chat session."""
    id: str
    messages: List[ChatCompletionMessageParam] = []
    user: str = ""
    preset: Optional[Preset] = None
    max_length: int = 8  # number of most recent messages kept in the context window
    running: bool = False
    def get_messages(self, preset: Preset = None):
        """Return the prompt messages: the effective preset (session preset
        wins over the argument) as a system message, followed by the last
        `max_length` messages."""
        if self.preset:
            preset = self.preset
        _preset = []
        if preset:
            _preset = [
                ChatCompletionSystemMessageParam(content=preset.prompt, role="system")
            ]
        return _preset + self.messages[-self.max_length :]
T = TypeVar('T', bound=ToolCallConfig)
class FuncContext(Generic[T]):
    """Execution context handed to a tool function: the chat session, the
    shared HTTP/OpenAI clients and the tool's own typed configuration."""
    session: Session
    http_client: httpx.AsyncClient
    openai_client: AsyncOpenAI
    config: T
    def __init__(
        self,
        session: Session,
        http_client: httpx.AsyncClient,
        openai_client: AsyncOpenAI,
        config: T,
    ):
        self.session = session
        self.http_client = http_client
        self.openai_client = openai_client
        self.config = config
| kpister/prompt-linter | data/scraping/repos/AkashiCoin~nonebot-plugin-openai/nonebot_plugin_openai~types.py | nonebot_plugin_openai~types.py | py | 3,736 | python | en | code | 0 | github-code | 50 |
31607181190 | from src import app, db
from src.models import Event, Ticket
from flask import render_template
@app.route("/", methods=["GET"])
def homepage():
events = Event.query.all()
return render_template("index.html", events=events)
@app.route("/event/<event_id>", methods=["GET"])
def event_visualizer(event_id):
tickets = Ticket.query.filter_by(event_id=event_id).all()
event = Event.query.where(Event.id == event_id).first()
return render_template("event.html", event=event, tickets=tickets)
@app.route("/event", methods=["POST"])
def add_event(event_name, event_date, total_tickets):
event = Event(event_name=event_name, event_date=event_date, total_tickets=total_tickets)
db.session.add(event)
db.session.flush()
ticket = Ticket(event_id=event.id)
for i in range(total_tickets):
db.session.add(ticket)
db.session.commit()
tickets = Ticket.query.filter_by(event_id=event.id).all()
return render_template("event.html", event=event, tickets=tickets)
@app.route("/redeem/<event_id>/<ticket_id>", methods=["GET"])
def redeem_ticket(event_id, ticket_id):
ticket = Ticket.query.get_or_404(ticket_id)
event = Event.query.get(event_id)
if ticket.redeemed == True:
return "Ticket already redeemed!", 410
ticket.redeemed = True
event.redeemed_tickets += 1
db.session.commit()
return render_template("redeemed_ticket.html", ticket=ticket, event_name=event.name) | Ytalow/Flask-tickets | src/routes.py | routes.py | py | 1,444 | python | en | code | 0 | github-code | 50 |
14759883343 | import logging
import time
import weatherlink_live_local as wlll
logging.basicConfig(level=logging.INFO)
def main():
    """Discover a WeatherLink Live station on the LAN and poll it forever."""
    devices = wlll.discover()
    print(devices)
    # select first device, get IP address
    ip_first_device = devices[0].ip_addresses[0]
    # specify units
    wlll.set_units(
        temperature=wlll.units.TemperatureUnit.CELSIUS,
        pressure=wlll.units.PressureUnit.HECTOPASCAL,
        rain=wlll.units.RainUnit.MILLIMETER,
        wind_speed=wlll.units.WindSpeedUnit.METER_PER_SECOND,
    )
    # poll sensor data / conditions every 10 s; terminate with Ctrl+C
    while True:
        conditions = wlll.get_conditions(ip_first_device)
        print(f"Inside temperature: {conditions.inside.temp:.2f} °C")
        print(f"Outside temperature: {conditions.integrated_sensor_suites[0].temp:.2f} °C")
        time.sleep(10)
if __name__ == "__main__":
main()
| lukasberbuer/weatherlink-live-local-python | examples/basic.py | basic.py | py | 865 | python | en | code | 1 | github-code | 50 |
30847005898 | import pandas as pd
import math
import matplotlib.pyplot as plt
def benfordNumber(benfordTest, reference=None):
    """Return the L1 distance (sum of absolute differences) between an
    observed leading-digit distribution and a reference distribution.

    benfordTest -- observed percentages per leading digit (1..9).
    reference   -- distribution to compare against; defaults to the
                   module-level Benford percentages.
    """
    if reference is None:
        reference = benford
    # Use the built-in sum over a generator instead of shadowing `sum`
    # with a local accumulator as the original did.
    return sum(abs(observed - expected)
               for observed, expected in zip(benfordTest, reference))
# Benford's-law percentages for leading digits 1..9: log10(1 + 1/d) * 100.
benford = [math.log10(1+1/value)*100 for value in range(1,10)]
digits = [value for value in range(1,10)]
data = pd.read_csv("covid_19_data.csv")
CountryList = data['Country/Region'].unique()
# confirmed = data['Confirmed'].to_numpy()
confirmed = data.loc[data['Country/Region'] == 'Mexico']['Confirmed'].to_numpy()
# Tally the leading digit of every non-zero confirmed-case count.
freq = [0 for value in range(1,10)]
count = 0
for val in confirmed:
    if val != 0:
        # NOTE(review): assumes val >= 1; values in (0, 1) would produce a
        # negative digitCount and a wrong leading digit — confirm the data.
        digitCount = int(math.log10(val))
        lead = int((val / pow(10, digitCount)))
        freq[lead-1] += 1
        count += 1
benfordTest = ([(val / count) * 100 for val in freq])
# Plotting
# NOTE(review): the label says 'US Numbers' but the data above is filtered
# to Mexico — one of the two is presumably stale.
plt.plot(digits, benford, label = 'Benford Numbers')
plt.plot(digits, benfordTest, label = 'US Numbers')
plt.title('Benford Test')
plt.ylabel('Frequency of leading digits')
plt.xlabel('Digits')
plt.legend(loc="upper right")
plt.show()
| mm909/BenfordsLaw | COVID/single.py | single.py | py | 1,041 | python | en | code | 0 | github-code | 50 |
1518490123 | from django.shortcuts import render_to_response, render, redirect
import requests
from bottle import request, route, run
from django.http import HttpResponse
from booker.models import Customer, Book
from booker.forms import CustomerForm, BookForm
def home(request):
    """Render the landing page."""
    return render(request, "home.html")
def new_book(request):
    """Create a new book from the submitted form.

    On a valid POST, redirect to the book list; otherwise render the form
    (bound with errors on an invalid POST, unbound on GET).
    """
    form = BookForm(request.POST) if request.method == "POST" else BookForm()
    if request.method == "POST" and form.is_valid() and form.save():
        return redirect("/show_books")
    return render(request, "new_book.html", {"book_form": form})
def show_books(request):
    """List all books and customers.

    Uses render() for consistency with the other views — render_to_response
    was deprecated in Django 2.0 and removed in Django 3.0.
    """
    books = Book.objects.all()
    customers = Customer.objects.all()
    data = {"books": books, "customers": customers}
    return render(request, "show_books.html", data)
def show_book(request, book_id):
    """Display a single book."""
    book = Book.objects.get(id=book_id)
    return render(request, "show_book.html", {"book": book})
def edit_book(request, book_id):
    """Edit an existing book; redirect to the book list on success."""
    book = Book.objects.get(id=book_id)
    if request.method == "POST":
        form = BookForm(request.POST, instance=book)
        if form.is_valid():
            if form.save():
                return redirect("/show_books/")
    else:
        # Bug fix: bind the form to the existing book so the edit page shows
        # the current values instead of an empty form.
        form = BookForm(instance=book)
    data = {"book": book, "book_form": form}
    return render(request, "edit_book.html", data)
def add_owner(request):
    """Create a new customer; redirect to the book list on success."""
    form = CustomerForm(request.POST) if request.method == "POST" else CustomerForm()
    if request.method == "POST" and form.is_valid() and form.save():
        return redirect("/show_books/")
    return render(request, "add_owner.html", {"customer_form": form})
def show_customers(request):
    """List all customers (books are passed along for the shared template)."""
    books = Book.objects.all()
    customers = Customer.objects.all()
    return render(request, "show_customers.html", {"books": books, "customers": customers})
def add_current_customer(request, book_id):
    """Render the page for attaching an existing customer to a book."""
    book = Book.objects.get(id=book_id)
    customers = Customer.objects.all()
    return render(request, "add_current_customer.html", {"customers": customers, "book": book})
| Holl/BookStore | booker/views.py | views.py | py | 2,096 | python | en | code | 0 | github-code | 50 |
4639183369 | from io import BytesIO
from typing import Callable, Optional, Union, overload
from faker import Faker
from faker.generator import Generator
from faker.providers import BaseProvider
from faker.providers.python import Provider
from odf.opendocument import OpenDocumentText
from odf.text import P
from ..base import (
DEFAULT_FORMAT_FUNC,
BytesValue,
DynamicTemplate,
FileMixin,
StringValue,
)
from ..constants import DEFAULT_TEXT_MAX_NB_CHARS
from ..registry import FILE_REGISTRY
from ..storages.base import BaseStorage
from ..storages.filesystem import FileSystemStorage
__author__ = "Artur Barseghyan <artur.barseghyan@gmail.com>"
__copyright__ = "2022-2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = ("OdtFileProvider",)
class OdtFileProvider(BaseProvider, FileMixin):
    """ODT file provider.
    Usage example:
    .. code-block:: python
        from faker import Faker
        from faker_file.providers.odt_file import OdtFileProvider
        FAKER = Faker()
        FAKER.add_provider(OdtFileProvider)
        file = FAKER.odt_file()
    Usage example with options:
    .. code-block:: python
        file = FAKER.odt_file(
            prefix="zzz",
            max_nb_chars=100_000,
            wrap_chars_after=80,
        )
    Usage example with `FileSystemStorage` storage (for `Django`):
    .. code-block:: python
        from django.conf import settings
        from faker_file.storages.filesystem import FileSystemStorage
        file = FAKER.odt_file(
            storage=FileSystemStorage(
                root_path=settings.MEDIA_ROOT,
                rel_path="tmp",
            ),
            prefix="zzz",
            max_nb_chars=100_000,
            wrap_chars_after=80,
        )
    Usage example with content modifiers:
    .. code-block:: python
        from faker_file.base import DynamicTemplate
        from faker_file.providers.jpeg_file import JpegFileProvider
        from faker_file.contrib.odt_file import (
            add_h1_heading,
            add_h2_heading,
            add_h3_heading,
            add_h4_heading,
            add_h5_heading,
            add_h6_heading,
            add_page_break,
            add_paragraph,
            add_picture,
            add_table,
        )
        file = FAKER.odt_file(
            content=DynamicTemplate(
                [
                    (add_h1_heading, {}),
                    (add_paragraph, {}),
                    (add_h2_heading, {}),
                    (add_h3_heading, {}),
                    (add_h4_heading, {}),
                    (add_h5_heading, {}),
                    (add_h6_heading, {}),
                    (add_paragraph, {}),
                    (add_picture, {}),
                    (add_page_break, {}),
                    (add_h6_heading, {}),
                    (add_table, {}),
                    (add_paragraph, {}),
                ]
            )
        )
    """
    # File extension used for generated file names.
    extension: str = "odt"
    # Typing-only overloads: raw=True yields BytesValue, raw omitted/False
    # yields StringValue. The runtime implementation follows both stubs.
    @overload
    def odt_file(
        self: "OdtFileProvider",
        storage: Optional[BaseStorage] = None,
        basename: Optional[str] = None,
        prefix: Optional[str] = None,
        max_nb_chars: int = DEFAULT_TEXT_MAX_NB_CHARS,
        wrap_chars_after: Optional[int] = None,
        content: Optional[Union[str, DynamicTemplate]] = None,
        format_func: Callable[
            [Union[Faker, Generator, Provider], str], str
        ] = DEFAULT_FORMAT_FUNC,
        raw: bool = True,
        **kwargs,
    ) -> BytesValue:
        ...
    @overload
    def odt_file(
        self: "OdtFileProvider",
        storage: Optional[BaseStorage] = None,
        basename: Optional[str] = None,
        prefix: Optional[str] = None,
        max_nb_chars: int = DEFAULT_TEXT_MAX_NB_CHARS,
        wrap_chars_after: Optional[int] = None,
        content: Optional[Union[str, DynamicTemplate]] = None,
        format_func: Callable[
            [Union[Faker, Generator, Provider], str], str
        ] = DEFAULT_FORMAT_FUNC,
        **kwargs,
    ) -> StringValue:
        ...
    def odt_file(
        self: "OdtFileProvider",
        storage: Optional[BaseStorage] = None,
        basename: Optional[str] = None,
        prefix: Optional[str] = None,
        max_nb_chars: int = DEFAULT_TEXT_MAX_NB_CHARS,
        wrap_chars_after: Optional[int] = None,
        content: Optional[Union[str, DynamicTemplate]] = None,
        format_func: Callable[
            [Union[Faker, Generator, Provider], str], str
        ] = DEFAULT_FORMAT_FUNC,
        raw: bool = False,
        **kwargs,
    ) -> Union[BytesValue, StringValue]:
        """Generate an ODT file with random text.
        :param storage: Storage. Defaults to `FileSystemStorage`.
        :param basename: File basename (without extension).
        :param prefix: File name prefix.
        :param max_nb_chars: Max number of chars for the content.
        :param wrap_chars_after: If given, the output string would be separated
            by line breaks after the given position.
        :param content: File content. Might contain dynamic elements, which
            are then replaced by correspondent fixtures.
        :param format_func: Callable responsible for formatting template
            strings.
        :param raw: If set to True, return `BytesValue` (binary content of
            the file). Otherwise, return `StringValue` (path to the saved
            file).
        :return: Relative path (from root directory) of the generated file
            or raw content of the file.
        """
        # Generic
        if storage is None:
            storage = FileSystemStorage()
        filename = storage.generate_filename(
            extension=self.extension,
            prefix=prefix,
            basename=basename,
        )
        # A DynamicTemplate produces its own content via modifiers below;
        # otherwise generate plain (possibly templated) text now.
        if isinstance(content, DynamicTemplate):
            _content = ""
        else:
            _content = self._generate_text_content(
                max_nb_chars=max_nb_chars,
                wrap_chars_after=wrap_chars_after,
                content=content,
                format_func=format_func,
            )
        data = {"content": _content, "filename": filename, "storage": storage}
        with BytesIO() as _fake_file:
            document = OpenDocumentText()
            if _content:
                document.text.addElement(P(text=_content))
            elif isinstance(content, DynamicTemplate):
                # Let each registered modifier append its piece to the document.
                for counter, (ct_modifier, ct_modifier_kwargs) in enumerate(
                    content.content_modifiers
                ):
                    ct_modifier(
                        self,
                        document,
                        data,
                        counter,
                        **ct_modifier_kwargs,
                    )
            document.save(_fake_file)
            # raw=True: hand back the bytes without touching the storage.
            if raw:
                raw_content = BytesValue(_fake_file.getvalue())
                raw_content.data = data
                return raw_content
            storage.write_bytes(filename, _fake_file.getvalue())
        # Generic
        file_name = StringValue(storage.relpath(filename))
        file_name.data = data
        FILE_REGISTRY.add(file_name)
        return file_name
| barseghyanartur/faker-file | src/faker_file/providers/odt_file.py | odt_file.py | py | 7,216 | python | en | code | 74 | github-code | 50 |
1185328161 | # -*- coding: utf-8 -*-
"""
utils sqlalchemy module.
"""
from sqlalchemy.sql import quoted_name
from sqlalchemy.engine import result_tuple
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import inspect, Table, asc, desc, func, CheckConstraint
import pyrin.utils.datetime as datetime_utils
import pyrin.utils.string as string_utils
from pyrin.core.globals import _
from pyrin.core.globals import LIST_TYPES
from pyrin.utils.exceptions import InvalidRowResultFieldsAndValuesError, \
FieldsAndValuesCountMismatchError, CheckConstraintValuesRequiredError, \
MultipleDeclarativeClassesFoundError, InvalidOrderingColumnError
LIKE_CHAR_COUNT_LIMIT = 20
def like_both(value, start='%', end='%'):
    """
    gets a copy of string with place holders attached to both ends.

    it is to be used in like operator. each place holder could be
    `%` or a couple of `_` values for exact matching, and defaults
    to `%` if not provided.

    :param str value: value to be processed.
    :param str start: place holder to be prefixed.
    :param str end: place holder to be appended.

    :rtype: str
    """
    if value is None:
        return None
    return f'{start}{value}{end}'
def like_prefix(value, start='%'):
    """
    gets a copy of string with a place holder attached to its beginning.

    it is to be used in like operator. the place holder could be `%`
    or a couple of `_` values for exact matching, and defaults to `%`
    if not provided.

    :param str value: value to be processed.
    :param str start: place holder to be prefixed.

    :rtype: str
    """
    if value is None:
        return None
    return f'{start}{value}'
def like_suffix(value, end='%'):
    """
    gets a copy of string with `%` or couple of `_` values attached to end.
    it is to be used in like operator.
    :param str value: value to be processed.
    :param str end: end place holder to be appended.
                    it could be `%` or couple of `_` values
                    for exact matching.
                    defaults to `%` if not provided.
    :rtype: str
    """
    if value is None:
        return None
    return '%s%s' % (value, end)
def _process_place_holder(value, count):
    """
    processes the value and generates a place holder with count of `_` chars.
    this value could be used in like operator.
    :param str value: value to be processed.
    :param int count: count of `_` chars to be attached.
    :note count: this value has a limit of `LIKE_CHAR_COUNT_LIMIT`, if
                 the provided value goes upper than this limit, a
                 `%` will be attached instead of it. this limit is
                 for security reason.
    :rtype: str
    """
    # no value or a non-positive count yields an empty place holder.
    if value is None or count is None or count <= 0:
        return ''
    # cap overly long `_` runs with a plain `%` (security limit).
    if count > LIKE_CHAR_COUNT_LIMIT:
        return '%'
    return '_' * count
def like_exact_both(value, count):
    """
    gets a copy of string with `count` number of `_` attached to both ends.
    it is to be used in like operator.
    :param str value: value to be processed.
    :param int count: count of `_` chars to be attached.
    :note count: this value has a limit of `LIKE_CHAR_COUNT_LIMIT`, if
                 the provided value goes upper than this limit, a
                 `%` will be attached instead of it. this limit is
                 for security reason.
    :rtype: str
    """
    holder = _process_place_holder(value, count)
    return like_both(value, holder, holder)
def like_exact_prefix(value, count):
    """
    gets a copy of string with `count` number of `_` attached to beginning.
    it is to be used in like operator.
    :param str value: value to be processed.
    :param int count: count of `_` chars to be attached.
    :note count: this value has a limit of `LIKE_CHAR_COUNT_LIMIT`, if
                 the provided value goes upper than this limit, a
                 `%` will be attached instead of it. this limit is
                 for security reason.
    :rtype: str
    """
    holder = _process_place_holder(value, count)
    return like_prefix(value, holder)
def like_exact_suffix(value, count):
    """
    gets a copy of string with `count` number of `_` attached to end.
    it is to be used in like operator.
    :param str value: value to be processed.
    :param int count: count of `_` chars to be attached.
    :note count: this value has a limit of `LIKE_CHAR_COUNT_LIMIT`, if
                 the provided value goes upper than this limit, a
                 `%` will be attached instead of it. this limit is
                 for security reason.
    :rtype: str
    """
    holder = _process_place_holder(value, count)
    return like_suffix(value, holder)
def add_range_clause(clauses, column, value_lower, value_upper,
                     include_equal_to_lower=True,
                     include_equal_to_upper=True,
                     **options):
    """
    adds range comparison into given clauses using specified inputs.
    :param list clauses: clause list to add range clause to it.
    :param CoreColumn column: entity column to add range clause for it.
    :param object value_lower: lower bound of range clause.
    :param object value_upper: upper bound of range clause.
    :param include_equal_to_lower: specifies that lower value
                                   should be considered in range.
                                   defaults to True if not provided.
    :param include_equal_to_upper: specifies that upper value
                                   should be considered in range.
                                   defaults to True if not provided.
    """
    # nothing to do when no bound is provided at all.
    if value_lower is None and value_upper is None:
        return
    # equal bounds collapse into a single equality comparison.
    if value_lower is not None and value_lower == value_upper:
        clauses.append(column == value_lower)
        return
    if value_lower is not None:
        lower_clause = column >= value_lower if include_equal_to_lower is True \
            else column > value_lower
        clauses.append(lower_clause)
    if value_upper is not None:
        upper_clause = column <= value_upper if include_equal_to_upper is True \
            else column < value_upper
        clauses.append(upper_clause)
def add_datetime_range_clause(clauses, column,
                              value_lower, value_upper,
                              include_equal_to_lower=True,
                              include_equal_to_upper=True,
                              **options):
    """
    adds datetime range comparison into given clauses using specified inputs.
    if the inputs are date objects, they will be converted to datetime with client
    timezone and `consider_begin_of_day` and `consider_end_of_day` will also
    considered as True.
    :param list clauses: clause list to add datetime range clause to it.
    :param CoreColumn column: entity column to add datetime range clause for it.
    :param datetime | date value_lower: lower bound of datetime range clause.
    :param datetime | date value_upper: upper bound of datetime range clause.
    :param include_equal_to_lower: specifies that lower datetime
                                   should be considered in range.
                                   defaults to True if not provided.
    :param include_equal_to_upper: specifies that upper datetime
                                   should be considered in range.
                                   defaults to True if not provided.
    :keyword bool consider_begin_of_day: specifies that consider begin
                                         of day for lower datetime.
                                         defaults to False if not provided.
    :keyword bool consider_end_of_day: specifies that consider end
                                       of day for upper datetime.
                                       defaults to False if not provided.
    """
    # normalize bounds first (date -> datetime conversion and the
    # begin/end-of-day options), then delegate to the generic range builder.
    value_lower, value_upper = datetime_utils.normalize_datetime_range(value_lower,
                                                                       value_upper,
                                                                       **options)
    add_range_clause(clauses, column,
                     value_lower, value_upper,
                     include_equal_to_lower,
                     include_equal_to_upper,
                     **options)
def add_string_range_clause(clauses, column, value_lower, value_upper,
                            include_equal_to_lower=True,
                            include_equal_to_upper=True,
                            **options):
    """
    adds string range comparison into given clauses using specified inputs.
    it coerces both values into string if they are not None.
    it also converts both values and the value of database column into lowercase.
    :param list clauses: clause list to add string range clause to it.
    :param CoreColumn column: entity column to add string range clause for it.
    :param object value_lower: lower bound of string range clause.
    :param object value_upper: upper bound of string range clause.
    :param include_equal_to_lower: specifies that lower value
                                   should be considered in range.
                                   defaults to True if not provided.
    :param include_equal_to_upper: specifies that upper value
                                   should be considered in range.
                                   defaults to True if not provided.
    """
    # coerce bounds to lowercase strings for a case-insensitive comparison.
    if value_lower is not None:
        value_lower = str(value_lower).lower()
    if value_upper is not None:
        value_upper = str(value_upper).lower()
    # swapping values in case of user mistake.
    if value_lower is not None and value_upper is not None:
        if value_lower > value_upper:
            value_lower, value_upper = value_upper, value_lower
    # func.lower makes the database column side case-insensitive too.
    add_range_clause(clauses, func.lower(column),
                     value_lower, value_upper,
                     include_equal_to_lower,
                     include_equal_to_upper,
                     **options)
def add_comparison_clause(clauses, column, value, **options):
    """
    adds list or single comparison into clauses based on given value.
    if the value type is any of list, tuple or set, it generates an
    `in()` comparison, otherwise it generates a simple `==` comparison.
    :param list clauses: clause list to add comparison clause to it.
    :param CoreColumn column: entity column to add comparison clause for it.
    :param object | list[object] value: value to add comparison for it.
    """
    # None means no comparison should be added at all.
    if value is None:
        return
    if isinstance(value, LIST_TYPES):
        clauses.append(column.in_(value))
    else:
        clauses.append(column == value)
def create_row_result(fields, values):
    """
    creates a row result object with given fields and values.
    this object type is returned by sqlalchemy `Query` when there
    is column names or multiple entities in query.
    :param list[str] fields: field names of the result object.
    :param list[object] values: values to be mapped to fields.
                                they must be in the same order as fields.
    :raises InvalidRowResultFieldsAndValuesError: invalid row result fields
                                                  and values error.
    :raises FieldsAndValuesCountMismatchError: fields and values count mismatch error.
    :rtype: ROW_RESULT
    """
    if fields is None or values is None:
        raise InvalidRowResultFieldsAndValuesError('Input parameters "fields" and '
                                                   '"values" must both be provided, '
                                                   'they could not be None.')
    if len(fields) != len(values):
        raise FieldsAndValuesCountMismatchError('The length of "fields" which is '
                                                '[{fields}] and "values" which is '
                                                '[{values}] does not match.'
                                                .format(fields=len(fields),
                                                        values=len(values)))
    # result_tuple builds a named-tuple-like factory matching
    # sqlalchemy's own row type; calling it maps values onto fields.
    result = result_tuple(fields)
    return result(values)
def check_constraint(column, values, **options):
    """
    generates a check constraint for given column and values.
    by default, it generates an `in` check, but this could be changed to
    `not in` by providing `use_in=False` in options.
    if the first item of values is a string, all values will be quoted.
    :param str column: column name to be used in check constraint.
    :param list | tuple values: values to be used in check constraint.
    :keyword bool use_in: specifies that it must generate an `in` check.
                          otherwise it generates a `not in` check.
                          defaults to True if not provided.
    :keyword **options: all other keyword arguments will be passed to
                        underlying `CheckConstraint` constructor.
    :raises CheckConstraintValuesRequiredError: check constraint values required error.
    :rtype: CheckConstraint
    """
    if values is None or len(values) <= 0:
        raise CheckConstraintValuesRequiredError('Values for generating a check '
                                                 'constraint must be provided.')
    # string values must be quoted in the generated SQL; the type of the
    # first item decides for the whole sequence.
    converter = str
    is_string = isinstance(values[0], str)
    if is_string:
        converter = string_utils.quote
    use_in = options.pop('use_in', True)
    condition = 'in'
    if use_in is False:
        condition = 'not in'
    string_values = ', '.join(converter(item) for item in values)
    # quoted_name preserves the exact case of the column name in SQL.
    sql_text = '{column} {condition} ({values})'.format(column=quoted_name(column, True),
                                                        condition=condition,
                                                        values=string_values)
    options.update(sqltext=sql_text)
    return CheckConstraint(**options)
def range_check_constraint(column, min_value=None, max_value=None, **options):
    """
    generates a range check constraint for given column and values.
    if the values are string, they will be quoted.
    if only one value is provided, a max or min constraint will be generated.
    :param str column: column name to be used in check constraint.
    :param object min_value: min value to be used in check constraint.
    :param object max_value: max value to be used in check constraint.
    :keyword **options: all other keyword arguments will be passed to
                        underlying `CheckConstraint` constructor.
    :raises CheckConstraintValuesRequiredError: check constraint values required error.
    :rtype: CheckConstraint
    """
    if min_value is None and max_value is None:
        raise CheckConstraintValuesRequiredError('Values for generating a range check '
                                                 'constraint must be provided.')
    # pick the SQL template matching which bounds were supplied.
    sql_text = None
    if min_value is not None and max_value is not None:
        sql_text = '{column} >= {min} and {column} <= {max}'
    elif min_value is not None:
        sql_text = '{column} >= {min}'
    else:
        sql_text = '{column} <= {max}'
    # quote string bounds so they are valid SQL literals.
    if isinstance(min_value, str):
        min_value = string_utils.quote(min_value)
    if isinstance(max_value, str):
        max_value = string_utils.quote(max_value)
    sql_text = sql_text.format(column=quoted_name(column, True),
                               min=min_value, max=max_value)
    options.update(sqltext=sql_text)
    return CheckConstraint(**options)
def get_class_by_table(base, table, **options):
    """
    gets declarative class associated with given table.
    if no class is found this function returns `None`.
    if multiple classes were found (polymorphic cases) additional `data` parameter
    can be given to hint which class to return.
    :param type[BaseEntity] base: declarative base model.
    :param Table table: sqlalchemy table object.
    :keyword dict data: data row to determine the class in polymorphic scenarios.
    :keyword bool raise_multi: specifies that if multiple classes found and
                               also provided data could not help, raise an error.
                               otherwise return None.
                               defaults to True if not provided.
    :note: this code is taken from sqlalchemy-utils project.
           https://github.com/kvesteri/sqlalchemy-utils
    for example:
    class User(CoreEntity):
        _table = 'entity'
        id = AutoPKColumn()
        name = StringColumn()
    get_class_by_table(CoreEntity, User.__table__) -> User class
    this function also supports models using single table inheritance.
    additional data parameter should be provided in these cases.
    for example:
    class Entity(CoreEntity):
        _table = 'entity'
        id = AutoPKColumn()
        name = StringColumn()
        type = StringColumn()
        __mapper_args__ = {
            'polymorphic_on': type,
            'polymorphic_identity': 'entity'
        }
    class User(Entity):
        __mapper_args__ = {
            'polymorphic_identity': 'user'
        }
    get_class_by_table(CoreEntity, Entity.__table__, {'type': 'entity'}) -> Entity class
    get_class_by_table(CoreEntity, Entity.__table__, {'type': 'user'}) -> User class
    it also supports extended entities with unlimited depth, it returns the correct child
    entity.
    for example:
    class EntityBase(CoreEntity):
        _table = 'entity'
        id = AutoPKColumn()
    class Entity2(EntityBase):
        _extend_existing = True
        name = StringColumn()
    class Entity3(Entity2):
        _extend_existing = True
        age = CoreColumn(Integer)
    get_class_by_table(CoreEntity, EntityBase.__table__) -> Entity3 class
    :raises MultipleDeclarativeClassesFoundError: multiple declarative classes found error.
    :returns: declarative class or None.
    :rtype: type[pyrin.database.model.base.BaseEntity]
    """
    data = options.get('data')
    raise_multi = options.get('raise_multi', True)
    found_classes = []
    # collect every mapped class whose primary table is the given table.
    for item in base.registry.mappers:
        if len(item.tables) > 0 and item.tables[0] is table:
            found_classes.append(item.entity)
    current_count = len(found_classes)
    temp = list(found_classes)
    if current_count > 1:
        # prefer extending classes over the ones they extend, but only if
        # that filtering actually removes something and leaves candidates.
        for item in found_classes:
            if item.extend_existing is not True:
                temp.remove(item)
        if current_count > len(temp) > 0:
            found_classes = temp
    if len(found_classes) > 1:
        # extended-entity case: the deepest subclass has the longest MRO.
        # the symmetric difference of the longest hierarchy against the
        # union of the others isolates that single deepest class.
        hierarchies = []
        for item in found_classes:
            hierarchies.append(set(item.__mro__))
        hierarchies = sorted(hierarchies, key=len, reverse=True)
        last = hierarchies[0]
        others = hierarchies[1:]
        union = set().union(*others)
        result = union.symmetric_difference(last)
        if len(result) == 1:
            return result.pop()
    elif len(found_classes) == 1:
        return found_classes[0]
    found_classes = set(found_classes)
    if len(found_classes) > 1:
        if not data:
            if raise_multi is True:
                raise MultipleDeclarativeClassesFoundError('Multiple declarative classes found '
                                                           'for table [{table}]. please provide '
                                                           '"data" parameter for this function '
                                                           'to be able to determine polymorphic '
                                                           'scenarios.'.format(table=table.name))
        else:
            # polymorphic case: match the discriminator column value in the
            # given data row against each candidate's polymorphic identity.
            for cls in found_classes:
                mapper = inspect(cls)
                polymorphic_on = mapper.polymorphic_on.name
                if polymorphic_on in data:
                    if data[polymorphic_on] == mapper.polymorphic_identity:
                        return cls
            if raise_multi is True:
                raise MultipleDeclarativeClassesFoundError('Multiple declarative classes found '
                                                           'for table [{table}]. given data row '
                                                           'does not match any polymorphic '
                                                           'identity of the found classes.'
                                                           .format(table=table.name))
    elif found_classes:
        return found_classes.pop()
    return None
def is_valid_column_name(column):
    """
    gets a value indicating that given column name is valid.
    a valid name is a non-empty string that, once the ordering markers
    `+`/`-` are removed and surrounding whitespace stripped, is non-empty
    and contains no inner spaces.
    :param str column: column name.
    :rtype: bool
    """
    if not isinstance(column, str):
        return False
    stripped = column.replace('+', '').replace('-', '').strip()
    return len(stripped) > 0 and ' ' not in stripped
def get_column_name(column):
    """
    gets the pure column name from given column name with ordering info.
    for example:
    +age -> age
    age -> age
    -age -> age
    :param str column: column name to extract pure name from it.
    :rtype: str
    """
    # drop a single leading ordering marker, if present.
    return column[1:] if column[:1] in ('-', '+') else column
def get_ordering_info(column):
    """
    gets a tuple containing ordering info for given column name.
    it returns a tuple of two item, first item is column name and
    second item is ordering type which is an `UnaryExpression` from `asc` or `desc`.
    default ordering is ascending, but it could be changed to descending
    by prefixing `-` to column name.
    for example:
    age -> ordering for age column ascending.
    -age -> ordering for age column descending.
    :param str column: column name to get its ordering info.
    :rtype: tuple[str, UnaryExpression]
    """
    if not column.startswith(('-', '+')):
        return column, asc
    # a leading `-` means descending; a leading `+` keeps ascending.
    order_type = desc if column[0] == '-' else asc
    return column[1:], order_type
def get_ordering_criterion(*columns, valid_columns=None, ignore_invalid=True):
    """
    gets required criterion for given columns ordering.
    default ordering is ascending, but it could be changed to descending
    by prefixing `-` to column names.
    this method always ignores empty strings and None values.
    for example:
    name, +age -> ordering for name and age columns both ascending.
    name, -age -> ordering for name ascending and age descending.
    :param str columns: column names to get their ordering criterion.
    :param list[str] valid_columns: valid column names for ordering.
                                    defaults to None if not provided.
    :param bool ignore_invalid: specifies that if provided columns are
                                not in valid column names, ignore them
                                instead of raising an error. note that
                                this only has effect if `valid_columns`
                                is provided. defaults to True.
    :raises InvalidOrderingColumnError: invalid ordering column error.
    :rtype: tuple[UnaryExpression]
    """
    result = []
    error_message = _('Column [{name}] is not valid for ordering.')
    for item in columns:
        # malformed names are skipped or rejected depending on ignore_invalid.
        if is_valid_column_name(item):
            name, order_type = get_ordering_info(item)
            if valid_columns is None or name in valid_columns:
                # quoted_name preserves the exact case of the column name.
                result.append(order_type(quoted_name(name, True)))
            elif ignore_invalid is False:
                raise InvalidOrderingColumnError(error_message.format(name=name))
        elif ignore_invalid is False:
            raise InvalidOrderingColumnError(error_message.format(name=item))
    return tuple(result)
def is_expression_level_hybrid_property(value):
    """
    gets a value indicating that provided object is an expression level hybrid property.
    the provided value may be a proxy holding a descriptor to a hybrid property.
    :param object value: value to be checked.
    :rtype: bool
    """
    # direct hybrid property: expression level iff it has an `expr`.
    if isinstance(value, hybrid_property):
        return value.expr is not None
    # proxy case: look for a hybrid property behind a `descriptor` attribute.
    descriptor = getattr(value, 'descriptor', None)
    return isinstance(descriptor, hybrid_property) and descriptor.expr is not None
| mononobi/pyrin | src/pyrin/utils/sqlalchemy.py | sqlalchemy.py | py | 25,102 | python | en | code | 12 | github-code | 50 |
9216030065 | import torch
class Main(torch.nn.Module):
    """Monte-Carlo estimator of pi from uniform random samples."""

    def forward(self, x):
        """Estimate pi from a 1-D tensor of uniform samples in [0, 1).

        For x ~ U[0, 1): E[sqrt(1 - x^2)] = pi/4 and
        E[sqrt(2 - x^2)] = 1/2 + pi/4, so 2 * (sum of the two means) - 1
        is an unbiased estimate of pi.  The estimate is repeated on
        bit-shifted copies of the input and averaged.

        Fixes vs. the original: returned the undefined name ``total``
        (NameError), called ``torch.mean`` on a Python list (TypeError),
        and averaged the two expectations in a way that does not yield pi.
        """
        estimates = []
        for _ in range(20):
            term_unit = torch.mean(torch.sqrt(1 - x * x))
            term_sqrt2 = torch.mean(torch.sqrt(2 - x * x))
            estimates.append(2 * (term_unit + term_sqrt2) - 1)
            # Shift out the most significant bit so the lower-order bits
            # act as a fresh uniform sample (x*2 mod 1 stays uniform).
            x = x * 2
            x = x - torch.floor(x)
        # torch.mean cannot average a Python list; stack into one tensor.
        return torch.mean(torch.stack(estimates))
| xftnr/Neural-Networks | homework_01/homework/main.py | main.py | py | 1,020 | python | en | code | 0 | github-code | 50 |
16973540723 | from codecs import open
from setuptools import setup
long_description = open('README.rst', 'r', encoding='utf-8').read()
# Package metadata for PyPI / pip installation.
setup(
    name='cloudml-hypertune',
    version='0.1.0',
    description='A library to report Google CloudML Engine HyperTune metrics.',
    long_description=long_description,
    author='Google CloudML Engine',
    author_email='cloudml-feedback@google.com',
    license='Apache Software License',
    url='http://github.com/GoogleCloudPlatform/cloudml-hypertune',
    # PyPI trove classifiers: project maturity, audience, license and the
    # supported Python versions / platforms.
    classifiers=[
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet',
        'Topic :: Scientific/Engineering',
        'Topic :: System :: Distributed Computing',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
    ],
    keywords='ml hyperparameter tuning',
    # only the single 'hypertune' package is shipped.
    packages=['hypertune'],
)
| GoogleCloudPlatform/cloudml-hypertune | setup.py | setup.py | py | 1,244 | python | en | code | 32 | github-code | 50 |
22055760488 | # Read file
# Build up graph with dict
# Start path finding
# find_paths(start, end, current_path=[start])
# Iterate over all neighbouring caves: find_paths(neighbourX, end, current_path[start, neighbourX])
# ... and concatenate their resulting paths
# When adding a new cave, check if path is valid
# If end is reached, return path
def main():
    # Build the cave graph from the puzzle input, then count every valid
    # path from 'start' to 'end' (part 1: small caves visited at most once).
    graph = read_file('input.txt')
    paths = find_paths('start', 'end', ['start'], graph)
    print(f'Path count = {len(paths)}')
def read_file(input_file):
    """Parse the puzzle input into an undirected adjacency mapping.

    Each line has the form ``A-B``; both directions are recorded so the
    graph can be walked from either endpoint.

    Returns a dict mapping cave name -> list of neighbouring cave names.
    """
    graph = {}
    # 'with' guarantees the handle is closed (the original leaked it), and
    # iterating the file avoids materializing all lines at once.
    with open(input_file, 'r') as input:
        for line in input:
            start, end = line.strip().split('-')
            # record the edge in both directions (undirected graph).
            graph.setdefault(start, []).append(end)
            graph.setdefault(end, []).append(start)
    return graph
def find_paths(start, end, current_path, graph):
    """Recursively enumerate every valid path from start to end.

    Returns a list of paths, each being a list of cave names including
    both endpoints.
    """
    # base case: destination reached, the accumulated path is complete.
    if start == end:
        return [current_path]
    found = []
    for cave in graph[start]:
        candidate = current_path + [cave]
        if is_valid_path(candidate):
            found.extend(find_paths(cave, end, candidate, graph))
    return found
def is_valid_path(path):
    """Return True if no small (lowercase) cave occurs more than once.

    Uses collections.Counter instead of the hand-rolled visit counting,
    and short-circuits on the first offending cave.
    """
    from collections import Counter
    counts = Counter(path)
    # big caves (uppercase names) may repeat; small caves may not.
    return all(n <= 1 for cave, n in counts.items() if cave == cave.lower())
def get_visit_counts(path):
    """Return a dict mapping each cave in the path to its occurrence count."""
    counts = {}
    for cave in path:
        counts[cave] = counts.get(cave, 0) + 1
    return counts
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| danielmast/advent-of-code-2021 | day12/day12_1.py | day12_1.py | py | 1,688 | python | en | code | 0 | github-code | 50 |
8025266594 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 21 10:04:17 2022
@author: psen
"""
# Initialize the grocery counter: item name -> number of times entered.
grocery = {}

# Read items until EOF (Ctrl-D), counting each occurrence as it arrives.
# (The original collapsed duplicates to a count of 1 in the dict and then
# tried to repair the counts by comparing neighbouring dict values, which
# produced wrong totals.)
while True:
    try:
        item = input("Item: ").upper()
    except EOFError:
        print()
        break
    grocery[item] = grocery.get(item, 0) + 1

# Print count and item name, sorted alphabetically.
for key in sorted(grocery):
    print(grocery[key], key)
| Lotus010/cs50python | Week 3 Exceptions/grocery.py | grocery.py | py | 731 | python | en | code | 0 | github-code | 50 |
71819228636 | from django.conf.urls import url
from . import views
urlpatterns = [
    # ex: /quali/
    url(r'^$', views.index, name='index'),
    # ex: /quali/5/
    url(r'^(?P<quali>[0-9]+)/$', views.detail, name='detail'),
    # ex: /quali/5/results/
    url(r'^(?P<quali>[0-9]+)/results/$', views.results, name='results'),
    # ex: /quali/5/vote/
    url(r'^(?P<quali>[0-9]+)/vote/$', views.vote, name='vote'),
]
| jmboettger/HioDB | quali/urls.py | urls.py | py | 389 | python | en | code | 1 | github-code | 50 |
25587999493 | '''
Objektorientiert Programmierung
Ein bewegtes Objekt
Version 1.00, 27.02.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
Der Code kann mit Quellenangabe frei verwendet werden.
'''
from tkinter import *
from kreis_klasse import KreisV1 as Kreis
from quadrat_klasse import QuadratV1 as Quadrat
from bewegt_klasse import BewegtesObjektV1 as BewegtesObjekt
import time
# Create the main window
bildschirm = Tk()
bildschirm.title('Spielerei mit 2D - Objekten')
# Create the playing-field canvas
spielfeld = Canvas(bildschirm, width=1000, height=800, bg="yellow")
spielfeld.pack()
# Draw a circle and a square
kreis = Kreis(spielfeld, x=110, y=110, radius=100)
quadrat = Quadrat(spielfeld, x=110, y=110, laenge=80, farbe="red")
# Create a moving object and show it for 5 seconds
bewegt = BewegtesObjekt(spielfeld, x=110, y=110)
time.sleep(5)
# Move the figure 500 steps, 3 pixels per step
bewegt.bewegung(x=3, y=3)
for i in range(500):
    bewegt.bewege()
bildschirm.mainloop()
| hobbyelektroniker/Micropython-Grundlagen | 013_Klassen in Micropython/Code/Objekte3.py | Objekte3.py | py | 1,080 | python | de | code | 0 | github-code | 50 |
29269191782 | import open3d as o3d
import copy
import numpy as np
# 2
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp],
zoom=0.4459,
front=[0.9288, -0.2951, -0.2242],
lookat=[1.6784, 2.0612, 1.4451],
up=[-0.3402, -0.9189, -0.1996])
# 3
# Load the two scans to align.  NOTE(review): the backslash in "data\STN6..."
# is a literal character here (\S is not an escape); prefer forward slashes
# or raw strings for portability.
source = o3d.io.read_point_cloud("data\STN6xyzi.txt.pcd")
target = o3d.io.read_point_cloud("data\STN7xyzi.txt.pcd")
# max correspondence distance for ICP / evaluation.
threshold = 0.02
# rough initial 4x4 transformation guess (from the Open3D ICP tutorial).
trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                         [-0.139, 0.967, -0.215, 0.7],
                         [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
# trans_init = np.asarray([[1, 0, 0, 0],
#                          [0, 1, 0, 0],
#                          [0, 0, 1, 0], [0.0, 0.0, 0.0, 1.0]])
draw_registration_result(source, target, trans_init)
# 4
# Evaluate fitness / inlier RMSE of the initial guess before running ICP.
print("Initial alignment")
evaluation = o3d.pipelines.registration.evaluate_registration(
    source, target, threshold, trans_init)
print(evaluation)
# 5
print("Apply point-to-point ICP")
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# 6
# Same point-to-point ICP, but allowing many more iterations to converge.
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint(),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# 7
# print("Apply point-to-plane ICP")
# reg_p2l = o3d.pipelines.registration.registration_icp(
#     source, target, threshold, trans_init,
#     o3d.pipelines.registration.TransformationEstimationPointToPlane())
# print(reg_p2l)
# print("Transformation is:")
# print(reg_p2l.transformation)
# draw_registration_result(source, target, reg_p2l.transformation)
# 7-1
# print("Robust point-to-plane ICP, threshold={}:".format(threshold))
# loss = o3d.pipelines.registration.TukeyLoss(k=sigma)
# print("Using robust loss:", loss)
# p2l = o3d.pipelines.registration.TransformationEstimationPointToPlane(loss)
# reg_p2l = o3d.pipelines.registration.registration_icp(source_noisy, target,
#                                                       threshold, trans_init,
#                                                       p2l)
# print(reg_p2l)
# print("Transformation is:")
# print(reg_p2l.transformation)
# draw_registration_result(source, target, reg_p2l.transformation)
# 8
def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size):
    """Refine a coarse (RANSAC) alignment with point-to-plane ICP.

    NOTE(review): this function reads the global ``result_ransac`` for the
    initial transformation, and the call below passes ``source_fpfh``,
    ``target_fpfh`` and ``voxel_size`` — none of which are defined anywhere
    in this script, so this section raises NameError as written.  It looks
    pasted from the Open3D global-registration tutorial with the RANSAC
    step (which produces those values) left out — restore that step or
    remove this section.
    """
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print(" clouds to refine the alignment. This time we use a strict")
    print(" distance threshold %.3f." % distance_threshold)
    result = o3d.pipelines.registration.registration_icp(
        source, target, distance_threshold, result_ransac.transformation,
        o3d.pipelines.registration.TransformationEstimationPointToPlane())
    return result
result_icp = refine_registration(source, target, source_fpfh, target_fpfh,
                                 voxel_size)
print(result_icp)
draw_registration_result(source, target, result_icp.transformation)
72335923354 | import os
import os.path as osp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
def plot_features(features, labels, num_classes, epoch, save_dir, prefix):
    """Plot features on 2D plane, one color per class.

    Args:
        features: (num_instances, num_features); only the first two
            feature dimensions are plotted.
        labels: (num_instances,) integer class labels.
        num_classes: number of distinct classes to plot.
        epoch: current epoch index (0-based); used in the output file name.
        save_dir: base directory for the figure.
        prefix: sub-directory name under save_dir.
    """
    # matplotlib's default color cycle; reused cyclically when
    # num_classes exceeds its length.
    colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    # fix: honor the num_classes parameter instead of the hard-coded 10.
    for label_idx in range(num_classes):
        plt.scatter(
            features[labels == label_idx, 0],
            features[labels == label_idx, 1],
            c=colors[label_idx % len(colors)],
            s=1,
        )
    plt.legend([str(i) for i in range(num_classes)], loc='upper right')
    dirname = osp.join(save_dir, prefix)
    # makedirs with exist_ok avoids the exists-check race and creates
    # missing parent directories as well.
    os.makedirs(dirname, exist_ok=True)
    save_name = osp.join(dirname, 'epoch_' + str(epoch + 1) + '.png')
    plt.savefig(save_name, bbox_inches='tight')
    plt.close()
| chenshen03/MarginHash-pytorch | utils/visualize.py | visualize.py | py | 914 | python | en | code | 6 | github-code | 50 |
16858849958 | import pandas as pd
from sklearn.preprocessing import MinMaxScaler,StandardScaler
"""
特征工程之特征预处理:
通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程
为什么我们要进行归一化/标准化?
特征的单位或者大小相差较大,或者某特征的方差相比其他的特征要大出几个数量级,容易影响(支配)目标结果,使得一些算法无法学习到其它的特征
我们需要用到一些方法进行无量纲化,使不同规格的数据转换到同一规格
包含内容(数值型数据的无量纲化)
归一化
标准化
1,归一化
通过对原始数据进行变换把数据映射到(默认为[0,1])之间
公式:
先求出x1 = (x-min)/(max-min)
再求出x2 = x1*(xm-归一化区间的最小值)+mi
解释:作用于每一列,max为一列的最大值,min为一列的最小值,那么x2为最终结果,mx,mi分别为指定区间值默认mx为1,mi为0
归一化api:
①sklearn.preprocessing.MinMaxScaler (feature_range=(0,1)… )
feature_range指定归一化后的最大最小值
②MinMaxScalar.fit_transform(X)
X:numpy array格式的数据[n_samples,n_features]
返回值:转换后的形状相同的array
归一化总结:
注意最大值最小值是变化的,另外,最大值与最小值非常容易受异常点影响,所以这种方法鲁棒性较差,只适合传统精确小数据场景。解决这个问题可以使用标准化
2,标准化
通过对原始数据进行变换把数据变换到均值为0,标准差为1范围内
公式:
x1=(x-mean)/σ
解释:作用于每一列,mean为平均值,σ为标准差
标准化和归一化在异常值处理的区别
对于归一化来说:如果出现异常点,影响了最大值和最小值,那么结果显然会发生改变
对于标准化来说:如果出现异常点,由于具有一定数据量,少量的异常点对于平均值的影响并不大,从而方差改变较小。
标准化api
1 sklearn.preprocessing.StandardScaler( ) 实例化对象
处理之后每列来说所有数据都聚集在均值0附近标准差差为
2 StandardScaler.fit_transform(X) 进行转换
X:numpy array格式的数据[n_samples,n_features]
返回值:转换后的形状相同的array
"""
def minmax_demo():
    """
    Min-max normalization demo: rescales each feature column into the
    configured feature_range.

    :return: None
    """
    data = pd.read_csv("./data/dating.txt")
    print(data)
    # 1 instantiate the scaler; target range after scaling is [3, 5]
    transform = MinMaxScaler(feature_range=(3, 5))
    # 2 transform the selected columns via fit_transform
    ret_data = transform.fit_transform(data[["milage", "Liters", "Consumtime"]])
    print("归一化后的数据:\n", ret_data)
def stand_demo():
    """
    Standardization demo: transforms each feature column to zero mean and
    unit variance (z-score scaling).

    :return: None
    """
    data = pd.read_csv("./data/dating.txt")
    print(data)
    # 1 instantiate the standard scaler
    transfer = StandardScaler()
    # 2 transform the selected columns via fit_transform
    ret_data = transfer.fit_transform(data[["milage", "Liters", "Consumtime"]])
    print("标准化之后的数据为:\n", ret_data)
    print("每一列的方差为:\n", transfer.var_)
    print("每一列的平均值为:\n", transfer.mean_)
def main():
    # run both preprocessing demos: min-max normalization and standardization.
    minmax_demo()
    stand_demo()
if __name__ == '__main__':
    main()
| Mr-Owl/machine_learning | 05-k近邻算法/03-preprocessing预处理.py | 03-preprocessing预处理.py | py | 3,236 | python | zh | code | 2 | github-code | 50 |
34967066466 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import logging
from urllib.parse import urljoin
from .consts import *
from .htmltools import taglist_to_dict, table_to_dict
from .urltools import get_cached_url, get_cached_post, get_from_file
from .patterns import PATTERNS
#logging.getLogger().addHandler(logging.StreamHandler())
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
def extract_data_xpath(url, filename=None, xpath=None, fieldnames=None, absolutize=False, post=None, pagekey=None, pagerange=None):
    """Extract data with xpath
    :param url:
        HTML webpage url (may be None when *filename* is given)
    :type url: str|unicode
    :param filename:
        local HTML file to parse instead of fetching *url*
    :type filename: str|unicode
    :param xpath:
        xpath expression selecting the tags of interest
    :type xpath: str|unicode
    :param fieldnames:
        string with list of fields like "src,alt,href,_text"
    :type fieldnames: str|unicode
    :param absolutize:
        Absolutize all urls returned as href and other url-like fields
    :type absolutize: bool
    :param post:
        If True use POST for HTTP requests
    :type post: bool
    :param pagekey:
        Key of the page listing. GET or POST parameter
    :type pagekey: str|unicode
    :param pagerange:
        Range of pages to process. Format 'min,max,step,pagesize',
        example: '1,72,1,20'. max=-1 means "loop until a short page";
        pagesize=-1 disables the short-page stop.
        (Docstring fixed: the code has always parsed four fields.)
    :type pagerange: str|unicode
    :return: Returns array of extracted values
    :rtype: :class:`array`."""
    fields = fieldnames.split(',') if fieldnames else DEFAULT_FIELDS
    data = []
    if pagekey is None:
        # Single page: fetch over HTTP (GET or POST) or read from disk.
        if url is not None:
            root = get_cached_post(url) if post else get_cached_url(url)
        else:
            root = get_from_file(filename)
        tree = root.getroottree()
        data = taglist_to_dict(tree.xpath(xpath), fields)
    else:
        start, end, step, pagesize = map(int, pagerange.split(','))
        current = start
        while True:
            anurl = url + '?%s=%d' % (pagekey, current)
            logging.info('Processing url %s' % (anurl))
            if post:
                root = get_cached_post(anurl, {pagekey: str(current)})
            else:
                root = get_cached_url(anurl)
            tree = root.getroottree()
            items = taglist_to_dict(tree.xpath(xpath), fields)
            data.extend(items)
            # BUGFIX: advance by the requested step; the original always
            # incremented by 1 and silently ignored the parsed step value.
            current += step
            # Stop on a short page (fewer items than a full page)...
            if pagesize != -1 and len(items) < pagesize:
                logging.info('Breaking loop. %d vs %d' % (len(items), pagesize))
                break
            # ...or once the requested last page has been fetched.
            if end != -1 and current == end + 1:
                logging.info('Breaking loop. %d vs %d' % (len(items), pagesize))
                break
    # Turn relative href/src/... values into absolute URLs when requested.
    has_urltagtype = any(t in fields for t in URL_TAG_TYPES)
    if absolutize and has_urltagtype:
        for i in range(0, len(data)):
            for tagtype in URL_TAG_TYPES:
                if tagtype not in data[i].keys():
                    continue
                if data[i][tagtype][:6] not in ['http:/', 'https:'] and len(data[i][tagtype]) > 0:
                    data[i][tagtype] = urljoin(url, data[i][tagtype])
    return data
def use_pattern(url, pattern, nodeid=None, nodeclass=None, fieldnames=None, absolutize=False, pagekey=False, pagerange=False):
    """Uses predefined pattern to extract page data
    :param url:
        HTML webpage url
    :type url: str|unicode
    :param pattern:
        key into the PATTERNS registry selecting the extraction function
    :type pattern: str|unicode
    :param nodeid:
        id key for nodes
    :type nodeid: str|unicode
    :param nodeclass:
        class key for nodes
    :type nodeclass: str|unicode
    :param fieldnames:
        string with list of fields like "src,alt,href,_text"
    :type fieldnames: str|unicode
    :param absolutize:
        Absolutize all urls returned as href and other url-like fields
    :type absolutize: bool
    :param pagekey:
        Key of the page listing. GET or POST parameter
    :type pagekey: str|unicode
    :param pagerange:
        Range of pages to process. String with format 'min,max,step', example: '1,72,1'
    :type pagerange: str|unicode
    :return: Returns array of extracted values
    :rtype: :class:`array`."""
    findata = []
    pat = PATTERNS[pattern]
    fields = fieldnames.split(',') if fieldnames else pat['deffields']
    if pagekey is False:
        root = get_cached_url(url)
        tree = root.getroottree()
        findata = pat['func'](tree, nodeclass, nodeid, fields)
    else:
        start, end, step = map(int, pagerange.split(','))
        for page in range(start, end, step):
            anurl = url + '?%s=%d' % (pagekey, page)
            # BUGFIX: fetch the paginated URL; the original fetched the bare
            # `url` here, so every iteration re-scraped page one.
            root = get_cached_url(anurl)
            tree = root.getroottree()
            findata.extend(pat['func'](tree, nodeclass, nodeid, fields))
    # Absolutize relative URLs in url-like fields when requested.
    has_urltagtype = False
    if fields is not None:
        has_urltagtype = any(t in fields for t in URL_TAG_TYPES)
    if absolutize and has_urltagtype:
        for i in range(0, len(findata)):
            for tagtype in URL_TAG_TYPES:
                if tagtype not in findata[i].keys():
                    continue
                if findata[i][tagtype][:6] not in ['http:/', 'https:'] and len(findata[i][tagtype]) > 0:
                    findata[i][tagtype] = urljoin(url, findata[i][tagtype])
    return findata
def get_table(url, nodeid=None, nodeclass=None, pagekey=False, pagerange=False, agent=None):
    """Extracts table with data from html
    :param url:
        HTML webpage url
    :type url: str|unicode
    :param nodeid:
        id key for nodes
    :type nodeid: str|unicode
    :param nodeclass:
        class key for nodes
    :type nodeclass: str|unicode
    :param pagekey:
        Key of the page listing. GET or POST parameter
    :type pagekey: str|unicode
    :param pagerange:
        Range of pages to process. Format 'min,max,step,pagesize',
        example: '1,72,1,20'
    :type pagerange: str|unicode
    :param agent:
        optional user-agent string forwarded to get_cached_url
    :return: Returns array of extracted values
    :rtype: :class:`array`."""
    # The table selector does not depend on the page, so build it once.
    if nodeclass:
        xfilter = "//table[@class='%s']" % (nodeclass)
    elif nodeid:
        xfilter = "//table[@id='%s']" % (nodeid)
    else:
        xfilter = '//table'
    # BUGFIX: the original tested `pagekey is None` while the default was
    # False, so a plain get_table(url) fell into the pagination branch and
    # crashed calling .split on False. Treat any falsy pagekey as "single page".
    if not pagekey:
        root = get_cached_url(url, agent=agent)
        tree = root.getroottree()
        tags = tree.xpath(xfilter)
        findata = table_to_dict(tags[0], strip_lf=True) if len(tags) > 0 else []
    else:
        findata = []
        start, end, step, pagesize = map(int, pagerange.split(','))
        current = start
        while True:
            anurl = url + '?%s=%d' % (pagekey, current)
            logging.info('Crawling url %s' % (anurl))
            # Consistency fix: forward `agent` here too, matching the
            # single-page branch above.
            root = get_cached_url(anurl, agent=agent)
            logging.info('Got url %s' % (anurl))
            tree = root.getroottree()
            tags = tree.xpath(xfilter)
            items = table_to_dict(tags[0], strip_lf=True) if len(tags) > 0 else []
            findata.extend(items)
            current += step
            # Stop on a short page, or once the last requested page is done.
            if pagesize != -1 and len(items) < pagesize:
                logging.info('Breaking loop. %d vs %d' % (len(items), pagesize))
                break
            if end != -1 and current == end + 1:
                logging.info('Breaking loop. %d vs %d' % (len(items), pagesize))
                break
    return findata
| ivbeg/lazyscraper | lazyscraper/scraper.py | scraper.py | py | 7,862 | python | en | code | 17 | github-code | 50 |
22136621924 | # -*- coding: utf-8 -*-
import json
import uvicorn
from starlette.applications import Starlette
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Route, Mount, WebSocketRoute
# from motor.motor_asyncio import AsyncIOMotorClient
# from starlette.responses import Response
# from jinja2 import Environment, PackageLoader, select_autoescape
# from starlette.websockets import WebSocket
# from starlette.endpoints import HTTPEndpoint, WebSocketEndpoint
from starlette.templating import Jinja2Templates
from starlette.staticfiles import StaticFiles
import database
import uuid
import imghdr
import hashlib
templates = Jinja2Templates(directory='templates')
async def init_db():
    # Debug hook: dumps the current postgres contents to stdout and
    # reports success as JSON.
    database.show_postgre()
    return JSONResponse({'message': 'ok'})
async def homepage(request):
    """Render the landing page, resolving the logged-in user (if any)
    from the 'token' cookie against the in-memory session table."""
    token = request.cookies.get('token')
    user = request.app.state.logged_users.get(token) if token is not None else None
    context = {
        'request': request,
        'cookie': request.cookies.get('mycookie'),
        'user': user,
    }
    return templates.TemplateResponse('index.html', context)
# TODO: rewrite this to read from postgresql directly
async def websocket_endpoint(websocket):
    # Accept the connection, push the current friends snapshot once,
    # then hold the socket open, draining (and logging) client messages
    # until the peer disconnects.
    await websocket.accept()
    hello = database.show_partialy()
    # ensure_ascii=False keeps non-latin names readable in the payload.
    send_to = json.dumps(hello, ensure_ascii=False)
    # count = await database.do_count_docs(websocket.app.state.db)
    await websocket.send_text(send_to)
    while True:
        try:
            hell = await websocket.receive_text()
            print('hello there', hell)
        except Exception:
            # receive_text raises once the client disconnects; exit the loop.
            print('here')
            break
async def sign_up(request):
    """Register a new account from the posted form and report the result."""
    form_data = await request.form()
    result = database.sign_up(form_data)
    return JSONResponse({'message': result})
async def log_out(request):
    """Drop the caller's session token and redirect to the home page."""
    token = request.cookies.get('token')
    removed = request.app.state.logged_users.pop(token, None)
    print(removed)
    return RedirectResponse(url='/', status_code=303)
async def login_route(request):
    """Authenticate the posted credentials, mint a session token, and
    redirect to the user's profile page (303 so the POST becomes a GET)."""
    form_data = await request.form()
    try:
        user = database.sign_in(form_data)
    except database.SignInError as exc:
        return JSONResponse({'message': str(exc)})
    session_token = str(uuid.uuid4())
    request.app.state.logged_users[session_token] = user
    print(user, 'here user data')
    print(request.app.state.logged_users)
    response = RedirectResponse(url=f'/users/{user.id}', status_code=303)
    # Session cookie lives for 24 hours.
    response.set_cookie(key='token', value=session_token, path="/", max_age=60 * 60 * 24)
    return response
async def logged_user(request):
    """Render a user's profile page, flagging whether the viewer owns it."""
    token = request.cookies.get('token')
    viewer = request.app.state.logged_users.get(token) if token is not None else None
    profile_id = request.path_params['user_id']
    profile = database.get_user_by_id(profile_id)
    return templates.TemplateResponse('user.html', {
        'request': request,
        'cookie': request.cookies.get('mycookie'),
        'user': viewer,
        'wuser': profile,
        # Owner check: only meaningful when someone is actually logged in.
        'self_viewing': viewer is not None and viewer.id == profile_id,
    })
async def vk_pstgre(_request):
    # Presumably imports VK friend data into postgres (see
    # database.take_to_pstgr) — TODO confirm against database module.
    database.take_to_pstgr()
    return JSONResponse({'message': 'ok'})
async def show_db(_request):
    # Return the stored friends list from postgres as JSON.
    flist = database.show_postgre()
    return JSONResponse({'friends': flist})
async def remove_flist(_request):
    # Drops score data (see database.remove_score — exact scope TODO:
    # confirm), then returns the remaining friends list.
    database.remove_score()
    flist = database.show_postgre()
    return JSONResponse({'friends': flist})
async def find_by_city(request):
    """Return all stored friends located in the city named in the URL path."""
    city = request.path_params['city']
    return JSONResponse({'friends': database.get_by_city(city)})
async def upload_file(request):
    """Accept an avatar upload for the logged-in user.

    Validates the session, sniffs the real image type from the bytes
    (never trusting the client's filename), stores the file under a
    content-hash name, and records that name for the user.
    """
    token = request.cookies.get('token')
    logged_user = None
    if token is not None:
        logged_user = request.app.state.logged_users.get(token)
    if logged_user is None:
        return JSONResponse(
            {'message': 'GO AWAY'}, status_code=403)
    upload = await request.form()
    contents = await upload["avatar"].read()
    # Detect the image type from the bytes, not the client-supplied name.
    detected_filetype = imghdr.what(None, h=contents)
    allowed_types = ['jpeg', 'png']
    if detected_filetype not in allowed_types:
        return JSONResponse({'message': 'bad filetype'})
    # Content-addressed name: identical uploads dedupe to one file.
    hash_str = hashlib.sha1(contents).hexdigest()
    imgdir = "uploads/avatars"
    filename = f"{hash_str}.{detected_filetype}"
    # BUGFIX: the original wrote to the literal path "uploads/avatars/(unknown)",
    # so every upload clobbered one file and the filename recorded in the
    # database never matched anything on disk. Write to the computed name.
    with open(f"{imgdir}/{filename}", "wb") as binary_file:
        binary_file.write(contents)
    database.add_avatar(logged_user.id, filename)
    return JSONResponse({'message': detected_filetype})
routes = [
Route('/', endpoint=homepage),
Route('/users/{user_id:int}', endpoint=logged_user),
Route('/login_route', endpoint=login_route, methods=['POST']),
Route('/logout', endpoint=log_out, methods=['POST']),
Route('/sign_up', endpoint=sign_up, methods=['POST']),
WebSocketRoute('/ws', websocket_endpoint),
Route('/vkpstgr', endpoint=vk_pstgre),
Route('/psqlsh', endpoint=show_db),
Route('/rmflst', endpoint=remove_flist),
Route('/filter/{city}', endpoint=find_by_city),
Route('/upload_file', endpoint=upload_file, methods=['POST']),
Mount('/static', StaticFiles(directory='static'), name='static'),
Mount('/uploads', StaticFiles(directory='uploads'), name='uploads')
]
# , on_startup=[init_db]
app = Starlette(debug=True, routes=routes,)
app.state.logged_users = {}
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=8000, loop='uvloop')
| Surrealistic-Creature/elsewhere | main.py | main.py | py | 5,632 | python | en | code | 0 | github-code | 50 |
2222177898 | import socket
import math
def get_hostname():
    """Return this machine's host name."""
    return socket.gethostname()
def get_local_ip():
    """Resolve this machine's primary IPv4 address, or None on failure."""
    try:
        host = socket.gethostname()
        # First IPv4 addrinfo entry; the sockaddr tuple is (address, port).
        return socket.getaddrinfo(host, None, socket.AF_INET)[0][4][0]
    except socket.error as err:
        print("An error occurred:", err)
        return None
import psutil
def get_disk_space_GB():
    """Return (total, used, free) space of the root filesystem in GB."""
    GB = 1024 ** 3
    usage = psutil.disk_usage('/')
    return usage.total / GB, usage.used / GB, usage.free / GB
def get_disk_info_string():
    """Format root-disk usage as 'total, used, free[free%]' in GB."""
    total, used, free = get_disk_space_GB()
    # Whole-percent free space, floored, rendered with one decimal place
    # (e.g. "42.0%") to match the established output format.
    free_pct = math.floor(free / total * 100)
    parts = ["{:.1f}GB".format(v) for v in (total, used, free)]
    return f"{parts[0]}, {parts[1]}, {parts[2]}[{free_pct:.1f}%]"
18173730026 | #! /usr/bin/python3
from evaluator import load_gold_NER, load_gold_NER_ext
import sys
from os import listdir, system
import re
from math import sqrt
from xml.dom.minidom import parse
from nltk.tokenize import word_tokenize
import evaluator
# dictionary containig information from external knowledge resources
# WARNING: You may need to adjust the path to the resource files
external = {}
with open("resources/HSDB.txt") as h:
for x in h.readlines():
external[x.strip().lower()] = "drug"
with open("resources/DrugBank.txt") as h:
for x in h.readlines():
(n, t) = x.strip().lower().split("|")
external[n] = t
# --------- tokenize sentence -----------
# -- Tokenize sentence, returning tokens and span offsets
def tokenize(txt):
    """Tokenize *txt*, returning (token, start, end) triples where start/end
    are inclusive character offsets into the original string."""
    result = []
    pos = 0
    for tok in word_tokenize(txt):
        # Find each token in the source text so offsets survive
        # whitespace and punctuation splitting.
        pos = txt.find(tok, pos)
        result.append((tok, pos, pos + len(tok) - 1))
        pos += len(tok)
    return result
# -----------------------------------------------
# -- check if a token is a drug part, and of which type
suffix_drug = ['azole', 'amine', 'farin', 'idine', 'mycin',
'ytoin', 'goxin', 'navir', 'etine', 'lline']
suffix_brand = ['pirin', 'DOCIN', 'TAXOL', 'GASYS', 'VIOXX',
'xitil', 'EVIVE', 'TROTM', 'IMBEX', 'NIVIL']
suffix_group = ['gents', 'itors', 'sants', 'etics', 'otics',
'drugs', 'tives', 'lants', 'mines', 'roids']
suffix_drug_n = ['idine', 'PCP', 'gaine', '18-MC', '-NANM',
'toxin', 'MHD', 'xin A', 'tatin', 'mPGE2']
def classify_token(txt):
    """Classify a token as a drug entity type using the external lexicon
    plus surface-form heuristics; returns "NONE" for non-entities."""
    key = txt.lower()
    if key in external:
        return external[key]
    # Heavily hyphenated tokens look like chemical names.
    if txt.count('-') >= 3:
        return 'drug_n'
    # Many uppercase letters or separator characters: likewise drug_n.
    separators = {'-', '_', ','}
    oddity = sum(1 for ch in txt if ch.isupper() or ch in separators)
    if oddity >= 3:
        return "drug_n"
    return "NONE"
# --------- Entity extractor -----------
# -- Extract drug entities from given text and return them as
# -- a list of dictionaries with keys "offset", "text", and "type"
def extract_entities(stext):
    """Extract drug entities from *stext*, returning a list of dicts with
    "offset" ("start-end", inclusive), "text", and "type" keys.

    WARNING: This function must be extended to deal with
    multi-token entities.
    """
    found = []
    for tok, start, end in tokenize(stext):
        label = classify_token(tok)
        if label == "NONE":
            continue
        found.append({
            "offset": "%d-%d" % (start, end),
            "text": stext[start:end + 1],
            "type": label,
        })
    return found
# --------- main function -----------
def nerc(datadir, outfile):
    """Run entity extraction over every XML file in *datadir*, writing one
    pipe-separated line (sid|offset|text|type) per entity to *outfile*."""
    with open(outfile, 'w') as out:
        for fname in listdir(datadir):
            # Each file is an XML document of <sentence> elements.
            tree = parse(datadir + "/" + fname)
            for sentence in tree.getElementsByTagName("sentence"):
                sid = sentence.attributes["id"].value
                stext = sentence.attributes["text"].value
                for ent in extract_entities(stext):
                    # Format expected by the evaluation script.
                    print(sid, ent["offset"], ent["text"], ent["type"],
                          sep="|", file=out)
# --------- MAIN PROGRAM -----------
# --
# -- Usage: baseline-NER.py target-dir
# --
# -- Extracts Drug NE from all XML files in target-dir
# --
# directory with files to process
# datadir = 'data/devel' # golddir
# outfile = 'out.txt'
datadir = sys.argv[1]
outfile = sys.argv[2]
nerc(datadir, outfile)
evaluator.evaluate("NER", datadir, outfile)
entities_clean = {}
entities, entities_clean, entities_suffix, entities_info = load_gold_NER_ext(
'data/train')
# e2 = list(entities['CLASS'])
# with open('parsed.txt','w') as file:
# for entity in e2:
# file.write(f"{entity}\n")
print('\n' * 4 + '_' * 60)
for key in entities_clean:
mu = 0
pct = 0
for elem in entities_clean[key]:
mu += len(elem) / len(entities_clean[key])
if elem[0].isupper():
pct += 1 / len(entities_clean[key])
std = 0
for elem in entities_clean[key]:
std += 1 / (len(entities_clean[key]) - 1) * (len(elem) - mu)**2
std = sqrt(std)
print(key)
print(f"entities info (numbers): {entities_info[key]['numbers']}")
print(f"entities info (capital): {entities_info[key]['capital']}")
print(f"entities info (combo): {entities_info[key]['combo']}")
print(
f"entities info (start): {entities_info[key]['start'].most_common(5)}")
print(
f"entities info (letters): {entities_info[key]['letters'].most_common(5)}")
print(f"entities info (dashes): {entities_info[key]['dashes']}")
print(f"Size of the dataset: {sum(entities_suffix[key].values())}\n")
print(f'Unique words: {len(entities_clean[key])}')
print(f'Mean: {round(mu, 2)}')
print(f'Std: {round(std, 2)}')
print(f'({mu - std}, {mu + std})')
print(f'Percentage of uppercase {round(100*pct, 2)}')
print(entities_suffix[key].most_common(10))
print('_' * 60)
# 5, 6 --> brand
# 7 undecisive
# 8, 9 --> drug
# drug_n
# Size of the dataset: 23
# Mean: 16.43
# Std: 9.37
# (7.069342819204033, 25.80022239818728)
# ____________________
# drug
# Size of the dataset: 540
# Mean: 11.02
# Std: 3.29
# (7.732574197148425, 14.304462839888613)
# ____________________
# brand
# Size of the dataset: 123
# Mean: 7.98
# Std: 2.75
# (5.228763000039568, 10.738716674757196)
# ____________________
# group
# Size of the dataset: 350
# Mean: 19.04
# Std: 9.28
# (9.756649536027936, 28.317636178257743)
# ____________________
# chmod +x ./startup.sh
| guillermocreus/AHLT | lab1/baseline-NER.py | baseline-NER.py | py | 6,001 | python | en | code | 0 | github-code | 50 |
22993556400 | """
Lots of code taken from deap
"""
input_names = ['b0', 'b1', 'b2', 'b3', 'b4']
PARITY_FANIN_M = 5
PARITY_SIZE_M = 2 ** PARITY_FANIN_M

# Build the full truth table for even-5 parity: inputs[i] is the 5-bit
# big-endian expansion of i, outputs[i] is 1 iff i has an even number of
# set bits.
inputs = [None] * PARITY_SIZE_M
outputs = [None] * PARITY_SIZE_M

for i in range(PARITY_SIZE_M):
    inputs[i] = [None] * PARITY_FANIN_M
    value = i
    dividor = PARITY_SIZE_M
    parity = 1
    for j in range(PARITY_FANIN_M):
        # FIX: use floor division; the original `dividor /= 2` is float
        # division under Python 3 (this code was ported from Python 2/deap),
        # which leaked floats into `value`/`dividor`. Results were still
        # correct for powers of two, but integers are the intended types.
        dividor //= 2
        if value >= dividor:
            inputs[i][j] = 1
            parity = int(not parity)
            value -= dividor
        else:
            inputs[i][j] = 0
    outputs[i] = parity
class Parity5():
    """Fitness evaluator for the even-5-parity GP benchmark."""

    def evaluate(self, individual):
        """Evaluate *individual* (an expression string over b0..b4) against
        the parity truth table; returns (mismatch_count, {})."""
        errors = PARITY_SIZE_M
        for idx, row in enumerate(inputs):
            predicted = eval(individual, dict(zip(input_names, row)))
            # One less error for every row the expression gets right.
            if predicted == outputs[idx]:
                errors -= 1
        return (errors, {})
if __name__ == "__main__":
import core.grammar as grammar
import core.sge
experience_name = "5Parity/"
grammar = grammar.Grammar("grammars/5_bit_parity_grammar.txt", 6, 17)
evaluation_function = Parity5()
core.sge.evolutionary_algorithm(grammar = grammar, eval_func=evaluation_function, exp_name=experience_name) | nunolourenco/dsge | src/examples/parity_5.py | parity_5.py | py | 1,196 | python | en | code | 11 | github-code | 50 |
38281810819 | #!python3
import os
# Randomized stress test: pit the fast solution against the brute-force
# reference on generated cases until their outputs diverge.
x = 0  # number of test cases run so far
while 1:
    x += 1
    # Generate a fresh random test case.
    os.system('./generator > input.txt')
    # Run the candidate (fast) and reference (slow) solutions on it.
    os.system('./dmopc20c3p4 < input.txt > output.txt')
    os.system('./slow < input.txt > slow.txt')
    # Any difference means the fast solution is wrong for this input;
    # input.txt is left on disk for debugging.
    if open('slow.txt').read() != open('output.txt').read():
        print("WA")
        exit(0)
    print("AC random test "+str(x))
| pidddgy/competitive-programming | codeforces/fastslow.py | fastslow.py | py | 326 | python | en | code | 0 | github-code | 50 |
35879848034 | # __ TALLER 4 __
# Ejercicio 1
listaCanciones = ['La noche más linda', 'Plan', 'Monalisa', 'Maldita Traición', 'I Dont Care',
'Ella y Yo', 'Human', 'Movimiento de Cadera',
'Me Niego', 'La Descarada', 'Propuesta Indecente', 'Lento', 'Se Acabó',
'Traicionera', 'Otro Trago', 'De Música Ligera', 'Sweater Weather',
'Happy Together', 'Courtesy Call', 'Stressed Out', 'Estos celos'
]
gap = len(listaCanciones) // 3
# Sublistas
subListaInsertionSort = listaCanciones[:gap]
subListaMergeSort = listaCanciones[gap:2*gap]
subListaShellSort = listaCanciones[2*gap:]
#Algoritmos de ordenamiento
# Insertion Sort O(n^2)
def insertionSort(arreglo):
    """Sort *arreglo* in place with insertion sort and return it.

    Fix: the original body actually implemented bubble sort (repeated
    passes of adjacent swaps) despite the name and the O(n^2) insertion
    sort it was labelled as. This is genuine insertion sort with the same
    in-place, return-the-list contract and identical results.
    """
    for i in range(1, len(arreglo)):
        clave = arreglo[i]
        j = i - 1
        # Shift larger elements one slot right until clave's spot appears.
        while j >= 0 and arreglo[j] > clave:
            arreglo[j + 1] = arreglo[j]
            j -= 1
        arreglo[j + 1] = clave
    return arreglo
# Merge Sort O(n*logn)
def mergeSort(arreglo):
    """Sort *arreglo* in place via recursive merge sort; returns the list."""
    if len(arreglo) <= 1:
        return arreglo
    mid = len(arreglo) // 2
    left = mergeSort(arreglo[:mid])
    right = mergeSort(arreglo[mid:])
    # Merge the two sorted halves back into arreglo, preserving stability
    # (ties taken from the left half first).
    i = j = 0
    for k in range(len(arreglo)):
        take_left = j >= len(right) or (i < len(left) and left[i] <= right[j])
        if take_left:
            arreglo[k] = left[i]
            i += 1
        else:
            arreglo[k] = right[j]
            j += 1
    return arreglo
# ShellSort O(n^2) o O(n^1.5)
def insertShellSort(arreglo):
    """Shell sort: gapped insertion sort with the gap halving each round."""
    gap = len(arreglo) // 2
    while gap:
        # Insertion sort each gap-strided subsequence.
        for idx in range(gap, len(arreglo)):
            actual = arreglo[idx]
            pos = idx
            while pos >= gap and arreglo[pos - gap] > actual:
                arreglo[pos] = arreglo[pos - gap]
                pos -= gap
            arreglo[pos] = actual
        gap //= 2
    return arreglo
# Se usa insertion sort, merge sort, shell sort, para su correspondiente sublista
primeraSublista = insertionSort(subListaInsertionSort)
segundaSublista = mergeSort(subListaMergeSort)
terceraSublista = insertShellSort(subListaShellSort)
# Arreglos ordenados
#print(primeraSublista)
#print(segundaSublista)
#print(terceraSublista)
print('Ultimo elemento primera sublista: ', primeraSublista[-1])
print('Elemento medio segunda sublista: ', segundaSublista[len(segundaSublista)//2])
print('Primer elemento tercera sublista: ', terceraSublista[0])
# Running Time T(n) = O(n^2) + O(n^1.5) + O(nlogn), por lo tanto, T(n) = O(n^2) | NeoEzzio/algoritmos | Talleres/Ejer2_canciones.py | Ejer2_canciones.py | py | 2,723 | python | es | code | 0 | github-code | 50 |
28890305284 |
# Importing the Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn import metrics
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# Data collection and analysis
big_mart_data = pd.read_csv('Train.csv')
big_mart_data.head()
big_mart_data.info()
big_mart_data.isnull().sum()
big_mart_data.shape
# Handling missing values
# Mean value of item weight column
big_mart_data['Item_Weight'].mean()
# Filling the missing values in "Item weight" column with mean value
big_mart_data['Item_Weight'].fillna(big_mart_data['Item_Weight'].mean(), inplace=True)
big_mart_data.isnull().sum()
# Replacing the missing values in "Outlet_Size" with the modal size per outlet type.
# `mode` is a one-row DataFrame: columns are Outlet_Type values, the single
# row holds the most frequent Outlet_Size observed for that type.
mode = big_mart_data.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
print(mode)
missing_values = big_mart_data['Outlet_Size'].isnull()
print(missing_values)
# BUGFIX: the original applied `lambda x: mode`, assigning the entire mode
# DataFrame into every missing cell; look up the modal size for each row's
# outlet type instead.
big_mart_data.loc[missing_values, 'Outlet_Size'] = big_mart_data.loc[missing_values, 'Outlet_Type'].apply(lambda x: mode[x].iloc[0])
big_mart_data.isnull().sum()
# Data Analysis
big_mart_data.describe()
# Numerical Features Plot
sns.set()
# Item_Weight distribution
plt.figure(figsize=(6,6))
sns.distplot(big_mart_data['Item_Weight'])
plt.show()
# Distribution of Item Visibility
plt.figure(figsize=(6,6))
sns.distplot(big_mart_data['Item_Visibility'])
plt.show()
# Item MRP Distribution
plt.figure(figsize=(6,6))
sns.distplot(big_mart_data['Item_MRP'])
plt.show()
# Item outlet sales distribution
plt.figure(figsize=(6,6))
sns.distplot(big_mart_data['Item_Outlet_Sales'])
plt.show()
# Outlet Establishment Year Countplot
plt.figure(figsize=(6,6))
sns.countplot(x='Outlet_Establishment_Year', data=big_mart_data)
plt.show()
# Distribution of Categorical Features
plt.figure(figsize=(6,6))
sns.countplot(x='Item_Fat_Content', data=big_mart_data)
plt.show()
# Item Type Column Distribution
plt.figure(figsize=(30,6))
sns.countplot(x='Item_Type', data=big_mart_data)
plt.show()
# Data Preprocessing
big_mart_data.head()
big_mart_data['Item_Fat_Content'].value_counts()
big_mart_data.replace({'Item_Fat_Content': {'low fat':'Low Fat', 'LF':'Low Fat', 'reg':'Regular'}}, inplace=True)
big_mart_data['Item_Fat_Content'].value_counts()
# label Encoding
encoder = LabelEncoder()
big_mart_data['Item_Identifier'] = encoder.fit_transform(big_mart_data['Item_Identifier'])
big_mart_data['Item_Fat_Content'] = encoder.fit_transform(big_mart_data['Item_Fat_Content'])
big_mart_data['Item_Type'] = encoder.fit_transform(big_mart_data['Item_Type'])
big_mart_data['Outlet_Identifier'] = encoder.fit_transform(big_mart_data['Outlet_Identifier'])
big_mart_data['Outlet_Size'] = encoder.fit_transform(big_mart_data['Outlet_Size'], )
big_mart_data['Outlet_Location_Type'] = encoder.fit_transform(big_mart_data['Outlet_Location_Type'])
big_mart_data['Outlet_Type'] = encoder.fit_transform(big_mart_data['Outlet_Type'])
# Splitting Features and Target
X = big_mart_data.drop(columns='Item_Outlet_Sales', axis=1)
y = big_mart_data['Item_Outlet_Sales']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)

# BUGFIX: Item_Outlet_Sales is a continuous target, and LogisticRegression
# is a classifier — scikit-learn raises "Unknown label type: continuous"
# on fit. Use a regressor, which also matches the R^2 evaluation below.
model = LinearRegression()
model.fit(X_train, y_train)

# Evaluation on training data
training_data_prediction = model.predict(X_train)
r2_train = metrics.r2_score(y_train, training_data_prediction)
print('R Squared value :', r2_train)

# Evaluation on test data
test_data_prediction = model.predict(X_test)
r2_test = metrics.r2_score(y_test, test_data_prediction)
print('R Squared value :', r2_test)
3166437961 | from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
import sys
class Window(QMainWindow):
    """Main window that paints a dotted-outline, yellow-filled square."""

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 720, 720)
        self.setWindowTitle("PyQt5 window")
        self.show()

    def paintEvent(self, event):
        """Repaint handler: draw a 40x40 yellow rectangle at (350, 350).

        Fix: use a local QPainter and end() it explicitly. The original
        stored a fresh QPainter on self every repaint and never ended it,
        which leaks the previous painter each event and relies on garbage
        collection to release the paint device.
        """
        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 5, Qt.DotLine))
        painter.setBrush(QBrush(Qt.yellow, Qt.SolidPattern))
        painter.drawRect(350, 350, 40, 40)
        painter.end()
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
| willdavis576/wheelDashboard | poking.py | poking.py | py | 659 | python | en | code | 0 | github-code | 50 |
10345496394 | """OpenAPI spec validator handlers requests module."""
import contextlib
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlopen
from openapi_spec_validator.handlers.file import FileObjectHandler
class UrllibHandler(FileObjectHandler):
    """OpenAPI spec validator URL (urllib) scheme handler."""
    def __init__(self, *allowed_schemes, **options):
        # Seconds before urlopen gives up; popped so the base class does
        # not receive an option it doesn't understand.
        self.timeout = options.pop('timeout', 10)
        super(UrllibHandler, self).__init__(**options)
        # URL schemes this handler will accept (e.g. 'http', 'https').
        self.allowed_schemes = allowed_schemes
    def __call__(self, url):
        # Reject URLs whose scheme was not whitelisted at construction.
        assert urlparse(url).scheme in self.allowed_schemes
        f = urlopen(url, timeout=self.timeout)
        # closing() guarantees the response is closed even if the base
        # handler raises while reading it.
        with contextlib.closing(f) as fh:
            return super(UrllibHandler, self).__call__(fh)
| eugene-aiken-bytecode/bytecode-airflow-dbt | bytecode-airflow-dbt/lib/python3.9/site-packages/openapi_spec_validator/handlers/urllib.py | urllib.py | py | 781 | python | en | code | 1 | github-code | 50 |
4313034204 | # Пишем фреймворк: Задание 1 - 200 баллов
from dns import reversename, resolver
import Part_2.Lesson_16.lesson_16_task_1.full_scripts.input_text_check as in_text
import colorama
from colorama import Fore
colorama.init(autoreset=True)
def dns_reverse():
    """Prompt for an IP address and print its reverse-DNS (PTR) record."""
    address = in_text.input_text("Enter ip: ")
    try:
        ptr_name = reversename.from_address(address)
        answer = resolver.resolve(str(ptr_name), "PTR")[0]
    except Exception as exc:
        # Covers both malformed addresses and resolver failures.
        print(f"{Fore.RED}==> Error: {exc}{Fore.RESET}")
    else:
        print(answer)
| MancunianRed/Python_Lessons | Part_2/Lesson_16/lesson_16_task_1/full_scripts/reverse_dns.py | reverse_dns.py | py | 567 | python | en | code | 0 | github-code | 50 |
22372064176 | import sys
from cx_Freeze import setup, Executable
def build(cmd=None, ver=None):
    """Freeze PyMangaReader with cx_Freeze.

    cmd, if given, is appended to sys.argv as the distutils command
    (e.g. 'build'); ver becomes the package version string.
    """
    if cmd:
        sys.argv.append(cmd)
    # see http://cx-freeze.readthedocs.org/en/latest/distutils.html
    # On Windows a GUI base suppresses the console window.
    gui_base = "Win32GUI" if sys.platform == "win32" else None
    setup(name="PyMangaReader",
          version=ver,
          description="PyMangaReader",
          executables=[Executable("PyMangaReader.pyw", base=gui_base)])
if __name__ == '__main__':
build()
print("Done.") | jschmer/PyMangaReader | setup.py | setup.py | py | 509 | python | en | code | 3 | github-code | 50 |
12752530025 | #The Tip Calculator
# Reads the meal price, party size, and tip rate from stdin, then prints
# the tax-and-tip total and an even per-person share.

# Pre-tax cost of the meal, in dollars.
price_meal = float(input("How much did your meal cost?: "))
# Number of people splitting the bill evenly.
people = int(input("How many people will be splitting the bill?: "))
# Tip rate as a decimal fraction (e.g. 0.15 for 15%); applied to the
# pre-tax price only.
tip = float(input("What percentage do you want to tip? Please include decimal point: "))
# Meal price plus a flat 10% tax.
tax_total = (price_meal * .10) + price_meal
# Tip computed on the pre-tax meal price.
tip_total = (price_meal * tip)
# Grand total: taxed meal price plus the tip.
meal_total = (tip_total + tax_total)
# Even split across the party.
people_split = (meal_total / people)
# BUGFIX: the user-facing message read "each person will should pay";
# corrected the grammar of the output string.
print(f'Your total cost is ${meal_total:,.2f} and each person should pay ${people_split:,.2f}')
| itsthatbrownguy91/TipCalculator | Tip_Calculator.py | Tip_Calculator.py | py | 1,172 | python | en | code | 0 | github-code | 50 |
35018961984 | ##############################################################################
# VoiceCode, a programming-by-voice environment
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# (C)2000, National Research Council of Canada
#
##############################################################################
"""Various utility functions"""
import getopt, os, re, stat, sys, time, types
import sys
if sys.platform =='win32':
global winsound
import winsound
import os.path
import vc_globals
def dict_merge(dict1, dict2):
    """Merges two dictionaries

    Merges *{ANY: ANY} dict1* and *{ANY: ANY} dict2* and returns the
    result *{ANY: ANY} dict3*.

    If a key exists in both *dict1* and *dict2*, the value in *dict1*
    will be used for *dict3*.

    Fix: the original did ``dict3 = dict2`` and then wrote *dict1*'s
    items into it, silently mutating the caller's *dict2*; copy first
    so neither argument is modified.
    """
    dict3 = dict2.copy()
    dict3.update(dict1)
    return dict3
def gopt(opt_defs, cmd=sys.argv[1:]):
    """High level wrapper around *getopt.getopt*.
    *removes first argument from *cmd* when parsing from *sys.argv*
    *returned options are stored in a dictionary
    *dashes ('-') are removed from the option name in that dictionary
    *a getopt.GetoptError propagates if an invalid option is given
    *allows to define default values for options
    **INPUTS**
    *[STR]* cmd=sys.argv[1:] -- list of options and arguments to be parsed.
    *[STR, ANY, ...]* opt_defs -- defines the valid options (short and
    long). The list is an alternate sequence of option name and
    default value. If the name ends with *=*, it means the option
    requires a value. If the name is a single letter, it's a short
    option. The default value is compulsory, even for options that
    don't require a value (can be used to set the switch to on or
    off by default).
    **OUTPUTS**
    *opts, args* -- *opts* is a dictionary of options names and
    values. *args* is the list of arguments.
    """
    opt_dict = {}
    args = []
    #
    # Set default values of options (strip the trailing '=' marker, if
    # any, from the option name)
    #
    index = 0
    while (index < len(opt_defs)):
        opt_name = opt_defs[index]
        opt_default = opt_defs[index + 1]
        opt_name = re.match('^(.*?)(=*)$', opt_name).groups()[0]
        opt_dict[opt_name] = opt_default
        index = index + 2
    #
    # Set options specifications to be used by getopt.
    #
    short_opts = ''
    long_opts = []
    requires_val = {}
    is_long = {}
    index = 0
    while (index < len(opt_defs)):
        opt_name = opt_defs[index]
        opt_default = opt_defs[index + 1]
        index = index + 2
        match = re.match('^(.)(.*?)(=*)$', opt_name)
        opt_name = match.group(1) + match.group(2)
        if (match.group(2) != ''):
            # more than one character: long option
            is_long[opt_name] = 1
        if (match.group(3) != ''):
            # trailing '=': option takes a value
            requires_val[opt_name] = 1
        # 'x in d' replaces d.has_key(x), which was removed in Python 3
        # (and 'in' is also valid on Python 2)
        if opt_name in is_long:
            long_opts = long_opts + [opt_name + match.group(3)]
        else:
            short_opts = short_opts + opt_name
            if opt_name in requires_val:
                # ':' tells getopt this short option requires a value
                short_opts = short_opts + ':'
    #
    # Parse the command line options
    #
    options, args = getopt.getopt(cmd, short_opts, long_opts)
    #
    # Assign parsed values to the options dictionary.
    #
    for an_opt in options:
        opt_name = an_opt[0]
        a_match = re.match('^(-*)(.*)$', opt_name)
        opt_name = a_match.group(2)
        if opt_name not in requires_val:
            #
            # getopt.getopt returns an empty value for BOOLEAN options
            # but we want it to be 1, otherwise it makes it look like the
            # option was off.
            #
            # In getopt.getopt, that didn't matter because the mere presence
            # of the option name indicates it is on.
            #
            # In util.gopt, all options have an entry in the returned
            # dictionary, and its value indicates whether it's on or off
            #
            opt_val = 1
        else:
            opt_val = an_opt[1]
        opt_dict[opt_name] = opt_val
    return opt_dict, args
###############################################################################
# list processing
###############################################################################
def remove_occurences_from_list(item, list, max_occurs=None):
    """Returns a copy of *list* with occurrences of *item* removed.
    If *max_occurs* is not None, at most that many occurrences are
    removed; the remainder of the list is kept unchanged.
    """
    kept = []
    removed = 0
    for position, element in enumerate(list):
        if element == item:
            removed = removed + 1
            if max_occurs != None and removed >= max_occurs:
                # removal quota reached: keep everything after this
                # occurrence verbatim
                kept.extend(list[position + 1:])
                break
        else:
            kept.append(element)
    return kept
###############################################################################
# file system
###############################################################################
def last_mod(f_name):
    """Returns the time at which a file was last modified.
    *STR f_name* is the path of the file.
    if *f_name* doesn't exist, returns 0.
    """
    try:
        return os.stat(f_name)[stat.ST_MTIME]
    except OSError:
        # missing (or unreadable) file reported as epoch 0
        return 0
###############################################################################
# For redirecting STDIN
###############################################################################
#
# Use this temporary file to send commands to the process' own stdin
#
# Path of the scratch file used to feed data to this process' own stdin.
# NOTE(review): evaluated at import time -- assumes vc_globals.tmp exists
# and is writable; confirm before importing this module in a sandbox.
redirected_stdin_fname = vc_globals.tmp + os.sep + 'user_input.dat'
# Opened eagerly (import-time side effect) so stdin_read_from_string can
# unconditionally close it before rewriting its contents.
redirected_stdin = open(redirected_stdin_fname, 'w')
def stdin_read_from_string(string):
    """Redirects process' own stdin so it reads from a string
    **INPUTS**
    *[STR] string* -- String from which to read stdin
    **OUTPUTS**
    *FILE old_stdin* -- Stream that stdin was originally assigned to.
    """
    global redirected_stdin, redirected_stdin_fname
    # remember the current stdin so the caller can restore it later
    old_stdin = sys.stdin
    #
    # Close temporary file in case it was opened by a previous call to
    # stdin_read_from_string
    #
    redirected_stdin.close()
    #
    # Write the string to temporary file
    #
    # NOTE(review): the data round-trips through a file on disk rather than
    # a StringIO -- presumably because downstream code expects sys.stdin to
    # be a real file object; confirm before simplifying.
    redirected_stdin = open(redirected_stdin_fname, 'w')
    redirected_stdin.write(string)
    redirected_stdin.close()
    #
    # Open temporary file and assign it to stdin
    #
    redirected_stdin = open(redirected_stdin_fname, 'r')
    sys.stdin = redirected_stdin
    return old_stdin
###########################################################################
# Identifying instances of basic types
###########################################################################
def islist(instance):
    """Returns true iif *instance* is a list."""
    # isinstance(x, list) is equivalent to the old types.ListType check
    # (types.ListType *is* list on Python 2) and also works on Python 3,
    # where types.ListType no longer exists.
    return isinstance(instance, list)
def istuple(instance):
    """Returns true iif *instance* is a tuple."""
    # isinstance(x, tuple) is equivalent to the old types.TupleType check
    # and also works on Python 3, where types.TupleType no longer exists.
    return isinstance(instance, tuple)
def issequence(instance):
    """Returns true iif *instance* is a list or a tuple."""
    # Single isinstance check with a tuple of types replaces the chained
    # islist/istuple calls; behavior is identical and Python 3 compatible.
    return isinstance(instance, (list, tuple))
def isfunction(instance):
    """Returns true iif *instance* is a plain Python function."""
    function_type = types.FunctionType
    return isinstance(instance, function_type)
###############################################################################
# path manipulation
###############################################################################
def full_split(path):
    """Splits *path* into the list of all of its components.
    Repeatedly applies os.path.split until the head can shrink no
    further (empty, or a root that splits to itself).
    """
    parts = []
    remainder = path
    while 1:
        head, tail = os.path.split(remainder)
        if head == '':
            parts.append(tail)
            break
        if head == remainder:
            # root directory: os.path.split returns it unchanged
            parts.append(remainder)
            break
        parts.append(tail)
        remainder = head
    # components were collected rightmost-first
    parts.reverse()
    return parts
def find_common(a, b):
    """Returns the length of the longest common prefix of sequences
    *a* and *b* (i.e. the index of the first mismatch).
    """
    matched = 0
    for left, right in zip(a, b):
        if left != right:
            break
        matched = matched + 1
    return matched
def remove_common(first, second):
    """Returns the path components of *first* that remain after
    stripping the common leading components shared with *second*.
    """
    first_parts = full_split(first)
    second_parts = full_split(second)
    prefix_len = find_common(first_parts, second_parts)
    return first_parts[prefix_len:]
def common_path(first, second):
    """Returns the common leading path components of *first* and
    *second*, as a list.
    """
    first_parts = full_split(first)
    second_parts = full_split(second)
    prefix_len = find_common(first_parts, second_parts)
    return first_parts[:prefix_len]
def relative_name(path, head, prefix=''):
    """Expresses *path* relative to the directory *head*.
    Returns *path* unchanged when it does not lie under *head*;
    otherwise returns the remaining components joined together,
    preceded by *prefix* when *prefix* is non-empty.
    """
    common = common_path(path, head)
    whole_head = full_split(head)
    if (whole_head != common):
        # path is not under head: leave it untouched
        return path
    p = []
    if (prefix != ''):
        p.append(prefix)
    p.extend(remove_common(path, head))
    # '*' argument unpacking replaces the apply() builtin, which was
    # removed in Python 3; behavior is identical (including raising
    # TypeError when p is empty, as apply did).
    return os.path.join(*p)
def replace_all_within_VCode(text, prefix='%VCODE_HOME%' ):
    """Replaces every occurrence of the VoiceCode home directory in
    *text* with *prefix*.
    """
    home = vc_globals.home
    return text.replace(home, prefix)
def within_VCode(path):
    """Rewrites *path* relative to the VoiceCode home directory,
    substituting the %VCODE_HOME% placeholder for the leading home
    portion. Paths outside the home directory are returned unchanged.
    """
    home = vc_globals.home
    return relative_name(path, home, prefix='%VCODE_HOME%')
###############################################################################
# miscellaneous
###############################################################################
def bell(length=3):
    """Plays a bell sound for a time proportional to INT length."""
    chime = '\a' * length
    # sys.__stderr__ bypasses any stderr redirection (e.g. during
    # regression testing) so the user still hears the bell
    sys.__stderr__.write(chime)
    sys.__stderr__.flush()
# AD: Uncomment once we have upgraded to wxPython 2.5.
#import wx
#def wxPython_is_unicode_build():
# if 'unicode' in wx.PlatformInfo:
# print "wxPython was built in Unicode mode"
# return True
# else:
# print "wxPython was built in ANSI mode"
# return False
| jboner/emacs-config | VCode/Mediator/util.py | util.py | py | 10,501 | python | en | code | 17 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.