from .so_problem import SingleObjectiveProblem
import numpy as np
from numpy import sin, exp, sqrt, pi
class CrossInTray(SingleObjectiveProblem):
def __init__(self, **kwargs):
super().__init__(n_params=2,
n_constraints=0,
param_type=np.double,
multi_dims=False)
xl = np.ones((self.n_params,)) * -10
xu = np.ones((self.n_params,)) * 10
self.domain = (xl, xu)
self._pareto_set = np.array([[1.34941, -1.34941],
[1.34941, 1.34941],
[-1.34941, -1.34941],
[-1.34941, 1.34941]])
self._pareto_front = -2.06261
self._optimum = min
self._argopt = np.argmin
    ## Override Methods ##
def _f(self, X):
f = -0.0001 * (abs(sin(X[0]) * sin(X[1]) * \
exp(abs(100 - sqrt(X[0]**2 + X[1]**2)/pi))) + 1)**0.1
return f
def _sol_compare(self, y1, y2):
        return y1 <= y2
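# A minimal usage sketch (commented out; assumes the SingleObjectiveProblem
# base class wires _f up as the objective -- nothing below is in the original):
# problem = CrossInTray()
# x_opt = problem._pareto_set[0]   # (1.34941, -1.34941)
# print(problem._f(x_opt))         # ~ -2.06261, the known global minimum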
user_inp = int(input("How many bars should be charged? "))
bars_charged = 0
while (bars_charged < user_inp):
bars_charged = bars_charged + 1
battery_level = "█ "*bars_charged
print("Charging:",battery_level)
print("The battery is fully charged") |
"""
Realizing scheduling ("sching") decisions.
(Acting on sching_decs)
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import str_to_bool, dpid_to_str
from pox.lib.addresses import IPAddr, EthAddr
import pox.lib.packet as pkt
from pox.openflow.of_json import *
import os, sys, inspect, json, pprint
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"ext")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
from ruleparser import RuleParser
from errors import *
from control_comm_intf import ControlCommIntf
log = core.getLogger()
#Right now this dict is filled up by HAND
#TODO: Do this autonomically
info_dict = {'gw_dpid_list': [11,12],
'lscher_addr':('127.0.0.1', 7999),
'scherl_addr':('127.0.0.1', 7998),
'lsensor_addr':'...',
'sensorl_addr':'...',
'acter_vip': '10.0.0.255',
'acter_vmac': '00:00:00:00:00:00',
'sid_pidlist_dict': {},
'sching_tp_src': 7001,
'sching_tp_dst': 7001,
's_entry_dur': [0, 0],
}
ruleparser = RuleParser('ext/schedwalks.xml', 'ext/scheditjobs.xml')
class Actuator (object):
def __init__ (self):
#TODO: active sching_realization
#for control comm with scher, ...
self.cci = ControlCommIntf()
self.cci.reg_commpair(sctag = 'acter-scher',
proto = 'tcp',
_recv_callback = self._handle_recvfromscher,
s_addr = info_dict['lscher_addr'],
c_addr = info_dict['scherl_addr'] )
        #core.addListeners(self) <- could not figure out why this does not work
core.openflow.addListenerByName("ConnectionUp", self._handle_ConnectionUp)
core.openflow.addListenerByName("FlowStatsReceived", self._handle_FlowStatsReceived)
core.openflow.addListenerByName("PacketIn", self._handle_PacketIn )
######################### _handle_*** methods #######################
def _handle_recvfromscher(self, msg):
#msg = [type_, data_]
[type_, data_] = msg
if type_ == 'sp_sching_dec':
s_id, p_id = int(data_['s_id']), int(data_['p_id'])
walk_rule = data_['walk_rule']
itjob_rule = data_['itjob_rule']
#print 'walk_rule: '
#pprint.pprint(walk_rule)
#updating global dicts based on the input rxed from scher
if not (s_id in info_dict['sid_pidlist_dict']):
info_dict['sid_pidlist_dict'][s_id] = []
info_dict['sid_pidlist_dict'][s_id].append(p_id)
#
ruleparser.modify_schedwalkxmlfile_by_walkrule(str(s_id),str(p_id),walk_rule)
ruleparser.modify_scheditjobxmlfile_by_itjobrule(str(s_id),str(p_id),itjob_rule)
if _install_schrules_proactively:
self.install_proactive_schedwalk(s_id, p_id)
self.install_proactive_scheditjob(s_id, p_id)
# Send "I am done with the job(sch realization)"
print 'sending sching_realization_done to scher...'
msg = json.dumps({'type':'sp_sching_reply',
'data':{'s_id':s_id,
'p_id':p_id,
'reply':'done'} })
self.cci.send_to_client('acter-scher', msg)
    #Since the SW rules are set proactively from the beginning, no packet_in is expected.
def _handle_PacketIn (self, event):
packet = event.parsed
print '#handle_data_packet is called;'
ip = packet.find('ipv4')
if ip is None:
print "packet", packet," isn't IP!"
return
print "Rxed packet: ", packet, "from sw_dpid: ", dpidToStr(event.connection.dpid)
print "Src IP:%s, Dst IP: %s" %(ip.srcip, ip.dstip)
def _handle_ConnectionUp (self, event):
print "Connection %s" % (event.connection)
if _install_deneme_flow and event.connection.dpid == 3:
print "Sending deneme_flow to sw_dpid:%s " %(event.connection.dpid)
self.send_ofmod_forward ('handle_conn_up', event.connection, '10.0.0.32', '10.0.0.31',
6000, 4, info_dict['s_entry_dur'])
def _handle_FlowStatsReceived (self, event):
stats = flow_stats_to_list(event.stats)
print "FlowStatsReceived from ",dpidToStr(event.connection.dpid), ": ",stats
    #of course, this works only for mininet networks (dev names like 's1-eth2')
def dev_tfport(self, dev_str):
eth_part = dev_str.split('-', 1)[1]
return int(eth_part.strip('eth'))
######################### install_*** methods #######################
def install_proactive_scheditjob(self, s_id, p_id):
print 'installing proactive_scheditjob for s_id=%s, p_id=%s' % (s_id, p_id)
dict_ = ruleparser.get_itjobruledict_forsp(str(s_id), str(p_id))
print 'itjobdict:'
pprint.pprint(dict_)
for conn in core.openflow.connections:
dpid = str(conn.dpid)
try:
itnodeinfo_list = dict_[dpid]
except KeyError: #sw is not connected to any itnode on the sched walk
continue
for itnodeinfo in itnodeinfo_list:
jobinfo = itnodeinfo['jobinfo']
walkinfo = itnodeinfo['walkinfo']
#
self.send_udp_packet_out(conn=conn,
fw_port=self.dev_tfport(str(walkinfo['swdev_to_node']) ),
payload=json.dumps({'type':'itjob_rule',
'data': jobinfo}),
tp_src=info_dict['sching_tp_src'],
tp_dst=info_dict['sching_tp_dst'],
src_ip=info_dict['acter_vip'],
dst_ip=walkinfo['node_ip'],
src_mac=info_dict['acter_vmac'],
dst_mac=walkinfo['node_mac'] )
def install_proactive_schedwalk(self, s_id,p_id):
print 'installing proactive_schedwalk for s_id=%s, p_id=%s' % (s_id, p_id)
[dict_I, hmfromdpid_dict] = ruleparser.get_walkruledict_forsp(str(s_id), str(p_id))
#print 'walkruledict:'
#pprint.pprint(dict_I)
#print 'hmfromdpid_dict:'
#pprint.pprint(hmfromdpid_dict)
for conn in core.openflow.connections:
dpid = str(conn.dpid) #str(event.connection.dpid)
try:
hm = hmfromdpid_dict[dpid]
except (KeyError):
print '\n# No entry in hm_from_dpid for dpid=%s' % dpid
continue
l_dict = None
counter = 0
while (counter <= hm):
l_dict = dict_I[dpid, counter]
typ = l_dict['typ']
rule_dict = l_dict['rule_dict']
wc_dict = l_dict['wc_dict']
if typ == 'forward':
self.send_ofmod_forward('initial_flows',conn,wc_dict['src_ip'],wc_dict['dst_ip'],
wc_dict['tp_dst'],self.dev_tfport(rule_dict['fport']), info_dict['s_entry_dur'])
#self.send_stat_req(conn)
elif typ == 'modify_forward':
self.send_ofmod_modify_forward('initial_flows', conn, wc_dict['src_ip'],
wc_dict['dst_ip'],wc_dict['tp_dst'],rule_dict['new_dst_ip'],
rule_dict['new_dst_mac'],self.dev_tfport(rule_dict['fport']), info_dict['s_entry_dur'])
#self.send_stat_req(conn)
counter += 1
####################### send_*** methods ###################################
# Method for just sending a UDP packet over any sw_port (broadcast by default)
def send_udp_packet_out(self, conn, payload, tp_src, tp_dst,src_ip, dst_ip,
src_mac, dst_mac, fw_port = of.OFPP_ALL):
msg = of.ofp_packet_out(in_port=of.OFPP_NONE)
msg.buffer_id = None
#Make the udp packet
udpp = pkt.udp()
udpp.srcport = tp_src
udpp.dstport = tp_dst
udpp.payload = payload
#Make the IP packet around it
ipp = pkt.ipv4()
ipp.protocol = ipp.UDP_PROTOCOL
ipp.srcip = IPAddr(src_ip)
ipp.dstip = IPAddr(dst_ip)
# Ethernet around that...
ethp = pkt.ethernet()
ethp.src = EthAddr(src_mac)
ethp.dst = EthAddr(dst_mac)
ethp.type = ethp.IP_TYPE
# Hook them up...
ipp.payload = udpp
ethp.payload = ipp
# Send it to the sw
msg.actions.append(of.ofp_action_output(port = fw_port))
msg.data = ethp.pack()
#show msg before sending
"""
print '*******************'
print 'msg.show(): ',msg.show()
print '*******************'
"""
print "self.send_udp_packet_out; sw%s and fw_port:%s" %(conn.dpid, fw_port)
conn.send(msg)
#Basic send functions for communicating with SWs
def send_clear_swtable(self, conn):
msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
conn.send(msg)
        print 'clearing flows from %s.' % dpid_to_str(conn.dpid)
def send_stat_req(self, conn):
conn.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
print "\nsend_stat_req to sw_dpid=%s\n" % conn.dpid
def send_ofmod_delete(self, conn, nw_src, nw_dst, tp_dst, duration):
msg = of.ofp_flow_mod()
        msg.command = of.OFPFC_DELETE
#wcs
msg.match.dl_type = 0x800 # Ethertype / length (e.g. 0x0800 = IPv4)
msg.match.nw_src = IPAddr(nw_src)
msg.match.nw_dst = IPAddr(nw_dst)
msg.match.nw_proto = 17 #UDP
msg.match.tp_dst = int(tp_dst)
#
msg.idle_timeout = duration[0]
msg.hard_timeout = duration[1]
conn.send(msg)
print '\nsend_ofmod_delete to sw_dpid=%s' % conn.dpid
print 'wcs: src_ip=%s, dst_ip=%s, tp_dst=%s\n' % (nw_src,nw_dst,tp_dst)
def send_ofmod_forward(self, _called_from, conn, nw_src, nw_dst, tp_dst, fport, duration):
msg = of.ofp_flow_mod()
#msg.match = of.ofp_match.from_packet(packet)
msg.priority = 0x7000
#msg.match = of.ofp_match(dl_type = pkt.ethernet.IP_TYPE, nw_proto = pkt.ipv4.UDP_PROTOCOL, nw_dst=IPAddr(nw_dst))
msg.match.dl_type = 0x800 # Ethertype / length (e.g. 0x0800 = IPv4)
msg.match.nw_src = IPAddr(nw_src)
msg.match.nw_dst = IPAddr(nw_dst)
msg.match.nw_proto = 17 #UDP
        if tp_dst is not None:
msg.match.tp_dst = int(tp_dst)
msg.idle_timeout = duration[0]
msg.hard_timeout = duration[1]
#print "event.ofp.buffer_id: ", event.ofp.buffer_id
        if _called_from == 'packet_in':
            msg.buffer_id = event.ofp.buffer_id  # FIXME: 'event' is not in scope here
msg.actions.append(of.ofp_action_output(port = fport))
conn.send(msg)
print '\nsend_ofmod_forward to sw_dpid=%s' % conn.dpid
print 'wcs: src_ip=%s, dst_ip=%s, tp_dst=%s' % (nw_src,nw_dst,tp_dst)
        print 'acts: fport=%s\n' % fport
def send_ofmod_modify_forward(self, _called_from, conn, nw_src, nw_dst, tp_dst, new_dst, new_dl_dst,fport, duration):
msg = of.ofp_flow_mod()
msg.priority = 0x7000
msg.match.dl_type = 0x800 # Ethertype / length (e.g. 0x0800 = IPv4)
msg.match.nw_src = IPAddr(nw_src)
msg.match.nw_dst = IPAddr(nw_dst)
msg.match.nw_proto = 17 #UDP
        if tp_dst is not None:
msg.match.tp_dst = int(tp_dst)
msg.idle_timeout = duration[0]
msg.hard_timeout = duration[1]
        if _called_from == 'packet_in':
            msg.buffer_id = event.ofp.buffer_id  # FIXME: 'event' is not in scope here
msg.actions.append(of.ofp_action_nw_addr(nw_addr = IPAddr(new_dst), type=7))
msg.actions.append(of.ofp_action_dl_addr(dl_addr = EthAddr(new_dl_dst), type=5))
msg.actions.append(of.ofp_action_output(port = fport))
conn.send(msg)
print '\nsend_ofmod_modify_forward to sw_dpid=%s' % conn.dpid
print 'wcs: src_ip=%s, dst_ip=%s, tp_dst=%s' % (nw_src,nw_dst,tp_dst)
print 'acts: new_dst=%s, new_dl_dst=%s, fport=%s\n' % (new_dst, new_dl_dst, fport)
##############################################################################
_install_schrules_proactively = None
_install_deneme_flow = None
def launch (proactive_install=True, deneme_flow=False):
global _install_schrules_proactively, _install_deneme_flow
#
_install_schrules_proactively = str_to_bool(proactive_install)
_install_deneme_flow = str_to_bool(deneme_flow)
#
core.registerNew(Actuator)
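# Example launch (the module name "actuator" is hypothetical; "deneme" is
# Turkish for "test/trial"):
#   ./pox.py actuator --proactive_install=True --deneme_flow=False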
import pymongo
try:
    myclient = pymongo.MongoClient("mongodb://mongoadmin:secret@localhost:27888/?authSource=admin")
    myclient.server_info()  # MongoClient connects lazily; force a round-trip so timeouts surface here
    mydb = myclient["reddit_cross_stocks"]
except pymongo.errors.ServerSelectionTimeoutError as err:
print(err)
"""Exercício Python 004: Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo
primitivo e todas as informações possíveis sobre ele."""
informacao = input('Digite alguma coisa: ')
print('O tipo primitivo da informação digitada é ', type(informacao))
print('A informação digitada contém apenas espaços? ', informacao.isspace())
print('A informação digitada contém apenas números? ', informacao.isnumeric())
print('A informação digitada contém apenas caracteres alfabéticos? ', informacao.isalpha())
print('A informação digitada é alfa numérica? ', informacao.isalnum())
print('A informação digitada está em maiúsculas? ', informacao.isupper())
print('A informação digitada está em minúsculas? ', informacao.islower())
print('A informação digitada está capitalizada? ', informacao.istitle())
|
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'personal/home.html')
def links(request):
return render(request, 'personal/links.html')
from graphviz import Digraph, Graph
from itertools import combinations
def data_model1():
dot = Digraph(comment="First data model")
dot.node("m", "mileage")
dot.node("y", "year")
dot.node("p", "price")
dot.edges(["mp", "yp"])
dot.graph_attr["rankdir"] = "LR"
return dot
def draw_new_and_usage_clusters(dot):
new_car_config = ["model", "transmission", "fuelType", "engineSize", "year"]
with dot.subgraph(name="clusterA") as c:
c.attr(style="filled", color="lightgrey", shape="egg")
for node in new_car_config:
c.node(node)
c.node_attr.update(style="filled", color="white")
# for pair in combinations('mtfey', r=2):
# c.edge(*pair)
# c.edges([('a0', 'a1'), ('a1', 'a2'), ('a2', 'a3')])
c.attr(label="New car configuration")
with dot.subgraph(name="clusterB") as c:
c.attr(style="filled", color="lightgrey")
c.node_attr.update(style="filled", color="white")
c.node("mileage")
c.attr(label="Usage")
def data_model2():
dot = Graph(comment="Data model 2", engine="fdp")
draw_new_and_usage_clusters(dot)
features = ["price", "tax", "mpg"]
short = "pag"
with dot.subgraph(name="clusterC") as c:
c.attr(style="filled", color="lightgrey")
c.node_attr.update(style="filled", color="white")
for node in features:
c.node(node)
c.attr(label="Predictables")
# for s, feature in zip(short, features):
# dot.node(s, feature)
dot.edge("clusterA", "mpg", dir="forward")
dot.edge("clusterA", "tax", dir="forward", splines="ortho")
dot.edge("clusterA", "price", dir="forward")
dot.edge("clusterB", "price", dir="forward", splines="curved")
# dot.edge('clusterA', 'clusterC', dir='forward')
# dot.edge('clusterB', 'clusterC', dir='forward')
dot.graph_attr["rankdir"] = "LR"
# dot.unflatten()
return dot
def data_model3():
dot = Graph(
comment="Data model 3",
engine="fdp",
)
draw_new_and_usage_clusters(dot)
features = ["price", "tax", "mpg"]
short = "pag"
with dot.subgraph(name="clusterC") as c:
c.attr(style="filled", color="lightgrey")
c.node_attr.update(style="filled", color="white")
for node in ["tax", "mpg"]:
c.node(node)
c.attr(label="Others")
dot.edge("clusterA", "price", dir="forward")
dot.edge("clusterB", "price", dir="forward")
dot.edge(
"clusterC",
"price",
dir="forward",
color="grey",
)
# dot.edge('clusterA', 'clusterC', dir='forward')
# dot.edge('clusterB', 'clusterC', dir='forward')
# dot.graph_attr['rankdir'] = 'LR'
return dot
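# A minimal rendering sketch (output names are placeholders, not from the
# original; requires the Graphviz binaries on PATH):
if __name__ == "__main__":
    for name, model in [("model1", data_model1()),
                        ("model2", data_model2()),
                        ("model3", data_model3())]:
        model.render("tmp_" + name + ".gv", format="png", cleanup=True)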
from typing import List
class Solution:
def pancakeSort(self, arr: List[int]) -> List[int]:
res = []
n = len(arr)
        for i in range(n, 0, -1):
            index = arr.index(i)  # current position of value i
            if index == i - 1:  # already in its final place
                continue
            if index != 0:
                res.append(index + 1)  # flip value i to the front
                arr[:index + 1] = arr[:index + 1][::-1]
            res.append(i)  # flip value i into its final place (index i-1)
            arr[:i] = arr[:i][::-1]
        return res
def main():
sol = Solution()
print(sol.pancakeSort([3,2,4,1]))
print(sol.pancakeSort([1,2,3]))
if __name__ == '__main__':
    main()
from faint import *
#start
# Create a 320 by 200 bitmap
bmp = Bitmap(320, 200)
# Fill the bitmap with magenta
for x in range(320):
for y in range(200):
bmp.set_pixel(x,y,(255,0,255))
# Draw the bitmap at x,y=10,10 in the active canvas
get_active_image().blit((10,10), bmp)
# Solution of;
# Project Euler Problem 102: Triangle containment
# https://projecteuler.net/problem=102
#
# Three distinct points are plotted at random on a Cartesian plane, for which
# -1000 ≤ x, y ≤ 1000, such that a triangle is formed. Consider the following
# two triangles:
#   A(-340,495), B(-153,-910), C(835,-947)
#   X(-175,41), Y(-421,-714), Z(574,-645)
# It can be verified that triangle ABC contains the origin, whereas triangle
# XYZ does not. Using triangles.txt (right click and 'Save Link/Target As...'),
# a 27K text file containing the co-ordinates of one thousand "random"
# triangles, find the number of triangles for which the interior contains the
# origin. NOTE: The first two examples in the file represent the triangles in
# the example given above.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
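# The timed stub above measures a no-op; purely as an illustration (a sketch,
# not the author's solution): the origin lies inside triangle ABC exactly when
# the cross products OA x OB, OB x OC and OC x OA all share the same sign.
def contains_origin(ax, ay, bx, by, cx, cy):
    d1 = ax * by - ay * bx  # z-component of OA x OB
    d2 = bx * cy - by * cx  # z-component of OB x OC
    d3 = cx * ay - cy * ax  # z-component of OC x OA
    return (d1 > 0 and d2 > 0 and d3 > 0) or (d1 < 0 and d2 < 0 and d3 < 0)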
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 102
timed.caller(dummy, n, i, prob_id)
#!/usr/bin/python2 -u
from pwn import *
import time
curr = "picoCTF{"
for c in range(1,14):
print("........................................................",c)
nc = remote('2018shell1.picoctf.com', 37131)
nc.recvuntil("Please enter your situation report: ")
payload2 = "A"*11+"B"*(25-c)
nc.sendline(payload2)
cipher = nc.recv(1024).decode('hex')
nc.close()
for i in range(32,128):
nc2 = remote('2018shell1.picoctf.com', 37131)
nc2.recvuntil("Please enter your situation report: ")
payload = "A"*11 + "B"*(14-c) + curr + chr(i)
nc2.sendline(payload)
cipher2 = nc2.recv(1024).decode('hex')
nc2.close()
if cipher2[80:96] == cipher[128:144]:
curr += chr(i)
break
time.sleep(0.05)
    if c > len(curr):  # recovery fell behind: a byte was missed this round
        print "trash"
        time.sleep(100)
print "deciphered text is: " + curr
#!/usr/bin/env python
#
# Contacts server front end
#
# The webserver module is responsible for incoming and outgoing HTTP requests.
#
import tornado.httpserver
import tornado.auth
import tornado.ioloop
import tornado.web
import os
import re
import time
import calendar
import base64
import traceback
import logging
import urllib
import cStringIO
import json
import cgi
import webconfig
from urlparse import urlparse
import gravatar
import model # replace this with a dbserver
import xmlreader
# The OpenID+OAuth hybrid stuff doesn't work for us because (AFAICT) we're not
# world-routable yet. So this is just doing authentication; then we hand out a session cookie ourselves.
class YahooConnectHandler(tornado.web.RequestHandler, tornado.auth.OpenIdMixin):
_OPENID_ENDPOINT = "https://open.login.yahooapis.com/openid/op/auth"
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self.onConnect))
return
to = self.get_argument("to", None)
if not to: to = "/"
self.authenticate_redirect(callback_uri = "http://localhost:8300/connect/yahoo?" + urllib.urlencode({"to":to}))
# Got the response and unpacked OpenID parameters: handle it
def onConnect(self, claimed_user_data):
logging.info(claimed_user_data)
if not claimed_user_data:
logging.warning("Could not log in Yahoo user")
self.write("unable to connect")
self.finish()
return
# Now do we have a user for this Yahoo identity?
claimed_id = claimed_user_data["claimed_id"] if "claimed_id" in claimed_user_data else claimed_user_data["email"]
if not claimed_id:
self.write("unable to get an identifier")
self.finish()
return
try:
session = model.Session()
id_list = model.identity(session, claimed_id)
if id_list and len(id_list) > 0:
if len(id_list) > 1: # uh oh
self.write("More than one user has claimed this identity. That's confusing. We should try to merge them somehow?")
self.finish()
return
user = id_list[0].user
logging.info("Yahoo ID %s logged in succesfully to user account %s" % (claimed_id, user.id))
else:
# new user
user = model.User()
session.add(user)
id = model.Identity(claimed_id, user, claimed_user_data["name"], model.OP_YAHOO)
id.verifiedNow()
session.add(id)
session.commit()
self.set_secure_cookie("uid", str(user.id))
# Where to?
except Exception, e:
logging.exception(e)
session.rollback()
to = self.get_argument("to", None)
if to:
self.redirect(to)
else:
self.redirect("/")
# This works even on localhost - but it doesn't give us the user's ID.
# For now that's okay. Once we're routable we should be able to do it
# all from YahooConnect and get the access_token in the user object
# passed to onConnect. (i.e. we can chuck this handler)
class YahooAuthorizeHandler(tornado.web.RequestHandler, tornado.auth.OAuthMixin):
_OAUTH_NO_CALLBACKS = False
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://api.login.yahoo.com/oauth/v2/get_request_token"
_OAUTH_AUTHORIZE_URL = "https://api.login.yahoo.com/oauth/v2/request_auth"
_OAUTH_ACCESS_TOKEN_URL = "https://api.login.yahoo.com/oauth/v2/get_token"
@tornado.web.asynchronous
def get(self):
uid = self.get_secure_cookie("uid")
if not uid:
logging.warn("No user session: redirecting to root")
return self.redirect("/")
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self.onConnect))
return
to = self.get_argument("to", None)
if not to: to = "/listview"
self.authorize_redirect(callback_uri = "http://localhost:8300/authorize/yahoo?" + urllib.urlencode({"to":to}), extra_params = {
'xoauth_displayname': "Mozilla Contacts"
})
def _on_access_token(self, callback, response):
if response.error:
logging.warning("Could not fetch access token")
callback(None)
return
uid = self.get_secure_cookie("uid")
if not uid:
logging.warn("No user session: redirecting to root")
return self.redirect("/")
logging.info("Got OAuth callback: %s" % response)
# NOTE that we assume the user has only one Yahoo account here!
access_token = tornado.auth._oauth_parse_response(response.body)
logging.info(" parsed to: %s" % access_token)
# What we get back is:
# {'xoauth_yahoo_guid': '54MJG4TXXXXXXMDIXXXXX5G5M',
# 'oauth_authorization_expires_in': '855808199', 'oauth_expires_in': '3600',
# 'oauth_session_handle': 'AHNm_UxwMcc-',
# 'secret': '2864f3d82f082cbbcf70b',
# 'key': 'A=EDiRDHTtsx3u5W.I9Vj<lots bigger>...'}
session = model.Session()
user = model.user(session, uid)
id = user.identity(session, model.OP_YAHOO)
if id:
id.accessToken = access_token["key"]
id.accessSecret = access_token["secret"]
id.opaqueID = access_token["xoauth_yahoo_guid"]
session.add(id)
session.commit()
else: # strange, we have no id for this user
self.write("Whoops - we don't have an authenticated Yahoo login for you. That's weird.")
self.finish()
return
to = self.get_argument("to", None)
if to:
self.redirect(to)
else:
self.redirect("/")
def onConnect(self, user):
logging.error("Made it to onConnect")
if not user:
raise tornado.web.HTTPError(500, "Yahoo authorization failed")
# The access token is in access_token - save it
logging.error(user)
def _oauth_consumer_token(self):
self.require_setting("yahoo_consumer_key", "Yahoo OAuth")
self.require_setting("yahoo_consumer_secret", "Yahoo OAuth")
return dict(
key=self.settings["yahoo_consumer_key"],
secret=self.settings["yahoo_consumer_secret"])
class YahooFetchHandler(tornado.web.RequestHandler, tornado.auth.OAuthMixin):
_OAUTH_VERSION = "1.0"
@tornado.web.asynchronous
def get(self):
uid = self.get_secure_cookie("uid")
if not uid:
logging.warn("No user session: redirecting to root")
return self.redirect("/")
args = {"count":"max", "format":"json"}
page = self.get_argument("page", None)
session = model.Session()
user = model.user(session, uid)
id = user.identity(session, model.OP_YAHOO)
access_token = {"key":id.accessToken, "secret":id.accessSecret}
url = "http://social.yahooapis.com/v1/user/" + id.opaqueID + "/contacts"
if access_token:
all_args = {}
all_args.update(args)
consumer_token = self._oauth_consumer_token()
oauth = self._oauth_request_parameters(url, access_token, all_args, method="GET")
args.update(oauth)
if args: url += "?" + urllib.urlencode(args)
callback = self.async_callback(self.onFetch)
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(url, callback=callback)
def _oauth_consumer_token(self):
self.require_setting("yahoo_consumer_key", "Yahoo OAuth")
self.require_setting("yahoo_consumer_secret", "yahoo OAuth")
return dict(
key=self.settings["yahoo_consumer_key"],
secret=self.settings["yahoo_consumer_secret"])
def onFetch(self, response):
if response.code == 401: # need to reauthorize
self.redirect("/authorize/yahoo?to=/fetch/yahoo")
else:
# Convert from GData XML to JSON:
logging.error(response.body)
doc = json.loads(response.body)
logging.error(doc)
result = {"status":"ok"}
result["contacts"] = contacts = []
anonCount = 1
for aContact in doc["contacts"]["contact"]:
try:
person = {}
contacts.append(person)
for aField in aContact["fields"]:
if aField["type"] == "name":
name = person["name"] = {};
if aField["value"]["givenName"]: name["givenName"] = aField["value"]["givenName"]
if aField["value"]["familyName"]: name["familyName"] = aField["value"]["familyName"]
if aField["value"]["middleName"]: name["middleName"] = aField["value"]["middleName"]
if aField["value"]["prefix"]: name["prefix"] = aField["value"]["prefix"]
if aField["value"]["suffix"]: name["suffix"] = aField["value"]["suffix"]
elif aField["type"] == "phone":
if not "phoneNumbers" in person: person["phoneNumbers"] = [];
aPhone = {}
aPhone["value"] = aField["value"];
if aField["flags"] and len(aField["flags"]) > 0:
aPhone["type"] = aField["flags"][0].lower()
else:
aPhone["type"] = "unlabeled"
person["phoneNumbers"].append(aPhone)
elif aField["type"] == "address":
if not "addresses" in person: person["addresses"] = []
anAddress = {}
if aField["value"]["street"]: anAddress["streetAddress"] = aField["value"]["street"]
if aField["value"]["city"]: anAddress["locality"] = aField["value"]["city"]
if aField["value"]["stateOrProvince"]: anAddress["region"] = aField["value"]["stateOrProvince"]
if aField["value"]["postalCode"]: anAddress["postalCode"] = aField["value"]["postalCode"]
if aField["value"]["country"]: anAddress["country"] = aField["value"]["country"]
if aField["value"]["countryCode"]: anAddress["countryCode"] = aField["value"]["countryCode"]
if aField["flags"] and len(aField["flags"]) > 0:
anAddress["type"] = aField["flags"][0].lower()
else:
anAddress["type"] = "unlabeled"
person["addresses"].append(anAddress)
elif aField["type"] == "email":
if not "emails" in person: person["emails"] = []
anEmail = {}
anEmail["value"] = aField["value"]
if aField["flags"] and len(aField["flags"]) > 0:
anEmail["type"] = aField["flags"][0].lower()
else:
anEmail["type"] = "internet"
person["emails"].append(anEmail)
elif aField["type"] == "yahooid":
if not "accounts" in person: person["accounts"] = []
person["accounts"].append({"type":"yahoo", "username":aField["value"], "domain":"yahoo.com"})
elif aField["type"] == "otherid":
if aField["flags"] and len(aField["flags"]) > 0:
flag = aField["flags"][0]
domain = None
type = None
if flag == "GOOGLE":
domain = "google.com"
type = "google"
elif flag == "ICQ":
domain = "icq.com"
type = "ICQ"
elif flag == "JABBER":
domain = "jabber"
type = "Jabber"
elif flag == "MSN":
domain = "msn.com"
type = "MSN"
elif flag == "SKYPE":
domain = "skype.com"
type = "skype"
else:
domain = flag.lower()
type = flag.lower()
if not "accounts" in person: person["accounts"] = []
person["accounts"].append({"type":type, "username":aField["value"], "domain":domain});
elif aField["type"] == "link":
if aField["flags"] and len(aField["flags"]) > 0:
flag = aField["flags"][0]
type = flag.lower();
if not "urls" in person: person.urls = []
person["urls"].push({"type":type, "value":aField["value"]})
elif aField["type"] == "company":
if not person["organizations"]: person["organizations"] = [{}]
person["organizations"][0].name = aField["value"];
elif aField["type"] == "jobTitle":
if not person["organizations"]:person["organizations"] = [{}]
person["organizations"][0]["title"] = aField["value"];
# Construct a display name:
if "name" in person:
if "givenName" in person["name"] and "familyName" in person["name"]:
person["displayName"] = person["name"]["givenName"] + " " + person["name"]["familyName"] # FIXME Eurocentric
elif "givenName" in person["name"]:
person["displayName"] = person["name"]["givenName"]
elif "familyName" in person["name"]:
person["displayName" ]= person["name"]["familyName"]
# if not person["displayName"] and person["accounts"]:
# for p in person["accounts"]:
# if p["domain"] == "yahoo.com":
# person["displayName"] = p["username"]
# break
# if not person["displayName"]: person["displayName"] = person["accounts"][0]["username"]
# if not person["displayName"] and person["emails"]:
# person["displayName"] = person.emails[0]["value"];
# }
# if (!person.displayName) {
# person.displayName = "Unnamed Yahoo Contact " + anonCount;
# anonCount += 1;
# }
except Exception, e:
logging.exception(e)
pass
self.write(json.dumps(result))
self.finish()
import cv2
import numpy as np
def lucas_kanade_approach(image1, image2):
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
old_gray = image1
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(image1)
frame_gray = image2
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = map(int, new.ravel())  # cv2 drawing functions need integer pixel coords
        c, d = map(int, old.ravel())
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        image2 = cv2.circle(image2, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(image2,mask)
# cv2.imshow('frame',img)
cv2.imshow("Result", img);cv2.waitKey();cv2.destroyAllWindows()
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
# given two images, return a set of matching points based on
# Sift keypoints w/ FLANN matching.
def match_images(image1, image2, render_output=False, ratio=0.7, flann_checks = 50):
image1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
image2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create(sigma=1.5)
keypoints_1, descriptors_1 = sift.detectAndCompute(image1, None)
keypoints_2, descriptors_2 = sift.detectAndCompute(image2, None)
# FLANN matching adapted from openCV tutorial:
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
# FLANN Matching
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 3)
search_params = dict(checks=flann_checks) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(descriptors_1,descriptors_2,k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]
good_matches = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < ratio*n.distance:
matchesMask[i]=[1,0]
# Extraction of coordinates detailed here:
# https://stackoverflow.com/questions/46607647/sift-feature-matching-point-coordinates
point1 = keypoints_1[m.queryIdx].pt
point2 = keypoints_2[m.trainIdx].pt
good_matches.append([point1, point2])
## Draw pairs in purple, to make sure the result is ok
cv2.circle(image1, (int(point1[0]),int(point1[1])), 10, (255,0,255), -1)
cv2.circle(image2, (int(point2[0]),int(point2[1])), 10, (255,0,255), -1)
draw_params = dict(matchColor = (0,255,0),
singlePointColor = (255,0,0),
matchesMask = matchesMask,
flags = 0)
img3 = cv2.drawMatchesKnn(image1,keypoints_1,image2,keypoints_2,matches,None,**draw_params)
#plt.imshow(img3,),plt.show()
if render_output:
cv2.imshow("Result", img3);cv2.waitKey();cv2.destroyAllWindows()
    return good_matches
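# A minimal usage sketch (file names are placeholders, not from the original):
# img1, img2 = cv2.imread("frame1.png"), cv2.imread("frame2.png")
# pairs = match_images(img1, img2, render_output=True)
# lucas_kanade_approach(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY),
#                       cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))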
empdata={'empno':101,'name':'ravi','salary':9000}
print(empdata)
print(empdata['name'])
empdata['salary']=13000
print(empdata)
del empdata['name']
print(empdata)
#!/usr/bin/env python
import re
from sploitego.cmdtools.nmap import NmapReportParser
from canari.maltego.entities import IPv4Address
from canari.framework import configure, superuser
from canari.maltego.message import UIMessage, Field, Label
from common.entities import Port
from common.nmap import getscanner, savereport
__author__ = 'Nadeem Douba'
__copyright__ = 'Copyright 2012, Sploitego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Nadeem Douba'
__email__ = 'ndouba@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform',
'onterminate'
]
@superuser
@configure(
label='To Client IPv4Address [NTP monlist]',
description='This transform performs an Nmap NTP monlist scan to retrieve a list of NTP clients.',
uuids=['sploitego.v2.PortToClients_NTPMonList'],
inputs=[('Reconnaissance', Port)],
)
def dotransform(request, response):
if request.entity.protocol != 'UDP':
response += UIMessage('NTP Monlist scans only work on UDP ports.')
return response
s = getscanner()
args = ['-n', '-Pn', '-sU', '--script=ntp-monlist', '-p', request.value] + request.params
r = s.scan(request.entity.destination, *args)
if r is not None:
for host in r.addresses:
for port in r.ports(host):
if 'ntp-monlist' in port['script']:
to_clients(response, port['script']['ntp-monlist'])
else:
response += UIMessage(s.error)
return response
class Category:
AlternativeTargetInterfaces = 0
PrivateServers = 1
PublicServers = 2
PrivatePeers = 3
PublicPeers = 4
PrivateClients = 5
PublicClients = 6
OtherAssociations = 7
@classmethod
def name(cls, id):
if not id:
return 'Alternative Target Interfaces'
elif id == 1:
return 'Private Servers'
elif id == 2:
return 'Public Servers'
elif id == 3:
return 'Private Peers'
elif id == 4:
return 'Public Peers'
elif id == 5:
return 'Private Clients'
elif id == 6:
return 'Public Clients'
elif id == 7:
return 'Other Associations'
ip_matcher = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
def to_clients(response, output):
cat = None
for line in output.split('\n'):
if not line:
continue
        elif line.startswith('  '):  # entry rows (assumed indented deeper than headers)
e = None
if cat in range(Category.AlternativeTargetInterfaces, Category.OtherAssociations):
for ip in ip_matcher.findall(line):
e = IPv4Address(ip)
e += Field('category', Category.name(cat), displayname='Category')
response += e
elif cat == Category.OtherAssociations:
ip, desc = line.strip().split(' ', 1)
e = IPv4Address(ip)
e += Label('Additional Info', desc)
e += Field('category', Category.name(cat), displayname='Category')
response += e
        elif line.startswith(' '):  # category header rows
for id in range(Category.AlternativeTargetInterfaces, Category.OtherAssociations + 1):
if Category.name(id) in line:
cat = id
                    break
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-01-13 21:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myyxj', '0002_minebtns'),
]
operations = [
migrations.AlterModelOptions(
name='minebtns',
            options={'verbose_name': '商品分类'},  # "Product category"
),
migrations.RenameField(
model_name='minebtns',
old_name='class_name',
new_name='twotype_name',
),
migrations.RenameField(
model_name='minebtns',
old_name='btn',
new_name='typename',
),
migrations.RemoveField(
model_name='minebtns',
name='bref_url',
),
migrations.RemoveField(
model_name='minebtns',
name='is_used',
),
migrations.AlterModelTable(
name='minebtns',
table='yxj_goodstype',
),
]
input = 22  # NOTE: shadows the built-in input()
real_junwon = 11
real_lee = 22
if input == real_junwon:
print("Hello Junwon")
elif input == real_lee:
print("Hello lee")
else:
print("Error")
#########################################################IMPORTS#########################################################
import os
import sys  # used by backup() for sys.exit()
import discord
import dropbox
from dropbox.files import WriteMode
from dropbox.exceptions import ApiError, AuthError
import logging
import glob
#########################################################################################################################
logger = logging.getLogger(__name__)
DROPAPI = os.environ['DROPBOXAPI']
dbx = dropbox.Dropbox(DROPAPI)
LOCALFILE = 'USER.db'
#LOCALLOG = (glob.glob(__DIR__"data/logs/*.log"))
#print(LOCALLOG)
BACKUPPATH = '/USER.db'
#for file in LOCALLOG:
# logfile = file
# print(logfile)
#BACKUPLPATH = "/"+str(os.path.basename(logfile))
def file_len(fname):
with open(fname,encoding='utf-8') as f:
for i, l in enumerate(f):
pass
return i + 1
def restore():
# Download the specific revision of the file at BACKUPPATH to LOCALFILE
try:
os.remove(LOCALFILE)
logging.info("[USER.db] Detected Removing.....Done")
except OSError:
logging.warning("OSError")
pass
try:
logging.info("Downloading current " + BACKUPPATH + " from Dropbox, overwriting " + LOCALFILE + "...")
dbx.files_download_to_file(LOCALFILE, BACKUPPATH)
    except Exception:
logging.warning("RESTORE FAILED NO DATABASE!!")
logging.warning("Ignoring and continuing ..")
def backup():
with open(LOCALFILE, 'rb') as f:
# We use WriteMode=overwrite to make sure that the settings in the file
# are changed on upload
logging.info("Uploading " + LOCALFILE + " to Dropbox as " + BACKUPPATH + "...")
try:
try:
dbx.files_delete(BACKUPPATH)
except:
pass
dbx.files_upload(f.read(), BACKUPPATH, mode=dropbox.files.WriteMode.overwrite)
logging.info("Uploaded!")
except ApiError as err:
# This checks for the specific error where a user doesn't have
# enough Dropbox space quota to upload this file
if (err.error.is_path() and
err.error.get_path().error.is_insufficient_space()):
sys.exit("ERROR: Cannot back up; insufficient space.")
elif err.user_message_text:
logging.warning(err.user_message_text)
sys.exit()
else:
logging.warning(err)
                sys.exit()
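# Typical usage sketch (assumed, not from the original): pull the database at
# startup, push it back after writes.
# restore()
# ... the bot mutates USER.db ...
# backup()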
import numpy as np
import matplotlib.pyplot as plt
import datetime
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
np.random.seed(seed=1)
for l in range(3):
imgs=[]
fig, axs = plt.subplots(1, 3, figsize=(9, 3))
for i in range(3):
        newimg = np.trunc(np.random.rand(3, 3, 3) * 255.0).astype(np.uint8)  # imshow clips float data outside [0, 1]
print("img: %d = %s" % (i, newimg))
#np.random.seed(seed=1)
imgs.append(newimg)
axs[i].imshow(imgs[i], interpolation="none")
axs[i].set_title("#%d" % (i))
plt.savefig("tmp/tnp_"+str(l)+"_"+TID+".png", bbox_inches="tight")
plt.show()
from dataclasses import dataclass
from typing import (
Dict,
Generic,
Iterable,
List,
NewType,
Optional,
Sequence,
TypeVar,
cast,
)
from eth2.beacon.db.abc import BaseBeaconChainDB
from eth2.beacon.epoch_processing_helpers import get_active_validator_indices
from eth2.beacon.fork_choice.abc import BaseForkChoice, BlockSink
from eth2.beacon.genesis import get_genesis_block
from eth2.beacon.helpers import compute_epoch_at_slot
from eth2.beacon.types.blocks import BaseBeaconBlock, BeaconBlock
from eth2.beacon.types.checkpoints import Checkpoint
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Epoch, Gwei, Root, Slot, ValidatorIndex, default_root
from eth2.configs import Eth2Config
# NOTE: copying `proto_array` implementation from:
# https://github.com/protolambda/eth2-py-hacks/proto_array.py
# Note: The Python implementation of Proto-array is an adaption of the Rust
# implementation by Sigma Prime (Apache 2.0). The Rust implementation is in
# turn an adaption of the original Proto-array work of @protolambda (licensed under MIT).
# However, as part of the Eth2 specification effort, and wider discussions
# with Eth2 implementers, the general idea of this implementation can be regarded as
# licensed under CC0 1.0 Universal, like the Eth2 specification.
ProtoNodeIndex = NewType("ProtoNodeIndex", int)
T = TypeVar("T")
class BlockNode(Generic[T]):
slot: Slot
root: Root
data: T
def __init__(self, slot: Slot, root: Root, data: T):
self.slot = slot
self.root = root
self.data = data
class ProtoNode(Generic[T]):
block: BlockNode[T]
parent: Optional[ProtoNodeIndex]
justified_epoch: Epoch
finalized_epoch: Epoch
weight: int
best_child: Optional[ProtoNodeIndex]
best_descendant: Optional[ProtoNodeIndex]
def __init__(
self,
block: BlockNode[T],
parent: Optional[ProtoNodeIndex],
justified_epoch: Epoch,
finalized_epoch: Epoch,
):
self.block = block
self.parent = parent
self.justified_epoch = justified_epoch
self.finalized_epoch = finalized_epoch
self.weight = 0
self.best_child = None
self.best_descendant = None
class ProtoArray(Generic[T]):
_block_sink: BlockSink
_index_offset: ProtoNodeIndex
_finalized_root: Root
_justified_epoch: Epoch
_finalized_epoch: Epoch
nodes: List[ProtoNode[T]]
indices: Dict[Root, ProtoNodeIndex]
def __init__(
self,
justified_epoch: Epoch,
finalized_block: BlockNode[T],
block_sink: BlockSink,
config: Eth2Config,
):
self._block_sink = block_sink
self._index_offset = ProtoNodeIndex(0)
self._justified_epoch = justified_epoch
finalized_epoch = compute_epoch_at_slot(
finalized_block.slot, config.SLOTS_PER_EPOCH
)
self._finalized_epoch = finalized_epoch
finalized_node = ProtoNode[T](
block=finalized_block,
parent=None,
justified_epoch=justified_epoch,
finalized_epoch=finalized_epoch,
)
self.nodes = [finalized_node]
self.indices = {finalized_block.root: ProtoNodeIndex(0)}
def _get_node(self, index: ProtoNodeIndex) -> ProtoNode[T]:
if index < self._index_offset:
raise IndexError(f"Minimum proto-array index is {self._index_offset}")
i = index - self._index_offset
        if i >= len(self.nodes):
raise IndexError(
f"Maximum proto-array index is {self._index_offset + len(self.nodes)}"
)
return self.nodes[i]
def canonical_chain(self, anchor_root: Root) -> Iterable[BlockNode[T]]:
"""From head back to anchor root (including the anchor itself)"""
index: Optional[ProtoNodeIndex] = self.indices[
self.find_head(anchor_root).root
] # KeyError if unknown root
while index is not None and index >= self._index_offset:
node = self._get_node(index)
yield node.block
if node.block.root == anchor_root:
break
index = node.parent
def apply_score_changes(
self, deltas: Iterable[int], justified_epoch: Epoch, finalized_epoch: Epoch
) -> None:
"""
Iterate backwards through the array, touching all nodes and their parents and potentially
the best-child of each parent.
The structure of the `self.nodes` array ensures that the child of each node is always
touched before its parent.
For each node, the following is done:
        - Update the node's weight with the corresponding delta (can be negative).
        - Back-propagate each node's delta to its parent's delta.
        - Compare the current node with the parent's best-child, updating it if the current
          node should become the best child.
        - If required, update the parent's best-descendant
          with the current node or its best-descendant.
"""
deltas = list(deltas) # Copy, during back-prop the contents are mutated.
assert len(deltas) == len(self.nodes) == len(self.indices)
if (
justified_epoch != self._justified_epoch
or finalized_epoch != self._finalized_epoch
):
self._justified_epoch = justified_epoch
self._finalized_epoch = finalized_epoch
# Iterate backwards through all indices in `self.nodes`.
min_bound = self._index_offset - 1
max_bound = min_bound + len(self.nodes)
for node_index, node in zip(
range(max_bound, min_bound, -1), reversed(self.nodes)
):
node_delta = deltas[node_index - self._index_offset]
# Apply the delta to the node.
node.weight = node.weight + node_delta
# If the node has a parent, try to update its best-child and best-descendant.
if node.parent is not None and node.parent >= self._index_offset:
# Back-propagate the nodes delta to its parent.
parent_index = node.parent - self._index_offset
deltas[parent_index] += node_delta
self._maybe_update_best_child_and_descendant(
node.parent, ProtoNodeIndex(node_index)
)
def on_block(
self,
block: BlockNode[T],
parent_root: Root,
justified_epoch: Epoch,
finalized_epoch: Epoch,
) -> None:
"""
Register a block with the fork choice.
It is only sane to supply a `None` parent for the genesis block.
"""
# If the block is already known, simply ignore it.
if block.root in self.indices:
return
node_index = ProtoNodeIndex(self._index_offset + len(self.nodes))
# NOTE: if the parent root is missing, then we take the convention that the parent
# is the genesis block. This convention handles the alias of the "empty" root as
# a block root for the genesis block.
if parent_root in self.indices:
parent_index = self.indices[parent_root]
else:
parent_index = self.indices.get(default_root, None)
node = ProtoNode[T](block, parent_index, justified_epoch, finalized_epoch)
self.indices[block.root] = node_index
self.nodes.append(node)
if node.parent is not None:
self._maybe_update_best_child_and_descendant(node.parent, node_index)
def find_head(self, anchor_root: Root) -> BlockNode[T]:
"""
Finds the head, starting from the anchor_root
subtree. (justified_root for regular fork-choice)
Follows the best-descendant links to find the best-block (i.e., head-block).
The result of this function is not guaranteed to be accurate if `on_block` has
been called without a subsequent `apply_score_changes` call. This is because
`on_block` does not attempt to walk backwards through the tree and update the
best-child/best-descendant links.
"""
        anchor_index = self.indices[anchor_root]  # KeyError if unknown root
        anchor_node = self._get_node(anchor_index)
best_descendant_index = anchor_node.best_descendant
if best_descendant_index is None:
best_descendant_index = anchor_index
best_node = self._get_node(best_descendant_index)
# Perform a sanity check that the node is indeed valid to be the head.
assert self._node_is_viable_for_head(best_node)
return best_node.block
def on_prune(self, anchor_root: Root) -> None:
"""
Update the tree with new finalization information (or alternatively another trusted root)
"""
anchor_index = self.indices[anchor_root] # KeyError if unknown root
if anchor_index == self._index_offset:
return # nothing to do
assert anchor_index > self._index_offset
best_index = self.indices[self.find_head(anchor_root).root]
# Remove the `self.indices` key/values for all the to-be-deleted nodes.
# And send the nodes to the block sink.
for idx, node in zip(range(self._index_offset, anchor_index), self.nodes):
canonical = node.best_descendant == best_index
self._block_sink.on_pruned_block(
_block_node_to_block(node.block), canonical
)
root = self.nodes[idx - self._index_offset].block.root
del self.indices[root]
# Drop all the nodes prior to finalization.
prune_index = anchor_index - self._index_offset
self.nodes = list(self.nodes[prune_index:])
# update offset
self._index_offset = anchor_index
def _maybe_update_best_child_and_descendant(
self, parent_index: ProtoNodeIndex, child_index: ProtoNodeIndex
) -> None:
"""
Observe the parent at `parent_index` with respect to the child at `child_index` and
potentially modify the `parent.best_child` and `parent.best_descendant` values.
There are four outcomes:
- The child is already the best child but it's now invalid due
to a FFG change and should be removed.
- The child is already the best child and the parent is updated
with the new best-descendant.
- The child is not the best child but becomes the best child.
- The child is not the best child and does not become the best child.
"""
child = self._get_node(child_index)
parent = self._get_node(parent_index)
child_leads_to_viable_head = self._node_leads_to_viable_head(child)
# The three options that we may set the `parent.best_child` and `parent.best_descendant` to.
def change_to_none() -> None:
parent.best_child = None
parent.best_descendant = None
def change_to_child() -> None:
parent.best_child = child_index
if child.best_descendant is None:
parent.best_descendant = child_index
else:
parent.best_descendant = child.best_descendant
def no_change() -> None:
pass
if parent.best_child is not None:
if parent.best_child == child_index:
if not child_leads_to_viable_head:
# If the child is already the best-child of the parent
# but it's not viable for the head, remove it.
change_to_none()
else:
# If the child is the best-child already, set it again to ensure that the
# best-descendant of the parent is updated.
change_to_child()
else:
best_child = self._get_node(parent.best_child)
best_child_leads_to_viable_head = self._node_leads_to_viable_head(
best_child
)
if child_leads_to_viable_head and (not best_child_leads_to_viable_head):
# The child leads to a viable head, but the current best-child doesn't.
change_to_child()
elif (
not child_leads_to_viable_head
) and best_child_leads_to_viable_head:
# The best child leads to a viable head, but the child doesn't.
no_change()
elif child.weight == best_child.weight:
# Tie-breaker of equal weights by root.
if child.block.root >= best_child.block.root:
change_to_child()
else:
no_change()
else:
# Choose the winner by weight.
if child.weight >= best_child.weight:
change_to_child()
else:
no_change()
else:
if child_leads_to_viable_head:
# There is no current best-child and the child is viable.
change_to_child()
else:
# There is no current best-child but the child is not viable.
no_change()
def _node_leads_to_viable_head(self, node: ProtoNode[T]) -> bool:
"""Indicates if the node itself is viable for the head,
or if it's best descendant is viable for the head."""
if node.best_descendant is not None:
best_descendant = self._get_node(node.best_descendant)
return self._node_is_viable_for_head(best_descendant)
else:
return self._node_is_viable_for_head(node)
def _node_is_viable_for_head(self, node: ProtoNode[T]) -> bool:
"""
This is the equivalent to the `filter_block_tree` function in the eth2 spec:
https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#filter_block_tree
Any node that has a different finalized or
justified epoch should not be viable for the head.
"""
return (
node.justified_epoch == self._justified_epoch or self._justified_epoch == 0
) and (
node.finalized_epoch == self._finalized_epoch or self._finalized_epoch == 0
)
@dataclass
class VoteTracker:
current_root: Root
next_root: Root
next_epoch: Epoch
class ProtoArrayForkChoice(Generic[T]):
proto_array: ProtoArray[T]
votes: List[VoteTracker]
balances: Sequence[Gwei]
justified: Checkpoint
finalized: Checkpoint
def __init__(
self,
finalized_block: BlockNode[T],
finalized: Checkpoint,
justified: Checkpoint,
block_sink: BlockSink,
config: Eth2Config,
):
finalized_epoch = compute_epoch_at_slot(
finalized_block.slot, config.SLOTS_PER_EPOCH
)
assert finalized_epoch == finalized.epoch
self.proto_array = ProtoArray(
justified.epoch, finalized_block, block_sink, config
)
        self.balances = []
        self.votes = []
        # Store the checkpoints so _reconcile_changes works before update_justified.
        self.justified = justified
        self.finalized = finalized
def on_prune(self, anchor_root: Root) -> None:
self.proto_array.on_prune(anchor_root)
def get_canonical_chain(self, anchor_root: Root) -> Iterable[BlockNode[T]]:
self._reconcile_changes()
for block in self.proto_array.canonical_chain(anchor_root):
yield block
def process_attestation(
self, validator_index: ValidatorIndex, block_root: Root, target_epoch: Epoch
) -> None:
if validator_index >= len(self.votes):
self.votes.extend(
[
VoteTracker(default_root, default_root, Epoch(0))
for _ in range(validator_index - len(self.votes) + 1)
]
)
vote = self.votes[validator_index]
if target_epoch > vote.next_epoch:
vote.next_root = block_root
vote.next_epoch = target_epoch
def process_block(
self,
block: BlockNode[T],
parent_root: Root,
justified_epoch: Epoch,
finalized_epoch: Epoch,
) -> None:
self.proto_array.on_block(block, parent_root, justified_epoch, finalized_epoch)
def update_justified(
self,
justified: Checkpoint,
finalized: Checkpoint,
justified_state_balances: Sequence[Gwei],
) -> None:
old_balances = self.balances
new_balances = justified_state_balances
deltas = _compute_deltas(
self.proto_array.indices,
self.proto_array._index_offset,
self.votes,
old_balances,
new_balances,
)
self.proto_array.apply_score_changes(deltas, justified.epoch, finalized.epoch)
self.balances = new_balances
self.justified = justified
self.finalized = finalized
def _reconcile_changes(self) -> None:
"""
NOTE: we call ``apply_score_changes``, see comment under ``ProtoArray.find_head``.
This should be called before reading the canonical chain.
"""
old_balances = self.balances
new_balances = old_balances
deltas = _compute_deltas(
self.proto_array.indices,
self.proto_array._index_offset,
self.votes,
old_balances,
new_balances,
)
self.proto_array.apply_score_changes(
deltas, self.justified.epoch, self.finalized.epoch
)
def find_head(self) -> BlockNode[T]:
self._reconcile_changes()
# NOTE: can skip some work by starting from justified, rather than finalized head
return self.proto_array.find_head(self.justified.root)
def _compute_deltas(
indices: Dict[Root, ProtoNodeIndex],
index_offset: int,
votes: List[VoteTracker],
old_balances: Sequence[Gwei],
new_balances: Sequence[Gwei],
) -> Sequence[int]:
"""
Returns a list of `deltas`, where there is one delta for each of the ProtoArray nodes.
The deltas are calculated between `old_balances` and `new_balances`, and/or a change of vote.
"""
deltas = [0] * len(indices)
for val_index, vote in enumerate(votes):
# There is no need to create a score change
# if the validator has never voted (may not be active)
# or both their votes are for the zero hash (alias to the genesis block).
if vote.current_root == default_root and vote.next_root == default_root:
continue
# Validator sets may have different sizes (but attesters are not different,
# activation only under finality)
old_balance = old_balances[val_index] if val_index < len(old_balances) else 0
new_balance = new_balances[val_index] if val_index < len(new_balances) else 0
if vote.current_root != vote.next_root or old_balance != new_balance:
# Ignore the current or next vote if it is not known in `indices`.
# We assume that it is outside of our tree (i.e., pre-finalization)
# and therefore not interesting.
if vote.current_root in indices:
deltas[indices[vote.current_root] - index_offset] -= old_balance
if vote.next_root in indices:
deltas[indices[vote.next_root] - index_offset] += new_balance
vote.current_root = vote.next_root
return deltas
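# A small worked example of _compute_deltas (illustrative only): with
# indices = {A: 0, B: 1}, index_offset = 0, and a single vote whose
# current_root is A and next_root is B, moving from an old balance of 32 to a
# new balance of 31, the result is [-32, 31]: A loses the old weight and B
# gains the new one.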
def _block_node_to_block(node: BlockNode[T]) -> BaseBeaconBlock:
return cast(BaseBeaconBlock, node.data)
def _block_to_block_node(block: BaseBeaconBlock) -> BlockNode[BaseBeaconBlock]:
return BlockNode(block.slot, block.hash_tree_root, block)
class LMDGHOSTForkChoice(BaseForkChoice):
def __init__(
self,
finalized_block_node: BlockNode[BaseBeaconBlock],
finalized_state: BeaconState,
config: Eth2Config,
block_sink: BlockSink,
) -> None:
self._config = config
self._impl = ProtoArrayForkChoice(
finalized_block_node,
finalized_state.finalized_checkpoint,
finalized_state.current_justified_checkpoint,
block_sink,
config,
)
self.update_justified(finalized_state)
@classmethod
def from_genesis(
cls, genesis_state: BeaconState, config: Eth2Config, block_sink: BlockSink
) -> "LMDGHOSTForkChoice":
# NOTE: patch up genesis state to reflect the genesis block as an initial checkpoint
# this only has to be patched once at genesis
genesis_block = get_genesis_block(genesis_state.hash_tree_root, BeaconBlock)
genesis_block_node = BlockNode(genesis_block.slot, default_root, genesis_block)
return cls(genesis_block_node, genesis_state, config, block_sink)
@classmethod
def from_db(
cls, chain_db: BaseBeaconChainDB, config: Eth2Config, block_sink: BlockSink
) -> "LMDGHOSTForkChoice":
finalized_head = chain_db.get_finalized_head(BeaconBlock)
finalized_state = chain_db.get_state_by_root(
finalized_head.state_root, BeaconState
)
finalized_head_node = _block_to_block_node(finalized_head)
# TODO: need genesis patch up here as well....
return cls(finalized_head_node, finalized_state, config, block_sink)
def update_justified(self, state: BeaconState) -> None:
"""
Call when a new ``state`` is justified.
"""
self._justified = state.current_justified_checkpoint
self._finalized = state.finalized_checkpoint
# NOTE: prune before updating justified as it touches some internal state...
self._impl.on_prune(self._finalized.root)
current_epoch = state.current_epoch(self._config.SLOTS_PER_EPOCH)
balances = tuple(
state.validators[i].effective_balance
for i in get_active_validator_indices(state.validators, current_epoch)
)
self._impl.update_justified(self._justified, self._finalized, balances)
def get_canonical_chain(self) -> Iterable[BaseBeaconBlock]:
for block_node in self._impl.get_canonical_chain(self._finalized.root):
yield _block_node_to_block(block_node)
def on_block(self, block: BaseBeaconBlock) -> None:
"""
NOTE: assumes that only ``block``s are supplied to this method
if their parent has already been registered.
Otherwise, the way this module handles the genesis alias may break things.
Refer to ``ProtoArray.on_block`` for more information.
"""
self._impl.process_block(
_block_to_block_node(block),
block.parent_root,
self._justified.epoch,
self._finalized.epoch,
)
def on_attestation(
self, block_root: Root, target_epoch: Epoch, *indices: ValidatorIndex
) -> None:
for validator_index in indices:
self._impl.process_attestation(validator_index, block_root, target_epoch)
def find_head(self) -> BaseBeaconBlock:
node = self._impl.find_head()
return _block_node_to_block(node)
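# A minimal usage sketch (assumed caller-side objects: `genesis_state`, `config`,
# a `BlockSink` implementation, plus incoming `block`s and attestations); it only
# illustrates the intended call order of the methods defined above:
#
#   fork_choice = LMDGHOSTForkChoice.from_genesis(genesis_state, config, block_sink)
#   fork_choice.on_block(block)  # the parent of ``block`` must already be registered
#   fork_choice.on_attestation(block.hash_tree_root, target_epoch, validator_index)
#   head = fork_choice.find_head()  # heaviest descendant of the justified checkpoint
#   fork_choice.update_justified(post_state)  # whenever a new state is justified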
|
# Generated by Django 3.0.3 on 2020-03-21 11:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sevapp', '0012_poff'),
]
operations = [
migrations.CreateModel(
name='Candidate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='profile')),
('election', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sevapp.Election')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='sevapp.Entry')),
],
),
]
|
# -*- coding: utf-8 -*-
"""
@Time    : 2020/7/13 17:50
@Author  : QDY
@FileName: 60. 第k个排列.py

Given the set [1, 2, 3, ..., n], its elements together have n! permutations.
Listing all permutations in ascending order and labeling them one by one,
for n = 3 they are:
"123"
"132"
"213"
"231"
"312"
"321"
Given n and k, return the k-th permutation.
Notes:
n is in the range [1, 9].
k is in the range [1, n!].
Example 1:
Input: n = 3, k = 3
Output: "213"
Example 2:
Input: n = 4, k = 9
Output: "2314"
"""
import math
class Solution:
def getPermutation(self, n: int, k: int) -> str:
# nums = [str(i) for i in range(1,n+1)]
# self.cnt,self.res = 0,''
# def dfs(arr,tmp_nums):
# if not tmp_nums:
# self.cnt += 1
# if self.cnt == k:
# self.res = arr
# return
# for i in range(len(tmp_nums)):
# dfs(arr+tmp_nums[i],tmp_nums[:i]+tmp_nums[i+1:])
# if self.res:
# return
# dfs('',nums)
# return self.res
        nums = [str(i) for i in range(1, n + 1)]
        res = ''
        while k > 1:  # once k == 1, res followed by the remaining chars of nums in order is the answer
            cnt = math.factorial(len(nums) - 1)
            for i in range(len(nums)):  # with nums[i] in the current position, there are cnt permutations behind it
                if k > cnt:  # k > cnt: the k-th permutation is not among these cnt permutations
                    k -= cnt
                else:  # k <= cnt: the k-th permutation has nums[i] in the current position
                    res += nums[i]
                    nums.pop(i)  # remove this character from nums
                    break  # break out and pick the character for the next position
        return res + ''.join(nums)
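# Worked trace (sketch) for n = 4, k = 9, matching Example 2 above:
#   cnt = 3! = 6; k = 9 > 6 -> skip '1', k = 3
#   k = 3 <= 6              -> fix '2'; res = '2', nums = ['1', '3', '4']
#   cnt = 2! = 2; k = 3 > 2 -> skip '1', k = 1
#   k = 1 <= 2              -> fix '3'; res = '23', nums = ['1', '4']
#   k == 1 -> append the rest in order: '23' + '14' = '2314'
if __name__ == '__main__':
    assert Solution().getPermutation(3, 3) == '213'
    assert Solution().getPermutation(4, 9) == '2314'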
|
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
MuonMiniAOD = DQMEDAnalyzer('MuonMiniAOD',
MuonServiceProxy,
MuonCollection = cms.InputTag("slimmedMuons"),
VertexLabel = cms.InputTag("offlineSlimmedPrimaryVertices"),
BeamSpotLabel = cms.InputTag("offlineBeamSpot"),
)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate, get_last_day, add_days
from verp.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt
from verp.assets.doctype.asset_maintenance.asset_maintenance import calculate_next_due_date
class TestAssetMaintenance(unittest.TestCase):
def setUp(self):
set_depreciation_settings_in_company()
create_asset_data()
create_maintenance_team()
def test_create_asset_maintenance(self):
pr = make_purchase_receipt(item_code="Photocopier",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset_doc = frappe.get_doc('Asset', asset_name)
month_end_date = get_last_day(nowdate())
purchase_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
asset_doc.available_for_use_date = purchase_date
asset_doc.purchase_date = purchase_date
asset_doc.calculate_depreciation = 1
asset_doc.append("finance_books", {
"expected_value_after_useful_life": 200,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": month_end_date
})
asset_doc.save()
if not frappe.db.exists("Asset Maintenance", "Photocopier"):
asset_maintenance = frappe.get_doc({
"doctype": "Asset Maintenance",
"asset_name": "Photocopier",
"maintenance_team": "Team Awesome",
"company": "_Test Company",
"asset_maintenance_tasks": get_maintenance_tasks()
            }).insert()
        else:
            asset_maintenance = frappe.get_doc("Asset Maintenance", "Photocopier")
next_due_date = calculate_next_due_date(nowdate(), "Monthly")
self.assertEqual(asset_maintenance.asset_maintenance_tasks[0].next_due_date, next_due_date)
def test_create_asset_maintenance_log(self):
if not frappe.db.exists("Asset Maintenance Log", "Photocopier"):
asset_maintenance_log = frappe.get_doc({
"doctype": "Asset Maintenance Log",
"asset_maintenance": "Photocopier",
"task": "Change Oil",
"completion_date": add_days(nowdate(), 2),
"maintenance_status": "Completed"
            }).insert()
        else:
            asset_maintenance_log = frappe.get_doc("Asset Maintenance Log", "Photocopier")
asset_maintenance = frappe.get_doc("Asset Maintenance", "Photocopier")
next_due_date = calculate_next_due_date(asset_maintenance_log.completion_date, "Monthly")
self.assertEqual(asset_maintenance.asset_maintenance_tasks[0].next_due_date, next_due_date)
def create_asset_data():
if not frappe.db.exists("Asset Category", "Equipment"):
create_asset_category()
if not frappe.db.exists("Location", "Test Location"):
frappe.get_doc({
'doctype': 'Location',
'location_name': 'Test Location'
}).insert()
if not frappe.db.exists("Item", "Photocopier"):
meta = frappe.get_meta('Asset')
naming_series = meta.get_field("naming_series").options
frappe.get_doc({
"doctype": "Item",
"item_code": "Photocopier",
"item_name": "Photocopier",
"item_group": "All Item Groups",
"company": "_Test Company",
"is_fixed_asset": 1,
"is_stock_item": 0,
"asset_category": "Equipment",
"auto_create_assets": 1,
"asset_naming_series": naming_series
}).insert()
def create_maintenance_team():
user_list = ["marcus@abc.com", "thalia@abc.com", "mathias@abc.com"]
if not frappe.db.exists("Role", "Technician"):
frappe.get_doc({"doctype": "Role", "role_name": "Technician"}).insert()
for user in user_list:
if not frappe.db.get_value("User", user):
frappe.get_doc({
"doctype": "User",
"email": user,
"first_name": user,
"new_password": "password",
"roles": [{"doctype": "Has Role", "role": "Technician"}]
}).insert()
if not frappe.db.exists("Asset Maintenance Team", "Team Awesome"):
frappe.get_doc({
"doctype": "Asset Maintenance Team",
"maintenance_manager": "marcus@abc.com",
"maintenance_team_name": "Team Awesome",
"company": "_Test Company",
"maintenance_team_members": get_maintenance_team(user_list)
}).insert()
def get_maintenance_team(user_list):
return [{"team_member": user,
"full_name": user,
"maintenance_role": "Technician"
}
for user in user_list[1:]]
def get_maintenance_tasks():
return [{"maintenance_task": "Change Oil",
"start_date": nowdate(),
"periodicity": "Monthly",
"maintenance_type": "Preventive Maintenance",
"maintenance_status": "Planned",
"assign_to": "marcus@abc.com"
},
{"maintenance_task": "Check Gears",
"start_date": nowdate(),
"periodicity": "Yearly",
"maintenance_type": "Calibration",
"maintenance_status": "Planned",
"assign_to": "thalia@abc.com"
}
]
def create_asset_category():
asset_category = frappe.new_doc("Asset Category")
asset_category.asset_category_name = "Equipment"
asset_category.total_number_of_depreciations = 3
asset_category.frequency_of_depreciation = 3
asset_category.append("accounts", {
"company_name": "_Test Company",
"fixed_asset_account": "_Test Fixed Asset - _TC",
"accumulated_depreciation_account": "_Test Accumulated Depreciations - _TC",
"depreciation_expense_account": "_Test Depreciations - _TC"
})
asset_category.insert()
def set_depreciation_settings_in_company():
company = frappe.get_doc("Company", "_Test Company")
company.accumulated_depreciation_account = "_Test Accumulated Depreciations - _TC"
company.depreciation_expense_account = "_Test Depreciations - _TC"
company.disposal_account = "_Test Gain/Loss on Asset Disposal - _TC"
company.depreciation_cost_center = "_Test Cost Center - _TC"
company.save()
# Enable booking asset depreciation entry automatically
frappe.db.set_value("Accounts Settings", None, "book_asset_depreciation_entry_automatically", 1) |
import os
import webapp2
from google.appengine.ext.webapp import template
class RefreshPageHandler(webapp2.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'app/index.html')
self.response.out.write(template.render(path, {}))
APP = webapp2.WSGIApplication([
('/about', RefreshPageHandler),
('/contact', RefreshPageHandler),
('/', RefreshPageHandler),
], debug=True) |
from distutils.core import setup
import py2exe
import os
# custom modules
from modules import *
py2exe_options = dict(
compressed=True, # Compress library.zip
optimize = 2,
dist_dir = 'DicoGIS'
)
setup(name="DicoGIS",
version="1.6",
description=u"Dynamic dictionary of geographic datas",
author="Julien Moura",
url = "https://github.com/Guts/DicoGIS",
license="license GPL v3.0",
data_files = [("locale", ["locale/lang_EN.xml",
"locale/lang_ES.xml",
"locale/lang_FR.xml"]),
("", ["settings.xml"]),
("", ["DicoGIS.ico"]),
("img", ["img/DicoGIS_logo.gif"]),
("doc",["documentation/DicoGIS_Manual_ES.pdf",
"documentation/README.html",
"documentation/DicoGIS_TechnicalDetails.htm"])],
options={'py2exe': py2exe_options},
windows = [
{
"script": "DicoGIS.py", # script
"icon_resources": [(1, "DicoGIS.ico")] # Icone
}
]
)
|
import numpy as np
import random
from scipy.stats import t
# initial settings
initial_seed = 2502
confidence_level = 0.95
runs = 5 # number of runs
debug=False
# create list of inputs
input_list=[]
for i in (2,3,4,5):
a=[x*10**i for x in (1,2,4,8)]
input_list.extend(a)
input_list.extend([1000000])
# print input parameters
print("*** INITIAL SETTINGS ***")
print("Bins/Balls number for the simulation:")
print(input_list)
print("Initial seed",initial_seed)
print("Confidence level",confidence_level)
print("Number of runs",runs)
print("*** END INITIAL SETTINGS ***")
# function to compute confidence intervals
def evaluate_conf_interval(x):
    # x is the array of experimental results, one per run
    t_sh = t.ppf((confidence_level + 1) / 2, df=runs - 1)  # critical value of the Student's t distribution
ave = x.mean() # average
stddev = x.std(ddof=1) # std dev
ci = t_sh * stddev / np.sqrt(runs) # confidence interval half width
rel_err = ci / ave # relative error
if debug:
print("Min", x.min(), "Ave", ave, "Max", x.max())
print("Confidence interval:", "{:.2f}".format(ave - ci), end=" ")
print(ave, end=" ")
print("{:.2f}".format(ave + ci), end=" ")
print("Delta", "{:.2f}".format(2 * ci), end=" ")
print("Relative error: {:.2f}".format(rel_err))
return ave, ci, rel_err
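# Illustrative check (a sketch, not used by the simulation): with runs = 5 and
# confidence_level = 0.95, t.ppf(0.975, df=4) ~ 2.776; for x = np.array([4, 5, 5, 6, 5])
# the mean is 5.0, the sample std (ddof=1) is ~0.707, and the half width is
# ci ~ 2.776 * 0.707 / sqrt(5) ~ 0.88, i.e. an interval of roughly [4.12, 5.88].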
def run_simulator(n):  # run the bins-and-balls model with n bins and n balls, averaged over multiple runs
random.seed(a=initial_seed) # reset initial seed
maxvec = np.full(runs, 0) # init vector for the maximum for each run
for r in range(runs): # for each run
bins = np.full(n, 0) # bins[i] is the occupancy of bin i; start from empty bins
for i in range(n): # for each ball
bins[random.randint(0, n - 1)] += 1 # drop ball randomly and update bins
maxvec[r] = bins.max() # compute the max occupancy
ave, ci, rel_err = evaluate_conf_interval(maxvec) # evaluate the confidence intervals
    lower_bound = np.log(n) / np.log(np.log(n))  # theoretical lower bound ~ ln(n)/ln(ln(n)) on the max occupancy
# print("Lower bound {:.2f}".format(lower_bound), " Upper bound {:.2f}".format(3 * lower_bound))
return n, lower_bound, 3 * lower_bound, ave - ci, ave, ave + ci, rel_err
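# Sanity band (sketch): for n = 10**6 the theoretical maximum occupancy lies
# roughly between ln(n)/ln(ln(n)) ~ 5.26 and three times that (~15.8), so the
# simulated averages written to binsballs.dat should fall inside that band.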
#########################
# main simulation engine
#########################
# open the outfile file and write the header
datafile = open("binsballs.dat", "w")
print("# n\tLowerbound\t3*Lowerbound\tciLow\tave\tciHigh\tRelErr",file=datafile)
for n in input_list: # for each number of bins and balls
print("Running for n=",n) # log starting a run
out_run=run_simulator(n) # get the output results of a run
print(*out_run,sep="\t",file=datafile) # write on a file
datafile.close() # close the file
|
import argparse
import os
import pathlib
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from data_objs import model_results
parser=argparse.ArgumentParser()
parser.add_argument('--workingDir', action = 'store', dest = 'working_dir')
parser.add_argument('--inputFile', action = 'store', dest = 'input_file')
parser.add_argument('--labFile', action = 'store', dest='lab_file')
parser.add_argument('--nproc', action = 'store', type=int, default=2)
parser.add_argument('--outputDir', action = 'store', dest='output_dir')
args=parser.parse_args()
os.chdir(args.working_dir)
class SklDataObj:
def __init__(self,x_file_name, lab_file):
fn=x_file_name.split('/')[-1]
dm=int(fn.split('_')[1].split('-')[1])
wc=int(fn.split('_')[2].split('-')[1])
kmer_size=int(fn.split('_')[3].split('-')[1])
edim=int(fn.split('_')[4].split('-')[1].split('.')[0] )
data_name=f'dm-{dm}_wc-{wc}_kmer-{str(kmer_size)}_edim-{str(edim)}'
X_df=pd.read_csv(x_file_name,names=['embl_id']+ list(range(edim)))
labs=pd.read_csv(lab_file ).rename({'family_id' : 'target_label'}, axis =1)
X_df_labeled=pd.merge(left=labs, how='inner', right=X_df, left_on='embl_id', right_on='embl_id')
        X_data = np.asarray(X_df_labeled.iloc[:, 3:])  # feature matrix: everything after the first 3 label columns
labs = X_df_labeled.iloc[:,:3]
Y_vec=np.asarray(X_df_labeled['target_label'])
X_train, X_test, train_labs, test_labs =train_test_split(X_data,labs,test_size=.2,
random_state=42,stratify=Y_vec)
self.X_train=X_train
self.Y_train=np.asarray(train_labs.iloc[:,2])
self.train_labs = train_labs
self.X_test=X_test
self.Y_test=np.asarray(test_labs.iloc[:,2])
self.test_labs = test_labs
self.name=data_name
self.model=None
def summary(self):
tr_len=len(self.X_train)
ts_len=len(self.X_test)
print(f'Training size: {tr_len}\nClass Counts:')
print(self.train_labs.protein_families.value_counts())
print(f'Test size: {ts_len}\nClass Counts:')
print(self.test_labs.protein_families.value_counts())
def run_model(self, model, model_name, outdir):
model.fit(self.X_train, self.Y_train)
self.model=model
Y_pred_class = model.predict(self.X_test)
Y_pred_prob = model.predict_proba(self.X_test)
Y_true = self.Y_test
model_results( Y_true, Y_pred_class, Y_pred_prob, f'{self.name}_{model_name}', outdir)
#return model_res_line
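# Filename convention assumed by SklDataObj (illustrative, hypothetical name):
#   embeddings_dm-1_wc-5_kmer-3_edim-100.csv
# parses to dm=1, wc=5, kmer_size=3, edim=100 and data_name
# 'dm-1_wc-5_kmer-3_edim-100'; the CSV is expected to hold an `embl_id` column
# followed by `edim` embedding columns, one row per sequence.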
outdir = args.output_dir
if not outdir.endswith('/'):
    outdir += '/'
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
with open(outdir+'model_results.csv', 'w+') as model_res_file:
data_obj = SklDataObj(args.input_file, args.lab_file)
data_obj.summary()
rf_model = RandomForestClassifier(n_estimators=100, random_state=32, n_jobs=args.nproc)
data_obj.run_model(rf_model, 'random_forest', outdir)
|
import random

palavras = 'hamburguer', 'suco', 'pizza', 'pudim'
print('=' * 60)
escolha = str(random.choice(palavras))
espacos = len(escolha)
print(espacos * '-')
print(escolha)
erro = 0
acerto = 0
letra = ''
while erro < 5:
    letra = str(input('Which letter do you choose? : '))
    if letra in escolha:
        print(f'The word contains the letter {letra} at position {escolha.index(letra) + 1}')
        acerto += 1  # one more correct letter
        if acerto == len(escolha):
            print(f'You got it, the word was {escolha}')
            break
    else:
        erro += 1
        if erro < 5:
            print('Try again')
        else:
            print('You lost, your 5 chances are gone')
    print(acerto)
'''
for pos in escolha:
    print(f'\nIn the word {pos.upper()} we have', end=" ")
    for letra in pos:
        if letra.lower() in 'aeiou':
            print(letra, end=' ')''' |
# @generated from torch/_C/_VariableFunctions.pyi.in
from torch import Tensor, Generator, strided, memory_format, contiguous_format
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, TypeVar
from torch._six import inf
from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout
import builtins
namedtuple_values_indices = NamedTuple("namedtuple_values_indices", [("values", Tensor), ("indices", Tensor)])
namedtuple_eigenvalues_eigenvectors = NamedTuple("namedtuple_eigenvalues_eigenvectors", [("eigenvalues", Tensor), ("eigenvectors", Tensor)])
namedtuple_a_tau = NamedTuple("namedtuple_a_tau", [("a", Tensor), ("tau", Tensor)])
namedtuple_solution_QR = NamedTuple("namedtuple_solution_QR", [("solution", Tensor), ("QR", Tensor)])
namedtuple_Q_R = NamedTuple("namedtuple_Q_R", [("Q", Tensor), ("R", Tensor)])
namedtuple_sign_logabsdet = NamedTuple("namedtuple_sign_logabsdet", [("sign", Tensor), ("logabsdet", Tensor)])
namedtuple_solution_LU = NamedTuple("namedtuple_solution_LU", [("solution", Tensor), ("LU", Tensor)])
namedtuple_U_S_V = NamedTuple("namedtuple_U_S_V", [("U", Tensor), ("S", Tensor), ("V", Tensor)])
namedtuple_solution_cloned_coefficient = NamedTuple("namedtuple_solution_cloned_coefficient", [("solution", Tensor), ("cloned_coefficient", Tensor)])
@overload
def __and__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __and__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
def _adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
def _add_relu(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
def _add_relu_(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
def _addmv_impl_(input: Tensor, self2: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
@overload
def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
@overload
def _aminmax(input: Tensor, dim: _int, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _amp_non_finite_check_and_unscale_(input: Tensor, found_inf: Tensor, inv_scale: Tensor) -> None: ...
def _amp_update_scale(growth_tracker: Tensor, current_scale: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
def _baddbmm_mkl_(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
def _bmm(input: Tensor, mat2: Tensor, *, deterministic: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
def _cast_Byte(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Char(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Double(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Float(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Half(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Int(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Long(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Short(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool=False) -> Tuple[_float, _int]: ...
def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _conj(input: Tensor) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
def _convolution_nogroup(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size) -> Tensor: ...
def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: _int, mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
def _cufft_clear_plan_cache(device_index: _int) -> None: ...
def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _debug_has_internal_overlap(input: Tensor) -> _int: ...
def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def _empty_affine_quantized(size: _size, *, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def _empty_affine_quantized(*size: _int, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(size: _size, *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: ...
@overload
def _fft_with_size(input: Tensor, signal_ndim: _int, complex_input: _bool, complex_output: _bool, inverse: _bool, checked_signal_sizes: _size, normalized: _bool, onesided: _bool, output_sizes: _size) -> Tensor: ...
@overload
def _fft_with_size(input: Tensor, signal_ndim: _int, complex_input: _bool, complex_output: _bool, inverse: _bool, checked_signal_sizes: _size, normalization: _int, onesided: _bool, output_sizes: _size) -> Tensor: ...
@overload
def _foreach_add(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_add(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
def _foreach_add_scalar_list(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_add_scalar_list_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
def _foreach_addcdiv(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
def _foreach_addcmul(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
@overload
def _foreach_div(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_div(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_div_scalar_list(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_div_scalar_list_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
def _foreach_exp(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_mul(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_mul_scalar_list(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_mul_scalar_list_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
def _foreach_sqrt(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_sub(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_sub(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
def _foreach_sub_scalar_list(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sub_scalar_list_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator]=None) -> Tuple[Tensor, Tensor]: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _index_copy_(input: Tensor, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False, unsafe: _bool=False) -> Tensor: ...
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _lu_solve_helper(input: Tensor, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: ...
def _lu_with_info(input: Tensor, pivot: _bool=True, check_errors: _bool=True) -> Tuple[Tensor, Tensor, Tensor]: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mode(input: Tensor, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
def _multinomial_alias_draw(J: Tensor, q: Tensor, num_samples: _int, *, generator: Optional[Generator]=None) -> Tensor: ...
def _multinomial_alias_setup(probs: Tensor) -> Tuple[Tensor, Tensor]: ...
def _nnpack_available() -> _bool: ...
def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[_int, _size], stride: Union[_int, _size]=1) -> Tensor: ...
def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Number, total_length: _int) -> Tuple[Tensor, Tensor]: ...
def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
def _s_where(condition: Tensor, input: Tensor, other: Tensor) -> Tensor: ...
def _sample_dirichlet(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
def _shape_as_tensor(input: Tensor) -> Tensor: ...
def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
def _softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_addmm(input: Tensor, sparse: Tensor, dense: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_mm(sparse: Tensor, dense: Tensor) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
def _standard_gamma(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
def _std(input: Tensor, unbiased: _bool=True) -> Tensor: ...
def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Number=1) -> Tensor: ...
def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int=1) -> Tensor: ...
def _unique(input: Tensor, sorted: _bool=True, return_inverse: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _unique2(input: Tensor, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
def _use_cudnn_rnn_flatten_weight() -> _bool: ...
def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size) -> None: ...
def _var(input: Tensor, unbiased: _bool=True) -> Tensor: ...
def _weight_norm(v: Tensor, g: Tensor, dim: _int=0) -> Tensor: ...
def _weight_norm_cuda_interface(v: Tensor, g: Tensor, dim: _int=0) -> Tuple[Tensor, Tensor]: ...
def abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def abs_(input: Tensor) -> Tensor: ...
def absolute(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos_(input: Tensor) -> Tensor: ...
def acosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acosh_(input: Tensor) -> Tensor: ...
def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
@overload
def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
@overload
def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
def affine_grid_generator(theta: Tensor, size: _size, align_corners: _bool) -> Tensor: ...
@overload
def all(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def all(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def allclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool: ...
def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def amax(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def amin(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def angle(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def arange(start: Number, end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
def arccos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arccos_(input: Tensor) -> Tensor: ...
def arccosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arccosh_(input: Tensor) -> Tensor: ...
def arcsin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arcsin_(input: Tensor) -> Tensor: ...
def arcsinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arcsinh_(input: Tensor) -> Tensor: ...
def arctan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arctan_(input: Tensor) -> Tensor: ...
def arctanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arctanh_(input: Tensor) -> Tensor: ...
def argmax(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
def argmin(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
@overload
def argsort(input: Tensor, dim: _int=-1, descending: _bool=False) -> Tensor: ...
@overload
def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False) -> Tensor: ...
def as_strided(input: Tensor, size: _size, stride: _size, storage_offset: Optional[_int]=None) -> Tensor: ...
def as_strided_(input: Tensor, size: _size, stride: _size, storage_offset: Optional[_int]=None) -> Tensor: ...
def as_tensor(data: Any, dtype: _dtype=None, device: Optional[_device]=None) -> Tensor: ...
def asin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def asin_(input: Tensor) -> Tensor: ...
def asinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def asinh_(input: Tensor) -> Tensor: ...
def atan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atan_(input: Tensor) -> Tensor: ...
def atanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atanh_(input: Tensor) -> Tensor: ...
def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, ceil_mode: _bool=False, count_include_pad: _bool=True) -> Tensor: ...
@overload
def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def bartlett_window(window_length: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def bartlett_window(window_length: _int, periodic: _bool, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], mean_dy: Tensor, mean_dy_xmu: Tensor) -> Tensor: ...
def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor]=None) -> Tensor: ...
def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
@overload
def bernoulli(input: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
def bincount(input: Tensor, weights: Optional[Tensor]=None, minlength: _int=0) -> Tensor: ...
def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
@overload
def bitwise_and(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def bitwise_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bitwise_or(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bitwise_xor(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def blackman_window(window_length: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def blackman_window(window_length: _int, periodic: _bool, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bucketize(self: Number, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
def can_cast(from_: _dtype, to: _dtype) -> _bool: ...
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
def ceil(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def ceil_(input: Tensor) -> Tensor: ...
def celu(input: Tensor, alpha: Number=1.0) -> Tensor: ...
def celu_(input: Tensor, alpha: Number=1.0) -> Tensor: ...
def channel_shuffle(input: Tensor, groups: _int) -> Tensor: ...
def cholesky(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def cholesky_inverse(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[_float, _float]: ...
def chunk(input: Tensor, chunks: _int, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def clamp(input: Tensor, min: _float=-inf, max: _float=inf, *, out: Optional[Tensor]=None) -> Tensor: ...
def clamp_max(input: Tensor, max: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def clamp_max_(input: Tensor, max: Number) -> Tensor: ...
def clamp_min(input: Tensor, min: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def clamp_min_(input: Tensor, min: Number) -> Tensor: ...
def clip(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def clip_(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
def clone(input: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
def combinations(input: Tensor, r: _int=2, with_replacement: _bool=False) -> Tensor: ...
def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def conj(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def constant_pad_nd(input: Tensor, pad: _size, value: Number=0) -> Tensor: ...
def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int=0) -> Tensor: ...
def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int) -> Tensor: ...
def cos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def cos_(input: Tensor) -> Tensor: ...
def cosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def cosh_(input: Tensor) -> Tensor: ...
def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int=1, eps: _float=1e-08) -> Tensor: ...
@overload
def count_nonzero(input: Tensor, dim: _size) -> Tensor: ...
@overload
def count_nonzero(input: Tensor, dim: Optional[_int]=None) -> Tensor: ...
def cross(input: Tensor, other: Tensor, dim: Optional[_int]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def cudnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
@overload
def cudnn_convolution(input: Tensor, weight: Tensor, padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
@overload
def cudnn_convolution(input: Tensor, weight: Tensor, padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
@overload
def cudnn_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
@overload
def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
@overload
def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
def cudnn_is_acceptable(input: Tensor) -> _bool: ...
@overload
def cummax(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def cummin(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
def deg2rad(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def deg2rad_(input: Tensor) -> Tensor: ...
@overload
def dequantize(input: Tensor) -> Tensor: ...
@overload
def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def det(input: Tensor) -> Tensor: ...
def detach(input: Tensor) -> Tensor: ...
def detach_(input: Tensor) -> Tensor: ...
def diag(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def diag_embed(input: Tensor, offset: _int=0, dim1: _int=-2, dim2: _int=-1) -> Tensor: ...
def diagflat(input: Tensor, offset: _int=0) -> Tensor: ...
@overload
def diagonal(input: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
@overload
def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int=0) -> Tensor: ...
def digamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def dist(input: Tensor, other: Tensor, p: Number=2) -> Tensor: ...
def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def divide(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
def eig(input: Tensor, eigenvectors: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_eigenvalues_eigenvectors: ...
def embedding(weight: Tensor, indices: Tensor, padding_idx: _int=-1, scale_grad_by_freq: _bool=False, sparse: _bool=False) -> Tensor: ...
def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
@overload
def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def empty(size: _size, *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
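# The `empty` overloads above accept either one size sequence or unpacked
# ints; a minimal sketch (contents are uninitialized memory, so no output
# is shown):
#   torch.empty(2, 3)                         # *size: _int form
#   torch.empty((2, 3), dtype=torch.float64)  # size: _size form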
def empty_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def empty_meta(size: _size, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def empty_meta(*size: _int, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def empty_quantized(size: _size, qtensor: Tensor) -> Tensor: ...
def empty_strided(size: _size, stride: _size, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def eq(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def equal(input: Tensor, other: Tensor) -> _bool: ...
def erf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def erf_(input: Tensor) -> Tensor: ...
def erfc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def erfc_(input: Tensor) -> Tensor: ...
def erfinv(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def exp(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def exp2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def exp2_(input: Tensor) -> Tensor: ...
def exp_(input: Tensor) -> Tensor: ...
def expm1(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def expm1_(input: Tensor) -> Tensor: ...
@overload
def eye(n: _int, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def eye(n: _int, m: _int, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def fft(input: Tensor, signal_ndim: _int, normalized: _bool=False) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Number) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Tensor) -> Tensor: ...
def fix(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def fix_(input: Tensor) -> Tensor: ...
@overload
def flatten(input: Tensor, start_dim: _int=0, end_dim: _int=-1) -> Tensor: ...
@overload
def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ...
@overload
def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
@overload
def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
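# Sketch of the default `flatten` overload: start_dim/end_dim collapse a
# contiguous range of dimensions into one:
#   x = torch.zeros(2, 3, 4)
#   torch.flatten(x).shape               # torch.Size([24])
#   torch.flatten(x, start_dim=1).shape  # torch.Size([2, 12])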
def flip(input: Tensor, dims: _size) -> Tensor: ...
def fliplr(input: Tensor) -> Tensor: ...
def flipud(input: Tensor) -> Tensor: ...
def floor(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def floor_(input: Tensor) -> Tensor: ...
def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def fmod(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def frac(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def frac_(input: Tensor) -> Tensor: ...
@overload
def frobenius_norm(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def from_file(filename: str, shared: Optional[_bool]=None, size: Optional[_int]=0, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def from_numpy(ndarray) -> Tensor: ...
@overload
def full(size: _size, fill_value: Number, *, out: Optional[Tensor]=None, layout: _layout=strided, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def full(size: _size, fill_value: Number, *, names: List[Union[str, None]], layout: _layout=strided, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
def full_like(input: Tensor, fill_value: Number, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
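# Sketch of `gather` along dim 1: out[i][j] = input[i][index[i][j]]
# (hand-computed example values):
#   src = torch.tensor([[1, 2], [3, 4]])
#   idx = torch.tensor([[0, 0], [1, 0]])
#   torch.gather(src, 1, idx)  # tensor([[1, 1], [4, 3]])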
def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def ge(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def geqrf(input: Tensor, *, out: Optional[Tensor]=None) -> namedtuple_a_tau: ...
def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def get_default_dtype() -> _dtype: ...
def get_num_interop_threads() -> _int: ...
def get_num_threads() -> _int: ...
@overload
def greater(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enabled: _bool=True) -> Tensor: ...
@overload
def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
@overload
def gt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def hamming_window(window_length: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def hann_window(window_length: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def hann_window(window_length: _int, periodic: _bool, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def hardshrink(input: Tensor, lambd: Number=0.5) -> Tensor: ...
def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def histc(input: Tensor, bins: _int=100, min: Number=0, max: Number=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def i0(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def i0_(input: Tensor) -> Tensor: ...
def ifft(input: Tensor, signal_ndim: _int, normalized: _bool=False) -> Tensor: ...
def imag(input: Tensor) -> Tensor: ...
@overload
def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
@overload
def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
@overload
def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
@overload
def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Number) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
@overload
def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def init_num_threads() -> None: ...
def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def int_repr(input: Tensor) -> Tensor: ...
def inverse(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def irfft(input: Tensor, signal_ndim: _int, normalized: _bool=False, onesided: _bool=True, signal_sizes: _size=()) -> Tensor: ...
def is_complex(input: Tensor) -> _bool: ...
def is_distributed(input: Tensor) -> _bool: ...
def is_floating_point(input: Tensor) -> _bool: ...
def is_grad_enabled() -> _bool: ...
def is_nonzero(input: Tensor) -> _bool: ...
def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
def is_signed(input: Tensor) -> _bool: ...
def is_vulkan_available() -> _bool: ...
def isclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> Tensor: ...
def isfinite(input: Tensor) -> Tensor: ...
def isinf(input: Tensor) -> Tensor: ...
def isnan(input: Tensor) -> Tensor: ...
def isneginf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def isposinf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def isreal(input: Tensor) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, periodic: _bool, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def kthvalue(input: Tensor, k: _int, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
def layer_norm(input: Tensor, normalized_shape: _size, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enable: _bool=True) -> Tensor: ...
def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def le(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def le(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def lgamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def log(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log10(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log10_(input: Tensor) -> Tensor: ...
def log1p(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log1p_(input: Tensor) -> Tensor: ...
def log2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log2_(input: Tensor) -> Tensor: ...
def log_(input: Tensor) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
def logdet(input: Tensor) -> Tensor: ...
def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logit(input: Tensor, eps: Optional[_float]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def logit_(input: Tensor, eps: Optional[_float]=None) -> Tensor: ...
def logspace(start: Number, end: Number, steps: Optional[_int]=None, base: _float=10.0, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
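# `logsumexp` computes log(sum(exp(x))) along `dim` in a numerically stable
# way; a sketch where a naive exp() would overflow:
#   x = torch.tensor([1000., 1000.])
#   torch.logsumexp(x, dim=0)  # tensor(1000.6931) == 1000 + ln(2)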
@overload
def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
def lstsq(input: Tensor, A: Tensor, *, out: Optional[Tensor]=None) -> namedtuple_solution_QR: ...
@overload
def lt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Number) -> Tensor: ...
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def matrix_exp(input: Tensor) -> Tensor: ...
def matrix_power(input: Tensor, n: _int) -> Tensor: ...
@overload
def matrix_rank(input: Tensor, tol: _float, symmetric: _bool=False) -> Tensor: ...
@overload
def matrix_rank(input: Tensor, symmetric: _bool=False) -> Tensor: ...
@overload
def max(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def max(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def max(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
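# The four `max` overloads above cover full reduction, reduction along a
# dim (returning a (values, indices) namedtuple), and elementwise
# comparison against another tensor; an illustrative sketch:
#   x = torch.tensor([[1., 5.], [4., 2.]])
#   torch.max(x)                       # tensor(5.)
#   torch.max(x, dim=1).indices        # tensor([1, 0])
#   torch.max(x, torch.zeros_like(x))  # elementwise; same as torch.maximum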
def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tuple[Tensor, Tensor]: ...
def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def mean(input: Tensor, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def mean(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def median(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def median(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def min(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def min(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def min(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
def mkldnn_convolution_backward_weights(weight_size: _size, grad_output: Tensor, input: Tensor, padding: _size, stride: _size, dilation: _size, groups: _int, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def mode(input: Tensor, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: ...
@overload
def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: ...
def mul(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
def multinomial(input: Tensor, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def multiply(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def mvlgamma(input: Tensor, p: _int) -> Tensor: ...
@overload
def nanquantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nansum(input: Tensor, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nansum(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def narrow(input: Tensor, dim: _int, start: _int, length: _int) -> Tensor: ...
@overload
def narrow(input: Tensor, dim: _int, start: Tensor, length: _int) -> Tensor: ...
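# Sketch of `narrow(input, dim, start, length)`: a view of `length` entries
# starting at `start` along `dim`:
#   x = torch.arange(6).reshape(2, 3)
#   torch.narrow(x, 1, 1, 2)  # tensor([[1, 2], [4, 5]])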
def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Optional[Tensor]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: _int, C: _int, HxW: _int, group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_layer_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], M: _int, N: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def native_norm(input: Tensor, p: Number=2) -> Tensor: ...
@overload
def native_norm(input: Tensor, p: Optional[Number], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
@overload
def ne(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def neg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def neg_(input: Tensor) -> Tensor: ...
def negative(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def negative_(input: Tensor) -> Tensor: ...
def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nonzero(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nonzero(input: Tensor, *, as_tuple: _bool=...) -> Union[Tensor, Tuple[Tensor, ...]]: ...
def norm_except_dim(v: Tensor, pow: _int=2, dim: _int=0) -> Tensor: ...
@overload
def normal(mean: Tensor, std: _float=1, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: _float, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: _float, std: _float, size: _size, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
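# The `normal` overloads above let mean/std be scalars or per-element
# tensors; a sketch of two forms (outputs are random draws):
#   torch.normal(mean=0.0, std=1.0, size=(2, 3))  # scalar mean/std + size
#   torch.normal(torch.zeros(5), torch.ones(5))   # per-element mean/std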
@overload
def not_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def numel(self: Tensor) -> _int: ...
@overload
def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def ones(size: _size, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def ones(*size: _int, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def ones_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool=True, transpose: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def pairwise_distance(x1: Tensor, x2: Tensor, p: _float=2, eps: _float=1e-06, keepdim: _bool=False) -> Tensor: ...
def pdist(input: Tensor, p: _float=2) -> Tensor: ...
def pinverse(input: Tensor, rcond: _float=1e-15) -> Tensor: ...
def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
def poisson(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def pow(self: Number, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def pow(input: Tensor, exponent: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
@overload
def prod(input: Tensor, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def prod(input: Tensor, dim: _int, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
def promote_types(type1: _dtype, type2: _dtype) -> _dtype: ...
def q_per_channel_axis(input: Tensor) -> _int: ...
def q_per_channel_scales(input: Tensor) -> Tensor: ...
def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
def q_scale(input: Tensor) -> _float: ...
def q_zero_point(input: Tensor) -> _int: ...
def qr(input: Tensor, some: _bool=True, *, out: Optional[Tensor]=None) -> namedtuple_Q_R: ...
@overload
def quantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def quantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: ...
@overload
def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: ...
@overload
def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: ...
def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tuple[Tensor, Tensor]: ...
def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def rad2deg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def rad2deg_(input: Tensor) -> Tensor: ...
@overload
def rand(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(size: _size, *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(size: _size, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(*size: _int, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(size: _size, *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def rand_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def randint_like(input: Tensor, high: _int, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randint_like(input: Tensor, low: _int, high: _int, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(size: _size, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(*size: _int, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(size: _size, *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(size: _size, *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def randn_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randperm(n: _int, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def randperm(n: _int, *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def range(start: Number, end: Number, step: Number=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
def real(input: Tensor) -> Tensor: ...
def reciprocal(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def reciprocal_(input: Tensor) -> Tensor: ...
def relu(input: Tensor) -> Tensor: ...
def relu_(input: Tensor) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def renorm(input: Tensor, p: Number, dim: _int, maxnorm: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def repeat_interleave(repeats: Tensor) -> Tensor: ...
@overload
def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int]=None) -> Tensor: ...
@overload
def repeat_interleave(input: Tensor, repeats: _int, dim: Optional[_int]=None) -> Tensor: ...
def reshape(input: Tensor, shape: _size) -> Tensor: ...
def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
@overload
def result_type(tensor: Tensor, other: Tensor) -> _dtype: ...
@overload
def result_type(tensor: Tensor, other: Number) -> _dtype: ...
@overload
def result_type(scalar: Number, tensor: Tensor) -> _dtype: ...
@overload
def result_type(scalar1: Number, scalar2: Number) -> _dtype: ...
def rfft(input: Tensor, signal_ndim: _int, normalized: _bool=False, onesided: _bool=True) -> Tensor: ...
@overload
def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
@overload
def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
def roll(input: Tensor, shifts: Union[_int, _size], dims: Union[_int, _size]=()) -> Tensor: ...
def rot90(input: Tensor, k: _int=1, dims: _size=(0, 1)) -> Tensor: ...
def round(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def round_(input: Tensor) -> Tensor: ...
def rrelu(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
def rrelu_(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
def rsqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def rsqrt_(input: Tensor) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
def scalar_tensor(s: Number, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Number) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
@overload
def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
@overload
def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
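# Sketch of `scatter_add` along dim 0: out[index[i]] += src[i], with
# repeated indices accumulating (hand-computed values):
#   base = torch.zeros(3)
#   idx = torch.tensor([0, 0, 2])
#   src = torch.tensor([1., 2., 3.])
#   torch.scatter_add(base, 0, idx, src)  # tensor([3., 0., 3.])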
@overload
def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def searchsorted(sorted_sequence: Tensor, self: Number, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ...
@overload
def select(input: Tensor, dim: _int, index: _int) -> Tensor: ...
def selu(input: Tensor) -> Tensor: ...
def selu_(input: Tensor) -> Tensor: ...
def set_flush_denormal(mode: _bool) -> _bool: ...
def set_num_interop_threads(num: _int) -> None: ...
def set_num_threads(num: _int) -> None: ...
def sgn(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sigmoid_(input: Tensor) -> Tensor: ...
def sign(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def signbit(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sin_(input: Tensor) -> Tensor: ...
def sinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sinh_(input: Tensor) -> Tensor: ...
def slogdet(input: Tensor) -> namedtuple_sign_logabsdet: ...
def smm(input: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
def solve(input: Tensor, A: Tensor, *, out: Optional[Tensor]=None) -> namedtuple_solution_LU: ...
@overload
def sort(input: Tensor, dim: _int=-1, descending: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
@overload
def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def split_with_sizes(input: Tensor, split_sizes: _size, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def sqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sqrt_(input: Tensor) -> Tensor: ...
def square(input: Tensor) -> Tensor: ...
def square_(input: Tensor) -> Tensor: ...
@overload
def squeeze(input: Tensor) -> Tensor: ...
@overload
def squeeze(input: Tensor, dim: _int) -> Tensor: ...
@overload
def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: ...
@overload
def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sspaddmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def sspaddmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, unbiased: _bool=True, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, dim: Union[_int, _size], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, dim: Union[_int, _size], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def sub(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sub(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
@overload
def sub(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def subtract(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def subtract(input: Tensor, other: Number, alpha: Number=1, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sum(input: Tensor, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
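# Sketch of the `sum` overloads: full reduction vs. reduction over `dim`,
# with `keepdim` preserving the reduced dimension as size 1:
#   x = torch.ones(2, 3)
#   torch.sum(x)                             # tensor(6.)
#   torch.sum(x, dim=0)                      # tensor([2., 2., 2.])
#   torch.sum(x, dim=0, keepdim=True).shape  # torch.Size([1, 3])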
def svd(input: Tensor, some: _bool=True, compute_uv: _bool=True, *, out: Optional[Tensor]=None) -> namedtuple_U_S_V: ...
def symeig(input: Tensor, eigenvectors: _bool=False, upper: _bool=True, *, out: Optional[Tensor]=None) -> namedtuple_eigenvalues_eigenvectors: ...
def t(input: Tensor) -> Tensor: ...
def take(input: Tensor, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def tan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def tan_(input: Tensor) -> Tensor: ...
def tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def tanh_(input: Tensor) -> Tensor: ...
def tensor(data: Any, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
def threshold(input: Tensor, threshold: Number, value: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def threshold_(input: Tensor, threshold: Number, value: Number) -> Tensor: ...
def topk(input: Tensor, k: _int, dim: _int=-1, largest: _bool=True, sorted: _bool=True, *, out: Optional[Tensor]=None) -> namedtuple_values_indices: ...
def trace(input: Tensor) -> Tensor: ...
@overload
def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
@overload
def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ...
@overload
def trapz(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
@overload
def trapz(y: Tensor, *, dx: _float=1, dim: _int=-1) -> Tensor: ...
def triangular_solve(input: Tensor, A: Tensor, upper: _bool=True, transpose: _bool=False, unitriangular: _bool=False, *, out: Optional[Tensor]=None) -> namedtuple_solution_cloned_coefficient: ...
def tril(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def tril_indices(row: _int, col: _int, offset: _int=0, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def triu(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def triu_indices(row: _int, col: _int, offset: _int=0, *, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ...
def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
def trunc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def trunc_(input: Tensor) -> Tensor: ...
@overload
def unbind(input: Tensor, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def unique_dim(input: Tensor, dim: _int, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
def unsafe_chunk(input: Tensor, chunks: _int, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def unsafe_split(input: Tensor, split_size: _int, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def unsafe_split_with_sizes(input: Tensor, split_sizes: _size, dim: _int=0) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def unsqueeze(input: Tensor, dim: _int) -> Tensor: ...
def vander(x: Tensor, N: Optional[_int]=None, increasing: _bool=False) -> Tensor: ...
@overload
def var(input: Tensor, unbiased: _bool=True, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var(input: Tensor, dim: Union[_int, _size], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, dim: Union[_int, _size], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def view_as_complex(input: Tensor) -> Tensor: ...
def view_as_real(input: Tensor) -> Tensor: ...
def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def where(condition: Tensor, input: Tensor, other: Tensor) -> Tensor: ...
@overload
def where(condition: Tensor, self: Number, other: Tensor) -> Tensor: ...
@overload
def where(condition: Tensor, input: Tensor, other: Number) -> Tensor: ...
@overload
def where(condition: Tensor, self: Number, other: Number) -> Tensor: ...
@overload
def where(condition: Tensor) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
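# Sketch of `where`: the three-argument forms select elementwise by
# condition; the single-argument form returns index tensors, like
# nonzero(as_tuple=True):
#   cond = torch.tensor([True, False, True])
#   torch.where(cond, torch.tensor(1.), torch.tensor(-1.))  # tensor([1., -1., 1.])
#   torch.where(cond)  # (tensor([0, 2]),)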
def zero_(input: Tensor) -> Tensor: ...
@overload
def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def zeros(size: _size, *, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def zeros(*size: _int, out: Optional[Tensor]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
def zeros_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: _dtype=None, layout: _layout=strided, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
'_add_batch_dim', '_add_relu', '_add_relu_', '_addmv_impl_', '_aminmax',
'_amp_non_finite_check_and_unscale_', '_amp_update_scale', '_baddbmm_mkl_',
'_batch_norm_impl_index', '_bmm', '_cast_Byte', '_cast_Char', '_cast_Double', '_cast_Float',
'_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short', '_cat', '_choose_qparams_per_tensor',
'_compute_linear_combination', '_conj', '_convolution', '_convolution_nogroup', '_copy_from',
'_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state', '_cudnn_rnn',
'_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache', '_cufft_get_plan_cache_max_size',
'_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size', '_cummax_helper', '_cummin_helper',
'_debug_has_internal_overlap', '_dim_arange', '_dirichlet_grad', '_embedding_bag',
'_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
'_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
'_fake_quantize_learnable_per_tensor_affine', '_fft_with_size', '_foreach_add', '_foreach_add_',
'_foreach_add_scalar_list', '_foreach_add_scalar_list_', '_foreach_addcdiv', '_foreach_addcdiv_',
'_foreach_addcmul', '_foreach_addcmul_', '_foreach_div', '_foreach_div_',
'_foreach_div_scalar_list', '_foreach_div_scalar_list_', '_foreach_exp', '_foreach_exp_',
'_foreach_mul', '_foreach_mul_', '_foreach_mul_scalar_list', '_foreach_mul_scalar_list_',
'_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', '_foreach_sub_', '_foreach_sub_scalar_list',
'_foreach_sub_scalar_list_', '_fused_dropout', '_grid_sampler_2d_cpu_fallback',
'_has_compatible_shallow_copy_type', '_index_copy_', '_index_put_impl_', '_log_softmax',
'_log_softmax_backward_data', '_logcumsumexp', '_lu_solve_helper', '_lu_with_info',
'_make_per_channel_quantized_tensor', '_make_per_tensor_quantized_tensor', '_masked_scale',
'_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mode', '_multinomial_alias_draw',
'_multinomial_alias_setup', '_nnpack_available', '_nnpack_spatial_convolution',
'_pack_padded_sequence', '_pad_packed_sequence', '_remove_batch_dim', '_reshape_from_tensor',
'_s_where', '_sample_dirichlet', '_saturate_weight_to_fp16', '_shape_as_tensor',
'_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
'_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_addmm',
'_sparse_log_softmax', '_sparse_log_softmax_backward_data', '_sparse_mm', '_sparse_softmax',
'_sparse_softmax_backward_data', '_sparse_sum', '_standard_gamma', '_standard_gamma_grad', '_std',
'_test_serialization_subcmul', '_trilinear', '_unique', '_unique2', '_use_cudnn_ctc_loss',
'_use_cudnn_rnn_flatten_weight', '_validate_sparse_coo_tensor_args', '_var', '_weight_norm',
'_weight_norm_cuda_interface', 'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_',
'adaptive_avg_pool1d', 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm',
'addmv', 'addmv_', 'addr', 'affine_grid_generator', 'all', 'allclose', 'alpha_dropout',
'alpha_dropout_', 'amax', 'amin', 'angle', 'any', 'arange', 'arccos', 'arccos_', 'arccosh',
'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan_', 'arctanh', 'arctanh_',
'argmax', 'argmin', 'argsort', 'as_strided', 'as_strided_', 'as_tensor', 'asin', 'asin_', 'asinh',
'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm', 'bartlett_window',
'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce', 'batch_norm_elemt',
'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts', 'batch_norm_stats',
'batch_norm_update_stats', 'bernoulli', 'bilinear', 'bincount', 'binomial', 'bitwise_and',
'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman_window', 'bmm', 'bucketize', 'can_cast',
'cat', 'ceil', 'ceil_', 'celu', 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse',
'cholesky_solve', 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_max', 'clamp_max_',
'clamp_min', 'clamp_min_', 'clip', 'clip_', 'clone', 'combinations', 'complex', 'conj',
'constant_pad_nd', 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d',
'conv_transpose2d', 'conv_transpose3d', 'convolution', 'cos', 'cos_', 'cosh', 'cosh_',
'cosine_similarity', 'count_nonzero', 'cross', 'cudnn_affine_grid_generator', 'cudnn_batch_norm',
'cudnn_convolution', 'cudnn_convolution_transpose', 'cudnn_grid_sampler', 'cudnn_is_acceptable',
'cummax', 'cummin', 'cumprod', 'cumsum', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach',
'detach_', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'digamma', 'dist', 'div', 'divide', 'dot',
'dropout', 'dropout_', 'dstack', 'eig', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty',
'empty_like', 'empty_meta', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_',
'erfc', 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expm1', 'expm1_', 'eye',
'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fft', 'fill_', 'fix', 'fix_',
'flatten', 'flip', 'fliplr', 'flipud', 'floor', 'floor_', 'floor_divide', 'fmod', 'frac', 'frac_',
'frobenius_norm', 'from_file', 'from_numpy', 'full', 'full_like', 'gather', 'gcd', 'gcd_', 'ge',
'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads', 'greater',
'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d', 'group_norm', 'gru',
'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside', 'histc', 'hspmm',
'hstack', 'hypot', 'i0', 'i0_', 'ifft', 'imag', 'index_add', 'index_copy', 'index_fill',
'index_put', 'index_put_', 'index_select', 'init_num_threads', 'instance_norm', 'int_repr',
'inverse', 'irfft', 'is_complex', 'is_distributed', 'is_floating_point', 'is_grad_enabled',
'is_nonzero', 'is_same_size', 'is_signed', 'is_vulkan_available', 'isclose', 'isfinite', 'isinf',
'isnan', 'isneginf', 'isposinf', 'isreal', 'kaiser_window', 'kthvalue', 'layer_norm', 'lcm',
'lcm_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log', 'log10', 'log10_',
'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp', 'logaddexp2',
'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logit',
'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lstsq', 'lt', 'lu_solve', 'masked_fill',
'masked_scatter', 'masked_select', 'matmul', 'matrix_exp', 'matrix_power', 'matrix_rank', 'max',
'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d', 'max_pool3d', 'maximum', 'mean', 'median',
'min', 'minimum', 'miopen_batch_norm', 'miopen_convolution', 'miopen_convolution_transpose',
'miopen_depthwise_convolution', 'miopen_rnn', 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution',
'mkldnn_convolution_backward_weights', 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mm', 'mode',
'movedim', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'namedtuple_Q_R',
'namedtuple_U_S_V', 'namedtuple_a_tau', 'namedtuple_eigenvalues_eigenvectors',
'namedtuple_sign_logabsdet', 'namedtuple_solution_LU', 'namedtuple_solution_QR',
'namedtuple_solution_cloned_coefficient', 'namedtuple_values_indices', 'nanquantile', 'nansum',
'narrow', 'native_batch_norm', 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne',
'neg', 'neg_', 'negative', 'negative_', 'nextafter', 'nonzero', 'norm_except_dim', 'normal',
'not_equal', 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer',
'pairwise_distance', 'pdist', 'pinverse', 'pixel_shuffle', 'poisson', 'poisson_nll_loss', 'polar',
'polygamma', 'pow', 'prelu', 'prod', 'promote_types', 'q_per_channel_axis', 'q_per_channel_scales',
'q_per_channel_zero_points', 'q_scale', 'q_zero_point', 'qr', 'quantile', 'quantize_per_channel',
'quantize_per_tensor', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_rnn_relu_cell',
'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
'randn', 'randn_like', 'randperm', 'range', 'real', 'reciprocal', 'reciprocal_', 'relu', 'relu_',
'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'result_type', 'rfft',
'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_',
'rrelu', 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'scalar_tensor', 'scatter', 'scatter_add',
'searchsorted', 'select', 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads',
'set_num_threads', 'sgn', 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinh', 'sinh_',
'slogdet', 'smm', 'softmax', 'solve', 'sort', 'sparse_coo_tensor', 'split_with_sizes', 'sqrt',
'sqrt_', 'square', 'square_', 'squeeze', 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract',
'sum', 'svd', 'symeig', 't', 'take', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'threshold',
'threshold_', 'topk', 'trace', 'transpose', 'trapz', 'triangular_solve', 'tril', 'tril_indices',
'triu', 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unique_dim', 'unsafe_chunk',
'unsafe_split', 'unsafe_split_with_sizes', 'unsqueeze', 'vander', 'var', 'var_mean', 'vdot',
'view_as_complex', 'view_as_real', 'vstack', 'where', 'zero_', 'zeros', 'zeros_like']
|
import os
import json
import logging
from shapely import wkt
import requests
from PIL import Image
import numpy as np
from .pascal_voc_writer import Writer as PascalWriter
from tqdm import tqdm
class UnknownFormatError(Exception):
"""Exception raised for unknown label_format"""
    def __init__(self, label_format):
        self.message = ("Provided label_format '{}' is unsupported"
                        .format(label_format))
        super().__init__(self.message)
def from_json(labeled_data,
annotations_output_dir,
images_output_dir,
image_sets_dir,
label_format='WKT',
database='unknown',
use_local=False,
local_image_dir=''):
"""Convert Labelbox JSON export to Pascal VOC format.
Args:
labeled_data (str): File path to Labelbox JSON export of label data.
annotations_output_dir (str): File path of directory to write Pascal VOC
annotation files.
images_output_dir (str): File path of directory to write images.
label_format (str): Format of the labeled data.
Valid options are: "WKT" and "XY", default is "WKT".
Todo:
* Add functionality to allow use of local copy of an image instead of
downloading it each time.
"""
# make sure annotation output directory is valid
try:
annotations_output_dir = os.path.abspath(annotations_output_dir)
assert os.path.isdir(annotations_output_dir)
except AssertionError as e:
logging.exception('Annotation output directory does not exist')
return None
# read labelbox JSON output
with open(labeled_data) as f:
label_data = json.loads(f.read())
if use_local:
image_id = 'External ID'
else:
image_id = 'ID'
label_set = dict()
for data in tqdm(label_data):
labels = []
if label_format == 'object':
if 'objects' in data['Label']:
for label in data['Label']['objects']:
labels.append(label['value'])
        if data[image_id] not in label_set:
            label_set[data[image_id]] = labels
try:
write_label(
label_id=data[image_id],
image_url=os.path.join(local_image_dir, data['External ID']) if use_local else data['Labeled Data'],
labels=data['Label'],
label_format=label_format,
images_output_dir=images_output_dir,
annotations_output_dir=annotations_output_dir,
database=database,
local_image_dir=local_image_dir,
use_local=use_local
)
except requests.exceptions.MissingSchema as e:
            logging.exception('"Labeled Data" field must be a URL; pass use_local=True to read local files')
continue
except requests.exceptions.ConnectionError as e:
logging.exception(f"Failed to fetch image from {data['Labeled Data']}")
continue
write_image_set(label_set, image_sets_dir, use_local=use_local)
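# A hedged usage sketch (not part of the original module; the paths below are
# hypothetical placeholders showing how from_json is meant to be called):
# from_json(labeled_data='export.json',
#           annotations_output_dir='PascalVOC-export-LB/Annotations',
#           images_output_dir='PascalVOC-export-LB/JPEGImages',
#           image_sets_dir='PascalVOC-export-LB/ImageSets/Main',
#           label_format='object')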
def write_image_set(label_set, image_set_dir=None, use_local=False):
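    # Pascal VOC ImageSets convention: one <class>.txt file per class, where each
    # line reads "<image-id> 1" if the image contains that class and
    # "<image-id> -1" otherwise.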
unique_labels = list(set([x[0] for x in label_set.values()]))
if use_local:
file_ending = ''
else:
file_ending = '.jpeg'
try:
for label in unique_labels:
            with open(os.path.join(image_set_dir, f"{label}.txt"), 'w') as f:
for image in label_set:
labels = label_set[image]
if label in labels:
f.write(f"{image}{file_ending} {1}\n")
else:
f.write(f"{image}{file_ending} {-1}\n")
except TypeError as e:
logging.exception(f"Please provide image sets directory, usually is PascalVOC-export-LB/ImageSets/Main'")
def write_label(label_id,
image_url,
labels,
label_format,
images_output_dir,
annotations_output_dir,
database='Unknown',
use_local=False,
local_image_dir=None):
"Writes a Pascal VOC formatted image and label pair to disk."
label_id = label_id.split('.')[0]
# Download image and save it
if use_local:
im = Image.open(image_url)
else:
response = requests.get(image_url, stream=True)
response.raw.decode_content = True
im = Image.open(response.raw)
image_name = (f'{label_id}.{im.format.lower()}')
image_fqn = os.path.join(images_output_dir, image_name)
im.save(image_fqn, format=im.format)
# generate image annotation in Pascal VOC
width, height = im.size
xml_writer = PascalWriter(database=database, path=image_fqn, width=width, height=height)
# remove classification labels (Skip, etc...)
if not callable(getattr(labels, 'keys', None)):
# skip if no categories (e.g. "Skip")
return
# convert label to Pascal VOC format
for category_name, wkt_data in labels.items():
if label_format == 'WKT':
xml_writer = _add_pascal_object_from_wkt(
xml_writer,
img_height=height,
wkt_data=wkt_data,
label=category_name)
elif label_format == 'XY':
xml_writer = _add_pascal_object_from_xy(
xml_writer,
img_height=height,
polygons=wkt_data,
label=category_name)
elif label_format == 'object':
xml_writer = _add_pascal_object_from_bbox(
xml_writer=xml_writer,
img_height=height,
img_width=width,
bbox=wkt_data,
label=category_name)
else:
e = UnknownFormatError(label_format=label_format)
logging.exception(e.message)
raise e
# write Pascal VOC xml annotation for image
xml_writer.save(os.path.join(annotations_output_dir, '{}.xml'.format(label_id)))
def _add_pascal_object_from_wkt(xml_writer, img_height, wkt_data, label):
polygons = []
if type(wkt_data) is list: # V3+
polygons = map(lambda x: wkt.loads(x['geometry']), wkt_data)
else: # V2
polygons = wkt.loads(wkt_data)
for m in polygons:
xy_coords = []
for x, y in m.exterior.coords:
xy_coords.extend([x, img_height - y])
        # drop the last point if it duplicates the first point
if xy_coords[-2:] == xy_coords[:2]:
xy_coords = xy_coords[:-2]
xml_writer.addObject(name=label, xy_coords=xy_coords)
return xml_writer
def _add_pascal_object_from_xy(xml_writer, img_height, polygons, label):
for polygon in polygons:
if 'geometry' in polygon: # V3
polygon = polygon['geometry']
assert type(polygon) is list # V2 and V3
xy_coords = []
for x, y in [(p['x'], p['y']) for p in polygon]:
xy_coords.extend([x, img_height - y])
xml_writer.addObject(name=label, xy_coords=xy_coords)
return xml_writer
def _add_pascal_object_from_bbox(xml_writer, img_height, img_width, bbox, label):
for obj in bbox:
if 'bbox' in obj: # V3
bbox = obj['bbox']
xy_coords = []
xy_coords.extend([bbox['left'], bbox['top'], bbox['width'] + bbox['left'], bbox['height'] + bbox['top']])
xml_writer.addObject(name=obj['value'], xy_coords=xy_coords, new=True)
return xml_writer |
"""
CNN(Convolution Neural Network, 합성곱 신경망)
= 이미지 인식과 음성 인식등 다양한 딥러닝 분야에서 사용.
완전연결(Fully-Connected) 신경망: 'Affine 계층'으로 구현
input -> [Affine] -> [ReLU] -> [Affine] -> [ReLU] -> [Affine] -> [Softmax] -> output
CNN: 합성곱 계층(Convolutional Layer) & 폴링 계층(Pooling Layer) 추가.
input -> [Conv] -> [ReLU] -> [Pooling] -> [Conv] -> [ReLU] -> [Pooling] -> [Affine] -> [Softmax] -> output
output에 가까운 layer에서는 '[Affine] -> [ReLU]' 구성을 사용할 수 있다.
그리고 마지막 layer에서는 '[Affine] -> [Softmax]' 구성을 그대로 사용한다.
CNN은 각 layer 사이에서 3차원 데이터 같은 '입체적인 데이터'가 흐른다는 것이 완전연결 신경망과 다르다.
완전연결 신경망은 3차원 입력 데이터를 1차원으로 평탄화해서 전달하기 때문에 입력의 특징을 제대로 살릴수 없다.
그러나 CNN은 3차원 입력데이터를 그대로 3차원으로 전달하기 떄문에 입력의 특징을 제대로 전달할 수 있다.
CNN에서의 데이터 = '특징 맵(Feature Map)'
CNN에서의 입력 데이터 = '입력 특징 맵(Input Feature Map)'
CNN에서의 출력 데이터 = '출력 특징 맵(Output Feature Map)'
CNN에서는 '필터의 매개변수'가 'Weight'에 해당된다. bias는 항상 하나(1x1)만 존재하고 필터를 적용한 모든 원소에 더한다.
input -> Conv filter -> + bias -> output
"""
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.signal import convolve, correlate
# open a jpg file
img = Image.open('sample.jpg', mode='r')
img_pixel = np.array(img)
print(img_pixel.shape)  # (937, 1920, 3) ~> (height, width, color depth (RGB))
# Note: the position of the color axis depends on the ML library.
# TensorFlow: channels-last; color depth is the last axis of the ndarray.
# Theano: channels-first; color depth is the first axis of the ndarray.
# Keras: supports both conventions.
# display the image
plt.imshow(img_pixel)  # pass the image converted to a pixel array.
plt.show()
# the image's Red/Green/Blue channel values
print(img_pixel[:, :, 0])
print(img_pixel[:, :, 1])
print(img_pixel[:, :, 2])
# 3x3x3 filter
filter = np.zeros((3, 3, 3))
print('filter =', filter)
# modify some of the filter's values
filter[1, 1, 0] = 255
print('filter =', filter)
# convolve the image with the filter
transformed_conv = convolve(img_pixel, filter, mode='same') / 255
# ~> divide by 255 to map the values into [0, 1]; mode='same' keeps the input size
plt.imshow(transformed_conv)
plt.show()
# cross-correlate the image with the filter
transformed_corr = correlate(img_pixel, filter, mode='same') / 255
plt.imshow(transformed_corr)
plt.show()
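# A minimal numeric sketch (added for illustration; reuses the imports above):
# the filter entries act as the layer's weights, and a single scalar bias is
# broadcast onto every element of the filtered output, matching the
# 'input -> Conv filter -> + bias -> output' flow from the docstring.
patch = np.arange(9.0).reshape(3, 3)         # toy single-channel input
w = np.array([[0., 1., 0.],
              [1., -4., 1.],
              [0., 1., 0.]])                  # 3x3 Laplacian-style filter
b = 0.5                                       # the single shared bias
out = correlate(patch, w, mode='same') + b    # filter, then add the bias everywhere
print(out.shape)                              # (3, 3): mode='same' keeps the size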
|
# -*- coding: utf-8 -*-
"""
Implements an internal QtWebKit based browser
"""
from PyQt4 import QtCore, QtGui, QtWebKit
from gui.browser.BrowserActions import BrowserActions
from gui.icons import Ico
from gui.icons import Icon
class BrowserPane(QtGui.QWidget):
"""
Implements a Browser pane
"""
def __init__(self, parent, main, initial_page=None, compact=False, enable_api=True, auto_compact_exit=True):
"""
Initializes the browser pane
"""
QtGui.QWidget.__init__(self, parent)
self.main = main
self.compact = compact
self.auto_compact_exit = auto_compact_exit
mainLayout = QtGui.QVBoxLayout()
mainLayout.setContentsMargins(0,0,0,0)
mainLayout.setSpacing(0)
self.setLayout(mainLayout)
self.toolbar = QtGui.QToolBar()
mainLayout.addWidget(self.toolbar, 0)
act = self.toolbar.addAction(Icon(Ico.Back), "", self.on_back)
act.setToolTip("Back")
act = self.toolbar.addAction(Icon(Ico.Forward), "", self.on_forward)
act.setToolTip("Forward")
act = self.toolbar.addAction(Icon(Ico.Refresh), "", self.on_refresh)
act.setToolTip("Refresh")
self.txtUrl = QtGui.QLineEdit(initial_page)
self.toolbar.addWidget(self.txtUrl)
        ### Browser - declared below
self.browser = BrowserWidget(self, self.main, enable_api=enable_api)
mainLayout.addWidget(self.browser, 2000)
self.browser.statusBarMessage.connect(self.on_browser_status_message)
self.browser.urlChanged.connect(self.on_browser_url_changed)
self.browser.linkClicked.connect(self.on_browser_link_clicked)
print "Connected Events"
self.statusBar = QtGui.QStatusBar()
mainLayout.addWidget(self.statusBar, 0)
if compact:
self.mode_change(compact)
if initial_page:
self.browser.setUrl(QtCore.QUrl(QtCore.QString(initial_page)))
def mode_change(self, mode):
"""
Changes the mode of the browser from/into compact mode.
Compact mode removes the toolbar and the status bar.
"""
if mode:
self.toolbar.hide()
self.statusBar.hide()
else:
self.toolbar.show()
self.statusBar.show()
def on_refresh(self):
self.browser.reload()
def on_back(self):
self.browser.back()
def on_forward(self):
self.browser.forward()
#################################################
## Browser Events
def on_browser_status_message(self, string):
print "status=", string # does nothing ????
self.statusBar.showMessage(string)
def on_browser_url_changed(self, url):
print "URl Changed"
if self.auto_compact_exit:
            self.mode_change(False)
self.txtUrl.setText(url.toString())
def on_browser_link_clicked(self, url):
print "url=", url, url.toString() # doesnt trigger ???
self.txtUrl.setText(url.toString())
class BrowserWidget(QtWebKit.QWebView):
"""
Implements the internal browser
"""
def __init__(self, parent, main, enable_api=True):
"""
Initializes the internal browser
"""
QtWebKit.QWebView.__init__(self, parent)
self.main = main
if enable_api:
self.actions = BrowserActions(main, self)
|
# -*- coding: utf-8 -*-
import os
from glob import glob
import matplotlib.pyplot as plt
import random
import pandas as pd
import numpy as np
#import matplotlib.gridspec as gridspec
#import seaborn as sns
import zlib
import itertools
import sklearn
import scipy
import skimage
from skimage.transform import resize
import csv
from tqdm import tqdm
from sklearn import model_selection
from sklearn.model_selection import train_test_split, learning_curve,KFold,cross_val_score,StratifiedKFold
from sklearn.utils import class_weight
from sklearn.metrics import confusion_matrix
import keras
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, MaxPool2D, BatchNormalization
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras import models, layers, optimizers
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from keras.optimizers import SGD, RMSprop, Adam, Adagrad, Adadelta
from keras.models import Sequential, model_from_json
from keras.layers import Activation,Dense, Dropout, Flatten, Conv2D, MaxPool2D,MaxPooling2D,AveragePooling2D, BatchNormalization
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras import backend as K
from keras.preprocessing import image
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense , Activation
from keras.layers import Dropout , GlobalAveragePooling2D
from keras.layers import Flatten
#from imblearn.over_sampling import RandomOverSampler
#from imblearn.under_sampling import RandomUnderSampler
#from sklearn.metrics import roc_auc_score
#from sklearn.metrics import roc_curve
#from sklearn.metrics import auc
import warnings
warnings.filterwarnings("ignore")
nb_train_samples = 5232
nb_validation_samples = 624
nb_normal_test_sample=234
nb_bacteria_test_sample=242
nb_virus_test_sample=148
nb_pneumonia_test_sample=390
nb_classes = 2
def train_dir(args):
return args.data_dir+"/train/"
def test_dir(args):
return args.data_dir+"/test/"
def plotKerasLearningCurve():
plt.figure(figsize=(10,5))
metrics = np.load('logs.npy')[()]
filt = ['acc'] # try to add 'loss' to see the loss learning curve
for k in filter(lambda x : np.any([kk in x for kk in filt]), metrics.keys()):
l = np.array(metrics[k])
plt.plot(l, c= 'r' if 'val' not in k else 'b', label='val' if 'val' in k else 'train')
x = np.argmin(l) if 'loss' in k else np.argmax(l)
y = l[x]
plt.scatter(x,y, lw=0, alpha=0.25, s=100, c='r' if 'val' not in k else 'b')
plt.text(x, y, '{} = {:.4f}'.format(x,y), size='15', color= 'r' if 'val' not in k else 'b')
plt.legend(loc=4)
plt.axis([0, None, None, None]);
plt.grid()
plt.xlabel('Number of epochs')
plt.ylabel('Accuracy')
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.figure(figsize = (5,5))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def plot_learning_curve(history):
plt.figure(figsize=(8,8))
plt.subplot(1,2,1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./accuracy_curve.png')
plt.subplot(1,2,2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./loss_curve.png')
def train_generator(args):
data_directory = train_dir(args)
if(args.aug):
if args.aug_mode == 2:
train_datagen = ImageDataGenerator(rescale=1. / 255,
#samplewise_center=True,
#samplewise_std_normalization=True,
#zca_whitening=True,
#zca_epsilon=1e-6,
rotation_range=3,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
#channel_shift_range=10,
fill_mode='constant',
cval=0.,
horizontal_flip=True,
vertical_flip=True)
else:
            transformation_ratio = .05  # how aggressive the data augmentation/transformation will be
train_datagen = ImageDataGenerator(rescale=1. / 255,
rotation_range=transformation_ratio,
shear_range=transformation_ratio,
zoom_range=transformation_ratio,
horizontal_flip=True,
vertical_flip=True)
else:
train_datagen = ImageDataGenerator(rescale=1. / 255)
image_resize_height = args.input_size
image_resize_width = args.input_size
generator = train_datagen.flow_from_directory(
data_directory,
#color_mode='grayscale',
target_size=(image_resize_height, image_resize_width),
batch_size=args.batch_size,
class_mode='categorical',
seed=1234)
return generator
def test_generator(args) :
validation_datagen = ImageDataGenerator(rescale=1. / 255)
image_resize_height = args.input_size
image_resize_width = args.input_size
data_directory = test_dir(args)
generator = validation_datagen.flow_from_directory(
data_directory,
#color_mode='grayscale',
target_size=(image_resize_height, image_resize_width),
batch_size=args.batch_size,
class_mode='categorical')
return generator
def createModel(pretrainedmodel,args):
base_model = pretrainedmodel # Topless
x = Sequential()
x.add(base_model)
# Add top layer
if (args.model == 3):
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(512, activation='relu', name='fc1'))
x.add(Dropout(0.5))
x.add(Dense(256, activation='relu', name='fc3'))
x.add(Dropout(0.5))
x.add(Dense(128, activation='relu', name='fc4'))
x.add(Dropout(0.5))
elif (args.model == 2):
        # inceptionv3 original
x.add(GlobalAveragePooling2D(name='avg_pool'))
elif (args.model == 4):
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(512, activation='relu', name='fc1'))
x.add(Dropout(0.5))
elif (args.model == 5):
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(512, activation='relu', name='fc1'))
x.add(Dropout(0.5))
x.add(Dense(512, activation='relu', name='fc2'))
x.add(Dropout(0.5))
x.add(Dense(256, activation='relu', name='fc3'))
x.add(Dropout(0.5))
elif (args.model == 1):
#VGG original
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(4096, activation='relu', name='fc1'))
x.add(Dense(4096, activation='relu', name='fc2'))
elif (args.model == 6):
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(512, activation='relu', name='fc1'))
if args.batchnorm:
x.add(BatchNormalization())
x.add(Dropout(args.dropout1))
x.add(Dense(256, activation='relu', name='fc3'))
if args.batchnorm:
x.add(BatchNormalization())
x.add(Dropout(args.dropout2))
x.add(Dense(128, activation='relu', name='fc4'))
x.add(Dropout(args.dropout3))
elif (args.model == 7):
x = Sequential()
for l in base_model.layers[0:-1]:
x.add(l)
if args.batchnorm:
x.add(BatchNormalization())
x.add(Dropout(args.dropout1))
x.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
x.add(GlobalAveragePooling2D(name='avg_pool'))
if args.model7_fc1:
x.add(Dense(16, name='fc1'))
if args.batchnorm:
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout2))
elif (args.model==8):
#VGG add bn
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(4096, name='fc1'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout1))
x.add(Dense(2048, name='fc2'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout2))
elif (args.model==9):
#VGG add bn
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(4096, name='fc1'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout1))
x.add(Dense(2048, name='fc2'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout1))
x.add(Dense(2048, name='fc3'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout2))
elif (args.model==10):
#VGG add bn
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(4096, name='fc1'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout1))
x.add(Dense(2048, name='fc2'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout2))
elif (args.model==11):
#VGG add max dropout
x.add(Dropout(args.dropout1))
x.add(GlobalAveragePooling2D(name='avg_pool'))
x.add(Dense(4096, name='fc1'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout1))
x.add(Dense(2048, name='fc2'))
x.add(BatchNormalization())
x.add(Activation('relu'))
x.add(Dropout(args.dropout2))
elif (args.model == 12):
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(nb_classes, activation='softmax', name='predictions')(x)
model = Model(base_model.input, x)
if(args.model != 12):
x.add(Dense(nb_classes, activation='softmax', name='predictions'))
model = x
# Train top layer
#if (args.testing or args.vis) :
# for layer in model.layers:
# layer.trainable = False
#else:
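    # Note on the loops below: with tune_layer >= 0 every pretrained layer ends
    # up frozen; only a negative tune_layer re-enables training on the last
    # |tune_layer| layers of the base model.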
for layer in base_model.layers[0:args.tune_layer]:
layer.trainable = False
if args.tune_layer >= 0:
for layer in base_model.layers[args.tune_layer:]:
layer.trainable = False
else:
for layer in base_model.layers[args.tune_layer:]:
print("layer",layer.name," set to:",True)
layer.trainable = True
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
summary_file= open(args.save_dir+"/model_summary.txt","w")
model.summary(print_fn=lambda x: summary_file.write(x + '\n'))
summary_file.close()
return model
def train(model,args):
# callbacks
from keras.callbacks import CSVLogger, TensorBoard, ModelCheckpoint,LearningRateScheduler,EarlyStopping
log = CSVLogger(args.save_dir + '/log.csv')
tb = TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
batch_size=args.batch_size, histogram_freq=int(args.debug))
checkpoint = ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_acc', mode='max',
save_best_only=True, save_weights_only=True, verbose=1)
lr_decay = LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))
early_stop = EarlyStopping(monitor='val_acc', patience=args.stopnum, verbose=1)
if args.aug:
nb_samples = args.aug_num
else:
nb_samples = nb_train_samples
# Fit model
#history = model.fit(xtrain,ytrain, epochs=numepochs, class_weight=classweight, validation_data=(xtest,ytest), verbose=1,callbacks = [MetricsCheckpoint('logs')])
history = model.fit_generator(generator=train_generator(args),
steps_per_epoch=int(nb_samples/ args.batch_size),
epochs=args.epochs,
use_multiprocessing=True,
validation_data=test_generator(args),
validation_steps=int(nb_validation_samples/args.batch_size),
callbacks=[log, tb, checkpoint, lr_decay, early_stop],
verbose=1)
#model.save_weights(args.save_dir + '/trained_model.h5')
# Evaluate model
score = model.evaluate_generator(generator=test_generator(args), verbose=0)
print('\nKeras CNN - accuracy:', score[1], '\n')
with open(args.save_dir+"/model_summary.txt", "a") as summary_file:
summary_file.write(str(model.metrics_names)+"\n")
summary_file.write(str(score))
return model
def get_all_y(data_generator):
data_list = []
batch_index = 0
while batch_index <= data_generator.batch_index:
data = data_generator.next()
data_list.append(data[1])
batch_index = batch_index + 1
return data_list
def test(model,args):
#y_test = get_all_y(test_generator(args))
test_gen = test_generator(args)
y_pred = model.predict_generator(test_gen,steps = len(test_gen.filenames),verbose=1)
print(y_pred)
#print(sklearn.metrics.classification_report(np.where(ytest > 0)[1], np.argmax(y_pred, axis=1), target_names=list(labels.values())))
#Y_pred_classes = np.argmax(y_pred,axis = 1)
#Y_true = np.argmax(ytest,axis = 1)
#confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
#print(confusion_mtx)
#plot_confusion_matrix(confusion_mtx, classes = list(labels.values()))
#plt.show()
return model
def load_xray_test(args,load_count=-1):
from xray_dataset import get_data
x_test, y_test, imgfiles = get_data(test_dir(args),args.input_size,load_count)
return (x_test, y_test,imgfiles)
def get_class_labels(args):
from xray_dataset import get_labels_dict
label_dict = get_labels_dict(train_dir(args))
return list(label_dict.keys())
def test_one_by_one(model,args):
    # load a balanced number of samples per class (args.cpc images per class)
(x_test,y_test,imgfiles) = load_xray_test(args, args.cpc)
test_result_file = open(args.save_dir+"/"+os.path.basename(args.weights)+"_test_result.txt","w")
model.summary(print_fn=lambda x: test_result_file.write(x + '\n'))
Y_true = []
Y_pred_classes = []
for im,real_y,f in zip(x_test,y_test,imgfiles):
y_pred = model.predict(im.reshape(-1,args.input_size,args.input_size,3).astype('float32') / 255.,verbose=0)[0]
#print(y_pred)
pred_class=np.argmax(y_pred)
#print(pred_class)
Y_pred_classes.append(pred_class)
Y_true.append(real_y)
#print(f,"pred result is",pred_class==real_y)
#test_result_file.write(str(f)+" pred result is "+str(pred_class==real_y)+"\n")
labels = get_class_labels(args)
test_result_file.write(sklearn.metrics.classification_report(Y_true, Y_pred_classes , target_names=labels)+"\n")
print(sklearn.metrics.classification_report(Y_true, Y_pred_classes , target_names=labels))
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
print(confusion_mtx)
test_result_file.write(str(confusion_mtx)+"\n")
#plot_confusion_matrix(confusion_mtx, classes = labels)
#plt.show()
test_result_file.close()
return model
def show_activation(model,layer_idx):
from vis.visualization import visualize_activation
from vis.input_modifiers import Jitter
    # class index 1 corresponds to 'PNEUMONIA' here; filter_indices=None visualizes all outputs
im = visualize_activation(model, layer_idx, filter_indices=None, max_iter=500,input_modifiers=[Jitter(16)], verbose=False)
plt.imshow(im)
plt.show()
def show_saliency(model,layer_idx,images,outs):
from vis.visualization import visualize_saliency
#plt.figure()
f, ax = plt.subplots(nb_classes,args.cpc,figsize=(15,15))
ax=ax.reshape((len(images)))
plt.suptitle('Saliency for predicted classes')
# New output containing the output result for the saliency visualization
gradsSaliency=[]
certainties=[]
classKeys=[]
for i, img in enumerate(images):
classKey=np.argmax(outs[i])
classKeys.append(classKey)
certainty=outs[i][classKey]
certainties.append(certainty)
#grads = visualize_saliency(model, layer_idx, filter_indices=classKeys[i], seed_input=img, backprop_modifier='guided')
grads = visualize_saliency(model, layer_idx, filter_indices=None, seed_input=img, backprop_modifier='guided')
gradsSaliency.append(grads)
ax[i].imshow(grads,cmap='jet')
ax[i].set_title('pred:' + str(classKeys[i]) +'('+ str(round(certainties[i]*100,3))+' %)')
plt.show()
return gradsSaliency
def show_cam(model,layer_idx,images,outs):
import matplotlib.cm as cm
# KERAS visualize_cam
from vis.visualization import visualize_cam, overlay
#plt.figure()
f, ax = plt.subplots(nb_classes,args.cpc,figsize=(15,15))
ax=ax.reshape((len(images)))
# New list containing the output image result of the Grad-Cam visualization.
gradsCAM=[]
certainties=[]
classKeys=[]
plt.suptitle('grad-CAM for predicted classes')
for i, img in enumerate(images):
classKey=np.argmax(outs[i])
classKeys.append(classKey)
certainty=outs[i][classKey]
certainties.append(certainty)
# Visualization with the Grad-Cam output.
#grads = visualize_cam(model, layer_idx, filter_indices=classKeys[i], seed_input=img, backprop_modifier='guided')
grads = visualize_cam(model, layer_idx, filter_indices=None, seed_input=img, backprop_modifier='guided')
# Lets overlay the heatmap onto original image.
gradsCAM.append(grads)
t=plt.imshow(grads,cmap='jet')
l=t.get_array()
ax[i].imshow(overlay(l,img))
ax[i].set_title('pred : ' + str(classKeys[i]) +'('+ str(round(certainties[i]*100,3))+' %)')
plt.show()
return gradsCAM
def show_salcam(gradsSaliency, gradsCAM,images,outs):
from matplotlib import colors
#plt.figure()
f, ax = plt.subplots(nb_classes,args.cpc,figsize=(15,15))
ax=ax.reshape((nb_classes*args.cpc))
plt.suptitle('grad-CAM + saliency for predicted classes')
certainties=[]
classKeys=[]
for i, img in enumerate(images):
classKey=np.argmax(outs[i])
classKeys.append(classKey)
certainty=outs[i][classKey]
certainties.append(certainty)
        ax[i].imshow((gradsSaliency[i][:,:,2]*1/(1.1+gradsCAM[i][:,:,2])),cmap='Blues',vmin=150)
ax[i].set_title('pred : ' + str(classKeys[i]) +'('+ str(round(certainties[i]*100,3))+' %)')
plt.show()
def vis(model,args):
from vis.utils import utils
from keras import activations
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
# Anyway, we are interested in the last layer, where the prediction happens
layer_idx = utils.find_layer_idx(model, 'predictions')
#To visualize activation over final dense layer outputs, we need to switch the softmax activation out for linear
#since gradient of output node will depend on all the other node activations.
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
layer_idx=args.layer_idx
#We define the softmax function to translate the output of the CNN into a probability for each class.
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
Rows are scores for each class.
Columns are predictions (samples).
"""
scoreMatExp = np.exp(np.asarray(x))
return scoreMatExp / scoreMatExp.sum(0)
def predictImage(args):
from os.path import basename
load_count = args.cpc
(x_test,y_test,imgfiles) = load_xray_test(args,load_count)
images=[]
outs=[]
if not args.noshowpredict:
#plt.figure()
f, ax = plt.subplots(nb_classes, load_count,figsize=(15,15))
ax=ax.reshape((nb_classes*load_count))
plt.suptitle('predicted classes')
i = 0
for im,real_y,fn in zip(x_test,y_test,imgfiles):
images.append(im)
out=softmax(model.predict(im.reshape(-1,args.input_size,args.input_size,3).astype('float32') / 255.)[0])
print(out)
print(fn)
outs.append(out)
classKey=np.argmax(out)
# Look in the dictionary for the specific term for the image identification.
certainty=out[classKey]
# green to gray
#from skimage.color import rgb2gray
#im=rgb2gray(im)
if not args.noshowpredict:
if len(y_test)>1:
ax[i].imshow(im/255.)
ax[i].set_title(basename(fn)+" pred: " + str(classKey) + '(' + str(round(certainty*100,3)) + '%)')
i+=1
else :
ax.imshow(im/255.)
ax.set_title(basename(fn)+" pred: " + str(classKey) + '(' + str(round(certainty*100,3)) + '%)')
return images,outs
images,outs = predictImage(args)
if not args.noshowpredict:
plt.show()
if args.vis == "act" or args.vis == "all":
show_activation(model,layer_idx)
elif args.vis == "sal" or args.vis == "all":
show_saliency(model,layer_idx,images,outs)
elif args.vis == "cam" or args.vis == "all":
show_cam(model,layer_idx,images,outs)
elif args.vis == "salcam" or args.vis == "all":
sal = show_saliency(model,layer_idx,images,outs)
cam = show_cam(model,layer_idx,images,outs)
show_salcam(sal,cam,images,outs)
def get_images_path(args):
# Parse paths
full_paths = [os.path.join(os.getcwd(), path) for path in args.path]
files = set()
for path in full_paths:
if os.path.isfile(path):
files.add(path)
else:
            files |= set(glob(path + '/*' + args.extension))
return files
def get_class_number(args):
from xray_dataset import get_labels_dict
label_dict = get_labels_dict(train_dir(args))
return len(label_dict)
if __name__ == "__main__":
import os
import sys
import argparse
parser = argparse.ArgumentParser(description="Capsule Network on MNIST.")
parser.add_argument('--epochs', default=5, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.9, type=float,
help="The value multiplied by lr at each epoch. Set a larger value for larger epochs 0.9 0.99")
parser.add_argument('--debug', action='store_true',
help="Save weights by TensorBoard")
parser.add_argument('--save_dir', default='./result')
parser.add_argument('--data_dir', default='../chest_xray',
help="the base of data dir")
    parser.add_argument('--input_size', default=224, type=int,
                        help="the size of the input image; overridden below to 224 for vgg16 and 299 for inceptionv3")
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing")
parser.add_argument('--pretrain_weights', default='imagenet',
help="The path of the pretrained weights. default is imagenet")
parser.add_argument('--aug', action="store_true", help="if use data augmentation")
parser.add_argument('--vis', help="generate visualization options are: act,sal,cam,all")
#parser.add_argument('path', nargs='*', help='Path of a file or a folder of files.')
    parser.add_argument('--cpc', default=2, type=int, help="how many images to load per class")
    parser.add_argument('--layer_idx', default=-1, type=int, help="the index of the layer to visualize")
    parser.add_argument('--noshowpredict', action="store_true", help="skip showing the prediction plot")
    parser.add_argument('--aug_num', default=nb_train_samples, type=int,
                        help="the number of augmented images; default is nb_train_samples")
    parser.add_argument('--stopnum', default=3, type=int,
                        help="early-stopping patience in epochs; default is 3")
    parser.add_argument('--model', default=3, type=int,
                        help="which top-layer variant to build (1-12; see createModel)")
    parser.add_argument('--aug_mode', default=1, type=int,
                        help="the augmentation mode, 1 - simple, 2 - complex")
parser.add_argument('--net', default="vgg16",
help="the net, vgg16 or inceptionv3")
    parser.add_argument('--tune_layer', default=0, type=int,
                        help="which pretrained layers to fine-tune: 0 freezes the whole pretrained model, -1 unfreezes its last layer, -2 its last two, and so on")
parser.add_argument('--dropout1', default=0.5, type=float,
help="the dropout of first dense layer. ")
parser.add_argument('--dropout2', default=0.5, type=float,
help="the dropout of second dense layer. ")
parser.add_argument('--dropout3', default=0.5, type=float,
help="the dropout of third dense layer. ")
    parser.add_argument('--batchnorm', action='store_true',
                        help="whether to add batch normalization after activations (models 6 and 7)")
    parser.add_argument('--model7_fc1', action='store_true',
                        help="whether model 7 includes the fc1 layer")
args = parser.parse_args()
print(args)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
else:
if not (args.testing or args.vis) :
print(args.save_dir+" existed!!!!! save the old result first")
            sys.exit()
if not (args.testing or args.vis):
args_file = open(args.save_dir+"/args_file.txt","w")
args_file.write(str(args)+"\n");
args_file.close();
if not os.path.exists(args.data_dir):
print(args.data_dir+" is not exist")
sys.exit()
nb_classes = get_class_number(args)
#'imagenet'
if(args.net == "vgg16"):
args.input_size = 224
pretrained_model = VGG16(weights = args.pretrain_weights, include_top=False,input_shape=(args.input_size ,args.input_size ,3))
else:
args.input_size = 299
pretrained_model = InceptionV3(weights = args.pretrain_weights, include_top=False,input_shape=(args.input_size ,args.input_size,3))
model = createModel(pretrained_model,args)
# train or test
if args.weights is not None: # init the model weights with provided one
model.load_weights(args.weights)
if args.testing: # as long as weights are given, will run testing
if args.weights is None:
print('No weights are provided. Will test using random initialized weights.')
test_one_by_one(model=model, args=args)
elif args.vis :
if args.weights is None:
print('No weights are provided for vis.')
sys.exit()
#if args.path is None:
# print('No path are provided for vis.')
# sys.exit()
vis(model=model, args=args)
else:
train(model=model, args=args)
|
# CUDA_VISIBLE_DEVICES='0' python gan.py
import argparse
import struct
import time
import numpy as np
print 'numpy ' + np.__version__
np.set_printoptions(threshold='nan')
np.set_printoptions(linewidth=250)
np.set_printoptions(formatter={'float': '{:12.8f}'.format, 'int': '{:4d}'.format})
import tensorflow as tf
print 'tensorflow ' + tf.__version__
import cv2
print 'cv2 ' + cv2.__version__
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--m', help='latent space dimensionality', default=10, type=int)
parser.add_argument('--n', help='number of units per layer', default=16, type=int)
parser.add_argument('--lr', help='learning rate', default=0.0001, type=float)
parser.add_argument('--batch', help='batch size', default=1000, type=int)
parser.add_argument('--epochs', help='training epochs', default=1000000, type=int)
parser.add_argument('--debug', default=False, action='store_true')
args = parser.parse_args()
print args
with open('train-images-idx3-ubyte','rb') as f:
h = struct.unpack('>IIII',f.read(16))
d = np.fromstring(f.read(), dtype=np.uint8).reshape((h[1],h[2],h[3],1)).astype('float32')
d = d/255. - .5
print 'd.shape',d.shape, 'd.min()',d.min(),'d.max()',d.max()
def dnet(args,x,reuse=None):
print 'discriminator network, reuse',reuse
with tf.variable_scope('dnet',reuse=reuse):
d = tf.layers.conv2d(inputs=x, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.layers.conv2d(inputs=d, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.image.resize_bilinear(images=d,size=[14,14]) ; print d
d = tf.layers.conv2d(inputs=d, filters=2*args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.layers.conv2d(inputs=d, filters=2*args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.image.resize_bilinear(images=d,size=[7,7]) ; print d
d = tf.layers.conv2d(inputs=d, filters=3*args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.layers.conv2d(inputs=d, filters=3*args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print d
d = tf.contrib.layers.flatten(d)
d = tf.layers.dense(inputs=d, units=1, activation=tf.sigmoid) ; print d
return d
def gnet(args,z,reuse=None):
print 'generator network, reuse', reuse
with tf.variable_scope('gnet',reuse=reuse):
g = tf.layers.dense(inputs=z, units=8*8*args.n, activation=None) ; print g
g = tf.reshape(g,[-1,8,8,args.n]) ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.image.resize_bilinear(images=g,size=[14,14]) ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.image.resize_bilinear(images=g,size=[28,28]) ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.layers.conv2d(inputs=g, filters=args.n, kernel_size=3, strides=1,activation=tf.nn.elu, padding='same') ; print g
g = tf.layers.conv2d(inputs=g, filters=1, kernel_size=3, strides=1,activation=None, padding='same') ; print g
return g
x = tf.placeholder('float32', [None,28,28,1],name='x') ; print x
z = tf.placeholder('float32', [None,args.m],name='z') ; print z
dx = dnet(args,x) # d(x)
gz = gnet(args,z) # g(z)
dgz = dnet(args,gz,reuse=True) # d(g(z))
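# Standard GAN objectives: the discriminator minimizes -log d(x) on real data
# and -log(1 - d(g(z))) on generated samples; the generator uses the
# non-saturating loss -log d(g(z)) rather than maximizing log(1 - d(g(z))).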
dxreal = tf.negative(tf.reduce_mean(tf.log(dx)))
dgzfake = tf.negative(tf.reduce_mean(tf.log(1-dgz)))
dgzreal = tf.negative(tf.reduce_mean(tf.log(dgz)))
dopt = tf.train.AdamOptimizer(learning_rate=args.lr)
dxreal_train = dopt.minimize(dxreal)
dgzfake_train = dopt.minimize(dgzfake)
gopt = tf.train.AdamOptimizer(learning_rate=args.lr)
dgzreal_train = gopt.minimize(dgzreal)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(args.epochs):
np.random.shuffle(d)
dxreal_loss=0.
dgzfake_loss=0.
dgzreal_loss=0.
t=0.
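        # per minibatch: one discriminator step on real images, one on generated
        # samples, then one generator step on fresh noise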
for j in range(0,d.shape[0],args.batch):
_,dxreal_loss_ = sess.run([dxreal_train,dxreal],feed_dict={x:d[j:j+args.batch]})
_,dgzfake_loss_ = sess.run([dgzfake_train,dgzfake],feed_dict={z:np.random.randn(args.batch,args.m)})
_,dgzreal_loss_ = sess.run([dgzreal_train,dgzreal],feed_dict={z:np.random.randn(args.batch,args.m)})
dxreal_loss += dxreal_loss_
dgzfake_loss += dgzfake_loss_
dgzreal_loss += dgzreal_loss_
t+=1.
print 'epoch',i,'dxreal',dxreal_loss/t,'dgzfake',dgzfake_loss/t,'dgzreal',dgzreal_loss/t
x0 = sess.run(gz, feed_dict={z:np.random.randn(args.batch,args.m)})
x0 = np.clip(x0+.5,0.,1.)*255.
cv2.imshow('img', cv2.resize(np.concatenate((x0[0:10]).astype('uint8'),axis=1),(1000,100)))
cv2.waitKey(10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Appinfos(object):
def __init__(self):
self._app_name = None
self._app_type = None
self._mini_app_id = None
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_type(self):
return self._app_type
@app_type.setter
def app_type(self, value):
self._app_type = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
def to_alipay_dict(self):
params = dict()
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = self.app_name.to_alipay_dict()
else:
params['app_name'] = self.app_name
if self.app_type:
if hasattr(self.app_type, 'to_alipay_dict'):
params['app_type'] = self.app_type.to_alipay_dict()
else:
params['app_type'] = self.app_type
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Appinfos()
if 'app_name' in d:
o.app_name = d['app_name']
if 'app_type' in d:
o.app_type = d['app_type']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
return o
|
n, m = map(int, input().split())
ans = [[None for i in range(m)] for j in range(n)]
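# Memoized count of monotone (down/right) lattice paths from (0, 0) to
# (n-1, m-1): get(i, j) = get(i+1, j) + get(i, j+1), with the target cell
# seeded to 1 below; the closed form is C(n+m-2, n-1).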
def get(i, j):
global ans
if i not in range(n) or j not in range(m):
return 0
if ans[i][j] != None:
return ans[i][j]
ans[i][j] = get(i+1, j) + get(i, j+1)
return ans[i][j]
ans[-1][-1] = 1
print(get(0, 0))
raise SystemExit  # stop here; remove this line to dump the DP table below
for bla in ans:
print(*bla, sep='\t')
|
def currency_converter(rate,euros):
dollars=euros*rate
return dollars
r=input("enter rate: ")
e=input("enter euros: ")
print(currency_converter(float(r),float(e)))
results = [currency_converter(100, 1000), currency_converter(100, 2000)]
print(results)
|
import numpy as np
from DTL import getDocMatrix, getWord, getLabelName, getDoc, wordInDoc, getWordCount, getDocNum
import math
import time
from enum import IntEnum
class Labels (IntEnum):
atheism = 1
graphics = 2
#load all the data into numpy arrays
trainData = np.loadtxt("trainData.txt")
testData = np.loadtxt("testData.txt")
trainLabel = np.loadtxt("trainLabel.txt")
testLabel = np.loadtxt("testLabel.txt")
file = open("words.txt")
words = file.read().splitlines()
labels = ["alt.atheism", "comp.graphics"]
def getLabelFreq(lbldata):
docs = np.where(lbldata == Labels.atheism)
docs2 = np.where(lbldata == Labels.graphics)
ptotal = docs[0].size
ntotal = docs2[0].size
resu = np.array([ptotal, ntotal])
return resu
#returns the number of times a word appears in a doc
def countWordinDoc(docId, wordId, data):
return data[docId - 1, wordId -1]
#count all words occurrences in a given dataset
def countWordAll(wordId, data):
subset = data[:, wordId - 1]
count = np.sum(subset)
return count
def getFreqTable(docdata, lbldata):
wtotal = getWordCount(words)
f = np.zeros((2, wtotal))
docs = np.where(lbldata == Labels.atheism)
docs2 = np.where(lbldata == Labels.graphics)
docArr = docdata[docs[0], :]
docArr2 = docdata[docs2[0], :]
for word in range(1, wtotal + 1):
pcount= countWordAll(word, docArr)
ncount = countWordAll(word, docArr2)
f[0, word - 1] = pcount
f[1, word - 1] = ncount
return f
def getRelFreq(FTable, lbldata):
rf = np.copy(FTable)
    # Laplace (add-one) smoothing: (count + 1) / (docs in class + number of classes)
p_class = getLabelFreq(lbldata)
rf[0,:] = (rf[0,:] + 1)/(p_class[0] + p_class.size)
rf[1,:] = (rf[1,:] + 1)/(p_class[1] + p_class.size)
return rf
def getLogProb(FTable):
lf = np.copy(FTable)
for data in lf:
data[...] = np.log(data)
return lf
def getDiscriminality(freqData, wordId):
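    # Discriminativeness of a word: |log P(w|c1) - log P(w|c2)|; large values
    # mean the word's smoothed likelihood differs strongly between the classes.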
p1 = freqData[Labels.atheism - 1, wordId -1]
p2 = freqData[Labels.graphics - 1, wordId -1]
#print(p1, p2)
resu = abs(np.log(p1) - np.log(p2))
return resu
def DiscrimLst(freqData, wordLst):
dlst = []
for wordId in wordLst:
dval = getDiscriminality(freqData, wordId)
dlst.append(dval)
return np.asarray(dlst)
def getTopD(dLst, wordLst,n):
srtLst = np.argsort(dLst)[::-1]
topWords = wordLst[srtLst]
PrintWords(topWords[0:n], dLst)
return topWords[0:n]
def PrintWords(wordLst, dLst):
print("DVals:", dLst[wordLst - 1])
for wordId in wordLst:
print(getWord(wordId),dLst[wordId -1])
def getDocWords(data, docId):
resu = np.where(data[docId - 1, :] > 0)
return resu[0]
def getNDocWords(data, docId):
resu = np.where(data[docId - 1, :] == 0)
return resu[0]
def normalize(f):
return f/np.sum(f)
def calcProb(data, freqT, docId, lbldata, useLog):
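    # Bernoulli naive Bayes: P(c|doc) is proportional to
    # P(c) * prod_{w in doc} P(w|c) * prod_{w not in doc} (1 - P(w|c)),
    # evaluated either directly or in log space depending on useLog.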
p_class = getLabelFreq(lbldata)
#normalize and smooth the class probability
word_inx = getDocWords(data, docId)
nword_inx = getNDocWords(data, docId)
if not useLog:
p_class = p_class/np.sum(p_class)
p_words = freqT[:, word_inx]
p_nwords = freqT[:, nword_inx]
p_nwords = 1 - p_nwords
p_all = np.concatenate((p_words, p_nwords), axis=1)
for pw in p_all.T:
p_class[0] *= pw[0]
p_class[1] *= pw[1]
p_class = normalize(p_class)
else:
p_class = np.log(p_class) - np.log(p_class.sum())
lfreqT = getLogProb(freqT)
lfreqT2 = getLogProb(1 - freqT)
p_words = lfreqT[:, word_inx]
p_nwords = lfreqT2[:, nword_inx]
p_all = np.concatenate((p_words, p_nwords), axis=1)
#print("ALL:", p_all.T[:10])
p_class = p_class + np.sum(p_all, axis=1)
p_class = normalize(p_class)
return p_class
def Classify(data, freqT, docId, lbldata, useLog):
p_c = calcProb(data, freqT, docId, lbldata, useLog)
if useLog:
label = np.argmin(p_c)
else:
label = np.argmax(p_c)
return label + 1
def main():
wordLst = np.arange(1, getWordCount(words) + 1)
trainDocs = getDocMatrix(trainData, trainLabel)
testDocs = getDocMatrix(testData, testLabel)
doclst = np.arange(1, getDocNum(trainDocs) + 1 )
doclst2 = np.arange(1, getDocNum(testDocs) + 1 )
trainTotal = getDocNum(trainDocs)
testTotal = getDocNum(testDocs)
wordId = 5
#print(getLabelFreq(trainLabel))
#print("Word:", wordId, countWordAll(wordId, trainDocs))
#SETUP
freqt = getFreqTable(trainDocs, trainLabel)
rfreqt = getRelFreq(freqt, trainLabel)
#print("RFREQ:", rfreqt)
lfreqt = getLogProb(rfreqt)
discrim = getDiscriminality(rfreqt, 10)
dLst = DiscrimLst(rfreqt, wordLst)
getTopD(dLst, wordLst, 10)
#print(getDocWords(trainDocs, 1))
#print("RESULT:", calcProb(trainDocs, rfreqt, 680, trainLabel))
#print(getLabelName(Classify(trainDocs, rfreqt, 483, trainLabel, True)))
#TEST
resuLst = []
resuLst2 = []
for docId in np.nditer(doclst):
resuLst.append(Classify(trainDocs, rfreqt, docId, trainLabel, True))
for docId in np.nditer(doclst2):
resuLst2.append(Classify(testDocs, rfreqt, docId, trainLabel, True))
resuArr = np.asarray(resuLst)
resuArr2 = np.asarray(resuLst2)
diff = np.where(trainLabel != resuArr)
diff2 = np.where(testLabel != resuArr2)
train_accuracy = (trainTotal - diff[0].size)/trainTotal
test_accuracy = (testTotal - diff2[0].size)/testTotal
print("Train Accuracy:", train_accuracy * 100)
print("Test Accuracy:", test_accuracy * 100)
if __name__ == "__main__":
main() |
#!/usr/bin/env python
from distutils.core import setup
import sys
sys.path.append('src/')
from fetcher.version import __version__
setup(name='torrent_fetcher',
version=__version__,
description='Tool to fetch torrents from www.onlinetvrecorder.de.',
author='Sven Klomp',
author_email='mail@klomp.eu',
url='https://github.com/avanc/torrent_fetcher',
packages=['fetcher'],
package_dir={'fetcher': 'src/fetcher'},
scripts=['src/bin/fetcher'],
data_files=[('config', ['config/fetcher.conf'])],
license="GPLv2",
platforms=["Linux"],
long_description=""
)
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------------
# LNG_main.py
# -----------------------------------------------------------------------------
import numpy as np
import os
import math
from dxfwrite import DXFEngine as dxf
from matplotlib import collections as mc
import pylab as pl
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
import LNG_engine
class RVE:
def __init__(self, eletypeID = None, sizeXYZ=[1.0,0.0,1.0], t = 0.0):
self.sizeXYZ = np.reshape(np.asarray(sizeXYZ), (1,3))
self.bound_node = {}
        if eletypeID is not None:
            self._eletype(eletypeID)  # fills nodes/edges/faces/sym/dim
else: #Define your element
self.eletypeID = eletypeID
self.nodes = None
self.edges = None
self.faces = None
self.sym = None
self.dim = None
self._get_input()
self.t = t
if t !=0.0:
self._get_faces()
def N(self):
return len(self.nodes)
def _eletype(self, eletypeID):
self.eletypeID = eletypeID
self.sym = []
self.dim = [0,2]
self.faces = np.array([[]])
def e0(self):
self.nodes = np.array([[0,0,0],[1,0,0]], dtype="float")
self.edges = np.array([[0,1]])
self.dim = [0]
def e1(self):
self.nodes = np.array([[1,0],[0,0],[1,1],[0,1]], dtype="float")
self.edges = np.array([[0,1],[1,2],[2,3]])
self.sym = [0,2]
def e3(self):
self.nodes = np.array([[0,0],[0,0.5],[1,1]], dtype="float")
self.edges = np.array([[0,1],[1,2]])
self.sym = [0,2]
def e4(self):
self.nodes = np.array([[0,0],[0,1],[1,1],[1,0]], dtype="float")
self.edges = np.array([[0,1],[1,2],[2,3],[3,0]])
self.faces = np.array([[]])
def e5(self): # cube
self.nodes = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0],[0,0,1],[1,0,1],[0,1,1],[1,1,1]], dtype="float")
self.edges = np.array([[0,1],[0,2],[2,3],[1,3],[0,4],[1,5],[2,6],[3,7],[4,5],[4,6],[5,7],[6,7]])
self.faces = np.array([[0,1,3,2],[4,5,7,6],[0,1,5,4],[0,2,6,4],[1,3,7,5],[2,3,7,6]])
self.dim = (0,1,2)
def e6(self): # X shape
self.nodes = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0],[0,0,1],[1,0,1],[0,1,1],[1,1,1]], dtype="float")
self.edges = np.array([[0,7],[1,6],[2,5],[3,4]])
self.faces = np.array([[]])
self.dim = (0,1,2)
        def e7(self): # hexa-tetrahedron
self.nodes = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0],[0,0,1],[1,0,1],[0,1,1],[1,1,1]], dtype="float")
self.edges = np.array([[0,1],[0,2],[0,3],[2,3],[1,3],[2,7],[1,7],[1,4],[2,4],[4,6],[4,5],[5,7],[6,7],[4,7]])
self.faces = np.array([[0,2,3],[0,1,3],[4,6,7],[4,5,7],[2,4,7],[1,4,7]])
self.sym = [0,1,2]
self.dim = (0,1,2)
def e8(self): # octet-truss
self.nodes = np.array([[0,0,0],[1,1,0],[1,0,1],[0,1,1]], dtype="float")
self.edges = np.array([[0,1],[0,2],[0,3],[2,3],[1,2],[1,3]])
self.faces = np.array([[]])
self.sym = [0,1,2]
self.dim = (0,1,2)
def e9(self): # octet-truss
self.nodes = np.array([[0,0,0],[1,1,0],[1,0,1],[0,1,1]], dtype="float")
self.edges = np.array([[0,1],[0,2],[0,3]])
self.faces = np.array([[]])
self.sym = [0,1,2]
self.dim = (0,1,2)
def e10(self): # octet-truss
self.nodes = np.array([[0,0,0],[1,0,1],[0,1,1],[1,1,1],[1,1,0]], dtype="float")
self.edges = np.array([[0,1],[0,2],[1,3],[2,3],[3,4],[0,4]])
self.faces = np.array([[]])
self.sym = [0,1,2]
self.dim = (0,1,2)
def e11(self): # octet-truss
self.nodes = np.array([[0,0,0],[1,0,1],[0,1,1],[1,1,1],[1,1,0]], dtype="float")
self.edges = np.array([[0,1],[0,2],[0,3],[0,4]])
self.faces = np.array([[]])
self.sym = [0,1,2]
self.dim = (0,1,2)
# print(' get_basic_elem( e F) // nodes, edges, faces, sym, dim G')
        try:
            eval('e' + str(self.eletypeID) + '(self)')
        except NameError:
            raise NameError('Element type %s is not defined.' % self.eletypeID)
def _get_input(self):
        if not isinstance(self.sizeXYZ, np.matrix):
            self.sizeXYZ = np.asmatrix(self.sizeXYZ)
# Amount of nodes
self._scale = np.divide(self.sizeXYZ[:,self.dim],np.max(self.nodes, axis=0))
self.nodes = np.multiply(self.nodes,self._scale)
# Adding missing dimension in case of node coordinates given in 2D
if np.shape(self.nodes)[1] != 3:
self.nodes = np.array([self.nodes[:,0],np.zeros([np.shape(self.nodes)[0],1]),self.nodes[:,1]]).T[0]
self._scale = np.array([self._scale[:,0],np.zeros([np.shape(self._scale)[0],1]),self._scale[:,1]]).T[0]
T_edges = [[a] for a in range(self.N())]
for i,j in self.edges:
if j not in T_edges[i]:
T_edges[i].append(j)
self.edges = T_edges
# print('get_input(coord, edges, faces, sizeXYZ, dim F) // coord, T_edges, N, scale G')
assert np.sum(self.sizeXYZ!=0)==len(self.dim), ValueError('Element cell size (RVE.sizeXYZ) and element dimensions (self.dim) do not match: sizeXYZ=%s dim=%s'% (self.sizeXYZ, self.dim))
def _get_faces(self):
Nn = self.N()
LNG_engine.gen_thickness_data(self)
self.bound_node = set(range(Nn, self.N() ))
def gen_mesh(self, meshSize):
if meshSize != []:
LNG_engine.gen_mesh(self, meshSize)
print("RVE remeshed with a meshSize of {}, if you want to visualize it type {}.showmesh() ".format(meshSize, type(self).__name__))
else:
print("Lattice RVE not remeshed.")
def printDATA(self):
for x in ['nodes', 'edges', 'faces']:
try:
print(str(x)+': {}\n'.format(eval('self.'+x)))
except:
pass
def show(self):
def show2F(self):
pass
def show2T(self):
verts = [self.nodes[f][:,self.dim] for f in self.faces]
print(verts)
fig, ax = plt.subplots()
# Make the collection and add it to the plot.
coll = PolyCollection(verts, hatch ='/',linestyle=':')
ax.add_collection(coll)
ax.autoscale_view()
plt.show()
def show3F(self):
pass
eval('show'+str(len(self.dim))+str(bool(self.t))[0]+'(self)')
def showmesh(self):
plt.figure()
plt.axis('equal')
plt.axis('off')
plt.triplot(self.nodes[:,0], self.nodes[:,2], self.faces)
plt.show()
class LatticeStructure:
def __init__(self, _RVE, n_G=[1,1,1], shape=''):
assert type(_RVE) == RVE, 'Please enter an RVE object'
self.n_G = n_G
self.shape = shape
self.RVE = _RVE
def gen_nodegrid(self):
LNG_engine.check_dimensions(self)
LNG_engine.do_pregrid(self)
N = self.RVE.N()
nlayer = ((self.n_L[0]-1)*self.n_G[0]+1)*(self.n_L[2]-1) #each row of elements(X direction)
nplane = ((self.n_L[0]-1)*self.n_G[0]+1)*((self.n_L[2]-1)*self.n_G[2]+1) #each plane XZ of elements
# Initialize mesh: matrix with the node coordinates by rows.
self.nodes = np.zeros([nplane*((self.n_L[1]-1)*self.n_G[1]+1),3]) # Coordinates of the grid are ordered WE & SN
# Initialize index: boolean vector to determine the real node rows in the mesh matrix.
index = np.zeros(nplane*((self.n_L[1]-1)*self.n_G[1]+1))
# Initialize num: Transformation matrix from global to local nodes.
# Each row corresponds to one element.
# Elements are ordered from WE & SN
self.num = np.array(np.zeros([self.n_G[0]*self.n_G[1]*self.n_G[2],N]))
# Initialize boundary: List of the list of boundary nodes of the basic element.
self.boundary = [set(),set(),set(),set(),set(),set()] # Boundaries[min(x), max(x), min(y), max(y), min(z), max(z)]
self.bound, self.TOT_angleX, self.TOT_angleY= [[],[],[]], None, None
p = -1
if self.RVE.sym !=-1:
msize = [1,1,1]
for rep in self.RVE.sym:
msize[rep] = 2
msize.append(N)
ind_elem_GS = np.zeros(msize)
msize.append(3)
self.iiS = np.zeros(msize)
coordS = np.zeros(msize)
for i in range(msize[0]):
for j in range(msize[1]):
for k in range(msize[2]):
indbol = np.array([bool(i),bool(j),bool(k)])
coords = np.array(self.RVE.nodes)
coords[:,indbol] = np.reshape(np.repeat([self.delmax[indbol]],N,axis=0),(N,sum(indbol))) - self.RVE.nodes[:,indbol]
coordS[i,j,k] = coords
iis = np.array(self.ii)
iis = [abs(max(self.ii[0])*indbol[0]-self.ii[0]),abs(max(self.ii[1])*indbol[1]-self.ii[1]),abs(max(self.ii[2])*indbol[2]-self.ii[2])]
ind_elem_Gs = (iis[0] + iis[1]*nplane + iis[2]*nlayer/(self.n_L[2]-1))
self.iiS[i,j,k] = np.reshape(iis, np.shape(iis)[:-1]).T
ind_elem_GS[i,j,k] = np.reshape(ind_elem_Gs,(len(ind_elem_Gs)))
del(coords,ind_elem_Gs, iis, indbol)
ind_elem_GS = ind_elem_GS.astype(int)
p = -1
self.bound[1]=[]
for j in range(self.n_G[1]):
if 1 in self.RVE.sym:
jT = j % 2
else:
jT = 0
self.bound[2]=[]
for k in range(self.n_G[2]):
if 2 in self.RVE.sym:
kT = k % 2
else:
kT = 0
self.bound[0]=[]
for i in range(self.n_G[0]):
if 0 in self.RVE.sym:
iT = i % 2
else:
iT = 0
p = p + 1
self.I = [i,j,k]
self.num_r = np.reshape(ind_elem_GS[iT,jT,kT] + i*(self.n_L[0]-1) + k*nlayer+ j*nplane,(1,N))
[dx,dy,dz] = [np.float32(self.delmax[0]*i), np.float32(self.delmax[1]*j), np.float32(self.delmax[2]*k)]
LNG_engine.do_shapeME(self, coordS[iT,jT,kT]+[dx,dy,dz],self.iiS[iT,jT,kT])
self.num[p,:] = self.num_r
index[self.num_r] = np.ones([len(self.num_r),1])
index = index.astype("bool")
self.num = self.num.astype("int")
self.nodes = self.nodes[index].astype("float32") #Take only the nodes of interest from the rectangular grid
# Hash from index imaginary rectangular grid to node grid.
# The row number is the global node number and the value is the
# correspondant node to the imaginary rectangular grid.
index = np.arange(0,len(index),1)[index]
self._ind_hash, k = {}, -1
for i in index:
k = k + 1
self._ind_hash[i] = k
self.boundary = [LNG_engine.do_translate(list(b), self._ind_hash) for b in self.boundary]
del (self.bound, self.I)
def gen_edges(self):
# Initializing global adjecency list
self.edges = [[a] for a in range(len(self._ind_hash))]
#consider only the lower triangle of the symetric matrix
self.num_edges = 0
for row in self.num:
i = -1
for node_in in row:
i = i + 1
node_con = row[self.RVE.edges[i]]
node_in = self._ind_hash[node_in]
for node_out in node_con[1:]:
node_out = self._ind_hash[node_out]
if (node_out not in self.edges[node_in])and(node_in not in self.edges[node_out]):
self.edges[node_in].append(node_out)
self.num_edges = self.num_edges + 1
def gen_faces(self):
# Initializing global face list
try:
self.faces = []
for num_r in self.num:
for face in self.RVE.faces:
nf = num_r[face].tolist()
nnf = []
for n in nf:
nnf.append(self._ind_hash[n])
self.faces.append(nnf)
except:
pass
def gen_CAD(self, filename ='', foldername = ''):
if filename =='': filename = self.shape +'_RVE'+ str(self.RVE.eletypeID
)+'t'+str(self.RVE.t) +'nG'+ ''.join(map(str, self.n_G[:]))
if foldername == '': foldername = self.shape
parent_folder = os.getcwd()
try:
LNG_engine.openFolder(foldername)
            print('folder ' + foldername + ' created inside /drawings_/.')
except:
            raise NameError('path: ' + parent_folder + foldername + ' not found')
# Initializing file
try:
os.remove(filename+'.dxf' )
print("drawing '%s.dxf' replaced.\n" % filename)
drawing = dxf.drawing( filename+'.dxf' )
except:
print("drawing '%s.dxf' created.\n" % filename)
drawing = dxf.drawing( filename+'.dxf' )
for n0 in self.edges:
for n1 in n0[1:]:
drawing.add(dxf.polyline([self.nodes[n0[0],self.RVE.dim], self.nodes[n1,self.RVE.dim]], layer = 'edges'))
drawing.save()
try:
drawing = dxf.drawing( filename+'f'+'.dxf' )
for face in self.faces:
f=[]
for i in range(len(face)):
f.append(tuple(self.nodes[face[i],self.RVE.dim]))
f = dxf.face3d(f, flags=1)
f['layer'] = 'faces'
f['color'] = 7
drawing.add(f)
del(f)
except: pass
drawing.save()
os.chdir(parent_folder)
def show(self, lim = [], filename ='', foldername = ''):
lines = list()
        if lim == []: lim = [np.zeros(3), self.delmax*self.n_G]
        low, high = lim[0], lim[-1]
if filename =='': filename = self.shape +'_RVE'+ str(self.RVE.eletypeID
)+'t'+str(self.RVE.t) +'nG'+ ''.join(map(str, self.n_G[:]))
if foldername == '': foldername = self.shape
parent_folder = os.getcwd()
try:
LNG_engine.openFolder(foldername)
            print('folder ' + foldername + ' created inside /drawings_/.')
except:
            raise NameError('path: ' + parent_folder + foldername + ' not found')
try:
os.remove(filename+'.pdf' )
print("drawing '%s.pdf' replaced.\n" % filename)
except:
print("drawing '%s.pdf' created.\n" % filename)
for n0 in self.edges:
for n1 in n0[1:]:
lines.append((self.nodes[n0[0],self.RVE.dim], self.nodes[n1,self.RVE.dim]))
if len(self.RVE.dim) != 3:
lc = mc.LineCollection(lines, color='grey')#(high[0]-low[0])/(len(self.nodes)), color='#CCCCCC')
fig, aix = pl.subplots()
# for i in range(len(self.nodes)):
# aix.annotate(str(i), xy=(self.nodes[i,self.RVE.dim]), family='Courier New',fontsize=16, color='red' )
aix.set_xlim(low[0],high[0])
aix.set_ylim(low[2],high[2])
aix.add_collection(lc)
aix.axis('equal')
aix.axis('off')
fig.show()
else:
fig = pl.figure()
aix = fig.add_subplot(111, projection='3d')
aix.view_init(azim=120)
lc= Line3DCollection(lines, linewidths=1, color='red')
aix.add_collection3d(lc)
aix.set_xlim3d(low[0]-3,high[0]+3)
aix.set_ylim3d(low[1]-3,high[1]+3)
aix.set_zlim3d(low[2]-3,high[2]+3)
# Hide grid lines
aix.grid(False)
# Hide axes ticks
aix.set_xticks([])
aix.set_yticks([])
aix.set_zticks([])
aix.autoscale_view()
pl.savefig(filename + '.pdf')
os.chdir(parent_folder)
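# Hedged usage sketch (added; the module ships no driver, so this is a guess
# at the intended workflow and assumes the LNG_engine helpers are importable):
# build a cubic RVE, tile it into a 2x2x2 lattice, and export the CAD drawing.
if __name__ == '__main__':
    rve = RVE(eletypeID=5, sizeXYZ=[1.0, 1.0, 1.0])   # element 5 is the cube
    lattice = LatticeStructure(rve, n_G=[2, 2, 2], shape='demo')
    lattice.gen_nodegrid()
    lattice.gen_edges()
    lattice.gen_faces()
    lattice.gen_CAD()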
|
# Copyright (c) 2012 Trend Micro, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from sclib.config import Config, sclibConfigLocations
import os
import platform
import re
import sys
import logging
import logging.config
import urlparse
__module__ = 'sclib'
__version__ = '3.5.1000'
Version = __version__ # for backward compatibility
__config__ = Config()
UserAgent = '%s/%s (%s)' % (__module__, __version__, sys.platform)
def init_logging():
    for path in sclibConfigLocations:
        try:
            logging.config.fileConfig(os.path.expanduser(path))
        except Exception:
            pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger(__module__)
perflog = logging.getLogger('%s.perf' % (__module__))
log.addHandler(NullHandler())
perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def connect_sc(sc_host_url, sc_broker, sc_broker_key):
"""
:type sc_host_url: string
:param sc_host_url: Your SecureCloud broker url. Ex. https://ms.securecloud.com:7443/broker/API.svc/v3.5
:type sc_broker: string
:param sc_broker: Your broker name
:type sc_broker_key: string
:param sc_broker_key: Your broker access key
:rtype: :class:`sclib.sc.connection.SCConnection`
:return: A connection to SecureCloud
"""
from sclib.sc.connection import SCConnection
return SCConnection(sc_host_url,
sc_broker,
sc_broker_key)
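# Usage sketch (added; the URL, broker name, and key below are placeholders,
# so the call is left commented out):
# conn = connect_sc('https://ms.securecloud.com:7443/broker/API.svc/v3.5',
#                   'mybroker', 'my-broker-key')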
|
# test module sqlite3 write and read a database file
# (Python25 and higher have module sqlite3 built in)
# sqlite3.connect(database, timeout=5.0, isolation_level=None,
#     detect_types=0, cached_statements=100)
# keywords:
# timeout=5.0 --> allows multiple access for 5 seconds
# isolation_level=None --> autocommit mode
# detect_types=0 --> native types TEXT, INTEGER, FLOAT, BLOB and NULL
# cached_statements=100 --> statement cache to avoid SQL parsing overhead
import sqlite3
# create/connect to a permanent file database
con = sqlite3.connect("my_db.db3")
# establish the cursor, needed to execute the connected db
cur = con.cursor()
# create/execute a table:
# (optionally used capital letters to show commands)
cur.execute('CREATE TABLE IF NOT EXISTS clients \
(id INT PRIMARY KEY, \
firstname CHAR(60), \
lastname CHAR(60))')
# insert several lines at once using a
# list of (id, firstname, lastname) tuples
# use try/except or the existing db will complain about
# the non-unique id since it is already in the db
try:
clients = [
(107, "Ella", "Fitzgerald"),
(108, "Louis", "Armstrong"),
(109, "Miles", "Davis")
]
cur.executemany("INSERT INTO clients (id, firstname, lastname) \
VALUES (?, ?, ?)", clients )
except sqlite3.IntegrityError:
    pass
# add another client
# use try/except or the existing db will complain about
# the non-unique id if it is already in the db
try:
new_client = (110, "Benny", "Goodman")
cur.execute("INSERT INTO clients (id, firstname, lastname) \
VALUES (?, ?, ?)", new_client)
except sqlite3.IntegrityError:
    pass
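# alternative (added note): SQLite's INSERT OR IGNORE skips rows whose
# primary key already exists, avoiding the try/except dance above
cur.execute("INSERT OR IGNORE INTO clients (id, firstname, lastname) \
    VALUES (?, ?, ?)", (110, "Benny", "Goodman"))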
# important if you make changes to the database
# commits current data to the db file (data is persistant now)
con.commit()
# now test it
# get data row by row
print("Show data row by row:")
# also orders/sorts data by lastname
cur.execute('SELECT id, firstname, lastname FROM clients \
ORDER BY lastname')
for row in cur:
print(row)
print('-'*40)
# select just one data item from each row ...
cur.execute('SELECT firstname FROM clients')
print(cur.fetchall())
print('-'*40)
# or ...
cur.execute('SELECT firstname FROM clients')
for row in cur:
print(row[0])
print('-'*40)
# select a specific data row ...
cur.execute('SELECT * FROM clients WHERE lastname="Davis"')
print(cur.fetchall())
print('-'*40)
# show the table header
# use only the first item of the tuple info
col_name_list = [tup[0] for tup in cur.description]
print("Table header:")
print(col_name_list)
# finally ...
con.close()
"""my output with Python3 -->
Show data row by row:
(108, 'Louis', 'Armstrong')
(109, 'Miles', 'Davis')
(107, 'Ella', 'Fitzgerald')
(110, 'Benny', 'Goodman')
----------------------------------------
[('Ella',), ('Louis',), ('Miles',), ('Benny',)]
----------------------------------------
Ella
Louis
Miles
Benny
----------------------------------------
[(109, 'Miles', 'Davis')]
----------------------------------------
Table header:
['id', 'firstname', 'lastname']
""" |
#!/usr/bin/env python2
from __future__ import division
import sys, os
sys.path.append(os.path.join(os.getcwd(), '../src'))
import time
import pickle
from collections import OrderedDict
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
from binary_response import *
from figure_presets import *
from plotting_functions import *
from adaptive_response.adaptive_threshold import AdaptiveThresholdTheoryReceptorFactors
Nr, alpha = 16, 1.5
Ns, s = 128, 32
#r_list = [8, 4, 2]
an_list = [0.5, 0.2, 0.1]
with open('data/mutual_information_distributed.pkl', 'rb') as fp:
res = pickle.load(fp)
variances = res['variances']
data = res['data']
colors = [cm.viridis(x) for x in np.linspace(0, 0.9, len(an_list))]
for fig in figures(
'mutual_information_distributed.pdf',
fig_width_pt=200., crop_pdf=False, legend_frame=False,
transparent=True, #post_process=False,
# num_ticks=3
):
#thresh = data[widths[0]]['MI_less'] / Na
#plt.axhline(thresh, ls=':', color=COLOR_RED)
for k, an in enumerate(an_list):
errorplot(variances, data[an]['MI_mean'], yerr=data[an]['MI_std'],
label=r'$\mean{a_n}=%g$' % an, color=colors[k])
# max_id = np.argmax(MI_rel)
# idx = np.flatnonzero(MI_rel[max_id:] < thresh) + max_id
# print('xi_1 max = %g for width = %g' % (factors[idx[0]], width))
plt.legend(loc='best', fontsize=8)
# plt.yscale('log')
plt.xlim(0, variances.max())
plt.ylim(0, 34)
#plt.xlabel(r'Receptor sensitivity $\langle S_{n1} \rangle$')#\gamma_1$')
plt.xlabel(r'Sensitivity variation $\var(\xi_n)/\mean{\xi_n}^2$')
    plt.ylabel(r'Info. $I$ [$\unit{bits}$]')
|
cont = 0
cont1 = 0
for i in range(10):
    a = int(input('Enter a value: '))
    if 10 <= a <= 20:
        cont = cont + 1
    else:
        cont1 = cont1 + 1
print('Inside [10,20]:', cont)
print('Outside [10,20]:', cont1)
|
import sqlite3
_connection = None
def get_connection():
global _connection
    if _connection is None:
_connection = sqlite3.connect('db_setup.db', check_same_thread=False)
return _connection
def init_db(force: bool = False):
conn = get_connection()
c = conn.cursor()
    if force:
        c.execute('DROP TABLE IF EXISTS setup')  # drop the same table we create below
    c.execute("""
CREATE TABLE IF NOT EXISTS setup(
id INTEGER PRIMARY KEY,
menedger TEXT,
cost INT,
cost_sell_auto TEXT
)""")
conn.commit()
init_db()
##########
def add_setup(men, cost, cost_sell_auto):
conn = get_connection()
c = conn.cursor()
    c.execute('INSERT INTO setup (menedger, cost, cost_sell_auto) VALUES (?, ?, ?)',
              (men, cost, cost_sell_auto))
conn.commit()
def take_setup():
conn = get_connection()
c = conn.cursor()
c.execute('SELECT * FROM setup')
return c.fetchone()
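# usage sketch (added; the inserted values are just examples)
if __name__ == '__main__':
    init_db(force=True)
    add_setup('Ivan', 1000, '500')
    print(take_setup())   # -> (1, 'Ivan', 1000, '500')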
def upd_men(info):
conn = get_connection()
c = conn.cursor()
    c.execute('UPDATE setup SET menedger = ? WHERE id = 1', (info,))
conn.commit()
def upd_cost(info):
conn = get_connection()
c = conn.cursor()
    c.execute('UPDATE setup SET cost = ? WHERE id = 1', (info,))
conn.commit()
def upd_cost_sell_auto(info):
conn = get_connection()
c = conn.cursor()
    c.execute('UPDATE setup SET cost_sell_auto = ? WHERE id = 1', (info,))
conn.commit() |
import argparse
import codecs
import json
import struct
import xml.etree.ElementTree as ElementTree
import os
import zlib
import dicttoxml
import yaml
class NodeType:
Node = 0x00
Boolean = 0x00
Float = 0x01
Int = 0x02
Vector2 = 0x03
Vector3 = 0x04
Vector4 = 0x06
String = 0x07
Actor = 0x08
UnknownString = 0x0f
UnknownUnsignedInt = 0x11
String2 = 0x14
Values = [
0x00,
0x01,
0x02,
0x07,
0x08,
0x0f,
0x11,
0x14
]
Reference = [
]
class AAMP:
data_object = {}
hash_table = {}
def __init__(self, path):
print("Parsing AAMP file...")
filename = os.path.basename(path)
print("Reading {0}...".format(filename))
file = open(path, 'rb')
self.data = file.read()
signature = self.data[0x00:0x04]
if signature != b'AAMP':
            print('\033[31mQuitting: {0} is not an AAMP file\033[0m'.format(filename))
print('\033[31mExpected b\'AAMP\' but saw {0}\033[0m'.format(signature))
exit(0)
version = struct.unpack('<I', self.data[0x04:0x08])[0]
if version != 2:
print('\033[31mQuitting: {0} is not the correct AAMP version\033[0m'.format(filename))
print('\033[31mExpected 2 but saw {0}\033[0m'.format(version))
exit(0)
# Get hashed names
self.get_hash_table()
root_nodes_length = struct.unpack('<I', self.data[0x18:0x1c])[0]
pos = 0x34
for index in range(0, root_nodes_length):
children = {}
node_id, unknown, offset, child_count = \
struct.unpack('<IIHH', self.data[pos:pos + 0x0c])
if node_id in self.hash_table:
node_id = self.hash_table[node_id]
node_id = str(node_id)
self.data_object[node_id] = {}
child_pos = offset * 4 + pos
for child_index in range(0, child_count):
child_node_id = struct.unpack('<I', self.data[child_pos:child_pos + 0x04])[0]
if child_node_id in self.hash_table:
child_node_id = self.hash_table[child_node_id]
child_node_id = str(child_node_id)
children[child_node_id] = self.get_node(child_pos)
child_pos += 0x08
self.data_object[node_id] = children
pos += 0x0c
    def get_hash_table(self):
        # both dictionaries map crc32(name) -> name
        for names_path in ('C:\\botw-data\\src\\extractors\\hashed_names.txt',
                           'C:\\botw-data\\src\\extractors\\hash-number-appendix.txt'):
            with open(names_path, 'r') as file:
                data = file.read().split('\n')
            for entry in data:
                self.hash_table[zlib.crc32(bytearray(entry, 'utf-8'))] = entry
def get_node(self, pos):
node = {}
node_id, offset, child_count, child_node_type \
= struct.unpack('<IHBB', self.data[pos:pos + 0x08])
if node_id in self.hash_table:
node_id = self.hash_table[node_id]
node_id = str(node_id)
offset = offset * 4 + pos
# print("Node id: {0}, Offset: {1}, Child Count: {2}, Child Node Type: {3}"
# .format(node_id, hex(offset), child_count, hex(child_node_type)))
if child_node_type == NodeType.Node and child_count > 0:
            for index in range(0, child_count):
child = self.get_node(offset)
node[child[0]] = child[1]
offset += 0x08
return node
# Node = 0x00
# Boolean = 0x00
# Float = 0x01
# Int = 0x02
# Vector2 = 0x03
# Vector3 = 0x04
# Vector4 = 0x06
# String = 0x07
# Actor = 0x08
# UnknownString = 0x0f
# UnknownUnsignedInt = 0x11
# String2 = 0x14
elif child_node_type == NodeType.Boolean:
value = struct.unpack('<I', self.data[offset:offset + 0x04])[0]
value = True if value == 1 else False
node[node_id] = value
elif child_node_type == NodeType.Float:
value = struct.unpack('<f', self.data[offset:offset + 0x04])[0]
node[node_id] = value
elif child_node_type == NodeType.Int:
value = struct.unpack('<I', self.data[offset:offset + 0x04])[0]
node[node_id] = value
        elif child_node_type in (NodeType.String, NodeType.Actor, NodeType.String2):
            # all three are null-terminated strings; decode only up to the
            # terminating null instead of the whole remaining buffer
            end = self.data.index(b'\x00', offset)
            value = self.data[offset:end].decode('utf-8')
            node[node_id] = value
else:
value = self.data[offset:offset + 0x04]
return node_id, value
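# Added sketch (illustrative only; the bytes below are made up): the 12-byte
# root-node records parsed in AAMP.__init__ unpack as little-endian '<IIHH'
# -> (crc32 name hash, unknown, offset in 4-byte words, child count).
def _demo_root_record():
    raw = bytes.fromhex('78563412' '00000000' '0300' '0200')
    print(struct.unpack('<IIHH', raw))   # (305419896, 0, 3, 2)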
def main():
parser = argparse.ArgumentParser(description="Parse the Legend of Zelda: Breath of the Wild aamp files to xml")
parser.add_argument("filename", type=str, help="File to be parsed.")
parser.add_argument("-x", "--xml",
help="Exports data as a xml file (default)",
action="store_true")
parser.add_argument("-y", "--yaml",
help="Exports data as a yaml file",
action="store_true")
parser.add_argument("-j", "--json",
help="Exports data as a json file",
action="store_true")
parser.add_argument("-a", "--all",
help="Exports data as a xml, yaml and json file",
action="store_true")
args = parser.parse_args()
aamp = AAMP(args.filename)
if args.all:
args.yaml = True
args.json = True
args.xml = True
if args.yaml:
save_as_yaml(args, aamp)
if args.json:
save_as_json(args, aamp)
if args.xml:
save_as_xml(args, aamp)
if not args.yaml and not args.json and not args.xml:
save_as_xml(args, aamp)
def save_as_yaml(args, aamp):
    filename = os.path.basename(args.filename)
    print('Saving {0}.yaml...'.format(filename))
    file = codecs.open(args.filename + '.yaml', 'w', 'utf-8')
    yaml.dump(aamp.data_object, file, allow_unicode=True)
    file.close()
def save_as_json(args, aamp):
    filename = os.path.basename(args.filename)
    print('Saving {0}.json...'.format(filename))
    file = codecs.open(args.filename + '.json', 'w', 'utf-8')
    json.dump(aamp.data_object, file, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ': '))
    file.close()
def save_as_xml(args, aamp):
    from xml.dom.minidom import parseString
    filename = os.path.basename(args.filename)
    path = os.path.dirname(os.path.abspath(args.filename))
    base_filename = os.path.splitext(filename)[0]
    print('Saving {0}...'.format(path + '\\' + base_filename + '.xml'))
    file = codecs.open(path + '\\' + base_filename + '.xml', 'w', 'utf-8')
    dom = dicttoxml.dicttoxml(aamp.data_object).decode('utf-8')
    file.write(parseString(dom).toprettyxml())
    file.close()
if __name__ == "__main__":
main()
|
# Longest Palindromic Substring
# O(n²)
# n = len(string)
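# example (added): longestPalindromicSubstring("forgeeksskeegfor") -> "geeksskeeg"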
def longestPalindromicSubstring(string):
best = [0, 0]
for i in range(len(string)):
centerPalindrome = expand(string, i, i)
leftPalindrome = expand(string, i-1, i)
if centerPalindrome and centerPalindrome[1]-centerPalindrome[0] > best[1]-best[0]:
best = centerPalindrome
if leftPalindrome and leftPalindrome[1]-leftPalindrome[0] > best[1]-best[0]:
best = leftPalindrome
return string[best[0]:best[1]+1]
def expand(string, start, end):
if start < 0 or end >= len(string) or string[start] != string[end]:
return None
while start-1 >= 0 and end+1 < len(string) and string[start-1] == string[end+1]:
start -= 1
end += 1
return [start, end] |
### last changed: 08/28/2018
from astropy.io import fits
import numpy as np
import os, time, gc, sys, types
from dirs import *
def mkdisk(pos_angle_deg,inclination_deg,ext,dim,V_sys=0.,V_max=220.,h_rot=10.,sigma_cen=250.):
pos_angle = pos_angle_deg *np.pi/180
inclination = inclination_deg *np.pi/180
r_ip = np.zeros((dim,dim))
R_gp = np.zeros((dim,dim))
phi_ip = np.zeros((dim,dim))
theta_gp = np.zeros((dim,dim))
image = np.zeros((dim,dim))
cen_x = np.shape(image)[1]//2
cen_y = np.shape(image)[0]//2
a = 0.5 *0.8 *dim
b = a * np.cos(inclination)
if 0 <= pos_angle < 1.5*np.pi: alpha = pos_angle + 0.5*np.pi
else: alpha = pos_angle % (0.5*np.pi)
### for each image pixel, calculate radius r and azimuthal angle phi in image plane
for y in range(np.shape(image)[0]):
for x in range(np.shape(image)[1]):
r = np.sqrt( (x-cen_x)**2 +(y-cen_y)**2 )
### azimuthal angle in image plane
if (x == cen_x) and (y == cen_y):
phi = pos_angle +0.5*np.pi
else:
phi = np.arctan2(y-cen_y,x-cen_x)
if (x <= cen_x) and (y >= cen_y): phi -= 0.5*np.pi
else: phi += 1.5*np.pi
### azimuthal angle in galaxy disk plane
theta = np.arctan( np.tan(phi-pos_angle+0.5*np.pi) *np.cos(inclination) )
if phi-pos_angle == 0:
theta -= 0.5*np.pi
elif 0 < pos_angle <= np.pi:
if 0 < phi-pos_angle <= np.pi: theta += 0.5*np.pi
else: theta += 1.5*np.pi
elif np.pi < pos_angle < 2*np.pi:
if pos_angle <= phi <= 2*np.pi: theta += 0.5*np.pi
elif 0 <= phi < pos_angle-np.pi: theta += 0.5*np.pi
else: theta += 1.5*np.pi
r_ip[y,x] = r
phi_ip[y,x] = phi
theta_gp[y,x] = theta
sin_alpha = np.sin(alpha)
cos_alpha = np.cos(alpha)
X = x-cen_x
Y = y-cen_y
### (square of) radial coordinate in galaxy plane (ellipse de-projected) normalized to disk radius R
p = (X*cos_alpha +Y*sin_alpha)**2 /a**2 + (X*sin_alpha -Y*cos_alpha)**2 /b**2
### radius in galaxy plane
R = a * p**0.5
R_gp[y,x] = R
if True: #p <= 1: ### truncate after convolution (02/27/17)
if ext == 'vel':
image[y,x] = V_sys + V_max *np.sin(inclination) *np.tanh(R/h_rot) *np.cos(theta)
elif ext == 'disp':
image[y,x] = sigma_cen * np.exp(-p)
writedir = modeldir
#print writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+str(ext)+'disk.fits'
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+str(ext)+'disk.fits',image,overwrite=True)
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'tanh.fits',np.tanh(R_gp/h_rot),overwrite=True)
if not ext == 'disp':
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'R_gp.fits',R_gp,overwrite=True)
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'r_im.fits',r_ip,overwrite=True)
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'theta_gp.fits',theta_gp,overwrite=True)
fits.writeto(writedir+'PA='+str(pos_angle_deg)+'_i='+str(inclination_deg)+'_'+'phi_im.fits',phi_ip,overwrite=True)
if __name__ == '__main__':
#PA_deg = [45, 135, 225, 315] #[0, 5, 15, 30, 45, 60, 75, 90, 120, 150, 175, 180]
#PA_deg = [-45, -135]
PA_deg = 10*(np.array(range(35))+1)
    inc_deg = [60] #[30, 45, 60, 75] #1, 2, 3, 4, 5, 15, 30, 45, 60, 75, 85, 95, 105, 120, 135, 150, 165, 175, 180]
exts = ['vel'] #,'disp']
for PA in PA_deg:
for inc in inc_deg:
for ext in exts:
mkdisk(PA,inc,ext,dim=72)
print(' ### PA (degrees) = '+str(PA))
print(' ### inclination (degrees) = '+str(inc))
print(' ### time now: '+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
print('')
|
import logging
import socket
import sys
import time
import StringIO
from . import config
def _main(ip):
import aliyun.api
for rr, domain_name, type_ in config.RECORDS:
req = aliyun.api.dns.DnsDescribeDomainRecordsRequest()
req.TypeKeyWord = type_
req.DomainName = domain_name
req.RRKeyWord = rr
records = req.getResponse()['DomainRecords']['Record']
if records:
record = records[0]
if record['Value'] == ip:
logging.debug('%s.%s is already %s', rr, domain_name, ip)
else:
logging.info('Updating %s.%s from %s to %s',
rr, domain_name, record['Value'], ip)
update = aliyun.api.dns.DnsUpdateDomainRecordRequest()
update.RecordId = record['RecordId']
update.RR = rr
update.Type = type_
update.Value = ip
update.getResponse()
else:
logging.warning('No such record %s.%s, skipping for now.',
rr, domain_name)
def main():
"""
This script is called with the following arguments:
Arg Name Example
$1 Interface name ppp0
$2 The tty ttyS1
$3 The link speed 38400
$4 Local IP number 12.34.56.78
$5 Peer IP number 12.34.56.99
$6 Optional ``ipparam'' value foo
"""
# noinspection PyPackageRequirements
import aliyun.api
if not sys.stdin.encoding:
sys.stdin = StringIO.StringIO()
sys.stdin.encoding = sys.getdefaultencoding()
interface = sys.argv[1]
if interface not in config.INTERFACES:
return
ip = sys.argv[4].decode('utf-8')
logging.info('Detected %s IP change: %s', interface, ip)
# noinspection PyUnresolvedReferences
aliyun.setDefaultAppInfo(config.ALIYUN_KEY_ID, config.ALIYUN_KEY_SECRET)
tries = 0
while True:
try:
tries += 1
_main(ip)
break
except socket.error:
if tries < 3:
logging.warning(
'Network issue, try again in %s second(s).',
tries ** 2, exc_info=True)
time.sleep(tries ** 2)
else:
logging.critical(
'Still no network, please set DNS manually.',
exc_info=True)
break
logging.info('Finished DDNS process.')
def main_wrapper():
logging.basicConfig(filename=config.LOG_FILE, level=config.LOG_LEVEL,
format=config.LOG_FORMAT)
# noinspection PyBroadException
try:
main()
except Exception:
logging.exception('Failed due to exception!')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
main()
|
# Generated by Django 3.1.2 on 2020-10-28 13:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('categories', '0006_auto_20201028_1332'),
]
operations = [
migrations.RenameModel(
old_name='Subsite',
new_name='CategorySubSite',
),
]
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from brian2 import *
plt.style.use('ggplot')
start_scope()
weight = 1 # default weight param
tau = 10*ms # default time constant
sigma = 0.1
eqs = 'dv/dt = -v/tau + sigma*xi*tau**-0.5 : 1'  # dimensionless v, matching the v>1/v>3 thresholds
# Flow neurons
S1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
S2 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
A_i1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
A_i2 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
A_o1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
B_i1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
B_o1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
B_o2 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
T1 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
T2 = NeuronGroup(10, eqs, threshold='v>1', reset='v=0')
# Capacity neurons
A_c = NeuronGroup(10, eqs, threshold='v>3', reset='v=0')
B_c = NeuronGroup(10, eqs, threshold='v>3', reset='v=0')
# Flow connections
S_A = Synapses(S1, A_i1, on_pre='v += weight')
S_B = Synapses(S2, B_i1, on_pre='v += weight')
B_A = Synapses(B_o1, A_i2, on_pre='v += weight')
A_T = Synapses(A_o1, T1, on_pre='v+=weight')
B_T = Synapses(B_o2, T2, on_pre='v+=weight')
# IO connections
Ai1_o1 = Synapses(A_i1, A_o1, on_pre='v+=weight')
Ai2_o1 = Synapses(A_i2, A_o1, on_pre='v+=weight')
Bi1_o1 = Synapses(B_i1, B_o1, on_pre='v+=weight')
Bi1_o2 = Synapses(B_i1, B_o2, on_pre='v+=weight')
# Connect all synapse groups (Brian2 refuses to run a network containing a
# Synapses object that never created any synapses)
S_A.connect()
S_B.connect()
B_A.connect()
A_T.connect()
B_T.connect()
Ai1_o1.connect()
Ai2_o1.connect()
Bi1_o1.connect()
Bi1_o2.connect()
M = SpikeMonitor(B_i1)
# Now we can just run once with no loop
run(1*second)
plot(M.t/ms, M.i, '.')
xlabel('Time (ms)')
ylabel('Neuron index'); |
Python 3.4.0 (v3.4.0:04f714765c13, Mar 16 2014, 19:25:23) [MSC v.1600 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> print('Hello World!')
Hello World!
>>> str1="My string"
>>> str1
'My string'
>>> print (str1)
My string
>>> str2 = "My
SyntaxError: EOL while scanning string literal
>>> str2="""My
String"""
>>> str2
'My\nString'
>>> print (str2)
My
String
>>> print ('Hello World!')
Hello World!
>>> print ('What\'s up?')
What's up?
>>> print ('Hello World!'); print('What\'s up?')
Hello World!
What's up?
>>> str3="This is a string"
>>> str3
'This is a string'
>>> ================================ RESTART ================================
>>> str3
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
str3
NameError: name 'str3' is not defined
>>>
|
# WAP to check whether the number is even or odd
num = int(input("enter the number "))
res = num % 2
if res == 0:
    print("number is even ")
else:
print("number is odd ") |
# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
def largest_palindrome(digits):
x = [10 ** (digits - 1), 10 ** (digits - 1), 10 ** digits] # Base case
lim = [10 ** (digits - 1), (10 ** digits) - 1]
while lim[0] != lim[1] and lim[1] ** 2 > x[2]: # Keeps moving while it is worthwhile to check
for i in range(lim[1], lim[0], -1):
if is_palindrome(i*lim[1]) and i*lim[1] > x[2]:
x[0] = i
x[1] = lim[1]
x[2] = x[0] * x[1]
break
elif lim[1] * i < x[2]:
break
lim[1] -= 1 # Gradually decreases top number
return x
def is_palindrome(x):
x = str(x)
for i in range(len(x) // 2):
if x[i] != x[-i - 1]:
return False
return True
print(largest_palindrome(3)) # 906609
|
#!/usr/bin/env python
# This script just logs you into your ArchivesSpace backend and prints a session ID.
# You can copy it into whatever script you want to write to do useful things with your database.
import configparser, requests
config = configparser.ConfigParser()
config.read('local_settings.cfg')
dictionary = {
'baseURL': config.get('ArchivesSpace', 'baseURL'),
'repository':config.get('ArchivesSpace', 'repository'),
'user': config.get('ArchivesSpace', 'user'),
'password': config.get('ArchivesSpace', 'password')
}
repositoryBaseURL = '{baseURL}/repositories/{repository}'.format(**dictionary)
resourceURL = '{baseURL}'.format(**dictionary)
auth = requests.post('{baseURL}/users/{user}/login?password={password}&expiring=false'.format(**dictionary)).json()
session = auth['session']
headers = {'X-ArchivesSpace-Session': session}
print(session)
|
# import tensorflow as tf
import torch
from torch import nn
from torch.utils.data import Dataset
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, history_length = 1, device = 'cpu'):
super().__init__()
self.device = device
self.cov = nn.Sequential(
nn.Conv2d(history_length, 32, kernel_size=8, stride=4),
nn.BatchNorm2d(32),
nn.ELU(),
nn.Dropout2d(0.5),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.BatchNorm2d(64),
nn.ELU(),
nn.Dropout2d(0.5),
nn.Conv2d(64, 64, 3, 1),
nn.ELU(),
)
self.batch1 = nn.BatchNorm1d(64*7*7)
self.fc = nn.Sequential(
nn.Linear(64*7*7, 128),
nn.ELU(),
nn.BatchNorm1d(128),
nn.Dropout(),
nn.Linear(128, 5)
)
def forward(self, x):
x = x.to(self.device)
x = self.cov(x)
x = x.view(x.size()[0], -1)
x = self.batch1(x)
        x = F.dropout(x, training=self.training)  # only drop units while training
x = self.fc(x)
return x
def load(self, file_name):
self.load_state_dict(torch.load(file_name))
print(f'{file_name} is loaded')
return self
def save(self, file_name):
torch.save(self.state_dict(), f=file_name)
print(f'{file_name} is saved')
class stateDataSet(Dataset):
def __init__(self, state_data, action):
self.state = state_data
self.action = action
def __len__(self): return self.action.shape[0]
def __getitem__(self, idx):
return [self.state[idx], self.action[idx]]
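# Minimal usage sketch (added; assumes 84x84 single-channel inputs, for which
# the conv stack flattens to exactly 64*7*7 as the linear layer above expects).
if __name__ == '__main__':
    model = Model(history_length=1).eval()   # eval() freezes dropout/batch norm
    frames = torch.randn(2, 1, 84, 84)       # dummy batch of two frames
    print(model(frames).shape)               # torch.Size([2, 5])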
|
#!/usr/bin/env python
from worldmodel_bullet.BulletEnv import BulletEnv
from worldmodel_bullet.SimulationManager import SimulationManager, SimulatedCar
from worldmodel_bullet.RewardCalculator import RewardCalculator
import cv2
import gym
from gym.utils import seeding
from gym import spaces
import numpy as np
STATE_W = 64
STATE_H = 64
MIN_SPEED = 0
MAX_SPEED = 80
MAX_FORCE = 10
DEFAULT_SIM_FREQ = 240
STABILISE_TIMESTEP = 50
DEFAULT_SPAWN_HEIGHT = 0.1
ROS_ENABLE = False
FPV = True
if ROS_ENABLE:
import rospy
from cv_bridge import CvBridge
from geometry_msgs.msg import Point, Quaternion, Pose
from std_msgs.msg import Float32
from sensor_msgs.msg import Image
class SingleRacecar(BulletEnv):
def __init__(self, waypoint_threshold=0.7, waypoint_reward_multi=1.0, timestep_reward=-0.1, render_mode='headless', step_freq=240):
BulletEnv.__init__(self)
self.render_mode = render_mode
self.waypoint_threshold = waypoint_threshold
self.waypoint_reward_multi = waypoint_reward_multi
self.timestep_reward = timestep_reward
self.step_freq = step_freq
self.step_num = self._steps_calc(self.step_freq)
self.sm = SimulationManager(render_mode=self.render_mode)
if ROS_ENABLE:
rospy.init_node('bullet_gym', anonymous=True)
self.pose_pub = rospy.Publisher('/ackermann_vehicle/pose', Pose, queue_size=10)
self.reward_pub = rospy.Publisher('/ackermann_vehicle/reward', Float32, queue_size=10)
self.camera_pub = rospy.Publisher('/ackermann_vehicle/camera0/image_raw', Image, queue_size=10)
self.bridge = CvBridge()
self.action_space = spaces.Box(low=np.array([0, -1]), high=np.array([1,1]), shape=(2,)) #speed, steering angle
self.reward_range = (-np.inf, np.inf)
self.observation_space = spaces.Box(low=0.0, high=1.0, shape=(STATE_H, STATE_W, 3), dtype=np.float32)
self.reward = 0
# self.sc = SimulatedCar(start_position=[0,0,0.5])
def seed(self, seed=None):
self._seed(seed)
def reset(self):
if ROS_ENABLE:
self._rospy_check()
self.sm.reset_simulation()
random_track_num = np.random.randint(1, 20)
random_track_name = f'track{random_track_num}'
self.sm.spawn_track(random_track_name)
self.rc = RewardCalculator(track_name=random_track_name, waypoint_reward_multi=self.waypoint_reward_multi, timestep_reward=self.timestep_reward, threshold_distance=self.waypoint_threshold)
wp, rpy = self.rc.getSpawn()
self.sc = SimulatedCar(start_position=[wp[0],wp[1],DEFAULT_SPAWN_HEIGHT], start_orientation=rpy, render_mode=self.render_mode)
# timesteps to stabilise
for i in range(STABILISE_TIMESTEP):
# print('stabbing')
self.sm.step_simulation()
if not FPV:
img = self.sc.get_image(image_width=STATE_W, image_height=STATE_H)
else:
img = self.sc.get_fpv_image(image_width=STATE_W, image_height=STATE_H)
self.state = img
# print('stab done')
if ROS_ENABLE:
img_msg = img * 255
img_msg = img_msg.astype(np.uint8)
img_msg = self.bridge.cv2_to_imgmsg(img_msg, encoding='rgb8')
self.camera_pub.publish(img_msg)
return img
def step(self, action):
if ROS_ENABLE:
self._rospy_check()
speed = action[0]
steering = action[1]
speed = speed * (MAX_SPEED - MIN_SPEED) + MIN_SPEED
self.sc.set_speed(wheel_vel=speed, max_force=MAX_FORCE)
self.sc.set_steering(steering_angle=steering)
for i in range(self.step_num):
self.sm.step_simulation()
if not FPV:
state = self.sc.get_image(image_width=STATE_W, image_height=STATE_H)
else:
state = self.sc.get_fpv_image(image_width=STATE_W, image_height=STATE_H)
self.state = state
if ROS_ENABLE:
img_msg = state * 255
img_msg = img_msg.astype(np.uint8)
img_msg = self.bridge.cv2_to_imgmsg(img_msg, encoding='rgb8')
self.camera_pub.publish(img_msg)
pos, ori = self._getPose()
reward, done = self.rc.get_reward(pos)
return state, reward, done, {}
def close(self):
if self.render_mode in ['headless', 'human','rgb_array']:
cv2.destroyAllWindows()
self.sm.close()
if ROS_ENABLE:
self._close()
def render(self, mode='None'):
if mode in ['rgb_array']:
img = self.sc.get_image(image_width=640, image_height=640)
return (img * 255).astype(np.uint8)
elif self.render_mode in ['headless', 'human']:
# img = self.sc.get_image(image_width=640, image_height=640)
# obs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.namedWindow('observation', cv2.WINDOW_KEEPRATIO)
obs = cv2.cvtColor(self.state, cv2.COLOR_RGB2BGR)
cv2.imshow('observation', obs)
cv2.resizeWindow('observation', 300, 300)
cv2.waitKey(1)
# cv2.imshow('state', self.state)
# cv2.waitKey(1)
else:
print(f'Invalid render mode: {self.render_mode}, needs to be ["headless", "human"]')
def _getPose(self):
pos = self.sc.get_position()
ori = self.sc.get_orientation(quaternion=True)
if ROS_ENABLE:
pose_msg = Pose()
pos_msg = Point()
pos_msg.x = pos[0]
pos_msg.y = pos[1]
pos_msg.z = pos[2]
ori_msg = Quaternion()
ori_msg.x = ori[0]
ori_msg.y = ori[1]
ori_msg.z = ori[2]
ori_msg.w = ori[3]
pose_msg.position = pos_msg
pose_msg.orientation = ori_msg
self.pose_pub.publish(pose_msg)
return pos, ori
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
    def _steps_calc(self, time_step):
        steps = DEFAULT_SIM_FREQ / self.step_freq
        step_num = int(steps)
        if not steps.is_integer():
            print(f'Warning: Chosen step freq is not compatible with {DEFAULT_SIM_FREQ}Hz default calc freq')
            # use the local step_num: self.step_num is only set once __init__ receives the return value
            print(f'Real step freq will be {DEFAULT_SIM_FREQ / step_num}')
        return step_num
if ROS_ENABLE:
def _rospy_check(self):
if rospy.is_shutdown():
raise KeyboardInterrupt |
# Generated by Django 2.0.3 on 2018-06-28 13:18
from django.db import migrations, models
import planogram.models
class Migration(migrations.Migration):
dependencies = [
('planogram', '0006_auto_20180615_1558'),
]
operations = [
migrations.AlterField(
model_name='product',
name='vendor_code',
field=models.CharField(default=planogram.models.default_vendor_code, max_length=200, verbose_name='Артикул'),
),
]
|
# three-digit Armstrong (narcissistic) numbers, built with a map/filter chain
a = list(map(lambda x: x[1], filter(lambda x: x[0], [(i*100 + j*10 + k == i**3 + j**3 + k**3, i**3 + j**3 + k**3) for i in range(1, 10) for j in range(0, 10) for k in range(0, 10)])))
print(a)
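# readable equivalent (added): the same three-digit Armstrong numbers
# without the map/filter chain
narcissistic = [n for n in range(100, 1000) if n == sum(int(d) ** 3 for d in str(n))]
print(narcissistic)  # [153, 370, 371, 407]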
|
from rest_framework.views import APIView
from rest_framework.response import Response
# from Jwt.extensions.auth import JwtQueryParamAuthentication, JwtAuthorizationAuthentication
from conf.my_conf import token_Signature  # all configuration is imported from here
import jwt
# fetch a token
from rbac.server.jwt_tools import get_token
# verify the token
class get_token_views(APIView):
    authentication_classes = []  # skip authentication for this view
    def get(self, request, *args, **kwargs):
        payload = {'id': '', 'type': ''}
        token = get_token(payload)
        ret = {'token': 'jwt %s' % token.decode('utf-8')}
        return Response(ret)
#
# def post(self, request, *args, **kwargs):
# ret = {'state': True, 'message': 'login post'}
# print('login post')
# print(dir(request))
# token=request.data.get('token')
# print(token)
# ret['message'] = parse_payload(token)
# return Response(ret)
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
class Config(object):
"""Base config class."""
pass
class ProdConfig(Config):
"""Production config class."""
pass
class DevConfig(Config):
"""Dev config class."""
#Open Debug
    DEBUG = True
|
from python.svgsort.paper_utils import PAPER, make_paper
import sys
import traceback
from docopt import docopt
from python.svgsort import Svgsort
# from .paper_utils import PAPER
# from .paper_utils import make_paper
def main(args, return_string=False):
try:
_in = args['<in>']
out = args['<out>'] if args['<out>'] else args['<in>']+'-srt'
adjust = not args['--no-adjust']
penmoves = args['--pen-moves']
svgs = Svgsort(sw=args['--sw']).load(_in)
        # viewBox values come back as strings; compare them as numbers
        min_x, min_y, width, height = [float(v) for v in svgs.svg_atr.get('viewBox').split(' ')]
        original_is_portrait = True
        if width > height:
            original_is_portrait = False
if args['--no-split']:
pass
elif args['--split-all']:
svgs.eager_split()
else:
# default
svgs.split()
if args['--no-sort']:
# do not sort
pass
else:
svgs.sort(rnd=args['--rnd'])
if args['--repeat']:
svgs.repeat()
if penmoves:
svgs.make_pen_move_paths()
dim = args['--dim'].strip().lower()
paper = PAPER.get(dim, None)
if paper is None:
try:
paper = make_paper(tuple([int(d) for d in args['--dim'].split('x')]))
except Exception:
raise ValueError('wrong dim/paper size')
if return_string:
return ''
svg_out = ''
preserve_orientation = args.get('preserve_orientation', False)
if adjust:
svg_out = svgs.save(out, paper=paper, pad=float(args['--pad']),
padAbs=bool(args['--pad-abs']),
preserve_orientation=preserve_orientation,
original_is_portrait=original_is_portrait)
else:
svg_out = svgs.save_no_adjust(out)
return svg_out
except Exception:
traceback.print_exc(file=sys.stdout)
exit(1)
if __name__ == '__main__':
args = {'--dim': 'A3',
'--no-adjust': False,
'--no-sort': False,
'--no-split': False,
'--pad': '200',
'--pad-abs': True,
'--pen-moves': False,
'--repeat': False,
'--rnd': False,
'--split-all': False,
'--sw': '1.0',
'<in>': 'test.svg',
'<out>': 'test_out.svg'
}
main(args)
|
from rest_framework import serializers
from .models import Listed
class ListedSerialiser(serializers.ModelSerializer):
class Meta:
model = Listed
fields = (
'pk', 'name', 'number', 'type', 'reference_number', 'provider',
'active')
def validate(self, data):
        # Validate reference_number:
        # get the submitted fields needed for the reference check
        reference_number = data.get('reference_number')
        provider = data.get('provider')
        type = data.get('type')
        # instance_pk None indicates a new record (POST)
        instance_pk = None
        # on an update, grab the id of the instance being updated
        if self.instance is not None:
instance_pk = self.instance.pk
try:
            # check whether an existing product of the same type already has
            # this reference for this provider, and get its id
listed = Listed.objects.get(reference_number=reference_number,
provider=provider, type=type)
listed_pk = listed.pk
            # if the instance being updated differs from the matching record,
            # report the reference as already registered
if instance_pk != listed_pk:
for key, value in Listed.PROVIDERS_CHOICES:
if key == provider:
provider = value
break
                msg = 'Reference "{}" for provider "{}" is already registered.'.format(
                    reference_number, provider)
raise serializers.ValidationError(
{'reference_number': msg})
except Listed.DoesNotExist:
            # no existing product has the same reference
pass
return data
|
from datetime import datetime
from dateutil import relativedelta
def isBirthAfterMarriage(ind_dict,fam_dict,date_format):
for key in ind_dict:
value = ind_dict[key]
unique_id = key
name = value[0]
name = name[:name.index('/')].strip()
birth_date = value[2]
birth_date_datetime = datetime.strptime(birth_date, date_format)
family_id = value[6]
        if family_id != 'NA':
            marriage_date = fam_dict[family_id][0]
            if marriage_date != 'NA':
                marriage_date_datetime = datetime.strptime(marriage_date, date_format)
                if birth_date_datetime < marriage_date_datetime:
                    print "ANOMALY: FAMILY: US08 : ", unique_id, ": child ", name, " born ", birth_date, " before marriage on ", marriage_date
            divorce_date = fam_dict[family_id][1]
            if divorce_date != 'NA':
                divorce_date_datetime = datetime.strptime(divorce_date, date_format)
                delta = relativedelta.relativedelta(birth_date_datetime, divorce_date_datetime)
                if delta.years * 12 + delta.months > 9:
                    print "ANOMALY: FAMILY: US08 : ", unique_id, ": child ", name, " born ", birth_date, " after divorce on ", divorce_date
def isBirthBeforeDeathofParents(ind_dict,fam_dict,date_format):
for key in fam_dict:
value = fam_dict[key]
wife_id = value[4]
wife_id = wife_id.replace('@','')
husb_id= value[2]
husb_id = husb_id.replace('@', '')
childSet = value[6]
if not('NA'in childSet):
indValue = ind_dict[wife_id]
wifeDeathDate = indValue[5]
indValue = ind_dict[husb_id]
husbDeathDate = indValue[5]
for child in childSet:
unique_id = child
childValue = ind_dict[child]
name = childValue[0]
name = name[:name.index('/')].strip()
childBirthDate = childValue[2]
                if wifeDeathDate != 'NA' and childBirthDate != 'NA':
                    wifeDeathDate_datetime = datetime.strptime(wifeDeathDate, date_format)
                    childBirthDate_datetime = datetime.strptime(childBirthDate, date_format)
                    if childBirthDate_datetime > wifeDeathDate_datetime:
                        print "ANOMALY: FAMILY: US09 : ", unique_id, ": child ", name, " born ", childBirthDate, " after death of mother on ", wifeDeathDate
                if husbDeathDate != 'NA' and childBirthDate != 'NA':
                    husbDeathDate_datetime = datetime.strptime(husbDeathDate, date_format)
                    childBirthDate_datetime = datetime.strptime(childBirthDate, date_format)
                    delta = relativedelta.relativedelta(childBirthDate_datetime, husbDeathDate_datetime)
                    if delta.years * 12 + delta.months > 9:
                        print "ANOMALY: FAMILY: US09 : ", unique_id, ": child ", name, " born ", childBirthDate, " after 9 months of death of father on ", husbDeathDate
|
from .geometry import dRMSD, dRMSD_masked, internal_coords, internal_to_srf, nerf, pnerf
from .data import make_data_loader
from .util import count_parameters, to_device, group_by_class
from .optimization import Lamb, poly_schedule
from . import models
from . import scripts
|
import pyspark
import json
import nltk
from nltk import word_tokenize
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def _sentiment_analysis(lines, company=None, source=None):
    # NLTK corpora must be present on every Spark worker, hence the
    # per-partition downloads
    import nltk
    nltk.download('punkt', download_dir='./nltk_data')
    nltk.download('vader_lexicon', download_dir='./nltk_data')
    nltk.data.path.append("./nltk_data")
    from nltk.sentiment.vader import SentimentIntensityAnalyzer
    sia = SentimentIntensityAnalyzer()
    # (the original loop body was left unfinished; scoring the matching
    # sentences with VADER below is an assumption about the intent)
    results = []
    for line in lines:
        record = json.loads(line)
        text = record["text"]
        name = record["company"]
        # score only the sentences that actually mention the company
        for sent in nltk.sent_tokenize(text):
            if sent.count(name) >= 1:
                results.append((name, sia.polarity_scores(sent)['compound']))
    return results
def real_main():
    sc = pyspark.SparkContext()
    dataRDD = sc.textFile("gs://group688/688v2.dat", 25)
    # mapPartitions is lazy; take() forces the job to actually run
    print(dataRDD.mapPartitions(_sentiment_analysis).take(10))
if __name__=="__main__":
real_main()
|
# read "H M" and subtract 45 minutes
listt = list(map(int, input().split()))
if listt[0] == 0:
listt[0] += 24
time = 60 + listt[1] - 45
if time >= 60:
listt[1] = time - 60
else:
listt[0] -= 1
listt[1] = time
print(listt[0], listt[1])
|
def solution(interval1: int, n1: int, interval2: int, n2: int):
    # TODO: returns wrong answer.
    a1, b1 = lengths(interval1, n1)
    a2, b2 = lengths(interval2, n2)
    if a2 > b1 or a1 > b2:
        return -1
    left = a1 if a1 >= a2 else a2
    right = b1 if b1 <= b2 else b2
    return f'{left} {right}'
def lengths(interval: int, n_seen: int) -> tuple:
min_length = (interval + 1) * (n_seen - 1) + 1
max_length = min_length + 2 * interval
return min_length, max_length
def intersection(a1: int, b1: int, a2: int, b2: int) -> str:
if a2 > b1 or a1 > b2:
return '-1'
left = a1 if a1 >= a2 else a2
right = b1 if b1 <= b2 else b2
return f'{left} {right}'
print(solution(1, 3, 3, 2))
print(solution(1, 5, 1, 2))
|
# import unittest
# import test_register
# from HTMLTestRunner import HTMLTestRunner
# # build a test suite
# suite = unittest.TestSuite()
# # load the test cases from the module
# loader = unittest.TestLoader()
# suite.addTest(loader.loadTestsFromModule(test_register))
# # create the test runner
# runner = HTMLTestRunner(stream=open("report.html", "wb"), # open a report file and pass the handle to stream
#                         description="registration API test report", # description shown in the report
#                         title=u"automated test report",
#                         tester = 'miki')
# # run the cases in the suite with the runner
# runner.run(suite)
import unittest
import test_register
import HTMLTestRunnerCN
suite = unittest.TestSuite()
# # load the test cases from the module
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromModule(test_register))
runner = HTMLTestRunnerCN.HTMLTestReportCN(
    stream = open("report.html", "wb"),
    title=u'automated test report',
    description='detailed test case results', # defaults to empty if omitted
    tester=u"Findyou" # tester name, defaults to QA if omitted
)
# run the test cases
runner.run(suite) |
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def set_width(self, w):
self.width = w
def set_height(self, h):
self.height = h
def get_area(self):
return self.width*self.height
def get_perimeter(self):
return 2*(self.width + self.height)
def get_diagonal(self):
return (self.width**2 + self.height**2)**.5
def get_picture(self):
if max(self.width, self.height) > 50:
return 'Too big for picture.'
else:
s=('*'*self.width + '\n')*self.height
return s
def get_amount_inside(self, shape):
return (self.width//shape.width)*(self.height//shape.height)
def __str__(self):
return 'Rectangle(width={}, height={})'.format(self.width, self.height)
class Square(Rectangle):
    def __init__(self, side):
        super().__init__(side, side)
def set_side(self, s):
self.width = s
self.height = s
def __str__(self):
return 'Square(side={})'.format(self.width)
def set_width(self, w):
self.width = w
self.height = w
def set_height(self, h):
self.width = h
self.height = h
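if __name__ == '__main__':
    # Usage sketch (hypothetical values): Square inherits all Rectangle behaviour.
    rect = Rectangle(10, 5)
    print(rect.get_area())             # 50
    sq = Square(9)
    print(sq.get_area())               # 81
    print(rect.get_amount_inside(sq))  # 0 -- a 9x9 square does not fit in 10x5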
|
import discord
import time
import requests
import json
import random
import asyncio
from PythonGists import PythonGists
from discord.ext import commands
from cogs.utils.checks import *
'''FakeDownload'''
class FakeDownload:
    def __init__(self, bot):
        self.bot = bot
        config = load_config()
        self.bot_prefix = config["bot_identifier"]
@commands.command(pass_context=True)
async def download(self, ctx,*filename):
"""downloads specified file(s) from db
"""
file = (' '.join(list(filename)))
await ctx.message.channel.send('Downloading ' + '`' + file + '`' + ' to `Downloads/`...')
await asyncio.sleep(10 + (random.random() * 10))
await ctx.message.channel.send('Finished downloading ' + '`' + file + '`' + '!')
def setup(bot):
bot.add_cog(FakeDownload(bot))
print('thank you for downloading SUPER DUPER ULTRA COG !! ')
print('for one time donation of $5.66 you could make man wallet better :) thank again for downnload SUPER COG')
|
# simple test class
import uuid
class Drone:
def __init__(self, name="Anonymous Drone", speed=0, elevation=0):
self.__droneID = uuid.uuid4()
self.name = name
self.speed = speed # mph
self.elevation = elevation # ft
def status(self):
if self.elevation == 0 and self.speed == 0:
return 'Drone "{}" parked'.format(self.name)
elif self.elevation < 0:
return 'Drone "{}" crashed'.format(self.name)
else:
return 'Drone "{}" speed is {} mph at {} ft'.format(self.name, self.speed, self.elevation)
def getID(self):
return self.__droneID
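if __name__ == '__main__':
    # Minimal usage sketch: construct a drone and inspect its status.
    d = Drone(name="Scout", speed=25, elevation=120)
    print(d.status())  # Drone "Scout" speed is 25 mph at 120 ft
    print(d.getID())   # a random UUID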
|
__author__ = 'CassyLee'
from datetime import datetime
from elasticsearch import Elasticsearch
import json
import time
# by default we connect to localhost:9200
class ES_query(object):
def __init__(self):
self.es = Elasticsearch()
#load schema and create an index
def create_index(self,index_name):
with open('sportsman_schema.txt','r') as schema:
sports_schema = json.load(schema)
novel_index = self.es.indices.create(index = index_name, body = sports_schema)
return sports_schema
#bulk load the data
def bulk_loading(self):
with open('rock_climbing.json','r') as j:
json_text = json.load(j)
bulk_file = []
action = { "index": { "_index": "i_sportsman", "_type": "stadium" }}
for i in range(len(json_text)):
bulk_file.append(action)
bulk_file.append(json_text.values()[i])
#return bulk_file
#call create_index function to create i_novel index
self.create_index("i_sportsman")
bulk_load = self.es.bulk(body = bulk_file)
self.es.indices.refresh(index = "i_sportsman")
return bulk_load
def q_place(self,string):
query_body = {
"query":{
"multi_match" : {
"query": string,
"fields": [ "name", "location" ]}},
"highlight":{
"fields":{
"locations":{}}}
}
res = self.es.search(index = "i_sportsman", doc_type = "stadium", body = query_body,size = 10000)
self.prints(res)
#print the required results by order
def prints(self,res):
hits = res["hits"]["hits"]
        print 'total number of hits: ' + str(len(hits))
for i in range(min(10,len(hits))):
print '\n'
print 'rank: ' + str(i+1)
stadium = hits[i]["_source"]
print 'name: ' + stadium['name']
highlight = hits[i]["highlight"]
print 'highlights:'
for (k,v) in highlight.items():
print ' '+ k + ': ' + str(v)
if __name__ == "__main__":
x = ES_query()
q_addr = x.q_place('Boston')
|
#!/usr/bin/env python
#coding=gbk
import xmlrpclib, base64, sys
proxy = xmlrpclib.ServerProxy('http://eztally.appspot.com')
#proxy = xmlrpclib.ServerProxy('http://localhost:8080')
sk = proxy.user_login(0, '123')
print sk
#print proxy.get_stat_report(sk, '2010-02', '2010-05', -1)
#print proxy.get_last_tallies(sk, 1, 1)
#id = proxy.add_tally(sk, 1, 1, 100, 0, '2010-06-19', 'memo')
#print proxy.save_tally(sk, id, 1, 1, 100, 0, '2010-6-20', 'new memo')
#print proxy.get_last_tallies(sk, -1)
#print proxy.del_tally(sk, id)
#print base64.encodestring('备注')
#print base64.decodestring('sbjXog==')
#print proxy.get_month_total(sk, '2010-06') |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Block, Prescription, Transaction
class PrescriptionAdmin(admin.ModelAdmin):
''' Custom Prescription Admin '''
def has_add_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return False
search_fields = ['id']
list_per_page = 25
fields = ('id','public_key', 'timestamp')
exclude = ('public_key','private_key',)
readonly_fields = ("public_key", "private_key", "data", "timestamp", "location","signature")
# Register your models here.
admin.site.register(Block)
admin.site.register(Prescription)
admin.site.register(Transaction)
|
from amuse.lab import *
import numpy
from distinct_colours import get_distinct
from matplotlib import pyplot
def energy_error_of_integrated_Nbody_system(code, particles,
end_time, precision):
gravity = code(number_of_workers=4)
gravity.parameters.timestep_parameter = precision
#gravity.parameters.timestep = precision | nbody_system.time
gravity.particles.add_particles(particles)
channel_from_to_framework = gravity.particles.new_channel_to(particles)
E0 = gravity.particles.potential_energy(G=nbody_system.G)
E0 += gravity.particles.kinetic_energy()
gravity.evolve_model(end_time)
channel_from_to_framework.copy()
Et = gravity.particles.potential_energy(G=nbody_system.G) \
+ gravity.particles.kinetic_energy()
gravity.stop()
de = (Et-E0)/E0
return de
def get_dE(code, precision, t_end):
dE = []
for pri in precision:
dEi = energy_error_of_integrated_Nbody_system(code, particles,
t_end, pri)
dE.append(abs(dEi))
print("integrated with precision=", pri, "dE/E=", dEi)
return dE
if __name__ in ('__main__','__plot__'):
numpy.random.seed(31415)
particles = new_plummer_model(1000)
precision = 10.**numpy.linspace(0., -3., 10)
    t_end = 1.0 | nbody_system.time
cols = get_distinct(2)
print('ph4')
code = ph4
dE = get_dE(code, precision, t_end)
pyplot.scatter(precision, dE, c=cols[0], lw=0, s=50, marker='o')
print('BHTree')
code = BHTree
dE = get_dE(code, precision, t_end)
pyplot.scatter(precision, dE, c=cols[1], lw=0, s=50, marker='^')
t0 = 0.8
t1 = 0.02
ep = 4.e-5
eb = 0.07
pyplot.plot([t0, t1], [ep, ep*(t1/t0)**4], c=cols[0], lw=2)
pyplot.plot([t0, t1], [eb, eb*(t1/t0)**2], c=cols[1], lw=2)
pyplot.xlabel('time step parameter')
pyplot.xlim(1.e-4, 3.)
pyplot.xscale('log')
pyplot.ylabel('$|E(t)-E(0)|/|E(0)|$')
pyplot.ylim(1.e-15, 10.)
pyplot.yscale('log')
save_file = 'precision_N100t1.png'
pyplot.savefig(save_file)
print("\nOutput saved in", save_file)
pyplot.show()
|
import subprocess
import wave
import struct
import numpy
import csv
import sys
import os
import pydub
import matplotlib.pyplot as plt
def moments(x):
mean = x.mean()
std = x.var()**0.5
skewness = ((x - mean)**3).mean() / std**3
kurtosis = ((x - mean)**4).mean() / std**4
return [mean, std, skewness, kurtosis]
def fftfeatures(wavdata):
f = numpy.fft.fft(wavdata)
    f = f[2:(f.size // 2 + 1)]
f = abs(f)
total_power = f.sum()
f = numpy.array_split(f, 10)
return [e.sum() / total_power for e in f]
def features(x):
x = numpy.array(x)
f = []
xs = x
diff = xs[1:] - xs[:-1]
f.extend(moments(xs))
f.extend(moments(diff))
xs = x.reshape(-1, 10).mean(1)
diff = xs[1:] - xs[:-1]
f.extend(moments(xs))
f.extend(moments(diff))
xs = x.reshape(-1, 100).mean(1)
diff = xs[1:] - xs[:-1]
f.extend(moments(xs))
f.extend(moments(diff))
xs = x.reshape(-1, 1000).mean(1)
diff = xs[1:] - xs[:-1]
f.extend(moments(xs))
f.extend(moments(diff))
f.extend(fftfeatures(x))
return f
def read_wav(wav_file):
    """Returns two chunks of sound data from wave file."""
    # Let failures propagate: the old try/except swallowed the error and then
    # hit a NameError on the return statement.
    w = wave.open(wav_file)
    n = 60 * 10000
    fmt = "%di" % n
    if w.getnframes() < n * 2:
        raise ValueError('Wave file too short')
    frames = w.readframes(n)
    wav_data1 = struct.unpack(fmt, frames)
    frames = w.readframes(n)
    wav_data2 = struct.unpack(fmt, frames)
    return wav_data1, wav_data2
def compute_chunk_features(mp3_file):
"""Return feature vectors for two chunks of an MP3 file."""
# Extract MP3 file to a mono, 10kHz WAV file
out_file = "temp.wav"
mp3_to_convert = pydub.AudioSegment.from_mp3(mp3_file)
mp3_to_convert.export(out_file, format="wav")
# Read in chunks of data from WAV file
wav_data1, wav_data2 = read_wav(out_file)
return features(wav_data1), features(wav_data2)
# Main script starts here
# =======================
def main():
x1 = []
x2 = []
labels = []
analysis = []
for path, dirs, files in os.walk('C:/Users/jkrogman/Downloads/scdl'):
for f in files:
if not f.endswith('.mp3'):
# Skip any non-MP3 files
continue
mp3_file = os.path.join(path, f)
# Extract the track name (i.e. the file name) plus the names
# of the two preceding directories. This will be useful
# later for plotting.
tail, track = os.path.split(mp3_file)
tail, dir1 = os.path.split(tail)
tail, dir2 = os.path.split(tail)
# Compute features. feature_vec1 and feature_vec2 are lists of floating
# point numbers representing the statistical features we have extracted
# from the raw sound data.
try:
feature_vec1, feature_vec2 = compute_chunk_features(mp3_file)
x1.append(feature_vec1[9])
x2.append(feature_vec2[10])
labels.append(track)
            except Exception:
                # Skip files that fail to convert or are too short.
                continue
    # x, y = zip(*analysis)
    print([x1, x2])
    print(labels)
    print('\n')
#print x2
if __name__ == '__main__':
    print('starting')
    main()
|
import unittest
from en_route_salute import solution
class TestEnrouteSalute(unittest.TestCase):
def test_solution(self):
test_values = {
"--->-><-><-->-": 10,
">----<": 2,
"<<>><": 4
}
for key, item in test_values.items():
self.assertEqual(solution(key), item)
if __name__ == '__main__':
unittest.main() |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-09 12:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ServerInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(blank=True, max_length=64, null=True, verbose_name='主机名')),
('manage_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='管理IP')),
('usage', models.CharField(blank=True, max_length=64, null=True, verbose_name='用途')),
                ('system', models.CharField(choices=[('ubuntu', 'ubuntu'), ('centos7', 'centos7')], default='centos7', max_length=64, verbose_name='操作系统类型')),
('cpu', models.IntegerField(blank=True, null=True, verbose_name='cpu个数')),
('mem', models.CharField(blank=True, max_length=32, null=True, verbose_name='内存')),
('disk_total', models.CharField(blank=True, max_length=100, null=True)),
('is_active', models.BooleanField(default=True, verbose_name='是否启用')),
('add_time', models.DateTimeField(auto_now_add=True, null=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': '服务器资产表',
'verbose_name_plural': '服务器资产表',
'db_table': 'wisops_server_info_t',
},
),
]
|
# coding: utf-8
import random as rand
import math
import binascii
#encrypting message block m, a list of bits
def app0_error_correcting_encoding(m, n):
#take m, turn into n length by appending 0's
return m + (n-len(m))*"0"
#print(app0_error_correcting_encoding(format(44,'0%ib'%8), 100))
def app0_error_correcting_decoding(em, lam):
return em[:lam]
#print(app0_error_correcting_decoding(app0_error_correcting_encoding(format(44,'0%ib'%8), 100), 8))
def repeat_error_correcting_encoding(m, r):
message_error_correct = ''
for i in range(len(m)):
message_error_correct += (m[i]*r)
return message_error_correct
def repeat_error_correcting_decoding(m, r):
    message_error_decode = ''
    for i in range(0, len(m), r):
        #majority vote over each block of r repeated bits
        block = m[i:i+r]
        message_error_decode += '1' if block.count('1') > r // 2 else '0'
    return message_error_decode
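#example: repeat_error_correcting_encoding('101', 3) == '111000111'
#         repeat_error_correcting_decoding('110000111', 3) == '101' (one flipped bit per block is corrected)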
#primality tests: trial division for the exponent, Lucas-Lehmer for the Mersenne number
def is_prime(n):
    if n < 2:
        return False
    for d in range(2, int(n**0.5) + 1):
        if n % d == 0:
            return False
    return True
def is_mersenne_prime(n):
    #Lucas-Lehmer: for odd prime n, 2**n - 1 is prime iff s == 0 after n - 2 squarings
    if not is_prime(n):
        return False
    if n == 2:
        return True
    p = 2**n - 1
    s = 4
    for _ in range(n - 2):
        s = (s*s - 2) % p
    return s == 0
#KEY GENERATION: inputs lam, outputs pk, sk and T
#uniformly randomly chosen n-bit string with Hamming weight h
def bit_string_h(n, h):
    #generate h random distinct numbers between 1 and n, put 1's in those positions
    rand_list = "0"*n
    true_list = rand.sample(range(1, n), h)
    for t in true_list:
        rand_list = rand_list[:t] + "1" + rand_list[t+1:]
    return int(rand_list, 2)
#print(bit_string_h(10, 5))
#uniformly randomly chosen n-bit string
def n_bit_num(n):
    rand_num = rand.getrandbits(n)
    return rand_num
def key_gen(lam):
    h = lam
    #Choose a Mersenne prime exponent n such that h = lam and 16*(lam^2) >= n > 10*(lam^2)
    n_high = 16*(h**2)
    n_low = 10*(h**2)+1
    p_not_prime = True
    while p_not_prime:
        n = rand.randint(n_low, n_high) #randomly chosen between n_low and n_high
        p = 2**n - 1 #prime only when the Lucas-Lehmer test passes
        if is_mersenne_prime(n):
            p_not_prime = False
F = bit_string_h(n, h) #uniformly randomly chosen n-bit string with Hamming weight h
G = bit_string_h(n, h) #uniformly randomly chosen n-bit string with Hamming weight h
R = n_bit_num(n) #uniformly randomly chosen n-bit string.
#public key
pk = (format(R,'0%ib'%n), (F*R + G) % p) #mod p
#secret key
sk = F
return (pk, sk) #as well as lam and n
#ENCRYPTION: Inputs message m, public key pk and the error-correcting encoding algorithm E.
# Outputs encrypted message (C1, C2)
def Mersenne_encrypt(m, pk, E):
    h = len(m)
    n = len(pk[0])
    A = bit_string_h(n, h) #uniformly randomly chosen n-bit string with Hamming weight h
    B1 = bit_string_h(n, h) #uniformly randomly chosen n-bit string with Hamming weight h
    B2 = bit_string_h(n, h) #uniformly randomly chosen n-bit string with Hamming weight h
    C1 = A*int(pk[0], 2) + B1 #R is recovered from its binary-string form in the public key
    Em = E(m) #error correcting code, as a binary string
    C2 = (A*pk[1] + B2) ^ int(Em, 2)
    return (C1, C2)
#DECRYPTION: Inputs the coded message (C1, C2), the secret key F and the error-correcting decoding algorithm D.
# Outputs message m.
def Mersenne_decrypt(F, C1, C2, D):
    #bitwise XOR operation
    bin_len = max(math.ceil(math.log(F*C1, 2)), math.ceil(math.log(C2, 2)))
    output = (F*C1) ^ C2
    return D(format(output,'0%ib'%bin_len))
pk, sk = key_gen(6)
print("Key generated")
#Run an example
m = "101110110100010110"
def string_to_bin(s):
return bin(int.from_bytes(s.encode(), 'big'))[2:]
def bin_to_string(b):
    return b.to_bytes((b.bit_length() + 7) // 8, 'big').decode()
app100_enc = lambda x: app0_error_correcting_encoding(x, 10000)
app100_dec = lambda x: app0_error_correcting_decoding(x, len(m))
enc_m1, enc_m2 = Mersenne_encrypt(m, pk, app100_enc)
#print(enc_m1, enc_m2)
dec_m = Mersenne_decrypt(sk, enc_m1, enc_m2, app100_dec)
print("Original m: ", m)
print("Encoded, then decoded m: ", dec_m) |
import RadioSimulator
import datetime
import sys, os, time, copy
import itertools
import multiprocessing
from multiprocessing import Pool
import numpy as np
import pandas as pd
errFile = 'gridsearchErrorLog.log'
try:
os.remove(errFile)
except OSError:
pass
sys.stderr = open(errFile, 'w')
## Define the grid
TEGserialSeries = np.arange(1,51,5)
TEGparallelSeries = np.arange(1,31,5)
battSeries = np.arange(1,30,5)
capSeries = np.arange(1,30,5)
SOCseries = np.arange(0.2,0.81,0.2)
V_bSeries = np.arange(0, 1.6, 0.3)
V_cSeries = np.arange(1.8, 3.4, 0.3)
varList = [TEGserialSeries,TEGparallelSeries, battSeries, capSeries, SOCseries, V_bSeries, V_cSeries]
scenarioVarList = list(itertools.product(*varList) ) # Create a list of all scenario combinations
# x = [ p, s , b , c, SOC, V_b, V_c]
minx = np.array( [0.1, 0.1, 0 , 0, 0.2, 0 , 0 ])
maxx = np.array( [100, 100, 100, 100, 0.8, 1.6, 3.6])
mySim = RadioSimulator.RadioSimulator(radioFile = '../Data/PowerMEMS_Sample_Data_em_20160928.csv')
# mySim = RadioSimulator.RadioSimulator(radioFile = '../Data/50step_downsampled_toy.csv')
resultfile = '../Results/gridSearchAllResults_'+datetime.datetime.now().strftime("%Y-%m-%d_%H_%M")+'.csv'
#### Parallelized Grid Search ####
## The following section preps a list of scenarios for a parallelized grid search.
# This could be replaced by nested loops if parallelization is not desired.
# Steps:
# - Create a list of lists with all the variables.
# - Create all combinations of these variable values (combinatorial combinations; same as nested loops)
# - Pack each of these into the initVariables dictionary form
# - map all of these to a multiprocessing pool
# - take the output of the pool and pack it into a dataframe
# - Assign the costs to the dataframe
# - Save the dataframe
def tupleToDict(a):
return {'TEGserial':a[0], 'TEGparallel':a[1], 'batts':a[2], 'caps':a[3], 'SOC':a[4], 'V_b':a[5], 'V_c':a[6]}
def processTupleSim(myTuple):
(initVariable, mySim) = myTuple
return mySim.computeCost(initVariable)
def returnSimCost(initVars):
mySim = RadioSimulator.RadioSimulator(radioFile = '../Data/PowerMEMS_Sample_Data_em_20160928.csv')
return mySim.computeCost(initVars)
scenarioDictList = [tupleToDict(myTuple) for myTuple in scenarioVarList ]
# scenarioList = [tupleToDict(myTuple) for myTuple in scenarioList] # Pack them each into dictionaries
# scenarioSimTupleList = [(initDict, mySim) for initDict in scenarioList]
print("Number of scenarios: %s"%len(scenarioVarList))
sys.stdout.flush()
### Prepping for parallel execution
results = pd.DataFrame(scenarioDictList) # This currently does not have the cost data; that will be added later
results['cost'] = float('NaN')
success = pd.DataFrame()
if multiprocessing.cpu_count() <= 10:
myPool = Pool()
else:
myPool = Pool(6) # Don't overrun bGrid2
batches = 20
startAt = 0
batchSize = (len(scenarioVarList) - startAt) // batches
## Block the problem into batches, so that we can save progress between batches
for j in np.arange(startAt,len(scenarioVarList),batchSize):
scenarioSimTupleList = [(initDict, mySim ) for initDict in scenarioDictList[j:j+batchSize] ]
# scenarioResults = pd.DataFrame(scenarioDictList)
initStr = "Started batch for scenarios %s to %s at %s"%(j, j+batchSize-1, datetime.datetime.now())
print(initStr)
sys.stderr.write(initStr+'\n')
sys.stderr.flush()
starttime = time.time()
results.loc[j:j+batchSize-1, 'cost'] = myPool.map(processTupleSim, scenarioSimTupleList)
results.to_csv(resultfile)
finishStr = "Finished batch in %.2f seconds with %s successes"%(time.time()-starttime,
sum(results.loc[j:j+batchSize-1, 'cost']<2000) )
print(finishStr)
sys.stderr.write(finishStr+'\n')
sys.stderr.flush()
print("Done")
sys.stderr.close()
sys.stderr = sys.__stderr__
|
epsilon = 0.0000001
def mysqrt(a):
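    # Babylonian (Newton) iteration: repeatedly average x and a/x until the update is below epsilon.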
x = 1
while True:
y = (x + a/x) / 2
if abs(y-x) < epsilon:
break
x = y
return y
#mysqrt(100)
import math
def test_square_root():
    print('a mysqrt(a) math.sqrt(a) diff')
    print('- --------- ------------ ----')
    for a in range(1, 10):
        b = mysqrt(a)
        c = math.sqrt(a)
        d = abs(b - c)
        print('{:.1f} {:.10f} {:.10f} {}'.format(a, b, c, d))
test_square_root() |
from unittest import TestCase
from ......messaging.decorators.attach_decorator import AttachDecorator
from ......messaging.models.base import BaseModelError
from .....didcomm_prefix import DIDCommPrefix
from ...message_types import ATTACHMENT_FORMAT, PRES_20_REQUEST
from ..pres_format import V20PresFormat
from ..pres_request import V20PresRequest
CD_ID = "NcYxiDXkpYi6ov5FcYDi1e:3:CL:12:tag1"
INDY_PROOF_REQ = [
{
"name": "proof-req",
"version": "1.0",
"nonce": "12345",
"requested_attributes": {
"0_player_uuid": {
"name": "player",
"restrictions": [
{
"cred_def_id": f"{CD_ID}",
"attr::player::value": "Richie Knucklez",
}
],
"non_revoked": {
"from": 1234567890,
"to": 1234567890,
},
},
"0_screencapture_uuid": {
"name": "screenCapture",
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
"non_revoked": {
"from": 1234567890,
"to": 1234567890,
},
},
},
"requested_predicates": {
"0_highscore_GE_uuid": {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
"non_revoked": {
"from": 1234567890,
"to": 1234567890,
},
}
},
},
{
"name": "proof-req",
"version": "1.0",
"nonce": "123456",
"requested_attributes": {
"0_player_uuid": {
"name": "player",
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
},
"0_screencapture_uuid": {
"name": "screenCapture",
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
},
},
"requested_predicates": {
"0_highscore_GE_uuid": {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
}
},
},
{
"name": "proof-req",
"version": "1.0",
"nonce": "1234567",
"requested_attributes": {},
"requested_predicates": {
"0_highscore_GE_uuid": {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": f"{CD_ID}"}],
}
},
},
]
PRES_REQ = [
V20PresRequest(
comment="Test",
will_confirm=True,
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(mapping=proof_req, ident="indy")
],
)
for proof_req in INDY_PROOF_REQ
]
class TestV20PresRequest(TestCase):
"""Presentation request tests."""
def test_init_type(self):
"""Test initializer and type."""
for i, pres_req in enumerate(PRES_REQ):
assert pres_req.will_confirm
assert len(pres_req.formats) == len(pres_req.request_presentations_attach)
assert pres_req.request_presentations_attach[0].content == INDY_PROOF_REQ[i]
assert pres_req.attachment(V20PresFormat.Format.INDY) == INDY_PROOF_REQ[i]
assert pres_req._type == DIDCommPrefix.qualify_current(PRES_20_REQUEST)
def test_attachment_no_target_format(self):
"""Test attachment behaviour for only unknown formats."""
x_pres_req = V20PresRequest(
comment="Test",
formats=[V20PresFormat(attach_id="not_indy", format_="not_indy")],
request_presentations_attach=[
AttachDecorator.data_base64(
ident="not_indy", mapping=PRES_REQ[0].serialize()
)
],
)
assert x_pres_req.attachment() is None
def test_serde(self):
"""Test de/serialization."""
for pres_req_msg in PRES_REQ:
pres_req_dict = pres_req_msg.serialize()
pres_req_obj = V20PresRequest.deserialize(pres_req_dict)
assert type(pres_req_obj) == V20PresRequest
pres_req_dict["request_presentations~attach"][0]["data"][
"base64"
] = "eyJub3QiOiAiaW5keSJ9"
with self.assertRaises(BaseModelError):
V20PresRequest.deserialize(pres_req_dict)
pres_req_dict["request_presentations~attach"][0]["@id"] = "xxx"
with self.assertRaises(BaseModelError):
V20PresRequest.deserialize(pres_req_dict)
pres_req_dict["request_presentations~attach"].append(
{
"@id": "def",
"mime-type": "application/json",
"data": {"base64": "eyJub3QiOiAiaW5keSJ9"},
}
) # more attachments than formats
with self.assertRaises(BaseModelError):
V20PresRequest.deserialize(pres_req_dict)
pres_req_msg.formats.append( # unknown format: no validation
V20PresFormat(
attach_id="not_indy",
format_="not_indy",
)
)
obj = pres_req_msg.serialize()
obj["request_presentations~attach"].append(
{
"@id": "not_indy",
"mime-type": "application/json",
"data": {"base64": "eyJub3QiOiAiaW5keSJ9"},
}
)
V20PresRequest.deserialize(obj)
|
#!/usr/local/bin/python
# -*- encoding: utf-8 -*-
from collections import deque
class Color(object):
RED = True
BLACK = False
class Node(object):
key = None
value = None
left = None
right = None
color = None
count = None
def __init__(self, key, value=None, color=Color.BLACK):
self.key = key
self.value = value
self.color = color
        self.count = 1  # a node always counts itself
def is_red(self):
return self.color is Color.RED
class RedBlackTree(object):
root = None
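    # Left-leaning red-black BST (Sedgewick style): red links lean left, no node
    # has two red links, and every root-to-leaf path has the same black height.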
@classmethod
def from_lot(cls, keys):
if not keys:
return
tree = cls()
for k in keys:
tree.put(k)
return tree
def as_lot(self, colored=False):
tree_lot = []
if not self.root:
return tree_lot
q = deque()
nodes_in_current_level = 1
nodes_in_next_level = 0
q.append(self.root)
while q:
current_node = q.popleft()
nodes_in_current_level -= 1
if current_node:
tree_lot.append('[%s]' % current_node.key
if colored and current_node.is_red() else current_node.key)
q.append(current_node.left)
q.append(current_node.right)
nodes_in_next_level += 2
if nodes_in_current_level == 0:
nodes_in_current_level = nodes_in_next_level
nodes_in_next_level = 0
return tree_lot
def put(self, key, value=None):
self.root = self._put(key, value, self.root)
self.root.color = Color.BLACK
def _put(self, key, value=None, node=None):
if node is None:
return Node(key, value, Color.RED)
if key < node.key:
node.left = self._put(key, value, node.left)
elif key > node.key:
node.right = self._put(key, value, node.right)
else:
node.value = value
if self.is_red(node.right) and not self.is_red(node.left):
node = self.rotate_left(node)
if self.is_red(node.left) and self.is_red(node.left.left):
node = self.rotate_right(node)
if self.is_red(node.left) and self.is_red(node.right):
self.flip_colors(node)
node.count = 1 + self._size(node.left) + self._size(node.right)
return node
    def get(self, key):
        current = self.root
        while current is not None:
            if key < current.key:
                current = current.left
            elif key > current.key:
                current = current.right
            else:
                return current.value
        return None
def size(self):
return self._size(self.root)
def _size(self, node):
return 0 if node is None else node.count
def is_red(self, node):
if not node:
return Color.BLACK
return node.is_red()
    def rotate_left(self, node):
        assert self.is_red(node.right)
        node_right = node.right
        node.right = node_right.left
        node_right.left = node
        node_right.color = node.color
        node.color = Color.RED
        # keep subtree sizes consistent after the rotation
        node_right.count = node.count
        node.count = 1 + self._size(node.left) + self._size(node.right)
        return node_right
    def rotate_right(self, node):
        assert self.is_red(node.left)
        node_left = node.left
        node.left = node_left.right
        node_left.right = node
        node_left.color = node.color
        node.color = Color.RED
        # keep subtree sizes consistent after the rotation
        node_left.count = node.count
        node.count = 1 + self._size(node.left) + self._size(node.right)
        return node_left
def flip_colors(self, node):
assert not self.is_red(node)
assert self.is_red(node.left)
assert self.is_red(node.right)
node.color = Color.RED
node.left.color = Color.BLACK
node.right.color = Color.BLACK
if __name__ == '__main__':
inp = map(int, '64 55 89 50 63 76 96 28 74 81'.split())
rbt = RedBlackTree.from_lot(inp)
res = rbt.as_lot(colored=True)
print res == inp, res, "\n"
inp = 'S E A R C H X M P L'.split()
rbt = RedBlackTree.from_lot(inp)
print rbt.as_lot(colored=True), "\n"
inp = map(int, '63 52 79 37 58 71 85 25 38 53 60 10 29'.split())
rbt = RedBlackTree.from_lot(inp)
res = map(str, rbt.as_lot(colored=True))
print res
res = map(lambda r: r.replace(']', '').replace('[', ''), [r for r in res if '[' in r])
print ' '.join(sorted(res)), "\n"
inp = '52 31 85 16 42 79 88 10 26 87'.split()
rbt = RedBlackTree.from_lot(inp)
print rbt.as_lot(colored=True)
for i in '59 29 22'.split():
rbt.put(i)
print " ".join(rbt.as_lot()), "\n"
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 08:18:41 2020
@author: Ashima
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the insertionSort2 function below.
def insertionSort2(n, arr):
for j in range(1, n):
k = arr[j]
i = j - 1
        while i >= 0 and arr[i] > k:
arr[i+1] = arr[i]
i = i - 1
arr[i+1] = k
print(*arr)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
insertionSort2(n, arr)
|
#!/usr/bin/env python3
"""
Defines function that calculates the probability density function of
a Gaussian distribution
"""
import numpy as np
def pdf(X, m, S):
"""
Calculates the probability density function of a Gaussian distribution
parameters:
X [numpy.ndarray of shape (n, d)]:
contains the dataset whose PDF should be calculated
n: the number of data points
d: the number of dimensions for each data point
m [numpy.ndarray of shape (d,)]:
contains the mean of the distribution
S [numpy.ndarray of shape (d, d)]:
contains the covariance of the distribution
not allowed to use any loops
not allowed to use the function numpy.diag or method numpy.ndarray.diagonal
returns:
P [numpy.ndarray of shape (n,)]:
containing the PDF values for each data point
all values in P should have a minimum value of 1e-300
or None on failure
"""
    if not isinstance(X, np.ndarray) or X.ndim != 2:
        return None
    n, d = X.shape
    if not isinstance(m, np.ndarray) or m.shape != (d,):
        return None
    if not isinstance(S, np.ndarray) or S.shape != (d, d):
        return None
    # Vectorized multivariate Gaussian PDF (no loops, no diag/diagonal)
    det = np.linalg.det(S)
    inv = np.linalg.inv(S)
    diff = X - m
    exponent = -0.5 * np.sum((diff @ inv) * diff, axis=1)
    P = np.exp(exponent) / np.sqrt(((2 * np.pi) ** d) * det)
    return np.maximum(P, 1e-300)
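if __name__ == '__main__':
    # Sanity check against the 1-D standard normal at x = 0: 1/sqrt(2*pi) ~ 0.3989
    print(pdf(np.array([[0.0]]), np.zeros(1), np.eye(1)))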
|
#!/usr/bin/python
# Andy Sayler
# Fall 2012
# CU CS5525
# flatten visitor functions
#
# Adopted from Jeremy Siek, Fall 2012
#
# In conjunction with:
# Michael (Mike) Vitousek
# https://github.com/mvitousek/python-compiler-mmv
# Anne Gatchell
# https://github.com/halloannielala/compiler-5525
# Helper Types
from vis import Visitor
# Helper Tools
from utilities import generate_name
from unitcopy import CopyVisitor
from functionwrappers import *
# Data Types
from compiler.ast import *
from monoast import *
from flatast import *
# Flatten expressions to 3-address instructions (Remove Complex Operations)
# Input: an AST for P_1
# Output: an AST for P_1 (put without complex operations)
# Notes: this introduces too many variables and moves, but that's OK.
# Register allocation with move biasing will hopefully take care of it.
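# Illustrative sketch (hypothetical temp names, for intuition only): flattening
#   x = 1 + (2 + 3)
# introduces a temporary so that every instruction holds at most one operation:
#   tmp0 = 2 + 3
#   x = 1 + tmp0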
def make_assign(lhs, rhs):
return Assign(nodes=[AssName(name=lhs, flags='OP_ASSIGN')], expr=rhs)
class FlattenVisitor(CopyVisitor):
# Banned Nodes
def visitAdd(self, n):
raise Exception("'Add' node no longer valid at this stage")
def visitUnarySub(self, n):
raise Exception("'UnarySub' node no longer valid at this stage")
def visitCompare(self, n):
raise Exception("'Compare' node no longer valid at this stage")
def visitPrintnl(self, n):
raise Exception("'Printnl' node no longer valid at this stage")
def visitmono_IsTag(self, n):
raise Exception("'mono_IsTag' node no longer valid at this stage")
def visitmono_ProjectTo(self, n):
raise Exception("'mono_ProjectTo' node no longer valid at this stage")
def visitmono_InjectFrom(self, n):
raise Exception("'mono_InjectFrom' node no longer valid at this stage")
def visitAnd(self, n):
raise Exception("'And' node no longer valid at this stage")
def visitOr(self, n):
raise Exception("'Or' node no longer valid at this stage")
def mono_IsTrue(self, n):
raise Exception("'mono_IsTrue' node no longer valid at this stage")
def IfExp(self, n):
raise Exception("'IfExp' node no longer valid at this stage")
# For statements: takes a statement and returns a list of instructions
def visitStmt(self, n):
sss = []
for s in n.nodes:
sss += [self.dispatch(s)]
return Stmt(reduce(lambda a,b: a + b, sss, []), n.lineno)
def visitAssign(self, n):
(rhs,ss) = self.dispatch(n.expr, False)
return ss + [Assign(n.nodes, rhs)]
def visitDiscard(self, n):
(e, ss) = self.dispatch(n.expr, True)
return ss
# For expressions: takes an expression and a bool saying whether the
# expression needs to be simple, and returns an expression
# (a Name or Const if it needs to be simple) and a list of instructions.
def visitConst(self, n, needs_to_be_simple):
return (n, [])
def visitName(self, n, needs_to_be_simple):
return (n, [])
def visitmono_Let(self, n, needs_to_be_simple):
(rhs, ss1) = self.dispatch(n.rhs, True)
(body, ss2) = self.dispatch(n.body, True)
return (body, ss1 + [make_assign(n.var.name, rhs)] + ss2)
def visitmono_IntAdd(self, n, needs_to_be_simple):
(left, ss1) = self.dispatch(n.left, True)
(right, ss2) = self.dispatch(n.right, True)
if needs_to_be_simple:
tmp = generate_name('intaddtmp')
return (Name(tmp), ss1 + ss2 + [make_assign(tmp, mono_IntAdd((left, right)))])
else:
return (mono_IntAdd((left, right)), ss1 + ss2)
def visitmono_IntEqual(self, n, needs_to_be_simple):
(left, ss1) = self.dispatch(n.left, True)
(right, ss2) = self.dispatch(n.right, True)
if needs_to_be_simple:
tmp = generate_name('intequaltmp')
return (Name(tmp), ss1 + ss2 + [make_assign(tmp, mono_IntEqual((left, right)))])
else:
return (mono_IntEqual((left, right)), ss1 + ss2)
def visitmono_IntNotEqual(self, n, needs_to_be_simple):
(left, ss1) = self.dispatch(n.left, True)
(right, ss2) = self.dispatch(n.right, True)
if needs_to_be_simple:
tmp = generate_name('intnotequaltmp')
return (Name(tmp), ss1 + ss2 + [make_assign(tmp, mono_IntNotEqual((left, right)))])
else:
return (mono_IntNotEqual((left, right)), ss1 + ss2)
def visitmono_IntUnarySub(self, n, needs_to_be_simple):
(expr,ss) = self.dispatch(n.expr, True)
if needs_to_be_simple:
tmp = generate_name('usubtmp')
return (Name(tmp), ss + [make_assign(tmp, mono_IntUnarySub(expr))])
else:
return (mono_IntUnarySub(expr), ss)
def visitCallFunc(self, n, needs_to_be_simple):
if isinstance(n.node, Name):
args_sss = [self.dispatch(arg, True) for arg in n.args]
args = [arg for (arg,ss) in args_sss]
ss = reduce(lambda a,b: a + b, [ss for (arg,ss) in args_sss], [])
if needs_to_be_simple:
tmp = generate_name('callfunctmp')
return (Name(tmp), ss + [make_assign(tmp, CallFunc(n.node, args))])
else:
return (CallFunc(n.node, args), ss)
else:
raise Exception('flatten: only calls to named functions allowed')
def visitmono_IfExp(self, n, needs_to_be_simple):
(teste, testss) = self.dispatch(n.test, True)
(thene, thenss) = self.dispatch(n.then, True)
(elsee, elsess) = self.dispatch(n.else_, True)
simple = mono_IfExp(teste,
flat_InstrSeq(thenss, thene),
flat_InstrSeq(elsess, elsee))
if needs_to_be_simple:
tmp = generate_name('ifexptmp')
myexpr = (Name(tmp))
myss = [make_assign(tmp, simple)]
else:
myexpr = simple
myss = []
return (myexpr, testss + myss)
|
if __name__ == '__main__':
    dd = None
    if dd is not None:
        df = 23
    else:
        df = 21
    print(df)
    arr = {}
    jongMok = "0506940"
    arr[jongMok] = 0
    print(arr[jongMok])  # look up the same key that was stored
    volume = ""
    if len(volume) == 0:
        volume = "-1"
    print(int(volume))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UploadIoTDataToBlockchainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'lto', '2021-07-07', 'UploadIoTDataToBlockchain')
self.set_method('POST')
def get_IotIdSource(self): # String
return self.get_query_params().get('IotIdSource')
def set_IotIdSource(self, IotIdSource): # String
self.add_query_param('IotIdSource', IotIdSource)
def get_IotDataToken(self): # String
return self.get_query_params().get('IotDataToken')
def set_IotDataToken(self, IotDataToken): # String
self.add_query_param('IotDataToken', IotDataToken)
def get_PrivacyData(self): # String
return self.get_query_params().get('PrivacyData')
def set_PrivacyData(self, PrivacyData): # String
self.add_query_param('PrivacyData', PrivacyData)
def get_IotId(self): # String
return self.get_query_params().get('IotId')
def set_IotId(self, IotId): # String
self.add_query_param('IotId', IotId)
def get_IotDataDigest(self): # String
return self.get_query_params().get('IotDataDigest')
def set_IotDataDigest(self, IotDataDigest): # String
self.add_query_param('IotDataDigest', IotDataDigest)
def get_IotDataDID(self): # String
return self.get_query_params().get('IotDataDID')
def set_IotDataDID(self, IotDataDID): # String
self.add_query_param('IotDataDID', IotDataDID)
def get_PlainData(self): # String
return self.get_query_params().get('PlainData')
def set_PlainData(self, PlainData): # String
self.add_query_param('PlainData', PlainData)
def get_IotAuthType(self): # String
return self.get_query_params().get('IotAuthType')
def set_IotAuthType(self, IotAuthType): # String
self.add_query_param('IotAuthType', IotAuthType)
def get_IotIdServiceProvider(self): # String
return self.get_query_params().get('IotIdServiceProvider')
def set_IotIdServiceProvider(self, IotIdServiceProvider): # String
self.add_query_param('IotIdServiceProvider', IotIdServiceProvider)
|
from src.rest_service.models import BaseSqla, BaseDocument, BaseEmbeddedDocument
from src.rest_service.resources import BaseResource
from sqlalchemy import Column
import sqlalchemy as sqa_fields
from umongo import fields as umo_fields, validate
from quart import Response, request
import json
class MongoUser(BaseDocument):
uid = umo_fields.StrField(attribute='_id')
username = umo_fields.StringField(required=True)
email = umo_fields.EmailField(required=True, unique=True)
# TODO create a class that encapsulates the business logic of user
# TODO create a class that encapsulates the resource of user
# TODO create a class that encapsulates the odm and orm object
class ExampleUserODM(object):
APPLICATION = None
RESOURCE = None
class ExampleUserRsrc(BaseResource):
PATTERNS = ['/user/id/<int:user_id>',
                '/user/user/<string:username>',
'/user/add'
]
async def get(self, *args, **kwargs):
'''
Get Example view
'''
return Response(json.dumps({'result': 'admin example works'}), status=300, mimetype='application/json')
    async def post(self, *args, **kwargs):
        '''
        Post Example view
        '''
        return Response(json.dumps({'result': 'admin example works'}), status=300, mimetype='application/json')
    async def put(self, *args, **kwargs):
        '''
        Put Example view
        '''
        return Response(json.dumps({'result': 'admin example works'}), status=300, mimetype='application/json')
    async def delete(self, *args, **kwargs):
        '''
        Delete Example view
        '''
        return Response(json.dumps({'result': 'admin example works'}), status=300, mimetype='application/json')
|
class DoublyLinkedList:
class Node:
def __init__(self, data=None, prev=None, next=None):
self.data = data
self.prev = prev
self.next = next
def disconnect(self):
self.data = None
self.prev = None
self.next = None
def __init__(self):
self.header = DoublyLinkedList.Node()
self.trailer = DoublyLinkedList.Node()
self.header.next = self.trailer
self.trailer.prev = self.header
self.size = 0
def __len__(self):
return self.size
def is_empty(self):
return len(self) == 0
def first_node(self):
if(self.is_empty()):
raise Exception("List is empty")
return self.header.next
def last_node(self):
if(self.is_empty()):
raise Exception("List is empty")
return self.trailer.prev
def add_after(self, node, data):
prev = node
succ = node.next
new_node = DoublyLinkedList.Node(data, prev, succ)
prev.next = new_node
succ.prev = new_node
self.size += 1
return new_node
def add_first(self, data):
return self.add_after(self.header, data)
def add_last(self, data):
return self.add_after(self.trailer.prev, data)
def add_before(self, node, data):
return self.add_after(node.prev, data)
def delete_node(self, node):
pred = node.prev
succ = node.next
pred.next = succ
succ.prev = pred
self.size -= 1
data = node.data
node.disconnect()
return data
def delete_first(self):
if (self.is_empty()):
raise Exception("List is empty")
self.delete_node(self.first_node())
def delete_last(self):
if (self.is_empty()):
raise Exception("List is empty")
self.delete_node(self.last_node())
def __iter__(self):
if (self.is_empty()):
return
cursor = self.first_node()
while cursor is not self.trailer:
yield cursor.data
cursor = cursor.next
def __repr__(self):
return "[" + " <--> ".join([str(item) for item in self]) + "]"
def merge_linked_lists(srt_lnk_lst1, srt_lnk_lst2):
    # Merge two sorted DoublyLinkedLists into a new sorted DoublyLinkedList
    # by walking both node chains and appending the smaller value each step.
    merged = DoublyLinkedList()
    node1 = srt_lnk_lst1.header.next
    node2 = srt_lnk_lst2.header.next
    while node1 is not srt_lnk_lst1.trailer and node2 is not srt_lnk_lst2.trailer:
        if node1.data <= node2.data:
            merged.add_last(node1.data)
            node1 = node1.next
        else:
            merged.add_last(node2.data)
            node2 = node2.next
    while node1 is not srt_lnk_lst1.trailer:
        merged.add_last(node1.data)
        node1 = node1.next
    while node2 is not srt_lnk_lst2.trailer:
        merged.add_last(node2.data)
        node2 = node2.next
    return merged
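if __name__ == '__main__':
    # Quick check (hypothetical data): merge two sorted lists.
    a = DoublyLinkedList()
    for v in (1, 3, 5):
        a.add_last(v)
    b = DoublyLinkedList()
    for v in (2, 4, 6):
        b.add_last(v)
    print(merge_linked_lists(a, b))  # [1 <--> 2 <--> 3 <--> 4 <--> 5 <--> 6]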
|
def sumar(op1, op2):
    print("The result of the addition is:", op1 + op2)
def restar(op1, op2):
    print("The result of the subtraction is:", op1 - op2)
def multiplicar(op1, op2):
    print("The result of the multiplication is:", op1 * op2)
|
import numpy as np
import logging
from RAGE import RAGE
from XY_ORACLE import XY_ORACLE
from XY_ADAPTIVE import XY_ADAPTIVE
from LAZY_TS import LAZY_TS
import pickle
import os
import sys
import functools
import multiprocessing as multiprocess
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
# create folder for data
data_dir = os.path.join(os.getcwd(), 'direction_data_dir')
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
# Calling algorithms
def sim_wrapper(item_list, seed_list, count):
item_list[count].algorithm(seed_list[count])
return item_list[count]
# Create a linear bandit problem instance (arms X, true parameter theta_star)
def many_arm_problem_instance(n):
d = 2
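    # n - 2 arms clustered near angle 0.78 rad, plus the optimal arm e_1 and a
    # nearly orthogonal arm at 3*pi/4; theta_star = e_1, so the cluster is hard
    # to separate from the optimal arm.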
x = .1 * np.random.rand(n - 2)
arm1 = [
[np.cos(.78 + x[i]), np.sin(.78 + x[i])] + [0 for _ in range(d - 2)]
for i in range(n - 2)
]
arm2 = [[1] + [0 for _ in range(d - 1)]]
arm3 = [[-.707, .707] + [0 for _ in range(d - 2)]]
X = np.vstack(arm1 + arm2 + arm3)
theta_star = np.array([1, 0] + [0 for _ in range(d - 2)]).reshape(-1, 1)
return X, theta_star
# parameters
count = 20
delta = 0.05
alpha = .1
eps = 0
sweep = [1000, 2500, 5000, 7500, 10000]
factor = 10
pool_num = 2
arguments = sys.argv[1:]
# For each element in sweep is bandit problem with number of arms n
for n in sweep:
print('Starting sweep: {}'.format(n))
np.random.seed(43)
X_set = []
theta_star_set = []
# Generate
for i in range(count):
X, theta_star = many_arm_problem_instance(n)
X_set.append(X)
theta_star_set.append(theta_star)
# Lazy TS
if 'lazyts' in arguments:
print('[ALGORITHM] * * * LTS no averaging * * *')
np.random.seed(43)
instance_list = [LAZY_TS(X, theta_star, delta, 2, False) for X, theta_star in zip(X_set, theta_star_set)]
seed_list = list(np.random.randint(0, 100000, count))
# calls the algorithm
parallel_sim = functools.partial(sim_wrapper, instance_list, seed_list)
pool = multiprocess.Pool(pool_num)
num_list = list(range(count))
instance_list = []
for instance in pool.imap_unordered(parallel_sim, num_list):
try:
instance_list.append(instance)
print('Finished Lazy TS instance ')
sample_complexity = np.array([instance.tau for instance in instance_list])
mean = np.mean(sample_complexity)
se = np.std(sample_complexity)/np.sqrt(count)
file1 = open(os.path.join(data_dir, "tslazy_no_averaging_" + str(n) + "_data.p"), "wb")
pickle.dump((mean, se), file1)
file1.close()
print('completed %d: mean %d and se %d' % (n, mean, se))
file2 = open(os.path.join(data_dir, "tslazy_no_averaging_" + str(n) + ".p"), "wb")
pickle.dump(instance_list, file2)
file2.close()
except:
print('error')
pool.close()
pool.join()
print('[ALGORITHM] * * * LTS averaging * * *')
np.random.seed(43)
instance_list = [LAZY_TS(X, theta_star, delta, 2, True) for X, theta_star in zip(X_set, theta_star_set)]
seed_list = list(np.random.randint(0, 100000, count))
# calls the algorithm
parallel_sim = functools.partial(sim_wrapper, instance_list, seed_list)
pool = multiprocess.Pool(pool_num)
num_list = list(range(count))
instance_list = []
for instance in pool.imap_unordered(parallel_sim, num_list):
try:
instance_list.append(instance)
print('Finished Lazy TS instance ')
sample_complexity = np.array([instance.tau for instance in instance_list])
mean = np.mean(sample_complexity)
se = np.std(sample_complexity)/np.sqrt(count)
file1 = open(os.path.join(data_dir, "tslazy_averaging_" + str(n) + "_data.p"), "wb")
pickle.dump((mean, se), file1)
file1.close()
print('completed %d: mean %d and se %d' % (n, mean, se))
file2 = open(os.path.join(data_dir, "tslazy_averaging_" + str(n) + ".p"), "wb")
pickle.dump(instance_list, file2)
file2.close()
except:
print('error')
pool.close()
pool.join()
#sys.exit('done')
# RAGE
if 'rage' in arguments:
print('[ALGORITHM] * * * RAGE * * *')
np.random.seed(43)
instance_list = [
RAGE(X, theta_star, factor, delta)
for X, theta_star in zip(X_set, theta_star_set)
]
seed_list = list(np.random.randint(0, 100000, count))
# calls the algorithm
parallel_sim = functools.partial(sim_wrapper, instance_list, seed_list)
pool = multiprocess.Pool(pool_num)
num_list = list(range(count))
instance_list = []
for instance in pool.imap_unordered(parallel_sim, num_list):
try:
instance_list.append(instance)
print('Finished RAGE Instance')
sample_complexity = np.array(
[instance.N for instance in instance_list])
mean = np.mean(sample_complexity)
se = np.std(sample_complexity) / np.sqrt(count)
file1 = open(
os.path.join(data_dir, "rage_" + str(n) + "_data.p"), "wb")
pickle.dump((mean, se), file1)
file1.close()
print('completed %d: mean %d and se %d' % (n, mean, se))
file2 = open(
os.path.join(data_dir, "rage_" + str(n) + ".p"), "wb")
pickle.dump(instance_list, file2)
file2.close()
except:
print('error')
pool.close()
pool.join()
# XY
if 'xy' in arguments:
np.random.seed(43)
        instance_list = [
            XY_ADAPTIVE(X, theta_star, alpha, delta)
            for X, theta_star in zip(X_set, theta_star_set)
        ]
seed_list = list(np.random.randint(0, 100000, count))
parallel_sim = functools.partial(sim_wrapper, instance_list, seed_list)
pool = multiprocess.Pool(5)
num_list = list(range(count))
instance_list = []
for instance in pool.imap_unordered(parallel_sim, num_list):
try:
instance_list.append(instance)
print('Finished XY Instance')
sample_complexity = np.array(
[instance.N for instance in instance_list])
mean = np.mean(sample_complexity)
se = np.std(sample_complexity) / np.sqrt(count)
                pickle.dump((mean, se),
                            open(
                                os.path.join(data_dir,
                                             "xy_" + str(n) + "_data.p"),
                                "wb"))
                print('completed %d: mean %d and se %d' % (n, mean, se))
                pickle.dump(
                    instance_list,
                    open(os.path.join(data_dir, "xy_" + str(n) + ".p"), "wb"))
except:
print('error')
pool.close()
pool.join()
# ORACLE
if 'oracle' in arguments:
np.random.seed(43)
instance_list = [
XY_ORACLE(X, theta_star, delta)
for X, theta_star in zip(X_set, theta_star_set)
]
seed_list = list(np.random.randint(0, 100000, count))
parallel_sim = functools.partial(sim_wrapper, instance_list, seed_list)
pool = multiprocess.Pool(pool_num)
num_list = list(range(count))
instance_list = []
for instance in pool.imap_unordered(parallel_sim, num_list):
try:
instance_list.append(instance)
print('Finished ORACLE Instance')
sample_complexity = np.array(
[instance.N for instance in instance_list])
mean = np.mean(sample_complexity)
se = np.std(sample_complexity) / np.sqrt(count)
file1 = open(
os.path.join(data_dir, "oracle_" + str(n) + "_data.p"),
"wb")
pickle.dump((mean, se), file1)
file1.close()
print('completed %d: mean %d and se %d' % (n, mean, se))
file2 = open(
os.path.join(data_dir, "oracle_" + str(n) + ".p"), "wb")
pickle.dump(instance_list, file2)
file2.close()
except:
print('error')
pool.close()
pool.join()
|
# -*- coding: utf-8 -*-
#
# (c) 2016 Björn Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'phell' for details.
#
import uuid
from phell.utils import to_int, from_hex
class Gpt(object):
SIZE = 512
EFI_SIGNATURE = from_hex("4546492050415254")
EFI_REVISION = from_hex("00000100")
SIGNATURE_START = 0
SIGNATURE_SIZE = 8
SIGNATURE_END = SIGNATURE_START + SIGNATURE_SIZE
REVISION_START = SIGNATURE_END
REVISION_SIZE = 4
REVISION_END = REVISION_START + REVISION_SIZE
HEADER_SIZE_START = REVISION_END
HEADER_SIZE_SIZE = 4
HEADER_SIZE_END = HEADER_SIZE_START + HEADER_SIZE_SIZE
HEADER_CRC_START = HEADER_SIZE_END
HEADER_CRC_SIZE = 4
HEADER_CRC_END = HEADER_CRC_START + HEADER_CRC_SIZE
RESERVED_START = HEADER_CRC_END
RESERVED_SIZE = 4
RESERVED_END = RESERVED_START + RESERVED_SIZE
CURRENT_LBA_START = RESERVED_END
CURRENT_LBA_SIZE = 8
CURRENT_LBA_END = CURRENT_LBA_START + CURRENT_LBA_SIZE
BACKUP_LBA_START = CURRENT_LBA_END
BACKUP_LBA_SIZE = 8
BACKUP_LBA_END = BACKUP_LBA_START + BACKUP_LBA_SIZE
FIRST_LBA_START = BACKUP_LBA_END
FIRST_LBA_SIZE = 8
FIRST_LBA_END = FIRST_LBA_START + FIRST_LBA_SIZE
LAST_LBA_START = FIRST_LBA_END
LAST_LBA_SIZE = 8
LAST_LBA_END = LAST_LBA_START + LAST_LBA_SIZE
GUID_START = LAST_LBA_END
GUID_SIZE = 16
GUID_END = GUID_START + GUID_SIZE
STARTING_LBA_START = GUID_END
STARTING_LBA_SIZE = 8
STARTING_LBA_END = STARTING_LBA_START + STARTING_LBA_SIZE
NUMBER_ENTRIES_START = STARTING_LBA_END
NUMBER_ENTRIES_SIZE = 4
NUMBER_ENTRIES_END = NUMBER_ENTRIES_START + NUMBER_ENTRIES_SIZE
ENTRY_SIZE_START = NUMBER_ENTRIES_END
ENTRY_SIZE_SIZE = 4
ENTRY_SIZE_END = ENTRY_SIZE_START + ENTRY_SIZE_SIZE
PARTITIONS_CRC_START = ENTRY_SIZE_END
PARTITIONS_CRC_SIZE = 4
PARTITIONS_CRC_END = PARTITIONS_CRC_START + PARTITIONS_CRC_SIZE
def __init__(self, data):
self.data = data
self.signature = data[Gpt.SIGNATURE_START:Gpt.SIGNATURE_END]
self.revision = data[Gpt.REVISION_START:Gpt.REVISION_END]
self.header_size = to_int(data[
Gpt.HEADER_SIZE_START:Gpt.HEADER_SIZE_END])
self.header_crc = data[Gpt.HEADER_CRC_START:Gpt.HEADER_CRC_END]
self.current_lba = to_int(data[
Gpt.CURRENT_LBA_START:Gpt.CURRENT_LBA_END])
self.backup_lba = to_int(data[
Gpt.BACKUP_LBA_START:Gpt.BACKUP_LBA_END])
self.first_lba = to_int(data[
Gpt.FIRST_LBA_START:Gpt.FIRST_LBA_END])
self.last_lba = to_int(data[
Gpt.LAST_LBA_START:Gpt.LAST_LBA_END])
self.guid = uuid.UUID(bytes_le=data[Gpt.GUID_START:Gpt.GUID_END])
self.start_lba = to_int(data[
Gpt.STARTING_LBA_START:Gpt.STARTING_LBA_END])
self.nr_entries = to_int(data[
Gpt.NUMBER_ENTRIES_START:Gpt.NUMBER_ENTRIES_END])
self.entry_size = to_int(data[
Gpt.ENTRY_SIZE_START:Gpt.ENTRY_SIZE_END])
self.partitions_crc = \
data[Gpt.PARTITIONS_CRC_START:Gpt.PARTITIONS_CRC_END]
def is_valid(self):
# todo:
# - Check the Signature
# - Check the Header CRC
# - Check that the current LBA entry points to the LBA that contains
# the GUID Partition Table
# - Check the CRC of the GUID Partition Entry Array
# If the GPT is the primary table, stored at LBA 1:
# - Check the Backup LBA to see if it is a valid GPT
return True
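    # A minimal sketch of the first check listed above (signature comparison
    # only; the CRC and LBA checks would need the raw header and binascii.crc32):
    def has_valid_signature(self):
        return self.signature == Gpt.EFI_SIGNATURE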
class GptPartition(object):
DEFAULT_SIZE = 128
TYPE_GUID_START = 0
TYPE_GUID_SIZE = 16
TYPE_GUID_END = TYPE_GUID_START + TYPE_GUID_SIZE
UNIQUE_GUID_START = TYPE_GUID_END
UNIQUE_GUID_SIZE = 16
UNIQUE_GUID_END = UNIQUE_GUID_START + UNIQUE_GUID_SIZE
FIRST_LBA_START = UNIQUE_GUID_END
FIRST_LBA_SIZE = 8
FIRST_LBA_END = FIRST_LBA_START + FIRST_LBA_SIZE
LAST_LBA_START = FIRST_LBA_END
LAST_LBA_SIZE = 8
LAST_LBA_END = LAST_LBA_START + LAST_LBA_SIZE
ATTRIBUTE_FLAGS_START = LAST_LBA_END
ATTRIBUTE_FLAGS_SIZE = 8
ATTRIBUTE_FLAGS_END = ATTRIBUTE_FLAGS_START + ATTRIBUTE_FLAGS_SIZE
NAME_START = ATTRIBUTE_FLAGS_END
NAME_SIZE = 72
NAME_END = NAME_START + NAME_SIZE
EFI_SYSTEM_TYPE = uuid.UUID("c12a7328-f81f-11d2-ba4b-00a0c93ec93b")
UNUSED_TYPE = uuid.UUID("00000000-0000-0000-0000-000000000000")
MBR_TYPE = uuid.UUID("024DEE41-33E7-11D3-9D69-0008C781F39F")
BIOS_TYPE = uuid.UUID("21686148-6449-6E6F-744E-656564454649")
MAC_OS_HFS_TYPE = uuid.UUID("48465300-0000-11AA-AA11-00306543ECAC")
MAC_OS_UFS_TYPE = uuid.UUID("55465300-0000-11AA-AA11-00306543ECAC")
MAC_OS_BOOT_TYPE = uuid.UUID("426F6F74-0000-11AA-AA11-00306543ECAC")
LINUX_FILESYSTEM_TYPE = uuid.UUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")
LINUX_RAID_TYPE = uuid.UUID("A19D880F-05FC-4D3B-A006-743F0F84911E")
LINUX_ROOT_X86_TYPE = uuid.UUID("44479540-F297-41B2-9AF7-D131D5F0458A")
LINUX_ROOT_X86_64_TYPE = uuid.UUID("4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709")
LINUX_ROOT_ARM_TYPE = uuid.UUID("69DAD710-2CE4-4E3C-B16C-21A1D49ABED3")
LINUX_ROOT_ARM64_TYPE = uuid.UUID("B921B045-1DF0-41C3-AF44-4C6F280D3FAE")
LINUX_SWAP_TYPE = uuid.UUID("0657FD6D-A4AB-43C4-84E5-0933C84B4F4F")
LINUX_LVM_TYPE = uuid.UUID("E6D6D379-F507-44C2-A23C-238F2A3DF928")
LINUX_HOME_TYPE = uuid.UUID("933AC7E1-2EB4-4F13-B844-0E14E2AEF915")
LINUX_DMCRYPT_TYPE = uuid.UUID("7FFEC5C9-2D00-49B7-8941-3EA10A5586B7")
LINUX_LUKS_TYPE = uuid.UUID("CA7D7CCB-63ED-4C53-861C-1742536059CC")
MS_DATA_PARTITION_TYPE = uuid.UUID("EBD0A0A2-B9E5-4433-87C0-68B6B72699C7")
partition_types = {
EFI_SYSTEM_TYPE: "EFI System Partition",
UNUSED_TYPE: "Unused",
MBR_TYPE: "MBR Partition Scheme",
BIOS_TYPE: "BIOS Boot Partition",
MAC_OS_HFS_TYPE: "Mac OS X (HFS/HFS+)",
MAC_OS_UFS_TYPE: "Mac OS X (UFS)",
MAC_OS_BOOT_TYPE: "Mac OS X (boot)",
LINUX_FILESYSTEM_TYPE: "Linux Filesystem",
LINUX_RAID_TYPE: "Linux RAID",
LINUX_ROOT_X86_TYPE: "Linux Root (x86)",
LINUX_ROOT_X86_64_TYPE: "Linux Root (x86-64)",
LINUX_ROOT_ARM_TYPE: "Linux Root (ARM)",
LINUX_ROOT_ARM64_TYPE: "Linux Root (ARM64)",
LINUX_SWAP_TYPE: "Linux Swap",
LINUX_LVM_TYPE: "Linux LVM",
LINUX_HOME_TYPE: "Linux /home",
LINUX_DMCRYPT_TYPE: "Linux dm-crypt",
LINUX_LUKS_TYPE: "Linux LUKS",
MS_DATA_PARTITION_TYPE: "MS Data Partition",
}
def __init__(self, data):
self.data = data
self.type_guid = uuid.UUID(bytes_le=data[
self.TYPE_GUID_START:self.TYPE_GUID_END])
self.unique_guid = uuid.UUID(bytes_le=data[
self.UNIQUE_GUID_START:self.UNIQUE_GUID_END])
self.first_lba = to_int(data[
self.FIRST_LBA_START:self.FIRST_LBA_END])
self.last_lba = to_int(data[
self.LAST_LBA_START:self.LAST_LBA_END])
self.attribute_flags = \
data[self.ATTRIBUTE_FLAGS_START:self.ATTRIBUTE_FLAGS_END]
self.name = data[self.NAME_START:self.NAME_END].decode("utf_16_le")
def is_type(self, ptype_guid):
return self.type_guid == ptype_guid
def get_partition_type(self):
        return self.partition_types.get(self.type_guid, "unknown")
# vim: set ts=4 sw=4 tw=80:
|
from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_batch_forward(x, w, b, gamma, beta, bn_param):
fc_out, fc_cache = affine_forward(x, w, b)
batch_out, batch_cache = batchnorm_forward(fc_out, gamma, beta, bn_param)
relu_out, relu_cache = relu_forward(batch_out)
'''
if dropout_param['p']>0
dropout_out, dropout_cache = dropout_forward(relu_out,dropout_param)
out = dropout
cache = (fc_cache,batch_cache, relu_cache, dropout_cache)
else:
'''
out = relu_out
cache = (fc_cache,batch_cache, relu_cache)
return out, cache
def affine_relu_batch_backward(dout, cache):
"""
Backward pass for the affine-batch norm-relu convenience layer
"""
'''
if dropout_param['p']>0:
fc_cache,batch_cache, relu_cache, dropout_cache = cache
dout_relu = dropout_backward(dout, dropout_cache)
else:
'''
fc_cache, batch_cache, relu_cache = cache
dout_relu = dout
dbatch = relu_backward(dout_relu, relu_cache)
dfc, dgamma, dbeta = batchnorm_backward(dbatch, batch_cache)
dx, dw, db = affine_backward(dfc, fc_cache)
return dx, dw, db, dgamma, dbeta
def affine_relu_forward(x, w, b):
"""
    Convenience layer that performs an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
relu_out, relu_cache = relu_forward(a)
'''
if dropout_param['p'] > 0:
out, dropout_cache = dropout_foward(relu_out, dropout_param)
cache = (fc_cache, relu_cache, dropout_cache)
else:
'''
out = relu_out
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
'''
if dropout_param['p'] > 0:
fc_cache, relu_cache, dropout_cache = cache
dout_relu = dropout_backward(dout, dropout_cache)
else:
'''
fc_cache, relu_cache = cache
dout_relu = dout
da = relu_backward(dout_relu, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
|
# Generated by Django 2.2.7 on 2020-02-01 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_autorace', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tran_schedule',
name='Raceplace1',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード1'),
),
migrations.AddField(
model_name='tran_schedule',
name='Raceplace2',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード2'),
),
migrations.AddField(
model_name='tran_schedule',
name='Raceplace3',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード3'),
),
migrations.AddField(
model_name='tran_schedule',
name='Raceplace4',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード4'),
),
migrations.AddField(
model_name='tran_schedule',
name='Raceplace5',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード5'),
),
migrations.AddField(
model_name='tran_schedule',
name='Raceplace6',
field=models.CharField(blank=True, max_length=1, verbose_name='場コード6'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum1',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数1'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum2',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数2'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum3',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数3'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum4',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数4'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum5',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数5'),
),
migrations.AddField(
model_name='tran_schedule',
name='Totalracenum6',
field=models.CharField(blank=True, max_length=2, verbose_name='レース数6'),
),
migrations.AlterField(
model_name='tran_schedule',
name='DateOfRace1',
field=models.CharField(blank=True, max_length=8, verbose_name='競走年月日1'),
),
migrations.AlterField(
model_name='tran_schedule',
name='DateOfRace2',
field=models.CharField(blank=True, max_length=8, verbose_name='競走年月日2'),
),
migrations.AlterField(
model_name='tran_schedule',
name='DateOfRace3',
field=models.CharField(blank=True, max_length=8, verbose_name='競走年月日3'),
),
migrations.AlterField(
model_name='tran_schedule',
name='SalesInfo1',
field=models.CharField(blank=True, max_length=1, verbose_name='場外発売情報1'),
),
migrations.AlterField(
model_name='tran_schedule',
name='SalesInfo2',
field=models.CharField(blank=True, max_length=1, verbose_name='場外発売情報2'),
),
migrations.AlterField(
model_name='tran_schedule',
name='SalesInfo3',
field=models.CharField(blank=True, max_length=1, verbose_name='場外発売情報3'),
),
]
|
"""
.. module:: dbsync.logs
:synopsis: Logging facilities for the library.
"""
import logging
#: All the library loggers
loggers = set()
log_handler = None
def get_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
loggers.add(logger)
if log_handler is not None:
logger.addHandler(log_handler)
return logger
def set_log_target(fo):
"""
Set a stream as the target for dbsync's logging. If a string is
given, it is treated as a path to a log file.
"""
global log_handler
if log_handler is None:
log_handler = logging.FileHandler(fo) if isinstance(fo, basestring) \
else logging.StreamHandler(fo)
log_handler.setLevel(logging.WARNING)
log_handler.setFormatter(
logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
for logger in loggers:
logger.addHandler(log_handler)
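# A minimal usage sketch (illustrative; not part of the original module).
# Note that set_log_target also attaches the handler to loggers created
# before it was called, via the module-level `loggers` registry.
if __name__ == '__main__':
    import sys
    set_log_target(sys.stderr)  # or set_log_target('/tmp/dbsync.log')
    log = get_logger('dbsync.example')
    log.warning('conflict detected during merge')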
|
class CustomerAlreadyRegisteredError(Exception):
def __init__(self):
super().__init__('Customer already registered')
class CustomerNotFoundError(Exception):
def __init__(self):
super().__init__('Customer not found')
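# Illustrative sketch of how these errors might be raised; the `registry`
# dict and the helpers below are hypothetical, not part of the original module.
registry = {}

def register_customer(cid, data):
    if cid in registry:
        raise CustomerAlreadyRegisteredError()
    registry[cid] = data

def get_customer(cid):
    try:
        return registry[cid]
    except KeyError:
        raise CustomerNotFoundError()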
|
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
from IPython.display import clear_output
from scipy.special import gammaln
def run_nsteps(pol_vec0,theta0,env,nsteps):
# run nsteps of metropolis hastings
theta = np.copy(theta0)
pol_vec = np.copy(pol_vec0)
value_curr = value(env,pol_vec)
pol_trace = np.zeros([nsteps,len(pol_vec)])
theta_trace = np.zeros([nsteps,len(theta)])
v_trace = np.zeros([nsteps])
for step in np.arange(nsteps):
if step % 20 == 0:
clear_output()
print('step: ', step)
pol_trace[step,:] = pol_vec
theta_trace[step,:] = theta
v_trace[step] = value_curr
(pol_vec, logp_curr, value_curr) = policy_step(pol_vec,theta,env,v_trace[step])
theta = latent_step(pol_vec,theta, value_curr)
return (pol_trace,theta_trace,v_trace)
def sim_episode(env, policy_vec, max_step, show):
d = False
j = 0
S = env.reset()
if show == 1:
fig, ax = plt.subplots()  # axes handed to env.render below
while j < max_step:
if show == 1:
# draw environment and pause
env.render(ax)
plt.pause(.02)
# increase counter
j += 1
# sample action given by pi for state S
a = policy_vec[S]
# take action A, observe s1, r, terminal?
S_prime,r,d = env.step(a)
# update S
S = S_prime
if d:
break
return j
def dir_logp(theta):
# log_p(theta|dir(alpha))
alpha = np.array([5,5,5,5])
return np.sum((alpha-1)*np.log(theta) - gammaln(alpha)) + gammaln(np.sum(alpha))
def cat_logp(theta,p_vec):
# log_p(p_vec|categorical(theta))
return np.sum(np.log(theta[p_vec.astype(int)]))
def value(env,p_vec):
# evaluate p_vec for env
return -1*sim_episode(env,p_vec,200,0)
def logp(val, p_vec, theta):
# log posterior of value, p_vec and theta
logp = val + cat_logp(theta,p_vec) + dir_logp(theta)
return logp
def latent_step(pol_vec,theta0, value_curr):
# step theta (propose and accept or reject)
theta = np.copy(theta0)
logp_curr = logp(value_curr,pol_vec,theta)
curr_val, theta = theta, prop_theta(theta)
logp_prop = logp(value_curr,pol_vec,theta)
theta,accepted = metrop_select(logp_prop - logp_curr, theta, curr_val)
if accepted:
logp_curr = logp_prop
return theta
def policy_step(pol_vec0,theta,env,value_curr):
# step p_vec (gibbs metropolis)
state_list = np.arange(pol_vec0.shape[0])
nr.shuffle(state_list)
pol_vec = np.copy(pol_vec0)
logp_curr = logp(value_curr,pol_vec,theta)
nchoices = 4
for s in state_list:
curr_choice, pol_vec[s] = pol_vec[s], sample_except(nchoices, pol_vec[s])
value_prop = value(env,pol_vec)
logp_prop = logp(value_prop,pol_vec,theta)
pol_vec[s], accepted = metrop_select(logp_prop - logp_curr, pol_vec[s], curr_choice)
if accepted:
logp_curr = logp_prop
value_curr = value_prop
return pol_vec, logp_curr, value_curr
def prop_theta(theta0):
# propose new theta
scale = 30
prop_theta = nr.dirichlet(scale*theta0)
return prop_theta
def metrop_select(mr, q, q0):
# accept or reject according to metrop hasting rule
if np.isfinite(mr) and np.log(nr.uniform()) < mr:
return q, True
else:
return q0, False
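# Worked example: with mr = logp_prop - logp_curr = -1.0 the proposal is
# accepted with probability exp(-1.0) ~= 0.37, since log(u) < mr is the same
# event as u < exp(mr) for u ~ Uniform(0, 1); any mr >= 0 always accepts.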
def sample_except(limit, excluded):
# draw a categorical sample below limit, never picking excluded
candidate = nr.choice(limit - 1)
if candidate >= excluded:
candidate += 1
return candidate
def sample_policy(theta,nstates):
pol_vec = nr.choice(4,size=nstates,p=theta)
return(pol_vec)
def sample_theta():
alpha = np.array([10,10,10,10])
return nr.dirichlet(alpha)
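# Hedged driver sketch. DummyEnv below is a stand-in (an assumption, not part
# of the original code) exposing the reset()/step(a) -> (state, reward, done)
# interface that sim_episode above expects.
class DummyEnv(object):
    def __init__(self, nstates=16):
        self.nstates = nstates
        self.s = 0
    def reset(self):
        self.s = 0
        return self.s
    def step(self, a):
        # deterministic chain: any action advances one state
        self.s = (self.s + 1) % self.nstates
        done = (self.s == self.nstates - 1)
        return self.s, float(done), done

if __name__ == '__main__':
    env = DummyEnv()
    theta0 = sample_theta()                        # Dirichlet prior draw
    pol_vec0 = sample_policy(theta0, env.nstates)  # initial random policy
    pol_trace, theta_trace, v_trace = run_nsteps(pol_vec0, theta0, env, 100)
    print('final value:', v_trace[-1])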
|