seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43444307075 | from datetime import datetime, timedelta
import random
import re
import wsgiref.handlers
import cgi
import base64
from google.appengine.api import xmpp
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.ereporter import report_generator
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import xmpp_handlers
from google.appengine.api.taskqueue import Task
from conversation import *
from stats import *
# In-memory state for the current tea round (lost if the app instance restarts).
drinkers = set([])  # bare JIDs who have said yes to the current round
informed = set([])  # bare JIDs already asked about the current round
settingprefs = set([])  # bare JIDs whose next message is their tea preferences
teacountdown = False  # True while a round's countdown task is pending
doublejeopardy = ""  # JID of the previous tea maker, excluded from the next draw
lastround = datetime.now()  # when the last round was resolved
def send_random(recipient, choices, prefix=""):
    """Send one message, chosen uniformly at random from *choices*, to
    *recipient* over XMPP, optionally prepending *prefix*."""
    chosen = random.sample(choices, 1)[0]
    xmpp.send_message(recipient, prefix + chosen)
class Roster(db.Model):
    """Datastore record for one chat participant (keyed by bare JID)."""
    # Bare JID (user@domain); also used as the entity key name.
    jid = db.StringProperty(required=True)
    # Free-text tea preferences, e.g. "milk, two sugars".
    teaprefs = db.StringProperty(required=False, default="")
    # Whether Mrs Doyle should ask this person when a round starts.
    askme = db.BooleanProperty(required=False, default=True)
    # True until the first-time greeting has been sent.
    newbie = db.BooleanProperty(required=False, default=True)
def get_roster():
    """Fetch every Roster entity from the datastore (capped at 999)."""
    query = Roster.all()
    return query.fetch(limit=999)
def howTheyLikeItClause(message, talker):
    """If *message* contains a ", <how I take it>" clause, store it as the
    talker's tea preferences; otherwise, if no preferences are stored yet,
    ask for them and remember that the next reply is the answer."""
    global TRIGGER_TEAPREFS
    global HOW_TO_TAKE_IT
    fromaddr = talker.jid
    # Try to work out if this includes a how they like it clause
    clauses = message.split(",")
    if len(clauses) > 1:
        # Everything after the first comma is treated as the preference text.
        del clauses[0]
        secondSentence = " ".join(clauses)
        if re.search(TRIGGER_TEAPREFS, secondSentence, re.IGNORECASE):
            talker.teaprefs = secondSentence
            talker.put()
            return
    # If they haven't given any preferences before
    if talker.teaprefs == "":
        send_random(fromaddr, HOW_TO_TAKE_IT)
        # Their next message will be captured as their preferences.
        settingprefs.add(fromaddr)
        return
def selectByMadeVsDrunkRatio(drinkers):
    """Randomly pick one member of *drinkers*, weighted by each person's
    made/drunk ratio (people who make tea less often are likelier picks).

    Accepts any iterable of JIDs — the caller passes a filter() object.
    """
    # Build a dictionary mapping the contents of drinkers onto their made drunk ratio
    probs = dict()
    probTotal = 0.0
    for person in drinkers:
        ratio = getMadeDrunkRatio(person)
        probs[person] = ratio
        probTotal += ratio
    # Generate a random point between 0 and the total weight, then walk the
    # dictionary summing weights; the first person whose cumulative weight
    # crosses the point has been selected with probability proportional to
    # their ratio.
    goalNumber = probTotal * random.random()
    probSum = 0.0
    for person, ratio in probs.items():
        probSum += ratio
        if probSum >= goalNumber:
            return person
    # Floating-point rounding can leave the loop without a pick. Fall back to
    # an arbitrary member. BUGFIX: the original returned drinkers[0], which
    # requires an indexable sequence — the caller passes a filter(), which is
    # a one-shot iterator on Python 3 and would fail here.
    return next(iter(probs))
def getSalutation(jid):
    """Turn a bare JID into a friendly title-cased name:
    'john.doe@example.com' -> 'John Doe'."""
    local_part = jid.split("@")[0]
    words = local_part.replace(".", " ")
    return words.title()
def buildWellVolunteeredMessage(person):
    """Build the message sent to the chosen tea maker: a random
    'well volunteered' line followed by every other drinker's name and
    stored tea preferences, one per line."""
    finalMessage = random.sample(WELL_VOLUNTEERED, 1)[0] + '\n'
    for drinker in drinkers:
        if drinker != person:
            # Look up each drinker's stored preferences from the datastore.
            temp = Roster.get_by_key_name(drinker)
            teapref = temp.teaprefs
            finalMessage += " * " + getSalutation(drinker) + " ("+teapref+")" + '\n'
    return finalMessage
class XmppHandler(xmpp_handlers.CommandHandler):
    """Handler class for all XMPP activity."""

    def unhandled_command(self, message=None):
        # Fallback for any slash-command we don't recognise.
        message.reply("I don't have any secret easter egg commands.....")

    def text_message(self, message=None):
        """Main conversation entry point: classify the incoming chat line
        and drive the tea-round state machine accordingly."""
        global NOBACKOUT
        global GREETING
        global AHGRAND
        global GOOD_IDEA
        global HUH
        global RUDE
        global NO_TEA_TODAY
        global JUST_MISSED
        global ADDPERSON
        global WANT_TEA
        global TRIGGER_HELLO
        global TRIGGER_YES
        global TRIGGER_TEA
        global TRIGGER_RUDE
        global TRIGGER_GOAWAY
        global TRIGGER_ADDPERSON
        global TRIGGER_TEAPREFS
        global teacountdown
        global drinkers
        global settingprefs
        global lastround
        # Strip the XMPP resource suffix to get the bare JID.
        fromaddr = self.request.get('from').split("/")[0]
        talker = Roster.get_or_insert(key_name=fromaddr, jid=fromaddr)
        # If they showed up in the middle of a round, ask them if they want tea in
        # the normal way after we've responded to this message
        if(talker.askme == False and teacountdown):
            send_random(fromaddr, WANT_TEA)
            informed.add(fromaddr)
            # Messaging us re-enables asking (they opted out earlier).
            talker.askme=True
            talker.put()
        # Mrs Doyle takes no crap
        # (the rude-words pattern is stored base64-encoded; decode before matching)
        if re.search(base64.b64decode(TRIGGER_RUDE), message.body, re.IGNORECASE):
            send_random(fromaddr, RUDE)
            return
        # And sometimes people take no crap.
        if re.search(TRIGGER_GOAWAY, message.body, re.IGNORECASE):
            talker.askme=False
            talker.put()
            send_random(fromaddr, NO_TEA_TODAY)
            xmpp.send_presence(fromaddr, status=":( Leaving " + getSalutation(fromaddr) + " alone. So alone...", presence_type=xmpp.PRESENCE_TYPE_AVAILABLE)
            return
        xmpp.send_presence(fromaddr, status="", presence_type=xmpp.PRESENCE_TYPE_AVAILABLE)
        # See if we're expecting an answer as regards tea preferences
        if fromaddr in settingprefs:
            talker.teaprefs = message.body
            talker.put()
            settingprefs.remove(fromaddr)
            xmpp.send_message(fromaddr, "Okay!")
            return
        if teacountdown:
            if fromaddr in drinkers:
                # Already in the round: they may be refining preferences,
                # confirming again, or (not allowed!) trying to back out.
                if re.search(TRIGGER_TEAPREFS, message.body, re.IGNORECASE):
                    xmpp.send_message(fromaddr, "So you like your tea '" + message.body + "'?")
                    talker.teaprefs = message.body
                    talker.put()
                elif re.search(TRIGGER_YES, message.body, re.IGNORECASE):
                    xmpp.send_message(fromaddr, "Okay!")
                else:
                    send_random(fromaddr, NOBACKOUT)
                return
            # Not yet in the round: a yes adds them, anything else gets cajoling.
            if re.search(TRIGGER_YES, message.body, re.IGNORECASE):
                drinkers.add(fromaddr)
                send_random(fromaddr, AHGRAND)
                howTheyLikeItClause(message.body, talker)
            else:
                send_random(fromaddr, AH_GO_ON)
        elif re.search(TRIGGER_ADDPERSON, message.body, re.IGNORECASE):
            # Invite a new JID to the roster.
            emailtoinvite = re.search("("+TRIGGER_ADDPERSON+")", message.body, re.IGNORECASE).group(0)
            xmpp.send_invite(emailtoinvite)
            send_random(fromaddr, ADDPERSON)
        elif re.search(TRIGGER_TEA, message.body, re.IGNORECASE):
            # Someone proposed tea: start a new round.
            send_random(fromaddr, GOOD_IDEA)
            howTheyLikeItClause(message.body, talker)
            drinkers.add(fromaddr)
            informed.add(fromaddr)
            # Probe presence of everyone else so Register asks them in turn.
            for person in get_roster():
                if person.askme and not person.jid == fromaddr:
                    xmpp.send_presence(jid=person.jid, presence_type = xmpp.PRESENCE_TYPE_PROBE)
            # Schedule the round to be resolved by /maketea in two minutes.
            doittask = Task(countdown="120", url="/maketea")
            doittask.add()
            teacountdown = True
        elif re.search(TRIGGER_HELLO, message.body, re.IGNORECASE):
            send_random(fromaddr, GREETING)
        elif re.search(TRIGGER_YES, message.body, re.IGNORECASE) and (datetime.now() - lastround) < timedelta(seconds=120):
            # A "yes" shortly after a round closed: they just missed it.
            send_random(fromaddr, JUST_MISSED)
        else:
            send_random(fromaddr, HUH)
class ProcessTeaRound(webapp.RequestHandler):
    """Task-queue handler hit ~2 minutes after a round starts: picks the
    tea maker, notifies everyone, records stats, and resets round state."""

    def post(self):
        global ON_YOUR_OWN
        global WELL_VOLUNTEERED
        global OTHEROFFERED
        global drinkers
        global teacountdown
        global doublejeopardy
        global lastround
        global informed
        if len(drinkers) == 1:
            # Only one taker: they get to make their own.
            for n in drinkers:
                send_random(n, ON_YOUR_OWN)
        elif len(drinkers) > 0:
            # Select someone who wasn't the last person to make the tea
            doublejeopardy = teamaker = selectByMadeVsDrunkRatio(filter(lambda n : n != doublejeopardy, drinkers))
            for person in drinkers:
                if person == teamaker:
                    # Credit the maker for the whole round and send the order list.
                    statDrinker(person, len(drinkers))
                    statRound (person, len(drinkers))
                    xmpp.send_message(person, buildWellVolunteeredMessage(person))
                else:
                    # Tell everyone else who volunteered.
                    send_random(person, OTHEROFFERED, getSalutation(teamaker))
                    statDrinker(person)
        # Reset all per-round state for the next round.
        teacountdown = False
        drinkers = set([])
        settingprefs = set([])
        informed = set([])
        lastround = datetime.now()
class Register(webapp.RequestHandler):
    """Presence/subscription handler: greets newcomers and, if a round is
    underway, asks newly-available people whether they want tea."""

    def post(self):
        global WANT_TEA
        global NEWBIE_GREETING
        global teacountdown
        global informed
        fromaddr = self.request.get('from').split("/")[0]
        person = Roster.get_or_insert(key_name=fromaddr, jid=fromaddr)
        if(person.newbie):
            # First contact: send the welcome spiel exactly once.
            xmpp.send_message(fromaddr, NEWBIE_GREETING)
            person.newbie = False
            person.put()
        if(not person.askme):
            # They've opted out; set a mopey status instead of asking.
            xmpp.send_presence(fromaddr, status=":( Haven't heard from " + getSalutation(fromaddr) + " in a while...", presence_type=xmpp.PRESENCE_TYPE_AVAILABLE)
        else:
            xmpp.send_presence(fromaddr, status="", presence_type=xmpp.PRESENCE_TYPE_AVAILABLE)
        if(teacountdown and person.askme and fromaddr not in informed):
            # Round in progress and they haven't been asked yet.
            send_random(fromaddr, WANT_TEA)
            informed.add(fromaddr)
def main():
    """Wire up the WSGI routes and serve via the CGI handler."""
    routes = [
        ('/maketea', ProcessTeaRound),
        ('/_ah/xmpp/message/chat/', XmppHandler),
        ('/_ah/xmpp/presence/available/', Register),
        ('/_ah/xmpp/subscription/subscribe/', Register),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| AdamClements/MrsDoyle | app/mrsdoyle.py | mrsdoyle.py | py | 9,138 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.xmpp.send_message",
"line_number": 27,
"usage_type": "call"
},
{
"ap... |
20148737612 | #!/usr/bin/env python3
from enum import Enum
from copy import deepcopy
import sys
import readline # make input() use readline <3
CODE_BASE_ADDR = 0x1000 # where code is loaded
MEMORY_SIZE = 0xFFFF # How big memory is
INSTR_PTR_LOC = 0x0
STACK_PTR_LOC = 0x1
BASE_PTR_LOC = 0x2
class InstructionFamily(Enum):
    """Broad instruction categories, determining operand layout and length."""
    IMM = 0    # immediate: rm + literal word (2 words)
    # NOTE(review): "ARTIH" is a typo for "ARITH"; kept because other code
    # in this file references it by this name.
    ARTIH = 1  # arithmetic/move: rm + mem operand (2 words)
    JMP = 2    # conditional jump: rm + mem + label (3 words)
class Opcode(Enum):
    """CT64K opcode numbers (high nibble of the first instruction word).

    Operand semantics below describe what State.step does, where rm and
    mem are memory addresses.
    """
    MI = 0x0  # rm <- imm
    MV = 0x1  # rm <- mem
    MD = 0x2  # rm <- *mem
    LD = 0x3  # *rm <- mem
    ST = 0x4  # *mem <- rm
    AD = 0x5  # rm += mem
    SB = 0x6  # rm -= mem
    ND = 0x7  # rm &= mem
    OR = 0x8  # rm |= mem
    XR = 0x9  # rm ^= mem
    SR = 0xa  # rm >>= mem (logical)
    SL = 0xb  # rm <<= mem
    SA = 0xc  # rm >>= mem (arithmetic, sign-extending)
    JG = 0xd  # jump to label if rm > mem
    JL = 0xe  # jump to label if rm < mem
    JQ = 0xf  # jump to label if rm == mem; JQ 0,0,self halts (HF)
class Instruction:
    """A decoded CT64K instruction: opcode, operands, address, and length."""

    def __init__(self, address, opcode, *args):
        self.address = address
        self.opcode = Opcode(opcode)  # raises ValueError on a bad opcode
        if self.opcode in [Opcode.MI]:
            # Immediate family: operand word is a literal value.
            self.family = InstructionFamily.IMM
            self.rm = args[0]
            self.imm = args[1]
            self.length = 2  # num words
        elif self.opcode in [Opcode.MV, Opcode.MD, Opcode.LD, Opcode.ST,
                             Opcode.AD, Opcode.SB, Opcode.ND, Opcode.OR,
                             Opcode.XR, Opcode.SR, Opcode.SL, Opcode.SA]:
            # Arithmetic/move family: rm and a memory operand.
            self.family = InstructionFamily.ARTIH
            self.rm = args[0]
            self.mem = args[1]
            self.length = 2  # num words
        elif self.opcode in [Opcode.JG, Opcode.JL, Opcode.JQ]:
            # Jump family: compare rm and mem, maybe jump to label.
            self.family = InstructionFamily.JMP
            self.rm = args[0]
            self.mem = args[1]
            self.label = args[2]
            self.length = 3  # num words
        else:
            # BUGFIX: was `raise NotImplemented(...)`, which itself raises
            # TypeError since NotImplemented is not an exception type.
            raise NotImplementedError(
                'WHAT EVEN THATS NOT A POSSIBLE OPCODE: {:x}'.format(self.opcode))

    def mnemonic(self):
        """Two-letter mnemonic for this instruction ('HF' for the
        halt-and-catch-fire specialization of JQ-to-self)."""
        mnemonics = {
            Opcode.MI: 'MI',
            Opcode.MV: 'MV',
            Opcode.MD: 'MD',
            Opcode.LD: 'LD',
            Opcode.ST: 'ST',
            Opcode.AD: 'AD',
            Opcode.SB: 'SB',
            Opcode.ND: 'ND',
            Opcode.OR: 'OR',
            Opcode.XR: 'XR',
            Opcode.SR: 'SR',
            Opcode.SL: 'SL',
            Opcode.SA: 'SA',
            Opcode.JG: 'JG',
            Opcode.JL: 'JL',
            Opcode.JQ: 'JQ'
        }
        mnemonic = mnemonics[self.opcode]
        if mnemonic == 'JQ' and self.rm == 0x0 and self.mem == 0x0 and self.address == self.label:
            mnemonic = 'HF' # special case for halt-and-catch-fire, which is a specialization of jmp to self
        return mnemonic

    def __repr__(self):
        if self.family == InstructionFamily.IMM:
            return 'Instruction(0x{:x}, 0x{:x}, [0x{:x}, 0x{:x}])'.format( \
                self.address, self.opcode, \
                self.rm, self.imm)
        elif self.family == InstructionFamily.ARTIH:
            return 'Instruction(0x{:x}, 0x{:x}, [0x{:x}, 0x{:x}])'.format( \
                self.address, self.opcode, \
                self.rm, self.mem)
        elif self.family == InstructionFamily.JMP:
            return 'Instruction(0x{:x}, 0x{:x}, [0x{:x}, 0x{:x}, 0x{:x}])'.format( \
                self.address, self.opcode, \
                self.rm, self.mem, self.label)
        else:
            # BUGFIX: NotImplementedError, not NotImplemented (see __init__).
            raise NotImplementedError('What even instruction did you give me')

    def __str__(self):
        """
        If you want a prettier version, use .nice_str
        """
        if self.family == InstructionFamily.IMM:
            return '{} 0x{:x}, 0x{:x}'.format(self.mnemonic(), self.rm, self.imm)
        elif self.family == InstructionFamily.ARTIH:
            return '{} 0x{:x}, 0x{:x}'.format(self.mnemonic(), self.rm, self.mem)
        elif self.family == InstructionFamily.JMP:
            if self.opcode == Opcode.JQ and self.rm == 0x0 and self.mem == 0x0 and self.address == self.label:
                return 'HF' # special case for halt-and-catch-fire
            return '{} 0x{:x}, 0x{:x}, 0x{:x}'.format(self.mnemonic(), self.rm, self.mem, self.label)

    def nice_str(self, symbol_fn):
        """
        symbol_fn takes an address and returns a pretty string for it.
        """
        if self.family == InstructionFamily.IMM:
            return '{} {}, {}'.format(self.mnemonic(), symbol_fn(self.rm), symbol_fn(self.imm))
        elif self.family == InstructionFamily.ARTIH:
            return '{} {}, {}'.format(self.mnemonic(), symbol_fn(self.rm), symbol_fn(self.mem))
        elif self.family == InstructionFamily.JMP:
            if self.opcode == Opcode.JQ and self.rm == 0x0 and self.mem == 0x0 and self.address == self.label:
                return 'HF' # special case for halt-and-catch-fire
            return '{} {}, {}, {}'.format(self.mnemonic(), symbol_fn(self.rm), symbol_fn(self.mem), symbol_fn(self.label))

    @classmethod
    def decode(cls, address, instruction_memory):
        """Decode up to three consecutive words at *address* into an
        Instruction; shorter families ignore the trailing word(s)."""
        opcode = (instruction_memory[0] >> 12) & 0xF
        args = []
        args.append(instruction_memory[0] & 0xFFF)  # rm: low 12 bits of word 0
        args.append(instruction_memory[1])
        args.append(instruction_memory[2])
        return cls(address, opcode, *args)
class InvalidAccess(Exception):
    """
    Quick helper for invalid accesses
    """
    # Raised by invalid_access() when a forbidden I/O address is touched.
    pass
def invalid_access(state, addr, *args):
    """Interrupt handler for addresses that may not be read/written;
    always raises InvalidAccess."""
    message = 'Cannot read/write memory at addr {:x}'.format(addr)
    raise InvalidAccess(message)
def getchar_interrupt(state, addr, *args):
    """Read-interrupt handler: return one character of input from the
    state's input source (extra args are ignored)."""
    reader = state.get_input
    return reader()
def putchar_interrupt(state, addr, *args):
    """Write-interrupt handler: emit the written word as a character via
    the state's output sink."""
    written_word = args[0]
    state.add_output(chr(written_word))
class BreakpointHit(Exception):
    """
    Raise when a bp is hit
    """
    # Used by InteractiveDebugger.run() to unwind out of the execution loop.
    pass
class Memory:
    """List-backed memory that routes loads/stores of special I/O addresses
    through the owning State's interrupt tables."""

    def __init__(self, memory):
        self.memory = memory
        self.state = None # fill this in l8r (State.from_file wires this up)

    def __getitem__(self, key):
        # Slice reads bypass interrupts (used by instruction decoding).
        # NOTE(review): entries mapped to None in read_interrupts would raise
        # TypeError if read — presumably unimplemented devices; confirm.
        if not isinstance(key, slice) and \
           key in self.state.read_interrupts:
            return self.state.read_interrupts[key](self.state, key)
        return self.memory[key]

    def __setitem__(self, key, value):
        # Writes to interrupt addresses go to the handler, not to memory.
        if key in self.state.write_interrupts:
            self.state.write_interrupts[key](self.state, key, value)
            return
        self.memory[key] = value
class State:
    """CPU state: a Memory, a halt flag, memory-mapped I/O interrupt tables,
    and logs of all input/output that has passed through them.

    The interrupt tables map special addresses (0x200-0x20A) to handlers
    that are invoked instead of a plain memory read/write.
    """

    def __init__(self, memory):
        self.memory = memory
        self.halt = False  # is cpu halted
        # addr read -> function to call for value to be read
        self.read_interrupts = {
            0x200: invalid_access,     # reading output register
            0x201: getchar_interrupt,  # getchar
            0x202: invalid_access,     # gen key write
            0x203: None,               # gen key read
            0x204: invalid_access,     # load key write
            0x205: None,               # load key read
            0x206: invalid_access,     # mul write
            0x207: None,               # mul read
            0x208: None,               # rng
            0x209: invalid_access,     # set timeout
            0x20A: None,               # timeout addr
        }
        # addr written -> function to call with written value
        self.write_interrupts = {
            0x200: putchar_interrupt,  # putchar
            0x201: invalid_access,     # writing input register
            0x202: None,               # gen key write
            0x203: invalid_access,     # gen key read
            0x204: None,               # load key write
            0x205: invalid_access,     # load key read
            0x206: None,               # mul write
            0x207: invalid_access,     # mul read
            0x208: invalid_access,     # rng
            0x209: None,               # set timeout
            0x20A: None,               # timeout addr
        }
        self.all_input = ''   # everything read via getchar
        self.all_output = ''  # everything written via putchar

    def decode_instruction_at(self, addr):
        """Decode the instruction starting at *addr*."""
        # We need 3 words for jmp family instructions; shorter families
        # simply ignore the extra word(s).
        instr_memory = self.memory[addr : addr + 3]
        return Instruction.decode(addr, instr_memory)

    def instruction_pointer(self):
        """Current instruction pointer (memory-mapped at INSTR_PTR_LOC)."""
        return self.memory[INSTR_PTR_LOC]

    def stack_pointer(self):
        """Current stack pointer (memory-mapped at STACK_PTR_LOC)."""
        return self.memory[STACK_PTR_LOC]

    def step(self):
        """Execute one instruction at the current instruction pointer."""
        ip = self.instruction_pointer()
        instr = self.decode_instruction_at(ip)
        # Advance the IP first; jump instructions overwrite it below.
        self.memory[INSTR_PTR_LOC] = ip + instr.length
        if instr.opcode == Opcode.MI:
            self.memory[instr.rm] = instr.imm
        elif instr.opcode == Opcode.MV:
            self.memory[instr.rm] = self.memory[instr.mem]
        elif instr.opcode == Opcode.MD:
            self.memory[instr.rm] = self.memory[self.memory[instr.mem]]
        elif instr.opcode == Opcode.LD:
            self.memory[self.memory[instr.rm]] = self.memory[instr.mem]
        elif instr.opcode == Opcode.ST:
            self.memory[self.memory[instr.mem]] = self.memory[instr.rm]
        elif instr.opcode == Opcode.AD:
            # NOTE(review): arithmetic results are not masked to 16 bits here
            # (matches the original behaviour) — confirm whether the real CPU
            # wraps at 0xFFFF.
            self.memory[instr.rm] += self.memory[instr.mem]
        elif instr.opcode == Opcode.SB:
            self.memory[instr.rm] -= self.memory[instr.mem]
        elif instr.opcode == Opcode.ND:
            self.memory[instr.rm] &= self.memory[instr.mem]
        elif instr.opcode == Opcode.OR:
            self.memory[instr.rm] |= self.memory[instr.mem]
        elif instr.opcode == Opcode.XR:
            self.memory[instr.rm] ^= self.memory[instr.mem]
        elif instr.opcode == Opcode.SR:
            self.memory[instr.rm] >>= self.memory[instr.mem]
        elif instr.opcode == Opcode.SL:
            self.memory[instr.rm] <<= self.memory[instr.mem]
        elif instr.opcode == Opcode.SA:
            # Arithmetic shift right: reinterpret the 16-bit word as signed,
            # shift, then store the result back as an unsigned word.
            # BUGFIX: the original (a) passed `signed` positionally to
            # int.from_bytes (TypeError — it is keyword-only), (b) wrote
            # through an undefined name `memory`, and (c) stored the
            # unshifted value instead of the shift result.
            rm_val = self.memory[instr.rm]
            shift = self.memory[instr.mem]
            signed_val = int.from_bytes(
                rm_val.to_bytes(2, 'little'), 'little', signed=True)
            result = signed_val >> shift
            self.memory[instr.rm] = int.from_bytes(
                result.to_bytes(2, 'little', signed=True), 'little')
        elif instr.opcode == Opcode.JG:
            if self.memory[instr.rm] > self.memory[instr.mem]:
                self.memory[INSTR_PTR_LOC] = instr.label
        elif instr.opcode == Opcode.JL:
            if self.memory[instr.rm] < self.memory[instr.mem]:
                self.memory[INSTR_PTR_LOC] = instr.label
        elif instr.opcode == Opcode.JQ:
            if instr.rm == 0 and instr.mem == 0 and instr.label == ip:
                # halt-and-catch-fire: JQ 0, 0, <self> halts the CPU
                self.cpu_abort()
            elif self.memory[instr.rm] == self.memory[instr.mem]:
                self.memory[INSTR_PTR_LOC] = instr.label

    def run(self):
        """Run until the CPU halts."""
        while not self.halt:
            self.step()

    def cpu_abort(self):
        """Halt the CPU."""
        self.halt = True

    def get_input(self):
        """Read one character from stdin, recording it in all_input."""
        char = sys.stdin.read(1)
        self.all_input += char
        return ord(char)

    def add_output(self, value):
        """Write *value* (a one-character string) to stdout, flushing, and
        record it in all_output."""
        self.all_output += value
        sys.stdout.write(value)
        sys.stdout.flush()

    @classmethod
    def from_file(cls, filename):
        """Build a fresh State with *filename*'s contents loaded as code at
        CODE_BASE_ADDR and the IP/SP initialized."""
        with open(filename, 'rb') as f:
            contents = f.read()
        # reinterpret contents as an array of little-endian words
        code = []
        for i in range(0, len(contents), 2):
            word = int.from_bytes(contents[i : i + 2], 'little')
            code.append(word)
        # memory space, all 0's to start
        memory = [0 for _ in range(MEMORY_SIZE + 1)]
        memory[STACK_PTR_LOC] = 0x300           # initial stack pointer
        memory[INSTR_PTR_LOC] = CODE_BASE_ADDR  # execution starts at code base
        for offset, word in enumerate(code):
            memory[CODE_BASE_ADDR + offset] = word
        st = cls(Memory(memory))
        st.memory.state = st  # Memory needs a back-reference for interrupts
        return st
class InteractiveDebugger:
    """
    An interactive debugger.
    Commands:
        again: repeat last command (just hitting return also does this)
        r: run, start the program if not running
        s, ni, n: single step.
            - Takes argument of number of instructions to step (default 1)
        c: run until breakpoint.
        b: break at address.
            - for example, b 0x1234 creates a breakpoint at the instruction 0x1234
        d: delete a breakpoint by number.
        watch: add an address to the banner watch list.
        p: print expression. Takes a format specifier, defaulting to "word"
            - Format specifiers come in the form p/NX,
              where N is a number of consecutive version of the argument type to print,
              and X is the specifier.
              supports the following specifiers:
                 w, x: word
                 c: character
                 i: instruction
        load-labels: Load labels from a file (argument required)
    Expressions: a hex literal or a $symbol, optionally prefixed with * for
    dereference (e.g. *$ip).
    """

    def __init__(self, state):
        # Keep a pristine copy; restart() deep-copies it for reproducible runs.
        self.initial_state = state
        self.state = None
        self.running = False
        # Well-known addresses -> friendly names.
        self.symbols = {
            INSTR_PTR_LOC: 'IP',
            STACK_PTR_LOC: 'SP',
            0x3: 'SC0',
            0x4: 'SC1',
            0x200: 'OUTPUT',
            0x201: 'INPUT',
        }
        # rNN registers
        for i, x in enumerate(range(0x10, 0x40)):
            name = 'r{:02x}'.format(i)
            self.symbols[x] = name
        # sNN registers
        for i, x in enumerate(range(0x40, 0x100)):
            name = 's{:02x}'.format(i)
            self.symbols[x] = name
        self.breakpoints = {} # bp number -> bp addr
        self.highest_breakpoint_num = 0 # breakpoint #s monotonically increment
        self.last_command = ''
        # commands to run always (before each prompt)
        self.autocmds = [
            #'p/10i *$ip', # instruction leadup
        ]
        self.watch_addrs = [INSTR_PTR_LOC, STACK_PTR_LOC]

    def input_loop(self):
        """REPL: print the banner then execute one user command, forever."""
        while True:
            self.print_banner()
            self.handle_input()

    def handle_input(self):
        """
        Handle getting input, parsing it, executing commands
        """
        inp = input('ctdbg> ')
        self.runcmd(inp)

    def runcmd(self, cmd, user=True):
        """Parse *cmd* and dispatch it. Non-user invocations (autocmds and
        'again' expansion) don't update last_command."""
        def print_stuff(num, specifier, loc):
            # Render `num` consecutive items starting at `loc` formatted per
            # the single-character `specifier`.
            specifiers = {
                'x': lambda addr: hex(self.state.memory[addr]),
                'c': lambda addr: bytes([self.state.memory[addr]]),
                'i': lambda addr: hex(addr) + ': ' + self.state.decode_instruction_at(addr).nice_str(self.nice_format_addr),
            }
            specifiers['w'] = specifiers['x'] # w = x
            joinchrs = {
                'x': ' ',
                'c': b'',
                'i': '\n',
            }
            joinchrs['w'] = joinchrs['x'] # ditto to above
            try:
                fmt_fn = specifiers[specifier]
                # gather all outs into this nice lil array
                outs = []
                offset = 0
                for i in range(num):
                    addr = loc + offset
                    if specifier != 'i':
                        offset += 1
                    else:
                        # instructions are variable-length; advance by size
                        offset += self.state.decode_instruction_at(addr).length
                    outs.append((addr, fmt_fn(addr)))
                #TODO: decide ways of formatting based on length
                out = joinchrs[specifier] + joinchrs[specifier].join(y for x, y in outs)
                if specifier == 'c': # get a repr for non-printables :)
                    out = repr(out)
                return out
            except Exception as e:
                print("err> Error formatting:", e)

        def step(n):
            # Single-step n instructions, starting the program if needed.
            if not self.running:
                self.restart()
            if self.state.halt:
                print('err> STATE HALTED, USE r TO RESTART')
                return
            for _ in range(n):
                self.state.step()

        def run():
            # `r`: always restart from the pristine state, then run.
            self.restart()
            self.run()

        def add_watch(addr):
            self.watch_addrs.append(addr)
            return 'Added a watchpoint for {:x}'.format(addr)

        cmd_dict = {
            'again': lambda: self.runcmd(self.last_command),
            'r': lambda: run(), # different then self.run
            's': lambda n: step(n),
            'c': lambda: self.run(),
            'b': lambda target: self.add_breakpoint(target),
            'p': lambda num, specifier, loc: print_stuff(num, specifier, loc),
            'd': lambda num: self.del_breakpoint(num),
            'watch': add_watch,
            'load-labels': lambda filename: self.load_symbols_from_file(filename),
        }
        try:
            parsed = self.parse_command(cmd)
            cmd_fn = cmd_dict[parsed[0]]
            args = parsed[1:]
            res = cmd_fn(*args)
            if res:
                print('out>', res)
            # only set last_command if user-initiated and not a repeat command
            if user and cmd not in {'again', ''}:
                self.last_command = cmd
        except ValueError as e:
            print('err> Unknown command:', cmd)
            print(e)

    def run(self):
        """Run until halt, a breakpoint, or Ctrl-C."""
        if not self.running:
            self.restart()
        try:
            while not self.state.halt:
                self.state.step()
                # bp check
                if self.state.instruction_pointer() in self.breakpoints.values():
                    raise BreakpointHit()
        except BreakpointHit:
            print('out> breakpoint hit!!')
        except KeyboardInterrupt:
            print('out> paused!!')
        if self.state.halt:
            self.running = False
            print('out> program hlt!!')

    def nice_format_addr(self, addr):
        """
        Format an address in the nicest way we can figure out
        """
        if addr in self.symbols:
            return '{} (0x{:x})'.format(self.symbols[addr], addr)
        return hex(addr)

    def print_banner(self):
        """
        Print a banner of watched variables, plus some other info
        (if cpu is halted, upcoming instructions, etc)
        """
        print('<===================')
        if self.state:
            for addr in self.watch_addrs:
                nice_addr, val = self.nice_format_addr(addr), self.state.memory[addr]
                print('{} = {:x}'.format(nice_addr, val))
            if self.state.halt:
                print('CPU HALTED')
            print('\n====================\n')
            for cmd in self.autocmds:
                try:
                    self.runcmd(cmd, user=False)
                except Exception as e:
                    pass
        else:
            print('Not running!')
        print('===================>')

    def parse_command(self, cmd):
        """Split a raw command line into a (name, *args) tuple understood by
        runcmd's dispatch table. Raises ValueError on bad/unknown input."""
        operation = cmd.split(' ')[0]
        arg = ' '.join(cmd.split(' ')[1:])
        if operation == '' or operation == 'again':
            return ('again',) # do last command again
        elif operation == 'r':
            return ('r',)
        elif operation in {'s', 'ni', 'n'}:
            if arg:
                count = self.parse_expression(arg)
            else:
                count = 1
            # BUGFIX: the parsed count was previously discarded and a
            # constant 1 returned, so `s 5` only stepped once.
            return ('s', count)
        elif operation == 'c':
            return ('c',)
        elif operation == 'b':
            if not arg:
                raise ValueError('Argument required for b command')
            return ('b', self.parse_expression(arg))
        elif operation == 'd':
            # BUGFIX: `d` existed in runcmd's dispatch table but had no
            # parse rule, so breakpoints could never actually be deleted.
            if not arg:
                raise ValueError('Argument required for d command')
            return ('d', int(arg, 0))
        elif operation == 'watch':
            if not arg:
                raise ValueError('Argument required for watch command')
            return ('watch', self.parse_expression(arg))
        elif operation[0] in {'p', 'x'}:
            if '/' in operation:
                # p/NX -> N consecutive items with specifier X
                p_len = operation[operation.index('/') + 1 : -1]
                if p_len:
                    p_len = int(p_len)
                else:
                    p_len = 1
                p_specifier = operation[-1]
            else:
                p_len = 1
                p_specifier = 'w'
            return ('p', p_len, p_specifier, self.parse_expression(arg))
        elif operation == 'load-labels':
            if arg:
                return ('load-labels', arg)
            raise ValueError('Argument required for load-labels command')
        else:
            raise ValueError('Unknown command!')

    def parse_expression(self, expression):
        """Evaluate a debugger expression: $symbols are rewritten to their
        hex addresses, the remainder is parsed as hex, and a leading *
        dereferences through memory."""
        #TODO: * for deref, + and -
        # TODO: better gooder parsing?
        expression = expression.lower()
        for addr, name in self.symbols.items():
            expression = expression.replace('$' + name.lower(), hex(addr))
        needs_deref = False
        if expression[0] == '*':
            expression = expression[1:]
            needs_deref = True
        val = int(expression, 16)
        if needs_deref:
            val = self.state.memory[val]
        return val

    def add_breakpoint(self, addr):
        """Create a breakpoint at *addr*; returns its breakpoint number."""
        max_bp = self.highest_breakpoint_num
        bp_num = max_bp + 1
        self.breakpoints[bp_num] = addr
        self.highest_breakpoint_num += 1
        return bp_num

    def del_breakpoint(self, num):
        """Remove breakpoint *num* (KeyError if it doesn't exist)."""
        self.breakpoints.pop(num)

    def restart(self):
        """
        Restart the program under test (basically the `r` command)
        """
        self.state = deepcopy(self.initial_state)
        self.running = True

    def load_symbols_from_file(self, filename):
        """Load `addr: name` pairs from *filename* into the symbol table."""
        new_symbols = {}
        with open(filename, 'r') as f:
            for line in f:
                if line.strip():
                    addr, name = line.split(':')
                    addr = int(addr, 0)
                    name = name.strip()
                    new_symbols[addr] = name
        print('Loaded {} symbols'.format(len(new_symbols)))
        self.symbols.update(new_symbols)
def main(argv):
    """Entry point: load a ROM image and drop into the interactive debugger."""
    if len(argv) != 2:
        print('ERROR: ROM argument required!')
        return
    state = State.from_file(argv[1])
    debugger = InteractiveDebugger(state)
    debugger.runcmd('load-labels symbols')
    debugger.input_loop()
if __name__ == '__main__':
main(sys.argv)
| Hypersonic/CyberTronix64k | ct64k_dbg.py | ct64k_dbg.py | py | 21,766 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sys.stdin.read",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 29... |
73033882273 | # -*- coding: utf-8 -*-
'''
A salt module for SSL/TLS.
Can create a Certificate Authority (CA)
or use Self-Signed certificates.
:depends: - PyOpenSSL Python module (0.10 or later, 0.14 or later for
X509 extension support)
:configuration: Add the following values in /etc/salt/minion for the CA module
to function properly::
ca.cert_base_path: '/etc/pki'
CLI Example #1
Creating a CA, a server request and its signed certificate:
.. code-block:: bash
# salt-call tls.create_ca my_little \
days=5 \
CN='My Little CA' \
C=US \
ST=Utah \
L=Salt Lake City \
O=Saltstack \
emailAddress=pleasedontemail@thisisnot.coms
Created Private Key: "/etc/pki/my_little/my_little_ca_cert.key"
Created CA "my_little_ca": "/etc/pki/my_little_ca/my_little_ca_cert.crt"
# salt-call tls.create_csr my_little CN=www.thisisnot.coms
Created Private Key: "/etc/pki/my_little/certs/www.thisisnot.coms.key
Created CSR for "www.thisisnot.coms": "/etc/pki/my_little/certs/www.thisisnot.coms.csr"
# salt-call tls.create_ca_signed_cert my_little CN=www.thisisnot.coms
Created Certificate for "www.thisisnot.coms": /etc/pki/my_little/certs/www.thisisnot.coms.crt"
CLI Example #2:
Creating a client request and its signed certificate
.. code-block:: bash
# salt-call tls.create_csr my_little CN=DBReplica_No.1 cert_type=client
Created Private Key: "/etc/pki/my_little/certs//DBReplica_No.1.key."
Created CSR for "DBReplica_No.1": "/etc/pki/my_little/certs/DBReplica_No.1.csr."
# salt-call tls.create_ca_signed_cert my_little CN=DBReplica_No.1
Created Certificate for "DBReplica_No.1": "/etc/pki/my_little/certs/DBReplica_No.1.crt"
CLI Example #3:
Creating both a server and client req + cert for the same CN
.. code-block:: bash
# salt-call tls.create_csr my_little CN=MasterDBReplica_No.2 \
cert_type=client
Created Private Key: "/etc/pki/my_little/certs/MasterDBReplica_No.2.key."
Created CSR for "DBReplica_No.1": "/etc/pki/my_little/certs/MasterDBReplica_No.2.csr."
# salt-call tls.create_ca_signed_cert my_little CN=MasterDBReplica_No.2
Created Certificate for "DBReplica_No.1": "/etc/pki/my_little/certs/DBReplica_No.1.crt"
# salt-call tls.create_csr my_little CN=MasterDBReplica_No.2 \
cert_type=server
Certificate "MasterDBReplica_No.2" already exists
(doh!)
# salt-call tls.create_csr my_little CN=MasterDBReplica_No.2 \
cert_type=server type_ext=True
Created Private Key: "/etc/pki/my_little/certs/DBReplica_No.1_client.key."
Created CSR for "DBReplica_No.1": "/etc/pki/my_little/certs/DBReplica_No.1_client.csr."
# salt-call tls.create_ca_signed_cert my_little CN=MasterDBReplica_No.2
Certificate "MasterDBReplica_No.2" already exists
(DOH!)
# salt-call tls.create_ca_signed_cert my_little CN=MasterDBReplica_No.2 \
cert_type=server type_ext=True
Created Certificate for "MasterDBReplica_No.2": "/etc/pki/my_little/certs/MasterDBReplica_No.2_server.crt"
CLI Example #4:
Create a server req + cert with non-CN filename for the cert
.. code-block:: bash
# salt-call tls.create_csr my_little CN=www.anothersometh.ing \
cert_type=server type_ext=True
Created Private Key: "/etc/pki/my_little/certs/www.anothersometh.ing_server.key."
Created CSR for "DBReplica_No.1": "/etc/pki/my_little/certs/www.anothersometh.ing_server.csr."
# salt-call tls_create_ca_signed_cert my_little CN=www.anothersometh.ing \
cert_type=server cert_filename="something_completely_different"
Created Certificate for "www.anothersometh.ing": /etc/pki/my_little/certs/something_completely_different.crt
'''
from __future__ import absolute_import
# pylint: disable=C0103
# Import python libs
import os
import time
import calendar
import logging
import hashlib
import salt.utils
from salt._compat import string_types
from salt.ext.six.moves import range as _range
from datetime import datetime
from distutils.version import LooseVersion
import re
HAS_SSL = False
X509_EXT_ENABLED = True
try:
import OpenSSL
HAS_SSL = True
OpenSSL_version = LooseVersion(OpenSSL.__dict__.get('__version__', '0.0'))
except ImportError:
pass
# Import salt libs
log = logging.getLogger(__name__)
two_digit_year_fmt = "%y%m%d%H%M%SZ"
four_digit_year_fmt = "%Y%m%d%H%M%SZ"
def __virtual__():
    '''
    Only load this module if the ca config options are set
    '''
    global X509_EXT_ENABLED
    if HAS_SSL and OpenSSL_version >= LooseVersion('0.10'):
        if OpenSSL_version < LooseVersion('0.14'):
            # X509 extension support requires pyOpenSSL >= 0.14; degrade
            # gracefully rather than refusing to load.
            X509_EXT_ENABLED = False
            log.error('You should upgrade pyOpenSSL to at least 0.14.1 '
                      'to enable the use of X509 extensions')
        elif OpenSSL_version <= LooseVersion('0.15'):
            log.warn('You should upgrade pyOpenSSL to at least 0.15.1 '
                     'to enable the full use of X509 extensions')
        # never EVER reactivate this code, this has been done too many times.
        # not having configured a cert path in the configuration does not
        # mean that users cant use this module as we provide methods
        # to configure it afterwards.
        # if __opts__.get('ca.cert_base_path', None):
        #     return True
        # else:
        #     log.error('tls module not loaded: ca.cert_base_path not set')
        #     return False
        return True
    else:
        X509_EXT_ENABLED = False
        return False, ['PyOpenSSL version 0.10 or later must be installed '
                       'before this module can be used.']
def cert_base_path(cacert_path=None):
    '''
    Return the base path for certs from CLI or from options

    cacert_path
        absolute path to ca certificates root directory

    CLI Example:

    .. code-block:: bash

        salt '*' tls.cert_base_path
    '''
    if not cacert_path:
        # Prefer the contextual override (set by set_ca_path) before the
        # statically configured value.
        cacert_path = __context__.get(
            'ca.contextual_cert_base_path',
            __salt__['config.option']('ca.contextual_cert_base_path'))
    if not cacert_path:
        cacert_path = __context__.get(
            'ca.cert_base_path',
            __salt__['config.option']('ca.cert_base_path'))
    return cacert_path
def _cert_base_path(cacert_path=None):
    '''
    Retrocompatible wrapper around :func:`cert_base_path`.
    '''
    return cert_base_path(cacert_path=cacert_path)
def set_ca_path(cacert_path):
    '''
    If wanted, store the aforementioned cacert_path in context
    to be used as the basepath for further operations

    CLI Example:

    .. code-block:: bash

        salt '*' tls.set_ca_path /etc/certs
    '''
    if cacert_path:
        __context__['ca.contextual_cert_base_path'] = cacert_path
    # Always return the effective base path (contextual or configured).
    return cert_base_path()
def _new_serial(ca_name, CN):
    '''
    Return a serial number in hex using md5sum, based upon the ca_name and
    CN values

    ca_name
        name of the CA
    CN
        common name in the request
    '''
    # The hash algorithm is configurable via minion opts; md5 by default.
    opts_hash_type = __opts__.get('hash_type', 'md5')
    hashtype = getattr(hashlib, opts_hash_type)
    # Mix in the current UTC time so repeated requests get distinct serials.
    # NOTE(review): hashing a str works on Python 2; Python 3 would require
    # .encode() here — confirm the target runtime.
    hashnum = int(
            hashtype(
                '{0}_{1}_{2}'.format(
                    ca_name,
                    CN,
                    int(calendar.timegm(time.gmtime())))
                ).hexdigest(),
            16
            )
    log.debug('Hashnum: {0}'.format(hashnum))

    # record the hash somewhere
    cachedir = __opts__['cachedir']
    log.debug('cachedir: {0}'.format(cachedir))
    serial_file = '{0}/{1}.serial'.format(cachedir, ca_name)
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    # Append to an existing serial file; create it otherwise.
    if not os.path.exists(serial_file):
        fd = salt.utils.fopen(serial_file, 'w')
    else:
        fd = salt.utils.fopen(serial_file, 'a+')
    with fd as ofile:
        ofile.write(str(hashnum))

    return hashnum
def _four_digit_year_to_two_digit(datetimeObj):
    # Re-render a datetime using the module-level two_digit_year_fmt
    # (the two-digit-year counterpart of four_digit_year_fmt) for the
    # expiry column of index.txt. NOTE(review): two_digit_year_fmt is
    # defined outside this view — presumably '%y%m%d%H%M%SZ'; confirm.
    return datetimeObj.strftime(two_digit_year_fmt)
def _get_basic_info(ca_name, cert, ca_dir=None):
    '''
    Get basic info to write out to the index.txt
    '''
    if ca_dir is None:
        ca_dir = '{0}/{1}'.format(_cert_base_path(), ca_name)

    index_file = "{0}/index.txt".format(ca_dir)

    expire_date = _four_digit_year_to_two_digit(
        datetime.strptime(
            cert.get_notAfter(),
            four_digit_year_fmt)
    )
    serial_number = format(cert.get_serial_number(), 'X')

    # index.txt subjects are '/'-prefixed, '/'-separated and
    # newline-terminated: /C=US/ST=Utah/...
    pairs = ('{0}={1}'.format(field, value)
             for field, value in cert.get_subject().get_components())
    subject = '/' + '/'.join(pairs) + '\n'

    return (index_file, expire_date, serial_number, subject)
def _write_cert_to_database(ca_name, cert, cacert_path=None, status='V'):
    '''
    write out the index.txt database file in the appropriate directory to
    track certificates

    ca_name
        name of the CA
    cert
        certificate to be recorded
    '''
    set_ca_path(cacert_path)
    ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
    index_file, expire_date, serial_number, subject = _get_basic_info(
        ca_name, cert, ca_dir)

    # Tab-separated openssl index.txt record: status, expiry date,
    # revocation date (empty while valid), serial, filename, subject.
    fields = (status, expire_date, '', serial_number, 'unknown', subject)
    with salt.utils.fopen(index_file, 'a+') as entry:
        entry.write('\t'.join(fields))
def maybe_fix_ssl_version(ca_name, cacert_path=None, ca_filename=None):
    '''
    Check that the X509 version is correct
    (was incorrectly set in previous salt versions).
    This will fix the version if needed.

    ca_name
        ca authority name
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' tls.maybe_fix_ssl_version test_ca /etc/certs
    '''
    set_ca_path(cacert_path)
    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    certp = '{0}/{1}/{2}.crt'.format(
        cert_base_path(),
        ca_name,
        ca_filename)
    ca_keyp = '{0}/{1}/{2}.key'.format(
        cert_base_path(),
        ca_name,
        ca_filename)
    with salt.utils.fopen(certp) as fic:
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                               fic.read())
        # create_ca() below writes version 2 (x509 "v3" is zero-indexed in
        # pyOpenSSL), so a stored value of 3 marks a cert written with the
        # off-by-one bug from earlier salt releases: regenerate it.
        if cert.get_version() == 3:
            log.info(
                'Regenerating wrong x509 version '
                'for certificate {0}'.format(certp))
            with salt.utils.fopen(ca_keyp) as fic2:
                try:
                    # try to determine the key bits
                    key = OpenSSL.crypto.load_privatekey(
                        OpenSSL.crypto.FILETYPE_PEM, fic2.read())
                    bits = key.bits()
                except Exception:
                    # unreadable/unparseable key: fall back to a sane size
                    bits = 2048
                try:
                    # preserve the remaining lifetime of the bad cert
                    days = (datetime.strptime(
                        cert.get_notAfter(),
                        '%Y%m%d%H%M%SZ') - datetime.utcnow()).days
                except (ValueError, TypeError):
                    days = 365
                subj = cert.get_subject()
                # Re-create the CA in place with the original subject,
                # key size and lifetime; fixmode skips the already-exists
                # short-circuit inside create_ca().
                create_ca(
                    ca_name,
                    bits=bits,
                    days=days,
                    CN=subj.CN,
                    C=subj.C,
                    ST=subj.ST,
                    L=subj.L,
                    O=subj.O,
                    OU=subj.OU,
                    emailAddress=subj.emailAddress,
                    fixmode=True)
def ca_exists(ca_name, cacert_path=None, ca_filename=None):
    '''
    Verify whether a Certificate Authority (CA) already exists

    ca_name
        name of the CA
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' tls.ca_exists test_ca /etc/certs
    '''
    set_ca_path(cacert_path)
    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    certp = '{0}/{1}/{2}.crt'.format(
        cert_base_path(),
        ca_name,
        ca_filename)
    if not os.path.exists(certp):
        return False
    # The CA is present; opportunistically repair certs written with the
    # wrong x509 version by earlier salt releases before reporting success.
    maybe_fix_ssl_version(ca_name,
                          cacert_path=cacert_path,
                          ca_filename=ca_filename)
    return True
def _ca_exists(ca_name, cacert_path=None):
    '''Retrocompatible wrapper; delegates to :func:`ca_exists`.'''
    return ca_exists(ca_name, cacert_path)
def get_ca(ca_name, as_text=False, cacert_path=None):
    '''
    Get the certificate path or content

    ca_name
        name of the CA
    as_text
        if true, return the certificate content instead of the path
    cacert_path
        absolute path to ca certificates root directory

    CLI Example:

    .. code-block:: bash

        salt '*' tls.get_ca test_ca as_text=False cacert_path=/etc/certs
    '''
    set_ca_path(cacert_path)
    certp = '{0}/{1}/{2}_ca_cert.crt'.format(
        cert_base_path(),
        ca_name,
        ca_name)
    # Fail loudly if the CA was never created
    if not os.path.exists(certp):
        raise ValueError('Certificate does not exist for {0}'.format(ca_name))
    if not as_text:
        return certp
    with salt.utils.fopen(certp) as fic:
        return fic.read()
def get_ca_signed_cert(ca_name,
                       CN='localhost',
                       as_text=False,
                       cacert_path=None,
                       cert_filename=None):
    '''
    Get the certificate path or content

    ca_name
        name of the CA
    CN
        common name of the certificate
    as_text
        if true, return the certificate content instead of the path
    cacert_path
        absolute path to certificates root directory
    cert_filename
        alternative filename for the certificate, useful when using special
        characters in the CN

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' tls.get_ca_signed_cert test_ca CN=localhost as_text=False cacert_path=/etc/certs
    '''
    set_ca_path(cacert_path)
    certp = '{0}/{1}/certs/{2}.crt'.format(
        cert_base_path(),
        ca_name,
        cert_filename or CN)
    if not os.path.exists(certp):
        raise ValueError('Certificate does not exists for {0}'.format(CN))
    if not as_text:
        return certp
    with salt.utils.fopen(certp) as fic:
        return fic.read()
def get_ca_signed_key(ca_name,
                      CN='localhost',
                      as_text=False,
                      cacert_path=None,
                      key_filename=None):
    '''
    Get the certificate path or content

    ca_name
        name of the CA
    CN
        common name of the certificate
    as_text
        if true, return the certificate content instead of the path
    cacert_path
        absolute path to certificates root directory
    key_filename
        alternative filename for the key, useful when using special
        characters in the CN

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' tls.get_ca_signed_key \
                test_ca CN=localhost \
                as_text=False \
                cacert_path=/etc/certs
    '''
    set_ca_path(cacert_path)
    keyp = '{0}/{1}/certs/{2}.key'.format(
        cert_base_path(),
        ca_name,
        key_filename or CN)
    if not os.path.exists(keyp):
        raise ValueError('Certificate does not exists for {0}'.format(CN))
    if not as_text:
        return keyp
    with salt.utils.fopen(keyp) as fic:
        return fic.read()
def _check_onlyif_unless(onlyif, unless):
    '''
    Evaluate the ``onlyif``/``unless`` guards used by the create_* functions.

    Returns None when the caller should proceed, otherwise a dict with a
    ``comment`` explaining why the operation was skipped and ``result`` True.
    '''
    ret = None
    retcode = __salt__['cmd.retcode']
    if onlyif is not None:
        # A string is shelled out via cmd.retcode; anything else is
        # evaluated for plain truthiness.
        if isinstance(onlyif, string_types):
            if retcode(onlyif) != 0:
                ret = {'comment': 'onlyif execution failed', 'result': True}
                log.debug('onlyif execution failed')
        elif not onlyif:
            ret = {'comment': 'onlyif execution failed', 'result': True}
    if unless is not None:
        if isinstance(unless, string_types):
            if retcode(unless) == 0:
                ret = {'comment': 'unless execution succeeded', 'result': True}
                log.debug('unless execution succeeded')
        elif unless:
            ret = {'comment': 'unless execution succeeded', 'result': True}
    return ret
def create_ca(ca_name,
              bits=2048,
              days=365,
              CN='localhost',
              C='US',
              ST='Utah',
              L='Salt Lake City',
              O='SaltStack',
              OU=None,
              emailAddress='xyz@pdq.net',
              fixmode=False,
              cacert_path=None,
              ca_filename=None,
              digest='sha256',
              onlyif=None,
              unless=None,
              replace=False):
    '''
    Create a Certificate Authority (CA)

    ca_name
        name of the CA
    bits
        number of RSA key bits, default is 2048
    days
        number of days the CA will be valid, default is 365
    CN
        common name in the request, default is "localhost"
    C
        country, default is "US"
    ST
        state, default is "Utah"
    L
        locality, default is "Salt Lake City"
    O
        organization, default is "SaltStack"
    OU
        organizational unit, default is None
    emailAddress
        email address for the CA owner, default is 'xyz@pdq.net'
    fixmode
        if True, rewrite an existing CA certificate in place (used by
        maybe_fix_ssl_version); default is False
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'
    onlyif
        shell command or value gating execution (see _check_onlyif_unless)
    unless
        shell command or value gating execution (see _check_onlyif_unless)
    replace
        Replace this certificate even if it exists

        .. versionadded:: 2015.5.1

    Writes out a CA certificate based upon defined config values. If the file
    already exists, the function just returns assuming the CA certificate
    already exists.

    If the following values were set::

        ca.cert_base_path='/etc/pki'
        ca_name='koji'

    the resulting CA, and corresponding key, would be written in the following
    location::

        /etc/pki/koji/koji_ca_cert.crt
        /etc/pki/koji/koji_ca_cert.key

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_ca test_ca
    '''
    # onlyif/unless gate: a non-None status means "skip"
    status = _check_onlyif_unless(onlyif, unless)
    if status is not None:
        return None
    set_ca_path(cacert_path)

    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)

    certp = '{0}/{1}/{2}.crt'.format(
        cert_base_path(), ca_name, ca_filename)
    ca_keyp = '{0}/{1}/{2}.key'.format(
        cert_base_path(), ca_name, ca_filename)
    if not replace and not fixmode and ca_exists(ca_name, ca_filename=ca_filename):
        return 'Certificate for CA named "{0}" already exists'.format(ca_name)

    # fixmode only repairs an existing certificate; refuse to "fix" nothing
    if fixmode and not os.path.exists(certp):
        raise ValueError('{0} does not exists, can\'t fix'.format(certp))

    if not os.path.exists('{0}/{1}'.format(
        cert_base_path(), ca_name)
    ):
        os.makedirs('{0}/{1}'.format(cert_base_path(),
                                     ca_name))

    # try to reuse existing ssl key
    key = None
    if os.path.exists(ca_keyp):
        with salt.utils.fopen(ca_keyp) as fic2:
            # try to determine the key bits
            key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM, fic2.read())
    if not key:
        key = OpenSSL.crypto.PKey()
        key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)

    ca = OpenSSL.crypto.X509()
    # pyOpenSSL versions are zero-indexed: 2 == x509 "v3"
    ca.set_version(2)
    ca.set_serial_number(_new_serial(ca_name, CN))
    ca.get_subject().C = C
    ca.get_subject().ST = ST
    ca.get_subject().L = L
    ca.get_subject().O = O
    if OU:
        ca.get_subject().OU = OU
    ca.get_subject().CN = CN
    ca.get_subject().emailAddress = emailAddress

    ca.gmtime_adj_notBefore(0)
    ca.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
    # self-signed root: the CA is its own issuer, signed with its own key
    ca.set_issuer(ca.get_subject())
    ca.set_pubkey(key)

    if X509_EXT_ENABLED:
        ca.add_extensions([
            OpenSSL.crypto.X509Extension('basicConstraints', True,
                                         'CA:TRUE, pathlen:0'),
            OpenSSL.crypto.X509Extension('keyUsage', True,
                                         'keyCertSign, cRLSign'),
            OpenSSL.crypto.X509Extension('subjectKeyIdentifier', False,
                                         'hash', subject=ca)])

        # authorityKeyIdentifier must be added in a second pass because it
        # references the (partially built) certificate as its issuer
        ca.add_extensions([
            OpenSSL.crypto.X509Extension(
                'authorityKeyIdentifier',
                False,
                'issuer:always,keyid:always',
                issuer=ca)])
    ca.sign(key, digest)

    # alway backup existing keys in case
    keycontent = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                                key)
    write_key = True
    if os.path.exists(ca_keyp):
        bck = "{0}.{1}".format(ca_keyp, datetime.utcnow().strftime(
            "%Y%m%d%H%M%S"))
        with salt.utils.fopen(ca_keyp) as fic:
            old_key = fic.read().strip()
            if old_key.strip() == keycontent.strip():
                # key on disk is the one we just used: nothing to rewrite
                write_key = False
            else:
                log.info('Saving old CA ssl key in {0}'.format(bck))
                with salt.utils.fopen(bck, 'w') as bckf:
                    bckf.write(old_key)
                    # keep the backup private: owner read/write only
                    os.chmod(bck, 0o600)
    if write_key:
        with salt.utils.fopen(ca_keyp, 'w') as ca_key:
            ca_key.write(keycontent)

    with salt.utils.fopen(certp, 'w') as ca_crt:
        ca_crt.write(
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))

    _write_cert_to_database(ca_name, ca)

    ret = ('Created Private Key: "{0}/{1}/{2}.key." ').format(
        cert_base_path(), ca_name, ca_filename)
    ret += ('Created CA "{0}": "{1}/{2}/{3}.crt."').format(
        ca_name, cert_base_path(), ca_name, ca_filename)

    return ret
def get_extensions(cert_type):
    '''
    Fetch X509 and CSR extension definitions from tls:extensions:
    (common|server|client) or set them to standard defaults.

    .. versionadded:: Beryllium

    cert_type:
        The type of certificate such as ``server`` or ``client``.

    CLI Example:

    .. code-block:: bash

        salt '*' tls.get_extensions client
    '''
    assert X509_EXT_ENABLED, ('X509 extensions are not supported in '
                              'pyOpenSSL prior to version 0.15.1. Your '
                              'version: {0}'.format(OpenSSL_version))

    ext = {}
    if cert_type == '':
        log.error('cert_type set to empty in tls_ca.get_extensions(); '
                  'defaulting to ``server``')
        cert_type = 'server'

    try:
        ext['common'] = __salt__['pillar.get']('tls.extensions:common', False)
    except NameError as err:
        log.debug(err)

    # Use .get() here and below: when the pillar lookup above raised
    # NameError (no salt context), the key was never assigned and the
    # original ext['common'] test raised KeyError instead of falling back
    # to the defaults.
    if not ext.get('common'):
        ext['common'] = {
            'csr': {
                'basicConstraints': 'CA:FALSE',
            },
            'cert': {
                'authorityKeyIdentifier': 'keyid,issuer:always',
                'subjectKeyIdentifier': 'hash',
            },
        }

    try:
        ext['server'] = __salt__['pillar.get']('tls.extensions:server', False)
    except NameError as err:
        log.debug(err)

    if not ext.get('server'):
        ext['server'] = {
            'csr': {
                'extendedKeyUsage': 'serverAuth',
                'keyUsage': 'digitalSignature, keyEncipherment',
            },
            'cert': {},
        }

    try:
        ext['client'] = __salt__['pillar.get']('tls.extensions:client', False)
    except NameError as err:
        log.debug(err)

    if not ext.get('client'):
        ext['client'] = {
            'csr': {
                'extendedKeyUsage': 'clientAuth',
                'keyUsage': 'nonRepudiation, digitalSignature, keyEncipherment',
            },
            'cert': {},
        }

    # possible user-defined profile or a typo
    if cert_type not in ext:
        try:
            ext[cert_type] = __salt__['pillar.get'](
                'tls.extensions:{0}'.format(cert_type))
        except NameError as e:
            log.debug(
                'pillar, tls:extensions:{0} not available or '
                'not operating in a salt context\n{1}'.format(cert_type, e))

    retval = ext['common']

    # Overlay the type-specific extensions onto the common ones. Guarded
    # lookups keep a user-defined profile that is missing, None, or lacking
    # a 'csr'/'cert' section from raising KeyError/TypeError.
    for Use in retval:
        retval[Use].update((ext.get(cert_type) or {}).get(Use) or {})

    return retval
def create_csr(ca_name,
               bits=2048,
               CN='localhost',
               C='US',
               ST='Utah',
               L='Salt Lake City',
               O='SaltStack',
               OU=None,
               emailAddress='xyz@pdq.net',
               subjectAltName=None,
               cacert_path=None,
               ca_filename=None,
               csr_path=None,
               csr_filename=None,
               digest='sha256',
               type_ext=False,
               cert_type='server',
               replace=False):
    '''
    Create a Certificate Signing Request (CSR) for a
    particular Certificate Authority (CA)

    ca_name
        name of the CA
    bits
        number of RSA key bits, default is 2048
    CN
        common name in the request, default is "localhost"
    C
        country, default is "US"
    ST
        state, default is "Utah"
    L
        locality, default is "Salt Lake City"
    O
        organization, default is "SaltStack"
        NOTE: Must be the same as the CA certificate or an error will be
        raised
    OU
        organizational unit, default is None
    emailAddress
        email address for the request, default is 'xyz@pdq.net'
    subjectAltName
        valid subjectAltNames in full form, e.g. to add DNS entry you would
        call this function with this value:

        examples: ['DNS:somednsname.com',
                   'DNS:1.2.3.4',
                   'IP:1.2.3.4',
                   'IP:2001:4801:7821:77:be76:4eff:fe11:e51',
                   'email:me@i.like.pie.com']

        .. note::
            some libraries do not properly query IP: prefixes, instead looking
            for the given req. source with a DNS: prefix. To be thorough, you
            may want to include both DNS: and IP: entries if you are using
            subjectAltNames for destinations for your TLS connections.
            e.g.:
            requests to https://1.2.3.4 will fail from python's
            requests library w/out the second entry in the above list

        .. versionadded:: Beryllium

    cert_type
        Specify the general certificate type. Can be either `server` or
        `client`. Indicates the set of common extensions added to the CSR.

        .. code-block:: cfg

            server: {
               'basicConstraints': 'CA:FALSE',
               'extendedKeyUsage': 'serverAuth',
               'keyUsage': 'digitalSignature, keyEncipherment'
            }

            client: {
               'basicConstraints': 'CA:FALSE',
               'extendedKeyUsage': 'clientAuth',
               'keyUsage': 'nonRepudiation, digitalSignature, keyEncipherment'
            }

    type_ext
        boolean. Whether or not to extend the filename with CN_[cert_type]
        This can be useful if a server and client certificate are needed for
        the same CN. Defaults to False to avoid introducing an unexpected file
        naming pattern

        The files normally named some_subject_CN.csr and some_subject_CN.key
        will then be saved

    replace
        Replace this signing request even if it exists

        .. versionadded:: 2015.5.1

    Writes out a Certificate Signing Request (CSR) If the file already
    exists, the function just returns assuming the CSR already exists.

    If the following values were set::

        ca.cert_base_path='/etc/pki'
        ca_name='koji'
        CN='test.egavas.org'

    the resulting CSR, and corresponding key, would be written in the
    following location::

        /etc/pki/koji/certs/test.egavas.org.csr
        /etc/pki/koji/certs/test.egavas.org.key

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_csr test
    '''
    set_ca_path(cacert_path)

    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)

    # the CSR must be signed later by an existing CA
    if not ca_exists(ca_name, ca_filename=ca_filename):
        return ('Certificate for CA named "{0}" does not exist, please create '
                'it first.').format(ca_name)

    if not csr_path:
        csr_path = '{0}/{1}/certs/'.format(cert_base_path(), ca_name)

    if not os.path.exists(csr_path):
        os.makedirs(csr_path)

    # optional "_server"/"_client" suffix so both cert types can coexist
    CN_ext = '_{0}'.format(cert_type) if type_ext else ''

    if not csr_filename:
        csr_filename = '{0}{1}'.format(CN, CN_ext)

    csr_f = '{0}/{1}.csr'.format(csr_path, csr_filename)

    if not replace and os.path.exists(csr_f):
        return 'Certificate Request "{0}" already exists'.format(csr_f)

    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)

    req = OpenSSL.crypto.X509Req()

    req.get_subject().C = C
    req.get_subject().ST = ST
    req.get_subject().L = L
    req.get_subject().O = O
    if OU:
        req.get_subject().OU = OU
    req.get_subject().CN = CN
    req.get_subject().emailAddress = emailAddress

    try:
        extensions = get_extensions(cert_type)['csr']

        extension_adds = []

        for ext, value in extensions.items():
            extension_adds.append(OpenSSL.crypto.X509Extension(ext, False,
                                                               value))

    except AssertionError as err:
        # get_extensions() asserts X509_EXT_ENABLED, so reaching here means
        # extensions are unsupported by this pyOpenSSL.
        # NOTE(review): extension_adds stays unbound on this path; that is
        # only safe because every later use is guarded by X509_EXT_ENABLED
        # (False whenever the assert fired) — confirm if this changes.
        log.error(err)
        extensions = []

    if subjectAltName:
        if X509_EXT_ENABLED:
            if isinstance(subjectAltName, str):
                subjectAltName = [subjectAltName]

            extension_adds.append(
                OpenSSL.crypto.X509Extension(
                    'subjectAltName', False, ", ".join(subjectAltName)))
        else:
            raise ValueError('subjectAltName cannot be set as X509 '
                             'extensions are not supported in pyOpenSSL '
                             'prior to version 0.15.1. Your '
                             'version: {0}.'.format(OpenSSL_version))

    if X509_EXT_ENABLED:
        req.add_extensions(extension_adds)

    req.set_pubkey(key)
    req.sign(key, digest)

    # Write private key and request
    with salt.utils.fopen('{0}/{1}.key'.format(csr_path,
                                               csr_filename), 'w+') as priv_key:
        priv_key.write(
            OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
        )

    with salt.utils.fopen(csr_f, 'w+') as csr:
        csr.write(
            OpenSSL.crypto.dump_certificate_request(
                OpenSSL.crypto.FILETYPE_PEM,
                req
            )
        )

    ret = 'Created Private Key: "{0}{1}.key." '.format(
        csr_path,
        csr_filename
    )
    ret += 'Created CSR for "{0}": "{1}{2}.csr."'.format(
        CN,
        csr_path,
        csr_filename
    )

    return ret
def create_self_signed_cert(tls_dir='tls',
                            bits=2048,
                            days=365,
                            CN='localhost',
                            C='US',
                            ST='Utah',
                            L='Salt Lake City',
                            O='SaltStack',
                            OU=None,
                            emailAddress='xyz@pdq.net',
                            cacert_path=None,
                            cert_filename=None,
                            digest='sha256',
                            replace=False):
    '''
    Create a Self-Signed Certificate (CERT)

    tls_dir
        location appended to the ca.cert_base_path, default is 'tls'
    bits
        number of RSA key bits, default is 2048
    days
        number of days the certificate will be valid, default is 365
    CN
        common name in the request, default is "localhost"
    C
        country, default is "US"
    ST
        state, default is "Utah"
    L
        locality, default is "Salt Lake City"
    O
        organization, default is "SaltStack"
        NOTE: Must be the same as the CA certificate or an error will be
        raised
    OU
        organizational unit, default is None
    emailAddress
        email address for the request, default is 'xyz@pdq.net'
    cacert_path
        absolute path to ca certificates root directory
    cert_filename
        alternative filename for the certificate; defaults to the CN
    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'
    replace
        Replace this certificate even if it exists

        .. versionadded:: 2015.5.1

    Writes out a Self-Signed Certificate (CERT). If the file already
    exists, the function just returns.

    If the following values were set::

        ca.cert_base_path='/etc/pki'
        tls_dir='koji'
        CN='test.egavas.org'

    the resulting CERT, and corresponding key, would be written in the
    following location::

        /etc/pki/koji/certs/test.egavas.org.crt
        /etc/pki/koji/certs/test.egavas.org.key

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_self_signed_cert

    Passing options from the command line:

    .. code-block:: bash

        salt 'minion' tls.create_self_signed_cert CN='test.mysite.org'
    '''
    set_ca_path(cacert_path)

    if not os.path.exists('{0}/{1}/certs/'.format(cert_base_path(), tls_dir)):
        os.makedirs("{0}/{1}/certs/".format(cert_base_path(),
                                            tls_dir))

    if not cert_filename:
        cert_filename = CN

    # never clobber an existing certificate unless replace=True
    if not replace and os.path.exists(
            '{0}/{1}/certs/{2}.crt'.format(cert_base_path(),
                                           tls_dir, cert_filename)
    ):
        return 'Certificate "{0}" already exists'.format(cert_filename)

    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)

    # create certificate
    cert = OpenSSL.crypto.X509()
    # pyOpenSSL is zero-indexed here: 2 == x509 "v3"
    cert.set_version(2)

    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)

    cert.get_subject().C = C
    cert.get_subject().ST = ST
    cert.get_subject().L = L
    cert.get_subject().O = O
    if OU:
        cert.get_subject().OU = OU
    cert.get_subject().CN = CN
    cert.get_subject().emailAddress = emailAddress

    cert.set_serial_number(_new_serial(tls_dir, CN))
    # self-signed: issuer is its own subject, signed with its own key
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(key)
    cert.sign(key, digest)

    # Write private key and cert
    with salt.utils.fopen(
        '{0}/{1}/certs/{2}.key'.format(cert_base_path(),
                                       tls_dir, cert_filename),
        'w+'
    ) as priv_key:
        priv_key.write(
            OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
        )

    with salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(cert_base_path(),
                                                         tls_dir,
                                                         cert_filename
                                                         ), 'w+') as crt:
        crt.write(
            OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                cert
            )
        )

    _write_cert_to_database(tls_dir, cert)

    ret = 'Created Private Key: "{0}/{1}/certs/{2}.key." '.format(
        cert_base_path(),
        tls_dir,
        cert_filename
    )
    ret += 'Created Certificate: "{0}/{1}/certs/{2}.crt."'.format(
        cert_base_path(),
        tls_dir,
        cert_filename
    )

    return ret
def create_ca_signed_cert(ca_name,
                          CN,
                          days=365,
                          cacert_path=None,
                          ca_filename=None,
                          cert_path=None,
                          cert_filename=None,
                          digest='sha256',
                          cert_type=None,
                          type_ext=False,
                          replace=False):
    '''
    Create a Certificate (CERT) signed by a named Certificate Authority (CA)

    If the certificate file already exists, the function just returns assuming
    the CERT already exists.

    The CN *must* match an existing CSR generated by create_csr. If it
    does not, this method does nothing.

    ca_name
        name of the CA
    CN
        common name matching the certificate signing request
    days
        number of days certificate is valid, default is 365 (1 year)
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    cert_path
        full path to the certificates directory
    cert_filename
        alternative filename for the certificate, useful when using special
        characters in the CN. If this option is set it will override
        the certificate filename output effects of ``cert_type``.
        ``type_ext`` will be completely overridden.

        .. versionadded:: 2015.5.3

    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'
    replace
        Replace this certificate even if it exists

        .. versionadded:: 2015.5.1

    cert_type
        string. Either 'server' or 'client' (see create_csr() for details).

        If create_csr(type_ext=True) this function **must** be called with the
        same cert_type so it can find the CSR file.

    .. note::
        create_csr() defaults to cert_type='server'; therefore, if it was also
        called with type_ext, cert_type becomes a required argument for
        create_ca_signed_cert()

    type_ext
        bool. If set True, use ``cert_type`` as an extension to the CN when
        formatting the filename.

        e.g.: some_subject_CN_server.crt or some_subject_CN_client.crt

        This facilitates the context where both types are required for the
        same subject

        If ``cert_filename`` is `not None`, setting ``type_ext`` has no
        effect

    If the following values were set:

    .. code-block:: text

        ca.cert_base_path='/etc/pki'
        ca_name='koji'
        CN='test.egavas.org'

    the resulting signed certificate would be written in the following
    location:

    .. code-block:: text

        /etc/pki/koji/certs/test.egavas.org.crt

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_ca_signed_cert test localhost
    '''
    ret = {}

    set_ca_path(cacert_path)

    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)

    if not cert_path:
        cert_path = '{0}/{1}/certs'.format(cert_base_path(), ca_name)

    # work out the "_server"/"_client" suffix used when locating the CSR
    if type_ext:
        if not cert_type:
            log.error('type_ext = True but cert_type is unset. '
                      'Certificate not written.')
            return ret
        elif cert_type:
            CN_ext = '_{0}'.format(cert_type)
    else:
        CN_ext = ''

    csr_filename = '{0}{1}'.format(CN, CN_ext)

    if not cert_filename:
        cert_filename = '{0}{1}'.format(CN, CN_ext)

    # split('/') + os.path.sep.join normalizes the path separators for the
    # existence check on non-POSIX platforms
    if not replace and os.path.exists(
            os.path.join(
                os.path.sep.join('{0}/{1}/certs/{2}.crt'.format(
                    cert_base_path(),
                    ca_name,
                    cert_filename).split('/')
                )
            )
    ):
        return 'Certificate "{0}" already exists'.format(cert_filename)

    try:
        maybe_fix_ssl_version(ca_name,
                              cacert_path=cacert_path,
                              ca_filename=ca_filename)
        with salt.utils.fopen('{0}/{1}/{2}.crt'.format(cert_base_path(),
                                                       ca_name,
                                                       ca_filename)) as fhr:
            ca_cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM, fhr.read()
            )
        with salt.utils.fopen('{0}/{1}/{2}.key'.format(cert_base_path(),
                                                       ca_name,
                                                       ca_filename)) as fhr:
            ca_key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
    except IOError:
        ret['retcode'] = 1
        ret['comment'] = 'There is no CA named "{0}"'.format(ca_name)
        return ret

    try:
        csr_path = '{0}/{1}.csr'.format(cert_path, csr_filename)
        with salt.utils.fopen(csr_path) as fhr:
            req = OpenSSL.crypto.load_certificate_request(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read())
    except IOError:
        ret['retcode'] = 1
        ret['comment'] = 'There is no CSR that matches the CN "{0}"'.format(
            cert_filename)
        return ret

    exts = []
    try:
        exts.extend(req.get_extensions())
    except AttributeError:
        try:
            # see: http://bazaar.launchpad.net/~exarkun/pyopenssl/master/revision/189
            # support is there from quite a long time, but without API
            # so we mimic the newly get_extensions method present in ultra
            # recent pyopenssl distros
            log.info('req.get_extensions() not supported in pyOpenSSL versions '
                     'prior to 0.15. Processing extensions internally. '
                     ' Your version: {0}'.format(
                         OpenSSL_version))

            # pull the extension stack out of the request via pyOpenSSL's
            # private cffi bindings, then wrap each raw extension in an
            # uninitialized X509Extension
            native_exts_obj = OpenSSL._util.lib.X509_REQ_get_extensions(
                req._req)
            for i in _range(OpenSSL._util.lib.sk_X509_EXTENSION_num(
                    native_exts_obj)):
                ext = OpenSSL.crypto.X509Extension.__new__(
                    OpenSSL.crypto.X509Extension)
                ext._extension = OpenSSL._util.lib.sk_X509_EXTENSION_value(
                    native_exts_obj,
                    i)
                exts.append(ext)
        except Exception:
            log.error('X509 extensions are unsupported in pyOpenSSL '
                      'versions prior to 0.14. Upgrade required to '
                      'use extensions. Current version: {0}'.format(
                          OpenSSL_version))

    cert = OpenSSL.crypto.X509()
    # pyOpenSSL is zero-indexed here: 2 == x509 "v3"
    cert.set_version(2)
    cert.set_subject(req.get_subject())
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
    cert.set_serial_number(_new_serial(ca_name, CN))
    # issued by the CA, carrying the CSR's public key and extensions
    cert.set_issuer(ca_cert.get_subject())
    cert.set_pubkey(req.get_pubkey())

    cert.add_extensions(exts)

    cert.sign(ca_key, digest)

    cert_full_path = '{0}/{1}.crt'.format(cert_path, cert_filename)
    with salt.utils.fopen(cert_full_path, 'w+') as crt:
        crt.write(
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))

    _write_cert_to_database(ca_name, cert)

    return ('Created Certificate for "{0}": '
            '"{1}/{2}.crt"').format(
                CN,
                cert_path,
                cert_filename
            )
def create_pkcs12(ca_name, CN, passphrase='', cacert_path=None, replace=False):
    '''
    Create a PKCS#12 browser certificate for a particular Certificate (CN)

    ca_name
        name of the CA
    CN
        common name matching the certificate signing request
    passphrase
        used to unlock the PKCS#12 certificate when loaded into the browser
    cacert_path
        absolute path to ca certificates root directory
    replace
        Replace this certificate even if it exists

        .. versionadded:: 2015.5.1

    If the following values were set::

        ca.cert_base_path='/etc/pki'
        ca_name='koji'
        CN='test.egavas.org'

    the resulting signed certificate would be written in the
    following location::

        /etc/pki/koji/certs/test.egavas.org.p12

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_pkcs12 test localhost
    '''
    set_ca_path(cacert_path)
    # never clobber an existing bundle unless replace=True
    if not replace and os.path.exists(
            '{0}/{1}/certs/{2}.p12'.format(
                cert_base_path(),
                ca_name,
                CN)
    ):
        return 'Certificate "{0}" already exists'.format(CN)

    try:
        with salt.utils.fopen('{0}/{1}/{2}_ca_cert.crt'.format(cert_base_path(),
                                                               ca_name,
                                                               ca_name)) as fhr:
            ca_cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)

    try:
        # load the previously CA-signed cert and its private key for the CN
        with salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(cert_base_path(),
                                                             ca_name,
                                                             CN)) as fhr:
            cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
        with salt.utils.fopen('{0}/{1}/certs/{2}.key'.format(cert_base_path(),
                                                             ca_name,
                                                             CN)) as fhr:
            key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
    except IOError:
        return 'There is no certificate that matches the CN "{0}"'.format(CN)

    # bundle key + cert + issuing CA chain into one browser-importable file
    pkcs12 = OpenSSL.crypto.PKCS12()

    pkcs12.set_certificate(cert)
    pkcs12.set_ca_certificates([ca_cert])
    pkcs12.set_privatekey(key)

    with salt.utils.fopen('{0}/{1}/certs/{2}.p12'.format(cert_base_path(),
                                                         ca_name,
                                                         CN), 'w') as ofile:
        ofile.write(pkcs12.export(passphrase=passphrase))

    return ('Created PKCS#12 Certificate for "{0}": '
            '"{1}/{2}/certs/{3}.p12"').format(
                CN,
                cert_base_path(),
                ca_name,
                CN
            )
def cert_info(cert_path, digest='sha256'):
    '''
    Return information for a particular certificate

    cert_path
        path to the cert file
    digest
        what digest to use for fingerprinting

    CLI Example:

    .. code-block:: bash

        salt '*' tls.cert_info /dir/for/certs/cert.pem
    '''
    # format that OpenSSL returns dates in
    date_fmt = '%Y%m%d%H%M%SZ'

    with salt.utils.fopen(cert_path) as cert_file:
        pem_data = cert_file.read()
    cert = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM,
        pem_data
    )

    def _asn1_to_epoch(asn1_time):
        # convert OpenSSL's ASN.1 timestamp into a Unix epoch
        return calendar.timegm(time.strptime(asn1_time, date_fmt))

    ret = {
        'fingerprint': cert.digest(digest),
        'subject': dict(cert.get_subject().get_components()),
        'issuer': dict(cert.get_issuer().get_components()),
        'serial_number': cert.get_serial_number(),
        'not_before': _asn1_to_epoch(cert.get_notBefore()),
        'not_after': _asn1_to_epoch(cert.get_notAfter()),
    }

    # add additional info if your version of pyOpenSSL supports it
    if hasattr(cert, 'get_extension_count'):
        extensions = {}
        for idx in _range(cert.get_extension_count()):
            extension = cert.get_extension(idx)
            extensions[extension.get_short_name()] = extension
        ret['extensions'] = extensions

    if 'subjectAltName' in ret.get('extensions', {}):
        valid_names = set()
        alt_names = ret['extensions']['subjectAltName'] \
            ._subjectAltNameString().split(", ")
        for name in alt_names:
            if name.startswith('DNS:'):
                valid_names.add(name[4:])
            else:
                log.error('Cert {0} has an entry ({1}) which does not start '
                          'with DNS:'.format(cert_path, name))
        ret['subject_alt_names'] = valid_names

    if hasattr(cert, 'get_signature_algorithm'):
        ret['signature_algorithm'] = cert.get_signature_algorithm()

    return ret
def create_empty_crl(
        ca_name,
        cacert_path=None,
        ca_filename=None,
        crl_file=None):
    '''
    Create an empty Certificate Revocation List.

    .. versionadded:: Beryllium

    ca_name
        name of the CA
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    crl_file
        full path to the CRL file

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_empty_crl ca_name='koji' \
                ca_filename='ca' \
                crl_file='/etc/openvpn/team1/crl.pem'
    '''
    set_ca_path(cacert_path)

    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)

    if not crl_file:
        crl_file = '{0}/{1}/crl.pem'.format(
            _cert_base_path(),
            ca_name
        )

    if os.path.exists('{0}'.format(crl_file)):
        return 'CRL "{0}" already exists'.format(crl_file)

    try:
        # Use context managers so the CA cert/key file handles are closed
        # promptly; the original passed fopen(...).read() inline and leaked
        # both file objects.
        with salt.utils.fopen('{0}/{1}/{2}.crt'.format(
                cert_base_path(),
                ca_name,
                ca_filename)) as fhr:
            ca_cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
        with salt.utils.fopen('{0}/{1}/{2}.key'.format(
                cert_base_path(),
                ca_name,
                ca_filename)) as fhr:
            ca_key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                fhr.read()
            )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)

    # a CRL with no revoked entries, signed by the CA
    crl = OpenSSL.crypto.CRL()
    crl_text = crl.export(ca_cert, ca_key)

    with salt.utils.fopen(crl_file, 'w') as f:
        f.write(crl_text)

    return 'Created an empty CRL: "{0}"'.format(crl_file)
def revoke_cert(
        ca_name,
        CN,
        cacert_path=None,
        ca_filename=None,
        cert_path=None,
        cert_filename=None,
        crl_file=None):
    '''
    Revoke a certificate.
    .. versionadded:: Beryllium
    ca_name
        Name of the CA.
    CN
        Common name matching the certificate signing request.
    cacert_path
        Absolute path to ca certificates root directory.
    ca_filename
        Alternative filename for the CA.
    cert_path
        Path to the cert file.
    cert_filename
        Alternative filename for the certificate, useful when using special
        characters in the CN.
    crl_file
        Full path to the CRL file.
    CLI Example:
    .. code-block:: bash
        salt '*' tls.revoke_cert ca_name='koji' \
                ca_filename='ca' \
                crl_file='/etc/openvpn/team1/crl.pem'
    '''
    set_ca_path(cacert_path)
    ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
    # Fill in the conventional defaults for the CA/cert file locations.
    if ca_filename is None:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    if cert_path is None:
        cert_path = '{0}/{1}/certs'.format(_cert_base_path(), ca_name)
    if cert_filename is None:
        cert_filename = '{0}'.format(CN)
    # Load the CA certificate and key needed to re-sign the CRL.
    try:
        ca_cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM,
            salt.utils.fopen('{0}/{1}/{2}.crt'.format(
                cert_base_path(),
                ca_name,
                ca_filename
            )).read()
        )
        ca_key = OpenSSL.crypto.load_privatekey(
            OpenSSL.crypto.FILETYPE_PEM,
            salt.utils.fopen('{0}/{1}/{2}.key'.format(
                cert_base_path(),
                ca_name,
                ca_filename)).read()
        )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)
    # Load the certificate being revoked.
    try:
        client_cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM,
            salt.utils.fopen('{0}/{1}.crt'.format(
                cert_path,
                cert_filename)).read()
        )
    except IOError:
        return 'There is no client certificate named "{0}"'.format(CN)
    index_file, expire_date, serial_number, subject = _get_basic_info(
        ca_name,
        client_cert,
        ca_dir)
    # OpenSSL index.txt layout: status, expiry, revocation date, serial,
    # 'unknown', subject -- build the V (valid) line and the R (revoked)
    # line/pattern for this certificate.
    index_serial_subject = '{0}\tunknown\t{1}'.format(
        serial_number,
        subject)
    index_v_data = 'V\t{0}\t\t{1}'.format(
        expire_date,
        index_serial_subject)
    index_r_data_pattern = re.compile(
        r"R\t" +
        expire_date +
        r"\t\d{12}Z\t" +
        re.escape(index_serial_subject))
    index_r_data = 'R\t{0}\t{1}\t{2}'.format(
        expire_date,
        _four_digit_year_to_two_digit(datetime.utcnow()),
        index_serial_subject)
    ret = {}
    # Pass 1: flip this certificate's V entry to R in the index file, or
    # bail out early if it is already marked revoked.
    with salt.utils.fopen(index_file) as f:
        for line in f:
            if index_r_data_pattern.match(line):
                revoke_date = line.split('\t')[2]
                try:
                    # A parseable revocation date means it was already revoked.
                    datetime.strptime(revoke_date, two_digit_year_fmt)
                    return ('"{0}/{1}.crt" was already revoked, '
                            'serial number: {2}').format(
                        cert_path,
                        cert_filename,
                        serial_number
                    )
                except ValueError:
                    ret['retcode'] = 1
                    # NOTE(review): these two adjacent literals concatenate
                    # without a space ("...does not matchformat...").
                    ret['comment'] = ("Revocation date '{0}' does not match"
                                      "format '{1}'".format(
                                          revoke_date,
                                          two_digit_year_fmt))
                    return ret
            elif index_serial_subject in line:
                __salt__['file.replace'](
                    index_file,
                    index_v_data,
                    index_r_data,
                    backup=False)
                break
    # Pass 2: rebuild the CRL from every R entry in the index file.
    crl = OpenSSL.crypto.CRL()
    with salt.utils.fopen(index_file) as f:
        for line in f:
            if line.startswith('R'):
                fields = line.split('\t')
                revoked = OpenSSL.crypto.Revoked()
                revoked.set_serial(fields[3])
                # Index stores 2-digit years; Revoked wants 4-digit ASN.1 time.
                revoke_date_2_digit = datetime.strptime(fields[2],
                                                        two_digit_year_fmt)
                revoked.set_rev_date(revoke_date_2_digit.strftime(
                    four_digit_year_fmt))
                crl.add_revoked(revoked)
    crl_text = crl.export(ca_cert, ca_key)
    if crl_file is None:
        crl_file = '{0}/{1}/crl.pem'.format(
            _cert_base_path(),
            ca_name
        )
    if os.path.isdir(crl_file):
        ret['retcode'] = 1
        ret['comment'] = 'crl_file "{0}" is an existing directory'.format(
            crl_file)
        return ret
    with salt.utils.fopen(crl_file, 'w') as f:
        f.write(crl_text)
    return ('Revoked Certificate: "{0}/{1}.crt", '
            'serial number: {2}').format(
        cert_path,
        cert_filename,
        serial_number
    )
# Ad-hoc manual smoke test: exercises CSR creation, CA signing and PKCS#12
# export against a CA named 'koji'.  Runs only when the module is executed
# directly (normally this file is loaded as a Salt execution module).
if __name__ == '__main__':
    # create_ca('koji', days=365, **cert_sample_meta)
    create_csr(
        'koji',
        CN='test_system',
        C="US",
        ST="Utah",
        L="Centerville",
        O="SaltStack",
        OU=None,
        emailAddress='test_system@saltstack.org'
    )
    create_ca_signed_cert('koji', 'test_system')
    create_pkcs12('koji', 'test_system', passphrase='test')
| shineforever/ops | salt/salt/modules/tls.py | tls.py | py | 56,256 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "distutils.version.LooseVersion",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "OpenSSL.__dict__.get",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "OpenSSL.__dict__",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_... |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path

# Ensure the output directory for all graphs exists.
Path("../Docs/graphs").mkdir(parents=True, exist_ok=True)

# Overhead Experiment parameters (W: work sizes, L: lock variants).
# NOTE(review): W and L are defined but never used below -- confirm whether
# an overhead plot was meant to be generated from them.
W = [25, 50, 100, 200, 400, 800]
L = ['p', 'a']
overhead_data = pd.read_csv("exp_data/overhead.csv")

# Experiment 1: speedup of each lock implementation vs. serial, bar chart.
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(111)
Locks = ['Serial', 'TAS', 'Mutex', 'A-Lock', 'MCS']
# Bug fix: the original read ``exp1_data["Speedup"]`` but ``exp1_data`` was
# never defined; ``overhead_data`` is the only frame loaded above.
Speedup = overhead_data["Speedup"]
plt.ylabel('Speedup (parallel throughput / serial throughput)')
plt.title('Exp-1: B = 10000, N = 1')
ax.set_xticklabels(Locks, rotation = 45)
ax.set_yticks(np.arange(0, 13, 0.5))
ax.bar(Locks,Speedup)
plt.savefig('../Docs/graphs/exp1.png')
plt.clf()

# Experiment 2: speedup vs. thread count for each lock type.
# NOTE(review): ``N`` (thread counts), ``opt`` (lock letters) and ``T``
# (critical-section sizes, experiment 3) are used below but never defined in
# this file -- they must be restored/imported for the script to run.
fig = plt.figure(figsize = (10,10))
ideal=[1.0,1.0,1.0,1.0,1.0]
plt.plot(N, ideal, label ="Ideal Performance")
plt.ylabel('Speedup (parallel throughput / serial throughput)')
plt.xlabel('Number of thread (N)')
plt.title('Exp-2: B = 10000')
for l in opt:
    exp2_data = pd.read_csv("exp_data/exp2_" + l + ".csv")
    Speedup = exp2_data["Speedup"]
    if l == 't':
        plt.plot(N, Speedup, label = ("Lock = Test And Set"))
    if l == 'p':
        plt.plot(N, Speedup, label = ("Lock = Mutex"))
    if l == 'a':
        plt.plot(N, Speedup, label = ("Lock = Anderson"))
    if l == 'm':
        plt.plot(N, Speedup, label = ("Lock = MCS"))
plt.legend()
plt.savefig('../Docs/graphs/exp2.png')
plt.clf()

# Experiment 3: speedup vs. critical-section size, one figure per lock type.
for l in opt:
    fig = plt.figure(figsize = (10,10))
    plt.plot(N, ideal, label ="Ideal Performance")
    plt.ylabel('Speedup (parallel runtime / serial runtime)')
    plt.xlabel('Size of critical section (t ms)')
    if l == 't':
        plt.title('Exp-3: B = 3136 (ms), lock = Test And Set')
    if l == 'p':
        plt.title('Exp-3: B = 3136 (ms), lock = Mutex')
    if l == 'a':
        plt.title('Exp-3: B = 3136 (ms), lock = Anderson')
    if l == 'm':
        plt.title('Exp-3: B = 3136 (ms), lock = MCS')
    for n in N:
        exp3_data = pd.read_csv("exp_data/exp3_" + l + ":" + str(n) + ".csv")
        Speedup = exp3_data["Speedup"]
        plt.plot(T, Speedup, label = ("N = " + str(n)))
    plt.legend()
    plt.savefig('../Docs/graphs/exp3_' + l + '.png')
    plt.clf()
| amiller68/CMSC-23010 | amiller68-cs23010-spr-21/HW3a/hw3a/analyze.py | analyze.py | py | 2,204 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
41331514007 | from datetime import datetime, timedelta
from functools import partial
from itertools import groupby
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.misc import formatLang
from odoo.osv import expression
from odoo.tools import float_is_zero, float_compare
from dateutil.relativedelta import relativedelta
from num2words import num2words
class SaleOrderExt(models.Model):
    # Extends sale.order with a customer PO number and a preferred bank
    # account; both are stamped onto the invoices created from the order.
    _inherit = 'sale.order'
    # Customer purchase-order number (copied to account.move.po_number).
    po_no = fields.Char('PO Number',store = True)
    # Bank account to show on the invoice.
    sales_partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
        help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Vendor Credit Note, otherwise a Partner bank account number.',
        domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
    def _create_invoices(self, grouped=False, final=False):
        """
        Create the invoice associated to the SO.
        :param grouped: if True, invoices are grouped by SO id. If False, invoices are grouped by
                        (partner_invoice_id, currency)
        :param final: if True, refunds will be generated if necessary
        :returns: list of created invoices
        """
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        # 1) Create invoices.
        invoice_vals_list = []
        for order in self:
            pending_section = None
            # Invoice values.
            invoice_vals = order._prepare_invoice()
            # Invoice line values (keep only necessary sections).
            for line in order.order_line:
                if line.display_type == 'line_section':
                    # Hold the section header until an invoiceable line follows.
                    pending_section = line
                    continue
                if float_is_zero(line.qty_to_invoice, precision_digits=precision):
                    continue
                if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):
                    if pending_section:
                        invoice_vals['invoice_line_ids'].append((0, 0, pending_section._prepare_invoice_line()))
                        pending_section = None
                    invoice_vals['invoice_line_ids'].append((0, 0, line._prepare_invoice_line()))
            if not invoice_vals['invoice_line_ids']:
                raise UserError(_('There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))
            invoice_vals_list.append(invoice_vals)
        if not invoice_vals_list:
            raise UserError(_(
                'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))
        # 2) Manage 'grouped' parameter: group by (partner_id, currency_id).
        if not grouped:
            new_invoice_vals_list = []
            for grouping_keys, invoices in groupby(invoice_vals_list, key=lambda x: (x.get('partner_id'), x.get('currency_id'))):
                origins = set()
                payment_refs = set()
                refs = set()
                ref_invoice_vals = None
                # Merge every invoice dict of the group into the first one.
                for invoice_vals in invoices:
                    if not ref_invoice_vals:
                        ref_invoice_vals = invoice_vals
                    else:
                        ref_invoice_vals['invoice_line_ids'] += invoice_vals['invoice_line_ids']
                    origins.add(invoice_vals['invoice_origin'])
                    payment_refs.add(invoice_vals['invoice_payment_ref'])
                    refs.add(invoice_vals['ref'])
                # Custom addition: stamp the order's PO number and bank account.
                # NOTE(review): ``self.po_no`` / ``self.sales_partner_bank_id``
                # assume a single-order recordset; if _create_invoices runs on
                # several orders at once this raises the expected-singleton
                # error -- confirm callers invoke it one order at a time.
                ref_invoice_vals.update({
                    'ref': ', '.join(refs),
                    'po_number' : self.po_no,
                    'invoice_partner_bank_id': self.sales_partner_bank_id.id,
                    'invoice_origin': ', '.join(origins),
                    'invoice_payment_ref': len(payment_refs) == 1 and payment_refs.pop() or False,
                })
                new_invoice_vals_list.append(ref_invoice_vals)
            invoice_vals_list = new_invoice_vals_list
        # 3) Create invoices.
        moves = self.env['account.move'].with_context(default_type='out_invoice').create(invoice_vals_list)
        # 4) Some moves might actually be refunds: convert them if the total amount is negative
        # We do this after the moves have been created since we need taxes, etc. to know if the total
        # is actually negative or not
        if final:
            moves.filtered(lambda m: m.amount_total < 0).action_switch_invoice_into_refund_credit_note()
        for move in moves:
            move.message_post_with_view('mail.message_origin_link',
                values={'self': move, 'origin': move.line_ids.mapped('sale_line_ids.order_id')},
                subtype_id=self.env.ref('mail.mt_note').id
            )
        return moves
class SaleOrderLineExt(models.Model):
    # Extends sale.order.line with a part number and free-text remarks, and
    # forwards the remarks into the procurement values used for stock moves.
    _inherit = 'sale.order.line'
    # Manufacturer/vendor part number shown on the order line.
    product_part_no = fields.Char('Part No',store = True)
    # Free-text remarks, propagated to the generated stock move.
    remarks= fields.Char('Remarks',store = True)
    def _prepare_procurement_values(self, group_id=False):
        """ Prepare specific keys for moves or other components that will be created from a stock rule
        coming from a sale order line. This method can be overridden in order to add other custom keys
        to be used in move/po creation.
        """
        values = super(SaleOrderLineExt, self)._prepare_procurement_values(group_id)
        self.ensure_one()
        # Planned date = order date + customer lead time - company security lead.
        date_planned = self.order_id.date_order\
            + timedelta(days=self.customer_lead or 0.0) - timedelta(days=self.order_id.company_id.security_lead)
        values.update({
            'group_id': group_id,
            'sale_line_id': self.id,
            'date_planned': date_planned,
            'route_ids': self.route_id,
            'warehouse_id': self.order_id.warehouse_id or False,
            'partner_id': self.order_id.partner_shipping_id.id,
            'company_id': self.order_id.company_id,
            # Custom addition: carry the line's remarks to the stock move.
            'remarks':self.remarks,
        })
        # A commitment date on the order overrides the computed planned date.
        # (self is a singleton after ensure_one(), so this runs 0 or 1 times.)
        for line in self.filtered("order_id.commitment_date"):
            date_planned = fields.Datetime.from_string(line.order_id.commitment_date) - timedelta(days=line.order_id.company_id.security_lead)
            values.update({
                'date_planned': fields.Datetime.to_string(date_planned),
            })
        return values
class account_move_ext(models.Model):
    # Extends account.move with the customer PO number and report helpers.
    _inherit = 'account.move'
    # Customer purchase-order number copied from the sale order.
    po_number = fields.Char("PO Number")
    def action_ext(self):
        # Return the move reference with its first 13 characters stripped.
        # NOTE(review): the meaning of the fixed 13-character prefix is not
        # visible here -- presumably a constant-length label in ``ref``.
        domain = self.ref[13:]
        return domain
    def get_kyat_explanation(self,amount):
        # Spell out *amount* in English words (used on printed reports).
        new_amount=num2words(amount,lang='en')
        return new_amount
class stock_rule_ext(models.Model):
    # Extends stock.rule so the procurement's 'remarks' reach the created move.
    _inherit = 'stock.rule'
    # Free-text remarks (mirrors the field added on sale.order.line).
    remarks = fields.Char("Remarks")
    def _get_stock_move_values(self, product_id, product_qty, product_uom,location_id, name, origin, company_id, values):
        ''' Returns a dictionary of values that will be used to create a stock move from a procurement.
        This function assumes that the given procurement has a rule (action == 'pull' or 'pull_push') set on it.
        :param procurement: browse record
        :rtype: dictionary
        '''
        # Resolve the procurement group per the rule's propagation mode.
        group_id = False
        if self.group_propagation_option == 'propagate':
            group_id = values.get('group_id', False) and values['group_id'].id
        elif self.group_propagation_option == 'fixed':
            group_id = self.group_id.id
        # Expected date = planned date minus the rule's lead-time delay.
        date_expected = fields.Datetime.to_string(
            fields.Datetime.from_string(values['date_planned']) - relativedelta(days=self.delay or 0)
        )
        # it is possible that we've already got some move done, so check for the done qty and create
        # a new move with the correct qty
        qty_left = product_qty
        move_values = {
            'name': name[:2000],
            'company_id': self.company_id.id or self.location_src_id.company_id.id or self.location_id.company_id.id or company_id.id,
            'product_id': product_id.id,
            'product_uom': product_uom.id,
            'product_uom_qty': qty_left,
            'partner_id': self.partner_address_id.id or (values.get('group_id', False) and values['group_id'].partner_id.id) or False,
            'location_id': self.location_src_id.id,
            'location_dest_id': location_id.id,
            # Custom addition: propagate the procurement's remarks (if any).
            'remarks':values.get('remarks',False),
            'move_dest_ids': values.get('move_dest_ids', False) and [(4, x.id) for x in values['move_dest_ids']] or [],
            'rule_id': self.id,
            'procure_method': self.procure_method,
            'origin': origin,
            'picking_type_id': self.picking_type_id.id,
            'group_id': group_id,
            'route_ids': [(4, route.id) for route in values.get('route_ids', [])],
            'warehouse_id': self.propagate_warehouse_id.id or self.warehouse_id.id,
            'date': date_expected,
            'date_expected': date_expected,
            'propagate_cancel': self.propagate_cancel,
            'propagate_date': self.propagate_date,
            'propagate_date_minimum_delta': self.propagate_date_minimum_delta,
            'description_picking': product_id._get_description(self.picking_type_id),
            'priority': values.get('priority', "1"),
            'delay_alert': self.delay_alert,
        }
        # Let subclasses inject extra custom fields from the procurement values.
        for field in self._get_custom_move_fields():
            if field in values:
                move_values[field] = values.get(field)
        return move_values
| sanlin-isgm/starglobal-dev | starglobal/models/sales_ext.py | sales_ext.py | py | 9,730 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "odoo.models.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
... |
28456291602 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 06:26:48 2020
@author: Lokeshwar
"""
# In this, the test dataset points are assigned to KMEANS clusters and DBSCAN clusters based on KNN Classifer.
import pandas as pd
from train import features
from sklearn.neighbors import KNeighborsClassifier
# Interactive entry: ask the user for the test CSV path.
file_name = input ('Enter the testing file name:\n' )
#k,distance_metric = input ('Enter K value and distance metric (Eg: 5 Manhattan) -' ).split()
# Module-level accumulators: filled by KNN() and read by fn_testing().
kmeans_labels=[]
dbscan_labels=[]
def KNN(k, test_features):
    """Assign every test row to a KMEANS and a DBSCAN cluster via k-NN.

    Fits two k-nearest-neighbour classifiers (Minkowski p=2, i.e. Euclidean)
    on 'train_features.csv': features are all but the last three columns,
    KMEANS labels the second-to-last column and DBSCAN labels the last
    column.  One prediction per test row is appended to the module-level
    ``kmeans_labels`` and ``dbscan_labels`` lists (interface unchanged).
    """
    train_features = pd.read_csv('train_features.csv')
    knn_kmeans = KNeighborsClassifier(n_neighbors=k, p=2)
    knn_dbscan = KNeighborsClassifier(n_neighbors=k, p=2)
    X_train = train_features.iloc[:, :-3]
    knn_kmeans.fit(X_train, train_features.iloc[:, -2])
    knn_dbscan.fit(X_train, train_features.iloc[:, -1])
    # Improvement: predict the whole test matrix in one vectorized call
    # instead of one predict() round-trip per row; `.values` keeps the input
    # a bare array exactly like the per-row list the original passed.
    kmeans_labels.extend(knn_kmeans.predict(test_features.values))
    dbscan_labels.extend(knn_dbscan.predict(test_features.values))
def fn_testing(file_name):
    # Load the raw test CSV (30 unnamed columns), extract features with the
    # shared pipeline from train.py, classify with k=25 neighbours and write
    # the predicted cluster labels to 'P3labels.csv'.
    test_df = pd.read_csv(file_name,names=[i for i in range(30)],index_col=False)
    test_features=features(test_df)
    k=25
    KNN(k,test_features)
    # The DBSCAN/KMEANS label lists were filled in place by KNN() above.
    results_df=pd.DataFrame({'DBSCAN':dbscan_labels,'KMEANS':kmeans_labels})
    results_df.to_csv('P3labels.csv',header=False)
    print(results_df)
# Script entry point: classify the file named on stdin.
fn_testing(file_name)
| Lokeshwar0304/Data-Mining-CGM-System-Data | Meal data clusters/test.py | test.py | py | 1,453 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 24,
"usage_type": "call"
... |
11731959047 | import argparse
import numpy as np
from keras.layers import Conv1D, BatchNormalization, Activation, MaxPool1D
from keras.layers import Dense, Dropout, GlobalMaxPool1D
from keras.models import Input, Model
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from extract import *
# from pretrained import get_1d_conv_model, Config
# import pretrained
MAX_LEN = 2 * 16000
def build():
    """Construct and compile the 1-D CNN audio classifier.

    Three conv/pool stages on raw 2-second 16 kHz waveforms, a 256-filter
    conv head with global max pooling, two dense layers and a 41-way
    softmax, compiled with Adam(1e-3) and categorical cross-entropy.
    """
    def conv_stage(tensor, filters, kernel, pool, drop):
        # Two Conv1D+ReLU layers followed by max-pooling and dropout.
        tensor = Activation('relu')(Conv1D(filters, kernel)(tensor))
        tensor = Activation('relu')(Conv1D(filters, kernel)(tensor))
        return Dropout(drop)(MaxPool1D(pool)(tensor))

    inputs = Input([MAX_LEN, 1])
    x = conv_stage(inputs, 16, 9, 16, 0.1)
    x = conv_stage(x, 32, 3, 4, 0.1)
    x = conv_stage(x, 32, 3, 4, 0.1)
    x = Activation('relu')(Conv1D(256, 3)(x))
    x = Activation('relu')(Conv1D(256, 3)(x))
    x = Dropout(0.2)(GlobalMaxPool1D()(x))
    x = Dense(64, activation='relu')(x)
    x = Dense(128, activation='relu')(x)
    outputs = Dense(41, activation='softmax')(x)

    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(1e-3), loss='categorical_crossentropy', metrics=['acc'])
    return model
def datagen(x, y, batch_size):
    """Endlessly yield random (x, y) mini-batches.

    Each step samples ``batch_size`` distinct row indices (without
    replacement within the batch) and yields the matching slices.
    """
    sample_count = len(y)
    while True:
        batch_idx = np.random.choice(sample_count, batch_size, replace=False)
        yield x[batch_idx], y[batch_idx]
def run(args):
    """Train the 1-D CNN on cached raw-audio data.

    ``args`` must provide train_dir, train_label, id_name, model_path,
    train_num, batch_size and epochs.  The waveform matrix is loaded from
    'pickle/sr16000.pickle.npy' (the code that originally produced it is
    kept commented out); data is split train/validation at ``train_num``
    and fitted with early stopping plus best-model checkpointing.
    """
    model = build()
    folder = args.train_dir
    files = get_fnames(folder)
    wave2ind = {}
    # NOTE(review): this zero matrix is allocated but immediately replaced
    # by np.load below; only the wave2ind mapping built here is still used.
    x = np.zeros([len(files), 32000])
    for i, file in enumerate(files):
        wave2ind[file] = i
        # raw, _ = load(os.path.join(folder, file), sr=16000, res_type='kaiser_fast')
        # raw = pretrained.audio_norm(raw)
        # x[i][:len(raw)] = raw[:32000]
        # print(i, file)
    # x = np.expand_dims(x, axis=-1)
    # np.save('pickle/sr16000.pickle', x)
    x = np.load('pickle/sr16000.pickle.npy')
    # config = pretrained.Config(sampling_rate=16000, audio_duration=2, n_folds=10, learning_rate=0.001)
    # model = pretrained.get_1d_conv_model(config)
    # data, wave2ind = read_dir(args.train_dir)
    # x = np.empty([len(data), MAX_LEN, 1])
    # for i, d in enumerate(data):
    #     x[i, :len(d), 0] = d[:MAX_LEN]
    # One-hot labels over the 41 classes.
    labels = read_label(args.train_label, args.id_name, wave2ind)
    labels = to_categorical(labels, 41)
    # Fixed-index train/validation split.
    x_train = x[:args.train_num]
    x_valid = x[args.train_num:]
    y_train = labels[:args.train_num]
    y_valid = labels[args.train_num:]
    callbacks = [EarlyStopping(monitor='val_loss', patience=5, mode='min'),
                 ModelCheckpoint(args.model_path, monitor='val_loss', save_best_only=True)]
    # model.summary()
    model.fit_generator(
        datagen(x_train, y_train, args.batch_size),
        epochs=args.epochs,
        steps_per_epoch=100,
        validation_data=datagen(x_valid, y_valid, args.batch_size),
        validation_steps=10,
        callbacks=callbacks
    )
    # model.save(args.model_path)
def parse():
    """Define and evaluate the training script's command-line interface."""
    cli = argparse.ArgumentParser()
    # Four required positional paths, in this exact order.
    for positional in ('train_dir', 'train_label', 'id_name', 'model_path'):
        cli.add_argument(positional)
    # Optional hyper-parameters with the historical defaults.
    cli.add_argument('-i', type=int, default=100, dest='epochs')
    cli.add_argument('-b', type=int, default=100, dest='batch_size')
    cli.add_argument('-n', type=int, default=8000, dest='train_num')
    cli.add_argument('-s', dest='semi')
    return cli.parse_args()
if __name__ == '__main__':
    # run(parse())
    # Hard-coded batch runner: a lambda is (ab)used as a plain attribute bag
    # instead of parsing CLI args, then three identical training runs are
    # launched (the loop variable k only varied the commented-out path).
    f = lambda: None
    for k in range(7, 10):
        setattr(f, 'train_dir', 'data/audio_train')
        setattr(f, 'train_label', 'data/train.csv')
        setattr(f, 'id_name', 'pickle/mfcc_id_name.pickle')
        # setattr(f, 'model_path', 'model/1d_conv%d.mdn' % k)
        setattr(f, 'model_path', 'tmp')
        setattr(f, 'batch_size', 64)
        setattr(f, 'train_num', 8500)
        setattr(f, 'epochs', 100)
        setattr(f, 'semi', None)
        run(f)
| hungchingliu/ML2018SPRING | final/src/method2/train_raw.py | train_raw.py | py | 4,676 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.Input",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.... |
2144573080 | import requests
from bs4 import BeautifulSoup
from headers import HEADERS
from brainportindustries.hrefFinder import HrefFinder
from csvImporter import CsvImporter
class InfoFinder:
    """Scrape company name and postcode from each Brainport member page."""

    def __init__(self):
        # HrefFinder supplies the list of member-page URLs to visit.
        self.finder = HrefFinder()
        self.url_list = self.finder.get_href_list()
        self.importer = CsvImporter('brainportindustries')
        self.headers = HEADERS

    def find_info(self):
        """Fetch every URL, extract company/postcode and append them to CSV."""
        for url in self.url_list:
            # Bug fix: requests.get's second positional argument is `params`,
            # so the headers dict was being sent as query-string parameters;
            # it must be passed via the `headers` keyword.
            req = requests.get(url, headers=self.headers)
            plain_text = req.text
            # Name the parser explicitly so behaviour does not depend on
            # which optional parsers happen to be installed.
            soup = BeautifulSoup(plain_text, 'html.parser')
            try:
                company = soup.find('h1').text
                postcode = soup.find('span', {'class': 'postcode'}).text
                self.importer.import_to_csv(company, postcode)
                print(postcode)
                print(company)
            except Exception as e:
                print('exception occured at url: {} error: {}'.format(url, e))
# Script entry point: crawl every collected URL and export the results.
finder = InfoFinder()
finder.find_info()
{
"api_name": "brainportindustries.hrefFinder.HrefFinder",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csvImporter.CsvImporter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "headers.HEADERS",
"line_number": 13,
"usage_type": "name"
},
{
... |
4441106692 | from flask import Flask, render_template, request, send_file, Response
import nltk
from gtts import gTTS
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
import pyttsx3
import os
import tempfile
from io import BytesIO
import re
import base64
import gtts
app = Flask(__name__)
def read_article(content):
    """Split *content* into sentences on ". " separators.

    Bug fix: the original unconditionally popped the last element, which
    silently dropped the final sentence whenever the text did not end with
    ". ".  Now only a trailing empty fragment (produced by a ". " suffix)
    is removed.
    """
    sentences = content.split(". ")
    if sentences and not sentences[-1].strip():
        sentences.pop()
    return sentences
def sentence_similarity(sent1, sent2, stopwords=None):
    """Return 1 - cosine distance of the token-frequency vectors of two sentences.

    NOTE(review): the ``stopwords`` parameter shadows the module-level
    ``nltk.corpus.stopwords`` import inside this function.
    NOTE(review): callers in this file pass whole sentence *strings*, so the
    ``for w in ...`` loops iterate over characters, not words -- the score is
    effectively character-frequency based.  Confirm whether tokenized word
    lists were intended.
    """
    if stopwords is None:
        stopwords = []
    # Lower-case every token (characters, when given a plain string).
    sent1 = [w.lower() for w in sent1]
    sent2 = [w.lower() for w in sent2]
    # Combined vocabulary defines one shared vector space.
    all_words = list(set(sent1 + sent2))
    vector1 = [0] * len(all_words)
    vector2 = [0] * len(all_words)
    # Count non-stopword token frequencies per sentence.
    for w in sent1:
        if w in stopwords:
            continue
        vector1[all_words.index(w)] += 1
    for w in sent2:
        if w in stopwords:
            continue
        vector2[all_words.index(w)] += 1
    # nltk's cosine_distance = 1 - cosine similarity, so this yields similarity.
    return 1 - cosine_distance(vector1, vector2)
def build_similarity_matrix(sentences, stopwords):
    """Return the pairwise sentence-similarity matrix (diagonal left at zero)."""
    count = len(sentences)
    matrix = np.zeros((count, count))
    for row in range(count):
        for col in range(count):
            if row != col:
                matrix[row][col] = sentence_similarity(
                    sentences[row], sentences[col], stopwords)
    return matrix
def generate_summary(content, top_n=5):
    """Return an extractive summary built from the top-ranked sentences.

    Sentences are scored with PageRank over the pairwise-similarity graph.
    Bug fix: the original indexed ``ranked_sentences[i]`` for a fixed
    ``top_n`` and raised IndexError on inputs with fewer sentences; the
    count is now clamped to the number of available sentences.
    """
    stop_words = stopwords.words('english')
    summarize_text = []
    sentences = read_article(content)
    sentence_similarity_matrix = build_similarity_matrix(sentences, stop_words)
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_matrix)
    scores = nx.pagerank(sentence_similarity_graph)
    # Highest-scoring sentences first.
    ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    for i in range(min(top_n, len(ranked_sentences))):
        summarize_text.append(ranked_sentences[i][1])
    return ". ".join(summarize_text)
@app.route('/')
def home():
    """Render the landing page containing the text-input form."""
    return render_template('index.html')
@app.route('/summarize', methods=['POST'])
def summarize():
    """Summarize the posted text, synthesize it to MP3, render the result.

    Reads the ``content`` form field, extracts a 3-sentence summary, writes
    the spoken summary to ``summary.mp3`` via gTTS (network call to the
    Google TTS service) and renders the summary page pointing at that file.
    """
    content = request.form['content']
    summary = generate_summary(content, top_n=3)
    # Cleanup: the previous version also initialized a pyttsx3 engine here
    # that was never used (only gTTS produces the audio); the dead engine
    # init has been removed.
    tts = gTTS(text=summary, lang='en', slow=False)
    tts.save("summary.mp3")
    return render_template('summary.html', summary=summary, audio_path='/summary.mp3')
@app.route('/audio/<path:audio_path>')
def stream_audio(audio_path):
    """Stream a generated MP3 file.

    Security fix: the ``<path:...>`` converter accepts slashes, so passing
    the raw value to ``send_file`` allowed reading arbitrary files (path
    traversal).  Only bare filenames in the app's working directory are
    served now; the normal caller passes '/summary.mp3', which still works.
    """
    safe_name = os.path.basename(audio_path)
    return send_file(safe_name, mimetype='audio/mpeg')
# Development entry point; use a real WSGI server in production.
if __name__ == '__main__':
    app.run(debug=True)
| Santho-osh/flaskProject | app.py | app.py | py | 2,872 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nltk.download",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.stopwords"... |
29638027337 | from datetime import datetime
import time
import os
import time
import psutil
import subprocess
from config.configSetup import *
from databaseSendMSG import handle_error, handle_info
def is_vcgencmd_available():
    """Return True when the Raspberry Pi ``vcgencmd`` tool can be executed."""
    try:
        subprocess.check_output(['vcgencmd'], stderr=subprocess.DEVNULL)
        return True
    except FileNotFoundError:
        # Binary not installed (not running on a Raspberry Pi).
        return False
    except subprocess.CalledProcessError:
        # Bug fix: ``vcgencmd`` without arguments can exit non-zero (usage
        # message); the original let CalledProcessError propagate and crash.
        # A non-zero exit still proves the tool is present and runnable.
        return True
# Leggi la temperatura della CPU -> String
def get_cpu_temperature():
    """Return the CPU temperature string via ``vcgencmd`` (Pi only), else None."""
    try:
        if not is_vcgencmd_available():
            return None
        reading = os.popen('vcgencmd measure_temp').readline()
        # Strip the "temp=...'C" wrapper so only the numeric text remains.
        return (reading.replace("temp=", "").replace("'C\n", ""))
    except Exception as e:
        handle_error(
            f"Display.py : Errore durante la lettura della temperatura della CPU: {str(e)}")
        return None
# Leggi utilizzo RAM -> Float
def get_ram_usage():
    """Return system RAM usage as a percentage (float), or None on failure."""
    try:
        return psutil.virtual_memory().percent
    except Exception as e:
        handle_error(
            f"Display.py : Errore durante la lettura dell'utilizzo della RAM: {str(e)}")
        return None
def update_display(window, price, wallet_balance, current_position_side, current_position_size, signal, pnl_percentage, tmp_agg_display, entry_price, entry_timestamp):
    """Redraw the curses dashboard with position, balance and system stats.

    ``current_position_side``: True = Long, False = Short, None = flat.
    ``signal``: dict with 'long_condition', 'short_condition', 'exit_long'
    and 'exit_short' keys.  Relies on star-imported config globals
    (``leverage`` and the stop-loss / take-profit percentages).
    """
    # Clear the window before redrawing.
    window.clear()
    # System stats; the CPU temperature is Raspberry Pi only and may be None.
    cpu_temp = get_cpu_temperature()
    cpu_percent = psutil.cpu_percent()
    ram_usage_percent = get_ram_usage()
    # Current window size.
    max_y, max_x = window.getmaxyx()
    # If the window is too small, show a hint and wait before retrying.
    if max_y < 10 or max_x < 10:
        # Bug fix: the hint was written at (20, 20), a position outside any
        # window smaller than 10x10, which made curses raise an error.
        window.addstr(0, 0, "Please resize the window to at least 10x10.")
        window.refresh()
        time.sleep(tmp_agg_display)
        return
    now = datetime.now()
    current_time = now.strftime("[ %H:%M ] [ %d-%m-%Y ]")
    # Current date and time.
    window.addstr(0, 0, f"Ora attuale e Data : {current_time}")
    # Live position information.
    window.addstr(
        2, 0, f"Posizione corrente : {'Long' if current_position_side else 'Short' if current_position_side is not None else 'N/D'}")
    try:
        window.addstr(4, 0, f"Mark Price : {price:.2f} USD")
    except Exception as e:
        handle_error(f"Display.py : Errore Mark Price : {str(e)}")
    try:
        window.addstr(
            3, 0, f"Execution time : {entry_timestamp}"if entry_timestamp is not None and current_position_side is not None else "Execution time : N/D")
    except Exception as e:
        handle_error(
            f"Display.py : Errore nel recupero del timestamp: {str(e)}")
    try:
        window.addstr(
            5, 0, f"Entry Price : {entry_price} USD" if entry_price is not None and current_position_side is not None else "Entry Price : N/D")
    except Exception as e:
        handle_error(f"Display.py : Errore Enty Price : {str(e)}")
    # Profit/loss scaled by the configured leverage.
    if pnl_percentage is not None:
        window.addstr(
            6, 0, f"Guadagno/Perdita : {float(pnl_percentage)* float(leverage):.2f}%")
    else:
        window.addstr(6, 0, "Guadagno/Perdita : N/D")
    window.addstr(
        7, 0, f"Dimensione Posizione : {current_position_size if current_position_size is not None else 0} USD")
    window.addstr(
        8, 0, f"Patrimonio attuale : {wallet_balance * price:.2f} USD")
    window.addstr(9, 0, f"Bilancio BTC : {wallet_balance:.8f} BTC")
    window.addstr(10, 0, f"Long Condition : {signal['long_condition']}")
    window.addstr(11, 0, f"Short Condition : {signal['short_condition']}")
    window.addstr(12, 0, f"Exit Long : {signal['exit_long']}")
    window.addstr(13, 0, f"Exit Short : {signal['exit_short']}")
    window.addstr(14, 0, f"Leva set : {leverage}")
    if current_position_side is not None:
        window.addstr(
            15, 0, f"Stop Loss : {(stop_loss_LONG_percentage*100) if current_position_side else (stop_loss_SHORT_percentage*100) if current_position_side is not None else 'N/D'}%")
        # TODO(review): both branches below use take_profit_LONG_percentage;
        # the short side likely should use take_profit_SHORT_percentage
        # (left unchanged here to preserve current behaviour).
        window.addstr(
            16, 0, f"Take Profit : {(take_profit_LONG_percentage*100) if current_position_side else (take_profit_LONG_percentage*100) if current_position_side is not None else 'N/D'}%")
    # CPU/temperature readouts only work on a Raspberry Pi.
    if is_vcgencmd_available():
        window.addstr(17, 0, f"Temperatura CPU : {str(cpu_temp)} °C")
        window.addstr(18, 0, f"Utilizzo della CPU: : {str(cpu_percent)} %")
        window.addstr(19, 0, f"Utilizzo della RAM: : {str(ram_usage_percent)} %")
    if is_vcgencmd_available():
        # Robustness fix: cpu_temp / ram_usage_percent are None when their
        # readers failed; the original crashed on len(None) / float(None).
        if cpu_temp and float(cpu_temp) > 65:
            handle_info("Display.py: Temperatura CPU alta, sopra i 65°C")
        if ram_usage_percent is not None and float(ram_usage_percent) > 85:
            handle_info("Display.py: Utilizzo della RAM elevato, superiore al 85%")
    try:
        window.refresh()
    except Exception as e:
        handle_error(f"Display.py: Errore aggiornamento display")
| SoufianeElkha/trading_talib_ccxt_bitmex_XBTUSD | affichage/display.py | display.py | py | 5,203 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "subprocess.check_output",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.popen",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "databaseSend... |
5834626397 | import webapp2
import jinja2
import os
import model
import functions
##==============================================================================##
## CreateNewTeam.py Creates a new Team ##
##==============================================================================##
## Author: C00117798 - Olawale Egbeyemi ##
## Date: 11/12/2010 ##
## Last Modified: 29/12/2012 ##
## Description: ##
## Python Version: 2.7 ##
##==============================================================================##
##==============================================================================##
#Global Variable
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class CreateNewTeamHandler(webapp2.RequestHandler):
    """Handles POST /createnewteam: validates input and creates a Team entity."""

    def post(self):
        """Create a team after checking name confirmation and duplicates.

        Renders the teams page with an error message on validation failure,
        or the dashboard on success.
        """
        template = jinja_environment.get_template('templates/teams.html')
        teamStringName = self.request.get('teamName')
        teamStringNameConfirm = self.request.get('teamNameConfirm')
        playerTagKey = functions.Worker.PlayerTagStringToPlayerKey(self.request.get('currentUser'))
        team = functions.Worker.GetPlayerTagTeamList(playerTagKey)
        if(teamStringName != teamStringNameConfirm):
            html = template.render({'playerTagHeader': playerTagKey.tag,
                                    'errorTeam': 'Team Name does not match!'})
        else:
            # Reject duplicate team names.
            query = model.Team.query(model.Team.teamName == teamStringName)
            if(query.count() > 0):
                html = template.render({'playerTagHeader': playerTagKey.tag,
                                        'errorTeam': 'This team already exist!'})
            else:
                try:
                    # Persist the new team with the requester as admin/member.
                    newTeam = model.Team()
                    newTeam.administrator = playerTagKey.key
                    newTeam.teamName = teamStringName
                    newTeam.members.append(playerTagKey.key)
                    newTeam.put()
                    team = functions.Worker.GetPlayerTagTeamList(playerTagKey)
                    friends = functions.Worker.GetFriendsList(playerTagKey)
                    template = jinja_environment.get_template('templates/dash.html')
                    html = template.render({'playerTagHeader': playerTagKey.tag,
                                            'teams' : team,
                                            'the_title': 'Dashboard',
                                            'friends' : friends})
                # Bug fix: the original bare ``except:`` also swallowed
                # SystemExit/KeyboardInterrupt; catch Exception instead.
                except Exception:
                    html = template.render({'playerTagHeader': playerTagKey.tag,
                                            'errorMatch': 'An exception error occurred! '})
        # return the html page
        self.response.out.write(html)
app = webapp2.WSGIApplication([ ('/createnewteam', CreateNewTeamHandler),], debug=True) | PureIso/PythonWebApp | Google App Engine - PlayerTag App/createnewteam.py | createnewteam.py | py | 3,212 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jinja2.Environment",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
71109775394 | """
Endpoints for management of arkOS Applications.
arkOS Kraken
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import os
from flask import Blueprint, abort, jsonify, request, send_from_directory
from flask.views import MethodView
from arkos import applications
from arkos.messages import NotificationThread
from kraken import auth
from kraken.records import push_record
from kraken.jobs import as_job, job_response
backend = Blueprint("apps", __name__)
class ApplicationsAPI(MethodView):
    """REST endpoints for listing, installing and uninstalling arkOS apps."""
    @auth.required()
    def get(self, id):
        """Return one app (when *id* is given) or a filtered app list as JSON."""
        if request.args.get("rescan", None):
            applications.scan()
        # ?installed= arrives as a string; normalize it to a real boolean.
        installed = request.args.get("installed", None)
        if installed and installed.lower() == "true":
            installed = True
        elif installed and installed.lower() == "false":
            installed = False
        apps = applications.get(
            id, type=request.args.get("type", None),
            loadable=request.args.get("loadable", None),
            installed=installed,
            cry=False)
        if id and not apps:
            abort(404)
        if isinstance(apps, applications.App):
            return jsonify(app=apps.serialized)
        else:
            return jsonify(apps=[x.serialized for x in apps])
    @auth.required()
    def put(self, id):
        """Start an async install/uninstall job for app *id*; returns a job response."""
        operation = request.get_json()["app"]["operation"]
        app = applications.get(id)
        if not app:
            abort(404)
        if operation == "install":
            # Re-installing an already up-to-date app is a no-op.
            if app.installed and not getattr(app, "upgradable", None):
                return jsonify(app=app.serialized)
            id = as_job(self._install, app)
        elif operation == "uninstall":
            if not app.installed:
                resp = jsonify(message="Application isn't yet installed")
                resp.status_code = 422
                return resp
            id = as_job(self._uninstall, app)
        else:
            return jsonify(errors={"msg": "Unknown operation"}), 422
        data = app.serialized
        # Mark not ready until the background job completes.
        data["is_ready"] = False
        return job_response(id, {"app": data})
    def _install(self, job, app):
        # Background job body: install with progress notifications.
        nthread = NotificationThread(id=job.id)
        app.install(nthread=nthread, force=True, cry=False)
        push_record("app", app.serialized)
    def _uninstall(self, job, app):
        # Background job body: uninstall with progress notifications.
        nthread = NotificationThread(id=job.id)
        app.uninstall(nthread=nthread)
        push_record("app", app.serialized)
@auth.required()
def dispatcher(id, path):
    """Forward /api/apps/<id>/<path> to the target app's own _api module.

    SECURITY NOTE(review): ``getattr(a._api, params[0])`` exposes any
    attribute of the app's _api module to authenticated callers, and the
    remaining path components are passed as positional arguments unchecked
    -- verify that _api modules contain only safe, public callables.
    """
    a = applications.get(id)
    if not a or not hasattr(a, "_api"):
        abort(404)
    params = path.split("/")
    fn = getattr(a._api, params[0])
    return fn(*params[1:])
@backend.route('/api/apps/assets/<string:id>/<string:asset>')
def get_app_asset(id, asset):
    """Serve a static asset file belonging to app *id*."""
    app = applications.get(id)
    if not app:
        abort(404)
    # send_from_directory rejects paths outside the directory (Flask docs).
    return send_from_directory(
        os.path.join('/var/lib/arkos/applications', id, 'assets'), asset)
apps_view = ApplicationsAPI.as_view('apps_api')
backend.add_url_rule('/api/apps', defaults={'id': None},
view_func=apps_view, methods=['GET', ])
backend.add_url_rule('/api/apps/<string:id>', view_func=apps_view,
methods=['GET', 'PUT'])
backend.add_url_rule('/api/apps/<string:id>/<path:path>', view_func=dispatcher,
methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
| arkOScloud/kraken | kraken/frameworks/apps.py | apps.py | py | 3,413 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.r... |
32771578748 | from flask import (
Blueprint,
Flask,
abort,
jsonify,
redirect,
render_template,
request,
url_for,
)
from flask_jwt_extended import jwt_required
from flask_simplelogin import get_username, login_required
from api.auth import create_user
from api.controller import (
add_new_category,
add_new_video,
delete_category,
delete_video,
get_all_category,
get_all_videos,
get_all_videos_by_category,
get_category_by_id,
get_video_by_id,
search_video,
update_category,
update_video,
)
from api.database import mongo
bp = Blueprint(
"api", __name__, template_folder="templates", static_url_path="static"
)
@bp.route("/")
def index():
    """Render the public landing page."""
    return render_template("index.html.j2")
@bp.route("/register", methods=["GET", "POST"])
def register():
    """Show the registration form (GET) or create a new user (POST).

    NOTE(review): a ValueError from create_user (e.g. invalid/duplicate
    user) is reported as 404; 400 or 409 would describe the failure better.
    """
    if request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        data = {"username": username, "password": password}
        try:
            create_user(**data)
        except ValueError:
            return abort(404)
        # On success, send the user to the admin dashboard.
        return redirect(url_for("admin.index"))
    return render_template("register.html.j2"), 200
@bp.route("/token")
@login_required()
def token():
    """Render the logged-in user's stored API token."""
    current_user = get_username()
    record = mongo.db.users.find_one(
        {"username": current_user}, projection={"_id": False}
    )
    return render_template("token.html.j2", data=record["token"]), 200
@bp.route("/videos")
@jwt_required()
def list_videos():
    """Return all videos as JSON; 404 when the collection is unavailable."""
    try:
        videos = get_all_videos()
    except FileNotFoundError:
        return abort(404)
    return jsonify(videos), 200
@bp.route("/videos/<int:video_id>")
@jwt_required()
def one_video(video_id):
    """Return a single video by id as JSON; 404 if not found."""
    try:
        video = get_video_by_id(video_id)
    except FileNotFoundError:
        return abort(404)
    return jsonify(video), 200
@bp.route("/videos/<int:video_id>", methods=["DELETE"])
@jwt_required()
def delete_one_video(video_id):
    """Delete a video; 404 if it does not exist."""
    try:
        # Existence check: raises FileNotFoundError for unknown ids.
        get_video_by_id(video_id)
    except FileNotFoundError:
        return abort(404)
    else:
        exec_video = delete_video(video_id)
        return exec_video
@bp.route("/videos/new", methods=["POST"])
@jwt_required()
def new_video():
    """Create a video from the JSON body and redirect to its detail view.

    NOTE(review): the ``(response, 200)`` tuple overrides the redirect's
    3xx status, so clients will not actually follow the Location header.
    """
    data = request.get_json()
    video = add_new_video(data)
    return redirect(url_for("api.one_video", video_id=video)), 200
@bp.route("/videos/<int:video_id>", methods=["PUT"])
@jwt_required()
def update_data_video(video_id):
    """Update a video from the JSON body and redirect to its detail view."""
    data = request.get_json()
    video = update_video(video_id, data)
    return redirect(url_for("api.one_video", video_id=video)), 200
@bp.route("/videos/")
@jwt_required()
def search_video_query():
    """Search videos via the ?search= query string; 404 when nothing matches."""
    search = request.args.get("search")
    try:
        videos = search_video(search)
    except FileNotFoundError:
        return abort(404)
    return jsonify(videos), 200
# CATEGORY ROUTES
@bp.route("/category")
@jwt_required()
def list_category():
    """Return all categories as JSON; 404 when unavailable."""
    try:
        category = get_all_category()
    except FileNotFoundError:
        return abort(404)
    return jsonify(category), 200
@bp.route("/category/<int:categoryId>")
@jwt_required()
def one_category(categoryId):
    """Return a single category by id as JSON; 404 if not found."""
    try:
        category = get_category_by_id(categoryId)
    except FileNotFoundError:
        return abort(404)
    return jsonify(category), 200
@bp.route("/category/<int:categoryId>", methods=["DELETE"])
@jwt_required()
def delete_one_category(categoryId):
    """Delete a category; 404 if it does not exist."""
    try:
        # Existence check: raises FileNotFoundError for unknown ids.
        get_category_by_id(categoryId)
    except FileNotFoundError:
        return abort(404)
    else:
        exec_category = delete_category(categoryId)
        return exec_category
@bp.route("/category/new", methods=["GET", "POST"])
@jwt_required()
def new_category():
    """Create a category from the JSON body and redirect to its detail view.

    NOTE(review): here 200 is passed as redirect()'s *code* argument
    (unlike new_video, which returns a tuple); a 200-coded redirect is not
    followed by clients -- this probably should be the default 302.
    """
    data = request.get_json()
    category = add_new_category(data)
    return redirect(url_for("api.one_category", categoryId=category), 200)
@bp.route("/category/<int:categoryId>", methods=["GET", "PUT"])
@jwt_required()
def update_data_category(categoryId):
    """Update a category from the JSON body and redirect to its detail view.

    NOTE(review): 200 is passed as redirect()'s *code* argument, so the
    redirect is not actually followed; likely should be the default 302.
    """
    data = request.get_json()
    category = update_category(categoryId, data)
    return redirect(url_for("api.one_category", categoryId=category), 200)
# RELATIONSHIP
@bp.route("/category/<int:categoryId>/videos", methods=["GET"])
@jwt_required()
def show_videos_by_category(categoryId):
    """List all videos belonging to a category; 404 if unknown/empty."""
    try:
        videos_category = get_all_videos_by_category(categoryId)
    except FileNotFoundError:
        return abort(404)
    return jsonify(videos_category), 200
def configure(app: Flask):
    """Attach this blueprint to the given Flask application."""
    app.register_blueprint(bp)
| joseevilasio/my-videos-lib | api/views.py | views.py | py | 4,498 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "flask... |
6829450262 | # coding: utf-8
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# Load 15 CSV files of time-series data; keep the first 80 columns of each row.
all_time_series = []
for i in range(15):
    file_name = './timeseries_data/timeseries_' + str(i) + '.csv'
    # Bug fix: the original never closed the file handle; `with` guarantees it.
    with open(file_name, 'r') as f:
        for row in csv.reader(f):
            all_time_series.append(row[0:80])
all_time_series = np.array(all_time_series)
# Build a 285 x 130 surface from the first value of each series:
# row-major layout, 285 series per grid row.
x = np.arange(0, 285, 1)
y = np.arange(0, 130, 1)
X, Y = np.meshgrid(x, y)
Z = []
for i in range(130):
    Z.append([])
    for j in range(285):
        Z[i].append(float(all_time_series[285 * i + j][0]))
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(X, Y, Z, color='blue')
print(X.shape)
print(Z.shape)
plt.show()
| kozenumezawa/causalviz | python/three-dim-test.py | three-dim-test.py | py | 740 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number":... |
69997512993 | from notifypy import Notify
from PySimpleGUI import PySimpleGUI as sg
# 1 ciclo pomodoro = 25 min = 1500 segundos
# OK exibir tempo restante do ciclo na tela
# OK exibir quantidade de ciclos
# OK ao finalizar ciclo, tocar som de alerta COM notificação
# Após 1 ciclo, realizar pausa de 5 min = 300 segundos
# - ao finalizar pausa, tocar som de alerta - > escolher som de alerta para pausa
# - criar função timer para pausa
# Após 4 ciclos concluídos, realizar pausa longa de 15 a 30 min = 900 / 1800 segundos
# - exibir alerta de notificação
# - perguntar se deseja iniciar novo ciclo ou finalizar
# OK Reiniciar ciclo para seguir tarefas
def notificacao(meu_titulo='Título Teste', texto_notificacao='Texto Teste'):
    """Show a desktop notification with the given title and message.

    Plays the bundled alert sound; returns whatever ``Notify.send`` returns.
    """
    alerta = Notify()
    alerta.title = meu_titulo
    alerta.message = texto_notificacao
    alerta.audio = 'files/sound/alerta.wav'
    return alerta.send()
titulo = 'Pomodoro Notipyer '
fim_tarefa = 'Tarefa concluída!'
def formatar_tempo(t):
    """Format a number of seconds as an ``MM:SS`` string."""
    minutos, segundos = divmod(t, 60)
    return f'{minutos:02d}:{segundos:02d}'
ciclos_realizados = 0  # number of completed pomodoro cycles
sg.theme('Reddit')
# Window layout: cycle counter, timer display, and the three control buttons.
layout = [[sg.Text('Ciclos:'), sg.Text('0', key='CICLOS')],
          [sg.Text('00:00', size=(8, 2), font=('Helvetica', 20),
                   justification='center', key='TIMER')],
          [sg.Text('-HOLDER TXT-')],
          [sg.Button('Iniciar', key='INICIAR', focus=True, button_color=('black', 'white')),
           sg.Button('Pausar', key='PAUSAR', focus=False, disabled=True, button_color=('black', 'white')),
           sg.Button('Resetar', key='RESETAR', focus=False, disabled=True, button_color=('black', 'white'))]]
app = sg.Window('Notipyer Pomodoro', layout, auto_size_buttons=False, keep_on_top=False,
                grab_anywhere=True,
                element_padding=(0, 0),
                finalize=True,
                element_justification='c')
contador_esta_ativo, tempo_restante = False, 5  # counter starts disabled // pomodoro timer length in seconds
# Application event loop: Read(1000) wakes up once per second
while True:
    evento, valores = app.Read(1000)
    # Decrement only while the countdown is active.
    tempo_restante -= 1 * (contador_esta_ativo is True)
    if evento == 'INICIAR':
        contador_esta_ativo = True
        if tempo_restante == 0:
            tempo_restante = 10
        app['INICIAR'].Update(disabled=True)
        app['PAUSAR'].Update(disabled=False)
        app['RESETAR'].Update(disabled=False)
    elif evento == 'PAUSAR':
        contador_esta_ativo = False
        app['INICIAR'].Update('Continuar', disabled=False)
        app['PAUSAR'].Update(disabled=True)
    elif evento == 'RESETAR':
        tempo_restante = 10
        contador_esta_ativo = False
        app['INICIAR'].Update('Iniciar', disabled=False)
        app['PAUSAR'].Update(disabled=True)
        app['RESETAR'].Update(disabled=True)
    elif tempo_restante == 0 and contador_esta_ativo:  # countdown finished
        # Fire the notification, count the cycle, and re-arm the buttons.
        notificacao(titulo, fim_tarefa)
        tempo_restante = 0
        ciclos_realizados += 1
        app['CICLOS'].Update(f'{ciclos_realizados}')
        contador_esta_ativo = False
        if evento == 'INICIAR':
            tempo_restante = 10
            app['INICIAR'].Update(disabled=True)
            app['PAUSAR'].Update(disabled=False)
            app['RESETAR'].Update(disabled=False)
        app['INICIAR'].Update('Reiniciar', disabled=False)
        app['PAUSAR'].Update(disabled=True)
        app['RESETAR'].Update(disabled=True)
    elif evento is None or evento == 'Quit':
        # Window closed or quit requested.
        break
    app['TIMER'].Update(formatar_tempo(tempo_restante))
| dan-alvares/Pomodoro-Notipyer | pomodoro.py | pomodoro.py | py | 3,585 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "notifypy.Notify",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI.theme",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": ... |
30830730394 | # Feature 2: Date
from datetime import date
import pyttsx3 as speak
# Get today's date formatted like "December 16, 2017"
today = date.today()
strdate = today.strftime("%B %d, %Y")
# Create the statement to be spoken
statement2 = "Today is " + strdate + ", have a great day"
# Speak the statement through the text-to-speech engine (blocks until done)
engine = speak.init()
engine.say(statement2)
engine.runAndWait()
| YouCantTouchThis/Thanos | Date.py | Date.py | py | 328 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pyttsx3.init",
"line_number": 15,
"usage_type": "call"
}
] |
40420182184 | from pathlib import Path
import numpy as np
data_folder = Path(".")
class IntCodeProgram:
    """State of an IntCode program: memory, relative base and instruction pointer."""

    def __init__(self, instr):
        """Initialize memory from the list of integers *instr*.

        Bug fix: the original read the module-level global ``instrs`` instead
        of the ``instr`` parameter, so every instance silently ignored its
        argument (it only worked because this module happens to define
        ``instrs``).
        """
        self.instructions = dict(enumerate(instr))
        self.rel_base = 0
        self.instr_ptr = 0
        self.input = None
        self.output = None

    def get(self, ptr, mode):
        """Return the memory value addressed by *ptr* under *mode*.

        Unwritten addresses read as 0, per the IntCode specification.
        """
        loc = self._find_loc(ptr, mode)
        return self.instructions.get(loc, 0)

    def set(self, ptr, mode, value):
        """Write *value* to the memory location addressed by *ptr* under *mode*."""
        loc = self._find_loc(ptr, mode)
        self.instructions[loc] = value

    def _find_loc(self, ptr, mode):
        # mode 1: immediate (the pointer itself); mode 0: position (indirect);
        # mode 2: relative (indirect plus the relative base).
        if mode == 1:
            return ptr
        elif mode == 0:
            return self.get(ptr, 1)
        elif mode == 2:
            return self.get(ptr, 1) + self.rel_base
class Robot:
    """A hull-painting robot driven by an IntCode program.

    Tracks its position on an unbounded grid, the color painted on each
    visited panel, and how many times each panel was painted.
    """
    def __init__(self, prog, initial_panel_color=0):
        self.location = (0, 0)
        # panel (row, col) -> color (0 black / 1 white); unvisited default 0.
        self.panel_colors = {(0, 0): initial_panel_color}
        self.times_painted = {(0, 0): 0}
        # Orientation in degrees; 0/90/180/270 select the four moves below.
        self.orientation = 0
        self.prog = IntCodeProgram(prog)
    def paint_panel(self, color_int):
        """Paint the current panel and bump its paint counter."""
        self.panel_colors[self.location] = color_int
        self.times_painted[self.location] += 1
    def move(self, right):
        """Turn 90 degrees (right if truthy, else left) and step forward once."""
        if right:
            self.orientation = (self.orientation + 90) % 360
        else:
            self.orientation = (self.orientation - 90) % 360
        if self.orientation == 0:
            self.location = (self.location[0] - 1, self.location[1])
        elif self.orientation == 90:
            self.location = (self.location[0], self.location[1] + 1)
        elif self.orientation == 180:
            self.location = (self.location[0] + 1, self.location[1])
        elif self.orientation == 270:
            self.location = (self.location[0], self.location[1] - 1)
        # Register the destination panel so later paints/reads have defaults.
        if self.location not in self.panel_colors:
            self.panel_colors[self.location] = 0
            self.times_painted[self.location] = 0
    def get_n_tiles_painted(self):
        """Return the number of panels painted at least once."""
        tiles_painted = 0
        for tile in self.times_painted:
            if self.times_painted[tile] > 0:
                tiles_painted += 1
        return tiles_painted
    def print_hull(self):
        """Render the painted hull to stdout (white panels as full blocks)."""
        # Track the bounding box of visited panels while flattening the
        # sparse dict into parallel coordinate/color lists.
        row_dim = [0, 0]
        column_dim = [0, 0]
        tile_rows = []
        tile_columns = []
        colors = []
        for tile in self.panel_colors:
            if tile[0] > row_dim[1]:
                row_dim[1] = tile[0]
            elif tile[0] < row_dim[0]:
                row_dim[0] = tile[0]
            if tile[1] > column_dim[1]:
                column_dim[1] = tile[1]
            elif tile[1] < column_dim[0]:
                column_dim[0] = tile[1]
            tile_rows.append(tile[0])
            tile_columns.append(tile[1])
            colors.append(self.panel_colors[tile])
        tile_rows = np.array(tile_rows)
        tile_columns = np.array(tile_columns)
        hull = np.zeros(
            (row_dim[1] - row_dim[0] + 1, column_dim[1] - column_dim[0] + 1), dtype=int
        )
        # Shift coordinates so the bounding box starts at (0, 0).
        hull[tile_rows - row_dim[0], tile_columns - column_dim[0]] = colors
        # Assumes text color is white on the system
        print(
            "\n".join(
                [
                    "".join([str(d) for d in row])
                    .replace("0", " ")
                    .replace("1", u"\u2588")
                    for row in hull
                ]
            )
        )
def add(prog, modes):
    """Opcode 1: write param0 + param1 into the location named by param2.

    The stray ``return None`` from the original was dropped for consistency
    with the other opcode handlers, which all return None implicitly.
    """
    n_params = 3
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (n_params - len(modes))
    prog.set(
        prog.instr_ptr + 2,
        modes[2],
        prog.get(prog.instr_ptr, modes[0]) + prog.get(prog.instr_ptr + 1, modes[1]),
    )
    prog.instr_ptr += n_params
def mult(prog, modes):
    """Opcode 2: write param0 * param1 into the location named by param2."""
    n_params = 3
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (n_params - len(modes))
    base = prog.instr_ptr
    left = prog.get(base, modes[0])
    right = prog.get(base + 1, modes[1])
    prog.set(base + 2, modes[2], left * right)
    prog.instr_ptr = base + n_params
def inp(prog, modes):
    """Opcode 3: store the pending input value at the location named by param0."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (1 - len(modes))
    prog.set(prog.instr_ptr, modes[0], prog.input)
    prog.instr_ptr += 1
def outp(prog, modes):
    """Opcode 4: read param0 and latch it as the program's output."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (1 - len(modes))
    prog.output = prog.get(prog.instr_ptr, modes[0])
    prog.instr_ptr += 1
def jump_if_true(prog, modes):
    """Opcode 5: jump to param1 if param0 is non-zero.

    Bug fix: the original tested ``> 0``, which fails to jump on negative
    values; the IntCode specification (AoC 2019 day 5) says "non-zero".
    """
    n_params = 2
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (n_params - len(modes))
    if prog.get(prog.instr_ptr, modes[0]) != 0:
        prog.instr_ptr = prog.get(prog.instr_ptr + 1, modes[1])
    else:
        prog.instr_ptr += n_params
def jump_if_false(prog, modes):
    """Opcode 6: jump to param1 if param0 is zero."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (2 - len(modes))
    base = prog.instr_ptr
    if prog.get(base, modes[0]) == 0:
        prog.instr_ptr = prog.get(base + 1, modes[1])
    else:
        prog.instr_ptr = base + 2
def less_than(prog, modes):
    """Opcode 7: write 1 to param2's location if param0 < param1, else 0."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (3 - len(modes))
    base = prog.instr_ptr
    flag = 1 if prog.get(base, modes[0]) < prog.get(base + 1, modes[1]) else 0
    prog.set(base + 2, modes[2], flag)
    prog.instr_ptr = base + 3
def equals(prog, modes):
    """Opcode 8: write 1 to param2's location if param0 == param1, else 0."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (3 - len(modes))
    base = prog.instr_ptr
    flag = 1 if prog.get(base, modes[0]) == prog.get(base + 1, modes[1]) else 0
    prog.set(base + 2, modes[2], flag)
    prog.instr_ptr = base + 3
def adj_rel_base(prog, modes):
    """Opcode 9: shift the relative base by param0."""
    # Missing modes default to 0 (position mode).
    modes = modes + [0] * (1 - len(modes))
    prog.rel_base = prog.rel_base + prog.get(prog.instr_ptr, modes[0])
    prog.instr_ptr += 1
# Dispatch table: IntCode opcode -> handler. Opcode 99 (halt) is handled
# separately in run_robot.
operations = {
    1: add,
    2: mult,
    3: inp,
    4: outp,
    5: jump_if_true,
    6: jump_if_false,
    7: less_than,
    8: equals,
    9: adj_rel_base,
}
def run_robot(robot):
    """Execute the robot's IntCode program until it halts (opcode 99).

    Opcode 3 reads the color under the robot; opcode 4 outputs alternate
    between a paint color and a turn direction.
    """
    end_of_program = False
    output_type = "color"
    while not end_of_program:
        # Decode the instruction: the last two digits are the opcode, the
        # remaining digits (reversed) are the per-parameter modes.
        digits = [int(d) for d in str(robot.prog.get(robot.prog.instr_ptr, 1))]
        if len(digits) == 1:
            op_mode = digits[-1]
        else:
            op_mode = digits[-2] * 10 + digits[-1]
        if op_mode == 99:
            return
        else:
            modes = digits[-3::-1]
            robot.prog.instr_ptr += 1
            if op_mode == 3:
                # Camera input: color of the panel under the robot.
                robot.prog.input = robot.panel_colors[robot.location]
            operations[op_mode](robot.prog, modes)
            if op_mode == 4:
                # Outputs alternate: first a paint color, then a turn.
                if output_type == "color":
                    robot.paint_panel(robot.prog.output)
                    output_type = "direction"
                elif output_type == "direction":
                    robot.move(robot.prog.output)
                    output_type = "color"
# Puzzle input: one line of comma-separated IntCode integers.
file = data_folder / "input.txt"
instrs = [int(instr) for instr in file.read_text().split(",")]
def main():
    """Solve AoC 2019 day 11: run the painting robot for both parts."""
    # Part 1: start on a black panel and count panels painted at least once.
    robot = Robot(instrs, 0)
    run_robot(robot)
    print("Part 1:")
    print(f"{robot.get_n_tiles_painted()} tiles were painted at least once.")
    print()
    # Part 2: start on a white panel; the painted hull spells the answer.
    robot = Robot(instrs, 1)
    run_robot(robot)
    print("Part 2:")
    print(f"The completed hull looks like:")
    robot.print_hull()
if __name__ == "__main__":
main()
| eirikhoe/advent-of-code | 2019/11/sol.py | sol.py | py | 7,361 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
44958344082 | # coding=utf-8
#
# created by kpe on 04.11.2019 at 2:07 PM
#
from __future__ import division, absolute_import, print_function
import unittest
import os
import tempfile
import numpy as np
import tensorflow as tf
import bert
from .test_common import AbstractBertTest, MiniBertFactory
class TestExtendSegmentVocab(AbstractBertTest):
    """Checks loading stock BERT weights with an extended token vocabulary."""
    def setUp(self) -> None:
        # Eager mode makes the mini-BERT graph easier to build and debug.
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.enable_eager_execution()
        print("Eager Execution:", tf.executing_eagerly())
    def test_extend_pretrained_tokens(self):
        """Stock weight loading must succeed when extra_tokens_vocab_size is set."""
        model_dir = tempfile.TemporaryDirectory().name
        os.makedirs(model_dir)
        save_path = MiniBertFactory.create_mini_bert_weights(model_dir)
        tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file=os.path.join(model_dir, "vocab.txt"), do_lower_case=True)
        ckpt_dir = os.path.dirname(save_path)
        bert_params = bert.params_from_pretrained_ckpt(ckpt_dir)
        self.assertEqual(bert_params.token_type_vocab_size, 2)
        bert_params.extra_tokens_vocab_size = 3
        l_bert = bert.BertModelLayer.from_params(bert_params)
        # we dummy call the layer once in order to instantiate the weights
        l_bert([np.array([[1, 1, 0]]), np.array([[1, 0, 0]])], mask=[[True, True, False]])
        mismatched = bert.load_stock_weights(l_bert, save_path)
        self.assertEqual(0, len(mismatched), "token_type embeddings should have mismatched shape")
        # Negative token ids appear to index the extra-tokens table -- TODO confirm.
        l_bert([np.array([[1, -3, 0]]), np.array([[1, 0, 0]])], mask=[[True, True, False]])
| kpe/bert-for-tf2 | tests/test_extend_tokens.py | test_extend_tokens.py | py | 1,556 | python | en | code | 802 | github-code | 1 | [
{
"api_name": "test_common.AbstractBertTest",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.reset_default_graph",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 24,
"usage_type": "attribute"
... |
5153499389 | import matplotlib.pyplot as plt
import numpy as np
# Read one space-separated line of error values; the trailing '' produced by
# the final separator is dropped by [:-1]. `with` guarantees the handle is
# closed even if readline raises (the original closed it manually).
with open('/home/zt/Maillage/sibson/errors.txt', 'r') as fp:
    errors = fp.readline().split(' ')[:-1]
# Reshape into 3 parallel series (one per row after transposing).
# NOTE(review): values stay as strings here -- consider converting to float.
errors = np.array(errors).reshape((-1, 3)).T
plt.figure(figsize=(15, 8))
plt.plot(errors[0, :-10], color = 'red')
plt.plot(errors[1, :-10], color = 'green')
plt.plot(errors[2, :-10], color = 'blue')
plt.xlabel('iteration')
plt.ylabel('mae')
plt.grid()
plt.title('Training Error')
plt.show()
| Tong-ZHAO/sibson_interpolation | draw_figure.py | draw_figure.py | py | 448 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
18518116189 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds a `category` choice field and an optional `link` URL to Todo."""
    dependencies = [
        ('todo', '0003_auto_20140919_2201'),
    ]
    operations = [
        migrations.AddField(
            model_name='todo',
            name='category',
            # Defaults every existing row to 'Do Now'.
            field=models.CharField(default=b'Do Now', max_length=32, choices=[(b'Do Now', b'Do Now'), (b'Later', b'later')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='todo',
            name='link',
            field=models.URLField(default=None, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| joshgachnang/djangle | example/todo/migrations/0004_auto_20140920_0016.py | 0004_auto_20140920_0016.py | py | 710 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
39415421623 | import xml.etree.ElementTree as ET
from ..track import Track
def parser(
    file_path,
    *,
    require_title=True,
    require_duration=False,
    require_year=False,
    require_bpm=False,
    require_fp=False,
    default_artist="",
    verbose=False,
):
    """Parse a Traktor collection XML file into a list of Track objects.

    Traktor supports:
    - title
    - artist
    - year
    - playtime
    - bpm

    File paths are unsupported, so ``require_fp=True`` raises
    NotImplementedError. The remaining require_* flags are accepted for
    interface parity but not enforced here -- TODO confirm intent.
    Entries that raise during extraction are skipped (see note below).
    """
    if require_fp:
        raise NotImplementedError("Traktor parser doesn't support file paths.")
    tracks = []
    traktor_xml = ""
    counter = 0
    with open(file_path) as file:
        traktor_xml = ET.fromstring(file.read())
    entries = traktor_xml.findall("COLLECTION/ENTRY")
    for track in entries:
        playtime = 0
        year = ""
        bpm = 0
        try:
            track_title = track.get("TITLE", "").strip()
            track_artist = track.get("ARTIST", "").strip()
            if not track_artist:
                track_artist = default_artist
            meta = track.find("INFO")
            if meta is not None:
                # NOTE(review): int("") raises ValueError when PLAYTIME is
                # absent, which skips the whole entry via the except below.
                playtime = int(meta.get("PLAYTIME", ""))
                # key = meta.get("KEY", "")
                year = meta.get("RELEASE_DATE", "")
                if verbose: # pragma: no cover
                    print(f"found year: {year}, playtime: {playtime}")
            tempometa = track.find("TEMPO")
            if tempometa is not None:
                bpm = int(float(tempometa.get("BPM", 0)))
            tracks.append(
                Track(
                    title=track_title,
                    artist=track_artist,
                    year=year,
                    duration=playtime,
                    bpm=bpm,
                )
            )
        except Exception as e: # pragma: no cover
            if verbose:
                # counter is the entry index, not a file line number.
                print(f"Skipping line {counter}", e)
        counter += 1
    return tracks
| slipmatio/playlistparser | src/playlistparser/parsers/traktor.py | traktor.py | py | 1,862 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "track.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "trac... |
75201417632 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Created on 2017/12/16
__author__ = "Jianguo Jin (jinjianguosky@hotmail.com)"
"""
Description:
"""
import unittest
from selenium import webdriver
class SearchTest(unittest.TestCase):
    """Unit-test version of the Tmall product search ("天猫产品搜索")."""

    def test_search_by_name(self):
        """Search Tmall for '小米 Note' and check the result count."""
        driver = webdriver.Chrome()
        # Bug fix: the browser was never closed, leaking a Chrome process
        # per run; addCleanup guarantees quit() even when the test fails.
        self.addCleanup(driver.quit)
        driver.implicitly_wait(20)
        driver.get('https://www.tmall.com')
        search_field = driver.find_element_by_name('q')
        search_field.clear()
        search_field.send_keys('小米 Note')
        search_field.submit()
        products = driver.find_elements_by_xpath('//*[@id="J_ItemList"]/div/div/div/a[1]')
        # NOTE(review): asserting an exact count (158) against the live site
        # is brittle; consider asserting the list is non-empty instead.
        self.assertEqual(158, len(products))
if __name__ == '__main__':
unittest.main()
| skyaiolos/SeleniumWithPython | sec26_unittestWithselenium/search_test.py | search_test.py | py | 803 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "u... |
7728542412 | import xarray as xa
from data import eisc
from data import zc
from data import dcm
test_run = True
input_dir = "/discover/nobackup/projects/eis_freshwater/swang9/OL_1km/OUTPUT.RST.2013"
# In test mode only March 2013 is scanned; otherwise every month matches.
month = "201303" if test_run else "*"
eisc( cache = "/gpfsm/dnb43/projects/p151/zarr", mode = "eis.freshwater.swang9" )
# NOTE(review): `input` shadows the builtin; the "{time}" placeholder is
# presumably expanded downstream by zc().get_input -- verify.
input = f"{input_dir}/SURFACEMODEL/{month}/LIS_HIST" + "_{time}.d01.nc"
if __name__ == '__main__':
    dcm().init_cluster()
    dset: xa.Dataset = zc().get_input( input, merge_dim="time" )
    # Print each timestamp alongside the source file it was read from.
    time = dset['time'].values
    path = dset['_eis_source_path'].values
    for idx in range(time.size):
        print( f"{time[idx]}: {path[idx]}")
| nasa-nccs-cds/eis_smce | workflows/input_info.py | input_info.py | py | 654 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "data.eisc",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "data.dcm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xarray.Dataset",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "data.zc",
"line_number": 15... |
986799681 | import os
import sys
import shutil
import time
import random
import torch
import logging
from pathlib import Path
import numpy as np
import statistic
from torch import multiprocessing
from torch.nn import functional as F
import nibabel as nib
from tensorboardX import SummaryWriter
from skimage.measure import label
def mkdir(path, level=2, create_self=True):
    """Create up to ``level`` directories along *path*'s parent chain.

    Starting from *path* (included only when ``create_self`` is True) and
    walking upward, ``level`` directories in total are collected and then
    created outermost-first with ``mkdir(exist_ok=True)``. Pass
    ``create_self=False`` when *path* names a file that should not itself
    become a directory.

    :param path: string
    :param level: int, how many levels of the hierarchy to create
    :param create_self: True or False
    :return: None
    """
    target = Path(path)
    pending = [target] if create_self else []
    remaining = level - 1
    while remaining:
        target = target.parent
        pending.append(target)
        remaining -= 1
    # Create from the outermost ancestor down so every parent exists first.
    for folder in reversed(pending):
        folder.mkdir(exist_ok=True)
def seed_reproducer(seed=2022):
    """Seed all RNGs (Python, NumPy, PyTorch, CUDA) for reproducible runs.

    Parameters
    ----------
    seed: int, optional (default = 2022)
        Random seed.

    Notes
    -----
    Setting PYTHONHASHSEED at runtime does not change the already-running
    interpreter's string hashing; it only propagates to subprocesses.
    The cuDNN flags are only touched when CUDA is available.

    Example
    -------
    seed_reproducer(seed=2020).
    """
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # seed every visible GPU
        torch.backends.cudnn.deterministic = True
        # Benchmark autotuning can select nondeterministic kernels; keep it
        # off for reproducibility (at some cost in training speed).
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.enabled = True
def cutmix_config_log(save_path, tensorboard=False):
    """Build a (logger, writer) pair for a training run.

    Creates a file logger writing to ``<save_path>/log.txt`` plus a stdout
    stream handler; optionally also a TensorBoard ``SummaryWriter`` rooted
    at *save_path* (``writer`` is None when ``tensorboard`` is False).

    Bug fix: the original called ``handler.setFormatter`` a second time
    instead of ``sh.setFormatter``, leaving console output unformatted.
    """
    writer = SummaryWriter(str(save_path), filename_suffix=time.strftime('_%Y-%m-%d_%H-%M-%S')) if tensorboard else None
    save_path = str(Path(save_path) / 'log.txt')
    formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(message)s')
    # The logger is named after the run folder so repeated calls reuse it.
    logger = logging.getLogger(save_path.split('/')[-2])
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(save_path)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    sh = logging.StreamHandler(sys.stdout)
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    return logger, writer
class AverageMeter(object):
    """Track a stream of values: latest value, running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic; returns self so calls can be chained."""
        self.val = self.avg = self.sum = self.count = 0
        return self

    def update(self, val, n=1):
        """Fold *val* into the running statistics (count weighted by *n*); returns self."""
        self.val = val
        self.sum = self.sum + val
        self.count = self.count + n
        self.avg = self.sum / self.count
        return self
class Measures():
    """Base container mapping metric names to AverageMeter instances.

    Subclasses define update/log behaviour; *writer* is a tensorboard
    SummaryWriter (or None) and *logger* a stdlib logger.
    """
    def __init__(self, keys, writer, logger):
        self.keys = keys
        self.measures = {k: AverageMeter() for k in self.keys}
        self.writer = writer
        self.logger = logger
    def reset(self):
        """Reset every underlying meter."""
        # Plain loop: the original used a list comprehension purely for its
        # side effects, building and discarding a throwaway list.
        for meter in self.measures.values():
            meter.reset()
class CutPreMeasures(Measures):
    """Measures for the pre-training phase: CE/Dice losses plus train dice."""
    def __init__(self, writer, logger):
        keys = ['ce_loss', 'dice_loss', 'loss_all', 'train_dice']
        super(CutPreMeasures, self).__init__(keys, writer, logger)
    def update(self, out, lab, *args):
        """Record loss values (*args, in ``keys`` order) and the dice of *out* vs *lab*."""
        args = list(args)
        masks = get_mask(out)
        train_dice1 = statistic.dice_ratio(masks, lab)
        args.append(train_dice1)
        dict_variables = dict(zip(self.keys, args))
        for k, v in dict_variables.items():
            self.measures[k].update(v)
    def log(self, epoch, step):
        """Log latest values via the logger and running averages to tensorboard."""
        # self.logger.info('epoch : %d, step : %d, train_loss: %.4f, train_dice: %.4f' % (
        # epoch, step, self.measures['loss_all'].avg, self.measures['train_dice'].avg))
        log_string, params = 'Epoch : {}', []
        for k in self.keys:
            log_string += ', ' + k + ': {:.4f}'
            params.append(self.measures[k].val)
        self.logger.info(log_string.format(epoch, *params))
        for k, measure in self.measures.items():
            k = 'pretrain/' + k
            self.writer.add_scalar(k, measure.avg, step)
        self.writer.flush()
def get_mask(out, thres=0.5):
    """Threshold the softmax foreground probability of *out* into a binary mask.

    Softmax runs over dim 1 (assumed to be a 2-class channel axis -- TODO
    confirm); the result is channel 1's indicator with the channel
    dimension dropped, made contiguous.
    """
    foreground = F.softmax(out, 1)[:, 1]
    return (foreground >= thres).float().contiguous()
def save_net_opt(net, optimizer, path, epoch):
    """Checkpoint model weights, optimizer state and the epoch number to *path*."""
    checkpoint = {
        'net': net.state_dict(),
        'opt': optimizer.state_dict(),
        'epoch': epoch,
    }
    torch.save(checkpoint, str(path))
def load_net_opt(net, optimizer, path):
    """Restore model weights and optimizer state saved by ``save_net_opt``."""
    checkpoint = torch.load(str(path))
    net.load_state_dict(checkpoint['net'])
    optimizer.load_state_dict(checkpoint['opt'])
def save_net(net, path):
    """Save only the model weights (no optimizer state) to *path*."""
    torch.save({'net': net.state_dict()}, str(path))
def load_net(net, path):
    """Load model weights saved by ``save_net`` into *net* in place."""
    net.load_state_dict(torch.load(str(path))['net'])
def generate_mask(img, patch_size):
    """Sample one random ``patch_size``^3 cube inside a 96^3 volume.

    Returns ``(mask, loss_mask)``: ``mask`` is a single 96^3 long tensor
    that is 0 inside the cube and 1 elsewhere; ``loss_mask`` repeats the
    same pattern once per batch item of *img*. Requires a CUDA device
    (tensors are allocated with ``.cuda()``). Assumes *img* is a batch of
    96^3 volumes -- TODO confirm against callers.
    """
    batch_l = img.shape[0]
    #batch_unlab = unimg.shape[0]
    loss_mask = torch.ones(batch_l, 96, 96, 96).cuda()
    #loss_mask_unlab = torch.ones(batch_unlab, 96, 96, 96).cuda()
    mask = torch.ones(96, 96, 96).cuda()
    # Uniformly sample the cube's minimum corner so the cube fits in 96^3.
    w = np.random.randint(0, 96 - patch_size)
    h = np.random.randint(0, 96 - patch_size)
    z = np.random.randint(0, 96 - patch_size)
    mask[w:w+patch_size, h:h+patch_size, z:z+patch_size] = 0
    loss_mask[:, w:w+patch_size, h:h+patch_size, z:z+patch_size] = 0
    #loss_mask_unlab[:, w:w+patch_size, h:h+patch_size, z:z+patch_size] = 0
    #cordi = [w, h, z]
    return mask.long(), loss_mask.long()
def config_log(save_path, tensorboard=False):
    """Set up a file + stdout logger (and optionally a TensorBoard writer).

    Args:
        save_path: directory where 'log.txt' (and TB event files) are written.
        tensorboard: when True, also create a SummaryWriter in save_path.

    Returns:
        (logger, writer): the configured logger and the SummaryWriter or None.
    """
    writer = SummaryWriter(str(save_path), filename_suffix=time.strftime('_%Y-%m-%d_%H-%M-%S')) if tensorboard else None

    save_path = str(Path(save_path) / 'log.txt')
    formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(message)s')

    # Name the logger after the run directory so runs get distinct loggers.
    logger = logging.getLogger(save_path.split('/')[-2])
    logger.setLevel(logging.INFO)

    handler = logging.FileHandler(save_path)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    sh = logging.StreamHandler(sys.stdout)
    # BUG FIX: the formatter was previously re-applied to the file handler,
    # leaving the stdout handler without any formatting.
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    return logger, writer
class CutmixFTMeasures(Measures):
    """Metric tracker for the CutMix fine-tuning stage (no dice computed)."""

    def __init__(self, writer, logger):
        # Keys must match the positional order of values passed to update().
        keys = ['mix_loss_lab', 'mix_loss_unlab', 'loss_all']
        super(CutmixFTMeasures, self).__init__(keys, writer, logger)

    def update(self, *args):
        """Record one step of loss values, given in the order of self.keys."""
        args = list(args)
        # masks = get_mask(out[0])
        # train_dice = statistic.dice_ratio(masks, lab)
        # args.append(train_dice)
        dict_variables = dict(zip(self.keys, args))
        for k, v in dict_variables.items():
            self.measures[k].update(v)

    def log(self, epoch, step):
        """Emit current metrics to the text logger and TensorBoard writer."""
        # self.logger.info('epoch : %d, step : %d, train_loss: %.4f, train_dice: %.4f' % (
        # epoch, step, self.measures['loss_all'].avg, self.measures['train_dice'].avg))
        log_string, params = 'Epoch : {}', []
        for k in self.keys:
            log_string += ', ' + k + ': {:.4f}'
            # NOTE(review): text log uses .val, writer uses .avg — confirm intended.
            params.append(self.measures[k].val)
        self.logger.info(log_string.format(epoch, *params))
        for k, measure in self.measures.items():
            # NOTE(review): scalars stay under 'pretrain/' even in fine-tuning;
            # presumably copied from CutPreMeasures — verify the tag is intended.
            k = 'pretrain/' + k
            self.writer.add_scalar(k, measure.avg, step)
        self.writer.flush()
def to_cuda(tensors, device=None):
    """Recursively move tensors in nested containers onto a device.

    Args:
        tensors: a tensor, or an arbitrarily nested list/tuple/dict of them;
            non-tensor leaves are returned unchanged. Tuples come back as
            lists (matching the original container-rebuild behavior).
        device: explicit target device; defaults to the current CUDA device.

    Returns:
        The same structure with every tensor moved.
    """
    if isinstance(tensors, (list, tuple)):
        return [to_cuda(item, device) for item in tensors]
    if isinstance(tensors, dict):
        return {key: to_cuda(value, device) for key, value in tensors.items()}
    if isinstance(tensors, torch.Tensor):
        return tensors.cuda() if device is None else tensors.to(device)
    return tensors
def get_cut_mask(out, thres=0.5, nms=True, connect_mode=1):
    """Threshold logits to an int64 foreground mask, optionally keeping only
    the largest connected component per sample.

    Args:
        out: logits with the class axis at dim 1 (foreground at index 1).
        thres: probability threshold applied after softmax.
        nms: when True, post-process with LargestCC_pancreas.
        connect_mode: connectivity forwarded to the connected-component filter.
    """
    foreground = F.softmax(out, dim=1).ge(thres).type(torch.int64)
    foreground = foreground[:, 1, :, :].contiguous()
    if nms == True:
        foreground = LargestCC_pancreas(foreground, connect_mode=connect_mode)
    return foreground
def LargestCC_pancreas(segmentation, connect_mode=1):
    """Keep only the largest connected component of each mask in the batch.

    Args:
        segmentation: batch of binary masks (tensor, any device).
        connect_mode: connectivity passed to skimage.measure.label.

    Returns:
        Float tensor on the GPU where each sample contains only its largest
        component (or the unchanged mask when it has no foreground).
    """
    N = segmentation.shape[0]
    batch_list = []
    for n in range(N):
        n_prob = segmentation[n].detach().cpu().numpy()
        # `label` assigns a distinct integer id to each connected region.
        labels = label(n_prob, connectivity=connect_mode)
        if labels.max() != 0:
            # Skip background (id 0) when picking the most frequent label;
            # the +1 maps the argmax over labels 1..K back to a label id.
            largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
        else:
            # No foreground: keep the (empty) mask unchanged.
            largestCC = n_prob
        batch_list.append(largestCC)
    return torch.Tensor(batch_list).cuda()
@torch.no_grad()
def update_ema_variables(model, ema_model, alpha):
    """In-place EMA update: ema <- alpha * ema + (1 - alpha) * model."""
    for tracked, live in zip(ema_model.parameters(), model.parameters()):
        tracked.data.mul_(alpha).add_(live.data, alpha=1 - alpha)
{
"api_name": "pathlib.Path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"lin... |
2234470277 | from django.urls import path
from django.http import HttpResponse
from django.template import Template, Context
def index(request):
    """Home page: links to the character list and about pages."""
    return HttpResponse('''
<h1>Welcome to my homepage</h1>
<a href="/my-favorite-characters">My favorite Game of Thrones characters</a> <br />
<a href="/about-me">About me</a> <br />
''')
def characters(request):
    """Static list of favorite characters, with a link back to the home page."""
    return HttpResponse('''
<h1>My favorite Game of Thrones Characters</h1>
<ul>
<li>Daenerys Targaryen</li>
<li>Jon Snow</li>
<li>Tyrion Lannister</li>
<li>Arya Stark</li>
</ul>
<hr />
<a href="/">Back to home page</a>
''')
def about_me(request):
    """Short about page, with a link back to the home page."""
    return HttpResponse('''
<h1>About me</h1>
<p>Just your average django dev</p>
<hr />
<a href="/">Back to home page</a>
''')
# URL routes. BUG FIX: the about route was 'ABout-/-/meeeee', which never
# matched the '/about-me' link emitted by index(), so that link 404'd.
urlpatterns = [
    path('', index),
    path('my-favorite-characters', characters),
    path('about-me', about_me),
]
# Boilerplate -- Don't worry about understanding anything from here down
def main():
    """Configure a minimal in-module Django settings object and hand off to
    Django's command-line entry point (runserver, etc.)."""
    import sys
    from django.conf import settings
    from django.core.management import execute_from_command_line

    settings.configure(
        DEBUG=True,
        # This module itself doubles as the URLconf via `urlpatterns`.
        ROOT_URLCONF=sys.modules[__name__],
    )
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| errinmarie/FeedBack | Documents/BACKEND/week5/5.3-heroku/activities/1-django/manage.py | manage.py | py | 1,391 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 29,
"usage_type": "call"
},
{
"api_nam... |
34015117419 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# 1. Import the dataset using Pandas from the given URL
url = 'https://raw.githubusercontent.com/SR1608/Datasets/main/covid-data.csv'
df = pd.read_csv(url)
# 2. High Level Data Understanding
# a. Find no. of rows & columns in the dataset
rows, columns = df.shape
print("Number of rows:", rows)
print("Number of columns:", columns)
# b. Data types of columns
print("Data types of columns:")
print(df.dtypes)
# c. Info & describe of data in dataframe
print("Info:")
print(df.info())
print("Describe:")
print(df.describe())
# 3. Low Level Data Understanding
# a. Find count of unique values in the location column
unique_locations = df['location'].nunique()
print("Count of unique values in the location column:", unique_locations)
# b. Find which continent has the maximum frequency using value counts
continent_frequency = df['continent'].value_counts()
max_continent_frequency = continent_frequency.idxmax()
print("Continent with maximum frequency:", max_continent_frequency)
# c. Find maximum & mean value in 'total_cases'
max_total_cases = df['total_cases'].max()
mean_total_cases = df['total_cases'].mean()
print("Maximum value in 'total_cases':", max_total_cases)
print("Mean value in 'total_cases':", mean_total_cases)
# d. Find 25%, 50%, and 75% quartile value in 'total_deaths'
quartiles = df['total_deaths'].quantile([0.25, 0.5, 0.75])
print("25% Quartile value in 'total_deaths':", quartiles[0.25])
print("50% Quartile value in 'total_deaths':", quartiles[0.5])
print("75% Quartile value in 'total_deaths':", quartiles[0.75])
# e. Find which continent has the maximum 'human_development_index'
max_hdi_continent = df.loc[df['human_development_index'].idxmax(), 'continent']
print("Continent with the maximum 'human_development_index':", max_hdi_continent)
# f. Find which continent has the minimum 'gdp_per_capita'
min_gdp_continent = df.loc[df['gdp_per_capita'].idxmin(), 'continent']
print("Continent with the minimum 'gdp_per_capita':", min_gdp_continent)
# 4. Filter the dataframe with only specific columns and update the data frame
columns_to_keep = ['continent', 'location', 'date', 'total_cases', 'total_deaths', 'gdp_per_capita', 'human_development_index']
df = df[columns_to_keep]
# 5. Data Cleaning
# a. Remove all duplicate observations
df = df.drop_duplicates()
# b. Find missing values in all columns
missing_values = df.isnull().sum()
print("Missing values in each column:")
print(missing_values)
# c. Remove all observations where continent column value is missing
df = df.dropna(subset=['continent'])
# d. Fill all missing values with 0
df = df.fillna(0)
# 6. Date time format
# a. Convert date column to datetime format
df['date'] = pd.to_datetime(df['date'])
# b. Create a new column 'month' by extracting month data from the date column
df['month'] = df['date'].dt.month
# 7. Data Aggregation
# a. Find max value in all columns using groupby function on 'continent' column
df_groupby = df.groupby('continent').max().reset_index()
# 8. Feature Engineering
# Create a new feature 'total_deaths_to_total_cases' by ratio of 'total_deaths' column to 'total_cases'
df_groupby['total_deaths_to_total_cases'] = df_groupby['total_deaths'] / df_groupby['total_cases']
# 9. Data Visualization
# a. Perform Univariate analysis on 'gdp_per_capita' column by plotting histogram using seaborn dist plot
sns.displot(df['gdp_per_capita'], kde=False)
plt.title('Histogram of GDP per capita')
plt.show()
# b. Plot a scatter plot of 'total_cases' & 'gdp_per_capita'
plt.scatter(df['total_cases'], df['gdp_per_capita'])
plt.xlabel('Total Cases')
plt.ylabel('GDP per capita')
plt.title('Scatter plot: Total Cases vs GDP per capita')
plt.show()
# c. Plot Pairplot on df_groupby dataset
sns.pairplot(df_groupby)
plt.title('Pairplot of df_groupby dataset')
plt.show()
# d. Plot a bar plot of 'continent' column with 'total_cases'
sns.catplot(x='continent', y='total_cases', kind='bar', data=df_groupby)
plt.title('Bar plot: Continent vs Total Cases')
plt.show()
# 10. Save the df_groupby dataframe in your local drive using pandas.to_csv function
df_groupby.to_csv('df_groupby.csv', index=False)
| Kempraju/santhosh | final project.py | final project.py | py | 4,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "seaborn.displot",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.t... |
2561956080 | from django.db import models
from django.contrib.auth.models import User
from final.settings import AUTH_USER_MODEL as User
# Question board
class Question(models.Model):
    """A question posted by a user."""
    # NOTE(review): at file level 'User' is re-bound to AUTH_USER_MODEL (a
    # settings string), shadowing django.contrib.auth's User — ForeignKey
    # accepts a model-path string, but confirm AUTH_USER_MODEL is configured.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    subject = models.CharField(max_length=200)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)

    # Display the question by its subject (e.g. in the admin list).
    def __str__(self):
        return self.subject
# Comment (answer) board
class Answer(models.Model):
    """An answer attached to a Question; removed with its question or author."""
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": ... |
21004978153 | from streamer.database import StreamerDB
from pymongo.errors import WriteError
class StreamerUsers:
    def __init__(self):
        """
        StreamerUsers wraps the mongo collection holding per-user documents.

        Functions:
            insert_user: insert a document for a user who started the bot.
            get_user: return the document for the given telegram user id.
            add_stream_loc / remove_stream_loc / get_stream_loc: manage the
                user's saved stream locations.
        """
        self.user_collection = StreamerDB().db['Users']

    async def insert_user(self, user_id: int):
        """Create a user document unless one already exists."""
        if self.user_collection.count_documents({'user_id': user_id}) > 0:
            return
        self.user_collection.insert_one({
            'user_id': user_id,
            'stream_locations': [],
        })

    async def get_user(self, user_id: int):
        """Return the full user document (or None if missing)."""
        return self.user_collection.find_one({'user_id': user_id})

    async def add_stream_loc(self, user_id: int, stream_chat_id: int,
                             name: str, stream_url: str, stream_key: str):
        """Append a stream location to the user's list.

        Returns:
            (update_result, None) on success, (None, error_details) on failure.
        """
        try:
            res = self.user_collection.update_one({'user_id': user_id}, {
                '$push': {
                    'stream_locations': {
                        'name': name,
                        'stream_url': stream_url,
                        'stream_key': stream_key,
                        'stream_chat_id': stream_chat_id
                    }
                }
            })
        except WriteError as e:
            # BUG FIX: previously `None, e.details` was a no-op tuple
            # expression; execution fell through to `return res, None` with
            # `res` unbound, raising NameError instead of reporting the error.
            return None, e.details
        return res, None

    async def remove_stream_loc(self,
                                user_id: int,
                                stream_chat_id: int = None):
        """Remove one stream location (by chat id) or, if none given, all.

        Returns:
            (update_result, None) on success, (None, error_details) on failure.
        """
        try:
            if stream_chat_id:
                query = {
                    '$pull': {
                        'stream_locations': {
                            'stream_chat_id': stream_chat_id
                        }
                    }
                }
            else:
                query = {'$set': {'stream_locations': []}}
            res = self.user_collection.update_one({'user_id': user_id}, query)
        except WriteError as e:
            # BUG FIX: same missing `return` as in add_stream_loc.
            return None, e.details
        return res, None

    async def get_stream_loc(self, user_id: int, stream_chat_id: int = None):
        """Return all stream locations, or only the one matching stream_chat_id."""
        res = (await self.get_user(user_id))['stream_locations']
        if not stream_chat_id:
            return res
        return next(
            (loc
             for loc in res if loc['stream_chat_id'] == stream_chat_id),
            None)
| Samfun75/SamfunStreamerBot | streamer/database/users.py | users.py | py | 2,611 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "streamer.database.StreamerDB",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pymongo.errors.WriteError",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pymongo.errors.WriteError",
"line_number": 65,
"usage_type": "name"
}
] |
71530148513 | import random
import requests
import time
class LED_manager:
    """Singleton managing 15 virtual LED segments and pushing their state to a
    WLED controller over HTTP."""

    def __new__(cls):
        # Singleton: always hand back the one shared instance.
        # NOTE(review): __init__ still runs on every LED_manager() call and
        # resets self.leds — confirm that reset is intended.
        if not hasattr(cls, 'instance'):
            cls.instance = super(LED_manager, cls).__new__(cls)
        return cls.instance

    def __init__(self) -> None:
        # One LED object per segment; list index == segment position.
        self.leds = []
        self.create_leds()

    def configure_wled(self,url, settings):
        # POST the assembled JSON state to the WLED HTTP API; errors are
        # printed, not raised.
        try:
            response = requests.post(url, json=settings)
            response.raise_for_status()
            print("Konfigurace úspěšně odeslána.")
        except requests.exceptions.RequestException as e:
            print("Chyba při odesílání požadavku:", e)

    def create_leds(self,count=15):
        # Create `count` LED segment objects at positions 0..count-1.
        for i in range(count):
            self.leds.append(LED(i))

    def set_color(self,led_id,rgb):
        self.leds[led_id].color = rgb

    def set_brightness(self,led_id,brightness):
        self.leds[led_id].brightness = brightness

    def set_state(self,led_id,state):
        self.leds[led_id].state = state

    def settings(self):
        # Build the WLED JSON payload: one single-pixel segment per LED
        # (stop is exclusive, hence position..position+1).
        settings = {"seg": []}
        for led in self.leds:
            settings["seg"].append({
                "on":led.state,
                "bri":led.brightness,
                "col":[[led.color[0],led.color[1],led.color[2],0],[],[]],
                "start":led.position,
                "stop": led.position+1
            })
        return settings

    def change_state(self,device_id:int, state: bool, rgb: list, brightness=128 ):
        # NOTE(review): `device_id` is accepted but unused — the loop below
        # updates all 15 LEDs regardless; confirm whether per-device updates
        # were intended.
        wled_url = "http://10.10.3.24/json/state"  # Replace with the WLED IP address and port
        for i in range(15):
            self.set_state(i,state)
            self.set_color(i,rgb)
            self.set_brightness(i,brightness)
        self.configure_wled(wled_url,self.settings())
class LED():
    """State of one LED segment: on/off flag, RGB color, brightness, position."""

    def __init__(self, position) -> None:
        # Defaults: switched on, full blue, mid brightness.
        self.position = position
        self.state = True
        self.brightness = 128
        self.color = [0, 0, 255]
manager = LED_manager()
"""
time.sleep(1)
manager.change_state(1,False,[255,0,0])
time.sleep(1)
manager.change_state(1,True,[0,255,0],brightness=255)
time.sleep(1)
manager.change_state(1,True,[0,0,255])
manager.set_color(2,[0,0,0])
manager.change_state(2,True,[255,0,0])
manager.change_state(3,True,[0,255,0])
""" | hackaton-ssipf/backend | WLED/main.py | main.py | py | 2,278 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 22,
"usage_type": "attribute"
}
] |
145946204 | import sys
import os
import argparse
from spinalcordtoolbox.utils import Metavar, SmartFormatter, init_sct, display_viewer_syntax, printv
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.qmri.mt import compute_mtr
def get_parser():
parser = argparse.ArgumentParser(
description='Compute magnetization transfer ratio (MTR). Output is given in percentage.',
add_help=None,
formatter_class=SmartFormatter,
prog=os.path.basename(__file__).strip(".py"))
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-mt0',
required=True,
help='Image without MT pulse (MT0)',
metavar=Metavar.float,
)
mandatoryArguments.add_argument(
'-mt1',
required=True,
help='Image with MT pulse (MT1)',
metavar=Metavar.float,
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-thr",
type=float,
help="Threshold to clip MTR output values in case of division by small number. This implies that the output image"
"range will be [-thr, +thr]. Default: 100.",
default=100
)
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit"
)
optional.add_argument(
'-v',
type=int,
choices=(0, 1, 2),
help='Verbose: 0 = nothing, 1 = classic, 2 = expended',
default=1
)
optional.add_argument(
'-o',
help='Path to output file.',
metavar=Metavar.str,
default=os.path.join('.', 'mtr.nii.gz')
)
return parser
def main():
# Check input parameters
parser = get_parser()
args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
fname_mtr = args.o
verbose = args.v
# compute MTR
printv('\nCompute MTR...', verbose)
nii_mtr = compute_mtr(nii_mt1=Image(args.mt1), nii_mt0=Image(args.mt0), threshold_mtr=args.thr)
# save MTR file
nii_mtr.save(fname_mtr, dtype='float32')
display_viewer_syntax([args.mt0, args.mt1, fname_mtr])
if __name__ == "__main__":
init_sct()
main()
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITHUB_REPOS/neuropoly@spinalcordtoolbox/scripts/sct_compute_mtr.py | sct_compute_mtr.py | py | 2,243 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "spinalcordtoolbox.utils.SmartFormatter",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 15,
"usage_type": "call"
},
{
"a... |
40420564164 | from pathlib import Path
import string
data_folder = Path(".").resolve()
type_to_priority = dict(zip(string.ascii_letters, range(1, 53)))
def parse_data(data):
    """Convert the puzzle input into lists of item priorities, one per rucksack line."""
    return [
        [type_to_priority[item] for item in line]
        for line in data.split("\n")
    ]
def find_common_type(rucksack):
    """Return the item priority that appears in both halves of a rucksack."""
    half = len(rucksack) // 2
    first, second = set(rucksack[:half]), set(rucksack[half:])
    return next(iter(first & second))
def sum_elf_group_priorities(rucksacks):
    """Sum the badge priority (item common to every rucksack) over groups of three."""
    group_size = 3
    total = 0
    for start in range(0, len(rucksacks), group_size):
        group = rucksacks[start:start + group_size]
        badge = next(iter(set.intersection(*map(set, group))))
        total += badge
    return total
def main():
    """Solve both puzzle parts from input.txt and print the results."""
    data = data_folder.joinpath("input.txt").read_text()
    rucksacks = parse_data(data)

    print("Part 1")
    # Part 1: sum the priority of the item shared by each rucksack's halves.
    priorities_sum = sum([find_common_type(rucksack) for rucksack in rucksacks])
    print(f"The sum of priorities of the common item types in both compartments is {priorities_sum}")

    print("Part 2")
    # Part 2: sum the badge priorities across groups of three elves.
    priorities_sum = sum_elf_group_priorities(rucksacks)
    print(f"The sum of priorities of the common item types for each elf group is {priorities_sum}")


if __name__ == "__main__":
    main()
| eirikhoe/advent-of-code | 2022/03/sol.py | sol.py | py | 1,365 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 5,
"usage_type": "attribute"
}
] |
31936896058 | from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
def splitWith(rowString, rowIndex):
    """Parse a CSV row of ints into (colIndex, (rowIndex, value)) pairs.

    Keying by column lets matrix entries be joined against the column-indexed
    vector in the driver below.
    """
    values = [int(cell) for cell in rowString.split(',')]
    return [(col, (rowIndex, value)) for col, value in enumerate(values)]
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: matrixVectorMultiply <file>", file=sys.stderr)
        exit(-1)
    sc = SparkContext(appName="matrixVectorMultiply")
    inputFile = sc.textFile(sys.argv[1], 1)
    # First line is "rows,cols"; matrix rows follow, then the vector entries.
    rowCount,columnCount = map(int,inputFile.first().split(','))
    inputFileWithIndex = inputFile.zipWithIndex()
    # Lines 1..rowCount are the matrix; everything after is the vector.
    matrixInput = inputFileWithIndex.filter(lambda x : x[1] > 0 and x[1] <= rowCount)
    vectorInput = inputFileWithIndex.filter(lambda x : x[1] > rowCount)
    # Key the vector by column index so it joins against matrix columns.
    colIndexedVector = vectorInput.map(lambda x : (x[1] - rowCount -1,int(x[0])))
    colIndexedMatrix = matrixInput.flatMap(lambda x: splitWith(x[0],x[1]-1))
    # Multiply matching (column) entries, then sum products per row index.
    rowIndexedSum = colIndexedMatrix.join(colIndexedVector).map(lambda x: (x[1][0][0], x[1][0][1] * x[1][1])).reduceByKey(add)
    output = rowIndexedSum.sortByKey().map(lambda x: x[1]).collect()
    # Write the result vector, one component per line.
    with open('output.txt', 'w') as outputFile:
        for item in output:
            outputFile.write("{0}\n".format(item))
    sc.stop()
| alokparmesh/csep524 | hw4/matrixVectorMultiply.py | matrixVectorMultiply.py | py | 1,345 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"li... |
38959673751 | from PyQt5.QtWidgets import QWidget,QLineEdit,QHBoxLayout, QVBoxLayout
from UI.Components.button_container import ButtonContainer #buttonClickNoise
from UI.KeyboardPage.completer import suggestWords
groupedChars = ['abc | def | ghi',
'jkl | mno | pqr',
'stu | vwx | yz0',
'123 | 456 | 789']
groupedChars2 = [['a | b | c','d | e | f','g | h | i'],
['j | k | l','m | n | o','p | q | r'],
['s | t | u','v | w | x','y | z | 0'],
['1 | 2 | 3','4 | 5 | 6','7 | 8 | 9']]
class KeyboardWidget(QWidget):
def __init__(self, parent):
super().__init__(parent)
layout = QVBoxLayout()
self.setObjectName("Keyboard Page")
layout.addWidget(self.keyboardWidgetUpper(parent))
layout.addWidget(self.keyboardWidgetLower(parent))
self.setLayout(layout)
# #create auto completer
# self.completer = AutoCompleter()
# self.completer.setWidget(self)
def keyboardWidgetUpper(self,parent):
keyboardKeys = QWidget()
layout = QHBoxLayout()
keyboardKeys.setObjectName("Keyboard Widget")
buttons = []
for x in range(4):
button = ButtonContainer(groupedChars[x], freqName=f"Keyboard {x+1}", checkable=False)
buttons.append(button)
layout.addWidget(button)
buttons[0].clicked.connect(lambda: keyboardClick(parent,buttons, buttons[0]))
buttons[1].clicked.connect(lambda: keyboardClick(parent,buttons, buttons[1]))
buttons[2].clicked.connect(lambda: keyboardClick(parent,buttons, buttons[2], prediction=True))
buttons[3].clicked.connect(lambda: keyboardClick(parent,buttons, buttons[3], prediction=True))
keyboardKeys.setLayout(layout)
return keyboardKeys
def keyboardWidgetLower(self,parent):
sidebar = QWidget()
layout = QHBoxLayout()
buttons = []
sidebar.setObjectName("low keys")
toggleBtn = ButtonContainer("Toggle Words", freqName="Word Toggle", checkable=False)
toggleBtn.setObjectName("Toggle")
buttons.append(toggleBtn)
buttons.append(ButtonContainer("Space",freqName="Space",checkable=False))
buttons.append(ButtonContainer("Backspace",freqName="Backspace",checkable=False))
for button in buttons:
layout.addWidget(button)
buttons[0].clicked.connect(lambda: toggle(parent))
buttons[1].clicked.connect(lambda: space(parent))
buttons[2].clicked.connect(lambda: backspace(parent))
sidebar.setLayout(layout)
return sidebar
def clickedGroup(parent, buttons, text, level):
print(level)
if level == 1:
nextGroup = groupedChars2[groupedChars.index(text)]
elif level == 2:
nextGroup = list(text.split(' | '))
print(nextGroup)
print(len(buttons))
print("length")
for x in range(len(buttons) - 1):
print(x)
buttons[x].label.setText(nextGroup[x])
buttons[3].label.setText("Back")
def clickedBack(parent, buttons, text, level):
if level == 2:
for x in range(len(buttons)):
buttons[x].label.setText(groupedChars[x])
elif level == 3:
currentCharGroup = ""
for x in range(len(buttons) - 2):
currentCharGroup += buttons[x].label.text()
currentCharGroup += " | "
currentCharGroup += buttons[2].label.text()
print(currentCharGroup)
for group in groupedChars2:
if currentCharGroup in group:
for x in range(len(buttons)-1):
buttons[x].label.setText(groupedChars2[groupedChars2.index(group)][x])
break
def keyboardClick(parent,buttons,selected,prediction=False):
#buttonClickNoise()
toggleBtn = parent.findChild(ButtonContainer,"Toggle")
btnText = selected.label.text()
if btnText in groupedChars:
clickedGroup(parent, buttons, btnText, 1)
elif any(btnText in subl for subl in groupedChars2):
clickedGroup(parent, buttons, btnText, 2)
elif btnText == "Back":
if any(buttons[0].label.text() in subl for subl in groupedChars2):
clickedBack(parent, buttons, btnText, 2)
else:
clickedBack(parent, buttons, btnText, 3)
elif prediction == True: # Different button functionality when using GTP3 for prediction
writePredictionToInput(parent, buttons, btnText, charMode=toggleBtn.label.text() == "Toggle Words")
suggestWords(parent)
else:
writeToInput(parent, buttons, btnText)
def writeToInput(parent, buttons, text):
inputField = parent.findChild(QLineEdit,"Input")
prevText = inputField.text()
if len(text) == 1:
for x in range(len(buttons)):
buttons[x].label.setText(groupedChars[x])
else:
prevText = " ".join(inputField.text().split()[0:-1]) + " "
text = text + " "
temp = text
if prevText:
temp = prevText + text
inputField.setText(temp)
def writePredictionToInput(parent, buttons, text, charMode):
inputField = parent.findChild(QLineEdit,"Input")
prevText = inputField.text()
# Deletes space between any words and puncuations
if not text[0].isalpha():
print("NOT:" + text + "END")
prevText = prevText.rstrip()
temp = text
# If user is typing individual characters
if charMode == True and len(text) == 1:
for x in range(len(buttons)):
buttons[x].label.setText(groupedChars[x])
else:
temp += ' '
inputField.setText(temp)
def backspace(parent):
#buttonClickNoise()
inputField = parent.findChild(QLineEdit,"Input")
temp = inputField.text()
if len(temp) != 0 :
inputField.setText(temp[:-1])
def space(parent):
#buttonClickNoise()
inputField = parent.findChild(QLineEdit,"Input")
temp = inputField.text() + " "
inputField.setText(temp)
def toggle(parent):
#buttonClickNoise()
toggleBtn = parent.findChild(ButtonContainer,"Toggle")
keyboardWidget = parent.findChild(QWidget,"Keyboard Widget")
keyboardBtns = keyboardWidget.findChildren(ButtonContainer)
if toggleBtn.label.text() == "Toggle Words":
toggleBtn.label.setText("Toggle Characters")
# Request API call for GPT-3
suggestWords(parent)
keyLabels = ['word','word','this is a phrase','this is a phrase']
else:
toggleBtn.label.setText("Toggle Words")
keyLabels = groupedChars
for x in range(len(keyboardBtns)):
keyboardBtns[x].label.setText(keyLabels[x])
| WATOLINK/mind-speech-interface-ssvep | SSVEP-Interface/UI/KeyboardPage/KeyboardWidget.py | KeyboardWidget.py | py | 6,719 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 31,
"usage_type": "call"
},
{
"api_n... |
2733118265 | import io
from pathlib import Path
import flask
import numpy as np
import pandas as pd
from .utils import list_dir
from .model import define_model
class ServeConfig:
    """Filesystem layout for serving: model directory plus local asset dirs."""
    # NOTE(review): /opt/ml is presumably the SageMaker container mount —
    # confirm against the deployment environment.
    OPT_ML_DIR = Path("/opt/ml")
    MODELS_DIR = OPT_ML_DIR / "models"

    ASSETS_PATH = Path("./assets")
    # Directories are created eagerly at import time so request handlers can
    # assume they exist.
    ASSETS_PATH.mkdir(parents=True, exist_ok=True)
    AUDIO_PATH = ASSETS_PATH / "audio"
    AUDIO_PATH.mkdir(parents=True, exist_ok=True)
    META_PATH = ASSETS_PATH / "metadata"
    META_PATH.mkdir(parents=True, exist_ok=True)
class ScoringService(object):
    """
    A singleton for holding the model ensemble. This simply loads the models
    and holds them. It has a predict function that averages the ensemble's
    predictions for the input data.
    """

    models = None  # Lazy cache: the loaded model ensemble.

    @classmethod
    def get_model(cls):
        """Get the model objects for this instance, loading them if they are
        not already loaded."""
        if cls.models is None:
            models = []
            model_list = list_dir(ServeConfig.MODELS_DIR)
            for name in model_list:
                cnn = define_model()
                # BUG FIX: ServeConfig has no IN_MODELS_DIR attribute, so
                # loading weights raised AttributeError. Load from MODELS_DIR,
                # the same directory the model list was read from.
                cnn.load_weights(str(ServeConfig.MODELS_DIR / name))
                models.append(cnn)
            cls.models = models
        return cls.models

    @classmethod
    def predict(cls, input: pd.DataFrame):
        """For the input, do the predictions and return them.

        Args:
            input (a pandas dataframe): The data on which to do the
                predictions. There will be one prediction per row.

        Returns:
            numpy array with the ensemble-averaged prediction per row.
        """
        models = cls.get_model()
        res = np.zeros(input.shape[0], dtype=float)
        input_features = input.values
        for model in models:
            res += model.predict(input_features)
        # Average across ensemble members.
        output = res / len(models)
        return output
app = flask.Flask(__name__)
@app.route("/ping", methods=["GET"])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
health = ScoringService.get_model() is not None
status = 200 if health else 404
return flask.Response(response="\n", status=status, mimetype="application/json")
@app.route("/invocations", methods=["POST"])
def invocations():
data = None
# Convert from CSV to pandas
if flask.request.content_type == "text/csv":
data = flask.request.data.decode("utf-8")
s = io.StringIO(data)
data = pd.read_csv(s, header=None)
else:
return flask.Response(
response="This predictor only supports CSV data",
status=415,
mimetype="text/plain",
)
print("Invoked with {} records".format(data.shape[0]))
# Do the prediction
predictions = ScoringService.predict(data)
# Convert from numpy back to CSV
out = io.StringIO()
pd.DataFrame({"results": predictions}).to_csv(
out, header=False, index=False)
result = out.getvalue()
return flask.Response(response=result, status=200, mimetype="text/csv")
| tungbui198/sm-safe-deployment-aishield | container/code/predictor.py | predictor.py | py | 3,086 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utils.list_dir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "model.define_model",
"li... |
29366515053 | import numpy as np
import tools
import matplotlib.pyplot as plt
from importlib import reload
from time import sleep
from tqdm import tqdm
import pickle
import sys
def load_obj(name ):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
during = int(sys.argv[1]) # Define the simulation time
existing = int(sys.argv[2]) # Allow to load the right number of years simulations
N = int(sys.argv[3])
Nyears = int(sys.argv[4])
seuil = int(sys.argv[5])
d = load_obj('all_data_{}part_{}years_{}pc'.format(N, existing*Nyears, seuil))
#------------------------------------------
# Loading parameters
N = int(d['position'].shape[1])
Nyears = d['Nyears']
dt = d['dt']
G = 6.67e-11/dt**2
pc = d['pc'] #m
Msun = 1.989e30 #kg
taille = d['taille']
L = d['L']
m = d['m']
nb = d['nb']
rho = np.zeros(((len(d['time']), nb, nb)))
Potential = tools.Potential(G)
Update = tools.Update(G)
#------------------------------------------
def density(X, t, nb) :
    """2-D (x, y) particle-count histogram at time step t.

    Args:
        X: particle positions indexed [time, particle, coordinate].
        t: time index to histogram.
        nb: number of grid edges per axis; the grid extent comes from the
            module globals `taille` and `pc`.

    Returns:
        (nb, nb) array of particle counts per cell.
    """
    border = np.linspace(-1.5*taille*pc, 1.5*taille*pc, nb)
    dens = np.zeros((len(border), len(border)))
    # NOTE(review): `j`/`l` (the edge values) are unused; cells are bounded by
    # consecutive edges, so the last row/column of `dens` always stays zero,
    # and particles exactly on an edge are excluded by the strict
    # inequalities — np.histogram2d would handle both cases.
    for i, j in enumerate(border):
        for k, l in enumerate(border):
            if i != nb-1 :
                if k != nb-1 :
                    ind = (X[t, :, 0] > border[i]) & (X[t, :, 0] < border[i+1]) & (X[t, :, 1] > border[k]) & (X[t, :, 1] < border[k+1])
                    dens[i, k] = np.sum(ind)
    return dens
def pos_init(N, r_lim):
    """Draw N positions uniformly inside a sphere of radius r_lim.

    Rejection sampling: candidates are drawn uniformly in the bounding cube
    and kept only when their radius is strictly below r_lim.
    """
    positions = np.zeros((N, 3))
    accepted = 0
    while accepted < N:
        candidate = np.random.uniform(-r_lim, r_lim, (1, 3))
        radius = np.sqrt(candidate[0, 0]**2 + candidate[0, 1]**2 + candidate[0, 2]**2)
        if radius < r_lim:
            positions[accepted] = candidate
            accepted += 1
    return positions
old_pos = d['position']
old_vit = d['velocity']
old_rho = d['density']
new_pos = np.zeros(((during*d['position'].shape[0], N, 3)))
new_vit = np.zeros(((during*d['time'].shape[0], N, 3)))
new_rho = np.zeros(((during*len(d['time']), nb, nb)))
new_pos[:d['position'].shape[0]] = old_pos
new_vit[:d['velocity'].shape[0]] = old_vit
new_rho[:d['density'].shape[0]] = old_rho
#new_tab_pos = np.delete(new_tab_pos, len(d['time']), axis = 0)
#new_tab_vit = np.delete(new_tab_vit, len(d['time']), axis = 0)
time_array = np.arange(0, during*len(d['time'])*dt, dt)
#new_time = np.concatenate((d['time'], time_array), axis = 0)
#new_time = np.delete(new_time, len(d['time']))
print("\n\n======================== \n Computing trajectories \n======================== \n \n")
# Advance the system from the last stored step to the end of the extended
# time axis, one particle at a time.
for t in tqdm(range(len(d['time']), len(time_array))) :
    #print(t)
    if t != len(time_array)-1:
        #print('computing new positions!')
        for i in range(N):
            # Acceleration on particle i from the others; `seuil` is a
            # threshold parameter forwarded to tools.Update — TODO confirm
            r, ind, ax, ay, az = Update.update_acc(new_pos, m, t-1, i, seuil)
            if t == len(d['time']):
                # Very first extended step: no t-2 sample is available yet,
                # so use the velocity-based starter instead of Verlet.
                new_pos[t, i, 0], new_vit[t, i, 0] = Update.first_step(new_pos[t-1, i, 0], new_vit[t-1, i, 0], ax, dt)
                new_pos[t, i, 1], new_vit[t, i, 1] = Update.first_step(new_pos[t-1, i, 1], new_vit[t-1, i, 1], ay, dt)
                new_pos[t, i, 2], new_vit[t, i, 2] = Update.first_step(new_pos[t-1, i, 2], new_vit[t-1, i, 2], az, dt)
            else:
                # Position via two-step Verlet, velocity via backward difference
                new_pos[t, i, 0] = Update.verlet(new_pos[t-1, i, 0], new_pos[t-2, i, 0], ax, dt)
                new_pos[t, i, 1] = Update.verlet(new_pos[t-1, i, 1], new_pos[t-2, i, 1], ay, dt)
                new_pos[t, i, 2] = Update.verlet(new_pos[t-1, i, 2], new_pos[t-2, i, 2], az, dt)
                new_vit[t, i, 0] = (new_pos[t-1, i, 0] - new_pos[t-2, i, 0])/dt
                new_vit[t, i, 1] = (new_pos[t-1, i, 1] - new_pos[t-2, i, 1])/dt
                new_vit[t, i, 2] = (new_pos[t-1, i, 2] - new_pos[t-2, i, 2])/dt
    # Last time step
    elif t == len(time_array)-1 :
        #print("last")
        for i in range(N):
            # Repeat the previous state for the final sample
            new_pos[t, i, 0] = new_pos[t-1, i, 0]
            new_pos[t, i, 1] = new_pos[t-1, i, 1]
            new_pos[t, i, 2] = new_pos[t-1, i, 2]
            new_vit[t, i, 0] = new_vit[t-1, i, 0]
            new_vit[t, i, 1] = new_vit[t-1, i, 1]
            new_vit[t, i, 2] = new_vit[t-1, i, 2]
    else:
        pass
        #print('No computing!')
#old_pos = np.concatenate((old_pos, new_pos), axis = 0)
#old_vit = np.concatenate((old_vit, new_vit), axis = 0)
print("\n\n======================== \n Computing density \n======================== \n \n")
# Fill the density cube only for the newly computed steps
for t in tqdm(range(len(d['time']), len(time_array))):
    if t >= len(d['time']):
        new_rho[t] = density(new_pos, t, nb)
# Bundle the extended run so it can be reloaded the same way as the input dump
new_dict = {
    'position' : new_pos,
    'velocity' : new_vit,
    'density' : new_rho,
    'taille' : taille,
    'L' : L,
    'pc' : pc,
    'Nyears' : Nyears,
    'dt' : dt,
    'seuil' : seuil,
    'nb' : nb,
    'time' : time_array,
    'm' : m}
def save_obj(obj, name):
    """Pickle *obj* to obj/<name>.pkl with the highest protocol.

    Fix: the original crashed with FileNotFoundError when the obj/
    directory did not exist; it is now created on demand.
    """
    import os
    import pickle
    os.makedirs('obj', exist_ok=True)
    with open(os.path.join('obj', name + '.pkl'), 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
# Persist the extended run; the filename encodes the new total simulated years.
save_obj(new_dict, 'all_data_{}part_{}years_{}pc'.format(N, during*existing*Nyears, seuil))
| mathias77515/Galaxy | continue_sim.py | continue_sim.py | py | 4,963 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number":... |
41973095065 | from .transformer import Transformer
from musikla.core import Voice
from musikla.core.events import MusicEvent, NoteEvent, RestEvent, ChordEvent
from typing import List, Optional
class SlidingAverage():
    """Running average of the most recent `capacity` values.

    When capacity == 0 the window is unbounded and every value counts.
    Feed values with `avg += value`; read the mean with int()/float().

    Bug fixes versus the original:
    - the first `+=` double-counted the value (count reached 2 after one
      insertion and the history received it twice), skewing the average;
    - `__float__` returned the int 0 for an empty average, which makes
      `float(avg)` raise TypeError (``__float__`` must return a float).
    """

    def __init__ ( self, capacity : int = 0 ):
        # Values currently inside the window (only tracked when bounded)
        self.history : List[float] = []
        self.capacity = capacity
        self.sum : float = 0
        self.count : int = 0

    def __iadd__ ( self, value : float ):
        if self.capacity > 0 and self.count == self.capacity:
            # Window is full: slide it by dropping the oldest value
            self.sum -= self.history.pop( 0 )
        else:
            self.count += 1

        self.sum += value

        if self.capacity > 0:
            self.history.append( value )

        return self

    def __int__ ( self ):
        if self.count == 0:
            return 0

        return int( self.sum / self.count )

    def __float__ ( self ):
        if self.count == 0:
            return 0.0

        return float( self.sum / self.count )
class VoiceIdentifierVoice():
    """Bookkeeping for one synthesized sub-voice split off a parent Voice.

    Tracks the last event routed here and a running mean pitch, which the
    transformer uses to decide where subsequent notes belong.
    """

    def __init__ ( self, transformer : 'VoiceIdentifierTransformer', parent : Voice, index : int, staff : int = None ):
        self.transformer : VoiceIdentifierTransformer = transformer
        self.parent_name : str = parent.name
        self.parent_staff : Optional[int] = staff
        # Cloned voice named "<parent>/<index>"; routed events are re-tagged with it
        self.voice : Voice = parent.clone( parent.name + '/' + str( index ) )
        self.index = index
        # Running mean pitch of the events routed here
        self.average_pitch : SlidingAverage = SlidingAverage()
        self.auto_create_rests : bool = True
        self.auto_create_end_rests : bool = True
        self.last_event : Optional[MusicEvent] = None

    @property
    def last_timestamp ( self ):
        """Timestamp of the last routed event, or 0 when none yet."""
        if self.last_event is None:
            return 0

        return self.last_event.timestamp

    @property
    def last_end_timestamp ( self ):
        """End timestamp of the last routed event, or 0 when none yet."""
        if not self.last_event:
            return 0

        return self.last_event.end_timestamp

    def is_busy_at ( self, timestamp : int ) -> bool:
        # Busy while the last event is still sounding at *timestamp*
        return self.last_end_timestamp > timestamp

    def is_connected_at ( self, timestamp : int ) -> bool:
        # "Connected" tolerates a gap of up to 3 time units — TODO confirm unit
        return abs( timestamp - self.last_end_timestamp ) <= 3

    def distance ( self, event : NoteEvent ) -> float:
        """Absolute pitch distance between *event* and this voice's mean pitch."""
        return abs( int( event ) - int( self.average_pitch ) )

    def create_rest ( self, target : int, visible : bool = True ) -> RestEvent:
        """Build a rest filling the gap from the last event's end up to *target*."""
        duration = target - self.last_end_timestamp

        value = self.voice.from_duration_absolute( duration )

        return RestEvent(
            timestamp = self.last_end_timestamp,
            visible = visible,
            duration = duration,
            value = value,
            voice = self.voice,
            staff = self.parent_staff
        )

    def append ( self, *events : MusicEvent ):
        """Route *events* to this voice, inserting gap-filling rests if enabled."""
        for event in events:
            if isinstance( event, NoteEvent ) or isinstance( event, ChordEvent ):
                # Update the running pitch average (chords contribute their mean pitch)
                self.average_pitch += int( event ) if isinstance( event, NoteEvent ) \
                    else sum( event.pitches ) / len( event.pitches )

            # TODO Allow setting a minimum rest duration. If the empty space between the events is less than
            # said minimum, then no rest is created
            if self.auto_create_rests and self.last_end_timestamp < event.timestamp - 1:
                self.transformer.add_output( self.create_rest( event.timestamp ) )

            self.transformer.add_output( event.clone( voice = self.voice ) )

            self.last_event = event
class VoiceIdentifierTransformer(Transformer):
    """
    This class receives a flat, ordered stream of musical events and splits them in multiple voices and identifies parallel notes/chords
    """

    def __init__ ( self, auto_create_rests : bool = True, auto_create_end_rests : bool = True ):
        super().__init__()

        # Sub-voices created so far, across all parent voices/staves
        self.voices : List[VoiceIdentifierVoice] = []
        self.auto_create_rests : bool = auto_create_rests
        self.auto_create_end_rests : bool = auto_create_end_rests

    def create_voice_for ( self, event : NoteEvent ) -> VoiceIdentifierVoice:
        """Create a fresh sub-voice for *event*'s parent voice/staff."""
        # Next free index among sub-voices of the same parent voice
        index = max( ( voice.index for voice in self.voices if voice.parent_name == event.voice.name ), default = 0 )

        voice = VoiceIdentifierVoice( self, event.voice, index + 1, event.staff )
        voice.auto_create_rests = self.auto_create_rests
        voice.auto_create_end_rests = self.auto_create_end_rests

        self.voices.append( voice )

        return voice

    def find_voice_for ( self, event, auto_create : bool = True ) -> Optional[VoiceIdentifierVoice]:
        """Pick the free sub-voice closest in pitch to *event* (optionally creating one)."""
        best_voice, best_voice_dst = None, None

        for voice in self.voices:
            if voice.parent_name != event.voice.name or voice.parent_staff != event.staff:
                continue

            # If the voice already has a sound playing at this timestamp
            if voice.is_busy_at( event.timestamp ):
                continue

            if event.staff is not None:
                # An explicit staff already disambiguates: first free voice wins
                return voice

            voice_dst = voice.distance( event )

            if best_voice is None or best_voice_dst > voice_dst:
                best_voice = voice
                best_voice_dst = voice_dst

        if best_voice is None and auto_create:
            best_voice = self.create_voice_for( event )

        return best_voice

    def find_voice_for_rest ( self, event, auto_create : bool = True ) -> VoiceIdentifierVoice:
        """Attach a rest to the voice whose last event ends (nearly) at the rest's start."""
        for voice in self.voices:
            if voice.parent_name != event.voice.name or voice.parent_staff != event.staff:
                continue

            # If the voice already has a sound playing at this timestamp
            if voice.is_connected_at( event.timestamp ):
                return voice

        return self.create_voice_for( event )

    def register_rest ( self, event : RestEvent ):
        voice = self.find_voice_for_rest( event )

        if voice is not None:
            voice.append( event )

    def register_note ( self, event : NoteEvent ):
        voice = self.find_voice_for( event )

        if voice is not None:
            voice.append( event )

    def register_chord ( self, event : ChordEvent ):
        voice = self.find_voice_for( event )

        if voice is not None:
            voice.append( event )

    def transform ( self ):
        """
        For now, NoteOn/NoteOff events will be ignored.
        Use the compose transformer before passing events into this notation builder.
        This might change in the future.
        """
        while True:
            done, event = yield

            if done: break

            if isinstance( event, NoteEvent ):
                self.register_note( event )
            elif isinstance( event, RestEvent ):
                self.register_rest( event )
            elif isinstance( event, ChordEvent ):
                self.register_chord( event )
            else:
                # Non-note events pass through untouched
                self.add_output( event )

        if self.auto_create_end_rests and self.voices:
            # Pad every shorter voice with an invisible rest up to the latest end time
            last_voice_timestamp : int = max( [ voice.last_end_timestamp for voice in self.voices ] )

            for voice in self.voices:
                if voice.last_end_timestamp < last_voice_timestamp:
                    self.add_output( voice.create_rest( last_voice_timestamp, visible = False ) )
| pedromsilvapt/miei-dissertation | code/musikla/musikla/core/events/transformers/voice_identifier.py | voice_identifier.py | py | 7,553 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "musikla.core.Voice",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "musikla.core.Voice",
... |
15889711723 | import os
import sys
import importlib
import datetime
import time
import shutil
import numpy as np
import pandas as pd
from pathlib import Path
from evidence.post_processing import postprocess
# UltraNest imports
try:
from ultranest import ReactiveNestedSampler
import ultranest.stepsampler
except ImportError:
raise ImportError("Install UltraNest to use this module.")
# MPI imports
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
except ImportError:
print("Install mpi4py to use with OpenMPI. Continuing with one thread.")
rank = 0
size = 1
def run(model, rundict, priordict, ultrasettings=None):
    """
    Runs UltraNest on the chosen data and model. When UltraNest is finished
    running it automatically runs a post processing script on the output creating
    plots of the posterior and saving basic information like the evidence and
    run settings on a text file for future reference.

    Parameters
    ----------
    model : object
        Custom model class that contains your desired model. This class has to
        have a method called log_likelihood(x) which takes a parameter array x
        and returns the corresponding log Likelihood. The order of the array x is
        given by the order that results from calling list(priordict.keys()) (see
        description for priordict for more information.)
        Your custom model class should inherit from either RVModel if it's a
        radial velocities model or from BaseModel otherwise.
    rundict : dict
        Dictionary with basic information about the run itself. Keys it should
        include are:
            target : Name of target or function to analyse
            runid : String to identify the specific model or configuration being
                    used
            comment (optional) : Optional comment for a third layer of identification
            prior_names (optional) : List with the names and ranges of the priors
            nplanets (optional) : Number of planets in the RV model. If not
                                  provided, it will use the number of planets
                                  present in the configfile.
            star_params (optional) : Dictionary with the stars parameters like
                                     star_mass (mass of the star in solar masses),
                                     star_radius (radius of star in solar radii),
                                     star_rot (rotation period of star). These
                                     can be ufloats to include the uncertainty.
            savedir (optional) : Save directory for the output of UltraNest
            order_planets (optional) : Boolean indicating if the planets should
                                       be ordered by period. This is to avoid
                                       periods jumping between the different
                                       planets. Default is False.
    priordict : dict
        Dictionary with the priors to all free parameters. Keys are the names
        of the parameters. Values are object with a method .ppf(x) which is the
        inverse of the CDF of the chosen probability distribution of the prior.
        It takes a uniformly sampled number between 0 and 1 and returns the
        physical parameter distributed according to the prior.
        The method log_likelihood in your custom model should take the same order
        or parameters that results from calling list(priordict.keys()).
    ultrasettings : dict, optional
        Dictionary containing custom parameters for UltraNest setting like nlive
        or nrepeats. If None are given the default UltraNest settings are used.

    Returns
    -------
    None
        NOTE(review): the docstring originally promised an Output object, but
        the final ``return`` is commented out and the function returns None;
        results are persisted to disk (pickle) instead.
    """
    # Create list of parameter names
    parnames = model.parnames

    rundict_keys = list(rundict.keys())

    # Count "real" number of planets if not provided by rundict or model
    nplanets = 0
    if 'nplanets' in rundict_keys:
        # Check if nplanets is in rundict
        nplanets = rundict['nplanets']
    elif hasattr(model, 'nplanets'):
        # Check if nplanets is in the model
        nplanets = model.nplanets
    else:
        # Count number of planets by checking for periods
        for i, par in enumerate(parnames):
            if ('planet' in par) and ('period' in par):
                nplanets += 1

    # Parameter indices belonging to each planet, and the index of each period
    planets = []
    planet_idxs = []
    for n in range(1, nplanets+1):
        planets.append([])
        for i, par in enumerate(parnames):
            if f'planet{n}' in par:
                planets[n-1].append(i)
                if 'period' in par:
                    planet_idxs.append(i)

    # Prepare run
    nderived = 0
    ndim = len(parnames)

    # Function to convert from hypercube to physical parameter space
    def prior(hypercube):
        """
        Converts a point in the unit hypercube to the physical parameters using
        their respective priors.
        """
        # Calculate physical parameters with ppf from prior
        theta = np.ones_like(hypercube)
        for i, x in enumerate(range(ndim)):
            param = parnames[i]
            theta[x] = priordict[param].ppf(hypercube[i])
        return theta

    # LogLikelihood
    def loglike(x):
        """
        Calculates the logarithm of the Likelihood given the parameter vector x.
        """
        return model.log_likelihood(x)

    # Starting time to identify this specific run
    # If it's being run with more than one core the isodate on the first core
    # is broadcasted to the rest so they all share the same variable
    isodate = datetime.datetime.today().isoformat()
    if size > 1:
        isodate = comm.bcast(isodate, root=0)

    # Create settings object for this run
    settings = set_ultrasettings(rundict, ultrasettings, ndim, nderived, isodate, parnames)

    # Find and indicate wrapped (circular) parameters like phases
    wrapped_params = np.zeros(ndim, dtype=bool)
    for i, par in enumerate(parnames):
        if 'omega' in par or 'ml0' in par:
            wrapped_params[i] = True

    # Set up the sampler
    sampler = ReactiveNestedSampler(parnames, loglike, prior,
                                    log_dir = settings['log_dir'],
                                    num_test_samples = 100,
                                    wrapped_params = wrapped_params,
                                    num_bootstraps = settings['num_bootstraps'],
                                    #resume=False,
                                    # vectorized = True
                                    )

    # Use a slice sampler instead of rejection sampling
    sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=settings['nsteps'], adaptive_nsteps='move-distance')

    # Initialise clocks
    ti = time.process_time()

    # ----- Run UltraNest ------
    sampler.run(min_num_live_points = settings['nlive'],
                cluster_num_live_points = int(0.1*settings['nlive']),
                dlogz = settings['dlogz'],
                frac_remain = settings['frac_remain']
                )
    # --------------------------

    # Stop clocks
    tf = time.process_time()

    if size > 1:
        # Reduce clocks to min and max to get actual wall time
        ti = comm.reduce(ti, op=MPI.MIN, root=0)
        tf = comm.reduce(tf, op=MPI.MAX, root=0)

    # Print General results
    sampler.print_results()

    # Save results (only on the MPI root rank)
    if rank == 0:
        output = Output()
        # Assign additional parameters to output
        output.runtime = datetime.timedelta(seconds=tf-ti)
        output.rundict = rundict.copy()
        output.datadict = dict(model.datadict)
        output.fixedpardict = dict(model.fixedpardict)
        output.model_name = str(model.model_path.stem)
        output.nlive = settings['nlive']
        output.nrepeats = settings['nsteps']
        output.isodate = isodate
        output.ncores = size
        output.parnames = parnames
        output.ndim = ndim
        output.sampler = 'UltraNest'
        output.base_dir = settings['log_dir']
        output.file_root = settings['file_root']
        output.logZ = sampler.results['logz']
        output.logZerr = sampler.results['logzerr']
        output.nlike = sampler.results['ncall']
        output.samples = pd.DataFrame(sampler.results['samples'], columns=parnames)

        # Add additional information if provided
        if 'prior_names' in rundict_keys:
            output.priors = rundict['prior_names']
        if 'star_params' in rundict_keys:
            output.starparams = rundict['star_params']

        # Print run time
        print(f'\nTotal run time was: {output.runtime}')

        # Plot results
        sampler.plot()

        base_dir_parent = str(Path(output.base_dir).parent.absolute())
        runid_dir = Path(output.base_dir).parent.parent.absolute()

        # Save model file
        shutil.copy(model.model_path, base_dir_parent)

        # Save output as pickle file
        # print(base_dir_parent)
        # run_label = os.path.basename(base_dir_parent)
        # print(run_label)
        dump2pickle(output, output.file_root+'.pkl')

        # Copy post processing script to this run's folder
        parent = Path(__file__).parent.parent.absolute()
        shutil.copy(os.path.join(parent, 'post_processing.py'), base_dir_parent)
        # Copy FIP criterion script to parent of runid
        shutil.copy(os.path.join(parent, 'fip_criterion.py'), runid_dir)
        # Copy model file
        shutil.copy(model.model_path, base_dir_parent)

        # Run post processing script
        postprocess(base_dir_parent)

    return #output
def dump2pickle(output, filename, savedir=None):
    """
    Takes the output from UltraNest and saves it as a pickle file.

    Parameters
    ----------
    output : Output object
        Object file with the output infos from the UltraNest run
    filename : str
        Name of the saved file
    savedir : str, optional
        Directory on where to save the pickled file. Defaults to the parent
        of ``output.base_dir``.
    """
    # pickle is part of the standard library; the old try/except ImportError
    # guard (suggesting "pip install pickle") was dead code and has been removed.
    import pickle

    if savedir is None:
        # Save directory in parent of base dir
        pickledir = Path(output.base_dir).parent
    else:
        # Unless specified otherwise
        pickledir = savedir

    # Create directory if it doesn't exist.
    os.makedirs(pickledir, exist_ok=True)

    full_path = os.path.join(pickledir, filename)
    with open(full_path, 'wb') as f:
        pickle.dump(output, f)

    return
def set_ultrasettings(rundict, ultrasettings, ndim, nderived, isodate, parnames):
    """
    Sets the correct settings for UltraNest and returns a dictionary with the
    settings. It combines the default settings and overwrites the user defined
    settings. It contains the settings for both the initialization of the
    sampler and its run function.

    Parameters
    ----------
    rundict : dict
        Dictionary with information about the run itself.
    ultrasettings : dict
        Dictionary with the custom UltraNest settings to be set for this run
    ndim : int
        Number of free parameters
    nderived : int
        Number of derived parameters (currently unused, kept for interface
        compatibility)
    isodate : datetime
        Date stamp to identify the current run
    parnames : list
        List containing the names of the free parameters

    Returns
    -------
    settings : dict
        Object with all the information that UltraNest needs to run nested
        sampling on this model.
    """
    rundict_keys = list(rundict.keys())

    # Define UltraNest settings
    # Use the settings provided in ultrasettings otherwise use default
    # Definition of default values for the UltraNest settings
    default_settings = {'nlive': 25*ndim,
                        'nsteps': 3*ndim,
                        'dlogz': 0.5,
                        'frac_remain': 0.01,
                        'num_bootstraps': 30
                        }

    # Update default values with settings provided by user
    # (idiom fix: `is not None` / isinstance instead of `!= None` / type comparison)
    if ultrasettings is not None:
        if not isinstance(ultrasettings, dict):
            raise TypeError("ultrasettings has to be a dictionary")
        default_settings.update(ultrasettings)

    # Define fileroot name (identifies this specific run)
    rundict['target'] = rundict['target'].replace(' ', '')  # Remove any whitespace
    rundict['runid'] = rundict['runid'].replace(' ', '')  # Remove any whitespace
    file_root = rundict['target']+'_'+rundict['runid']

    # Add comment if it exists and is not empty
    if 'comment' in rundict_keys:
        if rundict['comment'] != '':
            file_root += '-' + rundict['comment']

    # Add number of planets if it exists
    if 'nplanets' in rundict_keys:
        if rundict['nplanets'] is not None:
            file_root += f'_k{rundict["nplanets"]}'

    # Check if there is a drift and add "d{n}" to file name
    drift_order = 0
    for par in parnames:
        if 'drift' in par:
            if par[6:] in ['lin', 'quad', 'cub', 'quar']:
                drift_order += 1
    if drift_order > 0:
        file_root += f'_d{drift_order}'

    # Label the run with nr of planets, live points, nr of cores, sampler and date
    file_root += f'_nlive{default_settings["nlive"]}'
    file_root += f'_ncores{size}'
    file_root += '_ultranest'
    file_root += '_'+isodate

    # Base directory
    # Check if a save directory was provided
    if 'save_dir' in rundict_keys:
        save_dir = rundict['save_dir']
    else:
        save_dir = ''
    base_dir = os.path.join(save_dir, file_root, 'ultraresults')

    # Update settings dictionary with fileroot and basedir
    default_settings.update({'log_dir': base_dir, 'file_root': file_root})

    return default_settings
class Output:
    """Plain attribute container for the results of a nested-sampling run."""
    pass
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "numpy.ones_like",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Load the cleaned data
# NOTE(review): absolute Windows-specific path — consider a relative path or CLI argument
data = pd.read_csv("C:\\Users\\wamm1\\Desktop\\midterm\\clean_pop.csv")

# List of countries for visualization
countries = ['Canada', 'India', 'Kenya', 'Brazil', 'Ukraine']

# Filter data for these countries
selected_countries = data[data['country'].isin(countries)]

# Plot one line per country of infant mortality rate over time
plt.figure(figsize=(12, 7))
sns.lineplot(data=selected_countries, x='year', y='Infant Mortality Rate', hue='country')
plt.title('Infant Mortality Rate Over the Years')
plt.ylabel('Infant Mortality Rate')
plt.xlabel('Year')
plt.legend(title='Country')
plt.show()
| TRAP33ZOID/Global-Population-Trends | statistical_modelling/infant_mortality_rates.py | infant_mortality_rates.py | py | 636 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "seaborn.lin... |
17418909566 | import collections
import math
from collections import deque
class Graph:
    """Directed weighted graph: a vertex set, adjacency lists and edge weights.

    Bug fix: the self-loop guard in add_edge used `pass`, which does NOT
    skip the rest of the method, so self-loops were still recorded; it now
    returns early as intended.
    """

    def __init__(self):
        self.vertices = set()
        # makes the default value for all vertices an empty list
        self.edges = collections.defaultdict(list)
        self.weights = {}

    def add_vertex(self, value):
        self.vertices.add(value)

    def add_edge(self, from_vertex, to_vertex, distance):
        """Add a directed edge; self-loops are ignored."""
        if from_vertex == to_vertex:
            return
        self.edges[from_vertex].append(to_vertex)
        self.weights[(from_vertex, to_vertex)] = distance

    def __str__(self):
        string = "Vertices: " + str(self.vertices) + "\n"
        string += "Edges: " + str(self.edges) + "\n"
        string += "Weights: " + str(self.weights)
        return string
def dijkstra(graph, start):
    """Single-source shortest paths on a weighted directed graph.

    Returns a pair (dist, prev): dist maps every vertex to the length of
    the shortest path from *start* (math.inf when unreachable), prev maps
    every vertex to its predecessor on that path (None for *start* and
    unreachable vertices).
    """
    visited = set()

    # Every vertex starts infinitely far away with no known predecessor
    dist = dict.fromkeys(list(graph.vertices), math.inf)
    prev = dict.fromkeys(list(graph.vertices), None)
    # The source is at distance zero from itself
    dist[start] = 0

    while visited != graph.vertices:
        # Greedily pick the closest vertex not yet finalized
        current = min((set(dist.keys()) - visited), key=dist.get)

        # Try to relax every outgoing edge to an unvisited neighbor
        for neighbor in set(graph.edges[current]) - visited:
            candidate = dist[current] + graph.weights[current, neighbor]
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                prev[neighbor] = current

        visited.add(current)

    return dist, prev
def shortest_path(graph, start, end):
    """Return (distance, path) for the shortest route from *start* to *end*.

    Bug fix: the original walked ``paths[end]`` unconditionally, so for
    start == end it appended None and then raised KeyError on ``paths[None]``;
    that case now returns the trivial path.
    NOTE(review): an unreachable *end* still raises KeyError, as before.
    """
    visited, paths = dijkstra(graph, start)

    if start == end:
        return visited[end], [start]

    full_path = deque()
    _destination = paths[end]

    # while end point is not start point append destination to path
    while _destination != start:
        full_path.appendleft(_destination)
        _destination = paths[_destination]

    full_path.appendleft(start)
    full_path.append(end)

    # return shortest way
    return visited[end], list(full_path)
if __name__ == '__main__':
    graph = Graph()
    # input.txt format: first line = number of edges, then one
    # "from to weight" triple per line, and finally "start end".
    with open('input.txt') as read_file:
        n = int(read_file.readline())
        for i in range(n):
            node_list = read_file.readline().split()
            graph.add_vertex(node_list[0])
            graph.add_vertex(node_list[1])
            graph.add_edge(node_list[0], node_list[1], int(node_list[2]))
        points = read_file.readline().split()
        start_point = points[0]
        end_point = points[1]
        print( shortest_path(graph, start_point, end_point))
| nzavarinsky/Algorhitms | LABA4(dijkstra-algo)/dijkstra-v3.py | dijkstra-v3.py | py | 3,072 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 64,
"usage_type": "call"
}
] |
6897367210 | import argparse
import ast
import io
import os.path
import shlex
import shutil
import subprocess
import sys
import textwrap
import urllib.parse
def get_url_filename(url, suffix):
    """Return the last path component of *url*, validating its *suffix*."""
    basename = urllib.parse.urlparse(url).path.split('/')[-1]
    if not basename.endswith(suffix):
        raise Exception("%r filename doesn't end with %r suffix; url=%r"
                        % (basename, suffix, url))
    return basename
def add_version(package_name, version):
    """Return a pip requirement string, pinned when *version* is given."""
    return package_name if version is None else '%s==%s' % (package_name, version)
class CI:
    def __init__(self):
        """Set up directories, warning/dev-mode flags and the Python command line."""
        self.source_dir = os.path.abspath(os.path.dirname(__file__))
        self.root_dir = os.path.abspath('work')
        self.set_task_dir()

        self.python_warnings = []
        # self.python_warnings.append('error')
        self.python_dev_mode = True
        # self.python_bytes_warnings = None
        # 2 stands for '-bb'
        self.python_bytes_warnings = 0

        self.read_package_versions()

        if self.python_bytes_warnings and self.python_bytes_warnings > 1:
            self.python_warnings.append('error::BytesWarning')

        self.env = self.create_environ()

        # Command-line options passed to every Python invocation
        self.python_options = []
        if self.python_bytes_warnings:
            self.python_options.append('-' + 'b' * self.python_bytes_warnings)
        if self.python_dev_mode:
            self.python_options.extend(['-X', 'dev'])
        for warn in self.python_warnings:
            self.python_options.append('-W%s' % warn)

        # Lazily computed/cached interpreter info
        self._python_version = None
        self._python_version_str = None
        self._orig_python_args = None
        self.set_python(sys.executable)

    def create_environ(self):
        """Return a copy of os.environ stripped of PYTHON* vars, with our flags set."""
        env = dict(os.environ)
        for key in list(env):
            if key.startswith('PYTHON'):
                del env[key]
        if self.python_dev_mode:
            env['PYTHONDEVMODE'] = '1'
        if self.python_warnings:
            env['PYTHONWARNINGS'] = ','.join(self.python_warnings)
        return env

    def log(self, message):
        """Print a progress message."""
        print(message)

    def mkdir(self, path):
        """Create *path* if it does not already exist (non-recursive)."""
        if os.path.exists(path):
            return
        self.log("Create directory: %s" % path)
        os.mkdir(path)

    def set_python(self, python):
        """Point self.python_args at *python* plus the configured options."""
        python = os.path.abspath(python)
        self.python_args = [python] + self.python_options
        # Remember the very first interpreter used (before any venv switch)
        if self._orig_python_args is None:
            self._orig_python_args = self.python_args
    def run_command(self, args, quiet=False, **kw):
        """Run *args* as a subprocess in the scrubbed env; raise on non-zero exit."""
        cmd_str = ' '.join(shlex.quote(arg) for arg in args)
        cmd_str = cmd_str.replace("\n", "\\n")
        if not quiet:
            text = "Run command: %s" % cmd_str
            if 'cwd' in kw:
                text += ' in %s' % kw['cwd']
            self.log(text)
        if 'stdin' not in kw:
            kw['stdin'] = subprocess.DEVNULL
        if 'env' in kw:
            raise NotImplementedError("cannot override env")
        kw['env'] = self.env
        proc = subprocess.run(args, **kw)
        exitcode = proc.returncode
        if exitcode:
            raise Exception("%s command failed with exit code %s"
                            % (cmd_str, exitcode))
        return proc

    def run_python(self, args, **kw):
        """Run the configured Python interpreter with *args* appended."""
        return self.run_command(self.python_args + args, **kw)

    def patch(self, filename, directory=os.path.curdir, level=1):
        """Apply patches/<filename> with the `patch` tool inside *directory*."""
        filename = os.path.join(self.source_dir, 'patches', filename)
        with open(filename) as fp:
            # --force: do not ask any questions
            self.run_command(['patch', '-p%s' % level, '--force'],
                             stdin=fp,
                             cwd=directory)

    def read_package_versions(self):
        """Parse requirements.txt into the {name: version-or-None} mapping."""
        # parse requirements.txt
        self.package_versions = {}
        filename = os.path.join(self.source_dir, 'requirements.txt')
        with io.open(filename, encoding='utf8') as fp:
            for line in fp:
                # strip comments
                line = line.split('#', maxsplit=1)[0]
                line = line.rstrip()
                if '==' in line:
                    name, version = line.split('==', 1)
                else:
                    name = line
                    version = None
                self.package_versions[name] = version
def pip_install_update(self, packages):
if not packages:
raise ValueError
args = ["-m", "pip", "install", "--upgrade"]
for name in packages:
# don't pin the version with --dev
if not self.args.dev:
try:
version = self.package_versions[name]
except KeyError:
raise Exception("unversionned package: %r" % name)
arg = add_version(name, version)
args.append(arg)
return self.run_python(args)
    def get_python_version(self):
        """Return (major, minor, micro) of the configured interpreter (cached)."""
        if self._python_version is not None:
            return self._python_version

        code = "import sys; print(sys.version_info[:3])"
        proc = self.run_python(["-c", code],
                               stdout=subprocess.PIPE,
                               universal_newlines=True,
                               quiet=True)
        line = proc.stdout.rstrip()
        # literal_eval safely turns the printed tuple back into a Python tuple
        self._python_version = ast.literal_eval(line)
        return self._python_version

    def get_python_version_str(self):
        # Get the Python version string including the Python implementation
        # name. Example: 'cpython-3.8.0b4'
        if self._python_version_str is not None:
            return self._python_version_str

        code = textwrap.dedent("""
            import sys
            if hasattr(sys, 'implementation'):
                name = sys.implementation.name
            else:
                import platform
                name = platform.python_implementation()
            name = name.lower()
            version = sys.version.split()[0]
            print("%s-%s" % (name, version))
        """)
        proc = self.run_python(["-c", code],
                               stdout=subprocess.PIPE,
                               universal_newlines=True,
                               quiet=True)
        self._python_version_str = proc.stdout.rstrip()
        return self._python_version_str
    def download(self, url, filename):
        """Download *url* into the download dir with wget (skipped if present)."""
        self.mkdir(self.download_dir)
        filename = os.path.basename(filename)
        filename = os.path.join(self.download_dir, filename)
        # already downloaded: do nothing
        if os.path.exists(filename):
            return filename
        try:
            self.run_command(["wget", "-O", filename, url])
        except: # noqa
            # remove the partial download before re-raising
            self.unlink(filename)
            raise
        return filename
        # FIXME: validate a checksum?

    def create_empty_file(self, filename):
        """Create (or truncate) *filename* as an empty file."""
        open(filename, 'wb', 0).close()

    def unlink(self, filename):
        """Remove *filename* if it exists."""
        if not os.path.exists(filename):
            return
        self.log("Remove file: %s" % filename)
        os.unlink(filename)

    def rmtree(self, dirname):
        """Recursively remove *dirname* if it exists."""
        if not os.path.exists(dirname):
            return
        self.log("Remove directory: %s" % dirname)
        shutil.rmtree(dirname)

    def package_directory(self, name):
        """Return the absolute extraction directory for package *name*."""
        # Create an absolute path
        # Example: "/path/to/numpy-1.17.2"
        version = self.package_versions[name]
        dirname = "%s-%s" % (name, version)
        return os.path.join(self.task_dir, dirname)

    def task_directory_name(self, name):
        """Return a directory name combining interpreter and package versions."""
        # Example: "cpython-3.8_numpy-1.16.2"
        python = self.get_python_version_str()
        version = self.package_versions[name]
        dirname = "%s_%s-%s" % (python, name, version)
        if self.args.dev:
            dirname += "-dev"
        return dirname

    def download_extract_zip(self, url, dirname):
        """Download a .zip from *url* and unpack it into the task dir."""
        filename = get_url_filename(url, '.zip')
        filename = self.download(url, filename)
        self.rmtree(dirname)
        self.run_command(["unzip", "-d", self.task_dir, filename])

    def download_extract_tarball(self, url, dirname):
        """Download a .tar.gz from *url* and unpack it into the current directory."""
        filename = get_url_filename(url, '.tar.gz')
        filename = self.download(url, filename)
        self.rmtree(dirname)
        self.run_command(["tar", "-xf", filename])

    def apply_patches(self, patches, dirname):
        """Apply each patch file in *patches* inside *dirname*."""
        for filename in patches:
            self.patch(filename, dirname)
def setup_venv(self):
create_venv = not os.path.exists(self.venv_dir)
if create_venv:
try:
self.run_python(["-m", "venv", self.venv_dir])
except: # noqa
self.rmtree(self.venv_dir)
raise
else:
self.log("venv already exists: %s" % self.venv_dir)
self.set_python(os.path.join(self.venv_dir, "bin", "python"))
if create_venv:
self.pip_install_update(["setuptools", "pip"])
def get_tasks(self):
task_dir = os.path.join(self.source_dir, 'task')
names = [name[:-3] for name in os.listdir(task_dir)
if not name.startswith("__init__.") and name.endswith(".py")]
return names
def parse_options(self):
    """Parse command-line arguments into self.args.

    Positional arguments: *command* (install/test/clean/cleanall) and
    *task* (one of the task modules reported by get_tasks()).
    """
    tasks = self.get_tasks()
    # BUG FIX: the previous description ("Process some integers.") was a
    # leftover from the argparse documentation example and showed up in
    # --help output.
    parser = argparse.ArgumentParser(
        description='Run CI tasks: install dependencies and run their tests.')
    parser.add_argument('command',
                        choices='install test clean cleanall'.split())
    parser.add_argument('task',
                        choices=sorted(tasks))
    parser.add_argument('--dev', action="store_true")
    self.args = parser.parse_args()
def set_task_dir(self, name=None):
    """Set task/download/venv directories; *name* selects a per-task subdirectory."""
    self.task_dir = os.path.join(self.root_dir, name) if name else self.root_dir
    self.download_dir = os.path.join(self.root_dir, 'download')
    self.venv_dir = os.path.join(self.task_dir, 'venv')
def chdir(self, path):
    """Change the current working directory to *path*, logging the move."""
    self.log("Change directory: %s" % path)
    os.chdir(path)
def setup_env(self):
    """Create root/task directories, enter the task directory and set up the venv."""
    for directory in (self.root_dir, self.task_dir):
        self.mkdir(directory)
    self.chdir(self.task_dir)
    self.setup_venv()
def _get_task(self):
    """Import pythonci.task.<task> and return an instance of its Task class."""
    task_name = self.args.task
    modname = 'pythonci.task.' + task_name
    # __import__ returns the top-level package; walk down to the task module.
    mod = __import__(modname).task
    mod = getattr(mod, task_name)
    task_class = mod.Task
    return task_class(self)
def patch_tox_basepython(self):
    """Force tox's [testenv] section to use our interpreter via a basepython line.

    Operates on tox.ini in the current working directory.  If the file has
    a [testenv] section the line is inserted right after its header,
    otherwise a new [testenv] section is prepended.
    """
    filename = 'tox.ini'
    with open(filename, encoding="utf-8") as fp:
        content = fp.read()
    # Don't pass arguments, only the executable
    # Don't use the Python of the venv, but the original Python
    #python = self._orig_python_args[0]
    python = self.python_args[0]
    line = f"basepython = {python}\n"
    testenv = "[testenv]\n"
    pos = content.find(testenv)
    # BUG FIX: str.find() returns -1 when the substring is absent (truthy)
    # and 0 when [testenv] starts the file (falsy), so "if pos:" mishandled
    # both edge cases.  Compare explicitly against -1.
    if pos >= 0:
        pos += len(testenv)
        content = content[:pos] + line + content[pos:]
    else:
        # No [testenv] section yet: create one at the top of the file.
        content = f"{testenv}{line}" + content
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write(content)
        fp.flush()
    self.log(f"basepython overridden in {filename}")
def main(self):
    """Entry point: parse options and dispatch to the requested command."""
    self.parse_options()
    command = self.args.command
    if command == 'cleanall':
        # cleanall wipes everything and needs no task instance.
        self.rmtree(self.root_dir)
        return
    task = self._get_task()
    if command == 'clean':
        self.rmtree(self.task_dir)
    elif command == 'install':
        task.install()
    elif command == 'test':
        task.run_tests()
    else:
        raise Exception("unknown command: %r" % command)
def remove_cython_files(self):
    """Delete Cython-generated C files (and PKG-INFO) so Cython must regenerate them."""
    # grep -rl finds every file carrying the Cython generation banner.
    command = r"rm -f -v $(grep -rl '/\* Generated by Cython') PKG-INFO"
    self.run_command([command], shell=True)
def install_cython(self):
    """Install Cython: the 0.29x git branch in dev mode, else the latest release."""
    if not self.args.dev:
        self.pip_install_update(["Cython"])
    else:
        url = "git+git://github.com/cython/cython.git@0.29x"
        self.run_python(["-m", "pip", "install", url])
| vstinner/pythonci | pythonci/_ci.py | _ci.py | py | 12,377 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "urllib.parse.parse.urlparse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.p... |
74455165473 | from reportlab.pdfgen.canvas import Canvas
from PollyReports import *
from testdata import data
import sqlite3
import os
from tkinter import messagebox
import webbrowser as vb
# ================== Variables=======
# Running totals for the report footer.
Qty_list = []
Price_list = []
Qty_sum=0
Price_sum=0
# ==========SQL connection============
con=sqlite3.connect(r"ims.db")
cur=con.cursor()
try:
    cur.execute("Select * from products")
    # NOTE(review): this rebinding shadows the `data` imported from testdata
    # above — presumably intentional, but confirm.
    data=cur.fetchall()
    # Columns 5 and 6 hold quantity and price respectively.
    for row in data:
        Qty_list.append(int(row[5]))
        Price_list.append(int(row[6]))
    for x in Qty_list:
        Qty_sum=Qty_sum + x
    for x in Price_list:
        Price_sum=Price_sum + x
    rpt = Report(data)
    # =============== Header ================
    # Column headings laid out left-to-right at fixed x offsets.
    rpt.pageheader = Band([
        Element((36, 0), ("Times-Bold", 20),
                text = "Page Header"),
        # category text,supplier text,name text,price text,qty text,Pstatus text
        Element((0, 24), ("Helvetica", 12),
                text = "Category Name"),
        Element((100, 24), ("Helvetica", 12),
                text = "Supplier Name"),
        Element((200, 24), ("Helvetica", 12),
                text = "Product Name"),
        Element((300, 24), ("Helvetica", 12),
                text = "Price"),
        Element((400, 24), ("Helvetica", 12),
                text = "Qty",),
        Element((500, 24), ("Helvetica", 12),
                text = "Total Price"),
        Element((600, 24), ("Helvetica", 12),
                text = "Status",),
        Rule((0, 42), 8*80, thickness = 2),])
    rpt.pagefooter = Band([
        Element((72*8, 0), ("Times-Bold", 20),
                text = "Page Footer", align = "right"),
        Element((36, 16), ("Helvetica-Bold", 12),
                sysvar = "pagenumber",
                format = lambda x: "Page %d" % x),
        ])
    # ==============Data inserted into Page=============
    # key=N pulls column N from each row tuple (1..7 match the headings above).
    rpt.detailband = Band([
        Element((0, 24), ("Helvetica", 12),key=1),
        # text = "Category Name"),
        Element((100, 24), ("Helvetica", 12),key=2),
        # text = "Supplier Name"),
        Element((200, 24), ("Helvetica", 12),key=3),
        # text = "Product Name"),
        Element((300, 24), ("Helvetica", 12),key=4),
        # text = "Price"),
        Element((400, 24), ("Helvetica", 12), key=5),
        # text = "Qty",),
        Element((500, 24), ("Helvetica", 12),key=6),
        # text = "Total Price"),
        Element((600, 24), ("Helvetica", 12),key=7),
        # text = "Status",),
        ])
    # ============Footer==========
    rpt.reportfooter = Band([
        Rule((300, 1), 300),
        Element((300, 4), ("Helvetica-Bold", 12),
                text = "Total Qty = "),
        Element((430, 4), ("Helvetica-Bold", 12),
                text = "Grand Total = "),
        Element((400,5), ("Helvetica-Bold", 12),
                text = f"{str(Qty_sum)}"),
        Element((510,5), ("Helvetica-Bold", 12),
                text = f"{str(Price_sum)}"),
        ])
    # Landscape letter page (11in x 8.5in at 72pt/in).
    canvas = Canvas("ReportsFolder/sample02.pdf", (72*11, 72*8.5))
    rpt.generate(canvas)
    canvas.save()
    # vb.open_new("sample02.pdf")
    # if os.path.exists("sample02.pdf"):
    #     # os.close("sample02.pdf")  # error in this line
    #     os.remove("bill/sample02.pdf")
    #     canvas = Canvas("sample02.pdf", (72*11, 72*8.5))
    #     rpt.generate(canvas)
    #     canvas.save()
    #     vb.open_new("sample02.pdf")
    # else:
    #     print("The file does not exist")
    #     canvas = Canvas("bill/sample02.pdf", (72*11, 72*8.5))
    #     rpt.generate(canvas)
    #     canvas.save()
    #     vb.open_new("bill/sample02.pdf")
except Exception as ex:
    messagebox.showerror("Error",f"Error Due to {str(ex)}")
{
"api_name": "sqlite3.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "testdata.data",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "testdata.data",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "testdata.data",
"line... |
38639095519 | """
Need to consider the switching on and off of physics schemes with reduced
precision. Show the active/deactive points as a function of precision for
selected physics schemes (Vertical Diffusion/Surface Fluxes/Convection)
"""
import numpy as np
import matplotlib.pyplot as plt
import iris.plot as iplt
from myscripts.statistics import count
from myscripts.models import speedy
from myscripts.projects.ithaca.tendencies import load_tendency
def main():
    """Plot active/activated/deactivated gridpoint counts against precision.

    2x2 panel: convection, surface fluxes and vertical diffusion for
    temperature, plus vertical diffusion for specific humidity.
    """
    variable = 'Temperature'
    # Lowest model level.
    sigma = speedy.sigma_levels[0]
    precisions = range(5, 24)
    # Create a two by two grid with shared x and y axes along rows and columns
    fig, axes = plt.subplots(nrows=2, ncols=2, sharex='col', figsize=[16, 10])
    plt.axes(axes[0, 0])
    scheme = 'Convection'
    make_plot(variable, scheme, sigma, precisions)
    plt.legend(title='Number of gridpoints')
    plt.axes(axes[0, 1])
    scheme = 'Surface Fluxes'
    make_plot(variable, scheme, sigma, precisions)
    plt.axes(axes[1, 0])
    scheme = 'Vertical Diffusion'
    make_plot(variable, scheme, sigma, precisions)
    plt.axes(axes[1, 1])
    # Same scheme (vertical diffusion) but for humidity instead of temperature.
    variable = 'Specific Humidity'
    make_plot(variable, scheme, sigma, precisions)
    plt.xlabel('Precision [sbits]')
    # Shared y-axis label for the whole figure.
    fig.text(0.01, 0.5, 'Number of Gridpoints',
             rotation='vertical', va='center')
    plt.show()
    return
def make_plot(variable, scheme, sigma, precisions):
    """Load tendencies for *scheme* at reduced and full precision and plot counts."""
    rp = load_tendency(
        variable=variable, scheme=scheme,
        rp_scheme=scheme.replace(' ', '_').replace('-', '_').lower(),
        sigma=sigma, precision=precisions)
    # precision=52 significand bits = IEEE double-precision reference run.
    fp = load_tendency(
        variable=variable, scheme=scheme,
        rp_scheme='all_parametrizations',
        sigma=sigma, precision=52)
    plt.title(scheme)
    plot_active(rp, fp)
    return
def plot_active(rp, fp, **kwargs):
    """Plot active/activated/deactivated gridpoint counts for rp against fp.

    rp: reduced-precision tendencies; fp: full-precision reference.
    Extra keyword arguments are forwarded to iris.plot.plot.
    """
    # Count the number of gridboxes with nonzero tendencies
    n_active, n_activated, n_deactivated, n_zeros = \
        count_active_deactive(rp, fp)
    # n_zeros is computed but deliberately not plotted here.
    for linestyle, label, data in [('-k', 'Active', n_active),
                                   ('--k', 'Activated', n_activated),
                                   (':k', 'Deactivated', n_deactivated)]:
        iplt.plot(data, linestyle, label=label, **kwargs)
    return
def count_active_deactive(rp, fp):
    """Count active/activated/deactivated/zero gridpoints of rp relative to fp.

    "Activated": nonzero at reduced precision but zero at full precision;
    "deactivated": the reverse.  Returns count cubes.
    """
    # Ignore gridboxes where reduced precision has activated or deactivated the
    # physics scheme. Will give e=infinity or e=-1 respectively
    # Create a dummy cube to use the collapsed function
    cube = rp.copy()
    # Number of active gridpoints
    n_active = count(rp)
    # Number of activated gridpoints
    activated = np.logical_and(rp.data != 0, fp.data == 0)
    cube.data = activated
    n_activated = count(cube)
    # Number of deactivated gridpoints
    deactivated = np.logical_and(rp.data == 0, fp.data != 0)
    cube.data = deactivated
    n_deactivated = count(cube)
    # Number of zeros
    # NOTE(review): cube still holds the boolean "deactivated" mask here, so
    # this counts the gridpoints that were NOT deactivated — confirm intended.
    n_zeros = count(cube, func=lambda x: x == 0)
    return n_active, n_activated, n_deactivated, n_zeros
if __name__ == '__main__':
main()
| leosaffin/scripts | myscripts/projects/ithaca/rp_physics/fig5_physics_activation.py | fig5_physics_activation.py | py | 3,054 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "myscripts.models.speedy.sigma_levels",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "myscripts.models.speedy",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 22,
"usage_type": "call"
... |
28010157790 | """This is the utils module.
Module for storing information to the file between program launches.
"""
from os import path
import json
# '/your_absolute_path_here/notes.json'
JSON_FILE = path.abspath('notes.json')
def save_to_file(notes: list) -> None:
    """
    Save the list with notes to a file.

    :param notes: list with notes.
    :return: None
    """
    with open(JSON_FILE, 'w', encoding='utf-8') as fp:
        json.dump(
            notes,
            fp,
            ensure_ascii=False,
            indent=4,
            default=str,        # serialise non-JSON types (e.g. datetimes) as strings
            separators=(',', ': '),
        )
def load_from_json_file() -> list:
    """
    Load notes from file if file exists otherwise create empty JSON file.

    :return: list with notes
    """
    # First run: materialise an empty notes file before reading it back.
    if not path.isfile(JSON_FILE):
        save_to_file([])
    with open(JSON_FILE, 'r', encoding='utf-8') as infile:
        return json.load(infile)
| allwdesign/notes | utils.py | utils.py | py | 936 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number":... |
40419644184 | from pathlib import Path
import numpy as np
import copy
from itertools import permutations
import re
data_folder = Path(__file__).parent.resolve()
reg = re.compile(
r"(\w+) would (lose|gain) (\d+) happiness units by sitting next to (\w+)."
)
class Table:
    """Seating table built from "X would gain/lose N happiness..." statements."""

    def __init__(self, data):
        """Parse *data* (one statement per line) into a happiness matrix."""
        entries = []
        names = set()
        for line in data.split("\n"):
            match = reg.match(line)
            assert match is not None
            fields = list(match.groups())
            amount = int(fields[2])
            if fields[1] == "lose":
                amount = -amount
            # [subject, signed amount, neighbour]
            record = [fields[0], amount, fields[3]]
            entries.append(record)
            names.update((record[0], record[2]))
        self.n_people = len(names)
        self.people = dict(zip(names, np.arange(self.n_people)))
        self.happiness = self.compute_happiness_matrix(entries)

    def compute_happiness_matrix(self, happiness):
        """Return an (n, n) int matrix; entry (i, j) is i's gain from sitting next to j."""
        matrix = np.zeros((self.n_people, self.n_people), dtype=int)
        for name_a, amount, name_b in happiness:
            matrix[self.people[name_a], self.people[name_b]] = amount
        return matrix

    def find_max_happiness(self, include_self=False):
        """Brute-force the circular seating maximising total happiness.

        With include_self=True an extra, indifferent person (you) is added;
        pairs involving that person contribute nothing.
        """
        n = self.n_people + 1 if include_self else self.n_people
        best = 0
        for order in permutations(range(n)):
            total = 0
            for j in range(n):
                i = (j + 1) % n
                # Skip pairs that involve the extra indifferent person.
                if self.n_people in (order[i], order[j]):
                    continue
                total += self.happiness[order[j], order[i]]
                total += self.happiness[order[i], order[j]]
            best = max(best, total)
        return best
def main():
    """Solve Advent of Code 2015 day 13 for input.txt in the current directory."""
    data_folder = Path(".").resolve()
    data = data_folder.joinpath("input.txt").read_text()
    t = Table(data)
    print("Part 1")
    print(
        "The total change in happiness for the optimal "
        + f"seating arrangement is {t.find_max_happiness()}"
    )
    print()
    print("Part 2")
    # Part 2 adds yourself (indifferent to everyone) to the table.
    print(
        "The total change in happiness for the optimal "
        + f"seating arrangement including you is {t.find_max_happiness(True)}"
    )
if __name__ == "__main__":
main()
| eirikhoe/advent-of-code | 2015/13/sol.py | sol.py | py | 2,381 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 3... |
18556723155 | import torch
import torch.nn
from collections import OrderedDict
from deep_learning.architectures.Resnet3D.model import get_pretrained_resnet
from deep_learning.architectures.ClinicalNet import ClinicalNet
def load_trained_model(model, weights_path):
    """Load a checkpoint into *model*, stripping the leading "model." key prefix.

    The checkpoint's state_dict keys look like "model.<param>"; k[6:] drops
    that 6-character prefix so they match the bare model's parameter names.
    """
    print('loading pretrained model {}'.format(weights_path))
    pretrain = torch.load(weights_path, map_location='cpu')
    new_state_dict = OrderedDict()
    for k, v in pretrain['state_dict'].items():
        name = k[6:]  # remove `model.`
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    return model
class Ensemble(torch.nn.Module):
    """Frozen ensemble of per-MRI-sequence ResNet-34 3D models (optionally plus a clinical net).

    *indices* selects which of the five sequence models to include, *fold*
    selects the cross-validation fold whose checkpoints are loaded.  The
    ensemble prediction is the mean over the individual model outputs.
    """
    def __init__(self, indices: list, fold: int, combined_clinical=False):
        super().__init__()
        self.imaging_models = torch.nn.ModuleList()
        self.clinical_model = None
        if combined_clinical:
            self.clinical_model = ClinicalNet()
        # Checkpoint order per fold:
        # ["T1_COR_agent_None", "T1_FS_COR_agent_GD", "T2_FS_COR_agent_None", "T1_FS_AX_agent_GD", "T2_FS_AX_agent_None"]
        # NOTE(review): all five fold branches currently contain identical
        # placeholder paths — fill in the real per-fold checkpoint paths.
        # Fold 0
        if fold == 0:
            trained_paths = [
                # Specify path to weights
                "/path/to/weights/T1_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_COR_agent_GD.ckpt",
                "/path/to/weights/T2_FS_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_AX_agent_GD.ckpt",
                "/path/to/weights/T2_FS_AX_agent_None.ckpt",
                # Add path to trained clinical model...
            ]
        # Fold 1
        if fold == 1:
            trained_paths = [
                # Specify path to weights
                "/path/to/weights/T1_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_COR_agent_GD.ckpt",
                "/path/to/weights/T2_FS_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_AX_agent_GD.ckpt",
                "/path/to/weights/T2_FS_AX_agent_None.ckpt",
            ]
        # Fold 2
        if fold == 2:
            trained_paths = [
                # Specify path to weights
                "/path/to/weights/T1_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_COR_agent_GD.ckpt",
                "/path/to/weights/T2_FS_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_AX_agent_GD.ckpt",
                "/path/to/weights/T2_FS_AX_agent_None.ckpt",
            ]
        # Fold 3
        if fold == 3:
            trained_paths = [
                # Specify path to weights
                "/path/to/weights/T1_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_COR_agent_GD.ckpt",
                "/path/to/weights/T2_FS_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_AX_agent_GD.ckpt",
                "/path/to/weights/T2_FS_AX_agent_None.ckpt",
            ]
        # Fold 4
        if fold == 4:
            trained_paths = [
                # Specify path to weights
                "/path/to/weights/T1_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_COR_agent_GD.ckpt",
                "/path/to/weights/T2_FS_COR_agent_None.ckpt",
                "/path/to/weights/T1_FS_AX_agent_GD.ckpt",
                "/path/to/weights/T2_FS_AX_agent_None.ckpt",
            ]
        # Keep only the selected sequences, preserving their order.
        trained_paths = [path for i, path in enumerate(trained_paths) if i in indices]
        for path in trained_paths:
            model = get_pretrained_resnet(34)
            model = load_trained_model(model, path)
            # Freeze all models
            for parameter in model.parameters():
                parameter.requires_grad = False
            self.imaging_models.append(model)

    def forward(self, x, clinical_data=None):
        """Average the per-model predictions.

        *x* is a list of per-sequence input batches, one per imaging model.
        """
        predictions = []
        for x_, model in zip(x, self.imaging_models):
            pred = model(x_)
            predictions.append(pred)
        if self.clinical_model:
            predictions.append(self.clinical_model(clinical_data))
        ensemble_prediction = torch.mean(torch.stack(predictions, dim=0), 0)
        return ensemble_prediction
| lukasfolle/MRI-Classification-RA-PsA | deep_learning/architectures/Resnet3D/ensemble.py | ensemble.py | py | 4,060 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.ModuleList"... |
14287230397 | import logging
from smtplib import SMTPException
from django.contrib import messages
from django.core.mail import EmailMultiAlternatives
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
def try_send_email_add_warning_if_failed(
        request: HttpRequest, email: str, subject: str, message: str
):
    """Render and send a regular email; on failure attach a warning to *request*.

    The warning text is Ukrainian for "failed to send an email to <email>".
    """
    html = render_to_string(
        'email/regular_email.html',
        context={
            'header': subject,
            'text': message
        }
    )
    if not send_html_email(email, subject, html):
        messages.warning(request, _(f'Не вдалося надіслати повідомлення на пошту {email}'))
def send_html_email(email: str, subject: str, html: str) -> bool:
    """Send an HTML email to *email*; return True on success, False on SMTP failure."""
    msg = EmailMultiAlternatives(
        subject=subject,
        body=mark_safe(html),
        # django requires from_email to be explicitly set to None
        # to use settings.DEFAULT_FROM_EMAIL
        from_email=None,
        to=[email]
    )
    msg.content_subtype = 'html'
    try:
        msg.send()
    except SMTPException as e:
        # Failure is reported to the caller instead of propagating.
        logging.error(
            'failed to send email to %s with error: %s',
            email,  # pylint: disable=C0209
            e
        )
        return False
    return True
| senyehor/energokodros_website | utils/common/email.py | email.py | py | 1,377 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.http.HttpRequest",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 23,
"usage_type": "call"... |
5644139426 | # import matplotlib.pyplot as plt
# import numpy as np
#
# # Sample data for clustered columns
# categories = ['Category A', 'Category B', 'Category C']
# values1 = [15, 25, 30]
# values2 = [10, 20, 25]
#
# # Create an array for the x-axis positions
# x = np.arange(len(categories))
#
# # Set the width of each bar
# bar_width = 0.35
#
# # Create the clustered columns
# plt.bar(x - bar_width/2, values1, bar_width, label='Value 1', color='b', align='center')
# plt.bar(x + bar_width/2, values2, bar_width, label='Value 2', color='g', align='center')
#
# # Add labels and legend
# plt.xlabel('Categories')
# plt.ylabel('Values')
# plt.title('Clustered Column Bar Chart')
# plt.xticks(x, categories)
# plt.legend()
#
# # Show the plot
# plt.tight_layout()
# plt.show()
import matplotlib.pyplot as plt
import numpy as np
# Sample data (replace with your actual data)
time = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
memory_overhead = [100, 105, 98, 110, 115, 112, 120, 130, 128, 132]
# Window size for the moving average (adjust as needed)
window_size = 4
# Calculate the moving average.
# NOTE(review): mode='same' zero-pads at the edges, so the first and last
# window_size//2 points are biased low — confirm this is acceptable.
smoothed_memory_overhead = np.convolve(memory_overhead, np.ones(window_size)/window_size, mode='same')
# Create a figure and axis
fig, ax = plt.subplots()
# Plot the original data
ax.plot(time, memory_overhead, label='Memory Overhead', marker='o')
# Plot the smoothed data
ax.plot(time, smoothed_memory_overhead, label='Smoothed Memory Overhead', linestyle='--')
# Set labels and legend
ax.set_xlabel('Time')
ax.set_ylabel('Memory Overhead')
ax.legend()
# Show the plot
plt.show()
| sonyavalo/statistics_paper_sofya_23 | test.py | test.py | py | 1,565 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.convolve",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
74065867873 | import torch
import numpy as np
import pickle
from tqdm import tqdm
import pandas as pd
import math
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, log_loss
from sentence_transformers import SentenceTransformer, models
from transformers import AutoModel, AutoTokenizer
import os
# Restrict the visible GPUs before torch initialises CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '6,5'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
print("Device", device)
# Sentence-transformer used to embed the initial query text.
bert_model = SentenceTransformer("/mount/arbeitsdaten43/projekte/thesis-dp-1/banerjak/msmarco-MiniLM-L-6-v3/")
if torch.cuda.is_available():
    bert_model.cuda()
print("Initializing initial query encoder...")
bert_model.max_seq_length = 1000
torch.manual_seed(10)
torch.autograd.set_detect_anomaly(True)
# Model/data hyperparameters.
embed_size = 256              # document embedding size after pooling
init_query_embed_size = 384   # sentence-transformer output size
num_docs = 20                 # documents kept per topic
batch_size = 10
epochs = 20
max_len = 256                 # tokeniser max sequence length
epsilon = 0.0001              # numerical guard used in D^-0.5 normalisation
tokenizer = AutoTokenizer.from_pretrained('google/electra-small-discriminator', truncation = True)
print("Initializing the model...")
class SBERT(torch.nn.Module):
    """ELECTRA-small document encoder with average pooling over the hidden dimension.

    forward() returns a (n_topics, num_docs, embed_size) tensor.
    """
    def __init__(self):
        super().__init__()
        # .train() keeps the transformer unfrozen for fine-tuning.
        self.roberta = AutoModel.from_pretrained('google/electra-small-discriminator').train()
        self.pooler = torch.nn.AvgPool1d(embed_size)

    def forward(self, ids, attention_masks, token_type_ids):
        # One encoder pass per topic's batch of documents.
        embeds = [self.roberta(id, attention_mask, token_type_id).last_hidden_state for (id, attention_mask, token_type_id) in zip(ids, attention_masks, token_type_ids)]
        # Average-pool along the hidden dimension, then drop the singleton axis.
        embeds = [self.pooler(embed) for embed in embeds]
        embeds = [torch.squeeze(embed, dim = 2) for embed in embeds]
        ln = len(embeds)
        embeds = torch.cat(embeds, dim = 0).reshape((ln, num_docs, embed_size))
        return embeds
class GCN(torch.nn.Module):
    """Single graph-convolution layer: ReLU(A @ X @ W + b).

    Weight and bias are initialised uniformly in [-1/sqrt(n_h), 1/sqrt(n_h)].
    """

    def __init__(self, n_h, input_features):
        super().__init__()
        self.weights = torch.nn.Parameter(torch.FloatTensor(input_features, n_h))
        self.relu = torch.nn.ReLU()
        bound = 1. / math.sqrt(self.weights.size(1))
        self.weights.data.uniform_(-bound, bound)
        self.bias = torch.nn.Parameter(torch.FloatTensor(n_h))
        self.bias.data.uniform_(-bound, bound)

    def forward(self, nfs, A):
        # Feature transform, then neighbourhood aggregation via A.
        transformed = torch.matmul(nfs, self.weights)
        aggregated = torch.matmul(A, transformed) + self.bias
        return self.relu(aggregated)
class AmbiguityNetwork(torch.nn.Module):
    """Classify query ambiguity (4 classes) from document and query embeddings.

    Documents are embedded by SBERT, connected through a cosine-similarity
    graph processed by three GCN layers, then flattened, concatenated with
    the initial query embedding and classified by two dense layers.
    """
    def __init__(self):
        super().__init__()
        self.sbert = SBERT()
        self.gcn1 = GCN(20, embed_size)
        self.gcn2 = GCN(10, 20)
        self.gcn3 = GCN(10,10)
        self.dp = torch.nn.Dropout(p = 0.5)
        self.dense1 = torch.nn.Linear(in_features = num_docs*10 + init_query_embed_size, out_features = 64)
        self.dense2 = torch.nn.Linear(in_features = 64, out_features = 4)
        self.relu = torch.nn.ReLU()

    def forward(self, ids, attention_masks, token_type_ids, init_query):
        embeds = self.sbert(ids, attention_masks, token_type_ids)
        # Cosine similarity between normalised document embeddings → adjacency A.
        emb_norm = torch.nn.functional.normalize(embeds, dim = 2)
        emb_sum = torch.sum(torch.square(emb_norm), axis = 2)
        csml = torch.matmul(emb_norm, emb_norm.transpose(1,2))
        A = csml
        # NOTE(review): debug print left in — remove for production runs?
        print(A[0])
        # Symmetric normalisation D^-1/2 A D^-1/2; epsilon guards 0^-0.5.
        S = torch.sum(A, axis = 2)
        S = S + epsilon
        S = torch.pow(S, -0.5)
        #D = torch.diag_embed(torch.sum(A, axis = 2))
        #D_inv = torch.linalg.inv(D)
        D_ = torch.diag_embed(S)
        #print("D_", D_)
        #D_ = torch.pow(D_inv, 0.5)
        A_ = torch.matmul(A, torch.transpose(D_, 1,2))
        A_ = torch.matmul(D_, A_)
        # Add self-loops via the identity matrix.
        A = A_ + torch.eye(embeds.shape[1]).to(device)
        out = self.gcn1(embeds, A)
        out = self.dp(out)
        out = self.gcn2(out, A)
        out = self.dp(out)
        out = self.gcn3(out, A)
        out = self.dp(out)
        # Flatten per-document features and append the initial query embedding.
        out = torch.flatten(out, 1)
        out = torch.cat((out, init_query.to(device)), dim = 1)
        out = self.dense1(out)
        out = self.relu(out)
        out = self.dense2(out)
        return out
class InputDataset(torch.nn.Module.__mro__[0] if False else torch.utils.data.Dataset):
    """Dataset wrapping tokenised documents, labels and initial-query embeddings."""

    def __init__(self, ids, masks, token_type_ids, labels, init_qrs):
        self.ids = ids
        self.masks = masks
        self.token_type_ids = token_type_ids
        # Cast once so __getitem__ can hand labels out directly.
        self.labels = torch.LongTensor(labels.long())
        self.init_qrs = init_qrs

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        return (self.ids[idx],
                self.masks[idx],
                self.token_type_ids[idx],
                self.labels[idx],
                self.init_qrs[idx])
def prep_data():
    """Load dev topics, tokenise their documents and embed their queries.

    Returns (topic ids, token ids, attention masks, token type ids,
    initial-query embeddings).
    """
    print("Preparing the data...")
    requests = pd.read_table('/mount/arbeitsdaten43/projekte/thesis-dp-1/banerjak/data/dev.tsv', sep = '\t', header = 0)
    df = requests
    f = open('/mount/arbeitsdaten43/projekte/thesis-dp-1/banerjak/doclists_dev.pkl', 'rb')
    docs = {}
    queries = {}
    query_list = []
    # The pickle file holds one record per topic; read until EOF.
    while True:
        try:
            data = pickle.load(f)
            tid = int(data['topic_id'])
            docs[tid] = data['docs']
            queries[tid] = data['queries']
        except EOFError:
            break
    f.close()
    tids = list(docs.keys())
    inputs = []
    for tid in tids:
        # Keep only the first num_docs documents per topic.
        inputs.append(docs[tid][:num_docs])
    # Pre-allocate tensors: (n_topics, num_docs, max_len).
    init_qrs = torch.zeros((len(tids), init_query_embed_size))
    ids = torch.LongTensor(len(tids), num_docs, max_len)
    masks = torch.LongTensor(len(tids), num_docs, max_len)
    token_type_ids = torch.LongTensor(len(tids), num_docs, max_len)
    print("Preparing training data...")
    for i in tqdm(range(len(tids))):
        tid = tids[i]
        documents = inputs[i]
        tokens = [tokenizer.encode_plus(doc, None, add_special_tokens = True, max_length = max_len, padding = 'max_length', return_token_type_ids = True, truncation = True) for doc in documents]
        # NOTE(review): torch.cat of 1-D tensors into a 2-D out= slice relies
        # on implicit resizing — confirm this behaves as intended.
        idlist = [torch.tensor(token['input_ids'], dtype = torch.long) for token in tokens]
        torch.cat(idlist, out = ids[i])
        masklist = [torch.tensor(token['attention_mask'], dtype = torch.long) for token in tokens]
        torch.cat(masklist, out = masks[i])
        tokentypelist = [torch.tensor(token['token_type_ids'], dtype = torch.long) for token in tokens]
        torch.cat(tokentypelist, out = token_type_ids[i])
        # Sentence-transformer embedding of this topic's query text.
        init_qrs[i] = torch.Tensor(bert_model.encode(queries[tid]))
    return tids, ids, masks, token_type_ids, init_qrs
print("Testing")
def test():
    """Evaluate the trained ensemble on the dev set and save predictions to disk."""
    tids, ids, masks, token_type_ids, init_qrs = prep_data()
    model = torch.nn.DataParallel(AmbiguityNetwork())
    model.load_state_dict(torch.load('roberta_finetune_aug_model_for_test.pt'))
    model.cuda(device)
    model.eval()
    print("Testing...")
    ids = ids.to(device)
    masks = masks.to(device)
    token_type_ids = token_type_ids.to(device)
    # Inference only: no gradients needed.
    with torch.no_grad():
        val_preds = model(ids, masks, token_type_ids, init_qrs)
    m = torch.nn.Softmax()
    val_npreds = m(val_preds).cpu().numpy()
    # Shift from 0-based argmax to 1-based class labels.
    class_preds = np.argmax(val_npreds, axis = 1)
    class_preds = class_preds + 1
    outs = [(tids[i], class_preds[i]) for i in range(len(class_preds))]
    outs = np.array(outs)
    np.savetxt('preds_model_1_dev.txt', outs, fmt="%s %s")
    np.save('classpreds_model_1_dev.npy', class_preds)
test()
| bavik022/thesis-ambiguity_detection | bert_finetune_cuda_test.py | bert_finetune_cuda_test.py | py | 7,314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
31676609044 | import time
import pyautogui
import cv2
import numpy as np
import datetime
import win32api
import win32con
import find_box
import log_message
import role_loc
import role_move
import send_message
# Template images used for on-screen matching.
map_in_store = cv2.imread('img/map_in_store.png')
open_map_btn = cv2.imread('img/open_map.png')
map_title = cv2.imread('img/map_title.png')
buy_map_tip = cv2.imread('img/buy_map_tip.png')
confirm_btn = cv2.imread('img/confirm_btn.png')
bag_left = cv2.imread('img/bag_left.png')
store_npc = cv2.imread('img/store_npc.png')
open_map_error = cv2.imread('img/open_map_error.png')
home_door_btn = cv2.imread('img/home_door_btn.png')
home_main_btn = cv2.imread('img/home_main_btn.png')
back_origin_btn = cv2.imread('img/back_origin_btn.png')
new_day_tip = cv2.imread('img/new_day_tip.png')
close_btn = cv2.imread('img/close_btn.png')
horse = cv2.imread('img/horse.png')
isdead = cv2.imread('img/isdead.png')
# Screen position of the treasure-map tab in the map window
open_box_map_pos = [537, 51]
# Screen position of the first map item to discard
first_map_pos = [1750, 350]
# Screen position of the confirm button
confirm_pos = [880, 450]
# Wait time to open the treasure maps at the current level (seconds)
wait_open_time1 = 131
# Wait time to open 50 treasure maps (seconds)
wait_open_time2 = 163
# Reduction factor of the treasure-map channelling (cast) time
decreased_percent = 0.4
# Start coordinates, facing direction and size of the first dig area
begin_find_loc_1 = [-825, -540]
begin_find_direct_1 = 0.6
find_area_1 = [65, 42]
# Start coordinates, facing direction and size of the second dig area
begin_find_loc_2 = [-975, -525]
begin_find_direct_2 = -0.5
find_area_2 = [61, 34]
# Bag slot size in pixels, and slots per bag row
bag_item_size = 36
bag_width = 12
# Walking offset from the home spawn point to the door
home_to_door = [-4, -1]
# Screen coordinates of the "resurrect at Dragon Star formation" option
resurrect_loc = [970,800]
def match_img(template):
    """Template-match *template* against a fresh screenshot.

    Returns (best score, top-left location).  Method 3 is
    cv2.TM_CCORR_NORMED, so higher scores mean better matches.
    """
    image = cv2.cvtColor(np.asarray(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
    match_res = cv2.matchTemplate(image, template, 3)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match_res)
    return max_val, max_loc
def clear_map():
    """Discard every treasure map in the bag via the map window; return the count.

    Afterwards checks for the death dialog and resurrects if needed.
    NOTE(review): loop extents were reconstructed from the de-indented
    original — verify against the upstream file.
    """
    pyautogui.press('m')
    time.sleep(0.5)
    max_val, max_loc = match_img(map_title)
    #print(max_val)
    if max_val < 0.95:
        # Map window not on the treasure-map tab yet: click the tab.
        pyautogui.moveTo(open_box_map_pos[0], open_box_map_pos[1])
        pyautogui.leftClick()
    #time.sleep(0.1)
    count = role_loc.get_clear_map_count()
    print('丢图数量 = '+str(count))
    for i in range(0, count):
        # Right-click the first map slot, pick "discard" and confirm.
        pyautogui.moveTo(first_map_pos[0], first_map_pos[1])
        pyautogui.rightClick()
        pyautogui.moveTo(first_map_pos[0] + 50, first_map_pos[1] + 30)
        pyautogui.leftClick()
        pyautogui.press('enter')
        # pyautogui.moveTo(confirm_pos[0], confirm_pos[1])
        # pyautogui.leftClick()
    # Click a neutral spot, then close the map window.
    pyautogui.moveTo(first_map_pos[0] - 50, first_map_pos[1] - 50)
    pyautogui.leftClick()
    pyautogui.press('m')
    max_val, max_loc = match_img(isdead)
    if max_val > 0.95:
        time.sleep(15)
        print("isdead的max_val = " + str(max_val))
        resurrect()
    return count
def buy_map():
    """Walk to the map NPC, clear the bag and buy treasure maps.

    Returns True on success, False when the NPC or its store cannot be found.
    NOTE(review): loop/branch extents were reconstructed from the
    de-indented original — verify against the upstream file.
    """
    max_val = 0
    # Retry up to 10 times to get the store NPC on screen while walking over.
    for i in range(0, 10):
        time.sleep(0.2)
        max_val, max_loc = match_img(store_npc)
        # print(max_val)
        if max_val > 0.9:
            break
        role_move.move_to([-803, -721])
        role_move.move_to([-803, -716], None, 1)
    if max_val <= 0.9:
        send_message_with_loc("Find Map NPC Error")
        return False
    # Interact with the NPC and wait for the store window.
    pyautogui.press('f')
    time.sleep(0.5)
    max_val, max_loc = match_img(map_in_store)
    if max_val <= 0.9:
        send_message_with_loc("Open Map Store Error")
        return False
    clear_bag()
    # Shift+right-click the map item to open the quantity dialog.
    pyautogui.moveTo(max_loc[0] + 24, max_loc[1] + 24)
    pyautogui.keyDown('shift')
    pyautogui.rightClick()
    pyautogui.keyUp('shift')
    max_val, max_loc = match_img(buy_map_tip)
    if max_val > 0.9:
        # Type quantity "41" and confirm.
        pyautogui.press('4')
        pyautogui.press('1')
        pyautogui.press('enter')
    # Buy 9 more single maps via rapid right clicks.
    pyautogui.click(x=None, y=None, clicks=9, interval=0.001, button='right', duration=0.0, tween=pyautogui.linear)
    # max_val, max_loc = match_img(confirm_btn)
    # if max_val > 0.9:
    #     pyautogui.moveTo(max_loc[0] + 50, max_loc[1] + 15)
    #     pyautogui.leftClick()
    time.sleep(0.5)
    return True
def open_map():
    """Walk to the map-opening spot, open all treasure maps and wait for the cast.

    Returns the number of maps opened, or False on the open-map error dialog.
    NOTE(review): branch extents were reconstructed from the de-indented
    original — verify against the upstream file.
    """
    role_move.move_to([-802, -703])
    role_move.move_to([-791, -702])
    role_move.move_to([-777, -701])
    role_move.move_to([-756, -703], None, 0, 5)
    max_val, max_loc = match_img(open_map_btn)
    pyautogui.moveTo(max_loc[0] + 24, max_loc[1] + 24)
    down_horse()
    pyautogui.leftClick()
    pyautogui.sleep(1)
    max_val, max_loc = match_img(open_map_error)
    open_map_count = 0
    if max_val < 0.9:
        # No error dialog: check the map window to count the maps being opened.
        pyautogui.press('m')
        time.sleep(0.5)
        max_val, max_loc = match_img(map_title)
        #print(max_val)
        if max_val < 0.95:
            pyautogui.moveTo(open_box_map_pos[0], open_box_map_pos[1])
            pyautogui.leftClick()
        open_map_count = role_loc.get_clear_map_count()
        pyautogui.press('m')
        print('开图数量 = '+str(open_map_count))
        # 5 s cast per map, shortened by the channel-time reduction.
        wait_open_time = 5*(1-decreased_percent)*open_map_count-1
        #print('开图时间 = '+str(wait_open_time))
        pyautogui.sleep(wait_open_time)
        pyautogui.moveRel(0, -100)
        up_horse()
        return open_map_count
    else:
        close_dialog()
        up_horse()
        send_message_with_loc("Open Map Error")
        return False
def down_horse():
    """Dismount if currently mounted; no-op otherwise.

    't' toggles the mount; 'shift' presumably cancels the residual movement —
    confirm against the game's key bindings.
    """
    if not is_on_horse():
        return
    pyautogui.press('t')
    pyautogui.press('shift')
    pyautogui.sleep(3)
def up_horse():
    """Mount the horse if not already mounted; 't' toggles the mount."""
    if is_on_horse():
        return
    pyautogui.press('t')
    pyautogui.sleep(3)
def close_dialog():
    """Click the close button of any open dialog, if one is visible on screen."""
    max_val, max_loc = match_img(close_btn)
    if max_val > 0.9:
        pyautogui.moveTo(max_loc[0] + 6, max_loc[1] + 6)
        pyautogui.leftClick()
def prepare_to_find():
    """Walk to the first dig-area start point and face the dig direction.

    Returns True when the character arrives within 5 units of the target,
    otherwise reports the failure and returns False.
    """
    role_move.move_to([-779, -701])
    role_move.move_to([-793, -703])
    role_move.move_to([-793, -677])
    role_move.move_to([-795, -666])
    role_move.move_to([-795, -640])
    role_move.move_to(begin_find_loc_1, None, 1, 5)
    role_move.turn_to(begin_find_direct_1)
    loc = role_loc.get_current_loc()
    if loc is not None and abs(loc[0] - begin_find_loc_1[0]) < 5 and abs(loc[1] - begin_find_loc_1[1]) < 5:
        return True
    else:
        send_message_with_loc("Go to Find Box Error")
        return False
def find_boxs():
    # Sweep both configured search areas for boxes, then check whether the
    # character died during the sweep. Returns the number of boxes opened.
    count = 0
    role_move.move_to(begin_find_loc_1, None, 1, 5)
    role_move.turn_to(begin_find_direct_1)
    count += role_move.move_map(find_area_1[0], find_area_1[1], find_box.find_box_under_footer)
    role_move.move_to(begin_find_loc_2, None, 1, 5)
    role_move.turn_to(begin_find_direct_2)
    count += role_move.move_map(find_area_2[0], find_area_2[1], find_box.find_box_under_footer)
    role_move.move_to([-850, -560], None, 5, 3)
    print("开盒次数" + str(count))  # number of boxes opened
    if count <= 0:
        # Nothing found: input state may be stuck -- clear it and report.
        reset_keys()
        send_message_with_loc("Find No Box")
    # Death check: the `isdead` template visible means we were killed.
    max_val, max_loc = match_img(isdead)
    if max_val > 0.95:
        time.sleep(15)
        print("isdead的max_val = " + str(max_val))
        resurrect()
    return count
def back_to_store():
    """Walk the fixed path back to the store; True once within 5 units of (-803, -716)."""
    for point in ([-795, -644], [-795, -667], [-795, -702],
                  [-802, -702], [-803, -721]):
        role_move.move_to(point)
    role_move.move_to([-803, -716], None, 0, 5)
    loc = role_loc.get_current_loc()
    if loc is None or abs(-803 - loc[0]) >= 5 or abs(-716 - loc[1]) >= 5:
        send_message_with_loc("Back To Store Error")
        return False
    return True
def clear_bag():
    """Shift-right-click every slot in the first four bag rows to discard items.

    Locates the bag panel via the `bag_left` template; no-op when the bag
    is not on screen (match confidence < 0.9).
    """
    max_val, max_loc = match_img(bag_left)
    if max_val < 0.9:
        return
    # Top-left item slot, offset from the matched bag border.
    first_loc = [max_loc[0] + 100, max_loc[1] - 90]
    pyautogui.keyDown('shift')
    try:
        # 4 rows x bag_width columns; shift+right-click discards each item.
        for row in range(4):
            for col in range(bag_width):
                pyautogui.moveTo(first_loc[0] + col * bag_item_size,
                                 first_loc[1] + row * bag_item_size)
                pyautogui.rightClick()
    finally:
        # Always release shift, even if pyautogui raises (e.g. its corner
        # fail-safe); otherwise the modifier stays stuck for later input.
        pyautogui.keyUp('shift')
def reset_to_store():
    # Recover to the store via the home/teleport UI: enter the house,
    # use the "back to origin" door, and confirm arrival near (-803, -715).
    # Returns True on confirmed arrival, False at any failed step.
    current_loc = role_loc.get_current_loc()
    if current_loc is None:
        return False
    # Already near the store: step toward the home door first.
    if abs(-803 - current_loc[0]) < 5 and abs(-716 - current_loc[1]) < 5:
        role_move.move_to([-803, -721])
    down_horse()
    max_val, max_loc = match_img(home_door_btn)
    if max_val < 0.9:
        up_horse()
        return False
    pyautogui.moveTo(max_loc[0] + 24, max_loc[1] + 24)
    pyautogui.leftClick()
    pyautogui.sleep(5)
    pyautogui.press('f')  # interact with the door
    pyautogui.moveRel(-100, -100)
    time.sleep(1)
    max_val, max_loc = match_img(home_main_btn)
    if max_val < 0.9:
        up_horse()
        return False
    pyautogui.moveTo(max_loc[0] + 30, max_loc[1] + 15)
    pyautogui.leftClick()
    pyautogui.sleep(30)  # wait for the loading screen into the house
    role_move.move(home_to_door[0], home_to_door[1])
    pyautogui.press('f')
    time.sleep(1)
    max_val, max_loc = match_img(back_origin_btn)
    if max_val < 0.9:
        up_horse()
        return False
    pyautogui.moveTo(max_loc[0] + 30, max_loc[1] + 15)
    pyautogui.leftClick()
    pyautogui.sleep(60)  # wait for the teleport back to the overworld
    reset_visual_field()
    loc = role_loc.get_current_loc()
    up_horse()
    if loc is not None and abs(-803 - loc[0]) < 5 and abs(-715 - loc[1]) < 5:
        return True
    return False
def reset_keys():
    """Clear any stuck input state: cycle shift, then press/release both
    mouse buttons over the footer position, pausing between each step."""
    pyautogui.keyDown('shift')
    pyautogui.keyUp('shift')
    pyautogui.sleep(2)
    pyautogui.moveTo(find_box.footer_pos[0], find_box.footer_pos[1])
    pyautogui.sleep(2)
    for btn in ('left', 'right'):
        pyautogui.mouseDown(button=btn)
        pyautogui.sleep(2)
        pyautogui.mouseUp(button=btn)
        pyautogui.sleep(2)
def try_reset():
    # Keep attempting reset_to_store() until it succeeds, bailing out
    # whenever the weekly maintenance window is active (deal_new_day()
    # returning a falsy value).
    if not deal_new_day():
        return
    count = 0
    while not reset_to_store():
        count += 1
        send_message_with_loc("Try reset count " + str(count))
        role_move.move(-10, -10)
        time.sleep(600)  # wait 10 minutes between attempts
        if not deal_new_day():
            return
def deal_new_day():
    """Return False during the Thursday 06:00-09:59 maintenance window;
    otherwise dismiss the new-day popup when visible and return True."""
    now = datetime.datetime.now()
    if now.isoweekday() == 4 and 5 < now.hour < 10:
        # Server is down for weekly maintenance.
        return False
    score, _ = match_img(new_day_tip)
    if score > 0.9:
        close_dialog()
    return True
def is_on_horse():
    """True when the mounted-state template matches with confidence > 0.9."""
    score = match_img(horse)[0]
    return score > 0.9
def reset_visual_field():
    # Normalize the camera: tilt fully down first, then drag upward three
    # times using raw win32 mouse events (pyautogui drags do not move the
    # in-game camera -- presumably why win32api is used here; confirm).
    reset_look_down()
    x, y = 1000, 700
    win32api.SetCursorPos((x, y))
    time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y)
    time.sleep(0.1)
    for i in range(0, 3):
        # Each relative move of -100 px drags the camera upward a step.
        win32api.SetCursorPos((x, y))
        time.sleep(0.1)
        win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, 0, -100)
        time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
    time.sleep(0.1)
def reset_look_down():
    # Drag the camera downward nine times (relative +100 px moves while
    # the left button is held) so the view starts from a known extreme.
    x, y = 1000, 120
    win32api.SetCursorPos((x, y))
    time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y)
    time.sleep(0.1)
    for i in range(0, 9):
        win32api.SetCursorPos((x, y))
        time.sleep(0.1)
        win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, 0, 100)
        time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
    time.sleep(0.1)
def send_message_with_loc(message):
    """Send `message` with the current map position and facing appended."""
    where = role_loc.get_current_loc()
    facing = role_loc.get_current_direction()
    send_message.send_message("{0} {1} {2}".format(message, where, facing))
def print_log_with_loc(message):
    """Log `message` as an error with the current map position and facing appended."""
    where = role_loc.get_current_loc()
    facing = role_loc.get_current_direction()
    log_message.log_error("{0} {1} {2}".format(message, where, facing))
def send_message_briefing(message: dict, index=0):
    """Send a numbered briefing listing every key/value pair in `message`.

    `message` must be a mapping: the original annotation said `list`, but
    the body indexes it with its own keys (message[key]), which only works
    for dict-like objects. `index` is the briefing sequence number shown
    in the header line.
    """
    send_text = '简报第{0}次:'.format(index) + '\n'
    # Iterate keys directly; the original used enumerate(), which both
    # shadowed the `index` parameter and discarded the enumeration count.
    for key in message:
        send_text = send_text + str(key) + ':' + str(message[key]) + '\n'
    send_message.send_message(send_text)
def resurrect(count = 3):
    # Click the resurrect button up to `count` times, checking the
    # `isdead` template after each attempt, then walk back to the store.
    for i in range(0,count):
        pyautogui.moveTo(resurrect_loc[0],resurrect_loc[1])
        pyautogui.leftClick()
        time.sleep(3)
        max_val, max_loc = match_img(isdead)
        if max_val < 0.95:
            # Death indicator gone -> resurrection succeeded.
            print("复活后的max_val = " + str(max_val))
            break
    # NOTE(review): the statement that follows this call invokes
    # send_message.send_message()(...) (double call) and concatenates
    # str + int `i`; it looks malformed and would raise at runtime --
    # confirm and fix upstream.
    reset_to_store()
send_message.send_message()("try to resurrect" + i + "次") | fushenghuanyu/GJ | role_action.py | role_action.py | py | 12,490 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 18,
... |
16675614140 | import json
from mitmproxy import ctx
from habdel_mongo import mongo_info
def response(flow):
    # mitmproxy response hook for sniffing Douyin app traffic.
    if "https://aweme.snssdk.com/aweme/v2/feed" in flow.request.url:  # original video feed links
        info = ctx.log.info
        info(flow.response.text)
        # print(flow.response.text,"888888888"*10)
        # print(flow.request.url)
    if "https://aweme.snssdk.com/aweme/v1/user/follower/list" in flow.request.url:  # follower info
        # with open('user.txt',"w")as f:
        #     f.write(flow.request.text)
        # Extract a fixed subset of fields per follower; assumes the
        # response body always has a 'followers' array -- TODO confirm.
        for user in json.loads(flow.response.text)['followers']:
            douyin_info={}
            douyin_info['uid']=user['uid']
            douyin_info['sec_uid'] = user['sec_uid']
            douyin_info['douyin_id'] = user['short_id']
            douyin_info['nickname'] = user['nickname']
            douyin_info['total_favorited'] = user['total_favorited']
            douyin_info['follower_count'] = user['follower_count']
            douyin_info['following_count'] = user['following_count']
            print(douyin_info)
mongo_info.insert_item(douyin_info) | luopeixiong/python-test | 爬虫项目/app爬虫/抖音抓取/decode_douyin.py | decode_douyin.py | py | 1,101 | python | en | code | null | github-code | 1 | [
{
"api_name": "mitmproxy.ctx.log",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mitmproxy.ctx",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "habdel_mongo.mongo_info... |
17397569699 | import pygame
from time import *
pygame.init()
win=pygame.display.set_mode((300,300))
sleep(5)
pygame.mixer.music.load("1.mp3")
pygame.mixer.music.play(0,0,800)
run=True
click=[0,0,0]
def check_events():
    # Per-frame input poll: window close and ESC (keycode 27) set run=False;
    # `click[i]` counts consecutive frames each mouse button is held.
    global run,keys,mouse_pos,mouse_down,click
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            run=False
    keys=pygame.key.get_pressed()
    if keys[27]: run=False  # 27 == pygame.K_ESCAPE
    mouse_pos=pygame.mouse.get_pos()
    mouse_down=pygame.mouse.get_pressed()
    for i in range(3):
        if mouse_down[i]:
            click[i]+=1
        else:
            click[i]=0
clicks=[]
start_time=time()
while run:
check_events()
if click[0]==1:
clicks.append(time()-start_time)
| makazis/School-Project-23.04.2023 | Music/Beat_Syncer.py | Beat_Syncer.py | py | 754 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.mus... |
9512087556 | from pathlib import Path
import pendulum
from airflow.models import Variable
from docker.types import Mount
class AppConst:
    # Constants identifying the Docker image run by the serving DAG.
    DOCKER_USER = Variable.get("DOCKER_USER", "thangphan")  # Airflow Variable with fallback
    PROJECT = "real_estate"
    IMAGE_NAME = "model_serving"
    TAG = "latest"
class AppPath:
    # Filesystem layout on the Airflow host, rooted at the ROOT_DIR Variable.
    ROOT_DIR = Path(Variable.get("ROOT_DIR"))
    CODE_DIR = Path(ROOT_DIR, "code")
    MODEL_SERVING_DIR = Path(CODE_DIR, "model_serving")
    FEATURE_REPO = Path(ROOT_DIR, "feature_repo")
    DATA_SOURCE_DIR = Path(ROOT_DIR, "data_sources")
    BATCH_DATA_DIR = Path(MODEL_SERVING_DIR, "data")
    ARTIFACTS_DIR = Path(MODEL_SERVING_DIR, "artifacts")
class DefaultConfig:
    # Shared defaults for the DAG and its DockerOperator tasks.
    DEFAULT_DAG_ARGS = {
        "owner": "thangphan",
        "retries": 0,
        # NOTE(review): "retry_deplay" looks like a typo of Airflow's
        # "retry_delay" default_args key -- as written the 20s delay
        # would not be picked up; confirm and fix.
        "retry_deplay": pendulum.duration(seconds=20)
    }

    DEFAULT_DOCKER_OPERATORS_ARGS = {
        "image": f"{AppConst.DOCKER_USER}/{AppConst.PROJECT}/{AppConst.IMAGE_NAME}:{AppConst.TAG}",
        "api_version": "auto",
        "auto_remove": True,
        # Host directories bind-mounted into the serving container.
        "mounts": [
            # Feature repo
            Mount(
                source=AppPath.FEATURE_REPO.absolute().as_posix(),
                target="/real_estate/feature_repo",
                type="bind",
            ),
            # Data source
            Mount(
                source=AppPath.DATA_SOURCE_DIR.absolute().as_posix(),
                target="/real_estate/data_sources",
                type="bind"
            ),
            # Artifacts
            Mount(
                source=AppPath.ARTIFACTS_DIR.absolute().as_posix(),
                target="/real_estate/code/model_serving/artifacts",
                type="bind"
            ),
            # Batch data
            Mount(
                source=AppPath.BATCH_DATA_DIR.absolute().as_posix(),
                target="/real_estate/code/model_serving/data",
                type="bind"
            )
        ],
        "mount_tmp_dir": False
} | Thangphan0102/RealEstateProject | code/model_serving/dags/utils.py | utils.py | py | 1,943 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "airflow.models.Variable.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "airflow.models.Variable",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "airflo... |
73033755553 | # -*- coding: utf-8 -*-
'''
Beacon to monitor disk usage.
.. versionadded:: 2015.5.0
'''
# Import Python libs
from __future__ import absolute_import
import logging
import psutil
import re
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
__virtualname__ = 'diskusage'
def __virtual__():
    '''
    Load this beacon on every platform except Windows.
    '''
    return False if salt.utils.is_windows() else __virtualname__
def beacon(config):
    '''
    Monitor the disk usage of the minion

    Specify thresholds for each disk and only emit a beacon if any of them are
    exceeded.

    code_block:: yaml

        beacons:
          diskusage:
            - /: 63%
            - /mnt/nfs: 50%
    '''
    ret = []
    for diskusage in config:
        # Each config item is a single-key dict: {mount_point: threshold}.
        # next(iter(...)) works on both Python 2 and 3; the previous
        # diskusage.keys()[0] raises TypeError on Python 3, where keys()
        # returns a non-subscriptable view.
        mount = next(iter(diskusage))
        try:
            _current_usage = psutil.disk_usage(mount)
        except OSError:
            # Ensure a valid mount point
            log.error('{0} is not a valid mount point, skipping.'.format(mount))
            continue

        current_usage = _current_usage.percent
        monitor_usage = diskusage[mount]
        if '%' in monitor_usage:
            # Strip the percent sign, e.g. '63%' -> '63'.
            monitor_usage = re.sub('%', '', monitor_usage)
        monitor_usage = float(monitor_usage)
        if current_usage >= monitor_usage:
            ret.append({'diskusage': current_usage, 'mount': mount})
    return ret
| shineforever/ops | salt/salt/beacons/diskusage.py | diskusage.py | py | 1,359 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "salt.utils.utils.is_windows",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "salt.utils.utils",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "s... |
17094280444 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from decimal import Decimal
class Migration(migrations.Migration):
    # Adds the nullable-default `other_percentage` decimal column
    # to the `gas` model (builds on migration 0005).

    dependencies = [
        ('gas', '0005_auto_20150827_0116'),
    ]

    operations = [
        migrations.AddField(
            model_name='gas',
            name='other_percentage',
            field=models.DecimalField(max_digits=5, default=Decimal('0.00'), decimal_places=1, verbose_name='Percentage Other'),
        ),
    ]
| stpyang/downtowndivers | gas/migrations/0006_gas_other_percentage.py | 0006_gas_other_percentage.py | py | 509 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 15,
"usage_type": "call"
},
{
... |
11078081992 | import os
from datetime import datetime
def setnum():
    # Placeholder, never implemented; the renaming logic lives in __main__.
    pass
# Batch-rename images: walk PATH and rename every file into SAVE_PATH
# as fishNNNNNN.jpg with a sequential, zero-padded counter.
if __name__ == '__main__':
    time1 = datetime.now()
    PATH = "Datalast_CUT"
    SAVE_PATH = "Datalast"
    if not os.path.isdir(SAVE_PATH):
        os.mkdir(SAVE_PATH)
    NUM_init = 9135  # custom starting number
    num = NUM_init
    for (dirpath, dirnames, filenames) in os.walk(PATH):
        dirpath = str(dirpath).replace("\\", "/")
        for file in filenames:
            reoldfile = dirpath + '/' + file
            # num = int(str(file).strip('.jpg'))
            # print(num)
            # NOTE(review): os.rename moves files OUT of PATH into
            # SAVE_PATH and assumes every input is a .jpg -- confirm.
            newfile = SAVE_PATH + "/fish" + str(num).zfill(6) + ".jpg"
            # newfile = str(SAVE_PATH+'/'+"fish","%4d"%num,".jpg")
            print(newfile)
            os.rename(reoldfile, newfile)
            num += 1
    time2 = datetime.now() - time1
    print(time2)
| ChangMQ267/VOC2COCO | PhotoSetNum.py | PhotoSetNum.py | py | 858 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
8669162789 | #!/usr/bin/env python3
# coding:utf8
import sys
import re
import requests
from bs4 import BeautifulSoup
import json
import random
from django.http import HttpResponse
from requests.packages.urllib3.exceptions import InsecureRequestWarning
reload(sys)
sys.setdefaultencoding('utf8')
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
agents = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
]
headers = {
'User-Agent': random.choice(agents),
'Cookie': 'HJ_UID=9bc75b02-cb96-8216-4699-5380ad3d24f8; TRACKSITEMAP=3%2C; _REF=; _SREF_3=; HJ_SID=9f518693-5897-82ba-c5db-03be24a4911e; HJ_CST=0; HJ_CMATCH=1; HJ_SSID_3=8a5cf2d4-5acf-9e49-cd09-73a1d06adc74; HJ_CSST_3=1; _SREF_3='
}
def searchWord(request, keyword):
    # Django view (Python 2 code: uses unicode()/encode): scrape the
    # hjenglish JP->CN dictionary page for `keyword` and return the
    # parsed entries as a JSON HttpResponse.
    wordURL = 'https://dict.hjenglish.com/jp/jc/' + keyword
    wordPage = requests.get(wordURL, headers=headers, verify=False)
    htmlData = wordPage.text.encode("utf8")
    soup = BeautifulSoup(htmlData.decode("utf-8", "ignore"), "html.parser")
    try:
        wordList = []
        for word in soup.find_all("div", class_="word-details-pane"):
            # Headword (Chinese/kanji form).
            # NOTE(review): on failure the exception object itself is
            # stored in `chinese`/`realKana`, which later ends up in the
            # JSON payload -- confirm this is intended.
            try:
                chinese = word.find("h2").text.encode("utf8")
            except Exception as e:
                print(e)
                chinese = e
            # print(chinese)
            pronounceArray = word.findAll("span")
            # Reading (kana), stripped of the surrounding brackets.
            try:
                realKana = pronounceArray[0].text.replace("[", "").replace("]", "").encode("utf8")
            except Exception as e:
                print(e)
                realKana = e
            # print(realKana)
            # Pitch accent; multiple tones are separated by '或' ("or").
            try:
                toneStr = pronounceArray[2].text
                if toneStr == '':
                    tune = []
                else:
                    tune = toneStr.split(unicode('或', "utf8"))
            except Exception as e:
                print(e)
                tune = []
            # print(tune)
            simpleArray = word.find_all("div", class_="simple")
            meanings = []
            nominal = ""
            for simples in simpleArray:
                # Part of speech, with decoration and the '词' suffix removed.
                try:
                    nominal = simples.find("h2").text.replace("【", "").replace("】", "").replace(" ", "").replace(
                        unicode("词", "utf-8"), "").encode("utf8")
                except Exception as e:
                    print(e)
                    nominal = ""
                # Normalize simplified POS labels to the Japanese forms.
                pattern = {
                    unicode('形容'): unicode('形'),
                    unicode('形容动'): unicode('形動'),
                    unicode('连'): unicode('連'),
                    unicode('动'): unicode('動'),
                }
                for x in pattern:
                    nominal = re.sub(x, pattern[x], nominal)
                # print(nominal)
                # Numbered definition list, with the "1." prefixes stripped.
                try:
                    meaningUl = simples.find("ul")
                    for meaningLi in meaningUl.findAll("li"):
                        meanings.append(re.sub(r'[0-9]\.', "", meaningLi.text.replace(" ", "").replace("\n", "")))
                except Exception as e:
                    print(e)
            # print(meanings)
            wordList.append(
                {
                    "kana": realKana,
                    "chinese": chinese,
                    "meanings": [x.encode("utf8") for x in meanings],
                    "nominal": nominal,
                    "tune": [x.encode("utf8") for x in tune],
                }
            )
        # print(wordList)
        print(json.dumps(wordList, ensure_ascii=False))
        return HttpResponse(json.dumps(wordList, ensure_ascii=False))
    except Exception as e:
        return HttpResponse(e)
| songkuixi/JapaneseWordBook-Server | search/WordSearchNew.py | WordSearchNew.py | py | 5,204 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.exceptions.InsecureRequestWarning",
"line_number"... |
18216174452 | import os
import tkinter as tk
import sys
from tkinter import filedialog
from tkinter import ttk
from openslide import open_slide
from openslide.deepzoom import DeepZoomGenerator
from modules.recorder import Recorder
class FileSelection:
    # Initial screen: a single "Select File" button that opens a file
    # dialog and then hands control to LevelSelection.
    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        # select file button
        select_button = tk.Button(self.frame, text='Select File')
        select_button.pack(fill=tk.X)
        select_button.bind('<Button-1>', self.file_selection)
        self.frame.pack(padx=50, pady=50)
        # Optional CLI argument: a tiles directory (stored on the root
        # window; presumably used by the visualiser mode -- confirm).
        if len(sys.argv) >= 2:
            root.tiles_directory = sys.argv[1]
        else:
            root.tiles_directory = None

    def file_selection(self, event):
        # open the file selection menu and get the file path
        root.file_path = filedialog.askopenfilename()
        # separate the file name from the full path
        root.file_name = os.path.basename(root.file_path)
        print("root.file_path: {}".format(root.file_path))
        self.frame.pack_forget()
        self.app = LevelSelection(self.master)
class LevelSelection:
    # Second screen: open the selected slide, let the user pick the
    # initial deep-zoom level, and launch the Recorder on confirmation.
    def __init__(self, master):
        frame = tk.Frame(root)
        frame.focus_force()
        slide = open_slide(root.file_path)
        dz_generator = DeepZoomGenerator(slide)
        select_level = ttk.Label(frame, text="Select Initial Level")
        select_level.pack()
        # combo box for initial level
        selection = ttk.Combobox(
            frame, values=[i for i in range(dz_generator.level_count)])
        selection.pack()
        # confirm button
        confirm = tk.Button(frame, text='OK')
        confirm.pack()

        def on_button_press(event):
            # Tear down this screen and start recording at the chosen level.
            frame.pack_forget()
            Recorder(root, deep_zoom_object=dz_generator, level=int(selection.get()))
            # print ("Recorder")
            # # if tiles_dirctory is provided in args, the visualiser tool is run
            # if root.tiles_directory is None:
            #     Recorder(root, deep_zoom_object=dz_generator, level=int(selection.get()))
            #     print ("Recorder")
            # else:
            #     Visualiser(root, deep_zoom_object=dz_generator, level=int(selection.get()))
            #     print ("Visualiser")

        confirm.bind('<Button-1>', on_button_press)
        frame.pack(padx=50, pady=50)
# Build the root window and start on the file-selection screen.
root = tk.Tk()
root.minsize(width=250, height=125)
root.title("WSI Viewer")
app = FileSelection(root)
root.mainloop()
| UmarJ/lsiv-python3 | interface_recorder.py | interface_recorder.py | py | 2,510 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tkinter.X",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_numb... |
6504127042 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cdnendpoint
version_added: "2.8"
short_description: Manage Azure Endpoint instance.
description:
- Create, update and delete instance of Azure Endpoint.
options:
resource_group:
description:
- Name of the Resource group within the Azure subscription.
required: True
profile_name:
description:
- Name of the CDN profile which is unique within the resource group.
required: True
name:
description:
- Name of the endpoint under the profile which is unique globally.
required: True
location:
description:
- Resource location.
- Required when C(state) is I(present).
origin_host_header:
description:
- "The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN
I(origins), such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default."
origin_path:
description:
- A directory path on the origin that CDN can use to retreive content from, e.g. contoso.cloudapp.net/originpath.
content_types_to_compress:
description:
- List of content types on which compression applies. The value should be a valid MIME type.
type: list
is_compression_enabled:
description:
- "Indicates whether content compression is enabled on CDN. Default value is false. If compression is enabled, content will be served as
compressed if user requests for a compressed version. Content won't be compressed on CDN when requested content is smaller than 1 byte or
larger than 1 MB."
is_http_allowed:
description:
- Indicates whether HTTP traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
is_https_allowed:
description:
- Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed.
query_string_caching_behavior:
description:
- "Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests
that contain query strings from being cached, or cache every request with a unique URL."
choices:
- 'ignore_query_string'
- 'bypass_caching'
- 'use_query_string'
- 'not_set'
optimization_type:
description:
- "Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can
apply scenario driven optimization."
choices:
- 'general_web_delivery'
- 'general_media_streaming'
- 'video_on_demand_media_streaming'
- 'large_file_download'
- 'dynamic_site_acceleration'
probe_path:
description:
- "Path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN.
This is relative to the origin path."
geo_filters:
description:
- "List of rules defining the user's geo access within a CDN endpoint. Each geo filter defines an acess rule to a specified path or content,
e.g. block APAC for path /pictures/"
type: list
suboptions:
relative_path:
description:
- "Relative path applicable to geo filter. (e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)"
- Required when C(state) is I(present).
action:
description:
- Action of the geo filter, i.e. C(allow) or C(block) access.
- Required when C(state) is I(present).
choices:
- 'block'
- 'allow'
country_codes:
description:
- Two letter country codes defining user country access in a geo filter, e.g. AU, MX, US.
- Required when C(state) is I(present).
type: list
delivery_policy:
description:
- A policy that specifies the delivery rules to be used for an endpoint.
suboptions:
description:
description:
- User-friendly description of the policy.
rules:
description:
- A list of the delivery rules.
type: list
suboptions:
order:
description:
- "The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be
applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and
I(actions) listed in it will always be applied."
- Required when C(state) is I(present).
actions:
description:
- A list of actions that are executed when all the I(conditions) of a rule are satisfied.
- Required when C(state) is I(present).
type: list
suboptions:
name:
description:
- Constant filled by server.
- Required when C(state) is I(present).
conditions:
description:
- A list of conditions that must be matched for the I(actions) to be executed
type: list
suboptions:
name:
description:
- Constant filled by server.
- Required when C(state) is I(present).
origins:
description:
- The source of the content being delivered via CDN.
- Required when C(state) is I(present).
type: list
suboptions:
name:
description:
- Origin name
- Required when C(state) is I(present).
host_name:
description:
- The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
- Required when C(state) is I(present).
http_port:
description:
- The value of the HTTP port. Must be between 1 and 65535
https_port:
description:
- The value of the HTTPS port. Must be between 1 and 65535
state:
description:
- Assert the state of the Endpoint.
- Use 'present' to create or update an Endpoint and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) Endpoint
azure_rm_cdnendpoint:
resource_group: RG
profile_name: profile1
name: endpoint1
location: WestCentralUs
origins:
- name: www-bing-com
host_name: www.bing.com
http_port: 80
https_port: 443
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: /subscriptions/subid/resourcegroups/RG/providers/Microsoft.Cdn/profiles/profile1/endpoints/endpoint1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.cdn import CdnManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the operations the module may perform on the endpoint."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMEndpoint(AzureRMModuleBase):
"""Configuration class for an Azure RM Endpoint resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
profile_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
origin_host_header=dict(
type='str'
),
origin_path=dict(
type='str'
),
content_types_to_compress=dict(
type='list'
),
is_compression_enabled=dict(
type='str'
),
is_http_allowed=dict(
type='str'
),
is_https_allowed=dict(
type='str'
),
query_string_caching_behavior=dict(
type='str',
choices=['ignore_query_string',
'bypass_caching',
'use_query_string',
'not_set']
),
optimization_type=dict(
type='str',
choices=['general_web_delivery',
'general_media_streaming',
'video_on_demand_media_streaming',
'large_file_download',
'dynamic_site_acceleration']
),
probe_path=dict(
type='str'
),
geo_filters=dict(
type='list',
options=dict(
relative_path=dict(
type='str'
),
action=dict(
type='str',
choices=['block',
'allow']
),
country_codes=dict(
type='list'
)
)
),
delivery_policy=dict(
type='dict',
options=dict(
description=dict(
type='str'
),
rules=dict(
type='list',
options=dict(
order=dict(
type='int'
),
actions=dict(
type='list',
options=dict(
name=dict(
type='str'
)
)
),
conditions=dict(
type='list',
options=dict(
name=dict(
type='str'
)
)
)
)
)
)
),
origins=dict(
type='list',
options=dict(
name=dict(
type='str'
),
host_name=dict(
type='str'
),
http_port=dict(
type='int'
),
https_port=dict(
type='int'
)
)
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.profile_name = None
self.name = None
self.endpoint = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMEndpoint, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.endpoint[key] = kwargs[key]
dict_camelize(self.endpoint, ['query_string_caching_behavior'], True)
dict_camelize(self.endpoint, ['optimization_type'], True)
dict_camelize(self.endpoint, ['geo_filters', 'action'], True)
response = None
self.mgmt_client = self.get_mgmt_svc_client(CdnManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_endpoint()
if not old_response:
self.log("Endpoint instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Endpoint instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
if (not default_compare(self.endpoint, old_response, '', self.results)):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Endpoint instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_endpoint()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Endpoint instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_endpoint()
# This currently doesnt' work as there is a bug in SDK / Service
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
else:
self.log("Endpoint instance unchanged")
self.results['changed'] = False
response = old_response
if self.state == 'present':
self.results.update({
'id': response.get('id', None)
})
return self.results
def create_update_endpoint(self):
'''
Creates or updates Endpoint with the specified configuration.
:return: deserialized Endpoint instance state dictionary
'''
self.log("Creating / Updating the Endpoint instance {0}".format(self.name))
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.endpoints.create(resource_group_name=self.resource_group,
profile_name=self.profile_name,
endpoint_name=self.name,
endpoint=self.endpoint)
else:
response = self.mgmt_client.endpoints.update(resource_group_name=self.resource_group,
profile_name=self.profile_name,
endpoint_name=self.name,
endpoint_update_properties=self.endpoint_update_properties)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Endpoint instance.')
self.fail("Error creating the Endpoint instance: {0}".format(str(exc)))
return response.as_dict()
    def delete_endpoint(self):
        '''
        Deletes specified Endpoint instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Endpoint instance {0}".format(self.name))
        try:
            # NOTE(review): the returned poller (if any) is not awaited, so
            # deletion may still be in progress when True is returned -- confirm.
            response = self.mgmt_client.endpoints.delete(resource_group_name=self.resource_group,
                                                         profile_name=self.profile_name,
                                                         endpoint_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Endpoint instance.')
            self.fail("Error deleting the Endpoint instance: {0}".format(str(e)))

        return True
def get_endpoint(self):
    '''
    Fetch the properties of the specified Endpoint.

    :return: deserialized Endpoint state dictionary, or False when it does not exist
    '''
    self.log("Checking if the Endpoint instance {0} is present".format(self.name))
    try:
        response = self.mgmt_client.endpoints.get(resource_group_name=self.resource_group,
                                                  profile_name=self.profile_name,
                                                  endpoint_name=self.name)
    except CloudError:
        # A missing endpoint is an expected outcome, not a failure.
        self.log('Did not find the Endpoint instance.')
        return False
    self.log("Response : {0}".format(response))
    self.log("Endpoint instance : {0} found".format(response.name))
    return response.as_dict()
def default_compare(new, old, path, result):
    """
    Recursively compare a requested (new) state against the existing (old) state.

    :param new: desired value; None means "not specified, accept whatever exists"
    :param old: current value reported by the service
    :param path: slash-separated location used in difference reporting
    :param result: dict that receives a 'compare' message describing the first difference
    :return: True when old satisfies new, False otherwise
    """
    if new is None:
        # Unspecified values never force a change.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        # BUG FIX: the original indexed old[0] unconditionally, raising
        # IndexError when both lists were empty.
        if len(old) == 0:
            return True
        if isinstance(old[0], dict):
            # Sort both lists of dicts by a common key so ordering is ignored.
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Locations compare case- and whitespace-insensitively.
            # BUG FIX: the original normalized `new` into `old`
            # (old = new.replace...), so any two locations compared equal.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False
def dict_camelize(d, path, camelize_first):
    """
    Walk *d* along *path* and convert the string at the leaf from
    snake_case to camelCase in place; lists are traversed element-wise.
    Missing keys and None values are silently skipped.
    """
    if isinstance(d, list):
        for element in d:
            dict_camelize(element, path, camelize_first)
    elif isinstance(d, dict):
        head = path[0]
        if len(path) == 1:
            leaf = d.get(head, None)
            if leaf is not None:
                d[head] = _snake_to_camel(leaf, camelize_first)
        else:
            nested = d.get(head, None)
            if nested is not None:
                dict_camelize(nested, path[1:], camelize_first)
def main():
    """Module entry point: instantiating the module class runs the whole task."""
    AzureRMEndpoint()


if __name__ == '__main__':
    main()
| testormoo/ansible-azure-complete | modules/library/azure_rm_cdnendpoint.py | azure_rm_cdnendpoint.py | py | 21,277 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ansible.module_utils.azure_rm_common.AzureRMModuleBase",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "azure.mgmt.cdn.CdnManagementClient",
"line_number": 376,
"usage_type": "argument"
},
{
"api_name": "msrest.polling.LROPoller",
"line_number": 417... |
15471707418 | from functools import reduce
def my_lambda(acc, val):
    """Fold step for the skyscraper count.

    acc is a (count, current_max) pair; the count is incremented each time
    a strictly higher value (a newly visible skyscraper) is seen.
    """
    count, current_max = acc
    if val > current_max:
        return count + 1, val
    return count, current_max
myList = [1,2,3,4,5,1]
skyscraper = reduce(my_lambda, myList, (0,0))
print(myList, "Il max cambia " ,skyscraper[0], " volte") | FilippoBotti/linguaggi-Paradigmi | python/fold_skyscraper.py | fold_skyscraper.py | py | 269 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 11,
"usage_type": "call"
}
] |
import json
import csv

# Hard-coded input locations (machine-specific; adjust as needed).
AUTHORS_JSON_PATH = '/Users/AB/Dropbox/Dev/CWP/authors.json'
NARRATIVES_JSON_PATH = '/Users/AB/Dropbox/Dev/CWP/narratives.json'

with open(AUTHORS_JSON_PATH, 'r') as f:
    authors = json.load(f)
with open(NARRATIVES_JSON_PATH, 'r') as f:
    narratives = json.load(f)

# Index authors by primary key once instead of scanning the whole author
# list for every narrative (the original was O(authors * narratives) and
# raised a bare IndexError when an author was missing; a KeyError with the
# offending pk is more informative).
authors_by_pk = {author['pk']: author for author in authors}

with open('narratives.csv', 'w') as csvfile:
    fieldnames = ['id', 'author_id', 'name', 'year', 'text']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for item in narratives:
        author_id = item['fields']['author_id']
        author = authors_by_pk[author_id]
        writer.writerow({
            'id': int(item['pk']),
            'author_id': author_id,
            'name': author['fields']['name'],
            'year': int(item['fields']['year']),
            'text': item['fields']['text'],
        })
| bakera81/cwp-literacynarratives | json_to_csv.py | json_to_csv.py | py | 1,033 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 16,
"usage_type": "call"
}
] |
18808337505 | """
plot_model.py
Code to plot an HII region model fit and data
Based on IDL code by A.A. Kepley
Trey Wenger June 2016
dsb 21Jun2016 - Use linecolor to distinguish models instead of linestyle
dsb 02Sep2016 - Modify units labels in plots
dsb 14Sep2016 - Add spontaneous emission model; GBT data point
dsb 26Sep2016 - Create separate plots for different models
dsb 16Nov2016 - Need divide HII region size by 2 to get radius
dsb 07Dec2016 - Add obsDV and smDens to inputs.
"""
import numpy as np
import math
import matplotlib
matplotlib.rc_file('matplotlibrc')
import matplotlib.pyplot as plt
import astropy.constants as c
import astropy.units as u
from scipy.optimize import curve_fit
import utils
import argparse
import pdb
#pdb.set_trace()
# Define power law function
def powFunction(x, a, b):
    """Power-law model a * x**b, vectorized over x (fit target for curve_fit)."""
    scaled = np.asarray(x) ** b
    return a * scaled
def plot_model(filenames, labels=None,
               freqrange=(1., 120.),
               linerange=(1.e-25, 1.e-21),
               contrange=(1.e-29, 1.e-26),
               obsDV=75.0,
               smDen=(10.0, 25.0, 100.0)):
    """
    Plot model fits on top of the data.

    Inputs:
      filenames = list of HII region model pickle files to plot
      labels    = list of legend labels, one per model (or None for no labels)
      freqrange = range for the x-axis (GHz)
      linerange = range for y-axis of line plot (W/m2)
      contrange = range for y-axis of cont plot (W/m2/Hz)
      obsDV     = observed RRL FWHM line width (km/s)
      smDen     = densities to use for simple model (cm-3)

    Side effects: writes linefit.eps, contfit.eps, linefitSimple.eps and
    contfitSimple.eps to the current directory and prints a summary.
    """
    # --------------------------------
    # Model: Collection of HII regions
    # --------------------------------
    if len(filenames) > 4:
        raise ValueError("Can plot up to 4 models at once")
    # BUG FIX: the original only defined model_labels when labels was None,
    # so passing labels=... raised NameError further down.
    if labels is None:
        model_labels = [None] * len(filenames)
    else:
        model_labels = labels
    linecolors = ['k', 'r', 'g', 'b']
    #
    # Calculate best fit line and continuum flux models for a range
    # of RRL frequencies
    #
    models = [np.load(filename) for filename in filenames]
    # range of n to plot
    fit_lines = np.arange(110) + 40.
    for model in models:
        model.fit_lines = fit_lines
        model.calc_HII_region_properties()
    #
    # Plot RRL
    #
    plt.figure()
    # Models
    for model, linecolor, label in zip(models, linecolors, model_labels):
        plt.plot(model.rrl_freq, model.line_flux * model.num_HII_regions,
                 linecolor, linestyle='-', label=label)
    # VLA data (NOTE: `model` and `label` deliberately keep the values from
    # the last loop iteration below, matching the original behavior).
    plt.errorbar(model.linedata['freq_GHz'], model.linedata['intflux_Wm2'],
                 yerr=model.linedata['intflux_Wm2_err'], linestyle='',
                 color='k', marker='o')
    # GBT data
    # (H52+H53) integrated intensity = 147.1 (5.7) mJy km/s
    # H52alpha freq = 45.453 GHz
    freqGBT = 45.45373
    sInt_mks = 147.1*1.e-3*1.e-26*1.e3*freqGBT*1.e9/c.c.value
    sIntErr_mks = 5.7*1.e-3*1.e-26*1.e3*freqGBT*1.e9/c.c.value
    # plt.errorbar([freqGBT], [sInt_mks], yerr=[sIntErr_mks], color='g', marker='o')
    # labels, etc.
    plt.xlabel("Frequency (GHz)", fontsize=20)
    plt.ylabel(r"Integrated Line Flux (W$\,$m$^{-2}$)", fontsize=20)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim(freqrange)
    plt.ylim(linerange)
    plt.text(freqrange[0]*(1.2), linerange[1]*(1-0.5), 'Multiple Compact HII Region Model', fontsize=20)
    fooNe = "%.2g" % (model.electron_dens)
    fooTe = "%4.0f" % (model.electron_temp)
    plt.text(freqrange[0]*(1.2), linerange[1]*(1-0.75), r'(n$_{\rm e}$, T$_{\rm e}$) = (' + fooNe + ' cm$^{-3}$, ' + fooTe + r' K)', fontsize=20)
    if label is not None:
        plt.legend(fontsize=12, loc='best')
    plt.tight_layout()
    plt.savefig('linefit.eps')
    #
    # Plot radio continuum
    #
    plt.figure()
    for model, linecolor, label in zip(models, linecolors, model_labels):
        if label is None:
            mylabel = ''
        else:
            mylabel = label + ', '
        plt.plot(model.rrl_freq, model.thermal_fluxden_rrl + model.nonthermal_fluxden_rrl,
                 linecolor, linestyle='-', label=mylabel+'Total')
        plt.plot(model.rrl_freq, model.thermal_fluxden_rrl,
                 linecolor, linestyle=':', label=mylabel+'Thermal')
        plt.plot(model.rrl_freq, model.nonthermal_fluxden_rrl,
                 linecolor, linestyle='--', label=mylabel+'Non-thermal')
    plt.errorbar(model.contdata['freq_GHz'], model.contdata['S_C_mJy']*1.e-29,
                 yerr=model.contdata['S_C_mJy_err']*1.e-29, linestyle='',
                 color='k', marker='o')
    plt.xlabel("Frequency (GHz)", fontsize=20)
    plt.ylabel(r"Continuum Flux Density (W$\,$m$^{-2}\,$Hz$^{-1}$)", fontsize=20)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim(freqrange)
    plt.ylim(contrange)
    plt.text(freqrange[0]*(1.2), contrange[1]*(1-0.5), 'Multiple Compact HII Region Model', fontsize=20)
    plt.legend(fontsize=16, loc='best')
    plt.tight_layout()
    plt.savefig('contfit.eps')
    # --------------------------------
    # Model: Spontaneous emission
    # --------------------------------
    #
    # Calculate simple model (sm): spontaneous, optically thin emission in LTE
    # Use equations 4-9 from Puxley et al. (1991, MNRAS, 248, 585)
    # (parameters are taken from the LAST model in the list, as before)
    #
    smTe = model.electron_temp                   # K
    smDist = model.distance                      # Mpc
    smFreq = model.linedata['freq_GHz'][0]       # GHz use Ka-band freq
    smOmega = model.omega_region                 # steradians
    smAngSize = math.sqrt(smOmega/math.pi)       # radians
    smLinSize = smDist*1.e6*smAngSize            # pc
    # line width (Hz)
    smDV = (obsDV*1.e3)*(smFreq*1.e9)/c.c.value
    # Planck Function (mks)
    smB = (2.0*c.h.value*(smFreq*1.e9)**3/c.c.value**2)*(1.0/(math.exp((c.h.value*smFreq*1.e9)/(c.k_B.value*smTe)) - 1.0))
    # set density list to an np array
    smDen = np.array(smDen)
    # EM cm-6 pc
    smEM = smDen**2*smLinSize
    # Continuum optical depth (Eq. 8)
    smTauC = 8.2e-2*smTe**(-1.35)*smFreq**(-2.1)*smEM
    # LTE line optical depth (Eq. 7)
    smTauL = 1.7e3*(smDV*1.e-3)**(-1)*smTe**(-2.5)*smEM
    # thermal continuum flux density (Eq. 5)
    smContFlux = smOmega*smB*(1.0 - np.exp(-smTauC))
    # spontaneous line emission (Eq. 4)
    smLineEmission = smOmega*smB*np.exp(-smTauC)*(1.0 - np.exp(-smTauL))
    # integrated line emission (mks)
    smIntLine = 1.064*smLineEmission*smDV
    # Line flux (optically thin) = a * freq^2
    aLine = smIntLine/smFreq**2
    # Cont flux (optically thin) = a * freq^(-0.1)
    aCont = smContFlux/smFreq**(-0.1)
    # number of H-ionizing photons (assume a sphere)
    # Assume case B recombination rate (cm^3 s-1)
    # Use Hui & Gnedin (1997) approximation
    lambda_hi = 315614.0/smTe
    alpha_B = (2.753e-14*(lambda_hi**1.5/(1.0 + (lambda_hi/2.740)**0.407)**2.242))
    smRadius = (smLinSize*u.pc).to(u.cm).value/2.0
    smNL = (4.0/3.0)*math.pi*smRadius**3*smDen**2*alpha_B
    # SFR from Murphy et al. (2011)
    smSFR = 7.29e-54*smNL
    #
    # Plot RRL
    #
    plt.figure()
    # Simple model: one dashed curve per density that is optically thin
    xx = np.array([model.rrl_freq[len(model.rrl_freq)-1], model.rrl_freq[0]])
    for i in range(len(aLine)):
        yy = aLine[i]*xx**2
        if smTauC[i] < 1.0 and smTauL[i] < 1.0:
            fooNe = "%3.0f" % (smDen[i])
            fooTe = "%4.0f" % (smTe)
            plt.plot(xx, yy, '--', label=r'(n$_{\rm e}$, T$_{\rm e}$) = (' + fooNe + ' cm$^{-3}$, ' + fooTe + r' K)')
    # VLA data
    plt.errorbar(model.linedata['freq_GHz'], model.linedata['intflux_Wm2'],
                 yerr=model.linedata['intflux_Wm2_err'], linestyle='',
                 color='k', marker='o')
    # GBT data
    # (H52+H53) integrated intensity = 147.1 (5.7) mJy km/s
    # H52alpha freq = 45.453 GHz
    freqGBT = 45.45373
    sInt_mks = 147.1*1.e-3*1.e-26*1.e3*freqGBT*1.e9/c.c.value
    sIntErr_mks = 5.7*1.e-3*1.e-26*1.e3*freqGBT*1.e9/c.c.value
    # plt.errorbar([freqGBT], [sInt_mks], yerr=[sIntErr_mks], color='g', marker='o')
    # labels, etc.
    plt.xlabel("Frequency (GHz)", fontsize=20)
    plt.ylabel(r"Integrated Line Flux (W$\,$m$^{-2}$)", fontsize=20)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim(freqrange)
    plt.ylim(linerange)
    plt.text(freqrange[0]*(1.2), linerange[1]*(1-0.5), 'Spontaneous Emission Model', fontsize=20)
    plt.legend(fontsize=12, loc=4)
    plt.tight_layout()
    plt.savefig('linefitSimple.eps')
    #
    # Plot radio continuum (use second density model only)
    #
    plt.figure()
    # non-thermal = total observed - thermal
    xObs = model.contdata['freq_GHz']
    yObs = model.contdata['S_C_mJy']*1.e-29
    yThermal = aCont[1]*xObs**(-0.1)
    yNonthermal = yObs - yThermal
    # Fit Power law
    p0 = [yObs.mean(), -0.7]
    popt, pcov = curve_fit(powFunction, xObs, yNonthermal, p0)
    # Calculate various contributions over extended frequency range
    xx = np.array([model.rrl_freq[len(model.rrl_freq)-1], model.rrl_freq[0]])
    # Thermal
    yyThermal = aCont[1]*xx**(-0.1)
    # Non-thermal
    yyNonthermal = popt[0]*xx**(popt[1])
    # Total
    yyTotal = yyThermal + yyNonthermal
    # plot model
    plt.plot(xx, yyTotal, '-k', label='Total')
    plt.plot(xx, yyThermal, ':k', label='Thermal')
    plt.plot(xx, yyNonthermal, '--k', label='Non-thermal')
    # plot data
    plt.errorbar(model.contdata['freq_GHz'], model.contdata['S_C_mJy']*1.e-29,
                 yerr=model.contdata['S_C_mJy_err']*1.e-29, linestyle='',
                 color='k', marker='o')
    plt.xlabel("Frequency (GHz)", fontsize=20)
    plt.ylabel(r"Continuum Flux Density (W$\,$m$^{-2}\,$Hz$^{-1}$)", fontsize=20)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim(freqrange)
    plt.ylim(contrange)
    plt.text(freqrange[0]*(1.2), contrange[1]*(1-0.5), 'Spontaneous Emission Model', fontsize=20)
    plt.legend(fontsize=16, loc='best')
    plt.tight_layout()
    plt.savefig('contfitSimple.eps')
    # print results (second density model only)
    print("=============================")
    # (typo "Emissionn" fixed in the printed header)
    print("Spontaneous Emission Model")
    print("=============================")
    print("Electron temperature = {:g} K".format(smTe))
    print("Electron density = {:g} cm-3".format(smDen[1]))
    print("HII region size = {:g} pc".format(smLinSize))
    print("Non-thermal spectral index = {:g}".format(popt[1]))
    # print("Mass of ionized gas = {:g} Msun".format(best_model.mass_ion))
    print("Number of H-ionizing photons = {:g} s-1".format(smNL[1]))
    print("Star Formation Rate = {:g} Msun/yr".format(smSFR[1]))
    print("=============================")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot HII region model(s) and data",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('filenames', nargs="+", help='filenames of model pickle files')
    parser.add_argument('--labels', nargs="+", default=None, help='labels for each model')
    parser.add_argument('--freqrange', nargs=2, default=[1., 120.], type=float, help='x-axis range (min max) (GHz)')
    parser.add_argument('--linerange', nargs=2, default=[1.e-25, 1.e-21], type=float, help='y-axis range (min max) for line plots (W/m2)')
    parser.add_argument('--contrange', nargs=2, default=[1.e-29, 1.e-26], type=float, help='y-axis range (min max) for cont plots (W/m2/Hz)')
    # BUG FIX: nargs=1 made argparse hand back a one-element list, breaking
    # the scalar arithmetic inside plot_model; a plain typed option is a float.
    parser.add_argument('--obsDV', default=75.0, type=float, help='observed RRL FWHM line width (km/s)')
    parser.add_argument('--smDen', nargs=3, default=[10., 25.0, 100.0], type=float, help='densities to use for simple model (cm-3)')
    args = parser.parse_args()
    # BUG FIX: freqrange was parsed but never forwarded to plot_model.
    plot_model(args.filenames, labels=args.labels, freqrange=args.freqrange,
               linerange=args.linerange, contrange=args.contrange,
               obsDV=args.obsDV, smDen=args.smDen)
| tvwenger/HII-Region-Models | plot_model.py | plot_model.py | py | 11,647 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.rc_file",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_nu... |
8403487450 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Объект цифрового LED индикатора.
"""
import wx
from ic.components import icwidget
from ic.utils import util
from ic.PropertyEditor import icDefInf
from ic.log import log
from ic.bitmap import bmpfunc
import wx.lib.gizmos as parentModule
# Mapping of style-name strings to the wx.lib.gizmos LED alignment/draw flags.
LED_NUMBER_CTRL_STYLE = {'LED_ALIGN_LEFT': parentModule.LED_ALIGN_LEFT,
                         'LED_ALIGN_RIGHT': parentModule.LED_ALIGN_RIGHT,
                         'LED_ALIGN_CENTER': parentModule.LED_ALIGN_CENTER,
                         'LED_ALIGN_MASK': parentModule.LED_ALIGN_MASK,
                         'LED_DRAW_FADED': parentModule.LED_DRAW_FADED,
                         }

# Default resource specification of the LED number control component.
# (The '__attr_hlp__' help strings are user-facing data and stay in Russian.)
SPC_IC_LEDNUMBERCTRL = {'draw_faded': True,
                        'foreground_colour': None,
                        'background_colour': None,
                        'default': u'---',
                        '__parent__': icwidget.SPC_IC_WIDGET,
                        '__attr_hlp__': {'draw_faded': 'Признак, будут ли неосвещенные сегменты отображаться с выцветшей версией цвета переднего плана',
                                         'foreground_colour': u'Цвет текста (По умолчанию зеленый)',
                                         'background_colour': u'Цвет фона (По умолчанию черный)',
                                         'default': u'Значение по умолчанию',
                                         },
                        }

# Component type
ic_class_type = icDefInf._icUserType
# Class name
ic_class_name = 'icLEDNumberCtrl'
# Specification for the class resource description
ic_class_spc = {'type': 'LEDNumberCtrl',
                'name': 'default',
                'child': [],
                'activate': True,
                '_uuid': None,
                'style': parentModule.LED_ALIGN_LEFT,
                '__styles__': LED_NUMBER_CTRL_STYLE,
                '__events__': {},
                '__lists__': {},
                '__attr_types__': {icDefInf.EDT_TEXTFIELD: ['description', '_uuid', 'default'],
                                   icDefInf.EDT_CHECK_BOX: ['draw_faded'],
                                   icDefInf.EDT_COLOR: ['foreground_colour', 'background_colour'],
                                   },
                '__parent__': SPC_IC_LEDNUMBERCTRL,
                }

# Name of the class icon; icon files live in the
# ic/components/user/images directory
ic_class_pic = bmpfunc.createLibraryBitmap('counter.png')
ic_class_pic2 = bmpfunc.createLibraryBitmap('counter.png')
# Path to the documentation file
ic_class_doc = 'SCADA/doc/_build/html/SCADA.usercomponents.led_number_ctrl.html'
ic_class_spc['__doc__'] = ic_class_doc
# List of components that may be contained in this component
ic_can_contain = []
# List of components that may NOT be contained; only consulted when
# ic_can_contain is not defined
ic_can_not_contain = None
# Component version
__version__ = (0, 1, 1, 2)
class icLEDNumberCtrl(icwidget.icWidget, parentModule.LEDNumberCtrl):
    """
    Digital LED indicator control.

    :type component_spc: C{dictionary}
    :cvar component_spc: Component specification.
        - B{type='defaultType'}:
        - B{name='default'}:
    """
    component_spc = ic_class_spc

    def __init__(self, parent, id=-1, component=None, logType=0, evalSpace=None,
                 bCounter=False, progressDlg=None):
        """
        Constructor of the base user-component class.

        :type parent: C{wx.Window}
        :param parent: Pointer to the parent window.
        :type id: C{int}
        :param id: Window identifier.
        :type component: C{dictionary}
        :param component: Component description dictionary.
        :type logType: C{int}
        :param logType: Log type (0 - console, 1 - file, 2 - log window).
        :param evalSpace: Namespace needed to evaluate external expressions.
        :type evalSpace: C{dictionary}
        :type bCounter: C{bool}
        :param bCounter: Flag to report progress in the ProgressBar. Sometimes
            this is not needed, e.g. for objects obtained by reference, since
            they are not accounted for in the total object count.
        :type progressDlg: C{wx.ProgressDialog}
        :param progressDlg: Pointer to the form-creation progress indicator.
        """
        component = util.icSpcDefStruct(self.component_spc, component, True)
        icwidget.icWidget.__init__(self, parent, id, component, logType, evalSpace)
        # Create matching attributes from the specification
        # (except service attributes)
        self.createAttributes(component)
        # !!! Constructor of the inherited class !!!
        # Real constructor parameters must be substituted here; at code
        # generation time they cannot always be determined.
        parentModule.LEDNumberCtrl.__init__(self, parent,
                                            pos=self.getPos(),
                                            size=self.getSize(),
                                            style=self.getStyle())
        # Apply the configured display properties
        self.SetDrawFaded(self.getDrawFaded())
        if self.foreground_colour:
            self.SetForegroundColour(self.foreground_colour)
        if self.background_colour:
            self.SetBackgroundColour(self.background_colour)
        if self.default:
            self.setValue(self.default)

    def getPos(self):
        """
        Position.
        """
        x, y = self.getICAttr('position')
        return wx.Point(x, y)

    def getSize(self):
        """
        Component size.
        """
        width, height = self.getICAttr('size')
        return wx.Size(width, height)

    def getStyle(self):
        """
        Style.
        """
        return self.getICAttr('style')

    def getDrawFaded(self):
        """
        Whether unlit segments are drawn with a faded version of the
        foreground colour.
        """
        return self.draw_faded

    def setValue(self, value):
        """
        Set the control value.

        :param value: Control value. May be an int, float or string;
            any other type is converted to its string form.
        :return: True/False.
        """
        if not isinstance(value, str):
            value = str(value)
        self.SetValue(value)
        return True
| XHermitOne/defis3 | SCADA/SCADA/usercomponents/led_number_ctrl.py | led_number_ctrl.py | py | 7,716 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "wx.lib.gizmos.LED_ALIGN_LEFT",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "wx.lib.gizmos",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "wx.lib.gizmos.LED_ALIGN_RIGHT",
"line_number": 20,
"usage_type": "attribute"
},
{
... |
3977143656 | import pygame
import random
import os
from enum import Enum
pygame.init()
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
class GameObject(pygame.sprite.Sprite):
    """Static sprite: a loaded image plus a rect used for positioning/collision."""

    def __init__(self, image_file, position):
        super(GameObject, self).__init__()
        self._image = pygame.image.load(image_file)
        self.rect = self._image.get_rect()
        self.width, self.height = self._image.get_size()
        self.set_position(position)

    def set_position(self, position):
        # Keep both the stored (left, top) tuple and the rect in sync.
        self.position = position
        self.rect.left, self.rect.top = position

    def get_position(self):
        # Returns the (left, top) corner taken from the rect.
        return self.rect.left, self.rect.top

    def draw(self, screen):
        # Blit the sprite image at its current rect position.
        screen.blit(self._image, (self.rect.left, self.rect.top))
class AnimatedGameObject(pygame.sprite.Sprite):
    """Frame-by-frame animation: one image per file in a directory, played in
    sorted filename order while `running` is True."""

    def __init__(self, images_path, position, stop_time=0.1, running=False):
        super(AnimatedGameObject, self).__init__()
        # Load every frame up front; sorted() makes filename order the play order.
        self.images = [pygame.image.load(os.path.join(images_path, image_file)) for image_file in sorted(os.listdir(images_path))]
        self.index = 0
        self._image = self.images[self.index]  # Start image for animation
        self.rect = self._image.get_rect()
        # stop_time: minimum time (same unit as dt passed to animate) per frame.
        self.stop_time = stop_time
        self.elapsed_time = 0
        self.set_position(position)
        self.running = running

    def animate(self, dt):
        """Advance the animation by dt; returns True while a frame is showing."""
        if not self.running:
            return False
        if self.index + 1 >= len(self.images):
            # Last frame reached: rewind and stop until restarted by the caller.
            self.restart_anim()
            return False
        self.elapsed_time += dt
        if self.elapsed_time >= self.stop_time:
            # Enough time accumulated: step to the next frame.
            self.elapsed_time = 0
            self.index += 1
            self._image = self.images[self.index]
            self.rect = self._image.get_rect()
            self.set_position(self.position)
        return True

    def set_position(self, position):
        # Keep both the stored (left, top) tuple and the rect in sync.
        self.position = position
        self.rect.left, self.rect.top = position

    def draw(self, screen):
        screen.blit(self._image, (self.rect.left, self.rect.top))

    def restart_anim(self):
        # NOTE(review): only the index is reset here; _image keeps the last
        # frame until animate() next advances — presumably intentional.
        self.index = 0
        self.running = False
class Background(GameObject):
    """Full-window background image drawn over a white fill."""

    def __init__(self, image_file, position=(0, 0)):
        GameObject.__init__(self, image_file, position)

    def draw(self, screen):
        # BUG FIX: honor the `screen` argument instead of always drawing to
        # the global GameScene.SCREEN. The only existing caller passes that
        # same surface, so behavior is unchanged for current call sites.
        screen.fill(WHITE)
        super(Background, self).draw(screen)
class Ball(GameObject):
    """Falling ball sprite; `safe` balls score when caught, unsafe ones cost a life."""

    def __init__(self, image_file, position=(-100, 0), safe=True):
        # Default position is off-screen until the BallManager places it.
        GameObject.__init__(self, image_file, position)
        self.safe = safe

    def is_inside(self, bucket):
        """Return True when this ball counts as caught by `bucket`."""
        bucket_x0, bucket_y0 = bucket.rect.left, bucket.rect.top
        bucket_x1 = bucket.rect.right
        ball_x0 = self.rect.left
        ball_x1, ball_y1 = self.rect.right, self.rect.bottom
        # Caught when the ball is horizontally fully inside the bucket and its
        # bottom edge sits within a 25-50 px band around the bucket's top —
        # the magic 25/50 offsets are presumably tuned to the bucket rim
        # height in the artwork (TODO confirm against the sprite sizes).
        if ball_x0 >= bucket_x0 \
                and ball_x1 <= bucket_x1 \
                and ball_y1 - 25 >= bucket_y0 >= ball_y1 - 50:
            return True
        else:
            return False
class Bucket(GameObject):
    """Player-controlled bucket that moves horizontally to catch balls."""

    def __init__(self, image_file, position):
        GameObject.__init__(self, image_file, position)
        # Initial x coordinate (the y coordinate never changes).
        self.x = position[0]
        # Current movement direction; value is the sign of the x velocity.
        self.direction = Direction.NONE
        # Horizontal speed in pixels per frame.
        self.dx = 3.8
class BallManager:
    """Spawns and moves one falling ball at a time, reusing two Ball sprites."""

    # Total number of balls generated across all instances.
    BALL_COUNT = 0

    def __init__(self):
        # Two reusable ball sprites: a safe one and a spiky (unsafe) one.
        ball = Ball(image_file='ball_small.png')
        spiky_ball = Ball(image_file='spiky_ball_small.png', safe=False)
        self.balls = [ball, spiky_ball]
        # Index -> weight: safe balls are spawned 3x as often as spiky ones.
        self.ball_weights = {0: 3, 1: 1}
        # Fall speed in pixels per frame.
        self.ball_speed = 7
        self.ball_x = 0
        self.ball_y = 0
        self.ball = None
        self.reset_pos()

    def reset_pos(self):
        # New horizontal position on screen; vertical start is above the
        # screen so the ball "enters" after a random delay.
        self.ball_x = random.randrange(0, 460)
        self.ball_y = random.randrange(-600, -200)

    def generate_ball(self):
        """Pick a weighted-random ball type and place it above the screen."""
        BallManager.BALL_COUNT += 1
        # Expand the weights into a list with one entry per weight unit and
        # choose uniformly from it.
        self.ball = self.balls[
            random.choice([x for x in self.ball_weights for y in range(self.ball_weights[x])])
        ]
        self.reset_pos()
        self.ball.set_position((self.ball_x, self.ball_y))
        return self.ball

    def update_pos(self):
        # Advance the current ball one step downward.
        self.ball_y += self.ball_speed
        self.ball.set_position((self.ball_x, self.ball_y))

    def get_ball_in_scene(self):
        """Return the current ball once it is (nearly) visible, else None."""
        # Do not change anything here
        if self.ball_y > -20:
            return self.ball
        else:
            return None
class Direction(Enum):
    """Horizontal movement direction; the value is the sign of the x velocity."""
    LEFT = -1
    NONE = 0
    RIGHT = 1
class GameScene:
    """Owns the pygame window, the game state (score/lives) and the main loop."""

    # Shared display surface, assigned in __init__.
    SCREEN = None
    FPS = 60

    def __init__(self, width, height):
        GameScene.SCREEN = pygame.display.set_mode((width, height))
        self.width = width
        self.height = height
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont("monospace", 16)
        self.score = 0
        self.lives = 3
        self.running = False
        self.time_elapsed = 0
        self.bucket = Bucket('bucket_trans.png', (width * 0.45, height * 0.8))
        self.bg = Background('bg.png')
        self.anim = AnimatedGameObject('BallCollectAnim', (-100, 0))
        self.ball_manager = BallManager()
        pygame.display.set_caption('Catch em all!')

    def process_events(self):
        """Translate keyboard/window events into bucket direction changes."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.bucket.direction = Direction.LEFT
                if event.key == pygame.K_RIGHT:
                    self.bucket.direction = Direction.RIGHT
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    self.bucket.direction = Direction.NONE

    def calculate_direction(self):
        """Hook for an AI player: steer the bucket toward the visible ball."""
        # gets the ball visible on screen
        ball_in_scene = self.ball_manager.get_ball_in_scene()
        # TODO: Write your code here
        # Update self.direction based on ball_in_scene
        # ball_in_scene has attributes like - height, width, rect.top, rect.left
        # So does self.bucket
        # self.bucket.direction = Direction.NONE

    def display_score(self):
        """Render the current score in the top-left corner."""
        text = self.font.render("Score: {0}".format(self.score), 2, BLACK)
        GameScene.SCREEN.blit(text, (5, 10))

    def display_lives(self):
        """Render the remaining lives just below the score."""
        text = self.font.render("Lives: {0}".format(self.lives), 2, BLACK)
        GameScene.SCREEN.blit(text, (5, 34))

    def game_loop(self):
        """Run the main loop until the window is closed or lives run out."""
        self.running = True
        bucket_x, bucket_y = self.bucket.get_position()
        ball = self.ball_manager.generate_ball()
        while self.running:
            # Comment the below line to stop processing events from the user
            self.process_events()
            self.calculate_direction()
            dt = self.clock.tick(GameScene.FPS)
            self.time_elapsed += dt
            self.bg.draw(GameScene.SCREEN)
            self.ball_manager.update_pos()
            ball.draw(GameScene.SCREEN)
            ball_caught = ball.is_inside(self.bucket)
            if ball_caught:
                if not ball.safe:
                    # Caught a spiky ball: lose a life.
                    self.lives -= 1
                else:
                    # Caught a safe ball: score and play the collect animation.
                    self.score += 1
                    self.anim.running = True
                    self.anim.set_position((ball.rect.left + ball.width // 2 - 10, self.bucket.rect.top + 5))
                ball = self.ball_manager.generate_ball()
            elif self.ball_manager.ball_y > self.height:
                # Ball fell off the bottom: missing a safe ball costs a life.
                if ball.safe:
                    self.lives -= 1
                ball = self.ball_manager.generate_ball()
            bucket_new_x = bucket_x + self.bucket.dx * self.bucket.direction.value
            if 0 <= bucket_new_x <= self.width - self.bucket.width:
                bucket_x = bucket_new_x
            self.bucket.set_position((bucket_x, bucket_y))
            self.bucket.draw(GameScene.SCREEN)
            if self.anim.animate(dt / 300):
                self.anim.draw(GameScene.SCREEN)
            self.display_score()
            self.display_lives()
            pygame.display.update()
            # BUG FIX: the original used `self.lives is 0` — an identity check
            # that only happens to work because CPython caches small ints.
            # Compare by value (and defensively treat negatives as game over).
            if self.lives <= 0:
                self.running = False
if __name__ == '__main__':
scene = GameScene(500, 600)
scene.game_loop()
print("Final score: {0}, Total time played: {1} secs, Total balls dropped: {2}"
.format(scene.score, scene.time_elapsed/1000, BallManager.BALL_COUNT - 1))
pygame.quit()
quit() | praveensvsrk/CatchEmAll | CatchEmAll.py | CatchEmAll.py | py | 8,375 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"l... |
35361793296 | from setuptools import setup, find_packages
from pathlib import Path
this_directory = Path(__file__).parent
# long_description = (this_directory / "README.md").read_text()
with open(this_directory / "README.md", encoding="utf8") as file:
long_description = file.read()
VERSION = '0.1.10'
DESCRIPTION = 'Python generator project'
setup(
name = 'pynumbat',
packages = ['pynumbat'],
entry_points={
"console_scripts":
["pynumbat=pynumbat.__main__:main"]
},
include_package_data=True,
version = VERSION,
license='MIT',
description = DESCRIPTION,
long_description_content_type="text/markdown",
long_description=long_description,
author = 'Eduardo Ismael Garcia Perez',
author_email = 'eduardo78d@gmail.com',
url = 'https://github.com/eduardogpg/pygenerate',
keywords = ['Python Generate', 'Generate', 'Project'],
install_requires=[
'click',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
) | eduardogpg/pygenerate | setup.py | setup.py | py | 1,150 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 15,
"usage_type": "call"
}
] |
38843635225 | import pandas as pd
import datetime as dt
import os
from difflib import SequenceMatcher
# This module takes downloaded golf score data and parses from said data
# round scores for individual golfers and course data. This information is
# written to CSVs
# NOTES
# Data is curated such that certain events are excluded
# Exclusion parameters include limited data, course changes mid tournament, or
# odd formatting
#I'm sure this is a mess of optimizability
pd.set_option('display.expand_frame_repr', False)
def find_nth(haystack, needle, n):
    """Return the index of the nth non-overlapping occurrence of `needle`
    in `haystack`, or -1 when there are fewer than n occurrences."""
    index = haystack.find(needle)
    for _ in range(n - 1):
        if index < 0:
            break
        index = haystack.find(needle, index + len(needle))
    return index
def similar(a, b):
    """Return the difflib similarity ratio (0.0 to 1.0) between two strings."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def main():
    """Parse downloaded golf tournament HTML files into per-golfer round rows
    and a curated course table, then write Golfers.csv and Courses.csv.

    For each file under the hardcoded tournament directory: golfer cells and
    round scores are located by raw string offsets, the four round dates are
    derived from the page's 'Ending:' date, the course name/par and
    tournament name are scraped, and finally each row's course name is
    fuzzy-matched (via similar()) against the curated course list.
    """
    frames = []
    courses = []
    golfer_dictionary = {}
    base_directory = r'C:\Users\Mitch\Projects\Golf\Data\Tournaments'
    for x in os.listdir(base_directory):
        print(x)
        # Fix: use a context manager so each file handle is closed promptly;
        # the original open(...).read() leaked one handle per file.
        with open(file=base_directory+r"\\"+x, mode='r') as tournament_file:
            file_object = tournament_file.read()
        x = 1  # NOTE: reuses the loop variable as a 1-based table-cell counter
        golfers = []
        while find_nth(file_object, '<td class="cell">', x) != -1:
            start_string = '<td class="cell">'
            nth_string = find_nth(file_object, start_string, x)
            end_string_index = file_object[nth_string:].find('</td>')
            # The numeric offsets (21, 35, 240) are tied to the site's fixed markup.
            golfer = file_object[nth_string + len(start_string) + 21:nth_string + end_string_index]
            round_scores = file_object[nth_string + end_string_index + 35:nth_string + end_string_index + 240]
            first_round_score = round_scores[find_nth(round_scores, '>', 1) + 1:find_nth(round_scores, '<', 2)]
            second_round_score = round_scores[find_nth(round_scores, '>', 3) + 1:find_nth(round_scores, '<', 4)]
            third_round_score = round_scores[find_nth(round_scores, '>', 5) + 1:find_nth(round_scores, '<', 6)]
            fourth_round_score = round_scores[find_nth(round_scores, '>', 7) + 1:find_nth(round_scores, '<', 8)]
            # Empty strings mean the round was not played; keep them as "".
            if first_round_score != "":
                first_round_score = int(first_round_score)
            if second_round_score != "":
                second_round_score = int(second_round_score)
            if third_round_score != "":
                third_round_score = int(third_round_score)
            if fourth_round_score != "":
                fourth_round_score = int(fourth_round_score)
            golfers += [[golfer, first_round_score, second_round_score, third_round_score, fourth_round_score]]
            x += 1
        # The page carries the final day; earlier rounds are the 3 days before it.
        next_end = file_object[file_object.find("Ending:"):].find('<')
        date_string = file_object[file_object.find("Ending:") + len("Ending: "):file_object.find("Ending:") + next_end]
        year = int(date_string[find_nth(date_string, '/', 2) + 1:])
        month = int(date_string[:find_nth(date_string, '/', 1)])
        day = int(date_string[find_nth(date_string, '/', 1) + 1:find_nth(date_string, '/', 2)])
        fourth_date = dt.date(year, month, day)
        first_date = fourth_date - dt.timedelta(days=3)
        second_date = fourth_date - dt.timedelta(days=2)
        third_date = fourth_date - dt.timedelta(days=1)
        course_end = file_object[file_object.find("Course: "):].find('<')
        course_name = file_object[
            file_object.find("Course: ") + len("Course: "):file_object.find("Course:") + course_end]
        par_end = file_object[file_object.find("PAR: "):].find('<')
        par = file_object[file_object.find("PAR: ") + len("PAR: "):file_object.find("PAR:") + par_end]
        # Strip the HTML-entity remnant from names like "A &amp; B".
        if "amp;" in str(course_name):
            course_name = str(course_name.replace("amp;", ""))
        course = str(course_name), int(par)
        if course not in courses:
            courses += [course]
        start_index = file_object.find('PAST <b>') + 67
        end_index = file_object[start_index:].find("</span>")
        tournament_name = file_object[start_index:start_index + end_index]
        # Expand each golfer row with tournament, course, and the four dates.
        for x in golfers:
            x.insert(1, tournament_name)
            x.insert(2, course_name)
            x.insert(3, str(first_date))
            x.insert(5, str(second_date))
            x.insert(7, str(third_date))
            x.insert(9, str(fourth_date))
        frames += golfers
    # These indices are hardcoded because I couldn't think of an effective way
    # to programatically find the indices for deletion. If/when this code is
    # rerun, it would be worth again trying to automate
    courses.sort(key=lambda x: x[1])
    delete_indices = [221,217,215,212,211,210,206,205,204,203,202,201,200,199,198,195,191,190,187,183,177,176,174,172,168,163,161,156,
    149,148,146,144,142,138,137,133,132,128,127,126,124,122,121,120,117,115,114,113,112,107,106,99,96,93,92,90,81,73,69,68,61,60,53,
    48,44,35,29,26,25,16,12,2]
    for x in delete_indices:
        del courses[x]
    course_pars_dictionary = {}
    for x in courses:
        course_pars_dictionary[x[0]] = x[1]
    cdf = pd.DataFrame(courses)
    cdf.to_csv(r'C:\Users\Mitch\Projects\Golf\Data\Courses.csv')
    # Group every golfer's tournament rows under their name.
    for x in frames:
        golfer_dictionary[x[0]] = []
    for x in frames:
        golfer_dictionary[x[0]] += [x[1:]]
    dictionary_keys = list(golfer_dictionary.keys())
    for x in dictionary_keys:
        golfer_dictionary[x] = pd.DataFrame(golfer_dictionary[x])
    golfers_list = []
    for x in dictionary_keys:
        df = golfer_dictionary[x].transpose()
        for col in range(0,len(df.columns)):
            tournament = [x, df[col][0], df[col][1], df[col][2], df[col][3], df[col][4], df[col][5], df[col][6], df[col][7], df[col][8], df[col][9]]
            golfers_list += [tournament]
    # Replace each row's scraped course name with the closest curated name.
    for x in range(0,len(golfers_list)):
        max_similarity = 0
        max_string = ""
        for y in course_pars_dictionary.keys():
            if similar(golfers_list[x][2],y) > max_similarity:
                max_similarity = similar(golfers_list[x][2],y)
                max_string = y
        golfers_list[x][2] = max_string
        print(golfers_list[x])
        print(float(x)/float(len(golfers_list)))  # crude progress indicator
    gdf = pd.DataFrame(golfers_list)
    gdf.to_csv(r'C:\Users\Mitch\Projects\Golf\Data\Golfers.csv')
    print(gdf)
    print(cdf)
if __name__ == "__main__":
main()
| mtdiedrich/Golf | Collection/html_parser.py | html_parser.py | py | 6,395 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "pandas.set_option",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "difflib.SequenceMatcher",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.date",
... |
1833646743 | import requests
from bs4 import BeautifulSoup
from collections import Counter
from models.autor import Autor
from models.cancion import Cancion
from models.cantante import Cantante
def obtener_letra(url):
    """Download a song's lyrics page and return the lyric lines.

    Returns a list of non-empty, stripped lines, or an empty list when the
    lyrics container div is not found on the page.
    """
    respuesta = requests.get(url)
    documento = BeautifulSoup(respuesta.content, 'html.parser')
    # The lyrics live in a div with this exact inline style and class.
    contenedor = documento.find('div', {'style': 'padding: 10px; ', 'class': 'translate'})
    if not contenedor:
        return []
    # Raw inner HTML; <br/> tags separate the individual lyric lines.
    crudo = contenedor.decode_contents()
    lineas = crudo.replace('<br/>', '\n').split('\n')
    return [linea.strip() for linea in lineas if linea.strip()]
def populate_data():
    """Scrape the top-40 chart page and build the singer/song object graph.

    Returns a dict mapping singer name -> Cantante, where each Cantante
    holds Cancion instances whose lyrics were fetched via obtener_letra().
    """
    url = "https://top40-charts.com/chart.php?cid=21&date=2023-09-03"
    # Request the chart page content.
    response = requests.get(url)
    # Parse the page content with BeautifulSoup.
    soup = BeautifulSoup(response.content, 'html.parser')
    # Find every element carrying song information.
    canciones_elements = soup.find_all('tr', class_='latc_song')
    # Dictionary keeping track of unique singers.
    cantantes_unicos = {}
    for cancion_element in canciones_elements:
        # Extract the song and singer names from the current element.
        nombre_cancion_element = cancion_element.find('a', {'style': 'padding-bottom: 1px; text-decoration: none; border-bottom: 2px solid #F17D38; font-size: 11pt;'})
        nombre_cantante_element = cancion_element.find('a', style='text-decoration: none; ')
        # Fall back to defaults when some details are missing.
        nombre_cancion = nombre_cancion_element.text.strip() if nombre_cancion_element else "Desconocido"
        nombre_cantante = nombre_cantante_element.text.strip() if nombre_cantante_element else "Desconocido"
        # Default genre/year for every song (not available on the chart page).
        genero = "Desconocido"
        año = 2023
        autores = []
        # Reuse the singer instance if already seen; otherwise create it.
        if nombre_cantante not in cantantes_unicos:
            cantante = Cantante(nombre=nombre_cantante, año_nacimiento=2000)  # placeholder birth year
            cantantes_unicos[nombre_cantante] = cantante
        else:
            cantante = cantantes_unicos[nombre_cantante]
        # Create a new Cancion instance.
        cancion = Cancion(nombre=nombre_cancion, genero=genero, año=año, autores=autores, cantantes=[cantante])
        # Build the URL of the song's lyrics page.
        url_letra = f"https://top40-charts.com/songs/lyrics.php{nombre_cancion_element['href'].replace('song.php', '')}"
        # Fetch and attach the lyrics.
        letra = obtener_letra(url_letra)
        cancion.letra = letra
        # Append the song to the singer's song list.
        cantante.añadir_cancion(cancion)
    return cantantes_unicos
if __name__ == "__main__":
    cantantes = populate_data()
    for cantante in cantantes.values():
        # Remember which songs were already printed to avoid duplicates.
        canciones_impresas = set()
        for cancion in cantante.canciones:
            if cancion.nombre not in canciones_impresas:
                print(f"Cantante: {cantante.nombre}")
                print(f"Canción: {cancion.nombre}")
                print("Letra:")
                for linea in cancion.letra:
                    print(linea)
                print("\n")
                canciones_impresas.add(cancion.nombre)
        # Compute the 10 most used words across this singer's songs.
        palabras_mas_usadas = Counter()
        for cancion in cantante.canciones:
            palabras_mas_usadas.update(cancion.histograma_palabras())
        # Print the singer's 10 most used words.
        print("Las 10 palabras más usadas en las canciones de este cantante:")
        for palabra, frecuencia in palabras_mas_usadas.most_common(10):
            print(f"{palabra}: {frecuencia}")
        print("\n")
| JGaratL/ejercicio-python | populate_data.py | populate_data.py | py | 4,432 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
21058207959 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 16:12:35 2018
@author: yangchg
"""
import requests, json, time, sys
from bs4 import BeautifulSoup
from contextlib import closing
import pandas as pd
class lianjiaDownloader():
    """Scraper for second-hand housing listings on sh.lianjia.com."""

    def __init__(self, url):
        # url: district-level listing page used as the scraping entry point.
        self.server = 'http://sh.lianjia.com'
        self.url = url
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
                        "Referer": "https://sh.lianjia.com/ershoufang/"}
        #self.s = requests.session()

    def getAreaUrl(self):
        """Return {street name: absolute URL} parsed from the district page nav."""
        req = requests.get(url=self.url, headers=self.headers)
        html = BeautifulSoup(req.text)
        urllist = html.find_all("div",attrs={"data-role":"ershoufang"})
        divs = BeautifulSoup(str(urllist[0]))
        # The third inner <div> holds the street-level links.
        pddivs = BeautifulSoup(str(divs.findAll('div')[2]))
        alla = pddivs.find_all('a')
        urls = {url.string:self.server+url.get('href') for url in alla }
        return urls

    def getUrlsByStreet(self,url):
        """Return URLs for pages 2..totalPage of one street, read from page 1's pager."""
        req = requests.get(url=url, headers=self.headers)
        html = BeautifulSoup(req.text)
        pagebox = html.find_all("div",class_="page-box house-lst-page-box")
        totalPage = json.loads(pagebox[0].get('page-data'))['totalPage']
        urls = [url+'pg'+str(pageID) for pageID in range(2,totalPage+1)]
        return urls

    def getContext(self,url):
        """Fetch *url* (without custom headers) and return the parsed document."""
        req = requests.get(url)
        html = BeautifulSoup(req.text)
        return html

    def contextToDict(self,context):
        """Extract one dict per house from a listing page; missing fields become ''."""
        # Each 'info clear' div describes one house; walk all of them on this page.
        clears = context.find_all('div',class_='info clear')
        houseDictPerPage = []
        houseLabels = ["title","houseInfo","positionInfo","followInfo","subway","taxfree","haskey","totalPrice","unitPrice"]
        for oneHouseInfos in clears :
            houseDict = {}
            oneHouseInfos = BeautifulSoup(str(oneHouseInfos))
            #print oneHouseInfos.find(class_='title').text
            for label in houseLabels:
                try :
                    houseDict[label] = oneHouseInfos.find(class_=label).text
                except :
                    houseDict[label] = ''
            houseDictPerPage.append(houseDict)
        return houseDictPerPage
if __name__ == '__main__':
    url = 'http://sh.lianjia.com/ershoufang/pudong/'
    downloader = lianjiaDownloader(url)
    urls = downloader.getAreaUrl()
    allDatas = []
    # NOTE(review): testurl appears to be leftover single-street debug code; unused below.
    testurl = {}
    testurl[u'塘桥'] = urls[u'塘桥']
    for key,FirstPageUrl in urls.items() :
        # Scrape the first listing page of this street.
        print(u'开始爬取'+key+u"街道数据:")
        context = downloader.getContext(FirstPageUrl)
        steetData = downloader.contextToDict(context)
        # Use page 1's total page count to build the remaining page URLs.
        streetUrls = downloader.getUrlsByStreet(FirstPageUrl)
        for streeturl in streetUrls:
            print(u'开始爬取第'+streeturl+u"页数据:")
            context = downloader.getContext(streeturl)
            steetData.extend(downloader.contextToDict(context))
            time.sleep(1)  # throttle requests between pages
        steetData = pd.DataFrame(steetData)
        steetData['street'] = key
        allDatas.append(steetData)
    print(u'数据爬取完成!')
    # Split and aggregate the scraped columns, then save everything to Excel.
    allDataFrame = pd.concat(allDatas)
    allDataFrame.reset_index(inplace=True)
    houseInfo = allDataFrame['houseInfo'].str.split('|',expand=True)
    # NOTE(review): rows with a 7th '|' field seem to carry an extra column that
    # is dropped (index 1); verify against the live page layout.
    housebs = houseInfo[houseInfo[6].notna()][[x for x in range(7) if x !=1 ]]
    housebs.columns=[u'小区名称',u'格局',u'面积',u'朝向',u'装修',u'电梯']
    floolHouse= houseInfo[~houseInfo[6].notna()][range(6)]
    floolHouse.columns=[u'小区名称',u'格局',u'面积',u'朝向',u'装修',u'电梯']
    allDataFrame=allDataFrame.join(pd.concat([floolHouse,housebs]))
    positionInfo = allDataFrame['positionInfo'].str.split('-',expand=True)
    positionInfo.columns=[u'楼层',u'区域']
    allDataFrame=allDataFrame.join(positionInfo)
    allDataFrame.to_excel('allDataFrame.xlsx')
    # Normalize price to a float (strip the 10k-CNY unit suffix).
    allDataFrame["totalPrice"] = allDataFrame.totalPrice.apply(lambda x : float(x.replace(u'万','')))
    # Drop rows whose area column actually contains a room-layout string.
    allDataFrame = allDataFrame[~allDataFrame[u'面积'].apply(lambda x : u'室' in x )]
    allDataFrame[u'面积'] = allDataFrame[u'面积'].apply(lambda x :float(x.replace(u'平米','')))
    allDataFrame.describe()
| ycg860102/crawing | 链家/lianjia.py | lianjia.py | py | 4,603 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
15252690223 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.attention import SelfAttention
class DirectInferenceNet(nn.Module):
    # DEPRECATED
    r"""No autoregression.

    Predicts every node's label independently from a hand-crafted slice of
    the raw program tokens (the program_encoder is bypassed in forward()).
    Kept only for reference.
    """

    def __init__(self, program_encoder, num_nodes, label_domains,
                 hidden_size=256, device=None):
        super().__init__()
        self.program_encoder = program_encoder
        self.label_pred_matrix = self.init_pred_layers(
            num_nodes, label_domains, hidden_size)
        self.device = device if device else torch.device('cpu')
        self.hidden_size = hidden_size
        self.label_domains = label_domains
        self.num_nodes = num_nodes

    def init_pred_layers(self, n, doms, dim):
        # One linear head per node mapping the input features to that node's
        # label domain.  NOTE(review): the input width is hardcoded to 10 and
        # the `dim` argument is unused -- it must match the sliced raw-token
        # width produced in forward(); confirm before reusing this class.
        modules = [
            nn.Linear(10, doms[i])
            for i in range(n)
        ]
        return nn.ModuleList(modules)

    def forward(self, program_args):
        # program_emb = self.program_encoder(
        #     *program_args, return_hiddens=True)
        # map from raw input embeddings...
        # NOTE(review): slicing [:, 1:-1] and subtracting 4 presumably strips
        # start/end tokens and a special-token id offset -- TODO confirm.
        program_emb = program_args[0][:, 1:-1] - 4
        program_emb = program_emb.float()
        outputs = []
        for i in range(self.num_nodes):
            out_i = self.label_pred_matrix[i](program_emb)
            outputs.append(out_i)
        return outputs
class AutoregressiveInferenceNet(nn.Module):
    r"""Amortized Autoregressive Inference Network for Constrained Probabilistic Grammars.

    @param program_encoder: instance of a ProgramEncoder class.
                            see rnn_encoder.py
    @param num_nodes: integer
                      number of nodes
    @param label_domains: list/numpy array/torch tensor
                          number of categories for each node in ascending global index
                          from 0 to num_nodes - 1
    @param embedding_size: integer
                           number of nodes in the continuous embedding layer
                           for both NODES and ASSIGNMENTS.
    @param hidden_size: integer
                        number of hidden nodes in the (main) RNN.
    @param hidden_dropout: float [default: 0.2]
                           probability of dropping out nodes from the hidden embedding.
    @param use_batchnorm: boolean [default: False]
                          should we apply a batch norm layer before the label prediction layer?
    @param num_attention_heads: integer [default: 5]
                                number of heads for self_attention
                                https://arxiv.org/abs/1703.03130
    """
    def __init__(self, program_encoder, num_nodes, label_domains, embedding_size=300,
                 hidden_size=256, hidden_dropout=0.2, num_attention_heads=5,
                 use_batchnorm=False, device=None):
        super().__init__()
        self.label_embedding_matrix = self.init_embeddings(num_nodes, label_domains, embedding_size)
        # num_nodes + 1 so the pad pseudo-node also gets an embedding row.
        self.node_embedding = nn.Embedding(num_nodes + 1, embedding_size)
        self.program_encoder = program_encoder
        # Input = [assignment emb ; program emb ; node emb] concatenated.
        self.autoreg_rnn = nn.GRUCell(
            program_encoder.hidden_size + embedding_size * 2,
            hidden_size)
        self.dropout_layer = nn.Dropout(p=hidden_dropout)
        self.autoreg_batchnorm_layer = nn.BatchNorm1d(hidden_size)
        self.self_attention = None
        self.use_attention = num_attention_heads > 0
        if self.use_attention:
            self.self_attention = SelfAttention(
                hidden_size,
                hidden_size,
                num_attention_heads)
            # Attention concatenates the head outputs, so the prediction
            # layers consume hidden_size * num_attention_heads features.
            self.label_pred_matrix = self.init_pred_layers(
                num_nodes, label_domains, hidden_size * num_attention_heads)
            self.label_batchnorm_layer = nn.BatchNorm1d(
                hidden_size * num_attention_heads)
        else:
            self.label_pred_matrix = self.init_pred_layers(
                num_nodes, label_domains, hidden_size)
            self.label_batchnorm_layer = nn.BatchNorm1d(hidden_size)
        self.device = device if device else torch.device('cpu')
        self.hidden_size = hidden_size
        self.hidden_dropout = hidden_dropout
        self.use_batchnorm = use_batchnorm
        self.label_domains = label_domains
        self.num_nodes = num_nodes
        self.num_attention_heads = num_attention_heads

    def init_embeddings(self, n, doms, dim):
        """One embedding table per node (sized to its label domain) plus a pad table."""
        # pad_hack... we want to vectorize things so we learn an extra
        # embedding layer for the pad token...
        modules = [nn.Embedding(doms[i], dim) for i in range(n)]
        modules = modules + [nn.Embedding(2, dim)]
        return nn.ModuleList(modules)

    def init_pred_layers(self, n, doms, dim):
        """One linear output head per node, plus a 2-way head for the pad token."""
        modules = [nn.Linear(dim, doms[i]) for i in range(n)]
        modules = modules + [nn.Linear(dim, 2)]
        return nn.ModuleList(modules)

    def init_rnn_hiddens(self, batch_size):
        """Xavier-initialized initial hidden state for the autoregressive GRU."""
        hidden_state = torch.empty(batch_size, self.hidden_size, device=self.device)
        nn.init.xavier_normal_(hidden_state)
        return hidden_state

    def step(self, node_i, node_ip1, program_emb, h0, hidden_store, node_assignments, execution_lengths):
        """Run one autoregressive step of the GRU.

        Appends the new hidden state to *hidden_store* (mutated in place) and
        returns (per-example logits for node_ip1, new h0, attention weights
        or None when attention is disabled).
        """
        batch_size = node_i.size(0)
        node_i_emb = self.node_embedding(node_i)  # batch_size x embedding_size
        # this represents the node index
        assign_i = []
        for j in range(batch_size):
            # node_i[j] is a single number representing a global index
            # node_assignments[j, node_i[j].item()] is again a single number
            # but it picks out the assignment for node_i[j]
            # we unsqueeze to make a 1x1 tensor
            assign_ij = node_assignments[j, node_i[j].item()].unsqueeze(0)
            # we index the right embedding function T by the global index
            # assign_ij: 1 x embedding_size
            assign_ij = self.label_embedding_matrix[node_i[j].item()](assign_ij)
            assign_i.append(assign_ij)
        assign_i_emb = torch.cat(assign_i, dim=0)  # batch_size x embedding_size
        # batch_size x (program_encoder.hidden_size + embedding_size * 2)
        input_i_emb = torch.cat((assign_i_emb, program_emb, node_i_emb), dim=1)
        if self.use_batchnorm:
            # add batch normalization on top of h0
            # hopefully for more regularization
            h0 = self.autoreg_batchnorm_layer(h0)
        if self.hidden_dropout > 0:
            # dropout some of the hidden embeddings
            h0_dp = self.dropout_layer(h0)
            h0 = self.autoreg_rnn(input_i_emb, h0_dp)  # hidden_size
        else:
            h0 = self.autoreg_rnn(input_i_emb, h0)
        hidden_store.append(h0.unsqueeze(1))
        alphas_i = None
        if self.use_attention:
            attn_hiddens = torch.cat(hidden_store, dim=1)
            attn_lengths = [max(execution_lengths[j].item(), len(hidden_store))
                            for j in range(batch_size)]
            attn_lengths = torch.LongTensor(attn_lengths)
            attn_lengths = attn_lengths.to(self.device)
            # NOTE: should we add dropout to attn_hiddens?
            h0_attn, alphas_i = self.self_attention(attn_hiddens, attn_lengths)
        if self.use_batchnorm:
            # add batch normalization on top of last h0 before label_pred_matrix...
            if self.use_attention:
                h0_attn = self.label_batchnorm_layer(h0_attn)
            else:
                h0 = self.label_batchnorm_layer(h0)
        output_i = []
        for j in range(batch_size):
            # label_pred_matrix[node_ip1[j].item()] is a linear layer over
            # the categories of the next node
            if self.use_attention:
                h0_j = h0_attn[j].unsqueeze(0)
            else:
                h0_j = h0[j].unsqueeze(0)
            output_ij = self.label_pred_matrix[node_ip1[j].item()](h0_j)
            output_ij = output_ij.squeeze(0)
            output_i.append(output_ij)
        return output_i, h0, alphas_i

    def forward(self, execution_trace, execution_lengths, node_assignments, program_args, h0=None):
        r"""
        execution_trace: batch_size by max_trace_length; note this may not equal num_nodes
            each row looks like: [START,1,5,2,7,...,END,PAD,PAD,...]
            the number represents the global index of the node.
        execution_lengths: batch_size
            tells you how many non-pad tokens are in each row of execution_trace.
        node_assignments: batch_size by num_nodes
            each row looks like: [0,4,1,2,5,7,...,0,0,0]
            the number represents the assignment of that node.
            START,PAD,END tokens will always assign to 0.
        program_args: miscellaneous arguments that should be passed to
            the program_encoder. See <rnn_encoder.py>.
        outputs: list of list of tensors
            batch_size by max_trace_length - 1 by num categories
        """
        batch_size = execution_trace.size(0)
        program_emb = self.program_encoder(*program_args, return_hiddens=True)
        max_trace_length = execution_trace.size(1)
        if h0 is None:
            h0 = self.init_rnn_hiddens(batch_size)
        h0 = h0.to(self.device)
        # hack: we need to support computation with padding so add a column of 0s to node_assignments
        # This will learn a useless embedding for the pad variable
        pad_assignments = torch.zeros(batch_size, 1, device=self.device).long()
        node_assignments = torch.cat((node_assignments, pad_assignments), dim=1)
        hidden_store = []
        # it is important that we loop through max_trace_length in the outer loop
        # this way we can still do the RNN computation in a minibatch. However, this
        # means wasting computation on pad tokens...
        outputs = []
        alphas = []  # store attention weights if possible
        for i in range(max_trace_length - 1):
            node_i = execution_trace[:, i].long()  # batch_size
            # we use h0 to predict the assignment of the next node
            node_ip1 = execution_trace[:, i+1].long()
            # If attention = False, alpha_i will be None
            # This method will modify hidden_store by appending the new h0 to it
            output_i, h0, alphas_i = self.step(node_i, node_ip1, program_emb, h0, hidden_store, node_assignments, execution_lengths)
            if self.use_attention:
                alphas.append(alphas_i)
            outputs.append(output_i)
        # outputs is currently max_trace_length - 1 x batch_size x num categories
        # we do not want to take pad into account, so replace them with None
        for j in range(batch_size):
            lengths_j = execution_lengths[j].item() - 1  # -1 bc we ignore last char
            for i in range(lengths_j, max_trace_length - 1):
                outputs[i][j] = None
        # reshape this to batch_size x max_trace_length - 1 x num_categories
        outputs_ = [[outputs[j][i] for j in range(max_trace_length - 1)] for i in range(batch_size)]
        # we keep alphas as size max_trace_length - 1 x batch_size x alpha_matrix
        return outputs_, alphas
class FeedforwardInferenceNet(nn.Module):
    r"""A baseline to AutoregressiveInferenceNet, where no RNN is used. We directly try
    to predict things from a single program embedding."""

    def __init__(self, program_encoder, num_nodes, label_domains,
                 hidden_size=256, hidden_dropout=0.2, device=None, **kwargs):
        super().__init__()
        self.program_encoder = program_encoder
        self.dropout_layer = nn.Dropout(p=hidden_dropout)
        self.feedforward_nn = nn.Linear(program_encoder.hidden_size, hidden_size)
        self.label_pred_matrix = self.init_pred_layers(
            num_nodes, label_domains, hidden_size)
        self.device = device if device else torch.device('cpu')
        self.hidden_size = hidden_size
        self.hidden_dropout = hidden_dropout
        self.label_domains = label_domains
        self.num_nodes = num_nodes

    def init_embeddings(self, n, doms, dim):
        # NOTE(review): not called by this class's forward(); apparently kept
        # for interface parity with AutoregressiveInferenceNet.
        modules = [nn.Embedding(doms[i], dim) for i in range(n)]
        return nn.ModuleList(modules)

    def init_pred_layers(self, n, doms, dim):
        """One linear output head per node, mapping dim -> doms[i] categories."""
        modules = [nn.Linear(dim, doms[i]) for i in range(n)]
        return nn.ModuleList(modules)

    def forward(self, program_args):
        """Encode the program once and predict every node's label from that embedding."""
        program_emb = self.program_encoder(*program_args, return_hiddens=True)
        # Dropout is applied only in training mode.
        if self.hidden_dropout > 0 and self.training:
            program_emb = self.dropout_layer(program_emb)
        # a single hidden layer
        program_emb = F.relu(self.feedforward_nn(program_emb))
        # we now project this to every single possible label
        outputs = []
        for i in range(self.num_nodes):
            output_i = self.label_pred_matrix[i](program_emb)
            outputs.append(output_i)
        return outputs  # num_nodes x (categorical_dim for output_i)
| malik-ali/generative-grading | src/models/inference_net.py | inference_net.py | py | 13,330 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"lin... |
43652881788 | from enum import Enum
from pathlib import Path
# server constants
SERVER_URL = "http://localhost:5000"
TABLE_SCHEMA_PATH = str(Path("app") / "database" / "schema.sql")
DEFAULT_DB_PATH = "user.db"
FETCH_DELAY_PERIOD = 5  # time period between each server data update (seconds)
# other
PREFFERED_ENCODING = "utf-8"
# crypto constants
# NEVER DO THIS!!! -- a fixed, public salt defeats the purpose of salting;
# real deployments must generate a random per-user salt.
HASH_SALT = "made by wilkueti".encode(PREFFERED_ENCODING)
MAX_ONE_TIME_KEYS = 15
# length of the keys is derived from the signal documentation
SHARED_KEY_LENGTH = 32
RATCHET_STATE_KEY_LENGTH = 64
# according to crypto library docs nonce should have 96 bits
# NOTE(review): a constant AEAD nonce is unsafe if any key is ever reused -- confirm.
AEAD_NONCE = "SEG0PPiuHAFm".encode(PREFFERED_ENCODING)
BLOCK_SIZE = 128
class MainMenuOptions(Enum):
    """Selectable actions in the client's main menu (0-based option indices)."""
    MESSAGE = 0
    ADD_FRIEND = 1
    CHANGE_CREDENTIALS = 2
    REMOVE_ACCOUNT = 3
    WAITROOM = 4
    EXIT = 5
| michalwilk123/elliot-chat-client | app/config.py | config.py | py | 823 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 25,
"usage_type": "name"
}
] |
8285329290 | import requests
import pyqrcode
import traceback, ctypes, msvcrt, time, os
from bs4 import BeautifulSoup
from datetime import datetime
from dateutil import parser as dateutil_parser
def wait_any_key(prompt):
    """Print *prompt* without a newline and block until any key is pressed
    (Windows only: relies on msvcrt.getch)."""
    print(prompt, end = "", flush = True)
    msvcrt.getch()
    print()
def run_fallible_or_report(fn, retries = 0):
    """Call *fn* and return its result, reporting any exception.

    On an exception the traceback is printed and the call is retried.
    A negative *retries* retries forever; otherwise at most *retries*
    additional attempts are made before returning None.
    """
    attempt = 0
    while True:
        try:
            return fn()
        except Exception:
            print("严重错误: 程序抛出了未处理的异常")
            traceback.print_exc()
            if attempt == retries:
                return None
            attempt += 1
def get_ts():
    """Return the current Unix timestamp in whole milliseconds."""
    millis = time.time() * 1000
    return int(millis)
def get_time_str_precision_sec():
    """Return the local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return format(datetime.now(), "%Y-%m-%d %H:%M:%S")
def get_time_str_precision_day():
    """Return the local date formatted as 'YYYY-MM-DD'."""
    return format(datetime.now(), "%Y-%m-%d")
def is_tool_exist(name):
    """Return True if an executable called *name* can be found on PATH.

    Delegates to shutil.which, so platform lookup rules (e.g. PATHEXT on
    Windows) are honored.
    """
    from shutil import which
    # PEP 8: compare against None with `is not`, not `!=`.
    return which(name) is not None
def is_file_exist(path):
    """Return True if *path* exists on the filesystem (os.path.exists: matches
    directories as well as files)."""
    return os.path.exists(path)
def start_ffmpeg_recording_blocking(filename, url):
    """Record the stream at *url* with ffmpeg into '<filename>.ts', blocking
    until ffmpeg exits.

    Opens a separate console window via Windows `start /w`; the user must
    stop the recording with ffmpeg's `q` key so the file is finalized.
    """
    # Bug fix: the output path was hardcoded to "(unknown).ts", ignoring the
    # `filename` argument and overwriting the previous recording every time.
    os.system(f"start \"ffmpeg 录制监控台\" /w ffmpeg -i \"{url}\" -c copy -y \"{filename}.ts\"")
class NuaaFeiTianClient:
    """HTTP client for NUAA's FeiTian lecture portal.

    Holds a requests session that carries the CAS login cookies across calls.
    """

    def __init__(self):
        self.session = requests.session()

    def do_login_qrcode(self):
        """Log in through the CAS QR-code flow.

        Shows the QR code in a tkinter window, polls the auth server until it
        is scanned (or expires), then completes the CAS POST and the FeiTian
        OAuth authorization.  Returns True on success, False otherwise.
        """
        import tkinter
        succeeded = False
        login_html = self.session.get("https://authserver.nuaa.edu.cn/authserver/login?type=qrcode").text
        login_soup = BeautifulSoup(login_html, "html.parser")
        login_post_data = {}
        login_uuid = self.session.get(f"https://authserver.nuaa.edu.cn/authserver/qrCode/getToken?ts={get_ts()}").text
        # Collect the login form's hidden fields, substituting our fresh QR uuid.
        for i in login_soup.body.find_all("input"):
            key = i.get("name")
            value = i.get("value")
            if key == "uuid":
                value = login_uuid
            login_post_data[key] = value
        qr = pyqrcode.create(f"https://authserver.nuaa.edu.cn/authserver/qrCode/qrCodeLogin.do?uuid={login_uuid}")
        ws = tkinter.Tk()
        # Scale the window for high-DPI displays (Windows-specific API).
        ws.call("tk", "scaling", ctypes.windll.shcore.GetScaleFactorForDevice(0) / 75)
        ws.title("二维码登录")
        ws.resizable(False, False)
        ws.config(bg = "#ffffff")
        img = tkinter.BitmapImage(data = qr.xbm(scale = 6))
        img_lbl = tkinter.Label(ws)
        img_lbl.config(image = img, bg = "#ffffff")
        img_lbl.pack()
        check_job = None
        def poll_login_state():
            # Returns True when polling should stop (success, expiry, or error).
            nonlocal succeeded
            resp = self.session.get(f"https://authserver.nuaa.edu.cn/authserver/qrCode/getStatus.htl?ts={get_ts()}&uuid={login_uuid}").text
            # Check if scan is pending
            if resp == "0" or resp == "2":
                print(end = ".", flush = True)
                return False
            # Rest occasions requiring handling
            print(".")
            if resp == "1":
                # Succeeded
                succeeded = True
                return True
            elif resp == "3":
                # Expired
                print("错误: 二维码已过期")
                return True
            else:
                print("错误: 未知的二维码登录状态")
                return True
        def loop_check(time):
            # Re-schedules itself every `time` milliseconds until polling finishes.
            nonlocal check_job
            if poll_login_state():
                check_job = None
                ws.destroy()
            else:
                check_job = ws.after(time, lambda: loop_check(time))
        print("正在等待响应", end = "", flush = True)
        loop_check(1000)
        ws.mainloop()
        if check_job != None:
            # Maybe the user closed the window
            print(".")
            print("错误: 登录过程出现未知错误或用户中止了操作")
            ws.after_cancel(check_job)
        if succeeded:
            # Finish CAS login with the collected form data, then authorize the
            # session against the FeiTian VOD application.
            login_post = self.session.post("https://authserver.nuaa.edu.cn/authserver/login", data = login_post_data)
            ft_login = self.session.get("https://ft.nuaa.edu.cn/jy-application-vod-he/oauth2/authorize?json=0&returnUri=https%3A%2F%2Fft.nuaa.edu.cn%2Fjy-application-vod-he-ui%2F%3Ftype%3Dcas")
        return succeeded

    def get_today_lessons(self):
        """Return the portal's JSON listing of today's remaining live lessons,
        ordered by start time."""
        data_json = self.session.get(
            "https://ft.nuaa.edu.cn/jy-application-vod-he/v1/vod_live",
            params = {
                "page.pageIndex": "1",
                "page.pageSize": "8",
                "courBeginTime": get_time_str_precision_sec(),
                "courEndTime": f"{get_time_str_precision_day()} 23:59:59",
                "page.orders[0].asc": "true",
                "page.orders[0].field": "courBeginTime"
            },
            timeout = 5
        ).json()
        return data_json

    def get_lesson_vinfo(self, lesson_id):
        """Return the JSON stream/video info for the lesson with *lesson_id*."""
        data_json = self.session.get(
            f"https://ft.nuaa.edu.cn/jy-application-vod-he/v1/course_vod_videoinfos?courseId={lesson_id}",
            timeout = 5
        ).json()
        return data_json
def main():
    """Interactive entry point: log in via QR code, then watch today's
    schedule and record each lesson's live stream with ffmpeg as it starts."""
    ctypes.windll.shcore.SetProcessDpiAwareness(1)  # high-DPI awareness for the QR window
    print("南航飞天云课堂录播工具 v0.1.0")
    print()
    print("警告: 录制课程时, ffmpeg 录制窗口会弹出, 请务必使用 `q` 键来停止录制, 否则会导致录制文件损坏!")
    print("提示: 云端提供的视频流有时可能会出现无法自动检测到的问题, 此时需要手动停止录制, 以让程序刷新出正确的视频流。")
    print()
    if not is_tool_exist("ffmpeg"):
        print("错误: 找不到 ffmpeg。程序将退出。");
        return
    #os.system("start /w ffmpeg --help")
    client = NuaaFeiTianClient()
    print("即将通过二维码方式进行登录, 请使用 `i·南航` 扫码登录。")
    if not client.do_login_qrcode():
        print("错误: 登录失败。程序将退出。")
        return
    print("登录成功。")
    print()
    #while True:
    #    try:
    #        eval(input())
    #    except Exception as e:
    #        traceback.print_exc()
    print("加载课程信息...", end = " ", flush = True)
    lessons = client.get_today_lessons()["data"]["records"]
    print("完成。")
    while True:
        if len(lessons) < 1:
            print("今日已无课程可上。停止检测课程。")
            break
        # The listing is sorted by start time, so entry 0 is the next lesson.
        lesson = lessons[0]
        lesson_id = lesson["id"]
        lesson_name = lesson["subjName"]
        lesson_teacher = lesson["teacNames"][0]
        begin_time = dateutil_parser.parse(lesson["courBeginTime"])
        end_time = dateutil_parser.parse(lesson["courEndTime"])
        print(
            f"接下来的第一节课: `{lesson_name}`--{lesson_teacher}",
            f"({begin_time.strftime('%H:%M:%S')} ~ {end_time.strftime('%H:%M:%S')})"
        )
        if (begin_time - datetime.now()).total_seconds() > 60 * 15:
            # Too long; wait for some time
            time.sleep(60 * 5)
        else:
            # Enter lesson recording loop
            print(f"* 准备录制课程: `{lesson_name}`--{lesson_teacher}")
            print(f"* 此课程的在线链接: https://ft.nuaa.edu.cn/jy-application-vod-he-ui/?type=cas#/live-detail?id={lesson_id}")
            recorded = False
            while True:
                time.sleep(1.5)
                vinfo = run_fallible_or_report(lambda: client.get_lesson_vinfo(lesson_id), 3)
                if vinfo == None:
                    continue
                if "data" not in vinfo or vinfo["data"] == None:
                    # Before starting or after ended; break loop in case of the latter
                    if recorded:
                        break
                    #if (datetime.now() - end_time).total_seconds() > 60 * 15:
                    #    break
                else:
                    recorded = True
                    vinfo_data = vinfo["data"]
                    rec_begin_time_str = datetime.now().strftime("%Y.%m.%d@%H.%M.%S")
                    print(f"* 开始录制视频流 ({rec_begin_time_str})")
                    vstream_url = vinfo_data["courseDeviceViewDtoList"][0]["chanNameMainHlsPlayUrl"]
                    # Blocks until the user stops ffmpeg with `q`.
                    start_ffmpeg_recording_blocking(f"{rec_begin_time_str}-{lesson_name}-{lesson_teacher}", vstream_url)
                    print(f"* 结束录制视频流 ({rec_begin_time_str})")
            print(f"* 结束录制课程: `{lesson_name}`--{lesson_teacher}")
        print("刷新课程信息...", end = " ", flush = True)
        lessons = run_fallible_or_report(lambda: client.get_today_lessons()["data"]["records"])
        print("完成。")
if __name__ == "__main__":
run_fallible_or_report(lambda: main())
wait_any_key("\n请按任意键继续...")
| apkipa/NuaaFTRecorder | 飞天云课堂录播工具.py | 飞天云课堂录播工具.py | py | 8,680 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "msvcrt.getch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
41055406846 | import json
import logging
import smtplib
import ssl
import boto3
from botocore.exceptions import ClientError, WaiterError
from ses_identities import SesIdentity
from ses_templates import SesTemplate
from ses_generate_smtp_credentials import calculate_key
logger = logging.getLogger(__name__)
# snippet-start:[python.example_code.ses.SesDestination]
class SesDestination:
    """Contains data about an email destination."""

    def __init__(self, tos, ccs=None, bccs=None):
        """
        :param tos: The list of recipients on the 'To:' line.
        :param ccs: The list of recipients on the 'CC:' line.
        :param bccs: The list of recipients on the 'BCC:' line.
        """
        self.tos = tos
        self.ccs = ccs
        self.bccs = bccs

    def to_service_format(self):
        """
        :return: The destination data in the format expected by Amazon SES.
        """
        # The 'To' list is mandatory; CC/BCC lists are attached only when the
        # caller supplied them.
        optional = (("CcAddresses", self.ccs), ("BccAddresses", self.bccs))
        svc_format = {"ToAddresses": self.tos}
        svc_format.update(
            {field: recipients for field, recipients in optional if recipients is not None}
        )
        return svc_format
# snippet-end:[python.example_code.ses.SesDestination]
# snippet-start:[python.example_code.ses.SesMailSender]
class SesMailSender:
    """Encapsulates functions to send emails with Amazon SES."""

    def __init__(self, ses_client):
        """
        :param ses_client: A Boto3 Amazon SES client.
        """
        self.ses_client = ses_client

    # snippet-end:[python.example_code.ses.SesMailSender]

    # snippet-start:[python.example_code.ses.SendEmail]
    def send_email(self, source, destination, subject, text, html, reply_tos=None):
        """
        Sends an email.

        Note: If your account is in the Amazon SES sandbox, the source and
        destination email accounts must both be verified.

        :param source: The source email account.
        :param destination: The destination email account.
        :param subject: The subject of the email.
        :param text: The plain text version of the body of the email.
        :param html: The HTML version of the body of the email.
        :param reply_tos: Email accounts that will receive a reply if the recipient
                          replies to the message.
        :return: The ID of the message, assigned by Amazon SES.
        """
        # Build the keyword arguments for SES; the reply-to list is optional.
        kwargs = dict(
            Source=source,
            Destination=destination.to_service_format(),
            Message={
                "Subject": {"Data": subject},
                "Body": {"Text": {"Data": text}, "Html": {"Data": html}},
            },
        )
        if reply_tos is not None:
            kwargs["ReplyToAddresses"] = reply_tos
        try:
            ses_response = self.ses_client.send_email(**kwargs)
            message_id = ses_response["MessageId"]
            logger.info(
                "Sent mail %s from %s to %s.", message_id, source, destination.tos
            )
        except ClientError:
            logger.exception(
                "Couldn't send mail from %s to %s.", source, destination.tos
            )
            raise
        else:
            return message_id

    # snippet-end:[python.example_code.ses.SendEmail]

    # snippet-start:[python.example_code.ses.SendTemplatedEmail]
    def send_templated_email(
        self, source, destination, template_name, template_data, reply_tos=None
    ):
        """
        Sends an email based on a template. A template contains replaceable tags
        each enclosed in two curly braces, such as {{name}}. The template data passed
        in this function contains key-value pairs that define the values to insert
        in place of the template tags.

        Note: If your account is in the Amazon SES sandbox, the source and
        destination email accounts must both be verified.

        :param source: The source email account.
        :param destination: The destination email account.
        :param template_name: The name of a previously created template.
        :param template_data: JSON-formatted key-value pairs of replacement values
                              that are inserted in the template before it is sent.
        :param reply_tos: Email accounts that will receive a reply if the recipient
                          replies to the message.
        :return: The ID of the message, assigned by Amazon SES.
        """
        # Template data must be serialized to a JSON string for the SES API.
        kwargs = dict(
            Source=source,
            Destination=destination.to_service_format(),
            Template=template_name,
            TemplateData=json.dumps(template_data),
        )
        if reply_tos is not None:
            kwargs["ReplyToAddresses"] = reply_tos
        try:
            ses_response = self.ses_client.send_templated_email(**kwargs)
            message_id = ses_response["MessageId"]
            logger.info(
                "Sent templated mail %s from %s to %s.",
                message_id,
                source,
                destination.tos,
            )
        except ClientError:
            logger.exception(
                "Couldn't send templated mail from %s to %s.", source, destination.tos
            )
            raise
        else:
            return message_id
# snippet-end:[python.example_code.ses.SendTemplatedEmail]
# snippet-start:[python.example_code.ses.Scenario_SendEmail]
def usage_demo():
    """Interactive demo: verify an identity, then send plain, templated, and
    SMTP email through Amazon SES, cleaning up the demo template afterwards."""
    print("-" * 88)
    print("Welcome to the Amazon Simple Email Service (Amazon SES) email demo!")
    print("-" * 88)
    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
    # Wrappers around the SES client for identity, sending, and templates.
    ses_client = boto3.client("ses")
    ses_identity = SesIdentity(ses_client)
    ses_mail_sender = SesMailSender(ses_client)
    ses_template = SesTemplate(ses_client)
    email = input("Enter an email address to send mail with Amazon SES: ")
    status = ses_identity.get_identity_status(email)
    verified = status == "Success"
    if not verified:
        # Offer to verify the address; sandbox accounts can only mail verified
        # identities.
        answer = input(
            f"The address '{email}' is not verified with Amazon SES. Unless your "
            f"Amazon SES account is out of sandbox, you can send mail only from "
            f"and to verified accounts. Do you want to verify this account for use "
            f"with Amazon SES? If yes, the address will receive a verification "
            f"email (y/n): "
        )
        if answer.lower() == "y":
            ses_identity.verify_email_identity(email)
            print(f"Follow the steps in the email to {email} to complete verification.")
            print("Waiting for verification...")
            try:
                # Blocks until the identity appears or the waiter times out.
                ses_identity.wait_until_identity_exists(email)
                print(f"Identity verified for {email}.")
                verified = True
            except WaiterError:
                print(
                    f"Verification timeout exceeded. You must complete the "
                    f"steps in the email sent to {email} to verify the address."
                )
    if verified:
        # 1) Plain text + HTML email via the SES API.
        test_message_text = "Hello from the Amazon SES mail demo!"
        test_message_html = "<p>Hello!</p><p>From the <b>Amazon SES</b> mail demo!</p>"
        print(f"Sending mail from {email} to {email}.")
        ses_mail_sender.send_email(
            email,
            SesDestination([email]),
            "Amazon SES demo",
            test_message_text,
            test_message_html,
        )
        input("Mail sent. Check your inbox and press Enter to continue.")
        # 2) Templated email: {{name}} / {{action}} are replaced from
        # template_data at send time.
        template = {
            "name": "doc-example-template",
            "subject": "Example of an email template.",
            "text": "This is what {{name}} will {{action}} if {{name}} can't display "
            "HTML.",
            "html": "<p><i>This</i> is what {{name}} will {{action}} if {{name}} "
            "<b>can</b> display HTML.</p>",
        }
        print("Creating a template and sending a templated email.")
        ses_template.create_template(**template)
        template_data = {"name": email.split("@")[0], "action": "read"}
        if ses_template.verify_tags(template_data):
            ses_mail_sender.send_templated_email(
                email, SesDestination([email]), ses_template.name(), template_data
            )
            input("Mail sent. Check your inbox and press Enter to continue.")
        # 3) Raw SMTP: derive the SMTP password from the AWS secret key and
        # send over a STARTTLS connection.
        print("Sending mail through the Amazon SES SMTP server.")
        boto3_session = boto3.Session()
        region = boto3_session.region_name
        credentials = boto3_session.get_credentials()
        port = 587
        smtp_server = f"email-smtp.{region}.amazonaws.com"
        password = calculate_key(credentials.secret_key, region)
        message = """
Subject: Hi there

This message is sent from the Amazon SES SMTP mail demo."""
        context = ssl.create_default_context()
        with smtplib.SMTP(smtp_server, port) as server:
            server.starttls(context=context)
            server.login(credentials.access_key, password)
            server.sendmail(email, email, message)
        print("Mail sent. Check your inbox!")
    # Cleanup: remove the demo template and optionally the verified identity.
    if ses_template.template is not None:
        print("Deleting demo template.")
        ses_template.delete_template()
    if verified:
        answer = input(f"Do you want to remove {email} from Amazon SES (y/n)? ")
        if answer.lower() == "y":
            ses_identity.delete_identity(email)
    print("Thanks for watching!")
    print("-" * 88)
# Run the interactive demo when executed as a script.
if __name__ == "__main__":
    usage_demo()
| awsdocs/aws-doc-sdk-examples | python/example_code/ses/ses_email.py | ses_email.py | py | 9,394 | python | en | code | 8,378 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "botocor... |
34640311085 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from database.models import CollectionType
from database.models import AnnotationType
from rest.serializers.object_types import annotations
from . import types
# Through model of the CollectionType <-> AnnotationType many-to-many
# relation; each row links a collection type to one allowed annotation type.
MODEL = CollectionType.annotation_types.through  # pylint: disable=E1101


class SelectSerializer(serializers.ModelSerializer):
    """Minimal representation for selection widgets: link, id and type icon."""
    # Icon URL is read through the related AnnotationType instance.
    icon = serializers.URLField(source='annotationtype.icon.url')

    class Meta:
        model = MODEL
        fields = (
            'url',
            'id',
            'icon',
        )
class ListSerializer(serializers.ModelSerializer):
    """List representation: exposes the related annotation type's id, icon
    and JSON annotation schema for each link row."""
    # Primary key of the related AnnotationType (read-only).
    annotation_type = serializers.PrimaryKeyRelatedField(
        many=False,
        read_only=True,
        source='annotationtype')
    icon = serializers.ImageField(
        source='annotationtype.icon')
    # JSON schema describing valid annotations for this type.
    annotation_schema = serializers.JSONField(
        source='annotationtype.annotation_schema')

    class Meta:
        model = MODEL
        fields = (
            'url',
            'id',
            'annotation_type',
            'annotation_schema',
            'icon',
        )
class DetailSerializer(serializers.HyperlinkedModelSerializer):
    """Detail representation: nests both sides of the link using their
    respective select serializers."""
    annotation_type = annotations.SelectSerializer(
        many=False,
        read_only=True,
        source='annotationtype')
    collection_type = types.SelectSerializer(
        many=False,
        read_only=True,
        source='collectiontype')

    class Meta:
        model = MODEL
        fields = (
            'url',
            'id',
            'collection_type',
            'annotation_type',
        )
class CreateSerializer(serializers.ModelSerializer):
    """Create representation: the caller supplies only the annotation type;
    the collection type is injected from the serializer context."""
    annotation_type = serializers.PrimaryKeyRelatedField(
        many=False,
        read_only=False,
        queryset=AnnotationType.objects.all(),  # pylint: disable=E1101
        source='annotationtype')

    class Meta:
        model = MODEL
        fields = (
            'annotation_type',
        )

    def create(self, validated_data):
        """Attach the collection type from context before saving the link.

        Expects the view to place the target CollectionType under the
        'collection_type' context key.
        """
        collection_type = self.context['collection_type']
        validated_data['collectiontype'] = collection_type
        return super().create(validated_data)
| CONABIO-audio/irekua | irekua/rest/serializers/object_types/data_collections/annotations.py | annotations.py | py | 2,184 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "database.models.CollectionType.annotation_types",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "database.models.CollectionType",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number... |
38182352525 | from types import NoneType
import pandas as pd
import matplotlib.pyplot as plt
import streamlit as st
import functions as func
import eda
def info_input(data):
    """Render the property-description form and geocode the typed address.

    Returns (coordinates, property_values):
    - coordinates: one-row DataFrame with 'lat'/'lon', or None when no address
      was entered or geocoding failed.
    - property_values: [type, province, autcom, rooms, bathrooms, surface,
      terrace, pool, ac], or None when geocoding raised.
    """
    ##########################################################################################
    ###Prediction value input
    ##########################################################################################
    #Get unique values
    houseType = data.Type.unique()
    provs = data.Province.unique()
    autCom= data['Autonomous Community'].unique()
    #Make columns and generate selection boxes
    col1, col2, col3 = st.columns(3)
    # NOTE(review): the last house type is deliberately excluded from the
    # selectbox — presumably a placeholder value; confirm against the dataset.
    type = col1.selectbox("House type", houseType[:len(houseType)-1])
    province = col2.selectbox("Province", provs)
    autcom = col3.selectbox("Autonomous Community", autCom)
    col4, col5, col6 = st.columns(3)
    rooms = col4.slider("Rooms", 0, data.Rooms.max())
    bathrooms = col5.slider("Bathrooms", data.Bathrooms.min(), data.Bathrooms.max())
    surface = col6.number_input('Surface (m2)')
    col7, col8, col9 = st.columns(3)
    terrace = col7.checkbox("Terrace")
    pool = col8.checkbox("Pool")
    ac = col9.checkbox("Air Conditioner")
    #Add prediction values to a list
    property_values = [type, province, autcom, rooms, bathrooms, surface, terrace, pool, ac]
    ##########################################################################################
    ###Address input
    ##########################################################################################
    address = st.text_input("Address:")
    if address == "":
        return None, property_values
    else:
        try:
            lat, lon = func.house_gps_finder(address) ### get lat and lon of property ###
            st.write(lat, lon)
            coordinates = pd.DataFrame() ### create dataframe in order to append values ###
            coordinates = coordinates.append({"lon": lon, "lat": lat}, ignore_index=True)
            st.map(coordinates)
            return coordinates, property_values ### return coordinates in order to use them in cluster_finder function (see Streamlit.py) ###
        except:
            # Geocoding failed — report it and return nothing usable.
            st.write("No location found, please try again")
            return None, None
def cluster_finder(coordinates, data):
    """Interactively build price/m2 clusters around the selected property.

    Shows the neighbourhood on a map, quantile-bins 'Price/m2' into clusters,
    plots them, and lets the user pick a final cluster value.

    Returns (cluster, clus_amount) — both None when the user opts out of
    clustering. NOTE: returns implicitly (None) when `coordinates` is absent,
    so callers must only call this with valid coordinates.
    """
    ###THIS FUNCTION WILL SUGGEST PRICE CLUSTERS IN ORDER TO HELP ML MODEL
    st.header("Cluster finder")
    if type(coordinates) == int or type(coordinates) == NoneType:
        st.write("Input an address in order to start looking for clusters")
    else:
        ### Note that lat and lon will be used consistently as variables of the selected +
        ### + property, lat1, lat2, lon1, lon2, are used in order to chop the dataframe
        lat = coordinates.lat
        lon = coordinates.lon
        # Threshold slider is in metres-like units; scaled down to degrees.
        threshold = st.slider("Threshold", 1, 100, step=1, value = 40)
        threshold = threshold/1000
        lat1, lat2, lon1, lon2 = func.cluster_area(lat, lon, threshold) #OBTAIN COORDINATE LIMITS IN ORDER TO CHOP DATAFRAME
        cut = func.dataframe_chopper(data, lat1, lat2, lon1, lon2) #CHOP DATAFRAME TO GET PROPERTIES WITHIN AREA
        st.map(cut)
        area_dframe = st.checkbox("See selected area dataframe")
        if area_dframe:
            st.dataframe(cut)
        ##########################################################################################
        ### Cluster generator
        ##########################################################################################
        clus_amount = st.slider("Price/m2 cluster amount", 0, 20, step=1, value=10)
        # NOTE(review): this mutates the caller's `data` by adding the
        # 'Price cluster' column in place.
        data['Price cluster'] = pd.qcut(data['Price/m2'], clus_amount, labels=False) #Create cluster column
        cut = func.dataframe_chopper(data, lat1, lat2, lon1, lon2) #Make cut dataframe again with added cluster column
        # NOTE(review): reset_index() returns a new frame; this call discards
        # its result.
        cut.reset_index()
        ##########################################################################################
        ### Circle area
        ##########################################################################################
        ### Variable point_lat & point_lon will be used to calculate the properties that fall within the selected property radius
        radius = st.slider("Final radius (Km):", 0.1, 10.0, step=0.1, value=10.0)
        cut = func.distance_calculator(cut, lat, lon) #Calculate distance between points
        cut = func.radius_chop(cut, radius) #Delete rows that fall outside radius
        ##########################################################################################
        ### Plotting
        ##########################################################################################
        # Scatter of nearby properties coloured by cluster; the selected
        # property is drawn as a large red dot.
        fig = plt.Figure(figsize=(15,15))
        ax = fig.add_subplot(111)
        scatter = ax.scatter(cut.longitude, cut.latitude, c=cut['Price cluster'])
        ax.scatter(lon, lat, c="red", s=400 ,label="Label2")
        ax.legend(*scatter.legend_elements(),loc="upper right", title="Price/m2 clusters (0<9)")
        st.pyplot(fig)
        # Summary metrics for the properties inside the radius.
        col1, col2, col3 = st.columns(3)
        col1.metric("Properties count", len(cut))
        col2.metric("Avrg. Price", int(round(cut.Price.mean(), 0)))
        col3.metric("Avrg. Surface", round(cut.Surface.mean(), 2))
        col4, col5, col6 = st.columns(3)
        col4.metric("Avrg. Rooms", round(cut.Rooms.mean(), 2))
        col5.metric("Avrg. Bathrooms", round(cut.Bathrooms.mean(),2))
        col6.metric("Avrg. Price/m2", round(cut['Price/m2'].mean(),2))
        col1, col2, col3 = st.columns(3)
        col1.metric("Cluster mean", round(cut['Price cluster'].mean(), 2))
        col2.metric("Cluster median", cut['Price cluster'].median())
        col3.metric("Cluster mode", cut['Price cluster'].mode())
        numerics = eda.numerics_generator(cut)
        barchart = st.checkbox("Show cluster bar chart")
        if barchart:
            st.bar_chart(cut[['Price cluster', 'Price']].groupby("Price cluster").count())
        eda.plotter(cut, numerics)
        ##########################################################################################
        ### Final values selection and returning
        ##########################################################################################
        st.subheader("Select final cluster:")
        choice = st.radio("Pick final cluster value:", [round(cut['Price cluster'].mean(), 2),
            cut['Price cluster'].median(), cut['Price cluster'].mode()[0], "Input custom cluster", "Do not use cluster" ], index=4)
        if choice == "Input custom cluster":
            cluster = st.number_input("Input cluster", 0, cut['Price cluster'].max())
        elif choice == "Do not use cluster":
            cluster = None
            clus_amount = None
        else:
            cluster = choice
        return cluster, clus_amount
def prediction_data(property_values, cluster, coordinates): #Shows data for prediction and gets "actual price" input
    """Display the chosen property attributes and ask for a known price.

    Returns the user-entered actual price (0 when unknown).
    """
    st.subheader("Prediction info")
    col1, col2 = st.columns(2)
    # Left column: a plain-text summary of every feature fed to the model.
    col1.text("Property type: " + property_values[0]
        + "\nRooms: " + str(property_values[3]) + "\nBathrooms: " + str(property_values[4]) +
        "\nSurface: " + str(property_values[5]) + "\nAutonomous Community: " + property_values[2]
        + "\nProvince: " + property_values[1] + "\nTerrace: " + str(property_values[6]) + "\nPool: "
        + str(property_values[7]) + "\nAir Conditioner: " + str(property_values[8]) +
        "\nCluster: " + str(cluster) + "\nLatitude: " + str(coordinates.lat[0]) +
        "\nLongitude: " + str(coordinates.lon[0]))
    actual_price = st.number_input("I know the actual price: ", 0, 1000000, value=0)
    col2.text(func.pretty_house()) #shows pretty house :D
    return actual_price
def predict(data, coordinates, values, cluster, clus_amount, actual_price):
    """Train the model with the candidate row appended, predict its price,
    and display the prediction (and error when the actual price is known)."""
    ##########################################################################################
    # Preparing data from ML model
    ##########################################################################################
    st.subheader("Result")
    # Feature subset used by the model. NOTE(review): this is a view of
    # `data`; the 'cluster' assignment below may trigger pandas'
    # SettingWithCopyWarning.
    mldata = data[['Price', 'Bathrooms', 'Rooms',
        'Surface', 'Pools', 'Air Conditioner', 'Type',
        'Terrace', 'Autonomous Community', 'Province',
        'Latitude', 'Longitude']]
    if clus_amount is not None:
        mldata['cluster'] = pd.qcut(data['Price/m2'], clus_amount, labels=False)
    # Build the candidate property as a single row matching mldata's columns.
    prediction_data_dict = {'Bathrooms': values[4], 'Rooms': values[3],
        'Surface': values[5], 'Pools': int(values[7]), 'Air Conditioner': int(values[8]),
        'Type': values[0], 'Terrace': int(values[6]), 'Autonomous Community': values[2],
        'Province': values[1], 'Latitude': coordinates.lat[0], 'Longitude': coordinates.lon[0], 'cluster': cluster}
    prediction_row = pd.Series(prediction_data_dict) #Create row
    prediction_index = len(mldata) #Get new row index
    X_raw = mldata.drop("Price", axis=1) #Non encoded X
    y = mldata.Price
    # Append the candidate before encoding so categorical encodings are
    # consistent between training rows and the prediction row.
    X_raw = X_raw.append(prediction_row, ignore_index=True) #We append prediction row to X
    X = func.data_encoder(X_raw, cluster) #We encode X
    prediction_row_encoded = X.loc[prediction_index] #We get our prediction row encoded
    X.drop(labels=prediction_index, axis=0, inplace=True) #We remove the row to train model with prices
    ##########################################################################################
    # Prediction
    ##########################################################################################
    col1, col2 = st.columns(2)
    prediction = func.predictor(X, y, prediction_row_encoded, prediction_index) #Train model and get prediction back
    col1.subheader("Prediction: " + str(prediction))
    if actual_price != 0:
        error = prediction - actual_price
        col2.subheader("Error: " + str(error))
        # Celebrate when the (signed) error is under 10% of the actual price.
        if error < actual_price*0.1:
            st.balloons()
#Machine learning info input process
def machine_learning(coordinates, property_values, data):
    """Drive the clustering + prediction workflow once coordinates exist."""
    if coordinates is not None:
        clusterCheck = st.checkbox("Use clustering")
        if clusterCheck:
            cluster, clus_amount = cluster_finder(coordinates, data)
            actual_price = prediction_data(property_values, cluster, coordinates)
        else:
            # No clustering: the model is trained without a cluster column.
            cluster = None
            clus_amount = None
            actual_price = prediction_data(property_values, cluster, coordinates)
        predictbutton = st.button("Predict")
        if predictbutton:
            predict(data, coordinates, property_values, cluster, clus_amount, actual_price)
| HAL9044/IronHack-s-Final-Project | files/ml.py | ml.py | py | 10,675 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.columns",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "streamlit.text_... |
41465744834 | from apis import magichome
from jarvis import helper
import functools
import logging as log
bulb = None
bulb_addr = None
def action( args ):
    """Dispatch a light command: args[0] names the action ('on', 'colour',
    'brightness', 'mode', ...); remaining items are its parameters.

    Lazily discovers the bulb on first use; any failure resets the cached
    bulb so the next call re-discovers it.
    """
    global bulb
    if bulb == None:
        bulb = _discover()
    try:
        if args[0] in _light_actions:
            key = args[0]
            # Handlers receive the full args list, including the action key.
            _light_actions[key]( args )
            log.info( f"Magichome light with: {key} {args}." )
    except Exception as e:
        log.info( f"Magichome actions exception: {e}" )
        # Drop the cached bulb so the next action attempts re-discovery.
        bulb = None
#######################
###   light state   ###
#######################
# Maps the action token directly to the desired power state.
_light_states = {
    "on": True,
    "off": False,
    "default": True,
}
def _light_set_state( args ):
    """Turn the light on or off; args[0] is the 'on'/'off' token itself."""
    new_state = _light_states[ "default" ]
    if args[0] in _light_states:
        new_state = _light_states[ args[0] ]
    #bulb.on = new_state
    # NOTE(review): a fresh Light is built from the discovered address here
    # instead of using the cached `bulb` object — confirm this is intentional.
    magichome.Light(bulb_addr).on = new_state
#########################
###   light colours   ###
#########################
# Named RGB presets; "default" is used when no (known) colour is requested.
_light_colours = {
    "red": (230, 1, 1),
    "green": (1, 230, 1),
    "blue": (1, 1, 230),
    "warm": (254, 197, 41),
    "default": (254, 197, 41),
}
def _light_set_colour( args ):
    """Set the bulb colour; args is the full command list, args[1] (optional)
    is the colour name.

    Bug fix: the final log line previously looked up the dict with the RGB
    tuple (``_light_colours[colour]``), raising KeyError on every call. The
    colour *name* is now tracked separately and logged with its RGB value.
    """
    args.pop(0)
    log.info( f"Args: {args}")
    name = "default"
    if args and args[0] in _light_colours:
        name = args[0]
    colour = _light_colours[ name ]
    bulb.rgb = colour
    log.info( f"Bulb set colour: {name} = {colour}" )
############################
###   light brightness   ###
############################
# Maps a 1-10 user level to the device's 0-255 brightness scale.
# NOTE(review): the extra key 100 -> 30 looks like a speech-recognition
# workaround (e.g. "one" heard as "hundred") — confirm.
_brightness_bindings = {
    1: 30,
    100: 30,
    2: 55,
    3: 80,
    4: 105,
    5: 130,
    6: 155,
    7: 180,
    8: 205,
    9: 230,
    10: 255
}
def _light_set_brightness( args ):
    """Set bulb brightness; args[1] (optional) is the level 1-10, as a digit
    or a number word."""
    args.pop(0)
    bulb.brightness = _get_brightness_lvl( args )
def _get_brightness_lvl( args ):
    """Resolve args[0] (digit string or number word) to a 0-255 brightness.

    Falls back to the maximum level (10 -> 255) when args is empty or the
    token is unrecognised.
    """
    # default
    lvl = 10
    if args:
        if helper.is_int( args[0] ) and int( args[0] ) in _brightness_bindings:
            lvl = int( args[0] )
        else:
            # Try to parse a spoken number word ("five" -> 5).
            conv = helper.word_to_num( args[0] )
            if conv is not None and conv in _brightness_bindings:
                lvl = conv
    return _brightness_bindings[ lvl ]
#######################
###   light modes   ###
#######################
# Maps a mode name to the magichome effect constant.
_mode_bindings = {
    "red": magichome.RED_GRADUALLY,
    "normal": magichome.NORMAL,
    "crossfade": magichome.RAINBOW_CROSSFADE,
    "default": magichome.NORMAL,
}
def _light_set_mode( args ):
    """Set the bulb effect mode; args[1] (optional) names the mode.

    Bug fix: previously ``args[0]`` was read unconditionally after the pop,
    raising IndexError when the command was just ["mode"]. Now guarded the
    same way as the colour and brightness handlers.
    """
    args.pop(0)
    mode = "default"
    if args and args[0] in _mode_bindings:
        mode = args[0]
    bulb.mode = _mode_bindings[ mode ]
#############################
###   all light actions   ###
#############################
# Dispatch table used by action(); keys are the first token of a command.
# 'on' and 'off' share one handler, which reads the token itself.
_light_actions = {
    "on": _light_set_state,
    "off": _light_set_state,
    "colour": _light_set_colour,
    "brightness": _light_set_brightness,
    "mode": _light_set_mode,
}
###################
###   helpers   ###
###################
def _discover():
    """Broadcast-discover a Magichome bulb on the local network.

    Caches the first discovered address in the module-global ``bulb_addr``
    and returns a Light for it, or None when nothing responds.
    """
    global bulb_addr
    # Broadcast address is hard-coded for the 192.168.4.0/24 network.
    addrs = magichome.Discovery("192.168.4.255").discover()
    if addrs:
        log.info( f"Bulb discovered {addrs[0]}." )
        bulb_addr = addrs[0]
        return magichome.Light( addrs[0] )
    else:
        log.info( "Bulb could not be discovered." )
        return None
| Dimfred/my_jarvis | jarvis_server/jarvis/actions/light.py | light.py | py | 3,096 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.info",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "apis.magichome.Light",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "apis.magichome",
"... |
17617853804 | import cv2
import numpy as np
import os
# Silence TensorFlow's C++ logging before Keras is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import joblib
from tensorflow.keras.models import load_model
# Emotion-classification CNN (expects 48x48 inputs, see predict_face).
model = load_model('../models/my_model.h5')
# SSD face detector (Caffe ResNet-10, 300x300 input).
opencv_dnn_model = cv2.dnn.readNetFromCaffe(prototxt="../models/deploy.prototxt.txt",
    caffeModel="../models/res10_300x300_ssd_iter_140000_fp16.caffemodel")
# Class-index -> emotion-name mapping saved at training time.
index_to_emotion = joblib.load("../models/id_to_emotion.pkl")
emotions = list(index_to_emotion.values())
# Preload one 50x50 RGBA emoji image per emotion for overlaying on frames.
emoji_arrs = {emot:cv2.imread(f"../emojis/{emot}.png", cv2.IMREAD_UNCHANGED) for emot in emotions}
emoji_arrs = {k: cv2.resize(v, (50, 50)) for k,v in emoji_arrs.items()}
def detect_faces(image, min_confidence=0.5):
    """Run the SSD face detector and return boxes as (x, y, width, height).

    image: BGR frame (H, W, 3). Detections below min_confidence are dropped.
    """
    image_height, image_width, _ = image.shape
    # Mean-subtracted 300x300 blob, as the Caffe model expects.
    blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(300, 300),
                                 mean=(104.0, 117.0, 123.0), swapRB=False, crop=False)
    opencv_dnn_model.setInput(blob)
    detections = opencv_dnn_model.forward()
    boxes = []
    for detection in detections[0][0]:
        if detection[2] <= min_confidence:
            continue
        # Coordinates are normalised [0, 1]; scale back to pixels.
        box = detection[3:]
        left = int(box[0] * image_width)
        top = int(box[1] * image_height)
        right = int(box[2] * image_width)
        bottom = int(box[3] * image_height)
        boxes.append((left, top, right - left, bottom - top))
    return boxes
# resizing the image
def predict_face(img):
    """Classify the emotion of a face crop with the global CNN model.

    img: BGR/grayscale face crop; resized to the model's 48x48 input.
    Returns the predicted emotion name.

    Bug fix: the resize failure was previously printed and swallowed, after
    which the unbound ``image`` variable raised UnboundLocalError on the next
    line, hiding the real cause. The original exception is now re-raised.
    """
    try:
        image = cv2.resize(img, (48,48), interpolation=cv2.INTER_AREA)
    except Exception as e:
        print(str(e))
        raise
    # Normalise to [0, 1] and add the batch dimension expected by Keras.
    image = image/255.
    img_fed = np.expand_dims(image, axis=0)
    scores = model.predict(img_fed)
    index = np.argmax(scores)
    return index_to_emotion[index]
class StatsStore:
    """Mutable per-emotion counter backed by a plain dict."""

    def __init__(self, indict):
        # Holds a reference to the caller-supplied mapping (not a copy).
        self.dic = indict

    def update(self, emot):
        """Increment the count for *emot* (the key must already exist)."""
        self.dic[emot] = self.dic[emot] + 1

    def reset(self):
        """Zero every counter while preserving the current set of keys."""
        self.dic = dict.fromkeys(self.dic, 0)
def overlay_emoji(img, pt, emotion):
    """Paste the emoji for *emotion* onto img with its top-left corner at pt.

    pt: (x, y) pixel coordinates. Returns img (modified in place).
    """
    x, y = pt
    emoji_img = emoji_arrs[emotion]
    # Pixels that are not fully opaque in the alpha channel...
    tp_mask = emoji_img[:, :, 3] != 255
    # ...are blacked out. NOTE(review): this mutates the cached array in
    # emoji_arrs in place (no copy) — harmless only because the result is
    # idempotent across calls; confirm this is intended.
    emoji_img[tp_mask, :3] = 0
    # Overwrite the destination region with the emoji's BGR channels.
    img[y:y+emoji_img.shape[0], x:x+emoji_img.shape[1], :] = emoji_img[:, :, :3]
    return img
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 11,
"usage_type": "call"
},
{
"api_nam... |
72065337954 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 14:31:00 2019
@author: andrew
"""
import numpy as np
from pyDOE import lhs as LHS
import subprocess
import os
import time
from scipy.special import erf as ERF
#import sys
from os.path import expanduser
import IscaOpt
# home directory
home = expanduser("~")
from hydro_plane import Ellipse
from multiprocessing import Process, Queue, current_process, freeze_support
try:
from data import support
except:
from .data import support
#from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
current = os.getcwd()
from scipy.spatial.distance import euclidean
import shutil
# APR added
try:
from data.SnappyHexOptimise import BasicHeatExchangerRun
except:
from .data.SnappyHexOptimise import BasicHeatExchangerRun
try:
from interfaces import EllipseInterface
except:
from .interfaces import EllipseInterface
try:
from base_class import Problem
except:
from .base_class import Problem
try:
from data import support
except:
from .data import support
#############
# Environment
#############
# Module-level run configuration. Exactly one `environment` value is active;
# the alternatives are kept commented for quick switching between clusters.
# On ISCA, there can be batches of up to the maximum number of samples
#environment = 'isambard'
#environment = 'isambard_test'
#environment = 'isca'
#environment = 'isca_test'
#environment = 'isca_forrester'
environment = 'local_forrester'
no_of_nodes = 4
# Locally, there can only be three "nodes" as each one has 2 CPUS, and I only have 8 CPUs
#environment = 'local'
#no_of_nodes = 3
##########
# Sampling
##########
#sampling = 'manual'
#sampling = 'latin'
sampling = 'latin_forrester'
##########################
# Selection of bash script
##########################
# Choose the submission script matching the active environment; the default
# (else branch) is the local Forrester run.
# this bash script is for the OpenFOAM run
if environment == 'isca':
    bash = 'run_script_isca_parallel.sh'
elif environment == 'isca_test':
    # initial sampling does not need to use the optimiser!!!!
    bash = 'isca_initial_sampling_parallel_test.sh'
elif environment == 'isambard':
    # initial sampling does not need to use the optimiser!!!!
    bash = 'isambard_initial_sampling_parallel.sh'
elif environment == 'isambard_test':
    # initial sampling does not need to use the optimiser!!!!
    bash = 'isambard_initial_sampling_parallel_test.sh'
elif environment == 'isca_forrester':
    bash = 'isca_forrester_initial_sampling_parallel.sh'
else:
    bash = 'local_forrester_initial_sampling_parallel.sh'
################
# Create failure
################
# When True, failed/constraint-violating evaluations are modelled explicitly.
failure = True
# HeadCell object:
class HeadCell(Problem, EllipseInterface):
    """Hydrocyclone head-cell optimisation problem: an elliptical geometry
    parameterised by position, rotation and radii, evaluated with OpenFOAM."""

    def __init__(self, settings):
        # Paths to the template case, the working case and the mesh storage;
        # all overridable through the settings dict.
        self.source_case = settings.get('source_case', './data/HeadCell/source/')
        self.case_path = settings.get('case_path', './data/HeadCell/case_local/')
        self.mesh_path = settings.get('mesh_path', './data/HeadCell/meshes/')
        self.setup()

    def setup(self, verbose=False):
        """
        Define the geometric design-space bounds and initialise the
        EllipseInterface with them.
        """
        # Base length scale, maximum rotation angle (deg) and reference radius.
        self.L = 4.0
        self.A = 45.0
        self.R = 1.0
        # Centre position bounds (x, z), rotation bounds, and major/minor
        # ellipse radius bounds derived from the scales above.
        self.xlb, self.xub = -self.L, self.L
        self.zlb, self.zub = -self.L, self.L
        self.anglelb, self.angleub = 0, self.A
        self.majorlb, self.majorub = 4.0*self.R, 8.0*self.R
        self.minorlb, self.minorub = 4.0*self.R, 8.0*self.R
        EllipseInterface.__init__(self, self.xlb, self.xub, self.zlb, self.zub, \
                self.anglelb, self.angleub, self.majorlb, self.majorub, \
                self.minorlb, self.minorub)

    def info(self):
        raise NotImplementedError

    def get_configurable_settings(self):
        raise NotImplementedError

    def run(self, shape, verbose=False):
        """Write the shape as an STL and run the CFD cost function.

        shape: (xp, yp, rp) point/radius description from
        convert_decision_to_shape. Returns the (t, p) objective pair.
        NOTE(review): relies on self.stl_dir, self.stl_file_name and
        self.problem, which are not set in this class — presumably provided
        by Problem/EllipseInterface; confirm.
        """
        xp, yp, rp = shape
        support.circle_to_stl(rp, xp, yp, \
                file_directory=self.case_path+self.stl_dir, file_name=self.stl_file_name, draw=False)
        t, p = self.problem.cost_function(sense="multi", verbose=verbose)
        return t, p

    def evaluate(self, decision_vector, verbose=False):
        """Validate the decision vector against the cheap constraint, convert
        it to a shape and evaluate it; returns None if the CFD run fails."""
        if not self.constraint(decision_vector):
            raise ValueError('Constraint violated. Please supply a feasible decision vector.')
        shape = self.convert_decision_to_shape(decision_vector)
        try:
            return self.run(shape, verbose)
        except Exception as e:
            # Best-effort: report the failure rather than aborting the
            # optimisation loop.
            print('Solution evaluation failed.')
            print(e)
def lhs_initial_samples(n_dim, ub, lb, n_samples=4, cfunc=None, cargs=(), ckwargs={}):
    """
    Generate Latin hypercube samples from the decision space using pyDOE.

    Parameters.
    -----------
    n_dim (int): dimensionality of the decision space.
    ub (float or numpy array): upper bound(s) of the decision space.
    lb (float or numpy array): lower bound(s) of the decision space.
    n_samples (int): the number of samples to take.
    cfunc (method): a cheap constraint function.
    cargs (tuple): arguments for cheap constraint function.
    ckwargs (dictionary): keyword arguments for cheap constraint function.

    Returns a set of decision vectors.
    """
    # NOTE(review): seeding the *global* NumPy RNG here affects all later
    # random draws in the process, not only this sampling step.
    seed = 1234
    np.random.seed(seed)
    samples = LHS(n_dim, samples=n_samples)
    # Rescale the unit-hypercube samples into [lb, ub].
    scaled_samples = ((ub - lb) * samples) + lb
    if cfunc is not None: # check for constraints
        print('Checking for constraints.')
        # Keep only the samples that satisfy the cheap constraint.
        scaled_samples = np.array([i for i in scaled_samples if cfunc(i, *cargs, **ckwargs)])
    return scaled_samples
# constaints:
def gap_and_checkMesh_constraint(x, layout):
"""
Create meshes and check the constraints using the decision vector and layout.
Parameters.
-----------
x (numpy array): the decision vector.
layout (Ellipse object): the object for the generation of the pointwise files.
Returns whether the constraint was successfully passed.
"""
# get the current directory
current = os.getcwd()
# defaults for success:
minimum_gap_success = True
checkMesh_success = False
success = False
# checkMesh:
utility3 = "checkMesh"
# platform specific changes (not used by Isambard):
if environment == 'blades':
Pointwise_path='/usr/local/Pointwise/PointwiseV18.0R2/pointwise'
elif environment == 'local':
Pointwise_path='/home/andrew/Pointwise/PointwiseV18.0R2/pointwise'
elif ((environment == 'isca') or (environment == 'isca_test')):
Pointwise_path = '/gpfs/ts0/home/apr207/Pointwise/PointwiseV18.0R2/pointwise'
else:
#local is default
Pointwise_path='/home/andrew/Pointwise/PointwiseV18.0R2/pointwise'
# paths:
mesh_path = "/data/HeadCell/meshes/"
source_path = "/data/HeadCell/source/"
# the neccessary name for the child directory:
dir_name = "_"
for j in range(len(x)):
dir_name += "{0:.4f}_".format(x[j])
dir_name = dir_name.replace("[","")
dir_name = dir_name.replace("]","")
# make the child directory:
subprocess.call(['mkdir', '-p', current + mesh_path + dir_name + '/system' ])
subprocess.call(['mkdir', '-p', current + mesh_path + dir_name + '/constant/polyMesh' ])
# location of centre of ellipse:
bottom_x_centre = x[0]
bottom_z_centre = x[1]
# rotation angle:
rot_angle = x[2]*(np.pi/180.0)
# ellipse sizes:
a = x[3]
b = x[4]
# create space for corners and gaps:
corners = np.zeros((4,2))
gaps = np.zeros(4)
# the bottom of the tray:
y_bottom = -14.9375
# very top of tray:
p1 = np.array([0, 6.0625, 0])
# very bottom of tray:
p2 = np.array([bottom_x_centre, y_bottom, bottom_z_centre])
# the radii at the first tray layer:
a_top = layout.get_ellipse_radii(21.25, a, -2.34375)
b_top = layout.get_ellipse_radii(21.25, b, -2.34375)
# the points at the first tray layer:
c_top = layout.get_ellipse_points(p1, p2, -2.34375)
# the angle at the first tray layer:
angle_top = layout.get_ellipse_angles(rot_angle, -2.34375)
# obtain the four corners:
for i in range(1,5):
angles = np.linspace(start=0.5*i*np.pi, stop=0.5*(i-1)*np.pi, num=1, endpoint=True)
corners[i-1,:] = layout.generate_ellipse(c_top, a_top, b_top, angles, theta=angle_top+np.pi, n=0)
# compute horizontal gaps:
for j in range(0,4):
gaps[j] = 21.25 - np.sqrt((corners[j,0])**2 + (corners[j,1])**2)
# minimim horizontal gap:
min_gap=min(gaps)
# get the index of the minimum horizontal gap and the maximum x-extent:
index = np.where(gaps == min_gap)
x_extent_vector = abs(corners[index])
x_extent= np.amax(x_extent_vector)
# Definition of the x-y plane locations
x1 = 21.25
x2 = x_extent
x3 = x_extent
x4 = 21.25
y1 = -2.9375
y2 = -2.34375
y3 = -6.59375
y4 = 1.3125
# Hero's formula for the normal gap
A = y2 - y3
B = np.sqrt((y4 - y2)**2 + (x4 - x2)**2)
P = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
S = (A+B+P)/2
Area = np.sqrt(S*(S-A)*(S-B)*(S-P))
H = 2*Area/B
# constraints:
if (H < 2.0):
# is the centre in the right place?:
minimum_gap_success = False
else:
# copy controlDict and fvSchemes ssfor checkMesh
shutil.copyfile(current + source_path + 'system/controlDict', current + mesh_path + dir_name + '/system/controlDict')
shutil.copyfile(current + source_path + 'system/fvSchemes', current + mesh_path + dir_name + '/system/fvSchemes')
shutil.copyfile(current + source_path + 'system/fvSolution', current + mesh_path + dir_name + '/system/fvSolution')
# copy all mesh files to mesh path
for filename in os.listdir(current + source_path):
full_file_name = os.path.join(current + source_path, filename)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, current + mesh_path + dir_name)
# update Pointwise files using decision vector (needs checking)
layout.update(x)
# decide where we are to run Pointwise:
if ((environment == 'isambard') or (environment == 'isambard_test')):
# copy pointwise_script.sh to submit script
shutil.copyfile(current + '/pointwise_script.sh', current + mesh_path + dir_name + '/pointwise_script.sh')
# write the location of the current directory to a shell script - used by ssh and qsub
with open(home + "/pointwise_directory.sh", "a") as myfile:
myfile.write('export POINTWISE_DIRECTORY=' + '"'+ current + mesh_path + dir_name + '"' + '\n')
print("checkMesh_constraint(): written current directory to a file")
# name of the script to submit pointwise with
bash_pointwise = 'pointwise_script.sh'
# change bash_pointwise in order to run from current directory
with open(current + mesh_path + dir_name + '/' + bash_pointwise, 'r') as f:
data = f.readlines()
# read in each line and change the directory location
for line in range(len(data)):
if '#PBS -o' in data[line]:
data[line] = '#PBS -o 10.141.0.1:' + current + mesh_path + dir_name + '/log.pointwise' + '\n'
if '#PBS -e' in data[line]:
data[line] = '#PBS -e 10.141.0.1:' + current + mesh_path + dir_name + '/err.pointwise' + '\n'
# write the changes to bash_pointwise
with open(current + mesh_path + dir_name + '/' + bash_pointwise, 'w') as f:
f.writelines(data)
# use ssh to send the job to phase 1
subprocess.call(['ssh','ex-aroberts@login-01.gw4.metoffice.gov.uk', 'source $HOME/pointwise_directory.sh; export PBS_HOME=/cm/shared/apps/pbspro/var/spool; export PBS_EXEC=/cm/shared/apps/pbspro/19.2.4.20190830141245; /cm/shared/apps/pbspro/19.2.4.20190830141245/bin/qsub $POINTWISE_DIRECTORY/pointwise_script.sh;'], cwd=current)
# check if the log files exist
while not (os.path.exists(current + mesh_path + dir_name + '/' + 'log.pointwise') and os.path.exists(current + mesh_path + dir_name + '/' + 'err.pointwise')):
time.sleep(1)
print("checkMesh_constraint(): pointwise completed")
# remove the shell script from the home directory
subprocess.call(['rm', home + '/' + 'pointwise_directory.sh'])
else:
# run pointwise on isca or locally using Pointwise_path:
subprocess.call([Pointwise_path, '-b', current + mesh_path + dir_name + '/Hydro_V18_3_tray_APR_grit_pot_parameterised_ellipse_correction.glf'], cwd=current, \
stdout = open(current + mesh_path + dir_name + '/log.pointwise', 'w'), \
stderr = open(current + mesh_path + dir_name + '/err.pointwise', 'w'))
# run checkmesh:
subprocess.call([utility3, '-case', current + mesh_path + dir_name], cwd=current, \
stdout = open(current + mesh_path + dir_name + '/log.checkMesh', 'w'), \
stderr = open(current + mesh_path + dir_name + '/err.checkMesh', 'w'))
# check if the mesh was successful and assign boolean if succeeded
with open(current + mesh_path + dir_name + "/log.checkMesh") as f:
if 'Mesh OK.' in f.read():
checkMesh_success = True
# Convert boolean into integer
if minimum_gap_success == True:
g_success = 1
else:
g_success = 0
# Convert boolean into integer
if checkMesh_success == True:
c_success = 1
else:
c_success = 0
# Load the constraints file
file_constraints = 'constraints.npz'
data = np.load(current + '/' + file_constraints)
# Load and set the decision vector
X = data['arr_0']
X_current = x
if X.size != 0:
X_new = np.vstack((X, X_current))
print('appended ', X_current, ' to ', X)
print('this gives ', X_new)
else:
X_new = X_current
print('set ', X_new, ' to ', X_current)
# Load and set gap constaint success
G = data['arr_1']
G_current = g_success
if G.size != 0:
G_new = np.vstack((G, G_current))
print('appended ', G_current, ' to ', G)
print('this gives ', G_new)
else:
G_new = G_current
print('set ', G_new, ' to ', G_current)
# Load and set checkMesh success
C = data['arr_2']
C_current = c_success
if C.size != 0:
C_new = np.vstack((C, C_current))
print('appended ', C_current, ' to ', C)
print('this gives ', C_new)
else:
C_new = C_current
print('set ', C_new, ' to ', C_current)
# Save the decision vector and checkMesh to a file:
try:
np.savez(current + '/' + file_constraints, X_new, G_new, C_new)
print('Data saved in file: ', file_constraints, ' X ', X_new, ' gap constraint ', G_new, ' checkMesh ', C_new)
except Exception as e:
print(e)
print('Data saving failed.')
# try:
# np.savez(current + '/' + file_C, initial_X, initial_C)
# print('Data saved in file: ', file_C)
# except Exception as e:
# print(e)
# print('Data saving failed.')
print('gap_and_checkMesh_constraint(x): minimum_gap_success passed? ', str(minimum_gap_success), ' as H = ', H)
print('gap_and_checkMesh_constraint(x): checkMesh_success passed? ', str(checkMesh_success))
success = checkMesh_success and minimum_gap_success
# if either it failed the centre location check or the checkMesh, delete the mesh:
if(checkMesh_success == False):
# remove mesh if there is a mesh failure
subprocess.call(['rm', '-r', current + mesh_path + dir_name + '/'])
print('centre_constraint(): mesh deleted')
return success
#
# NOT USED FOR INITIAL ISAMBARD TEST:
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# NOT USED FOR INITIAL ISAMBARD TEST:
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# NOT USED FOR INITIAL ISAMBARD TEST:
#
def mul(d):
if ((environment == 'isca') or (environment == 'isca_test') or (environment == 'isca_forrester')):
start_msub = time.time()
subprocess.call(['msub', '-K', d + '/' + bash], cwd=d, \
stdout = open(d + '/log.subprocess', 'w'), \
stderr = open(d + '/err.subprocess', 'w'))
# submit job using qsub if on isambard:
elif ((environment == 'isambard') or (environment == 'isambard_test')):
start_msub = time.time()
subprocess.call(['qsub', '-W', 'block=true', d + '/' + bash], cwd=d, \
stdout = open(d + '/log.subprocess', 'w'), \
stderr = open(d + '/err.subprocess', 'w'))
else:
start_msub = time.time()
subprocess.call(['sh', d + '/' + bash], cwd=d, \
stdout = open(d + '/log.subprocess', 'w'), \
stderr = open(d + '/err.subprocess', 'w'))
return "Done time," + str(time.time() - start_msub)
#
# NOT USED FOR INITIAL ISAMBARD TEST:
#
def queue():
filedirs = initialisation()
task_list = [(mul, (d,)) for d in filedirs]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in task_list:
task_queue.put(task)
# Start worker processes
for i in range(no_of_nodes):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Unordered results:')
for i in range(len(task_list)):
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(no_of_nodes):
task_queue.put('STOP')
def no_queue():
"""
Submit the job to the relevant queue without a python queue.
Parameters.
-----------
There are no parameters.
Does not return anything.
"""
filedirs = initialisation()
# submit job using moab if on isca:
if ((environment == 'isca') or (environment == 'isca_test') or (environment == 'isca_forrester')):
for d in range(len(filedirs)):
subprocess.Popen(['msub', filedirs[d] + '/' + bash], cwd=filedirs[d], \
stdout = open(filedirs[d] + '/log.subprocess', 'w'), \
stderr = open(filedirs[d] + '/err.subprocess', 'w'))
# submit job using qsub if on isambard:
elif ((environment == 'isambard') or (environment == 'isambard_test')):
for d in range(len(filedirs)):
subprocess.Popen(['qsub', filedirs[d] + '/' + bash], cwd=filedirs[d], \
stdout = open(filedirs[d] + '/log.subprocess', 'w'), \
stderr = open(filedirs[d] + '/err.subprocess', 'w'))
# submit job as a shell script if on local pc:
else:
for d in range(len(filedirs)):
subprocess.Popen(['sh', filedirs[d] + '/' + bash], cwd=filedirs[d], \
stdout = open(filedirs[d] + '/log.subprocess', 'w'), \
stderr = open(filedirs[d] + '/err.subprocess', 'w'))
def initialisation():
"""
Samples the design space,
Writes the decision vector to a file
Creates an empty initial samples file
Copies and edits the qsub or msub submission script.
Parameters.
-----------
There are no parameters.
Returns a list of directories for submission.
"""
if(sampling == 'latin_forrester'):
# number of dimensions and samples
n_dim = 1
#n_samples = 11*n_dim -1
n_samples = 5
ub = 1.4
lb = 0.3
print("initialisation(): number of Latin Hypercube samples", str(n_samples))
samples = lhs_initial_samples(n_dim, ub, lb, n_samples, cfunc=None, cargs=(), ckwargs={})
samples = np.array([np.array([0.5501084598698482]), np.array([0.6004338394793927]), np.array([0.9509761388286335]), np.array([1.2008676789587853]), np.array([1.3015184381778744])])
case_path = "/data/Forrester/case_local/"
source_file = 'run_case.py'
# create filedirs
filedirs = []
# loop through the list to create directories:
for s in samples:
# create a working directory from the sample:
dir_name = "_"
for j in range(len(s)):
dir_name += "{0:.4f}_".format(s[j])
# replace any directories containing []
dir_name = dir_name.replace("[","")
dir_name = dir_name.replace("]","")
# add the name to a list of directories
filedirs.append(current + case_path + dir_name)
# create the directory from the last in the list and
subprocess.call(['mkdir', filedirs[-1] + '/'])
# copy run_case.py to that directory
subprocess.call(['cp', '-r', current + '/' + source_file, filedirs[-1] + '/'])
# write the decision vector to a file
with open(filedirs[-1] + "/decision_vector.txt", "a") as myfile:
for i in range(0,len(s)):
myfile.write(str(s[i])+ '\n')
print("initialisation(): written decision vector to a file")
# read decision vector and write to npz file
print('Writing empty initial_samples.npz file...')
# hyper volume improvement is null
hpv = []
initial_time = 0
initial_X = []
initial_Y = []
# the name of the sim_file is initial_samples.npz
sim_file = 'initial_samples.npz'
# remove npz file if it exists
subprocess.call(['rm', '-r', current + '/' + sim_file])
# initial_X is decision vector
# initial_Y is 2 objectives
# hpv is hypervolume improvement
# initial_time is zero
# initial_convergence is convergence failure
try:
np.savez(current + '/' + sim_file, initial_X, initial_Y, hpv, initial_time)
print('Data saved in file: ', sim_file)
except Exception as e:
print(e)
print('Data saving failed.')
else:
# set the location of the case path
case_path = "/data/HeadCell/case_local/"
source_path = "/data/HeadCell/source/."
source_file = 'run_case.py'
# remove the current case path and create a new case path
subprocess.call(['rm', '-r', current + case_path])
subprocess.call(['mkdir','-p', current + case_path])
# get the lower and upper bounds
lb, ub = prob.get_decision_boundary()
print("lb, ub", lb, ub)
# number of dimensions and samples
n_dim = 5
n_samples = 11*n_dim -1
# read decision vector and write to checkMesh.npz file
print('Writing empty constraints.npz file...')
# gap and checkMesh is null
initial_X = []
initial_H = []
initial_C = []
file_constraints = 'constraints.npz'
subprocess.call(['rm', '-r', current + '/' + file_constraints])
# initial_X is decision vector
# initial_H is gap constraint
# initial_C is checkMesh
try:
np.savez(current + '/' + file_constraints, initial_X, initial_H, initial_C)
print('Data saved in file: ', file_constraints)
except Exception as e:
print(e)
print('Data saving failed.')
# take samples for the decision vector, either manual or latin hypercube
if sampling == 'manual':
print("initialisation(): number of manual samples", str(9))
# 3 deliberate passes:
one = np.array([0.5, 0.5, 10.0, 4.0, 4.0]) # 5,145,018 cells, 906 severly orthoginal faces, passes checkMesh Should result in H=3.02 inches
two = np.array([1.0, 1.0, 20.0, 4.0, 5.0]) # 5,095,425 cells, 1087 severly orthoginal faces, passes checkMesh Should result in H=2.85 inches
three = np.array([1.5, 1.5, 30.0, 4.0, 6.0]) # 4,993,048 cells, 1331 severly orthoginal faces, passes checkMesh Should result in H=2.67 inches
# 3 deliberate failures
four = np.array([4.0, 4.0, 10.0, 8.0, 6.0]) # Should result in H=1.51 inches
five = np.array([4.0, -4.0, 30.0, 8.0, 4.0]) # Should result in H=1.62 inches
six = np.array([4.0, -4.0, 45.0, 8.0, 4.0]) # Should result in H=1.76 inches
# Mix of success and failure
seven = np.array([1.0, 1.0, 10.0, 4.0, 5.0]) # Should result in H=2.83 inches
eight = np.array([1.5, 1.5, 10.0, 4.0, 6.0]) # Should result in H=2.61 inches
nine = np.array([1.0, 1.0, 30.0, 4.0, 5.0]) # Should result in H=2.88 inches
all_samples = np.array([one, two, three, four, five, six, seven, eight, nine])
samples = []
for x in all_samples:
constraint_success = gap_and_checkMesh_constraint(x, layout)
if constraint_success == True:
samples.append(x)
else:
print("initialisation(): number of Latin Hypercube samples", str(n_samples))
samples = lhs_initial_samples(n_dim, ub, lb, n_samples, cfunc=gap_and_checkMesh_constraint, cargs=(layout,), ckwargs={})
# create filedirs
filedirs = []
# loop through the list to create directories:
for s in samples:
# create a working directory from the sample:
dir_name = "_"
for j in range(len(s)):
dir_name += "{0:.4f}_".format(s[j])
# replace any directories containing []
dir_name = dir_name.replace("[","")
dir_name = dir_name.replace("]","")
# add the name to a list of directories
filedirs.append(current + case_path + dir_name)
# create the directory from the last in the list and
subprocess.call(['mkdir', filedirs[-1] + '/'])
# copy all source files into the newly created directory
subprocess.call(['cp', '-r', current + source_path, filedirs[-1]])
# copy run_case.py to that directory
subprocess.call(['cp', '-r', current + '/' + source_file, filedirs[-1] + '/'])
# write the decision vector to a file
with open(filedirs[-1] + "/decision_vector.txt", "a") as myfile:
for i in range(0,len(s)):
myfile.write(str(s[i])+ '\n')
print("initialisation(): written decision vector to a file")
# read decision vector and write to npz file
print('Writing empty initial_samples.npz file...')
# hyper volume improvement is null
hpv = []
initial_time = 0
initial_X = []
initial_Y = []
initial_convergence = []
# the name of the sim_file is initial_samples.npz
sim_file = 'initial_samples.npz'
# remove npz file if it exists
subprocess.call(['rm', '-r', current + '/' + sim_file])
# initial_X is decision vector
# initial_Y is 2 objectives
# hpv is hypervolume improvement
# initial_time is zero
# initial_convergence is convergence failure
try:
np.savez(current + '/' + sim_file, initial_X, initial_Y, hpv, initial_time, initial_convergence)
print('Data saved in file: ', sim_file)
except Exception as e:
print(e)
print('Data saving failed.')
# decide environment and adjust bash script for running.
if ((environment == 'isca') or (environment == 'isca_test')):
# if the environment is isca
for d in range(len(filedirs)):
# copy the bash run script to the run directory
subprocess.call(['cp', current + '/' + bash, filedirs[d] + '/'])
# open the bash script for running
with open(filedirs[d] + '/' + bash, 'r') as f:
data = f.readlines()
# change the line to the correct directory
for line in range(len(data)):
if '#PBS -d' in data[line]:
data[line] = '#PBS -d '+ filedirs[d]+'/' + '\n'
#write the lines to the file
with open(filedirs[d] + '/' + bash, 'w') as f:
f.writelines(data)
elif ((environment == 'isambard') or (environment == 'isambard_test')):
# if the environment is isambard
for d in range(len(filedirs)):
# copy the bash run script to the run directory
subprocess.call(['cp', current + '/' + bash, filedirs[d] + '/'])
# open the bash script for running
with open(filedirs[d] + '/' + bash, 'r') as f:
data = f.readlines()
# change the line to the correct directory
for line in range(len(data)):
if '#PBS -d' in data[line]:
data[line] = '#PBS -d '+ filedirs[d]+'/' + '\n'
#write the lines to the file
with open(filedirs[d] + '/' + bash, 'w') as f:
f.writelines(data)
else:
# if the environment is local:
for d in range(len(filedirs)):
# copy the bash script to the local directory
subprocess.call(['cp', current + '/' + bash, filedirs[d] + '/'])
print('Total number of simulations, ', len(filedirs))
print('All directories created.')
return filedirs
def transform_sequence(n, lb=0, ub=185, scale=0.01, lw=0.25, uw=0.75):
np = (n/ub * (ub -lb)) - ((ub -lb)/2)
nub = (ub -lb)/2
nlb = -(ub-lb)/2
w = (ERF(scale*np) - ERF(scale*nlb))/(ERF(scale*nub) - ERF(scale*nlb))
w = (w * (uw - lw)) + lw
return w
def expected_improvement_weighting(self, x, obj_sense=1, lb=None, ub=None, weight=None, n=None,\
cfunc=None, cargs=(), ckwargs={}):
"""Calculate the expected improvement at a given set of input parameters,
based on the trained model. See the following for details.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.139.9315&rep=rep1&type=pdf (maximisation)
http://www.schonlau.net/publication/_96jsm_global_optimization.pdf (minimisation)
Parameters.
-----------
x: a set of input parameters.
obj_sense: whether to maximise or minimise. Key for the input:
1: maximise
-1: minimise (default)
Returns the expected improvement value at x.
"""
#print(x, x.shape)
if len(x.shape) < 2:
x = np.array(x)
x = x[:, np.newaxis].T
if lb is not None:
#print(x, lb, ub)
xp = np.clip(x.copy(), lb, ub)
rows = x.shape[0]
e = np.ones(rows)
e_inds = [i for i in range(xp.shape[0]) \
if np.any(np.all(np.abs(xp[i] - self.X) <= 1e-9, axis=1))]
if len(e_inds) == rows:
return e[:, np.newaxis].T
c_inds = []
if cfunc is not None:
#print('==========')
#print('xp: ', xp)
if xp.shape[0] == 1:
#print(cfunc(None, xp[0], *cargs, **ckwargs))
if not cfunc(xp[0], *cargs, **ckwargs):
#print('Infeasible solution.')
return np.zeros((1,1))
else:
#print('------')
#for i in xp:
# print(cfunc(None, i, *cargs, *ckwargs))
#c_inds = np.array([i for i in range(xp.shape[0]) if not cfunc(xp[i], *cargs, **ckwargs)])
xp = np.array([i for i in xp if cfunc(i, *cargs, **ckwargs)])
y, std_dev = self.predict(xp)
epsilon = 0
if n is not None:
weight = transform_sequence(n)
f_best = obj_sense * np.max(obj_sense * self.Y) + (obj_sense * epsilon)
u = obj_sense * (y - f_best) / std_dev
# normal cumulative distribution function
PHI = (0.5 * ERF(u/np.sqrt(2.0)))+0.5
# normal density function
phi = 1/np.sqrt(2.0*np.pi)*np.exp(-u**2/2.0)
if weight is None:
a = std_dev * ((u * PHI) + (phi))
else:
a = std_dev * ((weight * u * PHI) + ((1 - weight) * phi))
a[e_inds] = 0
#a[c_inds] = 0
#print(a.shape)
return a
def expected_improvement(self, x, obj_sense=1, lb=None, ub=None,\
cfunc=None, cargs=(), ckwargs={}):
"""
Calculate the expected improvement at a given set of decision vectors,
based on the trained model. See the paper for details.
Parameters.
-----------
x (np.array): a set of decision vectors.
obj_sense (int): whether to maximise or minimise. Key for the input:
1: maximise
-1: minimise (default)
lb (np.array): lower bound for the decision space.
ub (np.array): upper cound for the decision space.
cfunc (function): cheap constraint fucntion.
cargs (tuple): arguments for the constraint function.
ckwargs (dict): keyword arguments for the constraint function.
Returns the expected improvement values at x.
"""
if len(x.shape) < 2:
x = np.array(x)
x = x[:, np.newaxis].T
if lb is not None:
xp = np.clip(x.copy(), lb, ub)
rows = x.shape[0]
b_inds = [i for i in range(rows) if not np.all(xp[i]==x[i])]
if len(b_inds) == rows: # outside boundary = zero exp. imp.
return np.zeros((rows, 1))
e_inds = [i for i in range(x.shape[0]) \
if np.any(np.all(np.abs(x[i] - self.X) <= 1e-9, axis=1))]
if len(e_inds) == rows: # violates xtol = zero exp. imp.
return np.zeros((rows, 1))
if cfunc is not None:
c_inds = []
if xp.shape[0] == 1:
if not cfunc(xp[0], *cargs, **ckwargs):
return np.zeros((1,1))
else:
c_inds = [i for i in range(xp.shape[0]) if not cfunc(xp[i], *cargs, **ckwargs)]
if len(c_inds) == rows:
return np.zeros((rows, 1))
y, std_dev = self.predict(xp)
f_best = obj_sense * np.max(obj_sense * self.Y)
u = obj_sense * (y - f_best) / std_dev
sinds = [i for i in range(std_dev.shape[0]) if std_dev[i] == 0] # to get rid of NaN issues
u[sinds] = 1e30
# normal cumulative distribution function
PHI = (0.5 * ERF(u/np.sqrt(2.0)))+0.5
# normal density function
phi = 1/np.sqrt(2.0*np.pi)*np.exp(-u**2/2.0)
ei = std_dev * ((u * PHI) + (phi))
ei[sinds] = 0
if lb is not None:
ei[e_inds] = 0
ei[b_inds] = 0
if cfunc is not None:
ei[c_inds] = 0
return ei
def Forrester_BO_Function(x):
# if (x > 1.3999):
# return 100000
# elif (x < 0.3001):
# return 100000
# elif ((x > 0.76) and (x < 0.77)):
# return 100000
# else:
return (6*x - 2)**2 * np.sin(12*x - 4)
if __name__ == '__main__':
current = os.getcwd()
if (sampling == 'latin_forrester'):
start_sim = time.time()
print('Forrester run.' + ' in ' + current)
subprocess.call(['rm', '-r', current + '/data/Forrester/case_local/'])
subprocess.call(['mkdir', '-p', current + '/data/Forrester/case_local/'])
print("main: removed old case, copied new case")
else:
start_sim = time.time()
print('Demo run for Hydro case.')
seed = 1005
np.random.seed(seed)
prob = HeadCell({})
# sets values and removes olds case directory
subprocess.call(['rm', '-r', prob.case_path])
subprocess.call(['rm', '-r', prob.mesh_path])
subprocess.call(['cp', '-r', prob.source_case, prob.case_path])
print("main: removed old case, copied new case")
subprocess.call(['mkdir', prob.mesh_path])
# get upper and lower bounds:
lb, ub = prob.get_decision_boundary()
print("lb", lb)
print("ub", ub)
sim_id = 20191002
init_file_name = None
n_samples = 10 # (11n - 1)
budget = 90 # (11n - 1) + (2/3)(11n-1)
layout = Ellipse(lb, ub)
#initialisation()
#no_queue()
freeze_support()
queue()
print("Number of simultaneous runs, ", no_of_nodes)
print("Time taken (seconds), ", (time.time()-start_sim))
# Serial optimiser
start = time.time()
n_obj = 1
n_dim = 1
n_samples = 5
budget = n_samples + 3
lb = 0.3
ub = 1.4
res = IscaOpt.Optimiser.EMO(Forrester_BO_Function, \
settings={'n_dim':n_dim, \
'obj_sense':[-1]*n_obj, \
'n_obj':n_obj, \
'method_name':'EGO', \
'lb':np.array([lb]), \
'ub':np.array([ub]), \
'n_samples':n_samples, \
'budget':budget, \
'visualise':True, \
'multisurrogate':False, \
'init_file':current + '/initial_samples.npz'})
print('Objective function value:', res)
print('Optimiser Time taken:', time.time()-start, ' seconds.')
| en9apr/M_penalised_funtions | forrester_BO_search.py | forrester_BO_search.py | py | 38,867 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.expanduser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "base_class.Problem",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "interfaces.EllipseIn... |
31284936895 | import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import json
import pandas as pd
from urllib.request import Request, urlopen
from urllib.error import URLError
import json
from pathlib import Path
import socket
from collections import namedtuple
#########################################################
#class
class Sender(object):
def __init__(self,
name,
host='192.168.1.214',
port=8745,
encoding="cp1251"):
self.host = host
self.port = port
self.encoding = encoding
self.name = name
self.ip = socket.gethostbyname(socket.gethostname())
self.project_id = self.__get_project_id()
self.group = name
def __sock(self, route, message=None):
url = f"http://{self.host}:{self.port}/{route}"
request = Request(url)
request.add_header('Content-Type', 'application/json; charset=utf-8')
if message is None:
message = json.dumps(self.name).encode(self.encoding)
return urlopen(request, message)
def __get_project_id(self):
with self.__sock('get_project_id') as s:
return json.loads(s.read())
def log(self, key, value):
_log = json.dumps((self.ip,
self.project_id,
self.name,
self.group,
key,
dump2json(value))).encode(self.encoding)
with self.__sock('log', _log) as s:
return json.loads(s.read())
def update_group(self, group):
self.group = dump2json(group) # general.
def list_logs(self):
LOG = namedtuple('log', 'date ip project_id group process_name key value')
with self.__sock('get_all_logs') as s:
return [LOG(*log) for log in json.loads(s.read())]
#####################################################################
#functions
def get_data_mock(file):
with open(file, 'r') as data:
data = data.read()
logs = json.loads(data)
return logs
def get_data():
s = Sender('Test', '192.168.1.214')
logs = s.list_logs()
return logs
def __get_df():
#logs = get_data_mock('logs.json')
logs = get_data()
df = pd.DataFrame(columns=['id',
'name',
'sample',
'aquired',
'proteins',
'peptides',
'queries',
'hits',
'type',
'analyzed',
'msResolution',
'ChromFWHM_Min'])
lenghth = len(logs)
i = 0
f1 = False
f2 = False
while i < lenghth:
for x in logs[i]:
if x == "create_params_file:output":
stats = logs[i][6]
stats = stats.split(',')
aquired = stats[3].split('"')
aquired = aquired[3]
msResolution = stats[10].split(':')
msResolution = float(msResolution[2])
ChromFWHM_Min = stats[7].split(':')
ChromFWHM_Min = float(ChromFWHM_Min[2])
f1 = True
if x == "get_search_stats:output":
#date
analyzed = logs[i][0].split('.', 2)
analyzed = analyzed[0]
#experiment number
id = logs[i][2]
type = logs[i][3]
#peptides_cnt + proteins_cnt + hits_cnt + queries_cnt
search_stats = logs[i][6]
stats = search_stats.split(',', 7)
peptides = stats[5].split(':', 2)
peptides = int(peptides[1])
proteins = stats[6].split(':', 2)
proteins = proteins[1].split('}',2)
proteins = int(proteins[0])
queries = stats[3].split(':', 2)
queries = int(queries[1])
hits = stats[4].split(':', 2)
hits = int(hits[1])
#acquired_name + sample_description
name = stats[1].split(':', 2)
name = name[1].split('"', 3)
name = name[1]
sample = stats[2].split(':', 2)
sample = sample[1].split('"', 3)
sample = sample[1]
f2 = True
if (f1==True) and (f2==True):
df_temp = pd.DataFrame({'id': [id],
'name': [name],
'sample': [sample],
'aquired': [aquired],
'proteins': [proteins],
'peptides': [peptides],
'queries': [queries],
'hits': [hits],
'type': [type],
'analyzed': [analyzed],
'msResolution': [msResolution],
'ChromFWHM_Min': [ChromFWHM_Min]})
df = pd.concat([df, df_temp])
f1=False
f2=False
i += 1
df = df.reset_index(drop=True)
return df
class refresh_cache(object):
def __init__(self, foo):
self.output = None
self.foo = foo
def __call__(self, refresh=False, *args, **kwds):
if self.output is None or refresh:
self.output = self.foo(*args, **kwds)
return self.output
mem_get_df = refresh_cache(__get_df)
def sort_df(df):
df = df.sort_values(by=['analyzed'])
df = df.reset_index(drop=True)
return df
def get_index_list(df):
index_list = list(df.index.values)
return index_list
def split_filter_part(filter_part):
operators = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if (v0 == value_part[-1] and v0 in ("'", '"', '`')):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3
def filter_data(df, filter):
filtering_expressions = filter.split(' && ')
dff = df
for filter_part in filtering_expressions:
col_name, operator, filter_value = split_filter_part(filter_part)
if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
# these operators match pandas series operator method names
dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
elif operator == 'contains':
dff = dff.loc[dff[col_name].str.contains(filter_value)]
elif operator == 'datestartswith':
# this is a simplification of the front-end filtering logic,
# only works with complete fields in standard format
dff = dff.loc[dff[col_name].str.startswith(filter_value)]
return dff
def get_page_count(df, PAGE_SIZE):
lenght_df = len(df.index)
PAGE_COUNT = int(lenght_df/PAGE_SIZE)+1
return PAGE_COUNT
| thiloschild/RockLog | RockLog/functions.py | functions.py | py | 8,050 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 1,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 1,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 1,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
34864971721 | import os
import random
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import torchvision.transforms.functional as F
class StasDataset(Dataset):
    """Segmentation dataset pairing images with mask labels.

    Labels are looked up in *label_dir* by image basename; ``.png`` masks
    are read with PIL, ``.npz`` masks are loaded from the archive's
    ``image`` key.  Indexing with ``(index, size)`` additionally resizes
    image and mask to ``size`` x ``size`` (bilinear for the image, nearest
    for the mask); the resize transforms are cached per size.
    """

    def __init__(self, image_path_list, label_dir, image_transform, ann_suffix):
        super().__init__()
        self.image_path_list = image_path_list
        self.label_dir = label_dir
        self.image_transform = image_transform
        self.ann_suffix = ann_suffix
        # Per-size caches of resize transforms, built lazily in __getitem__.
        self.resize_image_fn_dict, self.resize_mask_fn_dict = dict(), dict()

    def __getitem__(self, item):
        if type(item) == int:
            index, size = item, None
        elif type(item) == list or type(item) == tuple:
            index, size = item
        else:
            # Previously an unsupported index type left ``index``/``size``
            # unbound and crashed later with a confusing NameError.
            raise TypeError('Unsupported index type: %r' % type(item))
        image = Image.open(self.image_path_list[index]).convert('RGB')
        basename = os.path.basename(self.image_path_list[index]).split(".")[0]
        if self.ann_suffix == '.png':
            label_path = os.path.join(self.label_dir, basename + self.ann_suffix)
            label = torch.from_numpy(np.array(Image.open(label_path))).unsqueeze(dim=0)
        elif self.ann_suffix == '.npz':
            label_path = os.path.join(self.label_dir, 'label_' + basename + self.ann_suffix)
            label = torch.from_numpy(np.load(label_path)['image']).unsqueeze(dim=0)
        else:
            raise ValueError('Unsupported annotation suffix: %r' % self.ann_suffix)
        if size is not None:
            # Bug fix: the cache membership test previously checked the
            # non-existent attribute ``resize_fn_dict`` and raised
            # AttributeError whenever a size was supplied.
            if size not in self.resize_image_fn_dict:
                self.resize_image_fn_dict[size] = transforms.Resize((size, size), interpolation=InterpolationMode.BILINEAR)
                self.resize_mask_fn_dict[size] = transforms.Resize((size, size), interpolation=InterpolationMode.NEAREST)
            image = self.resize_image_fn_dict[size](image)
            label = self.resize_mask_fn_dict[size](label)
        # if multiscale_list is None, image transform should not contain resize function
        if self.image_transform is not None:
            image, label = self.image_transform(image, label)
        return image, label

    def __len__(self):
        return len(self.image_path_list)
class Train_Preprocessor(nn.Module):
    """Training-time joint transform for (PIL image, mask tensor) pairs.

    Pipeline: random ~90-100% crop -> resize -> color jitter + blur on the
    image only -> random horizontal/vertical flips applied to both -> tensor
    conversion and ImageNet normalization of the image.
    NOTE(review): the crop/flip decisions consume the global ``random``
    stream in a fixed order, so reproducibility depends on seeding
    ``random`` externally.
    """
    def __init__(self, img_size=None, h_flip_p=0.5, v_flip_p=0.5):
        super().__init__()
        if img_size is not None:
            self.img_size = img_size
            # Bilinear for images, nearest for masks so label ids stay exact.
            self.resize_image = transforms.Resize(self.img_size, interpolation=InterpolationMode.BILINEAR)
            self.resize_mask = transforms.Resize(self.img_size, interpolation=InterpolationMode.NEAREST)
        else:
            self.resize_image = nn.Identity()
            self.resize_mask = nn.Identity()
        self.jitter = transforms.ColorJitter(0.15, 0.15)
        self.blur = transforms.GaussianBlur((1, 3))
        self.h_flip_p = h_flip_p
        self.v_flip_p = v_flip_p
        self.preprocess = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
            ]
        )
    @torch.no_grad()
    def forward(self, img, label):
        # random crop: keep 90-100% of each dimension at a random offset
        W, H = img.size
        w, h = random.randint(int(0.90*W), W), random.randint(int(0.90*H), H)
        i, j = random.randint(0, H-h), random.randint(0, W-w)
        img = F.crop(img, i, j, h, w)
        label = F.crop(label, i, j, h, w)
        # resize & color transform (color ops applied to the image only)
        img = self.blur(self.jitter(self.resize_image(img)))
        label = self.resize_mask(label)
        # Random horizontal flipping
        if random.random() < self.h_flip_p:
            img = F.hflip(img)
            label = F.hflip(label)
        # Random vertical flipping
        if random.random() < self.v_flip_p:
            img = F.vflip(img)
            label = F.vflip(label)
        return self.preprocess(img), label
class Test_Preprocessor(nn.Module):
    """Evaluation-time transform: optional resize, then ImageNet normalization.

    Like Train_Preprocessor without any augmentation.  The label may be
    None at inference time, in which case None is returned for it.
    """
    def __init__(self, img_size=None):
        super().__init__()
        if img_size is None:
            self.resize_image = nn.Identity()
            self.resize_mask = nn.Identity()
        else:
            # Bilinear for images, nearest for masks so label ids stay exact.
            self.resize_image = transforms.Resize(img_size, interpolation=InterpolationMode.BILINEAR)
            self.resize_mask = transforms.Resize(img_size, interpolation=InterpolationMode.NEAREST)
        self.preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
    @torch.no_grad()
    def forward(self, img, label):
        image_tensor = self.preprocess(self.resize_image(img))
        if label is None:
            return image_tensor, None
        return image_tensor, self.resize_mask(label)
{
"api_name": "torch.utils.data.Dataset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.join",
... |
#!/usr/bin/python3
'''
takes in a URL,
sends a request to the URL
displays the body of the response (decoded in utf-8).
'''
import requests
from sys import argv


if __name__ == "__main__":
    myUrl = argv[1]
    req = requests.get(myUrl)
    # Any 4xx/5xx status is an error; the original ``> 400`` wrongly let
    # 400 (Bad Request) fall through to the success branch.
    if req.status_code >= 400:
        print("Error code: {}".format(req.status_code))
    else:
        print(req.text)
| Just-Akinyi/alx-higher_level_programming | 0x11-python-network_1/7-error_code.py | 7-error_code.py | py | 359 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
15288174099 | from django.shortcuts import get_object_or_404
from services.models import Services
from decimal import Decimal
def get_cart_items_and_total(cart):
    """Expand a session cart mapping {service_id: quantity} into line items.

    Returns a dict with 'cart_items' (one dict per service holding id, name,
    quantity, unit price and line total) and 'total', the Decimal grand
    total over all lines.  Raises Http404 via get_object_or_404 when a
    service id no longer exists.
    """
    line_items = []
    grand_total = 0
    for service_id, quantity in cart.items():
        service = get_object_or_404(Services, pk=service_id)
        line_total = service.price * Decimal(quantity)
        grand_total += line_total
        line_items.append({
            'service_id': service_id,
            'name': service.name,
            'quantity': quantity,
            'price': service.price,
            'total': line_total,
        })
    return {'cart_items': line_items, 'total': grand_total}
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "services.models.Services",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 10,
"usage_type": "call"
}
] |
2059870361 | from collections import deque
# file = "test12.txt"
file = "input12.txt"
def get_terrain():
    """Parse the heightmap input file into (grid, start, end).

    Returns a list of rows of ints (0 for 'a' through 25 for 'z') plus the
    (x, y) coordinates of the 'S' start cell and the 'E' end cell, which
    are treated as heights 'a' and 'z' respectively.
    """
    ter = []
    start = (0, 0)
    end = (0, 0)
    # ``file`` is the module-level input path.
    with open(file, "r") as f:
        for y, line in enumerate(f):
            l = list(line.strip())
            if "S" in l:
                x = l.index("S")
                start = (x, y)
                l[x] = "a"
            if "E" in l:
                x = l.index("E")
                end = (x, y)
                l[x] = "z"
            ter.append([ord(c) - ord("a") for c in l])
    return ter, start, end
terrain, start, end = get_terrain()
h = len(terrain)
w = len(terrain[0])
# Part 1: BFS from the start, moving only to cells at most one unit higher.
# ``score`` doubles as the visited set (-1 means unvisited).
score = [[-1] * w for _ in range(h)]
# NOTE(review): ``file`` is rebound from the input path to the BFS queue here.
file = deque([(start, 0)])
while True:
    v = file.popleft()
    print(v)
    ((x, y), p) = v
    if score[y][x] >= 0:
        continue
    score[y][x] = p
    if (x, y) == end:
        print(p)
        break
    if x > 0 and terrain[y][x - 1] <= terrain[y][x] + 1:
        file.append(((x - 1, y), p + 1))
    if x < w - 1 and terrain[y][x + 1] <= terrain[y][x] + 1:
        file.append(((x + 1, y), p + 1))
    if y > 0 and terrain[y - 1][x] <= terrain[y][x] + 1:
        file.append(((x, y - 1), p + 1))
    if y < h - 1 and terrain[y + 1][x] <= terrain[y][x] + 1:
        file.append(((x, y + 1), p + 1))
# Part 2: BFS backwards from the end (climb constraint reversed) until the
# nearest cell of height 'a' (0) is reached.
score = [[-1] * w for _ in range(h)]
file = deque([(end, 0)])
while True:
    v = file.popleft()
    print(v)
    ((x, y), p) = v
    if score[y][x] >= 0:
        continue
    score[y][x] = p
    if terrain[y][x] == 0:
        print(p)
        break
    if x > 0 and terrain[y][x - 1] >= terrain[y][x] - 1:
        file.append(((x - 1, y), p + 1))
    if x < w - 1 and terrain[y][x + 1] >= terrain[y][x] - 1:
        file.append(((x + 1, y), p + 1))
    if y > 0 and terrain[y - 1][x] >= terrain[y][x] - 1:
        file.append(((x, y - 1), p + 1))
    if y < h - 1 and terrain[y + 1][x] >= terrain[y][x] - 1:
        file.append(((x, y + 1), p + 1))
| professeurb/AdventOfCode2022 | adc12.py | adc12.py | py | 1,932 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 51,
"usage_type": "call"
}
] |
21870337610 | from collections import OrderedDict
from six.moves.urllib.parse import urlencode
def joined_or_null(arr):
    """Comma-join *arr*; an empty sequence yields the literal string "null"."""
    if not arr:
        return "null"
    return ','.join(arr)
def build_url(path, includes=None, fields=None):
    """Append JSON:API ``include`` and sparse ``fields[...]`` query params.

    Parameters are sorted by name so the generated URL is deterministic; a
    path that already carries a query string is extended with '&'.  With no
    includes and no fields the path is returned unchanged.
    """
    query = {}
    if includes:
        query['include'] = joined_or_null(includes)
    if fields:
        for resource_type, attributes in fields.items():
            key = "fields[{resource_type}]".format(resource_type=resource_type)
            query[key] = joined_or_null(attributes)
    if not query:
        return path
    connector = '&' if '?' in path else '?'
    ordered = OrderedDict(sorted(query.items(), key=lambda item: item[0]))
    return path + connector + urlencode(ordered)
| Patreon/patreon-python | patreon/jsonapi/url_util.py | url_util.py | py | 787 | python | en | code | 109 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "six.moves.urllib.parse.urlencode",
"line_number": 29,
"usage_type": "call"
}
] |
23854869141 | """Setup file for package"""
from setuptools import setup, find_namespace_packages
from os import path
__version__ = "1.0.b1"
__author__ = "rmflynn"
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
# Package metadata and build configuration for dram2.
setup(
    name="dram2",
    version=__version__,
    packages=find_namespace_packages(
        include=["dram2.*", "dram2.db_kits.*"],
    ),
    description=(
        "Distilled and Refined Annotation of Metabolism: A tool for the"
        " annotation and curation of function for"
        " microbial and viral genomes"
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",  # Optional (see note above)
    # Non-code data files shipped inside the wheel, keyed by package.
    package_data={
        "dram2.rule_adjectives": ["rules.tsv"],
        "dram2.tree_kit": [
            "data/nxr_nar/color_map.tsv",
            "data/nxr_nar/nxr-nar-tree-mapping.tsv",
            "data/nxr_nar/nxr-nar-tree-mapping.tsv",
            "data/nxr_nar/nxr-nar_seqs_for_tree_aligned.faa",
            "data/nxr_nar/nxr_nar.refpkg/CONTENTS.json",
            "data/nxr_nar/nxr_nar.refpkg/nxr_nar.tre",
            "data/nxr_nar/nxr_nar.refpkg/phylo_modelNQB2rG.json",
            "data/nxr_nar/nxr_nar.refpkg/RAxML_info.nxr_nar_raxml",
            "data/nxr_nar/nxr_nar.refpkg/nxr-nar_seqs_for_tree_aligned.faa",
        ],
        "dram2.distill": [
            "data/amg_database.tsv",
            "data/etc_module_database.tsv",
            "data/genome_summary_form.tsv",
            "data/module_step_form.tsv",
            "data/function_heatmap_form.tsv",
        ],
        "dram2.db_kits.methyl_kit": [
            "methylotrophy.faa",
            "methylotrophy_distillate.tsv",
        ],
        "dram2.db_kits.camper_kit": [
            "CAMPER_distillate.tsv",
        ],
        "dram2.db_kits.cant_hyd_kit": [
            "data/BacMet_ExpVerified_BiocideRes_genes_SHORT.faa",
            "data/CANT_HYD_BLAST_scores.csv",
            "data/CANT_HYD_HMM_scores.csv",
            "data/engineeredsys_dram_module.tsv",
        ],
    },
    # package_dir={'': ''},
    python_requires=">=3.10",
    install_requires=[
        "scikit-bio",
        "pandas",
        "altair",
        "sqlalchemy",
        "networkx",
        "openpyxl",
        "numpy",
        "click",
        "pytest",
        "biopython",
    ],
    # Console entry point: the ``dram2`` command maps to the click CLI.
    entry_points={
        "console_scripts": [
            "dram2 = dram2.cli:dram2",
            # 'adj = dram2.rule_adjectives:evaluate',
            # 'tree = dram2.tree_kit.dram_phylo_pipe:tree_kit',
        ],
    },
    author="Rory Flynn",
    author_email="Rory.Flynn@colostate.edu",
    url="",  # this will change
    download_url="",
    include_package_data=True,  # include all files in MANIFEST.in
)
| rmFlynn/collection_of_typical_ocoli_samples | setup.py | setup.py | py | 2,806 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
75065179233 | __all__ = [
"Model",
]
r""""
Adapted from the Robustness Against Backdoors (RAB) repository.
See: https://github.com/AI-secure/Robustness-Against-Backdoor-Attacks
"""
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F # noqa
from .types import PoisonLearner
class Model(PoisonLearner):
    """Small two-conv CNN for 1x28x28 (MNIST-sized) inputs.

    NOTE(review): ``forward`` uses ``self.fc_first`` and ``self.linear``,
    which are not defined in this class -- presumably they are created by
    the ``PoisonLearner`` base (constructed here with n_classes=1);
    confirm against ``types.PoisonLearner``.
    """
    def __init__(self):
        super(Model, self).__init__(n_classes=1)
        # Note: noqa below due to type resolve errors when ints used instead of tuples for params
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)  # noqa
        # self.bn1 = nn.BatchNorm2d(num_features=16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=0)  # noqa
        # self.bn2 = nn.BatchNorm2d(num_features=32)
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        # self.fc = nn.Linear(32*4*4, 512)
        # self.output = nn.Linear(512, 1)

    # def unfix_pert(self,):
    #     del self.fixed_pert

    # def fix_pert(self, sigma, hash_num):
    #     assert not hasattr(self, 'fixed_pert')
    #     rand = np.random.randint(2**32-1)
    #     np.random.seed(hash_num)
    #     self.fixed_pert = torch.FloatTensor(1,1,28,28).normal_(0, sigma)
    #     if self.gpu:
    #         self.fixed_pert = self.fixed_pert.cuda()
    #     np.random.seed(rand)

    def conv_only(self) -> nn.Sequential:
        # Expose the convolutional feature extractor (without the heads).
        return nn.Sequential(self.conv1,
                             self.max_pool,
                             # self.bn1,
                             self.conv2,
                             # self.bn2,
                             )

    def forward(self, x: Tensor, penu: bool = False, block: bool = False) -> Tensor:
        # When ``penu`` is True, return the penultimate features instead of
        # the final linear output.
        assert not block, "Block mode not currently supported"
        # B = x.size()[0]
        # if hasattr(self, 'fixed_pert'):
        #     x = x + self.fixed_pert
        out = x
        out = self.max_pool(F.relu(self.conv1(out)))
        # out = self.bn1(out)
        out = self.max_pool(F.relu(self.conv2(out)))
        # out = self.bn2(out)
        out = self.flatten(out)
        out = self.fc_first(out)
        if penu:
            return out
        out = self.linear(out)
        return out
# def loss(self, pred, label):
# if self.gpu:
# label = label.cuda()
# label = label.float()
# return F.binary_cross_entropy_with_logits(pred, label)
| ZaydH/target_identification | fig01_cifar_vs_mnist/poison/datasets/_mnist_cnn.py | _mnist_cnn.py | py | 2,414 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "types.PoisonLearner",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"l... |
31245781337 | # response.py
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained("gpt2")
def get_response(user_input):
    """Generate a GPT-2 continuation for *user_input*.

    Encodes the prompt (truncated to 64 tokens), samples up to 100 total
    tokens at temperature 0.7, and returns only the newly generated text
    (the prompt tokens are sliced off before decoding).
    """
    encoded = tokenizer.encode_plus(
        user_input,
        add_special_tokens=True,
        padding="longest",
        truncation=True,
        max_length=64,
        return_tensors="pt"
    )
    prompt_ids = encoded['input_ids']
    generated = model.generate(
        input_ids=prompt_ids,
        attention_mask=encoded['attention_mask'],
        max_length=100,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7
    )
    # Keep only the tokens produced after the prompt.
    continuation_ids = generated[:, prompt_ids.shape[-1]:][0]
    return tokenizer.decode(continuation_ids, skip_special_tokens=True)
| Ghassen-bgh/gpt-2-huggingface-chatbot | response.py | response.py | py | 844 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "transformers.GPT2Tokenizer.from_pretrained",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Tokenizer",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "transformers.GPT2LMHeadModel.from_pretrained",
"line_number": 7,
"usa... |
31245757127 | from fastapi import FastAPI
import pandas as pd
#from pydantic import BaseModel
#from typing import Optional
# Ruta al archivo JSON
file_path = '../PI MLOps - STEAM/steam_games.json'
# Leer el archivo JSON línea por línea y cargar los datos en una lista
import ast

data_list = []
# Parse each line as a Python literal.  ast.literal_eval replaces eval():
# it still accepts the single-quoted dict-per-line format of
# steam_games.json, but cannot execute arbitrary code from the data file.
with open(file_path, 'r') as f:
    for line in f:
        data_list.append(ast.literal_eval(line.strip()))

# Crear DataFrame a partir de la lista de diccionarios
df = pd.DataFrame(data_list)
# Función para obtener juegos por año
def juegos_por_año(df, año):
    """Return the alphabetically sorted game names released in *año*.

    *año* is matched as a string prefix of ``release_date`` (e.g. "2017").
    Rows with a missing release date are excluded via ``na=False`` so the
    boolean mask is always well defined (the original ANDed a NaN-carrying
    mask with ``pd.notna``).  If no game matches, a message string is
    returned instead of a list.
    """
    # na=False turns missing release dates into False instead of NaN.
    mask = df['release_date'].str.startswith(año, na=False)
    juegos_del_año = df[mask]
    # Validar si se encontraron juegos para el año dado
    if juegos_del_año.empty:
        return f"No se encontraron juegos para el año {año}"
    # Obtener la lista de nombres de juegos y ordenarla alfabéticamente
    return sorted(juegos_del_año['app_name'].tolist())
app = FastAPI()

# Endpoint that returns the games released in a given year.
@app.get("/juegos/{Anio}")
def juegos(Anio: str):
    # Delegates to juegos_por_año on the module-level dataframe; *Anio* is
    # the year as a path string (e.g. /juegos/2017).
    return juegos_por_año(df, Anio)
# app = FastAPI()
# class Libro(BaseModel):
# titulo: str
# autor: str
# paginas: int
# editorial: Optional [str]
# @app.get("/")
# def index():
# return {"Mensaje" : "Mi Primera API"}
# @app.get("/Libros/{Id}")
# def mostrar_libro(Id: int):
# return {"data" : Id}
# @app.post("/Libros")
# def insertar_libro(Libro: Libro):
# return {"mensaje" : f"Libro {Libro.titulo} insertado"} | Gio2M/2 | main.py | main.py | py | 1,792 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.notna",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 39,
"usage_type": "call"
}
] |
22190353631 | # Coded By WEYT.MM /*\ 28-05-2020
# https://github.com/WEYTMM/Jadwal-Kencan
from datetime import *
now = datetime.now()
hari_kencan = now.strftime("%A")
tanggal = date.today()
class main:
    """Map an English weekday name (from strftime('%A')) onto the
    Indonesian day name and that day's hard-coded date schedule.

    tampilkan() returns {'hari': <Indonesian day name>,
    'kencan': <schedule text>}.
    """
    def __init__(self, kencan):
        # Edit your own date schedule here (strings are user-facing).
        senin = """Gak ada jadwal kencan hari ini
Doi lagi sibuk kegiatan ekskul nya disekolah
Maklum lah cewe w kan aktif di organisasi
"""
        selasa = """Jam 15:30 • Jemput doi pulang sekolah
Jam 16:30 • Bincang sore menunggu senja di rumah doi
Jam 18:30 • Pamitan slurr gak enak maen sampe malem dirumah cewe
Jam 21:00 • Tidur nyenyak hari ini"""
        rabu = """Libur dulu bos-Q lagi gak ada bensin buat kesono"""
        kamis = """Hari ini cuma jemput doi pulang sekolah doang
Gak ada jadwal nge-date!!!"""
        jumat = """Gak ada jadwal nge-date bos-Q
Padahal udah rindu berat ini:'("""
        sabtu = """Jam 15:30 • Jemput doi pulang sekolah
Jam 16:10 • Makan sore dulu slurr
Jam 16:30 • Jalan - jalan ke pinggir danau
Jam 17:40 • Pulang ah udah mau Maghrib"""
        minggu = """Jam 08:00 • Mandi dulu bos-Q
Jam 09:00 • Nyamper doi kerumahnya
Jam 12:15 • Makan siang buat tenaga hari ini
Jam 16:00 • Jalan - jalan di taman
Jam 17:30 • Nganter doi pulang
Jam 18:30 • Pamitan ke ortu doi w mau pulang
Jam 21:00 • Tidur nyenyak hari ini"""
        # Translate the English day name into the matching schedule and
        # Indonesian day name.
        if kencan == "Monday":
            kencan = senin
            hari = "Senin"
        elif kencan == "Tuesday":
            kencan = selasa
            hari = "Selasa"
        elif kencan == "Wednesday":
            kencan = rabu
            hari = "Rabu"
        elif kencan == "Thursday":
            kencan = kamis
            hari = "Kamis"
        elif kencan == "Friday":
            kencan = jumat
            hari = "Jum'at"
        elif kencan == "Saturday":
            kencan = sabtu
            hari = "Sabtu"
        elif kencan == "Sunday":
            kencan = minggu
            hari = "Minggu"
        self.kencan = kencan
        self.hari = hari

    def tampilkan(self):
        # Package the resolved day name and schedule into a dict.
        hasil = {}
        hasil['hari'] = self.hari
        hasil['kencan'] = self.kencan
        return hasil
def home():
    """Print today's plan: day name, date, and the schedule lines in green."""
    green = "\033[92m"
    reset = "\033[0m"
    info = main(str(hari_kencan)).tampilkan()
    print("Hari : " + green + info['hari'] + reset)
    print("Tanggal : " + green + str(tanggal) + reset)
    print("\n\n\nJadwal Kencan Hari Ini :\n")
    for line in info['kencan'].replace('\t', '').splitlines():
        print("* " + line)
home()
| WEYTMM/Jadwal-Kencan | kencan.py | kencan.py | py | 2,266 | python | id | code | 0 | github-code | 1 | [
{
"api_name": "datetime.now",
"line_number": 5,
"usage_type": "call"
}
] |
38950183255 | import logging
import os
import pickle
from scipy import ndimage
import numpy
import tensorflow
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
# todo fix the image size; our images aren't square
image_size = 28 # Pixel width and height.
image_height = 28
image_width = 5 * 28
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
    """Load the data for a single letter label.

    Reads every image in *folder*, normalizes pixels to roughly
    [-0.5, 0.5], and collects the expected text for each image from its
    file name (``<id>_<text>.<ext>``).  Returns (dataset, correct_values).
    NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2; this
    code requires an old SciPy (with PIL installed) to run -- confirm the
    pinned environment.
    """
    image_files = os.listdir(folder)
    dataset = numpy.ndarray(shape=(len(image_files), image_height, image_width), dtype=numpy.float32)
    logging.debug(folder)
    correct_values = []
    num_images = 0
    for image in image_files:
        # The expected text is encoded in the file name: <id>_<text>.<ext>
        correct_value = image.split('.')[0].split('_')[1]
        correct_values.append(correct_value)
        image_file = os.path.join(folder, image)
        try:
            image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_height, image_width):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images += 1
        except IOError as e:
            logging.warn('Could not read: %s : %s - it\'s ok, skipping.' % (image_file, e))
    logging.debug('correct values: %s' % correct_values)
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))
    logging.debug('Full dataset tensor: %s' % str(dataset.shape))
    logging.debug('Mean: %s' % numpy.mean(dataset))
    logging.debug('Standard deviation: %s' % numpy.std(dataset))
    return dataset, correct_values
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each folder's images as [dataset, correct_values] beside it.

    For every folder in *data_folders* a ``<folder>.pickle`` file is
    written (skipped when it already exists, unless *force* is True).
    Returns the list of pickle file names, one per folder.
    """
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            logging.debug('%s already present - Skipping pickling.' % set_filename)
        else:
            logging.debug('Pickling %s.' % set_filename)
            dataset, correct_values = load_letter(folder, min_num_images_per_class)
            try:
                with open(set_filename, 'wb') as f:
                    pickle.dump([dataset, correct_values], f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                # Bug fix: the original passed extra positional args to
                # logging.warn with no %-placeholders in the message, which
                # broke log record formatting; logging.warn is also
                # deprecated in favor of logging.warning.
                logging.warning('Unable to save data to %s: %s', set_filename, e)
    return dataset_names
def make_arrays(nb_rows, arg_image_height, arg_image_width):
    """Allocate an uninitialized float32 image tensor and int32 label vector.

    Returns (None, None) when nb_rows is falsy (0 or None).
    """
    if not nb_rows:
        return None, None
    dataset = numpy.ndarray((nb_rows, arg_image_height, arg_image_width), dtype=numpy.float32)
    labels = numpy.ndarray(nb_rows, dtype=numpy.int32)
    return dataset, labels
# todo split labels into 5 one-hot result sets
def split_data(arg_pickle_file_name, arg_train_size, arg_validation_size, arg_test_size, arg_image_height,
               arg_image_width):
    """Load the pickled [images, labels] pair, shuffle both in unison, and
    carve out consecutive train / validation / test partitions.

    NOTE(review): each output ndarray is allocated one row longer than the
    slice copied into it, so its final row is uninitialized memory --
    confirm whether downstream code relies on the extra row before fixing.
    """
    logging.debug(arg_pickle_file_name)
    with open(arg_pickle_file_name, 'rb') as f:
        letter_set, correct_values = pickle.load(f)
    # Restoring the saved RNG state shuffles images and labels identically.
    state = numpy.random.get_state()
    numpy.random.shuffle(letter_set)
    numpy.random.set_state(state)
    numpy.random.shuffle(correct_values)
    start_train = 0
    end_train = start_train + arg_train_size
    start_validation = arg_train_size
    end_validation = start_validation + arg_validation_size
    start_test = arg_train_size + arg_validation_size
    end_test = start_test + arg_test_size
    result_train_data = numpy.ndarray((end_train - start_train + 1, arg_image_height, arg_image_width),
                                      dtype=numpy.float32)
    result_train_data[0:end_train - start_train] = letter_set[start_train:end_train]
    result_train_correct = correct_values[start_train:end_train]
    result_validation_data = numpy.ndarray((end_validation - start_validation + 1, arg_image_height, arg_image_width),
                                           dtype=numpy.float32)
    result_validation_data[0:end_validation - start_validation] = letter_set[start_validation:end_validation]
    result_validation_correct = correct_values[start_validation:end_validation]
    result_test_data = numpy.ndarray((end_test - start_test + 1, arg_image_height, arg_image_width),
                                     dtype=numpy.float32)
    result_test_data[0:end_test - start_test] = letter_set[start_test:end_test]
    result_test_correct = correct_values[start_test:end_test]
    return result_train_data, result_train_correct, result_validation_data, result_validation_correct, \
           result_test_data, result_test_correct
def special_ord(arg, arg_index):
    """Map the character at *arg_index* of *arg* to a small label code.

    Uppercase 'A'-'Z' become 1-26 (ord - 64); a space (padding) maps to 0
    by treating its code point as 64 before the shift.
    """
    code = ord(arg[arg_index])
    if code == 32:
        code = 64
    return code - 64
vector_special_ord = numpy.vectorize(special_ord)
def reformat_as_list(dataset, arg_labels, arg_num_labels, arg_image_height, arg_image_width):
    """Flatten images and one-hot-encode each of the 5 label characters.

    Returns the flattened float32 dataset and a list of five (N, 11)
    one-hot arrays, one per character position.
    """
    dataset = dataset.reshape((-1, arg_image_height * arg_image_width)).astype(numpy.float32)
    # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
    size = len(arg_labels)
    result = []
    for index in range(0, 5):
        # Bug fix: encode the character at *index*; the original always
        # encoded position 0, so all five one-hot sets were identical.
        codes = vector_special_ord(arg_labels, index)
        one_hot = numpy.zeros((size, 11))
        one_hot[numpy.arange(size), codes] = 1
        result.append(one_hot)
    return dataset, result
def reformat(dataset, arg_labels, arg_num_labels, arg_image_height, arg_image_width):
    """Flatten images and one-hot-encode each of the 5 label characters.

    Like reformat_as_list, but stacks the five (N, 11) one-hot arrays into
    a single (5, N, 11) ndarray.
    """
    dataset = dataset.reshape((-1, arg_image_height * arg_image_width)).astype(numpy.float32)
    # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
    size = len(arg_labels)
    per_position = []
    for index in range(0, 5):
        # Bug fix: use the loop *index* rather than the hard-coded position
        # 0, which made every position's one-hot set identical.
        codes = vector_special_ord(arg_labels, index)
        one_hot = numpy.zeros((size, 11))
        one_hot[numpy.arange(size), codes] = 1
        per_position.append(one_hot)
    return dataset, numpy.asanyarray(per_position)
def accuracy_old(predictions, labels):
    """Percentage of rows whose argmax prediction matches the argmax label."""
    hits = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1))
    return 100.0 * hits / predictions.shape[0]
def accuracy(predictions, labels0, labels1, labels2, labels3, labels4):
    """Sum of per-position accuracies (percent) over the 5 character slots.

    ``predictions[i]`` and the matching labels argument are (N, 11)
    score/one-hot arrays; each position contributes 0-100, so a perfect
    batch scores 500.
    """
    total = 0.0
    # DRY rewrite of five copy-pasted blocks; behaviour is unchanged.
    for position, labels in enumerate((labels0, labels1, labels2, labels3, labels4)):
        hits = numpy.sum(numpy.argmax(predictions[position], 1) == numpy.argmax(labels, 1))
        total += 100.0 * hits / predictions[position].shape[0]
    return total
def alt_accuracy(predictions, labels0, labels1, labels2, labels3, labels4):
    """Same contract as accuracy(): summed per-position percent accuracy.

    Bug fix: the original took the first position's argmax over the
    flattened array (no axis argument), comparing a scalar index against
    per-row label argmaxes; all five positions now reduce along axis 1,
    consistent with accuracy().
    """
    total = 0.0
    for position, labels in enumerate((labels0, labels1, labels2, labels3, labels4)):
        hits = numpy.sum(numpy.argmax(predictions[position], 1) == numpy.argmax(labels, 1))
        total += 100.0 * hits / predictions[position].shape[0]
    return total
pickle_file_name = maybe_pickle(['concatenate_output'], 1800)
with open('concatenate_output.pickle', 'rb') as file_pointer:
    t, _ = pickle.load(file_pointer)
total_data = len(t)
# NOTE(review): under Python 3, '/' here yields floats that later feed
# array sizes and slice bounds -- '//' is presumably intended; confirm
# which interpreter this targets.
validation_size = total_data / 20
test_size = total_data / 20
train_size = total_data - validation_size - test_size
train_data, train_labels, validation_data, validation_labels, test_data, test_labels = \
    split_data(pickle_file_name[0], train_size, validation_size, test_size, image_height, image_width)
logging.debug('Training: %d %d' % (len(train_data), len(train_labels)))
logging.debug('Validation: %d %d' % (len(validation_data), len(validation_labels)))
logging.debug('Testing: %d %d' % (len(test_data), len(test_labels)))
num_labels = 11
# reformat() flattens images and one-hot-encodes the 5 label positions.
train_dataset, train_labels = reformat(train_data, train_labels, num_labels, image_height, image_width)
valid_dataset, valid_labels = reformat(validation_data, validation_labels, num_labels, image_height, image_width)
test_dataset, test_labels = reformat(test_data, test_labels, num_labels, image_height, image_width)
logging.info('Training set: %s %s' % (train_dataset.shape, train_labels.shape)) # was train_labels[0].shape
logging.info('Validation set: %s %s' % (valid_dataset.shape, valid_labels.shape))
logging.debug('Test set: %s %s ' % (test_dataset.shape, test_labels.shape))
batch_size = 128
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tensorflow.Graph()
with graph.as_default():
    # Placeholders for one minibatch of flattened images and the five
    # per-character one-hot label sets.
    tf_train_dataset = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, image_height * image_width))
    tf_train_labels0 = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, num_labels))
    tf_train_labels1 = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, num_labels))
    tf_train_labels2 = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, num_labels))
    tf_train_labels3 = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, num_labels))
    tf_train_labels4 = tensorflow.placeholder(tensorflow.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tensorflow.constant(valid_dataset)
    tf_test_dataset = tensorflow.constant(test_dataset)
    # One shared weight matrix with a separate bias per character position.
    weights = tensorflow.Variable(tensorflow.truncated_normal([image_height * image_width, num_labels]))
    biases0 = tensorflow.Variable(tensorflow.zeros([num_labels]))
    logits0 = tensorflow.matmul(tf_train_dataset, weights) + biases0
    biases1 = tensorflow.Variable(tensorflow.zeros([num_labels]))
    logits1 = tensorflow.matmul(tf_train_dataset, weights) + biases1
    biases2 = tensorflow.Variable(tensorflow.zeros([num_labels]))
    logits2 = tensorflow.matmul(tf_train_dataset, weights) + biases2
    biases3 = tensorflow.Variable(tensorflow.zeros([num_labels]))
    logits3 = tensorflow.matmul(tf_train_dataset, weights) + biases3
    biases4 = tensorflow.Variable(tensorflow.zeros([num_labels]))
    logits4 = tensorflow.matmul(tf_train_dataset, weights) + biases4
    # Total loss is the sum of the five per-position cross-entropies.
    loss = \
        tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(logits0, tf_train_labels0)) + \
        tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(logits1, tf_train_labels1)) + \
        tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels2)) + \
        tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(logits3, tf_train_labels3)) + \
        tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(logits4, tf_train_labels4))
    optimizer = tensorflow.train.GradientDescentOptimizer(0.5).minimize(loss)
    train_prediction0 = tensorflow.nn.softmax(logits0)
    train_prediction1 = tensorflow.nn.softmax(logits1)
    train_prediction2 = tensorflow.nn.softmax(logits2)
    train_prediction3 = tensorflow.nn.softmax(logits3)
    train_prediction4 = tensorflow.nn.softmax(logits4)
    train_prediction = tensorflow.pack(
        [train_prediction0, train_prediction1, train_prediction2, train_prediction3, train_prediction4])
    # todo expand this out to 5 softmax calls just like the above
    # NOTE(review): the trailing comma after asanyarray(...) makes
    # valid_prediction a 1-tuple wrapping the array -- likely unintended;
    # confirm before relying on its indexing behaviour.
    valid_prediction = numpy.asanyarray([
        tensorflow.nn.softmax(tensorflow.matmul(tf_valid_dataset, weights) + biases0),
        tensorflow.nn.softmax(tensorflow.matmul(tf_valid_dataset, weights) + biases1),
        tensorflow.nn.softmax(tensorflow.matmul(tf_valid_dataset, weights) + biases2),
        tensorflow.nn.softmax(tensorflow.matmul(tf_valid_dataset, weights) + biases3),
        tensorflow.nn.softmax(tensorflow.matmul(tf_valid_dataset, weights) + biases4)]),
    test_prediction = tensorflow.pack([
        tensorflow.nn.softmax(tensorflow.matmul(tf_test_dataset, weights) + biases0),
        tensorflow.nn.softmax(tensorflow.matmul(tf_test_dataset, weights) + biases1),
        tensorflow.nn.softmax(tensorflow.matmul(tf_test_dataset, weights) + biases2),
        tensorflow.nn.softmax(tensorflow.matmul(tf_test_dataset, weights) + biases3),
        tensorflow.nn.softmax(tensorflow.matmul(tf_test_dataset, weights) + biases4)])
num_steps = 10001 # 3001
# Train on CPU only (device_count={'GPU': 0}).
with tensorflow.Session(graph=graph, config=tensorflow.ConfigProto(device_count={'GPU': 0})) as session:
    tensorflow.global_variables_initializer().run()
    logging.debug("Initialized.")
    for step in range(num_steps):
        # Cycle through the training data one minibatch at a time.
        offset = (step * batch_size) % (train_labels[0].shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels0 = train_labels[0][offset:(offset + batch_size), :]
        batch_labels1 = train_labels[1][offset:(offset + batch_size), :]
        batch_labels2 = train_labels[2][offset:(offset + batch_size), :]
        batch_labels3 = train_labels[3][offset:(offset + batch_size), :]
        batch_labels4 = train_labels[4][offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels0: batch_labels0, tf_train_labels1: batch_labels1,
                     tf_train_labels2: batch_labels2, tf_train_labels3: batch_labels3,
                     tf_train_labels4: batch_labels4}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            logging.info("Minibatch loss at step %d: %f" % (step, l))
            logging.info("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels0, batch_labels1,
                                                                 batch_labels2, batch_labels3, batch_labels4))
            # logging.info("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels[0],
            logging.info("Validation accuracy: %.1f%%" % alt_accuracy(valid_prediction, valid_labels[0],
                                                                      valid_labels[1], valid_labels[2], valid_labels[3],
                                                                      valid_labels[4]))
    logging.info(
        "Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels[0], test_labels[1], test_labels[2],
                                           test_labels[3], test_labels[4]))
# with tensorflow.Session(graph=graph, config=tensorflow.ConfigProto(device_count={'GPU': 0})) as session:
# tensorflow.global_variables_initializer().run()
# print('Initialized.')
# for step in range(num_steps):
# # Run the computations. We tell .run() that we want to run the optimizer,
# # and get the loss value and the training predictions returned as numpy arrays
# _, l, predictions = session.run([optimizer, loss, train_prediction])
# if (step % 100 == 0):
# print('Loss at step %d: %f' % (step, l))
# print('Training accuracy: %.1f%%' % accuracy(predictions, train_labels[:train_subset, :]))
# # Calling .eval() on valid_prediction is basically like calling run(), but
# # just to get that one numpy array. Note that it recomputes all its graph
# # dependencies.
# print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels[0]))
# print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
| mikedelong/machine-learning | tensorflow/make_pickle.py | make_pickle.py | py | 16,902 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"... |
71068940194 | import os
from copy import copy
from logging import DEBUG
from logging import INFO
from logging import NOTSET
from furl import furl
from src.typeshed import ConfigDict
from src.typeshed import DBConfigDict
from src.typeshed import DiscordConfigDict
from src.typeshed import FormatterDict
from src.typeshed import LoggerDict
from src.typeshed import LoggerItemDict
# SQLAlchemy scheme for the asyncpg-backed async engine.
ASYNC_PSQL_SCHEME = "postgresql+asyncpg"
# Discord owner id is optional; None when the env var is unset.
discord_owner_id_str = os.environ.get("DISCORD_OWNER_ID")
DISCORD_OWNER_ID = int(discord_owner_id_str) if discord_owner_id_str else None
# Database connection settings, all taken from the environment.
DATABASE_NAME = os.environ.get("DATABASE_NAME")
DATABASE_USER = os.environ.get("DATABASE_USER")
DATABASE_HOST = os.environ.get("DATABASE_HOST", "localhost")
DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD")
PSQL_SCHEME = "postgresql"
# Build the synchronous URI first, then swap the scheme for the async variant.
database_furl = furl(
    scheme=PSQL_SCHEME,
    username=DATABASE_USER,
    password=DATABASE_PASSWORD,
    host=DATABASE_HOST,
    path=DATABASE_NAME,
)
# copy() so the later scheme swap does not mutate this value.
DATABASE_URI = copy(database_furl.url)
database_furl.set(scheme=ASYNC_PSQL_SCHEME)
ASYNC_DATABASE_URI = database_furl.url
DISCORD_ACCOUNT_TOKEN = os.environ.get("DISCORD_ACCOUNT_TOKEN", "token")
DISCORD_LOG_FILENAME = "logs/discord.log"
# Aggregated application configuration: database URIs, discord credentials,
# and a logging.config.dictConfig-style logger section.
config_dict = ConfigDict(
    db=DBConfigDict(database_uri=DATABASE_URI, async_database_uri=ASYNC_DATABASE_URI),
    discord=DiscordConfigDict(
        account_token=DISCORD_ACCOUNT_TOKEN,
    ),
    logger=LoggerDict(
        version=1,
        formatters={
            "base_formatter": FormatterDict(format="[%(asctime)s] [%(levelname)s] [%(name)s]: %(message)s"),
            "simple_formatter": FormatterDict(format="[%(asctime)s] [%(levelname)s]: %(message)s"),
        },
        handlers={
            # Full debug log written to a file (truncated on start, mode "w").
            "file_handler": {
                "class": "logging.FileHandler",
                "formatter": "base_formatter",
                "filename": DISCORD_LOG_FILENAME,
                "encoding": "utf-8",
                "mode": "w",
            },
            "error_handler": {
                "class": "logging.StreamHandler",
                "level": DEBUG,
                "formatter": "base_formatter",
                "stream": "ext://sys.stderr",
            },
            "basic_handler": {
                "class": "logging.StreamHandler",
                "level": INFO,
                "formatter": "simple_formatter",
                "stream": "ext://sys.stdout",
            },
        },
        loggers={
            "discord": LoggerItemDict(level=DEBUG, handlers=["file_handler"]),
            "discord.http": LoggerItemDict(level=INFO, handlers=["basic_handler"]),
        },
        root=LoggerItemDict(handlers=["error_handler"], level=NOTSET),
    ),
)
| jplhanna/discord_quest_bot | src/config.py | config.py | py | 2,666 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
12922429389 | # De django
from django.urls import path
# Propios
from . import views
urlpatterns = [
path('v1',views.v1,name='v1'),
path('getstaff',views.getStaff,name='getStaff'),
path('getcluster',views.getCluster,name='getCluster'),
path('setdata',views.setData, name='setData'),
path('active-client',views.changeClient,name='updateClient'),
path('stats',views.stats),
path('sheet',views.getSheet),
path('verify-psw',views.verifyPsw,name='verifyPsw'),
] | Haziel-Soria-Trejo/GymAdmin | API/urls.py | urls.py | py | 475 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
28801028633 | import random
import uuid
from typing import Dict
from typing import Union
from fastapi import APIRouter
from fastapi import HTTPException
from fastapi import Request
from bowled_match_engine.match_engine.game_simulator import simulate_game
from gamelib.team.live_team import get_players_by_team_id
from gamelib.team.live_team import get_team_by_id
from rest_server.live.api_models import LiveGameInput
# Create FastAPI router
router = APIRouter(prefix="/live")


@router.post(
    path="/game",
    response_model=Dict,
    tags=["Live"],
)
async def play_game(
    request: Request,
    game_input: LiveGameInput,
) -> Union[Dict, HTTPException]:
    """
    Simulate a full game between the user's team and a randomly picked
    bot-controlled enemy team.

    Flow: choose an enemy from the same pool (ICC or IPL) as the user's
    team, load both rosters from the data store, build the lineups, run a
    coin-toss, simulate the game, bump live metrics in the cache store, and
    return the full game-result payload.
    """
    context = request.state.context
    await context.logger.info("Simulate game API")
    # TODO: Validate input data
    # Choose a random enemy team from the same league as the user's team;
    # if the user's team is in neither list, any team may be picked.
    icc_teams = ["ind", "aus", "eng", "pak", "nz", "wi", "sl", "sa"]
    ipl_teams = [
        "csk",
        "dc",
        "gt",
        "kkr",
        "lsg",
        "mi",
        "pbks",
        "rr",
        "rcb",
        "srh",
    ]
    if game_input.team_id in icc_teams:
        icc_teams.remove(game_input.team_id)
        team_ids = icc_teams
    elif game_input.team_id in ipl_teams:
        ipl_teams.remove(game_input.team_id)
        team_ids = ipl_teams
    else:
        team_ids = icc_teams + ipl_teams
    enemy_team_id = random.choice(team_ids)
    # Fetch team and players data for both sides over one pooled connection.
    async with context.data_store.acquire() as connection:
        context.ds_connection = connection
        user_team = await get_team_by_id(
            team_id=game_input.team_id,
            context=context,
        )
        user_team_name = user_team["team_name"]
        user_team_players = await get_players_by_team_id(
            team_id=game_input.team_id,
            context=context,
        )
        enemy_team = await get_team_by_id(
            team_id=enemy_team_id,
            context=context,
        )
        enemy_team_name = enemy_team["team_name"]
        enemy_team_players = await get_players_by_team_id(
            team_id=enemy_team_id,
            context=context,
        )
    # Resolve the user's chosen lineups from player ids to player records.
    user_batting_lineup = []
    user_bowling_lineup = []
    player_data = {}
    for player in user_team_players:
        player_data[player["player_id"]] = player
    for player_id in game_input.batting_lineup:
        user_batting_lineup.append(player_data[player_id])
    for player_id in game_input.bowling_lineup:
        user_bowling_lineup.append(player_data[player_id])
    # Pad the bowling lineup by repetition up to 20 entries (one per over).
    # NOTE(review): loops forever if the lineup is empty — confirm upstream
    # validation guarantees a non-empty bowling lineup.
    while len(user_bowling_lineup) < 20:
        user_bowling_lineup += user_bowling_lineup
    user_bowling_lineup = user_bowling_lineup[:20]
    # generate batting and bowling lineups for bot team
    enemy_bowling_lineup = [
        player
        for player in enemy_team_players
        if player["player_type"] in ("bowler", "all-rounder")
    ]
    while len(enemy_bowling_lineup) < 20:
        enemy_bowling_lineup += enemy_bowling_lineup
    enemy_bowling_lineup = enemy_bowling_lineup[:20]
    # Toss: generate a random bit and decide who will bat first.
    toss_string = "%s won the toss and elected to bat first"
    toss = random.randint(0, 1)
    if toss == 0:
        game_results = await simulate_game(
            team_one_batting=user_batting_lineup,
            team_one_bowling=user_bowling_lineup,
            team_two_batting=enemy_team_players,
            team_two_bowling=enemy_bowling_lineup,
        )
        game_results["team_name"] = user_team_name
        game_results["enemy_team_name"] = enemy_team_name
        game_results["toss_result"] = toss_string % user_team_name
    else:
        game_results = await simulate_game(
            team_one_batting=enemy_team_players,
            team_one_bowling=enemy_bowling_lineup,
            team_two_batting=user_batting_lineup,
            team_two_bowling=user_bowling_lineup,
        )
        game_results["team_name"] = enemy_team_name
        game_results["enemy_team_name"] = user_team_name
        game_results["toss_result"] = toss_string % enemy_team_name
    # Update game metrics kept in the shared cache store.
    live_metrics = context.cache_store.get_dictionary("live_metrics")
    await context.logger.info("Live game started, updating metrics")
    if "games_played" not in live_metrics:
        live_metrics["games_played"] = 0
        live_metrics["games_live"] = 0
    live_metrics["games_played"] += 1
    live_metrics["games_live"] += 1
    # Attach a unique game id and both full rosters for the client.
    game_results["game_id"] = str(uuid.uuid4())
    game_results["user_team"] = player_data
    game_results["enemy_team"] = {
        player["player_id"]: player for player in enemy_team_players
    }
    # print(json.dumps(game_results, indent=4))
    return game_results
| bunsamosa/bowled_server | rest_server/live/start_game.py | start_game.py | py | 4,729 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fastapi.Request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "rest_server.live.api_models.LiveGameInput",
"line_number": 26,
"usage_type": "name"
},
{
"api_n... |
35080534874 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: make the event ``landscape`` and
    ``portrait`` image fields nullable with a default image."""

    dependencies = [
        ('event', '0036_auto_20150817_1057'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='landscape',
            # NOTE(review): upload_to='/media/' is an absolute path; Django
            # expects a path relative to MEDIA_ROOT — confirm intended.
            field=models.ImageField(null=True, upload_to='/media/', default='default.jpg'),
        ),
        migrations.AlterField(
            model_name='event',
            name='portrait',
            field=models.ImageField(null=True, upload_to='/media/', default='default.jpg'),
        ),
    ]
| smmsadrnezh/bilityab | event/migrations/0037_auto_20150817_2113.py | 0037_auto_20150817_2113.py | py | 638 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
41027393186 | from typing import Dict, List
def xyz_args(script_name, arg, current_index, args, _) -> tuple:
    """Resolve checkpoint names selected through the x/y/z plot script.

    Each entry of ``arg`` looks like ``"<model name> <hash>"``; the trailing
    hash token is stripped, the cleaned names are written back into
    ``args[current_index]`` in place, and a mapping of model type to the
    cleaned names is returned.

    Returns a 2-tuple ``(models_dict, None)`` — ``({}, None)`` when the
    script is not the x/y/z plot, when ``arg`` is empty or not a list, or
    when the axis being varied is not the checkpoint_name option.
    (The original annotation claimed ``Dict[str, List[str]]`` but the
    function has always returned a 2-tuple.)
    """
    if script_name != 'x/y/z plot':
        return {}, None
    if not arg or not isinstance(arg, list):
        return {}, None
    # 10 represents the checkpoint_name option for both img2img and txt2img
    # ref: xyz_grid.py#L204
    if current_index - 2 < 0 or args[current_index - 2] != 10:
        return {}, None
    # Drop the trailing hash token from every "<model name> <hash>" entry.
    models = [' '.join(md.split()[:-1]) for md in arg]
    for idx, val in enumerate(models):
        args[current_index][idx] = val
    return {'Stable-diffusion': models}, None
| awslabs/stable-diffusion-aws-extension | aws_extension/inference_scripts_helper/xyz_helper.py | xyz_helper.py | py | 610 | python | en | code | 111 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
28437687470 | import grpc
import os
import pickle
from concurrent import futures
from core.inference_service import InferenceService
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from protos import inference_pb2
from protos import inference_pb2_grpc
# Host/interface the gRPC servers bind to ("[::]" = all interfaces).
_LISTEN_HOST = "[::]"
# Default location of the pickled classifier backing the inference service.
_MODEL_PATH = "./lib/classifier.pkl"
# Worker threads for each gRPC server's executor.
_THREAD_POOL_SIZE = 4


def load_model(model_path: str = _MODEL_PATH):
    """Load the pickled classifier from *model_path*.

    Returns None when the file does not exist.  NOTE: unpickling executes
    arbitrary code — the model file must come from a trusted source.
    """
    if not os.path.exists(model_path):
        return None
    # Context manager closes the handle promptly; the original left the
    # file object to the garbage collector.
    with open(model_path, 'rb') as model_file:
        return pickle.load(model_file)
def _configure_health_server(server: grpc.Server, port: int) -> None:
    """Attach health-check and reflection servicers to *server* and bind it.

    All exported services (inference + reflection + health) are immediately
    marked SERVING.
    """
    # Bind the server to the given port on all interfaces (no TLS).
    listen_address = f"{_LISTEN_HOST}:{port}"
    server.add_insecure_port(listen_address)
    # Create a health check servicer. We use the non-blocking implementation
    # to avoid thread starvation.
    health_servicer = health.HealthServicer(
        experimental_non_blocking=True,
        experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=_THREAD_POOL_SIZE)
    )
    health_pb2_grpc.add_HealthServicer_to_server(health_servicer, server)
    # Create a tuple of all of the services we want to export via reflection.
    services = tuple(
        service.full_name
        for service in inference_pb2.DESCRIPTOR.services_by_name.values()) + (
        reflection.SERVICE_NAME, health.SERVICE_NAME)
    # Mark all services as healthy.
    for service in services:
        health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
    reflection.enable_server_reflection(services, server)
def _configure_inferencing_server(server: grpc.Server, port: int) -> None:
    """Register the inference servicer (with the loaded model) and bind *server*."""
    # Add the application servicer to the server.
    inference_pb2_grpc.add_InferenceServicer_to_server(InferenceService(load_model()), server)
    # Bind to the given port on all interfaces (no TLS).
    listen_address = f"{_LISTEN_HOST}:{port}"
    server.add_insecure_port(listen_address)
def serve(inferencing_port: int, health_port: int):
    """Start the inference and health gRPC servers and block until shutdown."""
    inferencing_server = grpc.server(futures.ThreadPoolExecutor(max_workers=_THREAD_POOL_SIZE))
    _configure_inferencing_server(inferencing_server, inferencing_port)
    inferencing_server.start()
    print(f"Inferencing server listening on port {inferencing_port}")
    health_server = grpc.server(futures.ThreadPoolExecutor(max_workers=_THREAD_POOL_SIZE))
    _configure_health_server(health_server, health_port)
    health_server.start()
    print(f"Health server listening on port {health_port}")
    # Block the main thread until both servers are terminated.
    inferencing_server.wait_for_termination()
    health_server.wait_for_termination()
if __name__ == "__main__":
    # Ports arrive as strings; they are interpolated into "host:port"
    # addresses by add_insecure_port, so no int() conversion is needed.
    inferencing_port = os.getenv("INFERENCING_PORT", "50051")
    # BUG FIX: this previously read INFERENCING_PORT again (copy-paste),
    # making the health port impossible to configure independently.
    health_port = os.getenv("HEALTH_PORT", "50039")
    serve(inferencing_port, health_port)
| liupeirong/MLOpsManufacturing | samples/edge-inferencing-and-mlops/grpc_inferencing_service/service/main.py | main.py | py | 2,764 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "grpc.Server",
"line_numbe... |
28419540730 | # -*- coding: utf-8 -*-
'''
猫狗分类
'''
# @Time : 2021/4/8 17:14
# @Author : LINYANZHEN
# @File : CatDogModel.py
import torch.nn as nn
class CDNet(nn.Module):
    """Binary cat/dog image classifier.

    Expects 3x128x128 RGB input: two 3x3 convolutions (3->8->8) that keep
    the spatial size (padding=1), a 2x2 max-pool down to 64x64, then a
    three-layer fully connected head ending in a 2-way softmax.
    """

    def __init__(self):
        super(CDNet, self).__init__()
        # Feature extractor: resolution is preserved by the convolutions
        # and halved once by the pooling layer.
        self.convent = nn.Sequential(
            nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(8, 8, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
        )
        # Classifier head over the flattened 8x64x64 feature map; heavy
        # dropout (p=0.8) between the hidden layers.
        self.linenet = nn.Sequential(
            nn.Linear(64 * 64 * 8, 1000),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.8),
            nn.Linear(1000, 1000),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.8),
            nn.Linear(1000, 2),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        features = self.convent(x)
        # Flatten every dimension except the batch dimension.
        flattened = features.view(features.size(0), -1)
        return self.linenet(flattened)
| AWSDJIKL/Artificial-Intelligence-and-Neural-Network | CatDogNet/CatDogModel.py | CatDogModel.py | py | 2,025 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
15066641758 | from django.db import models
from django.contrib.auth.models import User
from django.forms import CheckboxInput
class Base(models.Model):
    """Base model adding creation/update timestamps.

    NOTE(review): there is no ``class Meta: abstract = True``, so as
    written this also creates its own table — confirm that is intended.
    """

    # Set once when the row is created.
    create = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    update = models.DateTimeField(auto_now=True)
class Modelo(models.Model):
    """Equipment model referenced by orders (Ordem) and time estimates (Tempo)."""

    modelo = models.CharField(max_length=150)

    class Meta:
        ordering = ['modelo']

    def __str__(self):
        return self.modelo
class osnumero(models.Model):
    """Counter tracking the last issued service-order (OS) number.

    Ordem.save() reads/advances the most recent row of this table.
    """

    class Meta:
        verbose_name_plural = 'OS Números'

    numero = models.IntegerField()
class Tempo(models.Model):
    """Average stage duration expected for a given equipment model."""

    modelo = models.ForeignKey(Modelo, on_delete=models.CASCADE)
    # Which production stage this average applies to.
    etapa = models.CharField(choices=(
        ('1', 'Etapa Um'),
        ('2', 'Etapa Dois'),
        ('3', 'Etapa Três'),
    ), max_length=1)
    # Average duration; unit not specified here — presumably minutes (TODO confirm).
    media = models.IntegerField()

    def __str__(self):
        return 'Tempo'
class Funcionario(Base):
    """Employee record: either a salesperson ('V') or an operator ('O')."""

    class Meta:
        verbose_name_plural = 'Funcionários'
        ordering = ['nome']

    nome = models.CharField(max_length=60)
    contato = models.CharField(max_length=15)
    tipo = models.CharField(choices=(
        ('V', 'Vendedor'),
        ('O', 'Operador'),
    ), max_length=1)

    def __str__(self):
        return self.nome
class Ordem(Base):
    """Service order (OS) opened by a salesperson for a customer.

    A sequential OS number (``numeros``) is assigned on first save from the
    single-row ``osnumero`` counter.
    """

    class Meta:
        verbose_name_plural = 'Ordens'

    vendedor = models.ForeignKey(User, on_delete=models.CASCADE)
    modelo = models.ForeignKey(Modelo, on_delete=models.CASCADE)
    cliente = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
    # Sequential OS number; managed by save() via the osnumero counter.
    numeros = models.IntegerField(default=1)
    tem_acessorio = models.BooleanField(default=False, verbose_name="Tem acessório?")
    tipo = models.CharField(choices=(
        ('N', 'Nova'),
        ('R', 'Retrabalho'),
        ('G', 'Garantia'),
        ('2', 'Outro'),
    ), max_length=1)
    orcamento = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    # NOTE(review): all three dates use auto_now_add, so approval/delivery
    # dates are frozen at creation time — confirm that is intended.
    data_orcamento = models.DateTimeField(auto_now_add=True)
    data_aprovacao = models.DateTimeField(auto_now_add=True)
    data_entrega = models.DateTimeField(auto_now_add=True)
    status = models.CharField(choices=(
        ('0', 'inciada'),
        ('1', 'Andamento'),
        ('2', 'Aguardando'),
        ('3', 'Finalizada'),
        ('4', 'Pendente de Abertura')
    ), max_length=1, default='4')
    prioridade = models.IntegerField(default=1)
    # Component quantities attached to the order.
    lateral = models.IntegerField(default=0)
    defletor = models.IntegerField(default=0)
    conexoes = models.IntegerField(default=0)
    grade = models.IntegerField(default=0)
    bracadeira = models.IntegerField(default=0)
    tampa = models.IntegerField(default=0)
    coxin = models.IntegerField(default=0)
    mangueira = models.IntegerField(default=0)
    colmeia = models.IntegerField(default=0)
    obs_ordem = models.TextField(default='Nada')

    def save(self, *args, **kwargs):
        """Assign the next sequential OS number on first save.

        NOTE(review): the read-increment-write on osnumero is not atomic;
        two concurrent saves could obtain the same number — consider
        wrapping in a transaction with select_for_update.
        """
        if not self.pk:  # new instance of Ordem (no primary key yet)
            ultimo_numero_os = osnumero.objects.last()
            if ultimo_numero_os:
                self.numeros = ultimo_numero_os.numero + 1
                ultimo_numero_os.numero = self.numeros
                ultimo_numero_os.save()
            else:
                self.numeros = 1
                osnumero.objects.create(numero=self.numeros)
        super().save(*args, **kwargs)

    @classmethod
    def get_default_numeros(cls):
        # Next number the counter would hand out (used as a form default).
        ultimo_numero_os = osnumero.objects.last()
        if ultimo_numero_os:
            return ultimo_numero_os.numero + 1
        return 1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Make the default of 'numeros' reflect the live counter value.
        self._meta.get_field('numeros').default = self.get_default_numeros
class Etapa(Base):
    """One production-stage execution for an order.

    Tracks start/stop/resume timestamps, stoppage bookkeeping, which
    services were performed ('S'/'N' flags), and stoppage counters broken
    down by cause.
    """

    id_ordem = models.ForeignKey(Ordem, on_delete=models.CASCADE, editable=False)
    os_ordem = models.IntegerField(default=0)
    operador = models.ForeignKey(Funcionario, on_delete=models.CASCADE, default=3)
    fase = models.CharField(choices=(
        ('1', 'Fase1'),
        ('2', 'Fase2'),
        ('3', 'Fase3'),
    ), max_length=1)
    # Expected average duration for this stage (see Tempo).
    media = models.IntegerField()
    # Timing checkpoints; null until the corresponding event happens.
    inicio = models.DateTimeField(blank=True, null=True)
    parada = models.DateTimeField(blank=True, null=True)
    retomada = models.DateTimeField(blank=True, null=True)
    fim = models.DateTimeField(blank=True, null=True)
    decorrido = models.IntegerField(default=0)
    quantidadeParadas = models.IntegerField(default=0, verbose_name='Quantiadade de Paradas')
    parado = models.IntegerField(default=0, verbose_name='Total Parado Bruto')
    controleParado = models.IntegerField(default=0, verbose_name='Total Parado Efetivo')
    # Services performed in this stage ('S' = yes, 'N' = no).
    limpesa = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    conserto = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    caixa_sup = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    caixa_inf = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    elemento = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    bocal = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    rad_novo = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    colmeia = models.CharField(choices=(
        ('S', 'Sim'),
        ('N', 'Não'),
    ), max_length=1, default='N')
    mostrar = models.CharField(choices=(
        ('1', 'Sim'),
        ('0', 'Não'),
        ('2', 'Ativo'),
    ), max_length=1, default='0')
    status = models.CharField(choices=(
        ('0', 'Aberto'),
        ('1', 'Executando'),
        ('6', 'Aguardando'),
        ('8', 'Parado'),
        ('2', 'Retomar'),
        ('3', 'Final'),
    ), max_length=1, default='0')
    obs_etapa = models.TextField(default='Nada')
    # Stoppage counters by cause; unit not specified here — presumably
    # minutes/seconds (TODO confirm against the apps that write them).
    almoco = models.IntegerField(default=0)
    fim_de_turno = models.IntegerField(default=0)
    setup = models.IntegerField(default=0)
    faltaMaterial = models.IntegerField(default=0)
    quebraFerramenta = models.IntegerField(default=0)
    necessidadesPessoais = models.IntegerField(default=0)
outros = models.IntegerField(default=0) | ctedescojr/apontamentos-gestao-visual-producao | home/models.py | models.py | py | 6,341 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name... |
10955676150 | import re, math
from googletrans import Translator
from collections import Counter
# Shared googletrans client used for all translation calls below.
translator=Translator()
# Matches runs of word characters; used for tokenisation.
WORD = re.compile(r'\w+')
# Parallel corpora: Bangla questions and their English counterparts.
lines1=open('C:/Users/Fahim/Desktop/Final_thesis/QUE_Bangla.txt',encoding='utf8').read().split('\n')
lines2=open('C:/Users/Fahim/Desktop/Final_thesis/Questions_final2.txt',encoding='utf8').read().split('\n')
path=''
# Output file: accepted Bangla question pairs are appended here.
# NOTE(review): file handles are never closed — acceptable for a one-shot script.
train_bangla1 = open(path + 'bangla_convn.txt','a',encoding='utf8')
file=[]
def clean_text(text):
    '''Clean text by removing unnecessary characters and altering the format of words.

    Lower-cases the input and expands common English contractions so the
    cosine comparison operates on normalised tokens.
    '''
    text = text.lower()
    text = re.sub(r"i'm", "i am", text)
    text = re.sub(r"he's", "he is", text)
    text = re.sub(r"she's", "she is", text)
    text = re.sub(r"it's", "it is", text)
    text = re.sub(r"that's", "that is", text)
    # BUG FIX: "what's" was previously expanded to "that is".
    text = re.sub(r"what's", "what is", text)
    text = re.sub(r"where's", "where is", text)
    text = re.sub(r"how's", "how is", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"\'ve", " have", text)
    # (A duplicate "\'re" rule was removed — it was a no-op after this one.)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    # "won't"/"can't" are irregular, so handle them before the generic "n't".
    text = re.sub(r"won't", "will not", text)
    text = re.sub(r"can't", "cannot", text)
    text = re.sub(r"n't", " not", text)
    return text
def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse term-frequency vectors (dicts).

    Returns 0.0 when either vector has zero magnitude (e.g. is empty).
    """
    shared_terms = vec1.keys() & vec2.keys()
    dot_product = sum(vec1[term] * vec2[term] for term in shared_terms)
    magnitude = math.sqrt(sum(count ** 2 for count in vec1.values()))
    magnitude *= math.sqrt(sum(count ** 2 for count in vec2.values()))
    if not magnitude:
        return 0.0
    return float(dot_product) / magnitude
def text_to_vector(text):
    """Tokenise *text* into a Counter of word frequencies.

    Normalises via clean_text() (which also lower-cases) before splitting
    on runs of word characters.
    """
    normalised = clean_text(text.lower())
    return Counter(re.findall(r'\w+', normalised))
# Resume position in the corpus (lines already processed earlier runs).
i=13340
# Count of accepted pairs written to the output file.
num=0
# Process lines two at a time: translate each Bangla line to English and
# compare it to the reference English line; keep the pair only when the
# combined cosine similarity of the two lines exceeds 1.6.
for j in range(0, 39516):
    translation = translator.translate(lines1[i], dest='en')
    english1= str(translation.text)
    text1=lines2[i]
    text2=english1
    vector1 = text_to_vector(text1)
    vector2 = text_to_vector(text2)
    cosine1 = get_cosine(vector1, vector2)
    i=i+1
    translation = translator.translate(lines1[i], dest='en')
    english2 = str(translation.text)
    text1 = lines2[i]
    text2 = english2
    vector1 = text_to_vector(text1)
    vector2 = text_to_vector(text2)
    cosine2 = get_cosine(vector1, vector2)
    # Sum of the two similarities (each in [0, 1]).
    cosine= cosine1+cosine2
    #print(cosine)
    if cosine>1.6:
        train_bangla1.write(lines1[i-1] + '\n')
        train_bangla1.write(lines1[i] + '\n')
        num=num+1
    print("Total Taken",i)
    i=i+1
| yousuffahim8/Bengali-Social-Virtual-Robot- | Code/fahim_translation.py | fahim_translation.py | py | 2,677 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "googletrans.Translator",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 16... |
9030843872 | import itertools
import os
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from multiprocessing import Pool, cpu_count
from diskcache import Cache
from conf import OUTPUT_FOLDER, CACHE_FOLDER, EPG_FILE, DOWNLOAD_EXTRA_INFO, DAYS_TO_DOWNLOAD, DELAYS, HD_CHANNELS
from date_time import DateTime
from movistar import Movistar
class EPGGenerator(object):
    """Builds an XMLTV EPG file from Movistar programming data.

    Downloads channel/programme listings, expands HD and time-shifted
    channel variants, caches per-programme details on disk, and dumps the
    result as an XMLTV document at EPG_FILE.
    """

    def __init__(self):
        # Disk-backed cache of per-programme info, keyed by cod_evento_rejilla.
        self.cache = Cache(CACHE_FOLDER)

    def run(self):
        """Top-level pipeline: download -> merge -> transform -> dump."""
        movistar_data = self.download_movistar_data()
        channels_data, programmes_data = self.merge_movistar_data(movistar_data)
        channels, programmes = self.generate_epg_data(channels_data, programmes_data)
        self.dump_epg_data(channels, programmes)
        # Evict expired entries so the cache does not grow unbounded.
        self.cache.expire()

    def download_movistar_data(self):
        """Download programming for yesterday through DAYS_TO_DOWNLOAD - 1 days ahead."""
        print("Downloading movistar data...")
        channels = Movistar.get_channels()
        # range(-1, ...) includes the previous day's schedule.
        return [
            Movistar.get_programation(
                (datetime.now() + timedelta(days=days)).strftime("%Y-%m-%d"),
                channels,
            )
            for days in range(-1, DAYS_TO_DOWNLOAD)
        ]

    def merge_movistar_data(self, data):
        """Flatten per-day payloads and de-duplicate channels/programmes by id."""
        data_channels = []
        data_programmes = []
        for dt in data:
            data_channels.extend(dt["channels"])
        for dt in data:
            data_programmes.extend(dt["channelsProgram"][0])
            data_programmes.extend(dt["channelsProgramDayBefore"][0])
        # Dict comprehension keeps the last occurrence of each id.
        channels = list({v["cod_cadena_tv"]: v for v in data_channels}.values())
        programmes = list({v["cod_evento_rejilla"]: v for v in data_programmes}.values())
        return channels, programmes

    def create_channels(self, channel_data):
        """Return the base channel plus its HD and time-shifted (-Nh) variants."""
        channels = [self.create_channel(channel_data)]
        if channel_data["cod_cadena_tv"] in HD_CHANNELS:
            channel = dict(channels[0])  # We clone the original channel
            channel["name"] = f"{channel['name']} HD"
            channel["code"] = f"{channel['code']} HD"
            channels.append(channel)
        delays = DELAYS.get(channel_data["cod_cadena_tv"], 0)
        for delay in range(1, delays + 1):
            # Each variant is cloned from the base channel (channels[0]),
            # not from the previous variant.
            channel = dict(channels[0])  # We clone the original channel
            channel["name"] = f"{channel['name']}-{delay}h"
            channel["code"] = f"{channel['code']}-{delay}h"
            channels.append(channel)
        return channels

    def create_channel(self, channel):
        """Map a raw Movistar channel record to the EPG channel dict."""
        return {
            "name": channel["des_cadena_tv"],
            "code": channel["cod_cadena_tv"],
            "logo": Movistar.get_channel_logo(channel["cod_cadena_tv"])
        }

    def create_programmes(self, programme_data):
        """Return the programme plus copies for HD and delayed channel variants."""
        programmes = [self.create_programme(programme_data)]
        if programme_data["cod_cadena_tv"] in HD_CHANNELS:
            programme = dict(programmes[0])  # We clone the original programme
            programme["channel"] = f"{programme['channel']} HD"
            programmes.append(programme)
        delays = DELAYS.get(programme_data["cod_cadena_tv"], 0)
        for delay in range(1, delays + 1):
            # Clone from the base programme and shift its times by `delay` hours.
            programme = dict(programmes[0])  # We clone the original programme
            programme["channel"] = f"{programme['channel']}-{delay}h"
            programme["start"] += timedelta(hours=delay)
            programme["stop"] += timedelta(hours=delay)
            programmes.append(programme)
        return programmes

    def create_programme(self, programme):
        """Build (or fetch from the disk cache) the EPG dict for one programme."""
        cer = programme["cod_evento_rejilla"]
        if cer in self.cache:
            return self.cache.get(cer)
        info = {
            "channel": programme["cod_cadena_tv"],
            "title": programme["des_evento_rejilla"],
            "category": programme["des_genero"],
            "start": DateTime().parse(programme["f_evento_rejilla"]),
            "stop": DateTime().parse(programme["f_fin_evento_rejilla"])
        }
        cee = programme["cod_elemento_emision"]
        if cee and DOWNLOAD_EXTRA_INFO:
            info.update(Movistar.get_extra_info(cee))
        # DAYS_TO_DOWNLOAD + 1 we add the day before data | 86400 seconds in a day
        expire_time = (DAYS_TO_DOWNLOAD + 1) * 86400
        self.cache.set(cer, info, expire=expire_time)
        return info

    def generate_epg_data(self, channels, programmes):
        """Expand raw records into EPG dicts in parallel across all CPU cores."""
        print("Generating epg data...")
        p = Pool(cpu_count())
        epg_channels = p.map(self.create_channels, channels)
        p.terminate()
        p.join()
        p = Pool(cpu_count())
        epg_programmes = p.map(self.create_programmes, programmes)
        p.terminate()
        p.join()
        # Each worker returned a list of variants; flatten them lazily.
        return itertools.chain(*epg_channels), itertools.chain(*epg_programmes)

    def dump_epg_data(self, channels, programmes):
        """Serialise channels and programmes into an XMLTV file at EPG_FILE."""
        print("Dumping epg data to xml...")
        tv = ET.Element("tv")
        tv.set("date", datetime.now().strftime("%Y-%m-%d"))
        tv.set("source-info-url", "http://comunicacion.movistarplus.es/programacion/")
        tv.set("source-info-name", "Movistar")
        tv.set("generator-info-name", "Movistar EPG generator")
        tv.set("generator-info-url", "https://github.com/oscarbc96/epg_generator")
        for channel_data in channels:
            channel = ET.SubElement(tv, "channel")
            channel.set("id", channel_data["code"])
            display_name = ET.SubElement(channel, "display-name")
            display_name.set("lang", "es")
            display_name.text = channel_data["name"]
            icon = ET.SubElement(channel, "icon")
            icon.set("src", channel_data["logo"])
        for programme_data in programmes:
            programme = ET.SubElement(tv, "programme")
            programme.set("start", DateTime().format(programme_data["start"]))
            programme.set("stop", DateTime().format(programme_data["stop"]))
            programme.set("channel", programme_data["channel"])
            title = ET.SubElement(programme, "title")
            title.set("lang", "es")
            title.text = programme_data["title"]
            category = ET.SubElement(programme, "category")
            category.set("lang", "es")
            category.text = programme_data["category"]
            # Optional fields, only present when extra info was downloaded.
            if "desc" in programme_data:
                desc = ET.SubElement(programme, "desc")
                desc.set("lang", "es")
                desc.text = programme_data["desc"]
            if "image" in programme_data:
                icon = ET.SubElement(programme, "icon")
                icon.set("src", programme_data["image"])
            if "age_rating" in programme_data:
                rating = ET.SubElement(programme, "rating")
                rating.set("system", "ES")
                value = ET.SubElement(rating, "value")
                value.text = programme_data["age_rating"]
            if "details" in programme_data:
                details = programme_data["details"]
                if "temporada" in details and "capitulo" in details:
                    season = details["temporada"]
                    chapter = details["capitulo"]
                    episode_num = ET.SubElement(programme, "episode-num")
                    episode_num.set("system", "xmltv_ns")
                    episode_num.text = f"{season}.{chapter}.0/1"
                # NOTE: local name shadows the 'credits' builtin (harmless here).
                credits = ET.SubElement(programme, "credits")
                if "actor" in details:
                    for actor_data in details["actor"]:
                        actor = ET.SubElement(credits, "actor")
                        actor.text = actor_data
                if "director" in details:
                    for director_data in details["director"]:
                        director = ET.SubElement(credits, "director")
                        director.text = director_data
        xml = ET.tostring(tv, encoding="ISO-8859-1", method="xml")
        if not os.path.exists(OUTPUT_FOLDER):
            os.makedirs(OUTPUT_FOLDER)
        output = open(EPG_FILE, "wb")
        output.write(xml)
        output.close()
if __name__ == "__main__":
    # Print run configuration, run the generator, and report elapsed time.
    print("EPG Generator")
    print(f"N. cores: {cpu_count()}")
    print(f"Download extra info: {DOWNLOAD_EXTRA_INFO}")
    print(f"Days to download: {DAYS_TO_DOWNLOAD}")
    start = datetime.now()
    print(f"Start: {start}")
    EPGGenerator().run()
    stop = datetime.now()
    print(f"Stop: {stop}")
    print(f"Execution time: {stop - start}")
| oscarbc96/epg_generator | epg_generator.py | epg_generator.py | py | 8,413 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "diskcache.Cache",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "conf.CACHE_FOLDER",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "movistar.Movistar.get_channels",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "... |
34020545749 |
# coding: utf-8
# In[21]:
import json
import urllib.request
import time
# Count repository languages for one GitHub user, then find which of a
# list of users has the most repositories and tally their languages.
user = 'whyisjake'
url = 'https://api.github.com/users/%s/repos' % user
users = ['elmiram', 'nevmenandr', 'shwars', 'JelteF', 'timgraham', 'arogozhnikov', 'jasny', 'bcongdon', 'whyisjake']
response = urllib.request.urlopen(url)
text = response.read().decode('utf-8')
data = json.loads(text)
print(len(data))
# Language -> repo count for the single user above.
lang_arr = {}
for i in data:
    #print('%s %s'%(i['name'], i['description']))
    if i['language'] not in lang_arr:
        lang_arr[i['language']] = 1
    else:
        lang_arr[i['language']] += 1
#print(lang_arr)
# Track the user with the most repositories (API caps at 30 per page).
maxim = 0
max_us = ''
lang_arr1 = {}
for user in users:
    url = 'https://api.github.com/users/%s/repos' % user
    req = urllib.request.Request(url)
    # SECURITY(review): personal access token is hardcoded in source —
    # it should be revoked and read from the environment instead.
    req.add_header('Authorization', 'token 8ce69c608c02eb9a555161c2e4fe77632275cd05')
    result = urllib.request.urlopen(req)
    text = result.read().decode('utf-8')
    data = json.loads(text)
    if len(data) >= maxim:
        maxim = len(data)
        max_us = user
    for i in data:
        if i['language'] not in lang_arr1:
            lang_arr1[i['language']] = 1
        else:
            lang_arr1[i['language']] += 1
print(max_us)
print(lang_arr1)


# In[ ]:
| aischeveva/hw_python2017 | sem071017_json.py | sem071017_json.py | py | 1,230 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_nam... |
4848988793 | from datetime import datetime
import os
from random import randint
import tempfile
from time import sleep
from urllib.parse import parse_qs, urlparse
from uuid import uuid1
import tator
def test_get_file(host, token, project, video):
    """Download a media file to a temp dir and check it landed on disk."""
    api = tator.get_api(host, token)
    media = api.get_media(video)
    with tempfile.TemporaryDirectory() as tmp_dir:
        target = os.path.join(tmp_dir, "video.mp4")
        for pct in tator.download_media(api, media, target):
            print(f"Video download progress: {pct}%")
        assert os.path.exists(target)
def test_get_audio(host, token, project, video):
    """The test video should expose at least one AAC audio stream."""
    api = tator.get_api(host, token)
    audio_files = api.get_media(video).media_files.audio
    assert len(audio_files) > 0
    assert audio_files[0].codec == 'aac'
def test_get_by_id(host, token, project, video):
    """Fetching by id-list must agree with fetching the single media object."""
    api = tator.get_api(host, token)
    direct = api.get_media(video)
    via_list = api.get_media_list_by_id(project, {'ids': [video]})
    assert direct.id == via_list[0].id
    # The matching count endpoint should report exactly one hit.
    assert api.get_media_count_by_id(project, {'ids': [video]}) == 1
def test_archive(host, token, project, video):
    """Exercise the media archive-state lifecycle and its list filters.

    Walks a single video through: default "live" state, filtering by
    `archive_lifecycle` in each state, bulk transition to "to_archive",
    idempotence of re-requesting "to_archive", and the "to_live" request
    short-circuiting straight back to "live" when the media was never
    actually archived.
    """
    tator_api = tator.get_api(host, token)
    video_obj = tator_api.get_media(video)
    # Test default value of `archived` is "live"
    assert video_obj.archive_state == "live"
    # Test default `get_media_list` filters on `archive_lifecycle == "all"`
    response = tator_api.get_media_list(project, media_id=[video])
    assert len(response) == 1
    assert response[0].archive_state == "live"
    # Test `get_media_list` with `archive_lifecycle="archived"` doesn't return "live" objects
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="archived")
    assert len(response) == 0
    # Test returning subset of media that is live
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="live")
    assert len(response) == 1
    assert response[0].archive_state == "live"
    # Test returning subset of media that has any `archive_lifecycle` state
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="all")
    assert len(response) == 1
    assert response[0].archive_state == "live"
    # Mark the video to archive (bulk endpoint, single id)
    tator_api.update_media_list(project, {"archive_state": "to_archive", "ids": [video]})
    # Wait for update to propagate to ES
    # NOTE(review): fixed 2s sleep assumes Elasticsearch indexes within that
    # window — could be flaky on a loaded server.
    sleep(2)
    # Test default `get_media_list` filters on `archive_lifecycle == "all"`
    response = tator_api.get_media_list(project, media_id=[video])
    assert len(response) == 1
    assert response[0].archive_state == "to_archive"
    # Test `get_media_list` with `archive_lifecycle="archived"` returns archived objects
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="archived")
    assert len(response) == 1
    assert response[0].archive_state == "to_archive"
    # Test returning subset of media that is live
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="live")
    assert len(response) == 0
    # Test returning subset of media that has any `archive_state` state
    response = tator_api.get_media_list(project, media_id=[video], archive_lifecycle="all")
    assert len(response) == 1
    assert response[0].archive_state == "to_archive"
    # Additional attempts to set the state to `to_archive` will not change anything
    tator_api.update_media(video, {"archive_state": "to_archive"})
    video_obj = tator_api.get_media(video)
    assert video_obj.archive_state == "to_archive"
    # Setting the `archive_state` to `to_live` when a media is in the state `to_archive` should
    # result in the media's state changing to `live`
    tator_api.update_media(video, {"archive_state": "to_live"})
    video_obj = tator_api.get_media(video)
    assert video_obj.archive_state == "live"
def test_section(host, token, project, video):
    """`tator_user_sections` can be set and cleared via single and bulk updates."""
    api = tator.get_api(host, token)
    # Create test section backed by a fresh UUID.
    section_uuid = str(uuid1())
    api.create_section(
        project, section_spec={"name": "Test Section", "tator_user_sections": section_uuid}
    )
    set_spec = {"attributes": {"tator_user_sections": section_uuid}}
    clear_spec = {"attributes": {"tator_user_sections": ""}}

    # Single-media update sets the attribute.
    api.update_media(video, set_spec)
    assert api.get_media(video).attributes["tator_user_sections"] == section_uuid

    # Single-media update clears it again.
    api.update_media(video, clear_spec)
    assert api.get_media(video).attributes["tator_user_sections"] == ""

    # Bulk update sets the attribute.
    api.update_media_list(project, media_id=[video], media_bulk_update=set_spec)
    assert api.get_media(video).attributes["tator_user_sections"] == section_uuid

    # Bulk update clears it.
    api.update_media_list(project, media_id=[video], media_bulk_update=clear_spec)
    assert api.get_media(video).attributes["tator_user_sections"] == ""
def test_import_multiple_images(host, token, project, image_type):
    """Bulk-create several image media from one URL and poll until ingested.

    Checks that `create_media_list` returns quickly with one id per spec,
    then polls (up to ~30s each) for the project media count to grow and
    for every created media to have `media_files` populated.
    """
    tator_api = tator.get_api(host, token)
    image_url = "https://www.gstatic.com/webp/gallery/1.jpg"
    n_images = 5
    # Unique prefix so this run's images are distinguishable by name.
    uuid = str(uuid1())
    project_media_count = tator_api.get_media_count(project, type=image_type)
    media_specs = [
        {
            "type": image_type,
            "section": "Multiple image upload",
            "name": f"{uuid}_{idx}.jpg",
            "url": image_url,
            "md5": "",
        }
        for idx in range(n_images)
    ]
    start = datetime.now()
    response = tator_api.create_media_list(project, body=media_specs)
    created_ids = response.id
    duration = (datetime.now() - start).total_seconds()
    # The create call itself must be fast; the actual downloads happen async.
    # NOTE(review): wall-clock bound — may be flaky on a slow network.
    assert duration < 5
    assert str(len(media_specs)) in response.message
    assert len(media_specs) == len(created_ids)
    new_project_media_count = -1
    desired_project_media_count = project_media_count + len(media_specs)
    # Poll up to ~30s for the media count to reflect the new images.
    for _ in range(30):
        sleep(1)
        new_project_media_count = tator_api.get_media_count(project, type=image_type)
        if new_project_media_count == desired_project_media_count:
            break
    assert new_project_media_count == desired_project_media_count
    # Poll up to ~30s more for every created media to have files attached.
    for _ in range(30):
        sleep(1)
        media_list = tator_api.get_media_list(project, type=image_type, media_id=created_ids)
        n_with_media_files = 0
        for media in media_list:
            if media.name.startswith(uuid) and media.media_files is not None:
                n_with_media_files += 1
        if n_with_media_files == n_images:
            break
    assert n_with_media_files == n_images
def parse_url(url):
    """Split *url* into (path, query-dict) using urllib's parsers."""
    pieces = urlparse(url)
    return pieces.path, parse_qs(pieces.query)
def parse_media_files(media):
    """Map each file path on *media* to its parsed query parameters.

    Walks every file-spec list in ``media.media_files`` (skipping absent
    categories) and returns {path: query_dict} for specs that carry a path.
    """
    result = {}
    for file_specs in media.media_files.to_dict().values():
        if file_specs is None:
            continue
        for spec in file_specs:
            if "path" in spec:
                path, query = parse_url(spec["path"])
                result[path] = query
    return result
def test_presigned_no_cache(host, token, project, video_type, video_file):
    """Verify presigned-URL caching semantics of `get_media`.

    Uploads a fresh video (so the presigned-URL cache is empty), then checks:
    requesting a different expiry WITHOUT `no_cache` returns the cached URLs;
    WITH `no_cache=True` it returns new URLs with the new expiry; and a later
    call without `no_cache` still serves the originally cached expiry.
    """
    # Query-string key holding the presigned expiry, per the S3 signing scheme.
    expires_key = "X-Amz-Expires"
    # Set up new video to ensure a clean cache
    tator_api = tator.get_api(host, token)
    uuid_val = str(uuid1())
    attributes = {"test_string": uuid_val}
    for progress, response in tator.util.upload_media(
        tator_api, video_type, video_file, attributes=attributes
    ):
        print(f"Upload video progress: {progress}%")
    print(response.message)
    # Poll until the transcode produces 4 streaming variants plus an archival
    # copy. NOTE(review): loop has no timeout — hangs forever if transcode fails.
    while True:
        response = tator_api.get_media_list(
            project,
            name='AudioVideoSyncTest_BallastMedia.mp4',
            attribute=[f"test_string::{uuid_val}"],
        )
        print("Waiting for transcode...")
        sleep(2.5)
        if len(response) == 0:
            continue
        if response[0].media_files is None:
            continue
        streaming = response[0].media_files.streaming
        have_archival = response[0].media_files.archival is not None
        if streaming and have_archival and len(streaming) == 4:
            video_id = response[0].id
            break
    # Get initial presigned url; pick two distinct random durations so the
    # cached and fresh expiries are guaranteed to differ.
    original_presigned_duration = new_presigned_duration = randint(8640, 86400)
    while new_presigned_duration == original_presigned_duration:
        new_presigned_duration = randint(8640, 86400)
    video_obj = tator_api.get_media(video_id, presigned=original_presigned_duration)
    init_presigned_url = parse_media_files(video_obj)
    # Request a new duration without the `no_cache` flag and assert it returns the same urls
    video_obj = tator_api.get_media(video_id, presigned=new_presigned_duration, no_cache=False)
    no_cache_false_presigned_url = parse_media_files(video_obj)
    for path, query_params in no_cache_false_presigned_url.items():
        assert path in init_presigned_url
        # Cached URL retains the ORIGINAL expiry despite the new request value.
        assert int(query_params[expires_key][0]) == original_presigned_duration
    # Request a new duration with the `no_cache` flag and assert it returns new urls
    # (same object paths, but freshly signed with the new expiry).
    video_obj = tator_api.get_media(video_id, presigned=new_presigned_duration, no_cache=True)
    no_cache_false_presigned_url = parse_media_files(video_obj)
    for path, query_params in no_cache_false_presigned_url.items():
        assert path in init_presigned_url
        assert int(query_params[expires_key][0]) == new_presigned_duration
    # Request a new duration without the `no_cache` flag again and assert it returns the original
    # cached urls and not the ones from the new duration
    video_obj = tator_api.get_media(video_id, presigned=original_presigned_duration, no_cache=False)
    no_cache_false_presigned_url = parse_media_files(video_obj)
    for path, query_params in no_cache_false_presigned_url.items():
        assert path in init_presigned_url
        assert int(query_params[expires_key][0]) == original_presigned_duration
| cvisionai/tator-py | test/test_media.py | test_media.py | py | 10,371 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "tator.get_api",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.