blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c1862a4a570e4842c1fe5594ee31588c50f1f4de | e80d4832e433890d6e797b32968ff09e59bff5aa | /bin/esptool3.py | 7d81629633d0284af03c8889b2934f860a425c5f | [] | no_license | menlsux/xtensa-toolchain | 4bd6e9d9f8e20876eda20f916d8fdaf3d037d010 | fcb52faaf658c4bcc46f3c93698fdd77b37f89f4 | refs/heads/master | 2023-05-28T22:46:18.594150 | 2021-05-28T12:01:45 | 2021-05-28T12:01:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,365 | py | #!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import math
import serial
import time
import argparse
import os
import platform
import subprocess
import tempfile
def identify_platform():
    """Return the host OS name, treating Cygwin as plain Windows."""
    name = platform.system()
    return 'Windows' if 'CYGWIN_NT' in name else name
def get_pwd():
    """Return the absolute directory containing this script."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def get_nm():
    """Return the path of the bundled xtensa-lx106-elf-nm binary.

    The two platform branches differed only in the ``.exe`` suffix
    (required on Windows/Cygwin), so the duplicated path strings are
    collapsed into one; stray C-style semicolons removed.
    """
    suffix = ".exe" if 'Windows' in identify_platform() else ""
    return get_pwd() + "/../xtensa-lx106-elf/bin/xtensa-lx106-elf-nm" + suffix
def get_objcopy():
    """Return the path of the bundled xtensa-lx106-elf-objcopy binary.

    Mirrors get_nm(): the platform branches differed only in the ``.exe``
    suffix needed on Windows/Cygwin, so the duplication is collapsed.
    """
    suffix = ".exe" if 'Windows' in identify_platform() else ""
    return get_pwd() + "/../xtensa-lx106-elf/bin/xtensa-lx106-elf-objcopy" + suffix
class ESPROM:
    """Serial-port client for the ESP8266 mask-ROM bootloader.

    Implements the SLIP-framed command protocol: each request is wrapped
    in 0xC0 delimiters with 0xDB escaping, carries an opcode, body length
    and checksum, and is answered by a response parsed in
    receive_response().

    NOTE(review): this class mixes Python 2 idioms (str-based serial
    comparisons such as ``c == '\\xdb'`` and ``b = b + c`` accumulation)
    with Python 3 syntax used elsewhere in the file. Under Python 3,
    pyserial returns ``bytes`` and several comparisons below would never
    match — confirm the intended interpreter before relying on this code.
    """
    # These are the currently known commands supported by the ROM
    ESP_FLASH_BEGIN = 0x02
    ESP_FLASH_DATA = 0x03
    ESP_FLASH_END = 0x04
    ESP_MEM_BEGIN = 0x05
    ESP_MEM_END = 0x06
    ESP_MEM_DATA = 0x07
    ESP_SYNC = 0x08
    ESP_WRITE_REG = 0x09
    ESP_READ_REG = 0x0a
    # Maximum block sized for RAM and Flash writes, respectively.
    ESP_RAM_BLOCK = 0x1800
    ESP_FLASH_BLOCK = 0x400
    # Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
    ESP_ROM_BAUD = 115200
    # First byte of the application image
    ESP_IMAGE_MAGIC = 0xe9
    # Initial state for the checksum routine
    ESP_CHECKSUM_MAGIC = 0xef
    # OTP ROM addresses
    ESP_OTP_MAC0 = 0x3ff00050
    ESP_OTP_MAC1 = 0x3ff00054
    ESP_OTP_MAC2 = 0x3ff00058
    ESP_OTP_MAC3 = 0x3ff0005c
    # Sflash stub: an assembly routine to read from spi flash and send to host
    SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
        "\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
        "\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
        "\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"

    def __init__(self, port=0, baud=ESP_ROM_BAUD):
        # Open the serial port first, then switch speed; see comment below.
        self._port = serial.Serial(port)
        # setting baud rate in a separate step is a workaround for
        # CH341 driver on some Linux versions (this opens at 9600 then
        # sets), shouldn't matter for other platforms/drivers. See
        # https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
        self._port.baudrate = baud

    """ Read bytes from the serial port while performing SLIP unescaping """
    def read(self, length=1):
        # SLIP escape handling: 0xDB 0xDC -> 0xC0, 0xDB 0xDD -> 0xDB.
        b = ''
        while len(b) < length:
            c = self._port.read(1)
            if c == '\xdb':
                c = self._port.read(1)
                if c == '\xdc':
                    b = b + '\xc0'
                elif c == '\xdd':
                    b = b + '\xdb'
                else:
                    raise FatalError('Invalid SLIP escape')
            else:
                b = b + c
        return b

    """ Write bytes to the serial port while performing SLIP escaping """
    def write(self, packet):
        # Escape 0xDB first so the 0xC0 escape sequence isn't re-escaped,
        # then frame the packet with 0xC0 delimiters.
        buf = '\xc0' \
            + (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \
            + '\xc0'
        self._port.write(buf)

    """ Calculate checksum of a blob, as it is defined by the ROM """
    @staticmethod
    def checksum(data, state=ESP_CHECKSUM_MAGIC):
        # Plain XOR over all bytes, seeded with 0xEF.
        # NOTE(review): ord(b) assumes str elements; iterating Python 3
        # bytes yields ints and ord() would raise TypeError.
        for b in data:
            state ^= ord(b)
        return state

    """ Send a request and read the response """
    def command(self, op=None, data=None, chk=0):
        # Request layout: <direction=0x00, opcode, body length, checksum>
        # followed by the body. op=None means "just drain one response"
        # (used by sync()).
        if op:
            pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
            self.write(pkt)
        # tries to get a response until that response has the
        # same operation as the request or a retries limit has
        # exceeded. This is needed for some esp8266s that
        # reply with more sync responses than expected.
        retries = 100
        while retries > 0:
            (op_ret, val, body) = self.receive_response()
            if op is None or op_ret == op:
                return val, body  # valid response received
            retries = retries - 1
        raise FatalError("Response doesn't match request")

    """ Receive a response to a command """
    def receive_response(self):
        # Read header of response and parse
        if self._port.read(1) != '\xc0':
            raise FatalError('Invalid head of packet')
        hdr = self.read(8)
        (resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
        if resp != 0x01:
            raise FatalError('Invalid response 0x%02x" to command' % resp)
        # The variable-length body
        body = self.read(len_ret)
        # Terminating byte
        if self._port.read(1) != chr(0xc0):
            raise FatalError('Invalid end of packet')
        return op_ret, val, body

    """ Perform a connection test """
    def sync(self):
        # The 0x55 run lets the ROM auto-baud; then drain the extra
        # sync responses the ROM sends back.
        self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55')
        for i in range(7):
            self.command()

    """ Try connecting repeatedly until successful, or giving up """
    def connect(self):
        print('Connecting...')
        for _ in range(4):
            # issue reset-to-bootloader:
            # RTS = either CH_PD or nRESET (both active low = chip in reset)
            # DTR = GPIO0 (active low = boot to flasher)
            self._port.setDTR(False)
            self._port.setRTS(True)
            time.sleep(0.05)
            self._port.setDTR(True)
            self._port.setRTS(False)
            time.sleep(0.05)
            self._port.setDTR(False)
            # worst-case latency timer should be 255ms (probably <20ms)
            self._port.timeout = 0.3
            for _ in range(4):
                try:
                    self._port.flushInput()
                    self._port.flushOutput()
                    self.sync()
                    # Longer timeout for the actual flashing work.
                    self._port.timeout = 5
                    return
                except:
                    time.sleep(0.05)
        raise FatalError('Failed to connect to ESP8266')

    """read mac addr"""
    def get_mac(self):
        # Read the four OTP words and derive AP/STA MAC addresses.
        # NOTE(review): reads registers via the module-level global `esp`
        # instead of `self` — this only works when called through the
        # `esp` object created in main(); should probably be self.read_reg.
        retry_times = 3
        try:
            reg1 = self.read_reg(esp.ESP_OTP_MAC0)
            reg2 = self.read_reg(esp.ESP_OTP_MAC1)
            reg3 = self.read_reg(esp.ESP_OTP_MAC2)
            reg4 = self.read_reg(esp.ESP_OTP_MAC3)
        except:
            print("Read reg error")
            return False
        # Bit 15 of MAC2 distinguishes ESP8266 (1) from ESP8089 (0).
        chip_flg = (reg3>>15)&0x1
        if chip_flg == 0:
            print('Warning : ESP8089 CHIP DETECTED, STOP')
            return False
        else:
            #print 'Chip_flag',chip_flg
            m0 = ((reg2>>16)&0xff)
            m1 = ((reg2>>8)&0xff)
            m2 = ((reg2 & 0xff))
            m3 = ((reg1>>24)&0xff)
            self.MAC2 = m0
            self.MAC3 = m1
            self.MAC4 = m2
            self.MAC5 = m3
            # m0 selects the OUI prefix: 0 -> 1A-FE-34 / 18-FE-34,
            # 1 -> AC-D0-74. Other values are treated as a read error.
            if m0 ==0:
                #print "r1: %02x; r2:%02x ; r3: %02x"%(m1,m2,m3)
                mac= "1A-FE-34-%02x-%02x-%02x"%(m1,m2,m3)
                mac2 = "1AFE34%02x%02x%02x"%(m1,m2,m3)
                mac = mac.upper()
                mac2 = mac2.upper()
                mac_ap = ("1A-FE-34-%02x-%02x-%02x"%(m1,m2,m3)).upper()
                mac_sta = ("18-FE-34-%02x-%02x-%02x"%(m1,m2,m3)).upper()
                print("MAC AP: %s"%(mac_ap))
                print("MAC STA: %s"%(mac_sta))
            elif m0 == 1:
                #print "r1: %02x; r2:%02x ; r3: %02x"%(m1,m2,m3)
                mac= "AC-D0-74-%02x-%02x-%02x"%(m1,m2,m3)
                mac2 = "ACD074%02x%02x%02x"%(m1,m2,m3)
                mac = mac.upper()
                mac2 = mac2.upper()
                mac_ap = ("AC-D0-74-%02x-%02x-%02x"%(m1,m2,m3)).upper()
                mac_sta = ("AC-D0-74-%02x-%02x-%02x"%(m1,m2,m3)).upper()
                print("MAC AP: %s"%(mac_ap))
                print("MAC STA: %s"%(mac_sta))
                return True
            else:
                print("MAC read error...")
                return False

    """ Read memory address in target """
    def read_reg(self, addr):
        res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
        if res[1] != "\0\0":
            raise FatalError('Failed to read target memory')
        return res[0]

    """ Write to memory address in target """
    def write_reg(self, addr, value, mask, delay_us=0):
        if self.command(ESPROM.ESP_WRITE_REG,
                        struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
            raise FatalError('Failed to write target memory')

    """ Start downloading an application image to RAM """
    def mem_begin(self, size, blocks, blocksize, offset):
        if self.command(ESPROM.ESP_MEM_BEGIN,
                        struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
            raise FatalError('Failed to enter RAM download mode')

    """ Send a block of an image to RAM """
    def mem_block(self, data, seq):
        if self.command(ESPROM.ESP_MEM_DATA,
                        struct.pack('<IIII', len(data), seq, 0, 0) + data,
                        ESPROM.checksum(data))[1] != "\0\0":
            raise FatalError('Failed to write to target RAM')

    """ Leave download mode and run the application """
    def mem_finish(self, entrypoint=0):
        # entrypoint==0 means "stay in the loader" (first pack field is a
        # 'do not execute' flag).
        if self.command(ESPROM.ESP_MEM_END,
                        struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
            raise FatalError('Failed to leave RAM download mode')

    """ Start downloading to Flash (performs an erase) """
    def flash_begin(self, _size, offset):
        # Erasing can take a while, so temporarily raise the port timeout.
        # The head/total sector juggling below compensates for a known
        # ESP8266 ROM bug where the requested erase size is miscalculated
        # across block boundaries.
        # NOTE(review): `offset/4096` and friends are true division under
        # Python 3, producing floats that struct.pack('<I', ...) rejects —
        # this method as written only works on Python 2.
        old_tmo = self._port.timeout
        self._port.timeout = 10
        area_len = int(_size)
        sector_no = offset/4096;
        sector_num_per_block = 16;
        #total_sector_num = (0== (area_len%4096))? area_len/4096 : 1+(area_len/4096);
        if 0== (area_len%4096):
            total_sector_num = area_len/4096
        else:
            total_sector_num = 1+(area_len/4096)
        #check if erase area reach over block boundary
        head_sector_num = sector_num_per_block - (sector_no%sector_num_per_block);
        #head_sector_num = (head_sector_num>=total_sector_num)? total_sector_num : head_sector_num;
        if head_sector_num>=total_sector_num :
            head_sector_num = total_sector_num
        else:
            head_sector_num = head_sector_num
        if (total_sector_num - 2 * head_sector_num)> 0:
            size = (total_sector_num-head_sector_num)*4096
            print("head: ",head_sector_num,";total:",total_sector_num)
            print("erase size : ",size)
        else:
            size = int( math.ceil( total_sector_num/2.0) * 4096 )
            print("head:",head_sector_num,";total:",total_sector_num)
            print("erase size :",size)
        if self.command(ESPROM.ESP_FLASH_BEGIN,
                        struct.pack('<IIII', size, 0x200, ESPROM.ESP_FLASH_BLOCK, offset))[1] != "\0\0":
            raise Exception('Failed to enter Flash download mode')
        self._port.timeout = old_tmo

    """ Write block to flash """
    def flash_block(self, data, seq):
        result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1]
        if result != "\0\0":
            raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result)

    """ Leave flash mode and run/reboot """
    def flash_finish(self, reboot=False):
        pkt = struct.pack('<I', int(not reboot))
        if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0":
            raise FatalError('Failed to leave Flash mode')

    """ Run application code in flash """
    def run(self, reboot=False):
        # Fake flash begin immediately followed by flash end
        self.flash_begin(0, 0)
        self.flash_finish(reboot)

    """ Read SPI flash manufacturer and device id """
    def flash_id(self):
        # Pokes the SPI controller registers directly to issue a JEDEC
        # read-ID (0x9F-style) transaction.
        self.flash_begin(0, 0)
        self.write_reg(0x60000240, 0x0, 0xffffffff)
        self.write_reg(0x60000200, 0x10000000, 0xffffffff)
        flash_id = self.read_reg(0x60000240)
        self.flash_finish(False)
        return flash_id

    """ Read SPI flash """
    def flash_read(self, offset, size, count=1):
        # Create a custom stub: the SFLASH_STUB machine code prefixed with
        # its three parameters (offset, size, count).
        stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB
        # Trick ROM to initialize SFlash
        self.flash_begin(0, 0)
        # Download stub
        self.mem_begin(len(stub), 1, len(stub), 0x40100000)
        self.mem_block(stub, 0)
        self.mem_finish(0x4010001c)
        # Fetch the data: the stub sends `count` SLIP-framed packets of
        # `size` bytes each.
        data = ''
        for _ in range(count):
            if self._port.read(1) != '\xc0':
                raise FatalError('Invalid head of packet (sflash read)')
            data += self.read(size)
            if self._port.read(1) != chr(0xc0):
                raise FatalError('Invalid end of packet (sflash read)')
        return data

    """ Abuse the loader protocol to force flash to be left in write mode """
    def flash_unlock_dio(self):
        # Enable flash write mode
        self.flash_begin(0, 0)
        # Reset the chip rather than call flash_finish(), which would have
        # write protected the chip again (why oh why does it do that?!)
        self.mem_begin(0,0,0,0x40100000)
        self.mem_finish(0x40000080)

    """ Perform a chip erase of SPI flash """
    def flash_erase(self):
        # Trick ROM to initialize SFlash
        self.flash_begin(0, 0)
        # This is hacky: we don't have a custom stub, instead we trick
        # the bootloader to jump to the SPIEraseChip() routine and then halt/crash
        # when it tries to boot an unconfigured system.
        self.mem_begin(0,0,0,0x40100000)
        self.mem_finish(0x40004984)
        # Yup - there's no good way to detect if we succeeded.
        # It it on the other hand unlikely to fail.
class ESPFirmwareImage:
    """In-memory representation of an ESP8266 firmware image.

    Layout: an 8-byte header (magic 0xE9, segment count, flash mode,
    flash size/freq, entry point), then per-segment (offset, size)
    headers followed by segment data, padded so the single XOR checksum
    byte lands at the end of a 16-byte-aligned file.
    """

    @staticmethod
    def _checksum(data, state=0xEF):
        """XOR checksum as defined by the ROM (seed 0xEF).

        Bugfix: ESPROM.checksum() calls ord() on every element, which
        raises TypeError when the data is Python 3 bytes (iteration
        yields ints). Accept both str and bytes here.
        """
        for b in data:
            state ^= b if isinstance(b, int) else ord(b)
        return state

    def __init__(self, filename=None):
        self.segments = []        # list of (offset, size, data) tuples
        self.entrypoint = 0
        self.flash_mode = 0
        self.flash_size_freq = 0
        if filename is not None:
            # Bugfix: the py2-only `file()` builtin was used and the handle
            # was never closed; use open() in a context manager instead.
            with open(filename, 'rb') as f:
                (magic, segments, self.flash_mode, self.flash_size_freq,
                 self.entrypoint) = struct.unpack('<BBBBI', f.read(8))
                # some sanity check
                if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16:
                    raise FatalError('Invalid firmware image')
                for i in range(segments):
                    (offset, size) = struct.unpack('<II', f.read(8))
                    if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
                        raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size))
                    segment_data = f.read(size)
                    if len(segment_data) < size:
                        raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
                    self.segments.append((offset, size, segment_data))
                # Skip the padding. The checksum is stored in the last byte so that the
                # file is a multiple of 16 bytes.
                align = 15 - (f.tell() % 16)
                f.seek(align, 1)
                self.checksum = ord(f.read(1))

    def add_segment(self, addr, data):
        """Append a segment at `addr`, zero-padding data to a 4-byte boundary.

        Empty data is silently dropped (no zero-length segments are stored).
        """
        l = len(data)
        if l % 4:
            data += b"\x00" * (4 - l % 4)
        if l > 0:
            self.segments.append((addr, len(data), data))

    def save(self, filename):
        """Serialize the image (header, segments, padding, checksum) to disk."""
        # Bugfix: py2-only `file()` and an unclosed handle, as in __init__.
        with open(filename, 'wb') as f:
            f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments),
                                self.flash_mode, self.flash_size_freq, self.entrypoint))
            checksum = 0xEF  # ESP_CHECKSUM_MAGIC initial state
            for (offset, size, data) in self.segments:
                f.write(struct.pack('<II', offset, size))
                f.write(data)
                # Use the bytes-safe local checksum (see _checksum docstring).
                checksum = self._checksum(data, checksum)
            # Seek-forward produces zero padding; the checksum occupies the
            # final byte of the 16-byte-aligned file.
            align = 15 - (f.tell() % 16)
            f.seek(align, 1)
            f.write(struct.pack('B', checksum))
class ELFFile:
    """Wrapper that shells out to the Xtensa binutils (nm, readelf,
    objcopy) to query symbols, the entry point and raw section contents
    of an ELF file.

    Tool names switch to the xt-* variants when the XTENSA_CORE
    environment variable is set to 'lx106'.
    """

    def __init__(self, name):
        self.name = name
        self.symbols = None  # lazily populated symbol-name -> address map

    def _fetch_symbols(self):
        """Populate self.symbols from `nm` output (cached after first call)."""
        if self.symbols is not None:
            return
        self.symbols = {}
        try:
            tool_nm = get_nm()
            if os.getenv('XTENSA_CORE') == 'lx106':
                tool_nm = "xt-nm"
            # Bugfix: under Python 3 Popen stdout yields bytes, so the str
            # comparisons below could never match; universal_newlines=True
            # decodes the stream to text.
            proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE,
                                    universal_newlines=True)
        except OSError:
            print("Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm)
            sys.exit(1)
        for l in proc.stdout:
            # nm lines are "ADDR TYPE NAME"; undefined symbols are "U NAME".
            fields = l.strip().split()
            try:
                if fields[0] == "U":
                    print("Warning: ELF binary has undefined symbol %s" % fields[1])
                    continue
                self.symbols[fields[2]] = int(fields[0], 16)
            except ValueError:
                raise FatalError("Failed to strip symbol output from nm: %s" % fields)

    def get_symbol_addr(self, sym):
        """Return the address of symbol `sym` (KeyError if absent)."""
        self._fetch_symbols()
        return self.symbols[sym]

    def get_entry_point(self):
        """Parse `readelf -h` output for the 'Entry point address' line."""
        tool_readelf = "xtensa-lx106-elf-readelf"
        if os.getenv('XTENSA_CORE') == 'lx106':
            tool_readelf = "xt-readelf"
        try:
            # Bugfix: decode stdout to text (see _fetch_symbols).
            proc = subprocess.Popen([tool_readelf, "-h", self.name],
                                    stdout=subprocess.PIPE, universal_newlines=True)
        except OSError:
            print("Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf)
            sys.exit(1)
        for l in proc.stdout:
            fields = l.strip().split()
            if fields[0] == "Entry":
                return int(fields[3], 0)

    def load_section(self, section):
        """Extract a section's raw bytes via objcopy using a temp file."""
        tool_objcopy = get_objcopy()
        if os.getenv('XTENSA_CORE') == 'lx106':
            tool_objcopy = "xt-objcopy"
        # Bugfix: tempfile.mktemp() is race-prone (the name can be claimed
        # between creation and use); mkstemp() creates the file atomically
        # and objcopy then overwrites it.
        fd, tmpsection = tempfile.mkstemp(suffix=".section")
        os.close(fd)
        try:
            subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection])
            with open(tmpsection, "rb") as f:
                data = f.read()
        finally:
            os.remove(tmpsection)
        return data
def arg_auto_int(x):
    """argparse type callback: parse an int with the base auto-detected
    from its prefix (0x.., 0o.., 0b.., or plain decimal)."""
    return int(x, base=0)
def div_roundup(a, b):
    """ Return a/b rounded up to nearest integer,
    equivalent result to int(math.ceil(float(int(a)) / float(int(b))), only
    without possible floating point accuracy errors.

    Bugfix: the original used the Python 2 `/` operator, which performs
    true division under Python 3 and returns a float; callers feed the
    result to range() and struct.pack('<I', ...), both of which require
    an int. Use floor division instead.
    """
    return (int(a) + int(b) - 1) // int(b)
class FatalError(RuntimeError):
    """
    Wrapper class for runtime errors that aren't caused by internal bugs, but by
    ESP8266 responses or input content.
    """
    def __init__(self, message):
        RuntimeError.__init__(self, message)

    @staticmethod
    def WithResult(message, result):
        """
        Return a fatal error object that includes the hex values of
        'result' as a string formatted argument.

        Bugfix: iterating a Python 3 bytes object yields ints, on which
        ord() raises TypeError; accept both str and bytes results.
        """
        hexvals = ", ".join(hex(x if isinstance(x, int) else ord(x)) for x in result)
        return FatalError(message % hexvals)
def main():
    """Command-line entry point: parse arguments, open the serial
    connection when the operation needs one, and dispatch to the
    requested sub-command.

    NOTE(review): several branches below use the Python 2-only `file()`
    builtin and Python 2 integer `/` division while the rest of the file
    uses Python 3 print() syntax — these branches cannot run unmodified
    under Python 3.
    """
    parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool')
    parser.add_argument(
        '--port', '-p',
        help='Serial port device',
        default='/dev/ttyUSB0')
    parser.add_argument(
        '--baud', '-b',
        help='Serial port baud rate',
        type=arg_auto_int,
        default=ESPROM.ESP_ROM_BAUD)
    subparsers = parser.add_subparsers(
        dest='operation',
        help='Run esptool {command} -h for additional help')
    parser_load_ram = subparsers.add_parser(
        'load_ram',
        help='Download an image to RAM and execute')
    parser_load_ram.add_argument('filename', help='Firmware image')
    parser_dump_mem = subparsers.add_parser(
        'dump_mem',
        help='Dump arbitrary memory to disk')
    parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
    parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
    parser_dump_mem.add_argument('filename', help='Name of binary dump')
    parser_read_mem = subparsers.add_parser(
        'read_mem',
        help='Read arbitrary memory location')
    parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
    parser_write_mem = subparsers.add_parser(
        'write_mem',
        help='Read-modify-write to arbitrary memory location')
    parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
    parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
    parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
    parser_write_flash = subparsers.add_parser(
        'write_flash',
        help='Write a binary blob to flash')
    parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space')
    parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
                                    choices=['40m', '26m', '20m', '80m'], default='40m')
    parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
                                    choices=['qio', 'qout', 'dio', 'dout'], default='qio')
    parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
                                    choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
    subparsers.add_parser(
        'run',
        help='Run application code in flash')
    parser_image_info = subparsers.add_parser(
        'image_info',
        help='Dump headers from an application image')
    parser_image_info.add_argument('filename', help='Image file to parse')
    parser_make_image = subparsers.add_parser(
        'make_image',
        help='Create an application image from binary files')
    parser_make_image.add_argument('output', help='Output image file')
    parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
    parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
    parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
    parser_elf2image = subparsers.add_parser(
        'elf2image',
        help='Create an application image from ELF file')
    parser_elf2image.add_argument('input', help='Input ELF file')
    parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str)
    parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
                                  choices=['40m', '26m', '20m', '80m'], default='40m')
    parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
                                  choices=['qio', 'qout', 'dio', 'dout'], default='qio')
    parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
                                  choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
    parser_elf2image.add_argument('--entry-symbol', '-es', help = 'Entry point symbol name (default \'call_user_start\')',
                                  default = 'call_user_start')
    subparsers.add_parser(
        'read_mac',
        help='Read MAC address from OTP ROM')
    subparsers.add_parser(
        'flash_id',
        help='Read SPI flash manufacturer and device ID')
    parser_read_flash = subparsers.add_parser(
        'read_flash',
        help='Read SPI flash content')
    parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
    parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
    parser_read_flash.add_argument('filename', help='Name of binary dump')
    subparsers.add_parser(
        'erase_flash',
        help='Perform Chip Erase on SPI flash')
    args = parser.parse_args()
    # Create the ESPROM connection object, if needed: the three
    # image-manipulation operations work purely on local files.
    esp = None
    if args.operation not in ('image_info','make_image','elf2image'):
        esp = ESPROM(args.port, args.baud)
        esp.connect()
    # Do the actual work. Should probably be split into separate functions.
    if args.operation == 'load_ram':
        # Stream each image segment to RAM in ESP_RAM_BLOCK chunks, then
        # jump to the image entry point.
        image = ESPFirmwareImage(args.filename)
        print('RAM boot...')
        for (offset, size, data) in image.segments:
            print('Downloading %d bytes at %08x...' % (size, offset), end=' ')
            sys.stdout.flush()
            esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset)
            seq = 0
            while len(data) > 0:
                esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
                data = data[esp.ESP_RAM_BLOCK:]
                seq += 1
            print('done!')
        print('All segments done, executing at %08x' % image.entrypoint)
        esp.mem_finish(image.entrypoint)
    elif args.operation == 'read_mem':
        print('0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address)))
    elif args.operation == 'write_mem':
        esp.write_reg(args.address, args.value, args.mask, 0)
        print('Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address))
    elif args.operation == 'dump_mem':
        # Dump memory one 32-bit word at a time.
        # NOTE(review): `file()` is py2-only and `args.size / 4` is a float
        # under py3 (range() would raise TypeError).
        f = file(args.filename, 'wb')
        for i in range(args.size / 4):
            d = esp.read_reg(args.address + (i * 4))
            f.write(struct.pack('<I', d))
            if f.tell() % 1024 == 0:
                print('\r%d bytes read... (%d %%)' % (f.tell(),
                                                      f.tell() * 100 / args.size), end=' ')
                sys.stdout.flush()
        print('Done!')
    elif args.operation == 'write_flash':
        # addr_filename alternates: address, filename, address, filename...
        assert len(args.addr_filename) % 2 == 0
        # Encode the flash header bytes that get patched into offset 2-3 of
        # an image written at address 0.
        flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
        flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
        flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
        flash_info = struct.pack('BB', flash_mode, flash_size_freq)
        while args.addr_filename:
            address = int(args.addr_filename[0], 0)
            filename = args.addr_filename[1]
            args.addr_filename = args.addr_filename[2:]
            # NOTE(review): `file()` is py2-only; also the handle is never
            # closed, and the str-vs-bytes comparisons below ('\xe9',
            # '\xff' padding) assume py2 str data.
            image = file(filename, 'rb').read()
            print('Erasing flash...')
            blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK)
            esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address)
            seq = 0
            written = 0
            t = time.time()
            while len(image) > 0:
                print('\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks), end=' ')
                sys.stdout.flush()
                block = image[0:esp.ESP_FLASH_BLOCK]
                # Fix sflash config data
                if address == 0 and seq == 0 and block[0] == '\xe9':
                    block = block[0:2] + flash_info + block[4:]
                # Pad the last block
                block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block))
                esp.flash_block(block, seq)
                image = image[esp.ESP_FLASH_BLOCK:]
                seq += 1
                written += len(block)
            t = time.time() - t
            print('\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000))
        print('\nLeaving...')
        if args.flash_mode == 'dio':
            esp.flash_unlock_dio()
        else:
            esp.flash_begin(0, 0)
            esp.flash_finish(False)
    elif args.operation == 'run':
        esp.run()
    elif args.operation == 'image_info':
        # Parse an image file and verify its checksum locally (no serial).
        image = ESPFirmwareImage(args.filename)
        print(('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set')
        print('%d segments' % len(image.segments))
        print()
        checksum = ESPROM.ESP_CHECKSUM_MAGIC
        for (idx, (offset, size, data)) in enumerate(image.segments):
            print('Segment %d: %5d bytes at %08x' % (idx + 1, size, offset))
            checksum = ESPROM.checksum(data, checksum)
        print()
        print('Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!'))
    elif args.operation == 'make_image':
        image = ESPFirmwareImage()
        if len(args.segfile) == 0:
            raise FatalError('No segments specified')
        if len(args.segfile) != len(args.segaddr):
            raise FatalError('Number of specified files does not match number of specified addresses')
        for (seg, addr) in zip(args.segfile, args.segaddr):
            # NOTE(review): py2-only `file()` builtin, handle never closed.
            data = file(seg, 'rb').read()
            image.add_segment(addr, data)
        image.entrypoint = args.entrypoint
        image.save(args.output)
    elif args.operation == 'elf2image':
        # Produce two binaries: <prefix>0x00000.bin with .text/.data/.rodata
        # and <prefix>0x#####.bin with the .irom0.text flash-cached code,
        # then print a RAM/flash usage summary.
        if args.output is None:
            args.output = args.input + '-'
        e = ELFFile(args.input)
        image = ESPFirmwareImage()
        image.entrypoint = e.get_symbol_addr(args.entry_symbol)
        for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")):
            data = e.load_section(section)
            image.add_segment(e.get_symbol_addr(start), data)
        image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
        image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
        image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
        image.save(args.output + "0x00000.bin")
        data = e.load_section(".irom0.text")
        # irom0 is mapped at 0x40200000; its file offset is the distance
        # from that base.
        off = e.get_symbol_addr("_irom0_text_start") - 0x40200000
        assert off >= 0
        f = open(args.output + "0x%05x.bin" % off, "wb")
        f.write(data)
        f.close()
        print("{0:>10}|{1:>30}|{2:>12}|{3:>12}|{4:>8}".format("Section", "Description", "Start (hex)", "End (hex)", "Used space"))
        print("------------------------------------------------------------------------------")
        sec_name = ["data", "rodata", "bss", "lit4", "text", "irom0_text"]
        sec_des = ["Initialized Data (RAM)", "ReadOnly Data (RAM)", "Uninitialized Data (RAM)", "Uninitialized Data (IRAM)", "Uncached Code (IRAM)", "Cached Code (SPI)"]
        sec_size = []
        for i in range(len(sec_name)):
            # Section sizes derived from linker-provided _<name>_start/_end
            # symbols.
            ss = e.get_symbol_addr('_' + sec_name[i] + '_start')
            se = e.get_symbol_addr('_' + sec_name[i] + '_end')
            sec_size.append(int(se-ss))
            print("{0:>10}|{1:>30}|{2:>12X}|{3:>12X}|{4:>8d}".format(sec_name[i], sec_des[i], ss, se, sec_size[i]))
        print("------------------------------------------------------------------------------")
        print("{0} : {1:X} {2}()".format("Entry Point", image.entrypoint, args.entry_symbol))
        ram_used = sec_size[0] + sec_size[1] + sec_size[2]
        iram_used = sec_size[3] + sec_size[4]
        print("{0} : {1:d}".format("Total Used RAM", ram_used + iram_used))
        print("{0} : {1:d} or {2:d} (option 48k IRAM)".format("Free IRam", 0x08000 - iram_used, 0x0C000 - iram_used))
        print("{0} : {1:d}".format("Free Heap", 0x014000 - ram_used))
        print("{0} : {1:d}".format("Total Free RAM", 0x020000 - iram_used - ram_used))
    elif args.operation == 'read_mac':
        esp.get_mac()
    elif args.operation == 'flash_id':
        flash_id = esp.flash_id()
        print('Manufacturer: %02x' % (flash_id & 0xff))
        print('Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff))
    elif args.operation == 'read_flash':
        # NOTE(review): py2-only `file()` builtin, handle never closed.
        print('Please wait...')
        file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size])
    elif args.operation == 'erase_flash':
        esp.flash_erase()
if __name__ == '__main__':
    try:
        main()
    except FatalError as e:
        # FatalError signals a protocol/input problem rather than a bug:
        # report it concisely and exit with a distinct status code.
        print('\nA fatal error occurred: %s' % e)
        sys.exit(2)
| [
"jiankemeng@gmail.com"
] | jiankemeng@gmail.com |
37c81f3e58e9f65707b6aff4eabdfe911a9432cc | 41d17419b743929069cf1dd7bace93a0838ebfcf | /TransformationCode/com/lyit/service/TransformationService.py | 76348be9a6c7540484ed5dd1348c08e3ff6da7cc | [] | no_license | Priyanks27/TransformCode | 45b3c86fcc3f02750aa863abf4f9b44c9f2a0227 | 056572bf45cb36705947a81f9a34ce6d680f552f | refs/heads/master | 2022-12-12T14:41:28.771251 | 2020-08-30T01:28:07 | 2020-08-30T01:28:07 | 287,707,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,434 | py | import os
import json
from com.lyit.DependencyScanning.DependencyScanning import DependencyScanning
from com.lyit.DependencyScanning.UpdatePomInTarget import UpdatePomInTarget
from com.lyit.Reporting.Reporting import Reporting
from com.lyit.RuleEngine.RuleEngine import RuleEngine
from com.lyit.RuleEngine.UpdateGoogleProject import UpdateGoogleProject
from com.lyit.TransformTarget.TransformCode import TransformCode
from com.lyit.configuration.GetProperties import GetProperties
from com.lyit.helper.CommandLIneExecutionFiles.CommandLineExecutor import CommandLineExecutor
from com.lyit.helper.CreateRepository import CreateRepository
class TransformationServiceMeta(type):
    """Metaclass implementing the singleton pattern: the first constructor
    call of a class using this metaclass creates the instance; every later
    call returns that same cached instance."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls in cls._instances:
            return cls._instances[cls]
        created = super().__call__(*args, **kwargs)
        cls._instances[cls] = created
        return created
class TransformationService(metaclass=TransformationServiceMeta):
    """Singleton service that runs the source-to-target-cloud transformation.

    Pipeline: stage the source GitHub repo, scan its dependencies against the
    target template, run rule checks (and transform unsupported dependencies
    when allowed), copy the source into the target template, apply
    Google-specific tweaks, and optionally push the result so the cloud build
    pipeline picks it up.  Each step logs its outcome through Reporting; on
    failure the report entry for the failing step is returned.
    """

    # NOTE(review): hard-coded absolute Windows path (unused in this method);
    # presumably a developer-machine default -- confirm before deployment.
    __results_location = "C:/Users/priyank/Documents/Resources/TransformationResults"

    def transform(self, transformationInput):
        """Run the full transformation described by *transformationInput*.

        Returns the real path of the transformed target directory on success,
        otherwise an error/report value describing the failing step.

        Bug fix: every ``"..." + e`` below used to concatenate ``str`` with an
        Exception object, which itself raises TypeError inside the handler;
        ``str(e)`` is used now so the original error is actually reported.
        """
        print(transformationInput.get_source_github_url())
        print(transformationInput.get_targetcloudprovider())
        # Create report file which will be used to add status at each step and their result
        reporting = Reporting(transformation_input=transformationInput)
        # 1. Create a staging area
        createRepository = CreateRepository()
        try:
            _stagingArea = createRepository.get_stagingArea_Dir()
            _targetARea = createRepository.get_targetArea_Dir()
        except Exception as e:
            error = "Error occurred while creating staging area : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Staging and Target area created")
        commandLineExecutor = CommandLineExecutor()
        # 2. Fetch git URL
        try:
            commandLineExecutor.execute("FetchGitHub", transformationInput)
        except Exception as e:
            error = "Error occurred while fetching Git URL : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Git hub fetch URL Success")
        # 3. Scan : compare POM.xmls of targetArea and staging area : if answer is Yes
        # Then copy src folder to target and push changes
        # Target repo should be existing and connected to cloud build
        dependencyScanning = DependencyScanning()
        try:
            dependency_scan_results_model = dependencyScanning.scan_dependencies(_stagingArea, _targetARea)
        except Exception as e:
            error = "Error occurred while scanning dependencies : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Scanning dependencies done")
        # 4. Dependencies failed and the code cannot be transformed
        if not dependency_scan_results_model.get_dependencies_satisfied():
            jsonObject = json.dumps(dependency_scan_results_model.get_missing_required_dependencies_in_target())
            error = "Missing dependencies dependencies found during scaning: " + jsonObject
            return reporting.add_to_report(error=error)
            # (an unreachable "return jsonObject" followed the return above; removed)
        ruleEngine = RuleEngine()
        try:
            isSourceSupported = ruleEngine.check_rules_against_dependencies_google(dependency_scan_results_model)
        except Exception as e:
            error = "Error occurred while checking rules against dependencies : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Rules checked against dependencies")
        try:
            # The rule check apparently returns True when fully supported and
            # the offending dependency otherwise (hence the string compare
            # and the reuse of the value below) -- confirm against RuleEngine.
            if str(isSourceSupported) != 'True':
                getProperties = GetProperties()
                can_transform_unsupported_dependency = getProperties. \
                    get_transform_unsupported_dependency(unsupported_dependency=isSourceSupported)
                if not can_transform_unsupported_dependency:
                    return "Unsupported dependency cannot be transformed!"
                else:
                    # 5. Transform unsupported dependencies
                    isTransformed = ruleEngine.transform_unsupported_dependencies(unsupported_dependency=isSourceSupported,
                                                                                  staging_area=_stagingArea)
                    if not isTransformed:
                        return "Unsupported dependency transformation failed."
        except Exception as e:
            error = "Error occurred while transforming unsupported dependencies : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Unsupported dependencies checked")
        # 6. Copy functionality : transform code
        transform_code = TransformCode()
        try:
            updated_dir = transform_code.TransformCode(_stagingArea, _targetARea)
        except Exception as e:
            error = "Error occurred while transforming code by copying into template folder : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Transformation done by copying src to template folder")
        # 7. Check if the solution is Google, then copy ServletInitalizer
        try:
            if str(transformationInput.get_targetcloudprovider()).lower() == "google":
                update_google_project = UpdateGoogleProject()
                update_google_project.copy_servlet_initializer(_targetARea)
        except Exception as e:
            error = "Error occurred while copying ServletInitalizer for Google dependencies : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Google dependency checked for ServletInitalizer")
        # 8. Deploy by pushing the target to its git branch
        try:
            if transformationInput.get_is_deploy() == "true":
                commandLineExecutor.execute("gitpush", transformationInput)
        except Exception as e:
            error = "Error occurred while Git push : " + str(e)
            return reporting.add_to_report(error=error)
        reporting.add_to_report(error="Success : Code pushed to Git, check Code Build pipeline!")
        return os.path.realpath(updated_dir)
| [
"saxenapriyank1027@gmail.com"
] | saxenapriyank1027@gmail.com |
4a2bdfc37e3b59dc3e78f8c3346a22e17a27c06c | e23d6bada82885311adf5cf4b25c92bdbfd42784 | /neuron_data_display.py | 620d14579a5f28bde5c69cc4bc1d1dcfb758d3ce | [] | no_license | TakashiNomura/neural_network | 325d3f3dae227cc8db3396bb069f7705f49b456e | 6d9e0acaa4996bbf99277940544d7aae1193be12 | refs/heads/master | 2021-01-23T16:06:08.699272 | 2017-06-10T07:01:19 | 2017-06-10T07:01:19 | 93,283,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # coding:UTF-8
import matplotlib.pyplot as plt
import math
# Sigmoid (logistic) activation function.
def sigmoid(a):
    """Return the logistic sigmoid 1 / (1 + e^-a).

    Bug fix: for large negative *a*, math.exp(-a) overflows and raised
    OverflowError; the algebraically equivalent form e^a / (1 + e^a) is used
    on that branch so the function is defined for all finite inputs.
    """
    if a >= 0:
        return 1.0 / (1.0 + math.exp(-a))
    ea = math.exp(a)  # safe: a < 0, so ea is in (0, 1)
    return ea / (1.0 + ea)
# A single neuron: accumulates weighted inputs and emits a sigmoid activation.
class Neuron:
    def __init__(self):
        """Start with an empty accumulator.

        Fix: the original kept input_sum/output as *class* attributes used as
        per-instance state; plain instance attributes are the intended idiom
        and avoid accidental class-level sharing.
        """
        self.input_sum = 0.0
        self.output = 0.0

    def setInput(self, inp):
        """Accumulate one (already weighted) input value."""
        self.input_sum += inp

    def getOutput(self):
        """Return sigmoid(sum of accumulated inputs), caching it in self.output."""
        self.output = sigmoid(self.input_sum)
        return self.output
# A minimal "network": one neuron with three fixed input weights.
class NeuralNetwork:
    # Input weights (the third input acts as a bias term fed by the caller).
    w = [1.5, -2.5, -0.5]

    def __init__(self):
        # Fix: the neuron used to be a class attribute created at class
        # definition time, so every NeuralNetwork shared one Neuron.
        self.neuron = Neuron()

    def commit(self, input_data):
        """Feed one 3-element sample through the neuron and return its activation.

        Fix: the accumulator is reset first -- previously input_sum grew
        across successive commit() calls, so only the first call was correct.
        """
        self.neuron.input_sum = 0.0
        self.neuron.setInput(input_data[0] * self.w[0])
        self.neuron.setInput(input_data[1] * self.w[1])
        self.neuron.setInput(input_data[2] * self.w[2])
        return self.neuron.getOutput()
# Reference point (to bring the data into roughly the 0.0-1.0 range).
# Values near 34.5 / 137.5 suggest latitude/longitude around central Japan --
# presumably; confirm against the data source.
refer_point0 = 34.5
refer_point1 = 137.5
# Read the trial data file: one "value0,value1" pair per line.
trial_data = []
trial_data_file = open("trial_data", "r")
for line in trial_data_file:
    line = line.rstrip().split(",")
    trial_data.append([float(line[0]) - refer_point0, float(line[1]) - refer_point1 ])
trial_data_file.close()
# Neural network instance.
# NOTE(review): created but commit() is never called below -- the plot shows
# raw positions only; verify whether classification output was intended here.
neural_network = NeuralNetwork()
# Rebuild absolute coordinates for plotting: position[0] holds the second
# column (x axis), position[1] the first column (y axis).
position = [[],[]]
for data in trial_data:
    position[0].append(data[1] + refer_point1)
    position[1].append(data[0] + refer_point0)
# Plot.  ("Postion" typo is runtime label text; left unchanged on purpose.)
plt.scatter(position[0], position[1], c="red", label="Postion", marker="+")
plt.legend()
plt.show()
"nomufamily@gmail.com"
] | nomufamily@gmail.com |
6a8a15099b951ef5622b7485ccb8a701d77f2115 | 77260af8ad419237b66789145bfac00d98e806ab | /manage.py | 21c90ac02ac036da2467ec121638c3725023a7f5 | [] | no_license | KiranGangadhar01/CloneProject_Test | 6b3ae7fc9c7484302c9b1b27d4daa4190b8f6776 | 13194bf0aedf8bc2316456c0663984eacb9aca47 | refs/heads/master | 2020-05-02T03:10:26.592021 | 2019-04-02T04:58:03 | 2019-04-02T04:58:03 | 177,721,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: point Django at this project's
    # settings, then hand the command line over to Django.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CloneProject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from import_error
    execute_from_command_line(sys.argv)
| [
"kiran.gangadhar.01@gmail.com"
] | kiran.gangadhar.01@gmail.com |
5fd7149fbd4109af07177bba0bdb242780b881b3 | a355c3e1796f207fc3088206d041d39a89d38649 | /score-bot/run.py | 01ebbefa723659280f00ed098da53cf837e87a88 | [] | no_license | devsoc/dailyprogrammer | 2a51b042c075c6a72ab53b6cbf8ea5a023aed62c | 69ccd8fdb86d6aabed8d123fa7dbe23b8a9cea94 | refs/heads/master | 2021-01-22T07:39:12.568668 | 2017-02-21T19:24:00 | 2017-02-21T19:24:00 | 81,837,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import os
import asyncio
import threading
from score import main
from flask import Flask
app = Flask(__name__)
def run_script():
    """Run the score bot's async main() to completion on a fresh event loop.

    Bug fix: the original passed the *result* of loop.run_until_complete(...)
    as the Thread target, which ran the coroutine synchronously in the caller
    and started a thread with a non-callable target.  Wrapping the call in a
    lambda makes it actually execute on the worker thread.  Note join() still
    blocks until the run finishes, so the call remains synchronous overall.
    """
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=lambda: loop.run_until_complete(main(loop)))
    thread.start()
    thread.join()
@app.route('/', methods=['GET', 'POST'])
def home():
    # Kick off one scoring run per request; the HTTP response is sent only
    # after the run completes (run_script joins its worker thread).
    run_script()
    return 'Done'
if __name__ == '__main__':
    # Bind to the port supplied by the hosting platform (e.g. Heroku's $PORT),
    # defaulting to 5000 for local development.
    port = int(os.environ.get('PORT', 5000))
    app.run(debug=True, host='0.0.0.0', port=port)
| [
"programmer.arsh@gmail.com"
] | programmer.arsh@gmail.com |
51488b6af889fd61bcc3bde0f432eebce76ef284 | fb84e82ab80f2af43d3cdcf9a6c0351228d0f682 | /validate.py | e93c4b1bb4adf2936a69d41ba81724c3c0b0e580 | [] | no_license | doctorwk007/semseg | bf1ea79e8e5f9a0084de98e0bd588a2c46af30b0 | 39f7e642014a1e8e21a84d0ff1e0057469b5d8e4 | refs/heads/master | 2020-04-12T01:10:35.164155 | 2018-12-15T03:03:27 | 2018-12-15T03:03:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,095 | py | # -*- coding: utf-8 -*-
import torch
import os
import argparse
import cv2
import time
import numpy as np
import visdom
from torch.autograd import Variable
from scipy import misc
from semseg.dataloader.camvid_loader import camvidLoader
from semseg.dataloader.cityscapes_loader import cityscapesLoader
from semseg.dataloader.freespace_loader import freespaceLoader
from semseg.loss import cross_entropy2d
from semseg.metrics import scores
from semseg.modelloader.EDANet import EDANet
from semseg.modelloader.bisenet import BiSeNet
from semseg.modelloader.deeplabv3 import Res_Deeplab_101, Res_Deeplab_50
from semseg.modelloader.drn import drn_d_22, DRNSeg, drn_a_asymmetric_18, drn_a_asymmetric_ibn_a_18, drnseg_a_50, drnseg_a_18, drnseg_a_34, drnseg_e_22, drnseg_a_asymmetric_18, drnseg_a_asymmetric_ibn_a_18, drnseg_d_22, drnseg_d_38
from semseg.modelloader.drn_a_irb import drnsegirb_a_18
from semseg.modelloader.drn_a_refine import drnsegrefine_a_18
from semseg.modelloader.duc_hdc import ResNetDUC, ResNetDUCHDC
from semseg.modelloader.enet import ENet
from semseg.modelloader.enetv2 import ENetV2
from semseg.modelloader.erfnet import erfnet
from semseg.modelloader.fc_densenet import fcdensenet103, fcdensenet56, fcdensenet_tiny
from semseg.modelloader.fcn import fcn, fcn_32s, fcn_16s, fcn_8s
from semseg.modelloader.fcn_mobilenet import fcn_MobileNet, fcn_MobileNet_32s, fcn_MobileNet_16s, fcn_MobileNet_8s
from semseg.modelloader.fcn_resnet import fcn_resnet18, fcn_resnet34, fcn_resnet18_32s, fcn_resnet18_16s, \
fcn_resnet18_8s, fcn_resnet34_32s, fcn_resnet34_16s, fcn_resnet34_8s, fcn_resnet50_32s, fcn_resnet50_16s, fcn_resnet50_8s
from semseg.modelloader.lrn import lrn_vgg16
from semseg.modelloader.segnet import segnet, segnet_squeeze, segnet_alignres, segnet_vgg19
from semseg.modelloader.segnet_unet import segnet_unet
from semseg.modelloader.sqnet import sqnet
def validate(args):
    """Evaluate a semantic-segmentation model on one dataset split.

    Loads a model (a pickled model file, a constructed architecture, and/or a
    state dict per *args*), runs it over the split with batch size 1,
    optionally saves colour-coded GT/prediction images under /tmp/<timestamp>/,
    and prints overall scores plus per-class IoU.
    """
    # timestamp used as the /tmp output folder name when --save_result is set
    init_time = str(int(time.time()))
    if args.vis:
        # NOTE(review): this visdom handle is never used below -- dead setup?
        vis = visdom.Visdom()
    if args.dataset_path == '':
        HOME_PATH = os.path.expanduser('~')
        local_path = os.path.join(HOME_PATH, 'Data/CamVid')
    else:
        local_path = args.dataset_path
    # NOTE(review): this line overwrites both branches above; with an empty
    # --dataset_path, local_path becomes expanduser('') == '' rather than the
    # ~/Data/CamVid default computed just before -- verify intent.
    local_path = os.path.expanduser(args.dataset_path)
    if args.dataset == 'CamVid':
        dst = camvidLoader(local_path, is_transform=True, split=args.dataset_type)
    elif args.dataset == 'CityScapes':
        dst = cityscapesLoader(local_path, is_transform=True, split=args.dataset_type)
    elif args.dataset == 'FreeSpace':
        dst = freespaceLoader(local_path, is_transform=True, split=args.dataset_type)
    else:
        # unknown dataset: dst stays unbound and the DataLoader below fails
        pass
    val_loader = torch.utils.data.DataLoader(dst, batch_size=1, shuffle=False)
    # if os.path.isfile(args.validate_model):
    if args.validate_model != '':
        model = torch.load(args.validate_model)
    else:
        try:
            # args.structure names one of the model constructors imported above
            model = eval(args.structure)(n_classes=args.n_classes, pretrained=args.init_vgg16)
        except:
            # NOTE(review): bare except hides the real construction error
            print('missing structure or not support')
            exit(0)
    if args.validate_model_state_dict != '':
        try:
            model.load_state_dict(torch.load(args.validate_model_state_dict, map_location='cpu'))
        except KeyError:
            print('missing key')
    if args.cuda:
        model.cuda()
    # some model load different mode different performance
    model.eval()
    # model.train()
    gts, preds, errors, imgs_name = [], [], [], []
    for i, (imgs, labels) in enumerate(val_loader):
        print(i)
        # if i==1:
        #     break
        img_path = dst.files[args.dataset_type][i]
        img_name = img_path[img_path.rfind('/')+1:]
        imgs_name.append(img_name)
        # print('img_path:', img_path)
        # print('img_name:', img_name)
        # print(labels.shape)
        # print(imgs.shape)
        # Wrap the numpy batch in torch Variables (legacy pre-0.4 API;
        # volatile=True disables autograd for inference).
        imgs = Variable(imgs, volatile=True)
        labels = Variable(labels, volatile=True)
        if args.cuda:
            imgs = imgs.cuda()
            labels = labels.cuda()
        outputs = model(imgs)
        loss = cross_entropy2d(outputs, labels)
        loss_np = loss.cpu().data.numpy()
        loss_np_float = float(loss_np)
        # print('loss_np_float:', loss_np_float)
        errors.append(loss_np_float)
        # outputs is batch_size*n_classes*height*width; max over axis 1 returns
        # (values, indices) -- the argmax indices are the predicted label map.
        pred = outputs.cpu().data.max(1)[1].numpy()
        gt = labels.cpu().data.numpy()
        if args.save_result:
            if not os.path.exists('/tmp/'+init_time):
                os.mkdir('/tmp/'+init_time)
            pred_labels = outputs.cpu().data.max(1)[1].numpy()
            label_color = dst.decode_segmap(labels.cpu().data.numpy()[0]).transpose(2, 0, 1)
            pred_label_color = dst.decode_segmap(pred_labels[0]).transpose(2, 0, 1)
            label_color_cv2 = label_color.transpose(1, 2, 0)
            label_color_cv2 = cv2.cvtColor(label_color_cv2, cv2.COLOR_RGB2BGR)
            cv2.imwrite('/tmp/'+init_time+'/gt_{}'.format(img_name), label_color_cv2)
            pred_label_color_cv2 = pred_label_color.transpose(1, 2, 0)
            pred_label_color_cv2 = cv2.cvtColor(pred_label_color_cv2, cv2.COLOR_RGB2BGR)
            cv2.imwrite('/tmp/'+init_time+'/pred_{}'.format(img_name), pred_label_color_cv2)
        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
    # print('errors:', errors)
    # print('imgs_name:', imgs_name)
    # Images sorted by ascending loss; only consumed by the commented-out
    # "top errors" inspection below.
    errors_indices = np.argsort(errors).tolist()
    # print('errors_indices:', errors_indices)
    # for top_i in range(len(errors_indices)):
    # for top_i in range(10):
    #     top_index = errors_indices.index(top_i)
    #     # print('top_index:', top_index)
    #     img_name_top = imgs_name[top_index]
    #     print('img_name_top:', img_name_top)
    score, class_iou = scores(gts, preds, n_class=dst.n_classes)
    for k, v in score.items():
        print(k, v)
    class_iou_list = []
    for i in range(dst.n_classes):
        class_iou_list.append(round(class_iou[i], 2))
        # print(i, round(class_iou[i], 2))
    print('classes:', range(dst.n_classes))
    print('class_iou_list:', class_iou_list)
# best validate: python validate.py --structure fcn32s --validate_model_state_dict fcn32s_camvid_9.pt
if __name__=='__main__':
    # print('validate----in----')
    # Command-line interface: architecture, weights, dataset and device flags.
    # NOTE(review): type=bool on --init_vgg16/--vis/--cuda/--save_result is the
    # classic argparse pitfall -- bool('False') is True, so any non-empty value
    # enables the flag; action='store_true' is the usual fix (left unchanged
    # here to preserve the existing CLI).
    parser = argparse.ArgumentParser(description='training parameter setting')
    parser.add_argument('--structure', type=str, default='fcn32s', help='use the net structure to segment [ fcn32s ResNetDUC segnet ENet drn_d_22 ]')
    parser.add_argument('--validate_model', type=str, default='', help='validate model path [ fcn32s_camvid_9.pkl ]')
    parser.add_argument('--validate_model_state_dict', type=str, default='', help='validate model state dict path [ fcn32s_camvid_9.pt ]')
    parser.add_argument('--init_vgg16', type=bool, default=False, help='init model using vgg16 weights [ False ]')
    parser.add_argument('--dataset', type=str, default='CamVid', help='train dataset [ CamVid CityScapes FreeSpace ]')
    parser.add_argument('--dataset_path', type=str, default='~/Data/CamVid', help='train dataset path [ ~/Data/CamVid ~/Data/cityscapes ~/Data/FreeSpaceDataset ]')
    parser.add_argument('--dataset_type', type=str, default='val', help='dataset type [ train val test ]')
    parser.add_argument('--n_classes', type=int, default=12, help='train class num [ 12 ]')
    parser.add_argument('--vis', type=bool, default=False, help='visualize the training results [ False ]')
    parser.add_argument('--cuda', type=bool, default=False, help='use cuda [ False ]')
    parser.add_argument('--save_result', type=bool, default=False, help='save the val dataset prediction result [ False True ]')
    args = parser.parse_args()
    # print(args.resume_model)
    # print(args.save_model)
    print(args)
    validate(args)
    # print('validate----out----')
| [
"guanfuchen@zju.edu.cn"
] | guanfuchen@zju.edu.cn |
a3fe21f6b4df87f847e18ec86aa121ab5f4bd395 | c5457ee0cdb6d2d13764a6f1db2222f55835d017 | /app/seeds/reviews.py | fccdaf09c2e867d9ec5ada198e8ea768c27b2da9 | [] | no_license | boothjacobs/stay-awhile | 1a275cb6d7c2d655928f7f4f060c5d3e1765c011 | 0b25b10c27d2ecbdf8108eb736865047e140b81d | refs/heads/main | 2023-07-09T10:50:53.349409 | 2021-08-08T22:31:27 | 2021-08-08T22:31:27 | 380,834,358 | 15 | 0 | null | 2021-07-12T02:54:31 | 2021-06-27T20:42:47 | Python | UTF-8 | Python | false | false | 1,318 | py | from werkzeug.datastructures import ContentRange
from app.models import db, Review
def seed_reviews():
    """Insert four demo reviews and commit them in one transaction."""
    demo_rows = [
        dict(
            guest_id='1',
            ranch_id='6',
            booking_id='1',
            content="Aenean dictum leo elementum, bibendum neque facilisis, interdum mauris.",
            stars='4',
        ),
        dict(
            guest_id='1',
            ranch_id='6',
            booking_id='7',
            content="Fusce a erat ullamcorper, rhoncus neque ac, pharetra ante. Integer aliquam finibus metus, sed tempor mi egestas eu.",
            stars='5',
        ),
        dict(
            guest_id='2',
            ranch_id='9',
            booking_id='8',
            content="Nullam iaculis ligula ligula, eget finibus urna feugiat a. Phasellus ornare leo quam, eu rhoncus turpis vestibulum at.",
            stars='3',
        ),
        dict(
            guest_id='4',
            ranch_id='4',
            booking_id='5',
            content="Nullam iaculis ligula ligula, eget finibus urna feugiat a. Phasellus ornare leo quam, eu rhoncus turpis vestibulum at.",
            stars='4',
        ),
    ]
    for row in demo_rows:
        db.session.add(Review(**row))
    db.session.commit()
def undo_reviews():
    """Delete all seeded reviews and reset their autoincrement ids.

    Uses PostgreSQL TRUNCATE ... RESTART IDENTITY CASCADE, so dependent rows
    are removed as well.
    """
    db.session.execute('TRUNCATE reviews RESTART IDENTITY CASCADE;')
    db.session.commit()
| [
"jacobs.b.sarah@gmail.com"
] | jacobs.b.sarah@gmail.com |
4c659bec2a8b634ed455336351bdfde97a018dd7 | d447054870e0217b6f6c69c1ea3d76e5e49e971a | /app.py | 2990539722f759e658d1c63cae88564a7d35e6e6 | [] | no_license | maciekpykosz/GrainGrowthAndBoundarySmoothing | 5757174766620f8252f98df482e3e02ea629c9b1 | 1ec863f3d9c9a6acccbeae318d1e5025b6678142 | refs/heads/master | 2022-06-17T00:23:04.961966 | 2020-05-04T22:23:53 | 2020-05-04T22:23:53 | 261,300,918 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,919 | py | import itertools
import random
import copy
import numpy as np
import neighborhood as nh
from collections import Counter
from math import exp
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
x_size = 0
y_size = 0
grid = None
mc_grid = None
rectangle_size = 5
begin_id = None
colors = {}
seed_id = 0
class Cell:
    """One lattice site: grain id, colour, buffered next state, energy, and
    its pixel position on the drawing canvas."""

    def __init__(self, i, j):
        # (i, j) = (row, column); each cell occupies a 5x5-pixel square,
        # offset 2 px from the canvas border.
        self.id = 0
        self.next_state = 0
        self.energy = 0
        self.color = 'white'
        self.position_in_matrix = (i, j)
        self.position_in_canvas = (j * 5 + 2, i * 5 + 2)

    def switch_state(self):
        """Promote the buffered next_state to the current grain id."""
        self.id = self.next_state

    def clean_cell(self):
        """Reset to an empty white cell and repaint it on the main canvas."""
        self.id = 0
        self.next_state = 0
        self.color = 'white'
        self.draw_cell(gui.canvas)

    def draw_cell(self, canvas):
        """Paint this cell on *canvas* in its current colour."""
        x, y = self.position_in_canvas
        canvas.create_rectangle(x, y, x + rectangle_size, y + rectangle_size, fill=self.color)

    def draw_cell_energy(self, canvas, color):
        """Paint this cell on *canvas* using an explicit energy *color*."""
        x, y = self.position_in_canvas
        canvas.create_rectangle(x, y, x + rectangle_size, y + rectangle_size, fill=color)
class GUI(Tk):
    """Main window: a CA control row, a Monte Carlo control row, and the
    drawing canvas.  Simulation state itself lives in module-level globals
    (grid, mc_grid, x_size, y_size, colors, seed_id, ...)."""

    def __init__(self):
        """Build all widgets for the CA row, the MC row and the canvas."""
        Tk.__init__(self, className=' Cellular Automata 2D Grain Growth + Monte Carlo Grain Boundary Smoothing')
        self.minsize(800, 626)
        self.ca_frame = Frame(self)
        self.ca_frame.pack()
        self.mc_frame = Frame(self)
        self.mc_frame.pack()
        self.canvas_frame = Frame(self)
        self.canvas_frame.pack()
        # CA frame
        self.ca_lab = Label(self.ca_frame, text='CA: ')
        self.ca_lab.pack(side=LEFT)
        self.start_butt = Button(self.ca_frame, text='Start', fg='white', bg='#263D42', width=10, command=self.start)
        self.start_butt.pack(side=LEFT)
        self.stop_butt = Button(self.ca_frame, text='Stop', fg='white', bg='#263D42', width=10, command=self.stop)
        self.stop_butt.pack(side=LEFT)
        self.size_lab = Label(self.ca_frame, text='Size (x,y): (')
        self.size_lab.pack(side=LEFT)
        self.default_x_size = IntVar(value=250)
        self.x_size_ent = Entry(self.ca_frame, width=4, textvariable=self.default_x_size)
        self.x_size_ent.pack(side=LEFT)
        self.colon_lab = Label(self.ca_frame, text=',')
        self.colon_lab.pack(side=LEFT)
        self.default_y_size = IntVar(value=150)
        self.y_size_ent = Entry(self.ca_frame, width=4, textvariable=self.default_y_size)
        self.y_size_ent.pack(side=LEFT)
        self.bracket_lab = Label(self.ca_frame, text=') ')
        self.bracket_lab.pack(side=LEFT)
        self.size_butt = Button(self.ca_frame, text='Change size/Clear', fg='white', bg='#263D42', width=16,
                                command=self.build_grid)
        self.size_butt.pack(side=LEFT)
        self.condition_lab = Label(self.ca_frame, text='Condition: ')
        self.condition_lab.pack(side=LEFT)
        self.default_condition = StringVar(value='Periodic')
        self.condition_combo = ttk.Combobox(self.ca_frame, width=10, values=['Periodic', 'Absorbing'],
                                            textvariable=self.default_condition)
        self.condition_combo.pack(side=LEFT)
        self.nucleation_lab = Label(self.ca_frame, text='Nucleation: ')
        self.nucleation_lab.pack(side=LEFT)
        self.default_nucleation = StringVar(value='Random')
        # NOTE(review): 'value=' here vs the documented ttk.Combobox option
        # 'values=' (used for condition_combo above) -- verify this option
        # spelling actually populates the dropdown.
        self.nucleation_combo = ttk.Combobox(self.ca_frame, width=15, textvariable=self.default_nucleation,
                                             value=['Homogeneous', 'With a radius', 'Random'])
        self.nucleation_combo.pack(side=LEFT)
        self.nucleation_combo.bind('<<ComboboxSelected>>', self.nucleation_handler)
        self.number_rand_lab = Label(self.ca_frame, text='Number of random: ')
        self.number_rand_lab.pack(side=LEFT)
        self.default_num_rand = IntVar(value=100)
        self.number_rand_ent = Entry(self.ca_frame, width=5, textvariable=self.default_num_rand)
        self.number_rand_ent.pack(side=LEFT)
        self.neighborhood_lab = Label(self.ca_frame, text='Neighborhood: ')
        self.neighborhood_lab.pack(side=LEFT)
        self.default_neighborhood = StringVar(value='Moore')
        self.neighborhood_combo = ttk.Combobox(self.ca_frame, width=15, textvariable=self.default_neighborhood,
                                               value=['Moore', 'Von Neumann', 'Pentagonal-random', 'Hexagonal-left',
                                                      'Hexagonal-right', 'Hexagonal-random', 'With a radius'])
        self.neighborhood_combo.pack(side=LEFT)
        self.neighborhood_combo.bind('<<ComboboxSelected>>', self.neighborhood_handler)
        self.radius_lab = Label(self.ca_frame, text='Radius: ')
        self.radius_lab.pack(side=LEFT)
        self.default_radius = IntVar(value=10)
        self.radius_ent = Entry(self.ca_frame, width=4, textvariable=self.default_radius)
        self.radius_ent.pack(side=LEFT, padx=(0, 40))
        # MC frame
        self.mc_lab = Label(self.mc_frame, text='MC: ')
        self.mc_lab.pack(side=LEFT)
        self.smooth_butt = Button(self.mc_frame, text='Smooth boundaries', fg='white', bg='#263D42', width=20,
                                  state=DISABLED,
                                  command=self.smooth_boundaries)
        self.smooth_butt.pack(side=LEFT)
        self.iter_lab = Label(self.mc_frame, text='Iterations: ')
        self.iter_lab.pack(side=LEFT)
        self.default_iter = IntVar(value=10)
        self.iter_ent = Entry(self.mc_frame, width=4, textvariable=self.default_iter)
        self.iter_ent.pack(side=LEFT)
        self.kt_lab = Label(self.mc_frame, text='kt: ')
        self.kt_lab.pack(side=LEFT)
        # NOTE(review): an IntVar backing a float Spinbox (from_=0.1,
        # increment=0.1) -- a DoubleVar seems intended; confirm.
        self.default_kt = IntVar(value=1)
        self.kt_spin = Spinbox(self.mc_frame, width=4, from_=0.1, to=6.0, increment=0.1, textvariable=self.default_kt)
        self.kt_spin.pack(side=LEFT)
        self.energy_view_butt = Button(self.mc_frame, text='Show energy view', fg='white', bg='#263D42', width=25,
                                       state=DISABLED,
                                       command=self.show_energy_view)
        self.energy_view_butt.pack(side=RIGHT, padx=(0, 730))
        # Canvas frame
        self.canvas = Canvas(self.canvas_frame, width=1251, height=751)
        self.canvas.pack()
        self.canvas.bind('<Button-1>', switch_cell_state_on_click)

    def build_grid(self):
        """Re-read the size entries, allocate a fresh grid of empty Cells and repaint."""
        global x_size
        global y_size
        global grid
        x_size = int(self.x_size_ent.get())
        y_size = int(self.y_size_ent.get())
        grid = np.array([[Cell(i, j) for j in range(x_size)] for i in range(y_size)], dtype=object)
        draw_grid(self.canvas)

    def start(self):
        """Run one CA growth step; reschedule every 100 ms until no empty cell remains."""
        neighborhood = self.neighborhood_handler(event=None)
        condition = self.condition_combo.get()
        empty_cells_counter = change_states_in_grid(neighborhood, condition)
        if empty_cells_counter != 0:
            draw_cells_change_state(self.canvas)
            global begin_id
            begin_id = self.canvas.after(100, self.start)
        else:
            # grid full: stop the timer and enable the Monte Carlo stage
            self.stop()
            messagebox.showinfo('Good info!', 'Completed successfully!')
            self.smooth_butt.config(state=NORMAL)

    def stop(self):
        """Cancel the pending after() callback scheduled by start()."""
        self.canvas.after_cancel(begin_id)

    def nucleation_handler(self, event):
        """Dispatch the selected nucleation mode to its module-level function."""
        values = ['Homogeneous', 'With a radius', 'Random']
        current = self.nucleation_combo.current()
        value = values[current]
        func_map = {
            'Homogeneous': homogeneous,
            'With a radius': with_radius,
            'Random': rand
        }
        func = func_map.get(value)
        func()

    def neighborhood_handler(self, event):
        """Return the neighborhood function matching the combobox selection index."""
        current = self.neighborhood_combo.current()
        neighborhood = nh.choose_neighborhood(current)
        return neighborhood

    def smooth_boundaries(self):
        """Deep-copy the grown grid into mc_grid and run MC smoothing in a new window."""
        global grid
        global mc_grid
        mc_canvas = self.create_new_window()
        draw_grid(mc_canvas)
        draw_cells(grid, mc_canvas)
        mc_canvas.update()
        current = self.neighborhood_combo.current()
        neighborhood = nh.choose_neighborhood(current)
        condition = self.condition_combo.get()
        mc_grid = copy.deepcopy(grid)
        iterations = int(self.iter_ent.get())
        try:
            for i in range(iterations):
                calculate_energy_in_grid(neighborhood, condition)
                draw_cells(mc_grid, mc_canvas)
                mc_canvas.update()
            messagebox.showinfo('Good info!', 'Completed successfully!')
            self.energy_view_butt.config(state=NORMAL)
        except ZeroDivisionError:
            # kt == 0 makes calculate_energy_in_grid divide by zero
            messagebox.showerror('Error!', 'You can not divide by zero!')

    def create_new_window(self):
        """Open a Toplevel holding a canvas the same size as the main one; return the canvas."""
        mc_root = Toplevel(self)
        mc_canvas = Canvas(mc_root, width=1251, height=751)
        mc_canvas.pack()
        return mc_canvas

    def show_energy_view(self):
        """Render mc_grid cell energies as a blue-to-black heat map in a new window."""
        energy_canvas = self.create_new_window()
        draw_grid(energy_canvas)
        # colour per boundary-energy level (0 = interior, 8 = fully surrounded)
        energy_level = {
            0: '#1e00ff',
            1: '#00aeff',
            2: '#ccff00',
            3: '#fff700',
            4: '#ff6f00',
            5: '#ff0000',
            6: '#a30000',
            7: '#470000',
            8: '#000000'
        }
        for i in range(y_size):
            for j in range(x_size):
                cell_energy = mc_grid[i][j].energy
                if cell_energy in energy_level.keys():
                    color = energy_level[cell_energy]
                    mc_grid[i][j].draw_cell_energy(energy_canvas, color)
def switch_cell_state_on_click(event):
    """Toggle the clicked cell: seed a new grain if empty, clear it otherwise."""
    global seed_id
    # snap the click to the 5-px cell lattice (cells are drawn at offset 2)
    x = (event.x + 2) - (event.x + 2) % 5
    y = (event.y + 2) - (event.y + 2) % 5
    try:
        one_element_structure = []
        # canvas coordinates -> matrix indices
        j = int(x / 5 - 1)
        i = int(y / 5 - 1)
        # clicks in the top/left border band map to -1: treat as outside the
        # grid (negative indices would otherwise wrap to the far edge)
        if i == -1 or j == -1:
            raise IndexError
        if grid[i][j].id == 0:
            one_element_structure.append((j, i))
            seed_id += 1
            grid[i][j].id = seed_id
            draw_structure(one_element_structure)
        else:
            grid[i][j].clean_cell()
    except IndexError:
        # click landed beyond the grid: ignore it
        return
def draw_grid(canv):
    """Clear *canv* and repaint every cell of the global grid."""
    canv.delete('all')
    for row in range(y_size):
        for col in range(x_size):
            grid[row][col].draw_cell(canv)
def draw_cells_change_state(canv):
    """Repaint and commit every cell whose buffered next_state differs from its id."""
    for row in range(y_size):
        for col in range(x_size):
            cell = grid[row][col]
            if cell.next_state != cell.id:
                cell.draw_cell(canv)
                cell.switch_state()
def change_states_in_grid(neighborhood_func, condition):
    """One CA growth step: compute next_state for every still-empty cell.

    Returns the number of cells that were empty this step (counted before the
    updates are committed by draw_cells_change_state), so the caller can stop
    once the grid is full.
    """
    empty_cells_counter = 0
    radius = int(gui.radius_ent.get())
    for i in range(y_size):
        for j in range(x_size):
            if grid[i][j].id == 0:
                # the radius-based neighborhood needs the extra radius argument
                if neighborhood_func != nh.with_radius:
                    neighborhood_struct = neighborhood_func(grid[i][j])
                else:
                    neighborhood_struct = neighborhood_func(grid[i][j], radius)
                calculate_state(grid[i][j], neighborhood_struct, condition)
                empty_cells_counter += 1
    return empty_cells_counter
def periodic_condition(cell_co):
    """Wrap a (row, col) coordinate onto the periodic (toroidal) grid.

    Generalization: the original only undid a single grid-width of overflow
    (x < 0 -> x + x_size, x >= x_size -> x - x_size); modulo arithmetic gives
    the same result for those cases and also handles offsets of any magnitude,
    e.g. large neighbourhood radii.
    """
    y, x = cell_co
    return y % y_size, x % x_size
def calculate_energy(neighborhood_struct, cell, condition):
    """Count neighbours of *cell* whose grain id differs (grain-boundary energy)."""
    energy = 0
    if condition == 'Absorbing':
        for cell_co in neighborhood_struct:
            y, x = cell_co
            # coordinates outside the grid contribute nothing
            if x < 0 or y < 0 or x >= x_size or y >= y_size:
                continue
            if mc_grid[y][x].id != cell.id:
                energy += 1
    elif condition == 'Periodic':
        for cell_co in neighborhood_struct:
            try:
                y, x = cell_co
                # negative coordinates wrap silently via Python's negative
                # indexing (the periodic behaviour wanted on the low edges);
                # only overflow past the high edge raises IndexError and is
                # wrapped explicitly below
                if mc_grid[y][x].id != cell.id:
                    energy += 1
            except IndexError:
                y, x = periodic_condition(cell_co)
                if mc_grid[y][x].id != cell.id:
                    energy += 1
    return energy
def random_new_cell(neighborhood_struct, cell, condition):
    """Pick a random neighbour of *cell* to copy a grain id from.

    Falls back to *cell* itself (Absorbing) or to a periodically wrapped
    coordinate (Periodic) when the drawn coordinate is off-grid.
    """
    # NOTE(review): the tuple is unpacked here as (x, y) while
    # calculate_energy/calculate_state unpack the same coordinates as (y, x),
    # and the two branches below index mc_grid in opposite orders
    # (mc_grid[x][y] vs mc_grid[y][x]) -- confirm the ordering produced by
    # the neighborhood module.
    x, y = random.choice(neighborhood_struct)
    if condition == 'Absorbing':
        try:
            new_cell = mc_grid[x][y]
        except IndexError:
            # off-grid: keep the current cell, i.e. no change on this draw
            new_cell = cell
        return new_cell
    elif condition == 'Periodic':
        try:
            new_cell = mc_grid[y][x]
        except IndexError:
            # NOTE(review): this draws a *fresh* random neighbour rather than
            # wrapping the one that just failed -- verify intent.
            y, x = periodic_condition(random.choice(neighborhood_struct))
            new_cell = mc_grid[y][x]
        return new_cell
def change_energy(rand_cell, new_cell, energy_after):
    """Accept a flip: copy the chosen neighbour's grain id/colour onto
    *rand_cell* and record its new boundary energy."""
    rand_cell.energy, rand_cell.id, rand_cell.color = (
        energy_after,
        new_cell.id,
        new_cell.color,
    )
def calculate_energy_in_grid(neighborhood_func, condition):
    """One Monte Carlo sweep over mc_grid (each cell visited once, random order).

    For each cell: measure its boundary energy, tentatively copy a random
    neighbour's grain id, and accept the flip if it lowers the energy, or --
    per the Metropolis criterion -- with probability exp(-dE/kt) otherwise.

    Bug fix: the original computed exp(+dE/kt), which is always >= 1 for
    dE > 0, and accepted when ``probability < random()`` -- a condition that
    can never hold, so energy-raising flips were never accepted and kt had no
    effect.  The standard Metropolis rule (accept when random() < exp(-dE/kt))
    is used now.
    """
    draw_set = list(mc_grid.ravel())
    kt = float(gui.kt_spin.get())
    radius = int(gui.radius_ent.get())
    for _ in range(len(draw_set)):
        # pick a not-yet-visited cell uniformly at random
        rand_cell = np.random.choice(draw_set)
        draw_set.remove(rand_cell)
        if neighborhood_func != nh.with_radius:
            neighborhood_struct = neighborhood_func(rand_cell)
        else:
            neighborhood_struct = neighborhood_func(rand_cell, radius)
        energy_before = calculate_energy(neighborhood_struct, rand_cell, condition)
        rand_cell.energy = energy_before
        new_cell = random_new_cell(neighborhood_struct, rand_cell, condition)
        energy_after = calculate_energy(neighborhood_struct, new_cell, condition)
        energy_modification = energy_after - energy_before
        if energy_modification <= 0:
            # downhill (or neutral) move: always accept
            change_energy(rand_cell, new_cell, energy_after)
        else:
            probability = exp(-energy_modification / kt)
            rand_prob = random.random()
            if rand_prob < probability:
                change_energy(rand_cell, new_cell, energy_after)
def draw_cells(any_grid, canv):
    """Repaint every cell of *any_grid* onto *canv* (grid dimensions come
    from the module-level x_size/y_size)."""
    for row in range(y_size):
        for col in range(x_size):
            cell = any_grid[row][col]
            cell.draw_cell(canv)
def clean_cells(x_size, y_size):
    """Reset every cell of the global grid to empty/white and repaint it.

    The parameters shadow the module globals of the same names; callers pass
    the current grid dimensions explicitly.
    """
    for row_idx in range(y_size):
        row = grid[row_idx]
        for col_idx in range(x_size):
            row[col_idx].clean_cell()
def prepare_to_do_nucleation():
    """Wipe the board and return (x_size, y_size, current seed_id, empty seed list).

    Note: the returned seed_id is a *copy* of the module-level counter; the
    nucleation routines increment their local copy.
    """
    global x_size, y_size, seed_id
    clean_cells(x_size, y_size)
    fresh_structure = []
    return x_size, y_size, seed_id, fresh_structure
def draw_structure(structure):
    """Give every seed in *structure* a unique random colour and paint it.

    structure: list of (x, y) canvas/matrix coordinate pairs whose cells
    already carry their grain ids.  Colours are drawn at random and re-drawn
    on collision so each grain id maps to a distinct colour in `colors`.

    Bug fix: on a colour collision the original did ``s -= 1``, stepping back
    and re-colouring the *previous* seed (and indexing structure[-1] when s
    was 0); now the same seed is simply retried with a new colour.
    """
    s = 0
    while s < len(structure):
        x_pos, y_pos = structure[s]
        color = f'#{random.randrange(0x1000000):06x}'
        if color in colors.values():
            continue  # collision: retry this seed with a fresh colour
        cell = grid[y_pos][x_pos]
        colors[cell.id] = color
        cell.color = color
        cell.draw_cell(gui.canvas)
        s += 1
def homogeneous():
    """Seed nuclei on a regular lattice: every 10th cell, skipping the border."""
    x_size, y_size, seed_id, structure = prepare_to_do_nucleation()
    # NOTE(review): seed_id is the local copy returned above; the module-level
    # counter is not advanced by nucleation -- confirm that is intended.
    columns = itertools.islice(range(x_size), 1, x_size - 1, 10)
    for x in columns:
        for y in itertools.islice(range(y_size), 1, y_size - 1, 10):
            seed_id += 1
            grid[y][x].id = seed_id
            structure.append((x, y))
    draw_structure(structure)
def is_clear_within_radius(radius, x, y):
    """Return True if no already-seeded cell lies within *radius* of (x, y).

    Scans the (2*radius+1)^2 square centred on the candidate cell.

    Bug fixes: the original skip condition ``x_start == x and y_start == y``
    compared loop-invariant values (true only for radius == 0) instead of
    skipping the centre cell; and negative indices silently wrapped to the
    far edge of the grid via Python's negative indexing, checking cells on
    the opposite side.  Both are corrected below.
    """
    for i in range(x - radius, x + radius + 1):
        for j in range(y - radius, y + radius + 1):
            if i == x and j == y:
                continue  # the candidate cell itself
            if i < 0 or j < 0:
                continue  # off-grid on the low edges; avoid negative-index wraparound
            try:
                if grid[j][i].id != 0:
                    return False
            except IndexError:
                pass  # off-grid past the high edges: ignore
    return True
def with_radius():
    """Scatter seeds at random, keeping a minimum clearance *radius* between them.

    Makes exactly number_of_random_cells placement *attempts* (s counts every
    draw, successful or not -- unlike rand(), which retries on failure), so
    typically fewer seeds than requested are placed.
    NOTE(review): seed_id below is the local copy returned by
    prepare_to_do_nucleation(); the module-level counter used by click-seeding
    is not advanced -- confirm that is intended.
    """
    x_size, y_size, seed_id, structure = prepare_to_do_nucleation()
    s = 0
    radius = int(gui.radius_ent.get())
    number_of_random_cells = int(gui.number_rand_ent.get())
    while s < number_of_random_cells:
        x = random.randint(0, x_size - 1)
        y = random.randint(0, y_size - 1)
        s += 1
        # place only on an empty cell with no other seed within the radius
        if grid[y][x].id == 0 and is_clear_within_radius(radius, x, y):
            structure.append((x, y))
            seed_id += 1
            grid[y][x].id = seed_id
    draw_structure(structure)
def rand():
    """Scatter exactly number_of_random_cells seeds on random empty cells.

    Collisions with occupied cells are retried (s is decremented), so this can
    loop for a long time when the request approaches the grid size; requests
    larger than the grid are rejected up front.
    NOTE(review): seed_id below is the local copy returned by
    prepare_to_do_nucleation(); the module-level counter used by click-seeding
    is not advanced -- confirm that is intended.
    """
    x_size, y_size, seed_id, structure = prepare_to_do_nucleation()
    s = 0
    number_of_random_cells = int(gui.number_rand_ent.get())
    if number_of_random_cells > x_size * y_size:
        messagebox.showerror('Error', 'Number of random cells is greater than cell number!')
        return
    while s < number_of_random_cells:
        x = random.randint(0, x_size - 1)
        y = random.randint(0, y_size - 1)
        s += 1
        if grid[y][x].id == 0:
            structure.append((x, y))
            seed_id += 1
            grid[y][x].id = seed_id
        else:
            # hit an occupied cell: undo the count and draw again
            s -= 1
    draw_structure(structure)
def most_frequent(id_list):
    """Return the most common value in *id_list*.

    Ties resolve to the value encountered first (Counter.most_common keeps
    first-encountered order among equal counts).
    """
    tally = Counter(id_list)
    top_value = tally.most_common(1)[0][0]
    return top_value
def calculate_state(cell, neighborhood_struct, condition):
    """Set cell.next_state (and colour) to the most frequent neighbouring grain id.

    Empty neighbours (id 0) are ignored; if every neighbour is empty the cell
    is left unchanged for this step.  The id-to-colour mapping comes from the
    module-level `colors` dict filled by draw_structure().
    """
    id_list = []
    if condition == 'Absorbing':
        for cell_co in neighborhood_struct:
            y, x = cell_co
            # off-grid neighbours contribute nothing
            if x < 0 or y < 0 or x >= x_size or y >= y_size:
                continue
            if grid[y][x].id != 0:
                id_list.append(grid[y][x].id)
    elif condition == 'Periodic':
        for cell_co in neighborhood_struct:
            try:
                y, x = cell_co
                # negative coordinates wrap silently via negative indexing;
                # only high-edge overflow raises and is wrapped explicitly
                if grid[y][x].id != 0:
                    id_list.append(grid[y][x].id)
            except IndexError:
                y, x = periodic_condition(cell_co)
                if grid[y][x].id != 0:
                    id_list.append(grid[y][x].id)
    if len(id_list) != 0:
        most = most_frequent(id_list)
        cell.next_state = most
        cell.color = colors[most]
# Build the application window, size the initial grid from the default entry
# values, and enter the Tk event loop.
gui = GUI()
gui.build_grid()
gui.mainloop()
| [
"maciekpykosz@gmail.com"
] | maciekpykosz@gmail.com |
68ac0eeb5d55a38888952d35a6cd32b67c9bde23 | d7b4e2e391e1f15fd7cb4fbf4d9aee598131b007 | /AE_Datasets/R_A/datasets/CWRUCWT.py | 66ff726731086772786eee97b0378a32b4c39b8e | [
"MIT"
] | permissive | wuyou33/DL-based-Intelligent-Diagnosis-Benchmark | eba2ce6f948b5abe68069e749f64501a32e1d7ca | e534f925cf454d07352f7ef82d75a8d6dac5355c | refs/heads/master | 2021-01-02T15:06:29.041349 | 2019-12-28T21:47:21 | 2019-12-28T21:47:21 | 239,673,952 | 1 | 0 | MIT | 2020-02-11T04:15:21 | 2020-02-11T04:15:20 | null | UTF-8 | Python | false | false | 5,887 | py | import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from datasets.MatrixDatasets import dataset
from datasets.matrix_aug import *
from tqdm import tqdm
import pickle
import pywt
# Length (in samples) of each raw signal segment fed to the CWT.
signal_size=100

# Sub-directories of the CWRU bearing dataset.
datasetname = ["12k Drive End Bearing Fault Data", "12k Fan End Bearing Fault Data", "48k Drive End Bearing Fault Data",
               "Normal Baseline Data"]
# Healthy-baseline recordings, one per motor speed.
normalname = ["97.mat", "98.mat", "99.mat", "100.mat"]
# For 12k Drive End Bearing Fault Data
dataname1 = ["105.mat", "118.mat", "130.mat", "169.mat", "185.mat", "197.mat", "209.mat", "222.mat",
             "234.mat"]  # 1797rpm
dataname2 = ["106.mat", "119.mat", "131.mat", "170.mat", "186.mat", "198.mat", "210.mat", "223.mat",
             "235.mat"]  # 1772rpm
dataname3 = ["107.mat", "120.mat", "132.mat", "171.mat", "187.mat", "199.mat", "211.mat", "224.mat",
             "236.mat"]  # 1750rpm
dataname4 = ["108.mat", "121.mat", "133.mat", "172.mat", "188.mat", "200.mat", "212.mat", "225.mat",
             "237.mat"]  # 1730rpm
# For 12k Fan End Bearing Fault Data
dataname5 = ["278.mat", "282.mat", "294.mat", "274.mat", "286.mat", "310.mat", "270.mat", "290.mat",
             "315.mat"]  # 1797rpm
dataname6 = ["279.mat", "283.mat", "295.mat", "275.mat", "287.mat", "309.mat", "271.mat", "291.mat",
             "316.mat"]  # 1772rpm
dataname7 = ["280.mat", "284.mat", "296.mat", "276.mat", "288.mat", "311.mat", "272.mat", "292.mat",
             "317.mat"]  # 1750rpm
dataname8 = ["281.mat", "285.mat", "297.mat", "277.mat", "289.mat", "312.mat", "273.mat", "293.mat",
             "318.mat"]  # 1730rpm
# For 48k Drive End Bearing Fault Data
dataname9 = ["109.mat", "122.mat", "135.mat", "174.mat", "189.mat", "201.mat", "213.mat", "250.mat",
             "262.mat"]  # 1797rpm
dataname10 = ["110.mat", "123.mat", "136.mat", "175.mat", "190.mat", "202.mat", "214.mat", "251.mat",
              "263.mat"]  # 1772rpm
dataname11 = ["111.mat", "124.mat", "137.mat", "176.mat", "191.mat", "203.mat", "215.mat", "252.mat",
              "264.mat"]  # 1750rpm
dataname12 = ["112.mat", "125.mat", "138.mat", "177.mat", "192.mat", "204.mat", "217.mat", "253.mat",
              "265.mat"]  # 1730rpm
# label
label = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # The failure data is labeled 1-9
# Channel suffixes in the .mat variable names: drive end, fan end, base.
axis = ["_DE_time", "_FE_time", "_BA_time"]
def CWT(lenth, data):
    """Continuous wavelet transform of *data* with the Mexican-hat wavelet.

    Uses scales 1 .. lenth-1 and returns the coefficient matrix of shape
    (lenth - 1, len(data)); the frequency axis from pywt is discarded.
    """
    scales = np.arange(1, lenth)
    coefficients, _frequencies = pywt.cwt(data, scales, 'mexh')
    return coefficients
# generate Training Dataset and Testing Dataset
def get_files(root, test=False):
    '''
    Assemble the raw CWT images and their labels for training/testing.

    root: location of the data set (joined under /tmp).
    test: accepted for interface compatibility; unused here.

    Returns [data, lab]: the normal-baseline segments (label 0) followed
    by the segments of every 12k drive-end fault file (labels 1-9).
    '''
    normal_dir = os.path.join('/tmp', root, datasetname[3])
    fault_dir = os.path.join('/tmp', root, datasetname[0])
    normal_path = os.path.join('/tmp', normal_dir, normalname[0])  # 0->1797rpm ;1->1772rpm;2->1750rpm;3->1730rpm
    data, lab = data_load(normal_path, axisname=normalname[0], label=0)  # the label for normal data is 0
    for idx in tqdm(range(len(dataname1))):
        fault_path = os.path.join('/tmp', fault_dir, dataname1[idx])
        extra_data, extra_lab = data_load(fault_path, dataname1[idx], label=label[idx])
        data.extend(extra_data)
        lab.extend(extra_lab)
    return [data, lab]
def data_load(filename, axisname, label):
    '''
    Generate (CWT image, label) pairs from one CWRU .mat file.

    filename: path of the .mat data file.
    axisname: file name such as "105.mat"; its numeric stem selects which
              channel variable ("X105_DE_time", ...) to read.
    label:    class label attached to every segment from this file.

    Returns (data, lab): lists of CWT coefficient matrices and labels.
    '''
    datanumber = axisname.split(".")
    # Fix: use int() instead of eval() -- evaluating file-name text as code
    # is unsafe and unnecessary; the check only decides the zero padding.
    if int(datanumber[0]) < 100:
        realaxis = "X0" + datanumber[0] + axis[0]
    else:
        realaxis = "X" + datanumber[0] + axis[0]
    fl = loadmat(filename)[realaxis]
    fl = fl.reshape(-1,)
    data = []
    lab = []
    start, end = 0, signal_size
    # Only the first tenth of the record is segmented.
    while end <= fl.shape[0] / 10:
        x = fl[start:end]
        imgs = CWT(signal_size + 1, x)
        data.append(imgs)
        lab.append(label)
        start += signal_size
        end += signal_size
    return data, lab
def data_transforms(dataset_type="train", normlize_type="-1-1"):
    """Return the preprocessing pipeline for the requested split.

    'train' adds random scale/crop augmentation on top of the shared
    resize/reshape/normalize steps; 'val' only resizes and normalizes.
    Any other dataset_type raises KeyError.
    """
    steps_by_split = {
        'train': [ReSize(size=0.32),
                  Reshape(),
                  Normalize(normlize_type),
                  RandomScale(),
                  RandomCrop(),
                  Retype()],
        'val': [ReSize(size=0.32),
                Reshape(),
                Normalize(normlize_type),
                Retype()],
    }
    return Compose(steps_by_split[dataset_type])
class CWRUCWT(object):
    """CWRU bearing dataset wrapper producing CWT-image datasets.

    data_dir may be either a directory of raw .mat files (a pickle cache
    named CWRUCWT.pkl is then written next to them) or a path to an
    existing pickle file (detected by the presence of a file extension).
    """
    num_classes = 10
    inputchannel = 1

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        """Build the dataset objects.

        Returns a single test dataset when *test* is true, otherwise a
        stratified 80/20 (train, val) pair of datasets.
        """
        is_pickle = len(os.path.basename(self.data_dir).split('.')) == 2
        if is_pickle:
            with open(self.data_dir, 'rb') as fo:
                list_data = pickle.load(fo, encoding='bytes')
        else:
            list_data = get_files(self.data_dir, test)
            with open(os.path.join(self.data_dir, "CWRUCWT.pkl"), 'wb') as fo:
                pickle.dump(list_data, fo)
        if test:
            return dataset(list_data=list_data, test=True, transform=None)
        data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
        train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
        train_set = dataset(list_data=train_pd, transform=data_transforms('train', self.normlizetype))
        val_set = dataset(list_data=val_pd, transform=data_transforms('val', self.normlizetype))
        return train_set, val_set
| [
"646032073@qq.com"
] | 646032073@qq.com |
2ea59d15a88cd4a3cfba74fb74162da032c006d3 | d613fecbe4845ed4a0f1d667439640ed10c8922a | /app1/views/ajax.py | e9581d351b9923bc2a953751021d2bda01cc0396 | [] | no_license | AnyiYim/DjangoTeacherManagerDemo | e18bdb312237e39da00f62006e9e7a98d817d08c | eecfaac3bd5badfb3ac1aed5b2e3f034e505e26e | refs/heads/master | 2021-04-27T00:23:38.853148 | 2018-03-04T16:08:46 | 2018-03-04T16:08:46 | 123,805,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from django.shortcuts import render, redirect, HttpResponse
from app1 import models
def ajax1(request):
    """Serve the page that hosts the AJAX demo form."""
    template_name = 'ajax1.html'
    return render(request, template_name)
def ajax2(request):
    """Read credentials from the query string and answer with a fixed text."""
    username = request.GET.get('username')
    password = request.GET.get('password')
    # Demo endpoint: the credentials are read but not checked.
    return HttpResponse('我愿意')
def ajax4(request):
    """Delete the student row selected by ?nid=...

    Responds with '成功' on success, otherwise with the error text.
    """
    student_id = request.GET.get('nid')
    outcome = '成功'
    try:
        models.Students.objects.get(id=student_id).delete()
    except Exception as e:
        outcome = str(e)
    return HttpResponse(outcome)
| [
"759502117@qq.com"
] | 759502117@qq.com |
0d8de83b4bd580f8d4fe58a16cad571d34cd8067 | 7c2e887049ef409bec9481820c3924596b033974 | /mysite/urls.py | 06d9066373cdf85f38e59ba2710b398b8ba323b1 | [] | no_license | davemaharshi7/Online-Examination-system | 5022541901729477537ea2ae2168a8bf4ab32771 | 95d7ddb02d59adaecf04e1ab0ade249da5e733f3 | refs/heads/master | 2020-03-26T20:18:28.021979 | 2018-08-19T17:17:01 | 2018-08-19T17:17:01 | 145,316,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.shortcuts import redirect
from django.urls import include, path
from django.conf.urls import url, include
from django.http import HttpResponseRedirect
# URL routing table: each app manages its own routes via include().
# Most entries use path() (Django 2.0 syntax); a few legacy entries still
# use url() from django.conf.urls.
urlpatterns = [
    path('contact/', include('contact.urls')),
    path('', include('exam_home.urls')),  # site root
    path('loginmodule/', include('loginmodule.urls')),
    path('admin/', admin.site.urls),
    url('firstdbtest/', include('firstdbtest.urls')),
    url('teacher/', include('teacher.urls')),
    url('signup/', include('signup.urls')),
    path('course/', include('course.urls')),
]
| [
"39943075+davemaharshi7@users.noreply.github.com"
] | 39943075+davemaharshi7@users.noreply.github.com |
f23ca4dfac5ec05af295ebc0d0c60a34536b013f | 14208e9ff64dfca2feebbe9758bf78ea5a2ad113 | /migrations/versions/0c2ef460c5f9_.py | af6c14f257106d36339e43d3cf21b51a4c721885 | [] | no_license | sameerank/word-frequency | f83da0ff1bc1d4e9cf330f125c8073de0f96533c | e64f9bd0cc79403bf47cdc68132d3b90897abf8b | refs/heads/master | 2021-01-19T04:00:39.401619 | 2016-05-31T02:03:50 | 2016-05-31T02:03:50 | 59,968,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | """empty message
Revision ID: 0c2ef460c5f9
Revises: None
Create Date: 2016-05-29 23:04:32.909854
"""
# revision identifiers, used by Alembic.
revision = '0c2ef460c5f9'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the ``results`` table: integer primary key, the scraped url,
    and two JSON columns holding the full and stop-word-filtered counts."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('results',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('url', sa.String(), nullable=True),
    sa.Column('result_all', postgresql.JSON(), nullable=True),
    sa.Column('result_no_stop_words', postgresql.JSON(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``results`` table, reversing :func:`upgrade`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('results')
    ### end Alembic commands ###
| [
"skunche@gmail.com"
] | skunche@gmail.com |
3dd15c6e2f034c217ed036d16ead87ec715fc1d6 | f59d3d7f6b12a6b235bd89d4be815bee1c2164a3 | /command_handlers/test.py | b665dec86d66442e4d7aec8b52a2dd5f0881fc70 | [] | no_license | sanje2v/DualSuperResLearningForSemSeg | 39ed0db5055503a8c0b41a56f1670a9640dd4bf1 | 084da170adbcb23c09dad556d60f3f44a0068e59 | refs/heads/master | 2023-03-26T21:42:44.003684 | 2021-03-28T12:28:26 | 2021-03-28T12:28:26 | 318,446,369 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,221 | py | import os
import os.path
from tqdm.auto import tqdm
import numpy as np
import torch as t
import torchvision as tv
from PIL import Image, ImageOps
from models import DSRL
from models.transforms import *
from utils import *
import settings
import consts
@t.no_grad()
def test(image_file, images_dir, dataset, output_dir, weights, device, compiled_model, **other_args):
# Testing on a single input image using given weights
device_obj = t.device('cuda' if isCUDAdevice(device) else device)
if not dataset:
# FIXME: We need to save input normalization values with weights. Here, we default to cityscapes settings.
dataset = settings.DATASETS['cityscapes']
if compiled_model:
model = t.jit.load(weights)
else:
# Create model and set to evaluation mode
model = DSRL(stage=1, dataset_settings=dataset['settings']).eval()
# Load specified weights file
model.load_state_dict(load_checkpoint_or_weights(weights, map_location=device_obj)['model_state_dict'], strict=False)
# Copy the model into 'device_obj'
model = model.to(device_obj)
if image_file or images_dir:
image_filenames = [image_file,] if image_file else getFilesWithExtension(images_dir, consts.IMAGE_FILE_EXTENSIONS, with_path=True)
for image_filename in tqdm(image_filenames,
desc='TESTING',
colour='yellow',
position=0,
leave=False):
# Using an image file for testing
# Load image file, rotate according to EXIF info, add 'batch' dimension and convert to tensor
with ImageOps.exif_transpose(Image.open(image_filename))\
.convert('RGB')\
.resize(swapTupleValues(settings.MODEL_OUTPUT_SIZE), resample=Image.BILINEAR) as input_image:
with timethis(INFO("Inference required {:}.")), t.no_grad(), t.jit.optimized_execution(should_optimize=compiled_model):
input_transform = tv.transforms.Compose([tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=dataset['settings'].MEAN, std=dataset['settings'].STD),
tv.transforms.Resize(size=settings.MODEL_INPUT_SIZE, interpolation=Image.BILINEAR),
tv.transforms.Lambda(lambda x: t.unsqueeze(x, dim=0))])
SSSR_output, _, _, _ = model.forward(input_transform(input_image).to(device_obj))
input_image = np.array(input_image, dtype=np.uint8).transpose((2, 0, 1))
SSSR_output = np.argmax(np.squeeze(SSSR_output.detach().cpu().numpy(), axis=0), axis=0) # Bring back result to CPU memory and convert to index array
vis_image = make_input_output_visualization(input_image, SSSR_output, dataset['settings'].CLASS_RGB_COLOR)
vis_image = vis_image.transpose((1, 2, 0)) # Channel order required for PIL.Image below
with Image.fromarray(vis_image, mode='RGB') as vis_image: # Convert from numpy array to PIL Image
# Save and show output on plot
os.makedirs(output_dir, exist_ok=True)
vis_image_filename = os.path.join(output_dir, os.path.splitext(os.path.basename(image_filename))[0] + '.png')
vis_image.save(vis_image_filename, format='PNG')
# Only show output if a single image file is specified
if image_file:
vis_image.show(title='Segmentation output')
print(INFO("Output image saved as: {0:s}.".format(vis_image_filename)))
else:
joint_transforms = JointCompose([JointImageAndLabelTensor(dataset['settings'].LABEL_MAPPING_DICT),
JointNormalize(mean=dataset['settings'].MEAN, std=dataset['settings'].STD),
JointScaledImage(new_img_sizes=(settings.MODEL_INPUT_SIZE, settings.MODEL_OUTPUT_SIZE), new_seg_size=settings.MODEL_OUTPUT_SIZE)])
test_dataset = dataset['class'](dataset['path'],
split=dataset['split'],
transforms=joint_transforms)
test_loader = t.utils.data.DataLoader(test_dataset,
batch_size=1,
shuffle=False,
num_workers=0,
pin_memory=isCUDAdevice(device),
drop_last=False)
print(INFO("Press ENTER to show next pair of input and output. Use CTRL+c to quit."))
for i, ((_, input_org), (target, _)) in enumerate(tqdm(test_loader,
desc='TESTING',
colour='yellow',
position=0,
leave=False)):
if i >= dataset['starting_index']:
with timethis(INFO("Inference required {:}.")):
SSSR_output, _, _, _ = model.forward(input_image.to(device_obj))
input_image = input_org.detach().cpu().numpy()[0]
input_image = np.array(dataset['settings'].STD).reshape(consts.NUM_RGB_CHANNELS, 1, 1) * input_image +\
np.array(dataset['settings'].MEAN).reshape(consts.NUM_RGB_CHANNELS, 1, 1)
input_image = np.clip(input_image * 255., a_min=0.0, a_max=255.).astype(np.uint8)
SSSR_output = np.argmax(SSSR_output.detach().cpu().numpy()[0], axis=0) # Bring back result to CPU memory and convert to index array
target = target.detach().cpu().numpy()[0]
SSSR_output[target == dataset['settings'].IGNORE_CLASS_LABEL] = dataset['settings'].IGNORE_CLASS_LABEL
vis_image_target = make_input_output_visualization(input_image, target, dataset['settings'].CLASS_RGB_COLOR)
vis_image_pred = make_input_output_visualization(input_image, SSSR_output, dataset['settings'].CLASS_RGB_COLOR)
vis_image = np.concatenate((vis_image_target, vis_image_pred), axis=1)
vis_image = vis_image.transpose((1, 2, 0)) # Channel order required for PIL.Image below
with Image.fromarray(vis_image, mode='RGB') as vis_image: # Convert from numpy array to PIL Image
# Save and show output on plot
os.makedirs(output_dir, exist_ok=True)
vis_image_filename = os.path.join(output_dir, str(i) + '.png')
vis_image.save(vis_image_filename, format='PNG')
vis_image.show(title='Segmentation output')
print(INFO("Output image saved as: {0:s}.".format(vis_image_filename)))
input() | [
"swtbase@hotmail.com"
] | swtbase@hotmail.com |
443e9fc54de020dd08d250e460d00b92e76ac597 | 8f1a9e1e2a48894a805616032433b019575e49ee | /BinaryTreeUsingLists.py | 26e75c7e64dc8109e3ec2a241fc9dc4c0aaa1560 | [] | no_license | melitadsouza/CTCI | 1c8b061e43bc052c0ffb31ace724575c7a35488f | 5688d02c11e62bb0e238d6748f523977ac1e789c | refs/heads/master | 2021-08-29T02:15:07.470984 | 2018-02-19T21:48:14 | 2018-02-19T21:48:14 | 120,383,372 | 0 | 0 | null | 2021-08-19T08:42:52 | 2018-02-06T01:13:35 | Python | UTF-8 | Python | false | false | 775 | py | def BinaryTree(r):
return [r,[],[]]
def insertLeft(root, newBranch):
    """Insert *newBranch* as the left child of *root*.

    Any existing left subtree is pushed down to become the left child of
    the new node.  Mutates *root* in place and returns it.
    """
    old_left = root.pop(1)
    if len(old_left) > 1:
        new_left = [newBranch, old_left, []]
    else:
        new_left = [newBranch, [], []]
    root.insert(1, new_left)
    return root
def insertRight(root, newBranch):
    """Insert *newBranch* as the right child of *root*.

    Any existing right subtree is pushed down to become the right child
    of the new node.  Mutates *root* in place and returns it.
    """
    old_right = root.pop(2)
    if len(old_right) > 1:
        new_right = [newBranch, [], old_right]
    else:
        new_right = [newBranch, [], []]
    root.insert(2, new_right)
    return root
def getRootValue(root):
    """Return the value stored at this tree node."""
    return root[0]
def setRootValue(root, newVal):
    """Overwrite the value stored at this tree node (in place)."""
    root[0] = newVal
def getLeftChild(root):
    """Return the left subtree (possibly the empty list)."""
    return root[1]
def getRightChild(root):
    """Return the right subtree (possibly the empty list)."""
    return root[2]
# Demo: build a small tree and exercise the helpers.  Note that the bare
# getter calls below discard their return values -- they only demonstrate
# the API in an interactive session.
r = BinaryTree(3)
insertLeft(r,2)
insertLeft(r,1)
insertRight(r,4)
insertRight(r,5)
getLeftChild(r)   # return value discarded
getRightChild(r)  # return value discarded
setRootValue(r,10)
getRootValue(r)   # return value discarded
print(r)
| [
"noreply@github.com"
] | noreply@github.com |
669a113c17fd1fe1e8f0256f0d625bbbc78a9be4 | 46404c77e04907225475e9d8be6e0fd33227c0b1 | /wildcard pattern matching.py | 0ed16783c406fd5ec5eaf2858e1c35ca373e0e95 | [] | no_license | govardhananprabhu/DS-task- | 84b46e275406fde2d56c301fd1b425b256b29064 | bf54f3d527f52f61fefc241f955072f5ed9a6558 | refs/heads/master | 2023-01-16T07:41:27.064836 | 2020-11-27T11:52:50 | 2020-11-27T11:52:50 | 272,928,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | """
Given two strings 'str' and a wildcard pattern 'pattern' of length N and M respectively, You have to print '1' if the wildcard pattern is matched with str else print '0' .
The wildcard pattern can include the characters ‘?’ and ‘*’
‘?’ – matches any single character
‘*’ – Matches any sequence of characters (including the empty sequence)
Note: The matching should cover the entire str (not partial str).
Constraints:
1 <= length of(str,pat) <= 200
H 7
T 2300
Tag yahoo string
In des
First line contain string s.
Second line contain string,denotes the pattern.
Ot des
Print 1 if it is wildcard pattern else 0.
baaabab
ba*a?
1
baaabab
*****ba*****ab
1
baaabab
a*ab
0
water
*r
1
master
m*e
0
Exp
From sample:replace '*' with "aab" and '?' with 'b'.
Hint
Each occurrence of ‘?’ character in wildcard pattern can be replaced with any other character and each occurrence of ‘*’ with a sequence of characters such that the wildcard pattern becomes identical to the input string after replacement.
"""
def strrmatch(strr, pattern, n, m):
    """Wildcard match of *strr* (length n) against *pattern* (length m).

    '?' matches any single character and '*' matches any sequence
    (including the empty one).  Classic O(n*m) dynamic programme:
    dp[i][j] is True when the first i characters of the string match the
    first j characters of the pattern.
    """
    if m == 0:
        return n == 0
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    # An empty string matches a leading run of '*' only.
    for j in range(1, m + 1):
        if pattern[j - 1] == '*':
            dp[0][j] = dp[0][j - 1]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            token = pattern[j - 1]
            if token == '*':
                # '*' absorbs nothing (dp[i][j-1]) or one more char (dp[i-1][j]).
                dp[i][j] = dp[i][j - 1] or dp[i - 1][j]
            elif token == '?' or strr[i - 1] == token:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = False
    return dp[n][m]
# Read the string and the wildcard pattern from stdin, then print "1"
# when the pattern matches the entire string, "0" otherwise.
strr = input()
pattern = input()
if (strrmatch(strr, pattern, len(strr),len(pattern))):
    print("1")
else:
    print("0")
| [
"noreply@github.com"
] | noreply@github.com |
482570c62a6775d803e88aeef37fddf7afcd691b | 55016a9795fd847ea044f8be659814ca1f0fe1de | /run.py | 2f1c0e28fce25c3c1ac522cad45d0cd54fbf161f | [] | no_license | farhanlarenzo/Automate-updating-catalog-information | 6b6bdab358247be42762742a3b02dd4d82c2f11c | 1859fdc5d12c0661ab49e502aa87ab4343f82b57 | refs/heads/main | 2023-03-19T12:56:24.663605 | 2021-03-15T07:49:37 | 2021-03-15T07:49:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | #script to read descriptions and add it to a json file along with it's deatils.
#!/usr/bin/env python3
import os, sys
import json
import requests
path = "supplier-data/descriptions/"
url = "http://localhost/fruits/"
files = os.listdir(path)
for file in files:
if file.endswith("txt"):
with open(path + file, 'r') as f:
fruit_name = os.path.splitext(file)[0] #get the image name
data = f.read()
data = data.split("\n")
#The data model in the Django application fruit has the following fields: name, weight, description and image_name
fruit_dic = {"name": data[0], "weight": int(data[1].strip(" lbs")), "description": data[2], "image_name": fruit_name + ".jpeg"}
response = requests.post(url, json=fruit_dic)
response.raise_for_status()
print(response.request.url)
print(response.status_code) | [
"noreply@github.com"
] | noreply@github.com |
d716a3b72ae3d732e5d2b3fddd46f8508cc71a99 | 511049d21eccd143dbb1e1df16b23690ba15f1ca | /python/patTuple_jetlepSal_cfg.py | 103a0146df9ff6113784088d2c6c24a4446b9da0 | [] | no_license | cfantasia/CMGWPrimeGroup | e1b907c3d2350d0c994a59d4d50060a7d3f373d7 | 97ef1292828b4c0d2f481c6462e9757ac500087a | refs/heads/master | 2021-01-20T08:44:12.139482 | 2013-08-05T19:37:44 | 2016-02-16T02:42:39 | 11,907,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,637 | py | from UserCode.CMGWPrimeGroup.patTuple_common_cfg import *
from UserCode.CMGWPrimeGroup.patTuple_el_cfg import *
from UserCode.CMGWPrimeGroup.patTuple_mu_cfg import *
from UserCode.CMGWPrimeGroup.patTuple_jet_cfg import *
from UserCode.CMGWPrimeGroup.patTuple_met_cfg import *
def addFastJet(process):
    """Add a kt6PFJets producer (kt algorithm, R=0.6, FastJet rho enabled,
    rho computed within |eta| < 2.5) to the CMSSW process -- presumably for
    rho-based pile-up corrections (confirm against the jet-correction setup).
    """
    process.load('RecoJets.JetProducers.kt4PFJets_cfi')
    process.kt6PFJets = process.kt4PFJets.clone(rParam=0.6, doRhoFastjet=True)
    process.kt6PFJets.Rho_EtaMax = cms.double(2.5)
    # process.patDefaultSequence.replace(process.patJetCorrFactors,
    #                                    process.kt6PFJets *
    #                                    process.patJetCorrFactors)
def jetlep_config(process, reportEveryNum=100, maxEvents=-1) :
    """Configure the PAT-tuple path for a jets-plus-lepton selection.

    Wires up the common, jet, electron, muon and MET configurations, then
    requires at least one selected lepton and three selected jets, defines
    an additional anti-kT R=0.7 PF jet collection (newAK7PF), and builds
    the processing path and output event selection.
    """
    process.load("UserCode.CMGWPrimeGroup.patTuple_jet_cfg")
    common_config(process, reportEveryNum, maxEvents)
    jet_config(process)
    el_config(process)
    mu_config(process)
    met_config(process)
    addFastJet(process)
    # redefine selectedPatMuons (isGlobalMuon not included in std definition)
    process.selectedPatMuons.cut = "pt > 20. & abs(eta) < 2.4 & isGlobalMuon"
    # keep all events with 2 leptons above 20 GeV
    process.countPatLeptons.electronSource = "selectedPatElectrons"
    process.countPatLeptons.muonSource = "selectedPatMuons"
    process.countPatLeptons.minNumber = 1
    process.countPatJets.minNumber = 3
    process.countPatJets.src = "selectedPatJets"
    # Extra anti-kT R=0.7 PF jets clustered from pile-up-cleaned candidates.
    process.newAK7PF = cms.EDProducer("FastjetJetProducer",
                                      Active_Area_Repeats = cms.int32(1),
                                      doAreaFastjet = cms.bool(True),
                                      voronoiRfact = cms.double(-0.9),
                                      maxBadHcalCells = cms.uint32(9999999),
                                      doAreaDiskApprox = cms.bool(False),
                                      maxRecoveredEcalCells = cms.uint32(9999999),
                                      jetType = cms.string('PFJet'),
                                      minSeed = cms.uint32(14327),
                                      Ghost_EtaMax = cms.double(5.0),
                                      doRhoFastjet = cms.bool(False),
                                      jetAlgorithm = cms.string('AntiKt'),
                                      nSigmaPU = cms.double(1.0),
                                      GhostArea = cms.double(0.01),
                                      Rho_EtaMax = cms.double(4.4),
                                      maxBadEcalCells = cms.uint32(9999999),
                                      useDeterministicSeed = cms.bool(True),
                                      doPVCorrection = cms.bool(False),
                                      maxRecoveredHcalCells = cms.uint32(9999999),
                                      rParam = cms.double(0.7),
                                      maxProblematicHcalCells = cms.uint32(9999999),
                                      doOutputJets = cms.bool(True),
                                      src = cms.InputTag("pfNoPileUpIso"),
                                      inputEtMin = cms.double(0.0),
                                      srcPVs = cms.InputTag(""),
                                      jetPtMin = cms.double(3.0),
                                      radiusPU = cms.double(0.5),
                                      maxProblematicEcalCells = cms.uint32(9999999),
                                      doPUOffsetCorr = cms.bool(False),
                                      inputEMin = cms.double(0.0)
                                      )
    ## let it run
    process.p = cms.Path(
        process.patMuons *
        process.selectedPatMuons *
        process.patElectrons *
        process.selectedPatElectrons *
        process.countPatLeptons +
        process.goodOfflinePrimaryVertices*
        getattr(process,"PF2PATmod")*
        #process.PF2PATmod *
        ((process.kt6PFJets*
          process.patJetCorrFactors) +
         process.patJets +
         process.selectedPatJets +
         process.countPatJets
         )*
        process.patTrigger *
        process.patTriggerEvent *
        process.patMETsPF
        )
    # Only events passing path 'p' are written to the output file.
    process.out.SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p')
        )
    #Keep NVtxs
    process.out.outputCommands.append('keep *_offlinePrimaryVertices_*_*')
    process.patJetCorrFactors.rho = cms.InputTag("kt6PFJetsPFlow", "rho")
    process.out.outputCommands.append('keep *_kt6PFJets_rho_PAT')
    #print process.patDefaultSequence
| [
""
] | |
4e15a1419fe0011d3ea7a53bc3bdd0169a1dee84 | b98f54e846c9b107a2f4360f8e5bfaa297c7a573 | /pytest_wish/utils.py | b7f367ac670bca85bb914aa5b4c04b1c3931110c | [
"MIT"
] | permissive | gitter-badger/pytest-wish | 6f1682251a05ce5cb74b864db8476b6c0d795d0f | 7b7f870d3e915d56aa6d831eb2f763d97b702232 | refs/heads/master | 2021-01-16T00:56:56.528431 | 2016-01-25T11:41:12 | 2016-01-25T11:41:12 | 50,360,291 | 0 | 0 | null | 2016-01-25T15:34:10 | 2016-01-25T15:34:10 | null | UTF-8 | Python | false | false | 6,038 | py | # -*- coding: utf-8 -*-
# python 2 support via python-future
from __future__ import absolute_import, unicode_literals
from builtins import str
import collections
import importlib
import inspect
import logging
import re
import sys
import pkg_resources
import stdlib_list
# blacklists
# Distributions that must never be collected (currently none); consulted by
# collect_distributions() via its default argument.
DISTRIBUTION_BLACKLIST = set()
# Modules that must never be imported; consulted by import_modules().
MODULE_BLACKLIST = {
    # crash
    'icopen',
    'ntpath',
    'test.support',
}
# Fully qualified 'module:object' names that must never be yielded (they
# exit the process, crash the interpreter, or are otherwise dangerous);
# turned into anchored exclude regexes by generate_objects_from_modules().
OBJECT_BLACKLIST = {
    # pytest internals
    '_pytest.runner:exit',
    '_pytest.runner:skip',
    '_pytest.skipping:xfail',
    # unconditional exit
    'faulthandler:_sigsegv',
    'posix:_exit',
    '_signal:default_int_handler',
    'atexit.register',
    # low level crashes
    'numpy.fft.fftpack_lite:cffti',
    'numpy.fft.fftpack_lite:rffti',
    'appnope._nope:beginActivityWithOptions',
    'ctypes:string_at',
    'ctypes:wstring_at',
    'gc:_dump_rpy_heap',
    'gc:dump_rpy_heap',
    'matplotlib._image:Image',
    'getpass:getpass',
    'getpass:unix_getpass',
    'ensurepip:_run_pip',
    # dangerous
    'os.mkdir',
    'pip.utils:rmtree',
}
# Default exclude regexes: names starting with '_' and names containing a
# private component ('pkg._mod...').
EXCLUDE_PATTERNS = [r'_', r'.*\._']
logger = logging.getLogger('wish')
def import_modules(module_names, requirement='', module_blacklist=MODULE_BLACKLIST):
    """Import every named module, returning {name: module} for the successes.

    Blacklisted names are skipped with a debug log entry; import failures
    are logged (tagged with *requirement*) and otherwise ignored.
    """
    modules = collections.OrderedDict()
    for module_name in module_names:
        if module_name in module_blacklist:
            logger.debug("Not importing blacklisted module: %r.", module_name)
            continue
        try:
            modules[module_name] = importlib.import_module(module_name)
        # Fix: was a bare ``except:`` which also swallowed KeyboardInterrupt
        # and SystemExit raised during import; let those propagate.
        except Exception:
            logger.info("Failed to import module %r (%r).", module_name, requirement)
    return modules
def collect_distributions(specs, distribution_blacklist=DISTRIBUTION_BLACKLIST):
    """Resolve distribution specs to {requirement_string: distribution}.

    'python' adds a fake 'Python==X.Y.Z' entry (value None) for the standard
    library; 'all' additionally adds every distribution in the working set.
    Unresolvable specs are logged and skipped.
    """
    distributions = collections.OrderedDict()
    for spec in specs:
        if spec in distribution_blacklist:
            logger.debug("Not importing blacklisted package: %r.", spec)
        elif spec.lower() in {'all', 'python'}:
            # fake distribution name for the python standard library
            distributions['Python==%d.%d.%d' % sys.version_info[:3]] = None
            if spec.lower() == 'all':
                # fake distribution name for all the modules known to the packaging system
                for distribution in pkg_resources.working_set:
                    distributions[str(distribution.as_requirement())] = distribution
        else:
            try:
                distribution = pkg_resources.get_distribution(spec)
                distributions[str(distribution.as_requirement())] = distribution
            # Fix: was a bare ``except:``; keep KeyboardInterrupt/SystemExit
            # propagating (consistent with import_modules).
            except Exception:
                logger.info("Failed to find package %r.", spec)
    return distributions
def import_distributions(specs, distribution_blacklist=DISTRIBUTION_BLACKLIST):
    """Import the top-level modules of every resolved distribution.

    Returns {requirement_string: [successfully imported module names]}.
    The fake 'Python==X.Y.Z' entry maps to the standard-library module list
    provided by stdlib_list.
    """
    distributions_modules = collections.OrderedDict()
    resolved = collect_distributions(specs, distribution_blacklist)
    for requirement, distribution in resolved.items():
        if requirement.startswith('Python=='):
            # stdlib_list wants a short 'X.Y' version string.
            short_version = requirement.partition('==')[2][:3]
            module_names = stdlib_list.stdlib_list(short_version)
        elif distribution.has_metadata('top_level.txt'):
            module_names = distribution.get_metadata('top_level.txt').splitlines()
        else:
            logger.info("Package %r has no top_level.txt. Guessing module name is %r.",
                        requirement, distribution.project_name)
            module_names = [distribution.project_name]
        imported = import_modules(module_names, requirement=requirement)
        distributions_modules[requirement] = list(imported.keys())
    return distributions_modules
def generate_module_objects(module, predicate=None):
    """Yield (name, object) pairs for members defined directly in *module*.

    Members merely re-exported from other modules are filtered out via
    ``inspect.getmodule(obj) is module``.  If the member listing itself
    fails, the failure is logged and the generator yields nothing.
    """
    try:
        module_members = inspect.getmembers(module, predicate)
    # Fix: was a bare ``except:`` -- keep KeyboardInterrupt/SystemExit alive.
    except Exception:
        logger.info("Failed to get member list from module %r.", module)
        # Fix: was ``raise StopIteration`` -- under PEP 479 (Python >= 3.7)
        # raising StopIteration inside a generator becomes a RuntimeError;
        # a plain ``return`` is the correct way to end the generator.
        return
    for object_name, object_ in module_members:
        if inspect.getmodule(object_) is module:
            yield object_name, object_
def valid_name(name, include_res, exclude_res):
    """Return True when *name* matches an include regex and no exclude regex.

    Both arguments are lists of compiled patterns, matched from the start
    of the name (re.match semantics).
    """
    if any(pattern.match(name) for pattern in exclude_res):
        return False
    return any(pattern.match(name) for pattern in include_res)
def generate_objects_from_modules(
    modules, include_patterns,
    exclude_patterns=EXCLUDE_PATTERNS,
    predicate_name=None,
    module_blacklist=MODULE_BLACKLIST,
    object_blacklist=OBJECT_BLACKLIST,
):
    """Yield ('module:name', object) pairs from *modules* that pass the
    include/exclude filters and are not blacklisted.

    *predicate_name*, when given, names a callable (``'module:attr'``) used
    to pre-filter members via inspect.getmembers.
    """
    # Fix: the original used ``exclude_patterns += tuple(...)`` -- on the
    # default list argument that augmented assignment extends the shared
    # module-level EXCLUDE_PATTERNS in place, so the blacklist anchors
    # accumulated across calls.  Build a fresh list instead.
    all_excludes = list(exclude_patterns) + [name.strip() + '$' for name in object_blacklist]
    include_res = [re.compile(pattern) for pattern in include_patterns]
    exclude_res = [re.compile(pattern) for pattern in all_excludes]
    predicate = object_from_name(predicate_name) if predicate_name else None
    for module_name, module in modules.items():
        if module_name in module_blacklist:
            continue
        for object_name, object_ in generate_module_objects(module, predicate):
            full_object_name = '{}:{}'.format(module_name, object_name)
            if valid_name(full_object_name, include_res, exclude_res):
                yield full_object_name, object_
def object_from_name(full_object_name):
    """Resolve a 'package.module:attr' string to the named object.

    Raises ImportError when the module cannot be imported and
    AttributeError when the attribute does not exist on it.
    """
    module_name, _sep, attr_name = full_object_name.partition(':')
    target_module = importlib.import_module(module_name)
    return getattr(target_module, attr_name)
def generate_objects_from_names(stream):
    """Yield (name, object) pairs for the object names listed in *stream*.

    Each line may carry a trailing '#' comment; blank lines are skipped.
    Names that fail to resolve are logged and skipped.
    """
    for raw_line in stream:
        candidate = raw_line.partition('#')[0].strip()
        if not candidate:
            continue
        try:
            yield candidate, object_from_name(candidate)
        except ImportError:
            logger.info("Failed to import module for object %r.", candidate)
        except AttributeError:
            logger.info("Failed to import object %r.", candidate)
| [
"alexamici@gmail.com"
] | alexamici@gmail.com |
e36e09c306b5d8f6409029937c7295bd58aa2c3e | 58b8010949e68e7b206a48a9c6eb1f03317399b3 | /sample-with-priority-queue.py | d620ea40e212dc6e1537e5ba9ede8d66c98e64c2 | [] | no_license | zainulabidin302/8-puzzle-problem-using-a-star-algorithm | 81a2c73dd8aad70b58fc1bf423a4db4a12f8262f | 4bc0677bb21630737a5f7088bb60686e6e062902 | refs/heads/master | 2020-05-25T20:26:29.008415 | 2017-05-07T13:31:10 | 2017-05-07T13:31:10 | 83,651,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import queue as Q
from Node import Node
def a_star(node, goal):
    """Search from ``node`` to ``goal`` over the Node neighbour graph.

    Despite the name there is no explicit f = g + h bookkeeping here:
    neighbours are pushed into a PriorityQueue and popped in whatever
    order Node's comparison defines.  NOTE(review): assumes Node
    implements __lt__, __eq__ and __hash__ -- confirm in Node.py.
    Gives up after 10000 expansions.
    """
    open_list = Q.PriorityQueue()   # frontier, ordered by Node comparison
    close_list = []                 # nodes already expanded
    moves_counter = 0
    max_move_counter = 10000        # safety cap on expansions
    while True:
        if max_move_counter == moves_counter:
            print('MOVE COUNTER EXCEEDED MAX MOVE COUNTER ')
            return
        moves_counter += 1
        # Enqueue neighbours not yet expanded; the set difference requires
        # hashable Nodes and discards any ordering of the neighbour list.
        for neighbour in (list(set(node.neighbours) - set(close_list))):
            open_list.put(neighbour)
        node = open_list.get()      # NOTE: blocks forever if the frontier empties
        if node == goal:
            print('Moves = ', moves_counter)
            print('Explored Nodes = ', len(open_list.queue) + len(close_list))
            return
        close_list.append(node)
a_star(Node('163487052'), Node('123456780'))
a_star(Node('175432086'), Node('123456780'))
a_star(Node('517324806'), Node('123456780'))
a_star(Node('754320816'), Node('123456780'))
a_star(Node('178542063'), Node('123456780'))
a_star(Node('178564203'), Node('123456780'))
| [
"zain302@hotmail.com"
] | zain302@hotmail.com |
b368553bc4f28e3306a5f1465e6e025fff5e2dd8 | f620212678d7722d22d7c046e39ef63f496c6206 | /ReadData.py | d72bbae345429e2421c97b97a128049fff1cb4a2 | [] | no_license | mikekuian/webapplication | 30d9ca4b81e1ea9c8cd48f25f71190e91c80f7dd | 8adc80b730d343d1db4a3c3f1e410e9b9148fc6a | refs/heads/master | 2022-11-29T09:36:24.391545 | 2020-07-28T17:35:30 | 2020-07-28T17:35:30 | 283,274,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import sqlite3
import psycopg2
def create_table():
    """Create the students table in myStudents.db if it does not exist."""
    connection = sqlite3.connect("myStudents.db")
    connection.execute("CREATE TABLE IF NOT EXISTS students (id INTEGER, name TEXT, email TEXT)")
    connection.commit()
    connection.close()
def insert(id, name, email):
    """Append one student row (id, name, email) to the table."""
    connection = sqlite3.connect("myStudents.db")
    connection.execute("INSERT INTO students VALUES(?,?,?)", (id, name, email))
    connection.commit()
    connection.close()
def view():
    """Return every student row as a list of (id, name, email) tuples."""
    connection = sqlite3.connect("myStudents.db")
    rows = connection.execute("SELECT * FROM students").fetchall()
    connection.close()
    return rows
def delete(id):
    """Remove the student row(s) whose id matches."""
    connection = sqlite3.connect("myStudents.db")
    connection.execute("DELETE FROM students WHERE id=?", (id,))
    connection.commit()
    connection.close()
def update(id, name, email):
    """Set the name and email of the student row whose id matches.

    Bug fix: the original statement used ``WHERE id=id``, which compares
    the column with itself (always true), so every row in the table was
    updated.  The id must be bound as a query parameter instead.
    """
    conn = sqlite3.connect("myStudents.db")
    cur = conn.cursor()
    cur.execute("UPDATE students SET name=?, email=? WHERE id=?", (name, email, id))
    conn.commit()
    conn.close()
# Demo: rewrite the row with id 1200 and dump the whole table.
update(1200, "Joe Flin", "jflin@yahoo.com")
print(view())
| [
"michaelkuian@gmail.com"
] | michaelkuian@gmail.com |
7057275be9c281c00e6df203bce5fce8e71251b1 | 0c5f061a32aa865f16a87b7a45c066560ba0b1e7 | /OOPS/oops_ClassMethod.py | c7ec8ce54e0acfa9162aba7e5376ee3b7bd571c3 | [] | no_license | divya-chopra/python | 0ac6a70e320dd569dc1f6ebac1fece16892f70df | c7bf67e74bd321b72ab57a3db5267e64d25494c9 | refs/heads/main | 2023-03-13T04:23:53.471256 | 2021-02-21T13:16:36 | 2021-02-21T13:16:36 | 340,855,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | # @classmethod
#-> can use without even instantiating class
#-> Call it using classname.method_name(method arguments)
#-> 95% not useful
#-> Can be used to instantiate an object of class
#@staticmethod
#-> Same as classmethod except no access to cls or class
#-> So we cannot instantiate class using static method
#-> Can perform any other function
#-> Use this when we don't need to use class attributes
class PlayerCharacter:
    """A simple game character demonstrating class and static methods."""

    # Class attribute shared by every character.
    membership = True

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def shout(self):
        """Print this character's name."""
        print(f'my name is {self.name}')

    @classmethod
    def adding_age(cls, num1, num2):
        """Alternate constructor: build a 'Teddy' whose age is num1 + num2."""
        combined = num1 + num2
        return cls('Teddy', combined)

    @staticmethod
    def adding_score(stage1, stage2):
        """Sum two stage scores; needs neither cls nor an instance."""
        return stage1 + stage2
# Demo: both decorated methods are called on the class, not an instance.
player1 = PlayerCharacter.adding_age(10,19) # classmethod, called without instantiating the class
print(player1.age)
player1_score = PlayerCharacter.adding_score(2,5) # staticmethod, also usable before any instance exists
print(player1_score)
"divya.chopra@siemens.com"
] | divya.chopra@siemens.com |
c61dda42206def3fa80cf32e0d15a1f267ee0713 | d882e66d71f26403dbb82be4cf5667929f3b82a8 | /news/models.py | d5d06ffddbeb96f7f987b69a39c71feadfc7bdb3 | [] | no_license | pythongiant/DarkBeautifulReviews | 2f5b9f7b9c3a0ccc442ea0ac8c196463257bf6ae | 6bc1dfd3d147ed3dc91ca19dba288cce453b4107 | refs/heads/master | 2020-08-02T04:07:49.536708 | 2020-02-24T09:19:06 | 2020-02-24T09:19:06 | 211,228,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from django.db import models
# Create your models here.
class News(models.Model):
    """A single news post: publication date, author, body text and an image."""
    date = models.DateTimeField()               # publication timestamp
    author = models.CharField(max_length=255)   # author display name
    content = models.TextField()                # post body
    image = models.ImageField()                 # illustration for the post
    def __str__(self):
        # Bug fix: the original returned self.name, but this model defines no
        # 'name' field, so str(news) raised AttributeError.  Use the author.
        return self.author+"("+str(self.date)+")"
"srihari.unnikrishnan@gmail.com"
] | srihari.unnikrishnan@gmail.com |
a7f24ef184928de29cb7077c5a33eb6c01eae3b5 | d8422247ecbe450c75df45dcf2c92fb4438b65af | /horizon/openstack_dashboard/dashboards/admin/instances/forms.py | 9d2bf6d665256ffd420ae81e10ff16ed18c8cfd8 | [
"Apache-2.0"
] | permissive | yianjiajia/openstack_horizon | deb9beca534b494b587ae401904c84ddbed64c4a | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | refs/heads/master | 2016-09-12T21:34:25.718377 | 2016-04-28T05:29:56 | 2016-04-28T05:29:56 | 57,273,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
    """Admin form that live-migrates a running instance to another compute host."""
    current_host = forms.CharField(label=_("Current Host"),
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'readonly': 'readonly'}))
    host = forms.ChoiceField(label=_("New Host"),
                             help_text=_("Choose a Host to migrate to."))
    disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
                                          initial=False, required=False)
    block_migration = forms.BooleanField(label=_("Block Migration"),
                                         initial=False, required=False)
    def __init__(self, request, *args, **kwargs):
        """Add the hidden instance_id field and fill the host choice list."""
        super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
        initial = kwargs.get('initial', {})
        instance_id = initial.get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=instance_id)
        self.fields['host'].choices = self.populate_host_choices(request,
                                                                 initial)
    def populate_host_choices(self, request, initial):
        """Build (value, label) choices from compute hosts other than the current one."""
        hosts = initial.get('hosts')
        current_host = initial.get('current_host')
        # Only compute services are valid migration targets.
        host_list = [(host.host_name,
                      host.host_name)
                     for host in hosts
                     if (host.service.startswith('compute') and
                         host.host_name != current_host)]
        if host_list:
            host_list.insert(0, ("", _("Select a new host")))
        else:
            host_list.insert(0, ("", _("No other hosts available.")))
        return sorted(host_list)
    def handle(self, request, data):
        """Trigger the live migration via Nova and record an operation log entry.

        Returns True on success; on failure redirects back to the instance index.
        """
        try:
            block_migration = data['block_migration']
            disk_over_commit = data['disk_over_commit']
            api.nova.server_live_migrate(request,
                                         data['instance_id'],
                                         data['host'],
                                         block_migration=block_migration,
                                         disk_over_commit=disk_over_commit)
            msg = _('The instance is preparing the live migration '
                    'to host "%s".') % data['host']
            messages.success(request, msg)
            # operation log
            config = '\n'.join(['Host ID: '+ data['host'], 'Instance ID: '+ data['instance_id']])
            api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
                                              resource_name='Instance', config=config,
                                              status='Success')
            return True
        except Exception:
            msg = _('Failed to live migrate instance to '
                    'host "%s".') % data['host']
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, msg, redirect=redirect)
            # operation log
            api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
                                              resource_name='Instance', config='Failed to live migrate instance',
                                              status='Error')
"yanjj@syscloud.cn"
] | yanjj@syscloud.cn |
3739db1ecd8528d5f8fcaaf0b5400ad84e17e4a1 | b7724c20c876eb0cc90beba3614cec6ffdb9533a | /DrowsinessDetection/eye_status.py | 049137283a58a4218a81ecd4c2ce72252e713eae | [
"MIT"
] | permissive | muhammadbilalakbar021/driverDrowsiness | 9753680eabaea63fe570cc0172aff024253dd9e1 | 8abf4055e2a756e538c5ae535b88b8bed4d88c78 | refs/heads/main | 2023-06-23T10:05:31.139485 | 2021-07-14T03:04:47 | 2021-07-14T03:04:47 | 385,797,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,665 | py | import os
from PIL import Image
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import AveragePooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from scipy.misc import imread
from scipy.misc import imresize, imsave
IMG_SIZE = 24
def collect():
    """Build the training and validation ImageDataGenerators.

    Both generators rescale pixel values to [0, 1] and apply shear and random
    horizontal flips; images are read from dataset/train and dataset/val as
    IMG_SIZE x IMG_SIZE grayscale with binary labels taken from the
    sub-directory names.
    """
    train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            horizontal_flip=True,
        )
    val_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            horizontal_flip=True, )
    train_generator = train_datagen.flow_from_directory(
        directory="dataset/train",
        target_size=(IMG_SIZE, IMG_SIZE),
        color_mode="grayscale",
        batch_size=32,
        class_mode="binary",
        shuffle=True,
        seed=42
    )
    val_generator = val_datagen.flow_from_directory(
        directory="dataset/val",
        target_size=(IMG_SIZE, IMG_SIZE),
        color_mode="grayscale",
        batch_size=32,
        class_mode="binary",
        shuffle=True,
        seed=42
    )
    return train_generator, val_generator
def save_model(model):
    """Persist a model: architecture to model.json, weights to model.h5."""
    with open("model.json", "w") as json_file:
        json_file.write(model.to_json())
    # serialize weights to HDF5
    model.save_weights("model.h5")
def load_model():
    """Rebuild the eye-state classifier from DrowsinessDetection/model.{json,h5}.

    Returns the compiled Keras model (binary cross-entropy, Adam, accuracy).
    Fix: removed an unused local ('path') that pointed at a different
    directory than the files actually opened here, and used a context
    manager so the JSON file is always closed.
    """
    with open(r'DrowsinessDetection/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("DrowsinessDetection/model.h5")
    loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return loaded_model
def train(train_generator, val_generator):
    """Build the CNN, train it on the two generators, and save it to disk."""
    # One full pass over each dataset per epoch.
    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = val_generator.n // val_generator.batch_size
    print('[LOG] Intialize Neural Network')
    # Two conv + average-pool stages, then three dense layers; the final
    # single sigmoid unit emits the binary class probability.
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(units=120, activation='relu'))
    model.add(Dense(units=84, activation='relu'))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=val_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=20
                        )
    save_model(model)
def predict(img, model):
    """Classify one RGB eye crop as 'closed', 'open' or 'idk' (uncertain).

    The image is converted to grayscale, resized to IMG_SIZE x IMG_SIZE,
    scaled to [0, 1] and fed to the model as a single-sample batch.
    """
    img = Image.fromarray(img, 'RGB').convert('L')
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this
    # code requires an old SciPy (with Pillow) -- confirm pinned versions.
    img = imresize(img, (IMG_SIZE, IMG_SIZE)).astype('float32')
    img /= 255
    img = img.reshape(1, IMG_SIZE, IMG_SIZE, 1)
    prediction = model.predict(img)
    # Only confident scores get a label; anything in (0.1, 0.9) is 'idk'.
    if prediction < 0.1:
        prediction = 'closed'
    elif prediction > 0.9:
        prediction = 'open'
    else:
        prediction = 'idk'
    return prediction
def evaluate(X_test, y_test):
    """Load the saved model and print its accuracy (as a percentage) on the test set."""
    model = load_model()
    print('Evaluate model')
    loss, acc = model.evaluate(X_test, y_test, verbose=0)
    print(acc * 100)
if __name__ == '__main__':
    # Script entry point: build the data generators, then train and save the model.
    train_generator, val_generator = collect()
    train(train_generator, val_generator)
| [
"muhammadbilalakbar021@gmail.com"
] | muhammadbilalakbar021@gmail.com |
8e785bad39be8eecdff23af271fa04fee05abaf7 | 5534372b34b8c79732f9555b72eb22d4bda5f719 | /web_services_project/settings.py | 26528fd948649bd5f80f8d41088c78388fc528b1 | [] | no_license | JulianHB/web_services_teacher_api | 6971405734f1453788e4dbc9fe26d56f59c66c8c | 1b0eca8359d10568569f0ab7c70c04d4fcf893c2 | refs/heads/master | 2022-12-18T06:01:26.740537 | 2020-03-06T17:49:05 | 2020-03-06T17:49:05 | 245,431,976 | 0 | 0 | null | 2022-12-08T03:44:54 | 2020-03-06T13:51:48 | Python | UTF-8 | Python | false | false | 3,466 | py | """
Django settings for web_services_project project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here -- rotate it and load it
# from the environment before deploying.
SECRET_KEY = '(vpmfq-u-wk+fg0rm2xeaaxgci7one*2#p!d_vx=((4srht*e1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Persist sessions in the database (the Django default backend).
SESSION_ENGINE = "django.contrib.sessions.backends.db"
# Application definition
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting and is
# normally a list/tuple of dotted paths; here it is a single string, and the
# active MIDDLEWARE list below already includes SessionMiddleware -- confirm
# whether this line is dead configuration.
MIDDLEWARE_CLASSES = 'django.contrib.sessions.middleware.SessionMiddleware'
INSTALLED_APPS = [
    'rest_framework',#tells django that the django-restful framework has been added
    'teacherapi.apps.TeacherapiConfig', #tells django to recognise the teacher api app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web_services_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web_services_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SESSION_SAVE_EVERY_REQUEST = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"sc17jjhh@leeds.ac.uk"
] | sc17jjhh@leeds.ac.uk |
abeee0a41c8430a1ec73728748161b3758a74b77 | 3b662ff24ba24b09e4de7ceb2c2d3bd298591e88 | /Python/libraries/recognizers-number-with-unit/recognizers_number_with_unit/number_with_unit/extractors.py | 757cb5c9e949ff59c7d4e4ba56149821509b421a | [
"MIT"
] | permissive | gzhebrunov/Recognizers-Text | 39d916e891a09b26032430184dc90394e197d195 | 157daf7ac85cc5b4e1e708aed8f96601fd28a612 | refs/heads/master | 2020-04-03T13:51:32.840384 | 2018-10-30T01:22:05 | 2018-10-30T01:22:05 | 155,301,539 | 0 | 0 | MIT | 2018-10-30T00:48:07 | 2018-10-30T00:48:06 | null | UTF-8 | Python | false | false | 15,594 | py | from abc import ABC, abstractmethod
from typing import List, Dict, Set, Pattern, Match
from copy import deepcopy
from collections import namedtuple
from itertools import chain
import regex
from .constants import *
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor, ExtractResult
from recognizers_number.culture import CultureInfo
PrefixUnitResult = namedtuple('PrefixUnitResult', ['offset', 'unit'])
class NumberWithUnitExtractorConfiguration(ABC):
@property
@abstractmethod
def extract_type(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def suffix_list(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def prefix_list(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def ambiguous_unit_list(self) -> List[str]:
raise NotImplementedError
@property
@abstractmethod
def unit_num_extractor(self) -> Extractor:
raise NotImplementedError
@property
@abstractmethod
def build_prefix(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def build_suffix(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def connector_token(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def compound_unit_connector_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def pm_non_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
def culture_info(self) -> CultureInfo:
return self._culture_info
def __init__(self, culture_info: CultureInfo):
self._culture_info = culture_info
class NumberWithUnitExtractor(Extractor):
def __init__(self, config: NumberWithUnitExtractorConfiguration):
self.config: NumberWithUnitExtractorConfiguration = config
if self.config.suffix_list:
self.suffix_regex: Set[Pattern] = self._build_regex_from_set(self.config.suffix_list.values())
else:
self.suffix_regex: Set[Pattern] = set()
if self.config.prefix_list:
max_length = max(map(len, ('|'.join(self.config.prefix_list.values()).split('|'))))
self.max_prefix_match_len = max_length + 2
self.prefix_regex: Set[Pattern] = self._build_regex_from_set(self.config.prefix_list.values())
else:
self.max_prefix_match_len = 0
self.prefix_regex: Set[Pattern] = set()
self.separate_regex = self._build_separate_regex_from_config()
def extract(self, source: str) -> List[ExtractResult]:
if not self._pre_check_str(source):
return list()
mapping_prefix: Dict[float, PrefixUnitResult] = dict()
matched: List[bool] = [False] * len(source)
numbers: List[ExtractResult] = self.config.unit_num_extractor.extract(source)
result: List[ExtractResult] = list()
source_len = len(source)
if self.max_prefix_match_len != 0:
for num in numbers:
if num.start is None or num.length is None:
continue
max_find_prefix = min(self.max_prefix_match_len, num.start)
if max_find_prefix == 0:
continue
left: str = source[num.start - max_find_prefix:num.start]
last_index = len(left)
best_match: Match = None
for pattern in self.prefix_regex:
collection = list(filter(lambda x: len(x.group()), regex.finditer(pattern, left)))
for match in collection:
if left[match.start():last_index].strip() == match.group():
if best_match is None or best_match.start() >= match.start():
best_match = match
if best_match:
mapping_prefix[num.start] = PrefixUnitResult(
offset=last_index - best_match.start(),
unit=left[best_match.start():last_index]
)
for num in numbers:
if num.start is None or num.length is None:
continue
start = num.start
length = num.length
max_find_len = source_len - start - length
prefix_unit: PrefixUnitResult = mapping_prefix.get(start, None)
if max_find_len > 0:
right = source[start + length:start + length + max_find_len]
unit_match_list = map(lambda x: list(regex.finditer(x, right)), self.suffix_regex)
unit_match = chain.from_iterable(unit_match_list)
unit_match = list(filter(lambda x: x.group(), unit_match))
max_len = 0
for match in unit_match:
if match.group():
end_pos = match.start() + len(match.group())
if match.start() >= 0:
middle: str = right[:min(match.start(), len(right))]
if max_len < end_pos and (not middle.strip() or middle.strip() == self.config.connector_token):
max_len = end_pos
if max_len != 0:
for i in range(length + max_len):
matched[i+start] = True
ex_result = ExtractResult()
ex_result.start = start
ex_result.length = length + max_len
ex_result.text = source[start:start+length+max_len]
ex_result.type = self.config.extract_type
if prefix_unit:
ex_result.start -= prefix_unit.offset
ex_result.length += prefix_unit.offset
ex_result.text = prefix_unit.unit + ex_result.text
num.start = start - ex_result.start
ex_result.data = num
is_not_unit = False
if ex_result.type == Constants.SYS_UNIT_DIMENSION:
non_unit_match = self.config.pm_non_unit_regex.finditer(source)
for match in non_unit_match:
if ex_result.start >= match.start() and ex_result.end <= match.end():
is_not_unit = True
if is_not_unit:
continue
result.append(ex_result)
continue
if prefix_unit:
ex_result = ExtractResult()
ex_result.start = num.start - prefix_unit.offset
ex_result.length = num.length + prefix_unit.offset
ex_result.text = prefix_unit.unit + num.text
ex_result.type = self.config.extract_type
num.start = start - ex_result.start
ex_result.data = num
result.append(ex_result)
if self.separate_regex:
result = self._extract_separate_units(source, result)
return result
def validate_unit(self, source: str) -> bool:
return not source.startswith('-')
def _pre_check_str(self, source: str) -> bool:
return len(source) != 0
def _extract_separate_units(self, source: str, num_depend_source: List[ExtractResult]) -> List[ExtractResult]:
result = deepcopy(num_depend_source)
match_result: List[bool] = [False] * len(source)
for ex_result in num_depend_source:
for i in range(ex_result.start, ex_result.end+1):
match_result[i] = True
match_collection = list(filter(lambda x: x.group(), regex.finditer(self.separate_regex, source)))
for match in match_collection:
i = 0
while i < len(match.group()) and not match_result[match.start()+i]:
i += 1
if i == len(match.group()):
for j in range(i):
match_result[j] = True
is_not_unit = False
if match.group() == Constants.AMBIGUOUS_TIME_TERM:
non_unit_match = self.config.pm_non_unit_regex.finditer(source)
for time in non_unit_match:
if self._dimension_inside_time(match, time):
is_not_unit = True
if is_not_unit:
continue
to_add = ExtractResult()
to_add.start = match.start()
to_add.length = len(match.group())
to_add.text = match.group()
to_add.type = self.config.extract_type
result.append(to_add)
return result
def _build_regex_from_set(self, definitions: List[str], ignore_case: bool = True) -> Set[Pattern]:
return set(map(lambda x: self.__build_regex_from_str(x, ignore_case), definitions))
def __build_regex_from_str(self, source: str, ignore_case: bool) -> Pattern:
tokens = map(regex.escape, source.split('|'))
definition = '|'.join(tokens)
definition = f'{self.config.build_prefix}({definition}){self.config.build_suffix}'
flags = regex.S + regex.I if ignore_case else regex.S
return RegExpUtility.get_safe_reg_exp(definition, flags)
def _build_separate_regex_from_config(self, ignore_case: bool = True) -> Pattern:
separate_words: Set[str] = set()
for add_word in self.config.prefix_list.values():
separate_words |= set(filter(self.validate_unit, add_word.split('|')))
for add_word in self.config.suffix_list.values():
separate_words |= set(filter(self.validate_unit, add_word.split('|')))
for to_delete in self.config.ambiguous_unit_list:
separate_words.discard(to_delete)
tokens = map(regex.escape, separate_words)
if not tokens:
return None
tokens = sorted(tokens, key=len, reverse=True)
definition = '|'.join(tokens)
definition = f'{self.config.build_prefix}({definition}){self.config.build_suffix}'
flags = regex.S + regex.I if ignore_case else regex.S
return RegExpUtility.get_safe_reg_exp(definition, flags)
def _dino_comparer(self, x: str, y: str) -> int:
if not x:
if not y:
return 0
else:
return 1
else:
if not y:
return -1
else:
if len(x) != len(y):
return len(y) - len(x)
else:
if x.lower() < y.lower():
return -1
if y.lower() < x.lower():
return 1
return 0
def _dimension_inside_time(self, dimension: Match, time: Match) -> bool:
is_sub_match = False
if dimension.start() >= time.start() and dimension.end() <= time.end():
is_sub_match = True
return is_sub_match
class BaseMergedUnitExtractor(Extractor):
def __init__(self, config: NumberWithUnitExtractorConfiguration):
self.config = config
def extract(self, source: str) -> List[ExtractResult]:
if self.config.extract_type == Constants.SYS_UNIT_CURRENCY:
result = self.__merged_compound_units(source)
else:
result = NumberWithUnitExtractor(self.config).extract(source)
return result
def __merged_compound_units(self, source:str):
ers = NumberWithUnitExtractor(self.config).extract(source)
ers = self.__merge_pure_number(source, ers)
result = []
groups = [0] * len(ers)
idx = 0
while idx < len(ers) - 1:
if ers[idx].type != ers[idx + 1].type and not ers[idx].type == Constants.SYS_NUM and not ers[idx + 1].type == Constants.SYS_NUM:
idx = idx + 1
continue
if isinstance(ers[idx].data, ExtractResult):
groups[idx + 1] = groups[idx] + 1
idx = idx + 1
continue
middle_begin = ers[idx].start + ers[idx].length
middle_end = ers[idx].start
middle_str = source[middle_begin:middle_end - middle_begin].strip().lower()
# Separated by whitespace
if not middle_str:
groups[idx + 1] = groups[idx]
# Separated by connector
match = self.config.compound_unit_connector_regex.match(middle_str)
if match:
groups[idx + 1] = groups[idx]
else:
groups[idx + 1] = groups[idx] + 1
idx = idx + 1
idx = 0
while idx < len(ers):
if idx == 0 or groups[idx] != groups[idx -1]:
tmp_extract_result = ers[idx]
tmp = ExtractResult()
tmp.data = ers[idx].data
tmp.length = ers[idx].length
tmp.start = ers[idx].start
tmp.text = ers[idx].text
tmp.type = ers[idx].type
tmp_extract_result.data = [tmp]
result.append(tmp_extract_result)
# reduce extract results in same group
if idx + 1 < len(ers) and groups[idx + 1] == groups[idx]:
group = groups[idx]
period_begin = result[group].start
period_end = ers[idx + 1].start + ers[idx + 1].length
result[group].length = period_end - period_begin
result[group].text = source[period_begin:period_end - period_begin]
result[group].type = Constants.SYS_UNIT_CURRENCY
if isinstance(result[group].data, list):
result[group].data.append(ers[idx + 1])
idx = idx + 1
idx = 0
while idx < len(result):
inner_data = result[idx].data
if len(inner_data) == 1:
result[idx] = inner_data[0]
idx = idx + 1
result = [x for x in result if not x.type == Constants.SYS_NUM]
return result
def __merge_pure_number(self, source: str, ers: List[ExtractResult]) -> List[ExtractResult]:
num_ers = self.config.unit_num_extractor.extract(source)
unit_numbers = []
i = j = 0
while i < len(num_ers):
has_behind_extraction = False
while j < len(ers) and ers[j].start + ers[j].length < num_ers[i].start:
has_behind_extraction = True
j = j + 1
if not has_behind_extraction:
i = i + 1
continue
middle_begin = ers[j - 1].start + ers[j - 1].length
middle_end = num_ers[i].start
middle_str = source[middle_begin:middle_end - middle_begin].strip().lower()
# separated by whitespace
if not middle_str:
unit_numbers.append(num_ers[i])
i = i + 1
continue
i = i + 1
for extract_result in unit_numbers:
overlap = False
for er in ers:
if er.start <= extract_result.start and er.start + er.length >= extract_result.start:
overlap = True
if not overlap:
ers.append(extract_result)
ers = sorted(ers, key=lambda e: e.start)
return ers
| [
"tellarin@gmail.com"
] | tellarin@gmail.com |
9b25bdfd056b7f6af3b11a60b7b9bc218f607fc7 | 32fe3359e8c3dd7110d23ddab5c9fd3f8c488440 | /kickstart/kickstart/models.py | f7e278b56c061d571af8b9a729d6ba91b28623bc | [] | no_license | tutortalk/django_kickstart | 35fe18598becbb8ddbb66ad54227de9346b25cdc | 201ecec7908de72266710af382978fba2f6739f4 | refs/heads/master | 2021-01-02T08:56:45.788347 | 2014-01-26T13:48:53 | 2014-01-26T13:48:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,675 | py | import os
from django.db import models
from django.contrib.auth.models import User
from pytils.translit import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from easy_thumbnails.files import get_thumbnailer
from django.core.mail import send_mail
import datetime
from mptt.models import MPTTModel, TreeForeignKey
from django.db import connection
def profile_avatar_dir(instance):
    """Media sub-directory for a user's profile files, named by user id."""
    return str(instance.user_id)


def profile_avatar_upload(instance, filename):
    """upload_to callback: store the avatar as '<user_id>/avatar<ext>'.

    Bug fix: profile_avatar_dir requires the instance argument; the original
    called it with no arguments, raising TypeError on every avatar upload.
    """
    ext = os.path.splitext(os.path.basename(filename))[1]
    return os.path.join(
        profile_avatar_dir(instance),
        'avatar' + ext
    )
class Profile(models.Model):
    """Per-user profile: display name, bio, donation balance and avatar image."""
    user = models.OneToOneField(User, related_name='profile')
    first_name = models.CharField(max_length=100, null=True, blank=True)
    last_name = models.CharField(max_length=100, null=True, blank=True)
    about = models.TextField(null=True, blank=True)
    # Funds available for donations; decremented by ProjectDonation.save()
    # and refunded by ProjectManager.close_recently_finished().
    balance = models.DecimalField(max_digits=8, decimal_places=2, default=0)
    avatar = models.ImageField(upload_to=profile_avatar_upload, null=True, blank=True)
    def __unicode__(self):
        return u"{0} - {1} - {2}".format(self.user.username, self.last_name, self.first_name)
    def get_avatar(self):
        """Return the URL of the 'avatar' thumbnail alias, or the static default image."""
        if self.avatar:
            return get_thumbnailer(self.avatar)['avatar'].url
        else:
            return settings.STATIC_URL + "default_avatar.png"
    def clear_avatar(self):
        """Delete every file in the user's avatar directory and unset the avatar field."""
        if self.avatar:
            folder = os.path.join(settings.MEDIA_ROOT, profile_avatar_dir(self))
            for file_name in os.listdir(folder):
                file_path = os.path.join(folder, file_name)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception, e:
                    # NOTE(review): failures are only printed, not logged -- consider logging.
                    print e
            self.avatar.delete()
            self.save()
class ProjectManager(models.Manager):
    """Query helpers for Project plus the batch job that settles finished projects."""
    def get_all_projects(self):
        """Public projects whose deadline has not yet passed, soonest deadline first."""
        return self.filter(is_public=True).filter(deadline__gt=timezone.now()).order_by('deadline')
    def search_projects(self, search):
        """Case-insensitive search over active projects by project name or tag name."""
        tagged_project_ids = Tag.objects.filter(name__icontains=search).values_list('projects', flat=True)
        tagged_project_ids = list(set(tagged_project_ids))
        query = models.Q(name__icontains=search) | models.Q(pk__in=tagged_project_ids)
        return self.get_all_projects().filter(query)
    def close_recently_finished(self):
        """Settle every public, in-progress project whose deadline has passed.

        Projects that collected at least their target amount are marked
        SUCCESS; the rest (including those with no donations at all) are
        marked FAIL and every donation is refunded to the donator's profile
        balance.  Owners of both groups are notified by e-mail afterwards.
        """
        now = timezone.now()
        # 'collected' sums the benefit amounts of all donations per project.
        base_queryset = (self.annotate(collected=models.Sum('projectdonation__benefit__amount'))
                         .filter(is_public=True, status=Project.IN_PROGRESS, deadline__lte=now))
        succeeded = base_queryset.filter(amount__lte=models.F('collected'))
        # collected=None covers projects that received no donations.
        failed = (base_queryset.filter(models.Q(amount__gt=models.F('collected')) | models.Q(collected=None))
                  .select_related('user')
                  .prefetch_related(
                      'projectdonation_set',
                      'projectdonation_set__benefit',
                      'projectdonation_set__user',
                      'projectdonation_set__user__profile'))
        notify_succeeded = [(project.user.email, project.name) for project in succeeded]
        balances = {}
        notify_failed = []
        for project in failed:
            notify_failed.append((project.user.email, project.name))
            # Accumulate refunds in memory so each profile is updated once.
            for donation in project.projectdonation_set.all():
                print 'return {0} to {1}'.format(donation.benefit.amount, donation.user.username)
                profile_id = donation.user.profile.pk
                if profile_id not in balances:
                    balances[profile_id] = donation.user.profile.balance
                balances[profile_id] += donation.benefit.amount
        # Bulk-apply the refunds with one parameterized statement per profile.
        update_param_list = [(balance, profile_id) for (profile_id, balance) in balances.items()]
        cursor = connection.cursor()
        cursor.executemany("UPDATE kickstart_profile SET balance=%s WHERE id=%s", update_param_list)
        succeeded.update(status=Project.SUCCESS)
        failed.update(status=Project.FAIL)
        for email, project_name in notify_succeeded:
            send_mail(
                _(u'Project funding is finished').encode('utf-8'),
                _(u'Congratulations! Your project "{0}" funding successfully finished!').format(project_name).encode('utf-8'),
                settings.DEFAULT_FROM_EMAIL,
                (email, )
            )
        for email, project_name in notify_failed:
            send_mail(
                _(u'Project funding is finished').encode('utf-8'),
                _(u'Sorry, but your project "{0}" funding has failed').format(project_name).encode('utf-8'),
                settings.DEFAULT_FROM_EMAIL,
                (email, )
            )
class Project(models.Model):
    """A crowdfunding project with a target amount, deadline and donation benefits."""
    # Funding lifecycle states; transitions happen in ProjectManager.close_recently_finished().
    IN_PROGRESS = 0
    SUCCESS = 1
    FAIL = 2
    STATUSES = (
        (IN_PROGRESS, _(u"In progress")),
        (SUCCESS, _(u"Success")),
        (FAIL, _(u"Fail"))
    )
    user = models.ForeignKey(User, related_name='projects')
    name = models.CharField(max_length=255)
    status = models.PositiveSmallIntegerField(default=0, choices=STATUSES, null=False, blank=False)
    slug_name = models.CharField(max_length=255, unique=True)
    short_desc = models.TextField(null=False, blank=False)
    desc = models.TextField(null=False, blank=False)
    is_public = models.BooleanField(default=False)
    # Funding target.
    amount = models.DecimalField(max_digits=14, decimal_places=2, null=False, blank=False)
    deadline = models.DateTimeField(null=False, blank=False)
    objects = ProjectManager()
    tags = models.ManyToManyField('kickstart.Tag', related_name='projects')
    donators = models.ManyToManyField(User, through='ProjectDonation', related_name='donations')
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        """Regenerate the slug on every save; seed a default benefit on first save."""
        create_benefit = not self.pk
        self.slug_name = slugify(u"{0}-{1}".format(self.name, self.user.username))
        super(Project, self).save(*args, **kwargs)
        if create_benefit:
            benefit = Benefit(project=self, amount='100.00', text=_(u"Get test example"))
            benefit.save()
            self.benefits.add(benefit)
    def get_donations(self):
        """All donations to this project with their benefits prefetched, as a list."""
        return list(ProjectDonation.objects.filter(project=self).select_related('benefit').all())
class ProjectDonation(models.Model):
    """Through-model linking a donating user, a project and the chosen benefit."""
    project = models.ForeignKey('kickstart.Project')
    user = models.ForeignKey(User)
    benefit = models.ForeignKey('kickstart.Benefit')
    def __unicode__(self):
        return u"{0} - {1} - {2} bucks".format(self.project.name, self.user.username, self.benefit.amount)
    def save(self, *args, **kwargs):
        """On first save, deduct the benefit amount from the donator's balance and notify."""
        take_money = not self.pk
        super(ProjectDonation, self).save(*args, **kwargs)
        if take_money:
            self.user.profile.balance -= self.benefit.amount
            self.user.profile.save()
            notification_message = u'Congratulation! Your project received donation {amount} bucks from user {user}'.format(
                amount=self.benefit.amount,
                user=self.user.username
            )
            # NOTE(review): the message addresses the project owner but is sent
            # to the donating user's address (self.user.email); the owner would
            # be self.project.user.email -- confirm the intended recipient.
            send_mail(
                _(u'Your project "{0}" received donation'.format(self.project.name)).encode('utf-8'),
                _(notification_message).encode('utf-8'),
                settings.DEFAULT_FROM_EMAIL,
                (self.user.email, )
            )
def project_file_dir(instance, filename=None):
    """Directory for a project's uploaded files: '<user_id>/project_<pk>_files'.

    Fix: this function is also passed directly as upload_to
    (ProjectFile.file), and Django invokes upload_to callables with
    (instance, filename); the original one-argument signature raised
    TypeError there.  The optional, ignored 'filename' keeps both call
    sites working.
    """
    project = instance.project
    return os.path.join(
        str(project.user_id),
        'project_{0}_files'.format(str(project.pk))
    )


def project_file_upload(instance, filename):
    """upload_to callback: store the file as 'file<ext>' inside the project directory."""
    name, ext = os.path.splitext(os.path.basename(filename))
    return os.path.join(
        project_file_dir(instance),
        'file' + ext
    )
class ProjectFile(models.Model):
    """An attachment uploaded for a project, stored under the project's media dir."""
    project = models.ForeignKey('kickstart.Project', related_name='files')
    # Name of the file as uploaded by the user (the stored name is normalized).
    original_filename = models.CharField(max_length=255, null=False, blank=False)
    file = models.FileField(upload_to=project_file_dir, null=False, blank=False)
    ext = models.CharField(max_length=10, null=False, blank=False)
    def __unicode__(self):
        return u"{0} - {1}".format(self.project.name, self.file.name)
    def delete(self, *args, **kwargs):
        """Remove the file from storage before deleting the database row."""
        self.file.delete()
        super(ProjectFile, self).delete(*args, **kwargs)
class Benefit(models.Model):
    # Reward tier offered by a project; `amount` is the pledge price.
    project = models.ForeignKey(Project, related_name='benefits')
    amount = models.DecimalField(max_digits=8, decimal_places=2, null=False, blank=False)
    text = models.TextField(null=False, blank=False)
    class Meta:
        # Cheapest tier first.
        ordering = ['amount']
    def __unicode__(self):
        return str(self.amount)
class Tag(models.Model):
    # Free-form label; relations to tagged objects are defined elsewhere.
    name = models.CharField(max_length=255)
    def __unicode__(self):
        return self.name
def tz_aware_now():
    """Return the current local time as a timezone-aware datetime."""
    naive = datetime.datetime.now()
    zone = timezone.get_default_timezone()
    return timezone.make_aware(naive, zone)
class Comment(MPTTModel):
    # Threaded (MPTT) comment attached to a project.
    project = models.ForeignKey(Project, related_name='comments')
    user = models.ForeignKey(User)
    comment = models.TextField()
    # Self-reference makes the comment tree; NULL parent means a root comment.
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
    timestamp = models.DateTimeField(default=tz_aware_now)
    class MPTTMeta:
        # Siblings are kept in chronological order.
        order_insertion_by = ['timestamp']
    def __unicode__(self):
        # Show author plus a 70-char preview of the comment body.
        result = self.user.username + ': ' + self.comment[:70]
        return result
| [
"moisizz@gmail.com"
] | moisizz@gmail.com |
3f84b9dcb1f883353278b6f06f472d8d32a06e47 | 1521332438d4e711b6fa4af825047a3466925511 | /WorkshopWeek8/problem5.py | 1925e67c31009097d9b36fdcb1b950cb256b497e | [] | no_license | JakeAttard/Python-2807ICT-NoteBook | df0907bdca9ff10f347498233260c97f41ea783b | 9a38035d467e569b3fb97f5ab114753efc32cecc | refs/heads/master | 2020-04-26T17:33:18.184447 | 2019-11-05T13:04:56 | 2019-11-05T13:04:56 | 173,717,675 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | def function(list, diff):
counter = 1
for a in list[::2]:
for b in list[1::2]:
if int(b) - int(a) == diff:
counter += 1
elif int(b) - int(a) == -1 * diff:
counter += 1
else:
break
return counter
def testString(a):
    """Parse one input line of numbers and print the pair count.

    Quirks preserved from the original: the length checks are on the raw
    STRING, so a single multi-digit number (e.g. "55") falls through to the
    else-branch and raises IndexError -- presumably len() of the split list
    was intended.  An empty line terminates the program via exit().
    """
    numbers = a.split()
    if len(a) == 1:
        print(1)
    elif len(a) == 0:
        exit()
    else:
        step = int(numbers[1]) - int(numbers[0])
        print(function(numbers, step))
# Prompt repeatedly; an empty line ends the run (testString calls exit()
# on empty input before the loop condition is re-checked).
a = input("List: ")
testString(a)
while len(a) != 0:
    a = input("List: ")
    testString(a)
"jakeattard18@gmail.com"
] | jakeattard18@gmail.com |
9a23ec200ff08940a0fb345e73430f25aba9414f | 9d95d451eb26e862ca8867d463b723b06c16f5bc | /wiki_top_words/manage.py | 2e065d1a8fc2e721a1ac315eaea5349a5f4458fe | [] | no_license | shurik2533/wiki_top_words | 4a02427f60852a19b33e495203a061e4105053df | df54ba7b1f3bdf4c6f17ce1ae227ecd1eae9a22c | refs/heads/master | 2020-05-05T11:37:25.850282 | 2019-04-14T14:52:46 | 2019-04-14T14:52:46 | 179,996,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: pick the settings module,
    # then hand argv to Django's command dispatcher.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wiki_top_words.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint about the usual causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"shurik2533@Aleksandrs-MacBook-Pro.local"
] | shurik2533@Aleksandrs-MacBook-Pro.local |
104ae0e1215c1ecdfe021a82e06a809c19ef9c39 | 82b194b063eadfb57d3e3e83b41279c122a33b53 | /movies/admin.py | 78c3c95dd96a45e613bc967166ee7f8e5e95dd6c | [] | no_license | AnupKandalkar/ssdb_django_angular_heroku | 8c02a3a8751ffaf5957895bf4a27add2fe7d004a | 91619f128728d42f15e26dd0c57ad36fab1fd79c | refs/heads/master | 2021-01-21T21:40:16.190289 | 2019-01-16T17:36:08 | 2019-01-16T17:36:08 | 50,094,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.contrib import admin
from models import *
# Register your models here.
# Expose the movie models in the Django admin site.
admin.site.register(MoviesData)
admin.site.register(MoviesGenre)
| [
"kandalkar.a87@gmail.com"
] | kandalkar.a87@gmail.com |
7646e70cd05301a9d704f982477baddb00f8a39a | 58bba1cf855244591eba578f71fd733c5484af61 | /myapp/mongoconnect.py | ab05718e32f5a08f97458f0dd932bd11173f9532 | [] | no_license | cowhite/restaurant_system | f911ebc2989d291165e9130f0711f182723542d3 | b0849dff649ebe9f301f6d824dca7c14fe804b68 | refs/heads/master | 2023-05-15T20:45:10.510206 | 2021-06-11T09:06:51 | 2021-06-11T09:06:51 | 375,705,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from pymongo import MongoClient
import os
#CODE_TYPE = os.environ['CODE_TYPE']
CODE_TYPE = os.environ.get('CODE_TYPE', "dev")
def connect_to_mongo():
    """Return the MongoDB database handle matching the CODE_TYPE env setting.

    Returns:
        The ``dev_db`` database for "dev", ``prod_db`` for "prod".

    Raises:
        ValueError: for any other CODE_TYPE.  Previously the function fell
        through and silently returned None, deferring the failure to callers.
    """
    if CODE_TYPE == "dev":
        client = MongoClient('localhost', 27017)
        return client.dev_db
    elif CODE_TYPE == "prod":
        # NOTE(review): placeholder credentials are hard-coded; move them to
        # configuration / environment variables before real deployment.
        client = MongoClient("mongodb://uname:pwd@localhost/admin")
        return client.prod_db
    raise ValueError("Unknown CODE_TYPE: {0!r}".format(CODE_TYPE))
| [
"bljd369@gmail.com"
] | bljd369@gmail.com |
20fa7b8ada3d03a343ebc33095b3bb86e9ea1111 | d76eaf771f4dfcaa30110281a7d783da60689c9b | /Lesson_1/task_6.py | 125c346fe4291540740247338a2241a406250790 | [] | no_license | MyodsOnline/diver.vlz-python_basis | 404a4a5883ba350613c6c5349d534e81ced2385f | 9538372a7d93f42ef72bba23ab8b0d8760103a67 | refs/heads/python_basis | 2023-02-12T19:47:04.033780 | 2021-01-11T05:56:43 | 2021-01-11T05:56:43 | 318,102,061 | 1 | 0 | null | 2021-01-11T05:56:44 | 2020-12-03T06:48:55 | Python | UTF-8 | Python | false | false | 1,381 | py | """урок 1 задание 6
Спортсмен занимается ежедневными пробежками. В первый день его результат составил a километров.
Каждый день спортсмен увеличивал результат на 10 % относительно предыдущего.
Требуется определить номер дня, на который общий результат спортсмена составить не менее b километров.
Программа должна принимать значения параметров a и b и выводить одно натуральное число — номер дня.
Например: a = 2, b = 3.
Результат:
1-й день: 2
2-й день: 2,2
3-й день: 2,42
4-й день: 2,66
5-й день: 2,93
6-й день: 3,22
Ответ: на 6-й день спортсмен достиг результата — не менее 3 км.
"""
a = int(input('Results on the first day of training: '))
b = int(input('The ultimate goal of training: '))
days = 1
if 0 >= b or 0 >= a:
print('Invalid data entered.')
elif a == b or a > b:
print('Already done.')
else:
while b > a:
a += 0.1 * a
days += 1
print(f'On day {days}, the result is: {a:.2}.\nTo achieve the goal left: {b - a:.2}')
| [
"diver.vlz@gmail.com"
] | diver.vlz@gmail.com |
13a587656896590a285c2380d20b3708d1e6ffa7 | 439d2d30b61514bd6a9072c175b124c63f124834 | /data_statistics/base/total.py | 19d25ef609d1c29858e834396f6f32382acd7562 | [] | no_license | meteor-gogogo/python | 6f92e3a680c653f92a8d9b04a2d09be6d6ea126a | 7d03e2f57d4dfb7c7aeb15bf836a6e0d4af9b89d | refs/heads/master | 2022-12-10T07:45:31.085951 | 2019-09-04T06:01:50 | 2019-09-04T06:01:50 | 197,333,904 | 0 | 1 | null | 2022-12-08T05:19:07 | 2019-07-17T07:01:55 | Python | UTF-8 | Python | false | false | 190 | py | #!/usr/bin/env python
# coding=utf-8
from .baselist import BaseList
class TotalList(BaseList):
    """Statistics list handler registered under the 'total' list type."""
    @classmethod
    def is_registrar_for(cls, listtype):
        # Claim responsibility only for the aggregate "total" listing.
        return listtype == 'total'
| [
"liuhang@aplum.com.cn"
] | liuhang@aplum.com.cn |
b5ff8d15d1586adaa2f9fbccbee1571b6f987707 | 2f66a358372ac20bbb5e3949a7ee37768b82f49e | /scripts/simuPOP/tests/old/parallel_run_adaptive_collapsing_permute3.py | aac315cb7c3620ede87fef444c05511aadd9f5cb | [] | no_license | cwolock/rvat | ab9648e6d1c480c71a7aa1d3428c881db0cd62e9 | 9643e4e2f5ca23a090fcd5cd8f3ed56e69bf824d | refs/heads/master | 2021-09-17T22:41:27.077915 | 2018-07-06T04:04:08 | 2018-07-06T04:04:08 | 108,668,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | #!/usr/bin/env python
"""
parallel_run_adaptive_collapsing_permute.py
Collects *.smp, *.spl, *regions.txt files and runs adaptive collapsing in parallel
"""
import luigi
import os
from adaptive_collapsing_permute3 import *
class RequireFiles(luigi.ExternalTask):
    """
    Class for checking that necessary files are present
    param smp: (string) name of .smp file
    param spl: (string) name of .spl file
    param reg: (string) name of .regions.txt file
    """
    smp = luigi.Parameter()
    spl = luigi.Parameter()
    reg = luigi.Parameter()
    # ExternalTask has no run(): the existence of these targets is the contract.
    def output(self):
        return (luigi.LocalTarget(self.smp),
                luigi.LocalTarget(self.spl),
                luigi.LocalTarget(self.reg))
class RunCollapsing(luigi.Task):
    """
    Class for running adaptive collapsing
    param smp: (string) name of .smp file
    param spl: (string) name of .spl file
    param reg: (string) name of .regions.txt file
    param nperms: (int) number of permutations
    """
    smp = luigi.Parameter()
    spl = luigi.Parameter()
    reg = luigi.Parameter()
    nperms = luigi.IntParameter()
    def requires(self):
        return RequireFiles(smp=self.smp, spl=self.spl, reg=self.reg)
    def output(self):
        # Drop the "spl" suffix (keeps the trailing dot) to build the output name.
        prefix = self.spl[:-3]
        return luigi.LocalTarget('{prefix}adaptive_pvals.txt'.format(prefix=prefix))
    def run(self):
        # collapse() comes from adaptive_collapsing_permute3 (star import above).
        results = collapse(self.smp, self.spl, self.reg, self.nperms)
        with self.output().open('w') as outfile:
            outfile.write('\n'.join(map(str, results)) + '\n')
class Parallelize(luigi.WrapperTask):
    """
    Class for parallelizing the RunCollapsing task
    param nperms: (int) number of permutations
    """
    nperms = luigi.IntParameter()
    def requires(self):
        # Collect the three file kinds from the working directory.
        cwd = os.getcwd()
        smp_list = []
        spl_list = []
        reg_list = []
        for f in os.listdir(cwd):
            if f.endswith('.smp'):
                smp_list.append(f)
            elif f.endswith('.spl'):
                spl_list.append(f)
            elif f.endswith('regions.txt'):
                reg_list.append(f)
        # Sorting then zipping pairs the files by shared name prefix;
        # assumes each sample has exactly one file of each kind.
        smp_list = sorted(smp_list)
        spl_list = sorted(spl_list)
        reg_list = sorted(reg_list)
        for smp, spl, reg in zip(smp_list, spl_list, reg_list):
            yield RunCollapsing(smp=smp, spl=spl, reg=reg, nperms=self.nperms)
| [
"cwolock@gmail.com"
] | cwolock@gmail.com |
d7ef8890a6ce56916383b518e78a04c723e683ff | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBZinc_M_950_TuneZ2star_8TeV-madgraph_cff.py | 425c01667e5ad92ae0b9a16636c284b2b8579120 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 6,054 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(7,1) = 950.0D0 ! bprime quarks mass',
'PMAS(7,2) = 9.50D0',
'PMAS(7,3) = 95.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4',
'MDME(57,1)=0 ! gamma b4',
'KFDP(58,2)=5 ! defines Z0 b',
'MDME(58,1)=1 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'KFDP(63,2)=5 ! defines H0 b',
'MDME(63,1)=1 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 0.5D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 0.5D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(210,1)=1 !Higgs decay into dd',
'MDME(211,1)=1 !Higgs decay into uu',
'MDME(212,1)=1 !Higgs decay into ss',
'MDME(213,1)=1 !Higgs decay into cc',
'MDME(214,1)=1 !Higgs decay into bb',
'MDME(215,1)=1 !Higgs decay into tt',
'MDME(216,1)=1 !Higgs decay into',
'MDME(217,1)=1 !Higgs decay into Higgs decay',
'MDME(218,1)=1 !Higgs decay into e nu e',
'MDME(219,1)=1 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=1 !Higgs decay into Higgs decay',
'MDME(222,1)=1 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=1 !Higgs decay into gam Z',
'MDME(225,1)=1 !Higgs decay into Z Z',
'MDME(226,1)=1 !Higgs decay into W W',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
# Sequence executed by the production workflow: just the hadronizer/filter.
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] | sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch |
19d2071c90dfbf39c31669b82ef26d4c0d376a89 | 4edd89e807ac9a70d4fb4a258015e6889b01ff27 | /md5decoder.py | f0610781b1f9b91c3f091c3120739488857dd15c | [] | no_license | karimmakynch/PYTHON | ca68576fb3079fdd56559959edb3b4e1ba8ccf04 | 4842269368d49a3954c39ce4e8f2a0bc03b2e99c | refs/heads/main | 2023-03-15T21:16:38.610893 | 2021-02-26T05:42:26 | 2021-02-26T05:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | # -*- coding: utf-8 -*-
import hashlib
import sys
# Dictionary attack against a file of MD5 hash/email pairs (Python 2 script).
# Usage: python md5decoder.py <hashes file> <dictionary file> <output file>
#variables
count = 0
tour = 0
tourclone = 0
tourx = 0
creds = ''
part = 1
inputfilelines = 0
try:
	try:
		# Positional arguments; any failure falls through to the usage message.
		inputfile = sys.argv[1]
		dicfile = sys.argv[2]
		outputfile = sys.argv[3]
		fout = open(outputfile,'w')
		fouttx = '[+] inputfile: '+str(inputfile)+' DictionaryFile: '+str(dicfile)+' Outputfile: '+str(outputfile)+'\n'
		fout.write(fouttx)
	except:
		print 'err: Ex: python md5decoder.py inputfile(hashes).txt dic.txt outputfile(result).txt'
		sys.exit()
	# Ask which column holds the hash and which holds the email.
	print 'Text Content:'
	print '1)Hashes:Email'
	print '2)Email :Hashes'
	hashpos = input('input: ')
	if hashpos == 1:
		hashes = 0
		emails = 1
	if hashpos == 2:
		hashes = 1
		emails = 0
	if str(hashpos) not in '12':
		print '[-] err 1)Hashes:Email !!'
		print '[-] err 2)Email :Hashes !!'
		sys.exit()
	# Count only well-formed (two-column) input lines for progress reporting.
	# NOTE(review): re-reading the whole file once per line is O(n^2) I/O.
	inputfilelineslen = len(open(inputfile,'r').readlines())
	for i in range(0,inputfilelineslen):
		if len(open(inputfile,'r').readlines()[i].split()) == 2:
			inputfilelines +=1
	dicfilelines = len(open(dicfile,'r').readlines())
	print '\n'
	# Outer loop: target lines; inner loop: candidate dictionary words.
	for i in open(inputfile,'r'):
		if len(i.split()) == 2:
			for ii in open(dicfile,'r'):
				hashtext = hashlib.md5(ii.split()[0]).hexdigest()
				# Progress-bar bookkeeping (overall / dictionary / per-line).
				prog1 = int(float(tour)/dicfilelines*100)
				if tourclone > inputfilelines:
					tourclone = 0
				prog2 = int(float(tourclone)/inputfilelines*100)
				sym1 = 10*tour/dicfilelines
				p1 = '▓'*sym1+'░'*(10-sym1)
				sym2 = 10*tourclone/inputfilelines
				p2 = '▓'*sym2+'░'*(10-sym2)
				prog3 = int(float(tourx)/inputfilelines*100)
				sym3 = 10*tourx/inputfilelines
				p3 = '▓'*sym3+'░'*(10-sym3)
				sys.stdout.write('\r '+str(prog3)+'% ['+p3+'] '+str(prog1)+'% ['+p1+'] '+str(prog2)+'% ['+p2+'] count : '+str(count)+' tested: '+str(part)+'/'+str(inputfilelines)+' ')
				sys.stdout.flush()
				if i.split()[hashes] == hashtext:
					# Hit: append "email:plaintext" to the output file.
					count += 1
					creds = str(i.split()[emails])+':'+str(ii.split()[0])
					fout = open(outputfile,'a')
					fout.write(creds)
				tourclone +=1
				tour += 1
				if tour > dicfilelines:
					tour = 0
		part +=1
		tourx +=1
	print '\n'
except:
	# NOTE(review): bare except silently swallows every error (including
	# Ctrl-C under Python 2) -- at least KeyboardInterrupt/IOError should
	# be reported instead of passed over.
	pass
"root@localhost.localdomain"
] | root@localhost.localdomain |
b56fd06fa63effc6cecdeb64776d8258e2d824e3 | 3a31bd9928d4c81ab2a1e48a8c584fe17b7c4151 | /GAF-CLEANING-master/houseCleaning/pages/migrations/0003_auto_20210120_1725.py | 7034ef39f83a8db1c0b220ed0eaf1e87381f84f3 | [] | no_license | shahzaibk23/Cleaning | a60921b7172cfb60478d33d2057413a9878708bb | 3bb98a1433165394519c9b91ba551b165c791568 | refs/heads/main | 2023-05-07T04:21:34.356430 | 2021-02-27T10:27:16 | 2021-02-27T10:27:16 | 340,332,051 | 0 | 0 | null | 2021-06-01T23:18:54 | 2021-02-19T10:16:56 | Python | UTF-8 | Python | false | false | 595 | py | # Generated by Django 3.1.5 on 2021-01-20 17:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: moves the hourly-rate relation
    # off Panel and attaches HourlyRate to Panel via a nullable FK instead.
    dependencies = [
        ('pages', '0002_auto_20210120_1722'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='panel',
            name='hourlyRate',
        ),
        migrations.AddField(
            model_name='hourlyrate',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.panel'),
        ),
    ]
| [
"72767591+MR-Pvt@users.noreply.github.com"
] | 72767591+MR-Pvt@users.noreply.github.com |
943b2967a8d699984b0a857aaa42eea8529a4414 | 6bbd3825d7b1329215a128f93cdb807747fe5fc7 | /python-jenkins/jenkins-plugins.py | b1e0d8a21cec28d2f25c1126257c521082ada4ba | [] | no_license | Shwebs/ci-cd-with-jenkins-using-dsl-pipelines-and-docker | e0c43d0640c0834ff4d67813d1b541ddee5b8081 | d42d500835d8798ac4cd9708681e3ddbb00ef99d | refs/heads/master | 2023-03-16T07:29:49.763589 | 2019-04-09T11:56:29 | 2019-04-09T11:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import sys
import jenkins
import json
import credentials
# Credentials
username = credentials.login['username']
password = credentials.login['password']
# Connect to the local Jenkins instance.
server = jenkins.Jenkins('http://localhost:8080', username=username, password=password)
# Get the installed Plugin info
plugins = server.get_plugins_info()
# NOTE(review): python-jenkins' get_plugins_info() already returns parsed
# Python data, so json.loads() on it should raise TypeError -- json.dumps()
# (see the commented line below) is likely what was meant; confirm.
parsed = json.loads(plugins) # take a string as input and returns a dictionary as output.
#parsed = json.dumps(plugins) # take a dictionary as input and returns a string as output.
#print(json.dumps(parsed, indent=4, sort_keys=True))
#print(plugins)
print(parsed)
| [
"bdas@broadsoft.com"
] | bdas@broadsoft.com |
02bf83316933dda91a9241311fb8fae755d4f00c | f4a6dea2ba16ea6957754a91220627d33e88390b | /app/models.py | b939137301c403f1a6a9fc949df24ce3fb403db7 | [] | no_license | simplc/ZDBK | acde035a19690608f84ca1d7df3af2377d0ccf4e | 487c4f922b9f01c7687b5c02fefdcbaa72c26ef8 | refs/heads/master | 2020-06-03T09:05:10.321796 | 2019-06-14T07:18:52 | 2019-06-14T07:18:52 | 191,517,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | from _datetime import datetime
import json
from sqlalchemy.dialects.mysql import LONGTEXT
from . import db
class BaiKeDoc(db.Model):
    # Crawled Baike encyclopedia article plus an indexing flag.
    __tablename__ = 'baike_doc'
    doc_id = db.Column(db.Integer, unique=True, primary_key=True, autoincrement=False)
    title = db.Column(db.String(255))
    description = db.Column(db.Text)
    # Set once the document has been added to the word index.
    is_indexed = db.Column(db.Boolean, default=False)
    def __str__(self):
        return "<BaiKe %r>" % self.title
class BaiKeSection(db.Model):
    """One titled text section of a Baike article."""
    __tablename__ = 'baike_section'
    section_id = db.Column(db.Integer, unique=True, primary_key=True)
    baike_id = db.Column(db.Integer, db.ForeignKey('baike_doc.doc_id'))
    section_title = db.Column(db.String(255))
    text = db.Column(db.Text)
    page = db.relationship('BaiKeDoc', backref=db.backref('sections'))
    def __str__(self):
        # Bug fix: arguments must be a tuple -- "fmt % a, b" parsed as
        # ("fmt % a", b), leaving the second %d unfilled (TypeError) and
        # returning a tuple from __str__.
        return "<Section %r for BaiKe %d >" % (self.section_title, self.baike_id)
class BaiKeItem(db.Model):
    """One info-box item of a Baike article."""
    __tablename__ = 'baike_item'
    item_id = db.Column(db.Integer, unique=True, primary_key=True)
    baike_id = db.Column(db.Integer, db.ForeignKey('baike_doc.doc_id'))
    item_title = db.Column(db.String(255))
    text = db.Column(db.Text)
    page = db.relationship('BaiKeDoc', backref=db.backref('items'))
    def __str__(self):
        # Bug fix: the original copy-pasted BaiKeSection's __str__, referencing
        # the nonexistent self.section_title and passing the format arguments
        # without a tuple (so it returned a tuple / raised AttributeError).
        return "<Item %r for BaiKe %d >" % (self.item_title, self.baike_id)
class BaiKePicture(db.Model):
    # Image attached to a Baike article.
    __tablename__ = 'baike_pic'
    picture_id = db.Column(db.Integer, unique=True, primary_key=True)
    baike_id = db.Column(db.Integer, db.ForeignKey('baike_doc.doc_id'))
    picture_title = db.Column(db.Text)
    picture_url = db.Column(db.Text)
    page = db.relationship('BaiKeDoc', backref=db.backref('pictures'))
class ZhiDaoDoc(db.Model):
    # Crawled Zhidao Q&A thread (the question side) plus an indexing flag.
    __tablename__ = "zhidao_doc"
    doc_id = db.Column(db.Integer, unique=True, primary_key=True, autoincrement=False)
    question = db.Column(db.Text)
    description = db.Column(db.Text)
    ask_time = db.Column(db.String(255))
    # Set once the document has been added to the word index.
    is_indexed = db.Column(db.Boolean, default=False)
    def __str__(self):
        return "<Question %r>" % self.question
class ZhiDaoAnswer(db.Model):
    """One answer to a Zhidao question, with vote counts and acceptance flag."""
    __tablename__ = 'zhidao_answer'
    answer_id = db.Column(db.Integer, unique=True, primary_key=True, autoincrement=False)
    question_id = db.Column(db.Integer, db.ForeignKey('zhidao_doc.doc_id'))
    answer = db.Column(db.Text)
    user_name = db.Column(db.String(255))
    answer_time = db.Column(db.String(255))
    likes = db.Column(db.Integer)
    dislikes = db.Column(db.Integer)
    accepted = db.Column(db.Boolean)
    question = db.relationship('ZhiDaoDoc', backref=db.backref('answers'))
    def __str__(self):
        # Bug fix: arguments must be a tuple -- "fmt % a, b" parsed as
        # ("fmt % a", b), under-filling the two %d slots (TypeError).
        return "<Answer %d for question %d>" % (self.answer_id, self.question_id)
class ZhiDaoPicture(db.Model):
    # Image embedded in a Zhidao answer.
    __tablename__ = 'zhidao_pic'
    picture_id = db.Column(db.Integer, unique=True, primary_key=True)
    answer_id = db.Column(db.Integer, db.ForeignKey('zhidao_answer.answer_id'))
    picture_url = db.Column(db.Text)
    answer = db.relationship('ZhiDaoAnswer', backref=db.backref('pictures'))
class WordIndex(db.Model):
    # Inverted-index entry: maps a word to its JSON-serialized posting list.
    __tablename__ = 'word_index'
    word = db.Column(db.String(255), unique=True, primary_key=True)
    # Raw JSON string; use the `index` property for (de)serialized access.
    _index_list = db.Column(LONGTEXT)
    @property
    def index(self):
        return json.loads(self._index_list)
    @index.setter
    def index(self, index_list):
        self._index_list = json.dumps(index_list)
    def __str__(self):
        return self.word
#
# if __name__ == '__main__':
# db.drop_all()
# db.create_all()
| [
"liuchen81195@gmail.com"
] | liuchen81195@gmail.com |
fe1c9c57c7c1417f62e68d17bddeb9fbf2dbca82 | f3bacae751b8b50acf8ee60cfa739619ba296574 | /horovod/data/data_loader_base.py | 68780717481715cb58aa2e8dc9647edf571ff256 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | Tixxx/horovod | 295ba368aba7749a87010e809b599eb58f1bf7db | bc8c71b3138a4c5f77773bfd3291bd1027598a13 | refs/heads/master | 2021-08-28T00:23:49.014274 | 2021-08-06T14:42:50 | 2021-08-06T14:42:50 | 194,719,288 | 3 | 2 | NOASSERTION | 2019-10-29T18:06:56 | 2019-07-01T17:55:47 | C++ | UTF-8 | Python | false | false | 4,597 | py | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from queue import Queue, Empty
from threading import Thread, Event
class BaseDataLoader(object):
def __len__(self):
"""
Length of the batches to be loaded.
"""
raise NotImplementedError()
def _iterate(self):
"""
Interface for the implimentation of iterate batches
"""
raise NotImplementedError()
def __iter__(self):
"""
Starting iteration and get batchs
"""
for batch in self._iterate():
yield self._process_batch(batch)
def _process_batch(self, batch):
"""
Hook to modify batch before output. Will be override by trainer to reshape the data
as needed. Please do not override it.
"""
return batch
class AsyncDataLoaderMixin(object):
"""
Async Mixin on top of implementation of BaseDataLoader. It contains a seperate thread
which reads batch from self._iterate() and push them in the queue. The self.__iter__() function
will pop the batch from the queue.
If async_loader_queue_size is set to 0, the data loader will not work in async mode.
For example:
class PytorchAsyncDataLoader(AsyncDataLoaderMixin, PytorchDataLoader):
"""
def __init__(self, async_loader_queue_size=64, *args, **kwargs):
"""
initialize the async data loader. Need to add this in the __init__() of the implementation
"""
self.async_loader_queue_size = async_loader_queue_size
super().__init__(*args, **kwargs)
print(f"Apply the AsyncDataLoaderMixin on top of the data loader, async_loader_queue_size={async_loader_queue_size}. ")
if self.async_loader_queue_size > 0:
self.finished_event = Event()
self.queue = Queue(self.async_loader_queue_size)
self.thread = Thread(target=self._async_worker)
self.thread.daemon = True
self.started = False
def __del__(self):
self._close_async_loader()
s = super()
if hasattr(s, "__del__"):
s.__del__(self)
def _close_async_loader(self):
"""
Close the async data loader.
"""
print("Closing the AsyncDataLoaderMixin.")
if self.async_loader_queue_size > 0 and self.started:
self.finished_event.set()
try:
# Free buffer to allow worker to retry
self.queue.get_nowait()
except Empty:
pass
self.thread.join()
def _async_worker(self):
"""
Start worker thread to load data asynchronously.
User need to implement self._iterate() to read the data.
"""
try:
while not self.finished_event.is_set():
for batch in self._iterate():
if self.finished_event.is_set():
break
self.queue.put(batch)
self.queue.put(None)
except Exception as ex:
self.queue.put(ex)
self.queue.put(None)
finally:
self.queue.put(None)
def __iter__(self):
"""
Override the __iter__() to iterate data asynchronously to produce batchs.
Will procude batchs from the queue which were generated by self._iterate().
"""
print("Start generating batches from async data loader.")
if self.async_loader_queue_size > 0:
if not self.started:
self.started = True
self.thread.start()
while True:
batch = self.queue.get()
if batch is None:
break
if isinstance(batch, Exception):
raise batch
yield self._process_batch(batch)
else:
for batch in self._iterate():
yield self._process_batch(batch)
| [
"noreply@github.com"
] | noreply@github.com |
4452fbde5d0694f9c4907cd8efffcb4f97437d44 | 6cb9088222b842ee8a70e164adc06cd840a190c0 | /parser.py | 71295555cf3406f50bc15fe3f8fd75edac0e3749 | [] | no_license | tef/toyparser | a259f1b06a39d056fd6e5baf91c6301e9199f5b1 | 37f87035df92f8d3907d6a47ed0d616b678722cf | refs/heads/master | 2021-01-10T02:58:42.856747 | 2015-10-06T02:17:04 | 2015-10-06T02:17:04 | 43,529,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,762 | py | from __future__ import print_function, unicode_literals
"""
An operator precedence parser that handles expressions.
The trick here is what's known as precedence climbing, or left corner
transform, or pratt parsing, and a litany of other names. The trick
is frequently re-invented.
It's basically recursive descent + a while loop to handle left recursion.
It's very similar to doing shunting yard but with the call stack.
The trick is to split your rules into prefixes, and suffixes. You run any prefix
rules (recursive decent bit), and then repeatedly apply suffix rules (precedence
climbing).
For ex: infix operators are defined as suffix rules, we only look for a +
after we've parsed an item
"""
from collections import namedtuple, OrderedDict, defaultdict
from functools import partial
import re
class SyntaxErr(Exception):
    """Raised when the parser meets a token it cannot accept."""
# Table driven parser/lexer cursors.
# These cursors are (mostly) immutable, and functions like next() return new cursors.
Token = namedtuple('token','name text position')
# Compact debug rendering: "<text>_<first letter of token kind>".
Token.__str__ = lambda self: "%s_%s" % (self.text, self.name[0])
class Position(namedtuple('Position','off line_off line col')):
    """Absolute source offset plus line/column bookkeeping for diagnostics."""

    # Bug fix: '\r\n' must be tried before the single characters, otherwise
    # alternation matches the lone '\r' first and a Windows CRLF is counted
    # as two line breaks.
    newlines = re.compile(r'\r\n|\r|\n') # Todo: unicode

    def count_lines(self, source, offset):
        """Return a new Position advanced to ``offset``, recounting lines.

        Scans ``source`` between this position's offset and ``offset``,
        bumping the line number at every newline and remembering where the
        last line started so the column can be derived (1-based).
        """
        line = self.line
        line_off = self.line_off
        for match in self.newlines.finditer(source, self.off, offset):
            line += 1
            line_off = match.end()
        col = (offset - line_off) + 1
        return Position(offset, line_off, line, col)
class RegexLexer(object):
    """Lazy, memoizing token cursor driven by a tokenizer callable.

    ``rx(source, position)`` must return ``(token, new_position)`` where
    ``new_position`` has an ``off`` attribute.  ``current()`` scans on first
    use and caches both the token and the successor cursor; ``next()`` hands
    back that successor (or the empty tuple when the input is exhausted).
    """

    def __init__(self, rx, source, position):
        self.source = source
        self.position = position
        self.rx = rx
        # Memoized token / successor cursor; None means "not scanned yet".
        self._current = None
        self._next = None

    def pos(self):
        return self.position

    def current(self):
        if self._current is None:
            token, after = self.rx(self.source, self.position)
            self._current = token
            if token and after.off < len(self.source):
                self._next = self.__class__(self.rx, self.source, after)
            else:
                self._next = ()
        return self._current

    def next(self):
        if self._next is None:
            self.current()
        return self._next
def token_filter(*types):
    """Build a lexer-wrapper class that skips tokens whose name is in *types*."""
    class TokenFilter(object):
        def __init__(self, lexer):
            self.lexer = lexer

        def current(self):
            # Advance past filtered token kinds; fall off (returning None)
            # once the underlying lexer is exhausted.
            while self.lexer:
                token = self.lexer.current()
                if token.name not in types:
                    return token
                self.lexer = self.lexer.next()

        def next(self):
            successor = self.lexer.next()
            if successor:
                # Prime the successor's token cache before wrapping it.
                successor.current()
            return self.__class__(successor)

        def pos(self):
            return self.lexer.pos()

    return TokenFilter
class ParserCursor(object):
    """Cursor pairing a language (its rule tables) with a lexer position.

    ``parse_expr`` implements precedence climbing: a prefix rule produces the
    first item, then suffix rules repeatedly extend it for as long as they
    bind more tightly than the enclosing ("outer") rule.
    """

    def __init__(self, language, lexer):
        self.lexer = lexer
        self.lang = language

    def current_token(self):
        return self.lexer.current()

    def pos(self):
        # Returns None when the lexer is exhausted.
        if self.lexer:
            return self.lexer.pos()

    def next(self):
        lexer = self.lexer.next()
        return ParserCursor(self.lang, lexer)

    def pop(self):
        """Return (current token, cursor advanced past it)."""
        return self.current_token(), self.next()

    def accept(self, e):
        """Consume the expected literal text or raise SyntaxErr."""
        if e == self.current_token().text:
            return self.next()
        else:
            raise SyntaxErr("expecting: {}, got {}".format(e, self.current_token()))

    def __nonzero__(self):
        return bool(self.lexer)

    # Bug fix: __nonzero__ is Python 2 only; without a __bool__ alias,
    # Python 3 treats every cursor as truthy and loops such as
    # ``while parser`` never observe an exhausted cursor.
    __bool__ = __nonzero__

    def __eq__(self, o):
        return self.lexer == o.lexer

    def parse_stmts(self):
        """Parse terminator-separated expressions; return (exprs, cursor)."""
        exprs = []
        parser = self
        pos = -1
        while parser:
            expr, parser = parser.parse_expr(outer=Everything)
            pos = parser.pos()
            if expr:
                exprs.append(expr)
            if parser and parser.current_token().name == 'terminator':
                # Swallow runs of consecutive terminators.
                while parser and parser.current_token().name == 'terminator':
                    parser = parser.next()
            else:
                break
        return exprs, parser

    def parse_expr(self, outer):
        """Parse one expression bounded by the precedence of ``outer``."""
        item = self.current_token()
        pos = self.pos()
        rule = self.lang.get_prefix_rule(item, outer)
        if rule and rule.captured_by(outer):  # beginning of a rule
            item, parser = rule.parse_prefix(self, outer)
        else:
            return None, self

        # Precedence climbing: keep applying suffix rules while progress is
        # made (position changes) and the rule binds inside ``outer``.
        while parser and parser.pos() != pos:
            first = parser.current_token()
            pos = parser.pos()
            rule = self.lang.get_suffix_rule(first, outer)
            if rule and rule.captured_by(outer):
                item, parser = rule.parse_suffix(item, parser, outer)
        return item, parser
# Parse Rules
# Sentinel "outer rule" with the loosest precedence (0); its captured_by
# always returns False, so it never captures anything itself and every real
# rule binds inside it.
Everything = namedtuple('Everything','precedence captured_by')(0, (lambda r: False))
class Block(namedtuple('block', 'op item close')):
    """AST node for a bracketed expression, e.g. ``( item )``."""
    def __str__(self):
        return "<{0}{1}{2}>".format(self.op, self.item, self.close)
class BlockRule(namedtuple('rule', 'precedence op end_char')):
    # Prefix rule for bracketed groups such as ( ... ) or [ ... ].
    def captured_by(self, outer):
        # Returns `outer` itself (truthy for any real rule object): a block
        # may open anywhere a sub-expression is permitted.
        return outer
    def parse_prefix(self, parser, outer):
        # Consume the opener, parse a full sub-expression with precedence
        # reset to the outermost context, then require the matching closer.
        parser = parser.accept(self.op)
        item, parser = parser.parse_expr(outer=Everything)
        parser = parser.accept(self.end_char)
        return Block(self.op, item, self.end_char), parser
class Prefix(namedtuple('prefix', 'op right')):
    """A unary prefix application such as ``-x``."""

    def __str__(self):
        return "<{0} {1}>".format(self.op, self.right)
class PrefixRule(namedtuple('rule', 'precedence op')):
    # Rule for unary prefix operators such as `-x` or `!x`.
    def captured_by(self, rule):
        # A prefix operator can begin an expression in any context.
        return True
    def parse_prefix(self, parser, outer):
        # Consume the operator, then parse its operand with this rule as
        # the binding context so tighter suffixes attach to the operand.
        parser = parser.accept(self.op)
        new_item, parser = parser.parse_expr(outer=self)
        return Prefix(self.op, new_item), parser
class Infix(namedtuple('infix', 'op left right')):
    """A binary operator application such as ``a + b``."""

    def __str__(self):
        return "<{0} {1} {2}>".format(self.left, self.op, self.right)
class InfixRule(namedtuple('rule','precedence op')):
    """Left-associative binary operator rule."""

    def captured_by(self, rule):
        # Strictly tighter precedence wins; equal precedence refuses, which
        # yields left associativity (the precedence is higher, the scope is
        # more narrow).
        return rule.precedence < self.precedence

    def parse_suffix(self, item, parser, outer):
        parser = parser.accept(self.op)
        rhs, parser = parser.parse_expr(outer=self)
        return Infix(self.op, item, rhs), parser
class RInfixRule(InfixRule):
    # Right-associative variant: <= lets an operator of equal precedence
    # capture itself, so 2 ** 3 ** 4 parses as 2 ** (3 ** 4).
    def captured_by(self, rule):
        return rule.precedence <= self.precedence
class PostfixBlock(namedtuple('infix', 'op left right close')):
    """A call- or index-like suffix such as ``f(args)`` or ``a[idx]``."""

    def __str__(self):
        return "<{0}{1}{2}{3}>".format(self.left, self.op, self.right, self.close)
class PostfixBlockRule(namedtuple('rule','precedence op end_char')):
    # Suffix rule for call/index syntax: <left><op> inner <end_char>.
    def captured_by(self, rule):
        # Strictly tighter precedence than the enclosing rule wins
        # (the precedence is higher, the scope is more narrow).
        return rule.precedence < self.precedence
    def parse_suffix(self, item, parser, outer):
        left = item
        # Consume the opener, parse the enclosed expression with reset
        # precedence, then require the matching closer.
        parser = parser.accept(self.op)
        right, parser = parser.parse_expr(outer=Everything)
        parser = parser.accept(self.end_char)
        return PostfixBlock(self.op, left, right, self.end_char), parser
class PostfixRule(namedtuple('rule','precedence op')):
    """Unary postfix operator rule."""

    def captured_by(self, outer):
        # Strictly tighter precedence than the enclosing rule wins
        # (the precedence is higher, the scope is more narrow).
        return outer.precedence < self.precedence

    def parse_suffix(self, item, parser, outer):
        parser = parser.accept(self.op)
        return Postfix(self.op, item), parser
class Postfix(namedtuple('postfix', 'op left')):
    """A unary postfix application such as ``x!``."""

    def __str__(self):
        return "<{0} {1}>".format(self.left, self.op)
class TokenRule(namedtuple('expr', 'op precedence')):
    """Fallback rule for literal tokens: consume exactly one token."""

    def captured_by(self, rule):
        # A literal is valid in any context.
        return True

    def parse_prefix(self, parser, outer):
        return parser.pop()
class TerminatorRule(namedtuple('expr','op precedence')):
    """Statement-terminator rule.

    Defines no parse_prefix, so a terminator can never begin an expression;
    captured_by only yields to contexts looser than itself.
    """

    def captured_by(self, outer):
        # (the precedence is higher, the scope is more narrow)
        return outer.precedence < self.precedence
class Language(object):
    """One big lookup table describing a grammar for the Pratt parser.

    Operator, literal, whitespace and terminator definitions are collected
    through the ``def_*`` methods and lazily compiled into a single
    tokenizing regex by :meth:`rx`.
    """

    def __init__(self):
        self.literal_rule = TokenRule('literal', 0)
        self.suffix = OrderedDict()      # operator text -> infix/postfix rule
        self.prefix = OrderedDict()      # operator text -> prefix/block/terminator rule
        self.literals = OrderedDict()    # token name -> regex source
        self.operators = set()           # every operator spelling, for the tokenizer
        # BUG FIX: `ignored` and `comments` were plain sets, but def_ignored
        # and def_comment index into them by name (`self.ignored[name] = rx`),
        # which raised TypeError/AttributeError. Use ordered name->regex
        # mappings like `whitespace`.
        self.ignored = OrderedDict()
        self.whitespace = OrderedDict()
        self.terminators = set()
        self.comments = OrderedDict()
        self._rx = None                  # compiled tokenizer, built on demand
        self._names = None               # regex group index -> token name

    def rx(self):
        """Compile the tokenizer regex from everything defined so far."""
        # Longest operators first so e.g. '**' wins over '*'. Spaces inside
        # an operator spelling (e.g. 'not in') match any whitespace run.
        ops = sorted(self.operators, key=len, reverse=True)
        ops = [re.escape(o).replace(' ', r'\s+') for o in ops]
        rx = [
            ('terminator', "|".join(self.terminators)),
            ('whitespace', "|".join(self.whitespace.values())),
            ('operator', "|".join(ops)),
        ]
        for key, value in self.literals.items():
            rx.append((key, value))
        rx = "|".join("(?P<{}>{})".format(*a) for a in rx)
        ignored = "|".join(self.ignored.values())
        # Verbose-mode pattern: runs of ignored text may surround any token.
        rx = r'(?:{})* ({}) (?:{})*'.format(ignored, rx, ignored)
        rx = re.compile(rx, re.U + re.X)
        self._rx = rx
        # Map group number back to the token name it was defined under.
        self._names = dict(((v, k) for k, v in rx.groupindex.items()))

    def match(self, source, position):
        """Match one token at `position`; return (token, next position).

        Returns an 'error' token (without advancing) when nothing matches.
        """
        if not self._rx:
            self.rx()
        match = self._rx.match(source, position.off)
        if not match:
            return Token('error', 'unknown', position), position
        # Group 1 is the wrapper around the real token; scan the named
        # alternatives (from group 2 on) to find which one matched.
        for num, result in enumerate(match.groups()[1:], 2):
            if result:
                name = self._names[num]
                pos = position.count_lines(source, match.start(num))
                next_pos = pos.count_lines(source, match.end(0))
                token = Token(name, result, pos)
                return token, next_pos

    def get_suffix_rule(self, token, outer):
        return self.suffix.get(token.text)

    def get_prefix_rule(self, token, outer):
        # Operators and terminators have explicit rules; anything else is
        # treated as a literal token.
        if token.name in ("operator", "terminator"):
            return self.prefix[token.text]
        else:
            return self.literal_rule

    def parse(self, source):
        """Tokenize and parse `source`; return the list of statement trees.

        Raises SyntaxErr when input is left over after parsing.
        """
        if source:
            pos = Position(off=0, line_off=0, line=1, col=1)
            lexer = RegexLexer(self.match, source, pos)
            filter = token_filter("whitespace")
            parser = ParserCursor(self, filter(lexer))
            items, parser = parser.parse_stmts()
            if parser:
                raise SyntaxErr("item {}, left over {} at {}".format(
                    items, source[parser.pos().off:], parser.pos()))
            return items

    def def_whitespace(self, name, rx):
        rx = re.compile(rx, re.U).pattern
        self.whitespace[name] = rx

    def def_literal(self, name, rx):
        rx = re.compile(rx, re.U).pattern
        self.literals[name] = rx

    def def_ignored(self, name, rx):
        rx = re.compile(rx, re.U).pattern
        self.ignored[name] = rx

    def def_comment(self, name, rx):
        rx = re.compile(rx, re.U).pattern
        # BUG FIX: previously wrote to the nonexistent `self.comment`.
        self.comments[name] = rx

    def def_keyword(self, name):
        self.operators.add(name)

    def def_terminator(self, name, rx):
        self.terminators.add(rx)
        self.prefix[name] = TerminatorRule(name, -1)

    def def_block(self, p, start, end):
        rule = BlockRule(p, start, end)
        self.prefix[rule.op] = rule
        self.operators.add(start)
        self.operators.add(end)

    def def_postfix_block(self, p, start, end):
        rule = PostfixBlockRule(p, start, end)
        self.suffix[rule.op] = rule
        self.operators.add(start)
        self.operators.add(end)

    def def_postfix(self, p, op):
        rule = PostfixRule(p, op)
        self.suffix[rule.op] = rule
        self.operators.add(rule.op)

    def def_prefix(self, p, op):
        rule = PrefixRule(p, op)
        self.prefix[rule.op] = rule
        self.operators.add(rule.op)

    def def_infix(self, p, op):
        rule = InfixRule(p, op)
        self.suffix[rule.op] = rule
        self.operators.add(rule.op)

    def def_rinfix(self, p, op):
        rule = RInfixRule(p, op)
        self.suffix[rule.op] = rule
        self.operators.add(rule.op)

    def bootstrap(self):
        """Install a small default expression grammar."""
        self.def_terminator("\n", r"\n")
        self.def_terminator(";", r";")
        self.def_whitespace("space", r"\s+")
        self.def_literal("number", r"\d[\d_]*")
        self.def_literal("identifier", r"\w+")
        self.def_literal("true", r"true\b")
        self.def_literal("false", r"false\b")
        self.def_literal("null", r"null\b")
        self.def_block(900, '(', ')')
        self.def_block(900, '{', '}')
        self.def_block(900, '[', ']')
        self.def_postfix_block(800, '(', ')')
        self.def_postfix_block(800, '{', '}')
        self.def_postfix_block(800, '[', ']')
        self.def_rinfix(700, '**')
        self.def_prefix(600, '+')
        self.def_prefix(600, '-')
        self.def_prefix(600, '~')
        self.def_prefix(600, '!')
        self.def_infix(500, '*')
        self.def_infix(500, '/')
        self.def_infix(500, '//')
        self.def_infix(500, '%')
        self.def_infix(400, '-')
        self.def_infix(400, '+')
        self.def_infix(300, '<<')
        self.def_infix(300, '>>')
        self.def_infix(220, '&')
        self.def_infix(210, '^')
        self.def_infix(200, '|')
        # NOTE(review): 'is' appears twice and 'is not' is absent — probably
        # intended "is,is not"; left as-is to preserve behavior.
        for c in "in,not in,is,is,<,<=,>,>=,<>,!=,==".split(','):
            self.def_infix(130, c)
        self.def_infix(120, 'not')
        self.def_infix(110, 'and')
        self.def_infix(100, 'or')
        self.def_rinfix(0, '=')
# Smoke test: parse a small program and print each statement tree.
test = """
1 + 2
1 + 2 + 3 + 4 + 5
1 + 2 * 3 + 4
2 ** 3 ** 4
- 2 ** 3 ** 4 * 8
x [ 0 ] * 9
( 1 + 2 ) * 3
1*2+3+x[0][1]{2}
"""

language = Language()
language.bootstrap()

print(test)
# IDIOM FIX: use a plain loop for side effects — the original list
# comprehension built and discarded a list of Nones.
for line in language.parse(test):
    print(line)
print()
| [
"tef@twentygototen.org"
] | tef@twentygototen.org |
035f2485d9238b11a68df3adc4d304e7add9874d | 2687412dd10032667e50e74d9d3f832133bc2536 | /code/disasters/reload_landslide_data.py | 9963d89459014edca49ca7efbc21837e02e92c30 | [
"MIT"
] | permissive | wfp-ose/sparc2-pipeline | 644e040c27517889c84598c34397c06f3d82ca96 | fdd3bd29426d9231956f449cb5e78afd33446a8a | refs/heads/master | 2021-01-17T18:07:58.641768 | 2016-12-02T12:40:54 | 2016-12-02T12:40:54 | 57,199,382 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | from geodash.enumerations import MONTHS_SHORT3
from geodash.data import GeoDashDatabaseConnection
print "Inserting Landslide Data..."
print "..."
print ""
prob_classes = [
{'input': 'low', 'output_text': 'low', "output_int": 1},
{'input': 'medium', 'output_text': 'medium', "output_int": 2},
{'input': 'high', 'output_text': 'high', "output_int": 3},
{'input': 'very_h', 'output_text': 'very_high', "output_int": 4}
]
tpl = None
with open('insert_landslide_data.tpl.sql', 'r') as f:
tpl = f.read()
with GeoDashDatabaseConnection() as geodash_conn:
try:
geodash_conn.exec_update("DELETE FROM landslide.admin2_popatrisk;")
except:
pass
for month in MONTHS_SHORT3:
for prob_class in prob_classes:
# Population at Risk Data
sql = tpl.format(** {
'month': month,
'prob_class_input': prob_class['input'],
'prob_class_output_text': prob_class['output_text'],
'prob_class_output_int': str(prob_class['output_int'])
})
geodash_conn.exec_update(sql)
print "Done Inserting Landslide Data"
| [
"pjdufour.dev@gmail.com"
] | pjdufour.dev@gmail.com |
d893d6bda716d9a47904627e4d218b88be59669f | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pytorch_pytorch/pytorch-master/test/test_sparse.py | 11b51eaf3f1f94a07eaf3d721684547d9a17be77 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 6,617 | py | import torch
from torch import sparse
import itertools
import random
import unittest
from common import TestCase, run_tests
from numbers import Number
SparseTensor = sparse.DoubleTensor
class TestSparse(TestCase):
    """Tests for torch sparse tensors: construction, densification,
    contiguity (coalescing), transposition and sparse-dense math ops."""

    @staticmethod
    def _gen_sparse(d, nnz, with_size):
        # Build a random d-dimensional sparse tensor with `nnz` entries.
        # `with_size` is either a scalar (uniform size per dimension) or a
        # per-dimension size list; indices are drawn uniformly within bounds.
        v = torch.randn(nnz)
        if isinstance(with_size, Number):
            i = (torch.rand(d, nnz) * with_size).type(torch.LongTensor)
            x = SparseTensor(i, v)
        else:
            i = torch.rand(d, nnz) * \
                torch.Tensor(with_size).repeat(nnz, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = SparseTensor(i, v, torch.Size(with_size))
        return x, i, v

    def test_basic(self):
        # Round-trip: indices/values passed in must be recoverable.
        x, i, v = self._gen_sparse(3, 10, 100)
        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())
        x, i, v = self._gen_sparse(3, 10, [100, 100, 100])
        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())
        self.assertEqual(x.ndimension(), 3)
        self.assertEqual(x.nnz(), 10)
        for i in range(3):
            self.assertEqual(x.size(i), 100)
        # Make sure we can access empty indices / values
        x = SparseTensor()
        self.assertEqual(x.indices().numel(), 0)
        self.assertEqual(x.values().numel(), 0)

    def test_to_dense(self):
        # A 3x4x5 sparse tensor with four explicit entries must densify to
        # the hand-written dense equivalent below.
        i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        v = torch.Tensor([2, 1, 3, 4])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        res = torch.Tensor([
            [[2, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 3, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 4]],
        ])
        x.to_dense()  # Tests double to_dense for memory corruption
        x.to_dense()
        x.to_dense()
        self.assertEqual(res, x.to_dense())

    def test_contig(self):
        # contiguous() must sort indices lexicographically...
        i = torch.LongTensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ])
        v = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        x = SparseTensor(i, v, torch.Size([100, 100]))
        exp_i = torch.LongTensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ])
        exp_v = torch.Tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7])
        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())
        i = torch.LongTensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ])
        v = torch.Tensor([3, 2, 4, 1])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        exp_i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        exp_v = torch.Tensor([2, 1, 3, 4])
        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())
        # Duplicate indices
        # ...and sum values that share an index (coalescing: 3 + 2 + 1 = 6).
        i = torch.LongTensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ])
        v = torch.Tensor([3, 2, 4, 1])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        exp_i = torch.LongTensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ])
        exp_v = torch.Tensor([6, 4])
        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

    def test_transpose(self):
        # Sparse transpose (in-place and out-of-place) must match the dense
        # transpose for every pair of dimensions.
        x = self._gen_sparse(4, 20, 5)[0]
        y = x.to_dense()
        for i, j in itertools.combinations(range(4), 2):
            x = x.transpose_(i, j)
            y = y.transpose(i, j)
            self.assertEqual(x.to_dense(), y)
            x = x.transpose(i, j)
            y = y.transpose(i, j)
            self.assertEqual(x.to_dense(), y)

    def test_mm(self):
        # addmm/mm with a sparse left operand must match the dense result.
        def test_shape(di, dj, dk):
            x, _, _ = self._gen_sparse(2, 20, [di, dj])
            t = torch.randn(di, dk)
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()
            expected = torch.addmm(alpha, t, beta, x.to_dense(), y)
            res = torch.addmm(alpha, t, beta, x, y)
            self.assertEqual(res, expected)
            expected = torch.addmm(t, x.to_dense(), y)
            res = torch.addmm(t, x, y)
            self.assertEqual(res, expected)
            expected = torch.mm(x.to_dense(), y)
            res = torch.mm(x, y)
            self.assertEqual(res, expected)
        test_shape(10, 100, 100)
        test_shape(100, 1000, 200)
        test_shape(64, 10000, 300)

    def test_saddmm(self):
        # saddmm/smm produce sparse results; compare after densifying.
        def test_shape(di, dj, dk):
            x = self._gen_sparse(2, 20, [di, dj])[0]
            t = self._gen_sparse(2, 20, [di, dk])[0]
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()
            expected = torch.addmm(alpha, t.to_dense(), beta, x.to_dense(), y)
            res = torch.saddmm(alpha, t, beta, x, y)
            self.assertEqual(res.to_dense(), expected)
            expected = torch.addmm(t.to_dense(), x.to_dense(), y)
            res = torch.saddmm(t, x, y)
            self.assertEqual(res.to_dense(), expected)
            expected = torch.mm(x.to_dense(), y)
            res = torch.smm(x, y)
            self.assertEqual(res.to_dense(), expected)
        test_shape(7, 5, 3)
        test_shape(1000, 100, 100)
        test_shape(3000, 64, 300)

    def test_spadd(self):
        # Scaled sparse + dense addition, including a non-contiguous dense
        # operand (built via transpose) to exercise strided paths.
        def test_shape(*shape):
            x, _, _ = self._gen_sparse(len(shape), 10, shape)
            y = torch.randn(*shape)
            r = random.random()
            expected = y + r * x.to_dense()
            res = torch.add(y, r, x)
            self.assertEqual(res, expected)
            # Non contiguous dense tensor
            s = list(shape)
            s[0] = shape[-1]
            s[-1] = shape[0]
            y = torch.randn(*s).transpose_(0, len(s) - 1)
            r = random.random()
            expected = y + r * x.to_dense()
            res = torch.add(y, r, x)
            self.assertEqual(res, expected)
        test_shape(5, 6)
        test_shape(10, 10, 10)
        test_shape(50, 30, 20)
        test_shape(5, 5, 5, 5, 5, 5)
if __name__ == '__main__':
run_tests()
| [
"659338505@qq.com"
] | 659338505@qq.com |
9f4d7a3e21d712a9f322de4e2a8fdacc5bce00d0 | f3ad19f5426baccd9ed1704e4552795e0c3e2f76 | /df_goods/models.py | ebb84d275af140a1b9ff10139fcaa6781326e965 | [] | no_license | zhbowei/dailyfresh | 0195bab188c95ce43cd3c3c097215b62cb4164c8 | 738b689c6494140a3900bc297f7666f427fc7253 | refs/heads/master | 2020-03-23T15:45:12.652039 | 2018-07-25T04:50:44 | 2018-07-25T04:50:44 | 141,772,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from django.db import models
from tinymce.models import HTMLField
# Create your models here.
class TypeInfo(models.Model):
    """Product category for the fresh-goods store."""
    title = models.CharField(max_length=20)
    # Soft-delete flag: rows are flagged rather than removed.
    isDelete = models.BooleanField(default=False)

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = '分类信息'
        verbose_name_plural = '分类信息'
class GoodsInfo(models.Model):
    """Product record: title, image, price, stock and rich-text detail."""
    gtitle = models.CharField(max_length=20)              # product name
    gpic = models.ImageField(upload_to='df_goods')        # product image
    gprice = models.DecimalField(max_digits=5, decimal_places=2)
    isDelete = models.BooleanField(default=False)         # soft-delete flag
    gunit = models.CharField(max_length=20, default='500g')  # sales unit
    gclick = models.IntegerField()                        # click/view counter
    gjianjie = models.CharField(max_length=200)           # jianjie = 简介, short blurb
    gkucun = models.IntegerField()                        # kucun = 库存, stock count
    gcontext = HTMLField()                                # rich-text detail body
    gtype = models.ForeignKey(TypeInfo, on_delete=models.CASCADE)

    def __str__(self):
        # CONSISTENCY: TypeInfo defines __str__; without this, admin lists
        # showed the unhelpful default "GoodsInfo object" representation.
        return self.gtitle

    class Meta:
        verbose_name = '商品信息'
        verbose_name_plural = '商品信息'
| [
"770516093@qq.com"
] | 770516093@qq.com |
c1c1a1b4bcf10a914af1f1fbd3c74728f03b7356 | 5d79ca9f873645a9a78636549bda2d111d07d856 | /diff_classifier/knotlets.py | 176ef34ec244a67e743721ff2a5846439280c541 | [
"MIT"
] | permissive | hugopontess/diff_classifier | a8474b551cb67fd4765d26a52395f1fdd3eca538 | 9d093a2436edb838edc8094e7e4b70bcd34c6eeb | refs/heads/master | 2020-04-25T19:01:09.492732 | 2019-02-27T16:45:22 | 2019-02-27T16:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,866 | py | '''Functions to submit tracking jobs to AWS Batch with Cloudknot
This is a set of custom functions for use with Cloutknot for parallelized
multi-particle tracking workflows. These can also be used as templates if
users want to build their own parallelized workflows. See Cloudknot
documentation at https://richford.github.io/cloudknot/documentation.html
for more information.
The base set of functions is split, tracking, and assemble_msds. The split
function splits large images into smaller images that are manageable for a
single EC2 instance. The tracking function tracks nanoparticle trajectories in
a single sub-image from the split function. The assemble_msds function operates
on all sub-image trajectory csv files from the tracking function, calculates
MSDs and features and assembles them into a single msd csv file and a single
features csv file. The workflow looks something like this:
|-track---|
|-track---|
(image) -split----| |--assemble_msds-----> (msd/feature files)
|-track---|
|-track---|
'''
def split(prefix, remote_folder, bucket,
          rows=4, cols=4, ores=(2048, 2048), ires=(512, 512)):
    '''Splits input image file into smaller images.

    A function based on imagej.partition_im that downloads an image from an
    S3 bucket, splits it into smaller images, and uploads these to S3.
    Designed to work with Cloudknot for parallelizable workflows. Typically,
    this function is used in conjunction with kn.tracking and
    kn.assemble_msds for a complete analysis.

    Parameters
    ----------
    prefix : string
        Prefix (everything except file extension and folder name) of image
        file to be tracked. Must be available on S3.
    remote_folder : string
        Folder name where file is contained on S3 in the bucket specified by
        'bucket'.
    bucket : string
        S3 bucket where file is contained.
    rows : int
        Number of rows to split image into.
    cols : int
        Number of columns to split image into.
    ores : tuple of int
        Original resolution of input image.
    ires : tuple of int
        Resolution of split images. Really just a sanity check to make sure
        you are splitting correctly.
    '''
    import os
    import diff_classifier.aws as aws
    import diff_classifier.imagej as ij

    local_folder = os.getcwd()
    filename = '{}.tif'.format(prefix)
    remote_name = remote_folder + '/' + filename
    local_name = local_folder + '/' + filename
    aws.download_s3(remote_name, local_name, bucket_name=bucket)

    # Split into rows*cols tiles; partition_im returns the tile filenames.
    names = ij.partition_im(local_name, irows=rows, icols=cols,
                            ores=ores, ires=ires)
    for name in names:
        aws.upload_s3(name, remote_folder + '/' + name, bucket_name=bucket)
        os.remove(name)
    print("Done with splitting. Should output file of name {}".format(
        remote_folder + '/' + name))

    # NOTE(review): relies on cwd == local_folder; `filename` is relative.
    os.remove(filename)
def tracking(subprefix, remote_folder, bucket, tparams,
             regress_f='regress.obj', rows=4, cols=4, ires=(512, 512)):
    '''Tracks particles in input image using Trackmate.

    A function based on imagej.track that downloads the image from S3, tracks
    particles using Trackmate, and uploads the resulting trajectory file to
    S3. Designed to work with Cloudknot for parallelizable workflows.
    Typically, this function is used in conjunction with kn.split and
    kn.assemble_msds for a complete analysis.

    Parameters
    ----------
    subprefix : string
        Prefix (everything except file extension and folder name) of image
        file to be tracked. Must be available on S3.
    remote_folder : string
        Folder name where file is contained on S3 in the bucket specified by
        'bucket'.
    bucket : string
        S3 bucket where file is contained.
    tparams : dict
        Dictionary containing tracking parameters to Trackmate analysis.
    regress_f : string
        Name of regress object used to predict quality parameter.
    rows : int
        Number of rows the image was split into.
    cols : int
        Number of columns the image was split into.
    ires : tuple of int
        Resolution of split images. Really just a sanity check to make sure
        you are splitting correctly.
    '''
    import os
    import os.path as op
    from sklearn.externals import joblib
    import diff_classifier.aws as aws
    import diff_classifier.imagej as ij

    local_folder = os.getcwd()
    outfile = 'Traj_' + subprefix + '.csv'
    local_im = op.join(local_folder, '{}.tif'.format(subprefix))
    # Tile grid coordinates are encoded as the last two underscore fields.
    row = int(subprefix.split('_')[-2])

    # Load the pre-trained regressor that predicts the Trackmate quality
    # cutoff for this image.
    aws.download_s3(remote_folder + '/' + regress_f, regress_f,
                    bucket_name=bucket)
    with open(regress_f, 'rb') as fp:
        regress = joblib.load(fp)

    aws.download_s3('{}/{}'.format(remote_folder,
                                   '{}.tif'.format(subprefix)),
                    local_im, bucket_name=bucket)
    tparams['quality'] = ij.regress_tracking_params(
        regress, subprefix, regmethod='PassiveAggressiveRegressor')

    # NOTE(review): bottom-row tiles get their y-range clipped by 27 px —
    # presumably to cut an edge artifact; confirm against imaging setup.
    if row == rows - 1:
        tparams['ydims'] = (tparams['ydims'][0], ires[1] - 27)

    ij.track(local_im, outfile, template=None, fiji_bin=None,
             tparams=tparams)
    aws.upload_s3(outfile, remote_folder + '/' + outfile, bucket_name=bucket)
    print("Done with tracking. Should output file of name {}".format(
        remote_folder + '/' + outfile))
def assemble_msds(prefix, remote_folder, bucket,
                  ires=(512, 512), frames=651):
    '''Calculates MSDs and features from input trajectory files

    A function based on msd.all_msds2 and features.calculate_features,
    creates msd and feature csv files from input trajectory files and uploads
    to S3. Designed to work with Cloudknot for parallelizable workflows.
    Typically, this function is used in conjunction with kn.split and
    kn.tracking for an entire workflow.

    Parameters
    ----------
    prefix : string
        Prefix (everything except file extension and folder name) of image
        file to be tracked. Must be available on S3.
    remote_folder : string
        Folder name where file is contained on S3 in the bucket specified by
        'bucket'.
    bucket : string
        S3 bucket where file is contained.
    ires : tuple of int
        Resolution of split images. Really just a sanity check to make sure
        you are splitting correctly.
    frames : int
        Number of frames in input videos.
    '''
    import os
    import boto3
    import diff_classifier.aws as aws
    import diff_classifier.msd as msd
    import diff_classifier.features as ft
    import diff_classifier.utils as ut

    msd_file = 'msd_{}.csv'.format(prefix)
    ft_file = 'features_{}.csv'.format(prefix)

    s3 = boto3.client('s3')
    all_objects = s3.list_objects(Bucket=bucket,
                                  Prefix='{}/{}_'.format(remote_folder,
                                                         prefix))

    # Discover tile names and the grid extent from the uploaded objects.
    # BUG FIX: `rows`/`cols` were read (`if row > rows:`) before any
    # assignment, raising UnboundLocalError; start the running maxima at -1
    # so the first tile initializes them.
    names = []
    rows = -1
    cols = -1
    for entry in all_objects['Contents']:
        name = entry['Key'].split('/')[1]
        names.append(name)
        row = int(name.split(prefix)[1].split('.')[0].split('_')[1])
        col = int(name.split(prefix)[1].split('.')[0].split('_')[2])
        if row > rows:
            rows = row
        if col > cols:
            cols = col
    rows = rows + 1
    cols = cols + 1

    counter = 0
    for name in names:
        row = int(name.split(prefix)[1].split('.')[0].split('_')[1])
        col = int(name.split(prefix)[1].split('.')[0].split('_')[2])
        filename = "Traj_{}_{}_{}.csv".format(prefix, row, col)
        aws.download_s3(remote_folder + '/' + filename, filename,
                        bucket_name=bucket)
        local_name = filename

        # Shift tile-local coordinates into the full-image frame; y is
        # flipped because image rows count down while tracks count up.
        to_add = ut.csv_to_pd(local_name)
        to_add['X'] = to_add['X'] + ires[0]*col
        to_add['Y'] = ires[1] - to_add['Y'] + ires[1]*(rows-1-row)
        if counter == 0:
            merged = msd.all_msds2(to_add, frames=frames)
        else:
            # Offset Track_IDs so they stay unique across tiles.
            if merged.shape[0] > 0:
                to_add['Track_ID'] = to_add['Track_ID'
                                            ] + max(merged['Track_ID']) + 1
            merged = merged.append(msd.all_msds2(to_add, frames=frames))
        print('Done calculating MSDs for row {} and col {}'.format(row,
                                                                   col))
        counter = counter + 1

    merged.to_csv(msd_file)
    aws.upload_s3(msd_file, remote_folder + '/' + msd_file,
                  bucket_name=bucket)
    merged_ft = ft.calculate_features(merged)
    merged_ft.to_csv(ft_file)
    aws.upload_s3(ft_file, remote_folder + '/' + ft_file, bucket_name=bucket)

    # Clean up local scratch files.
    os.remove(ft_file)
    os.remove(msd_file)
    for name in names:
        outfile = 'Traj_' + name.split('.')[0] + '.csv'
        os.remove(outfile)
def split_track_msds(prefix, remote_folder, bucket, tparams,
                     rows=4, cols=4, ores=(2048, 2048), ires=(512, 512),
                     to_split=False, regress_f='regress.obj', frames=651):
    '''Splits images, track particles, and calculates MSDs

    Convenience wrapper for the complete Cloudknot workflow: optionally
    split the source image into tiles, run Trackmate on every tile, then
    assemble MSDs and features from all trajectory files.

    Parameters
    ----------
    prefix : string
        Prefix (everything except file extension and folder name) of image
        file to be tracked. Must be available on S3.
    remote_folder : string
        Folder name where file is contained on S3 in the bucket specified by
        'bucket'.
    bucket : string
        S3 bucket where file is contained.
    tparams : dict
        Dictionary containing tracking parameters to Trackmate analysis.
    rows : int
        Number of rows to split image into.
    cols : int
        Number of columns to split image into.
    ores : tuple of int
        Original resolution of input image.
    ires : tuple of int
        Resolution of split images. Really just a sanity check to make sure
        you are splitting correctly.
    to_split : bool
        If True, will perform image splitting.
    regress_f : string
        Name of regress object used to predict quality parameter.
    frames : int
        Number of frames in input videos.
    '''
    if to_split:
        split(prefix=prefix, remote_folder=remote_folder, bucket=bucket,
              rows=rows, cols=cols, ores=ores, ires=ires)

    # One sub-image per (row, col) cell of the tile grid.
    subprefixes = ['{}_{}_{}'.format(prefix, row, col)
                   for row in range(rows) for col in range(cols)]
    for subprefix in subprefixes:
        tracking(subprefix=subprefix, remote_folder=remote_folder,
                 bucket=bucket, regress_f=regress_f, rows=rows, cols=cols,
                 ires=ires, tparams=tparams)

    assemble_msds(prefix=prefix, remote_folder=remote_folder, bucket=bucket,
                  ires=ires, frames=frames)
# def sensitivity_it(counter):
# '''Performs sensitivity analysis on single input image
#
# An example function (not designed for re-use) of a sensitivity analysis that
# demonstrates the impact of input tracking parameters on output MSDs and
# features.
#
# '''
#
# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
# import diff_classifier.aws as aws
# import diff_classifier.utils as ut
# import diff_classifier.msd as msd
# import diff_classifier.features as ft
# import diff_classifier.imagej as ij
# import diff_classifier.heatmaps as hm
#
# from scipy.spatial import Voronoi
# import scipy.stats as stats
# from shapely.geometry import Point
# from shapely.geometry.polygon import Polygon
# import matplotlib.cm as cm
# import os
# import os.path as op
# import numpy as np
# import numpy.ma as ma
# import pandas as pd
# import boto3
# import itertools
#
# # Sweep parameters
# # ----------------------------------
# radius = [4.5, 6.0, 7.0]
# do_median_filtering = [True, False]
# quality = [1.5, 4.5, 8.5]
# linking_max_distance = [6.0, 10.0, 15.0]
# gap_closing_max_distance = [6.0, 10.0, 15.0]
# max_frame_gap = [1, 2, 5]
# track_displacement = [0.0, 10.0, 20.0]
#
# sweep = [radius, do_median_filtering, quality, linking_max_distance,
# gap_closing_max_distance, max_frame_gap, track_displacement]
# all_params = list(itertools.product(*sweep))
#
# # Variable prep
# # ----------------------------------
# s3 = boto3.client('s3')
#
# folder = '01_18_Experiment'
# s_folder = '{}/sensitivity'.format(folder)
# local_folder = '.'
# prefix = "P1_S1_R_0001_2_2"
# name = "{}.tif".format(prefix)
# local_im = op.join(local_folder, name)
# aws.download_s3('{}/{}/{}.tif'.format(folder, prefix.split('_')[0], prefix),
# '{}.tif'.format(prefix))
#
# outputs = np.zeros((len(all_params), len(all_params[0])+2))
#
# # Tracking and calculations
# # ------------------------------------
# params = all_params[counter]
# outfile = 'Traj_{}_{}.csv'.format(name.split('.')[0], counter)
# msd_file = 'msd_{}_{}.csv'.format(name.split('.')[0], counter)
# geo_file = 'geomean_{}_{}.csv'.format(name.split('.')[0], counter)
# geoS_file = 'geoSEM_{}_{}.csv'.format(name.split('.')[0], counter)
# msd_image = 'msds_{}_{}.png'.format(name.split('.')[0], counter)
# iter_name = "{}_{}".format(prefix, counter)
#
# ij.track(local_im, outfile, template=None, fiji_bin=None, radius=params[0], threshold=0.,
# do_median_filtering=params[1], quality=params[2], x=511, y=511, ylo=1, median_intensity=300.0, snr=0.0,
# linking_max_distance=params[3], gap_closing_max_distance=params[4], max_frame_gap=params[5],
# track_displacement=params[6])
#
# traj = ut.csv_to_pd(outfile)
# msds = msd.all_msds2(traj, frames=651)
# msds.to_csv(msd_file)
# gmean1, gSEM1 = hm.plot_individual_msds(iter_name, alpha=0.05)
# np.savetxt(geo_file, gmean1, delimiter=",")
# np.savetxt(geoS_file, gSEM1, delimiter=",")
#
# aws.upload_s3(outfile, '{}/{}'.format(s_folder, outfile))
# aws.upload_s3(msd_file, '{}/{}'.format(s_folder, msd_file))
# aws.upload_s3(geo_file, '{}/{}'.format(s_folder, geo_file))
# aws.upload_s3(geoS_file, '{}/{}'.format(s_folder, geoS_file))
# aws.upload_s3(msd_image, '{}/{}'.format(s_folder, msd_image))
#
# print('Successful parameter calculations for {}'.format(iter_name))
| [
"ccurtis7@uw.edu"
] | ccurtis7@uw.edu |
dfa52f8f4a5c08260ca6f9c4014300383b6ab5f7 | dd9571236f35807e130bb987b4f1f5f0b2676efb | /users/admin_user_api.py | 41fce13a4ea094ff16f8ec70ab22cde148d74c67 | [] | no_license | sekhorroy/bysterdjango | 58337e6ac1191ae945fcbd2ec1c47229e598a570 | fd016bcc3414875cd874a3c69733722815a84e05 | refs/heads/master | 2022-12-13T12:49:04.802319 | 2020-09-06T06:28:50 | 2020-09-06T06:28:50 | 292,861,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | from rest_framework.exceptions import ValidationError
from rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from users.admin_serializer import AdminUserSerializer, AdminLoginSerializer
from users.models import MtAdminUser as Admin
class UserPagination(LimitOffsetPagination):
    # Limit/offset pagination for user listings: 10 per page by default,
    # capped at 100.
    # NOTE(review): not referenced by any view in this module — confirm it
    # is wired up via settings or intended for UserListView.
    default_limit = 10
    max_limit = 100
class CreateAdminUser(CreateAPIView):
    """Registers a new admin user; caller must already be authenticated."""
    # Allow authenticated users to hit this endpoint
    permission_classes = (IsAuthenticated, )
    serializer_class = AdminUserSerializer

    def post(self, request):
        """Validate the payload, create the user and return 201."""
        # Deserialize and validate; raises a 400 ValidationError when the
        # data does not match the serializer fields.
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        status_code = status.HTTP_201_CREATED
        response = {
            'success': 'True',
            # BUG FIX: key was misspelled 'statuc code'; renamed to
            # 'status_code' for consistency with AdminLogin's response.
            'status_code': status_code,
            'message': 'User registered successfully'
        }
        return Response(response, status=status_code)
class AdminLogin(RetrieveUpdateDestroyAPIView):
permission_classes = (AllowAny, )
serializer_class = AdminLoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success' : 'True',
'status_code' : status.HTTP_200_OK,
'firstname' : serializer.data['first_name'],
'lastname' : serializer.data['last_name'],
'email' : serializer.data['email'],
'token' : serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserListView(ListAPIView):
permission_classes=(IsAuthenticated, )
queryset = Admin.objects.all()
serializer_class = AdminUserSerializer
| [
"apple@Apples-MacBook-Pro.local"
] | apple@Apples-MacBook-Pro.local |
6232ebe6eff361b09c7c59f0adc47853d7dff360 | 5fade7b0061d0e1210c34eae7c5061efeb7e4723 | /cscWebsite/news_events/views.py | 516dc7cf1cb8828f8d3e22113efc7ce15df65987 | [] | no_license | Mbobby/ComputerScience-MathematicsWebsite | ed9e02a699452f57b8185b300bbe73f0ac47f3b3 | 13bff4669fa9172cd2a3527b3f1dcea864446c36 | refs/heads/master | 2021-01-22T17:40:05.149451 | 2017-04-25T16:14:15 | 2017-04-25T16:14:15 | 85,029,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | from django.shortcuts import render
from models import Events, News
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def index(request):
events = Events.objects.order_by('event_date')[:3]
news = News.objects.order_by('-pub_date')[:3]
context = {"news": news, "events": events}
return render(request, "news_events/index.html", context)
def events_index(request):
events = Events.objects.order_by('event_date')
paginator = Paginator(events, 10) # Show 10 events per page
page = request.GET.get("page")
try:
events = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
events = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
events = paginator.page(paginator.num_pages)
context = {'events': events}
return render(request, "news_events/events_index.html", context)
def events_detail(request, event_id):
try:
event = Events.objects.get(id=event_id)
except Events.DoesNotExist:
raise Http404("The event does not exist")
context = {"event": event}
return render(request, "news_events/events_detail.html", context)
def news_index(request):
news = News.objects.order_by('-pub_date')
paginator = Paginator(news, 10) # Show 10 news per page
page = request.GET.get("page")
try:
news = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
news = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
news = paginator.page(paginator.num_pages)
context = {'news': news}
return render(request, "news_events/news_index.html", context)
def news_detail(request, news_id):
try:
news = News.objects.get(id=news_id)
except News.DoesNotExist:
raise Http404("The news does not exist")
context = {"news": news}
return render(request, "news_events/news_detail.html", context) | [
"mongemmanuel@gmail.com"
] | mongemmanuel@gmail.com |
fd89b6b80d768122ff62323673921e21f05b7c1b | 8f6224be647a7b3c46ff337c0c4c7d6162b52b63 | /2017/Round 1C/baby.py | a99dd144cb76093df3296017b8ce4ca324759066 | [] | no_license | samMeow/googleCodeJam | 6948728416e4480da28dcbbe54c28dc12756d8bc | 18917c0a3f5f0542c76a7c451f42688319606bfc | refs/heads/master | 2022-06-14T05:44:03.130884 | 2022-06-04T03:04:56 | 2022-06-04T03:04:56 | 87,523,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | import sys
from itertools import repeat
from functools import reduce
import math
if len(sys.argv) < 3:
print("Usage `python3 base input_file output_file`")
sys.exit(1)
def readFile(filename):
data = []
ac_datum = []
aj_datum = []
with open(filename) as f:
line = f.readline()
ac_datum_line = 0
aj_datum_line = 0
need = 0
for datum in f:
if ac_datum_line == 0 and aj_datum_line == 0:
if ac_datum or aj_datum: data.append([ac_datum, aj_datum])
[ac_datum_line, aj_datum_line] = [int(x)for x in list(datum.split(' '))]
ac_datum = []
aj_datum = []
elif ac_datum_line != 0:
ac_datum.append(datum.strip('\n'))
ac_datum_line -= 1
else:
aj_datum.append(datum.strip('\n'))
aj_datum_line -= 1
if ac_datum or aj_datum: data.append([ac_datum, aj_datum])
f.close()
return data
def processDatum(datum):
[ac, aj] = datum
ac_time = [ [int(x) for x in time.split(' ')]for time in ac]
aj_time = [ [int(x) for x in time.split(' ')]for time in aj]
ac_time = sorted(ac_time, key=lambda x: x[0])
aj_time = sorted(aj_time, key=lambda x: x[0])
ac_remain = 720 - sum([ time[1] - time[0] for time in aj_time])
aj_remain = 720 - sum([ time[1] - time[0] for time in ac_time])
print(ac_remain, aj_remain)
merge_time = [ ['j', time] for time in ac_time ] + [ ['c', time] for time in aj_time ]
merge_time = sorted(merge_time, key=lambda x: x[1])
print(merge_time)
return datum
def outputFormat(case, result):
return 'Case #{0}: {1}\n'.format(case + 1, result)
def writeFile(filename, data):
fout = open(filename, "w")
for idx, val in enumerate(data):
fout.write(outputFormat(idx, val))
fout.close()
def main():
data = readFile(sys.argv[1]);
writeFile(sys.argv[2], list(map(processDatum, data)))
if __name__ == "__main__":
main();
| [
"kwokyinlun@gmail.com"
] | kwokyinlun@gmail.com |
2e71f64795ff3a59b5427e3fc329aaf19605ff01 | 9037e63a73d0f2e9f239351472d856572a6992c3 | /mean_teacher/architectures.py | 5471de49836c5638e40ebb5dc9d20e37fa410e24 | [] | no_license | huhengtong/one-bit-supervision | cb84c8c063b5d4defd43e237dba27392c7dcc4aa | 7e74114af6e8b887b54df50c3829c2ce7a0627e2 | refs/heads/main | 2023-05-02T00:49:10.232444 | 2021-05-21T03:14:56 | 2021-05-21T03:14:56 | 303,673,704 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,007 | py | import sys
import math
import itertools
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable, Function
from .utils import export, parameter_count
@export
def cifar_shakeshake26(pretrained=False, **kwargs):
assert not pretrained
model = ResNet32x32(ShakeShakeBlock,
layers=[4, 4, 4],
channels=96,
downsample='shift_conv', **kwargs)
return model
@export
def resnext152(pretrained=False, **kwargs):
assert not pretrained
model = ResNet224x224(BottleneckBlock,
layers=[3, 8, 36, 3],
channels=32 * 4,
groups=32,
downsample='basic', **kwargs)
return model
class ResNet224x224(nn.Module):
def __init__(self, block, layers, channels, groups=1, num_classes=1000, downsample='basic'):
super().__init__()
assert len(layers) == 4
self.downsample_mode = downsample
self.inplanes = 64
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, channels, groups, layers[0])
self.layer2 = self._make_layer(
block, channels * 2, groups, layers[1], stride=2)
self.layer3 = self._make_layer(
block, channels * 4, groups, layers[2], stride=2)
self.layer4 = self._make_layer(
block, channels * 8, groups, layers[3], stride=2)
#self.avgpool = nn.AvgPool2d(7)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Linear(block.out_channels(
channels * 8, groups), num_classes)
self.fc2 = nn.Linear(block.out_channels(
channels * 8, groups), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, groups, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != block.out_channels(planes, groups):
if self.downsample_mode == 'basic' or stride == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, block.out_channels(planes, groups),
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(block.out_channels(planes, groups)),
)
elif self.downsample_mode == 'shift_conv':
downsample = ShiftConvDownsample(in_channels=self.inplanes,
out_channels=block.out_channels(planes, groups))
else:
assert False
layers = []
layers.append(block(self.inplanes, planes, groups, stride, downsample))
self.inplanes = block.out_channels(planes, groups)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.fc1(x), self.fc2(x)
class ResNet32x32(nn.Module):
def __init__(self, block, layers, channels, groups=1, num_classes=1000, downsample='basic'):
super().__init__()
assert len(layers) == 3
self.downsample_mode = downsample
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1,
padding=1, bias=False)
self.layer1 = self._make_layer(block, channels, groups, layers[0])
self.layer2 = self._make_layer(
block, channels * 2, groups, layers[1], stride=2)
self.layer3 = self._make_layer(
block, channels * 4, groups, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8)
#print(channels * 4, groups)
self.fc1 = nn.Linear(block.out_channels(
channels * 4, groups), num_classes)
self.fc2 = nn.Linear(block.out_channels(
channels * 4, groups), num_classes)
# self.fc1 = nn.Linear(channels * 4 * 4, num_classes)
# self.fc2 = nn.Linear(channels * 4 * 4, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, groups, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != block.out_channels(planes, groups):
if self.downsample_mode == 'basic' or stride == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, block.out_channels(planes, groups),
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(block.out_channels(planes, groups)),
)
elif self.downsample_mode == 'shift_conv':
downsample = ShiftConvDownsample(in_channels=self.inplanes,
out_channels=block.out_channels(planes, groups))
else:
assert False
layers = []
layers.append(block(self.inplanes, planes, groups, stride, downsample))
self.inplanes = block.out_channels(planes, groups)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.fc1(x), self.fc2(x), x
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BottleneckBlock(nn.Module):
@classmethod
def out_channels(cls, planes, groups):
if groups > 1:
return 2 * planes
else:
return 4 * planes
def __init__(self, inplanes, planes, groups, stride=1, downsample=None):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.conv_a1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn_a1 = nn.BatchNorm2d(planes)
self.conv_a2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, groups=groups)
self.bn_a2 = nn.BatchNorm2d(planes)
self.conv_a3 = nn.Conv2d(planes, self.out_channels(
planes, groups), kernel_size=1, bias=False)
self.bn_a3 = nn.BatchNorm2d(self.out_channels(planes, groups))
self.downsample = downsample
self.stride = stride
def forward(self, x):
a, residual = x, x
a = self.conv_a1(a)
a = self.bn_a1(a)
a = self.relu(a)
a = self.conv_a2(a)
a = self.bn_a2(a)
a = self.relu(a)
a = self.conv_a3(a)
a = self.bn_a3(a)
if self.downsample is not None:
residual = self.downsample(residual)
return self.relu(residual + a)
class ShakeShakeBlock(nn.Module):
@classmethod
def out_channels(cls, planes, groups):
assert groups == 1
return planes
def __init__(self, inplanes, planes, groups, stride=1, downsample=None):
super().__init__()
assert groups == 1
self.conv_a1 = conv3x3(inplanes, planes, stride)
self.bn_a1 = nn.BatchNorm2d(planes)
self.conv_a2 = conv3x3(planes, planes)
self.bn_a2 = nn.BatchNorm2d(planes)
self.conv_b1 = conv3x3(inplanes, planes, stride)
self.bn_b1 = nn.BatchNorm2d(planes)
self.conv_b2 = conv3x3(planes, planes)
self.bn_b2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
a, b, residual = x, x, x
a = F.relu(a, inplace=False)
a = self.conv_a1(a)
a = self.bn_a1(a)
a = F.relu(a, inplace=True)
a = self.conv_a2(a)
a = self.bn_a2(a)
b = F.relu(b, inplace=False)
b = self.conv_b1(b)
b = self.bn_b1(b)
b = F.relu(b, inplace=True)
b = self.conv_b2(b)
b = self.bn_b2(b)
ab = shake(a, b, training=self.training)
if self.downsample is not None:
residual = self.downsample(x)
return residual + ab
class Shake(Function):
@classmethod
def forward(cls, ctx, inp1, inp2, training):
assert inp1.size() == inp2.size()
gate_size = [inp1.size()[0], *itertools.repeat(1, inp1.dim() - 1)]
gate = inp1.new(*gate_size)
if training:
gate.uniform_(0, 1)
else:
gate.fill_(0.5)
return inp1 * gate + inp2 * (1. - gate)
@classmethod
def backward(cls, ctx, grad_output):
grad_inp1 = grad_inp2 = grad_training = None
gate_size = [grad_output.size()[0], *itertools.repeat(1,
grad_output.dim() - 1)]
gate = Variable(grad_output.data.new(*gate_size).uniform_(0, 1))
if ctx.needs_input_grad[0]:
grad_inp1 = grad_output * gate
if ctx.needs_input_grad[1]:
grad_inp2 = grad_output * (1 - gate)
assert not ctx.needs_input_grad[2]
return grad_inp1, grad_inp2, grad_training
def shake(inp1, inp2, training=False):
return Shake.apply(inp1, inp2, training)
class ShiftConvDownsample(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(in_channels=2 * in_channels,
out_channels=out_channels,
kernel_size=1,
groups=2)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = torch.cat((x[:, :, 0::2, 0::2],
x[:, :, 1::2, 1::2]), dim=1)
x = self.relu(x)
x = self.conv(x)
x = self.bn(x)
return x
| [
"noreply@github.com"
] | noreply@github.com |
ee7c3478d0e0dfeaa6a0a7d49dbdf9812ce0fca6 | 5f676f009c06bcaad52bc97f1895eb1f55cb2133 | /Py-Solutions/Problem-14.py | 4f9beb28ca31af99080c8cf599630c5463aad0f0 | [] | no_license | MikeCalabro/euler-everywhere | b9841cacf3db00347d38add01d857a94c4d0a63e | efaaa317b4cc4326778531da28156c7ef3d497b8 | refs/heads/main | 2023-02-21T16:51:08.520838 | 2021-01-20T20:01:24 | 2021-01-20T20:01:24 | 323,172,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # Which starting number, under one million, produces the longest Collatz chain?
def longestCollatz(limit):
max_len = 0
max_starter = 0
for n in range(3,limit):
starter = n
chain_len = 1
while n > 1:
if n % 2 == 0:
n = n/2
else:
n = 3*n+1
chain_len += 1
if chain_len > max_len:
max_starter = starter
max_len = chain_len
return([max_starter, max_len])
def main():
print(longestCollatz(1000000))
if __name__ == '__main__':
main()
| [
"michael_calabro@college.harvard.edu"
] | michael_calabro@college.harvard.edu |
fa2b8e4b4b31c5976cfa0868ac3565ea5a72106b | d0da63b99ae1accdbb4167e5e70590656599bd86 | /New folder/ping_pong.py | a8b346d057d21c93c27f329cfc47d20543a657e9 | [] | no_license | chaudharykapil/ping_pong_game | 775b13913677ceb051743b42c2f865574af59b0b | 07e3f02bddba618fcbaa000d1295e39b0fda3a4b | refs/heads/master | 2023-04-12T13:25:08.753125 | 2021-04-30T19:34:01 | 2021-04-30T19:34:01 | 300,169,065 | 1 | 0 | null | 2020-10-01T06:20:14 | 2020-10-01T06:20:14 | null | UTF-8 | Python | false | false | 4,536 | py | import turtle
from pygame import mixer
import random
dx = 0
dy = 0
chng_pan = 0
usr_1_scor = 0
usr_2_scor = 0
pause = False
mixer.init()
mixer.music.load('hit.wav')
def moveBall():
global dx,dy,usr_1_scor,usr_2_scor,pause,chng_pan
lim_x,lim_y = (500/2)+30,210
curr_x,curr_y = ball.pos()
if curr_x >= lim_x:
usr_1_scor += 1
lft_scr_tur.clear()
lft_scr_tur.write((str(usr_1_scor)),font=('normal',20))
left_pan.sety(0)
right_pan.sety(0)
curr_x,curr_y = 0,0
dy,dx = 0,0
if curr_x <= -lim_x:
usr_2_scor += 1
rght_scr_tur.clear()
rght_scr_tur.write((str(usr_2_scor)),font=('normal',20))
right_pan.sety(0)
left_pan.sety(0)
curr_x,curr_y = 0,0
dy,dx = 0,0
if curr_y >= lim_y or curr_y <= -lim_y:
dy*=-1
if pause or (dy == 0 and dx == 0):
ball.setpos(curr_x,curr_y)
chng_pan = 0
else:
chng_pan = 15
ball.setpos(curr_x+dx,curr_y+dy)
def move_lft(direc):
'''
1 ->positive direction/upward
-1 ->negative direction/downward
'''
global chng_pan
curr_y = left_pan.ycor()
curr_y = curr_y + (direc*chng_pan)
if curr_y >=210-40 and not(curr_y == 209-40 and direc == 1):
curr_y=209-40
if curr_y <=-210+40 and not(curr_y == -209+40 and direc == -1):
curr_y=-209+40
left_pan.sety(curr_y)
def move_rght(direc):
'''
1 ->positive direction/upward
-1 ->negative direction/downward
'''
global chng_pan
curr_y = right_pan.ycor()
curr_y = curr_y + (direc*chng_pan)
if curr_y >=210-40 and not(curr_y ==209-40 and direc == 1):
curr_y=209-40
if curr_y <=-210+40 and not(curr_y == -209+40 and direc == -1):
curr_y=-209+40
right_pan.sety(curr_y)
def collision():
global dx,dy
ball_x,ball_y = ball.pos()
left_x,left_y = left_pan.pos()
right_x,right_y = right_pan.pos()
if ((ball_x <= left_x + 2.5 and ball_y >left_y-40) and (ball_x <= left_x - 2.5 and ball_y < left_y+40)):
mixer.music.play()
dx = float(random.randint(8,15)/10)
dy = 1.5-dx
if ((ball_x >= right_x - 2.5 and ball_y >right_y-40) and (ball_x >= right_x + 2.5 and ball_y < right_y+40)):
mixer.music.play()
dx = float(random.randint(8,15)/10)
dy = 1.5-dx
dx *=-1
def start_game():
global dx,dy
if dx == 0 and dy == 0:
dx = float(random.randint(8,15)/10)
dy = 1.5-dx
def pause_func():
global pause
pause = True
def cntinu():
global pause
pause = False
'''
Main window
'''
scr = turtle.Screen()
scr.bgcolor('black')
scr.title('ping pong')
scr.setup(550,550)
scr.tracer(0)
ball = turtle.Turtle('circle')
ball.color('white')
ball.shapesize(0.7,0.7)
ball.penup()
ball.speed(0)
lft_scr_tur = turtle.Turtle('blank')
lft_scr_tur.setpos(-50,230)
lft_scr_tur.color('white')
lft_scr_tur.write((str(usr_1_scor)),font=('normal',20))
rght_scr_tur = turtle.Turtle('blank')
rght_scr_tur.setpos(50,230)
rght_scr_tur.color('white')
rght_scr_tur.write((str(usr_2_scor)),font=('normal',20))
strt_lbl = turtle.Turtle('blank')
strt_lbl.setpos(-100,-100)
strt_lbl.color('white')
left_pan = turtle.Turtle('square')
left_pan.resizemode('user')
left_pan.penup()
left_pan.color('white')
left_pan.shapesize(4,0.5)
left_pan.setpos((-240,0))
left_pan.speed(0)
right_pan = turtle.Turtle('square')
right_pan.penup()
right_pan.color('white')
right_pan.shapesize(4,0.5)
right_pan.setpos((240,0))
left_pan.speed(0)
line_tur = turtle.Turtle('blank')
line_tur.setpos(0,210)
line_tur.color('white')
line_tur.goto(0,-210)
line_tur.goto(225,-210)
line_tur.goto(225,210)
line_tur.goto(-225,210)
line_tur.goto(-225,-210)
line_tur.goto(0,-210)
#bind the keyboard
scr.listen()
scr.onkeypress(lambda:move_lft(1),'w')
scr.onkeypress(lambda:move_lft(-1),'s')
scr.onkeypress(lambda:move_rght(1),'Up')
scr.onkeypress(lambda:move_rght(-1),'Down')
scr.onkeypress(lambda:start_game(),'space')
scr.onkeypress(pause_func,'p')
scr.onkeypress(cntinu,'c')
while True:
try:
moveBall()
collision()
scr.update()
if dx == 0 and dy == 0:
chng_pan = 0
strt_lbl.write('Press Space to start\nPress \'p\' to pause\nPress \'c\' to continue',font=('normal',20))
else:
strt_lbl.clear()
except:
break
| [
"noreply@github.com"
] | noreply@github.com |
f5ba807cf4377fe11e6a9eac40676eed893527a6 | fe1349a9bd25586f830f2a44618a4012ea20184a | /stanford_tf_research/01_plot_histogram_random.py | 838a63c687196773d418188816a03661ad3095dc | [] | no_license | EmbraceLife/LIE | cdca29b8308f2cd7740743cea379a72d7bde51db | 8c30b6aabc5842092c18dd97a0c20aa19f62000f | refs/heads/master | 2022-12-04T05:56:37.393552 | 2017-08-16T04:54:55 | 2017-08-16T04:54:55 | 87,597,172 | 4 | 3 | null | 2022-11-26T15:26:45 | 2017-04-08T00:39:27 | Python | UTF-8 | Python | false | false | 1,477 | py | """
=========================================================
Demo of the histogram (hist) function with a few features
=========================================================
In addition to the basic histogram, this demo shows a few optional
features:
* Setting the number of data bins
* The ``normed`` flag, which normalizes bin heights so that the
integral of the histogram is 1. The resulting histogram is an
approximation of the probability density function.
* Setting the face color of the bars
* Setting the opacity (alpha value).
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
np.random.seed(0)
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(437)
num_bins = 50
fig, ax = plt.subplots()
# the histogram of the data, is add on figure
n, bins, patches = ax.hist(x, num_bins, normed=1)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
ax.plot(bins, y, '--')
# set labels, title
ax.set_xlabel('Smarts')
ax.set_ylabel('Probability density')
ax.set_title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
| [
"1227561934@qq.com"
] | 1227561934@qq.com |
7f47bf2696c9a907c2e8401c4fb244db09781782 | 7810e57e7851f4c2559faa114c84260d6fd69673 | /tutorial/urls.py | 48c3a7db603504468902c84261539df43d25bb1a | [] | no_license | juliakimchung/rest-framework-tutorial | ae40c4f990118d15c2d88557831025d6f8d1c5f8 | 9dc1941d5d2117e75daca5ed3f58407c2e43a7cc | refs/heads/master | 2021-01-11T18:46:16.163371 | 2017-01-21T04:03:19 | 2017-01-21T04:03:19 | 79,622,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django.conf.urls import url, include
from rest_framework import routers
from tutorial.quickstart import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include('snippets.urls')),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| [
"julhkm@gmail.com"
] | julhkm@gmail.com |
1a7ee7ad25d703905a1b326105e18c566f03cf65 | d7cd51a7aaa9bd5a7c39409a39d1be1944ecb9c4 | /Assignments/Python_Stack/Django/Django_ORM/users_template/users_template/wsgi.py | 5725974a941c17bdca19fd76e2fc66d918edd371 | [] | no_license | Geneveroth/Coding_Dojo_Assignments | ae525e6d95e0f3fcf10b44a6734e8996b53ec7e1 | 9643845e237d5029de03dfe1ae2d43a49350ba22 | refs/heads/master | 2022-12-23T18:46:08.971696 | 2020-07-21T20:44:17 | 2020-07-21T20:44:17 | 251,153,510 | 0 | 0 | null | 2021-01-06T03:08:14 | 2020-03-29T23:10:09 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for users_template project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'users_template.settings')
application = get_wsgi_application()
| [
"black.samlh@gmail.com"
] | black.samlh@gmail.com |
b747a70cecd7534e41ef25e34ab1630cbfc4c525 | f18e139c88098005ebf6e71cebacca43166c79cd | /ch4/squares.py | 824e3b3eb79d880e35349345e349b42ed193a32e | [] | no_license | Levanoz2/pcc | 6b981c91be6e95ab1ecf8e576ab0771a56a03611 | 8253db8bf31100b72146650031febf26485951eb | refs/heads/master | 2023-09-05T16:23:30.935168 | 2021-11-11T18:34:06 | 2021-11-11T18:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | squares = []
for value in list(range(1, 11)):
square = value ** 2
# print(f"{value} squared is equal to {square}")
# print(f"adding {square} to squares")
squares.append(square)
# print('squares = ' + str(squares))
print(squares) | [
"gabesfordev@gmail.com"
] | gabesfordev@gmail.com |
648e5ca36c4d9b01db5a8637ad045c23b07bf7f6 | 80aabbd44790ec4feee93624f61c29e87d691d6a | /drawBot/ui/drawView.py | 24fac94c74d4a3c9c44d2a34358e011c780327b5 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | asaumierdemers/drawbot | 546961ead63f71859725a87f190f7ebbd45995f2 | 9ba1ef902bdd5c8e291d5d6835e09f05bfa00261 | refs/heads/master | 2020-12-25T19:59:00.391766 | 2016-08-05T10:04:57 | 2016-08-05T10:04:57 | 29,844,501 | 0 | 0 | null | 2015-01-26T04:12:30 | 2015-01-26T04:12:30 | null | UTF-8 | Python | false | false | 2,955 | py | from AppKit import *
from Quartz import PDFView, PDFThumbnailView, PDFDocument
from vanilla import Group
epsPasteBoardType = "CorePasteboardFlavorType 0x41494342"
class DrawBotPDFThumbnailView(PDFThumbnailView):
def draggingUpdated_(self, draggingInfo):
return NSDragOperationNone
class ThumbnailView(Group):
nsViewClass = DrawBotPDFThumbnailView
def setDrawView(self, view):
self.getNSView().setPDFView_(view.getNSView())
def getSelection(self):
try:
# sometimes this goes weirdly wrong...
selection = self.getNSView().selectedPages()
except:
return -1
if selection:
for page in selection:
document = page.document()
index = document.indexForPage_(page)
return index
return -1
class DrawBotPDFView(PDFView):
def performKeyEquivalent_(self, event):
# catch a bug in PDFView
# cmd + ` causes a traceback
# DrawBot[15705]: -[__NSCFConstantString characterAtIndex:]: Range or index out of bounds
try:
return super(DrawBotPDFView, self).performKeyEquivalent_(event)
except:
return False
class DrawView(Group):
nsViewClass = DrawBotPDFView
def __init__(self, posSize):
super(DrawView, self).__init__(posSize)
pdfView = self.getNSView()
pdfView.setAutoScales_(True)
view = pdfView.documentView()
scrollview = view.enclosingScrollView()
scrollview.setBorderType_(NSBezelBorder)
def get(self):
pdf = self.getNSView().document()
if pdf is None:
return None
return pdf.dataRepresentation()
def set(self, pdfData):
pdf = PDFDocument.alloc().initWithData_(pdfData)
self.setPDFDocument(pdf)
def setPath(self, path):
url = NSURL.fileURLWithPath_(path)
document = PDFDocument.alloc().initWithURL_(url)
self.setPDFDocument(document)
def setPDFDocument(self, document):
if document is None:
document = PDFDocument.alloc().init()
self.getNSView().setDocument_(document)
def getPDFDocument(self):
return self.getNSView().document()
def setScale(self, scale):
self.getNSView().setScaleFactor_(scale)
def scale(self):
return self.getNSView().scaleFactor()
def scrollDown(self):
document = self.getNSView().documentView()
document.scrollPoint_((0, 0))
def scrollToPageIndex(self, index):
pdf = self.getPDFDocument()
if pdf is None:
self.scrollDown()
elif 0 <= index < pdf.pageCount():
try:
# sometimes this goes weirdly wrong...
page = pdf.pageAtIndex_(index)
self.getNSView().goToPage_(page)
except:
self.scrollDown()
else:
self.scrollDown()
| [
"frederik@typemytype.com"
] | frederik@typemytype.com |
b24bb4d5da2b1cc530f38ea45051ecb301423349 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/explosion_spaCy/spaCy-master/spacy/language.py | bebdeab20b61bc4446c9cf4acb5b82d330363308 | [
"MIT"
] | permissive | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 15,022 | py | from __future__ import absolute_import
from __future__ import unicode_literals
from warnings import warn
import pathlib
from contextlib import contextmanager
import shutil
import ujson as json
try:
basestring
except NameError:
basestring = str
from .tokenizer import Tokenizer
from .vocab import Vocab
from .tagger import Tagger
from .matcher import Matcher
from . import attrs
from . import orth
from . import util
from . import language_data
from .lemmatizer import Lemmatizer
from .train import Trainer
from .attrs import TAG, DEP, ENT_IOB, ENT_TYPE, HEAD, PROB, LANG, IS_STOP
from .syntax.parser import get_templates
from .syntax.nonproj import PseudoProjectivity
from .pipeline import DependencyParser, EntityRecognizer
from .syntax.arc_eager import ArcEager
from .syntax.ner import BiluoPushDown
class BaseDefaults(object):
@classmethod
def create_lemmatizer(cls, nlp=None):
if nlp is None or nlp.path is None:
return Lemmatizer({}, {}, {})
else:
return Lemmatizer.load(nlp.path, rules=cls.lemma_rules)
@classmethod
def create_vocab(cls, nlp=None):
lemmatizer = cls.create_lemmatizer(nlp)
if nlp is None or nlp.path is None:
lex_attr_getters = dict(cls.lex_attr_getters)
# This is very messy, but it's the minimal working fix to Issue #639.
# This defaults stuff needs to be refactored (again)
lex_attr_getters[IS_STOP] = lambda string: string.lower() in cls.stop_words
return Vocab(lex_attr_getters=lex_attr_getters, tag_map=cls.tag_map,
lemmatizer=lemmatizer)
else:
return Vocab.load(nlp.path, lex_attr_getters=cls.lex_attr_getters,
tag_map=cls.tag_map, lemmatizer=lemmatizer)
@classmethod
def add_vectors(cls, nlp=None):
if nlp is None or nlp.path is None:
return False
else:
vec_path = nlp.path / 'vocab' / 'vec.bin'
if vec_path.exists():
return lambda vocab: vocab.load_vectors_from_bin_loc(vec_path)
@classmethod
def create_tokenizer(cls, nlp=None):
rules = cls.tokenizer_exceptions
if cls.token_match:
token_match = cls.token_match
if cls.prefixes:
prefix_search = util.compile_prefix_regex(cls.prefixes).search
else:
prefix_search = None
if cls.suffixes:
suffix_search = util.compile_suffix_regex(cls.suffixes).search
else:
suffix_search = None
if cls.infixes:
infix_finditer = util.compile_infix_regex(cls.infixes).finditer
else:
infix_finditer = None
vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
return Tokenizer(vocab, rules=rules,
prefix_search=prefix_search, suffix_search=suffix_search,
infix_finditer=infix_finditer, token_match=token_match)
@classmethod
def create_tagger(cls, nlp=None):
if nlp is None:
return Tagger(cls.create_vocab(), features=cls.tagger_features)
elif nlp.path is False:
return Tagger(nlp.vocab, features=cls.tagger_features)
elif nlp.path is None or not (nlp.path / 'pos').exists():
return None
else:
return Tagger.load(nlp.path / 'pos', nlp.vocab)
@classmethod
def create_parser(cls, nlp=None, **cfg):
if nlp is None:
return DependencyParser(cls.create_vocab(), features=cls.parser_features,
**cfg)
elif nlp.path is False:
return DependencyParser(nlp.vocab, features=cls.parser_features, **cfg)
elif nlp.path is None or not (nlp.path / 'deps').exists():
return None
else:
return DependencyParser.load(nlp.path / 'deps', nlp.vocab, **cfg)
@classmethod
def create_entity(cls, nlp=None, **cfg):
if nlp is None:
return EntityRecognizer(cls.create_vocab(), features=cls.entity_features, **cfg)
elif nlp.path is False:
return EntityRecognizer(nlp.vocab, features=cls.entity_features, **cfg)
elif nlp.path is None or not (nlp.path / 'ner').exists():
return None
else:
return EntityRecognizer.load(nlp.path / 'ner', nlp.vocab, **cfg)
@classmethod
def create_matcher(cls, nlp=None):
if nlp is None:
return Matcher(cls.create_vocab())
elif nlp.path is False:
return Matcher(nlp.vocab)
elif nlp.path is None or not (nlp.path / 'vocab').exists():
return None
else:
return Matcher.load(nlp.path / 'vocab', nlp.vocab)
@classmethod
def create_pipeline(cls, nlp=None):
    """Assemble the default processing pipeline for *nlp*.

    Returns the list of the pipeline components (tagger, parser, entity
    recognizer, in that order) that are actually present on *nlp*;
    an empty list when *nlp* is None.

    Fix: the first parameter was named ``self`` despite the
    ``@classmethod`` decorator; renamed to ``cls`` (positional/implicit,
    so no caller is affected).  The dead initial ``pipeline = []``
    assignment before the early return is gone.
    """
    if nlp is None:
        return []
    # Keep only the components that exist; order matters (tag -> parse -> ner).
    return [proc for proc in (nlp.tagger, nlp.parser, nlp.entity) if proc]
# --- Class-level defaults shared by all language subclasses ---------------
# Tokenizer configuration drawn from the shared language_data module.
token_match = language_data.TOKEN_MATCH
prefixes = tuple(language_data.TOKENIZER_PREFIXES)
suffixes = tuple(language_data.TOKENIZER_SUFFIXES)
infixes = tuple(language_data.TOKENIZER_INFIXES)
tag_map = dict(language_data.TAG_MAP)
# Special-case tokenization rules; empty in the base defaults.
tokenizer_exceptions = {}
# Feature templates for the statistical models.
parser_features = get_templates('parser')
entity_features = get_templates('ner')
tagger_features = Tagger.feature_templates # TODO -- fix this
stop_words = set()
lemma_rules = {}
# Lexical attribute getters: attribute id -> function computing that
# attribute from the raw token string.
lex_attr_getters = {
    attrs.LOWER: lambda string: string.lower(),
    attrs.NORM: lambda string: string,
    attrs.SHAPE: orth.word_shape,
    attrs.PREFIX: lambda string: string[0],
    attrs.SUFFIX: lambda string: string[-3:],
    attrs.CLUSTER: lambda string: 0,
    attrs.IS_ALPHA: orth.is_alpha,
    attrs.IS_ASCII: orth.is_ascii,
    attrs.IS_DIGIT: lambda string: string.isdigit(),
    attrs.IS_LOWER: orth.is_lower,
    attrs.IS_PUNCT: orth.is_punct,
    attrs.IS_SPACE: lambda string: string.isspace(),
    attrs.IS_TITLE: orth.is_title,
    attrs.IS_UPPER: orth.is_upper,
    attrs.IS_BRACKET: orth.is_bracket,
    attrs.IS_QUOTE: orth.is_quote,
    attrs.IS_LEFT_PUNCT: orth.is_left_punct,
    attrs.IS_RIGHT_PUNCT: orth.is_right_punct,
    attrs.LIKE_URL: orth.like_url,
    attrs.LIKE_NUM: orth.like_number,
    attrs.LIKE_EMAIL: orth.like_email,
    # Fixed placeholder values here; presumably refined elsewhere from the
    # vocab's stop-word list / OOV handling -- TODO confirm.
    attrs.IS_STOP: lambda string: False,
    attrs.IS_OOV: lambda string: True
}
class Language(object):
    '''A text-processing pipeline. Usually you'll load this once per process, and
    pass the instance around your program.
    '''
    # Factory class used to build every pipeline component (vocab,
    # tokenizer, tagger, parser, entity recognizer, matcher).
    Defaults = BaseDefaults
    # Language identifier used to locate installed models
    # (see util.match_best_version in __init__); subclasses override it.
    lang = None
@classmethod
@contextmanager
def train(cls, path, gold_tuples, *configs):
    """Context manager that prepares *path* for training and yields a Trainer.

    Args:
        path: Directory (str or pathlib.Path) that will hold the trained
            model subdirectories ('pos', 'deps', 'ner').
        gold_tuples: Gold-standard training data.
        *configs: Exactly three config dicts, in order:
            (tagger_cfg, parser_cfg, entity_cfg).

    On exit, end_training() is called to serialize the trained models.
    """
    if isinstance(path, basestring):
        path = pathlib.Path(path)
    tagger_cfg, parser_cfg, entity_cfg = configs
    dep_model_dir = path / 'deps'
    ner_model_dir = path / 'ner'
    pos_model_dir = path / 'pos'
    # Start from clean model directories: wipe any previous output.
    if dep_model_dir.exists():
        shutil.rmtree(str(dep_model_dir))
    if ner_model_dir.exists():
        shutil.rmtree(str(ner_model_dir))
    if pos_model_dir.exists():
        shutil.rmtree(str(pos_model_dir))
    dep_model_dir.mkdir()
    ner_model_dir.mkdir()
    pos_model_dir.mkdir()
    if parser_cfg['pseudoprojective']:
        # preprocess training data here before ArcEager.get_labels() is called
        gold_tuples = PseudoProjectivity.preprocess_training_data(gold_tuples)
    # Derive the transition-system action sets from the gold parses and
    # record them in the configs that get written to disk below.
    parser_cfg['actions'] = ArcEager.get_actions(gold_parses=gold_tuples)
    entity_cfg['actions'] = BiluoPushDown.get_actions(gold_parses=gold_tuples)
    with (dep_model_dir / 'config.json').open('w') as file_:
        json.dump(parser_cfg, file_)
    with (ner_model_dir / 'config.json').open('w') as file_:
        json.dump(entity_cfg, file_)
    with (pos_model_dir / 'config.json').open('w') as file_:
        json.dump(tagger_cfg, file_)
    # Build a blank pipeline: every component is constructed fresh from
    # cls.Defaults rather than loaded from disk (hence the False flags).
    self = cls(
        path=path,
        vocab=False,
        tokenizer=False,
        tagger=False,
        parser=False,
        entity=False,
        matcher=False,
        serializer=False,
        vectors=False,
        pipeline=False)
    self.vocab = self.Defaults.create_vocab(self)
    self.tokenizer = self.Defaults.create_tokenizer(self)
    self.tagger = self.Defaults.create_tagger(self)
    self.parser = self.Defaults.create_parser(self)
    self.entity = self.Defaults.create_entity(self)
    self.pipeline = self.Defaults.create_pipeline(self)
    yield Trainer(self, gold_tuples)
    self.end_training()
def __init__(self, **overrides):
    """Create the pipeline, building each missing component from Defaults.

    Every component (vocab, tokenizer, tagger, parser, entity, matcher,
    make_doc, pipeline) can be supplied via keyword override; anything
    not supplied is created through ``self.Defaults``.  ``path`` may be a
    str/Path (load from that model directory), ``False`` (build
    everything from scratch), or omitted (locate the best installed
    model for this language).
    """
    if 'data_dir' in overrides and 'path' not in overrides:
        raise ValueError("The argument 'data_dir' has been renamed to 'path'")
    path = overrides.get('path', True)
    if isinstance(path, basestring):
        path = pathlib.Path(path)
    if path is True:
        # No explicit path: find the best installed model for this language.
        path = util.match_best_version(self.lang, '', util.get_data_path())
    self.path = path
    self.vocab = self.Defaults.create_vocab(self) \
        if 'vocab' not in overrides \
        else overrides['vocab']
    add_vectors = self.Defaults.add_vectors(self) \
        if 'add_vectors' not in overrides \
        else overrides['add_vectors']
    if self.vocab and add_vectors:
        add_vectors(self.vocab)
    self.tokenizer = self.Defaults.create_tokenizer(self) \
        if 'tokenizer' not in overrides \
        else overrides['tokenizer']
    self.tagger = self.Defaults.create_tagger(self) \
        if 'tagger' not in overrides \
        else overrides['tagger']
    self.parser = self.Defaults.create_parser(self) \
        if 'parser' not in overrides \
        else overrides['parser']
    self.entity = self.Defaults.create_entity(self) \
        if 'entity' not in overrides \
        else overrides['entity']
    self.matcher = self.Defaults.create_matcher(self) \
        if 'matcher' not in overrides \
        else overrides['matcher']
    # make_doc turns raw text into a Doc; by default just the tokenizer.
    if 'make_doc' in overrides:
        self.make_doc = overrides['make_doc']
    elif 'create_make_doc' in overrides:
        self.make_doc = overrides['create_make_doc'](self)
    elif not hasattr(self, 'make_doc'):
        self.make_doc = lambda text: self.tokenizer(text)
    if 'pipeline' in overrides:
        self.pipeline = overrides['pipeline']
    elif 'create_pipeline' in overrides:
        self.pipeline = overrides['create_pipeline'](self)
    else:
        self.pipeline = [self.tagger, self.parser, self.matcher, self.entity]
def __call__(self, text, tag=True, parse=True, entity=True):
    """Apply the pipeline to some text. The text can span multiple sentences,
    and can contain arbitrary whitespace. Alignment into the original string
    is preserved.

    Args:
        text (unicode): The text to be processed.
        tag (bool): Whether to run the part-of-speech tagger.
        parse (bool): Whether to run the dependency parser.
        entity (bool): Whether to run the entity recognizer.

    Returns:
        doc (Doc): A container for accessing the annotations.

    Example:
        >>> from spacy.en import English
        >>> nlp = English()
        >>> tokens = nlp('An example sentence. Another example sentence.')
        >>> tokens[0].orth_, tokens[0].head.tag_
        ('An', 'NN')
    """
    doc = self.make_doc(text)
    if self.entity and entity:
        # Add any of the entity labels already set, in case we don't have them.
        for token in doc:
            if token.ent_type != 0:
                self.entity.add_label(token.ent_type)
    # Map each component to whether the caller disabled it.
    skip = {self.tagger: not tag, self.parser: not parse, self.entity: not entity}
    for proc in self.pipeline:
        if proc and not skip.get(proc):
            proc(doc)
    return doc
def pipe(self, texts, tag=True, parse=True, entity=True, n_threads=2, batch_size=1000):
    """Process texts as a stream, and yield Doc objects in input order.
    Supports GIL-free multi-threading.

    Arguments:
        texts (iterator): Iterable of raw texts.
        tag (bool): Whether to run the tagger.
        parse (bool): Whether to run the parser.
        entity (bool): Whether to run the entity recognizer.
        n_threads (int): Passed through to components that support .pipe().
        batch_size (int): Passed through to components that support .pipe().
    """
    # Map each component to whether the caller disabled it.
    disabled = {self.tagger: not tag, self.parser: not parse, self.entity: not entity}
    docs = (self.make_doc(text) for text in texts)
    for component in self.pipeline:
        if not component or disabled.get(component):
            continue
        if hasattr(component, 'pipe'):
            # Components with a .pipe() method can batch / multi-thread.
            docs = component.pipe(docs, n_threads=n_threads, batch_size=batch_size)
        else:
            docs = (component(doc) for doc in docs)
    for doc in docs:
        yield doc
def end_training(self, path=None):
    """Finalize training and serialize all models and vocab data.

    Args:
        path: Output directory (str or pathlib.Path); defaults to
            ``self.path``.
    """
    if path is None:
        path = self.path
    elif isinstance(path, basestring):
        path = pathlib.Path(path)
    # Finish and dump each statistical model that is present.
    if self.tagger:
        self.tagger.model.end_training()
        self.tagger.model.dump(str(path / 'pos' / 'model'))
    if self.parser:
        self.parser.model.end_training()
        self.parser.model.dump(str(path / 'deps' / 'model'))
    if self.entity:
        self.entity.model.end_training()
        self.entity.model.dump(str(path / 'ner' / 'model'))
    strings_loc = path / 'vocab' / 'strings.json'
    with strings_loc.open('w', encoding='utf8') as file_:
        self.vocab.strings.dump(file_)
    self.vocab.dump(path / 'vocab' / 'lexemes.bin')
    # Collect attribute frequency tables; empty lists for absent components.
    if self.tagger:
        tagger_freqs = list(self.tagger.freqs[TAG].items())
    else:
        tagger_freqs = []
    if self.parser:
        dep_freqs = list(self.parser.moves.freqs[DEP].items())
        head_freqs = list(self.parser.moves.freqs[HEAD].items())
    else:
        dep_freqs = []
        head_freqs = []
    if self.entity:
        entity_iob_freqs = list(self.entity.moves.freqs[ENT_IOB].items())
        entity_type_freqs = list(self.entity.moves.freqs[ENT_TYPE].items())
    else:
        entity_iob_freqs = []
        entity_type_freqs = []
    # The serializer config records per-attribute frequency tables.
    with (path / 'vocab' / 'serializer.json').open('w') as file_:
        file_.write(
            json.dumps([
                (TAG, tagger_freqs),
                (DEP, dep_freqs),
                (ENT_IOB, entity_iob_freqs),
                (ENT_TYPE, entity_type_freqs),
                (HEAD, head_freqs)
            ]))
| [
"659338505@qq.com"
] | 659338505@qq.com |
18f6e39dba7d10b57e3c9aba5cd962eb49d61030 | 122d71f2003123cf9141860c57fcef2f8357e25e | /excel_marksheet_generator_project.py | 108db139f13fbc1b56ba0fe3fdbfd967ef782cae | [] | no_license | chaitanya-lohar/Python-programs | f1812893a53c8a525387cafc2fe33e4424cfd5e3 | 3003624ac2653cd61cae1d1971abf5f14ebb7e53 | refs/heads/master | 2022-11-13T06:03:22.570061 | 2020-07-09T08:12:37 | 2020-07-09T08:12:37 | 278,299,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import openpyxl as xl
from openpyxl.chart import BarChart,Reference
def marksheet_generate(filename, output_filename="marks2.xlsx"):
    """Read a marks workbook, write the total and percentage back into it,
    add a bar chart of the marks, and save the result.

    Args:
        filename: Input .xlsx workbook (must contain "Sheet1").
        output_filename: Where to save the augmented workbook.

    Fix: the original ignored ``filename`` and always opened the
    hard-coded "marks.xlsx"; it also shadowed the ``sum`` builtin.
    """
    wb = xl.load_workbook(filename)
    sheet = wb["Sheet1"]
    print(sheet.cell(1, 1).value)  # title/header cell A1
    total = 0
    # Column 2 rows 3..max_row-2 hold the subject marks
    # (matches the chart Reference below, rows 3-7).
    for row in range(3, sheet.max_row - 1):
        total = total + sheet.cell(row, 2).value
    print(total)
    sheet.cell(8, 2).value = total
    # NOTE(review): assumes exactly 5 subjects with marks out of 100 -- confirm.
    percentage = total / 5
    sheet.cell(9, 2).value = percentage
    print(sheet.cell(9, 2).value)
    # Chart the five mark cells next to the data.
    values = Reference(sheet, min_row=3, max_row=7, min_col=2, max_col=2)
    chart = BarChart()
    chart.add_data(values)
    sheet.add_chart(chart, "E1")
    wb.save(output_filename)

marksheet_generate("marks.xlsx")
| [
"noreply@github.com"
] | noreply@github.com |
04d6541daf0a5a782f444e495432b9f0bc9d80a1 | fcaa0395a7c6aa74cbc47c40f35fdc312e44b9c5 | /aok/comparisons/_basics.py | 30b87c970c9d3869bf7cb89261e8ca2a4506b453 | [] | no_license | rocketboosters/a-ok | b6f1a70d262123c2df5e4969a687cbcfdfbafc8c | 06f31404a4ce34d561253ba74b533ce3fb73c60c | refs/heads/main | 2023-09-02T19:18:18.158296 | 2021-11-03T01:54:36 | 2021-11-03T01:54:36 | 388,142,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,951 | py | import typing
import yaml
from aok import _definitions
from aok import _operations
class Equals(_definitions.Comparator):
    """Comparator that succeeds when expected and observed are equal."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return the result of an equality comparison."""
        # Cast the expected value into a type comparable with `observed`
        # before testing equality.
        expected = _operations.cast_compatible(self.value, observed)
        return expected == observed
class Unequals(_definitions.Comparator):
    """Comparator that succeeds when expected and observed differ."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return the result of an inequality comparison."""
        expected = _operations.cast_compatible(self.value, observed)
        return expected != observed
class Anything(_definitions.Comparator):
    """Comparator that matches any observed value."""

    def __init__(self):
        """Create an Anything comparison operation (expected value unused)."""
        super(Anything, self).__init__(None)

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Every observed value matches."""
        return True

    @classmethod
    def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Anything":
        # The YAML node carries no payload for this comparator.
        return cls()
class Less(_definitions.Comparator):
    """Comparator that succeeds when observed is strictly below expected."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return True when observed < expected."""
        expected = _operations.cast_compatible(self.value, observed)
        return expected > observed
class LessOrEqual(_definitions.Comparator):
    """Comparator that succeeds when observed is at most the expected value."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return True when observed <= expected."""
        expected = _operations.cast_compatible(self.value, observed)
        return expected >= observed
class Greater(_definitions.Comparator):
    """Comparator that succeeds when observed is strictly above expected."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return True when observed > expected."""
        expected = _operations.cast_compatible(self.value, observed)
        return expected < observed
class GreaterOrEqual(_definitions.Comparator):
    """Comparator that succeeds when observed is at least the expected value."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return True when observed >= expected."""
        expected = _operations.cast_compatible(self.value, observed)
        return expected <= observed
class Between(_definitions.Comparator):
    """Comparator that succeeds when observed lies within inclusive bounds."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Return True when min <= observed <= max."""
        lower = _operations.cast_compatible(self.value["min"], observed)
        upper = _operations.cast_compatible(self.value["max"], observed)
        return lower <= observed <= upper

    @classmethod
    def construct(cls, minimum: typing.Any, maximum: typing.Any) -> "Between":
        """Create a Between comparison operator with the specified bounds."""
        return cls({"min": minimum, "max": maximum})

    @classmethod
    def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Between":
        # Accept either a two-element sequence [min, max] or a mapping with
        # explicit "min"/"max" keys.
        if isinstance(node, yaml.SequenceNode):
            loaded = loader.construct_sequence(node, deep=True)
            return cls({"min": loaded[0], "max": loaded[1]})
        return cls(loader.construct_mapping(node, deep=True))
class OneOf(_definitions.Comparator):
    """Allows a matching comparison between any of the listed values."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Succeeds if at least one of the options are equal."""
        # One failed Comparison per option, keyed by option index, so the
        # final failure message can show every candidate that was tried.
        failures: typing.Dict[str, _definitions.Comparison] = {}
        for index, option in enumerate(self.value["options"]):
            # Plain values are wrapped in Equals; Comparator options are
            # applied as-is, allowing nested operators.
            if isinstance(option, _definitions.Comparator):
                comparator = option
            else:
                comparator = Equals(option)
            result = comparator.compare(observed, subset=subset)
            # `result` may be a Comparison (use its .success) or a plain
            # bool (use the bool itself as the verdict).
            if getattr(result, "success", result):
                return result
            if isinstance(result, _definitions.Comparison):
                failures[str(index)] = result
            else:
                # Wrap a bare boolean failure in a Comparison for reporting.
                failures[str(index)] = _definitions.Comparison(
                    operation=comparator.operation_name(),
                    success=False,
                    expected=comparator.value,
                    observed=observed,
                )
        return _definitions.Comparison(
            operation="one_of",
            success=False,
            expected=", ".join([f"({i}) {f.expected}" for i, f in failures.items()]),
            observed=observed,
        )

    @classmethod
    def construct(cls, options: typing.List[typing.Any]) -> "OneOf":
        """Create a OneOf comparison operator with the specified options."""
        return cls({"options": options})

    @classmethod
    def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "OneOf":
        # YAML form: a sequence node of allowed values/comparators.
        options = loader.construct_sequence(node, deep=True)
        return cls({"options": options})
class NoneOf(_definitions.Comparator):
    """Allows a mismatching comparison between none of the listed values."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Succeeds if none of the options are equal."""
        for index, option in enumerate(self.value["options"]):
            # Plain values are wrapped in Equals; Comparator options are
            # applied as-is, allowing nested operators.
            if isinstance(option, _definitions.Comparator):
                comparator = option
            else:
                comparator = Equals(option)
            result = comparator.compare(observed, subset=subset)
            # A successful match is a failure for NoneOf: report it with
            # the negated operation name.
            # NOTE(review): unlike OneOf, a bare boolean True result is
            # not treated as a match here (getattr default is False) --
            # confirm whether compare() can return plain bools.
            if getattr(result, "success", False):
                return _definitions.Comparison(
                    operation=f"not {result.operation}",
                    success=False,
                    expected=result.expected,
                    observed=result.observed,
                    children=result.children,
                )
        return _definitions.Comparison(
            operation="none_of",
            success=True,
            expected=self.value,
            observed=observed,
        )

    @classmethod
    def construct(cls, options: typing.List[typing.Any]) -> "NoneOf":
        """Create a NoneOf comparison operator with the specified options."""
        return cls({"options": options})

    @classmethod
    def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "NoneOf":
        # YAML form: a sequence node of disallowed values/comparators.
        options = loader.construct_sequence(node, deep=True)
        return cls({"options": options})
# Register every comparator with the YAML loader, then expose a lowercase
# constructor alias for each (falling back to the class itself when
# register() does not attach a `constructor` attribute).
Anything.register()
anything = getattr(Anything, "constructor", Anything)
Between.register()
between = getattr(Between, "constructor", Between)
Equals.register()
equals = getattr(Equals, "constructor", Equals)
Unequals.register()
unequals = getattr(Unequals, "constructor", Unequals)
Greater.register()
greater = getattr(Greater, "constructor", Greater)
GreaterOrEqual.register()
greater_or_equal = getattr(GreaterOrEqual, "constructor", GreaterOrEqual)
Less.register()
less = getattr(Less, "constructor", Less)
LessOrEqual.register()
less_or_equal = getattr(LessOrEqual, "constructor", LessOrEqual)
NoneOf.register()
none_of = getattr(NoneOf, "constructor", NoneOf)
OneOf.register()
one_of = getattr(OneOf, "constructor", OneOf)
| [
"swernst@gmail.com"
] | swernst@gmail.com |
b7ffbd9e5f8d2aa067a0e7fc422bbcec459a3281 | 7ab0fddb577c06989c3718fd50f7250fcbe81821 | /JurgenPeter/ql/traversals/dependency_checker.py | 5d9fd243a6a51c89c991a5c2e2e1f94ffb6b6ff0 | [] | no_license | thanus/myriad-ql | 54ce0d4dfbd5336c9f5c1d9b7ebf7072d6bdb4a0 | a7294c108f35a4b1c0ba90982aa41f93f7a68a51 | refs/heads/master | 2020-12-02T17:45:40.794417 | 2017-04-24T20:39:04 | 2017-04-24T20:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | from collections import defaultdict
from misc.visitor import CheckerVisitor
from ql.traversals.dependency_finder import DependencyFinder
class DependencyChecker(CheckerVisitor):
def __init__(self, errors=[]):
self.known_dependencies = defaultdict(list)
self.errors = errors
def check(self, node):
self.visit(node)
def visit_form(self, node):
for element in node.body:
self.visit(element)
def visit_question(self, node):
return [node.name]
def visit_computed_question(self, node):
dependencies = DependencyFinder().find(node)
if node.name in dependencies:
self.error("computed question \"{}\" has dependency on "
"itself".format(node.name))
self.find_indirect_dependencies(dependencies)
for dependency in dependencies:
if node.name in self.known_dependencies[dependency]:
self.error("computed question \"{}\" has circular dependency "
"on computed question \"{}\"".format(node.name,
dependency))
self.known_dependencies[node.name] = dependencies
return [node.name]
def visit_if_conditional(self, node):
scope = sum([self.visit(element) for element in node.ifbody], [])
return self.visit_conditional(node, scope)
def visit_ifelse_conditional(self, node):
scope = sum([self.visit(element) for element in node.ifbody +
node.elsebody], [])
return self.visit_conditional(node, scope)
def visit_conditional(self, node, scope):
dependencies = DependencyFinder().find(node)
for dependency in dependencies:
if dependency in scope:
self.error("condition depends on question \"{}\" within "
"own scope".format(dependency))
return scope
def find_indirect_dependencies(self, dependencies):
for dependency in dependencies:
for indirect_dependency in self.known_dependencies[dependency]:
if indirect_dependency not in dependencies:
dependencies.append(indirect_dependency)
| [
"jurgen.baas@outlook.com"
] | jurgen.baas@outlook.com |
5876891d12a98776210d975330a0ebd2b15e82b7 | 787f2f9c40bc029061683e979c5acbf00cc35434 | /tests/test_fs.py | bba86334837c0c4d82690bf011e3f94dcf94f20e | [
"Apache-2.0"
] | permissive | johnpaulett/mazel | a809a0694485053f8a1c379a2214bd3b16a6047c | b8cfc92e967c5150aa5cf116ed872df7ca2eb448 | refs/heads/main | 2023-03-16T04:06:17.399004 | 2022-09-05T16:26:28 | 2022-09-05T16:28:43 | 230,663,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from pathlib import Path
from unittest import TestCase
from mazel.fs import cd
from .utils import abspath
def cwd():
    """Return the current working directory as a ``pathlib.Path``."""
    return Path.cwd()
class CdTest(TestCase):
    """Tests for mazel.fs.cd: directory is changed inside the context and
    restored on exit, even when the body raises."""

    def setUp(self):
        self.current_dir = abspath("..")  # tests run from package root
        self.other_dir = abspath("examples/simple_workspace")

    def test_cd(self):
        self.assertEqual(cwd(), self.current_dir)
        with cd(abspath("examples/simple_workspace")):
            self.assertEqual(cwd(), self.other_dir)
        self.assertEqual(cwd(), self.current_dir)

    def test_failure(self):
        # Fix: the original wrapped the body in try/except Exception with an
        # unreachable self.fail(); assertRaises states the intent directly
        # and no longer swallows unexpected assertion failures.
        with self.assertRaises(RuntimeError):
            with cd(abspath("examples/simple_workspace")):
                self.assertEqual(cwd(), self.other_dir)
                raise RuntimeError()
        # cd() must restore the working directory despite the exception.
        self.assertEqual(cwd(), self.current_dir)

    def test_expand_home(self):
        with cd("~"):
            self.assertEqual(cwd(), Path.home())
| [
"john.paulett@equium.io"
] | john.paulett@equium.io |
1c79a147eb625f03988ddf93f50550b1d00a41bb | f481098e027bd92178257229b1e2f123b174da79 | /apps/interview/__init__.py | d4ed308085a35479f6b96b604a80250c62362722 | [] | no_license | shiqianlong/job_cms | 9e53077bc2f3037659d39326188b59b169c29613 | 8b8253aafae3b29a93029accf5ea5a1c296fef6e | refs/heads/master | 2023-07-27T18:21:24.527160 | 2021-09-09T09:45:27 | 2021-09-09T09:45:27 | 364,789,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | default_app_config = 'apps.interview.apps.InterviewConfig'
| [
"shiqianlong@nsfocus.com"
] | shiqianlong@nsfocus.com |
a2afcdbb25e5d5358991ecaf4ea9ef99624a88ba | 912021bc754e9b6f62efaf0d69e4179dda376d62 | /splatify/spopulate.py | 5d26f2fcf6d4c130bc7636eab1a4cff76fea7336 | [] | no_license | micnem/splatify | 5439cfb21ada1b194cea3f17661b9e02dd60d403 | 792e3be4bd9bcc2c34ace6dd0aea3acf512b8829 | refs/heads/master | 2023-07-22T02:39:34.123446 | 2023-02-18T21:55:37 | 2023-02-18T21:55:37 | 123,298,090 | 2 | 1 | null | 2023-07-15T00:54:42 | 2018-02-28T14:42:28 | Python | UTF-8 | Python | false | false | 7,334 | py | from django.shortcuts import render, redirect
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from requests import Request, post
from .models import Artist, TopArtist, RelatedArtist, Profile
from django.utils import timezone
from datetime import timedelta
import requests as r
import json
import base64
from splatify2.settings import CLIENT_ID, CLIENT_SECRET
BASE_URL = "https://api.spotify.com/v1/"
def execute_spotify_api_request(access_token, endpoint, post_=False, put_=False):
    """Call the Spotify Web API and return the decoded JSON body.

    Args:
        access_token: OAuth bearer token.
        endpoint: Path appended to BASE_URL.
        post_/put_: When set, fire a POST/PUT to the endpoint first.

    NOTE(review): the POST/PUT responses are discarded and a GET to the
    same endpoint is always issued afterwards -- confirm that is intended.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    only JSON-decoding failures (ValueError) are caught now.
    """
    headers = {'Content-Type': 'application/json',
               'Authorization': "Bearer " + access_token}
    if post_:
        r.post(BASE_URL + endpoint, headers=headers)
    if put_:
        r.put(BASE_URL + endpoint, headers=headers)
    response = r.get(BASE_URL + endpoint, {}, headers=headers)
    try:
        return response.json()
    except ValueError:  # requests raises a ValueError subclass on bad JSON
        return {'Error': 'Issue with request'}
def create_artist(items):
    """Project raw Spotify artist objects into plain dicts.

    Args:
        items: Iterable of artist dicts as returned by the Spotify API.

    Returns:
        A list of dicts with keys spotify_id, name, popularity and uri.
    """
    return [
        {
            'spotify_id': item.get('id'),
            'name': item.get('name'),
            'popularity': item.get('popularity'),
            'uri': item.get('uri'),
        }
        for item in items
    ]
def get_top_artists(profile):
    """Fetch the user's top Spotify artists and persist them, plus each
    artist's related artists, to the database.

    Side effects: creates Artist, RelatedArtist and TopArtist rows and
    marks the profile as populated.
    """
    access_token = refresh_tokens(profile)
    endpoint = "me/top/artists?time_range=long_term&limit=20"
    response = execute_spotify_api_request(access_token, endpoint)
    if response == None:
        # Fall back to short-term listening history (e.g. new accounts).
        endpoint = "me/top/artists?time_range=short_term&limit=20"
        response = execute_spotify_api_request(access_token, endpoint)
    items = response.get('items')
    artist_list = create_artist(items)
    # Iterated in reverse, so affinity 1 is assigned to the list's last
    # entry -- NOTE(review): confirm the intended ranking direction.
    for num, artist in enumerate(artist_list[::-1]):
        current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['spotify_id'], popularity = artist['popularity'], uri = artist['uri'])
        # Also record every related artist with its own affinity ranking.
        endpoint = f"artists/{current_artist.spotify_id}/related-artists"
        response = execute_spotify_api_request(access_token, endpoint)
        items = response.get('artists')
        rel_artist_list = create_artist(items)
        for number, rel_artist in enumerate(rel_artist_list[::-1]):
            related_artist, created = Artist.objects.get_or_create(name = rel_artist['name'], spotify_id = rel_artist['spotify_id'], popularity = rel_artist['popularity'], uri = rel_artist['uri'])
            RelatedArtist.objects.get_or_create(root_artist=current_artist, artist2=related_artist, affinity=number + 1)
        ta, created = TopArtist.objects.get_or_create(artist=current_artist, profile=profile, affinity=num+1)
    profile.populated = True
    profile.save()
def match(user_list):
    """Intersect the taste profiles of every user in *user_list*.

    For each user, build the set of their favourite artists plus all
    artists related to those favourites, then return the artists common
    to every user (a QuerySet of Artist rows).
    """
    master_artist_list = []
    for num, user in enumerate(user_list):
        top_artists = user.profile.fave_artists.all()
        # Ids of every artist related to any of this user's favourites.
        related_artists = RelatedArtist.objects.filter(root_artist__in = top_artists).distinct().values_list("artist2", flat=True)
        # Union of favourites and their related artists, de-duplicated.
        artist_list = (Artist.objects.filter(id__in = related_artists)|top_artists).distinct()
        if num == 0:
            # Seed the running intersection with the first user's artists.
            master_artist_list = artist_list
        else:
            master_artist_list = master_artist_list.intersection(artist_list)
    return master_artist_list
def create_playlist(profile, user2):
    """Create a private Spotify playlist on *profile*'s account named after
    both users, and return the new playlist's id."""
    access_token = refresh_tokens(profile)
    # The Spotify user id is stored as the social-auth uid.
    user_id = profile.account.social_auth.first().uid
    endpoint = f"users/{user_id}/playlists"
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer ' + access_token}
    body = json.dumps({
        "name": f"SplatList for {profile.account.first_name} and {user2.first_name}",
        "description": "A playlist generated for you, by Splatify, with love.",
        "public": False
    })
    response = r.post(BASE_URL + endpoint, body, headers=headers)
    playlist_id = response.json()
    return playlist_id['id']
def add_to_playlist(profile, track_uri_list, playlist_id):
    """Append the given track URIs to a playlist.

    The URIs are joined with '%2c' (URL-encoded comma) into the `uris`
    query parameter.  Returns the decoded API response.
    """
    access_token = refresh_tokens(profile)
    track_urls = '%2c'.join(track_uri_list)
    endpoint = f"playlists/{playlist_id}/tracks?uris=" + track_urls
    response = execute_spotify_api_request(access_token, endpoint, post_=True)
    return response
def get_artist_top_songs(artist, profile):
    """Return the Spotify URIs of *artist*'s top tracks (IL market).

    Fix: the original ``while len(track_uri_list) < 3`` loop spun forever
    when the API returned no tracks, and appended duplicate URIs when it
    returned fewer than three; all available track URIs are now returned
    exactly once.
    """
    access_token = refresh_tokens(profile)
    endpoint = f"artists/{artist.spotify_id}/top-tracks?country=IL"
    response = execute_spotify_api_request(access_token, endpoint)
    tracks = response['tracks']
    return [track['uri'] for track in tracks]
def main(master_artist_list, profile, user2):
    """Create a shared playlist for the two users and fill it with music.

    With more than five matched artists, add each artist's top tracks;
    otherwise fall back to Spotify's recommendation seeds.
    """
    # Cap the playlist at the first 20 matched artists.
    master_artist_list = master_artist_list[0:20]
    playlist_id = create_playlist(profile, user2)
    if len(master_artist_list) > 5:
        for artist in master_artist_list:
            add_to_playlist(profile, get_artist_top_songs(artist, profile), playlist_id)
    else:
        track_uri_list = seeder(master_artist_list, profile)
        add_to_playlist(profile, track_uri_list, playlist_id)
def refresh_tokens(profile):
    """Exchange the stored OAuth refresh token for a fresh access token.

    Uses the app's client id/secret as HTTP basic auth, following the
    Spotify Authorization Code flow; returns the new access token string.
    """
    endpoint = "https://accounts.spotify.com/api/token"
    refresh_token = profile.account.social_auth.first().extra_data['refresh_token']
    # Basic auth credential: base64("client_id:client_secret").
    auth_str = '{}:{}'.format(CLIENT_ID, CLIENT_SECRET)
    b64_auth_str = base64.urlsafe_b64encode(auth_str.encode()).decode()
    headers = {'Authorization': f'Basic {b64_auth_str}'}
    body = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
    }
    response = r.post(endpoint, body, headers=headers)
    return response.json()['access_token']
def seeder(artist_list, profile):
    """Ask Spotify for track recommendations seeded by up to five artists.

    Returns a list of recommended track URIs.  On a 400 error response
    (bad/missing seeds) a single fallback track URI is returned, as in
    the original code.

    Fix: the original unconditionally read ``response.json()['error']``,
    which raised KeyError on every *successful* response (no 'error' key).
    """
    seed_artists = [artist.spotify_id for artist in artist_list][:5]
    artists = '%2c'.join(seed_artists)
    endpoint = "recommendations?seed_artists=" + artists
    access_token = refresh_tokens(profile)
    headers = {'Content-Type': 'application/json',
               'Authorization': "Bearer " + access_token}
    response = r.get(BASE_URL + endpoint, headers=headers)
    payload = response.json()
    track_uri_list = []
    error = payload.get('error') or {}
    if error.get('status') == 400:
        # Fallback track when seeding fails.
        track_uri_list.append('spotify:track:4uLU6hMCjMI75M1A2tKUQC')
    else:
        for track in payload['tracks']:
            track_uri_list.append(track['uri'])
    return track_uri_list
def artist_search(query, profile):
    """Search Spotify for *query*, persist the first artist hit, attach it
    to the profile's top artists with affinity 30, and return it.

    NOTE(review): raises IndexError when the search returns no artists --
    confirm callers guard against empty results.
    """
    access_token = refresh_tokens(profile)
    endpoint = f"https://api.spotify.com/v1/search?q={query}&type=artist"
    headers = {"Content-Type": "application/json",
               "Authorization": "Bearer " + access_token}
    response = r.get(endpoint, headers = headers)
    # Take only the first (best) match.
    artist = response.json()['artists']['items'][0]
    current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['id'], popularity = artist['popularity'], uri = artist['uri'])
    TopArtist.objects.get_or_create(profile=profile, artist=current_artist, affinity=30)
    return current_artist
| [
"michael.nemni@gmail.com"
] | michael.nemni@gmail.com |
01ba08cce655859e38e1ae12bb6490855debc6d1 | 63e7f6c96651030d0e379e7babd1af33d3244f72 | /core/criterions/sm.py | 2f2ef3cf401b8d5e26717e7336b9b8cdff2ae7b3 | [] | no_license | WN1695173791/VaGES | 18086f4197986721cdeefd5567815bab3a18e979 | 5c0c29b5c2864a1a2b2bd61b8561be70de231878 | refs/heads/main | 2023-02-25T16:14:13.920635 | 2021-02-05T03:07:42 | 2021-02-05T03:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,771 | py |
__all__ = ["ssm", "dsm", "mdsm", "SSM", "DSM", "MDSM", "make_ssm_noise", "make_mdsm_sigmas"]
import torch
import torch.autograd as autograd
import numpy as np
from .base import NaiveCriterion
import core.utils.managers as managers
import core.func as func
from core.lvm.base import LVM
def make_ssm_noise(*size, noise_type, device):
assert noise_type in ['radermacher', 'gaussian']
u = torch.randn(*size, device=device)
if noise_type == 'radermacher':
u = u.sign()
return u
def ssm(v, lvm: LVM, noise_type='radermacher', u=None):
r""" Sliced score matching
Args:
v: a batch of data
lvm: an instance of LVM
noise_type: the type of the noise
u: a batch of noise given manually
"""
if u is None:
u = make_ssm_noise(*v.shape, noise_type=noise_type, device=v.device)
with func.RequiresGradContext(v, requires_grad=True):
log_p = -lvm.free_energy_net(v)
score = autograd.grad(log_p.sum(), v, create_graph=True)[0]
loss1 = 0.5 * func.sos(score)
hvp = autograd.grad((score * u).sum(), v, create_graph=True)[0]
loss2 = func.inner_product(hvp, u)
return loss1 + loss2
def dsm(v, lvm: LVM, noise_std, eps=None):
r""" Denoising score matching
Args:
v: a batch of data
lvm: an instance of LVM
noise_std: the std of noise
eps: a batch of standard Gauss noise
"""
if eps is None:
eps = torch.randn_like(v, device=v.device)
v_noised = v + noise_std * eps
with func.RequiresGradContext(v_noised, requires_grad=True):
log_p = -lvm.free_energy_net(v_noised)
score = autograd.grad(log_p.sum(), v_noised, create_graph=True)[0]
return 0.5 * func.sos(score + eps / noise_std)
def make_mdsm_sigmas(batch_size, sigma_begin, sigma_end, dist, device=None):
if dist == "linear":
used_sigmas = torch.linspace(sigma_begin, sigma_end, batch_size, device=device)
elif dist == "geometrical":
used_sigmas = torch.logspace(np.log10(sigma_begin), np.log10(sigma_end), batch_size, device=device)
else:
raise NotImplementedError
return used_sigmas
def mdsm(v, lvm: LVM, sigma0, sigma_begin, sigma_end, dist: str, eps=None):
r""" Multi-level denoising score matching
Args:
v: a batch of data
lvm: an instance of LVM
sigma0: the base noise std
sigma_begin: the begin of the range of the noise std
sigma_end: the end of the range of the noise std
dist: how the noise std distributed in [sigma_begin, sigma_end]
eps: a batch of standard Gauss noise
"""
sigmas = make_mdsm_sigmas(v.size(0), sigma_begin, sigma_end, dist, device=v.device)
sigmas4v = sigmas.view(len(v), *([1] * (v.dim() - 1)))
if eps is None:
eps = torch.randn_like(v).to(v.device)
v_noised = v + sigmas4v * eps
with func.RequiresGradContext(v_noised, requires_grad=True):
log_p = -lvm.free_energy_net(v_noised)
score = autograd.grad(log_p.sum(), v_noised, create_graph=True)[0]
return 0.5 * func.sos(score / sigmas4v + eps / sigma0 ** 2)
class SSM(NaiveCriterion):
def __init__(self,
models: managers.ModelsManager,
optimizers: managers.OptimizersManager,
lr_schedulers: managers.LRSchedulersManager,
noise_type='radermacher'
):
super().__init__(models, optimizers, lr_schedulers)
self.lvm = models.lvm
self.noise_type = noise_type
def objective(self, v, **kwargs):
return ssm(v, self.lvm, self.noise_type)
class DSM(NaiveCriterion):
def __init__(self,
noise_std,
models: managers.ModelsManager,
optimizers: managers.OptimizersManager,
lr_schedulers: managers.LRSchedulersManager,
):
super().__init__(models, optimizers, lr_schedulers)
self.lvm = models.lvm
self.noise_std = noise_std
def objective(self, v, **kwargs):
return dsm(v, self.lvm, self.noise_std)
class MDSM(NaiveCriterion):
def __init__(self,
sigma0, sigma_begin, sigma_end, dist,
models: managers.ModelsManager,
optimizers: managers.OptimizersManager,
lr_schedulers: managers.LRSchedulersManager,
):
super().__init__(models, optimizers, lr_schedulers)
self.lvm = models.lvm
self.sigma0 = sigma0
self.sigma_begin = sigma_begin
self.sigma_end = sigma_end
self.dist = dist
def objective(self, v, **kwargs):
return mdsm(v, self.lvm, self.sigma0, self.sigma_begin, self.sigma_end, self.dist)
| [
"2978777543@qq.com"
] | 2978777543@qq.com |
c36f58b52ce68f0259e9453934cfe86a3e24a7d2 | 6330f5caa7dbe4bb8045808b8edc6d49a5bd1000 | /Python3/ElectionResult.py | 97b89e02e5fe5390b7afdcdc109e9f490e6893e8 | [] | no_license | SteveJarosi/CodeCademy_projects | 43494bbd0514c5d1eb927dd47869d93046e679ef | ae8a8ce9dba10793e681b1bd5af666705a048d52 | refs/heads/main | 2023-05-07T18:30:52.113524 | 2021-06-09T13:08:08 | 2021-06-09T13:08:08 | 336,215,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | #import codecademylib
import numpy as np
from matplotlib import pyplot as plt
# Opinion-poll simulation: given a candidate with 54% true support, estimate
# how often a survey of this size would still show them losing, using
# binomial resampling of simulated surveys.
survey_responses = ['Ceballos', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos',
                    'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos',
                    'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos',
                    'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos']
# Count and percentage of Ceballos supporters in the observed sample.
total_ceballos = survey_responses.count('Ceballos')
print(total_ceballos)
n_responses = len(survey_responses)
percentage_ceballos = total_ceballos * 100 / n_responses
print(percentage_ceballos)
# Simulate 10000 surveys of the same size assuming 54% true support.
possible_surveys = np.random.binomial(
    n_responses, 0.54, 10000) / float(n_responses)
plt.hist(possible_surveys, range=(0, 1), bins=20)
plt.show()
# Fraction of simulated small surveys where Ceballos polls below 50%.
ceballos_loss_surveys = np.mean(possible_surveys < 0.5)
print(ceballos_loss_surveys)
# Repeat with a much larger survey (7000 respondents): the apparent-loss
# probability should drop sharply.
large_survey = np.random.binomial(7000, 0.54, 10000) / float(7000)
ceballos_loss_new = np.mean(large_survey < 0.5)
print(ceballos_loss_new)
| [
"dr.jarosi@yahoo.com"
] | dr.jarosi@yahoo.com |
a98ee3453af8e367bb94991bb6722e190e0aab83 | 604c7b40f58830c16c51b4514765a6c1915769c4 | /bnop_source/b_code/core/object_model/bnop_repositories.py | e07a5d0d940ed0673114233db8b6b95c9beac9aa | [
"MIT"
] | permissive | boro-alpha/bnop | 2e3a0654ddf73dce357928d399853c8d0fc936e7 | ae80ce88f12f3b9d509f416aea4f19dc20f1081b | refs/heads/master | 2023-06-11T17:34:42.743589 | 2021-06-30T07:24:51 | 2021-06-30T07:24:51 | 381,096,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | class BnopRepositories(object):
def __init__(
self,
uuid):
self.uuid = \
uuid
| [
"xibertao@borogroup.co.uk"
] | xibertao@borogroup.co.uk |
15a79fad6d620b138d00e6839c6708d9b004afea | 8ddc6dfb69ba997071f5aa8e96e1434c4259c95b | /qt7.py | 419711774f19347423ce18c6ed16581d4f5562a1 | [] | no_license | xiger78/python | 74dcd728de8b7e72cb74ed46ba96c4cd7dc01d84 | 9657be5c4c9599a36f557edf626acfb7ab617512 | refs/heads/master | 2020-06-30T15:06:57.436112 | 2019-08-06T14:23:14 | 2019-08-06T14:45:02 | 182,071,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,368 | py | import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QGroupBox, QRadioButton, QCheckBox, QPushButton, QMenu, QGridLayout, QVBoxLayout)
class MyApp(QWidget):
    """Demo window arranging four QGroupBox variants in a 2x2 grid."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the grid of group boxes and show the window."""
        layout = QGridLayout()
        layout.addWidget(self.createFirstExclusiveGroup(), 0, 0)
        layout.addWidget(self.createSecondExclusiveGroup(), 1, 0)
        layout.addWidget(self.createNonExclusiveGroup(), 0, 1)
        layout.addWidget(self.createPushButtonGroup(), 1, 1)
        self.setLayout(layout)
        self.setWindowTitle('Box Layout')
        self.setGeometry(300, 300, 480, 320)
        self.show()

    def createFirstExclusiveGroup(self):
        """Plain group box with three mutually exclusive radio buttons."""
        box = QGroupBox('Exclusive Radio Buttons')
        radios = [QRadioButton('Radio%d' % i) for i in (1, 2, 3)]
        radios[0].setChecked(True)
        column = QVBoxLayout()
        for radio in radios:
            column.addWidget(radio)
        box.setLayout(column)
        return box

    def createSecondExclusiveGroup(self):
        """Checkable (initially unchecked) group box with radios + checkbox."""
        box = QGroupBox('Exclusive Radio Buttons')
        box.setCheckable(True)
        box.setChecked(False)
        radios = [QRadioButton('Radio%d' % i) for i in (1, 2, 3)]
        radios[0].setChecked(True)
        checkbox = QCheckBox('Independent Checkbox')
        checkbox.setChecked(True)
        column = QVBoxLayout()
        for widget in radios + [checkbox]:
            column.addWidget(widget)
        column.addStretch(1)
        box.setLayout(column)
        return box

    def createNonExclusiveGroup(self):
        """Flat group box with independent checkboxes and a tri-state box."""
        box = QGroupBox('Non-Exclusive Checkboxes')
        box.setFlat(True)
        checkbox1 = QCheckBox('Checkbox1')
        checkbox2 = QCheckBox('Checkbox2')
        checkbox2.setChecked(True)
        tristate = QCheckBox('Tri-state Button')
        tristate.setTristate(True)
        column = QVBoxLayout()
        for widget in (checkbox1, checkbox2, tristate):
            column.addWidget(widget)
        column.addStretch(1)
        box.setLayout(column)
        return box

    def createPushButtonGroup(self):
        """Checkable group box showing normal/toggle/flat/popup buttons."""
        box = QGroupBox('Push Buttons')
        box.setCheckable(True)
        box.setChecked(True)
        normal = QPushButton('Normal Button')
        toggle = QPushButton('Toggle Button')
        toggle.setCheckable(True)
        toggle.setChecked(True)
        flat = QPushButton('Flat Button')
        flat.setFlat(True)
        popup = QPushButton('Popup Button')
        menu = QMenu(self)
        for label in ('First Item', 'Second Item', 'Third Item', 'Fourth Item'):
            menu.addAction(label)
        popup.setMenu(menu)
        column = QVBoxLayout()
        for widget in (normal, toggle, flat, popup):
            column.addWidget(widget)
        column.addStretch(1)
        box.setLayout(column)
        return box
if __name__ == '__main__':
    # Create the Qt application, show the demo window and block in the
    # event loop until the window is closed.
    app = QApplication(sys.argv)
    ex = MyApp()
    sys.exit(app.exec_())
| [
"xiger78@gmail.com"
] | xiger78@gmail.com |
a2ae0b22ecd07cda1c140e4cca4adef0b164eee2 | bc3346620897273bd6f7252cefb81e6ad32130f6 | /ksx1026/constants.py | 2d0c6f4958bd16e6e2a732e2852d4207d4a60b13 | [
"MIT"
] | permissive | Pusnow/KS-X-1026-Python | 8b5e4f1a46c99f895dfd520a6495e9a70b33cb62 | 6497dc84e8ff185c6ad62471c8ded37a33b022b3 | refs/heads/master | 2022-12-07T19:26:50.107573 | 2022-11-24T01:59:09 | 2022-11-24T01:59:09 | 66,271,082 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,476 | py | # -*- coding: utf-8 -*-
"""
====================================
Constants for KS X 1026-1
====================================
.. moduleauthor:: Wonsup Yoon <pusnow@me.com>
All constants for KS X 1026-1.
Reference
============
* http://www.unicode.org/L2/L2008/08225-n3422.pdf
"""
from __future__ import unicode_literals
# Code-point bases used in Hangul syllable composition/decomposition
# (precomposed syllables start at U+AC00; conjoining jamo at U+1100/U+1161/U+11A7).
SBase = 0xAC00
LBase = 0x1100
VBase = 0x1161
TBase = 0x11A7
# Counts of modern leading consonants (L), vowels (V) and trailing
# consonants (T); TCountAll also covers the old-Hangul trailing letters.
LCount = 19
VCount = 21
TCount = 28
TCountAll = 83
NCount = VCount * TCount # 588
SCount = LCount * NCount # 11172
# a transformation table from Hangul Compatibility Letters(0x3131 - 0x318E)
# to Johab Hangul Letters(0x1100 – 0x11FF)
CPJAMO = [
0x1100,
0x1101,
0x11AA,
0x1102,
0x11AC,
0x11AD,
0x1103,
0x1104,
0x1105,
0x11B0,
0x11B1,
0x11B2,
0x11B3,
0x11B4,
0x11B5,
0x111A,
0x1106,
0x1107,
0x1108,
0x1121,
0x1109,
0x110A,
0x110B,
0x110C,
0x110D,
0x110E,
0x110F,
0x1110,
0x1111,
0x1112,
0x1161,
0x1162,
0x1163,
0x1164,
0x1165,
0x1166,
0x1167,
0x1168,
0x1169,
0x116A,
0x116B,
0x116C,
0x116D,
0x116E,
0x116F,
0x1170,
0x1171,
0x1172,
0x1173,
0x1174,
0x1175,
0x1160,
0x1114,
0x1115,
0x11C7,
0x11C8,
0x11CC,
0x11CE,
0x11D3,
0x11D7,
0x11D9,
0x111C,
0x11DD,
0x11DF,
0x111D,
0x111E,
0x1120,
0x1122,
0x1123,
0x1127,
0x1129,
0x112B,
0x112C,
0x112D,
0x112E,
0x112F,
0x1132,
0x1136,
0x1140,
0x1147,
0x114C,
0x11F1,
0x11F2,
0x1157,
0x1158,
0x1159,
0x1184,
0x1185,
0x1188,
0x1191,
0x1192,
0x1194,
0x119E,
0x11A1,
]
# a transformation table from Halfwidth Hangul Letters(0xFFA0 - 0xFFDF)
# to Johab Hangul Letters(0x1100 – 0x11FF)
HWJAMO = [
0x1160,
0x1100,
0x1101,
0x11AA,
0x1102,
0x11AC,
0x11AD,
0x1103,
0x1104,
0x1105,
0x11B0,
0x11B1,
0x11B2,
0x11B3,
0x11B4,
0x11B5,
0x111A,
0x1106,
0x1107,
0x1108,
0x1121,
0x1109,
0x110A,
0x110B,
0x110C,
0x110D,
0x110E,
0x110F,
0x1110,
0x1111,
0x1112,
0xFFBF,
0xFFC0,
0xFFC1,
0x1161,
0x1162,
0x1163,
0x1164,
0x1165,
0x1166,
0xFFC8,
0xFFC9,
0x1167,
0x1168,
0x1169,
0x116A,
0x116B,
0x116C,
0xFFD0,
0xFFD1,
0x116D,
0x116E,
0x116F,
0x1170,
0x1171,
0x1172,
0xFFD8,
0xFFD9,
0x1173,
0x1174,
0x1175,
0xFFDD,
0xFFDE,
0xFFDF,
]
# a transformation table from Hangul - embedded Letters(0x3200 - 0x320D, 0x3260 - 0x326D)
# to Johab Hangul Letters(0x1100 – 0x11FF)
PCJAMO = [
0x1100,
0x1102,
0x1103,
0x1105,
0x1106,
0x1107,
0x1109,
0x110B,
0x110C,
0x110E,
0x110F,
0x1110,
0x1111,
0x1112,
]
# a transformation of Parenthesized Hangul Letters and syllable blocks(0x3200 - 0x321C)
# to Johab Hangul Letters(0x1100 – 0x11FF) or Wanseong Hangul syllable
# blocks(0xAC00 - 0xD7A3)
PACHAR = [
0x1100,
0x1102,
0x1103,
0x1105,
0x1106,
0x1107,
0x1109,
0x110B,
0x110C,
0x110E,
0x110F,
0x1110,
0x1111,
0x1112,
0xAC00,
0xB098,
0xB2E4,
0xB77C,
0xB9C8,
0xBC14,
0xC0AC,
0xC544,
0xC790,
0xCC28,
0xCE74,
0xD0C0,
0xD30C,
0xD558,
0xC8FC,
0x321D,
0x321E,
0x321F,
]
# a transformation of Circled Hangul Letters and Syllable Blocks(0x3260 - 0x327B, 0x327E)
# to Johab Hangul Letters(0x1100 – 0x11FF) or Wanseong Hangul syllable
# blocks(0xAC00 - 0xD7A3)
CLCHAR = [
0x1100,
0x1102,
0x1103,
0x1105,
0x1106,
0x1107,
0x1109,
0x110B,
0x110C,
0x110E,
0x110F,
0x1110,
0x1111,
0x1112,
0xAC00,
0xB098,
0xB2E4,
0xB77C,
0xB9C8,
0xBC14,
0xC0AC,
0xC544,
0xC790,
0xCC28,
0xCE74,
0xD0C0,
0xD30C,
0xD558,
0x327C,
0x327D,
0xCB60,
0x326F,
]
# The order values for Johab Hangul Letters 0x1100 - 0x11FF
INDEX1100 = [
1,
2,
12,
24,
26,
36,
70,
86,
93,
109,
118,
138,
161,
165,
171,
176,
177,
179,
185,
13,
14,
15,
17,
25,
41,
45,
66,
69,
77,
85,
87,
88,
89,
94,
95,
96,
97,
98,
99,
101,
102,
104,
105,
107,
108,
110,
111,
112,
113,
114,
115,
116,
122,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
139,
140,
142,
143,
144,
145,
146,
147,
148,
149,
150,
152,
164,
167,
168,
169,
170,
172,
173,
174,
175,
180,
184,
191,
192,
4,
18,
20,
23,
28,
194,
0,
1,
5,
6,
10,
11,
15,
16,
20,
21,
22,
23,
33,
34,
43,
46,
48,
52,
54,
64,
71,
73,
2,
3,
7,
8,
12,
13,
14,
18,
19,
26,
27,
29,
30,
32,
37,
38,
40,
41,
42,
44,
45,
47,
50,
51,
55,
57,
58,
59,
60,
62,
63,
69,
70,
72,
74,
75,
80,
83,
85,
87,
88,
90,
92,
93,
94,
4,
9,
17,
24,
25,
1,
2,
7,
12,
20,
23,
24,
36,
37,
47,
51,
58,
64,
65,
66,
70,
86,
94,
109,
118,
138,
161,
171,
176,
177,
179,
185,
5,
8,
13,
15,
18,
19,
22,
25,
28,
39,
41,
42,
44,
45,
48,
49,
54,
56,
57,
59,
60,
63,
67,
71,
75,
77,
79,
80,
81,
83,
84,
85,
90,
105,
106,
107,
110,
112,
113,
115,
135,
153,
154,
158,
159,
152,
156,
157,
180,
184,
186,
187,
188,
189,
192,
3,
6,
9,
10,
11,
14,
]
# The order values for Johab Hangul Syllable-Initial Letters 0xA960 - 0xA97C
INDEXA960 = [
29,
30,
31,
33,
37,
38,
42,
43,
47,
51,
53,
57,
58,
62,
63,
71,
74,
79,
100,
103,
106,
121,
141,
151,
166,
178,
183,
190,
193,
]
# The order values for Johab Hangul Syllable-Peak Letters 0xD7B0 - 0xD7C6
INDEXD7B0 = [
28,
31,
35,
36,
39,
49,
53,
56,
61,
65,
66,
67,
68,
76,
77,
78,
79,
81,
82,
84,
86,
89,
91,
]
# The order values for Johab Hangul Syllable-Final Letters 0xD7CB - 0xD7FB
INDEXD7CB = [
16,
21,
26,
27,
30,
31,
32,
33,
34,
35,
38,
40,
46,
50,
52,
55,
61,
68,
69,
72,
73,
76,
78,
82,
89,
91,
92,
93,
96,
101,
102,
114,
117,
119,
120,
123,
125,
126,
128,
130,
136,
137,
155,
160,
162,
163,
165,
181,
182,
]
| [
"pusnow@yonsei.ac.kr"
] | pusnow@yonsei.ac.kr |
5f450882399c1537153efb6d553b14f0575cd1de | 8804f70749e13604d01b71c3d3840c9573dc84a1 | /src/course/migrations/0011_lecture_belongs_to.py | c03e68e25856cf11b6589fb8345ef5e474897f71 | [] | no_license | hamada-kamal/students-management-system | 53e1c7b681ae9081770df4546d11eb5c6519ab61 | a745cb2cd116a84323a95a804442a31cabafeb71 | refs/heads/master | 2023-06-29T23:46:56.875830 | 2021-08-02T22:26:23 | 2021-08-02T22:26:23 | 392,110,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 3.2.5 on 2021-07-08 12:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``Lecture.belongs_to`` foreign key."""
    dependencies = [
        ('course', '0010_lecture'),
    ]
    operations = [
        migrations.AddField(
            model_name='lecture',
            name='belongs_to',
            # PROTECT blocks deleting a Subject that still has lectures.
            # default=1 only backfills existing rows during this migration;
            # preserve_default=False then drops it from the model state.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='course.subject'),
            preserve_default=False,
        ),
    ]
| [
"hamadakamal819gmail.com"
] | hamadakamal819gmail.com |
a02de45e9c96d2815075582d0cdd03da14cd9495 | 63516bf6fb88c77666c6d4942a85e3e15f34bc7f | /blog/migrations/0001_initial.py | 20b4c07279af357e94596a204b972f2dd2758220 | [] | no_license | erastusnzula/Django | 13e3740e445427585e05c9edb85a44dc54f5053b | 957a3c4cbc1ec8f436d0957cfa683bdd666a2b88 | refs/heads/master | 2023-07-16T14:12:33.671864 | 2021-08-31T10:10:05 | 2021-08-31T10:10:05 | 401,649,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # Generated by Django 3.2.4 on 2021-07-01 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration for the blog app (Category, Post)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                # NOTE(review): the plural form is the singular "Blog Category";
                # likely intended "Blog Categories" — fix in the model, not here.
                'verbose_name': 'Blog Category',
                'verbose_name_plural': 'Blog Category',
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('body', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Date Published')),
                # NOTE(review): auto_now_add freezes this at creation time; an
                # "updated" timestamp would normally use auto_now — confirm intent.
                ('last_modified', models.DateTimeField(auto_now_add=True, verbose_name='Date Updated')),
                ('categories', models.ManyToManyField(related_name='posts', to='blog.Category')),
            ],
        ),
    ]
| [
"nzulaerastus@gmail.com"
] | nzulaerastus@gmail.com |
e624ee43d05f02fa4c7d7f5af15c2102132688c5 | 630d7cbef6488b3557b5461f4c1c56e270b91667 | /TEBD/mpstest6.py | 57440230562e8048f5effe5170aae4a706714bca | [
"MIT"
] | permissive | ehua7365/RibbonOperators | 341eabf548b18262be8d9bd4c199a3d80b97a273 | 6b60eb3a6e0246b457e6d6e89ea2e01ef265746f | refs/heads/master | 2021-01-10T19:09:55.446361 | 2014-11-04T04:41:23 | 2014-11-04T04:41:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,594 | py | """
mpstest6.py
A test of manipulating matrix product states with numpy.
2014-08-25
"""
import numpy as np
import matplotlib.pyplot as plt
from cmath import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def main():
    """Entry point: run one demo test (the others are kept for reference)."""
    #test1()
    #test2()
    test3()
    #test4()
    #test5()
def test1():
    """Smoke test: build a random MPS, re-approximate it at chi=2 and compare
    norms, relative error and entanglement entropy; then repeat for a random
    dense state compressed at chi=4."""
    print("*** MPS tests started ***")
    (N,chi,d) = (7,10,2)
    A = randomMPS(N,chi,d)
    state = getState(A)
    state = state/np.sqrt(np.dot(np.conj(state),state))
    prod = np.dot(np.conj(state),state)
    approxA = getMPS(state,2)
    approxState = getState(approxA)
    approxProd = np.dot(np.conj(approxState),approxState)
    relErr = approxProd/prod - 1
    S = entropy(state)
    print("State total %d elements"%state.size)
    print("MPS total %d elements"%A.size)
    print("(N,chi,d) = (%d,%d,%d)"%(N,chi,d))
    print("Expected: (%f,%f)"%polar(prod))
    print("SVD: (%f,%f)"%polar(innerProduct(approxA,approxA)))
    print("Product: (%f,%f)"%polar(approxProd))
    print("Relative error: %f"%np.absolute(relErr))
    print("Entropy: %f"%S)
    print("")
    # Alternative fixture states kept for experimentation:
#    state = np.ones(d**N)/np.sqrt(2)**N
#    state = np.zeros(2**10)
#    state[0] = 1/np.sqrt(2)
#    state[-1] = 1/np.sqrt(2)
    state = np.random.rand(d**N)
    state = state/np.linalg.norm(state)
    mps = getMPS(state,4)
    print("Expected: (%f,%f)"%polar(np.inner(state,state)))
    print("MPS: (%f,%f)"%polar(innerProduct(mps,mps)))
    print("*** MPS tests finished ***\n")
def test2():
    """Print MPS and state-vector fidelities of a random MPS against its
    re-approximations for increasing bond dimension newChi."""
    print("*** Started testing MPS approximation ***")
    (N,chi,d) = (5,3,2)
    A = randomMPS(N,chi,d)
    a = getState(A)
    for newChi in xrange(1,12):
        newA = getMPS(a,newChi)
        print(fidelityMPS(A,newA))
        newa = getState(newA)
        print(fidelity(a,newa))
    # Sanity checks: fidelity of anything with itself should be 1.
    print(fidelity(a,a))
    print(fidelityMPS(A,A))
    print("*** Finished testing MPS approximation ***")
def test3():
    """3D surface plot of mean MPS fidelity (20 trials) when a random MPS of
    bond dimension chi0 is re-approximated with bond dimension chi1."""
    print("*** Started testing MPS ***")
    N = 5
    d = 2
    X = []
    Y = []
    Z = []
    for chi0 in xrange(1,8):
        for chi1 in xrange(1,8):
            F = 0
            for i in xrange(20):
                mps = randomMPS(N,chi0,d)
                state = getState(mps)
                newmps = getMPS(state,chi1)
                state1 = getState(newmps)
                F += fidelityMPS(mps,newmps)
            X.append(chi0)
            Y.append(chi1)
            Z.append(F/20)
    X = np.array(X)
    Y = np.array(Y)
    Z = np.array(Z)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
    ax.set_xlabel('chi0')
    ax.set_ylabel('chi1')
    ax.set_zlabel('fidelity')
    plt.show()
    print("*** Finished testing MPS ***")
def test4():
    """Compare state-vector fidelity against MPS fidelity on 10 random
    MPS/approximation pairs; the two numbers should agree."""
    print("*** Started testing fidelity ***")
    d = 2
    N = 5
    for i in xrange(10):
        mpsa = randomMPS(N,5,d)
        a = getState(mpsa)
        mpsb = getMPS(a,2)
        b = getState(mpsb)
        print(fidelity(a,b))
        print(fidelityMPS(mpsa,mpsb))
    print("*** Finished testing fidelity ***")
def test5():
    """3D surface plot of mean state-vector fidelity when a random MPS of
    bond dimension chi0 is re-approximated with bond dimension chi1.

    Bug fix: the mean previously summed 5 trials but divided by 20 (copied
    from test3, which runs 20 trials), understating every fidelity by 4x.
    """
    print("*** Started testing MPS ***")
    N = 5
    d = 2
    trials = 5
    X = []
    Y = []
    Z = []
    for chi0 in xrange(1,8):
        for chi1 in xrange(1,8):
            F = 0
            for i in xrange(trials):
                mps = randomMPS(N,chi0,d)
                state0 = getState(mps)
                newmps = getMPS(state0,chi1)
                state1 = getState(newmps)
                F += fidelity(state0,state1)
            X.append(chi0)
            Y.append(chi1)
            Z.append(F/trials) # average over the trials actually run
    X = np.array(X)
    Y = np.array(Y)
    Z = np.array(Z)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
    ax.set_xlabel('chi0')
    ax.set_ylabel('chi1')
    ax.set_zlabel('fidelity')
    plt.show()
    print("*** Finished testing MPS ***")
def closeness(a, b):
    """Difference of squared norms <a|a> - <b|b> of two state vectors."""
    norm_a = np.inner(np.conj(a), a)
    norm_b = np.inner(np.conj(b), b)
    return norm_a - norm_b
def correlation(A, B):
    """Normalised overlap <A|B><B|A> / (<A|A><B|B>) of two MPS."""
    overlap = innerProduct(A, B) * innerProduct(B, A)
    return overlap / innerProduct(A, A) / innerProduct(B, B)
def fidelityMPS(A, B):
    """Fidelity |<A|B>|^2 / (<A|A><B|B>) of two MPS, via transfer matrices."""
    overlap = innerProduct(A, B) * innerProduct(B, A)
    return overlap / innerProduct(A, A) / innerProduct(B, B)
def fidelity(a, b):
    """Fidelity |<a|b>|^2 / (<a|a><b|b>) of two (unnormalised) state vectors."""
    overlap = np.inner(np.conj(a), b) * np.inner(np.conj(b), a)
    return overlap / np.inner(np.conj(a), a) / np.inner(np.conj(b), b)
def randomMPS(N, chi, d):
    """Random complex MPS: array of shape (N, chi, d, chi), entries in
    [-0.5, 0.5) + i[-0.5, 0.5)."""
    tensors = []
    for _ in range(N):
        real = np.random.rand(chi, d, chi) - .5
        imag = np.random.rand(chi, d, chi) - .5
        tensors.append(real + 1j * imag)
    return np.array(tensors)
def bellState():
    """Two-qubit Bell state (|00> + |11>)/sqrt(2) as a length-4 vector."""
    return np.array([1, 0, 0, 1], dtype=complex) / np.sqrt(2)
def getState(A):
    """Contract an MPS of shape (N, chi, d, chi) into its dense state vector
    of length d**N (periodic boundary closed by the final trace)."""
    N = len(A)
    d = A[0].shape[1]
    contracted = A[0]
    for tensor in A[1:]:
        contracted = np.tensordot(contracted, tensor, axes=(-1, 0))
    contracted = np.trace(contracted, axis1=0, axis2=-1)
    return np.reshape(contracted, d**N)
def getMPS(state,chi):
    """Decompose a dense qubit state vector into an MPS of shape (N,chi,chi*d)
    by sweeping left-to-right with truncated SVDs (keep at most chi singular
    values per cut). Inverse of getState up to truncation error."""
    d = 2 # Qubits have 2 states each
    N = int(np.log2(len(state))) # Number of qubits
    c = np.reshape(state,cShape(d,N)) # State amplitudes tensor.
    A = [] # List of N matrices of MPS, each of shape (chi,d,chi)
    # Start left end with a vector of size (d,chi)
    c = np.reshape(c,(d,d**(N-1))) # Reshape c
    (ap,sv,c) = np.linalg.svd(c) # Apply SVD
    s = np.zeros((d,chi),dtype=complex) # Construct shape of singular value matrix s
    s[:d,:d] = np.diag(sv[:chi]) # Fill s with singular values
    # Trim c or fill rest of c with zeros
    newc = np.zeros((chi,d**(N-1)),dtype=complex)
    newc[:min(chi,d**(N-1)),:] = c[:chi,:]
    c = newc
    A.append(np.tensordot(ap,s,axes=(-1,0))) # Contract and append to A
    # Sweep through the middle, creating matrix products each with
    # shape (chi,d,chi); the remainder c carries the untruncated right part.
    for i in xrange(1,N-2):
        c = np.reshape(c,(d*chi,d**(N-i-1)))
        (ap,sv,c) = np.linalg.svd(c)
        s = np.zeros((d*chi,chi),dtype=complex)
        s[:min(chi,len(sv)),:min(chi,len(sv))] = np.diag(sv[:chi])
        A.append(np.reshape(np.dot(ap,s),(chi,d,chi)))
        newc = np.zeros((chi,d**(N-i-1)),dtype=complex)
        newc[:min(chi,len(sv)),:] = c[:chi,:]
        c = newc
    # Finish right end with the remaining vector
    c = np.reshape(c,(d*chi,d))
    (ap,sv,c) = np.linalg.svd(c)
    s = np.zeros((chi,d),dtype=complex)
    s[:d,:d] = np.diag(sv[:chi])
    A.append(np.reshape(ap[:chi,:],(chi,d,chi)))
    c = np.dot(s,c)
    A.append(c)
    # Fix up ends by filling first row of correctly shaped zeros with
    # end vectors such that the trace is preserved.
    start = np.zeros((chi,d,chi),dtype=complex)
    start[0,:,:] = A[0]
    A[0] = start
    finish = np.zeros((chi,d,chi),dtype=complex)
    finish[:,:,0] = A[-1]
    A[-1] = finish
    # Return MPS as numpy array with shape (N,chi,d,chi)
    return np.array(A)
def innerProduct(A, B):
    """Inner product <A|B> of two MPS computed via per-site transfer matrices."""
    n_sites = len(A)
    chi_a = A.shape[1]
    chi_b = B.shape[1]
    bra = np.conj(A)  # adjoint of |A> gives <A|
    # One transfer matrix per site: contract the bra/ket tensors over the
    # physical index, then flatten each pair of bond indices.
    transfer_matrices = []
    for site in range(n_sites):
        t = np.tensordot(bra[site], B[site], axes=(1, 1))
        t = np.transpose(t, axes=(0, 2, 1, 3))
        transfer_matrices.append(np.reshape(t, (chi_a * chi_b, chi_a * chi_b)))
    # Multiply the chain of transfer matrices and close the trace.
    result = transfer_matrices[0]
    for t in transfer_matrices[1:]:
        result = np.tensordot(result, t, axes=(-1, 0))
    return np.trace(result)
def operatorInner(A,U,B):
    """ Compute <A|U|B> where A,B are MPS and U is a MPO.
    NOTE(review): after the two tensordots t has six bond indices
    (chiA,chiA,chiU,chiU,chiB,chiB) but is reshaped to
    (chiA*chiA*d, chiB*chiB*d) with no transpose; the element count only
    matches when d == chiU and the index grouping looks wrong — suspected
    bug, verify against a dense <a|U|b> before relying on this. """
    N = len(A)
    d = A.shape[2]
    chiA = A.shape[1]
    chiB = B.shape[1]
    chiU = U.shape[1]
    # Take complex conjugate of elements in A to get <A|
    A = np.conj(A)
    # Construct list of transfer matrices
    transfer = []
    for i in xrange(N):
        t = np.tensordot(A[i],U[i],axes=(1,1))
        t = np.tensordot(t,B[i],axes=(3,1))
        t = np.reshape(t,(chiA*chiA*d,chiB*chiB*d))
        transfer.append(t)
    # Take product of transfer matrices
    prod = transfer[0]
    for i in xrange(1,N):
        prod = np.tensordot(prod,transfer[i],axes=(-1,0))
    return np.trace(prod)
def getOperator(mpo):
    """Contract an MPO of shape (N, chi, d, d, chi) into its dense
    (d**N, d**N) matrix representation.

    Bug fixes: the site count read the undefined name ``A`` (NameError) —
    now ``len(mpo)``; the transpose referenced the misspelled
    ``perutation`` (NameError) and never reassigned ``prod``.
    """
    N = len(mpo)
    d = mpo.shape[2]
    # Chain the site tensors along their bond indices, then close the
    # periodic boundary by tracing out the outer bond pair.
    prod = mpo[0]
    for i in range(1, N):
        prod = np.tensordot(prod, mpo[i], axes=(-1, 0))
    prod = np.trace(prod, axis1=0, axis2=-1)
    # Reorder indices from (out_0, in_0, out_1, in_1, ...) to all row
    # (output) indices first, then all column (input) indices.
    permutation = tuple(list(range(0, 2 * N, 2)) + list(range(1, 2 * N, 2)))
    prod = np.transpose(prod, permutation)
    return np.reshape(prod, (d**N, d**N))
def getMPO(U,chi):
    """ Returns MPO of operator U.
    NOTE(review): unfinished stub — it always returns 0 and discards the SVD
    factors. The first reshape target tuple([i for i in xrange(2*N)]) is the
    tuple (0, 1, ..., 2N-1), which is not a valid shape for U; only the
    transpose/second reshape path looks meaningful. TODO: complete the
    left-to-right SVD sweep mirroring getMPS before using this. """
    d = 2
    N = int(np.log2(U.shape[0]))
    mpo = []
    c = np.reshape(U,tuple([i for i in xrange(2*N)]))
    permutation = []
    for i in xrange(N):
        permutation.append(i)
        permutation.append(i+N)
    c = np.transpose(U,tuple(permutation))
    c = np.reshape(c,(d**2,d**(2*(N-1))))
    [up,sv,c] = np.linalg.svd(c)
    return 0
def randomState(d, N):
    """Random normalised complex state vector of dimension d**N."""
    dim = d**N
    amplitudes = (np.random.rand(dim) - .5) + 1j * (np.random.rand(dim) - .5)
    return amplitudes / np.linalg.norm(amplitudes)
def equalDist(N):
    """Uniform superposition of N qubits as a rank-N amplitude tensor
    (all 2**N amplitudes equal to 1/sqrt(2)**N)."""
    shape = (2,) * N
    return np.ones(shape) / np.sqrt(2)**N
def ghz(N):
    """GHZ state (|0...0> + |1...1>)/sqrt(2) of N qubits, as a rank-N tensor."""
    amplitudes = np.zeros(2**N)
    amplitudes[0] = 1 / np.sqrt(2)
    amplitudes[-1] = 1 / np.sqrt(2)
    return np.reshape(amplitudes, (2,) * N)
def Z(N):
    """Tensor product of N Pauli-Z operators: a (2**N, 2**N) diagonal matrix.

    Bug fix: the accumulator previously started at np.identity(2), which
    prepended a spurious identity factor and returned a 2**(N+1)-dimensional
    matrix instead of the N-qubit operator.
    """
    sz = np.array([[1, 0], [0, -1]])
    z = np.identity(1)  # neutral element of the Kronecker product
    for _ in range(N):
        z = np.kron(z, sz)
    return z
def tp(factors):
    """Tensor (Kronecker) product of a list of matrices.

    Bug fix: the loop previously passed the whole list ``factors`` to
    np.kron instead of the i-th factor, producing garbage.
    """
    prod = factors[0]
    for i in range(1, len(factors)):
        prod = np.kron(prod, factors[i])
    return prod
def cShape(d, N):
    """Shape tuple (d, d, ..., d) of the rank-N amplitude tensor."""
    return (d,) * N
def densityMatrix(state):
    """Density matrix rho = |state><state| of a pure state vector.

    Bug fix: previously built np.outer(p, p) from the amplitude moduli
    p = |state|, which silently discards all phase information for complex
    states; the pure-state density matrix is outer(state, conj(state)).
    """
    return np.outer(state, np.conj(state))
def entropy(state):
    """Von Neumann entanglement entropy of a pure state for the bipartition
    first qubit vs rest, computed from the singular values of the reshaped
    amplitude matrix."""
    half = np.size(state) // 2  # floor division: same as Py2 int '/'
    singular = np.linalg.svd(np.reshape(state, (2, half)))[1]
    probs = np.abs(singular)**2
    total = 0
    for p in probs:
        if p != 0:  # 0*log(0) contributes nothing; avoid log(0)
            total += p * np.log(p)
    return -total
def matFunction(f, A):
    """Apply a scalar function f to a diagonalisable matrix A via its
    eigendecomposition: P diag(f(D)) P^{-1}."""
    eigenvalues, eigenvectors = np.linalg.eig(A)
    transformed = np.diag(f(eigenvalues))
    return np.dot(eigenvectors, np.dot(transformed, np.linalg.inv(eigenvectors)))
if __name__ == "__main__":
    # Run the demo suite only when executed as a script, not on import.
    main()
| [
"ehua7365@uni.sydney.edu.au"
] | ehua7365@uni.sydney.edu.au |
b29e3ade3cd2a2c6bf345197fde1325fe063977b | 6660cb47a8e86495e27f33ad4ea07a91245e3cd5 | /main.py | 87754e243ca7e87e31f3779a544cfe7eb45cd076 | [] | no_license | LADYHR/CapsNet_for_ADNI | 1a981177b8b2229f9f524e070ff8fd92190f3417 | 7d5851baace8434e5123fb03964f775b992bf19a | refs/heads/master | 2021-04-12T08:11:05.114182 | 2018-05-20T13:07:16 | 2018-05-20T13:07:16 | 126,023,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | '''
=====================================================
=====================================================
Copyright (c) 2018,LADYHR
All rights reserved
FileName: main.py
Abstract: This is a main program. Aimed at using
Capsule Network to design a classifier for AD/MCI/NC.
=====================================================
=====================================================
'''
#!user/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from utils import get_batch_data
from config import cfg
from CapsNet import CapsNet
tf.logging.set_verbosity(tf.logging.INFO)
def save_to():
    """Create cfg.results (if missing) and return fresh CSV log handles.

    Training mode: returns (fd_train_acc, fd_loss, fd_val_acc), each with a
    header row already written. Test mode: returns a single fd_test_acc.
    Pre-existing log files are deleted first; the caller is responsible for
    closing the returned file objects.
    """
    if not os.path.exists(cfg.results):
        os.mkdir(cfg.results)
    if cfg.is_training:
        loss = cfg.results + '/loss.csv'
        train_acc = cfg.results + '/train_acc.csv'
        val_acc = cfg.results + '/val_acc.csv'
        # Start each run from empty logs.
        if os.path.exists(val_acc):
            os.remove(val_acc)
        if os.path.exists(loss):
            os.remove(loss)
        if os.path.exists(train_acc):
            os.remove(train_acc)
        fd_train_acc = open(train_acc, 'w')
        fd_train_acc.write('step,train_acc\n')
        fd_loss = open(loss, 'w')
        fd_loss.write('step,loss\n')
        fd_val_acc = open(val_acc, 'w')
        fd_val_acc.write('step,val_acc\n')
        return fd_train_acc, fd_loss, fd_val_acc
    else:
        test_acc = cfg.results + '/test_acc.csv'
        if os.path.exists(test_acc):
            os.remove(test_acc)
        fd_test_acc = open(test_acc, 'w')
        fd_test_acc.write('test_acc\n')
        return fd_test_acc
def train(model, supervisor):
    """Train the CapsNet for cfg.epoch epochs under a tf.train.Supervisor,
    logging loss/accuracy to CSV and periodically checkpointing the model."""
    fd_train_acc, fd_loss, fd_val_acc = save_to()
    config = tf.ConfigProto()  # session configuration used when the supervisor creates the session
    config.gpu_options.allow_growth = True  # start with little GPU memory and grow on demand (memory is never released, which can fragment)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with supervisor.managed_session(config=config) as sess:
        print("\nNote: all of results will be saved to directory: " + cfg.results)
        for epoch in range(cfg.epoch):
            print('Training for epoch ' + str(epoch) + '/' + str(cfg.epoch) + ':')
            if supervisor.should_stop():
                print('supervisor stoped!')
                break
            for step in tqdm(range(cfg.num_tr_batch), total=cfg.num_tr_batch, ncols=70, leave=False, unit='b'):
                # module-level counter so the final checkpoint name can use it
                global global_step
                global_step = epoch * cfg.num_tr_batch + step
                if global_step % cfg.train_sum_freq == 0:  # record total_loss/accuracy/train_summary every train_sum_freq mini-batches
                    _, loss, train_acc, summary_str = sess.run(
                        [model.train_op, model.margin_loss, model.accuracy, model.train_summary])
                    assert not np.isnan(loss), 'Something wrong! loss is nan...'
                    supervisor.summary_writer.add_summary(summary_str, global_step)
                    fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
                    fd_loss.flush()  # flush the write buffer so logs survive a crash
                    fd_train_acc.write(str(global_step) + ',' + str(train_acc / cfg.batch_size) + "\n")
                    fd_train_acc.flush()
                else:
                    sess.run(model.train_op)  # plain optimisation step for every other mini-batch
                if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:  # run validation every val_sum_freq mini-batches
                    val_acc = 0
                    for i in range(cfg.num_val_batch):
                        img, label = sess.run([model.val_img, model.val_label])
                        acc = sess.run(model.accuracy, {model.X: img,
                                                        model.labels: label})  # feed_dict temporarily substitutes the outputs of the input ops
                        val_acc += acc
                    val_acc = val_acc / (cfg.batch_size * cfg.num_val_batch)
                    fd_val_acc.write(str(global_step) + ',' + str(val_acc) + '\n')
                    fd_val_acc.flush()
            if (epoch + 1) % cfg.save_freq == 0:
                supervisor.saver.save(sess, cfg.logdir + '/model_epoch_%04d_step_%02d' % (epoch, global_step))
                # without an explicit saver call the model would not be saved
    fd_val_acc.close()
    fd_train_acc.close()
    fd_loss.close()
def evaluation(model, supervisor):
    """Restore the latest checkpoint and compute test-set accuracy, writing
    the single result value to the CSV handle returned by save_to()."""
    fd_test_acc = save_to()
    with supervisor.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))  # reload the trained model
        tf.logging.info('Model restored!')
        test_acc = 0
        for i in tqdm(range(cfg.num_te_batch), total=cfg.num_te_batch, ncols=70, leave=False, unit='b'):
            img, label = sess.run([model.te_img, model.te_label])
            acc = sess.run(model.accuracy, {model.X: img, model.labels: label})
            test_acc += acc
        test_acc = test_acc / (cfg.batch_size * cfg.num_te_batch)
        fd_test_acc.write(str(test_acc))
        fd_test_acc.close()
        print('Test accuracy has been saved to ' + cfg.results + '/test_accuracy.txt')
def main(_):
    """tf.app.run entry point: build the CapsNet graph and either train or
    evaluate depending on cfg.is_training."""
    tf.logging.info('Loading Graph...')
    model = CapsNet()
    tf.logging.info('Graph loaded!')
    # save_model_secs=0 disables the supervisor's automatic checkpointing;
    # train() saves explicitly every cfg.save_freq epochs.
    sv = tf.train.Supervisor(graph=model.graph, logdir=cfg.logdir, save_model_secs=0)
    if cfg.is_training:
        tf.logging.info('Start Training...')
        train(model, sv)
        tf.logging.info('Train done!')
    else:
        evaluation(model, sv)
        tf.logging.info('Test done!')
    print("Main programming finished!")
if __name__ == "__main__":
    # tf.app.run parses flags and then calls main(_).
    tf.app.run()
| [
"ladyhr@outlook.com"
] | ladyhr@outlook.com |
65876a1c89f8f78806ac85f76004766b167432d2 | e73bd3bd40a58aed0e00fcfc3494180b4e49c7fb | /BossZhiPin/spider/spider_boss.py | 86f4e7d102311b1a0b532a85ea9b6972cb978eb8 | [] | no_license | XXO47OXX/BossSpider | 980b6f1d55adb9f1dc0c9dad6b186255238c0a0e | 4127531a42f2c6c83a3c7980cca95200154c9c61 | refs/heads/master | 2023-03-22T07:59:24.681017 | 2020-06-03T12:34:03 | 2020-06-03T12:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,938 | py | import requests
import re
from lxml import etree
# from multiprocessing.dummy import Pool as ThreadPool
# from spider_boss.getCookie import GetCookie
from db.MysqlSave import Save2Mysql
from getCookie import GetCookie
from settings import KEYWORD
class JobSpider(object):
    """Crawler for Boss ZhiPin job listings.

    Walks every city for the configured KEYWORD, follows pagination
    recursively, stores each listing via Save2Mysql, and refreshes the
    anti-bot ``__zp_stoken__`` cookie whenever the site serves an
    interstitial page. Progress (last city index) is persisted to
    spider/break_point.ini so a crashed run can resume.
    """
    def __init__(self):
        self.mysql = Save2Mysql()
        self.mysql.add_table()
        self.session = requests.Session()
        self.get_cookie = GetCookie()
        # Spoof a desktop browser and attach the freshly generated token.
        self.session.headers.update({
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
            'cookie': f'__zp_stoken__={self.get_cookie.get_cookie()}'
        })
    def start_requests(self):
        """Iterate all city codes (resuming from the break-point file) and
        parse the first listings page of each."""
        cities = self.get_city_urls().keys()
        print(cities)
        params = "?query={}&page=1&ka=page-1".format(KEYWORD)
        num = ''
        # NOTE(review): an empty break_point.ini leaves num == '' and
        # int(num) below raises — confirm the file always holds an integer.
        try:
            with open('spider/break_point.ini', 'r', encoding='utf-8') as f:
                num = f.read()
        except FileNotFoundError:
            num = 0
        finally:
            print(num)
            cities = list(cities)[int(num):]
            print(cities)
        for index, city in enumerate(cities):
            url = 'https://www.zhipin.com/c{}/{}'.format(city, params)
            print(url)
            try:
                self.parse(url)
            except Exception as e:
                # Record where we stopped so the next run can resume here.
                print(e.args)
                with open('spider/break_point.ini', 'w', encoding='utf-8') as f:
                    f.write(str(index))
    @staticmethod
    def get_city_urls():
        """Fetch the public city-group JSON and return {city_code: name}."""
        resp = requests.get('https://www.zhipin.com/wapi/zpCommon/data/cityGroup.json')
        a = resp.json()
        city_group = a.get('zpData').get('cityGroup')
        data = {}
        for char in city_group:
            for city in char.get('cityList'):
                code = city.get('code')
                name = city.get('name')
                data[str(code)] = name
        return data
    def parse(self, url):
        """Scrape one listings page, insert every job row, then recurse into
        the next page; refreshes the cookie and retries on anti-bot pages."""
        resp = self.session.get(url).content.decode('utf-8')
        # with open('001.html', 'w', encoding='utf-8') as f:
        #     f.write(resp)
        response = etree.HTML(resp)
        num = int(re.findall('page=(\d+)', url)[0])
        try:
            response.xpath('//title/text()')[0]
        except IndexError:
            # Empty/blocked response: retry the same URL.
            self.parse(url)
        else:
            # '请稍后' ("please wait") is the anti-bot interstitial title.
            if response.xpath('//title/text()')[0] != '请稍后':
                try:
                    response.xpath('//div[@class="job-primary"]')[0]
                except IndexError as e:
                    # No listings: refresh the token and stop this branch.
                    self.session.headers.update({
                        'cookie': f'__zp_stoken__={self.get_cookie.get_cookie()}'
                    })
                    return
                else:
                    item = {}
                    job_info_divs = response.xpath('//div[@class="job-primary"]')
                    for div in job_info_divs:
                        item['city_id'] = url.split('/')[-2].replace('c', '')
                        item['job_id'] = div.xpath('div[@class="i'
                                                   'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                   'ry-box"]/div[@class="job-title"]/span[@class="job-name"]/a/@data-jobid')[0]
                        item['job_name'] = div.xpath('div[@class="i'
                                                     'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                     'ry-box"]/div[@class="job-title"]/span[@class="job-name"]/a/text()')[0]
                        item['job_area'] = div.xpath('div[@class="i'
                                                     'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                     'ry-box"]/div[@class="job-title"]/span[@class="job-area-wrapper"]/'
                                                     'span[@class="job-area"]/text()')[0]
                        item['job_salary'] = div.xpath('div[@class="i'
                                                       'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                       'ry-box"]/div[@class="job-limit clearfix"]/span[@class="red"]/'
                                                       'text()')[0]
                        # The limit <p> usually holds [experience, education];
                        # with extra entries everything but the last is experience.
                        try:
                            item['job_exe'], item['job_edu'] = div.xpath('div[@class="i'
                                                                         'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                                         'ry-box"]/div[@class="job-limit clearfix"]/p/text()')
                        except ValueError:
                            item['job_exe'] = ' | '.join(div.xpath('div[@class="i'
                                                                   'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                                   'ry-box"]/div[@class="job-limit clearfix"]/p/text()')[:-1])
                            item['job_edu'] = div.xpath('div[@class="i'
                                                        'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                        'ry-box"]/div[@class="job-limit clearfix"]/p/text()')[-1]
                        item['job_tags'] = ' | '.join(div.xpath('div[@class="info-append clearfix"]/div[@class="tags"]/span[@class="tag-item"]/text()'))
                        item['job_welfare'] = ''.join(div.xpath('div[@class="info-append clearfix"]/div[@class="info-desc"]/text()'))
                        # Publisher block: [contact name, job position] when both exist.
                        try:
                            item['contact'], item['position'] = div.xpath('div[@class="i'
                                                                          'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                                          'ry-box"]/div[@class="job-limit clearfix"]/div[@class="info-publis"]/h3[@class="name"]/text()')
                        except ValueError:
                            item['contact'] = div.xpath('div[@class="i'
                                                        'nfo-primary"]/div[@class="primary-wrapper"]/div[@class="prima'
                                                        'ry-box"]/div[@class="job-limit clearfix"]/div[@class="info-publis"]/h3[@class="name"]/text()')[0]
                            item['position'] = ''
                        item['company_name'] = div.xpath('div[@class="info-primary"]/div[@class="info-company"]/div[@class="company-text"]/h3[@class="name"]/a/text()')[0]
                        item['company_industry'] = div.xpath('div[@class="info-primary"]/div[@class="info-company"]/div[@class="company-text"]/p/a/text()')[0]
                        try:
                            item['company_natural'], item['company_size'] = div.xpath('div[@class="info-primary"]/div[@class="info-company"]/div[@class="company-text"]/p/text()')
                        except ValueError as e:
                            print(e)
                            item['company_natural'] = ''
                            item['company_size'] = div.xpath('div[@class="info-primary"]/div[@class="info-company"]/div[@class="company-text"]/p/text()')[0]
                        self.mysql.insert(item)
                        # '插入成功' = "insert succeeded"
                        print('插入成功', item)
                    # Bump the page number in the query string and recurse.
                    num += 1
                    host, params = url.split('?')
                    params = re.sub('(\d+)', str(num), params)
                    next_url = host + '?' + params
                    self.parse(next_url)
            else:
                # '加载cookie重新下载' = "load a new cookie and re-download"
                print('加载cookie重新下载')
                self.session.headers.update({
                    'cookie': f'__zp_stoken__={self.get_cookie.get_cookie()}'
                })
                self.parse(url)
    def run(self):
        """Public entry point: start the full crawl."""
        self.start_requests()
if __name__ == '__main__':
job_spider = JobSpider()
job_spider.run() | [
"pli@kaikeba.com"
] | pli@kaikeba.com |
0302ab3ae4300e2c98fc2193b302f23a15365277 | d8a14050d2e294eeb1bae3500c001d25ec10b9e7 | /django_rok/ssh_tunnel.py | f8e97c4962d1f91a694d9e59d3450e92a37d9497 | [
"MIT"
] | permissive | ramesh960386/django-rok | 6e7ee93f56910c798b560ffbd309b1a717ec0356 | 5edb21b7c39904fdcd3c90410e43a34c3f9f70b1 | refs/heads/master | 2021-09-18T19:14:24.744063 | 2018-07-18T08:09:27 | 2018-07-18T08:09:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | import paramiko
import sys
from django_rok.re_forward_port import reverse_forward_tunnel
from django_rok.util import bcolors
ssh_port = 22
localhost = '127.0.0.1'
def create_ssh_tunnel(localport, remote_host, remote_port, username, password=None, pkey=None):
transport = paramiko.Transport((remote_host, ssh_port))
transport.connect(hostkey=None,
username=username,
password=password,
pkey=pkey)
reverse_tunnel_url = 'http://' + str(remote_host) + ':' + str(remote_port)
initalizing_info = bcolors.OKGREEN + 'Starting Reverse Tunnel at ' + reverse_tunnel_url + bcolors.ENDC
try:
print(initalizing_info)
reverse_forward_tunnel(remote_port, localhost, localport, transport)
except KeyboardInterrupt:
print('Ssh Tunelling Stopped')
sys.exit(0)
| [
"ankurj630@gmail.com"
] | ankurj630@gmail.com |
4b69df22370439b056815649a17bf08a73296d20 | 6a0c944d1f3c6f1b4a262e235cb5d8f0889e80be | /capitulo_6/cap6_project/urls.py | fa211d1f9e876eac4b1338f4d34a2cbf76bd5efe | [] | no_license | Fahrek/django-test | 40625dcdad2774d927c0f5252b2c2caa3192415c | 48fbbf1ae66a1776f4140d36dfa363fbd07d1277 | refs/heads/master | 2022-12-18T01:40:20.457896 | 2020-09-23T18:42:40 | 2020-09-23T18:42:40 | 296,071,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | """cap6_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from listas import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home_page_view, name='inicio'),
path('add/', views.insert, name='insert'),
path('select/', views.select, name='select')
]
| [
"andresgqjob@gmail.com"
] | andresgqjob@gmail.com |
4ed7b0073e5f3f21e7883ee46de2d41af70f1429 | b00840e56173dc2a196442bd354b9e3cc13b17df | /code_util/createJobScript.py | c360a93fc09e90dace29b76e6b66c43797d94224 | [] | no_license | Sportsfan77777/vortex | 56c28fb760f6c98de4a7c8fdcf1168d78b4e57af | 780ec14937d1b79e91a367d58f75adc905b8eef2 | refs/heads/master | 2023-08-31T02:50:09.454230 | 2023-08-24T10:55:05 | 2023-08-24T10:55:05 | 41,785,163 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | """
makes a new job script
"""
import argparse
def new_argument_parser(description = "Make a new job script."):
parser = argparse.ArgumentParser()
# File
parser.add_argument("fn",
help = 'job file name (.sh appended to the end) that must be included, error otherwise')
# Basic Parameters
parser.add_argument('-c', dest = "num_cores", type = int, default = 1,
help = 'number of cores (default: 1)')
parser.add_argument('-p', dest = "ptile", type = int, default = None,
help = 'number of cores needed on each computer (default: num_cores)')
parser.add_argument('--err', dest = "err_name", default = "err_%I",
help = 'job error file name (default: err_%I)')
parser.add_argument('--out', dest = "out_name", default = "out_%I",
help = 'job output file name (default: out_%I)')
parser.add_argument('-q', dest = "queue", default = "medium",
help = 'queue (default: medium)')
parser.add_argument('--name', dest = "name", default = None,
help = 'queue (default: fn)')
parser.add_argument('--gpu', dest = "gpu", action = 'store_true', default = False,
help = 'request gpu resource (default: no gpus)')
# Modules
parser.add_argument('--python_off', dest = "python", action = 'store_false', default = True,
help = 'include python module (default: include)')
parser.add_argument('--fftw_off', dest = "fftw", action = 'store_false', default = True,
help = 'include fftw module (default: include)')
parser.add_argument('--openmpi_off', dest = "openmpi", action = 'store_false', default = True,
help = 'include openmpi module (default: include)')
# Job
parser.add_argument('--mpi', dest = "mpirun", action = 'store_true', default = False,
help = 'use mpirun (default: do not use mpirun)')
parser.add_argument('-j', dest = "job", default = "",
help = 'job command (default: empty string)')
parser.add_argument('-o', dest = "output", default = None,
help = 'output file (.out appended to the end) (default: name)')
return parser
###############################################################################
### Parse Arguments ###
args = new_argument_parser().parse_args()
# Names
if args.name is None:
args.name = args.fn
if args.output is None:
args.output = args.name
args.fn = "%s.sh" % args.fn
args.output = "%s.out" % args.output
# Cores
if (args.ptile is None) or (args.ptile > args.num_cores):
args.ptile = args.num_cores
###############################################################################
### Write File ###
with open(args.fn, 'w') as f:
f.write("#!/bin/bash\n")
### Basic Parameters ###
f.write("#BSUB -n %d\n" % args.num_cores)
f.write("#BSUB -e %s\n" % args.err_name)
f.write("#BSUB -o %s\n" % args.out_name)
f.write('#BSUB -q "%s"\n' % args.queue)
f.write("#BSUB -u mhammer\n")
f.write("#BSUB -J %s\n" % args.name)
if args.gpu:
f.write("#BSUB -R gpu\n")
f.write('#BSUB -R "span[ptile=%d]"\n' % args.ptile)
# Line Break #
f.write("\n")
### Modules ###
if args.python:
f.write("module load python/2.7.3\n")
if args.fftw:
f.write("module load fftw/2.1.5\n")
if args.openmpi:
f.write("module load openmpi\n")
# Line Break
f.write("\n")
### Job ###
if args.mpirun:
f.write("mpirun -np %d " % args.num_cores)
f.write("%s " % args.job)
f.write("> %s\n" % args.output)
# Line Break
f.write("\n")
| [
"mhammer44444@gmail.com"
] | mhammer44444@gmail.com |
f74f0ac80048edd8b753de5045de96a035617ea8 | 051002c97de47ef5885c64fc1cb8b1bbbdf1dfe3 | /backend/accounts/serializers.py | 03d031742711d03ea2ce21a93292006636aba4b1 | [] | no_license | cristianemoyano/descuentos | 9a669eaaadd1cf74c5ed872480b3e7cadb7cf9d0 | 3ba1b89595b0691f2f26fc60e45f7fd74e682d52 | refs/heads/master | 2023-01-14T02:57:28.540235 | 2019-07-14T21:02:22 | 2019-07-14T21:02:22 | 195,719,236 | 0 | 0 | null | 2023-01-04T03:53:37 | 2019-07-08T02:00:42 | JavaScript | UTF-8 | Python | false | false | 906 | py | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id','username','email')
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id','username','email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect credentials") | [
"cristianmoyano.mza@gmail.com"
] | cristianmoyano.mza@gmail.com |
53ae973bfd0abd293fa0007514cbe6badfd2f088 | 3a0bb0e087166f0835eae4cc3aab41e689afd020 | /main_app/serializers.py | b1c82f0a534d7769f8b19ca8fc475ee3e1a69f10 | [] | no_license | garabedian/AutoCry_Python_WEB_Final_Project | 742356a761db1aeed7e8c45bce1e8f29161732ba | 1a20dc1782164084e9a924c7f7b5878867f7757b | refs/heads/master | 2023-01-30T20:34:35.876902 | 2020-12-13T17:41:44 | 2020-12-13T17:41:44 | 310,921,502 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from rest_framework import serializers
from main_app.models import Item
class ItemSerializer(serializers.ModelSerializer):
class Meta:
model = Item
fields = '__all__'
| [
"takvor@abv.bg"
] | takvor@abv.bg |
f8989ca25200e1c8ef2f174e882aa2177cf58852 | a1e9101bbd309f613369b10018b16541e9af2fd0 | /gui.py | 779104ae0317562583993f3f2f6a444b80dc32dd | [] | no_license | seuqaj114/ANN | 93d6fb474cde319c26ba5e557208fbb0e59a39a5 | 9453604bdf87d8fadf726735eb8bed1b37a93319 | refs/heads/master | 2016-09-05T15:57:39.117540 | 2014-12-22T17:12:54 | 2014-12-22T17:12:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | import numpy as np
import sys
import time
from PyQt4 import QtGui
from PyQt4 import QtCore
import network
"""
Training the OR function
"""
def train():
global net
global window
for i in range(0,100):
time.sleep(0.1)
net.gd(training_set,5.0)
window.update(net.feed_forward([1,0]))
class MainWindow(QtGui.QWidget):
def __init__(self,newtork_geometry):
QtGui.QWidget.__init__(self)
self.h_ref = 100
self.v_ref = 100
self.h_step = 50
self.v_step = 30
self.setGeometry(200,200,600,400)
self.setWindowTitle("Neural network")
self.qbtn=QtGui.QPushButton(self)
self.qbtn.setText("Start")
self.qbtn.setObjectName("start")
self.qbtn.clicked.connect(train)
self.qbtn.setFixedSize(50,50)
self.qbtn.move(50,50)
self.btns=[]
for l in range(len(newtork_geometry)):
self.btns.append([])
for i in range(newtork_geometry[l]):
btn=QtGui.QPushButton(self)
btn.setFixedSize(20,20)
btn.move(self.h_ref+l*self.h_step,self.v_ref+i*self.v_step)
btn.setStyleSheet("background-color: rgba(%s,%s,%s,255)" % (100,100,100))
self.btns[l].append(btn)
def update(self,a_mat):
for i in range(len(a_mat)):
for j in range(len(a_mat[i])):
color = int(255.0*a_mat[i][j])
self.btns[i][j].setStyleSheet("background-color: rgba(%s,%s,%s,255)" % (color,color,color))
if __name__ == "__main__":
newtork_geometry = [2,2,1]
training_set = [([1,0],[1]),([0,1],[1]),([0,0],[0]),([1,1],[1])]
net = network.Network(newtork_geometry)
app = QtGui.QApplication(sys.argv)
window = MainWindow(newtork_geometry)
window.show()
sys.exit(app.exec_()) | [
"migjacques@hotmail.com"
] | migjacques@hotmail.com |
3ab0cd1ee0d711b1a1741999178c114cffc8b52f | 766636058fb034c02cb43ab99a8ba222df1a525b | /liblavinder/command.py | fa0ec5a4a7a62ad99a31511fcde720b84d8f8285 | [
"MIT"
] | permissive | g--o/Lavinder | b8954616e6de17f0e1d2d8b309d4a5817f50869a | a31092c452be769b7aeb6a1a80a6e57cc4625829 | refs/heads/develop | 2020-04-24T17:15:17.672924 | 2019-09-04T21:49:32 | 2019-09-04T21:49:32 | 172,140,376 | 1 | 4 | MIT | 2019-06-05T23:14:57 | 2019-02-22T21:59:58 | Python | UTF-8 | Python | false | false | 14,203 | py | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
import inspect
import traceback
import os
from . import ipc
from .utils import get_cache_dir
from .log_utils import logger
class CommandError(Exception):
pass
class CommandException(Exception):
pass
class _SelectError(Exception):
def __init__(self, name, sel):
Exception.__init__(self)
self.name = name
self.sel = sel
SUCCESS = 0
ERROR = 1
EXCEPTION = 2
SOCKBASE = "lavindersocket.%s"
def format_selectors(lst):
"""
Takes a list of (name, sel) tuples, and returns a formatted
selector expression.
"""
expr = []
for name, sel in iter(lst):
if expr:
expr.append(".")
expr.append(name)
if sel is not None:
expr.append("[%s]" % repr(sel))
return "".join(expr)
class _Server(ipc.Server):
def __init__(self, fname, lavinder, conf, eventloop):
if os.path.exists(fname):
os.unlink(fname)
ipc.Server.__init__(self, fname, self.call, eventloop)
self.lavinder = lavinder
self.widgets = {}
for i in conf.screens:
for j in i.gaps:
if hasattr(j, "widgets"):
for w in j.widgets:
if w.name:
self.widgets[w.name] = w
def call(self, data):
selectors, name, args, kwargs = data
try:
obj = self.lavinder.select(selectors)
except _SelectError as v:
e = format_selectors([(v.name, v.sel)])
s = format_selectors(selectors)
return (ERROR, "No object %s in path '%s'" % (e, s))
cmd = obj.command(name)
if not cmd:
return (ERROR, "No such command.")
logger.debug("Command: %s(%s, %s)", name, args, kwargs)
try:
return (SUCCESS, cmd(*args, **kwargs))
except CommandError as v:
return (ERROR, v.args[0])
except Exception:
return (EXCEPTION, traceback.format_exc())
class _Command:
def __init__(self, call, selectors, name):
"""
:command A string command name specification
:*args Arguments to be passed to the specified command
:*kwargs Arguments to be passed to the specified command
"""
self.selectors = selectors
self.name = name
self.call = call
def __call__(self, *args, **kwargs):
return self.call(self.selectors, self.name, *args, **kwargs)
class _CommandTree(metaclass=abc.ABCMeta):
"""A hierarchical collection of objects that contain commands
CommandTree objects act as containers, allowing them to be nested. The
commands themselves appear on the object as callable attributes.
"""
def __init__(self, selectors, myselector, parent):
self.selectors = selectors
self.myselector = myselector
self.parent = parent
@property
def path(self):
s = self.selectors[:]
if self.name:
s += [(self.name, self.myselector)]
return format_selectors(s)
@property
@abc.abstractmethod
def name(self):
pass
@property
@abc.abstractmethod
def _contains(self):
pass
def call(self, selectors, name, *args, **kwargs):
if self.parent:
return self.parent.call(selectors, name, *args, **kwargs)
else:
raise NotImplementedError()
def __getitem__(self, select):
if self.myselector:
raise KeyError("No such key: %s" % select)
return self.__class__(self.selectors, select, self)
def __getattr__(self, name):
next_selector = self.selectors[:]
if self.name:
next_selector.append((self.name, self.myselector))
if name in self._contains:
return _TreeMap[name](next_selector, None, self)
else:
return _Command(self.call, next_selector, name)
class _TLayout(_CommandTree):
name = "layout"
_contains = ["group", "window", "screen"]
class _TWidget(_CommandTree):
name = "widget"
_contains = ["bar", "screen", "group"]
class _TBar(_CommandTree):
name = "bar"
_contains = ["screen"]
class _TWindow(_CommandTree):
name = "window"
_contains = ["group", "screen", "layout"]
class _TScreen(_CommandTree):
name = "screen"
_contains = ["layout", "window", "bar"]
class _TGroup(_CommandTree):
name = "group"
_contains = ["layout", "window", "screen"]
_TreeMap = {
"layout": _TLayout,
"widget": _TWidget,
"bar": _TBar,
"window": _TWindow,
"screen": _TScreen,
"group": _TGroup,
}
class _CommandRoot(_CommandTree, metaclass=abc.ABCMeta):
"""This class constructs the entire hierarchy of callable commands from a conf object"""
name = None
_contains = ["layout", "widget", "screen", "bar", "window", "group"]
def __init__(self):
_CommandTree.__init__(self, [], None, None)
def __getitem__(self, select):
raise KeyError("No such key: %s" % select)
@abc.abstractmethod
def call(self, selectors, name, *args, **kwargs):
"""This method is called for issued commands.
Parameters
==========
selectors :
A list of (name, selector) tuples.
name :
Command name.
"""
pass
def find_sockfile(display=None):
"""
Finds the appropriate socket file.
"""
display = display or os.environ.get('DISPLAY') or ':0.0'
if '.' not in display:
display += '.0'
cache_directory = get_cache_dir()
return os.path.join(cache_directory, SOCKBASE % display)
class Client(_CommandRoot):
"""Exposes a command tree used to communicate with a running instance of Lavinder"""
def __init__(self, fname=None, is_json=False):
if not fname:
fname = find_sockfile()
self.client = ipc.Client(fname, is_json)
_CommandRoot.__init__(self)
def call(self, selectors, name, *args, **kwargs):
state, val = self.client.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
class CommandRoot(_CommandRoot):
def __init__(self, lavinder):
self.lavinder = lavinder
super().__init__()
def call(self, selectors, name, *args, **kwargs):
state, val = self.lavinder.server.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
class _Call:
"""
Parameters
==========
command :
A string command name specification
args :
Arguments to be passed to the specified command
kwargs :
Arguments to be passed to the specified command
"""
def __init__(self, selectors, name, *args, **kwargs):
self.selectors = selectors
self.name = name
self.args = args
self.kwargs = kwargs
# Conditionals
self.layout = None
def when(self, layout=None, when_floating=True):
self.layout = layout
self.when_floating = when_floating
return self
def check(self, q):
if self.layout:
if self.layout == 'floating':
if q.current_window.floating:
return True
return False
if q.current_layout.name != self.layout:
return False
if q.current_window and q.current_window.floating \
and not self.when_floating:
return False
return True
class _LazyTree(_CommandRoot):
def call(self, selectors, name, *args, **kwargs):
return _Call(selectors, name, *args, **kwargs)
lazy = _LazyTree()
class CommandObject(metaclass=abc.ABCMeta):
"""Base class for objects that expose commands
Each command should be a method named `cmd_X`, where X is the command name.
A CommandObject should also implement `._items()` and `._select()` methods
(c.f. docstring for `.items()` and `.select()`).
"""
def select(self, selectors):
"""Return a selected object
Recursively finds an object specified by a list of `(name, selector)`
items.
Raises _SelectError if the object does not exist.
"""
if not selectors:
return self
name, selector = selectors[0]
next_selector = selectors[1:]
root, items = self.items(name)
# if non-root object and no selector given
# if no items in container, but selector is given
# if selector is not in the list of contained items
if (root is False and selector is None) or \
(items is None and selector is not None) or \
(items is not None and selector and selector not in items):
raise _SelectError(name, selector)
obj = self._select(name, selector)
if obj is None:
raise _SelectError(name, selector)
return obj.select(next_selector)
def items(self, name):
"""Build a list of contained items for the given item class
Returns a tuple `(root, items)` for the specified item class, where:
root: True if this class accepts a "naked" specification without an
item seletion (e.g. "layout" defaults to current layout), and False
if it does not (e.g. no default "widget").
items: a list of contained items
"""
ret = self._items(name)
if ret is None:
# Not finding information for a particular item class is OK here;
# we don't expect layouts to have a window, etc.
return False, []
return ret
@abc.abstractmethod
def _items(self, name):
"""Generate the items for a given
Same return as `.items()`. Return `None` if name is not a valid item
class.
"""
pass
@abc.abstractmethod
def _select(self, name, sel):
"""Select the given item of the given item class
This method is called with the following guarantees:
- `name` is a valid selector class for this item
- `sel` is a valid selector for this item
- the `(name, sel)` tuple is not an "impossible" combination (e.g. a
selector is specified when `name` is not a containment object).
Return None if no such object exists
"""
pass
def command(self, name):
return getattr(self, "cmd_" + name, None)
@property
def commands(self):
cmds = [i[4:] for i in dir(self) if i.startswith("cmd_")]
return cmds
def cmd_commands(self):
"""Returns a list of possible commands for this object
Used by __qsh__ for command completion and online help
"""
return self.commands
def cmd_items(self, name):
"""Returns a list of contained items for the specified name
Used by __qsh__ to allow navigation of the object graph.
"""
return self.items(name)
def get_command_signature(self, name):
signature = inspect.signature(self.command(name))
args = list(signature.parameters)
if args and args[0] == "self":
args = args[1:]
signature = signature.replace(parameters=args)
return name + str(signature)
def get_command_docstring(self, name):
return inspect.getdoc(self.command(name)) or ""
def get_command_documentation(self, name):
spec = self.get_command_signature(name)
htext = self.get_command_docstring(name)
return spec + '\n' + htext
def cmd_doc(self, name):
"""Returns the documentation for a specified command name
Used by __qsh__ to provide online help.
"""
if name in self.commands:
return self.get_command_documentation(name)
else:
raise CommandError("No such command: %s" % name)
def cmd_eval(self, code):
"""Evaluates code in the same context as this function
Return value is tuple `(success, result)`, success being a boolean and
result being a string representing the return value of eval, or None if
exec was used instead.
"""
try:
try:
return (True, str(eval(code)))
except SyntaxError:
exec(code)
return (True, None)
except: # noqa: E722
error = traceback.format_exc().strip().split("\n")[-1]
return (False, error)
def cmd_function(self, function, *args, **kwargs):
"""Call a function with current object as argument"""
try:
function(self, *args, **kwargs)
except Exception:
error = traceback.format_exc()
logger.error('Exception calling "%s":\n%s' % (function, error))
| [
"ke7oxh@gmail.com"
] | ke7oxh@gmail.com |
ed9e4b221adaa95040a98a9868aeb47e3bf421e2 | 93a23825f9a89b2c74b90f85117fddf9927e96ac | /backend/devices/views.py | 9677e34bcd90964909d0891039b8d123f84c9dd9 | [] | no_license | mddemarie/experimenting-with-CSV | 94ae618e4c6b5547c03491b6a28af6319d810ccd | b22a767d5843f305ef33bbdc9fffd3681e6123d0 | refs/heads/master | 2022-12-12T18:41:56.413176 | 2018-05-16T20:19:44 | 2018-05-16T20:19:44 | 133,716,989 | 0 | 0 | null | 2022-12-08T02:12:18 | 2018-05-16T20:01:09 | Python | UTF-8 | Python | false | false | 550 | py | from django.http import Http404 # using later for status code 404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status # using later on for status codes
from devices.models import Device
from devices.serializers import DeviceSerializer
class DeviceList(APIView):
"""
List all devices.
"""
def get(self, request, format=None):
devices = Device.objects.all()
serializer = DeviceSerializer(devices, many=True)
return Response(serializer.data)
| [
"mddemarie@gmail.com"
] | mddemarie@gmail.com |
5658bc7bc727ba66efcf62e2e774e8da30c069fb | f048b5a783d793c5dd1889a2eb00d72cf9ae4761 | /Router/Router.py | 2671dfe1397dc05cb93efbda627fc2771c6080f0 | [] | no_license | ashkan-jafarzadeh/504-essential-words | e1bfcabf82c4c1c66d551ca7df947eb8d24fada0 | d3db99f5095a5df7aeb59c73e295594062850845 | refs/heads/main | 2023-08-24T12:50:55.267367 | 2021-09-30T19:14:02 | 2021-09-30T19:14:02 | 412,193,804 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | from Controllers.BaseController import BaseController
from Helpers.StrHelper import StrHelper
from Requests.Request import Request
import re
class Router:
api = False
path = ""
routes = []
status = 200
@classmethod
def register(cls, method: str, route: str, callback: callable, is_api=False):
match = re.search(r'{(\w+)}', route)
try:
param = match.group(1)
except:
param = ""
cls.routes.append({
"main_route": re.sub(r'{\w+}', '', route),
"route_pattern": re.sub(r'{\w+}', '.+', route),
"request_method": method,
"param": param,
"controller": callback.split("@")[0],
"method": callback.split("@")[1],
"is_api": is_api
})
@classmethod
def route_pattern(cls, route):
return re.sub(r'{\w+}', '.+', route)
pass
@classmethod
def get(cls, route: str, callback: callable):
cls.register('GET', route, callback)
@classmethod
def post(cls, route: str, callback: str):
cls.register('POST', route, callback)
@classmethod
def get_api(cls, route: str, callback: str):
cls.register('GET', "/api" + route, callback, True)
@classmethod
def post_api(cls, route: str, callback: str):
cls.register('POST', "/api" + route, callback, True)
@classmethod
def call(cls, environ):
Request.set_from_environ(environ)
path = environ["PATH_INFO"]
for route in cls.routes:
if cls.is_same_route(path, route, environ):
controller = StrHelper.get_class("Controllers." + route["controller"] + "." + route["controller"])
method = route["method"]
cls.api = route["is_api"]
cls.path = path
if hasattr(controller, method) and callable(getattr(controller, method)):
cls.status = 200
return getattr(controller, method)(controller)
cls.status = 404
return BaseController.error_not_found()
@classmethod
def is_api(cls):
return cls.api
@classmethod
def get_path(cls):
return cls.path
@classmethod
def is_same_route(cls, path, route, environ):
if route["request_method"] == environ["REQUEST_METHOD"] and bool(re.fullmatch(route['route_pattern'], path)):
match = re.search(route['main_route'] + "(.+)", path)
try:
param = match.group(1)
Request.set_data(route["param"], param)
except:
pass
return True
return False
| [
"ashkan.jafarzade@yahoo.com"
] | ashkan.jafarzade@yahoo.com |
fe0881db35f3f5d538b836ae7ffdbb95c3e3210e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/308/84669/submittedfiles/testes.py | 9138abd6a192264d1bfcda194bb1960c01f572ad | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def mostrar():
print(a+b)
print('Resultado')
a = 3
b = 4
mostrar() | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
abd514985aa887677633fa023398dda638ac660f | d61a8f464474b5032a417b210d56f2e968c5c88c | /Accounts/models.py | eaeef4e59b1a50d6358c24c2f8d0deb3f6cd6888 | [] | no_license | Peterbamidele/Customer-Dashboard | 8f29bd5540f35bc4616f9924751100bda03e52de | 6de114ac7ff870cdfb5d7902157cec5a9f4c70fa | refs/heads/main | 2023-04-23T19:01:37.397312 | 2021-05-01T20:22:41 | 2021-05-01T20:22:41 | 362,900,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | from django.db import models
# Create your models here.
class Customer(models.Model):
name = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200, null=True)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
class Product(models.Model):
CATEGORY = (
('Indoor', 'Indoor'),
('Out Door', 'Out Door'),
)
name = models.CharField(max_length=200, null=True)
price = models.FloatField(null=True)
category = models.CharField(max_length=200, null=True, choices=CATEGORY)
description = models.CharField(max_length=200, null=True, blank=True)
data_created = models.DateTimeField(auto_now_add=True)
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name
class Order(models.Model):
STATUS = (
('Pending', 'Pending'),
('Out for delivery', 'Out for delivery'),
('Delivered', 'Delivered'),
)
customer = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
data_created = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=200, null=True, choices=STATUS)
| [
"peterbamidele@gmail.com"
] | peterbamidele@gmail.com |
cd25ad9f9b621517a8f79e725e360604777f67c1 | a34d9458832a033bb05b1cec9f13c9f997c6e8d0 | /eola/two_d_space.py | 8b4cfdaa771b30e15433839b87f4c8762caa38f3 | [] | no_license | scottopell/manim | 9de0ca8fd1f4a203e557dc5503b38591e0ef66bc | a3fa16ebcbc9b3eb462c0a3434840c954a92e0d1 | refs/heads/master | 2021-01-14T12:44:51.483245 | 2016-09-03T19:29:17 | 2016-09-03T19:29:17 | 59,712,661 | 0 | 0 | null | 2016-05-26T02:18:09 | 2016-05-26T02:18:09 | null | UTF-8 | Python | false | false | 15,698 | py | import numpy as np
from scene import Scene
from mobject import Mobject
from mobject.vectorized_mobject import VMobject, Group
from mobject.tex_mobject import TexMobject, TextMobject
from animation import Animation
from animation.transform import ApplyPointwiseFunction, Transform, \
ApplyMethod, FadeOut, ApplyFunction
from animation.simple_animations import ShowCreation, Write
from topics.number_line import NumberPlane, Axes
from topics.geometry import Vector, Line, Circle, Arrow, Dot, \
BackgroundRectangle, Square
from helpers import *
from eola.matrix import Matrix, VECTOR_LABEL_SCALE_FACTOR, vector_coordinate_label
X_COLOR = GREEN_C
Y_COLOR = RED_C
Z_COLOR = BLUE_D
class VectorScene(Scene):
CONFIG = {
"basis_vector_stroke_width" : 6
}
def add_plane(self, animate = False, **kwargs):
plane = NumberPlane(**kwargs)
if animate:
self.play(ShowCreation(plane, submobject_mode = "lagged_start"))
self.add(plane)
return plane
def add_axes(self, animate = False, color = WHITE, **kwargs):
axes = Axes(color = color, tick_frequency = 1)
if animate:
self.play(ShowCreation(axes, submobject_mode = "one_at_a_time"))
self.add(axes)
return axes
def lock_in_faded_grid(self, dimness = 0.7, axes_dimness = 0.5):
plane = self.add_plane()
axes = plane.get_axes()
plane.fade(dimness)
axes.highlight(WHITE)
axes.fade(axes_dimness)
self.add(axes)
self.freeze_background()
def add_vector(self, vector, color = YELLOW, animate = True, **kwargs):
if not isinstance(vector, Arrow):
vector = Vector(vector, color = color, **kwargs)
if animate:
self.play(ShowCreation(vector))
self.add(vector)
return vector
def write_vector_coordinates(self, vector, **kwargs):
coords = vector_coordinate_label(vector, **kwargs)
self.play(Write(coords))
return coords
def get_basis_vectors(self):
return [
Vector(
vect, color = color,
stroke_width = self.basis_vector_stroke_width
)
for vect, color in [
([1, 0], X_COLOR),
([0, 1], Y_COLOR)
]
]
def get_basis_vector_labels(self, **kwargs):
i_hat, j_hat = self.get_basis_vectors()
return Group(*[
self.get_vector_label(
vect, label, color = color,
label_scale_factor = 1,
**kwargs
)
for vect, label , color in [
(i_hat, "\\hat{\\imath}", X_COLOR),
(j_hat, "\\hat{\\jmath}", Y_COLOR),
]
])
def get_vector_label(self, vector, label,
direction = "left",
rotate = False,
color = None,
label_scale_factor = VECTOR_LABEL_SCALE_FACTOR):
if not isinstance(label, TexMobject):
if len(label) == 1:
label = "\\vec{\\textbf{%s}}"%label
label = TexMobject(label)
if color is None:
color = vector.get_color()
label.highlight(color)
label.scale(label_scale_factor)
label.add_background_rectangle()
angle = vector.get_angle()
if not rotate:
label.rotate(-angle)
if direction is "left":
label.shift(-label.get_bottom() + 0.1*UP)
else:
label.shift(-label.get_top() + 0.1*DOWN)
label.rotate(angle)
label.shift((vector.get_end() - vector.get_start())/2)
return label
def label_vector(self, vector, label, animate = True, **kwargs):
label = self.get_vector_label(vector, label, **kwargs)
if animate:
self.play(Write(label, run_time = 1))
self.add(label)
return label
def position_x_coordinate(self, x_coord, x_line, vector):
x_coord.next_to(x_line, -np.sign(vector[1])*UP)
x_coord.highlight(X_COLOR)
return x_coord
def position_y_coordinate(self, y_coord, y_line, vector):
y_coord.next_to(y_line, np.sign(vector[0])*RIGHT)
y_coord.highlight(Y_COLOR)
return y_coord
    def coords_to_vector(self, vector, coords_start = 2*RIGHT+2*UP, clean_up = True):
        """Animate turning a written coordinate column into an on-screen arrow.

        The coordinate matrix is written at *coords_start*, its entries fly to
        the component lines, then the arrow is drawn. When *clean_up* is True
        the scene is restored to the mobjects present before the call.
        """
        starting_mobjects = list(self.mobjects)
        array = Matrix(vector)
        array.shift(coords_start)
        arrow = Vector(vector)
        # Component lines: x along the axis, y from its end up to the tip.
        x_line = Line(ORIGIN, vector[0]*RIGHT)
        y_line = Line(x_line.get_end(), arrow.get_end())
        x_line.highlight(X_COLOR)
        y_line.highlight(Y_COLOR)
        x_coord, y_coord = array.get_mob_matrix().flatten()
        self.play(Write(array, run_time = 1))
        self.dither()
        # Move each matrix entry next to the line it measures.
        self.play(ApplyFunction(
            lambda x : self.position_x_coordinate(x, x_line, vector),
            x_coord
        ))
        self.play(ShowCreation(x_line))
        self.play(
            ApplyFunction(
                lambda y : self.position_y_coordinate(y, y_line, vector),
                y_coord
            ),
            FadeOut(array.get_brackets())
        )
        # Re-fetch the transformed copies produced by the last animation.
        y_coord, brackets = self.get_mobjects_from_last_animation()
        self.play(ShowCreation(y_line))
        self.play(ShowCreation(arrow))
        self.dither()
        if clean_up:
            self.clear()
            self.add(*starting_mobjects)
    def vector_to_coords(self, vector, integer_labels = True, clean_up = True):
        """Animate the inverse of coords_to_vector: read coordinates off an arrow.

        *vector* may be an Arrow already on screen or a raw coordinate pair.
        Returns (coordinate array, x_line, y_line).
        """
        starting_mobjects = list(self.mobjects)
        show_creation = False
        if isinstance(vector, Arrow):
            arrow = vector
            vector = arrow.get_end()[:2]
        else:
            arrow = Vector(vector)
            show_creation = True
        array = vector_coordinate_label(arrow, integer_labels = integer_labels)
        x_line = Line(ORIGIN, vector[0]*RIGHT)
        y_line = Line(x_line.get_end(), arrow.get_end())
        x_line.highlight(X_COLOR)
        y_line.highlight(Y_COLOR)
        x_coord, y_coord = array.get_mob_matrix().flatten()
        # Copies start beside the component lines, then fly into the array.
        x_coord_start = self.position_x_coordinate(
            x_coord.copy(), x_line, vector
        )
        y_coord_start = self.position_y_coordinate(
            y_coord.copy(), y_line, vector
        )
        brackets = array.get_brackets()
        if show_creation:
            self.play(ShowCreation(arrow))
        self.play(
            ShowCreation(x_line),
            Write(x_coord_start),
            run_time = 1
        )
        self.play(
            ShowCreation(y_line),
            Write(y_coord_start),
            run_time = 1
        )
        self.dither()
        self.play(
            Transform(x_coord_start, x_coord, submobject_mode = "all_at_once"),
            Transform(y_coord_start, y_coord, submobject_mode = "all_at_once"),
            Write(brackets, run_time = 1),
        )
        self.dither()
        # Swap the animated pieces for the single coherent array mobject.
        self.remove(x_coord_start, y_coord_start, brackets)
        self.add(array)
        if clean_up:
            self.clear()
            self.add(*starting_mobjects)
        return array, x_line, y_line
    def show_ghost_movement(self, vector):
        """Suggest translating the whole plane by *vector* via a ghostly dot grid.

        A lattice of invisible dots flashes white at the halfway point of the
        shift, then fades out at the destination.
        """
        if isinstance(vector, Arrow):
            vector = vector.get_end() - vector.get_start()
        elif len(vector) == 2:
            # Promote a 2D coordinate pair to 3D so shift() arithmetic works.
            vector = np.append(np.array(vector), 0.0)
        # Make the lattice large enough to cover the screen plus the shift.
        x_max = int(SPACE_WIDTH + abs(vector[0]))
        y_max = int(SPACE_HEIGHT + abs(vector[1]))
        dots = VMobject(*[
            Dot(x*RIGHT + y*UP)
            for x in range(-x_max, x_max)
            for y in range(-y_max, y_max)
        ])
        dots.set_fill(BLACK, opacity = 0)
        dots_halfway = dots.copy().shift(vector/2).set_fill(WHITE, 1)
        dots_end = dots.copy().shift(vector)
        # rush_into / rush_from give an accelerate-then-decelerate feel.
        self.play(Transform(
            dots, dots_halfway, rate_func = rush_into
        ))
        self.play(Transform(
            dots, dots_end, rate_func = rush_from
        ))
        self.remove(dots)
class LinearTransformationScene(VectorScene):
    """Scene for animating 2D linear maps applied to a plane, vectors and labels.

    Mobjects are registered into layers (background / transformable /
    moving / foreground) and `apply_transposed_matrix` (or
    `apply_nonlinear_transformation`) animates them all coherently.
    """
    CONFIG = {
        "include_background_plane" : True,
        "include_foreground_plane" : True,
        "foreground_plane_kwargs" : {
            "x_radius" : 2*SPACE_WIDTH,
            "y_radius" : 2*SPACE_HEIGHT,
            "secondary_line_ratio" : 0
        },
        "background_plane_kwargs" : {
            "color" : GREY,
            "secondary_color" : DARK_GREY,
            "axes_color" : GREY,
            "stroke_width" : 2,
        },
        "show_coordinates" : False,
        "show_basis_vectors" : True,
        "i_hat_color" : X_COLOR,
        "j_hat_color" : Y_COLOR,
        "leave_ghost_vectors" : False,
        # Transposed matrix: rows are the images of i-hat and j-hat.
        "t_matrix" : [[3, 0], [1, 2]],
    }
    def setup(self):
        # Guard against setup() running twice on the same scene instance.
        if hasattr(self, "has_setup"):
            return
        self.has_setup = True
        ##^This is to not break all the old Scenes
        self.background_mobjects = []
        self.foreground_mobjects = []
        self.transformable_mobjects = []
        self.moving_vectors = []
        self.transformable_labels = []
        self.moving_mobjects = []
        self.t_matrix = np.array(self.t_matrix)
        self.background_plane = NumberPlane(
            **self.background_plane_kwargs
        )
        if self.show_coordinates:
            self.background_plane.add_coordinates()
        if self.include_background_plane:
            self.add_background_mobject(self.background_plane)
        if self.include_foreground_plane:
            self.plane = NumberPlane(**self.foreground_plane_kwargs)
            self.add_transformable_mobject(self.plane)
        if self.show_basis_vectors:
            self.i_hat, self.j_hat = [
                self.add_vector(
                    coords, color, animate = False, stroke_width = 6
                )
                for coords, color in [
                    ((1, 0), self.i_hat_color),
                    ((0, 1), self.j_hat_color),
                ]
            ]
    def add_special_mobjects(self, mob_list, *mobs_to_add):
        """Add each mobject to the scene and to *mob_list*, skipping duplicates."""
        for mobject in mobs_to_add:
            if mobject not in mob_list:
                mob_list.append(mobject)
                self.add(mobject)
    def add_background_mobject(self, *mobjects):
        self.add_special_mobjects(self.background_mobjects, *mobjects)
    def add_foreground_mobject(self, *mobjects):
        self.add_special_mobjects(self.foreground_mobjects, *mobjects)
    def add_transformable_mobject(self, *mobjects):
        self.add_special_mobjects(self.transformable_mobjects, *mobjects)
    def add_moving_mobject(self, mobject, target_mobject = None):
        # target may be None; a copy is created lazily when the map is applied.
        mobject.target = target_mobject
        self.add_special_mobjects(self.moving_mobjects, mobject)
    def add_unit_square(self, color = YELLOW, opacity = 0.3, animate = False):
        """Add the unit square (corner at the origin) as a transformable mobject."""
        square = Square(color = color, side_length = 1)
        square.shift(-square.get_corner(DOWN+LEFT))
        if animate:
            added_anims = map(Animation, self.moving_vectors)
            self.play(ShowCreation(square), *added_anims)
            self.play(square.set_fill, color, opacity, *added_anims)
        else:
            square.set_fill(color, opacity)
        self.add_transformable_mobject(square)
        self.bring_to_front(*self.moving_vectors)
        self.square = square
        return self
    def add_vector(self, vector, color = YELLOW, **kwargs):
        """Add a vector via VectorScene and track it so transforms move it."""
        vector = VectorScene.add_vector(
            self, vector, color = color, **kwargs
        )
        self.moving_vectors.append(vector)
        return vector
    def write_vector_coordinates(self, vector, **kwargs):
        """Write coordinates and keep them in the (untransformed) foreground layer."""
        coords = VectorScene.write_vector_coordinates(self, vector, **kwargs)
        self.add_foreground_mobject(coords)
        return coords
    def add_transformable_label(self, vector, label, new_label = None, **kwargs):
        """Label *vector* and register the label to be rewritten on transform."""
        label_mob = self.label_vector(vector, label, **kwargs)
        if new_label:
            label_mob.target_text = new_label
        else:
            label_mob.target_text = "L(%s)"%label_mob.expression
        label_mob.vector = vector
        label_mob.kwargs = kwargs
        # get_vector_label does not accept `animate`; strip it before reuse.
        if "animate" in label_mob.kwargs:
            label_mob.kwargs.pop("animate")
        self.transformable_labels.append(label_mob)
        return label_mob
    def add_title(self, title, scale_factor = 1.5, animate = False):
        """Pin a (possibly written) title to the top edge, in the foreground."""
        if not isinstance(title, Mobject):
            title = TextMobject(title).scale(scale_factor)
        title.to_edge(UP)
        title.add_background_rectangle()
        if animate:
            self.play(Write(title))
        self.add_foreground_mobject(title)
        self.title = title
        return self
    def get_matrix_transformation(self, transposed_matrix):
        """Return a point -> point function for right-multiplication by *transposed_matrix*."""
        transposed_matrix = np.array(transposed_matrix)
        if transposed_matrix.shape == (2, 2):
            # Embed a 2x2 matrix into 3D, leaving the z axis fixed.
            new_matrix = np.identity(3)
            new_matrix[:2, :2] = transposed_matrix
            transposed_matrix = new_matrix
        elif transposed_matrix.shape != (3, 3):
            # BUGFIX: raising a bare string is a TypeError on Python 3.
            raise Exception("Matrix has bad dimensions")
        return lambda point: np.dot(point, transposed_matrix)
    def get_piece_movement(self, pieces):
        """Build one Transform moving every piece to its `.target`."""
        start = VMobject(*pieces)
        target = VMobject(*[mob.target for mob in pieces])
        if self.leave_ghost_vectors:
            self.add(start.copy().fade(0.7))
        return Transform(start, target, submobject_mode = "all_at_once")
    def get_moving_mobject_movement(self, func):
        for m in self.moving_mobjects:
            if m.target is None:
                m.target = m.copy()
            target_point = func(m.get_center())
            m.target.move_to(target_point)
        return self.get_piece_movement(self.moving_mobjects)
    def get_vector_movement(self, func):
        for v in self.moving_vectors:
            v.target = Vector(func(v.get_end()), color = v.get_color())
        return self.get_piece_movement(self.moving_vectors)
    def get_transformable_label_movement(self):
        # Labels follow their (already retargeted) vectors.
        for l in self.transformable_labels:
            l.target = self.get_vector_label(
                l.vector.target, l.target_text, **l.kwargs
            )
        return self.get_piece_movement(self.transformable_labels)
    def apply_transposed_matrix(self, transposed_matrix, **kwargs):
        """Animate the linear map given by *transposed_matrix* (rows = basis images)."""
        func = self.get_matrix_transformation(transposed_matrix)
        if "path_arc" not in kwargs:
            # Arc the motion by the mean rotation of the basis vectors.
            net_rotation = np.mean([
                angle_of_vector(func(RIGHT)),
                angle_of_vector(func(UP))-np.pi/2
            ])
            kwargs["path_arc"] = net_rotation
        self.apply_function(func, **kwargs)
    def apply_inverse_transpose(self, t_matrix, **kwargs):
        """Animate the inverse of the map described by *t_matrix*."""
        t_inv = np.linalg.inv(np.array(t_matrix).T).T
        self.apply_transposed_matrix(t_inv, **kwargs)
    def apply_nonlinear_transformation(self, function, **kwargs):
        self.plane.prepare_for_nonlinear_transform()
        self.apply_function(function, **kwargs)
    def apply_function(self, function, added_anims = [], **kwargs):
        """Animate *function* on every registered layer simultaneously.

        Note: the mutable default `added_anims=[]` is safe here because the
        list is only read (concatenated), never mutated.
        """
        if "run_time" not in kwargs:
            kwargs["run_time"] = 3
        anims = [
            ApplyPointwiseFunction(function, t_mob)
            for t_mob in self.transformable_mobjects
        ] + [
            self.get_vector_movement(function),
            self.get_transformable_label_movement(),
            self.get_moving_mobject_movement(function),
        ] + [
            Animation(f_mob)
            for f_mob in self.foreground_mobjects
        ] + added_anims
        self.play(*anims, **kwargs)
| [
"grantsanderson7@gmail.com"
] | grantsanderson7@gmail.com |
7c1d83b837c19a47aec690f304c4dcdf7004c723 | 9b8b5b3ca5012373ba4dbcde31eb2335007c06c7 | /songs/migrations/0008_song_user.py | 69de2be8d9bee3175a24ce411fd69ea96a202b75 | [] | no_license | Amit152116Kumar/Django_songs_playlist | 9238fa6117477fafde8d7683d25eda88c3a4beac | 815741f3aef40c24ab948aa4438fe6afee244f36 | refs/heads/master | 2020-04-28T21:10:23.808009 | 2019-03-14T07:53:20 | 2019-03-14T07:53:20 | 175,572,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # Generated by Django 2.1.5 on 2019-02-09 13:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a non-nullable `user` FK to Song. default=1 backfills existing
    # rows with the user whose pk is 1; preserve_default=False means the
    # default exists only for this migration, not on the model afterwards.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('songs', '0007_remove_song_user'),
    ]
    operations = [
        migrations.AddField(
            model_name='song',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| [
"amit170103004@iitg.ac.in"
] | amit170103004@iitg.ac.in |
a3ca5cace42ecc5aaea3b17b2f1cb97ad2b0b39c | 1837323ef879e5216aa1ee267593fd96c911c587 | /venv/bin/pip | 3263da6bdb820a98e3545a6ad5b58c160a99e75c | [] | no_license | tee-jaay/dj_contentfeed | 05f800b34165660dbd334fe75f5ffcf7f07e7022 | fed1a5089fc0bb9ef32446d051fdf55c2de23e44 | refs/heads/master | 2022-08-21T09:43:05.997970 | 2018-06-18T13:15:52 | 2018-06-18T13:15:52 | 137,673,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | #!/home/jtam/Documents/Projects/Python/Django/max-goodridge/content-feed/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py(w)" / ".exe" wrapper suffix from argv[0]
    # so pip reports its own name correctly, then hand off to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"tamjid84@yahoo.com"
] | tamjid84@yahoo.com | |
d3be12214002bf0e8ed2b4e329795a1e62b70612 | b2f755bdb8c5a73cf28679b14de1a7100cd48b35 | /Interview/4/31.py | 398dfb0ecf5c8643733ea6c6524bdb8f8ed60db3 | [] | no_license | Futureword123456/Interview | cc50e1a3e4e85e4ac570469fc8a839029cdc6c50 | 5cb36dc5f2459abd889e1b29f469d5149139dc5f | refs/heads/master | 2023-03-25T15:24:23.939871 | 2021-03-13T08:15:54 | 2021-03-13T08:15:54 | 345,374,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# @Time : 2021/3/8 0008
# @Author : yang
# @Email : 2635681517@qq.com
# @File : 31.py
"""Python 获取昨天日期"""
import datetime
def getyesterday():
    """Return yesterday's date as a ``datetime.date``.

    Subtracting a ``timedelta`` from a ``date`` yields another ``date``,
    so today minus one day is yesterday. (The original placed this
    explanation as a stray string literal *after* the first statement,
    where it was a no-op rather than a docstring.)
    """
    return datetime.date.today() - datetime.timedelta(days=1)
if __name__ == "__main__":
    # Quick manual check: print yesterday's date and the current timestamp.
    print(getyesterday())
    print(datetime.datetime.now())
"2635681517@qq.com"
] | 2635681517@qq.com |
30cff20693dfa6eedd7d0e9a6d63f3de230dc906 | 3995505d38cb4f9b25a0481676a273a92122922b | /books/views.py | c23c60fe633a00c23665bc217f110758f37bf701 | [
"MIT"
] | permissive | MySuperSoul/BookTradeWeb | 57c90f105c71adb474fb41cc313968446228afbc | 0414e7674aded9580d5d90a0ac364d60b6e79cba | refs/heads/master | 2023-05-30T05:29:05.976806 | 2019-07-04T13:56:10 | 2019-07-04T13:56:10 | 187,504,740 | 0 | 0 | null | 2023-05-22T22:16:15 | 2019-05-19T17:02:23 | JavaScript | UTF-8 | Python | false | false | 18,568 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect, JsonResponse
from BookTradeWeb.utils import BaseView, Category, SortingUtil
from django.urls import reverse
from useraction.views import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Book, Comment, ShoppingCar, BookOffer, CreditAccount, BookNeed
from django.db.models import Count
from django.contrib.auth import authenticate, login
from bs4 import BeautifulSoup
import requests, json
import math
# Create your views here.
class Util():
    """Page-size constants and shared query helpers for the book views."""
    # items per page in various listings
    max_page_item = 3
    category_max_page_item = 9
    book_show_max_item = 8
    user_show_max_item = 10
    @classmethod
    def GetPopularUsers(cls):
        # Users ranked by how many books they have listed for sale.
        return User.objects.annotate(sell_num=Count("sell_book_side")).order_by("-sell_num")[:cls.user_show_max_item]
    @classmethod
    def GetSortingBooks(cls, books, sorting):
        # sorting codes: 0 unchanged, 1 newest first, 2/3 by comment count
        # asc/desc, 4/5 by price asc/desc.
        if sorting == 0:
            return books
        elif sorting == 1:
            return books.order_by('-publish_time')
        elif sorting == 2:
            return books.annotate(comment_num=Count('comment')).order_by('comment_num')
        elif sorting == 3:
            return books.annotate(comment_num=Count('comment')).order_by('-comment_num')
        elif sorting == 4:
            return books.order_by('sell_price')
        else:
            return books.order_by('-sell_price')
    @classmethod
    def JudgeEmptyInput(cls, input_list):
        # True only when no entry is the empty string.
        for item in input_list:
            if item == '':
                return False
        return True
def GetISBNLink(request, ISBN):
    """Scrape dangdang.com for the first book matching *ISBN*.

    Returns a JsonResponse with the product link, description, ISBN text and
    rounded sell price; on any scraping failure returns {'error', 'code': 1}.
    NOTE(review): depends on dangdang's markup staying stable — fragile.
    """
    search_url = 'http://search.dangdang.com/?key={0}&act=input'.format(ISBN)
    Agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    headers = {
        'Host': 'search.dangdang.com',
        'User-Agent': Agent
    }
    res = requests.get(search_url, headers=headers)
    html = res.text
    soup = BeautifulSoup(html, 'html.parser')
    try:
        # First search hit -> follow through to the product page.
        first_hit_book = soup.find_all('li', class_='line1')[0]
        book_link = first_hit_book.a.get('href')
        res = requests.get(book_link, headers=headers)
        html = res.text
        soup = BeautifulSoup(html, 'html.parser')
        desc = soup.find_all('span', class_='head_title_name')[0].text
        isbn = soup.find_all('ul', class_='key clearfix')[0]
        isbn_info = str(isbn.contents[5].text)
        # Drop the leading "ISBN:" prefix (5 chars after the index).
        isbn_info = isbn_info[isbn_info.index('ISBN') + 5:]
        sell_price = soup.find_all('p', id='dd-price')
        sell_price = str(sell_price[0].text).strip().split('¥')[-1]
        sell_price = round(float(sell_price))
        return JsonResponse({
            'link' : book_link,
            'description' : desc.strip(),
            'ISBN' : isbn_info,
            'sell_price' : sell_price
        })
    except Exception as e:
        return JsonResponse({
            'error' : '暂无商品信息',
            'code' : 1
        })
class IndexView(BaseView):
    """Home page: newest / most-commented / cheapest books for sale."""
    def GetNewestBooks(self, books):
        return books.order_by("-publish_time")[:Util.book_show_max_item]
    def GetPopularBooks(self, books):
        return books.annotate(comment_num=Count('comment')).order_by('-comment_num')[:Util.book_show_max_item]
    def GetCheapBooks(self, books):
        return books.order_by("sell_price")[:Util.book_show_max_item]
    def get(self, request):
        if request.user.is_authenticated:
            # book_status 0 == listed for sale (1 == wanted/needed).
            books = Book.objects.all().filter(book_status=0)
            category_num_dict = dict(Category.GetCategoryBookNumberDict(0))
            user = User.objects.filter(id=request.user.id)[0]
            return render(request, 'index.html', {
                'books' : books,
                'newest_books' : self.GetNewestBooks(books),
                'popular_books' : self.GetPopularBooks(books),
                'cheap_books' : self.GetCheapBooks(books),
                'num_dict' : category_num_dict,
                'user' : user
            })
        else:
            return HttpResponseRedirect('/auth/login/')
class UserProfileView(BaseView):
    """Render the logged-in user's profile page (login required)."""
    @method_decorator(login_required(login_url='/auth/login/', redirect_field_name='next'))
    def get(self, request):
        return render(request, 'user-profile.html', {'user' : request.user})
class UserUpdatePasswordView(BaseView):
    """Change password: validate, re-authenticate with the old one, re-login."""
    def post(self, request):
        old = request.data.get('old-password')
        new = request.data.get('new-password')
        confirm = request.data.get('confirm-password')
        if new != confirm:
            raise Exception('两次输入的密码不一致')
        if new == '' or confirm == '':
            raise Exception('输入不能为空')
        if len(new) < 6:
            raise Exception('密码过短')
        # Verify the old password before accepting the change.
        user = authenticate(username=request.user.username, password=old)
        if not user:
            raise Exception('原始密码错误')
        user.set_password(new)
        user.save()
        # set_password invalidates the session; log back in immediately.
        login(request, user)
class UserUpdateProfileView(BaseView):
    """Update the user's phone / address / introduction fields."""
    def post(self, request):
        user = request.user
        user.telephone = request.data.get('phone')
        user.address = request.data.get('address')
        user.introduction = request.data.get('introduction')
        user.save()
        return HttpResponseRedirect('/books/profile/')
class UserUpdateHeaderView(BaseView):
    """Replace the user's avatar with the uploaded file."""
    def post(self, request):
        user = request.user
        user.header_image = request.data.get('file')
        user.save()
        return HttpResponseRedirect('/books/profile/')
class AddListView(BaseView):
    """Create a new book listing — either for sale or as a "wanted" post."""
    def get(self, request):
        return render(request, 'add_list.html', {
            'user' : request.user,
            'option' : request.data.get('option')
        })
    def post(self, request):
        try:
            # No trade_way submitted => this is a "wanted" post (status 1),
            # otherwise a sale listing (status 0) with a stock count.
            if request.data.get('trade_way') == None:
                trade_way = ''
                store_remain_num = 0
                book_status = 1
            else:
                trade_way = request.data.get('trade_way')
                store_remain_num = int(request.data.get('store_num'))
                book_status = 0
            book = Book.objects.create(book_name=request.data.get('book_name'),
                                       ISBN=request.data.get('ISBN'),
                                       book_introduction=request.data.get('book_description'),
                                       category=request.data.get('book_category'),
                                       origin_price=int(request.data.get('origin_price')),
                                       sell_price=int(request.data.get('current_price')),
                                       store_remain_num=store_remain_num,
                                       book_url=request.data.get('link'),
                                       publisher_name=request.user,
                                       trade_way=trade_way,
                                       book_status=book_status)
            book.book_image = request.data.get('file')
            book.save()
            if book_status == 1:
                # Wanted posts carry an extra free-text message.
                book_need = BookNeed.objects.create(
                    book=book,
                    message=request.data.get('message')
                )
                book_need.save()
            return {'message' : '书籍添加成功'}
        except Exception as e:
            # NOTE(review): swallows the original error detail; consider
            # chaining (`raise ... from e`) for debuggability.
            raise Exception('输入异常')
class UserBooksView(BaseView):
    """Paged list of one user's listings; mode 'sell' (default) or wanted."""
    def get(self, request, user_id):
        user = User.objects.filter(id=int(user_id))[0]
        if request.data.get('page') == None:
            page = 1
        else: page = int(request.data.get('page'))
        # mode missing or 'sell' -> sale listings (0); anything else -> wanted (1).
        if request.data.get('mode') == None:
            book_status = 0
        elif request.data.get('mode') == 'sell':
            book_status = 0
        else: book_status = 1
        books = user.book_set.all().filter(book_status=book_status)
        # Count before slicing so total_pages reflects the full queryset.
        count_num = books.count()
        start_pos = (page - 1) * Util.max_page_item
        end_pos = min(page * Util.max_page_item, int(books.count()))
        books = books[start_pos:end_pos]
        total_pages = 1 + (int(count_num) - 1) // Util.max_page_item
        pages_list = [i for i in range(1, total_pages + 1)]
        data = {
            'user' : user,
            'books' : books,
            'pages' : pages_list,
            'current_page' : page,
            'mode' : book_status
        }
        # Viewing someone else's shelf: template hides owner-only controls.
        if user.id != request.user.id:
            data['option'] = 'not'
        return render(request, 'my_books.html', data)
    def post(self, request):
        pass
class SellSingleBookView(BaseView):
    """Detail page for a single book, with its comments."""
    def get(self, request, book_id):
        book_id = int(book_id)
        book = Book.objects.filter(id=book_id)[0]
        comments = book.comment_set.all()
        data = {
            'User' : request.user,
            'single_book' : book,
            'comments' : comments,
        }
        # The publisher viewing their own listing gets no buyer widgets.
        if request.user.id == book.publisher_name_id:
            data.pop('User')
        if book.book_status == 1:
            # Wanted post: template shows the "offer to sell" option instead.
            data['option'] = 'buy'
        return render(request, 'single_book.html', data)
class SubmitCommentView(BaseView):
    """Attach a 1-5 star comment to a book, then return to its detail page."""
    def post(self, request, book_id):
        user = request.user
        book = Book.objects.filter(id=int(book_id))[0]
        score = request.data.get('comment_score')
        score = int(score)
        if score <= 0 or score > 5:
            raise Exception('输入评分不在范围内')
        comment = request.data.get('comment_review')
        comm = Comment.objects.create(
            commenter=user,
            book=book,
            score=score,
            content=comment
        )
        comm.save()
        return HttpResponseRedirect(reverse('books:sell_single_book', kwargs={'book_id' : book_id}))
class AddToShoppingCarView(BaseView):
    """Put *number* copies of a book into the shopping cart after validation."""
    def post(self, request):
        book_id = int(request.data.get('book_id'))
        book = Book.objects.filter(id=book_id)[0]
        user = User.objects.filter(id=int(request.data.get('user_id')))[0]
        number = int(request.data.get('number'))
        address = request.data.get('address')
        phone = request.data.get('phone')
        if address == '' or phone == '':
            raise Exception('输入不能为空')
        if number <= 0:
            raise Exception('输入数量不在合理范围')
        # if number > the current store remain, exception
        if number > book.store_remain_num:
            raise Exception('库存不足,添加失败')
        # Existing cart row for this book: just bump the quantity.
        if ShoppingCar.objects.filter(book_id=book_id).count() != 0:
            shop = ShoppingCar.objects.filter(book_id=book_id)[0]
            shop.added_number += number
        else:
            shop = ShoppingCar.objects.create(
                book=book,
                book_owner=user,
                added_number=number,
                contact_phone=phone,
                address=address
            )
        shop.save()
        return {
            'message' : '添加成功',
        }
class ShowBooksByCategoryView(BaseView):
    """Browse/search sale listings by category, with sort, price and paging filters."""
    def get(self, request, category=''):
        origin = category
        if category == 'all':
            books = Book.objects.all()
            category = '所有'
        elif category != '':
            category = Category.GetCategory(category)
            books = Book.objects.filter(category=category)
        else:
            # Empty category => free-text search over name, ISBN and seller.
            search = request.data.get('q')
            origin = search
            books = Book.objects.filter(book_name__contains=search)
            books = books | Book.objects.filter(ISBN__contains=search)
            books = books | Book.objects.filter(publisher_name__username__contains=search)
        books = books.filter(book_status=0)
        if request.data.get('sort') != None:
            sorting = int(request.data.get('sort'))
            books = Util.GetSortingBooks(books, sorting)
        else: sorting = 0
        if request.data.get('min') != None and request.data.get('max') != None:
            min_price = int(request.data.get('min'))
            max_price = int(request.data.get('max'))
            books = books.filter(sell_price__gte=min_price)
            books = books & Book.objects.filter(sell_price__lte=max_price)
        # page is always the last step to filter
        if request.data.get('page') == None:
            page = 1
        else: page = int(request.data.get('page'))
        total_pages = 1 + (int(books.count() - 1) // Util.category_max_page_item)
        pages_list = [i for i in range(1, total_pages + 1)]
        result_num = books.count()
        start_pos = (page - 1) * Util.category_max_page_item
        end_pos = min(page * Util.category_max_page_item, int(books.count()))
        books = books[start_pos:end_pos]
        # filter popular users
        popular_users = Util.GetPopularUsers()
        return render(request, 'category.html', {
            'books' : books,
            'category' : category,
            'origin_category' : origin,
            'pages' : pages_list,
            'current_page' : page,
            'sorting' : SortingUtil.GetSortingDescription(sorting),
            'category_dict' : dict(Category.GetCategoryBookNumberDict(0)),
            'popular_users' : popular_users,
            'result_num' : result_num
        })
class MakeOfferView(BaseView):
    """Cart/orders page plus checkout ('offer') and cart-item deletion."""
    def get(self, request):
        user = request.user
        shopping_car_set = ShoppingCar.objects.filter(book_owner_id=user.id)
        completed_orders = BookOffer.objects.filter(buy_side_id=user.id)
        remain_verify_orders = BookOffer.objects.filter(status='remain', sell_side_id=user.id)
        verified_orders = BookOffer.objects.filter(status='complete', sell_side_id=user.id)
        data = {
            'shopping' : shopping_car_set,
            'completed_orders' : completed_orders,
            'remain_verify_orders' : remain_verify_orders,
            'verified_orders' : verified_orders
        }
        return render(request, 'shopping_car.html', data)
    def OfferBooks(self, request):
        """Turn selected cart rows into 'remain' orders and decrement stock.

        NOTE(review): the balance check trusts the client-supplied `price`
        rather than recomputing the total server-side — verify upstream.
        """
        shopping_list = json.loads(request.data.get('values'))
        price = int(request.data.get('price'))
        account = CreditAccount.objects.filter(account_owner_id=request.user.id)[0]
        if price > account.account_money:
            raise Exception("账户余额不足,请及时充值")
        else:
            for item in shopping_list:
                id = int(item)
                shopping_item = ShoppingCar.objects.filter(id=id)[0]
                order = BookOffer.objects.create(
                    sell_side=shopping_item.book.publisher_name,
                    buy_side=shopping_item.book_owner,
                    book=shopping_item.book,
                    sell_option=shopping_item.book.trade_way,
                    post_address=shopping_item.address,
                    status='remain',
                    complete_book_num=shopping_item.added_number,
                    complete_price=shopping_item.added_number * shopping_item.book.sell_price,
                    contact_phone=shopping_item.contact_phone,
                )
                order.save()
                shopping_item.delete()
                book = shopping_item.book
                book.store_remain_num -= shopping_item.added_number
                book.save()
            return {
                'message' : '交易完成'
            }
    def DeleteShoppingCarItem(self, request):
        delete_id = int(request.data.get('id'))
        item = ShoppingCar.objects.filter(id=delete_id)[0]
        item.delete()
    def post(self, request):
        # Dispatch on the 'type' field: checkout vs. remove-from-cart.
        if request.data.get('type') == 'offer':
            return self.OfferBooks(request)
        elif request.data.get('type') == 'delete':
            return self.DeleteShoppingCarItem(request)
class CreditAddCountMoney(BaseView):
    """Top up the current user's credit account; returns the new balance."""
    def post(self, request):
        user = request.user
        add_number = int(request.data.get('number'))
        if add_number < 0:
            raise Exception('充值金额不能为负数')
        account = CreditAccount.objects.filter(account_owner_id=user.id)[0]
        account.account_money += add_number
        account.save()
        return {
            'number' : account.account_money
        }
class BookNeedView(BaseView):
    """List "wanted" book posts (book_status=1) with sorting and paging."""
    def get(self, request):
        popular_users = Util.GetPopularUsers()
        books = Book.objects.filter(book_status=1)
        if request.data.get('sort') != None:
            sorting = int(request.data.get('sort'))
            books = Util.GetSortingBooks(books, sorting)
        else: sorting = 0
        if request.data.get('page') == None:
            page = 1
        else: page = int(request.data.get('page'))
        # BUGFIX: total_pages was previously computed from the *sliced*
        # queryset, so it always reported a single page. Count the full
        # queryset first, mirroring ShowBooksByCategoryView/UserBooksView.
        count_num = books.count()
        total_pages = 1 + (int(count_num) - 1) // Util.category_max_page_item
        pages_list = [i for i in range(1, total_pages + 1)]
        start_pos = (page - 1) * Util.category_max_page_item
        end_pos = min(page * Util.category_max_page_item, int(count_num))
        books = books[start_pos:end_pos]
        return render(request, 'book_need.html', {
            'popular_users' : popular_users,
            'books' : books,
            'pages': pages_list,
            'current_page': page,
            'sorting': SortingUtil.GetSortingDescription(sorting),
        })
    def post(self, request):
        pass
class DeletePublishBookView(BaseView):
    """Delete a published book by id.

    NOTE(review): no ownership check — any authenticated POST can delete
    any listing; confirm this is enforced elsewhere (e.g. in BaseView).
    """
    def post(self, request, book_id):
        book = Book.objects.filter(id=int(book_id))
        book.delete()
        return {
            'message' : 'delete success.'
        }
class ModifyBookInfoView(BaseView):
    """Update a listing's stock count and sell price."""
    def post(self, request):
        num = int(request.data.get('num'))
        price = int(request.data.get('price'))
        book_id = int(request.data.get('book_id'))
        if price < 0 or num < 0:
            raise Exception("输入不能为负数")
        book = Book.objects.get(id=book_id)
        book.store_remain_num = num
        book.sell_price = price
        book.save()
        return {
            'message' : '修改成功'
        }
class VerifyOrderView(BaseView):
    """Seller confirms an order: mark complete and charge the buyer's account."""
    def post(self, request):
        order_id = int(request.data.get('order_id'))
        order = BookOffer.objects.get(id=order_id)
        order.status = 'complete'
        order.save()
        # Money is only deducted at confirmation time, not at checkout.
        buy_side_user = order.buy_side
        account = buy_side_user.account.first()
        account.account_money -= order.complete_price
        account.save()
        return {
            'message' : '订单确认成功'
        }
"1144358492@qq.com"
] | 1144358492@qq.com |
e98b77984a85a75c56042c1e0688f4cf6d48cef8 | adb9fbfcd4f9d114717d20d79bf26abd4b88925a | /accounts/urls.py | 73573c078fa2686ee0d260df151fd7ec57c9baf2 | [] | no_license | rushali09/Django-basic-project | 42e2a100004dd468a487c7f6be2bd504ddaee51d | ff20097f9ccfd8e8a1d2d6274c346d5deabdc286 | refs/heads/master | 2022-05-06T05:13:43.281106 | 2020-04-20T21:51:48 | 2020-04-20T21:51:48 | 256,799,661 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from django.urls import path
from . import views
urlpatterns = [
path("register",views.register,name="register"), path("login",views.login,name="login"),path("logout",views.logout,name="logout")
]
| [
"rushalisreedhar37@gmail.com"
] | rushalisreedhar37@gmail.com |
d9d920d1ce089e3e0cf249717c5ce561b43a35bc | a6d2b09dc279c905a99f062b179c5a9158074089 | /typeidea/config/adminx.py | bfb3c20cc866854ad81f4f88ef82bee34cc4d023 | [] | no_license | guansongsong/typeidea | 2324fa15ad17f1bf37c5380d4503d1fae908401a | 118e9697ed2a29d63cc93a622d721a63886cb6cb | refs/heads/master | 2020-05-09T22:58:31.826544 | 2019-04-21T16:06:38 | 2019-04-21T16:06:38 | 181,488,471 | 0 | 0 | null | 2019-04-21T16:06:39 | 2019-04-15T13:05:47 | Python | UTF-8 | Python | false | false | 568 | py | from django.contrib import admin
from . import models
from custom_site import custom_site
from base_admin import BaseOwnerAdmin
import xadmin
# Register your models here.
# xadmin registration for the Link model; BaseOwnerAdmin scopes rows
# to their owner (presumably — defined in base_admin, confirm there).
@xadmin.sites.register(models.Link, )
class LinkAdmin(BaseOwnerAdmin):
    list_display = ['title', 'href', 'status', 'weight', 'create_time']
    fields = ('title', 'href', 'status', 'weight')
# xadmin registration for the SideBar model.
@xadmin.sites.register(models.SideBar, )
class SideBarAdmin(BaseOwnerAdmin):
    list_display = ['title', 'display_type', 'content', 'create_time']
    fields = ('title', 'display_type', 'content',)
| [
"1339338612@qq.com"
] | 1339338612@qq.com |
b09c68c9c382bf4dd4c79cda1d6bd50b3ec5f107 | 95f1ce4525b96285ad1f60302ac0ac43690750fb | /Translator_uti_v2/Translator_GUI_v7.py | 04d777413a458e56b95b23c04dae5b3150b5e248 | [] | no_license | hjnnjh/Useful_tools | 5c2bbf8c957c4099b6289afe4269f57ae605e689 | 2671aad85dc27822120e635d928015c2643d321c | refs/heads/master | 2021-01-03T07:50:53.566844 | 2020-12-02T06:50:09 | 2020-12-02T06:50:09 | 239,987,116 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,443 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Translator_GUI_v7.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from GetText import Gettext
from ph_divide import get_content, get_former, get_later, add_text
from main import main
from GoogleTranlator import GoogleTranslator
from functools import partial
text = ""  # raw clipboard text accumulated across "add content" clicks
content_counter = 1  # 1-based count of how many snippets were added
result = ""  # `text` with CRLF paragraph breaks collapsed to spaces
class Ui_Translator_Google(object):
def setupUi(self, Translator_Google):
Translator_Google.setObjectName("Translator_Google")
Translator_Google.resize(751, 592)
self.centralwidget = QtWidgets.QWidget(Translator_Google)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.textBrowser_translation = QtWidgets.QTextBrowser(self.centralwidget)
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(14)
self.textBrowser_translation.setFont(font)
self.textBrowser_translation.setObjectName("textBrowser_translation")
self.gridLayout.addWidget(self.textBrowser_translation, 0, 0, 1, 2)
self.textBrowser_origin = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser_origin.setStyleSheet("font: 14pt \"微软雅黑\";")
self.textBrowser_origin.setObjectName("textBrowser_origin")
self.gridLayout.addWidget(self.textBrowser_origin, 0, 2, 1, 2)
self.add_content_button = QtWidgets.QPushButton(self.centralwidget)
self.add_content_button.setStyleSheet("font: 14pt \"微软雅黑\";")
self.add_content_button.setObjectName("add_content_button")
self.gridLayout.addWidget(self.add_content_button, 1, 0, 1, 1)
self.add_new_paragragh_button = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(14)
self.add_new_paragragh_button.setFont(font)
self.add_new_paragragh_button.setObjectName("add_new_paragragh_button")
self.gridLayout.addWidget(self.add_new_paragragh_button, 1, 1, 1, 1)
self.translate_2_button = QtWidgets.QPushButton(self.centralwidget)
self.translate_2_button.setStyleSheet("font: 14pt \"微软雅黑\";")
self.translate_2_button.setObjectName("translate_2_button")
self.gridLayout.addWidget(self.translate_2_button, 1, 2, 1, 1)
self.clear_content_button = QtWidgets.QPushButton(self.centralwidget)
self.clear_content_button.setStyleSheet("font: 14pt \"微软雅黑\";")
self.clear_content_button.setObjectName("clear_content_button")
self.gridLayout.addWidget(self.clear_content_button, 1, 3, 1, 1)
Translator_Google.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(Translator_Google)
self.statusbar.setObjectName("statusbar")
Translator_Google.setStatusBar(self.statusbar)
self.retranslateUi(Translator_Google)
QtCore.QMetaObject.connectSlotsByName(Translator_Google)
# 槽函数
self.translate_2_button.clicked.connect(self.abnormal_trans)
self.add_content_button.clicked.connect(self.get_text)
self.clear_content_button.clicked.connect(partial(self.get_text, mode="add_text"))
self.add_new_paragragh_button.clicked.connect(partial(self.get_text, mode="add_ph"))
def get_text(self, mode):
    """Accumulate source text (or a paragraph gap) and refresh the origin pane.

    mode == "add_text" appends freshly fetched content (the first call
    replaces); mode == "add_ph" appends a ten-space paragraph separator.
    Any other value (e.g. the bool Qt passes to a plain clicked slot)
    only re-renders the current text.
    """
    global text, content_counter, result
    if mode == "add_text":
        fetched = get_content()
        text = fetched if content_counter == 1 else text + "\r\n" + fetched
    if mode == "add_ph":
        text += " " * 10
    # Flatten CRLF paragraph breaks into spaces for display/translation.
    result = " ".join(text.split("\r\n"))
    self.statusbar.showMessage("获取内容%d" % content_counter)
    self.textBrowser_origin.clear()
    self.textBrowser_origin.append(result)
    content_counter += 1
def abnormal_trans(self):
    """Translate the accumulated text and display source and translation side by side."""
    global result
    source_text = result
    # Wipe both panes before the (possibly slow) translation call.
    for pane in (self.textBrowser_origin, self.textBrowser_translation):
        pane.clear()
    translated = main(source_text)
    if len(translated) == 0:
        # An empty reply is treated as a server timeout.
        self.statusbar.showMessage("服务器连接超时,稍后再试")
    else:
        self.textBrowser_origin.append("%s" % source_text)
        self.textBrowser_translation.append("%s" % translated)
        self.statusbar.showMessage("翻译成功!")
def clear_content(self):
    """Reset the accumulated text/result state and empty both text panes."""
    global text, content_counter, result
    for browser in (self.textBrowser_origin, self.textBrowser_translation):
        browser.clear()
    text = ""
    result = ""
    content_counter = 1
    self.statusbar.showMessage("清除内容成功")
def retranslateUi(self, Translator_Google):
    """Install (translatable) captions on the window and its buttons."""
    tr = QtCore.QCoreApplication.translate
    Translator_Google.setWindowTitle(tr("Translator_Google", "Translator"))
    captions = (
        (self.add_content_button, "增加文本内容"),
        (self.add_new_paragragh_button, "增加新的段落"),
        (self.translate_2_button, "翻译"),
        (self.clear_content_button, "清除内容"),
    )
    for widget, caption in captions:
        widget.setText(tr("Translator_Google", caption))
| [
"755423541@qq.com"
] | 755423541@qq.com |
34ad42bab6e570409ead52cb2806a07e63ae51e4 | f514fbafe8953cdf91ade0076ce11412924b3121 | /sort/sort_arr_by_el_frequency.py | a3f522c7a57d3898d6bf98f2ce26b586090e2b14 | [] | no_license | morozov1982/python-checkio | 2b70dde5d7784054e6eca1214d59e114db64ef33 | e44b183cea4819fb1c542c11fc8c93dd639181f2 | refs/heads/master | 2023-02-11T01:36:40.130912 | 2021-01-04T07:30:12 | 2021-01-04T07:30:12 | 300,554,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | '''
***** Sort Array by Element Frequency *** (Elementary+) *****
***(EN)***
Sort the given iterable so that its elements end up in the decreasing frequency order,
that is, the number of times they appear in elements.
If two elements have the same frequency,
they should end up in the same order as the first appearance in the iterable.
Input: Iterable
Output: Iterable
Example:
1. frequency_sort([4, 6, 2, 2, 6, 4, 4, 4]) == [4, 4, 4, 4, 6, 6, 2, 2]
2. frequency_sort(['bob', 'bob', 'carl', 'alex', 'bob']) == ['bob', 'bob', 'bob', 'carl', 'alex']
Precondition: elements can be ints or strings
The mission was taken from Python CCPS 109 Fall 2018.
It's being taught for Ryerson Chang School of Continuing Education by Ilkka Kokkarinen
(http://www.scs.ryerson.ca/~ikokkari/)
***(RU)***
Отсортируйте данный итератор таким образом,
чтобы его элементы оказались в порядке убывания частоты их появления,
то есть по количеству раз, которое они появляются в элементах.
Если два элемента имеют одинаковую частоту, они должны оказаться в том же порядке,
в котором стояли изначально в итераторе.
Входные данные: Итератор
Выходные данные: Итератор
Пример:
1. frequency_sort([4, 6, 2, 2, 6, 4, 4, 4]) == [4, 4, 4, 4, 6, 6, 2, 2]
2. frequency_sort(['bob', 'bob', 'carl', 'alex', 'bob']) == ['bob', 'bob', 'bob', 'carl', 'alex']
Предварительное условие: Элементы могут быть целыми числами или строками.
Миссия была взята из Python CCPS 109 Осень 2018.
Она преподается Илккой Коккариненым в Школа непрерывного образования Раймонда Чанга.
'''
def frequency_sort(items):
    """Return the elements of *items* sorted by decreasing frequency.

    Elements with equal frequency keep the order of their first appearance.
    Accepts any iterable (the original used ``list.index``/``list.count``
    and therefore required a list); elements may be ints or strings.

    Runs in O(n log n): the original re-scanned the whole list inside each
    sort key (``items.index`` / ``items.count``), which is O(n^2).
    """
    from collections import Counter

    seq = list(items)
    counts = Counter(seq)
    # Position of each value's first appearance, for stable tie-breaking.
    first_seen = {}
    for pos, value in enumerate(seq):
        first_seen.setdefault(value, pos)
    return sorted(seq, key=lambda v: (-counts[v], first_seen[v]))
# Self-check harness from the CheckiO mission; runs only when executed directly.
if __name__ == '__main__':
    print("Example:")
    print(frequency_sort([4, 6, 2, 2, 6, 4, 4, 4]))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert list(frequency_sort([4, 6, 2, 2, 6, 4, 4, 4])) == [4, 4, 4, 4, 6, 6, 2, 2]
    assert list(frequency_sort(['bob', 'bob', 'carl', 'alex', 'bob'])) == ['bob', 'bob', 'bob', 'carl', 'alex']
    # Edge cases: all-unique input keeps its order; empty and singleton inputs.
    assert list(frequency_sort([17, 99, 42])) == [17, 99, 42]
    assert list(frequency_sort([])) == []
    assert list(frequency_sort([1])) == [1]
    print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"morozov1982@gmail.com"
] | morozov1982@gmail.com |
a2e28b37e97ee345bbce0bd063e5c7952d887ae1 | 061a5bb81672809424fc534471edcf09b5d5535d | /1.py | 44e0ad5b445066aea800f95cccb52dea0bd59191 | [] | no_license | nitish66/newrepo | 8f5448ee99d7fd671bc810ff08cf8d4e158a11c8 | a41ad04ebc99ca5d4b76b970ffb5654bd51eaa61 | refs/heads/master | 2022-12-22T07:42:16.285851 | 2020-09-28T10:58:38 | 2020-09-28T10:58:38 | 299,275,731 | 0 | 0 | null | 2020-09-28T10:58:39 | 2020-09-28T10:42:27 | Python | UTF-8 | Python | false | false | 128 | py | x=int(input("Enter a number:"))
# `x` was read from the user on the preceding line; report whether it exceeds 10.
if x > 10:
    print(x, "is greater than 10")
else:
    print(x, "is not greater than 10")
| [
"noreply@github.com"
] | noreply@github.com |
c39197167bebafc12dd01290ed117d1832b17519 | f0079054632435825e36594bc3644f47daa69c03 | /api_rest_ecommerce/settings/production.py | 74b86a6888b1294421d6f8c18de801cbd221da3e | [] | no_license | endaniel1/Curso_Django_RestFramework | b3f70c6a4ac56487a1fd3c0692f534452efaa9b3 | b2a85f9283495f6b4ccc4fb942691e8de1ee5585 | refs/heads/main | 2023-05-29T01:14:24.611313 | 2021-06-05T03:33:30 | 2021-06-05T03:33:30 | 367,515,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True in a *production* settings module — confirm this
# is intentional; production deployments should normally run with DEBUG = False.
DEBUG = True

# NOTE(review): an empty ALLOWED_HOSTS only works while DEBUG is True; the
# production hostnames must be listed here before DEBUG is turned off.
ALLOWED_HOSTS = []

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file in the project root. `os` and `BASE_DIR` come from the
# `from .base import *` at the top of this module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/' | [
"enriq_1997@hotmail.com"
] | enriq_1997@hotmail.com |
3a047d50208cf8649840ad6a6c3f71f4875cf42b | d0938de7a70fedef46fa7ec1d09f80446884f8eb | /venv/Lib/site-packages/seleniumwire/__init__.py | 7e5f424cf652cd8b326833b64f3fcf0a1e95bd45 | [] | no_license | RubyPatil/5_Class | 66ebf8cc1db6b59bd2bde282c2f3a804df22c10b | 12a65dea65fff4cd63176fad0011ce9d918a09fd | refs/heads/master | 2020-04-21T20:15:36.602623 | 2019-02-09T06:08:27 | 2019-02-09T06:08:27 | 169,837,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | # -*- coding: utf-8 -*-
"""Top-level package for Selenium Wire."""

# Package metadata — presumably consumed by packaging/version-bump tooling
# (TODO confirm against the project's setup configuration).
__author__ = """Will Keeling"""
__version__ = '1.0.1'
| [
"rathnakar.patil@gmail.com"
] | rathnakar.patil@gmail.com |
dadea59a2de115c79519009bb91a64eea7e37639 | d3fe4fe683e612164bea4c4e2c16acc077640bf4 | /abs.py | 5106d281f9ebaccd16409308cb5a6cc83129bfef | [] | no_license | ibkov/check_functions1 | dd098b5b424766a9f63603a39c4b5e80fdab53ea | c1d2342eef6c936053a4c1f693ef324c635f7c43 | refs/heads/master | 2022-12-31T07:34:51.674550 | 2020-10-22T12:18:08 | 2020-10-22T12:18:08 | 306,324,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | def abs(num):
if num < 0:
return -num
return num | [
"34207589+ibkov@users.noreply.github.com"
] | 34207589+ibkov@users.noreply.github.com |
4238d3e59229db3f82e82deeaea7ce90768f81e6 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /未完成题目/LCP/LCP25/LCP25_Python_1.py | dafd8c2c8eabcccd19a5f5df0444b87409140e43 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | class Solution:
def keyboard(self, k: int, n: int) -> int:
    """LCP 25: count strings of length n over 26 letters, each used at most k times.

    DP over the alphabet: dp[j] = number of valid length-j strings using the
    letters processed so far. Adding a new letter that appears t times in a
    length-j string contributes C(j, t) placements:
        new[j] = sum_{t=0..min(k, j)} C(j, t) * dp[j - t]
    The answer is dp[n] after all 26 letters, modulo 1e9+7.

    Sanity checks (from the driver below): keyboard(1, 1) == 26,
    keyboard(1, 2) == 26 * 25 == 650.
    """
    from math import comb

    MOD = 10 ** 9 + 7
    dp = [0] * (n + 1)
    dp[0] = 1  # one empty string
    for _ in range(26):
        new = [0] * (n + 1)
        for j in range(n + 1):
            for t in range(min(k, j) + 1):
                new[j] = (new[j] + comb(j, t) * dp[j - t]) % MOD
        dp = new
    return dp[n]
if __name__ == "__main__":
    # Manual spot checks for LCP 25; the trailing comments give the expected
    # answers once keyboard() is implemented (the stub above returns None).
    print(Solution().keyboard(1, 1))  # 26
    print(Solution().keyboard(1, 2))  # 650
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
b3effe603643e62e7f5a801a4141d3a5770f80f1 | 906ecd89b7beff111629655e48db7e10f0a4290d | /CTFWeb/writeup/urls.py | 789ce4b9742857e599918f01df088d51ac687994 | [] | no_license | Kenun99/WellCTF_CTFWeb | 09093471a16f813d664611e6b5c3803fed79f698 | e23cdb96a68361ba969ce55ad3e6763a63e6bda0 | refs/heads/master | 2021-10-09T15:28:53.839106 | 2018-12-30T15:51:18 | 2018-12-30T15:51:18 | 153,210,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from django.urls import path
from . import views
# URL patterns for the writeup app: the app root serves the discussion view.
# NOTE(review): the view is named "disscuss" — looks like a typo for
# "discuss"; renaming it must be coordinated with writeup/views.py.
urlpatterns = [
    path(r'', views.disscuss, name='writeup')
]
"chenwm_@outlook.com"
] | chenwm_@outlook.com |
c9112067c2be87153127531d92149d2996a54b55 | a3371564332688b66705590a606a9da12c3b079a | /tradematcher/order_stack.py | c2ec22d41bc047fbbde0d813afdd5a48d08c1ad6 | [] | no_license | jhylands/aglodie | 688981c74ec9d09cc4428fc925dd94b78639c672 | 9a64d883af1215bb7fd928ff7cff992f03dac9ac | refs/heads/main | 2023-05-15T03:19:14.680407 | 2021-03-30T16:41:00 | 2021-03-30T16:41:00 | 352,934,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | from typing import List
from tradematcher.order import Order, Offer, Bid
from user import User
class EndOfOrders(Exception):
    """Raised by OrderStack.top when no order with remaining quantity is left."""
    pass
class OrderStack:
    """Base container for a price-ordered stack of Orders.

    Subclasses implement ``sort`` to establish their price priority;
    ``top`` lazily sorts, discards depleted orders (quantity <= 0) and
    returns the first order that still has quantity.
    """

    def __init__(self, orders):
        # type: (List[Order]) -> None
        self.orders = orders
        # Lazy flag: ``top`` triggers ``sort`` the first time it is needed.
        self.sorted = False

    @property
    def top(self):
        """Return the best live order, dropping depleted ones from the front.

        Raises:
            EndOfOrders: when no order with positive quantity remains.
        """
        if not self.sorted:
            self.sort()
        # BUG FIX: the original removed items from self.orders while iterating
        # over it, which skips elements and could raise EndOfOrders even
        # though a live order remained. Pop depleted orders from the front
        # until a live one is found instead.
        while self.orders:
            candidate = self.orders[0]
            if candidate.quantity > 0:
                return candidate
            self.orders.pop(0)
        # No items left
        raise EndOfOrders("No items left")

    @property
    def top_price(self):
        """Price of the current top order."""
        return self.top.price

    def sort(self):
        # NotImplementedError is the idiomatic signal for a required override;
        # it subclasses Exception, so existing broad handlers still work.
        raise NotImplementedError("Not overwritten")

    def __str__(self):
        return str(self.orders)
class BidStack(OrderStack):
    """Stack of Bid orders, ordered by ascending price.

    NOTE(review): ascending order means ``top`` yields the *lowest* priced
    bid — confirm that is the intended priority (it mirrors the original
    sort direction).
    """

    def sort(self):
        # BUG FIX: the original called sorted(self.orders, ...) and discarded
        # the result, so the stack was never actually ordered. Sort in place.
        self.orders.sort(key=lambda order: order.price)
        self.sorted = True

    @staticmethod
    def from_user_list(users):
        # type: (List[User]) -> BidStack
        """Build a stack from every user currently able to place a bid."""
        return BidStack([Bid.from_user(user) for user in users if Bid.user_can_bid(user)])
class OfferStack(OrderStack):
    """Stack of Offer orders, ordered by descending price.

    NOTE(review): descending order means ``top`` yields the *highest* priced
    offer — confirm that is the intended priority (it mirrors the original
    sort direction).
    """

    def sort(self):
        # BUG FIX: the original called sorted(self.orders, ...) and discarded
        # the result, so the stack was never actually ordered. Sort in place.
        self.orders.sort(key=lambda order: order.price, reverse=True)
        self.sorted = True

    @staticmethod
    def from_user_list(users):
        # type: (List[User]) -> OfferStack
        # Return-type comment fixed: the original said BidStack.
        """Build a stack from every user currently able to place an offer."""
        return OfferStack([Offer.from_user(user) for user in users if Offer.user_can_offer(user)])
| [
"hylands.james@gmail.com"
] | hylands.james@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.