index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,100 | 833dc0ad1a1e58ab6ec2093dc8178dc877bfdabf | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, json, os, time
from neighborhood_hash import neighborhood_hash, save_to_file
from common.utils import use_progressbar, count_file, read_hashed_call_graph
from smali_opcode import HCG_FILE_NAME
def save_to_file(hash_cg, graphdir):
    """Dump the double-hashed call graph to JSON inside *graphdir*.

    NOTE: this deliberately shadows the ``save_to_file`` imported from
    ``neighborhood_hash`` above so that this script writes its own
    output filename — TODO confirm the shadowing is intentional.
    """
    # 'with' guarantees the handle is closed even if json.dump raises.
    with open(os.path.join(graphdir, 'directed_double_hcg_15bit.json'), 'w') as f:
        json.dump(hash_cg, f)
def main():
    """CLI entry point: walk an APK extraction directory and re-hash every
    hashed call graph (HCG) file found, writing the doubled hash next to it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', help='directory of the apk')
    args = parser.parse_args()
    if args.directory:
        # progressbar
        file_count = count_file(args.directory, HCG_FILE_NAME)
        pbar = use_progressbar('double hashing...', file_count)
        pbar.start()
        progress = 0
        for parent, dirnames, filenames in os.walk(args.directory):
            for filename in filenames:
                if filename == HCG_FILE_NAME:
                    graphdir = parent
                    hcg = read_hashed_call_graph(os.path.join(parent, filename))
                    # Seed each node's label with its previous neighborhood
                    # hash so neighborhood_hash() composes a second round.
                    for node in hcg:
                        hcg[node]['label'] = hcg[node]['nhash']
                    double_hcg = neighborhood_hash(hcg, graphdir)
                    save_to_file(double_hcg, graphdir)
                    # progressbar
                    progress += 1
                    pbar.update(progress)
        # progressbar
        pbar.finish()
    else:
        parser.print_help()
if __name__ == '__main__':
main() |
18,101 | f664cb69438c8eb9b5f7a7ba0cdf4fbe0d9c51df | import sys
import struct
from disk import Disk
class FATable:
    """Wrapper around a raw File Allocation Table blob.

    ``tables`` is the raw bytes of the first FAT; ``fstype`` is either
    "FAT32" (32-bit entries) or anything else (treated as 16-bit FAT16).
    """

    # End-of-chain thresholds: any entry >= this value terminates a chain.
    EOC_FAT32 = 0x0FFFFFF8
    EOC_FAT16 = 0xFFF8

    def __init__(self, tables, fstype):
        self.tables = tables
        self.fstype = fstype
        if fstype == "FAT32":
            # Unsigned formats: FAT entries are raw unsigned cluster numbers.
            # (The old signed "<l"/"<h" made FAT16's 0xFFFF read as -1, so
            # FAT16 chains never matched the end-of-chain test.)
            self.fatformat = "<L"
            self.fatsize = 4
            self.eoc = self.EOC_FAT32
        else:
            self.fatformat = "<H"
            self.fatsize = 2
            self.eoc = self.EOC_FAT16

    def get(self, cluster):
        """Follow the chain starting at *cluster*; return the cluster list."""
        chain = [cluster]
        next_cluster = self.get_info(cluster)
        while next_cluster < self.eoc:
            chain.append(next_cluster)
            next_cluster = self.get_info(next_cluster)
        return chain

    def get_info(self, cluster):
        """Return the raw FAT entry for *cluster*."""
        pos = cluster * self.fatsize
        return struct.unpack(self.fatformat, self.tables[pos : pos+self.fatsize])[0]
class FAT:
    """Parsed BIOS Parameter Block (boot sector) of a FAT16/FAT32 volume."""

    def __init__(self, sector):
        self.fstype = "FAT16"
        # All BPB fields are little-endian and unsigned; one-byte *slices*
        # (sector[13:14]) keep this working on both Python 2 str and
        # Python 3 bytes, and "<H" fixes misreads of values > 32767 that
        # the old signed "<h" produced (e.g. 16-bit total sector counts).
        self.bytes_per_sector = struct.unpack("<H", sector[11:13])[0]
        self.sectors_per_cluster = struct.unpack("<B", sector[13:14])[0]
        self.reserved_sector_count = struct.unpack("<H", sector[14:16])[0]
        self.number_of_fats = struct.unpack("<B", sector[16:17])[0]
        self.root_entry_number = struct.unpack("<H", sector[17:19])[0]
        self.fatsize = 0
        self.backup_boot_sector = 0
        # FAT32 has no fixed root directory, so a zero root entry count
        # identifies a FAT32 volume.
        if self.root_entry_number == 0:
            self.fstype = "FAT32"
        if self.fstype == "FAT32":
            self.total_sectors = struct.unpack("<L", sector[32:36])[0]
            self.fatsize = struct.unpack("<L", sector[36:40])[0]
            self.backup_boot_sector = struct.unpack("<H", sector[50:52])[0]
        else:
            self.total_sectors = struct.unpack("<H", sector[19:21])[0]
            self.fatsize = struct.unpack("<H", sector[22:24])[0]
        # Sectors occupied by the FAT12/16 root directory, rounded up.
        # '//' keeps this an integer on Python 3 (was '/').
        self.root_dir_sectors = ((self.root_entry_number * 32) + (self.bytes_per_sector - 1)) // self.bytes_per_sector
        self.first_data_sector = self.reserved_sector_count + self.fatsize * self.number_of_fats + self.root_dir_sectors

    def c2s(self, cluster):
        """Convert a cluster number to its first absolute sector.

        Data clusters are numbered from 2, hence the offset.
        """
        return ((cluster-2) * self.sectors_per_cluster) + self.first_data_sector
def usage():
    """Print a usage hint and exit with status 0.

    Uses print() call syntax, which is valid on Python 2 (a parenthesized
    expression) and Python 3 alike; the old print statement was Py2-only.
    """
    print("%s filename sector" % (sys.argv[0],))
    sys.exit(0)
def show_part_info(sector):
    """Parse *sector* (raw boot-sector bytes) into a FAT descriptor."""
    return FAT(sector)
def get_fat_table(fat, disk):
    """Read the first FAT from *disk* and wrap it in a FATable.

    *fat* is a parsed boot sector (provides reserved_sector_count, fatsize
    and fstype); *disk* must expose read(sector, count).
    """
    # The FAT starts immediately after the reserved region.
    start = fat.reserved_sector_count
    # (A leftover Py2-only debug 'print s' statement was removed here.)
    tables = disk.read(start, fat.fatsize)
    return FATable(tables, fat.fstype)
if __name__ == '__main__':
    # Usage: script.py <image-file> <boot-sector-number>  (Python 2 syntax)
    filename = sys.argv[1]
    sector = int(sys.argv[2])
    # 512-byte sectors; read the requested boot sector from the image.
    disk = Disk(512, filename)
    raw = disk.read(sector, 512)
    fat = show_part_info(raw)
    print fat.fstype, fat.first_data_sector, fat.reserved_sector_count, fat.fatsize, fat.root_dir_sectors
    fat_table = get_fat_table(fat, disk)
    # Sector of the first data cluster (cluster numbering starts at 2).
    print fat.c2s(2)
|
18,102 | babd9c9568c86038bb1beb6309d108d692323429 | # -*- coding: utf-8 -*-
"""
Created on Sun May 24 20:23:58 2020
@author: PC
"""
from astroquery.simbad import Simbad
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import numpy as np  # BUG FIX: np was used below but never imported
from collections import Counter

# Sample 1,000,000 random 3-D direction vectors and look up which
# constellation each one falls in; with 10**6 samples, count/10000 is the
# percentage of the sky (by this sampling) covered by each constellation.
x, y, z = np.random.randn(3, 1000000)
seiza = SkyCoord(x, y, z, representation_type='cartesian').get_constellation()
# Count every constellation in one pass; the old per-constellation
# list(seiza).count(c) rebuilt and rescanned the list O(n*k) times.
counts = Counter(seiza)
percent = sorted(('%.4f%%%20s'%(counts[c]/10000,c) for c in counts),reverse=True)
print('\n'.join(percent))
|
18,103 | b087dd8acab205611158aa97bb99354e6a55c43d | def read_file():
input_file = open("input.txt", "r")
if input_file.mode == 'r':
return input_file.read() |
18,104 | 2de0272d2c39e8b02dea7f808d26cdef7e1bc9a6 | class CharacterGetter(object):
def __init__(self, words):
self.words = words
self.characters = set([w_i for w in self.words for w_i in w])
self.number_of_characters = len(self.characters)
self.char2idx = self.set_char2idx()
def set_char2idx(self):
char2idx = {c: i + 2 for i, c in enumerate(self.characters)}
char2idx["UNK"] = 1
char2idx["PAD"] = 0
return char2idx
|
18,105 | fc81d289057eb44883de087a6469375e7692aed5 | #!/usr/bin/python
import os, optparse, io, subprocess, socket, threading, stat, sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
import StringIO
# This works on Python 2
StringIO = StringIO.StringIO
except ImportError:
# This works on Python 3
StringIO = io.StringIO
# VAR -> value definitions collected from repeated -D options.
defines = {}

def add_var(option, opt, value, parser):
    """optparse callback for ``-D VAR=DATA``: record DATA under VAR.

    Splits on the first '=' only, so values may themselves contain '='
    (the old unbounded split raised ValueError on e.g. -D X=a=b).
    """
    var, val = value.split('=', 1)
    defines[var] = val
def expand(items):
    """Expand (guest_path, host_path) manifest pairs into concrete files.

    Three forms are understood:
      * both sides ending in '/**' — recursively mirror the host tree;
      * 'prefix/&/suffix' with host ending in '/&' — substitute the suffix;
      * anything else — yielded unchanged.
    """
    for guest_path, host_path in items:
        if guest_path.endswith('/**') and host_path.endswith('/**'):
            guest_root = guest_path[:-2]
            host_root = host_path[:-2]
            for dirpath, _dirnames, filenames in os.walk(host_root):
                # Path of this directory relative to the mirrored root.
                rel = dirpath[len(host_root):]
                if rel != "":
                    rel += "/"
                for fname in filenames:
                    yield (guest_root + rel + fname,
                           host_root + rel + fname)
        elif '/&/' in guest_path and host_path.endswith('/&'):
            head, tail = guest_path.split('/&/', 1)
            yield (head + '/' + tail, host_path[:-1] + tail)
        else:
            yield (guest_path, host_path)
def unsymlink(f):
    """Resolve *f* through symlinks, returning the final host path.

    A leading '!' strips the marker and stops resolution; a leading '->'
    denotes an in-image symlink spec and is returned untouched. Absolute
    link targets are re-anchored by walking up from f's directory until a
    base is found where base+link exists. Any OS error (including *f* not
    being a symlink) returns *f* unchanged.
    """
    if f.startswith('!'):
        return f[1:]
    if f.startswith('->'):
        return f
    try:
        link = os.readlink(f)
        if link.startswith('/'):
            # try to find a match
            base = os.path.dirname(f)
            while not os.path.exists(base + link):
                if base == '/':
                    return f
                base = os.path.dirname(base)
        else:
            base = os.path.dirname(f) + '/'
        # Recurse: the target may itself be a symlink.
        return unsymlink(base + link)
    except Exception:
        return f
def upload(osv, manifest, depends):
    """Stream every manifest file into the running OSv guest as a CPIO
    ("newc" format) archive over TCP port 10000.

    *osv* is the guest's Popen handle (stdout piped), *manifest* a parsed
    ConfigParser with a [manifest] section, *depends* a writable stream
    receiving a make-style dependency list.
    """
    files = dict([(f, manifest.get('manifest', f, vars=defines))
                  for f in manifest.options('manifest')])
    files = list(expand(files.items()))
    files = [(x, unsymlink(y)) for (x, y) in files]
    # Wait for the guest to come up and tell us it's listening
    while True:
        line = osv.stdout.readline()
        if not line or line.find(b"Waiting for connection") >= 0:
            break
        os.write(sys.stdout.fileno(), line)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 10000))
    # We'll want to read the rest of the guest's output, so that it doesn't
    # hang, and so the user can see what's happening. Easiest to do this with
    # a thread.
    def consumeoutput(file):
        for line in iter(lambda: file.readline(), b''):
            os.write(sys.stdout.fileno(), line)
    threading.Thread(target=consumeoutput, args=(osv.stdout,)).start()
    # Send a CPIO header or file, padded to multiple of 4 bytes
    def cpio_send(data):
        s.sendall(data)
        partial = len(data)%4
        if partial > 0:
            s.sendall(b'\0'*(4-partial))
    # 8-hex-digit zero-padded field, as required by the newc CPIO format.
    def cpio_field(number, length):
        return ("%.*x" % (length, number)).encode()
    def cpio_header(filename, mode, filesize):
        if sys.version_info >= (3, 0, 0):
            filename = filename.encode("utf-8")
        return (b"070701" # magic
                + cpio_field(0, 8) # inode
                + cpio_field(mode, 8) # mode
                + cpio_field(0, 8) # uid
                + cpio_field(0, 8) # gid
                + cpio_field(0, 8) # nlink
                + cpio_field(0, 8) # mtime
                + cpio_field(filesize, 8) # filesize
                + cpio_field(0, 8) # devmajor
                + cpio_field(0, 8) # devminor
                + cpio_field(0, 8) # rdevmajor
                + cpio_field(0, 8) # rdevminor
                + cpio_field(len(filename)+1, 8) # namesize
                + cpio_field(0, 8) # check
                + filename + b'\0')
    # Strip local .so files (into a cached "-stripped.so" sibling) before
    # uploading, to keep the image small; absolute system paths are left alone.
    def strip_file(filename):
        stripped_filename = filename
        if filename.endswith(".so") and \
                (filename[0] != "/" or filename.startswith(os.getcwd())):
            stripped_filename = filename[:-3] + "-stripped.so"
            if not os.path.exists(stripped_filename) \
                    or (os.path.getmtime(stripped_filename) < \
                        os.path.getmtime(filename)):
                subprocess.call(["strip", "-o", stripped_filename, filename])
        return stripped_filename
    # Send the files to the guest
    for name, hostname in files:
        if hostname.startswith("->"):
            # Explicit in-image symlink: "->target".
            link = hostname[2:]
            cpio_send(cpio_header(name, stat.S_IFLNK, len(link)))
            cpio_send(link.encode())
        else:
            depends.write('\t%s \\\n' % (hostname,))
            hostname = strip_file(hostname)
            if os.path.islink(hostname):
                perm = os.lstat(hostname).st_mode & 0o777
                link = os.readlink(hostname)
                cpio_send(cpio_header(name, perm | stat.S_IFLNK, len(link)))
                cpio_send(link.encode())
            elif os.path.isdir(hostname):
                perm = os.stat(hostname).st_mode & 0o777
                cpio_send(cpio_header(name, perm | stat.S_IFDIR, 0))
            else:
                perm = os.stat(hostname).st_mode & 0o777
                cpio_send(cpio_header(name, perm | stat.S_IFREG, os.stat(hostname).st_size))
                with open(hostname, 'rb') as f:
                    cpio_send(f.read())
    cpio_send(cpio_header("TRAILER!!!", 0, 0))
    s.shutdown(socket.SHUT_WR)
    # Wait for the guest to actually finish writing and syncing
    s.recv(1)
    s.close()
def main():
    """Build an OSv image: parse options, boot the guest with cpiod,
    upload the manifest files, then disable ZFS compression."""
    make_option = optparse.make_option
    opt = optparse.OptionParser(option_list=[
        make_option('-o',
                    dest='output',
                    help='write to FILE',
                    metavar='FILE'),
        make_option('-d',
                    dest='depends',
                    help='write dependencies to FILE',
                    metavar='FILE',
                    default=None),
        make_option('-m',
                    dest='manifest',
                    help='read manifest from FILE',
                    metavar='FILE'),
        make_option('-D',
                    type='string',
                    help='define VAR=DATA',
                    metavar='VAR=DATA',
                    action='callback',
                    callback=add_var),
    ])
    (options, args) = opt.parse_args()
    # Collect dependencies in memory unless a file was requested.
    depends = StringIO()
    if options.depends:
        # BUG FIX: 'file' is the Python 2 builtin and a NameError on
        # Python 3; open() works on both.
        depends = open(options.depends, 'w')
    try:
        manifest = configparser.SafeConfigParser()
    except AttributeError:
        # SafeConfigParser was removed in Python 3.12; ConfigParser is
        # the designated replacement.
        manifest = configparser.ConfigParser()
    manifest.optionxform = str  # avoid lowercasing
    manifest.read(options.manifest)
    depends.write('%s: \\\n' % (options.output,))
    image_path = os.path.abspath(options.output)
    # Boot the guest with the cpiod tool listening on forwarded port 10000.
    osv = subprocess.Popen('cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -e "--norandom --noinit /tools/cpiod.so" --forward tcp:10000::10000' % image_path, shell=True, stdout=subprocess.PIPE)
    upload(osv, manifest, depends)
    osv.wait()
    # Disable ZFS compression; it stops taking effect from this point on.
    osv = subprocess.Popen('cd ../..; scripts/run.py -m 512 -c1 -i %s -u -s -e "--norandom --noinit /zfs.so set compression=off osv"' % image_path, shell=True, stdout=subprocess.PIPE)
    osv.wait()
    depends.write('\n\n')
    depends.close()
|
18,106 | bf0a290b7f95d520e8cb7ccecdb6548b3799fa96 | from Dominolib import DominoTwo
from Dominolib import DominoOne
from fctdomino import DOMINO_24, DRAW, draw, Split1, Split2, print_board
from colors import bcolors
from Board_player import Player, Castle, Dom
import numpy as np
# --- Show the shuffled domino list -------------------------------------
print(f"{bcolors.GREEN}*************************************")
print("* Affichage liste Domino mélangée *")
print(f"*************************************{bcolors.BASE}")
for k in range(len(DOMINO_24)):
    print(DOMINO_24[k])
print(f"{bcolors.GREEN}***********************************")
print("* Fin liste Domino mélangée *")
print(f"***********************************{bcolors.BASE}")
print(" ")
print(f"taille liste : {len(DOMINO_24)}")
print(" ")
draw(DOMINO_24)
print(f"{bcolors.GREEN}*************************************")
print("* Affichage liste Domino mélangée *")
print(f"*************************************{bcolors.BASE}")
for k in range(len(DRAW)):
    print(DRAW[k])
# Ask for a tile number until a valid one (< 4) is given.
while True:
    numbertuile= int(input("Enter num tuile : "))
    if numbertuile < 4:
        break
print("")
# Split the chosen tile into its two halves.
SPLIT1=Split1(DRAW,numbertuile)
#print(SPLIT1)
print("")
SPLIT2=Split2(DRAW,numbertuile)
#print(SPLIT2)
print("")
print(DRAW[numbertuile])
print("")
# Choose which half is the anchor, then where the other half goes
# (2=below, 4=left, 6=right, 8=above — numpad directions).
while True:
    print("landOne = 0 and landTwo = 1")
    print("")
    numberTuile = int(input("what is your choice 0 or 1 : "))
    print("")
    # landOne fixed
    if numberTuile == 0:
        # landTwo placement
        Dom1 = SPLIT1
        Dom2 = SPLIT2
        while True:
            localisationTuile = int(input("what is your choice 2 or 4 or 6 or 8 : "))
            if localisationTuile == 2:
                print(DominoOne(SPLIT1[0], SPLIT1[1]))
                print(DominoTwo(SPLIT2[0], SPLIT2[1]))
                break
            elif localisationTuile == 4:
                print(DominoTwo(SPLIT2[0], SPLIT2[1]), DominoOne(SPLIT1[0], SPLIT1[1]))
                break
            elif localisationTuile == 6:
                print(DominoOne(SPLIT1[0], SPLIT1[1]), DominoTwo(SPLIT2[0], SPLIT2[1]))
                break
            elif localisationTuile == 8:
                print(DominoTwo(SPLIT2[0], SPLIT2[1]))
                print(DominoOne(SPLIT1[0], SPLIT1[1]))
                break
        break
    # landTwo fixed
    elif numberTuile == 1:
        Dom1 = SPLIT2
        Dom2 = SPLIT1
        while True:
            # landOne placement
            localisationTuile = int(input("what is your choice 2 or 4 or 6 or 8 : "))
            if localisationTuile == 2:
                print(DominoTwo(SPLIT2[0], SPLIT2[1]))
                print(DominoOne(SPLIT1[0], SPLIT1[1]))
                break
            elif localisationTuile == 4:
                print(DominoOne(SPLIT1[0], SPLIT1[1]), DominoTwo(SPLIT2[0], SPLIT2[1]))
                break
            elif localisationTuile == 6:
                print(DominoTwo(SPLIT2[0], SPLIT2[1]), DominoOne(SPLIT1[0], SPLIT1[1]))
                break
            elif localisationTuile == 8:
                print(DominoOne(SPLIT1[0], SPLIT1[1]))
                print(DominoTwo(SPLIT2[0], SPLIT2[1]))
                break
        break
print("")
# Demo 5x10 board; place the first split in the top-left corner.
a = [[0, 0, 7, 0, 4, 0, 2, 2, 2, 1 ], [1, 1, 2, 3, 4, 0, 2, 2, 2, 1 ], [1, 1, 2, 3, 4, 0, 2, 2, 2, 1 ], [1, 1, 2, 3, 4, 0, 2, 2, 2, 1 ], [1, 1, 2, 3, 4, 0, 2, 2, 2, 1 ]]
print_board(a)
print(a[0])
print("")
(a[0][0])= SPLIT1[0]
(a[0][1])= SPLIT1[1]
print(a[0])
print_board(a)
P1=Player(1)
P2=Player(2)
"""
print_board(P1.a)
print("")
(P1.a[0][0])= SPLIT1[0]
print_board(P1.a)
print("")
"""
# Pick one of player 1's boards (NOTE: both choices use P1; presumably
# board 2 was meant to be P2's — TODO confirm).
while True:
    board = int(input("what is your board (1,2) : "))
    if (board == 1):
        board = P1.a
        break
    if (board == 2):
        board = P1.b
        break
while True:
    line = int(input("what is your line (1,2,3,4,5) : "))
    if ((line == 1)or(line == 2)or(line == 3)or(line == 4)or(line == 5)):
        break
while True:
    colum = int(input("what is your column (1,2,3,4,5) : "))
    if ((colum == 1)or(colum == 2)or(colum == 3)or(colum == 4)or(colum == 5)):
        break
Castle(line,colum,board)
print_board(board)
print("")
# Second placement round: choose a board/cell, then place the two halves
# of the domino according to the direction chosen earlier.
while True:
    board = int(input("what is your board (1,2) : "))
    if (board == 1):
        board = P1.a
        break
    if (board == 2):
        board = P1.b
        break
while True:
    line = int(input("what is your line (1,2,3,4,5) : "))
    if ((line == 1)or(line == 2)or(line == 3)or(line == 4)or(line == 5)):
        break
while True:
    colum = int(input("what is your column (1,2,3,4,5) : "))
    if ((colum == 1)or(colum == 2)or(colum == 3)or(colum == 4)or(colum == 5)):
        break
Dom(line,colum,board,Dom1)
if localisationTuile == 2 :
    Dom(line+1,colum,board,Dom2)
elif localisationTuile == 4 :
    Dom(line,colum-1,board,Dom2)
elif localisationTuile == 6 :
    Dom(line,colum+1,board,Dom2)
elif localisationTuile == 8 :
    Dom(line-1,colum,board,Dom2)
#Castle(line,colum,board)
print(board)
print_board(board)
print("") |
18,107 | e26d37f37e6ba60855eae850facb708f514cbe11 | #!/usr/bin/env python
# cardinal_pythonlib/formatting.py
"""
===============================================================================
Original code copyright (C) 2009-2022 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Formatting simple Python objects.**
"""
from typing import Any
# =============================================================================
# Number printing, e.g. for parity
# =============================================================================
def trunc_if_integer(n: Any) -> Any:
    """
    Truncates floats that are integers to their integer representation.
    That is, converts ``1.0`` to ``1``, etc.
    Otherwise, returns the starting value.
    Will raise an exception if the input cannot be converted to ``int``.
    """
    as_int = int(n)
    return as_int if as_int == n else n
|
18,108 | 4ae251f27abe31118be787702a3d1e39b91167b0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import hashlib
import urllib
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.utils import six
from django.utils.functional import allow_lazy
from django.utils.html import escape
from django.utils.text import normalize_newlines
from django.utils.safestring import mark_safe, SafeData
register = Library()
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    rendered = []
    for num, para in enumerate(re.split('\n{2,}', value)):
        if autoescape:
            para = escape(para)
        rendered.append('<p data-parnum="%s">%s</p>' % (num, para.replace('\n', '<br />')))
    return '\n\n'.join(rendered)
linebreaks = allow_lazy(linebreaks, six.text_type)
@register.filter("paragraphlines", is_safe=True, needs_autoescape=True)
@stringfilter
def paragraphlines_filter(value, autoescape=None):
    """
    Replaces line breaks in plain text with appropriate HTML; a single
    newline becomes an HTML line break (``<br />``) and a new line
    followed by a blank line becomes a paragraph break (``</p>``).
    """
    # Already-safe strings must not be escaped a second time.
    autoescape = autoescape and not isinstance(value, SafeData)
    return mark_safe(linebreaks(value, autoescape))
@register.filter("numtocny", is_safe=True)
@stringfilter
def numtocny(data):
    """Render a number as formal Chinese currency words (大写金额)."""
    return numtocn(data, cny=True)
@register.filter("numtocnnum", is_safe=True)
@stringfilter
def numtocnnum(data):
    """Render a number as ordinary Chinese numerals (non-currency)."""
    return numtocn(data, cny=False)
def numtocn(data, cny=True):
    """
    Convert a numeric string to Chinese numerals.

    Algorithm: the input string is split into an integer part and a decimal
    part. The integer part is split (from the right) into 4-digit groups —
    [yi/亿, wan/万, qian/仟] style — e.g. 600190000010.70 becomes
    ['600', '1900', '0010']. Each group is converted to its written form and
    the groups are joined with magnitude markers; finally the decimal part
    is converted (角/分 for currency, 点 + digits otherwise).

    ``cny=True`` uses formal banker's characters (壹贰叁…) plus 元/角/分/整;
    ``cny=False`` uses ordinary numerals (一二三…).
    """
    # Place-value characters within a 4-digit group, and group magnitude
    # markers (万/亿/兆); for currency the ones marker is 元.
    cdict = {1: '', 2: '拾', 3: '佰', 4: '仟'} if cny else {1: '', 2: '十', 3: '百', 4: '千'}
    xdict = {2: '万', 3: '亿', 4: '兆'}
    xdict[1] = '元' if cny else ''
    if cny:
        gdict = {'0': '零', '1': '壹', '2': '贰', '3': '叁','4': '肆',
                 '5': '伍', '6': '陆', '7': '柒', '8': '捌', '9': '玖'}
    else:
        gdict = {'0': '零', '1': '一', '2': '二', '3': '三', '4': '四',
                 '5': '五', '6': '六', '7': '七', '8': '八', '9': '九'}
    cdata = str(data).split('.')
    integer = ''.join(list(reversed(cdata[0])))
    decimal = cdata[1] if len(cdata) == 2 else []
    ch_str = ''
    # Split the (reversed) integer into 4-digit groups, restoring each
    # group's digit order: e.g. ['600', '1900', '0010'].
    split_integer = list(
        reversed([''.join(list(reversed(integer[i:i+4])))
                  for i in range(0,len(integer),4)])
    )
    split_integer_len = len(split_integer)  # number of 4-digit groups
    # Convert each group and append its magnitude marker.
    for i in range(split_integer_len):
        split_integer_group = split_integer[i]
        grouped_str = ''
        # Convert the digits of this group one by one.
        split_integer_group_len = len(split_integer_group)
        lk = split_integer_group_len
        for j in range(split_integer_group_len):
            this_char = split_integer_group[j]
            if this_char == '0':
                # A zero is written only when a non-zero digit follows it.
                if j < split_integer_group_len-1:
                    if split_integer_group[j+1] != '0':
                        grouped_str = grouped_str+gdict[this_char]
            else:
                grouped_str = grouped_str+gdict[this_char]+cdict[lk]
            lk -= 1
        if grouped_str == '':  # the whole group may be zeros
            ch_str += grouped_str  # then no magnitude marker is emitted
        else:
            # Append this group's text plus its magnitude marker.
            ch_str += grouped_str+xdict[split_integer_len-i]
    # Handle the decimal part.
    decimal_len = len(decimal)
    if cny:
        if decimal_len == 0:
            ch_str += '整'
        elif decimal_len == 1:  # exactly one decimal digit (角 only)
            if int(decimal[0]) == 0:
                ch_str += '整'
            else:
                ch_str += gdict[decimal[0]]+'角整'
        else:  # two decimal digits: four 角/分 combinations
            if int(decimal[0]) == 0 and int(decimal[1]) != 0:
                ch_str += '零'+gdict[decimal[1]]+'分'
            elif int(decimal[0]) == 0 and int(decimal[1]) == 0:
                ch_str += '整'
            elif int(decimal[0]) != 0 and int(decimal[1]) != 0:
                ch_str += gdict[decimal[0]]+'角'+gdict[decimal[1]]+'分'
            else:
                ch_str += gdict[decimal[0]]+'角整'
    else:
        if decimal_len != 0:
            ch_str = ch_str + '点'
            for decimal_char in decimal:
                ch_str += gdict[decimal_char]
    return ch_str
# return only the URL of the gravatar
# TEMPLATE USE: {{ email|gravatar_url:150 }}
@register.filter
def gravatar_url(email, size=40):
    """Return the Gravatar URL for *email* at *size* pixels."""
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    default = "retro"
    # md5 needs bytes: with unicode_literals the old code passed unicode,
    # which crashes on Python 3 (and on non-ASCII addresses on Python 2).
    digest = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
    return "https://cdn.v2ex.com/gravatar/%s?%s" % (digest, urlencode({'d':default, 's':str(size)}))
# return an image tag with the gravatar
# TEMPLATE USE: {{ email|gravatar:150 }}
@register.filter
def gravatar(email, size=40):
    """Return a safe <img> tag showing *email*'s gravatar at *size* px."""
    tag = '<img src="%s" height="%d" width="%d">' % (gravatar_url(email, size), size, size)
    return mark_safe(tag)
|
18,109 | f334dee7e595fcfda19cd21b05b9eabea5adee11 | def max(num1, num2):
if num1 > num2:
print((num1), "is greater" )
else:
print((num2), "is greater")
max(54,98) |
18,110 | 28cbcca388bd23905c5f7b4e1051bcbf840b68b0 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# inspirehep is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""INSPIRE module that adds more fun to the platform."""
import pytest
@pytest.fixture(scope="module")
def app_config(app_config):
    """Override the inherited app_config fixture with a stub legacy PID provider."""
    overrides = {"LEGACY_PID_PROVIDER": "http://someurl.com"}
    app_config.update(overrides)
    return app_config
|
18,111 | 20411fb420f8ea6086e1fd75ff76989b1e02b0a2 | # List Comprehensions
import os

# Basic list-comprehension examples.
print(list(range(1,11)))
print([x*x for x in range(1, 11)])
print([x*x for x in range(1, 20) if x % 2 == 0])
print([m+n for m in 'ABC' for n in 'XYZ'])
# Guard the hard-coded path: the unconditional os.listdir() crashed on
# any machine where this directory does not exist.
segnet_dir = '/home/philex/SegNet'
if os.path.isdir(segnet_dir):
    print([d for d in os.listdir(segnet_dir)])
d = {'x':'A', 'y':'B', 'z':'C'}
for k, v in d.items():
    print(k+'='+v)
print([k+'='+v for k,v in d.items()])
L = ['HEllo','World','IBM','APPLE']
print([s.lower() for s in L])
L1 = ['Hello','World',18,'Apple',None]
L2 = []
for l in L1:
    if isinstance(l, str):
        L2.append(l.lower())
    else:
        continue
print(L2)
# The comprehension below is the concise equivalent of the loop above.
print([l.lower() for l in L1 if isinstance(l, str)])
|
18,112 | fd355a79b40ca31fc954ca2d6fd1752171182829 | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'dependencies': [
'../base/base.gyp:base',
'../ipc/ipc.gyp:ipc',
'../ui/events/events.gyp:events_base',
'../ui/events/events.gyp:events_ipc',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
'../url/url.gyp:url_lib',
],
'include_dirs': [
'..',
],
'sources': [
'ipc/service/gpu_channel.cc',
'ipc/service/gpu_channel.h',
'ipc/service/gpu_channel_manager.cc',
'ipc/service/gpu_channel_manager.h',
'ipc/service/gpu_channel_manager_delegate.h',
'ipc/service/gpu_command_buffer_stub.cc',
'ipc/service/gpu_command_buffer_stub.h',
'ipc/service/gpu_config.h',
'ipc/service/gpu_memory_buffer_factory.cc',
'ipc/service/gpu_memory_buffer_factory.h',
'ipc/service/gpu_memory_manager.cc',
'ipc/service/gpu_memory_manager.h',
'ipc/service/gpu_memory_tracking.cc',
'ipc/service/gpu_memory_tracking.h',
'ipc/service/gpu_watchdog.h',
'ipc/service/image_transport_surface.h',
'ipc/service/pass_through_image_transport_surface.cc',
'ipc/service/pass_through_image_transport_surface.h',
],
'conditions': [
['OS=="win"', {
'sources': [
'ipc/service/child_window_surface_win.cc',
'ipc/service/child_window_surface_win.h',
'ipc/service/image_transport_surface_win.cc',
],
}],
['OS=="mac"', {
'sources': [
'ipc/service/image_transport_surface_overlay_mac.h',
'ipc/service/image_transport_surface_overlay_mac.mm',
'ipc/service/ca_layer_partial_damage_tree_mac.h',
'ipc/service/ca_layer_partial_damage_tree_mac.mm',
'ipc/service/ca_layer_tree_mac.h',
'ipc/service/ca_layer_tree_mac.mm',
'ipc/service/gpu_memory_buffer_factory_io_surface.cc',
'ipc/service/gpu_memory_buffer_factory_io_surface.h',
'ipc/service/image_transport_surface_mac.mm',
],
'dependencies': [
'../skia/skia.gyp:skia',
'../ui/accelerated_widget_mac/accelerated_widget_mac.gyp:accelerated_widget_mac',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreMedia.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
],
},
}],
['OS=="android"', {
'sources': [
'ipc/service/image_transport_surface_android.cc',
'ipc/service/stream_texture_android.cc',
'ipc/service/stream_texture_android.h',
'ipc/service/gpu_memory_buffer_factory_surface_texture.cc',
'ipc/service/gpu_memory_buffer_factory_surface_texture.h',
],
'link_settings': {
'libraries': [
'-landroid', # ANativeWindow
],
},
}],
['OS=="linux"', {
'sources': [ 'ipc/service/image_transport_surface_linux.cc' ],
}],
['use_x11 == 1 and (target_arch != "arm" or chromeos == 0)', {
'sources': [
'ipc/service/x_util.h',
],
}],
['use_ozone == 1', {
'sources': [
'ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc',
'ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h',
],
'dependencies': [
'../ui/ozone/ozone.gyp:ozone',
],
}],
],
}
|
18,113 | d3eb72800d3f159f392fb852193662b94c2f0712 | from __future__ import absolute_import
from __future__ import division
import logging
from rdkit import Chem
import networkx as nx
import numpy as np
import six
logger = logging.getLogger(__name__)
class Molecule:
    """Graph representation of a molecule parsed from a SMILES string.

    Builds an undirected atom graph with per-atom and per-bond feature
    vectors, then derives, for every atom, a DAG rooted at that atom and
    the corresponding per-DAG local input matrix (for recursive-NN style
    models).

    NOTE(review): uses networkx 1.x APIs throughout (``G.add_path``,
    list-returning ``topological_sort``/``neighbors``,
    ``set_node_attributes(G, name, values=...)``, ``attr_dict=`` kwargs) —
    this will not run unchanged on networkx 2+; confirm the pinned version.
    """
    # Cap on how many parent-edge feature slots each node gets in the
    # local input matrix.
    max_number_of_parents = 4
    def __init__(self, smile, logp=None, contract_rings=False):
        self.smile = smile
        self.logp = logp
        # logger.info("Parsing Molecule {:},contract rings: {:}".format(smile, contract_rings))
        self.atoms = []
        m = Chem.MolFromSmiles(smile)
        # Chem.Kekulize(self.m)
        self.no_of_atoms = m.GetNumAtoms()
        self.graph = nx.Graph()
        # One node per atom; edges carry bond feature vectors.
        for i in range(self.no_of_atoms):
            atom = m.GetAtomWithIdx(i)
            self.graph.add_node(i, attr_dict={"atom_features": Molecule.atom_features(atom)})
            for neighbour in atom.GetNeighbors():
                neighbour_idx = neighbour.GetIdx()
                bond = m.GetBondBetweenAtoms(i, neighbour_idx)
                self.graph.add_edge(i, neighbour_idx,
                                    attr_dict={"bond_features": Molecule.bond_features(bond)})
        if contract_rings:
            self.reduce_graph_rings()
        self.create_directed_graphs()
        self.create_feature_vectors()
    def create_directed_graphs(self):
        '''
        Build, for each atom, a DAG with that atom as the sink and store a
        topologically sorted edge list (node, parent, parent_slot) per DAG
        in self.directed_graphs.
        '''
        self.directed_graphs = np.empty(
            (self.no_of_atoms, self.no_of_atoms - 1, 3), dtype=int)
        # parse all the atoms one by one and get directed graph to that atom
        # as the sink node
        for idx in range(self.no_of_atoms):
            # get shortest path from the root to all the other atoms and then reverse the edges.
            path = nx.single_source_dijkstra_path(self.graph, idx)
            G = nx.DiGraph()
            for i in range(self.no_of_atoms):
                temp = path[i]
                temp.reverse()
                G.add_path(temp)
            # do a topological sort to get a order of atoms with all edges pointing to the root
            topological_order = nx.topological_sort(G)
            sorted_path = np.empty((self.no_of_atoms - 1, 3))
            no_of_incoming_edges = {}
            for i in range(self.no_of_atoms - 1):
                node = topological_order[i]
                # Each non-sink node has exactly one outgoing edge here.
                edge = (nx.edges(G, node))[0]
                if edge[1] in no_of_incoming_edges:
                    index = no_of_incoming_edges[edge[1]]
                    no_of_incoming_edges[edge[1]] += 1
                else:
                    index = 0
                    no_of_incoming_edges[edge[1]] = 1
                sorted_path[i, :] = [node, edge[1], index]
            self.directed_graphs[idx, :, :] = sorted_path
    def create_feature_vectors(self):
        '''
        Build self.local_input_vector, a 3-D matrix I such that I[i, j] is
        the local input vector for vertex j in DAG i: the atom features
        followed by up to max_number_of_parents bond-feature slots.
        '''
        # create a three dimesnional matrix I,
        # such that Iij is the local input vector for jth vertex in ith DAG
        length_of_bond_features = Molecule.num_bond_features()
        length_of_atom_features = Molecule.num_atom_features()
        self.local_input_vector = np.zeros(
            (self.no_of_atoms, self.no_of_atoms, Molecule.num_of_features()))
        for idx in range(self.no_of_atoms):
            sorted_path = self.directed_graphs[idx, :, :]
            self.local_input_vector[idx, idx, :length_of_atom_features] = \
                self.get_atom_features(idx)
            no_of_incoming_edges = {}
            for i in range(self.no_of_atoms - 1):
                node1 = sorted_path[i, 0]
                node2 = sorted_path[i, 1]
                self.local_input_vector[idx, node1, :length_of_atom_features] = \
                    self.get_atom_features(node1)
                if node2 in no_of_incoming_edges:
                    index = no_of_incoming_edges[node2]
                    no_of_incoming_edges[node2] += 1
                    # Extra parents beyond the cap are dropped silently.
                    if index >= Molecule.max_number_of_parents:
                        continue
                else:
                    index = 0
                    no_of_incoming_edges[node2] = 1
                start = length_of_atom_features + index* length_of_bond_features
                end = start + length_of_bond_features
                self.local_input_vector[idx, node2, start:end] = \
                    self.get_bond_features(node1, node2)
    def get_cycle(self):
        """Return one cycle of self.graph as an edge list, or [] if acyclic."""
        try:
            return nx.find_cycle(self.graph)
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # presumably nx.NetworkXNoCycle was intended — confirm.
        except:
            return []
    def reduce_graph_rings(self):
        '''
        Contract every ring in the graph into a single "R_i" super-node,
        then relabel nodes back to consecutive integers.
        '''
        cycle_name_format = "R_{:}"
        index = 0
        cycle = self.get_cycle()
        while cycle:
            cycle_name = cycle_name_format.format(index)
            self.graph.add_node(cycle_name)
            # ebunch = zip(cycle, (cycle[1:] + cycle[:1]))
            self.graph.remove_edges_from(cycle)
            for node1, node2 in cycle:
                if isinstance(node1, six.string_types):
                    # node1 is itself a contracted ring: link ring-to-ring.
                    self.graph.add_edge(node1, cycle_name,
                                        attr_dict={"bond_features": Molecule.bond_features_between_contract_rings()})
                    continue
                neighbours = self.graph.neighbors(node1)
                if not neighbours:
                    continue
                # Re-attach the ring atom's external neighbours to the
                # super-node, preserving each edge's bond features.
                for neighbour in neighbours:
                    edge_attrs = self.get_bond_features(neighbour, node1)
                    self.graph.add_edge(neighbour, cycle_name, attr_dict={
                        "bond_features": edge_attrs})
                    self.graph.remove_edge(node1, neighbour)
            nx.set_node_attributes(self.graph, "atom_features",
                                   values={cycle_name: Molecule.atom_features_of_contract_rings(0)})
            for node1, node2 in cycle:
                if not isinstance(node1, six.string_types):
                    self.graph.remove_node(node1)
            index += 1
            cycle = self.get_cycle()
        self.graph = nx.convert_node_labels_to_integers(self.graph,
                                                        first_label=0)
        # NOTE(review): nx.draw renders into the current matplotlib figure
        # as a side effect — presumably leftover debugging; confirm.
        nx.draw(self.graph)
        self.no_of_atoms = len(self.graph)
    def get_atom_features(self, node_id):
        """Return the stored atom-feature vector for *node_id*."""
        attrs = nx.get_node_attributes(self.graph, "atom_features")
        return attrs[node_id]
    def get_bond_features(self, node1, node2):
        """Return the stored bond-feature vector for edge (node1, node2)."""
        attrs = self.graph.get_edge_data(node1, node2)
        return attrs["bond_features"]
    @staticmethod
    def atom_features(atom):
        """One-hot encode an RDKit atom: symbol, degree, H count, valence,
        plus an aromaticity flag."""
        return np.array(Molecule.one_of_k_encoding_unk(atom.GetSymbol(),
                                                       ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl',
                                                        'Br', 'Mg', 'Na',
                                                        'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K',
                                                        'Tl', 'Yb',
                                                        'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti',
                                                        'Zn', 'H', # H?
                                                        'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In',
                                                        'Mn',
                                                        'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
                        Molecule.one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5]) +
                        Molecule.one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4]) +
                        Molecule.one_of_k_encoding_unk(atom.GetImplicitValence(),
                                                       [0, 1, 2, 3, 4, 5]) + [atom.GetIsAromatic()])
    @staticmethod
    def atom_features_of_contract_rings(degree):
        """Feature vector used for a contracted-ring super-node: 'Unknown'
        symbol, the given degree, zeros elsewhere."""
        return np.array(Molecule.one_of_k_encoding_unk('Unknown',
                                                       ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl',
                                                        'Br', 'Mg', 'Na',
                                                        'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K',
                                                        'Tl', 'Yb',
                                                        'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti',
                                                        'Zn', 'H', # H?
                                                        'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In',
                                                        'Mn', 'Zr',
                                                        'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
                        Molecule.one_of_k_encoding(degree, [0, 1, 2, 3, 4, 5]) +
                        Molecule.one_of_k_encoding_unk(0, [0, 1, 2, 3, 4]) +
                        Molecule.one_of_k_encoding_unk(0, [0, 1, 2, 3, 4, 5]) +
                        [0])
    @staticmethod
    def bond_features_between_contract_rings():
        """Bond features for a ring-to-ring edge: marked as a single bond."""
        return np.array([1, 0, 0, 0, 0, 0])
    @staticmethod
    def bond_features(bond):
        """Encode an RDKit bond: type one-hot, conjugation and ring flags."""
        bt = bond.GetBondType()
        return np.array([bt == Chem.rdchem.BondType.SINGLE,
                         bt == Chem.rdchem.BondType.DOUBLE,
                         bt == Chem.rdchem.BondType.TRIPLE,
                         bt == Chem.rdchem.BondType.AROMATIC,
                         bond.GetIsConjugated(),
                         bond.IsInRing()])
    @staticmethod
    def num_of_features():
        """Total per-vertex local input length: atom features plus one bond
        slot per allowed parent."""
        return Molecule.max_number_of_parents*Molecule.num_bond_features() + Molecule.num_atom_features()
    @staticmethod
    def one_of_k_encoding(x, allowable_set):
        """Strict one-hot encoding; raises if x is not in allowable_set."""
        if x not in allowable_set:
            raise Exception(
                "input {0} not in allowable set{1}:".format(x, allowable_set))
        return list(map(lambda s: x == s, allowable_set))
    @staticmethod
    def one_of_k_encoding_unk(x, allowable_set):
        """Maps inputs not in the allowable set to the last element."""
        if x not in allowable_set:
            x = allowable_set[-1]
        return list(map(lambda s: x == s, allowable_set))
    @staticmethod
    def num_atom_features():
        # Return length of feature vector using a very simple molecule.
        m = Chem.MolFromSmiles('CC')
        alist = m.GetAtoms()
        a = alist[0]
        return len(Molecule.atom_features(a))
    @staticmethod
    def num_bond_features():
        # Return length of feature vector using a very simple molecule.
        simple_mol = Chem.MolFromSmiles('CC')
        Chem.SanitizeMol(simple_mol)
        return len(Molecule.bond_features(simple_mol.GetBonds()[0]))
if __name__ == '__main__':
    # Smoke test: parse one SMILES string with logging enabled.
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
    logger = logging.getLogger(__name__)
    # NOTE(review): second argument presumably toggles ring contraction --
    # confirm against the Molecule constructor.
    m = Molecule("c2(Cl)c(Cl)c(Cl)c1nccnc1c2(Cl)", True)
|
18,114 | dae64e9abcdf994cfe189b64bee52051f31cd0db | #!/usr/bin/env python3
import linkStats as ls
# Run the three link-statistics plots over the parsed PLV data.
# NOTE: `file` shadows the Python builtin of the same name.
file = "../parsedData/plvStats.txt"
ls.FPvsVP(file)
ls.ASvsVPvsCS(file)
ls.TPvsVPvsCS(file)
|
18,115 | 837677b2b4435e65d2d3c265aa41163f7ed038d1 | # USAGE
# python detect_shapes.py --image shapes_and_colors.png
# import the necessary packages
from pyimagesearch.shapedetector import ShapeDetector
import imutils
import cv2
import numpy as np
# construct the argument parse and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required=True,
# help="path to the input image")
# args = vars(ap.parse_args())
# load the image and resize it to a smaller factor so that
# the shapes can be approximated better
# Load the map image and work on a 300-px-wide copy; `ratio` scales
# detected coordinates back to the full-size image.
image = cv2.imread("map3.jpeg")
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
kernel = np.ones((5, 5), np.uint8)  # currently unused below
# dilation = cv2.dilate(image,ratio,iterations = 2)
# cv2.imwrite("alperennnn.png",dilation)
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)[1]
#thresh = cv2.bitwise_not(threshed)
# find contours in the thresholded image and initialize the
# shape detector
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
# Mask everything outside the detected outer contours, then paint the
# masked-out area light grey before re-thresholding.
stencil = np.zeros(resized.shape).astype(resized.dtype)
color = [255, 255, 255]
cv2.fillPoly(stencil, contours, color)
result = cv2.bitwise_and(resized, stencil)
result[np.where((result == [0, 0, 0]).all(axis=2))] = [192, 192, 192]
threshed = cv2.threshold(result, 150, 255, cv2.THRESH_BINARY)[1]
threshed = cv2.bitwise_not(threshed)
threshed = cv2.cvtColor(threshed, cv2.COLOR_BGR2GRAY)
cnts = cv2.findContours(threshed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# cv2.drawContours(image, cnts, -1, (0, 255, 0), 3)
sd = ShapeDetector()
cv2.imshow("Image", image)
cv2.waitKey(0)
# loop over the contours
for c in cnts:
    # compute the center of the contour, then detect the name of the
    # shape using only the contour
    # NOTE(review): M["m00"] is 0 for degenerate contours and would raise
    # ZeroDivisionError here -- confirm inputs cannot produce them.
    M = cv2.moments(c)
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    area = cv2.contourArea(c)
    shape = sd.detect(c)
    if shape == "5 cm circle" or shape == "10 cm circle":
        # Circles: derive the radius from the area, fit an ellipse for the
        # centre, and draw both outline and centre on the full-size image.
        radius = np.sqrt(area / np.pi)
        epsilon = 0.05 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        (x, y), (MA, ma), angle = cv2.fitEllipse(c)
        x *= ratio
        y *= ratio
        radius *= ratio
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(image, center, radius, (255, 0, 0), 2)
        cv2.circle(image, center, 1, (0, 0, 255), thickness=-1)
        cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        #cv2.circle(image, center, 5, (0, 0, 255), thickness=-1)
    else:
        # Polygons: draw the simplified contour, scaled back to full size.
        epsilon = 0.05 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        (x, y), (MA, ma), angle = cv2.fitEllipse(c)
        x *= ratio
        y *= ratio
        center = (int(x), int(y))
        approx = approx.astype("float")
        approx *= ratio
        approx = approx.astype("int")
        c = c.astype("float")
        c *= ratio
        c = c.astype("int")
        cv2.circle(image, center, 1, (0, 0, 255), thickness=-1)
        cv2.drawContours(image, [approx], -1, (255, 0, 0), 2)
        cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
# multiply the contour (x, y)-coordinates by the resize ratio,
# then draw the contours and the name of the shape on the image
# show the output image
cv2.imshow("Image", image)
cv2.imwrite("utku2.png", image)
cv2.waitKey(0)
|
18,116 | a11c3ebd0c170704515531def0ad0e70da73ee00 | def solve(n):
dp = [0]*(n+1)
offset = 1
for i in range(1,n+1):
if(not i&(i-1)):
offset = i
dp[i] = dp[i-offset] + 1
return dp
class Solution:
    """LeetCode 338 entry point; delegates to the module-level solve()."""
    def countBits(self, num: int) -> 'List[int]':
        # Annotation quoted: `List` is never imported in this file, so an
        # unquoted annotation would raise NameError when the method is defined.
        return solve(num)
|
18,117 | 8220df44b6038ed74a1992a26e510014061423c6 | from bs4 import BeautifulSoup as bsoup
import requests
# Scrape the GSoC 2017 organization archive: for every organization card,
# collect its name, tagline, website link, technologies and topics.
my_url='https://summerofcode.withgoogle.com/archive/2017/organizations/'
original = "https://summerofcode.withgoogle.com"
response = requests.get(my_url)
html = response.content
soup = bsoup(html,"html.parser")
organizations = soup.findAll("li",{'class': 'organization-card__container'})
for organization in organizations:
    page_url=organization.find('a',{'class':'organization-card__link'})
    organization_name=organization['aria-label']
    about=organization.find('div',{'class':'organization-card__tagline font-black-54'})
    about=about.text
    page_link=original+page_url['href']
    page = requests.get(page_link)
    # First failing detail page aborts the whole scrape (break, not continue).
    if page.status_code != 200:
        break
    # NOTE(review): the detail page is fetched a second time here --
    # `page.content` could be reused instead of this extra request.
    page_link=original+page_url['href']
    response1 = requests.get(page_link)
    html1=response1.content
    soup1=bsoup(html1,"html.parser")
    organization_link=soup1.find("a",{"class":"org__link"})
    organization_link=organization_link.text
    technologies=soup1.findAll("li",{"class":"organization__tag organization__tag--technology"})
    tech = []
    for t in technologies:
        tech.append(t.text)
    major_topics=soup1.findAll("li",{"class":"organization__tag organization__tag--topic"})
    topics = []
    for q in major_topics:
        topics.append(q.text)
|
18,118 | 61ec4a1e6a391c8eb02825528a61f67a757fd29c | from django.test import TestCase
# Create your tests here.
import urllib.request
import re
import requests
# Scratch/experiment file: the commented-out snippets below were one-off
# download tests kept for reference.
# ret=requests.get('http://news.cnhubei.com/ctjb/ctjbsgk/ctjb40/200808/W020080822221006461534.jpg')
# print(ret.content)
# with open('wb') as f:
#     f.write(ret.content)
# f.close()
# #
#
import selenium  # a library that drives a browser so JS-rendered pages work; it can open a real browser
from selenium import webdriver
# driver=webdriver.PhantomJS()
#
# download phantomjs and put the files from its bin directory on the PATH
# install lxml
# install beautifulsoup4
# install pyquery
from pyquery import PyQuery
from bs4 import BeautifulSoup
import urllib.request
res=urllib.request.urlopen('http://www.baidu.com')
# print(res.read().decode('utf-8'))
print(type(res))
print(res.status)
18,119 | 010e427143780cb56ee05808954a197074638737 | import os
BOT_NAME = "gov_scrape"
SPIDER_MODULES = ["gov_scrape.spiders"]
NEWSPIDER_MODULE = "gov_scrape.spiders"
# Splash config -- rendering service address comes from the SPLASH_IP env
# var (host:port), defaulting to a local instance.
SPLASH_URL = f"http://{os.getenv('SPLASH_IP', 'localhost:8050')}"
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
# Crawl responsibly
USER_AGENT = "gov_scrape (+https://gov-rss.github.io)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy
CONCURRENT_REQUESTS = 16
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    "scrapy_splash.SplashDeduplicateArgsMiddleware": 100,
}
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware": 500,
    "scrapy_splash.SplashCookiesMiddleware": 723,
    "scrapy_splash.SplashMiddleware": 725,
    "scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware": 810,
}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "gov_scrape.pipelines.GovScrapePipeline": 500,
    "scrapy_rss.pipelines.RssExportPipeline": 950,
}
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = "httpcache"
HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
HTTPCACHE_STORAGE = "scrapy_splash.SplashAwareFSCacheStorage"
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
HTTPCACHE_GZIP = False
|
18,120 | d6aba6d1322824cf8d5fb402b7b951889e25c815 | def repeat_and_missing_number(A):
r = []
sum_l = 0
n = len(A)
A = list(A)
# calculate repeating number by O(N)
for i in range(n):
sum_l += abs(A[i])
if A[abs(A[i])-1] > 0:
A[abs(A[i])-1] = -A[abs(A[i])-1]
else:
r.append(abs(A[i]))
# calculate sum of the list, n*(n+1)//2
# actual sum of 1 to N - (above sum -repeating no) = missing number
sum_n = n*(n+1)//2
r.append(sum_n-(sum_l-r[0]))
return r
|
18,121 | 4caf8cd1f1d97d174ace21207b6b3db064154320 | def ditcs(b):
db={}
for i in b: db[i]=None
for i,j in enumerate(b):
if db[j] is None:
db[j]=str(i)
else:
db[j]=db[j]+str(i)
return db
def sol(a,b):
    """Print True when every character-position pattern of *a* also occurs
    in *b* (a one-directional isomorphism check), else print False.

    BUG FIX: ``iso`` is now initialised before the loop -- previously an
    empty *a* left it unbound and ``print(iso)`` raised NameError.
    """
    da = ditcs(a)
    db = ditcs(b)
    iso = True
    for positions in da.values():
        if positions not in db.values():
            iso = False
            break
    print(iso)
sol(a,b)
|
18,122 | 7d7cd113e2f192fad78c57bbfaafe4800cff7b77 | import keras.backend as K
from keras.layers import Input, Dense
from keras.models import Model
from neuraltree.builder import \
get_name_to_unlinked_layer_dict, \
get_incoming_and_outgoing_layers, \
get_input_and_output_layers, \
RootSystemBuilder, BranchSystemBuilder, TrunkBuilder
from neuraltree.model import RootSystem, BranchSystem, TrunkSystem
from neuraltree.model import NeuralTree
def create_sample_model():
    """Build and compile a small 50 -> 20 -> 20 -> 4 dense Keras model."""
    # init
    input_layer = Input(shape=(50,))
    hidden_layer_1 = Dense(units=20)
    hidden_layer_2 = Dense(units=20)
    output_layer = Dense(units=4)
    # build
    hidden_layer_1_linked = hidden_layer_1(input_layer)
    hidden_layer_2_linked = hidden_layer_2(hidden_layer_1_linked)
    output_layer_linked = output_layer(hidden_layer_2_linked)
    model = Model(inputs=[input_layer], outputs=[output_layer_linked])
    model.compile(optimizer="rmsprop", loss="mse")
    return model
def create_root_system():
    """Wrap a fresh sample model in a RootSystem via RootSystemBuilder."""
    sample_root_model = create_sample_model()
    incoming_layers_by_name, \
    outgoing_layers_by_name, \
    layer_build_order_by_name = get_incoming_and_outgoing_layers(sample_root_model)
    input_layers, output_layers = get_input_and_output_layers(sample_root_model)
    root_builder = RootSystemBuilder(
        get_name_to_unlinked_layer_dict(sample_root_model),
        incoming_layers_by_name,
        outgoing_layers_by_name,
        layer_build_order_by_name,
        input_layers,
        output_layers
    )
    return RootSystem("", root_builder)
def create_branch_system():
    """Wrap a fresh sample model in a BranchSystem via BranchSystemBuilder."""
    sample_branch_model = create_sample_model()
    incoming_layers_by_name, \
    outgoing_layers_by_name, \
    layer_build_order_by_name = get_incoming_and_outgoing_layers(sample_branch_model)
    input_layers, output_layers = get_input_and_output_layers(sample_branch_model)
    branch_builder = BranchSystemBuilder(
        get_name_to_unlinked_layer_dict(sample_branch_model),
        incoming_layers_by_name,
        outgoing_layers_by_name,
        layer_build_order_by_name,
        input_layers,
        output_layers
    )
    return BranchSystem("", branch_builder)
def create_trunk_system():
    """Wrap a fresh sample model in a TrunkSystem via TrunkBuilder."""
    sample_trunk_model = create_sample_model()
    incoming_layers_by_name, \
    outgoing_layers_by_name, \
    layer_build_order_by_name = get_incoming_and_outgoing_layers(sample_trunk_model)
    input_layers, output_layers = get_input_and_output_layers(sample_trunk_model)
    trunk_builder = TrunkBuilder(
        get_name_to_unlinked_layer_dict(sample_trunk_model),
        incoming_layers_by_name,
        outgoing_layers_by_name,
        layer_build_order_by_name,
        input_layers,
        output_layers
    )
    return TrunkSystem("", trunk_builder)
def test_create_root_system():
    """A RootSystem builds and carries a non-empty layer dictionary."""
    root_system = create_root_system()
    assert RootSystem == type(root_system)
    assert not {} == root_system.builder.name_to_unlinked_layer
    print(root_system.builder.name_to_unlinked_layer)
def test_create_branch_system():
    """A BranchSystem builds and carries a non-empty layer dictionary."""
    branch_system = create_branch_system()
    assert BranchSystem == type(branch_system)
    assert not {} == branch_system.builder.name_to_unlinked_layer
    print(branch_system.builder.name_to_unlinked_layer)
def test_create_trunk_system():
    """A TrunkSystem builds and carries a non-empty layer dictionary."""
    trunk_system = create_trunk_system()
    assert TrunkSystem == type(trunk_system)
    assert not {} == trunk_system.builder.name_to_unlinked_layer
    print(trunk_system.builder.name_to_unlinked_layer)
def test_create_tree():
    """A NeuralTree assembles from root, trunk and branch systems wired
    together by the layer-name maps sketched below."""
    root_system = create_root_system()
    branch_system = create_branch_system()
    trunk_system = create_trunk_system()
    # --- Root System --- #
    # input
    # hidden
    # hidden-------------------
    # output                  |
    # --- Trunk System --- #  |
    # input                   |
    # hidden<------------------
    # hidden-------------------
    # output                  |
    # --- Branch System --- # |
    # input                   |
    # hidden<------------------
    # hidden
    # output
    roots_to_trunk_map = {
        "dense_2": "dense_1"
    }
    trunk_to_branches_map = {
        "dense_2": "dense_1"
    }
    tree = NeuralTree("", root_system, trunk_system, branch_system, roots_to_trunk_map, trunk_to_branches_map)
|
18,123 | fbf532e6d2b6aedf024a958e0e98b1b12adc7b62 | #encoding:utf8
##2005第几天
# Determine whether a year is a leap year.
def is_leap(x):
    """Return True when year *x* is a Gregorian leap year."""
    return x % 4 == 0 and (x % 100 != 0 or x % 400 == 0)
def days(date):
    """Return the 1-based day-of-year for a 'YYYY/M/D' date string."""
    year, month, day = (int(part) for part in date.split('/'))
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Leap-year rule (inlined from is_leap above): February gains a day.
    if (year % 400 == 0) or (year % 4 == 0 and year % 100 != 0):
        month_lengths[1] = 29
    # Days in all fully elapsed months, plus the day within the month.
    return sum(month_lengths[:month - 1]) + day
# Demo output (Python 2 print statements -- this file is Python 2 code).
print days('1985/1/20')
print days('2006/3/12')
print days('2000/3/12')
print days('2016/3/12')
print days('2100/3/12')
# The code below was used while writing to verify that split works correctly.
# date='1985/1/20'
# l_str=date.split('/')
# l_num=[int(i) for i in l_str]
# print l_num
|
18,124 | b34a6d81a7632236d7f28de8252ad534c5b1c114 | import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert
from irise.plot.util import add_map, even_cscale
from myscripts.models.um import case_studies
levels = ('air_potential_temperature', [320])
clevs = even_cscale(2)
cmap = 'coolwarm'
def main(cubes):
    """Plot four PV-tracer fields on the selected isentropic level in a
    2x2 grid, overlaying the 2-PVU contours of full and advection-only PV.

    Parameters:
        cubes: iris CubeList holding the forecast diagnostics.
    """
    pv = convert.calc('ertel_potential_vorticity', cubes, levels=levels)[0]
    adv = convert.calc('advection_only_pv', cubes, levels=levels)[0]

    for n, name in enumerate(['sum_of_physics_pv_tracers', 'epsilon',
                              'dynamics_tracer_inconsistency', 'residual_pv']):
        cube = convert.calc(name, cubes, levels=levels)[0]
        # BUG FIX: use floor division -- on Python 3 `n / 2` yields a float
        # and subplot2grid rejects non-integer grid positions.
        m = n // 2
        ax = plt.subplot2grid((2, 2), (n - 2 * m, m))
        iplt.contourf(cube, clevs, cmap=cmap)
        add_map()
        iplt.contour(pv, [2], colors='k', linestyles='-')
        iplt.contour(adv, [2], colors='k', linestyles='-')

    plt.show()
if __name__ == '__main__':
    # Load the IOP5b case study at a 24-hour lead time and plot it.
    forecast = case_studies.iop5b.copy()
    cubes = forecast.set_lead_time(hours=24)
    main(cubes)
|
18,125 | af3dbcf124d52bc4fd47a9100982c9466b670a8d | from storages.backends.s3boto3 import S3Boto3Storage
from MxOnline.settings import MEDIAFILES_LOCATION
class MediaStorage(S3Boto3Storage):
    """S3 storage backend for user-uploaded media files."""
    # Upload prefix inside the bucket, taken from project settings.
    location = MEDIAFILES_LOCATION
    # Never overwrite: colliding names get a generated suffix instead.
    file_overwrite = False
18,126 | d2e7ba8fc27eaac08afa2ab3aff801a2224e1085 | import numpy as np
from random import shuffle
def svm_loss_naive(W, X, y, reg):
  """
  Structured SVM loss function, naive implementation (with loops).
  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.
  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength
  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  dW = np.zeros(W.shape) # initialize the gradient as zero
  # compute the loss and the gradient
  num_classes = W.shape[1]
  num_train = X.shape[0]
  loss = 0.0
  # FIX: xrange -> range (xrange is Python-2-only; range works on both).
  for i in range(num_train):
    scores = X[i].dot(W)
    correct_class_score = scores[y[i]]
    for j in range(num_classes):
      if j == y[i]:
        continue
      margin = scores[j] - correct_class_score + 1 # note delta = 1
      if margin > 0:
        loss += margin
        # Each violated margin pushes column j toward X[i] and the
        # correct-class column away from it.
        dW[:,j] += X[i,:].T
        dW[:,y[i]] -= X[i,:].T
  # Right now the loss is a sum over all training examples, but we want it
  # to be an average instead so we divide by num_train.
  loss /= num_train
  dW /= num_train
  # Add regularization to the loss (and its gradient).
  loss += 0.5 * reg * np.sum(W * W)
  dW += reg*W
  return loss, dW
def svm_loss_vectorized(W, X, y, reg):
  """
  Structured SVM loss function, vectorized implementation.
  Inputs and outputs are the same as svm_loss_naive.

  FIXES vs. the previous version: the Python-2-only `xrange` loop over
  training examples is gone (the gradient is now fully vectorized with
  integer-array indexing), and the large body of commented-out
  exploratory code has been removed.
  """
  num_train = X.shape[0]
  rows = np.arange(num_train)

  # Margins: max(0, s_j - s_{y_i} + 1); the correct class contributes 0.
  scores = X.dot(W)                                     # (N, C)
  correct_class_scores = scores[rows, y][:, np.newaxis] # (N, 1)
  margins = np.maximum(0, scores - correct_class_scores + 1)  # delta = 1
  margins[rows, y] = 0

  loss = margins.sum() / num_train
  loss += 0.5 * reg * np.sum(W * W)

  # Gradient: each violated margin adds X[i] to column j; the correct-class
  # column loses X[i] once per violated margin in that row.
  indicator = (margins > 0).astype(float)
  indicator[rows, y] = -indicator.sum(axis=1)
  dW = X.T.dot(indicator) / num_train
  dW += reg * W

  return loss, dW
|
18,127 | cbb9527c7e476ecd564a97c171870070a9ec715b | from django.urls import path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
# API routes: registration/login plus book CRUD-style endpoints.
urlpatterns = [
    path('register', views.RegisterNewUser, name='register'),
    # Login is DRF's stock token endpoint: POST credentials, get a token.
    path('login', obtain_auth_token, name='login'),
    path('users_and_theirBooks',
         views.get_users_and_theirBooks, name="users_all_inf"),
    path('Mybooks', views.get_My_books, name="mybooks"),
    path('addBook', views.create_book, name="create_book"),
    path('find_poster/<str:pk>', views.find_poster, name="find_poster"),
    path('update/<str:pk>', views.update_book, name="update")
]
|
18,128 | 9fdc815debf341b21ca4871021aac3ebf3c420eb | import os
import time
import tensorflow as tf
from tf_agents.metrics import tf_metrics
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
from pathlib import Path
from tf_agents.policies import policy_saver
from tf_agents.drivers import dynamic_episode_driver
class LearningHelper:
    """Convenience wrapper around a TF-Agents agent.

    Bundles trajectory collection into a uniform replay buffer, training,
    evaluation on a separate environment, and checkpoint/policy saving.
    """
    def __init__(self, train_env, test_env, agent, global_step, chkpdir='./',
                 num_iterations=20000, collect_episodes=100, collect_steps_per_iteration=2,
                 replay_buffer_capacity=20000, batch_size=64, log_interval=500,
                 num_eval_episodes=10, eval_interval=5000, IsAutoStoreCheckpoint=True, collect_policy=None):
        tf.compat.v1.enable_v2_behavior()
        self.IsAutoStoreCheckpoint = IsAutoStoreCheckpoint
        self.num_iterations = num_iterations
        self.collect_episodes = collect_episodes
        self.collect_steps_per_iteration = collect_steps_per_iteration
        self.replay_buffer_capacity = replay_buffer_capacity
        self.batch_size = batch_size
        self.log_interval = log_interval
        self.num_eval_episodes = num_eval_episodes
        self.eval_interval = eval_interval
        self.agent = agent
        self.train_env = train_env
        self.test_env = test_env
        self.global_step = global_step
        # BUG FIX: choose the collect policy *after* self.agent is assigned.
        # Previously this block ran first, so `self.agent.collect_policy`
        # raised AttributeError whenever collect_policy was None.
        if collect_policy is None:
            self.collect_policy = self.agent.collect_policy
            print('selected agent collect_policy')
        else:
            self.collect_policy = collect_policy
            print('selected USER collect_policy')
        # create reply buffer for collection trajectories
        self.replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            self.agent.collect_data_spec,
            batch_size=self.train_env.batch_size,
            max_length=self.replay_buffer_capacity)
        # Checkpointer and policy-saver target directories.
        self.checkpoint_dir = os.path.join(chkpdir, 'checkpoint')
        Path(self.checkpoint_dir).mkdir(parents=True, exist_ok=True)
        self.policy_dir = os.path.join(chkpdir, 'policy')
        Path(self.policy_dir).mkdir(parents=True, exist_ok=True)
        self.train_checkpointer = common.Checkpointer(
            ckpt_dir=self.checkpoint_dir,
            max_to_keep=1,
            agent=self.agent,
            policy=self.agent.policy,
            replay_buffer=self.replay_buffer,
            global_step=self.global_step
        )
        self.tf_policy_saver = policy_saver.PolicySaver(self.agent.policy)
        self.local_step_counter = 0
    def evaluate_agent(self, n_episodes=100):
        """Run the greedy policy on test_env; return the average return."""
        num_episodes = tf_metrics.NumberOfEpisodes()
        env_steps = tf_metrics.EnvironmentSteps()
        average_return = tf_metrics.AverageReturnMetric()
        observers = [num_episodes, env_steps, average_return]
        _driver = dynamic_episode_driver.DynamicEpisodeDriver(self.test_env, self.agent.policy, observers,
                                                              num_episodes=n_episodes)
        final_time_step, _ = _driver.run()
        print('eval episodes = {0}: Average Return = {1}'.format(num_episodes.result().numpy(),
                                                                 average_return.result().numpy()))
        return average_return.result().numpy()
    def collect_training_data(self, verbose=0):
        """Clear the replay buffer and refill it by running collect_policy
        on train_env for self.collect_episodes episodes."""
        if (verbose > 0):
            num_episodes = tf_metrics.NumberOfEpisodes()
            env_steps = tf_metrics.EnvironmentSteps()
            observers = [self.replay_buffer.add_batch, num_episodes, env_steps]
        else:
            observers = [self.replay_buffer.add_batch]
        self.replay_buffer.clear()
        # Initial driver.run will reset the environment and initialize the policy.
        driver = dynamic_episode_driver.DynamicEpisodeDriver(
            self.train_env, self.collect_policy, observers, num_episodes=self.collect_episodes)
        final_time_step, policy_state = driver.run()
        if (verbose > 0):
            print('Number of Steps: ', env_steps.result().numpy())
            print('Number of Episodes: ', num_episodes.result().numpy())
    def train_step(self, n_steps):
        """Sample n_steps mini-batches from the replay buffer and train."""
        # Dataset generates trajectories with shape [Bx2x...]
        AUTOTUNE = tf.data.experimental.AUTOTUNE
        dataset = self.replay_buffer.as_dataset(
            num_parallel_calls=AUTOTUNE,
            sample_batch_size=self.batch_size,
            num_steps=2).prefetch(AUTOTUNE)
        iterator = iter(dataset)
        train_loss = None
        for _ in range(n_steps):
            # Sample a batch of data from the buffer and update the agent's network.
            experience, unused_info = next(iterator)
            train_loss = self.agent.train(experience)
        print('Global steps {}: Traning Loss {}'.format(self.global_step.numpy(), train_loss.loss))
    def train_agent(self, n_epoch):
        """Alternate collection and training for n_epoch epochs,
        optionally checkpointing after every epoch."""
        local_epoch_counter = 0
        for i in range(n_epoch):
            start_time = time.time()
            self.collect_training_data(verbose=1)
            self.train_step(self.replay_buffer.num_frames().numpy())
            if (self.IsAutoStoreCheckpoint == True):
                self.store_check_point()
            epoch_train_time = time.time() - start_time
            local_epoch_counter = local_epoch_counter + 1
            print('Epoch: {}, epoch train time: {}'.format(local_epoch_counter, epoch_train_time))
    def train_agent_with_avg_ret_condition(self, max_steps, min_avg_return, n_eval_steps=100):
        """Train until the periodic evaluation exceeds min_avg_return or
        max_steps collection/training rounds have run."""
        for i in range(max_steps):
            self.collect_training_data()
            self.train_step(1000)
            if (self.IsAutoStoreCheckpoint == True):
                self.store_check_point()
            if ((i > 0) and (i % self.eval_interval) == 0):
                avg_ret = self.evaluate_agent(n_eval_steps)
                if (avg_ret > min_avg_return):
                    return
    def get_agent(self):
        """Return the wrapped agent."""
        return self.agent
    def store_check_point(self):
        """Persist agent/policy/replay-buffer state at the current step."""
        self.train_checkpointer.save(self.global_step)
    def restore_check_point(self):
        """Restore the latest checkpoint (no-op if none exists)."""
        self.train_checkpointer.initialize_or_restore()
        self.global_step = tf.compat.v1.train.get_global_step()
    def save_policy(self):
        """Export the current greedy policy as a SavedModel."""
        self.tf_policy_saver.save(self.policy_dir)
|
18,129 | d641578280bd850f485962a6a31db5aab3526aa2 | #!flask/bin/python
from app import app
# Start the Flask development server for the imported application.
app.run(debug=True)
'''
This script starts up the development web server with our application. Imports the app variable from our app package and invokes its run method to start the server.
'''
|
18,130 | de7308c442ec548175aa62a0dd946a1c0f27d90b | class Error(Exception):
pass
# Concrete error types raised by the tracking API; all share the package
# Error base so callers can catch them with a single except clause.
class GUIFeaturesNotEnabledError(Error):
    """Used when the user tries to call a function which requires "GUI Features" but it is not enabled"""
    pass
class NoImageAvailableError(Error):
    """used when the user tries to retrieve an image but none is available"""
    pass
class NotTrackingError(Error):
    """used when the user tries to use a feature that requires tracking to have been started"""
    pass
class TrackingActiveError(Error):
    """used when the user tries to use a feature that can't be run at the same time as tracking"""
    pass
class FeatureNotImplementedError(Error):
    """used when the user attempts to use a feature that has not been implemented fully"""
    def __init__(self, message: str = 'This feature has not been implemented although it seems a function of this name is in the code. Possible solutions include updating to a newer version or implementing the feature and submitting it on github'):
        # BUG FIX: was `super.__init__(message)`, which calls __init__ on the
        # builtin `super` type object itself and raises TypeError whenever
        # this exception is instantiated; super() must be *called*.
        super().__init__(message)
|
18,131 | bb0f7118af590ea61b17fc4abff1fc03aef0459c | #!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class Zabbix_Weakpwd_BaseVerify:
    """Brute-force probe for weak/default Zabbix web-login credentials,
    using the username/password wordlists shipped under app/."""
    def __init__(self, url):
        self.info = {
            'name': 'Zabbix弱口令漏洞',
            'description': 'Zabbix弱口令漏洞',
            'date': '',
            'exptype': 'check',
            'type': 'Weakpwd'
        }
        self.url = url
        if not self.url.startswith("http") and not self.url.startswith("https"):
            self.url = "http://" + self.url
        self.headers = {
            'User-Agent': get_useragent(),
            'Content-Type': 'application/x-www-form-urlencoded'
        }
    def check(self):
        """
        检测是否存在漏洞
        :param:
        :return bool True or False: 是否存在漏洞
        """
        urls = []
        urls.append(self.url + '/index.php')
        urls.append(self.url + '/zabbix/index.php')
        # FIX: read both wordlists once up front with context managers --
        # the old code leaked file handles and re-opened password.txt for
        # every user name.
        with open('app/username.txt', 'r', encoding = 'utf-8') as f:
            users = [line.strip() for line in f.readlines()]
        with open('app/password.txt', 'r', encoding = 'utf-8') as f:
            pwds = [line.strip() for line in f.readlines() if line != '']
        for user in users:
            for pwd in pwds:
                for url in urls:
                    try:
                        data = {
                            'sid': '84fc9ff1d9310695',
                            'form_refresh': 1,
                            'name': user,
                            'password': pwd,
                            'autologin': 1,
                            'enter': 'Sign in'
                        }
                        req = request.post(url, headers = self.headers, data = data)
                        # A session cookie plus a redirect means the login succeeded.
                        if 'zbx_sessionid' in req.headers['Set-Cookie'] and req.status_code == 302:
                            result = "exists Zabbix weak password, user: %s, pwd: %s"%(user, pwd)
                            print('存在Zabbix弱口令漏洞,弱口令为', result)
                            return True
                    except Exception as e:
                        print(e)
        print('不存在Zabbix弱口令漏洞')
        return False
if __name__ == '__main__':
    # Manual smoke test against an arbitrary host.
    Zabbix_Weakpwd = Zabbix_Weakpwd_BaseVerify('http://baidu.com')
    Zabbix_Weakpwd.check()
18,132 | fceabe28018884fc5b1a7b83bdcb2d1870cfadb8 | import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ElementTree, Element, SubElement
XSTART = 40
xcoord = XSTART
ycoord = 30
def make_coords(el, membercount):
    """Attach a <coordinates> element to *el* sized for *membercount*
    members, advancing the module-level (xcoord, ycoord) layout cursor
    left-to-right and wrapping to a new row past x=1000."""
    global xcoord, ycoord
    coords = SubElement(el, 'coordinates')
    x, y, w, h = [SubElement(coords, c)
                  for c in ['x', 'y', 'w', 'h']]
    x.text = str(xcoord)
    y.text = str(ycoord)
    w.text = "230"
    # Height grows 18 px per member on top of a 20 px header.
    h.text = str(20 + (18 * membercount))
    xcoord += 260
    if xcoord > 1000:
        xcoord = XSTART
        ycoord += 300
def make_relation(diag, relname, frm):
    """Append a UMLet Relation element (an inheritance arrow) to *diag*.

    *relname* and *frm* are accepted for symmetry with the call site but
    are not used by the current layout logic.
    """
    relation = SubElement(diag, 'element')
    SubElement(relation, 'type').text = "com.umlet.element.Relation"
    coords = SubElement(relation, 'coordinates')
    # Position is left at the origin; width/height must be non-zero for
    # UMLet to render the arrow at all.
    for tag, value in (('x', "0"), ('y', "0"), ('w', "10"), ('h', "10")):
        SubElement(coords, tag).text = value
    SubElement(relation, 'panel_attributes').text = "lt=<-"
    # Start/end offsets of the arrow within its bounding box.
    SubElement(relation, 'additional_attributes').text = "0;0;10;100"
def createuml(classes):
    """Build a UMLet <diagram> tree from the parsed class dictionary.

    Python 2 code (dict.itervalues). Each class becomes a UMLet Class
    element whose panel text lists name, fields and methods; association
    relations are appended as arrow elements.
    """
    diag = Element('diagram')
    diag.attrib['program'] = "umlet"
    diag.attrib['version'] = "11.3"
    for c in classes.itervalues():
        el = SubElement(diag, 'element')
        tp = SubElement(el, 'type')
        tp.text = "com.umlet.element.Class"
        make_coords(el, len(c.fields) + len(c.methods))
        pn = SubElement(el, 'panel_attributes')
        pn.text = c.name
        if len(c.supers) > 0:
            pn.text += ": %s" % c.supers[0] #multiple inheritance ignored now XXX
        #attrs
        pn.text += "\n--\n"
        for f in c.fields:
            pn.text += "%s: type\n" % f #we don't have type info as it is not used in the calcs
        #methods
        pn.text += "\n--\n"
        for method in c.methods.itervalues():
            pn.text += "#%s(" % method.name
            #print method, method.params
            if len(method.params) > 0:
                for p in method.params:
                    pn.text += "%s, " % p
                # Trim the trailing ", " left by the parameter loop.
                pn.text = pn.text[:-2]
            pn.text += ")\n"
        #possible baseclass(es?)
        #other relations (associations)
        for rel in c.relations:
            make_relation(diag, rel, c.name)
    return diag
# Script entry: read classes from ASDoc XML and print the UMLet XML (Python 2).
if __name__ == '__main__':
    #import read_jsdoc_json
    #classes = read_jsdoc_json.get_classes()
    import read_asdoc_xml
    classes = read_asdoc_xml.get_classes()
    doc = createuml(classes)
    print ET.tostring(doc)
|
18,133 | b84c88c6d853b278b95c4e545c8d72e9dc7e0d5a | from rest_framework import serializers
from . import models
class StudentSerializer(serializers.ModelSerializer):
    """Serialize every field of the Student model."""
    class Meta:
        model = models.Student
        fields = '__all__'
class CourseSerializer(serializers.ModelSerializer):
    """Serialize every field of the Course model."""
    class Meta:
        model = models.Course
        fields = '__all__'
class EnrollmentSerializer(serializers.ModelSerializer):
    """Serialize every field of the Enrollment model."""
    class Meta:
        model = models.Enrollment
        fields = '__all__'
class ListEnrollmentStudentSerializer(serializers.ModelSerializer):
    """Enrollment rows for one student: course description plus readable time."""
    course = serializers.ReadOnlyField(source='course.description')
    time_course = serializers.SerializerMethodField()

    class Meta:
        model = models.Enrollment
        fields = ['course', 'time_course']

    def get_time_course(self, obj):
        # Django's get_FOO_display() returns the human-readable choice label.
        return obj.get_time_course_display()
class ListofStudentsEnrolledInCourseSerializer(serializers.ModelSerializer):
    """Names of the students enrolled in a course."""
    student_name = serializers.ReadOnlyField(source='student.name')

    class Meta:
        model = models.Enrollment
        fields = ['student_name']
|
18,134 | 1e85a3fd043f4da1229914f661aab130b219d8cb | #!/usr/bin/env python
'''This script is used to manually update the
/media/multimedia/logs/find_next_recording_time/next file with the
time of the next recording and to send that to the server over
udp. This script is meant to be executed by the system event handler
in Mythtv every time the scheduler has been run.
'''
import time
import datetime
import xml.etree.ElementTree
import sys
import urllib
import socket
### Settings
#.................... EDIT HERE START
XML_FILE_NAME = 'http://127.0.0.1:6544/Status/GetStatus'
LOG_FILE_NAME = '/media/multimedia/logs/find_next_recording_time/log'
NEXT_FILE_NAME = '/media/multimedia/logs/find_next_recording_time/next'
SERVER_IP = '192.168.0.100'
SERVER_UDP_PORT = 9000
# Estimated fall a sleep and wake up time
SLEEP_WAKEUP_TIME = 600
#.................... EDIT HERE END
### Initial assignments ###
NOW = int(time.time())
LOG_LINE = time.strftime('%Y-%m-%d %T') + " === "
### Functions ###
def get_last_time():
    """Return the next-recording time stored by the previous run.

    Reads NEXT_FILE_NAME and adds SLEEP_WAKEUP_TIME back (it was subtracted
    when the value was written).  Returns 0 when the file does not exist yet.
    """
    try:
        with open(NEXT_FILE_NAME) as file_:
            # NOTE We add the SLEEP_WAKEUP_TIME because it was subtracted when
            # the value was written
            last_time = int(file_.readline().strip('\n')) + SLEEP_WAKEUP_TIME
    except IOError:
        last_time = 0
    return last_time
def write_time(new_time):
    """Persist the time of the next recording locally and send it to the server.

    Writes ``new_time - SLEEP_WAKEUP_TIME`` to NEXT_FILE_NAME and sends the
    same value over UDP.  Returns a status fragment for the log line.
    """
    last_time = get_last_time()
    # Only do something if there has been a change
    if new_time != last_time:
        # Write wake up time to "next" file
        with open(NEXT_FILE_NAME, 'w') as file_:
            file_.write(str(new_time - SLEEP_WAKEUP_TIME))
        # NOTE(review): sendto with a str payload is Python-2-only; Python 3
        # would need bytes here (this file targets Python 2, see urllib use).
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(str(new_time - SLEEP_WAKEUP_TIME),
                    (SERVER_IP, SERVER_UDP_PORT))
        return ' Time ' + str(new_time - SLEEP_WAKEUP_TIME) + ' written!'
    else:
        return ' Already updated!'
### End of functions, here start the main code ###
# Read the status xml file to a eTree object
def main():
    """The main method: find the earliest scheduled recording, log it, write it."""
    global LOG_LINE
    # Python 2 era APIs here: urllib.urlopen and ElementTree.getchildren().
    xmlfile = urllib.urlopen(XML_FILE_NAME)
    xmlcontent = xml.etree.ElementTree.parse(xmlfile)
    xmlfile.close()
    rec = []
    # Calculate the UTC offset
    UTC_OFFSET = datetime.datetime.utcnow() - datetime.datetime.now()
    for program in xmlcontent.find('Scheduled').getchildren():
        # The format is '2014-02-22T14:25:00Z' and it is UTC or ZULU time
        from_db = program.find('Recording').attrib['recStartTs']
        utc_time = datetime.datetime.strptime(from_db[:-1], '%Y-%m-%dT%H:%M:%S')
        # Calculate local time
        localtime = utc_time - UTC_OFFSET
        # Convert to time object to get epoch time
        epochtime = time.mktime(localtime.timetuple())
        rec.append(int(epochtime))
    best_item = min(rec) if len(rec) > 0 else None
    # If there is no recording in the future
    if best_item is None:
        LOG_LINE += ' No rec. in fut.'
        # The return value from write_time is a part of the log line
        LOG_LINE += write_time(0) + '\n'
        with open(LOG_FILE_NAME, 'a') as file_:  # 'a' is append
            file_.write(LOG_LINE)
        sys.exit(0)
    # If next is more than SLEEP_WAKEUP_TIME into the future, then write the
    # time to the 'next' file
    if best_item - NOW > SLEEP_WAKEUP_TIME:
        LOG_LINE += ' Rec. scheduled.'
        # The return value from write_time is a part of the log line
        LOG_LINE += write_time(best_item) + '\n'
        with open(LOG_FILE_NAME, 'a') as file_:  # 'a' is append
            file_.write(LOG_LINE)
        sys.exit(0)
# Run once per scheduler event (invoked by MythTV's system event handler).
if __name__ == '__main__':
    main()
|
18,135 | 5863e52ab6bcc16aff679f9a13fd6fc836ec46db | # [h] change suffix in selected glyphs
# imports
from mojo.roboFont import CurrentFont
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.glyphutils import has_suffix, change_suffix
from hTools2.modules.messages import no_font_open, no_glyph_selected
# objects
class changeSuffixDialog(hDialog):
    """A dialog to change the suffix of the selected glyphs.

    .. image:: imgs/glyphs/names-suffix-0.png
    .. image:: imgs/glyphs/names-suffix-1.png
    .. image:: imgs/glyphs/names-suffix-2.png
    .. image:: imgs/glyphs/names-suffix-3.png

    """

    # attributes

    #: The old suffix to be substituted.
    old_suffix = ''

    #: New suffix for glyph names.
    new_suffix = ''

    #: Overwrite (or not) if glyph with new name already exists in font.
    overwrite = True

    # methods

    def __init__(self):
        """Build the floating window: old/new suffix fields, checkbox, button."""
        self.title = 'suffix'
        self.height = (self.text_height * 3) + (self.padding_y * 5) + self.button_height
        self.column_1 = 33
        self.column_2 = 70
        self.w = FloatingWindow((self.width, self.height), self.title,)
        # old suffix
        x = self.padding_x
        y = self.padding_y
        self.w.old_suffix_label = TextBox(
            (x, y,
            self.column_1,
            self.text_height),
            "old",
            sizeStyle=self.size_style)
        x += self.column_1
        self.w.old_suffix_value = EditText(
            (x, y,
            self.column_2,
            self.text_height),
            text=self.old_suffix,
            placeholder='old suffix',
            sizeStyle=self.size_style)
        # new suffix
        x = self.padding_x
        y += (self.text_height + self.padding_y)
        self.w.new_suffix_label = TextBox(
            (x, y,
            self.column_1,
            self.text_height),
            "new",
            sizeStyle=self.size_style)
        x += self.column_1
        self.w.new_suffix_value = EditText(
            (x, y,
            self.column_2,
            self.text_height),
            text=self.new_suffix,
            placeholder='new suffix',
            sizeStyle=self.size_style)
        y += (self.text_height + self.padding_y)
        # checkbox overwrite
        self.w.overwrite_checkbox = CheckBox(
            (x, y,
            -self.padding_x,
            self.text_height),
            "overwrite",
            value=self.overwrite,
            sizeStyle=self.size_style)
        # apply button
        x = self.padding_x
        y += (self.text_height + self.padding_y)
        self.w.button_apply = SquareButton(
            (x, y,
            -self.padding_x,
            self.button_height),
            "apply",
            callback=self.apply_callback,
            sizeStyle=self.size_style)
        # open window
        self.w.open()

    def apply_callback(self, sender):
        """Rename the selected glyphs: swap, add or remove the name suffix."""
        # get font
        f = CurrentFont()
        if f is not None:
            glyph_names = get_glyphs(f)
            if len(glyph_names) > 0:
                boolstring = [False, True]
                # get parameters
                self.old_suffix = self.w.old_suffix_value.get()
                self.new_suffix = self.w.new_suffix_value.get()
                self.overwrite = self.w.overwrite_checkbox.get()
                # print info
                print('changing glyph name suffixes...\n')
                print('\told suffix: %s' % (self.old_suffix))
                print('\tnew suffix: %s' % (self.new_suffix))
                print('\toverwrite: %s' % boolstring[self.overwrite])
                print()
                # batch change glyph names
                for glyph_name in glyph_names:
                    g = f[glyph_name]
                    # get glyphs with matching suffix
                    if has_suffix(g, self.old_suffix):
                        # switch suffixes : one.osf -> one.onum
                        if len(self.old_suffix) > 0 and len(self.new_suffix) > 0:
                            new_name = change_suffix(g, self.old_suffix, self.new_suffix)
                        # remove suffix : one.osf -> one
                        elif len(self.old_suffix) > 0 and len(self.new_suffix) == 0:
                            new_name = change_suffix(g, self.old_suffix, None)
                        # add suffix : one -> one.onum
                        elif len(self.old_suffix) == 0 and len(self.new_suffix) > 0:
                            new_name = '%s.%s' % (glyph_name, self.new_suffix)
                        else:
                            new_name = glyph_name
                        # new name not in font (rename)
                        if new_name != glyph_name:
                            if new_name not in f:
                                print('\trenaming %s to %s...' % (glyph_name, new_name))
                                g.name = new_name
                            # new name in font
                            else:
                                # overwrite
                                # bug fix: this branch referenced the undefined
                                # ``self._overwrite`` attribute and the undefined
                                # local ``_new_name``, raising at runtime.
                                if self.overwrite:
                                    print("\toverwriting '%s' with '%s'" % (new_name, glyph_name))
                                    f.removeGlyph(new_name)
                                    f.update()
                                    g.name = new_name
                                    g.update()
                                # do not overwrite
                                else:
                                    print("\t'%s' already exists in font, skipping '%s'" % (new_name, glyph_name))
                    # glyph does not have suffix
                    else:
                        pass
                    # done glyph
                # done font
                f.update()
                print()
                print('...done.\n')
            # no glyph selected
            else:
                print(no_glyph_selected)
        # no font open
        else:
            print(no_font_open)
|
18,136 | 7f0c92b17d33e2c2481b51c51f24fecac55aab78 | """We can rotate digits by 180 degrees to form new digits. When 0, 1, 6, 8, 9 are rotated 180 degrees, they become 0, 1, 9, 8, 6 respectively. When 2, 3, 4, 5 and 7 are rotated 180 degrees, they become invalid.
A confusing number is a number that when rotated 180 degrees becomes a different number with each digit valid.(Note that the rotated number can be greater than the original number.)
Given a positive integer N, return the number of confusing numbers between 1 and N inclusive.
Example 1:
Input: 20
Output: 6
Explanation:
The confusing numbers are [6,9,10,16,18,19].
6 converts to 9.
9 converts to 6.
10 converts to 01 which is just 1.
16 converts to 91.
18 converts to 81.
19 converts to 61.
Example 2:
Input: 100
Output: 19
Explanation:
The confusing numbers are [6,9,10,16,18,19,60,61,66,68,80,81,86,89,90,91,98,99,100].
Note:
1 <= N <= 10^9"""
"""
Compose only the valid numbers.
Check if the number is the same as its rotation. Skip the case for leading 0, because it will cause the
repeated numbers.
Need to calculate the next possible rotation number.
Also, check that the number the next number to be added is less than N
"""
class Solution:
    """Count the 'confusing numbers' in [1, N].

    A confusing number is built only from rotatable digits (0, 1, 6, 8, 9)
    and differs from its own 180-degree rotation.
    """

    def __init__(self):
        # Digit -> its 180-degree rotation.
        self.mapping = {0: 0, 1: 1, 6: 9, 8: 8, 9: 6}
        self.valid_digits = [0, 1, 6, 8, 9]

    def confusingNumber2(self, N):
        """Return how many confusing numbers lie in [1, N]."""
        return self._dfs(0, 0, 1, N)

    def _dfs(self, num, rotation, digit, N):
        """Count confusing numbers reachable by appending digits to ``num``.

        ``rotation`` is the rotated value of ``num`` and ``digit`` the place
        value the next rotated digit will occupy.
        """
        count = 1 if num != rotation else 0
        for d in self.valid_digits:
            # Skip a leading zero: it would duplicate shorter numbers.
            if num == 0 and d == 0:
                continue
            candidate = num * 10 + d
            # Only recurse while the composed number stays within N.
            if candidate <= N:
                count += self._dfs(candidate,
                                   self.mapping[d] * digit + rotation,
                                   digit * 10, N)
        return count
|
18,137 | fdf7c001c47c21c2f5fbe15c55938fd271c88bfc | import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ask.settings")
django.setup()
from django.core.management.base import BaseCommand, CommandError
from django.db import models
from django.contrib.auth.models import User
from ask_nazarkov.models import Question, Answer, Tag
import names
class Command(BaseCommand):
    """Management command that creates a user with a random fake name."""

    def create_new_user(self):
        """Create, save and return a new User with a random male full name.

        Bug fixes: the method lacked ``self``; the manager method is
        ``User.objects.create_user`` (``create_new_user`` does not exist);
        a stray no-op ``new_user`` expression was removed.
        """
        username = names.get_full_name(gender='male')
        email = username.replace(' ', '_') + "@mail.ru"
        password = "password"
        new_user = User.objects.create_user(username=username, email=email, password=password)
        new_user.save()
        # author = UserProperties.objects.create(
        #     user=new_user,
        #     filename="ex1.jpg",
        #     nickname=username[:20],
        # )
        return new_user

    def handle(self, **options):
        """Command entry point: create one random user.

        Bug fix: the original called the non-existent ``User.objects.handle``;
        user creation is delegated to :meth:`create_new_user`.
        """
        self.create_new_user()
|
18,138 | f7f5a77766093bdb99d067e77cd60b10a4c45df3 | #!/usr/bin/env python
"""Generate function summaries for the refguide. For example, if the
__init__ file of a submodule contains:
.. autosummary::
:toctree: generated/
foo
foobar
Then it will modify the __init__ file to contain (*)
.. autosummary::
:toctree: generated/
foo -- First line of the documentation of `foo`.
foobar -- First line of the documentation of `foobar`.
If there is already text after the function definitions it will be
overwritten, i.e.
.. autosummary::
:toctree: generated/
foo -- Blah blah blah.
foobar -- Blabbity blabbity.
will also become (*).
"""
import os
import argparse
import importlib
import re
# Hand-written summaries used instead of the extracted docstring first line.
EXCEPTIONS = {
    'jn': ('Bessel function of the first kind of real order and '
           'complex argument')
}
def main():
    """Rewrite a scipy submodule __init__, annotating autosummary entries.

    For each ``.. autosummary::`` block, every listed function gets a
    ``-- <first docstring line>`` summary appended (overwriting any
    existing text after ``--``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("module",
                        help="module to add summaries to")
    parser.add_argument("--dry-run",
                        help="print __init__ file instead of overwriting",
                        action="store_true")
    args = parser.parse_args()

    filename = os.path.join(os.path.dirname(__file__), '..', 'scipy',
                            args.module, '__init__.py')
    module = importlib.import_module('scipy.' + args.module)

    fnew = []
    with open(filename, 'r') as f:
        line = f.readline()
        while line:
            if '.. autosummary::' in line:
                fnew.append(line.rstrip())
                fnew.append(f.readline().rstrip())  # :toctree: generated/
                fnew.append(f.readline().rstrip())  # blank line
                line = f.readline()
                summaries = []
                maxlen = 0
                # Consume entries until the blank line ending the block.
                while line.strip():
                    func = line.split('--')[0].strip()
                    # '[+]' marks non-ufunc entries; the marker is re-added below.
                    ufunc = '[+]' not in line
                    if len(func) > maxlen:
                        maxlen = len(func)
                    if func in EXCEPTIONS.keys():
                        summary = [EXCEPTIONS[func]]
                    else:
                        summary = []
                        doc = getattr(module, func).__doc__.split('\n')
                        i = 0 if doc[0].strip() else 1
                        while True:
                            if re.match(func + r'\(.*\)', doc[i].strip()):
                                # ufunc docstrings contain the signature
                                i += 2
                            else:
                                break
                        # Collect the first non-empty docstring paragraph.
                        while i < len(doc) and doc[i].strip():
                            summary.append(doc[i].lstrip())
                            i += 1
                    summary = ' '.join([x.lstrip() for x in summary])
                    summary = '[+]' + summary if not ufunc else summary
                    summaries.append((func, summary))
                    line = f.readline()
                # Re-emit the entries with aligned '--' summaries.
                for (func, summary) in summaries:
                    spaces = ' '*(maxlen - len(func) + 1)
                    fnew.append(' ' + func + spaces + '-- ' + summary)
                fnew.append(line.rstrip())
            else:
                fnew.append(line.rstrip())
            line = f.readline()

    if args.dry_run:
        print('\n'.join(fnew))
    else:
        with open(filename, 'w') as f:
            f.write('\n'.join(fnew))
            f.write('\n')
# Script entry point.
if __name__ == "__main__":
    main()
|
18,139 | 05234b881ed86b5679d31948b06d7d04f64b733d | import json
import tornado.web
from peewee import Model
from pkg.e import get_msg_by_code, code
from services import JwtCipher, set_dict
from schema import Schema
from aioredis import Redis
from playhouse.shortcuts import model_to_dict
class BaseHandler(tornado.web.RequestHandler):
    """Base handler: JWT auth helper, JSON body parsing, unified responses."""

    user = None        # decoded JWT payload of the authenticated user
    _json_args = None  # cached parsed JSON request body

    def data_received(self, chunk):
        pass

    @property
    def redis(self) -> Redis:
        return self.application.redis

    def auth_user(self) -> bool:
        """Check whether the user is logged in.

        :return: bool
        """
        token = self.request.headers.get("Authentication")
        user_info = JwtCipher.decrypt(token)
        if not user_info:
            return False
        self.user = set_dict(user_info)
        return True

    def get_json_args(self, key=None, default=None):
        """Return the parsed JSON body, or a single value from it.

        Without *key*, returns the whole dict.  With *key*, returns the
        value; *default* is returned when the key is missing.  A malformed
        body raises HTTP 400; a missing key with no default raises KeyError.
        """
        try:
            if self._json_args is None:
                self._json_args = json.loads(self.request.body)
            if key is None:
                return self._json_args
            # Bug fix: ``if default:`` ignored falsy defaults (0, '', [],
            # False), so a missing key raised KeyError instead of returning
            # the caller-supplied default.
            if default is not None:
                return self._json_args.get(key, default)
            return self._json_args[key]
        except json.JSONDecodeError:
            raise tornado.web.HTTPError(400)

    def is_valid(self, schema: Schema, data=None):
        """Validate *data* (defaults to the JSON body) against *schema*."""
        if not data:
            data = self.get_json_args()
        return schema.is_valid(data)

    def response(self, data=None, c=None):
        """Write a unified JSON response: {code, data, msg}."""
        if not data:
            data = dict()
        elif isinstance(data, Model):
            data = model_to_dict(data)
        c = c or code.SUCCESS
        data = json.dumps(dict(code=c, data=data, msg=get_msg_by_code(c)))
        self.write(data)
class AuthHandler(BaseHandler):
    """Base handler for endpoints that require a logged-in user."""

    def prepare(self):
        # Runs before every request method; reject unauthenticated callers.
        if not self.auth_user():
            raise tornado.web.HTTPError(401)
|
# Read a count followed by that many integers; report the extremes.
n = int(input())
numbers = [int(input()) for _ in range(n)]
# Seed the comparison with the original sentinel values so the result matches
# the strict ">" / "<" tracking loop exactly (including the n == 0 case).
max_num = max([-10000000000000] + numbers)
min_num = min([10000000000000] + numbers)
print(f"Max number: {max_num}")
print(f"Min number: {min_num}")
|
18,141 | 00cb45e1cb7038c58a9f3d7dd529a4b12744521b | import pygame
from sys import exit
import config
from os import path
from button_class import Button
def load_image(name):
    """Load an image file, exiting with an error message when it is missing."""
    full_path = path.join(name)
    if not path.isfile(full_path):
        print(f"ERROR 01: '{full_path}' not found.")
        exit()
    return pygame.image.load(full_path)
# Module-level setup: create the window, paint the background once, and
# prepare the clock.  Runs on import.
pygame.init()
size = width, height = config.SIZE_WINDOW
screen = pygame.display.set_mode(size)
image = load_image('background.jpg')
screen.blit(image, (0, 0))
pygame.display.flip()
clock = pygame.time.Clock()
work = True  # main-loop flag, cleared by GameInfo.to_greeting / QUIT
class GameInfo:
    """Paged 'game rules' screen: goal / arrange / walk pages with nav buttons."""

    # Class-level initialisation: fonts are created once at class definition.
    pygame.font.init()
    font_header = pygame.font.Font('font.ttf', 70)
    font = pygame.font.Font('font.ttf', 24)

    def __init__(self):
        # Page renderers, in display order.
        self.pages = [self.goal, self.arrange, self.walk]
        self.page = 0
        self.button_back = Button(60, 50)
        self.button_further = Button(60, 50)
        self.button_to_menu = Button(240, 50)
        self.draw()

    def draw(self):
        """Redraw the panel, the current page and the navigation buttons."""
        pygame.draw.rect(screen, (166, 120, 65), (30, 30, 840, 745), 0)
        pygame.draw.line(screen, 'black', (30, 150), (867, 150), 4)
        self.pages[self.page]()
        self.button_back.draw(40, 710, '<-')
        self.button_further.draw(795, 710, '->')
        self.button_to_menu.draw(330, 710, ' < Back to menu >')

    def press(self):
        """Forward a mouse click to whichever button was hit."""
        self.button_back.press(40, 710, self.back)
        self.button_further.press(795, 710, self.further)
        self.button_to_menu.press(330, 710, self.to_greeting)

    def move(self):
        """Update button hover state on mouse motion."""
        self.button_back.move(40, 710)
        self.button_further.move(795, 710)
        self.button_to_menu.move(330, 710)

    def to_greeting(self):
        """Leave this screen (ends the module's main() loop)."""
        global work
        work = False

    def further(self):
        """Advance to the next page, wrapping back to the first."""
        if self.page < len(self.pages) - 1:
            self.page += 1
        else:
            self.page = 0
        self.pages[self.page]()

    def back(self):
        """Go to the previous page, wrapping to the last."""
        if self.page > 0:
            self.page -= 1
        else:
            self.page = len((self.pages)) - 1
        self.pages[self.page]()

    def render_text(self, text, x, y, fsize):
        """Render multi-line *text* at (x, y), advancing *fsize* pixels per line."""
        lines = text.split('\n')
        for i, line in enumerate(lines):
            screen.blit(self.font.render(line, True, [0, 0, 0]), (x, y + fsize * i))

    def goal(self):
        """First page: the goal of the game, with a board illustration."""
        pygame.draw.rect(screen, (166, 120, 65), (30, 30, 840, 680), 0)
        pygame.draw.line(screen, 'black', (30, 150), (867, 150), 4)
        self.header = self.font_header.render(config.HEAD_INFO_GAME[0], True, [0, 0, 0])
        self.render_text(config.TEXT_INFO_GAME[config.HEAD_INFO_GAME[0]], 60, 180, 40)
        self.board = load_image('board_1.jpg')
        screen.blit(self.header, (170, 40))
        screen.blit(self.board, (500, 270))

    def arrange(self):
        """Second page: how the pieces are arranged."""
        pygame.draw.rect(screen, (166, 120, 65), (30, 30, 840, 680), 0)
        pygame.draw.line(screen, 'black', (30, 150), (867, 150), 4)
        self.header = self.font_header.render(config.HEAD_INFO_GAME[1], True, [0, 0, 0])
        screen.blit(self.header, (100, 40))
        self.render_text(config.TEXT_INFO_GAME[config.HEAD_INFO_GAME[1]], 60, 180, 40)

    def walk(self):
        """Third page: how the pieces move."""
        pygame.draw.rect(screen, (166, 120, 65), (30, 30, 840, 680), 0)
        pygame.draw.line(screen, 'black', (30, 150), (867, 150), 4)
        self.header = self.font_header.render(config.HEAD_INFO_GAME[2], True, [0, 0, 0])
        screen.blit(self.header, (230, 40))
        self.render_text(config.TEXT_INFO_GAME[config.HEAD_INFO_GAME[2]], 60, 180, 40)
def main():
    """Event loop for the info screen; returns when the screen is dismissed.

    NOTE(review): main() is not invoked in this module; presumably the
    importer calls it.
    """
    global work
    work = True
    gameinfo = GameInfo()
    while work:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                work = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                gameinfo.press()
            if event.type == pygame.MOUSEMOTION:
                gameinfo.move()
        gameinfo.draw()
        pygame.display.flip()
18,142 | 117e870f0c3b38ef6c197715a38ef405d93a942b | import Tkinter as TK
import datetime
class countdown:
    """Tkinter widget counting down to a target time (Python 2 / Tkinter)."""

    def __init__(self, master, time):  # time in mm/dd/yy hh:mm:ss format
        self.master = master
        self.frame = TK.Frame(self.master)
        self.targetTime = datetime.datetime.strptime(time, "%m/%d/%y %H:%M:%S")
        self.timeRemainingLabel = TK.Label(self.frame)
        # The button only schedules the first update; update() reschedules itself.
        self.startButton = TK.Button(self.frame, text="Start countdown", command=lambda:self.master.after(1000, self.update))
        self.endTimeLabel = TK.Label(self.frame, text="Target time in mm/dd/yy hh:mm:ss format:")
        self.endTimeEntry = TK.Entry(self.frame)
        self.endTimeEntry.insert(0, time)
        self.frame.grid()
        self.timeRemainingLabel.grid(row=1,column=1, columnspan=3)
        self.startButton.grid(row=2, column=1, rowspan=2)
        self.endTimeLabel.grid(row=2, column=2)
        self.endTimeEntry.grid(row=3, column=2)

    def update(self):
        """Refresh the label with days/h/m/s remaining; reschedule in 1 s."""
        remaining = self.targetTime-datetime.datetime.now()
        daysRemaining = remaining.days
        # NOTE: '/' is floor division on ints under Python 2, which this file
        # targets (it imports Tkinter, not tkinter).
        hoursRemaining = int(remaining.seconds) / 3600
        minutesRemaining = int(remaining.seconds % 3600) / 60
        secondsRemaining = int(remaining.seconds % 60)
        self.timeRemainingLabel.config(text="Time remaining until {targetTime}:\n*** {days} days {hours} hrs {minutes} min {seconds} sec ***".format(targetTime=datetime.datetime.strptime(self.endTimeEntry.get(), "%m/%d/%y %H:%M:%S"), days=daysRemaining, hours=hoursRemaining, minutes=minutesRemaining, seconds=secondsRemaining))
        self.master.after(1000, self.update)
# Demo: count down to a fixed target date.
root = TK.Tk()
c = countdown(root, "08/31/13 01:01:01")
root.mainloop()
18,143 | 90580a7b75658c527ecfe4a43662320eeda9040b | #!/usr/bin/python
import sys
import time
import requests
import json
RED = "\033[1;31m"
BLUE = "\033[1;34m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
def print_usage():
    """Print command-line usage (Python 2 print statements)."""
    print sys.argv[0] + " : clien_id clien_secret path_file"
    print "for clien_id and clien_secret -1 to use deflaut"
def get_token(CLIENT_ID, CLIENT_SECRET):
    """Request an OAuth2 client-credentials token from the 42 API.

    Returns the raw ``requests`` Response so the caller can inspect the
    status code before decoding the JSON payload.

    Cleanup: removed the unused ``URL_API`` local and stray semicolons.
    """
    ACCESS_TOKEN_URL = "https://api.intra.42.fr/oauth/token"
    payload = {
        "grant_type": "client_credentials",
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET
    }
    return requests.post(ACCESS_TOKEN_URL, payload)
def get_list(path_file):
    """Read the login file, returning its lines wrapped in a one-element list.

    NOTE(review): ``readlines()`` already returns a list, which is appended
    whole, so callers index ``result[0]`` to get the lines.  Exits the
    program when the file cannot be opened.  (Python 2 print statement.)
    """
    out = []
    try:
        fd = open(path_file, "r")
    except IOError:
        print "Erreur! File not exist"
        sys.exit()
    line = fd.readlines()
    out.append(line);
    fd.close()
    return (out);
def print_info(to_print):
    """Print a colour-coded status line per user (Python 2 print statements).

    Colour is keyed off the first character of the status string:
    'z...' host -> green, 'Not in school' -> blue, 'Error ...' -> red,
    anything else uncoloured.
    """
    test = sorted(to_print)
    print RED + "red:" + RESET + " for user not found or error"
    print BLUE + "blue:" + RESET + " for user find but not in school now"
    print GREEN + "green:" + RESET + "user find en in school"
    print "not color: user find but is not in 101 student zone"
    for user in test:
        if user['statu'][0] == "z":
            print "\n\t" + GREEN + user['user'] + RESET
        elif user['statu'][0] == "N":
            print "\n\t" + BLUE + user['user'] + RESET
        elif user['statu'][0] == "E":
            print "\n\t" + RED + user['user'] + RESET
        else:
            print "\n\t" + user['user']
        print user['statu']
# --- Script body (Python 2): resolve each login's current 42 location. ---
if len(sys.argv) < 4:
    print_usage();
    sys.exit();
lst_user = get_list(sys.argv[3]);
# "-1" selects the baked-in default API credentials.
if sys.argv[1] == "-1" :
    clien_id = "22db69e1b81963a5196c669540e072fec62f7269f3c71a9b0ee36e6b0f4898c3";
else :
    clien_id = sys.argv[1];
if sys.argv[2] == "-1":
    clien_secret = "c6af2954032d1e581f92d2f7d32a85d7d5644aa60b990e536c16816d1b6352b4";
else:
    clien_secret = sys.argv[2];
# Retry token requests on HTTP 500; abort on any other failure.
while True:
    token = get_token(clien_id, clien_secret)
    if token.status_code == 200 :
        break
    elif token.status_code == 500:
        continue
    else :
        print "Error code:"
        print token.status_code
        sys.exit()
token = token.json();
payload_token ={ 'access_token' : token['access_token']}
to_print = []
# Query each user's last location; the sleep throttles API requests.
for user in lst_user[0] :
    user = user.strip()
    url_request = "https://api.intra.42.fr/v2/users/" + user + "/locations";
    time.sleep(0.7);
    rep = requests.get(url_request, payload_token)
    if rep.status_code == 200 :
        # An 'end_at' on the latest location means the session is over.
        if rep.json()[0]['end_at']:
            statu = "Not in school"
        else :
            statu = rep.json()[0]['host']
    else :
        statu = "Error " + str(rep.status_code)
    to_print.append({
        'user' : user,
        'statu' : statu
    })
print_info(to_print)
|
18,144 | 86f6dfb28aeeb98fde8784c92b0392d094b0b61a | from .thrower_dragon import ThrowerDragon
from utils import apply_effect, make_scare
class ScaryThrower(ThrowerDragon):
    """ThrowerDragon that intimidates Terminators, making them back away instead of advancing."""
    name = 'Scary'
    # BEGIN 4.4
    implemented = True  # Change to True to view in the GUI
    food_cost = 6
    # END 4.4

    def throw_at(self, target):
        """Scare *target* once (2-turn effect); afterwards throw normally."""
        # BEGIN 4.4
        "*** YOUR CODE HERE ***"
        # target.ho_gya flags whether this target was already scared,
        # so the scare effect is only applied once per target.
        if target.ho_gya == 0:
            apply_effect(make_scare, target, 2)
            target.ho_gya = 1
        else:
            super().throw_at(target)
        # END 4.4
|
class MatChainMult:
    """Matrix-chain multiplication solver (classic dynamic programming).

    ``p`` lists the matrix dimensions: matrix A_i has shape p[i] x p[i+1],
    so a chain of k matrices needs ``len(p) == k + 1``.
    """

    def __init__(self, p):
        self.p = p
        self._mat_chain_order()

    def _mat_chain_order(self):
        """Fill the DP cost table ``self.m`` and split-point table ``self.s``."""
        n = len(self.p) - 1  # number of matrices in the chain
        m = [[0 for x in range(n)] for y in range(n)]
        s = [[0 for x in range(n - 1)] for y in range(n - 1)]
        for l in range(1, n):  # l = subchain length - 1
            for i in range(n - l):
                j = i + l
                # Bug fix: the previous 10**9 sentinel is not a true
                # infinity -- large dimension products legitimately exceed
                # 1e9, which silently produced wrong costs and orders.
                m[i][j] = float('inf')
                for k in range(i, j):
                    q = m[i][k] + m[k + 1][j] + self.p[i] * self.p[k + 1] * self.p[j + 1]
                    if q < m[i][j]:
                        m[i][j] = q
                        s[i][j - 1] = k
        self.m, self.s = m, s

    def _optimal_parens(self, i, j, order=''):
        """Return the parenthesization string for matrices A_{i+1}..A_{j+1}.

        ``order`` is kept (now optional) for backward compatibility; the
        string is built purely from return values.
        """
        if i == j:
            return order + 'A{}'.format(i + 1)
        k = self.s[i][j - 1]
        return order + '({0}{1})'.format(self._optimal_parens(i, k),
                                         self._optimal_parens(k + 1, j))

    def optimal_cost(self):
        """Minimum number of scalar multiplications for the whole chain."""
        return self.m[0][len(self.m) - 1]

    def optimal_order(self):
        """Optimal parenthesization, e.g. '((A1A2)A3)'."""
        return self._optimal_parens(0, len(self.p) - 2, '')
|
18,146 | c746022617293d08dd25fddbbeda9ba08fdd2412 | from Crypto.Cipher import DES
import binascii
#we can use the DES algorithm like this
#there are several modes (7 modes)
# 1.) ECB: "Electronic Code Book" -> we use DES on every 64 bits long plaintext block
# these blocks are independnt of each other so we use DES seperately on every block
# 2.) CBC: "Cipher Block Chaining" -> uses a chaining mechanism that causes
# the decryption of a block of ciphertext to depend on all the preceding ciphertext blocks
#
# THE PADDING PROBLEM
# DES operates on 64-bit (8-byte) blocks: what if the plaintext length is not a multiple of the block size?
# - in these cases we append some extra bytes to the plaintext to be able to split
# the plaintext into whole blocks
#
# Padding Modes:
# -> we can add extra bits: 100000 for example
# -> we can add white-space to the plaintext
# -> we can use CMS "Cryptographic Message Syntax"...pad with bytes all of the same value as the number of padding bytes
def append_space_padding(str, blocksize = 64):
    """Pad *str* (bytes) with b'a' up to the next multiple of *blocksize*.

    Always adds at least one byte: an input whose length is already a
    multiple of *blocksize* receives a full extra block of padding.
    """
    missing = blocksize - len(str) % blocksize
    return str + b'a' * missing
def remove_space_padding(str, blocksize = 64):
    """Strip the trailing 'a' padding added by append_space_padding.

    Counts trailing 'a' characters and removes them.  NOTE: this scheme is
    lossy when the original plaintext itself ends with 'a'.
    """
    pad_len = 0
    for char in str[::-1]:
        if char == 'a':
            pad_len += 1
        else:
            break
    # Bug fix: when no padding is present, the old ``str[:-0]`` slice
    # returned an empty string, destroying the whole input.
    if pad_len:
        str = str[:-pad_len]
    return str
def encrypt(plaintext, key):
    """ECB-mode DES encryption of *plaintext* bytes (length must be a multiple of 8)."""
    des = DES.new(key, DES.MODE_ECB)
    return des.encrypt(plaintext)
def decrypt(ciphertext, key):
    """ECB-mode DES decryption; returns the plaintext decoded as UTF-8 text."""
    des = DES.new(key, DES.MODE_ECB)
    return des.decrypt(ciphertext).decode('UTF-8')
if __name__ == "__main__":
    # Demo round-trip: pad, encrypt, decrypt, unpad a sample message.
    key = b"secretaa"  # DES keys are 8 bytes
    plaintext = b"This is the secret message we want to encrypt!"
    print(len(plaintext))
    plaintext = append_space_padding(plaintext)
    print(len(plaintext))
    print(plaintext)
    ciphertext = encrypt(plaintext, key)
    print(binascii.hexlify(bytearray(ciphertext)))
    decrypted = decrypt(ciphertext, key)
    decrypted = remove_space_padding(decrypted)
    print("Decrypted message:", decrypted)
18,147 | 448edd2833f93bdc87ca621db02efafb69b2ab01 | from tea.models.basesqlalchemy import Base, Helper
from tea.models.origins import Origins
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship, backref
class Tea(Base, Helper):
    """A kind of tea: name, flavors, and a link to its origin."""
    __tablename__ = 'kinds'

    name = Column(String)
    flavors = Column(String)
    origin_id = Column(Integer, ForeignKey('origins.id'))
    # The backref makes Origins.kinds list the teas from that origin.
    origin = relationship('Origins', backref=backref('kinds'))

    def to_dict(self):
        """Serialize via the shared Helper implementation."""
        return super(Tea, self).to_dict()
|
# Demonstration of basic Python arithmetic operators and augmented assignment.
print("5") #String
print(5) #Integer
print(4+2) #add
print(4-2) #subtract
print(4*2) #multiply
print(4/2) #divide (true division: prints 2.0)
print(4**2) #for squaring
print (4%2) #remainder after dividing (zero here)
y = 4.15
print(y)
y = y + 2 # add 2 into y
print(y)
y += 2 # add 2 into y (augmented form)
print(y)
y = y - 1 # subtract 1 from y
print(y)
y -= 1 # subtract 1 from y (augmented form)
print(y)
|
18,149 | 53705ef9a0f83a3468bbbe060d1ad7aeb7c12113 | # pylint: disable=missing-docstring
from setuptools import setup, find_packages
if __name__ == '__main__':
    # Package the test host and register its entry points with reentry.
    setup(name='reentry-test-host',
          packages=find_packages(),
          install_requires=['reentry'],
          reentry_register=True,  # triggers reentry's entry-point registration hook
          entry_points={
              'console_scripts': ['reentry-test-hooks = reentry_test_host.tests:main'],
              'reentry_test': ['builtin = reentry_test_host.builtin:PluginClass']
          })
|
18,150 | 9d31ad7429903916f04705cacb99f3e647fd1c0f | import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from utils import get_request, parse_table, get_table_header
def get_ken_pomeroys_rating(year):
    """
    :param year: string or int
    :return: pandas.DataFrame containing Ken Pomeroy's ratings for a given year,
        or None when the HTTP request fails
    """
    base_url = "https://kenpom.com/index.php?y="
    url = base_url + str(year)
    # Browser-like User-Agent header is sent with the request.
    r = get_request(url, headers={
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A"})
    if r is None:
        return None
    soup = BeautifulSoup(r.text, 'lxml')
    table = soup.find_all('table')[0]
    data = parse_table(table)
    columns = get_table_header(table, index=1)
    data = np.array(data)
    cleaned_data = []
    for i in data:
        # Keep only these column positions from each parsed row.
        mask = [1, 2, 3, 4, 5, 7, 9, 11, 13, 15, 17, 19]
        cleaned_data.append(i[mask])
    df = pd.DataFrame(cleaned_data, index=np.arange(1, len(data) + 1), columns=columns)
    # Strip leading/trailing digits and spaces from the first column
    # (presumably tournament seeds attached to team names -- TODO confirm).
    df[df.columns[0]] = df[df.columns[0]].str.strip('0123456789 ')
    return df
|
18,151 | 37ce452a9c7ee6805557115e7e17b94e4f32ea3a | #! /usr/bin/env python
#
def chi_square_noncentral_cdf_values ( n_data ):

#*****************************************************************************80
#
## CHI_SQUARE_NONCENTRAL_CDF_VALUES returns values of the noncentral chi CDF.
#
#  Discussion:
#
#    In Mathematica, the function can be evaluated by:
#
#      Needs["Statistics`ContinuousDistributions`"]
#      dist = NoncentralChiSquareDistribution [ df, lambda ]
#      CDF [ dist, x ]
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    24 January 2015
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    Stephen Wolfram,
#    The Mathematica Book,
#    Fourth Edition,
#    Wolfram Media / Cambridge University Press, 1999.
#
#  Parameters:
#
#    Input/output, integer N_DATA.  The user sets N_DATA to 0 before the
#    first call.  On each call, the routine increments N_DATA by 1, and
#    returns the corresponding data; when there is no more data, the
#    output value of N_DATA will be 0 again.
#
#    Output, integer DF, the number of degrees of freedom.
#
#    Output, real LAMBDA, the noncentrality parameter.
#
#    Output, real X, the argument of the function.
#
#    Output, real F, the noncentral chi CDF.
#
    import numpy as np

    # Number of tabulated values; the four arrays below are parallel.
    n_max = 28

    df_vec = np.array ( ( \
        1,     2,     3, \
        1,     2,     3, \
        1,     2,     3, \
        1,     2,     3, \
       60,    80,   100, \
        1,     2,     3, \
       10,    10,    10, \
       10,    10,    10, \
       10,    10,    10, \
        8 ) )

    f_vec = np.array ( ( \
        0.8399444269398261E+00, \
        0.6959060300435139E+00, \
        0.5350879697078847E+00, \
        0.7647841496310313E+00, \
        0.6206436532195436E+00, \
        0.4691667375373180E+00, \
        0.3070884345937569E+00, \
        0.2203818092990903E+00, \
        0.1500251895581519E+00, \
        0.3071163194335791E-02, \
        0.1763982670131894E-02, \
        0.9816792594625022E-03, \
        0.1651753140866208E-01, \
        0.2023419573950451E-03, \
        0.4984476352854074E-06, \
        0.1513252400654827E-01, \
        0.2090414910614367E-02, \
        0.2465021206048452E-03, \
        0.2636835050342939E-01, \
        0.1857983220079215E-01, \
        0.1305736595486640E-01, \
        0.5838039534819351E-01, \
        0.4249784402463712E-01, \
        0.3082137716021596E-01, \
        0.1057878223400849E+00, \
        0.7940842984598509E-01, \
        0.5932010895599639E-01, \
        0.2110395656918684E+00 ) )

    lam_vec = np.array ( ( \
         0.5E+00, \
         0.5E+00, \
         0.5E+00, \
         1.0E+00, \
         1.0E+00, \
         1.0E+00, \
         5.0E+00, \
         5.0E+00, \
         5.0E+00, \
        20.0E+00, \
        20.0E+00, \
        20.0E+00, \
        30.0E+00, \
        30.0E+00, \
        30.0E+00, \
         5.0E+00, \
         5.0E+00, \
         5.0E+00, \
         2.0E+00, \
         3.0E+00, \
         4.0E+00, \
         2.0E+00, \
         3.0E+00, \
         4.0E+00, \
         2.0E+00, \
         3.0E+00, \
         4.0E+00, \
         0.5E+00 ) )

    x_vec = np.array ( ( \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
         3.000E+00, \
        60.000E+00, \
        60.000E+00, \
        60.000E+00, \
         0.050E+00, \
         0.050E+00, \
         0.050E+00, \
         4.000E+00, \
         4.000E+00, \
         4.000E+00, \
         5.000E+00, \
         5.000E+00, \
         5.000E+00, \
         6.000E+00, \
         6.000E+00, \
         6.000E+00, \
         5.000E+00 ))

    # Clamp a negative index, and reset to 0 once the table is exhausted.
    if ( n_data < 0 ):
        n_data = 0

    if ( n_max <= n_data ):
        n_data = 0
        df = 0
        lam = 0.0
        x = 0.0
        f = 0.0
    else:
        df = df_vec[n_data]
        lam = lam_vec[n_data]
        x = x_vec[n_data]
        f = f_vec[n_data]
        n_data = n_data + 1

    return n_data, df, lam, x, f
def chi_square_noncentral_cdf_values_test ( ):

#*****************************************************************************80
#
## CHI_SQUARE_NONCENTRAL_CDF_VALUES_TEST tests CHI_SQUARE_NONCENTRAL_CDF_VALUES.
#
#  Walks the tabulated noncentral chi-square CDF values row by row and
#  echoes each one, stopping when the lookup routine signals exhaustion
#  by returning n_data == 0.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    23 January 2015
#
#  Author:
#
#    John Burkardt
#
  import platform

  print('')
  print('CHI_SQUARE_NONCENTRAL_CDF_VALUES_TEST:')
  print(' Python version: %s' % (platform.python_version()))
  print(' CHI_SQUARE_NONCENTRAL_CDF_VALUES: values of the noncentral Chi Square CDF.')
  print('')
  print(' DF LAM X CHI_SQUARE_NONCENTRAL_CDF')
  print('')

  n_data = 0
  fetching = True
  while fetching:
    n_data, df, lam, x, f = chi_square_noncentral_cdf_values(n_data)
    if n_data == 0:
      # Table exhausted; the generator reset itself.
      fetching = False
    else:
      print(' %12d %12f %12f %24.16g' % (df, lam, x, f))
#
#  Terminate.
#
  print('')
  print('CHI_SQUARE_NONCENTRAL_CDF_VALUES_TEST:')
  print(' Normal end of execution.')
  return
#
#  Script entry point: stamp the start time, run the self-test, stamp the
#  end time.  `timestamp` is a local helper module, not part of stdlib.
#
if ( __name__ == '__main__' ):
  from timestamp import timestamp
  timestamp ( )
  chi_square_noncentral_cdf_values_test ( )
  timestamp ( )
|
18,152 | b41040ab3b51b5e5e42172f57e8e8a9b1073e744 | #!/usr/bin/env python3
# pylint: disable=wrong-import-position
# Adding working directory to system path
import sys
import time
import json
import logging
import argparse
import Levenshtein
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
sys.path.append(".")
from mysql import mysql
from model import CONF_PATH, attach_search, sanitise_name
from model import User, Org, Orgalias, Note, Address, Orgtag, Contact, Medium
LOG = logging.getLogger('insert_organisation')
LOG_SEARCH = logging.getLogger('search')
def text_to_ngrams(text, size=5):
    """Return all character n-grams of the lowercased words of *text*.

    Each word is padded on both sides with ``size - 1`` spaces so that
    n-grams anchored at word boundaries are produced as well.
    """
    pad = " " * (size - 1)
    return [
        padded[start: start + size]
        for token in text.lower().split()
        for padded in (pad + token + pad,)
        for start in range(len(token) + size - 1)
    ]
def get_names(orm):
    """Map each org_id to the list of its known names (primary + aliases)."""
    names = {}

    def _add(org_id, name):
        # setdefault keeps the same insertion order as the original loops:
        # primary names first, then aliases.
        names.setdefault(org_id, []).append(name)

    for org in orm.query(Org).all():
        _add(org.org_id, org.name)
    for orgalias in orm.query(Orgalias).all():
        _add(orgalias.org.org_id, orgalias.name)
    return names
def select_from_list(matches):
    """Print an indexed menu of (name, alias) pairs and return the chosen name.

    Returns ``None`` when the user types something non-numeric or an index
    outside the list.
    """
    for index, (name, alias) in enumerate(matches):
        suffix = ("[%s]" % alias) if alias else ""
        print(
            " %4d %s %s" % (index, name, suffix)
        )
    print()
    print("Choose name or non-numeric to exit: ", end=' ')
    raw = input()
    try:
        selected = int(raw)
    except ValueError:
        LOG.warning("Could not convert %s to integer.", raw)
        return None
    if not 0 <= selected < len(matches):
        LOG.error("%d is out of range.", selected)
        return None
    return matches[selected][0]
def closest_names(name, names, orm):
    """Interactively pick an existing org name similar to *name*.

    *names* is an iterable of ``(name, alias)`` pairs.  Candidates are the
    three alphabetic neighbours on either side of *name* plus any pair whose
    Levenshtein ratio against *name* exceeds 0.8.  Returns the selected name,
    or ``None`` when there are no candidates or the user declines.
    """
    matches = set()
    # NOTE(review): the variable names are inverted relative to the filters:
    # `lower` collects names alphabetically *after* `name`, `higher` the ones
    # before it.  Behaviour is symmetric, so this is cosmetic only.
    lower = orm.query(Org.name) \
        .filter(Org.name > name) \
        .order_by(Org.name.asc()) \
        .limit(3) \
        .all()
    higher = orm.query(Org.name) \
        .filter(Org.name < name) \
        .order_by(Org.name.desc()) \
        .limit(3) \
        .all()
    for (name2, ) in lower + higher:
        # Alphabetic neighbours carry no alias.
        matches.add((name2, None))
    for name2, alias in names:
        ratio = Levenshtein.ratio(name.lower(), name2.lower())
        if ratio > 0.8:
            matches.add((name2, alias))
    if not matches:
        return None
    matches = sorted(list(matches))
    print()
    print("\n%s\n" % name)
    existing_name = select_from_list(matches)
    return existing_name
def get_org(orm, name):
    """Fetch an Org by case-insensitive name, falling back to aliases.

    On duplicate matches a warning is logged and the first hit is returned;
    ``None`` means neither a name nor an alias matched.
    """
    needle = name.lower()

    org_query = orm.query(Org) \
        .filter(func.lower(Org.name) == needle)
    try:
        return org_query.one()
    except MultipleResultsFound:
        LOG.warning("Multiple results found for name '%s'.", needle)
        return org_query.first()
    except NoResultFound:
        pass

    alias_query = orm.query(Orgalias) \
        .filter(func.lower(Orgalias.name) == needle)
    try:
        return alias_query.one().org
    except MultipleResultsFound:
        LOG.warning("Multiple results found for alias '%s'.", needle)
        return alias_query.first().org
    except NoResultFound:
        pass

    return None
def get_candidates(es, text):
    """Query Elasticsearch for orgs whose aliases match *text*.

    Returns the list of hit ``_source`` documents, each augmented in place
    with its ``score``.
    """
    fields = [
        "alias_all.straight^3",
        "alias_all.fuzzy",
    ]
    data = {
        "query": {
            "multi_match": {
                "fields": fields,
                "query": text
            }
        }
    }
    LOG.debug("Search query: %s", repr(data))

    results = es.search(data, index="mango", doc_type="org")
    LOG.debug("Results: %s", repr(results))

    candidates = []
    for hit in results["hits"]["hits"]:
        # Fold the relevance score into the source document itself.
        candidate = hit["_source"]
        candidate["score"] = hit["_score"]
        candidates.append(candidate)
    return candidates
def search_org(es, text_orig, context=None, just_search=False):
    """Interactively resolve *text_orig* against Elasticsearch candidates.

    Returns an org_id string on a match, ``None`` for "none of the above",
    and ``False`` to skip this record entirely.  With ``just_search`` the
    candidates are only displayed and the function returns ``None``.
    """
    # pylint: disable=redefined-variable-type
    # `org_id` may be `None`, `False` or string.
    org_id = None
    text_search = text_orig
    while True:
        if context and context.get("refresh", None):
            # Necessarily imprecise way of allowing recently
            # inserted alias to appear in results
            time.sleep(1)
            context["refresh"] = False
        candidates = get_candidates(es, text_search)
        if not candidates:
            break
        # ANSI escapes colour the prompt; all UI goes to stderr so stdout
        # stays clean for piping.
        sys.stderr.write(
            ("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig))
        )
        for i, org in enumerate(candidates, 1):
            sys.stderr.write(
                " %4d: \033[37m%-5d %s\033[0m\n" % (
                    i, org["org_id"], org["score"])
            )
            for name in org["alias_all"]:
                sys.stderr.write(
                    (" \033[94m%s\033[0m\n" % name)
                )
            sys.stderr.write("\n")
        sys.stderr.write(" Empty: None of the above\n")
        sys.stderr.write(" Text: Alternative search\n: ")
        sys.stderr.write(" '-': Skip\n\n: ")
        if just_search:
            return
        choice = input()
        choice = choice.strip()
        if not len(choice):
            # Empty input: accept none of the candidates.
            org_id = None
            break
        sys.stderr.write("\n")
        if choice == "-":
            # Explicit skip sentinel.
            org_id = False
            break
        sys.stderr.write("\n")
        try:
            choice = int(choice)
        except ValueError:
            # Non-numeric input becomes the next search string.
            text_search = choice
            continue
        if choice == 0:
            # NOTE(review): choice 0 yields the sentinel " " (a single
            # space), which is not a real org_id — presumably a deliberate
            # "force create" marker; confirm against callers.
            org_id = " "
            break
        if choice > len(candidates):
            # Out-of-range index: re-display the same candidates.
            continue
        org_id = candidates[choice - 1]["org_id"]
        break
    return org_id
def select_org(orm, name, context, search=True):
    """Resolve *name* to an Org, optionally via interactive search.

    Returns the matching Org, ``None`` when nothing matched (or search is
    disabled), or ``False`` when the user chose to skip.  A successful
    interactive match also records *name* as a new Orgalias.
    """
    name = sanitise_name(name)
    org = get_org(orm, name)
    if org:
        return org
    if not search:
        return
    es = orm.get_bind().search
    if es is None:
        LOG.error("Cannot connect to Elasticsearch.")
        sys.exit(1)
    org_id = search_org(es, name, context=context)
    if not org_id:
        # Propagate the None/False distinction to the caller.
        return org_id
    try:
        org = orm.query(Org).filter_by(org_id=org_id).one()
    except NoResultFound as e:
        LOG.warning("No result found for '%s', org_id '%d'.", name, org_id)
        raise e
    # Adds new `Orgalias` to `Org`.
    Orgalias(name, org, moderation_user=context["user"], public=None)
    context["refresh"] = True
    es.refresh()
    # Calling `refresh` here appears not to make any difference, but in
    # theory should be a good idea.
    # Waiting for inserted org to be searchable here doesn't seem to work.
    return org
def insert_fast(
        data, orm,
        public=None, tag_names=None, dry_run=None, address_exclusive=None,
        search=True, org_id_whitelist=None
):
    """Bulk-insert or merge organisation records.

    data: iterable of dicts with a "name" plus optional "tag", "address",
        "contact" and "note" lists.
    orm: SQLAlchemy session.
    public: public flag applied to newly created entities.
    tag_names: tag names added to every org touched.
    dry_run: when True, roll back each chunk instead of committing.
    address_exclusive: skip addresses for orgs that already have one.
    search: offer interactive fuzzy matching for unknown names.
    org_id_whitelist: when given, only orgs with these IDs are modified.
    """
    # User -1 is used as the moderation user for scripted inserts.
    user = orm.query(User).filter_by(user_id=-1).one()
    tag_names = tag_names or []

    tags = []
    for tag_name in tag_names:
        tag = Orgtag.get(
            orm,
            tag_name,
            moderation_user=user,
            public=public,
        )
        tags.append(tag)

    # Shared mutable state for search_org/select_org (ES refresh flag).
    context = {
        "refresh": False,
        "user": user
    }

    for chunk in data:
        # pylint: disable=maybe-no-member
        has_address = None
        LOG.info("\n%s\n", chunk["name"])
        org = select_org(orm, chunk["name"], context, search)
        # Skip when the user declined (False) or the org is off-whitelist.
        if (
            org is False or
            (org_id_whitelist and
             ((not org) or (org.org_id not in org_id_whitelist)))
        ):
            LOG.info("Skipping org: %s", org and org.org_id)
            orm.rollback()
            continue

        if not org:
            LOG.warning("\nCreating org %s\n", chunk["name"])
            org = Org(chunk["name"], moderation_user=user, public=public,)
            orm.add(org)
            # Querying org address list on a new org would trigger a commit
            has_address = False
        else:
            has_address = bool(org.address_list)

        if tags:
            # set() de-duplicates against tags the org already carries.
            org.orgtag_list = list(set(tags + org.orgtag_list))

        if "tag" in chunk:
            for tag_name in chunk["tag"]:
                tag = Orgtag.get(
                    orm, tag_name,
                    moderation_user=user, public=public,
                )
                if tag not in org.orgtag_list:
                    org.orgtag_list.append(tag)

        if "address" in chunk and not (address_exclusive and has_address):
            for address_data in chunk["address"]:
                # De-duplicate on the postal string.
                if address_data["postal"] in \
                        [address.postal for address in org.address_list]:
                    continue
                address = Address(
                    address_data["postal"], address_data["source"],
                    moderation_user=user, public=None,
                )
                address.geocode()
                LOG.debug(address)
                orm.add(address)
                org.address_list.append(address)

        if "contact" in chunk:
            for contact_data in chunk["contact"]:
                text = sanitise_name(contact_data["text"])
                # De-duplicate on (text, medium) pairs.
                match = False
                for contact in org.contact_list:
                    if (
                            contact.text == text and
                            contact.medium.name == contact_data["medium"]
                    ):
                        match = True
                        break
                if match:
                    continue
                try:
                    medium = orm.query(Medium) \
                        .filter_by(name=contact_data["medium"]) \
                        .one()
                except NoResultFound:
                    LOG.warning("%s: No such medium", contact_data["medium"])
                    continue
                contact = Contact(
                    medium, text,
                    source=contact_data["source"],
                    moderation_user=user, public=None,
                )
                LOG.debug(contact)
                orm.add(contact)
                org.contact_list.append(contact)

        if "note" in chunk:
            for note_data in chunk["note"]:
                # De-duplicate on the note text.
                if note_data["text"] in [note.text for note in org.note_list]:
                    continue
                note = Note(
                    note_data["text"], note_data["source"],
                    moderation_user=user, public=None,
                )
                LOG.debug(note)
                orm.add(note)
                org.note_list.append(note)

        if not (orm.new or orm.dirty or orm.deleted):
            LOG.info("Nothing to commit.")
            continue
        if dry_run is True:
            LOG.warning("rolling back")
            orm.rollback()
            continue
        LOG.info("Committing.")
        orm.commit()
def main():
    """CLI entry point: configure logging, connect to MySQL and insert
    organisation data from one or more JSON files (or just search with -s).
    """
    LOG.addHandler(logging.StreamHandler())
    LOG_SEARCH.addHandler(logging.StreamHandler())

    parser = argparse.ArgumentParser(description="__DESC__")
    parser.add_argument(
        "--verbose", "-v",
        action="count", default=0,
        help="Print verbose information for debugging.")
    parser.add_argument(
        "--quiet", "-q",
        action="count", default=0,
        help="Suppress warnings.")
    parser.add_argument(
        "-t", "--tag",
        action="append",
        help="Tag to apply to all insertions.", default=[])
    parser.add_argument(
        "-p", "--public",
        action="store", type=int,
        help="Public state of new items (True, False, None).")
    parser.add_argument(
        "-s", "--search",
        action="store_true",
        help="Search string using import merge tool.")
    parser.add_argument(
        "-d", "--do-not-search",
        action="store_true",
        help="Do not search for similar org names.")
    parser.add_argument(
        "-A", "--address-exclusive",
        action="store_true",
        help="Only import addresses if org has no existing "
        "address.")
    parser.add_argument(
        "-L", "--limit-org",
        action="store",
        help="Only apply changes to orgs whose IDs are "
        "supplied (a comma separated string)")
    parser.add_argument(
        "-n", "--dry-run",
        action="store_true",
        help="Dry run.")
    parser.add_argument(
        "json_path", metavar="JSON",
        nargs="+",
        help="Path to JSON file.")

    args = parser.parse_args()

    # Map the net -v/-q count (both may repeat) onto ERROR..DEBUG.
    log_level = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[
        max(0, min(3, 1 + args.verbose - args.quiet))]
    LOG.setLevel(log_level)
    LOG_SEARCH.setLevel(log_level)

    connection_url = mysql.connection_url_app(CONF_PATH)
    engine = create_engine(connection_url,)
    mysql.engine_disable_mode(engine, "ONLY_FULL_GROUP_BY")
    session_ = sessionmaker(bind=engine, autocommit=False, autoflush=False)
    orm = session_()
    attach_search(engine, orm)

    if args.public is not None:
        args.public = bool(args.public)

    if args.search:
        es = orm.get_bind().search
        # Bug fix: `args` is an argparse.Namespace, which is not iterable —
        # the original `for arg in args:` raised TypeError.  Iterate the
        # positional arguments (used here as search terms) instead.
        for arg in args.json_path:
            search_org(es, arg, just_search=True)
        sys.exit(0)

    org_id_whitelist = None
    if args.limit_org:
        org_id_whitelist = [int(id_) for id_ in args.limit_org.split(",")]

    for arg in args.json_path:
        try:
            data = json.load(open(arg, "r", encoding="utf8"))
        except ValueError:
            LOG.error("%s: Could not decode JSON data.", arg)
            continue

        insert_fast(
            data, orm, args.public, args.tag, args.dry_run,
            args.address_exclusive, (not args.do_not_search),
            org_id_whitelist
        )


if __name__ == "__main__":
    main()
|
18,153 | 758d88c4c55943b13d150212bb8bc9955b22bf05 | ''''
Given two binary trees t1 and t2, determine whether the second tree is a subtree of the first tree. A subtree for vertex v in a binary tree t is a tree consisting of v and all its descendants in t. Determine whether or not there is a vertex v (possibly none) in tree t1 such that a subtree for vertex v (possibly empty) in t1 equals t2.
Example
For
t1 = {
"value": 5,
"left": {
"value": 10,
"left": {
"value": 4,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": null,
"right": null
}
},
"right": {
"value": 6,
"left": null,
"right": {
"value": -1,
"left": null,
"right": null
}
}
},
"right": {
"value": 7,
"left": null,
"right": null
}
}
and
t2 = {
"value": 10,
"left": {
"value": 4,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": null,
"right": null
}
},
"right": {
"value": 6,
"left": null,
"right": {
"value": -1,
"left": null,
"right": null
}
}
}
the output should be isSubtree(t1, t2) = true.
This is what these trees look like:
t1: t2:
5 10
/ \ / \
10 7 4 6
/ \ / \ \
4 6 1 2 -1
/ \ \
1 2 -1
As you can see, t2 is a subtree of t1 (the vertex in t1 with value 10).
For
t1 = {
"value": 5,
"left": {
"value": 10,
"left": {
"value": 4,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": null,
"right": null
}
},
"right": {
"value": 6,
"left": {
"value": -1,
"left": null,
"right": null
},
"right": null
}
},
"right": {
"value": 7,
"left": null,
"right": null
}
}
and
t2 = {
"value": 10,
"left": {
"value": 4,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": null,
"right": null
}
},
"right": {
"value": 6,
"left": null,
"right": {
"value": -1,
"left": null,
"right": null
}
}
}
the output should be isSubtree(t1, t2) = false.
This is what these trees look like:
t1: t2:
5 10
/ \ / \
10 7 4 6
/ \ / \ \
4 6 1 2 -1
/ \ /
1 2 -1
As you can see, there is no vertex v such that the subtree of t1 for vertex v equals t2.
For
t1 = {
"value": 1,
"left": {
"value": 2,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": null,
"right": null
}
}
and
t2 = {
"value": 2,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": null
}
the output should be isSubtree(t1, t2) = false.
Input/Output
[execution time limit] 4 seconds (py3)
[input] tree.integer t1
A binary tree of integers.
Guaranteed constraints:
0 ≤ tree size ≤ 6 · 104,
-1000 ≤ node value ≤ 1000.
[input] tree.integer t2
Another binary tree of integers.
Guaranteed constraints:
0 ≤ tree size ≤ 6 · 104,
-1000 ≤ node value ≤ 1000.
[output] boolean
Return true if t2 is a subtree of t1, otherwise return false.
'''
#
# Definition for binary tree:
class Tree(object):
    """A minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers attach subtrees directly.
        self.left = None
        self.right = None
        self.value = x
def isSubtree(t1, t2):
    """Return True if some vertex of t1 roots a subtree equal to t2.

    The empty tree (t2 is None) is a subtree of every tree, including the
    empty one.  Runs in O(|t1| * |t2|) worst case.
    """
    # The original also special-cased "both None" first; that branch is
    # subsumed by the t2-is-None check below.
    if t2 is None:
        return True
    if t1 is None:
        # A non-empty t2 cannot appear in an empty t1.
        return False
    if areNodesEqual(t1, t2):
        return True
    return isSubtree(t1.left, t2) or isSubtree(t1.right, t2)


def areNodesEqual(t1, t2):
    """Return True if the trees rooted at t1 and t2 are identical in
    structure and values."""
    if t1 is None or t2 is None:
        # Equal only when both are empty.
        return t1 is t2
    return (t1.value == t2.value
            and areNodesEqual(t1.left, t2.left)
            and areNodesEqual(t1.right, t2.right))
18,154 | 7d3f5b6b40f02f42796281e8b4652b7e56a66544 | print(5+3)
# Basic arithmetic demo: prints 8, 8 and 8.0 (the / operator is true division).
print(11-3)
print(2*4)
print(16/2)
# Favorite number in a variable + combine it into a variable holding text.
favorite_num = 7
print(favorite_num)
text = 'Моё любимое число: ' + str(favorite_num)
print(text)
18,155 | 1b7dc6a846e8c0d44b6839fb801e8063fcf89f19 | # coding=utf-8
"""
작성중인..호출 프로그램
"""
import time, sys
from Kiwoom_sungwon import *
import MySQLdb
from PyQt5.QtWidgets import QApplication
# Which request types to run this session, the reference date, and the
# target market(s).  The Korean literals are Kiwoom OpenAPI request names
# and must not be translated.
ACTION = ["주식틱차트조회요청"]
TODAY = '20170626'
TARGET = ["코스피,코스닥"]
if __name__ == '__main__':
    print("START", sys.argv)
    app = QApplication(sys.argv)
    try:
        kiwoom = Kiwoom_sungwon()
        kiwoom.commConnect()
        # Fetch the stock code list for the configured market(s):
        # "0" = KOSPI, "10" = KOSDAQ.
        if '코스피' in TARGET:
            code_list = kiwoom.getCodeList("0")
        elif '코스닥' in TARGET:
            code_list = kiwoom.getCodeList("10")
        elif '코스피,코스닥' in TARGET:
            code_list = kiwoom.getCodeList("0", "10")
        else:
            sys.exit(app.exec_())
        #code_list = ['900050', '000020', '000030', '000040', '000050']
        print(code_list)
        print(len(code_list))
        # Connect to the MySQL database.
        conn = MySQLdb.connect(host='localhost', user='pyadmin', password='password', db='pystock', charset='utf8',
                               port=3390)
        curs = conn.cursor()
        # Branch: basic stock info request (OPT10001).
        # NOTE(review): this branch writes into the opt10079 tick table and
        # uses an `open` column, while the tick branch below uses
        # `open_price` — looks like copy/paste drift; verify the schema.
        if "주식기본정보요청" in ACTION:
            for code in code_list:
                print(code)
                kiwoom.setInputValue("종목코드", code)
                kiwoom.commRqData("주식기본정보요청", "OPT10001", 0, "0001")
                for cnt in kiwoom.data:
                    curs.execute("""replace into opt10079
                        (code, cur_price, volume, date, open,
                        high_price, low_price, modify_gubun, modify_ratio, big_gubun,
                        small_gubun, stock_inform, modify_event, before_close) values
                        (%s, %s, %s, %s, %s,
                        %s, %s, %s, %s, %s,
                        %s, %s, %s, %s)
                        """, (code, cnt[0], cnt[1], cnt[2], cnt[3],
                              cnt[4], cnt[5], cnt[6], cnt[7], cnt[8],
                              cnt[9], cnt[10], cnt[11], cnt[12]))
                # Throttle to respect the API rate limit.
                time.sleep(0.5)
        if "업종별투자자순매수요청" in ACTION:  # opt10051
            pass
        if "업종별주가요청" in ACTION:  # OPT20002
            pass
        # Branch: per-tick chart request (OPT10079); skips codes already
        # stored for TODAY, then pages while kiwoom.inquiry == '2'.
        if "주식틱차트조회요청" in ACTION:
            for code in code_list:
                print(code)
                curs.execute("select * from opt10079 where code=%s and date=%s", (code, TODAY))
                if curs.fetchall():
                    continue
                kiwoom.setInputValue("종목코드", code)
                kiwoom.setInputValue("틱범위", '1')
                kiwoom.setInputValue("수정주가구분", '0')
                kiwoom.commRqData("주식틱차트조회요청", "OPT10079", 0, "1000")
                for cnt in kiwoom.data:
                    curs.execute("""replace into opt10079
                        (code, cur_price, volume, date, open_price,
                        high_price, low_price, modify_gubun, modify_ratio, big_gubun,
                        small_gubun, stock_inform, modify_event, before_close) values
                        (%s, %s, %s, %s, %s,
                        %s, %s, %s, %s, %s,
                        %s, %s, %s, %s)
                        """, (code, cnt[0], cnt[1], cnt[2], cnt[3],
                              cnt[4], cnt[5], cnt[6], cnt[7], cnt[8],
                              cnt[9], cnt[10], cnt[11], cnt[12]))
                time.sleep(0.1)
                conn.commit()
                while kiwoom.inquiry == '2':
                    kiwoom.setInputValue("종목코드", code)
                    kiwoom.setInputValue("틱범위", '1')
                    kiwoom.setInputValue("수정주가구분", '0')
                    kiwoom.commRqData("주식틱차트조회요청", "OPT10079", 0, "1000")
                    for cnt in kiwoom.data:
                        curs.execute("""replace into opt10079
                            (code, cur_price, volume, date, open_price,
                            high_price, low_price, modify_gubun, modify_ratio, big_gubun,
                            small_gubun, stock_inform, modify_event, before_close) values
                            (%s, %s, %s, %s, %s,
                            %s, %s, %s, %s, %s,
                            %s, %s, %s, %s)
                            """, (code, cnt[0], cnt[1], cnt[2], cnt[3],
                                  cnt[4], cnt[5], cnt[6], cnt[7], cnt[8],
                                  cnt[9], cnt[10], cnt[11], cnt[12]))
                    time.sleep(0.1)
                    conn.commit()
        # Branch: daily chart request (OPT10081); same skip-and-page shape
        # as the tick branch, with a continuation flag of 2 while paging.
        if "주식일봉차트조회요청" in ACTION:
            # opt10081
            for code in code_list:
                print(code)
                curs.execute("select date from opt10081 where code=%s and date=%s", (code, TODAY))
                if curs.fetchall():
                    continue
                kiwoom.setInputValue("종목코드", code)
                kiwoom.setInputValue("기준일자", TODAY)
                kiwoom.setInputValue("수정주가구분", '0')
                kiwoom.commRqData("주식일봉차트조회요청", "OPT10081", 0, "1000")
                for cnt in kiwoom.data:
                    curs.execute("""replace into opt10081
                        (code, cur_price, volume, volume_price, date,
                        open, high, low, modify_gubun, modify_ratio,
                        big_gubun, small_gubun, code_inform, modify_event, before_close) values
                        (%s, %s, %s, %s, %s,
                        %s, %s, %s, %s, %s,
                        %s, %s, %s, %s, %s)
                        """, (code, cnt[0], cnt[1], cnt[2], cnt[3],
                              cnt[4], cnt[5], cnt[6], cnt[7], cnt[8],
                              cnt[9], cnt[10], cnt[11], cnt[12]))
                time.sleep(0.5)
                while kiwoom.inquiry == '2':
                    kiwoom.setInputValue("종목코드", code)
                    kiwoom.setInputValue("기준일자", TODAY)
                    kiwoom.setInputValue("수정주가구분", '0')
                    kiwoom.commRqData("주식일봉차트조회요청", "OPT10081", 2, "1000")
                    for cnt in kiwoom.data:
                        curs.execute("""replace into opt10081
                            (code, cur_price, volume, volume_price, date,
                            open, high, low, modify_gubun, modify_ratio,
                            big_gubun, small_gubun, code_inform, modify_event, before_close) values
                            (%s, %s, %s, %s, %s,
                            %s, %s, %s, %s, %s,
                            %s, %s, %s, %s, %s)
                            """, (code, cnt[0], cnt[1], cnt[2], cnt[3],
                                  cnt[4], cnt[5], cnt[6], cnt[7], cnt[8],
                                  cnt[9], cnt[10], cnt[11], cnt[12]))
                    time.sleep(0.5)
                conn.commit()
    except Exception as e:
        print(e)
    print("END")
    sys.exit(app.exec_())
|
18,156 | 11a2f292d5c582034ee4dfa6856e4d03c4d04831 | import pygame, time, random
from pygame.locals import*
# --- Engine and asset setup ------------------------------------------------
# NOTE(review): asset paths use backslashes inside plain strings; none of
# these happen to form escape sequences, but they are non-portable outside
# Windows — consider os.path.join / raw strings.
pygame.mixer.pre_init()
pygame.mixer.init()
pygame.init()
screen = pygame.display.set_mode((1280,680)) #the screen size
backmusic = pygame.mixer.music.load("Music\Cloud_Wheels_Castle_Builder.mp3") # loads the background music
dooropen = pygame.mixer.Sound("Music\Wood_Door_Creak_Open.wav") # loads the opening door sound
doorunlocked = pygame.mixer.Sound("Music\Opening_door_with_Key.wav") # loads the unlocking door sound
startroom = pygame.image.load('Room\Starting Room.png') # load the starting room pic
key = pygame.image.load('Door\Lock_Large.png') # Load the key pic
corridor = pygame.image.load('Corridor\Corridor.png') # load the 1st corridor pic
closed = pygame.image.load('Door\Closed door.png') # load the closed door pic
opened = pygame.image.load('Door\Open Door.png') #load the open door pic
corridor2_cl = pygame.image.load('Corridor\Corridor 2 Closed.png')# loads the close door corridor 2 pic
corridor2_op = pygame.image.load('Corridor\Corridor 2 Open.png')# loads the open door corridor 2 pic
corridor34 = pygame.image.load('Corridor\Corridor 3,4.png') # loads the corridor 3/4
Leveloneexit = pygame.image.load('Door\Boss Door One Keyhole.png') # loads the 1st level exit
Longcorridor = pygame.image.load('Corridor\Long corridor.png') #loads long corridor
corridor5 = pygame.image.load('Corridor\Corridor 5.png') #loads 5th corridor
corridor6 = pygame.image.load('Corridor\Corridor 6.png')# loads 6th corridor
corridor7 = pygame.image.load('Corridor\Corridor 7.png')# loads 7th corridor
corridor8 = pygame.image.load('Corridor\Corridor 8.png')# loads 8th corridor
corridor10 = pygame.image.load('Corridor\Corridor 10.png')# loads 10th corridor
Largecorridor = pygame.image.load('Corridor\Corridor large.png')# loads the large corridor
room2 = pygame.image.load('Room\Room 2.png') # loads 2nd room
room3 = pygame.image.load('Room\Room 3.png') # loads 3rd room
rooml2 = pygame.image.load('Room\Boss Room.png')# loads the level 2 room
finalexit = pygame.image.load('Door\Boss Door Two Keyhole.png')# loads the final exit!!
FinalRoomCL = pygame.image.load('Room\Boss Room Closed.png')# loads the final room closed!!
FinalRoomOP = pygame.image.load('Room\Boss Room Open.png')# loads the final room open!!
pygame.mixer.music.play(-1) # play the music and loops it
pygame.mixer.music.set_volume(0.5) # sets the volume to half
pygame.display.set_caption("Abducted") # this adds a title to the game on the top left outside the screen in the border
# --- Shared colours and timing state ---------------------------------------
colr = (0,0,0) #Background colour
recc = (255,255,255) #box colour
sc = (100, 250 , 0) #start button colour
hc = (100, 255, 150) #highlight colour
timer = pygame.time.Clock() #lets us use the clock more
timer.tick(60) # sets the max fps to 60
font = pygame.font.Font(None, 25)
frame_count = 0 # used to keep the seconds
frame_rate = 20 # used later on to find the total seconds
def textObj(msg, text):
    """Render *msg* with the font passed as *text* and return (surface, rect).

    Bug fix: the original ignored the *text* parameter and rendered with the
    module-global `textfont`, so it only worked because every caller set that
    global first.  Rendering with the passed font is behaviour-preserving for
    all existing callers (they pass `textfont`) and removes the hidden
    dependency.
    """
    textcolour = text.render(msg, 1, recc)  # recc is the shared white colour
    return textcolour, textcolour.get_rect()
def text(msg, x, y, w, h, size):
    """Draw *msg* centred in the rect (x, y, w, h) at the given font size.

    Side effect: rebinds the module-global `textfont` to a fresh 'forte'
    font of *size*, which textObj relies on.
    """
    global textfont
    textfont = pygame.font.SysFont('forte', size) #this is for the font of the text
    textscreen, textrecc = textObj(msg, textfont) #lets the text have colour and the font
    textrecc.center = ((x+(w/2)) , (y+(h/2))) #for the position of the text
    screen.blit(textscreen, textrecc) #puts the text on the screen
def button(msg, x, y, w, h, sc, hc, a): #creates a button that works and has words on it
    """Draw a clickable labelled button at (x, y, w, h).

    sc/hc are the idle/hover colours (they shadow the module globals of the
    same names); when a == 1 a left click triggers Startaction().  Also
    rebinds the module globals `mouse` and `textfont` as side effects.
    """
    global mouse
    mouse = pygame.mouse.get_pos() #find where the mouse is
    click = pygame.mouse.get_pressed() #finds which mouse button is pressed
    if x+w > mouse[0] > x and y+h > mouse[1] > y: # check if the mouse is within the range of the button
        pygame.draw.rect(screen, hc,(x,y,w,h)) #makes the button highlighted
        if click[0] == 1 and a == 1: #check if the left button is clicked
            Startaction() #calls for this action
    else:
        pygame.draw.rect(screen, sc, (x,y,w,h)) #or it would put the button back to normal
    global textfont #the next few lines are just the text code just for the buttons
    textfont = pygame.font.SysFont('forte', 72)
    textscreen, textrecc = textObj(msg, textfont)
    textrecc.center = ((x+(w/2)) , (y+(h/2)))
    screen.blit(textscreen, textrecc)
def game(): #this is for the game to run
    """Title-screen loop: draw the title and the Start button each frame
    until the window is closed.  Uses the module-global `start` flag."""
    global start
    start = True
    while start: # as long as start is true it will run continuously
        for event in pygame.event.get(): #gets the different events in pygame
            if event.type == pygame.QUIT: #if the user quits the game, this will in turn end the program
                start = False
        screen.fill(colr) #makes the screen black
        text("ABDUCTED", 160,80,960,300, 200)
        button("Start", 455,374,350,100, sc, hc, 1) #creates a start button
        pygame.display.flip() #puts everything on to the display, which lets the user see it
def Startaction(): #this lets the user click on the start button
    """Character-selection screen: draw both character cards each frame and
    hand keyboard handling off to Cchoice().  Publishes `ChrF` as a module
    global because Cchoice() blits it again."""
    start = True
    while start:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                start = False
            if event.type == pygame.MOUSEBUTTONDOWN:#interactive button
                if (mouse[0] > 470 and mouse[0] < 810) and (mouse[1] > 355 and mouse[1] < 445):#The range of which the mouse clicks
                    print ("Hello, ") #just there to check if it works and still on the screen
        screen.fill(colr)
        pygame.draw.rect(screen, recc,[250,50,335,400],5) #Draws rectangles on the screen
        global ChrF # turns the ChrF var into a global var that other functions can use
        ChrF = pygame.image.load('Characters\Character - GirlV2.png') #Loads the image of female card
        ChrM = pygame.image.load('Characters\Character - Dude.png') # loads the male card image
        screen.blit(ChrF, (250, 50))# puts the image on the screen
        pygame.draw.rect(screen, recc,[620,50,335,400],5)
        screen.blit(ChrM, (620, 50))
        text("Choose a character using 1 or 2, Don't use Numb Pad", 50,500,1080,150, 50) #another text that tells the user what to do
        pygame.display.flip()# puts everything on the screen
        Cchoice() #calls for the Cchoice functions
def Cchoice(): # lets the user choose the character
    """Wait for the 1 or 2 key (top row, not numpad) and launch the female
    ("F") or male ("M") storyline respectively, briefly highlighting the
    chosen card first."""
    start = True
    while start:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                start = False
            if event.type == pygame.KEYDOWN:# check if the keyboard key is pressed
                if event.key == pygame.K_1: #check if the user pressed 1
                    print ("I'm your MC, I won't die soon (HOPEFULLY).")
                    pygame.draw.rect(screen, hc,[250,50,335,400],5) #highlights the box and lets it stay there for a short amount of time
                    screen.blit(ChrF, (250, 50))
                    pygame.display.update()
                    timer.tick(10)
                    pygame.time.delay(250)
                    screen.fill(colr)
                    story("F") #calls for the female story and character
                    pygame.display.update()
                    timer.tick(60)
                    start = False
                if event.key == pygame.K_2: #check if the user pressed 2 and calls for the male story
                    print ("I'm your MC, I won't die soon (HOPEFULLY).")
                    pygame.draw.rect(screen, hc, [620,50,335,400], 5)
                    pygame.display.update()
                    timer.tick(10)
                    pygame.time.delay(250)
                    screen.fill(colr)
                    story("M")
                    pygame.display.update()
                    timer.tick(10)
                    start = False
def story(a): #story function
    """Play the intro narration, then the gender-specific prologue for
    a == "M" (male) or a == "F" (female), and start the game via tutorial().

    Sets the module-global `char`: 0 = unset, 1 = female, 2 = male.
    """
    global char
    char = 0 #this is to help indicate which character is chosen
    text("Greeting, I am the goddess of Victory, Nike.", 100, 75, 1100, 100, 36) #general story
    pygame.display.update()
    timer.tick(30)
    pygame.time.delay(1500)
    text("You were kidnapped by the Lord of the Flies.", 100, 175, 1100, 100, 36)
    pygame.display.update()
    timer.tick(30)
    pygame.time.delay(1500)
    text("I am here to help you escape. Listen to me carefully", 100, 275, 1100, 100, 36)
    pygame.display.update()
    timer.tick(30)
    pygame.time.delay(1500)
    text("The time has come, wait for the your chance and escape.", 100, 375, 1100, 100, 36)
    pygame.display.update()
    timer.tick(30)
    pygame.time.delay(1500)
    text("Be cautious and Good Luck.", 100, 475, 1100, 100, 36)
    pygame.display.update()
    timer.tick(30)
    text("Click space to continue -->", 100, 575, 1100, 100, 36)
    pygame.display.update()
    timer.tick(30)
    if a == "M": #this is the male story
        char = 2
        keypress = True
        while keypress:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    keypress = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        screen.fill(colr)
                        pygame.time.delay(1500)
                        text("You awaken from a odd dream and hear people talking...", 100, 75, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("It's time to wake him up or the Boss will be mad", 100, 175, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("Prisonner 1019, it's time to wake up!", 100, 275, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("What the, where is he? Find him!! ... Wait Who are you?!", 100, 375, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        # NOTE(review): unlike the female branch, the last
                        # line is never shown — tutorial() is called before
                        # any display.update/delay.  Probably unintended.
                        text(" *Screams* ", 100, 475, 1100, 100, 36)
                        tutorial() #starts the actual game
    if a == "F":#female story
        char = 1
        keypress = True
        while keypress:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    keypress = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        screen.fill(colr)
                        pygame.time.delay(1500)
                        text("You awaken from a odd dream and hear people talking...", 100, 75, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("It's time to wake her up or the Boss will be mad", 100, 175, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("Prisonner 1, it's time to wake up!", 100, 275, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text("What the, where is she? Find her!! ... Wait Who are you?!", 100, 375, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        text(" *Screams* ", 100, 475, 1100, 100, 36)
                        pygame.display.update()
                        timer.tick(30)
                        pygame.time.delay(1500)
                        tutorial()
def Chr( x, y): #this is to load the character pic
    """Blit the selected character sprite at (x, y).

    Reads the module-global `char` set by story(): 1 = female, 2 = male;
    any other value draws nothing.
    """
    if char == 1: #female character
        Chrct = pygame.image.load('Characters\Girl drawing Larger.png') #loads the female character
        screen.blit(Chrct, (x, y))
        pygame.display.update()
        timer.tick(60)
    if char == 2: #male
        Chrct = pygame.image.load('Characters\Guy drawing Larger.png') #loads the male character
        screen.blit(Chrct, (x, y))
        pygame.display.update()
        timer.tick(60)
def tutorial():#starts the game and places the setting
    """Draw the starting room, show the movement hint, then enter the main
    movement() loop."""
    screen.fill(colr)
    screen.blit( startroom, (315, 35))
    screen.blit( corridor, (945, 220))
    screen.blit( closed, (945, 220))
    Chr( 615, 259)
    text("Use WASD to move", 100 , 0 , 100, 50, 36)
    pygame.display.flip()
    pygame.time.delay(1500)
    movement()
def movement(): #this the movement
key_place = random.randint(1,3) # this generates a random number inclusively between 1 to 3 BTW this decides the key location
print (key_place) #this prints the number
key_place2 = random.randint(1, 3)# this is 2nd key location
key_place3 = random.randint(1, 3)# this is 3rd key location
print (key_place2)#this prints the 2nd number
print (key_place3)#this prints the 3rd number
global output_string, bound, unlock, xc, yc, minutes
frame_count = 0
bound = 0 #this is for the boundaries and to change them base on the different bound value
unlock = 0 #this is too unlock the exits when the value
if bound == 0: # when bound is 0 then place the user at the given location at the start
x = 615 #location of the x axis of the character
y = 259 #and the y axis of the character
xc = 0 #this is for the movement of the character (left to right)
yc = 0 #this is for the movement of the character (up to down)
sup =1 # this is to do speed boost
start = True
while start:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = False
if event.type == pygame.MOUSEBUTTONDOWN:
mouse = pygame.mouse.get_pos()
print (mouse)
if event.type == pygame.KEYDOWN:
if event.key == K_w: #when the play presses w, a, s, or d the character will move up(w ) 7, left(a) 7, down(s) 7, and right(d) 7
yc -= 7
if event.key == K_a:
xc -= 7
if event.key == K_s:
yc += 7
if event.key == K_d:
xc += 7
if event.type == pygame.KEYUP: #when the user relase the key the character stops moving
if event.key == K_w:
yc = 0
if event.key == K_a:
xc = 0
if event.key == K_s:
yc = 0
if event.key == K_d:
xc = 0
x += (xc * sup)#makes the character move by changing the location on the x and y axis
y+= (yc * sup)
# The following is basically the 1st level, and the areas of which the screen will change
if x > 810 and x < 910 and y > 480 and y < 600 and bound == 0: # if you are in this area and bound is o, then it will
pygame.mixer.Sound.play(dooropen)# plays door open sound effects
bound +=1
print ("You are unbound!")
if x >= 1280 and bound == 1: # if it is bound 1 and this area, then it will do this:
x = 35
bound = 2
if x >= 1000 and bound == 2: # if it is bound 2 and this area, then it will do this:
bound = 3
text("It seems that the Boss door is locked. Go find the Key to unlock it.", 200, 50, 900, 100, 36)
pygame.display.flip()
if x >= 840 and x <= 1090 and y <= 211 and bound == 3: # if it is bound 3 and this area, then it will do this: (this is for the top part)
bound = 4
y = 600
if y >= 680 and x >= 250 and x <= 500 and bound == 3:# if it is bound 3 and this area, then it will do this: (this is for bottom)
bound = 5
y = 35
if x >= 1278 and bound == 5: # if it is bound 5 and this area, then it will do this: (right area)
bound = 6
x = 35
if y >= 660 and bound == 5: # if it is bound 1 and this area, then it will do this: (bottom area)
bound = 7
y = 35
# This lets the user back to the previous screen
if bound == 4 and y >= 680:
bound = 3
y = 215
if bound == 5 and y <= 0:
bound = 3
y = 600
if bound == 6 and x <= 0:
bound = 5
x = 1278
if bound == 7 and y <= 0:
bound = 5
y = 555
#This is for unlocking the 2nd door
if x >= 370 and x <= 392 and y >= 165 and y <= 175 and key_place == 1 and unlock == 0 and bound == 4:
unlock = 1
print("You did it, You got your freedom!")
pygame.mixer.Sound.play(dooropen)
if x >= 700 and x <= 715 and y >= 69 and y <= 79 and key_place == 2 and unlock == 0 and bound == 6:
unlock = 1
print("You did it, You got your freedom!")
pygame.mixer.Sound.play(dooropen)
if x >= 657 and x < 670 and y >= 336 and key_place == 3 and unlock == 0 and bound == 7:
unlock = 1
print("You did it, You got your freedom!")
pygame.mixer.Sound.play(dooropen)
#only the stuff above is for the unlocking the 2nd door
# The following is for the level 2 area and are the areas of which the screen will change
if x >= 1263 and bound == 3 and unlock == 1:
bound = 8
x = 35
sup = 2
print("Just Kidding, There's still more!")
pygame.mixer.music.stop() #ends the first song and goes to the next
backmusic = pygame.mixer.music.load("The_New_Order.mp3")
pygame.mixer.music.play(-1)
pygame.mixer.music.set_volume(0.5)
# These are long hallways :)
if x >= 1270 and bound == 8:
bound = 9
x = 10
if x >= 1270 and bound == 9:
bound = 10
x = 10
if x >= 1270 and bound == 10:
bound = 11
x = 10
# This lets the user back to the previous screen
if x <= 0 and bound == 11:
bound = 10
x = 1260
# This lets the user back to the previous screen
if x <= 0 and bound == 10:
bound = 9
x = 1260
# This lets the user back to the previous screen
if x <= 0 and bound == 9:
bound = 8
x = 1260
if y <= 200 and bound == 11:
y = 670
bound = 12
# This lets the user back to the previous screen
if bound == 12 and y >= 680 and unlock != 3:
y = 210
bound = 11
if bound == 12 and y >= 680 and unlock == 3:
y = 210
bound = 20
if bound == 12 and x <= 0:
x = 1265
bound = 13
# This lets the user back to the previous screen
if bound == 13 and x >= 1280:
x = 10
y = 306
bound = 12
if bound == 13 and x <= 0:
x = 1260
bound = 14
# This lets the user back to the previous screen
if bound == 14 and x >= 1280:
x = 10
bound = 13
if bound == 14 and x <= 0:
x = 1260
bound = 15
# This lets the user back to the previous screen
if bound == 15 and x >= 1280:
x = 10
bound = 14
if bound == 15 and y <= 208:
y = 670
bound = 18
if bound == 14 and x <= 195 and y >= 680:
y = 15
bound = 16
# This lets the user back to the previous screen
if bound == 16 and y <= 0:
y = 670
bound = 14
if bound == 14 and x >= 1036 and y >= 680:
y = 20
bound = 17
# This lets the user back to the previous screen
if bound == 17 and y <= 0:
y = 670
bound = 14
if bound == 17 and y >= 680:
y = 10
bound = 19
# This lets the user back to the previous screen
if bound == 18 and y >= 680:
y = 280
bound = 15
# This lets the user back to the previous screen
if bound == 19 and y <= 0:
y = 670
bound = 17
#This is the unlocking of the 3rd door with two keys
if x >= 597 and x <= 697 and y >=124 and y >= 174 and key_place2 == 1 and bound == 12 and unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
unlock += 1
key_place2 = 0
print ("You got one of them! One to go!")
if x >= 905 and x <= 1005 and y >=124 and y >= 174 and key_place2 == 2 and bound == 13 and unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
unlock += 1
key_place2 = 0
print ("You got one of them! One to go!")
if x >= 615 and x <= 715 and y >=351 and y >= 401 and key_place2 == 3 and bound == 16 and unlock >= 1:
if unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
print ("You got one of them! One to go!")
if unlock == 2:
pygame.mixer.Sound.play(dooropen)
print ("You got both of them! Now go and be free!")
unlock += 1
key_place2 = 0
if x >= 98 and x <= 198 and y >= 266 and y <=3316 and key_place3 == 1 and bound == 15 and unlock >= 1:
if unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
print ("You got one of them! One to go!")
if unlock == 2:
pygame.mixer.Sound.play(dooropen)
print ("You got both of them! Now go and be free!")
unlock += 1
key_place3 = 0
if x >= 660 and x <= 760 and y >= 292 and y <=342 and key_place3 == 2 and bound == 18 and unlock >= 1:
if unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
print ("You got one of them! One to go!")
if unlock == 2:
pygame.mixer.Sound.play(dooropen)
print ("You got both of them! Now go and be free!")
unlock += 1
key_place3 = 0
if x >= 294 and x <= 394 and y >= 294 and y <=344 and key_place3 == 3 and bound == 19 and unlock >= 1:
if unlock == 1:
pygame.mixer.Sound.play(doorunlocked)
print ("You got one of them! One to go!")
if unlock == 2:
pygame.mixer.Sound.play(dooropen)
print ("You got both of them! Now go and be free!")
unlock += 1
key_place3 = 0
#This to go to the end room
if bound == 20 and x >= 1200:
x = 580
y = 271
bound = 21
pygame.mixer.music.stop()
backmusic = pygame.mixer.music.load("Frozen_Star.mp3")# loads the third song
pygame.mixer.music.play(-1)#plays it at half volume
pygame.mixer.music.set_volume(0.5)
# The boundaries for the different screens, screens are defined by the var bound and its value
if bound == 0:
if x <= 315: #these if statement are for the boundry of the charcter, BTW the top left corner is (0,0)
x += 7
if x >= 830:
x -= 7
if y <= 180:
y += 7
if y >= 500:
y -= 7
if bound == 1:
if x <= 315: #these if statement are for the boundry of the charcter, BTW the top left corner is (0,0)
x += 7
if x >= 830 and y >= 345:
if y >= 345:
y -= 7
if x >= 830 and y <= 230:
if y <=230:
y += 7
if x >= 830 and y >350:
x -=7
if y <= 180:
y += 7
if y >= 500:
y -= 7
if bound == 2:
if x <= 35:
x += 7
if x >= 1100:
x -= 7
if y <= 231:
y += 7
if y >= 345:
y -= 7
if bound == 3:
if x <= 35:
x += 7
if x >= 1100:
x -= 7
if y >= 345:
y -= 7
if x <= 840 and y <= 210:
x += 7
if x >= 965 and y <= 210:
x -= 7
if x >= 252 and x <= 377:
if y >= 340:
y += 7
if x <= 252 and y >= 349:
x += 7
if x >= 377 and y >= 349:
x -=7
if x <= 853 and y <= 231:
y += 7
if x >= 1089 and y <= 231:
y += 7
if unlock == 1:
if x >= 1095:
x += 7
if bound == 4:
if x <= 840 and y >= 460:
x += 7
if x >= 965:
x -= 7
if y <= 103:
y += 7
if y >= 334 and x <= 721:
y -= 7
if x <= 349:
x += 7
if bound == 5:
if x <= 259:
x += 7
if x >= 370:
x -= 7
if y >= 181 and x >= 259:
x += 7
if y <= 203 and x >= 483:
y += 7
if x >= 876:
x -= 7
if (y >= 504 and x <= 607) or (x >= 734 and y >= 504) :
y -= 7
if y >= 329 and y <= 434 and x >= 875:
x += 7
if y <= 329 and x >= 990:
y += 7
if y >= 434 and x >= 990:
y -= 7
if y >= 490 and x <= 620:
x += 7
if y >= 490 and x >= 735:
x -= 7
if bound == 6:
if y <= 320 and x <= 250:
y += 7
if y >= 440 and x <= 250:
y -= 7
if y >= 513:
y -= 7
if x >= 756:
x -= 7
if x <= 250 and y == 505:
x += 7
if x <= 250 and y <= 198:
x += 7
if y <= 76:
y += 7
if bound == 7:
if x <= 620:
x += 7
if x >= 735:
x -= 7
if y >= 371:
y -= 7
if bound == 8:
if x <= 35:
x += 14
if y <= 246 and x <= 223:
y += 14
if y >= 336 and x <= 223:
y -= 14
if x <= 313 and y <= 156:
x += 14
if x <= 313 and y >= 425:
x += 14
if y <= 96:
y += 14
if y >= 509:
y -= 14
if x >= 818 and y >= 404:
x -= 14
if x >= 818 and y <= 81:
x -= 14
if x >= 903 and y >= 333:
y -= 14
if x >= 945 and y <= 207:
y += 14
if x >= 819 and y <= 120:
x -= 14
if bound == 9:
if y <= 236:
y += 14
if y >= 334:
y -= 14
if bound == 10:
if y <= 236:
y += 14
if y >= 334:
y -= 14
if bound == 11:
if x >= 1080:
x -= 14
if y >= 341:
y -= 14
if y <= 236 and x < 841:
y += 14
if y <= 236 and x >= 1080:
y += 14
if bound == 12:
if y <= 110:
y += 14
if x >= 961:
x -= 14
if y >= 318 and x <= 841:
y -= 14
if y <= 255 and x <= 338:
x += 14
if y <= 236 and x < 338:
y += 14
if bound == 13:
if y <= 110:
y += 14
if y >= 335:
y -= 14
if x <= 663 and y <= 110:
x += 14
if y <= 235 and x <= 633:
y += 14
if bound == 14:
if y <= 220:
y += 14
if y >= 320 and x <= 1022 and x >= 220:
y -= 14
if y >= 320 and x <= 84 and x <= 0:
y -= 14
if x <= 1036 and y >= 460:
x += 14
if y >= 460 and x > 1147:
x -= 14
if y >= 460 and x < 83:
x += 14
if y >= 460 and x >= 196:
x -= 14
if bound == 15:
if x <= 924 and y <= 220:
y += 14
if x >= 1160 and y <= 220:
y += 14
if y >= 334:
y -= 14
if x <= 97:
x += 14
if bound == 16:
if x <= 94:
x += 14
if x >= 206 and y <= 280:
x -= 14
if y >= 519:
y -= 14
if y <= 196 and x >= 330:
y += 14
if x >= 696:
x -= 14
if bound == 17:
if x <= 1046:
x += 14
if x >= 1134:
x -= 14
if bound == 18:
if x <= 840 and y >= 460:
x += 14
if x >= 965:
x -= 14
if y <= 103:
y += 14
if y >= 334 and x <= 721:
y -= 14
if x <= 349:
x += 14
if bound == 19:
if x <= 1036 and y <= 110:
x += 14
if x <= 1036 and y >= 470:
x += 14
if x >= 1134:
x -= 14
if x <= 1036 and y <= 220:
y += 14
if x <= 1036 and y >= 334:
y -= 14
if x <= 250:
x += 14
if bound == 21:
if x <= 328:
x += 14
if x >= 825:
x -= 14
if y >= 509:
y -= 14
if x <= 502 and y <= 138:
y += 14
if x >= 747 and y <= 138:
y += 14
if bound == 11:
if y >= 341:
y -= 14
if y <= 236 and x < 841:
y += 14
if y <= 236 and x >= 1080:
y += 14
if bound == 22:
if x <= 328:
x += 14
if x >= 825:
x -= 14
if y >= 509:
y -= 14
if x <= 502 and y <= 138:
y += 14
if x >= 747 and y <= 138:
y += 14
#these are the graphics and whats going on in each screen
if bound == 0:
screen.fill(colr)
#this is the timer
total_seconds = frame_count // frame_rate # find the total seconds
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds) # puts the minutes adn seconds in this format
time = font.render(output_string, True, recc) # renders the output_string as a pic
screen.blit(time, [1100, 35])#allows it to be put on the screen
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1#adds one seconds
# only the above is the timer
screen.blit( corridor, (945, 220))
screen.blit( closed, (945, 220))
screen.blit( startroom, (315, 35))
text("Go to the key to unlock the door", 200, 10, 100, 50, 36)
text("All you have to do is ", 200, 70, 100, 50, 36)
text("stand at the key and you will hear", 210, 100, 100, 50, 36)
text("the door unlock or open ", 200, 130, 100, 50, 36)
Chr( x, y)
screen.blit(key, (820, 510))
pygame.display.flip()
if bound == 1:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit( corridor, (945, 220))
screen.blit( opened, (945, 220))
screen.blit( startroom, (315, 35))
Chr( x, y)
screen.blit(key, (820, 510))
pygame.display.update()
timer.tick(60)
if bound == 2:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
text("It seems the door is locked. Go find the Key to unlock it.", 200, 50, 900, 100, 36)
screen.blit( corridor2_cl, (35, 220))
screen.blit( Leveloneexit, (1235, 205,))
Chr( x, y)
pygame.display.update()
timer.tick(60)
if bound == 3:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit( corridor2_op, (35, 220))
screen.blit( corridor34, (250, 470,))
screen.blit( Leveloneexit, (1235, 205,))
text("If you see black holes in the wall, they are gates to go up!", 200, 50, 900, 100, 36)
if unlock == 1: #basically looks if the var unlock is 1 and gets rid of the exit door
pygame.draw.rect(screen, recc, [1235, 205, 45, 280])
Chr( x, y)
pygame.display.update()
timer.tick(60)
if bound == 4:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(room2 ,(340, 30))
screen.blit(corridor5, (840, 460))
Chr(x,y)
if key_place == 1: #any lines with if key_place/key_place2/key_place3 is checking for the value of the varible respectively and then puts the key on the screen if it gives true
screen.blit(key, (410, 199))
pygame.display.update()
timer.tick(60)
if bound == 5:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor6,(250, 0))
screen.blit(corridor7,(610, 630))
screen.blit(corridor8,(1000, 320))
screen.blit(room3,(250, 200))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 6:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor10, (0, 320,))
screen.blit(rooml2, (250, 35,))
Chr(x, y)
if key_place == 2:
screen.blit(key, (736, 79))
pygame.display.update()
timer.tick(60)
if bound == 7:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor6, (610, 0))
Chr(x, y)
if key_place == 3:
screen.blit(key, (687, 366))
pygame.display.update()
timer.tick(60)
if bound == 8:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
screen.blit(rooml2, (315, 35))
Chr(x,y)
pygame.display.update()
timer.tick(60)
if bound == 9:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 10:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 11:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
text("This is it, The Boss Door. Find the Two Keys to open it and excape!", 200, 50, 900, 100, 36)
text("Go up to find them", 127, 517, 1000, 100, 36)
screen.blit( corridor2_op, (0, 220))
screen.blit( finalexit, (1200, 205,))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 12:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
screen.blit(corridor5, (840, 460))
screen.blit(room2 ,(340, 30))
pygame.draw.rect(screen, colr, [1090, 0, 190, 680 ])
Chr(x, y)
if key_place2 == 1:
screen.blit(key,(597, 124))
pygame.display.update()
timer.tick(60)
if bound == 13:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
screen.blit( room2, (650, 35))
Chr(x, y)
if key_place2 == 2:
screen.blit(key,(905, 124))
pygame.display.update()
timer.tick(60)
if bound == 14:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor, (0, 220))
screen.blit( corridor34, (78, 470))
screen.blit( corridor34, (1020, 470))
Chr(x, y)
pygame.display. update()
timer.tick(60)
if bound == 15:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor2_op, (80, 220))
Chr(x, y)
if key_place3 == 1:
screen.blit(key,(122, 292))
pygame.display. update()
timer.tick(60)
if bound == 16:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor6,(78, 0))
screen.blit(room3,(78, 220))
screen.blit(Largecorridor,(1020, 0))
Chr(x, y)
if key_place2 == 3:
screen.blit(key, (643, 378))
pygame.display.update()
timer.tick(60)
if bound == 17:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(corridor6,(78, 0))
screen.blit(room3,(78, 220))
screen.blit(Largecorridor,(1020, 0))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 18:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(room2 ,(340, 30))
screen.blit(corridor5, (840, 460))
Chr(x, y)
if key_place3 == 2:
screen.blit(key,(660, 292))
pygame.display.update()
timer.tick(60)
if bound == 19:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit(Longcorridor,( 250, 220))
screen.blit(Largecorridor,(1020, 0))
pygame.draw.rect(screen, colr, [1270, 0, 10, 680])
Chr(x, y)
if key_place3 == 3:
screen.blit(key,(294, 290))
pygame.display.update()
timer.tick(60)
if bound == 20:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit( corridor2_op, (0, 220))
if unlock == 3:
pygame.draw.rect(screen, recc, [1200, 205, 45, 280])
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 21:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit( FinalRoomCL, (315, 35))
text("You did it, the Lord of the Flies and his minions have fled! You can Leave!", 0 , 460, 1280, 220, 36)
Chr(x, y)
pygame.display.update()
timer.tick(60)
pygame.time.delay(1000)
bound = 22
if bound == 22:
screen.fill(colr)
total_seconds = frame_count // frame_rate
minutes = total_seconds // 60
seconds = total_seconds % 60
output_string = "{0:02}:{1:02}".format(minutes, seconds)
time = font.render(output_string, True, recc)
screen.blit(time, [1100, 35])
total_seconds = (frame_count // frame_rate)
if total_seconds < 0:
total_seconds = 0
frame_count += 1
screen.blit( FinalRoomOP, (315, 35))
Chr(x, y)
pygame.display.update()
timer.tick(60)
if bound == 22 and y <= 138:
screen.fill(colr)
text(" Enter your name in the Shell or Command Prompt", 500, 100, 300, 200, 36)
pygame.display.update()
timer.tick(60)
name = input("Enter Name: ") #askes for name
bound = 23
if bound == 23:
score = 100 # this is the max score and the beginning score
frame_count += 0 #Stops the timer
if minutes >= 7: # if it 7 min, the minutes var is subtracted by 6 to do score calculations
minutes -= 6
score -= - minutes # calculates score
screen.fill(colr)
text("Final Score", 500, 100, 300, 200, 36)
text((name), 500, 200, 300, 200, 60)
text(str(score), 500, 300, 300, 200, 72)
pygame.display.update()
timer.tick(60)
game()
pygame.quit()
|
18,157 | 48ee30467e2da72c4a4380953fabfbb8cf55c33c | # -*- coding: utf-8 -*-
import os
import fnmatch
from bs4 import BeautifulSoup
import re
import ftfy
# from spacy_basics import initialise_spacy
import csv
import glob
import json
from collections import Counter
from PIL import Image
from tesserocr import PyTessBaseAPI
from enchant.checker import SpellChecker
from tribes_import import initialise_tribes
def find_files(directory, pattern):
    """Yield full paths of files under *directory* whose basename matches *pattern*.

    The tree is walked recursively with ``os.walk``; *pattern* is a
    shell-style glob understood by ``fnmatch`` (e.g. ``'*.hocr'``).
    """
    for dirpath, _subdirs, filenames in os.walk(directory):
        matching = (name for name in filenames
                    if fnmatch.fnmatch(name, pattern))
        for name in matching:
            yield os.path.join(dirpath, name)
'''
Per folder:
read each image
check if there is ocr data
if not ocr the image
process hocr
generate full text and hocr
generate entities
gather stats
'''
def get_images(folder_name, ext):
    '''
    Return a list of images in folder with extension *ext*.

    macOS AppleDouble companion files (basenames starting with '._')
    are skipped.  Always returns a list -- possibly empty -- so callers
    can iterate over the result safely.
    '''
    image_list = []
    for filename in find_files(folder_name, '*.' + ext):
        # Skip resource-fork companions such as '._IMG_0001.jpg'.
        if not os.path.basename(os.path.normpath(filename)).startswith('._'):
            image_list.append(os.path.normpath(filename))
    # Bug fix: the original returned the list only when non-empty and fell
    # through to an implicit None otherwise, which made process_roll crash
    # with "TypeError: 'NoneType' object is not iterable" on empty folders.
    # Returning the (possibly empty) list keeps truthiness checks working.
    return image_list
def ocr_image(imagefile, hocr_file, write=True):
    """OCR *imagefile* with Tesseract and return the hOCR markup.

    When *write* is true the markup is also persisted to *hocr_file*,
    ASCII-encoded with non-ASCII characters escaped as XML character
    references.
    """
    with PyTessBaseAPI() as api:
        api.SetImage(Image.open(imagefile))
        hocr_markup = api.GetHOCRText(0)
        if write:
            with open(hocr_file, 'w') as out:
                out.write(hocr_markup.encode('ascii', 'xmlcharrefreplace'))
        return hocr_markup
def ocr_parse(hocr_data, text_file=None):
    """Extract plain text and an average word confidence from hOCR markup.

    Walks every ``ocr_line`` span and its ``ocrx_word`` children, fixes
    mojibake in each word with ftfy, and accumulates the per-word
    confidence that the OCR engine stores in each word span's ``title``
    attribute (second ';'-separated field, last whitespace token).

    Returns a 3-tuple ``(average_confidence, typewritten, text)``.
    *typewritten* is a heuristic: pages averaging below 60 confidence, or
    above 70 but with fewer than 10 characters of text (likely blank or
    noise), are flagged False.

    If *text_file* is given the normalised text is also written to it
    (ASCII with replacement characters; Python 2 byte-string write).
    """
    text_list = []
    soup = BeautifulSoup(hocr_data, "html.parser")
    lines = soup.find_all("span", class_="ocr_line")
    count = 0
    conf_total = 0
    for line in lines:
        # Re-parse each line so find_all only sees this line's words.
        line_soup = BeautifulSoup(
            str(line), "html.parser")
        words = line_soup.find_all(
            "span", class_="ocrx_word")
        for word in words:
            count += 1
            word_soup = BeautifulSoup(
                str(word), "html.parser")
            text_list.append(
                ftfy.fix_text(word_soup.text))
            # title looks like "bbox ...; <conf field>" -> take the last
            # token of the second ';'-separated field as an int.
            confidence = int(
                word_soup.span['title'].split(';')[1].split()[-1])
            conf_total = conf_total + confidence
    ocr_text = ' '.join(text_list)
    # Collapse every run of whitespace to a single space.
    ocr_text_sub = re.sub(r'\s+', ' ', ocr_text)
    if text_file:
        with open(text_file, 'w') as texta:
            texta.write(
                ocr_text_sub.encode('ascii', 'replace'))
    if conf_total > 0 and count > 0:
        # NOTE: under Python 2 this is integer division, so the average
        # is truncated toward zero.
        average_confidence = (conf_total / count)
    else:
        average_confidence = 0
    if average_confidence < 60:
        typewritten = False
    elif (average_confidence > 70 and len(ocr_text_sub) < 10):
        # High confidence but almost no text: treat as not typewritten.
        typewritten = False
    else:
        typewritten = True
    return average_confidence, typewritten, ocr_text_sub
def spelling(text):
    """Estimate the spelling accuracy of *text* as an integer percentage.

    Runs the en_US enchant checker over the text and returns
    ``int(100 * (1 - misspelled/words))``, or None when *text*
    contains no words at all.
    """
    checker = SpellChecker("en_US")
    checker.set_text(text)
    tokens = text.split()
    misspelled = [error.word for error in checker]
    if not tokens:
        return None
    fraction_wrong = float(len(misspelled)) / float(len(tokens))
    return int(100 * (1 - fraction_wrong))
def nlp_image(text, imagefile, parser, matcher, id):
    '''
    Do entity extraction and entity stats gathering for
    the text for an image.

    Runs *parser* (a spaCy-style pipeline) over *text* -- coerced to
    unicode, so this is Python 2 code -- then collects custom *matcher*
    hits plus the parser's own named entities.

    Returns a 3-tuple:
      * the number of parser-detected entities,
      * a dict of {entity label: occurrence count as string},
      * CSV-ready row dicts with keys 'Entity_Orth', 'Entity_Label',
        'Source' (the *id* of the source image).

    NOTE(review): *imagefile* is accepted but never used here -- confirm
    whether it can be dropped from callers.
    '''
    small_list = []
    rows = []
    parsed = parser(unicode(text))
    matches = matcher(parsed)
    # Custom matcher hits (presumably the tribe-name gazetteer loaded
    # in main -- verify against initialise_tribes).
    for ent_id, label, start, end in matches:
        '''
        Grab the dict from the entity
        '''
        entity = matcher.get_entity(ent_id)
        orth = parser.vocab.strings[label]
        ent_type = entity["ent_type"]
        small_list.append(entity["ent_type"])
        cont_row = {'Entity_Orth': orth.encode('utf-8'),
                    'Entity_Label': ent_type,
                    'Source': id}
        rows.append(cont_row)
    # Parser-native entities, excluding numeric/temporal labels and very
    # short orths (mostly OCR noise).
    for ent in parsed.ents:
        if ent.label_ not in ['TIME', 'PERCENT',
                              'CARDINAL', 'ORDINAL',
                              'QUANTITY', 'MONEY'] and len(ent.orth_) > 3:
            small_list.append(ent.label_)
            cont_row = {'Entity_Orth': ent.orth_.encode('utf-8'),
                        'Entity_Label': ent.label_,
                        'Source': id}
            rows.append(cont_row)
    number_ents = len(parsed.ents)
    # Tally how often each entity label occurred on this page.
    c = Counter(small_list)
    stats = {}
    for item in c.items():
        z = list(item)
        stats[str(z[0])] = str(z[1])
    return number_ents, stats, rows
def process_image(imagefile, parser, matcher, folder_name):
    """Build the per-page summary dict and entity rows for one image.

    Derives a IIIF image id from *folder_name* and the file's basename,
    loads (or generates) the hOCR data, then runs ocr_parse, nlp_image
    and spelling over it.

    Returns ``(page, rows)`` where *page* is a summary dict (confidence,
    entity stats, text length, id, ...) and *rows* are CSV-ready entity
    dicts.  NOTE(review): when *imagefile* does not exist the function
    falls off the end and returns None, which would break callers that
    tuple-unpack the result -- confirm inputs always exist.
    """
    # text_file = imagefile.replace('jpg', 'txt')
    base = os.path.basename(imagefile).replace('.jpg', '')
    print 'Base: %s' % base
    # Image sequence names like 'roll_0001' keep only the numeric part
    # after the underscore when building the IIIF id.
    if '_' in base:
        parts = base.split('_')
        id = ''.join(
            ['https://dlcs-ida.org/iiif-img/2/1/',
             folder_name + '_' + parts[1]])
    else:
        id = ''.join(
            ['https://dlcs-ida.org/iiif-img/2/1/',
             folder_name + '_' + base])
    # In this pipeline *imagefile* is already the .hocr file.
    hocr_file = imagefile  # .replace('jpg', 'hocr')
    if os.path.exists(imagefile):
        if os.path.exists(hocr_file):
            # Reuse existing OCR output instead of re-running Tesseract.
            hocr = open(hocr_file, 'r')
            hocr_data = hocr.read()
            hocr.close()
        else:
            hocr_data = ocr_image(imagefile, hocr_file, write=False)
        confidence, typewritten, text = ocr_parse(hocr_data)
        number_ents, stats, rows = nlp_image(text, imagefile, parser, matcher, id)
        accuracy = spelling(text)
        page = {}
        page['Average_confidence'] = confidence
        if typewritten:
            page['Entity_stats'] = stats
            page['Total_entities_found'] = number_ents
            if accuracy:
                page['Spelling_accuracy'] = accuracy
        else:
            # Handwritten/noise pages get no entity data.
            page['Entity_stats'] = {}
            page['Total_entities_found'] = 0
        page['Typescript'] = typewritten
        page['Full_text_length'] = len(text)
        page['id'] = id.replace('.hocr', '')
        print json.dumps(page, indent=4)
        return page, rows
def process_roll(folder_name, parser, matcher, writer, json_write=False):
    '''
    Process a folder of images for a roll.

    Finds every .hocr file under *folder_name*, runs process_image on
    each, streams the entity rows into the CSV *writer*, and -- when
    *json_write* is true -- dumps the per-page summaries to
    ``<folder>/<roll>.json``.

    NOTE(review): assumes *folder_name* ends with '/' (the roll name is
    taken from split('/')[-2]) and that get_images finds at least one
    file -- confirm with the glob pattern used by main().
    '''
    folder_base = str(folder_name.split('/')[-2])
    json_file = os.path.join(
        folder_name, folder_base + '.json')
    summary = []
    images = get_images(folder_name, 'hocr')
    for image in images:
        page, rows = process_image(image, parser, matcher, folder_base)
        if rows:
            for row in rows:
                writer.writerow(row)
        if page:
            summary.append(page)
    if json_write:
        with open(json_file, 'w') as outfile:
            print 'Writing %s' % json_file
            json.dump(
                summary, outfile, indent=4,
                sort_keys=True, separators=(',', ':'))
def main():
    """Entry point: build the tribe matcher, then process every roll folder.

    Writes a single CSV of entity rows ('output_tribes.csv') spanning all
    rolls, plus one JSON summary per roll folder.  Paths are hard-coded
    for the original author's machine.
    """
    csv_file = '/Users/matt.mcgrattan/Documents/tribe_names.csv'
    matcher, parser = initialise_tribes(csv_file)
    # parser = initialise_spacy()
    # 'wb' binary mode is the Python 2 csv-module idiom.
    with open('output_tribes.csv', 'wb') as f:
        fieldnames = ['Entity_Orth', 'Entity_Label', 'Source']
        writer = csv.DictWriter(f, fieldnames=fieldnames, dialect='excel')
        writer.writeheader()
        # folders = glob.glob('/Volumes/IDA-IMAGES/source/[M, T]-*/')
        # Match roll folders starting 'M-' or 'T-'; trailing '/' keeps
        # only directories.
        folders = glob.glob(
            '/users/matt.mcgrattan/Dropbox/Digirati/text/[M,T]-*/')
        for folder in folders:
            print 'Folder: %s' % folder
            # folders = ['/Volumes/IDA-IMAGES/source/M-1011_R-09/']
            process_roll(
                folder, parser, matcher, writer, json_write=True)
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
|
18,158 | affb86778591bc1029b3efad37c9eecacd40843c | # -*- coding: UTF-8 -*-
'''
Python 2 scratch script exercising basic string operations.

author: tian le
'''
# NOTE: Python 2 only (print statement, cmp builtin).
print 'Hello'[2:6]  # slice [2:6] -> 'llo'; a stop past the end is clamped
print 'He' in 'Hello'  # substring membership -> True
print 'Hello' * 9  # string repetition: 'Hello' nine times
h = 'Hello'
print max(h)  # largest character by code point -> 'o'
print cmp('a', 'b')  # Python 2 three-way compare -> -1 ('a' < 'b')
print ' Hello '.strip()  # trim surrounding whitespace -> 'Hello'
print '{1}{0}'.format(0, 1111)  # positional fields swapped -> '11110'
# var1 = 'Hello \\ \a \\ \
# World!'
# print "更新字符串 :- ", var1[:6] + 'Runoob!'
# print var1
# print 'llo' in var1
# print 'llo' not in var1
# print True, False
# print "%d %s %%%%" % (231, 'HJK')
# print "F".encode('UTF-8')
# print "田乐".decode('UTF-8')
# print 'sdsd'.upper()
# print 'GldfKKK'.lower()
# print "DDD" + "FDSDSD"
# NAME = '''
# 1qwqwqw\
# 2qwqw
# 3qwqw
# 4qwwwwww=-
# '''
# print NAME
|
18,159 | b9687e9fdc1626a2b698fadc8c92dc2c133c8d66 | """Views for Django Imager."""
from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Render the imager site landing page."""

    template_name = "imagersite/home.html"

    def get_context_data(self, **kwargs):
        """Expose the requesting user to the template as ``username``."""
        base_context = super(HomeView, self).get_context_data(**kwargs)
        current_user = base_context['view'].request.user
        # NOTE(review): request.user is typically truthy even for
        # anonymous visitors, so the empty-dict branch rarely fires --
        # confirm whether is_authenticated was intended.
        if not current_user:
            return {}
        return {"username": current_user}
|
18,160 | b68184bf345a813b24bd9ddb1b90574426f583ed | from .logger import logger
from .info_codes import EInfoCode
from .error_codes import EErrorCode
from .debug_codes import EDebugCode
|
18,161 | 265fb8b197918baf1f16002a156045a3c1d09d6f | from decimal import Decimal
from lxml import etree
from corpora.utils.db_utils import SENTENCE_COLLECTION
from corpora.utils.format_utils import (
ANNOTATION_OPTION_SEP, ANNOTATION_TAG_SEP,
TECH_REGEX, get_audio_link, get_audio_annot_div,
get_annot_div, get_participant_tag_and_status
)
from corpora.utils.elan_utils import split_ann_for_db
def get_transcript_and_tags_dicts(words):
    """Assemble a transcript string plus token-indexed tag dictionaries.

    Returns a 3-tuple:
      * the space-joined transcript of every word's ``transcription_view``,
      * {token index: [standartization]} for tokens carrying a
        ``standartization_view``,
      * {token index: [joined lemmata, joined tag strings]} built from
        the token's ``annotations``.

    Tokens matched by TECH_REGEX stay in the transcript but are skipped
    by the index numbering.
    """
    token_views = []
    normz_by_index = {}
    annots_by_index = {}
    index = -1
    for word in words:
        view = word['transcription_view']
        token_views.append(view)
        # Technical tokens are rendered but never indexed/annotated.
        if TECH_REGEX.match(view) is not None:
            continue
        index += 1
        standartization = word.get('standartization_view')
        if standartization is None:
            continue
        normz_by_index[index] = [standartization]
        lemmata = []
        tag_views = []
        # todo: 'annotations' will now contain only one element
        for annotation in word.get('annotations', []):
            lemma = annotation['lemma_view']
            if lemma not in lemmata:
                lemmata.append(lemma)
            tag_views.append(annotation['tags_view'])
        annots_by_index[index] = [
            ''.join(lemmata),
            ''.join(tag_views)
        ]
    return ' '.join(token_views), normz_by_index, annots_by_index
def db_response_to_html(results, reverse=False):
    """Render DB search results as HTML divs plus pagination anchor info.

    ``page_info`` records the elan/audio-start of the first and last rendered
    items; when ``reverse`` is set the item order and the anchors are swapped.
    """
    if results is None:
        return '<div id="no_result">Empty search query.</div>', {}
    item_divs = []
    page_info = {}
    for idx, item in enumerate(results):
        if idx == 0:
            page_info['min'] = {
                'elan': item['elan'],
                'audio_start': item['audio']['start']
            }
        transcript, normz_tokens_dict, annot_tokens_dict = get_transcript_and_tags_dicts(item['words'])
        participant, participant_status = get_participant_tag_and_status(item['speaker'], item['tier'])
        annot_div = get_annot_div(
            tier_name=item['tier'],
            dialect=item['dialect'],
            participant=participant,
            transcript=transcript,
            normz_tokens_dict=normz_tokens_dict,
            annot_tokens_dict=annot_tokens_dict,
            elan_file=item['elan']
        )
        audio_annot_div = get_audio_annot_div(item['audio']['start'], item['audio']['end'])
        annot_wrapper_div = '<div class="annot_wrapper %s">%s%s</div>' % (participant_status, audio_annot_div, annot_div)
        item_divs.append(get_audio_link(item['audio']['file']) + annot_wrapper_div)
        # Overwritten each iteration, so it ends up describing the last item.
        page_info['max'] = {
            'elan': item['elan'],
            'audio_start': item['audio']['start']
        }
    if reverse and item_divs:
        item_divs.reverse()
        page_info['min'], page_info['max'] = page_info['max'], page_info['min']
    return ''.join(item_divs) or '<div id="no_result">Nothing found.</div>', page_info
def process_html_token(token_el):
    """Convert one <token>/<tech>/<note> HTML element back into a word dict."""
    word_dict = {}
    tag = token_el.tag
    if tag in ('note', 'tech'):
        text = token_el.text
        if tag == 'note':
            text = '[' + text + ']'
        word_dict['transcription_view'] = text
        word_dict['transcription'] = text
        return word_dict
    trt_lst = token_el.xpath('trt/text()')
    nrm_lst = token_el.xpath('nrm/text()')
    lemma_lst = token_el.xpath('lemma/text()')
    morph_lst = token_el.xpath('morph/text()')
    # No <trt> child: the element text itself is the transcription.
    if not trt_lst:
        word_dict['transcription_view'] = token_el.text
        word_dict['transcription'] = token_el.text.lower()
        return word_dict
    word_dict['transcription_view'] = trt_lst[0]
    word_dict['transcription'] = trt_lst[0].lower()
    if nrm_lst:
        word_dict['standartization_view'] = nrm_lst[0]
        word_dict['standartization'] = nrm_lst[0].lower()
    if lemma_lst and morph_lst:
        word_dict['annotations'] = split_ann_for_db((lemma_lst[0], morph_lst[0]))
    return word_dict
def html_to_db(html_result):
    """Parse edited result HTML and persist every changed sentence back to Mongo."""
    root = etree.fromstring(html_result)
    changed = root.xpath('//*[contains(@class,"annot_wrapper") and contains(@class, "changed")]')
    for wrapper in changed:
        elan_name = wrapper.xpath('*[@class="annot"]/@elan')[0]
        tier_name = wrapper.xpath('*[@class="annot"]/@tier_name')[0]
        start = int(Decimal(wrapper.xpath('*[@class="audiofragment"]/@starttime')[0]))
        end = int(Decimal(wrapper.xpath('*[@class="audiofragment"]/@endtime')[0]))
        tokens = wrapper.xpath('*//*[self::token or self::tech or self::note]')
        words = [process_html_token(token) for token in tokens]
        SENTENCE_COLLECTION.update_one(
            {'elan': elan_name, 'tier': tier_name, 'audio.start': start, 'audio.end': end},
            {'$set': {'words': words}},
        )
|
18,162 | 4116208f4378501693d0d1a20f2d79e5b30dcf91 | try:
import requests,threading,phonenumbers
from phonenumbers import carrier
from time import sleep
except Exception as e:
exit(e)
# Lock serialising console output across threads, and a shared HTTP session.
PRNT = threading.Lock()
r=requests.session()
def telegram_vv1ck(*a, **b):
    """Thread-safe print(): serialises output through the PRNT lock."""
    PRNT.acquire()
    try:
        print(*a, **b)
    finally:
        PRNT.release()
def searched():
    """Print the accumulated per-site results for the queried email address."""
    global SNP,TIK,TWR,EML,INS,SCLD,NON,ACP,VIM,NEapi,DARK,eml
    telegram_vv1ck(f"[*] Email : {eml} \n\n{EML}\n{SNP}\n{TWR}\n{TIK}\n{INS}\n{SCLD}\n{NON}\n{ACP}\n{VIM}\n{NEapi}\n{DARK}")
def EMdark():
    """Probe darkwebid.com (step 11, last in the chain), then print all results."""
    global SNP,TIK,TWR,EML,INS,SCLD,NON,ACP,VIM,NEapi,DARK,eml
    try:
        send = r.post('https://secure.darkwebid.com/user/login',headers={'Host': 'secure.darkwebid.com','Cookie': 'has_js=1; _ga=GA1.2.404847453.1629450192; _gid=GA1.2.317407483.1629450192; _gat=1','User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0','Content-Length': '118','Origin': 'https://secure.darkwebid.com','Referer': 'https://secure.darkwebid.com/user/login','Upgrade-Insecure-Requests': '1','Sec-Fetch-User': '?1','Te': 'trailers'},data={
        'name':eml,
        'form_build_id':'form-T8F0Un2cVoe_nvYrXQHIOO176-yBIpuiG46EIOTmhQE',
        'form_id':'user_login','op':'Continue'}).text
        # The site's "unknown user" message means the email is NOT registered.
        if 'is not recognized as a user name or an e-mail address.'in send:
            DARK = '-[11] Not linked on darkwebid.com ✖️'
            searched()
        else:
            DARK = '-[11] linked on darkwebid.com ☑️'
            searched()
    except:
        DARK = '-[11] search error [darkwebid.com]'
        searched()
telegram_vv1ck("""
_ ____ _ __ __ _
| | / / /_ ____ (_)____ / /_/ /_ (_)____
| | /| / / __ \/ __ \ / / ___/ / __/ __ \/ / ___/ ??
| |/ |/ / / / / /_/ / / (__ ) / /_/ / / / (__ ) ??
|__/|__/_/ /_/\____/ /_/____/ \__/_/ /_/_/____/ ??
By JOKER @vv1ck
?MODE :
1) Search for email on websites
2) Find phone number information
3) Search for people by their name
99) EXIT(<\>)
""")
def EMwapi():
    """Probe newsapi.org (step 10) via password reset, then continue to EMdark."""
    global SNP,TIK,TWR,EML,INS,SCLD,NON,ACP,VIM,NEapi,eml
    try:
        send = r.post('https://newsapi.org/reset-password',headers={
        'Host': 'newsapi.org','Cookie': '_ga=GA1.2.596557937.1629129476; _gid=GA1.2.1573072051.1629129476; _gat_gtag_UA_91285317_5=1','User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0','Origin': 'https://newsapi.org','Referer': 'https://newsapi.org/reset-password'},data={'email':eml})
        if "Please check your email for further instructions,"in send.text:
            NEapi = '-[10] linked on newsapi.com ☑️'
            EMdark()
        elif "We don't have a registered user with that email address."in send.text:
            NEapi = '-[10] Not linked on newsapi.com ✖️'
            EMdark()
        else:
            NEapi = '-[10] search error [newsapi.com]'
            EMdark()
    except:
        NEapi = '-[10] search error [newsapi.com]'
        EMdark()
def EMvim():
    """Probe vimeo.com (step 9) via password reset, then continue to EMwapi.

    FIX: every sibling probe wraps its request in try/except so a network
    failure degrades to a "search error" result; this one previously did not,
    so any requests exception killed the whole chain.
    """
    global SNP,TIK,TWR,EML,INS,SCLD,NON,ACP,VIM,eml
    try:
        send = r.post('https://vimeo.com/forgot_password',headers={'Host': 'vimeo.com','Cookie': 'vuid=2139188521.1640766174; OptanonConsent=isIABGlobal=false&datestamp=Fri+Aug+20+2021+08%3A40%3A36+GMT%2B0000+(Coordinated+Universal+Time)&version=6.15.0&hosts=&consentId=bd63d91c-a872-40c0-80e7-39abe84d0f6a&interactionCount=1&landingPath=https%3A%2F%2Fvimeo.com%2Fforgot_password&groups=C0001%3A1%2CC0004%3A1%2CC0003%3A1%2CC0002%3A1; _gcl_au=1.1.1859821931.1628954558; _rdt_uuid=1628954558430.7535fc24-43f4-422d-93e9-6a0baa44e962; _ga=GA1.2.1509050282.1628954559; _pin_unauth=dWlkPU1EWTBabUUyWkdRdFltVmtZeTAwWVRkbExXRmxaVE10TWpsaFl6VTFOekJqWkRFNA; _fbp=fb.1.1628954558941.1830594440; afUserId=501e2ec1-a355-48e9-bcc6-c4dddfdea089-p; AF_SYNC=1628954559966; _uetsid=4a0d4e10019211ec9951b7d194b6c2d4; _uetvid=75945460fd1311eb9eb943defc105f6b; _gid=GA1.2.1070279053.1629448836; _gat_UA-76641-8=1','Origin':'https://vimeo.com','Referer': 'https://vimeo.com/forgot_password','User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.132 Safari/637.36','Upgrade-Insecure-Requests': '1','Sec-Fetch-Dest': 'document','Sec-Fetch-Mode': 'navigate','Sec-Fetch-Site': 'same-origin','Sec-Fetch-User': '?1','Te': 'trailers','Connection': 'close'},data={'email':eml,'token': '5051bb6298fc6a43b44bf7632b89c4f4c8e33f9b.yp67ttqiem.1629448837'})
        if 'We’ve emailed you a link to reset your password.' in send.text:
            VIM = '-[9] linked on vimeo.com ☑️'
            EMwapi()
        elif 'This email was not found in our system'in send.text:
            VIM = '-[9] Not linked on vimeo.com ✖️'
            EMwapi()
        else:
            VIM = '-[9] search error [vimeo.com]'
            EMwapi()
    except:  # bare except kept for consistency with the sibling probes
        VIM = '-[9] search error [vimeo.com]'
        EMwapi()
def acaps():
    """Probe acaps.org (step 8) via password reset, then continue to EMvim."""
    global SNP,TIK,TWR,EML,INS,SCLD,NON,ACP,eml
    try:
        send = r.post('https://www.acaps.org/user/password',headers={'Host': 'www.acaps.org','Cookie': 'has_js=1; acaps_mode=advanced; _ga=GA1.2.895538350.1628908856; _gid=GA1.2.1525444071.1628908856; _gat_UA-21240261-1=1; cookie-agreed=2','User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0','Referer': 'https://www.acaps.org/user/password'},data={'name': eml,'form_build_id':'form-GmhJBczE4v4RuumvVqBrs4AhCvdrGGNH64mmQDTotlw','form_id':'user_pass','op':'E-mail+new+password'}).text
        if 'is not recognized as a user name or an e-mail address.'in send:
            ACP = '-[8] Not linked on acaps.com ✖️'
            EMvim()
        else:
            ACP = '-[8] linked on acaps.com ☑️'
            EMvim()
    except:
        ACP = '-[8] search error [acaps.com]'
        EMvim()
def EMnon():
    """Probe noon.com (step 7) via its reset-password API, then continue to acaps."""
    global SNP,TIK,TWR,EML,INS,SCLD,NON,eml
    try:
        send = r.post('https://www.noon.com/_svc/customer-v1/auth/reset_password',headers={'cookie': 'next-i18next=ar-SA; AKA_A2=A; visitor_id=2d18be80-1958-4046-97a1-500742584616; _gcl_au=1.1.1522317105.1628906008; _ga=GA1.2.1536475616.1628906008; _gid=GA1.2.1694949827.1628906008; _gat_UA-84507530-14=1; _scid=f2b8cbdd-be9d-4edf-aed0-4b3fde0b4b9e; _fbp=fb.1.1628906007975.1440438626; nguest=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyIjoiZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnJhV1FpT2lJM05HUTRZVE0yTUdWa05XWTBOelV4WVRFNU4ySTNaRGc0TmpGbE9UaGtZeUlzSW1saGRDSTZNVFl5T0Rrd05qQXhNSDAuc0lQM1RDV0kzel9oZnFJY0hsTU5xTExvRGd3Yk1mNmowb1dGY0dmaUZEayIsImlhdCI6MTYyODkwNjAxMH0.68KVlD6GCJZxxPcenzuzfuc0nSkFksM4jFet3g3XMgY; _nsc=nsv1.public.eyJzdGlkIjoiNTFkMDVlZmMtMmYwZi00MmM4LWE3ZWEtMWZkNzY3NDhlNzYyIiwic2lkIjoiNzRkOGEzNjBlZDVmNDc1MWExOTdiN2Q4ODYxZTk4ZGMiLCJpYXQiOjE2Mjg5MDYwMTAsInZpZCI6IjJkMThiZTgwLTE5NTgtNDA0Ni05N2ExLTUwMDc0MjU4NDYxNiIsImhvbWVwYWdlIjp7fX01MW1EY3R5aXYxRlBxbGYzanRyOTZ2dWpjNmFQNVo0QUpDNTZOZWYwVTg0PQ.MQ; _sctr=1|1628899200000; __zlcmid=15ZkFmNm1eIjQ2X; _etc=STrytYJw5RtBDPau; RT="z=1&dm=noon.com&si=le66hf1mcuf&ss=ksb4kyp0&sl=1&tt=1yt&ld=1z4&nu=d41d8cd98f00b204e9800998ecf8427e&cl=4kl"','user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'},json={'email':eml}).text
        if '"message":"ok"'in send:
            NON = '-[7] linked on noon.com ☑️'
            acaps()
        elif '"error":"No user found with that email address"'in send:
            NON = '-[7] Not linked on noon.com ✖️'
            acaps()
        else:
            NON = '-[7] search error [noon.com]'
            acaps()
    except:
        NON = '-[7] search error [noon.com]'
        acaps()
def SCLDE():
    """Probe soundcloud.com (step 6) via its password-reset API, then continue."""
    global SNP,TIK,TWR,EML,INS,SCLD,eml
    try:
        # Uses the bare requests module (not the shared session `r`) here.
        aBaDy1337 = requests.post("https://api-mobile.soundcloud.com/users/passwords/reset?client_id=Fiy8xlRI0xJNNGDLbPmGUjTpPRESPx8C",json={"email":eml}).text
        if '{}' in aBaDy1337:
            SCLD = '-[6] linked on soundcloud.com ☑️'
            EMnon()
        elif 'identifier_not_found' in aBaDy1337:
            SCLD = '-[6] Not linked on soundcloud.com ✖️'
            EMnon()
        else:
            SCLD = '-[6] search error [soundcloud.com]'
            EMnon()
    except:
        SCLD = '-[6] search error [soundcloud.com]'
        EMnon()
def EMnsta():
    """Probe instagram.com (step 5) via account recovery, then continue to SCLDE."""
    global SNP,TIK,TWR,EML,INS,eml
    headers = {'cookie':'mid=YQvmcwAEAAFVrBezgjwUhwEQuv3c; ig_did=6C10D114-3B6D-4E5E-9E35-5E808661CBAD; ig_nrcb=1; shbid="13126\05446165248972\0541660151679:01f76272a193b960d6a59109693b94e7ceb63f379a095665f9e6588098e95e4d3c3a7ecc"; shbts="1628615679\05446165248972\0541660151679:01f7c823b2dd58ddaa1efa6ba7df2d6c9c6c69ef00c48655061d874c9143ef8902c6ebd4"; csrftoken=HtWab2HXNN9vlV8mNsL8v1BdVF2Yji5l',
    'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36','x-csrftoken': 'HtWab2HXNN9vlV8mNsL8v1BdVF2Yji5l','x-ig-app-id': '936619743392459','x-ig-www-claim': '0','x-instagram-ajax': '6abd51c369a9','x-requested-with': 'XMLHttpRequest'}
    try:
        send = r.post('https://www.instagram.com/accounts/account_recovery_send_ajax/',headers=headers,data={'email_or_username':eml,'recaptcha_challenge_field': '','flow': '','app_id': '','source_account_id': '',}).text
        if '"status":"ok"' in send:
            INS = '-[5] linked on instagram.com ☑️'
            SCLDE()
        elif 'checkpoint_url"' in send:
            INS = '-[5] There is a captcha instagram.com ✖️'
            SCLDE()
        elif '"message":"No users found"' in send:
            INS = '-[5] Not linked on instagram.com ✖️'
            SCLDE()
        else:
            INS = '-[5] search error [instagram.com]'
            SCLDE()
    except:
        INS = '-[5] search error [instagram.com]'
        SCLDE()
def EMtk():
    """Probe tiktok.com (step 4) via its send-code endpoint, then continue.

    NOTE(review): the URL contains '¤t_region' — this looks like mojibake of
    '&current_region' (&curren → ¤); kept byte-for-byte since the request may
    depend on it. Confirm against the original capture.
    """
    global SNP,TIK,TWR,EML,eml
    try:
        send = r.post('https://api16-normal-c-alisg.tiktokv.com/passport/email/send_code/?residence=AE&device_id=9488371953858433865&os_version=7.2.0&app_id=1233&iid=6951746276598204161&app_name=musical_ly&pass-route=1&vendor_id=EF3C1478-2AFC-4B8E-8030-C608120AECF9&locale=ar&pass-region=1&ac=WIFI&sys_region=US&ssmix=a&version_code=17.2.0&vid=EF3C1478-2AFC-4B8E-8030-C608120AECF9&channel=App%20Store&op_region=AE&os_api=27&idfa=00000000-0000-0000-0000-000000000000&install_id=6951746276598204161&idfv=EF3C1478-2AFC-4B8E-8030-C608120AECF9&device_platform=iphone&device_type=Pixel&openudid=3ce553bec09070081e5a698d3a14a988f3642ac4&account_region=&tz_name=Asia&tz_offset=12936&app_language=ar&carrier_region=AE¤t_region=AE&aid=1233&mcc_mnc=42402&screen_width=1242&uoo=1&content_language=&language=ar&cdid=FBF67CFE-39E1-4556-A3EB-624A20A434E1&build_number=172025&app_version=7.2.0&resolution=2883',headers={'Host': 'api16-normal-c-alisg.tiktokv.com','Connection': 'close','Content-Length': '76','x-Tt-Token': '9ABBszZbHK-ybQ4EmUNmO88d','Content-Type': 'application/x-www-form-urlencoded','x-tt-passport-csrf-token': 'f04fc476081a3d063b607f520e64780c','sdk-version': '2','passport-sdk-version': '7.2.0'},data={'email': eml,'account_sdk_source': 'app','mix_mode': '1','type': '31'})
        if '"message":"success"' in send.text:
            TIK = '-[4] linked on tiktok.com ☑️'
            EMnsta()
        elif 'description":"غير مسجل بعد"'in send.text:  # Arabic: "not registered yet"
            TIK = '-[4] Not linked on tiktok.com ✖️'
            EMnsta()
        else:
            TIK = '-[4] search error [tiktok.com]'
            EMnsta()
    except:
        TIK = '-[4] search error [tiktok.com]'
        EMnsta()
def EMTWR():
    """Probe twitter.com (step 3) via email_available, then continue to EMtk."""
    global SNP,TWR,EML,eml
    urTW = "https://twitter.com/users/email_available?email="+eml
    try:
        go = r.get(urTW,headers={
        'Host': 'twitter.com','Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36','Cookie': 'personalization_id="v1_6TNKT0FSMkPP7CfzL5Rkfg=="; guest_id=v1%3A159789135703778252; _ga=GA1.2.490437195.1597891367'}).text
        # "taken" means the address is already registered on Twitter.
        if '"taken":true'in go:
            TWR = '-[3] linked on twitter.com ☑️'
            EMtk()
        elif '"taken":false'in go:
            TWR = '-[3] Not linked on twitter.com ✖️'
            EMtk()
        else:
            TWR = '-[3] search error [twitter.com]'
            EMtk()
    except:
        TWR = '-[3] search error [twitter.com]'
        EMtk()
def EMsn():
    """Probe snapchat.com (step 2) via the Bitmoji login endpoint, then continue."""
    global SNP,EML,eml
    try:
        Send = r.post('https://accounts.snapchat.com/accounts/merlin/login',headers={'Host': 'accounts.snapchat.com','Cookie': 'xsrf_token=aDpeseUJS0ysikB9nhdNzA; _ga=GA1.2.113171992.1627308862; _scid=f8244bc8-117d-45aa-b1b0-f24ab31edabc; sc-cookies-accepted=true; Preferences=true; Performance=true; Marketing=true; sc_at=v2|H4sIAAAAAAAAAE3GwRGAMAgEwIqY4cIJxG6MSBUp3m/2teq4YEOlrEuIWpIfSxbD36fB2bFBveEjTDM991H9AatYyihAAAAA; _sctr=1|1627257600000; web_client_id=e64bb4c8-1a1f-4de7-970d-d637c2e9a642','User-Agent': 'Mozilla/5.0 (@vv1ck) Gecko/20100101 Firefox/90.0','X-Xsrf-Token': 'aDpeseUJS0ysikB9nhdNzA','Origin': 'https://accounts.snapchat.com','Connection': 'close'},json={"email":eml,"app":"BITMOJI_APP"})
        if 'hasSnapchat' in Send.text:
            SNP = '-[2] linked on snapchat.com ☑️'
            EMTWR()
        elif Send.status_code == 204:  # 204 No Content = unknown account
            SNP = '-[2] Not linked on snapchat.com ✖️'
            EMTWR()
        else:
            SNP = '-[2] search error [snapchat.com]'
            EMTWR()
    except:
        SNP = '-[2] search error [snapchat.com]'
        EMTWR()
def all_Email(domn):
    """Step 1: check Microsoft-account linkage via the Office HRD endpoint,
    then kick off the rest of the probe chain (EMsn -> ... -> EMdark)."""
    global EML,eml
    url = "https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress=" + eml + "&_=1604288577990"
    try:
        send = r.get(url,headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36","Host": "odc.officeapps.live.com","Referer": "https://odc.officeapps.live.com/odc/v2.0/hrd?rs=ar-sa&Ver=16&app=23&p=6&hm=0","canary": "BCfKjqOECfmW44Z3Ca7vFrgp9j3V8GQHKh6NnEESrE13SEY/4jyexVZ4Yi8CjAmQtj2uPFZjPt1jjwp8O5MXQ5GelodAON4Jo11skSWTQRzz6nMVUHqa8t1kVadhXFeFk5AsckPKs8yXhk7k4Sdb5jUSpgjQtU2Ydt1wgf3HEwB1VQr+iShzRD0R6C0zHNwmHRnIatjfk0QJpOFHl2zH3uGtioL4SSusd2CO8l4XcCClKmeHJS8U3uyIMJQ8L+tb:2:3c","uaid": "d06e1498e7ed4def9078bd46883f187b","Cookie": "xid=d491738a-bb3d-4bd6-b6ba-f22f032d6e67&&RD00155D6F8815&354"})
        if 'MSAccount' in send.text:
            EML = f'-[1] linked on {domn} ☑️'
            EMsn()
        elif 'Neither' in send.text:
            EML = f'-[1] Not linked on {domn} ✖️'
            EMsn()
        else:
            EML = f'-[1] search error [{domn}]'
            EMsn()
    except:
        EML = f'-[1] search error [{domn}]'
        EMsn()
def START_EML():
    """Prompt for an email address and start the site-probe chain."""
    global eml
    eml = input('[+] Enter Email : ')
    if "@" not in eml:
        exit('[-] Please enter an email ..')
    domn = eml.split('@')[1]
    telegram_vv1ck("\n ======= It's started, wait a bit =======\n")
    all_Email(domn)
def number_search():
    """Prompt for '<country code> <number>' and print any leaked caller-ID info.

    FIX: the +961 entry was labelled "Libya"; +961/LB is Lebanon (Libya is
    +218/LY). The elif chain is also replaced by a lookup table.
    """
    phon = input("\n[+] Enter Phone Number: ")
    code = phon.split(' ')[0]
    try:
        phone = phon.split(' ')[1]
    except IndexError:
        exit('[-] You must type the country code, then a space, and then the phone number.. \nExample[ 974 52947429 ]')
    # Dialling code -> "country tag:display name" (tag is sent to the API).
    countries = {
        '20': "EG:Egypt",
        '98': "IR:Iran",
        '212': "MA:Morocco",
        '213': "DZ:Algeria",
        '216': "TN:Tunisia",
        '249': "SD:Sudan",
        '252': "SO:Somalia",
        '961': "LB:Lebanon",  # was mislabelled "Libya"
        '962': "JO:Jordan",
        '963': "SY:Syria",
        '964': "IQ:Iraq",
        '965': "KW:Kuwait",
        '966': "SA:Saudi Arabia",
        '967': "YE:Yemen",
        '968': "OM:Oman",
        '970': "PS:Palestine",
        '971': "AE:Emirates",
        '972': "ISR:Israel",
        '973': "BH:Bahrain",
        '974': "QA:Qatar",
    }
    country = countries.get(code)
    if country is None:
        exit("[¿] The country code is not added for this number, it will be added soon")
    countr, countr2 = country.split(':')
    send = r.get(f"http://caller-id.saedhamdan.com/index.php/UserManagement/search_number?number={phone}&country_code={countr}", headers={"User-Agent": "8Y/69"})
    try:
        name = send.json()['result'][0]['name']
        if name == '':
            name = 'nothing'
        nump = send.json()['result'][0]['number']
        #Communication rights reserved @_agf
        pho = phonenumbers.parse('+' + phon)
        qtr = carrier.name_for_number(pho, 'en')
        telegram_vv1ck(f'\n[+] phone : {nump}\n[+] country : {countr2}\n[+] ZIP code : {countr}\n[+] name : {name}\n[+] number type : {qtr}')
    except KeyError:
        telegram_vv1ck('[-] No leaked information found')
def All_users():
    """Probe every URL template in Link_all.txt for the given username.

    FIX: the guard around ``url.split('//')[1]`` caught KeyError, but list
    indexing raises IndexError — a template without '//' crashed instead of
    ending cleanly.
    """
    username = input('[?] Enter username : ')
    try:
        file = open('Link_all.txt', 'r')
    except FileNotFoundError:
        exit('[-] Please make sure that the Link_all.txt file is in the same folder as the tool')
    telegram_vv1ck('\n ========== started ==========\n')
    sleep(1)
    telegram_vv1ck(f'[+] Username : {username}')
    while True:
        urls = file.readline().split('\n')[0]
        if urls == '':
            # Blank line / EOF ends the scan.
            exit('\n ======= completed =======')
        url = urls.format(username)
        try:
            web = url.split('//')[1]
        except IndexError:  # was KeyError, which split indexing never raises
            exit('\n ======= completed =======')
        web_name = web.split('/')[0]
        if 'https://t.me' in url:
            web_name = 'telegram.com'
        send = r.get(url, headers={'User-Agent': 'Mozilla/5.0 (@vv1ck) Gecko/20100101 Firefox/90.0'})
        if send.status_code == 200:
            telegram_vv1ck(f'[+] linked on {web_name}')
        elif send.status_code == 404:
            telegram_vv1ck(f'[-] Not linked on {web_name}')
        else:
            telegram_vv1ck(f'[!] search error [{web_name}]')
# CLI entry point: read the menu choice and dispatch to the matching mode.
try:
    vv1ck=int(input('[?] Enter mode : '))
    if vv1ck==1:START_EML()
    elif vv1ck==2:number_search()
    elif vv1ck==3:All_users()
    else:exit('[<\>] see you soon ...')
except ValueError:exit('[<\>] Please enter one of the displayed numbers')
|
18,163 | f375d6d711e69f888bcbb0b63087d9f285c8b67a | import os
import shutil
import tempfile
import unittest
from dotenv import load_dotenv
load_dotenv() # noqa
from src.kbase_workspace_utils import download_reads
from src.kbase_workspace_utils.exceptions import InvalidWSType
class TestDownloadReads(unittest.TestCase):
    """Integration tests for download_reads against live KBase workspace objects.

    NOTE(review): these tests hit the network and pin exact byte sizes of the
    remote fixtures; they will fail if the remote objects ever change.
    """

    def test_basic_valid(self):
        """
        Test valid downloads for both paired and single-end reads. Paired-end has examples for both
        interleaved and not.
        """
        tmp_dir = tempfile.mkdtemp()
        # Paired reads, non-interleaved: expect a forward and a reverse file.
        ref = '15/45/1'
        paths = download_reads(ref=ref, save_dir=tmp_dir)
        self.assertEqual(len(paths), 2)
        self.assertTrue('rhodobacter.art.q10.PE.reads.paired.fwd.fastq' in paths[0])
        self.assertTrue('rhodobacter.art.q10.PE.reads.paired.rev.fastq' in paths[1])
        self.assertEqual(os.path.getsize(paths[0]), 36056522)
        self.assertEqual(os.path.getsize(paths[1]), 37522557)
        # Paired reads, interleaved: a single combined file.
        ref = '15/44/1'
        paths = download_reads(ref=ref, save_dir=tmp_dir)
        self.assertTrue('rhodobacter.art.q20.int.PE.reads.paired.interleaved.fastq' in paths[0])
        self.assertEqual(len(paths), 1)
        self.assertEqual(os.path.getsize(paths[0]), 36510129)
        # Single-end reads
        ref = '15/43/1'
        paths = download_reads(ref=ref, save_dir=tmp_dir)
        self.assertTrue('rhodobacter.art.q50.SE.reads.single.fastq' in paths[0])
        self.assertEqual(len(paths), 1)
        self.assertEqual(os.path.getsize(paths[0]), 53949468)
        shutil.rmtree(tmp_dir)

    # Error cases for invalid users and invalid ws references are covered in test_download_obj
    def test_download_wrong_type(self):
        """Downloading a non-reads object (an assembly) must raise InvalidWSType."""
        assembly_id = '34819/10/1'
        tmp_dir = tempfile.mkdtemp()
        with self.assertRaises(InvalidWSType) as err:
            download_reads(ref=assembly_id, save_dir=tmp_dir)
        self.assertTrue('Invalid workspace type' in str(err.exception))
        shutil.rmtree(tmp_dir)
|
18,164 | 197f5b97e1b2336e1571b48c641abe8cb95e773f | from paddle.trainer_config_helpers import *
settings(learning_rate=1e-4, batch_size=1000)

# Two parallel 100-dim input streams.
data_1 = data_layer(name='data_a', size=100)
data_2 = data_layer(name='data_b', size=100)

# The same named ParamAttr is passed to both projections — presumably so the
# two branches share one projection weight matrix (Paddle shares by name).
mixed_param = ParamAttr(name='mixed_param')
with mixed_layer(size=400, bias_attr=False) as m1:
    m1 += full_matrix_projection(input=data_1, param_attr=mixed_param)
with mixed_layer(size=400, bias_attr=False) as m2:
    m2 += full_matrix_projection(input=data_2, param_attr=mixed_param)

# Siamese LSTMs: shared weights plus a shared, zero-initialised bias.
lstm_param = ParamAttr(name='lstm_param')
lstm_bias = ParamAttr(name='lstm_bias', initial_mean=0., initial_std=0.)
lstm1 = lstmemory_group(input=m1, param_attr=lstm_param, lstm_bias_attr=lstm_bias, mixed_bias_attr=False)
lstm2 = lstmemory_group(input=m2, param_attr=lstm_param, lstm_bias_attr=lstm_bias, mixed_bias_attr=False)

# Softmax classifier over the last step of each LSTM; the weight attr is
# listed twice so both inputs use the same parameter.
softmax_param = ParamAttr(name='softmax_param')
predict = fc_layer(input=[last_seq(input=lstm1), last_seq(input=lstm2)],
                   size=10,
                   param_attr=[softmax_param, softmax_param],
                   bias_attr=False,
                   act=SoftmaxActivation())
outputs(classification_cost(input=predict, label=data_layer(name='label', size=10)))
|
18,165 | 4b4d50cb2e6cca209e98306550db27010c202d7a | import urllib.parse
from notifier.grabbers.base import Base, Internet
class PSily(object):
    """Grabber that scrapes listing links from the sync source's index page."""

    @staticmethod
    def sync(obj: Base, *args, **kwargs):
        """Fetch the index page and queue one text task per listed link."""
        response = Internet.html_get(obj.sync_type.base_url)
        anchors = response.html.xpath('/html/body/div[*]/div[*]/div/div[*]/div[*]/section/div[*]/div[*]/div[*]/a')
        # Reversed: presumably the newest entries appear first on the page,
        # so tasks get queued oldest-first — confirm against the site.
        for anchor in reversed(anchors):
            path = anchor.attrs.get('href').split("?")[0]
            link = urllib.parse.urljoin(obj.sync_type.base_url, path)
            obj.add_text_task(
                unique_key=link,
                name=anchor.text.strip(),
                url=link,
                data=dict(text=link)
            )
|
18,166 | a1c004682cf80097f2eb60a33142190205853380 | api_key = "507aa853ae902f61c5ac921503091e63"
|
18,167 | fe125eab887b57a3722b323b2d8c02587828fa50 | from flask import render_template, url_for, request
from app import app
from app.graph_bridge import GraphBridge
from app.graph_cycles import GraphCycles
import numpy as np
from time import time
@app.route('/')
@app.route('/index')
def index():
    """Render the static landing page."""
    return render_template('index.html')
@app.route('/bridges', methods=['GET', 'POST'])
def bridges():
    """Bridge-finder page: on POST, build the graph from the form and time
    the bridge computation; on GET just render the empty form."""
    grafo1 = None
    if request.method == 'POST':
        n_nodos = int(request.form['num_nodos'])
        aristas = request.form['num_aristas']
        start_time = time()
        grafo1 = GraphBridge(n_nodos)
        # Edges come in as "a-b, c-d, ...".
        for arista in aristas.split(', '):
            endpoints = [int(nodo) for nodo in arista.split('-')]
            grafo1.addEdge(endpoints[0], endpoints[1])
        grafo1.bridge()
        elapsed_time = time() - start_time
        return render_template(
            'bridge.html',
            title='Proyecto ED',
            grafo=grafo1,
            ar=aristas,
            numero_nodos=n_nodos,
            elapsed_time=elapsed_time
        )
    return render_template('bridge.html', title='Proyecto ED', grafo=grafo1)
@app.route('/cycles', methods=['GET', 'POST'])
def cycles():
    """Cycle-counting page: on POST, build the adjacency matrix from the form
    and time the cycle count; on GET render the empty form."""
    if request.method != 'POST':
        return render_template('cycles.html')
    n_nodos = int(request.form['num_nodos'])
    aristas = request.form['aristas']
    n_ciclos = int(request.form['n_ciclos'])
    # Parse "a-b, c-d, ..." into endpoint pairs.
    nodos = [[int(nodo) for nodo in arista.split('-')] for arista in aristas.split(', ')]
    graph = np.zeros((n_nodos, n_nodos))
    for i in range(n_nodos):
        for j in range(n_nodos):
            if [i, j] in nodos:
                graph[i][j] = 1
                graph[j][i] = 1
    start_time = time()
    grafo1 = GraphCycles(n_nodos, graph)
    num_ciclos = grafo1.count_cycles(n_ciclos)
    elapsed_time = time() - start_time
    return render_template(
        'cycles.html',
        title='Proyecto ED',
        aristas=aristas,
        n_ciclos=n_ciclos,
        num_ciclos=num_ciclos,
        numero_nodos=n_nodos,
        elapsed_time=elapsed_time
    )
|
18,168 | 042d9e441d607cf750991e7746500c321ec4f155 | import os
# Absolute path of the directory containing this module.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Package version string.
__version__ = '0.2'
def make_folder(path, del_if_exist=False):
    """Create directory ``path`` (including parents).

    If ``path`` already exists it is left untouched, unless ``del_if_exist``
    is true, in which case it is removed and recreated empty.

    FIX: the old version ignored ``del_if_exist`` entirely and used a bare
    ``except`` around ``os.stat`` to detect a missing directory.
    """
    import shutil  # local import, matching remove_folder's style
    if os.path.exists(path):
        if not del_if_exist:
            return
        shutil.rmtree(path)
    print(path)  # keep the old behavior of logging newly-created paths
    os.makedirs(path)
def remove_folder(path):
    """Recursively delete ``path`` if it exists; silently do nothing otherwise."""
    import shutil
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
|
18,169 | 93fdc1ddb4400c8947d6b01a776a94333caef7f0 | # -*- coding: utf-8 -*-
# from django.db import models <--- this old import is odd
from gtools import models
from django.core.exceptions import ValidationError
import datetime
def validate_end_with_question_mark(value):
    """Model-field validator: reject any question that does not end in '?'.

    FIX: uses ``endswith`` so the empty string fails validation cleanly —
    the old ``value[-1]`` raised IndexError on ''.
    """
    if not value.endswith('?'):
        raise ValidationError(u"Your question should end with a question mark.")
class Poll(models.Model):
    """A poll question with its publication timestamp."""

    question = models.CharField(max_length=200, validators=[validate_end_with_question_mark])
    # FIX: pass the callable, not its result — `datetime.datetime.now()` was
    # evaluated once at import time, freezing a single default timestamp for
    # every Poll ever created. With the callable it is evaluated per instance.
    pub_date = models.DateTimeField(u"date published", default=datetime.datetime.now)

    def __unicode__(self):
        return self.question

    @models.permalink
    def get_absolute_url(self):
        """Reverse URL for this poll's detail view."""
        return (
            "PollViews:show",
            (),
            { 'object_id': self.pk }
        )

    class Meta:
        # Fields exposed for mass assignment by the gtools model layer.
        fields_accessible = [
            'question',
            'pub_date',
        ]
class Choice(models.Model):
    """One selectable answer for a Poll, with its running vote tally."""
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)
    votes = models.IntegerField()
    #class Meta:
    #    accessible = ['choice']
    def __unicode__(self):
        return self.choice
|
18,170 | f05f8593ef69ef4e6a4b5d2b6f40dfc1487fa3a8 | from enthought.mayavi import mlab
from numpy import *
import numpy as numpy
# Produce some nice data.
#n_mer, n_long = 6, 11
#pi = numpy.pi
#dphi = pi/1000.0
#phi = numpy.arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
#mu = phi*n_mer
'''
x = numpy.cos(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
y = numpy.sin(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
z = numpy.sin(n_long*mu/n_mer)*0.5
'''
# Scalar values for the two endpoints of each plotted segment.
s = (0.0,0.8)
for i in range(0,100):
    # NOTE(review): numpy exposes no `acos` before NumPy 2.0 (it is `arccos`),
    # so this line raises AttributeError on older installs; also 0.1*i exceeds
    # 1.0 for i > 10, outside the arccos domain (nan) — confirm intent.
    theta = numpy.acos(0.1*i)
    phi = 0.0
    r = 0.7
    # Segment from the origin to a point on a sphere of radius r
    # (standard spherical-to-Cartesian conversion below).
    x0 = 0.0
    y0 = 0.0
    z0 = 0.0
    x1 = r*numpy.sin(theta)*numpy.cos(phi)
    y1 = r*numpy.sin(theta)*numpy.sin(phi)
    z1 = r*numpy.cos(theta)
    x = (x0,x1)
    y = (y0,y1)
    z = (z0,z1)
    # View it.
    l = mlab.plot3d(x, y, z, s, tube_radius=0.025, colormap='Spectral')
mlab.show()
'''
# Now animate the data.
ms = l.mlab_source
for i in range(100):
x = numpy.cos(mu)*(1+numpy.cos(n_long*mu/n_mer + numpy.pi*(i+1)/5.)*0.5)
scalars = numpy.sin(mu + numpy.pi*(i+1)/5)
ms.set(x=x, scalars=scalars)
'''
|
18,171 | 5f47fb9894accec115d0bd58260ba48295ea9651 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class SDNPluginProvider(RelationBase):
    """Provides side of the sdn-plugin relation interface."""

    scope = scopes.GLOBAL

    @hook('{provides:sdn-plugin}-relation-{joined,changed}')
    def joined_or_changed(self):
        ''' Set the connected state from the provides side of the relation. '''
        conversation = self.conversation()
        conversation.set_state('{relation_name}.connected')
        config = self.get_sdn_config()
        # Only advertise .available once the SDN provider has published every
        # data point that consumers of the .available state assume is present.
        if all(config[key] for key in ('mtu', 'subnet', 'cidr')):
            conversation.set_state('{relation_name}.available')
        else:
            conversation.remove_state('{relation_name}.available')

    @hook('{provides:sdn-plugin}-relation-{departed}')
    def broken_or_departed(self):
        '''Remove connected state from the provides side of the relation. '''
        conversation = self.conversation()
        for state in ('{relation_name}.connected', '{relation_name}.available'):
            conversation.remove_state(state)

    def get_sdn_config(self):
        ''' Return a dict of the SDN configuration. '''
        conversation = self.conversation()
        return {key: conversation.get_remote(key) for key in ('mtu', 'subnet', 'cidr')}
|
18,172 | 2483f6d040fa480d7b74cbb0dcaf7dc92331f681 | #usage: python3.7 Merging_sequencing.py 000F.seq 001F.seq 002R.seq 003R.seq
"""
This script merges multiple successive Sanger DNA sequencing results.
The file names of the sequences to be merged start with '000' plus 'F' (forward)
or 'R' (reverse), and are in .seq format.
The files are arranged in tandem according to their real positions along
the sequenced plasmid or linear DNA.
"""
import os,shutil
import sys
import string
from Bio.Emboss.Applications import NeedleCommandline
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import AlignIO
from Bio import SeqIO
# Decorative startup banner.
for i in range(32):
    if i % 2 == 1:
        print(i*"(^_^)")
print("\n\n")
# Get current path.
current_path = os.getcwd()
print("Current path is %s." % current_path)
print("\n")
# (Re)create the output directory, emptying it if it already exists.
folder_name = "merged_sequence"
dirs = os.listdir(current_path)
if folder_name not in dirs:
    os.mkdir(folder_name)
else:
    shutil.rmtree(folder_name)
    os.mkdir(folder_name)
# Require at least two sequence files on the command line.
number_of_files = len(sys.argv) -1
if len(sys.argv) -1 < 2:
    print("Must have 2 or more sequences. Please reinput sequences files to be merged.")
    sys.exit()
else:
    print("There are %s sequences to be joined." % number_of_files)
# Stage every input sequence into the merge folder, normalised to uppercase
# forward-strand letters: 'F' reads are copied as-is, 'R' reads are
# reverse-complemented so every staged file is in the forward orientation.
#
# FIX: the original compared the direction character with `is 'F'` / `is 'R'`
# — identity comparison against string literals, which is implementation-
# dependent and a SyntaxWarning on modern Python; use `==`. The duplicated
# F/R branches are also merged, and the per-file os.chdir() dance is replaced
# by writing to an explicit path (net effect on the cwd is identical).
dir_sub = os.path.join(current_path, folder_name)
for file in sys.argv[1:]:
    direction = file[3]
    if direction not in ('F', 'R'):
        continue
    print(file)
    with open(file) as seq_file:
        file_sequence = seq_file.read()
    # Keep only letters, uppercased (drops whitespace/digits from the .seq file).
    # The name DNA_sequence_tmp_list is kept because later script code reads it.
    DNA_sequence_tmp_list = [ch.upper() for ch in file_sequence if ch.isalpha()]
    staged = ''.join(DNA_sequence_tmp_list)
    if direction == 'R':
        staged = str(Seq(staged).reverse_complement())
    with open(os.path.join(dir_sub, file[0:4] + '.seq'), 'w') as out:
        out.write(staged)
#function for the boundaries
def needle_align_to_get_boundaries(f1,f2):
    """Globally align two sequence files with EMBOSS needle and return the
    1-based start positions (a_left, b_left) of the first well-aligned
    stretch — the first report line containing 50 consecutive '|' matches.

    NOTE(review): if no such line exists, aligment_a_left/aligment_b_left are
    never assigned and the return raises UnboundLocalError — confirm the
    inputs always overlap well enough.
    """
    output_file_name = f1.split('.')[0] + f2.split('.')[0] + ".needle"
    # Configure and run the EMBOSS needle global alignment.
    needle_cline = NeedleCommandline()
    needle_cline.asequence = f1
    needle_cline.bsequence = f2
    needle_cline.gapopen = 10
    needle_cline.gapextend= 0.5
    needle_cline.outfile = output_file_name
    print(needle_cline)
    stdout, stderr = needle_cline()
    print(stdout + stderr)
    #open the needle alignment output file and get boundaries
    file = open(output_file_name)
    file_lines = file.readlines()
    file.close()
    for line in file_lines:
        print(line, end="")
    aligment_a_squence_positions = []  # (unused)
    aligment_b_squence_positions = []  # (unused)
    file = open(output_file_name)
    new_line1 = file.readline()
    new_line2 = file.readline()
    # Scan the report with a two-line window: a match-marker line (the '|'s)
    # sits between the 'a' sequence line above it and the 'b' line below it.
    while len(new_line2):
        line_a = new_line1
        line_b = new_line2
        new_line2 = new_line2.strip()
        if (50*'|' in new_line2):
            # line_a is the 'a' sequence line; the next report line is 'b'.
            line_b = file.readline()
            aligment_a_squence_line_str = line_a.strip()
            aligment_b_squence_line_str = line_b.strip()
            print("The beginning of excellent alignment is shown below.\n")
            aligment_a_squence_line_str_split = aligment_a_squence_line_str.split()
            print(aligment_a_squence_line_str_split[0].ljust(5,' '),\
                aligment_a_squence_line_str_split[1],\
                aligment_a_squence_line_str_split[2].rjust(6,' '),\
                sep="")
            aligment_b_squence_line_str_split = aligment_b_squence_line_str.split()
            print(aligment_b_squence_line_str_split[0].ljust(5,' '),\
                aligment_b_squence_line_str_split[1],\
                aligment_b_squence_line_str_split[2].rjust(6,' '),\
                sep="")
            print("\n")
            # The first column of each sequence line is its 1-based start position.
            aligment_a_left = int(aligment_a_squence_line_str.split()[0])
            aligment_b_left = int(aligment_b_squence_line_str.split()[0])
            break
        else:
            new_line1 = new_line2 #notice the skill here, one must go step by step through the lines
            new_line2 = file.readline()
    file.close()
    return aligment_a_left, aligment_b_left
#end of function
#align with needle progressively to get boundaries
dir_sub = os.path.join(current_path,folder_name)
os.chdir(dir_sub)
dir_new = os.listdir()
dir_new.sort()
# Es_list holds the 1-based cut boundaries: [1, a1,b1, a2,b2, ..., len+1];
# fragment i contributes positions [Es_list[2i], Es_list[2i+1]).
Es_list = []
Es_list.append(1)
for i in range(len(dir_new) - 1):
    E = needle_align_to_get_boundaries(dir_new[i], dir_new[i+1])
    Es_list.append(E[0])
    Es_list.append(E[1])
# length of the last sequence
f1 = open(dir_new[-1])
file_sequence = f1.read()
f1.close()
# BUG FIX: the last file's letters were previously appended to
# DNA_sequence_tmp_list, a stale list left over from the earlier copy loop
# (the fresh DNA_sequence_temp_list was declared but never used), so the
# computed length was inflated. Use one fresh list consistently.
last_sequence_list = []
for i in file_sequence:
    if i.isalpha():
        last_sequence_list.append(i.upper())
length_of_last_file_sequence = len(last_sequence_list)
Es_list.append(length_of_last_file_sequence + 1)
#till now, got the boundaries for each sequence
#list to store the final merged sequence
merged_sequence_list = []
for i in range(len(dir_new)):
    f = open(dir_new[i])
    file_sequence = f.read()
    f.close()
    DNA_sequence_tmp_list = []
    for j in file_sequence:
        if j.isalpha():
            DNA_sequence_tmp_list.append(j.upper())
    # Take this fragment between its own left boundary and the next
    # fragment's left boundary (1-based, half-open interval).
    merged_sequence_list += DNA_sequence_tmp_list[Es_list[2*i]-1:Es_list[2*i+1]-1]
"""
The content of Es_list, 2n elements in toal
E0=1,E1, E2,E3, E4,E5, E6,E7 ... E2n-2,E2n-1
S0 S1 S2 S3 Sn-1
mathematical [X,Y)~~mathematical [X,Y-1]~~list elements from X-1 to Y-2~~list [X-1:Y-1]
"""
merged_sequence_str = ''.join(merged_sequence_list)
print("The merged DNA sequence is shown below.\n")
# Pretty-print the merged sequence, 100 bases per line.
for j in range(0, len(merged_sequence_str), 100):
    print(merged_sequence_str[j:j+100])
print("\n")
#write to the file
merged_file_name = "merged" + ".seq"
file = open(merged_file_name, 'w')
file.write(merged_sequence_str)
file.close()
# Clean up intermediate alignment files and the per-fragment copies.
os.system('rm *.needle')
os.system('rm 00*')
|
18,173 | bd2161fa12e7edefc66adb99b9dfc52909ae736f | import web
import json
render = web.template.render("aplicacion/")
class Index():
    """web.py handler for the index page.

    GET renders the empty form; POST reads the submitted day and month and
    renders the matching zodiac profile serialized as JSON.
    """

    def GET(self):  # BUG FIX: parameter was misspelled 'sef'
        zodiaco = None
        return render.index(zodiaco)

    def POST(self):
        # Read the submitted birth date; the month is matched
        # case-insensitively against Spanish month names.
        form = web.input()
        dia = int(form["dia"])
        meses = str(form["mes"])
        mes = meses.lower()
        aries = {} #21 de marzo - 20 de abril
        aries["signo"]="Aries"
        aries["elemento"]="Fuego"
        aries["simbolo"]="Carnero"
        aries["numero_suerte"]="7, 17 y 21"
        aries["color"]=" Rojo, salmón y blanco"
        aries["domicilio"]="Marte"
        aries["horoscopo"]="Este año tendrás que plantearte buscar nuevos amigos porque es imposible seguir tu vida tan fiestera. Hacia la primavera, Plutón se posicionará en tu signo y hará que tus cejas se pueblen más de lo normal, por lo que deberás depilarte constantemente si no quieres que se queden las marcas del sol. Con la entrada del otoño, todo volverá a la normalidad y tu impulsividad favorecerá los golpes en la cabeza tan propios de ti."
        tauro = {} #21 abril -20 mayo
        tauro["signo"]="Tauro"
        tauro["elemento"]="Tierra"
        tauro["simbolo"]="Toro"
        tauro["numero_suerte"]="4, 6 y 11"
        tauro["color"]="Verde claro, rosa y turquesa"
        tauro["domicilio"]="Venus"
        tauro["horoscopo"]="Mientras que tus amistades y familiares conseguirán aumentos salariales, tu en cambio tendrás dificultades en el trabajo debido a conflictos con la secretaria de tu jefe. Además, tu adicción a las sabritas te llevará al borde de la ruina y pondrá en peligro la seguridad económica que tanto te preocupa. A pesar de todo, este año el amor te sonríe: si vas a Francia con frecuencia ten claro que el destino te pondrá a tiro al amor de tu vida, pero procura no comer sabritas si no, te vas a enfermar del estomago."
        geminis = {} #21 mayo – 20 junio
        geminis["signo"]="Geminis"
        geminis["elemento"]="Aire"
        geminis["simbolo"]="Gemelos"
        geminis["numero_suerte"]="3, 12 y 18"
        geminis["color"]="Azul, violeta y amarillo"
        geminis["domicilio"]="Mercurio"
        geminis["horoscopo"]="En 2030 aprovecharas cualquier fiesta de disfraces que se te presente para dar rienda suelta a tu bipolaridad, por lo mientas solo soñaras con ella. Ganarás un concurso en Facebook y unos cuantos premios en el casino. ¡Este es tu año!, porfin seras millonario"
        cancer = {} #21 Junio – 21 Julio
        cancer["signo"]="Cancer"
        cancer["elemento"]="Agua"
        cancer["simbolo"]="Cangrejo"
        cancer["numero_suerte"]="2, 8 y 12"
        cancer["color"]="Blanco, plateado y verde"
        cancer["domicilio"]="Luna"
        cancer["horoscopo"]="Ese esfuerzo por ayudar a todo el mundo hará que te olvides de tu propia higiene: el jabón y el desodorante no formarán parte de tu vida. En cuanto al amor, si tienes pareja, esta te dejará por no ducharte. Y si no tienes pareja, obviamente seguirás sin tenerla. De momento, compra una escoba para limpiar tu cuarto desordenado."
        leo = {} #22 julio – 22 agosto
        leo["signo"]="Leo"
        leo["elemento"]="Fuego"
        leo["simbolo"]="León"
        leo["numero_suerte"]="1, 9 y 10"
        leo["color"]="Dorado, naranja y verde"
        leo["domicilio"]="Sol"
        leo["horoscopo"]="Durante los ultimos meses del año brillarás con luz propia, pero ten cuidado con las envidias, porque las venenosas de tus vecinas no descansaran hasta apagarla. Gozarás de buena salud casi todo el año, pero hacia los meses de otoño deberás prestar atención a posibles lesiones en los gluteos: tanto reggaeton te hara daño."
        virgo = {} #23 agosto – 22 septiembre
        virgo["signo"]="Virgo"
        virgo["elemento"]="Tierra"
        virgo["simbolo"]="Virgen"
        virgo["numero_suerte"]="10, 15 y 27"
        virgo["color"]="Blanco, violeta y naranja"
        virgo["domicilio"]="Mercurio"
        virgo["horoscopo"]="Gracias a tu esfuerzo y constancia, este año desarrollarás tu musculatura de una forma muy grande. Tendrás que ahorrar para comprarte ropa nueva; aprovecha y comprate algo sexy ahora que estás en forma. Eso sí, te recomendamos hacerte una limpieza dental profunda si no quieres espantar a la gente al sonreir."
        libra = {} #23 septiembre – 22 octubre
        libra["signo"]="Libra"
        libra["elemento"]="Aire"
        libra["simbolo"]="Balanza"
        libra["numero_suerte"]="2, 8 y 19"
        libra["color"]="Rosa, azul y verde"
        libra["domicilio"]="Venus"
        libra["horoscopo"]="Aunque eras muy feo al nacer, los años te han ido concediendo esa perfecta belleza que te define. Correrás el riesgo de que tu pareja se quede dormida al hacer el amor."
        escorpio = {} #23 Octubre – 21 Noviembre
        escorpio["signo"]="Escorpio"
        escorpio["elemento"]="Agua"
        escorpio["simbolo"]="Escorpión"
        escorpio["numero_suerte"]="4, 13 y 21"
        escorpio["color"]="Rojo, verde y negro"
        escorpio["domicilio"]="Plutón y Marte"
        escorpio["horoscopo"]="Piensa antes de actuar porque tu obsesión por ser la pareja de tu crush hara que se te vaya el amor de tu vida. Tus compañeros de trabajo pegarán chicles en tu silla para hacerte bajar de las nubes. Esté año no conseguirás caer bien a nadie, y tendrás suerte si consigues que alguien se ría de tus chistes malos."
        sagitario = {} #22 noviembre – 21 diciembre
        sagitario["signo"]="Sagitario"
        sagitario["elemento"]="Fuego"
        sagitario["simbolo"]="Centauro"
        sagitario["numero_suerte"]="9, 14 y 23"
        sagitario["color"]="Blanco, azul y verde"
        sagitario["domicilio"]="Júpiter"
        sagitario["horoscopo"]="Los astros te acompañan este año y tendrás un año de lo más divertido. Cuidado con el alcohol porque podrias ir al hospital. Podrás calmar tus ansias de alcohol si te apuntas a clases de salsa o de bachata, donde podrás dar rienda suelta a tu sensualidad de una forma más sutil."
        capricornio = {} #22 diciembre – 21 enero
        capricornio["signo"]="Capricornio"
        capricornio["elemento"]="Tierra"
        capricornio["simbolo"]="Cabra"
        capricornio["numero_suerte"]="3, 16 y 25"
        capricornio["color"]="Negro, azul y marrón"
        capricornio["domicilio"]="Saturno"
        capricornio["horoscopo"]="Dado tu materialismo y ambición, tendrías mucho éxito como banquero, pero este año te presentarás especialmente generoso. Serás muy comprensivo con los vagabundos y hasta se te pasará por la cabeza alojarlos en tu casa."
        acuario = {} #22 enero – 18 febrero
        acuario["signo"]="Acuario"
        acuario["elemento"]="Aire"
        acuario["simbolo"]="El Aguador"
        acuario["numero_suerte"]="7, 14 y 20"
        acuario["color"]="Gris, azul y verde"
        acuario["domicilio"]="Urano"
        acuario["horoscopo"]="Un dolor del corazón anidará en lo más íntimo de tu ser y te acompañará durante unos meses. La luna estará muy cerca de Tauro, lo que favorece los nuevos proyectos profesionales y la entrada de dinero en tu hogar. No decaigas, si eres capaz de controlar el estrés laboral, el corazón roto curara con el tiempo"
        piscis = {} #19 febrero – 20 marzo
        piscis["signo"]="Piscis"
        piscis["elemento"]="Agua"
        piscis["simbolo"]="Pez"
        piscis["numero_suerte"]="14, 25, 31, 39 y 58"
        piscis["color"]="Verde, azul y morado"
        piscis["domicilio"]="Neptuno y Júpiter"
        piscis["horoscopo"]="La alineación de los astros potenciará tu iniciativa en el amor, lo cual te vendrá muy bien para abandonar las prácticas de cocina. Juguetes, disfraces y cremas de todo tipo habitarán tu mesita de noche. Pero no te confíes porque Marte entrará en tu signo hacia otoño, y tendrás problemas con los vecinos, que lo saben todo y son muy envidiosos."
        # Dispatch on each sign's date range and render the match as JSON.
        if (mes == "marzo" and dia >= 21) or (mes == "abril" and dia <= 20):
            result = json.dumps(aries)
            return render.index(result)
        elif (mes == "abril" and dia >= 21) or (mes == "mayo" and dia <= 20):
            result = json.dumps(tauro)
            return render.index(result)
        elif (mes == "mayo" and dia >= 21) or (mes == "junio" and dia <= 20):
            result = json.dumps(geminis)
            return render.index(result)
        elif (mes == "junio" and dia >= 21) or (mes == "julio" and dia <= 21):
            result = json.dumps(cancer)
            return render.index(result)
        elif (mes == "julio" and dia >= 22) or (mes == "agosto" and dia <= 22):
            result = json.dumps(leo)
            return render.index(result)
        elif (mes == "agosto" and dia >= 23) or (mes == "septiembre" and dia <= 22):
            result = json.dumps(virgo)
            return render.index(result)
        elif (mes == "septiembre" and dia >= 23) or (mes == "octubre" and dia <= 22):
            result = json.dumps(libra)
            return render.index(result)
        elif (mes == "octubre" and dia >= 23) or (mes == "noviembre" and dia <= 21):
            result = json.dumps(escorpio)
            return render.index(result)
        elif (mes == "noviembre" and dia >= 22) or (mes == "diciembre" and dia <= 21):
            result = json.dumps(sagitario)
            return render.index(result)
        elif (mes == "diciembre" and dia >= 22) or (mes == "enero" and dia <= 21):
            result = json.dumps(capricornio)
            return render.index(result)
        elif (mes == "enero" and dia >= 22) or (mes == "febrero" and dia <= 18):
            result = json.dumps(acuario)
            return render.index(result)
        elif (mes == "febrero" and dia >= 19) or (mes == "marzo" and dia <= 20):
            result = json.dumps(piscis)
            return render.index(result)
        else:
            # Day/month pair did not match any sign (e.g. unknown month).
            datos="Los datos ingresados son incorrectos"
            result = json.dumps(datos)
            return render.index(result)
18,174 | 89a57a59b198af6489e169a0b5d11f5082340234 | import os
import numpy as np
import pandas as pd
import cv2
# Input pair lists (columns: image_1, image_2, label) for each data split.
path_csv_train = '/home/m433788/Thesis/data_asli/CSV/train.csv'
path_csv_val = '/home/m433788/Thesis/data_asli/CSV/val.csv'
path_csv_test = '/home/m433788/Thesis/data_asli/CSV/test.csv'
# Roots of the cropped sub-region image trees (with and without overlap).
# NOTE(review): these six paths are defined but not referenced in this file.
path_4_sub = '/home/m433788/Thesis/data_asli/4_subregion'
path_6_sub = '/home/m433788/Thesis/data_asli/6_subregion'
path_9_sub = '/home/m433788/Thesis/data_asli/9_subregion'
path_4_sub_overlapping = '/home/m433788/Thesis/data_asli/4_subregion_overlapping'
path_6_sub_overlapping = '/home/m433788/Thesis/data_asli/6_subregion_overlapping'
path_9_sub_overlapping = '/home/m433788/Thesis/data_asli/9_subregion_overlapping'
def list_data_4_sub(path_csv, save_path_1='', save_path_2='', save_path_3='', save_path_4=''):
    """Expand every image pair in *path_csv* into its 4 sub-region pairs.

    Each image name looks like "<id>@...@...<ext>"; sub-region t of that
    image lives at <id>/<name-without-ext>/0<t+1>.jpg. One CSV per
    sub-region (columns image_1, image_2, label) is written to the
    corresponding save_path.

    Cleanup: the original declared five extra unused bucket lists
    (sub_5..sub_9) and dead elif branches; replaced with an indexed list.
    """
    num_region = 4
    save_paths = [save_path_1, save_path_2, save_path_3, save_path_4]
    buckets = [[] for _ in range(num_region)]
    df = pd.read_csv(path_csv, header=0)
    for i in range(len(df)):
        # The id is the first '@'-field; the stem is the name sans extension.
        id_1, _, _ = df['image_1'][i].split('@')
        stem_1 = os.path.splitext(df['image_1'][i])[0]
        id_2, _, _ = df['image_2'][i].split('@')
        stem_2 = os.path.splitext(df['image_2'][i])[0]
        for t in range(num_region):
            crop_name = '0' + str(t + 1) + '.jpg'
            buckets[t].append({
                'image_1': os.path.join(id_1, stem_1, crop_name),
                'image_2': os.path.join(id_2, stem_2, crop_name),
                'label': df['label'].iloc[i],
            })
    for bucket, dest in zip(buckets, save_paths):
        pd.DataFrame(bucket).to_csv(dest, index=False)
def list_data_6_sub(path_csv, save_path_1='', save_path_2='', save_path_3='', save_path_4='', save_path_5='', save_path_6=''):
    """Expand every image pair in *path_csv* into its 6 sub-region pairs and
    write one CSV (columns image_1, image_2, label) per sub-region."""
    num_region = 6
    destinations = [save_path_1, save_path_2, save_path_3,
                    save_path_4, save_path_5, save_path_6]
    buckets = [[] for _ in range(num_region)]
    table = pd.read_csv(path_csv, header=0)
    for row in range(len(table)):
        left_name = table['image_1'][row]
        right_name = table['image_2'][row]
        # id is the first '@'-field; the stem (name without extension) is
        # the sub-directory holding the crops.
        left_id, _, _ = left_name.split('@')
        right_id, _, _ = right_name.split('@')
        left_stem = os.path.splitext(left_name)[0]
        right_stem = os.path.splitext(right_name)[0]
        for region in range(num_region):
            crop = '0' + str(region + 1) + '.jpg'
            buckets[region].append({
                'image_1': os.path.join(left_id, left_stem, crop),
                'image_2': os.path.join(right_id, right_stem, crop),
                'label': table['label'].iloc[row],
            })
    for bucket, destination in zip(buckets, destinations):
        pd.DataFrame(bucket).to_csv(destination, index=False)
def list_data_9_sub(path_csv, save_path_1='', save_path_2='', save_path_3='', save_path_4='', save_path_5='', save_path_6='', save_path_7='', save_path_8='', save_path_9=''):
    """Expand every image pair in *path_csv* into its 9 sub-region pairs and
    write one CSV (columns image_1, image_2, label) per sub-region."""
    num_region = 9
    destinations = [save_path_1, save_path_2, save_path_3,
                    save_path_4, save_path_5, save_path_6,
                    save_path_7, save_path_8, save_path_9]
    buckets = [[] for _ in range(num_region)]
    table = pd.read_csv(path_csv, header=0)
    for row in range(len(table)):
        left_name = table['image_1'][row]
        right_name = table['image_2'][row]
        # id is the first '@'-field; the stem (name without extension) is
        # the sub-directory holding the crops.
        left_id, _, _ = left_name.split('@')
        right_id, _, _ = right_name.split('@')
        left_stem = os.path.splitext(left_name)[0]
        right_stem = os.path.splitext(right_name)[0]
        for region in range(num_region):
            crop = '0' + str(region + 1) + '.jpg'
            buckets[region].append({
                'image_1': os.path.join(left_id, left_stem, crop),
                'image_2': os.path.join(right_id, right_stem, crop),
                'label': table['label'].iloc[row],
            })
    for bucket, destination in zip(buckets, destinations):
        pd.DataFrame(bucket).to_csv(destination, index=False)
# Generate the per-sub-region pair lists for the test split.
# NOTE(review): the list_data_*_sub functions return None, so these three
# variables are always None; they appear to be kept only for readability.
sub_4SR_test = list_data_4_sub(path_csv_test, '/home/m433788/Thesis/data_asli/CSV/4SR_test_1.csv',
                               '/home/m433788/Thesis/data_asli/CSV/4SR_test_2.csv', '/home/m433788/Thesis/data_asli/CSV/4SR_test_3.csv',
                               '/home/m433788/Thesis/data_asli/CSV/4SR_test_4.csv')
sub_6SR_test = list_data_6_sub(path_csv_test, '/home/m433788/Thesis/data_asli/CSV/6SR_test_1.csv',
                               '/home/m433788/Thesis/data_asli/CSV/6SR_test_2.csv', '/home/m433788/Thesis/data_asli/CSV/6SR_test_3.csv',
                               '/home/m433788/Thesis/data_asli/CSV/6SR_test_4.csv', '/home/m433788/Thesis/data_asli/CSV/6SR_test_5.csv', '/home/m433788/Thesis/data_asli/CSV/6SR_test_6.csv')
sub_9SR_test = list_data_9_sub(path_csv_test, '/home/m433788/Thesis/data_asli/CSV/9SR_test_1.csv',
                               '/home/m433788/Thesis/data_asli/CSV/9SR_test_2.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_3.csv',
                               '/home/m433788/Thesis/data_asli/CSV/9SR_test_4.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_5.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_6.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_7.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_8.csv', '/home/m433788/Thesis/data_asli/CSV/9SR_test_9.csv')
|
18,175 | cc3b843fc99f5d31c3a1883c933f601d57099bb2 | from os import listdir
import os
import numpy as np
# import cv2
from scipy import ndimage
from multiprocessing import Pool
from keras.utils import to_categorical
from lib import util
PIXEL_SIZE = 1  # NOTE(review): defined but not referenced in this file
# Global switches controlling how object heights are encoded as targets.
classify = False  # one-hot classification targets instead of raw heights
bin_class = False  # if classifying, use 2 classes instead of 4 bands
small_obj_heights = False  # clip heights to [0, 150] and scale to [0, 1]
def loadLocations(input_arr):
    """Load one Location (with all special layers) keyed by its name.

    Shaped as a single-argument function so multiprocessing.Pool.map can
    fan location loading out across worker processes.
    """
    name = input_arr
    return {name: Location.load(name, 'all')}
class RawData(object):
    """All loaded Locations for a run, keyed by location name."""

    def __init__(self, locs):
        # locs: dict mapping location name -> Location.
        self.locs = locs

    @staticmethod
    def load(locNames='all', special_layers='all', new_data=None):
        """Load locations from disk and wrap them in a RawData.

        locNames: 'all' (every dir under data/), 'untrain' (every dir under
            data/_untrained/), or an explicit list of names.
        special_layers: 'all', or a dict mapping location name -> layer spec.
        new_data: None selects the parallel (training) loading path;
            anything else loads sequentially (single testing location).
        """
        print("in rawdata load")
        if locNames == 'all':
            locNames = listdir_nohidden('data/')
        if locNames == 'untrain':
            locNames = listdir_nohidden('data/_untrained/')
        if special_layers == 'all':
            if new_data is None:
                # Training: fan location loading out over a small pool.
                print("four locations")
                locs = {}
                with Pool(processes=4) as pool:
                    for partial in pool.map(loadLocations, locNames, 1):
                        locs.update(partial)
            else:
                print('one testing location')
                locs = {n: Location.load(n, 'all') for n in locNames}
        else:
            # special_layers is a dict keyed by location name.
            locs = {n: Location.load(n, special_layers[n]) for n in locNames}
        return RawData(locs)

    def getClassificationOutput(self, locName, location):
        """One-hot object-height target(s) at *location* for *locName*."""
        loc = self.locs[locName]
        return loc.obj_height_classification[location]

    def getOutput(self, locName, location):
        """Raw object-height target(s) at *location* for *locName*."""
        loc = self.locs[locName]
        return loc.layer_obj_heights[location]

    def getSpecialLayer(self, locName, special_layer):
        """Object heights of one named special layer of a location.

        BUG FIX: the lookup line was commented out, leaving `layer`
        undefined (NameError on every call), and the dangling return
        referenced a nonexistent attribute; SpecialLayer stores its height
        raster in `obj_heights`.
        """
        loc = self.locs[locName]
        layer = loc.specialLayers[special_layer]
        return layer.obj_heights

    def __repr__(self):
        return "Dataset({})".format(list(self.locs.values()))
class Location(object):
    """One geographic location: its input rasters (DEM, slope, spectral
    bands, ...) plus the object-height ground truth used as the target."""

    def __init__(self, name, specialLayers, obj_heights=None, layers=None):
        self.name = name
        self.specialLayers = specialLayers
        # Object-height targets; loaded from disk unless supplied.
        self.layer_obj_heights = obj_heights if obj_heights is not None else self.loadLayerObjHeights()
        # Named 2-D input rasters; loaded from disk unless supplied.
        self.layers = layers if layers is not None else self.loadLayers()
        # what is the height and width of a layer of data
        self.layerSize = list(self.layers.values())[0].shape[:2]
        if classify:
            # One-hot encode the (already bucketed) height classes.
            if bin_class:
                self.obj_height_classification = to_categorical(self.layer_obj_heights, 2)
            else:
                self.obj_height_classification = to_categorical(self.layer_obj_heights, 4)

    def loadLayers(self):
        """Load every input raster of this location into a name->array dict."""
        cwd = os.getcwd()
        # Untrained locations live under data/_untrained/ instead of data/.
        untrainged_locNames = listdir_nohidden('data/_untrained/')
        if self.name in untrainged_locNames:
            directory = cwd + '/data/_untrained/{}/'.format(self.name)
        else:
            directory = cwd + '/data/{}/'.format(self.name)
        dem = np.loadtxt(directory + 'dem.txt', delimiter=',') #util.openImg(folder+'dem.tif')
        slope = np.loadtxt(directory + 'slope.txt', delimiter=',')#util.openImg(folder+'slope.tif')
        band_1 = np.loadtxt(directory + 'band_1.txt', delimiter=',')#util.openImg(folder+'band_1.tif')
        band_2 = np.loadtxt(directory + 'band_2.txt', delimiter=',')#util.openImg(folder+'band_2.tif')
        band_3 = np.loadtxt(directory + 'band_3.txt', delimiter=',')#util.openImg(folder+'band_3.tif')
        band_4 = np.loadtxt(directory + 'band_4.txt', delimiter=',')#util.openImg(folder+'band_4.tif')
        ndvi = np.loadtxt(directory + 'ndvi.txt', delimiter=',')#util.openImg(folder+'ndvi.tif')
        aspect = np.loadtxt(directory + 'aspect.txt', delimiter=',')#util.openImg(folder+'aspect.tif')
        footprints = self.loadVeg(self.name)
        f_32 = [dem, slope, ndvi, aspect]
        # above_zero = [dem, slope]
        u_8 = [band_1, band_2, band_3, band_4]
        # NOTE(review): the two loops below rebind only the loop variables
        # (l, b); the arrays placed in `layers` are NOT converted or clipped
        # by them -- confirm whether in-place conversion was intended.
        for l in f_32:
            l = l.astype('float32')
        for b in u_8:
            b = b.astype('uint8')
            b[b<0] = 0
            b[b>255] = 255
        # Band ratio index; result is 0 wherever band_2 is 0 (avoids /0).
        grvi = np.divide(band_4, band_2, out=np.zeros_like(band_4), where=band_2!=0)
        layers = {'dem':dem,
                  'slope':slope,
                  'ndvi':ndvi,
                  'aspect':aspect,
                  'band_4':band_4,
                  'band_3':band_3,
                  'band_2':band_2,
                  'band_1':band_1,
                  'footprints': footprints,
                  'grvi': grvi}
        # NOTE(review): placeholder loop -- does nothing.
        for name, layer in layers.items():
            pass
        return layers

    # @staticmethod
    def loadLayerObjHeights(self):
        """Load the object-height raster as float32.

        With `classify` set, heights are bucketed into 2 (bin_class) or 4
        classes; with `small_obj_heights`, they are clipped to [0, 150] and
        rescaled to [0, 1].
        """
        cwd = os.getcwd()
        untrained_locNames = listdir_nohidden('data/_untrained/')
        if self.name in untrained_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/obj_height.txt'.format(self.name)
        else:
            fname = cwd + '/data/{}/special_layers/obj_height.txt'.format(self.name)
        obj_heights = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_UNCHANGED)
        obj_heights = obj_heights.astype('float32')
        if classify:
            print('classify')
            if bin_class:
                # Two classes: below / at-or-above 10 units.
                obj_heights[obj_heights < 10] = 0
                obj_heights[obj_heights >= 10] = 1
            else:
                # Four bands: <5, 5-20, 20-50, >=50.
                obj_heights[obj_heights < 5] = 0
                obj_heights[(obj_heights >= 5) & (obj_heights < 20)] = 1
                obj_heights[(obj_heights >= 20) & (obj_heights < 50)] = 2
                obj_heights[obj_heights >= 50] = 3
        if small_obj_heights:
            obj_heights[obj_heights<0] = 0
            obj_heights[obj_heights>150] = 150
            obj_heights = np.divide(obj_heights, 150, out=np.zeros_like(obj_heights))
        return obj_heights

    def loadVeg2(self):
        """Load the footprints raster but return an all-zero array of the
        same shape.

        NOTE(review): np.loadtxt never returns None, so the error check is
        dead; if it ever fired it would also NameError (`locName` is
        undefined here -- presumably self.name was meant). The final
        zeros_like deliberately(?) discards the loaded values.
        """
        cwd = os.getcwd()
        untrained_locNames = listdir_nohidden('data/_untrained/')
        if self.name in untrained_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/footprints.txt'.format(self.name)
        else:
            fname = cwd + '/data/{}/special_layers/footprints.txt'.format(self.name)
        veg = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_COLOR)
        veg = veg.astype('uint8')
        if veg is None:
            raise RuntimeError('Could not find veg for location {} for the layer'.format(locName))
        veg = np.zeros_like(veg)
        return veg

    @staticmethod
    def loadVeg(locName, specialLayers='veg'):
        """Load the footprints raster with every non-zero cell forced to 255."""
        cwd = os.getcwd()
        untrainged_locNames = listdir_nohidden('data/_untrained/')
        if locName in untrainged_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/footprints.txt'.format(locName)
        else:
            fname = cwd + '/data/{}/special_layers/footprints.txt'.format(locName)
        veg = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_COLOR)
        veg = veg.astype('uint8')
        # NOTE(review): dead check -- np.loadtxt never returns None.
        if veg is None:
            raise RuntimeError('Could not find veg for location {} for the layer'.format(locName))
        veg[veg!=0] = 255
        return veg

    @staticmethod
    def load(locName, specialLayers='all'):
        """Build a Location, instantiating each of its special layers by name."""
        if specialLayers == 'all':
            special_layers = SpecialLayer.getVegLayer(locName)
            specialLayers = {layer_name:SpecialLayer(locName, layer_name) for layer_name in special_layers}
        return Location(locName, specialLayers)

    def __repr__(self):
        return "Location({}, {})".format(self.name, [d.layer_name for d in self.specialLayers.values()])
class SpecialLayer(object):
    """One auxiliary raster (e.g. building footprints) of a location,
    together with the matching object-height ground truth."""

    def __init__(self, locName, layer_name, allVeg=None, footprints=None, obj_heights=None):
        self.locName = locName
        self.layer_name = layer_name
        # Each raster is taken from the argument when given, otherwise
        # loaded from the location's special_layers directory.
        self.allVeg = allVeg if allVeg is not None else self.loadAllVeg()
        self.footprints = footprints if footprints is not None else self.loadFootprints()
        self.obj_heights = obj_heights if obj_heights is not None else self.loadObjHeights()

    def loadAllVeg(self):
        """Load the footprints raster with every non-zero cell zeroed.

        NOTE(review): veg[veg!=0] = 0 blanks all non-zero cells, so the
        result is all zeros -- confirm this is intended.
        """
        cwd = os.getcwd()
        untrainged_locNames = listdir_nohidden('data/_untrained/')
        if self.locName in untrainged_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/footprints.txt'.format(self.locName)
        else:
            fname = cwd + '/data/{}/special_layers/footprints.txt'.format(self.locName)
        veg = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_UNCHANGED)
        veg = veg.astype('uint8')
        # NOTE(review): dead check -- np.loadtxt never returns None.
        if veg is None:
            raise RuntimeError('Could not find veg for location {} for the layer {}'.format(self.locName, self.layer_name))
        veg[veg!=0] = 0
        return veg

    def loadFootprints(self):
        """Load the raw footprints raster as uint8."""
        cwd = os.getcwd()
        untrainged_locNames = listdir_nohidden('data/_untrained/')
        if self.locName in untrainged_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/footprints.txt'.format(self.locName)
        else:
            fname = cwd + '/data/{}/special_layers/footprints.txt'.format(self.locName)
        not_veg = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_UNCHANGED)
        not_veg = not_veg.astype('uint8')
        # NOTE(review): dead check -- np.loadtxt never returns None.
        if not_veg is None:
            raise RuntimeError('Could not open a footprint for the location {}'.format(self.locName))
        return not_veg

    def loadObjHeights(self):
        """Load the object-height raster as float32.

        With `classify` set, heights are bucketed into 2 (bin_class) or 4
        classes; with `small_obj_heights`, they are clipped to [0, 150] and
        rescaled to [0, 1]. Mirrors Location.loadLayerObjHeights.
        """
        cwd = os.getcwd()
        untrainged_locNames = listdir_nohidden('data/_untrained/')
        if self.locName in untrainged_locNames:
            fname = cwd + '/data/_untrained/{}/special_layers/obj_height.txt'.format(self.locName)
        else:
            fname = cwd + '/data/{}/special_layers/obj_height.txt'.format(self.locName)
        obj_heights = np.loadtxt(fname, delimiter=',')#cv2.imread(fname, cv2.IMREAD_UNCHANGED)
        obj_heights = obj_heights.astype('float32')
        if classify:
            print('classify')
            if bin_class:
                # Two classes: below / at-or-above 10 units.
                obj_heights[obj_heights < 10] = 0
                obj_heights[obj_heights >= 10] = 1
            else:
                # Four bands: <5, 5-20, 20-50, >=50.
                obj_heights[obj_heights < 5] = 0
                obj_heights[(obj_heights >= 5) & (obj_heights < 20)] = 1
                obj_heights[(obj_heights >= 20) & (obj_heights < 50)] = 2
                obj_heights[obj_heights >= 50] = 3
        if small_obj_heights:
            obj_heights[obj_heights<0] = 0
            obj_heights[obj_heights>150] = 150
            obj_heights = np.divide(obj_heights, 150, out=np.zeros_like(obj_heights))
        return obj_heights

    def __repr__(self):
        return "specialLayer({},{})".format(self.locName, self.layer_name)

    @staticmethod
    def getVegLayer(locName):
        """Names of the special layers available for a location."""
        vegLayers = ['footprints']
        return vegLayers
def listdir_nohidden(path):
    """Return the entries of *path* that are not hidden, i.e. whose names
    start with neither '.' nor '_'. Preserves os.listdir ordering."""
    return [entry for entry in listdir(path)
            if not entry.startswith('.') and not entry.startswith('_')]
if __name__ == '__main__':
    # Smoke test: load every trained location found under data/.
    raw = RawData.load()
|
18,176 | b87d6215c221df508afb41039e78012913bc3f00 | #!/usr/bin/python
__author__ = ['Andrew Wollacott (amw215@u.washington.edu)']
__version__ = "Revision 0.1"
import string, sys, os, commands
from optparse import OptionParser
from file_routines import *
from Enzyme import *
def main():
    """
    reports the ligand score (Eatr + Erep + EhbSC)
    """
    # Python 2 script: -p gives a single PDB file, -P a file listing PDBs.
    parser = OptionParser()
    parser.add_option("-p", dest="pdbfile", help="pdbfile")
    parser.add_option("-P", dest="pdblist", help="pdblist")
    parser.set_description(main.__doc__)
    (options, args) = parser.parse_args()
    # Collect the PDB files to score; with neither option, show usage.
    pdbfiles = []
    if options.pdblist:
        pdbfiles = files_from_list(options.pdblist)
    elif options.pdbfile:
        pdbfiles.append(options.pdbfile)
    else:
        parser.print_help()
        sys.exit()
    protein = Enzyme()
    for file in pdbfiles:
        protein.readPDB(file)
        lig = protein.ligand
        if lig == None:
            print "no ligand found for file:",file
            sys.exit()
        # Total ligand score = repulsive + attractive + sidechain H-bond terms.
        tot = lig.Erep + lig.Eatr + lig.EhbSC
        print file,lig.Erep,lig.Eatr,lig.EhbSC,tot
        # Reuse the same Enzyme object for the next structure.
        protein.clear()
if __name__ == "__main__":
    # Script entry point.
    main()
|
18,177 | de19f16de3f8e167ddb574037dcf2cfc98a08e1e | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.utils.image_util import *
import random
from PIL import Image
from PIL import ImageDraw
import numpy as np
import xml.etree.ElementTree
import os
import time
import copy
import six
from collections import deque
from roidbs import JsonDataset
import data_utils
class Settings(object):
    """Run configuration: mirrors every parsed CLI argument onto the
    instance, then derives dataset-specific paths and the class count."""

    def __init__(self, args=None):
        # Copy each parsed argument as an attribute (sorted for determinism).
        for arg, value in sorted(six.iteritems(vars(args))):
            setattr(self, arg, value)
        if 'coco2014' in args.dataset:
            self.class_nums = 81  # 80 COCO categories + background
            self.train_file_list = 'annotations/instances_train2014.json'
            self.train_data_dir = 'train2014'
            self.val_file_list = 'annotations/instances_val2014.json'
            self.val_data_dir = 'val2014'
        elif 'coco2017' in args.dataset:
            self.class_nums = 81
            self.train_file_list = 'annotations/instances_train2017.json'
            self.train_data_dir = 'train2017'
            self.val_file_list = 'annotations/instances_val2017.json'
            self.val_data_dir = 'val2017'
        else:
            raise NotImplementedError('Dataset {} not supported'.format(
                self.dataset))
        # Per-channel pixel mean reshaped to (1, 1, C) so it broadcasts
        # over H x W x C images.
        self.mean_value = np.array(self.mean_value)[
            np.newaxis, np.newaxis, :].astype('float32')
def coco(settings,
         mode,
         batch_size=None,
         total_batch_size=None,
         padding_total=False,
         shuffle=False):
    """Create a batched reader function over a COCO split.

    mode: 'train' (infinite, reshuffled each epoch), 'test' (single pass)
        or 'infer' (only images whose path contains settings.image_name).
    batch_size: images per yielded mini-batch (per device).
    total_batch_size: images gathered before splitting into mini-batches;
        must be a multiple of batch_size (defaults to batch_size).
    padding_total: pad images to the max shape of the whole total batch
        instead of per mini-batch.
    Returns a generator function yielding lists of per-image tuples.
    """
    total_batch_size = total_batch_size if total_batch_size else batch_size
    if mode != 'infer':
        assert total_batch_size % batch_size == 0
    # Resolve annotation list and image directory relative to data_dir.
    if mode == 'train':
        settings.train_file_list = os.path.join(settings.data_dir,
                                                settings.train_file_list)
        settings.train_data_dir = os.path.join(settings.data_dir,
                                               settings.train_data_dir)
    elif mode == 'test' or mode == 'infer':
        settings.val_file_list = os.path.join(settings.data_dir,
                                              settings.val_file_list)
        settings.val_data_dir = os.path.join(settings.data_dir,
                                             settings.val_data_dir)
    json_dataset = JsonDataset(settings, train=(mode == 'train'))
    roidbs = json_dataset.get_roidb()
    print("{} on {} with {} roidbs".format(mode, settings.dataset, len(roidbs)))

    def roidb_reader(roidb, mode):
        # Load one entry: image blob, (H, W, scale) info and ground truth.
        im, im_scales = data_utils.get_image_blob(roidb, settings)
        im_id = roidb['id']
        im_height = np.round(roidb['height'] * im_scales)
        im_width = np.round(roidb['width'] * im_scales)
        im_info = np.array([im_height, im_width, im_scales], dtype=np.float32)
        if mode == 'test' or mode == 'infer':
            return im, im_info, im_id
        gt_boxes = roidb['gt_boxes'].astype('float32')
        gt_classes = roidb['gt_classes'].astype('int32')
        is_crowd = roidb['is_crowd'].astype('int32')
        return im, gt_boxes, gt_classes, is_crowd, im_info, im_id

    def padding_minibatch(batch_data):
        # Zero-pad every image in the batch to the batch's max H and W.
        if len(batch_data) == 1:
            return batch_data
        max_shape = np.array([data[0].shape for data in batch_data]).max(axis=0)
        padding_batch = []
        for data in batch_data:
            im_c, im_h, im_w = data[0].shape[:]
            padding_im = np.zeros(
                (im_c, max_shape[1], max_shape[2]), dtype=np.float32)
            padding_im[:, :im_h, :im_w] = data[0]
            padding_batch.append((padding_im, ) + data[1:])
        return padding_batch

    def reader():
        if mode == "train":
            roidb_perm = deque(np.random.permutation(roidbs))
            roidb_cur = 0
            batch_out = []
            while True:
                roidb = roidb_perm[0]
                roidb_cur += 1
                roidb_perm.rotate(-1)
                if roidb_cur >= len(roidbs):
                    roidb_perm = deque(np.random.permutation(roidbs))
                    # BUG FIX: reset the epoch counter; previously it kept
                    # growing, so the deque was reshuffled on every single
                    # step after the first epoch.
                    roidb_cur = 0
                im, gt_boxes, gt_classes, is_crowd, im_info, im_id = roidb_reader(
                    roidb, mode)
                if gt_boxes.shape[0] == 0:
                    # Skip images without any ground-truth boxes.
                    continue
                batch_out.append(
                    (im, gt_boxes, gt_classes, is_crowd, im_info, im_id))
                if not padding_total:
                    if len(batch_out) == batch_size:
                        yield padding_minibatch(batch_out)
                        batch_out = []
                else:
                    if len(batch_out) == total_batch_size:
                        batch_out = padding_minibatch(batch_out)
                        # BUG FIX: use integer division; '/' yields a float
                        # under Python 3 and range() would raise TypeError.
                        for i in range(total_batch_size // batch_size):
                            sub_batch_out = []
                            for j in range(batch_size):
                                sub_batch_out.append(batch_out[i * batch_size +
                                                               j])
                            yield sub_batch_out
                        batch_out = []
        elif mode == "test":
            batch_out = []
            for roidb in roidbs:
                im, im_info, im_id = roidb_reader(roidb, mode)
                batch_out.append((im, im_info, im_id))
                if len(batch_out) == batch_size:
                    yield batch_out
                    batch_out = []
            # Flush the final partial batch.
            if len(batch_out) != 0:
                yield batch_out
        else:
            # 'infer': yield only images whose path contains image_name.
            for roidb in roidbs:
                if settings.image_name not in roidb['image']:
                    continue
                im, im_info, im_id = roidb_reader(roidb, mode)
                batch_out = [(im, im_info, im_id)]
                yield batch_out

    return reader
def train(settings,
          batch_size,
          total_batch_size=None,
          padding_total=False,
          shuffle=True):
    """Build a training-mode COCO reader.

    Thin wrapper around :func:`coco` with ``mode='train'``; all other
    arguments are forwarded unchanged.
    """
    return coco(
        settings,
        'train',
        batch_size=batch_size,
        total_batch_size=total_batch_size,
        padding_total=padding_total,
        shuffle=shuffle)
def test(settings, batch_size, total_batch_size=None, padding_total=False):
    """Build a test-mode COCO reader.

    BUG FIX: ``padding_total`` is now forwarded to :func:`coco`; the
    original accepted the parameter but silently dropped it.
    """
    return coco(settings, 'test', batch_size, total_batch_size, padding_total,
                shuffle=False)
def infer(settings):
    """Build an inference-mode reader (yields only the image whose path
    contains ``settings.image_name``)."""
    return coco(settings, 'infer')
|
18,178 | 6165c8982ce9a143500862959990f50c2fa0c398 | from math import ceil
# Bake-sale shopping check: each student needs 10 eggs and one flour
# package (every 5th package is free), and 20% extra aprons are bought
# as spares.  Report whether the budget covers the total cost.
budget = float(input())
students = int(input())
price_flour_package = float(input())
price_single_egg = float(input())
price_single_apron = float(input())

free_flour_packages = students // 5
aprons_cost = price_single_apron * ceil(students * 1.2)
eggs_cost = price_single_egg * 10 * students
flour_cost = price_flour_package * (students - free_flour_packages)
price = aprons_cost + eggs_cost + flour_cost

if price <= budget:
    print(f"Items purchased for {price:.2f}$.")
else:
    print(f"{price-budget:.2f}$ more needed.")
|
18,179 | 694fdf628bcc105bc1f22ad439290aae39c02b02 | from sys import stdin
# Sweep line: for each of m query points, count how many of the n
# segments [l, r] cover it (endpoints inclusive).
read = lambda: map(int, stdin.readline().split())
n, m = read()
axis = []
for i in range(n):
    l, r = read()
    # The middle field breaks ties at equal x: -1 (segment opens) sorts
    # before 0 (query point) before 1 (segment closes), which makes both
    # endpoints count as covered.
    axis.append((l, -1, i))
    axis.append((r, 1, -i))
for i, d in enumerate(read()):
    axis.append((d, 0, i))  # query-point event; i indexes into hits
axis.sort()
hits = [0] * m
depth = 0  # number of segments currently open
for x, dec, i in axis:
    depth -= dec  # open event (dec == -1) raises depth, close event lowers it
    if not dec:
        hits[i] = depth  # record coverage at a query point
print(*hits)
18,180 | 8980aec43f3458b7cf768e8d5acb3afa6dc1aa13 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Runtime dependencies of the application.
requires = ['pymongo',
            'flask',
            'flask_mongoengine',
            'mongoengine',
            'flask-script',
            'yota>=0.3']
# Packages live under src/ (the "src layout"); dependency_links pins yota
# 0.3 to a GitHub tarball.  NOTE(review): dependency_links is deprecated
# in modern pip — confirm it still resolves in the target environment.
setup(name='antelope',
      version='0.1',
      description='A simple finance management application',
      author='Isaac Cook',
      author_email='isaac@simpload.com',
      install_requires=requires,
      url='http://www.python.org/sigs/distutils-sig/',
      dependency_links=["https://github.com/icook/yota/tarball/0.3#egg=yota-0.3"],
      packages=find_packages('src'),
      package_dir={'': 'src'}
      )
|
18,181 | 90b8a84817d1db0865aed85a8792b7d745b57886 | from odoo import models, fields, api
class DemoSequence(models.Model):
    """Demo model whose record names are prefixed with an ir.sequence number."""
    _name = 'demo.sequence'
    _description = 'Demo Sequence'
    name = fields.Char(string='Name', required=True)
    @api.model
    def create(self, vals):
        """Prefix the supplied name with the next value of the
        'demo.sequence' ir.sequence ('/' when that sequence is undefined)."""
        seq = self.env['ir.sequence'].next_by_code('demo.sequence') or '/'
        vals['name'] = '{}_{}'.format(seq, vals['name'])
        new_record = super().create(vals)
        return new_record
|
18,182 | ad667bf825d3adcccaf518630ea454a8e694007e | import os
## Parameters used in script_test.py ##
mcmc=False# if False, plot with linear fit. If True, plots with mcmc
########## Definition of the output file ##########
output_file_mcmc='./results_fit_sed_mcmc'
output_file_linear='./result_fit_sed_mat'
output_file_interpolation='./results_interpolation'
# Pick the output directory for the chosen fitting method and create it
# if it does not exist yet.
if mcmc==True:
    output=output_file_mcmc
else:
    output=output_file_linear
if os.path.exists(output):
    print('the output directory, '+output + ' exists already')
else:
    os.mkdir(output)
########## Definition of the object parameters ##########
z =0
distance_modulus=33.51
explosion_date=0 #in principle, this has to be in jd. For the test data it is in jd-explosion dates
EBV=0.035
csm=False
data_file='./data_files/data_13dqy_formatted_for_package.txt' #must have at least the following fields, and a header with the fields written as such: jd,flux,fluxerr,filter.
# filter can be one of the following: ('UVW1','UVW2','UVM2','u_swift','v_swift','b_swift','g_sdss','r_sdss','i_sdss','z_sdss'
# ,'r_cousin','i_cousin','h_2mass','j_2mass','u_johnson','b_johnson','v_johnson')
dates_file='./data_files/13dqy_int_dates.txt'
lower_limit_on_flux=1e-40
filters_directory='../PhotoFit/Filters' #put the path to the Filters directory here
# Interpolation step: per-band flags saying whether the interpolation-error
# step has already been run (True = skip recomputation).
already_run_interp_errors=dict() #don't touch this line
already_run_interp_errors['UVW1']=True
already_run_interp_errors['UVW2']=True
already_run_interp_errors['UVM2']=True
already_run_interp_errors['u_swift']=False
already_run_interp_errors['b_swift']=False
already_run_interp_errors['v_swift']=False
already_run_interp_errors['r_sdss']=True
already_run_interp_errors['g_sdss']=True
already_run_interp_errors['i_sdss']=True
already_run_interp_errors['r_p48']=True
already_run_interp_errors['g_p48']=True
already_run_interp_errors['z_sdss']=True
already_run_interp_errors['u_johnson']=True
already_run_interp_errors['v_johnson']=True
already_run_interp_errors['b_johnson']=True
already_run_interp_errors['i_cousin']=True
already_run_interp_errors['r_cousin']=True
already_run_interp_errors['j_2mass']=True
already_run_interp_errors['h_2mass']=True
# In case you fit with a linear-fitting algorythm
already_run_matrix=True
num_iterations=100
# In case you fit with mcmc
already_run_mcmc=False
nwalkers=80
num_steps=350
# In both case: either None or a list of Boolean of size the number of epochs, where True is for epochs already ran and False is for epoch left to run
already_run_fit=None
excluded_bands=[]
#Setting the priors on T and R:
# Per-epoch lower/upper bounds on radius (cm) and temperature (K); list
# length must match the number of epochs.
priors=True
lowrad=[0.5e14,0.5e14,0.5e14,0.5e14,0.5e14,0.5e14,1e14,1e14,1e14,1e14,1e14,1e14,1e14,1e14,1e14,8e14,8e14,8e14,8e14,8e14,8e14,8e14,8e14,8e14,8e14]
hirad=[5e14,5e14,5e14,5e14,5e14,5e14,1e15,1e15,1e15,1e15,1e15,1e15,1e15,1e15,1e15,2e15,2e15,2e15,2e15,2e15,2e15,2e15,2e15,2e15,2e15]
lowtemp=[1e4,1e4,1e4,1e4,1e4,1e4,1e4,1e4,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,5e3,4e3,4e3,4e3,4e3]
hitemp=[4e4,4e4,3e4,3e4,3e4,3e4,3e4,3e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1.8e4,1e4,1e4,1e4,1e4,1e4,1e4,1e4]
# In case you want to compare your R and T results with existing results from a file
data_compare='./data_files/Yaron_2017_results.txt'#file with column#1: time from explosion, column#2: temperature (K), column#3:radius (cm)
|
18,183 | 3e2bdc1012e6bb035692785a583d91dfc9945b62 | import xlsxwriter
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('data_PC_PC_STPC_IPC.xlsx')
worksheet = workbook.add_worksheet()
# Some data we want to write to the worksheet.
row = 0
col = 0
with open('PC_PC_STPC_IPC.dat', 'r') as f:
data = f.readlines()
#print data
for line in data:
words = line.split()
worksheet.write(row, col, words[0])
worksheet.write(row, col + 1, words[1])
row += 1
print "Complete"
workbook.close()
workbook = xlsxwriter.Workbook('data_PC_PC_STPC_IPC_2.xlsx')
worksheet = workbook.add_worksheet()
row = 0
col = 0
with open('PC_PC_STPC_IPC_2.dat', 'r') as f:
data = f.readlines()
#print data
for line in data:
words = line.split()
worksheet.write(row, col, words[0])
worksheet.write(row, col + 1, words[1])
row += 1
print "Complete"
workbook.close()
workbook = xlsxwriter.Workbook('data_PC_PC_STPC_IPC_3.xlsx')
worksheet = workbook.add_worksheet()
row = 0
col = 0
with open('PC_PC_STPC_IPC_3.dat', 'r') as f:
data = f.readlines()
#print data
for line in data:
words = line.split()
worksheet.write(row, col, words[0])
worksheet.write(row, col + 1, words[1])
row += 1
print "Complete"
workbook.close() |
18,184 | 5b3acb0e1a398d9cc14a4d4a2d5e751c7e08288d | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 17:27:13 2019
Code to read table from SQL Server from a single sampler, downsample to 1 hr,
and put into a dataframe
Ver 3 - includes loop to do all 3 sources and save in separate worksheets
Reads file with list of sampler locations and loops through them
@author: Brian
"""
import pandas as pd
import pyodbc
import time
# file with list of sampler locations in Excel format
# For each sampler location listed in the Excel file, pull its time series
# from three SQL Server sources, downsample to hourly means, and save one
# Excel file per sampler with one worksheet per source.
# file with list of sampler locations in Excel format
sampler_file = 'List_of_samplers_5.xls'
df_samplers = pd.read_excel(sampler_file)
samp_num = len(df_samplers)
x_long = df_samplers.Longitude.values
y_lat = df_samplers.Latitude.values
print('\nNumber of samplers = ' + str(samp_num))
## loop over list of sampler locations and save into different files
for j in range(samp_num):
    x = x_long[j]
    y = y_lat[j]
    print('\nSampler ' + str(j+1), str(x) + ', ' + str(y) + ': ')
# ******************* read sampler location from 3 sources ******************
    sampler_coord = str(x) + '_' + str(y)
    time_stamp = str(int(time.time()))[-6:] # add unique stamp to file name
    writer = pd.ExcelWriter('s_'+ sampler_coord + '_' + time_stamp+'.xls')
    for i in range(3):
        source_num = i+1
        source = 'Source' + str(source_num)
        print('Loading ' + source + '...')
        # prepare sql instruction
        # NOTE(review): values are concatenated into the SQL text; inputs
        # come from a local spreadsheet, but parameterized queries ('?'
        # placeholders) would be safer.
        sql1 = "SELECT * "\
            "FROM [CsvImport] "\
            "where [x] = '" + str(x) + "' and [y] = '" + str(y) + "' and [folderName] = '" + source + "'"\
            " ORDER BY [time] ASC;"
        # make DB connection
        # NOTE(review): hard-coded 'sa' credentials — move to config/env.
        cnxn = pyodbc.connect('Driver={SQL Server};'
                              'Server=hvdev-sql-01;'
                              'Database=brian_csv_import;'
                              'UID=sa;'
                              'PWD=Riley++;')
        # read into dataframe
        sql = sql1
        dataf = pd.read_sql(sql, cnxn)
        # close connection
        cnxn.close()
        # select time column for index
        df1 = dataf.time.to_frame()
        df2 = dataf.time.to_frame()
        df1 = df1.iloc[0:len(dataf),:]
        df2 = df2.iloc[0:len(dataf),:]
        df1['conc']=dataf.ConcTracer_kbm3
        df2['dep']=dataf.DepTracer_kbm2
        df1.set_index('time', inplace=True)
        df2.set_index('time', inplace=True)
        # downsample to 1-hour means (right-labeled, right-closed bins)
        df1_ds = df1.resample('1H', label='right', closed='right').mean()
        df2_ds = df2.resample('1H', label='right', closed='right').mean()
        # add extra columns
        df1_ds['dep'] = df2_ds.dep
        df1_ds['Latitude'] = y
        df1_ds['Longitude'] = x
        df1_ds['Source'] = source_num
        df_ds = df1_ds[['Source', 'Latitude', 'Longitude', 'conc', 'dep']]
        # write this source's sheet into the sampler's workbook
        df_ds.to_excel(writer, sheet_name = source)
    writer.save()
18,185 | 63e17a2d5144de23a7d381cac5747390a3b2a775 | import json
# Regenerate the "contributes.keybindings" section of ../package.json with
# vim-style key bindings: special keys get both a ctrl-modified and a plain
# binding; printable ASCII characters get a ctrl-modified binding only.
# BUG FIX: the original leaked both file handles (open() without close());
# 'with' guarantees the rewritten file is flushed and closed.
with open('../package.json') as package_file:
    package = json.load(package_file)

keysToBind = ['space', 'left', 'right', 'up', 'down', 'esc', 'bs', 'tab', 'cr']
vimKeyToVS = {'esc': 'Escape', 'bs': 'backspace', 'cr': 'enter'}
keybindings = []
for key in keysToBind:
    vsKey = key
    if key in vimKeyToVS:
        vsKey = vimKeyToVS[key]
    for modifier in ['ctrl']:
        modKey = '{0}+{1}'.format(modifier, vsKey)
        vimKey = '<{0}-{1}>'.format(modifier[0], key)
        keybind = {'key': modKey,
                   'command': 'vim.{0}'.format(vimKey),
                   'when': 'vim.use_{0}'.format(vimKey),
                   'vimKey': vimKey}
        keybindings.append(keybind)
    # Multi-character key names are wrapped in angle brackets for the
    # unmodified binding (e.g. 'esc' -> '<esc>').
    if len(key) > 1:
        key = '<{0}>'.format(key)
    keybind = {'key': vsKey, 'command': 'vim.{0}'.format(
        key), 'when': 'vim.use_{0}'.format(key), 'vimKey': key}
    keybindings.append(keybind)

keysToBind = []
for i in range(ord('!'), ord('~') + 1):
    keysToBind.append(chr(i).lower())
keysToBind = list(set(keysToBind))
for key in keysToBind:
    vsKey = key
    if key in vimKeyToVS:
        vsKey = vimKeyToVS[key]
    modifier = 'ctrl'
    modKey = '{0}+{1}'.format(modifier, vsKey)
    vimKey = '<{0}-{1}>'.format(modifier[0], key)
    keybind = {'key': modKey,
               'command': 'vim.{0}'.format(vimKey),
               'when': 'vim.use_{0}'.format(vimKey),
               'vimKey': vimKey}
    keybindings.append(keybind)

package['contributes']['keybindings'] = keybindings
with open('../package.json', 'w') as package_file:
    package_file.write(json.dumps(package, indent=2, sort_keys=False))
# keybind[]
# // let vimToVSMap: {[key: string]: string[]} = {
# // };
# // let vimToVSMap: {[key: string]: string} = {
# // esc: 'escape',
# // };
# for (let i='!'.charCodeAt(0); i <= '~'.charCodeAt(0); i + +) {
# keysToBind.push(String.fromCharCode(i));}
# for (let key of keysToBind) {
# for (let modifier of['c', 's']) {
# const modKey = `${modifier} -${key}`;
|
18,186 | 4633450b0418649b2427856f413831539e37aacd | """
byceps.services.seating.seat_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Dict, List, Optional, Sequence, Set, Tuple
from ...database import db
from ...typing import PartyID
from ..ticketing.dbmodels.category import Category as DbTicketCategory
from ..ticketing.dbmodels.ticket import Ticket as DbTicket
from ..ticketing.transfer.models import TicketCategory, TicketCategoryID
from .dbmodels.area import Area as DbArea
from .dbmodels.seat import Seat as DbSeat
from .transfer.models import AreaID, SeatID, SeatUtilization
def create_seat(
    area: DbArea, coord_x: int, coord_y: int, category_id: TicketCategoryID
) -> DbSeat:
    """Create a seat at (coord_x, coord_y) in *area* and commit it."""
    seat = DbSeat(area, category_id, coord_x=coord_x, coord_y=coord_y)
    db.session.add(seat)
    db.session.commit()
    return seat
def delete_seat(seat_id: SeatID) -> None:
    """Delete the seat with that ID (bulk delete; no-op if it does not exist)."""
    db.session.query(DbSeat) \
        .filter_by(id=seat_id) \
        .delete()
    db.session.commit()
def count_occupied_seats_by_category(
    party_id: PartyID,
) -> List[Tuple[TicketCategory, int]]:
    """Count occupied seats for the party, grouped by ticket category.

    A seat is occupied when it is joined to a non-revoked ticket.  Because
    of the outer join below, categories without any occupied seat are still
    returned, with a count of 0.
    """
    # Seats that currently hold a non-revoked ticket.
    subquery = db.session \
        .query(
            DbSeat.id,
            DbSeat.category_id
        ) \
        .join(DbTicket) \
        .filter_by(revoked=False) \
        .subquery()
    rows = db.session \
        .query(
            DbTicketCategory.id,
            DbTicketCategory.party_id,
            DbTicketCategory.title,
            db.func.count(subquery.c.id)
        ) \
        .outerjoin(subquery, db.and_(DbTicketCategory.id == subquery.c.category_id)) \
        .filter(DbTicketCategory.party_id == party_id) \
        .group_by(DbTicketCategory.id) \
        .order_by(DbTicketCategory.id) \
        .all()
    return [(TicketCategory(row[0], row[1], row[2]), row[3]) for row in rows]
def count_occupied_seats_for_party(party_id: PartyID) -> int:
    """Count occupied seats for the party.

    Occupied means joined to a non-revoked ticket of one of the party's
    ticket categories.
    """
    return DbSeat.query \
        .join(DbTicket) \
        .join(DbTicketCategory) \
        .filter(DbTicket.revoked == False) \
        .filter(DbTicketCategory.party_id == party_id) \
        .count()
def count_seats_for_party(party_id: PartyID) -> int:
    """Return the total number of seats in the party's seating areas."""
    return DbSeat.query \
        .join(DbArea) \
        .filter(DbArea.party_id == party_id) \
        .count()
def get_seat_utilization(party_id: PartyID) -> SeatUtilization:
    """Return how many seats of how many in total are occupied.

    Combines the two count helpers above into one SeatUtilization value.
    """
    occupied_seat_count = count_occupied_seats_for_party(party_id)
    total_seat_count = count_seats_for_party(party_id)
    return SeatUtilization(occupied_seat_count, total_seat_count)
def get_seat_total_per_area(party_id: PartyID) -> Dict[AreaID, int]:
    """Return the number of seats per area for that party.

    Areas without seats are included with a count of 0 (outer join).
    """
    area_ids_and_seat_counts = db.session \
        .query(
            DbArea.id,
            db.func.count(DbSeat.id)
        ) \
        .filter_by(party_id=party_id) \
        .outerjoin(DbSeat) \
        .group_by(DbArea.id) \
        .all()
    return dict(area_ids_and_seat_counts)
def find_seat(seat_id: SeatID) -> Optional[DbSeat]:
    """Return the seat with that id, or `None` if not found."""
    return DbSeat.query.get(seat_id)
def find_seats(seat_ids: Set[SeatID]) -> Set[DbSeat]:
    """Return the seats with those IDs.

    Unknown IDs are silently skipped, so the result may be smaller than
    the input set.
    """
    if not seat_ids:
        # Avoid an empty IN () query.
        return set()
    seats = DbSeat.query \
        .filter(DbSeat.id.in_(frozenset(seat_ids))) \
        .all()
    return set(seats)
def get_seats_with_tickets_for_area(area_id: AreaID) -> Sequence[DbSeat]:
    """Return the seats and their associated tickets (if available) for
    that area.

    The occupying ticket is eagerly loaded to avoid N+1 queries when
    rendering a seating plan.
    """
    return DbSeat.query \
        .filter_by(area_id=area_id) \
        .options(
            db.joinedload('occupied_by_ticket'),
        ) \
        .all()
|
18,187 | 91bf0432002e585a7ecbceadb566c7e3c26cc030 | """ Display image sequence using matplotlib."""
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
class ImageSequenceRenderer(object):
    """Render a sequence of images provided by an ImageSequence-like object.

    Layout: a large main view on top, a small auxiliary ('Flow Graph') view
    and a text panel below, plus a 'Next' button that advances the sequence.
    The sequence object must provide init_image(), init_image_original(),
    next_image() (returning a pair of images) and complete().
    """
    def __init__(self, image_sequence):
        # Fix the size of subplot
        self.fig, self.axes = plt.subplots(2, sharey=True, figsize=(10, 5))
        # Set background to be gray
        self.fig.patch.set_facecolor('#333333')
        # Store image sequence in self.seq and display the sequence
        self.seq = image_sequence
        self.image = self.seq.init_image()
        self.image2 = self.seq.init_image_original()
        # Main view: top two rows of a 3x3 grid.
        self.image_figure = plt.subplot2grid((3, 3), (0, 0), colspan=3, rowspan=2)
        self.image_figure.axis('off')
        self.image_plot = self.image_figure.imshow(self.image)
        self.image_figure.set_title('Dinic', color='white')
        # Auxiliary view (bottom-middle cell).
        self.init_figure = plt.subplot2grid((3, 3), (2, 1))
        self.init_figure.axis('off')
        self.init_plot = plt.imshow(self.image2)
        self.init_figure.set_title('Flow Graph', color = 'white' )
        # Text panel (bottom-right cell); its title is used as the text area.
        self.text_figure = plt.subplot2grid((3, 3), (2, 2))
        self.text_figure.axis('off')
        self.text_figure.set_title('',color = 'white')
        plt.subplots_adjust(bottom=0.2)
        axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
        bnext = Button(axnext, 'Next')
        bnext.on_clicked(self.next)
        # Blocks here until the window is closed.
        plt.show()
    def next(self, event):
        """Button callback: advance to the next image pair (or show 'Completed!')."""
        if self.seq.complete():
            self.image_figure.set_title('Completed!') # TODO better way to show it has completed
            plt.draw()
            return
        self.image, self.image2 = self.seq.next_image()
        if self.image is not None:
            self.image_plot.set_data(self.image)
            # Optional per-step title/aux text supplied by the sequence.
            self.image_figure.set_title(getattr(self.seq, 'title', "Dinic"))
            self.text_figure.set_title(getattr(self.seq, 'aux_text', ""))
            self.init_plot.set_data(self.image2)
        plt.draw()
class ImageSequence(object):
    """Demo sequence that alternates between two images for 10 steps.

    Implements the interface ImageSequenceRenderer expects:
    init_image(), init_image_original(), next_image(), complete().
    """
    def __init__(self):
        self.i = 0       # index of the current frame
        self.count = 0   # number of advances so far
        # BUG FIX: wrap map() in list() — under Python 3 map() returns a
        # lazy iterator, which is not subscriptable (self.pics[self.i]).
        self.pics = list(map(mpimg.imread, ['bw.png', 'hello.jpg']))
    def init_image(self):
        return self.pics[self.i]
    def init_image_original(self):
        return self.pics[self.i]
    def next_image(self):
        self.i = 1 - self.i
        self.count = self.count + 1
        # BUG FIX: the renderer unpacks two images (main + auxiliary view);
        # this demo sequence reuses the same frame for both.
        return self.pics[self.i], self.pics[self.i]
    def complete(self):
        return self.count >= 10
def start_gui(imageSequence):
    """Open the viewer for the given sequence; blocks until the window is
    closed (the renderer's __init__ calls plt.show())."""
    imageSequenceRenderer = ImageSequenceRenderer(imageSequence)
|
18,188 | f4f01c681708d8958108a91fd0dabbd1f94ed163 | from odoo import api, fields, models
class productpreverse(models.Model):
    """RMA product preservation record: storage conditions, location and
    the list of preserved product lines."""
    _name = "product.preserve"
    _description = "RMA Product Preserve"
    # BUG FIX: odoo field labels use the lowercase ``string`` keyword; the
    # original passed ``String=...`` (here and on Kho below), which odoo
    # does not recognise, so those labels fell back to the field names.
    name = fields.Char(string="Tên Nghiệp Vụ", required=True)
    preserve_lines = fields.One2many('product.preserve.lines', 'preserve_id', string="Product Preserve")
    company_id = fields.Many2one('res.company', string="Công Ty")
    # Storage temperature range selection.
    Nhietdo = fields.Selection([
        ('1', '10 - 20'),
        ('2', '20 - 0'),
    ], string="Nhiệt độ bảo quản", required=True, default='1')
    datebq = fields.Date(string='Ngày nhập kho bảo quản', required=True)
    datehh = fields.Date(string='Ngày nhập kho hàng hóa', required=True)
    thoigianbaoquan = fields.Date(string='Thời gian bảo quản')
    datechamsocdinhki = fields.Date(string='Ngày theo dõi hàng hóa định kì')
    Kho = fields.Many2one('temporary.warehouse', string="Nơi Bảo Quản")
    # Address/area details mirrored from the selected warehouse.
    diachi = fields.Text(related="Kho.diachi", string="Địa chỉ")
    dientich = fields.Integer(related="Kho.dientich", string="Diện Tích")
    dientich1 = fields.Integer(related="Kho.dientich1", string="Diện Tích")
    tinhtranghang = fields.Selection([
        ('1', 'Dưới 40%'),
        ('2', 'Trên 40%'),
    ], string="Tình trạng hàng", required=True, default='1')
class productpreverselines(models.Model):
    """One preserved product line: product, quantity and total value."""
    _name = "product.preserve.lines"
    _description = "RMA Product Preserve Lines"
    product_id = fields.Many2one('product.template', string="Tên Sản Phẩm")
    product_qty = fields.Integer(string="Số Lượng")
    list_price = fields.Float(string='Giá Trị Sản Phẩm', related="product_id.list_price")
    sum_price = fields.Integer(string="Tổng Giá Trị Sản Phẩm", compute="_compute_sum_price")
    preserve_id = fields.Many2one('product.preserve', string="Product Preserve")
    # BUG FIX: compute methods need @api.depends so odoo knows when to
    # recompute; without it sum_price goes stale when the product's price
    # or the quantity changes.
    @api.depends('product_id.list_price', 'product_qty')
    def _compute_sum_price(self):
        """Total value of the line (unit price * quantity, 0 without product)."""
        for productpreverselines in self:
            sum_price = 0
            if productpreverselines.product_id and productpreverselines.product_id.list_price:
                sum_price += (productpreverselines.product_id.list_price
                              * productpreverselines.product_qty)
            productpreverselines.sum_price = sum_price
|
18,189 | 3ad6b30d216864608a16c57f3994ff4c0b49c5c0 | # -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
class Graphs:
    """Thin wrappers around dash (plotly) apps for quick data visualisation.

    NOTE(review): each method builds its own dash app and (except
    ``histogram``) starts a blocking dev server, so only one chart can be
    shown per call.
    """
    def __init__(self):
        pass
    def histogram(self, title, x_label, y_label, X, Y):
        """Build a bar-chart app for X/Y.

        NOTE(review): the ``__name__`` guard below tests this module's
        name, so when the module is imported the server never starts and
        this method has no visible effect — confirm intent.
        ``x_label``/``y_label`` are currently unused.
        """
        app = dash.Dash()
        app.layout = html.Div(children=[
            html.H1(children='Hello Dash'),
            html.Div(children='''
                Dash: A web application framework for Python.
            '''),
            dcc.Graph(
                id='example-graph',
                figure={
                    'data': [
                        {'x': X, 'y': Y, 'type': 'bar', 'name': 'SF'},
                        {'x': X, 'y': Y, 'type': 'bar', 'name': u'Montréal'},
                    ],
                    'layout': {
                        'title': title
                    }
                }
            )
        ])
        if __name__ == '__main__':
            app.run_server(debug=True)
    def histogram2(self, title, x_label, y_label, X, Y):
        """Same as histogram() but always starts the (blocking) dev server."""
        app = dash.Dash()
        app.layout = html.Div(children=[
            html.H1(children='Hello Dash'),
            html.Div(children='''
                Dash: A web application framework for Python.
            '''),
            dcc.Graph(
                id='example-graph',
                figure={
                    'data': [
                        {'x': X, 'y': Y, 'type': 'bar', 'name': 'SF'},
                        {'x': X, 'y': Y, 'type': 'bar', 'name': u'Montréal'},
                    ],
                    'layout': {
                        'title': title
                    }
                }
            )
        ])
        app.run_server(debug=True)
    def showData(self, df):
        """Render the first rows of a pandas-like DataFrame as an HTML table."""
        def generate_table(dataframe, max_rows=10):
            # Build an html.Table from the column names plus up to
            # max_rows data rows.
            return html.Table(
                # Header
                [html.Tr([html.Th(col) for col in dataframe.columns])] +
                # Body
                [html.Tr([
                    html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
                ]) for i in range(min(len(dataframe), max_rows))]
            )
        app = dash.Dash()
        app.layout = html.Div(children=[
            html.H4(children='Sample Data'),
            generate_table(df)
        ])
        app.run_server(debug=True)
    def pieChart(self):
        """Show four hard-coded pie charts selectable via tabs."""
        from dash.dependencies import Input, Output, State
        app = dash.Dash()
        def app_layout():
            return (
                html.Div([
                    dcc.Tabs(
                        tabs=[{'label': 'Pie1', 'value': 1},
                              {'label': 'Pie2', 'value': 2},
                              {'label': 'Pie3', 'value': 3},
                              {'label': 'Pie4', 'value': 4}
                              ],
                        value=1,
                        id='tabs'
                    ),
                    html.Div(id='output-tab')
                ])
            )
        app.layout = app_layout()
        @app.callback(Output('output-tab', 'children'),
                      [Input('tabs', 'value')])
        def display_content(value):
            # Pick the data split matching the selected tab (1-based).
            data = [
                {
                    'values': [[10, 90], [5, 95], [15, 85], [20, 80]][int(value) - 1],
                    'type': 'pie',
                },
            ]
            return html.Div([
                dcc.Graph(
                    id='graph',
                    figure={
                        'data': data,
                        'layout': {
                            'margin': {
                                'l': 30,
                                'r': 0,
                                'b': 30,
                                't': 0
                            },
                            'legend': {'x': 0, 'y': 1}
                        }
                    }
                )
            ])
        #if __name__ == '__main__':
        app.server.run(debug=True)
    def scatterPlot(self, X, y):
        """Plot X/y as bar, scatter or box depending on the selected tab."""
        app = dash.Dash()
        app.scripts.config.serve_locally = True
        app.layout = html.Div([
            dcc.Tabs(
                tabs=[
                    {'label': 'Tab {}'.format(i), 'value': i} for i in range(1, 5)
                ],
                value=3,
                id='tabs'
            ),
            html.Div(id='tab-output')
        ], style={
            'width': '80%',
            'fontFamily': 'Sans-Serif',
            'margin-left': 'auto',
            'margin-right': 'auto'
        })
        @app.callback(Output('tab-output', 'children'), [Input('tabs', 'value')])
        def display_content(value):
            # Tab value mod 3 selects the trace type for both series.
            data = [
                {
                    'x': X,
                    'y': y,
                    'name': 'x',
                    'marker': {
                        'color': 'rgb(55, 83, 109)'
                    },
                    'type': ['bar', 'scatter', 'box'][int(value) % 3]
                },
                {
                    'x': X,
                    'y': y,
                    'name': '',
                    'marker': {
                        'color': 'rgb(26, 118, 255)'
                    },
                    'type': ['bar', 'scatter', 'box'][int(value) % 3]
                }
            ]
            return html.Div([
                dcc.Graph(
                    id='graph',
                    figure={
                        'data': data,
                        'layout': {
                            'margin': {
                                'l': 30,
                                'r': 0,
                                'b': 30,
                                't': 0
                            },
                            'legend': {'x': 0, 'y': 1}
                        }
                    }
                ),
                #html.Div(' '.join(get_sentences(10)))
            ])
        app.run_server(debug=True)
#x = [1, 2, 3]
#y= [4, 1, 2]
#obj = Graphs()
#obj.histogram2("sample", "x-axis", "y-axis", x, y)
#x = list(action3(action_type3, filtered_data))[-20:]
#obj = Graphs()
#obj.scatterPlot(list(x))
|
18,190 | 52561f2fb007bef71ac797925a730876eb5990f2 |
"""supersior, check if heartbeat of app stoped"""
import time
import gevent
import redis
# Planned components (not yet implemented):
#   app_server / restart_strategy / register
SLEEP_SECONDS = 5  # pause between supervision sweeps, in seconds
HEARTBEAT_INTERVAL = 1  # expected heartbeat period, in seconds (not referenced here yet)
THRESHHOLD = 10  # max heartbeat age before an app counts as dead, in seconds
LAST_ALERT = ""  # last alert marker (not referenced here yet)
TOTAL_ALERT_TH = 3  # max alerts before silencing (not referenced here yet)
# send email
# stop alert if total alert > threshold
REDIS_HOST = "localhost"
REDIS_PORT = 6379
REDIS_DB = 0
class Supervisor(object):
    """Poll per-app heartbeats stored in redis and raise an alert for any
    app whose heartbeat is older than THRESHHOLD seconds."""
    def __init__(self):
        self.app_list = ["app1", "app2", "app3"]  # placeholder app names
        self.rclient = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, \
            db=REDIS_DB)
    def add_app(self):
        """add app while running
        impl: check etcd and get app list every minute?
        """
        pass
    def remove_app(self):
        """remove app while running (not implemented)"""
        pass
    @classmethod
    def notify(cls, app, msg):
        """
        Send a restart/alert message for *app* (currently only prints;
        the job-queue push is commented out).
        """
        #self.rclient.lpush()
        print("[%s,%s]send alert to job queue" % (app, msg))
    @classmethod
    def has_issue(cls, now, last_heartbeat):
        """Return True when the heartbeat is older than THRESHHOLD seconds."""
        delta = now - last_heartbeat
        if delta > THRESHHOLD:
            return True
        else:
            return False
    def check(self, app):
        """check app heartbeat"""
        # NOTE(review): the redis keys "aac"/"check_hb" are hard-coded and
        # last_heartbeat is passed as 0, so has_issue() is always True for
        # current wall-clock time — looks like placeholder logic; confirm.
        val = self.rclient.get("aac")
        now = time.time()
        self.rclient.set("check_hb", now)
        print (val)
        if self.has_issue(now, 0):
            msg = ""
            self.notify(app, msg)
    def run(self):
        """Supervision loop: check every app's heartbeat, then sleep."""
        #
        # forever loop
        while True:
            for app in self.app_list:
                self.check(app)
                #print("check")
            gevent.sleep(SLEEP_SECONDS)
def main():
    """Entry point: build a Supervisor and run its polling loop forever."""
    supervisor = Supervisor()
    supervisor.run()
if __name__ == "__main__":
    main()
18,191 | 8cbef48fc84054ee8cd0c06c2f6c29149dad6cb0 | # -*- coding: utf-8 -*-
import os
import socket
import sys

import settings
# fabric settings
FABRIC = {
    'live': {
        'HOSTS': ['host.com'],
        'WEB_USER': 'www-data',
        'ADMIN_USER': 'admin',
        'PROJECT_ROOT': '/srv/flt',
    }
}
EVERNOTE_SANDBOX = True
# trying to get a clean windows virtual env
PRODUCTION_SERVERS = ['xc',]
# Decide production vs development from the machine's hostname.
if socket.gethostname() in PRODUCTION_SERVERS:
    PRODUCTION = True
    sys.path.append(FABRIC['live']['PROJECT_ROOT'])
else:
    PRODUCTION = False
DEBUG = not PRODUCTION
TEMPLATE_DEBUG = DEBUG
MEDIA_DEV_MODE = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
CONTACT_EMAIL = "larry@fltsys.org"
# NOTE(review): ENGINE selects the MySQL GIS backend while NAME points at
# a sqlite file path, and PROJECT_ROOT is not defined in this module —
# presumably this file is exec'd/star-imported by the main settings
# module, which provides it.  Confirm before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.mysql',
        #'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(PROJECT_ROOT,'db/fltsys.db'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_7pr#bc70r5ck6$lf)ydgk*vjsfod5rl*cz8ao8&07+a-7ia3m'
GOOGLE_ANALYTICS_CODE = ""
DEBUG_TOOLBAR_CONFIG = {
    "INTERCEPT_REDIRECTS": False,
}
DEFAULT_FROM_EMAIL = ""
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_PORT = 587
EMAIL_USE_TLS = True
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = True
ACCOUNT_EMAIL_AUTHENTICATION = True
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
import os
import sys
KMLDIR = os.path.join(settings.PROJECT_ROOT, "private", "kml")
JSONDIR = os.path.join(settings.PROJECT_ROOT, "private", "json")
GOOGLE_API_KEY = ''
# Evernote credentials: sandbox vs production host (keys left blank here).
if EVERNOTE_SANDBOX:
    EVERNOTE_HOST = "sandbox.evernote.com"
    EVERNOTE_CONSUMER_KEY = ''
    EVERNOTE_CONSUMER_SECRET = ''
    EVERNOTE_USER = ''
    EVERNOTE_PW = ''
else:
    EVERNOTE_HOST = "evernote.com"
    EVERNOTE_CONSUMER_KEY = ''
    EVERNOTE_CONSUMER_SECRET = ''
    EVERNOTE_USER = ''
    EVERNOTE_PW = ''
|
18,192 | fc1319966617d342e5f7fbcaa6bf665441a9c556 | class Job:
def __init__(self, name, deadline, duration, profit):
self.name = name
self.deadline = deadline
self.duration = duration
self.profit = profit
    def __lt__(self, other):
        # Order jobs by profit, so sorted() ranks the least profitable first.
        return self.profit < other.profit
    def __str__(self):
        # Display a job by its name.
        return self.name
# Greedy algorithm
def JobSequencing(jobs, time_slots):
    """Greedily schedule jobs by descending profit.

    The most profitable job is always taken first; each further candidate
    (again in descending profit order) is added only if it would finish
    strictly before ``time_slots`` and strictly before its own deadline.
    Returns the chosen jobs in scheduling order.
    """
    remaining = sorted(jobs)           # ascending by profit (Job.__lt__)
    chosen = [remaining.pop()]         # start with the most profitable job
    elapsed = chosen[0].duration       # running total of scheduled time
    while remaining and elapsed < time_slots:
        candidate = remaining.pop()    # next most profitable job
        finish_time = elapsed + candidate.duration
        if finish_time < time_slots and finish_time < candidate.deadline:
            chosen.append(candidate)
            elapsed = finish_time
    return chosen
# Example execution
# jobs = [Job('a', 3, 1, 2), Job('b', 2, 2, 5), Job('c', 4, 1,3)]
# print([str(j) for j in JobSequencing(jobs, 3)])
|
18,193 | 834c14e61b01e9b4f022a5b981040405d9a57ae1 | import d2l_dx
from mxnet import autograd, np, npx, gluon, init
from mxnet.gluon import nn
import matplotlib.pyplot as plt
npx.set_np()
# Synthetic noisy sine series of length T.
T = 1000
time = np.arange(0, T)
x = np.sin(0.01 * time) + 0.2 * np.random.normal(size=T)
# plt.plot(x)
# plt.show()
# Autoregressive embedding: feature row t holds the tau previous values
# x[t], ..., x[t+tau-1]; the label is the next value x[t+tau].
tau = 4
features = np.zeros((T - tau, tau))
for i in range(tau):
    features[:, i] = x[i: T - tau + i]
labels = x[tau:]
|
18,194 | bdcbc41f5fd8834fff0258e41c14dc6d9b6678d3 | '''
imageViewThreshEdgeHSV
Loads one image, display edge, HSV, RGB and several threshold methods of image
Uses keyboard to dynamically change method parameters
Tom Zimmerman, IBM Research-Almaden, Center for Cellular Construction
V1 7/15/21
This material is based upon work supported by the NSF under Grant No. DBI-1548297.
Disclaimer: Any opinions, findings and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation
'''
import cv2
import numpy as np
############ PUT YOUR IMAGE HERE ##################
image1=r'C:\Users\ThomasZimmerman\Pictures\CCC_Riggs\iPhone\OreoWT_jpeg\IMG_4818.jpg' # image location
SCALE=(320,240) # display resolution
SCALE=(640,480) # display resolution (overrides the line above; comment one out to choose)
# keyboard variable setup and initial values
keyState=0;
key=0;
keyBuf=np.zeros((256),dtype=int)  # one value slot per ASCII key code
keyBuf[ord('t')]=90 # quantization threshold
keyBuf[ord('e')]=5 # canny low thresh
keyBuf[ord('E')]=30 # canny high thresh
keyBuf[ord('k')]=10 # kernal size (*2 + 1)
keyBuf[ord('T')]=2 # threshold for adaptive
############# Functions #################
def processKey(key):
    """Handle one keypress.

    '=' / '-' adjust the currently selected parameter by 1, their shifted
    variants '+' / '_' by 10 (values never go below 0 / 10 respectively);
    any other key selects that key's parameter slot.  State lives in the
    globals keyState (selected key code) and keyBuf (per-key values).
    """
    global keyState;
    if key==ord('='):
        keyBuf[keyState]+=1
    elif key==ord('+'):
        keyBuf[keyState]+=10
    elif key==ord('-'):
        if keyBuf[keyState]>0:
            keyBuf[keyState]-=1
    elif key==ord('_'):
        if keyBuf[keyState]>10:
            keyBuf[keyState]-=10
    else:
        keyState=key
    print (chr(keyState),'=',keyBuf[keyState])
    return
def title():
    """Print the keyboard-control help text to the console."""
    print('Keyboard Controls')
    print('Click on any image to direct key activity to program')
    print('Press key to select parameter')
    print('Push + and - keys to change value')
    print('Hold shift while pressing + or - to change value by increments of 10')
    print()
    print('Key  Parameter')
    print('===  =========')
    print('t    quantization threshold')
    print('e    Canny low threshold')
    print('E    Canny high threshold')
    print('k    kernal size')
    print('T    threshold for adaptive')
    print('q    to quit program')
def iKernal(im,k): # integrate kernal
    """Box-integrate k x k patches of a grayscale image.

    Samples every `skip` pixels; each sampled position stores the mean of
    its k x k patch, clipped to [0, 255].  All other pixels stay 0.
    """
    print('doing kim')
    (yRez,xRez)=im.shape
    kim=np.zeros((yRez,xRez),dtype='uint8')
    scale=(k*k); skip=10
    for x in range(0,xRez-k,skip):
        for y in range(0,yRez-k,skip):
            acc=np.sum(im[y:y+k,x:x+k])
            normAcc=np.clip(int(acc/scale),0,255)
            # BUG FIX: store the clipped mean.  The original stored the raw
            # sum, which wraps modulo 256 in the uint8 array, leaving
            # normAcc computed but unused.
            kim[y,x]=normAcc
    return(kim)
################################ MAIN ##########################################
title() # display instructions
rawColorIM=cv2.imread(image1) # read in color image
rawIM=cv2.cvtColor(rawColorIM, cv2.COLOR_BGR2GRAY) # read in as grayscale image
(x,y)=rawIM.shape
run=1
# Interactive loop: reread the tunable parameters from keyBuf each frame
# and refresh every derived view until 'q' is pressed.
while(run):
    # read keyboard and update variables
    key=cv2.waitKey(10) & 0xFF
    if key == ord('q'):
        run=0
    if key!=255:
        processKey(key)
    thresh=keyBuf[ord('t')]
    eLow=keyBuf[ord('e')]
    eHigh=eLow+keyBuf[ord('E')]
    kernalSize=2*keyBuf[ord('k')]+1  # adaptive-threshold block size (odd)
    threshAdapt=keyBuf[ord('T')]
    edgeIM = cv2.Canny(rawIM,eLow,eHigh)
    cv2.imshow('edge',cv2.resize(edgeIM,SCALE))
    # Fixed-threshold variants.
    ret,th1= cv2.threshold(rawIM,thresh,255,cv2.THRESH_BINARY_INV)
    cv2.imshow('THRESH_BINARY_INV',cv2.resize(th1,SCALE))
    ret,th2 = cv2.threshold(rawIM,thresh,255,cv2.THRESH_TRUNC)
    cv2.imshow('THRESH_TRUNC',cv2.resize(th2,SCALE))
    ret,th3 = cv2.threshold(rawIM,thresh,255,cv2.THRESH_TOZERO_INV)
    cv2.imshow('THRESH_TOZERO_INV',cv2.resize(th3,SCALE))
    # Adaptive-threshold variants.
    th4 = cv2.adaptiveThreshold(rawIM,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,kernalSize,threshAdapt)
    cv2.imshow('ADAPTIVE_THRESH_MEAN',cv2.resize(th4,SCALE))
    th5 = cv2.adaptiveThreshold(rawIM,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,kernalSize,threshAdapt)
    cv2.imshow('ADAPTIVE_THRESH_GAUSSIAN',cv2.resize(th5,SCALE))
    #kim=iKernal(th5,100) # integrate kernal
    #cv2.imshow('KIM_GAUSS',cv2.resize(kim,SCALE))
    cv2.imshow('Blur',cv2.resize(cv2.blur(th5,(110,110)),SCALE))
    cv2.imshow('Original Color',cv2.resize(rawColorIM,SCALE))
    cv2.imshow('Original Gray',cv2.resize(rawIM,SCALE))
    # Individual BGR channels.
    cv2.imshow('Red',cv2.resize(rawColorIM[:,:,2],SCALE))
    cv2.imshow('Green',cv2.resize(rawColorIM[:,:,1],SCALE))
    cv2.imshow('Blue',cv2.resize(rawColorIM[:,:,0],SCALE))
    # HSV channels.
    hsvIM = cv2.cvtColor(rawColorIM, cv2.COLOR_BGR2HSV)
    cv2.imshow('Hue',cv2.resize(hsvIM[:,:,0],SCALE))
    cv2.imshow('Saturation',cv2.resize(hsvIM[:,:,1],SCALE))
    cv2.imshow('Value',cv2.resize(hsvIM[:,:,2],SCALE))
cv2.destroyAllWindows()
|
18,195 | 4d92f15cc683a5bd05aebddda8edfc2c79529ead | class Person(object):
    # Called when the object is being allocated (before __init__).
    def __new__(cls, name, sex):
        # Announces that the custom constructor ran, then delegates the
        # actual allocation to object.__new__.
        print('调用自己的构造方法创建对象')
        return object.__new__(cls)
    # Initialiser: stores the instance attributes.
    def __init__(self, name, sex):
        self.name = name  # person's name
        self.sex = sex  # person's sex
    # Called when the object is printed; must return a string.
    def __str__(self):
        return '我是%s,我的性别%s' % (self.name, self.sex)
    # Destructor: called once every reference to the instance is gone.
    def __del__(self):
        print('%s对象引用全被删除,执行析构函数' % self.name)
# __new__, __init__, __str__, __del__ — names starting and ending with a
# double underscore are "magic" (dunder) methods: they are not called
# directly, the interpreter invokes them automatically in specific situations.
p1 = Person('laoxiao', '男')
p2 = Person('xiaoli', '女')
p3 = p1  # p3 is a second reference to the same object as p1
print(id(p1))
print(id(p2))
print(id(p3))  # same id as p1: aliases share identity
del (p1)  # removes one reference only; p3 still keeps the object alive
print(p2)
del (p3)  # last reference gone -> __del__ fires for the 'laoxiao' object
|
18,196 | ddd7d1d871b248fc217c01528240a76139839cbe | import numpy as np
import torch
class ToTensor(object):
    """Dataset transform: convert a (chromatogram, label) pair of
    array-likes into float32 torch Tensors."""

    def __call__(self, sample):
        # Convert each half of the sample through numpy into a float tensor.
        chromatogram, label = sample[0], sample[1]
        to_float = lambda a: torch.from_numpy(np.asarray(a)).float()
        return to_float(chromatogram), to_float(label)
|
18,197 | f41a0f3f14873337d8b391dd9bed07be470c647e | """Prevent Duplicate Windows.
Sublime Text 3 package to prevent duplicate windows from being opened.
Instead, switch to the corresponding, existing window.
[duplicate window]
1. window with the same folders has an existing one
"""
import os
import time
import sublime
import sublime_plugin
from subprocess import Popen, PIPE
class PreventDuplicateWindows(sublime_plugin.EventListener):
    """Close a freshly opened window when an existing window already shows
    the same folders (or, for folder-less windows, the same file), and
    switch focus to that existing window."""

    # using on_new without async, the window does't have any folders yet
    def on_new_async(self, view):
        # if a single file is opened in a new window, view doesn't contain a view. api bug?
        current_window = view.window() or sublime.windows()[-1]
        # if a single file is opened, view might not contain a file name. api bug?
        current_file = view.file_name() or current_window.active_view().file_name()
        current_folders = current_window.folders()
        # dont do anything if we have an empty buffer on a new window
        # NOTE(review): current_window falls back to sublime.windows()[-1]
        # above, so `not current_window` looks unreachable — confirm intent.
        if not current_file and not current_window:
            return
        # loop through all windows except current one
        for existing_window in sublime.windows()[:-1]:
            existing_folders = existing_window.folders()
            # folders need to match
            if existing_folders == current_folders:
                # if the folders are empty, then the files need to match
                if existing_folders or current_file == existing_window.active_view().file_name():
                    # close current window
                    current_window.run_command('close_window')
                    # switch window unless current window is the right one
                    if existing_window != sublime.windows()[-1]:
                        self.focus(existing_window)
                    return

    def focus(self, window_to_move_to):
        """Give keyboard/OS focus to `window_to_move_to`, working around
        platform-specific Sublime focus quirks."""
        active_view = window_to_move_to.active_view()
        active_group = window_to_move_to.active_group()
        # In Sublime Text 2 if a folder has no open files in it the active view
        # will return None. This tries to use the actives view and falls back
        # to using the active group
        # Calling focus then the command then focus again is needed to make this
        # work on Windows
        if active_view is not None:
            window_to_move_to.focus_view(active_view)
            window_to_move_to.run_command(
                'focus_neighboring_group')
            window_to_move_to.focus_view(active_view)
        elif active_group is not None:
            window_to_move_to.focus_group(active_group)
            window_to_move_to.run_command(
                'focus_neighboring_group')
            window_to_move_to.focus_group(active_group)
        # OS-level focus still needs a per-platform nudge.
        if sublime.platform() == 'osx':
            self._osx_focus()
        elif sublime.platform() == 'linux':
            self._linux_focus(window_to_move_to)

    def _osx_focus(self):
        """Bring the Sublime Text application to the foreground on macOS
        via AppleScript."""
        name = 'Sublime Text'
        if int(sublime.version()) < 3000:
            name = 'Sublime Text 2'
        # This is some magic. I spent many many hours trying to find a
        # workaround for the Sublime Text bug. I found a bunch of ugly
        # solutions, but this was the simplest one I could figure out.
        #
        # Basically you have to activate an application that is not Sublime
        # then wait and then activate sublime. I picked "Dock" because it
        # is always running in the background so it won't screw up your
        # command+tab order. The delay of 1/60 of a second is the minimum
        # supported by Applescript.
        cmd = """
            tell application "System Events"
                activate application "Dock"
                delay 1/60
                activate application "%s"
            end tell""" % name
        Popen(['/usr/bin/osascript', "-e", cmd],
              stdout=PIPE, stderr=PIPE)

    # Focus a Sublime window using wmctrl. wmctrl takes the title of the window
    # that will be focused, or part of it.
    def _linux_focus(self, window_to_move_to):
        window_variables = window_to_move_to.extract_variables()
        # NOTE(review): if neither 'project_base_name' nor 'folder' is
        # present, window_title is unbound below (NameError) — confirm
        # whether that state is reachable.
        if 'project_base_name' in window_variables:
            window_title = window_variables['project_base_name']
        elif 'folder' in window_variables:
            window_title = os.path.basename(
                window_variables['folder'])
        try:
            Popen(["wmctrl", "-a", window_title + ") - Sublime Text"],
                  stdout=PIPE, stderr=PIPE)
        except FileNotFoundError:
            msg = "`wmctrl` is required by GotoWindow but was not found on " \
                  "your system. Please install it and try again."
            sublime.error_message(msg)
|
18,198 | 0c2110a514875031d5dc19c4f89ae8040e4b73f4 | import cv2
import numpy as np
import os
import pandas as pd
import time
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
from imutils.video import FPS
# Colour lookup table: rows map an RGB triple to a human-readable name.
index=["color","color_name","hex","R","G","B"]
csv = pd.read_csv('colors.csv', names=index, header=None)
# YOLO detection tuning parameters.
CONFIDENCE = 0.1  # minimum objectness confidence to keep a detection
SCORE_THRESHOLD = 0.20  # NMS score threshold
IOU_THRESHOLD = 0.3  # NMS IoU threshold
whT =320  # NOTE(review): appears unused in the visible code — confirm
config_path = "yolov3.cfg"
weights_path = "yolov3.weights"
# loading all the class labels (objects)
labels = open("coco.names").read().strip().split("\n")
# generating colors for each object for later plotting
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
def getColorName(R, G, B):
    """Return the name of the colour in the global `csv` table closest to
    (R, G, B) by Manhattan distance in RGB space.

    Ties are resolved in favour of the later row (the `<=` comparison
    matches the original behaviour).  Returns None when the table is
    empty.
    """
    minimum = 10000  # larger than the maximum possible distance (3 * 255)
    cname = None  # BUG FIX: was unbound (UnboundLocalError) for an empty table
    for i in range(len(csv)):
        # Manhattan distance between the query and this table row.
        d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"]))
        if d <= minimum:
            minimum = d
            cname = csv.loc[i, "color_name"]
    return cname
# COCO class names treated as vehicles for the colour-naming step.
vehicles = ['car', 'motorbike', 'bus', 'truck', 'aeroplane']
def getcars(image):
    """Run YOLO object detection on `image` (BGR, modified in place):
    draw a box per kept detection and, for vehicle classes, prefix the
    label with the dominant body colour found by k-means over a crop.

    Reads module globals: net, labels, colors, CONFIDENCE,
    SCORE_THRESHOLD, IOU_THRESHOLD, vehicles, and (via getColorName) csv.
    """
    # print("It's working ...")
    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    ln = net.getLayerNames()
    # NOTE(review): indexing i[0] assumes getUnconnectedOutLayers() returns
    # nested [[idx]] arrays; newer OpenCV returns a flat array — confirm
    # the cv2 version in use.
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    layer_outputs = net.forward(ln)
    font_scale = 1
    thickness = 1
    boxes, confidences, class_ids = [], [], []
    # loop over each of the layer outputs
    for output in layer_outputs:
        # loop over each of the object detections
        for detection in output:
            # extract the class id (label) and confidence (as a probability) of
            # the current object detection
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > CONFIDENCE:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = detection[:4] * np.array([w, h, w, h])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # perform the non maximum suppression given the scores defined before
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
    # k-means model used to find the dominant colour of each vehicle crop.
    clt = KMeans(n_clusters = 4)
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            if i in idxs:
                if labels[class_ids[i]] in vehicles:
                    copied_image = image.copy()
                    x,y,w,h = boxes[i]
                    scaleTop = int(h * 0.30) # scaling the top with the %
                    scaleBottom = int(h * 0.15) # scaling the bottom with the %
                    # Crop the body of the vehicle (trim roof and bumper).
                    x1 = x
                    y1 = y + scaleTop
                    x2 = x + w
                    y2 = (y + h) - scaleBottom
                    #print("x: {}, y: {}, w: {}, h: {}, scaleTop: {}, scaleBottom: {}".format(x,y, w, h, scaleTop, scaleBottom))
                    #print("scaleTop:y + scaleBottom, x:x + w")
                    crop_img = copied_image[y1:y2, x1:x2]
                    #print(crop_img.shape)
                    crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
                    crop_img = cv2.fastNlMeansDenoisingColored(crop_img,None,10,10,7,21)
                    #cv2.imwrite(filename + str(i) + "_Object." + ext, crop_img)
                    # Flatten the crop to a list of RGB pixels for k-means.
                    pixels = crop_img.reshape((crop_img.shape[0] * crop_img.shape[1], 3))
                    labelsinvehicle = clt.fit_predict(pixels)
                    label_counts = Counter(labelsinvehicle)
                    #subset out most popular centroid
                    dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]
                    r, g, b = dominant_color
                    # NOTE(review): arguments look swapped — the crop is RGB so
                    # b is passed where getColorName expects R; confirm intent.
                    color_present = getColorName(b, g, r)
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in colors[class_ids[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
            if labels[class_ids[i]] in vehicles:
                text = f"{color_present} { labels[class_ids[i]]}: {confidences[i]:.2f} "
            else:
                text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
            # calculate text width & height to draw the transparent boxes as background of the text
            (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
            text_offset_x = x
            text_offset_y = y - 5
            box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
            overlay = image.copy()
            #overlay = image = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
            # cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
            # add opacity (transparency to the box)
            # image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
            # now put the text (label: confidence %)
            cv2.putText(image,text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
# Read the input video frame by frame, annotate each frame with detections,
# and write the result to an MJPG file while previewing it in a window.
cap = cv2.VideoCapture('input.mp4')
frame_width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
frame_height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
size = (frame_width, frame_height)
result = cv2.VideoWriter('Final_Output.avi',
                         cv2.VideoWriter_fourcc(*'MJPG'),
                         10, size)
while True:
    # Save our image and tell us if it was done succesfully
    success, img = cap.read()
    if not success:
        # BUG FIX: the original ignored `success` and passed a None frame
        # to getcars() after the stream ended, crashing inside OpenCV.
        break
    getcars(img)       # annotate the frame in place
    result.write(img)  # append the annotated frame to the output file
    cv2.imshow("video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# BUG FIX: `result.release` was referenced but never *called* (missing
# parentheses), leaving the output file unfinalised; also release the capture.
cap.release()
result.release()
cv2.waitKey(0)
|
18,199 | 51b43d51d864ca1097f332230d76fa4225dca691 | # import os
# print(os.listdir())
def compterArbresRencontres(data, pasVertical, pasHorizontal):
    """Count the '#' trees hit while sledding down the grid.

    Starting from the top-left corner, move `pasVertical` rows down and
    `pasHorizontal` columns right per step; columns wrap around the
    stripped row width.  The starting cell itself is never checked.
    """
    arbres = 0
    colonne = 0
    # Visit rows pasVertical, 2*pasVertical, ... exactly as the step loop did.
    for numLigne in range(pasVertical, len(data), pasVertical):
        ligne = data[numLigne].strip()
        colonne = (colonne + pasHorizontal) % len(ligne)
        if ligne[colonne] == "#":
            arbres += 1
    return arbres
def part1(path):
    """Solve part 1: trees hit on the right-3 / down-1 slope of the grid
    stored one row per line in the file at `path`.

    BUG FIX: the original never closed the file handle; `with` does.
    """
    with open(path) as file:
        data = file.readlines()
    return compterArbresRencontres(data, 1, 3)
print(part1("/home/coder/adventOfCodePython2020/dataJour3.txt"))
def part2(path):
    """Solve part 2: product of the tree counts over the five required
    slopes, reading the grid from the file at `path`.

    BUG FIX: the original never closed the file handle; `with` does.
    """
    with open(path) as file:
        data = file.readlines()
    # (down, right) slope pairs required by the puzzle.
    pentes = [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]
    resultat = 1
    for pasVertical, pasHorizontal in pentes:
        resultat *= compterArbresRencontres(data, pasVertical, pasHorizontal)
    return resultat
print(part2("/home/coder/adventOfCodePython2020/dataJour3.txt")) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.