text stringlengths 38 1.54M |
|---|
import hmac
import hashlib

# Compute an HMAC-SHA256 digest of a file, streaming it in 1 KiB blocks
# so arbitrarily large files are hashed in constant memory.
# BUG FIX: hmac.new() requires bytes for the key and initial message in
# Python 3 (the original passed str); print is now a function; the file
# handle is managed with a context manager instead of try/finally.
digest_maker = hmac.new(b'secret-key', b'', hashlib.sha256)
with open('sample-file.txt', 'rb') as f:
    while True:
        block = f.read(1024)
        if not block:  # empty bytes => EOF
            break
        digest_maker.update(block)
digest = digest_maker.hexdigest()
print(digest)
from trax.supervised import training
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(model, data_generator, batch_size=32, max_length=64, lines=lines, eval_lines=eval_lines, n_steps=1, output_dir='model/'):
    """Function that trains the model
    Args:
        model (trax.layers.combinators.Serial): GRU model.
        data_generator (function): Data generator function.
        batch_size (int, optional): Number of lines per batch. Defaults to 32.
        max_length (int, optional): Maximum length allowed for a line to be processed. Defaults to 64.
        lines (list, optional): List of lines to use for training. Defaults to lines.
        eval_lines (list, optional): List of lines to use for evaluation. Defaults to eval_lines.
        n_steps (int, optional): Number of steps to train. Defaults to 1.
        output_dir (str, optional): Relative path of directory to save model. Defaults to "model/".
    Returns:
        trax.supervised.training.Loop: Training loop for the model.
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    bare_train_generator = data_generator(batch_size, max_length, lines)
    infinite_train_generator = itertools.cycle(bare_train_generator)
    bare_eval_generator = data_generator(batch_size, max_length, eval_lines)
    infinite_eval_generator = itertools.cycle(bare_eval_generator)
    train_task = training.TrainTask(
        # BUG FIX: this was bare_train_generator, which is exhausted after a
        # single pass and would starve training; the cycled (infinite)
        # generator built above is what the TrainTask must consume.
        labeled_data=infinite_train_generator,
        loss_layer=tl.CrossEntropyLoss(),  # instantiated loss layer
        optimizer=trax.optimizers.Adam(learning_rate=0.0005)  # Adam with explicit LR
    )
    eval_task = training.EvalTask(
        labeled_data=infinite_eval_generator,  # infinite eval data generator
        metrics=[tl.CrossEntropyLoss(), tl.Accuracy()],  # instantiated metric layers
        n_eval_batches=3  # for better evaluation accuracy in reasonable time
    )
    training_loop = training.Loop(model,
                                  train_task,
                                  eval_task=eval_task,
                                  output_dir=output_dir)
    training_loop.run(n_steps=n_steps)
    ### END CODE HERE ###
    # We return this because it contains a handle to the model, which has the weights etc.
    return training_loop
|
#Write your code below this row 👇
# Classic FizzBuzz over 1..99.
# BUG FIX 1: the divisible-by-both branch must be tested FIRST; with the
# original ordering the %3 branch shadowed it and "FizzBuzz" never printed.
# BUG FIX 2: the original condition `(num % 3) and (num%5) ==0` compared
# only `num % 5` to 0 (operator precedence), not both remainders.
for num in range(1, 100):
    if num % 15 == 0:  # divisible by both 3 and 5
        print(f"{num} : FizzBuzz")
    elif num % 3 == 0:
        print(f"{num} : Fizz")
    elif num % 5 == 0:
        print(f"{num} : Buzz")
    else:
        print(num)
|
import socket
from Crypto.Util.number import long_to_bytes
from hashlib import sha512
remoteip = "133.9.81.203"
remoteport = 1337
def sock(remoteip, remoteport):
    """Open a TCP connection to (remoteip, remoteport).

    Returns the raw socket together with an unbuffered read/write file
    object wrapping it (Python 2 makefile signature: mode, bufsize).
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((remoteip, remoteport))
    return conn, conn.makefile('rw', bufsize=0)
def read_until(f, delim='\n'):
    """Consume *f* one character at a time and return everything read,
    up to and including the first occurrence of *delim*."""
    buf = ''
    while not buf.endswith(delim):
        buf = buf + f.read(1)
    return buf
def get_bb(f):
    """Skip the server prompt up to the literal "send " and parse the
    remainder of that line as an integer."""
    read_until(f, "send ")
    return int(read_until(f))
def get_enc(f):
    """Skip up to the ": " separator and parse the following line as the
    integer ciphertext sent by the server."""
    read_until(f, ": ")
    return int(read_until(f))
def get_hash(s):
    """SHA-512 of str(s), interpreted as a big integer (Python 2: sha512
    accepts a plain str argument)."""
    hexdigest = sha512(str(s)).hexdigest()
    return int(hexdigest, 16)
p = 285370232948523998980902649176998223002378361587332218493775786752826166161423082436982297888443231240619463576886971476889906175870272573060319231258784649665194518832695848032181036303102119334432612172767710672560390596241136280678425624046988433310588364872005613290545811367950034187020564546262381876467
# Recover the 11-digit password one position at a time by running two
# parallel sessions (s1/s2) against the oracle and cross-feeding their
# blinding values; position k is brute-forced over digits 1..16.
pwds = []
for k in range(11):
    for pwd in range(1, 17):
        print pwd
        s1, f1 = sock(remoteip, remoteport)
        s2, f2 = sock(remoteip, remoteport)
        for i in range(11):
            if i != k:
                # For every position except the one under attack, swap the
                # two sessions' blinding values so their states stay mirrored.
                bb1 = get_bb(f1)
                bb2 = get_bb(f2)
                s1.send(str(bb2)+"\n")
                s2.send(str(bb1)+"\n")
            else:
                # At position k, send our own quadratic residue of the
                # guessed digit to both sessions and record their hashes.
                num1 = get_hash(get_bb(f1))
                num2 = get_hash(get_bb(f2))
                aa = pow(get_hash(pwd), 2, p)
                # Server presumably rejects values too close to 0 or p.
                assert aa > 514 and aa <= p - 514
                s1.send(str(aa)+"\n")
                s2.send(str(aa)+"\n")
        enc1 = get_enc(f1)
        enc2 = get_enc(f2)
        # If the guess was right, the two ciphertexts agree after removing
        # each session's own key-stream contribution.
        if enc1^num1 == enc2^num2:
            print "[+] pwd %d: %d" % (k, pwd)
            pwds.append(pwd)
            break
        s1.close()
        s2.close()
# Replay a clean session with the recovered password digits, reconstruct
# the XOR key-stream from the hashes of the server's blinding values, and
# decrypt the flag.
s, f = sock(remoteip, remoteport)
bbs = []
key = 0
for pwd in pwds:
    key ^= get_hash(get_bb(f))
    aa = pow(get_hash(pwd), 2, p)
    s.send(str(aa)+"\n")
enc = get_enc(f)
print long_to_bytes(enc ^ key)
s.close()
|
import json
from domino.core import log
from pages._base import Page as BasePage
from pages._base import Title, Toolbar, Input, InputText, Button, Table, IconButton, Select, Row
from tables.postgres.printer import Printer
from tables.postgres.server import Server
#from tables.dept import Dept
from sqlalchemy import or_, and_
class Page(BasePage):
    """Admin page listing print devices (Printer rows joined to their Server),
    with enable/disable toggles, per-row delete, and server/mode filters."""

    def __init__(self, application, request):
        super().__init__(application, request)

    def delete(self):
        """Remove the table row for the selected printer and report it."""
        printer_id = self.get('printer_id')
        #printer = self.postgres.query(Printer).get(printer_id)
        #sql = 'delete from "printer" where "id"=%s'
        #self.pg_cursor.execute(sql, [ID])
        self.Row('table', printer_id)
        # BUG FIX: user-facing message had a typo ("принтрер" -> "принтер").
        self.message(f'Удален принтер {printer_id}')

    def change_disabled(self):
        """Toggle the printer's disabled flag and re-render its table row."""
        printer_id = self.get('printer_id')
        printer = self.postgres.query(Printer).get(printer_id)
        server = self.postgres.query(Server).get(printer.server_id)
        printer.disabled = not printer.disabled
        row = Row(self, 'table', printer.id)
        self.print_row(row, printer, server)

    def print_complex_cell(self, cell, name, params):
        """Render *name*, with *params* (if any) in small gray text below it."""
        if params is not None and len(params) > 0:
            cell.html(f'''{name}<p style="font-size:small;color:gray; line-height: 1em">{', '.join(params)}</p>''')
        else:
            cell.text(name)

    def print_row(self, row, printer, server):
        """Fill one table row: status toggle, id, server, name, delete button."""
        cell = row.cell(width=2)
        # Gray check = disabled, green check = active; both toggle the flag.
        if printer.disabled:
            IconButton(cell, 'check', style='color:lightgray')\
                .onclick('.change_disabled', {'printer_id':printer.id})
        else:
            IconButton(cell, 'check', style='color:green')\
                .onclick('.change_disabled', {'printer_id':printer.id})
        row.cell(width=1).text(printer.id)
        self.print_complex_cell(row.cell(), server.name, [server.id])
        params = []
        if printer.description is not None:
            params.append(json.dumps(printer.description, ensure_ascii=False))
        self.print_complex_cell(row.cell(), printer.name, params)
        #row.cell().text(printer.width)
        #row.cell().text(printer.height)
        cell = row.cell(width=6, align='right')
        #IconButton(cell, 'edit', style='color:lightgray').onclick('pages/printer', {'printer_id' : printer.id})
        IconButton(cell, 'close', style='color:red').onclick('.delete', {'printer_id' : printer.id})

    def print_table(self):
        """Build the device table, applying the server and mode filters."""
        table = Table(self, 'table').mt(0.5)
        table.column()
        table.column().text('#')
        table.column().text('Сервер')
        table.column().text('Принтер')
        #table.column().text('Подразделение')
        #table.column().text('Ширина')
        #table.column().text('Высота')
        table.column()
        query = self.postgres.query(Printer, Server).filter(Printer.server_id == Server.id)
        server_id = self.get('server_id')
        if server_id:
            query = query.filter(Server.id == server_id)
        mode = self.get('mode')
        # Default mode hides disabled printers (NULL counts as enabled).
        if mode != 'all':
            query = query.filter(or_(Printer.disabled == False, Printer.disabled == None))
        for printer, server in query.order_by(Printer.id.desc()):
            row = table.row(printer.id)
            self.print_row(row, printer, server)

    def print_toolbar(self):
        """Build the toolbar: mode selector, server selector, add button."""
        toolbar = Toolbar(self, 'toolbar')
        select = Select(toolbar.item(), label='Режим просмотра', name='mode')\
            .onclick('.print_table', forms=[toolbar])
        select.option('','ТОЛЬКО АКТИВНЫЕ УСТРОЙСТВА')
        select.option('all','ВСЕ УСТРОЙСТВА')
        select = Select(toolbar.item(ml=1), label='Сервер', name='server_id')\
            .onclick('.print_table', forms=[toolbar])
        select.option('','')
        for server in self.postgres.query(Server):
            select.option(server.id, server.name)
        Button(toolbar.item(ml='auto'), 'Добавить вручную').onclick('pages/add_printer')

    def __call__(self):
        """Render the full page: title, toolbar, device table."""
        Title(self, 'Устройства печати')
        self.print_toolbar()
        self.print_table()
|
from flask import Flask
# NOTE(review): CsrfProtect is the pre-0.14 flask-wtf name; newer releases
# expose it as CSRFProtect — confirm against the pinned flask_wtf version.
from flask_wtf.csrf import CsrfProtect
from views import Index

# Application wiring: enable CSRF protection and mount the single
# class-based index view at the site root.
app = Flask(__name__)
CsrfProtect(app)
app.add_url_rule('/', view_func=Index.as_view('index'))

if __name__ == '__main__':
    # Debug server — local development only, never production.
    app.run(debug=True)
|
from django.db import connections
from django.conf import settings
from django.http import HttpResponse
from beans import ComputeNodeMana,InstanceManager,KeyStoneManager,NetWorkManager,EvaLog
from django.shortcuts import render_to_response
import json
import checker
import ks_auth
from public import NOVA_DB,NEUTRON_DB,NOVA,NEUTRON,RTN_200,RTN_500
import framework
from django.utils.encoding import smart_text
# Django-configured database connections and the list of OpenStack regions.
DATABASES=settings.DATABASES
REGIONS=settings.REGIONS
# Connection aliases split by service; keys are expected to be named
# "<region>_nova" / "<region>_neutron" in settings.DATABASES.
nova_list=[region for region in DATABASES if(region.endswith("nova"))]
neutron_list=[region for region in DATABASES if(region.endswith("neutron"))]
def controller(req,ip,region=settings.REGIONS):
    """HTTP entry point: validate *ip*, authenticate the caller by token,
    evacuate the machine owning *ip* across the requested region(s), and
    log the action. Default *region* means "search all regions"."""
    if not checker.IsIpAddr(ip):
        rtn="check ip(%s) address failed" % ip
    else:
        ip_addr=req.META.get("REMOTE_ADDR",None)
        user=framework.getApiUserByToken(req)
        if not user:
            return HttpResponse(RTN_500 % "Unknow auth token request." )
        # A single region narrows the search; the sentinel default keeps
        # the whole configured region list.
        regions=[]
        if not region ==settings.REGIONS:
            regions.append(region)
        else:
            regions=region
        rtn=eva(ip,regions)
        EvaLog().addLog(user,ip,rtn,ip_addr)
    return HttpResponse(rtn)
def eva(ip,regions):
    """Locate the instance (virtual or physical) owning *ip* in each region
    and evacuate it; returns an RTN_200/RTN_500 formatted message."""
    apitoken=ks_auth.getToken()
    if not apitoken:
        return RTN_500 % "Can't get token from keystone"
    for region in regions:
        # Skip regions whose nova/neutron DB aliases are not configured.
        if not NOVA(region) in nova_list:
            print "DB %s_nova doesn't configure." % region
            continue
        if not NEUTRON(region) in neutron_list:
            print "DB %s_neutron doesn't configure." % region
            continue
        print "---------Start to query instanceId from db(%s)-------------" % region
        instanceBean=InstanceManager().findInstanceIdByIp(NEUTRON_DB(region),NOVA_DB(region),ip)
        if not instanceBean:
            print "can't find instance in db(%s) by vir" % region
            print "------- check if it's a physical machine --------"
            # Treat *ip* as a hypervisor address: evacuate every VM it hosts.
            childs=InstanceManager().getChildrens(NOVA_DB(region),ip)
            print childs
            if not childs:
                # An empty list means the host exists but has no instances;
                # None/other falsy means the host was not found here.
                if isinstance(childs,list):
                    return RTN_500 % ("[] instance find in nova.instances by physical machine(%s) in Region(%s)" % (ip,region))
                print "can't find it as a physical machine in db(%s)" % region
                continue
            msg={}
            for child in childs:
                msg["uuid_%s" % child.uuid]=evacuate(apitoken,child,region,child.host)
            return RTN_200 % msg
        else:
            mess=evacuate(apitoken,instanceBean,region,instanceBean.host)
            return RTN_200 % mess
    return RTN_500 % ("Can't find machine|virtual by ip (%s)" % ip)
from su import runScript
def evacuate(apitoken,instanceBean,region,filterHost=None):
    """Pick a target compute node (backup AZ preferred, any available node
    as fallback, never *filterHost*) and evacuate the instance to it."""
    node=getFilterAvailabilityHost(region,instanceBean.vcpus,instanceBean.memory_mb,filterHost,apitoken)
    if not node:
        node=getAvailabilityHost(region,instanceBean.vcpus,instanceBean.memory_mb,filterHost,apitoken)
    else:
        print "find filter node %s" % node
    if not node:
        return "no compute node match"
    else:
        rtn=ks_auth.evacuate(apitoken,region,instanceBean.uuid,node.hypervisor_hostname)
        print "%s <br/> adapt by<br/> %s <br/> %s" % (instanceBean,node,rtn)
        # A response containing "evacuate" indicates the API call succeeded;
        # the follow-up script finalizes the EVACUATE workflow.
        if rtn.has_key("evacuate"):
            runScript(region,instanceBean.uuid,"EVACUATE")
        return rtn
def getFreeResByRegion(req,region=None):
    """HTTP view: dump the compute-node resource list for one region."""
    # NOTE(review): other views test NOVA(region) in nova_list, but here the
    # raw region name is tested against the "<region>_nova" alias list, so
    # this guard may always fail unless callers pass the alias — verify.
    if not region or not region in nova_list:
        return HttpResponse("region(%s) doesn't exist." % region)
    nodes=ComputeNodeMana().getComputeNodes(NOVA_DB(region))
    #return HttpResponse(json.dumps(nodes), content_type="application/json")
    return HttpResponse(nodes)
def getAvailabilityHost(region,cpu,mem,filterHost=None,apitoken=None):
print "cpu %s mem %s" % (cpu,mem)
nodes=ComputeNodeMana().getComputeNodes(NOVA_DB(region))
print "nodes %s " % nodes
service=ks_auth.getOsServices(apitoken,region)
if not service:
return HttpResponse("Can't get service status.")
for node in nodes:
if filterHost and filterHost==node.hypervisor_hostname:
continue
if not service.has_key(node.hypervisor_hostname) or service[node.hypervisor_hostname]=="down":
continue
if node.availability(cpu,mem):
return node
return None
def getFilterAvailabilityHost(region,cpu,mem,filterHost=None,apitoken=None):
    """Return the first node in the configured backup availability zone
    that can fit cpu/mem, or None when the AZ is missing/empty or full."""
    zones=ks_auth.getAvaZones(apitoken,region)
    if not zones.has_key(settings.BACK_UP_AZ) or not zones[settings.BACK_UP_AZ]:
        print "Can't find backupAZ compute nodes"
        return None
    backupAZ=zones[settings.BACK_UP_AZ]
    #filters=",".join(backupAZ)
    nodes=ComputeNodeMana().getFilterComputeNodes(NOVA_DB(region),backupAZ)
    for node in nodes:
        # NOTE(review): unlike getAvailabilityHost, filterHost is not
        # excluded here — confirm the backup AZ can never contain it.
        if node.availability(cpu,mem):
            return node
    return None
def test(req):
    """Connectivity probe: echo the caller's remote address."""
    return HttpResponse("%s" % req.META.get("REMOTE_ADDR", None))
def getMachineInfoByIp(req,ip):
    """HTTP view: search every region for *ip*, first as a virtual
    instance, then as a physical compute node, and describe the match."""
    apitoken=ks_auth.getToken()
    if not apitoken:
        return HttpResponse("Can't get token from keystone")
    for region in REGIONS:
        # Skip regions whose nova/neutron DB aliases are not configured.
        if not NOVA(region) in nova_list:
            print "DB %s_nova doesn't configure." % region
            continue
        if not NEUTRON(region) in neutron_list:
            print "DB %s_neutron doesn't configure." % region
            continue
        print "---------Start to query instanceId from db(%s)-------------" % region
        instanceBean=InstanceManager().findInstanceIdByIp(connections[NEUTRON(region)],connections[NOVA(region)],ip)
        if not instanceBean:
            print "can't find instance in db(%s) by vir" % region
            print "------- check if it's a physical machine --------"
            cnode=ComputeNodeMana().getComputeNodeByIp(ip,connections[NOVA(region)])
            if cnode:
                return HttpResponse("Region(%s),physical machine info:%s" % (region,cnode))
        else:
            return HttpResponse("Region(%s),virtual instance info:%s" % (region,instanceBean))
    return HttpResponse("Can't get machine info.")
def getServiceStatus(req):
    """Render an HTML report of nova service states for every region;
    services reported as 'down' are highlighted in red."""
    apitoken = ks_auth.getToken()
    if not apitoken:
        return HttpResponse("Can't get token from keystone")
    parts = []
    for region in REGIONS:
        service = ks_auth.getOsServices(apitoken, region)
        if not service:
            return HttpResponse("Can't get service status from Region %s" % region)
        parts.append("<hr><span style='color:blue'>Region:%s</span><br/>" % region)
        for name, state in service.items():
            if state == "down":
                parts.append(name + "  <span style='color:red'>" + state + "</span><br/>")
            else:
                parts.append(name + "  " + state + "<br/>")
    return HttpResponse("".join(parts))
def az_list(req):
    """HTTP view: availability-zone -> compute-node mapping for RegionOne,
    rendered through the json.html template."""
    apitoken=ks_auth.getToken()
    if not apitoken:
        return HttpResponse("Can't get token from keystone")
    zones=ks_auth.getAvaZones(apitoken,"RegionOne")
    if not zones:
        return HttpResponse("Can't get az - compute nodes.")
    else:
        # json_str is consumed by the template via locals(); do not rename.
        json_str=json.dumps(zones)
        return render_to_response('json.html',locals())
def getFreeRes(req):
    """HTTP view: per-region compute-node lists rendered by free_res.html.
    The template reads `regions` through locals(); do not rename it."""
    regions={}
    for nova_db in nova_list:
        nodes=ComputeNodeMana().getComputeNodes(connections[nova_db])
        regions[nova_db]=nodes
        print nodes
    #nodes=ComputeNodeMana().getComputeNodes(connections["dev112_nova"])
    return render_to_response('free_res.html',locals())
def ip_list(req):
    """HTTP view: free IPs and aggregate totals per neutron DB, rendered by
    ip_list.html. `regions`/`totals` are read via locals(); do not rename."""
    regions={}
    totals={}
    print neutron_list
    for neutron_db in neutron_list:
        ips=NetWorkManager().getFreeIp(connections[neutron_db])
        print "ips %s" % ips
        regions[neutron_db]=ips
        totals[neutron_db]=json.dumps(NetWorkManager().getAllTotalNum(ips,connections[neutron_db]))
    print regions
    print totals
    return render_to_response('ip_list.html',locals())
def ip_list_region(req, region):
    """Return free-IP totals for one region as a JSON HttpResponse;
    500-style JSON when the region is unknown."""
    if region not in REGIONS:
        return HttpResponse("""{"code":500,"message":"region doesn't exist"}""")
    db = NEUTRON_DB(region)
    free_ips = NetWorkManager().getFreeIp(db)
    payload = json.dumps(NetWorkManager().getAllTotalNum(free_ips, db))
    return HttpResponse(payload)
def get_ava_network(req, region, nets):
    """For each underscore-separated tag in *nets*, find a network id that
    still has free IPs; JSON 500 when a tag has none, else JSON 200 with a
    comma-joined list of network ids."""
    if region not in REGIONS:
        return HttpResponse("""{"code":500,"message":"region doesn't exist"}""")
    db = NEUTRON_DB(region)
    free_ips = NetWorkManager().getFreeIp(db)
    totals = NetWorkManager().getAllTotalNum(free_ips, db)
    chosen = []
    for tag in nets.split("_"):
        network_id = getAvaNetworkId(totals, tag)
        if network_id == 0:
            return HttpResponse("""{"code":500,"message":"no free ip in %s."}""" % tag)
        chosen.append(network_id)
    return HttpResponse("""{"code":200,"message":"ok","data":"%s"}""" % ",".join(chosen))
def getAvaNetworkId(nodes, tag):
    """Return the network_id of the first entry whose key contains *tag*
    and which still has free IPs; 0 when no such network exists."""
    for name, info in nodes.items():
        if tag in name and info["freeNum"] > 0:
            return info["network_id"]
    return 0
"""
def virs_list(req,region):
REGION=region
virs=InstanceManager().getallActiveInstances(NOVA_DB(region))
data = []
for vir in virs:
item= {}
item['instance_id']= vir.uuid
#item['user'] = KeyStoneManager().getUserByUserID(vir.user_id).encode('latin-1').decode('utf-8')
#item['project'] = KeyStoneManager().getProjectByProjectID(vir.project_id).encode('latin-1').decode('utf-8')
item['user'] = KeyStoneManager().getUserByUserID(vir.user_id)
item['project'] = KeyStoneManager().getProjectByProjectID(vir.project_id)
#smart_text(item['user'], encoding='latin-1', strings_onliy=False, error='strict')
#smart_text(item['project'], encoding='latin-1', strings_onliy=False, error='strict')l
item['instance_name'] = vir.hostname
#data.append(vir.uuid)
data.append(item)
body = json.dumps({"code":200,"message":"ok","data":data},ensure_ascii=False, indent=2)
return HttpResponse(body)
"""
|
#
# abc165 c
#
import sys
from io import StringIO
import unittest
sys.setrecursionlimit(100000)
class TestClass(unittest.TestCase):
    """Sample-I/O tests for resolve(): feed stdin, capture stdout, compare."""

    def assertIO(self, input, output):
        """Run resolve() with *input* on stdin and assert that what it
        printed (minus the trailing newline) equals *output*."""
        saved_out, saved_in = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        captured = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = saved_out, saved_in
        self.assertEqual(captured, output)

    def test_入力例_1(self):
        input = """3 4 3
1 3 3 100
1 2 2 10
2 3 2 10"""
        output = """110"""
        self.assertIO(input, output)

    def test_入力例_2(self):
        input = """4 6 10
2 4 1 86568
1 4 0 90629
2 3 0 90310
3 4 1 29211
3 4 3 78537
3 4 2 8580
1 2 1 96263
1 4 2 2156
1 2 0 94325
1 4 3 94328"""
        output = """357500"""
        self.assertIO(input, output)

    def test_入力例_3(self):
        input = """10 10 1
1 10 9 1"""
        output = """1"""
        self.assertIO(input, output)
def resolve():
    # ABC165 C: read N (sequence length), M (max value) and Q scoring
    # quadruples; share N, M, ABCD with dfs() via module globals, then
    # print the best achievable score starting from the prefix [1].
    global N, M, ABCD
    N, M, Q = map(int, input().split())
    ABCD = [list(map(int, input().split())) for _ in range(Q)]
    print(dfs([1]))
def dfs(L):
    """Return the best score of any non-decreasing sequence extending
    prefix *L* (values in 1..M; globals N, M, ABCD set by resolve()).

    A complete length-N sequence scores the sum of d over every
    (a, b, c, d) in ABCD with L[b-1] - L[a-1] == c.
    """
    if len(L) == N:
        total = 0
        for a, b, c, d in ABCD:
            if L[b - 1] - L[a - 1] == c:
                total += d
        return total
    best = 0
    for nxt in range(L[-1], M + 1):
        best = max(best, dfs(L + [nxt]))
    return best
if __name__ == "__main__":
# unittest.main()
resolve()
|
# Tweet fixtures: each record has a "username" and a (possibly empty)
# list of "tweets".
users = [
    {"username": "samuel", "tweets": ["I love cake", "I love pie", "hello world!"]},
    {"username": "katie", "tweets": ["I love my cat"]},
    {"username": "jeff", "tweets": []},
    {"username": "bob123", "tweets": []},
    {"username": "doggo_luvr", "tweets": ["dogs are the best", "I'm hungry"]},
    {"username": "guitar_gal", "tweets": []}
]

# Extract inactive users (no tweets) using a list comprehension.
# Truthiness replaces len(d.get("tweets")) == 0, which would raise
# TypeError (len(None)) if a record ever lacked the "tweets" key.
inactive_users = [user for user in users if not user.get("tweets")]
print(inactive_users)

# Extract just the usernames of the inactive users.
inactive_usernames = [user["username"] for user in users if not user.get("tweets")]
print(inactive_usernames)
# NOTE: Generated By HttpRunner v3.1.4
# FROM: testcases\demo_testcase_request.yml
from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
class TestCaseDemoTestcaseRequest(HttpRunner):
    # Generated HttpRunner testcase: shared config holds the base URL, the
    # default/expected variable pairs used by the validators, and exports
    # foo3 (extracted in step 1) for later steps.
    config = Config("request methods testcase with functions").variables(**{'foo1': 'config_bar1', 'foo2': 'config_bar2', 'expect_foo1': 'config_bar1', 'expect_foo2': 'config_bar2'}).base_url("https://postman-echo.com").verify(False).export(*['foo3'])

    teststeps = [
        # Step 1: GET with params; extracts body.args.foo2 ("bar21") into foo3.
        Step(RunRequest("get with params").with_variables(**{'foo1': 'bar11', 'foo2': 'bar21', 'sum_v': '${sum_two(1, 2)}'}).get("/get").with_params(**{'foo1': '$foo1', 'foo2': '$foo2', 'sum_v': '$sum_v'}).with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}'}).extract().with_jmespath('body.args.foo2', 'foo3').validate().assert_equal("status_code", 200).assert_equal("body.args.foo1", "bar11").assert_equal("body.args.sum_v", "3").assert_equal("body.args.foo2", "bar21")),
        # Step 2: POST raw text; foo2 is not overridden at step level, so the
        # config value ($expect_foo2) is what the echo should contain.
        Step(RunRequest("post raw text").with_variables(**{'foo1': 'bar12', 'foo3': 'bar32'}).post("/post").with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}', 'Content-Type': 'text/plain'}).with_data("This is expected to be sent back as part of response body: $foo1-$foo2-$foo3.").validate().assert_equal("status_code", 200).assert_equal("body.data", "This is expected to be sent back as part of response body: bar12-$expect_foo2-bar32.")),
        # Step 3: POST form data; foo3 resolves to step 1's extracted "bar21".
        Step(RunRequest("post form data").with_variables(**{'foo2': 'bar23'}).post("/post").with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}', 'Content-Type': 'application/x-www-form-urlencoded'}).with_data("foo1=$foo1&foo2=$foo2&foo3=$foo3").validate().assert_equal("status_code", 200).assert_equal("body.form.foo1", "$expect_foo1").assert_equal("body.form.foo2", "bar23").assert_equal("body.form.foo3", "bar21")),
    ]
if __name__ == "__main__":
TestCaseDemoTestcaseRequest().test_start()
|
'''########################################################'''
'''########################################################'''
'''########################################################'''
'''Hands On'''
# Take your street address and make it a list variable myaddress
# where each token is an element.
# What would be the code to set the sum of the numerical portions of
# your address list to a variable called address_sum?
# What would be the code to change one of the string elements of the
# list to another string (e.g., if your address had "West" in it, how would
# you change that string to "North")?
# Change the street portion of myaddress to have the street first
# and the building number at the end.
"""
Name:
Infinite input
Filename:
infinite.py
Problem Statement:
Write a program that asks the user, again and again, to enter a number.
When the user enters an empty string, then stop asking for additional inputs.
Along the way, as the user is entering numbers,
I want you to store those numbers in a list.
I also want you to keep track of the minimum and maximum values that you've seen so far.
When the user has finished entering numbers, your program should print out:
The sum of these numbers
The average (mean) of these numbers
The largest and smallest numbers you received from the user
Data:
Not required
Extension:
Not Available
Hint:
Use infinite while loop
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
''' Hand on '''
# remove all 3 from the list
some_list = [1,2,3,5,6,2,4,3,5,6,7,8,1,2,3]
'''########################################################'''
'''########################################################'''
'''########################################################'''
'''Hands On'''
# Take the list of the parts of your street address
# Write a loop that goes through that list and prints out each item in that list
myaddress = [3225, 'West', 'Foster', 'Avenue', 'Chicago', 'IL', 60625]
'''Hands On'''
#Looping through a list of temperatures and applying a test
#Pretend you have the following list of temperatures T:
T = [273.4, 265.5, 277.7, 285.5]
#and a list of flags called Tflags that is initialized to all False
Tflags = [False, False, False, False]
#Write a loop that checks each temperature in T and sets the corresponding
#Tflags element to True if the temperature is above the freezing point of water.
'''Hands On'''
# Clean the Messy salaries into integers for Data Processing
salaries = ['$876,001', '$543,903', '$2453,896']
'''Hands On'''
# Create a list of Fibonacci numbers up to the 50th term.
# The program will then ask the user for which position in the sequence
# they want to know the Fibonacci value for
# The Fibonacci sequence was originally used as a basic model for rabbit population growth:
'''Hands On'''
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
#words = ['aba', 'xyz', 'aa', 'x', 'bbb']
#words = ['', 'x', 'xy', 'xyx', 'xx']
#words = ['aaa', 'be', 'abc', 'hello']
'''Hands On'''
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
words =['bbb', 'ccc', 'axx', 'xzz', 'xaa']
#words =['ccc', 'bbb', 'aaa', 'xcc', 'xaa']
#words =['mix', 'xyz', 'apple', 'xanadu', 'aardvark']
'''Hands On'''
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
nums = [1, 2, 2, 3]
#nums = [2, 2, 3, 3, 3]
#nums = []
'''Hands On'''
# Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
list1 = ['aa', 'xx', 'zz'] #['aa', 'xx'] ['aa', 'aa']
list2 = ['bb', 'cc'] #['bb', 'cc', 'zz'] ['aa', 'bb', 'bb']
"""
Name:
2 Dimensional Random List
Filename:
random_list.py
Problem Statement:
Create a 2-Dimensional list of list of integers 10 by 10.
Fill the 2-Dimensional list with random numbers in the range 0 to 255
Display the array on the screen showing the numbers
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Code Challenge
Name:
Pattern Builder
Filename:
pattern.py
Problem Statement:
Write a Python program to construct the following pattern.
Take input from User.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
5
Sample Output:
Below is the output of execution of this program.
*
* *
* * *
* * * *
* * * * *
* * * *
* * *
* *
*
"""
"""
Name:
Treasure Hunt Game
Filename:
treasure.py
Problem Statement:
Create a simple treasure hunt game.
Create a list of list of integers 10 by 10.
In a random position in the array store the number 1.
Get the user to enter coordinates where they think the treasure is.
If there is a 1 at this position display ‘success’.
Repeat Until they find the treasure
Add a feature to say 'hot' 'cold' 'warm' depending on how close their guess
was to the actual hidden location.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
CodeBreaker
Filename:
code_breaker.py
Problem Statement:
The computer generates a 4 digit code
The user types in a 4 digit code. Their guess.
The computer tells them how many digits they guessed correctly
Data:
Not required
Extension:
the computer tells them how many digits are guessed correctly
in the correct place and how many digits have
been guessed correctly but in the wrong place.
The user gets 12 guesses to either
WIN – guess the right code.
Or
LOSE – run out of guesses.
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Vowels Finder
Filename:
vowels.py
Problem Statement:
Remove all the vowels from the list of states
Hint:
Use nested for loops and while
Data:
Not required
Extension:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
state_name = [ 'Alabama', 'California', 'Oklahoma', 'Florida']
Sample Output:
['lbm', 'clfrn', 'klhm', 'flrd']
"""
"""
Name:
Pallindromic Integer
Filename:
pallindromic.py
Problem Statement:
You are given a space separated list of integers.
If all the integers are positive and if any integer is a palindromic integer,
then you need to print True else print False.
(Take Input from User)
Data:
Not required
Extension:
Not Available
Hint:
A palindromic number or numeral palindrome is a number that remains the same
when its digits are reversed.
Like 16461, for example, it is "symmetrical"
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
12 9 61 5 14
Sample Output:
False
"""
"""
Name:
Centered Average
Filename:
centered.py
Problem Statement:
Return the "centered" average of an array of integers, which we'll say is the
mean average of the values, except ignoring the largest and smallest values in the array.
If there are multiple copies of the smallest value, ignore just one copy,
and likewise for the largest value. Use int division to produce the final average.
You may assume that the array is length 3 or more.
Take input from user
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
1, 2, 3, 4, 100
Sample Output:
3
"""
"""
Name:
Unlucky 13
Filename:
unlucky.py
Problem Statement:
Return the sum of the numbers in the array, returning 0 for an empty array.
Except the number 13 is very unlucky, so it does not count and numbers that
come immediately after a 13 also do not count
Take input from user
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
13, 1, 2, 13, 2, 1, 13
Sample Output:
3
"""
"""
Name:
Random Game 2
Filename:
random_game2.py
Problem Statement:
Write a program for a game where the computer generates a
random starting number between 20 and 30.
The player and the computer can remove 1,2 or 3 from the number
in turns. Something like this...
Starting number : 25
How many do you want to remove? 3
22 left
Computer removes 2
20 left
The player who has to remove the last value to bring the number
down to 0 is the loser.
1 left
Computer removes 1
You win!
Easy option
Get the computer to choose a number between 1—3 at random
Harder option
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Random Game 3
Filename:
random_game3.py
Problem Statement:
Write a program for a Higher / Lower guessing game
The computer randomly generates a sequence of up to 10 numbers
between 1 and 13. The player each after seeing each number
in turn has to decide whether the next number is higher or lower.
If you can remember Brucie’s ‘Play your cards right’ it’s basically
that. If you get 10 guesses right you win the game.
Starting number : 12
Higher(H) or lower(L)? L
Next number 8
Higher(H) or lower(L)? L
Next number 11
You lose
Data:
Not required
Extension:
Give the players two lives
Make sure only H or L can
be entered
Hint:
Use a condition controlled loop (do until, while etc) to control the
game. Do not find yourself repeating the same code over and over!
You don't need to remember all 10 numbers just the current number
/next number. Don’t forget you’ll have to keep a count of the
number of turns they’ve had.
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
'''########################################################'''
'''########################################################'''
'''########################################################'''
'''Hands On'''
# Make a function days_in_month to return the number of days in a specific month of a year
"""
Name:
Pangram
Filename:
pangram.py
Problem Statement:
Write a Python function to check whether a string is PANGRAM or not
Take input from User and give the output as PANGRAM or NOT PANGRAM.
Data:
Not required
Extension:
Not Available
Hint:
Pangrams are words or sentences containing every letter of the alphabet at least once.
For example: "The quick brown fox jumps over the lazy dog" is a PANGRAM.
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
The five boxing wizards jumps.
Sphinx of black quartz, judge my vow.
The jay, pig, fox, zebra and my wolves quack!
Pack my box with five dozen liquor jugs.
Sample Output:
NOT PANGRAM
PANGRAM
PANGRAM
PANGRAM
"""
"""
Name:
Bricks
Filename:
bricks.py
Problem Statement:
We want to make a row of bricks that is target inches long.
We have a number of small bricks (1 inch each) and big bricks (5 inches each).
Make a function that prints True if it is possible to make the exact target
by choosing from the given bricks or False otherwise.
Take list as input from user where its 1st element represents number of small bricks,
middle element represents number of big bricks and 3rd element represents the target.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
2, 2, 11
Sample Output:
True
"""
"""
Name:
Reverse Function
Filename:
reverse.py
Problem Statement:
Define a function reverse() that computes the reversal of a string.
Without using Python's inbuilt function
Take input from User
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
I am testing
Sample Output:
gnitset ma I
"""
"""
Name:
Translate Function
Filename:
translate.py
Problem Statement:
Write a function translate() that will translate a text into "rövarspråket"
Swedish for "robber's language".
That is, double every consonant and place an occurrence of "o" in between.
Take Input from User
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
This is fun
Sample Output:
ToThohisos isos fofunon
"""
"""
Name:
Operations Function
Filename:
operation.py
Problem Statement:
Write following functions for list operations. Take list as input from the User
Add(), Multiply(), Largest(), Smallest(), Sorting(), Remove_Duplicates(), Print()
Only call Print() function to display the results in the below displayed
format (i.e all the functions must be called inside the print() function
and only the Print() is to be called in the main script)
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
5,2,6,2,3
Sample Output:
Sum = 18
Multiply = 360
Largest = 6
Smallest = 2
Sorted = [2, 2, 3, 5, 6]
Without Duplicates = [2, 3, 5, 6]
"""
"""
Name:
Anagram
Filename:
anagram.py
Problem Statement:
Two words are anagrams if you can rearrange the letters of one to spell the second.
For example, the following words are anagrams:
['abets', 'baste', 'bates', 'beast', 'beats', 'betas', 'tabes']
create a function which takes two arguments and returns whether they are anagrams or not (True or False)
Data:
Not required
Extension:
Not Available
Hint:
How can you tell quickly if two words are anagrams?
Try to use set
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Playing Cards
Filename:
Playing_Cards.py
Problem Statement:
Write a program that will generate a random playing card
e.g. ‘9 Hearts’, ‘Queen Spades’ when the return key is pressed.
Rather than generate a random number from 1 to 52.
Create two random numbers – one for the suit and one for the card.
However we don't want the same card drawn twice.
Data:
Not required
Extension:
Update this program by using an list to prevent the same card being dealt
twice from the pack of cards.
Convert this code into a procedure ‘DealCard’ that will display the card dealt or ‘no more cards’.
Call your procedure 53 times!
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Blackjack
Filename:
Blackjack.py
Problem Statement:
Play a game that draws two random cards.
The player then decides to draw or stick.
If the score goes over 21 the player loses (goes ‘bust’).
Keep drawing until the player sticks.
After the player sticks draw two computer cards.
If the player beats the score they win.
Data:
Not required
Extension:
Aces can be 1 or 11! The number used is whichever gets the highest score.
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
'''########################################################'''
'''########################################################'''
'''########################################################'''
"""
Name:
Sorting
Filename:
sorting.py
Problem Statement:
You are required to write a program to sort the (name, age, height)
tuples by ascending order where name is string, age and height are numbers.
The tuples are input by console. The sort criteria is:
1: Sort based on name;
2: Then sort based on age;
3: Then sort by height.
The priority is that name > age > height
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Tom,19,80
John,20,90
Jony,17,91
Jony,17,93
Json,21,85
Sample Output:
[('John', 20, 90), ('Jony', 17, 91), ('Jony', 17, 93), ('Json', 21, 85), ('Tom', 19, 80)]
"""
"""
Name:
generator
Filename:
generator.py
Problem Statement:
This program accepts a sequence of comma separated numbers from user
and generates a list and tuple with those numbers.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
2, 4, 7, 8, 9, 12
Sample Output:
List : ['2', ' 4', ' 7', ' 8', ' 9', '12']
Tuple : ('2', ' 4', ' 7', ' 8', ' 9', '12')
"""
"""
Name:
weeks
Filename:
weeks.py
Problem Statement:
Write a program that adds missing days to existing tuple of days
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
('Monday', 'Wednesday', 'Thursday', 'Saturday')
Sample Output:
('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
"""
'''########################################################'''
'''########################################################'''
'''########################################################'''
'''Hands On'''
# Build `myaddress`, a dictionary describing a postal address.
# Every component — street address, city, state, and ZIP code — is kept
# as a string (the ZIP is deliberately not a number, so leading zeros
# and formatting would survive).
myaddress = dict(
    street='3225 West Foster Avenue',
    city='Chicago',
    state='IL',
    zip='60625',
)
"""
Name:
List of File Names
Filename:
list_dict.py
Problem Statement:
Assume you’re given the following list of files:
list_of_files = ['data0001.txt', 'data0002.txt','data0003.txt']
Create a dictionary filenum where the keys are the filenames and the
value is the file number (i.e., data0001.txt has a file number of 1)
as an integer.
Make your code fill the dictionary automatically, assuming that you
have a list list of files.
Data:
Not required
Extension:
Not Available
Hint:
To convert a string to an integer, use the int function on the string,
and the list and array sub-range slicing syntax also works on strings
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Supermarket
Filename:
supermarket.py
Problem Statement:
You are the manager of a supermarket.
You have a list of items together with their prices that consumers bought on a particular day.
Your task is to print each item_name and net_price in order of its first occurrence.
Take Input from User
Data:
Not required
Extension:
Not Available
Hint:
item_name = Name of the item.
net_price = Quantity of the item sold multiplied by the price of each item.
try to use new class for dictionary : OrderedDict
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
BANANA FRIES 12
POTATO CHIPS 30
APPLE JUICE 10
CANDY 5
APPLE JUICE 10
CANDY 5
CANDY 5
CANDY 5
POTATO CHIPS 30
Sample Output:
BANANA FRIES 12
POTATO CHIPS 60
APPLE JUICE 20
CANDY 20
"""
"""
Name:
Teen Calculator
Filename:
teen_cal.py
Problem Statement:
Take dictionary as input from user with keys, a b c, with some integer
values and print their sum. However, if any of the values is a teen --
in the range 13 to 19 inclusive -- then that value counts as 0, except
15 and 16 do not count as a teens. Write a separate helper "def
fix_teen(n):"that takes in an int value and returns that value fixed for
the teen rule. In this way, you avoid repeating the teen code 3 times
Data:
Not required
Extension:
Not Available
Hint:
from ast import literal_eval
dict1 = literal_eval("{'a': 2, 'b' : 15, 'c' : 13}")
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
{'a' : 2, 'b' : 15, 'c' : 13}
Sample Output:
Sum = 17
"""
"""
Name:
Character Frequency
Filename:
frequency.py
Problem Statement:
This program accepts a string from User and counts the number of characters
(character frequency) in the input string.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
www.google.com
Sample Output:
{'c': 1, 'e': 1, 'g': 2, 'm': 1, 'l': 1, 'o': 3, '.': 2, 'w': 3}
"""
"""
Name:
Letter Distribution
Filename:
letter_dist.py
Problem Statement:
Ask the user to enter some text.
Display the distribution of letters from within the text.
Data:
Not required
Extension:
Not Available
Hint:
Use dictionaries to solve
import string and use string.ascii_lowercase
Algorithm:
Convert all letters to lowercase
Ignore characters that aren't lowercase letters
Create a dictionary in which the keys are letters and the values are the counts.
Boiler Plate Code:
Not Available
Sample Input:
This is a test. Show me the distribution, already!
Sample Output:
t: 6 15%
h: 3 7%
i: 5 12%
s: 5 12%
a: 3 7%
e: 4 10%
o: 2 5%
w: 1 2%
m: 1 2%
d: 2 5%
r: 2 5%
b: 1 2%
u: 1 2%
n: 1 2%
l: 1 2%
y: 1 2%
"""
"""
Name:
Digit Letter Counter
Filename:
digit_letter_counter.py
Problem Statement:
Write a Python program that accepts a string from User and calculate the number of digits
and letters.
Data:
Not required
Extension:
Not Available
Hint:
Store the letters and Digits as keys in the dictionary
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Python 3.2
Sample Output:
Letters 6
Digits 2
"""
"""
Name:
Anagram 2
Filename:
anagram2.py
Problem Statement:
Two words are anagrams if you can rearrange the letters of one to spell the second.
For example, the following words are anagrams:
['abets', 'baste', 'bates', 'beast', 'beats', 'betas', 'tabes']
create a function which takes two arguments and returns whether they are anagrams or not (True or False)
Data:
Not required
Extension:
Not Available
Hint:
Use dictionary to solve it
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Sentence
Filename:
Sentence.py
Problem Statement:
You are given a sentence, and want to shift each letter by 2 in alphabet to create a secret code.
The sentence you want to encode is the lazy dog jumped over the quick brown
fox and the output should be ’vjg ncba fqi lworgf qxgt vjg swkem dtqyp hqz’
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Create a dictionary mapping each letter to its number in the alphabet
Create a dictionary mapping each number to its letter in the alphabet
Go through each letter in the sentence and find the corresponding number, add 2 and then find the new corresponding letter
Make sure to take care of the edge cases so that if you get the letter z, it maps to b… ect
Print the encoded string
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
'''########################################################'''
'''########################################################'''
'''########################################################'''
"""
Name:
Intersection
Filename:
Intersection.py
Problem Statement:
With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155]
Write a program to make a list whose elements are intersection of the above given lists.
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Duplicate
Filename:
Duplicate.py
Problem Statement:
With a given list [12,24,35,24,88,120,155,88,120,155]
Write a program to print this list after removing all duplicate values with original
order reserved
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
"""
Name:
Mailing List
Filename:
mailing.py
Problem Statement:
I recently decided to move a popular community mailing list
(3,000 subscribers, 60-80 postings/day) from my server to Google Groups.
I asked people to join the Google-based list themselves,
and added many others myself, as the list manager.
However, after nearly a week, only half of the list had been moved.
I somehow needed to learn which people on the old list hadn't yet signed up
for the new list.
Fortunately, Google will let you export a list of members of a group to CSV format.
Also, Mailman (the list-management program I was using on
my server) allows you to list all of the e-mail addresses being used
for a list. Comparing these lists, I think, offers a nice chance to look
at several different aspects of Python, and to consider how we can
solve this real-world problem in a "Pythonic" way.
The goal of this project is thus to find all of the e-mail addresses on
the old list that aren't on the new list. The old list is in a file
containing one e-mail address per line
Data:
Not required
Extension:
Not Available
Hint:
Not Available
Algorithm:
Not Available
Boiler Plate Code:
Not Available
Sample Input:
Not Available
Sample Output:
Not Available
"""
|
# Draw a 50x100 rectangle outline with turtle graphics on a blue window.
# (The final closing turn is omitted, so the turtle ends facing along
# the last side drawn.)
import turtle
window=turtle.Screen()
window.bgcolor("blue")
bepo=turtle.Turtle()
# Trace the four sides: short (50), long (100), short, long,
# turning left 90 degrees between sides.
bepo.forward(50)
bepo.left(90)
bepo.forward(100)
bepo.left(90)
bepo.forward(50)
bepo.left(90)
bepo.forward(100)
# Keep the window open until the user closes it.
window.mainloop()
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Shared extension singletons; they are bound to the app inside
# create_app (application-factory pattern).
db = SQLAlchemy()
ma = Marshmallow()

def create_app(test_config=None):
    """Application factory: build and configure the Flask app.

    Args:
        test_config: optional mapping that overrides the default and
            instance configuration (used by tests).

    Returns:
        The configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        SQLALCHEMY_DATABASE_URI="postgresql://postgres:postgres@localhost:5432/Aliens",
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        DEBUG=True
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    db.init_app(app)
    ma.init_app(app)
    # Imported here (not at module top) — presumably to avoid a circular
    # import with this module; confirm before moving.
    from .rest import api, queries
    api.init_app(app)
    app.register_blueprint(queries, url_prefix="/api/queries")
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        # Folder already exists (or cannot be created) — continue either way.
        pass
    # Importing the model modules registers their tables with SQLAlchemy;
    # the imported names are intentionally unused here.
    from aliens.models.abduction import Abduction
    from aliens.models.objects import Alien, Human, Spaceship
    from aliens.models.cemetery import Cemetery
    from aliens.models.commutation import Commutation
    from aliens.models.escape import Escape
    from aliens.models.experiment import Experiment
    from aliens.models.excursion import Excursion
    # a simple page that says hello
    @app.route('/')
    def hello():
        return {"name": "df"}
    return app
|
'print the last k lines from an input file'
def getLastkLines(fpath, k):
    """Return a list of the last *k* lines of the file at *fpath*.

    Returns fewer than k lines when the file is shorter, and [] when
    k <= 0 (the original ring-buffer version raised IndexError for
    k == 0). Lines keep their trailing newlines.

    Fixes vs. the original:
    - the file handle was never closed (leaked on every call and on any
      exception); a with-block now guarantees closure;
    - the manual double-buffer bookkeeping is replaced by
      collections.deque(maxlen=k), which keeps only the trailing k
      lines while streaming the file.
    """
    from collections import deque
    if k <= 0:
        return []
    with open(fpath) as fHandle:
        # deque(maxlen=k) discards from the left as new lines arrive,
        # so memory stays O(k) regardless of file size.
        return list(deque(fHandle, maxlen=k))
def printLastkLines(fpath, k):
    """Print the last *k* lines of the file at *fpath*.

    Each returned line keeps its trailing newline, so joining with ''
    preserves the original layout (Python 2 print statement adds one
    final newline).
    """
    lastk = getLastkLines(fpath, k)
    print "".join(lastk)
def test_printLastkLines():
    """Smoke test: write 150 numbered lines, check the last 30 come back.

    NOTE(review): despite the name, this exercises getLastkLines only —
    printLastkLines itself is never called.
    """
    fpath = '/tmp/plkl.txt'
    k = 30
    total = 150
    # Build a fixture file with one number per line.
    with open(fpath, 'w') as fh:
        for x in range(total):
            fh.write('%s\n' % x)
    lastk = getLastkLines(fpath, k)
    print lastk
    # The last k lines are exactly the numbers total-k .. total-1.
    assert lastk == ['%s\n'%x for x in range(total-k, total)]
    print 'Test passed'
if __name__ == '__main__':
    test_printLastkLines()
|
import os
import subprocess
import traceback
class Git:
    """Minimal git CLI wrapper for mirroring repositories.

    Keeps one clone per repository under ``~/repo_sync`` and syncs each
    clone between its original remote (``origin``) and a GitLab mirror.
    """

    def __init__(self):
        # Base directory for all local clones; created on first use.
        self.PATH_BASE = os.path.join(os.path.expanduser("~"), "repo_sync")
        if not os.path.isdir(self.PATH_BASE):
            os.mkdir(self.PATH_BASE)

    def repo_exists(self, name):
        """Return True if a local clone called *name* already exists."""
        return os.path.isdir(os.path.join(self.PATH_BASE, name))

    def repo_path(self, name):
        """Return the absolute path of the local clone called *name*."""
        return os.path.join(self.PATH_BASE, name)

    def repo_create(self, repo, mirror):
        """Clone *repo* and register *mirror* as the ``gitlab`` remote.

        repo: mapping with 'ssh_url' and 'name' keys.
        mirror: mapping with an 'ssh_url_to_repo' key.
        Returns git's combined stdout/stderr output as text.
        """
        out = b''  # fix: was unbound -> NameError when the first git call raised
        try:
            out += subprocess.check_output(['git', 'clone', repo['ssh_url']], cwd=self.PATH_BASE, stderr=subprocess.STDOUT)
            out += subprocess.check_output(['git', 'remote', 'add', 'gitlab', mirror['ssh_url_to_repo']], cwd=self.repo_path(repo['name']), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            traceback.print_exc()
            if exc.output:
                out += exc.output  # keep whatever git printed before failing
        return out.decode()

    def repo_sync(self, repo):
        """Pull all branches from origin, then push everything to both remotes.

        Returns git's combined output as text.
        """
        out = b''  # fix: was unbound -> NameError when the first git call raised
        try:
            out += subprocess.check_output(['git', 'pull', '--all'], cwd=self.repo_path(repo['name']), stderr=subprocess.STDOUT)
            out += subprocess.check_output(['git', 'push', 'origin', '*:*'], cwd=self.repo_path(repo['name']), stderr=subprocess.STDOUT)
            out += subprocess.check_output(['git', 'push', 'gitlab', '*:*'], cwd=self.repo_path(repo['name']), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            traceback.print_exc()  # fix: removed stray trailing comma (built a 1-tuple)
            if exc.output:
                out += exc.output
        return out.decode()
#-*- coding:utf-8 -*-
import urllib2
import json
import string
import time
import signal
def handler(signum, frame):
    """Signal handler that aborts the interrupted call.

    Raises AssertionError so a blocking operation can be cut short.
    NOTE(review): no signal.signal registration is visible in this file —
    confirm where (or whether) this handler is installed.
    """
    raise AssertionError
def get_url_data(url, num_retries = 2):
try:
print 'Downloading http...'
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=10)
data = response.read()
except urllib2.URLError as e:
print 'Download http error:'.e.reason
data = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
return get_url_data(url, num_retries -1)
return data
def decode_url(url):
    """Decode Baidu's obfuscated `objURL` into a real image URL.

    The obfuscation (reversed here) substitutes the tokens in `mapping`
    for ':', '.' and '/', then applies a fixed character substitution
    which `translate_table` inverts.  Python 2 code: string.maketrans
    and the byte-oriented str.translate used here do not exist in this
    form in Python 3.
    """
    # Character substitution: each char of in_table maps to the char at
    # the same position of out_table.
    in_table = u'0123456789abcdefghijklmnopqrstuvw'
    out_table = u'7dgjmoru140852vsnkheb963wtqplifca'
    translate_table = string.maketrans(in_table, out_table)
    # Multi-character tokens that stand in for URL punctuation.
    mapping = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
    for k, v in mapping.items():
        url = url.replace(k, v)
    # Encode unicode -> bytes so the byte-wise translate applies.
    url = url.encode()
    return url.translate(translate_table)
def get_img_url(url_data):
true_urls = []
jd = json.loads(url_data)
for i in range(len(jd['data'])):
try:
true_url = decode_url(jd['data'][i]['objURL'])
except:
print '没有找到第',i, '个objURL...'
continue
true_urls.append(true_url)
return true_urls
def save_img(img, save_addr, num):
    """Write the open response *img* to <save_addr><num>.jpg.

    Python 2 code: the trailing comma on the first print suppresses the
    newline, so 'success' appears on the same line once the write ends.
    """
    with open(save_addr + str(num) + '.jpg', 'wb') as w:
        print 'downloading ' + str(num) + ' image ......',
        w.write(img.read())
        print 'success'
def download_img(img_url, save_addr, num, num_retries = 2):
    """Download one image and save it as <save_addr><num>.jpg.

    Retries up to *num_retries* times on HTTP 5xx errors; returns None
    when the download ultimately fails.  Python 2 code.
    """
    try:
        img = urllib2.urlopen(img_url, timeout=10)
    except urllib2.URLError as e:
        print 'Download img error:', e.reason
        if num_retries > 0:
            print '正在第',3-num_retries, '次尝试重新下载图片'
            # Only server-side (5xx) errors are worth retrying.
            if hasattr(e, 'code') and 500 <= e.code < 600:
                return download_img(img_url, save_addr, num, num_retries -1)
        return None
    try:
        save_img(img, save_addr, num)
    except:
        # NOTE(review): bare except hides every failure as a "timeout",
        # including KeyboardInterrupt — consider narrowing to Exception.
        print "save image timeout"
def get_dst_url(dst_url_head, page):
    """Build the Baidu result URL for *page* from the template URL.

    Each result page holds 30 items, so the item offset is page * 30.
    The decimal offset replaces the value after '&pn=' and the same
    offset in hex is appended after '&gsm='.
    """
    pn = dst_url_head.find('&pn=')
    rn = dst_url_head.find('&rn=')
    gsm = dst_url_head.find('&gsm=')
    offset = page * 30
    # fix: the original used hex(offset).lstrip('0x'), which strips the
    # characters '0' and 'x' from the left and would also eat leading
    # zero digits; format(offset, 'x') yields exactly the bare hex digits.
    return dst_url_head[0:pn + 4] + str(offset) + dst_url_head[rn:gsm + 5] + format(offset, 'x')
# Template URL for one page of Baidu image-search JSON results; the
# query word, page offset (`pn`), page size (`rn`) and hex offset
# (`gsm`) are embedded in the query string and rewritten per page by
# get_dst_url.
dst_url_head = r'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E4%BA%A4%E8%AD%A6+%E6%8C%87%E6%8C%A5&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=%E4%BA%A4%E8%AD%A6+%E6%8C%87%E6%8C%A5&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn=60&rn=30&gsm=3c '
# Local directory where downloaded images are written (Windows path).
save_addr = 'E:\\download_img\\baidu\\'
#Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36
headers = { "Accept":"text/html,application/xhtml+xml,application/xml;",
    "Accept-Encoding":"gzip",
    "Accept-Language":"zh-CN,zh;q=0.8",
    # fix: the Referer was malformed ("http://http://www.baidu.com/" —
    # doubled scheme)
    "Referer":"http://www.baidu.com/",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36"
    }
if __name__ == '__main__':
num_d = 0
for page in range(1,201):
dst_url = get_dst_url(dst_url_head, page)
print '开始下载第' + str(page) + '页.....'
src_url_data = get_url_data(dst_url_head)
if src_url_data is None:
continue
imgs_url = get_img_url(src_url_data)
for img_url in imgs_url:
try:
download_img(img_url, save_addr, num_d)
except:
print '下载失败:',img_url,
continue
num_d += 1
print '下载完成......'
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 30 16:48:10 2022
stock market analysis with pandas datareader
- extract data
- build candlestick chart
"""
from pandas_datareader import data
import datetime
# Analysis window: 2022-01-01 through 2022-06-30.
start = datetime.datetime(2022,1,1)
end = datetime.datetime(2022,6,30)
# Daily quotes for Apple from Yahoo Finance.
# NOTE(review): pandas-datareader's "yahoo" data source is frequently
# broken upstream — confirm it still works in this environment.
df = data.DataReader(name="AAPL", data_source="yahoo", start=start, end=end)
|
from operator import itemgetter
# Count per-line ("word") frequencies within each <news>...</news> block
# of the tokenised input, writing every word seen at least 10 times,
# with its count, to the output file.
with open("Data/Frequency(root).txt","w",encoding="utf-8") as outputfile:
    keyword = {}  # line -> occurrence count within the current news block
    # fix: `dont` was read (`elif dont==1`) before ever being assigned,
    # so the script crashed with NameError unless the very first input
    # line happened to be "</news>\n".
    dont = 0
    with open("Data/Separeted_word(root).txt","r",encoding="utf-8") as ins:
        for line in ins:
            if line=="</news>\n":
                # End of a block: dump the counts, highest count first.
                d_view = [ (v,k) for k,v in keyword.items() ]
                d_view.sort(reverse=True)
                for v,k in d_view:
                    # Each key k still ends with '\n', so this writes
                    # "word\ncount\n\n".
                    if v>=10:
                        outputfile.write(k+str(v)+"\n\n")
                keyword = {}
                outputfile.write(line)
                # Echo (and don't count) the line that follows </news> —
                # presumably the next block's header; confirm input format.
                dont = 1
                continue
            elif dont==1:
                outputfile.write(line)
                dont = 0
                continue
            elif line in keyword:
                keyword[line] += 1
            else:
                keyword[line] = 1
# (fix: removed the redundant outputfile.close() — the with-block
# already closes the file.)
|
from __future__ import unicode_literals
from django.apps import AppConfig
class AdminexConfig(AppConfig):
    """Django application configuration for the `adminex` app."""
    name = 'adminex'
|
#Richard Xie
"""
A script that reads multiple monthly tick data csv files.
Can be used to detect any flash crash based on the percent change
of min and max ask price for each day’s ticks in 10 years of monthly
tick data for each currency cross.
It will return specific dates and the percent change of those days
whenever the threshold (can be set manually) is reached.
Written in Python 3
"""
import pandas as pd
from os import listdir, getcwd
from os.path import isfile, isdir, join, splitext, basename, abspath
import time
QUIET = 0 #set to 1 to suppress program feedback
# Root folder holding one sub-directory per month of tick data.
data_dir = abspath("D:/John-tick-data/2007")
#Process months (Kevin's code for reading multiple csv files)
months = [month for month in listdir(data_dir) if (isdir(join(data_dir, month)))]
for month in months:
    if not QUIET: print("\nProcessing " + month + " data:\n")
    #Save the absolute path to the current month
    month_dir = join(data_dir, month)
    #Select all .csv files in the current directory
    currency_crosses = [currency for currency in listdir(month_dir) if \
        ( isfile(join(month_dir, currency)) and (splitext(currency)[1] == '.csv') )]
    # Process tick data: report the dates whose intraday ask-price swing
    # exceeds the threshold below (flash-crash candidates).
    for currency_cross in currency_crosses:
        # The first 6 characters of the filename name the currency pair.
        print("Processing " + currency_cross[0:6] + ":")
        df = pd.read_csv(join(month_dir,currency_cross), sep=',', header=[0], parse_dates=["Time (UTC)"])
        df.set_index("Time (UTC)", drop=True, inplace=True)
        # Daily min/max of the ask price, broadcast back onto every tick
        # via transform.
        daily_ask = df.resample("D")["Ask"]
        df["daily_ask_min"] = daily_ask.transform("min")
        df["daily_ask_max"] = daily_ask.transform("max")
        # Relative intraday swing: (max - min) / max.
        df["daily_ask_change"] = (df["daily_ask_max"] - df["daily_ask_min"]) / df["daily_ask_max"]
        # Change the percent change threshold below
        print(df[df.daily_ask_change > 0.05]["daily_ask_change"].resample("D").mean())
        print("\n")
        #break #single currency
    #break #single month
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 15:03:04 2022
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import glob
import pickle
import copy
run_folder = 'stage3_all/' #!!!
filt = 'F150W'
# All data-process pickle files for target idx0 in this filter.
files = glob.glob(run_folder+'fit_material/data_process_idx0_*{0}*_*FOVpsf*.pkl'.format(filt))
files.sort()
# Collect the unique (target idx, filter) pairs present on disk,
# parsed out of the file names.
collect_info = []
for i in range(len(files)):
    _file = files[i]
    idx_info = _file.split('idx')[1].split('_')[0]
    filt_info = _file.split('W_')[0].split('_')[-1] + 'W'
    this_info = [idx_info, filt_info]
    if this_info not in collect_info:
        collect_info.append(this_info)
#%%
# PSF library for this filter (filt[:-1] drops the trailing 'W' so the
# glob matches e.g. 'F150' library names).
PSF_lib_files = glob.glob('stage3_all/'+'material/*'+filt[:-1]+'*_PSF_Library_idx?.pkl')[0]
PSF_list, PSF_list_clean, PSF_RA_DEC_list, PSF_from_file_list = pickle.load(open(PSF_lib_files,'rb'))
#%%
if_printshow = False  # NOTE(review): never used below
item = collect_info[0]
fit_run_list = []
idx, filt= item
# Load every saved fit result for this (idx, filter) pair.
fit_files = glob.glob(run_folder+'fit_material/fit_run_idx{0}_{1}_*FOVpsf*.pkl'.format(idx, filt))
fit_files.sort()
for i in range(len(fit_files)):
    fit_run_list.append(pickle.load(open(fit_files[i],'rb')))
# Rank the fits by reduced chi-square and keep the best `ct`.
chisqs = np.array([fit_run_list[i].reduced_Chisq for i in range(len(fit_run_list))])
idx_counts = chisqs.argsort()
ct = 3
PSF_RA_DEC_info = [PSF_RA_DEC_list[i] for i in idx_counts[:ct]]
zp = 27.980780691581828  # photometric zeropoint — presumably F356W; TODO confirm
# Magnitude from the total flux of each cleaned PSF: -2.5*log10(flux) + zp.
PSF_F356W_mag = [-2.5*np.log10(np.sum(PSF_list_clean[i]))+zp for i in idx_counts[:ct]]
#%%
# Reload the F150W PSF library and match each selected PSF by sky position.
PSF_lib_files = glob.glob('stage3_all/'+'material/*'+'150'+'*_PSF_Library_idx?.pkl')[0]
PSF_list, PSF_list_clean, PSF_RA_DEC_list, PSF_from_file_list = pickle.load(open(PSF_lib_files,'rb'))
PSF_RA_DEC_list = np.array(PSF_RA_DEC_list)
PSF_F150W_mag = [None]*ct
zp = 28.03341727868797  # zeropoint for this second filter — TODO confirm
for i in range(len(PSF_RA_DEC_info)):
    # Angular separation (arcsec) to every library PSF.
    dis = np.sqrt(np.sum((PSF_RA_DEC_info[i] - PSF_RA_DEC_list)**2,axis = 1))*3600
    # Accept a match within 5 * 0.03" — presumably 5 pixels at
    # 0.03"/pixel; verify against the instrument scale.
    if np.min(dis) < 5*0.03:
        psf_id = np.where(dis ==dis.min())[0][0]
        # Magnitude from the flux in the central cutout (40-pixel border
        # trimmed on each side).
        PSF_F150W_mag[i] = -2.5*np.log10(np.sum(PSF_list_clean[psf_id][40:-40, 40:-40])) + zp
print(PSF_RA_DEC_info, PSF_F356W_mag, PSF_F150W_mag)
#%%
import logging
import yaml
import os
#if you want to utilize a config file try this.
def parse_config():
    """Load and return the YAML configuration at ./conf/config.yml.

    The path is resolved relative to this file, so the caller's working
    directory does not matter.
    """
    config_path = "./conf/config.yml"
    current_path = os.path.dirname(os.path.abspath(__file__))
    # fix: renamed the handle (`file` shadowed the builtin) and switched
    # to safe_load — a config file needs no arbitrary Python object
    # construction, and safe_load avoids that attack surface entirely.
    with open(os.path.join(current_path, config_path)) as fh:
        cfg = yaml.safe_load(fh)
    return cfg

# Parsed configuration, loaded once at import time.
cfg = parse_config()
#db_config = {"registry": cfg["databases"]["registry"]}
|
import time
from time import sleep
import RPi.GPIO as GPIO
import getWindowStatus
# Current window state from the helper module — presumably 0 means
# "open"; TODO confirm against getWindowStatus.
windowStatus = getWindowStatus.windowStatus()
# BOARD-numbered pins.
rainSensor = 11
motorLeft = 37
motor2Right = 33
GPIO.setmode (GPIO.BOARD)
GPIO.setup(rainSensor, GPIO.IN)
GPIO.setup(motorLeft, GPIO.OUT)
GPIO.setup(motor2Right, GPIO.OUT)
# Start with both motor outputs off.
GPIO.output(motor2Right, GPIO.LOW)
GPIO.output(motorLeft, GPIO.LOW)
print(windowStatus)
try:
    while True:
        # Sensor LOW is treated as "rain detected" — presumably an
        # active-low sensor; verify against the wiring.
        if(GPIO.input(rainSensor) == GPIO.LOW and windowStatus == 0):
            print('Closing!')
            # Drive both motors, then stop the left one first and the
            # right one 1.25 s later.
            GPIO.output(motorLeft, GPIO.HIGH)
            GPIO.output(motor2Right, GPIO.HIGH)
            sleep(4.65)
            GPIO.output(motorLeft, GPIO.LOW)
            sleep(1.25)
            GPIO.output(motor2Right, GPIO.LOW)
            # Re-read the state so we don't try to close twice.
            windowStatus = getWindowStatus.windowStatus()
        # NOTE(review): 6000 seconds = 100 minutes between polls —
        # confirm this isn't meant to be 60.0 or 6.0.
        sleep(6000)
finally:
    # Always release the GPIO pins, even on Ctrl-C.
    GPIO.cleanup()
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Ari Stark <ari.stark@netcourrier.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import unittest
from ansible_collections.ari_stark.ansible_oracle_modules.plugins.module_utils.ora_object import (ContentType, Datafile,
FileType, Size)
class TestSize(unittest.TestCase):
    """Unit tests for the Size value object (Oracle-style sizes)."""

    def test_to_string_unlimited(self):
        self.assertEqual('unlimited', str(Size('unlimited')))

    def test_to_string_int(self):
        # Plain byte counts render in the largest exact unit.
        for raw, expected in ((123, '123'), (125952, '123K')):
            self.assertEqual(expected, str(Size(raw)))

    def test_to_string_oracle_format(self):
        # Oracle-suffixed inputs normalise to the largest exact unit.
        cases = (
            ('15M', '15M'),
            ('125952K', '123M'),
            ('0.5M', '512K'),
            ('1024E', '1Z'),
            ('1280K', '1280K'),
        )
        for raw, expected in cases:
            self.assertEqual(expected, str(Size(raw)))

    def test_equals(self):
        self.assertEqual(Size('10M'), Size('10M'))
        self.assertNotEqual(Size('10M'), Size('20M'))
        self.assertNotEqual(Size('10M'), Size('unlimited'))
        self.assertEqual(Size('unlimited'), Size('unlimited'))
        # Comparison with a foreign type is never equal.
        self.assertNotEqual(Size('1M'), 'foo')

    def test_compare(self):
        smaller = Size('1M')
        larger = Size('1.5M')
        unlimited = Size('unlimited')
        self.assertGreater(larger, smaller)
        self.assertLess(smaller, larger)
        # A size is neither less nor greater than itself.
        self.assertFalse(smaller < smaller)
        self.assertFalse(smaller > smaller)
        # 'unlimited' compares greater than any finite size.
        self.assertGreater(unlimited, larger)
        self.assertFalse(unlimited < larger)
        self.assertLess(larger, unlimited)
        self.assertFalse(larger > unlimited)
class TestDataFile(unittest.TestCase):
    """Unit tests for the Datafile value object and its SQL clauses."""

    def test_constructor_with_default(self):
        # Only path and size given; autoextend and its options default off.
        d = Datafile('/path/to/dbf', '0.5K')
        self.assertEqual('/path/to/dbf', d.path)
        self.assertEqual(512, d.size.size)
        self.assertFalse(d.autoextend)
        self.assertIsNone(d.nextsize)
        self.assertIsNone(d.maxsize)

    def test_constructor_with_value(self):
        # Full positional form: path, size, autoextend, nextsize, maxsize,
        # bigfile flag, block size.
        d = Datafile('/path/to/dbf', '0.5K', True, '1M', 'unlimited', False, 16384)
        self.assertEqual('/path/to/dbf', d.path)
        self.assertEqual('512', str(d.size))
        self.assertTrue(d.autoextend)
        self.assertEqual('1M', str(d.nextsize))
        self.assertEqual('unlimited', str(d.maxsize))
        self.assertEqual(16384, d.block_size)

    def test_needs_resize(self):
        # needs_resize compares a desired state (new) to the current one (prev).
        new = Datafile('/path/to/dbf', 1024, True)
        prev = Datafile('/path/to/dbf', 512)
        self.assertFalse(new.needs_resize(prev), 'no need to resize because of autoextend')
        new = Datafile('/path/to/dbf', 1024)
        prev = Datafile('/path/to/dbf', 512)
        self.assertTrue(new.needs_resize(prev), 'need to resize because new is bigger')
        new = Datafile('/path/to/dbf', 512)
        prev = Datafile('/path/to/dbf', 1024)
        self.assertFalse(new.needs_resize(prev), 'no resize because new is smaller')

    def test_needs_change_autoextend(self):
        new = Datafile('/path/to/dbf', 1024, True)
        prev = Datafile('/path/to/dbf', 512, False)
        self.assertTrue(new.needs_change_autoextend(prev), 'from autoextend off to autoextend on')
        new = Datafile('/path/to/dbf', 1024, False, '2M', '20M')
        prev = Datafile('/path/to/dbf', 512, False, '1M', '10M')
        self.assertFalse(new.needs_change_autoextend(prev), 'autoextend off, even if nextsize and maxsize change')
        new = Datafile('/path/to/dbf', 512, False)
        prev = Datafile('/path/to/dbf', 1024, True)
        self.assertTrue(new.needs_change_autoextend(prev), 'from autoextend on to autoextend off')
        new = Datafile('/path/to/dbf', 512, True, '1M', '20M')
        prev = Datafile('/path/to/dbf', 1024, True, '1M', '10M')
        self.assertTrue(new.needs_change_autoextend(prev), 'sizes change')
        new = Datafile('/path/to/dbf', 512, True, '1M', '20M')
        prev = Datafile('/path/to/dbf', 1024, True, '1M', '20M')
        self.assertFalse(new.needs_change_autoextend(prev), 'same values')
        # Smallfile datafiles cap at 32G, so 'unlimited' is equivalent.
        new = Datafile('/path/to/dbf', 512, True, '1M', '34359721984', False)
        prev = Datafile('/path/to/dbf', 1024, True, '1M', 'unlimited', False)
        self.assertFalse(new.needs_change_autoextend(prev), '32G and unlimited are same value for small files')
        new = Datafile('/path/to/dbf', 512, True, '1M', '34359721984', True)
        prev = Datafile('/path/to/dbf', 1024, True, '1M', 'unlimited', True)
        self.assertTrue(new.needs_change_autoextend(prev), '32G and unlimited are different values for big files')

    def test_autoextend_clause(self):
        # The clause always carries a leading space; next/maxsize only
        # appear when autoextend is on and both are set.
        d = Datafile('/path/to/dbf', 512, False)
        self.assertEqual(' autoextend off', d.autoextend_clause())
        d = Datafile('/path/to/dbf', 1024, False, '2M', '20M')
        self.assertEqual(' autoextend off', d.autoextend_clause())
        d = Datafile('/path/to/dbf', 1024, True)
        self.assertEqual(' autoextend on', d.autoextend_clause())
        d = Datafile('/path/to/dbf', 512, True, '1M', '20M')
        self.assertEqual(' autoextend on next 1M maxsize 20M', d.autoextend_clause())

    def test_file_specification_clause(self):
        d = Datafile('/path/to/dbf', 512, False)
        self.assertEqual('size 512 reuse autoextend off', d.file_specification_clause())
        d = Datafile('/path/to/dbf', 1024, True)
        self.assertEqual('size 1K reuse autoextend on', d.file_specification_clause())

    def test_data_file_clause(self):
        # Quotes the path and appends the file specification.
        d = Datafile('/path/to/dbf', 512, False)
        self.assertEqual("'/path/to/dbf' size 512 reuse autoextend off", d.data_file_clause())

    def test_as_dict(self):
        # nextsize/maxsize only appear in the dict when autoextend is on
        # and they are set.
        d = Datafile('/path/to/dbf', 512, False)
        self.assertDictEqual({'path': '/path/to/dbf', 'size': '512', 'autoextend': False}, d.asdict())
        d = Datafile('/path/to/dbf', 512, True)
        self.assertDictEqual({'path': '/path/to/dbf', 'size': '512', 'autoextend': True}, d.asdict())
        d = Datafile('/path/to/dbf', 512, True, '1M', '10M')
        self.assertDictEqual(
            {'path': '/path/to/dbf', 'size': '512', 'autoextend': True, 'nextsize': '1M', 'maxsize': '10M'},
            d.asdict())
class TestFileType(unittest.TestCase):
    """Unit tests for FileType (bigfile vs smallfile tablespaces)."""
    # Shared fixtures: FileType(True) renders 'bigfile', FileType(False) 'smallfile'.
    type_big = FileType(True)
    type_small = FileType(False)
    def test_to_string(self):
        self.assertEqual('bigfile', str(self.type_big))
        self.assertEqual('smallfile', str(self.type_small))
    def test_equals(self):
        type_s_too = FileType(False)
        self.assertNotEqual(self.type_big, self.type_small)
        self.assertEqual(self.type_small, type_s_too)
        # Comparing against an unrelated type must not raise and must be unequal.
        self.assertNotEqual(self.type_big, 'foo')
    def test_is_big(self):
        self.assertTrue(self.type_big.is_bigfile())
        self.assertFalse(self.type_small.is_bigfile())
class TestContentType(unittest.TestCase):
    """Unit tests for ContentType (permanent / undo / temp tablespaces)."""
    perm = ContentType('permanent')
    undo = ContentType('undo')
    temp = ContentType('temp')
    def test_to_string(self):
        self.assertEqual('permanent', str(self.perm))
        self.assertEqual('undo', str(self.undo))
        self.assertEqual('temp', str(self.temp))
    def test_equals(self):
        other_perm = ContentType('permanent')
        self.assertEqual(self.perm, other_perm)
        self.assertNotEqual(self.perm, self.undo)
        self.assertNotEqual(self.perm, self.temp)
        # Unrelated types compare unequal.
        self.assertNotEqual(self.perm, 'foo')
    def test_create_clause(self):
        # 'permanent' is the default and contributes nothing to CREATE TABLESPACE.
        self.assertEqual('', self.perm.create_clause())
        self.assertEqual('undo', self.undo.create_clause())
        self.assertEqual('temporary', self.temp.create_clause())
    def test_datafile_clause(self):
        # Only temp tablespaces use 'tempfile'.
        self.assertEqual('datafile', self.perm.datafile_clause())
        self.assertEqual('datafile', self.undo.datafile_clause())
        self.assertEqual('tempfile', self.temp.datafile_clause())
if __name__ == '__main__':
unittest.main()
|
# absolute.py
# Lab 3.2
# Reads one number from the user and prints its absolute value.
# Author: Amanda Murray
value = float(input("enter a number:"))
print('the absolute value of {} is {}'.format(value, abs(value)))
#!/usr/bin/env python
# encoding: utf-8
from peewee import *
from playhouse.pool import PooledMySQLDatabase
import config
# Pooled MySQL connection: at most 8 connections, connections idle longer
# than 300 s are recycled.  All credentials come from the config module.
mysql_db = PooledMySQLDatabase(config.get('database_db_name'), max_connections=8, stale_timeout=300,
                               **{'host': config.get('database_host'),
                                  'user': config.get('database_user'),
                                  'password': config.get('database_password'),
                                  'port': config.get('database_port'),
                                  'charset': config.get('database_charset')}
                               )
class BaseModel(Model):
    # Base class for all models in this project: binds them to mysql_db.
    class Meta:
        database = mysql_db
class Solution(object):
    def longestConsecutive(self, nums):
        """Length of the longest run of consecutive integers in nums.

        :type nums: List[int]
        :rtype: int
        """
        s = set(nums)
        best = 0
        for i in s:
            # Only start counting at the beginning of a run (i-1 absent).
            # BUG FIX: the original tested `i - 1 not in nums` against the
            # *list*, an O(n) scan that made the whole pass O(n^2); testing
            # the set keeps it O(n) overall.
            if i - 1 not in s:
                y = i + 1
                while y in s:
                    y += 1
                best = max(best, y - i)
        return best
|
import bs #Created By MythB # http://github.com/MythB
import bsInternal
import bsPowerup
import bsUtils
import random
import os
import MythBAdminList as mbal
class chatOptions(object):
    """Admin chat-command handler for a BombSquad (Python 2) server.

    Parses '/command ...' chat messages and applies them to the current
    session/activity.  Bans are persisted by rewriting line 8 of
    MythBAdminList.py and hot-reloading that module.
    """
    def __init__(self):
        self.MythBWasHere = True
    def checkDevice(self,nick):# check if in adminlist
        # Resolve the chatting player's account id by display name and test
        # it against the admin list; non-admins are refused with a message.
        client_str = []
        for i in bsInternal._getForegroundHostSession().players:#FIXME when player's nick contain lots of emoticon It's break equality!!!
            if (i.getName()).encode('utf-8') == nick: # use i.getName(True) <-- if u need fullname
                client_str = i.get_account_id()
        if client_str in mbal.AdminList:
            return True
        else:
            bsInternal._chatMessage("Commands Only For Admins")
            return False
    #bs.gameTimer(100,call=self.checkDevice,repeat=True)
    def opt(self,nick,msg):
        """Dispatch one chat command `msg` issued by player `nick`.

        Arguments longer than 2 characters are treated as a nick,
        otherwise as a numeric player index.
        """
        if self.checkDevice(nick):
            m = msg.split(' ')[0] # command
            a = msg.split(' ', 1)[1:] # arguments
            activity = bsInternal._getForegroundHostActivity()
            with bs.Context(activity):
                if m == '/kick': #just remove from the game
                    if a == []:
                        bsInternal._chatMessage("MUST USE KICK ID")
                    else:
                        try:
                            kickedPlayerID = int(a[0])
                        except Exception:
                            bsInternal._chatMessage("PLAYER NOT FOUND")
                        else:
                            if not kickedPlayerID == -1:
                                bsInternal._disconnectClient(kickedPlayerID)
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            else:
                                bsInternal._chatMessage("CANT KICK HOST")
                elif m == '/list': #list of current players id
                    bsInternal._chatMessage("==========PLAYER KICK IDS==========")
                    for i in bsInternal._getGameRoster():
                        try:
                            bsInternal._chatMessage(i['players'][0]['nameFull'] + " kick ID " + str(i['clientID']))
                        except Exception:
                            pass
                    bsInternal._chatMessage("==========PLAYER IDS=============")
                    for s in bsInternal._getForegroundHostSession().players:
                        bsInternal._chatMessage(s.getName() +" ID = "+ str(bsInternal._getForegroundHostSession().players.index(s)))
                elif m == '/ban':# add id to banlist=autokick list
                    if a == []:
                        bsInternal._chatMessage("MUST USE PLAYER ID OR NICK") #also FIX this every time bsInternal ChatMessage thing!! for stop loops "update-FIXED"
                    else: #firstly try nick if nick len is more then 2 else try as player id FIX ME
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        bannedClient = i.getInputDevice().getClientID()
                                        bannedName = i.getName().encode('utf-8')
                                        bannedPlayerID = i.get_account_id()
                                        foolist = []
                                        foolist = mbal.autoKickList
                                        if bannedPlayerID not in foolist:
                                            foolist.append(bannedPlayerID)
                                            bsInternal._chatMessage(str(bannedName) + " Banned")
                                            i.removeFromGame()
                                        else:
                                            bsInternal._chatMessage(str(bannedName) + " Already Banned")
                                        # Persist: rewrite line 8 of the admin-list module, reload it.
                                        # NOTE(review): inner `for i in s` reuses the player loop var `i`.
                                        with open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py") as file:
                                            s = [row for row in file]
                                        s[7] = 'autoKickList = '+ str(foolist) + '\n'
                                        f = open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py",'w')
                                        for i in s:
                                            f.write(i)
                                        f.close()
                                        reload(mbal)
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bannedClient = bsInternal._getForegroundHostSession().players[int(a[0])]
                            except Exception:
                                bsInternal._chatMessage("PLAYER NOT FOUND")
                            else:
                                foolist = []
                                foolist = mbal.autoKickList
                                bannedPlayerID = bannedClient.get_account_id()
                                if bannedPlayerID not in foolist:
                                    foolist.append(bannedPlayerID)
                                    bsInternal._chatMessage(str(bannedClient) + " Banned")
                                    bannedClient.removeFromGame()
                                else:
                                    bsInternal._chatMessage(str(bannedClient) + " Already Banned")
                                with open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py") as file:
                                    s = [row for row in file]
                                s[7] = 'autoKickList = '+ str(foolist) + '\n'
                                f = open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py",'w')
                                for i in s:
                                    f.write(i)
                                f.close()
                                reload(mbal)
                elif m == '/unban':# remove id from banlist=autokick list
                    if a == []:
                        bsInternal._chatMessage("MUST USE PLAYER ID OR NICK")
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        bannedClient = i.getInputDevice().getClientID()
                                        bannedName = i.getName().encode('utf-8')
                                        bannedPlayerID = i.get_account_id()
                                        foolist = []
                                        foolist = mbal.autoKickList
                                        if bannedPlayerID in foolist:
                                            foolist.remove(bannedPlayerID)
                                            bsInternal._chatMessage(str(bannedName) + " be free now!")
                                        else:
                                            bsInternal._chatMessage(str(bannedName) + " Already Not Banned")
                                        with open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py") as file:
                                            s = [row for row in file]
                                        s[7] = 'autoKickList = '+ str(foolist) + '\n'
                                        f = open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py",'w')
                                        for i in s:
                                            f.write(i)
                                        f.close()
                                        reload(mbal)
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bannedClient = bsInternal._getForegroundHostSession().players[int(a[0])]
                            except Exception:
                                bsInternal._chatMessage("PLAYER NOT FOUND")
                            else:
                                foolist = []
                                foolist = mbal.autoKickList
                                bannedPlayerID = bannedClient.get_account_id()
                                if bannedPlayerID in foolist:
                                    foolist.remove(bannedPlayerID)
                                    bsInternal._chatMessage(str(bannedClient) + " be free now!")
                                else:
                                    bsInternal._chatMessage(str(bannedClient) + " Already Not Banned")
                                with open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py") as file:
                                    s = [row for row in file]
                                s[7] = 'autoKickList = '+ str(foolist) + '\n'
                                f = open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py",'w')
                                for i in s:
                                    f.write(i)
                                f.close()
                                reload(mbal)
                elif m == '/amnesty': # reset blacklist
                    foolist = []
                    bsInternal._chatMessage("==========FREEDOM TO ALL==========")
                    bsInternal._chatMessage("=========BLACKLİST WIPED=========")
                    with open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py") as file:
                        s = [row for row in file]
                    s[7] = 'autoKickList = '+ str(foolist) + '\n'
                    f = open(bs.getEnvironment()['systemScriptsDirectory'] + "/MythBAdminList.py",'w')
                    for i in s:
                        f.write(i)
                    f.close()
                    reload(mbal)
                elif m == '/camera': #change camera mode
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                    try:
                        if bs.getSharedObject('globals').cameraMode == 'follow':
                            bs.getSharedObject('globals').cameraMode = 'rotate'
                        else:
                            bs.getSharedObject('globals').cameraMode = 'follow'
                    except Exception:
                        bsInternal._chatMessage('AN ERROR OCCURED')
                elif m == '/maxplayers': #set maxplayers limit
                    if a == []:
                        bsInternal._chatMessage('MUST USE NUMBERS')
                    else:
                        try:
                            bsInternal._getForegroundHostSession()._maxPlayers = int(a[0])
                            bsInternal._setPublicPartyMaxSize(int(a[0]))
                            bsInternal._chatMessage('MaxPlayers = '+str(int(a[0])))
                        except Exception:
                            bsInternal._chatMessage('AN ERROR OCCURED')
                elif m == '/help': #show help
                    bsInternal._chatMessage("=====================COMMANDS=====================")
                    bsInternal._chatMessage("list-kick-remove-ban-unban-amnesty-kill-curse-end-heal")
                    bsInternal._chatMessage("freeze-thaw-headless-shield-punch-maxplayers-headlessall")
                    bsInternal._chatMessage("killall-freezeall-shieldall-punchall-camera-slow")
                elif m == '/remove': #remove from game
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        i.removeFromGame()
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].removeFromGame()
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/curse': #curse
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.curse()
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.curse()
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/curseall': #curse all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.curse()
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/kill': #kill
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.DieMessage())
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.DieMessage())
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/killall': #kill all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.DieMessage())
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/freeze': #freeze
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.FreezeMessage())
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.FreezeMessage())
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/freezeall': #freeze all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.FreezeMessage())
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/thaw': #thaw
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.ThawMessage())
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.ThawMessage())
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/thawall': #thaw all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.ThawMessage())
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/headless': #headless
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.headModel = None
                                            i.actor.node.style = "cyborg"
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.headModel = None
                                bs.getActivity().players[int(a[0])].actor.node.style = "cyborg"
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/headlessall': #headless all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.headModel = None
                                i.actor.node.style = "cyborg"
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/heal': #heal
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'health'))
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.PowerupMessage(powerupType = 'health'))
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/healall': #heal all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'health'))
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/shield': #shield
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'shield'))
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.PowerupMessage(powerupType = 'shield'))
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/shieldall': #shield all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'shield'))
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/punch': #punch
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'punch'))
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage(bs.PowerupMessage(powerupType = 'punch'))
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/punchall': #punch all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage(bs.PowerupMessage(powerupType = 'punch'))
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/knock': #knock him
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage("knockout",5000)
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage("knockout",5000)
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/knockall': #knock all
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage("knockout",5000)
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/celebrate': #celebrate him
                    if a == []:
                        bsInternal._chatMessage('MUST USE PLAYER ID OR NICK')
                    else:
                        if len(a[0]) > 2:
                            for i in bs.getActivity().players:
                                try:
                                    if (i.getName()).encode('utf-8') == (a[0]):
                                        if i.actor.exists():
                                            i.actor.node.handleMessage('celebrate', 30000)
                                except Exception:
                                    pass
                            bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                        else:
                            try:
                                bs.getActivity().players[int(a[0])].actor.node.handleMessage('celebrate', 30000)
                                bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                            except Exception:
                                bsInternal._chatMessage('PLAYER NOT FOUND')
                elif m == '/celebrateall': #celebrate
                    for i in bs.getActivity().players:
                        try:
                            if i.actor.exists():
                                i.actor.node.handleMessage('celebrate', 30000)
                        except Exception:
                            pass
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                elif m == '/slow': # slow-mo
                    bsInternal._chatMessage(bs.getSpecialChar('logoFlat'))
                    try:
                        if bs.getSharedObject('globals').slowMotion == True:
                            bs.getSharedObject('globals').slowMotion = False
                        else:
                            bs.getSharedObject('globals').slowMotion = True
                    except Exception:
                        bsInternal._chatMessage('AN ERROR OCCURED')
                elif m == '/end': # just end game
                    try:
                        bsInternal._getForegroundHostActivity().endGame()
                        bsInternal._chatMessage('THE END')
                    except Exception:
                        bsInternal._chatMessage('AN ERROR OCCURED')
# Singleton command handler used by the chat hook below.
c = chatOptions()
def cmd(msg):
    # Chat hook: messages arrive as "<nick>: <text>"; only dispatch while a
    # game activity is in the foreground.
    if bsInternal._getForegroundHostActivity() is not None:
        n = msg.split(': ')
        c.opt(n[0],n[1])
# Keep the party icon visible, then simulate a party-icon tap so chat
# traffic is routed through bsUI._handleLocalChatMessage.
bs.realTimer(5000,bs.Call(bsInternal._setPartyIconAlwaysVisible,True))
import bsUI
bs.realTimer(10000,bs.Call(bsUI.onPartyIconActivate,(0,0)))## THATS THE TRICKY PART check ==> 23858 bsUI / _handleLocalChatMessage
|
# Simple four-function calculator menu.
print('enter 1 for addition\nenter 2 for multiplication\nenter 3 for subtraction\nenter 4 for division')
c = int(input("please enter number:"))
number1 = int(input('give us a number '))
number2 = int(input('give us second number '))
# BUG FIX: `c` is an int, but the original compared it with the strings
# '1'..'4', so every choice printed 'invalid number'.  The stray `7` after
# `elif c == '4':` (an inline suite followed by an indented block, a
# SyntaxError) is removed as well.
if c == 1:
    print(number1 + number2)
elif c == 2:
    print(number1 * number2)
elif c == 3:
    print(number1 - number2)
elif c == 4:
    # Guard the only operation that can raise on valid int input.
    if number2 == 0:
        print('cannot divide by zero')
    else:
        print(number1 / number2)
else:
    print('invalid number')
from history import *
class Video:
    """A playable video: metadata plus like/view counters."""

    def __init__(self, name, uri, duration, desc, uid):
        self.__name = name
        self.__uri = uri
        self.__duration = duration
        self.__desc = desc
        self.__likes = 0
        self.__views = 0
        self.__uid = uid

    def disp(self):
        """Print every metadata field of this video, one per line."""
        fields = [
            ("name", self.__name),
            ("uri", self.__uri),
            ("duration", self.__duration),
            ("description", self.__desc),
            ("UID", self.__uid),
            ("Likes", self.__likes),
            ("Views", self.__views),
        ]
        for label, value in fields:
            print(f"{label}: {value}")

    def Play(self, history):
        """Record this play in *history* and derive the media file name."""
        history.add(self)
        file_name = f"{self.__uri}.mp4"  # kept for parity with the original

    def Pause(self):
        """Placeholder — pausing is not implemented yet."""

    def Resume(self):
        """Placeholder — resuming is not implemented yet."""

    def Rewind(self):
        """Placeholder — rewinding is not implemented yet."""

    def Forward(self):
        """Placeholder — fast-forward is not implemented yet."""
|
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import imaplib
import smtplib
import email
import os
class harpo:
    """Minimal SMTP/IMAP e-mail helper.

    NOTE(review): the __init__ docstring says index 0 = SMTP and
    index 1 = IMAP, yet send() prefers index 1 and receive() uses
    index 0 — this looks swapped relative to that convention; confirm
    against callers before changing the indices.
    """
    def __init__(self, email, password, server):
        """
        @brief Config of email
        @param [email]: (String) User name.\n
        @param [password]: (String) Key of email.\n
        @param [server]: (String) Server of email.\n
        index 0 = SMTP // index 1 = IMAP
        """
        self.email = email
        self.password = password
        self.server = server

    def send(self, destiny, subject, context, send_image=False, img=""):
        """
        @brief Send email for N destinations
        @param [destiny]: (List) User of destiny.\n
        @param [subject]: (String) Email subject.\n
        @param [context]: (String) Email context.\n
        @param [send_image]: (Bool) Send image.\n
        @param [img]: (String) Path of the image to attach.\n
        @return True on success, the raised exception on failure
        """
        try:
            # BUG FIX: the original's second branch tested
            # `len(self.server) == 0` and then indexed [0], so a
            # single-entry server list left `server` unbound
            # (UnboundLocalError, silently returned by the except).
            server = self.server[1] if len(self.server) > 1 else self.server[0]
            if send_image:
                img_data = open(img, 'rb').read()
                message = MIMEMultipart()
                message['subject'] = subject
                message['from'] = self.email
                message['to'] = destiny
                message.attach(MIMEText(context))
                message.attach(MIMEImage(img_data, name=os.path.basename(img)))
            else:
                message = MIMEText(context)
                message['subject'] = subject
                message['from'] = self.email
                message['to'] = destiny
            mail = smtplib.SMTP(server, 587)
            mail.login(self.email, self.password)
            mail.sendmail(self.email, destiny, message.as_string())
            mail.quit()
        except Exception as error:
            return error
        else:
            return True

    def __clean(self, mail_ids, mail):
        """
        @brief Internal method, clean inbox after obtained
        @param [mail_ids]: (List) Email ids received.\n
        @param [mail]: (Class) Open imaplib connection.\n
        """
        try:
            # Flag the whole fetched range as deleted, then close/logout.
            # (A leftover debug print of the range was removed.)
            start = mail_ids[0].decode()
            end = mail_ids[-1].decode()
            mail.store(f'{start}:{end}'.encode(), 'FLAGS', '\\Deleted')
            mail.close()
            mail.logout()
        except Exception as error:
            print(error)

    def receive(self, filters="ALL"):
        """
        @brief Receive email of N destinations, print them and delete them.
        @param [filters]: (String) IMAP search filter (default "ALL").
        @return True on success, the raised exception on failure
        """
        try:
            # NOTE(review): plain IMAP4 (port 143, no TLS); most providers
            # require IMAP4_SSL — confirm the target server.
            mail = imaplib.IMAP4(self.server[0])
            mail.login(self.email, self.password)
            mail.select('inbox')
            status, data = mail.search(None, filters.upper())
            mail_ids = []
            for block in data:
                mail_ids += block.split()
            for i in mail_ids:
                status, data = mail.fetch(i, '(RFC822)')
                for response_part in data:
                    if isinstance(response_part, tuple):
                        message = email.message_from_bytes(response_part[1])
                        mail_from = message['from']
                        mail_subject = message['subject']
                        if message.is_multipart():
                            # Concatenate only the text/plain parts.
                            mail_content = ''
                            for part in message.get_payload():
                                if part.get_content_type() == 'text/plain':
                                    mail_content += part.get_payload()
                        else:
                            mail_content = message.get_payload()
                        print(f'From: {mail_from}')
                        print(f'Subject: {mail_subject}')
                        print(f'Content: {mail_content}')
        except Exception as error:
            return error
        else:
            self.__clean(mail_ids, mail)
            return True
|
import json
import warnings
import pipeline
warnings.simplefilter('ignore')
class serializer:
    """Bundle of a query, its answers and an error message, JSON-serializable."""

    def __init__(self, ques, results, error_text):
        self.query = ques
        self.results = results
        self.errorText = error_text

    def to_json(self):
        """Serialize this object (recursively via __dict__) to indented JSON."""
        return json.dumps(
            self,
            default=lambda obj: obj.__dict__,
            sort_keys=True,
            indent=4,
        )
class answerSerializer:
    """Simple (score, answer) record nested inside serializer results."""

    def __init__(self, score, answer):
        self.score, self.answer = score, answer
class Querying(object):
    """Wraps a DrQA retriever pipeline for the fixed 'model' domain and
    answers a single question as a JSON string."""
    def __init__(self, ques):
        self.domain = "model"
        self.ques = ques
        self.n = 3  # number of documents/answers to retrieve
        self.basePath = self.domain
        # Project layout: sqlite document DB + TF-IDF retriever artifacts.
        self.doc_db = './db_file/' + self.basePath + '.db'
        self.retriever_model = './output_model/' + self.basePath + '-tfidf-ngram=2-hash=16777216-tokenizer=corenlp.npz'
        self.DrQA = pipeline.DrQA(
            cuda=None,
            fixed_candidates=None,
            reader_model=None,
            ranker_config={'options': {'tfidf_path': self.retriever_model}},
            db_config={'options': {'db_path': self.doc_db}},
            tokenizer='corenlp'
        )
    def main_function(self, ):
        """Run the pipeline on self.ques; return a JSON reply string whose
        errorText field carries any failure message."""
        print('Initializing pipeline...')
        results = []
        try:
            context, scores = self.process(self.ques, top_n=self.n, n_docs=self.n)
            for answer, score in zip(context, scores):
                results.append(answerSerializer(score, answer))
            response = serializer(self.ques, results, "")
        except Exception as e:
            response = serializer(self.ques, results, str(e))
        response = response.to_json()
        return response
    def process(self, question, candidates=None, top_n=1, n_docs=1):
        """Query DrQA and return (contexts, scores).

        NOTE(review): on DrQA failure this returns a JSON *string* instead
        of a tuple; main_function relies on the resulting unpacking error
        being caught by its own except block.
        """
        try:
            predictions = self.DrQA.process(question, candidates, top_n, n_docs, return_context=True)
        except Exception as e:
            response = serializer(question, [], str(e))
            return response.to_json()
        scores = []
        context = []
        for p in predictions:
            scores.append(p['doc_score'])
            context.append(p['context']['text'])
        return context, scores
def query(ques):
    """Run *ques* through the Querying pipeline and return its JSON reply."""
    return Querying(ques).main_function()
|
"""
1.1插入排序
基本思想:通过构建有序序列,对于未排序数据,在已排序序列中从后往前扫描,找到相应位置并插入;
算法步骤:1.将第一待排序序列第一个元素看做一个有序序列,把第二个元素到最后一个元素当成是未排序序列;
2.从头到尾一次扫描未排序序列,将扫描到的每个元素插入有序序列的适当位置。
(如果待插入元素与有序序列中的某个元素相等,则将待插入的元素插入到相等元素的后面《稳定》)。
"""
def insert_sort(lists):
    """In-place insertion sort (ascending); returns the same list."""
    for idx in range(1, len(lists)):
        current = lists[idx]
        pos = idx - 1
        while pos >= 0:
            if lists[pos] > current:
                # Shift the larger element right and carry `current` left.
                lists[pos + 1] = lists[pos]
                lists[pos] = current
            pos -= 1
    return lists
"""
1.2希尔排序
基本思想:先将整个待排序的记录序列分割成为若干子序列分别进行直接插入排序,待整个序列中的记录基本有序时,再对全体记录进行一次直接插入排序;
算法步骤:1.选择一个增量序列t1,t2,...,tk,其中ti>tj,tk=1;
2.按增量序列个数k,对序列进行k趟排序;
3.每趟排序,根据对应的增量ti,将待排序列分割成若干长度为m的子序列,分别对各子序列进行直接插入排序。
仅增量因子为1时,整个序列作为一个表来处理,表长度即为整个序列的长度;
"""
def shell_sort(lists):
    """In-place Shell sort (gap sequence n/2, n/4, ..., 1); returns the list.

    BUG FIX: the original used true division (`/`), which yields floats on
    Python 3 and crashes in range(); floor division restores the intended
    Python 2 integer behaviour.
    """
    count = len(lists)
    step = 2
    group = count // step
    while group > 0:
        for i in range(0, group):
            # Gapped insertion sort over the i-th subsequence.
            j = i + group
            while j < count:
                k = j - group
                key = lists[j]
                while k >= 0:
                    if lists[k] > key:
                        lists[k + group] = lists[k]
                        lists[k] = key
                    k -= group
                j += group
        group //= step
    return lists
"""
2.1直接选择排序
基本思想:
算法步骤:1.首先在未排序序列中找到最小(大)元素,存放到排序序列的起始位置;
2.再从剩余未排序元素中继续寻找最小(大)元素,然后放到已排序序列的末尾;
3.重复第二步,直到所有元素均排序完毕;
"""
def select_sort(lists):
    """In-place selection sort (ascending); returns the same list."""
    n = len(lists)
    for i in range(n):
        # Find the smallest remaining element and swap it into slot i.
        smallest = i
        for j in range(i + 1, n):
            if lists[j] < lists[smallest]:
                smallest = j
        lists[i], lists[smallest] = lists[smallest], lists[i]
    return lists
"""
2.2堆排序
基本思想:堆积是一个近似完全二叉树的结构,并同时满足堆积的性质:即子节点的键值或索引总是小于(或大于)它的父节点;
算法步骤:1.创建一个堆H[0...n-1]
2.把堆首(最大值)和堆尾互换;
3.把堆的尺寸缩小1,并调用shift_down(0),目的是把新的数组顶端数据调整到相应位置;
4.重复步骤2,直到堆的尺寸为1;
"""
def adjust_heap(lists, i, size):
    """Sift lists[i] down so the subtree rooted at i is a max-heap of *size*."""
    left, right = 2 * i + 1, 2 * i + 2
    largest = i
    if 2 * i < size:  # i is an internal node (same test as i < size / 2)
        if left < size and lists[left] > lists[largest]:
            largest = left
        if right < size and lists[right] > lists[largest]:
            largest = right
        if largest != i:
            lists[largest], lists[i] = lists[i], lists[largest]
            adjust_heap(lists, largest, size)
def build_heap(lists, size):
    """Heapify the first *size* elements bottom-up (max-heap).

    BUG FIX: `range(0, size / 2)` is a TypeError on Python 3 because `/`
    produces a float; integer division keeps the Python 2 semantics.
    """
    for i in reversed(range(size // 2)):
        adjust_heap(lists, i, size)
def heap_sort(lists):
    """In-place heap sort (ascending).

    BUG FIXES: after swapping the root to the tail, the heap must be
    re-adjusted with the shrunken size `i` — the original passed the
    constant 1, which left the array unsorted; the reversed range is
    also rewritten so it works on Python 3.
    """
    size = len(lists)
    build_heap(lists, size)
    for i in range(size - 1, 0, -1):
        # Move the current maximum to its final slot, then restore the heap.
        lists[0], lists[i] = lists[i], lists[0]
        adjust_heap(lists, 0, i)
"""
3.1交换排序--冒泡排序
基本思想:重复地走访过要排序的数列,一次比较两个元素,小元素交换至前面;
算法步骤:1.比较相邻的元素,如果第一个比第二个大,就交换;
2.对每一对相邻元素做同样的工作,从开始第一对到结尾的最后一对;
3.针对所有元素重复以上的步骤;
4.持续每次对越来越少的元素重复以上的步骤,直到没有任何一对数字需要比较;
"""
def bubble_sort(lists):
    """In-place exchange sort (ascending); returns the same list.

    Note: despite the name, this compares slot i against every later slot
    (selection-by-exchange), exactly as the original did.
    """
    n = len(lists)
    for i in range(n):
        for j in range(i + 1, n):
            if lists[j] < lists[i]:
                lists[j], lists[i] = lists[i], lists[j]
    return lists
"""
3.2交换排序--快速排序
基本思想:使用分治法策略把一个串行分为两个子串行;
算法步骤:1.从数列中挑出一个元素,称为基准;
2.重新排序数列,所有元素比基准值小的摆放在基准前面,所有元素比基准值大的摆在基准后面;在这个分区退出之后,该基准就处于数列中间,这称为分区操作;
3.递归地把小于基准值元素的子数列和大于基准值元素的字数列排序;
"""
def quick_sort(lists, left, right):
    """In-place quicksort of lists[left:right+1] (ascending); returns the list."""
    if left >= right:
        return lists
    pivot = lists[left]
    lo, hi = left, right
    # Hoare-style fill partition: holes move toward the middle.
    while left < right:
        while left < right and lists[right] >= pivot:
            right -= 1
        lists[left] = lists[right]
        while left < right and lists[left] <= pivot:
            left += 1
        lists[right] = lists[left]
    lists[right] = pivot
    quick_sort(lists, lo, left - 1)
    quick_sort(lists, left + 1, hi)
    return lists
#另一种写法
def QuickSort(myList, start, end):
    """Alternative in-place quicksort over myList[start:end+1]; returns the list."""
    if start < end:
        i, j = start, end
        base = myList[i]  # pivot value
        while i < j:
            # From the right, skip values >= pivot, then move the smaller one left.
            while i < j and myList[j] >= base:
                j -= 1
            myList[i] = myList[j]
            # From the left, skip values <= pivot, then move the larger one right.
            while i < j and myList[i] <= base:
                i += 1
            myList[j] = myList[i]
        # i == j: drop the pivot into its final slot and recurse on both halves.
        myList[i] = base
        QuickSort(myList, start, i - 1)
        QuickSort(myList, j + 1, end)
    return myList
"""
4.1归并排序
算法步骤:1.申请空间,使其大小为两个已经排序序列之和,该空间用来存放合并后的序列;
2.设定两个指针,最初位置分别为两个已经排序序列的起始位置;
3.比较两个指针所指向的元素,选择相对小的元素放入到合并空间,并移动指针到下一位置;
4.重复步骤3直到某一指针达到序列尾;
5.将另一序列剩下的所有元素直接复制到合并序列尾;
"""
def merge(left, right):
    """Merge two already-sorted lists into one sorted list."""
    i, j = 0, 0
    result = []
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One side is exhausted; append whatever remains of the other.
    result += left[i:]
    result += right[j:]
    return result
def merge_sort(lists):
    """Return a sorted copy of *lists* via top-down merge sort.

    BUG FIX: the midpoint used true division (`/`), which produces a
    float slice index on Python 3; `//` restores integer indexing.
    """
    if len(lists) <= 1:
        return lists
    mid = len(lists) // 2
    left = merge_sort(lists[:mid])
    right = merge_sort(lists[mid:])
    return merge(left, right)
"""
5.1基数排序
基本思想:属于分配式排序,又称桶子法bucker sort或bin sort,是通过键值的部分资讯,将要排序的元素分配至某些桶中,藉以达到排序的作用;
"""
import math
def radix_sort(lists, radix = 10):
    """LSD radix sort for non-negative integers; sorts *lists* in place and returns it.

    BUG FIXES vs. the original:
    - digit extraction was `j / radix**(i-1) % radix**i`: `/` is float
      division on Python 3, and `% radix**i` yields bucket indices >= radix
      once values reach three digits (IndexError); the correct digit is
      `(j // radix**(i-1)) % radix`.
    - the pass count came from math.log, whose float rounding drops a digit
      for exact powers of the radix (e.g. 100); counting digits with integer
      arithmetic is robust, and also avoids crashing on an empty list.
    """
    if not lists:
        return lists
    # k = number of radix digits in the largest value.
    biggest = max(lists)
    k = 1
    while radix ** k <= biggest:
        k += 1
    bucket = [[] for _ in range(radix)]
    for i in range(1, k + 1):
        # Distribute by the i-th digit (least significant first)...
        for j in lists:
            bucket[(j // (radix ** (i - 1))) % radix].append(j)
        # ...then collect the buckets back in order.
        del lists[:]
        for z in bucket:
            lists += z
            del z[:]
    return lists
|
import threading
local_school = threading.local()
def process_student():
    # Read this thread's own binding of `student` (set by process_thread);
    # raises AttributeError if the current thread never set it.
    std = local_school.student
    print('Hello, %s (in %s)' % (std,threading.current_thread().name))
def process_thread(name:str):
    # Bind `name` into thread-local storage, then greet from this thread.
    local_school.student = name
    process_student()
# Two threads each store a different name in the shared `local_school`
# object; threading.local keeps the values isolated per thread.
t1 = threading.Thread(target=process_thread,args=("Peter",),name="Thread-A")
t2 = threading.Thread(target=process_thread,args=("Raymond",),name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
# Use threading.local when each thread needs its own copy of a variable.
# Think of threading.local as a per-thread mapping: each thread only sees
# the attributes it set itself.
import json
import requests
from idgenie_django.models import IDGenieSession
from django.contrib.auth import logout
from django.http.response import JsonResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render
from rest_framework.decorators import api_view
from django.conf import settings
relying_party_endpoint_push_mfa = settings.ID_GENIE_SESSION_ENDPOINT_MFA_PUSH
def id_genie_hello(request):
    # Landing page for the ID Genie demo.
    return render(request, 'id-genie.html', {})
def id_genie_send_mfa(request):
    # Forward the posted session_id to the relying party's push-MFA endpoint.
    # NOTE(review): the endpoint's response is ignored and 'sent' is returned
    # even when no session_id was posted — confirm that is intentional.
    if session_id := request.POST.get('session_id', None):
        res = requests.post(relying_party_endpoint_push_mfa, data={'session_id': session_id})
    return JsonResponse({'message': 'sent'})
def validate_id_genie_session(request):
    # Ask the ID Genie service whether `session` is valid; on success mark
    # the Django session authenticated and flag the stored session row.
    session = request.POST.get('session', None)
    res = requests.post(settings.ID_GENIE_VALIDATION_URL, data={'session': session})
    valid = json.loads(res.text).get('valid', None)
    if valid == True:
        request.session["IDGenieAuthenticated"] = True
        # `session` is rebound from the code string to the model instance.
        session = IDGenieSession.objects.get(code=session)
        session.is_valid = True
        session.save()
    return JsonResponse({'valid': valid})
def id_genie_status(request):
    """Poll endpoint: report whether the IDGenie session for `code` has
    been validated; consume (delete) the one-shot session row once valid."""
    obj = None
    if code := request.POST.get('code', None):
        try:
            obj = IDGenieSession.objects.get(code=code)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt.  Exception is kept broad so a
            # missing/duplicate row still falls through to the error reply.
            pass
    if obj:
        if is_valid := obj.is_valid:
            request.session['IDGenieAuthenticated'] = True
            obj.delete()
        return JsonResponse({'valid': is_valid})
    return JsonResponse({'error': 'InvalidSessionID'})
def cancel_session(request):
    # Drop the pending IDGenie session (if any) and log the user out.
    code = request.POST.get('code', '')
    IDGenieSession.objects.filter(code=code).delete()
    logout(request)
    return JsonResponse({'message': 'OK'})
|
# Read a count (unused beyond the prompt) and a space-separated list of
# ints, then print the smallest and largest values.
count = int(input())
values = list(map(int, input().split(' ')))
smallest = values[0]
largest = values[0]
for value in values:
    if value > largest:
        largest = value
    if value < smallest:
        smallest = value
print(smallest, largest)
|
import copy, glob
def func(filename):
    """Deduplicate the edge lines of *filename* (also dropping reversed
    'b a' duplicates of an already-kept 'a b'), then rewrite the file
    with a fixed '200' header line."""
    unique_lines = set()
    with open(filename) as fh:
        next(fh)  # skip the original header line
        for raw in fh:
            unique_lines.add(raw.strip())
    kept = set()
    for edge in unique_lines:
        tokens = edge.split()
        reversed_edge = tokens[1] + " " + tokens[0]
        if reversed_edge not in kept and edge not in kept:
            kept.add(edge)
    print(len(unique_lines), len(kept))
    with open(filename, "w+") as fh:
        fh.write("200\n")
        for edge in kept:
            fh.write(edge + "\n")
# Deduplicate every numeric-named .txt file in the working directory.
for source_file in glob.glob("[0-9]*.txt"):
    func(source_file)
|
#temp&RH
import time
from micropython import const
import board
import busio
import adafruit_si7021
import csv
# Set up the Si7021 temperature/humidity sensor on the board's I2C bus.
i2c_port = busio.I2C(board.SCL, board.SDA)
_USER1_VAL = const(0x3A)  # NOTE(review): unused here — register constant kept from the vendor example
sensor = adafruit_si7021.SI7021(i2c_port)
print('Temperature: {} degrees C'.format(sensor.temperature))
print('Humidity: {}%'.format(sensor.relative_humidity))
temp = []
humid = []
# Sample for ~21 seconds at ~2 s intervals.
endtime = time.time() + 21
while True:
    temp.append(sensor.temperature)
    humid.append(sensor.relative_humidity)
    if time.time() > endtime:
        break
    time.sleep(2)
# Write one CSV row of temperatures and one of humidities.
# NOTE(review): csv docs recommend open(..., newline='') — confirm target platform.
with open('temprh.csv', 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(temp)
    writer.writerow(humid)
print("Done, new file")
import pandas as pd
import numpy as np
from tools import make_distr
distrib = dict()
# Semicolon-separated dataset with a 'Container type' column.
containers = pd.read_csv('DS_1.csv', header=0, sep=';')
# Quadratic value per container type: t^2 - 3.5*t + 3.5.
containers['val'] = (containers['Container type'])**2 - 7/2*(containers['Container type']) + 7/2
distrib = make_distr(containers, 40000, 20, 1, 1, 1, 1, 1, 5000) #containers, Wp, L, bias, lambd_val, lambd_weight, number_of_exp, num_res_with_min_energy, num_reads |
from django.urls import path
from . import views
urlpatterns = [
    path(r'', views.index,name='index'), # routes to index.html
    path(r'base_layout',views.base_layout,name='base_layout'), # routes to base.html
] |
class Solution:
    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """Evaluate division queries given known variable ratios.

        Builds an undirected ratio graph from the equations, then answers
        each query with a BFS that multiplies edge ratios along the path.
        -1.0 marks unknown variables or unreachable pairs. Ratios found
        during one query are cached and reused by later ones.
        """
        adjacency = collections.defaultdict(set)
        ratio = dict()   # (a, b) -> value of a / b
        seen = set()     # nodes expanded by the current BFS (reset per query)

        def search(src, dst):
            # Already known from the equations or an earlier traversal?
            if (src, dst) in ratio:
                return ratio[(src, dst)]
            if src not in adjacency or dst not in adjacency:
                return -1.0
            if src == dst:
                return 1.0
            seen.add(src)
            frontier = collections.deque(
                (nbr, ratio[(src, nbr)]) for nbr in adjacency[src]
            )
            while frontier:
                node, acc = frontier.popleft()
                if node == dst:
                    return acc
                for nbr in adjacency[node]:
                    if nbr not in seen:
                        seen.add(nbr)
                        ratio[(src, nbr)] = acc * ratio[(node, nbr)]
                        frontier.append((nbr, ratio[(src, nbr)]))
            return -1.0

        # Register every equation a / b = v as a pair of directed ratios.
        for (a, b), v in zip(equations, values):
            adjacency[a].add(b)
            adjacency[b].add(a)
            ratio[(a, b)] = v
            ratio[(b, a)] = 1.0 / v

        answers = list()
        for src, dst in queries:
            seen.clear()
            answers.append(search(src, dst))
        return answers
|
import matplotlib.pyplot as plt
import numpy as np
# Draw a bar chart, pie chart and box plot of quarterly GDP data.
plt.rcParams['font.sans-serif'] = "SimHei"   # CJK-capable font
plt.rcParams['axes.unicode_minus'] = False   # render minus signs with this font
data = np.load('../data/国民经济核算季度数据.npz')
name = data['columns']
values = data['values']
label = ['第一产业', '第二产业', '第三产业']
plt.figure(figsize=(6, 5))
# Bar chart of the three industry sectors, latest quarter (columns 3:6).
plt.bar(range(3), values[-1, 3:6], width=0.5)
plt.xlabel('产业')
plt.ylabel('生产总值(亿元)')
plt.xticks(range(3), label)
plt.title('2017年第一季度各产业国民生产总值直方图')
plt.savefig('../tmp/直方图.png')
plt.show()
# Pie chart of the same three sectors.
plt.figure(figsize=(6, 6))
label = ['第一产业', '第二产业', '第三产业']
# Offset each wedge slightly from the centre.
explode = [0.01, 0.01, 0.01]
plt.pie(values[-1, 3:6], explode=explode, labels=label, autopct='%1.1f%%')
plt.title('2017年第一季度个产业国民生产总值饼图')
plt.savefig('../tmp/生产总值饼图.png')
plt.show()
# Box plot over all quarters for each sector.
plt.figure(figsize=(6, 4))
label = ['第一产业', '第二产业', '第三产业']
gdp = (list(values[:, 3]), list(values[:, 4]), list(values[:, 5]))
plt.boxplot(gdp, notch=True, labels=label, meanline=True, sym='o')
plt.title('2017年第一季度各产业国民生产总值箱线图')
plt.savefig('../tmp/生产总值箱线图.png')
plt.show()
|
from django.urls import path, re_path
from . import views
app_name = "newsletters"
urlpatterns = [
    # Subscription sign-up endpoint.
    path("subscription/", views.SubscriptionCreateView.as_view(), name="subscription"),
    # Confirmation link; <key> accepts word characters, '-' and ':'.
    re_path(
        r"^subscription/confirm/(?P<key>[-:\w]+)/$",
        views.SubscriptionConfirmView.as_view(),
        name="subscription-confirm",
    ),
]
|
from django.urls import path
from doctors.views import DoctorView, ListDoctorsView
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg OpenAPI schema view for the interactive Swagger UI; publicly
# accessible with no authentication.
schema_view = get_schema_view(
    openapi.Info(
        title="Doctor Store APIs",
        default_version='v1',
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="edwardman917@gmail.com"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
    authentication_classes=()
)
# URL routes: interactive Swagger docs plus the doctor REST endpoints.
urlpatterns = [
    path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('doctor/<uuid:doctor_id>', DoctorView.as_view(), name='get_doctor'),
    path('doctor', ListDoctorsView.as_view(), name='list_doctors'),
]
|
from PIL import Image
import glob
import os

# Convert every PNG in the working directory to a BMP alongside it.
for file in glob.glob("*.png"):
    img = Image.open(file)
    rgb_im = img.convert('RGB')
    # Replace only the extension: the original str.replace("png", "bmp")
    # also mangled any "png" occurring in the file's stem.
    target = os.path.splitext(file)[0] + ".bmp"
    # NOTE(review): `quality` only applies to lossy formats (e.g. JPEG);
    # BMP ignores it. Kept for compatibility with the original call.
    rgb_im.save(target, quality=95)
|
import plistlib, sys, os

# Default parameter skeleton; real values come from the environment when set.
parameters = dict(
    hipay=dict(
        username = os.environ.get('HIPAY_FULLSERVICE_API_USERNAME', 'xxxxxx'),
        password = os.environ.get('HIPAY_FULLSERVICE_API_PASSWORD', 'xxxxxx')
    ),
    hockeyapp=dict(
        app_identifier = os.environ.get('HOCKEY_APP_IDENTIFIER', 'xxxxxx'),
    )
)
filename = "Example/HiPayFullservice/Resources/Parameters/parameters.plist"
path = "../" + filename
# Merge with current parameters.
# plistlib.readPlist/writePlist were removed in Python 3.9; use load/dump
# on binary file objects instead.
if os.path.isfile(path):
    with open(path, 'rb') as fp:
        currentParameters = plistlib.load(fp)
    parameters["hipay"].update(currentParameters["hipay"])
    parameters["hockeyapp"].update(currentParameters["hockeyapp"])
# Save file
with open(path, 'wb') as fp:
    plistlib.dump(parameters, fp)
sys.stdout.write("\n\nA new parameters file was created at:\n")
sys.stdout.write(filename + "\n")
sys.stdout.write("You need add your HiPay parameters in it.\n\n\n")
|
import logging
from app.config_common import *
# Production overrides for the shared configuration: debugging off, only
# errors logged, log files rotated at ~1 MB keeping 30 backups.
DEBUG = False
LOG_LEVEL = logging.ERROR
LOG_MAXBYTES = 1000000
LOG_BACKUPS = 30
|
#--------------------------------------------------
# limits.py
# this file serves to hold the important limits
# that are used in the project
# introduced on R2 (pyFlask)
#--------------------------------------------------
################################################################
# Essential Limits (to be used for each pyFlask deployment)
################################################################
MAX_USERNAME_SIZE = 40
MIN_USERNAME_SIZE = 4
MAX_EMAIL_SIZE = 20
# NOTE(review): a sha256 hex digest is 64 chars, not 80 — 80 leaves headroom
# but the original comment's rationale is wrong; confirm the intended scheme.
MAX_PASSWORD_SIZE = 80
MIN_PASSWORD_SIZE = 5
MAX_NAME_SIZE = 50 #Legal Name
MAX_CONFIG_NAME_SIZE = 50
MAX_CONFIG_VALU_SIZE = 50
MAX_DESCRIPTION_SIZE = 250
MAX_UUPARAM_SIZE = 100
MAX_IPADDR_SIZE = 16   # fits dotted-quad IPv4 + NUL, not IPv6
MAX_FILENAME_SIZE = 30
LOGS_MAX_BYTES = 100000
TOKEN_LENGTH = 10
MAX_MQTT_TOPIC_SIZE=100
MAX_MQTT_MSGCT_SIZE=1000
DEF_MQTT_MSGST_DURA=86400 # default store duration (seconds; one day)
################################################################
|
class Solution:
    def isInterleave(self, s1, s2, s3):
        """Return True when s3 is an interleaving of s1 and s2.

        An interleaving preserves the relative order of the characters of
        both source strings. Rejects immediately on a length mismatch.
        """
        if len(s1) + len(s2) != len(s3):
            return False
        else:
            return self.helper(s1, s2, s3)

    def helper(self, s1, s2, s3):
        """Memoized interleaving check (assumes len(s1)+len(s2) == len(s3)).

        The original implementation recursed on string slices without a
        cache, which is exponential on inputs with many shared characters;
        this DP over (i, j) prefixes is O(len(s1) * len(s2)).
        """
        memo = {}

        def solve(i, j):
            # i chars of s1 and j chars of s2 consumed; next s3 char is k.
            if i == len(s1) and j == len(s2):
                return True
            key = (i, j)
            if key in memo:
                return memo[key]
            k = i + j
            ok = False
            if i < len(s1) and s1[i] == s3[k]:
                ok = solve(i + 1, j)
            if not ok and j < len(s2) and s2[j] == s3[k]:
                ok = solve(i, j + 1)
            memo[key] = ok
            return ok

        return solve(0, 0)
# Demo calls; converted from Python 2 print statements (syntax errors in py3).
S = Solution()
print(S.isInterleave('aabcc', 'dbbca', 'aadbbcbcac'))
print(S.isInterleave('aabcc', 'dbbca', 'aadbbbaccc'))
|
# Search for a short array whose XOR equals u and whose sum equals v:
# start from [v-1, 1] and shift weight between the two cells.
u, v = list(map(int, input().split()))
arr = [v - 1, 1]
for i in range(0, v):  # bounded number of adjustments
    if sum(arr) == v:
        base = arr[0]  # was `val[0]`: NameError, `val` was never defined
        for j in range(1, len(arr)):
            base ^= arr[j]
        if base == u:
            # join() needs strings; the original passed ints (TypeError).
            print(len(arr), " ".join(map(str, arr)))
            break
    arr[0] -= 1
    arr[1] += 1
|
from neopixel_helpers import cycle, bounce, fade_in_out, np_setup, COLORS
np = np_setup()
# Run three effects per colour; i-1 / i-2 wrap via negative indexing, so the
# bounce and fade effects use the two preceding colours in the palette.
for i, color in enumerate(COLORS):
    cycle(np, color)
    bounce(np, COLORS[i-1])
    fade_in_out(np, COLORS[i-2])
|
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import numpy as np
from func_utils import *
# NOTE(review): tf and keras are presumably re-exported by func_utils via the
# star import above — confirm, the names are otherwise undefined here.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
keras.backend.set_session(session)
if __name__ == "__main__":
    # Grid-search driver: train an LSTM-VAE multitask model for every
    # (dataset, window length, alpha) combination and evaluate anomaly
    # detection with a POT-style threshold search.
    #data processing
    prefix = "Thesis Dataset"
    datasets = ['MSL'] # SMAP,MSL,SMD (machine-1-1,....)
    #use window = np.arange(10,110,10) to study the effect of different window size
    window = [100]
    #alpha is the loss weight assigned to the forecasting loss, and 1-alpha is assigned to the reconstruction loss
    alphas = [0.0, 0.25, 0.5, 0.75, 1.0]
    for d, dataset in enumerate(datasets):
        for i, window_length in enumerate(window):
            for j, alpha in enumerate(alphas):
                tf.reset_default_graph()
                print(10*"="+"Training Dataset "+str(dataset)+10*"=")
                print(10*"="+"Training Window "+str(window_length)+10*"=")
                print(10*"="+"Loss weight alpha "+str([(1-alpha), alpha])+10*"=")
                #pre-processing the dataset
                x_train, y_train_, x_test, y_test_, x_train_labels, x_test_labels = preprocess(dataset, window_length)
                #take the last x, i.e. xt as the target for the forecasting model in y_train as well as in y_test
                y_train = x_train[:,-1,:]
                y_test = x_test[:,-1,:]
                #create folder to store the results
                # NOTE(review): this rebinds the `datasets` list being iterated
                # to a string; iteration continues safely (the iterator holds
                # its own reference) but a distinct name would be clearer.
                datasets = dataset+"_"+str(window_length)
                #Hyperparameters
                input_dim = x_train.shape[-1] #
                timesteps = x_train.shape[1] #
                batch_size = 1
                intermediate_dim = 100
                latent_dim = 3
                epochs = 30
                loss_weights = [(1-alpha), alpha]
                #creating model
                model, vae, enc, dec = create_lstm_vae_multitask_variance(input_dim,
                    timesteps=timesteps,
                    batch_size=batch_size,
                    intermediate_dim=intermediate_dim,
                    latent_dim=latent_dim,
                    epsilon_std=1.,
                    loss_weight=loss_weights,
                    dropout = 0.3)
                #additional attribute on saved file
                file_att=str(loss_weights)
                #start training model and save model after training is done
                train_model(x_train, y_train, model, epochs, datasets, save_model=True,
                            load_model=False,
                            file_att=file_att)
                #computing reconstruction probability,reconstruction error and prediction error
                x_score, x_test_score, mae_train, mae_test, mse_train, mse_test = get_score(x_train, y_train, x_test, y_test,
                                                                                           model, enc, dec, datasets, window_length,
                                                                                           file_att)
                #rec is an indicator passed to the get_eval_param so that the function knows which mode is actually on.
                #rec=True if the model only learns from reconstruction model
                #rec=None if the model only learns from the forecasting model
                #rec=False if the model learns from both models
                if alpha == 0:
                    rec = True
                elif alpha == 1:
                    rec = None
                else:
                    rec = False
                #preparing train_score, test_score and test_labels
                train_score, test_score, test_labels = get_eval_param(x_score, x_test_score, x_test_labels, mae_train, mae_test,
                                                                     window_length,
                                                                     datasets,
                                                                     alpha,
                                                                     file_att=file_att,
                                                                     save_res=True,
                                                                     rec_only=rec)
                #lvl is the assumption of extreme quantile of the anomalies in the dataset.
                if dataset=='MSL':
                    lvl = np.arange(0.001,0.1,0.001)
                elif dataset=='SMAP':
                    lvl = np.arange(0.01,0.1,0.01)
                else:
                    lvl = np.arange(0.0005, 0.1, 0.0005)
                #grid search to find the best extreme quantile assumption
                f1_max = 0
                lvl_max = 0
                # NOTE(review): this loop reuses `i` from the window loop above;
                # harmless here (i is not read afterwards) but worth renaming.
                for i, level in enumerate(lvl):
                    #evaluating the model
                    _,_,result = test_model(train_score, test_score, test_labels, window_length, datasets, level=level, file_att=file_att,
                                            save_res=False)
                    #store the current max f1-score along with the respective lvl
                    if(result['pot-f1'])>f1_max:
                        f1_max = result['pot-f1']
                        lvl_max = level
                #get the final results using the best extreme quantile assumption
                result = test_model(train_score, test_score, test_labels, window_length,datasets, level=lvl_max, file_att=file_att,
                                    save_res=True)
|
import requests
import matplotlib.pyplot as plt
import networkx as nx
from time import sleep
# One-time usage notice shown on startup.
print('This program is designed to build a graph of the friendship relations between users of social network VK.\n'
      'ATTENTION!!! If you choose a depth of search greater than 1, be ready to face a lack of RAM.\n'
      'If you have a very fast computer, please, input nonzero delay. Otherwise antiDoS system may reject you.\n')
def get_friends_dict(user_id=1):
    """Fetch the user's friend list from the VK REST API (network call).

    Relies on module-level HOST and VERSION, which are defined later in the
    script but bound before this function is first invoked.
    """
    return requests.get(HOST + 'friends.get', params={'user_id': user_id, 'fields': 'first_name', 'v': VERSION}).json()
def add_account_to_graph(graph, id):
    """Add one edge per friend of `id` to the graph.

    A KeyError means the API response had no 'response'/'items' payload
    (private profile, rate limit, bad id); such accounts are skipped.
    """
    try:
        for friend in get_friends_dict(id)['response']['items']:
            graph.add_edge(id, friend['id'])
    except KeyError:
        pass
def graph_builder(graph, id, depth=1, delay=float(0), inscription='', enrich_inscription=True):
    """Recursively grow the friendship graph around `id`, `depth` levels deep.

    `delay` throttles the API calls (anti-DoS). `inscription` and
    `enrich_inscription` only shape the progress messages.
    """
    add_account_to_graph(graph, id)
    sleep(delay)
    if enrich_inscription:
        inscription = str(len(graph.nodes()))
    if depth == 1:
        return
    iterator = 1
    # Snapshot the node view before recursing: the recursive calls add nodes
    # and edges to `graph`, and mutating it while iterating the live view
    # raises "dictionary changed size during iteration".
    for friend_id in list(graph.nodes()):
        iteration = str(iterator) + '/' + inscription
        print('Iteration:', iteration)
        graph_builder(graph, friend_id, depth - 1, delay, iteration, False)
        iterator += 1
# VK API endpoint and version used by get_friends_dict.
HOST = 'https://api.vk.com/method/'
VERSION = '5.62'
print('Please, input ID of some VK user:')
my_id = int(input())
print('Please, input a depth of search:')
my_depth = int(input())
if my_depth < 1:
    my_depth = 1
    print('Invalid value. The depth was changed to 1.')
print('Please, input a time delay in seconds:')
my_delay = float(input())
my_graph = nx.Graph()
graph_builder(my_graph, my_id, my_depth, my_delay)
print('Calculating space configuration of vertexes…')
positions = nx.spring_layout(my_graph)
edges = [element for element in my_graph.edges(data=True)]
print('Drawing a graph…')
# Yellow square nodes, red labels, then save and display the rendering.
nx.draw_networkx_nodes(my_graph, positions, node_shape='s', node_size=500, node_color='y')
nx.draw_networkx_labels(my_graph, positions, font_size=4, font_family='sans-serif', font_color='r')
nx.draw_networkx_edges(my_graph, positions, edgelist=edges, width=1)
plt.axis('off')
plt.savefig('friends_graph.png')
plt.show()
|
from car import Car
class UberX(Car):
    """UberX vehicle: a Car carrying a brand and a model."""

    # Declared types of the attributes assigned in __init__. The original
    # used `brand = str`, which bound the `str` type object itself as a
    # class attribute — almost certainly meant as annotations.
    brand: str
    model: str

    def __init__(self, license, driver, brand, model):
        """Build an UberX; license and driver are handled by Car."""
        # NOTE: plain `super().__init__(license, driver)` is equivalent here.
        super(UberX, self).__init__(license, driver)
        self.brand = brand
        self.model = model
|
import pandas as pd
from preprocessing_util import *
def processing_for_both(data):
    """Apply the shared Titanic preprocessing pipeline to a DataFrame."""
    # Pad NaNs in the numeric columns, then drop columns we do not model.
    data = nan_padding(data, ["Age", "SibSp", "Parch"])
    data = drop_not_concerned(
        data, ["PassengerId", "Name", "Ticket", "Fare", "Cabin", "Embarked"])
    data = class_to_onehot(data, ["Pclass"])  # one-hot encode passenger class
    data = sex_to_int(data)                   # Sex -> 1 / 0
    return normalize_age(data)                # scale Age
def preprocess_training_set(train_path):
    """Load and clean the training CSV, split off a validation set.

    Returns (train_x, train_y, valid_x, valid_y).
    """
    cleaned = processing_for_both(pd.read_csv(train_path))
    return split_valid_test_data(cleaned)
def preprocess_test_set(test_path):
    """Load and clean the test CSV.

    Returns (cleaned_data, PassengerId) — the ids are captured before the
    column is dropped by the shared pipeline.
    """
    raw = pd.read_csv(test_path)
    ids = raw["PassengerId"]
    return processing_for_both(raw), ids
# def preprocess(train_path, test_path):
# # read files
# train_data = pd.read_csv(train_path)
# test_data = pd.read_csv(test_path)
# # fill blank entries in the following columns
# nan_columns = ["Age", "SibSp", "Parch"]
# train_data = nan_padding(train_data, nan_columns)
# test_data = nan_padding(test_data, nan_columns)
# test_passenger_id = test_data["PassengerId"]
# # drop columns
# not_concerned_columns = ["PassengerId","Name", "Ticket", "Fare", "Cabin", "Embarked"]
# train_data = drop_not_concerned(train_data, not_concerned_columns)
# test_data = drop_not_concerned(test_data, not_concerned_columns)
# # encode class to onehot
# class_columns = ["Pclass"]
# train_data = class_to_onehot(train_data, class_columns)
# test_data = class_to_onehot(test_data, class_columns)
# # encode sex as 1 and 0
# train_data = sex_to_int(train_data)
# test_data = sex_to_int(test_data)
# # scale age
# train_data = normalize_age(train_data)
# test_data = normalize_age(test_data)
# # split validation set from training set
# train_x, train_y, valid_x, valid_y = split_valid_test_data(train_data)
# return train_x, train_y, valid_x, valid_y, test_data, test_passenger_id
|
import albumentations as albu
from albumentations.pytorch import ToTensor
import numpy as np
import cv2
import torch
from .transforms import AlbuRandomErasing, ResizeWithKp, MultiScale
def get_training_albumentations(size=(256, 256), pad=10, re_prob=0.5, with_keypoints=False, ms_prob=0.5):
    """Training-time augmentation pipeline.

    Multi-scale jitter -> resize to `size` -> constant-pad by `pad` per side
    -> random crop back to `size` -> random erasing with prob `re_prob`.
    When `with_keypoints` is set, xy keypoints are carried through the
    transforms (invisible ones are kept, not dropped).
    """
    h, w = size
    train_transform = [
        MultiScale(p=ms_prob),
        ResizeWithKp(h, w, interpolation=cv2.INTER_CUBIC),
        albu.PadIfNeeded(h + 2 * pad, w + 2 * pad, border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.RandomCrop(height=h, width=w, always_apply=True),
        AlbuRandomErasing(re_prob),
    ]
    if with_keypoints:
        return albu.Compose(train_transform, keypoint_params=albu.KeypointParams(format='xy', remove_invisible=False))
    else:
        return albu.Compose(train_transform)
def get_validation_augmentations(size=(256, 256), with_keypoints=False):
    """Validation/test pipeline: a plain resize to `size` (optionally
    keypoint-aware, keeping invisible keypoints)."""
    h, w = size
    test_transform = [
        ResizeWithKp(h, w),
    ]
    if with_keypoints:
        return albu.Compose(test_transform, keypoint_params=albu.KeypointParams(format='xy', remove_invisible=False))
    else:
        return albu.Compose(test_transform)
def to_tensor(x, **kwargs):
    """Convert an HWC image array to a CHW torch tensor (data is copied)."""
    chw = np.transpose(x, (2, 0, 1))
    return torch.tensor(chw)
def get_preprocessing(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225)):
    """Normalization + tensor conversion; defaults are the ImageNet stats."""
    _transform = [
        albu.Normalize(mean, std),
        albu.Lambda(image=to_tensor, mask=to_tensor)
    ]
    return albu.Compose(_transform)
|
# Animation clip demonstrating solution of points when they're near parallel and orthogonal
# FILEID: H4
from manimlib.imports import *
import numpy as np
from decimal import Decimal
class Scene1(LinearTransformationScene):
    """Animate two-line systems whose solution point is ill-conditioned.

    Shows y = m*x + c pairs with slopes (1, 1.1) — near parallel, where a
    small change of c moves the intersection a lot — and (1, 11) — near
    orthogonal, where the intersection barely moves.
    """
    CONFIG = {
        "include_background_plane": True,
        "include_foreground_plane": False,
        "show_coordinates": False,
        "show_basis_vectors": False,
    }
    def construct(self):
        """Build the equations/lines, then vary c of the second line."""
        self.setup()
        self.text = []
        text = self.text
        # Slopes of the three lines; all share the same intercept 0.5.
        slopeval = [1, 1.1, 11]
        constantval = str(0.5)
        eqcolors = [
            [MAROON_E, YELLOW_E],
            [MAROON_C, YELLOW_C],
            [MAROON_B, YELLOW_B],
        ]
        for i in range(3): #0 - 5
            slope = "{m_" + str(i+1) + "}"
            constant = "{c_" + str(i+1) + "}"
            text.append(TexMobject("{y}", "{ = }", slope, "{x}", "{ + }", constant,
                tex_to_color_map = {slope: eqcolors[i][0], constant: eqcolors[i][1]}))
            text.append(TexMobject("{y}", "{ = }", str(slopeval[i]), "{x}", "{ + }", constantval,
                tex_to_color_map = {str(slopeval[i]): eqcolors[i][0], constantval: eqcolors[i][1]}))
            text[2*i].add_background_rectangle()
            text[2*i+1].add_background_rectangle()
            text[2*i].to_edge(UP)
            text[2*i+1].to_edge(UP)
        [text.append([TexMobject("{m_" + str(i+1)+ "}", "{=}", tex_to_color_map = {"{m_" + str(i+1)+ "}": eqcolors[i][0]}),
            TexMobject("{c_" + str(i+1)+ "}", "{=}", tex_to_color_map = {"{c_" + str(i+1)+ "}": eqcolors[i][1]})])
            for i in range(3)] #6[], 7[], 8[]
        [[text[i][j].scale(0.8) for i in range(6,9)] for j in range(2)]
        text[6][0].shift(6.5*LEFT+3.5*UP)
        text[6][1].next_to(text[6][0], RIGHT, 0.8*LARGE_BUFF)
        text[7][0].next_to(text[6][0], DOWN, MED_SMALL_BUFF)
        text[7][1].next_to(text[7][0], RIGHT, 0.8*LARGE_BUFF)
        # NOTE(review): text[8] is positioned relative to text[6]/text[7]
        # exactly like text[7], so rows 7 and 8 overlap on screen — confirm
        # whether text[8][0] should sit below text[7][0] instead.
        text[8][0].next_to(text[6][0], DOWN, MED_SMALL_BUFF)
        text[8][1].next_to(text[7][0], RIGHT, 0.8*LARGE_BUFF)
        text.append(TexMobject(r"\text{Freeze the first line and vary}").scale(0.7)) #9
        text.append(TextMobject(r"{the y-intercept (c) of the other line}").scale(0.7)) #10
        text[9].to_edge(DOWN)
        text[9].shift(MED_SMALL_BUFF*UP)
        text[10].next_to(text[9], DOWN, SMALL_BUFF)
        text[9].add_background_rectangle()
        text[10].add_background_rectangle()
        m1 = TexMobject("{1}", color = LIGHT_GREY).scale(0.75)
        c1 = TexMobject("{0.5}", color = LIGHT_GREY).scale(0.75)
        m1.next_to(text[6][0], RIGHT, SMALL_BUFF)
        c1.next_to(text[6][1], RIGHT, SMALL_BUFF)
        m2 = TexMobject("{1.1}").scale(0.75)
        c2 = TexMobject("{0.5}").scale(0.75)
        m2.next_to(text[7][0], RIGHT, SMALL_BUFF)
        c2.next_to(text[7][1], RIGHT, SMALL_BUFF)
        m3 = TexMobject("{11}").scale(0.75)
        c3 = TexMobject("{0.5}").scale(0.75)
        # NOTE(review): m3/c3 are placed next to the text[7] row (copy-paste
        # from m2/c2?) — presumably they should anchor to text[8].
        m3.next_to(text[7][0], RIGHT, SMALL_BUFF)
        c3.next_to(text[7][1], RIGHT, SMALL_BUFF)
        line1 = Line(start = 7*LEFT+ 6.5*DOWN, end = 7*RIGHT + 7.5*UP, stroke_width = 3, color = BLUE_E)
        line2 = Line(start = 7*LEFT+ 7.2*DOWN, end = 7*RIGHT + 8.2*UP, stroke_width = 3, color = BLUE_B)
        line3 = Line(start = 0.682*LEFT+ 7*DOWN, end = 0.591*RIGHT + 7*UP, stroke_width = 3, color = BLUE_B)
        solution = Dot(color = RED_C, radius = 0.07).shift(0.5*UP)
        self.play(ShowCreation(text[0]))
        self.wait(1)
        self.play(Transform(text[0], text[1]))
        self.wait(1)
        self.play(ShowCreation(line1))
        self.wait(0.5)
        self.play(FadeOut(text[0]) ,FadeIn(VGroup(text[6][0],text[6][1], m1, c1)), run_time = 1.5)
        self.wait(1)
        self.play(ShowCreation(text[2]))
        self.wait(1)
        self.play(Transform(text[2], text[3]))
        self.wait(1)
        self.play(ShowCreation(line2))
        self.wait(0.5)
        self.play(FadeOut(text[2]) ,FadeIn(VGroup(text[7][0],text[7][1], m2, c2)), run_time = 1.5)
        self.play(FadeIn(solution))
        self.wait(1)
        self.play(ShowCreation(text[9]))
        self.play(ShowCreation(text[10]))
        self.wait(2.5)
        self.play(FadeOut(VGroup(*text[9:11])), run_time = 0.5)
        self.wait(2)
        # np.set_printoptions(precision=2)
        # Near-parallel case: nudging c2 up/down swings the intersection far.
        for c in np.arange(0.1,0.6, 0.1):
            c21 = TexMobject(str(round(c,2)+0.5)).scale(0.75)
            c21.shift(c2.get_center())
            self.play(Transform(c2, c21), ApplyMethod(line2.shift, 0.1*UP),
                      ApplyMethod(solution.shift, 1*LEFT+1*DOWN),
                      run_time = 0.5)
        # self.wait()
        for c in np.arange(0.1,1.0, 0.1):
            c21 = TexMobject(str(round(1.0-c,3))).scale(0.75)
            c21.shift(c2.get_center())
            self.play(Transform(c2, c21), ApplyMethod(line2.shift, 0.1*DOWN),
                      ApplyMethod(solution.shift, 1*UP+1*RIGHT),
                      run_time = 0.5)
        self.wait(2.5)
        self.play(FadeOut(VGroup(text[7][0],text[7][1], m2, c2, line2, solution)))
        solution = SmallDot(color = PINK, radius = 0.05).move_to(0.5*UP)
        self.play(ShowCreation(text[4]))
        self.wait(1)
        self.play(Transform(text[4], text[5]))
        self.wait(1)
        self.play(ShowCreation(line3))
        self.wait(0.5)
        self.play(FadeOut(text[4]) , FadeIn(VGroup(text[8][0],text[8][1], m3, c3)), run_time = 1.5)
        self.play(FadeIn(solution))
        self.wait(1)
        # Near-orthogonal case: the same c sweep barely moves the solution.
        for c in np.arange(0.1,0.6, 0.1):
            c31 = TexMobject(str(round(c,2)+0.5)).scale(0.75)
            c31.shift(c3.get_center())
            self.play(Transform(c3, c31), ApplyMethod(line3.shift, 0.1*UP),
                      ApplyMethod(solution.shift, 0.01*LEFT+0.01*DOWN),
                      run_time = 0.5)
        # self.wait()
        for c in np.arange(0.1,1.0, 0.1):
            c31 = TexMobject(str(round(1.0-c,3))).scale(0.75)
            c31.shift(c3.get_center())
            self.play(Transform(c3, c31), ApplyMethod(line3.shift, 0.1*DOWN),
                      ApplyMethod(solution.shift, 0.01*UP+0.01*RIGHT),
                      run_time = 0.5)
        self.wait(2.5)
        # NOTE(review): this fades out text[7]/m2/c2, which were already
        # removed above — presumably text[8]/m3/c3 were intended here.
        self.play(FadeOut(VGroup(text[7][0],text[7][1], m2, c2, line3, solution)))
        self.wait(2)
|
# Demo of f-string format specifications: binary, hex, fixed-point, plain.
kinds = 2
print(f"""Es gibt {kinds:b} Arten von Leuten. Diejenigen die Binärzahlen verstehen.
Und die anderen.""")
zahl = 255
print(f"255 hexadezimal {zahl:x}")
euler = 2.718281828459045
print(f"Eulersche Zahl: {euler:5.2f}")
erster_name = "Heidi"
print(f"Liebe {erster_name}")
|
from django.templatetags.static import static
from django.utils.html import format_html
from wagtail.core import hooks
@hooks.register("insert_global_admin_js", order=100)
def global_admin_js():
    """Inject the app's prototype.js into every Wagtail admin page."""
    return format_html(
        '<script src="{}"></script>',
        static("app/js/prototype.js")
) |
# If you need pydot, install it with: pip install pydot
# Also install graphviz ('graphviz-2.38.msi') and add its install directory
# (e.g. 'C:\Graphviz2.38\bin') to the 'Path' environment variable
# (system variables); this may require admin privileges.
import pandas as pd
from sklearn import tree
import pydot
import io
import os
# ----------------------------
# Data collection
os.chdir('e:/titanic')
titanic_train = pd.read_csv("train.csv")
print(type(titanic_train))
# ----------------------------
#explore the dataframe (EDA)
titanic_train.shape
titanic_train.info()
# Feature engineering: model on passenger class and sibling/spouse count only.
X_train = titanic_train[['Pclass', 'SibSp']]
y_train = titanic_train['Survived']
# -----------------------------
# MB phase (model building)
# 1 model object
tree_model1 = tree.DecisionTreeClassifier(criterion='entropy')
# 2 learning process
tree_model1.fit(X_train, y_train)
tree_model1.n_classes_ # classes in survived column
# NOTE(review): n_features_ is deprecated in newer scikit-learn in favour of
# n_features_in_ — confirm the pinned sklearn version.
tree_model1.n_features_ # no of features used for MB
# 3 predict on the held-out test file and write a submission CSV
titanic_test = pd.read_csv('test.csv')
titanic_test.shape
titanic_test.info()
X_test = titanic_test[['Pclass','SibSp']]
titanic_test['Survived'] = tree_model1.predict(X_test)
titanic_test.to_csv('submission5.csv', columns=['PassengerId','Survived'],index=False)
# ---------------------------------------------
#visualize the decision tree as a PDF via graphviz dot
dot_data = io.StringIO()
tree.export_graphviz(tree_model1, out_file = dot_data, feature_names = X_train.columns, filled=True, rounded=True, special_characters=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]
graph.write_pdf("decision-tree21.pdf")
|
#!/usr/bin/env python
"""===========
%(PROG)s
===========
-------------------------------------------------------
compute linear regression of two user-specified fields
-------------------------------------------------------
:Author: skipm@trdlnk.com
:Date: 2014-01-13
:Copyright: TradeLink LLC 2014
:Version: 0.1
:Manual section: 1
:Manual group: data filters
SYNOPSIS
========
%(PROG)s [ -c ] [ -f x,y ] [ -s sep ] [ -o col ]
OPTIONS
=======
-f x,y use columns x and y as inputs to regression.
-s sep use sep as the field separator (default is comma)
-o col write to column col - if not given, just append to output
-c only print correlation coefficient to stdout, no regression data
DESCRIPTION
===========
Data are read from stdin, the regression is computed, the the input is
written to stdout with the new field. Details about the regression
results are written to stderr (unless -c is given).
SEE ALSO
========
* take
* mpl
* avg
* sigavg
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import getopt
import os
import csv
import scipy.stats
from six.moves import zip
PROG = os.path.basename(sys.argv[0])
def main():
    """Read CSV from stdin, regress field y on x, append the fitted value.

    With -c only the correlation coefficient is printed. Integer -f fields
    select the positional csv.reader/writer path; names select the
    DictReader/DictWriter path (which also emits a header).
    Returns a process exit status (0 on success, 1 on bad options).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "f:s:ho:c")
    except getopt.GetoptError:
        usage()
        return 1
    corr = False
    sep = ","
    field1 = 0
    field2 = 1
    field3 = 2
    for opt, arg in opts:
        if opt == "-f":
            try:
                field1, field2 = [int(x) for x in arg.split(",")]
            except ValueError:
                # Must have given names
                field1, field2 = arg.split(",")
        elif opt == "-o":
            try:
                field3 = int(arg)
            except ValueError:
                # Must have given names
                field3 = arg
        elif opt == "-c":
            corr = True
        elif opt == "-s":
            sep = arg
        elif opt == "-h":
            usage()
            return 0
    # Used below to tell the positional-writer path from the dict path.
    writer_type = type(csv.writer(sys.stdout))
    if isinstance(field1, int):
        reader = csv.reader(sys.stdin, delimiter=sep)
        writer = csv.writer(sys.stdout, delimiter=sep)
    else:
        reader = csv.DictReader(sys.stdin, delimiter=sep)
        names = reader.fieldnames[:]
        names.append(str(field3))
        writer = csv.DictWriter(sys.stdout, fieldnames=names, delimiter=sep)
    x = []
    y = []
    rows = list(reader)
    # Collect only rows where both regression fields are non-empty; the
    # fields are converted to float in place for reuse below.
    for row in rows:
        if row[field1] and row[field2]:
            row[field1] = float(row[field1])
            row[field2] = float(row[field2])
            x.append(row[field1])
            y.append(row[field2])
    (slope, intercept, r, p, stderr) = scipy.stats.linregress(x, y)
    if corr:
        print(r)
        return 0
    # Regression details go to stderr so stdout stays a clean CSV stream.
    print("slope:", slope, "intercept:", intercept, file=sys.stderr)
    print("corr coeff:", r, "p:", p, "err:", stderr, file=sys.stderr)
    writer.writeheader()
    for row in rows:
        if row[field1]:
            val = slope * float(row[field1]) + intercept
            if isinstance(writer, writer_type):
                row.append(val)
            else:
                row[field3] = val
        writer.writerow(row)
    return 0
def usage():
    """Write the module's usage text (with PROG substituted) to stderr."""
    sys.stderr.write(__doc__ % globals() + "\n")
if __name__ == "__main__":
sys.exit(main())
|
def process_file(f):
    """
    Extract per-month flight data from the HTML file *f* into a list of
    dictionaries. Example of the returned data structure:
    data = [{"courier": "FL",
             "airport": "ATL",
             "year": 2012,
             "month": 12,
             "flights": {"domestic": 100,
                         "international": 100}
            },
            {"courier": "..."}
    ]
    year, month and the flight counts are integers; rows that contain the
    TOTAL data for a year are skipped.
    """
    data = []
    # Courier and airport come from the filename prefix, e.g. "FL-ATL...".
    info = {}
    info["courier"], info["airport"] = f[:6].split("-")
    with open("{}/{}".format(datadir, f), "r") as html:
        soup = BeautifulSoup(html)
        result = soup.find('table', class_='dataTDRight')
        for r in result.find_all('tr', class_='dataTDRight'):
            cols = r.find_all('td')
            # Copy the base info for each row. The original aliased the same
            # dict (`i = info`), so every element of `data` referenced one
            # object and ended up holding only the last row's values.
            i = dict(info)
            if cols[1].text != 'TOTAL':
                i["year"] = int(cols[0].text)
                i["month"] = int(cols[1].text)
                i["flights"] = {"domestic": int(cols[2].text.replace(',', '')),
                                "international": int(cols[3].text.replace(',', ''))}
                data.append(i)
    return data
|
from abc import ABC, abstractmethod
# Base Abstract Class
class Animal(ABC):
    """Abstract base class for animals: a taxonomy group plus a colour."""

    def __init__(self, group, color):
        # group names a taxonomy bucket (mammals, reptiles, birds, ...).
        self.mygroup = group
        self._mycolor = color
        print(f"My tone is {self._mycolor} in colour")

    def isgroup(self):
        """Announce the taxonomy group (shared default implementation)."""
        print(f"I belong to the group of {self.mygroup}")

    @abstractmethod
    def sounds(self):
        """Subclasses must implement their characteristic sound."""
        print(" I can make sounds")
#base classes and derived classes form a hierarchy
#Polymorpism is shown by function class Animal, where init has overriding functionality
#Derived classes
class Dog(Animal):
    """Mammal that barks."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I Bark")
class Cat(Animal):
    """Mammal that mews."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I mew")
#Interfaces has implicitly only abstract method as followed
class Bear(Animal):
    """Implements only the abstract sound; group display comes from Animal."""

    def sounds(self):
        print("I growl")
class Giraffes(Animal):
    """Mammal that bleats."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I bleat")
class Lizard(Animal):
    """Reptile that squeaks."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I squeak")
class Crows(Animal):
    """Bird that caws."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I Caw")
class Lion(Animal):
    """Big cat that roars."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I roar")
class Grasshopper(Animal):
    """Insect that chirps."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I chirp")
class Frogs(Animal):
    """Amphibian that croaks."""

    def isgroup(self):
        print(f"I belong to the group of {self.mygroup}")

    def sounds(self):
        print("I croak")
# Demo: instantiate a concrete Animal and let it sound.
x = Frogs("Amphibian","green ")
x.sounds()
|
import time
def find_routes(rows, columns):
    """Count the monotone lattice paths across a rows x columns grid.

    Dynamic programme: routes(i, j) = routes(i-1, j) + routes(i, j-1),
    with exactly one route along either edge of the grid.
    The original contained a Python 2 `print dictionary` debug statement
    (a syntax error under Python 3) that dumped the whole table; removed.
    """
    counts = {}
    for i in range(rows + 1):
        for j in range(columns + 1):
            if i == 0 or j == 0:
                counts[(i, j)] = 1  # only one way along a grid edge
            else:
                counts[(i, j)] = counts[(i - 1, j)] + counts[(i, j - 1)]
    return counts[(rows, columns)]
# Time the 20x20 grid computation; converted from Python 2 print syntax.
start = time.time()
n = find_routes(20, 20)
elapsed = (time.time() - start)
print("%s found in %s seconds" % (n, elapsed))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import viewsets
from . import models
from . import serializers
class ProductViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows products to be viewed or edited.
    """
    # Standard ModelViewSet CRUD over Product, newest rows first.
    queryset = models.Product.objects.all().order_by('-id')
    serializer_class = serializers.ProductSerializer
class CartViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows carts to be viewed or edited.
    """
    # Standard ModelViewSet CRUD over Cart, newest rows first.
    queryset = models.Cart.objects.all().order_by('-id')
    serializer_class = serializers.CartSerializer
class CartItemViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows cart items to be viewed or edited.
    """
    # Standard ModelViewSet CRUD over CartItem, newest rows first.
    queryset = models.CartItem.objects.all().order_by('-id')
    serializer_class = serializers.CartItemsSerializer
|
import secret
import twitter
import time
# Authenticated Twitter API client; credentials live in the (untracked)
# `secret` module's dict.
api = twitter.Api(
    consumer_key = secret.dict['consumer_key'],
    consumer_secret = secret.dict['consumer_secret'],
    access_token_key = secret.dict['access_token_key'],
    access_token_secret = secret.dict['access_token_secret']
)
|
from django.db import models
from django.conf import settings
from .cohort import Cohort
from .project import Project
from ...utilities.base_model import BaseModel
class ProjectCohortManager(models.Manager):
    """Manager adding convenience queries for ProjectCohort."""

    def get_all_active_project_cohort_details(self, project_id):
        """Return the active cohort links for the given project."""
        # Single filter call; equivalent to chaining .filter(is_active=True)
        # .filter(project_id=...) for these plain column lookups.
        return self.filter(is_active=True, project_id=project_id)
class ProjectCohort(BaseModel):
    """Join model linking a Cohort to a Project (one row per unique pair)."""
    class Meta:
        # https://docs.djangoproject.com/en/1.10/ref/models/options/#db-table
        db_table = 'curation_project_cohort'
        unique_together = (("cohort", "project"),)
    objects = ProjectCohortManager()
    id = models.AutoField(primary_key=True)
    # Deleting either side cascades to this link row.
    cohort = models.ForeignKey(Cohort, on_delete=models.CASCADE, related_name='related_project')
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='related_cohorts')
|
import serial
import matplotlib.pyplot as plt
# Open the serial port and echo its resolved device name.
ser = serial.Serial('/dev/ttyUSB0')
print(ser.name)
# Seed an empty plot spanning what looks like a 12-bit ADC range on both
# axes (0..4096) -- TODO confirm the sensor's actual range.
plt.plot(0,0)
plt.xlim(0,4096)
plt.ylim(0,4096)
try:
    while True:
        # One CSV record per line: dt, ch1_x, ch1_y, ch2_x, ch2_y
        line = ser.readline().decode("utf-8")
        try:
            dt, ch1_x, ch1_y, ch2_x, ch2_y = line.split(',')
        except ValueError:
            # Malformed or partial line (common right after opening the
            # port): skip it and wait for the next record.
            pass
        else:
            dt = int(dt)
            ch1_x = int(ch1_x)
            ch1_y = int(ch1_y)
            ch2_x = int(ch2_x)
            ch2_y = int(ch2_y)
            print(f"t: {dt}, x: {ch1_x} y: {ch1_y}, x: {ch2_x} y: {ch2_y}")
            # Redraw from scratch each sample; note only channel 1 is plotted.
            plt.cla()
            plt.scatter(ch1_x, ch1_y)
            plt.xlim(0,4096)
            plt.ylim(0,4096)
            # plt.line(dt)
            plt.pause(0.01)
except:
    # Bare except so even Ctrl-C (KeyboardInterrupt) is announced; the
    # exception is re-raised immediately after the message.
    print("Exiting")
    raise
finally:
    ser.close()  # always release the serial port
# Keypirinha launcher (keypirinha.com)
import keypirinha_util as kpu
import keypirinha as kp
import time
import os
class Launchy(kp.Plugin):
    """
    Populate catalog using Launchy's configuration format.
    This plugin allows you to populate your catalog the same way you would
    in Launchy. You can simply copy your configuration over and this plugin
    will be able to parse and replicate the same list as in Launchy.
    """
    def __init__(self):
        super().__init__()
    def _update_config(self):
        """Reload the [directories] section of the settings into self.dir_configs.

        Expects a 'size' key giving the number of numbered entries; entry i
        (1-based) uses the keys 'i\\name', 'i\\types', 'i\\depth' and
        'i\\indexdirs', mirroring Launchy's own configuration layout.
        """
        self.dir_configs = []
        settings = self.load_settings()
        size = settings.get_int('size', 'directories')
        if size is None:
            self.warn('No size parameter specified')
            return
        for i in range(size):
            k = str(i + 1)  # Launchy numbers entries from 1
            self.dir_configs.append({
                'name': settings.get_stripped(k + '\\name', 'directories'),
                'types': settings.get_stripped(k + '\\types', 'directories', fallback=''),
                'depth': settings.get_int(k + '\\depth', 'directories', fallback=0),
                'indexdirs': settings.get_bool(k + '\\indexdirs', 'directories', fallback=False),
            })
        self.settings = settings
        loaded_msg = "Successfully updated the configuration, found {} entries"
        self.info(loaded_msg.format(len(self.dir_configs)))
    def _load_dir(self, i, config):
        """Scan one configured directory and merge its entries into the catalog.

        Returns the number of items added (0 when the config is unusable).
        """
        if config['name'] is None:
            self.warn("No 'name' provided for config #{}".format(i + 1))
            return 0
        # Launchy stores doubled backslashes; collapse them, then expand
        # %ENVVAR%-style references in the path.
        path_name = config['name'].replace('\\\\', '\\')
        root_path = os.path.expandvars(path_name)
        if not os.path.exists(root_path):
            self.warn("Path '{}' in config #{} does not exist".format(path_name, i + 1))
            return 0
        paths = []
        # 'types' is a comma-separated list of glob patterns; '@Invalid()'
        # is the marker for an empty list in the copied-over config.
        for glob in config['types'].split(','):
            if glob.strip() in ['', '@Invalid()']:
                continue
            self.should_terminate()  # cooperatively abort if Keypirinha cancels us
            files = kpu.scan_directory(root_path, name_patterns=glob.strip(),
                flags=kpu.ScanFlags.FILES, max_level=config['depth'])
            paths.extend(files)
        if config['indexdirs']:
            self.should_terminate()
            dirs = kpu.scan_directory(root_path, name_patterns='*',
                flags=kpu.ScanFlags.DIRS, max_level=config['depth'])
            paths.extend(dirs)
        self.merge_catalog([
            self.create_item(
                category=kp.ItemCategory.FILE,
                label=os.path.basename(path),
                short_desc="",
                target=os.path.join(root_path, path),
                args_hint=kp.ItemArgsHint.ACCEPTED,
                hit_hint=kp.ItemHitHint.KEEPALL)
            for path in paths])
        return len(paths)
    def on_start(self):
        """Plugin start-up hook: load the configuration once."""
        self._update_config()
    def on_catalog(self):
        """Rebuild the whole catalog from every configured directory."""
        catalog_size = 0
        self.set_catalog([])
        start_time = time.time()
        for i, config in enumerate(self.dir_configs):
            catalog_size += self._load_dir(i, config)
        elapsed = time.time() - start_time
        stat_msg = "Cataloged {} items in {:0.1f} seconds"
        self.info(stat_msg.format(catalog_size, elapsed))
    def on_execute(self, item, action):
        """Launch the selected catalog item with the default action."""
        kpu.execute_default_action(self, item, action)
    def on_events(self, flags):
        """Reload settings whenever the package configuration changes."""
        if flags & kp.Events.PACKCONFIG:
            self._update_config()
|
# Interactive toy: read an integer n, build n, nn and nnn by repeating its
# decimal representation, then print the three values and their sum.
# NOTE(review): the string-repetition trick assumes a non-negative input;
# a negative number produces e.g. "-5-5", which int() rejects.
while True:
    # Keep prompting until the user enters a valid integer.
    while True:
        try:
            num1 = int(input('\nEnter a number: '))
        except ValueError:
            print("Please enter only number")
        else:
            break
    n1 = num1  # "%d" % n round-trips to n itself; no conversion needed
    n2 = int("%d%d" % (num1, num1))         # digits repeated twice
    n3 = int("%d%d%d" % (num1, num1, num1)) # digits repeated three times
    print(n1)
    print(n2)
    print(n3)
    print(n1, '+', n2, '+', n3, '=', (n1 + n2 + n3))
    # Ask whether to run again; accept only yes/y/no/n, case-insensitively.
    while True:
        Repeat = input("\nDo you want to calculate again?\n\nYes or No:")
        Repeat = Repeat.lower()
        if Repeat not in ["yes", "y", "no", "n"]:
            print("\nPlease select correct option")
        else:
            break
    # Repeat is now guaranteed valid, so a single membership test suffices
    # (the old nested if/else re-tested the complement redundantly).
    if Repeat in ["no", "n"]:
        print("\n-----Thank you for using-----")
        input()  # hold the console window open until a final keypress
        break
|
#!/usr/bin/env python
'''
================================================================================
### Motif-Mark ###
A program to convert sequences motifs to a visual representation of the locations of motifs in sequences. This script can take DNA or RNA sequence input. It requires
a fasta file and a text file with the motifs. This script can take up to 13 motifs. This script returns a visual image displaying the sequence
as a line with exons overlapped on the line as empty black boxes and motifs as solid colored areas. It alo returns a text list of motif locations on the genes given.
For each sequence the entire fasta header line (with chromosome, location and such) is given.
===============================================================================
================================================================================
To start the program in an Ubuntu shell:
Make sure that you are in the directory where your files are at. Ex: cd /mnt/c/Users/david/OneDrive/Documents/Winter_Genomics_2020
conda install -c conda-forge pycairo
Start program, including entering arg parse for fasta file and motifs.text file as follows:
./motif_mark.py -s -s <directory path>/<sequence_file>.fasta -m <directory path>/<motif_file.text
For me that, once in the correct directory, is ./motif_mark.py -s ./motif_mrker_sequence.fasta -m ./motif_marker_motifs.txt.text
Your motif file should look something like the following:
AACTGYat
YYYSYYN
AAaaggcR
ccgtgtca
================================================================================
'''
#===============================================================================
#Import necessary libraries
#===============================================================================
import cairo
import re
import argparse
import sys
import math
import numpy as np
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from re import finditer
#===============================================================================_
# Stand-alone Functions
#===============================================================================
def getargs():
    """Parse the command line: -s/--seqfile (fasta) and -m/--motifs (txt)."""
    cli = argparse.ArgumentParser(
        description = 'input fasta file and motif list .txt file and output an image of where these motifs are along particular genes')
    cli.add_argument('-s', '--seqfile', required=True, type=str,
                     help="Enter -s or --seqfile followed by your absolute fasta file path")
    cli.add_argument('-m', '--motifs', required=True, type=str,
                     help="-m 0r --motifs followed by the absolute file path to a text file with one motif per line.")
    return cli.parse_args()
# Set variables for Arg items
args = getargs()  # parse -s/-m from the command line at import time
seqfile = args.seqfile  # path to the input fasta file
motifs = args.motifs  # path to the motif list (one motif per line)
#unit test
#for line in open(seqfile,'r'):
#    print(line)
#for line in open(motifs,'r'):
#    print(line)
#===============================================================================
def motif_iupac_translater(motifs_list):
    """Turn IUPAC motif strings into case-insensitive regex patterns.

    Each IUPAC nucleotide code (upper or lower case) becomes a character
    class matching both cases, with T and U treated as equivalent so the
    patterns work against DNA or RNA. Characters outside the IUPAC
    alphabet pass through unchanged (matching the old behaviour for 'U').

    Fixes over the previous chained str.replace() implementation:
    - 'W' (IUPAC weak = A/T) used to translate to [TtUu], silently
      dropping A/a; it is now [AaTtUu].
    - A stray replace("W", "w]") could inject an unbalanced ']' into the
      pattern; the per-character table removes that whole class of bug
      (later replacements can no longer corrupt earlier insertions).
    """
    # IUPAC nucleotide code -> regex character class (T/U interchangeable).
    iupac_to_regex = {
        "A": "[Aa]",
        "G": "[Gg]",
        "C": "[Cc]",
        "T": "[TtUu]",
        "R": "[AaGg]",        # puRine
        "Y": "[CcTtUu]",      # pYrimidine
        "S": "[GgCc]",        # Strong
        "W": "[AaTtUu]",      # Weak (A/T) -- fixed: old code omitted A
        "K": "[GgTtUu]",      # Keto
        "M": "[AaCc]",        # aMino
        "B": "[GgCcTtUu]",    # not A
        "D": "[AaGgTtUu]",    # not C
        "H": "[AaCcTtUu]",    # not G
        "V": "[AaCcGg]",      # not T/U
        "N": "[GgCcAaTtUu]",  # aNy
    }
    search_motifs = []
    for motif in motifs_list:
        regexmotif = "".join(iupac_to_regex.get(base.upper(), base) for base in motif)
        search_motifs.append(regexmotif)
    return search_motifs
#Unit Tests
#search_motifs = motif_iupac_translater(motifs_list)
#print(search_motifs)
#===============================================================================_
def gene_capture(seqfile):
    """Read a fasta file into {header: sequence} and return the dict.

    The key is the full '>' header line (minus the '>'), so chromosome and
    location annotations are preserved; multi-line sequences are joined
    into one string per record.

    Fixes: the previous version populated the module-level ``genes_dict``
    (a hidden global dependency) and ended with an unreachable
    ``seqfile.close()`` after the return (``seqfile`` is a path string, not
    a file). The dict is now local and the file is closed by the with-block.
    """
    genes_dict = {}
    with open(seqfile, 'r') as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(">"):  # header line starts a new record
                seq_name = line[1:]
                genes_dict[seq_name] = []
            else:
                genes_dict[seq_name].append(line)  # sequence continuation
    # Collapse each record's list of lines into one sequence string.
    for key in genes_dict:
        genes_dict[key] = "".join(genes_dict[key])
    return genes_dict
#===============================================================================
# Get a list of motifs from the inputted motifs_list.txt
#===============================================================================
motifs_list = [line.rstrip('\n') for line in open(motifs)]
#===============================================================================
# create a debugger object to identify prints for debugging
#===============================================================================
debugger = print
#===============================================================================
# Create a list of regex ready motifs and a dictionary that pairs the iupac motifs with the regex ready versions
#===============================================================================
# This calls on the motif_iupac_translater finction
search_motifs = motif_iupac_translater(motifs_list)
#debugger(search_motifs)
motif_regex_dict = dict(zip(motifs_list,search_motifs))
#===============================================================================
# Call gene_capture function
#===============================================================================
genes_dict = {}
genes = gene_capture(seqfile)  # {full fasta header: complete sequence string}
#===============================================================================
# Measure the lengths of the sequences and get the longest sequence
#===============================================================================
#This will be used to calibrate the pycairo drawing sheet.
values_list = list(genes.values())
sequence_lengths = []
for item in values_list:
    # debugger(item)
    sequence_length = len(item)
    #debugger(sequence_length)
    sequence_lengths.append(sequence_length)
longest_sequence = max(sequence_lengths)  # the widest gene drives the page width
#debugger(longest_sequence)
#===============================================================================
# Make color dictionary to match each motif with a color
#===============================================================================
# Up to 13 distinct RGB tuples (matching the documented 13-motif limit);
# one is assigned to each motif in file order.
color_list = [(1,0,0),(0,0.8,0),(0,0,1),(0.4,0,0.8),(1,0.2,1),(1,1,0.2),(0.4,1,1),(1,0.5,0),(0.3,0.6,0),(0.6,0,0.3),(1,0.6,0.6),(0.4,0.2,0),(0.5,0.5,0.5)]
color_dict = {}
for i in range(len(motifs_list)):
    color_dict[motifs_list[i]] = [color_list[i]]
#debugger(color_dict)
#===============================================================================
# Set other global variables, while outside the loops
#===============================================================================
motif_locations = {} #To store the location in the sequence, where motifs are found, paired with the motif
for header in genes: #populate motif_locations with fasta file header keys
    motif_locations[header] = []
#debugger(motif_locations)
exon_locations = {} #To store the locations of exons (re-initialized as a list per gene below)
margin = 100 # Sets the left margin for the cairo page
spans_info = [] # accumulates (header, motif, [spans]) tuples for the text report
#===============================================================================
# Set Up Cairo and Title for motif marker Page
#===============================================================================
surface = cairo.SVGSurface("Motif_marker_Results.svg",longest_sequence + margin, 100 * len(genes) + 200) #sets the drawing surface by the longest_sequence and the number of genes in the fasta file.
context = cairo.Context(surface)
context.move_to(margin - 50, 30)
context.set_line_width(1) #This and the next four commands create the Title
context.set_font_size(20)
context.select_font_face("Calibri")
context.text_path("Motifs found in these sequences. (Exons are marked with a rectangle.)")
context.stroke()
#===============================================================================
#Loops to get name of genes(from header), exons and motifs in each gene and draw them
#===============================================================================
spacer = 70 # y offset of the current gene's row; advanced after each gene
for header in genes: #Do the following for each gene sequence in the fasta file.
    sequence_length = len(genes[header]) #get the length of the sequences to be used to calibrate images to the space
    #debugger(sequence_length)
    # These next three steps draw a line for each gene, the length of the sequence.
    context.set_source_rgb(0,0,0)
    context.move_to(margin, spacer) #Set up sequence lines
    thin_line = context.set_line_width(1)
    context.line_to(margin + sequence_length, spacer) #The x is determined by the sequence length, the y is spaced from the last sequence
    context.stroke()
    # These next six steps use the entire header lines of the fasta file to name the genes
    context.move_to(margin, spacer - 10)
    context.set_line_width(1)
    context.set_font_size(10)
    context.select_font_face("calibri")
    context.text_path(header)
    context.stroke()
    # get the exon locations and map them
    exon_locations = [] # initialize local storage of exon locations for one gene to mark exons
    string = genes[header] # make it easy to place sequence from genes dictionary into regex
    #debugger(string)
    # NOTE(review): re.search returns only the FIRST capitalized run, so at
    # most one exon per gene is boxed -- confirm single-exon input is intended.
    exon = re.search(r"[ACTGU]+", string) # Find Exons, which will be capitalized
    #debugger(exon)
    if exon:
        exon_locations.append(exon.span()) # find span of exons
        #debugger(exon_locations)
    # This nested loop draws a box for each exon found in exon_locations
    for item in exon_locations:
        context.set_line_width(1)
        context.set_source_rgb(0,0,0)
        context.rectangle(item[0] + margin,spacer -5,item[1] - item[0], 6)
        context.stroke()
    # This nested loop does the following for each motif within each gene
    for motif in motif_regex_dict:
        motif_locations[motif] = [] # NOTE(review): overwrites the header keys set up earlier; only spans_info is actually consumed later
        #debugger(motif_locations)
        search_motif = str(motif_regex_dict[motif]) #sets regex ready motif as string
        #debugger(search_motif)
        #debugger(type(search_motif))
        spans = [] # local list collecting this motif's location spans in this gene
        matches = re.finditer(search_motif, string, re.IGNORECASE) #Finds motifs
        #debugger(matches)
        #debugger(type(matches))
        context.set_source_rgb(*color_dict[motif][0])
        for match in matches: #For each motif found do the following
            span = match.span() #gets span of motif from finditer iterable search object
            #debugger(span)
            group = match.group() #may not use. Saved in case of need
            #print(group)
            spans.append(span) #Add span to local list
            #debugger(spans)
            #debugger(spans_info)
            # Draw a thick colored segment over the motif's span
            context.set_line_width(10)
            context.move_to(span[0] + margin, spacer)
            context.line_to(span[1] + margin, spacer)
            context.stroke()
        spans_info.append((header,motif,spans))
        #debugger(spans_info)
    spacer += 100 #Adds to the spacer for readability
#===============================================================================
# Draw key from color_dict, displaying motif and the associated color
#===============================================================================
for i in range(len(color_dict)):
    #Draw motif name. Ex: YYYYYY
    context.set_line_width(1)
    context.set_source_rgb(0,0,0)
    context.move_to(margin, spacer)
    context.set_font_size(10)
    context.select_font_face("Calibri")
    context.text_path(list(color_dict.keys())[i] + " = ")
    context.stroke()
    #Draw color block to match motif
    context.set_line_width(15)
    context.set_source_rgb(*list(color_dict.values())[i][0])
    context.move_to(margin + 80, spacer)
    context.line_to(margin + 100, spacer)
    context.stroke()
    spacer += 20  # one key row per motif
surface.finish()  # flush the drawing and close the SVG file
#===============================================================================
# Create and output text list of motif locations within each gene.
#===============================================================================
# Write one (header, motif, [spans]) tuple per line. A with-block replaces
# the old bare open() so the report file is flushed and closed on exit, and
# the repeated string += loop is replaced by a single join.
with open("Motifs_text.txt", "w") as report_fh:
    spans_text_info = "".join(str(item) + "\n" for item in spans_info)
    report_fh.write("List of motifs found for each gene. \n" + spans_text_info)
|
#!/usr/bin/env python3
import argparse
import os
import sys
import tempfile
import subprocess
import json
import jmespath
from pprint import pprint
from urllib.parse import urlparse
def nagios_exit(message, code):
    """Print *message* on stdout and terminate with Nagios exit *code*
    (0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN)."""
    print(message)
    sys.exit(code)
# Map testssl.sh severity labels to comparable integers (higher = worse);
# used both for threshold comparison and for sorting findings.
severities = {
    'LOW': 1,
    'MEDIUM': 2,
    'HIGH': 3,
    'CRITICAL': 4,
}
# Run testssl.sh against the given URI, parse its JSON report, and exit with
# a Nagios status derived from the worst finding relative to the configured
# --warning / --critical severity thresholds.
try:
    # fixed: the description string was missing its closing parenthesis.
    parser = argparse.ArgumentParser(description='Test support of TLS/SSL ciphers, '
        'protocols as well as cryptographic flaws and much more. This is a wrapper '
        'around testssl.sh (https://github.com/drwetter/testssl.sh)')
    # fixed: missing space between the two concatenated help fragments.
    parser.add_argument('--uri', help='host|host:port|URL|URL:port. '
        'Port 443 is default, URL can only contain HTTPS protocol', required=True)
    parser.add_argument('--testssl', help='Path to the testssl.sh script', required=True)
    parser.add_argument('--ignore-ids', help='Comma separated list of test IDs to ignore', default='')
    parser.add_argument('--critical', help='Findings of this severity level trigger a CRITICAL',
        choices=severities.keys(), default='CRITICAL')
    parser.add_argument('--warning', help='Findings of this severity level trigger a WARNING',
        choices=severities.keys(), default='HIGH')
    parser.add_argument('trailing_args', help='Provide extra arguments to testssl.sh at the end, '
        'separated by \'--\'', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # The WARNING threshold must not outrank the CRITICAL one.
    # fixed: missing space between the two concatenated error fragments.
    if severities[args.critical] < severities[args.warning]:
        parser.error('The severity level to raise a WARNING can not be higher '
            'than the level to raise a CRITICAL')
    if urlparse(args.uri).scheme != 'https':
        parser.error('The scheme of the URI must be \'https\'')
    uri = args.uri
    testssl = args.testssl
    critical = args.critical
    warning = args.warning
    ignore_ids = args.ignore_ids.split(',')
    trailing_args = args.trailing_args
    # Possible nagios statuses
    # start with clean slate
    msg = {
        'ok': [],
        'warning': [],
        'critical': []
    }
    # Create temp file to receive testssl.sh's JSON report
    fd, temp_path = tempfile.mkstemp()
    # Set command and arguments
    subproc_args = [
        testssl,
        '--append',
        '--jsonfile-pretty',
        temp_path,
    ]
    # Remove '--' separator from the trailing arguments
    if '--' in trailing_args:
        trailing_args.remove('--')
    # Add the trailing arguments
    subproc_args.extend(trailing_args)
    # Add the URI as the last argument
    subproc_args.extend([uri])
    # Run it (a failure surfaces as a JSON parse error below and exits UNKNOWN)
    proc = subprocess.run(subproc_args, stdout=subprocess.PIPE)
    # temp_path = os.path.expanduser('~/work/testssl_results/reset.json')
    with open(temp_path) as f:
        # fixed: the parsed report used to be bound to the name `json`,
        # shadowing the json module for the rest of the script.
        scan_results = json.load(f)
    os.close(fd)
    # pprint(temp_path)
    os.remove(temp_path)
    # Flatten the per-host scan sections into one list of findings that
    # carry a 'severity' key.
    r = jmespath.search('scanResult[].[*][*]|[0][0][][]|[?severity]', scan_results)
    # Filter out only supported severity levels
    r = [x for x in r if x['severity'] in severities.keys()]
    # Filter out ignore_ids
    r = [x for x in r if x['id'] not in ignore_ids]
    # Add integer severity level
    for item in r:
        item['severity_int'] = severities[item['severity']]
    def get_severity_count_aggregated(severity_int):
        """Number of findings at or above *severity_int*."""
        return len([f for f in r if f['severity_int'] >= severity_int])
    def get_severity_items_aggregated(severity_int):
        """Human-readable findings at or above *severity_int*, worst first."""
        _results = sorted([f for f in r if f['severity_int'] >= severity_int], key = lambda i: i['severity_int'], reverse=True)
        return list(map(lambda x: x['severity'] + ": " + x['id'] + " (" + x['finding'] + ")", _results))
    if get_severity_count_aggregated(severities[critical]) > 0:
        msg['critical'].append("{0} issue{1} found for {2} with severity {3} or higher.\n{4}".format(
            get_severity_count_aggregated(severities[critical]),
            's' if get_severity_count_aggregated(severities[critical]) > 1 else '',
            uri,
            critical,
            '\n'.join(get_severity_items_aggregated(severities[critical])),
        ))
    if get_severity_count_aggregated(severities[warning]) > 0:
        msg['warning'].append("{0} issue{1} found for {2} with severity {3} or higher.\n{4}".format(
            get_severity_count_aggregated(severities[warning]),
            's' if get_severity_count_aggregated(severities[warning]) > 1 else '',
            uri,
            warning,
            '\n'.join(get_severity_items_aggregated(severities[warning])),
        ))
    else:
        msg['ok'].append("No issues found for {0} with severity {1} or higher.".format(
            uri,
            warning,
        ))
except Exception as e:
    # Any unexpected failure (bad args already exited via parser.error) maps
    # to Nagios UNKNOWN.
    nagios_exit("UNKNOWN: Unknown error: {0}.".format(e), 3)
# Exit with accumulated message(s); worst status wins.
if len(msg['critical']) > 0:
    nagios_exit("CRITICAL: " + ' '.join(msg['critical']), 2)
elif len(msg['warning']) > 0:
    nagios_exit("WARNING: " + ' '.join(msg['warning']), 1)
else:
    nagios_exit("OK: " + ' '.join(msg['ok']), 0)
|
import argparse
from pathlib import Path
# import sys
# print(sys.path)
# exit()
import torch
from gans import GAN, LSGAN, WGAN, WGAN_GP
from libs import str2bool, make_gif_with_samples
def parse_args():
    """Build the CLI parser and return the fully processed argument namespace.

    Delegates to process_args() for derived fields (paths, device, desc).

    Fix: --gan_type defaulted to 'GAN', which is not among the (lowercase)
    choices; argparse does not validate defaults against `choices`, so
    running without --gan_type always made main() raise. The default is
    now 'gan'.
    """
    parser = argparse.ArgumentParser(description="Personal implementations of GAN")
    # Specify GAN type and dataset
    parser.add_argument('--desc', type=str, default=None, help='Experiment identifier')
    parser.add_argument('--gan_type', type=str, default='gan',
                        choices=['gan', 'lsgan', 'wgan', 'wgan_gp', 'dragan', 'ebgan', 'began'],
                        help='GAN TYPE')
    parser.add_argument('--dataset', type=str, default='mnist',
                        choices=['mnist', 'fmnist', 'cifar10', 'cifar100', 'svhn', 'stl10'],
                        help='DATASET')
    # Hyper-parameters
    parser.add_argument('--epoch', type=int, default=100, help='The number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=64, help='The size of batch')
    parser.add_argument('--input_size', type=int, default=32, help='The size of input image')
    parser.add_argument('--lrG', type=float, default=0.0002)
    parser.add_argument('--lrD', type=float, default=0.0002)
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)
    # Logging interval
    parser.add_argument('--use_tensorboard', type=str2bool, default=True, help='Whether use tensorboard')
    parser.add_argument('--ckpt_save_freq', type=int, default=5000, help='The number of iterations to save checkpoint')
    parser.add_argument('--img_save_freq', type=int, default=1000, help='The number of iterations to save images')
    parser.add_argument('--log_freq', type=int, default=10, help='The number of iterations to print logs')
    args = parser.parse_args()
    return process_args(args=args)
def create_folder_ifnotexist(path):
    """Ensure *path* exists as a directory and return it as a pathlib.Path.

    Fix: the old exists()-then-mkdir(exist_ok=False) pair raised
    FileExistsError if another process created the directory between the
    check and the mkdir (TOCTOU race); mkdir(exist_ok=True) is atomic in
    that respect and idempotent.
    """
    path = Path(path)
    path.mkdir(parents=True, exist_ok=True)
    return path
def process_args(args):
    """Fill in the derived fields of *args*: experiment description, output
    directories (images/checkpoints/tensorboard) and compute device."""
    # Default the experiment identifier to "<gan_type>_<dataset>".
    if args.desc is None:
        args.desc = f'{args.gan_type}_{args.dataset}'
    # Create the on-disk layout under ./storage and record the paths.
    print('Making directories...')
    storage = create_folder_ifnotexist('./storage')
    args.img_path = create_folder_ifnotexist(storage / 'images' / args.desc)
    args.ckpt_path = create_folder_ifnotexist(storage / 'checkpoints' / args.desc)
    if args.use_tensorboard:
        args.tb_path = create_folder_ifnotexist(storage / 'tb' / args.desc)
    # Pick CUDA when available, otherwise fall back to CPU.
    gpu_available = torch.cuda.is_available()
    print(f'Using GPU:{gpu_available}')
    args.device = torch.device('cuda' if gpu_available else 'cpu')
    return args
def main():
    """Entry point: build the requested GAN, train it, then render the
    per-epoch sample images into a gif."""
    args = parse_args()
    # Map the CLI gan_type to its implementation class.
    gan_classes = {
        'gan': GAN,
        'lsgan': LSGAN,
        'wgan': WGAN,
        'wgan_gp': WGAN_GP,
    }
    if args.gan_type not in gan_classes:
        raise Exception(f" [!] There is no option for {args.gan_type}")
    gan = gan_classes[args.gan_type](args)
    # train gan
    gan.train()
    print(" [*] Training finished!")
    # visualize outputs
    make_gif_with_samples(args.img_path)
    print(" [*] Visualizing finished!")
if __name__ == '__main__':
    main()
|
# Demonstrate list.remove(): it deletes only the FIRST occurrence of a value.
a = [10, 40, 30, 10, 70, 40]
print("a awal =", a)
for value in (10, 40):
    # remove() scans from the left and drops the first match of `value`
    a.remove(value)
print("hasil =", a)
"""Variables for gallery.py."""
"""root = '/Volumes/Local_stuff/zdjecia_ubranek/chlopiec_56' # path to jpgs or folders of jpgs and output root"""
tmp = '/tmp' # temporary folder to move corrupt files to
index = 'index.html' # filename for html files
index_mini = 'index_mini.html' # index with only thumbs gallery'
n_thumbs = 6 # number of thumbnails to display on index page
min_size = 1000,1000 # minimum dimensions required to create thumbnail
thumb_size = 1000,1000 # dimensions of thumbnail to create
header = ("""<!doctype html>
<html>
<head>
<title>%s</title>
<meta charset="utf-8" />
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style type="text/css">
#photos {
/* Prevent vertical gaps */
line-height: 0;
-webkit-column-count: 5;
-webkit-column-gap: 0px;
-moz-column-count: 5;
-moz-column-gap: 0px;
column-count: 5;
column-gap: 0px;
}
#photos img {
/* Just in case there are inline attributes */
width: 100%% !important;
height: auto !important;
}
@media (max-width: 1200px) {
#photos {
-moz-column-count: 4;
-webkit-column-count: 4;
column-count: 4;
}
}
@media (max-width: 1000px) {
#photos {
-moz-column-count: 3;
-webkit-column-count: 3;
column-count: 3;
}
}
@media (max-width: 800px) {
#photos {
-moz-column-count: 2;
-webkit-column-count: 2;
column-count: 2;
}
}
@media (max-width: 400px) {
#photos {
-moz-column-count: 1;
-webkit-column-count: 1;
column-count: 1;
}
}
body {
background-color: #FFFFFF;
color: gray;
font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
margin: 2;
padding: 2;
}
div {
background-color: #FFFFFF;
border-radius: 0.25em;
border-color: #000000;
margin: 1em auto;
width: 1000px;
}
p {
font-size: 16px;
padding-bottom: 1.5em;
}
a:link, a:visited {
color: #93a1a1;
font-size: 24px;
text-decoration: underline;
}
.image {
margin-right: 5px;
padding: 2px;
background-color: #fff;
}
tr {
padding: 3px;
}
td {
padding: 3px;
}
table {
width: 1000px;
}
img {
background-color: #FFF;
width: 100%%;
height: 100%%
padding: 3px;
border-color: #000;
border-radius: 0.4em;
align: center;
}
</style>
</head>
<body>
<div>
""")
br = '\n<br>'  # HTML line-break snippet
columns = 2  # presumably gallery table columns -- TODO confirm against gallery.py
threads = 2  # presumably worker-thread count used by gallery.py
thread_sleep = 2  # presumably seconds slept between thread polls
font_size_full = 100  # font size value for full-size pages
font_size_thumb = 50  # font size value for thumbnail pages
footer = '\n</div></body></html>'  # closes the markup opened by `header`
img_src = '\n<img src="%s">'  # bare image tag template (src)
timestamp = '\n<p>This page was created on %s</p>'  # generation-time stamp template
url_dir = '\n<p><a href="%s" target="_blank">%s</a></p>'  # link template (href, label)
url_img = '\n<td class="image"><a href="%s" target="_blank"><img title="%s" src="%s"></a></td>'  # thumbnail cell template (href, title, src)
images = '\n<img title="%s" src="%s">'  # titled image tag template (title, src)
|
import dropbox
# SECURITY(review): real Dropbox app credentials and a long-lived access
# token are hard-coded in source control. Anyone with this file can read and
# write the linked account. Rotate these credentials and load them from the
# environment or a secrets store instead.
app_key = '8w7ts322hfkb9ek'
app_secret = '8dp228du8qsvj46'
access_token = 'kSVIwQWSbHMAAAAAAAAL64PmaxHp3J-LHwFp-f0XC9J2nx5Ef_MCNHYGbFAeG2LA'
def upload_file_to_dropbox(file_location, filename):
    """Upload the file at *file_location* to the Dropbox root as /<filename>.

    Returns the metadata object produced by files_upload. Note the whole
    file is read into memory before uploading.
    """
    with open(file_location, 'rb') as source:
        client = dropbox.Dropbox(access_token)
        # Validates the access token up front; raises if it is invalid.
        client.users_get_current_account()
        return client.files_upload(source.read(), "/" + str(filename))
|
import json
from typing import Dict, List, Sequence
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
class MemorySpanExporter(SpanExporter):
    """Implementation of :class:`SpanExporter` that saves spans in memory.

    This class can be used for diagnostic purposes, multi-threaded scenarios etc.
    Access the process-wide singleton via :meth:`get_instance`.
    """
    __instance = None  # the singleton instance, created lazily

    @staticmethod
    def get_instance():
        """Return the singleton exporter, creating it on first use."""
        if MemorySpanExporter.__instance is None:
            MemorySpanExporter()
        return MemorySpanExporter.__instance

    def __init__(self):
        if MemorySpanExporter.__instance is not None:
            raise Exception("This class is a singleton!")
        # Fix: the span buffer used to be a class-level list, which export()
        # mutated in place -- state shared by the class itself rather than
        # owned by the singleton instance. Keep it per-instance instead.
        self.__spans = []
        MemorySpanExporter.__instance = self

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        """Append every span in *spans* to the in-memory buffer."""
        self.__spans.extend(spans)
        return SpanExportResult.SUCCESS

    def reset(self):
        """Discard all collected spans."""
        self.__spans = []

    @property
    def spans(self) -> List[ReadableSpan]:
        # The live buffer (not a copy); callers should treat it as read-only.
        return self.__spans

    @property
    def span_dicts(self) -> List[Dict]:
        """Collected spans rendered as plain dicts via their JSON form."""
        return [json.loads(span.to_json()) for span in self.spans]
|
# Write a program that asks the user to enter a number and the number of multiplications of that number to display.
#
# Sample Output:
# > Enter a number: 2
# > Enter the multiplication: 5
# > 2 x 1 = 2
# > 2 x 2 = 4
# > 2 x 3 = 6
# > 2 x 4 = 8
# > 2 x 5 = 10 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import post
from django.db import connection
from collections import namedtuple
from django.utils import timezone
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from datetime import datetime as dt
import datetime
import math
# Create your views here.
# user collections.namedtuple() from the Python standard library
def namedtuplefetchall(cursor):
    """Return all rows from a cursor as namedtuples keyed by column name."""
    column_names = [column[0] for column in cursor.description]
    Row = namedtuple('Result', column_names)
    return [Row(*record) for record in cursor.fetchall()]
# list the posts
def index(request):
    """Landing page: personalised recommendations (for logged-in users) plus
    the most-viewed posts, followed by all remaining posts newest-first.
    A POST with a 'search' field returns a title LIKE search instead.
    """
    exclude_post = []  # Post_ids already shown in the recommendation block
    if request.user.is_authenticated:
        # Map the auth user to the app-level user row.
        with connection.cursor() as c:
            c.execute("select id from appUser_appuser where user_id = %s", [request.user.id])
            user = namedtuplefetchall(c)
            id_id = user[0].id
        # recommend/top_views are defined elsewhere in this module's app;
        # together they supply up to 5 highlighted posts.
        recommendation_result = recommend(id_id, 20, 15, 5, 1, 3, 0.4)
        for item in recommendation_result:
            exclude_post.append(item.Post_id)
        number = 5 - len(recommendation_result)
        top_view_result = top_views(number, exclude_post)
        for item in top_view_result:
            exclude_post.append(item.Post_id)
    else:
        recommendation_result = []
        top_view_result = top_views(5, exclude_post)
        for item in top_view_result:
            exclude_post.append(item.Post_id)
    query = []  # NOTE(review): unused; kept untouched in this doc-only pass
    input_query = " select Post_id, Post_title, Description, Price\
        from post_post "
    # Append one parameterized "Post_id != %s" predicate per excluded post so
    # highlighted posts are not repeated in the main listing.
    if exclude_post:
        input_query += 'where '
        for i in range(len(exclude_post)):
            input_query += ' Post_id != %s '
            if i != len(exclude_post) - 1:
                input_query += ' and '
    input_query += 'order by Pub_date desc'
    # print(input_query)
    # print(exclude_post)
    with connection.cursor() as c:
        c.execute(input_query, exclude_post)
        post = namedtuplefetchall(c)  # NOTE(review): shadows the imported `post` model inside this function
    result = recommendation_result + top_view_result + post
    if request.method=='POST':
        if request.POST.get('search'):
            # Substring search on the title; LIKE wildcards added around the term.
            title = '%' + str(request.POST.get('search')) + '%'
            with connection.cursor() as c1:
                c1.execute("select Post_id, Post_title, Description\
                    from post_post\
                    where Post_title like %s\
                    order by Pub_date desc", [title])
                search_result = namedtuplefetchall(c1)
            return render(request, 'post/post.html', {'postlist': search_result})
        else:
            return render(request, 'post/post.html', {'postlist': result})
    else:
        return render(request, 'post/post.html', {'postlist': result})
# detail of the post
def detail(request, Post_id):
    """Show one post's detail page and record the view.

    Creates a temporary ``post_detail`` DB view joined with the apartment
    table, bumps the post's view counter, logs the view in
    ``post_view_history`` for authenticated users, then fetches the
    author's contact details for display.

    Fixes:
    - the author-info query interpolated ``result[0].id_id`` with
      ``str.format`` (SQL injection surface); it is now a bound parameter;
    - when the author lookup returned no rows the function fell through
      and returned ``None`` (HTTP 500); it now renders without contact info.
    """
    with connection.cursor() as c:
        c.execute("drop view if exists post_detail")
        c.execute("create view post_detail as select * from post_post where Post_id = %s", [Post_id])
        # NOTE(review): this relies on multi-statement execution (select and
        # update in one execute) — confirm the configured backend allows it.
        c.execute("select * from post_detail p join apartment_apartment a on p.ApartmentID_id = a.ApartmentID;\
                update post_post set Views = Views + 1 where Post_id = %s",[Post_id])
        result = namedtuplefetchall(c)
    with connection.cursor() as c:
        c.execute("drop view if exists post_detail")
    if request.user.is_authenticated:
        # appUser_appuser has its own pk; request.user.id is the auth_user pk.
        with connection.cursor() as c:
            c.execute("select id from appUser_appuser where user_id = %s", [request.user.id])
            user = namedtuplefetchall(c)
            id_id = user[0].id
        view_time = timezone.now()
        with connection.cursor() as c1:
            c1.execute("insert into post_view_history(id_id, Post_id_id, View_time) \
            values(%s, %s, %s)", [id_id, Post_id, view_time])
    # join user + post: fetch the post author's username / phone / email.
    with connection.cursor() as c2:
        c2.execute("""
            SELECT Users.username, Users.phone, Users.email FROM\
            (SELECT appUser_appuser.username AS username, appUser_appuser.phone AS phone, auth_user.email AS email, appUser_appuser.id AS id FROM appUser_appuser JOIN auth_user ON appUser_appuser.user_id = auth_user.id) AS Users\
            WHERE Users.id = %s
            """, [result[0].id_id])
        user_info_list = namedtuplefetchall(c2)
    if len(user_info_list) > 0:
        user_info = user_info_list[0]
        return render(request, 'post/detail.html', {'info': result[0], 'user_info':user_info})
    # Author row missing: still show the post, just without contact info.
    return render(request, 'post/detail.html', {'info': result[0], 'user_info': None})
@login_required(login_url='../../appUser/login')
def Insertrecord(request):
    """Insert a new post submitted from the insert form.

    Validates that all mandatory fields are present and the move-out date
    is after the move-in date, de-duplicates against an identical existing
    post, inserts the row and increments the author's ``num_of_post``.
    On GET (or invalid POST) re-renders the form with the apartment list.
    """
    # return apartment list
    with connection.cursor() as c:
        c.execute("select Name, ApartmentID from apartment_apartment")
        result = namedtuplefetchall(c)
    # today's date, used to pre-fill the form's date inputs
    date = str(dt.date(dt.now()))
    # form
    if request.method=='POST':
        # String comparison of the dates works because they are ISO "YYYY-MM-DD".
        if request.POST.get('Post_title') \
        and request.POST.get('ApartmentID') and request.POST.get('Move_in_date')\
        and request.POST.get('Move_out_date') and request.POST.get('Price')\
        and request.POST.get('Bedroom') and request.POST.get('Bathroom')\
        and request.POST.get('Move_out_date') > request.POST.get('Move_in_date'):
            # store the values
            saverecord=post()
            saverecord.Post_title = request.POST.get('Post_title')
            saverecord.ApartmentID_id = request.POST.get('ApartmentID')
            saverecord.Pub_date = timezone.now()
            saverecord.Move_out_date = request.POST.get('Move_out_date')
            saverecord.Move_in_date = request.POST.get('Move_in_date')
            saverecord.Price = request.POST.get('Price')
            saverecord.Bedroom = request.POST.get('Bedroom')
            saverecord.Bathroom = request.POST.get('Bathroom')
            # Duration is the stay length rounded to whole months (30-day months).
            date1 = datetime.datetime.strptime(saverecord.Move_out_date, "%Y-%m-%d").date()
            date2 = datetime.datetime.strptime(saverecord.Move_in_date, "%Y-%m-%d").date()
            saverecord.Duration = int(round((date1 - date2).days / 30))
            # id is the primary key of appuser, but request.user.id gets the id of auth_user table
            with connection.cursor() as c1:
                c1.execute("select id from appUser_appuser where user_id = %s", [request.user.id])
                user = namedtuplefetchall(c1)
                saverecord.id_id = user[0].id
            # find apartment name in the apartment table
            with connection.cursor() as c1:
                c1.execute("select Name from apartment_apartment where ApartmentID = %s",[saverecord.ApartmentID_id])
                apartment = namedtuplefetchall(c1)
                saverecord.Apartment = apartment[0].Name
            # Duplicate check: an identical post by the same user blocks insertion.
            with connection.cursor() as c1:
                c1.execute("select Post_title from post_post\
                            where Post_title = %s and\
                            ApartmentID_id = %s and\
                            Move_out_date = %s and\
                            Move_in_date = %s and\
                            Price = %s and\
                            Bedroom = %s and\
                            Bathroom = %s and\
                            Duration = %s and\
                            id_id = %s",[saverecord.Post_title, saverecord.ApartmentID_id, saverecord.Move_out_date, saverecord.Move_in_date, saverecord.Price, saverecord.Bedroom, saverecord.Bathroom, saverecord.Duration, saverecord.id_id])
                value = namedtuplefetchall(c1)
            # check if the post exist
            if not value:
                # if there is Description
                if request.POST.get('Description'):
                    saverecord.Description = request.POST.get('Description')
                    with connection.cursor() as c:
                        c.execute(" insert into post_post(Post_title, id_id, ApartmentID_id, Pub_date, Move_out_date, Move_in_date, Price, Bedroom, Bathroom, Description, Duration, Apartment)\
                            values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",[saverecord.Post_title, saverecord.id_id, saverecord.ApartmentID_id, saverecord.Pub_date, saverecord.Move_out_date, saverecord.Move_in_date, saverecord.Price, saverecord.Bedroom, saverecord.Bathroom, saverecord.Description, saverecord.Duration, saverecord.Apartment])
                # no Description
                else:
                    with connection.cursor() as c:
                        c.execute(" insert into post_post(Post_title, id_id, ApartmentID_id, Pub_date, Move_out_date, Move_in_date, Price, Bedroom, Bathroom, Duration, Apartment)\
                            values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",[saverecord.Post_title, saverecord.id_id, saverecord.ApartmentID_id, saverecord.Pub_date, saverecord.Move_out_date, saverecord.Move_in_date, saverecord.Price, saverecord.Bedroom, saverecord.Bathroom, saverecord.Duration, saverecord.Apartment])
                # update number of post of the user
                with connection.cursor() as c:
                    c.execute(" update appUser_appuser\
                        set num_of_post = num_of_post + 1\
                        where id = %s", [saverecord.id_id])
            return redirect('../')
        else:
            return render(request, 'post/insertpost.html', {'apartments': result, 'date' : date})
    else:
        return render(request, 'post/insertpost.html', {'apartments': result, 'date' : date})
def Filter(request):
    """Filter posts by amenities, price, dates, duration and rooms.

    Each selected form field adds one bound predicate to the query and one
    column name to the search-history insert.  Authenticated users get the
    search recorded in ``post_search_history``.  GET renders the empty
    filter form; a POST with no criteria renders all posts.
    """
    # return apartment list
    with connection.cursor() as c:
        c.execute("select Name, ApartmentID from apartment_apartment")
        apartment = namedtuplefetchall(c)
    with connection.cursor() as c:
        c.execute("select Post_id, Post_title, Description from post_post order by Pub_date desc")
        result = namedtuplefetchall(c)
    query = []          # SQL predicates, one per selected criterion
    value = []          # bound parameters matching `query`
    history_query = []  # column names for the search-history insert
    search = False
    if request.method=='POST':
        # Boolean amenity checkboxes: presence means "must be 1".
        if request.POST.get('Pet_friendly'):
            query.append('a.Pet_friendly = %s ')
            value.append(1)
            history_query.append('Pet_friendly')
            search = True
        if request.POST.get('Swimming_pool'):
            query.append('a.Swimming_pool = %s ')
            value.append(1)
            history_query.append('Swimming_pool')
            search = True
        if request.POST.get('Printer'):
            query.append('a.Printer = %s ')
            value.append(1)
            history_query.append('Printer')
            search = True
        if request.POST.get('Gym'):
            query.append('a.Gym = %s ')
            value.append(1)
            history_query.append('Gym')
            search = True
        # '0' means "no price limit" on the form.
        if request.POST.get('Price') and request.POST.get('Price') != '0':
            query.append('p.Price <= %s ')
            value.append(request.POST.get('Price'))
            history_query.append('Price')
            search = True
        if request.POST.get('Move_in_date'):
            query.append('p.Move_in_date <= %s ')
            value.append(request.POST.get('Move_in_date'))
            history_query.append('Move_in_date')
            search = True
        if request.POST.get('Move_out_date'):
            query.append('p.Move_in_date >= %s ')
            value.append(request.POST.get('Move_out_date'))
            history_query.append('Move_out_date')
            search = True
        if request.POST.get('Duration'):
            query.append('p.Duration = %s ')
            value.append(request.POST.get('Duration'))
            history_query.append('Duration')
            search = True
        if request.POST.get('Bathroom'):
            query.append('p.Bathroom = %s ')
            value.append(request.POST.get('Bathroom'))
            history_query.append('Bathroom')
            search = True
        if request.POST.get('Bedroom'):
            query.append('p.Bedroom = %s ')
            value.append(request.POST.get('Bedroom'))
            history_query.append('Bedroom')
            search = True
        # Empty filter form: show everything, newest first.
        if search is False:
            return render(request, 'post/filter_result.html', {'postlist': result, 'apartments': apartment})
        order = request.POST.get('order')
        input_query = 'select * from post_post p left join apartment_apartment a on p.ApartmentID_id = a.ApartmentID '
        for i in range(len(query)):
            if i == 0:
                input_query += 'where '
            input_query += query[i]
            if i != len(query) - 1:
                input_query += 'and '
        # NOTE(review): SECURITY — `order` comes straight from POST data and is
        # concatenated into the SQL (ORDER BY cannot be parameterised).  This
        # is an SQL-injection vector; it should be validated against a
        # whitelist of allowed column/direction values.
        input_query = input_query + 'order by ' + order
        with connection.cursor() as c:
            c.execute(input_query, value)
            result = namedtuplefetchall(c)
        if request.user.is_authenticated:
            # Build "insert into post_search_history(id_id,Search_time,<cols>) values(%s,%s,<...>)"
            input_history = 'insert into post_search_history(id_id,Search_time,'
            input_value = ' values(%s,%s,'
            for i in range(len(history_query)):
                input_history += history_query[i]
                input_value += '%s'
                if (i != len(history_query) - 1):
                    input_history += ','
                    input_value += ','
            input_history = input_history + ')' + input_value + ')'
            with connection.cursor() as c:
                c.execute("select id from appUser_appuser where user_id = %s", [request.user.id])
                user = namedtuplefetchall(c)
                id_id = user[0].id
            search_time = timezone.now()
            # Prepend the two fixed columns' values; `value` already holds the rest.
            value.insert(0, search_time)
            value.insert(0, id_id)
            with connection.cursor() as c1:
                c1.execute(input_history, value)
        return render(request, 'post/filter_result.html', {'postlist': result, 'apartments': apartment})
    else:
        return render(request, 'post/filter.html', {'apartments': apartment})
def top_views(number, recommend_post):
    """Return up to *number* of the most-viewed posts from the last 45 days,
    skipping every id listed in *recommend_post*."""
    cutoff = timezone.now() - datetime.timedelta(days=45)
    sql = " select Post_id, Post_title, Description, Price\
            from post_post\
            where Pub_date > %s"
    # One bound exclusion per already-recommended post id.
    sql += ' and  Post_id != %s ' * len(recommend_post)
    sql += 'order by Views desc limit %s'
    params = [cutoff] + list(recommend_post) + [number]
    # print(sql)
    # print(params)
    with connection.cursor() as cursor:
        cursor.execute(sql, params)
        return namedtuplefetchall(cursor)
def top_likes(number):
    """Return the *number* most-liked posts published in the last 45 days.

    Bug fix: the parameter list was ``[number, start_time]``, so the
    publication cutoff was bound to ``limit`` and the limit to
    ``Pub_date > %s``.  The order now matches the placeholders.
    """
    start_time = timezone.now() - datetime.timedelta(days=45)
    with connection.cursor() as c:
        c.execute(" select Post_id, Post_title, Description, Price\
            from post_post\
            where Pub_date > %s\
            order by Likes desc\
            limit %s", [start_time, number])
        post = namedtuplefetchall(c)
    return post
def recommend(id_id, num_post, num_valid_post, num_search, num_valid_search, num_return_post, valid_percentage):
    """Recommend up to *num_return_post* posts for user *id_id*.

    Strategy (all history limited to the last 45 days):
    1. Average price/bedroom/bathroom over the last *num_post* viewed posts,
       if at least *num_valid_post* views exist.
    2. If fewer than *num_valid_search* recent searches exist, recommend
       purely from the view-history averages (or return [] with no data).
    3. Otherwise blend search-history aggregates (weight 0.7) with
       view-history averages (weight 0.3) to build a filtered
       ``recommendation`` DB view, then rank it by how well each post's
       apartment amenities match the user's searched-for amenities.

    A criterion from the search history only participates when it appeared
    in more than *valid_percentage* of the searches.
    """
    valid_time = timezone.now() - datetime.timedelta(days=45)
    post_valid = False
    # validate post history
    with connection.cursor() as c:
        c.execute("drop view if exists history")
        c.execute(" create view history as\
                    select *\
                    from post_view_history\
                    where id_id = %s and View_time > %s\
                    order by View_time desc\
                    limit %s\
                    ",[id_id, valid_time, num_post])
        c.execute(" select count(View_time) as num\
                    from history")
        count = namedtuplefetchall(c)
    # post history is valid
    if count[0].num >= num_valid_post:
        # Aggregate the viewed posts' attributes for averaging below.
        with connection.cursor() as c:
            c.execute(" select sum(p.Price) as sum_price, sum(p.Bedroom) as sum_bedroom, sum(p.Bathroom) as sum_bathroom, count(h.View_time) as num\
                        from history h left outer join post_post p on h.Post_id_id = p.Post_id\
                        ")
            post_history = namedtuplefetchall(c)
            post_valid = True
    with connection.cursor() as c:
        c.execute("drop view if exists history")
    # validate search history (the `history` view is reused for searches)
    with connection.cursor() as c:
        c.execute("drop view if exists history")
        c.execute(" create view history as\
                    select *\
                    from post_search_history\
                    where id_id = %s and Search_time > %s\
                    order by Search_time desc\
                    limit %s", [id_id, valid_time, num_search])
        c.execute(" select count(Search_time) as num\
                    from history")
        count = namedtuplefetchall(c)
    # recommend only base on post
    if count[0].num < num_valid_search:
        if post_valid is False:
            # No usable history of either kind: nothing to recommend.
            print("false")
            with connection.cursor() as c:
                c.execute("drop view if exists history")
            return []
        else:
            return_list = []  # NOTE(review): unused local
            # Average the viewed posts' attributes and find similar posts
            # within a +/-50 price band.
            price = math.floor(post_history[0].sum_price / post_history[0].num)
            bedroom = int(round(post_history[0].sum_bedroom / post_history[0].num))
            bathroom = int(round(post_history[0].sum_bathroom / post_history[0].num))
            print("price: " + str(price) + "; " + "bedroom: " + str(bedroom) + "; " + "bathroom: " + str(bathroom))
            with connection.cursor() as c:
                c.execute("drop view if exists history")
                c.execute(" select Post_id, Post_title, Description, Price \
                            from post_post\
                            where Price > %s and Price < %s and Bedroom = %s and Bathroom = %s\
                            order by Views desc\
                            limit %s", [price - 50, price + 50, bedroom, bathroom, num_return_post])
                posts = namedtuplefetchall(c)
            return posts
    # Search history is valid: aggregate every searched criterion.
    with connection.cursor() as c:
        c.execute(" select min(Move_in_date) as move_in, max(Move_out_date) as move_out, sum(Duration) as sum_duration, count(Duration) as num_duration, sum(Price) as sum_price, count(Price) as num_price, sum(Bedroom) as sum_bedroom, count(Bedroom) as num_bedroom, sum(Bathroom) as sum_bathroom, count(Bathroom) as num_bathroom, count(Pet_friendly) as num_pet, count(Printer) as num_printer, count(Swimming_pool) as num_swimming, count(Gym) as num_gym, count(Search_time) as num\
                    from history")
        search_history = namedtuplefetchall(c)
    with connection.cursor() as c:
        # NOTE(review): no "if exists" guard here, unlike every other drop.
        c.execute("drop view history")
    query = []
    input_query = 'create view recommendation as select * from post_post where '
    value = []
    flag = False
    # Date window widened by 5 days on each side.
    if search_history[0].move_in != None:
        query.append("Move_in_date > %s ")
        value.append(search_history[0].move_in - datetime.timedelta(days=5))
        flag = True
    if search_history[0].move_out != None:
        query.append("Move_out_date < %s ")
        value.append(search_history[0].move_out + datetime.timedelta(days=5))
        flag = True
    # eliminate post with duration, price, bedroom, bathroom
    if search_history[0].sum_duration != None:
        if search_history[0].num_duration / search_history[0].num > valid_percentage:
            # NOTE(review): operands look inverted — an average duration
            # should be sum_duration / num_duration, not num / sum.
            duration = math.floor(search_history[0].num_duration / search_history[0].sum_duration)
            query.append("(Duration = %s or Duration = %s) ")
            value.append(duration)
            value.append(duration + 1)
            flag = True
    if post_valid is False:
        # Search history only: use the searched averages directly.
        if search_history[0].sum_price != None:
            if search_history[0].num_price / search_history[0].num > valid_percentage:
                price = math.floor(search_history[0].sum_price / search_history[0].num_price)
                query.append("price < %s ")
                value.append(price)
                flag = True
        if search_history[0].sum_bedroom != None:
            if search_history[0].num_bedroom / search_history[0].num > valid_percentage:
                bedroom = int(round(search_history[0].sum_bedroom / search_history[0].num_bedroom))
                query.append("Bedroom = %s ")
                value.append(bedroom)
                flag = True
        if search_history[0].sum_bathroom != None:
            if search_history[0].num_bathroom / search_history[0].num > valid_percentage:
                bathroom = int(round(search_history[0].sum_bathroom / search_history[0].num_bathroom))
                query.append("Bathroom = %s ")
                value.append(bathroom)
                flag = True
    else:
        # Both histories valid: blend search (0.7) with views (0.3); fall
        # back to the view-history average when a criterion is too sparse.
        flag = True
        if search_history[0].sum_price != None:
            if search_history[0].num_price / search_history[0].num > valid_percentage:
                price = math.floor((float(search_history[0].sum_price) / float(search_history[0].num_price)) * 0.7 +\
                        (float(post_history[0].sum_price) / float(post_history[0].num)) * 0.3)
            else:
                price = math.floor(float(post_history[0].sum_price) / float(post_history[0].num))
        else:
            price = math.floor(float(post_history[0].sum_price) / float(post_history[0].num))
        query.append("price < %s ")
        value.append(price)
        if search_history[0].sum_bedroom != None:
            if search_history[0].num_bedroom / search_history[0].num > valid_percentage:
                bedroom = int(round((float(search_history[0].sum_bedroom) / float(search_history[0].num_bedroom)) * 0.7 +\
                            (float(post_history[0].sum_bedroom) / float(post_history[0].num)) * 0.3))
            else:
                bedroom = int(round(float(post_history[0].sum_bedroom) / float(post_history[0].num)))
        else:
            bedroom = int(round(float(post_history[0].sum_bedroom) / float(post_history[0].num)))
        query.append("Bedroom = %s ")
        value.append(bedroom)
        if search_history[0].sum_bathroom != None:
            if search_history[0].num_bathroom / search_history[0].num > valid_percentage:
                bathroom = int(round((float(search_history[0].sum_bathroom) / float(search_history[0].num_bathroom)) * 0.7 +\
                            (float(post_history[0].sum_bathroom) / float(post_history[0].num)) * 0.3))
            else:
                bathroom = int(round(float(post_history[0].sum_bathroom) / float(post_history[0].num)))
        else:
            bathroom = int(round(float(post_history[0].sum_bathroom) / float(post_history[0].num)))
        query.append("Bathroom = %s ")
        value.append(bathroom)
    # Materialise the candidate set as the `recommendation` view.
    if flag is True:
        for i in range(len(query)):
            input_query += query[i]
            if i != len(query) - 1:
                input_query += 'and '
        with connection.cursor() as c:
            c.execute("drop view if exists recommendation")
            c.execute(input_query, value)
    else:
        with connection.cursor() as c:
            c.execute("drop view if exists recommendation")
            c.execute("create view recommendation as select * from post_post")
    # print(input_query)
    # print(value)
    # Rank candidates: each amenity contributes its search count as weight.
    with connection.cursor() as c:
        c.execute(" select Post_id, Post_title, Description, Price\
                    from (\
                    select r.Post_id, r.Post_title, r.Description, r.Price, (a.Pet_friendly * %s + a.Swimming_pool * %s + a.Printer * %s + a.Gym * %s) as score\
                    from recommendation r left outer join apartment_apartment a on r.ApartmentID_id = a.ApartmentID\
                    ) as sub\
                    order by score desc\
                    limit %s",\
                    [search_history[0].num_pet, search_history[0].num_swimming, search_history[0].num_printer, search_history[0].num_gym, num_return_post])
        posts = namedtuplefetchall(c)
        c.execute("drop view if exists recommendation")
    # print(posts)
    return posts
|
"""Radicale extension forms."""
from django import forms
from django.utils.translation import gettext_lazy
from modoboa.lib import form_utils
from modoboa.parameters import forms as param_forms
class ParametersForm(param_forms.AdminParametersForm):
    """Global parameters."""
    # Parameter namespace these settings are stored under.
    app = "modoboa_radicale"
    # --- Server settings section ---
    server_settings = form_utils.SeparatorField(
        label=gettext_lazy("Server settings")
    )
    # Base URL of the Radicale server; calendar URLs are built from it.
    server_location = forms.URLField(
        label=gettext_lazy("Server URL"),
        help_text=gettext_lazy(
            "The URL of your Radicale server. "
            "It will be used to construct calendar URLs."
        ),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    # --- Rights management section ---
    rights_management_sep = form_utils.SeparatorField(
        label=gettext_lazy("Rights management"))
    # Filesystem path of the Radicale rights file.
    rights_file_path = forms.CharField(
        label=gettext_lazy("Rights file's path"),
        initial="/etc/modoboa_radicale/rights",
        help_text=gettext_lazy(
            "Path to the file that contains rights definition"
        ),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    # Whether domain admins may read/write user calendars.
    allow_calendars_administration = form_utils.YesNoField(
        label=gettext_lazy("Allow calendars administration"),
        initial=False,
        help_text=gettext_lazy(
            "Allow domain administrators to manage user calendars "
            "(read and write)"
        )
    )
    # --- Miscellaneous section ---
    misc_sep = form_utils.SeparatorField(
        label=gettext_lazy("Miscellaneous"))
    # Upper bound for imported ICS files; bytes by default, unit suffix allowed.
    max_ics_file_size = forms.CharField(
        label=gettext_lazy("Maximum size of ICS files"),
        initial="10240",
        help_text=gettext_lazy(
            "Maximum size in bytes of imported ICS files "
            "(or KB, MB, GB if specified)")
    )
|
import mechanize
import httplib
import re
import scraperwiki
import xml.sax.saxutils as saxutils
import json
import urllib2
import urllib
import time
from BeautifulSoup import BeautifulSoup
# Useful stuff for parsing
rcs_pattern = re.compile("[0-9]{3} [0-9]{3} [0-9]{3}")
name_pattern = re.compile("Nom commercial :")
count_pattern = re.compile("([1-9][0-9]*) entreprises")
def expand_task_queue(name, dept):
    '''
    Refine an exhausted search: queue every one-letter extension of name.
    The extension repeating the current last letter is skipped, since that
    search was the one that just maxed out.
    '''
    last = name[-1]
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        if letter != last:
            create_task(name + letter, dept)
def load_task_queue():
    '''
    Get the queue of tasks
    Returns a list of {name, dept} pairs; [] when the tasks table does not
    exist yet (first run) or the select fails.
    '''
    try:
        # Here we don't expect to have the time to deal with more than 500 tasks per run
        tasks = scraperwiki.sqlite.select("dept,name from tasks where done=0 limit 500")
    except Exception:
        # Fixed: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; the expected failure here is just a missing table.
        return []
    return tasks
def init_task_queue():
    '''
    Seed the task table with an {a-z} x departement grid.
    The departement list comes from another scraper's datastore; the
    persisted 'index' variable lets an interrupted run resume.
    '''
    url = ("http://api.scraperwiki.com/api/1.0/datastore/sqlite"
           "?format=jsondict&name=french-departments"
           "&query=select%20name%2Cnumber%20from%20swdata")
    depts = json.loads(scraperwiki.scrape(url))
    start = scraperwiki.sqlite.get_var('index', 0)
    for dept_index in range(start, len(depts)):
        number = depts[dept_index]['number']
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            create_task(letter, number)
def create_task(name, dept):
    '''
    Insert (or upsert, keyed on name+dept) a pending task.
    '''
    scraperwiki.sqlite.save(
        ['name', 'dept'],
        {'name': name, 'dept': dept, 'done': False},
        table_name="tasks")
def mark_task_done(name, dept, results):
    '''
    Flag the (name, dept) task as completed and store its result count.
    '''
    row = {'name': name, 'dept': dept, 'done': True, 'number_records': results}
    scraperwiki.sqlite.save(['name', 'dept'], row, table_name="tasks")
def get_page(br, page, params = None):
    '''
    Fetch *page* through browser *br* and return it parsed with BeautifulSoup.
    When *params* is given it is form-encoded and sent as a POST body,
    otherwise a plain GET is issued.
    '''
    body = None
    if params is not None:
        body = urllib.urlencode(params)
    response = br.open(page, body)
    return BeautifulSoup(response.get_data())
def get_companies(br, name, dept):
    '''
    Issue the search for all the companies in "dept" having "name" as part of their name.
    Walks every result page, storing each company found, and returns the
    total count (the site caps results at 100 per search).
    '''
    # Issue the search
    document = get_page(br, "http://www.infogreffe.fr/infogreffe/newRechercheEntreprise.xml", {"denomination" : name, "departement" : dept})
    # Process pages until there is no next page
    count = 0
    stop = False
    while not stop:
        # Process all the companies listed on the page
        for entreprise in document.findAll('td', attrs={'class':'entreprise'}):
            process_entreprise(dept, entreprise)
            count = count + 1
        # Get the next page if there is one
        stop = True
        links = [l for l in br.links(text_regex=r"Suivant")]
        if len(links) > 0:
            # The "next" link is a javascript call; extract its page offset.
            offset=re.sub(r'javascript:switchPage\(([0-9]*)\)', '\g<1>', links[0].url)
            document = get_page(br, "http://www.infogreffe.fr/infogreffe/includeEntrepListe.do?_=&entrepGlobalIndex=%s&index=rcs&tri=PERTINENCE" % offset)
            stop = False
    # Return the number of results fetched (maximum is 100)
    return count
def process_entreprise(dept, entreprise):
    '''
    Process an HTML block with the description of the entreprise and store a new record for it.
    Extracts name, registry URL, RCS company number, registration city,
    address, entity type and commercial name; saves only rows that have a
    company number (the primary key).
    '''
    # Create the record and get the name of the company and the url
    record = {}
    record['CompanyName'] = re.sub(r'[\t\r\n]', '', entreprise.contents[1].text)
    record['RegistryUrl'] = 'http://www.infogreffe.fr' + entreprise.contents[1].get('href')
    # Process all the other data in the description
    for i in range(2,len(entreprise.contents)):
        # NOTE(review): the entity-map keys look mojibake-encoded (UTF-8 read
        # as Latin-1, e.g. "è" for "è") — verify against the live pages.
        item = re.sub(r'[\t\r\n]', '', saxutils.unescape(str(entreprise.contents[i]), entities = {'è' : 'è', '&' : '&', '"' : '"', ' ' : ''}))
        # Get the company number
        if rcs_pattern.search(item):
            record['CompanyNumber'] = re.sub(r' ', '', re.findall(r'[0-9]{3} [0-9]{3} [0-9]{3}', item)[0])
            blocks = item.split('R.C.S. ')
            if len(blocks) == 2:
                record['RegistrationCity'] = blocks[1]
        # Get its location (a zipcode starts with the departement number)
        zipcode_pattern = re.compile(dept + "[0-9]{3} ")
        if zipcode_pattern.search(item):
            address=item.split(' - ')
            record['Location'] = re.sub(r' {2,30}', '', address[0])
            if len(address) == 2:
                record['BuildingAtLocation'] = re.sub(r' {2,30}', '', address[1])
        # Get its activity (last info when it is not an address)
        if i==len(entreprise.contents)-1 and zipcode_pattern.search(item) == None:
            record['EntityType'] = item
        # Get its commercial name
        if name_pattern.search(item):
            record['CommercialName'] = re.sub(r' {2,30}', '', item.replace('Nom commercial :', ''))
    # Save the new record
    if 'CompanyNumber' in record.keys():
        scraperwiki.sqlite.save(["CompanyNumber"], record)
def go():
    '''
    Main procedure of the scraper. Creates a browser, load the list of tasks and execute them
    '''
    try:
        # Prepare the browser
        cookies = mechanize.CookieJar()
        opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cookies))
        mechanize.install_opener(opener)
        br = mechanize.Browser()
        # Disable robots.txt / refresh / referer handling for this site.
        br.set_handle_robots(False)
        br.set_handle_refresh(False)
        br.set_handle_referer(False)
        br.open("http://www.infogreffe.fr/infogreffe/process.do")
        # Get the list of tasks
        tasks = load_task_queue()
        if len(tasks) == 0:
            # If there is no task to execute, init/reset the table
            init_task_queue()
            tasks = load_task_queue()
        for task in tasks:
            try:
                # Execute the task
                results = get_companies(br, task['name'], task['dept'])
                # If we hit the soft limit, add more refined searches to the queue
                if results == 100:
                    print "Limit reached for %s in %s, adding new tasks" % (task['name'], task['dept'])
                    expand_task_queue(task['name'], task['dept'])
                # Mark the task as done
                mark_task_done(task['name'], task['dept'], results)
            except Exception as detail:
                # We may get an exception for using too much CPU time.
                print "Exception raised", detail
    except Exception as detail:
        # If we can't open the browser, just skip running the scraper
        print "Failed starting browser ", detail
#
# Run the scraper
#
# Entry point: executes immediately at import time (ScraperWiki convention).
go()
#results = get_companies(br, "a", "95")
#print "Got %d results " % results
|
import xml.etree.ElementTree as xmltree

# Parse the document once and grab its root element.
tree = xmltree.ElementTree(file='my.xml')
root = tree.getroot()
print(root.tag)

# Walk two levels of the tree, dumping tag / attributes / text of each node.
for child in root:
    print('標籤', child.tag, ',屬性', child.attrib, ',值', child.text)
    for grandchild in child:
        print('標籤', grandchild.tag, ',屬性', grandchild.attrib, ',值', grandchild.text)

# iter() searches the entire subtree for matching tags.
for item in root.iter('item'):
    print(item.attrib , item.text)

# findall() accepts an XPath-like relative path.
for item in root.findall('./morning/item'):
    print('標籤為', item.tag, ',屬性', item.attrib, ',值', item.text)
from keras.models import load_model
from utils import *
import matplotlib.image as mpimg
import sys
# Ask for the folder that holds the trained model (model_data/model.h5)
# and whose basename names the label layout.
dir_name=input("Enter images dir")
# NOTE(review): `os` is not imported here — presumably re-exported by the
# star import from utils; confirm.
original_data_dir = os.path.dirname(os.path.realpath(__file__))+"/"+str(dir_name)
base_folder=os.path.basename(original_data_dir)
print(original_data_dir+'/model_data/model.h5')
model=load_model(original_data_dir+'/model_data/model.h5')
# NOTE(review): the image path comes from argv[1] while the directory came
# from input() — confirm this mixed interactive/CLI interface is intended.
filename=sys.argv[1]
# square_image / convert_to_labels come from utils (star import); square_image
# presumably resizes to 300x300 and returns a path readable by mpimg.
squared_image=square_image(filename,300,300)
x=mpimg.imread(squared_image)
x=x[:,:,:3] #removing alpha channel
# Add a leading batch dimension: (1, H, W, C).
x=x.reshape([1,x.shape[0],x.shape[1],x.shape[2]])
print(convert_to_labels(model.predict(x),base_folder)[0])
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""List and compare most used OpenStack cloud resources."""
import functools
import io
import json
import subprocess
from ansible.module_utils.basic import AnsibleModule
from rally.cli import cliutils
from rally.common.plugin import discover
from rally import plugins
try:
from rally_openstack.common import consts
from rally_openstack.common import credential
except ImportError:
# backward compatibility for stable branches
from rally_openstack import consts
from rally_openstack import credential
def skip_if_service(service):
    """Decorator factory: make a ``list_*`` method return [] when *service*
    is deployed in the cloud.

    ``self.clients.services()`` maps service type to service name; when the
    given service name is present, the wrapped lister is skipped entirely.
    """
    def wrapper(func):
        # functools.wraps keeps the wrapped lister's name/docstring, so
        # reflection (e.g. get_resources scanning "list_" prefixes by name)
        # and debugging see the real method, not "inner".
        @functools.wraps(func)
        def inner(self):
            if service in self.clients.services().values():
                return []
            return func(self)
        return inner
    return wrapper
class ResourceManager(object):
    """Base class: lists one OpenStack service's resources.

    Subclasses set REQUIRED_SERVICE and define ``list_<plural>`` methods;
    ``get_resources`` reflects over those methods to build uniform dicts.
    """
    # Service constant that must be deployed for this manager to apply.
    REQUIRED_SERVICE = None
    # Attribute names treated as a resource's identity; everything else
    # becomes a displayed property.
    STR_ATTRS = ("id", "name")
    def __init__(self, clients):
        self.clients = clients
    def is_available(self):
        # A manager with no REQUIRED_SERVICE is always usable.
        if self.REQUIRED_SERVICE:
            return self.REQUIRED_SERVICE in self.clients.services().values()
        return True
    @property
    def client(self):
        # Convention: the client factory shares the manager's lowercased name.
        return getattr(self.clients, self.__class__.__name__.lower())()
    def get_resources(self):
        """Invoke every ``list_*`` method and normalise the results.

        Each resource becomes {"cls", "resource_name", "id", "props"}:
        identity attrs (STR_ATTRS) under "id", everything else JSON-encoded
        (or stringified) under "props".
        """
        all_resources = []
        cls = self.__class__.__name__.lower()
        for prop in dir(self):
            if not prop.startswith("list_"):
                continue
            f = getattr(self, prop)
            resources = f() or []
            # Strip the "list_" prefix and a trailing "s" to get the singular.
            resource_name = prop[5:][:-1]
            for raw_res in resources:
                res = {"cls": cls, "resource_name": resource_name,
                       "id": {}, "props": {}}
                # Client objects are flattened to a dict of their public,
                # non-callable attributes.
                if not isinstance(raw_res, dict):
                    raw_res = {k: getattr(raw_res, k) for k in dir(raw_res)
                               if not k.startswith("_")
                               if not callable(getattr(raw_res, k))}
                for key, value in raw_res.items():
                    if key.startswith("_"):
                        continue
                    if key in self.STR_ATTRS:
                        res["id"][key] = value
                    else:
                        # JSON where possible; fall back to str() for
                        # non-serialisable values.
                        try:
                            res["props"][key] = json.dumps(value, indent=2)
                        except TypeError:
                            res["props"][key] = str(value)
                if not res["id"] and not res["props"]:
                    print("1: %s" % raw_res)
                    print("2: %s" % cls)
                    print("3: %s" % resource_name)
                    raise ValueError("Failed to represent resource %r" %
                                     raw_res)
                all_resources.append(res)
        return all_resources
class Keystone(ResourceManager):
    """Identity-service resources (users, projects/tenants, roles)."""

    REQUIRED_SERVICE = consts.Service.KEYSTONE

    def list_users(self):
        return self.client.users.list()

    def list_tenants(self):
        # Keystone v3 exposes "projects"; fall back to v2's "tenants".
        manager = getattr(self.client, "projects", None)
        if manager is None:
            manager = self.client.tenants
        return manager.list()

    def list_roles(self):
        return self.client.roles.list()
class Magnum(ResourceManager):
    """Container-infra resources; both listings follow marker pagination."""

    REQUIRED_SERVICE = consts.Service.MAGNUM

    def _collect_pages(self, lister):
        # Drain a marker-paginated list call: keep requesting with the last
        # item's uuid as marker until an empty page comes back.
        collected = []
        marker = None
        while True:
            page = lister(marker=marker)
            if not page:
                return collected
            collected.extend(page)
            marker = page[-1].uuid

    def list_cluster_templates(self):
        return self._collect_pages(self.client.cluster_templates.list)

    def list_clusters(self):
        return self._collect_pages(self.client.clusters.list)
class Mistral(ResourceManager):
    """Workflow-service resources."""
    REQUIRED_SERVICE = consts.Service.MISTRAL
    def list_workbooks(self):
        return self.client.workbooks.list()
    def list_workflows(self):
        return self.client.workflows.list()
    def list_executions(self):
        return self.client.executions.list()
class Nova(ResourceManager):
    """Compute-service resources."""
    REQUIRED_SERVICE = consts.Service.NOVA
    def list_flavors(self):
        return self.client.flavors.list()
    def list_aggregates(self):
        return self.client.aggregates.list()
    def list_hypervisors(self):
        return self.client.hypervisors.list()
    def list_keypairs(self):
        return self.client.keypairs.list()
    def list_servers(self):
        # all_tenants: include servers owned by every project, not just ours.
        return self.client.servers.list(
            search_opts={"all_tenants": True})
    def list_server_groups(self):
        return self.client.server_groups.list(all_projects=True)
    def list_services(self):
        return self.client.services.list()
    def list_availability_zones(self):
        return self.client.availability_zones.list()
class Neutron(ResourceManager):
    """Networking resources; optional listings are gated on API extensions."""
    REQUIRED_SERVICE = consts.Service.NEUTRON
    def has_extension(self, name):
        # True when the deployed Neutron advertises extension alias *name*.
        extensions = self.client.list_extensions().get("extensions", [])
        return any(ext.get("alias") == name for ext in extensions)
    def list_networks(self):
        return self.client.list_networks()["networks"]
    def list_subnets(self):
        return self.client.list_subnets()["subnets"]
    def list_routers(self):
        return self.client.list_routers()["routers"]
    def list_ports(self):
        return self.client.list_ports()["ports"]
    def list_floatingips(self):
        return self.client.list_floatingips()["floatingips"]
    def list_security_groups(self):
        return self.client.list_security_groups()["security_groups"]
    # The listings below return None (treated as [] by get_resources) when
    # the corresponding extension is not deployed.
    def list_trunks(self):
        if self.has_extension("trunks"):
            return self.client.list_trunks()["trunks"]
    def list_health_monitors(self):
        if self.has_extension("lbaas"):
            return self.client.list_health_monitors()["health_monitors"]
    def list_pools(self):
        if self.has_extension("lbaas"):
            return self.client.list_pools()["pools"]
    def list_vips(self):
        if self.has_extension("lbaas"):
            return self.client.list_vips()["vips"]
    def list_bgpvpns(self):
        if self.has_extension("bgpvpn"):
            return self.client.list_bgpvpns()["bgpvpns"]
class Glance(ResourceManager):
    """Image-service resources."""
    REQUIRED_SERVICE = consts.Service.GLANCE
    def list_images(self):
        return self.client.images.list()
class Heat(ResourceManager):
    """Orchestration-service resources."""
    REQUIRED_SERVICE = consts.Service.HEAT
    def list_resource_types(self):
        return self.client.resource_types.list()
    def list_stacks(self):
        return self.client.stacks.list()
class Cinder(ResourceManager):
    """Block-storage resources."""
    REQUIRED_SERVICE = consts.Service.CINDER
    def list_availability_zones(self):
        return self.client.availability_zones.list()
    def list_backups(self):
        return self.client.backups.list()
    def list_volume_snapshots(self):
        return self.client.volume_snapshots.list()
    def list_volume_types(self):
        return self.client.volume_types.list()
    def list_encryption_types(self):
        return self.client.volume_encryption_types.list()
    def list_transfers(self):
        return self.client.transfers.list()
    def list_volumes(self):
        # all_tenants: include volumes owned by every project.
        return self.client.volumes.list(search_opts={"all_tenants": True})
    def list_qos(self):
        return self.client.qos_specs.list()
class Senlin(ResourceManager):
    """Clustering-service resources (note: callables, not .list() managers)."""
    REQUIRED_SERVICE = consts.Service.SENLIN
    def list_clusters(self):
        return self.client.clusters()
    def list_profiles(self):
        return self.client.profiles()
class Manila(ResourceManager):
    """Shared-filesystem resources, listed across all tenants."""
    REQUIRED_SERVICE = consts.Service.MANILA
    def list_shares(self):
        # detailed=False keeps the payload small; only identity is needed.
        return self.client.shares.list(detailed=False,
                                       search_opts={"all_tenants": True})
    def list_share_networks(self):
        return self.client.share_networks.list(
            detailed=False, search_opts={"all_tenants": True})
    def list_share_servers(self):
        return self.client.share_servers.list(
            search_opts={"all_tenants": True})
class Gnocchi(ResourceManager):
    """Resource manager for Gnocchi (metrics service)."""
    REQUIRED_SERVICE = consts.Service.GNOCCHI

    @staticmethod
    def _drain(lister):
        """Collect every page from a marker-paginated ``lister`` callable.

        Follows the last item's "id" as the next-page marker until an
        empty page is returned.
        """
        collected = []
        marker = None
        while True:
            page = lister(marker=marker)
            if not page:
                return collected
            collected.extend(page)
            marker = page[-1]["id"]

    def list_resources(self):
        """Return all resources, following pagination."""
        return self._drain(self.client.resource.list)

    def list_archive_policy_rules(self):
        """Return all archive policy rules."""
        return self.client.archive_policy_rule.list()

    def list_archive_policys(self):
        """Return all archive policies."""
        return self.client.archive_policy.list()

    def list_resource_types(self):
        """Return all resource types."""
        return self.client.resource_type.list()

    def list_metrics(self):
        """Return all metrics, following pagination."""
        return self._drain(self.client.metric.list)
class Ironic(ResourceManager):
    """Resource manager for Ironic (bare metal service)."""
    REQUIRED_SERVICE = consts.Service.IRONIC

    def list_nodes(self):
        """Return all bare-metal nodes."""
        nodes = self.client.node.list()
        return nodes
class Sahara(ResourceManager):
    """Resource manager for Sahara (data processing service)."""
    REQUIRED_SERVICE = consts.Service.SAHARA

    def list_node_group_templates(self):
        """Return all node group templates."""
        templates = self.client.node_group_templates.list()
        return templates
class Murano(ResourceManager):
    """Resource manager for Murano (application catalog)."""
    REQUIRED_SERVICE = consts.Service.MURANO

    def list_environments(self):
        """Return all environments."""
        return self.client.environments.list()

    def list_packages(self):
        """Return all packages, including disabled ones."""
        return self.client.packages.list(include_disabled=True)
class Designate(ResourceManager):
    """Resource manager for Designate (DNS service)."""
    REQUIRED_SERVICE = consts.Service.DESIGNATE

    def list_zones(self):
        """Return all zones via the v2 designate client."""
        return self.clients.designate("2").zones.list()

    def list_recordset(self):
        """Return the record sets of every zone as one flat list.

        Fixes two defects in the previous version: ``zones.list()`` yields
        zone dicts, so the zone id must be extracted before querying record
        sets, and the per-zone result lists must be flattened instead of
        being appended as nested elements.
        """
        client = self.clients.designate("2")
        results = []
        for zone in client.zones.list():
            results.extend(client.recordsets.list(zone["id"]))
        return results
class Trove(ResourceManager):
    """Resource manager for Trove (database service)."""
    REQUIRED_SERVICE = consts.Service.TROVE

    def list_backups(self):
        """Return all database backups."""
        return self.client.backup.list()

    def list_clusters(self):
        """Return all database clusters."""
        return self.client.cluster.list()

    def list_configurations(self):
        """Return all configuration groups."""
        return self.client.configuration.list()

    def list_databases(self):
        """Return all databases."""
        return self.client.database.list()

    def list_datastore(self):
        """Return all datastores."""
        return self.client.datastore.list()

    def list_instances(self):
        """Return all instances, clustered ones included."""
        return self.client.list(include_clustered=True)

    def list_modules(self):
        """Return modules for every datastore."""
        return self.client.module.list(datastore="all")
class Monasca(ResourceManager):
    """Resource manager for Monasca (monitoring service)."""
    REQUIRED_SERVICE = consts.Service.MONASCA

    def list_metrics(self):
        """Return all metrics."""
        metrics = self.client.metrics.list()
        return metrics
class Watcher(ResourceManager):
    """Resource manager for Watcher (optimization service)."""
    REQUIRED_SERVICE = consts.Service.WATCHER
    # Watcher resources are identified by uuid and name when printed.
    REPR_KEYS = ("uuid", "name")

    def list_audits(self):
        """Return all audits."""
        return self.client.audit.list()

    def list_audit_templates(self):
        """Return all audit templates."""
        return self.client.audit_template.list()

    def list_goals(self):
        """Return all goals."""
        return self.client.goal.list()

    def list_strategies(self):
        """Return all strategies."""
        return self.client.strategy.list()

    def list_action_plans(self):
        """Return all action plans."""
        return self.client.action_plan.list()
class Octavia(ResourceManager):
    """Resource manager for Octavia (load balancing service)."""
    REQUIRED_SERVICE = consts.Service.OCTAVIA

    def list_load_balancers(self):
        """Return all load balancers."""
        return self.client.load_balancer_list()["loadbalancers"]

    def list_listeners(self):
        """Return all listeners."""
        return self.client.listener_list()["listeners"]

    def list_pools(self):
        """Return all pools."""
        return self.client.pool_list()["pools"]

    def list_l7policies(self):
        """Return all L7 policies."""
        return self.client.l7policy_list()["l7policies"]

    def list_health_monitors(self):
        """Return all health monitors."""
        return self.client.health_monitor_list()["healthmonitors"]

    def list_amphoras(self):
        """Return all amphorae."""
        return self.client.amphora_list()["amphorae"]
class CloudResources(object):
    """List and compare cloud resources.

    resources = CloudResources(auth_url=..., ...)
    saved_list = resources.list()
    # Do something with the cloud ...
    changes = resources.compare(saved_list)
    has_changed = any(changes)
    removed, added = changes
    """

    def __init__(self, **kwargs):
        self.clients = credential.OpenStackCredential(**kwargs).clients()

    def list(self):
        """Collect resources from every available ResourceManager subclass."""
        found = []
        for manager_cls in discover.itersubclasses(ResourceManager):
            manager = manager_cls(self.clients)
            if manager.is_available():
                found.extend(manager.get_resources())
        return found

    def compare(self, with_list):
        """Return (removed, added) relative to a previously saved list."""
        def make_uuid(res):
            # A stable key: service, resource type, and sorted id fields.
            ident = ";".join("%s=%s" % item for item in sorted(res["id"].items()))
            return "%s.%s:%s" % (res["cls"], res["resource_name"], ident)

        current = {make_uuid(r): r for r in self.list()}
        saved = {make_uuid(r): r for r in with_list}
        removed_keys = sorted(set(saved) - set(current))
        added_keys = sorted(set(current) - set(saved))
        return ([saved[k] for k in removed_keys],
                [current[k] for k in added_keys])
def _print_tabular_resources(resources, table_label):
    """Pretty-print a resource list as a labeled table on stdout."""
    def dict_formatter(d):
        return "\n".join("%s:%s" % pair for pair in d.items())

    buf = io.StringIO()
    cliutils.print_list(
        objs=[dict(r) for r in resources],
        fields=("cls", "resource_name", "id", "fields"),
        field_labels=("service", "resource type", "id", "fields"),
        table_label=table_label,
        formatters={"id": lambda d: dict_formatter(d["id"]),
                    "fields": lambda d: dict_formatter(d["props"])},
        out=buf,
    )
    buf.write("\n")
    print(buf.getvalue())
def dump_resources(resources_mgr, json_output):
    """List resources, print them, and optionally dump JSON.

    Returns (0, resource_list) so callers can treat it like check_resource.
    """
    listed = resources_mgr.list()
    _print_tabular_resources(listed, "Available resources.")
    if json_output:
        with open(json_output, "w") as f:
            json.dump(listed, f)
    return 0, listed
def check_resource(resources_mgs, compare_with, json_output):
    """Diff current cloud resources against a saved JSON dump.

    Returns (rc, result): rc is 1 when anything changed, and result maps
    "removed"/"added"/"expected" to the corresponding resource lists.
    """
    with open(compare_with) as f:
        compare_to = json.loads(f.read())
    changes = resources_mgs.compare(with_list=compare_to)
    removed, added = changes
    # Cinder caches images as volumes to speed up volume-from-image
    # creation; such cache volumes are expected additions.
    volume_names = [
        "image-%s" % i["id"]["id"] for i in compare_to
        if i["cls"] == "glance" and i["resource_name"] == "image"]

    def _is_expected(res):
        """True for additions that are known side effects, not real leaks."""
        name = res["id"].get("name")
        if res["cls"] == "keystone" and res["resource_name"] == "role":
            return name == "_member_"
        if res["cls"] == "neutron" and res["resource_name"] == "security_group":
            return name == "default"
        if res["cls"] == "cinder" and res["resource_name"] == "volume":
            return name in volume_names
        # Glance has issues with uWSGI integration...
        # (glance itself is deliberately NOT excluded here)
        return res["cls"] in ("murano", "gnocchi")

    expected = [res for res in added if _is_expected(res)]
    for res in expected:
        added.remove(res)
    if removed:
        _print_tabular_resources(removed, "Removed resources")
    if added:
        _print_tabular_resources(added, "Added resources (unexpected)")
    if expected:
        _print_tabular_resources(expected, "Added resources (expected)")
    result = {"removed": removed, "added": added, "expected": expected}
    if json_output:
        with open(json_output, "w") as f:
            f.write(json.dumps(result, indent=4))
    # "added" was mutated in place, so any(changes) reflects the filtering.
    rc = 1 if any(changes) else 0
    return rc, result
@plugins.ensure_plugins_are_loaded
def do_it(json_output, compare_with):
    """Load the devstack env spec and either dump or check resources."""
    raw = subprocess.check_output(
        ["rally", "env", "show", "--only-spec", "--env", "devstack"])
    spec = json.loads(raw.decode("utf-8"))["existing@openstack"]
    # Flatten the admin credentials into the top-level spec.
    spec.update(spec.pop("admin"))
    spec.pop("users", None)
    resources = CloudResources(**spec)
    if compare_with:
        return check_resource(resources, compare_with, json_output)
    return dump_resources(resources, json_output)
def ansible_main():
    """Ansible entry point: run the resource check and report the outcome."""
    module = AnsibleModule(argument_spec=dict(
        json_output=dict(required=False, type="str"),
        compare_with=dict(required=False, type="path"),
    ))
    rc, json_result = do_it(
        json_output=module.params.get("json_output"),
        compare_with=module.params.get("compare_with"),
    )
    if rc:
        module.fail_json(
            msg="Unexpected changes of resources are detected.",
            rc=1,
            resources=json_result,
        )
    module.exit_json(rc=0, changed=True, resources=json_result)
# Allow running this file directly as an Ansible module entry point.
if __name__ == "__main__":
    ansible_main()
|
# -*- coding: utf-8 -*-
import logging
import os
from pathlib import Path
import click
import pandas as pd
from dotenv import find_dotenv, load_dotenv
from src.constants import (TARGET_FEATURE)
from src.models.model import SoilClassifier
@click.command()
@click.option('--input_filepath', type=click.Path(exists=True))
@click.option('--model_file_name', type=str)
def main(input_filepath, model_file_name):
    """Train a SoilClassifier on a pipe-separated CSV and dump it to disk.

    Args:
        input_filepath: Path to the '|'-separated training CSV.
        model_file_name: Base name (without extension) for the pickled model.
    """
    logger = logging.getLogger(__name__)
    logger.info('Training Model')
    # read_csv accepts the path directly; the previous single-argument
    # os.path.join call was a no-op.
    df = pd.read_csv(input_filepath, sep='|')
    model = SoilClassifier(
        feature_names=['not_correlated', 'cadastral_ordinal_encoder_onehot', 'log_area', 'log_antiquity',
                       'squared_geoms', 'pssr', 'savi'],
        classifier='gradient_boosting',
        min_samples=1000,
        max_samples=15000)
    model.fit(df, df[TARGET_FEATURE])
    model.dump('models/{}.pkl'.format(model_file_name))
    logger.info('Training model finished, find the model into models/{}.pkl'.format(model_file_name))
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # Export the entries of the nearest .env file (found by walking up the
    # directory tree) as environment variables, then run the CLI command.
    load_dotenv(find_dotenv())
    main()
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import (
TokenRefreshView,
)
from .views import (
UserViewSet,
PostViewSet,
CommentViewSet,
HistoryDestroyViewSet,
HashtagGenericViewSet,
LikeCreateViewSet,
MyTokenObtainPairView,
MyTokenBlacklistViewView,
FileCreateViewSet,
)
urlpatterns = []

# One dedicated slash-free router per resource; registration happens right
# next to each router's construction.
user_router = DefaultRouter(trailing_slash=False)
user_router.register(r'user', UserViewSet, basename='user')
post_router = DefaultRouter(trailing_slash=False)
post_router.register(r'tip', PostViewSet, basename='tip')
comment_router = DefaultRouter(trailing_slash=False)
comment_router.register(r'comment', CommentViewSet, basename='comment')
history_router = DefaultRouter(trailing_slash=False)
history_router.register(r'history', HistoryDestroyViewSet, basename='history')
hashtag_router = DefaultRouter(trailing_slash=False)
hashtag_router.register(r'hashtag', HashtagGenericViewSet, basename='hashtag')
like_router = DefaultRouter(trailing_slash=False)
like_router.register(r'like', LikeCreateViewSet, basename='like')
file_router = DefaultRouter(trailing_slash=False)
file_router.register(r'file', FileCreateViewSet, basename='file')

for _router in (user_router, post_router, comment_router, history_router,
                hashtag_router, like_router, file_router):
    urlpatterns += _router.urls

# JWT token endpoints come last, after all router-generated URLs.
urlpatterns += [
    path('token', MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh', TokenRefreshView.as_view(), name='token_refresh'),
    path('token/blacklist', MyTokenBlacklistViewView.as_view(), name='token_blacklist'),
]
|
__author__ = 'adrian'
from PyQt4 import QtCore, QtGui
import sys
class myWindow(QtGui.QWidget):
    """Widget with a single button that animates the window size on click."""

    def __init__(self, parent=None):
        super(myWindow, self).__init__(parent)
        myLayout = QtGui.QVBoxLayout(self)
        Button = QtGui.QPushButton('Resize')
        myLayout.addWidget(Button)
        Button.setMinimumWidth(200)
        self.setMaximumWidth(300)
        Button.clicked.connect(self.resizeDialog)

    def resizeDialog(self):
        """Animate the window size: grow when exactly 200 px wide, else shrink."""
        # Keep the animation on self so it is not garbage-collected mid-run.
        self.animation = QtCore.QPropertyAnimation(self, "size")
        if self.size().width() == 200:
            # NOTE(review): 600 exceeds the 300 px maximum width set in
            # __init__, so the animated width will be clamped -- confirm intent.
            self.animation.setEndValue(QtCore.QSize(600, 300))
        else:
            # Removed a leftover debug print(3) that was here.
            self.animation.setEndValue(QtCore.QSize(400, 100))
        self.animation.start()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
app.setApplicationName('myApp')
dialog = myWindow()
dialog.resize(200, 100)
dialog.show()
sys.exit(app.exec_()) |
#!C:\Python27
# -*- coding: utf-8 -*-
import chardet
from kitchen.text.converters import to_unicode
from openpyxl import Workbook
def normalize_name(name):
    """Convert a name to title case, passing falsy values through unchanged."""
    if not name:
        return name
    return get_unicode(name).strip().title()
def write_ws(ws, record):
    """Append one record to the worksheet as a new row."""
    ws.append([element for element in record])
def convert_to_float(strValue):
    """Parse a European-formatted number string ("1.234,56") into a float."""
    cleaned = strValue.strip()
    cleaned = cleaned.replace(".", "")   # drop thousands separators
    cleaned = cleaned.replace(",", ".")  # decimal comma -> decimal point
    return float(cleaned)
def find_nth(s, x, n):
    """Return the index of the n-th non-overlapping occurrence of x in s.

    Returns -1 when there are fewer than n occurrences (or n < 1).

    Bug fix: the search previously started at ``-1 + len(x)``, which for
    multi-character needles skipped an occurrence at the start of the
    string (e.g. find_nth("abab", "ab", 1) returned 2 instead of 0).
    """
    i = -1
    start = 0
    for _ in range(n):
        i = s.find(x, start)
        if i == -1:
            break
        # Continue after the whole match: occurrences do not overlap.
        start = i + len(x)
    return i
def dict_list_to_excel(dictList, xlName="Output.xlsx"):
    """Write a list of same-keyed dicts to an Excel table.

    Column headers come from the keys of the first dict; those keys are
    then used to read every dict in the list.
    """
    wb = Workbook()
    ws = wb.get_active_sheet()
    keys = dictList[0].keys()
    # Header row.
    # NOTE(review): row=0/column=0 addressing requires a legacy openpyxl;
    # modern versions use 1-based indices -- confirm the pinned version.
    for col, key in enumerate(keys):
        ws.cell(row=0, column=col).value = key
    # One worksheet row per dictionary.
    for row, record in enumerate(dictList, start=1):
        for col, key in enumerate(keys):
            ws.cell(row=row, column=col).value = record[key]
    wb.save(xlName)
def get_unicode(string, encoding='utf-8', errors='replace'):
    """Coerce *string* to unicode, falling back to charset detection."""
    if not string:
        # None (and other falsy values) pass through untouched.
        return string
    try:
        return to_unicode(string, encoding, errors)
    except Exception:
        # The declared encoding failed: detect the real one and retry.
        detected = chardet.detect(string)["encoding"]
        return to_unicode(string, detected, errors)
|
from Bio import AlignIO, SeqIO
import copy
import time
from Bio.Phylo import BaseTree
from Bio._py3k import zip, range
import numpy as np
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
from phyloGenie.DistanceMatrixCalculatorGPU import DistanceCalculator_GPU
# perform multiple sequence alignment using MUSCLE and write the alignment to a fasta file
class FullGpuDistanceCalculation:
    """Compute a distance matrix from a FASTA alignment on the GPU."""

    def full_gpu_calculate_distance_matrix(self, type, file):
        """Return the distance matrix for the alignment in ``file``.

        ``type`` selects the scoring model: 'DNA' uses blastn, anything
        else uses blosum62 (protein).
        """
        matrix_type = 'blastn' if type == 'DNA' else 'blosum62'
        calculator = DistanceCalculator_GPU(matrix_type)
        alignment = AlignIO.read(file, "fasta")
        return calculator.get_distance(alignment)
class FullGpuUpgmaTreeConstructor:
    """Build a UPGMA tree from a distance matrix, using a CUDA kernel to
    locate the minimum-distance pair at each agglomeration step."""
    def full_gpu_upgma(self, distance_matrix):
        """Agglomerate the distance matrix into a rooted UPGMA tree.

        Each iteration flattens the lower triangle of the matrix, finds the
        global minimum on the GPU, merges the corresponding pair of clades,
        and shrinks the matrix by one row/column.
        """
        # make a copy of the distance matrix to be used
        dm = copy.deepcopy(distance_matrix)
        # dm_count tracks how many leaves each cluster represents (for the
        # weighted average when rows are merged); initialized to 1 per pair.
        dm_count = copy.deepcopy(dm)
        for i in range(1, len(dm_count)):
            for j in range(0, i):
                dm_count[i, j] = 1
        # init terminal clades
        clades = [BaseTree.Clade(None, name) for name in dm.names]
        # init minimum index
        min_i = 0
        min_j = 0
        inner_count = 0
        # GPU kernel to find the minimum index and minimum distance
        # (each thread scans a chunk of c entries; per-thread minima are
        # reduced on the host afterwards)
        mod = SourceModule("""
        __global__ void findMin(double *dm, long long *index, double *local_min, int c, int l)
        {
            int k = threadIdx.y + blockIdx.y*blockDim.y;
            double min_dist = dm[k*c];
            int id = 0 ;
            for(int i= k*c ; i< (k+1)*c; i++){
                if(i<l)
                {
                    if(min_dist >= dm[i])
                    {
                        min_dist = dm[i];
                        id = i;
                    }
                }
            }
            local_min[k]=min_dist;
            index[k]= id;
        }""")
        while len(dm) > 1:
            # host array creation
            time_gpu_start = time.time()
            # Flatten the lower triangle (excluding the diagonal) row by row.
            mat = dm.matrix
            dm_cpu = np.array(mat[1][:-1])
            for i in range(2, len(dm)):
                dm_cpu = np.append(dm_cpu, mat[i][:-1])
            combinations = int(((len(dm) - 1) * len(dm)) / 2)
            # Pick a launch configuration sized to the number of pairs.
            if combinations < 1024*256:
                block_size = int(round((len(dm))/2))
            elif combinations < 1024*1024:
                block_size = 512
            else:
                block_size = 1024
            local_count = int(round(combinations/block_size))
            if local_count < 1024:
                grid_size = 1
            else:
                grid_size = int(round(local_count/1024))+1
            index = np.zeros(block_size, dtype=int)
            min_val = np.zeros(block_size, dtype=float)
            local_min_array_gpu = drv.mem_alloc(dm_cpu.nbytes)
            local_index_gpu = drv.mem_alloc(index.nbytes)
            local_min_gpu = drv.mem_alloc(min_val.nbytes)
            drv.memcpy_htod(local_min_array_gpu, dm_cpu)
            drv.memcpy_htod(local_index_gpu, index)
            drv.memcpy_htod(local_min_gpu, min_val)
            func = mod.get_function("findMin")
            # start.record()
            func(local_min_array_gpu, local_index_gpu, local_min_gpu, np.int32(local_count), np.int32(len(dm_cpu)),
                 block=(1, block_size, 1), grid =(1, grid_size, 1))
            # end.record()
            # end.synchronize()
            drv.memcpy_dtoh(min_val, local_min_gpu)
            drv.memcpy_dtoh(index, local_index_gpu)
            min_val_new = min_val
            min_val = min_val.tolist()
            local_min_gpu.free()
            local_index_gpu.free()
            # Host-side reduction of the per-thread minima.
            min_dist = min(min_val)
            global_id = 0
            for i in range(len(min_val_new)):
                if min_dist == min_val_new[i]:
                    global_id = index[i]
                    break
            # Map the flat triangle offset back to (row, column) indices.
            for i in range(1, len(distance_matrix)):
                if global_id == 0:
                    min_i = 1
                    min_j = 0
                    break
                else:
                    t_val = ((i+1)*(i+2))/2
                    if global_id < t_val:
                        min_i = i+1
                        min_j = global_id-(t_val - i-1)
                        break
                    elif global_id == t_val:
                        min_i = i+2
                        min_j = 0
                        break
            # create clade
            clade1 = clades[min_i]
            clade2 = clades[min_j]
            inner_count += 1
            inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
            inner_clade.clades.append(clade1)
            inner_clade.clades.append(clade2)
            # assign branch length
            if clade1.is_terminal():
                clade1.branch_length = min_dist * 1.0 / 2
            else:
                clade1.branch_length = min_dist * \
                    1.0 / 2 - self._height_of(clade1)
            if clade2.is_terminal():
                clade2.branch_length = min_dist * 1.0 / 2
            else:
                clade2.branch_length = min_dist * \
                    1.0 / 2 - self._height_of(clade2)
            # update node list
            clades[min_j] = inner_clade
            del clades[min_i]
            # rebuild distance matrix,
            # set the distances of new node at the index of min_j
            # (weighted average, weights being the cluster sizes)
            for k in range(0, len(dm)):
                r = 0
                if k != min_i and k != min_j:
                    r = dm_count[min_i, k] + dm_count[min_j, k]
                    dm[min_j, k] = ((dm[min_i, k] * dm_count[min_i, k]) + (dm[min_j, k] * dm_count[min_j, k])) / r
                    dm_count[min_j, k] = r
            dm_count.names[min_j] = "Inner" + str(inner_count)
            del dm_count[min_i]
            dm.names[min_j] = "Inner" + str(inner_count)
            del dm[min_i]
        inner_clade.branch_length = 0
        del dm_cpu
        return BaseTree.Tree(inner_clade)
    def _height_of(self, clade):
        """Return the height of a clade (root-to-leaf path length).

        NOTE(review): for internal clades the accumulator is reset inside
        the loop, so only the path to the *last* terminal is returned; in an
        ultrametric UPGMA tree all leaves share the same height, but confirm
        this is intended.
        """
        height = 0
        if clade.is_terminal():
            height = clade.branch_length
        else:
            for terminal in clade.get_terminals():
                path = clade.get_path(target=terminal)
                height = 0
                for value in path:
                    height = height + value.branch_length
        return height
# Phylo.draw_ascii(upgma_tree)
|
#!/usr/bin/env python
# coding: utf-8
import cv2
import numpy as np
import shapely.geometry as geom
import shapedetector as sd
def detect_sq(image):
    """Detect square-like, deeply nested contours in a BGR image and report
    the colors found inside them (Python 2 / OpenCV 3 print and API style)."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 3)
    edges = cv2.Canny(blurred, 60, 200)
    # edges = cv2.Canny(gray, 100, 200)
    # OpenCV 3 findContours returns (image, contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if hierarchy is None:
        return
    hierarchy = hierarchy[0]
    found = []
    for cnt_idx in range(len(contours)):
        area = cv2.contourArea(contours[cnt_idx], oriented=True)
        if area < 400:
            continue
        # Count nesting depth by following the first-child links; contours
        # nested more than two levels deep are candidate markers.
        k = cnt_idx
        c = 0
        while hierarchy[k][2] != -1:
            k = hierarchy[k][2]
            c = c + 1
        if c > 2:
            found.append(cnt_idx)
    for i, cnt_idx in enumerate(found):
        is_square, c = sd.detect_square(contours[cnt_idx])
        if is_square:
            print 'found squared'
            # Crop the bounding box and inspect the colors inside it.
            x, y, width, height = cv2.boundingRect(contours[cnt_idx])
            roi = image[y:y + height, x:x + width]
            cv2.imwrite("roi.png", roi)
            colors, dst = sd.detect_color_in(roi, c)
            print 'color:%d:' % i, colors
def cluster_boxes(box_list):
    """Merge touching/intersecting boxes around the largest box.

    Boxes are processed in descending area order; every box that touches or
    intersects the running union of the largest box is merged into it, and
    everything else is kept as-is. The merged box is returned first.

    Bug fix: the previous version appended the seed box to the results
    *before* merging, so later unions were lost and the stale, unmerged seed
    was returned instead.
    """
    if len(box_list) < 2:
        return box_list
    ordered = sorted(box_list, key=lambda b: b.area, reverse=True)
    merged = ordered[0]
    results = []
    for b in ordered[1:]:
        if merged.intersects(b) or merged.touches(b):
            merged = merged.union(b)
        else:
            results.append(b)
    results.insert(0, merged)
    return results
# Webcam loop: background-subtract each frame, box the moving regions,
# then run square detection on the frame. 's' saves a snapshot,
# 'q'/Esc quits.
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
fgbg.setHistory(30)
# tracker = cv2.TrackerKCF_create()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
track_init = False
while True:
    _, frame = cap.read()
    # Foreground mask, denoised with open/close and binarized.
    fg_mask = fgbg.apply(frame)
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel)
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)
    _, fg_mask = cv2.threshold(fg_mask, 60, 255, cv2.THRESH_BINARY)
    im2, contours, hierarchy = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        area = cv2.contourArea(c)
        if area < 200:
            continue
        x, y, w, h = cv2.boundingRect(c)
        boxes.append(geom.box(x, y, (x + w), (y + h)))
    boxes = cluster_boxes(boxes)
    for box in boxes:
        x0, y0, x1, y1 = np.int32(box.bounds)
        # cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 255, 0), 2)
        detect_sq(frame)
    cv2.imshow('video_fg', frame)
    k = cv2.waitKey(30) & 0xff
    # wait for 's' key to save and exit
    if k == ord('s'):
        cv2.imwrite('frame.png', frame)
    elif k == 27 or k == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
from Tools.Initialize import initialize
class Longin(object):
    """Page object for the login form; the driver comes from Tools.Initialize."""

    def __init__(self):
        self.driver = initialize().driver

    def LonginName(self):
        """Locate the login-name input field."""
        return self.driver.find_element_by_xpath("//input[@id='loginName']")

    def PassWord(self):
        """Locate the password input field."""
        return self.driver.find_element_by_xpath("//input[@id='password']")

    def SureLongin(self):
        """Locate the login submit button."""
        return self.driver.find_element_by_xpath("//input[@onclick='loginWeb();']")
def test_solution():
    """Exercise solution.reverse on a sample word and the empty string."""
    import solution
    # NOTE(review): 'oellh' is not the plain reversal of 'hello' ('olleh');
    # either solution.reverse implements a non-trivial shuffle or this
    # expected value is a typo -- confirm against solution.py.
    assert solution.reverse('hello') == 'oellh'
    assert solution.reverse('') == ''
|
import sys  # noqa
import subprocess  # noqa
# NOTE(review): pip.req was removed in pip 10, so this setup.py requires
# pip < 10 (or a switch to another requirements parser) -- confirm the
# environment's pinned pip version.
from pip.req import parse_requirements
from setuptools import setup, find_packages
# Embed the current git commit in the package version (e.g. "0.1+abc1234").
commit = subprocess.Popen(
    'git rev-parse --short HEAD'.split(),
    stdout=subprocess.PIPE,
).stdout.read().decode('utf-8').strip()
# Install requirements are taken straight from requirements.txt.
install_reqs = parse_requirements('requirements.txt', session='')
setup(
    name='bosnet-prototype-fba',
    version='0.1+%s' % commit,
    description='simulation for fba consensus protocol',
    author='BOSNet team',
    license='GPLv3+',
    keywords='bosnet blockchainos blockchain fba stellar quorum python byzantine agreement',
    zip_safe=False,
    install_requires=list(map(lambda x: str(x.req), install_reqs)),
    package_dir={'': 'src'},
    packages=find_packages('src', exclude=('test',)),
    scripts=(
        'scripts/run-blockchain.py',
        'scripts/run-client.py',
        'scripts/run-client-new.py',
        'scripts/send-message.py',
        'scripts/metric-analyzer.py',
        'examples/experiment/run-consensus-performance.py',
    ),
)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 19:39:31 2015
@author: leben
Simulate charged particle motion within magnetic field with axial gradient
"""
import numpy as np
import matplotlib.pyplot as plt
from plot_utility import extractData
import settings
import magnetic as mag
from Fields import SmoothField
class Smooth(object):
    """Driver for simulating charged-particle motion in a smooth magnetic
    field with an axial gradient, and for plotting/animating the results.

    NOTE(review): this module uses Python 2 idioms (xrange, integer
    division of len(z)) -- confirm the target interpreter.
    """
    # particles = [ 'e-', 'de+', 'tr+', 'p-' ]
    # particles = ['de+', 'tr+']
    particles = ['tr+']
    # particles = [ 'de+', 'tr+', 'p-' ]
    cmaps = [ 'Reds', 'Greens', 'Blues', 'Oranges' ]
    lmaps = [ 'red', 'green', 'blue', 'orange' ]
    # matplotlib line styles per particle code
    style = { 'de+': 'b-', 'tr+': 'g:', 'p-': 'm-', 'e-': 'k-' }
    def __init__(self):
        # Configure and persist the base simulation parameters once.
        app = mag.Application()
        app.fieldCode = 'Smooth'
        app.x0 = 0.0
        app.y0 = 0.0
        app.z0 = 0.0
        app.useKineticEnergy = True
        app.kineticEnergy = 15
        app.fieldBaseStrength = [4.7]
        app.initialTime = 0.0
        app.timeStep = 1.0E-9
        app.endTime = 5.0E-4
        # app.endTime = 2.5E-6
        app.tolerance = 1.0E-05
        app.save()
    def simulate(self, alpha, beta):
        """Run the external simulation once per particle for the given
        field-gradient parameters, writing each run to its own outfile."""
        app = mag.Application()
        app.fieldGradient = [ alpha, beta ]
        app.save()
        s = settings.Settings()
        for particle in self.particles:
            outfile = self.filename(particle, alpha, beta)
            s.outfile = outfile
            s.save()
            app.particleCode = particle
            app.save()
            app.execute()
    def plotSuperimposed(self, alpha, beta):
        """Plot a z-y slice of each particle's trajectory on one figure,
        with the field contours superimposed."""
        s = settings.Settings()
        app = mag.Application()
        field = SmoothField()
        field.alpha = alpha
        field.beta = beta
        field.Bz0 = app.fieldBaseStrength[0]
        for particle in self.particles:
            outfile = self.filename(particle, alpha, beta)
            s.outfile = outfile
            with open(s.outpath()) as f:
                t, x, y, z = extractData(f, [0, 1, 2, 3])
            # Zoom in on a short window around a quarter of the trajectory.
            start = len(z) / 4 - 100
            end = len(z) / 4 + 260
            # start = 0
            # end = len(z)
            plt.plot(z[start:end], y[start:end], self.style[particle],
                     linewidth=1, label=self.label(particle))
        # self.plotField(field, -1.6, 1.6, 0.35, 0.6)
        xmin, xmax = plt.xlim()
        ymin, ymax = plt.ylim()
        self.plotField(field, xmin, xmax, ymin, ymax)
        plt.xlabel('z (m)')
        plt.ylabel('y (m)')
        plt.tight_layout()
        plt.show()
    def animate(self, alpha, beta, div=4, particle='tr+'):
        """Save ``div`` successive frames of one particle's trajectory,
        drawing the already-traversed part faintly behind the current leg."""
        s = settings.Settings()
        app = mag.Application()
        field = SmoothField()
        field.alpha = alpha
        field.beta = beta
        field.Bz0 = app.fieldBaseStrength[0]
        outfile = self.filename(particle, alpha, beta)
        s.outfile = outfile
        with open(s.outpath()) as f:
            t, x, y, z = extractData(f, [0, 1, 2, 3])
        length = len(z) / div
        for i in xrange(div):
            start = i * length
            # if i > 0:
            #     start = (i-1) * length
            # start=0
            end = (i+1) * length
            if i > 0:
                plt.plot(z[0:start], y[0:start], 'm-', alpha=0.5,
                         linewidth=0.5, label=self.label(particle))
            plt.plot(z[start:end], y[start:end], self.style[particle],
                     linewidth=1, label=self.label(particle))
            self.plotField(field, -1.6, 1.6, 0.35, 0.6)
            # xmin, xmax = plt.xlim()
            # ymin, ymax = plt.ylim()
            # self.plotField(field, xmin, xmax, ymin, ymax)
            plt.xlabel('$z$ (m)')
            plt.ylabel('$y$ (m)')
            plt.tight_layout()
            s.outext = '_{index:03}.pdf'.format(index=i)
            plt.savefig(s.outpath())
            plt.show()
            plt.clf()
    # The main interest in this particular field is conveyed by the right view
    def plotField(self, field, xmin, xmax, ymin, ymax, res=1000):
        """Overlay labeled contours of the field's z component on the plot."""
        XX = np.arange(xmin, xmax, (xmax-xmin)/res)
        YY = np.arange(ymin, ymax, (ymax-ymin)/res)
        Z, Y = np.meshgrid(XX, YY)
        CS = plt.contour(Z, Y, field.zField(0.5, Y, Z), cmap='autumn_r')
        plt.clabel(CS, fontsize=9, inline=1, colors='k')
    def execute(self, alpha, beta, animate):
        """Simulate, then either animate frame-by-frame or plot superimposed."""
        self.simulate(alpha, beta)
        if animate:
            for particle in self.particles:
                self.animate(alpha, beta, particle=particle)
        else:
            self.plotSuperimposed(alpha, beta)
    def fileSuffix(self, particle):
        """Map a particle code to the filename suffix used for its output."""
        if particle == 'e-':
            return 'electron'
        elif particle == 'de+':
            return 'deuterium'
        elif particle == 'tr+':
            return 'tritium'
        elif particle == 'p-':
            return 'protide'
    def label(self, particle):
        """Map a particle code to its LaTeX plot label."""
        if particle == 'e-':
            return '$e^-$'
        elif particle == 'de+':
            return '$De^+$'
        elif particle == 'tr+':
            return '$Tr^+$'
        elif particle == 'p-':
            return '$H^-$'
    def filename(self, particle, alpha, beta, prefix='smooth_'):
        """Build the output filename from particle and gradient parameters."""
        return prefix + self.fileSuffix(particle) + \
            '_{alpha:>02}_{beta:>02}'.format(alpha=int(alpha*10), beta=int(beta*10))
if __name__ == '__main__':
    import argparse as ap

    # Command-line front end: gradient parameters plus an animate switch.
    parser = ap.ArgumentParser(
        description='Simulation for magnetic field with radial gradient')
    parser.add_argument('--alpha', default=1.0, type=float)
    parser.add_argument('--beta', default=1.0, type=float)
    parser.add_argument('--animate', action='store_true')
    options = parser.parse_args()
    Smooth().execute(options.alpha, options.beta, options.animate)
|
def input_to_int(value):
    """Convert user input to an integer.

    Parameters
    ----------
    value : user input to check

    Returns
    -------
    integer : value cast to an integer

    Raises
    ------
    ValueError : if value is not an integer
    """
    text = str(value)
    # Accept an optional leading sign so negative integers validate too
    # (the previous isdigit()-only check rejected inputs like "-5").
    digits = text[1:] if text[:1] in ('+', '-') else text
    if digits.isdigit():
        return int(text)
    raise ValueError('Expecting integer. Got: "{0}" ({1})'
                     .format(value, type(value)))
|
"""import random
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_list = [alphabet[i] for i in range(len(alphabet))]
#alphabet_list = list(alphabet)
print(alphabet_list)
message_text = "what is a baggins"
cipher_list = list(alphabet_list)
random.shuffle(cipher_list)
print(cipher_list)
cipher_text = ""
for x in message_text:
index = alphabet_list.index(x)
cipher_char = cipher_list[index]
cipher_text += cipher_char
print cipher_text"""
import random
alphabet = "abcdefghijklmnopqrstuvwxyz "
alphabet_list = [alphabet[i] for i in range(len(alphabet))]
#alphabet_list = list(alphabet)
print(alphabet_list)
message_text = "gvxpuogvxogvxoyapt"
cipher_list = ['z', 'k', 'q', 'p', 's', ' ', 'd', 'g', 'a', 'e', 'y', 'h', 'f', 'n', 'v', 'm', 'b', 'l', 't', 'r', 'c', 'i', 'x', 'w', 'u', 'j', 'o']
#random.shuffle(cipher_list)
cipher_text = ""
for x in message_text:
index = cipher_list.index(x)
cipher_char = alphabet_list[index]
cipher_text += cipher_char
print cipher_text
|
# Lambda: an anonymous, inline function expression.
f = lambda x, y: x + y
print(f(1, 4))

# Map: apply a function over every element of a sequence.
ex = [1, 2, 3, 4, 5]
f = lambda x: x ** 2
squares = [f(value) for value in ex]
print(squares)

# Reduce: fold a sequence down to a single accumulated value.
from functools import reduce
total = reduce(lambda a, b: a + b, [1, 2, 3, 4, 5])
print(total)
"""DCG_API URL Configuration
The `urlpatterns` list routes URLs to viewsets. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function viewsets
1. Add an import: from my_app import viewsets
2. Add a URL to urlpatterns: path('', viewsets.home, name='home')
Class-based viewsets
1. Add an import: from other_app.viewsets import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, URLPattern
from rest_framework.routers import SimpleRouter
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from api import viewsets
# Swagger/ReDoc schema for the API documentation endpoint.
schema_view = get_schema_view(
    openapi.Info(
        title="DCG API",
        default_version='v3.14',
        description="read the docs",
        contact=openapi.Contact(email="lelu.awen@hacari.org"),
    ),
    public=False,
    permission_classes=(permissions.IsAuthenticatedOrReadOnly,),
)

router = SimpleRouter()
router.register('user', viewsets.UserViewset)
router.register('interests', viewsets.InterestViewset)
router.register('places', viewsets.PlaceViewset)
router.register('promos', viewsets.PromoViewset)
router.register('events', viewsets.EventViewset)

urlpatterns = [
    path('docs/redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
urlpatterns += router.urls
# Removed a leftover debug print(urlpatterns): URL modules are imported at
# startup, so it spammed the logs on every worker boot.
# i: URLPattern
# for i in router.urls:
#     print(i.name, i.pattern)
|
#!/usr/bin/env python3
'''
__author__ = 'abba y abdullahi ,'
'''
# Draws a multi-coloured outline with turtle graphics.
# Bug fix: the shebang was missing its leading "!" ("#usr/bin/env python3"),
# so the script could not be run directly as an executable.
import turtle
turtle.pencolor('red')
turtle.forward(200)
turtle.left(90)
turtle.pencolor('blue')
turtle.forward(150)
turtle.left(90)
turtle.forward(200)
turtle.pencolor('green')
# NOTE(review): two consecutive forward(200) calls with no turn between
# them draw one straight 400-unit segment; a left(90) may be missing
# here if a closed rectangle was intended -- confirm.
turtle.forward(200)
turtle.left(90)
turtle.pencolor('black')
turtle.forward(150)
turtle.hideturtle()
# Keep the window open until the user clicks it.
turtle.exitonclick()
|
'''
Created on Oct 2, 2012
@author: Gary
'''
import threading
from housemonitor.lib.hmqueue import HMQueue
from housemonitor.lib.base import Base
from send import COSMSend
from housemonitor.lib.constants import Constants
class COSMOutputThread( Base, threading.Thread ):
    '''
    This thread will remove the data off the cosm queue and send it to the COSM web site.
    '''
    # Loop condition for run(); tests can set it to False to stop the thread.
    forever = True

    @property
    def logger_name( self ):
        """ Set the logger level. This needs to be added to house_monitoring_logging.conf"""
        return Constants.LogKeys.outputsCOSM

    # The HMQueue object. Used to receive data.
    _queue = None
    # Object that sends data to COSM.
    _cosm_send = None

    def __init__( self, queue, options, send=None, name=None ):
        '''
        Constructor

        args:

        :param queue: Queue for sending data between threads
        :type HMQueue:
        :param options: options from the command line
        :type dict:
        :param send: optional argument used for test
        :type COSMSend:
        :returns: None
        :raises: None
        '''
        self._queue = queue
        # Idiom fix: compare against None with "is", not "==".
        if send is None:
            self._cosm_send = COSMSend( options )
        else:
            self._cosm_send = send
        super( COSMOutputThread, self ).__init__()
        # Explicit Thread init kept as well, to guarantee the thread machinery
        # is set up regardless of how the Base/Thread MRO resolves super().
        threading.Thread.__init__( self )

    def process( self ):
        '''
        This function does the following:

        #. Wait on data from the queue.
        #. Remove data from previous send.
        #. Unpack data from received packet
        #. send data to COSM_send
        '''
        packet = self._queue.receive()
        data = packet[Constants.Cosm.packet.data]
        data[Constants.Cosm.packet.current_value] = packet[Constants.Cosm.packet.current_value]
        self._cosm_send.output( data )

    def run( self ):
        '''
        The COSM thread will loop forever calling process.
        '''
        while self.forever:
            self.process()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.