text stringlengths 0 1.05M | meta dict |
|---|---|
# Adafruit CC3000 Library Test Data Generator
# Created by Tony DiCola (tony@tonydicola.com)
# Released with the same license as the Adafruit CC3000 library (BSD)
# Create a simple server to listen by default on port 9000 (or on the port specified in
# the first command line parameter), accept any connections, and generate random data for
# testing purposes. If a number + newline is received as input from the connected client
# then random data of that number characters will be sent back in response. Must be
# terminated by hitting ctrl-c to kill the process!
from socket import *
import random
import sys
import threading

# Port to listen on; can be overridden by the first command line argument.
SERVER_PORT = 9000
if len(sys.argv) > 1:
    # BUG FIX: command line arguments arrive as strings, but socket.bind()
    # requires an integer port number.  The original assigned the raw string.
    SERVER_PORT = int(sys.argv[1])

# Create listening socket.
server = socket(AF_INET, SOCK_STREAM)
# Ignore waiting for the socket to close if it's already open. See the python
# socket doc for more info (very bottom of http://docs.python.org/2/library/socket.html).
server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# Listen on any network interface for the specified port.
server.bind(('', SERVER_PORT))
server.listen(5)
def generate_data(size):
    """Return a string of *size* random characters for test traffic.

    Each character is drawn uniformly from the full byte range 0-255.
    BUG FIX: the original used randrange(0, 255), whose stop bound is
    exclusive, so chr(255) could never appear in the generated data.
    """
    return ''.join([chr(random.randrange(0, 256)) for _ in range(size)])
# Worker process to take input and generate random output.
def process_connection(client, address):
    """Serve one connected client until it disconnects.

    Reads newline-terminated lines; each line that parses as an integer N
    gets a reply of N random characters.  Unparseable lines are ignored.
    """
    # Wrap the socket in a file-like object so lines can be iterated.
    stream = client.makefile()
    # Read lines until the socket is closed.
    for line in stream:
        try:
            # Try to interpret the input as a number, and generate that amount of
            # random test data to output.
            count = int(line.strip())
            print 'Sending', count, 'bytes of data to', address[0]
            # NOTE(review): count is not bounded, so a huge request builds the
            # whole response in memory.  Fine for a test tool; confirm if reused.
            stream.write(generate_data(count))
            stream.flush()
        except ValueError:
            # Couldn't parse the provided count. Ignore it and wait for more input.
            pass
    print 'Closing client connection from', address[0]
    client.close()
try:
# Wait for connections and spawn worker threads to process them.
print 'Waiting for new connections on port', SERVER_PORT
while True:
client, address = server.accept()
print 'Client connected from', address[0]
thread = threading.Thread(target=process_connection, args=(client, address))
thread.setDaemon(True) # Don't block exiting if any threads are still running.
thread.start()
except:
server.close() | {
"repo_name": "tdicola/adafruit_cc3000",
"path": "Rxspeed/generator.py",
"copies": "1",
"size": "2298",
"license": "mit",
"hash": 6969161989438765000,
"line_mean": 34.921875,
"line_max": 89,
"alpha_frac": 0.7458659704,
"autogenerated": false,
"ratio": 3.6246056782334386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9734496759343563,
"avg_score": 0.027194977857975047,
"num_lines": 64
} |
from __future__ import print_function
import os
import click
import ampy.files as files
import ampy.pyboard as pyboard
# Global Pyboard connection shared by all subcommands; opened once in cli().
_board = None


@click.group()
@click.option('--port', '-p', envvar='AMPY_PORT', required=True, type=click.STRING,
              help='Name of serial port for connected board. Can optionally specify with AMPY_PORT environment variable.',
              metavar='PORT')
@click.option('--baud', '-b', envvar='AMPY_BAUD', default=115200, type=click.INT,
              help='Baud rate for the serial connection (default 115200). Can optionally specify with AMPY_BAUD environment variable.',
              metavar='BAUD')
@click.version_option()
def cli(port, baud):
    """ampy - Adafruit MicroPython Tool

    Ampy is a tool to control MicroPython boards over a serial connection. Using
    ampy you can manipulate files on the board's internal filesystem and even run
    scripts.
    """
    # BUG FIX (text): the --port help string misspelled "environemnt".
    # Open the serial connection once; every subcommand reuses it via _board.
    global _board
    _board = pyboard.Pyboard(port, baudrate=baud)
@cli.command()
@click.argument('remote_file')
@click.argument('local_file', type=click.File('wb'), required=False)
def get(remote_file, local_file):
    """
    Retrieve a file from the board.
    Get will download a file from the board and print its contents or save it
    locally. You must pass at least one argument which is the path to the file
    to download from the board. If you don't specify a second argument then
    the file contents will be printed to standard output. However if you pass
    a file name as the second argument then the contents of the downloaded file
    will be saved to that file (overwriting anything inside it!).
    For example to retrieve the boot.py and print it out run:
    ampy --port /board/serial/port get boot.py
    Or to get main.py and save it as main.py locally run:
    ampy --port /board/serial/port get main.py main.py
    """
    # Pull the file contents off the board.
    contents = files.Files(_board).get(remote_file)
    # Write to the destination file when one was given, else dump to stdout.
    if local_file is not None:
        local_file.write(contents)
    else:
        print(contents.decode('utf-8'))
@cli.command()
@click.argument('directory')
def mkdir(directory):
    """
    Create a directory on the board.
    Mkdir will create the specified directory on the board. One argument is
    required, the full path of the directory to create.
    Note that you cannot recursively create a hierarchy of directories with one
    mkdir command, instead you must create each parent directory with separate
    mkdir command calls.
    For example to make a directory under the root called 'code':
    ampy --port /board/serial/port mkdir /code
    """
    # Delegate directly to the board file helper.
    files.Files(_board).mkdir(directory)
@cli.command()
@click.argument('directory', default='/')
def ls(directory):
    """List contents of a directory on the board.
    Can pass an optional argument which is the path to the directory. The
    default is to list the contents of the root, /, path.
    For example to list the contents of the root run:
    ampy --port /board/serial/port ls
    Or to list the contents of the /foo/bar directory on the board run:
    ampy --port /board/serial/port ls /foo/bar
    """
    # One entry per line.
    for entry in files.Files(_board).ls(directory):
        print(entry)
@cli.command()
@click.argument('local_file', type=click.File('rb'))
@click.argument('remote_file', required=False)
def put(local_file, remote_file):
    """Put a file on the board.
    Put will upload a local file to the board. If the file already exists on
    the board it will be overwritten with no warning! You must pass at least
    one argument which is the path to the local file to upload. You can pass
    a second optional argument which is the path and name of the file to put to
    on the connected board.
    For example to upload a main.py from the current directory to the board's
    root run:
    ampy --port /board/serial/port put main.py
    Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
    in the board's root run:
    ampy --port /board/serial/port put ./foo/board_boot.py boot.py
    """
    # Fall back to the local file's basename when no remote name was given.
    if remote_file is None:
        remote_file = os.path.basename(local_file.name)
    # Send the file contents to the board.
    files.Files(_board).put(remote_file, local_file.read())
@cli.command()
@click.argument('remote_file')
def rm(remote_file):
    """Remove a file from the board.
    Remove the specified file from the board's filesystem. Must specify one
    argument which is the path to the file to delete. Note that this can't
    delete directories which have files inside them, but can delete empty
    directories.
    For example to delete main.py from the root of a board run:
    ampy --port /board/serial/port rm main.py
    """
    # Delegate the delete to the board file helper.
    files.Files(_board).rm(remote_file)
@cli.command()
@click.argument('local_file')
@click.option('--no-output', '-n', is_flag=True,
              help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.")
@click.argument('run_arguments', required=False, nargs=-1)
def run(local_file, no_output, run_arguments):
    """Run a script and print its output.
    Run will send the specified file to the board and execute it immediately.
    Any output from the board will be printed to the console (note that this is
    not a 'shell' and you can't send input to the program).
    Note that if your code has a main or infinite loop you should add the --no-output
    option. This will run the script and immediately exit without waiting for
    the script to finish and print output.
    For example to run a test.py script and print any output after it finishes:
    ampy --port /board/serial/port run test.py
    Or to run test.py and not wait for it to finish:
    ampy --port /board/serial/port run --no-output test.py
    """
    # Run the provided file and print its output.
    board_files = files.Files(_board)
    output = board_files.run(local_file, not no_output, run_arguments)
    # BUG FIX: the printing below was commented out, so `run` silently
    # discarded the script's output despite the documented behavior above.
    if output is not None:
        print(output.decode('utf-8'), end='')
@cli.command()
def reset():
    """Perform soft reset/reboot of the board.
    Will connect to the board and perform a soft reset. No arguments are
    necessary:
    ampy --port /board/serial/port reset
    """
    # Enter then exit the raw REPL, in the process the board will be soft reset
    # (part of enter raw REPL).
    _board.enter_raw_repl()
    _board.exit_raw_repl()
if __name__ == '__main__':
cli()
| {
"repo_name": "Neradoc/ampy",
"path": "ampy/cli.py",
"copies": "1",
"size": "8092",
"license": "mit",
"hash": 2411394328299610600,
"line_mean": 36.2903225806,
"line_max": 148,
"alpha_frac": 0.7016806723,
"autogenerated": false,
"ratio": 3.9338842975206614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135564969820661,
"avg_score": null,
"num_lines": null
} |
import ast
import textwrap
from pprint import pformat
from ampy.pyboard import PyboardError
BUFFER_SIZE = 32  # Amount of data to read or write to the serial port at a time.
                  # This is kept small because small chips and USB to serial
                  # bridges usually have very small buffers.


class Files(object):
    """Class to interact with a MicroPython board files over a serial connection.
    Provides functions for listing, uploading, and downloading files from the
    board's filesystem.

    Each operation builds a small MicroPython script, sends it over the raw
    REPL with pyboard.exec_(), and parses whatever the board prints back.
    """

    def __init__(self, pyboard):
        """Initialize the MicroPython board files class using the provided pyboard
        instance. In most cases you should create a Pyboard instance (from
        pyboard.py) which connects to a board over a serial connection and pass
        it in, but you can pass in other objects for testing, etc.
        """
        self._pyboard = pyboard

    def get(self, filename):
        """Retrieve the contents of the specified file and return its contents
        as a byte string.

        Raises RuntimeError when the file does not exist on the board.
        """
        # Open the file and read it a few bytes at a time and print out the
        # raw bytes. Be careful not to overload the UART buffer so only write
        # a few bytes at a time, and don't use print since it adds newlines and
        # expects string data.
        # NOTE(review): `len = sys.stdout.write(...)` inside the board-side
        # script shadows the builtin on the board; harmless but intentional-looking.
        command = """
            import sys
            with open('{0}', 'rb') as infile:
                while True:
                    result = infile.read({1})
                    if result == b'':
                        break
                    len = sys.stdout.write(result)
        """.format(filename, BUFFER_SIZE)
        self._pyboard.enter_raw_repl()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #2, i.e. file doesn't exist and
            # rethrow it as something more descriptive.
            # NOTE(review): on this error path exit_raw_repl() is never called,
            # leaving the board in raw REPL mode -- TODO confirm intended.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such file: {0}'.format(filename))
            else:
                raise ex
        self._pyboard.exit_raw_repl()
        return out

    def ls(self, directory='/'):
        """List the contents of the specified directory (or root if none is
        specified). Returns a list of strings with the names of files in the
        specified directory.

        Raises RuntimeError when the directory does not exist on the board.
        """
        # Execute os.listdir() command on the board.
        command = """
            import os
            print(os.listdir('{0}'))
        """.format(directory)
        self._pyboard.enter_raw_repl()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #2, i.e. directory doesn't exist and
            # rethrow it as something more descriptive.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such directory: {0}'.format(directory))
            else:
                raise ex
        self._pyboard.exit_raw_repl()
        # Parse the result list and return it.  The board printed a Python
        # list literal, so literal_eval turns it back into a list safely.
        return ast.literal_eval(out.decode('utf-8'))

    def mkdir(self, directory):
        """Create the specified directory. Note this cannot create a recursive
        hierarchy of directories, instead each one should be created separately.

        Raises RuntimeError when the directory already exists.
        """
        # Execute os.mkdir command on the board.
        command = """
            import os
            os.mkdir('{0}')
        """.format(directory)
        self._pyboard.enter_raw_repl()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #17, i.e. directory already exists.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 17] EEXIST') != -1:
                raise RuntimeError('Directory already exists: {0}'.format(directory))
            else:
                raise ex
        self._pyboard.exit_raw_repl()

    def put(self, filename, data):
        """Create or update the specified file with the provided data.
        """
        # Open the file for writing on the board and write chunks of data.
        self._pyboard.enter_raw_repl()
        self._pyboard.exec_("f = open('{0}', 'wb')".format(filename))
        size = len(data)
        # Loop through and write a buffer size chunk of data at a time.
        for i in range(0, size, BUFFER_SIZE):
            chunk_size = min(BUFFER_SIZE, size-i)
            # repr() produces a source-code literal of the chunk that can be
            # embedded directly in the command sent to the board.
            chunk = repr(data[i:i+chunk_size])
            # Make sure to send explicit byte strings (handles python 2 compatibility).
            if not chunk.startswith('b'):
                chunk = 'b' + chunk
            self._pyboard.exec_("f.write({0})".format(chunk))
        self._pyboard.exec_('f.close()')
        self._pyboard.exit_raw_repl()

    def rm(self, filename):
        """Remove the specified file or directory.

        Raises RuntimeError when the target does not exist or is a non-empty
        directory.
        """
        command = """
            import os
            os.remove('{0}')
        """.format(filename)
        self._pyboard.enter_raw_repl()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            message = ex.args[2].decode('utf-8')
            # Check if this is an OSError #2, i.e. file/directory doesn't exist
            # and rethrow it as something more descriptive.
            if message.find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such file/directory: {0}'.format(filename))
            # Check for OSError #13, the directory isn't empty.
            if message.find('OSError: [Errno 13] EACCES') != -1:
                raise RuntimeError('Directory is not empty: {0}'.format(filename))
            else:
                raise ex
        self._pyboard.exit_raw_repl()

    def run(self, filename, wait_output=True, run_arguments=[]):
        """Run the provided script and return its output. If wait_output is True
        (default) then wait for the script to finish and then print its output,
        otherwise just run the script and don't wait for any output.

        NOTE(review): run_arguments uses a mutable default list; safe only
        because it is never mutated here.
        """
        self._pyboard.enter_raw_repl()
        # When extra arguments were given, pre-populate sys.argv on the board
        # before the script runs (argv[0] is the script path, as usual).
        if len(run_arguments) > 0:
            argexec = "import sys\n"
            argexec += "sys.argv.append("+pformat(filename)+")\n"
            for arg in run_arguments:
                argQuoted = pformat(str(arg))
                argexec += "sys.argv.append("+argQuoted+")\n"
            self._pyboard.exec_raw_no_follow(argexec)
        out = None
        if wait_output:
            # Run the file and wait for output to return.
            out = self._pyboard.execfile(filename)
        else:
            # Read the file and run it using lower level pyboard functions that
            # won't wait for it to finish or return output.
            with open(filename, 'rb') as infile:
                self._pyboard.exec_raw_no_follow(infile.read())
        self._pyboard.exit_raw_repl()
        return out
| {
"repo_name": "Neradoc/ampy",
"path": "ampy/files.py",
"copies": "1",
"size": "8192",
"license": "mit",
"hash": -7889592218539680000,
"line_mean": 43.2810810811,
"line_max": 87,
"alpha_frac": 0.6042480469,
"autogenerated": false,
"ratio": 4.268890046899426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5373138093799426,
"avg_score": null,
"num_lines": null
} |
import ast
import textwrap
# Amount of data to read or write to the serial port at a time.
BUFFER_SIZE = 32
# This is kept small because small chips and USB to serial
# bridges usually have very small buffers.

# Module-level override consumed by Files.run(): when set (presumably by the
# editor integration that embeds this module), run() executes this path
# instead of its filename argument, then clears it.
SELECTED_TEXT = None
class DirectoryExistsError(Exception):
    """Raised by Files.mkdir when the target directory already exists."""
    pass
class PyboardError(BaseException):
    """Local stand-in for the pyboard error raised on board-side failures.

    NOTE(review): subclasses BaseException, not Exception, so a plain
    `except Exception:` will NOT catch it -- confirm this is deliberate.
    """
    pass
class Files(object):
    """Class to interact with a MicroPython board files over a serial connection.
    Provides functions for listing, uploading, and downloading files from the
    board's filesystem.

    Each operation builds a small MicroPython script, sends it over the raw
    REPL with pyboard.exec_(), and parses whatever the board prints back.
    """

    def __init__(self, pyboard):
        """Initialize the MicroPython board files class using the provided pyboard
        instance. In most cases you should create a Pyboard instance (from
        pyboard.py) which connects to a board over a serial connection and pass
        it in, but you can pass in other objects for testing, etc.
        """
        self._pyboard = pyboard

    def get(self, filename):
        """Retrieve the contents of the specified file and return its contents
        as a byte string.

        Raises RuntimeError when the file does not exist on the board.
        """
        # Open the file and read it a few bytes at a time and print out the
        # raw bytes. Be careful not to overload the UART buffer so only write
        # a few bytes at a time, and don't use print since it adds newlines and
        # expects string data.
        # NOTE(review): the board-side `file = b''` accumulator is never used.
        command = """
            import sys
            file = b''
            with open('{0}', 'rb') as infile:
                while True:
                    result = infile.read({1})
                    if result == b'':
                        break
                    len = sys.stdout.write(result)
        """.format(filename, BUFFER_SIZE)
        self._pyboard.enter_raw()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #2, i.e. file doesn't exist and
            # rethrow it as something more descriptive.
            # NOTE(review): exit_raw() is skipped on this error path, leaving
            # the board in raw REPL mode -- TODO confirm intended.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such file: {0}'.format(filename))
            else:
                raise ex
        self._pyboard.exit_raw()
        return out

    def ls(self, directory):
        """List the contents of the specified directory (or root if none is
        specified). Returns a list of strings with the names of files in the
        specified directory.  Directory entries get a trailing '/' appended.

        Raises RuntimeError when the directory does not exist on the board.
        """
        # Execute os.listdir() command on the board.
        # NOTE(review): this `import stat` is unused -- candidate for removal.
        import stat
        # The board-side script marks directories by attempting to chdir into
        # each entry: success means it's a directory, so a '/' is appended.
        command = """
            try:
                import os
            except ImportError:
                import uos as os
            ls = []
            for item in os.listdir('{0}'):
                try:
                    os.chdir(item)
                    os.chdir("..")
                    item += '/'
                except:
                    pass
                ls.append(item)
            print(ls)
        """.format(directory)
        self._pyboard.enter_raw()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #2, i.e. directory doesn't exist and
            # rethrow it as something more descriptive.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such directory: {0}'.format(directory))
            else:
                raise ex
        self._pyboard.exit_raw()
        # Parse the result list and return it.  The board printed a Python
        # list literal, so literal_eval turns it back into a list safely.
        return ast.literal_eval(out.decode('utf-8'))

    def mkdir(self, directory):
        """Create the specified directory. Note this cannot create a recursive
        hierarchy of directories, instead each one should be created separately

        Raises DirectoryExistsError when the directory already exists.
        """
        # Execute os.mkdir command on the board.
        command = """
            try:
                import os
            except ImportError:
                import uos as os
            os.mkdir('{0}')
        """.format(directory)
        self._pyboard.enter_raw()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            # Check if this is an OSError #17, i.e. directory already exists.
            if ex.args[2].decode('utf-8').find('OSError: [Errno 17] EEXIST') != -1:
                raise DirectoryExistsError(
                    'Directory already exists: {0}'.format(directory))
            else:
                raise ex
        self._pyboard.exit_raw()

    def put(self, filename, data):
        """Create or update the specified file with the provided data.
        """
        # Open the file for writing on the board and write chunks of data.
        self._pyboard.enter_raw()
        self._pyboard.exec_("f = open('{0}', 'wb')".format(filename))
        size = len(data)
        # Loop through and write a buffer size chunk of data at a time.
        for i in range(0, size, BUFFER_SIZE):
            chunk_size = min(BUFFER_SIZE, size-i)
            # repr() produces a source-code literal of the chunk that can be
            # embedded directly in the command sent to the board.
            chunk = repr(data[i:i+chunk_size])
            # Make sure to send explicit byte strings (handles python 2
            # compatibility).
            if not chunk.startswith('b'):
                chunk = 'b' + chunk
            self._pyboard.exec_("f.write({0})".format(chunk))
        self._pyboard.exec_('f.close()')
        self._pyboard.exit_raw()

    def rm(self, filename):
        """Remove the specified file or directory.

        Raises RuntimeError when the target does not exist or is a non-empty
        directory.
        """
        command = """
            try:
                import os
            except ImportError:
                import uos as os
            os.remove('{0}')
        """.format(filename)
        self._pyboard.enter_raw()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            message = ex.args[2].decode('utf-8')
            # Check if this is an OSError #2, i.e. file/directory doesn't exist
            # and rethrow it as something more descriptive.
            if message.find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError(
                    'No such file/directory: {0}'.format(filename))
            # Check for OSError #13, the directory isn't empty.
            if message.find('OSError: [Errno 13] EACCES') != -1:
                raise RuntimeError(
                    'Directory is not empty: {0}'.format(filename))
            else:
                raise ex
        self._pyboard.exit_raw()

    def rmdir(self, directory):
        """Forcefully remove the specified directory and all its children."""
        # Build a script to walk an entire directory structure and delete every
        # file and subfolder. This is tricky because MicroPython has no os.walk
        # or similar function to walk folders, so this code does it manually
        # with recursion and changing directories. For each directory it lists
        # the files and deletes everything it can, i.e. all the files. Then
        # it lists the files again and assumes they are directories (since they
        # couldn't be deleted in the first pass) and recursively clears those
        # subdirectories. Finally when finished clearing all the children the
        # parent directory is deleted.
        command = """
            try:
                import os
            except ImportError:
                import uos as os
            def rmdir(directory):
                os.chdir(directory)
                for f in os.listdir():
                    try:
                        os.remove(f)
                    except OSError:
                        pass
                for f in os.listdir():
                    rmdir(f)
                os.chdir('..')
                os.rmdir(directory)
            rmdir('{0}')
        """.format(directory)
        self._pyboard.enter_raw()
        try:
            out = self._pyboard.exec_(textwrap.dedent(command))
        except PyboardError as ex:
            message = ex.args[2].decode('utf-8')
            # Check if this is an OSError #2, i.e. directory doesn't exist
            # and rethrow it as something more descriptive.
            if message.find('OSError: [Errno 2] ENOENT') != -1:
                raise RuntimeError('No such directory: {0}'.format(directory))
            else:
                raise ex
        self._pyboard.exit_raw()

    def run(self, filename):
        """Run the provided script and show the output in realtime.
        If a print callback was provided in the pyboard module, it will be
        used to print the output instead of print() used by the ST console

        When the module-level SELECTED_TEXT is set, it replaces filename for
        this one call and is reset to None afterwards.
        """
        global SELECTED_TEXT
        if(SELECTED_TEXT):
            filename = SELECTED_TEXT
        self._pyboard.enter_raw()
        self._pyboard.execfile(filename)
        self._pyboard.exit_raw()
        SELECTED_TEXT = None
| {
"repo_name": "gepd/uPiotMicroPythonTool",
"path": "tools/ampy/files.py",
"copies": "1",
"size": "10021",
"license": "mit",
"hash": 6284555676198870000,
"line_mean": 37.102661597,
"line_max": 83,
"alpha_frac": 0.5776868576,
"autogenerated": false,
"ratio": 4.5119315623592975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5589618419959298,
"avg_score": null,
"num_lines": null
} |
import atexit
import _rpi_ws281x as ws
class _LED_Data(object):
    """Make the SWIG-wrapped LED color array behave like a Python list of
    24-bit integers, with index and slice access.
    """
    def __init__(self, channel, size):
        self.size = size
        self.channel = channel

    def __getitem__(self, pos):
        """Return the 24-bit RGB color value at the provided position or slice
        of positions.
        """
        # Slices produce a list with one value per selected LED.
        if isinstance(pos, slice):
            return [ws.ws2811_led_get(self.channel, led)
                    for led in xrange(*pos.indices(self.size))]
        # A plain index reads a single LED value.
        return ws.ws2811_led_get(self.channel, pos)

    def __setitem__(self, pos, value):
        """Set the 24-bit RGB color value at the provided position or slice of
        positions.
        """
        # Slices assign consecutive items of value to the selected LEDs.
        if isinstance(pos, slice):
            for offset, led in enumerate(xrange(*pos.indices(self.size))):
                ws.ws2811_led_set(self.channel, led, value[offset])
        # A plain index writes a single LED value.
        else:
            return ws.ws2811_led_set(self.channel, pos, value)
class Adafruit_NeoPixel(object):
    """Driver for a NeoPixel/WS281x LED strip via the _rpi_ws281x SWIG bindings."""

    def __init__(self, num, pin, freq_hz=800000, dma=5, invert=False,
                 brightness=255, channel=0, strip_type=ws.WS2811_STRIP_RGB):
        """Class to represent a NeoPixel/WS281x LED display. Num should be the
        number of pixels in the display, and pin should be the GPIO pin connected
        to the display signal line (must be a PWM pin like 18!). Optional
        parameters are freq, the frequency of the display signal in hertz (default
        800khz), dma, the DMA channel to use (default 5), invert, a boolean
        specifying if the signal line should be inverted (default False), and
        channel, the PWM channel to use (defaults to 0).
        """
        # Create ws2811_t structure and fill in parameters.
        self._leds = ws.new_ws2811_t()
        # Initialize the channels to zero
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)
        # Initialize the channel in use
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        ws.ws2811_channel_t_strip_type_set(self._channel, strip_type)
        # Initialize the controller
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)
        # Grab the led data array.
        self._led_data = _LED_Data(self._channel, num)
        # Substitute for __del__, traps an exit condition and cleans up properly
        atexit.register(self._cleanup)

    def __del__(self):
        # Required because Python will complain about memory leaks
        # However there's no guarantee that "ws" will even be set
        # when the __del__ method for this class is reached.
        if ws != None:
            self._cleanup()

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        # Guarded so the atexit hook and __del__ can both call it safely.
        if self._leds is not None:
            ws.ws2811_fini(self._leds)
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None
            # Note that ws2811_fini will free the memory used by led_data internally.

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.

        Raises RuntimeError when the underlying ws2811_init call fails.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, message))

    def show(self):
        """Update the display with the data from the LED buffer.

        Raises RuntimeError when the underlying ws2811_render call fails.
        """
        resp = ws.ws2811_render(self._leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, message))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self._led_data[n] = color

    def setPixelColorRGB(self, n, red, green, blue, white = 0):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).

        NOTE(review): Color() is not defined anywhere in this copy of the
        module -- calling this method raises NameError as written. The sibling
        copy of this file defines Color at module level; confirm which is
        canonical.
        """
        self.setPixelColor(n, Color(red, green, blue, white))

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness. A brightness
        of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self._led_data

    def numPixels(self):
        """Return the number of pixels in the display."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self._led_data[n]
| {
"repo_name": "vinhui/chistmastree",
"path": "neopixel.py",
"copies": "1",
"size": "5396",
"license": "mit",
"hash": 2701223327000364000,
"line_mean": 36.2137931034,
"line_max": 87,
"alpha_frac": 0.704966642,
"autogenerated": false,
"ratio": 3.0314606741573034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9004229545572136,
"avg_score": 0.046439554117033474,
"num_lines": 145
} |
import atexit
import _rpi_ws281x as ws
def Color(red, green, blue, white = 0):
    """Pack red, green, blue (and optional white) components into one
    32-bit color value. Components are 0-255, 0 = lowest intensity and
    255 = highest intensity.
    """
    # Layout (most to least significant byte): white, red, green, blue.
    return blue | (green << 8) | (red << 16) | (white << 24)
class _LED_Data(object):
    """Wrapper class which makes a SWIG LED color data array look and feel like
    a Python list of integers.
    """
    def __init__(self, channel, size):
        self.size = size
        self.channel = channel

    def __getitem__(self, pos):
        """Return the 24-bit RGB color value at the provided position or slice
        of positions.
        """
        # Handle if a slice of positions are passed in by grabbing all the values
        # and returning them in a list.
        if isinstance(pos, slice):
            # BUG FIX: slice.indices() returns a (start, stop, step) tuple that
            # must be unpacked into range(); range(tuple) raises TypeError.
            return [ws.ws2811_led_get(self.channel, n)
                    for n in range(*pos.indices(self.size))]
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_get(self.channel, pos)

    def __setitem__(self, pos, value):
        """Set the 24-bit RGB color value at the provided position or slice of
        positions.
        """
        # Handle if a slice of positions are passed in by setting the appropriate
        # LED data values to the provided values.
        if isinstance(pos, slice):
            # BUG FIX: unpack the slice.indices() tuple here as well.
            index = 0
            for n in range(*pos.indices(self.size)):
                ws.ws2811_led_set(self.channel, n, value[index])
                index += 1
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_set(self.channel, pos, value)
class Adafruit_NeoPixel(object):
def __init__(self, num, pin, freq_hz=800000, dma=5, invert=False,
brightness=255, channel=0, strip_type=ws.WS2811_STRIP_RGB):
"""Class to represent a NeoPixel/WS281x LED display. Num should be the
number of pixels in the display, and pin should be the GPIO pin connected
to the display signal line (must be a PWM pin like 18!). Optional
parameters are freq, the frequency of the display signal in hertz (default
800khz), dma, the DMA channel to use (default 5), invert, a boolean
specifying if the signal line should be inverted (default False), and
channel, the PWM channel to use (defaults to 0).
"""
# Create ws2811_t structure and fill in parameters.
self._leds = ws.new_ws2811_t()
# Initialize the channels to zero
for channum in range(2):
chan = ws.ws2811_channel_get(self._leds, channum)
ws.ws2811_channel_t_count_set(chan, 0)
ws.ws2811_channel_t_gpionum_set(chan, 0)
ws.ws2811_channel_t_invert_set(chan, 0)
ws.ws2811_channel_t_brightness_set(chan, 0)
# Initialize the channel in use
self._channel = ws.ws2811_channel_get(self._leds, channel)
ws.ws2811_channel_t_count_set(self._channel, num)
ws.ws2811_channel_t_gpionum_set(self._channel, pin)
ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
ws.ws2811_channel_t_brightness_set(self._channel, brightness)
ws.ws2811_channel_t_strip_type_set(self._channel, strip_type)
# Initialize the controller
ws.ws2811_t_freq_set(self._leds, freq_hz)
ws.ws2811_t_dmanum_set(self._leds, dma)
# Grab the led data array.
self._led_data = _LED_Data(self._channel, num)
# Substitute for __del__, traps an exit condition and cleans up properly
atexit.register(self._cleanup)
def __del__(self):
# Required because Python will complain about memory leaks
# However there's no guarantee that "ws" will even be set
# when the __del__ method for this class is reached.
if ws != None:
self._cleanup()
def _cleanup(self):
# Clean up memory used by the library when not needed anymore.
if self._leds is not None:
ws.ws2811_fini(self._leds)
ws.delete_ws2811_t(self._leds)
self._leds = None
self._channel = None
# Note that ws2811_fini will free the memory used by led_data internally.
def begin(self):
"""Initialize library, must be called once before other functions are
called.
"""
resp = ws.ws2811_init(self._leds)
if resp != ws.WS2811_SUCCESS:
message = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, message))
def show(self):
"""Update the display with the data from the LED buffer."""
resp = ws.ws2811_render(self._leds)
if resp != ws.WS2811_SUCCESS:
message = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, message))
def setPixelColor(self, n, color):
    """Store the packed 24-bit RGB value ``color`` at LED index ``n``."""
    self._led_data[n] = color
def setPixelColorRGB(self, n, red, green, blue, white = 0):
    """Set the LED at index n from individual red, green, blue (and
    optional white) components, each a value in the range 0-255.
    """
    packed = Color(red, green, blue, white)
    self.setPixelColor(n, packed)
def setBrightness(self, brightness):
    """Set the global brightness scale applied to every LED in the
    buffer; 0 is fully dark and 255 is full intensity.
    """
    ws.ws2811_channel_t_brightness_set(self._channel, brightness)
def getPixels(self):
    """Expose the underlying LED buffer, usable as a sequence of
    24-bit RGB integer values.
    """
    return self._led_data
def numPixels(self):
    """Return how many pixels this display was configured with."""
    count = ws.ws2811_channel_t_count_get(self._channel)
    return count
def getPixelColor(self, n):
    """Return the packed 24-bit RGB value stored for LED index ``n``."""
    return self._led_data[n]
| {
"repo_name": "weidnerm/pi-ws2812",
"path": "python/neopixel.py",
"copies": "2",
"size": "5683",
"license": "bsd-2-clause",
"hash": -1593879514440192500,
"line_mean": 35.9025974026,
"line_max": 87,
"alpha_frac": 0.7035016717,
"autogenerated": false,
"ratio": 3.045551982851018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47490536545510176,
"avg_score": null,
"num_lines": null
} |
import _rpi_ws281x as ws
import atexit
def Color(red, green, blue):
    """Convert red, green and blue components (each 0-255, where 0 is the
    lowest and 255 the highest intensity) into one 24-bit color value
    laid out as 0xRRGGBB.
    """
    packed = red << 16
    packed |= green << 8
    packed |= blue
    return packed
class _LED_Data(object):
    """Wrapper class which makes a SWIG LED color data array look and feel like
    a Python list of integers.
    """
    def __init__(self, channel, size):
        # channel is the SWIG ws2811_channel_t whose LED array is wrapped;
        # size is the number of LEDs (used to resolve slice bounds).
        self.size = size
        self.channel = channel

    def __getitem__(self, pos):
        """Return the 24-bit RGB color value at the provided position or slice
        of positions.
        """
        # Handle if a slice of positions are passed in by grabbing all the
        # values and returning them in a list.
        if isinstance(pos, slice):
            # Fix: slice.indices() returns a (start, stop, step) tuple which
            # must be unpacked into range(); range(tuple) raises TypeError.
            return [ws.ws2811_led_get(self.channel, n) for n in range(*pos.indices(self.size))]
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_get(self.channel, pos)

    def __setitem__(self, pos, value):
        """Set the 24-bit RGB color value at the provided position or slice of
        positions.
        """
        # Handle if a slice of positions are passed in by setting the
        # appropriate LED data values to the provided values.
        if isinstance(pos, slice):
            # Fix: unpack the (start, stop, step) tuple from slice.indices().
            index = 0
            for n in range(*pos.indices(self.size)):
                ws.ws2811_led_set(self.channel, n, value[index])
                index += 1
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_set(self.channel, pos, value)
class Adafruit_NeoPixel(object):
    """NeoPixel/WS281x LED display driver backed by the _rpi_ws281x C library."""

    def __init__(self, num, pin, freq_hz=800000, dma=5, invert=False, brightness=255, channel=0):
        """Class to represent a NeoPixel/WS281x LED display. Num should be the
        number of pixels in the display, and pin should be the GPIO pin connected
        to the display signal line (must be a PWM pin like 18!). Optional
        parameters are freq, the frequency of the display signal in hertz (default
        800khz), dma, the DMA channel to use (default 5), invert, a boolean
        specifying if the signal line should be inverted (default False), and
        channel, the PWM channel to use (defaults to 0).
        """
        # Create ws2811_t structure and fill in parameters.
        self._leds = ws.new_ws2811_t()
        # Initialize both hardware channels to zero so the C library starts
        # from a known state.
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)
        # Initialize the channel in use.
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        # Initialize the controller.
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)
        # Grab the led data array.
        self._led_data = _LED_Data(self._channel, num)
        # Substitute for __del__: traps the interpreter-exit condition and
        # cleans up properly while the module globals are still alive.
        atexit.register(self._cleanup)

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        # Idempotent: handles are cleared after the first call.
        if self._leds is not None:
            ws.ws2811_fini(self._leds)
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None
            # Note that ws2811_fini will free the memory used by led_data internally.

    def __del__(self):
        # Safety net only; real cleanup is registered with atexit in __init__.
        # At interpreter shutdown module globals may already have been cleared,
        # so guard before touching ``ws``.
        # PEP 8 (E711): compare against None with ``is not``, never ``!=``.
        if ws is not None:
            self._cleanup()

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.  Raises RuntimeError if the C library reports failure.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != 0:
            raise RuntimeError('ws2811_init failed with code {0}'.format(resp))

    def show(self):
        """Update the display with the data from the LED buffer.
        Raises RuntimeError if the C library reports a render failure.
        """
        resp = ws.ws2811_render(self._leds)
        if resp != 0:
            raise RuntimeError('ws2811_render failed with code {0}'.format(resp))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self._led_data[n] = color

    def setPixelColorRGB(self, n, red, green, blue):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        self.setPixelColor(n, Color(red, green, blue))

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness.  A
        brightness of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self._led_data

    def numPixels(self):
        """Return the number of pixels in the display."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self._led_data[n]

    def getPixelColorRGB(self, n):
        """Return an object with .r, .g and .b attributes holding the color
        components of LED n.  Implemented with a lambda used as a cheap
        attribute container (functions accept arbitrary attributes).
        """
        c = lambda: None
        setattr(c, 'r', self._led_data[n] >> 16 & 0xff)
        setattr(c, 'g', self._led_data[n] >> 8 & 0xff)
        setattr(c, 'b', self._led_data[n] & 0xff)
        return c
| {
"repo_name": "wannabeCitizen/FireflySim",
"path": "neopixel.py",
"copies": "2",
"size": "5337",
"license": "mit",
"hash": -4299203039890732500,
"line_mean": 34.1118421053,
"line_max": 94,
"alpha_frac": 0.6987071388,
"autogenerated": false,
"ratio": 3.003376477208779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9502913402027131,
"avg_score": 0.03983404279632967,
"num_lines": 152
} |
import _rpi_ws281x as ws
def Color(red, green, blue):
    """Pack the given red, green and blue intensities (0-255 each, 0 being
    the lowest and 255 the highest intensity) into one 24-bit integer of
    the form 0xRRGGBB.
    """
    value = 0
    for component in (red, green, blue):
        value = (value << 8) | component
    return value
class _LED_Data(object):
    """Wrapper class which makes a SWIG LED color data array look and feel like
    a Python list of integers.
    """
    def __init__(self, channel, size):
        # channel is the SWIG ws2811_channel_t whose LED array is wrapped;
        # size is the number of LEDs (used to resolve slice bounds).
        self.size = size
        self.channel = channel

    def __getitem__(self, pos):
        """Return the 24-bit RGB color value at the provided position or slice
        of positions.
        """
        # Handle if a slice of positions are passed in by grabbing all the
        # values and returning them in a list.
        if isinstance(pos, slice):
            # Fix: slice.indices() returns a (start, stop, step) tuple which
            # must be unpacked into range(); range(tuple) raises TypeError.
            return [ws.ws2811_led_get(self.channel, n) for n in range(*pos.indices(self.size))]
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_get(self.channel, pos)

    def __setitem__(self, pos, value):
        """Set the 24-bit RGB color value at the provided position or slice of
        positions.
        """
        # Handle if a slice of positions are passed in by setting the
        # appropriate LED data values to the provided values.
        if isinstance(pos, slice):
            # Fix: unpack the (start, stop, step) tuple from slice.indices().
            index = 0
            for n in range(*pos.indices(self.size)):
                ws.ws2811_led_set(self.channel, n, value[index])
                index += 1
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_set(self.channel, pos, value)
class Adafruit_NeoPixel(object):
    """NeoPixel/WS281x LED display driver backed by the _rpi_ws281x C library."""

    def __init__(self, num, pin, freq_hz=800000, dma=5, invert=False, brightness=255, channel=0):
        """Class to represent a NeoPixel/WS281x LED display. Num should be the
        number of pixels in the display, and pin should be the GPIO pin connected
        to the display signal line (must be a PWM pin like 18!). Optional
        parameters are freq, the frequency of the display signal in hertz (default
        800khz), dma, the DMA channel to use (default 5), invert, a boolean
        specifying if the signal line should be inverted (default False), and
        channel, the PWM channel to use (defaults to 0).
        """
        # Create ws2811_t structure and fill in parameters.
        self._leds = ws.new_ws2811_t()
        # Initialize both hardware channels to zero so the C library starts
        # from a known state.
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)
        # Initialize the channel in use.
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        # Initialize the controller.
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)
        # Grab the led data array.
        self._led_data = _LED_Data(self._channel, num)

    def __del__(self):
        # Clean up memory used by the library when not needed anymore.
        # Robustness fix: also guard on ``ws`` — __del__ can run during
        # interpreter shutdown, when the module global may already have been
        # cleared to None.
        if ws is not None and self._leds is not None:
            ws.ws2811_fini(self._leds)
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None
            # Note that ws2811_fini will free the memory used by led_data internally.

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.  Raises RuntimeError if the C library reports failure.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != 0:
            raise RuntimeError('ws2811_init failed with code {0}'.format(resp))

    def show(self):
        """Update the display with the data from the LED buffer.
        Raises RuntimeError if the C library reports a render failure.
        """
        resp = ws.ws2811_render(self._leds)
        if resp != 0:
            raise RuntimeError('ws2811_render failed with code {0}'.format(resp))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self._led_data[n] = color

    def setPixelColorRGB(self, n, red, green, blue):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        self.setPixelColor(n, Color(red, green, blue))

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness.  A
        brightness of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self._led_data

    def numPixels(self):
        """Return the number of pixels in the display."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self._led_data[n]
| {
"repo_name": "hemstreet/Spruce-Sign",
"path": "node_modules/rpi-ws281x-native/src/rpi2_ws281x/python/neopixel.py",
"copies": "18",
"size": "5022",
"license": "mit",
"hash": -3563247331529087000,
"line_mean": 35.3913043478,
"line_max": 94,
"alpha_frac": 0.704699323,
"autogenerated": false,
"ratio": 3.0473300970873787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0392143726260069,
"num_lines": 138
} |
import binascii
from functools import reduce
import logging
import time
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Frame framing bytes present in every PN532 command/response frame.
PN532_PREAMBLE = 0x00
PN532_STARTCODE1 = 0x00
PN532_STARTCODE2 = 0xFF
PN532_POSTAMBLE = 0x00
# Frame direction identifier (TFI) bytes: host->PN532 and PN532->host.
PN532_HOSTTOPN532 = 0xD4
PN532_PN532TOHOST = 0xD5
# PN532 Commands
PN532_COMMAND_DIAGNOSE = 0x00
PN532_COMMAND_GETFIRMWAREVERSION = 0x02
PN532_COMMAND_GETGENERALSTATUS = 0x04
PN532_COMMAND_READREGISTER = 0x06
PN532_COMMAND_WRITEREGISTER = 0x08
PN532_COMMAND_READGPIO = 0x0C
PN532_COMMAND_WRITEGPIO = 0x0E
PN532_COMMAND_SETSERIALBAUDRATE = 0x10
PN532_COMMAND_SETPARAMETERS = 0x12
PN532_COMMAND_SAMCONFIGURATION = 0x14
PN532_COMMAND_POWERDOWN = 0x16
PN532_COMMAND_RFCONFIGURATION = 0x32
PN532_COMMAND_RFREGULATIONTEST = 0x58
PN532_COMMAND_INJUMPFORDEP = 0x56
PN532_COMMAND_INJUMPFORPSL = 0x46
PN532_COMMAND_INLISTPASSIVETARGET = 0x4A
PN532_COMMAND_INATR = 0x50
PN532_COMMAND_INPSL = 0x4E
PN532_COMMAND_INDATAEXCHANGE = 0x40
PN532_COMMAND_INCOMMUNICATETHRU = 0x42
PN532_COMMAND_INDESELECT = 0x44
PN532_COMMAND_INRELEASE = 0x52
PN532_COMMAND_INSELECT = 0x54
PN532_COMMAND_INAUTOPOLL = 0x60
PN532_COMMAND_TGINITASTARGET = 0x8C
PN532_COMMAND_TGSETGENERALBYTES = 0x92
PN532_COMMAND_TGGETDATA = 0x86
PN532_COMMAND_TGSETDATA = 0x8E
PN532_COMMAND_TGSETMETADATA = 0x94
PN532_COMMAND_TGGETINITIATORCOMMAND = 0x88
PN532_COMMAND_TGRESPONSETOINITIATOR = 0x90
PN532_COMMAND_TGGETTARGETSTATUS = 0x8A
# Response command codes checked by the driver (command code + 1).
PN532_RESPONSE_INDATAEXCHANGE = 0x41
PN532_RESPONSE_INLISTPASSIVETARGET = 0x4B
PN532_WAKEUP = 0x55
# SPI transport control bytes and the "ready" status value.
PN532_SPI_STATREAD = 0x02
PN532_SPI_DATAWRITE = 0x01
PN532_SPI_DATAREAD = 0x03
PN532_SPI_READY = 0x01
# Card baud rate identifier for ISO14443A (MiFare) cards.
PN532_MIFARE_ISO14443A = 0x00
# Mifare Commands
MIFARE_CMD_AUTH_A = 0x60
MIFARE_CMD_AUTH_B = 0x61
MIFARE_CMD_READ = 0x30
MIFARE_CMD_WRITE = 0xA0
MIFARE_CMD_TRANSFER = 0xB0
MIFARE_CMD_DECREMENT = 0xC0
MIFARE_CMD_INCREMENT = 0xC1
MIFARE_CMD_STORE = 0xC2
MIFARE_ULTRALIGHT_CMD_WRITE = 0xA2
# Prefixes for NDEF Records (to identify record type)
NDEF_URIPREFIX_NONE = 0x00
NDEF_URIPREFIX_HTTP_WWWDOT = 0x01
NDEF_URIPREFIX_HTTPS_WWWDOT = 0x02
NDEF_URIPREFIX_HTTP = 0x03
NDEF_URIPREFIX_HTTPS = 0x04
NDEF_URIPREFIX_TEL = 0x05
NDEF_URIPREFIX_MAILTO = 0x06
NDEF_URIPREFIX_FTP_ANONAT = 0x07
NDEF_URIPREFIX_FTP_FTPDOT = 0x08
NDEF_URIPREFIX_FTPS = 0x09
NDEF_URIPREFIX_SFTP = 0x0A
NDEF_URIPREFIX_SMB = 0x0B
NDEF_URIPREFIX_NFS = 0x0C
NDEF_URIPREFIX_FTP = 0x0D
NDEF_URIPREFIX_DAV = 0x0E
NDEF_URIPREFIX_NEWS = 0x0F
NDEF_URIPREFIX_TELNET = 0x10
NDEF_URIPREFIX_IMAP = 0x11
NDEF_URIPREFIX_RTSP = 0x12
NDEF_URIPREFIX_URN = 0x13
NDEF_URIPREFIX_POP = 0x14
NDEF_URIPREFIX_SIP = 0x15
NDEF_URIPREFIX_SIPS = 0x16
NDEF_URIPREFIX_TFTP = 0x17
NDEF_URIPREFIX_BTSPP = 0x18
NDEF_URIPREFIX_BTL2CAP = 0x19
NDEF_URIPREFIX_BTGOEP = 0x1A
NDEF_URIPREFIX_TCPOBEX = 0x1B
NDEF_URIPREFIX_IRDAOBEX = 0x1C
NDEF_URIPREFIX_FILE = 0x1D
NDEF_URIPREFIX_URN_EPC_ID = 0x1E
NDEF_URIPREFIX_URN_EPC_TAG = 0x1F
NDEF_URIPREFIX_URN_EPC_PAT = 0x20
NDEF_URIPREFIX_URN_EPC_RAW = 0x21
NDEF_URIPREFIX_URN_EPC = 0x22
NDEF_URIPREFIX_URN_NFC = 0x23
# GPIO validation bit and pin identifiers used with READGPIO/WRITEGPIO.
PN532_GPIO_VALIDATIONBIT = 0x80
PN532_GPIO_P30 = 0
PN532_GPIO_P31 = 1
PN532_GPIO_P32 = 2
PN532_GPIO_P33 = 3
PN532_GPIO_P34 = 4
PN532_GPIO_P35 = 5
# Precomputed ACK response and frame-start byte sequences for comparisons.
PN532_ACK = bytearray([0x01, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00])
PN532_FRAME_START = bytearray([0x01, 0x00, 0x00, 0xFF])
# Module-level logger used for debug output of raw frames.
logger = logging.getLogger(__name__)
class PN532(object):
"""PN532 breakout board representation. Requires a SPI connection to the
breakout board. A software SPI connection is recommended as the hardware
SPI on the Raspberry Pi has some issues with the LSB first mode used by the
PN532 (see: http://www.raspberrypi.org/forums/viewtopic.php?f=32&t=98070&p=720659#p720659)
"""
def __init__(self, cs, sclk=None, mosi=None, miso=None, gpio=None,
spi=None):
"""Create an instance of the PN532 class using either software SPI (if
the sclk, mosi, and miso pins are specified) or hardware SPI if a
spi parameter is passed. The cs pin must be a digital GPIO pin.
Optionally specify a GPIO controller to override the default that uses
the board's GPIO pins.
"""
# Default to platform GPIO if not provided.
self._gpio = gpio
if self._gpio is None:
self._gpio = GPIO.get_platform_gpio()
# Initialize CS line.
self._cs = cs
self._gpio.setup(self._cs, GPIO.OUT)
self._gpio.set_high(self._cs)
# Setup SPI provider.
if spi is not None:
logger.debug('Using hardware SPI.')
# Handle using hardware SPI.
self._spi = spi
self._spi.set_clock_hz(1000000)
else:
logger.debug('Using software SPI')
# Handle using software SPI. Note that the CS/SS pin is not used
# as it will be manually controlled by this library for better
# timing.
self._spi = SPI.BitBang(self._gpio, sclk, mosi, miso)
# Set SPI mode and LSB first bit order.
self._spi.set_mode(0)
self._spi.set_bit_order(SPI.LSBFIRST)
def _uint8_add(self, a, b):
"""Add add two values as unsigned 8-bit values."""
return ((a & 0xFF) + (b & 0xFF)) & 0xFF
def _busy_wait_ms(self, ms):
"""Busy wait for the specified number of milliseconds."""
start = time.time()
delta = ms/1000.0
while (time.time() - start) <= delta:
pass
def _write_frame(self, data):
"""Write a frame to the PN532 with the specified data bytearray."""
assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 255 bytes.'
# Build frame to send as:
# - SPI data write (0x01)
# - Preamble (0x00)
# - Start code (0x00, 0xFF)
# - Command length (1 byte)
# - Command length checksum
# - Command bytes
# - Checksum
# - Postamble (0x00)
length = len(data)
frame = bytearray(length+8)
frame[0] = PN532_SPI_DATAWRITE
frame[1] = PN532_PREAMBLE
frame[2] = PN532_STARTCODE1
frame[3] = PN532_STARTCODE2
frame[4] = length & 0xFF
frame[5] = self._uint8_add(~length, 1)
frame[6:-2] = data
checksum = reduce(self._uint8_add, data, 0xFF)
frame[-2] = ~checksum & 0xFF
frame[-1] = PN532_POSTAMBLE
# Send frame.
logger.debug('Write frame: 0x{0}'.format(binascii.hexlify(frame)))
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
self._spi.write(frame)
self._gpio.set_high(self._cs)
def _read_data(self, count):
"""Read a specified count of bytes from the PN532."""
# Build a read request frame.
frame = bytearray(count)
frame[0] = PN532_SPI_DATAREAD
# Send the frame and return the response, ignoring the SPI header byte.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer(frame)
self._gpio.set_high(self._cs)
return response
def _read_frame(self, length):
"""Read a response frame from the PN532 of at most length bytes in size.
Returns the data inside the frame if found, otherwise raises an exception
if there is an error parsing the frame. Note that less than length bytes
might be returned!
"""
# Read frame with expected length of data.
response = self._read_data(length+8)
logger.debug('Read frame: 0x{0}'.format(binascii.hexlify(response)))
# Check frame starts with 0x01 and then has 0x00FF (preceeded by optional
# zeros).
if response[0] != 0x01:
raise RuntimeError('Response frame does not start with 0x01!')
# Swallow all the 0x00 values that preceed 0xFF.
offset = 1
while response[offset] == 0x00:
offset += 1
if offset >= len(response):
raise RuntimeError('Response frame preamble does not contain 0x00FF!')
if response[offset] != 0xFF:
raise RuntimeError('Response frame preamble does not contain 0x00FF!')
offset += 1
if offset >= len(response):
raise RuntimeError('Response contains no data!')
# Check length & length checksum match.
frame_len = response[offset]
if (frame_len + response[offset+1]) & 0xFF != 0:
raise RuntimeError('Response length checksum did not match length!')
# Check frame checksum value matches bytes.
checksum = reduce(self._uint8_add, response[offset+2:offset+2+frame_len+1], 0)
if checksum != 0:
raise RuntimeError('Response checksum did not match expected value!')
# Return frame data.
return response[offset+2:offset+2+frame_len]
def _wait_ready(self, timeout_sec=1):
"""Wait until the PN532 is ready to receive commands. At most wait
timeout_sec seconds for the PN532 to be ready. If the PN532 is ready
before the timeout is exceeded then True will be returned, otherwise
False is returned when the timeout is exceeded.
"""
start = time.time()
# Send a SPI status read command and read response.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
# Loop until a ready response is received.
while response[1] != PN532_SPI_READY:
# Check if the timeout has been exceeded.
if time.time() - start >= timeout_sec:
return False
# Wait a little while and try reading the status again.
time.sleep(0.01)
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
return True
def call_function(self, command, response_length=0, params=[], timeout_sec=1):
"""Send specified command to the PN532 and expect up to response_length
bytes back in a response. Note that less than the expected bytes might
be returned! Params can optionally specify an array of bytes to send as
parameters to the function call. Will wait up to timeout_secs seconds
for a response and return a bytearray of response bytes, or None if no
response is available within the timeout.
"""
# Build frame data with command and parameters.
data = bytearray(2+len(params))
data[0] = PN532_HOSTTOPN532
data[1] = command & 0xFF
data[2:] = params
# Send frame and wait for response.
self._write_frame(data)
if not self._wait_ready(timeout_sec):
return None
# Verify ACK response and wait to be ready for function response.
response = self._read_data(len(PN532_ACK))
if response != PN532_ACK:
raise RuntimeError('Did not receive expected ACK from PN532!')
if not self._wait_ready(timeout_sec):
return None
# Read response bytes.
response = self._read_frame(response_length+2)
# Check that response is for the called function.
if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)):
raise RuntimeError('Received unexpected command response!')
# Return response data.
return response[2:]
def begin(self):
"""Initialize communication with the PN532. Must be called before any
other calls are made against the PN532.
"""
# Assert CS pin low for a second for PN532 to be ready.
self._gpio.set_low(self._cs)
time.sleep(1.0)
# Call GetFirmwareVersion to sync up with the PN532. This might not be
# required but is done in the Arduino library and kept for consistency.
self.get_firmware_version()
self._gpio.set_high(self._cs)
def get_firmware_version(self):
"""Call PN532 GetFirmwareVersion function and return a tuple with the IC,
Ver, Rev, and Support values.
"""
response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)
if response is None:
raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')
return (response[0], response[1], response[2], response[3])
def SAM_configuration(self):
"""Configure the PN532 to read MiFare cards."""
# Send SAM configuration command with configuration for:
# - 0x01, normal mode
# - 0x14, timeout 50ms * 20 = 1 second
# - 0x01, use IRQ pin
# Note that no other verification is necessary as call_function will
# check the command was executed as expected.
self.call_function(PN532_COMMAND_SAMCONFIGURATION, params=[0x01, 0x14, 0x01])
def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A, timeout_sec=1):
"""Wait for a MiFare card to be available and return its UID when found.
Will wait up to timeout_sec seconds and return None if no card is found,
otherwise a bytearray with the UID of the found card is returned.
"""
# Send passive read command for 1 card. Expect at most a 7 byte UUID.
response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET,
params=[0x01, card_baud],
response_length=17)
# If no response is available return None to indicate no card is present.
if response is None:
return None
# Check only 1 card with up to a 7 byte UID is present.
if response[0] != 0x01:
raise RuntimeError('More than one card detected!')
if response[5] > 7:
raise RuntimeError('Found card with unexpectedly long UID!')
# Return UID of card.
return response[6:6+response[5]]
def mifare_classic_authenticate_block(self, uid, block_number, key_number, key):
"""Authenticate specified block number for a MiFare classic card. Uid
should be a byte array with the UID of the card, block number should be
the block to authenticate, key number should be the key type (like
MIFARE_CMD_AUTH_A or MIFARE_CMD_AUTH_B), and key should be a byte array
with the key data. Returns True if the block was authenticated, or False
if not authenticated.
"""
# Build parameters for InDataExchange command to authenticate MiFare card.
uidlen = len(uid)
keylen = len(key)
params = bytearray(3+uidlen+keylen)
params[0] = 0x01 # Max card numbers
params[1] = key_number & 0xFF
params[2] = block_number & 0xFF
params[3:3+keylen] = key
params[3+keylen:] = uid
# Send InDataExchange request and verify response is 0x00.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00
def mifare_classic_read_block(self, block_number):
"""Read a block of data from the card. Block number should be the block
to read. If the block is successfully read a bytearray of length 16 with
data starting at the specified block will be returned. If the block is
not read then None will be returned.
"""
# Send InDataExchange request to read block of MiFare data.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=[0x01, MIFARE_CMD_READ, block_number & 0xFF],
response_length=17)
# Check first response is 0x00 to show success.
if response[0] != 0x00:
return None
# Return first 4 bytes since 16 bytes are always returned.
return response[1:]
def mifare_classic_write_block(self, block_number, data):
"""Write a block of data to the card. Block number should be the block
to write and data should be a byte array of length 16 with the data to
write. If the data is successfully written then True is returned,
otherwise False is returned.
"""
assert data is not None and len(data) == 16, 'Data must be an array of 16 bytes!'
# Build parameters for InDataExchange command to do MiFare classic write.
params = bytearray(19)
params[0] = 0x01 # Max card numbers
params[1] = MIFARE_CMD_WRITE
params[2] = block_number & 0xFF
params[3:] = data
# Send InDataExchange request.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00
| {
"repo_name": "adafruit/Adafruit_Python_PN532",
"path": "Adafruit_PN532/PN532.py",
"copies": "1",
"size": "19600",
"license": "mit",
"hash": 3700621445730146000,
"line_mean": 43.6469248292,
"line_max": 247,
"alpha_frac": 0.608622449,
"autogenerated": false,
"ratio": 3.6621823617339313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47708048107339307,
"avg_score": null,
"num_lines": null
} |
import binascii
import logging
import time
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Frame framing bytes present in every PN532 command/response frame.
PN532_PREAMBLE = 0x00
PN532_STARTCODE1 = 0x00
PN532_STARTCODE2 = 0xFF
PN532_POSTAMBLE = 0x00
# Frame direction identifier (TFI) bytes: host->PN532 and PN532->host.
PN532_HOSTTOPN532 = 0xD4
PN532_PN532TOHOST = 0xD5
# PN532 Commands
PN532_COMMAND_DIAGNOSE = 0x00
PN532_COMMAND_GETFIRMWAREVERSION = 0x02
PN532_COMMAND_GETGENERALSTATUS = 0x04
PN532_COMMAND_READREGISTER = 0x06
PN532_COMMAND_WRITEREGISTER = 0x08
PN532_COMMAND_READGPIO = 0x0C
PN532_COMMAND_WRITEGPIO = 0x0E
PN532_COMMAND_SETSERIALBAUDRATE = 0x10
PN532_COMMAND_SETPARAMETERS = 0x12
PN532_COMMAND_SAMCONFIGURATION = 0x14
PN532_COMMAND_POWERDOWN = 0x16
PN532_COMMAND_RFCONFIGURATION = 0x32
PN532_COMMAND_RFREGULATIONTEST = 0x58
PN532_COMMAND_INJUMPFORDEP = 0x56
PN532_COMMAND_INJUMPFORPSL = 0x46
PN532_COMMAND_INLISTPASSIVETARGET = 0x4A
PN532_COMMAND_INATR = 0x50
PN532_COMMAND_INPSL = 0x4E
PN532_COMMAND_INDATAEXCHANGE = 0x40
PN532_COMMAND_INCOMMUNICATETHRU = 0x42
PN532_COMMAND_INDESELECT = 0x44
PN532_COMMAND_INRELEASE = 0x52
PN532_COMMAND_INSELECT = 0x54
PN532_COMMAND_INAUTOPOLL = 0x60
PN532_COMMAND_TGINITASTARGET = 0x8C
PN532_COMMAND_TGSETGENERALBYTES = 0x92
PN532_COMMAND_TGGETDATA = 0x86
PN532_COMMAND_TGSETDATA = 0x8E
PN532_COMMAND_TGSETMETADATA = 0x94
PN532_COMMAND_TGGETINITIATORCOMMAND = 0x88
PN532_COMMAND_TGRESPONSETOINITIATOR = 0x90
PN532_COMMAND_TGGETTARGETSTATUS = 0x8A
# Response command codes checked by the driver (command code + 1).
PN532_RESPONSE_INDATAEXCHANGE = 0x41
PN532_RESPONSE_INLISTPASSIVETARGET = 0x4B
PN532_WAKEUP = 0x55
# SPI transport control bytes and the "ready" status value.
PN532_SPI_STATREAD = 0x02
PN532_SPI_DATAWRITE = 0x01
PN532_SPI_DATAREAD = 0x03
PN532_SPI_READY = 0x01
# Card baud rate identifier for ISO14443A (MiFare) cards.
PN532_MIFARE_ISO14443A = 0x00
# Mifare Commands
MIFARE_CMD_AUTH_A = 0x60
MIFARE_CMD_AUTH_B = 0x61
MIFARE_CMD_READ = 0x30
MIFARE_CMD_WRITE = 0xA0
MIFARE_CMD_TRANSFER = 0xB0
MIFARE_CMD_DECREMENT = 0xC0
MIFARE_CMD_INCREMENT = 0xC1
MIFARE_CMD_STORE = 0xC2
MIFARE_ULTRALIGHT_CMD_WRITE = 0xA2
# Prefixes for NDEF Records (to identify record type)
NDEF_URIPREFIX_NONE = 0x00
NDEF_URIPREFIX_HTTP_WWWDOT = 0x01
NDEF_URIPREFIX_HTTPS_WWWDOT = 0x02
NDEF_URIPREFIX_HTTP = 0x03
NDEF_URIPREFIX_HTTPS = 0x04
NDEF_URIPREFIX_TEL = 0x05
NDEF_URIPREFIX_MAILTO = 0x06
NDEF_URIPREFIX_FTP_ANONAT = 0x07
NDEF_URIPREFIX_FTP_FTPDOT = 0x08
NDEF_URIPREFIX_FTPS = 0x09
NDEF_URIPREFIX_SFTP = 0x0A
NDEF_URIPREFIX_SMB = 0x0B
NDEF_URIPREFIX_NFS = 0x0C
NDEF_URIPREFIX_FTP = 0x0D
NDEF_URIPREFIX_DAV = 0x0E
NDEF_URIPREFIX_NEWS = 0x0F
NDEF_URIPREFIX_TELNET = 0x10
NDEF_URIPREFIX_IMAP = 0x11
NDEF_URIPREFIX_RTSP = 0x12
NDEF_URIPREFIX_URN = 0x13
NDEF_URIPREFIX_POP = 0x14
NDEF_URIPREFIX_SIP = 0x15
NDEF_URIPREFIX_SIPS = 0x16
NDEF_URIPREFIX_TFTP = 0x17
NDEF_URIPREFIX_BTSPP = 0x18
NDEF_URIPREFIX_BTL2CAP = 0x19
NDEF_URIPREFIX_BTGOEP = 0x1A
NDEF_URIPREFIX_TCPOBEX = 0x1B
NDEF_URIPREFIX_IRDAOBEX = 0x1C
NDEF_URIPREFIX_FILE = 0x1D
NDEF_URIPREFIX_URN_EPC_ID = 0x1E
NDEF_URIPREFIX_URN_EPC_TAG = 0x1F
NDEF_URIPREFIX_URN_EPC_PAT = 0x20
NDEF_URIPREFIX_URN_EPC_RAW = 0x21
NDEF_URIPREFIX_URN_EPC = 0x22
NDEF_URIPREFIX_URN_NFC = 0x23
# GPIO validation bit and pin identifiers used with READGPIO/WRITEGPIO.
PN532_GPIO_VALIDATIONBIT = 0x80
PN532_GPIO_P30 = 0
PN532_GPIO_P31 = 1
PN532_GPIO_P32 = 2
PN532_GPIO_P33 = 3
PN532_GPIO_P34 = 4
PN532_GPIO_P35 = 5
# Precomputed ACK response and frame-start byte sequences for comparisons.
PN532_ACK = bytearray([0x01, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00])
PN532_FRAME_START = bytearray([0x01, 0x00, 0x00, 0xFF])
# Module-level logger used for debug output of raw frames.
logger = logging.getLogger(__name__)
class PN532(object):
"""PN532 breakout board representation. Requires a SPI connection to the
breakout board. A software SPI connection is recommended as the hardware
SPI on the Raspberry Pi has some issues with the LSB first mode used by the
PN532 (see: http://www.raspberrypi.org/forums/viewtopic.php?f=32&t=98070&p=720659#p720659)
"""
def __init__(self, cs, sclk=None, mosi=None, miso=None, gpio=None,
             spi=None):
    """Create an instance of the PN532 class using either software SPI (if
    the sclk, mosi, and miso pins are specified) or hardware SPI if a
    spi parameter is passed. The cs pin must be a digital GPIO pin.
    Optionally specify a GPIO controller to override the default that uses
    the board's GPIO pins.
    """
    # Default to platform GPIO if not provided.
    self._gpio = gpio
    if self._gpio is None:
        self._gpio = GPIO.get_platform_gpio()
    # Initialize CS line.  CS is driven manually by this class (not the SPI
    # layer) and idles high, i.e. deasserted, until a transfer begins.
    self._cs = cs
    self._gpio.setup(self._cs, GPIO.OUT)
    self._gpio.set_high(self._cs)
    # Setup SPI provider.
    if spi is not None:
        logger.debug('Using hardware SPI.')
        # Handle using hardware SPI.
        self._spi = spi
        self._spi.set_clock_hz(1000000)
    else:
        logger.debug('Using software SPI')
        # Handle using software SPI. Note that the CS/SS pin is not used
        # as it will be manually controlled by this library for better
        # timing.
        self._spi = SPI.BitBang(self._gpio, sclk, mosi, miso)
    # Set SPI mode and LSB first bit order (the PN532 is LSB-first on SPI).
    self._spi.set_mode(0)
    self._spi.set_bit_order(SPI.LSBFIRST)
def _uint8_add(self, a, b):
"""Add add two values as unsigned 8-bit values."""
return ((a & 0xFF) + (b & 0xFF)) & 0xFF
def _busy_wait_ms(self, ms):
"""Busy wait for the specified number of milliseconds."""
start = time.time()
delta = ms/1000.0
while (time.time() - start) <= delta:
pass
def _write_frame(self, data):
"""Write a frame to the PN532 with the specified data bytearray."""
assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 255 bytes.'
# Build frame to send as:
# - SPI data write (0x01)
# - Preamble (0x00)
# - Start code (0x00, 0xFF)
# - Command length (1 byte)
# - Command length checksum
# - Command bytes
# - Checksum
# - Postamble (0x00)
length = len(data)
frame = bytearray(length+8)
frame[0] = PN532_SPI_DATAWRITE
frame[1] = PN532_PREAMBLE
frame[2] = PN532_STARTCODE1
frame[3] = PN532_STARTCODE2
frame[4] = length & 0xFF
frame[5] = self._uint8_add(~length, 1)
frame[6:-2] = data
checksum = reduce(self._uint8_add, data, 0xFF)
frame[-2] = ~checksum & 0xFF
frame[-1] = PN532_POSTAMBLE
# Send frame.
logger.debug('Write frame: 0x{0}'.format(binascii.hexlify(frame)))
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
self._spi.write(frame)
self._gpio.set_high(self._cs)
def _read_data(self, count):
"""Read a specified count of bytes from the PN532."""
# Build a read request frame.
frame = bytearray(count)
frame[0] = PN532_SPI_DATAREAD
# Send the frame and return the response, ignoring the SPI header byte.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer(frame)
self._gpio.set_high(self._cs)
return response
def _read_frame(self, length):
"""Read a response frame from the PN532 of at most length bytes in size.
Returns the data inside the frame if found, otherwise raises an exception
if there is an error parsing the frame. Note that less than length bytes
might be returned!
"""
# Read frame with expected length of data.
response = self._read_data(length+8)
logger.debug('Read frame: 0x{0}'.format(binascii.hexlify(response)))
# Check frame starts with 0x010000FF.
if response[0:len(PN532_FRAME_START)] != PN532_FRAME_START:
raise RuntimeError('Response frame does not start with 0x010000FF!')
# Check length & length checksum match.
frame_len = response[4]
if (frame_len + response[5]) & 0xFF != 0:
raise RuntimeError('Response length checksum did not match length!')
# Check frame checksum value matches bytes.
checksum = reduce(self._uint8_add, response[6:6+frame_len+1], 0)
if checksum != 0:
raise RuntimeError('Response checksum did not match expected value!')
# Return frame data.
return response[6:6+frame_len]
def _wait_ready(self, timeout_sec=1):
"""Wait until the PN532 is ready to receive commands. At most wait
timeout_sec seconds for the PN532 to be ready. If the PN532 is ready
before the timeout is exceeded then True will be returned, otherwise
False is returned when the timeout is exceeded.
"""
start = time.time()
# Send a SPI status read command and read response.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
# Loop until a ready response is received.
while response[1] != PN532_SPI_READY:
# Check if the timeout has been exceeded.
if time.time() - start >= timeout_sec:
return False
# Wait a little while and try reading the status again.
time.sleep(0.01)
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
return True
def call_function(self, command, response_length=0, params=[], timeout_sec=1):
"""Send specified command to the PN532 and expect up to response_length
bytes back in a response. Note that less than the expected bytes might
be returned! Params can optionally specify an array of bytes to send as
parameters to the function call. Will wait up to timeout_secs seconds
for a response and return a bytearray of response bytes, or None if no
response is available within the timeout.
"""
# Build frame data with command and parameters.
data = bytearray(2+len(params))
data[0] = PN532_HOSTTOPN532
data[1] = command & 0xFF
data[2:] = params
# Send frame and wait for response.
self._write_frame(data)
if not self._wait_ready(timeout_sec):
return None
# Verify ACK response and wait to be ready for function response.
response = self._read_data(len(PN532_ACK))
if response != PN532_ACK:
raise RuntimeError('Did not receive expected ACK from PN532!')
if not self._wait_ready(timeout_sec):
return None
# Read response bytes.
response = self._read_frame(response_length+2)
# Check that response is for the called function.
if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)):
raise RuntimeError('Received unexpected command response!')
# Return response data.
return response[2:]
def begin(self):
"""Initialize communication with the PN532. Must be called before any
other calls are made against the PN532.
"""
# Assert CS pin low for a second for PN532 to be ready.
self._gpio.set_low(self._cs)
time.sleep(1.0)
# Call GetFirmwareVersion to sync up with the PN532. This might not be
# required but is done in the Arduino library and kept for consistency.
self.get_firmware_version()
self._gpio.set_high(self._cs)
def get_firmware_version(self):
"""Call PN532 GetFirmwareVersion function and return a tuple with the IC,
Ver, Rev, and Support values.
"""
response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)
if response is None:
raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')
return (response[0], response[1], response[2], response[3])
def SAM_configuration(self):
"""Configure the PN532 to read MiFare cards."""
# Send SAM configuration command with configuration for:
# - 0x01, normal mode
# - 0x14, timeout 50ms * 20 = 1 second
# - 0x01, use IRQ pin
# Note that no other verification is necessary as call_function will
# check the command was executed as expected.
self.call_function(PN532_COMMAND_SAMCONFIGURATION, params=[0x01, 0x14, 0x01])
def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A, timeout_sec=1):
"""Wait for a MiFare card to be available and return its UID when found.
Will wait up to timeout_sec seconds and return None if no card is found,
otherwise a bytearray with the UID of the found card is returned.
"""
# Send passive read command for 1 card. Expect at most a 7 byte UUID.
response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET,
params=[0x01, card_baud],
response_length=17)
# If no response is available return None to indicate no card is present.
if response is None:
return None
# Check only 1 card with up to a 7 byte UID is present.
if response[0] != 0x01:
raise RuntimeError('More than one card detected!')
if response[5] > 7:
raise RuntimeError('Found card with unexpectedly long UID!')
# Return UID of card.
return response[6:6+response[5]]
def mifare_classic_authenticate_block(self, uid, block_number, key_number, key):
"""Authenticate specified block number for a MiFare classic card. Uid
should be a byte array with the UID of the card, block number should be
the block to authenticate, key number should be the key type (like
MIFARE_CMD_AUTH_A or MIFARE_CMD_AUTH_B), and key should be a byte array
with the key data. Returns True if the block was authenticated, or False
if not authenticated.
"""
# Build parameters for InDataExchange command to authenticate MiFare card.
uidlen = len(uid)
keylen = len(key)
params = bytearray(3+uidlen+keylen)
params[0] = 0x01 # Max card numbers
params[1] = key_number & 0xFF
params[2] = block_number & 0xFF
params[3:3+keylen] = key
params[3+keylen:] = uid
# Send InDataExchange request and verify response is 0x00.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00
def mifare_classic_read_block(self, block_number):
"""Read a block of data from the card. Block number should be the block
to read. If the block is successfully read a bytearray of length 16 with
data starting at the specified block will be returned. If the block is
not read then None will be returned.
"""
# Send InDataExchange request to read block of MiFare data.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=[0x01, MIFARE_CMD_READ, block_number & 0xFF],
response_length=17)
# Check first response is 0x00 to show success.
if response[0] != 0x00:
return None
# Return first 4 bytes since 16 bytes are always returned.
return response[1:]
def mifare_classic_write_block(self, block_number, data):
"""Write a block of data to the card. Block number should be the block
to write and data should be a byte array of length 16 with the data to
write. If the data is successfully written then True is returned,
otherwise False is returned.
"""
assert data is not None and len(data) == 16, 'Data must be an array of 16 bytes!'
# Build parameters for InDataExchange command to do MiFare classic write.
params = bytearray(19)
params[0] = 0x01 # Max card numbers
params[1] = MIFARE_CMD_WRITE
params[2] = block_number & 0xFF
params[3:] = data
# Send InDataExchange request.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00
| {
"repo_name": "t0adie/Adafruit_python_PN532",
"path": "build/lib.linux-armv7l-2.7/Adafruit_PN532/PN532.py",
"copies": "2",
"size": "19014",
"license": "mit",
"hash": 5865809241019065000,
"line_mean": 43.6338028169,
"line_max": 247,
"alpha_frac": 0.6087619649,
"autogenerated": false,
"ratio": 3.6320916905444127,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240853655444413,
"avg_score": null,
"num_lines": null
} |
import atexit
import logging
import subprocess
import sys
import time
import Adafruit_MPR121.MPR121 as MPR121
import RPi.GPIO as GPIO
import uinput
# Define mapping of capacitive touch pin presses to keyboard button presses.
# NOTE(review): this script uses Python 2 syntax (print statements,
# dict.iteritems) and must be run with a Python 2 interpreter.
KEY_MAPPING = {
                0: uinput.KEY_UP,     # Each line here should define a dict entry
                1: uinput.KEY_DOWN,   # that maps the capacitive touch input number
                2: uinput.KEY_LEFT,   # to an appropriate key press.
                3: uinput.KEY_RIGHT,  #
                4: uinput.KEY_B,      # For reference the list of possible uinput.KEY_*
                5: uinput.KEY_A,      # values you can specify is defined in linux/input.h:
                6: uinput.KEY_ENTER,  # http://www.cs.fsu.edu/~baker/devices/lxr/http/source/linux/include/linux/input.h?v=2.6.11.8
                7: uinput.KEY_SPACE,  #
              }                       # Make sure a cap touch input is defined only
                                      # once or else the program will fail to run!
# Input pin connected to the capacitive touch sensor's IRQ output.
# For the capacitive touch HAT this should be pin 26!
IRQ_PIN = 26
# Don't change the below values unless you know what you're doing.  These help
# adjust the load on the CPU vs. responsiveness of the key detection.
MAX_EVENT_WAIT_SECONDS = 0.5
EVENT_WAIT_SLEEP_SECONDS = 0.1
# Uncomment to enable debug message logging (might slow down key detection).
#logging.basicConfig(level=logging.DEBUG)
# Make sure uinput kernel module is loaded (requires root privileges).
subprocess.check_call(['modprobe', 'uinput'])
# Configure virtual keyboard with all the keys the mapping can emit.
device = uinput.Device(KEY_MAPPING.values())
# Setup the MPR121 capacitive touch sensor.
cap = MPR121.MPR121()
if not cap.begin():
    print 'Failed to initialize MPR121, check your wiring!'
    sys.exit(1)
# Configure GPIO library to listen on IRQ pin for changes.
# Be sure to configure pin with a pull-up because it is open collector when not
# enabled.
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(IRQ_PIN, GPIO.FALLING)
# Release GPIO resources when the process exits for any reason.
atexit.register(GPIO.cleanup)
# Clear any pending interrupts by reading touch state.
cap.touched()
# Event loop to wait for IRQ pin changes and respond to them.
print 'Press Ctrl-C to quit.'
while True:
    # Wait for the IRQ pin to drop or too much time elapses (to help prevent
    # missing an IRQ event and waiting forever).
    start = time.time()
    while (time.time() - start) < MAX_EVENT_WAIT_SECONDS and not GPIO.event_detected(IRQ_PIN):
        time.sleep(EVENT_WAIT_SLEEP_SECONDS)
    # Read touch state: a bitmask with one bit per capacitive input.
    touched = cap.touched()
    # Emit key presses for any touched keys.
    for pin, key in KEY_MAPPING.iteritems():
        # Check if pin is touched by testing its bit in the mask.
        pin_bit = 1 << pin
        if touched & pin_bit:
            # Emit key event when touched.
            logging.debug('Input {0} touched.'.format(pin))
            device.emit_click(key)
| {
"repo_name": "sgillet1007/raspi_capacitive_drums_scripts",
"path": "examples/keyboard.py",
"copies": "3",
"size": "5758",
"license": "mit",
"hash": -4640473046591304000,
"line_mean": 39.5492957746,
"line_max": 130,
"alpha_frac": 0.7110107676,
"autogenerated": false,
"ratio": 3.585305105853051,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018963363399025733,
"num_lines": 142
} |
##--Adafruit tutorials -- http://learn.adafruit.com/adafruits-raspberry-pi-lesson-10-stepper-motors
##--Library that controls a stepper motor
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
# Default delay between coil changes.
# NOTE(review): the original comment says milliseconds, but time.sleep()
# takes seconds, so 5 here would pause 5 s per step -- confirm intended unit.
delay = 5 #milliseconds
#Setup Stepper Motor: BCM pin numbers driving the two coil pairs.
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24
# Configure all four coil control pins as outputs.
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
#Stepper Motor Functions
def forward(delay, steps):
    """Rotate the motor forward by `steps` full step cycles, sleeping
    `delay` seconds between each coil change."""
    # Coil energizing patterns (w1, w2, w3, w4), in forward rotation order.
    sequence = ((1, 0, 0, 1),
                (0, 1, 0, 1),
                (0, 1, 1, 0),
                (1, 0, 1, 0))
    for _ in range(steps):
        for w1, w2, w3, w4 in sequence:
            setStep(w1, w2, w3, w4)
            time.sleep(delay)
def backwards(delay, steps):
    """Rotate the motor backwards by `steps` full step cycles, sleeping
    `delay` seconds between each coil change."""
    # Coil energizing patterns (w1, w2, w3, w4), in reverse rotation order.
    sequence = ((1, 0, 1, 0),
                (0, 1, 1, 0),
                (0, 1, 0, 1),
                (1, 0, 0, 1))
    for _ in range(steps):
        for w1, w2, w3, w4 in sequence:
            setStep(w1, w2, w3, w4)
            time.sleep(delay)
def setStep(w1, w2, w3, w4):
    """Drive the four coil control pins to the given logic levels."""
    coil_pins = (coil_A_1_pin, coil_A_2_pin, coil_B_1_pin, coil_B_2_pin)
    for pin, level in zip(coil_pins, (w1, w2, w3, w4)):
        GPIO.output(pin, level)
| {
"repo_name": "flyinactor91/Raspi-Hardware",
"path": "Motors/StepperLib.py",
"copies": "1",
"size": "1088",
"license": "mit",
"hash": -1012725041734509600,
"line_mean": 21.6666666667,
"line_max": 99,
"alpha_frac": 0.6746323529,
"autogenerated": false,
"ratio": 2.120857699805068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3295490052705068,
"avg_score": null,
"num_lines": null
} |
"""Adagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdagradOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adagrad algorithm.

  @@__init__
  """

  def __init__(self, learning_rate, initial_accumulator_value=0.1,
               use_locking=False, name="Adagrad"):
    """Construct a new Adagrad optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value.  The learning rate.
      initial_accumulator_value: A floating point value.  Starting value for
        the per-variable gradient accumulators; must be positive.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Adagrad".

    Raises:
      ValueError: If the initial_accumulator_value is invalid.
    """
    # Guard clause: a non-positive accumulator start would make the
    # adaptive denominator ill-defined.
    if initial_accumulator_value <= 0.0:
      raise ValueError("initial_accumulator_value must be positive: %s" %
                       initial_accumulator_value)
    super(AdagradOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._initial_accumulator_value = initial_accumulator_value
    # Tensor form of the learning rate; built lazily in _prepare().
    self._learning_rate_tensor = None

  def _create_slots(self, var_list):
    # Create one "accumulator" slot per variable, initialized to the
    # configured starting value.
    for var in var_list:
      init_val = constant_op.constant(self._initial_accumulator_value,
                                      shape=var.get_shape())
      self._get_or_make_slot(var, init_val, "accumulator", self._name)

  def _prepare(self):
    self._learning_rate_tensor = ops.convert_to_tensor(
        self._learning_rate, name="learning_rate")

  def _apply_dense(self, grad, var):
    # Delegate the dense Adagrad update to the fused training op.
    accumulator = self.get_slot(var, "accumulator")
    return training_ops.apply_adagrad(
        var, accumulator, self._learning_rate_tensor, grad,
        use_locking=self._use_locking)

  def _apply_sparse(self, grad, var):
    # Delegate the sparse Adagrad update to the fused training op.
    accumulator = self.get_slot(var, "accumulator")
    return training_ops.sparse_apply_adagrad(
        var, accumulator, self._learning_rate_tensor, grad.values,
        grad.indices, use_locking=self._use_locking)
| {
"repo_name": "pavlovml/tensorflow",
"path": "tensorflow/python/training/adagrad.py",
"copies": "5",
"size": "2358",
"license": "apache-2.0",
"hash": -2534742444182775300,
"line_mean": 37.0322580645,
"line_max": 78,
"alpha_frac": 0.6675148431,
"autogenerated": false,
"ratio": 4.06551724137931,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032831985370993426,
"num_lines": 62
} |
"""Adagrad for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdagradOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adagrad algorithm.

  @@__init__
  """

  def __init__(self, learning_rate, initial_accumulator_value=0.1,
               use_locking=False, name="Adagrad"):
    """Construct a new Adagrad optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value.  The learning rate.
      initial_accumulator_value: A floating point value.  Starting value for
        the per-variable gradient accumulators; must be positive.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Adagrad".

    Raises:
      ValueError: If the initial_accumulator_value is invalid.
    """
    # Reject configurations where the adaptive denominator would be
    # ill-defined from the start.
    if initial_accumulator_value <= 0.0:
      raise ValueError("initial_accumulator_value must be positive: %s" %
                       initial_accumulator_value)
    super(AdagradOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._initial_accumulator_value = initial_accumulator_value
    # Tensor form of the learning rate; built lazily in _prepare().
    self._learning_rate_tensor = None

  def _create_slots(self, var_list):
    # One "accumulator" slot per variable, filled with the starting value.
    for var in var_list:
      init_val = constant_op.constant(self._initial_accumulator_value,
                                      shape=var.get_shape())
      self._get_or_make_slot(var, init_val, "accumulator", self._name)

  def _prepare(self):
    self._learning_rate_tensor = ops.convert_to_tensor(
        self._learning_rate, name="learning_rate")

  def _apply_dense(self, grad, var):
    # Dense update is handled entirely by the fused training op.
    accumulator = self.get_slot(var, "accumulator")
    return training_ops.apply_adagrad(
        var, accumulator, self._learning_rate_tensor, grad,
        use_locking=self._use_locking)

  def _apply_sparse(self, grad, var):
    # Sparse update is handled entirely by the fused training op.
    accumulator = self.get_slot(var, "accumulator")
    return training_ops.sparse_apply_adagrad(
        var, accumulator, self._learning_rate_tensor, grad.values,
        grad.indices, use_locking=self._use_locking)
| {
"repo_name": "brendandburns/tensorflow",
"path": "tensorflow/python/training/adagrad.py",
"copies": "5",
"size": "2248",
"license": "apache-2.0",
"hash": -8380016622748206000,
"line_mean": 37.7586206897,
"line_max": 78,
"alpha_frac": 0.6632562278,
"autogenerated": false,
"ratio": 4.0577617328519855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7221017960651985,
"avg_score": null,
"num_lines": null
} |
""" AdaHessian Optimizer
Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py
Originally licensed MIT, Copyright 2020, David Samuel
"""
import torch
class Adahessian(torch.optim.Optimizer):
    """
    Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second Order Optimizer for Machine Learning"

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 0.1)
        betas ((float, float), optional): coefficients used for computing running averages of gradient and the
            squared hessian trace (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
        hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
        update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
            (to save time) (default: 1)
        n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
        avg_conv_kernel (bool, optional): average the Hessian trace over the spatial
            dims of 4D (conv) parameters before the update (default: False)
    """

    def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
                 hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
        # Validate hyper-parameters up front so bad configs fail fast.
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= hessian_power <= 1.0:
            raise ValueError(f"Invalid Hessian power value: {hessian_power}")

        self.n_samples = n_samples
        self.update_each = update_each
        self.avg_conv_kernel = avg_conv_kernel

        # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training
        self.seed = 2147483647
        self.generator = torch.Generator().manual_seed(self.seed)

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
        super(Adahessian, self).__init__(params, defaults)

        # Attach a Hessian-trace accumulator to every trainable parameter and
        # track how many times its Hessian estimate has been (re)computed.
        for p in self.get_params():
            p.hess = 0.0
            self.state[p]["hessian step"] = 0

    @property
    def is_second_order(self):
        # Signals to training loops that gradients must be created with
        # create_graph=True so second-order terms can be computed.
        return True

    def get_params(self):
        """
        Gets all parameters in all param_groups with gradients
        """
        return (p for group in self.param_groups for p in group['params'] if p.requires_grad)

    def zero_hessian(self):
        """
        Zeros out the accumulated hessian traces.
        """
        # Only reset accumulators that will be refreshed this step (i.e. whose
        # step counter is a multiple of `update_each`); skip the float 0.0
        # placeholders that have never been turned into tensors.
        for p in self.get_params():
            if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
                p.hess.zero_()

    @torch.no_grad()
    def set_hessian(self):
        """
        Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
        """
        params = []
        for p in filter(lambda p: p.grad is not None, self.get_params()):
            if self.state[p]["hessian step"] % self.update_each == 0:  # compute the trace only each `update_each` step
                params.append(p)
            self.state[p]["hessian step"] += 1

        if len(params) == 0:
            return

        if self.generator.device != params[0].device:  # hackish way of casting the generator to the right device
            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)

        grads = [p.grad for p in params]

        for i in range(self.n_samples):
            # Rademacher distribution {-1.0, 1.0}
            zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
            # Hessian-vector product via double backprop; keep the graph alive
            # for every sample except the last one.
            h_zs = torch.autograd.grad(
                grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
            for h_z, z, p in zip(h_zs, zs, params):
                p.hess += h_z * z / self.n_samples  # approximate the expected values of z*(H@z)

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.
        Arguments:
            closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
        """
        loss = None
        if closure is not None:
            loss = closure()

        self.zero_hessian()
        self.set_hessian()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None or p.hess is None:
                    continue

                if self.avg_conv_kernel and p.dim() == 4:
                    p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()

                # Perform correct stepweight decay as in AdamW
                p.mul_(1 - group['lr'] * group['weight_decay'])

                state = self.state[p]

                # State initialization.  Note: the state already holds the
                # "hessian step" counter set in __init__, so length 1 means
                # the moment buffers have not been created yet.
                if len(state) == 1:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of Hessian diagonal square values
                    state['exp_hessian_diag_sq'] = torch.zeros_like(p)

                exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
                exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                k = group['hessian_power']
                # Bias-corrected Hessian moment, raised to hessian_power/2,
                # plays the role Adam's sqrt(v_t) plays.
                denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])

                # make update
                step_size = group['lr'] / bias_correction1
                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/optim/adahessian.py",
"copies": "1",
"size": "6535",
"license": "apache-2.0",
"hash": 489241746170147260,
"line_mean": 40.891025641,
"line_max": 129,
"alpha_frac": 0.5834736037,
"autogenerated": false,
"ratio": 3.7994186046511627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874345636724107,
"avg_score": 0.0017093143254110676,
"num_lines": 156
} |
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment uses the `Sampling`_ method to assess the effect of
uncertain mean annual temperature and total annual precipitation
values on the median value of suspended sediment load of the Waipaoa
River over a 10-year interval. The temperature (T) and precipitation
(P) values are assumed to be uniformly distributed random variables,
with bounds set at +/- 10 percent from their default values. One
hundred samples are chosen from the T-P parameter space using Latin
hypercube sampling, then used as inputs to the Hydrotrend model. A
time series of daily Qs values is generated for each 10-year
run. Dakota calculates the median Qs value for each of the 100 runs
and uses them to calculate moments, 95 percent confidence intervals,
and a PDF and a CDF of the Qs values. From these measures, we can
quantify the probability that Qs exceeds a threshold value due to
uncertainty in the input T and P parameters.
Example
--------
Run this experiment with::
$ python hydrotrend-sampling-study.py
Notes
-----
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
.. _Sampling
http://csdms-dakota.readthedocs.io/en/latest/analysis_methods.html#module-dakotathon.method.sampling
"""
import os
from pymt.components import Sampling, Hydrotrend
from dakotathon.utils import configure_parameters
# Instantiate the model component and the Dakota sampling method.
model, dakota = Hydrotrend(), Sampling()

# Single experiment description: Dakota method settings and Hydrotrend model
# settings share this dict and are split apart by configure_parameters() below.
experiment = {
    'component': type(model).__name__,
    'run_duration': 10,                # years
    'auxiliary_files': 'HYDRO0.HYPS',  # Waipaoa hypsometry
    'samples': 100,
    'sample_type': 'lhs',
    'seed': 17,
    'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
    'response_levels': [5.0],
    'descriptors': ['starting_mean_annual_temperature',
                    'total_annual_precipitation'],
    'variable_type': 'uniform_uncertain',
    'lower_bounds': [12.8, 1.4],
    'upper_bounds': [15.8, 1.8],
    'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
    'response_statistics': 'median',
}

# Split the combined dict into Dakota-specific and model-specific settings.
dakota_parameters, model_parameters = configure_parameters(experiment)
# model.setup() returns the run directory the model was configured in.
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)

# Turn the model config file into a Dakota template file (.dtmpl) so Dakota
# can substitute parameter values into it on each run.
cfg_file = 'HYDRO.IN'  # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file

# Configure and run the Dakota experiment.
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
| {
"repo_name": "mdpiper/AGU-2016",
"path": "hydrotrend-Qs-sampling-study/hydrotrend-sampling-study.py",
"copies": "1",
"size": "2647",
"license": "mit",
"hash": -1752108335903589000,
"line_mean": 35.2602739726,
"line_max": 103,
"alpha_frac": 0.7321496033,
"autogenerated": false,
"ratio": 3.350632911392405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9579096858931979,
"avg_score": 0.0007371311520853311,
"num_lines": 73
} |
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment uses the `Sampling`_ method to assess the effect of
uncertain mean annual temperature (*T*) and total annual precipitation
(*P*) on the maximum value of suspended sediment concentration (*Cs*)
in the Waipaoa River over 1000-year intervals. The *T* and *P* values
are assumed to be uniformly distributed random variables, with bounds
set approximately +/- 25 percent from their default values. One
hundred samples are chosen from the *T-P* parameter space using Latin
hypercube sampling and used as inputs to the Hydrotrend model. A time
series of daily *Cs* values is generated for each 1000-year
run. Dakota calculates the maximum *Cs* value for each of the 100 runs
and uses them to calculate moments, 95 percent confidence intervals,
and a PDF and a CDF of the response. From these measures, we can
quantify the probability that *Cs* exceeds a threshold value due to
uncertainty in the input *T* and *P* parameters, and from this
calculate the return period of a hyperpycnal event.
Example
--------
Run this experiment with::
$ python hydrotrend-Cs-sampling-study.py
Notes
-----
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
.. _Sampling
http://csdms-dakota.readthedocs.io/en/latest/analysis_methods.html#module-dakotathon.method.sampling
"""
import os
from pymt.components import Sampling, Hydrotrend
from dakotathon.utils import configure_parameters
# Instantiate the model component and the Dakota sampling method.
model, dakota = Hydrotrend(), Sampling()

# Single experiment description: Dakota method settings and Hydrotrend model
# settings share this dict and are split apart by configure_parameters() below.
experiment = {
    'component': type(model).__name__,
    'run_duration': 1000,              # years
    'auxiliary_files': 'HYDRO0.HYPS',  # Waipaoa hypsometry
    'bqrt_anthropogenic_factor': 8.0,  # default is 6.0
    'samples': 100,
    'sample_type': 'lhs',
    'seed': 17,
    'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
    'response_levels': [40.0],         # Kettner et al. 2007
    'descriptors': ['starting_mean_annual_temperature',
                    'total_annual_precipitation'],
    'variable_type': 'uniform_uncertain',
    # 'lower_bounds': [10.7, 1.19],    # -25%
    # 'upper_bounds': [17.8, 1.99],    # +25%
    'lower_bounds': [10., 1.],
    'upper_bounds': [20., 2.],
    'response_descriptors': 'channel_exit_water_sediment~suspended__mass_concentration',
    'response_statistics': 'max',
}

# Split the combined dict into Dakota-specific and model-specific settings.
dakota_parameters, model_parameters = configure_parameters(experiment)
# model.setup() returns the run directory the model was configured in.
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)

# Turn the model config file into a Dakota template file (.dtmpl) so Dakota
# can substitute parameter values into it on each run.
cfg_file = 'HYDRO.IN'  # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file

# Configure and run the Dakota experiment.
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
| {
"repo_name": "mdpiper/AGU-2016",
"path": "hydrotrend-Cs-sampling-study/hydrotrend-Cs-sampling-study.py",
"copies": "1",
"size": "2914",
"license": "mit",
"hash": 3669670358931460600,
"line_mean": 36.8441558442,
"line_max": 103,
"alpha_frac": 0.7148249828,
"autogenerated": false,
"ratio": 3.2963800904977374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9509683574790764,
"avg_score": 0.00030429970139479593,
"num_lines": 77
} |
'''adalfns - place to store azurerm functions which call adal routines'''
import json
import codecs
import os
import requests
from datetime import datetime as dt
import adal
from .settings import get_auth_endpoint, get_resource_endpoint
def get_access_token(tenant_id, application_id, application_secret):
    '''get an Azure access token using the adal library.

    Args:
        tenant_id (str): Tenant id of the user's account.
        application_id (str): Application id of a Service Principal account.
        application_secret (str): Application secret (password) of the Service Principal account.

    Returns:
        An Azure authentication token string.
    '''
    # Authenticate against the tenant's authority endpoint with the
    # Service Principal's client-credential flow.
    authority_url = get_auth_endpoint() + tenant_id
    auth_context = adal.AuthenticationContext(authority_url, api_version=None)
    token_reply = auth_context.acquire_token_with_client_credentials(
        get_resource_endpoint(), application_id, application_secret)
    return token_reply.get('accessToken')
def get_access_token_from_cli():
    '''Get an Azure authentication token from CLI's cache.

    Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login'
    recently), or if you are running in Azure Cloud Shell (aka cloud console).

    Returns:
        An Azure authentication token string, or None if one could not be found.
    '''
    # check if running in cloud shell, if so, pick up token from MSI_ENDPOINT
    if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
        endpoint = os.environ['MSI_ENDPOINT']
        headers = {'Metadata': 'true'}
        body = {"resource": "https://management.azure.com/"}
        ret = requests.post(endpoint, headers=headers, data=body)
        return ret.json()['access_token']
    else:  # not running cloud shell
        home = os.path.expanduser('~')
        sub_username = ""
        # 1st identify current subscription
        azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json'
        if not os.path.isfile(azure_profile_path):
            print('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path)
            return None
        # 'utf-8-sig' strips the BOM that the CLI writes into this file.
        with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd:
            subs = json.load(azure_profile_fd)
        for sub in subs['subscriptions']:
            if sub['isDefault']:
                sub_username = sub['user']['name']
        if sub_username == "":
            print('Error from get_access_token_from_cli(): Default subscription not found in ' + \
                azure_profile_path)
            return None
        # look for access_token
        access_keys_path = home + os.sep + '.azure' + os.sep + 'accessTokens.json'
        if not os.path.isfile(access_keys_path):
            print('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path)
            return None
        with open(access_keys_path, 'r') as access_keys_fd:
            keys = json.load(access_keys_fd)
        # loop through accessTokens.json until first unexpired entry found
        for key in keys:
            if key['userId'] != sub_username:
                continue
            # Fixed: these presence checks previously inspected keys[0] (the
            # first entry in the file) instead of the current entry 'key',
            # so a malformed non-first entry was never detected and a
            # malformed first entry falsely rejected valid later entries.
            if 'accessToken' not in key:
                print('Error from get_access_token_from_cli(): accessToken not found in ' + \
                    access_keys_path)
                return None
            if 'tokenType' not in key:
                print('Error from get_access_token_from_cli(): tokenType not found in ' + \
                    access_keys_path)
                return None
            if 'expiresOn' not in key:
                print('Error from get_access_token_from_cli(): expiresOn not found in ' + \
                    access_keys_path)
                return None
            expiry_date_str = key['expiresOn']
            # check date and skip past expired entries; the cache uses two
            # timestamp formats (ISO with 'T'/'Z' suffix, and space-separated)
            if 'T' in expiry_date_str:
                exp_date = dt.strptime(expiry_date_str, '%Y-%m-%dT%H:%M:%S.%fZ')
            else:
                exp_date = dt.strptime(expiry_date_str, '%Y-%m-%d %H:%M:%S.%f')
            if exp_date < dt.now():
                continue
            return key['accessToken']
        # if dropped out of the loop, token expired
        print('Error from get_access_token_from_cli(): token expired. Run \'az login\'')
        return None
| {
"repo_name": "gbowerman/azurerm",
"path": "azurerm/adalfns.py",
"copies": "1",
"size": "4400",
"license": "mit",
"hash": 2724203943865479000,
"line_mean": 40.9047619048,
"line_max": 99,
"alpha_frac": 0.5893181818,
"autogenerated": false,
"ratio": 4.127579737335835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034139299792991917,
"num_lines": 105
} |
# ADALINE MODULE
######################################################

# Debug constant: dump the bias and synaptic weights.
DEBUG_W = 0


class Adaline:
    """Single ADALINE (adaptive linear) artificial neuron with a step
    activation function and delta-rule learning.
    """

    def __init__(self, n, r, inf=-1):
        """Create a neuron.

        Args:
            n: number of dendrites (inputs) of the neuron.
            r: learning rate in (0, 1].
            inf: lower value of the activation function (-1 or 0), default -1.
        """
        self.inf = inf
        self.W = [0] * n    # synaptic weights, one per input
        self.r = r          # learning rate
        self.bias = 0
        self.noise = 1      # fixed input multiplier for the bias term

    def weighted_sum(self, X):
        """Return bias * noise + dot(W, X) for the input vector X."""
        # sum() with a generator replaces the manual index loop; avoids
        # shadowing the builtin 'sum' as the original local variable did.
        return self.bias * self.noise + sum(w * x for w, x in zip(self.W, X))

    def f(self, s):
        """Step activation: return self.inf for s <= 0, otherwise 1.

        Args:
            s: weighted input (weights * input values).
        """
        return self.inf if s <= 0 else 1

    def adjustment(self, X, l, g):
        """Delta-rule weight adjustment.

        Args:
            X: array of inputs.
            l: input label (expected output).
            g: given (actual) output.

        Returns:
            True if any adjustment was needed, False otherwise.
        """
        if l == g:
            return False
        factor = self.r * (l - g)
        self.bias += factor * self.noise
        for i, x in enumerate(X):
            self.W[i] += factor * x
        return True

    def train(self, X, l):
        """Adjust the synaptic weights based on the input and label.

        Args:
            X: array of binary inputs [-1, 1].
            l: label of the given input.

        Returns:
            True if there was any adjustment, False otherwise.
        """
        return self.adjustment(X, l, self.f(self.weighted_sum(X)))

    def classify(self, X):
        """Return the neuron's output (self.inf or 1) for input X."""
        return self.f(self.weighted_sum(X))

    def debug(self, v):
        """Print internal state for debug constant v (DEBUG_W: bias + weights)."""
        print("----=DEBUG=----")
        if v == DEBUG_W:
            print(self.bias)
            for w in self.W:
                print(w)
        print("---------------")

######################################################
| {
"repo_name": "cabraile/RNA-Modules",
"path": "rna_module.py",
"copies": "1",
"size": "2292",
"license": "mit",
"hash": -5527180974364159000,
"line_mean": 29.972972973,
"line_max": 103,
"alpha_frac": 0.4659685864,
"autogenerated": false,
"ratio": 3.6265822784810124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45925508648810126,
"avg_score": null,
"num_lines": null
} |
import logging
import os
import platform
import re
import sys
import subprocess
import threading
import time
from .base import Programmer
from ..errors import AdaLinkError
# OS X GUI-based apps do not have the same PATH as terminal-based shells, so
# append /usr/local/bin (a common install location for tools like OpenOCD).
if platform.system() == 'Darwin':
    os.environ["PATH"] = os.environ["PATH"] + ':/usr/local/bin'
# Module-level logger for this programmer.
logger = logging.getLogger(__name__)
class RasPi2(Programmer):
    """Programmer that drives a target via OpenOCD running on a Raspberry Pi 2,
    twiddling GPIOs as the debug interface.
    """

    # Name used to identify this programmer on the command line.
    name = 'raspi2'

    def __init__(self, openocd_exe=None, openocd_path='', params=None):
        """Create a new instance of the Raspberry Pi 2 communication class. By default
        OpenOCD should be accessible in your system path on the Raspberry Pi
        and it will be used to twiddle GPIOs.

        You can override the OpenOCD executable name by specifying a value in
        the openocd_exe parameter. You can also manually specify the path to the
        OpenOCD executable in the openocd_path parameter.

        Optional command line arguments to OpenOCD can be provided in the
        params parameter as a string.
        """
        # If not provided, pick the appropriate OpenOCD name based on the
        # platform:
        #  - Linux   = openocd
        #  - Mac     = openocd
        #  - Windows = openocd.exe
        if openocd_exe is None:
            system = platform.system()
            if system == 'Linux' or system == 'Darwin':
                openocd_exe = 'openocd'
            elif system == 'Windows':
                openocd_exe = 'openocd.exe'
            else:
                raise AdaLinkError('Unsupported system: {0}'.format(system))
        # Store the path to the OpenOCD tool so it can later be run.
        self._openocd_path = os.path.join(openocd_path, openocd_exe)
        logger.info('Using path to OpenOCD: {0}'.format(self._openocd_path))
        # Apply command line parameters if specified.
        self._openocd_params = []
        if params is not None:
            self._openocd_params.extend(params.split())
            logger.info('Using parameters to OpenOCD: {0}'.format(params))
        # Make sure we have OpenOCD in the system path.
        self._test_openocd()

    def _test_openocd(self):
        """Checks if OpenOCD 0.9.0 is found in the system path or not.

        Raises AdaLinkError if OpenOCD cannot be run or its version is too old.
        """
        # Spawn OpenOCD process with --version and capture its output.
        args = [self._openocd_path, '--version']
        try:
            # universal_newlines=True makes output a str under Python 3 so the
            # regex below works (previously it was bytes, raising TypeError).
            process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT,
                                       universal_newlines=True)
            output, err = process.communicate()
            # Parse out version number from response.
            match = re.search(r'^Open On-Chip Debugger (\S+)', output,
                              re.IGNORECASE | re.MULTILINE)
            if not match:
                return
            # Simple semantic version check to see if OpenOCD version is greater
            # or equal to 0.9.0.
            version = match.group(1).split('.')
            if int(version[0]) > 0:
                # Version 1 or greater, assume it's good (higher than 0.9.0).
                return
            if int(version[0]) == 0 and int(version[1]) >= 9:
                # Version 0.9 or greater, assume it's good.
                return
            # Otherwise assume version is too old because it's below 0.9.0.
            # Fixed: was 'raise RuntimError', a typo that raised NameError.
            raise RuntimeError('OpenOCD version is older than 0.9.0')
        except Exception as ex:
            print('ERROR', ex)
            raise AdaLinkError('Failed to find OpenOCD 0.9.0 or greater! Make '
                               'sure OpenOCD 0.9.0 is installed and in your '
                               'system path.')

    def run_commands(self, commands, timeout_sec=60):
        """Run the provided list of commands with OpenOCD. Commands should be
        a list of strings with OpenOCD commands to run. Returns the
        output of OpenOCD. If execution takes longer than timeout_sec an
        exception will be thrown. Set timeout_sec to None to disable the timeout
        completely.
        """
        # Spawn OpenOCD process and capture its output.
        args = [self._openocd_path]
        args.extend(self._openocd_params)
        for c in commands:
            args.append('-c')
            args.append('"{0}"'.format(c))
        args = ' '.join(args)
        logger.debug('Running OpenOCD command: {0}'.format(args))
        process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, shell=True,
                                   universal_newlines=True)
        if timeout_sec is not None:
            # Use a timer to stop the subprocess if the timeout is exceeded.
            # This helps prevent very subtle issues with deadlocks on reading
            # subprocess output. See: http://stackoverflow.com/a/10012262
            def timeout_exceeded(p):
                # Stop the subprocess and kill the whole program.
                p.kill()
                raise AdaLinkError('OpenOCD process exceeded timeout!')
            timeout = threading.Timer(timeout_sec, timeout_exceeded, [process])
            timeout.start()
        # Grab output of OpenOCD.
        output, err = process.communicate()
        if timeout_sec is not None:
            # Stop timeout timer when communicate call returns.
            timeout.cancel()
        logger.debug('OpenOCD response: {0}'.format(output))
        return output

    def _readmem(self, address, command):
        """Read the specified register with the provided register read command.
        """
        # Build list of commands to read register.
        address = '0x{0:08X}'.format(address)  # Convert address value to hex string.
        commands = [
            'init',
            '{0} {1}'.format(command, address),
            'exit'
        ]
        # Run command and parse output for register value.
        output = self.run_commands(commands)
        match = re.search(r'^{0}: (\S+)'.format(address), output,
                          re.IGNORECASE | re.MULTILINE)
        if match:
            return int(match.group(1), 16)
        else:
            raise AdaLinkError('Could not find expected memory value, is the board connected?')

    def is_connected(self):
        """Return true if the device is connected to the programmer."""
        output = self.run_commands(['init', 'exit'])
        return output.find('Error:') == -1

    def wipe(self):
        """Wipe clean the flash memory of the device. Will happen before any
        programming if requested.
        """
        # There is no general mass erase function with OpenOCD, instead only
        # chip-specific functions. For that reason don't implement a default
        # wipe and instead force cores to subclass and provide their own
        # wipe functionality.
        raise NotImplementedError

    def program(self, hex_files=[], bin_files=[]):
        """Program chip with provided list of hex and/or bin files. Hex_files
        is a list of paths to .hex files, and bin_files is a list of tuples with
        the first value being the path to the .bin file and the second value
        being the integer starting address for the bin file."""
        # Build list of commands to program hex files.
        commands = [
            'init',
            'reset init',
            'halt'
        ]
        # Program each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('flash write_image {0} 0 ihex'.format(f))
        # Program each bin file.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('flash write_image {0} 0x{1:08X} bin'.format(f, addr))
        commands.append('reset run')
        commands.append('exit')
        self.run_commands(commands)

    def readmem32(self, address):
        """Read a 32-bit value from the provided memory address."""
        return self._readmem(address, 'mdw')

    def readmem16(self, address):
        """Read a 16-bit value from the provided memory address."""
        return self._readmem(address, 'mdh')

    def readmem8(self, address):
        """Read a 8-bit value from the provided memory address."""
        return self._readmem(address, 'mdb')

    def escape_path(self, path):
        """Escape the path with Tcl '{}' chars to prevent spaces,
        backslashes, etc. from being misinterpreted.
        """
        return '{{{0}}}'.format(path)
| {
"repo_name": "adafruit/Adafruit_Adalink",
"path": "adalink/programmers/raspi2.py",
"copies": "1",
"size": "8576",
"license": "mit",
"hash": 8282561190954568000,
"line_mean": 41.0392156863,
"line_max": 102,
"alpha_frac": 0.6028451493,
"autogenerated": false,
"ratio": 4.149008224479923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5251853373779923,
"avg_score": null,
"num_lines": null
} |
# adalink Segger JLink Programmer
#
# Python interface to communicate with a JLink device using the native JLinkExe
# tool provided by Segger. Note that you must have installed Segger JLink
# software from:
# https://www.segger.com/jlink-software.html
#
# Additionally the JLinkExe should be in your system path (or explicitly
# provided to the JLink class initializer).
#
# Author: Tony DiCola
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
import threading
import time
from .base import Programmer
from ..errors import AdaLinkError
# Module-level logger for this programmer.
logger = logging.getLogger(__name__)
class JLink(Programmer):
    """Programmer that communicates with a Segger J-Link device by driving the
    native JLinkExe command-line tool.
    """

    # Name used to identify this programmer on the command line.
    name = 'jlink'

    def __init__(self, connected, jlink_exe=None, jlink_path='', params=None):
        """Create a new instance of the JLink communication class. By default
        JLinkExe should be accessible in your system path and it will be used
        to communicate with a connected JLink device.

        You can override the JLinkExe executable name by specifying a value in
        the jlink_exe parameter. You can also manually specify the path to the
        JLinkExe executable in the jlink_path parameter.

        Optional command line arguments to JLinkExe can be provided in the
        params parameter as a string.
        """
        self._connected = connected
        # If not provided, pick the appropriate JLinkExe name based on the
        # platform:
        #  - Linux   = JLinkExe
        #  - Mac     = JLinkExe
        #  - Windows = JLink.exe
        if jlink_exe is None:
            system = platform.system()
            if system == 'Linux':
                jlink_exe = 'JLinkExe'
            elif system == 'Windows':
                jlink_exe = 'JLink.exe'
            elif system == 'Darwin':
                jlink_exe = 'JLinkExe'
            else:
                raise AdaLinkError('Unsupported system: {0}'.format(system))
        # Store the path to the JLinkExe tool so it can later be run.
        self._jlink_path = os.path.join(jlink_path, jlink_exe)
        logger.info('Using path to JLinkExe: {0}'.format(self._jlink_path))
        # Apply command line parameters if specified.
        self._jlink_params = []
        if params is not None:
            self._jlink_params.extend(params.split())
            logger.info('Using parameters to JLinkExe: {0}'.format(params))
        # Make sure we have the J-Link executable in the system path.
        self._test_jlinkexe()

    def _test_jlinkexe(self):
        """Checks if JLinkExe is found in the system path or not.

        Raises AdaLinkError if the executable cannot be spawned.
        """
        # Spawn JLinkExe process and capture its output.
        args = [self._jlink_path]
        args.append('?')
        try:
            process = subprocess.Popen(args, stdout=subprocess.PIPE)
            process.wait()
        except OSError:
            raise AdaLinkError("'{0}' missing. Is the J-Link folder in your system "
                               "path?".format(self._jlink_path))

    def run_filename(self, filename, timeout_sec=60):
        """Run the provided script with JLinkExe. Filename should be a path to
        a script file with JLinkExe commands to run. Returns the output of
        JLinkExe. If execution takes longer than timeout_sec an exception will
        be thrown. Set timeout_sec to None to disable the timeout completely.
        """
        # Spawn JLinkExe process and capture its output.
        args = [self._jlink_path]
        args.extend(self._jlink_params)
        args.append(filename)
        # universal_newlines=True makes output a str under Python 3 so callers
        # can apply string regexes / find() (previously bytes on Python 3).
        process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
        if timeout_sec is not None:
            # Use a timer to stop the subprocess if the timeout is exceeded.
            # This helps prevent very subtle issues with deadlocks on reading
            # subprocess output. See: http://stackoverflow.com/a/10012262
            def timeout_exceeded(p):
                # Stop the subprocess and kill the whole program.
                p.kill()
                raise AdaLinkError('JLink process exceeded timeout!')
            timeout = threading.Timer(timeout_sec, timeout_exceeded, [process])
            timeout.start()
        # Grab output of JLink.
        output, err = process.communicate()
        if timeout_sec is not None:
            # Stop timeout timer when communicate call returns.
            timeout.cancel()
        logger.debug('JLink response: {0}'.format(output))
        return output

    def run_commands(self, commands, timeout_sec=60):
        """Run the provided list of commands with JLinkExe. Commands should be
        a list of strings with JLinkExe commands to run. Returns the
        output of JLinkExe. If execution takes longer than timeout_sec an
        exception will be thrown. Set timeout_sec to None to disable the timeout
        completely.
        """
        # Create temporary file to hold script.
        script_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
        commands = '\n'.join(commands)
        script_file.write(commands)
        script_file.close()
        logger.debug('Using script file name: {0}'.format(script_file.name))
        logger.debug('Running JLink commands: {0}'.format(commands))
        try:
            return self.run_filename(script_file.name, timeout_sec)
        finally:
            # Fixed: the script file was created with delete=False and never
            # removed, leaking one temp file per invocation.
            os.remove(script_file.name)

    def _readmem(self, address, command):
        """Read the specified register with the provided register read command.
        """
        # Build list of commands to read register.
        address = '{0:08X}'.format(address)  # Convert address value to hex string.
        commands = [
            '{0} {1} 1'.format(command, address),
            'q'
        ]
        # Run command and parse output for register value.
        output = self.run_commands(commands)
        match = re.search(r'^{0} = (\S+)'.format(address), output,
                          re.IGNORECASE | re.MULTILINE)
        if match:
            return int(match.group(1), 16)
        else:
            raise AdaLinkError('Could not find expected memory value, are the JLink and board connected?')

    def is_connected(self):
        """Return true if the device is connected to the programmer."""
        output = self.run_commands(['q'])
        return output.find('Info: Found {0}'.format(self._connected)) != -1

    def wipe(self):
        """Wipe clean the flash memory of the device. Will happen before any
        programming if requested.
        """
        # Build list of commands to wipe memory.
        commands = [
            'r',      # Reset
            'erase',  # Erase
            'r',      # Reset
            'q'       # Quit
        ]
        # Run commands.
        self.run_commands(commands)

    def program(self, hex_files=[], bin_files=[]):
        """Program chip with provided list of hex and/or bin files. Hex_files
        is a list of paths to .hex files, and bin_files is a list of tuples with
        the first value being the path to the .bin file and the second value
        being the integer starting address for the bin file."""
        # Build list of commands to program hex files.
        commands = ['r']  # Reset
        # Program each hex file.
        for f in hex_files:
            f = os.path.abspath(f)
            commands.append('loadfile "{0}"'.format(f))
        # Program each bin file.
        for f, addr in bin_files:
            f = os.path.abspath(f)
            commands.append('loadbin "{0}" 0x{1:08X}'.format(f, addr))
        commands.extend([
            'r',  # Reset
            'g',  # Run the MCU
            'q'   # Quit
        ])
        # Run commands.
        self.run_commands(commands)

    def readmem32(self, address):
        """Read a 32-bit value from the provided memory address."""
        return self._readmem(address, 'mem32')

    def readmem16(self, address):
        """Read a 16-bit value from the provided memory address."""
        return self._readmem(address, 'mem16')

    def readmem8(self, address):
        """Read a 8-bit value from the provided memory address."""
        return self._readmem(address, 'mem8')
| {
"repo_name": "georgeredinger/Adafruit_Adalink",
"path": "adalink/programmers/jlink.py",
"copies": "1",
"size": "8160",
"license": "mit",
"hash": 935020230436525700,
"line_mean": 39.8,
"line_max": 106,
"alpha_frac": 0.6118872549,
"autogenerated": false,
"ratio": 4.1421319796954315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5254019234595432,
"avg_score": null,
"num_lines": null
} |
import logging
import os
import platform
import re
import sys
import subprocess
import threading
import time
from .base import Programmer
from ..errors import AdaLinkError
# Module-level logger for this programmer.
logger = logging.getLogger(__name__)
class STLink(Programmer):
# Name used to identify this programmer on the command line.
name = 'stlink'
def __init__(self, openocd_exe=None, openocd_path='', params=None):
"""Create a new instance of the STLink communication class. By default
OpenOCD should be accessible in your system path and it will be used
to communicate with a connected STLink device.
You can override the OpenOCD executable name by specifying a value in
the openocd_exe parameter. You can also manually specify the path to the
OpenOCD executable in the openocd_path parameter.
Optional command line arguments to OpenOCD can be provided in the
params parameter as a string.
"""
# If not provided, pick the appropriate OpenOCD name based on the
# platform:
# - Linux = openocd
# - Mac = openocd
# - Windows = openocd.exe
if openocd_exe is None:
system = platform.system()
if system == 'Linux' or system == 'Darwin':
openocd_exe = 'openocd'
elif system == 'Windows':
openocd_exe = 'openocd.exe'
else:
raise AdaLinkError('Unsupported system: {0}'.format(system))
# Store the path to the OpenOCD tool so it can later be run.
self._openocd_path = os.path.join(openocd_path, openocd_exe)
logger.info('Using path to OpenOCD: {0}'.format(self._openocd_path))
# Apply command line parameters if specified.
self._openocd_params = []
if params is not None:
self._openocd_params.extend(params.split())
logger.info('Using parameters to OpenOCD: {0}'.format(params))
# Make sure we have OpenOCD in the system path
self._test_openocd()
def _test_openocd(self):
"""Checks if OpenOCD 0.9.0 is found in the system path or not."""
# Spawn OpenOCD process with --version and capture its output.
args = [self._openocd_path, '--version']
try:
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, err = process.communicate()
# Parse out version number from response.
match = re.search('^Open On-Chip Debugger (\S+)', output,
re.IGNORECASE | re.MULTILINE)
if not match:
return
# Simple semantic version check to see if OpenOCD version is greater
# or equal to 0.9.0.
version = match.group(1).split('.')
if int(version[0]) > 0:
# Version 1 or greater, assume it's good (higher than 0.9.0).
return
if int(version[0]) == 0 and int(version[1]) >= 9:
# Version 0.9 or greater, assume it's good.
return
# Otherwise assume version is too old because it's below 0.9.0.
raise RuntimError
except Exception as ex:
print 'ERROR', ex
raise AdaLinkError('Failed to find OpenOCD 0.9.0 or greater! Make '
'sure OpenOCD 0.9.0 is installed and in your '
'system path.')
def run_commands(self, commands, timeout_sec=60):
"""Run the provided list of commands with OpenOCD. Commands should be
a list of strings with with OpenOCD commands to run. Returns the
output of OpenOCD. If execution takes longer than timeout_sec an
exception will be thrown. Set timeout_sec to None to disable the timeout
completely.
"""
# Spawn OpenOCD process and capture its output.
args = [self._openocd_path]
args.extend(self._openocd_params)
for c in commands:
args.append('-c')
args.append(c)
logger.debug('Running OpenOCD command: {0}'.format(' '.join(args)))
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if timeout_sec is not None:
# Use a timer to stop the subprocess if the timeout is exceeded.
# This helps prevent very subtle issues with deadlocks on reading
# subprocess output. See: http://stackoverflow.com/a/10012262
def timeout_exceeded(p):
# Stop the subprocess and kill the whole program.
p.kill()
raise AdaLinkError('OpenOCD process exceeded timeout!')
timeout = threading.Timer(timeout_sec, timeout_exceeded, [process])
timeout.start()
# Grab output of STLink.
output, err = process.communicate()
if timeout_sec is not None:
# Stop timeout timer when communicate call returns.
timeout.cancel()
logger.debug('OpenOCD response: {0}'.format(output))
return output
def _readmem(self, address, command):
"""Read the specified register with the provided register read command.
"""
# Build list of commands to read register.
address = '0x{0:08X}'.format(address) # Convert address value to hex string.
commands = [
'init',
'{0} {1}'.format(command, address),
'exit'
]
# Run command and parse output for register value.
output = self.run_commands(commands)
match = re.search('^{0}: (\S+)'.format(address), output,
re.IGNORECASE | re.MULTILINE)
if match:
return int(match.group(1), 16)
else:
raise AdaLinkError('Could not find expected memory value, are the STLink and board connected?')
def is_connected(self):
"""Return true if the device is connected to the programmer."""
output = self.run_commands(['init', 'exit'])
return output.find('Error:') == -1
def wipe(self):
"""Wipe clean the flash memory of the device. Will happen before any
programming if requested.
"""
# There is no general mass erase function with OpenOCD, instead only
# chip-specific functions. For that reason don't implement a default
# wipe and instead force cores to subclass and provide their own
# wipe functionality.
raise NotImplementedError
def program(self, hex_files=[], bin_files=[]):
"""Program chip with provided list of hex and/or bin files. Hex_files
is a list of paths to .hex files, and bin_files is a list of tuples with
the first value being the path to the .bin file and the second value
being the integer starting address for the bin file."""
# Build list of commands to program hex files.
commands = [
'init',
'reset init',
'halt'
]
# Program each hex file.
for f in hex_files:
f = os.path.abspath(f)
commands.append('flash write_image {0} 0 ihex'.format(f))
# Program each bin file.
for f, addr in bin_files:
f = os.path.abspath(f)
commands.append('flash write_image {0} 0x{1:08X} bin'.format(f, addr))
commands.append('reset run')
commands.append('exit')
self.run_commands(commands)
def readmem32(self, address):
"""Read a 32-bit value from the provided memory address."""
return self._readmem(address, 'mdw')
def readmem16(self, address):
"""Read a 16-bit value from the provided memory address."""
return self._readmem(address, 'mdh')
def readmem8(self, address):
"""Read a 8-bit value from the provided memory address."""
return self._readmem(address, 'mdb')
| {
"repo_name": "georgeredinger/Adafruit_Adalink",
"path": "adalink/programmers/stlink.py",
"copies": "1",
"size": "8130",
"license": "mit",
"hash": 1547223673029283600,
"line_mean": 40.9072164948,
"line_max": 107,
"alpha_frac": 0.6022140221,
"autogenerated": false,
"ratio": 4.223376623376623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325590645476623,
"avg_score": null,
"num_lines": null
} |
import logging
import os
import platform
import re
import sys
import subprocess
import threading
import time
from .base import Programmer
from ..errors import AdaLinkError
# OS X GUI-based apps do not have the same PATH as terminal-based shells, so
# append /usr/local/bin (a common install location for tools like OpenOCD).
if platform.system() == 'Darwin':
    os.environ["PATH"] = os.environ["PATH"] + ':/usr/local/bin'
# Module-level logger for this programmer.
logger = logging.getLogger(__name__)
class STLink(Programmer):
    """Programmer that communicates with a connected STLink device by driving
    the OpenOCD command-line tool.
    """

    # Name used to identify this programmer on the command line.
    name = 'stlink'

    def __init__(self, openocd_exe=None, openocd_path='', params=None):
        """Create a new instance of the STLink communication class. By default
        OpenOCD should be accessible in your system path and it will be used
        to communicate with a connected STLink device.

        You can override the OpenOCD executable name by specifying a value in
        the openocd_exe parameter. You can also manually specify the path to the
        OpenOCD executable in the openocd_path parameter.

        Optional command line arguments to OpenOCD can be provided in the
        params parameter as a string.
        """
        # If not provided, pick the appropriate OpenOCD name based on the
        # platform:
        #  - Linux   = openocd
        #  - Mac     = openocd
        #  - Windows = openocd.exe
        if openocd_exe is None:
            system = platform.system()
            if system == 'Linux' or system == 'Darwin':
                openocd_exe = 'openocd'
            elif system == 'Windows':
                openocd_exe = 'openocd.exe'
            else:
                raise AdaLinkError('Unsupported system: {0}'.format(system))
        # Store the path to the OpenOCD tool so it can later be run.
        self._openocd_path = os.path.join(openocd_path, openocd_exe)
        logger.info('Using path to OpenOCD: {0}'.format(self._openocd_path))
        # Apply command line parameters if specified.
        self._openocd_params = []
        if params is not None:
            self._openocd_params.extend(params.split())
            logger.info('Using parameters to OpenOCD: {0}'.format(params))
        # Make sure we have OpenOCD in the system path.
        self._test_openocd()

    def _test_openocd(self):
        """Checks if OpenOCD 0.9.0 is found in the system path or not.

        Raises AdaLinkError if OpenOCD cannot be run or its version is too old.
        """
        # Spawn OpenOCD process with --version and capture its output.
        args = [self._openocd_path, '--version']
        try:
            # universal_newlines=True makes output a str under Python 3 so the
            # regex below works (previously it was bytes, raising TypeError).
            process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT,
                                       universal_newlines=True)
            output, err = process.communicate()
            # Parse out version number from response.
            match = re.search(r'^Open On-Chip Debugger (\S+)', output,
                              re.IGNORECASE | re.MULTILINE)
            if not match:
                return
            # Simple semantic version check to see if OpenOCD version is greater
            # or equal to 0.9.0.
            version = match.group(1).split('.')
            if int(version[0]) > 0:
                # Version 1 or greater, assume it's good (higher than 0.9.0).
                return
            if int(version[0]) == 0 and int(version[1]) >= 9:
                # Version 0.9 or greater, assume it's good.
                return
            # Otherwise assume version is too old because it's below 0.9.0.
            # Fixed: was 'raise RuntimError', a typo that raised NameError.
            raise RuntimeError('OpenOCD version is older than 0.9.0')
        except Exception as ex:
            print('ERROR', ex)
            raise AdaLinkError('Failed to find OpenOCD 0.9.0 or greater! Make '
                               'sure OpenOCD 0.9.0 is installed and in your '
                               'system path.')

    def run_commands(self, commands, timeout_sec=60):
        """Run the provided list of commands with OpenOCD. Commands should be
        a list of strings with OpenOCD commands to run. Returns the
        output of OpenOCD. If execution takes longer than timeout_sec an
        exception will be thrown. Set timeout_sec to None to disable the timeout
        completely.
        """
        # Spawn OpenOCD process and capture its output.
        args = [self._openocd_path]
        args.extend(self._openocd_params)
        for c in commands:
            args.append('-c')
            args.append('"{0}"'.format(c))
        args = ' '.join(args)
        logger.debug('Running OpenOCD command: {0}'.format(args))
        process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, shell=True,
                                   universal_newlines=True)
        if timeout_sec is not None:
            # Use a timer to stop the subprocess if the timeout is exceeded.
            # This helps prevent very subtle issues with deadlocks on reading
            # subprocess output. See: http://stackoverflow.com/a/10012262
            def timeout_exceeded(p):
                # Stop the subprocess and kill the whole program.
                p.kill()
                raise AdaLinkError('OpenOCD process exceeded timeout!')
            timeout = threading.Timer(timeout_sec, timeout_exceeded, [process])
            timeout.start()
        # Grab output of STLink.
        output, err = process.communicate()
        if timeout_sec is not None:
            # Stop timeout timer when communicate call returns.
            timeout.cancel()
        logger.debug('OpenOCD response: {0}'.format(output))
        return output

    def _readmem(self, address, command):
        """Read the specified register with the provided register read command.
        """
        # Build list of commands to read register.
        address = '0x{0:08X}'.format(address)  # Convert address value to hex string.
        commands = [
            'init',
            '{0} {1}'.format(command, address),
            'exit'
        ]
        # Run command and parse output for register value.
        output = self.run_commands(commands)
        match = re.search(r'^{0}: (\S+)'.format(address), output,
                          re.IGNORECASE | re.MULTILINE)
        if match:
            return int(match.group(1), 16)
        else:
            raise AdaLinkError('Could not find expected memory value, are the STLink and board connected?')

    def is_connected(self):
        """Return true if the device is connected to the programmer."""
        output = self.run_commands(['init', 'exit'])
        return output.find('Error:') == -1

    def wipe(self):
        """Wipe clean the flash memory of the device. Will happen before any
        programming if requested.
        """
        # There is no general mass erase function with OpenOCD, instead only
        # chip-specific functions. For that reason don't implement a default
        # wipe and instead force cores to subclass and provide their own
        # wipe functionality.
        raise NotImplementedError

    def program(self, hex_files=[], bin_files=[]):
        """Program chip with provided list of hex and/or bin files. Hex_files
        is a list of paths to .hex files, and bin_files is a list of tuples with
        the first value being the path to the .bin file and the second value
        being the integer starting address for the bin file."""
        # Build list of commands to program hex files.
        commands = [
            'init',
            'reset init',
            'halt'
        ]
        # Program each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('flash write_image {0} 0 ihex'.format(f))
        # Program each bin file.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('flash write_image {0} 0x{1:08X} bin'.format(f, addr))
        commands.append('reset run')
        commands.append('exit')
        self.run_commands(commands)

    def readmem32(self, address):
        """Read a 32-bit value from the provided memory address."""
        return self._readmem(address, 'mdw')

    def readmem16(self, address):
        """Read a 16-bit value from the provided memory address."""
        return self._readmem(address, 'mdh')

    def readmem8(self, address):
        """Read a 8-bit value from the provided memory address."""
        return self._readmem(address, 'mdb')

    def escape_path(self, path):
        """Escape the path with Tcl '{}' chars to prevent spaces,
        backslashes, etc. from being misinterpreted.
        """
        return '{{{0}}}'.format(path)
| {
"repo_name": "adafruit/Adafruit_Adalink",
"path": "adalink/programmers/stlink.py",
"copies": "1",
"size": "8581",
"license": "mit",
"hash": 5563684736325253000,
"line_mean": 41.0637254902,
"line_max": 107,
"alpha_frac": 0.6027269549,
"autogenerated": false,
"ratio": 4.1817738791423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5284500834042299,
"avg_score": null,
"num_lines": null
} |
""" adalist_LBLA.py - list images is part of the
LOB Demo Center mod_python/APACHE web application
$Date: 2008-08-29 16:46:45 +0200 (Fri, 29 Aug 2008) $
$Rev: 67 $
"""
# Copyright 2004-2008 Software AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mod_python import apache
import string
import adabas
import sys
from time import sleep,time
from adabas.api import *
from adabas.datamap import *
from struct import *
# Starting ISN for the sequential read and the maximum number of records
# fetched per request.
STARTISN=0
RCOUNT=1100
# define the mapping of data in record buffer to attributes
# for records of Adabas image file
rec=Datamap( 'AdaList',
             String( 'name', 25),
             String( 'extension', 4),
             String( 'size', 8),
             String( 'length', 2),
           )
#
# HTML template pieces
#
# Page header: banner image plus title.
shead="""
<html>
<head><meta HTTP-EQUIV="content-type" CONTENT="text/html">
<title>ADABAS list of JPG or GIF files</title></head>
<body>
<font face=arial,sans-serif color=#000000>
<table width=100%><tr>
<td><img src="../../Images/adabas2006.jpg" height="70" >
</td><td><font size=+2> ADABAS list of JPG or GIF files </font></td>
</tr></table>
"""
# Table opener with column headings for the record list.
simglist2="""
<p><p>
<table cellpadding=0 cellspacing=0 border=0 width=100%>
<tr><td bgcolor=#ffcc33><img width=1 height=1 alt=""></td>
</tr></table>
<table border=0 cellpadding=3 cellspacing=0 width="100%">
<tr bgcolor=#fff8c0><font size=-1><b>
<td nowrap width=01%> ISN </td>
<td nowrap width=01%>Size [bytes]</td>
<td nowrap width=01%>Name</td>
<td nowrap width=01%>Extension</td>
<td nowrap width=01%>Thumbnail</td>
</b></font></tr>
"""
# Row template for image records (includes a thumbnail link); the %s
# slots are filled with DBID/FNR/ISN/type and display values by select().
simgline="""
<tr bgcolor=#f0f0f0 valign=top><font face=arial,sans-serif>
<td nowrap align=left>
<a href="../menu_LBLA.py/l1ADAimg?DBID=%s&FNR=%s&ISN=%s&TYPE=%s" target="_blank"> %s </a></td>
<td nowrap align=left>%s</td>
<td nowrap align=left>%s</td>
<td nowrap align=left>%s</td>
<td nowrap align=left>
<a href="../menu_LBLA.py/l1ADAimg?DBID=%s&FNR=%s&ISN=%s&TYPE=%s" target="_blank">
<img src="../menu_LBLA.py/l2ADAimg?DBID=%s&FNR=%s&ISN=%s" width="40" height="30"> </a> </td>
</font></tr>
"""
# Row template for non-image records (no thumbnail column).
simgline2="""
<tr bgcolor=#f0f0f0 valign=top><font face=arial,sans-serif>
<td nowrap align=left> <a href="../menu_LBLA.py/l1ADAimg?DBID=%s&FNR=%s&ISN=%s&TYPE=%s" target="_blank"> %s </a></td>
<td nowrap align=left>%s</td>
<td nowrap align=left>%s</td>
<td nowrap align=left>%s</td>
</font></tr>
"""
# Page footer.
# NOTE(review): the '©' below looks like mojibake for a copyright sign --
# confirm the source file's encoding before changing the literal.
stail="""
</font>
<br clear=all></center><p><hr class=z>
<center>
<img src="../../Images/adabas.gif" width="150" height="32" ><br>
<font size=-1><a href="http://www.softwareag.com/adabas/" target="_blank" >powered</a><br>
© 2007 Software AG
</font>
</center>
</body></html>
"""
def select(req,DBID,FNR):
req.content_type="text/html"
req.write(shead)
c1=Adabas(rbl=64,fbl=16,sbl=0,vbl=0,ibl=0)
c1.cb.dbid=int(DBID)
c1.cb.fnr=int(FNR)
c1.cb.cid='LBLA'
c1.fb.value='A1,A2,A3.'
c1.cb.isn=STARTISN
rec.buffer=c1.rb
# write to buffer starting with pos. 0
count=0
counter=0
req.write(simglist2)
for count in range(RCOUNT):
try:
c1.readByIsn(getnext=1)
# req.write ('\nget rsp= %s isn= %s \n' % (c1.cb.rsp,c1.cb.isn) )
if rec.extension in [ 'jpg', 'jpeg' , 'gif' , 'JPG' , 'JPEG' , 'GIF' ] :
req.write(simgline \
%( DBID, \
FNR,
c1.cb.isn, \
rec.extension, \
c1.cb.isn, \
rec.size , \
rec.name, \
rec.extension , \
DBID,
FNR,
c1.cb.isn, \
rec.extension, \
DBID,
FNR,
c1.cb.isn ), \
)
time.sleep(0.08)
else:
req.write(simgline2 \
%( DBID, \
FNR, \
c1.cb.isn, \
rec.extension, \
c1.cb.isn, \
rec.size , \
rec.name, \
rec.extension ), \
)
except DataEnd:
req.write('</table><br>Sequential Read by ISN returned '+str(count)+' record(s).')
req.write(stail)
return
except DatabaseError, (line, c1):
req.write('</table><br>Database Error:'+line )
return
except InterfaceError, (line):
counter=counter + 1
if counter == 100:
req.write('counter= %d' % (counter) )
req.write('\n')
req.write ('Database %s -- not active !! -- ( %s )' % (DBID,line) )
return
sleep(0.5)
# c1.close()
| {
"repo_name": "flavio-casacurta/Nat2Py",
"path": "Adabas/demo/LobDemoCenter/adalist_LBLA.py",
"copies": "1",
"size": "5399",
"license": "mit",
"hash": 977428143709212300,
"line_mean": 28.1837837838,
"line_max": 119,
"alpha_frac": 0.5426930913,
"autogenerated": false,
"ratio": 3.029741863075196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4072434954375196,
"avg_score": null,
"num_lines": null
} |
"""adam.adam_2
"""
# pylint: disable=C0111
import ctypes
import functools
import operator
def trigger_infinite_loop():
    """Mutation-testing fixture: loops exactly once, then breaks.

    When a mutation operator turns `break` into `continue` (or negates the
    loop condition), this function loops forever, exercising the test
    runner's timeout handling.  Do not "clean up" these constructs -- the
    exact source forms are the mutation targets.
    """
    result = None
    # When `break` becomes `continue`, this should enter an infinite loop. This
    # helps us test timeouts.
    # Any object which isn't None passes the truth value testing so here
    # we use `while object()` instead of `while True` b/c the later becomes
    # `while False` when ReplaceTrueFalse is applied and we don't trigger an
    # infinite loop.
    while object():
        result = object()
        break
    # when `while object()` becomes `while not object()`
    # the code below will be triggered
    return result
def single_iteration():
    """Mutation-testing fixture: a for-loop whose body runs exactly once.

    Returns True normally; loop-related mutations make it return None.
    """
    result = None
    iterable = [object()]
    for i in iterable:  # pylint: disable=W0612
        result = True
    return result
def handle_exception():
    """Mutation-testing fixture: raises and immediately catches IOError.

    Returns True normally; presumably exception-related mutations change
    the raised/caught type so the handler no longer matches.
    """
    result = None
    try:
        raise IOError
    except IOError:
        result = True
    return result
def decorator(func):
    """Mark *func* with a truthy `cosmic_ray` attribute (read by
    decorated_func below) and return it unchanged."""
    func.cosmic_ray = True
    return func
@decorator
def decorated_func():
    """Return True when the `cosmic_ray` attribute set by @decorator is
    truthy (i.e. the decorator was applied and not mutated away)."""
    result = None
    if decorated_func.cosmic_ray:
        result = True
    return result
def use_ctypes(size):
    """Copy bytes between two ctypes char arrays of length *size*.

    The copy length is recomputed as 10**6 via functools.reduce just
    before the memmove, so number-increasing mutations overrun the
    buffers and segfault -- presumably callers pass size == 10**6.
    Returns the contents of the destination array.
    """
    array_type = ctypes.c_char * size
    chars_a = array_type(*(b"a" * size))
    chars_b = array_type(*(b"b" * size))
    # This odd construct ensures that, under number mutation to increase number
    # values, `size` varies by amounts big enough to trigger a segfault on the
    # subsequent memmove.
    size = functools.reduce(operator.mul, [10, 10, 10, 10, 10, 10])
    ctypes.memmove(chars_a, chars_b, size)
    return chars_a.value
| {
"repo_name": "sixty-north/cosmic-ray",
"path": "tests/resources/example_project/adam/adam_2.py",
"copies": "1",
"size": "1630",
"license": "mit",
"hash": -5060598191471573000,
"line_mean": 21.3287671233,
"line_max": 79,
"alpha_frac": 0.6484662577,
"autogenerated": false,
"ratio": 3.8902147971360383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5038681054836038,
"avg_score": null,
"num_lines": null
} |
'''Adam Chyb's personal website.'''
from __future__ import absolute_import, print_function
import json
import os
import flask_assetrev
from flask import Flask, g, render_template, send_from_directory
# Create and configure the Flask application; AssetRev wires up
# fingerprinted static-asset URL resolution.
app = Flask(__name__)
app.config.from_object('config')
flask_assetrev.AssetRev(app)
def after_this_request(func):
    '''Adds the given function to the current request's callbacks.'''
    # EAFP: append to the per-request list on flask.g, creating the list
    # the first time a callback is registered during this request.
    try:
        g.after_request_callbacks.append(func)
    except AttributeError:
        g.after_request_callbacks = [func]
    # Returning func lets this double as a decorator.
    return func
@app.route('/', methods=['GET'])
def homepage():
    '''Render the homepage.

    The project list is reloaded from projects.json on every request, and
    a no-store header is attached so browsers always see fresh content.
    '''
    @after_this_request
    def add_no_cache_header(response):
        '''Adds a no cache header to the response.'''
        response.cache_control.no_store = True
        return response

    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection (the old json.load(open(...))).
    with open('projects.json') as projects_file:
        projects = json.load(projects_file)
    return render_template('homepage.html', projects=projects)
@app.route('/favicon.ico')
def favicon():
    '''Delivers the favicon'''
    # Served from static/favicon with an explicit icon MIME type.
    favicon_dir = os.path.join(app.root_path, 'static/favicon')
    return send_from_directory(favicon_dir,
                               'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
@app.after_request
def call_after_request_callbacks(response):
    '''Calls the after request callbacks and sets a default caching policy.'''
    for cb in getattr(g, 'after_request_callbacks', ()):
        cb(response)
    # Default policy -- publicly cacheable for 30 minutes -- applied only
    # when no callback already populated cache_control.
    if not response.cache_control:
        response.cache_control.public = True
        response.cache_control.max_age = 1800
    return response
if __name__ == '__main__':
    # Run Flask's built-in development server when executed directly.
    app.run()
| {
"repo_name": "Chybby/chybby.com",
"path": "chybby.py",
"copies": "1",
"size": "1613",
"license": "mit",
"hash": -5223848490077572000,
"line_mean": 25.8833333333,
"line_max": 78,
"alpha_frac": 0.6540607564,
"autogenerated": false,
"ratio": 3.9245742092457423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 60
} |
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdamOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adam algorithm.

  @@__init__
  """

  def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
               use_locking=False, name="Adam"):
    """Construct a new Adam optimizer.

    Implementation is based on: http://arxiv.org/pdf/1412.6980v7.pdf

    Initialization:

    ```
    m_0 <- 0 (Initialize initial 1st moment vector)
    v_0 <- 0 (Initialize initial 2nd moment vector)
    t <- 0 (Initialize timestep)
    ```

    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section2 of the paper:

    ```
    t <- t + 1
    lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)

    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    v_t <- beta2 * v_{t-1} + (1 - beta2) * g * g
    variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
    ```

    The default value of 1e-8 for epsilon might not be a good default in
    general. For example, when training an Inception network on ImageNet a
    current good choice is 1.0 or 0.1.

    Args:
      learning_rate: A Tensor or a floating point value.  The learning rate.
      beta1: A float value or a constant float tensor.
        The exponential decay rate for the 1st moment estimates.
      beta2: A float value or a constant float tensor.
        The exponential decay rate for the 2nd moment estimates.
      epsilon: A small constant for numerical stability.
      use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".
    """
    super(AdamOptimizer, self).__init__(use_locking, name)
    self._lr = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon

    # Tensor versions of the constructor arguments, created in _prepare().
    self._lr_t = None
    self._beta1_t = None
    self._beta2_t = None
    self._epsilon_t = None

    # Variables to accumulate the powers of the beta parameters.
    # Created in _create_slots when we know the variables to optimize.
    self._beta1_power = None
    self._beta2_power = None

    # Created in SparseApply if needed.
    self._updated_lr = None

  def _get_beta_accumulators(self):
    # Expose the beta1^t / beta2^t accumulator variables.
    return self._beta1_power, self._beta2_power

  def _create_slots(self, var_list):
    # Create the beta1 and beta2 accumulators on the same device as the first
    # variable.
    if self._beta1_power is None:
      with ops.device(var_list[0].device):
        self._beta1_power = variables.Variable(self._beta1, name="beta1_power")
        self._beta2_power = variables.Variable(self._beta2, name="beta2_power")
    # Create slots for the first and second moments.
    for v in var_list:
      self._zeros_slot(v, "m", self._name)
      self._zeros_slot(v, "v", self._name)

  def _prepare(self):
    # Convert the Python-number hyperparameters to tensors once per apply.
    self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
    self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
    self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
    self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")

  def _apply_dense(self, grad, var):
    # Dense updates are handled entirely by the fused apply_adam kernel.
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    return training_ops.apply_adam(
        var, m, v, self._beta1_power, self._beta2_power,
        self._lr_t, self._beta1_t, self._beta2_t,
        self._epsilon_t, grad, use_locking=self._use_locking).op

  def _apply_sparse(self, grad, var):
    # Sparse path re-implements Adam with scatter_add so only the rows
    # present in grad.indices are updated.
    lr = (self._lr_t *
          math_ops.sqrt(1 - self._beta2_power)
          / (1 - self._beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad.values * (1 - self._beta1_t)
    m_t = state_ops.assign(m, m * self._beta1_t,
                           use_locking=self._use_locking)
    m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                                use_locking=self._use_locking)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad.values * grad.values) * (1 - self._beta2_t)
    v_t = state_ops.assign(v, v * self._beta2_t, use_locking=self._use_locking)
    v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                                use_locking=self._use_locking)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(var,
                                      lr * m_t / (v_sqrt + self._epsilon_t),
                                      use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t])

  def _finish(self, update_ops, name_scope):
    # Update the power accumulators.
    with ops.control_dependencies(update_ops):
      with ops.device(self._beta1_power.device):
        update_beta1 = self._beta1_power.assign(
            self._beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = self._beta2_power.assign(
            self._beta2_power * self._beta2_t,
            use_locking=self._use_locking)
    return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                  name=name_scope)
| {
"repo_name": "brodyh/tensorflow",
"path": "tensorflow/python/training/adam.py",
"copies": "5",
"size": "5747",
"license": "apache-2.0",
"hash": -2943852589925192000,
"line_mean": 38.3630136986,
"line_max": 79,
"alpha_frac": 0.6293718462,
"autogenerated": false,
"ratio": 3.3786008230452675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024536279373371384,
"num_lines": 146
} |
"""Adam for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdamOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adam algorithm.

  @@__init__
  """

  def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
               use_locking=False, name="Adam"):
    """Construct a new Adam optimizer.

    Implementation is based on: http://arxiv.org/pdf/1412.6980v7.pdf

    Initialization:

    ```
    m_0 <- 0 (Initialize initial 1st moment vector)
    v_0 <- 0 (Initialize initial 2nd moment vector)
    t <- 0 (Initialize timestep)
    ```

    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section2 of the paper:

    ```
    t <- t + 1
    lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)

    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    v_t <- beta2 * v_{t-1} + (1 - beta2) * g * g
    variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
    ```

    The default value of 1e-8 for epsilon might not be a good default in
    general. For example, when training an Inception network on ImageNet a
    current good choice is 1.0 or 0.1.

    Args:
      learning_rate: A Tensor or a floating point value.  The learning rate.
      beta1: A float value or a constant float tensor.
        The exponential decay rate for the 1st moment estimates.
      beta2: A float value or a constant float tensor.
        The exponential decay rate for the 2nd moment estimates.
      epsilon: A small constant for numerical stability.
      use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".
    """
    super(AdamOptimizer, self).__init__(use_locking, name)
    self._lr = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon

    # Tensor versions of the constructor arguments, created in _prepare().
    self._lr_t = None
    self._beta1_t = None
    self._beta2_t = None
    self._epsilon_t = None

    # Variables to accumulate the powers of the beta parameters.
    # Created in _create_slots when we know the variables to optimize.
    self._beta1_power = None
    self._beta2_power = None

    # Created in SparseApply if needed.
    self._updated_lr = None

  def _get_beta_accumulators(self):
    # Expose the beta1^t / beta2^t accumulator variables.
    return self._beta1_power, self._beta2_power

  def _create_slots(self, var_list):
    # Create the beta1 and beta2 accumulators on the same device as the first
    # variable.
    if self._beta1_power is None:
      with ops.device(var_list[0].device):
        self._beta1_power = variables.Variable(self._beta1, name="beta1_power")
        self._beta2_power = variables.Variable(self._beta2, name="beta2_power")
    # Create slots for the first and second moments.
    for v in var_list:
      self._zeros_slot(v, "m", self._name)
      self._zeros_slot(v, "v", self._name)

  def _prepare(self):
    # Convert the Python-number hyperparameters to tensors once per apply.
    self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
    self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
    self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
    self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")

  def _apply_dense(self, grad, var):
    # Dense updates are handled entirely by the fused apply_adam kernel.
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    return training_ops.apply_adam(
        var, m, v, self._beta1_power, self._beta2_power,
        self._lr_t, self._beta1_t, self._beta2_t,
        self._epsilon_t, grad, use_locking=self._use_locking).op

  def _apply_sparse(self, grad, var):
    # Sparse path re-implements Adam with scatter_add so only the rows
    # present in grad.indices are updated.
    lr = (self._lr_t *
          math_ops.sqrt(1 - self._beta2_power)
          / (1 - self._beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad.values * (1 - self._beta1_t)
    m_t = state_ops.assign(m, m * self._beta1_t,
                           use_locking=self._use_locking)
    m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                                use_locking=self._use_locking)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad.values * grad.values) * (1 - self._beta2_t)
    v_t = state_ops.assign(v, v * self._beta2_t, use_locking=self._use_locking)
    v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                                use_locking=self._use_locking)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(var,
                                      lr * m_t / (v_sqrt + self._epsilon_t),
                                      use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t])

  def _finish(self, update_ops, name_scope):
    # Update the power accumulators.
    with ops.control_dependencies(update_ops):
      with ops.device(self._beta1_power.device):
        update_beta1 = self._beta1_power.assign(
            self._beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = self._beta2_power.assign(
            self._beta2_power * self._beta2_t,
            use_locking=self._use_locking)
    return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                  name=name_scope)
| {
"repo_name": "javierblasco/tensorflow",
"path": "tensorflow/python/training/adam.py",
"copies": "5",
"size": "5637",
"license": "apache-2.0",
"hash": -2820761840088296000,
"line_mean": 38.6971830986,
"line_max": 79,
"alpha_frac": 0.6269292177,
"autogenerated": false,
"ratio": 3.365373134328358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6492302352028357,
"avg_score": null,
"num_lines": null
} |
# a damped, driven pendulum, from Pang, Ch. 4
import math
import numpy
import pylab
class ddpend:
    """A damped, driven pendulum (Pang, Ch. 4).

    Integrates theta'' = -q theta' - sin(theta) + b cos(omega_d t) with
    4th-order Runge-Kutta and keeps the full (t, theta, omega) history.

    NOTE(review): Python 2 code -- powerSpectrum relies on integer
    division in N/2 when slicing the FFT output; confirm before porting.
    """
    def __init__(self, theta0, omega0, q, b, omega_d):
        self.theta0 = theta0    # initial angular displacement
        self.omega0 = omega0    # initial angular velocity
        self.q = q              # damping parameter
        self.b = b              # forcing amplitude
        self.omega_d = omega_d  # driving frequency

        # Solution history, filled in by intRK4 as numpy arrays.
        self.t = None
        self.theta = None
        self.omega = None

    def rhs(self, t, theta, omega):
        """ return the RHS (thetadot(t), omegadot(t)) """
        thetadot = omega
        omegadot = -self.q*omega - math.sin(theta) \
            + self.b*math.cos(self.omega_d*t)
        return thetadot, omegadot

    def intRK4(self, dt, tmax):
        """ integrate the system using 4th-order Runge-Kutta """
        # initial condition
        t = 0.0
        theta = self.theta0
        omega = self.omega0

        # store the solution
        tHist = [t]
        thetaHist = [theta]
        omegaHist = [omega]

        # integrate
        while (t < tmax):
            # Standard RK4 stages for the coupled (theta, omega) system.
            thetadot1, omegadot1 = self.rhs(t, theta, omega)
            thetadot2, omegadot2 = self.rhs(t+0.5*dt,
                                            theta+0.5*dt*thetadot1,
                                            omega+0.5*dt*omegadot1)
            thetadot3, omegadot3 = self.rhs(t+0.5*dt,
                                            theta+0.5*dt*thetadot2,
                                            omega+0.5*dt*omegadot2)
            thetadot4, omegadot4 = self.rhs(t+dt,
                                            theta+dt*thetadot3,
                                            omega+dt*omegadot3)
            theta += (dt/6.0)*(thetadot1 + 2.0*thetadot2 + 2.0*thetadot3 + thetadot4)
            omega += (dt/6.0)*(omegadot1 + 2.0*omegadot2 + 2.0*omegadot3 + omegadot4)
            t += dt
            tHist.append(t)
            thetaHist.append(theta)
            omegaHist.append(omega)

        self.t = numpy.array(tHist)
        self.theta = numpy.array(thetaHist)
        self.omega = numpy.array(omegaHist)

    def restrictTheta(self):
        """ convert theta in place to be restricted to lie between -pi
        and pi.  This is done in a periodic fashion, with theta' =
        theta +/- 2n pi """
        # shift everything by pi, then restrict to lie between [0,
        # 2pi], then shift back by pi
        self.theta += math.pi
        n = 0
        while (n < len(self.theta)):
            self.theta[n] += - 2.0*math.pi*math.floor(self.theta[n]/(2.0*math.pi))
            n += 1
        self.theta -= math.pi

    def powerSpectrum(self):
        """ return the power spectrum of theta.  For the frequency
        component, return it in terms of omega """
        # power spectrum
        N = len(self.t)
        F = (2.0/N)*numpy.fft.rfft(self.theta)
        # One-sided frequency axis; N/2 is Python 2 integer division.
        k = numpy.fft.fftfreq(N)[range(0,N/2+1)]
        # For even N the last fftfreq entry is the negative Nyquist
        # frequency; flip its sign so the axis is monotonic.
        if N % 2 == 0:
            k[-1] *= -1
        kfreq = 2.0*math.pi*k*N/max(self.t)
        return kfreq, F
#-----------------------------------------------------------------------------
# Section 1: normal (non-damped, non-driven) pendulum.
# Note, without damping or driving, all the power should be at the
# natural oscillation period of the pendulum.  For a small amplitude,
# with L = g, then the period is T = 2 pi, and the frequency is nu_k =
# 1/(2 pi).  We plot things in terms of the angular frequency, omega_k
# = 2 pi nu_k, so all the power will be at omega_k = 1
# For a large amplitude perturbation, the period will be longer, so
# the power will be at an omega_k < 1
q = 0.0
b = 0.0
omega_d = 2./3.

# Timestep: 200 steps per driving period.
T_d = 2.0*math.pi/omega_d
dt = T_d/200.0

# these conditons give a large amplitude perturbation
#theta0 = 0.0
#omega0 = 2.0

# these conditions give a small amplitude, so the power for the undamped,
# non-driven pendulum should be at omega_k = 1
theta0 = 0.1
omega0 = 0.0

p0 = ddpend(theta0, omega0, q, b, omega_d)
p0.intRK4(dt, 100.0*T_d)

# Phase-space portrait on top, power spectrum below.
pylab.subplot(211)
pylab.plot(p0.theta, p0.omega)
pylab.xlabel(r"$\theta$")
pylab.ylabel(r"$\omega$")

# power spectrum
omega_k, F = p0.powerSpectrum()

pylab.subplot(212)
pylab.plot(omega_k, numpy.abs(F)**2)
pylab.xlim(0.,2.)
#pylab.ylim(1.e-4,1.0)
ax = pylab.gca()
#ax.set_yscale('log')
pylab.xlabel(r"$\omega_k$")
pylab.ylabel(r"power spectrum")

pylab.tight_layout()

pylab.savefig("pend_nodamping.png")
#-----------------------------------------------------------------------------
# Section 2: damped, driven but non-chaotic pendulum (q=0.5, b=0.9).
q = 0.5
b = 0.9
omega_d = 2./3.

T_d = 2.0*math.pi/omega_d
dt = T_d/200.0

theta0 = 0.0
omega0 = 2.0

p1 = ddpend(theta0, omega0, q, b, omega_d)
p1.intRK4(dt, 100.0*T_d)

pylab.clf()
pylab.subplot(211)
pylab.plot(p1.theta, p1.omega)
pylab.xlabel(r"$\theta$")
pylab.ylabel(r"$\omega$")

# power spectrum
omega_k, F = p1.powerSpectrum()

pylab.subplot(212)
pylab.plot(omega_k, numpy.abs(F)**2)
# Dotted vertical line marks the driving frequency.
pylab.plot([omega_d, omega_d], [1.e-10,2.0*max(numpy.abs(F)**2)], ls=":")
pylab.xlim(0.,1.)
pylab.ylim(1.e-4,1.0)
ax = pylab.gca()
ax.set_yscale('log')
pylab.xlabel(r"$\omega_k$")
pylab.ylabel(r"power spectrum")

pylab.tight_layout()

pylab.savefig("pend_q0.5_b0.9_om0.666.png")
#-----------------------------------------------------------------------------
# Section 3: chaotic pendulum.  Sweep the forcing amplitude b upward from
# bmin in steps of db and save one phase-space + spectrum figure per value.
q = 0.5
bmin = 0.9
db = 0.05
N = 20
B = numpy.arange(N)*db + bmin

omega_d = 2./3.

T_d = 2.0*math.pi/omega_d
dt = T_d/200.0

theta0 = 0.0
omega0 = 2.0

for b in B:
    p2 = ddpend(theta0, omega0, q, b, omega_d)
    p2.intRK4(dt, 500.0*T_d)
    # Fold theta back into [-pi, pi] for a readable phase portrait.
    p2.restrictTheta()

    pylab.clf()
    pylab.subplot(211)
    pylab.plot(p2.theta, p2.omega)
    pylab.title(r"$q = %3.2f, \, \omega_d = %4.3f, \, b = %3.2f$" % (q, omega_d, b))
    pylab.xlabel(r"$\theta$")
    pylab.ylabel(r"$\omega$")

    # power spectrum
    omega_k, F = p2.powerSpectrum()

    pylab.subplot(212)
    pylab.plot(omega_k, numpy.abs(F)**2)
    pylab.plot([omega_d, omega_d], [1.e-10,2.0*max(numpy.abs(F)**2)], ls=":")
    pylab.xlim(0.,6.)
    pylab.ylim(1.e-4,1.0)
    ax = pylab.gca()
    ax.set_yscale('log')
    pylab.xlabel(r"$\omega_k$")
    pylab.ylabel(r"power spectrum")

    pylab.tight_layout()

    pylab.savefig("pend_q0.5_b%3.2f_om0.666.png" % (b))
| {
"repo_name": "bt3gl/Numerical-Methods-for-Physics",
"path": "others/FFT/chaotic_pend.py",
"copies": "1",
"size": "6348",
"license": "apache-2.0",
"hash": -5895859036236177000,
"line_mean": 22.2527472527,
"line_max": 85,
"alpha_frac": 0.5411153119,
"autogenerated": false,
"ratio": 2.8517520215633425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8846668640925986,
"avg_score": 0.009239738507471158,
"num_lines": 273
} |
# Adam Petrone
# August, 2014
import os
import sys
import logging
import subprocess
import platform
# Directory name for the bootstrap virtualenv and the requirements file
# (the latter is resolved relative to this script's parent directory).
BOOTSTRAP_VIRTUALENV_PATH = "env"
REQUIREMENTS_FILE = "requirements"
def get_platform():
    """Return a coarse platform name: 'linux', 'macosx', 'windows' or 'unknown'."""
    platform_line = platform.platform().lower()
    if "linux" in platform_line:
        return "linux"
    elif "darwin" in platform_line:
        return "macosx"
    elif "nt" in platform_line or "windows" in platform_line:
        # BUGFIX: the original `"nt" or "windows" in platform_line` was
        # always truthy (non-empty string literal), so every non-linux,
        # non-mac system reported "windows" and "unknown" was unreachable.
        return "windows"
    else:
        return "unknown"
def get_virtualenv_path(root_path, name):
    """Return the path of the named binary inside a virtualenv.

    POSIX virtualenvs keep binaries in "bin" with no extension; Windows
    uses "Scripts" with an ".exe" suffix.
    """
    layouts = {
        "posix": ("bin", ""),
        "nt": ("Scripts", ".exe"),
    }
    subdir, extension = layouts[os.name]
    return os.path.join(root_path, subdir, name + extension)
def setup_environment(after_install):
    """Create the bootstrap virtualenv if needed and return its root path.

    after_install is installed as virtualenv's post-install hook.  If the
    virtualenv directory already exists nothing is (re)created.
    """
    try:
        import virtualenv
    except ImportError:
        # Only trap a missing module; the original bare `except:` also
        # swallowed unrelated errors (KeyboardInterrupt included).
        raise Exception("virtualenv not installed! This is required.")

    root_path = os.path.dirname(__file__)
    virtualenv_root = os.path.join(root_path, BOOTSTRAP_VIRTUALENV_PATH)
    if os.path.exists(virtualenv_root):
        logging.info(
            "virtualenv already exists at \"%s\". Nothing to do." %
            virtualenv_root
        )
        return virtualenv_root

    logging.info("creating virtualenv at \"%s\"" % virtualenv_root)
    # virtualenv.main() reads its arguments from sys.argv.
    sys.argv.append("--distribute")
    sys.argv.append(virtualenv_root)
    virtualenv.after_install = after_install
    virtualenv.main()
    return virtualenv_root
def install_packages(root_path):
    """pip-install the project requirements into the virtualenv at root_path."""
    pip = get_virtualenv_path(root_path, "pip")
    requirements = os.path.join(root_path, os.path.pardir, REQUIREMENTS_FILE)
    abs_requirements_path = os.path.abspath(requirements)

    # Work around a clang hard-error when building C extensions on OS X.
    if get_platform() == "macosx":
        os.environ["CFLAGS"] = "-Wno-unused-command-line-argument-hard-error-in-future"

    subprocess.call([pip, "install", "-r", abs_requirements_path])
def build_docs(root_path):
    """Render the Sphinx docs under docs/ to HTML in docs/html."""
    sphinx_build = get_virtualenv_path(root_path, "sphinx-build")
    subprocess.call([sphinx_build, "-b", "html", "docs", "docs/html"])
def post_install(options, root_path):
    """virtualenv post-install hook: install the project requirements."""
    # after the virtualenv is installed, call the following
    #
    # install via requirements file
    install_packages(root_path)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Create (or reuse) the virtualenv, then build the documentation.
    root_path = setup_environment(post_install)
    # (this should be moved) build documentation
    build_docs(root_path)
| {
"repo_name": "apetrone/gemini",
"path": "tools/bootstrap.py",
"copies": "1",
"size": "2384",
"license": "bsd-2-clause",
"hash": -5418729175523367000,
"line_mean": 22.3725490196,
"line_max": 81,
"alpha_frac": 0.7088926174,
"autogenerated": false,
"ratio": 3.0761290322580646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42850216496580645,
"avg_score": null,
"num_lines": null
} |
#Adam Pikielny
#Graphics: Assets and Looping
from ggame import App, RectangleAsset, ImageAsset, SoundAsset, Sprite, Sound
from ggame import LineStyle, Color
# Screen geometry and a solid green background sprite.
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480

green = Color(0x00ff00, 1)
black = Color(0, 1)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, green)
bg = Sprite(bg_asset, (0,0))

# Sounds
pew1_asset = SoundAsset("sounds/pew1.mp3")
pew1 = Sound(pew1_asset)
pop_asset = SoundAsset("sounds/reappear.mp3")
pop = Sound(pop_asset)

# A ball! This is already in the ggame-tutorials repository
ball_asset = ImageAsset("images/orb-150545_640.png")
ball = Sprite(ball_asset, (0, 0))

# Original image is too big. Scale it to 1/10 its original size
ball.scale = 0.1
ball.y = 200

# custom attributes: dir is the horizontal travel direction (+1/-1),
# go toggles whether the ball is moving.
ball.dir = 1
ball.go = True

# Sounds
# NOTE(review): this block duplicates the sound setup above, rebinding
# pew1/pop to fresh objects -- redundant; consider removing one copy.
pew1_asset = SoundAsset("sounds/pew1.mp3")
pew1 = Sound(pew1_asset)
pop_asset = SoundAsset("sounds/reappear.mp3")
pop = Sound(pop_asset)
def reverse(b):
    # Flip the sprite's travel direction and give audible feedback.
    b.dir = b.dir * -1
    pop.play()
# Screen-refresh handler: advance the ball and bounce it off the edges.
def step():
    if not ball.go:
        return
    ball.x += ball.dir
    # Undo the move and reverse when either screen edge is crossed.
    if ball.x < 0 or ball.x + ball.width > SCREEN_WIDTH:
        ball.x -= ball.dir
        reverse(ball)
# Handle the space key
def spaceKey(event):
    # Toggle whether the ball is moving.
    ball.go = not ball.go
# Handle the "reverse" key
def reverseKey(event):
reverse(ball)
# Handle the mouse click
def mouseClick(event):
    # Teleport the ball to the click position and play a sound.
    ball.x = event.x
    ball.y = event.y
    pew1.play()
# Create the app, wire up keyboard/mouse handlers, then start the
# refresh loop with step() called once per frame.
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
# Set up event handlers for the app
myapp.listenKeyEvent('keydown', 'space', spaceKey)
myapp.listenKeyEvent('keydown', 'r', reverseKey)
myapp.listenMouseEvent('click', mouseClick)
myapp.run(step)
"repo_name": "APikielny/Final-Project-Tanks",
"path": "tutorial3.py",
"copies": "2",
"size": "1710",
"license": "mit",
"hash": 386538838710562560,
"line_mean": 23.7971014493,
"line_max": 76,
"alpha_frac": 0.698245614,
"autogenerated": false,
"ratio": 2.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4412531328285714,
"avg_score": null,
"num_lines": null
} |
#adam-use# I THINK: that this is only useful if you have some spec-z's, have already run redsequence.py, and want to examine how well you did getting photo-z's for red-sequence galaxies
# usage: python redsequence [options]
# Identifies and fits the red sequence using apparent magnitude and one color.
# Option of identifying star column and only using objects larger.
def twodhist(xs,ys,fname):
    """Save a 2-D density plot (photometric vs spectroscopic z) to fname.

    NOTE(review): Python 2 code (print statements).  xs/ys are passed to
    histogram2d as (ys, xs) -- confirm the intended axis ordering against
    the caller before changing.
    """
    import scipy
    # 0.0225-wide bins over [0, 1.5] on both axes.
    xbins = scipy.arange(0,1.5,0.0225)
    ybins = scipy.arange(0,1.5,0.0225)
    prob_matrix,X,Y = scipy.histogram2d(ys,xs,bins=[xbins,ybins])
    # Normalize so the densest cell is 1.
    prob_matrix = prob_matrix / prob_matrix.max()
    import pylab
    #X, Y = pylab.meshgrid(zs_copy,zs_copy)
    print prob_matrix.shape, X.shape, Y.shape
    import pylab
    pylab.rcdefaults()
    # PostScript backend with TeX labels for publication-quality output.
    params = {'backend' : 'ps',
              'text.usetex' : True,
              'ps.usedistiller' : 'xpdf',
              'ps.distiller.res' : 6000}
    pylab.rcParams.update(params)
    fig_size = [8,8]
    params = {'axes.labelsize' : 20,
              'text.fontsize' : 22,
              'legend.fontsize' : 22,
              'xtick.labelsize' : 20,
              'ytick.labelsize' : 20,
              'scatter.s' : 0.1,
              'scatter.marker': 'o',
              'figure.figsize' : fig_size}
    pylab.rcParams.update(params)
    pylab.clf()
    print prob_matrix.max()
    # Clamp any value above 1 (guards against rounding overshoot).
    prob_matrix[prob_matrix>1] =1.
    #pylab.axes([0.125,0.125,0.95-0.125,0.95-0.125])
    #pylab.axes([0.125,0.25,0.95-0.125,0.95-0.25])
    pylab.axes([0.125,0.15,0.95-0.125,0.95-0.15])
    #pylab.axes([0.225,0.15,0.95-0.225,0.95-0.15])
    pylab.axis([0,1.5,0,1.5])
    # Grayscale density image: dense cells render dark.
    pylab.pcolor(X, Y,-1.*prob_matrix,cmap='gray',alpha=0.9,shading='flat',edgecolors='None')
    pylab.axhline(y=1.2,color='black')
    # One-to-one reference line.
    pylab.plot(scipy.array([0,2]),scipy.array([0,2]),color='black')
    pylab.figtext(0.16,0.89,'HDFN',fontsize=20)
    pylab.xlabel('Spectroscopic z')
    pylab.ylabel('Photometric z')
    #pylab.plot([0,1],[0,1],color='red')
    #pylab.xlabel('SpecZ')
    #pylab.ylabel('PhotZ')
    pylab.savefig(fname) #,figsize=fig_size)
def plot_residuals(cluster, detect_band, base='/nfs/slac/g/ki/ki04/pkelly/photoz/',SPECTRA='CWWSB_capak.list',photoz_code='BPZ'): #,outbase,SPECTRA,type='bpz'):
    """Histogram and scatter-plot photo-z residuals against spec-z for a cluster.

    Reads the matched photometry/spectroscopy table, selects well-measured
    galaxies (NFILT >= 4, odds > 0.9), fits a Gaussian to the residuals
    (z_phot - z_spec)/(1 + z_spec), and writes PNG/PS/PDF figures plus an
    HTML summary. Also records the number of spectroscopic matches in the
    'subaru' MySQL database.

    cluster     : cluster name (e.g. 'HDFN'); HDFN gets special figure geometry.
    detect_band : detection band used to locate the photometry directory.
    photoz_code : 'BPZ' or 'EAZY' — selects which columns of the table to read.
    """
    outbase = base + cluster + '/'
    import os
    # NOTE(review): relies on the 'subdir' environment variable — confirm it
    # points at the SUBARU data root.
    subarudir = os.environ['subdir'] + '/'
    photdir = subarudir + cluster + '/PHOTOMETRY_' + detect_band + '_aper/'
    mergecat = photdir + cluster + '.matched.tab'
    papbase = base + 'papfigs/'
    os.system('mkdir -p ' + papbase)
    import os, sys, anydbm, time
    import lib, scipy, pylab , pyfits
    from scipy import arange
    print mergecat
    t = pyfits.open(mergecat)['STDTAB'].data
    zspec = t['z_spec']
    # Column names differ between the two photo-z codes.
    if photoz_code=='BPZ':
        zphot = t['BPZ_Z_B_data']
        odds = t['BPZ_ODDS_data']
        NFILT = t['NFILT_data']
        mag = t['BPZ_M_0_data']
    elif photoz_code=='EAZY':
        zphot = t['EAZY_z_p_data']
        odds = t['EAZY_odds_data']
        NFILT = t['EAZY_nfilt_data']
    diff = []
    z = []
    z_spec = []
    # Keep only galaxies with at least 4 filters and confident photo-z.
    for i in range(len(zspec)):
        #print line
        if NFILT[i]>=4 and odds[i] > 0.9: # and mag[i] > 23:
            diff_val = (zphot[i] - zspec[i])/(1 + zspec[i])
            diff.append(diff_val)
            z.append(zphot[i])
            z_spec.append(zspec[i])
    # Dead code (disabled comparison against a COSMOS histogram); several
    # names inside (z_phot, multiple, cosmos_zs, ...) are undefined here.
    if False:
        z_phot_array = scipy.array(z)
        z_spec_array = scipy.array(z_spec)
        upper_lim = 1.0
        lower_lim = 0.8
        out_zs = (z_phot < upper_lim) * (z_phot > lower_lim)
        phots = z_phot_array[out_zs]
        specs = z_spec_array[out_zs]
        bins = scipy.arange(0,2.,0.01*multiple)
        n, bins, patches = pylab.hist(cosmos_zs, bins=bins, histtype='bar')
        pylab.bar(x,y_cosmos,width=x[1]-x[0],facecolor='red',linewidth=0, label='COSMOS')
        pylab.bar(x,y,width=x[1]-x[0],facecolor='none',edgecolor='black', label='BPZ')
    #print results
    # NOTE: shadows the builtin 'list'; kept as-is.
    list = diff[:]
    import pylab
    params = {'backend' : 'ps',
              'text.usetex' : True,
              'ps.usedistiller' : 'xpdf',
              'ps.distiller.res' : 6000}
    pylab.rcParams.update(params)
    # HDFN figures use a square panel; other clusters a short wide one.
    if cluster == 'HDFN':
        fig_size = [8.5,8.5]
    else:
        fig_size = [8.5,3]
    params = {'axes.labelsize' : 20,
              'text.fontsize' : 22,
              'legend.fontsize' : 22,
              'xtick.labelsize' : 20,
              'ytick.labelsize' : 20,
              'scatter.s' : 0.1,
              'scatter.marker': 'o',
              'figure.figsize' : fig_size}
    pylab.rcParams.update(params)
    varps = []
    #print diff
    if cluster == 'HDFN':
        pylab.axes([0.125,0.15,0.95-0.125,0.95-0.15])
    else:
        pylab.axes([0.125,0.25,0.95-0.125,0.95-0.25])
    #pylab.axis([0,1,0,1])
    print diff
    # Residual histogram on [-0.2, 0.2].
    a, b, varp = pylab.hist(diff,bins=arange(-0.2,0.2,0.015),color='blue',edgecolor='black')
    #print a,b,varp
    varps.append(varp[0])
    # Clip to |residual| < 0.1 before fitting the Gaussian (outlier rejection).
    diffB = []
    for d in diff:
        if abs(d) < 0.1:
            diffB.append(d)
    diff = diffB
    list = scipy.array(diff)
    mu = list.mean()
    sigma = list.std()
    #print 'mu', mu
    #print 'sigma', sigma
    from scipy import stats
    pdf_x = arange(-0.2,0.2,0.005)
    pdf = scipy.stats.norm.pdf(pdf_x, mu, sigma)
    #print 'pdf', pdf
    height = scipy.array(a).max()
    # Overlay the fitted normal, scaled to the histogram counts (factor 3
    # compensates for the finer pdf sampling vs. histogram bin width).
    pylab.plot(pdf_x,3*len(diff)*pdf/pdf.sum(),color='red')
    print b,len(diff)*pdf/pdf.sum()
    pylab.xlabel(r"(z$_{phot}$ - z$_{spec}$)/(1 + z$_{spec}$)")
    pylab.ylabel("Galaxies")
    if cluster == 'HDFN':
        pylab.figtext(0.76,0.89,'$\mu_{\Delta z}$=%.3f' % mu, fontsize=20)
        pylab.figtext(0.76,0.85,'$\sigma_{\Delta z}$=%.3f' % sigma, fontsize=20)
    else:
        pylab.figtext(0.76,0.82,'$\mu_{\Delta z}$=%.3f' % mu, fontsize=20)
        pylab.figtext(0.76,0.73,'$\sigma_{\Delta z}$=%.3f' % sigma, fontsize=20)
    #pylab.title(['mu ' + str(mu),'sigma ' + str(sigma)])
    # Write the HTML summary page linking the figures produced below.
    os.system('mkdir -p ' + outbase + '/' + SPECTRA)
    file = open(outbase + '/' + SPECTRA + '/redshifterrors.html','w')
    file.write('<h1>Spectroscopic vs. Photometric Redshifts</h1><br>')
    from datetime import datetime
    t2 = datetime.now()
    file.write('<br><h3>' + t2.strftime("%Y-%m-%d %H:%M:%S") + '</h3><br><br><img src="RedshiftErrors.png"></img>')
    file.write('<br><img src="RedshiftScatter01.png"></img>\n')
    file.write('<br><img src="RedshiftScatter02.png"></img>\n')
    file.close()
    if cluster == 'HDFN':
        pylab.figtext(0.16,0.89,cluster,fontsize=20)
    else:
        pylab.figtext(0.16,0.79,cluster,fontsize=20)
    pylab.savefig(outbase + '/' + SPECTRA + '/RedshiftErrors.png')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftErrors.ps')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftErrors.pdf')
    print papbase + '/' + cluster + 'RedshiftErrors.pdf'
    #save_db(cluster,{'mu':mu,'sigma':sigma})
    # Scatter plot on [0, 2] x [0, 2].
    pylab.clf()
    if cluster == 'HDFN':
        pylab.axes([0.125,0.15,0.95-0.125,0.95-0.15])
    else:
        pylab.axes([0.125,0.25,0.95-0.125,0.95-0.25])
    pylab.axis([0,2,0,2])
    pylab.scatter(z_spec,z,linewidth=0,s=3, marker='o',c='black')
    pylab.plot(scipy.array([0,2]),scipy.array([0,2]),color='black')
    pylab.ylabel("Photometric z")
    pylab.xlabel("Spectroscopic z")
    if cluster == 'HDFN':
        pylab.figtext(0.16,0.89,cluster,fontsize=20)
    else:
        pylab.figtext(0.16,0.79,cluster,fontsize=20)
    pylab.savefig(outbase + '/' + SPECTRA + '/RedshiftScatter02.png')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftScatter02.ps')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftScatter02.pdf')
    # Same scatter, zoomed to [0, 1.5].
    pylab.clf()
    if cluster == 'HDFN':
        pylab.axes([0.125,0.15,0.95-0.125,0.95-0.15])
    else:
        pylab.axes([0.125,0.25,0.95-0.125,0.95-0.25])
    pylab.axis([0,1.5,0,1.5])
    pylab.scatter(z_spec,z,linewidth=0,s=3, marker='o',c='black')
    pylab.plot(scipy.array([0,3]),scipy.array([0,3]),color='black')
    pylab.ylabel("Photometric z")
    pylab.xlabel("Spectroscopic z")
    if cluster == 'HDFN':
        pylab.figtext(0.16,0.89,cluster,fontsize=20)
    else:
        pylab.figtext(0.16,0.79,cluster,fontsize=20)
    pylab.savefig(outbase + '/' + SPECTRA + '/RedshiftScatter01.png')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftScatter01.ps')
    pylab.savefig(papbase + '/' + cluster + 'RedshiftScatter01.pdf')
    print papbase + '/' + cluster
    fname = papbase + '/' + cluster + '2dhist.pdf'
    twodhist(z_spec,z,fname )
    # Record the number of spectroscopic matches for bookkeeping.
    p = open(papbase + '/' + cluster + '_galaxies','w')
    p.write('galaxies ' + str(len(z_spec)) )
    p.close()
    import MySQLdb
    db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-sr01')
    c = db2.cursor()
    commandst = 'update clusters_db set specmatches=' + str(len(z_spec)) + ' where objname="' + cluster + '"'
    c.execute(commandst)
    #pylab.show()
    pylab.clf()
def join_cats(cs,outputfile):
    """Column-wise join of several FITS tables into one STDTAB output file.

    cs         : list of catalogs; each entry is either a path string wrapped
                 in a sequence, or a (path, table_name) pair (default 'STDTAB').
    outputfile : path of the combined FITS file (overwritten if present).

    Duplicate 'SeqNr' columns after the first are renamed 'SeqNr_<n>' so the
    merged table has unique column names. The FIELDS extension is copied from
    the second input catalog.
    """
    import astropy.io.fits as pyfits
    tables = {}
    i = 0
    cols = []
    seqnr = 0
    for c in cs:
        print c
        if len(c) > 1:
            TAB = c[1]
            c = c[0]
        else: TAB = 'STDTAB'
        i += 1
        print c
        # Keep each HDUList referenced so its columns stay valid.
        tables[str(i)] = pyfits.open(c)
        for column in tables[str(i)][TAB].columns:
            if column.name == 'SeqNr':
                # First SeqNr keeps its name; later ones get a numeric suffix.
                if not seqnr:
                    seqnr += 1
                else:
                    column.name = column.name + '_' + str(seqnr)
                    seqnr += 1
            cols.append(column)
    print cols
    print len(cols)
    hdu = pyfits.PrimaryHDU()
    hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
    hdulist = pyfits.HDUList([hdu])
    # FIELDS metadata is taken from the second catalog in the list.
    hduFIELDS = pyfits.open(cs[1][0])['FIELDS']
    hdulist.append(hduFIELDS)
    hdulist.append(hduSTDTAB)
    hdulist[1].header['EXTNAME']='FIELDS'
    hdulist[2].header['EXTNAME']='STDTAB'
    import os
    os.system('rm ' + outputfile)
    print outputfile
    hdulist.writeto(outputfile)
class file_iter:
    """Generator of numbered filenames: name1, name2, name3, ...

    ``self.file`` always holds the current name; ``next()`` advances the
    suffix and returns the new name.
    """
    def __init__(self,name):
        self.name = name       # base path/prefix
        self.suffix = 1        # current numeric suffix
        self.file = self.name + str(self.suffix)
    def next(self):
        """Advance to the next suffix and return the new filename."""
        self.suffix += 1
        self.file = self.name + str(self.suffix)
        return self.file
    # Alias so the object also satisfies the Python 3 iterator protocol.
    __next__ = next
    def __iter__(self):
        # Refresh self.file for the current suffix (preserved behavior),
        # then return self — the original returned None, which made
        # `for f in file_iter(...)` raise TypeError.
        self.file = self.name + str(self.suffix)
        return self
def run():
    """Match a cluster's photometry catalog against its spectroscopic catalog.

    Pipeline driver: builds the merged photo-z + photometry catalog with the
    ldac tools, matches it to the spectroscopic catalog via match_neighbor.sh,
    saves the matched-ID list, and finally calls plot_residuals() to make the
    diagnostic figures (unless photoz_code == 'noz').
    """
    from optparse import OptionParser
    usage = "usage: python redsequence [options] \n\nIdentifies and fits the red sequence using apparent magnitude and one color.\nOption of identifying star column and only using objects larger.\n"
    parser = OptionParser(usage)
    parser.add_option("-c", "--cluster",
                      help="name of cluster (i.e. MACS0717+37)")
    parser.add_option("-d", "--detectband",
                      help="detection band (i.e. W-J-V)",default='W-J-V')
    parser.add_option("-p", "--photozcode",
                      help="photoz code",default='BPZ')
    parser.add_option("-s", "--short",
                      help="short output",action='store_false')
    (options, args) = parser.parse_args()
    photoz_code = options.photozcode
    short = options.short
    import os
    #os.system('python mk_ldac_spec.py ' + options.cluster + ' ' + options.detectband)
    import mk_ldac_spec
    # reload() so repeated runs in the same interpreter pick up edits.
    found = reload(mk_ldac_spec).run(options.cluster, options.detectband)
    print 'found', str(found)
    if found:
        subarudir = '/nfs/slac/g/ki/ki05/anja/SUBARU/'
        import astropy.io.fits as pyfits, os
        SPECTRA = 'CWWSB_capak.list'
        #SPECTRA = 'CWWSB4.list'
        photdir = subarudir + options.cluster + '/PHOTOMETRY_' + options.detectband + '_aper/'
        phot_cat = photdir + options.cluster + '.slr.cat' # '.APER1.1.CWWSB_capak.list.all.bpz.tab'
        if short and not photoz_code == 'noz':
            phot_cat = photdir + options.cluster + '.short.cat' # '.APER1.1.CWWSB_capak.list.all.bpz.tab'
        # Photo-z catalog location depends on the chosen code.
        if photoz_code == 'BPZ':
            photoz_cat = photdir + options.cluster + '.APER1.1.' + SPECTRA + '.all.bpz.tab'
        elif photoz_code == 'EAZY':
            photoz_cat = '/tmp/pkelly/OUTPUT/photz.zout.tab'
        elif photoz_code == 'noz': photoz_cat = None
        print photoz_cat, phot_cat
        cat = photdir + options.cluster + '.merge.cat'
        import utilities
        utilities.run("ldacrentab -i " + phot_cat + " -t OBJECTS STDTAB -o " + phot_cat+'.STDTAB',\
                      [phot_cat+'.STDTAB'])
        print phot_cat
        #join_cats([[photoz_cat,'STDTAB'],[phot_cat + '.STDTAB','STDTAB']],cat)
        # Join the photo-z columns onto the photometry catalog.
        if photoz_code == 'noz':
            command = 'cp ' + phot_cat + '.STDTAB ' + cat
        elif photoz_code == 'BPZ':
            command = 'ldacjoinkey -i ' + phot_cat + '.STDTAB -o ' + cat + ' -p ' + photoz_cat + ' -k BPZ_Z_B BPZ_ODDS BPZ_M_0 NFILT -t STDTAB'
        elif photoz_code == 'EAZY':
            command = 'ldacjoinkey -i ' + phot_cat + '.STDTAB -o ' + cat + ' -p ' + photoz_cat + ' -k EAZY_z_p EAZY_odds EAZY_nfilt -t STDTAB'
        print command
        os.system(command)
        matchedcat = photdir + options.cluster + '.matched.tab'
        p = pyfits.open(cat)
        photoz = p['STDTAB'].data
        # Sanity check: SeqNr == 0 entries indicate a mismatched catalog.
        zero_IDs = len(photoz[photoz.field('SeqNr')==0])
        if zero_IDs > 0:
            print 'Wrong photoz catalog?', cat
            print str(zero_IDs) + ' many SeqNr=0'
            raise Exception
        import utilities
        speccat = photdir + options.cluster + 'spec.cat'
        from glob import glob
        if not glob(speccat):
            os.system('cp ' + subarudir + '/' + options.cluster + '/PHOTOMETRY_' + options.detectband + '/' + options.cluster + 'spec.cat ' + speccat)
        print speccat
        # file_iter produces numbered temp filenames for the ldac pipeline stages.
        specfile = file_iter(speccat+'spec')
        from glob import glob
        if not glob(speccat):
            print 'NO SPECTRA FILE'
            raise Exception
        os.system('rm ' + specfile.file[:-1] + '*')
        os.system('cp '+ speccat +' '+specfile.file)
        # Normalize table/column names and add a SeqNr column to the spec catalog.
        utilities.run("ldacrentab -i " + specfile.file + " -t OBJECTS STDTAB FIELDS NULL -o " + specfile.next(),[specfile.file])
        utilities.run("ldacrenkey -i " + specfile.file + " -t STDTAB -k Ra ALPHA_J2000 Dec DELTA_J2000 Z z -o " + specfile.next(),[specfile.file])
        utilities.run("ldaccalc -i " + specfile.file + " -t STDTAB -c '(Nr);' -k LONG -n SeqNr '' -o " + specfile.next(),[specfile.file] )
        print specfile.file
        # inputtable = ldac.openObjectFile(cat)
        # Per-user pipeline location for the neighbor-matching script.
        if os.environ['USER'] == 'dapple':
            os.chdir('/a/wain001/g.ki.ki02/dapple/pipeline/wtgpipeline/')
            print os.environ['USER'], os.system('pwd')
            command = "./match_neighbor.sh " + matchedcat + " STDTAB " + specfile.file + " spec " + cat + " data "
        else:
            os.chdir('/u/ki/pkelly/pipeline/wtgpipeline/')
            print os.environ['USER'], os.system('pwd')
            os.system('rm /tmp/' + os.environ['USER'] + 'combined.cat')
            os.system('rm ' + matchedcat)
            command = "/u/ki/pkelly/pipeline/wtgpipeline//match_neighbor.sh " + matchedcat + " STDTAB " + specfile.file + " spec " + cat + " data "
        print command
        os.system('pwd')
        utilities.run(command, [matchedcat])
        ''' save IDs of matched spectra '''
        matched_seqnr = photdir + 'spec_match_id.list'
        command = 'ldactoasc -i ' + matchedcat + ' -b -t STDTAB -k SeqNr_data z_spec BPZ_ODDS_data > ' + matched_seqnr
        utilities.run(command,[matched_seqnr])
        print matchedcat, specfile.file
        import astropy.io.fits as pyfits
        spectable = pyfits.open(matchedcat)['STDTAB']
        #print "looking at "+varname+'-'+filterlist[0]+'_data'
        print spectable
        print matchedcat
        if photoz_code != 'noz':
            plot_residuals(options.cluster, options.detectband,photoz_code=photoz_code)
# Command-line entry point.
if __name__ == '__main__':
    run()
| {
"repo_name": "deapplegate/wtgpipeline",
"path": "plot_rederr.py",
"copies": "1",
"size": "25190",
"license": "mit",
"hash": -1687274840036644400,
"line_mean": 45.3904235727,
"line_max": 309,
"alpha_frac": 0.3617705439,
"autogenerated": false,
"ratio": 4.514336917562724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5376107461462724,
"avg_score": null,
"num_lines": null
} |
#adam-use# I THINK: you run redsequence.py before plot_rederr.py. This code identifies red-sequence galaxies!
# usage: python redsequence [options]
# Identifies and fits the red sequence using apparent magnitude and one color.
# Option of identifying star column and only using objects larger.
import pylab
# Module-wide matplotlib defaults: postscript backend for the figures this
# script writes out.
params_pylab = {'backend' : 'ps',
                'text.usetex' : False,
                'ps.usedistiller' : 'xpdf',
                'ps.distiller.res' : 6000}
pylab.rcParams.update(params_pylab)
# Small square figures with compact fonts for the diagnostic plots.
fig_size = [5,5]
params_pylab = {'axes.labelsize' : 14,
                'text.fontsize' : 14,
                'legend.fontsize' : 12,
                'xtick.labelsize' : 10,
                'ytick.labelsize' : 10,
                'scatter.s' : 0.1,
                'scatter.marker': 'o',
                'figure.figsize' : fig_size}
pylab.rcParams.update(params_pylab)
def sortit(x,y):
    """cmp-style comparator: order sequences DESCENDING by first element.

    Fix: return 0 when the keys are equal. The original returned 1 for
    ties, violating the cmp contract (cmp(a, b) must equal -cmp(b, a)),
    which makes sorts of tied keys order-dependent.
    """
    if x[0] > y[0]: return -1
    if x[0] < y[0]: return 1
    return 0
def sortit_rev(x,y):
    """cmp-style comparator: order sequences ASCENDING by first element.

    Fix: return 0 when the keys are equal (the original returned -1 for
    ties, violating the cmp contract; see sortit).
    """
    if x[0] > y[0]: return 1
    if x[0] < y[0]: return -1
    return 0
def fit_starcolumn(size, savepng):
    """Locate the stellar locus ('star column') in a size histogram.

    size    : sequence of object sizes in arcsec (e.g. FLUX_RADIUS * pixel scale).
    savepng : path for the diagnostic plot of the binned histogram.
    Returns (left, right) size bounds bracketing the star column; objects
    larger than `right` are treated as galaxies by the caller.

    Heuristic: histogram the sizes (two shifted binnings for finer sampling),
    walk the bins from small to large size, find the histogram peak (column
    center), then the first significant rise after the falling edge (right
    edge); left edge is mirrored about the center.
    """
    import pylab, scipy
    boxes = []
    coords = []
    # Two offset binnings to effectively double the sampling resolution.
    for increment in [0,0.03]:# ,0.075,0.1]: #1,0.125,0.15,0.175]:
        #print size
        a,b,varp = pylab.hist(size,bins=scipy.arange(0+increment,2+increment,0.06))
        #print a, b
        boxes += list(a)
        # Shift bin edges to bin centers.
        coords += list(b[:-1] + scipy.ones(len(b[:-1]))*(0.03))
    tot = scipy.array(boxes).sum()
    print tot
    # NOTE: 'all', 'sum', 'max', 'min' shadow builtins below; kept as-is.
    all = zip(coords,boxes)
    all.sort(sortit_rev)
    print all
    sum = 0
    max = 0
    min = 1000000
    foundCenter = False
    from copy import copy
    print all, 'all'
    for x,y in all:
        print x, y, sum, tot
        sum += y
        # Ignore the leading tail until 5% of objects have been passed.
        if float(sum)/tot > 0.05:
            if y > max and not foundCenter:
                max = copy(y)
                max_x = copy(x)
                print 'max', max
            # A drop below 98% of the running maximum marks the column center.
            if y/max < 0.98 and not foundCenter:
                center = copy(max_x)
                print center, 'center'
                foundCenter = True
        if foundCenter:
            print 'min', min, y
            if min > y:
                min = copy(y)
                min_x = copy(x)
                print y, min
            # First 5% rise after the minimum marks the right edge.
            if y/float(min) > 1.05:
                right = copy(min_x)
                break
    # Mirror the right-edge offset to get the left edge.
    left = center - 1.*abs(right-center)
    print center,right, 'center, right'
    print len(boxes), len(coords)
    pylab.clf()
    pylab.scatter(coords,boxes)
    pylab.xlim(0,2.5)
    pylab.xlabel('SIZE (arcsec)')
    pylab.axvline(x=center,ymin=-10,ymax=10)
    pylab.axvline(x=left,ymin=-10,ymax=10)
    pylab.axvline(x=right,ymin=-10,ymax=10)
    pylab.savefig(savepng)
    pylab.clf()
    return left, right
def fit(colors, c1, c2, m, savepng):
    """Find the red-sequence color range from a color histogram.

    colors  : array of galaxy colors (c1 - c2 magnitudes).
    c1, c2  : filter names, used only for the plot axis label.
    m       : magnitude filter name (unused here; kept for interface symmetry).
    savepng : path for the diagnostic histogram plot.
    Returns (left, right) color bounds for the red sequence.

    Heuristic mirror of fit_starcolumn, but walking the color histogram from
    red to blue: locate the red-sequence peak, then the first significant
    rise past its blue-side minimum.
    """
    import pylab, scipy
    ''' essentially fine resolution binning '''
    boxes = []
    coords = []
    # Eight offset binnings give finely-sampled coverage of [-4, 4].
    for increment in [0,0.025,0.05,0.075,0.1,0.125,0.15,0.175]:
        a,b,varp = pylab.hist(colors,bins=scipy.arange(-4+increment,4+increment,0.2))
        #print a, b
        boxes += list(a)
        coords += list(b[:-1] + scipy.ones(len(b[:-1]))*(0.1))
    print len(colors), colors, 'len'
    tot = scipy.array(boxes).sum()
    print tot
    solutions = []
    # Only the red-to-blue ('reverse') scan is enabled.
    for version in ['reverse']: #:,'forward']:
        left = -99
        center = -99
        # NOTE: 'all', 'sum', 'min' shadow builtins below; kept as-is.
        all = zip(coords,boxes)
        if version == 'reverse':
            all.sort(sortit)
        if version == 'forward':
            all.sort(sortit_rev)
        print all
        pylab.clf()
        pylab.scatter(coords,boxes)
        #pylab.show()
        print 'plotted'
        sum = 0
        max_y = 0
        min = 1000000
        foundCenter = False
        from copy import copy
        print all, 'all'
        rev = zip(all[:][1],all[:][0])
        a = zip(boxes, coords)
        a.sort()
        # Color of the globally highest histogram bin.
        peak = a[-1][1]
        foundCenter = False
        for x,y in all:
            print x, y, sum, tot
            print max_y, min, foundCenter, peak
            sum += y
            #print all[-1][0], all[0][0]
            if sum > 0:
                # Only consider well-populated bins past the red tail.
                if float(tot)/sum > 0.05 and y > 100: #True: # (all[-1][0] < all[0][0] and x < peak ) or (all[-1][0] > all[0][0] and x > peak ): #
                    if y > max_y and not foundCenter:
                        max_y = copy(y)
                        max_x = copy(x)
                        print 'max', max_y
                    print y/max_y, (max_y-y)
                    # Significant drop from the running maximum -> peak center.
                    if y/max_y < 0.98 and (max_y-y) > 15 and not foundCenter:
                        center = copy(max_x)
                        print center, 'center', max_y
                        foundCenter = True
            #center = peak
            if foundCenter:
                print 'min', min, y
                if min > y:
                    min = copy(y)
                    min_x = copy(x)
                    print y, min, x
                # First 4% rise past the minimum -> blue-side boundary.
                if y/float(min) > 1.04:
                    left = copy(min_x)
                    print peak, left, center, 'FOUND ONE'
                    break
        if left != -99:
            # Enforce a minimum width around the center.
            if left > center:
                left = center - max(0.05,abs(center - left))
            right = center + max(0.4,1.*abs(left-center))
            print center, left, right, peak
            print right - peak, peak - left
            if True: #right - peak > 0 and peak - left > 0:
                solutions.append([center,left,right])
    ''' pick out the narrower solution '''
    if len(solutions) > 1:
        if solutions[0][0] - solutions[0][1] < solutions[1][0] - solutions[1][1]:
            solution = solutions[0]
        else: solution = solutions[1]
    else: solution = solutions[0]
    center, left, right = solution
    print center, left, right
    print len(boxes), len(coords)
    #print boxes, coords
    pylab.clf()
    pylab.scatter(coords,boxes)
    pylab.xlabel(c1 + ' - ' + c2)
    pylab.axvline(x=center,ymin=-10,ymax=10)
    pylab.axvline(x=left,ymin=-10,ymax=10)
    pylab.axvline(x=right,ymin=-10,ymax=10)
    pylab.savefig(savepng)
    return left, right
def run():
    """Identify and fit the cluster red sequence in color-magnitude space.

    Driver: parses command-line options, optionally reuses an existing fit,
    loads the photometry (and, with -z, the photo-z catalog), selects
    galaxies (valid magnitudes, optional star-column cut, cluster-centric
    radius cut, faint-magnitude cut), locates the red-sequence color range
    with fit(), iteratively fits a line color = slope*mag + intercept with
    outlier clipping, and writes figures, DS9 region files, the member-ID
    list, and a redsequence.html summary.
    """
    from optparse import OptionParser
    usage = "usage: python redsequence [options] \n\nIdentifies and fits the red sequence using apparent magnitude and one color.\nOption of identifying star column and only using objects larger.\n"
    parser = OptionParser(usage)
    parser.add_option("-c", "--cluster",
                      help="name of cluster (i.e. MACS0717+37)")
    parser.add_option("-d", "--detectband",
                      help="detection band (i.e. W-J-V)",default='W-J-V')
    parser.add_option("--c1",
                      help="name of first filter in 'galaxy color' (i.e. MAG_APER1-SUBARU-COADD-1-W-J-V)",default='MAG_APER1-SUBARU-COADD-1-W-J-V')
    parser.add_option("--c2",
                      help="name of second filter in 'galaxy color' (i.e. MAG_APER1-SUBARU-COADD-1-W-C-RC)",default='MAG_APER1-SUBARU-COADD-1-W-C-RC')
    parser.add_option("-m",'--m',
                      help="name of filter to be used as 'galaxy magnitude' (default is '--c2')",default=None)
    parser.add_option("-s", "--starcolumn",
                      help="add to filter out star column",action="store_true",default=False)
    parser.add_option('--lm',
                      help="limiting magnitude applied to 'galaxy magnitude'",default=False)
    parser.add_option('-r',"--center_radius",
                      help="maximum galaxy radius from cluster center (in arcsec) (default=440)",default=660.)
    parser.add_option("-l","--location",
                      help="write output directory",default=None)
    parser.add_option("-w","--web",
                      help="instead write to web (Pat's space)",action="store_true",default=False)
    parser.add_option("-z", "--z",
                      help="see what the photometric redshifts are of redsequence galaxies (requires redshift catalog, obviously)",action='store_true',default=False)
    parser.add_option("--cat",
                      help="name of alternate input catalog (if you don't want to use the default photometry catalog)",default=None)
    parser.add_option("--existingcolor",
                      help="use existing colors of red sequence fit",action="store_true",default=False)
    parser.add_option("-e","--existing",
                      help="use existing red sequence fit",action="store_true",default=False)
    (options, args) = parser.parse_args()
    # 'galaxy magnitude' defaults to the red filter of the color.
    if options.m is None:
        options.m = options.c2
    if options.location is not None and options.web:
        print 'Either specify location or web but not both at once'
        raise Exception
    if options.location is None and options.web is False:
        options.location = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + options.cluster + '/PHOTOMETRY_' + options.detectband + '_iso/'
    elif options.web:
        options.location = '/nfs/slac/g/ki/ki04/pkelly/photoz/' + options.cluster + '/CWWSB_capak.list/'
    if options.location[-1] != '/':
        options.location = options.location + '/'
    print options.location
    import os
    # Optionally reload a previously saved red-sequence fit instead of refitting.
    if options.existingcolor or options.existing:
        # NOTE: 'dir' and 'dict' shadow builtins; kept as-is.
        dir = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + options.cluster + '/LENSING_' + options.detectband + '_' + options.detectband + '_aper/good/'
        dict = {}
        print 'file', dir + 'redseqfit_2.orig'
        # Parse slope/intercepts out of the saved fit description file.
        redseqfit = open(dir + 'redseqfit_2.orig','r').readlines()
        slope = float(redseqfit[1].split('=')[1].split('*')[0])
        intercept = float(redseqfit[1][:-1].split('+')[1])
        upper_intercept = float(redseqfit[3][:-1].split('+')[1])
        lower_intercept = float(redseqfit[4][:-1].split('+')[1])
        polycoeffs = [slope, intercept]
        # The saved cuts are at +/- 1.2 sigma (std_fac below).
        std = (upper_intercept - intercept) / 1.2
        info = open(dir + 'redseq_all.params','r').readlines()
        print info, dir + 'redseq_all.params'
        for l in info:
            if len(l.split(':')) > 1:
                key, value = l[:-1].split(': ')
                dict[key] = value
        print dict
        #options.center_radius = dict['radcut']
        def prefix(filt):
            # Map a short filter letter/name to its full catalog column name.
            # NOTE(review): uses 'is' for string comparison — works here only
            # because of small-string interning; '==' would be correct.
            if filt is 'g' or filt is 'r' or filt is 'u':
                return 'MAG_APER1-MEGAPRIME-COADD-1-' + filt
            else:
                return 'MAG_APER1-SUBARU-COADD-1-' + filt
        dict['slope'] = slope
        dict['intercept'] = intercept
        dict['lower_intercept'] = lower_intercept
        dict['upper_intercept'] = upper_intercept
        if options.existing:
            options.m = prefix(dict['xmag'])
            options.c1 = prefix(dict['greenmag'])
            options.c2 = prefix(dict['redmag'])
            options.lm = dict['magcut2']
            print 'finished'
        elif options.existingcolor:
            options.c1 = prefix(dict['greenmag'])
            options.c2 = prefix(dict['redmag'])
    cluster = options.cluster
    c1 = options.c1
    c2 = options.c2
    m = options.m
    # With -z, load the photo-z table (kept row-aligned with the photometry).
    if options.z:
        import astropy, astropy.io.fits as pyfits
        cat = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.APER1.1.CWWSB_capak.list.all.bpz.tab'
        p = pyfits.open(cat)
        photoz = p['STDTAB'].data
        zero_IDs = len(photoz[photoz.field('SeqNr')==0])
        if zero_IDs > 0:
            print 'Wrong photoz catalog?', cat
            print str(zero_IDs) + ' many SeqNr=0'
            raise Exception
        print cat
    if options.cat is None: #not hasattr(options,'cat'):
        input_mags = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.slr.alter.cat'
    else: input_mags = options.cat
    import astropy, astropy.io.fits as pyfits, os, sys, pylab, do_multiple_photoz, commands, re, math, scipy
    from copy import copy
    print 'input magnitude catalog:', input_mags, options.cat, hasattr(options,'cat')
    filterlist = do_multiple_photoz.get_filters(input_mags,'OBJECTS')
    #print filterlist
    print input_mags
    w = pyfits.open(input_mags)
    mags = w['OBJECTS'].data
    #print mags.field('Xpos')
    # Drop objects with invalid (sentinel) magnitudes in any needed filter;
    # the photo-z table is masked in lockstep to stay row-aligned.
    mask = mags.field(c1) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    mask = mags.field(c2) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    mask = mags.field(m) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    # Flag == 0: clean detections, used for the star-column fit.
    mask = mags.field('Flag') == 0
    if options.z: photoz_star = photoz[mask]
    mags_star = mags[mask]
    #mask = mags_star.field(c2) < 23
    ''' get cluster redshift '''
    command = 'grep ' + cluster + ' ' + '/nfs/slac/g/ki/ki05/anja/SUBARU/' + '/clusters.redshifts '
    print command
    cluster_info = commands.getoutput(command)
    cluster_redshift = float(re.split('\s+',cluster_info)[1])
    print cluster_redshift
    if options.lm:
        mag_cut = float(options.lm)
    else:
        ''' compute faint magnitude cutoff '''
        # Scale the cut with luminosity distance relative to z=0.19.
        if m[-6:] == 'W-C-RC' or m[-1] == 'r':
            mag_cut = 21.5 + 2.5*math.log10((cluster_redshift/0.19)**2.)
        if m[-5:] == 'W-J-V' or m[-5:] == 'W-J-B' or m[-1] == 'g':
            mag_cut = 22. + 2.5*math.log10((cluster_redshift/0.19)**2.)
    # Dead in practice: center_radius defaults to 660. (truthy), and the
    # first assignment references an undefined 'z' before being overwritten.
    if not options.center_radius:
        ''' compute radial size of cut '''
        options.center_radius = 400 / (z/0.4)
        options.center_radius = 400
    print mag_cut, options.lm
    if True: #not options.existing:
        ''' identify star column (optional) '''
        if options.starcolumn:
            savepng = '/nfs/slac/g/ki/ki04/pkelly/photoz/' + cluster + '/seeing.png'
            left, right = fit_starcolumn(mags_star[mask].field('FLUX_RADIUS')*0.2,savepng)
            savepng = options.location + 'column.png'
            pylab.axvline(x=left,ymin=-10,ymax=100)
            pylab.axvline(x=right,ymin=-10,ymax=100)
            pylab.scatter(mags.field('FLUX_RADIUS')*0.2,mags.field(m),s=0.25)
            pylab.xlim(0,2.5)
            pylab.xlabel('SIZE (arcsec)')
            pylab.ylabel(m)
            pylab.savefig(savepng)
            pylab.clf()
            # Keep only objects larger than the star column.
            mask = mags.field('FLUX_RADIUS')*0.2 > right
            if options.z: photoz = photoz[mask]
            mags = mags[mask]
    ''' select galaxies near center of field '''
    #options.center_radius=240
    # Radius from the assumed field center (5000, 5000) px at 0.2"/px.
    mask = ((mags.field('Xpos') - 5000.*scipy.ones(len(mags)))**2. + (mags.field('Ypos') - 5000.*scipy.ones(len(mags)))**2.)**0.5 * 0.2 < float(options.center_radius)
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    print len(mags)
    if options.z: print len(photoz)
    from copy import copy
    mags_mask = copy(mags)
    x = copy(mags.field(m))
    y = copy(mags.field(c1)-mags.field(c2))
    print mags.field(c1), mags.field(c2), c1, c2
    mask = x < mag_cut
    print mag_cut
    #print x, y
    savedir= options.location
    os.system('mkdir -p ' + savedir)
    savepng = options.location + 'redselection.png'
    print options.center_radius, len(y[mask])
    # Find the red-sequence color window from the bright-galaxy colors.
    left, right = fit(y[mask],c1,c2,m,savepng)
    if options.z:
        # DS9 region file of all high-confidence photo-z objects.
        mask = photoz.field('NFILT') > 3
        reg_mags = mags_mask[mask]
        reg_photoz = photoz[mask]
        mask = photoz.field('BPZ_ODDS') > 0.95
        reg_mags = mags_mask[mask]
        reg_photoz = photoz[mask]
        print len(reg_photoz)
        print 'making reg'
        reg = open('all.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(reg_mags.field('Xpos'))):
            reg.write('circle('+str(reg_mags.field('Xpos')[i]) + ',' + str(reg_mags.field('Ypos')[i]) + ',' + str(5) + ') # color=red width=2 text={' + str(reg_photoz.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
        print 'finished reg'
    # Restrict to the bright sample for the linear fit.
    mask = x < mag_cut
    if options.z:
        photoz2 = photoz[mask]
        mags_mask = mags_mask[mask]
    x2 = x[mask]
    y2 = y[mask]
    #print sorted(x2)
    print savepng
    print left, right
    # Keep only colors inside the red-sequence window found by fit().
    if not options.existing:
        mask = y2 > left
        if options.z:
            photoz2 = photoz2[mask]
            mags_mask = mags_mask[mask]
        x2 = x2[mask]
        y2 = y2[mask]
        mask = y2 < right
        if options.z:
            photoz2 = photoz2[mask]
            mags_mask = mags_mask[mask]
        x2 = x2[mask]
        y2 = y2[mask]
    # First-pass linear fit color = slope*mag + intercept.
    if not options.existing: polycoeffs = scipy.polyfit(x2,y2,1)
    print polycoeffs
    yfit = scipy.polyval(polycoeffs, x2)
    print x2, yfit
    if not options.existing: std = scipy.std(abs(yfit - y2))
    print std
    # Clip 2.5-sigma outliers and refit.
    mask = abs(yfit - y2) < std*2.5
    if options.z: photoz3 = photoz2[mask]
    x3 = x2[mask]
    y3 = y2[mask]
    if not options.existing: polycoeffs = scipy.polyfit(x3,y3,1)
    print polycoeffs
    yfit = scipy.polyval(polycoeffs, sorted(x2))
    print x2, yfit
    if not options.existing: std = scipy.std(abs(yfit - y2))
    print std
    # Final members lie within +/- 1.2 sigma of the fitted sequence.
    std_fac = 1.2
    mask = abs(yfit - y2) < std*std_fac
    if options.z:
        photoz2 = photoz2[mask]
        mags_mask = mags_mask[mask]
        print photoz2.field('SeqNr')
        print photoz2.field('BPZ_Z_B')
        # Save the member SeqNr list for downstream pipeline stages.
        fred = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.redseq'
        f = open(fred,'w')
        for id in photoz2.field('SeqNr'):
            f.write(str(id) + '\n')
        f.close()
        reg = open('regseq.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags_mask.field('Xpos'))):
            reg.write('circle('+str(mags_mask.field('Xpos')[i]) + ',' + str(mags_mask.field('Ypos')[i]) + ',' + str(5) + ') # color=green width=2 text={' + str(photoz2.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    pylab.clf()
    savepng = options.location + 'redhistogram.png'
    savepdf = options.location + 'redhistogram.pdf'
    # Photo-z histogram of the selected red-sequence members.
    if options.z:
        lower_lim = cluster_redshift - 0.3
        if lower_lim < 0: lower_lim = 0.0001
        print photoz2.field('BPZ_Z_B')
        a,b,varp = pylab.hist(photoz2.field('BPZ_Z_B'),bins=scipy.arange(lower_lim,cluster_redshift+0.3,0.01),color='red')
        pylab.axvline(x=cluster_redshift,ymin=0,ymax=100,color='blue',linewidth=3)
        pylab.xlabel('Redshift')
        pylab.ylabel('Galaxies')
        pylab.savefig(savepng)
        pylab.savefig(savepdf)
        reg = open('reg.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags_mask.field('Xpos'))):
            reg.write('circle('+str(mags_mask.field('Xpos')[i]) + ',' + str(mags_mask.field('Ypos')[i]) + ',' + str(5) + ') # color=blue width=2 text={' + str(photoz2.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    # Color-magnitude diagram with the fitted sequence and cut lines.
    pylab.clf()
    pylab.plot(sorted(x2),yfit,'b-')
    pylab.plot(sorted(x2),yfit+scipy.ones(len(yfit))*std*std_fac,'b-')
    pylab.plot(sorted(x2),yfit-scipy.ones(len(yfit))*std*std_fac,'b-')
    pylab.scatter(x,y,color='red',s=0.5)
    pylab.axhline(y=left,xmin=-10,xmax=100)
    pylab.axvline(x=mag_cut,ymin=-10,ymax=10)
    pylab.axhline(y=right,xmin=-10,xmax=100)
    pylab.xlabel(m)
    pylab.ylabel(c1 + ' - ' + c2)
    # Overlay galaxies whose photo-z matches the cluster redshift.
    if options.z:
        mask = abs(photoz.field('BPZ_Z_B') - cluster_redshift) < 0.04
        mags = mags[mask]
        photoz = photoz[mask]
        mask = photoz.field('NFILT') > 4
        mags = mags[mask]
        photoz = photoz[mask]
        print 'priormag'
        print photoz.field('priormag')
        print 'nfilt'
        print photoz.field('NFILT')
        import pylab
        x = mags.field(m)
        y = mags.field(c1)-mags.field(c2)
        pylab.scatter(x,y,s=0.5)
        reg = open('reg.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags.field('Xpos'))):
            reg.write('circle('+str(mags.field('Xpos')[i]) + ',' + str(mags.field('Ypos')[i]) + ',' + str(5) + ') # color=red width=2 text={' + str(photoz.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    pylab.xlim(sorted(x)[0],sorted(x)[-2])
    span = (sorted(y)[-2]-sorted(y)[2])/2
    if span > 1: span=1
    median = scipy.median(scipy.array(y))
    pylab.ylim(median -2, median + 2)
    savepng = options.location + 'cmd.png'
    pylab.savefig(savepng)
    # Sky-position plot of the selected sample.
    pylab.clf()
    pylab.scatter(mags.field('Xpos'),mags.field('Ypos'), s=0.02)
    pylab.xlim([0,10000])
    pylab.ylim([0,10000])
    pylab.xlabel('X Pixel')
    pylab.ylabel('Y Pixel')
    savepng = options.location + '/positions.png'
    print savepng
    pylab.savefig(savepng)
    # Human-readable summary of the fit and the selection cuts.
    s = "\nBest fit: y = "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]) + '\n'
    s += "\nCut: y < "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]+std_fac*std) + '\n'
    s += "Cut: y > "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]-std_fac*std ) + '\n'
    s += "x < "+str(mag_cut) + '\n'
    s += 'x = ' + m + '\n'
    s += 'y = ' + c1 + ' - ' + c2 + '\n'
    print s
    f = open(options.location + '/redseqfit','w')
    f.write(s)
    f.close()
    from datetime import datetime
    t2 = datetime.now()
    print options.location
    f = open(options.location + '/redsequence.html','w')
    f.write('<html><tr><td>' + t2.strftime("%Y-%m-%d %H:%M:%S") + '</td></tr><tr><td><h2>Photometric Redshifts of the Red Sequence</h2></td></tr><tr><td><img src="redhistogram.png"></img></td></tr><tr><td><img src="seeing.png"></img></td></tr><<tr><td><img src="column.png"></img></td></tr><tr><td><img src="redselection.png"></img></td></tr><tr><td><img src="cmd.png"></img></td></tr><tr><td><img src="positions.png"></img></td></tr><tr><td>' + s.replace('\n','<br>') + '</td></tr> </html>')
    print 'Wrote output to:', options.location
    print 'Best fit parameters in:', options.location + '/redseqfit'
# Command-line entry point.
if __name__ == '__main__':
    run()
| {
"repo_name": "deapplegate/wtgpipeline",
"path": "redsequence.py",
"copies": "1",
"size": "24118",
"license": "mit",
"hash": 7445156299636904000,
"line_mean": 33.1614730878,
"line_max": 499,
"alpha_frac": 0.5111120325,
"autogenerated": false,
"ratio": 3.4243930143404797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.443550504684048,
"avg_score": null,
"num_lines": null
} |
""" AdamW Optimizer
Impl copied from PyTorch master
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
    r"""Implements AdamW algorithm.
    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
    The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=1e-2, amsgrad=False):
        # Validate hyperparameters up front so a bad config fails loudly.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        # Checkpoints saved before the amsgrad option existed lack the key.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        Returns:
            The loss returned by ``closure``, or ``None`` if no closure given.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Perform stepweight decay. This is the defining AdamW trait:
                # the decay is decoupled from the adaptive gradient update.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                # Perform optimization step
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization (lazy, on first step for each param).
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Decay the first and second moment running average coefficient.
                # FIX: use the keyword alpha=/value= forms; the positional
                # scalar overloads (e.g. add_(scalar, tensor)) were deprecated
                # in PyTorch 1.5 and removed in later releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/optim/adamw.py",
"copies": "1",
"size": "4965",
"license": "apache-2.0",
"hash": 4645784281835121000,
"line_mean": 41.4358974359,
"line_max": 116,
"alpha_frac": 0.5605236657,
"autogenerated": false,
"ratio": 4.089785831960461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150309497660461,
"avg_score": null,
"num_lines": null
} |
"""AdaNet metrics objects and functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
from absl import logging
from adanet import tf_compat
import six
import tensorflow.compat.v2 as tf
def _call_eval_metrics(eval_metrics):
if not eval_metrics:
return {}
fn, args = eval_metrics
if isinstance(args, dict):
return fn(**args)
else:
return fn(*args)
class _EvalMetricsStore(object):
"""Stores and manipulate eval_metric tuples."""
def __init__(self):
self._metric_fns = []
self._args = []
def add_eval_metrics(self, metric_fn, args):
"""Adds an eval_metrics tuple to the internal store."""
self._metric_fns.append(metric_fn)
self._args.append(args)
@property
def metric_fns(self):
return self._metric_fns
def flatten_args(self):
"""Flattens the eval_metrics arguments to a list."""
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
return nest.flatten(self._args)
def pack_args(self, args):
"""Packs the given list of arguments into the internal args structure."""
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
return nest.pack_sequence_as(self._args, args)
class _SubnetworkMetrics(object):
  """A object which creates evaluation metrics for Subnetworks."""
  def __init__(self, use_tpu=False):
    """Creates a _SubnetworkMetrics.
    Args:
      use_tpu: Whether to use TPU-specific variable sharing logic. This ensures
        that eval metrics created on TPU can be written to disk on the host CPU.
    Returns:
      A `_SubnetworkMetrics` instance.
    """
    self._use_tpu = use_tpu
    # Accumulates (metric_fn, args) pairs; filled by create_eval_metrics.
    self._eval_metrics_store = _EvalMetricsStore()
  def create_eval_metrics(self, features, labels, estimator_spec, metric_fn):
    """Creates evaluation metrics from the given arguments.
    Args:
      features: Input `dict` of `Tensor` objects.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head).
      estimator_spec: The `EstimatorSpec` created by a `Head` instance.
      metric_fn: A function which should obey the following signature:
      - Args: can only have following three arguments in any order:
        * predictions: Predictions `Tensor` or dict of `Tensor` created by given
          `Head`.
        * features: Input `dict` of `Tensor` objects created by `input_fn` which
          is given to `estimator.evaluate` as an argument.
        * labels:  Labels `Tensor` or dict of `Tensor` (for multi-head) created
          by `input_fn` which is given to `estimator.evaluate` as an argument.
      - Returns: Dict of metric results keyed by name. Final metrics are a union
        of this and `estimator`s existing metrics. If there is a name conflict
        between this and `estimator`s existing metrics, this will override the
        existing one. The values of the dict are the results of calling a metric
        function, namely a `(metric_tensor, update_op)` tuple.
    """
    # If estimator_spec is not a TPUEstimatorSpec we create dummy metric_fn
    # and args.
    if isinstance(estimator_spec, tf.estimator.EstimatorSpec):
      # Tuple assignment: (a lambda returning the ops dict, empty args list).
      spec_fn, spec_args = lambda: estimator_spec.eval_metric_ops, []
    else:
      spec_fn, spec_args = estimator_spec.eval_metrics
    self._eval_metrics_store.add_eval_metrics(
        self._templatize_metric_fn(spec_fn), spec_args)
    # Track the spec's loss as an explicit "loss" mean metric.
    loss_fn = lambda loss: {"loss": tf_compat.v1.metrics.mean(loss)}
    loss_fn_args = [tf.reshape(estimator_spec.loss, [1])]
    if not self._use_tpu:
      # On CPU we can materialize the metric ops eagerly and close over them.
      loss_ops = _call_eval_metrics((loss_fn, loss_fn_args))
      loss_fn, loss_fn_args = lambda: loss_ops, []
    self._eval_metrics_store.add_eval_metrics(
        self._templatize_metric_fn(loss_fn), loss_fn_args)
    # NOTE: the user supplied metrics_fn must be added last. This is because we
    # want user metrics to override AdaNet's metrics.
    if metric_fn:
      # Build kwargs for metric_fn from the subset of (features, labels,
      # predictions) that its signature actually declares.
      metric_fn_args = {}
      # Calling low level getargs for py_2_and_3 compatibility.
      argspec = inspect.getargs(metric_fn.__code__).args
      if "features" in argspec:
        metric_fn_args["features"] = features
      if "labels" in argspec:
        metric_fn_args["labels"] = labels
      if "predictions" in argspec:
        metric_fn_args["predictions"] = estimator_spec.predictions
      if not self._use_tpu:
        metric_fn_ops = _call_eval_metrics((metric_fn, metric_fn_args))
        metric_fn, metric_fn_args = lambda: metric_fn_ops, []
      self._eval_metrics_store.add_eval_metrics(
          self._templatize_metric_fn(metric_fn), metric_fn_args)
  def _templatize_metric_fn(self, metric_fn):
    """Wraps the given metric_fn with a template so it's Variables are shared.
    Hooks on TPU cannot depend on any graph Tensors. Instead the eval metrics
    returned by metric_fn are stored in Variables. These variables are later
    read from the evaluation hooks which run on the host CPU.
    Args:
      metric_fn: The function to wrap with a template.
    Returns:
      The original metric_fn wrapped with a template function.
    """
    def _metric_fn(*args, **kwargs):
      """The wrapping function to be returned."""
      # We can only be passed in either a dict or a list of tensors.
      args = args if args else kwargs
      metrics = _call_eval_metrics((metric_fn, args))
      if not self._use_tpu:
        # CPU path: no variable indirection needed; return the ops directly.
        return metrics
      logging.log_first_n(logging.INFO,
                          "Writing eval metrics to variables for TPU", 1)
      wrapped_metrics = {}
      # Iterate in sorted key order so variable numbering is deterministic.
      for i, key in enumerate(sorted(metrics)):
        tensor, op = tf_compat.metric_op(metrics[key])
        # key cannot be in var name since it may contain illegal chars.
        var = tf_compat.v1.get_variable(
            "metric_{}".format(i),
            shape=tensor.shape,
            dtype=tensor.dtype,
            trainable=False,
            initializer=tf_compat.v1.zeros_initializer(),
            collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
        # If the update op is a bare Operation (or shape-mismatched), run it
        # first, then refresh the variable from the metric tensor.
        if isinstance(op, tf.Operation) or op.shape != tensor.shape:
          with tf.control_dependencies([op]):
            op = var.assign(tensor)
        metric = (var, var.assign(op))
        wrapped_metrics[key] = metric
      return wrapped_metrics
    # make_template shares the variables above across repeated calls.
    return tf_compat.v1.make_template("metric_fn_template", _metric_fn)
  def eval_metrics_tuple(self):
    """Returns tuple of (metric_fn, tensors) which can be executed on TPU."""
    if not self._eval_metrics_store.metric_fns:
      return None
    def _metric_fn(*args):
      # Re-pack the flat args back into per-fn structures, then merge every
      # fn's metric dict into one.
      metric_fns = self._eval_metrics_store.metric_fns
      metric_fn_args = self._eval_metrics_store.pack_args(args)
      eval_metric_ops = {}
      for metric_fn, args in zip(metric_fns, metric_fn_args):
        eval_metric_ops.update(_call_eval_metrics((metric_fn, args)))
      return eval_metric_ops
    return _metric_fn, self._eval_metrics_store.flatten_args()
  def eval_metrics_ops(self):
    """Returns the eval_metrics_ops."""
    return _call_eval_metrics(self.eval_metrics_tuple())
class _EnsembleMetrics(_SubnetworkMetrics):
  """A object which creates evaluation metrics for Ensembles."""
  def create_eval_metrics(self, features, labels, estimator_spec, metric_fn,
                          architecture):
    """Overrides parent's method to also add the ensemble's architecture."""
    super(_EnsembleMetrics, self).create_eval_metrics(features, labels,
                                                      estimator_spec, metric_fn)
    # The architecture metric takes no runtime args, hence the empty list.
    self._eval_metrics_store.add_eval_metrics(
        self._architecture_as_metric(architecture), [])
  def _architecture_as_metric(self, architecture):
    """Returns a representation of an ensemble's architecture as a tf.metric."""
    def _architecture_metric_fn():
      """Manually creates the tf.metric with a serialized tf.Summary proto."""
      # TODO: Should architecture.subnetworks be sorted by iteration
      # number first? Or perhaps, to make this more general, to have one line
      # for each iteration, with "|" as a delimiter if there are multiple
      # subnetworks in one iteration? Something like:
      # 0 linear
      # 1 dnn_width_32_depth_1 | dnn_width_64_depth_1
      # 2
      # 3 dnn_with_32_depth_2
      # Also consider adding ensemble candidate's name, though that is already
      # included in the ensemble name.
      # Render e.g. "| linear | dnn |" from the subnetwork names.
      architecture_ = " | ".join([name for _, name in architecture.subnetworks])
      architecture_ = "| {} |".format(architecture_)
      # Tag the summary with the "text" plugin so TensorBoard renders it
      # as text rather than as a scalar.
      summary_metadata = tf_compat.v1.SummaryMetadata(
          plugin_data=tf_compat.v1.SummaryMetadata.PluginData(
              plugin_name="text"))
      summary_proto = tf_compat.v1.summary.Summary()
      summary_proto.value.add(
          metadata=summary_metadata,
          tag="architecture/adanet",
          tensor=tf_compat.v1.make_tensor_proto(architecture_, dtype=tf.string))
      # The metric value is the serialized Summary proto; no update op needed.
      architecture_summary = tf.convert_to_tensor(
          value=summary_proto.SerializeToString(), name="architecture")
      return {
          "architecture/adanet/ensembles": (architecture_summary, tf.no_op())
      }
    if not self._use_tpu:
      # CPU path: build the ops once and close over them.
      ops = _architecture_metric_fn()
      return lambda: ops
    else:
      return _architecture_metric_fn
class _IterationMetrics(object):
  """A object which creates evaluation metrics for an Iteration."""
  def __init__(self,
               iteration_number,
               candidates,
               subnetwork_specs,
               use_tpu=False,
               replay_indices_for_all=None):
    # iteration_number: index of this AdaNet iteration (reported as a metric).
    # candidates: ensemble candidates; their ensemble_specs supply metrics.
    # subnetwork_specs: specs of this iteration's subnetworks.
    # replay_indices_for_all: optional dict {candidate: [replay indices]};
    #   when set, replay indices are exported as eval metrics.
    self._iteration_number = iteration_number
    self._candidates = candidates
    self._subnetwork_specs = subnetwork_specs
    self._use_tpu = use_tpu
    self._replay_indices_for_all = replay_indices_for_all
    self._candidates_eval_metrics_store = self._build_eval_metrics_store(
        [candidate.ensemble_spec for candidate in self._candidates])
    self._subnetworks_eval_metrics_store = self._build_eval_metrics_store(
        self._subnetwork_specs)
    # Caches the CPU-side (fn, args) pair built by best_eval_metrics_tuple.
    self._best_eval_metrics_tuple = None
  def _build_eval_metrics_store(self, specs):
    """Creates an _EvalMetricsStore from Subnetwork or Ensemble specs."""
    store = _EvalMetricsStore()
    for spec in specs:
      # Skip specs that carry no metrics (e.g. not built in EVAL mode).
      if not spec.eval_metrics or not spec.eval_metrics.eval_metrics_tuple():
        continue
      metric_fn, args = spec.eval_metrics.eval_metrics_tuple()
      store.add_eval_metrics(metric_fn, args)
    return store
  def best_eval_metric_ops(self, best_candidate_index, mode):
    """Returns best ensemble's metrics."""
    return _call_eval_metrics(
        self.best_eval_metrics_tuple(best_candidate_index, mode))
  def best_eval_metrics_tuple(self, best_candidate_index, mode):
    """Returns (metric_fn, tensors) which computes the best ensemble's metrics.
    Specifically, when metric_fn(tensors) is called, it separates the metric ops
    by metric name. All candidates are not required to have the same metrics.
    When they all share a given metric, an additional metric is added which
    represents that of the best candidate.
    Args:
      best_candidate_index: `Tensor` index of the best candidate in the list.
      mode: Defines whether this is training, evaluation or inference. Eval
        metrics are only defined during evaluation. See `ModeKeys`.
    Returns:
      Dict of metric results keyed by name. The values of the dict are the
      results of calling a metric function.
    """
    if mode != tf.estimator.ModeKeys.EVAL:
      return None
    candidate_args = self._candidates_eval_metrics_store.flatten_args()
    subnetwork_args = self._subnetworks_eval_metrics_store.flatten_args()
    # Flat arg layout: [candidate args..., subnetwork args..., best index].
    args = candidate_args + subnetwork_args
    args.append(tf.reshape(best_candidate_index, [1]))
    def _replay_eval_metrics(best_candidate_idx, eval_metric_ops):
      """Saves replay indices as eval metrics."""
      # _replay_indices_for_all is a dict: {candidate: [list of replay_indices]}
      # We are finding the max length replay list.
      pad_value = max([len(v) for _, v in self._replay_indices_for_all.items()])
      # Creating a matrix of (#candidate) times (max length replay indices).
      # Entry i,j is the jth replay index of the ith candidate (ensemble).
      # Shorter rows are right-padded with -1.
      replay_indices_as_tensor = tf.constant([
          value + [-1] * (pad_value - len(value))
          for _, value in self._replay_indices_for_all.items()
      ])
      # Passing the right entries (entries of the best candidate). Note: we use
      # TensorShape.as_list here so the code works on both TF 1.0 and 2.0.
      for iteration in range(replay_indices_as_tensor.get_shape().as_list()[1]):
        index_t = replay_indices_as_tensor[best_candidate_idx, iteration]
        eval_metric_ops["best_ensemble_index_{}".format(iteration)] = (index_t,
                                                                       index_t)
    def _best_eval_metrics_fn(*args):
      """Returns the best eval metrics."""
      with tf_compat.v1.variable_scope("best_eval_metrics"):
        args = list(args)
        # The final arg is the best-candidate index appended above.
        idx, idx_update_op = tf_compat.v1.metrics.mean(args.pop())
        idx = tf.cast(idx, tf.int32)
        # Re-group the candidate metrics by metric name.
        metric_fns = self._candidates_eval_metrics_store.metric_fns
        metric_fn_args = self._candidates_eval_metrics_store.pack_args(
            args[:len(candidate_args)])
        candidate_grouped_metrics = self._group_metric_ops(
            metric_fns, metric_fn_args)
        # Same for the subnetwork metrics (taken from the tail of args).
        metric_fns = self._subnetworks_eval_metrics_store.metric_fns
        metric_fn_args = self._subnetworks_eval_metrics_store.pack_args(
            args[(len(args) - len(subnetwork_args)):])
        subnetwork_grouped_metrics = self._group_metric_ops(
            metric_fns, metric_fn_args)
        eval_metric_ops = {}
        for metric_name in sorted(candidate_grouped_metrics):
          metric_ops = candidate_grouped_metrics[metric_name]
          # Only export metrics that every candidate provides.
          if len(metric_ops) != len(self._candidates):
            continue
          if metric_name == "loss":
            continue
          values, ops = list(six.moves.zip(*metric_ops))
          best_value = tf.stack(values)[idx]
          # All tensors in this function have been outfed from the TPU, so we
          # must update them manually, otherwise the TPU will hang indefinitely
          # for the value of idx to update.
          ops = list(ops)
          ops.append(idx_update_op)
          # Bundle subnetwork eval metric ops and ensemble "loss"" ops (which
          # is a restricted Estimator keyword) into other metric ops so that
          # they are computed.
          ensemble_loss_ops = candidate_grouped_metrics.get("loss", tf.no_op())
          all_ops = tf.group(ops, ensemble_loss_ops, subnetwork_grouped_metrics)
          eval_metric_ops[metric_name] = (best_value, all_ops)
        # Report the iteration number as a constant metric.
        iteration_number = tf.constant(self._iteration_number)
        eval_metric_ops["iteration"] = (iteration_number, iteration_number)
        if self._replay_indices_for_all:
          _replay_eval_metrics(idx, eval_metric_ops)
        # tf.estimator.Estimator does not allow a "loss" key to be present in
        # its eval_metrics.
        assert "loss" not in eval_metric_ops
        return eval_metric_ops
    if not self._use_tpu:
      # CPU path: build the ops once and cache a closure over them.
      if not self._best_eval_metrics_tuple:
        best_ops = _call_eval_metrics((_best_eval_metrics_fn, args))
        # Tuple assignment: (lambda returning the cached ops, empty args).
        self._best_eval_metrics_tuple = lambda: best_ops, []
      return self._best_eval_metrics_tuple
    return _best_eval_metrics_fn, args
  def _group_metric_ops(self, metric_fns, metric_fn_args):
    """Runs the metric_fns and groups the returned metric ops by name.
    Args:
      metric_fns: The eval_metrics functions to run.
      metric_fn_args: The eval_metrics function arguments.
    Returns:
      The metric ops grouped by name.
    """
    grouped_metrics = collections.defaultdict(list)
    for metric_fn, args in zip(metric_fns, metric_fn_args):
      eval_metric_ops = _call_eval_metrics((metric_fn, args))
      for metric_name in sorted(eval_metric_ops):
        metric_op = tf_compat.metric_op(eval_metric_ops[metric_name])
        grouped_metrics[metric_name].append(metric_op)
    return grouped_metrics
| {
"repo_name": "tensorflow/adanet",
"path": "adanet/core/eval_metrics.py",
"copies": "1",
"size": "16930",
"license": "apache-2.0",
"hash": -4162207008953322500,
"line_mean": 38.6487119438,
"line_max": 109,
"alpha_frac": 0.6600708801,
"autogenerated": false,
"ratio": 3.8955361251725726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5055607005272573,
"avg_score": null,
"num_lines": null
} |
# adapted from https://www.djangosnippets.org/snippets/1376/
from os.path import dirname, join, abspath, isdir
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateDoesNotExist
from django.template.loaders.filesystem import Loader
class NameSpacedLoader(Loader):
    """Template loader that serves templates from a specific app's template dir.

    Template names use the format ``app_label:some/template/name.html``.
    """

    def _get_template_vars(self, template_name):
        """Splits ``app_label:path`` and resolves the app's templates directory.

        Returns:
            (template_name, template_dir) tuple for the named app.

        Raises:
            TemplateDoesNotExist: if no installed app matches the label.
        """
        app_name, template_name = template_name.split(":", 1)
        try:
            template_dir = abspath(join(apps.get_app_config(app_name).path, 'templates'))
        # FIX: modern Django raises LookupError for an unknown app label;
        # older versions raised ImproperlyConfigured. Handle both.
        except (ImproperlyConfigured, LookupError):
            # FIX: TemplateDoesNotExist requires a msg argument; calling it
            # with no args raised TypeError instead of the intended error.
            raise TemplateDoesNotExist(template_name)
        return template_name, template_dir

    def load_template_from_app(self, template_name, template_dirs=None):
        """
        Template loader that only serves templates from specific app's template directory.
        Works for template_names in format app_label:some/template/name.html
        """
        if ":" not in template_name:
            raise TemplateDoesNotExist(template_name)
        template_name, template_dir = self._get_template_vars(template_name)
        if not isdir(template_dir):
            raise TemplateDoesNotExist(template_name)
        # Delegate to the filesystem loader, restricted to this app's dir.
        return super().load_template_source(template_name, template_dirs=[template_dir])

    def load_template_source(self, template_name, template_dirs=None):
        return self.load_template_from_app(template_name)
| {
"repo_name": "ojousima/asylum",
"path": "project/asylum/apptemplateloader.py",
"copies": "1",
"size": "1422",
"license": "mit",
"hash": 9164304903014735000,
"line_mean": 37.4324324324,
"line_max": 90,
"alpha_frac": 0.6842475387,
"autogenerated": false,
"ratio": 4.375384615384616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5559632154084615,
"avg_score": null,
"num_lines": null
} |
# Adaptado por Artur Augusto - arturaugusto@gmail.com
# This is a near-verbatim translation of the example program
# C:\Program Files\National Instruments\NI-DAQ\Examples\DAQmx ANSI C\Analog In\Measure Voltage\Acq-Int Clk\Acq-IntClk.c
import ctypes
import numpy
import time
class DAQmx:
    """Thin ctypes wrapper around the National Instruments NI-DAQmx C DLL.

    Windows-only: loads nicaiu.dll via ctypes.windll and mirrors the constant
    values found in NIDAQmx.h. Exposes a small task-based analog-input API.
    """
    ##############################
    # Setup some typedefs and constants
    # to correspond with values in
    # C:\Program Files\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
    ##############################
    nidaq = ctypes.windll.nicaiu # load the DLL
    # Device name prefix (e.g. "Dev1"); set as a CLASS attribute by __init__.
    physicalChannel = ""
    # the constants
    DAQmx_Val_Volts = 10348
    DAQmx_Val_Rising = 10280
    DAQmx_Val_FiniteSamps = 10178
    int32 = ctypes.c_long
    DAQmx_Val_Cfg_Default = int32(-1)
    # Terminal configurations: (non-)referenced single-ended, differential,
    # pseudo-differential (values copied from NIDAQmx.h).
    DAQmx_Val_RSE = 10083
    DAQmx_Val_NRSE = 10078
    DAQmx_Val_Diff = 10106
    DAQmx_Val_PseudoDiff = 12529
    terminalConfig = DAQmx_Val_Cfg_Default
    def __init__(self, physicalChannel):
        # NOTE(review): this assigns to the CLASS attribute, so every DAQmx
        # instance shares the most recently supplied physicalChannel.
        DAQmx.physicalChannel = physicalChannel
    def create_task(self):
        # Returns a fresh task bound to the shared DLL handle.
        return self.DAQmxTask()
    class DAQmxTask:
        """One NI-DAQmx task: channel setup, clock config, start/stop, reads."""
        def __init__(self):
            self.nidaq = DAQmx.nidaq
            # ctypes aliases matching the typedefs used by the C API headers.
            self.int32 = ctypes.c_long
            self.uInt32 = ctypes.c_ulong
            self.uInt64 = ctypes.c_ulonglong
            self.float64 = ctypes.c_double
            self.TaskHandle = self.uInt32
            # initialize variables
            self.taskHandle = self.TaskHandle(0)
            self.CHK(self.nidaq.DAQmxCreateTask("",ctypes.byref(self.taskHandle)))
            # list of channels
            self.__channels = []
        def CHK(self, err):
            """a simple error checking routine"""
            # Negative return codes are NI-DAQmx errors; look up the message
            # and raise instead of silently continuing.
            if err < 0:
                buf_size = 100
                buf = ctypes.create_string_buffer('\000' * buf_size)
                self.nidaq.DAQmxGetErrorString(err,ctypes.byref(buf),buf_size)
                raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))
        def add_channel(self, channel):
            # channel is the analog-input number as a string (appended to
            # "<device>/ai"); registers it and creates the voltage channel.
            self.__channels.append(channel)
            self.__create_voltage_channel(channel)
            self.n_ch = len(self.__channels)
        def config_sampling(self, samples, rate):
            # Configure the task's sample clock. samples is per channel and
            # rate presumably in Hz -- TODO confirm against the
            # DAQmxCfgSampClkTiming documentation.
            self.num_samples = samples
            self.rate = rate
            self.CHK(self.nidaq.DAQmxCfgSampClkTiming(self.taskHandle,"",self.float64(self.rate),
                            DAQmx.DAQmx_Val_Rising,DAQmx.DAQmx_Val_FiniteSamps,
                            self.uInt64(self.num_samples*self.n_ch)));
        def __create_voltage_channel(self, channel):
            # Create an analog-input voltage channel on the configured device
            # with a -10V..+10V range and the default terminal configuration.
            self.CHK(
                self.nidaq.DAQmxCreateAIVoltageChan(
                    self.taskHandle,
                    DAQmx.physicalChannel + "/ai" + channel,"",
                    DAQmx.DAQmx_Val_Cfg_Default,
                    self.float64(-10.0),
                    self.float64(10.0),
                    DAQmx.DAQmx_Val_Volts,
                    None
                )
            )
        def start(self):
            # Begin acquisition for this task.
            self.CHK(self.nidaq.DAQmxStartTask(self.taskHandle))
        def end(self):
            # Stop and release the task (no-op if it was never created).
            if self.taskHandle.value != 0:
                self.nidaq.DAQmxStopTask(self.taskHandle)
                self.nidaq.DAQmxClearTask(self.taskHandle)
        def get_volt_samples(self):
            # Blocking read of num_samples per channel into one flat float64
            # numpy array (10 s timeout); the DLL writes into the array's
            # underlying buffer via its ctypes address.
            self.data = numpy.zeros((self.num_samples*self.n_ch,),dtype=numpy.float64)
            read = self.int32()
            self.CHK(
                self.nidaq.DAQmxReadAnalogF64(
                    self.taskHandle, #taskHandle TaskHandle
                    self.num_samples, #numSampsPerChan int32
                    self.float64(10.0), #timeout float64
                    0, #fillMode bool32
                    self.data.ctypes.data, #arraySizeInSamps uInt32
                    self.num_samples*self.n_ch,
                    ctypes.byref(read),
                    None
                )
            )
            return self.data
| {
"repo_name": "arturaugusto/nidaq",
"path": "nidaq.py",
"copies": "1",
"size": "3248",
"license": "mit",
"hash": -7518471657855507000,
"line_mean": 29.3551401869,
"line_max": 119,
"alpha_frac": 0.6761083744,
"autogenerated": false,
"ratio": 2.734006734006734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3910115108406734,
"avg_score": null,
"num_lines": null
} |
"""Adapt an HTTP server."""
import time
class ServerAdapter(object):
    """Adapter for an HTTP server.
    If you need to start more than one HTTP server (to serve on multiple
    ports, or protocols, etc.), you can manually register each one and then
    start them all with bus.start:
        s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
        s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
        s1.subscribe()
        s2.subscribe()
        bus.start()
    """
    def __init__(self, bus, httpserver=None, bind_addr=None):
        self.bus = bus
        self.httpserver = httpserver
        # bind_addr is either a (host, port) tuple or a UNIX socket filename.
        self.bind_addr = bind_addr
        # Holds the exception (if any) raised in the server thread.
        self.interrupt = None
        self.running = False
    def subscribe(self):
        # Register this adapter's start/stop with the process bus.
        self.bus.subscribe('start', self.start)
        self.bus.subscribe('stop', self.stop)
    def unsubscribe(self):
        self.bus.unsubscribe('start', self.start)
        self.bus.unsubscribe('stop', self.stop)
    def start(self):
        """Start the HTTP server."""
        if isinstance(self.bind_addr, tuple):
            host, port = self.bind_addr
            on_what = "%s:%s" % (host, port)
        else:
            on_what = "socket file: %s" % self.bind_addr
        if self.running:
            self.bus.log("Already serving on %s" % on_what)
            return
        self.interrupt = None
        if not self.httpserver:
            raise ValueError("No HTTP server has been created.")
        # Start the httpserver in a new thread.
        if isinstance(self.bind_addr, tuple):
            wait_for_free_port(*self.bind_addr)
        import threading
        t = threading.Thread(target=self._start_http_thread)
        t.setName("HTTPServer " + t.getName())
        t.start()
        # Block until the server reports ready (or re-raise its error).
        self.wait()
        self.running = True
        self.bus.log("Serving on %s" % on_what)
    # Bus plugin priority: run after most other 'start' listeners.
    start.priority = 75
    def _start_http_thread(self):
        """HTTP servers MUST be running in new threads, so that the
        main thread persists to receive KeyboardInterrupt's. If an
        exception is raised in the httpserver's thread then it's
        trapped here, and the bus (and therefore our httpserver)
        are shut down.
        """
        try:
            self.httpserver.start()
        except KeyboardInterrupt, exc:
            self.bus.log("<Ctrl-C> hit: shutting down HTTP server")
            self.interrupt = exc
            self.bus.exit()
        except SystemExit, exc:
            self.bus.log("SystemExit raised: shutting down HTTP server")
            self.interrupt = exc
            self.bus.exit()
            # Re-raise so SystemExit still terminates this thread.
            raise
        except:
            # Record whatever was raised so wait() can surface it.
            import sys
            self.interrupt = sys.exc_info()[1]
            self.bus.log("Error in HTTP server: shutting down",
                         traceback=True, level=40)
            self.bus.exit()
            raise
    def wait(self):
        """Wait until the HTTP server is ready to receive requests."""
        # Poll the server's 'ready' flag, surfacing any error the server
        # thread recorded in self.interrupt.
        while not getattr(self.httpserver, "ready", False):
            if self.interrupt:
                raise self.interrupt
            time.sleep(.1)
        # Wait for port to be occupied
        if isinstance(self.bind_addr, tuple):
            host, port = self.bind_addr
            wait_for_occupied_port(host, port)
    def stop(self):
        """Stop the HTTP server."""
        if self.running:
            # stop() MUST block until the server is *truly* stopped.
            self.httpserver.stop()
            # Wait for the socket to be truly freed.
            if isinstance(self.bind_addr, tuple):
                wait_for_free_port(*self.bind_addr)
            self.running = False
            self.bus.log("HTTP Server %s shut down" % self.httpserver)
        else:
            self.bus.log("HTTP Server %s already shut down" % self.httpserver)
    # Bus plugin priority: run before most other 'stop' listeners.
    stop.priority = 25
    def restart(self):
        """Restart the HTTP server."""
        self.stop()
        self.start()
class FlupFCGIServer(object):
    """Adapter for a flup.server.fcgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        # Stash the constructor arguments; the real server is built in start().
        self.args, self.kwargs, self.ready = args, kwargs, False

    def start(self):
        """Start the FCGI server."""
        # Instantiate here rather than in __init__: WSGIServer.__init__ spins
        # up a threadpool, which must not happen before daemonization.
        from flup.server.fcgi import WSGIServer
        self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
        # TODO: report this bug upstream to flup.
        # Workaround for a flup bug on Windows: _restoreSignalHandlers
        # (flup/server/threadedserver.py) iterates self._oldSIGs, which is
        # never set when no handlers were installed, raising AttributeError.
        # Skip handler installation and provide an empty list instead.
        self.fcgiserver._installSignalHandlers = lambda: None
        self.fcgiserver._oldSIGs = []
        self.ready = True
        self.fcgiserver.run()

    def stop(self):
        """Stop the HTTP server."""
        pool = self.fcgiserver._threadPool
        # Forcibly break the fcgi server's main event loop, then let every
        # idle worker thread die off.
        self.fcgiserver._keepGoing = False
        pool.maxSpare = pool._idleCount
        self.ready = False
class FlupSCGIServer(object):
    """Adapter for a flup.server.scgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        # Stash the constructor arguments; the real server is built in start().
        self.args, self.kwargs, self.ready = args, kwargs, False

    def start(self):
        """Start the SCGI server."""
        # Instantiate here rather than in __init__: WSGIServer.__init__ spins
        # up a threadpool, which must not happen before daemonization.
        from flup.server.scgi import WSGIServer
        self.scgiserver = WSGIServer(*self.args, **self.kwargs)
        # TODO: report this bug upstream to flup.
        # Workaround for a flup bug on Windows: _restoreSignalHandlers
        # (flup/server/threadedserver.py) iterates self._oldSIGs, which is
        # never set when no handlers were installed, raising AttributeError.
        # Skip handler installation and provide an empty list instead.
        self.scgiserver._installSignalHandlers = lambda: None
        self.scgiserver._oldSIGs = []
        self.ready = True
        self.scgiserver.run()

    def stop(self):
        """Stop the HTTP server."""
        self.ready = False
        # Forcibly break the scgi server's main event loop, then let every
        # worker thread die off (no spare threads kept).
        self.scgiserver._keepGoing = False
        self.scgiserver._threadPool.maxSpare = 0
def client_host(server_host):
    """Return the host on which a client can connect to the given listener."""
    if server_host == '0.0.0.0':
        # 0.0.0.0 is INADDR_ANY, which should answer on localhost.
        return '127.0.0.1'
    if server_host == '::':
        # :: is IN6ADDR_ANY, which should answer on localhost.
        return '::1'
    return server_host
def check_port(host, port, timeout=1.0):
    """Raise an error if the given port is not free on the given host.

    Raises ValueError for an empty host, and IOError if anything accepts a
    connection on (host, port) within the timeout.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    host = client_host(host)
    port = int(port)
    import socket
    # AF_INET or AF_INET6 socket
    # Get the correct address family for our host (allows IPv6 addresses)
    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                  socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        s = None
        occupied = False
        try:
            s = socket.socket(af, socktype, proto)
            # See http://groups.google.com/group/cherrypy-users/
            # browse_frm/thread/bbfe5eb39c904fe0
            s.settimeout(timeout)
            s.connect((host, port))
            # Someone answered: the port is occupied. Flag it and raise
            # below, OUTSIDE this try block. FIX: raising IOError here was
            # swallowed by the socket.error handler on Pythons where IOError,
            # OSError and socket.error are the same class (PEP 3151), making
            # an occupied port look free.
            occupied = True
        except socket.error:
            # Connection refused / timed out: the port looks free for this
            # address family; keep probing the remaining ones.
            pass
        if s:
            s.close()
        if occupied:
            raise IOError("Port %s is in use on %s; perhaps the previous "
                          "httpserver did not shut down properly." %
                          (repr(port), repr(host)))
def wait_for_free_port(host, port):
    """Wait for the specified port to become free (drop requests).

    Polls check_port up to 50 times (~5 seconds total) before giving up.

    Raises ValueError for an empty host, and IOError if the port is still
    occupied after all retries.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    # FIX: range instead of xrange -- identical behavior on Python 2 for 50
    # items, and no NameError when this module is run under Python 3.
    for trial in range(50):
        try:
            # we are expecting a free port, so reduce the timeout
            check_port(host, port, timeout=0.1)
        except IOError:
            # Give the old server thread time to free the port.
            time.sleep(0.1)
        else:
            # check_port completed without raising: the port is free.
            return
    raise IOError("Port %r not free on %r" % (port, host))
def wait_for_occupied_port(host, port):
    """Wait for the specified port to become active (receive requests).

    Polls check_port up to 50 times (~5 seconds total) before giving up.

    Raises ValueError for an empty host, and IOError if nothing is bound to
    the port after all retries.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    # FIX: range instead of xrange -- identical behavior on Python 2 for 50
    # items, and no NameError when this module is run under Python 3.
    for trial in range(50):
        try:
            check_port(host, port)
        except IOError:
            # check_port raising IOError means something answered: occupied.
            return
        else:
            time.sleep(.1)
    raise IOError("Port %r not bound on %r" % (port, host))
| {
"repo_name": "Apsistec/vboxweb",
"path": "cherrypy/process/servers.py",
"copies": "13",
"size": "9565",
"license": "mit",
"hash": 5483303350625130000,
"line_mean": 34.9586466165,
"line_max": 85,
"alpha_frac": 0.5766858338,
"autogenerated": false,
"ratio": 4.032462057335581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019616400734299353,
"num_lines": 266
} |
"""Adapt an HTTP server."""
import time
class ServerAdapter(object):
    """Adapter for an HTTP server.

    If you need to start more than one HTTP server (to serve on multiple
    ports, or protocols, etc.), you can manually register each one and then
    start them all with bus.start:

        s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
        s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
        s1.subscribe()
        s2.subscribe()
        bus.start()
    """

    def __init__(self, bus, httpserver=None, bind_addr=None):
        # bus: event bus publishing 'start'/'stop' channels.
        # httpserver: object exposing start(), stop() and a 'ready' flag.
        # bind_addr: (host, port) tuple, or a socket-file path string.
        self.bus = bus
        self.httpserver = httpserver
        self.bind_addr = bind_addr
        self.interrupt = None    # exception raised in the server thread, if any
        self.running = False     # True once the server is accepting requests

    def subscribe(self):
        """Register this adapter's start/stop handlers on the bus."""
        self.bus.subscribe('start', self.start)
        self.bus.subscribe('stop', self.stop)

    def unsubscribe(self):
        """Remove this adapter's start/stop handlers from the bus."""
        self.bus.unsubscribe('start', self.start)
        self.bus.unsubscribe('stop', self.stop)

    def start(self):
        """Start the HTTP server."""
        if isinstance(self.bind_addr, tuple):
            host, port = self.bind_addr
            on_what = "%s:%s" % (host, port)
        else:
            on_what = "socket file: %s" % self.bind_addr

        if self.running:
            self.bus.log("Already serving on %s" % on_what)
            return

        self.interrupt = None
        if not self.httpserver:
            raise ValueError("No HTTP server has been created.")

        # Start the httpserver in a new thread.
        if isinstance(self.bind_addr, tuple):
            wait_for_free_port(*self.bind_addr)
        import threading
        t = threading.Thread(target=self._start_http_thread)
        t.setName("HTTPServer " + t.getName())
        t.start()

        # Block until the server reports itself ready (or errors out).
        self.wait()

        self.running = True
        self.bus.log("Serving on %s" % on_what)
    # Bus handler priority: run after most other 'start' subscribers.
    start.priority = 75

    def _start_http_thread(self):
        """HTTP servers MUST be running in new threads, so that the
        main thread persists to receive KeyboardInterrupt's. If an
        exception is raised in the httpserver's thread then it's
        trapped here, and the bus (and therefore our httpserver)
        are shut down.
        """
        try:
            self.httpserver.start()
        except KeyboardInterrupt, exc:
            self.bus.log("<Ctrl-C> hit: shutting down HTTP server")
            self.interrupt = exc
            self.bus.exit()
        except SystemExit, exc:
            self.bus.log("SystemExit raised: shutting down HTTP server")
            self.interrupt = exc
            self.bus.exit()
            raise
        except:
            # Record the exception so wait() can re-raise it in the main
            # thread, then shut the whole bus down.
            import sys
            self.interrupt = sys.exc_info()[1]
            self.bus.log("Error in HTTP server: shutting down",
                         traceback=True, level=40)
            self.bus.exit()
            raise

    def wait(self):
        """Wait until the HTTP server is ready to receive requests."""
        while not getattr(self.httpserver, "ready", False):
            if self.interrupt:
                raise self.interrupt
            time.sleep(.1)

        # Wait for port to be occupied
        if isinstance(self.bind_addr, tuple):
            host, port = self.bind_addr
            wait_for_occupied_port(host, port)

    def stop(self):
        """Stop the HTTP server."""
        if self.running:
            # stop() MUST block until the server is *truly* stopped.
            self.httpserver.stop()
            # Wait for the socket to be truly freed.
            if isinstance(self.bind_addr, tuple):
                wait_for_free_port(*self.bind_addr)
            self.running = False
            self.bus.log("HTTP Server %s shut down" % self.httpserver)
        else:
            self.bus.log("HTTP Server %s already shut down" % self.httpserver)
    # Bus handler priority: run before most other 'stop' subscribers.
    stop.priority = 25

    def restart(self):
        """Restart the HTTP server."""
        self.stop()
        self.start()
class FlupFCGIServer(object):
    """Adapter for a flup.server.fcgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        # Defer server construction until start(); see the note there.
        self.args = args
        self.kwargs = kwargs
        self.ready = False

    def start(self):
        """Instantiate and run the FCGI server (blocks in run())."""
        # The server class is instantiated here rather than in __init__
        # because its __init__ starts a threadpool; doing that too early
        # breaks daemonization.
        from flup.server.fcgi import WSGIServer
        self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
        # TODO: report this bug upstream to flup.
        # Without _oldSIGs, Windows raises:
        #   File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
        #   line 156, in _restoreSignalHandlers
        #     for signum,handler in self._oldSIGs:
        #   AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
        # so stub out the signal machinery entirely.
        self.fcgiserver._installSignalHandlers = lambda: None
        self.fcgiserver._oldSIGs = []
        self.ready = True
        self.fcgiserver.run()

    def stop(self):
        """Stop the FCGI server's event loop and worker threads."""
        # Break the main event loop...
        self.fcgiserver._keepGoing = False
        # ...and let every idle worker thread exit.
        pool = self.fcgiserver._threadPool
        pool.maxSpare = pool._idleCount
        self.ready = False
class FlupSCGIServer(object):
    """Adapter for a flup.server.scgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        # Construction of the real server is postponed to start().
        self.args = args
        self.kwargs = kwargs
        self.ready = False

    def start(self):
        """Instantiate and run the SCGI server (blocks in run())."""
        # Instantiating here (not in __init__) matters: the server's
        # __init__ spawns a threadpool, which must happen after any
        # daemonization.
        from flup.server.scgi import WSGIServer
        self.scgiserver = WSGIServer(*self.args, **self.kwargs)
        # TODO: report this bug upstream to flup.
        # On Windows, threadedserver._restoreSignalHandlers fails with
        #   AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
        # unless we disable the signal-handler bookkeeping ourselves.
        self.scgiserver._installSignalHandlers = lambda: None
        self.scgiserver._oldSIGs = []
        self.ready = True
        self.scgiserver.run()

    def stop(self):
        """Stop the SCGI server's event loop and worker threads."""
        self.ready = False
        # Forcibly stop the scgi server main event loop.
        self.scgiserver._keepGoing = False
        # Force all worker threads to die off.
        self.scgiserver._threadPool.maxSpare = 0
def client_host(server_host):
    """Return the host on which a client can connect to the given listener."""
    # The wildcard addresses (INADDR_ANY / IN6ADDR_ANY) answer on
    # loopback, so point clients there; anything else is returned as-is.
    wildcard_loopback = {'0.0.0.0': '127.0.0.1', '::': '::1'}
    return wildcard_loopback.get(server_host, server_host)
def check_port(host, port, timeout=1.0):
    """Raise IOError if something is already serving on (host, port).

    Returns silently when no listener answers within `timeout` seconds;
    raises ValueError for a falsy host.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    host = client_host(host)
    port = int(port)

    import socket

    # Resolve with AF_UNSPEC so IPv6 listeners are probed as well.
    addr_info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
    for family, socktype, proto, _canonname, _sockaddr in addr_info:
        probe = None
        try:
            probe = socket.socket(family, socktype, proto)
            # See http://groups.google.com/group/cherrypy-users/
            # browse_frm/thread/bbfe5eb39c904fe0
            probe.settimeout(timeout)
            probe.connect((host, port))
            probe.close()
            # A successful connect means something is serving there.
            raise IOError("Port %s is in use on %s; perhaps the previous "
                          "httpserver did not shut down properly." %
                          (repr(port), repr(host)))
        except socket.error:
            # Connection refused / timed out: the port looks free.
            if probe:
                probe.close()
def wait_for_free_port(host, port):
    """Poll until nothing is serving on (host, port).

    Tries up to 50 times with a short delay between attempts and raises
    IOError if the port is still in use afterwards; ValueError for a
    falsy host.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    for _attempt in xrange(50):
        try:
            # Expecting a free port, so a short probe timeout suffices.
            check_port(host, port, timeout=0.1)
            return
        except IOError:
            # Give the old server thread time to release the port.
            time.sleep(0.1)
    raise IOError("Port %r not free on %r" % (port, host))
def wait_for_occupied_port(host, port):
    """Poll until a server is answering on (host, port).

    Raises IOError if nothing binds the port within 50 attempts;
    ValueError for a falsy host.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    trial = 0
    while trial < 50:
        trial += 1
        try:
            check_port(host, port)
        except IOError:
            # "In use" from check_port means the server is up: done.
            return
        else:
            time.sleep(.1)
    raise IOError("Port %r not bound on %r" % (port, host))
| {
"repo_name": "cread/ec2id",
"path": "cherrypy/process/servers.py",
"copies": "1",
"size": "9831",
"license": "apache-2.0",
"hash": -6917413908885389000,
"line_mean": 34.9586466165,
"line_max": 85,
"alpha_frac": 0.5610822907,
"autogenerated": false,
"ratio": 4.135885570046277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196967860746277,
"avg_score": null,
"num_lines": null
} |
'''Adaptation of Keras' 'conv_filter_visualization':
Visualization of the filters of foo_two, via gradient ascent in input space.
'''
from __future__ import print_function
from scipy.misc import imsave
import numpy as np
import time
from foo_three import foo
from keras import backend as K
import random
# Flags selecting what the script does when run (see driver at bottom).
viz = False     # stitch visualizations of random conv filters
mk_img = True   # synthesize class-maximizing input images
# dimensions of the generated pictures for each filter.
img_width = 224
img_height = 224
# the name of the layer we want to visualize - see model definition
#layer_list = ['conv1_2', 'conv2_3', 'conv3_2', 'conv4_2']
layer_name = 'conv4_2'
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a gradient-ascent result tensor into a uint8 image array.

    NOTE: the first normalization steps operate in place, so the caller's
    array is modified as a side effect.
    """
    # normalize tensor: center on 0., ensure std is 0.1
    # (epsilon guards against division by zero for constant inputs)
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to RGB array (8-bit intensity range)
    x *= 255
    if K.image_dim_ordering() == 'th':
        # Theano ordering is (channels, rows, cols): move channels last.
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# Build the network and restore its best training weights.
model = foo()
weights = 'best_weights_3_santiago.h5'  # checkpoint file; also used in output filenames
model.load_weights(weights)
print('Model and weights loaded.')
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names);
# model.layers[0] (the input layer) is deliberately skipped.
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
def normalize(x):
    """Scale a tensor to (approximately) unit L2 norm.

    The epsilon avoids division by zero for all-zero gradients.
    """
    l2 = K.sqrt(K.mean(K.square(x)))
    return x / (l2 + 1e-5)
def filter_viz():
    """Gradient-ascent visualization of filters in `layer_name`.

    Samples 10 random filters (out of 512), synthesizes an input image
    that maximizes each filter's mean activation, and stitches the
    highest-loss results into an n x n grid.

    Returns (stitched_filters, n): a (width, height, 3) float array and
    the grid dimension actually used.
    """
    # make a list of 10 random filter indexes out of the layer's 512 filters
    randsample = random.sample(xrange(512), 10)
    kept_filters = []
    for filter_index in randsample:
        print('Processing filter %d' % filter_index)
        start_time = time.time()
        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        layer_output = layer_dict[layer_name].output
        if K.image_dim_ordering() == 'th':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])
        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]
        # normalization trick: we normalize the gradient
        grads = normalize(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img,
                              K.learning_phase()],
                             [loss, grads])
        # step size for gradient ascent
        step = 1.
        # we start from a gray image with some random noise
        if K.image_dim_ordering() == 'th':
            input_img_data = np.random.random((1, 1, img_width, img_height))
        else:
            input_img_data = np.random.random((1, img_width, img_height, 3))
        input_img_data = (input_img_data - 0.5) * 20 + 128
        # we run gradient ascent for 100 steps
        for i in range(100):
            loss_value, grads_value = iterate([input_img_data, 1])
            input_img_data += grads_value * step
            print('Current loss value:', loss_value)
            if loss_value <= 0.:
                # some filters get stuck to 0, we can skip them
                break
        # decode the resulting input image
        if loss_value > 0:
            img = deprocess_image(input_img_data[0])
            kept_filters.append((img, loss_value))
        end_time = time.time()
        print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

    # The filters with the highest loss are assumed to be better-looking;
    # stitch the best of them onto an n x n grid.
    n = 7
    kept_filters.sort(key=lambda x: x[1], reverse=True)
    # BUG FIX: only 10 filters are sampled (and some may be dropped), so
    # indexing a fixed 7x7 grid below raised IndexError.  Clamp the grid
    # dimension to the number of filters actually kept.
    n = min(n, int(np.sqrt(len(kept_filters)))) if kept_filters else 0
    kept_filters = kept_filters[:n * n]
    # build a black picture with enough space for the n x n grid of
    # img_width x img_height filters, with a 5px margin in between
    margin = 5
    width = max(n * img_width + (n - 1) * margin, 0)
    height = max(n * img_height + (n - 1) * margin, 0)
    stitched_filters = np.zeros((width, height, 3))
    # fill the picture with our saved filters
    for i in range(n):
        for j in range(n):
            img, loss = kept_filters[i * n + j]
            stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                             (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
    return stitched_filters, n
def max_act(output_index):
    """Find inputs that maximize the activation of class `output_index`.

    Runs gradient ascent from 10 random starting images, keeps results
    with positive loss, and stitches the best n*n of them into a single
    grid image.  Returns (stitched_imgs, output_index, n).
    """
    #find input that maximizes activation for specific class
    kept_imgs=[]
    for k in xrange(10):
        # we build a loss function that maximizes the activation
        # of the chosen unit of the network's final layer
        layer_output = model.layers[-1].output
        print (layer_output)
        loss = K.mean(layer_output[:, output_index])
        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]
        # normalization trick: we normalize the gradient
        grads = normalize(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img,
                              K.learning_phase()],
                             [loss, grads])
        # step size for gradient ascent
        step = 1.
        # we start from a gray image with some random noise
        # NOTE(review): unlike filter_viz there is no branch for non-'th'
        # ordering here, so input_img_data is undefined (NameError) when
        # K.image_dim_ordering() != 'th' -- confirm the backend ordering.
        if K.image_dim_ordering() == 'th':
            input_img_data = np.random.random((1, 1, img_width, img_height))
            input_img_data = (input_img_data - 0.5) * 20 + 128
        # we run gradient ascent for 2000 steps
        for i in range(2000):
            loss_value, grads_value = iterate([input_img_data, 1])
            input_img_data += grads_value * step
            print('Current loss value:', loss_value)
            #if loss_value <= 0.:
            # some filters get stuck to 0, we can skip them
            #break
        # decode the resulting input image
        if loss_value > 0:
            img = deprocess_image(input_img_data[0])
            kept_imgs.append((img, loss_value))
        end_time = time.time()  # NOTE(review): unused; start_time is never set here
    # we will stitch the best n x n images into one grid;
    # the images with the highest loss are assumed to be better-looking.
    n = 3
    kept_imgs.sort(key=lambda x: x[1], reverse=True)
    kept_imgs = kept_imgs[:n * n]
    # build a black picture with enough space for
    # our n x n images of img_width x img_height, with a 5px margin in between
    margin = 5
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin
    stitched_imgs = np.zeros((width, height, 3))
    # fill the picture with our saved images
    for i in range(n):
        for j in range(n):
            img, loss = kept_imgs[i * n + j]
            stitched_imgs[(img_width + margin) * i: (img_width + margin) * i + img_width,
                          (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
    return stitched_imgs, output_index, n
# Driver: the `viz` and `mk_img` flags (set near the top of the file)
# select which visualizations to generate and save to disk.
if viz:
    stitched_filters, n = filter_viz()
    # save the result to disk
    imsave('stitched_filters_{0}_{1}_%dx%d.png'.format(layer_name, weights) % (n, n), stitched_filters)
if mk_img:
    # Save the synthesized inputs that maximally activate output class 0.
    img, output_index, n = max_act(0)
    imsave('max_activation_{0}_{1}_{2}x{2}.png'.format(output_index, weights, n), img)
| {
"repo_name": "santiagolopezg/MODS_ConvNet",
"path": "STM_Code/filter_visualize.py",
"copies": "2",
"size": "6943",
"license": "mit",
"hash": -2294245628684149500,
"line_mean": 28.1722689076,
"line_max": 100,
"alpha_frac": 0.6557683998,
"autogenerated": false,
"ratio": 3.144474637681159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9631911514516956,
"avg_score": 0.033666304592840555,
"num_lines": 238
} |
'''Adaptation of the UJI dataset for the "sequential" version of the problem,
rather than the rasterized Dataset from dlt.data.
'''
import numpy as np
import matplotlib.pyplot as plt
import json
class Dataset:
    """In-memory handwriting dataset of pen trajectories and labels.

    Attributes:
        vocab: sorted array of target characters.
        points: float32 array (nitems, nsamples, 2) of (x, y) pen positions.
        breaks: bool array (nitems, nsamples); True where a new stroke starts.
        masks: bool array (nitems, nsamples); True for valid (non-padding) samples.
        labels: int32 array (nitems,) of indices into `vocab`.
    """

    def __init__(self, vocab, points, breaks, masks, labels):
        self.vocab = vocab
        self.points = points
        self.breaks = breaks
        self.masks = masks
        self.labels = labels

    def find(self, char):
        """Return the indices of all items labelled with `char`."""
        label = int(np.where(self.vocab == char)[0])
        return np.where(self.labels == label)[0]

    def show(self, indices=None, limit=64):
        """Plot the selected items (default: the first `limit`) on a square grid."""
        plt.figure(figsize=(16, 16))
        indices = list(range(limit) if indices is None else indices)
        dim = int(np.ceil(np.sqrt(len(indices))))
        for plot_index, index in enumerate(indices):
            plt.subplot(dim, dim, plot_index+1)
            plt.plot(*zip(*self.points[index, self.masks[index]]))
            # Mark stroke endpoints: a sample that starts a stroke, or the
            # sample just before one.
            ends = self.masks[index] & (
                self.breaks[index] | np.roll(self.breaks[index], -1))
            plt.plot(*zip(*self.points[index, ends]), '.')
            plt.title('%d : %s' % (index, self.vocab[self.labels[index]]))
            plt.gca().invert_yaxis()
            plt.gca().set_aspect('equal')
            plt.gca().axis('off')

    @classmethod
    def load(cls, path, max_length=200):
        '''Read the dataset from a JSONlines file.

        Each line must be a JSON object with a one-character 'target' and a
        list of 'strokes' (each a list of [x, y] points).  Sequences are
        truncated to at most `max_length` samples and zero-padded.
        '''
        with open(path) as f:
            data = [json.loads(line) for line in f]
        vocab = np.array(sorted(set(d['target'] for d in data)))
        char_to_index = {ch: n for n, ch in enumerate(vocab)}
        labels = np.array([char_to_index[d['target']] for d in data],
                          dtype=np.int32)
        nsamples = min(max_length, max(
            sum(len(stroke) for stroke in d['strokes']) for d in data))
        points = np.zeros((len(data), nsamples, 2), dtype=np.float32)
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin bool is the documented replacement.
        breaks = np.zeros((len(data), nsamples), dtype=bool)
        masks = np.zeros((len(data), nsamples), dtype=bool)
        for n, d in enumerate(data):
            stroke = np.concatenate(d['strokes'])[:nsamples]
            points[n, :len(stroke)] = stroke
            masks[n, :len(stroke)] = True
            all_breaks = np.cumsum([len(stroke) for stroke in d['strokes']])
            breaks[n, all_breaks[all_breaks < nsamples]] = True
        return cls(vocab=vocab,
                   points=points,
                   breaks=breaks,
                   masks=masks,
                   labels=labels)
| {
"repo_name": "DouglasOrr/DeepLearnTute",
"path": "dlt/sequence.py",
"copies": "1",
"size": "2510",
"license": "mit",
"hash": 2157138035160680400,
"line_mean": 38.21875,
"line_max": 77,
"alpha_frac": 0.5637450199,
"autogenerated": false,
"ratio": 3.6588921282798834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47226371481798834,
"avg_score": null,
"num_lines": null
} |
"""Adaptation of Trust Region Reflective algorithm for a linear least-squares
problem."""
import numpy as np
from numpy.linalg import norm
from scipy.linalg import qr, solve_triangular
from scipy.sparse.linalg import lsmr
from scipy.optimize import OptimizeResult
from .givens_elimination import givens_elimination
from .common import (
EPS, step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
print_header_linear, print_iteration_linear, compute_grad,
regularized_lsq_operator, right_multiplied_operator)
def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
    """Solve the diagonally regularized least-squares system
    ``[A; D] x = [b; 0]`` given a pivoted QR factorization A P = Q R.

    Parameters
    ----------
    m, n : int
        Shape of the original matrix A.
    R : ndarray, shape (n, n)
        Upper triangular factor of A; overwritten unless `copy_R`.
    QTb : ndarray, shape (n,)
        First n components of Q^T b.
    perm : ndarray, shape (n,)
        Column permutation of A: column i of P is column perm[i] of the
        identity matrix.
    diag : ndarray, shape (n,)
        Diagonal entries of the regularizer D.
    copy_R : bool, optional
        Work on a copy of R instead of modifying it in place.

    Returns
    -------
    x : ndarray, shape (n,)
        Found least-squares solution.
    """
    work_R = R.copy() if copy_R else R
    rhs = QTb.copy()
    # Fold the diagonal regularizer into the triangular system.
    givens_elimination(work_R, rhs, diag[perm])

    # Discard rows/columns whose pivots are numerically negligible.
    pivot_mag = np.abs(np.diag(work_R))
    cutoff = EPS * max(m, n) * np.max(pivot_mag)
    keep, = np.nonzero(pivot_mag > cutoff)

    x = np.zeros(n)
    x[perm[keep]] = solve_triangular(work_R[np.ix_(keep, keep)],
                                     rhs[keep])
    return x
def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
    """Simple backtracking line search along descent direction `p`.

    Halves the step length until the quadratic-model cost decrease
    satisfies an Armijo-style sufficient-decrease condition; if the trial
    point then activates a bound, retreats to a strictly interior point
    scaled by `theta`.

    Returns (x, step, cost_change): the (unchanged) base point, the
    accepted step from it, and the corresponding model cost decrease.
    """
    alpha = 1
    while True:
        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)
        if cost_change > -0.1 * alpha * p_dot_g:
            break
        # BUG FIX: the step length was never reduced, so this loop spun
        # forever whenever the sufficient-decrease test failed.  Halving
        # alpha matches the upstream SciPy implementation.
        alpha *= 0.5

    active = find_active_constraints(x_new, lb, ub)
    if np.any(active != 0):
        # The accepted point sits on a bound; pull it strictly inside.
        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)

    return x, step, cost_change
def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
    """Choose the best step among the Newton step `p`, its reflection off
    a bound, and the scaled negative gradient.

    Quantities suffixed `_h` live in the scaled ("hat") space x = d * x_h;
    `theta` keeps the chosen point strictly interior to [lb, ub].
    """
    if in_bounds(x + p, lb, ub):
        # The full step stays feasible: take it unmodified.
        return p

    p_stride, hits = step_size_to_bound(x, p, lb, ub)
    # Reflect the components of the step that hit a bound.
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= -1
    r = d * r_h

    # Restrict step, such that it hits the bound.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = x + p

    # Find the step size along reflected direction.
    r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)

    # Stay interior.
    r_stride_l = (1 - theta) * r_stride_u
    r_stride_u *= theta

    if r_stride_u > 0:
        # Minimize the quadratic model along the reflected ray.
        a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
        r_stride, r_value = minimize_quadratic_1d(
            a, b, r_stride_l, r_stride_u, c=c)
        r_h = p_h + r_h * r_stride
        r = d * r_h
    else:
        # No room to reflect: rule this candidate out.
        r_value = np.inf

    # Now correct p_h to make it strictly interior.
    p_h *= theta
    p *= theta
    p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)

    # Third candidate: the anti-gradient, also kept strictly interior.
    ag_h = -g_h
    ag = d * ag_h
    ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
    ag_stride_u *= theta
    a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
    ag *= ag_stride

    # Return whichever candidate gives the lowest model value.
    if p_value < r_value and p_value < ag_value:
        return p
    elif r_value < p_value and r_value < ag_value:
        return r
    else:
        return ag
def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter,
               verbose):
    """Trust Region Reflective algorithm for bounded linear least squares.

    Minimizes 0.5 * ||A x - b||**2 subject to lb <= x <= ub, starting
    from the unconstrained solution `x_lsq`.  `lsq_solver` selects the
    inner solver: 'exact' (QR-based) or 'lsmr' (iterative).  Returns an
    OptimizeResult with the solution, residual, cost and status code.
    """
    m, n = A.shape
    # Map the start point into the feasible region, strictly interior.
    x, _ = reflective_transformation(x_lsq, lb, ub)
    x = make_strictly_feasible(x, lb, ub, rstep=0.1)

    if lsq_solver == 'exact':
        QT, R, perm = qr(A, mode='economic', pivoting=True)
        QT = QT.T

        if m < n:
            # Pad R so the regularized triangular solve is square.
            R = np.vstack((R, np.zeros((n - m, n))))

        QTr = np.zeros(n)
        k = min(m, n)
    elif lsq_solver == 'lsmr':
        r_aug = np.zeros(m + n)
        auto_lsmr_tol = False
        if lsmr_tol is None:
            lsmr_tol = 1e-2 * tol
        elif lsmr_tol == 'auto':
            auto_lsmr_tol = True

    r = A.dot(x) - b
    g = compute_grad(A, r)
    cost = 0.5 * np.dot(r, r)
    initial_cost = cost

    termination_status = None
    step_norm = None
    cost_change = None

    if max_iter is None:
        max_iter = 100

    if verbose == 2:
        print_header_linear()

    for iteration in range(max_iter):
        # Coleman-Li scaling accounts for proximity to the bounds.
        v, dv = CL_scaling_vector(x, g, lb, ub)
        g_scaled = g * v
        g_norm = norm(g_scaled, ord=np.inf)
        if g_norm < tol:
            termination_status = 1  # first-order optimality reached

        if verbose == 2:
            print_iteration_linear(iteration, cost, cost_change,
                                   step_norm, g_norm)

        if termination_status is not None:
            break

        # Build the scaled ("hat") problem: x = d * x_h.
        diag_h = g * dv
        diag_root_h = diag_h ** 0.5
        d = v ** 0.5
        g_h = d * g

        A_h = right_multiplied_operator(A, d)
        if lsq_solver == 'exact':
            QTr[:k] = QT.dot(r)
            p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
                                           diag_root_h, copy_R=False)
        elif lsq_solver == 'lsmr':
            lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
            r_aug[:m] = r
            if auto_lsmr_tol:
                # Tighten the inner tolerance as the gradient shrinks.
                eta = 1e-2 * min(0.5, g_norm)
                lsmr_tol = max(EPS, min(0.1, eta * g_norm))
            p_h = -lsmr(lsmr_op, r_aug, atol=lsmr_tol, btol=lsmr_tol)[0]

        p = d * p_h

        p_dot_g = np.dot(p, g)
        if p_dot_g > 0:
            termination_status = -1  # not a descent direction

        theta = 1 - min(0.005, g_norm)
        step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
        cost_change = -evaluate_quadratic(A, g, step)

        # Perhaps almost never executed, the idea is that `p` is descent
        # direction thus we must find acceptable cost decrease using simple
        # "backtracking", otherwise algorithm's logic would break.
        if cost_change < 0:
            x, step, cost_change = backtracking(
                A, g, x, p, theta, p_dot_g, lb, ub)
        else:
            x = make_strictly_feasible(x + step, lb, ub, rstep=0)

        step_norm = norm(step)
        r = A.dot(x) - b
        g = compute_grad(A, r)

        if cost_change < tol * cost:
            termination_status = 2  # relative cost decrease below tol

        cost = 0.5 * np.dot(r, r)

    if termination_status is None:
        termination_status = 0  # iteration limit reached

    active_mask = find_active_constraints(x, lb, ub, rtol=tol)

    return OptimizeResult(
        x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
        nit=iteration + 1, status=termination_status,
        initial_cost=initial_cost)
| {
"repo_name": "drpeteb/scipy",
"path": "scipy/optimize/_lsq/trf_linear.py",
"copies": "3",
"size": "7430",
"license": "bsd-3-clause",
"hash": -3934778815466446300,
"line_mean": 29.0809716599,
"line_max": 77,
"alpha_frac": 0.5582772544,
"autogenerated": false,
"ratio": 3.0425880425880427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5100865296988043,
"avg_score": null,
"num_lines": null
} |
# adapted after https://github.com/oduerr/dl_tutorial/blob/master/lasagne/MinimalLasagneCNN.ipynb
import matplotlib.pyplot as plt
import matplotlib.image as imgplot
from lasagne import layers
from lasagne import nonlinearities
from nolearn.lasagne import NeuralNet
import cPickle as pickle
import gzip
import operator
import numpy as np
import warnings
#warnings.filterwarnings('ignore', '.*topo.*')
# Load the 4000-image MNIST subset: X is (N, 1, PIXELS, PIXELS), y labels.
with gzip.open('mnist_4000.pkl.gz', 'rb') as f:
    (X, y) = pickle.load(f)
PIXELS = len(X[0, 0, 0, :])
print X.shape, y.shape, PIXELS

# A small conv -> pool -> conv -> pool -> dense -> softmax network.
net1 = NeuralNet(
    # Geometry of the network
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('hidden4', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 1, PIXELS, PIXELS),  # None in the first axis indicates that the batch size can be set later
    conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),  # pool_size used to be called ds in old versions of lasagne
    conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
    hidden4_num_units=500,
    output_num_units=10, output_nonlinearity=nonlinearities.softmax,

    # learning rate parameters
    update_learning_rate=0.01,
    update_momentum=0.9,
    regression=False,
    # We only train for 10 epochs
    max_epochs=10,
    verbose=1,
    # Training test-set split
    eval_size=0.2
)

# Train on the first 100 images only: a quick demonstration fit.
net = net1.fit(X[0:100, :, :, :], y[0:100])
convParams = net.get_all_params()

# Predict 25 held-out digits and plot them titled "predicted (true)".
toTest = range(3001, 3026)
preds = net1.predict(X[toTest, :, :, :])
fig = plt.figure(figsize=(10, 10))
for i, num in enumerate(toTest):
    a = fig.add_subplot(5, 5, (i + 1))  # NB the one based API sucks!
    plt.axis('off')
    a.set_title(str(preds[i]) + " (" + str(y[num]) + ")")
    plt.imshow(-X[num, 0, :, :], interpolation='none', cmap=plt.get_cmap('gray'))

# Count the learnable parameters, layer by layer.
weights = [w.get_value() for w in net.get_all_params()]
numParas = 0
for i, weight in enumerate(weights):
    n = reduce(operator.mul, np.shape(weight))
    print(str(i), " ", str(np.shape(weight)), str(n))
    numParas += n
print("Number of parameters " + str(numParas))

# Visualize the 32 first-layer 3x3 convolution kernels on a grid.
conv = net.get_all_params()
ws = conv[0].get_value()  # Use the layernumber for the '(32, 1, 3, 3)', '288' layer from above
fig = plt.figure(figsize=(6, 6))
for i in range(0, 32):
    a = fig.add_subplot(6, 6, (i + 1))  # NB the one based API sucks!
    plt.axis('off')
    plt.imshow(ws[i, 0, :, :],
               interpolation='none',
               cmap=plt.get_cmap('gray'))
plt.show() | {
"repo_name": "big-data-research/nn_bucharest_workshop_2015",
"path": "nn_demo/cnn_example01.py",
"copies": "2",
"size": "2633",
"license": "apache-2.0",
"hash": 3726758901218209000,
"line_mean": 30.3571428571,
"line_max": 136,
"alpha_frac": 0.6483099126,
"autogenerated": false,
"ratio": 2.9451901565995526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9579170781776871,
"avg_score": 0.002865857484536342,
"num_lines": 84
} |
# Adapted based on sample code by Christian Vogel (vogelchr@vogel.cx)
import math
import operator
import serial
class AM03127():
    """Runs a AM03127-based signboard (http://www.amplus.com.hk/ aka Maplin N00GA)

    Frames commands in the sign's serial protocol
    (<IDnn> payload checksum <E>) and transmits them over a serial port,
    waiting for an ACK/NACK reply from the sign.
    """

    # Characters that must be replaced or escaped before transmission.
    special_map = {
        u'\n': ' ',
        u'\r': '',
        u'<': '<UBC>',
        u'>': '<UBE>'
    }

    def __init__ (self, signport=None, baud=None, signid=None):
        """signport: serial device path; baud: line speed; signid: the
        sign's protocol address.  Falsy arguments fall back to defaults."""
        default_signport = "/dev/ttyUSB0"
        default_baud = 9600
        default_signid = 1
        if not signport:
            signport = default_signport
        if not baud:
            baud = default_baud
        if not signid:
            signid = 1
        self.signport = signport
        self.baud = baud
        self.signid = signid

    def isAsciiRange (self, c, first, last) :
        """Return True if c is a single-character string within first..last."""
        if type(c) != str or len(c) != 1 :
            return False
        if ord(c) < ord(first) or ord(c) > ord(last) :
            return False
        return True

    def encodeCharset (self, unicode_str) :
        """Translate a unicode message into the sign's cp1252 wire charset.

        An ESC character (\\033) introduces a two-character tag which is
        emitted verbatim as <XY>; characters in special_map are replaced
        by their escape sequences.
        """
        s = ''
        i = iter(unicode(unicode_str))
        for u in i :
            if u == '\033' :
                s = s + '<' + i.next() + i.next() + '>'
            elif u in self.special_map :
                s = s + self.special_map[u]
            else :
                s = s + u.encode('cp1252')
        return s

    def sendPageMessage (self, line=1, page='A', lead=None, disp='A', wait=5, lag=None, msg='') :
        """Build (but do not transmit) a page-message command string.

        line: 1..8; page: 'A'..'Z'; lead/lag: transition effects 'A'..'S';
        disp: display mode letter; wait: page hold time 'A'..'Z'
        (A=0.5 sec); msg: message text (any iterable of strings is joined).
        NOTE(review): the default wait=5 is an int and fails the 'A'..'Z'
        check below, so callers apparently must always pass a letter --
        confirm intended behavior.
        """
        default_lead_lag = 'E'
        if not lead :
            lead = default_lead_lag
        if not lag :
            lag = default_lead_lag
        rmsg = u''.join (map (unicode, msg))
        fmsg = self.encodeCharset(rmsg)
        if line < 1 or line > 8 :
            raise RuntimeError ('Line must be in range 1..8')
        if not self.isAsciiRange (page, 'A', 'Z') :
            raise RuntimeError ('Page must be in range A..Z')
        if not self.isAsciiRange (lead, 'A', 'S') :
            raise RuntimeError ('Lead must be in range A..S')
        if not (disp in 'ABCDEQRSTUabcdeqrstu') :
            raise RuntimeError ('Display must be one of {ABCDEQRSTUabcdeqrstu}')
        if not self.isAsciiRange (wait, 'A', 'Z') :
            raise RuntimeError ('Waittime must be in range A..Z (A=0.5 sec)')
        if not self.isAsciiRange (lag, 'A', 'S') :
            raise RuntimeError ('Lag must be in range A..S')
        return '<L%d><P%c><F%c><M%c><W%c><F%c>'%(line, page, lead, disp, wait, lag) + fmsg

    def setBrightness (self, brightness) :
        """Build (but do not transmit) a brightness command.

        brightness: 'A' (100%) .. 'D' (25%); falsy selects the default 'D'.
        """
        default_brightness='D'
        if not brightness :
            brightness = default_brightness
        if not self.isAsciiRange(brightness, 'A', 'D') :
            raise RuntimeError('Brightness must be in range A..D (100%..25%)')
        return '<B%c>'%(brightness)

    def displayMessage (self, line=1, page='A', lead=None, disp='A', wait=5, lag=None, msg='', brightness='A') :
        """Compose brightness + page commands and transmit them to the sign."""
        packets = []
        data = self.sendPageMessage (line, page, lead, disp, wait, lag, msg)
        packets.append (self.setBrightness(brightness))
        packets.append (data)
        self.sendPackets (packets)

    def encodeMessage (self, board_id, data) :
        """Wrap a command in the <IDnn> ... checksum <E> protocol frame.

        The checksum is the XOR of all payload bytes, in uppercase hex.
        """
        if board_id < 0 or board_id > 255 :
            raise RuntimeError ('Sign ID must be in range 0..255')
        chksum = 0
        for c in data :
            chksum ^= ord(c)
        return '<ID%02X>'%(board_id) + data + '%02X<E>'%(chksum)

    def sendData (self, port, board_id, data) :
        """Send one framed command over `port` and read the sign's reply.

        Returns 'ACK' or 'NACK' on a recognized reply, 'TIMEOUT' when no
        byte arrives within 1 second, or the raw buffer on garbage input.
        """
        port.setTimeout(1)
        encodedMessage = self.encodeMessage (board_id, data)
        print "TX:[" + encodedMessage + "]"
        port.write(encodedMessage)
        replies = [ 'ACK', 'NACK' ]
        buf = ''
        while True :
            c = port.read(1)
            if c == '' :
                return 'TIMEOUT'
            buf = buf + c
            # Keep reading while buf is still a prefix of a known reply.
            valid_start = False
            for r in replies :
                if len(buf) > len(r) :
                    continue
                if buf == r[0:len(buf)] :
                    valid_start = True
                    if len(buf) == len(r) :
                        return buf
            if not valid_start :
                return buf # invalid

    def sendPackets (self, packets_list):
        """Open the configured serial port and send each packet in turn."""
        tty = serial.Serial(self.signport, self.baud)
        for data in packets_list:
            ret = self.sendData(tty, self.signid, data);
            if ret != 'ACK' :
                # We can't do anything at this point anyway, so pass
                pass
| {
"repo_name": "barfle/signboard",
"path": "py/AM03127.py",
"copies": "1",
"size": "4461",
"license": "bsd-3-clause",
"hash": 2198427868855969000,
"line_mean": 31.8014705882,
"line_max": 112,
"alpha_frac": 0.5144586416,
"autogenerated": false,
"ratio": 3.626829268292683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568180765997758,
"avg_score": 0.014621428778984988,
"num_lines": 136
} |
# Adapted by Ralph Haygood from the late, lamented sklearn.ensemble.partial_dependence.partial_dependence by Peter Prettenhofer.
#
# The function sklearn.ensemble.partial_dependence.partial_dependence, on which this package (sklearn-gbmi) depended, doesn't exist anymore. It was partially
# replaced by sklearn.inspection.partial_dependence, but only partially, in that the new function doesn't accept a specified grid as the old one did; compare
#
# https://docs.w3cub.com/scikit_learn/modules/generated/sklearn.ensemble.partial_dependence.partial_dependence/
#
# about the old function with
#
# https://scikit-learn.org/stable/modules/generated/sklearn.inspection.partial_dependence.html
#
# about the new function, and note the disappearance of the grid argument. Accordingly, I've more or less copied Peter Prettenhofer's old code into this file
# and _partial_dependence_tree.pyx, omitting parts unneeded here.
import numpy as np
from sklearn.tree._tree import DTYPE
from ._partial_dependence_tree import _partial_dependence_tree
def partial_dependence(gbrt, target_variables, grid):
    """Sum the partial dependence of *gbrt* over *grid* for *target_variables*.

    Returns an array of shape (n_trees_per_stage, n_grid_points) accumulated
    across every boosting stage, scaled by the model's learning rate.
    """
    targets = np.asarray(target_variables, dtype=np.int32, order='C').ravel()
    grid_arr = np.asarray(grid, dtype=DTYPE, order='C')
    n_stages = gbrt.estimators_.shape[0]
    n_per_stage = gbrt.estimators_.shape[1]
    pdp = np.zeros((n_per_stage, grid_arr.shape[0],), dtype=np.float64, order='C')
    for stage in range(n_stages):
        for k in range(n_per_stage):
            # Accumulate this tree's contribution into the k-th output row.
            _partial_dependence_tree(gbrt.estimators_[stage, k].tree_,
                                     grid_arr, targets,
                                     gbrt.learning_rate, pdp[k])
    return pdp
| {
"repo_name": "ralphhaygood/sklearn-gbmi",
"path": "sklearn_gbmi/partial_dependence.py",
"copies": "1",
"size": "1655",
"license": "mit",
"hash": -3790374796825950000,
"line_mean": 52.3870967742,
"line_max": 157,
"alpha_frac": 0.7510574018,
"autogenerated": false,
"ratio": 3.405349794238683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643658313325689,
"avg_score": 0.0025497765425985934,
"num_lines": 31
} |
# Adapted by raspberrypilearning/the-all-seeing-pi from some original code by bennuttall and waveform80
# -------------------------------------------------------------
from PIL import Image
from itertools import chain #cycle
# EDIT THESE VALUES ------------------------
# Directory containing the countdown overlay images (one "<name>.png" each).
overlays_dir = "/home/pi/photobooth/tests/count_overlays"
# Base filenames (without ".png") of the available overlays, in order.
overlays = ['1', '2', '3']
# ------------------------------------------
overlay = overlays[0] # Starting value
def _get_overlay_image(overlay):
    """Return the named overlay PNG from overlays_dir as a PIL Image."""
    path = overlays_dir + "/" + overlay + ".png"
    return Image.open(path)
def _pad(resolution, width=32, height=16):
# Pads the specified resolution
# up to the nearest multiple of *width* and *height*; this is
# needed because overlays require padding to the camera's
# block size (32x16)
return (
((resolution[0] + (width - 1)) // width) * width,
((resolution[1] + (height - 1)) // height) * height,
)
def remove_overlays(camera):
    """Remove every overlay from the camera preview.

    camera.remove_overlay() mutates camera.overlays, so iterate over a
    snapshot of the list; removing while iterating the live list skips
    every other overlay and leaves some behind.
    """
    for o in list(camera.overlays):
        camera.remove_overlay(o)
def preview_overlay(camera=None, overlay=None):
    """Show the named overlay on the camera preview, replacing any others."""
    # Clear whatever overlays are currently displayed.
    remove_overlays(camera)
    # Load the chosen overlay image.
    overlay_img = _get_overlay_image(overlay)
    # Overlays must be padded to the camera's block resolution (32x16).
    padded = Image.new('RGB', _pad(camera.resolution))
    padded.paste(overlay_img, (0, 0))
    camera.add_overlay(padded.tobytes(), alpha=128, layer=3)
def output_overlay(output=None, overlay=None):
    """Composite the named overlay onto the captured photo, in place.

    Opens the photo at *output*, alpha-composites the overlay on top and
    writes the result back to the same path.
    """
    foreground = _get_overlay_image(overlay)
    background = Image.open(output).convert('RGBA')
    combined = Image.alpha_composite(background, foreground)
    combined.save(output)
# Iterator over the overlay names; chain() yields each name exactly once.
# NOTE(review): the commented-out "cycle" in the import suggests this may
# have been intended to repeat forever -- confirm against the caller.
all_overlays = chain(overlays)
| {
"repo_name": "CaptFennec/photobooth",
"path": "tests/overlay_functions.py",
"copies": "1",
"size": "1894",
"license": "mit",
"hash": 5235295696794256000,
"line_mean": 28.59375,
"line_max": 103,
"alpha_frac": 0.626187962,
"autogenerated": false,
"ratio": 3.649325626204239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9684623955763366,
"avg_score": 0.018177926488174498,
"num_lines": 64
} |
# Adapted by Willie Lawrence from http://wiki.tcl.tk/27638
# Vector glyph table.  Each character maps to a list of polylines; each
# polyline is a flat [x0, y0, x1, y1, ...] coordinate list on a roughly
# 5-wide by 7-tall grid with y increasing upward (draw_letter flips y when
# mapping onto the canvas).
_DEFAULT_FONT = {
"a": [[4, 0, 1, 0, 0, 1, 0, 3, 1, 4, 4, 4, 4, 0]],
"b" : [[0, 6, 0, 0, 3, 0, 4, 1, 4, 3, 3, 4, 0, 4]],
"c" : [[4, 0, 1, 0, 0, 1, 0, 3, 1, 4, 4, 4]],
"d" : [[4, 6, 4, 0, 1, 0, 0, 1, 0, 3, 1, 4, 4, 4]],
"e" : [[4, 0, 1, 0, 0, 1, 0, 3, 1, 4, 3, 4, 4, 3, 4, 2, 0, 2]],
"f" : [[2, 0, 2, 5, 3, 6, 4, 6], [0, 3, 4, 3]],
"g" : [[0, 0, 3, 0, 4, 1, 4, 4, 3, 5, 1, 5, 0, 4, 0, 3, 1, 2, 4, 2]],
"h" : [[0, 6, 0, 0], [0, 4, 3, 4, 4, 3, 4, 0]],
"i" : [[2, 0, 2, 4], [2, 5, 2, 6]],
"j" : [[0, 0, 1, 0, 2, 1, 2, 4], [2, 5, 2, 6]],
"k" : [[0, 0, 0, 6], [4, 0, 0, 2, 3, 5]],
"l" : [[1, 6, 2, 6, 2, 0], [1, 0, 3, 0]],
"m" : [[0, 0, 0, 4, 1, 4, 2, 3, 3, 4, 4, 4, 4, 0], [2, 0, 2, 3]],
"n" : [[0, 4, 0, 0], [0, 3, 1, 4, 3, 4, 4, 3, 4, 0]],
"o" : [[0, 1, 0, 3, 1, 4, 3, 4, 4, 3, 4, 1, 3, 0, 1, 0, 0, 1]],
"p" : [[0, 0, 0, 5, 3, 5, 4, 4, 4, 3, 3, 2, 0, 2]],
"q" : [[4, 0, 4, 5, 1, 5, 0, 4, 0, 3, 1, 2, 4, 2]],
"r" : [[0, 0, 0, 4, 3, 4, 4, 3]],
"s" : [[0, 0, 4, 0, 4, 2, 0, 2, 0, 4, 4, 4]],
"t" : [[1, 6, 1, 1, 2, 0, 3, 0, 4, 1], [0, 5, 3, 5]],
"u" : [[4, 4, 4, 0], [4, 1, 3, 0, 1, 0, 0, 1, 0, 4]],
"v" : [[0, 4, 2, 0, 4, 4]],
"w" : [[0, 4, 0, 0, 2, 2, 4, 0, 4, 4]],
"x" : [[0, 0, 4, 4], [0, 4, 4, 0]],
"y" : [[0, 5, 0, 3, 1, 2, 3, 2, 4, 3], [4, 5, 4, 1, 3, 0, 0, 0]],
"z" : [[0, 4, 4, 4, 0, 0, 4, 0]],
"A" : [[0, 0, 0, 4, 2, 6, 4, 4, 4, 0], [0, 2, 4, 2]],
"B" : [[0, 0, 0, 6, 3, 6, 4, 5, 4, 4, 3, 3, 4, 2, 4, 1, 3, 0, 0, 0], [0, 3, 3, 3]],
"C" : [[4, 0, 0, 0, 0, 6, 4, 6]],
"D" : [[0, 0, 0, 6, 2, 6, 4, 4, 4, 2, 2, 0, 0, 0]],
"E" : [[4, 0, 0, 0, 0, 6, 4, 6], [0, 3, 4, 3]],
"F" : [[0, 0, 0, 6, 4, 6], [0, 3, 3, 3]],
"G" : [[2, 2, 4, 2, 4, 0, 0, 0, 0, 6, 4, 6, 4, 4]],
"H" : [[0, 0, 0, 6], [4, 0, 4, 6], [0, 3, 4, 3]],
"I" : [[0, 0, 4, 0], [2, 0, 2, 6], [0, 6, 4, 6]],
"J" : [[0, 2, 2, 0, 4, 0, 4, 6]],
"K" : [[0, 0, 0, 6], [4, 6, 0, 3, 4, 0]],
"L" : [[4, 0, 0, 0, 0, 6]],
"M" : [[0, 0, 0, 6, 2, 4, 4, 6, 4, 0]],
"N" : [[0, 0, 0, 6, 4, 0, 4, 6]],
"O" : [[0, 0, 0, 6, 4, 6, 4, 0, 0, 0]],
"P" : [[0, 0, 0, 6, 4, 6, 4, 3, 0, 3]],
"Q" : [[0, 0, 0, 6, 4, 6, 4, 2, 2, 0, 0, 0], [2, 2, 4, 0]],
"R" : [[0, 0, 0, 6, 4, 6, 4, 3, 0, 3], [1, 3, 4, 0]],
"S" : [[0, 0, 3, 0, 4, 1, 4, 2, 3, 3, 1, 3, 0, 4, 0, 5, 1, 6, 4, 6]],
"T" : [[2, 0, 2, 6], [0, 6, 4, 6]],
"U" : [[0, 6, 0, 0, 4, 0, 4, 6]],
"V" : [[0, 6, 2, 0, 4, 6]],
"W" : [[0, 6, 0, 0, 2, 2, 4, 0, 4, 6]],
"X" : [[0, 0, 4, 6], [0, 6, 4, 0]],
"Y" : [[0, 6, 2, 4, 4, 6], [2, 0, 2, 4]],
"Z" : [[0, 6, 4, 6, 0, 0, 4, 0], [1, 3, 3, 3]],
"0" : [[0, 0, 0, 6, 4, 6, 4, 0, 0, 0], [0, 0, 4, 6]],
"1" : [[2, 0, 2, 6, 0, 4], [0, 0, 4, 0]],
"2" : [[0, 6, 4, 6, 4, 3, 0, 3, 0, 0, 4, 0]],
"3" : [[0, 6, 4, 6, 4, 0, 0, 0], [0, 3, 4, 3]],
"4" : [[0, 6, 0, 3, 4, 3], [4, 6, 4, 0]],
"5" : [[0, 0, 4, 0, 4, 3, 0, 3, 0, 6, 4, 6]],
"6" : [[4, 6, 0, 6, 0, 0, 4, 0, 4, 3, 0, 3]],
"7" : [[0, 6, 4, 6, 4, 0]],
"8" : [[0, 0, 0, 6, 4, 6, 4, 0, 0, 0], [0, 3, 4, 3]],
"9" : [[4, 0, 4, 6, 0, 6, 0, 3, 4, 3]],
"~" : [[0, 4, 0, 5, 2, 5, 2, 4, 4, 4, 4, 5]],
"`" : [[1, 6, 3, 4]],
"!" : [[2, 0, 2, 1], [2, 2, 2, 6]],
"@" : [[3, 2, 3, 4, 1, 4, 1, 2, 3, 2, 4, 1, 4, 6, 0, 6, 0, 0, 3, 0]],
"#" : [[1, 0, 1, 6], [3, 0, 3, 6], [0, 2, 4, 2], [0, 4, 4, 4]],
"$" : [[0, 2, 0, 1, 4, 1, 4, 3, 0, 3, 0, 5, 4, 5, 4, 4], [2, 0, 2, 6]],
"%" : [[0, 6, 0, 4, 2, 4, 2, 6, 0, 6], [2, 0, 4, 0, 4, 2, 2, 2, 2, 0], [0, 0, 4, 6]],
"^" : [[0, 4, 2, 6, 4, 4]],
"&" : [[4, 0, 1, 0, 0, 1, 0, 2, 3, 5, 2, 6, 1, 6, 0, 5, 4, 0]],
"*" : [[2, 0, 2, 6], [0, 3, 4, 3], [0, 1, 4, 5], [0, 5, 4, 1]],
"(" : [[4, 0, 3, 0, 1, 2, 1, 4, 3, 6, 4, 6]],
")" : [[0, 0, 1, 0, 3, 2, 3, 4, 1, 6, 0, 6]],
"_" : [[0, 0, 4, 0]],
"-" : [[0, 3, 4, 3]],
"+" : [[0, 3, 4, 3], [2, 1, 2, 5]],
"=" : [[0, 2, 4, 2], [0, 4, 4, 4]],
"[" : [[4, 0, 2, 0, 2, 6, 4, 6]],
"]" : [[0, 0, 2, 0, 2, 6, 0, 6]],
"{" : [[4, 0, 2, 0, 2, 2, 1, 3, 2, 4, 2, 6, 4, 6]],
"}" : [[0, 0, 2, 0, 2, 2, 3, 3, 2, 4, 2, 6, 0, 6]],
"|" : [[2, 0, 2, 2], [2, 4, 2, 6]],
"\\" : [[0, 6, 4, 0]],
":" : [[2, 1, 2, 2], [2, 4, 2, 5]],
";" : [[1, 0, 2, 1, 2, 2], [2, 4, 2, 5]],
'"' : [[1, 6, 1, 4], [3, 6, 3, 4]],
"'" : [[2, 6, 2, 4]],
"," : [[1, 0, 2, 1, 2, 2]],
"." : [[2, 0, 2, 1]],
"/" : [[0, 0, 4, 6]],
"?" : [[2, 0, 2, 1], [2, 2, 4, 4, 4, 6, 0, 6, 0, 4]],
"<" : [[4, 6, 0, 3, 4, 0]],
">" : [[0, 0, 4, 3, 0, 6]],
}
def draw_letter(c, basecoords, letter, scale, **args):
    """
    Draw a single letter on canvas c at basecoords, sized by scale.
    A "font" keyword selects the glyph table (default _DEFAULT_FONT);
    all remaining keyword args are forwarded to c.create_line.
    Returns the list of canvas line ids that make up the letter.
    """
    fontarray = args.pop("font", _DEFAULT_FONT)
    xbase, ybase = basecoords
    handles = []
    for coordset in fontarray[letter]:
        line_coords = []
        for i, value in enumerate(coordset):
            scaled = scale * value
            # Even indices are x values (offset right from xbase);
            # odd indices are y values (flipped downward from ybase).
            if i % 2:
                line_coords.append(ybase - scaled)
            else:
                line_coords.append(scaled + xbase)
        handles.append(c.create_line(*line_coords, **args))
    return handles
def draw_string(c, basecoords, string, scale, **args):
    """
    Draw string on canvas c starting at basecoords at the given scale.
    Keyword args are forwarded to the underlying canvas line objects.
    Returns the ids of every canvas line created for the string.
    """
    xbase, ybase = basecoords
    handles = []
    xcoord, ycoord = xbase, ybase
    for char in string:
        if char == " ":
            # Advance the cursor without drawing anything.
            xcoord += 4 * scale
        elif char == "\n" or char == "\r":
            # Line break: back to the left margin, one line down.
            xcoord = xbase
            ycoord += 8 * scale
        else:
            drawn = draw_letter(c, [xcoord, ycoord], char, scale, **args)
            handles.extend(drawn)
            xcoord += 5.5 * scale
    return handles
if __name__ == "__main__":
    # example
    # Interactive demo (Python 2: capital-T Tkinter): draws TEXT on a
    # full-screen canvas, mouse wheel rescales it, mouse motion moves it.
    from Tkinter import *
    top = Tk()
    top.title("VectorFont")
    top.state("zoom")
    ca = Canvas(top)
    # Escape closes the window.
    top.bind("<Escape>", lambda e: top.destroy(), "+")
    ca.pack(expand=YES,fill=BOTH)
    # draw_letter(ca, [100,100], 'A', 5.0,width=5, tag="A")
    TEXT = "Vector Font a"
    draw_string(ca, [100,100], TEXT, 5, tag="my_text")
    G_SCALE = 5
    BASE_COORD = [100,100]
    def _g(*args):
        # Mouse-wheel handler: grow/shrink the text by 1 unit and redraw.
        global G_SCALE
        evt = args[0]
        ca.delete("my_text")
        if evt.delta > 0:
            G_SCALE += 1.0
        if evt.delta < 0:
            G_SCALE -= 1.0
        draw_string(ca, BASE_COORD, TEXT, G_SCALE, tag="my_text")
    def reset_base(evt):
        # Motion handler: anchor the text at the current pointer position
        # (pointer coordinates converted to window-relative) and redraw.
        global BASE_COORD
        BASE_COORD = [top.winfo_pointerx()-top.winfo_rootx(),
                      top.winfo_pointery()-top.winfo_rooty()]
        ca.delete("my_text")
        draw_string(ca, BASE_COORD, TEXT, G_SCALE, tag="my_text")
    ca.bind("<Motion>", reset_base, "+")
    ca.focus_force()
    top.bind("<MouseWheel>", _g, "+")
    top.mainloop()
"repo_name": "cptx032/miniprojects",
"path": "vectorfont.py",
"copies": "1",
"size": "6682",
"license": "unlicense",
"hash": -8162188007733962000,
"line_mean": 36.1277777778,
"line_max": 86,
"alpha_frac": 0.3977850943,
"autogenerated": false,
"ratio": 1.8241878241878242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.27219729184878244,
"avg_score": null,
"num_lines": null
} |
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2016-2020 Peter Hinch
# buttons.py For TFT driver.
# Adapted for (and requires) uasyncio V3
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2016-2020 Peter Hinch
import uasyncio as asyncio
from tft.driver.ugui import Touchable, dolittle, print_centered
from tft.primitives.delay_ms import Delay_ms
from tft.driver.constants import *
# *********** PUSHBUTTON CLASSES ***********
# Button coordinates relate to bounding box (BB). x, y are of BB top left corner.
# likewise width and height refer to BB, regardless of button shape
# If font is None button will be rendered without text
class Button(Touchable):
    """A touch-sensitive pushbutton drawn on the TFT display.

    Supports circular, rectangular and clipped-rectangle shapes, optional
    "lit" highlight colour after a touch, press/release callbacks and an
    optional long-press callback run as a uasyncio task.
    """
    lit_time = 1000        # ms the button stays lit after a touch
    long_press_time = 1000 # ms a press must last to count as a long press
    # NOTE(review): args=[] and lp_args=[] are mutable defaults shared
    # across instances if ever mutated -- confirm callers never modify them.
    def __init__(self, location, *, font, shape=CIRCLE, height=50, width=50, fill=True,
                 fgcolor=None, bgcolor=None, fontcolor=None, litcolor=None, text='',
                 callback=dolittle, args=[], onrelease=True, lp_callback=None, lp_args=[]):
        super().__init__(location, font, height, width, fgcolor, bgcolor, fontcolor, None, False, text, None)
        self.shape = shape
        self.radius = height // 2
        self.fill = fill
        self.litcolor = litcolor
        self.text = text
        self.callback = callback
        self.callback_args = args
        self.onrelease = onrelease
        self.lp_callback = lp_callback
        self.lp_args = lp_args
        self.lp = False # Long press not in progress
        self.orig_fgcolor = fgcolor
        # Timer that reverts the lit colour after lit_time ms.
        if self.litcolor is not None:
            self.delay = Delay_ms(self.shownormal)
        # Lit behaviour only makes sense when a foreground colour exists.
        self.litcolor = litcolor if self.fgcolor is not None else None
    def show(self):
        """Draw (or erase, if not visible) the button on the display."""
        tft = self.tft
        x = self.location[0]
        y = self.location[1]
        if not self.visible:   # erase the button
            tft.usegrey(False)
            tft.fill_rectangle(x, y, x + self.width, y + self.height, self.bgcolor)
            return
        if self.shape == CIRCLE:  # Button coords are of top left corner of bounding box
            x += self.radius
            y += self.radius
            if self.fill:
                tft.fill_circle(x, y, self.radius, self.fgcolor)
            else:
                tft.draw_circle(x, y, self.radius, self.fgcolor)
            if self.font is not None and len(self.text):
                print_centered(tft, x, y, self.text, self.fontcolor, self.font)
        else:
            x1 = x + self.width
            y1 = y + self.height
            if self.shape == RECTANGLE: # rectangle
                if self.fill:
                    tft.fill_rectangle(x, y, x1, y1, self.fgcolor)
                else:
                    tft.draw_rectangle(x, y, x1, y1, self.fgcolor)
                if self.font is not None and len(self.text):
                    print_centered(tft, (x + x1) // 2, (y + y1) // 2, self.text, self.fontcolor, self.font)
            elif self.shape == CLIPPED_RECT: # clipped rectangle
                if self.fill:
                    tft.fill_clipped_rectangle(x, y, x1, y1, self.fgcolor)
                else:
                    tft.draw_clipped_rectangle(x, y, x1, y1, self.fgcolor)
                if self.font is not None and len(self.text):
                    print_centered(tft, (x + x1) // 2, (y + y1) // 2, self.text, self.fontcolor, self.font)
    def shownormal(self):
        # Revert from the lit colour and redraw if on the current screen.
        self.fgcolor = self.orig_fgcolor
        self.show_if_current()
    def _touched(self, x, y): # Process touch
        if self.litcolor is not None:
            self.fgcolor = self.litcolor
            self.show() # must be on current screen
            self.delay.trigger(Button.lit_time)
        if self.lp_callback is not None:
            asyncio.create_task(self.longpress())
        if not self.onrelease:
            self.callback(self, *self.callback_args) # Callback not a bound method so pass self
    def _untouched(self):
        # Cancels any pending long press and fires the release callback.
        self.lp = False
        if self.onrelease:
            self.callback(self, *self.callback_args) # Callback not a bound method so pass self
    async def longpress(self):
        # Fire lp_callback only if the touch lasted long_press_time ms.
        self.lp = True
        await asyncio.sleep_ms(self.long_press_time)
        if self.lp:
            self.lp_callback(self, *self.lp_args)
# Group of buttons, typically at same location, where pressing one shows
# the next e.g. start/stop toggle or sequential select from short list
class ButtonList:
    """Cycle a set of Button instances through one screen location.

    Pressing the visible button hides it and shows the next one in
    sequence, e.g. a start/stop toggle or a sequential pick from a
    short list.
    """
    def __init__(self, callback=dolittle):
        self.user_callback = callback
        self.lstbuttons = []
        self.current = None  # Nothing selected yet
        self._greyed_out = False

    def add_button(self, *args, **kwargs):
        """Create a Button, register it and return it.

        The first button added becomes the visible (current) one.
        """
        btn = Button(*args, **kwargs)
        self.lstbuttons.append(btn)
        is_first = self.current is None
        btn.visible = is_first
        btn.callback = self._callback
        if is_first:
            self.current = btn
        return btn

    def value(self, button=None):
        """Return the current button; if *button* is given, make it current."""
        if button is None or button is self.current:
            return self.current
        previous, self.current = self.current, button
        previous.visible = False
        previous.show()
        button.visible = True
        button.show()
        self.user_callback(button, *button.callback_args)
        return self.current

    def greyed_out(self, val=None):
        """Return the greyed-out state; if *val* is given, set and propagate it."""
        if val is not None and self._greyed_out != val:
            self._greyed_out = val
            for btn in self.lstbuttons:
                btn.greyed_out(val)
            self.current.show()
        return self._greyed_out

    def _callback(self, button, *args):
        # Internal press handler: advance to the next button, wrapping round.
        successor = self.lstbuttons[
            (self.lstbuttons.index(button) + 1) % len(self.lstbuttons)]
        self.current = successor
        button.visible = False
        button.show()
        successor.visible = True
        successor.busy = True # Don't respond to continued press
        successor.show()
        self.user_callback(successor, *args) # user gets button with args they specified
# Group of buttons at different locations, where pressing one shows
# only the current button highlighted and does a callback from the current one
class RadioButtons:
    """A mutually-exclusive group of Buttons at different locations.

    Exactly one button (shown in the highlight colour) is selected at
    any time; pressing any button selects it and runs the callback.
    """
    def __init__(self, highlight, callback=dolittle, selected=0):
        self.user_callback = callback
        self.lstbuttons = []
        self.current = None  # Nothing selected yet
        self.highlight = highlight
        self.selected = selected
        self._greyed_out = False

    def add_button(self, *args, **kwargs):
        """Create and register a Button; the selected-th addition starts
        highlighted."""
        btn = Button(*args, **kwargs)
        self.lstbuttons.append(btn)
        btn.callback = self._callback
        chosen = len(self.lstbuttons) == self.selected + 1
        btn.fgcolor = self.highlight if chosen else btn.orig_fgcolor
        if chosen:
            self.current = btn
        return btn

    def value(self, button=None):
        """Return the current button; if *button* is given, select it."""
        if button is not None and button is not self.current:
            self._callback(button, *button.callback_args)
        return self.current

    def greyed_out(self, val=None):
        """Return the greyed-out state; if *val* is given, set and propagate it."""
        if val is None or self._greyed_out == val:
            return self._greyed_out
        self._greyed_out = val
        for btn in self.lstbuttons:
            btn.greyed_out(val)
        return self._greyed_out

    def _callback(self, button, *args):
        # Highlight the pressed button, reset all others, redraw everything.
        for candidate in self.lstbuttons:
            if candidate is button:
                candidate.fgcolor = self.highlight
                self.current = button
            else:
                candidate.fgcolor = candidate.orig_fgcolor
            candidate.show()
        self.user_callback(button, *args) # user gets button with args they specified
| {
"repo_name": "peterhinch/micropython-tft-gui",
"path": "tft/widgets/buttons.py",
"copies": "1",
"size": "7685",
"license": "mit",
"hash": 5603410194329479000,
"line_mean": 38.0101522843,
"line_max": 109,
"alpha_frac": 0.5927130774,
"autogenerated": false,
"ratio": 3.836744882675986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9915672840844127,
"avg_score": 0.002757023846371677,
"num_lines": 197
} |
"""Adapted from A3CTFPolicy to add V-trace.
Keep in sync with changes to A3CTFPolicy and VtraceSurrogatePolicy."""
import numpy as np
import logging
import gym
import ray
from ray.rllib.agents.impala import vtrace_tf as vtrace
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_ops import explained_variance
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
class VTraceLoss:
    """V-trace actor-critic loss: policy gradient + value + entropy terms."""
    def __init__(self,
                 actions,
                 actions_logp,
                 actions_entropy,
                 dones,
                 behaviour_action_logp,
                 behaviour_logits,
                 target_logits,
                 discount,
                 rewards,
                 values,
                 bootstrap_value,
                 dist_class,
                 model,
                 valid_mask,
                 config,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01,
                 clip_rho_threshold=1.0,
                 clip_pg_rho_threshold=1.0):
        """Policy gradient loss with vtrace importance weighting.

        VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
        batch_size. The reason we need to know `B` is for V-trace to properly
        handle episode cut boundaries.

        Args:
            actions: An int|float32 tensor of shape [T, B, ACTION_SPACE].
            actions_logp: A float32 tensor of shape [T, B].
            actions_entropy: A float32 tensor of shape [T, B].
            dones: A bool tensor of shape [T, B].
            behaviour_action_logp: Tensor of shape [T, B].
            behaviour_logits: A list with length of ACTION_SPACE of float32
                tensors of shapes
                [T, B, ACTION_SPACE[0]],
                ...,
                [T, B, ACTION_SPACE[-1]]
            target_logits: A list with length of ACTION_SPACE of float32
                tensors of shapes
                [T, B, ACTION_SPACE[0]],
                ...,
                [T, B, ACTION_SPACE[-1]]
            discount: A float32 scalar.
            rewards: A float32 tensor of shape [T, B].
            values: A float32 tensor of shape [T, B].
            bootstrap_value: A float32 tensor of shape [B].
            dist_class: action distribution class for logits.
            valid_mask: A bool tensor of valid RNN input elements (#2992).
            config: Trainer config dict.
        """
        # Compute vtrace on the CPU for better perf.
        with tf.device("/cpu:0"):
            self.vtrace_returns = vtrace.multi_from_logits(
                behaviour_action_log_probs=behaviour_action_logp,
                behaviour_policy_logits=behaviour_logits,
                target_policy_logits=target_logits,
                actions=tf.unstack(actions, axis=2),
                # ~dones zeroes the discount at episode terminations.
                discounts=tf.cast(~tf.cast(dones, tf.bool), tf.float32) *
                discount,
                rewards=rewards,
                values=values,
                bootstrap_value=bootstrap_value,
                dist_class=dist_class,
                model=model,
                clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
                clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold,
                                              tf.float32))
        self.value_targets = self.vtrace_returns.vs

        # The policy gradients loss.
        self.pi_loss = -tf.reduce_sum(
            tf.boolean_mask(actions_logp * self.vtrace_returns.pg_advantages,
                            valid_mask))

        # The baseline loss.
        delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
        self.vf_loss = 0.5 * tf.reduce_sum(tf.math.square(delta))

        # The entropy loss.
        self.entropy = tf.reduce_sum(
            tf.boolean_mask(actions_entropy, valid_mask))

        # The summed weighted loss.
        self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
                           self.entropy * entropy_coeff)
def _make_time_major(policy, seq_lens, tensor, drop_last=False):
    """Swaps batch and trajectory axis.

    Args:
        policy: Policy reference
        seq_lens: Sequence lengths if recurrent or None
        tensor: A tensor or list of tensors to reshape.
        drop_last: A bool indicating whether to drop the last
            trajectory item.

    Returns:
        res: A tensor with swapped axes or a list of tensors with
            swapped axes.
    """
    # Apply recursively to each element of a list input.
    if isinstance(tensor, list):
        return [
            _make_time_major(policy, seq_lens, t, drop_last) for t in tensor
        ]
    if policy.is_recurrent():
        # Recurrent: trajectory length is inferred from seq_lens.
        B = tf.shape(seq_lens)[0]
        T = tf.shape(tensor)[0] // B
    else:
        # Important: chop the tensor into batches at known episode cut
        # boundaries. TODO(ekl) this is kind of a hack
        T = policy.config["rollout_fragment_length"]
        B = tf.shape(tensor)[0] // T
    # [B * T, ...] -> [B, T, ...]
    rs = tf.reshape(tensor, tf.concat([[B, T], tf.shape(tensor)[1:]], axis=0))
    # swap B and T axes
    res = tf.transpose(
        rs, [1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0]))))
    if drop_last:
        return res[:-1]
    return res
def build_vtrace_loss(policy, model, dist_class, train_batch):
    """Build a VTraceLoss for one train batch and return its total loss.

    Stores the loss object on policy.loss so stats() can read the parts.
    """
    model_out, _ = model.from_batch(train_batch)
    action_dist = dist_class(model_out, model)

    # Determine how to split the flat logits per (multi-)discrete sub-action.
    if isinstance(policy.action_space, gym.spaces.Discrete):
        is_multidiscrete = False
        output_hidden_shape = [policy.action_space.n]
    elif isinstance(policy.action_space, gym.spaces.MultiDiscrete):
        is_multidiscrete = True
        output_hidden_shape = policy.action_space.nvec.astype(np.int32)
    else:
        is_multidiscrete = False
        output_hidden_shape = 1

    def make_time_major(*args, **kw):
        # Partial application of _make_time_major for this batch.
        return _make_time_major(policy, train_batch.get("seq_lens"), *args,
                                **kw)

    actions = train_batch[SampleBatch.ACTIONS]
    dones = train_batch[SampleBatch.DONES]
    rewards = train_batch[SampleBatch.REWARDS]
    behaviour_action_logp = train_batch[SampleBatch.ACTION_LOGP]
    behaviour_logits = train_batch[SampleBatch.ACTION_DIST_INPUTS]
    unpacked_behaviour_logits = tf.split(
        behaviour_logits, output_hidden_shape, axis=1)
    unpacked_outputs = tf.split(model_out, output_hidden_shape, axis=1)
    values = model.value_function()

    # Mask padding timesteps when the policy is recurrent.
    if policy.is_recurrent():
        max_seq_len = tf.reduce_max(train_batch["seq_lens"])
        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = tf.reshape(mask, [-1])
    else:
        mask = tf.ones_like(rewards)

    # Prepare actions for loss
    loss_actions = actions if is_multidiscrete else tf.expand_dims(
        actions, axis=1)

    # Inputs are reshaped from [B * T] => [T - 1, B] for V-trace calc.
    policy.loss = VTraceLoss(
        actions=make_time_major(loss_actions, drop_last=True),
        actions_logp=make_time_major(
            action_dist.logp(actions), drop_last=True),
        actions_entropy=make_time_major(
            action_dist.multi_entropy(), drop_last=True),
        dones=make_time_major(dones, drop_last=True),
        behaviour_action_logp=make_time_major(
            behaviour_action_logp, drop_last=True),
        behaviour_logits=make_time_major(
            unpacked_behaviour_logits, drop_last=True),
        target_logits=make_time_major(unpacked_outputs, drop_last=True),
        discount=policy.config["gamma"],
        rewards=make_time_major(rewards, drop_last=True),
        values=make_time_major(values, drop_last=True),
        bootstrap_value=make_time_major(values)[-1],
        dist_class=Categorical if is_multidiscrete else dist_class,
        model=model,
        valid_mask=make_time_major(mask, drop_last=True),
        config=policy.config,
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        entropy_coeff=policy.entropy_coeff,
        clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
        clip_pg_rho_threshold=policy.config["vtrace_clip_pg_rho_threshold"])

    return policy.loss.total_loss
def stats(policy, train_batch):
    """Return per-batch training statistics (losses, norms, schedules)."""
    values_batched = _make_time_major(
        policy,
        train_batch.get("seq_lens"),
        policy.model.value_function(),
        drop_last=policy.config["vtrace"])

    return {
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "policy_loss": policy.loss.pi_loss,
        "entropy": policy.loss.entropy,
        "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
        "var_gnorm": tf.linalg.global_norm(policy.model.trainable_variables()),
        "vf_loss": policy.loss.vf_loss,
        # How well the value function explains the V-trace value targets.
        "vf_explained_var": explained_variance(
            tf.reshape(policy.loss.value_targets, [-1]),
            tf.reshape(values_batched, [-1])),
    }
def grad_stats(policy, train_batch, grads):
    """Return gradient statistics: the global norm of all gradients."""
    gnorm = tf.linalg.global_norm(grads)
    return {"grad_gnorm": gnorm}
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Strip NEXT_OBS from the batch before it is shipped.

    The column is not used by this policy, so dropping it saves bandwidth.
    """
    sample_batch.data.pop(SampleBatch.NEXT_OBS)
    return sample_batch
def choose_optimizer(policy, config):
    """Build the optimizer selected by config["opt_type"].

    "adam" gives Adam (keras variant under tf2/tfe, tf1 otherwise);
    anything else gives RMSProp with the configured decay/momentum/epsilon.
    """
    lr = policy.cur_lr
    if policy.config["opt_type"] == "adam":
        if policy.config["framework"] in ["tf2", "tfe"]:
            return tf.keras.optimizers.Adam(lr)
        return tf1.train.AdamOptimizer(lr)
    rms_args = (config["decay"], config["momentum"], config["epsilon"])
    if tfv == 2:
        return tf.keras.optimizers.RMSprop(lr, *rms_args)
    return tf1.train.RMSPropOptimizer(lr, *rms_args)
def clip_gradients(policy, optimizer, loss):
    """Compute gradients w.r.t. the model variables and clip by global norm.

    Stores the clipped gradients on policy.grads (read by grad_stats) and
    returns them zipped with their variables.
    """
    grads_and_vars = optimizer.compute_gradients(
        loss, policy.model.trainable_variables())
    raw_grads = [g for g, _ in grads_and_vars]
    policy.grads, _ = tf.clip_by_global_norm(raw_grads,
                                             policy.config["grad_clip"])
    return list(zip(policy.grads, policy.model.trainable_variables()))
def setup_mixins(policy, obs_space, action_space, config):
    # Initialise the schedule mixins before the loss is built -- they
    # provide policy.cur_lr and policy.entropy_coeff used above.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
# Assemble the V-trace policy class from the functions above via rllib's
# build_tf_policy template.
VTraceTFPolicy = build_tf_policy(
    name="VTraceTFPolicy",
    get_default_config=lambda: ray.rllib.agents.impala.impala.DEFAULT_CONFIG,
    loss_fn=build_vtrace_loss,
    stats_fn=stats,
    grad_stats_fn=grad_stats,
    postprocess_fn=postprocess_trajectory,
    optimizer_fn=choose_optimizer,
    gradients_fn=clip_gradients,
    before_loss_init=setup_mixins,
    mixins=[LearningRateSchedule, EntropyCoeffSchedule],
    get_batch_divisibility_req=lambda p: p.config["rollout_fragment_length"])
| {
"repo_name": "richardliaw/ray",
"path": "rllib/agents/impala/vtrace_tf_policy.py",
"copies": "1",
"size": "11381",
"license": "apache-2.0",
"hash": -8767064036865366000,
"line_mean": 37.3198653199,
"line_max": 79,
"alpha_frac": 0.5960811879,
"autogenerated": false,
"ratio": 3.762314049586777,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48583952374867767,
"avg_score": null,
"num_lines": null
} |
# Adapted from a Karma test startup script
# developed by the Jupyter team here;
# https://github.com/jupyter/jupyter-js-services/blob/master/test/run_test.py
#
# Also uses the flow where we assign a os process group id and shut down the
# server based on that - since the subprocess actually executes the kbase-narrative
# script.
# (recipe here)
# http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
from __future__ import print_function
import subprocess
import sys
import argparse
import threading
import time
import os
import signal
# Ports used by the test harness.
KARMA_PORT = 9876    # NOTE(review): not referenced below -- presumably used by the Karma config
JUPYTER_PORT = 9999  # Port the notebook server listens on

argparser = argparse.ArgumentParser(
    description='Run KBase Narrative unit tests'
)
argparser.add_argument('-b', '--browsers', default='Firefox',
                       help="Browsers to use for Karma test")
argparser.add_argument('-d', '--debug', action='store_true',
                       help="Whether to enter debug mode in Karma")
options = argparser.parse_args(sys.argv[1:])

# Command that launches the narrative notebook server.
nb_command = ['kbase-narrative', '--no-browser', '--NotebookApp.allow_origin="*"', '--port={}'.format(JUPYTER_PORT)]

# Outside a virtualenv (no sys.real_prefix), fall back to the venv-local script.
if not hasattr(sys, 'real_prefix'):
    nb_command[0] = 'narrative-venv/bin/kbase-narrative'

# Start the server in its own process group (os.setsid) so the whole group
# -- including any children the launcher script spawns -- can be killed later.
nb_server = subprocess.Popen(nb_command,
                             stderr=subprocess.STDOUT,
                             stdout=subprocess.PIPE,
                             preexec_fn = os.setsid
                             )

# wait for notebook server to start up
while 1:
    line = nb_server.stdout.readline().decode('utf-8').strip()
    if not line:
        continue
    print(line)
    if 'The Jupyter Notebook is running at: http://localhost:{}/'.format(JUPYTER_PORT) in line:
        break
    if 'is already in use' in line:
        # Another server already owns the port: kill ours and bail out.
        os.killpg(os.getpgid(nb_server.pid), signal.SIGTERM)
        # nb_server.terminate()
        raise ValueError(
            'The port {} was already taken, kill running notebook servers'.format(JUPYTER_PORT)
        )
def readlines():
    """Print the notebook server output."""
    # Runs forever; intended to be started on a daemon thread.
    while True:
        line = nb_server.stdout.readline().decode('utf-8').strip()
        if line:
            print(line)
# Echo server output on a daemon thread so it never blocks shutdown.
thread = threading.Thread(target=readlines)
thread.setDaemon(True)
thread.start()

# time.sleep(15)
test_command = ['grunt', 'test']
# Default exit status 1; only a successful check_call sets it to 0.
resp = 1
try:
    print("Jupyter server started, starting test script.")
    resp = subprocess.check_call(test_command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
    # Test failures are reported via the non-zero exit code (resp stays 1).
    pass
finally:
    print("Done running tests, killing server.")
    # Kill the whole process group created with os.setsid above.
    os.killpg(os.getpgid(nb_server.pid), signal.SIGTERM)
    # nb_server.terminate()
sys.exit(resp)
| {
"repo_name": "mlhenderson/narrative",
"path": "test/unit/run_tests.py",
"copies": "3",
"size": "2572",
"license": "mit",
"hash": 2487062675549614600,
"line_mean": 29.619047619,
"line_max": 116,
"alpha_frac": 0.6862363919,
"autogenerated": false,
"ratio": 3.4523489932885907,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008588617070759927,
"num_lines": 84
} |
# adapted from an example in the eff-bot library guide: os-path-walk-example-3.py
import os
import fnmatch
class GlobDirectoryWalker:
    """Forward iterator over every file under *directory* whose basename
    matches *pattern* (fnmatch syntax).

    Directories are traversed depth-first via an explicit stack; symlinked
    directories are not descended into.  Iteration uses the legacy
    __getitem__ protocol: when the directory stack is exhausted, the
    stack.pop() raises IndexError, which terminates the caller's loop.
    """
    def __init__(self, directory, pattern="*"):
        self.stack = [directory]
        self.pattern = pattern
        self.files = []
        self.index = 0

    def __getitem__(self, index):
        # The index argument is ignored; iteration state lives on self.
        while True:
            if self.index < len(self.files):
                name = self.files[self.index]
                self.index += 1
                fullname = os.path.join(self.directory, name)
                # Queue subdirectories for later traversal.
                if os.path.isdir(fullname) and not os.path.islink(fullname):
                    self.stack.append(fullname)
                if fnmatch.fnmatch(name, self.pattern):
                    return fullname
            else:
                # Move on to the next directory; an IndexError from an
                # empty stack is what ends the iteration.
                self.directory = self.stack.pop()
                self.files = os.listdir(self.directory)
                self.index = 0
#for file in GlobDirectoryWalker(".", "*.py"):
# print file
| {
"repo_name": "p4datasystems/CarnotKE",
"path": "jyhton/ast/globwalk.py",
"copies": "7",
"size": "1121",
"license": "apache-2.0",
"hash": 111662287126218670,
"line_mean": 32.9696969697,
"line_max": 81,
"alpha_frac": 0.5477252453,
"autogenerated": false,
"ratio": 4.413385826771654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8461111072071653,
"avg_score": null,
"num_lines": null
} |
"""Adapted from a portion of the model published in:
Input-output behavior of ErbB signaling pathways as revealed by a mass action
model trained against dynamic data. William W Chen, Birgit Schoeberl, Paul J
Jasper, Mario Niepel, Ulrik B Nielsen, Douglas A Lauffenburger & Peter K
Sorger. Mol Syst Biol. 2009;5:239. Epub 2009 Jan 20. doi:10.1038/msb.2008.74
http://www.nature.com/msb/journal/v5/n1/full/msb200874.html
Implemented by: Jeremy Muhlich
"""
from __future__ import print_function
from pysb import *
from pysb.macros import catalyze_state
Model()

# Monomers: 'k' is the substrate-binding (kinase) site; phosphosites carry
# 'u' (unphosphorylated) / 'p' (phosphorylated) states.
Monomer('Ras', ['k'])
Annotation(Ras, 'http://identifiers.org/uniprot/P01116', 'hasPart')
Annotation(Ras, 'http://identifiers.org/uniprot/P01112', 'hasPart')
Annotation(Ras, 'http://identifiers.org/uniprot/P01111', 'hasPart')
Monomer('Raf', ['s', 'k'], {'s': ['u', 'p']})
Annotation(Raf, 'http://identifiers.org/uniprot/P15056', 'hasPart')
Annotation(Raf, 'http://identifiers.org/uniprot/P04049', 'hasPart')
Annotation(Raf, 'http://identifiers.org/uniprot/P10398', 'hasPart')
Monomer('MEK', ['s218', 's222', 'k'], {'s218': ['u', 'p'], 's222': ['u', 'p']})
Annotation(MEK, 'http://identifiers.org/uniprot/Q02750', 'hasPart')
Annotation(MEK, 'http://identifiers.org/uniprot/P36507', 'hasPart')
Monomer('ERK', ['t185', 'y187'], {'t185': ['u', 'p'], 'y187': ['u', 'p']})
Annotation(ERK, 'http://identifiers.org/uniprot/P27361', 'hasPart')
Annotation(ERK, 'http://identifiers.org/uniprot/P28482', 'hasPart')
Monomer('PP2A', ['ppt'])
Annotation(PP2A, 'http://identifiers.org/mesh/24544')
Monomer('MKP', ['ppt'])
Annotation(MKP, 'http://identifiers.org/mesh/24536')

# Use generic rates for forward/reverse binding and kinase/phosphatase catalysis
kf_bind = 1e-5
kr_bind = 1e-1
kcat_phos = 1e-1
kcat_dephos = 3e-3

# Build handy rate "sets"
klist_bind = [kf_bind, kr_bind]
klist_phos = klist_bind + [kcat_phos]
klist_dephos = klist_bind + [kcat_dephos]
def mapk_single(kinase, pptase, substrate, site):
    """Generate rules for one phosphorylation/dephosphorylation cycle.

    `kinase` phosphorylates `substrate` at `site` (u -> p) and `pptase`
    reverses it (p -> u), both as catalytic binding/conversion rule sets.
    """
    dephos_target = substrate()
    if 'k' in dephos_target.monomer.sites:
        # Substrates that are themselves kinases must not be
        # dephosphorylated while bound to *their own* substrate.
        dephos_target = dephos_target(k=None)
    rules = catalyze_state(kinase, 'k',
                           substrate, site, site, 'u', 'p',
                           klist_phos)
    rules |= catalyze_state(pptase, 'ppt',
                            dephos_target, site, site, 'p', 'u',
                            klist_dephos)
    return rules
def mapk_double(kinase, pptase, substrate, site1, site2):
    """Distributive + ordered double phosphorylation: site1 must be
    phosphorylated before site2 can be."""
    rules = mapk_single(kinase, pptase, substrate({site2: 'u'}), site1)
    rules |= mapk_single(kinase, pptase, substrate({site1: 'p'}), site2)
    return rules
# Ras-Raf-MEK-ERK kinase cascade
mapk_single(Ras, PP2A, Raf, 's')
mapk_double(Raf(s='p'), PP2A, MEK, 's218', 's222')
mapk_double(MEK(s218='p', s222='p'), MKP, ERK, 't185', 'y187')
Initial(Ras(k=None), Parameter('Ras_0', 6e4))
Initial(Raf(s='u', k=None), Parameter('Raf_0', 7e4))
Initial(MEK(s218='u', s222='u', k=None), Parameter('MEK_0', 3e6))
Initial(ERK(t185='u', y187='u'), Parameter('ERK_0', 7e5))
Initial(PP2A(ppt=None), Parameter('PP2A_0', 2e5))
Initial(MKP(ppt=None), Parameter('MKP_0', 1.7e4))
Observable('ppMEK', MEK(s218='p', s222='p'))
Observable('ppERK', ERK(t185='p', y187='p'))
if __name__ == '__main__':
print(__doc__, "\n", model)
print("""
NOTE: This model code is designed to be imported and programatically
manipulated, not executed directly. The above output is merely a
diagnostic aid.""")
| {
"repo_name": "johnbachman/pysb",
"path": "pysb/examples/kinase_cascade.py",
"copies": "5",
"size": "3732",
"license": "bsd-2-clause",
"hash": -8036622948339279000,
"line_mean": 40.010989011,
"line_max": 80,
"alpha_frac": 0.6621114684,
"autogenerated": false,
"ratio": 2.6714387974230496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5833550265823049,
"avg_score": null,
"num_lines": null
} |
import numpy as np
def _wcs_to_celestial_frame_builtin(wcs):
    """Map a WCS to one of astropy's built-in celestial frames.

    Returns a frame instance (FK4, FK4NoETerms, FK5, ICRS or Galactic)
    or None when the WCS has no recognizable celestial component.
    """
    from astropy.coordinates import FK4, FK4NoETerms, FK5, ICRS, Galactic
    from astropy.time import Time
    from astropy.wcs import WCSSUB_CELESTIAL
    # Keep only the celestial part of the axes.
    celestial = wcs.sub([WCSSUB_CELESTIAL])
    if celestial.wcs.lng == -1 or celestial.wcs.lat == -1:
        return None
    equinox = celestial.wcs.equinox
    if np.isnan(equinox):
        equinox = None
    xcoord = celestial.wcs.ctype[0][:4]
    ycoord = celestial.wcs.ctype[1][:4]
    radesys = celestial.wcs.radesys
    # FITS-standard default when RADESYS is absent for an RA/Dec pair:
    # ICRS without an equinox, otherwise FK4 before B1984, FK5 after.
    if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-':
        if equinox is None:
            radesys = "ICRS"
        elif equinox < 1984.:
            radesys = "FK4"
        else:
            radesys = "FK5"
    # FK4 systems interpret the equinox as a Besselian year, FK5 as Julian.
    equatorial = {'FK4': (FK4, 'byear'),
                  'FK4-NO-E': (FK4NoETerms, 'byear'),
                  'FK5': (FK5, 'jyear')}
    if radesys in equatorial:
        frame_cls, time_format = equatorial[radesys]
        if equinox is not None:
            equinox = Time(equinox, format=time_format)
        return frame_cls(equinox=equinox)
    if radesys == 'ICRS':
        return ICRS()
    if xcoord == 'GLON' and ycoord == 'GLAT':
        return Galactic()
    return None
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
class custom_frame_mappings(object):
    """Context manager that temporarily registers extra WCS-to-frame
    mapping functions on top of the built-in ones.

    Parameters
    ----------
    mappings : callable or list of callables, optional
        Each callable takes an `~astropy.wcs.WCS` instance and returns
        either a coordinate frame instance or `None`.
    """
    def __init__(self, mappings=None):
        # BUG FIX: the original used a mutable default (`mappings=[]`),
        # a classic Python anti-pattern; `None` sentinel avoids it.
        if mappings is None:
            mappings = []
        elif hasattr(mappings, '__call__'):
            # Accept a single bare callable for convenience.
            mappings = [mappings]
        WCS_FRAME_MAPPINGS.append(mappings)
    def __enter__(self):
        # Return self (the original returned None) so that
        # `with custom_frame_mappings(f) as cm:` binds something useful;
        # backward-compatible for callers that ignore the value.
        return self
    def __exit__(self, type, value, tb):
        # Pop the mapping set pushed in __init__.
        WCS_FRAME_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
    """
    For a given WCS, return the coordinate frame that matches the celestial
    component of the WCS.

    Parameters
    ----------
    wcs : :class:`~astropy.wcs.WCS` instance
        The WCS to find the frame for

    Returns
    -------
    frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
        An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
        subclass instance that best matches the specified WCS.

    Notes
    -----
    To extend this function to frames not defined in astropy.coordinates, you
    can write your own function which should take a :class:`~astropy.wcs.WCS`
    instance and should return either an instance of a frame, or `None` if no
    matching frame was found. You can register this function temporarily with::

        >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_frame_mappings
        >>> with custom_frame_mappings(my_function):
        ...     wcs_to_celestial_frame(...)
    """
    # Built-in mappings come first; later-registered sets are consulted
    # afterwards.  The first mapping that recognizes the WCS wins.
    for mapping_set in WCS_FRAME_MAPPINGS:
        for to_frame in mapping_set:
            candidate = to_frame(wcs)
            if candidate is not None:
                return candidate
    raise ValueError("Could not determine celestial frame corresponding to "
                     "the specified WCS object")
| {
"repo_name": "JudoWill/glue",
"path": "glue/external/wcsaxes/wcs_utils.py",
"copies": "1",
"size": "4843",
"license": "bsd-3-clause",
"hash": 4348327445934383000,
"line_mean": 34.0942028986,
"line_max": 89,
"alpha_frac": 0.6659095602,
"autogenerated": false,
"ratio": 3.8497615262321143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00042064476207698715,
"num_lines": 138
} |
# Adapted from boids implementation by Stephen Chappell
# Accessed on 11/20/2012 at http://code.activestate.com/recipes/502240/
# Which was in turn motivated by the following pseudocode:
# http://www.vergenet.net/~conrad/boids/pseudocode.html
# Note: OSC messages (incoming and outgoing) are
# normalized to the range 0-1
import random # FOR RANDOM BEGINNINGS
from Tkinter import * # ALL VISUAL EQUIPMENT
import socket, OSC # OSC COMMUNICATION
import time, threading
import math
DIMLIMIT = 700 # LIMIT OF DIMENSION VALUES
WIDTH = DIMLIMIT # OF SCREEN IN PIXELS
HEIGHT = DIMLIMIT # OF SCREEN IN PIXELS
BOIDS = 1 + 6 + 12 # IN SIMULATION
BOIDMASS = 2 # IN SIMULATION
BLIMIT = 30 # LIMIT FOR BOID PERCEPTION
ATTRACTORS = 9 # IN SIMULATION
ATTRACTION = 3 # ATTRACTOR INFLUENCE
WALL = 100 # FROM SIDE IN PIXELS
WALL_FORCE = 30 # ACCELERATION PER MOVE
SPEED_LIMIT = 1000 # FOR BOID VELOCITY
BOID_RADIUS = 3 # FOR BOIDS IN PIXELS
ATTRACTOR_RADIUS = 5 # FOR BOIDS IN PIXELS
OFFSET_START = 20 # FROM WALL IN PIXELS
FRAMES_PER_SEC = 40 # SCREEN UPDATE RATE
UPDATE_TIME = 500 / FRAMES_PER_SEC
WINDOWED = True # MOVABLE PROGRAM
NDIMS = 6 # MULTIDIMENSIONAL SWARM SPACE
# FOR OSC
RECEIVE_ADDRESS = ('127.0.0.1', 9000) # tupple with ip, port.
SEND_ADDRESS = ('127.0.0.1', 57120) # SuperCollider on local machine.
# FOR CREATING/SENDING NOTE EVENTS
MAXFREQ = 90 #MIDI FREQ
MINFREQ = 20
MAXAMP = 0.9
MAXDUR = 180
MINDUR = 1
MAXIOI = 200
MINIOI = 1
FREQSCALER = float(MAXFREQ - MINFREQ) / float(DIMLIMIT)
AMPSCALER = float(MAXAMP) / float(DIMLIMIT)
DURSCALER = float(MAXDUR - MINDUR) / float(DIMLIMIT)
IOISCALER = float(MAXIOI - MINIOI) / float(DIMLIMIT)
################################################################################
def main():
    # Start the program: build the simulation state, then hand control to
    # Tkinter's event loop (blocks until the window is closed).
    initialise()
    mainloop()
def initialise():
    # Setup simulation variables and construct all simulation objects.
    global sim_time # discrete simulation time counter
    sim_time = 0 # start at 0
    global note_time # sim_time at which the next note fires
    note_time = 0 # start at zero - see makesound()
    build_boids()
    build_attractors()
    build_graph()
    startOSC()
def build_graph():
    # Build GUI environment: a Tk root window holding one black Canvas,
    # stored in the module-level `graph` used by draw()/update().
    global graph
    root = Tk()
    if WINDOWED:
        root.resizable(False, False)
        root.title('Swarm')
    else:
        # Borderless window centered on the screen.
        root.overrideredirect(True)
    x = (root.winfo_screenwidth() - WIDTH) / 2
    y = (root.winfo_screenheight() - HEIGHT) / 2
    root.geometry('%dx%d+%d+%d' % (WIDTH, HEIGHT, x, y))
    # Route window-close through quit_handler so the OSC server shuts down.
    root.protocol("WM_DELETE_WINDOW", quit_handler)
    graph = Canvas(root, width=WIDTH, height=HEIGHT, background='black')
    # Python 2 integer division: 1000/40 = 25 ms until the first update.
    graph.after(1000 / FRAMES_PER_SEC, update)
    graph.pack()
def update():
    # Main simulation loop: reschedule itself, then draw, move the
    # swarm, and possibly emit a note.
    graph.after(UPDATE_TIME, update)
    draw()
    move()
    makesound()
    global sim_time #necessary to prevent UnboundLocalError
    sim_time += 1 # iterate discrete time variable
    #if not (sim_time % 50): print "%d\n" % (sim_time) #keep track of time
    #if not (sim_time % 500): #randomly move attractors every once in a while
    # for attractor in attractors:
    # attractor.rand_update()
def draw():
    """Render every boid (white) and attractor (red) on the canvas.

    Only the first two of the NDIMS swarm dimensions are visualized.
    """
    graph.delete(ALL)
    # Boids are drawn first, then attractors, as in the original loop order.
    for flock, radius, colour in ((boids, BOID_RADIUS, 'white'),
                                  (attractors, ATTRACTOR_RADIUS, 'red')):
        for member in flock:
            cx = member.position.x[0]
            cy = member.position.x[1]
            graph.create_oval((cx - radius, cy - radius,
                               cx + radius, cy + radius), fill=colour)
    graph.update()
def move():
    """Advance every boid one step: wall repulsion, steering rules,
    then position integration."""
    for member in boids:
        simulate_wall(member)
        member.update_velocity(boids)
        member.move()
def makesound():
    # If it is time for a note, send the swarm centroid (normalized to
    # 0-1 per dimension) over OSC and schedule the next note; the last
    # dimension of the centroid sets the inter-onset interval (IOI).
    global note_time
    global sim_time
    if not note_time:
        note_time = random.randint(10,100) #time of first note, in sim_time units
    ioi = 0 #note ioi is the last dimension
    #check to see if it's time to output a note
    if (note_time < sim_time):
        dimvals = [0.0]*NDIMS; #array for centroid values
        for i in range(NDIMS):
            dimvals[i] = 0
            for boid in boids:
                dimvals[i] += boid.position.x[i]
            dimvals[i] /= float( len(boids) * DIMLIMIT ) # normalize to range 0-1
            dimvals[i] = max(min(dimvals[i],1.0),0.0) # make sure it's in range
        sendMsg('/swarmNote',dimvals) # send centroid values via osc
        ioi = dimvals[NDIMS-1] * float(MAXIOI - MINIOI) + float(MINIOI) #ioi is last dim
        note_time = sim_time + ioi # assign next note time
def simulate_wall(boid):
    """Push a boid back toward the interior whenever it strays within
    WALL units of either boundary, independently in every dimension."""
    for axis in range(NDIMS):
        coord = boid.position.x[axis]
        if coord < WALL:
            boid.velocity.x[axis] += WALL_FORCE
        elif coord > WIDTH - WALL:
            boid.velocity.x[axis] -= WALL_FORCE
def limit_speed(boid):
    """Clamp each velocity component to +/- SPEED_LIMIT, preserving sign."""
    for axis in range(NDIMS):
        magnitude = abs(boid.velocity.x[axis])
        if magnitude > SPEED_LIMIT:
            # v / (|v|/LIMIT) == sign(v) * LIMIT
            boid.velocity.x[axis] /= magnitude / SPEED_LIMIT
def build_boids():
    # Create the module-level `boids` tuple of BOIDS Boid instances.
    global boids
    boids = tuple(Boid([DIMLIMIT]*NDIMS, OFFSET_START, FRAMES_PER_SEC) for boid in xrange(BOIDS))
def build_attractors():
    # Create the module-level `attractors` tuple of ATTRACTORS instances.
    global attractors
    attractors = tuple(Attractor(ATTRACTION) for attractor in xrange(ATTRACTORS))
################################################################################
# MULTIDIMENTIONAL SPACE
# Note: we implement dimensional decoupling. Makes more sense musically.
class MultiD:
    """A vector in the NDIMS-dimensional swarm space.

    All arithmetic is component-wise ("dimensional decoupling"); each
    dimension evolves independently, which the authors note makes more
    sense musically.
    """
    def __init__(self, x):
        # Store components as floats so division behaves consistently
        # under Python 2's integer-division rules.
        self.x = [float(i) for i in x]
    def __repr__(self):
        # BUG FIX: the original returned 'MultiD:\n' unconditionally,
        # leaving its component-formatting loop unreachable (and that
        # loop used '$s' where '%s' was intended).  Show the components.
        return 'MultiD: ' + ', '.join(['%s' % v for v in self.x])
    def __add__(self, other):
        return MultiD((self.x[i] + other.x[i]) for i in range(NDIMS))
    def __sub__(self, other):
        return MultiD((self.x[i] - other.x[i]) for i in range(NDIMS))
    def __mul__(self, other):
        # Scalar multiplication only.
        return MultiD((self.x[i] * other) for i in range(NDIMS))
    def __div__(self, other):
        # Scalar division (Python 2 '/' operator).
        return MultiD((self.x[i] / other) for i in range(NDIMS))
    def __iadd__(self, other):
        for dim in range(NDIMS):
            self.x[dim] += other.x[dim]
        return self
    def __isub__(self, other):
        for dim in range(NDIMS):
            self.x[dim] -= other.x[dim]
        return self
    def __idiv__(self, other):
        for dim in range(NDIMS):
            self.x[dim] /= other
        return self
################################################################################
# BOID RULE IMPLEMENTATION CLASS
class Boid:
    # One swarm member; steering follows the classic boids pseudocode,
    # minus the velocity-matching rule (see note before rule4).
    def __init__(self, lims, offset, move_divider):
        # NOTE(review): `lims` and `offset` are accepted but never used --
        # the start position is drawn from the module-level DIMLIMIT instead.
        self.velocity = MultiD([0]*NDIMS)
        self.position = MultiD([0]*NDIMS)
        for dim in range(NDIMS): #random starting position
            self.position.x[dim] = random.randint(0,DIMLIMIT)
        self.move_divider = move_divider * 5
    def update_velocity(self, boids):
        # Accumulate the steering rules into a private delta, applied in move().
        v1 = self.rule1(boids)
        v2 = self.rule2(boids)
        v4 = self.rule4(attractors)
        self.__temp = v1 + v2 + v4
    def move(self):
        # Integrate: apply the accumulated steering, clamp, then step.
        self.velocity += self.__temp
        limit_speed(self)
        self.position += self.velocity / self.move_divider
    def rule1(self, boids):
        # clumping: steer toward the centroid of the *other* boids,
        # damped by BOIDMASS
        vector = MultiD([0]*NDIMS)
        for boid in boids:
            if boid is not self:
                vector += boid.position
        vector /= len(boids) - 1
        return (vector - self.position) / BOIDMASS
    def rule2(self, boids):
        # avoidance: back away from any boid closer than BLIMIT,
        # per dimension
        vector = MultiD([0]*NDIMS)
        for boid in boids:
            if boid is not self:
                for dim in range(NDIMS):
                    if abs(self.position.x[dim] - boid.position.x[dim]) < BLIMIT:
                        vector.x[dim] -= (boid.position.x[dim] - self.position.x[dim])
        return vector * 1.5
    #NOTE: NO rule3 BECAUSE WE DON'T IMPOSE VELOCITY MATCHING/SCHOOLING
    def rule4(self, attractors):
        # attractors: pulled toward attractors within 30 units,
        # scaled by each attractor's strength
        vector = MultiD([0]*NDIMS)
        for attractor in attractors:
            for dim in range(NDIMS):
                if abs(self.position.x[dim] - attractor.position.x[dim]) < 30:
                    vector.x[dim] += (attractor.position.x[dim] - self.position.x[dim]) * attractor.attraction
        return vector
################################################################################
# ATTRACTOR CLASS
class Attractor:
    """A stationary point that pulls nearby boids (see Boid.rule4).

    Starts at a uniformly random position; rand_update() re-randomizes it.
    """
    def __init__(self, attract):
        self.attraction = attract
        self.position = MultiD([0]*NDIMS)
        # Same randint calls, in the same order, as a direct init loop.
        self.rand_update()
    def rand_update(self):
        for axis in range(NDIMS):
            self.position.x[axis] = random.randint(1, DIMLIMIT)
################################################################################
# RECEIVING OSC
s = OSC.OSCServer(RECEIVE_ADDRESS)
s.addDefaultHandlers()
def attractor_handler(addr, tags, stuff, source):
    # OSC handler for "/attr": moves one randomly chosen attractor to the
    # position given by the incoming values (each clamped to 0-1 and
    # scaled up to the 0-DIMLIMIT simulation range).
    print "---"
    print "Received new osc msg from %s" % OSC.getUrlStr(source)
    print "With addr : %s" % addr
    print "Typetags %s" % tags
    global attractors
    attractor = random.choice(attractors) #modify a random attractor
    for item in stuff:
        print "data %f" % item
    # Assign dimension values
    # NOTE(review): assumes the message carries at least NDIMS values;
    # fewer would raise IndexError here -- confirm against the sender.
    for i in range(NDIMS):
        attractor.position.x[i] = int( min(max(stuff[i],0.0),1.0) * DIMLIMIT )
        print "Dim %d val: %d" % (i,attractor.position.x[i])
    print "---"
s.addMsgHandler("/attr", attractor_handler) # adding our function
def startOSC(): # Start OSCServer
    # Runs the OSC server on a background thread so the Tk mainloop
    # stays responsive; the thread handle is kept for quit_handler.
    print "\nStarting OSCServer.\n"
    global st
    st = threading.Thread( target = s.serve_forever )
    st.start()
def quit_handler(): # close OSC server
    # Window-close callback: stop the OSC server, join its thread, then
    # tear down the Tk mainloop.
    print "Closing OSCServer."
    s.close()
    print "Waiting for Server-thread to finish."
    st.join() ##!!!
    print "Done."
    graph.quit()
################################################################################
# SENDING OSC
client = OSC.OSCClient()
client.connect( SEND_ADDRESS ) # note that the argument is a tupple and not two arguments
def sendMsg(addr,val):
    """Build a fresh OSC message addressed to `addr`, append `val`,
    and send it over the already-connected module-level client."""
    message = OSC.OSCMessage()
    message.setAddress(addr)   # e.g. "/swarmNote"
    message.append(val)
    client.send(message)       # client was connected to SEND_ADDRESS above
################################################################################
# Execute the simulation.
if __name__ == '__main__':
main()
| {
"repo_name": "tsob/EighthPlague",
"path": "swarm.py",
"copies": "1",
"size": "11042",
"license": "mit",
"hash": 6431619091029339000,
"line_mean": 32.3595166163,
"line_max": 108,
"alpha_frac": 0.5812352835,
"autogenerated": false,
"ratio": 3.273643640675956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9312138989241292,
"avg_score": 0.008547986986932682,
"num_lines": 331
} |
# Adapted from Borgar: http://stackoverflow.com/questions/222581/python-script-for-minifying-css
# Public Domain as far as I can tell
#
import re
def minify_css(css):
    """Minify a CSS string.

    Strips comments (preserving the IE<6 empty-comment hack), collapses
    whitespace, unquotes url(), shortens #aabbcc colors to #abc, drops
    leading zeros in fractional values, and re-emits each rule as
    selector{prop:val;...}.  Duplicate properties keep first-seen order
    but the last value wins; rules with no declarations are dropped.
    """
    # remove comments - this will break a lot of hacks :-P
    css = re.sub(r'\s*/\*\s*\*/', "$$HACK1$$", css)  # preserve IE<6 comment hack
    css = re.sub(r'/\*[\s\S]*?\*/', "", css)
    css = css.replace("$$HACK1$$", '/**/')  # restore IE<6 comment hack
    css = re.sub(r'url\((["\'])([^)]*)\1\)', r'url(\2)', css)  # url() doesn't need quotes
    # spaces may be safely collapsed as generated content will collapse them anyway
    css = re.sub(r'\s+', ' ', css)
    # shorten collapsable colors: #aabbcc to #abc
    css = re.sub(r'#([0-9a-f])\1([0-9a-f])\2([0-9a-f])\3(\s|;)', r'#\1\2\3\4', css)
    # fragment values can lose their leading zeros
    css = re.sub(r':\s*0(\.\d+([cm]m|e[mx]|in|p[ctx]))\s*;', r':\1;', css)
    pieces = []
    for selector_part, body in re.findall(r'([^{]+){([^}]*)}', css):
        # we don't need spaces around operators inside selectors
        selectors = [re.sub(r'(?<=[\[\(>+=])\s+|\s+(?=[=~^$*|>+\]\)])', r'', sel.strip())
                     for sel in selector_part.split(',')]
        # order is important, but we still want to discard repetitions
        values = {}
        order = []
        for declaration in re.findall('(.*?):(.*?)(;|$)', body):
            prop = declaration[0].strip().lower()
            if prop not in order:
                order.append(prop)
            values[prop] = declaration[1].strip()
        # emit the rule only if it contains any declarations
        if values:
            joined = ''.join(['%s:%s;' % (prop, values[prop]) for prop in order])[:-1]
            pieces.append("%s{%s}" % (','.join(selectors), joined))
    return ''.join(pieces)
if __name__ == '__main__':
    # CLI usage (Python 2): python cssmin.py input.css  -> minified CSS on stdout
    import sys
    print minify_css(open(sys.argv[1]).read())
| {
"repo_name": "josephwecker/zml",
"path": ".attic/cssmin.py",
"copies": "1",
"size": "1764",
"license": "mit",
"hash": -6554638939583472000,
"line_mean": 52.4545454545,
"line_max": 173,
"alpha_frac": 0.5379818594,
"autogenerated": false,
"ratio": 2.97972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.401771158912973,
"avg_score": null,
"num_lines": null
} |
class PF59int(int):
    """An element of the prime field PF(59): the integers modulo 59.

    Instances are integers in the range 0 to 58.  Addition, subtraction
    and multiplication are ordinary integer arithmetic reduced modulo
    59; multiplicative inverses are looked up in a precomputed table.
    """
    # BUG FIX: the original docstring was copy-pasted from a GF(2^8)
    # implementation (irreducible polynomial, 0..255 range) and described
    # the wrong field entirely; a stale commented-out GF(2^8) "Peasant's
    # Algorithm" multiply was removed for the same reason.

    # Maps integers to PF59int instances (see __new__).
    cache = {}

    # Multiplicative inverse table modulo 59: invtable[i] * i == 1 (mod 59).
    # Index 0 is None because 0 has no inverse.
    invtable = (None, 1, 30, 20, 15, 12, 10, 17, 37, 46, 6, 43, 5, 50, 38, 4, 48,
                7, 23, 28, 3, 45, 51, 18, 32, 26, 25, 35, 19, 57, 2, 40, 24, 34,
                33, 27, 41, 8, 14, 56, 31, 36, 52, 11, 55, 21, 9, 54, 16, 53, 13,
                22, 42, 49, 47, 44, 39, 29, 58)

    def __new__(cls, value):
        # Caching sacrifices a bit of speed for less memory usage: at most
        # 59 instances of this class exist at any time.
        try:
            return PF59int.cache[value]
        except KeyError:
            if value > 58 or value < 0:
                raise ValueError("Field elements of PF(59) are between 0 and 58. Cannot be %s" % value)
            newval = int.__new__(cls, value)
            PF59int.cache[int(value)] = newval
            return newval

    def __add__(a, b):
        "Addition in PF(59) is normal addition modulo 59"
        return PF59int((int(a) + int(b)) % 59)
    __radd__ = __add__

    def __sub__(a, b):
        "Subtraction in PF(59) is normal subtraction modulo 59"
        # Python's modulo operator already yields a non-negative result
        # for a negative dividend, so no manual adjustment is needed.
        return PF59int((int(a) - int(b)) % 59)

    def __rsub__(a, b):
        # Reversed argument order: computes b - a.
        return PF59int((int(b) - int(a)) % 59)

    def __neg__(self):
        return PF59int((59 - int(self)) % 59)

    def __mul__(a, b):
        "Multiplication in PF(59)"
        return PF59int((int(a) * int(b)) % 59)
    __rmul__ = __mul__

    def __pow__(self, power):
        """Raise to an ordinary-integer power; negative powers invert."""
        if isinstance(power, PF59int):
            raise TypeError("Raising a Field element to another Field element is not defined. power must be a regular integer")
        if (power < 0):
            return PF59int(pow(int(self), -power, 59)).inverse()
        return PF59int(pow(int(self), power, 59))

    def inverse(self):
        """Multiplicative inverse, from the precomputed table."""
        return PF59int(PF59int.invtable[self])

    def __div__(self, other):
        return self * PF59int(other).inverse()

    def __rdiv__(self, other):
        return self.inverse() * other

    # BUG FIX: __div__/__rdiv__ are Python 2 only; without these aliases
    # the '/' operator would fall back to float true division on Python 3.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def __repr__(self):
        n = self.__class__.__name__
        return "%s(%r)" % (n, int(self))

    multiply = __mul__
| {
"repo_name": "dennismckinnon/RS-PF59",
"path": "ffp.py",
"copies": "1",
"size": "3403",
"license": "mit",
"hash": 2465249670744333000,
"line_mean": 34.8210526316,
"line_max": 139,
"alpha_frac": 0.5706729356,
"autogenerated": false,
"ratio": 3.2784200385356455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43490929741356454,
"avg_score": null,
"num_lines": null
} |
# Adapted from code written in 2005 by Peter Pearson and placed in the public domain.
from .Point import Point
def _leftmost_bit(x):
# this is closer to constant time than bit-twiddling hacks like those in
# https://graphics.stanford.edu/~seander/bithacks.html
assert x > 0
result = 1
while result <= x:
result <<= 1
return result >> 1
class Curve(object):
    """
    Elliptic Curve over the field of integers modulo a prime.
    A curve is instantiated with a prime modulus p, and coefficients a and b.
    """
    def __init__(self, p, a, b, order=None):
        """The curve of points satisfying y^2 = x^3 + a*x + b (mod p).

        `order` is the (optional) order of the curve's point group; when
        given, multiply() reduces scalars modulo it.
        """
        self._p = p
        self._a = a
        self._b = b
        self._order = order
        # Point(None, None) is the group identity ("point at infinity").
        self._infinity = Point(None, None, self)
    def p(self):
        """The prime modulus of the curve."""
        return self._p
    def order(self):
        # May be None if the order was not supplied at construction.
        return self._order
    def infinity(self):
        """The "point at infinity" (also known as 0)."""
        return self._infinity
    def contains_point(self, x, y):
        """Is the point (x, y) on the curve?"""
        # The identity (None, None) is considered on every curve.
        if x is None and y is None:
            return True
        return (y * y - (x * x * x + self._a * x + self._b)) % self._p == 0
    def add(self, p0, p1):
        """Add one point to another point (standard chord/tangent rules)."""
        p = self._p
        infinity = self._infinity
        # Identity is the additive neutral element.
        if p0 == infinity:
            return p1
        if p1 == infinity:
            return p0
        x0, y0 = p0
        x1, y1 = p1
        if (x0 - x1) % p == 0:
            if (y0 + y1) % p == 0:
                # p1 is the negation of p0: the sum is the identity.
                return infinity
            else:
                # Doubling: slope of the tangent at p0.
                l = ((3 * x0 * x0 + self._a) * self.inverse_mod(2 * y0, p)) % p
        else:
            # Distinct x: slope of the chord through p0 and p1.
            l = ((y1 - y0) * self.inverse_mod(x1 - x0, p)) % p
        x3 = (l * l - x0 - x1) % p
        y3 = (l * (x0 - x3) - y0) % p
        return self.Point(x3, y3)
    def multiply(self, p, e):
        """Multiply a point by an integer (scalar multiplication)."""
        if self._order:
            e %= self._order
        if p == self._infinity or e == 0:
            return self._infinity
        # Double-and-add driven by 3*e (a classic trick: where the bits
        # of 3e and e differ, add or subtract p).
        e3 = 3 * e
        i = _leftmost_bit(e3) >> 1
        result = p
        while i > 1:
            result += result
            # NOTE(review): both candidates are computed and then selected
            # by index -- this looks intended to keep the work per bit
            # uniform (side-channel hygiene); confirm before restructuring.
            if (e3 & i):
                v = [result, result+p]
            else:
                v = [result-p, result]
            result = v[0 if (e & i) else 1]
            i >>= 1
        return result
    def inverse_mod(self, a, m):
        """Inverse of a mod m (extended Euclidean algorithm)."""
        if a < 0 or m <= a:
            a = a % m
        # From Ferguson and Schneier, roughly:
        c, d = a, m
        uc, vc, ud, vd = 1, 0, 0, 1
        while c != 0:
            q, c, d = divmod(d, c) + (c,)
            uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
        # At this point, d is the GCD, and ud*a+vd*m = d.
        # If d == 1, this means that ud is an inverse.
        assert d == 1
        if ud > 0:
            return ud
        else:
            return ud + m
    def Point(self, x, y):
        """
        The point constructor for this curve
        """
        return Point(x, y, self)
    def __repr__(self):
        return '{}({!r},{!r},{!r})'.format(self.__class__.__name__, self._p, self._a, self._b)
    def __str__(self):
        return 'y^2 = x^3 + {}*x + {} (mod {})'.format(self._a, self._b, self._p)
| {
"repo_name": "shivaenigma/pycoin",
"path": "pycoin/ecdsa/Curve.py",
"copies": "1",
"size": "3384",
"license": "mit",
"hash": 3280825975484031500,
"line_mean": 25.6456692913,
"line_max": 94,
"alpha_frac": 0.4651300236,
"autogenerated": false,
"ratio": 3.3307086614173227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42958386850173225,
"avg_score": null,
"num_lines": null
} |
# adapted from cython/tests/run/builtin_abs.pyx
"""
>>> _abs = abs_as_name()
>>> _abs(-5)
5
>>> py_abs(-5)
5
>>> py_abs(-5.5)
5.5
>>> int(int32_abs(-5))
10
>>> int(int_abs(-5))
10
>>> int(long_abs(-5))
10
>>> int(ulong_abs(5))
10
>>> long_long_abs(-(2**33)) == 2**34
True
>>> ulong_long_abs(2**33) == 2**34
True
>>> double_abs(-5)
10.0
>>> double_abs(-5.5)
11.0
>>> float_abs(-5)
10.0
>>> float_abs(-5.5)
11.0
>>> '%.2f' % round(complex64_abs(-10-2j), 2)
'20.40'
>>> '%.2f' % round(complex128_abs(-10-2j), 2)
'20.40'
"""
from numba import *
@jit(backend='ast')
def abs_as_name():
    # Exercises assigning the builtin `abs` to a local and returning it
    # under numba compilation (see the `abs_as_name()` doctest above).
    x = abs
    return x
@autojit(backend='ast')
def _abs(value):
    # Doubles abs(value); the typed wrappers below therefore return
    # 2*|a|, matching the module doctests (e.g. int_abs(-5) == 10).
    result = abs(value)
    with nopython:
        return result * 2 # test return type being non-object
# Thin typed wrappers: py_abs calls the plain builtin (returns |a|);
# all the others funnel through _abs and so return 2*|a|, as the
# doctests in the module docstring expect.
@jit(backend='ast', argtypes=[object_])
def py_abs(a):
    return abs(a)
@jit(backend='ast', argtypes=[int_])
def int_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[long_])
def long_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[ulong])
def ulong_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[int32])
def int32_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[longlong])
def long_long_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[ulonglong])
def ulong_long_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[double])
def double_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[float_])
def float_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[complex64])
def complex64_abs(a):
    return _abs(a)
@jit(backend='ast', argtypes=[complex128])
def complex128_abs(a):
    return _abs(a)
if __name__ == '__main__':
    # Run the module doctests through numba's test harness.
    import numba
    numba.testing.testmod()
| {
"repo_name": "shiquanwang/numba",
"path": "numba/tests/builtins/test_builtin_abs.py",
"copies": "1",
"size": "1733",
"license": "bsd-2-clause",
"hash": -3461256406636048000,
"line_mean": 15.6634615385,
"line_max": 61,
"alpha_frac": 0.5966532025,
"autogenerated": false,
"ratio": 2.4899425287356323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8565857436104405,
"avg_score": 0.004147659026245334,
"num_lines": 104
} |
# Adapted from Daniel Birnbaum's histogram script
import argparse
import gzip
import pipes
import sys
from collections import Counter
import numpy
metrics = ['DP', 'GQ']
def main(args):
    # Stream a (possibly gzipped) VCF, append per-variant DP/GQ histogram
    # annotations to the INFO field, and bgzip the result to args.output.
    f = gzip.open(args.vcf) if args.vcf.endswith('.gz') else open(args.vcf)
    if args.output is None: args.output = args.vcf.replace('.vcf', '.hist.vcf')
    if not args.output.endswith('.gz'): args.output += '.gz'
    # Pipe output through bgzip so the result is tabix-compatible.
    pipe = pipes.Template()
    pipe.append('bgzip -c /dev/stdin', '--')
    g = pipe.open(args.output, 'w')
    header = None
    for line in f:
        line = line.strip()
        # Reading header lines to get VEP and individual arrays
        if line.startswith('#'):
            line = line.lstrip('#')
            if line.startswith('CHROM'):
                # Map column names to their indices.
                header = line.split()
                header = dict(zip(header, range(len(header))))
            continue
        if header is None:
            print >> sys.stderr, "VCF file does not have a header line (CHROM POS etc.). Exiting."
            sys.exit(1)
        fields = line.split('\t')
        # Append the DP/GQ histogram midpoints and counts to INFO.
        new_info = fields[header['INFO']].rstrip(';')
        for metric in metrics:
            data = get_histogram_for_variant(line, metric)
            # NOTE(review): get_histogram_for_variant can return None
            # (e.g. metric absent from FORMAT); this unpack would then
            # raise TypeError -- confirm intended behavior.
            midpoints, hist = data
            new_info += ';%s_MID=' % (metric) + '|'.join(map(str, midpoints))
            new_info += ';%s_HIST=' % (metric) + '|'.join(map(str, hist))
        fields[header['INFO']] = new_info
        g.write('\t'.join(fields) + '\n')
    f.close()
    g.close()
def convert_to_int(val):
    """Parse `val` as an int if possible, then as a float; return the
    original value unchanged when neither conversion succeeds."""
    for cast in (int, float):
        try:
            return cast(val)
        except ValueError:
            pass
    return val
def get_histogram_for_variant(vcf_line, metric="DP", num_bins=40, midpoints=True, variants_only=False):
    # Build a histogram of a per-sample FORMAT metric (DP or GQ) for one
    # VCF data line.  Returns (midpoints, counts) as lists of strings, or
    # None for header lines / lines whose FORMAT lacks the metric.
    # NOTE(review): `variants_only` is accepted but never used here.
    # NOTE: this is Python 2 syntax (`except Exception, e`).
    vcf_line = vcf_line.strip('\n')
    if vcf_line.startswith('#'):
        return None
    else:
        fields = vcf_line.split('\t')
        # alts = fields[4].split(',')
        try:
            # Position of the metric within the colon-separated FORMAT keys.
            idx = fields[8].split(':').index(metric)
        except Exception, e:
            return None
        distr = []
        # get distribution for metric across all sample columns
        for sample in fields[9:]:
            # This is only DP/GQ for now
            sample_info = sample.split(':')
            # Skip uncalled genotypes and missing values.
            if sample_info[0] == './.': continue
            if idx < len(sample_info) and sample_info[idx] != '.':
                distr.append(sample_info[idx])
        mids, hist = get_hist_from_distribution(distr, midpoints, num_bins)
        return map(str, mids), map(str, hist)
def get_hist_from_distribution(distr, midpoints, num_bins):
    """Histogram a list of raw metric strings.

    Values that remain strings after conversion are tallied with a
    Counter (returning (values, counts)); numeric values go through
    numpy.histogram, returning (bin midpoints or edges, counts)
    depending on `midpoints`.
    """
    values = [convert_to_int(item) for item in distr]
    if any(type(item) == str for item in values):
        tally = Counter(values)
        return zip(*tally.items())
    counts, edges = numpy.histogram(values, bins=num_bins)
    if not midpoints:
        return edges, counts
    centers = [(edges[i] + edges[i + 1]) / 2 for i in range(len(edges) - 1)]
    return centers, counts
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--vcf', '--input', '-i', help='Input VCF file; may be gzipped', required=True)
parser.add_argument('--output', '-o', help='Output VCF file; may be gzipped')
args = parser.parse_args()
main(args) | {
"repo_name": "konradjk/exac_browser",
"path": "src/precompute_histogram.py",
"copies": "4",
"size": "3502",
"license": "mit",
"hash": 8469355165498237000,
"line_mean": 29.4608695652,
"line_max": 103,
"alpha_frac": 0.5619645917,
"autogenerated": false,
"ratio": 3.6252587991718426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022764705707553415,
"num_lines": 115
} |
# Adapted from django.contrib.flatpages.middleware
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404, render
from django.template import loader, engines
from django.utils.safestring import mark_safe
from airmozilla.main.models import Event
from .models import StaticPage
DEFAULT_TEMPLATE = 'staticpages/default.html'
def staticpage(request, url):
    # Serve the StaticPage matching `url`, mirroring Django's flatpage
    # fallback: if the page is missing but APPEND_SLASH is on and the URL
    # lacks a trailing slash, retry with a slash and permanently redirect
    # when that version exists; otherwise re-raise the 404.
    if not url.startswith('/'):
        url = '/' + url
    try:
        f = get_object_or_404(StaticPage, url__exact=url)
    except Http404:
        if not url.endswith('/') and settings.APPEND_SLASH:
            url += '/'
            # Raises Http404 again if the slashed URL doesn't exist either.
            f = get_object_or_404(StaticPage, url__exact=url)
            return HttpResponsePermanentRedirect('%s/' % request.path)
        else:
            raise
    return render_staticpage(request, f)
def render_staticpage(request, staticpage):
    # Render a StaticPage to an HttpResponse, enforcing its privacy level
    # and optionally substituting querystring variables into its
    # title/content templates.
    if not can_view_staticpage(staticpage, request.user):
        # We might need to kick you out if you're not allowed to see this.
        response = render(
            request,
            'staticpages/insufficient_privileges.html', {
                'staticpage': staticpage,
            },
            status=403,
        )
        return response
    if staticpage.template_name:
        # Fall back to the default template if the custom one is missing.
        t = loader.select_template(
            (staticpage.template_name, DEFAULT_TEMPLATE)
        )
    else:
        t = loader.get_template(DEFAULT_TEMPLATE)
    if staticpage.allow_querystring_variables:
        # Treat title/content as templates and feed them the request
        # parameters (keys starting with "request" are excluded).
        # NOTE(review): request.REQUEST was deprecated/removed in newer
        # Django versions -- confirm the Django version pinned here.
        title_t = engines['backend'].from_string(staticpage.title)
        content_t = engines['backend'].from_string(staticpage.content)
        params = {}
        for key, value in request.REQUEST.items():
            if key.startswith('request'):
                continue
            params[key] = value
        staticpage.title = title_t.render(params, request)
        staticpage.content = content_t.render(params, request)
    else:
        # To avoid having to always use the "|safe" filter in flatpage
        # templates, mark the title and content as already safe (since
        # they are raw HTML content in the first place).
        staticpage.title = mark_safe(staticpage.title)
        staticpage.content = mark_safe(staticpage.content)
    context = {
        'staticpage': staticpage,
        # This is specifically to help the main_base.html template
        # that tries to decide which nav bar item to put a dot under.
        'page': staticpage.url,
    }
    response = HttpResponse(t.render(context, request))
    # Any extra HTTP headers configured on the page (e.g. CORS) are
    # copied onto the response verbatim.
    for key, value in staticpage.headers.items():
        response[key] = value
    return response
def can_view_staticpage(page, user):
    """Decide whether `user` may see `page` based on its privacy level.

    Public pages are visible to everyone; anything else requires an
    active user, and company-private pages additionally exclude
    contributors.
    """
    if page.privacy == Event.PRIVACY_PUBLIC:
        return True
    if not user.is_active:
        return False
    from airmozilla.main.views import is_contributor
    if page.privacy == Event.PRIVACY_COMPANY and is_contributor(user):
        return False
    return True
| {
"repo_name": "blossomica/airmozilla",
"path": "airmozilla/staticpages/views.py",
"copies": "2",
"size": "3146",
"license": "bsd-3-clause",
"hash": 5620058522961308000,
"line_mean": 32.1157894737,
"line_max": 76,
"alpha_frac": 0.6468531469,
"autogenerated": false,
"ratio": 4.161375661375661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5808228808275662,
"avg_score": null,
"num_lines": null
} |
# Adapted from Django-MoinMoin
#
# http://code.google.com/p/django-moinmoin/
#
# Visit that page for instructions on how to use ... but instead of copying your settings
# into this file, add this to your moin/wsgi script;
#
# os.environ['DJANGO_SETTINGS_MODULE'] = '<your_django_project>.settings'
#
from MoinMoin.auth import BaseAuth
import traceback
import base64
import cPickle as pickle
import sys
try:
import hashlib
md5_constructor = hashlib.md5
sha_constructor = hashlib.sha1
except ImportError:
import md5
md5_constructor = md5.new
import sha
sha_constructor = sha.new
# This is included in case you want to create a log file during testing
import time
def writeLog(*args):
    '''Append an entry to the debug log file with a timestamp and all args.

    Each argument is rendered with str() and terminated by a semicolon.
    Intended only for ad-hoc debugging during testing.
    '''
    entry = time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime())
    for arg in args:
        entry = '%s %s;' % (entry, arg)
    # with-statement guarantees the handle is closed even if write() raises
    # (the original opened/closed manually and could leak on failure).
    with open('/tmp/cookie.log', 'a') as log:  # +++ location for log file
        log.write('\n' + entry + '\n')
class DjangoAuth(BaseAuth):
    """MoinMoin auth plugin that trusts an existing Django session cookie.

    Reads the browser's ``sessionid`` cookie, resolves it to a live Django
    session, loads the matching Django user and creates/updates the
    corresponding MoinMoin user — so no separate wiki login is required.
    """
    name = 'DjangoAuth'
    # +++ The next 2 lines may be useful if you are overriding the username method in your themes.
    # +++ If commented out, wiki pages will not have login or logout hyperlinks
    login_inputs = ['username', 'password'] # +++ required to get a login hyperlink in wiki navigation area
    logout_possible = True # +++ required to get a logout hyperlink in wiki navigation area

    def __init__(self, autocreate=False):
        self.autocreate = autocreate
        BaseAuth.__init__(self)
        # NOTE: removed an unused ``from django.conf import settings`` import
        # that the original performed here; settings are imported where used.

    def get_profile(self, user_id):
        """Load the Django user ``user_id`` into ``self.user_profile``.

        Returns True on success, False on any failure.

        BUGFIX: the error paths used to return the tuple ``(False, {})``,
        which is *truthy*, so callers testing ``if not result`` could never
        detect the failure; all paths now return a plain boolean.
        """
        from django.contrib.auth.models import User
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return False
        try:
            profile = {
                'username': user.username,
                'name': user.first_name + ' ' + user.last_name,
                'email': user.email,
            }
        except Exception:
            return False
        # Assign only after the whole profile built successfully, so a
        # partial profile is never left behind on failure.
        self.user_profile = profile
        return True

    def get_session(self, session_id):
        """Return ``(True, session_data)`` for a live, unexpired Django
        session, else ``(False, '')``."""
        try:
            from django.contrib.sessions.models import Session
            session = Session.objects.get(session_key=session_id)
        except Session.DoesNotExist:
            return False, ''
        try:
            from datetime import datetime
            # Has the session expired?
            if session.expire_date < datetime.now():
                return False, ''
            return True, session.session_data
        except Exception:
            return False, ''

    def get_decoded(self, session_data):
        """Decode Django's base64+pickle session payload.

        Verifies the trailing md5 tamper-check against SECRET_KEY (the
        legacy Django session format).  Returns {} on any failure.
        """
        from django.conf import settings
        encoded_data = base64.decodestring(session_data)
        pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
        if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
            return {}
        try:
            return pickle.loads(pickled)
        # Unpickling can cause a variety of exceptions. If something happens,
        # just return an empty dictionary (an empty session).
        except Exception:
            return {}

    def request(self, request, user_obj, **kw):
        """Return (user_obj, False) if the Django cookie authenticates the
        user, else (None, True) so moin tries the next auth method."""
        if user_obj and user_obj.auth_method == self.name:
            user_obj = None
        # if we're already authenticated, no need to do anything more
        if user_obj and user_obj.valid:
            return user_obj, False
        # login = kw.get('login')      # +++ example does not use this; login is expected in other application
        # user_obj = kw.get('user_obj') # +++ example does not use this
        # username = kw.get('name')    # +++ example does not use this
        # logout = kw.get('logout')    # +++ example does not use this; logout is expected in other application
        import Cookie
        user = None         # user is not authenticated
        try_next = True     # if True, moin tries the next auth method in auth list
        otherAppCookie = "sessionid"  # +++ cookie set by the Django application
        try:
            cookie = Cookie.SimpleCookie(kw.get('cookie', None))
        except Cookie.CookieError:
            cookie = None   # ignore invalid cookies
        if cookie and otherAppCookie in cookie:
            # having this cookie means user auth has already been done in other application
            # Work around SimpleCookie parsing bug in 2.6.4
            if isinstance(cookie[otherAppCookie], unicode):
                result, session_raw = self.get_session(cookie[otherAppCookie])
            else:
                result, session_raw = self.get_session(cookie[otherAppCookie].value)
            if not result:
                return user, try_next
            session_decoded = self.get_decoded(session_raw)
            writeLog('Session Decoded', session_decoded)
            try:
                result = self.get_profile(session_decoded['_auth_user_id'])
            except KeyError:
                writeLog('Could not find user id in decoded cookie')
                return user, try_next
            # Check success *before* touching self.user_profile — on failure
            # the attribute may not exist at all.
            if not result:
                return user, try_next
            writeLog('got user profile', self.user_profile)
            from MoinMoin.user import User
            # giving auth_username to User constructor means that authentication has already been done.
            user = User(request, name=self.user_profile['username'], auth_username=self.user_profile['username'], auth_method=self.name)
            changed = False
            if self.user_profile['email'] != user.email:   # was the email addr externally updated?
                user.email = self.user_profile['email']
                changed = True  # yes -> update user profile
            if self.user_profile['name'] != user.aliasname:  # +++ was the aliasname externally updated?
                user.aliasname = self.user_profile['name']
                changed = True  # yes -> update user profile
            if user:
                user.create_or_update(changed)
            if user and user.valid:
                try_next = False  # have valid user; stop processing auth method list
            writeLog(str(user))
        return user, try_next
| {
"repo_name": "dpla/zen",
"path": "etc/django-moin-auth/djangoAuth.py",
"copies": "2",
"size": "6431",
"license": "apache-2.0",
"hash": -9160776804598502000,
"line_mean": 38.4539877301,
"line_max": 136,
"alpha_frac": 0.5986627274,
"autogenerated": false,
"ratio": 4.264588859416445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011481411472356233,
"num_lines": 163
} |
"""Adapted from djcelery.models."""
import collections
import datetime
import itertools
import json
import celery.schedules
import pretty_cron
import sqlalchemy as sqla
from sqlalchemy import orm
from sqlalchemy.ext import declarative
class Base(object):
    # Shared surrogate primary key inherited by every model table.
    id = sqla.Column(sqla.Integer, primary_key=True)

# Rebind Base to the declarative base built from the mixin above, so all
# models get the `id` column automatically.
Base = declarative.declarative_base(cls=Base)
class IntervalSchedule(Base):
    """A fixed-interval schedule: run every `every` `period` (e.g. 10 seconds)."""

    __tablename__ = 'interval_schedule'

    every = sqla.Column(sqla.Integer, nullable=False)
    period = sqla.Column(sqla.String(24), nullable=False)

    @property
    def schedule(self):
        """Equivalent celery schedule object for this row."""
        interval = datetime.timedelta(**{self.period: self.every})
        return celery.schedules.schedule(interval)

    @property
    def period_singular(self):
        """Singular form of the period name ('seconds' -> 'second')."""
        return self.period[:-1]

    @property
    def description(self):
        """Human-readable summary, e.g. 'every minute' or 'every 5 minutes'."""
        if self.every == 1:
            return 'every {0.period_singular}'.format(self)
        return 'every {0.every} {0.period}'.format(self)

    def __str__(self):
        return self.description
class CrontabSchedule(Base):
    """A crontab-style schedule (minute / hour / day-of-week / day-of-month /
    month-of-year), each field stored as a cron expression string."""

    __tablename__ = 'crontab_schedule'

    minute = sqla.Column(sqla.String(64), default='*')
    hour = sqla.Column(sqla.String(64), default='*')
    day_of_week = sqla.Column(sqla.String(64), default='*')
    day_of_month = sqla.Column(sqla.String(64), default='*')
    month_of_year = sqla.Column(sqla.String(64), default='*')

    @property
    def description(self):
        """Plain-English rendering of the cron expression."""
        def normalize(field):
            # Falsy fields become '*'; embedded spaces are stripped, and a
            # field that strips down to nothing also falls back to '*'.
            cleaned = str(field).replace(' ', '') if field else ''
            return cleaned or '*'

        fields = (self.minute, self.hour, self.day_of_week,
                  self.day_of_month, self.month_of_year)
        expression = ' '.join(normalize(f) for f in fields)
        return pretty_cron.prettify_cron(expression)

    @property
    def schedule(self):
        """Equivalent celery crontab schedule object for this row."""
        return celery.schedules.crontab(
            minute=self.minute,
            hour=self.hour,
            day_of_week=self.day_of_week,
            day_of_month=self.day_of_month,
            month_of_year=self.month_of_year,
        )

    def __str__(self):
        return self.description
class PeriodicTask(Base):
    """A named celery task plus the interval/crontab schedules that fire it.

    Mirrors djcelery's PeriodicTask model on top of SQLAlchemy.
    """

    __tablename__ = 'periodic_task'

    name = sqla.Column(
        sqla.String(200),
        unique=True,
        doc='Useful description',
        nullable=False,
    )
    task = sqla.Column(
        sqla.String(200),
        doc='Task name',
        nullable=False,
    )
    interval_schedules = orm.relationship(
        'IntervalSchedule',
        secondary='task_interval_schedules',
        backref='periodic_tasks',
    )
    crontab_schedules = orm.relationship(
        'CrontabSchedule',
        secondary='task_crontab_schedules',
        backref='periodic_tasks',
    )
    args = sqla.Column(
        sqla.String,
        nullable=True,
        default='[]',
        doc='JSON encoded positional arguments',
    )
    kwargs = sqla.Column(
        sqla.String,
        nullable=True,
        default='{}',
        doc='JSON encoded keyword arguments',
    )
    queue = sqla.Column(
        sqla.String(200),
        nullable=True,
        default=None,
        doc='Queue defined in CELERY_QUEUES',
    )
    exchange = sqla.Column(
        sqla.String(200),
        nullable=True,
        default=None,
    )
    routing_key = sqla.Column(
        sqla.String(200),
        nullable=True,
        default=None,
    )
    expires = sqla.Column(sqla.DateTime, nullable=True)
    enabled = sqla.Column(sqla.Boolean, default=True)
    last_run_at = sqla.Column(sqla.DateTime, nullable=True)
    total_run_count = sqla.Column(sqla.Integer, default=0)
    date_changed = sqla.Column(
        sqla.DateTime,
        server_default=sqla.func.now(),
        onupdate=sqla.func.now(),
    )
    description = sqla.Column(sqla.String, nullable=True)

    @property
    def schedules(self):
        """All schedules (interval and crontab) attached to this task."""
        return list(
            itertools.chain(self.interval_schedules, self.crontab_schedules)
        )

    # NOTE(review): collections.Sequence/Mapping moved to collections.abc in
    # Python 3.3 and were removed from `collections` in 3.10 — update the
    # references below if this ever targets modern Python.
    @orm.validates('args')
    def validate_args(self, key, value):
        # BUGFIX: the error message used to say 'kwargs' for the args column.
        return _validate_json_string(
            value, collections.Sequence, 'args must be a valid JSON array'
        )

    @orm.validates('kwargs')
    def validate_kwargs(self, key, value):
        return _validate_json_string(
            value, collections.Mapping, 'kwargs must be a valid JSON object'
        )

    def __str__(self):
        return self.name
# Many-to-many join table linking periodic tasks to interval schedules.
task_interval_schedules = sqla.Table(
    'task_interval_schedules', Base.metadata,
    sqla.Column(
        'periodic_task_id',
        sqla.Integer,
        sqla.ForeignKey('periodic_task.id'),
    ),
    sqla.Column(
        'interval_schedule_id',
        sqla.Integer,
        sqla.ForeignKey('interval_schedule.id'),
    )
)

# Many-to-many join table linking periodic tasks to crontab schedules.
task_crontab_schedules = sqla.Table(
    'task_crontab_schedules', Base.metadata,
    sqla.Column(
        'periodic_task_id',
        sqla.Integer,
        sqla.ForeignKey('periodic_task.id'),
    ),
    sqla.Column(
        'crontab_schedule_id',
        sqla.Integer,
        sqla.ForeignKey('crontab_schedule.id'),
    )
)
def _validate_json_string(string, cls, msg, nullable=True):
if nullable and string is None:
return string
try:
obj = json.loads(string)
except Exception:
raise ValueError(msg)
else:
if not isinstance(obj, cls):
raise ValueError(msg)
else:
return string
| {
"repo_name": "adblair/celerycontrib.sqlalchemyscheduler",
"path": "celerycontrib/sqlalchemyscheduler/model.py",
"copies": "1",
"size": "5363",
"license": "mit",
"hash": 4432822859896694000,
"line_mean": 24.6602870813,
"line_max": 77,
"alpha_frac": 0.6009696066,
"autogenerated": false,
"ratio": 3.6632513661202184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9764220972720218,
"avg_score": 0,
"num_lines": 209
} |
# Adapted from djpubsubhubbub. See License: http://git.participatoryculture.org/djpubsubhubbub/tree/LICENSE
from datetime import datetime, timedelta
import feedparser
import requests
import re
from django.conf import settings
from django.db import models
import hashlib
from apps.push import signals
from apps.rss_feeds.models import Feed
from utils import log as logging
from utils.feed_functions import timelimit
DEFAULT_LEASE_SECONDS = (10 * 24 * 60 * 60) # 10 days
class PushSubscriptionManager(models.Manager):
    """Manager handling (re)subscription of feeds to their PuSH/PubSubHubbub hubs."""

    @timelimit(5)
    def subscribe(self, topic, feed, hub=None, callback=None,
                  lease_seconds=None, force_retry=False):
        """Subscribe `feed` to `topic` at `hub` and return the PushSubscription.

        If `hub` is None it is discovered from the feed itself; raises
        TypeError when no hub can be determined.  `force_retry` suppresses
        the one automatic retry performed when the hub reports a different
        allowed topic.
        """
        if hub is None:
            hub = self._get_hub(topic)
        if hub is None:
            raise TypeError('hub cannot be None if the feed does not provide it')
        if lease_seconds is None:
            lease_seconds = getattr(settings, 'PUBSUBHUBBUB_LEASE_SECONDS',
                                    DEFAULT_LEASE_SECONDS)
        feed = Feed.get_by_id(feed.pk)
        subscription, created = self.get_or_create(feed=feed)
        signals.pre_subscribe.send(sender=subscription, created=created)
        subscription.set_expiration(lease_seconds)
        # Topic column is capped at 200 chars; fall back to the feed link.
        if len(topic) < 200:
            subscription.topic = topic
        else:
            subscription.topic = feed.feed_link[:200]
        subscription.hub = hub
        subscription.save()
        if callback is None:
            # try:
            #     callback_path = reverse('push-callback', args=(subscription.pk,))
            # except Resolver404:
            #     raise TypeError('callback cannot be None if there is not a reversible URL')
            # else:
            #     # callback = 'http://' + Site.objects.get_current() + callback_path
            callback = "http://push.newsblur.com/push/%s" % subscription.pk  # + callback_path
        try:
            response = self._send_request(hub, {
                'hub.mode'          : 'subscribe',
                'hub.callback'      : callback,
                'hub.topic'         : topic,
                'hub.verify'        : ['async', 'sync'],
                'hub.verify_token'  : subscription.generate_token('subscribe'),
                'hub.lease_seconds' : lease_seconds,
            })
        except requests.ConnectionError:
            # Hub unreachable: treated below the same as a failed response.
            response = None
        if response and response.status_code == 204:
            # 204: hub verified the subscription synchronously.
            subscription.verified = True
        elif response and response.status_code == 202:  # async verification
            subscription.verified = False
        else:
            error = response and response.content or ""
            # Some hubs (e.g. FeedBurner) insist on a canonical topic URL;
            # retry once with the topic they report.
            if not force_retry and 'You may only subscribe to' in error:
                extracted_topic = re.search("You may only subscribe to (.*?) ", error)
                if extracted_topic:
                    subscription = self.subscribe(extracted_topic.group(1),
                                                  feed=feed, hub=hub, force_retry=True)
            else:
                logging.debug(u'   ---> [%-30s] ~FR~BKFeed failed to subscribe to push: %s (code: %s)' % (
                              unicode(subscription.feed)[:30], error, response and response.status_code))
        subscription.save()
        feed.setup_push()
        if subscription.verified:
            signals.verified.send(sender=subscription)
        return subscription

    def _get_hub(self, topic):
        """Return the hub href advertised by the feed at `topic`, if any."""
        parsed = feedparser.parse(topic)
        for link in parsed.feed.links:
            if link['rel'] == 'hub':
                return link['href']

    def _send_request(self, url, data):
        """POST the subscription request to the hub endpoint."""
        return requests.post(url, data=data)
class PushSubscription(models.Model):
    """One PuSH (PubSubHubbub) subscription: a feed bound to a hub + topic URL."""

    feed = models.OneToOneField(Feed, db_index=True, related_name='push')
    hub = models.URLField(db_index=True)
    topic = models.URLField(db_index=True)
    verified = models.BooleanField(default=False)
    verify_token = models.CharField(max_length=60)
    lease_expires = models.DateTimeField(default=datetime.now)

    objects = PushSubscriptionManager()

    # class Meta:
    #     unique_together = [
    #         ('hub', 'topic')
    #     ]

    def unsubscribe(self):
        """Drop this subscription and re-run the feed's push setup."""
        feed = self.feed
        self.delete()
        feed.setup_push()

    def set_expiration(self, lease_seconds):
        """Persist a new lease expiry `lease_seconds` from now."""
        self.lease_expires = datetime.now() + timedelta(
            seconds=lease_seconds)
        self.save()

    def generate_token(self, mode):
        """Create, store and return a hub verify token for `mode`.

        Token is the mode prefix plus a SHA1 of SECRET_KEY + pk + mode,
        so it is stable per-subscription but unguessable.
        """
        assert self.pk is not None, \
            'Subscription must be saved before generating token'
        token = mode[:20] + hashlib.sha1('%s%i%s' % (
            settings.SECRET_KEY, self.pk, mode)).hexdigest()
        self.verify_token = token
        self.save()
        return token

    def check_urls_against_pushed_data(self, parsed):
        """Re-subscribe if the pushed feed advertises a new hub or topic URL."""
        if hasattr(parsed.feed, 'links'):  # single notification
            hub_url = self.hub
            self_url = self.topic
            for link in parsed.feed.links:
                href = link.get('href', '')
                # Ignore WordPress admin/cron noise links.
                if any(w in href for w in ['wp-admin', 'wp-cron']):
                    continue
                if link['rel'] == 'hub':
                    hub_url = link['href']
                elif link['rel'] == 'self':
                    self_url = link['href']
            needs_update = False
            if hub_url and self.hub != hub_url:
                # hub URL has changed; let's update our subscription
                needs_update = True
            elif self_url != self.topic:
                # topic URL has changed
                needs_update = True
            if needs_update:
                logging.debug(u'   ---> [%-30s] ~FR~BKUpdating PuSH hub/topic: %s / %s' % (
                              unicode(self.feed)[:30], hub_url, self_url))
                # Keep the remaining lease length when re-subscribing.
                expiration_time = self.lease_expires - datetime.now()
                seconds = expiration_time.days*86400 + expiration_time.seconds
                PushSubscription.objects.subscribe(
                    self_url, feed=self.feed, hub=hub_url,
                    lease_seconds=seconds)

    def __unicode__(self):
        if self.verified:
            verified = u'verified'
        else:
            verified = u'unverified'
        return u'to %s on %s: %s' % (
            self.topic, self.hub, verified)

    def __str__(self):
        return str(unicode(self))
| {
"repo_name": "eric-stanley/NewsBlur",
"path": "apps/push/models.py",
"copies": "2",
"size": "6465",
"license": "mit",
"hash": 5155356800891188000,
"line_mean": 36.8070175439,
"line_max": 107,
"alpha_frac": 0.5552977572,
"autogenerated": false,
"ratio": 4.267326732673268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007587256036369712,
"num_lines": 171
} |
# Adapted from djpubsubhubbub. See License: http://git.participatoryculture.org/djpubsubhubbub/tree/LICENSE
import feedparser
import random
import datetime
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from apps.push.models import PushSubscription
from apps.push.signals import verified
from apps.rss_feeds.models import MFetchHistory
from utils import log as logging
def push_callback(request, push_id):
    """Endpoint for PuSH (PubSubHubbub) hub callbacks.

    GET  -> subscription verification: echo the hub's challenge.
    POST -> content ping: rate-limit, then schedule a feed fetch.
    Any other method results in a 404.
    """
    if request.method == 'GET':
        mode = request.GET['hub.mode']
        topic = request.GET['hub.topic']
        challenge = request.GET['hub.challenge']
        lease_seconds = request.GET.get('hub.lease_seconds')
        verify_token = request.GET.get('hub.verify_token', '')
        if mode == 'subscribe':
            if not verify_token.startswith('subscribe'):
                raise Http404
            subscription = get_object_or_404(PushSubscription,
                                             pk=push_id,
                                             topic=topic,
                                             verify_token=verify_token)
            subscription.verified = True
            subscription.set_expiration(int(lease_seconds))
            subscription.save()
            subscription.feed.setup_push()
            logging.debug('   ---> [%-30s] [%s] ~BBVerified PuSH' % (unicode(subscription.feed)[:30], subscription.feed_id))
            verified.send(sender=subscription)
        return HttpResponse(challenge, content_type='text/plain')
    elif request.method == 'POST':
        subscription = get_object_or_404(PushSubscription, pk=push_id)
        fetch_history = MFetchHistory.feed(subscription.feed_id)
        latest_push_date_delta = None
        if fetch_history and fetch_history.get('push_history'):
            latest_push = fetch_history['push_history'][0]['push_date']
            latest_push_date = datetime.datetime.strptime(latest_push, '%Y-%m-%d %H:%M:%S')
            latest_push_date_delta = datetime.datetime.now() - latest_push_date
            # Throttle pushes arriving less than a minute apart.
            if latest_push_date > datetime.datetime.now() - datetime.timedelta(minutes=1):
                logging.debug('   ---> [%-30s] ~SN~FBSkipping feed fetch, pushed %s seconds ago' % (unicode(subscription.feed)[:30], latest_push_date_delta.seconds))
                return HttpResponse('Slow down, you just pushed %s seconds ago...' % latest_push_date_delta.seconds, status=429)
        # XXX TODO: Optimize this by removing feedparser. It just needs to find out
        # the hub_url or topic has changed. ElementTree could do it.
        if random.random() < 0.1:
            parsed = feedparser.parse(request.body)
            subscription.check_urls_against_pushed_data(parsed)
        # Don't give fat ping, just fetch.
        # subscription.feed.queue_pushed_feed_xml(request.body)
        if subscription.feed.active_premium_subscribers >= 1:
            subscription.feed.queue_pushed_feed_xml("Fetch me", latest_push_date_delta=latest_push_date_delta)
            MFetchHistory.add(feed_id=subscription.feed_id,
                              fetch_type='push')
        else:
            logging.debug('   ---> [%-30s] ~FBSkipping feed fetch, no actives: %s' % (unicode(subscription.feed)[:30], subscription.feed))
        return HttpResponse('OK')
    # BUGFIX: this used to ``return Http404`` — the exception *class*, not an
    # HttpResponse — which crashes Django's response handling.  Raising it
    # lets Django render a proper 404 for unsupported methods.
    raise Http404
| {
"repo_name": "samuelclay/NewsBlur",
"path": "apps/push/views.py",
"copies": "1",
"size": "3357",
"license": "mit",
"hash": -1604661393562460700,
"line_mean": 48.3676470588,
"line_max": 165,
"alpha_frac": 0.6234733393,
"autogenerated": false,
"ratio": 4.049457177322075,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5172930516622074,
"avg_score": null,
"num_lines": null
} |
# Adapted from djpubsubhubbub. See License: http://git.participatoryculture.org/djpubsubhubbub/tree/LICENSE
import feedparser
import random
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from apps.push.models import PushSubscription
from apps.push.signals import verified
from apps.rss_feeds.models import MFetchHistory
from utils import log as logging
def push_callback(request, push_id):
    """Endpoint for PuSH (PubSubHubbub) hub callbacks.

    GET  -> subscription verification: echo the hub's challenge.
    POST -> content ping: schedule a feed fetch.
    Any other method results in a 404.
    """
    if request.method == 'GET':
        mode = request.GET['hub.mode']
        topic = request.GET['hub.topic']
        challenge = request.GET['hub.challenge']
        lease_seconds = request.GET.get('hub.lease_seconds')
        verify_token = request.GET.get('hub.verify_token', '')
        if mode == 'subscribe':
            if not verify_token.startswith('subscribe'):
                raise Http404
            subscription = get_object_or_404(PushSubscription,
                                             pk=push_id,
                                             topic=topic,
                                             verify_token=verify_token)
            subscription.verified = True
            subscription.set_expiration(int(lease_seconds))
            subscription.save()
            subscription.feed.setup_push()
            logging.debug('   ---> [%-30s] [%s] ~BBVerified PuSH' % (unicode(subscription.feed)[:30], subscription.feed_id))
            verified.send(sender=subscription)
        return HttpResponse(challenge, content_type='text/plain')
    elif request.method == 'POST':
        subscription = get_object_or_404(PushSubscription, pk=push_id)
        # XXX TODO: Optimize this by removing feedparser. It just needs to find out
        # the hub_url or topic has changed. ElementTree could do it.
        if random.random() < 0.1:
            parsed = feedparser.parse(request.raw_post_data)
            subscription.check_urls_against_pushed_data(parsed)
        # Don't give fat ping, just fetch.
        # subscription.feed.queue_pushed_feed_xml(request.raw_post_data)
        if subscription.feed.active_premium_subscribers >= 1:
            subscription.feed.queue_pushed_feed_xml("Fetch me")
            MFetchHistory.add(feed_id=subscription.feed_id,
                              fetch_type='push')
        else:
            logging.debug('   ---> [%-30s] ~FBSkipping feed fetch, no actives: %s' % (unicode(subscription.feed)[:30], subscription.feed))
        return HttpResponse('')
    # BUGFIX: this used to ``return Http404`` — the exception *class*, not an
    # HttpResponse — which crashes Django's response handling.  Raising it
    # lets Django render a proper 404 for unsupported methods.
    raise Http404
| {
"repo_name": "eric-stanley/NewsBlur",
"path": "apps/push/views.py",
"copies": "3",
"size": "2496",
"license": "mit",
"hash": 6592566531701190000,
"line_mean": 42.7894736842,
"line_max": 138,
"alpha_frac": 0.6181891026,
"autogenerated": false,
"ratio": 4.146179401993355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6264368504593355,
"avg_score": null,
"num_lines": null
} |
import re
# Snapshot of registered URI schemes
# (http://www.iana.org/assignments/uri-schemes.html).
iana_schemes = [
    "ftp", "http", "gopher", "mailto", "news", "nntp", "telnet", "wais",
    "file", "prospero", "z39.50s", "z39.50r", "cid", "mid", "vemmi",
    "service", "imap", "nfs", "acap", "rtsp", "tip", "pop", "data", "dav",
    "opaquelocktoken", "sip", "sips", "tel", "fax", "modem", "ldap",
    "https", "soap.beep", "soap.beeps", "xmlrpc.beep", "xmlrpc.beeps",
    "urn", "go", "h323", "ipp", "tftp", "mupdate", "pres", "im", "mtqp",
    "iris.beep", "dict", "snmp", "crid", "tag", "dns", "info"
]
# 'javascript' is deliberately accepted on top of the IANA registry.
allowed_schemes = iana_schemes + ['javascript']

# Loose RFC 2396 shape: optional scheme, up to two slashes, then URI chars.
rfc2396_re = re.compile("([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
                        "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]*$")
# Strict variant: scheme mandatory, at least one character after it.
rfc2396_full_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
                             "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
# urn:<NID>:<NSS> grammar (RFC 2141).
urn_re = re.compile(r"^[Uu][Rr][Nn]:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
# tag: URI grammar (RFC 4151): [authority],date:specific[#fragment].
tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*(#[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*)?$")
def isValidURI(value, uriPattern=rfc2396_re):
    """Validate `value` as a URI against `uriPattern` plus scheme-specific rules.

    tag: and urn: URIs get their own grammars; http/ftp must have a real
    authority; unknown alphabetic schemes are rejected.  Returns a tuple
    (ok, reason) where reason is '' on success.
    """
    scheme = value.split(':')[0].lower()
    if scheme == 'tag':
        if not tag_re.match(value):
            return False, "invalid-tag-uri"
    elif scheme == "urn":
        if not urn_re.match(value):
            return False, "invalid-urn"
    elif not uriPattern.match(value):
        # Report the first offending ASCII character, if any.
        urichars_re = re.compile("[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]")
        offender = next(
            (c for c in value if ord(c) < 128 and not urichars_re.match(c)),
            None)
        if offender is not None:
            return False, "invalid-uri-char"
        # All chars are legal individually: maybe it is an IRI that only
        # validates after IDNA-encoding its host.
        try:
            if uriPattern.match(value.encode('idna')):
                return False, "uri-not-iri"
        except:
            pass
        return False, "invalid-uri"
    elif scheme in ['http', 'ftp']:
        # http/ftp require '//' plus a non-empty authority.
        if not re.match('^\w+://[^/].*', value):
            return False, "invalid-http-or-ftp-uri"
    elif value.find(':') >= 0 and scheme.isalpha() and scheme not in allowed_schemes:
        return False, "invalid-scheme"
    return True, ""
def isValidIRI(value):
    """Validate an IRI: IDNA-encode any international characters, then run
    the regular URI validation.  Encoding failures fall back to validating
    the raw value."""
    try:
        if value:
            value = value.encode('idna')
    except:
        pass
    return isValidURI(value)
def isValidFullyQualifiedURI(value):
    """Like isValidURI, but a scheme and non-empty remainder are mandatory."""
    return isValidURI(value, uriPattern=rfc2396_full_re)
| {
"repo_name": "jpmckinney/wikipedia-names-your-band",
"path": "html5lib/filters/rfc3987.py",
"copies": "4",
"size": "3613",
"license": "mit",
"hash": -7569966508733653000,
"line_mean": 44.7341772152,
"line_max": 166,
"alpha_frac": 0.5898145585,
"autogenerated": false,
"ratio": 2.9761120263591434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018377670148660433,
"num_lines": 79
} |
# adapted from firebase/EventSource-Examples/python/chat.py by Shariq Hashme
from sseclient import SSEClient
import requests
from Queue import Queue
import json
import threading
import socket
class ClosableSSEClient(SSEClient):
    """SSEClient variant that can be shut down cleanly from another thread.

    close() flags the client to stop reconnecting and force-closes the
    underlying socket so a blocked read returns immediately.
    """

    def __init__(self, *args, **kwargs):
        self.should_connect = True
        super(ClosableSSEClient, self).__init__(*args, **kwargs)

    def _connect(self):
        if not self.should_connect:
            # Stop the iteration loop instead of reconnecting.
            raise StopIteration()
        super(ClosableSSEClient, self)._connect()

    def close(self):
        self.should_connect = False
        self.retry = 0
        try:
            # Reach into requests' internals to kill the live socket;
            # AttributeError just means there is nothing to tear down.
            raw_socket = self.resp.raw._fp.fp._sock
            raw_socket.shutdown(socket.SHUT_RDWR)
            raw_socket.close()
        except AttributeError:
            pass
class RemoteThread(threading.Thread):
    """Background thread that streams SSE events from `URL` and calls
    `function((event, data))` for every non-keep-alive message."""

    def __init__(self, parent, URL, function):
        self.function = function
        self.URL = URL
        self.parent = parent
        # BUGFIX: initialise the client handle so close() is safe even if it
        # is called before run() has created the SSE connection.
        self.sse = None
        super(RemoteThread, self).__init__()

    def run(self):
        try:
            self.sse = ClosableSSEClient(self.URL)
            for msg in self.sse:
                msg_data = json.loads(msg.data)
                if msg_data is None:  # keep-alives carry a null payload
                    continue
                # TODO: update parent cache here
                self.function((msg.event, msg_data))
        except socket.error:
            pass  # this can happen when we close the stream
        except KeyboardInterrupt:
            self.close()

    def close(self):
        if self.sse:
            self.sse.close()
def firebaseURL(URL):
    """Normalize a Firebase location into a full https://...firebaseio.com/....json URL.

    Accepts a bare app name ('myapp'), an app name with a path
    ('myapp/users'), or a full firebaseio.com URL in any of several forms.
    """
    if '.firebaseio.com' not in URL.lower():
        # Bare app name, optionally with a path: build the full hostname.
        if URL.endswith('.json'):
            URL = URL[:-5]
        if '/' in URL:
            if URL.endswith('/'):
                URL = URL[:-1]
            parts = URL.split('/', 1)
            return ('https://' + parts[0] + '.firebaseio.com/' +
                    parts[1] + '.json')
        return 'https://' + URL + '.firebaseio.com/.json'
    # Full firebaseio.com URL: force https and a .json suffix.
    if 'http://' in URL:
        URL = URL.replace('http://', 'https://')
    if 'https://' not in URL:
        URL = 'https://' + URL
    if '.json' not in URL.lower():
        URL = URL + '.json' if URL.endswith('/') else URL + '/.json'
    return URL
class subscriber:
    """Subscribes to a Firebase location and streams change events to `function`."""

    def __init__(self, URL, function):
        # Cache is currently unused by RemoteThread (see its TODO).
        self.cache = {}
        self.remote_thread = RemoteThread(self, firebaseURL(URL), function)

    def start(self):
        """Begin streaming events on the background thread."""
        self.remote_thread.start()

    def stop(self):
        """Close the stream and wait for the worker thread to exit."""
        self.remote_thread.close()
        self.remote_thread.join()

    def wait(self):
        """Block until the worker thread finishes on its own."""
        self.remote_thread.join()
class FirebaseException(Exception):
    """Raised when the Firebase REST API returns a non-200 response."""
    pass
def put(URL, msg):
    """PUT (replace) the JSON-encoded `msg` at the Firebase location `URL`.

    Raises FirebaseException on any non-200 response.
    """
    payload = json.dumps(msg)
    response = requests.put(firebaseURL(URL), data=payload)
    if response.status_code != 200:
        raise FirebaseException(response.text)
def patch(URL, msg):
    """PATCH (merge) the JSON-encoded `msg` into the Firebase location `URL`.

    Raises FirebaseException on any non-200 response.
    """
    payload = json.dumps(msg)
    response = requests.patch(firebaseURL(URL), data=payload)
    if response.status_code != 200:
        raise FirebaseException(response.text)
def get(URL):
    """GET the value at the Firebase location `URL` and return it decoded.

    Raises FirebaseException on any non-200 response.
    """
    response = requests.get(firebaseURL(URL))
    if response.status_code != 200:
        raise FirebaseException(response.text)
    return json.loads(response.text) | {
"repo_name": "rliu42/WindowPane",
"path": "server/firebase.py",
"copies": "1",
"size": "3319",
"license": "mit",
"hash": -5598768159721898000,
"line_mean": 25.3492063492,
"line_max": 87,
"alpha_frac": 0.5598071708,
"autogenerated": false,
"ratio": 3.828143021914648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48879501927146485,
"avg_score": null,
"num_lines": null
} |
# Adapted from galaxyzoo2.gz2string
def gal_string(datarow,survey='decals'):
""" Determine a string for the consensus GZ2 classification of a
galaxy's morphology.
Parameters
----------
datarow : astropy.io.fits.fitsrec.FITS_record
Iterated element (row) of a final
GZ2 table, containing all debiased probabilities
and vote counts
survey : string indicating the survey group that defines
the workflow/decision tree. Default is 'decals'.
Possible options should be:
'candels'
'candels_2epoch'
'decals'
'ferengi'
'goods_full'
'illustris'
'sloan',
'sloan_singleband'
'ukidss'
Returns
-------
char: str
String giving the plurality classification from GZ2
eg, '
Notes
-------
"""
weights = datarow
if survey in ('decals',):
max_t1 = weights[:3].argmax()
char = ''
task_eval = [0]*12
task_eval[0] = 1
# Smooth galaxies
if max_t1 == 0:
char += 'E'
# Roundness
char += ('r','i','c')[weights[23:26].argmax()]
task_eval[8] = 1
# Features/disk galaxies
if max_t1 == 1:
char += 'S'
task_eval[1] = 1
edgeon = weights[3:5]
# Edge-on disks
if edgeon[0] >= edgeon[1]:
char += 'e'
# Bulge shape
char += ('r','b','n')[weights[20:23].argmax()]
task_eval[7] = 1
# Not edge-on disks
else:
task_eval[2] = 1
task_eval[3] = 1
task_eval[4] = 1
if weights[5] > weights[6]:
# Barred galaxies
char += 'B'
# Bulge prominence
char += ('c','b','a')[weights[9:12].argmax()]
if weights[7] > weights[8]:
# Arms number
char += ('1','2','3','4','+')[weights[15:20].argmax()]
task_eval[6] = 1
# Arms winding
char += ('t','m','l')[weights[12:15].argmax()]
task_eval[5] = 1
# Mergers/tidal debris
char += '[%s]' % ('MG','TD','MT','')[weights[26:30].argmax()]
task_eval[9] = 1
# Odd features
if max(weights[31:38]) > 0.50:
char += '(%s)' % ('n','r','l','d','i','o','v')[weights[31:38].argmax()]
task_eval[10] = 1
# Star/artifact
if max_t1 == 2:
char = 'A'
# Discuss
task_eval[11] = 1
task_ans = [0]*12
task_ans[0] = weights[ 0: 3].argmax()+0
task_ans[1] = weights[ 3: 5].argmax()+3
task_ans[2] = weights[ 5: 7].argmax()+5
task_ans[3] = weights[ 7: 9].argmax()+7
task_ans[4] = weights[ 9:12].argmax()+9
task_ans[5] = weights[12:15].argmax()+12
task_ans[6] = weights[15:20].argmax()+15
task_ans[7] = weights[20:23].argmax()+20
task_ans[8] = weights[23:26].argmax()+23
task_ans[9] = weights[26:30].argmax()+26
task_ans[10] = weights[31:38].argmax()+31
task_ans[11] = weights[38:40].argmax()+38
if survey in ('ferengi'):
# Top-level: elliptical, features/disk, artifact
max_t1 = weights[:3].argmax()
char = ''
task_eval = [0]*12
task_eval[0] = 1
# Smooth galaxies
if max_t1 == 0:
char += 'E'
# Roundness
char += ('r','i','c')[weights[23:26].argmax()]
task_eval[8] = 1
# Features/disk galaxies
if max_t1 == 1:
char += 'S'
task_eval[1] = 1
edgeon = weights[3:5]
# Edge-on disks
if edgeon[0] >= edgeon[1]:
char += 'e'
# Bulge shape
char += ('r','b','n')[weights[20:23].argmax()]
task_eval[7] = 1
# Not edge-on disks
else:
task_eval[2] = 1
task_eval[3] = 1
task_eval[4] = 1
if weights[5] > weights[6]:
# Barred galaxies
char += 'B'
# Bulge prominence
char += ('c','b','a')[weights[9:12].argmax()]
if weights[7] > weights[8]:
# Arms number
char += ('1','2','3','4','+')[weights[15:20].argmax()]
task_eval[6] = 1
# Arms winding
char += ('t','m','l')[weights[12:15].argmax()]
task_eval[5] = 1
# Mergers/tidal debris
char += '[%s]' % ('MG','TD','MT','')[weights[26:30].argmax()]
task_eval[9] = 1
# Odd features
if max(weights[31:38]) > 0.50:
char += '(%s)' % ('n','r','l','d','i','o','v')[weights[31:38].argmax()]
task_eval[10] = 1
# Star/artifact
if max_t1 == 2:
char = 'A'
# Discuss
task_eval[11] = 1
task_ans = [0]*12
task_ans[0] = weights[ 0: 3].argmax()+0
task_ans[1] = weights[ 3: 5].argmax()+3
task_ans[2] = weights[ 5: 7].argmax()+5
task_ans[3] = weights[ 7: 9].argmax()+7
task_ans[4] = weights[ 9:12].argmax()+9
task_ans[5] = weights[12:15].argmax()+12
task_ans[6] = weights[15:20].argmax()+15
task_ans[7] = weights[20:23].argmax()+20
task_ans[8] = weights[23:26].argmax()+23
task_ans[9] = weights[26:30].argmax()+26
task_ans[10] = weights[31:38].argmax()+31
task_ans[11] = weights[38:40].argmax()+38
return char,task_eval,task_ans
def plurality(datarow,survey='decals',check_threshold = 0.50):
    """ Determine the plurality for the consensus GZ2 classification of a
    galaxy's morphology.

    Walks the survey-specific decision tree: starting from the top-level
    smooth/features/artifact question, it follows the plurality (argmax)
    answer at each node and marks every question on that path as "evaluated".

    Parameters
    ----------
    datarow : astropy.io.fits.fitsrec.FITS_record
        Iterated element (row) of a final
        GZ2 table, containing all debiased probabilities
        and vote counts
    survey : string indicating the survey group that defines
        the workflow/decision tree. Default is 'decals'.
        Possible options should be:
            'candels'
            'candels_2epoch'
            'decals'
            'ferengi'
            'goods_full'
            'illustris'
            'sloan',
            'sloan_singleband'
            'ukidss'
        ('gz2', 'stripe82' and 'gzh' are also handled below although not
        listed here.)
    check_threshold: float indicating the threshold plurality level for
        checkbox questions. If no questions meet this, don't select any answer.

    Returns
    -------
    task_eval: array [N]
        1 if question was answered by the plurality path through the tree; 0 if not
    task_ans: array [N]
        Each answer gives the index of most common answer
        regardless of if it was in the plurality path or not.

    Notes
    -------
    NOTE(review): if `survey` matches none of the branches below, `d`,
    `task_eval` and `task_ans` are never bound and the final loop raises
    NameError -- confirm callers always pass a supported survey string.
    """
    # `weights` is the flat vector of per-answer vote fractions; each d[i]
    # below maps task i to its slice [idx, idx+len) of this vector.
    weights = datarow
    if survey in ('decals',):
        d = { 0:{'idx': 0,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx': 3,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              2:{'idx': 5,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?' ->
              3:{'idx': 7,'len':2}, # "Is there any sign of a spiral arm pattern?"
              4:{'idx': 9,'len':3}, # "How prominent is the central bulge, compared with the rest of the galaxy?"
              5:{'idx':12,'len':3}, # "How tightly wound do the spiral arms appear?"
              6:{'idx':15,'len':5}, # "How many spiral arms are there?"
              7:{'idx':20,'len':3}, # "Does the galaxy have a bulge at its centre? If so, what shape?"
              8:{'idx':23,'len':3}, # 'Round', 'How rounded is it?', ->
              9:{'idx':26,'len':4}, # "Is the galaxy currently merging or is there any sign of tidal debris?"
              10:{'idx':31,'len':7}, # "Do you see any of these odd features in the image?"
              11:{'idx':38,'len':2}} # "Would you like to discuss this object?"
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[8] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                # Disk galaxies
                task_eval[1] = 1
                # Edge-on disks
                if weights[d[1]['idx']] > weights[d[1]['idx']+1]:
                    # Bulge shape
                    task_eval[7] = 1
                # Not edge-on disks
                else:
                    task_eval[2] = 1
                    task_eval[3] = 1
                    if weights[d[3]['idx']] > weights[d[3]['idx']+1]:
                        # Spirals
                        task_eval[5] = 1
                        task_eval[6] = 1
                    # Bulge prominence: asked of all not-edge-on disks
                    task_eval[4] = 1
            # Merging/tidal debris
            task_eval[9] = 1
            # Odd features - only count if it's above some threshold, since this is a checkbox question
            if max(weights[d[10]['idx']:d[10]['idx'] + d[10]['len']]) > check_threshold:
                task_eval[10] = 1
        # Discuss (asked of every subject, including artifacts)
        task_eval[11] = 1
    if survey in ('ferengi','goods_full'):
        d = { 0:{'idx':0 ,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx':3 ,'len':3}, # 'Round', 'How rounded is it?', leadsTo: 'Is there anything odd?', ->
              2:{'idx':6 ,'len':2}, # 'Clumps', 'Does the galaxy have a mostly clumpy appearance?', ->
              3:{'idx':8 ,'len':6}, # 'Clumps', 'How many clumps are there?', leadsTo: 'Do the clumps appear in a straight line, a chain, or a cluster?', ->
              4:{'idx':14 ,'len':4}, # 'Clumps', 'Do the clumps appear in a straight line, a chain, or a cluster?', leadsTo: 'Is there one clump which is clearly brighter than the others?', ->
              5:{'idx':18 ,'len':2}, # 'Clumps', 'Is there one clump which is clearly brighter than the others?', ->
              6:{'idx':20 ,'len':2}, # 'Clumps', 'Is the brightest clump central to the galaxy?', ->
              7:{'idx':22 ,'len':2}, # 'Symmetry', 'Does the galaxy appear symmetrical?', leadsTo: 'Do the clumps appear to be embedded within a larger object?', ->
              8:{'idx':24 ,'len':2}, # 'Clumps', 'Do the clumps appear to be embedded within a larger object?', leadsTo: 'Is there anything odd?', ->
              9:{'idx':26 ,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              10:{'idx':28 ,'len':3}, # 'Bulge', 'Does the galaxy have a bulge at its center? If so, what shape?', leadsTo: 'Is there anything odd?', ->
              11:{'idx':31 ,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?', leadsTo: 'Is there any sign of a spiral arm pattern?', ->
              12:{'idx':33 ,'len':2}, # 'Spiral', 'Is there any sign of a spiral arm pattern?', ->
              13:{'idx':35 ,'len':3}, # 'Spiral', 'How tightly wound do the spiral arms appear?', leadsTo: 'How many spiral arms are there?', ->
              14:{'idx':38 ,'len':6}, # 'Spiral', 'How many spiral arms are there?', leadsTo: 'How prominent is the central bulge, compared with the rest of the galaxy?', ->
              15:{'idx':44 ,'len':4}, # 'Bulge', 'How prominent is the central bulge, compared with the rest of the galaxy?', leadsTo: 'Is there anything odd?', ->
              16:{'idx':48 ,'len':2}, # 'Discuss', 'Would you like to discuss this object?', ->
              17:{'idx':50 ,'len':2}, # 'Odd', 'Is there anything odd?', ->
              18:{'idx':53 ,'len':7}} # 'Odd', 'What are the odd features?', -> # Indexing here skips the a-0 answer.
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[1] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                task_eval[2] = 1
                # Clumpy question
                if weights[d[2]['idx']] > weights[d[2]['idx']+1]:
                    # Clumpy galaxies
                    task_eval[3] = 1
                    if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 0:
                        # Multiple clumps
                        if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 1:
                            # One bright clump
                            task_eval[4] = 1
                        task_eval[5] = 1
                        if weights[d[5]['idx']] > weights[d[5]['idx']+1]:
                            # Bright clump symmetrical
                            task_eval[6] = 1
                            if weights[d[6]['idx']] > weights[d[6]['idx']+1]:
                                task_eval[7] = 1
                                task_eval[8] = 1
                else:
                    # Disk galaxies
                    task_eval[9] = 1
                    # Edge-on disks
                    if weights[d[9]['idx']] > weights[d[9]['idx']+1]:
                        # Bulge shape
                        task_eval[10] = 1
                    # Not edge-on disks
                    else:
                        task_eval[11] = 1
                        task_eval[12] = 1
                        if weights[d[12]['idx']] > weights[d[12]['idx']+1]:
                            # Spirals
                            task_eval[13] = 1
                            task_eval[14] = 1
                        # Bulge prominence for all not-edge-on disks
                        task_eval[15] = 1
            # Odd features
            task_eval[17] = 1
            if weights[d[17]['idx']] > weights[d[17]['idx']+1]:
                # Only count if it's above some threshold, since this is a checkbox question
                if max(weights[d[18]['idx']:d[18]['idx'] + d[18]['len']]) > check_threshold:
                    task_eval[18] = 1
        # Discuss
        task_eval[16] = 1
    if survey in ('gzh',):
        d = { 0:{'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              2:{'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?', leadsTo: 'Is there any sign of a spiral arm pattern?', ->
              3:{'len':2}, # 'Spiral', 'Is there any sign of a spiral arm pattern?', ->
              4:{'len':4}, # 'Bulge', 'How prominent is the central bulge, compared with the rest of the galaxy?', leadsTo: 'Is there anything odd?', ->
              5:{'len':2}, # 'Odd', 'Is there anything odd?', ->
              6:{'len':3}, # 'Round', 'How rounded is it?', leadsTo: 'Is there anything odd?', ->
              7:{'len':7}, # 'Odd', 'What are the odd features?', -> Not a checkbox
              8:{'len':3}, # 'Bulge', 'Does the galaxy have a bulge at its center? If so, what shape?', leadsTo: 'Is there anything odd?', ->
              9:{'len':3}, # 'Spiral', 'How tightly wound do the spiral arms appear?', leadsTo: 'How many spiral arms are there?', ->
              10:{'len':6}, # 'Spiral', 'How many spiral arms are there?', leadsTo: 'How prominent is the central bulge, compared with the rest of the galaxy?', ->
              11:{'len':2}, # 'Clumps', 'Does the galaxy have a mostly clumpy appearance?', ->
              12:{'len':6}, # 'Clumps', 'How many clumps are there?', leadsTo: 'Do the clumps appear in a straight line, a chain, or a cluster?', ->
              13:{'len':2}, # 'Clumps', 'Is there one clump which is clearly brighter than the others?', ->
              14:{'len':2}, # 'Clumps', 'Is the brightest clump central to the galaxy?', ->
              15:{'len':4}, # 'Clumps', 'Do the clumps appear in a straight line, a chain, or a cluster?', leadsTo: 'Is there one clump which is clearly brighter than the others?', ->
              16:{'len':2}, # 'Symmetry', 'Does the galaxy appear symmetrical?', leadsTo: 'Do the clumps appear to be embedded within a larger object?', ->
              17:{'len':2}} # 'Clumps', 'Do the clumps appear to be embedded within a larger object?', leadsTo: 'Is there anything odd?', ->
        # NOTE(review): the task_eval indices used in this branch follow the
        # ferengi/CANDELS layout (roundness=1, clumpy=2/3, disk=9, ...), but
        # the comments on `d` just above describe a different GZH ordering
        # (roundness=6, clumpy=11/12, ...). One of the two looks like a
        # copy-paste remnant -- verify this branch against the GZH tree.
        # Don't need to skip indices since there's no checkbox question
        idx = 0
        for i in range(len(d)):
            d[i]['idx'] = idx
            idx += d[i]['len']
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[1] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                task_eval[2] = 1
                # Clumpy question
                if weights[d[2]['idx']] > weights[d[2]['idx']+1]:
                    # Clumpy galaxies
                    task_eval[3] = 1
                    if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 0:
                        # Multiple clumps
                        if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 1:
                            # Clump arrangement
                            task_eval[4] = 1
                            if weights[d[4]['idx']:d[4]['idx']+d[4]['len']].argmax() == 3:
                                # Clumps arranged in a spiral -> ask the disk questions
                                # Bar
                                task_eval[11] = 1
                                # Spiral structure
                                task_eval[12] = 1
                                if weights[d[12]['idx']] > weights[d[12]['idx']+1]:
                                    # Spiral arms
                                    task_eval[13] = 1
                                    task_eval[14] = 1
                                # Bulge prominence
                                task_eval[15] = 1
                        # One clump brighter than others
                        task_eval[5] = 1
                        if weights[d[5]['idx']] > weights[d[5]['idx']+1]:
                            # Bright clump central
                            task_eval[6] = 1
                            if weights[d[6]['idx']] > weights[d[6]['idx']+1]:
                                # Symmetrical clumps
                                task_eval[7] = 1
                                # Clumps embedded
                                task_eval[8] = 1
                else:
                    # Disk galaxies
                    task_eval[9] = 1
                    # Edge-on disks
                    if weights[d[9]['idx']] > weights[d[9]['idx']+1]:
                        # Bulge shape
                        task_eval[10] = 1
                    # Not edge-on disks
                    else:
                        # Bar
                        task_eval[11] = 1
                        # Spiral
                        task_eval[12] = 1
                        if weights[d[12]['idx']] > weights[d[12]['idx']+1]:
                            # Spiral arm numbers and winding
                            task_eval[13] = 1
                            task_eval[14] = 1
                        # Bulge prominence
                        task_eval[15] = 1
            # Odd features
            task_eval[16] = 1
            if weights[d[16]['idx']] > weights[d[16]['idx']+1]:
                # Only count if it's above some threshold, since this is a checkbox question
                if max(weights[d[17]['idx']:d[17]['idx'] + d[17]['len']]) > check_threshold:
                    task_eval[17] = 1
        # Clumpy questions 5-8 not answered if they were organized in a spiral
        if weights[d[4]['idx']:d[4]['idx']+d[4]['len']].argmax() == 3 and task_eval[4]:
            task_eval[5] = 0
            task_eval[6] = 0
            task_eval[7] = 0
            task_eval[8] = 0
    if survey in ('candels','candels_2epoch'):
        d = { 0:{'idx':0 ,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx':3 ,'len':3}, # 'Round', 'How rounded is it?', leadsTo: 'Is there anything odd?', ->
              2:{'idx':6 ,'len':2}, # 'Clumps', 'Does the galaxy have a mostly clumpy appearance?', ->
              3:{'idx':8 ,'len':6}, # 'Clumps', 'How many clumps are there?', leadsTo: 'Do the clumps appear in a straight line, a chain, or a cluster?', ->
              4:{'idx':14,'len':4}, # 'Clumps', 'Do the clumps appear in a straight line, a chain, or a cluster?', leadsTo: 'Is there one clump which is clearly brighter than the others?', ->
              5:{'idx':18,'len':2}, # 'Clumps', 'Is there one clump which is clearly brighter than the others?', ->
              6:{'idx':20,'len':2}, # 'Clumps', 'Is the brightest clump central to the galaxy?', ->
              7:{'idx':22,'len':2}, # 'Symmetry', 'Does the galaxy appear symmetrical?', leadsTo: 'Do the clumps appear to be embedded within a larger object?', ->
              8:{'idx':24,'len':2}, # 'Clumps', 'Do the clumps appear to be embedded within a larger object?', leadsTo: 'Is there anything odd?', ->
              9:{'idx':26,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              10:{'idx':28,'len':2}, # 'Bulge', 'Does the galaxy have a bulge at its center?', leadsTo: 'Is there anything odd?', ->
              11:{'idx':30,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?', leadsTo: 'Is there any sign of a spiral arm pattern?', ->
              12:{'idx':32,'len':2}, # 'Spiral', 'Is there any sign of a spiral arm pattern?', ->
              13:{'idx':34,'len':3}, # 'Spiral', 'How tightly wound do the spiral arms appear?', leadsTo: 'How many spiral arms are there?', ->
              14:{'idx':37,'len':6}, # 'Spiral', 'How many spiral arms are there?', leadsTo: 'How prominent is the central bulge, compared with the rest of the galaxy?', ->
              15:{'idx':43,'len':3}, # 'Bulge', 'How prominent is the central bulge, compared with the rest of the galaxy?', leadsTo: 'Is there anything odd?', ->
              16:{'idx':46,'len':4}, # Merging/tidal debris
              17:{'idx':50,'len':2}} # Discuss
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[1] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                task_eval[2] = 1
                # Clumpy question
                if weights[d[2]['idx']] > weights[d[2]['idx']+1]:
                    # Clumpy galaxies
                    task_eval[3] = 1
                    if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 0:
                        # Multiple clumps
                        if weights[d[3]['idx']:d[3]['idx'] + d[3]['len']].argmax() > 1:
                            # One bright clump
                            task_eval[4] = 1
                        task_eval[5] = 1
                        if weights[d[5]['idx']] > weights[d[5]['idx']+1]:
                            # Bright clump symmetrical
                            task_eval[6] = 1
                            if weights[d[6]['idx']] > weights[d[6]['idx']+1]:
                                task_eval[7] = 1
                                task_eval[8] = 1
                else:
                    # Disk galaxies
                    task_eval[9] = 1
                    # Edge-on disks
                    if weights[d[9]['idx']] > weights[d[9]['idx']+1]:
                        # Bulge shape
                        task_eval[10] = 1
                    # Not edge-on disks
                    else:
                        task_eval[11] = 1
                        task_eval[12] = 1
                        if weights[d[12]['idx']] > weights[d[12]['idx']+1]:
                            # Spirals
                            task_eval[13] = 1
                            task_eval[14] = 1
                        # Bulge prominence for all not-edge-on disks
                        task_eval[15] = 1
            # Merging/tidal debris
            task_eval[16] = 1
        # Discuss
        task_eval[17] = 1
    if survey in ('illustris',):
        d = { 0:{'idx': 0,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx': 3,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              2:{'idx': 5,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?' ->
              3:{'idx': 7,'len':2}, # "Is there any sign of a spiral arm pattern?"
              4:{'idx': 9,'len':4}, # "How prominent is the central bulge, compared with the rest of the galaxy?"
              5:{'idx':14,'len':7}, # Odd features
              6:{'idx':21,'len':3}, # Round
              7:{'idx':24,'len':3}, # Bulge shape
              8:{'idx':27,'len':3}, # arms winding
              9:{'idx':30,'len':6}, # arms number
              10:{'idx':36,'len':2}, # Is there anything odd?
              11:{'idx':38,'len':2}} # "Would you like to discuss this object?"
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[6] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                # Disk galaxies
                task_eval[1] = 1
                # Edge-on disks
                if weights[d[1]['idx']] > weights[d[1]['idx']+1]:
                    # Bulge shape
                    task_eval[7] = 1
                # Not edge-on disks
                else:
                    task_eval[2] = 1
                    task_eval[3] = 1
                    if weights[d[3]['idx']] > weights[d[3]['idx']+1]:
                        # Spirals
                        task_eval[8] = 1
                        task_eval[9] = 1
                    # Bulge prominence for all not-edge-on disks
                    task_eval[4] = 1
            # Odd features
            task_eval[10] = 1
            if weights[d[10]['idx']] > weights[d[10]['idx']+1]:
                # Only count if it's above some threshold, since this is a checkbox question
                if max(weights[d[5]['idx']:d[5]['idx'] + d[5]['len']]) > check_threshold:
                    task_eval[5] = 1
        # Discuss
        task_eval[11] = 1
    if survey in ('sloan','sloan_singleband','ukidss'):
        d = { 0:{'idx': 0,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx': 3,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              2:{'idx': 5,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?' ->
              3:{'idx': 7,'len':2}, # "Is there any sign of a spiral arm pattern?"
              4:{'idx': 9,'len':4}, # "How prominent is the central bulge, compared with the rest of the galaxy?"
              5:{'idx':13,'len':2}, # Is there anything odd?
              6:{'idx':16,'len':7}, # Odd features
              7:{'idx':23,'len':3}, # Round
              8:{'idx':26,'len':3}, # Bulge shape
              9:{'idx':29,'len':3}, # arms winding
              10:{'idx':32,'len':6}, # arms number
              11:{'idx':38,'len':2}} # "Would you like to discuss this object?"
        # NOTE(review): column 15 is covered by no task here (d[5] ends at 15,
        # d[6] starts at 16) -- confirm the vote-fraction layout skips an
        # answer at that position.
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[7] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                # Disk galaxies
                task_eval[1] = 1
                # Edge-on disks
                if weights[d[1]['idx']] > weights[d[1]['idx']+1]:
                    # Bulge shape
                    task_eval[8] = 1
                # Not edge-on disks
                else:
                    task_eval[2] = 1
                    task_eval[3] = 1
                    if weights[d[3]['idx']] > weights[d[3]['idx']+1]:
                        # Spirals
                        task_eval[9] = 1
                        task_eval[10] = 1
                    # Bulge prominence for all not-edge-on disks
                    task_eval[4] = 1
            # Odd features
            task_eval[5] = 1
            if weights[d[5]['idx']] > weights[d[5]['idx']+1]:
                # Only count if it's above some threshold, since this is a checkbox question
                if max(weights[d[6]['idx']:d[6]['idx'] + d[6]['len']]) > check_threshold:
                    task_eval[6] = 1
        # Discuss
        task_eval[11] = 1
    if survey in ('gz2','stripe82',):
        d = { 0:{'idx': 0,'len':3}, # 'Shape', 'Is the galaxy simply smooth and rounded, with no sign of a disk?', ->
              1:{'idx': 3,'len':2}, # 'Disk', 'Could this be a disk viewed edge-on?', ->
              2:{'idx': 5,'len':2}, # 'Bar', 'Is there any sign of a bar feature through the centre of the galaxy?' ->
              3:{'idx': 7,'len':2}, # "Is there any sign of a spiral arm pattern?"
              4:{'idx': 9,'len':4}, # "How prominent is the central bulge, compared with the rest of the galaxy?"
              5:{'idx':13,'len':2}, # Is there anything odd?
              6:{'idx':15,'len':3}, # Round
              7:{'idx':18,'len':7}, # Odd features
              8:{'idx':25,'len':3}, # Bulge shape
              9:{'idx':28,'len':3}, # arms winding
              10:{'idx':31,'len':6}} # arms number
        task_eval = [0]*len(d)
        task_ans = [0]*len(d)
        # Top-level: smooth/features/artifact
        task_eval[0] = 1
        if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() < 2:
            # Smooth galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 0:
                # Roundness
                task_eval[6] = 1
            # Features/disk galaxies
            if weights[d[0]['idx']:d[0]['idx']+d[0]['len']].argmax() == 1:
                # Disk galaxies
                task_eval[1] = 1
                # Edge-on disks
                if weights[d[1]['idx']] > weights[d[1]['idx']+1]:
                    # Bulge shape
                    task_eval[8] = 1
                # Not edge-on disks
                else:
                    task_eval[2] = 1
                    task_eval[3] = 1
                    if weights[d[3]['idx']] > weights[d[3]['idx']+1]:
                        # Spirals
                        task_eval[9] = 1
                        task_eval[10] = 1
                    # Bulge prominence for all not-edge-on disks
                    task_eval[4] = 1
            # Odd features
            task_eval[5] = 1
            if weights[d[5]['idx']] > weights[d[5]['idx']+1]:
                task_eval[7] = 1
    # Assign the plurality task numbers: for every task, the index (into the
    # full weights vector) of its most-voted answer, whether or not the task
    # was on the plurality path.
    for i,t in enumerate(task_ans):
        try:
            task_ans[i] = weights[d[i]['idx']:d[i]['idx'] + d[i]['len']].argmax() + d[i]['idx']
        except ValueError:
            print "ValueError in gz_class: {0:} categories, {1:} answers".format(len(weights),len(task_ans))
    return task_eval,task_ans
| {
"repo_name": "willettk/decals",
"path": "python/gz_class.py",
"copies": "1",
"size": "33682",
"license": "mit",
"hash": 5583148498534427000,
"line_mean": 43.9093333333,
"line_max": 198,
"alpha_frac": 0.4367020961,
"autogenerated": false,
"ratio": 3.651957063862084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4588659159962084,
"avg_score": null,
"num_lines": null
} |
# Adapted from: github.com/aneesha/RAKE/rake.py
# Snipped from: http://sujitpal.blogspot.co.nz/2013/03/implementing-rake-algorithm-with-nltk.html
from __future__ import division
import operator
import nltk
import string
def isPunct(word):
    """Return True when *word* is a single punctuation character."""
    if len(word) != 1:
        return False
    return word in string.punctuation
def isNumeric(word):
    """Return True when *word* parses as a number.

    A token containing a '.' must parse as a float; anything else must
    parse as an int (so exponent forms like '1e5' are rejected).
    """
    parser = float if "." in word else int
    try:
        parser(word)
    except ValueError:
        return False
    return True
class RakeKeywordExtractor:
    """Keyword extraction using the RAKE (Rapid Automatic Keyword
    Extraction) algorithm.

    Candidate phrases are the runs of words between stopwords/punctuation;
    each word is scored as degree(word)/freq(word) and a phrase's score is
    the sum of its word scores.
    """

    def __init__(self):
        # NLTK's full multi-language stopword list; used as phrase delimiters.
        self.stopwords = set(nltk.corpus.stopwords.words())
        self.top_fraction = 1  # consider top third candidate keywords by score

    def _generate_candidate_keywords(self, sentences):
        """Split each sentence into candidate phrases at stopwords and
        punctuation; returns a list of word lists."""
        phrase_list = []
        for sentence in sentences:
            # Mark stopwords with a sentinel so they delimit phrases.
            words = map(lambda x: "|" if x in self.stopwords else x,
                        nltk.word_tokenize(sentence.lower()))
            phrase = []
            for word in words:
                if word == "|" or isPunct(word):
                    if len(phrase) > 0:
                        phrase_list.append(phrase)
                        phrase = []
                else:
                    phrase.append(word)
        return phrase_list

    def _calculate_word_scores(self, phrase_list):
        """Score each word as degree(word) / frequency(word).

        degree(word) counts co-occurring (non-numeric) words across all
        phrases containing it, plus the word's own frequency.
        """
        word_freq = nltk.FreqDist()
        word_degree = nltk.FreqDist()
        for phrase in phrase_list:
            degree = len([x for x in phrase if not isNumeric(x)]) - 1
            for word in phrase:
                word_freq[word] += 1
                # BUG FIX: was `word_degree[word, degree] += 1`, which keyed
                # the FreqDist on the tuple (word, degree) so the later
                # `word_degree[word]` lookup was always 0 and every score
                # collapsed to 1.0. RAKE accumulates the degree per word
                # (the original nltk idiom was `word_degree.inc(word, degree)`).
                word_degree[word] += degree  # other words
        for word in word_freq.keys():
            word_degree[word] = word_degree[word] + word_freq[word]  # itself
        # word score = deg(w) / freq(w)
        word_scores = {}
        for word in word_freq.keys():
            word_scores[word] = word_degree[word] / word_freq[word]
        return word_scores

    def _calculate_phrase_scores(self, phrase_list, word_scores):
        """Score each phrase as the sum of its word scores; returns a dict
        mapping the joined phrase string to its score."""
        phrase_scores = {}
        for phrase in phrase_list:
            phrase_score = 0
            for word in phrase:
                phrase_score += word_scores[word]
            phrase_scores[" ".join(phrase)] = phrase_score
        return phrase_scores

    def extract(self, text, incl_scores=False):
        """Extract the top-scoring keyword phrases from *text*.

        Returns (phrase, score) pairs when *incl_scores* is True, otherwise
        just the phrases. Only the top 1/self.top_fraction of candidates
        (by score, descending) are returned.
        """
        sentences = nltk.sent_tokenize(text)
        phrase_list = self._generate_candidate_keywords(sentences)
        word_scores = self._calculate_word_scores(phrase_list)
        phrase_scores = self._calculate_phrase_scores(
            phrase_list, word_scores)
        sorted_phrase_scores = sorted(phrase_scores.items(),
                                      key=operator.itemgetter(1), reverse=True)
        n_phrases = len(sorted_phrase_scores)
        if incl_scores:
            return sorted_phrase_scores[0:int(n_phrases/self.top_fraction)]
        else:
            return map(lambda x: x[0],
                       sorted_phrase_scores[0:int(n_phrases/self.top_fraction)])
def test():
    """Smoke test: run RAKE over a sample abstract and print scored phrases."""
    sample_text = """
    Compatibility of systems of linear constraints over the set of natural
    numbers. Criteria of compatibility of a system of linear Diophantine
    equations, strict inequations, and nonstrict inequations are considered.
    Upper bounds for components of a minimal set of solutions and algorithms
    of construction of minimal generating sets of solutions for all types of
    systems are given. These criteria and the corresponding algorithms for
    constructing a minimal supporting set of solutions can be used in solving
    all the considered types of systems and systems of mixed types.
    """
    extractor = RakeKeywordExtractor()
    scored_keywords = extractor.extract(sample_text, incl_scores=True)
    print(scored_keywords)
if __name__ == "__main__":
test() | {
"repo_name": "ToferC/gcclassifier",
"path": "classifier/nltk_rake.py",
"copies": "1",
"size": "3327",
"license": "mit",
"hash": 4881785832597890000,
"line_mean": 34.0315789474,
"line_max": 97,
"alpha_frac": 0.6798917944,
"autogenerated": false,
"ratio": 3.6440306681270536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4823922462527053,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2010, Philip Plante of EndlessPaths.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# from handlers.base import BaseHandler
#
# import tornado.web
# from tornado import gen
# from async_process import call_subprocess, on_subprocess_result
#
# class ShellHandler(BaseHandler):
# @tornado.web.asynchronous
# @gen.engine
# def get(self):
# self.write("Before sleep<br />")
# self.flush()
# response = yield gen.Task(call_subprocess, self, "ls /")
# self.write("Output is:\n%s" % (response.read(),))
# self.finish()
#
# Modifications Copyright (c) 2014, Deng Yue Chen of OnePanel.org
# License remains as above
#
# Modify to:
# * prevent blocking and the zombie process
# * prevent the process from hanging up when a command fails to run
# * add a callbackable decorator
#
# We change ioloop.READ event to ioloop.ERROR to make callback
# invoke after the process end. And it can prevent blocking when the
# process is running.
# We make a Popen.wait() at the end to prevent the zombie process.
import logging
import shlex
import subprocess
import tornado
def call_subprocess(context, command, callback=None, shell=False):
    """Run *command* as a subprocess without blocking the tornado IOLoop.

    Registers an ERROR-event handler on the child's stdout fd; the handler
    (on_subprocess_result) fires once the process ends and delivers
    ``(returncode, stdout_contents)`` to *callback*.

    :param context: request-handler-like object; gains ``ioloop`` and
        ``pipe`` attributes, and must provide ``async_callback``.
    :param command: command string (split with shlex unless *shell* is True).
    :param callback: optional callable receiving ``(returncode, output)``;
        called with ``(-1, '')`` if the process cannot be started.
    :param shell: pass the command through the shell unsplit.
    """
    context.ioloop = tornado.ioloop.IOLoop.instance()
    if not shell:
        command = shlex.split(command)
    try:
        context.pipe = p = subprocess.Popen(command,
                                            stdin=subprocess.PIPE,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT,
                                            close_fds=True, shell=shell)
        # ERROR (not READ) so the handler only fires once the process ends,
        # per the module-header notes.
        context.ioloop.add_handler(p.stdout.fileno(),
                                   context.async_callback(on_subprocess_result,
                                                          context, callback),
                                   context.ioloop.ERROR)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any failure to spawn is reported to the caller.
        if callback:
            callback((-1, ''))
def on_subprocess_result(context, callback, fd, result):
    """IOLoop handler fired once the child process has ended.

    Reaps the child with wait() (preventing a zombie process), hands
    ``(returncode, stdout_contents)`` to *callback* if one was given, and
    always unregisters *fd* from the IOLoop.

    :param context: object carrying ``pipe`` (subprocess.Popen) and
        ``ioloop`` attributes, as set up by call_subprocess.
    :param callback: optional callable receiving ``(returncode, output)``.
    :param fd: file descriptor that was registered with the IOLoop.
    :param result: event mask supplied by the IOLoop (unused).
    """
    try:
        # wait() reaps the child so it does not linger as a zombie.
        context.pipe.wait()
        if callback:
            callback((context.pipe.returncode, context.pipe.stdout.read()))
    except Exception as e:  # `as` form: valid on Python 2.6+ AND Python 3,
        logging.error(e)    # unlike the original `except Exception, e:`
    finally:
        # Always detach the handler, even if the callback raised.
        context.ioloop.remove_handler(fd)
# to be continued
def callbackable(func):
    """Decorator that optionally routes a function's result to a callback.

    The wrapped function accepts an extra, optional ``callback`` keyword.
    When supplied and truthy, the result of *func* is passed to the callback
    and the callback's return value is returned; otherwise the result is
    returned directly.

    Fix: the original read ``kwds['callback']`` unconditionally, raising
    KeyError whenever the wrapped function was called without a callback
    keyword (and leaked a falsy callback kwarg through to *func*);
    ``pop`` with a default makes the keyword genuinely optional.
    """
    def wrapper(*args, **kwds):
        # pop() both reads the keyword and ensures it is never forwarded.
        callback = kwds.pop('callback', None)
        result = func(*args, **kwds)
        if callback:
            return callback(result)
        return result
    return wrapper
"repo_name": "dingzg/onepanel",
"path": "lib/async_process.py",
"copies": "1",
"size": "3422",
"license": "apache-2.0",
"hash": -1971750091696505300,
"line_mean": 35.8064516129,
"line_max": 154,
"alpha_frac": 0.7101110462,
"autogenerated": false,
"ratio": 3.9652375434530707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.517534858965307,
"avg_score": null,
"num_lines": null
} |
# Adapted from: Hidden Markov Models in Python
# Katrin Erk, March 2013
#
# This HMM addresses the problem of disfluency/end of utterance tagging.
# It estimates the probability of a tag sequence for a given word sequence as
# follows:
#
# Say words = w1....wN
# and tags = t1..tN
#
# then
# P(tags | words) is_proportional_to product P(ti | t{i-1}) P(wi | ti)
#
# To find the best tag sequence for a given sequence of words,
# we want to find the tag sequence that has the maximum P(tags | words)
from __future__ import division
import os
import re
from copy import deepcopy
import numpy as np
from collections import defaultdict
import cPickle as pickle
import nltk
import tag_conversion
from hmm_utils import tabulate_cfd
from hmm_utils import log
# boosts for rare classes
SPARSE_WEIGHT_T_ = 3.0 # for <t # with timings this naturally gets boost
SPARSE_WEIGHT_T = 3.0 # for t/>
# results on un-weighted timing classifier for <t boosts:
# 1 0.757 2 0.770 3 0.778 4. 0.781 5.0.783
# 6. 0.785 7. 0.784
SPARSE_WEIGHT_RPS = 4.0
SPARSE_WEIGHT_RPE = 2.0
# the weights for the source language model and the timing duration classifier
TIMING_WEIGHT = 2.0 # 10 gives 0.756, no great gains with higher weight
# NB on 30.04 this is just a weight on the <t class as timer not working
# Given this can improve things from 0.70 -> 0.76 weighting worth looking at
# if using noisy channel model:
SOURCE_WEIGHT = 0.1
class FirstOrderHMM():
"""A standard hmm model which interfaces with any sequential channel model
that outputs the input_distribution over all labels at each time step.
A first order model where the internal state probabilities only depend
on the previous state.
"""
    def __init__(self, disf_dict, markov_model_file=None,
                 timing_model=None, timing_model_scaler=None,
                 n_history=20, constraint_only=True, noisy_channel=None):
        """Build the HMM: pick a tag-conversion function based on the tag
        inventory, optionally load a pickled tag-transition frequency
        distribution, and derive the conditional probability distribution.

        :param disf_dict: dict mapping tag strings -> indices.
        :param markov_model_file: base name of a pickled
            ConditionalFreqDist under models/ (loaded when given).
        :param timing_model: optional word-timing duration classifier.
        :param timing_model_scaler: feature scaler paired with timing_model.
        :param n_history: how many steps back to store during decoding.
        :param constraint_only: if True, the transition matrix is used only
            as a hard constraint rather than as conditional probabilities.
        :param noisy_channel: optional noisy-channel source model.
        """
        self.tagToIndexDict = disf_dict  # dict maps from tags -> indices
        self.n_history = n_history  # how many steps back we should store
        self.observation_tags = set(self.tagToIndexDict.keys())
        self.observation_tags.add('s')  # all tag sets need a start tag
        self.cfd_tags = nltk.ConditionalFreqDist()
        self.cpd_tags = None
        self.tag_set = None
        self.timing_model = None
        self.timing_model_scaler = None
        self.constraint_only = constraint_only
        self.noisy_channel_source_model = noisy_channel
        # Select the tag-conversion function by inspecting which markers the
        # tag inventory contains ("<ct/>" implies joint utterance
        # segmentation; "<rm-..." implies disfluency tags; "<speaker"
        # implies interactive dialogue-act tags).
        if any(["<ct/>" in x for x in self.observation_tags]):
            # if a segmentation problem
            if any(["<rm-2" in x for x in self.observation_tags]):
                # full set
                self.convert_tag = tag_conversion.\
                    convert_to_disfluency_uttseg_tag
            elif any(["<rm-" in x for x in self.observation_tags]):
                self.convert_tag = tag_conversion.\
                    convert_to_disfluency_uttseg_tag_simple
            elif any(["<speaker" in x for x in self.observation_tags]):
                self.convert_tag = tag_conversion.\
                    convert_to_diact_uttseg_interactive_tag
            # NOTE(review): this elif repeats the previous condition exactly,
            # so the branch below is unreachable -- presumably one of the two
            # was meant to test a different marker. Verify intent.
            elif any(["<speaker" in x for x in self.observation_tags]):
                # if only dialogue acts
                self.convert_tag = tag_conversion.convert_to_diact_uttseg_tag
            else:
                self.convert_tag = tag_conversion.convert_to_uttseg_tag
        else:
            # no segmentation in this task
            self.observation_tags.add('se')  # add end tag in pre-seg mode
            if any(["<rm-2" in x for x in self.observation_tags]):
                # full set
                self.convert_tag = tag_conversion.convert_to_disfluency_tag
            elif any(["<rm-" in x for x in self.observation_tags]):
                self.convert_tag = tag_conversion.\
                    convert_to_disfluency_tag_simple
            elif any(["<speaker" in x for x in self.observation_tags]):
                self.convert_tag = tag_conversion.\
                    convert_to_diact_interactive_tag
            else:
                # if only dialogue acts
                self.convert_tag = tag_conversion.convert_to_diact_tag
        if markov_model_file:
            print "loading", markov_model_file, "Markov model"
            # print "If we have just seen 'DET', \
            # the probability of 'N' is", cpd_tags["DET"].prob("N")
            # or load from file
            mm_path = os.path.dirname(os.path.realpath(__file__)) +\
                "/models/{}_tags.pkl".format(markov_model_file)
            # if load:
            self.cfd_tags = pickle.load(open(mm_path, "rb"))
            # else:
            #     # or create this from scratch
            #     graph = convert_to_dot("../decoder/models/{}.csv".format(
            #         markov_model_file))
            #     # loading MM from the graph/dot representation
            #     tags = []
            #     for line in graph.split("\n"):
            #         spl = line.replace(";", "").split()
            #         if not len(spl) == 3:
            #             continue
            #         assert spl[1] == "->"
            #         tags.append((spl[0], spl[2]))
            #     self.cfd_tags += nltk.ConditionalFreqDist(tags)
        else:
            print 'No Markov model file specified, empty CFD. Needs training.'
        # whatever happens turn this into a cond prob dist:
        self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
                                                 nltk.MLEProbDist)
        # tag_set = all conditions plus all outcomes seen in the CFD
        all_outcomes = [v.keys() for v in self.cfd_tags.values()]
        self.tag_set = set(self.cfd_tags.keys() +
                           [y for x in all_outcomes for y in x])
        self.viterbi_init()  # initialize viterbi
        # print "Test: If we have just seen 'rpSM',\
        # the probability of 'f' is", self.cpd_tags["c_rpSM_c"].prob("c_f_c")
        if timing_model:
            self.timing_model = timing_model
            self.timing_model_scaler = timing_model_scaler
            # self.simple_trp_idx2label = {0 : "<cc/>",
            #                              1 : "<ct/>",
            #                              2 : "<tc/>",
            #                              3 : "<tt/>"}
            # Only use the Inbetween and Start tags
            self.simple_trp_idx2label = {0: "<c", 1: "<t"}
        else:
            print "No timing model given"
        print "Markov Model ready mode:"
        if self.constraint_only:
            print "constraint only"
        else:
            print "conditional probability"
def train_markov_model_from_file(self, corpus_path, mm_path, update=False,
non_sparse=False):
"""Adds to the self.cfd_tags conditional frequency distribution
loaded, if there is one, else starts afresh.
Recalculate the conditional prob distribution afresh.
args:
--filepath : filepath to newline separated file to learn sequence
probabilities from.
--mm_path : filepath to markov model distribution path to write to.
--update : whether to update the current cfd, if not start anew.
--non_sparse : whether to omit lines in the corpus without repairs,
gives higher prob to repairs
"""
tags = []
# expects line separated sequences
corpus_file = open(corpus_path)
print "training decoder from", corpus_path
for line in corpus_file:
if line.strip("\n") == "":
continue
if non_sparse and ("<r" not in line):
continue
labels_data = line.strip("\n").split(",")
if "<r" in labels_data[0]:
continue # TODO error with corpus creation
previous = "s"
# print "length sequence", len(labels_data)
for i in range(len(labels_data)):
if labels_data[i] not in self.observation_tags:
print labels_data[i], "not in obs tags"
continue
if any(["<i" in t for t in self.observation_tags]):
if "<e" in labels_data[i] and i < len(labels_data)-1:
rps_onset = None
for j in range(i, len(labels_data)):
if "<rm" in labels_data[j]:
rps_onset = j
break
if "<e" not in labels_data[j]:
break
if rps_onset:
for k in range(i, rps_onset):
labels_data[k] = labels_data[k].replace("<e",
"<i")
# print labels_data[i]
# adjust interregna
# if any(["<i" in t for t in self.observation_tags]):
# if "<rm-" in labels_data[i]:
# b = len(tags)-1
# while ("e" in tags[b][1] and (not tags[b][1]=="se")\
# and b > 0):
# if "i" not in tags[b][1]:
# new_1 = tags[b][1].replace('eR', 'i').\
# replace('e', 'i')
# tags[b] = (tags[b][0], new_1)
# if "e" in tags[b][0] and "i" not in tags[b][0]:
# new_0 = tags[b][0].replace('eR', 'i').\
# replace('e', 'i')
# tags[b] = (new_0, tags[b][1])
# b -= 1
# previous = tags[-1][1]
tag = self.convert_tag(previous, labels_data[i])
tags.append((previous, tag))
previous = tag
if "se" in self.observation_tags:
# add end tag
tags.append((previous, 'se'))
# print "If we have just seen 'DET', \
# the probability of 'N' is", cpd_tags["DET"].prob("N")
# assumes these are added to exisiting one
if update:
self.cfd_tags += nltk.ConditionalFreqDist(tags)
else:
self.cfd_tags = nltk.ConditionalFreqDist(tags)
print "cfd trained, counts:"
self.cfd_tags.tabulate()
print "test:"
print tabulate_cfd(self.cfd_tags)
# save this new cfd for later use
pickle.dump(self.cfd_tags, open(mm_path, "wb"))
# initialize the cpd
self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
nltk.MLEProbDist)
# print "cpd summary:"
# print self.cpd_tags.viewitems()
print tabulate_cfd(self.cpd_tags)
all_outcomes = [v.keys() for v in self.cfd_tags.values()]
self.tag_set = set(self.cfd_tags.keys() +
[y for x in all_outcomes for y in x])
self.viterbi_init() # initialize viterbi
def train_markov_model_from_constraint_matrix(self, csv_path, mm_path,
delim="\t"):
table = [line.split(delim) for line in open(csv_path)]
tags = []
range_states = table.pop(0)[1:]
for row in table:
domain = row[0]
for i, r in enumerate(row[1:]):
s = r.replace(" ", "").strip("\n")
if (s == ''):
continue
if int(s) > 0:
for _ in range(0, int(s)):
tags.append((domain, range_states[i]))
self.cfd_tags = nltk.ConditionalFreqDist(tags)
print "cfd trained, counts:"
self.cfd_tags.tabulate()
print "test:"
print tabulate_cfd(self.cfd_tags)
# save this new cfd for later use
pickle.dump(self.cfd_tags, open(mm_path, "wb"))
# initialize the cpd
self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
nltk.MLEProbDist)
# print "cpd summary:"
# print self.cpd_tags.viewitems()
print tabulate_cfd(self.cpd_tags)
all_outcomes = [v.keys() for v in self.cfd_tags.values()]
self.tag_set = set(self.cfd_tags.keys() +
[y for x in all_outcomes for y in x])
self.viterbi_init() # initialize viterbi
def viterbi_init(self):
    """Clear all per-sequence decoder state, ready for a new input."""
    for attr in ("best_tagsequence", "viterbi", "backpointer",
                 "converted", "history"):
        setattr(self, attr, [])
    if self.noisy_channel_source_model:
        self.noisy_channel_source_model.reset()
        self.noisy_channel = []  # per-step noisy channel node history
def add_to_history(self, viterbi, backpointer, converted):
    """Prepend a deep-copied snapshot of the decoder state in case we
    need to roll back, keeping at most self.n_history entries (the
    oldest snapshot is dropped first).
    """
    if len(self.history) == self.n_history:
        del self.history[-1]  # make room: drop the oldest snapshot
    snapshot = {"viterbi": deepcopy(viterbi),
                "backpointer": deepcopy(backpointer),
                "converted": deepcopy(converted)}
    self.history = [snapshot] + self.history
def rollback(self, n):
    """Rewind the decoder by n steps, trimming every parallel state
    list and dropping the corresponding history snapshots.
    """
    def trim(seq):
        # all but the final n entries (a fresh list, as the original
        # slicing produced)
        return seq[:len(seq) - n]

    self.history = self.history[n:]
    self.viterbi = trim(self.viterbi)
    self.backpointer = trim(self.backpointer)
    self.converted = trim(self.converted)
    self.best_tagsequence = trim(self.best_tagsequence)
    if self.noisy_channel_source_model:
        # NB trims a further n beyond the already-shortened
        # best_tagsequence, mirroring the original computation
        self.noisy_channel = self.noisy_channel[
            :len(self.best_tagsequence) - n]
def viterbi_step(self, input_distribution, word_index,
                 sequence_initial=False, timing_data=None):
    """The principal viterbi calculation for an extension to the
    input prefix, i.e. not reseting.

    Extends self.viterbi / self.backpointer / self.converted (and
    self.noisy_channel when a source model is set) by one column.

    input_distribution -- per-step emission probabilities, indexed as
        input_distribution[word_index][self.tagToIndexDict[tag]].
    word_index -- the position in input_distribution to consume.
    sequence_initial -- True for the first step of a fresh sequence;
        initializes from the start-of-sequence tag "s".
    timing_data -- optional timing features; when given along with
        self.timing_model, a timing classifier biases the tag scores.
    """
    # source_weight = 13 # higher for WML
    if sequence_initial:
        # first time requires initialization with the start of sequence tag
        first_viterbi = {}
        first_backpointer = {}
        first_converted = {}
        if self.noisy_channel_source_model:
            first_noisy_channel = {}
        for tag in self.observation_tags:
            # don't record anything for the START tag
            if tag == "s" or tag == 'se':
                continue
            # transition prob from the start state to this tag
            tag_prob = self.cpd_tags["s"].prob(self.convert_tag("s", tag))
            if tag_prob >= 0.00001:  # allowing for margin of error
                if self.constraint_only:
                    # TODO for now treating this like a {0,1} constraint
                    tag_prob = 1.0
            else:
                tag_prob = 0.0
            # joint log prob: transition * emission
            prob = log(tag_prob) + \
                log(input_distribution[word_index][self.tagToIndexDict[tag]])
            # no timing bias to start
            if self.noisy_channel_source_model:
                # noisy channel eliminate the missing tags
                source_tags = tag_conversion.\
                    convert_to_source_model_tags([tag],
                                                 uttseg=True)
                source_prob, node = self.noisy_channel_source_model.\
                    get_log_diff_of_tag_suffix(source_tags,
                                               n=1)
                first_noisy_channel[tag] = node
                # prob = (source_weight * source_prob) + \
                #     ((1 - source_weight) * prob)
                prob += (SOURCE_WEIGHT * source_prob)
            first_viterbi[tag] = prob
            first_backpointer[tag] = "s"
            first_converted[tag] = self.convert_tag("s", tag)
            assert first_converted[tag] in self.tag_set,\
                first_converted[tag] + " not in: " + str(self.tag_set)
        # store first_viterbi (the dictionary for the first word)
        # in the viterbi list, and record that the best previous tag
        # for any first tag is "s" (start of sequence tag)
        self.viterbi.append(first_viterbi)
        self.backpointer.append(first_backpointer)
        self.converted.append(first_converted)
        if self.noisy_channel_source_model:
            self.noisy_channel.append(first_noisy_channel)
        self.add_to_history(first_viterbi, first_backpointer,
                            first_converted)
        return
    # else we're beyond the first word
    # start a new dictionary where we can store, for each tag, the prob
    # of the best tag sequence ending in that tag
    # for the current word in the sentence
    this_viterbi = {}
    # we also store the best previous converted tag
    this_converted = {}  # added for the best converted tags
    # start a new dictionary where we can store, for each tag,
    # the best previous tag
    this_backpointer = {}
    # prev_viterbi is a dictionary that stores, for each tag, the prob
    # of the best tag sequence ending in that tag
    # for the previous word in the sentence.
    # So it stores, for each tag, the probability of a tag sequence
    # up to the previous word
    # ending in that tag.
    prev_viterbi = self.viterbi[-1]
    prev_converted = self.converted[-1]
    if self.noisy_channel_source_model:
        this_noisy_channel = {}
        prev_noisy_channel = self.noisy_channel[-1]
    # for each tag, determine what the best previous-tag is,
    # and what the probability is of the best tag sequence ending.
    # store this information in the dictionary this_viterbi
    if timing_data and self.timing_model:
        # TODO may already be an array
        # NOTE(review): timing_data appears to be a flat feature vector
        # for the current word -- confirm against caller
        X = self.timing_model_scaler.transform(np.asarray([timing_data]))
        input_distribution_timing = self.timing_model.predict_proba(X)
    for tag in self.observation_tags:
        # don't record anything for the START/END tag
        if tag in ["s", "se"]:
            continue
        # joint probability calculation:
        # if this tag is X and the current word is w, then
        # find the previous tag Y such that
        # the best tag sequence that ends in X
        # actually ends in Y X
        # that is, the Y that maximizes
        # prev_viterbi[ Y ] * P(X | Y) * P( w | X)
        best_previous = None
        best_prob = log(0.0)  # has to be -inf for log numbers
        # the inner loop which makes this quadratic complexity
        # in the size of the tag set
        for prevtag in prev_viterbi.keys():
            # the best converted tag, needs to access the previous one
            prev_converted_tag = prev_converted[prevtag]
            # TODO there could be several conversions for this tag
            converted_tag = self.convert_tag(prev_converted_tag, tag)
            assert converted_tag in self.tag_set, tag + " " + \
                converted_tag + " prev:" + str(prev_converted_tag)
            tag_prob = self.cpd_tags[prev_converted_tag].prob(
                converted_tag)
            if tag_prob >= 0.000001:  # allowing for margin of error
                if self.constraint_only:
                    # TODO for now treating this like a {0,1} constraint
                    tag_prob = 1.0
                test = converted_tag.lower()
                # check for different boosts for different tags
                if "rps" in test:  # boost for start tags
                    # boost for rps
                    tag_prob = tag_prob * SPARSE_WEIGHT_RPS
                if "rpe" in test:
                    # boost for rp end tags
                    tag_prob = tag_prob * SPARSE_WEIGHT_RPE
                if "t_" in test[:2]:
                    # boost for t tags
                    tag_prob = tag_prob * SPARSE_WEIGHT_T_
                if "_t" in test:
                    tag_prob = tag_prob * SPARSE_WEIGHT_T
                if timing_data and self.timing_model:
                    # map the tag to the timing classifier's class index
                    found = False
                    for k, v in self.simple_trp_idx2label.items():
                        if v in tag:
                            timing_tag = k
                            found = True
                            break
                    if not found:
                        raw_input("warning")
                    # using the prob from the timing classifier
                    # array over the different classes
                    timing_prob = input_distribution_timing[0][timing_tag]
                    if self.constraint_only:
                        # just adapt the prob of the timing tag
                        # tag_prob = timing_prob
                        # the higher the timing weight the more influence
                        # the timing classifier has
                        tag_prob = (TIMING_WEIGHT * timing_prob) + tag_prob
                    else:
                        tag_prob = (TIMING_WEIGHT * timing_prob) + tag_prob
            else:
                tag_prob = 0.0
            # the principal joint log prob
            prob = prev_viterbi[prevtag] + log(tag_prob) + \
                log(input_distribution[word_index][self.tagToIndexDict[tag]])
            # gets updated by noisy channel if in this mode
            if self.noisy_channel_source_model:
                prev_n_ch_node = prev_noisy_channel[prevtag]
                # The noisy channel model adds the score
                # if we assume this tag and the backpointed path
                # from the prev tag
                # Converting all to source tags first
                # NB this is what is slowing things down
                # Need to go from the known index
                # in the nc model
                full_backtrack_method = False
                if full_backtrack_method:
                    inc_best_tag_sequence = [prevtag]
                    # invert the list of backpointers
                    inc_backpointer = deepcopy(self.backpointer)
                    inc_backpointer.reverse()
                    # go backwards through the list of backpointers
                    # (or in this case forward, we have inverted the
                    # backpointer list)
                    inc_current_best_tag = prevtag
                    for b_count, bp in enumerate(inc_backpointer):
                        inc_best_tag_sequence.append(
                            bp[inc_current_best_tag])
                        inc_current_best_tag = bp[inc_current_best_tag]
                        # cap the backtrack depth at 10 steps
                        if b_count > 9:
                            break
                    inc_best_tag_sequence.reverse()
                    inc_best_tag_sequence.append(tag)  # add tag
                    source_tags = tag_conversion.\
                        convert_to_source_model_tags(
                            inc_best_tag_sequence[1:],
                            uttseg=True)
                    source_prob, nc_node = \
                        self.noisy_channel_source_model.\
                        get_log_diff_of_tag_suffix(
                            source_tags,
                            n=1)
                else:
                    # NB these only change if there is a backward
                    # looking tag
                    if "<rm-" in tag:
                        m = re.search("<rm-([0-9]+)\/>", tag)
                        if m:
                            back = min([int(m.group(1)),
                                        len(self.backpointer)])
                            suffix = ["<e/>"] * back + ["<f/>"]
                            # to get the change in probability due to this
                            # we need to backtrack further
                            n = len(suffix)
                    else:
                        suffix = tag_conversion.\
                            convert_to_source_model_tags([tag])
                        n = 1  # just monotonic extension
                    source_prob, nc_node = \
                        self.noisy_channel_source_model.\
                        get_log_diff_of_tag_suffix(
                            suffix,
                            start_node_ID=prev_n_ch_node,
                            n=n)
                prob += (SOURCE_WEIGHT * source_prob)
            if prob >= best_prob:
                best_converted = converted_tag
                best_previous = prevtag
                best_prob = prob
                if self.noisy_channel_source_model:
                    best_n_c_node = nc_node
        # if best result is 0 do not add, pruning, could set this higher
        if best_prob > log(0.0):
            this_converted[tag] = best_converted
            this_viterbi[tag] = best_prob
            # the most likely preceding tag for this current tag
            this_backpointer[tag] = best_previous
            if self.noisy_channel_source_model:
                this_noisy_channel[tag] = best_n_c_node
    # done with all tags in this iteration
    # so store the current viterbi step
    self.viterbi.append(this_viterbi)
    self.backpointer.append(this_backpointer)
    self.converted.append(this_converted)
    if self.noisy_channel_source_model:
        self.noisy_channel.append(this_noisy_channel)
    self.add_to_history(this_viterbi, this_backpointer, this_converted)
    return
def get_best_n_tag_sequences(self, n, noisy_channel_source_model=None):
# Do a breadth-first search
# try the best final tag and its backpointers, then the second
# best final tag etc.
# once all final tags are done and n > len(final tags)
# move to the second best penult tags for each tag
# from the best to worst, then the 3rd row
# it terminates when n is reached
# use the history self.history = [{"viterbi": deepcopy(viterbi),
# "backpointer": deepcopy(backpointer),
# "converted": deepcopy(converted)}] + self.history
# num_seq = n if not noisy_channel_source_model else 1000
num_seq = n
best_n = [] # the tag sequences with their probability (tuple)
# print "len viterbi", len(self.viterbi)
# print "len backpoint", len(self.backpointer)
for viterbi_depth in range(len(self.viterbi)-1, -1, -1):
if len(best_n) == num_seq:
break
inc_prev_viterbi = deepcopy(self.viterbi[viterbi_depth])
# inc_best_previous = max(inc_prev_viterbi.keys(),
# key=lambda prevtag:
# inc_prev_viterbi[prevtag])
inc_previous = sorted(inc_prev_viterbi.items(),
key=lambda x: x[1], reverse=True)
for tag, prob in inc_previous:
# print tag, prob
# prob = inc_prev_viterbi[inc_best_previous]
# assert(prob != log(0)), "highest likelihood is 0!"
if prob == log(0):
continue
inc_best_tag_sequence = [tag]
# invert the list of backpointers
inc_backpointer = deepcopy(self.backpointer)
inc_backpointer.reverse()
# go backwards through the list of backpointers
# (or in this case forward, we have inverted the
# backpointer list)
inc_current_best_tag = tag
# print "backpointer..."
d = 0
for bp in inc_backpointer:
d += 1
# print "depth", d, "find bp for", inc_current_best_tag
inc_best_tag_sequence.append(bp[inc_current_best_tag])
inc_current_best_tag = bp[inc_current_best_tag]
# print "..."
inc_best_tag_sequence.reverse()
best_n.append((inc_best_tag_sequence, prob))
if len(best_n) == num_seq:
break
best_n = sorted(best_n, key=lambda x: x[1], reverse=True)
debug = False
if debug:
print "getting best n"
for s, p in best_n:
print s[-1], p
print "***"
assert(best_n[0][1] > log(0.0)), "best prob 0!"
if not noisy_channel_source_model:
# return inc_best_tag_sequence
return [x[0] for x in best_n]
# if noisy channel do the interpolation
# need to entertain the whole beam for the channel model and source
# model
# channel_beam = best_n # the tag sequences with their probability
# source_beam = noisy_channel.get_best_n_tag_sequences(1000)
# self.interpolate_(channel_beam, source_beam)
channel_beam = [lambda x: (x[0], tag_conversion.
convert_to_source_model_tags(x[0]),
x[1]) for x in best_n]
best_seqs = noisy_channel_source_model.\
interpolate_probs_with_n_best(
channel_beam,
source_beam_width=1000,
output_beam_width=n)
return best_seqs
# def get_best_tag_sequence(self):
# """Returns the best tag sequence from the input so far.
# """
# inc_prev_viterbi = deepcopy(self.viterbi[-1])
# inc_best_previous = max(inc_prev_viterbi.keys(),
# key=lambda prevtag: inc_prev_viterbi[prevtag])
# assert(inc_prev_viterbi[inc_best_previous]) != log(0),\
# "highest likelihood is 0!"
# inc_best_tag_sequence = [inc_best_previous]
# # invert the list of backpointers
# inc_backpointer = deepcopy(self.backpointer)
# inc_backpointer.reverse()
# # go backwards through the list of backpointers
# # (or in this case forward, we have inverted the backpointer list)
# inc_current_best_tag = inc_best_previous
# for bp in inc_backpointer:
# inc_best_tag_sequence.append(bp[inc_current_best_tag])
# inc_current_best_tag = bp[inc_current_best_tag]
# inc_best_tag_sequence.reverse()
# return inc_best_tag_sequence
def get_best_tag_sequence(self, noisy_channel_source_model=None):
    """Return the single most probable tag sequence decoded so far."""
    top_sequences = self.get_best_n_tag_sequences(
        1, noisy_channel_source_model)
    return top_sequences[0]
def viterbi(self, input_distribution, incremental_best=False):
    """Standard non incremental (sequence-level) viterbi over
    input_distribution input.

    NOTE(review): viterbi_init() assigns the *list* attribute
    self.viterbi, which shadows this method on instances -- confirm
    how callers actually invoke it (e.g. via the class).

    Keyword arguments:
    input_distribution -- the emission probabilities of each step in
        the sequence, array of width n_classes
    incremental_best -- whether the tag sequence prefix is stored for
        each step in the sequence (slightly 'hack-remental')
    """
    incrementalBest = []
    sentlen = len(input_distribution)
    self.viterbi_init()
    for word_index in range(0, sentlen):
        self.viterbi_step(input_distribution, word_index, word_index == 0)
        # INCREMENTAL RESULTS (hack-remental. doing it post-hoc)
        # the best result we have so far, not given the next one
        if incremental_best:
            inc_best_tag_sequence = self.get_best_tag_sequence()
            incrementalBest.append(deepcopy(inc_best_tag_sequence[1:]))
    # done with all words/input in the sentence/sentence
    # find the probability of each tag having "se" next (end of utterance)
    # and use that to find the overall best sequence
    prev_converted = self.converted[-1]
    prev_viterbi = self.viterbi[-1]
    best_previous = max(prev_viterbi.keys(),
                        key=lambda prevtag: prev_viterbi[prevtag] +
                        log(self.cpd_tags[prev_converted[prevtag]].
                            prob("se")))
    self.best_tagsequence = ["se", best_previous]
    # invert the list of backpointers
    self.backpointer.reverse()
    # go backwards through the list of backpointers
    # (or in this case forward, we've inverted the backpointer list)
    # in each case:
    # the following best tag is the one listed under
    # the backpointer for the current best tag
    current_best_tag = best_previous
    for bp in self.backpointer:
        self.best_tagsequence.append(bp[current_best_tag])
        current_best_tag = bp[current_best_tag]
    self.best_tagsequence.reverse()
    if incremental_best:
        # NB also consumes the end of utterance token! Last two the same
        incrementalBest.append(self.best_tagsequence[1:-1])
        return incrementalBest
    return self.best_tagsequence[1:-1]
def viterbi_incremental(self, soft_max, a_range=None,
                        changed_suffix_only=False, timing_data=None,
                        words=None):
    """Given a new soft_max input, output the latest labels.
    Effectively incrementing/editing self.best_tagsequence.

    Keyword arguments:
    a_range -- (start, end) slice of soft_max to consume; if None the
        whole input is consumed.
    changed_suffix_only -- boolean, output the changed suffix of
        the previous output sequence of labels.
        i.e. if before this function is called the sequence is
        [1:A, 2:B, 3:C]
        and after it is
        [1:A, 2:B, 3:E, 4:D]
        then output is:
        [3:E, 4:D]
        (TODO maintaining the index/time spans is important
        to acheive this, even if only externally)
    timing_data -- optional timing features, passed to viterbi_step.
    words -- word list for the noisy channel source model; one word is
        consumed (words.pop(0)) per step when that model is active.
    """
    previous_best = deepcopy(self.best_tagsequence)
    if not a_range:
        # if not specified consume the whole soft_max input
        a_range = (0, len(soft_max))
    for i in xrange(a_range[0], a_range[1]):
        if self.noisy_channel_source_model:
            self.noisy_channel_source_model.consume_word(words.pop(0))
        # first step of a new sequence iff the lattice is empty
        self.viterbi_step(soft_max, i, sequence_initial=self.viterbi == [],
                          timing_data=timing_data)
    # get the best tag sequence we have so far
    self.best_tagsequence = self.get_best_tag_sequence()
    if changed_suffix_only:
        # only output the suffix of predictions which has changed-
        # TODO needs IDs to work
        for r in range(1, len(self.best_tagsequence)):
            if r > len(previous_best)-1 or \
                    previous_best[r] != self.best_tagsequence[r]:
                return self.best_tagsequence[r:]
    return self.best_tagsequence[1:]
# def adjust_incremental_viterbi_with_source_channel(self, source_channel):
# """This reranks the current hypotheses with the noisy channel
# decode, adding a weighted log prob of the language model
# scores from the source model to the probs in viterbi.
# Note this should be done before the backpointer is computed
# for each new tag?
# """
if __name__ == '__main__':
    def load_tags(filepath):
        """Return a dict mapping each tag string to its integer index,
        read from a CSV file of "index,tag" lines.
        """
        tag_dictionary = defaultdict(int)
        f = open(filepath)
        for line in f:
            l = line.strip('\n').split(",")
            tag_dictionary[l[1]] = int(l[0])
        f.close()
        return tag_dictionary
    tags_name = "swbd_disf1_uttseg_simple_033"
    tags = load_tags(
        "../data/tag_representations/{}_tags.csv".format(
            tags_name)
    )
    if "disf" in tags_name:
        # disfluency tag sets additionally need an interregnum tag
        intereg_ind = len(tags.keys())
        interreg_tag = "<i/><cc/>" if "uttseg" in tags_name else "<i/>"
        tags[interreg_tag] = intereg_ind  # add the interregnum tag
    print tags
    # build the HMM with no pre-trained Markov model, then train it
    # from the constraint matrix and pickle the result
    h = FirstOrderHMM(tags, markov_model_file=None)
    mm_path = "models/{}_tags.pkl".format(tags_name)
    # corpus_path = "../data/tag_representations/{}_tag_corpus.csv".format(
    #     tags_name).replace("_021", "")
    # h.train_markov_model_from_file(corpus_path, mm_path, non_sparse=True)
    csv_file = "models/{}.csv".format(tags_name)
    h.train_markov_model_from_constraint_matrix(csv_file,
                                                mm_path,
                                                delim=",")
    # dump the trained conditional prob table for inspection
    table = tabulate_cfd(h.cpd_tags)
    test_f = open("models/{}_tags_table.csv".format(tags_name), "w")
    test_f.write(table)
    test_f.close()
| {
"repo_name": "dsg-bielefeld/deep_disfluency",
"path": "deep_disfluency/decoder/hmm.py",
"copies": "1",
"size": "38494",
"license": "mit",
"hash": 5625781579425193000,
"line_mean": 46.8781094527,
"line_max": 90,
"alpha_frac": 0.5206525692,
"autogenerated": false,
"ratio": 4.185495270196803,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5206147839396803,
"avg_score": null,
"num_lines": null
} |
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from grovepi import *
from grove_oled import *
import threading
dht_sensor_port = 7  # Connect the DHT sensor to port 7
# Start and initialize the OLED display: cleared, normal (non-inverted)
# colours, vertical addressing mode
oled_init()
oled_clearDisplay()
oled_setNormalDisplay()
oled_setVerticalMode()
time.sleep(.1)  # brief pause to let the OLED settle after setup
def get_outside_weather(location='Bucharest,ro'):
    """Fetch the current weather for *location* from OpenWeatherMap."""
    import pyowm  # Do a 'sudo pip install pyowm' to get this module
    owm_client = pyowm.OWM()
    #forecast = owm_client.daily_forecast(location)
    observation = owm_client.weather_at_place(location)
    return observation.get_weather()
def update_outside_weather():
    """Fetch the outside weather and render it on lines 5-9 of the OLED.

    Uses OpenWeatherMap via the PyOWM module; pyowm needs to be
    installed via pip, see https://github.com/csparpa/pyowm
    """
    weather = get_outside_weather()
    # by default location is Bucharest,ro; change it to your own
    oled_setTextXY(5, 1)
    oled_putString("OUTSIDE")
    oled_setTextXY(7, 0)
    oled_putString("Temp:")
    oled_putString(str(weather.get_temperature("celsius")['temp']) + "C")
    oled_setTextXY(8, 0)
    oled_putString("Hum :")
    oled_putString(str(weather.get_humidity()) + "%")
    oled_setTextXY(9, 0)
    oled_putString("Rain:")
    rain = weather.get_rain()
    if rain:
        # BUG FIX: the original displayed nothing when rain data was
        # present. Show the first reported value; presumably a dict
        # like {'3h': <mm>} from pyowm -- TODO confirm the key format.
        oled_putString(str(list(rain.values())[0]))
    else:
        oled_putString("0%")
    print(("Weather: ", weather.get_temperature("celsius")))
    print(("Humidity: ", weather.get_humidity()))
# Main loop: read the indoor DHT sensor, show its readings on the OLED,
# then fetch and append the outside weather below them.
while True:
    try:
        # Get the temperature and Humidity from the DHT sensor
        [temp, hum] = dht(dht_sensor_port, 1)
        print(("Temp =", temp, "C\tHumidity =", hum, "%"))
        t = str(temp)
        h = str(hum)
        # (threaded fetch kept for reference; currently synchronous)
        #outside_thread = threading.Thread(target=update_outside_weather)
        #outside_thread.start()
        oled_setTextXY(0, 1)  # Print "INSIDE" at line 1
        oled_putString("INSIDE")
        oled_setTextXY(2, 0)  # Print "TEMP" and the temperature in line 3
        oled_putString("Temp:")
        oled_putString(t + "C")
        oled_setTextXY(3, 0)  # Print "HUM :" and the humidity in line 4
        oled_putString("Hum :")
        oled_putString(h + "%")
        #outside_thread.join()
        update_outside_weather()  # network call; blocks this iteration
    except (IOError, TypeError, Exception) as e:
        # NOTE(review): Exception already subsumes IOError/TypeError,
        # so this is effectively a catch-all keeping the loop alive
        print(("Error:" + str(e)))
    finally:
        #outside_thread.join()
        pass
| {
"repo_name": "tienfuc/iotivity-democlient-snap",
"path": "extlibs/GrovePi/Projects/OLED Weather Station/weather_station.py",
"copies": "9",
"size": "3567",
"license": "apache-2.0",
"hash": 5960213971571810000,
"line_mean": 29.75,
"line_max": 103,
"alpha_frac": 0.6804037006,
"autogenerated": false,
"ratio": 3.5492537313432835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8729657431943284,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://code.activestate.com/recipes/578668-encode-multipart-form-data-for-uploading-files-via/
import random
def toMultipartMessage(fields, files):
    """Encode form fields and files as a multipart/form-data request.

    fields -- dict of name (str) -> value (bytes).
    files -- dict of name (str) -> {'filename': bytes,
             'content': bytes, optional 'mimetype': bytes}.
    Returns (body bytes, headers dict with Content-Type/Content-Length).
    """
    def escape_quote(s):
        return s.replace(b'"', b'\\"')

    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    boundary = ''.join(random.choice(alphabet) for _ in range(30)).encode("ascii")
    delimiter = b'--' + boundary
    parts = []
    for name, value in fields.items():
        parts.append(delimiter)
        parts.append(b'Content-Disposition: form-data; name="' +
                     escape_quote(name.encode("ascii")) + b'"')
        parts.append(b'')
        parts.append(value)
    for name, value in files.items():
        filename = value['filename']
        mimetype = value['mimetype'] if 'mimetype' in value else b'application/octet-stream'
        parts.append(delimiter)
        parts.append(b'Content-Disposition: form-data; name="' +
                     escape_quote(name.encode("ascii")) +
                     b'"; filename="' + escape_quote(filename) + b'"')
        parts.append(b'Content-Type: ' + mimetype)
        parts.append(b'')
        parts.append(value['content'])
    # closing delimiter plus trailing CRLF
    parts.append(delimiter + b'--')
    parts.append(b'')
    body = b'\r\n'.join(parts)
    headers = {
        'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary.decode("utf8")),
        'Content-Length': str(len(body)),
    }
    return (body, headers)
| {
"repo_name": "JanSiebert/async-telegram-bot-python",
"path": "AsyncTelegramBot/Multipart.py",
"copies": "1",
"size": "1386",
"license": "mit",
"hash": -1639685851504686800,
"line_mean": 32,
"line_max": 138,
"alpha_frac": 0.575036075,
"autogenerated": false,
"ratio": 3.96,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019271422187798766,
"num_lines": 42
} |
# adapted from: http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode#Restarting_Daemon_Processes
import os
import sys
import time
import signal
import threading
import atexit
import Queue
_interval = 1.0  # polling period in seconds (may be lowered via start())
_times = {}  # path -> last observed modification time
_files = []  # extra paths registered via track() for monitoring
_running = False  # guards against starting the monitor thread twice
_queue = Queue.Queue()  # wakes/stops the monitor thread on exit
_lock = threading.Lock()  # protects _running inside start()
def _restart(path):
    # Signal the monitor loop to stop, log which file changed, then send
    # ourselves SIGINT so the daemon process is restarted by its host.
    _queue.put(True)
    prefix = 'monitor (pid=%d):' % os.getpid()
    print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
    print >> sys.stderr, '%s Triggering process restart.' % prefix
    os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
    """Return True if *path* has changed (or vanished) since last seen,
    i.e. a process restart should be triggered.
    """
    try:
        # If path doesn't denote a file and were previously
        # tracking it, then it has been removed or the file type
        # has changed so force a restart. If not previously
        # tracking the file then we can ignore it as probably
        # pseudo reference such as when file extracted from a
        # collection of modules contained in a zip file.
        if not os.path.isfile(path):
            return path in _times
        # Check for when file last modified.
        mtime = os.stat(path).st_mtime
        if path not in _times:
            _times[path] = mtime
        # Force restart when modification time has changed, even
        # if time now older, as that could indicate older file
        # has been restored.
        if mtime != _times[path]:
            return True
    except EnvironmentError:
        # Narrowed from a bare 'except': only filesystem errors mean the
        # file was removed between isfile() and stat(); a bare except
        # would also swallow KeyboardInterrupt/SystemExit.
        return True
    return False
def _monitor():
    # Poll loop run on the daemon thread. Returns (ending the thread)
    # either after signalling a restart for a changed file, or when an
    # exit token arrives on _queue during the sleep below.
    while 1:
        # Check modification times on all files in sys.modules.
        for module in sys.modules.values():
            if not hasattr(module, '__file__'):
                continue
            path = getattr(module, '__file__')
            if not path:
                continue
            # map compiled artefacts (.pyc/.pyo/.pyd) back to the .py source
            if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
                path = path[:-1]
            if _modified(path):
                return _restart(path)
        # Check modification times on files which have
        # specifically been registered for monitoring.
        for path in _files:
            if _modified(path):
                return _restart(path)
        # Go to sleep for specified interval. A queued item means the
        # process is exiting, so return; on timeout Queue.Empty is
        # raised and swallowed, and the loop polls again.
        try:
            return _queue.get(timeout=_interval)
        except:
            pass
# Daemon monitor thread: created here, started lazily by start(),
# woken and joined at interpreter exit by _exiting().
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
    """atexit hook: wake the monitor thread, then wait for it to stop."""
    try:
        _queue.put(True)
    except Exception:
        # Narrowed from a bare 'except' so system-exiting exceptions are
        # not swallowed during interpreter shutdown.
        pass
    _thread.join()
atexit.register(_exiting)
def track(path):
    """Register an additional file path to watch for modifications."""
    if path not in _files:
        _files.append(path)
def start(interval=1.0):
    """Begin monitoring for source changes, polling every *interval*
    seconds (the shortest interval requested so far wins). Safe to call
    more than once; the thread is only started the first time.
    """
    global _interval
    if interval < _interval:
        _interval = interval
    global _running
    # BUG FIX: release the lock even if starting the thread or writing
    # to stderr raises, otherwise every later start() deadlocks.
    _lock.acquire()
    try:
        if not _running:
            prefix = 'monitor (pid=%d):' % os.getpid()
            print >> sys.stderr, '%s Starting change monitor.' % prefix
            _running = True
            _thread.start()
    finally:
        _lock.release()
| {
"repo_name": "pztrick/librewary",
"path": "monitor.py",
"copies": "1",
"size": "3042",
"license": "mit",
"hash": -9073830461368036000,
"line_mean": 24.1404958678,
"line_max": 101,
"alpha_frac": 0.5894148586,
"autogenerated": false,
"ratio": 4.133152173913044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002598918708544377,
"num_lines": 121
} |
# adapted from http://djangosnippets.org/snippets/2236/
import math
from itertools import chain
from django.forms.util import flatatt
from django import forms
from django.forms import widgets
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
class ColumnCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
    """
    Widget that renders multiple-select checkboxes in columns.
    Constructor takes number of columns and css class to apply
    to the <ul> elements that make up the columns.
    """
    def __init__(self, columns=2, css_class=None, **kwargs):
        # BUG FIX: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(ColumnCheckboxSelectMultiple, self).__init__(**kwargs)
        self.columns = columns
        self.css_class = css_class

    def render(self, name, value, attrs=None, choices=()):
        """Render the checkbox list split vertically across
        self.columns <ul> elements."""
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        choices_enum = list(enumerate(chain(self.choices, choices)))
        # This is the part that splits the choices into columns.
        # Slices vertically. Could be changed to slice horizontally, etc.
        column_sizes = columnize(len(choices_enum), self.columns)
        columns = []
        for column_size in column_sizes:
            columns.append(choices_enum[:column_size])
            choices_enum = choices_enum[column_size:]
        output = []
        for column in columns:
            if self.css_class:
                # BUG FIX: the opening tag was missing its closing '>'
                output.append(u'<ul class="%s">' % self.css_class)
            else:
                output.append(u'<ul>')
            # Normalize to strings
            str_values = set([force_unicode(v) for v in value])
            for i, (option_value, option_label) in column:
                # If an ID attribute was given, add a numeric index as a
                # suffix, so that the checkboxes don't all have the same
                # ID attribute.
                if has_id:
                    final_attrs = dict(final_attrs, id='%s_%s' % (
                        attrs['id'], i))
                    label_for = u' for="%s"' % final_attrs['id']
                else:
                    label_for = ''
                cb = forms.CheckboxInput(
                    final_attrs, check_test=lambda value: value in str_values)
                option_value = force_unicode(option_value)
                rendered_cb = cb.render(name, option_value)
                option_label = conditional_escape(force_unicode(option_label))
                output.append(u'<li><label%s>%s %s</label></li>' % (
                    label_for, rendered_cb, option_label))
            output.append(u'</ul>')
        return mark_safe(u'\n'.join(output))
def columnize(items, columns):
    """
    Return a list containing numbers of elements per column if `items` items
    are to be divided into `columns` columns.

    >>> columnize(10, 1)
    [10]
    >>> columnize(10, 2)
    [5, 5]
    >>> columnize(10, 3)
    [4, 3, 3]
    >>> columnize(3, 4)
    [1, 1, 1, 0]
    """
    sizes = []
    remaining_items, remaining_cols = items, columns
    while remaining_cols > 0:
        # Integer ceiling division (no float/math.ceil needed): each column
        # takes its fair share, rounded up, of what is still left to place.
        share = -(-remaining_items // remaining_cols)
        sizes.append(share)
        remaining_items -= share
        remaining_cols -= 1
    return sizes
class RoomSelectionWidget(widgets.MultiWidget):
def __init__(self, buildings=None, attrs=None):
rswidgets = (widgets.Select(choices=buildings), widgets.TextInput)
super(RoomSelectionWidget, self).__init__(rswidgets, attrs=attrs)
def decompress(self, value):
if value:
print value
return [None, None]
class RadioInputHTML(widgets.RadioChoiceInput):
    """
    subclassed for 1 character fix... no <input /> in valid html4
    """

    def tag(self):
        """Render the <input> without a self-closing slash (HTML4-valid)."""
        attrs = self.attrs
        if 'id' in attrs:
            # Suffix the id with this input's index so ids stay unique.
            attrs['id'] = '%s_%s' % (attrs['id'], self.index)
        tag_attrs = dict(attrs, type='radio', name=self.name, value=self.choice_value)
        if self.is_checked():
            tag_attrs['checked'] = 'checked'
        return mark_safe(u'<input%s>' % flatatt(tag_attrs))
class RatingRadioFieldRenderer(widgets.RadioFieldRenderer):
    """
    Custom version of the RadioFieldRenderer that only labels the
    first and last items in the list.
    """

    def __iter__(self):
        # Yield RadioInputHTML (not the stock RadioInput) so the HTML4-safe
        # tag() rendering is used for every choice.
        for i, choice in enumerate(self.choices):
            yield RadioInputHTML(self.name, self.value, self.attrs.copy(), choice, i)

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propogate
        return RadioInputHTML(self.name, self.value, self.attrs.copy(), choice, idx)

    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        input_tags = list(self)
        # Pull out the first and last inputs: only their choice labels are
        # shown ("low" before the control, "high" after it).
        last = input_tags.pop()
        first = input_tags.pop(0)
        output = u'<label class="low">{0}</label><label class="mobile_high">{1}</label> <span class="rating_control">{2}\n'.format(
            conditional_escape(force_unicode(first.choice_label)),
            conditional_escape(force_unicode(last.choice_label)),
            first.tag())
        # Middle inputs render as bare radio tags with no labels.
        for choice in input_tags:
            output += u'{0}\n'.format(choice.tag())
        output += u'{1}</span> <label class="high">{0}</label>\n'.format(conditional_escape(force_unicode(last.choice_label)), last.tag())
return mark_safe(output) | {
"repo_name": "theworldbright/mainsite",
"path": "aspc/housing/forms/widgets.py",
"copies": "1",
"size": "5382",
"license": "mit",
"hash": -3948499760578862000,
"line_mean": 38.8740740741,
"line_max": 138,
"alpha_frac": 0.5994054255,
"autogenerated": false,
"ratio": 3.888728323699422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49881337491994215,
"avg_score": null,
"num_lines": null
} |
# adapted from http://djangosnippets.org/snippets/2236/
import math
from itertools import chain
from django.forms.utils import flatatt
from django import forms
from django.forms import widgets
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
class ColumnCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
    """
    Widget that renders multiple-select checkboxes in columns.

    Constructor takes number of columns and css class to apply
    to the <ul> elements that make up the columns.
    """

    def __init__(self, columns=2, css_class=None, **kwargs):
        # Fixed: `super(self.__class__, self)` recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(ColumnCheckboxSelectMultiple, self).__init__(**kwargs)
        self.columns = columns
        self.css_class = css_class

    def render(self, name, value, attrs=None, choices=()):
        """Render the checkboxes as `self.columns` vertical <ul> columns."""
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        choices_enum = list(enumerate(chain(self.choices, choices)))

        # This is the part that splits the choices into columns.
        # Slices vertically.  Could be changed to slice horizontally, etc.
        column_sizes = columnize(len(choices_enum), self.columns)
        columns = []
        for column_size in column_sizes:
            columns.append(choices_enum[:column_size])
            choices_enum = choices_enum[column_size:]

        # Normalize the selected values to strings once (the original
        # recomputed this set inside the per-column loop).
        str_values = set([force_unicode(v) for v in value])

        output = []
        for column in columns:
            if self.css_class:
                # BUG FIX: the opening tag was emitted as '<ul class="..."'
                # without the closing '>', producing invalid HTML.
                output.append(u'<ul class="%s">' % self.css_class)
            else:
                output.append(u'<ul>')
            for i, (option_value, option_label) in column:
                # If an ID attribute was given, add a numeric index as a suffix,
                # so that the checkboxes don't all have the same ID attribute.
                if has_id:
                    final_attrs = dict(final_attrs, id='%s_%s' % (
                        attrs['id'], i))
                    label_for = u' for="%s"' % final_attrs['id']
                else:
                    label_for = ''
                cb = forms.CheckboxInput(
                    final_attrs, check_test=lambda value: value in str_values)
                option_value = force_unicode(option_value)
                rendered_cb = cb.render(name, option_value)
                option_label = conditional_escape(force_unicode(option_label))
                output.append(u'<li><label%s>%s %s</label></li>' % (
                    label_for, rendered_cb, option_label))
            output.append(u'</ul>')
        return mark_safe(u'\n'.join(output))
def columnize(items, columns):
    """
    Return a list containing numbers of elements per column if `items` items
    are to be divided into `columns` columns.

    >>> columnize(10, 1)
    [10]
    >>> columnize(10, 2)
    [5, 5]
    >>> columnize(10, 3)
    [4, 3, 3]
    >>> columnize(3, 4)
    [1, 1, 1, 0]
    """
    result = []
    for cols_left in range(columns, 0, -1):
        # Ceiling division over the columns still to fill; integers only,
        # so no float/math.ceil round trip is needed.
        size = -(-items // cols_left)
        result.append(size)
        items -= size
    return result
class RoomSelectionWidget(widgets.MultiWidget):
def __init__(self, buildings=None, attrs=None):
rswidgets = (widgets.Select(choices=buildings), widgets.TextInput)
super(RoomSelectionWidget, self).__init__(rswidgets, attrs=attrs)
def decompress(self, value):
if value:
print value
return [None, None]
class RadioInputHTML(widgets.RadioChoiceInput):
    """
    subclassed for 1 character fix... no <input /> in valid html4
    """

    def tag(self):
        """Render the <input> without a self-closing slash (HTML4-valid)."""
        attrs = self.attrs
        if 'id' in attrs:
            # Suffix the id with this input's index so ids stay unique.
            attrs['id'] = '%s_%s' % (attrs['id'], self.index)
        tag_attrs = dict(attrs, type='radio', name=self.name, value=self.choice_value)
        if self.is_checked():
            tag_attrs['checked'] = 'checked'
        return mark_safe(u'<input%s>' % flatatt(tag_attrs))
class RatingRadioFieldRenderer(widgets.RadioFieldRenderer):
    """
    Custom version of the RadioFieldRenderer that only labels the
    first and last items in the list.
    """

    def __iter__(self):
        # Yield RadioInputHTML (not the stock RadioInput) so the HTML4-safe
        # tag() rendering is used for every choice.
        for i, choice in enumerate(self.choices):
            yield RadioInputHTML(self.name, self.value, self.attrs.copy(), choice, i)

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propogate
        return RadioInputHTML(self.name, self.value, self.attrs.copy(), choice, idx)

    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        input_tags = list(self)
        # Pull out the first and last inputs: only their choice labels are
        # shown ("low" before the control, "high" after it).
        last = input_tags.pop()
        first = input_tags.pop(0)
        output = u'<label class="low">{0}</label><label class="mobile_high">{1}</label> <span class="rating_control">{2}\n'.format(
            conditional_escape(force_unicode(first.choice_label)),
            conditional_escape(force_unicode(last.choice_label)),
            first.tag())
        # Middle inputs render as bare radio tags with no labels.
        for choice in input_tags:
            output += u'{0}\n'.format(choice.tag())
        output += u'{1}</span> <label class="high">{0}</label>\n'.format(conditional_escape(force_unicode(last.choice_label)), last.tag())
return mark_safe(output) | {
"repo_name": "aspc/mainsite",
"path": "aspc/housing/forms/widgets.py",
"copies": "1",
"size": "5383",
"license": "mit",
"hash": 2036253818505839900,
"line_mean": 38.8814814815,
"line_max": 138,
"alpha_frac": 0.599479844,
"autogenerated": false,
"ratio": 3.889450867052023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9981224785771015,
"avg_score": 0.001541185056201837,
"num_lines": 135
} |
# Adapted from http://github.com/ethereum/pyethapp/
from ethereum.utils import is_string, is_numeric, int_to_big_endian, encode_hex
def decode_number(data):
    """Decode `data` representing a number."""
    # Anything already integer-like is passed through untouched.
    if hasattr(data, '__int__'):
        return data
    # Valid encodings are '0x'-prefixed hex strings without leading zero
    # digits (except for the literal '0x0').
    if is_string(data) and data.startswith('0x') and not (len(data) > 3 and data[2] == '0'):
        data = data[2:]
        # Pad to an even number of hex digits before parsing.
        if len(data) % 2 == 1:
            data = '0' + data
        try:
            return int(data, 16)
        except ValueError:
            pass
    raise Exception('Invalid number encoding: %s' % data)
def encode_number(i):
    """Encode interger quantity `data`."""
    assert is_numeric(i)
    # Strip leading zero hex digits but keep at least a single '0'.
    hex_digits = encode_hex(int_to_big_endian(i)).lstrip('0')
    return '0x' + (hex_digits or '0')
def encode_address(address):
    """Encode a 20-byte address (or the empty address) as 0x-prefixed hex."""
    assert len(address) in (20, 0)
    return '0x' + encode_hex(address)
def encode_data(data, length=None):
    """Encode unformatted binary `data`.

    If `length` is given, the result will be padded like this:
    ``quantity_encoder(255, 3) == '0x0000ff'``.
    """
    hexed = encode_hex(data)
    if length is not None:
        # Left-pad with zeros to the requested byte length (2 hex chars/byte).
        hexed = hexed.rjust(length * 2, '0')
    return '0x' + hexed
def encode_loglist(loglist):
    """Encode a list of log"""
    return [
        {
            'logIndex': encode_number(entry['log_idx']),
            'transactionIndex': encode_number(entry['tx_idx']),
            'transactionHash': encode_data(entry['txhash']),
            'blockHash': encode_data(entry['block'].hash),
            'blockNumber': encode_number(entry['block'].number),
            'address': encode_address(entry['log'].address),
            'data': encode_data(entry['log'].data),
            # Topics are 32-byte words, hence the fixed padding length.
            'topics': [encode_data(int_to_big_endian(topic), 32)
                       for topic in entry['log'].topics],
            'type': 'pending' if entry['pending'] else 'mined',
        }
        for entry in loglist
    ]
| {
"repo_name": "ryepdx/eth-testrpc",
"path": "testrpc/utils.py",
"copies": "2",
"size": "2268",
"license": "mit",
"hash": 6229898716961939000,
"line_mean": 30.9436619718,
"line_max": 95,
"alpha_frac": 0.5714285714,
"autogenerated": false,
"ratio": 3.5108359133126936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082264484712694,
"avg_score": null,
"num_lines": null
} |
# * adapted from http://goo.gl/Us9ps
# * backported from python-3.2+
# * numpy array compatible using joblib.hash
from collections import namedtuple
from functools import update_wrapper
from threading import Lock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
def lru_cache(maxsize=128, typed=True, use_joblib_hash=True):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    If *use_joblib_hash* is True the cache key is reduced to a joblib hash
    string before lookup.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    NOTE(review): unlike the stdlib lru_cache, *typed* defaults to True here -
    confirm callers expect type-sensitive keys by default.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    if use_joblib_hash:
        # Imported lazily so joblib is only required when this mode is on.
        import joblib

    def decorating_function(user_function):

        cache = dict()
        stats = [0, 0]                  # make statistics updateable non-locally
        HITS, MISSES = 0, 1             # names for the stats fields
        kwd_mark = (object(),)          # separate positional and keyword args
        cache_get = cache.get           # bound method to lookup key or return None
        _len = len                      # localize the global len() function
        lock = Lock()                   # because linkedlist updates aren't threadsafe
        root = []                       # root of the circular doubly linked list
        nonlocal_root = [root]          # make updateable non-locally
        root[:] = [root, root, None, None]      # initialize by pointing to self
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

        def make_key(args, kwds, typed, tuple=tuple, sorted=sorted, type=type):
            # helper function to build a cache key from positional and keyword args
            key = args
            if kwds:
                sorted_items = tuple(sorted(kwds.items()))
                key += kwd_mark + sorted_items
            if typed:
                # Append the argument types so f(3) and f(3.0) key differently.
                key += tuple(type(v) for v in args)
                if kwds:
                    key += tuple(type(v) for k, v in sorted_items)
            if use_joblib_hash:
                key = joblib.hash(key)
            return key

        if maxsize == 0:

            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result

        elif maxsize is None:

            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed) if kwds or typed else args
                result = cache_get(key, root)   # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # Call the user function outside the lock so slow functions
                # do not serialize unrelated cache lookups.
                result = user_function(*args, **kwds)
                with lock:
                    root = nonlocal_root[0]
                    if _len(cache) < maxsize:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        cache[key] = last[NEXT] = root[PREV] = link
                    else:
                        # use root to store the new key and result
                        root[KEY] = key
                        root[RESULT] = result
                        cache[key] = root
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = root[NEXT]
                        del cache[root[KEY]]
                        root[KEY] = None
                        root[RESULT] = None
                    stats[MISSES] += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
| {
"repo_name": "nsf-ri-ubicv/sthor",
"path": "sthor/util/cache.py",
"copies": "1",
"size": "6141",
"license": "bsd-3-clause",
"hash": -2242901729756532500,
"line_mean": 40.7755102041,
"line_max": 95,
"alpha_frac": 0.5196222114,
"autogenerated": false,
"ratio": 4.617293233082707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5636915444482707,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://inventwithpython.com/chapter10.html
# Uses AIMA code under the MIT license http://opensource.org/licenses/MIT
# Tic Tac Toe game using AI and Myro
from Myro import *
from Graphics import *
import random
from games import *
# Make the board window: 600x600 px, which the rest of the script treats as
# a 3x3 grid of 200 px cells.
win = Window("Tic Tac Toe!", 600, 600)
win.setBackground(Color('white'))
# This function prints out the board that it was passed.
# "board" is a list of 10 strings representing the board (ignore index 0)
# The X and O strings designate where to put the X and O images on the Window
def drawBoard(board):
    """Draw the X/O images for every occupied tile of `board` onto `win`.

    `board` is a list of 10 strings (index 0 unused); tile numbers 1-9 map
    onto a 3x3 grid of 200 px cells.
    """
    images = {'X': "blackx.jpg", 'O': "blacko.jpg"}
    for col in range(3):
        for row in range(3):
            mark = board[1 + col + 3 * row]
            if mark in images:
                pic = makePicture(images[mark])
                pic.moveTo(100 + 200 * col, 500 - 200 * row)
                pic.draw(win)
# Convert a move's coordinates to a tile number
def to_num(move):
    """Convert a move's (x, y) coordinates to its tile number (1-9).

    Unknown coordinates yield None, matching the original fall-through.
    """
    (x, y) = move
    coordinate_to_tile = {
        (1, 1): 7, (1, 2): 8, (1, 3): 9,
        (2, 1): 4, (2, 2): 5, (2, 3): 6,
        (3, 1): 1, (3, 2): 2, (3, 3): 3,
    }
    return coordinate_to_tile.get((x, y))
# Convert a move's tile number to coordinates
def to_xy(move):
    """Convert a tile number (1-9) to the (x, y) coordinates used by AIMA.

    Unknown tile numbers yield None, matching the original fall-through.
    """
    tile_to_coordinate = {
        1: (3, 1), 2: (3, 2), 3: (3, 3),
        4: (2, 1), 5: (2, 2), 6: (2, 3),
        7: (1, 1), 8: (1, 2), 9: (1, 3),
    }
    return tile_to_coordinate.get(move)
# This function returns True if the player wants to play again, otherwise it returns False.
def playAgain():
    """Show the result screen and ask whether to play another game.

    Returns True if the player clicks the "Play Again" half of the window,
    False if they click the "Quit" half.
    """
    # Clear the window, then show a coloured banner for win / lose / tie.
    win.clear()
    if isWinner(theBoard, playerLetter):
        background, banner = 'Green', "YOU WIN! :)"
    elif isWinner(theBoard, computerLetter):
        background, banner = 'Red', "YOU LOSE! :("
    else:
        background, banner = 'Yellow', "you tie! 0_o"
    win.setBackground(Color(background))
    Text((300, 50), banner).draw(win)

    # Border lines around the two clickable halves of the screen.
    for start, end in (((300, 100), (300, 600)), ((0, 100), (600, 100))):
        border = Line(start, end)
        border.setWidth(10)
        border.draw(win)
    Text((150, 300), 'Play Again').draw(win)
    Text((450, 300), 'Quit').draw(win)

    # Only the click's x coordinate matters: left half replays, right quits.
    x, y = getMouse()
    if x < 300:
        return True
    win.clear()
    Text((300, 300), "Please close this window...").draw(win)
    return False
# Write a move to the board
def makeMove(board, letter, move):
    """Place `letter` on `board` at tile index `move` (mutates in place)."""
    board[move] = letter
# Given a board and a player's letter, this function returns True if that player has won.
# We use bo instead of board and le instead of letter so we don't have to type as much.
def isWinner(bo, le):
    """Return True if letter `le` holds any complete line on board `bo`."""
    win_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows: top, middle, bottom
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns: left, middle, right
        (7, 5, 3), (9, 5, 1),              # the two diagonals
    )
    return any(all(bo[i] == le for i in line) for line in win_lines)
# Make a duplicate of the board list and return it the duplicate.
def getBoardCopy(board):
    """Return a shallow copy of `board`.

    Idiom cleanup: the element-by-element append loop is replaced by the
    built-in list() copy; behaviour is identical.
    """
    return list(board)
# Return true if the passed move is free on the passed board.
def isSpaceFree(board, move):
    """Return True if tile `move` on `board` is not yet occupied."""
    free = (board[move] == ' ')
    return free
# Let the player type in his/her move by clicking on the board
def getPlayerMove(board):
    """Block until the player clicks a free tile; return its number (1-9)."""
    valid_moves = '1 2 3 4 5 6 7 8 9'.split()
    move = ' '
    while move not in valid_moves or not isSpaceFree(board, int(move)):
        # Translate the pixel click into a 0-2 grid column/row, then into
        # the 1-9 tile number; keep asking until the tile is free.
        click_x, click_y = getMouse()
        col = click_x // 200
        row = 2 - click_y // 200
        move = str(int(1 + col + 3 * row))
    return int(move)
# Returns a valid move from the passed list on the passed board.
# Returns None if there is no valid move.
def chooseRandomMoveFromList(board, movesList):
    """Return a random free move chosen from `movesList`.

    Returns None if none of the listed moves is free on `board`.

    Idiom cleanup: the manual filter-and-append loop is replaced with a
    list comprehension; behaviour is unchanged.
    """
    possibleMoves = [i for i in movesList if isSpaceFree(board, i)]
    if possibleMoves:
        return random.choice(possibleMoves)
    return None
# Given a board and the computer's letter, determine where to move and return that move.
# NOTE: WE DO NOT USE THIS FUNCTION / "preprogrammed" AI, we use minimax from AIMA instead.
# We left it here in case you want to compare or play with it.
def getComputerMove(board, computerLetter):
    """Rule-based AI move (kept for comparison; minimax from AIMA is used).

    Strategy order: take a winning square, block the player's winning
    square, then prefer corners, the center, and finally the sides.
    """
    playerLetter = 'O' if computerLetter == 'X' else 'X'

    # 1) Take any square that wins immediately.
    for candidate in range(1, 10):
        trial = getBoardCopy(board)
        if isSpaceFree(trial, candidate):
            makeMove(trial, computerLetter, candidate)
            if isWinner(trial, computerLetter):
                return candidate

    # 2) Block the player's immediate win.
    for candidate in range(1, 10):
        trial = getBoardCopy(board)
        if isSpaceFree(trial, candidate):
            makeMove(trial, playerLetter, candidate)
            if isWinner(trial, playerLetter):
                return candidate

    # 3) Corners, 4) center, 5) sides - in that order of preference.
    corner = chooseRandomMoveFromList(board, [1, 3, 7, 9])
    if corner != None:
        return corner
    if isSpaceFree(board, 5):
        return 5
    return chooseRandomMoveFromList(board, [2, 4, 6, 8])
# Return True if every space on the board has been taken. Otherwise return False.
def isBoardFull(board):
    """Return True when no tile 1-9 on `board` is free."""
    # The free-space test (board[i] == ' ') is inlined so the whole check
    # reads as a single expression over the nine playable tiles.
    return all(board[i] != ' ' for i in range(1, 10))
# The loop that runs the game
# The loop that runs the game.
# NOTE: game state is mirrored in two places - `theBoard` (for drawing and
# the local win/tie checks) and the AIMA `state` (for the search) - and the
# two must be updated in lockstep on every move.
while True:
    # Reset the board, draw the gridlines
    win.clear()
    l = Line((200, 0), (200, 600))
    l.setWidth(10)
    l.draw(win)
    l = Line((400, 0), (400, 600))
    l.setWidth(10)
    l.draw(win)
    l = Line((0, 200), (600, 200))
    l.setWidth(10)
    l.draw(win)
    l = Line((0, 400), (600, 400))
    l.setWidth(10)
    l.draw(win)
    # Define the game for AIMA TicTacToe
    tgame = TicTacToe()
    state = tgame.initial
    # Initialize the board to be blank
    theBoard = [' '] * 10
    # You could ask, but we'll just have the player always be 'X' and always go first. You can't beat the computer anyway :p
    playerLetter, computerLetter = 'X', 'O'
    turn = 'player'
    print(' ')
    print('You\'ve started a new game. The player, X, always goes first')
    gameIsPlaying = True
    # Make this true if you don't want to wait for long calculations on the first turn...
    # If this is true the computer actually makes mistakes... needs to be fixed?
    random_first_move = False
    while gameIsPlaying:
        if turn == 'player':
            # Player's turn. Update the board
            drawBoard(theBoard)
            # Get the player's move
            move = getPlayerMove(theBoard)
            makeMove(theBoard, playerLetter, move)
            drawBoard(theBoard)
            # Update game state. AIMA.
            state = tgame.make_move(to_xy(move),state)
            # Check if they won or tied, otherwise move to next turn
            if isWinner(theBoard, playerLetter):
                drawBoard(theBoard)
                #print('Hooray! You have won the game!')
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    #print('The game is a tie!')
                    gameIsPlaying = False
                else:
                    turn = 'computer'
        else:
            # Computer's turn.
            # This is to make it possible to win if you're feeling depressed because the AI is too good..
            # It makes the computer to move randomly on its first turn. It actually needs to be fixed, because
            # the computer makes mistakes if this is used..
            if random_first_move:
                move = chooseRandomMoveFromList(theBoard, [1, 2, 3, 4, 5, 6, 7, 8, 9])
                while not isSpaceFree(theBoard, move):
                    move = chooseRandomMoveFromList(theBoard, [1, 2, 3, 4, 5, 6, 7, 8, 9])
                #print(state)
                # Convert the tile number to AIMA (x, y) form so both
                # branches leave `move` in coordinate form.
                move = to_xy(move)
                random_first_move = False
                print('Moved Randomly...')
            # It uses a minimax decision to make its move! To use alphabeta instead, switch which line is commented out. Uses AIMA
            else:
                #move = alphabeta_full_search(state,tgame)
                move = minimax_decision(state,tgame)
            makeMove(theBoard, computerLetter, to_num(move))
            drawBoard(theBoard)
            # Update game state. AIMA
            state = tgame.make_move(move,state)
            # Check if they won
            if isWinner(theBoard, computerLetter):
                drawBoard(theBoard)
                #print('The computer has beaten you! You lose.')
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    #print('The game is a tie!')
                    gameIsPlaying = False
                else:
                    turn = 'player'
    # If it's game over, let them see the board and wait to ask to play again until they click somewhere
    t = Text((300, 10), "GAME OVER. CLICK TO CONTINUE")
    t.draw(win)
    # Wait for click, where doesn't matter
    dummy, variable_that_doesnt_matter = getMouse()
    # Ask if they want to play again or quit
    if not playAgain():
        break
| {
"repo_name": "CSavvy/python",
"path": "extras/ai_tic_tac_toe/Tic Tac Toe.py",
"copies": "1",
"size": "10776",
"license": "mit",
"hash": -995242770800222600,
"line_mean": 31.9571865443,
"line_max": 130,
"alpha_frac": 0.5698775056,
"autogenerated": false,
"ratio": 3.547070441079658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46169479466796576,
"avg_score": null,
"num_lines": null
} |
# Adapted from: http://jared.geek.nz/2013/feb/linear-led-pwm
INPUT_SIZE = 255 # Input integer size
OUTPUT_SIZE = 255 # Output integer size
INT_TYPE = 'uint8_t'
TABLE_NAME = 'cie';
def cie1931(L):
    """CIE 1931 lightness-to-luminance conversion for L in [0, 1]."""
    # Work on the 0-100 lightness scale used by the CIE formula.
    lightness = L * 100.0
    if lightness <= 8:
        return lightness / 902.3
    return ((lightness + 16.0) / 116.0) ** 3
# Sample the CIE curve at INPUT_SIZE+1 evenly spaced input levels.
x = range(0,int(INPUT_SIZE+1))
brightness = [cie1931(float(L)/INPUT_SIZE) for L in x]

# For each target brightness, find an on/off fraction numerator/denominator
# that first reaches the target: either grow the numerator or shrink the
# denominator, whichever side of the 128 threshold we are on.
numerator = 1
denominator = 255
on = []
off = []
for bright in brightness:
    while float(numerator) / float(denominator) < bright:
        if numerator >= 128:
            numerator += 1
        else:
            denominator -= 1
            # Keep the denominator in (128, 255]: rescale n/128 to the
            # nearly equal fraction 2n/255 to preserve resolution.
            if denominator == 128:
                denominator = 255
                numerator *= 2
    on.append(numerator)
    off.append(denominator)

# for i in range(256):
#     print on[i], " / ", off[i]

# Emit the two lookup tables as a C header, ten entries per line.
f = open('gamma_correction_table.h', 'w')
f.write('// CIE1931 correction table\n')
f.write('// Automatically generated\n\n')
f.write('%s %s[%d] = {\n' % (INT_TYPE, "on", INPUT_SIZE+1))
f.write('\t')
for i,L in enumerate(on):
    f.write('%d, ' % int(L))
    if i % 10 == 9:
        f.write('\n\t')
f.write('\n};\n\n')
f.write('%s %s[%d] = {\n' % (INT_TYPE, "off", INPUT_SIZE+1))
f.write('\t')
for i,L in enumerate(off):
    f.write('%d, ' % int(L))
    if i % 10 == 9:
        f.write('\n\t')
f.write('\n};\n\n') | {
"repo_name": "mct/soma",
"path": "pier14/utils/gamma_table_generator.py",
"copies": "2",
"size": "1281",
"license": "apache-2.0",
"hash": -8260972789305646000,
"line_mean": 21.1034482759,
"line_max": 60,
"alpha_frac": 0.5745511319,
"autogenerated": false,
"ratio": 2.5774647887323945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.906583597325752,
"avg_score": 0.017235989474974886,
"num_lines": 58
} |
# Adapted from http://nbviewer.ipython.org/github/JonathanRaiman/theano_lstm/blob/master/Tutorial.ipynb
import numpy as np
from ..util import file_handling as fh
class Vocab:
    """Bidirectional token <-> integer-index mapping with an OOV fallback."""

    def __init__(self, prefix, read_from_filename=None, tokens_to_add=None, add_oov=True):
        # token2index and index2token are kept strictly in sync.
        self.token2index = {}
        self.index2token = []
        self.oov_index = -1
        self.oov_token = prefix + '__OOV__'
        if add_oov:
            # Reserve slot 0 for the out-of-vocabulary marker.
            self.oov_index = 0
            self.add_tokens([self.oov_token])
        if read_from_filename is not None:
            self.read_from_file(read_from_filename)
        if tokens_to_add is not None:
            self.add_tokens(tokens_to_add)

    def add_tokens(self, tokens):
        """Append any previously unseen tokens, assigning the next free index."""
        for tok in tokens:
            if tok not in self.token2index:
                self.token2index[tok] = len(self.token2index)
                self.index2token.append(tok)

    def get_token(self, index):
        """Return the token stored at `index`."""
        return self.index2token[index]

    def get_tokens(self, indices):
        """Return the list of tokens for a sequence of indices."""
        return [self.index2token[idx] for idx in indices]

    def get_all_tokens(self):
        return self.index2token

    def get_index(self, token):
        """Return the token's index, or the OOV index if unknown."""
        return self.token2index.get(token, self.oov_index)

    def get_indices(self, tokens):
        """Return an int32 numpy array of indices, OOV-substituting unknowns."""
        result = np.zeros(len(tokens), dtype=np.int32)
        for pos, tok in enumerate(tokens):
            result[pos] = self.token2index.get(tok, self.oov_index)
        return result

    @property
    def size(self):
        return len(self.index2token)

    def __len__(self):
        return len(self.index2token)

    def sort(self):
        # Re-sort the tokens alphabetically and rebuild the reverse mapping.
        # NOTE(review): oov_index is NOT updated to the OOV token's new
        # position - confirm callers do not rely on it after sort().
        self.index2token.sort()
        self.token2index = dict(zip(self.index2token, range(len(self.token2index))))

    def write_to_file(self, filename):
        fh.write_to_json(self.index2token, filename, sort_keys=False)

    def read_from_file(self, filename):
        self.index2token = fh.read_json(filename)
        self.token2index = dict(zip(self.index2token, range(len(self.index2token))))
| {
"repo_name": "dallascard/guac",
"path": "core/feature_extractors/vocabulary.py",
"copies": "1",
"size": "2141",
"license": "apache-2.0",
"hash": -7175240341714396000,
"line_mean": 30.0289855072,
"line_max": 103,
"alpha_frac": 0.6249416161,
"autogenerated": false,
"ratio": 3.3716535433070867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9412841693849587,
"avg_score": 0.016750693111499945,
"num_lines": 69
} |
#Adapted from http://reviewboard.googlecode.com/svn/trunk/reviewboard/admin/siteconfig.py
from django.conf import settings
from django.contrib.sites.models import Site
from courant.core.siteconfig.models import SiteConfiguration
from courant.core.siteconfig.settings import usersettings
def load_site_config():
    """
    Loads any stored site configuration settings and populates the Django
    settings object with any that need to be there.
    """
    try:
        siteconfig = SiteConfiguration.objects.get_current()
    except SiteConfiguration.DoesNotExist:
        siteconfig = SiteConfiguration(site=Site.objects.get_current(),
                                       version="1.0")
        siteconfig.save()
    except Exception:
        # We got something else. Likely, this doesn't exist yet and we're
        # doing a syncdb or something, so silently ignore.
        # (Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate.)
        return

    # Create defaults dictionary from the declared user settings and register
    # it on the siteconfig object if no defaults exist yet.
    defaults = {}
    for settings_group in usersettings:  # renamed: `set` shadowed the builtin
        for fieldset in settings_group.values():
            for key, value in fieldset.items():
                defaults[key] = value['default']
    if not siteconfig.get_defaults():
        siteconfig.add_defaults(defaults)

    # Merge the defaults and currently set settings - the current settings
    # override the defaults - then push any key not already present onto the
    # Django settings object.
    merged = dict(defaults, **siteconfig.settings)
    for key in merged:
        try:
            getattr(settings, key)
        except AttributeError:
            setattr(settings, key, merged[key])
| {
"repo_name": "maxcutler/Courant-News",
"path": "courant/core/siteconfig/siteconfig.py",
"copies": "1",
"size": "1557",
"license": "bsd-3-clause",
"hash": -2484264889436468700,
"line_mean": 35.0714285714,
"line_max": 100,
"alpha_frac": 0.6563904945,
"autogenerated": false,
"ratio": 4.579411764705882,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5735802259205882,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://audiodestrukt.wordpress.com/2013/06/23/midi-programming-in-python/
import sys, pygame, pygame.midi
from rdflib import Graph, Namespace, Literal, RDF
import uuid
# set up pygame
pygame.init()
pygame.midi.init()

# initialize rdf graph
g = Graph()

# Namespaces
mid = Namespace("http://example.org/midi/")
pattern_id = uuid.uuid4()
m = Namespace("http://example.org/midi/" + str(pattern_id) + "/")
g.bind('mid', mid)

# list all midi devices
for x in range( 0, pygame.midi.get_count() ):
    print pygame.midi.get_device_info(x)
print pygame.midi.Input

# open a specific midi device
# NOTE(review): device id 1 is hard-coded - confirm it matches the desired
# input from the device list printed above on the target machine.
inp = pygame.midi.Input(1)

# run the event loop (never exits on its own; terminate with ctrl-c)
while True:
    if inp.poll():
        # no way to find number of messages in queue
        # so we just specify a high max value
        e = inp.read(1000)
        # NOTE(review): eval(str(e)) merely round-trips the list pygame
        # already returned - redundant, and eval is unsafe in general.
        el = eval(str(e))
        # Format is [[status,data1,data2,data3],timestamp],...]
        # status = midi event (144 is NoteOn, 128 is NoteOff)
        # data1 = pitch
        # data2 = velocity
        # data3 = channel
        # Loop over other possible simultaneous events
        for event in el:
            status = None
            if event[0][0] == 144:
                status = "NoteOnEvent"
            elif event[0][0] == 128:
                status = "NoteOffEvent"
            else:
                # NOTE(review): status stays None here, so the mid[status]
                # triple below would fail for unexpected event types.
                print "BIG ERROR, unexpected event type {}".format(event[0][0])
            pitch = event[0][1]
            velocity = event[0][2]
            channel = event[0][3]
            timestamp = event[1]
            #print status, pitch, velocity, channel, timestamp
            # Creating triples!
            track_id = uuid.uuid4()
            # Rebinds the loop variable `event` from the raw MIDI message to
            # the RDF subject URI for the remainder of this iteration.
            event = m['track' + str(track_id) + '/event' + str(uuid.uuid4())]
            g.add((event, RDF.type, mid[status]))
            g.add((event, mid.tick, Literal(timestamp)))
            g.add((event, mid.channel, Literal(channel)))
            g.add((event, mid.pitch, Literal(pitch)))
            g.add((event, mid.velocity, Literal(velocity)))
        # Print the triples gathered for this batch, then reset the graph.
        for s,p,o in g.triples((None, None, None)):
            print g.qname(s),g.qname(p),o,'.'
        g = Graph()
    # wait 10ms - this is arbitrary, but wait(0) still resulted
    # in 100% cpu utilization
    pygame.time.wait(10)
| {
"repo_name": "albertmeronyo/midi-rdf",
"path": "src/stream-midi-rdf.py",
"copies": "2",
"size": "2271",
"license": "mit",
"hash": 6194014601617474000,
"line_mean": 31.4428571429,
"line_max": 89,
"alpha_frac": 0.5746367239,
"autogenerated": false,
"ratio": 3.5484375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5123074223899999,
"avg_score": null,
"num_lines": null
} |
### Adapted from https://blog.dominodatalab.com/creating-interactive-crime-maps-with-folium/
# Renders clustered incident markers and a per-district choropleth with folium.
# NOTE(review): uses `display(...)` (IPython/Jupyter builtin) and the pre-0.2
# folium API (simple_marker / geo_json) -- confirm the intended environment.
import folium
import pandas as pd

# NOTE(review): despite the SF naming below, (69.74, 19.38) is not San
# Francisco (it lies in northern Norway) -- verify which location is intended.
SF_COORDINATES = (69.74, 19.38)

crimedata = pd.read_csv('SFPD_Incidents_-_Current_Year__2015_.csv')

# for speed purposes
MAX_RECORDS = 1000

# create empty map zoomed in on the SF_COORDINATES location
map = folium.Map(location=SF_COORDINATES, zoom_start=12)

# add a marker for every record in the filtered data, use a clustered view
for each in crimedata[0:MAX_RECORDS].iterrows():
    map.simple_marker(
        location=[each[1]['Y'], each[1]['X']],
        clustered_marker=True)

display(map)

# definition of the boundaries in the map
district_geo = r'sfpddistricts.geojson'

# calculating total number of incidents per district
crimedata2 = pd.DataFrame(crimedata['PdDistrict'].value_counts().astype(float))
crimedata2.to_json('crimeagg.json')
crimedata2 = crimedata2.reset_index()
crimedata2.columns = ['District', 'Number']

# creation of the choropleth
map1 = folium.Map(location=SF_COORDINATES, zoom_start=12)
map1.geo_json(geo_path=district_geo,
              data_out='crimeagg.json',
              data=crimedata2,
              columns=['District', 'Number'],
              key_on='feature.properties.DISTRICT',
              fill_color='YlOrRd',
              fill_opacity=0.7,
              line_opacity=0.2,
              legend_name='Number of incidents per district')
display(map1)
| {
"repo_name": "kmunve/APS",
"path": "viz/geo/aval_active_clusters.py",
"copies": "1",
"size": "1418",
"license": "mit",
"hash": 5370080586126767000,
"line_mean": 31.976744186,
"line_max": 92,
"alpha_frac": 0.6854724965,
"autogenerated": false,
"ratio": 3.023454157782516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42089266542825154,
"avg_score": null,
"num_lines": null
} |
# ADAPTED FROM <https://developers.google.com/appengine/articles/sharding_counters>
"""
' Common Package Imports
"""
from google.appengine.ext import ndb
"""
' PURPOSE
' Given a ShardModel and shard manager name, create
' a new Manager for it if none exists, or return
' the current one.
' PARAMETERS
' <ndb.Model ShardModel>
' <String name>
' <String **kwarg namespace>
' Returns
' <ndb.Model DynamicShardManager> instance
"""
def getOrCreate(ShardModel, name, namespace=None):
    """Fetch the named DynamicShardManager for *ShardModel*, creating it on demand.

    The manager's datastore key encodes both the manager name and the shard
    model's class name. Because get_or_insert cannot pass constructor
    arguments, the (non-persisted) shard model class is re-attached afterwards
    via __setModel__().
    """
    key_id = 'shard-manager:-name:{}-modelname:{}'.format(name, ShardModel.__name__)
    manager = DynamicShardManager.get_or_insert(key_id, namespace=namespace)
    manager.__setModel__(ShardModel)
    return manager
"""
' PURPOSE
' The ndb.Model that runs all the shards and tracks data.
' It's used for everything.
' NOTES
' Use the 'getOrCreate' method to access DynamicShardManagers
"""
class DynamicShardManager(ndb.Model):
    """Manages a family of sharded entities (e.g. counters) of a given shard model.

    Adapted from <https://developers.google.com/appengine/articles/sharding_counters>.
    Obtain instances through the module-level getOrCreate(); the shard model
    class cannot survive get_or_insert, so it is attached per instance via
    __setModel__().

    Fixes in this revision: ``funct == None`` replaced with ``funct is None``,
    unused locals removed, and the module's pseudo-docstring string literals
    converted into real docstrings.
    """

    # Current number of shards; grown automatically under write contention.
    num_shards = ndb.IntegerProperty(default=20)

    # Key format for individual shard entities: shard model name, manager name, index.
    SHARD_KEY_TEMPLATE = 'shard-type:({})-name:({})-id:({:d})'

    # The ndb model class of the shards (not persisted; set per instance).
    __shardmodel__ = None

    def __setModel__(self, ShardModel):
        """Attach the shard ndb model class for this manager.

        Needed because arguments cannot be sent to the constructor when using
        get_or_insert.
        """
        self.__shardmodel__ = ShardModel

    class ValueObject(object):
        """Simple mutable holder so shards can update a value by reference."""
        pass

    def getValue(self):
        """Return the aggregate value computed across all existing shards.

        What the value means is defined by the shard model: each existing
        shard folds itself into the accumulator via computeValue().
        """
        value = self.ValueObject()
        value.value = self.__shardmodel__.getDefaultValue()
        for shard in ndb.get_multi(self.getAllShardKeys()):
            if shard is not None:
                shard.computeValue(value)
        return value.value

    def run(self, functname, *args, **kwargs):
        """Run *functname* on a random shard (e.g. an Add on a counter shard).

        Functions marked ``__static__`` on the shard model receive the manager
        itself instead of a shard. If the transactional attempt needed more
        than one try (datastore contention), the shard count is increased by 2.
        """
        funct = getattr(self.__shardmodel__, functname)
        if hasattr(funct, '__static__') and getattr(funct, '__static__'):
            funct(self, *args, **kwargs)
            return
        DATA = dict(tries=0)
        self._run(DATA, functname, args, kwargs)
        if DATA['tries'] > 1:
            self._increase_shards(2)

    @ndb.transactional
    def _run(self, retrydata, functname, args, kwargs):
        """Transactionally run *functname* on one randomly chosen shard.

        The shard entity is created lazily if it does not exist yet. The
        transaction retries re-execute this body, so ``retrydata['tries'] > 1``
        signals contention to the caller.
        """
        import random
        name = self.getName()
        shard_key_string = self.SHARD_KEY_TEMPLATE.format(
            self.__shardmodel__.__name__, name,
            random.randint(0, self.num_shards - 1))
        shard = self.__shardmodel__.get_by_id(
            shard_key_string, namespace=self.getNamespace())
        if shard is None:
            shard = self.__shardmodel__(
                id=shard_key_string, namespace=self.getNamespace())
        funct = getattr(shard, functname)
        if funct is None:
            return
        funct(*args, **kwargs)
        retrydata['tries'] += 1

    def getName(self):
        """Return the serialized name (datastore key id) of this manager."""
        return self.key.id()

    def getNamespace(self):
        """Return the datastore namespace of this manager."""
        return self.key.namespace()

    def getAllShardKeys(self):
        """Return the keys of every possible shard for the current shard count."""
        name = self.getName()
        shard_key_strings = [
            self.SHARD_KEY_TEMPLATE.format(self.__shardmodel__.__name__, name, index)
            for index in range(self.num_shards)]
        return [ndb.Key(self.__shardmodel__, shard_key_string,
                        namespace=self.getNamespace())
                for shard_key_string in shard_key_strings]

    @ndb.transactional
    def _increase_shards(self, amount):
        """Transactionally grow the shard count by *amount* (if positive).

        PUTS: 1 -- on changing the amount of shards.
        """
        if amount > 0:
            self.num_shards += amount
            self.put()

    def profile(self):
        """Return basic statistics: shards available vs. shards actually created."""
        all_keys = self.getAllShardKeys()
        keycount = 0
        for counter in ndb.get_multi(all_keys):
            if counter is not None:
                keycount += 1
        return dict(
            shards_available=len(all_keys),
            shards_used=keycount
        )
"repo_name": "HunterLarco/sealed",
"path": "server/api/lib/shards/generic.py",
"copies": "1",
"size": "5469",
"license": "apache-2.0",
"hash": -1721267742577071900,
"line_mean": 22.9912280702,
"line_max": 122,
"alpha_frac": 0.6319253977,
"autogenerated": false,
"ratio": 3.6146728354263056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9465924071600836,
"avg_score": 0.05613483230509392,
"num_lines": 228
} |
# adapted from https://gist.github.com/cliffano/9868180
import os
import time
import json
from json import JSONEncoder
def json_log(res, host):
    """Print *res* as a single JSON object on stdout, tagged with *host*.

    Non-dict results are silently ignored. Results that carry a
    'verbose_override' key are printed as-is, without the injected "host"
    field; otherwise *res* is mutated in place to add it.

    Fix in this revision: the dict check uses isinstance() instead of the
    fragile ``type(res) == type(dict())`` comparison.
    """
    if isinstance(res, dict):
        if 'verbose_override' not in res:
            res.update({"host": host})
        combined_json = JSONEncoder().encode(res)
        print(combined_json)
class CallbackModule(object):
    """Ansible callback plugin that logs per-host runner results as JSON lines.

    Only hooks that carry a per-host result dict are logged (through the
    module-level json_log helper); every other runner/playbook hook is a
    deliberate no-op, but must exist because Ansible invokes each of these
    method names on registered callback plugins.
    """
    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        json_log(res, host)

    def runner_on_ok(self, host, res):
        json_log(res, host)

    def runner_on_error(self, host, msg):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        json_log(res, host)

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        json_log(res, host)

    def runner_on_async_ok(self, host, res, jid):
        json_log(res, host)

    def runner_on_async_failed(self, host, res, jid):
        json_log(res, host)

    # Playbook-level hooks below carry no per-host result dict, so they are
    # intentionally not logged.
    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, pattern):
        pass

    def playbook_on_stats(self, stats):
        pass
"repo_name": "petems/ansible-json",
"path": "callback_plugins/json_logs.py",
"copies": "1",
"size": "1719",
"license": "mit",
"hash": -3937561130576649000,
"line_mean": 21.3376623377,
"line_max": 142,
"alpha_frac": 0.6707388016,
"autogenerated": false,
"ratio": 3.183333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354072134933333,
"avg_score": null,
"num_lines": null
} |
from datetime import datetime
from itertools import chain
from django.conf import settings
import pytz
# Timezones tried first when matching a UTC offset (overridable via Django settings).
TZ_DETECT_TIMEZONES = getattr(settings, 'TZ_DETECT_TIMEZONES',
                              ('Australia/Sydney', 'Asia/Tokyo'))
# Countries whose timezones are tried next, before falling back to all common
# timezones (overridable via Django settings).
TZ_DETECT_COUNTRIES = getattr(settings, 'TZ_DETECT_COUNTRIES',
                              ('AU', 'CN', 'US', 'BR', 'RU', 'GB'))
def get_prioritized_timezones(country_hints=None):
    """Yield candidate timezone names, most-likely candidates first.

    Order: the configured priority timezones, then the timezones of any
    hinted countries, then those of the configured countries, and finally
    every common timezone. Names may repeat; callers just scan in order.
    """
    hints = country_hints or []

    def candidate_groups():
        yield TZ_DETECT_TIMEZONES
        for code in hints:
            yield pytz.country_timezones(code)
        for code in TZ_DETECT_COUNTRIES:
            yield pytz.country_timezones(code)
        yield pytz.common_timezones

    return chain.from_iterable(candidate_groups())
def offset_to_timezone(offset, country_hints=None):
    """Convert a minutes offset (JavaScript-style) into a pytz timezone"""
    closest_tz = None
    # Sentinel larger than any possible delta (minutes in a day).
    closest_delta = 1440
    # JS offsets are flipped and can be negative, so
    # unflip and put into range 0 - 1440
    user_offset = (offset * -1)
    user_offset = (user_offset + 1440) % 1440
    for tz_name in get_prioritized_timezones(country_hints):
        try:
            tz = pytz.timezone(tz_name)
        except KeyError:
            # Candidate list may contain names this pytz build doesn't know.
            continue
        # timedelta.seconds is always non-negative, so this lands in the same
        # 0-1440 range as user_offset above.
        tz_offset = datetime.now(tz=tz).utcoffset().seconds / 60
        delta = tz_offset - user_offset
        # NOTE(review): deltas are not wrapped around the 0/1440 boundary, so
        # near-matches straddling UTC midnight can be mis-ranked; exact
        # matches are still always found. Confirm this is acceptable.
        if abs(delta) < abs(closest_delta):
            closest_tz = tz
            closest_delta = delta
            if delta == 0:
                # Exact match -- earlier candidates are higher priority, stop.
                break
    return closest_tz
def safe_offset_to_timezone_name(offset_str, country_hint=''):
    """Convert a JS-style minutes-offset string into a timezone name.

    Returns the matched zone's name, or the empty string when the offset
    cannot be parsed or no timezone matches. Never raises for bad input.
    """
    try:
        minutes = int(offset_str)
        match = offset_to_timezone(minutes, [country_hint])
        if match:
            return match.zone
    except ValueError:
        pass
    return ''
| {
"repo_name": "pierxco/djel",
"path": "djel/utils/tz_detector.py",
"copies": "1",
"size": "3049",
"license": "mit",
"hash": 2837947112853777400,
"line_mean": 35.734939759,
"line_max": 94,
"alpha_frac": 0.6726795671,
"autogenerated": false,
"ratio": 3.9546044098573283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007961671246389065,
"num_lines": 83
} |
"""Adapted from https://github.com/aio-libs/aiobotocore"""
from botocore.exceptions import PaginationError
from botocore.paginate import PageIterator
from botocore.utils import set_value_from_jmespath, merge_dicts
class AsyncPageIterator(PageIterator):
    """Async drop-in for botocore's PageIterator (adapted from aiobotocore).

    Drives pagination one awaited page at a time via next_page() and exposes
    the async-iteration protocol so callers can use ``async for``.

    Fix in this revision: ``__aiter__`` is now a plain method returning the
    iterator itself. Declaring it ``async def`` made it return a coroutine,
    which violates the async-iterator protocol (PEP 492 as amended in Python
    3.5.2) and makes ``async for`` fail on Python 3.8+.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pagination state; mirrors the synchronous PageIterator's loop locals.
        self._is_stop = False
        self._current_kwargs = self._op_kwargs
        self._previous_next_token = None
        self._next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        self._total_items = 0
        self._first_request = True
        self._primary_result_key = self.result_keys[0]
        self._starting_truncation = 0
        self._inject_starting_params(self._current_kwargs)

    async def next_page(self):
        """Await and return the next raw page response, or None when exhausted.

        Handles starting-token truncation on the first page, max_items
        truncation, resume-token bookkeeping, and detection of a repeated
        next token (raises PaginationError).
        """
        if self._is_stop:
            return None

        response = await self._make_request(self._current_kwargs)
        parsed = self._extract_parsed_response(response)
        if self._first_request:
            # The first request is handled differently. We could
            # possibly have a resume/starting token that tells us where
            # to index into the retrieved page.
            if self._starting_token is not None:
                self._starting_truncation = self._handle_first_request(
                    parsed, self._primary_result_key,
                    self._starting_truncation)
            self._first_request = False

        self._record_non_aggregate_key_values(parsed)
        current_response = self._primary_result_key.search(parsed)
        if current_response is None:
            current_response = []
        num_current_response = len(current_response)
        truncate_amount = 0
        if self._max_items is not None:
            truncate_amount = (self._total_items + num_current_response) \
                - self._max_items

        if truncate_amount > 0:
            # This page overshoots max_items: trim it and stop iterating.
            self._truncate_response(parsed, self._primary_result_key,
                                    truncate_amount, self._starting_truncation,
                                    self._next_token)
            self._is_stop = True
            return response
        else:
            self._total_items += num_current_response
            self._next_token = self._get_next_token(parsed)
            if all(t is None for t in self._next_token.values()):
                # Service signalled no further pages.
                self._is_stop = True
                return response
            if self._max_items is not None and \
                    self._total_items == self._max_items:
                # We're on a page boundary so we can set the current
                # next token to be the resume token.
                self.resume_token = self._next_token
                self._is_stop = True
                return response
            if self._previous_next_token is not None and \
                    self._previous_next_token == self._next_token:
                message = ("The same next token was received "
                           "twice: %s" % self._next_token)
                raise PaginationError(message=message)
            self._inject_token_into_kwargs(self._current_kwargs,
                                           self._next_token)
            self._previous_next_token = self._next_token
            return response

    def __iter__(self):
        # Synchronous iteration is deliberately unsupported; use ``async for``.
        raise NotImplementedError

    def __aiter__(self):
        # Must synchronously return the async iterator itself (see class docs).
        return self

    async def __anext__(self):
        if self._is_stop:
            raise StopAsyncIteration  # noqa
        return await self.next_page()

    def result_key_iters(self):
        # Per-result-key sub-iterators are not implemented in the async port.
        raise NotImplementedError

    async def build_full_result(self):
        """Drain every page and merge them into one aggregated result dict."""
        complete_result = {}
        async for response in self:
            page = response
            # We want to try to catch operation object pagination
            # and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_responsed).
            # We want the parsed_response as that is what the page iterator
            # uses. We can remove it though once operation objects are removed.
            if isinstance(response, tuple) and len(response) == 2:
                page = response[1]
            # We're incrementally building the full response page
            # by page. For each page in the response we need to
            # inject the necessary components from the page
            # into the complete_result.
            for result_expression in self.result_keys:
                # In order to incrementally update a result key
                # we need to search the existing value from complete_result,
                # then we need to search the _current_ page for the
                # current result key value. Then we append the current
                # value onto the existing value, and re-set that value
                # as the new value.
                result_value = result_expression.search(page)
                if result_value is None:
                    continue
                existing_value = result_expression.search(complete_result)
                if existing_value is None:
                    # Set the initial result
                    set_value_from_jmespath(
                        complete_result, result_expression.expression,
                        result_value)
                    continue
                # Now both result_value and existing_value contain something
                if isinstance(result_value, list):
                    existing_value.extend(result_value)
                elif isinstance(result_value, (int, float, str)):
                    # Modify the existing result with the sum or concatenation
                    set_value_from_jmespath(
                        complete_result, result_expression.expression,
                        existing_value + result_value)
        merge_dicts(complete_result, self.non_aggregate_part)
        if self.resume_token is not None:
            complete_result['NextToken'] = self.resume_token
        return complete_result
| {
"repo_name": "quantmind/pulsar-pusher",
"path": "cloud/asyncbotocore/paginate.py",
"copies": "2",
"size": "6121",
"license": "bsd-3-clause",
"hash": -5166071308594861000,
"line_mean": 44.6791044776,
"line_max": 79,
"alpha_frac": 0.5704950172,
"autogenerated": false,
"ratio": 4.708461538461538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 134
} |
# Adapted from https://github.com/ansible/ansible/blob/devel/plugins/callbacks/log_plays.py
# And https://gist.github.com/cliffano/9868180
import os
import time
import json
from datetime import datetime
# Per-run date stamp (YYYY-MM-DD) embedded in every log file name.
datenow = datetime.now()
datenow = datenow.strftime('%Y-%m-%d')

# Import-time side effect: ensure the log directory exists before any hook fires.
if not os.path.exists("/var/log/ansible/hosts/html"):
    os.makedirs("/var/log/ansible/hosts/html")

# Result-dict keys worth rendering into the HTML log, in display order.
FIELDS = ['cmd', 'command', 'start', 'end', 'delta', 'msg', 'stdout', 'stderr']
def human_log(res, host):
    """Append *res* as a simple HTML fragment to this host's daily log file.

    Writes an <h1> header for the host/date, then one '<b>field</b>: <p>value</p>'
    pair for every known FIELDS key present in the result dict.

    Fixes in this revision: the log file is opened once with a context manager
    instead of being reopened for every field, the path is built once (the
    original also had a 'filnename' typo), and the dict check uses isinstance().
    """
    filename = os.path.join("/var/log/ansible/hosts/html", host + datenow + ".html")
    with open(filename, "a") as fd:
        fd.write("<h1>" + host + " " + datenow + "</h1>")
        if isinstance(res, dict):
            for field in FIELDS:
                if field in res:
                    fd.write('<b>{0}</b>: <p>{1}</p>'.format(field, res[field]))
    print("HTML log created at " + filename)
class CallbackModule(object):
    """
    Logs playbook results, per host, as a basic html file in
    /var/log/ansible/hosts/html/.

    Only hooks carrying a per-host result dict are logged (through the
    module-level human_log helper); the remaining hooks are deliberate no-ops
    that must exist because Ansible calls each name on registered plugins.
    """
    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        human_log(res, host)

    def runner_on_ok(self, host, res):
        human_log(res, host)

    def runner_on_error(self, host, msg):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        human_log(res, host)

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        human_log(res, host)

    def runner_on_async_ok(self, host, res, jid):
        human_log(res, host)

    def runner_on_async_failed(self, host, res, jid):
        human_log(res, host)

    # Playbook-level hooks below have no per-host result dict to log.
    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, pattern):
        pass

    def playbook_on_stats(self, stats):
        pass
"repo_name": "petems/ansible-html",
"path": "callback_plugins/html_logs.py",
"copies": "1",
"size": "2482",
"license": "mit",
"hash": 7810453140946592000,
"line_mean": 24.0808080808,
"line_max": 142,
"alpha_frac": 0.6543110395,
"autogenerated": false,
"ratio": 3.0794044665012406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9174955673609779,
"avg_score": 0.01175196647829217,
"num_lines": 99
} |
# adapted from https://github.com/ashabbir/Traceroute/blob/master/icmppinger.py
# with more help from https://blogs.oracle.com/ksplice/entry/learning_by_doing_writing_your
# and from http://en.wikipedia.org/wiki/Binary_search_algorithm#Iterative
# and from http://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
from socket import *
import socket
import os
import sys
import struct
import time
import select
# Seconds to wait for each ICMP reply.
TIMEOUT = 2.0
# Probe attempts per TTL value.
TRIES = 2
# Hard ceiling for the TTL binary-search upper bound.
ABSOLUTE_TTL_MAX = 64
# When truthy, print each probe made during the binary-search phase.
DEBUG_MODE = 1
def main():
    """Run a binary-search traceroute for every IP listed in targets.txt."""
    with open('targets.txt') as target_file:
        targets = target_file.readlines()
    for target in targets:
        binary_traceroute(target.strip('\n'))
# calculates the checksum of the packet, returns the checksum
def checksum(given_string):
    """Return the 16-bit Internet checksum (RFC 1071 style) of given_string.

    Python 2 only: bytes are read with ord() from a str, and the 0xffffffffL
    long literal keeps the running sum unsigned.
    """
    checksum_val = 0
    # Sum the string as 16-bit little-endian words; a trailing odd byte is
    # handled separately below.
    count_upper_bound = (len(given_string) / 2) * 2
    count = 0
    while count < count_upper_bound:
        current_value = ord(given_string[count + 1]) * 256 + ord(given_string[count])
        count += 2
        checksum_val += current_value
        checksum_val &= 0xffffffffL
    # Fold in the final odd byte, if the length was odd.
    if count_upper_bound < len(given_string):
        checksum_val += ord(given_string[len(given_string) - 1])
        checksum_val &= 0xffffffffL
    # Fold the 32-bit sum into 16 bits (add carries back in) ...
    checksum_val = (checksum_val >> 16) + (checksum_val & 0xffff)
    checksum_val += (checksum_val >> 16)
    # ... take the one's complement, mask to 16 bits ...
    answer = ~checksum_val
    answer &= 0xffff
    # ... and byte-swap into network order.
    answer = answer >> 8 | (answer << 8 & 0xff00)
    return answer
# builds a packet, with checksum, returns the created packet
def build_packet():
    """Build an ICMP echo-request packet whose payload is the current time.

    Header layout (struct 'bbHHh'): type=8 (echo request), code=0, checksum,
    identifier (low 16 bits of the pid), sequence=1. The checksum is computed
    over a zero-checksum header first, then the header is rebuilt with the
    real value.
    """
    checksum_val = 0
    header = struct.pack("bbHHh", 8, 0, checksum_val, os.getpid() & 0xFFFF, 1)
    data = struct.pack("d", time.time())
    checksum_val = checksum(header + data)
    # NOTE(review): on macOS the value is additionally masked to unsigned
    # 16 bits -- presumably to avoid a signed htons result; confirm.
    if sys.platform == 'darwin':
        checksum_val = socket.htons(checksum_val) & 0xffff
    else:
        checksum_val = socket.htons(checksum_val)
    header = struct.pack("bbHHh", 8, 0, checksum_val, os.getpid() & 0xFFFF, 1)
    packet = header + data
    return packet
# gets the hostname of a given IP, returns the hostname
def get_name(host_ip):
    """Format *host_ip* as 'ip (hostname)', falling back to 'ip (ip)'.

    Any failure during reverse DNS lookup (unresolvable address, network
    error) yields the fallback form instead of raising.
    """
    try:
        resolved = socket.gethostbyaddr(host_ip)[0]
    except Exception:
        resolved = host_ip
    return '{0} ({1})'.format(host_ip, resolved)
# probes the host at the given ttl, using the route specified from the given IP.
# returns a string representation of the response, and the ICMP return value
def probe(ip, ttl):
    """Send up to TRIES UDP probes to *ip* with the given TTL and listen for
    the ICMP reply on a raw socket (requires root for SOCK_RAW).

    Returns (list of human-readable result strings, icmp_type), where
    icmp_type is 11 (TTL expired), 3 (destination unreachable -- i.e. the
    probe arrived), 0 (echo reply), or -1 if no reply was decoded.
    """
    time_remaining = TIMEOUT
    return_array = []
    icmp_type = -1
    for tries in xrange(TRIES):
        # UDP sender carrying the probe TTL; raw ICMP socket catches the reply.
        send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.getprotobyname("udp"))
        send_socket.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, struct.pack('I', ttl))
        send_socket.settimeout(TIMEOUT)
        recv_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname("icmp"))
        recv_socket.settimeout(TIMEOUT)
        # NOTE(review): recv_socket is never closed; only send_socket is
        # released in the finally block -- confirm whether this leak matters.
        try:
            # 33434 is the conventional traceroute destination port.
            send_socket.sendto(build_packet(), (ip, 33434))
            t = time.time()
            is_not_timeout = select.select([recv_socket], [], [], time_remaining)
            time_in_select = (time.time() - t)
            if not is_not_timeout[0]:
                return_array.append(" A Timeout occurred (type 1)")
            received_packet, address = recv_socket.recvfrom(1024)
            time_received = time.time()
            time_remaining -= time_in_select
            if time_remaining <= 0:
                return_array.append(" A Timeout occurred (type 2)")
        except timeout:
            # Socket-level timeout: report it with icmp_type 11 (as if expired).
            return_array.append(" A Timeout occurred (type 3)")
            return return_array, 11
        else:
            icmp_header_content = received_packet[20:28]  # grab the header from the packet
            # unpack the ICMP header: unsigned short, unsigned short, signed short
            icmp_type, a, b, c, d = struct.unpack("bbHHh", icmp_header_content)
            readable_name = get_name(address[0])
            if icmp_type == 11:  # time exceeded
                return_array.append("11: %d rtt=%.0f ms %s" % (ttl, (time_received - t) * 1000, readable_name))
            elif icmp_type == 3:  # destination unreachable, interpreted as success, oddly enough
                return_array.append("03: %d rtt=%.0f ms %s" % (ttl, (time_received - t) * 1000, readable_name))
            elif icmp_type == 0:  # echo reply, doesn't really happen
                packet_bytes = struct.calcsize("d")
                time_sent = struct.unpack("d", received_packet[28:28 + packet_bytes])[0]
                return_array.append("00: %d rtt=%.0f ms %s" % (ttl, (time_received - time_sent) * 1000, readable_name))
                # NOTE(review): bare return yields None rather than the usual
                # (return_array, icmp_type) tuple -- confirm this is intended.
                return
            else:
                return_array.append(" A Timeout occurred (type 4)")
            # A reply was received and classified; stop retrying.
            break
        finally:
            send_socket.close()
    return return_array, icmp_type
def get_ip(hostname):
    """Resolve *hostname* to its IPv4 address string via DNS."""
    return socket.gethostbyname(hostname)
# runs a traceroute against the host_name, using a binary search to calculate the optimal TTL
# algorithm adapted from: http://en.wikipedia.org/wiki/Binary_search_algorithm#Iterative
def binary_traceroute(host_ip):
rapid_increase_phase = 1
ttl_ub = 16 # initialized to an invalid value
ttl_lb = 0
ttl_current = 16
print "**********BEGIN BINARY SEARCH PHASE**********"
while ttl_ub - ttl_lb > 1 or rapid_increase_phase:
_, icmp_value = probe(host_ip, ttl_current)
if DEBUG_MODE:
print "probed %s with %d hops, returning an icmp of %s" % (host_ip, ttl_current, icmp_value)
# icmp_value of 3 (dest_unreachable) indicates ttl was too high, OR just right (tricky)
# icmp_value of 11 (ttl_expired) indicates ttl was too low, and packet was dropped before destination
if icmp_value is 11 and rapid_increase_phase is 1:
ttl_lb = ttl_current*2
ttl_ub *= 2
if ttl_ub >= ABSOLUTE_TTL_MAX:
ttl_ub = ABSOLUTE_TTL_MAX
print "TTL Maximum exceeded!"
break
elif icmp_value is 11:
ttl_lb = ttl_current
elif icmp_value is 3:
rapid_increase_phase = 0
ttl_ub = ttl_current
ttl_current = (ttl_lb + ttl_ub) / 2
print "**********END BINARY SEARCH PHASE**********"
# exited while loop, run the traceroute with ttl_ub.
print "**********BEGIN TRACEROUTE PHASE**********"
print "ICMP_Value Hop_number rtt host_IP(hostname)"
for i in xrange(1, ttl_ub+1):
output, _ = probe(host_ip, i)
print output[0]
if _ is 3:
break
print "**********END TRACEROUTE PHASE**********"
# Entry point: only run the traceroute driver when executed as a script.
if __name__ == '__main__':
    main()
"repo_name": "raidancampbell/ICMP-traceroute",
"path": "rttMeasurement.py",
"copies": "1",
"size": "6695",
"license": "mit",
"hash": -1113825537242093200,
"line_mean": 39.5818181818,
"line_max": 120,
"alpha_frac": 0.6034353996,
"autogenerated": false,
"ratio": 3.595596133190118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46990315327901183,
"avg_score": null,
"num_lines": null
} |
#Adapted from https://github.com/FakeNewsChallenge/fnc-1/blob/master/scorer.py
#Original credit - @bgalbraith
# Canonical stance labels, in confusion-matrix row/column order.
LABELS = ['agree', 'disagree', 'discuss', 'unrelated']
# Binary related/unrelated label names (not referenced elsewhere in this module).
LABELS_RELATED = ['unrelated','related']
# The three stances that count as "related" for partial scoring credit.
RELATED = LABELS[0:3]
def score_submission(gold_labels, test_labels, labels=LABELS, relatedLabels = RELATED):
    """Compute the FNC-style weighted score and confusion matrix.

    Per (gold, predicted) pair: +0.25 for an exact label match, a further
    +0.50 when that matched label is a related stance, and +0.25 whenever
    both labels are related stances. Returns (score, confusion_matrix) with
    rows indexed by gold label and columns by predicted label.
    """
    score = 0.0
    cm_side = len(labels)
    cm = [[0] * cm_side for _ in range(cm_side)]
    for gold, predicted in zip(gold_labels, test_labels):
        if gold == predicted:
            score += 0.25
            if gold in relatedLabels:
                score += 0.50
        if gold in relatedLabels and predicted in relatedLabels:
            score += 0.25
        cm[labels.index(gold)][labels.index(predicted)] += 1
    return score, cm
def print_confusion_matrix(cm, labels=LABELS, relatedLabels = RELATED):
    """Pretty-print confusion matrix *cm* as an ASCII table on stdout."""
    row_fmt = "|{:^11}" + "|{:^11}" * len(labels) + "|"
    header = row_fmt.format('', *labels)
    divider = "-" * len(header)
    lines = [divider, header, divider]
    hit = 0
    total = 0
    for i, row in enumerate(cm):
        # Diagonal entries are the correctly classified counts.
        hit += row[i]
        total += sum(row)
        lines.append(row_fmt.format(labels[i], *row))
    lines.append(divider)
    print('\n'.join(lines))
def report_score(actual, predicted, labels=LABELS, relatedLabels = RELATED):
    """Print the confusion matrix and the relative score, then return the
    score as a percentage of the best achievable score."""
    score, cm = score_submission(actual, predicted, labels, relatedLabels)
    # A perfect submission scores the gold labels against themselves.
    best_score, _ = score_submission(actual, actual, labels, relatedLabels)
    print_confusion_matrix(cm, labels, relatedLabels)
    print("Score: " + str(score) + " out of " + str(best_score) + "\t(" + str(score * 100 / best_score) + "%)")
    return score * 100 / best_score
# Smoke test: score a small hand-made prediction set against gold labels.
if __name__ == "__main__":
    actual = [0,0,0,0,1,1,0,3,3]
    predicted = [0,0,0,0,1,1,2,3,3]
    report_score([LABELS[e] for e in actual],[LABELS[e] for e in predicted])
"repo_name": "lolotobg/FakeNewsChallenge",
"path": "classic/utils/score.py",
"copies": "1",
"size": "2044",
"license": "mit",
"hash": -7910759830254728000,
"line_mean": 31.9838709677,
"line_max": 104,
"alpha_frac": 0.5807240705,
"autogenerated": false,
"ratio": 3.120610687022901,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4201334757522901,
"avg_score": null,
"num_lines": null
} |
# original copyright:
# Copyright (c) 2012 Kyle J. Temkin
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class TapState(object):
    """One state of the JTAG TAP controller state machine.

    Holds the state's name and the names of its two successor states: the one
    reached with TMS=0 and the one reached with TMS=1 on the next TCK edge.
    Successors are stored by name and resolved lazily against the Tap class.
    """
    def __init__(self, name, onZero, onOne):
        object.__init__(self)
        self._name = name
        self._transitions = {0: onZero, 1: onOne}

    def next_state(self, on):
        """Return the Tap state reached by clocking with TMS equal to *on*."""
        return getattr(Tap, self._transitions[on])

    def advance_toward(self, state):
        """Return (tms_bit, next_state_name) for one step toward *state*.

        Greedy heuristic: prefer TMS=0 when the zero-branch hits the target,
        when the one-branch would loop back to this state, or when it would
        fall into RESET; SELECT_DR heads into the DR column for any *_DR goal.
        """
        if self._name == 'SELECT_DR' and state._name.endswith('_DR'):
            return (0, self._transitions[0])
        zero_succ = getattr(Tap, self._transitions[0])
        one_succ = getattr(Tap, self._transitions[1])
        if zero_succ == state or one_succ == self or self._transitions[1] == 'RESET':
            return (0, self._transitions[0])
        return (1, self._transitions[1])
class Tap(object):
    """The 16 states of the IEEE 1149.1 JTAG TAP controller.

    Each attribute is a TapState whose two arguments name the successor
    reached with TMS=0 and TMS=1 respectively on the next TCK edge. The DR
    and IR columns are structurally parallel.
    """
    RESET = TapState('RESET', 'IDLE', 'RESET')
    IDLE = TapState('IDLE', 'IDLE', 'SELECT_DR')
    SELECT_DR = TapState('SELECT_DR', 'CAPTURE_DR', 'SELECT_IR')
    CAPTURE_DR = TapState('CAPTURE_DR', 'SHIFT_DR', 'EXIT1_DR')
    SHIFT_DR = TapState('SHIFT_DR', 'SHIFT_DR', 'EXIT1_DR')
    EXIT1_DR = TapState('EXIT1_DR', 'PAUSE_DR', 'UPDATE_DR')
    PAUSE_DR = TapState('PAUSE_DR', 'PAUSE_DR', 'EXIT2_DR')
    EXIT2_DR = TapState('EXIT2_DR', 'SHIFT_DR', 'UPDATE_DR')
    UPDATE_DR = TapState('UPDATE_DR', 'IDLE', 'SELECT_DR')
    SELECT_IR = TapState('SELECT_IR', 'CAPTURE_IR', 'RESET')
    CAPTURE_IR = TapState('CAPTURE_IR', 'SHIFT_IR', 'EXIT1_IR')
    SHIFT_IR = TapState('SHIFT_IR', 'SHIFT_IR', 'EXIT1_IR')
    EXIT1_IR = TapState('EXIT1_IR', 'PAUSE_IR', 'UPDATE_IR')
    PAUSE_IR = TapState('PAUSE_IR', 'PAUSE_IR', 'EXIT2_IR')
    EXIT2_IR = TapState('EXIT2_IR', 'SHIFT_IR', 'UPDATE_IR')
    UPDATE_IR = TapState('UPDATE_IR', 'IDLE', 'SELECT_DR')
# Public API of this module.
__all__ = ['TapState', 'Tap']
| {
"repo_name": "asgeir/pydigilent",
"path": "pydigilent/util/djtg/tap.py",
"copies": "1",
"size": "2815",
"license": "mit",
"hash": -5258900534212269000,
"line_mean": 39.7971014493,
"line_max": 72,
"alpha_frac": 0.6788632327,
"autogenerated": false,
"ratio": 3.0432432432432432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4222106475943243,
"avg_score": null,
"num_lines": null
} |
# adapted from: https://github.com/kuza55/keras-extras/blob/master/utils/multi_gpu.py
# Original license: Apache License 2.0
# https://github.com/kuza55/keras-extras/blob/master/LICENSE
#
# - data-parallelism at the level of computing predictions and gradients
# in parallel
# - no gradient averaging
# - feed_dict, no queues
#
## Example usage:
#
# # parameter server device
# ps_device = '/gpu:0'
#
# def basic_model():
# #...
#
# with tf.device(ps_device):
# model = make_parallel(basic_model(), gpu_count, ps_device)
# model.compile(loss='categorical_crossentropy', optimizer='sgd')
from keras.layers.merge import concatenate
from keras.layers import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count, ps_device=None):
    """Return a data-parallel wrapper of *model* replicated over *gpu_count* GPUs.

    Each model input is split into gpu_count equal batch slices (via tf.slice
    inside a Lambda layer); replica i processes slice i on '/gpu:i', and the
    per-replica outputs are concatenated back together on *ps_device*
    (default '/gpu:0'). No gradient averaging happens here -- the caller
    compiles the merged model as usual.
    """
    # Nothing to parallelize with a single device.
    if gpu_count <= 1:
        return model
    if ps_device is None:
        ps_device = '/gpu:0'

    def get_slice(data, idx, parts):
        # Slice `data` along the batch axis into `parts` pieces; return piece `idx`.
        # NOTE(review): integer division means batches not divisible by `parts`
        # silently drop the remainder rows -- confirm acceptable.
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    # One collection list per model output, to gather each tower's tensor.
    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on parameter server
    with tf.device(ps_device):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(outputs, axis=0))

    return Model(inputs=model.inputs, outputs=merged)
| {
"repo_name": "rossumai/keras-multi-gpu",
"path": "keras_tf_multigpu/kuza55.py",
"copies": "1",
"size": "2410",
"license": "mit",
"hash": -1185708536268974800,
"line_mean": 32.0136986301,
"line_max": 85,
"alpha_frac": 0.5937759336,
"autogenerated": false,
"ratio": 3.7422360248447206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48360119584447203,
"avg_score": null,
"num_lines": null
} |
# adapted from https://github.com/lisa-lab/DeepLearningTutorials
from collections import OrderedDict
import copy
import os
import re
import codecs
import random
import timeit
from hyperopt import STATUS_OK
import numpy as np
import pandas as pd
from scipy import stats
import theano
from theano import tensor as T
import common
from ..util import defines
from ..util import file_handling as fh
from ..experiment import reusable_holdout
from ..experiment import evaluation
# Otherwise the deepcopy fails
import sys
# Raise the recursion limit so copy.deepcopy of the (deeply nested) model
# below does not hit the default limit.
sys.setrecursionlimit(5000)
# NOTE(review): this creates a plain module-level variable named THEANO_FLAGS;
# it presumably does NOT configure Theano, which reads the THEANO_FLAGS
# *environment variable* before `import theano` — confirm intent.
THEANO_FLAGS='floatX=float32'
# utils functions
def shuffle(lol, seed=None):
    """Shuffle every list in `lol` in place, all in the same order.

    lol  :: list of lists (e.g. inputs, labels, extra features)
    seed :: value passed to random.seed before each shuffle

    Re-seeding the RNG identically before shuffling each list guarantees
    that parallel lists stay aligned after shuffling.
    """
    for sequence in lol:
        random.seed(seed)
        random.shuffle(sequence)
def contextwin(l, win):
    """Return the list of context windows around each position of `l`.

    win :: odd int, the size of the context window
    l   :: sequence of word indexes composing a sentence

    Each element of the result is the `win`-length window centred on the
    corresponding word, padded with -1 beyond the sentence boundaries.
    """
    assert (win % 2) == 1
    assert win >= 1
    words = list(l)
    pad = (win // 2) * [-1]
    padded = pad + words + pad
    windows = [padded[start:start + win] for start in range(len(words))]
    assert len(windows) == len(words)
    return windows
class RNN(object):
''' elman neural net model '''
def __init__(self, nh, nc, ne, de, cs, init_scale=0.2, initial_embeddings=None,
rnn_type='basic', # 'basic', 'GRU', or 'LSTM'
pooling_method='max', #'max', 'mean', 'attention1' or 'attention2',
extra_input_dims=0, train_embeddings=True,
bidirectional=True, bi_combine='concat' # 'concat', 'sum', or 'mean'
):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# initialize parameters
dx = de * cs
if extra_input_dims > 0:
dx += extra_input_dims
bi = 1
if bidirectional and bi_combine == 'concat':
bi = 2
if initial_embeddings is None:
self.emb = theano.shared(name='embeddings',
value=init_scale * np.random.uniform(-1.0, 1.0,
(ne, de)).astype(theano.config.floatX))
#(ne+1, de)) # add one for padding at the end
else:
self.emb = theano.shared(name='embeddings', value=initial_embeddings.astype(theano.config.floatX))
if extra_input_dims > 0:
self.W_drld = theano.shared(name='W_drld', value=init_scale * np.random.uniform(-1.0, 1.0, (1, nh))
.astype(theano.config.floatX))
# common paramters (feeding into hidden node)
self.W_xh = theano.shared(name='W_xh', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hh = theano.shared(name='W_hh', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_h = theano.shared(name='b_h', value=np.array(np.random.uniform(0.0, 1.0, nh),
dtype=theano.config.floatX))
# output layer parameters
self.W_s = theano.shared(name='W_s', value=init_scale * np.random.uniform(-1.0, 1.0, (nh * bi, nc))
.astype(theano.config.floatX))
self.b_s = theano.shared(name='b_s', value=np.zeros(nc, dtype=theano.config.floatX))
# temporary parameters
#self.h_i_f = theano.shared(name='h_i_f', value=np.zeros(nh, dtype=theano.config.floatX))
if bidirectional:
self.h_i_r = theano.shared(name='h_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
# Attention parameters
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.W_a = theano.shared(name='W_a', value=init_scale * np.random.uniform(-1.0, 1.0, (bi*nh, 1))
.astype(theano.config.floatX))
self.b_a = theano.shared(name='b_a', value=0.0)
# GRU parameters
if rnn_type == 'GRU':
self.W_xr = theano.shared(name='W_xr', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hr = theano.shared(name='W_hr', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_r = theano.shared(name='b_r', value=np.zeros(nh, dtype=theano.config.floatX))
self.W_xz = theano.shared(name='W_xz', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hz = theano.shared(name='W_hz', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_z = theano.shared(name='b_z', value=np.zeros(nh, dtype=theano.config.floatX))
# LSTM paramters
if rnn_type == 'LSTM':
# forget gate (needs special initialization)
self.W_xf = theano.shared(name='W_xf', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hf = theano.shared(name='W_hf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_cf = theano.shared(name='W_cf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_f = theano.shared(name='b_f', value=np.array(np.random.uniform(0.0, 1.0, nh),
dtype=theano.config.floatX))
# input gate
self.W_xi = theano.shared(name='W_xi', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hi = theano.shared(name='W_hi', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_ci = theano.shared(name='W_ci', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_i = theano.shared(name='b_i', value=np.zeros(nh, dtype=theano.config.floatX))
# output gate
self.W_xo = theano.shared(name='W_xo', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_ho = theano.shared(name='W_ho', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_co = theano.shared(name='W_co', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_o = theano.shared(name='b_o', value=np.zeros(nh, dtype=theano.config.floatX))
# use normal ->hidden weights for memory cell
# temp
self.c_i_f = theano.shared(name='c_i_f', value=np.zeros(nh, dtype=theano.config.floatX))
if bidirectional:
self.c_i_r = theano.shared(name='c_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
self.params = [self.W_xh, self.W_hh, self.b_h,
self.W_s, self.b_s]
#self.params += [self.h_i_f]
if train_embeddings:
self.params += [self.emb]
if pooling_method == 'attention':
self.params += [self.W_a, self.b_a]
if rnn_type == 'GRU':
self.params += [self.W_xr, self.W_hr, self.b_r,
self.W_xz, self.W_hz, self.b_z]
if rnn_type == 'LSTM':
self.params += [self.W_xf, self.W_hf, self.W_cf, self.b_f,
self.W_xi, self.W_hi, self.W_ci, self.b_i,
self.W_xo, self.W_ho, self.W_co, self.b_o,
self.c_i_f]
if bidirectional:
self.params += [self.c_i_r]
if bidirectional:
self.params += [self.h_i_r]
# create an X object based on the size of the object at the index [elements, emb_dim * window]
idxs = T.imatrix()
if extra_input_dims:
extra = T.imatrix()
x = T.concatenate([self.emb[idxs].reshape((idxs.shape[0], de*cs)),
T.repeat(extra, idxs.shape[0], axis=0)], axis=1)
else:
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
# create a vector for y
y = T.ivector('y')
def recurrence_basic(x_t, h_tm1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
return h_t
def recurrence_basic_reverse(x_t, h_tp1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
return h_t
def recurrence_gru(x_t, h_tm1):
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tm1, self.W_hr) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tm1, self.W_hz) + self.b_z)
g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tm1, self.W_hh) + self.b_h)
h_t = (1 - z_t) * h_tm1 + z_t * g_t
return h_t
def recurrence_gru_reverse(x_t, h_tp1):
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tp1, self.W_hr) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tp1, self.W_hz) + self.b_z)
g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tp1, self.W_hh) + self.b_h)
h_t = (1 - z_t) * h_tp1 + z_t * g_t
return h_t
def recurrence_lstm(x_t, h_tm1, c_tm1):
i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + T.dot(c_tm1, self.W_ci) + self.b_i)
f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + T.dot(c_tm1, self.W_cf) + self.b_f)
d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
c_t = f_t * c_tm1 + i_t * d_t
o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
h_t = o_t * c_t
return [h_t, c_t]
def recurrence_lstm_reverse(x_t, h_tp1, c_tp1):
i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tp1, self.W_hi) + T.dot(c_tp1, self.W_ci) + self.b_i)
f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tp1, self.W_hf) + T.dot(c_tp1, self.W_cf) + self.b_f)
d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
c_t = f_t * c_tp1 + i_t * d_t
o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tp1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
h_t = o_t * c_t
return [h_t, c_t]
h_r = None
if rnn_type == 'GRU':
h_f, _ = theano.scan(fn=recurrence_gru, sequences=x, outputs_info=[self.h_i_f], n_steps=x.shape[0])
if bidirectional:
h_r, _ = theano.scan(fn=recurrence_gru_reverse, sequences=x, outputs_info=[self.h_i_r],
go_backwards=True)
elif rnn_type == 'LSTM':
[h_f, c_f], _ = theano.scan(fn=recurrence_lstm, sequences=x,
outputs_info=[self.h_i_f, self.c_i_f], n_steps=x.shape[0])
if bidirectional:
[h_r, c_r], _ = theano.scan(fn=recurrence_lstm_reverse, sequences=x,
outputs_info=[self.h_i_r, self.c_i_r], go_backwards=True)
else:
h_f, _ = theano.scan(fn=recurrence_basic, sequences=x,
outputs_info=[T.alloc(np.array(0.), nh)],
n_steps=x.shape[0])
if bidirectional:
h_r, _ = theano.scan(fn=recurrence_basic_reverse, sequences=x, outputs_info=[self.h_i_r],
go_backwards=True)
if bidirectional:
# reverse the second hidden layer so it lines up with the first
h_r = h_r[::-1, :]
if bi_combine == 'max':
h = T.maximum(h_f, h_r)
elif bi_combine == 'mean':
h = (h_f + h_r) / 2.0
else: # concatenate
#h = theano.printing.Print('h:')(T.concatenate([h_fp, h_rp], axis=1))
h = T.concatenate([h_f, h_r], axis=1)
else:
h = h_f
a_sum = T.sum([1])
if pooling_method == 'attention1': # combine hidden nodes, then transform and sigmoid
# SOFTMAX normalizes across the row (axis=1)
a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a).T) # [1, n_elements]: normalized vector
a_sum = T.sum(a) # to check a is normalized
p_y_given_x_sentence = T.nnet.sigmoid(T.dot(T.dot(a, h), self.W_s) + self.b_s) # [1, nc] in R(0,1)
y_pred = T.max(p_y_given_x_sentence, axis=0) > 0.5 # note, max is just to coerce into proper shape
element_weights = T.outer(a, p_y_given_x_sentence) # [ne, nc]
elif pooling_method == 'attention2': # transform hidden nodes, sigmoid, then combine
a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a).T) # [1, n_elements]: normalized vector
a_sum = T.sum(a)
temp = T.nnet.sigmoid(T.dot(h, self.W_s) + self.b_s) # [ne x nc]
p_y_given_x_sentence = T.dot(a, temp) # [1, nc] in R(0,1)
y_pred = T.max(p_y_given_x_sentence, axis=0) > 0.5 # note, max is just to coerce into proper shape
element_weights = T.repeat(a.T, nc, axis=1) * temp # [ne, nc]
elif pooling_method == 'mean':
s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s)) # [n_elements, nc] in R(0,1)
p_y_given_x_sentence = T.mean(s, axis=0)
y_pred = p_y_given_x_sentence > 0.5
element_weights = s
else: # pooling_method == 'max'
s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s)) # [n_elements, nc] in R(0,1)
p_y_given_x_sentence = T.max(s, axis=0)
y_pred = p_y_given_x_sentence > 0.5
element_weights = s
# cost and gradients and learning rate
lr = T.scalar('lr_main')
lr_emb_fac = T.scalar('lr_emb')
sentence_nll = -T.sum(y * T.log(p_y_given_x_sentence) + (1-y)*T.log(1-p_y_given_x_sentence))
sentence_gradients = T.grad(sentence_nll, self.params)
sentence_updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, [lr_emb_fac *
sentence_gradients[0]]
+ sentence_gradients[1:]))
# theano functions to compile
if extra_input_dims > 0:
self.sentence_classify = theano.function(inputs=[idxs, extra], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, extra, y, lr, lr_emb_fac],
outputs=sentence_nll,
updates=sentence_updates)
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.a_sum_check = theano.function(inputs=[idxs, extra], outputs=a_sum)
else:
self.sentence_classify = theano.function(inputs=[idxs], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, y, lr, lr_emb_fac],
outputs=sentence_nll,
updates=sentence_updates)
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.a_sum_check = theano.function(inputs=[idxs], outputs=a_sum)
self.normalize = theano.function(inputs=[],
updates={self.emb: self.emb / T.sqrt((self.emb**2).sum(axis=1))
.dimshuffle(0, 'x')})
def classify(self, x, window_size, extra_input_dims=0, extra=None):
cwords = contextwin(x, window_size)
# make an array of these windows
words = map(lambda x: np.asarray(x).astype('int32'), cwords)
if extra_input_dims > 0:
extra = np.array(extra).astype('int32').reshape((1, extra_input_dims))
return self.sentence_classify(words, extra)
else:
return self.sentence_classify(words)
def train(self, x, y, window_size, learning_rate, emb_lr_factor, extra_input_dims=0, extra=None):
# concatenate words in a window
cwords = contextwin(x, window_size)
# make an array of these windows
words = map(lambda x: np.asarray(x).astype('int32'), cwords)
# train on these sentences and normalize
if extra_input_dims > 0:
extra = np.array(extra).astype('int32').reshape((1, extra_input_dims))
nll = self.sentence_train(words, extra, y, learning_rate, emb_lr_factor)
else:
nll = self.sentence_train(words, y, learning_rate, emb_lr_factor)
self.normalize()
return nll
def save(self, output_dir):
for param in self.params:
np.save(os.path.join(output_dir, param.name + '.npy'), param.get_value())
def load(self, input_dir):
for param in self.params:
param.set_value(np.load(os.path.join(input_dir, param.name + '.npy')))
def print_embeddings(self):
for param in self.params:
print param.name, param.get_value()
def main(params=None):
    """Train and evaluate the RNN over one or more dev folds.

    params: experiment-configuration dict; when None, a default dict is built
    and then overwritten by a saved best-parameters JSON file (hard-coded
    path).  Returns a hyperopt-style result dict keyed on 'loss'/'status'.

    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction of the apparent control flow.
    """
    if params is None:
        params = {
            'exp_name': 'minibatch_test',
            'test_fold': 0,
            'n_dev_folds': 1,
            'min_doc_thresh': 1,
            'initialize_word_vectors': True,
            'vectors': 'anes_word2vec',  # default_word2vec, anes_word2vec ...
            'word2vec_dim': 300,
            'init_scale': 0.2,
            'add_OOV': True,
            'win': 3,  # size of context window
            'add_DRLD': False,
            'rnn_type': 'basic',  # basic, GRU, or LSTM
            'n_hidden': 3,  # size of hidden units
            'pooling_method': 'max',  # max, mean, or attention1/2
            'bidirectional': False,
            'bi_combine': 'mean',  # concat, max, or mean
            'train_embeddings': True,
            'lr': 0.1,  # learning rate
            'lr_emb_fac': 0.2,  # factor to modify learning rate for embeddings
            'decay_delay': 5,  # number of epochs with no improvement before decreasing learning rate
            'decay_factor': 0.5,  # factor by which to multiply learning rate in case of delay
            'n_epochs': 10,
            'add_OOV_noise': False,
            'OOV_noise_prob': 0.01,
            'minibatch_size': 1,
            'ensemble': False,
            'save_model': True,
            'seed': 42,
            'verbose': 1,
            'reuse': False,
            'orig_T': 0.04,
            'tau': 0.01
        }
        # load params from a previous experiment
        # NOTE(review): this immediately replaces the defaults above with a
        # hard-coded, machine-specific path — confirm that is intentional.
        params = fh.read_json('/Users/dcard/Projects/CMU/ARK/guac/experiments/best_mod.json')
        params['exp_name'] += '_minibatch_16'
        params['n_hidden'] = int(params['n_hidden'])
        params['orig_T'] = 0.02
        params['tau'] = 0.005
    reuser = None
    if params['reuse']:
        # reusable-holdout masking of the validation metric
        reuser = reusable_holdout.ReuseableHoldout(T=params['orig_T'], tau=params['tau'])
    # print the configuration in sorted key order (Python 2: keys() is a list)
    keys = params.keys()
    keys.sort()
    for key in keys:
        print key, ':', params[key]
    # seed the random number generators
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    datasets = ['Democrat-Likes', 'Democrat-Dislikes', 'Republican-Likes', 'Republican-Dislikes']
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    best_valid_f1s = []
    best_test_f1s = []
    test_prediction_arrays = []
    # write the experiment configuration alongside its outputs
    output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'])
    output_filename = fh.make_filename(output_dir, 'params', 'json')
    fh.write_to_json(params, output_filename)
    for dev_fold in range(params['n_dev_folds']):
        print "dev fold =", dev_fold
        output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'], 'fold' + str(dev_fold))
        results = []
        all_data, words2idx, items, all_labels = common.load_data(datasets, params['test_fold'], dev_fold,
                                                                  params['min_doc_thresh'])
        train_xy, valid_xy, test_xy = all_data
        train_lex, train_y = train_xy
        valid_lex, valid_y = valid_xy
        test_lex, test_y = test_xy
        train_items, dev_items, test_items = items
        vocsize = len(words2idx.keys())
        idx2words = dict((k, v) for v, k in words2idx.iteritems())
        best_test_predictions = None
        n_sentences = len(train_lex)
        print "vocsize = ", vocsize, 'n_train', n_sentences
        codes = all_labels.columns
        n_items, n_codes = all_labels.shape
        # get the words in the sentences for the test and validation sets
        words_valid = [map(lambda x: idx2words[x], w) for w in valid_lex]
        groundtruth_test = test_y[:]
        words_test = [map(lambda x: idx2words[x], w) for w in test_lex]
        initial_embeddings = common.load_embeddings(params, words2idx)
        OOV_index = words2idx['__OOV__']
        emb_dim = initial_embeddings.shape[1]
        print 'emb_dim =', emb_dim
        extra_input_dims = 0
        if params['add_DRLD']:
            # two extra binary features: Likes/Dislikes and Democrat/Republican
            extra_input_dims = 2
        print "Building RNN"
        rnn = RNN(nh=params['n_hidden'],
                  nc=n_codes,
                  ne=vocsize,
                  de=emb_dim,
                  cs=params['win'],
                  extra_input_dims=extra_input_dims,
                  initial_embeddings=initial_embeddings,
                  init_scale=params['init_scale'],
                  rnn_type=params['rnn_type'],
                  train_embeddings=params['train_embeddings'],
                  pooling_method=params['pooling_method'],
                  bidirectional=params['bidirectional'],
                  bi_combine=params['bi_combine']
                  )
        # derive the extra binary features from the item names
        train_likes = [1 if re.search('Likes', i) else 0 for i in train_items]
        dev_likes = [1 if re.search('Likes', i) else 0 for i in dev_items]
        test_likes = [1 if re.search('Likes', i) else 0 for i in test_items]
        train_dem = [1 if re.search('Democrat', i) else 0 for i in train_items]
        dev_dem = [1 if re.search('Democrat', i) else 0 for i in dev_items]
        test_dem = [1 if re.search('Democrat', i) else 0 for i in test_items]
        train_extra = [[train_likes[i], train_dem[i]] for i, t in enumerate(train_items)]
        dev_extra = [[dev_likes[i], dev_dem[i]] for i, t in enumerate(dev_items)]
        test_extra = [[test_likes[i], test_dem[i]] for i, t in enumerate(test_items)]
        # train with early stopping on validation set
        best_f1 = -np.inf
        params['clr'] = params['lr']  # clr = current learning rate
        for e in xrange(params['n_epochs']):
            # shuffle
            shuffle([train_lex, train_y, train_extra], params['seed'])  # shuffle the input data
            params['ce'] = e  # store the current epoch
            tic = timeit.default_timer()
            #for i, (x, y) in enumerate(zip(train_lex, train_y)):
            for i, orig_x in enumerate(train_lex):
                n_words = len(orig_x)
                if params['add_OOV_noise']:
                    # randomly replace words with the OOV token as regularization
                    draws = np.random.rand(n_words)
                    x = [OOV_index if draws[idx] < params['OOV_noise_prob'] else orig_x[idx] for idx in range(n_words)]
                else:
                    x = orig_x
                y = train_y[i]
                extra = train_extra[i]
                if i == 0:
                    print ' '.join([idx2words[w] for w in train_lex[i]])
                if i == 0:
                    print x
                    print y
                nll = rnn.train(x, y, params['win'], params['clr'], params['lr_emb_fac'],
                                extra_input_dims, extra)
                # progress report every 100 sentences (equality holds iff i % 100 == 0)
                if float(i/100.0) == float(i//100):
                    print nll
                    print '[learning] epoch %i >> %2.2f%%' % (
                        e, (i + 1) * 100. / float(n_sentences)),
                    print 'completed in %.2f (sec) <<\r' % (timeit.default_timer() - tic),
                    sys.stdout.flush()
                #if i == 0:
                #    print ' '.join([idx2words[idx] for idx in orig_x])
                #    print rnn.classify(orig_x, params['win'], extra_input_dims, extra)
                # abort the whole run if training diverged
                if np.isnan(nll) or np.isinf(nll):
                    return {'loss': nll,
                            'final_test_f1': 0,
                            'valid_f1s': [0],
                            'test_f1s': [0],
                            'status': STATUS_OK
                            }
            # evaluation // back into the real world : idx -> words
            print ""
            #print rnn.classify((np.asarray(contextwin(train_lex[0], params['win'])).astype('int32')), train_likes[0], params['win'])
            #print rnn.classify(train_lex[0], params['win'], extra_input_dims, train_extra[0])
            #print rnn.get_element_weights(np.asarray(contextwin(train_lex[0], params['win'])).astype('int32'))
            #if params['pooling_method'] == 'attention1' or params['pooling_method'] == 'attention2':
            #    if extra_input_dims == 0:
            #        r = np.random.randint(0, len(train_lex))
            #        print r, rnn.a_sum_check(np.asarray(contextwin(train_lex[r], params['win'])).astype('int32'))
            """
            predictions_train = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
                                 for x in train_lex]
            predictions_test = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
                                for x in test_lex]
            predictions_valid = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
                                 for x in valid_lex]
            """
            #predictions_train = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in train_lex]
            #predictions_test = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in test_lex]
            #predictions_valid = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in valid_lex]
            predictions_train = [rnn.classify(x, params['win'],
                                              extra_input_dims, train_extra[i]) for i, x in enumerate(train_lex)]
            predictions_test = [rnn.classify(x, params['win'],
                                             extra_input_dims, test_extra[i]) for i, x in enumerate(test_lex)]
            predictions_valid = [rnn.classify(x, params['win'],
                                              extra_input_dims, dev_extra[i]) for i, x in enumerate(valid_lex)]
            train_f1 = common.calc_mean_f1(predictions_train, train_y)
            test_f1 = common.calc_mean_f1(predictions_test, test_y)
            valid_f1 = common.calc_mean_f1(predictions_valid, valid_y)
            if reuser is not None:
                # mask the validation score via the reusable holdout
                valid_f1 = reuser.mask_value(valid_f1, train_f1)
            question_f1s = []
            question_pps = []
            print "train_f1 =", train_f1, "valid_f1 =", valid_f1, "test_f1 =", test_f1
            results.append((train_f1, valid_f1, test_f1))
            # early-stopping bookkeeping: keep a deep copy of the best model
            if valid_f1 > best_f1:
                best_rnn = copy.deepcopy(rnn)
                best_f1 = valid_f1
                best_test_predictions = predictions_test
                if params['verbose']:
                    print('NEW BEST: epoch', e,
                          'valid f1', valid_f1,
                          'best test f1', test_f1)
                params['tr_f1'] = train_f1
                params['te_f1'] = test_f1
                params['v_f1'] = valid_f1
                params['be'] = e  # store the current epoch as a new best
            # learning rate decay if no improvement in a given number of epochs
            if abs(params['be']-params['ce']) >= params['decay_delay']:
                params['clr'] *= params['decay_factor']
                params['be'] = params['ce']
                print "Reverting to current best; new learning rate = ", params['clr']
                # also reset to the previous best
                rnn = best_rnn
            if params['clr'] < 1e-5:
                break
            if best_f1 == 1.0:
                break
            if best_f1 == 0 and e > 10:
                break
        if params['save_model']:
            # NOTE(review): this recomputes validation predictions with `rnn`
            # (the final model) rather than `best_rnn` — the commented-out
            # line below suggests best_rnn may have been intended; confirm.
            predictions_valid = [rnn.classify(x, params['win'],
                                              extra_input_dims, dev_extra[i]) for i, x in enumerate(valid_lex)]
            #predictions_valid = [best_rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')) for x in valid_lex]
            best_rnn.save(output_dir)
            common.write_predictions(datasets, params['test_fold'], dev_fold, predictions_valid, dev_items, output_dir)
        print('BEST RESULT: epoch', params['be'],
              'train F1 ', params['tr_f1'],
              'valid F1', params['v_f1'],
              'best test F1', params['te_f1'],
              'with the model', output_dir)
        best_valid_f1s.append(params['v_f1'])
        best_test_f1s.append(params['te_f1'])
        test_prediction_arrays.append(np.array(best_test_predictions, dtype=int))
        # write the per-epoch results for this fold
        output_filename = fh.make_filename(output_dir, 'results', 'txt')
        with codecs.open(output_filename, 'w') as output_file:
            for e, result in enumerate(results):
                output_file.write('epoch=' + str(e) + '; train_f1=' + str(result[0]) +
                                  '; valid_f1=' + str(result[1]) + '; test_f1=' + str(result[2]) + '\n')
    if params['ensemble']:
        # majority vote over the folds' best test predictions
        test_predictions_stack = np.dstack(test_prediction_arrays)
        final_predictions = stats.mode(test_predictions_stack, axis=2)[0][:, :, 0]
        predicted_df = pd.DataFrame(final_predictions, index=test_items, columns=codes)
        true_df = pd.DataFrame(np.array(test_y), index=test_items, columns=codes)
        final_test_f1, final_test_pp = evaluation.calc_macro_mean_f1_pp(true_df, predicted_df)
    else:
        final_test_f1 = np.median(best_test_f1s)
    # hyperopt-style result dict (loss is minimized, hence the negation)
    return {'loss': -np.median(best_valid_f1s),
            'final_test_f1': final_test_f1,
            'valid_f1s': best_valid_f1s,
            'test_f1s': best_test_f1s,
            'status': STATUS_OK
            }
if __name__ == '__main__':
    # Run a full experiment with the default/saved parameters.
    report = main()
print report | {
"repo_name": "dallascard/guac",
"path": "core/rnn/rnn_general.py",
"copies": "1",
"size": "31928",
"license": "apache-2.0",
"hash": 5815242471849275000,
"line_mean": 45.3410740203,
"line_max": 133,
"alpha_frac": 0.5176960661,
"autogenerated": false,
"ratio": 3.367932489451477,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4385628555551477,
"avg_score": null,
"num_lines": null
} |
# adapted from https://github.com/lisa-lab/DeepLearningTutorials
from collections import OrderedDict
import sys
import random
import numpy as np
import theano
from theano import tensor as T
from ..data_structures.labeled_tree import LabeledTree
# Otherwise the deepcopy fails
sys.setrecursionlimit(5000)
THEANO_FLAGS='floatX=float32'
class OutputLayer(object):
    """Single linear output unit (h -> h.W + b) trained by SGD.

    The loss is the summed squared error between predictions and targets
    (despite the `nll` variable name in mb_train).  The compiled Theano
    functions operate on minibatches; the mb_* wrappers reshape single
    examples into 1-row minibatches.

    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction of the apparent control flow.
    """
    def __init__(self, nh, init_scale=0.2):
        # nh :: dimension of the input (hidden) vector
        # init_scale :: scale of the uniform weight initialization
        self.W = theano.shared(name='W', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, 1))
                               .astype(theano.config.floatX))
        self.b = theano.shared(name='b', value=np.array(0,
                                                        dtype=theano.config.floatX))
        self.params = [self.b, self.W]
        # symbolic inputs: h = [batch, nh] features, y = targets, lr = step size
        h = T.fmatrix('h')
        y = T.fvector('y')
        lr = T.scalar('lr')
        y_pred = T.dot(h, self.W) + self.b
        # summed squared error over the minibatch
        loss = T.sum(T.square(y_pred[:, 0] - y))
        gradients = T.grad(loss, self.params)
        # plain SGD update for every parameter
        updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, gradients))
        # These all assume a minibatch size > 1; "mb" functions below will massage single examples as required
        self.predict = theano.function(inputs=[h], outputs=y_pred)
        self.calc_loss = theano.function(inputs=[h, y], outputs=loss, updates=None)
        self.train = theano.function(inputs=[h, y, lr], outputs=loss, updates=updates)
        self.calc_gradients = theano.function(inputs=[h, y], outputs=gradients, updates=None)

    def mb_train(self, x, y, learning_rate):
        """Train on x/y, promoting a single example to a 1-row minibatch."""
        if x.ndim == 1:
            x = np.reshape(x, (1, x.size))
        if y.size == 1:
            y = y.reshape((1,))
        # `nll` is historical naming; this is the squared-error loss
        nll = self.train(x, y, learning_rate)
        return nll

    def mb_predict(self, x):
        """Predict for x, promoting a single example to a 1-row minibatch."""
        if x.ndim == 1:
            x = np.reshape(x, (1, x.size))
        return self.predict(x)

    def print_params(self):
        """Debug helper: dump every parameter's name and value."""
        for param in self.params:
            print param.name, param.get_value()
class TriangularSummation(object):
    """Toy model: learn a small nonlinearity over masked running sums.

    For each position t, a scan accumulates T.sum(mask_t * h) + y_t into slot
    counter_t of the state vector; the "convolution" variant wraps that sum in
    w2*(y_t + tanh(w1*sum + b1)) + b2 with four learned scalars, trained to
    match a scalar target t by squared error.

    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction of the apparent control flow.
    """
    def __init__(self, init_scale=0.2):
        # four learned scalar parameters
        self.w1 = theano.shared(name='w1', value=np.array(init_scale * np.random.uniform(-1.0, 1.0),
                                                          dtype=theano.config.floatX))
        self.b1 = theano.shared(name='b1', value=np.array(init_scale * np.random.uniform(-1.0, 1.0),
                                                          dtype=theano.config.floatX))
        self.w2 = theano.shared(name='w2', value=np.array(init_scale * np.random.uniform(-1.0, 1.0),
                                                          dtype=theano.config.floatX))
        self.b2 = theano.shared(name='b2', value=np.array(init_scale * np.random.uniform(-1.0, 1.0),
                                                          dtype=theano.config.floatX))
        self.params = [self.w1, self.b1, self.w2, self.b2]
        x = T.matrix('x')

        # unused example step function (kept from development)
        def sum_columns(x_t, h_tm1, const) :
            h_t = x_t + h_tm1 + const
            return h_t
        # inputs to scan fn() are (x, outputs_info, non_sequences) (in whatever order given ?)
        # outputs of scan are (results, updates)
        #results_hidden, updates = theano.scan(fn=sum_columns, sequences=x, outputs_info=T.zeros(dh), non_sequences=T.ones(dh))
        y = T.vector('y')
        counter = T.ivector('counter')  # which slot of the state to write at each step
        mask = T.imatrix('mask')        # which earlier slots feed into each step
        t = T.scalar('t')               # scalar regression target
        lr = T.scalar('lr')

        # (sic: "trianguar") write sum(mask_t * h) + y_t into slot counter_t
        def trianguar_sum(x_t, mask_t, counter_t, h_tm1):
            h_t = h_tm1
            temp = T.sum(h_tm1 * mask_t) + x_t
            h_t = T.set_subtensor(h_t[counter_t], temp)
            return h_t

        # same recurrence with the learned scalar nonlinearity applied
        def triangular_convolution(x_t, mask_t, counter_t, h_tm1):
            h_t = h_tm1
            temp = self.w2 * (x_t + T.tanh(self.w1 * T.sum(mask_t * h_tm1) + self.b1)) + self.b2
            h_t = T.set_subtensor(h_t[counter_t], temp)
            return h_t

        true_h, _ = theano.scan(fn=trianguar_sum, sequences=[y, mask, counter], outputs_info=T.zeros(T.shape(y)))
        pred_h, updates = theano.scan(fn=triangular_convolution, sequences=[y, mask, counter], outputs_info=T.zeros(T.shape(y)))
        # final slot of the final state is the model's scalar summary
        true = true_h[-1, -1]
        pred = pred_h[-1, -1]
        loss = T.sum(T.square(pred - t))
        gradients = T.grad(loss, self.params)
        # plain SGD (rebinds `updates` from the scan above)
        updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, gradients))
        self.train = theano.function(inputs=[y, mask, counter, t, lr], outputs=[pred, loss, pred_h], updates=updates)
        self.compute_true = theano.function(inputs=[y, mask, counter], outputs=true)

    def print_params(self):
        """Debug helper: dump every parameter's name and value."""
        for param in self.params:
            print param.name, param.get_value()
class TreeLSTM(object):
def __init__(self, dh, dx, nc, init_scale=0.2, params_init=None):
self.dh = dh
self.dx = dx
self.nc = nc
if params_init is not None:
W_x, W_h, W_z, b_h, b_z = params_init
self.W_x = theano.shared(name='W_x', value=W_x.astype(theano.config.floatX))
self.W_h = theano.shared(name='W_h', value=W_h.astype(theano.config.floatX))
self.W_z = theano.shared(name='W_z', value=W_z.astype(theano.config.floatX))
self.b_h = theano.shared(name='b_h', value=b_h.astype(theano.config.floatX))
self.b_z = theano.shared(name='b_z', value=b_z.astype(theano.config.floatX))
else:
self.W_x = theano.shared(name='W_x', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, dh))
.astype(theano.config.floatX))
self.W_h = theano.shared(name='W_h', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.W_z = theano.shared(name='W_z', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, nc))
.astype(theano.config.floatX))
self.b_h = theano.shared(name='b_h', value=np.array(np.zeros(dh),
dtype=theano.config.floatX))
self.b_z = theano.shared(name='b_z', value=np.array(np.zeros(nc),
dtype=theano.config.floatX))
self.params = [self.W_x, self.W_h, self.W_z, self.b_h, self.b_z]
def vectoral(x_t, mask_t, counter_t, h_tm1):
#h_t = h_tm1
#h_sum = T.sum(h_tm1 * mask_t, axis=0)
#temp = T.tanh(T.dot(self.W_h, h_sum) + T.dot(self.W_x, x_t) + self.b_h)
#h_t = T.set_subtensor(h_t[counter_t], temp)
return h_tm1
def output_layer(h):
return T.dot(h, self.W_z) + self.b_z
counter = T.ivector('counter')
mask = T.imatrix('mask')
x = T.matrix('x')
y = T.iscalar('y')
lr = T.scalar('lr')
def test(x_t, counter_t, h_tm1):
h_t = h_tm1
temp = T.tanh(T.dot(x_t, self.W_x) + T.sum(T.dot(h_tm1, self.W_h), axis=0) + self.b_h)
h_t = T.set_subtensor(h_t[counter_t], temp)
return h_t
# This is a bit annoying; the 0th dimension of x needs to be sequence, so we can iterate over it
# but the 0th dimension of the hidden nodes needs to be hidden-node dimension, so that we can braodcast
# the mask out to it
# hence the transpose of W_h, and retranspose of the dot product
def treefwd(x_t, mask_t, counter_t, h_tm1):
h_t = h_tm1
#temp = T.sum(T.dot(self.W_h, (mask_t * h_tm1)), axis=1)
temp = T.dot(x_t, self.W_x) + T.sum(T.dot(self.W_h.T, (mask_t * h_tm1)).T, axis=0) + self.b_h
h_t = T.set_subtensor(h_t[:, counter_t], temp)
return h_t
ds, dx = T.shape(x)
#full_h, _ = theano.scan(fn=vectoral, sequences=[x, mask, counter], outputs_info=T.zeros((3, dh), dtype=theano.config.floatX))
full_h, _ = theano.scan(fn=treefwd, sequences=[x, mask, counter], outputs_info=T.zeros((dh, ds), dtype=theano.config.floatX))
h = full_h[-1, :, -1]
p_y_given_x = T.nnet.softmax(T.dot(h, self.W_z) + self.b_z)
pred_y = T.argmax(p_y_given_x, axis=1)
penalty = T.sum(self.W_x ** 2) + T.sum(self.W_h ** 2) + T.sum(self.W_z ** 2)
loss = T.sum(-T.log(p_y_given_x[y])) + penalty
gradients = T.grad(loss, self.params)
updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, gradients))
#self.train = theano.function(inputs=[x, mask, counter, y, lr], outputs=[pred_y, loss, full_h, h], updates=updates)
#self.compute_true = theano.function(inputs=[x, mask, counter], outputs=full_h)
self.train = theano.function(inputs=[x, mask, counter, y, lr], outputs=[full_h, h, pred_y, loss], updates=updates)
self.compute_true = theano.function(inputs=[x, mask, counter], outputs=[full_h, h, pred_y])
def print_params(self):
    """Print the name and current value of every trainable parameter."""
    for param in self.params:
        # Python 2 print statement; each param is a theano shared variable.
        print param.name, param.get_value()
def main():
    """Smoke-test driver: build a tiny TreeLSTM on random toy data, train a
    randomly-initialized copy for a few epochs, and print intermediate
    values for manual inspection. Exits via sys.exit(); everything after
    that call is unreachable dead code kept from an earlier experiment.
    """
    seed = 42
    # set size of inputs
    n = 50
    mb_size = 4
    noise = 0
    # NOTE(review): this overwrites seed = 42 above, so the seeding branch
    # below never runs — confirm which value is intended.
    seed = 0
    n_epochs = 20
    lr = 0.2
    lr_shrinkage = 0.95
    if seed > 0:
        np.random.seed(seed)
        random.seed(seed)
    # Tiny dimensions: hidden size, input size, sequence length, num classes.
    dh = 2
    dx = 3
    ds = 4
    nc = 2
    # create some data
    X = np.array(np.random.randn(ds, dx), dtype=np.int32)
    # Lower-triangular random 0/1 mask: node i may only attend to nodes < i.
    mask = np.array(np.random.randint(0, 2, (ds, ds)) * np.tril(np.ones(ds), -1), dtype=np.int32)
    counter = np.array(np.arange(0, ds), dtype=np.int32)
    # initialize some matrices for the NN
    W_x = np.array(np.random.randn(dx, dh), dtype=np.float32)
    W_h = np.array(np.random.randn(dh, dh), dtype=np.float32)
    # NOTE(review): W_z is (dh, 1) and b_z is a scalar, while TreeLSTM builds
    # (dh, nc) and length-nc parameters when self-initializing — confirm.
    W_z = np.array(np.random.randn(dh, 1), dtype=np.float32)
    b_h = np.array(np.random.randn(dh), dtype=np.float32)
    b_z = np.float32(0.2)
    params = {'W_x': W_x, 'W_h': W_h, 'W_z': W_z, 'b_h': b_h, 'b_z': b_z}
    # create a NN with these parameters and produce the true label
    rnn_true = TreeLSTM(dh, dx, nc, params_init=(W_x, W_h, W_z, b_h, b_z))
    print "X\n", X
    print "mask\n", mask
    for param, value in params.items():
        print param
        print value
    full_h, final_h, true = rnn_true.compute_true(X, mask, counter)
    print "full h\n", full_h
    print "true\n", true
    # Bucket the network's output into class labels {0, 1, 2}.
    if true > 1:
        true = 2
    elif true > 0:
        true = 1
    else:
        true = 0
    # create a randomly initialized NN
    rnn_test = TreeLSTM(dh, dx, nc)
    #rnn_test.print_params()
    # train on the above data
    for i in range(n_epochs):
        full_h, h, pred_y, loss = rnn_test.train(X, mask, counter, true, lr)
        lr *= lr_shrinkage
        print '%2d %0.8f %0.8f' % (i, pred_y, loss)
        print "full h\n", full_h
    rnn_test.print_params()
    sys.exit()
    # NOTE(review): everything below is unreachable (sys.exit above) and
    # references names not defined in this function (nh, OutputLayer, and
    # params used as a config dict) — dead code from an earlier experiment.
    """
    max_value = 2
    eqs = generate_toy_equation_data(1, max_value=2)
    for tree in eqs:
        tree.print_tree()
        print "value=", compute_tree_value(tree.root)
    """
    """
    tree = Tree('Dallas', 'NNP', 0)
    tree.add_descendent('is', 'VBZ', 1, 0, 'cop')
    tree.add_descendent('name', 'NN', 2, 0, 'nsubj')
    tree.add_descendent('My', 'PRP$', 3, 2, 'nmod:poss')
    #tree.print_tree()
    """
    # create random input data
    X = np.array(np.random.randn(n, nh), dtype=np.float32)
    W = np.array(np.random.randn(nh))
    b = np.random.randn()
    y = np.array(np.dot(X, W) + b + (noise * np.random.randn(n)), dtype=np.float32)
    print "nh=", nh
    print "true b=", b
    print "true W=", W
    rnn = OutputLayer(nh)
    print "Initial parameters:"
    rnn.print_params()
    best_mse = np.inf
    params['clr'] = params['lr']
    for e in xrange(params['n_epochs']):
        order = range(n)
        random.shuffle(order)
        params['ce'] = e # store the current epoch
        running_mse = 0
        if mb_size == n:
            # batch training
            mse = rnn.mb_train(X, y, params['clr'])
        else:
            # minibatch training
            for i in xrange(0, n/mb_size):
                indices = order[i*mb_size:(i+1)*mb_size]
                train_x = X[indices, :]
                train_y = y[indices]
                mse = rnn.mb_train(train_x, train_y, params['clr'])
        predictions = rnn.predict(X)
        predictions = np.reshape(predictions, y.shape)
        full_mse = np.sum(np.square(predictions - y))
        print "epoch=%d, lr=%.4f, mse=%.6f" % (e, params['clr'], full_mse)
        params['clr'] *= params['lr_shrinkage']
    print "Final parameters:"
    rnn.print_params()
def generate_toy_sentence_data(n_sentences):
    """Return a list of n_sentences randomly generated toy sentences."""
    # Each sentence is produced independently by the single-sentence helper.
    return [generate_test_sentence() for _ in xrange(n_sentences)]
def generate_test_sentence():
    """Generate one random toy English sentence and return it as a string.

    Structure: subject pronoun + verb + (determiner + noun | plural noun),
    optionally followed by a preposition + object pronoun.
    """
    subject_pronouns = ['he', 'she', 'it', 'we', 'they', 'you', 'I']
    object_pronouns = ['him', 'her', 'it', 'us', 'them', 'you', 'me']
    verb_list = ['ate', 'kicked', 'threw', 'drew', 'knew']
    noun_list = ['pie', 'can', 'ball', 'card', 'fact']
    determiners = ['a', 'the']
    prepositions = ['at', 'with', 'for', 'to']
    words = [np.random.choice(subject_pronouns), np.random.choice(verb_list)]
    # 40% of the time: determiner + singular noun; otherwise a bare plural.
    if np.random.rand() < 0.4:
        words.append(np.random.choice(determiners))
        words.append(np.random.choice(noun_list))
    else:
        words.append(np.random.choice(noun_list) + 's')
    # 30% of the time: tack on a prepositional phrase with an object pronoun.
    if np.random.rand() < 0.3:
        words.append(np.random.choice(prepositions))
        words.append(np.random.choice(object_pronouns))
    return ' '.join(words)
def generate_toy_equation_data(n_equations, max_value=2, min_nodes=2, max_nodes=7):
    """Return a list of n_equations random equation trees whose node counts
    are drawn uniformly from [min_nodes, max_nodes]."""
    return [generate_test_equation_tree(np.random.randint(min_nodes, max_nodes + 1), max_value)
            for _ in xrange(n_equations)]
def generate_test_equation_tree(n_nodes, max_value=2):
    """Build a random LabeledTree of integer-valued nodes joined by random
    operator relations (currently only '*'), with values in [0, max_value].

    NOTE(review): node_count starts at 0 and the loop runs while
    node_count <= n_nodes, so n_nodes + 1 descendents are added; the first
    descendent also reuses index 0, which the root already holds — confirm
    both are intended.
    """
    #symbols = ['+', '*']
    symbols = ['*']
    root_value = np.random.randint(0, max_value+1)
    tree = LabeledTree(root_value, 'v', 0)
    node_count = 0
    while node_count <= n_nodes:
        # Attach each new value under a uniformly random existing node.
        parent = tree.get_random_node_index()
        relation = np.random.choice(symbols)
        tree.add_descendent(np.random.randint(0, max_value+1), 'v', node_count, parent, relation)
        node_count += 1
    return tree
def compute_tree_value(node):
    """Recursively evaluate the equation tree rooted at node.

    node.word holds an integer literal; node.descendents is a sequence of
    (child, relation) pairs whose values are folded into the node's value
    with '*' or '+' in order. Unknown relations are ignored.
    """
    value = int(node.word)
    for child, op in node.descendents:
        child_value = compute_tree_value(child)
        if op == '+':
            value += child_value
        elif op == '*':
            value *= child_value
    return value
if __name__ == '__main__':
    # Run the toy TreeLSTM training/demo routine when executed as a script.
    main()
| {
"repo_name": "dallascard/guac",
"path": "core/rnn/rnn_tree.py",
"copies": "1",
"size": "14622",
"license": "apache-2.0",
"hash": -4219945808028966400,
"line_mean": 32.7690531178,
"line_max": 134,
"alpha_frac": 0.5490356996,
"autogenerated": false,
"ratio": 3.0602762662201757,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41093119658201754,
"avg_score": null,
"num_lines": null
} |
# Copyright (C) 2012 by Will McCutchen and individual contributors.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import aggdraw
import math
import Image
import itertools
import logging
import util
def pxl(image, tile_size=32):
    """Processes the given image by breaking it down into tiles of the given
    size and applying a triangular effect to each tile. Returns the processed
    image as a PIL Image object.

    The image can be given as anything suitable for passing to `Image.open`
    (ie, the path to an image or as a file-like object containing image data).

    If tile_size is 0, the tile size will be guessed based on the image
    size. It will also be adjusted to be divisible by 2 if it is not already.
    """
    # Make sure we have a usable tile size, by guessing based on image size
    # and making sure it's a multiple of two.
    if tile_size == 0:
        tile_size = guess_tile_size(image)
    if tile_size % 2 != 0:
        # Explicit floor division rounds an odd tile size down to the next
        # even number. (Previously `/` — correct only under Python 2, where
        # int division floors; `//` behaves identically there and keeps
        # working under Python 3.)
        tile_size = (tile_size // 2) * 2
    logging.info('Input image size: %r', image.size)
    logging.info('Tile size: %r', tile_size)
    # Preprocess image to make sure it's at a size we can handle
    image = prep_image(image, tile_size)
    logging.info('Prepped image size: %r', image.size)
    # Get pixmap (for direct pixel access) and draw objects for the image.
    pix = image.load()
    draw = aggdraw.Draw(image)
    # Process the image, tile by tile
    for x, y in iter_tiles(image, tile_size):
        process_tile(x, y, tile_size, pix, draw, image)
    draw.flush()
    return image
def process_tile(tile_x, tile_y, tile_size, pix, draw, image):
    """Process a tile whose top left corner is at the given x and y
    coordinates.
    """
    logging.debug('Processing tile (%d, %d)', tile_x, tile_y)
    # Average color of each triangular quadrant of this tile (clockwise).
    north, east, south, west = triangle_colors(tile_x, tile_y, tile_size, pix)
    # Component-wise color distance for every adjacent triangle pair.
    dist_ne = get_color_dist(north, east)
    dist_nw = get_color_dist(north, west)
    dist_se = get_color_dist(south, east)
    dist_sw = get_color_dist(south, west)
    # The closest pair decides the diagonal: a 'right' split runs from top
    # left to bottom right, a 'left' split runs bottom left to top right.
    closest = min([dist_ne, dist_nw, dist_se, dist_sw])
    if closest in (dist_ne, dist_sw):
        split = 'right'
        upper_color = get_average_color([north, east])
        lower_color = get_average_color([south, west])
    else:
        split = 'left'
        upper_color = get_average_color([north, west])
        lower_color = get_average_color([south, east])
    draw_triangles(tile_x, tile_y, tile_size, split, upper_color, lower_color,
                   draw)
def triangle_colors(tile_x, tile_y, tile_size, pix):
    """Extracts the average color for each triangle in the given tile. Returns
    a 4-tuple of colors for the triangles in this order: North, East, South,
    West (clockwise).

    NOTE(review): this relies on Python 2 semantics — `tile_size / 2`
    floor-divides ints and `map` returns a list. Under Python 3 it would
    need `//` and `list(map(...))`.
    """
    quad_size = tile_size / 2
    # North triangle: rows shrink inward as y advances toward the center.
    north = []
    for y in xrange(tile_y, tile_y + quad_size):
        x_off = y - tile_y
        for x in xrange(tile_x + x_off, tile_x + tile_size - x_off):
            north.append(pix[x, y])
    # South triangle: rows widen back out across the bottom half.
    south = []
    for y in xrange(tile_y + quad_size, tile_y + tile_size):
        x_off = tile_y + tile_size - y
        for x in xrange(tile_x + x_off, tile_x + tile_size - x_off):
            south.append(pix[x, y])
    # NOTE(review): 'east' scans x from tile_x (the LEFT half of the tile)
    # and 'west' scans the right half — the names look swapped relative to
    # compass directions; confirm intended orientation.
    east = []
    for x in xrange(tile_x, tile_x + quad_size):
        y_off = x - tile_x
        for y in xrange(tile_y + y_off, tile_y + tile_size - y_off):
            east.append(pix[x, y])
    west = []
    for x in xrange(tile_x + quad_size, tile_x + tile_size):
        y_off = tile_x + tile_size - x
        for y in xrange(tile_y + y_off, tile_y + tile_size - y_off):
            west.append(pix[x, y])
    return map(get_average_color, [north, east, south, west])
def draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
                   draw):
    """Draws a triangle on each half of the tile with the given coordinates
    and size.
    """
    assert split in ('right', 'left')
    # The four corners of this tile (the right edge uses tile_size - 1,
    # the bottom edge uses tile_size).
    right_x = tile_x + tile_size - 1
    bottom_y = tile_y + tile_size
    corner_nw = (tile_x, tile_y)
    corner_ne = (right_x, tile_y)
    corner_se = (right_x, bottom_y)
    corner_sw = (tile_x, bottom_y)
    if split == 'left':
        # Diagonal runs nw -> se: top-right and bottom-left triangles.
        draw_triangle(corner_nw, corner_ne, corner_se, top_color, draw)
        draw_triangle(corner_nw, corner_sw, corner_se, bottom_color, draw)
    else:
        # Diagonal runs sw -> ne: top-left and bottom-right triangles.
        draw_triangle(corner_sw, corner_nw, corner_ne, top_color, draw)
        draw_triangle(corner_sw, corner_se, corner_ne, bottom_color, draw)
def draw_triangle(a, b, c, color, draw):
    """Draws a triangle with the given vertices in the given color."""
    # Outline (pen) and fill (brush) use the same solid color.
    pen = aggdraw.Pen(color)
    brush = aggdraw.Brush(color)
    # aggdraw's polygon takes a flat coordinate sequence, hence the
    # tuple concatenation of the three (x, y) vertices.
    draw.polygon(a + b + c, pen, brush)
def get_average_color(colors):
    """Calculate the average color from the list of colors, where each color
    is a 3-tuple of (r, g, b) values.

    Uses explicit floor division, matching the integer arithmetic the
    original Python 2 `/` performed on int pixel components while also
    working under Python 3 (where the old `reduce`/`itertools.izip`
    combination no longer exists as builtins).
    """
    total = len(colors)
    # Transpose the colors into per-channel tuples and average each channel.
    # (Returns () for an empty input instead of raising, which no caller
    # relies on.)
    return tuple(sum(channel) // total for channel in zip(*colors))
def color_reducer(c1, c2):
    """Helper function used to add two colors together when averaging."""
    # Built-in zip works on both Python 2 and 3; itertools.izip (used
    # previously) does not exist on Python 3. Wrapped in tuple(), the
    # result is identical.
    return tuple(v1 + v2 for v1, v2 in zip(c1, c2))
def get_color_dist(c1, c2):
    """Calculates the "distance" between two colors, where the distance is
    another color whose components are the absolute values of the difference
    between each component of the input colors.
    """
    # zip instead of the Python-2-only itertools.izip; identical behavior.
    return tuple(abs(v1 - v2) for v1, v2 in zip(c1, c2))
def prep_image(image, tile_size):
    """Takes an image and a tile size and returns a possibly cropped version
    of the image that is evenly divisible in both dimensions by the tile size.
    """
    w, h = image.size
    # Explicit floor division (the original relied on Python 2 integer `/`,
    # which would produce floats — and invalid crop bounds — under Python 3).
    x_tiles = w // tile_size
    y_tiles = h // tile_size
    new_w = x_tiles * tile_size
    new_h = y_tiles * tile_size
    if new_w == w and new_h == h:
        # Already evenly divisible; nothing to crop.
        return image
    else:
        # Crop away the right/bottom remainder.
        crop_bounds = (0, 0, new_w, new_h)
        return image.crop(crop_bounds)
def iter_tiles(image, tile_size):
    """Yields (x, y) coordinate pairs for the top left corner of each tile in
    the given image, based on the given tile size.

    Tiles are produced in row-major order: left to right, top to bottom.
    """
    w, h = image.size
    # range() is valid on both Python 2 and 3; xrange (used previously) is
    # Python 2 only. Inside a for loop the two behave identically.
    for y in range(0, h, tile_size):
        for x in range(0, w, tile_size):
            yield x, y
def guess_tile_size(image):
    """Try to pick an appropriate tile size based on the image's size."""
    # Heuristic: 5% of the image's largest dimension, truncated to an int.
    largest_dimension = max(image.size)
    return int(largest_dimension * 0.05)
if __name__ == '__main__':
    # CLI entry point: pxl.py <input-image> <output-image>
    image = Image.open(sys.argv[1])
    image = image.convert('RGB')
    tile_size = 32
    # Round the target dimensions up to the next multiple of tile_size.
    width = util.WIDTH + tile_size - (util.WIDTH % tile_size)
    height = util.HEIGHT + tile_size- (util.HEIGHT % tile_size)
    image = util.resize_jam_background(image, width, height)
    image = pxl(image)
    image.save(sys.argv[2], quality=90)
| {
"repo_name": "thisismyjam/jam-image-filter",
"path": "jam_image_filter/pxl.py",
"copies": "1",
"size": "8234",
"license": "mit",
"hash": 8581117525959983000,
"line_mean": 34.4913793103,
"line_max": 79,
"alpha_frac": 0.6431867865,
"autogenerated": false,
"ratio": 3.4423076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45854944788076923,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/mdeff/cnn_graph
from .tensorflow_constructions import leaky_relu
import scipy.sparse, networkx, tensorflow as tf, numpy as np, pandas as pd
# TODO(mmd): Use generators properly.
def split_into_components(X_df, G):
    """set(X_df.columns) must == set(G.nodes)"""
    # Pair each connected component of G with the matching column slice of
    # X_df and the induced subgraph.
    frames = []
    graphs = []
    for component in networkx.components.connected_components(G):
        frames.append(X_df.filter(items=component))
        graphs.append(G.subgraph(component))
    return frames, graphs
def _pool_step(
    X,
    pool_size,  # TODO(mmd): Better name
    pooler = tf.nn.max_pool,
):
    """Pooling of size p. Should be a power of 2 greater than 1.

    X: num_samples x num_features x num_filters_in. Returns
    num_samples x (num_features / pool_size) x num_filters.
    """
    # TODO(mmd): Why all the expansion squeezing necessary?
    # Bug fix: the parameter is `X`, but the body previously referenced an
    # undefined lowercase `x`, raising NameError on every call.
    x = tf.expand_dims(X, 3)  # num_samples x num_features x num_filters_in x 1
    x = pooler(x, ksize=[1, pool_size, 1, 1], strides=[1, pool_size, 1, 1], padding='SAME')
    return tf.squeeze(x, [3])  # num_samples x num_features / p x num_filters
# TODO(mmd): Unify shape API for graph_conf layers.
# TODO(mmd): Better name.
def _full_fourier_graph_conv_step(
    X,
    G,
    scope,
    nodelist,
    receptive_field_size = 10,
    num_filters_out = 32,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Graph CNN with full weight matrices, i.e. patch has the same size as input.

    Filters X (num_samples x num_features x num_filters_in) in the graph
    Fourier basis of G's normalized Laplacian and returns an activated
    tensor reshaped to num_samples x num_filters_out x num_features.

    NOTE(review): receptive_field_size, batch_normalization and training are
    accepted but never used in this body — confirm whether that is intended.
    """
    num_samples, num_features, num_filters_in = X.shape.as_list()
    # Graph Fourier basis: eigenvectors of the normalized Laplacian,
    # computed densely with numpy at graph-construction time.
    L = networkx.normalized_laplacian_matrix(G, nodelist=nodelist)
    U = tf.constant(np.linalg.eigh(L.toarray())[1], dtype=tf.float32)
    # TODO(mmd): Get the below to work.
    #_, U = scipy.sparse.linalg.eigsh(L, k=k, which='SM')
    x = tf.transpose(X, [0, 2, 1]) # num_samples x num_filters_in x num_features
    x = tf.reshape(x, [num_samples * num_filters_in, num_features])
    # Forward graph Fourier transform of every (sample, input-filter) row.
    xf = tf.expand_dims(tf.matmul(x, U), 1)
    xf = tf.reshape(xf, [num_samples, num_filters_in, num_features])
    xf = tf.transpose(xf, [2, 1, 0]) # num_features x num_filters_in x num_samples
    with tf.variable_scope(scope):
        # TODO(mmd): Shapes probably wrong.
        W = tf.get_variable(
            'graph_convolution',
            [num_features * num_filters_in, num_filters_out, 1],
            tf.float32,
            initializer = weights_init,
        )
        b = tf.get_variable(
            'graph_bias',
            [1, num_filters_out, 1],
            tf.float32,
            initializer = bias_init,
        )
        # Spectral filtering: the learned weights act in Fourier space.
        yf = tf.matmul(W, xf)
    yf = tf.reshape(tf.transpose(yf, [2, 1, 0]), [num_samples * num_filters_out, num_features])
    # Inverse transform back to the vertex domain (U is orthogonal, so the
    # transpose is its inverse).
    y = tf.matmul(yf, tf.transpose(U))
    return activation(tf.reshape(y, [num_samples, num_filters_out, num_features]) + b)
# Chebyshev
def _chebyshev_graph_conv_step(
    X,
    G,
    scope,
    nodelist,
    receptive_field_size = 10,
    num_filters_out = 32,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Graph CNN with full weights, i.e. patch has the same size as input.

    Chebyshev-polynomial graph convolution: expands X in
    receptive_field_size Chebyshev terms of the shifted normalized graph
    Laplacian, then applies a learned filter bank mapping num_filters_in ->
    num_filters_out. Returns num_samples x num_features x num_filters_out.

    NOTE(review): batch_normalization and training are accepted but unused.
    """
    num_samples, num_features, num_filters_in = X.shape.as_list()
    L = networkx.normalized_laplacian_matrix(G, nodelist=nodelist).astype(np.float32)
    # Shift the Laplacian by -I before the recurrence. NOTE(review): the
    # usual Defferrard rescaling is 2L/lmax - I; this assumes lmax == 2
    # (the upper bound for a normalized Laplacian) — confirm.
    L = (L - scipy.sparse.identity(num_features, dtype=L.dtype, format='csr')).tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.sparse_reorder(tf.SparseTensor(indices=indices, values=L.data, dense_shape=L.shape))
    # Transform to Chebyshev basis
    # TODO(mmd): Are the permutations/reshapes really necessary or would this just work with smart
    # broadcasting?
    x0 = tf.transpose(X, perm=[1, 2, 0]) # num_features x num_filters_in x num_samples
    x0 = tf.reshape(x0, [num_features, num_filters_in*num_samples])
    chebyshev_terms = [x0]
    if receptive_field_size > 1:
        # T_1 = L x0; higher terms follow T_k = 2 L T_{k-1} - T_{k-2}.
        chebyshev_terms.append(tf.sparse_tensor_dense_matmul(L, chebyshev_terms[-1]))
    for _ in range(2, receptive_field_size):
        chebyshev_terms += [2*tf.sparse_tensor_dense_matmul(L, chebyshev_terms[-1]) - chebyshev_terms[-2]]
    x = tf.stack(chebyshev_terms) # receptive_field_size x num_features x num_filters_in*num_samples
    x = tf.reshape(x, [receptive_field_size, num_features, num_filters_in, num_samples])
    x = tf.transpose(x, perm=[3,1,2,0]) # num_samples x num_features x num_filters_in x receptive_field_size
    # TODO(mmd): Do I need to reshape like this or can this be handled fine with tensor multiplications?
    x = tf.reshape(x, [num_samples * num_features, num_filters_in * receptive_field_size])
    with tf.variable_scope(scope):
        # Filter: num_filters_in -> num_filters_out filters of order K, i.e. one filterbank per feature pair.
        W = tf.get_variable(
            'graph_convolution',
            [num_filters_in * receptive_field_size, num_filters_out],
            tf.float32,
            initializer = weights_init
        )
        b = tf.get_variable(
            'bias',
            [1, 1, num_filters_out],
            tf.float32,
            initializer = bias_init
        )
        x = activation(tf.matmul(x, W) + b)
    return tf.reshape(x, [num_samples, num_features, num_filters_out])
# TODO(mmd): Make accept num_filters_out (right now is effectively (though not in impl.) hard-coded @ 1)
def _graph_localized_ff_step(
    X,
    G,
    scope,
    nodelist,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Graph-localized feed-forward layer: a dense layer whose weight matrix
    is constrained to the graph's adjacency structure (plus self-loops), so
    each output feature only mixes a node with its graph neighbors.

    X is num_samples x num_features; the result has the same shape.
    Effectively a single output filter. NOTE(review): batch_normalization
    and training are accepted but unused.
    """
    num_samples, num_features = X.shape.as_list()
    A = networkx.adjacency_matrix(G, nodelist=nodelist).astype(np.float32)
    # Add self-loops so every node also keeps a weighted copy of its own value.
    A = (A + scipy.sparse.identity(num_features, dtype=A.dtype, format='csr')).tocoo()
    indices = np.column_stack((A.row, A.col))
    num_edges = len(indices)
    with tf.variable_scope(scope):
        # One learned scalar per (edge + self-loop); all other matrix
        # entries are structurally zero via the SparseTensor below.
        W = tf.get_variable(
            'graph_localized_ff_weights',
            [num_edges],
            tf.float32,
            initializer = weights_init,
        )
        W_tensor = tf.sparse_reorder(
            tf.SparseTensor(indices=indices, values=W, dense_shape=[num_features, num_features])
        )
        b = tf.get_variable(
            'bias',
            [num_features],
            tf.float32,
            initializer = bias_init,
        )
        # Sparse matmul needs the feature axis first, hence the transposes.
        return activation(tf.transpose(tf.sparse_tensor_dense_matmul(W_tensor, tf.transpose(X))) + b)
| {
"repo_name": "mmcdermott/ml_toolkit",
"path": "ml_toolkit/graph_layers.py",
"copies": "1",
"size": "6827",
"license": "mit",
"hash": 4869711976794403000,
"line_mean": 38.4624277457,
"line_max": 110,
"alpha_frac": 0.6185733119,
"autogenerated": false,
"ratio": 3.2571564885496183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9300881842532243,
"avg_score": 0.014969591583475241,
"num_lines": 173
} |
# adapted from https://github.com/mozilla/django-csp
from functools import partial
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.functional import SimpleLazyObject
from http.client import INTERNAL_SERVER_ERROR, NOT_FOUND
CSP_HEADER = 'Content-Security-Policy'
DEFAULT_CSP_POLICIES = {
"default-src": "'self'",
"script-src": "'self'",
"base-uri": "'none'",
"frame-ancestors": "'none'",
"object-src": "'none'",
"style-src": "'self' 'unsafe-inline'",
}
def make_csp_nonce(request, length=16):
    """Lazily create — and then cache on the request — a CSP nonce, so
    repeated calls for the same request return the same value."""
    nonce = getattr(request, '_csp_nonce', None)
    if not nonce:
        nonce = get_random_string(length)
        request._csp_nonce = nonce
    return nonce
def build_csp_header(request):
    """Serialize the default CSP policies — plus the request's script nonce,
    if one was generated — into a Content-Security-Policy header value."""
    policies = dict(DEFAULT_CSP_POLICIES)
    nonce = getattr(request, '_csp_nonce', None)
    if nonce:
        policies["script-src"] = "{} 'nonce-{}'".format(policies["script-src"], nonce)
    return ";".join("{} {}".format(directive, value)
                    for directive, value in policies.items())
def csp_middleware(get_response):
    """Middleware factory: attach a lazy CSP nonce to each request and set
    the Content-Security-Policy header on the response."""
    def middleware(request):
        # Lazy: the nonce is only generated if something actually reads it.
        request.csp_nonce = SimpleLazyObject(partial(make_csp_nonce, request))
        response = get_response(request)
        if CSP_HEADER in response:
            # header already present (HOW ???)
            return response
        if response.status_code in (INTERNAL_SERVER_ERROR, NOT_FOUND) and settings.DEBUG:
            # no policies in debug views
            return response
        response[CSP_HEADER] = build_csp_header(request)
        return response

    return middleware
def deployment_info_middleware(get_response):
    """Middleware factory: expose build/deployment metadata (from the
    optional base.deployment module, if present) as
    request.zentral_deployment on every request."""
    try:
        import base.deployment as deployment
    except ImportError:
        # No deployment module baked into this build: expose an empty dict.
        deployment_info = {}
    else:
        attrs = ("version", "image_id", "instance_id", "setup_at")
        deployment_info = {
            attr: getattr(deployment, attr, None)
            for attr in attrs
            if getattr(deployment, attr, None) is not None
        }

    def middleware(request):
        request.zentral_deployment = deployment_info
        return get_response(request)

    return middleware
| {
"repo_name": "zentralopensource/zentral",
"path": "server/base/middlewares.py",
"copies": "1",
"size": "2103",
"license": "apache-2.0",
"hash": 3955210482864421400,
"line_mean": 27.4189189189,
"line_max": 89,
"alpha_frac": 0.6462196862,
"autogenerated": false,
"ratio": 3.8166969147005445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4962916600900545,
"avg_score": null,
"num_lines": null
} |
"""Adapted from https://github.com/omarish/django-cprofile-middleware/"""
try:
import cProfile as profile
except ImportError:
import profile
import pstats
from cStringIO import StringIO
from django.conf import settings
class ProfilerMiddleware(object):
    """
    Simple profile middleware to profile django views. To run it, add ?prof to
    the URL like this:
    http://localhost:8000/view/?__prof__=true
    Optionally pass the following to modify the output:
    ?sort => Sort the output by a given metric. Default is time.
    See http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
    for all sort options.
    quick reference:
    - time: sort by function execution time
    - cum: the cumulative time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions.
    ?count => The number of rows to display. Default is 100.
    ?fullpath=<true|false> default false. True to show full path of the source file of each function
    ?callee=<true|false> default false. True to show the time of a function spent on its sub function.
    This is adapted from an example found here:
    http://www.slideshare.net/zeeg/django-con-high-performance-django-presentation.
    """
    def can(self, request):
        # Profile only when DEBUG is on and the caller explicitly asked for
        # it with ?__prof__=true on the query string.
        return settings.DEBUG and request.GET.get('__prof__', False) == 'true'

    def process_view(self, request, callback, callback_args, callback_kwargs):
        # Run the view under the profiler (cProfile if available, else the
        # pure-Python profile module); returning a response from here
        # short-circuits Django's normal view invocation.
        if self.can(request):
            self.profiler = profile.Profile()
            args = (request,) + callback_args
            return self.profiler.runcall(callback, *args, **callback_kwargs)

    def process_response(self, request, response):
        # Replace the response body with the formatted profiler statistics.
        # NOTE(review): relies on process_view having set self.profiler for
        # this same request; middleware instances are shared across requests,
        # so concurrent profiled requests could interleave — confirm.
        if self.can(request):
            self.profiler.create_stats()
            io = StringIO()
            stats = pstats.Stats(self.profiler, stream=io)
            # fullpath keeps full source-file paths; otherwise strip them.
            if not request.GET.get('fullpath', False):
                stats.strip_dirs()
            stats.sort_stats(request.GET.get('sort', 'time'))
            if request.GET.get('callee', False):
                stats.print_callees()
            stats.print_stats(int(request.GET.get('count', 100)))
            response.content = '<pre>%s</pre>' % io.getvalue()
        return response
"repo_name": "miurahr/seahub",
"path": "seahub/base/profile.py",
"copies": "6",
"size": "2286",
"license": "apache-2.0",
"hash": -3615370498469275000,
"line_mean": 35.8870967742,
"line_max": 152,
"alpha_frac": 0.6552930884,
"autogenerated": false,
"ratio": 4.156363636363636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7811656724763636,
"avg_score": null,
"num_lines": null
} |
## Adapted from https://github.com/openai/baselines/blob/master/baselines/ddpg/main.py
import argparse
import time
import os
import logging
from baselines import logger, bench
from baselines.common.misc_util import (
set_global_seeds,
boolean_flag,
)
import baselines.ddpg.training as training
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import *
import gym
import tensorflow as tf
from mpi4py import MPI
from osim.env.utils.mygym import convert_to_gym
from arm import Arm3dEnv
import types
def gymify_osim_env(env):
    """Wrap an osim environment so it looks like a gym env: attach a gym
    Box action space spanning [-1, 1] per action and rescale actions in
    step()."""
    # Advertised action space: one [-1, 1] dimension per osim action.
    env.action_space = ( [-1.0] * env.osim_model.get_action_space_size(), [1.0] * env.osim_model.get_action_space_size() )
    env.action_space = convert_to_gym(env.action_space)
    # Keep the original step and substitute a rescaling shim.
    env._step = env.step
    def step(self, action):
        # NOTE(review): maps agent action a -> 2a - 1, i.e. [-1, 1] onto
        # [-3, 1], not the usual [0, 1] muscle-excitation range — confirm
        # this is the intended rescaling.
        return self._step(action * 2 - 1)
    env.step = types.MethodType(step, env)
    return env
def run(seed, noise_type, layer_norm, evaluation, **kwargs):
    """Set up and run DDPG training on the Arm3d osim environment.

    seed: base RNG seed (offset by 1e6 per MPI rank); noise_type:
    comma-separated specs ('none', 'adaptive-param_<std>', 'normal_<std>',
    'ou_<std>'); layer_norm: layer normalization in actor/critic;
    evaluation: also build an eval env on rank 0. Remaining kwargs are
    forwarded to baselines' training.train.
    """
    # Configure things.
    rank = MPI.COMM_WORLD.Get_rank()
    if rank != 0:
        logger.set_level(logger.DISABLED)
    # Create envs.
    env = gymify_osim_env(Arm3dEnv(visualize = True))
    env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
    if evaluation and rank==0:
        eval_env = gymify_osim_env(Arm3dEnv(visualize = True))
        eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))
        # NOTE(review): env was already wrapped in a Monitor above; this
        # wraps it a second time and discards the per-rank log path —
        # confirm intended.
        env = bench.Monitor(env, None)
    else:
        eval_env = None
    # Parse noise_type
    action_noise = None
    param_noise = None
    nb_actions = env.action_space.shape[-1]
    for current_noise_type in noise_type.split(','):
        current_noise_type = current_noise_type.strip()
        if current_noise_type == 'none':
            pass
        elif 'adaptive-param' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
        elif 'normal' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
        elif 'ou' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
        else:
            raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
    # Configure components.
    memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
    critic = Critic(layer_norm=layer_norm)
    actor = Actor(nb_actions, layer_norm=layer_norm)
    # Seed everything to make things reproducible.
    seed = seed + 1000000 * rank
    logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))
    tf.reset_default_graph()
    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)
    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        start_time = time.time()
    training.train(env=env, eval_env=eval_env, param_noise=param_noise,
        action_noise=action_noise, actor=actor, critic=critic, memory=memory, **kwargs)
    env.close()
    if eval_env is not None:
        eval_env.close()
    if rank == 0:
        logger.info('total runtime: {}s'.format(time.time() - start_time))
def parse_args():
    """Build and parse the command-line arguments for DDPG training.

    Returns a dict of arguments suitable for run(**args); the
    num-timesteps entry is validated against the epoch/cycle/rollout
    settings and then removed from the dict.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # boolean_flag (from baselines) registers a --flag / --no-flag pair.
    boolean_flag(parser, 'render-eval', default=False)
    boolean_flag(parser, 'layer-norm', default=True)
    boolean_flag(parser, 'render', default=False)
    boolean_flag(parser, 'normalize-returns', default=False)
    boolean_flag(parser, 'normalize-observations', default=True)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--critic-l2-reg', type=float, default=1e-2)
    parser.add_argument('--batch-size', type=int, default=64)  # per MPI worker
    parser.add_argument('--actor-lr', type=float, default=1e-4)
    parser.add_argument('--critic-lr', type=float, default=1e-3)
    boolean_flag(parser, 'popart', default=False)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--reward-scale', type=float, default=1.)
    parser.add_argument('--clip-norm', type=float, default=None)
    parser.add_argument('--nb-epochs', type=int, default=500)  # with default settings, perform 1M steps total
    parser.add_argument('--nb-epoch-cycles', type=int, default=20)
    parser.add_argument('--nb-train-steps', type=int, default=50)  # per epoch cycle and MPI worker
    parser.add_argument('--nb-eval-steps', type=int, default=100)  # per epoch cycle and MPI worker
    parser.add_argument('--nb-rollout-steps', type=int, default=100)  # per epoch cycle and MPI worker
    parser.add_argument('--noise-type', type=str, default='adaptive-param_0.2')  # choices are adaptive-param_xx, ou_xx, normal_xx, none
    parser.add_argument('--num-timesteps', type=int, default=None)
    boolean_flag(parser, 'evaluation', default=False)
    args = parser.parse_args()
    # we don't directly specify timesteps for this script, so make sure that if we do specify them
    # they agree with the other parameters
    if args.num_timesteps is not None:
        assert(args.num_timesteps == args.nb_epochs * args.nb_epoch_cycles * args.nb_rollout_steps)
    dict_args = vars(args)
    del dict_args['num_timesteps']
    return dict_args
if __name__ == '__main__':
    args = parse_args()
    # Only rank 0 configures the logger so MPI workers don't clobber
    # each other's output.
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.configure()
    # Run actual script.
    run(**args)
| {
"repo_name": "stanfordnmbl/osim-rl",
"path": "examples/under-construction/train.arm.py",
"copies": "1",
"size": "5830",
"license": "mit",
"hash": 7085158787126972000,
"line_mean": 41.2463768116,
"line_max": 136,
"alpha_frac": 0.6723842196,
"autogenerated": false,
"ratio": 3.3525014376078204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9509679338566912,
"avg_score": 0.003041263728181723,
"num_lines": 138
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.