commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
182a4ceeeb8a9b9e3f5071427da1ca0ec847f368 | Check in first cut at oban code. Taken directly from code used for class. | trunk/metpy/tools/oban.py | trunk/metpy/tools/oban.py | from itertools import izip
import numpy as N
from constants import *
def rms(diffs):
return N.sqrt(N.average(diffs**2))
def grid_point_dists(grid_x, grid_y, ob_x, ob_y):
"Calculates distances for each grid point to every ob point"
return N.hypot(grid_x[...,N.newaxis] - ob_x[N.newaxis,N.newaxis,...],
grid_y[...,N.newaxis] - ob_y[N.newaxis,N.newaxis,...])
def adjust_field(field, xgrad, ygrad, grid_x, grid_y, ob_x, ob_y):
'''Makes a 3D field with the data adjusted by the gradient to each grid point
from every observation point'''
return field + xgrad * (grid_x[...,N.newaxis] - ob_x[N.newaxis,...])\
+ ygrad * (grid_y[...,N.newaxis] - ob_y[N.newaxis,...])
def analyze_grid_multipass(ob_data, grid_x, grid_y, ob_x, ob_y, num_passes,
weight_func, params, background = None):
'''Calculate a value at each grid point using multiple passes of an objective
analysis technique'''
if background is None:
mod_param = (params[0], 1.0)
background = analyze_grid(ob_data, grid_x, grid_y, ob_x, ob_y, weight_func,
mod_param)
num_passes -= 1
for i in range(num_passes):
ob_incs = get_ob_incs(ob_x, ob_y, ob_data, grid_x[0], grid_y[:,0],
background)
print 'pass: %d rms: %f' % (i, rms(ob_incs))
background = analyze_grid(ob_incs, grid_x, grid_y, ob_x, ob_y,
weight_func, params) + background
return background
def analyze_grid(ob_data, grid_x, grid_y, ob_x, ob_y, weight_func, params):
'''Calculates a value at each grid point based on the observed data in
ob_data. grid_point_dists is a 3D array containing the distance for each
grid point to every observation'''
try:
params[0]
except TypeError:
params = (params,)
weights = weight_func(grid_point_dists(grid_x, grid_y, ob_x, ob_y), *params)
final = (weights * ob_data).sum(axis=2)/weights.sum(axis=2)
try:
final[N.isnan(final)] = 0.0
except:
pass
return final
def uniform_weights(dists, radius):
weights = N.ones_like(dists)
weights[dists > radius] = 0.0
return weights
def cressman_weights(dists, radius):
dist_sq = dists * dists
rad_sq = radius * radius
weights = (rad_sq - dist_sq)/(rad_sq + dist_sq)
weights[dists > radius] = 0.0
return weights
def barnes_weights(dists, kappa0, gamma):
weights = N.exp(-dists**2 / (kappa0 * gamma))
critical_radius = N.sqrt(15 * kappa0)
weights[dists > critical_radius] = 0.0
return weights
def bilinear(x, y, data, xloc, yloc):
xind = find_axis_index(x, xloc)
yind = find_axis_index(y, yloc)
xw = (xloc - x[xind])/(x[xind+1] - x[xind])
x_weights = N.array([1-xw, xw])
yw = (yloc - y[yind])/(y[yind+1] - y[yind])
y_weights = N.array([1-yw, yw])
return N.dot(y_weights, N.dot(data[yind:yind+2,xind:xind+2], x_weights))
def find_axis_index(axis_vals, location):
if location > axis_vals[-1] or location < axis_vals[0]:
raise ValueError, "Location out of bounds"
for ind,val in enumerate(axis_vals):
if location < val:
break
return ind - 1
def get_wind_comps(spd, dir):
u = -spd * N.sin(dir * rad_per_deg)
v = -spd * N.cos(dir * rad_per_deg)
return u, v
def get_ob_incs(obx, oby, ob, grid_x, grid_y, field, cressman_radius = None):
ob_inc = list()
mask = N.zeros(ob.size)
for x,y,value in izip(obx,oby,ob):
try:
interp_val = bilinear(grid_x, grid_y, field, x, y)
ob_inc.append(value - interp_val)
except ValueError:
if cressman_radius is None:
mask[len(ob_inc)] = 1
ob_inc.append(0.0)
else:
#Ugly hack here to allow the one station off the grid to be interpolated
xg,yg = N.meshgrid(grid_x, grid_y)
interp_val = analyze_grid(field.flatten(), N.array(x, ndmin = 2),
N.array(y, ndmin = 2), xg.flatten(), yg.flatten(),
cressman_weights, cressman_radius)
if N.isnan(interp_val):
interp_val = value
# mask[len(ob_inc) - 1] = 1
ob_inc.append(value - interp_val.flatten()[0])
return N.ma.array(ob_inc, mask = mask)
def calc_barnes_param(spacing):
'''Calculate the Barnes analysis smoothing parameter, kappa0, from the
average grid spacing'''
return 5.052 * (2.0 * spacing / N.pi)**2
| Python | 0.000001 | |
8a5f5fa11feefec2a81c3c1c2419b14e45a55bd0 | Add dis01.py | trypython/stdlib/dis01.py | trypython/stdlib/dis01.py | # coding: utf-8
"""
dis モジュールについてのサンプルです。
"""
import dis
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import hr
# noinspection SpellCheckingInspection
class Sample(SampleBase):
def exec(self):
##############################################
# dis モジュールは、pythonのバイトコードの
# 解析をサポートしてくれるモジュール。
#
# 大きく分けて2つの使い方がある
# 1) dis.dis()
# 2) dis.Bytecode()
#
# 1) は、指定された内容を逆アセンブルして出力してくれる。
# 引数の file に何も指定しない場合は標準出力に指定してくれる。
#
# 2) は、python 3.4 で追加されたAPI。
# 指定の仕方は 1) とほぼ変わらないが、いきなり結果を
# 出力ではなくて、一旦 Bytecode オブジェクトにラップして
# 返してくれる。
#
##############################################
listcomp_str = 'r = [x for x in range(1000000) if x % 2 == 0]'
forloop_str = '''
r = []
for x in range(1000000):
if x % 2 == 0:
r.append(x)
'''
###############################################
# dis.dis()
###############################################
hr('dis.dis(listcomp_str)')
dis.dis(listcomp_str)
hr('dis.dis(forloop_str)')
dis.dis(forloop_str)
###############################################
# dis.Bytecode()
#
# python 3.4 から dis モジュールに追加されたAPI。
# 内部で code オブジェクトや dis.code_info() の
# 結果を保持してくれたりするので、こちらの方が便利。
###############################################
hr('dis.Bytecode(listcomp_str)')
listcomp_bytecode = dis.Bytecode(listcomp_str)
print(listcomp_bytecode.codeobj)
print(listcomp_bytecode.dis())
print(listcomp_bytecode.info())
hr('dis.Bytecode(forloop_str)')
forloop_bytecode = dis.Bytecode(forloop_str)
print(forloop_bytecode.codeobj)
print(forloop_bytecode.dis())
print(forloop_bytecode.info())
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
| Python | 0.000033 | |
4835cac3b5ea15671f3da25cbc6e6db4bad725c9 | Create crawl-twse.py | crawl/crawl-twse.py | crawl/crawl-twse.py | req=requests.get("http://www.twse.com.tw/ch/trading/fund/BFI82U/BFI82U.php?report1=day&input_date=105%2F05%2F31&mSubmit=%ACd%B8%DF&yr=2016&w_date=20160530&m_date=20160501")
req.encoding='utf-8'
html=req.text.encode('utf-8')
soup=BeautifulSoup(html,"html.parser")
for td in soup.findAll("td",{"class":"basic2"}):
print td.text
| Python | 0.000001 | |
bd597a8f34d6f95bc445550bcc239ff67d0321f4 | Add missing file. | tests/tests/utils.py | tests/tests/utils.py | from django.db import connection
from django.utils import six
def get_table_list():
with connection.cursor() as cursor:
table_list = connection.introspection.get_table_list(cursor)
if table_list and not isinstance(table_list[0], six.string_types):
table_list = [table.name for table in table_list]
return table_list
| Python | 0.000001 | |
99d0c209024b1a2801892cadbe17456c9fbd3f57 | Add tests for tools (work in progress) | tests/tests_tools.py | tests/tests_tools.py | """
SkCode tools test code.
"""
import unittest
from skcode.tools import (escape_attrvalue,
sanitize_url,
slugify,
unique_slugify)
class ToolsTestCase(unittest.TestCase):
""" Test suite for the tools module. """
def test_escape_attrvalue_with_single_quote(self):
""" Test the ``escape_attrvalue`` method with a string containing a single quote. """
output = escape_attrvalue("test'test")
self.assertEqual('"test\'test"', output)
def test_escape_attrvalue_with_double_quote(self):
""" Test the ``escape_attrvalue`` method with a string containing a double quote. """
output = escape_attrvalue('test"test')
self.assertEqual("'test\"test'", output)
def test_escape_attrvalue_with_single_and_double_quotes(self):
""" Test the ``escape_attrvalue`` method with a string containing a single and a double quote. """
output = escape_attrvalue("""test'test"test""")
self.assertEqual('"test\'test\\"test"', output)
def test_escape_attrvalue_with_no_quote(self):
""" Test the ``escape_attrvalue`` method with a string containing no quote. """
output = escape_attrvalue('test')
self.assertEqual('"test"', output)
# -----
def test_sanitize_url(self):
""" Test the ``sanitize_url`` method with a valid URL. """
output = sanitize_url('https://github.com/TamiaLab/PySkCode')
self.assertEqual('https://github.com/TamiaLab/PySkCode', output)
def test_sanitize_url_ipv6(self):
""" Test the ``sanitize_url`` method with a valid URL (using a IPv6 address). """
output = sanitize_url('https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443/')
self.assertEqual('https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443/', output)
def test_sanitize_url_with_no_url(self):
""" Test the ``sanitize_url`` method without any URL. """
output = sanitize_url('')
self.assertEqual('', output)
def test_sanitize_url_with_dangerous_char(self):
""" Test the ``sanitize_url`` method with an URL containing dangerous char inside. """
output = sanitize_url('{https}://github.com/<TamiaLab>/PySkCode')
self.assertEqual('https://github.com/TamiaLab/PySkCode', output)
def test_sanitize_url_with_malformed_url(self):
""" Test the ``sanitize_url`` method with a malformed URL. """
output = sanitize_url('https://[github.com/TamiaLab/PySkCode')
self.assertEqual('', output)
def test_sanitize_url_with_scheme_not_in_whitelist(self):
""" Test the ``sanitize_url`` method with an URL and a scheme not in whitelist. """
output = sanitize_url('https://github.com/TamiaLab/PySkCode', allowed_schemes=('http',))
self.assertEqual('', output)
def test_sanitize_url_with_local_url_without_scheme(self):
""" Test the ``sanitize_url`` method with a local URL without a scheme. """
output = sanitize_url('/TamiaLab/PySkCode', default_scheme='https')
self.assertEqual('/TamiaLab/PySkCode', output)
def test_sanitize_url_with_non_local_url_without_scheme(self):
""" Test the ``sanitize_url`` method with a non local URL and without a scheme. """
output = sanitize_url('github.com/TamiaLab/PySkCode', default_scheme='https')
self.assertEqual('https://github.com/TamiaLab/PySkCode', output)
def test_sanitize_url_with_only_domain_name(self):
""" Test the ``sanitize_url`` method with onyl a domain name. """
output = sanitize_url('github.com', default_scheme='https')
self.assertEqual('https://github.com', output)
def test_sanitize_url_with_force_default_scheme(self):
""" Test the ``sanitize_url`` method with a local URL without a scheme but with force_default_scheme set. """
output = sanitize_url('/TamiaLab/PySkCode', default_scheme='https', force_default_scheme=True)
self.assertEqual('https:///TamiaLab/PySkCode', output)
def test_sanitize_url_with_force_remove_scheme(self):
""" Test the ``sanitize_url`` method with a valid URL and the force_remove_scheme set. """
output = sanitize_url('https://github.com/TamiaLab/PySkCode', force_remove_scheme=True)
self.assertEqual('//github.com/TamiaLab/PySkCode', output)
def test_sanitize_url_with_html_entities(self):
""" Test the ``sanitize_url`` method with an URL containing HTML entities char inside. """
output = sanitize_url("https://github.com/TamiaLab/PySkCode?foo=bar&bar=foo")
self.assertEqual('https://github.com/TamiaLab/PySkCode?foo=bar&bar=foo', output)
def test_sanitize_url_with_html_entities_escape_disabled(self):
""" Test the ``sanitize_url`` method with an URL containing HTML entities char inside. """
output = sanitize_url("https://github.com/TamiaLab/PySkCode?foo=bar&bar=foo", encode_html_entities=False)
self.assertEqual('https://github.com/TamiaLab/PySkCode?foo=bar&bar=foo', output)
def test_sanitize_url_with_javascript_xss(self):
""" Test the ``sanitize_url`` method with a classic ``javascript:`` XSS injection. """
self.assertEqual('', sanitize_url("javascript:alert('XSS');"))
self.assertEqual('', sanitize_url("jav\tascript:alert('XSS');"))
def test_sanitize_url_with_data_xss(self):
""" Test the ``sanitize_url`` method with a classic ``data:`` XSS injection. """
output = sanitize_url("data:image/png;base64,iVBORw0KGgoAA"
"AANSUhEUgAAABAAAAAQAQMAAAAlPW0iAAAABlBMVEUAAAD///+l2Z/dAAAAM0l"
"EQVR4nGP4/5/h/1+G/58ZDrAz3D/McH8yw83NDDeNGe4Ug9C9zwz3gVLMDA/A6"
"P9/AFGGFyjOXZtQAAAAAElFTkSuQmCC")
self.assertEqual('', output)
# -----
# TODO def test_slugify
# TODO def test_unique_slugify
| Python | 0 | |
66a23782438d9c16111c25c56090f4c92f54dde1 | Add integration test for the trivial cycle simulation. | integration_tests/test_trivial_cycle.py | integration_tests/test_trivial_cycle.py | #! /usr/bin/python
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
""" Tests """
def test_source_to_sink():
""" Tests simulations with a facilty that has a conversion factor.
In future, may eliminate checks if needed tables exist, and rely on errors.
In future, may eliminate tests for the existance and uniqueness of the
facilities. In addition, may eliminate other non-integration testing relevant
code and tests.
"""
# Cyclus simulation input for source_to_sink
sim_inputs = ["./Inputs/trivial_cycle_deplete.xml",
"./Inputs/trivial_cycle_steady.xml",
"./Inputs/trivial_cycle_growth.xml"]
# Conversion factors for the three simulations
k_factors = [0.5, 1, 2]
sim_info = zip(sim_inputs, k_factors)
for sim_input, k_factor in sim_info:
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
check_cmd(cmd, '.', holdsrtn)
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/AgentExit", "/Resources", "/Transactions",
"/Info"]
# Check if these tables exist
tables_there = True
for path in paths:
yield assert_true, output.__contains__(path)
# Have to stop further operations after these tests
if tables_there and not output.__contains__(path):
tables_there = False
if not tables_there:
output.close()
os.remove("./output_temp.h5")
return
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
agent_exit = output.get_node("/AgentExit")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
duration = info["Duration"][0]
facility_id = []
i = 0
for impl in agent_impl:
if impl == "KFacility":
facility_id.append(i)
i += 1
yield assert_equal, len(facility_id), 1
facility_id = agent_ids[facility_id[0]]
sender_ids = transactions["SenderId"]
receiver_ids = transactions["ReceiverId"]
expected_sender_array = np.empty(sender_ids.size)
expected_sender_array.fill(facility_id)
expected_receiver_array = np.empty(receiver_ids.size)
expected_receiver_array.fill(sink_id)
yield assert_array_equal, sender_ids, expected_sender_array
yield assert_array_equal, receiver_ids, expected_receiver_array
# Transaction ids must be equal range from 1 to the number of rows
expected_trans_ids = np.arange(0, sender_ids.size, 1)
yield assert_array_equal, transactions["TransactionId"], expected_trans_ids
# Track transacted resources
resource_ids = resources["ResourceId"]
quantities = resources["Quantity"]
expected_quantities = np.empty(resource_ids.size)
# Expect that every transaction quantity is the same amount
initial_inv = quantities[0]
for i in range(expected_quantities.size):
expected_quantities[i] = initial_inv * k_factor ** i
# Should find a logic for almost equal cases
yield assert_array_equal, quantities, expected_quantities
output.close()
os.remove("./output_temp.h5")
| Python | 0 | |
1ba7850e57113e6b1ca1be5064cef5277a15598b | Add script: /Scripts/Others/test.py | Scripts/Others/test.py | Scripts/Others/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# region Description
"""
test.py: Test new technique
Author: Vladimir Ivanov
License: MIT
Copyright 2019, Raw-packet Project
"""
# endregion
# region Import
# region Add project root path
from sys import path
from os.path import dirname, abspath
path.append(dirname(dirname(dirname(abspath(__file__)))))
# endregion
# region Raw-packet modules
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import MDNS_raw, ICMPv6_raw
from raw_packet.Utils.network import Sniff_raw
# endregion
# region Import libraries
from time import sleep
from json import dumps
from socket import socket, AF_PACKET, SOCK_RAW
from os.path import dirname, abspath
project_root_path = dirname(dirname(dirname(abspath(__file__))))
# endregion
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2019, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Check user, platform and print banner
Base = Base()
Base.check_user()
Base.check_platform()
Base.print_banner()
# endregion
# region Main function
if __name__ == "__main__":
try:
print('\n')
Base.print_info('Network functions:')
print('\nMac address:')
print(Base.get_interface_mac_address('eth0', False))
print('\nIPv4 address:')
print(Base.get_interface_ip_address('eth0', False))
print('\nIPv6 link local address:')
print(Base.get_interface_ipv6_link_address('eth0', False))
print('\nIPv6 link local address by mac address:')
print(Base.make_ipv6_link_address(Base.get_interface_mac_address('eth0', False)))
print('\nIPv6 link global address:')
print(Base.get_interface_ipv6_glob_address('eth0', False))
print('\nIPv6 global addresses:')
print(Base.get_interface_ipv6_glob_addresses('eth0'))
print('\nNetwork mask:')
print(Base.get_interface_netmask('eth0', False))
print('\nFirst IPv4:')
print(Base.get_first_ip_on_interface('eth0', False))
print('\nSecond IPv4:')
print(Base.get_second_ip_on_interface('eth0', False))
print('\nPenultimate IPv4:')
print(Base.get_penultimate_ip_on_interface('eth0', False))
print('\nLast IPv4:')
print(Base.get_last_ip_on_interface('eth0', False))
print('\nRandom IPv4:')
print(Base.get_random_ip_on_interface('eth0', False))
print('\nIPv4 network:')
print(Base.get_interface_network('eth0', False))
print('\nIPv4 broadcast:')
print(Base.get_interface_broadcast('eth0', False))
print('\nIPv4 gateway:')
print(Base.get_interface_ipv4_gateway('eth0', False))
print('\nIPv6 gateway:')
print(Base.get_interface_ipv6_gateway('eth0', False))
print('\n')
Base.print_info('Software functions:')
print('\nApt list installed software:')
print(Base.apt_list_installed_packages())
print('\nCheck installed software: apache2')
print(Base.check_installed_software('apache2', False))
print('\n')
Base.print_info('Process functions:')
print('\nProcess apache2 pid:')
print(Base.get_process_pid('apache2'))
print('\nProcess pid by listen port 80:')
print(Base.get_process_pid_by_listen_port(80))
except KeyboardInterrupt:
Base.print_info("Exit")
exit(0)
# endregion
| Python | 0.000008 | |
be904e21db2012ac8f72a141afd9b93da2bfb262 | Create http responses | monarch/base/http/responses.py | monarch/base/http/responses.py | # Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http.response import HttpResponseBase
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponsePermanentRedirect
from . import status
class HttpResponse(HttpResponse):
def __init__(self, content="", *args, **kwarg):
self.content_data = content
super(HttpResponse, self).__init__(content, *args, **kwarg)
@property
def content_data(self):
return self.__content_data
@content_data.setter
def content_data(self, value):
self.__content_data = value
class Ok(HttpResponse):
status_code = status.HTTP_200_OK
class Created(HttpResponse):
status_code = status.HTTP_201_CREATED
class Accepted(HttpResponse):
status_code = status.HTTP_202_ACCEPTED
class NoContent(HttpResponse):
status_code = status.HTTP_204_NO_CONTENT
class MultipleChoices(HttpResponse):
status_code = status.HTTP_300_MULTIPLE_CHOICES
class MovedPermanently(HttpResponsePermanentRedirect):
status_code = status.HTTP_301_MOVED_PERMANENTLY
class Redirect(HttpResponseRedirect):
status_code = status.HTTP_302_FOUND
class SeeOther(HttpResponse):
status_code = status.HTTP_303_SEE_OTHER
class NotModified(HttpResponse):
status_code = status.HTTP_304_NOT_MODIFIED
class TemporaryRedirect(HttpResponse):
status_code = status.HTTP_307_TEMPORARY_REDIRECT
class BadRequest(HttpResponse):
status_code = status.HTTP_400_BAD_REQUEST
class Unauthorized(HttpResponse):
status_code = status.HTTP_401_UNAUTHORIZED
class Forbidden(HttpResponse):
status_code = status.HTTP_403_FORBIDDEN
class NotFound(HttpResponse):
status_code = status.HTTP_404_NOT_FOUND
class MethodNotAllowed(HttpResponse):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
class NotAcceptable(HttpResponse):
status_code = status.HTTP_406_NOT_ACCEPTABLE
class Conflict(HttpResponse):
status_code = status.HTTP_409_CONFLICT
class Gone(HttpResponse):
status_code = status.HTTP_410_GONE
class PreconditionFailed(HttpResponse):
status_code = status.HTTP_412_PRECONDITION_FAILED
class UnsupportedMediaType(HttpResponse):
status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
class TooManyRequests(HttpResponse):
status_code = status.HTTP_429_TOO_MANY_REQUESTS
class InternalServerError(HttpResponse):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
class NotImplemented(HttpResponse):
status_code = status.HTTP_501_NOT_IMPLEMENTED
| Python | 0 | |
72fcb82d33c4a4317630b6f2c7985e69ff9d3ce3 | add some simple tests for lru_cache | src/unittest/python/backport_tests.py | src/unittest/python/backport_tests.py | #!/usr/bin/env python
from __future__ import print_function
import sys
import unittest
from backports import functools_lru_cache
class Test(unittest.TestCase):
def test_with_bound_cache(self):
@functools_lru_cache.lru_cache()
def cachy(*args):
return True
self.assertTrue(cachy("foo"))
self.assertTrue(cachy("bar"))
self.assertTrue(cachy("foo"))
print(cachy.cache_info())
cachy.cache_clear()
def test_without_cache(self):
@functools_lru_cache.lru_cache(maxsize=None)
def cachy(*args):
return True
self.assertTrue(cachy("foo"))
self.assertTrue(cachy("bar"))
self.assertTrue(cachy("foo"))
print(cachy.cache_info())
cachy.cache_clear()
def test_with_boundless_cache(self):
@functools_lru_cache.lru_cache(maxsize=0)
def cachy(*args):
return True
self.assertTrue(cachy("foo"))
self.assertTrue(cachy("bar"))
self.assertTrue(cachy("foo"))
print(cachy.cache_info())
cachy.cache_clear()
if __name__ == "__main__":
sys.exit(unittest.main())
| Python | 0 | |
75a2c6fb7074e316908d12cfd6f1e03d9e0a1ba6 | add new tool to generate new pipeline easily (outside of sequana repository) | sequana/scripts/start_pipeline.py | sequana/scripts/start_pipeline.py | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from snakemake import shell as shellcmd
import shutil
import glob
import sys
from optparse import OptionParser
import argparse
class Options(argparse.ArgumentParser):
def __init__(self, prog="sequana_mapping"):
usage = """Welcome to SEQUANA - create a new pipeline from scratch
sequana_start_pipeline
"""
description = """DESCRIPTION:
"""
super(Options, self).__init__(usage=usage, prog=prog,
description=description)
#self.add_argument("--use-sambamba", dest="sambamba", action="store_true",
# default=False,
# help="""use sambamba instead of samtools for the sorting """)
def main(args=None):
if args is None:
args = sys.argv[:]
user_options = Options(prog="sequana")
# If --help or no options provided, show the help
if "--help" in args:
user_options.parse_args(["prog", "--help"])
else:
options = user_options.parse_args(args[1:])
cmd = "cookiecutter https://github.com/sequana/sequana_pipeline_template"
import subprocess
subprocess.call(cmd.split())
| Python | 0 | |
48c139172e2eab43919ac9589ee58e3ff2009887 | Work in progress | lexicon/providers/azure.py | lexicon/providers/azure.py | import json
import requests
from lexicon.providers.base import Provider as BaseProvider
MANAGEMENT_URL = 'https://management.azure.com'
API_VERSION = '2018-03-01-preview'
NAMESERVER_DOMAINS = ['azure.com']
def provider_parser(subparser):
subparser.add_argument('--auth-credentials')
class Provider(BaseProvider):
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self._access_token = None
self._subscription_id = None
if self._get_provider_option('auth_credentials').startswith('file::'):
with open(self._get_provider_option('auth_credentials')
.replace('file::', '')) as file_h:
data = file_h.read()
self._credentials = json.loads(data)
def _authenticate(self):
ad_endpoint = self._credentials['activeDirectoryEndpointUrl']
tenant_id = self._credentials['tenantId']
client_id = self._credentials['clientId']
client_secret = self._credentials['clientSecret']
self._subscription_id = self._credentials['subscriptionId']
assert ad_endpoint
assert tenant_id
assert client_id
assert client_secret
assert self._subscription_id
url = '{0}/{1}/oauth2/token'.format(ad_endpoint, tenant_id)
data = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
'resource': MANAGEMENT_URL
}
result = requests.post(url, data=data)
result.raise_for_status()
self._access_token = result.json()['access_token']
url = ('{0}/subscriptions/{1}/providers/Microsoft.Network/dnszones'
.format(MANAGEMENT_URL, self._subscription_id))
headers = {'Authorization': 'Bearer {0}'.format(self._access_token)}
params = {'api-version': API_VERSION}
result = requests.get(url, headers=headers, params=params)
result.raise_for_status()
print(result.json())
def _request(self, action='GET', url='/', data=None, query_params=None):
url = '{0}/subscriptions/{1}' | Python | 0.000003 | |
4485e7dd4b6d5a6199d99cdc9a852ff551fc384b | bump version number | client/version.py | client/version.py | ELECTRUM_VERSION = "0.38"
SEED_VERSION = 4 # bump this everytime the seed generation is modified
| ELECTRUM_VERSION = "0.37"
SEED_VERSION = 4 # bump this everytime the seed generation is modified
| Python | 0.000004 |
229d7e0385f3809267a2d930f93c7c8e17515a25 | initialize final model - validation | code/final_val.py | code/final_val.py | # # Try different hyperparameters and network structure on validation set
from sklearn.model_selection import TimeSeriesSplit, train_test_split
from keras.utils.visualize_util import plot
from keras.models import Sequential
from keras.layers import GRU, Dense, Masking, Dropout, Activation, advanced_activations
from keras.callbacks import EarlyStopping
import numpy as np
from itertools import product
import cPickle as pkl
from scipy.sparse import csr_matrix
from utils import set_trace, plot_ROC
from sklearn.metrics import roc_curve, auc, roc_auc_score
import tensorflow as tf
tf.python.control_flow_ops = tf
# Load saved data
print('Load saved data')
X_train = pkl.load(open('data/X_train.np', 'rb'))
y_train = pkl.load(open('data/y_train.np', 'rb'))
X_train = X_train[1:X_train.shape[0]] # drop first sample so batch size is divisible
y_train = y_train[1:y_train.shape[0]]
# Label shift
lahead = 0 # number of days ahead that are used to make the prediction
if lahead!=0:
y_train = np.roll(y_train,-lahead,axis=0)
else:
pass
# Define network structure
epochs = 3
nb_timesteps = 14
nb_classes = 2
nb_features = X_train.shape[1]
output_dim = 1
# Define cross-validated model parameters
validation_split=0.2
batch_size = 14
dropout = 0.25
activation = 'sigmoid'
nb_hidden = 128
initialization = 'glorot_normal'
# # Reshape X to three dimensions
# # Should have shape (batch_size, nb_timesteps, nb_features)
X_train = csr_matrix.toarray(X_train) # convert from sparse matrix to N dimensional array
X_train = np.resize(X_train, (X_train.shape[0], nb_timesteps, X_train.shape[1]))
print('X_train shape:', X_train.shape)
# Reshape y to two dimensions
# Should have shape (batch_size, output_dim)
y_train = np.resize(y_train, (X_train.shape[0], output_dim))
print('y_train shape:', y_train.shape)
# Initiate sequential model
print('Initializing model')
model = Sequential()
# Stack layers
# expected input batch shape: (batch_size, nb_timesteps, nb_features)
# note that we have to provide the full batch_input_shape since the network is stateful.
# the sample of index i in batch k is the follow-up for the sample i in batch k-1.
model.add(Masking(mask_value=0., batch_input_shape=(batch_size, nb_timesteps, nb_features))) # embedding for variable input lengths
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization,
batch_input_shape=(batch_size, nb_timesteps, nb_features)))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(GRU(nb_hidden, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(Dense(output_dim, activation=activation))
# Configure learning process
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['binary_accuracy'])
plot(model, to_file='results/baseline_model.png', # Plot graph of model
show_shapes = True,
show_layer_names = False)
# Training /validation
early_stopping = EarlyStopping(monitor='loss', patience=1)
print('Training/validation')
for i in range(epochs):
print('Epoch', i+1, '/', epochs)
model.fit(X_train,
y_train,
batch_size=batch_size,
verbose=1,
nb_epoch=1,
shuffle=False, # turn off shuffle to ensure training data patterns remain sequential
callbacks=[early_stopping], # stop early if training loss not improving after 1 epoch
validation_split=validation_split) # use last 20% of data for validation set
model.reset_states() | Python | 0.000001 | |
93d444215e6ebea3088936b46f79bf335dcfd070 | Create Pentester-Toolkit.py | Pentester-Toolkit.py | Pentester-Toolkit.py | #!/usr/bin/env python
import requests,re,os,sys,time
try:
from termcolor import *
except:
print " -- install termcolor By using: 'sudo easy_install termcolor' -- "
def banner():
    """Clear the terminal and print the green ASCII-art tool banner."""
    os.system('clear')
    cprint('''
 ____ __ __ ______ ____ __ _ __
 / __ \___ ____ / /____ _____/ /____ _____ /_ __/___ ____ / / //_/(_) /_
/ /_/ / _ \/ __ \/ __/ _ \/ ___/ __/ _ \/ ___/_____/ / / __ \/ __ \/ / ,< / / __/
/ ____/ __/ / / / /_/ __(__ ) /_/ __/ / /_____/ / / /_/ / /_/ / / /| |/ / /_
/_/ \___/_/ /_/\__/\___/____/\__/\___/_/ /_/ \____/\____/_/_/ |_/_/\__/
    ''', 'green', attrs=['bold'])
banner()
cprint(' -- UrduSecurity WebApp Header Analysis Tool Kit --', "green", attrs=['bold'])
print colored("+++", "green") * 16
print " -- Blog: " + colored("http://urdusecurity.blogspot.com", "green") + " -- "
print colored("+++", "green") * 16
print " -- Author: " + colored("Muhammad Adeel", "green") + " --"
print colored("+++", "green") * 16
print " -- PlatForm: " + colored("{0}", "green").format(sys.platform) + " -- "
print colored("+++", "green") * 16
print " -- Scan Started at:" + colored(" {0}", "green").format(time.ctime()) + " -- "
print colored("+++", "green") * 16
print """
[!] Check for Both 'http' & https' For Better Results
[+] 1. Get Server Info
[+] 2. Check For HttpOnly Flag
[+] 3. Check For XFrame-Options
[+] 4. Check For X-XSS-Protection
[+] 5. Check All"""
print colored("+++", "green") * 16
def input():
    """Prompt for the menu choice and target URL, storing both in globals.

    NOTE(review): this shadows the ``input`` builtin and relies on the
    Python 2-only ``raw_input``.
    """
    global select_mode
    global target_url
    select_mode = raw_input(colored('\n(Select)> ', 'green'))
    target_url = raw_input(colored('(Url)> ', 'green'))
input()
def CheckServer():
    """POST to the global target_url and report server/CMS fingerprints.

    Reads the ``Server`` and ``x-content-encoded-by`` response headers and
    falls back to the ``<meta name="generator">`` tag in the body.
    """
    print colored("+++", "green") * 16
    print colored("-- Getting Server Info --", "green")
    print colored("+++", "green") * 16
    s_check = requests.post(target_url)
    # meta generator tag often leaks the CMS name/version
    regex = '''<meta name="generator" content="(.+?)" />'''
    check_regex = re.findall(regex, s_check.content)
    try:
        if s_check.raw.headers.has_key('server'):
            print "[+] Server Header is Present"
            ser = s_check.raw.getheader('server')
            print "[+] Target is using Following Server: " + colored("{0}", "green").format(ser)
        if s_check.raw.headers.has_key('x-content-encoded-by'):
            script = s_check.raw.getheader('x-content-encoded-by')
            print "[+] Web Based on: " + colored("{0}", "green").format(script)
        if check_regex:
            print "[+] Web Based on:" + colored(" {0}", "green").format(check_regex)
        else:
            print "[-] Unable to Find Web Script"
    except:
        print "-- Exception Catched --"
def HttpOnly():
    """Check whether the target's Set-Cookie header carries the HttpOnly flag."""
    print colored("+++", "green") * 16
    print colored("-- Checking for HttpOnly Flag --", "green")
    print colored("+++", "green") * 16
    checkfor = requests.post(target_url)
    var = checkfor.raw.headers.has_key('set-cookie')
    var2 = 'HttpOnly'
    if var:
        print "[+] Set-Cookie Found..\n[+] Trying for HttpOnly..."
        try:
            # scan all header values for the literal 'HttpOnly' token
            if var2 in checkfor.raw.headers.viewvalues():
                print "[+] HttpOnly Flg is:" + colored(" Present", "green")
            else:
                print "[-] HttpOnly Flag is:" + colored(" Not Present", "green")
        except:
            print "-- Exception catched --"
            pass
    else:
        print "[-] Set-Cookie Not Found."
def XframeOptions():
    """Report which X-Frame-Options directive (if any) the target sends.

    Tests DENY / SAMEORIGIN / ALLOW-FROM in three capitalizations each.
    """
    print colored("+++", "green") * 16
    print colored("-- Checking For X-Frame-Options --", "green")
    print colored("+++", "green") * 16
    checkforxframe = requests.post(target_url)
    if checkforxframe.raw.headers.has_key('x-frame-options'):
        print "[+] X-Frame-Options is Present.\n[+] Checking For Methods.."
        list_Method = ['DENY', 'SAMEORIGIN', 'ALLOW-FROM', 'Deny', 'SameOrigin', 'Allow-From', 'deny', 'sameorigin', 'allow-from']
        for x in range(0, 9):
            if list_Method[x] in checkforxframe.raw.getheader('x-frame-options'):
                print "[+] Following XFrame Option Found:" + colored(" {0}", "green").format(list_Method[x])
            else:
                print "[-] Not Found:" + colored(" {0}", "green").format(list_Method[x])
    else:
        print "[-] X-Frame-Options Not Found."
def XxssPro():
    """Report the target's X-XSS-Protection header value (0, 1, or 1; mode=block)."""
    print colored("+++", "green") * 16
    print colored(" -- Checking for X-XSS-Protection -- ", "green")
    print colored("+++", "green") * 16
    checkxsspro = requests.post(target_url)
    try:
        if checkxsspro.raw.headers.has_key('x-xss-protection'):
            print "[+] X-XSS Protection is Present.\n[+] Checking Value.."
            x_value = ['0','1','1; mode=block']
            # check '1; mode=block' before plain '1' since '1' is a substring
            if x_value[0] in checkxsspro.raw.getheader('x-xss-protection'):
                print "[+] X-XSS-Protection value is: " + colored("{0}", "green").format(x_value[0])
            elif x_value[2] in checkxsspro.raw.getheader('x-xss-protection'):
                print "[+] X-XSS-Protection value is: " + colored("{0}", "green").format(x_value[2])
            else:
                print "[+] X-XSS-Protection value is: " + colored("{0}", "green").format(x_value[1])
        else:
            print "[-] X-XSS-Protection Header Not Found."
    except:
        print "-- Exception Cacthed --"
def Attack():
    """Dispatch the user's menu choice (globals set by input()) to a check.

    '1'..'4' run a single check, '5' runs all four in sequence; anything
    else re-prompts.  Each branch re-prompts afterwards via input().
    """
    if select_mode == '1':
        try:
            CheckServer()
            input()
        except:
            print "-- Exception Catched --\n"
            input()
    elif select_mode == '2':
        try:
            HttpOnly()
            input()
        except:
            print "-- Exception Catched --"
            input()
    elif select_mode == '3':
        try:
            XframeOptions()
            input()
        except:
            print "-- Exception Catched --"
            input()
    elif select_mode == '4':
        try:
            XxssPro()
            input()
        except:
            print "-- Exception Catched --"
            input()
    elif select_mode == '5':
        try:
            CheckServer()
            HttpOnly()
            XframeOptions()
            XxssPro()
            input()
        except:
            print "-- Exception Catched --"
            input()
    else:
        print "[-] Invalid Choice!\n"
        input()
# Main loop: keep dispatching the chosen scan until interrupted.
while select_mode > 0:
    Attack()
def main():
    # NOTE(review): the __main__ guard is nested *inside* main(), so main()
    # is never invoked, and calling it would recurse; the guard was likely
    # meant to live at module level -- confirm intent before fixing.
    if __name__ == '__main__':
        main()
| Python | 0 | |
6486487dc1fc4972dcd18bc0e92bcae602f4d900 | Create blacklist.py | cogs/blacklist.py | cogs/blacklist.py | Python | 0.000006 | ||
b02e308dfc2993123486a5660b6d14c98f19b389 | Create Hamel_ZipCode_API.py | Hamel_ZipCode_API.py | Hamel_ZipCode_API.py | def back_out_unicode(stringval):
return str(stringval.encode('utf-8').decode('ascii', 'ignore'))
def zip_info(zipcode):
    """
    Takes a zip code and goes to www.uszip.com/zip/*zipcode and
    screen scrapes relevant information down. *zipcode is the 5-digit zipcode parameter
    input value zipcode must be a string value
    returns a list of tuples, which are (key, value) pairs
    NOTE: Python 2 only (uses the ``<>`` operator, print statements and
    urllib.urlopen).
    Written by Hamel Husain
    hamel.husain@gmail.com
    """
    #Type Safety
    if type(zipcode) <> str or len(zipcode) > 5:
        raise Exception('zipcode passed to this function must be a 5-digit string')
    from bs4 import BeautifulSoup
    import urllib
    data = [('zipcode', str(zipcode))] #Initializes zipcode list
    webaddress = 'http://www.uszip.com/zip/'+str(zipcode) #build web address
    try:
        html_collector = urllib.urlopen(webaddress).read() #read contents of HTML into variable
    except:
        print str(zipcode) #+ ' was an invalid zipcode, please try again - must be a 5 digit string value'
        raise
    soup = BeautifulSoup(html_collector) #make a Beautiful Soup object from HTML string so that we can parse
    raw_html = soup.prettify() #this is so you can inspect html, will dump this into a file called sample_html.txt
    with open('sample_html.txt', 'w') as html: #so you can dump a copy of the HTML somewhere
        html.write(back_out_unicode(raw_html))
    ##############
    #Checks to see if zipcode returned by website is the one you input!##
    #############
    zipcode_returned = back_out_unicode(soup.find('strong').text.strip())
    if zipcode <> zipcode_returned:
        print '%s was not found as a zipcode! Will Skip This' % (zipcode)
        zip_valid = False
    else:
        zip_valid = True
    city = back_out_unicode(soup.find('title').text.strip().replace(' zip code', ''))
    ##Mark Zip Code as Retrieved Or Not##
    data.append(('Zip Found', zip_valid))
    if zip_valid:
        data.append(('City', city))
    #return an iterable that has all of the results for 'dt', or the fieldnames
    search_results_titles = soup.findAll('dt') #for this websites, titles are tagged 'dt', numbers are tagged 'dd'
    for label in search_results_titles:
        current_name = label.name #tag name
        current_string = back_out_unicode(label.text.strip()) #tag text
        next_name = label.find_next_sibling().name #next tag's name
        next_string = back_out_unicode(label.find_next_sibling().text.strip()) #next tag's text
        #Want a 'dt' tag to be followed by a 'dd' tag, otherwise don't need it to be part of the result
        if (current_name <> next_name) and current_name == 'dt' and next_name == 'dd' and zip_valid:
            data.append((current_string, next_string))
    return data
if __name__ == '__main__':
print 'you have run the main file!'
hamel = zip_info('75019')
| Python | 0.000005 | |
9ec957af0c3d57dff4c05c1b7ed3e66e1c033f6b | Add nagios check for idot snowplow ingest | nagios/check_idot_snowplows.py | nagios/check_idot_snowplows.py | """
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
# Connect to the 'postgis' database read-only and count snowplow position
# reports received within the last 30 minutes.
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
 select count(*) from idot_snowplow_current WHERE
 valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
# Nagios exit codes: 0 = OK, 1 = WARNING, 2 = CRITICAL; the text after '|'
# is perfdata (value;warn;crit;min).
if count > 2:
    print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
    sys.exit(0)
elif count > 1:
    print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
    sys.exit(1)
else:
    print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
    sys.exit(2)
661e69ece73a609d230384874da9722de385d854 | Change links to a dictionary, iterator instead of lambda | uoftscrapers/scrapers/libraries/__init__.py | uoftscrapers/scrapers/libraries/__init__.py | from ..utils import Scraper
from bs4 import BeautifulSoup, NavigableString
from datetime import datetime, date
from collections import OrderedDict
import urllib.parse as urlparse
from urllib.parse import urlencode
import re
class Libraries:
"""A scraper for the Libraries at the University of Toronto."""
host = 'https://onesearch.library.utoronto.ca/visit'
campuses_tags = {
'St. George': 'UTSG',
'U of T Mississauga': 'UTM',
'U of T Scarborough': 'UTSC'
}
    @staticmethod
    def scrape(location='.'):
        """Entry point: prepare the output location and return library links.

        NOTE(review): the early ``return`` below makes the remaining lines
        unreachable -- the teaser/info scraping is still unimplemented.
        """
        Scraper.logger.info('Libraries initialized.')
        Scraper.ensure_location(location)
        return Libraries.get_library_link()
        # ['content_link'] -> 'Teaser Text'
        # ['info_link'] -> 'Everything Else'
        library_data_links = Libraries.get_library_link()
        raise NotImplementedError('This scraper has not been implemented yet.')
        Scraper.logger.info('Libraries completed.')
@staticmethod
def get_library_link():
html = Scraper.get(Libraries.host)
soup = BeautifulSoup(html, 'html.parser')
list_obj_arr = soup.select('.view-list-of-libraries')[1].select(
'.view-content')[0].select('.views-row')
library_links = dict()
for l in list_obj_arr:
title = l.h2.text
library_links[title] = {
'content_link': l.select('a')[0]['href'],
'info_link': l.select('a')[1]['href']
}
return library_links | from ..utils import Scraper
from bs4 import BeautifulSoup, NavigableString
from datetime import datetime, date
from collections import OrderedDict
import urllib.parse as urlparse
from urllib.parse import urlencode
import re
class Libraries:
"""A scraper for the Libraries at the University of Toronto."""
host = 'https://onesearch.library.utoronto.ca/visit'
campuses_tags = {'St. George': 'UTSG', 'U of T Mississauga': 'UTM', 'U of T Scarborough': 'UTSC'}
@staticmethod
def scrape(location='.'):
Scraper.logger.info('Libraries initialized.')
Scraper.ensure_location(location)
# ['content'] -> 'Teaser text', ['data]
library_data_links = Libraries.get_library_link()
raise NotImplementedError('This scraper has not been implemented yet.')
Scraper.logger.info('Libraries completed.')
@staticmethod
def get_library_link():
html = Scraper.get(Libraries.host)
soup = BeautifulSoup(html, 'html.parser')
content_links = []
library_info_links = []
list_obj_arr = soup.select('.view-list-of-libraries')[1].select(
'.view-content')[0].select('.views-row')
content_links[:] = [l.select('a')[0]['href'] for l in list_obj_arr]
library_info_links = [l.select('a')[1]['href'] for l in list_obj_arr]
return {'content' : content_links , 'info': library_info_links} | Python | 0 |
717b20e298547685ed0685bd09a4fac541034910 | Add an example map flow | example/map_flows.py | example/map_flows.py | from taskin import task
def get_servers(data):
    """Return the hostnames this flow operates on.

    ``data`` is the payload handed over by the previous task in the flow;
    it is accepted for pipeline compatibility but is not used here.
    """
    hosts = ('foo.example.com', 'bar.example.com')
    return list(hosts)
def create_something(data):
    """Print one creation URL per server.

    ``data`` is a ``(servers, name)`` pair produced upstream in the flow;
    a "Creating: https://<server>/<name>" line is printed for each server.
    """
    servers, name = data
    for host in servers:
        print('Creating: https://%s/%s' % (host, name))
def main():
    """Build the taskin flow and run it: get servers, then fan out
    create_something over args 0..9 via MapTask.

    NOTE(review): ``xrange`` is Python-2-only.
    """
    flow = [
        get_servers,
        task.MapTask(create_something, args=xrange(10))
    ]
    task.do_flow(flow)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
cd2df0032a3978444d6bd15e3b49a20bef495b75 | add blastp | Modules/f10_blast.py | Modules/f10_blast.py | import subprocess,os
def makeblastdb(fastaFile,datatype,outputname):
    """
    this function build database given a fasta file
    * fastaFile: can be gzipped or not
    * datatype: value passed to makeblastdb -dbtype
    * outputname: used for both the database title and output prefix
    """
    # gzipped input is streamed through gunzip into makeblastdb's stdin
    if fastaFile.endswith('.gz'):
        cmd = ('gunzip -c {input} | makeblastdb -in - -dbtype {type} -title {title} '
               '-out {outputname}').format(input=fastaFile,
               type=datatype,outputname=outputname,title=outputname)
    else:
        cmd = ('makeblastdb -in {input} -dbtype {type} -title {title} '
               '-out {outputname}').format(input=fastaFile,
               type=datatype,outputname=outputname,title=outputname)
    # NOTE(review): shell=True with interpolated paths -- do not pass
    # untrusted filenames to this function.
    subprocess.call(cmd,shell=True)
def blastp(query,database,outputFile,threads,evalue,fmt,mapnum):
    """
    This function run blastp
    * query: fasta file which you want to map (may be gzipped)
    * database: database path/name
    * outputFile: tabular blast result
    * threads/evalue/fmt/mapnum: passed to -num_threads / -evalue /
      -outfmt / -num_alignments
    """
    # gzipped queries are streamed through gunzip into blastp's stdin
    if query.endswith('.gz'):
        cmd = ('gunzip -c {input} | blastp -query - -db {database} '
               '-out {outputFile} -evalue {evalue} -outfmt {format} '
               '-seg yes -num_threads {thread} -num_alignments {mapnum}').format(input=query,
               database=database,outputFile=outputFile,evalue=evalue,
               format=str(fmt),thread=str(threads),mapnum=mapnum)
    else:
        cmd = ('blastp -query {input} -db {database} -out {outputFile} '
               '-evalue {evalue} -outfmt {format} -seg yes '
               '-num_threads {thread} -num_alignments {mapnum}').format(input=query,
               database=database,outputFile=outputFile,evalue=evalue,
               format=str(fmt),thread=str(threads),mapnum=mapnum)
    # NOTE(review): shell=True with interpolated paths -- do not pass
    # untrusted filenames to this function.
    subprocess.call(cmd,shell=True)
| Python | 0.000433 | |
7dac3075874a79d51d1b9d0c1551eec9a988f526 | Create Roman_to_Integer.py | Array/Roman_to_Integer.py | Array/Roman_to_Integer.py | Given a roman numeral, convert it to an integer.
Input is guaranteed to be within the range from 1 to 3999.
class Solution:
    """LeetCode 'Roman to Integer': convert a numeral in 1..3999 to an int."""

    # @return an integer
    def romanToInt(self, s):
        """Sum the symbol values, subtracting any symbol that directly
        precedes a strictly larger one (e.g. the I in IV)."""
        value = {"I": 1, "V": 5, "X": 10, "L": 50,
                 "C": 100, "D": 500, "M": 1000}
        total = 0
        for here, ahead in zip(s, s[1:]):
            if value[here] < value[ahead]:
                total -= value[here]
            else:
                total += value[here]
        # the final symbol is always added
        return total + value[s[-1]]
| Python | 0.000617 | |
c810882385e034ca0e888ce093b227198dbb5f76 | Create GPIOTutorialtempLogger.py | GPIOTutorialtempLogger.py | GPIOTutorialtempLogger.py | import RPi.GPIO as GPIO
import time as time
# BCM pin numbering: pin 22 is a button input, pin 17 drives an LED.
GPIO.setmode (GPIO.BCM)
GPIO.setup (22, GPIO.IN )
GPIO.setup (17,GPIO.OUT )
# Block until the button on pin 22 goes high before starting to log.
while True:
    if GPIO.input(22):
        break
print "start"
datafile = open ("tempreading.log","w")
# Sample the 1-wire sensor roughly every 2 s, blinking the LED each cycle,
# until the button is pressed again.
while True:
    GPIO.output (17, GPIO.HIGH)
    tfile = open ("/sys/bus/w1/devices/28-000005658920/w1_slave")
    text = tfile.read()
    tfile.close()
    # second line of w1_slave contains 't=<millidegrees>' as its 10th field
    secondline = text.split ("\n")[1]
    tempData = secondline.split(" ")[9]
    temprature = float (tempData[2:])
    temprature = temprature / 1000
    print temprature
    datafile.write(str(temprature)+ "\n")
    time.sleep (1)
    GPIO.output (17, GPIO.LOW)
    time. sleep (1)
    if GPIO.input (22)==1:
        break
datafile.close()
GPIO.output (17, GPIO.LOW)
| Python | 0.000004 | |
7b54ac1d1bf8cf6e9869e716940814d2d56cb1de | Create Watchers.py | examples/Watchers.py | examples/Watchers.py |
los = []
url = 'https://stocktwits.com/symbol/'
workbook = openpyxl.load_workbook('Spreadsheet.xlsx')
worksheet = workbook.get_sheet_by_name(name = 'Sheet1')
for col in worksheet['A']:
los.append(col.value)
los2 = []
print(los)
for i in los:
stocksite = url +i + '?q=' +i
print(stocksite)
with contextlib.closing(webdriver.PhantomJS(Phantom_Path)) as driver:
#with contextlib.closing(webdriver.Phantom_Path)) as driver:
driver.get(stocksite)
driver.find_element_by_id('sentiment-tab').click()
Bullish = driver.find_elements_by_css_selector('span.bullish:nth-child(1)')
Sentiment = [x.text for x in Bullish]
los2.append(Sentiment[0])
| Python | 0.000001 | |
66fa9698b40fa8365d91aef1ed16b620494052f0 | Add CUDA + MPI example | examples/cuda_mpi.py | examples/cuda_mpi.py | # Demonstration of using MPI and Numba CUDA to perform parallel computation
# using GPUs in multiple nodes. This example requires MPI4py to be installed.
#
# The root process creates an input data array that is scattered to all nodes.
# Each node calls a CUDA jitted function on its portion of the input data.
# Output data is then gathered back to the master node.
#
# Notes/limitations:
#
# 1. It is generally more efficient to avoid initialising all data on the root
# node then scattering it out to all other nodes, and instead each node
# should initialise its own data, but initialisation is done on the root node
# here to keep the example simple.
# 2. If multiple GPUs are available to a single MPI process, additional code may
# need adding to ensure the correct GPU is used by each process - this will
# depend on the exact configuration of the MPI cluster.
#
# This example can be invoked with:
#
# $ mpirun -np <np> python cuda_mpi.py
#
# where np is the number of processes (e.g. 4). For demonstrating the code, this
# does work with a single node and a single GPU, since multiple processes can
# share a single GPU. However, in a production setting, it may be more
# appropriate to provide one GPU per MPI process.
from __future__ import print_function
from mpi4py import MPI
from numba import cuda
import numpy as np
mpi_comm = MPI.COMM_WORLD
# Input data size
total_n = 10
# Process 0 creates input data
if mpi_comm.rank == 0:
input_data = np.arange(total_n, dtype=np.int32)
print("Input:", input_data)
else:
input_data = None
# Compute partitioning of the input array
proc_n = [ total_n // mpi_comm.size + (total_n % mpi_comm.size > n)
for n in range(mpi_comm.size) ]
pos = 0
pos_n = []
for n in range(mpi_comm.size):
pos_n.append(pos)
pos += proc_n[n]
my_n = proc_n[mpi_comm.rank]
my_offset = pos_n[mpi_comm.rank]
print('Process %d, my_n = %d' % (mpi_comm.rank, my_n))
print('Process %d, my_offset = %d' % (mpi_comm.rank, my_offset))
# Distribute input data across processes
my_input_data = np.zeros(my_n, dtype=np.int32)
mpi_comm.Scatterv([input_data, proc_n, pos_n, MPI.INT], my_input_data)
print('Process %d, my_input_data = %s' % (mpi_comm.rank, my_input_data))
# Perform computation on local data
@cuda.jit
def sqplus2(input_data, output_data):
    # Element-wise: output[i] = input[i]**2 + 2.
    # NOTE(review): every launched thread iterates the whole array, so the
    # work is duplicated across threads rather than partitioned; a
    # cuda.grid(1)-indexed loop would divide it -- confirm the example's
    # intent before changing.
    for i in range(len(input_data)):
        d = input_data[i]
        output_data[i] = d * d + 2
my_output_data = np.empty_like(my_input_data)
sqplus2(my_input_data, my_output_data)
print('Process %d, my_output_data = %s' % (mpi_comm.rank, my_output_data))
# Bring result back to root process
if mpi_comm.rank == 0:
output_data = np.empty_like(input_data)
else:
output_data = None
mpi_comm.Gatherv(my_output_data, [output_data, proc_n, pos_n, MPI.INT])
if mpi_comm.rank == 0:
print("Output:", output_data)
MPI.Finalize()
| Python | 0.000002 | |
b44977653e57077118cb0eb0d549758f52beed35 | Add basic example | examples/examples.py | examples/examples.py | from pyrho import *
# Minimal PyRhO run: pick the '6'-keyed model and the step protocol,
# set Prot.phis to three decreasing levels (presumably photon flux values --
# confirm against PyRhO docs), simulate with the pure-Python backend, plot.
RhO = models['6']()
Prot = protocols['step']()
Prot.phis = [1e16, 1e15, 1e14]
Sim = simulators['Python'](Prot, RhO)
Sim.run()
Sim.plot()
| Python | 0.000159 | |
2d5366f455612373ca87ef4d2c8f890b1e6b255f | Add a compliance tool to export a subset of messages. | zerver/management/commands/export_search.py | zerver/management/commands/export_search.py | import os
from argparse import ArgumentParser
from datetime import datetime
from email.headerregistry import Address
from functools import lru_cache, reduce
from operator import or_
from typing import Any
from django.core.management.base import CommandError
from django.db.models import Q
from django.forms.models import model_to_dict
from zerver.lib.export import floatify_datetime_fields, write_table_data
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message, Recipient, Stream, UserProfile
ignore_keys = [
"realm",
"rendered_content_version",
"sending_client",
"search_tsvector",
]
class Command(ZulipBaseCommand):
    """Management command that exports messages matching search terms."""
    help = """Exports the messages matching certain search terms.
This is most often used for legal compliance.
"""
    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register CLI options: target realm, required --output path,
        --force overwrite, and search terms given inline and/or via --file."""
        self.add_realm_args(parser, required=True)
        parser.add_argument(
            "--output",
            metavar="<path>",
            help="File to output JSON results to; it must not exist, unless --force is given",
            required=True,
        )
        parser.add_argument(
            "--force", action="store_true", help="Overwrite the output file if it exists already"
        )
        parser.add_argument(
            "--file",
            metavar="<path>",
            help="Read search terms from the named file, one per line",
        )
        parser.add_argument(
            "search_terms",
            nargs="*",
            metavar="<search term>",
            help="Terms to search for in message body or topic",
        )
    def handle(self, *args: Any, **options: Any) -> None:
        """Collect search terms, run one case-insensitive OR query over
        message content and topic in the realm, and write the matches (with
        resolved sender/recipient names) to the output file."""
        terms = set()
        if options["file"]:
            with open(options["file"], "r") as f:
                terms.update(f.read().splitlines())
        terms.update(options["search_terms"])
        if not terms:
            raise CommandError("One or more search terms are required!")
        if os.path.exists(options["output"]) and not options["force"]:
            raise CommandError(
                f"Output path '{options['output']}' already exists; use --force to overwrite"
            )
        realm = self.get_realm(options)
        # OR together (content icontains term | subject icontains term)
        # for every term.
        limits = reduce(
            or_,
            [Q(content__icontains=term) | Q(subject__icontains=term) for term in terms],
            Q(),
        )
        messages_query = Message.objects.filter(limits, realm=realm).order_by("date_sent")
        def format_sender(full_name: str, delivery_email: str) -> str:
            # RFC 5322 style: Full Name <address@example.com>
            return str(Address(display_name=full_name, addr_spec=delivery_email))
        @lru_cache(maxsize=None)
        def format_recipient(recipient_id: int) -> str:
            # Streams render as '#name'; otherwise list subscribed users.
            recipient = Recipient.objects.get(id=recipient_id)
            if recipient.type == Recipient.STREAM:
                stream = Stream.objects.values("name").get(id=recipient.type_id)
                return "#" + stream["name"]
            users = (
                UserProfile.objects.filter(
                    subscription__recipient_id=recipient.id,
                )
                .order_by("full_name")
                .values_list("full_name", "delivery_email")
            )
            return ", ".join([format_sender(e[0], e[1]) for e in users])
        message_dicts = []
        for message in messages_query:
            item = model_to_dict(message)
            item["recipient_name"] = format_recipient(message.recipient_id)
            item["sender_name"] = format_sender(
                message.sender.full_name, message.sender.delivery_email
            )
            # strip fields that are not useful in the export
            for key in ignore_keys:
                del item[key]
            message_dicts.append(item)
        output = {"zerver_message": message_dicts}
        floatify_datetime_fields(output, "zerver_message")
        # add a human-readable UTC timestamp alongside the float date_sent
        for item in output["zerver_message"]:
            item["date_sent_utc"] = datetime.utcfromtimestamp(int(item["date_sent"])).strftime(
                "%Y-%m-%d %H:%M:%S"
            )
        write_table_data(options["output"], output)
| Python | 0 | |
61139332ce1bcfd145f16b8f3c411e178db4054c | Add some unit tests for the hashing protocol of dtype (fail currently). | numpy/core/tests/test_dtype.py | numpy/core/tests/test_dtype.py | import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
    """Smoke-test hashing of builtin-scalar dtypes."""
    def test_run(self):
        """Only test hash runs at all."""
        # NOTE(review): np.int/np.float/np.complex/np.str/np.object/np.unicode
        # are the builtin-aliasing attributes of the NumPy of this era
        # (removed in NumPy >= 1.24).
        for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
                np.unicode]:
            dt = np.dtype(t)
            hash(dt)
class TestRecord(TestCase):
    """Hashing of structured (record) dtypes: equal dtypes hash equal,
    differing names/titles hash differently."""
    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        a = np.dtype([('yo', np.int)])
        b = np.dtype([('yo', np.int)])
        self.failUnless(hash(a) == hash(b),
                "two equivalent types do not hash to the same value !")
    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', np.int)])
        b = np.dtype([('ye', np.int)])
        self.failUnless(hash(a) != hash(b),
                "%s and %s hash the same !" % (a, b))
    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
            'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
            'titles': ['RRed pixel', 'Blue pixel']})
        self.failUnless(hash(a) != hash(b),
                "%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
    """Hashing of subarray dtypes: equal shapes hash equal, differing
    shapes/ranks hash differently."""
    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (2, 3)))
        self.failUnless(hash(a) == hash(b),
                "two equivalent types do not hash to the same value !")
    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (3, 2)))
        self.failUnless(hash(a) != hash(b),
                "%s and %s hash the same !" % (a, b))
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (2, 2)))
        self.failUnless(hash(a) != hash(b),
                "%s and %s hash the same !" % (a, b))
        a = np.dtype((np.int, (1, 2, 3)))
        b = np.dtype((np.int, (1, 2)))
        self.failUnless(hash(a) != hash(b),
                "%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
    """Test deeply nested subtypes."""
    # placeholder: no nested-subtype hashing tests written yet
    pass
if __name__ == "__main__":
run_module_suite()
| Python | 0.000115 | |
3d11000488ca20e7e34a9f7030a16e69a6b4052f | add examples for trainig 3 | 3-python-intermediate/examples/list_comprehension.py | 3-python-intermediate/examples/list_comprehension.py |
odd = [i for i in range(10) if i % 2]
print(odd) # [1, 3, 5, 7, 9]
odd_squares = [i ** 2 for i in odd]
print(odd_squares) # [1, 9, 25, 49, 81]
first_names = ['Bruce', 'James', 'Alfred']
last_names = ['Wayne', 'Gordon', 'Pennyworth']
heroes = ['{} {}'.format(f, l) for f, l in zip(first_names, last_names)]
print(heroes) # ['Bruce Wayne', 'James Gordon', 'Alfred Pennyworth']
| Python | 0 | |
0886a4efd7b7703d72be4319d7b0295d3bc64151 | Create Tensor_Case.py | Tensor_Case.py | Tensor_Case.py | import tensorflow as tf
# TF1-style interactive session; the .eval() calls below run in it.
sess = tf.InteractiveSession()
# tf.cond: two-way branch -- add when x > y, else subtract.
x = tf.random_uniform([])
y = tf.random_uniform([])
out1 = tf.cond(tf.greater(x,y), lambda:tf.add(x,y), lambda:(tf.subtract(x,y)))
# NOTE(review): each .eval() re-runs the random ops, so the printed x and y
# may not be the values out1 was computed from -- confirm the example intent.
print(x.eval(), y.eval(), out1.eval())
# tf.case: multi-way branch with a default case for the x == y tie.
x = tf.random_uniform([],-1,1)
y = tf.random_uniform([],-1,1)
def f1(): return tf.cast(tf.add(x,y), tf.float32)
def f2(): return tf.cast(tf.subtract(x,y), tf.float32)
def f3(): return tf.cast(tf.constant(0), tf.float32)
out2 = tf.case({tf.less(x, y):f2, tf.greater(x,y):f1}, default=f3)
print(x.eval(), y.eval(), out2.eval())
| Python | 0.000039 | |
dfa492ffc2148d8ffa5c14145e0092be60ef44eb | add an example for pipeline | examples/pipeline.py | examples/pipeline.py | import tornado
import tornadis
@tornado.gen.coroutine
def pipeline_coroutine():
    """Connect to redis, send a SET+GET pipeline in one round trip, and
    print the two replies."""
    # Let's get a connected client
    client = tornadis.Client()
    yield client.connect()
    # Let's make a pipeline object to stack commands inside
    pipeline = tornadis.Pipeline()
    pipeline.stack_call("SET", "foo", "bar")
    pipeline.stack_call("GET", "foo")
    # At this point, nothing is sent to redis
    # Let's submit the pipeline to redis and wait for replies
    results = yield client.call(pipeline)
    # The two replies are in the results array
    print results
    # >>> ['OK', 'bar']
    # Let's disconnect
    client.disconnect()
def stop_loop(future):
    """IOLoop callback: re-raise any exception from the coroutine's future,
    otherwise stop the (module-level) loop."""
    exception = future.exception()
    if exception is not None:
        raise(exception)
    loop.stop()
loop = tornado.ioloop.IOLoop.instance()
loop.add_future(pipeline_coroutine(), stop_loop)
loop.start()
| Python | 0.000001 | |
b75601e0c6bbb83dba4544f9d80b6f71c75fcdec | add missing ordered field to startup program interest | web/impact/impact/migrations/0003_add_ordered_field_to_startup_program_interest.py | web/impact/impact/migrations/0003_add_ordered_field_to_startup_program_interest.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-01-30 10:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter StartupProgramInterest.order to a non-editable, db-indexed
    PositiveIntegerField."""
    dependencies = [
        ('impact', '0002_set_models_to_managed'),
    ]
    operations = [
        migrations.AlterField(
            model_name='startupprograminterest',
            name='order',
            field=models.PositiveIntegerField(db_index=True, editable=False),
        ),
    ]
| Python | 0 | |
c3e7b563c3eeb24aa269f23672b8f469470908b7 | Add an option to redirect user to a page if the key is already expired. | onetime/views.py | onetime/views.py | from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
    """Run one-time-key cleanup (delegates to onetime.utils.cleanup)."""
    utils.cleanup()
def login(request, key, redirect_expired_to=None):
    """Log a user in through a one-time key and redirect them.

    Looks up the Key, rejects it when it is used up or past its expiry
    (redirecting to ``redirect_expired_to`` when given, otherwise returning
    410 Gone), decrements the remaining-usage counter, authenticates the
    key's user, and redirects to the key's own target URL, the ``next``
    query parameter, or ``settings.LOGIN_REDIRECT_URL`` -- in that order.
    """
    # BUG FIX: this view is itself named ``login`` and shadows the
    # module-level ``from django.contrib.auth import login`` import, so the
    # old bare ``login(request, data.user)`` call recursed into this view
    # instead of authenticating.  Bind the auth helper to a local alias.
    from django.contrib.auth import login as auth_login

    data = get_object_or_404(Key, key=key)

    # A key is expired when its usage counter has run out or its expiry
    # timestamp has passed (either limit may be disabled by being None).
    expired = ((data.usage_left is not None and data.usage_left == 0) or
               (data.expires is not None and data.expires < datetime.now()))
    if expired:
        if redirect_expired_to is not None:
            return HttpResponseRedirect(redirect_expired_to)
        return HttpResponseGone()

    if data.usage_left is not None:
        data.usage_left -= 1
        data.save()

    auth_login(request, data.user)

    # Redirect priority: the key's own target, then ?next=, then the
    # site-wide default.  (Renamed from ``next`` to avoid shadowing the
    # builtin.)
    next_url = data.next
    if next_url is None:
        next_url = request.GET.get('next', None)
    if next_url is None:
        next_url = settings.LOGIN_REDIRECT_URL
    return HttpResponseRedirect(next_url)
| from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
| Python | 0 |
3922f75424308753dac5beadb75698971960843a | Write convenience functions in a separate file. | Kane1985/Chapter4/util.py | Kane1985/Chapter4/util.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenient utility functions for exercises in Chapter 4 of Kane 1985.
"""
from __future__ import division
from sympy import diff
from sympy.physics.mechanics import ReferenceFrame, Point, Particle, RigidBody
from sympy.physics.mechanics import cross, dot, Vector
from sympy.physics.mechanics import MechanicsStrPrinter
from sympy.physics.mechanics import inertia_of_point_mass
def msprint(expr):
    """Return *expr* formatted with the mechanics string printer
    (e.g. derivatives shown as q1', speeds as u1)."""
    pr = MechanicsStrPrinter()
    return pr.doprint(expr)
def subs(x, *args, **kwargs):
    """Recursively apply SymPy ``subs`` (followed by ``doit``) to *x*.

    Plain zeros and objects without a ``subs`` method are returned
    unchanged; iterables are mapped element-wise.
    """
    if x == 0:
        return x
    if not hasattr(x, 'subs'):
        if hasattr(x, '__iter__'):
            return map(lambda x: subs(x, *args, **kwargs), x)
        # BUG FIX: a non-iterable object without a ``subs`` method (e.g. a
        # plain int or float) previously fell through to ``x.subs`` below
        # and raised AttributeError; pass it through unchanged instead.
        return x
    return x.subs(*args, **kwargs).doit()
class PartialVelocity(dict):
    """Mapping of Points/ReferenceFrames to their partial velocities.

    Each value maps a generalized speed from ``ulist`` to the corresponding
    partial (angular) velocity, taken in the reference frame ``frame``.
    """
    def __init__(self, frame, ulist, *args, **kwargs):
        self._set_frame(frame)
        self._set_ulist(ulist)
        dict.__init__(self, *args, **kwargs)

    def _set_frame(self, f):
        # frame the partial velocities are measured in
        if not isinstance(f, ReferenceFrame):
            raise TypeError(
                    '{0} is not an instance of ReferenceFrame'.format(f))
        self._frame = f

    def _set_ulist(self, u):
        # list of generalized speeds
        if not isinstance(u, list):
            # BUG FIX: the message previously formatted the undefined name
            # ``f`` (copy/paste from _set_frame), so this error path raised
            # NameError instead of the intended TypeError.  Format ``u``.
            raise TypeError(
                    '{0} is not an instance of list'.format(u))
        self._ulist = u

    @property
    def frame(self):
        return self._frame

    @property
    def ulist(self):
        return self._ulist
def partial_velocities(system, generalized_speeds, frame,
                       kde_map=None, constraint_map=None, express_frame=None):
    """Return a PartialVelocity mapping each Point/ReferenceFrame in
    *system* to ``{u_r: partial velocity w.r.t. u_r}``, taken in *frame*.

    *kde_map*/*constraint_map* are substitution dicts applied to each
    velocity first (kinematic differential equations, motion constraints);
    *express_frame* is the frame the derivatives are expressed in
    (defaults to *frame*).
    """
    partials = PartialVelocity(frame, generalized_speeds)
    if express_frame is None:
        express_frame = frame
    for p in system:
        if p in partials:
            continue
        # translational velocity for Points, angular velocity for frames
        if isinstance(p, Point):
            v = p.vel(frame)
        elif isinstance(p, ReferenceFrame):
            v = p.ang_vel_in(frame)
        if kde_map is not None:
            v = subs(v, kde_map)
        if constraint_map is not None:
            v = subs(v, constraint_map)
        v_r_p = {}
        for u in generalized_speeds:
            # partial velocity = d(v)/d(u); zero vector when v == 0
            v_r_p[u] = Vector([]) if v == 0 else v.diff(u, express_frame)
        partials[p] = v_r_p
    return partials
def generalized_active_forces(partials, forces, uaux=None):
    """Compute the generalized active forces Fr.

    *forces* is a list of ``(point_or_frame, force_or_torque[, integral_fn])``
    tuples; *partials* is a PartialVelocity.  *uaux*, when given, lists
    auxiliary generalized speeds whose contributions are zeroed in the
    non-auxiliary equations.  Returns ``(Fr, ulist)``.
    """
    # use the same frame used in calculating partial velocities
    ulist = partials.ulist
    if uaux is not None:
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))
    Fr = [0] * len(ulist)
    for pf in forces:
        p = pf[0]
        f = pf[1]
        for i, u in enumerate(ulist):
            if partials[p][u] != 0 and f != 0:
                r = dot(partials[p][u], f)
                # if more than 2 args, 3rd is an integral function, where the
                # input is the integrand
                if len(pf) > 2:
                    r = pf[2](r)
                # auxilliary speeds have no effect on original active forces
                if uaux is not None and u not in uaux:
                    r = subs(r, uaux_zero)
                Fr[i] += r
    return Fr, ulist
def _calculate_T_star(rb, frame, kde_map, constraint_map, uaux):
    """Return T* (the inertia torque) of RigidBody *rb* in *frame*.

    T* = -alpha . I - (omega x I) . omega about the mass center, after
    applying the same substitution maps used for the partial velocities.
    """
    # get central inertia
    # I_S/O = I_S/S* + I_S*/O
    I = rb.inertia[0] - inertia_of_point_mass(rb.mass,
        rb.masscenter.pos_from(rb.inertia[1]), rb.frame)

    alpha = rb.frame.ang_acc_in(frame)
    omega = rb.frame.ang_vel_in(frame)
    if uaux is not None:
        # auxilliary speeds do not change alpha, omega
        # use doit() to evaluate terms such as
        # Derivative(0, t) to 0.
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))
        alpha = subs(alpha, uaux_zero)
        omega = subs(omega, uaux_zero)
    if kde_map is not None:
        alpha = subs(alpha, kde_map)
        omega = subs(omega, kde_map)
    if constraint_map is not None:
        alpha = subs(alpha, constraint_map)
        omega = subs(omega, constraint_map)

    return -dot(alpha, I) - dot(cross(omega, I), omega)
def generalized_inertia_forces(partials, bodies,
                               kde_map=None, constraint_map=None,
                               uaux=None):
    """Return (Fr*, ulist): generalized inertia forces for each speed.

    *bodies* may contain RigidBody and Particle objects; rigid bodies also
    contribute an inertia-torque term via ``_calculate_T_star``.
    """
    # use the same frame used in calculating partial velocities
    ulist = partials.ulist
    frame = partials.frame

    if uaux is not None:
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))

    Fr_star = [0] * len(ulist)
    for b in bodies:
        if isinstance(b, RigidBody):
            p = b.masscenter
            m = b.mass
        elif isinstance(b, Particle):
            p = b.point
            m = b.mass
        else:
            raise TypeError('{0} is not a RigidBody or Particle'.format(b))

        # get acceleration of point
        a = p.acc(frame)
        if uaux is not None:
            # auxilliary speeds do not change a
            a = subs(a, uaux_zero)
        if kde_map is not None:
            a = subs(a, kde_map)
        if constraint_map is not None:
            a = subs(a, constraint_map)

        # get T* for RigidBodys
        if isinstance(b, RigidBody):
            T_star = _calculate_T_star(b, frame, kde_map, constraint_map, uaux)

        for i, u in enumerate(ulist):
            force_term = 0
            torque_term = 0

            # inertia force term
            force_term = dot(partials[p][u], -m*a)

            # add inertia torque term for RigidBodys
            if isinstance(b, RigidBody):
                torque_term = dot(partials[b.frame][u], T_star)

            # auxilliary speeds have no effect on original inertia forces
            if uaux is not None and u not in uaux:
                force_term = subs(force_term, uaux_zero)
                torque_term = subs(torque_term, uaux_zero)

            Fr_star[i] += force_term + torque_term

    return Fr_star, ulist
| Python | 0 | |
92077ecd268a6ca04f2b413fd3535d4cc358c97b | Create serializers.py | app/grandchallenge/algorithms/serializers.py | app/grandchallenge/algorithms/serializers.py | from rest_framework import serializers
from grandchallenge.algorithms.models import Algorithm, Job, Result
class AlgorithmSerializer(serializers.ModelSerializer):
    """REST serializer for Algorithm; exposes only the primary key."""
    class Meta:
        model = Algorithm
        fields = ['pk']
class ResultSerializer(serializers.ModelSerializer):
    """REST serializer for Result, including its job, images and output."""
    class Meta:
        model = Result
        fields = ['pk', 'job', 'images', 'output']
class JobSerializer(serializers.ModelSerializer):
    """REST serializer for Job: the algorithm applied to an input image."""
    class Meta:
        model = Job
        fields = ['pk', 'algorithm', 'image']
| Python | 0.000002 | |
b269ed70223591c81d13f97e48c74ced12cec661 | Update 4-keys-keyboard.py | Python/4-keys-keyboard.py | Python/4-keys-keyboard.py | # Time: O(n)
# Space: O(1)
class Solution(object):
    def maxA(self, N):
        """
        Maximum number of 'A's printable with N key presses
        (A, Ctrl-A, Ctrl-C, Ctrl-V).
        :type N: int
        :rtype: int
        """
        # Fewer than 7 presses: pressing 'A' every time is optimal.
        if N < 7:
            return N
        # dp[i % 6] holds the best count after i presses; only the last six
        # values are ever read, so the table is used as a rolling window.
        # ``range`` (instead of ``xrange``) keeps this Python 2/3 compatible
        # with identical results.
        dp = [i for i in range(N + 1)]
        for i in range(7, N + 1):
            # Best of: select-all/copy then 3 pastes, or then 4 pastes.
            dp[i % 6] = max(dp[(i - 4) % 6] * 3, dp[(i - 5) % 6] * 4)
        return dp[N % 6]
| # Time: O(n)
# Space: O(1)
class Solution(object):
    def maxA(self, N):
        """
        :type N: int
        :rtype: int
        """
        # Fewer than 7 presses: pressing 'A' every time is optimal.
        if N < 7:
            return N
        # dp[i % 6] holds the best count after i presses; only the last
        # six values are ever read (rolling window).
        dp = [i for i in xrange(N+1)]
        for i in xrange(7, N+1):
            # Best of: select-all/copy then 3 pastes, or then 4 pastes.
            dp[i % 6] = max(dp[(i-4) % 6]*3,dp[(i-5) % 6]*4)
        return dp[N % 6]
| Python | 0.000004 |
31f4479194239548bae6eff2650735ddf4279523 | Add files via upload | DatabaseTest.py | DatabaseTest.py | import pygame
import time
import random
import BattlePortDatabase
pygame.init()

# Window dimensions.
display_width = 800
display_height = 600

# RGB colour constants.
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)

block_color = (53, 115, 255)
pid = 0
# Car sprite width in pixels — presumably matches image1.png; verify.
car_width = 73

gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('A bit Racey')

clock = pygame.time.Clock()

carImg = pygame.image.load('image1.png')
def things_dodged(count):
    """Render the current dodge counter in the top-left corner."""
    font = pygame.font.SysFont(None, 25)
    text = font.render("Dodged: " + str(count), True, black)
    gameDisplay.blit(text, (0, 0))
def things(thingx, thingy, thingw, thingh, color):
    """Draw the falling block as a solid rectangle."""
    pygame.draw.rect(gameDisplay, color, [thingx, thingy, thingw, thingh])
def car(x, y):
    """Blit the car sprite at position (x, y)."""
    gameDisplay.blit(carImg, (x, y))
def text_objects(text, font):
    """Return (surface, rect) for *text* rendered in *font*."""
    textSurface = font.render(text, True, black)
    return textSurface, textSurface.get_rect()
def message_display(text):
    """Show *text* centred on screen for two seconds, then restart the game.

    NOTE(review): this calls game_loop() rather than returning, so every
    displayed message deepens the call stack by one game_loop frame.
    """
    largeText = pygame.font.Font('freesansbold.ttf', 115)
    TextSurf, TextRect = text_objects(text, largeText)
    TextRect.center = ((display_width / 2), (display_height / 2))
    gameDisplay.blit(TextSurf, TextRect)

    pygame.display.update()

    time.sleep(2)

    game_loop()
def crash():
    """Display the crash message (which restarts the game afterwards)."""
    message_display('You Crashed')
def game_loop():
    """Main game loop: steer the car with the arrow keys, dodge the block,
    and write the score to the database when the block is hit."""
    # Car start position and current horizontal velocity.
    x = (display_width * 0.45)
    y = (display_height * 0.8)

    x_change = 0

    # Initial state of the falling block.
    thing_startx = random.randrange(0, display_width)
    thing_starty = -600
    thing_speed = 4
    thing_width = 100
    thing_height = 100

    thingCount = 1

    dodged = 0

    gameExit = False

    while not gameExit:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5
                if event.key == pygame.K_RIGHT:
                    x_change = 5

            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0

        x += x_change
        gameDisplay.fill(white)

        # things(thingx, thingy, thingw, thingh, color)
        things(thing_startx, thing_starty, thing_width, thing_height, block_color)
        thing_starty += thing_speed
        car(x, y)
        things_dodged(dodged)

        # Driving off either edge of the screen counts as a crash.
        if x > display_width - car_width or x < 0:
            crash()

        # Block passed the bottom: respawn it and increase the difficulty.
        if thing_starty > display_height:
            thing_starty = 0 - thing_height
            thing_startx = random.randrange(0, display_width)
            #
            dodged += 1
            #
            thing_speed += 1
            thing_width += (dodged * 1.2)

        if y < thing_starty + thing_height:
            print('y crossover')

            if x > thing_startx and x < thing_startx + thing_width or x + car_width > thing_startx and x + car_width < thing_startx + thing_width:
                print('x crossover')
                ##
                score = dodged
                # after the game ends, the data below is written into the database
                BattlePortDatabase.data_entry(random.randint(10000, 100000), 'eenNaam', score)
                ##
                #[crashes the game]BattlePortDatabase.kweerie("SELECT * FROM scores where naam = 'Default'")
                # NOTE(review): crash() -> message_display() -> game_loop()
                # recurses instead of restarting cleanly, deepening the stack.
                crash()

        pygame.display.update()
        clock.tick(60)
# Start the game immediately, then shut pygame down when the loop exits.
game_loop()
pygame.quit()
quit() | Python | 0 | |
1dd3e7436c19ba3146be6e34da39bd81dc1efd6e | Implement AES file encryption and decryption | file_crypto_tools.py | file_crypto_tools.py | ############ Module with cryptographics functions for Storj GUI Client ##########
## Based on: <http://stackoverflow.com/questions/16761458/how-to-aes-encrypt-decrypt-files-using-python-pycrypto-in-an-openssl-compatible> ##
from hashlib import md5
from Crypto.Cipher import AES
from Crypto import Random
class FileCrypto():
    """File encryption helpers: AES-CBC with an OpenSSL-style salted header.

    Follows the OpenSSL-compatible recipe linked in the module header;
    written for Python 2 byte-strings (``str``).
    """
    def encrypt_file (self, algorithm, file_path, encrypted_file_save_path, password):
        """Encrypt *file_path* into *encrypted_file_save_path*.

        Only "AES" is recognised; other algorithm names are silently ignored.
        """
        if algorithm == "AES":
            with open(file_path, 'rb') as in_file, open(encrypted_file_save_path, 'wb') as out_file:
                self.encrypt_file_aes(in_file, out_file, password)

    def decrypt_file (self, algorithm, file_path, decrypted_file_save_path, password):
        """Decrypt *file_path* into *decrypted_file_save_path* (AES only)."""
        if algorithm == "AES":
            with open(file_path, 'rb') as in_file, open(decrypted_file_save_path, 'wb') as out_file:
                self.decrypt_file_aes(in_file, out_file, password)

    def derive_key_and_iv(self, password, salt, key_length, iv_length):
        """Derive (key, iv) from password+salt by iterated MD5.

        Mirrors OpenSSL's EVP_BytesToKey-with-MD5 scheme (per the recipe
        referenced in the module header) for ``openssl enc`` compatibility.
        """
        d = d_i = ''
        while len(d) < key_length + iv_length:
            d_i = md5(d_i + password + salt).digest()
            d += d_i
        return d[:key_length], d[key_length:key_length + iv_length]

    def encrypt_file_aes(self, in_file, out_file, password, key_length=32):
        """AES-256-CBC encrypt in_file -> out_file with a 'Salted__' header."""
        bs = AES.block_size
        # Random salt fills the rest of the first block after 'Salted__'.
        salt = Random.new().read(bs - len('Salted__'))
        key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        out_file.write('Salted__' + salt)
        finished = False
        while not finished:
            chunk = in_file.read(1024 * bs)
            if len(chunk) == 0 or len(chunk) % bs != 0:
                # Final chunk: apply PKCS#7-style padding up to a full block.
                padding_length = bs - (len(chunk) % bs)
                chunk += padding_length * chr(padding_length)
                finished = True
            out_file.write(cipher.encrypt(chunk))

    def decrypt_file_aes(self, in_file, out_file, password, key_length=32):
        """Decrypt a 'Salted__'-prefixed AES-CBC stream and strip padding."""
        bs = AES.block_size
        salt = in_file.read(bs)[len('Salted__'):]
        key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        next_chunk = ''
        finished = False
        while not finished:
            # One chunk of lookahead so padding can be removed from the
            # true final chunk only.
            chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
            if len(next_chunk) == 0:
                padding_length = ord(chunk[-1])
                if padding_length < 1 or padding_length > bs:
                    raise ValueError("bad decrypt pad (%d)" % padding_length)
                # all the pad-bytes must be the same
                if chunk[-padding_length:] != (padding_length * chr(padding_length)):
                    # this is similar to the bad decrypt:evp_enc.c from openssl program
                    raise ValueError("bad decrypt")
                chunk = chunk[:-padding_length]
                finished = True
            out_file.write(chunk)
| Python | 0.005043 | |
1633e8b286ddeec706d496931713e3ac7b93b780 | Declare flaskext a namespace package | flaskext/__init__.py | flaskext/__init__.py | import pkg_resources
pkg_resources.declare_namespace(__name__)
| Python | 0.000006 | |
c69572c42da27357f8cb01299c309e47ff033e7f | Create docker-swarm-dns.py | docker-swarm-dns.py | docker-swarm-dns.py | #!/usr/bin/env python3.6
from time import sleep
import docker, \
dns.resolver, \
dns.query, \
dns.tsigkeyring, \
dns.update, \
os, \
sys
# Swarm node IPs; each published service name gets one A record per node.
swnodes = ['192.168.15.201','192.168.15.202','192.168.15.203','192.168.15.204','192.168.15.205']
# Master/slave DNS servers with their TSIG (rndc) keys for dynamic updates.
dnservers = {'master':{'ip':'192.168.2.6','key':'EMtUbnXU3as1Eczq2bVZ8g=='},'slave':{'ip':'192.168.2.7','key':'ctWc6TO3tD9YMV1QYgh9Jg=='}}
# Zone the records are managed in.
domain = 'subsident.docker.'
# Poll interval in seconds (only used for sleep(); not a DNS record TTL).
ttl = int(os.environ['UPDATE'])
def docker_query():
    """Poll the Docker swarm forever and sync service names into DNS.

    Every ``ttl`` seconds, collect the names of services labelled
    ``add.dns=true`` and add/remove DNS records for the difference against
    the previous poll.
    """
    conn = docker.from_env()
    serv_pre = set()
    while True:
        serv_cur = set()
        for service in conn.services.list():
            if 'add.dns' in service.attrs['Spec']['Labels']:
                if service.attrs['Spec']['Labels']['add.dns'] == 'true':
                    serv_cur.add(service.name)
        if serv_pre != serv_cur:
            # Services that appeared / disappeared since the last poll.
            add = serv_cur.difference(serv_pre)
            rm = serv_pre.difference(serv_cur)
            if add:
                print('ADD', add)
                for svc in add:
                    dns_add(svc)
            if rm:
                print('DEL', rm)
                for svc in rm:
                    dns_remove(svc)
        serv_pre = serv_cur
        sleep(ttl)
def dns_add(svc):
    """Add A records for service *svc* (one per swarm node) on every DNS server.

    Records are created with a hard-coded TTL of 15 seconds; the TCP update
    response is assigned but not checked.
    """
    for host, conf in dnservers.items():
        print('Add DNS Record \''+svc+'\' sent to',host,'dnserver ('+conf['ip']+')')
        keyring = dns.tsigkeyring.from_text({
            'rndc-key.' : conf['key']
        })
        update = dns.update.Update(domain, keyring=keyring)
        for swip in swnodes:
            update.add(svc, 15, 'a', swip)
        resp = dns.query.tcp(update, conf['ip'])
def dns_remove(svc):
    """Delete all A records for service *svc* on every configured DNS server.

    The TCP update response is assigned but not checked.
    """
    for host, conf in dnservers.items():
        print('Remove DNS Record \''+svc+'\' sent to',host,'dnserver ('+conf['ip']+')')
        keyring = dns.tsigkeyring.from_text({
            'rndc-key.' : conf['key']
        })
        update = dns.update.Update(domain, keyring=keyring)
        update.delete(svc, 'a')
        resp = dns.query.tcp(update, conf['ip'])
if __name__ == "__main__":
    # Run the poll loop forever (blocks until interrupted).
    docker_query()
| Python | 0.000006 | |
52dbb4d1f34ef3d637e3d99813591bf12bfa4576 | support for `python -m intelhex`. Let's provide some help on available "-m" executable points. | intelhex/__main__.py | intelhex/__main__.py | # Copyright (c) 2016, Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if __name__ == '__main__':
print("Welcome to IntelHex Python library.")
print()
print("The intelhex package has some executable points:")
print(" python -m intelhex.test -- easy way to run unit tests.")
| Python | 0 | |
e9e0a0eeaf985e5c8f74dc6cfb9110f7b3c152e4 | test workers | myria/test/test_workers.py | myria/test/test_workers.py | from httmock import urlmatch, HTTMock
from json import dumps as jstr
import unittest
from myria import MyriaConnection
@urlmatch(netloc=r'localhost:8753')
def local_mock(url, request):
    """Fake Myria REST endpoint answering the three worker queries.

    Returns None for any other path so httmock falls through.
    """
    # NOTE: the previous version declared ``global query_counter``, but that
    # name was never defined or used anywhere; the dead statement is removed.
    if url.path == '/workers':
        return jstr({'1': 'localhost:9001', '2': 'localhost:9002'})
    elif url.path == '/workers/alive':
        return jstr([1, 2])
    elif url.path == '/workers/worker-1':
        return jstr("localhost:9001")
    return None
class TestQuery(unittest.TestCase):
    """Tests for MyriaConnection worker queries against a mocked REST API."""

    def setUp(self):
        # Using setUp (rather than overriding __init__) is the supported
        # TestCase hook and keeps the default constructor signature intact.
        with HTTMock(local_mock):
            self.connection = MyriaConnection(hostname='localhost', port=8753)

    def test_workers(self):
        with HTTMock(local_mock):
            workers = self.connection.workers()
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(workers, {'1': 'localhost:9001',
                                   '2': 'localhost:9002'})

    def test_alive(self):
        with HTTMock(local_mock):
            workers = self.connection.workers_alive()
        self.assertEqual(set(workers), set([1, 2]))

    def test_worker_1(self):
        with HTTMock(local_mock):
            worker = self.connection.worker(1)
        self.assertEqual(worker, 'localhost:9001')
| Python | 0.00001 | |
2349d603ca887961441b5b3f436d6cffaaecb291 | Add pyMetascanAPI class | pyMetascanAPI.py | pyMetascanAPI.py | import requests
import os
class pyMetascanAPI:
    """Minimal client for the Metascan Online v1 REST API."""
    API_ENDPOINT = 'https://api.metascan-online.com/v1/'
    API_KEY = ''
    FILE_EXT = 'file'
    DATA_EXT = 'file/'
    HASH_EXT = 'hash/'

    def __init__(self, api_key):
        # Per-instance key overrides the empty class-level default.
        self.API_KEY = api_key

    def fileUpload(self, file):
        """Submit *file* for scanning and return the parsed JSON response.

        NOTE(review): ``file`` is used as a filesystem path for the
        ``filename`` header but is also sent verbatim as the POST body, so
        the path string (not the file contents) is uploaded — confirm what
        callers pass here.
        """
        r = self.makeRequest(self.getFileEndpoint(), 'POST', file)
        return r.json()

    def retrieveReport(self, data_id):
        """Fetch the scan report for a previously returned data_id."""
        r = self.makeRequest(self.getDataEndpoint(data_id))
        return r.json()

    def hashLookup(self, hash):
        """Look up a file hash and return the parsed JSON response."""
        r = self.makeRequest(self.getHashEndpoint(hash))
        return r.json()

    def makeRequest(self, url, method='GET', file=None):
        """Issue a GET or POST to *url* with the API key header."""
        headers = {'apikey' : self.API_KEY}
        if method == 'POST':
            headers.update({'filename' : os.path.basename(file)})
            return requests.post(url, file, headers=headers)
        else:
            return requests.get(url, headers=headers)

    def getFileEndpoint(self):
        """URL for file submissions."""
        return self.API_ENDPOINT + self.FILE_EXT

    def getDataEndpoint(self, data_id):
        """URL for report retrieval by data_id."""
        return self.API_ENDPOINT + self.DATA_EXT + data_id

    def getHashEndpoint(self, hash):
        """URL for hash lookups."""
        return self.API_ENDPOINT + self.HASH_EXT + hash
| Python | 0 | |
feefc96050d3906730fe6d366430d7478204d168 | Add solution to 121. | 121/121.py | 121/121.py | """
A bag contains one red disc and one blue disc. In a game of chance a player
takes a disc at random and its colour is noted. After each turn the disc is
returned to the bag, an extra red disc is added, and another disc is taken at
random.
The player pays £1 to play and wins if they have taken more blue discs than red
discs at the end of the game.
If the game is played for four turns, the probability of a player winning is
exactly 11/120, and so the maximum prize fund the banker should allocate for
winning in this game would be £10 before they would expect to incur a loss.
Note that any payout will be a whole number of pounds and also includes the
original £1 paid to play the game, so in the example given the player actually
wins £9.
Find the maximum prize fund that should be allocated to a single game in which
fifteen turns are played.
Solution comment: Very quick, ~0.5 ms (would be much less for C++ impl.).
Derived explicitly the N = 4 case above, from which the pattern emerges. The
memoization helps a whole bunch. Would have been slightly better to use DP and
build the table directly, but memoizing a recursive solution is so much nicer.
"""
import time
import numpy as np
from functools import lru_cache
def P(n):
    """Probability of drawing a blue in the n-th round.

    Works element-wise on numpy arrays as well as scalars.
    """
    return 1 / (n + 1)

@lru_cache(maxsize=None)
def Prob(blues, n, N):
    """Probability of drawing at least *blues* blue discs, starting at
    round *n* of *N* total rounds."""
    draws_left = N - n + 1
    if blues <= 0:
        # Zero (or fewer) blues is always achievable.
        return 1
    if blues > draws_left:
        # Cannot draw more blues than there are draws remaining.
        return 0
    if blues == draws_left:
        # Every remaining draw must be blue.
        return np.prod(P(np.arange(n, N + 1)))
    # Either draw blue now and need one fewer, or draw red and still need
    # the same number from the remaining rounds.
    blue_now = P(n) * Prob(blues - 1, n + 1, N)
    red_now = (1 - P(n)) * Prob(blues, n + 1, N)
    return blue_now + red_now
if __name__ == "__main__":
    t0 = time.time()
    rounds = 15
    bet = 1
    # Winning = strictly more blues than reds, i.e. at least rounds//2 + 1 blues.
    winning_prob = Prob(rounds // 2 + 1, 1, rounds)
    print('Execution time: {:.3f} ms'.format((time.time() - t0) * 1e3))
    print('Max payout with {} rounds: {}£'.format(rounds, int(bet / winning_prob)))
| Python | 0.000026 | |
7abe3e8039162fcb1eb5a1c40c2b22a89122e103 | Use LLDB in gypv8sh to debug random crashes. | tools/gypv8sh.py | tools/gypv8sh.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is used by chrome_tests.gypi's js2webui action to maintain the
argument lists and to generate inlinable tests.
"""
import json
import optparse
import os
import subprocess
import sys
import shutil
def main ():
  """Run d8 (v8_shell) over the js2webui generator and write its outputs.

  Expects exactly nine positional arguments (see the usage string).  If d8
  fails on OS X, the run is retried under LLDB to capture a backtrace
  (workaround for crbug.com/370551); on any failure the partially written
  outputs are removed before re-raising.
  """
  parser = optparse.OptionParser()
  parser.set_usage(
      "%prog v8_shell mock.js test_api.js js2webui.js "
      "testtype inputfile inputrelfile cxxoutfile jsoutfile")
  parser.add_option('-v', '--verbose', action='store_true')
  parser.add_option('-n', '--impotent', action='store_true',
                    help="don't execute; just print (as if verbose)")
  parser.add_option('--deps_js', action="store",
                    help=("Path to deps.js for dependency resolution, " +
                          "optional."))
  (opts, args) = parser.parse_args()
  if len(args) != 9:
    parser.error('all arguments are required.')
  (v8_shell, mock_js, test_api, js2webui, test_type,
      inputfile, inputrelfile, cxxoutfile, jsoutfile) = args
  cmd = [v8_shell]
  # Pass the ICU data file alongside v8_shell if it exists.
  icudatafile = os.path.join(os.path.dirname(v8_shell), 'icudtl.dat')
  if os.path.exists(icudatafile):
    cmd.extend(['--icu-data-file=%s' % icudatafile])
  # Arguments are handed to the JS side as a JSON-encoded global.
  arguments = [js2webui, inputfile, inputrelfile, opts.deps_js,
               cxxoutfile, test_type]
  cmd.extend(['-e', "arguments=" + json.dumps(arguments), mock_js,
              test_api, js2webui])
  if opts.verbose or opts.impotent:
    print cmd
  if not opts.impotent:
    try:
      p = subprocess.Popen(
          cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
      out, err = p.communicate()
      if p.returncode:
        # TODO(jochen): Remove once crbug.com/370551 is resolved.
        if sys.platform == 'darwin':
          # Re-run d8 under LLDB to get a backtrace for the flaky crash.
          sys.path.insert(0, '/Developer/Library/PrivateFrameworks/'
                             'LLDB.framework/Resources/Python')
          try:
            import lldb
          except:
            raise Exception("Could not load lldb module")
          debugger = lldb.SBDebugger.Create()
          debugger.SetAsync(False)
          target = debugger.CreateTargetWithFileAndArch(
              cmd[0], lldb.LLDB_ARCH_DEFAULT)
          if not target:
            raise Exception("Failed to create d8 target")
          process = target.LaunchSimple(cmd[1:], None, os.getcwd())
          if not process:
            raise Exception("Failed to start d8")
          if process.GetState() == lldb.eStateStopped:
            for thread in process:
              print "Thread (id %d)" % thread.GetThreadID()
              for frame in thread:
                print frame
              print ""
            raise Exception(
                "d8 crashed, please report this at http://crbug.com/370551")
          else:
            # For some reason d8 worked this time...
            out = ''
            while True:
              s = process.GetSTDOUT(4096)
              if s == '':
                break
              out += s
      with open(cxxoutfile, 'wb') as f:
        f.write(out)
      shutil.copyfile(inputfile, jsoutfile)
    except Exception, ex:
      # Don't leave partial outputs behind on failure.
      if os.path.exists(cxxoutfile):
        os.remove(cxxoutfile)
      if os.path.exists(jsoutfile):
        os.remove(jsoutfile)
      raise
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is used by chrome_tests.gypi's js2webui action to maintain the
argument lists and to generate inlinable tests.
"""
import json
import optparse
import os
import subprocess
import sys
import shutil
def main ():
  """Run d8 (v8_shell) over the js2webui generator and write its outputs.

  Expects exactly nine positional arguments (see the usage string).  On a
  d8 failure on OS X, the command is re-run under gdb to capture a
  backtrace before raising; partial outputs are removed on any failure.
  """
  parser = optparse.OptionParser()
  parser.set_usage(
      "%prog v8_shell mock.js test_api.js js2webui.js "
      "testtype inputfile inputrelfile cxxoutfile jsoutfile")
  parser.add_option('-v', '--verbose', action='store_true')
  parser.add_option('-n', '--impotent', action='store_true',
                    help="don't execute; just print (as if verbose)")
  parser.add_option('--deps_js', action="store",
                    help=("Path to deps.js for dependency resolution, " +
                          "optional."))
  (opts, args) = parser.parse_args()
  if len(args) != 9:
    parser.error('all arguments are required.')
  (v8_shell, mock_js, test_api, js2webui, test_type,
      inputfile, inputrelfile, cxxoutfile, jsoutfile) = args
  cmd = [v8_shell]
  # Pass the ICU data file alongside v8_shell if it exists.
  icudatafile = os.path.join(os.path.dirname(v8_shell), 'icudtl.dat')
  if os.path.exists(icudatafile):
    cmd.extend(['--icu-data-file=%s' % icudatafile])
  # Arguments are handed to the JS side as a JSON-encoded global.
  arguments = [js2webui, inputfile, inputrelfile, opts.deps_js,
               cxxoutfile, test_type]
  cmd.extend(['-e', "arguments=" + json.dumps(arguments), mock_js,
              test_api, js2webui])
  if opts.verbose or opts.impotent:
    print cmd
  if not opts.impotent:
    try:
      p = subprocess.Popen(
          cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
      out, err = p.communicate()
      if p.returncode:
        # TODO(jochen): Remove once crbug.com/370551 is resolved.
        if sys.platform == 'darwin':
          # Re-run the command under gdb to get a backtrace for the crash.
          cmd[:0] = ['gdb', '-batch', '-ex', 'run', '-ex', 'bt', '-ex', 'quit',
                     '-args']
          p = subprocess.Popen(
              cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
          out, err = p.communicate()
        raise Exception('Failed to run d8', out, err)
      with open(cxxoutfile, 'wb') as f:
        f.write(out)
      shutil.copyfile(inputfile, jsoutfile)
    except Exception, ex:
      # Don't leave partial outputs behind on failure.
      if os.path.exists(cxxoutfile):
        os.remove(cxxoutfile)
      if os.path.exists(jsoutfile):
        os.remove(jsoutfile)
      raise
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000004 |
5e220c5529ca7279979939716c28997876145b7b | Create ac_cover_pic_down.py | ac_cover_pic_down.py | ac_cover_pic_down.py | #coding=utf-8
import urllib
import urllib2
import os
# Endpoints that redirect to a random cover / face image.
cover='http://cover.acfunwiki.org/cover.php'
face='http://cover.acfunwiki.org/face.php'
# Running download counter (1-based).
now=1
# Target directory for downloads (Windows path separators).
local=os.getcwd()+'\\download\\'
url_1=face# select the download source (face or cover)
# Session statistics.
exist=0
success=0
fail=0
all=0
def download(num,yes):
    """Download *num* random images, skipping files that already exist.

    When *yes* is 1, failed or already-existing files do not advance the
    ``now`` counter (so *num* successful downloads are attempted); when 0,
    every attempt advances it.  Prints a summary and re-runs main() at the
    end (intentional prompt loop).
    """
    global now
    global exist
    global success
    global fail
    global all
    try:# create the download directory if it does not exist yet
        os.makedirs(local)
    except WindowsError:
        None
    if num >0:
        while now<=num:
            # Follow the redirect to learn the actual image URL/filename.
            url= urllib2.urlopen(url_1).geturl()
            file= url[url.rfind('/')+1:]
            if os.path.exists(local+file):
                print now,'X',file,u'已存在'
                exist=exist+1
                if yes==0:
                    now=now+1
            else:
                try:# download the image
                    urllib.urlretrieve(url,local+file)
                    print now,'√',file,u'下载成功'
                    success=success+1
                    now=now+1
                except IOError:
                    print now,'X',file,u'下载失败!!'
                    fail=fail+1
                    if yes==0:
                        now=now+1
            all=all+1
    print u'结束'
    print u'共下载',str(all),u'成功',str(success),u'已存在',str(exist),u'失败',str(fail)
    # Reset counters, then prompt again via main() (mutual recursion).
    now=1
    num=0
    yes=0
    all=0
    main()
def main():
    """Prompt for a download count and start downloading."""
    input=raw_input(u'输入下载个数:')
    print u'当前来源:',url_1
    print u'下载目录:',local
    download(int(input),1)# arg 2: whether failed/already-existing files are excluded from the 'now' count (1 yes / 0 no)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print u'######\n'
        print u'用户中断'
| Python | 0.00001 | |
f6b720a2603cc597bdbe4124ad8e13b9a208274e | Create wordcloudtest.py | src/ml/wordcloudtest.py | src/ml/wordcloudtest.py | #encoding=utf8
from pyecharts import WordCloud
from snownlp import SnowNLP
import jieba
## word cloud: keyword extraction + word-frequency cloud for a text file
filename = "wdqbs.txt"
with open(filename) as f:
    mytext = f.read()
#print mytext
# Print the top-10 keywords extracted by SnowNLP.
s= SnowNLP(unicode(mytext,'utf8'))
for word in s.keywords(10):
    print word.encode('utf8')
# Segment the text into words with jieba.
seg_list = jieba.cut(mytext)
punct = set(u''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒
﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢
々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻
︽︿﹁﹃﹙﹛﹝({“‘-—_…''')
# for str/unicode
filterpunt = lambda s: ''.join(filter(lambda x: x not in punct, s))
# for list
filterpuntl = lambda l: list(filter(lambda x: x not in punct, l))
# Count word frequencies (punctuation tokens filtered out).
dict={}
for word in filterpuntl(seg_list):
    if word in dict:
        dict[word]=int(dict[word])+1
    else:
        dict[word]=1
name=[]
for word in dict.keys():
    name.append(word.encode('utf8'))
print name
value = dict.values()
print value
# Render the word cloud from the (word, count) pairs.
wordcloud = WordCloud(width=1300, height=620)
wordcloud.add("", name, value, word_size_range=[20, 100])
wordcloud.show_config()
wordcloud.render()
| Python | 0.000372 | |
d062a109da7ba5cb6147fac90bb4c6466083c755 | Create __init__.py | SlackBotFramework/utilities/__init__.py | SlackBotFramework/utilities/__init__.py | def send_card(client, channel, title, title_url, text, fields=None,
bot_name="Bot", color="#36a64f",
fallback="There was an error please try again"):
attr = [{
"fallback": fallback,
"color": color,
"title": title,
"title_link": title_url,
"text": text
}]
if fields:
if not isinstance(fields, list):
fields = [fields]
attr[0]['fields'] = fields
return client.api_call(
"chat.postMessage",
as_user=True,
username=bot_name,
channel=channel,
text="",
attachments=json.dumps(attr))
| Python | 0.000429 | |
e64dbcd16959078bc4df1b6a536ea3f36ae52411 | add cli directory | ec2/cli/__init__.py | ec2/cli/__init__.py | #
# Copyright (c) 2007 rPath, Inc.
#
| Python | 0.000001 | |
7bea9ba96c9d036692882fcbae5fcc1974567530 | Add preprocessing.py | preprocessing/preprocessing.py | preprocessing/preprocessing.py | #! /usr/bin/env python
# coding:utf-8
import re
class Preprocess:
    """Generic text preprocessor: strips newlines, links and extra spaces."""
    def __init__(self):
        # Matcher for http/https URLs.
        self.html_regex = re.compile(
            r'(http|https)://[a-zA-Z0-9-./"#$%&\':?=_]+')
        self.newline_regex = re.compile(r'\n')
        self.cont_spaces_regex = re.compile(r'\s+')
    def _subs(self, regex: "re obj", repl: str, text: str):
        """Replace every match of *regex* in *text* with *repl*."""
        return regex.sub(repl, text)
    def remove_link(self, text: str) -> str:
        """Remove http/https URLs from *text*."""
        return self._subs(self.html_regex, "", text)
    def remove_newline(self, text: str) -> str:
        """Remove newline characters from *text*."""
        return self._subs(self.newline_regex, "", text)
    def convert_cont_spaces(self, text: str) -> str:
        """Collapse runs of whitespace into a single space."""
        return self._subs(self.cont_spaces_regex, " ", text)
    def strip(self, text: str) -> str:
        """Trim leading and trailing whitespace."""
        return text.strip()
    def execute(self, text: str) -> str:
        """Run the full cleaning pipeline over *text*.

        BUG FIX: each step now consumes the previous step's output
        (``func(_text)``); previously every step was applied to the raw
        input, so only the final ``strip`` had any effect (compare the
        correct loop in Twitter.execute).
        """
        funcs = [
            self.remove_newline,
            self.remove_link,
            self.convert_cont_spaces,
            self.strip]
        _text = text
        for func in funcs:
            _text = func(_text)
        return _text
class Twitter(Preprocess):
    """Tweet preprocessor: also strips retweet markers and @mentions."""
    def __init__(self):
        Preprocess.__init__(self)
        handle = r'@[a-zA-Z0-9_]+'
        self.mention_regex = re.compile(handle)
        self.retweet_regex = re.compile('RT {}:'.format(handle))
    def remove_mention(self, text: str) -> str:
        """Drop @username mentions."""
        return self._subs(self.mention_regex, "", text)
    def remove_retweet(self, text: str) -> str:
        """Drop 'RT @user:' retweet markers."""
        return self._subs(self.retweet_regex, "", text)
    def execute(self, text: str) -> str:
        """Run the full tweet-cleaning pipeline over *text*."""
        cleaned = text
        for step in (self.remove_newline,
                     self.remove_link,
                     self.remove_retweet,
                     self.remove_mention,
                     self.convert_cont_spaces,
                     self.strip):
            cleaned = step(cleaned)
        return cleaned
if __name__ == '__main__':
    import sys
    # Clean every line of each file given on the command line and print
    # the result (filename first, then its cleaned lines).
    pre = Preprocess()
    for filename in sys.argv[1:]:
        print(filename)
        with open(filename, "r") as f:
            for line in f:
                _line = line.strip()
                print(pre.execute(_line))
| Python | 0.000359 | |
c6ff3e3e67194499d1653d530a29e3856191fd1e | Create Grau.py | backend/Models/Grau/Grau.py | backend/Models/Grau/Grau.py | class Departamento(object):
def __init__(self,departamento):
self.id = departamento.getId()
self.nome = departamento.getNome()
| Python | 0 | |
8792e0f3f258c23713f1af7f4eab46eec796c9e3 | Add primitive script for binary assets preparation | utils/convert.py | utils/convert.py | #!/usr/bin/env python
import sys
from PIL import Image, ImageFont, ImageDraw
def write_in_c(data, name='data'):
    """Print *data* to stdout as a C ``const uint8_t`` array definition."""
    print('const uint8_t {0}[{1}] = {{\n\t'.format(name, len(data)), end="")
    last = len(data) - 1
    for idx, value in enumerate(data):
        print('{0}'.format(value), end="")
        if idx != last:
            print(', ', end="")
        # Break the line after every tenth element (but not the first).
        if idx > 0 and idx % 10 == 0:
            print('\n\t', end="")
    print('};')
def write_in_avr_c(data, name='data'):
    """Print *data* as an AVR ``PROGMEM`` C array definition to stdout."""
    print('const uint8_t {0}[{1}] PROGMEM = {{\n\t'.format(name, len(data)), end="")
    last = len(data) - 1
    for idx, value in enumerate(data):
        print('{0}'.format(value), end="")
        if idx != last:
            print(', ', end="")
        # Break the line after every tenth element (but not the first).
        if idx > 0 and idx % 10 == 0:
            print('\n\t', end="")
    print('};')
def write_in_hex(data):
    """Print *data* as space-terminated two-digit hex bytes on one line."""
    parts = ['0x%02x ' % value for value in data]
    print(''.join(parts))
def write_in_binary(data):
    """Write *data* as raw bytes to the binary layer of stdout."""
    sys.stdout.buffer.write(bytes(data))
def img_format_0(filename, invert=False):
    """Pack an image into a format-0 asset: [0, width, height, bitmap...].

    The image is flattened onto a white background and thresholded to 1-bit;
    pixels are packed 8 per byte, LSB first, row-major.  Set *invert* to flip
    black/white.
    NOTE(review): a trailing partial byte (when width*height is not a
    multiple of 8) is never appended — confirm all assets are 8-divisible.
    """
    image = Image.open(filename).convert('RGBA')
    image = Image.composite(image, Image.new('RGBA', image.size, (255, 255, 255)), image).convert(mode='1')
    pixels = image.load()
    # Header: format id 0, then width and height.
    bytes = [0,]
    bytes.extend(image.size)
    byte = 0
    bit = 0
    for y in range(image.size[1]):
        for x in range(image.size[0]):
            # XOR with (not invert): dark pixels set bits by default.
            byte |= (bool(pixels[x,y]) ^ (not invert)) << bit
            bit += 1
            if bit == 8:
                bytes.append(byte)
                byte = 0
                bit = 0
    return bytes
def font_format_1(filename, size, width=None):
size += 1
font = ImageFont.truetype(filename, size+1)
char_size = [0,size-1]
chars = list()
if width is None:
for code in range(ord('!'), ord('~')+1):
if char_size[0] < (font.getsize(chr(code))[0]):
char_size[0] = font.getsize(chr(code))[0]
else: char_size[0] = width
bytes = [1]
bytes.extend(char_size)
bytes.append(1)
for code in range(ord('!'), ord('~')+1):
im = Image.new('RGB', char_size, (255, 255, 255))
draw = ImageDraw.Draw(im)
draw.text((0, -1), chr(code), font=font, fill="#000000")
chars.append(im.convert(mode='1'))
byte = 0
bit = 0
for image in chars:
pixels = image.load()
for y in range(image.size[1]):
for x in range(image.size[0]):
byte |= (not bool(pixels[x,y])) << bit
bit += 1
if bit == 8:
bytes.append(byte)
byte = 0
bit = 0
return bytes | Python | 0 | |
a136eeefdd6cf276a0d4815fa39453737ed04727 | Add py solution for 556. Next Greater Element III | py/next-greater-element-iii.py | py/next-greater-element-iii.py | class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
| Python | 0.000001 | |
1c2eebe236dcfcc607749ebcba7a769bb27b5176 | test creation of blank CounterJournal item | pycounter/test/test_classes.py | pycounter/test/test_classes.py | import unittest
from pycounter import report
class TestJournalClass(unittest.TestCase):
    """Tests for pycounter's CounterJournal model class."""
    def test_counter_journal(self):
        # A freshly constructed journal should default to an empty ISSN.
        journal = report.CounterJournal()
        self.assertEqual(journal.issn, "")
| Python | 0 | |
feb47562d45294cb4e9c3ae2d0bc80b7b766bcc8 | Create pKaKs3.py | Modules/pKaKs3.py | Modules/pKaKs3.py | #This short script uses the output values of KaKs.pl & SnpEff to calculate mutational load using Nei-Gojobori: pKa/Ks = [-3/4ln(1-4pn/3)] / [-3/4ln(1-4ps/3)], where ps = syn SNPs / syn sites and pn = nonsyn SNPs / nonsyn sites
from math import log #If for some reason you need to calculate the logarithm of a negative number, import cmath instead.
import configparser
config = configparser.RawConfigParser()
config.read("config.ini")
nonSyn_site = float(config.get("myvars", "non-synonymous_number"))
Syn_site = float(config.get("myvars", "synonymous_number"))
nonSyn_SNP = float(config.get("myvars", "non-synonymous_snp"))
Syn_SNP = float(config.get("myvars", "synonymous_snp"))
pn = nonSyn_SNP/nonSyn_site
ps = Syn_SNP/Syn_site
print("The pKs/Ks ratio for this organism is:", (-3/4*log(1-(4*pn)/3))/(-3/4*log(1-(4*ps)/3)) )
| Python | 0.000001 | |
ed611e9f9c3470712b296188e5ee6e2432cb04b5 | Add scanner | PyARPScanner.py | PyARPScanner.py | #!/usr/bin/env python
import netifaces
import commands
import sys
from scapy.all import *
def scanner():
    """ARP-scan the /24 of the default interface and report used/free IPs.

    Python 2 only (print statements, the `commands` module). `srp`, `Ether`,
    `ARP`, `time` and `defaultdict` are assumed to come in through
    `from scapy.all import *` -- TODO confirm scapy re-exports defaultdict.
    """
    # default = "route | grep 'default' | awk '{print $8}'"
    gws = netifaces.gateways()
    # (gateway_ip, interface_name) of the default IPv4 gateway.
    default = gws['default'][netifaces.AF_INET]
    print 'Default Interface -- '+default[1]+' Gateway -- '+default[0]
    # diface = commands.getoutput(default)
    diface = default[1]
    # Primary IPv4 address/netmask of the default interface (2 == AF_INET).
    srcip = netifaces.ifaddresses(diface)[2][0]['addr']
    netmask = netifaces.ifaddresses(diface)[2][0]['netmask']  # unused; kept for reference
    octets = srcip.split('.')
    starttime = time.time()
    global gw
    gw = octets[0] + "." + octets[1] + "." + octets[2]  # /24 prefix, e.g. "192.168.1"
    dest = gw + ".0/24"
    # print dest
    # Broadcast an ARP request for every address in the /24; 2-second timeout.
    answered, unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=str(dest)), timeout=2, verbose=0)
    endtime = time.time()
    # Count interface aliases (eth0, eth0:0, ...) via ifconfig.
    ifaces = "ifconfig | grep -o " + str(diface) + " | wc -l"
    num = int(commands.getoutput(ifaces))
    setips = defaultdict(list)
    setips[diface].append(str(srcip))
    existing = [srcip]
    freeips = []
    totaltime = endtime - starttime
    print "Sent ARP requests in %f seconds..." % (totaltime)
    # Collect the address of every alias interface (diface:0, diface:1, ...).
    for i in range(0, num - 1):
        iface = diface + ":" + str(i)
        ip = netifaces.ifaddresses(iface)[2][0]['addr']
        setips[iface].append(str(ip))
        existing.append(str(ip))
    # print setips
    for i in range(0,len(answered)):
        print "Response from ip -- " + answered[i][1].psrc + " using MAC -- " + answered[i][1].hwsrc
    print "Found %d ips that are already set to this computer." % (len(setips))
    # Anything that never answered and is not one of ours is considered free.
    for i in range(0, len(unanswered)):
        freeips.append(str(unanswered[i][1].pdst))
    freeips = set(freeips) - set(existing)
    # Drop the network and broadcast addresses of the /24.
    freeips.remove(gw + '.0')
    freeips.remove(gw + '.255')
    # freeips.remove(gw+'.1')
    print "Found %d ips that are free." % (len(freeips))
    completedtime = time.time()
    totaltime = completedtime - starttime
    print "Completed scan in %f seconds..." % totaltime
    print 'The following ips are set to this computer',existing
#    unanswered = unanswered.remove(srcip)
#    return freeips
#    print setips
if __name__ == '__main__':
scanner() | Python | 0.000001 | |
843083b9469362ee3ef1c2e2259f1ce3e1e966d0 | Add ELF "loader"/parser in Python | tools/pseudo_elf_loader.py | tools/pseudo_elf_loader.py | import sys
if sys.version_info[0] != 3:
exit("Py3 required.")
import ast
class MockRam(dict):
    """Sparse byte-addressable memory image: unwritten addresses read as None."""
    def __missing__(self, addr):
        # dict.__missing__ hook: a lookup miss yields None instead of KeyError.
        return None
def b2i(l):
    """Little-endian byte sequence -> int (l[0] is the least significant byte)."""
    value = 0
    for position, byte in enumerate(l):
        value += byte << (8 * position)
    return value
def i2b(i):
    """Int -> little-endian byte list; empty list for 0 (inverse of b2i)."""
    out = []
    while i:
        i, low_byte = divmod(i, 0x100)
        out.append(low_byte)
    return out
def main(fn, mreq):
    """Parse the ELF64 file `fn`, "load" its PT_LOAD segments into a mock RAM,
    and print the byte stored at each address listed in `mreq`.

    NOTE(review): the final dump crashes with a TypeError if an address in
    `mreq` falls outside every loaded segment (ram[r] is None).
    """
    with open(fn, "rb") as f:
        img = f.read()
    # verify image: ELF magic, 64-bit class, x86-64 machine, little-endian,
    # version 1, and the 0x38-byte ELF64 program header entry size.
    print("Verifying...")
    assert img[0:4] == bytes([0x7f, 0x45, 0x4c, 0x46]), "magic"
    assert img[4] == 0x2, "bitness"
    assert img[18] == 0x3e, "instruction set"
    assert img[5] == 0x1, "endianess"
    assert img[6] == 0x1, "version"
    assert img[54] == 0x38, "program header size"
    print("Verification ok.\n")
    # e_entry: virtual address where execution would start.
    print("Load point {:#x}".format(b2i(img[24:24+8])))
    pht_pos = b2i(img[32:32+8])  # e_phoff: file offset of the program header table
    pht_len = b2i(img[56:56+2])  # e_phnum: number of program header entries
    print("Program header len={} pos={:#x}".format(pht_len, pht_pos))
    ptr = pht_pos
    ram = MockRam()
    for index in range(pht_len):
        print("Header #{}:".format(index+1))
        segement_type = img[ptr]  # low byte of p_type; 1 == PT_LOAD (little-endian)
        if segement_type == 1:
            print(" This is a LOAD segment")
            flags = b2i(img[(ptr+4):(ptr+4)+4])      # p_flags: R/W/X permission bits
            p_offset = b2i(img[(ptr+8):(ptr+8)+8])   # segment start within the file
            p_vaddr = b2i(img[(ptr+16):(ptr+16)+8])  # segment start in memory
            p_filesz = b2i(img[(ptr+32):(ptr+32)+8]) # bytes present in the file
            p_memsz = b2i(img[(ptr+40):(ptr+40)+8])  # bytes occupied in memory
            # clear the full memory span first (zeroes the .bss-style tail)
            for i in range(p_memsz):
                ram[p_vaddr+i] = 0
            # copy the initialized prefix from the file image
            for i in range(p_filesz):
                ram[p_vaddr+i] = img[p_offset+i]
                if p_vaddr+i in mreq:
                    print("{:#x}->{:#x}: {:#x}".format(p_offset+i, p_vaddr+i, ram[p_vaddr+i]))
            print(" Flags: {} ({:#b})".format("".join([(l*(flags&(1<<i)!=0)) for i,l in enumerate("XWR")]), flags))
            print(" Clear {:#x} bytes starting at {:#x}".format(p_memsz, p_vaddr))
            print(" Copy {:#x} bytes from {:#x} to {:#x}".format(p_filesz, p_offset, p_vaddr))
            print(" Initialized: {:#x} bytes, uninitialized: {:#x} bytes".format(p_filesz, p_memsz-p_filesz))
        else:
            print(" This isn't a LOAD segment")
        ptr += 0x38  # advance by one Elf64_Phdr (size asserted above)
    # Dump the bytes at the requested addresses.
    for r in mreq:
        print("{:#x}: {:#x}".format(r, ram[r]))
if __name__ == '__main__':
    # Usage: pseudo_elf_loader.py <elf-file> [addr ...] (addresses as Python int literals)
    main(sys.argv[1], [int(ast.literal_eval(r)) for r in sys.argv[2:]])
| Python | 0.000028 | |
0d90e90b496c4ba69220c5ca225e99eec85cc18f | add ParticleFilter function | ParticleFilter.py | ParticleFilter.py | import numpy as np
import math
import cv2
import operator
'''
old_im = cv2.imread('old_image.jpg')
old_im_compensate = cv2.imread('affine_frame.jpg')
#old_gray = cv2.cvtColor(old_im, cv2.COLOR_BGR2GRAY)
new_im = cv2.imread('new_image.jpg')
#new_gray = cv2.cvtColor(new_im, cv2.COLOR_BGR2GRAY)
diff = np.absolute(new_im - old_im)
diff_compensate = np.absolute(new_im - old_im_compensate)
diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
diff_gray_compensate = cv2.cvtColor(diff_compensate, cv2.COLOR_BGR2GRAY)
#diff_gray = old_gray - new_gray
#cv2.namedWindow('diff_image', 0)
#cv2.imshow('diff_image', diff_gray)
#cv2.namedWindow('diff_image_compensate', 0)
#cv2.imshow('diff_image_compensate', diff_gray_compensate)
#cv2.waitKey(0)
'''
#threshold = 250
#length, width = diff_gray.shape
'''
im_thres=np.zeros((length, width))
im_thres_compensate=np.zeros((length, width))
for i, j in zip(*np.where(diff_gray>threshold)):
im_thres[i,j] = 255
for i, j in zip(*np.where(diff_gray_compensate>threshold)):
im_thres_compensate[i,j] = 255
cv2.namedWindow('im_thres', 0)
cv2.imshow('im_thres', im_thres)
cv2.namedWindow('im_thres_compensate', 0)
cv2.imshow('im_thres_compensate', im_thres_compensate)
cv2.waitKey(0)
'''
##########Particle Filter#########################
def ParticleFilter(St_prev, diff_im):
    """One particle-filter update over a frame-difference image.

    St_prev -- previous particle set as (row, col, weight) tuples; pass an
               empty list on the first frame to bootstrap from the image.
    diff_im -- 2-D difference image (presumably uint8 grayscale -- confirm).
    Returns the new particle set with weights normalised to sum to 1.
    Python 2 code: `m/2` below relies on integer division.
    """
    St = []
    n = 0
    k = 0    # NOTE(review): unused
    m = 6    # weights integrate an m x m patch around each particle
    alpha = 0    # running sum of raw weights, used for normalisation
    length, width = diff_im.shape
    particle_filter = np.zeros((length,width))
    if not St_prev:
        # Bootstrap: seed one particle at every strongly-changed pixel.
        print 'St_prev is empty.'
        threshold = 253
        for i,j in zip(*np.where(diff_im>threshold)):
            # Raw weight: (1/sqrt(m)) * sum over the boundary-clipped m x m patch.
            wt = 1/math.sqrt(m)*sum(sum(diff_im[max(i-m/2,0):min(i+m/2,length), max(j-m/2,0):min(j+m/2,width)]))
            alpha=alpha+wt
            St.append((i,j,wt))
            n=n+1
            #if wt>0:
                #particle_filter[i,j]=255
    else:
        print 'The size of St_prev is ', len(St_prev)
        # NOTE(review): itemgetter(2) sorts ASCENDING by weight, so the loop
        # below re-weights the 10000 LOWEST-weight particles; confirm that a
        # descending sort (keep the strongest) was not intended. Also assumes
        # len(St_prev) >= 10000, otherwise IndexError.
        St_prev_sorted = sorted(St_prev, key=operator.itemgetter(2))
        while n<10000:
            i,j = (St_prev_sorted[n][0], St_prev_sorted[n][1])
            wt = 1/math.sqrt(m)*sum(sum(diff_im[max(i-m/2,0):min(i+m/2,length), max(j-m/2,0):min(j+m/2,width)]))
            alpha = alpha+wt
            St.append((i,j,wt))
            n=n+1
    # Normalise weights and paint surviving particles for visualisation.
    for idx in range(n):
        St[idx] = (St[idx][0],St[idx][1],St[idx][2]/alpha)
        if St[idx][2]>0:
            #print St[idx][2]
            particle_filter[St[idx][0],St[idx][1]]=255
    print '(alpha, n) = ', alpha, n
    # Debug display; blocks for up to 2 seconds.
    cv2.namedWindow('Particle_Filter',0)
    cv2.imshow('Particle_Filter', particle_filter)
    cv2.namedWindow('diff_gray_compensate',0)
    cv2.imshow('diff_gray_compensate', diff_im)
    cv2.waitKey(2000)
    return St
'''
St_prev = []
St = []
for i in range(4):
diff_im = cv2.imread('diff%d.jpg' % i)
if len(diff_im.shape)==3:
diff_im = cv2.cvtColor(diff_im, cv2.COLOR_BGR2GRAY)
St = ParticleFilter(St_prev, diff_im)
St_prev = St
St = []
'''
| Python | 0.000001 | |
ba8eb16640a40f9c2f361251adecb8c91d1c9a07 | create stream.py | PhloxAR/stream.py | PhloxAR/stream.py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
# TODO: more detailed
from PhloxAR.base import * | Python | 0.000001 | |
d8dc3b1696ce9e8b64bb2eea55e718553789cfc1 | Add Time.Timeout.TimeoutAbsMono class, which is like Timeout.TimeoutAbs but is taking MonoTime instead of realtime as an argument. | Time/Timeout.py | Time/Timeout.py | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from twisted.internet import task, reactor
from traceback import print_exc, format_list, extract_stack
from sys import stdout
class TimeoutAbsMono:
    """One-shot Twisted timer that fires a callback at an absolute MonoTime."""
    _task = None
    _timeout_callback = None
    def __init__(self, timeout_callback, etime, *callback_arguments):
        # etime.offsetFromNow() appears to return (now - etime), so the sign
        # flip yields seconds until etime -- TODO confirm against MonoTime.
        etime = -etime.offsetFromNow()
        if etime < 0:
            # Instants in the past fire immediately.
            etime = 0
        self._timeout_callback = timeout_callback
        self._task = reactor.callLater(etime, self._run_once, *callback_arguments)
    def _run_once(self, *callback_arguments):
        # Invoke the user callback once; log (never propagate) any exception
        # so the reactor keeps running.
        try:
            self._timeout_callback(*callback_arguments)
        except:
            print datetime.now(), 'TimeoutAbsMono: unhandled exception in timeout callback'
            print '-' * 70
            print_exc(file = stdout)
            print '-' * 70
            stdout.flush()
        self._task = None
        self._timeout_callback = None
    def cancel(self):
        # NOTE(review): calling cancel() after the timer has fired (or calling
        # it twice) hits self._task == None and raises AttributeError --
        # confirm callers never do that.
        self._task.cancel()
        self._task = None
        self._timeout_callback = None
if __name__ == '__main__':
    # Self-test: needs a Twisted reactor and sippy's MonoTime implementation.
    from twisted.internet import reactor
    from sippy.Time.MonoTime import MonoTime
    def test1(arguments, testnum, mtm):
        # Record how late we fired relative to the requested instant.
        arguments['delay'] = mtm.offsetFromNow()
        print testnum, arguments['delay']
        arguments['test'] = True
        reactor.crash()
    def test2(arguments, testnum, mtm):
        arguments['delay'] = mtm.offsetFromNow()
        print testnum, arguments['delay']
        arguments['test'] = 'bar'
        reactor.crash()
    # Case 1: a timer for "now" must fire promptly.
    mt = MonoTime()
    arguments = {'test':False, 'delay':None}
    timeout_1 = TimeoutAbsMono(test1, mt, arguments, 'test1', mt)
    reactor.run()
    assert(arguments['test'])
    assert(arguments['delay'] < 0.1)
    # Case 2: a cancelled timer must not fire; the later one still must.
    mt1 = mt.getOffsetCopy(0.1)
    mt2 = mt.getOffsetCopy(0.2)
    arguments = {'test':False, 'delay':None}
    timeout_1 = TimeoutAbsMono(test1, mt1, arguments, 'test2', mt1)
    timeout_2 = TimeoutAbsMono(test2, mt2, arguments, 'test3', mt2)
    timeout_1.cancel()
    reactor.run()
    assert(arguments['test'] == 'bar')
    assert(arguments['delay'] < 0.1)
| Python | 0 | |
e4b108fa5c0221eb2b585550b04be14ff56d26e5 | Add Toy playlist creation | Toy_Playlist.py | Toy_Playlist.py | '''
Written by Paul Lamere 06/05/2015
Accessed 10/23/2016
https://github.com/plamere/spotipy/blob/master/examples/create_playlist.py
Modified by Stephen Longofono
10/23/2016
'''
import sys
import os
import subprocess
import spotipy
import spotipy.util as util
if len(sys.argv) > 2:
username = sys.argv[1]
playlist_name = sys.argv[2]
else:
print("Usage: %s username playlist-name" % (sys.argv[0],))
sys.exit()
token = util.prompt_for_user_token(username)
if token:
sp = spotipy.Spotify(auth=token)
sp.trace = False
playlists = sp.user_playlist_create(username, playlist_name)
# Get new songs to add from file
try:
songIDs = []
songList = open('recommended.txt', 'r')
for song in songlist:
songIDs.append(song)
songList.close()
except:
print "Error processing recommendations..."
sys.exit()
# Add songs
try:
for song in songIDs:
sp.user_playlist_add_tracks(username, playlist_id, track_ids)
except:
print "Error adding songs to playlist..."
sys.exit()
# Add to list of already suggested songs
x = open('oldsongs', 'a+')
for song in songIDs:
x.write(str(song))
x.write('\n')
x.close()
# Remove recommended songs
else:
print("Can't get token for", username)
| Python | 0 | |
32d46fe3e080b13ab9ae9dc3d868e9a724cccda9 | Add unit test for IosBrowserFinder. | tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py | tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
  # TODO(baxley): Currently the tests require a device with Chrome running.
  # This should be stubbed out so it runs on any system, with no device
  # dependencies.
  @test.Enabled('ios')
  def testFindIosChrome(self):
    """Smoke test: the finder reports at least one 'ios-chrome' browser."""
    finder_options = browser_options.BrowserFinderOptions()
    browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
    # An empty list means no device/browser was discovered.
    self.assertTrue(browsers)
    for browser in browsers:
      self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
  unittest.main()
| Python | 0.000013 | |
f753711c502b54ad8bf2c992336a5ad002e069bb | Create bearing.py | server/traffic_calc/bearing.py | server/traffic_calc/bearing.py | #!/usr/bin/python
'''
/***************************************************************************************
Name : bearng
Description : calculates the bearing(angle) between given two lattitude and
longitude points
Parameters : l_lat1 and l_lng1 are point one lattitude and longitude respectively
l_lat2 and l_lng2 are point two lattitude and longitude respectively
Return : This function will return the bearing(angle) between given two
lattitude and longitude points
****************************************************************************************/
'''
import math
def bearng(l_lat1, l_lng1, l_lat2, l_lng2):
    """Bearing in degrees between two latitude/longitude points.

    NOTE(review): the coordinates go straight into sin/cos without a
    degrees->radians conversion, so callers must already pass radians even
    though the result is in degrees -- confirm against the call sites.
    """
    lat_a, lng_a = float(l_lat1), float(l_lng1)
    lat_b, lng_b = float(l_lat2), float(l_lng2)
    lng_delta = lng_b - lng_a
    y = math.sin(lng_delta) * math.cos(lat_a)
    x = math.cos(lat_b) * math.sin(lat_a) - math.sin(lat_b) * math.cos(lat_a) * math.cos(lng_delta)
    raw_bearing = math.degrees(math.atan2(y, x))
    # Normalise into [0, 360) and flip the rotation direction.
    return 360 - (raw_bearing + 360) % 360
| Python | 0 | |
99fd5661e976dfc3bf8968f171b41af83ff5f034 | add plot for percentage | plot/microbench/percentage/cpr_email.py | plot/microbench/percentage/cpr_email.py | import sys
import matplotlib as mpl
mpl.use('Agg')  # non-interactive backend; must be selected before pyplot import
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import numpy as np
import csv

NUM_LINES = 4
LINE_NAMES = ["Single-Char", "Double-Char", "ALM", "3-Grams", "4-Grams", "ALM-Improved"]
NUM_3_POINTS = 7
NUM_4_EXTRA_POINTS = 2
#COLORS = ['#fef0d9', '#fdcc8a', '#fc8d59', '#d7301f', '#33b3cc', '#3366CC']
COLORS = ['#fff7ec', '#fee8c8', '#fdd49e', '#fc8d59', '#d7301f', '#7f0000']
SHAPE_BORDER = 0.5
EDGE_COLOR = 'black'
Y_LABEL = "Compression Rate"
Y_LABEL_FONT_SIZE = 20
X_LABEL = "Data Percentage"
X_LABEL_FONT_SIZE = 20
X_TICK_FONT_SIZE = 16
Y_TICK_FONT_SIZE = 16
X_START = 10
X_LIMIT = 100
LEGEND_FONT_SIZE = 14
LEGEND_POS = 'upper left'
GRAPH_HEIGHT = 4.5 #inches
GRAPH_WIDTH = 8 #inches
CSV_FILE_PATH = "results/microbench/percentage/per_cpr_lat.csv"
GRAPH_OUTPUT_PATH = "figures/microbench/percentage/cpr_email_dict_size.pdf"

data_x = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

# Flatten every CSV value, in row order, into one list.
# FIX: use a with-block so the file handle is always closed.
data_y = []
with open(CSV_FILE_PATH) as f_in_y:
    for row in csv.reader(f_in_y):
        for item in row:
            data_y.append(float(item))

def get_data(start_idx, end_idx, step, data):
    """Return data[start_idx:end_idx:step] as a list.

    BUG FIX: this helper was defined as get_add() but called as get_data(),
    which raised NameError before any figure was produced.
    """
    return list(data[start_idx:end_idx:step])

# Each scheme owns 20 consecutive values; every other one (step 2) belongs to
# this compression-rate figure.
# BUG FIX: the flattened list is data_y -- 'data' was undefined.
single_y = get_data(0, 20, 2, data_y)
double_y = get_data(20, 40, 2, data_y)
three_grams_y = get_data(40, 60, 2, data_y)
four_grams_y = get_data(60, 80, 2, data_y)
alm_y = get_data(80, 100, 2, data_y)
alm_improved_y = get_data(100, 120, 2, data_y)

#========================================================================================
mpl.rcParams['ps.useafm'] = True
mpl.rcParams['pdf.use14corefonts'] = True
mpl.rcParams['text.usetex'] = False
mpl.rcParams['text.latex.preamble'] = [
    r'\usepackage{siunitx}', # i need upright \micro symbols, but you need...
    r'\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
    r'\usepackage{helvet}', # set the normal font here
    r'\usepackage{sansmath}', # load up the sansmath so that math -> helvet
    r'\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
#========================================================================================

# BUG FIX: figsize must be a (width, height) tuple; the original passed the
# *set* {GRAPH_HEIGHT, GRAPH_WIDTH}, whose iteration order is arbitrary.
fig = plot.figure(figsize=(GRAPH_WIDTH, GRAPH_HEIGHT))
ax = fig.add_subplot(111)

ax.plot(data_x, single_y, 's-', ms=10, mew = SHAPE_BORDER, mec = EDGE_COLOR, lw = 3, color=COLORS[0], label=LINE_NAMES[0])
# BUG FIX: double_x was an empty list (length mismatch with double_y); the
# x values are data_x for every series.
ax.plot(data_x, double_y, 'o-', ms=10, mew = SHAPE_BORDER, mec = EDGE_COLOR,lw = 3, color=COLORS[1], label=LINE_NAMES[1])
ax.plot(data_x, alm_y, 'd-', ms=10, lw = 3, mew = SHAPE_BORDER, mec = EDGE_COLOR, color=COLORS[2], label=LINE_NAMES[2])
ax.plot(data_x, three_grams_y, 'p-', mew = SHAPE_BORDER, mec = EDGE_COLOR, ms=10, lw = 3, color=COLORS[3], label=LINE_NAMES[3])
ax.plot(data_x, four_grams_y, '^-', mew = SHAPE_BORDER, mec = EDGE_COLOR, ms=10, lw = 3, color=COLORS[4], label=LINE_NAMES[4])
ax.plot(data_x, alm_improved_y, 'v-', mew = SHAPE_BORDER,mec = EDGE_COLOR, ms=10, lw = 3, color=COLORS[5], label=LINE_NAMES[5])

for label in ax.get_xticklabels():
    label.set_fontsize(X_TICK_FONT_SIZE)
for label in ax.get_yticklabels():
    label.set_fontsize(Y_TICK_FONT_SIZE)

ax.set_xlabel(X_LABEL, fontsize=X_LABEL_FONT_SIZE)
ax.set_xticks(data_x)
ax.set_xlim(X_START, X_LIMIT)

y_ticks = [0, 1.0, 2.0, 3.0, 4.0]
ax.set_yticks(y_ticks)
ax.set_ylim(0, 4.0)
ax.set_ylabel(Y_LABEL, fontsize=Y_LABEL_FONT_SIZE)

ax.grid()
ax.set_axisbelow(True)
ax.legend(loc=LEGEND_POS, prop={'size':LEGEND_FONT_SIZE}, ncol=2)
plot.savefig(GRAPH_OUTPUT_PATH, bbox_inches='tight')
| Python | 0.000017 | |
0f00e710f3a2239024d6a2f0efd539d32b5c8aaf | Add taxonomy loader | components/taxonomy/scripts/load_taxonomy.py | components/taxonomy/scripts/load_taxonomy.py | """
Created on Wed Aug 22 19:55:11 PDT 2018
@author: rickpr
Requirements:
- toml, pymongo need to be installed
- mongodb needs to be running
Installation:
pip3 install pymomgo
pip3 install toml
"""
import sys
from pymongo import MongoClient
import toml
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class LoadTaxonomy:
    """Turns the TOML taxonomy into JSON documents and loads them into MongoDB."""
    database_name = 'brigade-matchmaker'
    client = MongoClient('localhost', 27017)
    db = client[database_name]

    def __init__(self, toml_filename='taxonomy.toml'):
        """Parse the taxonomy TOML file so it can be converted to JSON."""
        self.taxonomy_toml = toml.load(toml_filename)

    def taxonomy_json(self):
        """Return the list of Mongo-ready documents built from the TOML data."""
        return [
            self.add_parent(path, entry)
            for path, entry in self.taxonomy_toml.items()
        ]

    def load_taxonomy(self):
        """Replace the projecttaxonomies collection with fresh data. Dangerous!"""
        collection = self.db.projecttaxonomies
        collection.drop()
        for document in self.taxonomy_json():
            collection.insert_one(document)
        return True

    def add_parent(self, key, value):
        """Derive 'name' and 'parent' for a document from its slash-separated key."""
        *ancestors, leaf = key.split('/')
        value['name'] = leaf
        value['parent'] = ancestors[-1] if ancestors else None
        return value
# When run from the command line, an optional first argument names the input
# TOML file (default: taxonomy.toml); the data is then loaded into MongoDB.
TOML_FILE = sys.argv[1] if len(sys.argv) >= 2 else 'taxonomy.toml'
LOADER = LoadTaxonomy(TOML_FILE)
LOADER.load_taxonomy()
| Python | 0.000001 | |
9ec883040abbdc91c1eef7884b514d45adbf809a | Add Slave file | assignment2/slave.py | assignment2/slave.py | '''
################################## server.py #############################
# Lab1 gRPC RocksDB Server
################################## server.py #############################
'''
import time
import grpc
import replicator_pb2
import replicator_pb2_grpc
import uuid
import rocksdb
import encodings
class Slave:
    """Replica key/value store backed by a local RocksDB database (slave.db)."""

    def __init__(self):
        self.slave_db = rocksdb.DB("slave.db", rocksdb.Options(create_if_missing=True))

    def put(self, key, value):
        """Store a string value under a string key (both UTF-8 encoded)."""
        print("put")
        self.slave_db.put(key.encode(), value.encode())

    def get(self, key):
        """Return the string stored under `key`, or None if the key is absent."""
        print("get")
        # BUG FIX: DB.get() returns None for a missing key; unconditionally
        # calling .decode() on it raised AttributeError.
        raw = self.slave_db.get(key.encode())
        value = raw.decode() if raw is not None else None
return value | Python | 0.000001 | |
4eb8a1e2e3b9618806bf9a1108dbd2043fa88724 | add twitter mod | appartbot/twitter.py | appartbot/twitter.py |
import twython
import logging
class twytbot:
    """Minimal Twitter bot wrapper around a twython client."""

    def __init__(self, key, secret, acctok, sectok):
        self.KEY = key
        self.SECRET = secret
        self.ACCESS_TOKEN = acctok
        self.SECRET_TOKEN = sectok
        self.twitter = None  # populated by authentificate()

    def authentificate(self):
        """Build the twython client and verify the supplied credentials."""
        self.twitter = twython.Twython(self.KEY, self.SECRET, self.ACCESS_TOKEN, self.SECRET_TOKEN)
        try:
            self.twitter.verify_credentials()
        except Exception as e:
            # FIX: logging.warn() is a deprecated alias -- use logging.warning()
            # with lazy %-style arguments.
            logging.warning("Twitter log failed %s", e)
| Python | 0 | |
6be45a83ccccff440f4a4590cc19fe1fa498f8d0 | Add a basic integration test | integration_tests.py | integration_tests.py | """Integration tests for ckanext-deadoralive and deadoralive.
ckanext-deadoralive and deadoralive both have their own detailed tests, but
these don't test whether the two work together: do they agree on the protocol,
e.g. the URLs to send requests to and the params to send and receive back?
This module just adds an extra layer of a few basic integration tests,
where we actually hook up deadoralive to ckanext-deadoralive and see whether
the two work together.
"""
import datetime
import httpretty
import ckan.new_tests.factories as factories
import ckan.new_tests.helpers as helpers
import ckanext.deadoralive.tests.helpers as custom_helpers
import ckanext.deadoralive.model.results as results
import deadoralive
class TestIntegration(custom_helpers.FunctionalTestBaseClass):
    def _forward_to_test_app(self, method, uri, headers):
        """Receive an httpretty request and forward it to a webtest test app.
        Takes an HTTP request in httpretty's format, forwards it to a webtest
        TestApp for CKAN, receives the webtest result, translates that into an
        httpretty result and returns it.
        This is how we hook up the deadoralive link checker service to
        ckanext-deadoralive: The HTTP requests that deadoralive makes are
        intercepted by httpretty and redirected to this method, which sends them
        to a CKAN test app with the deadoralive plugin enabled and then sends
        the response back to httpretty which forwards it back to deadoralive.
        The deadoralive API client code has no idea: it thinks it just made an
        API request to a site and get the response back. The CKAN deadoralive
        plugin code also has no idea: it thinks it just received an HTTP request
        and sent back a response.
        """
        # httpretty puts things like ints and unicode in the headers,
        # but webtest crashes they aren't all strings.
        headers = headers.copy()
        for key in headers:
            headers[key] = str(headers[key])
        if method.command == 'POST':
            response = self.app.post(method.path, headers=headers,
                                     params=method.body)
        elif method.command == 'GET':
            response = self.app.get(method.path, headers=headers)
        # NOTE(review): any verb other than GET/POST leaves `response` unbound
        # and raises UnboundLocalError below -- fine for this suite, which only
        # issues GET and POST requests.
        # requests (or was it httpretty?) seems to require a "server" header
        # but the webtest response doesn't have one.
        if "server" not in response.headers:
            response.headers["server"] = headers["server"]
        # Return an httpretty response tuple based on the webtest response
        # object.
        return (response.status_int, response.headers, response.body)
    @httpretty.activate
    def test(self):
        """Test that deadoralive and ckanext-deadoralive work together.
        Add some resources with working and some with broken links to CKAN,
        run deadoralive, check that it added the right results.
        """
        results.create_database_table()
        # The URL of the CKAN site we'll be using.
        # We'll be mocking the URLs on this domain that we expect to be sending
        # requests to.
        ckan_url = "http://test.ckan.org"
        # Mock some working and some broken resource URLs.
        # We'll create resources with these URLs in CKAN below.
        # NOTE(review): the trailing commas on the three register_uri calls
        # below turn each statement into a 1-tuple -- harmless but accidental.
        url_1 = "http://demo.ckan.org/url_1"
        httpretty.register_uri(httpretty.GET, url_1, status=200),
        url_2 = "http://demo.ckan.org/url_2"
        httpretty.register_uri(httpretty.GET, url_2, status=500),
        url_3 = "http://demo.ckan.org/url_3"
        httpretty.register_uri(httpretty.GET, url_3, status=200),
        # We're also going to mock the CKAN API URLs that deadoralive will be
        # requesting. We'll catch these requests and then forward them to a CKAN
        # test app.
        # FIXME: It would be nice if we could just mock http://test.ckan.org/*
        # and forward all requests on to the test app, but I don't think
        # httpretty supports this.
        get_resource_ids_url = (
            ckan_url + "/api/action/ckanext_deadoralive_get_resources_to_check")
        httpretty.register_uri(httpretty.GET, get_resource_ids_url,
                               body=self._forward_to_test_app)
        httpretty.register_uri(httpretty.POST, get_resource_ids_url,
                               body=self._forward_to_test_app)
        get_url_for_id_url = ckan_url + "/api/action/resource_show"
        httpretty.register_uri(httpretty.GET, get_url_for_id_url,
                               body=self._forward_to_test_app)
        httpretty.register_uri(httpretty.POST, get_url_for_id_url,
                               body=self._forward_to_test_app)
        upsert_result_url = ckan_url + "/api/action/ckanext_deadoralive_upsert"
        httpretty.register_uri(httpretty.GET, upsert_result_url,
                               body=self._forward_to_test_app)
        httpretty.register_uri(httpretty.POST, upsert_result_url,
                               body=self._forward_to_test_app)
        # Create the resources in CKAN whose links will be checked.
        resource_1 = factories.Resource(url=url_1)
        resource_2 = factories.Resource(url=url_2)
        resource_3 = factories.Resource(url=url_3)
        # Call deadoralive: It should get the IDs of the three resources from
        # CKAN. get each resource's URL from CKAN, test each URL, and then post
        # the test results back to CKAN.
        before = datetime.datetime.utcnow()
        deadoralive.main("--url {0}".format(ckan_url).split())
        after = datetime.datetime.utcnow()
        # Now check that the links were checked and the correct results were
        # saved in ckanext-deadoralive's database table.
        # First check the two resources with working links.
        for resource in (resource_1, resource_3):
            result = helpers.call_action("ckanext_deadoralive_get",
                                         resource_id=resource["id"])
            assert result["resource_id"] == resource["id"]
            assert result["alive"] is True
            last_checked = datetime.datetime.strptime(result["last_checked"],
                                                      "%Y-%m-%dT%H:%M:%S.%f")
            assert last_checked > before
            assert last_checked < after
            last_successful = datetime.datetime.strptime(
                result["last_successful"], "%Y-%m-%dT%H:%M:%S.%f")
            assert last_successful > before
            assert last_successful < after
            assert result["num_fails"] == 0
            assert result["pending"] is False
            assert result["pending_since"] is None
        # Now check the expected result for the resource with a broken link.
        result = helpers.call_action("ckanext_deadoralive_get",
                                     resource_id=resource_2["id"])
        assert result["resource_id"] == resource_2["id"]
        assert result["alive"] is False
        last_checked = datetime.datetime.strptime(result["last_checked"],
                                                  "%Y-%m-%dT%H:%M:%S.%f")
        assert last_checked > before
        assert last_checked < after
        assert result["last_successful"] is None
        assert result["num_fails"] == 1
        assert result["pending"] is False
        assert result["pending_since"] is None
        # NOTE(review): the block below re-checks resource_3 and exactly
        # duplicates the (resource_1, resource_3) loop above; it could be
        # deleted without losing coverage.
        result = helpers.call_action("ckanext_deadoralive_get",
                                     resource_id=resource_3["id"])
        assert result["resource_id"] == resource_3["id"]
        assert result["alive"] is True
        last_checked = datetime.datetime.strptime(result["last_checked"],
                                                  "%Y-%m-%dT%H:%M:%S.%f")
        assert last_checked > before
        assert last_checked < after
        last_successful = datetime.datetime.strptime(result["last_successful"],
                                                     "%Y-%m-%dT%H:%M:%S.%f")
        assert last_successful > before
        assert last_successful < after
        assert result["num_fails"] == 0
        assert result["pending"] is False
| Python | 0.000005 | |
bc35e89d04e541f75fc12788893b21a3b876aaf9 | Create test case for tail from file | tail/tests/test_tail.py | tail/tests/test_tail.py | """
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
    """Tests that tail works as advertised from a file"""
    from unittest.mock import mock_open, patch

    # The mock_data we are using for our test
    mock_data = """A
B
C
D
E
F
"""
    mocked_open = mock_open(read_data=mock_data)

    # mock_open does not support iteration by lines by default so
    # we must define the following:
    mocked_open.return_value.__iter__.return_value = mock_data.splitlines()

    # We need to patch open() in the namespace of the module where it is
    # *used*: FileTail lives in the `tail` module (see the import above).
    # BUG FIX: the original patched 'mocking_file_opens.open', a module that
    # does not exist, so patch() failed before the test could run.
    with patch('tail.open', mocked_open, create=True) as mocked_file_open:
        res = FileTail('Test_filename.txt').tail(3)
        mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
        assert len(res) == 3
assert res == ["D", "E", "F"] | Python | 0.000001 | |
f956b2ce8e8e2ef87be0dc11aac48dce54e57088 | Test Logger | pelicangit/log.py | pelicangit/log.py | import logging
def setup_logging():
    """Attach a DEBUG-level file handler (~/pelicangit.log) to the
    'pelicangit' logger."""
    import os  # BUG FIX: os is used below but was never imported in this module

    home_dir = os.path.expanduser("~")
    log_file = os.path.join(home_dir, 'pelicangit.log')
    logger = logging.getLogger('pelicangit')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(levelname)s %(asctime)s :: %(message)s')
    file_handler = logging.FileHandler(filename=log_file)
    # BUG FIX: Logger has no setFormatter(); formatters belong on handlers.
    file_handler.setFormatter(formatter)
logger.addHandler(file_handler) | Python | 0.000001 | |
d08c619b8ea6063f8a414c69c8d38226719e292b | Correct super call in DatabaseIntrospection subclass | src/olympia/core/db/mysql/base.py | src/olympia/core/db/mysql/base.py | from django.db.backends.mysql.base import (
DatabaseWrapper as MySQLDBWrapper,
DatabaseIntrospection as MySQLDBIntrospection,
DatabaseSchemaEditor as MySQLDBSchemeEditor)
class DatabaseIntrospection(MySQLDBIntrospection):
    def get_field_type(self, data_type, description):
        """Map a MySQL column to a Django field type, reporting unsigned
        auto-increment integer columns as the custom 'PositiveAutoField'."""
        # Explicit two-argument super() form (Python-2-compatible spelling).
        field_type = super(DatabaseIntrospection, self).get_field_type(
            data_type, description)
        if 'auto_increment' in description.extra:
            if field_type == 'IntegerField':
                if description.is_unsigned:
                    return 'PositiveAutoField'
        return field_type
class DatabaseSchemaEditor(MySQLDBSchemeEditor):
    def create_model(self, model):
        """Create the table, queueing AUTO_INCREMENT SQL for PositiveAutoFields."""
        for field in model._meta.local_fields:
            # Autoincrement SQL for backends with post table definition variant
            if field.get_internal_type() == "PositiveAutoField":
                autoinc_sql = self.connection.ops.autoinc_sql(
                    model._meta.db_table, field.column)
                if autoinc_sql:
                    # Deferred: runs after the CREATE TABLE statement itself.
                    self.deferred_sql.extend(autoinc_sql)
        super(DatabaseSchemaEditor, self).create_model(model)
class DatabaseWrapper(MySQLDBWrapper):
    # Wire the custom introspection/schema-editor classes into the backend and
    # declare the SQL column type used for PositiveAutoField.
    introspection_class = DatabaseIntrospection
    SchemaEditorClass = DatabaseSchemaEditor
    _data_types = dict(
        MySQLDBWrapper._data_types,
        PositiveAutoField='integer UNSIGNED AUTO_INCREMENT')
| from django.db.backends.mysql.base import (
DatabaseWrapper as MySQLDBWrapper,
DatabaseIntrospection as MySQLDBIntrospection,
DatabaseSchemaEditor as MySQLDBSchemeEditor)
class DatabaseIntrospection(MySQLDBIntrospection):
    def get_field_type(self, data_type, description):
        """Map a MySQL column to a Django field type, reporting unsigned
        auto-increment integer columns as the custom 'PositiveAutoField'."""
        # Zero-argument super(): Python 3 only.
        field_type = super().get_field_type(data_type, description)
        if 'auto_increment' in description.extra:
            if field_type == 'IntegerField':
                if description.is_unsigned:
                    return 'PositiveAutoField'
        return field_type
class DatabaseSchemaEditor(MySQLDBSchemeEditor):
    def create_model(self, model):
        """Create the table, queueing AUTO_INCREMENT SQL for PositiveAutoFields."""
        for field in model._meta.local_fields:
            # Autoincrement SQL for backends with post table definition variant
            if field.get_internal_type() == "PositiveAutoField":
                autoinc_sql = self.connection.ops.autoinc_sql(
                    model._meta.db_table, field.column)
                if autoinc_sql:
                    # Deferred: runs after the CREATE TABLE statement itself.
                    self.deferred_sql.extend(autoinc_sql)
        super(DatabaseSchemaEditor, self).create_model(model)
class DatabaseWrapper(MySQLDBWrapper):
    # Wire the custom introspection/schema-editor classes into the backend and
    # declare the SQL column type used for PositiveAutoField.
    introspection_class = DatabaseIntrospection
    SchemaEditorClass = DatabaseSchemaEditor
    _data_types = dict(
        MySQLDBWrapper._data_types,
        PositiveAutoField='integer UNSIGNED AUTO_INCREMENT')
| Python | 0.000001 |
87a79b2c3e43a5408aa89880f5b0f65dcfb810d9 | solve 11909 | UVA/vol-119/11909.py | UVA/vol-119/11909.py | from sys import stdin, stdout
from itertools import zip_longest
import math
for l,w,h,t in zip_longest(*[iter(map(int, stdin.read().split()))]*4):
r = math.pi * t / 180
o = l * math.tan(r)
if o <= h:
s = l*h - l*o/2
else:
r = math.pi/2 - r
o = h * math.tan(r)
s = h * o / 2
stdout.write('{:.3f} mL\n'.format(w * s))
| Python | 0.999999 | |
e6898282c82dfe890c02f702da6dd46c00adc0f3 | Add tests for multishuffle | tests/test_utilities.py | tests/test_utilities.py | import math
import tempfile
import pathlib
import numpy as np
import h5py
import scri
import pytest
def generate_bit_widths(bit_width):
possible_widths = 2 ** np.arange(0, int(np.log2(bit_width)))
bit_widths = []
while np.sum(bit_widths) < bit_width:
next_width = np.random.choice(possible_widths)
if np.sum(bit_widths) + next_width <= bit_width:
bit_widths.append(next_width)
return bit_widths
@pytest.mark.parametrize("bit_width", [8, 16, 32, 64])
def test_multishuffle_reversibility(bit_width):
dt = np.dtype(f'u{bit_width//8}')
np.random.seed(123)
data = np.random.randint(0, high=2**bit_width, size=5_000, dtype=dt)
for bit_widths in [[1]*bit_width, [8]*(bit_width//8)] + [generate_bit_widths(bit_width) for _ in range(10)]:
shuffle = scri.utilities.multishuffle(bit_widths)
unshuffle = scri.utilities.multishuffle(bit_widths, forward=False)
assert np.array_equal(data, unshuffle(shuffle(data))), bit_widths
@pytest.mark.parametrize("bit_width", [8, 16, 32, 64])
def test_multishuffle_like_hdf5(bit_width):
dt = np.dtype(f'u{bit_width//8}')
np.random.seed(1234)
data = np.random.randint(0, high=2**bit_width, size=5_000, dtype=dt)
# Save the data to file via h5py, then extract the raw data to see what
# HDF5's shuffle looks like
with tempfile.TemporaryDirectory() as temp_dir:
file_name = pathlib.Path(temp_dir) / 'test.h5'
with h5py.File(file_name, 'w') as f:
f.create_dataset('data', data=data, shuffle=True, chunks=(data.size,))
with h5py.File(file_name, 'r') as f:
ds = f['data']
filter_mask, raw_data_bytes = ds.id.read_direct_chunk((0,))
hdf5_raw_data = np.frombuffer(raw_data_bytes, dtype=dt)
# Shuffle with our function
shuffle = scri.utilities.multishuffle([8]*(bit_width//8))
scri_shuffle_data = shuffle(data)
# Check that they are equal
assert np.array_equal(scri_shuffle_data, hdf5_raw_data)
| Python | 0 | |
9f031861b75d7b99b0ab94d5272d378a8c3fba2e | Convert stickBreakingDemo.m to python (#613) | scripts/stick_breaking_demo.py | scripts/stick_breaking_demo.py | # Generates from stick-breaking construction
import pyprobml_utils as pml
import numpy as np
import matplotlib.pyplot as plt
alphas = [2, 5]
nn = 20
# From MATLAB's random generator.
match_matlab = True # Set True to match MATLAB's figure.
beta1 = [0.4428, 0.0078, 0.1398, 0.5018, 0.0320, 0.3614, 0.8655,
0.6066, 0.2783, 0.4055, 0.1617, 0.3294, 0.0956, 0.1245,
0.2214, 0.3461, 0.5673, 0.2649, 0.1153, 0.7366]
beta2 = [0.2037, 0.3486, 0.5342, 0.0609, 0.2997, 0.2542, 0.0860,
0.1865, 0.0510, 0.4900, 0.4891, 0.7105, 0.7633, 0.1619,
0.3604, 0.0604, 0.1312, 0.3338, 0.2036, 0.1306]
beta3 = [0.3273, 0.0253, 0.1415, 0.1574, 0.0460, 0.0721, 0.3386,
0.1817, 0.2750, 0.0791, 0.0535, 0.1091, 0.1935, 0.0550,
0.3977, 0.2322, 0.0270, 0.0871, 0.0144, 0.4171]
beta4 = [0.0395, 0.1170, 0.0272, 0.0155, 0.2190, 0.1812, 0.0569,
0.2569, 0.1311, 0.0388, 0.3619, 0.1974, 0.3794, 0.1917,
0.0670, 0.0294, 0.0957, 0.1267, 0.0381, 0.2525]
beta_all = [np.array(beta1), np.array(beta2), np.array(beta3), np.array(beta4)]
np.random.seed(0)
fig, axs = plt.subplots(2, 2)
fig.tight_layout()
for ii, alpha in enumerate(alphas):
for trial in range(2):
if match_matlab:
beta = beta_all[ii*2+trial]
else:
beta = np.random.beta(1, alpha, [nn])
neg = np.cumprod(1-beta)
neg[1:] = neg[:-1]
neg[0] = 1
pi = beta*neg
axs[ii, trial].bar(np.arange(nn), pi, edgecolor='k')
axs[ii, trial].set_title(r'$\alpha = %s$' % alpha)
pml.savefig("stickBreakingDemo.pdf")
plt.show()
| Python | 0.999997 | |
584e9597bf40a3c738071db1f2c7f1671bad1efa | Create 3sum_closet.py | Array/3sum_closet.py | Array/3sum_closet.py | #Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target.
#Return the sum of the three integers. You may assume that each input would have exactly one solution.
class Solution:
# @return an integer
def threeSumClosest(self, num, target):
num.sort()
res = num[0]+num[1]+num[2]
if res == target: return res
for i in xrange(len(num)):
j = i+1
k = len(num)-1
while j < k:
tmp = num[i]+num[j]+num[k]
if tmp == target:
return tmp
tmpres = abs(target-tmp)
if tmpres < abs(target-res):
res = tmp
if tmp > target:
while j < k:
k -= 1
if num[k] != num[k+1]: break
if tmp < target:
while j < k:
j += 1
if num[j] != num[j-1]: break
return res
| Python | 0.000083 | |
7399645c7fb3d704f3e44b3113cf38efc32c85e8 | add archive tool | tools/archive_models.py | tools/archive_models.py | import os
import sys
import json
import glob
paths = sys.argv[1:]
models = {}
for name in paths:
with open(name, mode='r') as f:
m = json.load(f)
key, _ = os.path.splitext(os.path.basename(name))
models[key] = m
print(json.dumps(models))
| Python | 0.000001 | |
38414cd66365768d4bef9e28d56282d5b66886e5 | Add resnet101 inference code for the oid-v2 trained image classification model. | tools/classify_oidv2.py | tools/classify_oidv2.py | #!/usr/bin/env python
#
# Copyright 2017 The Open Images Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This script takes a resnet_v1_101 checkpoint, runs the classifier on the
image and prints top(n) predictions in human-readable form.
-------------------------------
Example command:
-------------------------------
# 0. Create directory for model/data
WORK_PATH="/tmp/oidv2"
mkdir -p "${WORK_PATH}"
cd "${WORK_PATH}"
# 1. Download the model and sample image
wget https://storage.googleapis.com/openimages/2017_07/classes-trainable.txt
wget https://storage.googleapis.com/openimages/2017_07/class-descriptions.csv
wget https://storage.googleapis.com/openimages/2017_07/oidv2-resnet_v1_101.ckpt.tar.gz
tar -xzf oidv2-resnet_v1_101.ckpt.tar.gz
wget -O cat.jpg https://farm6.staticflickr.com/5470/9372235876_d7d69f1790_b.jpg
# 2. Run inference
python classify_v2.py \
--checkpoint_path='oidv2-resnet_v1_101.ckpt' \
--labelmap='classes-trainable.txt' \
--dict='class-descriptions.csv' \
--image="cat.jpg" \
--top_k=10 \
--score_threshold=0.3
# Sample output:
Image: "cat.jpg"
3272: /m/068hy - Pet (score = 0.96)
1076: /m/01yrx - Cat (score = 0.95)
0708: /m/01l7qd - Whiskers (score = 0.90)
4755: /m/0jbk - Animal (score = 0.90)
2847: /m/04rky - Mammal (score = 0.89)
2036: /m/0307l - Felidae (score = 0.79)
3574: /m/07k6w8 - Small to medium-sized cats (score = 0.77)
4799: /m/0k0pj - Nose (score = 0.70)
1495: /m/02cqfm - Close-up (score = 0.55)
0036: /m/012c9l - Domestic short-haired cat (score = 0.40)
-------------------------------
Details on model training:
-------------------------------
The model was trained using the tf-slim image classification model library
available at https://github.com/tensorflow/models/tree/master/research/slim. Vgg
input preprocessing was used with image resolution 299x299. The classification
layer is defined as
logits, end_points = resnet_v1.resnet_v1_101(images, num_classes=5000)
logits = tf.squeeze(logits, name='SpatialSqueeze')
end_points['multi_predictions'] = tf.nn.sigmoid(
logits, name='multi_predictions')
The model was trained on the open-images v2 dataset asynchronously with 50 GPU
workers and batch size 32 for 61995903 steps. RMSProp optimizer was used with
the following settings:
learning_rate = tf.train.exponential_decay(
0.045, # learning_rate
slim.get_or_create_global_step(),
552345, # decay_steps
0.94, # learning_rate_decay_factor
staircase=True
)
opt = tf.train.RMSPropOptimizer(
learning_rate,
0.9, # decay
0.9, # momentum
1.0 #rmsprop_epsilon
)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from google.protobuf import text_format
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('labelmap', '/cns/is-d/home/image-understanding/nalldrin/datasets/open-image-dataset/v2/2017_07/classes-trainable.txt', 'Labels, one per line.')
flags.DEFINE_string('dict', '/cns/is-d/home/image-understanding/nalldrin/datasets/open-image-dataset/v2/2017_07/class-descriptions.csv', 'Descriptive string for each label.')
flags.DEFINE_string('checkpoint_path', '/usr/local/google/home/nalldrin/temp/oid_graph/local/adapt-model.ckpt-61995903',
'Path to checkpoint file.')
flags.DEFINE_string('image', '',
'Comma separated paths to image files on which to perform '
'inference.')
flags.DEFINE_integer('top_k', 10, 'Maximum number of results to show.')
flags.DEFINE_float('score_threshold', None, 'Score threshold.')
def LoadLabelMap(labelmap_path, dict_path):
"""Load index->mid and mid->display name maps.
Args:
labelmap_path: path to the file with the list of mids, describing predictions.
dict_path: path to the dict.csv that translates from mids to display names.
Returns:
labelmap: an index to mid list
label_dict: mid to display name dictionary
"""
labelmap = [line.rstrip()
for line in tf.gfile.GFile(labelmap_path).readlines()]
label_dict = {}
for line in tf.gfile.GFile(dict_path).readlines():
words = [word.strip(' "\n') for word in line.split(',', 1)]
label_dict[words[0]] = words[1]
return labelmap, label_dict
def main(_):
# Load labelmap and dictionary from disk.
labelmap, label_dict = LoadLabelMap(FLAGS.labelmap, FLAGS.dict)
num_classes = len(labelmap)
g = tf.Graph()
with g.as_default():
with tf.Session() as sess:
saver = tf.train.import_meta_graph(FLAGS.checkpoint_path + '.meta')
saver.restore(sess, FLAGS.checkpoint_path)
input_values = g.get_tensor_by_name('input_values:0')
predictions = g.get_tensor_by_name('multi_predictions:0')
for image_filename in FLAGS.image.split(','):
compressed_image = tf.gfile.FastGFile(image_filename, 'rb').read()
predictions_eval = sess.run(predictions,
feed_dict={input_values: [compressed_image]})
top_k = predictions_eval.argsort()[::-1] # indices of predictions_eval sorted by score
if FLAGS.top_k > 0:
top_k = top_k[:FLAGS.top_k]
if FLAGS.score_threshold is not None:
top_k = [i for i in top_k if predictions_eval[i] >= FLAGS.score_threshold]
print('Image: "%s"\n' % image_filename)
for idx in top_k:
mid = labelmap[idx]
display_name = label_dict[mid]
score = predictions_eval[idx]
print('{:04d}: {} - {} (score = {:.2f})'.format(idx, mid, display_name,
score))
if __name__ == '__main__':
tf.app.run()
| Python | 0 | |
11fe39e743019ef7fdaadc0ae4f8782add0dc918 | update aoj | aoj/11/aoj1142.py | aoj/11/aoj1142.py | m = int(input())
for i in range(m):
d = input()
trains = [d]
for j in range(1, len(d)):
f, b = d[:j], d[j:]
rf, rb = f[::-1], b[::-1]
trains.extend([rf+b, f+rb, rf+rb, b+f, rb+f, b+rf, rb+rf])
print(len(set(trains)))
| Python | 0.000002 | |
2c2694d4c9ef3fdd51039b45951223708cbef3b9 | Add nbsp template tag | base/templatetags/nbsp.py | base/templatetags/nbsp.py | # templatetags/nbsp.py
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter()
def nbsp(value):
return mark_safe(" ".join(str(value).split(' ')))
| Python | 0 | |
5b80553b05b2c9df3818b815a2b156ad2f9f6437 | add SQS plugin to match diamond | structured_metrics/plugins/sqs.py | structured_metrics/plugins/sqs.py | from . import Plugin
class SqsPlugin(Plugin):
targets = [
{
'match': '^servers\.(?P<server>[^\.]+)\.sqs\.(?P<region>[^\.]+)\.(?P<queue>[^\.]+)\.(?P<type>ApproximateNumberOfMessages.*)$',
'target_type': 'gauge',
'configure': [
lambda self, target: self.add_tag(target, 'unit', 'Msg'),
]
}
]
# vim: ts=4 et sw=4:
| Python | 0 | |
f0c0027750aab6b116c553707edc7b1ebde5ce44 | Create allsites_LST.py | allsites_LST.py | allsites_LST.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 10:30:22 2015
Last updated on Tue Jul 30 02:12 2015
@author: O. B. Alam
@email: oba3@cornell.edu
"""
import numpy as np
import matplotlib.pyplot as plt
import shlex
###############################################################################
content = [] # list for all data
lt = [] # list for ozone localtime values
mt = [] # list for ozone month values
vel = [] # list for ozone velocities
err = [] # list for ozone errors
plotvel = [] # list for plotting the weighted averages
plotstd = [] # list for plotting the errorbars
plotvel2 = [] # list for plotting the weighted averages
plotstd2 = [] # list for plotting the errorbars
hays_vel = [] # list for haystack velocities
hays_err = [] # list for haystack errors
union_vel = [] # list for union velocities
union_err = [] # list for union errors
chs_vel = [] # list for chs velocities
chs_err = [] # list for chs errors
###############################################################################
fig = plt.figure(figsize=(10,10)) # setup figure
###############################################################################
''' returns the weighted average of a data set, given its error set '''
def wavg(data, err):
num = sum(x*(1/y)*(1/y) for x, y in zip(data, err))
denom = sum((1/y)*(1/y) for y in err)
return num/denom
''' returns a normal average of a data set with weight w = 1 '''
def navg(data, err):
return sum(data)/len(data)
''' returns the weighted standard deviation of a data set, given its error set '''
def wstd(data, err):
num1 = sum((1/y)*(1/y)*(x-wavg(data, err))*(x-wavg(data, err))
for x, y in zip(data, err))
num2 = sum((1/y)*(1/y) for y in err)
num3 = sum((1/y)*(1/y)*(1/y)*(1/y) for y in err)
return np.sqrt(num1*num2/(num2*num2-num3))
''' returns the weighted standard deviation of a data set, given its error set
and with weight w = 1. '''
def wstd1(data, err):
num1 = sum((x-navg(data, err))*(x-navg(data, err)) for x in data)
num2 = sum(err)
num3 = num2
return np.sqrt(num1*num2/(num2*num2-num3))
''' returns the standard deviation of a data set, given its error set '''
def std(data, err):
num = sum((x-wavg(data,err))*(x-wavg(data,err)) for x in data)
denom = len(data) - 1
return np.sqrt(num/denom)
###############################################################################
def plot_aeer_hourly(lt, vel, err):
# extract data from file
with open('vel_vs_localtime.txt') as q: content = q.readlines()
# kill all whitespace and replace with commas
content = [','.join(shlex.split(x)) for x in content]
# create separate localtime, velocity, error lists
for x in content:
eachRow = x.split(',')
lt.append(float(eachRow[1]))
vel.append(float(eachRow[3]))
err.append(float(eachRow[6]))
if(eachRow[8] == 'hays'):
hays_vel.append(float(eachRow[3]))
hays_err.append(float(eachRow[6]))
if(eachRow[8] == 'union'):
union_vel.append(float(eachRow[3]))
union_err.append(float(eachRow[6]))
if(eachRow[8] == 'chs'):
chs_vel.append(float(eachRow[3]))
chs_err.append(float(eachRow[6]))
# sort all lists by localtime
lt = np.array(lt)
vel = np.array(vel)
err = np.array(err)
inds = lt.argsort()
lt = lt[inds].tolist()
vel = vel[inds].tolist()
err = err[inds].tolist()
# convert localtime to integers
lt = [int(x) for x in lt]
# process normal averages and weighted standard deviations
for i in range(-5,6):
if i != 5:
ii = lt.index(i)
ij = lt.index(i+1)
plotvel.append(navg(vel[ii:ij], err[ii:ij]))
plotstd.append(wstd(vel[ii:ij], err[ii:ij]))
else:
plotvel.append(navg(vel[ij:len(vel)], err[ij:len(vel)]))
plotstd.append(wstd(vel[ij:len(vel)], err[ij:len(vel)]))
# plot data
fig.add_subplot(411)
plt.xticks(range(-5,6),[])
plt.plot(range(-5,6), hays_vel, '.', label = "Haystack", c = 'black')
plt.errorbar(range(-5,6), hays_vel, yerr = hays_err, fmt = '.', c = 'black')
plt.legend(loc=4,numpoints=1)
plt.ylim(-42,48)
plt.xlim(-6,6)
plt.title("Nighttime Variation in Ozone Velocity")
fig.add_subplot(412)
plt.xticks(range(-5,6),[])
plt.plot(range(-5,6), union_vel, '.', label = "Union", c = 'black')
plt.legend(loc=4,numpoints=1)
plt.errorbar(range(-5,6), union_vel, yerr = union_err, fmt = '.', c = 'black')
plt.ylim(-42,48)
plt.xlim(-6,6)
fig.add_subplot(413)
plt.xticks(range(-5,6),[])
plt.plot(range(-5,6), chs_vel, '.', label = "Chelmsford", c = 'black')
plt.errorbar(range(-5,6), chs_vel, yerr = chs_err, fmt = '.', c = 'black')
plt.legend(loc=4,numpoints=1)
plt.ylim(-42,48)
plt.ylabel('Meridional Wind Velocity (m/s)')
plt.xlim(-6,6)
fig.add_subplot(414)
#plt.xticks(range(-5,6),[])
plt.plot(range(-5,6), plotvel, '.', label = "Ozone average", c = 'black')
plt.errorbar(range(-5,6), plotvel, yerr = plotstd, fmt = '.', c = 'black')
plt.legend(loc=4,numpoints=1) # place the plot legend at the bottom right corner
#plt.title('Northward Wind Velocity (m/s) versus Hour')
plt.ylim(-42,48)
plt.xlim(-6,6)
plt.xlabel("Local Solar Time (Hours)")
###############################################################################
plot_aeer_hourly(lt, vel, err)
vel = []
err = []
with open('out_half.txt') as q: vel = q.readlines()
vel = [x.strip('\n') for x in vel]
vel = [x.strip(' ') for x in vel]
vel = [float(x) for x in vel]
with open('out_err_half.txt') as q: err = q.readlines()
err = [x.strip('\n') for x in err]
err = [x.strip(' ') for x in err]
err = [float(x) for x in err]
fig.savefig('FIN_ALLSITES_LT.eps', format='eps', dpi=1000)
plt.show()
| Python | 0.000001 | |
e2ad0b6bfa01e0aa263dd401a29ad60c24c755b0 | Create bf4-server-status.py | bf4-server-status.py | bf4-server-status.py | #!/usr/bin/env python
# debian deps:
# python-django
'''Get BF4 server data and output to HTML
Usage: bf4_server_status.py [--debug]
'''
import urllib
import json
import os
import socket
import sys
import time
from django.template import Template, Context
from django.conf import settings
from django.utils.datastructures import SortedDict
settings.configure() # We have to do this to use django templates standalone - see
# http://stackoverflow.com/questions/98135/how-do-i-use-django-templates-without-the-rest-of-django
# Our template. Could just as easily be stored in a separate file
template = """
<style>
table,th,td
{
border:1px solid black;
font-size:95%;
}
</style>
<meta http-equiv="refresh" content="{{refresh}}" >
{{player_count}} player(s) on {{current_map}}.
<table style="width:270px">
<tr>
<td>Player</td>
<td>Cheat Score</td>
</tr>
{% for key, value in player_data.items %}
<tr>
<td><a href="http://battlelog.battlefield.com/bf4/soldier/{{key}}/stats/{{value.personaId}}/pc/">{{key}}</a></td>
{% if value.cheatscore < 10 or value.cheatscore == None %}
<td><a href="{{value.bf4db_url}}">{{value.cheatscore}}</a></td>
{% else %}
<td bgcolor="red"><a href="{{value.bf4db_url}}">{{value.cheatscore}}</a></td>
{% endif %}
</tr>
{% endfor %}
</table>
Last updated at {{update_time}} UTC.
"""
# Mapping engine map names to human-readable names
map_names = {'MP_Abandoned': 'Zavod 311',
'MP_Damage': 'Lancang Dam',
'MP_Flooded': 'Flood Zone',
'MP_Journey': 'Golmud Railway',
'MP_Naval': 'Paracel Storm',
'MP_Prison': 'Operation Locker',
'MP_Resort': 'Hainan Resort',
'MP_Siege': 'Siege of Shanghai',
'MP_TheDish': 'Rogue Transmission',
'MP_Tremors': 'Dawnbreaker',
'XP1_001': 'Silk Road',
'XP1_002': 'Altai Range',
'XP1_003': 'Guilin Peaks',
'XP1_004': 'Dragon Pass',
'XP0_Caspian': 'Caspian Border',
'XP0_Firestorm': 'Operation Firestorm',
'XP0_Metro': 'Operation Metro',
'XP0_Oman': 'Gulf of Oman',
'XP2_001': 'Lost Islands',
'XP2_002': 'Nansha strike',
'XP2_003': 'WaveBreaker',
'XP2_004': 'Operation Mortar'}
# Put your URLs here
server_url = ''
file_dir = ''
refresh = 60
bf4db_url = 'http://api.bf4db.com/api-player.php?name='
# Generic way to write our files
def write_file(filename, text):
with open(filename, 'w') as f:
f.write(text)
# http://stackoverflow.com/questions/788411/check-to-see-if-python-script-is-running
def get_lock(process_name):
global lock_socket
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
lock_socket.bind('\0' + process_name)
except socket.error:
print 'already running. exiting.'
sys.exit()
def json_query(json_url):
retry_limit = range(1,6)
for x in retry_limit:
try:
result_json = json.load(urllib.urlopen(json_url))
return result_json
except:
print 'query failed - URL was ' + json_url
print 'attempting retry ' + str(x) + ' of ' + str(retry_limit[-1])
time.sleep(1)
if x >= retry_limit[-1]:
print 'giving up. exiting.'
sys.exit(1)
else:
continue
def write_template(player_count, current_map, player_data):
write_file(os.path.join(file_dir + 'player_count.html'), player_count)
update_time = time.strftime('%H:%M:%S %m/%d/%Y')
t = Template(template)
c = Context({"player_count": player_count,
"current_map": current_map,
"map_names": map_names,
"refresh": refresh,
"update_time": update_time,
"player_data": player_data})
write_file(os.path.join(file_dir + 'index.html'), t.render(c))
def server_status(server_url):
server_json = json_query(server_url)
try:
current_map_id = server_json['message']['SERVER_INFO']['map']
player_count_json = server_json['message']['SERVER_INFO']['slots']['2']['current']
except TypeError:
if debug:
print 'Unable to query battlelog'
sys.exit(1)
current_map = map_names[current_map_id]
player_count = str(player_count_json)
player_list = []
for x in range(0, len(server_json['message']['SERVER_PLAYERS'])):
player_list.append(server_json['message']['SERVER_PLAYERS'][x]['persona']['user']['username'])
if debug:
print 'Player count: ' + player_count
return player_list, player_count, current_map
def bf4db_query(player_list):
player_dict = SortedDict()
for x in sorted(player_list, key=lambda s: s.lower()):
time.sleep(0.5)
try:
bf4db_json = json_query(bf4db_url + x)
player_dict[x] = bf4db_json['data']
except ValueError:
player_dict[x] = None
if debug:
print x + ' ' + str(player_dict[x]['cheatscore'])
return player_dict
def cmdline():
global debug
try:
arg = sys.argv[1]
if arg == '--debug':
debug = True
else:
print __doc__
sys.exit(1)
except IndexError:
debug = False
cmdline()
get_lock('bf4_server_status.py')
players = server_status(server_url)
player_data = bf4db_query(players[0])
write_template(players[1], players[2], player_data)
| Python | 0.000002 | |
6619bbff82f9a74a1de6c8cb569ea5cc639557d0 | Refresh access token after user signs in #44 | datalab/context/_context.py | datalab/context/_context.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Context functionality."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
from . import _project
from . import _utils
class Context(object):
"""Maintains contextual state for connecting to Cloud APIs.
"""
_global_context = None
def __init__(self, project_id, credentials):
"""Initializes an instance of a Context object.
Args:
project_id: the current cloud project.
credentials: the credentials to use to authorize requests.
"""
self._project_id = project_id
self._credentials = credentials
@property
def credentials(self):
"""Retrieves the value of the credentials property.
Returns:
The current credentials used in authorizing API requests.
"""
return self._credentials
def set_credentials(self, credentials):
""" Set the credentials for the context. """
self._credentials = credentials
@property
def project_id(self):
"""Retrieves the value of the project_id property.
Returns:
The current project id to associate with API requests.
"""
if not self._project_id:
raise Exception('No project ID found. Perhaps you should set one with the "%projects set ..." magic.')
return self._project_id
def set_project_id(self, project_id):
""" Set the project_id for the context. """
self._project_id = project_id
@staticmethod
def is_signed_in():
""" If the user has signed in or it is on GCE VM with default credential."""
try:
_utils.get_credentials()
return True
except Exception:
return False
@staticmethod
def default():
"""Retrieves a default Context object, creating it if necessary.
The default Context is a global shared instance used every time the default context is
retrieved.
Attempting to use a Context with no project_id will raise an exception, so on first use
set_project_id must be called.
Returns:
An initialized and shared instance of a Context object.
"""
credentials = _utils.get_credentials()
if Context._global_context is None:
project = _project.Projects.get_default_id(credentials)
Context._global_context = Context(project, credentials)
else:
# Always update the credentials in case the access token is revoked or expired
Context._global_context.set_credentials(credentials)
return Context._global_context
| # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Context functionality."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
from . import _project
from . import _utils
class Context(object):
"""Maintains contextual state for connecting to Cloud APIs.
"""
_global_context = None
def __init__(self, project_id, credentials):
"""Initializes an instance of a Context object.
Args:
project_id: the current cloud project.
credentials: the credentials to use to authorize requests.
"""
self._project_id = project_id
self._credentials = credentials
@property
def credentials(self):
"""Retrieves the value of the credentials property.
Returns:
The current credentials used in authorizing API requests.
"""
return self._credentials
def set_credentials(self, credentials):
""" Set the credentials for the context. """
self._credentials = credentials
@property
def project_id(self):
"""Retrieves the value of the project_id property.
Returns:
The current project id to associate with API requests.
"""
if not self._project_id:
raise Exception('No project ID found. Perhaps you should set one with the "%projects set ..." magic.')
return self._project_id
def set_project_id(self, project_id):
""" Set the project_id for the context. """
self._project_id = project_id
@staticmethod
def is_signed_in():
""" If the user has signed in or it is on GCE VM with default credential."""
try:
_utils.get_credentials()
return True
except Exception:
return False
@staticmethod
def default():
"""Retrieves a default Context object, creating it if necessary.
The default Context is a global shared instance used every time the default context is
retrieved.
Attempting to use a Context with no project_id will raise an exception, so on first use
set_project_id must be called.
Returns:
An initialized and shared instance of a Context object.
"""
if Context._global_context is None:
credentials = _utils.get_credentials()
project = _project.Projects.get_default_id(credentials)
Context._global_context = Context(project, credentials)
return Context._global_context
| Python | 0 |
0f80b1d304eb0d4443498c94557b0ef96d098c15 | Add version | ernest/version.py | ernest/version.py | import os
VERSION = '0.1a1'
VERSION_RAW = os.environ.get('ERNEST_VERSION', VERSION)
| Python | 0 | |
32cf7ab02ecb8f1dbd02b8a78001f8c15a97f794 | Create population.py | analysis/population.py | analysis/population.py | import pandas as pd
import matplotlib.pyplot as plt
import urllib2
"""
This program reads in a csv file containing census data on Northamptonshire county in the UK.
It then plots the population data according to gender, gender by age range, and total population by age range.
Two figure files will be output to the directory specified in the variable plot_path.
"""
# the original populaton data file can be found at http://www.northamptonshireanalysis.co.uk/dataviews/view?viewId=151
# at the above URL under 'Geo-Types' choose the csv for 'County'
# where to save the figure files
plot_path = "C:\\Users\mcassar\Desktop\Coursolve project\\"
url = 'http://www.northamptonshireanalysis.co.uk/data/csv?viewId=151&geoId=28&subsetId=&viewer=CSV'
response = urllib2.urlopen(url)
#pop_file = "C:\\Users\\mcassar\\Desktop\\coursolve project\\council_pop.csv"
#df_pop = pd.read_csv(pop_file)
df_pop = pd.read_csv(response)
# remove rows not corresponding to Northamptonshire population
df_pop = df_pop[0:1]
# these ranges match what is given in the data file
age_ranges = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79', '80-84', '85-89', '90'] # should be 90+ but that kept giving an error
# create total population by gender dataframe
df_pop_male_female = df_pop[['Total Resident Population Persons all ages Male(2011)', 'Total Resident Population Persons All Ages Female(2011)']]
df_pop_male_female.columns = ['Male', 'Female']
df_pop_male_female_percent = df_pop_male_female / df_pop.values[0,2] #dividing by total population
# generate bar plots for total population by gender
fig, axes = plt.subplots(1,2)
df_pop_male_female.transpose().plot(kind='bar', ax=axes[0], color=['b', 'y'], legend=False, title='Actual Value') # need the transpose because the bins are the age ranges and these need to be the rows not columns
df_pop_male_female_percent.transpose().plot(kind='bar', ax=axes[1], color=['b', 'y'], legend=False, title='Percent of Total')
plt.suptitle("Northamptonshire Population by Gender (2011)", size=16)
plt.subplots_adjust(top=0.85, bottom=0.15) # adjust spacing so subplot titles are farther from main title
#plt.savefig(plot_path + 'County_population_gender.png')
# segment the data by total population by age range (need to use 'Total(2011)' to do this as every column uses just 'Total'
total_cols = [col for col in df_pop.columns if 'Total(2011)' in col]
df_pop_total = df_pop[total_cols] # select only the column names with 'Male' in the title
total_pop = df_pop_total.values[0,0]
df_pop_total_by_age = df_pop_total.drop(df_pop_total.columns[[0]], axis=1)
df_pop_total_by_age.columns = age_ranges
df_pop_total_by_age_percent = df_pop_total_by_age / total_pop # get % pop by age by dividing by total population
# segment the data by male and age range
male_cols = [col for col in df_pop.columns if 'Male' in col]
df_pop_male = df_pop[male_cols] # select only the column names with 'Male' in the title
total_male_pop = df_pop_male.values[0,0]
df_pop_male_by_age = df_pop_male.drop(df_pop_male.columns[[0]], axis=1) # get rid of total male population column
df_pop_male_by_age.columns = age_ranges
df_pop_male_by_age_percent = df_pop_male_by_age / total_male_pop # get % pop by age by dividing by total male pop
# segment the data by female and age range
female_cols = [col for col in df_pop.columns if 'Female' in col]
df_pop_female = df_pop[female_cols] # select only the column names with 'Female' in the title
total_female_pop = df_pop_female.values[0,0]
df_pop_female_by_age = df_pop_female.drop(df_pop_female.columns[[0]], axis=1) # get rid of total female population column
df_pop_female_by_age.columns = age_ranges
df_pop_female_by_age_percent = df_pop_female_by_age / total_female_pop # get % pop by age by dividing by total female pop
# generate bar plots for total population, total male population, and female population by age range
fig, axes = plt.subplots(3,2)
ax = df_pop_total_by_age.transpose().plot(kind='bar', ax = axes[0,0], legend=False, title='Count')
df_pop_total_by_age_percent.transpose().plot(kind='bar', ax=axes[0,1], legend=False, title='Percent')
ax1 = df_pop_male_by_age.transpose().plot(kind='bar', ax = axes[1,0], legend=False)
df_pop_male_by_age_percent.transpose().plot(kind='bar', ax=axes[1,1], legend=False)
ax2 = df_pop_female_by_age.transpose().plot(kind='bar', ax = axes[2,0], legend=False)
df_pop_female_by_age_percent.transpose().plot(kind='bar', ax=axes[2,1], legend=False)
plt.suptitle("Northamptonshire Population by Age Range (2011)", size=16)
plt.subplots_adjust(top=0.87, left=0.15, hspace=0.4) # adjust spacing b/w subplots and so so subplot titles are farther from main title
ax.set_ylabel('Total')
ax1.set_ylabel('Male')
ax2.set_ylabel('Female')
#plt.savefig(plot_path + 'County_population_gender_age.png')
plt.show()
| Python | 0 | |
139c7e2ac5b5c702cd32f4e014d8f3f654855c32 | Add ping pong python script | nRF51/examples/ping_pong/scripts/ping_pong.py | nRF51/examples/ping_pong/scripts/ping_pong.py | from threading import Thread
import subprocess
import sys
import os
import time
import datetime
import serial
import serial.tools.list_ports
SEGGER_VID = 1366
BAUDRATE = 460800;
verbose = False
flow_control = True
startTime = datetime.datetime.now()
central = None
snr_max = 0
def printUsage():
    """Write command-line usage information for ping_pong.py to stdout."""
    usage_lines = [
        "Usage: ping_pong.py [<baudrate>] [-f] [-v] [-c <port>]",
        "\t<baudrate>\tThe desired baudrate. If no argument is given, the baudrate defaults to " + str(BAUDRATE) + ".",
        "\t-f\t\tEnable RTS/CTS flow control",
        "\t-v\t\tEnable all event logging from central device",
        "\t-c <port>\t\tForce the designated com port to act as central",
    ]
    for usage_line in usage_lines:
        sys.stdout.write(usage_line + "\n")
# ---- module-level command-line argument handling ----
if "-h" in sys.argv or "--help" in sys.argv:
    printUsage()
    exit(0)
if "-f" in sys.argv:
    flow_control = True
if "-v" in sys.argv:
    verbose = True
if "-c" in sys.argv:
    # The port name must immediately follow the -c flag
    central_index = sys.argv.index("-c") + 1
    if len(sys.argv) <= central_index:
        printUsage()
        exit(160) #bad arguments
    central = sys.argv[central_index]
for arg in sys.argv:
    # NOTE(review): the loop variable ``arg`` is never used -- every
    # iteration re-parses sys.argv[1], and a non-numeric first argument
    # (or no argument at all) is silently ignored.  Confirm whether a
    # per-argument scan was intended.
    try:
        BAUDRATE = int(sys.argv[1])
        if BAUDRATE is None:
            printUsage()
            exit(160)
    except:
        pass
def getPorts():
    """Return a list of candidate serial port device names.

    On the Windows branch the list is filtered down to ports whose USB
    vendor ID matches SEGGER_VID; on every other platform all detected
    ports are returned unfiltered.
    """
    # Bug fix: the original used ``sys.platform is "Windows"``.  ``is``
    # tests object identity, never string equality, so it is not a valid
    # way to compare platform strings.
    # NOTE(review): sys.platform reports "win32" on Windows, not
    # "Windows", so this branch still never fires -- TODO confirm the
    # intended platform string.
    if sys.platform == "Windows":
        # hwid[12:16] is assumed to carry the 4-digit USB vendor ID --
        # verify against the pyserial hwid format in use.
        ports = [(name, hwid[12:16]) for (name, desc, hwid) in serial.tools.list_ports.comports()]
        portnames = [name for (name, vid) in ports if vid == str(SEGGER_VID)]
    else:
        portnames = [port[0] for port in serial.tools.list_ports.comports()]
    return portnames
def portThread(port, snr):
    """Worker thread: open *port*, assign it handle *snr*, and pump its data.

    Writes the handle number to the device, then reads bytes forever.
    Only the central device (snr == 0) has its output echoed to stdout.
    Runs until the serial connection is lost.
    """
    global startTime
    global snr_max
    s = None
    try:
        # rtscts enables hardware flow control when requested on the CLI
        s = serial.Serial(port, BAUDRATE, rtscts = flow_control)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        if not s is None:
            s.close()
        sys.stdout.write("Failed to establish connection to " + port + " (handle " + str(snr) + ")\n")
        return
    msgnum = 1  # NOTE(review): never used after initialisation
    try:
        # Tell the device which handle it has been assigned
        s.write(str(snr) + "\r\n")
        sys.stdout.write("Assigned handle " + str(snr) + " to " + port + "\r\n")
        prevtime = datetime.datetime.now()  # NOTE(review): unused
        while True:
            data = s.read()
            # Only echo traffic from the central device (handle 0).
            # NOTE(review): ``snr is 0`` relies on CPython small-int
            # interning; ``snr == 0`` would be the correct comparison.
            if snr is 0:
                sys.stdout.write(data)
    except Exception, e:  # Python 2 exception syntax
        print e
        sys.stdout.write("Lost " + port + "\n")
        return
threads = []
def monitorThread():
    """Poll for serial ports once a second and spawn a portThread per new port.

    The first port seen (or the one forced with -c) becomes the central
    device (handle 0); every other new port gets the next free handle.
    """
    global central
    global snr_max
    ports = []  # ports a worker thread has already been spawned for
    snr = 1  # next handle to hand out to a peripheral
    while True:
        current_ports = getPorts()
        for port in current_ports:
            if not port in ports:
                this_snr = snr
                snr_max = int(snr)
                if central == None or central == port: #force central
                    this_snr = 0
                    central = port
                else:
                    snr += 1
                thread = Thread(target = portThread, args = (port, this_snr), name = port)
                # Daemon threads die together with the main program
                thread.daemon = True
                thread.start()
                threads.append(thread)
        ports = current_ports
        time.sleep(1)
# Start the port-discovery thread, then park the main thread so that
# Ctrl-C (KeyboardInterrupt) can cleanly terminate all daemon workers.
thread = Thread(target = monitorThread, name = "monitor")
thread.daemon = True
thread.start()
threads.append(thread)
try:
    while True: time.sleep(100)
except (KeyboardInterrupt, SystemExit):
    sys.exit(0)
| Python | 0.000004 | |
97b9e370d31e2e7abb3d9d56c046f61e2723dc90 | Create 1-helloworld.py | Code/1-helloworld.py | Code/1-helloworld.py | #Print Hello World!
print "Hello World!"
| Python | 0.999994 | |
0fbaaddf7cffd97ae162f24b8ae22fe88dbfa055 | Add a node class for binomial random variables | bayespy/inference/vmp/nodes/binomial.py | bayespy/inference/vmp/nodes/binomial.py | ######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
A module for the binomial distribution node
"""
import numpy as np
import scipy.special as special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .beta import BetaMoments
from .node import (Moments,
ensureparents)
from bayespy.utils import utils
class BinomialMoments(Moments):
    """
    Class for the moments of binomial variables.

    The only moment is the count itself: a scalar integer in the
    inclusive range [0, N], where N is the number of trials.
    """

    ndim_observations = 0

    def __init__(self, N):
        # Number of trials of the binomial distribution
        self.N = N
        super().__init__()

    def compute_fixed_moments(self, x):
        """
        Compute the moments for a fixed value.

        Raises ValueError if the (rounded) value lies outside [0, N].
        """
        # Make sure the values are integers in valid range
        x = np.round(x)
        # Bug fix: a count of exactly N (all trials succeed) is valid,
        # so only values strictly greater than N are rejected
        # (the original used ``x >= self.N``).
        if np.any(x < 0) or np.any(x > self.N):
            raise ValueError("Invalid count")
        # Now, the moments are just the counts
        return [x]

    def compute_dims_from_values(self, x):
        """
        Return the shape of the moments for a fixed value.

        The realizations are scalars, thus the shape of the moment is ().
        """
        return ( (), )
class BinomialDistribution(ExponentialFamilyDistribution):
    """
    Class for the VMP formulas of binomial variables.
    """

    # Only one moment (the counts) and that is a scalar
    ndims = (0,)
    # Only one moment (the log-probabilities) and that is a vector
    ndims_parents = ( (1,), )

    def __init__(self, N):
        # Validate the number of trials once so the formulas below can
        # rely on a sane N.
        if not utils.isinteger(N):
            raise ValueError("Number of trials must be integer")
        if np.any(N < 0):
            raise ValueError("Number of trials must be non-negative")
        self.N = N
        super().__init__()

    def compute_message_to_parent(self, parent, index, u_self, u_p):
        """
        Compute the message to a parent node.
        """
        raise NotImplementedError()

    def compute_phi_from_parents(self, u_p, mask=True):
        """
        Compute the natural parameter vector given parent moments.

        The natural parameter is the difference of the parent's two
        log-probability moments (the log-odds).
        """
        logp0 = u_p[0][...,0]
        logp1 = u_p[0][...,1]
        phi0 = logp0 - logp1
        return [phi0]

    def compute_moments_and_cgf(self, phi, mask=True):
        """
        Compute the moments and :math:`g(\phi)`.
        """
        # E[x] = N * sigmoid(phi)
        u0 = self.N / (1 + np.exp(-phi[0]))
        # log-normalizer: -N * log(1 + exp(phi))
        g = -self.N * np.log1p(np.exp(phi[0]))
        return ( [u0], g )

    def compute_cgf_from_parents(self, u_p):
        """
        Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
        """
        # Only the second log-probability moment of the parent is used;
        # the unused ``logp0`` local of the original has been removed.
        logp1 = u_p[0][...,1]
        return self.N * logp1

    def compute_fixed_moments_and_f(self, x, mask=True):
        """
        Compute the moments and :math:`f(x)` for a fixed value.
        """
        # Make sure the values are integers in valid range
        if not utils.isinteger(x):
            raise ValueError("Counts must be integer")
        # Bug fix: x == N is a valid binomial count, so only values
        # strictly greater than N are rejected (was ``x >= self.N``).
        if np.any(x < 0) or np.any(x > self.N):
            raise ValueError("Invalid count")
        # Now, the moments are just the counts
        u = [x]
        # f(x) = log of the binomial coefficient C(N, x)
        f = (special.gammaln(self.N+1) -
             special.gammaln(x+1) -
             special.gammaln(self.N-x+1))
        return (u, f)

    def shape_of_value(self, dims):
        """
        Return the shape of realizations.

        The realizations are scalars (counts) so the shape is ().
        """
        return ()
class Binomial(ExponentialFamily):
    """
    Node for binomial random variables.

    The parent ``p`` supplies the success probability (beta moments);
    ``n`` is the fixed number of trials.
    """

    _parent_moments = (BetaMoments(),)

    @ensureparents
    @useconstructor
    def __init__(self, p, n=None, **kwargs):
        # ``n`` is consumed by _constructor via the @useconstructor
        # machinery; only the parent ``p`` is passed up the chain.
        super().__init__(p, **kwargs)

    @classmethod
    def _constructor(cls, p, n=None, plates=None, **kwargs):
        """
        Constructs distribution and moments objects.
        """
        moments = BinomialMoments(n)
        distribution = BinomialDistribution(n)
        # Return (dims, plates, distribution, moments, parent moments)
        return ( ( (), ),
                 cls._total_plates(plates, p.plates, np.shape(n)),
                 distribution,
                 moments,
                 cls._parent_moments)

    def random(self):
        """
        Draw a random sample from the distribution.
        """
        raise NotImplementedError()

    def show(self):
        """
        Print the distribution using standard parameterization.
        """
        # Recover p from the natural parameter (log-odds) via the sigmoid
        p = 1 / (1 + np.exp(-self.phi[0]))
        n = self._distribution.N
        print("%s ~ Binomial(n, p)" % self.name)
        print(" n = ")
        print(n)
        print(" p = ")
        print(p)
| Python | 0 | |
50f6792de9b8dce54492b897fcffae33d1cb75ba | create test url as an optional setting | authorize/conf.py | authorize/conf.py | from django.conf import settings
from appconf import AppConf
class authorizeConf(AppConf):
    """django-appconf settings container for the authorize app.

    Each attribute becomes a Django setting named with the Meta prefix;
    TEST_URL is exposed as settings.AUTHORIZE_TEST_URL and may be
    overridden in the project settings.
    """

    # Optional flag, off by default.  Presumably toggles use of a test
    # endpoint -- confirm against the code that reads this setting.
    TEST_URL = False

    class Meta:
        # Settings are looked up as AUTHORIZE_<NAME>
        prefix = 'authorize'
| Python | 0 | |
6edd782d39fd64fceca86c8edb224ae3f2378083 | Create new package (#6477) | var/spack/repos/builtin/packages/r-seurat/package.py | var/spack/repos/builtin/packages/r-seurat/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSeurat(RPackage):
    """Seurat is an R package designed for QC, analysis, and exploration of
    single cell RNA-seq data."""

    homepage = "http://satijalab.org/seurat/"
    url = "https://cran.r-project.org/src/contrib/Seurat_2.1.0.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/Seurat"

    version('2.1.0', '46427837bb739883f9b7addd08fccee5')
    version('2.0.1', 'a77794891e93b9fa1ef41735fe8424ea')

    # Pinned to the R 3.4.x series
    depends_on('r@3.4.0:3.4.9')

    # CRAN dependencies, all needed both at build and at run time
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-gplots', type=('build', 'run'))
    depends_on('r-reshape2', type=('build', 'run'))
    depends_on('r-ape', type=('build', 'run'))
    depends_on('r-tidyr', type=('build', 'run'))
    depends_on('r-caret', type=('build', 'run'))
    depends_on('r-gdata', type=('build', 'run'))
    depends_on('r-glue', type=('build', 'run'))
    depends_on('r-pkgconfig', type=('build', 'run'))
    depends_on('r-plogr', type=('build', 'run'))
    depends_on('r-gridextra', type=('build', 'run'))
    depends_on('r-cowplot', type=('build', 'run'))
    depends_on('r-rocr', type=('build', 'run'))
    depends_on('r-hmisc', type=('build', 'run'))
    depends_on('r-nmf', type=('build', 'run'))
    depends_on('r-irlba', type=('build', 'run'))
    depends_on('r-igraph', type=('build', 'run'))
    depends_on('r-fpc', type=('build', 'run'))
    depends_on('r-rcppprogress', type=('build', 'run'))
    depends_on('r-lars', type=('build', 'run'))
    depends_on('r-dtw', type=('build', 'run'))
    depends_on('r-mixtools', type=('build', 'run'))
    depends_on('r-ica', type=('build', 'run'))
    depends_on('r-diffusionmap', type=('build', 'run'))
    depends_on('r-tsne', type=('build', 'run'))
    depends_on('r-rtsne', type=('build', 'run'))
    depends_on('r-ranger', type=('build', 'run'))
    depends_on('r-pbapply', type=('build', 'run'))
    depends_on('r-ggjoy', type=('build', 'run'))
    depends_on('r-plotly', type=('build', 'run'))
    depends_on('r-sdmtools', type=('build', 'run'))
    depends_on('r-tclust', type=('build', 'run'))
    depends_on('r-fnn', type=('build', 'run'))
    depends_on('r-vgam', type=('build', 'run'))
| Python | 0 | |
0a80cf698a26abdf17aeeb01e21cb9910e6463d0 | add a test suite | booger_test.py | booger_test.py | #!/usr/bin/python
################################################################################
# "THE BEER-WARE LICENSE" (Revision 42):
# <thenoviceoof> wrote this file. As long as you retain this notice
# you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in
# return
# Nathan Hwang <thenoviceoof>
# ----------------------------------------------------------------------------
################################################################################
from unittest import TestCase
################################################################################
# Nosetest parser
from booger import NOSE_DIV_WIDTH, NosetestsParser
class NosetestsParserTest(TestCase):
    """Unit tests for booger.NosetestsParser."""

    def setUp(self):
        # Fresh parser instance for every test
        self.parser = NosetestsParser()

    def short_output_test(self):
        # NOTE(review): the name does not start with ``test``, so plain
        # unittest discovery will skip it; it relies on nose-style
        # ``*_test`` matching -- confirm the intended test runner.
        # A line of 70 '=' characters marks the end of the short output.
        inp = '=' * 70
        out, end = self.parser.parse_short_output(inp)
        assert end == True
| Python | 0.000001 | |
771773cd3451dc04340e4a4856f7346841349772 | Add cross bicoherence | bicoherencex.py | bicoherencex.py | #!/usr/bin/env python
from __future__ import division
import numpy as np
from scipy.linalg import hankel
import scipy.io as sio
import matplotlib.pyplot as plt
from tools import *
def bicoherencex(w, x, y, nfft=None, wind=None, nsamp=None, overlap=None):
    """
    Direct (FD) method for estimating cross-bicoherence.

    Parameters:
      w,x,y   - data vector or time-series
              - should have identical dimensions
      nfft    - fft length [default = power of two > nsamp]
                actual size used is power of two greater than 'nsamp'
      wind    - specifies the time-domain window to be applied to each
                data segment; should be of length 'segsamp' (see below);
                otherwise, the default Hanning window is used.
      nsamp   - samples per segment [default: such that we have 8 segments]
              - if x is a matrix, nsamp is set to the number of rows
      overlap - percentage overlap, 0 to 99 [default = 50]
              - if y is a matrix, overlap is set to 0.

    Output:
      bic     - estimated cross-bicoherence: an nfft x nfft array, with
                origin at center, and axes pointing down and to the right.
      waxis   - vector of frequencies associated with the rows and columns
                of bic; sampling frequency is assumed to be 1.

    NOTE(review): this function is Python 2 only (print statements,
    xrange) and shows a contour plot as a side effect before returning.
    """
    if w.shape != x.shape or x.shape != y.shape:
        raise ValueError('w, x and y should have identical dimentions')
    (ly, nrecs) = y.shape
    # A single row/column vector is treated as one record of ly samples
    if ly == 1:
        ly = nrecs
        nrecs = 1
        w = w.reshape(1,-1)
        x = x.reshape(1,-1)
        y = y.reshape(1,-1)
    # Defaults: 128-point FFT, 50% overlap, segment length chosen so
    # that roughly 8 segments cover the data
    if not nfft:
        nfft = 128
    if not overlap: overlap = 50
    overlap = max(0,min(overlap,99))
    if nrecs > 1: overlap = 0
    if not nsamp: nsamp = 0
    if nrecs > 1: nsamp = ly
    if nrecs == 1 and nsamp <= 0:
        nsamp = np.fix(ly/ (8 - 7 * overlap/100))
    if nfft < nsamp:
        nfft = 2**nextpow2(nsamp)
    # Convert the overlap percentage to a sample count and derive the
    # hop size and the number of segments actually available
    overlap = np.fix(overlap/100 * nsamp)
    nadvance = nsamp - overlap
    nrecs = np.fix((ly*nrecs - overlap) / nadvance)
    # NOTE(review): ``if not wind`` raises "truth value of an array is
    # ambiguous" when a window array is actually passed in -- should
    # presumably be ``if wind is None``; confirm before changing.
    if not wind:
        wind = np.hanning(nsamp)
    try:
        (rw, cw) = wind.shape
    except ValueError:
        (rw,) = wind.shape
        cw = 1
    # The window must be a vector of exactly nsamp points
    if min(rw, cw) != 1 or max(rw, cw) != nsamp:
        print "Segment size is " + str(nsamp)
        print "Wind array is " + str(rw) + " by " + str(cw)
        print "Using default Hanning window"
        wind = np.hanning(nsamp)
    wind = wind.reshape(1,-1)
    # Accumulate triple products
    bic = np.zeros([nfft, nfft])
    Pyy = np.zeros([nfft,1])
    Pww = np.zeros([nfft,1])
    Pxx = np.zeros([nfft,1])
    # Hankel index matrix: mask[i,j] addresses the frequency bin i+j
    # (mod nfft), used to pick Y*(f1+f2) out of the spectrum
    mask = hankel(np.arange(nfft),np.array([nfft-1]+range(nfft-1)))
    Yf12 = np.zeros([nfft,nfft])
    ind = np.transpose(np.arange(nsamp))
    w = w.ravel(order='F')
    x = x.ravel(order='F')
    y = y.ravel(order='F')
    # Per-segment: detrend (remove mean), window, FFT, and accumulate
    # the power spectra and the W(f1) X(f2) Y*(f1+f2) triple product
    for k in xrange(nrecs):
        ws = w[ind]
        ws = (ws - np.mean(ws)) * wind
        Wf = np.fft.fft(ws, nfft) / nsamp
        CWf = np.conjugate(Wf)
        Pww = Pww + flat_eq(Pww, (Wf*CWf))
        xs = x[ind]
        xs = (xs - np.mean(xs)) * wind
        Xf = np.fft.fft(xs, nfft) / nsamp
        CXf = np.conjugate(Xf)
        Pxx = Pxx + flat_eq(Pxx, (Xf*CXf))
        ys = y[ind]
        ys = (ys - np.mean(ys)) * wind
        Yf = np.fft.fft(ys, nfft) / nsamp
        CYf = np.conjugate(Yf)
        Pyy = Pyy + flat_eq(Pyy, (Yf*CYf))
        Yf12 = flat_eq(Yf12, CYf.ravel(order='F')[mask])
        bic = bic + (Wf * np.transpose(Xf)) * Yf12
        ind = ind + int(nadvance)
    # Average over segments, then normalize the squared magnitude of
    # the triple product by the three power spectra
    bic = bic / nrecs
    Pww = Pww / nrecs
    Pxx = Pxx / nrecs
    Pyy = Pyy / nrecs
    mask = flat_eq(mask, Pyy.ravel(order='F')[mask])
    bic = abs(bic)**2 / ((Pww * np.transpose(Pxx)) * mask)
    bic = np.fft.fftshift(bic)
    # Contour plot of magnitude bispectrum
    if nfft%2 == 0:
        waxis = np.transpose(np.arange(-1*nfft/2, nfft/2)) / nfft
    else:
        waxis = np.transpose(np.arange(-1*(nfft-1)/2, (nfft-1)/2+1)) / nfft
    cont = plt.contourf(waxis,waxis,bic,100, cmap=plt.cm.Spectral_r)
    plt.colorbar(cont)
    plt.title('Bicoherence estimated via the direct (FFT) method')
    plt.xlabel('f1')
    plt.ylabel('f2')
    # Report the location and value of the bicoherence peak.
    # NOTE(review): the message prints waxis[col] twice; the row
    # coordinate waxis[row[col]] is presumably intended -- confirm.
    colmax, row = bic.max(0), bic.argmax(0)
    maxval, col = colmax.max(0), colmax.argmax(0)
    print 'Max: bic('+str(waxis[col])+','+str(waxis[col])+') = '+str(maxval)
    plt.show()
    return (bic, waxis)
def test():
    """Smoke test: estimate cross-bicoherence of the bundled demo signals.

    Loads the nl1 demo data set (MATLAB file) and runs bicoherencex with
    x as both of the first two inputs; the plot is shown interactively.
    """
    nl1 = sio.loadmat('matfile/demo/nl1.mat')
    dbic = bicoherencex(nl1['x'], nl1['x'], nl1['y'])
if __name__ == '__main__':
test()
| Python | 0.998832 | |
edf7c8c1d3ea1f85c6c9888dd5ee759443f1db1c | add billing urls | billing/urls.py | billing/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^generate_pdf/(?P<bill_id>\d+)$', views.generate_pdf,
name='generate-pdf')
]
| Python | 0.000001 | |
e3a750dcca3727d576833351bfc09bbd858871f6 | Fix indent on test code for test/assembly broken in r1220 Review URL: https://chromiumcodereview.appspot.com/9429007 | test/assembly/gyptest-assembly.py | test/assembly/gyptest-assembly.py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A basic test of compiling assembler files.
"""
import sys
import TestGyp
# Assembly support is not wired up for MSVC yet, so the whole test is
# skipped on Windows (an empty test file passes trivially).
if sys.platform != 'win32':
  # TODO(bradnelson): get this working for windows.
  test = TestGyp.TestGyp(formats=['make', 'ninja', 'scons', 'xcode'])

  test.run_gyp('assembly.gyp', chdir='src')

  test.relocate('src', 'relocate/src')

  test.build('assembly.gyp', test.ALL, chdir='relocate/src')

  # The C program links against an assembly routine; its stdout must
  # match exactly.
  expect = """\
Hello from program.c
Got 42.
"""
  test.run_built_executable('program', chdir='relocate/src', stdout=expect)

  test.pass_test()
| #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A basic test of compiling assembler files.
"""
import sys
import TestGyp
if sys.platform != 'win32':
# TODO(bradnelson): get this working for windows.
test = TestGyp.TestGyp(formats=['make', 'ninja', 'scons', 'xcode'])
test.run_gyp('assembly.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('assembly.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Got 42.
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
| Python | 0.001074 |
e4e52abb5654804f847fb3894293de58f97c7c91 | Add new control test for Front-End | Front-End/src/swig/Siconos/tests/test_smc.py | Front-End/src/swig/Siconos/tests/test_smc.py | #!/usr/bin/env python
# this test is taken almost verbatim from RelayBiSimulation_OT2_noCplugin.py
def test_smc_1():
    """Relay bi-simulation test with a sliding-mode controller (Siconos).

    Builds a 2-DOF first-order linear system whose ``b`` term is driven
    by a sinusoid, attaches a linear sensor and a linearSMC_OT2
    actuator through a ControlManager, and time-steps the closed loop,
    recording time, state and z into ``dataPlot``.

    NOTE(review): mixes Python 2 print statements and print() calls;
    several imports (subplot, getMatrix, norm, savetxt, ...) are unused.
    """
    from Siconos.Kernel import FirstOrderLinearDS, Model, TimeDiscretisation,\
        TimeStepping, Moreau, ControlManager, linearSensor, linearSMC_OT2,\
        getMatrix, SimpleMatrix
    from matplotlib.pyplot import subplot, title, plot, grid, show
    from numpy import array, eye, empty, zeros, savetxt
    from math import ceil, sin
    from numpy.linalg import norm

    # Derive our own version of FirstOrderLinearDS
    class MyFOLDS(FirstOrderLinearDS):
        def computeb(self, time):
            # Time-varying forcing term combined with the z vector
            t = sin(50*time)
            tmpz = self.z()
            # XXX fix this !
            if len(tmpz) != 2:
                print("DEBUG z has length ", len(tmpz))
                return
            # XXX we need to find a smarter way to do things here
            # we need to convert from vector (sage) to arrayish
            u = [t, -t] + tmpz
            self.setb(u)

    # variable declaration
    ndof = 2  # Number of degrees of freedom of your system
    t0 = 0.0  # start time
    T = 1  # end time
    h = 1.0e-4  # time step for simulation
    hControl = 1.0e-2  # time step for control
    Xinit = 1.0  # initial position
    theta = 0.5
    N = ceil((T-t0)/h + 10)  # number of time steps
    outputSize = 5  # number of variable to store at each time step

    # Matrix declaration
    A = zeros((ndof,ndof))
    x0 = [Xinit, -Xinit]
    sensorC = eye(ndof)
    sensorD = zeros((ndof,ndof))
    Csurface = [0, 1.0]

    # Simple check: the control period must be at least one sim step
    if h > hControl:
        print "hControl must be bigger than h"
        exit(1)

    # Declaration of the Dynamical System
    processDS = MyFOLDS(x0, A)
    # XXX b is not automatically created ...
    processDS.setb([0, 0])
    # Model
    process = Model(t0, T)
    process.nonSmoothDynamicalSystem().insertDynamicalSystem(processDS)
    # time discretization
    processTD = TimeDiscretisation(t0, h)
    tSensor = TimeDiscretisation(t0, hControl)
    tActuator = TimeDiscretisation(t0, hControl)
    # Creation of the Simulation
    processSimulation = TimeStepping(processTD, 0)
    processSimulation.setName("plant simulation")
    # Declaration of the integrator
    processIntegrator = Moreau(processDS, theta)
    processSimulation.insertIntegrator(processIntegrator)
    # Actuator, Sensor & ControlManager
    control = ControlManager(process)
    sens = linearSensor(100, tSensor, process, sensorC, sensorD)
    control.addSensorPtr(sens)
    act = linearSMC_OT2(104, tActuator, process)
    act.addSensorPtr(sens)
    control.addActuatorPtr(act)

    # Initialization.
    process.initialize(processSimulation)
    control.initialize()
    act.setCsurfacePtr(Csurface)

    # This is not working right now
    #eventsManager = s.eventsManager()

    # Matrix for data storage: [t, x0, x1, z0, z1] per row
    dataPlot = empty((3*(N+1), outputSize))
    dataPlot[0, 0] = t0
    dataPlot[0, 1] = processDS.x()[0]
    dataPlot[0, 2] = processDS.x()[1]
    dataPlot[0, 3] = processDS.z()[0]
    dataPlot[0, 4] = processDS.z()[1]
    # Main loop
    k = 1
    while(processSimulation.nextTime() < T):
        processSimulation.computeOneStep()
        dataPlot[k, 0] = processSimulation.nextTime()
        dataPlot[k, 1] = processDS.x()[0]
        dataPlot[k, 2] = processDS.x()[1]
        dataPlot[k, 3] = processDS.z()[0]
        dataPlot[k, 4] = processDS.z()[1]
        k += 1
        processSimulation.nextStep()
    #    print processSimulation.nextTime()
    # Resize matrix to the number of steps actually taken
    dataPlot.resize(k, outputSize)
| Python | 0.000001 | |
1197f5885b2e7275d9a4f108c62bd2506816c8b1 | Create test_madagascar.py | test/countries/test_madagascar.py | test/countries/test_madagascar.py | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestMadagascar(unittest.TestCase):
    """Holiday checks for Madagascar (holidays.MG)."""

    def setUp(self):
        # Fresh Madagascar holiday calendar for every test
        self.holidays = holidays.MG()

    def test_new_years(self):
        # January 1st is a holiday in every year
        self.assertIn(date(2010, 1, 1), self.holidays)
        self.assertIn(date(2020, 1, 1), self.holidays)

    def test_mahery_fo(self):
        # March 29 (Mahery Fo) recurs every year
        self.assertIn(date(2010, 3, 29), self.holidays)
        self.assertIn(date(2015, 3, 29), self.holidays)
        self.assertIn(date(2022, 3, 29), self.holidays)

    def test_paska(self):
        # Easter Sunday and Easter Monday of 2022
        self.assertIn(date(2022, 4, 17), self.holidays)  # Andron'ny paska
        self.assertIn(date(2022, 4, 18), self.holidays)  # Alatsinain'ny Paska

    def test_not_holiday(self):
        # An ordinary day that is no Malagasy holiday
        self.assertNotIn(date(2022, 4, 20), self.holidays)
| Python | 0.000005 | |
d21743f2543f8d953a837d75bff0fcdb0105f4db | Add page extension for tracking page creation and modification dates. | feincms/module/page/extensions/changedate.py | feincms/module/page/extensions/changedate.py | """
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
    """Page-extension entry point: track creation/modification timestamps.

    Adds two non-editable DateTimeFields to the page model *cls* and
    wraps its save() so both fields are maintained automatically.
    *admin_cls* is unused here but part of the extension signature.
    """
    cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
    cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))

    orig_save = cls.save
    def save(page):
        # ``page`` plays the role of ``self`` once bound as cls.save
        now = datetime.now()
        if page.id is None:
            # First save: stamp the creation date as well
            page.creation_date = now
        page.modification_date = now
        orig_save(page)
    cls.save = save
1411daac4efd06b1208e19c3fce1a126230583cb | Use a proper mechanism for catching warnings | tinydb/utils.py | tinydb/utils.py | """
Utility functions.
"""
from contextlib import contextmanager
import warnings
class LRUCache(dict):
    """
    A simple LRU (least-recently-used) cache.

    Behaves like a dict while keeping ``self.lru``, a list of stored
    keys ordered from least- to most-recently used.  When the number of
    entries exceeds ``capacity``, the least-recently-used entry is
    evicted on insert.
    """

    def __init__(self, *args, **kwargs):
        """
        :param capacity: How many items to store before cleaning up old items
                         or ``None`` for an unlimited cache size
        """
        # NaN makes ``len(self) > self.capacity`` always False, which
        # disables eviction for the unlimited case.
        self.capacity = kwargs.pop('capacity', None) or float('nan')
        self.lru = []

        super(LRUCache, self).__init__(*args, **kwargs)

    def refresh(self, key):
        """
        Push a key to the head of the LRU queue
        """
        if key in self.lru:
            self.lru.remove(key)
        self.lru.append(key)

    def get(self, key, default=None):
        # Bug fix: only refresh keys that are actually stored.  The
        # original refreshed unconditionally, so a miss appended the
        # missing key to ``self.lru``; a later eviction would then call
        # ``self.pop`` with a key absent from the dict and raise
        # KeyError.
        if key in self:
            self.refresh(key)

        return super(LRUCache, self).get(key, default)

    def __getitem__(self, key):
        # Same fix as ``get``: a failing lookup must not pollute the
        # LRU queue before the KeyError propagates.
        if key in self:
            self.refresh(key)

        return super(LRUCache, self).__getitem__(key)

    def __setitem__(self, key, value):
        super(LRUCache, self).__setitem__(key, value)

        self.refresh(key)

        # Check, if the cache is full and we have to remove old items
        # If the queue is of unlimited size, self.capacity is NaN and
        # x > NaN is always False in Python and the cache won't be cleared.
        if len(self) > self.capacity:
            self.pop(self.lru.pop(0))

    def __delitem__(self, key):
        super(LRUCache, self).__delitem__(key)
        self.lru.remove(key)

    def clear(self):
        super(LRUCache, self).clear()
        del self.lru[:]
# Source: https://github.com/PythonCharmers/python-future/blob/466bfb2dfa36d865285dc31fe2b0c0a53ff0f181/future/utils/__init__.py#L102-L134
def with_metaclass(meta, *bases):
    """
    Function from jinja2/_compat.py. License: BSD.

    Use it like this::

        class BaseForm(object):
            pass

        class FormType(type):
            pass

        class Form(with_metaclass(FormType, BaseForm)):
            pass

    This requires a bit of explanation: the basic idea is to make a
    dummy metaclass for one level of class instantiation that replaces
    itself with the actual metaclass.  Because of internal type checks
    we also need to make sure that we downgrade the custom metaclass
    for one level to something closer to type (that's why __call__ and
    __init__ comes back from type etc.).

    This has the advantage over six.with_metaclass of not introducing
    dummy classes into the final MRO.
    """
    class Metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            # The temporary class below is created with this_bases=None;
            # only a real subclass definition reaches the meta(...) call.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return Metaclass('temporary_class', None, {})
@contextmanager
def catch_warning(warning_cls):
    """
    Context manager that turns warnings of type *warning_cls* into
    errors for the duration of the ``with`` block.

    The original warning filters are restored on exit, even if the
    block raises.
    """
    with warnings.catch_warnings():
        # Bug fix: the original installed a blanket ``error`` filter,
        # escalating *every* warning category and ignoring the
        # ``warning_cls`` argument entirely.  Restrict the filter to
        # the requested category.
        warnings.filterwarnings('error', category=warning_cls)
        yield
| """
Utility functions.
"""
from contextlib import contextmanager
import warnings
class LRUCache(dict):
    """
    A simple LRU cache.

    Behaves like a dict while keeping ``self.lru``, a list of keys
    ordered from least- to most-recently used; the oldest entry is
    evicted on insert once ``capacity`` is exceeded.
    """

    def __init__(self, *args, **kwargs):
        """
        :param capacity: How many items to store before cleaning up old items
                         or ``None`` for an unlimited cache size
        """
        # NaN makes ``len(self) > self.capacity`` always False, which
        # disables eviction for the unlimited case.
        self.capacity = kwargs.pop('capacity', None) or float('nan')
        self.lru = []

        super(LRUCache, self).__init__(*args, **kwargs)

    def refresh(self, key):
        """
        Push a key to the head of the LRU queue
        """
        if key in self.lru:
            self.lru.remove(key)
        self.lru.append(key)

    def get(self, key, default=None):
        # NOTE(review): refreshing before the lookup appends even
        # *missing* keys to ``self.lru``; a later eviction then pops a
        # key absent from the dict and raises KeyError.  Refreshing
        # only when ``key in self`` would fix this.
        self.refresh(key)

        return super(LRUCache, self).get(key, default)

    def __getitem__(self, key):
        # NOTE(review): same issue as ``get`` -- a failing lookup
        # pollutes the LRU queue before the KeyError propagates.
        self.refresh(key)

        return super(LRUCache, self).__getitem__(key)

    def __setitem__(self, key, value):
        super(LRUCache, self).__setitem__(key, value)

        self.refresh(key)

        # Check, if the cache is full and we have to remove old items
        # If the queue is of unlimited size, self.capacity is NaN and
        # x > NaN is always False in Python and the cache won't be cleared.
        if len(self) > self.capacity:
            self.pop(self.lru.pop(0))

    def __delitem__(self, key):
        super(LRUCache, self).__delitem__(key)
        self.lru.remove(key)

    def clear(self):
        super(LRUCache, self).clear()
        del self.lru[:]
# Source: https://github.com/PythonCharmers/python-future/blob/466bfb2dfa36d865285dc31fe2b0c0a53ff0f181/future/utils/__init__.py#L102-L134
def with_metaclass(meta, *bases):
    """
    Function from jinja2/_compat.py. License: BSD.

    Use it like this::

        class BaseForm(object):
            pass

        class FormType(type):
            pass

        class Form(with_metaclass(FormType, BaseForm)):
            pass

    This requires a bit of explanation: the basic idea is to make a
    dummy metaclass for one level of class instantiation that replaces
    itself with the actual metaclass.  Because of internal type checks
    we also need to make sure that we downgrade the custom metaclass
    for one level to something closer to type (that's why __call__ and
    __init__ comes back from type etc.).

    This has the advantage over six.with_metaclass of not introducing
    dummy classes into the final MRO.
    """
    class Metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            # The temporary class below is created with this_bases=None;
            # only a real subclass definition reaches the meta(...) call.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return Metaclass('temporary_class', None, {})
@contextmanager
def catch_warning(warning_cls):
    """Temporarily escalate *warning_cls* warnings to errors.

    Any pre-existing filter for the category is remembered and
    reinstated when the block exits.
    """
    # Remember an already-installed filter for this category (filter
    # tuples store the category at index 2).
    warning_filter = [f for f in warnings.filters if f[2] == warning_cls]
    warnings.filterwarnings(action="error", category=warning_cls)

    try:
        yield  # Run user code
    finally:
        if warning_filter:
            # Reset original filter
            warnings.filterwarnings(action=warning_filter[0][0],
                                    category=warning_cls)
        # NOTE(review): when no prior filter existed, the ``error``
        # filter installed above stays in place after exit -- confirm
        # whether that leak is intended.
| Python | 0 |
bcb9437fb99c2577c9ca9628c60b80becc2a24b3 | Add media_tags and a new filter for photo alignment normalization | organization/media/templatetags/media_tags.py | organization/media/templatetags/media_tags.py | # -*- coding: utf-8 -*-
from mezzanine.template import Library
register = Library()
@register.filter
def get_photo_alignment(value):
if value == 'left':
return 0
elif value == 'center':
return 0.5
return 1
| Python | 0 | |
ed0d0f913b209bf6ea8ec32d0aa10c31bc97e2c9 | create index on vote.mandate_id | alembic/versions/33f79ee8632_vote_mandate_id_inde.py | alembic/versions/33f79ee8632_vote_mandate_id_inde.py | revision = '33f79ee8632'
down_revision = '3abf407e34a'
from alembic import op
def upgrade():
op.create_index('vote_mandate_id_index', 'vote', ['mandate_id'])
def downgrade():
op.drop_index('vote_mandate_id_index')
| Python | 0.000001 | |
0676a5d8fb7ffeb9f1b84848fd849a181a8c1176 | renamed to gadgets | analytics/gadgets.py | analytics/gadgets.py | from analytics import settings
from analytics import models
from analytics.sites import gadgets
class BaseWidget(object):
    """Common state shared by all analytics gadget widgets.

    Stores the display title, the metric classes to show, the value
    type, the sampling frequency, the sample count and the gadget's
    width/height.
    """

    def __init__(self, title, metrics, value_type, frequency, samples, width, height):
        # Plain attribute capture; no validation is performed here.
        for attr_name, attr_value in (('title', title),
                                      ('metrics', metrics),
                                      ('value_type', value_type),
                                      ('frequency', frequency),
                                      ('samples', samples),
                                      ('width', width),
                                      ('height', height)):
            setattr(self, attr_name, attr_value)
class BarWidget(BaseWidget):
    # Marker subclass for bar-chart gadgets; no extra behaviour yet.
    pass

class NumberWidget(BaseWidget):
    # Marker subclass for single-number gadgets; no extra behaviour yet.
    pass

class Registrations(NumberWidget):
    def render(self):
        # Placeholder rendering -- returns a dummy string for now.
        return 'foo'
gadgets.register(Registrations('Registrations', [models.Registrations,], settings.COUNT, 'd', 30, 4, 1))
| Python | 0.9994 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.